From 5f2a8b3b3a2bc388bdb906555048404b743504a4 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 27 Mar 2024 20:57:04 +0800
Subject: [PATCH 001/282] [CoreEngine] 1. To better process statuses in
 the unified status center, we split the client runner and server runner into
 four base classes: agent, protocol manager, job runner manager, job runner.
 2. Add the unified account manager to bind into the MLOps backend. 3.
 Abstract the same design paradigm for launching and deployment scheduler with
 the following format: agent -> protocol manager -> job runner manager -> job
 runner. 4. For better debugging messages, we save the message records to
 local files for sender and receiver in the message center. 5. Write the class
 diagram and sequence diagram in the lark doc (
 https://fedml-inc.larksuite.com/wiki/NpTUwdXnciBlpQkxPkBu79k0sPc, section:
 FEDML Launch Python(v2) )

---
 python/fedml/__init__.py                      |    2 +-
 .../scheduler/master/base_master_agent.py     |  126 +
 .../master/base_master_job_runner.py          |  622 ++++
 .../master/base_master_job_runner_manager.py  |   73 +
 .../master/base_master_protocol_manager.py    |  667 ++++
 .../scheduler/master/cloud_server_manager.py  |  164 +
 .../scheduler/master/deploy_job_launcher.py   |   90 +
 .../scheduler/master/launch_job_runner.py     |   44 +
 .../master/launch_job_runner_manager.py       |   20 +
 .../scheduler/master/master_agent.py          |   28 +
 .../master/master_protocol_manager.py         |   36 +
 .../scheduler/master/server_login.py          |  406 +--
 .../scheduler/master/server_runner.py         | 2767 -----------------
 .../model_scheduler/device_client_runner.py   | 1335 --------
 .../model_scheduler/device_model_cache.py     |  193 +-
 .../model_scheduler/device_model_db.py        |   62 +-
 .../device_model_deployment.py                |  778 ++---
 .../model_scheduler/device_model_inference.py |   68 +-
 .../device_model_msg_object.py                |   64 +-
 .../device_replica_controller.py              |  437 +++
 .../model_scheduler/device_replica_handler.py |  138 +
 .../model_scheduler/device_server_runner.py   | 2160 -------------
 .../model_scheduler/job_runner_msg_sender.py  |  204 ++
 .../scheduler/model_scheduler/master_agent.py |   27 +
 .../model_scheduler/master_job_runner.py      |  578 ++++
 .../master_job_runner_manager.py              |   62 +
 .../master_protocol_manager.py                |  365 +++
 .../model_scheduler/model_device_client.py    |  140 +-
 .../model_scheduler/model_device_server.py    |  142 +-
 .../scheduler/model_scheduler/worker_agent.py |   27 +
 .../model_scheduler/worker_job_runner.py      |  489 +++
 .../worker_job_runner_manager.py              |   23 +
 .../worker_protocol_manager.py                |  195 ++
 .../scheduler_core/account_manager.py         |  460 +++
 .../scheduler_core/compute_cache_manager.py   |    9 +-
 .../scheduler_core/compute_status_cache.py    |   76 +
 .../scheduler_core/compute_status_db.py       |  123 +
 .../scheduler_core/endpoint_sync_protocol.py  |   25 +-
 .../scheduler_core/general_constants.py       |  193 ++
 .../scheduler_core/master_api_daemon.py       |   23 +-
 .../scheduler_core/message_center.py          |  239 +-
 .../scheduler_core/message_common.py          |   77 +
 .../scheduler/scheduler_core/ota_upgrade.py   |   99 +
 .../scheduler_base_job_runner.py              |  545 ++++
 .../scheduler_base_job_runner_manager.py      |   66 +
 .../scheduler_base_protocol_manager.py        |  260 ++
 .../scheduler/scheduler_core/status_center.py |  410 +++
 .../status_manager_protocols.py               |  303 ++
 .../scheduler/slave/base_slave_agent.py       |  139 +
 .../scheduler/slave/base_slave_job_runner.py  |  264 ++
 .../slave/base_slave_job_runner_manager.py    |   12 +
 .../slave/base_slave_protocol_manager.py      |  571 ++++
 .../computing/scheduler/slave/client_login.py |  335 +-
 .../scheduler/slave/client_runner.py          | 1775 -----------
 .../scheduler/slave/launch_job_runner.py      |   41 +
 .../slave/launch_job_runner_manager.py        |   22 +
 .../computing/scheduler/slave/slave_agent.py  |   26 +
 .../scheduler/slave/slave_protocol_manager.py |  104 +
 python/fedml/core/mlops/__init__.py           |   19 +-
 python/fedml/core/mlops/mlops_configs.py      |    2 -
 python/fedml/core/mlops/mlops_device_perfs.py |   95 +-
 python/fedml/core/mlops/mlops_metrics.py      |   90 +-
 .../customized_workflow.py                    |  794 ++++-
 .../deploy_image_job.yaml                     |   12 +-
 .../deploy_image_job/fedml_model_config.yaml  |   19 +-
 .../deploy_image_job/mnist_serve_main.py      |   37 +
 .../deploy_image_job/model/minist_model.py    |   11 +
 .../model/model_parms_from_mlops              |  Bin 0 -> 32188 bytes
 .../deploy_llm_job.yaml                       |   29 +
 .../deploy_llm_job/.gitignore                 |    1 +
 .../__init__.py                               |    0
 .../app/__init__.py                           |    0
 .../app/pipe/__init__.py                      |    0
 .../app/pipe/constants.py                     |    0
 .../app/pipe/instruct_pipeline.py             |    0
 .../config/__init__.py                        |    0
 .../deploy_llm_job/fedml_model_config.yaml    |   12 +
 .../main_entry.py                             |    0
 python/setup.py                               |    6 +-
 79 files changed, 10058 insertions(+), 9798 deletions(-)
 create mode 100755 python/fedml/computing/scheduler/master/base_master_agent.py
 create mode 100755 python/fedml/computing/scheduler/master/base_master_job_runner.py
 create mode 100755 python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
 create mode 100755 python/fedml/computing/scheduler/master/base_master_protocol_manager.py
 create mode 100755 python/fedml/computing/scheduler/master/cloud_server_manager.py
 create mode 100755 python/fedml/computing/scheduler/master/deploy_job_launcher.py
 create mode 100755 python/fedml/computing/scheduler/master/launch_job_runner.py
 create mode 100755 python/fedml/computing/scheduler/master/launch_job_runner_manager.py
 create mode 100755 python/fedml/computing/scheduler/master/master_agent.py
 create mode 100755 python/fedml/computing/scheduler/master/master_protocol_manager.py
 delete mode 100755 python/fedml/computing/scheduler/master/server_runner.py
 delete mode 100755 python/fedml/computing/scheduler/model_scheduler/device_client_runner.py
 create mode 100644 python/fedml/computing/scheduler/model_scheduler/device_replica_controller.py
 create mode 100644 python/fedml/computing/scheduler/model_scheduler/device_replica_handler.py
 delete mode 100755 python/fedml/computing/scheduler/model_scheduler/device_server_runner.py
 create mode 100755 python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py
 create mode 100755 python/fedml/computing/scheduler/model_scheduler/master_agent.py
 create mode 100755 python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
 create mode 100755 python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
 create mode 100755 python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
 create mode 100755 python/fedml/computing/scheduler/model_scheduler/worker_agent.py
 create mode 100755 python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
 create mode 100755 python/fedml/computing/scheduler/model_scheduler/worker_job_runner_manager.py
 create mode 100755 python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
 create mode 100755 python/fedml/computing/scheduler/scheduler_core/account_manager.py
 create mode 100755 python/fedml/computing/scheduler/scheduler_core/compute_status_cache.py
 create mode 100755 python/fedml/computing/scheduler/scheduler_core/compute_status_db.py
 create mode 100755 python/fedml/computing/scheduler/scheduler_core/general_constants.py
 create mode 100755 python/fedml/computing/scheduler/scheduler_core/message_common.py
 create mode 100755 python/fedml/computing/scheduler/scheduler_core/ota_upgrade.py
 create mode 100755 python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
 create mode 100755 python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
 create mode 100755 python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
 create mode 100755 python/fedml/computing/scheduler/scheduler_core/status_center.py
 create mode 100755 python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
 create mode 100755 python/fedml/computing/scheduler/slave/base_slave_agent.py
 create mode 100755 python/fedml/computing/scheduler/slave/base_slave_job_runner.py
 create mode 100755 python/fedml/computing/scheduler/slave/base_slave_job_runner_manager.py
 create mode 100755 python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
 delete mode 100755 python/fedml/computing/scheduler/slave/client_runner.py
 create mode 100755 python/fedml/computing/scheduler/slave/launch_job_runner.py
 create mode 100755 python/fedml/computing/scheduler/slave/launch_job_runner_manager.py
 create mode 100755 python/fedml/computing/scheduler/slave/slave_agent.py
 create mode 100755 python/fedml/computing/scheduler/slave/slave_protocol_manager.py
 create mode 100644 python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/mnist_serve_main.py
 create mode 100644 python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/model/minist_model.py
 create mode 100644 python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/model/model_parms_from_mlops
 create mode 100755 python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job.yaml
 create mode 100644 python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/.gitignore
 rename python/fedml/workflow/driver_example/customized_job_example/{deploy_image_job => deploy_llm_job}/__init__.py (100%)
 rename python/fedml/workflow/driver_example/customized_job_example/{deploy_image_job => deploy_llm_job}/app/__init__.py (100%)
 rename python/fedml/workflow/driver_example/customized_job_example/{deploy_image_job => deploy_llm_job}/app/pipe/__init__.py (100%)
 rename python/fedml/workflow/driver_example/customized_job_example/{deploy_image_job => deploy_llm_job}/app/pipe/constants.py (100%)
 rename python/fedml/workflow/driver_example/customized_job_example/{deploy_image_job => deploy_llm_job}/app/pipe/instruct_pipeline.py (100%)
 rename python/fedml/workflow/driver_example/customized_job_example/{deploy_image_job => deploy_llm_job}/config/__init__.py (100%)
 create mode 100644 python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/fedml_model_config.yaml
 rename python/fedml/workflow/driver_example/customized_job_example/{deploy_image_job => deploy_llm_job}/main_entry.py (100%)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index b06c6264a7..f6659cd622 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -34,7 +34,7 @@
 _global_training_type = None
 _global_comm_backend = None
 
-__version__ = "0.8.27.dev2"
+__version__ = "0.8.29.dev4"
 
 
 # This is the deployment environment used for different roles (RD/PM/BD/Public Developers). Potential VALUE: local, dev, test, release
diff --git a/python/fedml/computing/scheduler/master/base_master_agent.py b/python/fedml/computing/scheduler/master/base_master_agent.py
new file mode 100755
index 0000000000..66bc35d96f
--- /dev/null
+++ b/python/fedml/computing/scheduler/master/base_master_agent.py
@@ -0,0 +1,126 @@
+
+from multiprocessing import Process
+from ..comm_utils import sys_utils
+from ..comm_utils.job_cleanup import JobCleanup
+from ....core.mlops import MLOpsRuntimeLog, MLOpsMetrics
+from ..scheduler_core.master_api_daemon import MasterApiDaemon
+from ..scheduler_core.account_manager import FedMLAccountManager
+from ..scheduler_core.general_constants import GeneralConstants
+from abc import ABC, abstractmethod
+
+
+class FedMLBaseMasterAgent(ABC):
+
+    def __init__(self):
+        self.agent_args = None
+        self.master_api_daemon = None
+        self.master_api_process = None
+        self.mlops_metrics = MLOpsMetrics()
+        self.status_reporter = None
+        self.enable_simulation_cloud_agent = True
+        self.use_local_process_as_cloud_server = False
+        self.protocol_mgr = None
+
+    def login(
+            self, user_id, api_key=None, device_id=None,
+            os_name=None, role=None
+    ):
+        # Login account
+        login_result = FedMLAccountManager.get_instance().login(
+            user_id, api_key=api_key, device_id=device_id,
+            os_name=os_name, role=role
+        )
+        if login_result is not None:
+            self.agent_args = login_result
+        else:
+            return None
+
+        # Save the bound info
+        self._save_agent_info(
+            login_result.current_device_id + "." + login_result.os_name, login_result.edge_id)
+
+        # Init the logs for protocol manager
+        self._init_logs(login_result, login_result.edge_id)
+
+        # Create the protocol manager to communicate with the slave agents and MLOps.
+        self._create_protocol_manager(role, login_result)
+
+        # Initialize the protocol manager
+        # noinspection PyBroadException
+        try:
+            self._initialize_protocol_manager()
+        except Exception as e:
+            FedMLAccountManager.write_login_failed_file(is_client=False)
+            self.protocol_mgr.stop()
+            raise e
+
+        # Start the protocol manager to process the messages from MLOps and slave agents.
+        self.protocol_mgr.start()
+
+    @staticmethod
+    def logout():
+        GeneralConstants.cleanup_run_process(None, is_master=True)
+        sys_utils.cleanup_all_fedml_server_api_processes()
+
+    def _create_protocol_manager(self, role, login_result):
+        if self.protocol_mgr is not None:
+            return
+        self.protocol_mgr = self._generate_protocol_manager_instance(
+            login_result, agent_config=login_result.agent_config)
+        self.protocol_mgr.run_as_edge_server_and_agent = True \
+            if role == FedMLAccountManager.ROLE_EDGE_SERVER else False
+        self.protocol_mgr.run_as_cloud_agent = True if role == FedMLAccountManager.ROLE_CLOUD_AGENT else False
+        self.protocol_mgr.run_as_cloud_server = True if role == FedMLAccountManager.ROLE_CLOUD_SERVER else False
+        self.protocol_mgr.args = login_result
+        self.protocol_mgr.edge_id = login_result.edge_id
+        self.protocol_mgr.unique_device_id = login_result.unique_device_id
+        self.protocol_mgr.user_name = login_result.user_name
+        self.protocol_mgr.agent_config = login_result.agent_config
+        self.protocol_mgr.enable_simulation_cloud_agent = self.enable_simulation_cloud_agent
+        self.protocol_mgr.use_local_process_as_cloud_server = self.use_local_process_as_cloud_server
+
+    def _initialize_protocol_manager(self):
+        # Init local database
+        self._init_database()
+
+        # Initialize the master protocol
+        self.protocol_mgr.initialize()
+
+        # Report the IDLE status to MLOps
+        self.mlops_metrics.report_server_training_status(
+            None, GeneralConstants.MSG_MLOPS_SERVER_STATUS_IDLE, edge_id=self.agent_args.edge_id)
+
+        # Cleanup data when startup
+        JobCleanup.get_instance().sync_data_on_startup(self.agent_args.edge_id, is_client=False)
+
+        # Start the API server on master agent
+        self.master_api_daemon = MasterApiDaemon()
+        self.master_api_process = Process(target=self.master_api_daemon.run)
+        self.master_api_process.start()
+
+    def _init_logs(self, agent_args, edge_id):
+        # Init runtime logs
+        in_args = agent_args
+        in_args.log_file_dir = self._get_log_file_dir()
+        in_args.run_id = 0
+        in_args.role = "server"
+        in_args.edge_id = edge_id
+        in_args.using_mlops = True
+        in_args.server_agent_id = edge_id
+        MLOpsRuntimeLog.get_instance(in_args).init_logs()
+
+    @abstractmethod
+    def _get_log_file_dir(self):
+        pass
+
+    @abstractmethod
+    def _save_agent_info(self, unique_device_id, edge_id):
+        pass
+
+    @abstractmethod
+    def _init_database(self):
+        pass
+
+    @abstractmethod
+    def _generate_protocol_manager_instance(self, args, agent_config=None):
+        return None
diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py
new file mode 100755
index 0000000000..3dbc1fd891
--- /dev/null
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py
@@ -0,0 +1,622 @@
+
+import json
+import logging
+import multiprocessing
+import platform
+import queue
+import os
+import time
+import traceback
+from ..scheduler_entry.constants import Constants
+from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
+from ..master.server_constants import ServerConstants
+from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
+from ..comm_utils import sys_utils
+from .server_data_interface import FedMLServerDataInterface
+from ....core.mlops.mlops_utils import MLOpsUtils
+from ..scheduler_core.log_manager import LogsManager
+from ..scheduler_core.metrics_manager import MetricsManager
+from fedml.utils.debugging import debug
+from ..scheduler_core.status_center import JobStatus
+from ..scheduler_core.compute_cache_manager import ComputeCacheManager
+from multiprocessing import Process, Queue
+from ..scheduler_core.general_constants import GeneralConstants
+from ..scheduler_core.scheduler_base_job_runner import FedMLSchedulerBaseJobRunner, RunnerError, RunnerCompletedError
+from abc import ABC, abstractmethod
+
+
+class FedMLBaseMasterJobRunner(FedMLSchedulerBaseJobRunner, ABC):
+    debug_cloud_server = False
+
+    def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id=0,
+                 cuda_visible_gpu_ids_str=None,
+                 agent_data_dir=None, agent_package_download_dir=None,
+                 agent_package_unzip_dir=None, agent_log_file_dir=None):
+        FedMLSchedulerBaseJobRunner.__init__(
+            self, args, edge_id=edge_id, request_json=request_json, agent_config=agent_config, run_id=run_id,
+            cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str, agent_data_dir=agent_data_dir,
+            agent_package_download_dir=agent_package_download_dir,
+            agent_package_unzip_dir=agent_package_unzip_dir,
+            agent_log_file_dir=agent_log_file_dir,
+            is_master_runner=True
+        )
+
+        self.run_edge_id_status_queue = Queue()
+        self.run_metrics_queue = Queue()
+        self.run_events_queue = Queue()
+        self.run_artifacts_queue = Queue()
+        self.run_logs_queue = Queue()
+        self.run_edge_device_info_queue = Queue()
+        self.run_edge_device_info_global_queue = Queue()
+        self.run_extend_queue_list = None
+        self.async_check_timeout = 0
+        self.enable_async_cluster = False
+        self.origin_fedml_config_object = None
+        self.server_agent_id = 0
+        if request_json is not None:
+            self.server_agent_id = request_json.get("server_id", 0)
+        self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data")
+        self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data")
+        self.fedml_data_dir = self.fedml_data_base_package_dir
+        self.fedml_config_dir = os.path.join("/", "fedml", "conf")
+
+    @debug
+    def run(
+            self, process_event, completed_event, edge_id_status_queue=None,
+            edge_device_info_queue=None, run_metrics_queue=None, run_event_queue=None,
+            run_artifacts_queue=None, run_logs_queue=None, edge_device_info_global_queue=None,
+            run_extend_queue_list=None, sender_message_center_queue=None, listener_message_queue=None,
+            status_center_queue=None
+    ):
+        print(f"Master job runner process id {os.getpid()}, run id {self.run_id}")
+
+        if platform.system() != "Windows":
+            os.setsid()
+
+        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
+        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')
+
+        self.run_process_event = process_event
+        self.run_process_completed_event = completed_event
+        try:
+            MLOpsUtils.set_ntp_offset(self.ntp_offset)
+
+            self.rebuild_message_status_center(sender_message_center_queue, listener_message_queue, status_center_queue)
+
+            self.run_impl(
+                edge_id_status_queue, edge_device_info_queue, run_metrics_queue,
+                run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue,
+                run_extend_queue_list=run_extend_queue_list, sender_message_queue=sender_message_center_queue,
+                listener_message_queue=listener_message_queue, status_center_queue=status_center_queue
+            )
+        except RunnerError:
+            logging.info("Runner stopped.")
+            self.status_reporter.report_server_id_status(
+                self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED, edge_id=self.edge_id,
+                server_id=self.edge_id, server_agent_id=self.edge_id)
+        except RunnerCompletedError:
+            logging.info("Runner completed.")
+        except Exception as e:
+            logging.error("Runner exits with exceptions. {}".format(traceback.format_exc()))
+            self.status_reporter.report_server_id_status(
+                self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
+                server_id=self.edge_id, server_agent_id=self.edge_id)
+        finally:
+            logging.info("Release resources.")
+            self._process_run_metrics_queue(run_metrics_queue)
+            self._process_run_logs_queue(run_logs_queue)
+            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id)
+            if self.mlops_metrics is not None:
+                self.mlops_metrics.stop_sys_perf()
+            time.sleep(3)
+            ServerConstants.cleanup_run_process(self.run_id)
+            ServerConstants.cleanup_learning_process(self.run_id)
+            ServerConstants.cleanup_bootstrap_process(self.run_id)
+
+    @debug
+    @abstractmethod
+    def run_impl(
+            self, edge_id_status_queue, edge_device_info_queue, run_metrics_queue,
+            run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue,
+            run_extend_queue_list=None, sender_message_queue=None, listener_message_queue=None,
+            status_center_queue=None
+    ):
+        run_id = self.request_json["runId"]
+        run_config = self.request_json["run_config"]
+        data_config = run_config["data_config"]
+        edge_ids = self.request_json["edgeids"]
+
+        self.check_runner_stop_event()
+
+        self.run_id = run_id
+        self.args.run_id = self.run_id
+        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
+
+        logging.info("Detect all status of Edge ids: " + str(edge_ids))
+
+        status_ok, active_edge_info_dict, inactivate_edges = self.detect_edges_status(
+            edge_device_info_queue, edge_device_info_global_queue=edge_device_info_global_queue,
+            callback_when_edges_ready=self.send_training_request_to_edges)
+        logging.info(f"Status OK: {status_ok}, Active edge info dict: {active_edge_info_dict}, "
+                     f"inactivate edges: {inactivate_edges}")
+        if not status_ok:
+            logging.error(f"Status of edge device is not OK. Active edge info dict: {active_edge_info_dict}, "
+                          f"Inactivate edges: {inactivate_edges}")
+            return
+
+        if not self.should_continue_run_job(run_id):
+            if FedMLBaseMasterJobRunner.debug_cloud_server:
+                while True:
+                    time.sleep(30)
+            # Check if the run status is normal
+            self.aggregate_run_metrics_logs(
+                run_id, edge_ids, edge_id_status_queue, edge_device_info_queue,
+                edge_device_info_global_queue,
+                run_metrics_queue, run_logs_queue)
+            return
+
+        # Start the server job
+        self.start_runner_process(
+            run_id, self.request_json, edge_id=self.edge_id, is_server_job=True,
+            sender_message_queue=sender_message_queue,
+            listener_message_queue=listener_message_queue,
+            status_center_queue=status_center_queue
+        )
+
+        # Check if the run status is normal
+        self.aggregate_run_metrics_logs(
+            run_id, edge_ids, edge_id_status_queue, edge_device_info_queue,
+            edge_device_info_global_queue,
+            run_metrics_queue, run_logs_queue)
+
+    @abstractmethod
+    def _generate_extend_queue_list(self):
+        return list()
+
+    def aggregate_run_metrics_logs(
+            self, run_id, edge_id_list, edge_id_status_queue, edge_device_info_queue,
+            edge_device_info_global_queue, run_metrics_queue, run_logs_queue):
+
+        ComputeCacheManager.get_instance().set_redis_params()
+
+        while True:
+            self.check_runner_stop_event()
+
+            # Process run metrics
+            self._process_run_metrics_queue(run_metrics_queue)
+
+            # Process run logs
+            self._process_run_logs_queue(run_logs_queue)
+
+            # Check the job status
+            job_status = ComputeCacheManager.get_instance().get_status_cache().get_job_status(run_id)
+            if JobStatus.is_job_completed(job_status):
+                break
+
+    def _process_run_metrics_queue(self, run_metrics_queue):
+        # Fetch metrics from the run metrics queue
+        while True:
+            try:
+                metrics_item = run_metrics_queue.get(block=False, timeout=3)
+                MetricsManager.get_instance().save_metrics(metrics_item)
+                metric_json = json.loads(metrics_item)
+                if metric_json.get("is_endpoint", False):
+                    metric_json.pop("is_endpoint")
+                    self.mlops_metrics.report_endpoint_metric({}, payload=json.dumps(metric_json))
+                else:
+                    self.mlops_metrics.report_server_training_metric({}, payload=metrics_item)
+            except queue.Empty as e:  # If queue is empty, then break loop
+                break
+
+    def _process_run_logs_queue(self, run_logs_queue):
+        # Fetch logs from the run logs queue
+        while True:
+            try:
+                logs_item = run_logs_queue.get(block=False, timeout=3)
+                LogsManager.save_logs(logs_item)
+            except queue.Empty as e:  # If queue is empty, then break loop
+                break
+
+    def run_server_job(
+            self, process_event, completed_event, edge_id_status_queue=None,
+            edge_device_info_queue=None, run_metrics_queue=None,
+            run_event_queue=None, run_artifacts_queue=None, run_logs_queue=None,
+            sender_message_queue=None, listener_message_queue=None,
+            edge_device_info_global_queue=None, status_center_queue=None
+    ):
+        print(f"Server runner process id {os.getpid()}, run id {self.run_id}")
+
+        if platform.system() != "Windows":
+            os.setsid()
+
+        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
+        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')
+
+        self.run_process_event = process_event
+        self.run_process_completed_event = completed_event
+        try:
+            MLOpsUtils.set_ntp_offset(self.ntp_offset)
+
+            self.rebuild_message_status_center(sender_message_queue, listener_message_queue, status_center_queue)
+
+            self.run_server_job_impl(process_event, completed_event,
+                                     message_center_queue=sender_message_queue)
+        except RunnerError:
+            logging.info("Runner stopped.")
+            self.status_reporter.report_server_id_status(
+                self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED, edge_id=self.edge_id,
+                server_id=self.edge_id, server_agent_id=self.edge_id)
+        except RunnerCompletedError:
+            logging.info("Runner completed.")
+        except Exception as e:
+            logging.error("Runner exits with exceptions. {}".format(traceback.format_exc()))
+            self.status_reporter.report_server_id_status(
+                self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
+                server_id=self.edge_id, server_agent_id=self.edge_id)
+        finally:
+            logging.info("Release resources.")
+            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id)
+            if self.mlops_metrics is not None:
+                self.mlops_metrics.stop_sys_perf()
+            time.sleep(3)
+            ServerConstants.cleanup_run_process(self.run_id)
+            ServerConstants.cleanup_learning_process(self.run_id)
+            ServerConstants.cleanup_bootstrap_process(self.run_id)
+
+    def run_server_job_impl(self, process_event, completed_event,
+                            message_center_queue=None):
+        run_id = self.request_json["runId"]
+        run_config = self.request_json["run_config"]
+        data_config = run_config["data_config"]
+        edge_ids = self.request_json["edgeids"]
+
+        self.check_runner_stop_event()
+
+        self.run_id = run_id
+        self.args.run_id = self.run_id
+        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
+
+        # get training params
+        private_local_data_dir = data_config.get("privateLocalData", "")
+        is_using_local_data = 0
+        # if private_local_data_dir is not None and len(str(private_local_data_dir).strip(' ')) > 0:
+        #     is_using_local_data = 1
+
+        # start a run according to the hyper-parameters
+        # fedml_local_data_dir = self.cur_dir + "/fedml_data/run_" + run_id_str + "_edge_" + str(edge_id)
+        fedml_local_data_dir = os.path.join(self.cur_dir, "fedml_data")
+        fedml_local_config_dir = os.path.join(self.cur_dir, "fedml_config")
+        if is_using_local_data:
+            fedml_local_data_dir = private_local_data_dir
+        self.fedml_data_dir = self.fedml_data_local_package_dir
+
+        self.check_runner_stop_event()
+
+        logging.info("download packages and run the bootstrap script...")
+
+        # update local config with real time parameters from server and dynamically replace variables value
+        unzip_package_path, fedml_config_object = self.update_local_fedml_config(run_id, run_config)
+        if unzip_package_path is None or fedml_config_object is None:
+            logging.info("failed to update local fedml config.")
+            self.check_runner_stop_event()
+            self.report_exception_status(run_id)
+            return
+
+        logging.info("cleanup the previous aggregation process and check downloaded packages...")
+
+        entry_file_config = fedml_config_object["entry_config"]
+        dynamic_args_config = fedml_config_object["dynamic_args"]
+        entry_file = str(entry_file_config["entry_file"]).replace('\\', os.sep).replace('/', os.sep)
+        entry_file = os.path.basename(entry_file)
+        conf_file = entry_file_config["conf_file"]
+        conf_file = str(conf_file).replace('\\', os.sep).replace('/', os.sep)
+        ServerConstants.cleanup_learning_process(run_id)
+        self.check_runner_stop_event()
+        if not os.path.exists(unzip_package_path):
+            logging.info("failed to unzip file.")
+            self.check_runner_stop_event()
+            self.report_exception_status(run_id)
+            return
+        os.chdir(os.path.join(unzip_package_path, "fedml"))
+
+        self.check_runner_stop_event()
+
+        logging.info("starting the server user process...")
+
+        entry_file_full_path = os.path.join(unzip_package_path, "fedml", entry_file)
+        conf_file_full_path = os.path.join(unzip_package_path, "fedml", conf_file)
+        logging.info("                          ")
+        logging.info("                          ")
+        logging.info("====Your Run Logs Begin===")
+
+        process, is_launch_task, error_list = self.execute_job_task(
+            unzip_package_path=unzip_package_path, entry_file_full_path=entry_file_full_path,
+            conf_file_full_path=conf_file_full_path, dynamic_args_config=dynamic_args_config,
+            fedml_config_object=self.fedml_config_object)
+
+        logging.info("====Your Run Logs End===")
+        logging.info("                        ")
+        logging.info("                        ")
+
+        ret_code, out, err = process.returncode, None, None
+        is_run_ok = sys_utils.is_runner_finished_normally(process.pid)
+        if is_launch_task:
+            is_run_ok = True
+        if error_list is not None and len(error_list) > 0:
+            is_run_ok = False
+        if ret_code is None or ret_code <= 0:
+            self.check_runner_stop_event()
+
+            if is_run_ok:
+                if out is not None:
+                    out_str = sys_utils.decode_our_err_result(out)
+                    if out_str != "":
+                        logging.info("{}".format(out_str))
+
+                self.status_reporter.report_server_id_status(
+                    run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, edge_id=self.edge_id,
+                    server_id=self.edge_id, server_agent_id=self.edge_id)
+
+                if is_launch_task:
+                    sys_utils.log_return_info(f"job {run_id}", 0)
+                else:
+                    sys_utils.log_return_info(entry_file, 0)
+        else:
+            is_run_ok = False
+
+        if not is_run_ok:
+            # If the run status is killed or finished, then return with the normal state.
+            current_job = FedMLServerDataInterface.get_instance().get_job_by_id(run_id)
+            if current_job is not None and (current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED or
+                                            current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED):
+                return
+
+            self.check_runner_stop_event()
+
+            logging.error("failed to run the aggregation process...")
+
+            if err is not None:
+                err_str = sys_utils.decode_our_err_result(err)
+                if err_str != "":
+                    logging.error("{}".format(err_str))
+
+            if is_launch_task:
+                sys_utils.log_return_info(f"job {run_id}", ret_code)
+            else:
+                sys_utils.log_return_info(entry_file, ret_code)
+
+            self.report_exception_status(run_id)
+
+    @abstractmethod
+    def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None):
+        # Factory hook: concrete master runners must return the job-runner instance
+        # used to execute this run. The abstract base returns None.
+        return None
+
+    def start_runner_process(
+        self, run_id, request_json, edge_id=None, is_server_job=False,
+        sender_message_queue=None, listener_message_queue=None,
+        status_center_queue=None,
+    ):
+        """
+        Spawn a child process that executes this run via a freshly created job runner.
+
+        The runner instance comes from the subclass factory `_generate_job_runner_instance`;
+        the shared multiprocessing events/queues are wired into it so the parent can signal
+        stop/completion and feed edge status, metrics, events, artifacts and logs.
+        Returns the started `multiprocessing.Process` (its pid is persisted via
+        `ServerConstants.save_run_process`).
+        """
+        server_runner = self._generate_job_runner_instance(
+            self.args, run_id=run_id, request_json=request_json,
+            agent_config=self.agent_config, edge_id=edge_id
+        )
+
+        run_id_str = str(run_id)
+        # NOTE(review): run_id_str is not used below in this method.
+        server_runner.edge_id = self.edge_id
+        server_runner.server_agent_id = self.server_agent_id
+        server_runner.start_request_json = json.dumps(request_json)
+        # Fresh events per run: set() on these from the parent asks the child to stop /
+        # marks completion.
+        self.run_process_event = multiprocessing.Event()
+        server_runner.run_process_event = self.run_process_event
+        self.run_process_completed_event = multiprocessing.Event()
+        server_runner.run_process_completed_event = self.run_process_completed_event
+        server_runner.edge_id_status_queue = self.run_edge_id_status_queue
+        server_runner.edge_device_info_queue = self.run_edge_device_info_queue
+        self.run_extend_queue_list = self._generate_extend_queue_list()
+        # `run` for normal runs, `run_server_job` when the server itself hosts the job.
+        self.run_process = Process(
+            target=server_runner.run if not is_server_job else server_runner.run_server_job, args=(
+                self.run_process_event, self.run_process_completed_event, self.run_edge_id_status_queue,
+                self.run_edge_device_info_queue, self.run_metrics_queue, self.run_events_queue,
+                self.run_artifacts_queue, self.run_logs_queue, self.run_edge_device_info_global_queue,
+                self.run_extend_queue_list, sender_message_queue, listener_message_queue,  status_center_queue
+            )
+        )
+        self.run_process.start()
+        ServerConstants.save_run_process(run_id, self.run_process.pid)
+        return self.run_process
+
+    def put_run_edge_device_info_to_queue(self, run_id, device_info):
+        # Enqueue an edge device-info payload for consumption by detect_edges_status;
+        # lazily creates the queue on first use.
+        run_id_str = str(run_id)
+        # NOTE(review): run_id_str is unused — the queue is shared, not keyed by run id.
+        if self.run_edge_device_info_queue is None:
+            self.run_edge_device_info_queue = Queue()
+        self.run_edge_device_info_queue.put(device_info)
+
+    def should_continue_run_job(self, run_id):
+        """
+        Decide whether the master should keep running the server-side job logic.
+
+        Returns True for federate jobs, jobs without a job_yaml, and FedML-framework
+        jobs. For non-FedML frameworks it reports RUNNING to the status reporter and
+        returns False (the job is delegated to the edges).
+        """
+        run_config = self.request_json["run_config"]
+        run_params = run_config.get("parameters", {})
+        job_yaml = run_params.get("job_yaml", {})
+        # job_yaml_default_none distinguishes "key absent" from "empty dict".
+        job_yaml_default_none = run_params.get("job_yaml", None)
+        framework_type = job_yaml.get("framework_type", None)
+        job_type = job_yaml.get("job_type", None)
+        # Fall back to the legacy "task_type" key, defaulting to TRAIN.
+        job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type
+        if job_yaml_default_none is not None:
+            if job_type == Constants.JOB_TASK_TYPE_FEDERATE:
+                return True
+
+            if framework_type is None or framework_type != Constants.JOB_FRAMEWORK_TYPE_FEDML:
+                self.status_reporter.report_server_id_status(
+                    run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_RUNNING, edge_id=self.edge_id,
+                    server_id=self.edge_id, server_agent_id=self.edge_id)
+                return False
+
+        return True
+
+    @debug
+    def detect_edges_status(
+            self, edge_device_info_queue, edge_device_info_global_queue=None, callback_when_edges_ready=None,
+            status_timeout=None,
+            need_to_trigger_exception=True, status_check_context=None, given_edge_ids=None,
+            callback_when_detecting=None, args_for_callback_when_detecting=None
+    ):
+        """
+        Poll all edges of the current run until every one responds with device info,
+        or the timeout elapses.
+
+        Sends a status-check message to each edge, then drains `edge_device_info_queue`
+        every `status_check_sleep_seconds` (10s), re-pinging edges that have not yet
+        answered. When all edges respond, `callback_when_edges_ready` is invoked (if
+        given) and (True, active_edge_info_dict, inactivate_edges) is returned. On
+        timeout (default 120s, or `status_timeout`), reports FAILED + exception status
+        when `need_to_trigger_exception` and returns (False, ...).
+
+        NOTE(review): `edge_device_info_global_queue` is accepted but never read, and
+        `edge_info_global_dict` is always empty, so the "cached status" branches below
+        can never trigger — presumably leftover from an earlier caching design; verify.
+        """
+        run_id = self.request_json["runId"]
+        run_id_str = str(run_id)
+        edge_id_list = self.request_json["edgeids"]
+        if given_edge_ids is not None:
+            edge_id_list = given_edge_ids
+
+        # Init realtime status of all edges
+        run_edges_realtime_status = dict()
+        run_edges_realtime_status[run_id_str] = dict()
+
+        edge_info_global_dict = dict()
+
+        # Send status message to all edges
+        allowed_cache_edge_status_time = 60
+        for edge_id in edge_id_list:
+            # Check if the edge status was filled allowed_cache_edge_status_time seconds ago,
+            # if so no more checking message would be sent.
+            edge_info = edge_info_global_dict.get(edge_id, None)
+            if edge_info is not None:
+                timestamp = edge_info.get("timestamp", None)
+                time_interval = time.time() - timestamp
+                if time_interval <= allowed_cache_edge_status_time:
+                    continue
+
+            self.send_status_check_msg(run_id, edge_id, self.edge_id, context=status_check_context)
+        time.sleep(3)
+
+        total_sleep_seconds = 0
+        status_check_sleep_seconds = 10
+        allowed_status_check_sleep_seconds = 60 * 2 if status_timeout is None else status_timeout
+        allowed_status_check_sleep_seconds_for_async = 30
+        inactivate_edges = list()
+        active_edge_info_dict = dict()
+        while True:
+            if callback_when_detecting is not None:
+                callback_when_detecting(args_for_callback_when_detecting)
+
+            # Fetch edge info from the edge status queue, which will be added to realtime status map
+            while True:
+                self.check_runner_stop_event()
+
+                try:
+                    # NOTE(review): with block=False the timeout argument is ignored;
+                    # this is a non-blocking drain of the queue.
+                    edge_info = edge_device_info_queue.get(block=False, timeout=1)
+                    if edge_info is not None:
+                        edge_id = edge_info.get("edge_id", None)
+                        if edge_id is not None:
+                            run_edges_realtime_status[run_id_str][edge_id] = edge_info
+                except queue.Empty as e:  # If queue is empty, then break loop
+                    break
+
+            self.check_runner_stop_event()
+
+            # Check all edges which don't send response status successfully
+            # and retry to send the status checking message.
+            active_edges_count = 0
+            inactivate_edges.clear()
+            active_edge_info_dict.clear()
+            for edge_id in edge_id_list:
+                edge_info_dict = run_edges_realtime_status.get(run_id_str, {})
+                # Edge ids may arrive as int or str; check both keys.
+                edge_info = edge_info_dict.get(edge_id, None)
+                edge_info = edge_info_dict.get(str(edge_id), None) if edge_info is None else edge_info
+                if edge_info is not None:
+                    active_edges_count += 1
+                    active_edge_info_dict[str(edge_id)] = edge_info
+                else:
+                    # Check if the edge status was filled allowed_cache_edge_status_time seconds ago,
+                    # if so no more checking message would be sent.
+                    edge_info = edge_info_global_dict.get(edge_id, None)
+                    if edge_info is not None:
+                        timestamp = edge_info.get("timestamp", None)
+                        time_interval = time.time() - timestamp
+                        if time_interval <= allowed_cache_edge_status_time:
+                            active_edges_count += 1
+                            active_edge_info_dict[str(edge_id)] = edge_info
+                            continue
+
+                    inactivate_edges.append(edge_id)
+                    self.send_status_check_msg(run_id, edge_id, self.edge_id, context=status_check_context)
+
+            # If all edges are ready then send the starting job message to them
+            if active_edges_count == len(edge_id_list):
+                logging.info(f"All edges are ready. Active edge id list is as follows. {active_edge_info_dict}")
+                if callback_when_edges_ready is not None:
+                    logging.info("All edges are ready. Start to process the callback function.")
+                    callback_when_edges_ready(active_edge_info_dict=active_edge_info_dict)
+                else:
+                    logging.info("All edges are ready. No callback function to process.")
+                break
+            else:
+                logging.info(f"All edges are not ready. Active edge id list: {active_edge_info_dict}, "
+                             f"Inactive edge id list: {inactivate_edges}")
+
+            # Check if runner needs to stop and sleep specific time
+            self.check_runner_stop_event()
+            time.sleep(status_check_sleep_seconds)
+            total_sleep_seconds += status_check_sleep_seconds
+
+            # Check if the status response message has timed out to receive
+            if total_sleep_seconds >= allowed_status_check_sleep_seconds:
+                # If so, send failed message to MLOps and send exception message to all edges.
+                logging.error(f"There are inactive edge devices. "
+                              f"Inactivate edge id list is as follows. {inactivate_edges}")
+                if need_to_trigger_exception:
+                    self.status_reporter.report_server_id_status(
+                        run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
+                        server_id=self.edge_id, server_agent_id=self.server_agent_id)
+                    self.report_exception_status(run_id)
+                return False, active_edge_info_dict, inactivate_edges
+
+            # If we enable the mode for async cluster, then sleep some time and send messages to all clients.
+            if callback_when_edges_ready is not None and self.should_process_async_cluster is not None:
+                should_async, async_timeout = self.should_process_async_cluster()
+                if should_async and total_sleep_seconds >= allowed_status_check_sleep_seconds_for_async:
+                    if async_timeout > allowed_status_check_sleep_seconds_for_async:
+                        time.sleep(async_timeout - allowed_status_check_sleep_seconds_for_async)
+                    self.send_training_request_to_edges(active_edge_info_dict)
+                    return True, active_edge_info_dict, inactivate_edges
+
+        return True, active_edge_info_dict, inactivate_edges
+
+    def send_status_check_msg(self, run_id, edge_id, server_id, context=None):
+        # Ask a single edge for its device info over the per-edge request topic;
+        # `context` (optional) is echoed back so the response can be correlated.
+        topic_get_model_device_id = "server/client/request_device_info/" + str(edge_id)
+        payload = {"server_id": server_id, "run_id": run_id}
+        if context is not None:
+            payload["context"] = context
+        self.message_center.send_message(topic_get_model_device_id, json.dumps(payload))
+
+    def report_exception_status(self, run_id):
+        # Mark the whole job as EXCEPTION in the unified status center.
+        self.status_reporter.report_job_status(run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION)
+
+    def callback_run_logs(self, topic, payload):
+        # Buffer an incoming run-log payload into the (lazily created) logs queue.
+        run_id = str(topic).split('/')[-1]
+        run_id_str = str(run_id)
+        # NOTE(review): run_id / run_id_str are parsed but unused here.
+        if self.run_logs_queue is None:
+            self.run_logs_queue = Queue()
+        self.run_logs_queue.put(payload)
+
+    def callback_run_metrics(self, topic, payload):
+        # Buffer an incoming metrics payload into the (lazily created) metrics queue.
+        # NOTE(review): debug print left in — consider logging.debug instead.
+        print(f"callback_run_metrics topic {topic}, payload {payload}")
+        run_id = str(topic).split('/')[-1]
+        run_id_str = str(run_id)
+        # NOTE(review): run_id / run_id_str are parsed but unused here.
+        if self.run_metrics_queue is None:
+            self.run_metrics_queue = Queue()
+        self.run_metrics_queue.put(payload)
+
+    def send_training_request_to_edges(self, active_edge_info_dict):
+        # Hand the active-edge info to the protocol manager via the message center.
+        # NOTE(review): receive_message (not send_message) is used here — this loops the
+        # payload back into the local listener rather than publishing it; confirm intended.
+        topic = GeneralConstants.MSG_TOPIC_SEND_TRAINING_REQUEST_TO_EDGES
+        payload = json.dumps(active_edge_info_dict)
+        self.message_center.receive_message(topic, payload)
+
+    def should_process_async_cluster(self):
+        """
+        Read async-cluster settings from run_config.parameters.common_args.
+
+        Returns (enabled, timeout_seconds); caches both values on self as a side effect.
+        """
+        run_config = self.request_json.get("run_config", {})
+        run_params = run_config.get("parameters", {})
+        common_args = run_params.get("common_args", {})
+        self.enable_async_cluster = common_args.get("enable_async_cluster", False)
+        self.async_check_timeout = common_args.get("async_check_timeout", 0)
+        if self.enable_async_cluster:
+            return True, self.async_check_timeout
+
+        return False, self.async_check_timeout
+
+
+
diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
new file mode 100755
index 0000000000..694fab5f5f
--- /dev/null
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
@@ -0,0 +1,73 @@
+import base64
+import json
+import logging
+import time
+from abc import ABC
+from multiprocessing import Process
+from .cloud_server_manager import FedMLCloudServerManager
+from ..scheduler_core.scheduler_base_job_runner_manager import FedMLSchedulerBaseJobRunnerManager
+
+
+class FedMLBaseMasterJobRunnerManager(FedMLSchedulerBaseJobRunnerManager, ABC):
+    """
+    Master-side job runner manager: creates/starts job runners per run id and
+    dispatches log/metric callbacks to them. Supports launching the run either
+    locally or through a FedML cloud server (separate process or local process).
+    """
+    def __init__(self):
+        FedMLSchedulerBaseJobRunnerManager.__init__(self)
+
+    # Override
+    def start_job_runner(
+            self, run_id, request_json, args=None, edge_id=None, is_server_job=False,
+            sender_message_queue=None, listener_message_queue=None, status_center_queue=None,
+            should_start_cloud_server=False, use_local_process_as_cloud_server=False,
+            cuda_visible_gpu_ids_str=None
+    ):
+        # Cloud-server mode delegates entirely to _start_cloud_server and returns.
+        if should_start_cloud_server:
+            self._start_cloud_server(args, run_id, request_json, edge_id=edge_id,
+                                     use_local_process_as_cloud_server=use_local_process_as_cloud_server)
+            return
+
+        # Otherwise create a runner (subclass factory) keyed by run id and start its process.
+        # assumes args.agent_config is populated by the agent login flow — TODO confirm.
+        run_id_str = str(run_id)
+        self.job_runners[run_id_str] = self._generate_job_runner_instance(
+            args, run_id=run_id, request_json=request_json,
+            agent_config=args.agent_config, edge_id=edge_id,
+        )
+        self.job_runners[run_id_str].start_runner_process(
+            run_id, request_json, edge_id=edge_id, is_server_job=is_server_job,
+            sender_message_queue=sender_message_queue,
+            listener_message_queue=listener_message_queue,
+            status_center_queue=status_center_queue
+        )
+
+    def _start_cloud_server(
+            self, args, run_id, request_json, edge_id=None,
+            use_local_process_as_cloud_server=False
+    ):
+        # Start the FedML cloud server for this run, either as a managed remote
+        # server process or as a local child process running the server login flow.
+        run_id_str = str(run_id)
+        cloud_server_mgr = FedMLCloudServerManager(
+            args, run_id=run_id, edge_id=edge_id, request_json=request_json,
+            agent_config=args.agent_config
+        )
+        if not use_local_process_as_cloud_server:
+            self.cloud_run_process_map[run_id_str] = Process(target=cloud_server_mgr.start_cloud_server_process_entry)
+            self.cloud_run_process_map[run_id_str].start()
+        else:
+            # Pass the full request json to the local cloud-server process as a
+            # base64-encoded command string (same transport as the remote path).
+            message_bytes = json.dumps(request_json).encode("ascii")
+            base64_bytes = base64.b64encode(message_bytes)
+            runner_cmd_encoded = base64_bytes.decode("ascii")
+            cloud_device_id = request_json.get("cloudServerDeviceId", "0")
+
+            logging.info("runner_cmd_encoded: {}".format(runner_cmd_encoded))
+
+            self.cloud_run_process_map[run_id_str] = Process(
+                target=cloud_server_mgr.start_local_cloud_server,
+                args=(args.account_id, args.version, cloud_device_id, runner_cmd_encoded))
+            self.cloud_run_process_map[run_id_str].start()
+            time.sleep(1)
+
+    def callback_run_logs(self, run_id, topic, payload):
+        # Forward a run-log message to the runner for this run id, if it exists.
+        run_id_str = str(run_id)
+        if self.job_runners.get(run_id_str, None) is not None:
+            self.job_runners[run_id_str].callback_run_logs(topic, payload)
+
+    def callback_run_metrics(self, run_id, topic, payload):
+        # Forward a metrics message to the runner for this run id, if it exists.
+        run_id_str = str(run_id)
+        if self.job_runners.get(run_id_str, None) is not None:
+            self.job_runners[run_id_str].callback_run_metrics(topic, payload)
diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
new file mode 100755
index 0000000000..bf720515d9
--- /dev/null
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -0,0 +1,667 @@
+
+import base64
+import json
+import logging
+import fedml
+from ..scheduler_core.scheduler_matcher import SchedulerMatcher
+from ..comm_utils.constants import SchedulerConstants
+from ..comm_utils.job_utils import JobRunnerUtils
+from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
+from ....core.mlops.mlops_configs import MLOpsConfigs
+from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
+from ..comm_utils import sys_utils
+from ....core.mlops.mlops_utils import MLOpsUtils
+from ..model_scheduler import device_client_constants
+from fedml.utils.debugging import debug
+from ..scheduler_core.compute_cache_manager import ComputeCacheManager
+from ..scheduler_core.ota_upgrade import FedMLOtaUpgrade
+from .deploy_job_launcher import FedMLDeployJobLauncher
+from ..scheduler_core.general_constants import GeneralConstants
+from ..scheduler_core.scheduler_base_protocol_manager import FedMLSchedulerBaseProtocolManager
+from abc import ABC, abstractmethod
+
+
+class FedMLBaseMasterProtocolManager(FedMLSchedulerBaseProtocolManager, ABC):
+    def __init__(self, args, agent_config=None):
+        # Base protocol manager for the master agent: holds run/edge identity,
+        # topic names (filled in by generate_topics), and the deployment-mode flags.
+        FedMLSchedulerBaseProtocolManager.__init__(self, args, agent_config=agent_config, is_master=True)
+
+        self.async_check_timeout = 0
+        self.enable_async_cluster = False
+        self.request_json = None
+        # Map of run_id(str) -> list of edge ids participating in that run.
+        self.run_edge_ids = dict()
+        self.version = fedml.get_env_version()
+        self.args = args
+        self.run_id = None
+        self.edge_id = args.edge_id
+        self.server_agent_id = args.edge_id
+        self.current_device_id = args.current_device_id
+        self.unique_device_id = args.unique_device_id
+        self.agent_config = agent_config
+        # Topic names are assigned in generate_topics(); None until then.
+        self.topic_start_train = None
+        self.topic_stop_train = None
+        self.topic_report_status = None
+        self.topic_ota_msg = None
+        self.topic_response_device_info = None
+        self.topic_request_device_info_from_mlops = None
+        # NOTE(review): "requesst" is a typo but the spelling is used consistently
+        # for these two attributes; renaming would need a coordinated change.
+        self.topic_requesst_job_status = None
+        self.topic_requesst_device_status_in_job = None
+        self.topic_send_training_request_to_edges = None
+        # Deployment-mode flags: edge server+agent, cloud agent, cloud server, etc.
+        self.run_as_cloud_agent = False
+        self.run_as_cloud_server = False
+        self.run_as_edge_server_and_agent = False
+        self.run_as_cloud_server_and_agent = False
+        self.enable_simulation_cloud_agent = True
+        self.use_local_process_as_cloud_server = False
+        self.ota_upgrade = FedMLOtaUpgrade(edge_id=args.edge_id)
+        # Map of run_id(str) -> original start-train request json for active runs.
+        self.running_request_json = dict()
+        self.start_request_json = None
+        self.deploy_job_launcher = FedMLDeployJobLauncher()
+
+    @abstractmethod
+    def generate_topics(self):
+        # Build all MQTT topic names for this master agent and register them for
+        # subscription. Subclasses extend this with their own topics.
+        # The MQTT message topic format is as follows: <sender>/<receiver>/<action>
+
+        # The topic for starting training
+        self.topic_start_train = "mlops/flserver_agent_" + str(self.edge_id) + "/start_train"
+
+        # The topic for stopping training
+        self.topic_stop_train = "mlops/flserver_agent_" + str(self.edge_id) + "/stop_train"
+
+        # The topic for reporting current device status.
+        self.topic_report_status = "mlops/report_device_status"
+
+        # The topic for OTA messages from the MLOps.
+        self.topic_ota_msg = "mlops/flserver_agent_" + str(self.edge_id) + "/ota"
+
+        # The topic for requesting device info from the client.
+        self.topic_response_device_info = "client/server/response_device_info/" + str(self.edge_id)
+
+        # The topic for requesting device info from MLOps.
+        self.topic_request_device_info_from_mlops = f"mlops/master_agent/request_device_info/{self.edge_id}"
+
+        # The topic for getting job status from the status center.
+        self.topic_requesst_job_status = f"anywhere/master_agent/request_job_status/{self.edge_id}"
+
+        # The topic for getting device status of job from the status center.
+        self.topic_requesst_device_status_in_job = f"anywhere/master_agent/request_device_status_in_job/{self.edge_id}"
+
+        # The topic for reporting online status
+        self.topic_active = "flserver_agent/active"
+
+        # The topic for last-will messages.
+        self.topic_last_will = "flserver_agent/last_will_msg"
+
+        # The topic for sending training request to edges (Request from the job runner when all edges are ready)
+        self.topic_send_training_request_to_edges = GeneralConstants.MSG_TOPIC_SEND_TRAINING_REQUEST_TO_EDGES
+
+        # Subscribe topics for starting train, stopping train and fetching client status.
+        self.subscribed_topics.clear()
+        self.add_subscribe_topic(self.topic_start_train)
+        self.add_subscribe_topic(self.topic_stop_train)
+        self.add_subscribe_topic(self.topic_report_status)
+        self.add_subscribe_topic(self.topic_ota_msg)
+        self.add_subscribe_topic(self.topic_response_device_info)
+        self.add_subscribe_topic(self.topic_request_device_info_from_mlops)
+        self.add_subscribe_topic(self.topic_requesst_job_status)
+        self.add_subscribe_topic(self.topic_requesst_device_status_in_job)
+
+    @abstractmethod
+    def add_protocol_handler(self):
+        # Bind each subscribed topic to its callback. Subclasses extend this with
+        # their own listeners.
+        # Add the message listeners for all topics, the following is an example.
+        # self.add_message_listener(self.topic_start_train, self.callback_start_train)
+        # Add the message listeners for all topics
+        self.add_message_listener(self.topic_start_train, self.callback_start_train)
+        self.add_message_listener(self.topic_stop_train, self.callback_stop_train)
+        # OTA handler is bound as an unbound class function (receives no self) —
+        # presumably intentional since it must survive instance teardown; verify.
+        self.add_message_listener(self.topic_ota_msg, FedMLBaseMasterProtocolManager.callback_server_ota_msg)
+        self.add_message_listener(self.topic_report_status, self.callback_report_current_status)
+        self.add_message_listener(self.topic_response_device_info, self.callback_response_device_info)
+        self.add_message_listener(self.topic_request_device_info_from_mlops,
+                                  self.callback_request_device_info_from_mlops)
+        self.add_message_listener(self.topic_requesst_job_status, self.callback_request_job_status)
+        self.add_message_listener(self.topic_requesst_device_status_in_job, self.callback_request_device_status_in_job)
+        self.add_message_listener(self.topic_send_training_request_to_edges,
+                                  self.callback_send_training_request_to_edges)
+
+    @abstractmethod
+    def _get_job_runner_manager(self):
+        # Factory hook: concrete protocol managers return their job-runner manager.
+        return None
+
+    @abstractmethod
+    def _init_extra_items(self):
+        # Hook for subclasses to initialize additional state; no-op in the base.
+        pass
+
+    def add_subscribe_topic(self, topic):
+        # Register a topic to be subscribed when the MQTT connection is established.
+        self.subscribed_topics.append(topic)
+
+    def on_agent_communication_connected(self, mqtt_client_object):
+        # After the base connection handling, a cloud server bootstraps itself by
+        # decoding the base64 runner command (passed via args) and replaying it as
+        # a start-train message to its own handler.
+        super().on_agent_communication_connected(mqtt_client_object)
+
+        if self.run_as_cloud_server:
+            # Start the FedML cloud server
+            message_bytes = self.args.runner_cmd.encode("ascii")
+            base64_bytes = base64.b64decode(message_bytes)
+            payload = base64_bytes.decode("ascii")
+            self.receive_message_json(self.topic_start_train, payload)
+
+    def callback_start_train(self, topic=None, payload=None):
+        """
+        Handle a start-train request from MLOps (or the replayed cloud-server command).
+
+        Parses the request json, starts the per-run log processor for the current
+        deployment mode, reports STARTING status, then launches the job runner —
+        directly (edge server / simulation), via a cloud server (cloud agent), or
+        in-process (cloud server).
+        """
+        # Fetch config from MLOps
+        # noinspection PyBroadException
+        try:
+            MLOpsConfigs.fetch_all_configs()
+        except Exception:
+            # Best-effort refresh; proceed with cached configs on failure.
+            pass
+
+        # Parse the message when running in the cloud server mode.
+        if self.run_as_cloud_server:
+            message_bytes = payload.encode("ascii")
+            base64_bytes = base64.b64decode(message_bytes)
+            payload = base64_bytes.decode("ascii")
+
+        # Parse the parameters
+        # [NOTES] Example Request JSON:
+        # https://fedml-inc.larksuite.com/wiki/ScnIwUif9iupbjkYS0LuBrd6sod#WjbEdhYrvogmlGxKTOGu98C6sSb
+        request_json = json.loads(payload)
+        is_retain = request_json.get("is_retain", False)
+        if is_retain:
+            # Retained MQTT messages are stale replays; ignore them.
+            return
+        run_id = request_json["runId"]
+        run_id_str = str(run_id)
+
+        # Process the log when running in the edge server mode.
+        if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent:
+            # Start log processor for current run
+            self.args.run_id = run_id
+            self.args.edge_id = self.edge_id
+            MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
+            MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(
+                run_id, self.edge_id, SchedulerConstants.get_log_source(request_json))
+        # Process the log when running in the cloud agent mode.
+        elif self.run_as_cloud_agent:
+            # Start log processor for current run
+            MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(
+                run_id, request_json.get("server_id", "0"), SchedulerConstants.get_log_source(request_json)
+            )
+        # Process the log when running in the cloud server mode.
+        elif self.run_as_cloud_server:
+            # Parse the parameters.
+            self.server_agent_id = request_json.get("cloud_agent_id", self.edge_id)
+            # NOTE(review): run_id/run_id_str were already extracted above; this
+            # re-extraction is redundant but harmless.
+            run_id = request_json["runId"]
+            run_id_str = str(run_id)
+
+            # Start log processor for current run.
+            self.args.run_id = run_id
+            MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(
+                run_id, self.edge_id, SchedulerConstants.get_log_source(request_json))
+
+        # Print the payload
+        logging.info("callback_start_train payload: {}".format(payload))
+        logging.info(
+            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
+        )
+
+        # Save the parameters
+        self.start_request_json = payload
+        self.run_id = run_id
+        self.request_json = request_json
+        self.running_request_json[run_id_str] = request_json
+        edge_id_list = request_json.get("edgeids", list())
+        self.run_edge_ids[run_id_str] = edge_id_list
+
+        # report server running status to master agent
+        if not self.run_as_cloud_server:
+            self.mlops_metrics.report_server_id_status(
+                run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_STARTING, edge_id=self.edge_id,
+                server_id=self.edge_id, server_agent_id=self.edge_id)
+
+        # Start server with multiprocessing mode
+        if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent:
+            self.init_job_task(request_json)
+
+            self.args.run_id = run_id
+
+            self._get_job_runner_manager().start_job_runner(
+                run_id, request_json, args=self.args, edge_id=self.edge_id,
+                sender_message_queue=self.message_center.get_sender_message_queue(),
+                listener_message_queue=self.get_listener_message_queue(),
+                status_center_queue=self.get_status_queue()
+            )
+
+            process = self._get_job_runner_manager().get_runner_process(run_id)
+            if process is not None:
+                GeneralConstants.save_run_process(run_id, process.pid, is_master=True)
+        elif self.run_as_cloud_agent:
+            self.init_job_task(request_json)
+
+            # Cloud agent delegates execution to a cloud server (remote or local process).
+            self._get_job_runner_manager().start_job_runner(
+                run_id, request_json, args=self.args, edge_id=self.edge_id,
+                sender_message_queue=self.message_center.get_sender_message_queue(),
+                listener_message_queue=self.get_listener_message_queue(),
+                status_center_queue=self.get_status_queue(), should_start_cloud_server=True,
+                use_local_process_as_cloud_server=self.use_local_process_as_cloud_server
+            )
+
+            process = self._get_job_runner_manager().get_runner_process(run_id, is_cloud_server=True)
+            if process is not None:
+                GeneralConstants.save_run_process(run_id, process.pid, is_master=True)
+        elif self.run_as_cloud_server:
+            self.server_agent_id = request_json.get("cloud_agent_id", self.edge_id)
+            self.start_request_json = json.dumps(request_json)
+            run_id = request_json["runId"]
+            run_id_str = str(run_id)
+
+            self.init_job_task(request_json)
+
+            self.args.run_id = run_id
+
+            self._get_job_runner_manager().start_job_runner(
+                run_id, request_json, args=self.args, edge_id=self.edge_id,
+                sender_message_queue=self.message_center.get_sender_message_queue(),
+                listener_message_queue=self.get_listener_message_queue(),
+                status_center_queue=self.get_status_queue()
+            )
+
+    def callback_stop_train(self, topic, payload, use_payload=None):
+        """Handle a stop-train request: mark the job killed and stop its runner.
+
+        Args:
+            topic: MQTT topic the request arrived on.
+            payload: JSON string containing the run id under "runId" or legacy "id".
+            use_payload: unused; kept for backward compatibility with existing callers.
+        """
+        # Print the payload
+        logging.info(
+            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
+        )
+
+        # Parse the parameters. Accept both "runId" and the legacy "id" key.
+        request_json = json.loads(payload)
+        run_id = request_json.get("runId", None)
+        run_id = request_json.get("id", None) if run_id is None else run_id
+        run_id_str = str(run_id)
+
+        # Broadcast the job status to all edges
+        self.rebuild_status_center(self.get_status_queue())
+        self.status_reporter.report_job_status(run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED)
+
+        # Cleanup the cached object
+        if self.running_request_json.get(run_id_str, None) is not None:
+            self.running_request_json.pop(run_id_str)
+
+        # Stop the job runner
+        self._get_job_runner_manager().stop_job_runner(run_id)
+
+    def callback_run_logs(self, topic, payload):
+        run_id = str(topic).split('/')[-1]
+        run_id_str = str(run_id)
+        self._get_job_runner_manager().callback_run_logs(run_id, topic, payload)
+
+    def callback_run_metrics(self, topic, payload):
+        run_id = str(topic).split('/')[-1]
+        run_id_str = str(run_id)
+        self._get_job_runner_manager().callback_run_metrics(run_id, topic, payload)
+
+    def callback_edge_status(self, topic, payload):
+        # Relay raw edge status messages into the status center for processing.
+        self.send_status_message(topic, payload)
+
+    def callback_report_current_status(self, topic, payload):
+        logging.info(
+            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
+        )
+
+        if self.run_as_edge_server_and_agent:
+            self.send_agent_active_msg(self.edge_id)
+        elif self.run_as_cloud_agent:
+            self.send_agent_active_msg(self.edge_id)
+        elif self.run_as_cloud_server:
+            pass
+
+    @staticmethod
+    def callback_server_ota_msg(topic, payload):
+        """Handle an over-the-air (OTA) command from MLOps.
+
+        "upgrade" performs the self-upgrade; "restart" raises so the outer
+        runner loop restarts the agent process.
+        """
+        logging.info(
+            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
+        )
+
+        request_json = json.loads(payload)
+        cmd = request_json["cmd"]
+
+        if cmd == GeneralConstants.FEDML_OTA_CMD_UPGRADE:
+            # noinspection PyBroadException
+            try:
+                FedMLOtaUpgrade.process_ota_upgrade_msg()
+                # Process(target=FedMLServerRunner.process_ota_upgrade_msg).start()
+                raise Exception("After upgraded, restart runner...")
+            except Exception as e:
+                # NOTE(review): this except swallows the restart exception raised
+                # just above, so the upgrade path never propagates a restart from
+                # here — confirm the restart is actually triggered elsewhere.
+                pass
+        elif cmd == GeneralConstants.FEDML_OTA_CMD_RESTART:
+            raise Exception("Restart runner...")
+
+    def callback_response_device_info(self, topic, payload):
+        # Parse payload
+        payload_json = json.loads(payload)
+        run_id = payload_json.get("run_id", 0)
+        context = payload_json.get("context", None)
+        master_device_id = payload_json.get("master_device_id", 0)
+        slave_device_id = payload_json.get("slave_device_id", 0)
+        slave_device_id_list = payload_json.get("slave_device_id_list", 0)
+        edge_id = payload_json.get("edge_id", 0)
+        device_info = payload_json.get("edge_info", 0)
+        device_info["master_device_id"] = master_device_id
+        device_info["slave_device_id"] = slave_device_id
+        device_info["slave_device_id_list"] = slave_device_id_list
+        run_id_str = str(run_id)
+
+        # Put device info into a multiprocessing queue so master runner checks if all edges are ready
+        if context is None:
+            self._get_job_runner_manager().put_run_edge_device_info_to_queue(run_id, device_info)
+
+            # if self.run_edge_device_info_global_queue is None:
+            #     self.run_edge_device_info_global_queue = Array('i', list())
+            #
+            # self.run_edge_device_info_global_queue[len(self.run_edge_device_info_global_queue)] =  \
+            #     {"timestamp": time.time(), "edge_id": edge_id, "device_info": device_info}
+
+            request_json = self.running_request_json.get(str(run_id), None)
+            if request_json is not None:
+                self.deploy_job_launcher.check_model_device_ready_and_deploy(
+                    request_json, run_id, master_device_id, slave_device_id, run_edge_ids=self.run_edge_ids)
+
+    def callback_request_device_info_from_mlops(self, topic, payload):
+        # MLOps asked for device info; delegate to the response builder.
+        self.response_device_info_to_mlops(topic, payload)
+
+    def callback_request_job_status(self, topic, payload):
+        # Job-status query; delegate to the response builder.
+        self.response_job_status(topic, payload)
+
+    def callback_request_device_status_in_job(self, topic, payload):
+        # Per-device status-in-job query; delegate to the response builder.
+        self.response_device_status_in_job(topic, payload)
+
+    def callback_send_training_request_to_edges(self, topic, payload):
+        # The payload carries the active-edge info dict used for GPU matching.
+        payload_json = json.loads(payload)
+        self.send_training_request_to_edges(active_edge_info_dict=payload_json)
+
+    def generate_protocol_manager(self):
+        """Clone this protocol manager into a fresh instance for a child process.
+
+        Copies every field the message/status loops need so the clone can run
+        independently of this object's process context. Returns the new instance.
+        """
+        message_status_runner = self._generate_protocol_manager_instance(
+            self.args, agent_config=self.agent_config
+        )
+        message_status_runner.async_check_timeout = self.async_check_timeout
+        message_status_runner.enable_async_cluster = self.enable_async_cluster
+        message_status_runner.request_json = self.request_json
+        message_status_runner.run_edge_ids = self.run_edge_ids
+        message_status_runner.version = self.version
+        message_status_runner.message_center_name = self.message_center_name
+        message_status_runner.run_id = self.run_id
+        message_status_runner.edge_id = self.edge_id
+        message_status_runner.server_agent_id = self.server_agent_id
+        message_status_runner.current_device_id = self.current_device_id
+        message_status_runner.unique_device_id = self.unique_device_id
+        message_status_runner.subscribed_topics = self.subscribed_topics
+        message_status_runner.run_as_cloud_agent = self.run_as_cloud_agent
+        message_status_runner.run_as_cloud_server = self.run_as_cloud_server
+        message_status_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent
+        message_status_runner.run_as_cloud_server_and_agent = self.run_as_cloud_server_and_agent
+        message_status_runner.enable_simulation_cloud_agent = self.enable_simulation_cloud_agent
+        message_status_runner.use_local_process_as_cloud_server = self.use_local_process_as_cloud_server
+        message_status_runner.running_request_json = self.running_request_json
+        message_status_runner.start_request_json = self.start_request_json
+        message_status_runner.user_name = self.user_name
+        message_status_runner.status_queue = self.get_status_queue()
+
+        return message_status_runner
+
+    def response_job_status(self, topic, payload):
+        """Reply to a job-status query with the cached job status.
+
+        Silently ignores the request when metrics reporting is not set up or
+        when the payload lacks run_id/edge_id.
+        """
+        payload_json = json.loads(payload)
+        if self.mlops_metrics is not None:
+            run_id = payload_json.get("run_id", None)
+            edge_id = payload_json.get("edge_id", None)
+            if run_id is None or edge_id is None:
+                return
+            response_topic = f"master_agent/somewhere/response_job_status/{edge_id}"
+            response_payload = {
+                "run_id": run_id,
+                "master_agent": self.edge_id,
+                "edge_id": edge_id,
+                # NOTE(review): get_job_status() is called without the run id —
+                # confirm it returns this run's status rather than a global one.
+                "job_status": ComputeCacheManager.get_instance().get_status_cache().get_job_status(),
+                "fedml_version": fedml.__version__
+            }
+            self.mlops_metrics.report_json_message(response_topic, json.dumps(response_payload))
+
+    def response_device_status_in_job(self, topic, payload):
+        """Reply to a device-status-in-job query from the status cache.
+
+        Silently ignores the request when metrics reporting is not set up or
+        when the payload lacks run_id/edge_id.
+        """
+        payload_json = json.loads(payload)
+        if self.mlops_metrics is not None:
+            run_id = payload_json.get("run_id", None)
+            edge_id = payload_json.get("edge_id", None)
+            if run_id is None or edge_id is None:
+                return
+            response_topic = f"master_agent/somewhere/response_device_status_in_job/{edge_id}"
+            response_payload = {
+                "run_id": run_id,
+                "master_agent": self.edge_id,
+                "edge_id": edge_id,
+                "device_status_in_job":
+                    ComputeCacheManager.get_instance().get_status_cache().get_device_status_in_job(run_id, edge_id),
+                "fedml_version": fedml.__version__
+            }
+            self.mlops_metrics.report_json_message(response_topic, json.dumps(response_payload))
+
+    def response_device_info_to_mlops(self, topic, payload):
+        response_topic = f"master_agent/mlops/response_device_info"
+        payload_json = json.loads(payload)
+        need_gpu_info = payload_json.get("need_gpu_info", False)
+        if self.mlops_metrics is not None:
+            if not need_gpu_info:
+                response_payload = {
+                    "run_id": self.run_id,
+                    "master_agent_device_id": self.edge_id,
+                    "fedml_version": fedml.__version__
+                }
+            else:
+                total_mem, free_mem, total_disk_size, free_disk_size, cup_utilization, cpu_cores, \
+                    gpu_cores_total, gpu_cores_available, sent_bytes, recv_bytes, gpu_available_ids = \
+                    sys_utils.get_sys_realtime_stats()
+                gpu_available_ids = JobRunnerUtils.get_instance().get_available_gpu_id_list(self.edge_id)
+                gpu_available_ids = JobRunnerUtils.trim_unavailable_gpu_ids(gpu_available_ids)
+                gpu_cores_available = len(gpu_available_ids)
+                response_payload = {
+                    "run_id": self.run_id,
+                    "master_agent_device_id": self.edge_id,
+                    "memoryTotal": round(total_mem * MLOpsUtils.BYTES_TO_GB, 2),
+                    "memoryAvailable": round(free_mem * MLOpsUtils.BYTES_TO_GB, 2),
+                    "diskSpaceTotal": round(total_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
+                    "diskSpaceAvailable": round(free_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
+                    "cpuUtilization": round(cup_utilization, 2),
+                    "cpuCores": cpu_cores,
+                    "gpuCoresTotal": gpu_cores_total,
+                    "gpuCoresAvailable": gpu_cores_available,
+                    "networkTraffic": sent_bytes + recv_bytes,
+                    "timestamp": int(MLOpsUtils.get_ntp_time()),
+                    "fedml_version": fedml.__version__
+                }
+            self.mlops_metrics.report_json_message(response_topic, json.dumps(response_payload))
+
+    def init_job_task(self, request_json):
+        run_id = request_json["runId"]
+        run_config = request_json["run_config"]
+        edge_ids = request_json["edgeids"]
+        run_params = run_config.get("parameters", {})
+        job_yaml = run_params.get("job_yaml", None)
+        server_id = request_json["server_id"]
+        if self.run_as_cloud_agent:
+            server_id = self.edge_id
+
+        self.setup_listeners_for_edge_status(run_id, edge_ids, server_id)
+        self.setup_listener_for_run_metrics(run_id)
+        self.setup_listener_for_run_logs(run_id)
+
+    @debug
+    def send_training_request_to_edges(self, active_edge_info_dict=None):
+        """Match GPU resources (when requested) and fan out start_train to edges.
+
+        When the job yaml requests GPUs and edge info is available, GPUs are
+        matched and assigned per device; on failure the run is reported as
+        failed and aborted. Otherwise the request is sent to every edge with
+        an incrementing client_rank.
+
+        Args:
+            active_edge_info_dict: per-edge device info keyed by str(edge_id);
+                None skips GPU matching entirely.
+        """
+        run_id = self.request_json["runId"]
+        edge_id_list = self.request_json["edgeids"]
+        run_config = self.request_json.get("run_config", {})
+        run_params = run_config.get("parameters", {})
+        job_yaml = run_params.get("job_yaml", {})
+        job_yaml_default_none = run_params.get("job_yaml", None)
+        computing = job_yaml.get("computing", {})
+        request_num_gpus = computing.get("minimum_num_gpus", None)
+        job_gpu_id_list = self.request_json.get("job_gpu_id_list", None)
+        assigned_gpu_num_dict = dict()
+        assigned_gpu_ids_dict = dict()
+        master_node_addr = ""
+        master_node_port = 0
+
+        logging.info("Send training request to Edge ids: " + str(edge_id_list))
+
+        # GPU matching only happens when the job yaml exists and asks for >0 GPUs.
+        should_match_gpu = False
+        if job_yaml_default_none is not None and request_num_gpus is not None and \
+                int(request_num_gpus) > 0 and active_edge_info_dict is not None:
+            should_match_gpu = True
+            SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(active_edge_info_dict, show_gpu_list=True)
+
+            # Match and assign gpus to each device
+            assigned_gpu_num_dict, assigned_gpu_ids_dict = SchedulerMatcher.match_and_assign_gpu_resources_to_devices(
+                request_num_gpus, edge_id_list, active_edge_info_dict, job_gpu_id_list=job_gpu_id_list)
+            if assigned_gpu_num_dict is None or assigned_gpu_ids_dict is None:
+                # If no resources available, send failed message to MLOps and send exception message to all edges.
+                gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(
+                    active_edge_info_dict, should_print=True)
+                err_info = f"No resources available." \
+                           f"Total available GPU count {gpu_available_count} is less than " \
+                           f"request GPU count {request_num_gpus}"
+                logging.error(err_info)
+
+                # Bug fix: This mqtt message needs to be sent so platform can clean up the failed run and change the
+                # status from running to failed.
+                self.mlops_metrics.report_server_training_status(
+                    run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id
+                )
+
+                self.status_reporter.report_server_id_status(
+                    run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
+                    server_id=self.edge_id, server_agent_id=self.server_agent_id)
+                self.report_exception_status(run_id)
+
+                # Deploy jobs additionally surface the failure on the endpoint.
+                serving_args = job_yaml.get("serving_args", {})
+                endpoint_id = serving_args.get("endpoint_id", None)
+                if endpoint_id is not None:
+                    fedml.mlops.log_endpoint_status(
+                        endpoint_id, device_client_constants.ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
+                    fedml.mlops.log_run_log_lines(
+                        endpoint_id, 0, [err_info],
+                        log_source=device_client_constants.ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT
+                    )
+                return
+
+            # Generate master node addr and port
+            master_node_addr, master_node_port = SchedulerMatcher.get_master_node_info(edge_id_list,
+                                                                                       active_edge_info_dict)
+
+            # Generate new edge id list after matched
+            edge_id_list = SchedulerMatcher.generate_new_edge_list_for_gpu_matching(assigned_gpu_num_dict)
+            if len(edge_id_list) <= 0:
+                gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(
+                    active_edge_info_dict, should_print=True)
+                logging.error(f"Request parameter for GPU num is invalid."
+                              f"Total available GPU count {gpu_available_count}."
+                              f"Request GPU num {request_num_gpus}")
+                self.status_reporter.report_server_id_status(
+                    run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
+                    server_id=self.edge_id, server_agent_id=self.server_agent_id)
+                self.report_exception_status(run_id)
+                return
+
+        if should_match_gpu:
+            # Report gpu num and related infos to MLOps.
+            serving_args = job_yaml.get("serving_args", {})
+            endpoint_id = serving_args.get("endpoint_id", None)
+            if endpoint_id is not None:
+                endpoint_info = list()
+                for edge_id_item, gpu_num in assigned_gpu_num_dict.items():
+                    edge_info = active_edge_info_dict.get(str(edge_id_item), {})
+                    endpoint_info.append({
+                        "machine_id": edge_id_item, "endpoint_gpu_count": gpu_num,
+                        "master_deploy_id": edge_info.get("master_device_id", 0),
+                        "slave_deploy_id": edge_info.get("slave_device_id", 0)})
+                topic_name = f"compute/mlops/endpoint"
+                endpoint_info_json = {"endpoint_id": endpoint_id, "endpoint_info": endpoint_info}
+                print(f"endpoint_info_json {endpoint_info_json}")
+                self.message_center.send_message(topic_name, json.dumps(endpoint_info_json))
+
+        # Fan out start_train to the (possibly re-matched) edge list.
+        # NOTE(review): request_json aliases self.request_json, so the
+        # client_rank / scheduler_match_info mutations below persist on it.
+        client_rank = 1
+        for edge_id in edge_id_list:
+            topic_start_train = "flserver_agent/" + str(edge_id) + "/start_train"
+            logging.info("start_train: send topic " + topic_start_train + " to client...")
+            request_json = self.request_json
+            request_json["client_rank"] = client_rank
+            client_rank += 1
+
+            if active_edge_info_dict is not None:
+                edge_info = active_edge_info_dict.get(str(edge_id), {})
+                model_master_device_id = edge_info.get("master_device_id", None)
+                model_slave_device_id = edge_info.get("slave_device_id", None)
+                model_slave_device_id_list = edge_info.get("slave_device_id_list", None)
+
+                if should_match_gpu:
+                    request_json["scheduler_match_info"] = SchedulerMatcher.generate_match_info_for_scheduler(
+                        edge_id, edge_id_list, master_node_addr, master_node_port,
+                        assigned_gpu_num_dict, assigned_gpu_ids_dict,
+                        model_master_device_id=model_master_device_id,
+                        model_slave_device_id=model_slave_device_id,
+                        model_slave_device_id_list=model_slave_device_id_list
+                    )
+
+            self.message_center.send_message(topic_start_train, json.dumps(request_json))
+
+    def setup_listeners_for_edge_status(self, run_id, edge_ids, server_id):
+        """Seed the status center with the run's edge list, then subscribe to
+        each edge's status topic.
+        """
+        # First feed an init message (full edge list + server id) through the
+        # server's own status topic so the status center knows the run layout.
+        edge_status_topic = "fl_client/flclient_agent_" + str(server_id) + "/status"
+        payload = {"run_id": run_id, "init_all_edge_id_list": edge_ids, "init_server_id": server_id}
+        self.callback_edge_status(edge_status_topic, json.dumps(payload))
+
+        # Then listen on every edge's status topic (note: variable is reused).
+        for edge_id in edge_ids:
+            edge_status_topic = "fl_client/flclient_agent_" + str(edge_id) + "/status"
+            self.add_message_listener(edge_status_topic, self.callback_edge_status)
+            self.subscribe_msg(edge_status_topic)
+
+    def remove_listeners_for_edge_status(self, edge_ids=None):
+        if edge_ids is None:
+            edge_ids = self.request_json["edgeids"]
+
+        for edge_id in edge_ids:
+            edge_status_topic = "fl_client/flclient_agent_" + str(edge_id) + "/status"
+            self.unsubscribe_msg(edge_status_topic)
+
+    def setup_listener_for_run_metrics(self, run_id):
+        metric_topic = f"fedml_slave/fedml_master/metrics/{run_id}"
+        self.add_message_listener(metric_topic, self.callback_run_metrics)
+        self.subscribe_msg(metric_topic)
+
+    def remove_listener_for_run_metrics(self, run_id):
+        metric_topic = f"fedml_slave/fedml_master/metrics/{run_id}"
+        self.unsubscribe_msg(metric_topic)
+
+    def setup_listener_for_run_logs(self, run_id):
+        logs_topic = f"fedml_slave/fedml_master/logs/{run_id}"
+        self.add_message_listener(logs_topic, self.callback_run_logs)
+        self.subscribe_msg(logs_topic)
+
+    def remove_listener_for_run_logs(self, run_id):
+        logs_topic = f"fedml_slave/fedml_master/logs/{run_id}"
+        self.unsubscribe_msg(logs_topic)
+
+    def send_training_stop_request_to_edges(
+            self, edge_id_list, payload=None, run_id=0):
+        if payload is None:
+            payload_obj = {"runId": run_id, "edgeids": edge_id_list}
+        else:
+            payload_obj = json.loads(payload)
+
+        for edge_id in edge_id_list:
+            topic_stop_train = "flserver_agent/" + str(edge_id) + "/stop_train"
+            logging.info("stop_train: send topic " + topic_stop_train)
+            self.message_center.send_message(topic_stop_train, json.dumps(payload_obj))
+
+    def send_training_stop_request_to_specific_edge(self, edge_id, payload):
+        topic_stop_train = "flserver_agent/" + str(edge_id) + "/stop_train"
+        logging.info("stop_train: send topic " + topic_stop_train)
+        self.message_center.send_message(topic_stop_train, payload)
+
+    def report_exception_status(self, run_id):
+        # Mark the whole job as exception-terminated in the status center.
+        self.status_reporter.report_job_status(run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION)
+
+    @staticmethod
+    def get_start_train_topic_with_edge_id(edge_id):
+        return "mlops/flserver_agent_" + str(edge_id) + "/start_train"
+
+    @abstractmethod
+    def _generate_protocol_manager_instance(self, args, agent_config=None):
+        """Create a concrete protocol-manager instance; implemented by subclasses."""
+        return None
diff --git a/python/fedml/computing/scheduler/master/cloud_server_manager.py b/python/fedml/computing/scheduler/master/cloud_server_manager.py
new file mode 100755
index 0000000000..ed39707034
--- /dev/null
+++ b/python/fedml/computing/scheduler/master/cloud_server_manager.py
@@ -0,0 +1,164 @@
+import base64
+import json
+import logging
+import os
+import traceback
+from fedml.computing.scheduler.comm_utils.sys_utils import get_python_program
+
+
+class FedMLCloudServerManager:
+    """Manages the lifecycle of a FedML cloud (aggregator) server.
+
+    Starts the server either as a local login process or as a Kubernetes
+    deployment assembled from shell/kubectl commands, and tears the k8s
+    resources down again on stop.
+    """
+    FEDML_CLOUD_SERVER_PREFIX = "fedml-server-run-"  # k8s resource-name prefix
+    LOCAL_RUNNER_INFO_DIR_NAME = 'runner_infos'
+    STATUS_IDLE = "IDLE"
+
+    def __init__(self, args, run_id=None, edge_id=None, request_json=None, agent_config=None, version=None):
+        """Capture run context; no external side effects happen here.
+
+        Args:
+            args: parsed agent arguments.
+            run_id: id of the run this server belongs to.
+            edge_id: id of this (master) device.
+            request_json: the start-run request, forwarded to the cloud server.
+            agent_config: agent config dict; must contain "docker_config".
+            version: release channel (e.g. "release", "dev", "local").
+        """
+        self.server_docker_image = None
+        self.args = args
+        self.run_id = run_id
+        self.edge_id = edge_id
+        self.request_json = request_json
+        self.agent_config = agent_config
+        self.version = version
+        # "local" has no dedicated image tag; fall back to the dev image.
+        image_version = self.version
+        if image_version == "local":
+            image_version = "dev"
+        self.server_docker_base_image = "/fedml-device-image:" + image_version
+        self.cloud_server_name = None
+
+    @staticmethod
+    def start_local_cloud_server(user, version, cloud_device_id, runner_cmd_encoded):
+        """Run the cloud server as a local process via server_login.py.
+
+        Blocks until the spawned login process exits (os.system is synchronous).
+        """
+        print(f"start cloud server, device id {cloud_device_id}, runner cmd {runner_cmd_encoded}")
+        pip_source_dir = os.path.dirname(__file__)
+        login_cmd = os.path.join(pip_source_dir, "server_login.py")
+        run_cmd = f"{get_python_program()} -W ignore {login_cmd} -t login -r cloud_server -u {str(user)} " \
+                  f"-v {version} -id {cloud_device_id} -rc {runner_cmd_encoded}"
+        os.system(run_cmd)
+
+    def start_cloud_server_process_entry(self):
+        """Best-effort entry point: start the cloud server, log (not raise) failures."""
+        try:
+            self.start_cloud_server_process()
+        except Exception as e:
+            logging.info(f"Failed to start the cloud server. {traceback.format_exc()}")
+
+    def start_cloud_server_process(self):
+        # Extract the package config from the run request and launch on k8s.
+        run_config = self.request_json["run_config"]
+        packages_config = run_config["packages_config"]
+        self.start_cloud_server(packages_config)
+
+    def start_cloud_server(self, packages_config):
+        """Deploy the cloud server to Kubernetes.
+
+        Creates the image-pull secret, then envsubst-renders the deployment
+        template and applies it. The full start-run request is passed to the
+        server base64-encoded in FEDML_RUNNER_CMD.
+
+        NOTE(review): commands are built by string interpolation and run via
+        os.system — config values must be trusted, or this is shell-injectable.
+        """
+        server_id = self.request_json["server_id"]
+        self.cloud_server_name = f"{FedMLCloudServerManager.FEDML_CLOUD_SERVER_PREFIX}{self.run_id}-{server_id}"
+        self.server_docker_image = (
+                self.agent_config["docker_config"]["registry_server"]
+                + self.agent_config["docker_config"]["registry_dir"]
+                + self.server_docker_base_image
+        )
+
+        logging.info("docker image {}".format(self.server_docker_image))
+        # logging.info("file_sys_driver {}".format(self.agent_config["docker_config"]["file_sys_driver"]))
+
+        # Ensure the namespace exists and (re)create the registry pull secret,
+        # authenticating against AWS ECR Public at command-execution time.
+        registry_secret_cmd = (
+                "kubectl create namespace fedml-devops-aggregator-"
+                + self.version
+                + ";kubectl -n fedml-devops-aggregator-"
+                + self.version
+                + " delete secret secret-"
+                + self.cloud_server_name
+                + " ;kubectl create secret docker-registry secret-"
+                + self.cloud_server_name
+                + " --docker-server="
+                + self.agent_config["docker_config"]["registry_server"]
+                + " --docker-username="
+                + self.agent_config["docker_config"]["user_name"]
+                + " --docker-password=$(aws ecr-public get-login-password --region "
+                + self.agent_config["docker_config"]["public_cloud_region"]
+                + ")"
+                + " --docker-email=fedml@fedml.ai -n fedml-devops-aggregator-"
+                + self.version
+        )
+        logging.info("Create secret cmd: " + registry_secret_cmd)
+        os.system(registry_secret_cmd)
+
+        # Base64-encode the whole request so it survives shell/env transport.
+        message_bytes = json.dumps(self.request_json).encode("ascii")
+        base64_bytes = base64.b64encode(message_bytes)
+        runner_cmd_encoded = base64_bytes.decode("ascii")
+        logging.info("runner_cmd_encoded: {}".format(runner_cmd_encoded))
+        # logging.info("runner_cmd_decoded: {}".format(base64.b64decode(runner_cmd_encoded).decode()))
+        cur_dir = os.path.dirname(__file__)
+        # Export the template variables, render with envsubst, apply with kubectl.
+        run_deployment_cmd = (
+                "export FEDML_AGGREGATOR_NAME="
+                + self.cloud_server_name
+                + ";export FEDML_AGGREGATOR_SVC="
+                + self.cloud_server_name
+                + ";export FEDML_AGGREGATOR_VERSION="
+                + self.version
+                + ';export FEDML_AGGREGATOR_IMAGE_PATH="'
+                + self.server_docker_image
+                + '"'
+                + ";export FEDML_CONF_ID="
+                + self.cloud_server_name
+                + ";export FEDML_DATA_PV_ID="
+                + self.cloud_server_name
+                + ";export FEDML_DATA_PVC_ID="
+                + self.cloud_server_name
+                + ";export FEDML_REGISTRY_SECRET_SUFFIX="
+                + self.cloud_server_name
+                + ";export FEDML_ACCOUNT_ID=0"
+                + ";export FEDML_SERVER_DEVICE_ID="
+                + self.request_json.get("cloudServerDeviceId", "0")
+                + ";export FEDML_VERSION="
+                + self.version
+                + ";export FEDML_PACKAGE_NAME="
+                + packages_config.get("server", "")
+                + ";export FEDML_PACKAGE_URL="
+                + packages_config.get("serverUrl", "")
+                + ";export FEDML_RUNNER_CMD="
+                + runner_cmd_encoded
+                + ";envsubst < "
+                + os.path.join(cur_dir, "templates", "fedml-server-deployment.yaml")
+                + " | kubectl apply -f - "
+        )
+        logging.info("start run with k8s: " + run_deployment_cmd)
+        os.system(run_deployment_cmd)
+
+    def stop_cloud_server(self):
+        """Delete the cloud server's k8s deployment, service and pull secret.
+
+        NOTE(review): the resource name is rebuilt from run_id + edge_id here,
+        while start_cloud_server used run_id + server_id — confirm these match
+        for every role, otherwise stop may miss the deployed resources.
+        """
+        self.cloud_server_name = FedMLCloudServerManager.FEDML_CLOUD_SERVER_PREFIX + str(self.run_id) \
+                                 + "-" + str(self.edge_id)
+        self.server_docker_image = (
+                self.agent_config["docker_config"]["registry_server"]
+                + self.agent_config["docker_config"]["registry_dir"]
+                + self.server_docker_base_image
+        )
+        delete_deployment_cmd = (
+                "export FEDML_AGGREGATOR_NAME="
+                + self.cloud_server_name
+                + ";export FEDML_AGGREGATOR_SVC="
+                + self.cloud_server_name
+                + ";export FEDML_AGGREGATOR_VERSION="
+                + self.version
+                + ';export FEDML_AGGREGATOR_IMAGE_PATH="'
+                + self.server_docker_image
+                + '"'
+                + ";export FEDML_CONF_ID="
+                + self.cloud_server_name
+                + ";export FEDML_DATA_PV_ID="
+                + self.cloud_server_name
+                + ";export FEDML_DATA_PVC_ID="
+                + self.cloud_server_name
+                + ";export FEDML_REGISTRY_SECRET_SUFFIX="
+                + self.cloud_server_name
+                + ";kubectl -n fedml-devops-aggregator-"
+                + self.version
+                + " delete deployment "
+                + self.cloud_server_name
+                + ";kubectl -n fedml-devops-aggregator-"
+                + self.version
+                + " delete svc "
+                + self.cloud_server_name
+                + ";kubectl -n fedml-devops-aggregator-"
+                + self.version
+                + " delete secret secret-"
+                + self.cloud_server_name
+        )
+        logging.info("stop run with k8s: " + delete_deployment_cmd)
+        os.system(delete_deployment_cmd)
diff --git a/python/fedml/computing/scheduler/master/deploy_job_launcher.py b/python/fedml/computing/scheduler/master/deploy_job_launcher.py
new file mode 100755
index 0000000000..e4af2a20be
--- /dev/null
+++ b/python/fedml/computing/scheduler/master/deploy_job_launcher.py
@@ -0,0 +1,90 @@
+import json
+from fedml.computing.scheduler.comm_utils import sys_utils
+from fedml.computing.scheduler.model_scheduler import device_client_constants
+from fedml.computing.scheduler.model_scheduler.device_model_cards import FedMLModelCards
+from fedml.computing.scheduler.scheduler_entry.constants import Constants
+
+
+class FedMLDeployJobLauncher:
+    """Launch model deployment for jobs whose job/task type is deploy or serve.
+
+    Collects the (master, slave) model device ids reported for a run and, once
+    every edge of the run has reported its pair, triggers the actual model
+    deployment through FedMLModelCards.
+    """
+    # Directory name for local runner info files.
+    LOCAL_RUNNER_INFO_DIR_NAME = 'runner_infos'
+    STATUS_IDLE = "IDLE"
+
+    def __init__(self, edge_id=None):
+        # Id of the edge (master server) this launcher runs on.
+        self.edge_id = edge_id
+        # Maps str(run_id) -> list of {"master_device_id": ..., "slave_device_id": ...}
+        # accumulated by check_model_device_ready_and_deploy().
+        self.run_model_device_ids = dict()
+
+    @staticmethod
+    def deploy_model(serving_devices, request_json, run_id):
+        """Deploy the model described in request_json onto serving_devices.
+
+        No-op unless the job type resolved from the request's job_yaml is
+        deploy or serve.
+        """
+        run_config = request_json["run_config"]
+        run_params = run_config.get("parameters", {})
+        job_yaml = run_params.get("job_yaml", {})
+        # Prefer "job_type"; fall back to "task_type" (defaulting to train).
+        job_type = job_yaml.get("job_type", None)
+        job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type
+        if job_type == Constants.JOB_TASK_TYPE_DEPLOY or job_type == Constants.JOB_TASK_TYPE_SERVE:
+            # computing = job_yaml.get("computing", {})
+            # num_gpus = computing.get("minimum_num_gpus", 1)
+            serving_args = run_params.get("serving_args", {})
+            model_id = serving_args.get("model_id", None)
+            model_name = serving_args.get("model_name", None)
+            model_version = serving_args.get("model_version", None)
+            # model_storage_url = serving_args.get("model_storage_url", None)
+            endpoint_name = serving_args.get("endpoint_name", None)
+            endpoint_id = serving_args.get("endpoint_id", None)
+            # Decode the "random" value; the part after "FEDML@" is passed to
+            # deploy_model as the api-key argument — presumably a deobfuscated
+            # credential (NOTE(review): confirm against sys_utils.random2).
+            random = serving_args.get("random", "")
+            random_out = sys_utils.random2(random, "FEDML@9999GREAT")
+            random_list = random_out.split("FEDML@")
+            device_type = device_client_constants.ClientConstants.login_role_list[
+                device_client_constants.ClientConstants.LOGIN_MODE_FEDML_CLOUD_INDEX]
+            FedMLModelCards.get_instance().deploy_model(
+                model_name, device_type, json.dumps(serving_devices),
+                "", random_list[1], None,
+                in_model_id=model_id, in_model_version=model_version,
+                endpoint_name=endpoint_name, endpoint_id=endpoint_id, run_id=run_id)
+
+    def check_model_device_ready_and_deploy(self, request_json, run_id, master_device_id,
+                                            slave_device_id, run_edge_ids=None):
+        """Record one (master, slave) model-device pair for run_id; deploy once
+        the number of recorded pairs equals the number of edges in the run.
+
+        Returns early (without deploying) when the job type is not
+        deploy/serve, when run_edge_ids is missing, or while pairs are still
+        outstanding.
+        """
+        run_config = request_json["run_config"]
+        run_params = run_config.get("parameters", {})
+        job_yaml = run_params.get("job_yaml", {})
+        # Same job-type resolution as deploy_model(): "job_type" first, then
+        # "task_type" (default: train).
+        job_type = job_yaml.get("job_type", None)
+        job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type
+        if job_type != Constants.JOB_TASK_TYPE_DEPLOY and job_type != Constants.JOB_TASK_TYPE_SERVE:
+            return
+
+        # Init model device ids for each run
+        run_id_str = str(run_id)
+        if self.run_model_device_ids.get(run_id_str, None) is None:
+            self.run_model_device_ids[run_id_str] = list()
+
+        # Append master device and slave devices to the model devices map
+        self.run_model_device_ids[run_id_str].append({"master_device_id": master_device_id,
+                                                      "slave_device_id": slave_device_id})
+        model_device_ids = self.run_model_device_ids.get(run_id_str, None)
+        if model_device_ids is None:
+            return
+        if run_edge_ids is None:
+            return
+
+        # Check if all model devices are ready
+        if len(model_device_ids) != len(run_edge_ids.get(run_id_str, list())):
+            return
+
+        # Generate model master ids and model slave device ids
+        device_master_ids = list()
+        device_slave_ids = list()
+        for device_ids in model_device_ids:
+            model_master_id = device_ids.get("master_device_id")
+            model_slave_id = device_ids.get("slave_device_id")
+            device_master_ids.append(model_master_id)
+            device_slave_ids.append(model_slave_id)
+
+        if len(device_master_ids) <= 0:
+            return
+
+        # Generate serving devices for deploying: the first master device
+        # followed by every slave device.
+        serving_devices = list()
+        serving_devices.append(device_master_ids[0])
+        serving_devices.extend(device_slave_ids)
+
+        # Start to deploy the model
+        FedMLDeployJobLauncher.deploy_model(serving_devices, request_json, run_id=run_id)
diff --git a/python/fedml/computing/scheduler/master/launch_job_runner.py b/python/fedml/computing/scheduler/master/launch_job_runner.py
new file mode 100755
index 0000000000..c28458fc0f
--- /dev/null
+++ b/python/fedml/computing/scheduler/master/launch_job_runner.py
@@ -0,0 +1,44 @@
+
+from ..master.server_constants import ServerConstants
+from ..scheduler_core.general_constants import GeneralConstants
+from .base_master_job_runner import FedMLBaseMasterJobRunner
+
+
+class FedMLLaunchMasterJobRunner(FedMLBaseMasterJobRunner):
+    """Master-side job runner for FEDML Launch jobs.
+
+    Thin specialization of FedMLBaseMasterJobRunner that wires in the
+    launch-specific data, package-download, package-unzip and log directories
+    from ServerConstants/GeneralConstants; run_impl and package download are
+    delegated unchanged to the base class.
+    """
+
+    def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id=0,
+                 cuda_visible_gpu_ids_str=None):
+        FedMLBaseMasterJobRunner.__init__(
+            self, args, edge_id=edge_id, request_json=request_json, agent_config=agent_config, run_id=run_id,
+            cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str, agent_data_dir=ServerConstants.get_data_dir(),
+            agent_package_download_dir=ServerConstants.get_package_download_dir(),
+            agent_package_unzip_dir=GeneralConstants.get_package_unzip_dir(ServerConstants.get_package_download_dir()),
+            agent_log_file_dir=ServerConstants.get_log_file_dir()
+        )
+
+    # Override
+    def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None,):
+        # NOTE(review): the agent_config parameter is ignored here and
+        # self.agent_config is used instead — confirm this is intentional.
+        return FedMLLaunchMasterJobRunner(
+            args, run_id=run_id, request_json=request_json, agent_config=self.agent_config, edge_id=edge_id
+        )
+
+    # Override
+    def _generate_extend_queue_list(self):
+        # Launch jobs need no extra queues beyond the base runner's set.
+        return None
+
+    # Override
+    def get_download_package_info(self, packages_config=None):
+        # Delegates unchanged to the base implementation.
+        return super().get_download_package_info(packages_config)
+
+    # Override
+    def run_impl(
+            self, edge_id_status_queue, edge_device_info_queue, run_metrics_queue,
+            run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue,
+            run_extend_queue_list=None, sender_message_queue=None, listener_message_queue=None,
+            status_center_queue=None
+    ):
+        # Delegates unchanged to the base implementation, forwarding all queues.
+        super().run_impl(
+            edge_id_status_queue, edge_device_info_queue, run_metrics_queue,
+            run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue,
+            run_extend_queue_list=run_extend_queue_list, sender_message_queue=sender_message_queue,
+            listener_message_queue=listener_message_queue, status_center_queue=status_center_queue)
diff --git a/python/fedml/computing/scheduler/master/launch_job_runner_manager.py b/python/fedml/computing/scheduler/master/launch_job_runner_manager.py
new file mode 100755
index 0000000000..9e94b089a3
--- /dev/null
+++ b/python/fedml/computing/scheduler/master/launch_job_runner_manager.py
@@ -0,0 +1,20 @@
+
+from fedml.core.common.singleton import Singleton
+from .launch_job_runner import FedMLLaunchMasterJobRunner
+from .base_master_job_runner_manager import FedMLBaseMasterJobRunnerManager
+
+
+class FedMLLaunchJobRunnerManager(FedMLBaseMasterJobRunnerManager, Singleton):
+    """Singleton job-runner manager that produces FedMLLaunchMasterJobRunner
+    instances for the launch master agent."""
+
+    def __init__(self):
+        FedMLBaseMasterJobRunnerManager.__init__(self)
+
+    @staticmethod
+    def get_instance():
+        # Relies on the Singleton base class to make repeated construction
+        # return the shared instance — confirm in fedml.core.common.singleton.
+        return FedMLLaunchJobRunnerManager()
+
+    # Override
+    def _generate_job_runner_instance(
+            self, args, run_id=None, request_json=None, agent_config=None, edge_id=None
+    ):
+        # Factory hook used by the base manager to spawn one runner per run.
+        return FedMLLaunchMasterJobRunner(
+            args, run_id=run_id, request_json=request_json, agent_config=agent_config, edge_id=edge_id)
diff --git a/python/fedml/computing/scheduler/master/master_agent.py b/python/fedml/computing/scheduler/master/master_agent.py
new file mode 100755
index 0000000000..9bbf6eb982
--- /dev/null
+++ b/python/fedml/computing/scheduler/master/master_agent.py
@@ -0,0 +1,28 @@
+
+from ..master.server_constants import ServerConstants
+from .server_data_interface import FedMLServerDataInterface
+from .master_protocol_manager import FedMLLaunchMasterProtocolManager
+from .base_master_agent import FedMLBaseMasterAgent
+
+
+class FedMLLaunchMasterAgent(FedMLBaseMasterAgent):
+    """Master agent for FEDML Launch.
+
+    Supplies the launch-specific log directory, runner-info persistence,
+    job database initialization and protocol-manager factory to the
+    FedMLBaseMasterAgent workflow.
+    """
+
+    def __init__(self):
+        FedMLBaseMasterAgent.__init__(self)
+
+    # Override
+    def _get_log_file_dir(self):
+        # Log files live in the server constants' log directory.
+        return ServerConstants.get_log_file_dir()
+
+    # Override
+    def _save_agent_info(self, unique_device_id, edge_id):
+        # Persist the bound device id / edge id for later agent restarts.
+        ServerConstants.save_runner_infos(unique_device_id, edge_id)
+
+    # Override
+    def _init_database(self):
+        # Ensure the server-side job table exists before handling jobs.
+        FedMLServerDataInterface.get_instance().create_job_table()
+
+    # Override
+    def _generate_protocol_manager_instance(self, args, agent_config=None):
+        # Factory hook: the base agent drives a launch protocol manager.
+        return FedMLLaunchMasterProtocolManager(args, agent_config=agent_config)
+
diff --git a/python/fedml/computing/scheduler/master/master_protocol_manager.py b/python/fedml/computing/scheduler/master/master_protocol_manager.py
new file mode 100755
index 0000000000..5eef5914e7
--- /dev/null
+++ b/python/fedml/computing/scheduler/master/master_protocol_manager.py
@@ -0,0 +1,36 @@
+from abc import ABC
+
+from .base_master_protocol_manager import FedMLBaseMasterProtocolManager
+from .launch_job_runner_manager import FedMLLaunchJobRunnerManager
+
+
+class FedMLLaunchMasterProtocolManager(FedMLBaseMasterProtocolManager, ABC):
+    """Protocol manager for the launch master agent.
+
+    Mostly delegates to FedMLBaseMasterProtocolManager; contributes the
+    launch job-runner manager and a device realtime-performance monitor.
+    """
+
+    def __init__(self, args, agent_config=None):
+        FedMLBaseMasterProtocolManager.__init__(self, args, agent_config=agent_config)
+
+    # Override
+    def generate_topics(self):
+        # No extra topics beyond the base protocol manager's.
+        super().generate_topics()
+
+    # Override
+    def add_protocol_handler(self):
+        # No extra handlers beyond the base protocol manager's.
+        super().add_protocol_handler()
+
+    # Override
+    def _generate_protocol_manager_instance(self, args, agent_config=None):
+        # Factory hook so the base class can clone a manager of this type.
+        return FedMLLaunchMasterProtocolManager(args, agent_config=agent_config)
+
+    # Override
+    def _get_job_runner_manager(self):
+        # Launch jobs are managed by the singleton launch runner manager.
+        return FedMLLaunchJobRunnerManager.get_instance()
+
+    # Override
+    def _init_extra_items(self):
+        # Start the monitor process: stop any stale realtime-perf reporter
+        # first, then start a fresh one for this (server-side) agent.
+        self.mlops_metrics.stop_device_realtime_perf()
+        self.mlops_metrics.report_device_realtime_perf(
+            self.args, self.args.agent_config["mqtt_config"], is_client=False)
+
+    # Override
+    def print_connected_info(self):
+        # Delegates unchanged to the base implementation.
+        super().print_connected_info()
diff --git a/python/fedml/computing/scheduler/master/server_login.py b/python/fedml/computing/scheduler/master/server_login.py
index dee2c83236..3d8d1f6fc9 100755
--- a/python/fedml/computing/scheduler/master/server_login.py
+++ b/python/fedml/computing/scheduler/master/server_login.py
@@ -1,407 +1,11 @@
 import argparse
-import logging
 import os
-import platform
-import time
-import traceback
-
-import click
 import fedml
-from fedml.computing.scheduler.comm_utils import sys_utils
-from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants
-from fedml.computing.scheduler.master.server_runner import FedMLServerRunner
-from fedml.computing.scheduler.master.server_constants import ServerConstants
-from fedml.core.mlops.mlops_runtime_log import MLOpsRuntimeLog
-from fedml.core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
-
-
-def __login_as_edge_server_and_agent(args, userid, version, api_key="", use_extra_device_id_suffix=None, role=None):
-    setattr(args, "account_id", userid)
-    setattr(args, "current_running_dir", ServerConstants.get_fedml_home_dir())
-
-    sys_name = platform.system()
-    if sys_name == "Darwin":
-        sys_name = "MacOS"
-    if hasattr(args, "os_name") and args.os_name is not None and args.os_name != "":
-        pass
-    else:
-        setattr(args, "os_name", sys_name)
-    setattr(args, "version", version)
-    setattr(args, "log_file_dir", ServerConstants.get_log_file_dir())
-    is_from_docker = False
-    if hasattr(args, "device_id") and args.device_id is not None and args.device_id != "0":
-        setattr(args, "current_device_id", args.device_id)
-        is_from_docker = True
-    else:
-        setattr(args, "current_device_id", FedMLServerRunner.get_device_id())
-    setattr(args, "config_version", version)
-    setattr(args, "cloud_region", "")
-
-    # Create server runner for communication with the FedML client.
-    runner = FedMLServerRunner(args)
-    runner.run_as_edge_server_and_agent = True
-
-    # Fetch configs from the MLOps config server.
-    service_config = dict()
-    config_try_count = 0
-    edge_id = 0
-    while config_try_count < 5:
-        try:
-            mqtt_config, s3_config, mlops_config, docker_config = runner.fetch_configs()
-            service_config["mqtt_config"] = mqtt_config
-            service_config["s3_config"] = s3_config
-            service_config["ml_ops_config"] = mlops_config
-            service_config["docker_config"] = docker_config
-            runner.agent_config = service_config
-            log_server_url = mlops_config.get("LOG_SERVER_URL", None)
-            if log_server_url is not None:
-                setattr(args, "log_server_url", log_server_url)
-                setattr(runner.args, "log_server_url", log_server_url)
-            break
-        except Exception as e:
-            click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_1, traceback.format_exc()))
-            click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING)
-            config_try_count += 1
-            time.sleep(3)
-            continue
-
-    if config_try_count >= 5:
-        click.echo("")
-        click.echo("[5] Oops, you failed to login the FedML MLOps platform.")
-        click.echo("Please check whether your network is normal!")
-        return
-
-    # Judge whether running from fedml docker hub
-    is_from_fedml_docker_hub = False
-    dock_loc_file = ServerConstants.get_docker_location_file()
-    if os.path.exists(dock_loc_file):
-        is_from_fedml_docker_hub = True
-
-    # Build unique device id
-    if is_from_docker:
-        unique_device_id = args.current_device_id + "@" + args.os_name + ".Docker.Edge.Server"
-    else:
-        unique_device_id = args.current_device_id + "@" + args.os_name + ".Edge.Server"
-    setattr(args, "is_from_docker", is_from_docker)
-
-    if is_from_fedml_docker_hub:
-        unique_device_id = args.current_device_id + "@" + args.os_name + ".DockerHub.Edge.Server"
-
-    if use_extra_device_id_suffix is not None:
-        unique_device_id = args.current_device_id + "@" + args.os_name + use_extra_device_id_suffix
-
-    # Bind account id to FedML® Nexus AI Platform
-    register_try_count = 0
-    edge_id = -1
-    user_name = None
-    while register_try_count < 5:
-        try:
-            edge_id, user_name, extra_url = runner.bind_account_and_device_id(
-                service_config["ml_ops_config"]["EDGE_BINDING_URL"], args.account_id, unique_device_id, args.os_name,
-                api_key=api_key, role=role
-            )
-            if edge_id > 0:
-                runner.edge_id = edge_id
-                break
-        except SystemExit as e:
-            click.echo("Your account does not exist. Please make sure your account correct.")
-            os.system("fedml logout -s")
-            return
-        except Exception as e:
-            click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_2, traceback.format_exc()))
-            click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING)
-            register_try_count += 1
-            time.sleep(3)
-            continue
-
-    if edge_id <= 0:
-        click.echo("")
-        click.echo("[6] Oops, you failed to login the FedML MLOps platform.")
-        click.echo("Please check whether your network is normal!")
-        return
-    setattr(args, "server_id", edge_id)
-    runner.args = args
-    runner.edge_id = edge_id
-    init_logs(args, edge_id)
-
-    # Log arguments and binding results.
-    # logging.info("login: unique_device_id = %s" % str(unique_device_id))
-    # logging.info("login: server_id = %s" % str(edge_id))
-    runner.unique_device_id = unique_device_id
-    runner.user_name = user_name
-    ServerConstants.save_runner_infos(args.current_device_id + "." + args.os_name, edge_id)
-
-    # Setup MQTT connection for communication with the FedML server.
-    try:
-        runner.setup_agent_mqtt_connection(service_config)
-    except Exception as e:
-        login_exit_file = os.path.join(ServerConstants.get_log_file_dir(), "exited.log")
-        with open(login_exit_file, "w") as f:
-            f.writelines(f"{os.getpid()}.")
-        runner.stop_agent()
-        raise e
-
-    # Start mqtt looper
-    runner.start_agent_mqtt_loop()
-
-
-def __login_as_cloud_agent(args, userid, version):
-    setattr(args, "account_id", userid)
-    setattr(args, "current_running_dir", ServerConstants.get_fedml_home_dir())
-
-    sys_name = platform.system()
-    if sys_name == "Darwin":
-        sys_name = "MacOS"
-    setattr(args, "os_name", sys_name)
-    setattr(args, "version", version)
-    setattr(args, "log_file_dir", ServerConstants.get_log_file_dir())
-    if hasattr(args, "device_id") and args.device_id is not None and args.device_id != "0":
-        setattr(args, "current_device_id", args.device_id)
-    else:
-        setattr(args, "current_device_id", FedMLServerRunner.get_device_id())
-    setattr(args, "config_version", version)
-    setattr(args, "cloud_region", "")
-
-    # Create server runner for communication with the FedML client.
-    runner = FedMLServerRunner(args)
-    runner.run_as_cloud_agent = True
-
-    # Fetch configs from the MLOps config server.
-    service_config = dict()
-    config_try_count = 0
-    edge_id = 0
-    while config_try_count < 5:
-        try:
-            mqtt_config, s3_config, mlops_config, docker_config = runner.fetch_configs()
-            service_config["mqtt_config"] = mqtt_config
-            service_config["s3_config"] = s3_config
-            service_config["ml_ops_config"] = mlops_config
-            service_config["docker_config"] = docker_config
-            runner.agent_config = service_config
-            log_server_url = mlops_config.get("LOG_SERVER_URL", None)
-            if log_server_url is not None:
-                setattr(args, "log_server_url", log_server_url)
-                setattr(runner.args, "log_server_url", log_server_url)
-            break
-        except Exception as e:
-            click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_1, traceback.format_exc()))
-            click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING)
-            config_try_count += 1
-            time.sleep(3)
-            continue
-
-    if config_try_count >= 5:
-        click.echo("")
-        click.echo("[7] Oops, you failed to login the FedML MLOps platform.")
-        click.echo("Please check whether your network is normal!")
-        return
-
-    # Build unique device id
-    if args.current_device_id is not None and len(str(args.current_device_id)) > 0:
-        unique_device_id = args.current_device_id + "@" + args.os_name + ".Public.Cloud"
-
-    # Bind account id to FedML® Nexus AI Platform
-    register_try_count = 0
-    if hasattr(args, "server_agent_id") and args.server_agent_id is not None:
-        edge_id = args.server_agent_id
-    else:
-        edge_id = -1
-    user_name = None
-    while register_try_count < 5:
-        try:
-            edge_id, user_name, extra_url = runner.bind_account_and_device_id(
-                service_config["ml_ops_config"]["EDGE_BINDING_URL"], args.account_id, unique_device_id, args.os_name
-            )
-            if edge_id > 0:
-                runner.edge_id = edge_id
-                break
-        except SystemExit as e:
-            click.echo("Your account does not exist. Please make sure your account correct.")
-            os.system("fedml logout -s")
-            return
-        except Exception as e:
-            click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_2, traceback.format_exc()))
-            click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING)
-            register_try_count += 1
-            time.sleep(3)
-            continue
-
-    if edge_id <= 0:
-        click.echo("")
-        click.echo("[8] Oops, you failed to login the FedML MLOps platform.")
-        click.echo("Please check whether your network is normal!")
-        return
-    setattr(args, "server_id", edge_id)
-    runner.args = args
-    runner.edge_id = edge_id
-    init_logs(args, edge_id)
-    logging.info("args {}".format(args))
-
-    # Log arguments and binding results.
-    logging.info("login: unique_device_id = %s" % str(unique_device_id))
-    logging.info("login: server_id = %s" % str(edge_id))
-    runner.unique_device_id = unique_device_id
-    runner.user_name = "cloud_agent" if user_name is None else user_name
-    ServerConstants.save_runner_infos(args.current_device_id + "." + args.os_name, edge_id)
-
-    # Setup MQTT connection for communication with the FedML server.
-    try:
-        runner.setup_agent_mqtt_connection(service_config)
-    except Exception as e:
-        login_exit_file = os.path.join(ServerConstants.get_log_file_dir(), "exited.log")
-        with open(login_exit_file, "w") as f:
-            f.writelines(f"{os.getpid()}.")
-        runner.stop_agent()
-        raise e
-
-    # Start mqtt looper
-    runner.start_agent_mqtt_loop()
-
-
-def __login_as_cloud_server(args, userid, version):
-    setattr(args, "account_id", userid)
-    setattr(args, "current_running_dir", ServerConstants.get_fedml_home_dir())
-
-    sys_name = platform.system()
-    if sys_name == "Darwin":
-        sys_name = "MacOS"
-    setattr(args, "os_name", sys_name)
-    setattr(args, "version", version)
-    setattr(args, "log_file_dir", ServerConstants.get_log_file_dir())
-    if hasattr(args, "device_id") and args.device_id is not None and args.device_id != "0":
-        setattr(args, "current_device_id", args.device_id)
-    else:
-        setattr(args, "current_device_id", FedMLServerRunner.get_device_id())
-    setattr(args, "config_version", version)
-    setattr(args, "cloud_region", "")
-
-    # Create server runner for communication with the FedML client.
-    runner = FedMLServerRunner(args)
-    runner.run_as_cloud_server = True
-
-    # Fetch configs from the MLOps config server.
-    service_config = dict()
-    config_try_count = 0
-    edge_id = 0
-    while config_try_count < 5:
-        try:
-            mqtt_config, s3_config, mlops_config, docker_config = runner.fetch_configs()
-            service_config["mqtt_config"] = mqtt_config
-            service_config["s3_config"] = s3_config
-            service_config["ml_ops_config"] = mlops_config
-            service_config["docker_config"] = docker_config
-            runner.agent_config = service_config
-            log_server_url = mlops_config.get("LOG_SERVER_URL", None)
-            if log_server_url is not None:
-                setattr(args, "log_server_url", log_server_url)
-                setattr(runner.args, "log_server_url", log_server_url)
-            break
-        except Exception as e:
-            click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_1, traceback.format_exc()))
-            click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING)
-            config_try_count += 1
-            time.sleep(3)
-            continue
-
-    if config_try_count >= 5:
-        click.echo("")
-        click.echo("[9] Oops, you failed to login the FedML MLOps platform.")
-        click.echo("Please check whether your network is normal!")
-        return
-
-    # Build unique device id
-    if hasattr(args, "device_id") and args.device_id is not None and args.device_id != "0":
-        unique_device_id = args.current_device_id
-    else:
-        unique_device_id = args.current_device_id + "@" + args.os_name + ".Public.Server"
-
-    # Bind account id to FedML® Nexus AI Platform
-    register_try_count = 0
-    edge_id = -1
-    user_name = None
-    while register_try_count < 5:
-        try:
-            edge_id, user_name, extra_url = runner.bind_account_and_device_id(
-                service_config["ml_ops_config"]["EDGE_BINDING_URL"], args.account_id, unique_device_id, args.os_name
-            )
-            if edge_id > 0:
-                runner.edge_id = edge_id
-                break
-        except SystemExit as e:
-            click.echo("Your account does not exist. Please make sure your account correct.")
-            os.system("fedml logout -s")
-            return
-        except Exception as e:
-            click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_2, traceback.format_exc()))
-            click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING)
-            register_try_count += 1
-            time.sleep(3)
-            continue
-
-    if edge_id <= 0:
-        click.echo("")
-        click.echo("[10] Oops, you failed to login the FedML MLOps platform.")
-        click.echo("Please check whether your network is normal!")
-        return
-    setattr(args, "server_id", edge_id)
-    runner.args = args
-    runner.edge_id = edge_id
-    runner.user_name = "cloud_server" if user_name is None else user_name
-    init_logs(args, edge_id)
-
-    # Log arguments and binding results.
-    logging.info("login: unique_device_id = %s" % str(unique_device_id))
-    logging.info("login: server_id = %s" % str(edge_id))
-    ServerConstants.save_runner_infos(args.current_device_id + "." + args.os_name, edge_id)
-
-    # Echo results
-    print("\n\nCongratulations, your device is connected to the FedML MLOps platform successfully!")
-    print(
-        "Your unique device ID is "
-        + str(unique_device_id)
-        + "\n"
-    )
-
-    # Setup MQTT connection for communication with the FedML server.
-    try:
-        runner.setup_agent_mqtt_connection(service_config)
-    except Exception as e:
-        login_exit_file = os.path.join(ServerConstants.get_log_file_dir(), "exited.log")
-        with open(login_exit_file, "w") as f:
-            f.writelines(f"{os.getpid()}.")
-        runner.stop_agent()
-        raise e
-
-    # Start mqtt looper
-    runner.start_agent_mqtt_loop()
-
-
-def init_logs(args, edge_id):
-    # Init runtime logs
-    args.log_file_dir = ServerConstants.get_log_file_dir()
-    args.run_id = 0
-    args.role = "server"
-    args.edge_id = edge_id
-    setattr(args, "using_mlops", True)
-    setattr(args, "server_agent_id", edge_id)
-    MLOpsRuntimeLog.get_instance(args).init_logs()
-
-
-def login(args):
-    if args.role == ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_LOCAL_INDEX]:
-        __login_as_edge_server_and_agent(args, args.user, args.version, api_key=args.api_key)
-    elif args.role == ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_CLOUD_AGENT_INDEX]:
-        __login_as_cloud_agent(args, args.user, args.version)
-    elif args.role == ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_CLOUD_SERVER_INDEX]:
-        __login_as_cloud_server(args, args.user, args.version)
-    elif args.role == ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_GPU_MASTER_SERVER_INDEX]:
-        __login_as_edge_server_and_agent(args, args.user, args.version, api_key=args.api_key,
-                                         use_extra_device_id_suffix=".Edge.GPU.MasterServer", role=args.role)
+from fedml.computing.scheduler.master.master_agent import FedMLLaunchMasterAgent
 
 
 def logout():
-    ServerConstants.cleanup_run_process(None)
-    sys_utils.cleanup_all_fedml_server_api_processes()
+    FedMLLaunchMasterAgent.logout()
 
 
 if __name__ == "__main__":
@@ -432,7 +36,9 @@ def logout():
         fedml.set_local_on_premise_platform_port(args.local_on_premise_platform_port)
 
     fedml.set_env_version(args.version)
+    master_agent = FedMLLaunchMasterAgent()
     if args.type == 'login':
-        login(args)
+        master_agent.login(args.api_key, api_key=args.api_key, device_id=args.device_id,
+                           os_name=args.os_name, role=args.role)
     else:
-        logout()
+        master_agent.logout()
diff --git a/python/fedml/computing/scheduler/master/server_runner.py b/python/fedml/computing/scheduler/master/server_runner.py
deleted file mode 100755
index 0442c99972..0000000000
--- a/python/fedml/computing/scheduler/master/server_runner.py
+++ /dev/null
@@ -1,2767 +0,0 @@
-import base64
-import copy
-import json
-import logging
-import platform
-import queue
-import sys
-
-import multiprocessing
-from multiprocessing import Process, Queue, Value, Array
-import os
-import shutil
-import stat
-import subprocess
-import threading
-
-import time
-import traceback
-import urllib
-import uuid
-import zipfile
-from os import listdir
-from urllib.parse import urljoin, urlparse
-
-import requests
-
-import fedml
-from ..comm_utils.job_cleanup import JobCleanup
-from ..scheduler_core.scheduler_matcher import SchedulerMatcher
-from ..comm_utils.constants import SchedulerConstants
-from ..comm_utils.job_utils import JobRunnerUtils
-from ..comm_utils.run_process_utils import RunProcessUtils
-from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
-
-from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager
-from ..comm_utils.yaml_utils import load_yaml_config
-from ..slave.client_constants import ClientConstants
-from ..master.server_constants import ServerConstants
-
-from ....core.mlops.mlops_metrics import MLOpsMetrics
-
-from ....core.mlops.mlops_configs import MLOpsConfigs
-from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
-from ....core.mlops.mlops_status import MLOpsStatus
-from ..comm_utils.sys_utils import get_sys_runner_info, get_python_program
-from ..comm_utils import sys_utils
-from .server_data_interface import FedMLServerDataInterface
-from ....core.mlops.mlops_utils import MLOpsUtils
-from ..scheduler_entry.constants import Constants
-from ..model_scheduler.model_device_server import FedMLModelDeviceServerRunner
-from ..model_scheduler.device_model_cards import FedMLModelCards
-from ..model_scheduler import device_client_constants
-from ..scheduler_core.log_manager import LogsManager
-from ..scheduler_core.metrics_manager import MetricsManager
-from ..scheduler_core.master_api_daemon import MasterApiDaemon
-from fedml.utils.debugging import debug
-from ..scheduler_core.message_center import FedMLMessageCenter
-
-
-class RunnerError(Exception):
-    """ Runner stopped. """
-    pass
-
-
-class RunnerCompletedError(Exception):
-    """ Runner completed. """
-    pass
-
-
-class FedMLServerRunner(FedMLMessageCenter):
-    FEDML_CLOUD_SERVER_PREFIX = "fedml-server-run-"
-    debug_cloud_server = False
-
-    def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id=0):
-        super().__init__()
-        self.master_api_daemon = None
-        self.run_stop_process = None
-        self.run_stop_process_map = dict()
-        self.run_edge_id_status_queue_map = dict()
-        self.run_metrics_queue_map = dict()
-        self.run_events_queue_map = dict()
-        self.run_artifacts_queue_map = dict()
-        self.run_logs_queue_map = dict()
-        self.async_check_timeout = 0
-        self.enable_async_cluster = False
-        self.origin_fedml_config_object = None
-        self.package_type = SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT
-        self.local_api_process = None
-        self.run_process_event = None
-        self.run_process_event_map = dict()
-        self.run_process_completed_event = None
-        self.run_process_completed_event_map = dict()
-        self.run_process_event_map_for_stop = dict()
-        self.edge_device_info_queue = None
-        self.run_edge_device_info_queue_map = dict()
-        self.run_edge_device_info_queue_map_for_stop = dict()
-        self.run_edge_device_info_global_queue = None
-        self.run_edge_device_info_global_queue_for_stop = None
-        self.run_process = None
-        self.run_process_map = dict()
-        self.start_request_json = None
-        self.server_docker_image = None
-        self.cloud_server_name = None
-        self.run_as_cloud_agent = False
-        self.run_as_cloud_server = False
-        self.run_as_edge_server_and_agent = False
-        self.run_as_cloud_server_and_agent = False
-        self.fedml_packages_base_dir = None
-        self.fedml_packages_unzip_dir = None
-        self.mqtt_mgr = None
-        self.running_request_json = dict()
-        self.run_id = run_id
-        self.unique_device_id = None
-        self.edge_id = edge_id
-        self.server_agent_id = 0
-        if request_json is not None:
-            self.server_agent_id = request_json.get("server_id", 0)
-        self.process = None
-        self.args = args
-        self.request_json = copy.deepcopy(request_json)
-        self.version = args.version
-        self.device_id = args.device_id
-        self.cur_dir = os.path.split(os.path.realpath(__file__))[0]
-        if args.current_running_dir is not None:
-            self.cur_dir = args.current_running_dir
-
-        image_version = self.version
-        if image_version == "local":
-            image_version = "dev"
-        self.server_docker_base_image = "/fedml-device-image:" + image_version
-
-        self.agent_config = agent_config
-        self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data")
-        self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data")
-        self.fedml_data_dir = self.fedml_data_base_package_dir
-        self.fedml_config_dir = os.path.join("/", "fedml", "conf")
-
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {
-            "${FEDSYS.RUN_ID}": "",
-            "${FEDSYS.PRIVATE_LOCAL_DATA}": "",
-            "${FEDSYS.CLIENT_ID_LIST}": "",
-            "${FEDSYS.SYNTHETIC_DATA_URL}": "",
-            "${FEDSYS.IS_USING_LOCAL_DATA}": "",
-            "${FEDSYS.CLIENT_NUM}": "",
-            "${FEDSYS.CLIENT_INDEX}": "",
-            "${FEDSYS.CLIENT_OBJECT_LIST}": "",
-            "${FEDSYS.LOG_SERVER_URL}": "",
-        }
-
-        self.mlops_metrics = None
-        self.client_agent_active_list = dict()
-        self.server_active_list = dict()
-        self.run_status = None
-        self.ntp_offset = MLOpsUtils.get_ntp_offset()
-        self.runner_list = dict()
-        self.enable_simulation_cloud_agent = False
-        self.use_local_process_as_cloud_server = False
-
-        self.model_device_server = None
-        self.run_model_device_ids = dict()
-        self.run_edge_ids = dict()
-        self.master_api_process = None
-
-        self.subscribed_topics = list()
-        self.user_name = None
-        self.message_center = None
-
-    def build_dynamic_constrain_variables(self, run_id, run_config):
-        data_config = run_config.get("data_config", {})
-        server_edge_id_list = self.request_json["edgeids"]
-        is_using_local_data = 0
-        private_data_dir = data_config.get("privateLocalData", "")
-        synthetic_data_url = data_config.get("syntheticDataUrl", "")
-        edges = self.request_json["edges"]
-        # if private_data_dir is not None \
-        #         and len(str(private_data_dir).strip(' ')) > 0:
-        #     is_using_local_data = 1
-        if private_data_dir is None or len(str(private_data_dir).strip(" ")) <= 0:
-            params_config = run_config.get("parameters", None)
-            private_data_dir = ServerConstants.get_data_dir()
-        if synthetic_data_url is None or len(str(synthetic_data_url)) <= 0:
-            synthetic_data_url = private_data_dir
-
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.RUN_ID}"] = run_id
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.PRIVATE_LOCAL_DATA}"] = private_data_dir.replace(" ", "")
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_ID_LIST}"] = str(server_edge_id_list).replace(" ", "")
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.SYNTHETIC_DATA_URL}"] = synthetic_data_url.replace(" ", "")
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.IS_USING_LOCAL_DATA}"] = str(is_using_local_data)
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_NUM}"] = len(server_edge_id_list)
-        client_objects = str(json.dumps(edges))
-        client_objects = client_objects.replace(" ", "").replace("\n", "").replace('"', '\\"')
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_OBJECT_LIST}"] = client_objects
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.LOG_SERVER_URL}"] = self.agent_config["ml_ops_config"][
-            "LOG_SERVER_URL"
-        ]
-
-    def unzip_file(self, zip_file, unzip_file_path) -> str:
-        unziped_file_name = ""
-        if zipfile.is_zipfile(zip_file):
-            with zipfile.ZipFile(zip_file, "r") as zipf:
-                zipf.extractall(unzip_file_path)
-                unziped_file_name = zipf.namelist()[0]
-        else:
-            raise Exception("Invalid zip file {}".format(zip_file))
-
-        return unziped_file_name
-
-    def package_download_progress(self, count, blksize, filesize):
-        self.check_runner_stop_event()
-
-        downloaded = count * blksize
-        downloaded = filesize if downloaded > filesize else downloaded
-        progress = (downloaded / filesize * 100) if filesize != 0 else 0
-        progress_int = int(progress)
-        downloaded_kb = format(downloaded / 1024, '.2f')
-
-        # since this hook funtion is stateless, we need a state to avoid printing progress repeatly
-        if count == 0:
-            self.prev_download_progress = 0
-        if progress_int != self.prev_download_progress and progress_int % 5 == 0:
-            self.prev_download_progress = progress_int
-            logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int))
-
-    def retrieve_and_unzip_package(self, package_name, package_url):
-        local_package_path = ServerConstants.get_package_download_dir()
-        os.makedirs(local_package_path, exist_ok=True)
-        filename, filename_without_extension, file_extension = ServerConstants.get_filename_and_extension(package_url)
-        local_package_file = os.path.join(local_package_path, f"fedml_run_{self.run_id}_{filename_without_extension}")
-        if os.path.exists(local_package_file):
-            os.remove(local_package_file)
-        package_url_without_query_path = urljoin(package_url, urlparse(package_url).path)
-        urllib.request.urlretrieve(package_url_without_query_path, local_package_file,
-                                   reporthook=self.package_download_progress)
-        unzip_package_path = os.path.join(ClientConstants.get_package_unzip_dir(),
-                                          f"unzip_fedml_run_{self.run_id}_{filename_without_extension}")
-        try:
-            shutil.rmtree(unzip_package_path, ignore_errors=True)
-        except Exception as e:
-            pass
-
-        package_dir_name = self.unzip_file(local_package_file, unzip_package_path)  # Using unziped folder name
-        unzip_package_full_path = os.path.join(unzip_package_path, package_dir_name)
-
-        logging.info("local_package_file {}, unzip_package_path {}, unzip file full path {}".format(
-            local_package_file, unzip_package_path, unzip_package_full_path))
-
-        return unzip_package_full_path
-
-    def update_local_fedml_config(self, run_id, run_config):
-        packages_config = run_config["packages_config"]
-
-        # Copy config file from the client
-        server_package_name = packages_config.get("server", None)
-        server_package_url = packages_config.get("serverUrl", None)
-        unzip_package_path = self.retrieve_and_unzip_package(server_package_name, server_package_url)
-        self.fedml_packages_unzip_dir = unzip_package_path
-        fedml_local_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml")
-
-        # Load the above config to memory
-        config_from_container = load_yaml_config(fedml_local_config_file)
-        container_entry_file_config = config_from_container["entry_config"]
-        container_dynamic_args_config = config_from_container["dynamic_args"]
-        entry_file = container_entry_file_config["entry_file"]
-        conf_file = container_entry_file_config["conf_file"]
-        self.package_type = container_entry_file_config.get("package_type", SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT)
-        full_conf_path = os.path.join(unzip_package_path, "fedml", "config", os.path.basename(conf_file))
-
-        # Dynamically build constrain variable with realtime parameters from server
-        self.build_dynamic_constrain_variables(run_id, run_config)
-
-        # Update entry arguments value with constrain variable values with realtime parameters from server
-        # currently we support the following constrain variables:
-        # ${FEDSYS_RUN_ID}: a run id represented one entire Federated Learning flow
-        # ${FEDSYS_PRIVATE_LOCAL_DATA}: private local data path in the Federated Learning client
-        # ${FEDSYS_CLIENT_ID_LIST}: client list in one entire Federated Learning flow
-        # ${FEDSYS_SYNTHETIC_DATA_URL}: synthetic data url from server,
-        #                  if this value is not null, the client will download data from this URL to use it as
-        #                  federated training data set
-        # ${FEDSYS_IS_USING_LOCAL_DATA}: whether use private local data as federated training data set
-        # container_dynamic_args_config["data_cache_dir"] = "${FEDSYS.PRIVATE_LOCAL_DATA}"
-        for constrain_variable_key, constrain_variable_value in self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES.items():
-            for argument_key, argument_value in container_dynamic_args_config.items():
-                if argument_value is not None and str(argument_value).find(constrain_variable_key) == 0:
-                    replaced_argument_value = str(argument_value).replace(
-                        constrain_variable_key, str(constrain_variable_value)
-                    )
-                    container_dynamic_args_config[argument_key] = replaced_argument_value
-
-        # Merge all container new config sections as new config dictionary
-        package_conf_object = dict()
-        package_conf_object["entry_config"] = container_entry_file_config
-        package_conf_object["dynamic_args"] = container_dynamic_args_config
-        package_conf_object["dynamic_args"]["config_version"] = self.args.config_version
-        container_dynamic_args_config["mqtt_config_path"] = os.path.join(
-            unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["mqtt_config_path"])
-        )
-        container_dynamic_args_config["s3_config_path"] = os.path.join(
-            unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["s3_config_path"])
-        )
-        log_file_dir = ServerConstants.get_log_file_dir()
-        os.makedirs(log_file_dir, exist_ok=True)
-        package_conf_object["dynamic_args"]["log_file_dir"] = log_file_dir
-
-        # Save new config dictionary to local file
-        fedml_updated_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml")
-        ServerConstants.generate_yaml_doc(package_conf_object, fedml_updated_config_file)
-
-        # Build dynamic arguments and set arguments to fedml config object
-        if not self.build_dynamic_args(run_id, run_config, package_conf_object, unzip_package_path):
-            return None, None
-
-        return unzip_package_path, package_conf_object
-
-    def build_dynamic_args(self, run_id, run_config, package_conf_object, base_dir):
-        fedml_conf_file = package_conf_object["entry_config"]["conf_file"]
-        fedml_conf_file_processed = str(fedml_conf_file).replace('\\', os.sep).replace('/', os.sep)
-        fedml_conf_path = os.path.join(base_dir, "fedml", "config",
-                                       os.path.basename(fedml_conf_file_processed))
-        fedml_conf_object = load_yaml_config(fedml_conf_path)
-        self.origin_fedml_config_object = fedml_conf_object.copy()
-        run_params = run_config.get("parameters", {})
-        job_yaml = run_params.get("job_yaml", {})
-
-        # Replace local fedml config objects with parameters from MLOps web
-        parameters_object = run_config.get("parameters", None)
-        if parameters_object is not None:
-            for config_k, config_v in fedml_conf_object.items():
-                parameter_v = parameters_object.get(config_k, None)
-                if parameter_v is not None:
-                    fedml_conf_object[config_k] = parameter_v
-                    parameters_object.pop(config_k)
-
-            for config_k, config_v in parameters_object.items():
-                fedml_conf_object[config_k] = config_v
-
-        package_dynamic_args = package_conf_object["dynamic_args"]
-        if fedml_conf_object.get("comm_args", None) is not None:
-            fedml_conf_object["comm_args"]["mqtt_config_path"] = package_dynamic_args["mqtt_config_path"]
-            fedml_conf_object["comm_args"]["s3_config_path"] = package_dynamic_args["s3_config_path"]
-            fedml_conf_object["common_args"]["using_mlops"] = True
-        if fedml_conf_object.get("train_args", None) is not None:
-            fedml_conf_object["train_args"]["run_id"] = package_dynamic_args["run_id"]
-            fedml_conf_object["train_args"]["client_id_list"] = package_dynamic_args["client_id_list"]
-            fedml_conf_object["train_args"]["client_num_in_total"] = int(package_dynamic_args["client_num_in_total"])
-            fedml_conf_object["train_args"]["client_num_per_round"] = int(package_dynamic_args["client_num_in_total"])
-            fedml_conf_object["train_args"]["server_id"] = self.edge_id
-            fedml_conf_object["train_args"]["server_agent_id"] = self.request_json.get("cloud_agent_id", self.edge_id)
-            fedml_conf_object["train_args"]["group_server_id_list"] = self.request_json.get("group_server_id_list",
-                                                                                            list())
-        if fedml_conf_object.get("device_args", None) is not None:
-            fedml_conf_object["device_args"]["worker_num"] = int(package_dynamic_args["client_num_in_total"])
-        # fedml_conf_object["data_args"]["data_cache_dir"] = package_dynamic_args["data_cache_dir"]
-        if fedml_conf_object.get("tracking_args", None) is not None:
-            fedml_conf_object["tracking_args"]["log_file_dir"] = package_dynamic_args["log_file_dir"]
-            fedml_conf_object["tracking_args"]["log_server_url"] = package_dynamic_args["log_server_url"]
-
-        bootstrap_script_path = None
-        env_args = fedml_conf_object.get("environment_args", None)
-        if env_args is not None:
-            bootstrap_script_file = env_args.get("bootstrap", None)
-            if bootstrap_script_file is not None:
-                bootstrap_script_file = str(bootstrap_script_file).replace('\\', os.sep).replace('/', os.sep)
-                if platform.system() == 'Windows':
-                    bootstrap_script_file = bootstrap_script_file.rstrip('.sh') + '.bat'
-                if bootstrap_script_file is not None:
-                    bootstrap_script_dir = os.path.join(base_dir, "fedml", os.path.dirname(bootstrap_script_file))
-                    bootstrap_script_path = os.path.join(
-                        bootstrap_script_dir, bootstrap_script_dir, os.path.basename(bootstrap_script_file)
-                    )
-        # try:
-        #     os.makedirs(package_dynamic_args["data_cache_dir"], exist_ok=True)
-        # except Exception as e:
-        #     pass
-        fedml_conf_object["dynamic_args"] = package_dynamic_args
-
-        ServerConstants.generate_yaml_doc(fedml_conf_object, fedml_conf_path)
-
-        is_bootstrap_run_ok = True
-        try:
-            if bootstrap_script_path is not None:
-                if os.path.exists(bootstrap_script_path):
-                    bootstrap_stat = os.stat(bootstrap_script_path)
-                    if platform.system() == 'Windows':
-                        os.chmod(bootstrap_script_path,
-                                 bootstrap_stat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
-                        bootstrap_scripts = "{}".format(bootstrap_script_path)
-                    else:
-                        os.chmod(bootstrap_script_path,
-                                 bootstrap_stat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
-                        bootstrap_scripts = "cd {}; ./{}".format(bootstrap_script_dir,
-                                                                 os.path.basename(bootstrap_script_file))
-                    bootstrap_scripts = str(bootstrap_scripts).replace('\\', os.sep).replace('/', os.sep)
-                    logging.info("Bootstrap scripts are being executed...")
-                    shell_cmd_list = list()
-                    shell_cmd_list.append(bootstrap_scripts)
-                    process, error_list = ServerConstants.execute_commands_with_live_logs(
-                        shell_cmd_list, callback=self.callback_run_bootstrap)
-
-                    ret_code, out, err = process.returncode, None, None
-                    if ret_code is None or ret_code <= 0:
-                        if error_list is not None and len(error_list) > 0:
-                            is_bootstrap_run_ok = False
-                        else:
-                            if out is not None:
-                                out_str = sys_utils.decode_our_err_result(out)
-                                if out_str != "":
-                                    logging.info("{}".format(out_str))
-
-                            sys_utils.log_return_info(bootstrap_script_file, 0)
-
-                            is_bootstrap_run_ok = True
-                    else:
-                        if err is not None:
-                            err_str = sys_utils.decode_our_err_result(err)
-                            if err_str != "":
-                                logging.error("{}".format(err_str))
-
-                        sys_utils.log_return_info(bootstrap_script_file, ret_code)
-
-                        is_bootstrap_run_ok = False
-        except Exception as e:
-            logging.error("Bootstrap scripts error: {}".format(traceback.format_exc()))
-
-            is_bootstrap_run_ok = False
-
-        return is_bootstrap_run_ok
-
-    def callback_run_bootstrap(self, job_pid):
-        ServerConstants.save_bootstrap_process(self.run_id, job_pid)
-
-    @debug
-    def run(
-            self, process_event, completed_event, edge_id_status_queue=None,
-            edge_device_info_queue=None, run_metrics_queue=None,
-            run_event_queue=None, run_artifacts_queue=None, run_logs_queue=None,
-            message_center_queue=None, edge_device_info_global_queue=None
-    ):
-        print(f"Server runner process id {os.getpid()}, run id {self.run_id}")
-
-        if platform.system() != "Windows":
-            os.setsid()
-
-        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
-        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')
-
-        self.run_process_event = process_event
-        self.run_process_completed_event = completed_event
-        try:
-            MLOpsUtils.set_ntp_offset(self.ntp_offset)
-
-            self.rebuild_message_center(message_center_queue)
-
-            self.run_impl(edge_id_status_queue, edge_device_info_queue, run_metrics_queue,
-                          run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue)
-        except RunnerError:
-            logging.info("Runner stopped.")
-            self.mlops_metrics.report_server_id_status(
-                self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED, edge_id=self.edge_id,
-                server_id=self.edge_id, server_agent_id=self.edge_id)
-        except RunnerCompletedError:
-            logging.info("Runner completed.")
-        except Exception as e:
-            logging.error("Runner exits with exceptions. {}".format(traceback.format_exc()))
-            self.mlops_metrics.report_server_id_status(
-                self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
-                server_id=self.edge_id, server_agent_id=self.edge_id)
-        finally:
-            logging.info("Release resources.")
-            self._process_run_metrics_queue(run_metrics_queue)
-            self._process_run_logs_queue(run_logs_queue)
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id)
-            if self.mlops_metrics is not None:
-                self.mlops_metrics.stop_sys_perf()
-            time.sleep(3)
-            ServerConstants.cleanup_run_process(self.run_id)
-            ServerConstants.cleanup_learning_process(self.run_id)
-            ServerConstants.cleanup_bootstrap_process(self.run_id)
-
-    def check_runner_stop_event(self):
-        if self.run_process_event is not None and self.run_process_event.is_set():
-            logging.info("Received stopping event.")
-            raise RunnerError("Runner stopped")
-
-        if self.run_process_completed_event is not None and self.run_process_completed_event.is_set():
-            logging.info("Received completed event.")
-            raise RunnerCompletedError("Runner completed")
-
-    def deploy_model(self, serving_devices, request_json, run_id):
-        run_config = request_json["run_config"]
-        run_params = run_config.get("parameters", {})
-        job_yaml = run_params.get("job_yaml", {})
-        job_type = job_yaml.get("job_type", None)
-        job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type
-        if job_type == Constants.JOB_TASK_TYPE_DEPLOY or job_type == Constants.JOB_TASK_TYPE_SERVE:
-            computing = job_yaml.get("computing", {})
-            num_gpus = computing.get("minimum_num_gpus", 1)
-            serving_args = run_params.get("serving_args", {})
-            model_id = serving_args.get("model_id", None)
-            model_name = serving_args.get("model_name", None)
-            model_version = serving_args.get("model_version", None)
-            model_storage_url = serving_args.get("model_storage_url", None)
-            endpoint_name = serving_args.get("endpoint_name", None)
-            endpoint_id = serving_args.get("endpoint_id", None)
-            random = serving_args.get("random", "")
-            random_out = sys_utils.random2(random, "FEDML@9999GREAT")
-            random_list = random_out.split("FEDML@")
-            device_type = device_client_constants.ClientConstants.login_role_list[
-                device_client_constants.ClientConstants.LOGIN_MODE_FEDML_CLOUD_INDEX]
-            FedMLModelCards.get_instance().deploy_model(
-                model_name, device_type, json.dumps(serving_devices),
-                "", random_list[1], None,
-                in_model_id=model_id, in_model_version=model_version,
-                endpoint_name=endpoint_name, endpoint_id=endpoint_id, run_id=run_id)
-
-    @debug
-    def run_impl(
-            self, edge_id_status_queue, edge_device_info_queue, run_metrics_queue,
-            run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue
-    ):
-        run_id = self.request_json["runId"]
-        run_config = self.request_json["run_config"]
-        data_config = run_config["data_config"]
-        edge_ids = self.request_json["edgeids"]
-
-        self.check_runner_stop_event()
-
-        self.run_id = run_id
-        self.args.run_id = self.run_id
-        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
-
-        # report server running status
-        self.mlops_metrics.report_server_id_status(
-            run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_STARTING, edge_id=self.edge_id,
-            server_id=self.edge_id, server_agent_id=self.edge_id)
-
-        logging.info("Detect all status of Edge ids: " + str(edge_ids))
-
-        status_ok, active_edge_info_dict, inactivate_edges = self.detect_edges_status(
-            edge_device_info_queue, edge_device_info_global_queue=edge_device_info_global_queue,
-            callback_when_edges_ready=self.send_training_request_to_edges)
-        logging.info(f"Status OK: {status_ok}, Active edge info dict: {active_edge_info_dict}, "
-                     f"inactivate edges: {inactivate_edges}")
-        if not status_ok:
-            logging.error(f"Status of edge device is not OK. Active edge info dict: {active_edge_info_dict}, "
-                          f"Inactivate edges: {inactivate_edges}")
-            return
-
-        if not self.should_continue_run_job(run_id):
-            if FedMLServerRunner.debug_cloud_server:
-                while True:
-                    time.sleep(30)
-            # Check if the run status is normal
-            self.aggregate_run_status_metrics_logs(
-                run_id, edge_ids, edge_id_status_queue, edge_device_info_queue,
-                edge_device_info_global_queue,
-                run_metrics_queue, run_logs_queue)
-            return
-
-        # Start the server job
-        self._start_runner_process(run_id, self.request_json, is_server_job=True)
-
-        # Check if the run status is normal
-        self.aggregate_run_status_metrics_logs(
-            run_id, edge_ids, edge_id_status_queue, edge_device_info_queue,
-            edge_device_info_global_queue,
-            run_metrics_queue, run_logs_queue)
-
-    def aggregate_run_status_metrics_logs(
-            self, run_id, edge_id_list, edge_id_status_queue, edge_device_info_queue,
-            edge_device_info_global_queue, run_metrics_queue, run_logs_queue):
-        total_sleep_seconds = 0
-        sleep_seconds = 3
-        allowed_status_check_sleep_seconds = 60 * 25
-        server_id = self.edge_id
-        normal_response_status_list = [
-            ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE,
-            ClientConstants.MSG_MLOPS_CLIENT_STATUS_TRAINING, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED,
-            ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
-            ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION, ClientConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING
-        ]
-        edges_id_status_timeout_map = dict()
-        number_of_failed_edges = 0
-        number_of_finished_edges = 0
-        number_of_killed_edges = 0
-        running_edges_list = list()
-        inactivate_edge_list = list()
-        current_edge_id_status_map = dict()
-
-        while True:
-            self.check_runner_stop_event()
-
-            # Process run metrics
-            self._process_run_metrics_queue(run_metrics_queue)
-
-            # Process run logs
-            self._process_run_logs_queue(run_logs_queue)
-
-            # Fetch edge id and status from the edge id status queue
-            while True:
-                try:
-                    queue_item = edge_id_status_queue.get(block=False, timeout=3)
-                    if queue_item is not None:
-                        current_edge_id_status_map.update(queue_item)
-                except queue.Empty as e:  # If queue is empty, then break loop
-                    break
-
-            # Calc the total completed device number
-            server_id = current_edge_id_status_map.get("server", 0)
-            running_edges_list.clear()
-            number_of_failed_edges = 0
-            number_of_finished_edges = 0
-            number_of_killed_edges = 0
-            for edge_id_item, status_item in current_edge_id_status_map.items():
-                if edge_id_item == "server":
-                    continue
-
-                if status_item is None or status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \
-                        status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION:
-                    number_of_failed_edges += 1
-                    continue
-
-                if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED:
-                    number_of_finished_edges += 1
-                    continue
-
-                if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED:
-                    number_of_killed_edges += 1
-                    continue
-
-                if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE or \
-                        status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE:
-                    continue
-
-                running_edges_list.append(edge_id_item)
-
-            # Process the no response edges and accumulate the counter.
-            for edge_id_item in edge_id_list:
-                status_dict = edges_id_status_timeout_map.get(str(edge_id_item))
-                status_item = current_edge_id_status_map.get(str(edge_id_item))
-                if status_item is None:
-                    continue
-                if status_dict is None:
-                    status_dict = {"status": status_item, "count": 0}
-                else:
-                    if status_item in normal_response_status_list:
-                        status_dict["count"] = 0
-                    else:
-                        status_dict["count"] += 1
-                edges_id_status_timeout_map[str(edge_id_item)] = status_dict
-
-            # If the completed device number is equal total device number, then break
-            if len(running_edges_list) <= 0 and len(current_edge_id_status_map.keys()) == len(edge_id_list) + 1:
-                break
-
-            # Calc the timeout value to wait to device killed.
-            self.check_runner_stop_event()
-            time.sleep(sleep_seconds)
-            total_sleep_seconds += sleep_seconds
-            no_response_edge_ids = list()
-            for no_res_edge, no_res_status in edges_id_status_timeout_map.items():
-                if no_res_status.get("count") * sleep_seconds > allowed_status_check_sleep_seconds:
-                    no_response_edge_ids.append(no_res_edge)
-
-            # If timeout, then report killed device status
-            if len(no_response_edge_ids) > 0:
-                for edge_id_item in no_response_edge_ids:
-                    self.mlops_metrics.report_client_id_status(
-                        edge_id_item, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED,
-                        server_id=self.edge_id, run_id=self.run_id)
-
-            # Check if we can get the response device info from edge devices
-            # and set the inactive edges to killed status.
-            self.check_runner_stop_event()
-            given_edge_ids = list(set(edge_id_list) - set(inactivate_edge_list))
-            status_ok, active_edge_info_dict, inactivate_edges = self.detect_edges_status(
-                edge_device_info_queue, edge_device_info_global_queue=edge_device_info_global_queue,
-                need_to_trigger_exception=False, status_timeout=60,
-                given_edge_ids=given_edge_ids, callback_when_detecting=self.callback_when_detecting_on_aggregation,
-                args_for_callback_when_detecting=(run_metrics_queue, run_logs_queue)
-            )
-            if not status_ok:
-                inactivate_edge_list.extend(inactivate_edges)
-                for edge_id_item in inactivate_edges:
-                    self.mlops_metrics.report_client_id_status(
-                        edge_id_item, ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE,
-                        server_id=self.edge_id, run_id=self.run_id)
-
-        # Calc the final run status based on the completed device numbers and fault tolerance parameters.
-        enable_fault_tolerance, fault_tolerance_rate = self.parse_fault_tolerance_params(run_id)
-        running_edges_list = list(set(running_edges_list))
-        status_to_report = self.calculate_server_status(
-            run_id, len(edge_id_list), number_of_failed_edges, number_of_finished_edges,
-            number_of_killed_edges, running_edges_list, enable_fault_tolerance=enable_fault_tolerance,
-            fault_tolerance_rate=fault_tolerance_rate)
-        if status_to_report is not None:
-            logging.info(
-                f"Run completed when aggregating status, metrics and logs, will report status {status_to_report}")
-            self.mlops_metrics.report_server_id_status(
-                self.run_id, status_to_report, edge_id=self.edge_id,
-                server_id=self.edge_id, server_agent_id=self.edge_id)
-
-    def callback_when_detecting_on_aggregation(self, detecting_args):
-        # Process run metrics
-        self._process_run_metrics_queue(detecting_args[0])
-
-        # Process run logs
-        self._process_run_logs_queue(detecting_args[1])
-
-    def _process_run_metrics_queue(self, run_metrics_queue):
-        # Fetch metrics from the run metrics queue
-        while True:
-            try:
-                metrics_item = run_metrics_queue.get(block=False, timeout=3)
-                MetricsManager.get_instance().save_metrics(metrics_item)
-                metric_json = json.loads(metrics_item)
-                if metric_json.get("is_endpoint", False):
-                    metric_json().pop("is_endpoint")
-                    self.mlops_metrics.report_endpoint_metric({}, payload=json.dumps(metric_json))
-                else:
-                    self.mlops_metrics.report_server_training_metric({}, payload=metrics_item)
-            except queue.Empty as e:  # If queue is empty, then break loop
-                break
-
-    def _process_run_logs_queue(self, run_logs_queue):
-        # Fetch logs from the run logs queue
-        while True:
-            try:
-                logs_item = run_logs_queue.get(block=False, timeout=3)
-                LogsManager.save_logs(logs_item)
-            except queue.Empty as e:  # If queue is empty, then break loop
-                break
-
    def run_server_job_impl(self, process_event, completed_event, edge_id_status_queue=None,
                            edge_device_info_queue=None, run_metrics_queue=None,
                            run_event_queue=None, run_artifacts_queue=None, run_logs_queue=None,
                            message_center_queue=None, edge_device_info_global_queue=None):
        """Execute the server-side job inside a dedicated child process.

        Downloads and unpacks the run package, launches the user entry program,
        and reports the final run status to MLOps. `process_event` and
        `completed_event` are cross-process events used for stop/completion
        signalling; the queue arguments carry run data between processes.
        """
        print(f"Server runner process id {os.getpid()}, run id {self.run_id}")

        # Detach into a new session so signals target this run's process group only.
        if platform.system() != "Windows":
            os.setsid()

        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')

        self.run_process_event = process_event
        self.run_process_completed_event = completed_event

        MLOpsUtils.set_ntp_offset(self.ntp_offset)

        # Re-attach the message center in this child process.
        self.rebuild_message_center(message_center_queue)

        run_id = self.request_json["runId"]
        run_config = self.request_json["run_config"]
        data_config = run_config["data_config"]
        edge_ids = self.request_json["edgeids"]

        self.check_runner_stop_event()

        # get training params
        private_local_data_dir = data_config.get("privateLocalData", "")
        is_using_local_data = 0
        # if private_local_data_dir is not None and len(str(private_local_data_dir).strip(' ')) > 0:
        #     is_using_local_data = 1

        # start a run according to the hyper-parameters
        # fedml_local_data_dir = self.cur_dir + "/fedml_data/run_" + run_id_str + "_edge_" + str(edge_id)
        fedml_local_data_dir = os.path.join(self.cur_dir, "fedml_data")
        fedml_local_config_dir = os.path.join(self.cur_dir, "fedml_config")
        if is_using_local_data:
            fedml_local_data_dir = private_local_data_dir
        self.fedml_data_dir = self.fedml_data_local_package_dir

        self.check_runner_stop_event()

        logging.info("download packages and run the bootstrap script...")

        # update local config with real time parameters from server and dynamically replace variables value
        unzip_package_path, fedml_config_object = self.update_local_fedml_config(run_id, run_config)
        if unzip_package_path is None or fedml_config_object is None:
            # Package/config preparation failed: clean up and stop all edges.
            logging.info("failed to update local fedml config.")
            self.check_runner_stop_event()
            self.cleanup_run_when_starting_failed()
            self.send_training_stop_request_to_edges_when_exception(edge_ids, payload=self.start_request_json,
                                                                    run_id=run_id)
            return

        logging.info("cleanup the previous aggregation process and check downloaded packages...")

        entry_file_config = fedml_config_object["entry_config"]
        dynamic_args_config = fedml_config_object["dynamic_args"]
        # Normalize path separators for the current platform.
        entry_file = str(entry_file_config["entry_file"]).replace('\\', os.sep).replace('/', os.sep)
        entry_file = os.path.basename(entry_file)
        conf_file = entry_file_config["conf_file"]
        conf_file = str(conf_file).replace('\\', os.sep).replace('/', os.sep)
        ServerConstants.cleanup_learning_process(run_id)
        self.check_runner_stop_event()
        if not os.path.exists(unzip_package_path):
            logging.info("failed to unzip file.")
            self.check_runner_stop_event()
            self.cleanup_run_when_starting_failed()
            self.send_training_stop_request_to_edges_when_exception(edge_ids, payload=self.start_request_json,
                                                                    run_id=run_id)
            return
        os.chdir(os.path.join(unzip_package_path, "fedml"))

        self.check_runner_stop_event()

        logging.info("starting the server user process...")

        entry_file_full_path = os.path.join(unzip_package_path, "fedml", entry_file)
        conf_file_full_path = os.path.join(unzip_package_path, "fedml", conf_file)
        logging.info("                          ")
        logging.info("                          ")
        logging.info("====Your Run Logs Begin===")
        process, is_launch_task, error_list = self.execute_job_task(entry_file_full_path, conf_file_full_path, run_id)
        logging.info("====Your Run Logs End===")
        logging.info("                        ")
        logging.info("                        ")

        # NOTE(review): `out` and `err` are never reassigned below, so the
        # decode branches on them are dead code — confirm whether output
        # capture was intended here.
        ret_code, out, err = process.returncode, None, None
        is_run_ok = sys_utils.is_runner_finished_normally(process.pid)
        if is_launch_task:
            is_run_ok = True
        if error_list is not None and len(error_list) > 0:
            is_run_ok = False
        if ret_code is None or ret_code <= 0:
            self.check_runner_stop_event()

            if is_run_ok:
                if out is not None:
                    out_str = sys_utils.decode_our_err_result(out)
                    if out_str != "":
                        logging.info("{}".format(out_str))

                self.mlops_metrics.report_server_id_status(
                    run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, edge_id=self.edge_id,
                    server_id=self.edge_id, server_agent_id=self.edge_id)

                if is_launch_task:
                    sys_utils.log_return_info(f"job {run_id}", 0)
                else:
                    sys_utils.log_return_info(entry_file, 0)
        else:
            is_run_ok = False

        if not is_run_ok:
            # If the run status is killed or finished, then return with the normal state.
            current_job = FedMLServerDataInterface.get_instance().get_job_by_id(run_id)
            if current_job is not None and (current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED or
                                            current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED):
                return

            self.check_runner_stop_event()

            logging.error("failed to run the aggregation process...")

            if err is not None:
                err_str = sys_utils.decode_our_err_result(err)
                if err_str != "":
                    logging.error("{}".format(err_str))

            if is_launch_task:
                sys_utils.log_return_info(f"job {run_id}", ret_code)
            else:
                sys_utils.log_return_info(entry_file, ret_code)

            self.send_training_stop_request_to_edges_when_exception(edge_ids, run_id=run_id)
-
-    def init_job_task(self, request_json):
-        run_id = request_json["runId"]
-        run_config = request_json["run_config"]
-        edge_ids = request_json["edgeids"]
-        run_params = run_config.get("parameters", {})
-        job_yaml = run_params.get("job_yaml", None)
-        server_id = request_json["server_id"]
-        if self.run_as_cloud_agent:
-            server_id = self.edge_id
-
-        self.setup_listeners_for_edge_status(run_id, edge_ids, server_id)
-        self.setup_listener_for_run_metrics(run_id)
-        self.setup_listener_for_run_logs(run_id)
-
-    def should_continue_run_job(self, run_id):
-        run_config = self.request_json["run_config"]
-        run_params = run_config.get("parameters", {})
-        job_yaml = run_params.get("job_yaml", {})
-        job_yaml_default_none = run_params.get("job_yaml", None)
-        framework_type = job_yaml.get("framework_type", None)
-        job_type = job_yaml.get("job_type", None)
-        job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type
-        if job_yaml_default_none is not None:
-            if job_type == Constants.JOB_TASK_TYPE_FEDERATE:
-                return True
-
-            if framework_type is None or framework_type != Constants.JOB_FRAMEWORK_TYPE_FEDML:
-                self.mlops_metrics.report_server_id_status(
-                    run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_RUNNING, edge_id=self.edge_id,
-                    server_id=self.edge_id, server_agent_id=self.edge_id)
-                return False
-
-        return True
-
    def execute_job_task(self, entry_file_full_path, conf_file_full_path, run_id):
        """Launch the server-side job process for this run.

        Builds the execution command differently for the legacy federated
        learning flow (no job_yaml in the run parameters) versus launch-style
        jobs, then runs it with live log streaming.

        Returns:
            (process, is_launch_task, error_list): the spawned process handle,
            whether the launch-job path was used, and any errors captured from
            the job output.
        """
        run_config = self.request_json["run_config"]
        run_params = run_config.get("parameters", {})
        job_yaml = run_params.get("job_yaml", {})
        # Distinguish "job_yaml missing" (legacy FL) from "present but empty".
        job_yaml_default_none = run_params.get("job_yaml", None)
        job_api_key = job_yaml.get("run_api_key", None)
        # Fall back to the dynamic-params field when no explicit api key is set.
        job_api_key = job_yaml.get("fedml_run_dynamic_params", None) if job_api_key is None else job_api_key
        assigned_gpu_ids = run_params.get("gpu_ids", None)
        framework_type = job_yaml.get("framework_type", None)
        job_type = job_yaml.get("job_type", None)
        # Older payloads used "task_type" instead of "job_type".
        job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type
        conf_file_object = load_yaml_config(conf_file_full_path)
        entry_args_dict = conf_file_object.get("fedml_entry_args", {})
        entry_args = entry_args_dict.get("arg_items", None)

        # PowerShell on Windows, bash elsewhere.
        executable_interpreter = ClientConstants.CLIENT_SHELL_PS \
            if platform.system() == ClientConstants.PLATFORM_WINDOWS else ClientConstants.CLIENT_SHELL_BASH

        if job_yaml_default_none is None:
            # Generate the job executing commands for previous federated learning (Compatibility)
            python_program = get_python_program()
            logging.info("Run the server: {} {} --cf {} --rank 0 --role server".format(
                python_program, entry_file_full_path, conf_file_full_path))
            entry_command = f"{python_program} {entry_file_full_path} --cf " \
                            f"{conf_file_full_path} --rank 0 --role server"
            shell_cmd_list = [entry_command]

            # Run the job executing commands for previous federated learning (Compatibility)
            process, error_list = ClientConstants.execute_commands_with_live_logs(
                shell_cmd_list, callback=self.callback_start_fl_job, should_write_log_file=False)
            is_launch_task = False
        else:
            self.check_runner_stop_event()

            self.mlops_metrics.report_server_id_status(
                run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_RUNNING, edge_id=self.edge_id,
                server_id=self.edge_id, server_agent_id=self.edge_id)

            # Generate the job executing commands
            job_executing_commands = JobRunnerUtils.generate_job_execute_commands(
                run_id=self.run_id, edge_id=self.edge_id, version=self.version, package_type=self.package_type,
                executable_interpreter=executable_interpreter, entry_file_full_path=entry_file_full_path,
                conf_file_object=conf_file_object, entry_args=entry_args, assigned_gpu_ids=assigned_gpu_ids,
                job_api_key=job_api_key, client_rank=0)

            # Run the job executing commands
            logging.info(f"Run the server job with job id {self.run_id}, device id {self.edge_id}.")
            process, error_list = ServerConstants.execute_commands_with_live_logs(
                job_executing_commands, callback=self.start_job_perf, error_processor=self.job_error_processor)
            is_launch_task = True

        return process, is_launch_task, error_list
-
-    def callback_start_fl_job(self, job_pid):
-        ServerConstants.save_learning_process(self.run_id, job_pid)
-        self.mlops_metrics.report_sys_perf(
-            self.args, self.agent_config["mqtt_config"], job_process_id=job_pid)
-
-    def start_job_perf(self, job_pid):
-        ServerConstants.save_learning_process(self.run_id, job_pid)
-        self.mlops_metrics.report_job_perf(self.args, self.agent_config["mqtt_config"], job_pid)
-
-    def job_error_processor(self, error_list):
-        self.check_runner_stop_event()
-
-        error_str = "\n".join(error_list)
-        raise Exception(f"Error occurs when running the job... {error_str}")
-
    def process_job_status(self, run_id, edge_id, status):
        """Handle one edge status change and derive the overall run status.

        Tallies terminal/running edges from the cached per-run status map,
        reports the changed edge's status to MLOps and the web UI, and, if
        every edge has reached a terminal state, reports the computed server
        status.
        """
        number_of_failed_edges = 0
        number_of_finished_edges = 0
        number_of_killed_edges = 0
        edge_id_status_dict = self.client_agent_active_list.get(f"{run_id}", {})
        server_id = edge_id_status_dict.get("server", 0)
        enable_fault_tolerance, fault_tolerance_rate = self.parse_fault_tolerance_params(run_id)
        running_edges_list = list()
        for edge_id_item, status_item in edge_id_status_dict.items():
            # The "server" key holds the server id, not an edge status.
            if edge_id_item == "server":
                continue

            # NOTE(review): edge statuses are compared against ServerConstants
            # values here; presumably the underlying string constants match the
            # client-side ones — confirm.
            if status_item is None or status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \
                    status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION:
                number_of_failed_edges += 1
                continue

            if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED:
                number_of_finished_edges += 1
                continue

            if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED:
                number_of_killed_edges += 1
                continue

            # Idle/offline edges count as neither running nor terminal.
            if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE or \
                    status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE:
                continue

            running_edges_list.append(edge_id_item)

        # Report client status
        edge_status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION else status
        self.mlops_metrics.report_client_training_status(edge_id, edge_status, run_id=run_id)
        self.mlops_metrics.report_client_device_status_to_web_ui(edge_id, edge_status, run_id=run_id)

        # Report server status based on the fault tolerance model and parameters
        # Subtract one to exclude the "server" entry from the edge count.
        edge_nums = len(edge_id_status_dict.keys()) - 1
        status_to_report = self.calculate_server_status(
            run_id, edge_nums, number_of_failed_edges, number_of_finished_edges, number_of_killed_edges,
            running_edges_list, enable_fault_tolerance=enable_fault_tolerance,
            fault_tolerance_rate=fault_tolerance_rate)
        if status_to_report is not None:
            logging.info(f"Run completed when processing edge status, will report status {status_to_report}")
            self.report_server_status(run_id, server_id, status_to_report)
-
-    def calculate_server_status(
-            self, run_id, total_edge_nums, number_of_failed_edges, number_of_finished_edges,
-            number_of_killed_edges, running_edges_list, enable_fault_tolerance=False,
-            fault_tolerance_rate=0.8
-    ):
-        # Report server status based on the fault tolerance model and parameters
-        actual_failed_rate = number_of_failed_edges / total_edge_nums
-        all_edges_run_completed = True if len(running_edges_list) <= 0 else False
-        if all_edges_run_completed:
-            status_to_report = None
-            if enable_fault_tolerance:
-                if actual_failed_rate >= fault_tolerance_rate:
-                    status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED
-                    self.send_training_stop_request_to_edges_when_exception(
-                        running_edges_list, run_id=run_id, status=status_to_report)
-                    return status_to_report
-                else:
-                    if number_of_killed_edges == total_edge_nums:
-                        status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED
-                    else:
-                        status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED
-            else:
-                if number_of_failed_edges > 0:
-                    status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED
-                elif number_of_finished_edges == total_edge_nums:
-                    status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED
-                elif number_of_killed_edges == total_edge_nums:
-                    status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED
-
-            return status_to_report
-
-    def parse_fault_tolerance_params(self, run_id):
-        run_json = self.running_request_json.get(str(run_id), None)
-        if run_json is None:
-            run_json = self.request_json
-        run_config = run_json.get("run_config", {})
-        run_params = run_config.get("parameters", {})
-        common_args = run_params.get("common_args", {})
-        enable_fault_tolerance = common_args.get("enable_fault_tolerance", False)
-        fault_tolerance_rate = common_args.get("fault_tolerance_rate", 0)
-        return enable_fault_tolerance, fault_tolerance_rate
-
-    def report_server_status(self, run_id, server_id, status):
-        self.mlops_metrics.report_server_id_status(run_id, status, edge_id=self.edge_id,
-                                                   server_id=server_id, server_agent_id=self.edge_id)
-
-    def stop_run_when_starting_failed(self):
-        edge_id_list = self.request_json["edgeids"]
-        run_id = self.request_json.get("run_id", 0)
-        logging.error("edge ids {}".format(str(edge_id_list)))
-
-        payload = self.running_request_json.get(str(run_id))
-        if payload is not None:
-            self.send_training_stop_request_to_edges(edge_id_list, payload=json.dumps(payload), run_id=run_id)
-
-        # logging.info("Stop run successfully when starting failed.")
-
-        self.mlops_metrics.report_server_id_status(
-            self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
-            server_id=self.edge_id, server_agent_id=self.edge_id)
-
-    def cleanup_run_when_finished(self, should_send_server_id_status=True):
-        # logging.info("Cleanup run successfully when finished.")
-
-        self.mlops_metrics.report_server_training_status(
-            self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, edge_id=self.edge_id
-        )
-
-        if should_send_server_id_status:
-            self.mlops_metrics.report_server_id_status(
-                self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, edge_id=self.edge_id,
-                server_id=self.edge_id, server_agent_id=self.edge_id)
-
-        try:
-            self.mlops_metrics.stop_sys_perf()
-        except Exception as ex:
-            pass
-
-        time.sleep(1)
-
-        ServerConstants.cleanup_learning_process(self.run_id)
-        ServerConstants.cleanup_bootstrap_process(self.run_id)
-
-        try:
-            local_package_path = ServerConstants.get_package_download_dir()
-            for package_file in listdir(local_package_path):
-                if os.path.basename(package_file).startswith("run_" + str(self.run_id)):
-                    shutil.rmtree(os.path.join(local_package_path, package_file), ignore_errors=True)
-        except Exception as e:
-            pass
-
-    def cleanup_run_when_starting_failed(
-            self, status=ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, should_send_server_id_status=True):
-        # logging.info("Cleanup run successfully when starting failed.")
-
-        self.mlops_metrics.report_server_training_status(
-            self.run_id, status, edge_id=self.edge_id)
-
-        if should_send_server_id_status:
-            self.mlops_metrics.report_server_id_status(
-                self.run_id, status, edge_id=self.edge_id,
-                server_id=self.edge_id, server_agent_id=self.edge_id)
-
-        try:
-            self.mlops_metrics.stop_sys_perf()
-        except Exception as ex:
-            pass
-
-        time.sleep(1)
-
-        ServerConstants.cleanup_learning_process(self.run_id)
-        ServerConstants.cleanup_bootstrap_process(self.run_id)
-
-        try:
-            local_package_path = ServerConstants.get_package_download_dir()
-            for package_file in listdir(local_package_path):
-                if os.path.basename(package_file).startswith("run_" + str(self.run_id)):
-                    shutil.rmtree(os.path.join(local_package_path, package_file), ignore_errors=True)
-        except Exception as e:
-            pass
-
-    def should_process_async_cluster(self):
-        run_config = self.request_json.get("run_config", {})
-        run_params = run_config.get("parameters", {})
-        common_args = run_params.get("common_args", {})
-        self.enable_async_cluster = common_args.get("enable_async_cluster", False)
-        self.async_check_timeout = common_args.get("async_check_timeout", 0)
-        if self.enable_async_cluster:
-            return True, self.async_check_timeout
-
-        return False, self.async_check_timeout
-
-    @debug
-    def detect_edges_status(
-            self, edge_device_info_queue, edge_device_info_global_queue=None, callback_when_edges_ready=None, status_timeout=None,
-            need_to_trigger_exception=True, status_check_context=None, given_edge_ids=None,
-            callback_when_detecting=None, args_for_callback_when_detecting=None
-    ):
-        run_id = self.request_json["runId"]
-        run_id_str = str(run_id)
-        edge_id_list = self.request_json["edgeids"]
-        if given_edge_ids is not None:
-            edge_id_list = given_edge_ids
-
-        # Init realtime status of all edges
-        run_edges_realtime_status = dict()
-        run_edges_realtime_status[run_id_str] = dict()
-
-        edge_info_global_dict = dict()
-        if edge_device_info_global_queue is not None:
-            for edge_info_global in edge_device_info_global_queue:
-                edge_info_id = edge_info_global.get("edge_id")
-                edge_info_global_dict[edge_info_id] = edge_info_global
-
-        # Send status message to all edges
-        allowed_cache_edge_status_time = 60
-        for edge_id in edge_id_list:
-            # Check if the edge status was filled allowed_cache_edge_status_time seconds ago,
-            # if so no more checking message would be sent.
-            edge_info = edge_info_global_dict.get(edge_id, None)
-            if edge_info is not None:
-                timestamp = edge_info.get("timestamp", None)
-                time_interval = time.time() - timestamp
-                if time_interval <= allowed_cache_edge_status_time:
-                    continue
-
-            self.send_status_check_msg(run_id, edge_id, self.edge_id, context=status_check_context)
-        time.sleep(3)
-
-        total_sleep_seconds = 0
-        status_check_sleep_seconds = 10
-        allowed_status_check_sleep_seconds = 60 * 2 if status_timeout is None else status_timeout
-        allowed_status_check_sleep_seconds_for_async = 30
-        inactivate_edges = list()
-        active_edge_info_dict = dict()
-        while True:
-            if callback_when_detecting is not None:
-                callback_when_detecting(args_for_callback_when_detecting)
-
-            # Fetch edge info from the edge status queue, which will be added to realtime status map
-            while True:
-                self.check_runner_stop_event()
-
-                try:
-                    edge_info = edge_device_info_queue.get(block=False, timeout=1)
-                    if edge_info is not None:
-                        edge_id = edge_info.get("edge_id", None)
-                        if edge_id is not None:
-                            run_edges_realtime_status[run_id_str][edge_id] = edge_info
-                except queue.Empty as e:  # If queue is empty, then break loop
-                    break
-
-            self.check_runner_stop_event()
-
-            # Check all edges which don't send response status successfully
-            # and retry to send the status checking message.
-            active_edges_count = 0
-            inactivate_edges.clear()
-            active_edge_info_dict.clear()
-            for edge_id in edge_id_list:
-                edge_info_dict = run_edges_realtime_status.get(run_id_str, {})
-                edge_info = edge_info_dict.get(edge_id, None)
-                edge_info = edge_info_dict.get(str(edge_id), None) if edge_info is None else edge_info
-                if edge_info is not None:
-                    active_edges_count += 1
-                    active_edge_info_dict[str(edge_id)] = edge_info
-                else:
-                    # Check if the edge status was filled allowed_cache_edge_status_time seconds ago,
-                    # if so no more checking message would be sent.
-                    edge_info = edge_info_global_dict.get(edge_id, None)
-                    if edge_info is not None:
-                        timestamp = edge_info.get("timestamp", None)
-                        time_interval = time.time() - timestamp
-                        if time_interval <= allowed_cache_edge_status_time:
-                            active_edges_count += 1
-                            active_edge_info_dict[str(edge_id)] = edge_info
-                            continue
-
-                    inactivate_edges.append(edge_id)
-                    self.send_status_check_msg(run_id, edge_id, self.edge_id, context=status_check_context)
-
-            # If all edges are ready then send the starting job message to them
-            if active_edges_count == len(edge_id_list):
-                logging.info(f"All edges are ready. Active edge id list is as follows. {active_edge_info_dict}")
-                if callback_when_edges_ready is not None:
-                    logging.info("All edges are ready. Start to process the callback function.")
-                    callback_when_edges_ready(active_edge_info_dict=active_edge_info_dict)
-                else:
-                    logging.info("All edges are ready. No callback function to process.")
-                break
-            else:
-                logging.info(f"All edges are not ready. Active edge id list: {active_edge_info_dict}, "
-                             f"Inactive edge id list: {inactivate_edges}")
-
-            # Check if runner needs to stop and sleep specific time
-            self.check_runner_stop_event()
-            time.sleep(status_check_sleep_seconds)
-            total_sleep_seconds += status_check_sleep_seconds
-
-            # Check if the status response message has timed out to receive
-            if total_sleep_seconds >= allowed_status_check_sleep_seconds:
-                # If so, send failed message to MLOps and send exception message to all edges.
-                logging.error(f"There are inactive edge devices. "
-                              f"Inactivate edge id list is as follows. {inactivate_edges}")
-                if need_to_trigger_exception:
-                    self.mlops_metrics.report_server_id_status(
-                        run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
-                        server_id=self.edge_id, server_agent_id=self.server_agent_id)
-                    self.send_training_stop_request_to_edges_when_exception(edge_id_list,
-                                                                            payload=json.dumps(self.request_json),
-                                                                            run_id=run_id)
-                return False, active_edge_info_dict, inactivate_edges
-
-            # If we enable the mode for async cluster, then sleep some time and send messages to all clients.
-            if callback_when_edges_ready is not None:
-                should_async, async_timeout = self.should_process_async_cluster()
-                if should_async and total_sleep_seconds >= allowed_status_check_sleep_seconds_for_async:
-                    if async_timeout > allowed_status_check_sleep_seconds_for_async:
-                        time.sleep(async_timeout - allowed_status_check_sleep_seconds_for_async)
-                    self.send_training_request_to_edges()
-                    return True, active_edge_info_dict, inactivate_edges
-
-        return True, active_edge_info_dict, inactivate_edges
-
-    def send_status_check_msg(self, run_id, edge_id, server_id, context=None):
-        topic_get_model_device_id = "server/client/request_device_info/" + str(edge_id)
-        payload = {"server_id": server_id, "run_id": run_id}
-        if context is not None:
-            payload["context"] = context
-        self.message_center.send_message(topic_get_model_device_id, json.dumps(payload))
-
    @debug
    def send_training_request_to_edges(self, active_edge_info_dict=None):
        """Dispatch the start_train request of the current run to every edge.

        When the job yaml requests GPUs and realtime edge info is available,
        GPU resources are matched and assigned to the devices first; if the
        matching fails, the run is reported as FAILED to MLOps and an
        exception stop request is sent to the edges. Otherwise each edge
        receives the request json (with its 1-based client rank and, for GPU
        jobs, its scheduler match info) on its own start_train topic.

        :param active_edge_info_dict: realtime info per edge id (string keys),
            as collected during the edge status check; None skips GPU matching.
        """
        run_id = self.request_json["runId"]
        edge_id_list = self.request_json["edgeids"]
        run_config = self.request_json.get("run_config", {})
        run_params = run_config.get("parameters", {})
        job_yaml = run_params.get("job_yaml", {})
        # job_yaml_default_none distinguishes "no job yaml at all" from an empty one.
        job_yaml_default_none = run_params.get("job_yaml", None)
        computing = job_yaml.get("computing", {})
        request_num_gpus = computing.get("minimum_num_gpus", None)
        job_gpu_id_list = self.request_json.get("job_gpu_id_list", None)

        logging.info("Send training request to Edge ids: " + str(edge_id_list))

        # GPU matching only happens for GPU jobs with known realtime edge info.
        should_match_gpu = False
        if job_yaml_default_none is not None and request_num_gpus is not None and \
                int(request_num_gpus) > 0 and active_edge_info_dict is not None:
            should_match_gpu = True
            SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(active_edge_info_dict, show_gpu_list=True)

            # Match and assign gpus to each device
            assigned_gpu_num_dict, assigned_gpu_ids_dict = SchedulerMatcher.match_and_assign_gpu_resources_to_devices(
                request_num_gpus, edge_id_list, active_edge_info_dict, job_gpu_id_list=job_gpu_id_list)
            if assigned_gpu_num_dict is None or assigned_gpu_ids_dict is None:
                # If no resources available, send failed message to MLOps and send exception message to all edges.
                gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(
                    active_edge_info_dict, should_print=True)
                err_info = f"No resources available." \
                           f"Total available GPU count {gpu_available_count} is less than " \
                           f"request GPU count {request_num_gpus}"
                logging.error(err_info)

                # Bug fix: This mqtt message needs to be sent so platform can clean up the failed run and change the
                # status from running to failed.
                self.mlops_metrics.report_server_training_status(
                    run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id
                )

                self.mlops_metrics.report_server_id_status(
                    run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
                    server_id=self.edge_id, server_agent_id=self.server_agent_id)
                self.send_training_stop_request_to_edges_when_exception(edge_id_list,
                                                                        payload=json.dumps(self.request_json),
                                                                        run_id=run_id)

                # For serving jobs, also mark the endpoint as failed and surface the error in its logs.
                serving_args = job_yaml.get("serving_args", {})
                endpoint_id = serving_args.get("endpoint_id", None)
                if endpoint_id is not None:
                    fedml.mlops.log_endpoint_status(
                        endpoint_id, device_client_constants.ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
                    fedml.mlops.log_run_log_lines(
                        endpoint_id, 0, [err_info],
                        log_source=device_client_constants.ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT
                    )
                return

            # Generate master node addr and port
            master_node_addr, master_node_port = SchedulerMatcher.get_master_node_info(edge_id_list,
                                                                                       active_edge_info_dict)

            # Generate new edge id list after matched
            edge_id_list = SchedulerMatcher.generate_new_edge_list_for_gpu_matching(assigned_gpu_num_dict)
            if len(edge_id_list) <= 0:
                gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(
                    active_edge_info_dict, should_print=True)
                logging.error(f"Request parameter for GPU num is invalid."
                              f"Total available GPU count {gpu_available_count}."
                              f"Request GPU num {request_num_gpus}")
                self.mlops_metrics.report_server_id_status(
                    run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
                    server_id=self.edge_id, server_agent_id=self.server_agent_id)
                self.send_training_stop_request_to_edges_when_exception(edge_id_list,
                                                                        payload=json.dumps(self.request_json),
                                                                        run_id=run_id)
                return

        if should_match_gpu:
            # Report gpu num and related infos to MLOps.
            serving_args = job_yaml.get("serving_args", {})
            endpoint_id = serving_args.get("endpoint_id", None)
            if endpoint_id is not None:
                endpoint_info = list()
                for edge_id_item, gpu_num in assigned_gpu_num_dict.items():
                    edge_info = active_edge_info_dict.get(str(edge_id_item), {})
                    endpoint_info.append({
                        "machine_id": edge_id_item, "endpoint_gpu_count": gpu_num,
                        "master_deploy_id": edge_info.get("master_device_id", 0),
                        "slave_deploy_id": edge_info.get("slave_device_id", 0)})
                topic_name = f"compute/mlops/endpoint"
                endpoint_info_json = {"endpoint_id": endpoint_id, "endpoint_info": endpoint_info}
                print(f"endpoint_info_json {endpoint_info_json}")
                self.message_center.send_message(topic_name, json.dumps(endpoint_info_json))

        # Send the (possibly GPU-annotated) request to each edge with a unique 1-based rank.
        client_rank = 1
        for edge_id in edge_id_list:
            topic_start_train = "flserver_agent/" + str(edge_id) + "/start_train"
            logging.info("start_train: send topic " + topic_start_train + " to client...")
            request_json = self.request_json
            request_json["client_rank"] = client_rank
            client_rank += 1

            if active_edge_info_dict is not None:
                edge_info = active_edge_info_dict.get(str(edge_id), {})
                model_master_device_id = edge_info.get("master_device_id", None)
                model_slave_device_id = edge_info.get("slave_device_id", None)
                model_slave_device_id_list = edge_info.get("slave_device_id_list", None)

                if should_match_gpu:
                    request_json["scheduler_match_info"] = SchedulerMatcher.generate_match_info_for_scheduler(
                        edge_id, edge_id_list, master_node_addr, master_node_port,
                        assigned_gpu_num_dict, assigned_gpu_ids_dict,
                        model_master_device_id=model_master_device_id,
                        model_slave_device_id=model_slave_device_id,
                        model_slave_device_id_list=model_slave_device_id_list
                    )

            self.message_center.send_message(topic_start_train, json.dumps(request_json))
-
-    def setup_listeners_for_edge_status(self, run_id, edge_ids, server_id):
-        self.client_agent_active_list[f"{run_id}"] = dict()
-        self.client_agent_active_list[f"{run_id}"][f"server"] = server_id
-        for edge_id in edge_ids:
-            self.client_agent_active_list[f"{run_id}"][f"{edge_id}"] = ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
-            edge_status_topic = "fl_client/flclient_agent_" + str(edge_id) + "/status"
-            self.add_message_listener(edge_status_topic, self.callback_edge_status)
-            self.subscribe_msg(edge_status_topic)
-
-    def remove_listeners_for_edge_status(self, edge_ids=None):
-        if edge_ids is None:
-            edge_ids = self.request_json["edgeids"]
-
-        for edge_id in edge_ids:
-            edge_status_topic = "fl_client/flclient_agent_" + str(edge_id) + "/status"
-            self.unsubscribe_msg(edge_status_topic)
-
-    def setup_listener_for_run_metrics(self, run_id):
-        metric_topic = f"fedml_slave/fedml_master/metrics/{run_id}"
-        self.add_message_listener(metric_topic, self.callback_run_metrics)
-        self.subscribe_msg(metric_topic)
-
-    def remove_listener_for_run_metrics(self, run_id):
-        metric_topic = f"fedml_slave/fedml_master/metrics/{run_id}"
-        self.unsubscribe_msg(metric_topic)
-
-    def setup_listener_for_run_logs(self, run_id):
-        logs_topic = f"fedml_slave/fedml_master/logs/{run_id}"
-        self.add_message_listener(logs_topic, self.callback_run_logs)
-        self.subscribe_msg(logs_topic)
-
-    def remove_listener_for_run_logs(self, run_id):
-        logs_topic = f"fedml_slave/fedml_master/logs/{run_id}"
-        self.unsubscribe_msg(logs_topic)
-
-    def callback_run_logs(self, topic, payload):
-        run_id = str(topic).split('/')[-1]
-        run_id_str = str(run_id)
-        if self.run_logs_queue_map.get(run_id_str) is None:
-            self.run_logs_queue_map[run_id_str] = Queue()
-        self.run_logs_queue_map[run_id_str].put(payload)
-
-    def callback_run_metrics(self, topic, payload):
-        print(f"callback_run_metrics topic {topic}, payload {payload}")
-        run_id = str(topic).split('/')[-1]
-        run_id_str = str(run_id)
-        if self.run_metrics_queue_map.get(run_id_str) is None:
-            self.run_metrics_queue_map[run_id_str] = Queue()
-        self.run_metrics_queue_map[run_id_str].put(payload)
-
-    def callback_edge_status(self, topic, payload):
-        payload_json = json.loads(payload)
-        run_id = payload_json.get("run_id", None)
-        edge_id = payload_json.get("edge_id", None)
-        status = payload_json.get("status", None)
-        if run_id is not None and edge_id is not None:
-            active_item_dict = self.client_agent_active_list.get(f"{run_id}", None)
-            if active_item_dict is None:
-                return
-            self.client_agent_active_list[f"{run_id}"][f"{edge_id}"] = status
-
-            if self.run_edge_id_status_queue_map.get(f"{run_id}") is None:
-                self.run_edge_id_status_queue_map[f"{run_id}"] = Queue()
-            self.run_edge_id_status_queue_map[f"{run_id}"].put(self.client_agent_active_list[f"{run_id}"])
-
-            self.process_job_status(run_id, edge_id, status)
-
-    def ota_upgrade(self, payload, request_json):
-        run_id = request_json["runId"]
-        force_ota = False
-        ota_version = None
-
-        try:
-            run_config = request_json.get("run_config", None)
-            parameters = run_config.get("parameters", None)
-            common_args = parameters.get("common_args", None)
-            force_ota = common_args.get("force_ota", False)
-            ota_version = common_args.get("ota_version", None)
-        except Exception as e:
-            pass
-
-        if force_ota and ota_version is not None:
-            should_upgrade = True if ota_version != fedml.__version__ else False
-            upgrade_version = ota_version
-        else:
-            try:
-                fedml_is_latest_version, local_ver, remote_ver = sys_utils.check_fedml_is_latest_version(self.version)
-            except Exception as e:
-                return
-
-            should_upgrade = False if fedml_is_latest_version else True
-            upgrade_version = remote_ver
-
-        if should_upgrade:
-            job_obj = FedMLServerDataInterface.get_instance().get_job_by_id(run_id)
-            if job_obj is None:
-                FedMLServerDataInterface.get_instance(). \
-                    save_started_job(run_id, self.edge_id, time.time(),
-                                     ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING,
-                                     ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING,
-                                     payload)
-
-                self.mlops_metrics.report_server_id_status(
-                    run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING, edge_id=self.edge_id,
-                    server_id=self.edge_id, server_agent_id=self.edge_id)
-            logging.info(f"Upgrade to version {upgrade_version} ...")
-
-            sys_utils.do_upgrade(self.version, upgrade_version)
-
-            raise Exception("Restarting after upgraded...")
-
    def callback_start_train(self, topic=None, payload=None):
        """Handle a start_train request from MLOps and launch the run.

        Depending on this agent's role (edge server+agent, cloud agent or
        cloud server) it refreshes configs, starts the log processor,
        optionally performs an OTA upgrade, records the request json and
        finally spawns the runner process (or the cloud-server deployment /
        local cloud-server process when acting as a cloud agent).

        :param topic: MQTT topic the request arrived on (may be None).
        :param payload: JSON payload; base64-encoded when running as cloud server.
        """
        print("callback_start_train: ")
        try:
            MLOpsConfigs.fetch_all_configs()
        except Exception as e:
            # Best-effort config refresh; failures are tolerated here.
            pass

        # get training params
        if self.run_as_cloud_server:
            # Cloud-server payloads arrive base64-encoded; decode before parsing.
            message_bytes = payload.encode("ascii")
            base64_bytes = base64.b64decode(message_bytes)
            payload = base64_bytes.decode("ascii")

        # [NOTES] Example Request JSON: https://fedml-inc.larksuite.com/wiki/ScnIwUif9iupbjkYS0LuBrd6sod#WjbEdhYrvogmlGxKTOGu98C6sSb
        request_json = json.loads(payload)
        is_retain = request_json.get("is_retain", False)
        if is_retain:
            return

        # Process the log
        run_id = request_json["runId"]
        run_id_str = str(run_id)
        if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent:
            # Start log processor for current run
            self.args.run_id = run_id
            self.args.edge_id = self.edge_id
            MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
            MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(
                run_id, self.edge_id, SchedulerConstants.get_log_source(request_json))
            logging.info("start the log processor.")
        elif self.run_as_cloud_agent:
            # Start log processor for current run
            MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(
                run_id, request_json.get("server_id", "0"), SchedulerConstants.get_log_source(request_json)
            )
        elif self.run_as_cloud_server:
            self.server_agent_id = request_json.get("cloud_agent_id", self.edge_id)
            run_id = request_json["runId"]
            run_id_str = str(run_id)

            # Start log processor for current run
            self.args.run_id = run_id
            MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(
                run_id, self.edge_id, SchedulerConstants.get_log_source(request_json))

        logging.info("callback_start_train payload: {}".format(payload))
        logging.info(
            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
        )

        # OTA upgrade only applies to the local (non-cloud) roles; it may raise to restart.
        if not self.run_as_cloud_agent and not self.run_as_cloud_server:
            self.ota_upgrade(payload, request_json)

        self.start_request_json = payload
        self.run_id = run_id
        ServerConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, self.edge_id, run_id=run_id)

        # Start server with multiprocessing mode
        self.request_json = request_json
        self.running_request_json[run_id_str] = request_json
        edge_id_list = request_json.get("edgeids", list())
        self.run_edge_ids[run_id_str] = edge_id_list

        logging.info("subscribe the client exception message.")

        if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent:
            self.init_job_task(request_json)

            self.args.run_id = run_id

            self._start_runner_process(run_id, request_json)

            ServerConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid)
        elif self.run_as_cloud_agent:
            self.init_job_task(request_json)

            server_runner = FedMLServerRunner(
                self.args, run_id=run_id, request_json=request_json, agent_config=self.agent_config
            )
            server_runner.run_as_cloud_agent = self.run_as_cloud_agent
            server_runner.start_request_json = json.dumps(request_json)
            self.run_process_event_map[run_id_str] = multiprocessing.Event()
            self.run_process_event_map[run_id_str].clear()
            server_runner.run_process_event = self.run_process_event_map[run_id_str]

            if not self.use_local_process_as_cloud_server:
                # Launch the cloud server (k8s deployment) from a child process.
                self.run_process_map[run_id_str] = Process(target=server_runner.start_cloud_server_process_entry)
                self.run_process_map[run_id_str].start()
            else:
                # Run the cloud server locally; pass the request base64-encoded on the command line.
                message_bytes = json.dumps(self.request_json).encode("ascii")
                base64_bytes = base64.b64encode(message_bytes)
                runner_cmd_encoded = base64_bytes.decode("ascii")
                logging.info("runner_cmd_encoded: {}".format(runner_cmd_encoded))

                cloud_device_id = request_json.get("cloudServerDeviceId", "0")

                self.run_process_map[run_id_str] = Process(
                    target=FedMLServerRunner.start_local_cloud_server,
                    args=(run_id_str, self.args.user, self.version, cloud_device_id, runner_cmd_encoded))
                self.run_process_map[run_id_str].start()
                time.sleep(1)

            ServerConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid)
        elif self.run_as_cloud_server:
            self.server_agent_id = request_json.get("cloud_agent_id", self.edge_id)
            self.start_request_json = json.dumps(request_json)
            run_id = request_json["runId"]
            run_id_str = str(run_id)

            self.init_job_task(request_json)

            self.args.run_id = run_id

            self._start_runner_process(run_id, request_json)
            # ServerConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid)
-
-    @staticmethod
-    def start_local_cloud_server(run_id, user, version, cloud_device_id, runner_cmd_encoded):
-        print(f"start cloud server, device id {cloud_device_id}, runner cmd {runner_cmd_encoded}")
-        if not FedMLServerRunner.debug_cloud_server:
-            pip_source_dir = os.path.dirname(__file__)
-            login_cmd = os.path.join(pip_source_dir, "server_login.py")
-            run_cmd = f"{get_python_program()} -W ignore {login_cmd} -t login -r cloud_server -u {str(user)} " \
-                      f"-v {version} -id {cloud_device_id} -rc {runner_cmd_encoded}"
-            os.system(run_cmd)
-
    def _start_runner_process(self, run_id, request_json, is_server_job=False):
        """Spawn the server runner for a run in a separate process.

        Creates a FedMLServerRunner configured from this agent, wires up the
        per-run stop/completed events and the shared queues (edge statuses,
        device info, metrics, events, artifacts, logs), starts the child
        process and records its pid.

        :param run_id: id of the run to execute.
        :param request_json: parsed start_train request for the run.
        :param is_server_job: when True, run the server-job entry point
            instead of the regular run entry point.
        """
        server_runner = FedMLServerRunner(
            self.args, run_id=run_id, request_json=request_json, agent_config=self.agent_config
        )
        run_id_str = str(run_id)
        server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent
        server_runner.edge_id = self.edge_id
        server_runner.server_agent_id = self.server_agent_id
        server_runner.start_request_json = json.dumps(request_json)
        # Events used to signal the child process to stop and to report completion.
        self.run_process_event_map[run_id_str] = multiprocessing.Event()
        self.run_process_event_map[run_id_str].clear()
        server_runner.run_process_event = self.run_process_event_map[run_id_str]
        self.run_process_completed_event_map[run_id_str] = multiprocessing.Event()
        self.run_process_completed_event_map[run_id_str].clear()
        server_runner.run_process_completed_event = self.run_process_completed_event_map[run_id_str]
        # Lazily create the per-run queues that are shared with the child process.
        if self.run_edge_id_status_queue_map.get(run_id_str, None) is None:
            self.run_edge_id_status_queue_map[run_id_str] = Queue()
        if self.run_edge_device_info_queue_map.get(run_id_str, None) is None:
            self.run_edge_device_info_queue_map[run_id_str] = Queue()
        if self.run_metrics_queue_map.get(run_id_str, None) is None:
            self.run_metrics_queue_map[run_id_str] = Queue()
        if self.run_events_queue_map.get(run_id_str, None) is None:
            self.run_events_queue_map[run_id_str] = Queue()
        if self.run_artifacts_queue_map.get(run_id_str, None) is None:
            self.run_artifacts_queue_map[run_id_str] = Queue()
        if self.run_logs_queue_map.get(run_id_str, None) is None:
            self.run_logs_queue_map[run_id_str] = Queue()
        # if self.run_edge_device_info_global_queue is None:
        #     self.run_edge_device_info_global_queue = Array('i', list())
        server_runner.edge_id_status_queue = self.run_edge_id_status_queue_map[run_id_str]
        server_runner.edge_device_info_queue = self.run_edge_device_info_queue_map[run_id_str]
        self.run_process_map[run_id_str] = Process(
            target=server_runner.run if not is_server_job else server_runner.run_server_job_impl, args=(
                self.run_process_event_map[run_id_str], self.run_process_completed_event_map[run_id_str],
                self.run_edge_id_status_queue_map[run_id_str], self.run_edge_device_info_queue_map[run_id_str],
                self.run_metrics_queue_map[run_id_str], self.run_events_queue_map[run_id_str],
                self.run_artifacts_queue_map[run_id_str], self.run_logs_queue_map[run_id_str],
                self.message_center.get_message_queue(),
                self.run_edge_device_info_global_queue
            )
        )
        self.run_process_map[run_id_str].start()
        ServerConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid)
-
-    def start_cloud_server_process_entry(self):
-        try:
-            self.start_cloud_server_process()
-        except Exception as e:
-            pass
-
-    def start_cloud_server_process(self):
-        run_config = self.request_json["run_config"]
-        packages_config = run_config["packages_config"]
-        self.start_cloud_server(packages_config)
-
    def start_cloud_server(self, packages_config):
        """Deploy the FedML aggregation server onto Kubernetes.

        Creates the docker-registry secret in the version-specific namespace,
        encodes the request json as the runner command, substitutes the
        server deployment template with envsubst and applies it via kubectl.

        :param packages_config: server package name/url config from the run config.
        """
        server_id = self.request_json["server_id"]
        self.cloud_server_name = FedMLServerRunner.FEDML_CLOUD_SERVER_PREFIX + str(self.run_id) + "-" + str(server_id)
        self.server_docker_image = (
                self.agent_config["docker_config"]["registry_server"]
                + self.agent_config["docker_config"]["registry_dir"]
                + self.server_docker_base_image
        )

        logging.info("docker image {}".format(self.server_docker_image))
        # logging.info("file_sys_driver {}".format(self.agent_config["docker_config"]["file_sys_driver"]))

        # Recreate the registry secret (delete + create) so the pull credentials are fresh.
        registry_secret_cmd = (
                "kubectl create namespace fedml-devops-aggregator-"
                + self.version
                + ";kubectl -n fedml-devops-aggregator-"
                + self.version
                + " delete secret secret-"
                + self.cloud_server_name
                + " ;kubectl create secret docker-registry secret-"
                + self.cloud_server_name
                + " --docker-server="
                + self.agent_config["docker_config"]["registry_server"]
                + " --docker-username="
                + self.agent_config["docker_config"]["user_name"]
                + " --docker-password=$(aws ecr-public get-login-password --region "
                + self.agent_config["docker_config"]["public_cloud_region"]
                + ")"
                + " --docker-email=fedml@fedml.ai -n fedml-devops-aggregator-"
                + self.version
        )
        logging.info("Create secret cmd: " + registry_secret_cmd)
        os.system(registry_secret_cmd)

        # The runner command is the whole request json, base64-encoded for the pod env.
        message_bytes = json.dumps(self.request_json).encode("ascii")
        base64_bytes = base64.b64encode(message_bytes)
        runner_cmd_encoded = base64_bytes.decode("ascii")
        logging.info("runner_cmd_encoded: {}".format(runner_cmd_encoded))
        # logging.info("runner_cmd_decoded: {}".format(base64.b64decode(runner_cmd_encoded).decode()))
        cur_dir = os.path.dirname(__file__)
        # Export the template variables and apply the deployment with envsubst | kubectl.
        run_deployment_cmd = (
                "export FEDML_AGGREGATOR_NAME="
                + self.cloud_server_name
                + ";export FEDML_AGGREGATOR_SVC="
                + self.cloud_server_name
                + ";export FEDML_AGGREGATOR_VERSION="
                + self.version
                + ';export FEDML_AGGREGATOR_IMAGE_PATH="'
                + self.server_docker_image
                + '"'
                + ";export FEDML_CONF_ID="
                + self.cloud_server_name
                + ";export FEDML_DATA_PV_ID="
                + self.cloud_server_name
                + ";export FEDML_DATA_PVC_ID="
                + self.cloud_server_name
                + ";export FEDML_REGISTRY_SECRET_SUFFIX="
                + self.cloud_server_name
                + ";export FEDML_ACCOUNT_ID=0"
                + ";export FEDML_SERVER_DEVICE_ID="
                + self.request_json.get("cloudServerDeviceId", "0")
                + ";export FEDML_VERSION="
                + self.version
                + ";export FEDML_PACKAGE_NAME="
                + packages_config.get("server", "")
                + ";export FEDML_PACKAGE_URL="
                + packages_config.get("serverUrl", "")
                + ";export FEDML_RUNNER_CMD="
                + runner_cmd_encoded
                + ";envsubst < "
                + os.path.join(cur_dir, "templates", "fedml-server-deployment.yaml")
                + " | kubectl apply -f - "
        )
        logging.info("FedMLServerRunner.run with k8s: " + run_deployment_cmd)
        os.system(run_deployment_cmd)
-
    def stop_cloud_server(self):
        """Tear down this run's cloud server on Kubernetes.

        Deletes the deployment, service and registry secret that
        start_cloud_server created, using the same derived resource names.
        """
        self.cloud_server_name = FedMLServerRunner.FEDML_CLOUD_SERVER_PREFIX + str(self.run_id) \
                                 + "-" + str(self.edge_id)
        self.server_docker_image = (
                self.agent_config["docker_config"]["registry_server"]
                + self.agent_config["docker_config"]["registry_dir"]
                + self.server_docker_base_image
        )
        # Export the same template variables and delete the k8s resources.
        delete_deployment_cmd = (
                "export FEDML_AGGREGATOR_NAME="
                + self.cloud_server_name
                + ";export FEDML_AGGREGATOR_SVC="
                + self.cloud_server_name
                + ";export FEDML_AGGREGATOR_VERSION="
                + self.version
                + ';export FEDML_AGGREGATOR_IMAGE_PATH="'
                + self.server_docker_image
                + '"'
                + ";export FEDML_CONF_ID="
                + self.cloud_server_name
                + ";export FEDML_DATA_PV_ID="
                + self.cloud_server_name
                + ";export FEDML_DATA_PVC_ID="
                + self.cloud_server_name
                + ";export FEDML_REGISTRY_SECRET_SUFFIX="
                + self.cloud_server_name
                + ";kubectl -n fedml-devops-aggregator-"
                + self.version
                + " delete deployment "
                + self.cloud_server_name
                + ";kubectl -n fedml-devops-aggregator-"
                + self.version
                + " delete svc "
                + self.cloud_server_name
                + ";kubectl -n fedml-devops-aggregator-"
                + self.version
                + " delete secret secret-"
                + self.cloud_server_name
        )
        logging.info("FedMLServerRunner.stop_run with k8s: " + delete_deployment_cmd)
        os.system(delete_deployment_cmd)
-
-    def setup_message_center(self):
-        if self.message_center is not None:
-            return
-
-        self.message_center = FedMLMessageCenter(agent_config=self.agent_config)
-        self.message_center.start_sender()
-
-        if self.mlops_metrics is None:
-            self.mlops_metrics = MLOpsMetrics()
-        self.mlops_metrics.set_messenger(self.message_center)
-        self.mlops_metrics.run_id = self.run_id
-        self.mlops_metrics.edge_id = self.edge_id
-        self.mlops_metrics.server_agent_id = self.server_agent_id
-
-    def rebuild_message_center(self, message_center_queue):
-        self.message_center = FedMLMessageCenter(message_queue=message_center_queue)
-
-        if self.mlops_metrics is None:
-            self.mlops_metrics = MLOpsMetrics()
-        self.mlops_metrics.set_messenger(self.message_center)
-        self.mlops_metrics.run_id = self.run_id
-        self.mlops_metrics.edge_id = self.edge_id
-        self.mlops_metrics.server_agent_id = self.server_agent_id
-
-    def release_message_center(self):
-        try:
-            if self.message_center is not None:
-                self.message_center.stop()
-                self.message_center = None
-
-        except Exception as e:
-            logging.error(
-                f"Failed to release client mqtt manager with Exception {e}. Traceback: {traceback.format_exc()}")
-            pass
-
-    def send_training_stop_request_to_edges(
-            self, edge_id_list, payload=None, run_id=0):
-        if payload is None:
-            payload_obj = {"runId": run_id, "edgeids": edge_id_list}
-        else:
-            payload_obj = json.loads(payload)
-
-        for edge_id in edge_id_list:
-            topic_stop_train = "flserver_agent/" + str(edge_id) + "/stop_train"
-            logging.info("stop_train: send topic " + topic_stop_train)
-            self.message_center.send_message(topic_stop_train, json.dumps(payload_obj))
-
-    def send_training_stop_request_to_specific_edge(self, edge_id, payload):
-        topic_stop_train = "flserver_agent/" + str(edge_id) + "/stop_train"
-        logging.info("stop_train: send topic " + topic_stop_train)
-        self.message_center.send_message(topic_stop_train, payload)
-
-    def send_training_stop_request_to_cloud_server(self, edge_id, payload):
-        topic_stop_train = "mlops/flserver_agent_" + str(edge_id) + "/stop_train"
-        logging.info("stop_train: send topic " + topic_stop_train)
-        self.message_center.send_message(topic_stop_train, payload)
-
-    def send_training_stop_request_to_edges_when_exception(
-            self, edge_id_list, payload=None, run_id=0, server_id=None, status=None):
-        if payload is None:
-            payload_obj = {"runId": run_id, "edgeids": edge_id_list}
-            if server_id is not None:
-                payload_obj["serverId"] = server_id
-        else:
-            payload_obj = json.loads(payload)
-        payload_obj["run_status"] = ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION if status is None else status
-        topic_stop_train = "flserver_agent/" + str(self.edge_id) + "/stop_train"
-        self.callback_stop_train(topic_stop_train, json.dumps(payload_obj), use_payload=payload_obj)
-
-    def callback_stop_train(self, topic, payload, use_payload=None):
-        # logging.info("callback_stop_train: topic = %s, payload = %s" % (topic, payload))
-        logging.info(
-            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
-        )
-
-        request_json = json.loads(payload)
-        is_retain = request_json.get("is_retain", False)
-        if is_retain:
-            return
-        run_id = request_json.get("runId", None)
-        if run_id is None:
-            run_id = request_json.get("id", None)
-
-        edge_id_list = request_json["edgeids"]
-        server_id = request_json.get("serverId", None)
-        if server_id is None:
-            server_id = request_json.get("server_id", None)
-
-        if run_id is None or server_id is None:
-            logging.info("Json format is not correct!")
-            return
-
-        # logging.info("Stop run with multiprocessing.")
-
-        # Stop server with multiprocessing mode
-        run_id_str = str(run_id)
-        stop_request_json = self.running_request_json.get(run_id_str, None)
-        if stop_request_json is None:
-            stop_request_json = request_json
-        if use_payload is not None:
-            stop_request_json = use_payload
-
-        if self.run_process_event_map.get(run_id_str) is not None:
-            self.run_process_event_map.get(run_id_str).set()
-
-        if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent:
-            server_runner = FedMLServerRunner(
-                self.args, run_id=run_id, request_json=stop_request_json, agent_config=self.agent_config,
-                edge_id=self.edge_id
-            )
-            server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent
-            self.run_process_event_map_for_stop[run_id_str] = multiprocessing.Event()
-            if self.run_edge_id_status_queue_map.get(run_id_str, None) is None:
-                self.run_edge_id_status_queue_map[run_id_str] = Queue()
-            if self.run_edge_device_info_queue_map_for_stop.get(run_id_str, None) is None:
-                self.run_edge_device_info_queue_map_for_stop[run_id_str] = Queue()
-            # if self.run_edge_device_info_global_queue_for_stop is None:
-            #     self.run_edge_device_info_global_queue_for_stop = Array('i', list())
-
-            self.run_stop_process_map[run_id_str] = Process(
-                target=server_runner.run_stop, args=(
-                    self.run_process_event_map_for_stop[run_id_str],
-                    self.run_edge_id_status_queue_map[run_id_str],
-                    self.run_edge_device_info_queue_map_for_stop[run_id_str],
-                    self.run_edge_device_info_global_queue_for_stop,
-                    self.message_center.get_message_queue(),
-                )
-            )
-            self.run_stop_process_map[run_id_str].start()
-        elif self.run_as_cloud_agent:
-            self.send_training_stop_request_to_cloud_server(server_id, payload)
-            return
-        elif self.run_as_cloud_server:
-            # if not self.use_local_process_as_cloud_server:
-            server_runner = FedMLServerRunner(
-                self.args, run_id=run_id, request_json=stop_request_json, agent_config=self.agent_config,
-                edge_id=server_id
-            )
-            server_runner.run_as_cloud_agent = self.run_as_cloud_agent
-            self.run_process_event_map_for_stop[run_id_str] = multiprocessing.Event()
-            if self.run_edge_id_status_queue_map.get(run_id_str, None) is None:
-                self.run_edge_id_status_queue_map[run_id_str] = Queue()
-            if self.run_edge_device_info_queue_map_for_stop.get(run_id_str, None) is None:
-                self.run_edge_device_info_queue_map_for_stop[run_id_str] = Queue()
-            # if self.run_edge_device_info_global_queue_for_stop is None:
-            #     self.run_edge_device_info_global_queue_for_stop = Array('i', list())
-
-            self.run_stop_process_map[run_id_str] = Process(
-                target=server_runner.run_stop, args=(
-                    self.run_process_event_map_for_stop[run_id_str],
-                    self.run_edge_id_status_queue_map[run_id_str],
-                    self.run_edge_device_info_queue_map_for_stop[run_id_str],
-                    self.run_edge_device_info_global_queue_for_stop,
-                    self.message_center.get_message_queue(),
-                )
-            )
-            self.run_stop_process_map[run_id_str].start()
-            return
-
-        if self.running_request_json.get(run_id_str, None) is not None:
-            self.running_request_json.pop(run_id_str)
-
-        if self.run_process_map.get(run_id_str, None) is not None:
-            self.run_process_map.pop(run_id_str)
-
-    def run_stop(self, process_event, edge_id_status_queue, edge_device_info_queue,
-                 edge_device_info_global_queue, message_center_queue):
-        if platform.system() != "Windows":
-            os.setsid()
-
-        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
-        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')
-
-        self.run_process_event = process_event
-        try:
-            MLOpsUtils.set_ntp_offset(self.ntp_offset)
-
-            self.rebuild_message_center(message_center_queue)
-
-            self.run_stop_impl(edge_id_status_queue, edge_device_info_queue, edge_device_info_global_queue)
-        except Exception as e:
-            logging.error("Stop runner exits with exceptions. {}".format(traceback.format_exc()))
-        finally:
-            logging.info("Release resources.")
-
-    def run_stop_impl(self, edge_id_status_queue, edge_device_info_queue, edge_device_info_global_queue):
-        run_id_str = str(self.run_id)
-        edge_id_list = self.request_json["edgeids"]
-
-        # Detect running status of all edges
-        status_ok, active_edge_info_dict, inactivate_edges = self.detect_edges_status(
-            edge_device_info_queue, edge_device_info_global_queue=edge_device_info_global_queue,
-            status_timeout=120, need_to_trigger_exception=False,
-            status_check_context=SchedulerConstants.STATUS_CHECK_FRO_RUN_STOP_CONTEXT)
-
-        # Send the training stopping request to running edges.
-        for edge_id_item, _ in active_edge_info_dict.items():
-            self.send_training_stop_request_to_specific_edge(edge_id_item, json.dumps(self.request_json))
-            time.sleep(0.2)
-        time.sleep(3)
-
-        total_sleep_seconds = 0
-        allowed_status_check_sleep_seconds = 60
-        server_id = self.edge_id
-        running_edges_list = list()
-        current_edge_id_status_map = dict()
-
-        while True:
-            # Fetch edge id and status from the edge id status queue
-            while True:
-                try:
-                    queue_item = edge_id_status_queue.get(block=False, timeout=3)
-                    if queue_item is not None:
-                        current_edge_id_status_map.update(queue_item)
-                except queue.Empty as e:  # If queue is empty, then break loop
-                    break
-
-            # Calc the total killed device number
-            running_edges_list.clear()
-            number_of_failed_edges = 0
-            number_of_finished_edges = 0
-            number_of_killed_edges = 0
-            for edge_id_item, status_item in current_edge_id_status_map.items():
-                if edge_id_item == "server":
-                    continue
-
-                if status_item is None or status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \
-                        status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION:
-                    number_of_failed_edges += 1
-                    continue
-
-                if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED:
-                    number_of_finished_edges += 1
-                    continue
-
-                if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED:
-                    number_of_killed_edges += 1
-                    continue
-
-                if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE or \
-                        status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE:
-                    continue
-
-                running_edges_list.append(edge_id_item)
-
-            # If the killed device number is equal total device number, then break
-            if len(running_edges_list) <= 0 and len(current_edge_id_status_map.keys()) == len(edge_id_list) + 1:
-                break
-
-            # Calc the timeout value to wait to device killed.
-            time.sleep(3)
-            total_sleep_seconds += 3
-            if total_sleep_seconds < allowed_status_check_sleep_seconds:
-                continue
-
-            # If timeout, then report killed device status
-            no_response_edges = list(set(edge_id_list) - set(running_edges_list))
-            if len(no_response_edges) <= 0:
-                break
-            for edge_id_item in no_response_edges:
-                self.mlops_metrics.report_client_id_status(
-                    edge_id_item, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED,
-                    server_id=self.edge_id, run_id=self.run_id)
-
-        if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent:
-            # Stop log processor for current run
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id)
-        elif self.run_as_cloud_agent:
-            # Stop log processor for current run
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, server_id)
-
-        self.mlops_metrics.report_server_id_status(
-            self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED, edge_id=self.edge_id,
-            server_id=self.edge_id, server_agent_id=self.edge_id)
-
-    def set_run_status(self, run_id, status, running_request_json):
-        server_runner = FedMLServerRunner(
-            self.args, run_id=run_id, request_json=running_request_json, agent_config=self.agent_config
-        )
-        server_runner.edge_id = self.edge_id
-        server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent
-        server_runner.run_status = status
-        server_runner.message_center = self.message_center
-        server_runner.mlops_metrics = self.mlops_metrics
-        server_runner.cleanup_client_with_status()
-
-    def callback_runner_id_status(self, topic, payload):
-        # logging.info("callback_runner_id_status: topic = %s, payload = %s" % (topic, payload))
-        # logging.info(
-        #     f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
-        # )
-
-        request_json = json.loads(payload)
-        is_retain = request_json.get("is_retain", False)
-        if is_retain:
-            return
-        run_id = request_json["run_id"]
-        status = request_json["status"]
-        edge_id = request_json["edge_id"]
-        server_id = request_json.get("server_id", None)
-        run_id_str = str(run_id)
-
-        if (
-                status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED
-                or status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED
-                or status == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED
-        ):
-            completed_event = self.run_process_completed_event_map.get(run_id_str, None)
-            if completed_event is not None:
-                completed_event.set()
-
-            FedMLServerDataInterface.get_instance().save_job_status(run_id, self.edge_id, status, status)
-
-            # Stop server with multiprocessing mode
-            running_request_json = self.running_request_json.get(run_id_str, None)
-            if running_request_json is None:
-                running_request_json = request_json
-            if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent:
-                self.set_run_status(run_id, status, running_request_json)
-
-                run_process = self.run_process_map.get(run_id_str, None)
-                if run_process is not None:
-                    if run_process.pid is not None:
-                        RunProcessUtils.kill_process(run_process.pid)
-
-                    self.run_process_map.pop(run_id_str)
-
-                # Stop log processor for current run
-                MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
-            elif self.run_as_cloud_agent:
-                pass
-            elif self.run_as_cloud_server:
-                self.set_run_status(run_id, status, running_request_json)
-
-                # Stop log processor for current run
-                MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
-                if self.use_local_process_as_cloud_server:
-                    # RunProcessUtils.kill_process(os.getpid())
-                    cloud_server_process = self.run_process_map.get(run_id_str, None)
-                    if cloud_server_process is not None:
-                        RunProcessUtils.kill_process(cloud_server_process.pid)
-                else:
-                    self.stop_cloud_server()
-
-            if self.run_process_map.get(run_id_str, None) is not None:
-                self.run_process_map.pop(run_id_str)
-
-            self.remove_listener_for_run_metrics(self.run_id)
-            self.remove_listener_for_run_logs(self.run_id)
-        elif (
-                status == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION
-        ):
-            request_json = self.running_request_json.get(run_id_str, None)
-            if request_json is not None:
-                edge_id_list = request_json.get("edgeids", list())
-                server_id = request_json.get("serverId", None)
-                server_id = request_json.get("server_id", None) if server_id is None else server_id
-                self.send_training_stop_request_to_edges_when_exception(
-                    edge_id_list, run_id=run_id, server_id=server_id,
-                    status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)
-
-            FedMLServerDataInterface.get_instance().save_job_status(run_id, self.edge_id, status, status)
-        else:
-            request_json = self.running_request_json.get(run_id_str, None)
-            if request_json is None:
-                request_json = self.start_request_json
-            self.mlops_metrics.report_server_training_status(
-                run_id, status, edge_id=self.edge_id, running_json=json.dumps(request_json))
-
-    def cleanup_client_with_status(self):
-        if self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED:
-            # logging.info("received to finished status.")
-            self.cleanup_run_when_finished(should_send_server_id_status=False)
-        elif self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED:
-            # logging.info("received to failed status.")
-            self.cleanup_run_when_starting_failed(should_send_server_id_status=False)
-        elif self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED:
-            # logging.info("received to failed status.")
-            self.cleanup_run_when_starting_failed(
-                status=self.run_status, should_send_server_id_status=False)
-
-    def callback_report_current_status(self, topic, payload):
-        logging.info(
-            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
-        )
-
-        request_json = json.loads(payload)
-        if self.run_as_edge_server_and_agent:
-            self.send_agent_active_msg()
-        elif self.run_as_cloud_agent:
-            self.send_agent_active_msg()
-        elif self.run_as_cloud_server:
-            pass
-
-    @staticmethod
-    def process_ota_upgrade_msg():
-        os.system("pip install -U fedml")
-
-    def callback_server_ota_msg(self, topic, payload):
-        logging.info(
-            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
-        )
-
-        request_json = json.loads(payload)
-        cmd = request_json["cmd"]
-
-        if cmd == ServerConstants.FEDML_OTA_CMD_UPGRADE:
-            try:
-                self.process_ota_upgrade_msg()
-                # Process(target=FedMLServerRunner.process_ota_upgrade_msg).start()
-                raise Exception("After upgraded, restart runner...")
-            except Exception as e:
-                pass
-        elif cmd == ServerConstants.FEDML_OTA_CMD_RESTART:
-            raise Exception("Restart runner...")
-
-    def callback_response_device_info(self, topic, payload):
-        # Parse payload
-        payload_json = json.loads(payload)
-        run_id = payload_json.get("run_id", 0)
-        context = payload_json.get("context", None)
-        master_device_id = payload_json.get("master_device_id", 0)
-        slave_device_id = payload_json.get("slave_device_id", 0)
-        slave_device_id_list = payload_json.get("slave_device_id_list", 0)
-        edge_id = payload_json.get("edge_id", 0)
-        device_info = payload_json.get("edge_info", 0)
-        device_info["master_device_id"] = master_device_id
-        device_info["slave_device_id"] = slave_device_id
-        device_info["slave_device_id_list"] = slave_device_id_list
-        run_id_str = str(run_id)
-
-        # Put device info into a multiprocessing queue so master runner checks if all edges are ready
-        if context is None:
-            if self.run_edge_device_info_queue_map.get(run_id_str, None) is None:
-                self.run_edge_device_info_queue_map[run_id_str] = Queue()
-            self.run_edge_device_info_queue_map[run_id_str].put(device_info)
-
-            # if self.run_edge_device_info_global_queue is None:
-            #     self.run_edge_device_info_global_queue = Array('i', list())
-            #
-            # self.run_edge_device_info_global_queue[len(self.run_edge_device_info_global_queue)] =  \
-            #     {"timestamp": time.time(), "edge_id": edge_id, "device_info": device_info}
-
-            self.check_model_device_ready_and_deploy(run_id, master_device_id, slave_device_id,
-                                                     slave_device_id_list=slave_device_id_list)
-        elif context == SchedulerConstants.STATUS_CHECK_FRO_RUN_STOP_CONTEXT:
-            if self.run_edge_device_info_queue_map_for_stop.get(run_id_str, None) is None:
-                self.run_edge_device_info_queue_map_for_stop[run_id_str] = Queue()
-            self.run_edge_device_info_queue_map_for_stop[run_id_str].put(device_info)
-
-            # if self.run_edge_device_info_global_queue_for_stop is None:
-            #     self.run_edge_device_info_global_queue_for_stop = Array('i', list())
-            #
-            # self.run_edge_device_info_global_queue_for_stop[len(self.run_edge_device_info_global_queue_for_stop)] = \
-            #     {"timestamp": time.time(), "edge_id": edge_id, "device_info": device_info}
-
-    def check_model_device_ready_and_deploy(self, run_id, master_device_id, slave_device_id, slave_device_id_list=None):
-        request_json = self.running_request_json.get(str(run_id), None)
-        if request_json is None:
-            return
-        run_config = request_json["run_config"]
-        run_params = run_config.get("parameters", {})
-        job_yaml = run_params.get("job_yaml", {})
-        job_type = job_yaml.get("job_type", None)
-        job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type
-        if job_type != Constants.JOB_TASK_TYPE_DEPLOY and job_type != Constants.JOB_TASK_TYPE_SERVE:
-            return
-
-        # Init model device ids for each run
-        run_id_str = str(run_id)
-        if self.run_model_device_ids.get(run_id_str, None) is None:
-            self.run_model_device_ids[run_id_str] = list()
-
-        # Append master device and slave devices to the model devices map
-        self.run_model_device_ids[run_id_str].append({"master_device_id": master_device_id,
-                                                      "slave_device_id": slave_device_id})
-        model_device_ids = self.run_model_device_ids.get(run_id_str, None)
-        if model_device_ids is None:
-            return
-
-        # Check if all model devices are ready
-        if len(model_device_ids) != len(self.run_edge_ids.get(run_id_str, list())):
-            return
-
-        # Generate model master ids and model slave device ids
-        device_master_ids = list()
-        device_slave_ids = list()
-        for device_ids in model_device_ids:
-            model_master_id = device_ids.get("master_device_id")
-            model_slave_id = device_ids.get("slave_device_id")
-            device_master_ids.append(model_master_id)
-            device_slave_ids.append(model_slave_id)
-
-        if len(device_master_ids) <= 0:
-            return
-
-        # Generate serving devices for deploying
-        serving_devices = list()
-        serving_devices.append(device_master_ids[0])
-        serving_devices.extend(device_slave_ids)
-
-        # Start to deploy the model
-        self.deploy_model(serving_devices, request_json, run_id=run_id)
-
-    def callback_request_device_info_from_mlops(self, topic, payload):
-        self.response_device_info_to_mlops(topic, payload)
-
-    def response_device_info_to_mlops(self, topic, payload):
-        response_topic = f"master_agent/mlops/response_device_info"
-        payload_json = json.loads(payload)
-        need_gpu_info = payload_json.get("need_gpu_info", False)
-        if self.mlops_metrics is not None:
-            if not need_gpu_info:
-                response_payload = {
-                    "run_id": self.run_id,
-                    "master_agent_device_id": self.edge_id,
-                    "fedml_version": fedml.__version__
-                }
-            else:
-                total_mem, free_mem, total_disk_size, free_disk_size, cup_utilization, cpu_cores, \
-                    gpu_cores_total, gpu_cores_available, sent_bytes, recv_bytes, gpu_available_ids = \
-                    sys_utils.get_sys_realtime_stats()
-                gpu_available_ids = JobRunnerUtils.get_instance().get_available_gpu_id_list(self.edge_id)
-                gpu_available_ids = JobRunnerUtils.trim_unavailable_gpu_ids(gpu_available_ids)
-                gpu_cores_available = len(gpu_available_ids)
-                response_payload = {
-                    "run_id": self.run_id,
-                    "master_agent_device_id": self.edge_id,
-                    "memoryTotal": round(total_mem * MLOpsUtils.BYTES_TO_GB, 2),
-                    "memoryAvailable": round(free_mem * MLOpsUtils.BYTES_TO_GB, 2),
-                    "diskSpaceTotal": round(total_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
-                    "diskSpaceAvailable": round(free_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
-                    "cpuUtilization": round(cup_utilization, 2),
-                    "cpuCores": cpu_cores,
-                    "gpuCoresTotal": gpu_cores_total,
-                    "gpuCoresAvailable": gpu_cores_available,
-                    "networkTraffic": sent_bytes + recv_bytes,
-                    "timestamp": int(MLOpsUtils.get_ntp_time()),
-                    "fedml_version": fedml.__version__
-                }
-            self.mlops_metrics.report_json_message(response_topic, json.dumps(response_payload))
-
-    @staticmethod
-    def get_device_id():
-        device_file_path = os.path.join(ServerConstants.get_data_dir(), ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME)
-        file_for_device_id = os.path.join(device_file_path, "devices.id")
-        if not os.path.exists(device_file_path):
-            os.makedirs(device_file_path)
-        elif os.path.exists(file_for_device_id):
-            with open(file_for_device_id, 'r', encoding='utf-8') as f:
-                device_id_from_file = f.readline()
-                if device_id_from_file is not None and device_id_from_file != "":
-                    return device_id_from_file
-
-        if platform.system() == "Darwin":
-            cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \
-                                 "|awk -F':' '{print $2}' "
-            device_id = os.popen(cmd_get_serial_num).read()
-            device_id = device_id.replace('\n', '').replace(' ', '')
-            if device_id is None or device_id == "":
-                device_id = hex(uuid.getnode())
-            else:
-                device_id = "0x" + device_id
-        else:
-            if "nt" in os.name:
-
-                def get_uuid():
-                    guid = ""
-                    try:
-                        cmd = "wmic csproduct get uuid"
-                        guid = str(subprocess.check_output(cmd))
-                        pos1 = guid.find("\\n") + 2
-                        guid = guid[pos1:-15]
-                    except Exception as ex:
-                        pass
-                    return str(guid)
-
-                device_id = str(get_uuid())
-            elif "posix" in os.name:
-                device_id = sys_utils.get_device_id_in_docker()
-                if device_id is None:
-                    device_id = hex(uuid.getnode())
-            else:
-                device_id = sys_utils.run_subprocess_open(
-                    "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split()
-                )
-                device_id = hex(device_id)
-
-        if device_id is not None and device_id != "":
-            with open(file_for_device_id, 'w', encoding='utf-8') as f:
-                f.write(device_id)
-        else:
-            device_id = hex(uuid.uuid4())
-            with open(file_for_device_id, 'w', encoding='utf-8') as f:
-                f.write(device_id)
-
-        return device_id
-
-    def bind_account_and_device_id(self, url, account_id, device_id, os_name, api_key="", role=None):
-        if role is None:
-            role = "edge_server"
-            if self.run_as_edge_server_and_agent:
-                role = "edge_server"
-            elif self.run_as_cloud_agent:
-                role = "cloud_agent"
-            elif self.run_as_cloud_server:
-                role = "cloud_server"
-
-        ip = requests.get('https://checkip.amazonaws.com').text.strip()
-        fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \
-            cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \
-            gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info()
-        host_name = sys_utils.get_host_name()
-        json_params = {
-            "accountid": account_id,
-            "deviceid": device_id,
-            "type": os_name,
-            "state": ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE,
-            "status": ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE,
-            "processor": cpu_info,
-            "core_type": cpu_info,
-            "network": "",
-            "role": role,
-            "os_ver": os_ver,
-            "memory": total_mem,
-            "ip": ip,
-            "api_key": api_key,
-            "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver,
-                            "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver,
-                            "mpi_installed": mpi_installed, "cpu_usage": cpu_usage,
-                            "available_mem": available_mem, "total_mem": total_mem,
-                            "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name}
-        }
-        if gpu_count > 0:
-            if gpu_total_mem is not None:
-                json_params["gpu"] = gpu_info if gpu_info is not None else "" + ", Total GPU Memory: " + gpu_total_mem
-            else:
-                json_params["gpu"] = gpu_info if gpu_info is not None else ""
-            json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else ""
-            if gpu_available_mem is not None:
-                json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem
-            if gpu_total_mem is not None:
-                json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem
-
-            json_params["extra_infos"]["gpu_count"] = gpu_count
-            json_params["extra_infos"]["gpu_vendor"] = gpu_vendor
-            json_params["extra_infos"]["gpu_device_name"] = gpu_device_name
-
-            gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count)
-            gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0
-            gpu_list = sys_utils.get_gpu_list()
-            json_params["extra_infos"]["gpu_available_count"] = gpu_available_count
-            json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list
-            json_params["extra_infos"]["gpu_list"] = gpu_list
-        else:
-            json_params["gpu"] = "None"
-            json_params["extra_infos"]["gpu_available_count"] = 0
-            json_params["extra_infos"]["gpu_available_id_list"] = []
-            json_params["extra_infos"]["gpu_list"] = []
-
-        _, cert_path = MLOpsConfigs.get_request_params()
-        if cert_path is not None:
-            try:
-                requests.session().verify = cert_path
-                response = requests.post(
-                    url, json=json_params, verify=True,
-                    headers={"content-type": "application/json", "Connection": "close"}
-                )
-            except requests.exceptions.SSLError as err:
-                MLOpsConfigs.install_root_ca_file()
-                response = requests.post(
-                    url, json=json_params, verify=True,
-                    headers={"content-type": "application/json", "Connection": "close"}
-                )
-        else:
-            response = requests.post(url, json=json_params, headers={"Connection": "close"})
-        edge_id = -1
-        user_name = None
-        extra_url = None
-        if response.status_code != 200:
-            print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                  f"response.content: {response.content}")
-            pass
-        else:
-            # print("url = {}, response = {}".format(url, response))
-            status_code = response.json().get("code")
-            if status_code == "SUCCESS":
-                edge_id = response.json().get("data").get("id")
-                user_name = response.json().get("data").get("userName", None)
-                extra_url = response.json().get("data").get("url", None)
-                if edge_id is None or edge_id <= 0:
-                    print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                          f"response.content: {response.content}")
-            else:
-                if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR:
-                    raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR)
-                print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                      f"response.content: {response.content}")
-                return -1, None, None
-        return edge_id, user_name, extra_url
-
-    def fetch_configs(self):
-        return MLOpsConfigs.fetch_all_configs()
-
-    def send_agent_active_msg(self):
-        active_topic = "flserver_agent/active"
-        status = MLOpsStatus.get_instance().get_server_agent_status(self.edge_id)
-        if (
-                status is not None
-                and status != ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE
-                and status != ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
-        ):
-            return
-
-        if self.run_as_cloud_agent:
-            status = ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
-        else:
-            try:
-                current_job = FedMLServerDataInterface.get_instance().get_job_by_id(self.run_id)
-            except Exception as e:
-                current_job = None
-            if current_job is None:
-                if status is not None and status == ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE:
-                    status = ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
-                else:
-                    return
-            else:
-                status = ServerConstants.get_device_state_from_run_edge_state(current_job.status)
-        active_msg = {"ID": self.edge_id, "status": status}
-        MLOpsStatus.get_instance().set_server_agent_status(self.edge_id, status)
-        if self.mqtt_mgr is not None:
-            self.mqtt_mgr.send_message_json(active_topic, json.dumps(active_msg))
-        else:
-            self.send_message_json(active_topic, json.dumps(active_msg))
-
-    def recover_start_train_msg_after_upgrading(self):
-        try:
-            current_job = FedMLServerDataInterface.get_instance().get_current_job()
-            if current_job is not None and \
-                    current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING:
-                logging.info("start training after upgrading.")
-                server_agent_id = self.edge_id
-                topic_start_train = "mlops/flserver_agent_" + str(server_agent_id) + "/start_train"
-                self.callback_start_train(topic_start_train, current_job.running_json)
-        except Exception as e:
-            logging.info("recover starting train message after upgrading: {}".format(traceback.format_exc()))
-
-    def on_agent_mqtt_connected(self, mqtt_client_object):
-        # The MQTT message topic format is as follows: <sender>/<receiver>/<action>
-
-        # Setup MQTT message listener for starting training
-        server_agent_id = self.edge_id
-        topic_start_train = "mlops/flserver_agent_" + str(server_agent_id) + "/start_train"
-        self.add_message_listener(topic_start_train, self.callback_start_train)
-        self.mqtt_mgr.add_message_listener(topic_start_train, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener for stopping training
-        topic_stop_train = "mlops/flserver_agent_" + str(server_agent_id) + "/stop_train"
-        self.add_message_listener(topic_stop_train, self.callback_stop_train)
-        self.mqtt_mgr.add_message_listener(topic_stop_train, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener for server status switching
-        topic_server_status = "fl_server/flserver_agent_" + str(server_agent_id) + "/status"
-        self.add_message_listener(topic_server_status, self.callback_runner_id_status)
-        self.mqtt_mgr.add_message_listener(topic_server_status, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener to report current device status.
-        topic_report_status = "mlops/report_device_status"
-        self.add_message_listener(topic_report_status, self.callback_report_current_status)
-        self.mqtt_mgr.add_message_listener(topic_report_status, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener to OTA messages from the MLOps.
-        topic_ota_msg = "mlops/flserver_agent_" + str(server_agent_id) + "/ota"
-        self.add_message_listener(topic_ota_msg, self.callback_server_ota_msg)
-        self.mqtt_mgr.add_message_listener(topic_ota_msg, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener to request device info from the client.
-        topic_response_device_info = "client/server/response_device_info/" + str(self.edge_id)
-        self.add_message_listener(topic_response_device_info, self.callback_response_device_info)
-        self.mqtt_mgr.add_message_listener(topic_response_device_info, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener to request device info from MLOps.
-        topic_request_device_info_from_mlops = f"mlops/master_agent/request_device_info/{self.edge_id}"
-        self.add_message_listener(topic_request_device_info_from_mlops, self.callback_request_device_info_from_mlops)
-        self.mqtt_mgr.add_message_listener(
-            topic_request_device_info_from_mlops, self.listener_message_dispatch_center)
-
-        # Subscribe topics for starting train, stopping train and fetching client status.
-        mqtt_client_object.subscribe(topic_start_train, qos=2)
-        mqtt_client_object.subscribe(topic_stop_train, qos=2)
-        mqtt_client_object.subscribe(topic_server_status, qos=2)
-        mqtt_client_object.subscribe(topic_report_status, qos=2)
-        mqtt_client_object.subscribe(topic_ota_msg, qos=2)
-        mqtt_client_object.subscribe(topic_response_device_info, qos=2)
-        mqtt_client_object.subscribe(topic_request_device_info_from_mlops, qos=2)
-
-        self.subscribed_topics.clear()
-        self.subscribed_topics.append(topic_start_train)
-        self.subscribed_topics.append(topic_stop_train)
-        self.subscribed_topics.append(topic_server_status)
-        self.subscribed_topics.append(topic_report_status)
-        self.subscribed_topics.append(topic_ota_msg)
-        self.subscribed_topics.append(topic_response_device_info)
-        self.subscribed_topics.append(topic_request_device_info_from_mlops)
-
-        # Broadcast the first active message.
-        self.send_agent_active_msg()
-
-        if self.run_as_cloud_server:
-            # Start the FedML server
-            self.callback_start_train(payload=self.args.runner_cmd)
-
-        # Echo results
-        MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout()
-        print("\nCongratulations, your device is connected to the FedML MLOps platform successfully!")
-        print(
-            "Your FedML Edge ID is " + str(self.edge_id) + ", unique device ID is "
-            + str(self.unique_device_id)
-        )
-        MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout(enable=True)
-
-        # Start the message center for listener
-        self.start_listener(sender_message_queue=self.message_center.get_message_queue(),
-                            agent_config=self.agent_config)
-
-    def on_agent_mqtt_disconnected(self, mqtt_client_object):
-        MLOpsStatus.get_instance().set_server_agent_status(
-            self.edge_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE
-        )
-
-    def setup_agent_mqtt_connection(self, service_config):
-        # Setup MQTT connection
-        self.mqtt_mgr = MqttManager(
-            service_config["mqtt_config"]["BROKER_HOST"],
-            service_config["mqtt_config"]["BROKER_PORT"],
-            service_config["mqtt_config"]["MQTT_USER"],
-            service_config["mqtt_config"]["MQTT_PWD"],
-            service_config["mqtt_config"]["MQTT_KEEPALIVE"],
-            f"FedML_ServerAgent_Daemon_@{self.user_name}@_@{self.args.current_device_id}@_@{str(uuid.uuid4())}@",
-            "flserver_agent/last_will_msg",
-            json.dumps({"ID": self.edge_id, "status": ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE})
-        )
-
-        # Init local database
-        FedMLServerDataInterface.get_instance().create_job_table()
-
-        # Start the message center to process edge related messages.
-        self.setup_message_center()
-
-        server_api_cmd = "fedml.computing.scheduler.master.server_api:api"
-        server_api_pids = RunProcessUtils.get_pid_from_cmd_line(server_api_cmd)
-        if server_api_pids is None or len(server_api_pids) <= 0:
-            # Start local API services
-            cur_dir = os.path.dirname(__file__)
-            fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
-            python_program = get_python_program()
-            self.local_api_process = ServerConstants.exec_console_with_script(
-                "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
-                "--log-level critical".format(
-                    python_program, server_api_cmd, ServerConstants.LOCAL_SERVER_API_PORT,
-                    fedml_base_dir
-                ),
-                should_capture_stdout=False,
-                should_capture_stderr=False
-            )
-            # if self.local_api_process is not None and self.local_api_process.pid is not None:
-            #     print(f"Server local API process id {self.local_api_process.pid}")
-
-        # Setup MQTT connected listener
-        self.mqtt_mgr.add_connected_listener(self.on_agent_mqtt_connected)
-        self.mqtt_mgr.add_disconnected_listener(self.on_agent_mqtt_disconnected)
-        self.mqtt_mgr.connect()
-
-        # Report the IDLE status to MLOps
-        self.mlops_metrics.report_server_training_status(
-            self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE, edge_id=self.edge_id)
-        MLOpsStatus.get_instance().set_server_agent_status(
-            self.edge_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
-        )
-
-        # MLOpsRuntimeLogDaemon.get_instance(self.args).stop_all_log_processor()
-
-        self.mlops_metrics.stop_device_realtime_perf()
-        self.mlops_metrics.report_device_realtime_perf(self.args, service_config["mqtt_config"], is_client=False)
-
-        if not self.run_as_cloud_server:
-            self.recover_start_train_msg_after_upgrading()
-
-        JobCleanup.get_instance().sync_data_on_startup(self.edge_id, is_client=False)
-
-        self.master_api_daemon = MasterApiDaemon()
-        self.master_api_process = Process(target=self.master_api_daemon.run)
-        self.master_api_process.start()
-
-        # if self.model_device_server is None:
-        #     self.model_device_server = FedMLModelDeviceServerRunner(self.args, self.args.current_device_id,
-        #                                                             self.args.os_name, self.args.is_from_docker,
-        #                                                             self.agent_config)
-        #     self.model_device_server.start()
-
-    def start_agent_mqtt_loop(self):
-        # Start MQTT message loop
-        try:
-            self.mqtt_mgr.loop_forever()
-        except Exception as e:
-            if str(e) == "Restarting after upgraded...":
-                logging.info("Restarting after upgraded...")
-            else:
-                logging.info("Server tracing: {}".format(traceback.format_exc()))
-
-        finally:
-            login_exit_file = os.path.join(ServerConstants.get_log_file_dir(), "exited.log")
-            with open(login_exit_file, "w") as f:
-                f.writelines(f"{os.getpid()}.")
-
-            self.stop_agent()
-
-            time.sleep(5)
-            sys_utils.cleanup_all_fedml_server_login_processes(
-                ServerConstants.SERVER_LOGIN_PROGRAM, clean_process_group=False)
-            sys.exit(1)
-
-    def stop_agent(self):
-        if self.run_process_event is not None:
-            self.run_process_event.set()
-
-        if self.mqtt_mgr is not None:
-            try:
-                for topic in self.subscribed_topics:
-                    self.mqtt_mgr.unsubscribe_msg(topic)
-            except Exception as e:
-                pass
-
-            self.mqtt_mgr.loop_stop()
-            self.mqtt_mgr.disconnect()
-        self.release_message_center()
-
-    def get_runner(self):
-        runner = FedMLServerRunner(
-            self.args, run_id=self.run_id, request_json=self.request_json,
-            agent_config=self.agent_config
-        )
-        runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent
-        runner.edge_id = self.edge_id
-        runner.server_agent_id = self.server_agent_id
-        runner.start_request_json = self.start_request_json
-        runner.unique_device_id = self.unique_device_id
-        runner.user_name = self.user_name
-        runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent
-        runner.run_as_cloud_agent = self.run_as_cloud_agent
-        runner.run_as_cloud_server = self.run_as_cloud_server
-        return runner
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_runner.py b/python/fedml/computing/scheduler/model_scheduler/device_client_runner.py
deleted file mode 100755
index e82e8c5542..0000000000
--- a/python/fedml/computing/scheduler/model_scheduler/device_client_runner.py
+++ /dev/null
@@ -1,1335 +0,0 @@
-import json
-import logging
-import multiprocessing
-import sys
-
-from multiprocessing import Process
-import os
-import platform
-import shutil
-import subprocess
-import threading
-
-import time
-import traceback
-import urllib
-import uuid
-import zipfile
-from urllib.parse import urlparse, urljoin
-
-import requests
-import docker
-
-import fedml
-from fedml import mlops
-from fedml.computing.scheduler.model_scheduler.device_model_msg_object import FedMLModelMsgObject
-from fedml.core.distributed.communication.s3.remote_storage import S3Storage
-from .device_model_cache import FedMLModelCache
-from ..comm_utils import sys_utils, security_utils
-
-from ..comm_utils.container_utils import ContainerUtils
-
-from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
-
-from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager
-from ..comm_utils.yaml_utils import load_yaml_config
-from .device_client_constants import ClientConstants
-
-from ....core.mlops.mlops_metrics import MLOpsMetrics
-
-from ....core.mlops.mlops_configs import MLOpsConfigs
-from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
-from ....core.mlops.mlops_status import MLOpsStatus
-from ..comm_utils.sys_utils import get_sys_runner_info, get_python_program
-from .device_model_deployment import start_deployment, run_http_inference_with_curl_request
-from .device_client_data_interface import FedMLClientDataInterface
-from ....core.mlops.mlops_utils import MLOpsUtils
-from ..comm_utils.job_utils import JobRunnerUtils
-from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils
-from .device_mqtt_inference_protocol import FedMLMqttInference
-from .device_model_db import FedMLModelDatabase
-from ..comm_utils.constants import SchedulerConstants
-from fedml.computing.scheduler.comm_utils.job_monitor import JobMonitor
-
-
-class RunnerError(Exception):
-    """ Runner failed. """
-    pass
-
-
-class RunnerCompletedError(Exception):
-    """ Runner completed. """
-    pass
-
-
-class FedMLClientRunner:
-    FEDML_BOOTSTRAP_RUN_OK = "[FedML]Bootstrap Finished"
-
-    def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id=0):
-        self.local_api_process = None
-        self.run_process_event = None
-        self.run_process_event_map = dict()
-        self.run_process_completed_event = None
-        self.run_process_completed_event_map = dict()
-        self.run_inference_event_map = dict()
-        self.run_inference_response_map = dict()
-        self.run_process_map = dict()
-        self.device_status = None
-        self.current_training_status = None
-        self.mqtt_mgr = None
-        self.client_mqtt_mgr = None
-        self.client_mqtt_is_connected = False
-        self.client_mqtt_lock = None
-        self.edge_id = edge_id
-        self.run_id = run_id
-        self.unique_device_id = None
-        self.args = args
-        self.request_json = request_json
-        self.version = args.version
-        self.device_id = args.device_id
-        self.cur_dir = os.path.split(os.path.realpath(__file__))[0]
-        if args.current_running_dir is not None:
-            self.cur_dir = args.current_running_dir
-        self.sudo_cmd = ""
-        self.is_mac = False
-        if platform.system() == "Darwin":
-            self.is_mac = True
-
-        self.agent_config = agent_config
-        self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data")
-        self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data")
-        self.fedml_data_dir = self.fedml_data_base_package_dir
-        self.fedml_config_dir = os.path.join("/", "fedml", "conf")
-
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {}
-
-        self.mlops_metrics = None
-        self.client_active_list = dict()
-        self.infer_host = "127.0.0.1"
-        self.redis_addr = "local"
-        self.redis_port = "6379"
-        self.redis_password = "fedml_default"
-        self.model_is_from_open = False
-
-        self.model_runner_mapping = dict()
-        self.ntp_offset = MLOpsUtils.get_ntp_offset()
-        self.running_request_json = dict()
-        self.endpoint_inference_runners = dict()
-        self.mqtt_inference_obj = None
-
-        self.subscribed_topics = list()
-        self.user_name = None
-
-    def unzip_file(self, zip_file, unzip_file_path) -> str:
-        unziped_file_name = ""
-        if zipfile.is_zipfile(zip_file):
-            with zipfile.ZipFile(zip_file, "r") as zipf:
-                zipf.extractall(unzip_file_path)
-                unziped_file_name = zipf.namelist()[0]
-        else:
-            raise Exception("Invalid zip file {}".format(zip_file))
-
-        return unziped_file_name
-
-    def retrieve_and_unzip_package(self, package_name, package_url):
-        local_package_path = ClientConstants.get_model_package_dir()
-        os.makedirs(local_package_path, exist_ok=True)
-        filename, filename_without_extension, file_extension = ClientConstants.get_filename_and_extension(package_url)
-        local_package_file = os.path.join(local_package_path,
-                                          f"fedml_run_{self.run_id}_{self.edge_id}_{filename_without_extension}")
-        if os.path.exists(local_package_file):
-            os.remove(local_package_file)
-        logging.info("Download from package_url {}".format(package_url))
-
-        package_url_without_query_path = urljoin(package_url, urlparse(package_url).path)
-        urllib.request.urlretrieve(package_url_without_query_path, local_package_file,
-                                   reporthook=self.package_download_progress)
-        unzip_package_path = os.path.join(ClientConstants.get_package_unzip_dir(),
-                                          f"unzip_fedml_run_{self.run_id}_{self.edge_id}_{filename_without_extension}")
-        try:
-            shutil.rmtree(unzip_package_path, ignore_errors=True)
-        except Exception as e:
-            pass
-
-        package_dir_name = self.unzip_file(local_package_file, unzip_package_path)  # Using unziped folder name
-        unzip_package_full_path = os.path.join(unzip_package_path, package_dir_name)
-        model_bin_file = os.path.join(unzip_package_path, "fedml_model.bin")
-
-        logging.info("local_package_file {}, unzip_package_path {}, unzip file full path {}".format(
-            local_package_file, unzip_package_path, unzip_package_full_path))
-
-        return unzip_package_full_path, model_bin_file
-
-    def retrieve_binary_model_file(self, package_name, package_url):
-        local_package_path = ClientConstants.get_model_package_dir()
-        if not os.path.exists(local_package_path):
-            os.makedirs(local_package_path, exist_ok=True)
-        unzip_package_path = ClientConstants.get_model_dir()
-        local_package_file = "{}".format(os.path.join(local_package_path, package_name))
-        if os.path.exists(local_package_file):
-            os.remove(local_package_file)
-        package_url_without_query_path = urljoin(package_url, urlparse(package_url).path)
-        urllib.request.urlretrieve(package_url_without_query_path, local_package_file,
-                                   reporthook=self.package_download_progress)
-
-        unzip_package_path = os.path.join(unzip_package_path, package_name)
-        if not os.path.exists(unzip_package_path):
-            os.makedirs(unzip_package_path, exist_ok=True)
-        dst_model_file = os.path.join(unzip_package_path, package_name)
-        if os.path.exists(local_package_file):
-            shutil.copy(local_package_file, dst_model_file)
-
-        return unzip_package_path, dst_model_file
-
-    def package_download_progress(self, count, blksize, filesize):
-        self.check_runner_stop_event()
-
-        downloaded = count * blksize
-        downloaded = filesize if downloaded > filesize else downloaded
-        progress = (downloaded / filesize * 100) if filesize != 0 else 0
-        progress_int = int(progress)
-        downloaded_kb = format(downloaded / 1024, '.2f')
-
-        # since this hook funtion is stateless, we need a state to avoid printing progress repeatly
-        if count == 0:
-            self.prev_download_progress = 0
-        if progress_int != self.prev_download_progress and progress_int % 5 == 0:
-            self.prev_download_progress = progress_int
-            logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int))
-
-    def build_dynamic_constrain_variables(self, run_id, run_config):
-        pass
-
-    def update_local_fedml_config(self, run_id, model_config, model_config_parameters):
-        model_name = model_config["model_name"]
-        model_storage_url = model_config["model_storage_url"]
-        scale_min = model_config.get("instance_scale_min", 0)
-        scale_max = model_config.get("instance_scale_max", 0)
-        inference_engine = model_config.get("inference_engine", 0)
-        inference_end_point_id = run_id
-
-        # Retrieve model package or model binary file.
-        if self.model_is_from_open:
-            unzip_package_path, model_bin_file = self.retrieve_binary_model_file(model_name, model_storage_url)
-        else:
-            unzip_package_path, model_bin_file = self.retrieve_and_unzip_package(model_name, model_storage_url)
-
-        # Load the config to memory
-        package_conf_object = {}
-        fedml_local_config_file = os.path.join(unzip_package_path, "fedml_model_config.yaml")
-        if os.path.exists(fedml_local_config_file):
-            package_conf_object = load_yaml_config(fedml_local_config_file)
-        else:
-            if model_config_parameters is not None:
-                logging.warning(f"The fedml_local_config_file {fedml_local_config_file} does not exist, will \
-                                    create a new one with the model_config_parameters from json.")
-                package_conf_object = model_config_parameters
-                with open(fedml_local_config_file, 'w') as f:
-                    json.dump(package_conf_object, f)
-            else:
-                logging.info(f"The fedml_local_config_file {fedml_local_config_file} does not exist,\
-                             and the model_config_parameters is None.")
-        logging.info("The package_conf_object is {}".format(package_conf_object))
-
-        return unzip_package_path, model_bin_file, package_conf_object
-
-    def build_dynamic_args(self, run_config, package_conf_object, base_dir):
-        pass
-
-    def download_model_package(self, package_name, package_url):
-        # Copy config file from the client
-        unzip_package_path = self.retrieve_and_unzip_package(
-            package_name, package_url
-        )
-
-        return unzip_package_path
-
-    def run(self, process_event, completed_event):
-        # print(f"Model worker runner process id {os.getpid()}, run id {self.run_id}")
-
-        if platform.system() != "Windows":
-            os.setsid()
-
-        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
-        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')
-
-        self.run_process_event = process_event
-        self.run_process_completed_event = completed_event
-        run_id = self.request_json.get("end_point_id")
-
-        try:
-            FedMLModelDatabase.get_instance().set_database_base_dir(ClientConstants.get_database_dir())
-            FedMLModelDatabase.get_instance().create_table()
-
-            MLOpsUtils.set_ntp_offset(self.ntp_offset)
-            self.setup_client_mqtt_mgr()
-
-            if not self.run_impl():
-                logging.info(
-                    f"[endpoint/device][{run_id}/{self.edge_id}] Release gpu resource when the worker deployment returned false.")
-                self.release_gpu_ids(run_id)
-        except RunnerError:
-            logging.info("Runner stopped.")
-            logging.info(
-                f"[endpoint/device][{run_id}/{self.edge_id}] Release gpu resource when the worker deployment stopped.")
-            self.release_gpu_ids(run_id)
-            self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED)
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
-        except RunnerCompletedError:
-            logging.info(
-                f"[endpoint/device][{run_id}/{self.edge_id}] Release gpu resource when the worker deployment completed.")
-            self.release_gpu_ids(run_id)
-            logging.info("Runner completed.")
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
-        except Exception as e:
-            logging.error("Runner exits with exceptions. {}".format(traceback.format_exc()))
-            self.cleanup_run_when_starting_failed()
-            self.mlops_metrics.client_send_exit_train_msg(
-                run_id, self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)
-            logging.info(
-                f"[endpoint/device][{run_id}/{self.edge_id}] Release gpu resource when the worker deployment occurred exceptions.")
-            self.release_gpu_ids(run_id)
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
-            time.sleep(2)
-            sys.exit(1)
-        finally:
-            logging.info("Release resources.")
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
-            if self.mlops_metrics is not None:
-                self.mlops_metrics.stop_sys_perf()
-            time.sleep(3)
-            self.release_client_mqtt_mgr()
-
-    def release_gpu_ids(self, run_id):
-        JobRunnerUtils.get_instance().release_gpu_ids(run_id, self.edge_id)
-
-    def check_runner_stop_event(self):
-        if self.run_process_event.is_set():
-            logging.info("Received stopping event.")
-            raise RunnerError("Runner stopped")
-
-        if self.run_process_completed_event is not None and self.run_process_completed_event.is_set():
-            logging.info("Received completed event.")
-            raise RunnerCompletedError("Runner completed")
-
-    def inference_run(self):
-        # run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \
-        #     model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \
-        #     inference_end_point_id, use_gpu, memory_size, model_version = self.parse_model_run_params(self.request_json)
-        #
-        # inference_client = FedMLModelServingClient(self.args,
-        #                                            end_point_name,
-        #                                            model_name,
-        #                                            model_version,
-        #                                            inference_request=self.request_json)
-        # inference_client.run()
-        pass
-
-    def run_impl(self):
-        run_id = self.request_json["end_point_id"]
-        end_point_name = self.request_json["end_point_name"]
-        token = self.request_json["token"]
-        user_id = self.request_json["user_id"]
-        user_name = self.request_json["user_name"]
-        device_ids = self.request_json["device_ids"]
-        device_objs = self.request_json["device_objs"]
-        master_ip = self.request_json["master_node_ip"]
-
-        model_config = self.request_json["model_config"]
-        model_name = model_config["model_name"]
-        model_id = model_config["model_id"]
-        model_version = model_config["model_version"]
-        model_storage_url = model_config["model_storage_url"]
-        scale_min = model_config.get("instance_scale_min", 0)
-        scale_max = model_config.get("instance_scale_max", 0)
-        model_config_parameters = self.request_json["parameters"]
-
-        inference_port = model_config_parameters.get("worker_internal_port",
-                                                     ClientConstants.MODEL_INFERENCE_DEFAULT_PORT)
-        inference_port_external = model_config_parameters.get("worker_external_port", inference_port)
-
-        if "using_triton" in model_config_parameters and model_config_parameters["using_triton"]:
-            inference_engine = ClientConstants.INFERENCE_ENGINE_TYPE_INT_TRITON
-        else:
-            inference_engine = ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT
-
-        logging.info("[Critical] The inference_engine is: {}".format(inference_engine))
-
-        self.model_is_from_open = True if model_config.get("is_from_open", 0) == 1 else False
-        if self.model_is_from_open:
-            model_net_url = model_config["model_net_url"]
-        inference_end_point_id = run_id
-        use_gpu = "gpu"  # TODO: Get GPU from device infos
-        memory_size = "4096m"  # TODO: Get Memory size for each instance
-
-        self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id)
-
-        self.check_runner_stop_event()
-
-        logging.info("model deployment request: {}".format(self.request_json))
-
-        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
-
-        # Initiate an FedMLInferenceClient object
-        # client_runner = FedMLClientRunner(
-        #     self.args, edge_id=self.edge_id, run_id=self.run_id, request_json=self.request_json,
-        #     agent_config=self.agent_config
-        # )
-        # inference_process = Process(target=client_runner.inference_run)
-        # inference_process.start()
-
-        self.mlops_metrics.report_client_training_status(
-            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING,
-            is_from_model=True, running_json=json.dumps(self.request_json), run_id=run_id)
-
-        self.mlops_metrics.report_client_training_status(
-            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING,
-            is_from_model=True, run_id=run_id)
-
-        self.check_runner_stop_event()
-
-        # update local config with real time parameters from server and dynamically replace variables value
-        logging.info("download and unzip model to local...")
-        unzip_package_path, model_bin_file, fedml_config_object = \
-            self.update_local_fedml_config(run_id, model_config, model_config_parameters)
-        if unzip_package_path is None or fedml_config_object is None:
-            logging.info("failed to update local fedml config.")
-            self.check_runner_stop_event()
-            self.cleanup_run_when_starting_failed()
-            self.mlops_metrics.client_send_exit_train_msg(run_id, self.edge_id,
-                                                          ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)
-            return False
-
-        logging.info("check downloaded packages...")
-        if not os.path.exists(unzip_package_path):
-            logging.info("failed to unzip file.")
-            self.check_runner_stop_event()
-            self.cleanup_run_when_starting_failed()
-            self.mlops_metrics.client_send_exit_train_msg(run_id, self.edge_id,
-                                                          ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)
-            return False
-
-        # download model net and load into the torch model
-        model_from_open = None
-        self.model_is_from_open = None
-        if self.model_is_from_open:
-            logging.info("process the model net from open...")
-            self.check_runner_stop_event()
-            s3_config = self.agent_config.get("s3_config", None)
-            if s3_config is not None and model_net_url is not None and model_net_url != "":
-                s3_client = S3Storage(s3_config)
-                url_parsed = urlparse(model_net_url)
-                path_list = url_parsed.path.split("/")
-                if len(path_list) > 0:
-                    model_key = path_list[-1]
-                    model_from_open = s3_client.read_model_net(model_key,
-                                                               ClientConstants.get_model_cache_dir())
-
-                model_input_size, model_input_type = mlops.get_training_model_input_info(model_net_url, s3_config)
-                if model_input_size is not None and model_input_type is not None:
-                    model_config_parameters["input_size"] = model_input_size
-                    model_config_parameters["input_types"] = model_input_type
-                    logging.info(
-                        f"model input size {model_input_size}, input type {model_input_type} from the open platform.")
-
-        logging.info("Check if need update / removing existed container...")
-        if "diff_devices" in self.request_json and str(self.edge_id) in self.request_json["diff_devices"] and \
-                self.request_json["diff_devices"][str(self.edge_id)] == ClientConstants.DEVICE_DIFF_REPLACE_OPERATION:
-            self.handle_replaced_device()
-
-        logging.info("start the model deployment...")
-        self.check_runner_stop_event()
-        running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
-            "", "", model_version, {}, {}
-        try:
-            client_ip = self.get_ip_address(self.request_json)
-            running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
-                start_deployment(
-                    inference_end_point_id, end_point_name, model_id, model_version,
-                    unzip_package_path, model_bin_file, model_name, inference_engine,
-                    ClientConstants.INFERENCE_HTTP_PORT,
-                    ClientConstants.INFERENCE_GRPC_PORT,
-                    ClientConstants.INFERENCE_METRIC_PORT,
-                    use_gpu, memory_size,
-                    ClientConstants.INFERENCE_CONVERTOR_IMAGE,
-                    ClientConstants.INFERENCE_SERVER_IMAGE,
-                    client_ip,
-                    self.model_is_from_open, model_config_parameters,
-                    model_from_open,
-                    token,
-                    master_ip, self.edge_id, master_device_id=device_ids[0])
-        except Exception as e:
-            inference_output_url = ""
-            logging.error(f"Exception at deployment: {traceback.format_exc()}")
-
-        if inference_output_url == "":
-            logging.error("failed to deploy the model...")
-
-            result_payload = self.send_deployment_results(
-                end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
-                model_id, model_name, inference_output_url, inference_model_version, inference_port,
-                inference_engine, model_metadata, model_config)
-
-            self.mlops_metrics.run_id = self.run_id
-            self.mlops_metrics.broadcast_client_training_status(
-                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
-                is_from_model=True, run_id=self.run_id)
-
-            self.mlops_metrics.client_send_exit_train_msg(
-                run_id, self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)
-
-            # After sending the deployment status, we should wait for the master to delete the deployment status
-            status_payload = self.send_deployment_status(
-                end_point_name, self.edge_id, model_id, model_name, model_version, inference_output_url,
-                ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, inference_port=inference_port)
-
-            return False
-        else:
-            logging.info("finished deployment, continue to send results to master...")
-            status_payload = self.send_deployment_status(  # Send Master the external port
-                end_point_name, self.edge_id, model_id, model_name, model_version, inference_output_url,
-                ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, inference_port=inference_port_external)
-            result_payload = self.send_deployment_results(
-                end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
-                model_id, model_name, inference_output_url, model_version, inference_port_external,
-                inference_engine, model_metadata, model_config)
-
-            if inference_port_external != inference_port:  # For Worker, use internal port
-                logging.info("inference_port_external {} != inference_port {}".format(
-                    inference_port_external, inference_port))
-                status_payload = self.construct_deployment_status(
-                    end_point_name, self.edge_id, model_id, model_name, model_version, inference_output_url,
-                    ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, inference_port=inference_port)
-                result_payload = self.construct_deployment_results(
-                    end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
-                    model_id, model_name, inference_output_url, model_version, inference_port,
-                    inference_engine, model_metadata, model_config)
-
-            FedMLModelDatabase.get_instance().set_deployment_result(
-                run_id, end_point_name, model_name, model_version, self.edge_id, json.dumps(result_payload))
-
-            FedMLModelDatabase.get_instance().set_deployment_status(
-                run_id, end_point_name, model_name, model_version, self.edge_id, json.dumps(status_payload))
-
-            time.sleep(1)
-            self.mlops_metrics.run_id = self.run_id
-            self.mlops_metrics.broadcast_client_training_status(
-                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
-                is_from_model=True, run_id=self.run_id)
-            return True
-
    def handle_replaced_device(self):
        """
        Strategy-1:
        (1) clean local records (2) find and clean current container using diff_version: {device_id: old_version}

        Prepare this worker for an in-place upgrade of an already-deployed model:
        release the GPUs held for the endpoint, mark the local job as UPGRADING,
        drop this device's stored deployment result, and remove the old
        container/deployment. All inputs come from self.request_json; any failure
        is logged and swallowed (see the TODO below about rollback).
        """
        end_point_id = self.request_json["end_point_id"]
        end_point_name = self.request_json["end_point_name"]
        model_config = self.request_json["model_config"]
        model_name = model_config["model_name"]
        model_id = model_config["model_id"]
        new_model_version = model_config["model_version"]
        # diff_version maps device_id -> the model version currently running on it.
        old_model_version = self.request_json["diff_version"][str(self.edge_id)]

        logging.info(f"[endpoint/device][{end_point_id}/{self.edge_id}] "
                     f"Start to handle replaced device {self.edge_id} to new version {new_model_version}."
                     f"which originally has old version {old_model_version}.")

        try:
            JobRunnerUtils.get_instance().release_gpu_ids(end_point_id, self.edge_id)

            # Instead of deleting the records, need to change the job status to "UPGRADING"
            FedMLClientDataInterface.get_instance().save_job_status(
                end_point_id, self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING,
                ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING
            )

            FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id(
                end_point_id, end_point_name, model_name, self.edge_id)

            # Remove the container running the old version; the new one is started later.
            ClientConstants.remove_deployment(
                end_point_name, model_name, old_model_version,
                end_point_id, model_id, edge_id=self.edge_id)
        except Exception as e:
            # TODO: 1. Check this release action cause the resource seized by other run
            #  2. If this atomic op failed, should rolling back
            logging.info(f"Exception when removing deployment {traceback.format_exc()}")
            pass
-
-    def construct_deployment_results(self, end_point_name, device_id, model_status,
-                                     model_id, model_name, model_inference_url,
-                                     model_version, inference_port, inference_engine,
-                                     model_metadata, model_config):
-        deployment_results_payload = {"end_point_id": self.run_id, "end_point_name": end_point_name,
-                                      "model_id": model_id, "model_name": model_name,
-                                      "model_url": model_inference_url, "model_version": model_version,
-                                      "port": inference_port,
-                                      "inference_engine": inference_engine,
-                                      "model_metadata": model_metadata,
-                                      "model_config": model_config,
-                                      "model_status": model_status,
-                                      "inference_port": inference_port}
-        return deployment_results_payload
-
-    def construct_deployment_status(self, end_point_name, device_id,
-                                    model_id, model_name, model_version,
-                                    model_inference_url, model_status,
-                                    inference_port=ClientConstants.MODEL_INFERENCE_DEFAULT_PORT):
-        deployment_status_payload = {"end_point_id": self.run_id, "end_point_name": end_point_name,
-                                     "device_id": device_id,
-                                     "model_id": model_id, "model_name": model_name,
-                                     "model_version": model_version,
-                                     "model_url": model_inference_url, "model_status": model_status,
-                                     "inference_port": inference_port}
-        return deployment_status_payload
-
-    def send_deployment_results(self, end_point_name, device_id, model_status,
-                                model_id, model_name, model_inference_url,
-                                model_version, inference_port, inference_engine,
-                                model_metadata, model_config):
-        deployment_results_topic = "model_device/model_device/return_deployment_result/{}".format(device_id)
-        deployment_results_payload = self.construct_deployment_results(
-            end_point_name, device_id, model_status,
-            model_id, model_name, model_inference_url,
-            model_version, inference_port, inference_engine,
-            model_metadata, model_config)
-
-        logging.info("[client] send_deployment_results: topic {}, payload {}.".format(deployment_results_topic,
-                                                                                      deployment_results_payload))
-        self.client_mqtt_mgr.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload))
-        return deployment_results_payload
-
-    def send_deployment_status(self, end_point_name, device_id,
-                               model_id, model_name, model_version,
-                               model_inference_url, model_status,
-                               inference_port=ClientConstants.MODEL_INFERENCE_DEFAULT_PORT):
-        deployment_status_topic = "model_device/model_device/return_deployment_status/{}".format(device_id)
-        deployment_status_payload = self.construct_deployment_status(
-            end_point_name, device_id,
-            model_id, model_name, model_version,
-            model_inference_url, model_status,
-            inference_port=inference_port)
-
-        logging.info("[client] send_deployment_status: topic {}, payload {}.".format(deployment_status_topic,
-                                                                                     deployment_status_payload))
-        self.client_mqtt_mgr.send_message_json(deployment_status_topic, json.dumps(deployment_status_payload))
-        return deployment_status_payload
-
-    def reset_devices_status(self, edge_id, status):
-        self.mlops_metrics.run_id = self.run_id
-        self.mlops_metrics.edge_id = edge_id
-        self.mlops_metrics.broadcast_client_training_status(
-            edge_id, status, is_from_model=True, run_id=self.run_id)
-
-    def cleanup_run_when_starting_failed(self):
-        logging.info("Cleanup run successfully when starting failed.")
-
-        self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)
-
-        time.sleep(2)
-
-        try:
-            self.mlops_metrics.stop_sys_perf()
-        except Exception as ex:
-            pass
-
-        time.sleep(1)
-
-    def cleanup_run_when_finished(self):
-        logging.info("Cleanup run successfully when finished.")
-
-        self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED)
-
-        time.sleep(2)
-
-        try:
-            self.mlops_metrics.stop_sys_perf()
-        except Exception as ex:
-            pass
-
-        time.sleep(1)
-
-    def on_client_mqtt_disconnected(self, mqtt_client_object):
-        if self.client_mqtt_lock is None:
-            self.client_mqtt_lock = threading.Lock()
-
-        self.client_mqtt_lock.acquire()
-        self.client_mqtt_is_connected = False
-        self.client_mqtt_lock.release()
-
-    def on_client_mqtt_connected(self, mqtt_client_object):
-        if self.mlops_metrics is None:
-            self.mlops_metrics = MLOpsMetrics()
-
-        self.mlops_metrics.set_messenger(self.client_mqtt_mgr)
-        self.mlops_metrics.run_id = self.run_id
-
-        if self.client_mqtt_lock is None:
-            self.client_mqtt_lock = threading.Lock()
-
-        self.client_mqtt_lock.acquire()
-        self.client_mqtt_is_connected = True
-        self.client_mqtt_lock.release()
-
-    def setup_client_mqtt_mgr(self):
-        if self.client_mqtt_mgr is not None:
-            return
-
-        if self.client_mqtt_lock is None:
-            self.client_mqtt_lock = threading.Lock()
-
-        self.client_mqtt_mgr = MqttManager(
-            self.agent_config["mqtt_config"]["BROKER_HOST"],
-            self.agent_config["mqtt_config"]["BROKER_PORT"],
-            self.agent_config["mqtt_config"]["MQTT_USER"],
-            self.agent_config["mqtt_config"]["MQTT_PWD"],
-            self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"],
-            "FedML_ModelClientAgent_Metrics_@{}@_{}_{}_{}".format(self.user_name, self.args.current_device_id,
-                                                                  str(os.getpid()),
-                                                                  str(uuid.uuid4()))
-        )
-
-        self.client_mqtt_mgr.add_connected_listener(self.on_client_mqtt_connected)
-        self.client_mqtt_mgr.add_disconnected_listener(self.on_client_mqtt_disconnected)
-        self.client_mqtt_mgr.connect()
-        self.client_mqtt_mgr.loop_start()
-
-        if self.mlops_metrics is None:
-            self.mlops_metrics = MLOpsMetrics()
-        self.mlops_metrics.set_messenger(self.client_mqtt_mgr)
-        self.mlops_metrics.run_id = self.run_id
-
-    def release_client_mqtt_mgr(self):
-        try:
-            if self.client_mqtt_mgr is not None:
-                self.client_mqtt_mgr.loop_stop()
-                self.client_mqtt_mgr.disconnect()
-
-            self.client_mqtt_lock.acquire()
-            if self.client_mqtt_mgr is not None:
-                self.client_mqtt_is_connected = False
-                self.client_mqtt_mgr = None
-            self.client_mqtt_lock.release()
-        except Exception:
-            pass
-
-    def ota_upgrade(self, payload, request_json):
-        run_id = request_json["end_point_id"]
-        force_ota = False
-        ota_version = None
-
-        try:
-            parameters = request_json.get("parameters", None)
-            common_args = parameters.get("common_args", None)
-            force_ota = common_args.get("force_ota", False)
-            ota_version = common_args.get("ota_version", None)
-        except Exception as e:
-            pass
-
-        if force_ota and ota_version is not None:
-            should_upgrade = True if ota_version != fedml.__version__ else False
-            upgrade_version = ota_version
-        else:
-            try:
-                fedml_is_latest_version, local_ver, remote_ver = sys_utils.check_fedml_is_latest_version(self.version)
-            except Exception as e:
-                return
-
-            should_upgrade = False if fedml_is_latest_version else True
-            upgrade_version = remote_ver
-
-        if should_upgrade:
-            FedMLClientDataInterface.get_instance(). \
-                save_started_job(run_id, self.edge_id, time.time(),
-                                 ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING,
-                                 ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING,
-                                 payload)
-
-            logging.info(f"Upgrade to version {upgrade_version} ...")
-
-            sys_utils.do_upgrade(self.version, upgrade_version)
-
-            raise Exception("Restarting after upgraded...")
-
    def callback_start_deployment(self, topic, payload):
        """
        topic: model_ops/model_device/start_deployment/model-agent-device-id
        payload: {"model_name": "image-model", "model_storage_url":"s3-url",
        "instance_scale_min":1, "instance_scale_max":3, "inference_engine":"onnx (or tensorrt)"}

        Orchestrates a new model deployment on this worker: refresh configs,
        start the per-run log processor, optionally OTA-upgrade (which raises to
        force a restart), then launch a FedMLClientRunner in a child process
        wired to stop/complete events, recording the child pid and runner info.
        """
        # get deployment params
        request_json = json.loads(payload)
        run_id = request_json["end_point_id"]
        token = request_json["token"]
        user_id = request_json["user_id"]
        user_name = request_json["user_name"]
        device_ids = request_json["device_ids"]
        device_objs = request_json["device_objs"]

        model_config = request_json["model_config"]
        model_name = model_config["model_name"]
        model_storage_url = model_config["model_storage_url"]
        scale_min = model_config.get("instance_scale_min", 0)
        scale_max = model_config.get("instance_scale_max", 0)
        inference_engine = model_config.get("inference_engine", 0)
        inference_end_point_id = run_id

        # NOTE(review): several fields above (token, user_id, scale_min, ...) are
        # parsed but unused in this method — presumably kept to validate the
        # payload schema; confirm before removing.

        try:
            MLOpsConfigs.fetch_all_configs()
        except Exception as e:
            # Best-effort config refresh; deployment proceeds with cached configs.
            pass

        # Start log processor for current run
        run_id = inference_end_point_id
        self.args.run_id = run_id
        self.args.edge_id = self.edge_id
        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
        MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source(
            ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT)
        MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id)

        # May raise to restart the agent after an upgrade.
        self.ota_upgrade(payload, request_json)

        # Start client with multiprocessing mode
        request_json["run_id"] = run_id
        run_id_str = str(run_id)
        self.request_json = request_json
        self.running_request_json[run_id_str] = request_json
        client_runner = FedMLClientRunner(
            self.args, edge_id=self.edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id
        )
        client_runner.infer_host = self.get_ip_address(request_json)
        # Stop/complete events let other callbacks signal the child process.
        self.run_process_event_map[run_id_str] = multiprocessing.Event()
        self.run_process_event_map[run_id_str].clear()
        client_runner.run_process_event = self.run_process_event_map[run_id_str]
        self.run_process_completed_event_map[run_id_str] = multiprocessing.Event()
        self.run_process_completed_event_map[run_id_str].clear()
        client_runner.run_process_completed_event = self.run_process_completed_event_map[run_id_str]
        self.model_runner_mapping[run_id_str] = client_runner
        self.run_id = run_id
        self.run_process_map[run_id_str] = Process(target=client_runner.run, args=(
            self.run_process_event_map[run_id_str], self.run_process_completed_event_map[run_id_str]
        ))
        # client_runner.run()
        self.run_process_map[run_id_str].start()
        ClientConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid)
        ClientConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, self.edge_id, run_id=run_id)
-
-    def set_runner_stopped_event(self, run_id):
-        run_id_str = str(run_id)
-        client_runner = self.model_runner_mapping.get(run_id_str, None)
-        if client_runner is not None:
-            if client_runner.run_process_event is not None:
-                client_runner.run_process_event.set()
-            self.model_runner_mapping.pop(run_id_str)
-
-    def set_runner_completed_event(self, run_id):
-        run_id_str = str(run_id)
-        client_runner = self.model_runner_mapping.get(run_id_str, None)
-        if client_runner is not None:
-            if client_runner.run_process_completed_event is not None:
-                client_runner.run_process_completed_event.set()
-            self.model_runner_mapping.pop(run_id_str)
-
    def callback_delete_deployment(self, topic, payload):
        """Tear down a deployment on this worker when the master deletes it.

        Removes the running container (best effort), signals the runner process
        to stop, releases the GPUs held for the endpoint, and clears local
        bookkeeping (request cache, job DB entry, stored deployment result).
        """
        logging.info("callback_delete_deployment: topic = %s, payload = %s" % (topic, payload))

        # Parse payload as the model message object.
        model_msg_object = FedMLModelMsgObject(topic, payload)

        try:
            ClientConstants.remove_deployment(
                model_msg_object.end_point_name, model_msg_object.model_name, model_msg_object.model_version,
                model_msg_object.run_id, model_msg_object.model_id, edge_id=self.edge_id)
        except Exception as e:
            # Best effort: the container may already be gone.
            logging.info(f"Exception when removing deployment {traceback.format_exc()}")
            pass

        self.set_runner_stopped_event(model_msg_object.run_id)

        logging.info(f"[endpoint/device][{model_msg_object.run_id}/{self.edge_id}] "
                     f"Release gpu resource when the worker deployment deleted.")
        JobRunnerUtils.get_instance().release_gpu_ids(model_msg_object.run_id, self.edge_id)

        # Drop the cached request for this run, if any.
        if self.running_request_json.get(str(model_msg_object.run_id)) is not None:
            try:
                self.running_request_json.pop(str(model_msg_object.run_id))
            except Exception as e:
                pass

        FedMLClientDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id)
        FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id(
            model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name,
            self.edge_id)
-
-    def exit_run_with_exception_entry(self):
-        try:
-            self.setup_client_mqtt_mgr()
-            self.exit_run_with_exception()
-        except Exception as e:
-            self.release_client_mqtt_mgr()
-            sys.exit(1)
-        finally:
-            self.release_client_mqtt_mgr()
-
-    def exit_run_with_exception(self):
-        logging.info("Exit run successfully.")
-
-        ClientConstants.cleanup_learning_process(self.run_id)
-        ClientConstants.cleanup_run_process(self.run_id)
-
-        self.mlops_metrics.report_client_id_status(
-            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
-            is_from_model=True, run_id=self.run_id)
-
-        time.sleep(1)
-
-    def callback_exit_train_with_exception(self, topic, payload):
-        request_json = json.loads(payload)
-        is_retain = request_json.get("is_retain", False)
-        if is_retain:
-            return
-        run_id = request_json.get("runId", None)
-        if run_id is None:
-            run_id = request_json.get("run_id", None)
-            if run_id is None:
-                run_id = request_json.get("id", None)
-
-        if run_id is None:
-            return
-
-        # Stop client with multiprocessing mode
-        self.request_json = request_json
-        client_runner = FedMLClientRunner(
-            self.args, edge_id=self.edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id
-        )
-        try:
-            Process(target=client_runner.exit_run_with_exception_entry).start()
-        except Exception as e:
-            pass
-
-    def cleanup_client_with_status(self):
-        self.setup_client_mqtt_mgr()
-
-        if self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED:
-            self.cleanup_run_when_finished()
-        elif self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED:
-            self.cleanup_run_when_starting_failed()
-
-        self.release_client_mqtt_mgr()
-
    def callback_runner_id_status(self, topic, payload):
        """Track a runner's reported status; on FINISHED/FAILED run cleanup.

        Cleanup is executed in a separate process (joined for up to 15 seconds)
        so MQTT teardown cannot block this callback, then the per-run log
        processor is stopped.
        """
        # logging.info("callback_runner_id_status: topic = %s, payload = %s" % (topic, payload))

        request_json = json.loads(payload)
        run_id = request_json["run_id"]
        edge_id = request_json["edge_id"]
        status = request_json["status"]

        self.save_training_status(edge_id, status)

        if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \
                status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED:
            # Stop client with multiprocessing mode
            self.request_json = request_json
            client_runner = FedMLClientRunner(
                self.args,
                edge_id=self.edge_id,
                request_json=request_json,
                agent_config=self.agent_config,
                run_id=run_id,
            )
            client_runner.device_status = status
            # Bounded join: do not hang the MQTT callback if cleanup stalls.
            status_process = Process(target=client_runner.cleanup_client_with_status)
            status_process.start()
            status_process.join(15)

            # Stop log processor for current run
            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, edge_id)
-
    def callback_report_current_status(self, topic, payload):
        """Respond to a status poll by (re)announcing this agent as active."""
        self.send_agent_active_msg()
-
-    @staticmethod
-    def process_ota_upgrade_msg():
-        os.system("pip install -U fedml")
-
-    def callback_client_ota_msg(self, topic, payload):
-        request_json = json.loads(payload)
-        cmd = request_json["cmd"]
-
-        if cmd == ClientConstants.FEDML_OTA_CMD_UPGRADE:
-            FedMLClientRunner.process_ota_upgrade_msg()
-            # Process(target=FedMLClientRunner.process_ota_upgrade_msg).start()
-            raise Exception("After upgraded, restart runner...")
-        elif cmd == ClientConstants.FEDML_OTA_CMD_RESTART:
-            raise Exception("Restart runner...")
-
-    def save_training_status(self, edge_id, training_status):
-        self.current_training_status = training_status
-        ClientConstants.save_training_infos(edge_id, training_status)
-
-    @staticmethod
-    def get_device_id():
-        device_file_path = os.path.join(ClientConstants.get_data_dir(),
-                                        ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME)
-        file_for_device_id = os.path.join(device_file_path, "devices.id")
-        if not os.path.exists(device_file_path):
-            os.makedirs(device_file_path)
-        elif os.path.exists(file_for_device_id):
-            with open(file_for_device_id, 'r', encoding='utf-8') as f:
-                device_id_from_file = f.readline()
-                if device_id_from_file is not None and device_id_from_file != "":
-                    return device_id_from_file
-
-        if platform.system() == "Darwin":
-            cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \
-                                 "|awk -F':' '{print $2}' "
-            device_id = os.popen(cmd_get_serial_num).read()
-            device_id = device_id.replace('\n', '').replace(' ', '')
-            if device_id is None or device_id == "":
-                device_id = hex(uuid.getnode())
-            else:
-                device_id = "0x" + device_id
-        else:
-            if "nt" in os.name:
-
-                def get_uuid():
-                    guid = ""
-                    try:
-                        cmd = "wmic csproduct get uuid"
-                        guid = str(subprocess.check_output(cmd))
-                        pos1 = guid.find("\\n") + 2
-                        guid = guid[pos1:-15]
-                    except Exception as ex:
-                        pass
-                    return str(guid)
-
-                device_id = str(get_uuid())
-                logging.info(device_id)
-            elif "posix" in os.name:
-                device_id = sys_utils.get_device_id_in_docker()
-                if device_id is None:
-                    device_id = hex(uuid.getnode())
-            else:
-                device_id = sys_utils.run_subprocess_open(
-                    "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split()
-                )
-                device_id = hex(device_id)
-
-        if device_id is not None and device_id != "":
-            with open(file_for_device_id, 'w', encoding='utf-8') as f:
-                f.write(device_id)
-        else:
-            device_id = hex(uuid.uuid4())
-            with open(file_for_device_id, 'w', encoding='utf-8') as f:
-                f.write(device_id)
-
-        return device_id
-
-    def get_ip_address(self, request_json):
-        # OPTION 1: Use local ip
-        ip = ClientConstants.get_local_ip()
-
-        # OPTION 2: Auto detect public ip
-        if "parameters" in request_json and \
-                ClientConstants.AUTO_DETECT_PUBLIC_IP in request_json["parameters"] and \
-                request_json["parameters"][ClientConstants.AUTO_DETECT_PUBLIC_IP]:
-            ip = ClientConstants.get_public_ip()
-            logging.info("Auto detect public ip for worker: " + ip)
-
-        # OPTION 3: Use user indicated ip
-        if self.infer_host is not None and self.infer_host != "127.0.0.1" and self.infer_host != "localhost":
-            ip = self.infer_host
-
-        return ip
-
-    def bind_account_and_device_id(self, url, account_id, device_id, os_name, role="md.on_premise_device"):
-        ip = requests.get('https://checkip.amazonaws.com').text.strip()
-        fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \
-            cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \
-            gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info()
-        host_name = sys_utils.get_host_name()
-        json_params = {
-            "accountid": account_id,
-            "deviceid": device_id,
-            "state": ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE,
-            "status": ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE,
-            "type": os_name,
-            "processor": cpu_info,
-            "core_type": cpu_info,
-            "network": "",
-            "role": role,
-            "os_ver": os_ver,
-            "memory": total_mem,
-            "ip": ip,
-            "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver,
-                            "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver,
-                            "mpi_installed": mpi_installed, "cpu_usage": cpu_usage,
-                            "available_mem": available_mem, "total_mem": total_mem,
-                            "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name}
-        }
-        if gpu_count > 0:
-            if gpu_total_mem is not None:
-                json_params["gpu"] = gpu_info if gpu_info is not None else "" + ", Total GPU Memory: " + gpu_total_mem
-            else:
-                json_params["gpu"] = gpu_info if gpu_info is not None else ""
-            json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else ""
-            if gpu_available_mem is not None:
-                json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem
-            if gpu_total_mem is not None:
-                json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem
-
-            json_params["extra_infos"]["gpu_count"] = gpu_count
-            json_params["extra_infos"]["gpu_vendor"] = gpu_vendor
-            json_params["extra_infos"]["gpu_device_name"] = gpu_device_name
-
-            gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count)
-            gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0
-            gpu_list = sys_utils.get_gpu_list()
-            json_params["extra_infos"]["gpu_available_count"] = gpu_available_count
-            json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list
-            json_params["extra_infos"]["gpu_list"] = gpu_list
-        else:
-            json_params["gpu"] = "None"
-            json_params["extra_infos"]["gpu_available_count"] = 0
-            json_params["extra_infos"]["gpu_available_id_list"] = []
-            json_params["extra_infos"]["gpu_list"] = []
-
-        _, cert_path = MLOpsConfigs.get_request_params()
-        if cert_path is not None:
-            try:
-                requests.session().verify = cert_path
-                response = requests.post(
-                    url, json=json_params, verify=True,
-                    headers={"content-type": "application/json", "Connection": "close"}
-                )
-            except requests.exceptions.SSLError as err:
-                MLOpsConfigs.install_root_ca_file()
-                response = requests.post(
-                    url, json=json_params, verify=True,
-                    headers={"content-type": "application/json", "Connection": "close"}
-                )
-        else:
-            response = requests.post(url, json=json_params, headers={"Connection": "close"})
-        edge_id = -1
-        user_name = None
-        extra_url = None
-        if response.status_code != 200:
-            print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                  f"response.content: {response.content}")
-            pass
-        else:
-            # print("url = {}, response = {}".format(url, response))
-            status_code = response.json().get("code")
-            if status_code == "SUCCESS":
-                edge_id = response.json().get("data").get("id")
-                user_name = response.json().get("data").get("userName", None)
-                extra_url = response.json().get("data").get("url", None)
-                if edge_id is None or edge_id <= 0:
-                    print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                          f"response.content: {response.content}")
-            else:
-                if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR:
-                    raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR)
-                print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                      f"response.content: {response.content}")
-                return -1, None, None
-        return edge_id, user_name, extra_url
-
-    def fetch_configs(self):
-        return MLOpsConfigs.fetch_all_configs()
-
-    def send_agent_active_msg(self):
-        active_topic = "flclient_agent/active"
-        status = MLOpsStatus.get_instance().get_client_agent_status(self.edge_id)
-        if (
-                status is not None
-                and status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE
-                and status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE
-        ):
-            return
-
-        try:
-            current_job = FedMLClientDataInterface.get_instance().get_job_by_id(self.run_id)
-        except Exception as e:
-            current_job = None
-        if current_job is None:
-            if status is not None and status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE:
-                status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE
-            else:
-                return
-        else:
-            status = ClientConstants.get_device_state_from_run_edge_state(current_job.status)
-        active_msg = {"ID": self.edge_id, "status": status}
-        MLOpsStatus.get_instance().set_client_agent_status(self.edge_id, status)
-        self.mqtt_mgr.send_message_json(active_topic, json.dumps(active_msg))
-
-    def recover_start_deployment_msg_after_upgrading(self):
-        try:
-            current_job = FedMLClientDataInterface.get_instance().get_current_job()
-            if current_job is not None and \
-                    current_job.status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING:
-                logging.info("start deployment after upgrading.")
-                topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id))
-                self.callback_start_deployment(topic_start_deployment, current_job.running_json)
-        except Exception as e:
-            logging.info("recover starting deployment message after upgrading: {}".format(traceback.format_exc()))
-
-    def on_agent_mqtt_connected(self, mqtt_client_object):
-        # The MQTT message topic format is as follows: <sender>/<receiver>/<action>
-
-        # Setup MQTT message listener for starting deployment
-        topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id))
-        self.mqtt_mgr.add_message_listener(topic_start_deployment, self.callback_start_deployment)
-
-        # Setup MQTT message listener for delete deployment
-        topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(self.edge_id))
-        self.mqtt_mgr.add_message_listener(topic_delete_deployment, self.callback_delete_deployment)
-
-        # Setup MQTT message listener for running failed
-        topic_exit_train_with_exception = "flserver_agent/" + str(self.edge_id) + "/exit_train_with_exception"
-        self.mqtt_mgr.add_message_listener(topic_exit_train_with_exception, self.callback_exit_train_with_exception)
-
-        # Setup MQTT message listener for client status switching
-        topic_client_status = "fl_client/flclient_agent_" + str(self.edge_id) + "/status"
-        self.mqtt_mgr.add_message_listener(topic_client_status, self.callback_runner_id_status)
-
-        # Setup MQTT message listener to report current device status.
-        topic_report_status = "mlops/report_device_status"
-        self.mqtt_mgr.add_message_listener(topic_report_status, self.callback_report_current_status)
-
-        # Setup MQTT message listener to OTA messages from the MLOps.
-        topic_ota_msg = "mlops/flclient_agent_" + str(self.edge_id) + "/ota"
-        self.mqtt_mgr.add_message_listener(topic_ota_msg, self.callback_client_ota_msg)
-
-        if self.mqtt_inference_obj is None:
-            self.mqtt_inference_obj = FedMLMqttInference(agent_config=self.agent_config, mqtt_mgr=self.mqtt_mgr)
-        self.mqtt_inference_obj.setup_listener_for_endpoint_inference_request(self.edge_id)
-
-        # Subscribe topics for starting deployment, stopping deployment and fetching client status.
-        mqtt_client_object.subscribe(topic_start_deployment, qos=2)
-        mqtt_client_object.subscribe(topic_delete_deployment, qos=2)
-        mqtt_client_object.subscribe(topic_client_status, qos=2)
-        mqtt_client_object.subscribe(topic_report_status, qos=2)
-        mqtt_client_object.subscribe(topic_exit_train_with_exception, qos=2)
-        mqtt_client_object.subscribe(topic_ota_msg, qos=2)
-
-        self.subscribed_topics.clear()
-        self.subscribed_topics.append(topic_start_deployment)
-        self.subscribed_topics.append(topic_delete_deployment)
-        self.subscribed_topics.append(topic_client_status)
-        self.subscribed_topics.append(topic_report_status)
-        self.subscribed_topics.append(topic_exit_train_with_exception)
-        self.subscribed_topics.append(topic_ota_msg)
-
-        # Broadcast the first active message.
-        self.send_agent_active_msg()
-
-        # Echo results
-        # print("\n\nCongratulations, your device is connected to the FedML MLOps platform successfully!")
-        # print(
-        #     "Your FedML Edge ID is " + str(self.edge_id) + ", unique device ID is "
-        #     + str(self.unique_device_id)
-        #     + "\n"
-        # )
-
-        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
-
-    def on_agent_mqtt_disconnected(self, mqtt_client_object):
-        MLOpsStatus.get_instance().set_client_agent_status(
-            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE
-        )
-
-        try:
-            if self.mqtt_inference_obj is not None:
-                self.mqtt_inference_obj.remove_listener_for_endpoint_inference_request(self.edge_id)
-        except Exception as e:
-            pass
-
-    def setup_agent_mqtt_connection(self, service_config):
-        # Setup MQTT connection
-        self.mqtt_mgr = MqttManager(
-            service_config["mqtt_config"]["BROKER_HOST"],
-            service_config["mqtt_config"]["BROKER_PORT"],
-            service_config["mqtt_config"]["MQTT_USER"],
-            service_config["mqtt_config"]["MQTT_PWD"],
-            service_config["mqtt_config"]["MQTT_KEEPALIVE"],
-            "FedML_ModelClientAgent_Daemon_@" + self.user_name + "@_" + self.args.current_device_id + str(uuid.uuid4()),
-            "flclient_agent/last_will_msg",
-            json.dumps({"ID": self.edge_id, "status": ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE})
-        )
-        self.agent_config = service_config
-
-        # Init local database
-        FedMLClientDataInterface.get_instance().create_job_table()
-        try:
-            FedMLModelDatabase.get_instance().set_database_base_dir(ClientConstants.get_database_dir())
-            FedMLModelDatabase.get_instance().create_table()
-        except Exception as e:
-            pass
-
-        client_api_cmd = "fedml.computing.scheduler.model_scheduler.device_client_api:api"
-        client_api_pids = RunProcessUtils.get_pid_from_cmd_line(client_api_cmd)
-        if client_api_pids is None or len(client_api_pids) <= 0:
-            # Start local API services
-            cur_dir = os.path.dirname(__file__)
-            fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
-            python_program = get_python_program()
-            self.local_api_process = ClientConstants.exec_console_with_script(
-                "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
-                "--log-level critical".format(
-                    python_program, client_api_cmd,
-                    ClientConstants.LOCAL_CLIENT_API_PORT, fedml_base_dir
-                ),
-                should_capture_stdout=False,
-                should_capture_stderr=False
-            )
-            # if self.local_api_process is not None and self.local_api_process.pid is not None:
-            #     print(f"Model worker local API process id {self.local_api_process.pid}")
-
-        # MLOpsRuntimeLogDaemon.get_instance(self.args).stop_all_log_processor()
-
-        # Setup MQTT connected listener
-        self.mqtt_mgr.add_connected_listener(self.on_agent_mqtt_connected)
-        self.mqtt_mgr.add_disconnected_listener(self.on_agent_mqtt_disconnected)
-        self.mqtt_mgr.connect()
-
-        self.setup_client_mqtt_mgr()
-        self.mlops_metrics.report_client_training_status(
-            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE, is_from_model=True)
-        MLOpsStatus.get_instance().set_client_agent_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE)
-
-        self.recover_start_deployment_msg_after_upgrading()
-
-    def stop_agent(self):
-        if self.run_process_event is not None:
-            self.run_process_event.set()
-
-        if self.mqtt_mgr is not None:
-            try:
-                for topic in self.subscribed_topics:
-                    self.mqtt_mgr.unsubscribe_msg(topic)
-            except Exception as e:
-                pass
-
-            self.mqtt_mgr.loop_stop()
-            self.mqtt_mgr.disconnect()
-
-        self.release_client_mqtt_mgr()
-
-    def start_agent_mqtt_loop(self, should_exit_sys=False):
-        # Start MQTT message loop
-        try:
-            self.mqtt_mgr.loop_forever()
-        except Exception as e:
-            if str(e) == "Restarting after upgraded...":
-                logging.info("Restarting after upgraded...")
-            else:
-                logging.info("Client tracing: {}".format(traceback.format_exc()))
-        finally:
-            self.stop_agent()
-
-            if should_exit_sys:
-                time.sleep(5)
-                sys.exit(1)
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
index 9a997a80e2..f637ccde1d 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
@@ -18,6 +18,11 @@ class FedMLModelCache(Singleton):
     FEDML_MODEL_DEVICE_INFO_TAG = "FEDML_MODEL_DEVICE_INFO_TAG-"
     FEDML_MODEL_END_POINT_TOKEN_TAG = "FEDML_MODEL_END_POINT_TOKEN_TAG-"
     FEDML_MODEL_ROUND_ROBIN_PREVIOUS_DEVICE_TAG = "FEDML_MODEL_ROUND_ROBIN_PREVIOUS_DEVICE_TAG-"
+    FEDML_MODEL_ENDPOINT_REPLICA_NUM_TAG = "FEDML_MODEL_ENDPOINT_REPLICA_NUM_TAG-"
+
+    # On the worker
+    FEDML_MODEL_REPLICA_GPU_IDS_TAG = "FEDML_MODEL_REPLICA_GPU_IDS_TAG-"
+
     FEDML_KEY_COUNT_PER_SCAN = 1000
 
     def __init__(self):
@@ -85,16 +90,16 @@ def get_instance(redis_addr="local", redis_port=6379):
         return FedMLModelCache()
 
     def set_deployment_result(self, end_point_id, end_point_name,
-                              model_name, model_version, device_id, deployment_result):
-        result_dict = {"cache_device_id": device_id, "result": deployment_result}
+                              model_name, model_version, device_id, deployment_result, replica_no):
+        result_dict = {"cache_device_id": device_id, "cache_replica_no": replica_no, "result": deployment_result}
         try:
-            # Delete old result
+            # Delete old result using (e_id, end_point_name, model_name, device_id, replica_no)
             # In this list, find the result's complete record, delete it.
             result_list = self.redis_connection.lrange(
                 self.get_deployment_result_key(end_point_id, end_point_name, model_name), 0, -1)
             for result_item in result_list:
-                result_device_id, result_payload = self.get_result_item_info(result_item)
-                if result_device_id == device_id:
+                res_device_id, res_replica_no, res_payload = self.get_result_item_info(result_item)
+                if res_device_id == device_id and res_replica_no == replica_no:
                     self.redis_connection.lrem(
                         self.get_deployment_result_key(end_point_id, end_point_name, model_name), 0, result_item)
 
@@ -105,18 +110,20 @@ def set_deployment_result(self, end_point_id, end_point_name,
             pass
         self.model_deployment_db.set_deployment_result(end_point_id, end_point_name,
                                                        model_name, model_version,
-                                                       device_id, deployment_result)
+                                                       device_id, deployment_result, replica_no)
 
     def set_deployment_status(self, end_point_id, end_point_name,
-                              model_name, model_version, device_id, deployment_status):
+                              model_name, model_version, device_id, deployment_status, replica_no):
         status_dict = {"cache_device_id": device_id, "status": deployment_status}
         try:
-            self.redis_connection.rpush(self.get_deployment_status_key(end_point_id, end_point_name, model_name), json.dumps(status_dict))
+            # rpush could tolerate the same e_id, d_id with different r_no
+            self.redis_connection.rpush(self.get_deployment_status_key(end_point_id, end_point_name, model_name),
+                                        json.dumps(status_dict))
         except Exception as e:
             pass
         self.model_deployment_db.set_deployment_status(end_point_id, end_point_name,
                                                        model_name, model_version,
-                                                       device_id, deployment_status)
+                                                       device_id, deployment_status, replica_no)
 
     def delete_deployment_status(self, element: str, end_point_id, end_point_name, model_name):
         self.redis_connection.lrem(self.get_deployment_status_key(end_point_id, end_point_name, model_name),
@@ -131,10 +138,32 @@ def delete_deployment_result(self, element: str, end_point_id, end_point_name, m
         except Exception as e:
             pass
 
+    def delete_deployment_result_with_device_id_and_replica_no(self, end_point_id, end_point_name, model_name,
+                                                               device_id, replica_no_to_delete):
+        result_item_found = None
+
+        result_list = self.get_deployment_result_list(
+            end_point_id, end_point_name, model_name)
+
+        for result_item in result_list:
+            cache_device_id, cache_replica_no, result_payload = (
+                self.get_result_item_info(result_item))
+
+            if str(cache_device_id) == str(device_id) and cache_replica_no == replica_no_to_delete:
+                result_item_found = result_item
+                break
+
+        # Delete the replica element
+        if result_item_found is not None:
+            self.delete_deployment_result(
+                result_item_found, end_point_id, end_point_name, model_name)
+
     def get_deployment_result_list(self, end_point_id, end_point_name, model_name):
         try:
-            result_list = self.redis_connection.lrange(self.get_deployment_result_key(end_point_id, end_point_name, model_name), 0, -1)
+            result_list = self.redis_connection.lrange(
+                self.get_deployment_result_key(end_point_id, end_point_name, model_name), 0, -1)
         except Exception as e:
+            logging.info(e)
             result_list = None
 
         if result_list is None or len(result_list) <= 0:
@@ -144,13 +173,14 @@ def get_deployment_result_list(self, end_point_id, end_point_name, model_name):
                     self.redis_connection.rpush(self.get_deployment_result_key(end_point_id, end_point_name, model_name),
                                                 json.dumps(result))
             except Exception as e:
+                logging.info(e)
                 pass
         return result_list
 
     def delete_deployment_result(self, element: str, end_point_id, end_point_name, model_name):
         self.redis_connection.lrem(self.get_deployment_result_key(end_point_id, end_point_name, model_name),
                                    0, element)
-        device_id, _ = self.get_result_item_info(element)
+        device_id, _, _ = self.get_result_item_info(element)
         self.model_deployment_db.delete_deployment_result(device_id, end_point_id, end_point_name, model_name)
 
     def get_deployment_result_list_size(self, end_point_id, end_point_name, model_name):
@@ -192,50 +222,43 @@ def get_result_item_info(self, result_item):
         result_item_json = json.loads(result_item)
         if isinstance(result_item_json, str):
             result_item_json = json.loads(result_item_json)
+
         device_id = result_item_json["cache_device_id"]
+        replica_no = result_item_json["cache_replica_no"]
+
         if isinstance(result_item_json["result"], str):
             result_payload = json.loads(result_item_json["result"])
         else:
             result_payload = result_item_json["result"]
-        return device_id, result_payload
+        return device_id, replica_no, result_payload
 
     def get_idle_device(self, end_point_id, end_point_name,
                         model_name, model_version,
-                        check_end_point_status=True):
-        # Find all deployed devices
-        try:
-            status_list = self.get_deployment_status_list(end_point_id, end_point_name, model_name)  # DEPLOYMENT_STATUS
-        except Exception as e:
-            logging.error(f"get_deployment_status_list failed {e}")
-            return None, None
+                        check_end_point_status=True, limit_specific_model_version=False):
+        # Deprecated the model status logic, query directly from the deployment result list
+        idle_device_list = list()
 
-        if len(status_list) == 0:
-            return None, None
+        result_list = self.get_deployment_result_list(end_point_id, end_point_name, model_name)
 
-        idle_device_list = list()
-        if model_version == "latest":
-            model_version = self.get_latest_version(status_list)
-        logging.info(f"model_version {model_version}")
+        for result_item in result_list:
+            device_id, _, result_payload = self.get_result_item_info(result_item)
+            found_end_point_id = result_payload["end_point_id"]
+            found_end_point_name = result_payload["end_point_name"]
+            found_model_name = result_payload["model_name"]
+            found_model_version = result_payload["model_version"]
+
+            if (str(found_end_point_id) == str(end_point_id) and found_end_point_name == end_point_name and
+                    found_model_name == model_name and
+                    (not limit_specific_model_version or found_model_version == model_version)):
+                if "model_status" in result_payload and result_payload["model_status"] == "DEPLOYED":
+                    idle_device_list.append({"device_id": device_id, "end_point_id": end_point_id})
+
+        logging.info(f"{len(idle_device_list)} devices has this model on it: {idle_device_list}")
 
-        # iterate all devices, find those with correct version and deployed
-        for status_item in status_list:
-            try:
-                device_id, status_payload = self.get_status_item_info(status_item)
-                logging.info(f"status_payload {status_payload}")
-                model_status = status_payload["model_status"]
-                model_version_cached = status_payload["model_version"]
-                end_point_id_cache = status_payload["end_point_id"]
-                logging.info(f"model_version {model_version}, model_version_cache {model_version_cached}")
-                if (model_version == model_version_cached or model_version == "*") and \
-                        model_status == ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED:
-                    idle_device_list.append({"device_id": device_id, "end_point_id": end_point_id_cache})
-            except Exception as e:
-                logging.info(f"Get idle device list Failed: {e}, status_item {status_item}")
-                pass
         if len(idle_device_list) <= 0:
             return None, None
-        logging.info(f"{len(idle_device_list)} devices has this model on it: {idle_device_list}")
-        # Randomly shuffle
+
+        # Randomly shuffle
         # shuffle the list of deployed devices and get the first one as the target idle device.
         # if len(idle_device_list) <= 0:
         #     return None, None
@@ -271,18 +294,17 @@ def get_idle_device(self, end_point_id, end_point_name,
 
         # Find deployment result from the target idle device.
         try:
-            result_list = self.get_deployment_result_list(end_point_id, end_point_name, model_name)
             for result_item in result_list:
-                device_id, result_payload = self.get_result_item_info(result_item)
+                logging.info("enter the for loop")
+                device_id, _, result_payload = self.get_result_item_info(result_item)
                 found_end_point_id = result_payload["end_point_id"]
                 found_end_point_name = result_payload["end_point_name"]
-                # Check whether the end point is activated.
-                if check_end_point_status:
-                    end_point_activated = self.get_end_point_activation(found_end_point_id)
-                    if not end_point_activated:
-                        continue
+                found_model_status = result_payload["model_status"]
 
-                if found_end_point_id == idle_device_dict["end_point_id"] \
+                if found_model_status != "DEPLOYED":
+                    continue
+
+                if str(found_end_point_id) == str(idle_device_dict["end_point_id"]) \
                         and device_id == idle_device_dict["device_id"]:
                     if same_model_device_rank > 0:
                         same_model_device_rank -= 1
@@ -316,10 +338,13 @@ def get_latest_version(self, status_list):
         return latest_version
 
     def get_deployment_result_with_device_id(self, end_point_id, end_point_name, model_name, device_id):
+        """
+        TODO: Return multiple replicas' result for the same device_id
+        """
         try:
             result_list = self.get_deployment_result_list(end_point_id, end_point_name, model_name)
             for result_item in result_list:
-                result_device_id, result_payload = self.get_result_item_info(result_item)
+                result_device_id, _, result_payload = self.get_result_item_info(result_item)
                 found_end_point_id = result_payload["end_point_id"]
 
                 end_point_activated = self.get_end_point_activation(found_end_point_id)
@@ -363,9 +388,29 @@ def set_end_point_activation(self, end_point_id, end_point_name, activate_status
             pass
         self.model_deployment_db.set_end_point_activation(end_point_id, end_point_name, status)
 
+    def set_replica_gpu_ids(self, end_point_id, end_point_name, model_name, device_id, replica_no, gpu_ids):
+        # Convert the list to string
+        try:
+            self.redis_connection.set(self.get_replica_gpu_ids_key(end_point_id, end_point_name,
+                                                                   model_name, device_id, replica_no), str(gpu_ids))
+        except Exception as e:
+            print(e)
+            logging.error(e)
+
+        # TODO: Use Sqlite for the replica backup
+
+    def get_replica_gpu_ids(self, end_point_id, end_point_name, model_name, device_id, replica_no):
+        try:
+            if self.redis_connection.exists(self.get_replica_gpu_ids_key(end_point_id, end_point_name,
+                                                                         model_name, device_id, replica_no)):
+                return self.redis_connection.get(self.get_replica_gpu_ids_key(end_point_id, end_point_name,
+                                                                              model_name, device_id, replica_no))
+        except Exception as e:
+            pass
+
     def delete_end_point(self, end_point_id, end_point_name, model_name, model_version):
         try:
-            print("Will Delete the realated redis keys permanently")
+            logging.info("Will Delete the related redis keys permanently")
             self.redis_connection.expire(self.get_deployment_result_key(end_point_id, end_point_name, model_name), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
             self.redis_connection.expire(self.get_deployment_status_key(end_point_id, end_point_name, model_name), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
             self.redis_connection.expire(self.get_monitor_metrics_key(end_point_id, end_point_name, model_name, model_version), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
@@ -375,29 +420,16 @@ def delete_end_point(self, end_point_id, end_point_name, model_name, model_versi
             self.redis_connection.expire(self.get_end_point_activation_key(end_point_id), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
             self.redis_connection.expire(self.get_end_point_status_key(end_point_id), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
         except Exception as e:
+            logging.error(f"error when deleting the redis keys: {e}")
             pass
 
     def get_end_point_activation(self, end_point_id):
-        status_int = -1
-        try:
-            if self.redis_connection.exists(self.get_end_point_activation_key(end_point_id)):
-                status_int = self.redis_connection.get(self.get_end_point_activation_key(end_point_id))
-        except Exception as e:
-            pass
-
-        if status_int == -1:
-            status_int = self.model_deployment_db.get_end_point_activation(end_point_id)
-            try:
-                self.redis_connection.set(self.get_end_point_activation_key(end_point_id), status_int)
-            except Exception as e:
-                pass
-
-        status = True if int(status_int) == 1 else False
-        return status
+        # [Deprecated] activation logic is removed
+        return True
 
     def get_end_point_full_key_by_id(self, end_point_id):
-        # e.g. FEDML_MODEL_DEPLOYMENT_STATUS--1234-dummy_endpoint_name-dummy_model_name
-        target_prefix = f"{FedMLModelCache.FEDML_MODEL_DEPLOYMENT_STATUS_TAG}-{end_point_id}-*"
+        # e.g. FEDML_MODEL_DEPLOYMENT_RESULT--1234-dummy_endpoint_name-dummy_model_name
+        target_prefix = f"{FedMLModelCache.FEDML_MODEL_DEPLOYMENT_RESULT_TAG}-{end_point_id}-*"
         status_list = list()
         for key in self.redis_connection.scan_iter(target_prefix):
             status_list.append(key)
@@ -497,6 +529,19 @@ def get_end_point_token(self, end_point_id, end_point_name, model_name):
 
         return token
 
+    def get_endpoint_devices_replica_num(self, end_point_id):
+        """
+        Return the serialized replica-num mapping stored in redis (e.g. "{id1: 1, id2: 1}"); None if it does not exist.
+        """
+        try:
+            replica_num = self.redis_connection.get(
+                self.get_endpoint_replica_num_key(end_point_id))
+        except Exception as e:
+            replica_num = None
+        # TODO: Use Sqlite for the replica backup
+
+        return replica_num
+
     def get_deployment_result_key(self, end_point_id, end_point_name, model_name):
         return "{}-{}-{}-{}".format(FedMLModelCache.FEDML_MODEL_DEPLOYMENT_RESULT_TAG, end_point_id, end_point_name, model_name)
 
@@ -518,6 +563,14 @@ def get_deployment_token_key(self, end_point_id, end_point_name, model_name):
     def get_round_robin_prev_device(self, end_point_id, end_point_name, model_name, version):
         return "{}-{}-{}-{}-{}".format(FedMLModelCache.FEDML_MODEL_ROUND_ROBIN_PREVIOUS_DEVICE_TAG, end_point_id, end_point_name, model_name, version)
 
+    def get_endpoint_replica_num_key(self, end_point_id):
+        return "{}-{}-{}-{}".format(FedMLModelCache.FEDML_MODEL_ENDPOINT_REPLICA_NUM_TAG, end_point_id, "replica_num", "key")
+
+    @staticmethod
+    def get_replica_gpu_ids_key(end_point_id, end_point_name, model_name, device_id, replica_no):
+        return "{}-{}-{}-{}-{}-{}".format(FedMLModelCache.FEDML_MODEL_REPLICA_GPU_IDS_TAG, end_point_id,
+                                          end_point_name, model_name, device_id, replica_no)
+
     def set_monitor_metrics(self, end_point_id, end_point_name,
                             model_name, model_version,
                             total_latency, avg_latency,
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_db.py b/python/fedml/computing/scheduler/model_scheduler/device_model_db.py
index e11e098caf..6ee7af1cdd 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_db.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_db.py
@@ -31,20 +31,25 @@ def set_database_base_dir(self, database_base_dir):
         self.db_base_dir = database_base_dir
 
     def set_deployment_result(self, end_point_id, end_point_name, model_name, model_version,
-                              device_id, deployment_result):
+                              device_id, deployment_result, replica_no):
         self.set_deployment_results_info(end_point_id, end_point_name, model_name, model_version,
-                                         device_id, deployment_result=deployment_result)
+                                         device_id, deployment_result=deployment_result, replica_no=replica_no)
 
     def set_deployment_status(self, end_point_id, end_point_name, model_name, model_version,
-                              device_id, deployment_status):
+                              device_id, deployment_status, replica_no):
         self.set_deployment_results_info(end_point_id, end_point_name, model_name, model_version,
-                                         device_id, deployment_status=deployment_status)
+                                         device_id, deployment_status=deployment_status, replica_no=replica_no)
 
     def get_deployment_result_list(self, end_point_id, end_point_name, model_name, model_version=None):
+        """
+        Query the deployment results from the sqlite db using end_point_id.
+        """
         result_list = self.get_deployment_results_info(end_point_id, end_point_name, model_name, model_version)
         ret_result_list = list()
         for result in result_list:
-            result_dict = {"cache_device_id": result.device_id, "result": result.deployment_result}
+            result_dict = {"cache_device_id": result.device_id,
+                           "cache_replica_no": result.replica_no,
+                           "result": result.deployment_result}
             ret_result_list.append(json.dumps(result_dict))
         return ret_result_list
 
@@ -58,18 +63,24 @@ def get_deployment_status_list(self, end_point_id, end_point_name, model_name, m
         return ret_status_list
 
     def get_deployment_result_with_device_id(self, end_point_id, end_point_name, model_name, device_id):
+        """
+        Return a list of replica results for the given end_point_id, end_point_name, model_name and device_id.
+        """
+        replica_result_list = list()
         try:
             result_list = self.get_deployment_result_list(end_point_id, end_point_name, model_name)
             for result_item in result_list:
-                result_device_id, result_payload = self.get_result_item_info(result_item)
+                result_device_id, _, result_payload = self.get_result_item_info(result_item)
                 found_end_point_id = result_payload["end_point_id"]
 
                 if str(found_end_point_id) == str(end_point_id) and str(result_device_id) == str(device_id):
-                    return result_payload
+                    replica_result_list.append(result_payload)
         except Exception as e:
-            logging.info(e)
+            # Do not interfere with other endpoints on this device
+            logging.error(f"Error in get_deployment_result_with_device_id: {e}")
+            return None
 
-        return None
+        return replica_result_list
 
     def get_deployment_status_with_device_id(self, end_point_id, end_point_name, model_name, device_id):
         try:
@@ -124,6 +135,18 @@ def delete_deployment_result_with_device_id(self, end_point_id, end_point_name,
                  FedMLDeploymentResultInfoModel.device_id == f'{device_id}')).delete()
         self.db_connection.commit()
 
+    def delete_deployment_result_with_device_id_and_rank(self, end_point_id, end_point_name, model_name,
+                                                         device_id, replica_rank):
+        replica_no = replica_rank + 1
+        self.open_job_db()
+        self.db_connection.query(FedMLDeploymentResultInfoModel).filter(
+            and_(FedMLDeploymentResultInfoModel.end_point_id == f'{end_point_id}',
+                 FedMLDeploymentResultInfoModel.end_point_name == f'{end_point_name}',
+                 FedMLDeploymentResultInfoModel.model_name == f'{model_name}',
+                 FedMLDeploymentResultInfoModel.device_id == f'{device_id}',
+                 FedMLDeploymentResultInfoModel.replica_no == f'{replica_no}')).delete()
+        self.db_connection.commit()
+
     def delete_deployment_run_info(self, end_point_id):
         # db / table -> model-deployment.db / "deployment_run_info"
         self.open_job_db()
@@ -136,11 +159,13 @@ def get_result_item_info(self, result_item):
         if isinstance(result_item_json, dict):
             result_item_json = json.loads(result_item)
         device_id = result_item_json["cache_device_id"]
+        replica_no = result_item_json["cache_replica_no"]
+
         if isinstance(result_item_json["result"], str):
             result_payload = json.loads(result_item_json["result"])
         else:
             result_payload = result_item_json["result"]
-        return device_id, result_payload
+        return device_id, replica_no, result_payload
 
     def get_status_item_info(self, status_item):
         status_item_json = json.loads(status_item)
@@ -274,17 +299,19 @@ def get_deployment_results_info(self, end_point_id, end_point_name, model_name,
 
     def set_deployment_results_info(self, end_point_id, end_point_name,
                                     model_name, model_version, device_id,
-                                    deployment_result=None, deployment_status=None):
-        '''
-        end_point_id + device_id is unique identifier, 
+                                    deployment_result=None, deployment_status=None, replica_no=None):
+        """
+        end_point_id + device_id + replica_no is unique identifier,
         we do not allow duplicate records
-        '''
+        """
         self.open_job_db()
         result_info = self.db_connection.query(FedMLDeploymentResultInfoModel). \
             filter(and_(FedMLDeploymentResultInfoModel.end_point_id == f'{end_point_id}',
                         FedMLDeploymentResultInfoModel.end_point_name == f'{end_point_name}',
                         FedMLDeploymentResultInfoModel.model_name == f'{model_name}',
-                        FedMLDeploymentResultInfoModel.device_id == f'{device_id}')).first()
+                        FedMLDeploymentResultInfoModel.device_id == f'{device_id}',
+                        FedMLDeploymentResultInfoModel.replica_no == f'{replica_no}'
+                        )).first()
         # Insert
         if result_info is None:
             result_info = FedMLDeploymentResultInfoModel(end_point_id=end_point_id,
@@ -293,7 +320,9 @@ def set_deployment_results_info(self, end_point_id, end_point_name,
                                                          model_version=model_version,
                                                          device_id=device_id,
                                                          deployment_result=deployment_result,
-                                                         deployment_status=deployment_status)
+                                                         deployment_status=deployment_status,
+                                                         replica_no=replica_no
+                                                         )
             self.db_connection.add(result_info)
             self.db_connection.commit()
             return
@@ -439,6 +468,7 @@ class FedMLDeploymentResultInfoModel(Base):
     device_id = Column(TEXT)
     deployment_result = Column(TEXT)
     deployment_status = Column(TEXT)
+    replica_no = Column(TEXT)
 
 
 class FedMLDeploymentRunInfoModel(Base):
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index 0a8c3b6ce9..3a6d891e58 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -34,36 +34,15 @@
 
 from .device_http_inference_protocol import FedMLHttpInference
 
+from fedml.computing.scheduler.model_scheduler.device_model_cache import FedMLModelCache
 
 no_real_gpu_allocation = None
 
 
-class CPUUnpickler(pickle.Unpickler):
-    def find_class(self, module, name):
-        if module == 'torch.storage' and name == '_load_from_bytes':
-            return lambda b: torch.load(io.BytesIO(b), map_location='cpu')
-        else:
-            return super().find_class(module, name)
-
-
 def request_gpu_ids_on_deployment(edge_id, end_point_id, num_gpus=None, master_device_id=None):
     gpu_ids = None
     client_device_id = os.getenv("FEDML_CURRENT_EDGE_ID")
 
-    try:
-        ComputeCacheManager.get_instance().set_redis_params()
-        with ComputeCacheManager.get_instance().lock(
-                ComputeCacheManager.get_instance().get_gpu_cache().get_device_run_lock_key(edge_id, end_point_id)
-        ):
-            if num_gpus is None:
-                num_gpus = ComputeCacheManager.get_instance().get_gpu_cache().get_device_run_num_gpus(edge_id, end_point_id)
-                num_gpus = int(num_gpus) if num_gpus is not None and str(num_gpus) != "" else 1
-            gpu_ids = ComputeCacheManager.get_instance().get_gpu_cache().get_device_run_gpu_ids(edge_id, end_point_id)
-    except Exception as e:
-        logging.info(f"Execption when request gpu ids. {traceback.format_exc()}")
-        gpu_ids = None
-        raise e
-
     if gpu_ids is None:
         cuda_visable_gpu_ids = JobRunnerUtils.get_instance().occupy_gpu_ids(
             end_point_id, num_gpus, client_device_id, inner_id=end_point_id,
@@ -92,234 +71,127 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
                      inference_use_gpu, inference_memory_size,
                      inference_convertor_image, inference_server_image,
                      infer_host, model_is_from_open, model_params,
-                     model_from_open, token, master_ip, edge_id, master_device_id=None):
+                     model_from_open, token, master_ip, edge_id, master_device_id=None, replica_rank=0,
+                     gpu_per_replica=1):
     logging.info("Model deployment is starting...")
 
-    use_simulation_test_without_triton = False
-    model_metadata = {'name': inference_model_name,
-                      'versions': ['1'], 'platform': 'onnxruntime_onnx',
-                      'inputs': [{'name': 'input2', 'datatype': 'INT32', 'shape': [1, 24]},
-                                 {'name': 'input1', 'datatype': 'FP32', 'shape': [1, 2]}],
-                      'outputs': [{'name': 'output', 'datatype': 'FP32', 'shape': [1]}]}
-    model_config = {
-        "platform": "onnxruntime",
-        "max_batch_size": 1,
-        "input_size": [[1, 24], [1, 2]],
-        "input_types": ["int", "float"],
-        "input": [
-            {
-                "name": "input",
-                "data_type": "TYPE_FP32",
-                "dims": []
-            }
-        ],
-        "output": [
-            {
-                "name": "output",
-                "data_type": "TYPE_FP32",
-                "dims": []
-            }
-        ]
-    }
-
     sudo_prefix = "sudo "
     sys_name = platform.system()
     if sys_name == "Darwin":
         sudo_prefix = ""
-    num_gpus = 0
+    num_gpus = gpu_per_replica    # Real gpu per replica (container)
     gpu_ids, gpu_attach_cmd = None, ""
 
     running_model_name = ClientConstants.get_running_model_name(
         end_point_name, inference_model_name, model_version, end_point_id, model_id, edge_id=edge_id)
 
-    # Check whether triton server is running.
-    triton_server_is_running = False
-    if not use_simulation_test_without_triton:
-        triton_server_container_name = "{}".format(ClientConstants.FEDML_TRITON_SERVER_CONTAINER_NAME_PREFIX)
-        if not ClientConstants.is_running_on_k8s():
-            check_triton_server_running_cmds = "{}docker ps |grep {}".format(sudo_prefix, triton_server_container_name)
-            running_process = ClientConstants.exec_console_with_script(check_triton_server_running_cmds,
-                                                                       should_capture_stdout=True,
-                                                                       should_capture_stderr=True)
-            ret_code, out, err = ClientConstants.get_console_pipe_out_err_results(running_process)
-            if out is not None:
-                out_str = sys_utils.decode_our_err_result(out)
-                if str(out_str) != "":
-                    triton_server_is_running = True
-
-    # Convert models from pytorch to onnx format
     if model_is_from_open:
-        if model_from_open is None:
-            return running_model_name, "", model_version, {}, {}
-
-        logging.info("model binary file: {}".format(model_bin_file))
-        with open(model_bin_file, 'rb') as model_pkl_file:
-            if not torch.cuda.is_available():
-                try:
-                    open_model_params = CPUUnpickler(model_pkl_file).load()
-                except Exception as ex:
-                    logging.info("load model exceptions when using CPU_Unpickler: {}".format(traceback.format_exc()))
-                    return "", "", model_version, model_metadata, model_config
-            else:
-                open_model_params = pickle.load(model_pkl_file)
-            model_from_open.load_state_dict(open_model_params)
-            model_from_open.eval()
-
-        if inference_engine == ClientConstants.INFERENCE_ENGINE_TYPE_INT_TRITON:
-            logging.info("convert the onnx model when the mode is from FedML® Nexus AI Platform..")
-            logging.info("Input size {}, input types {}".format(model_params["input_size"],
-                                                                model_params["input_types"]))
-            input_size = model_params["input_size"]
-            input_types = model_params["input_types"]
-
-            dummy_input_list = []
-            for index, input_i in enumerate(input_size):
-                if input_types[index] == "int":
-                    this_input = torch.randint(0, 1, input_i).clone().detach()
-                else:
-                    this_input = torch.zeros(input_i).clone().detach()
-                dummy_input_list.append(this_input)
-
-            onnx_model_path = os.path.join(model_storage_local_path,
-                                           ClientConstants.FEDML_CONVERTED_MODEL_DIR_NAME,
-                                           running_model_name, ClientConstants.INFERENCE_MODEL_VERSION)
-            if not os.path.exists(onnx_model_path):
-                os.makedirs(onnx_model_path, exist_ok=True)
-            onnx_model_path = os.path.join(onnx_model_path, "model.onnx")
-
-            convert_model_to_onnx(model_from_open, onnx_model_path, dummy_input_list, input_size)
-        elif ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT:  # we do not convert the model to onnx in llm
-            logging.info("LLM model loaded from the open")
+        logging.error("The model is directly exported from open; currently we do not convert it to a servable format.")
+        return "", "", None, None, None
+
+    # Parse the model config file and get the necessary information for the deployment
+    model_config_path = os.path.join(model_storage_local_path, "fedml_model_config.yaml")
+    with open(model_config_path, 'r') as file:
+        config = yaml.safe_load(file)
+        # Resource related
+        use_gpu = config.get('use_gpu', True)
+        in_gpu_ids = config.get('gpu_ids', gpu_ids)
+        num_gpus_frm_yml = config.get('num_gpus', None)
+        if not use_gpu:
+            num_gpus = 0
         else:
-            raise Exception("Unsupported inference engine type: {}".format(inference_engine))
-    elif model_is_from_open == False or model_is_from_open is None:
-        model_location = os.path.join(model_storage_local_path, "fedml_model.bin")
-        try:
-            model = torch.jit.load(model_location)
-            model.eval()
-        except Exception as e:
-            logging.info(
-                "Cannot locate the .bin file, will read it from"
-                " the fedml_model_config.yaml with the key [local_model_dir] ")
-            model_config_path = os.path.join(model_storage_local_path, "fedml_model_config.yaml")
-            with open(model_config_path, 'r') as file:
-                config = yaml.safe_load(file)
-                # Resource related
-                use_gpu = config.get('use_gpu', False)
-                in_gpu_ids = config.get('gpu_ids', gpu_ids)
-                num_gpus = config.get('num_gpus', None)
-                if not use_gpu:
-                    num_gpus = 0
-                else:
-                    if num_gpus is None:
-                        num_gpus = len(in_gpu_ids) if in_gpu_ids is not None else 1
-                usr_indicated_wait_time = config.get('deploy_timeout', 900)
-                usr_indicated_worker_port = config.get('worker_port', "")
-                if usr_indicated_worker_port == "":
-                    usr_indicated_worker_port = os.environ.get("FEDML_WORKER_PORT", "")
-                shm_size = config.get('shm_size', None)
-                storage_opt = config.get('storage_opt', None)
-                tmpfs = config.get('tmpfs', None)
-                cpus = config.get('cpus', None)
-                if cpus is not None:
-                    cpus = int(cpus)
-                memory = config.get('memory', None)
-
-                if usr_indicated_worker_port == "":
-                    usr_indicated_worker_port = None
-                else:
-                    usr_indicated_worker_port = int(usr_indicated_worker_port)
-
-                worker_port_env = os.environ.get("FEDML_WORKER_PORT", "")
-                worker_port_from_config = config.get('worker_port', "")
-                print(f"usr_indicated_worker_port {usr_indicated_worker_port}, worker port env {worker_port_env}, "
-                      f"worker port from config {worker_port_from_config}")
-
-                usr_indicated_retry_cnt = max(int(usr_indicated_wait_time) // 10, 1)
-                inference_image_name = config.get('inference_image_name',
-                                                  ClientConstants.INFERENCE_SERVER_CUSTOME_IMAGE)
-                image_pull_policy = config.get('image_pull_policy', SchedulerConstants.IMAGE_PULL_POLICY_IF_NOT_PRESENT)
-
-                # Source code dir, bootstrap dir, data cache dir
-                src_code_dir = os.path.join(model_storage_local_path, config.get('source_code_dir', ""))
-
-                # Get the bootstrap and job commands inside the yaml file
-                bootstrap_cmds_str_frm_yaml = config.get('bootstrap', "")
-                job_cmds_str_frm_yaml = config.get('job', "")
-
-                if bootstrap_cmds_str_frm_yaml != "" or job_cmds_str_frm_yaml != "":
-                    auto_gen_bootstrap_file_name = "fedml-deploy-bootstrap-entry-auto-gen.sh"
-                    src_bootstrap_file_path = os.path.join(model_storage_local_path, auto_gen_bootstrap_file_name)
-                    with open(src_bootstrap_file_path, 'w') as f:
-                        f.write("cd /home/fedml/models_serving/\n")
-                        f.write(bootstrap_cmds_str_frm_yaml)
-                        f.write("\n")
-                        f.write("cd /home/fedml/models_serving/\n")
-                        f.write(job_cmds_str_frm_yaml)
-                else:
-                    src_bootstrap_file_path = ""
+            if num_gpus_frm_yml is not None:
+                num_gpus = int(num_gpus_frm_yml)
+        usr_indicated_wait_time = config.get('deploy_timeout', 900)
+        usr_indicated_worker_port = config.get('worker_port', "")
+        if usr_indicated_worker_port == "":
+            usr_indicated_worker_port = os.environ.get("FEDML_WORKER_PORT", "")
+        shm_size = config.get('shm_size', None)
+        storage_opt = config.get('storage_opt', None)
+        tmpfs = config.get('tmpfs', None)
+        cpus = config.get('cpus', None)
+        if cpus is not None:
+            cpus = int(cpus)
+        memory = config.get('memory', None)
+
+        if usr_indicated_worker_port == "":
+            usr_indicated_worker_port = None
+        else:
+            usr_indicated_worker_port = int(usr_indicated_worker_port)
+
+        worker_port_env = os.environ.get("FEDML_WORKER_PORT", "")
+        worker_port_from_config = config.get('worker_port', "")
+        print(f"usr_indicated_worker_port {usr_indicated_worker_port}, worker port env {worker_port_env}, "
+              f"worker port from config {worker_port_from_config}")
+
+        usr_indicated_retry_cnt = max(int(usr_indicated_wait_time) // 10, 1)
+        inference_image_name = config.get('inference_image_name',
+                                          ClientConstants.INFERENCE_SERVER_CUSTOME_IMAGE)
+        image_pull_policy = config.get('image_pull_policy', SchedulerConstants.IMAGE_PULL_POLICY_IF_NOT_PRESENT)
+
+        # Source code dir, bootstrap dir, data cache dir
+        src_code_dir = os.path.join(model_storage_local_path, config.get('source_code_dir', ""))
+
+        # Get the bootstrap and job commands inside the yaml file
+        bootstrap_cmds_str_frm_yaml = config.get('bootstrap', "")
+        job_cmds_str_frm_yaml = config.get('job', "")
+
+        if bootstrap_cmds_str_frm_yaml != "" or job_cmds_str_frm_yaml != "":
+            auto_gen_bootstrap_file_name = "fedml-deploy-bootstrap-entry-auto-gen.sh"
+            src_bootstrap_file_path = os.path.join(model_storage_local_path, auto_gen_bootstrap_file_name)
+            with open(src_bootstrap_file_path, 'w') as f:
+                f.write("cd /home/fedml/models_serving/\n")
+                f.write(bootstrap_cmds_str_frm_yaml)
+                f.write("\n")
+                f.write("cd /home/fedml/models_serving/\n")
+                f.write(job_cmds_str_frm_yaml)
+        else:
+            src_bootstrap_file_path = ""
 
-                data_cache_dir_input = config.get('data_cache_dir', "")
-                request_input_example = config.get('request_input_example', None)
-                extra_envs = config.get('environment_variables', None)
+        data_cache_dir_input = config.get('data_cache_dir', "")
+        request_input_example = config.get('request_input_example', None)
+        extra_envs = config.get('environment_variables', None)
 
-                # Serving dir inside docker
-                dst_model_serving_dir = "/home/fedml/models_serving"
-                relative_entry = config.get('entry_point')
-                if src_bootstrap_file_path != "":
-                    dst_bootstrap_dir = os.path.join(dst_model_serving_dir, auto_gen_bootstrap_file_name)
-                else:
-                    dst_bootstrap_dir = ""
+        # Serving dir inside docker
+        dst_model_serving_dir = "/home/fedml/models_serving"
+        relative_entry = config.get('entry_point')
+        if src_bootstrap_file_path != "":
+            dst_bootstrap_dir = os.path.join(dst_model_serving_dir, auto_gen_bootstrap_file_name)
+        else:
+            dst_bootstrap_dir = ""
 
-                # If using customized image, then bootstrap + job will be the entry point
-                enable_custom_image = config.get("enable_custom_image", False)
-                customized_image_entry_cmd = \
-                    "/bin/bash /home/fedml/models_serving/fedml-deploy-bootstrap-entry-auto-gen.sh"
+        # If using customized image, then bootstrap + job will be the entry point
+        enable_custom_image = config.get("enable_custom_image", False)
+        customized_image_entry_cmd = \
+            "/bin/bash /home/fedml/models_serving/fedml-deploy-bootstrap-entry-auto-gen.sh"
 
-                docker_registry_user_name = config.get("docker_registry_user_name", "")
-                docker_registry_user_password = config.get("docker_registry_user_password", "")
-                docker_registry = config.get("docker_registry", "")
+        docker_registry_user_name = config.get("docker_registry_user_name", "")
+        docker_registry_user_password = config.get("docker_registry_user_password", "")
+        docker_registry = config.get("docker_registry", "")
 
-                port_inside_container = int(config.get("port_inside_container", 2345))
-                use_triton = config.get("use_triton", False)
-                if use_triton:
-                    inference_type = "triton"
-                else:
-                    inference_type = "default"
-
-            if src_code_dir == "":
-                raise Exception("Please indicate source_code_dir in the fedml_model_config.yaml")
-            if relative_entry == "":
-                logging.warning("You missed main_entry in the fedml_model_config.yaml")
-
-        if num_gpus > 0:
-            gpu_ids, gpu_attach_cmd = request_gpu_ids_on_deployment(
-                edge_id, end_point_id, num_gpus=num_gpus, master_device_id=master_device_id)
-
-        if inference_engine == ClientConstants.INFERENCE_ENGINE_TYPE_INT_TRITON:
-            # configuration passed by user in the Cli
-            input_size = model_params["input_size"]
-            input_types = model_params["input_types"]
-            logging.info("convert the onnx model when the mode is from the general PyTorch...")
-            logging.info("Input size {}, input types {}".format(model_params["input_size"],
-                                                                model_params["input_types"]))
-            dummy_input_list = []
-            for index, input_i in enumerate(input_size):
-                if input_types[index] == "int":
-                    this_input = torch.randint(0, 1, input_i).clone().detach()
-                else:
-                    this_input = torch.zeros(input_i).clone().detach()
-                dummy_input_list.append(this_input)
+        port_inside_container = int(config.get("port_inside_container", 2345))
+        use_triton = config.get("use_triton", False)
+        if use_triton:
+            inference_type = "triton"
+        else:
+            inference_type = "default"
+
+    # Config check
+    if src_code_dir == "":
+        raise Exception("Please indicate source_code_dir in the fedml_model_config.yaml")
+    if relative_entry == "":
+        logging.warning("You missed main_entry in the fedml_model_config.yaml")
 
-            onnx_model_path = os.path.join(model_storage_local_path,
-                                           ClientConstants.FEDML_CONVERTED_MODEL_DIR_NAME,
-                                           running_model_name, ClientConstants.INFERENCE_MODEL_VERSION)
-            logging.info("converted onnx model path: {}".format(onnx_model_path))
-            if not os.path.exists(onnx_model_path):
-                os.makedirs(onnx_model_path, exist_ok=True)
-            onnx_model_path = os.path.join(onnx_model_path, "model.onnx")
+    # Request the GPU ids for the deployment
+    if num_gpus > 0:
+        gpu_ids, gpu_attach_cmd = request_gpu_ids_on_deployment(
+            edge_id, end_point_id, num_gpus=num_gpus, master_device_id=master_device_id)
 
-            convert_model_to_onnx(model, onnx_model_path, dummy_input_list, input_size)
+        # set replica and their gpu ids
+        FedMLModelCache.get_instance().set_redis_params()
+        FedMLModelCache.get_instance().set_replica_gpu_ids(
+            end_point_id, end_point_name, inference_model_name, edge_id, replica_rank+1, gpu_ids)
+    logging.info("GPU ids allocated: {}".format(gpu_ids))
 
     logging.info("move converted model to serving dir for inference...")
     model_serving_dir = ClientConstants.get_model_serving_dir()
@@ -339,265 +211,199 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
                 if not os.path.exists(dst_model_file):
                     shutil.copyfile(src_model_file, dst_model_file)
 
-    if inference_engine == ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT:
-        logging.info(f"master ip: {master_ip}, worker ip: {infer_host}")
-        if infer_host == master_ip:
-            logging.info("infer_host is the same as master ip, will use 127.0.0.1 to avoid firewall issue")
-            infer_host = "127.0.0.1"
+    if inference_engine != ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT:
+        raise Exception(f"inference engine {inference_engine} is not supported")
 
-        try:
-            client = docker.from_env()
-            if enable_custom_image and docker_registry_user_name != "" and docker_registry_user_password != "" \
-                    and docker_registry != "":
-                client.login(username=docker_registry_user_name, password=docker_registry_user_password,
-                             registry=docker_registry)
-        except Exception:
-            logging.error("Failed to connect to the docker daemon, please ensure that you have "
-                          "installed Docker Desktop or Docker Engine, and the docker is running")
-            return "", "", None, None, None
-
-        container_prefix = "{}".format(ClientConstants.FEDML_DEFAULT_SERVER_CONTAINER_NAME_PREFIX) + "__" + \
-                                        security_utils.get_content_hash(running_model_name)
-        
-        same_model_container_rank = ContainerUtils.get_container_rank_same_model(container_prefix)
-        if same_model_container_rank == -1:
-            logging.error(f"Fail to get existed docker with {end_point_name} {inference_model_name}")
-            raise Exception("Failed to get the container rank")
-        default_server_container_name = container_prefix + "__" + str(same_model_container_rank)
+    # If the worker shares a host with the master, talk over loopback to avoid firewall issues.
+    logging.info(f"master ip: {master_ip}, worker ip: {infer_host}")
+    if infer_host == master_ip:
+        logging.info("infer_host is the same as master ip, will use 127.0.0.1 to avoid firewall issue")
+        infer_host = "127.0.0.1"
 
-        try:
-            exist_container_obj = client.containers.get(default_server_container_name)
-        except docker.errors.NotFound:
-            exist_container_obj = None
-        except docker.errors.APIError:
-            raise Exception("Failed to get the container object")
-
-        if exist_container_obj is not None:
-            client.api.remove_container(exist_container_obj.id, v=True, force=True)
-        device_requests = []
-        if no_real_gpu_allocation is not None:
-            use_gpu = not no_real_gpu_allocation
-        if use_gpu:
-            logging.info("Number of GPUs: {}".format(num_gpus))
-            if gpu_ids is not None:
-                gpu_id_list = map(lambda x: str(x), gpu_ids)
-                device_requests.append(
-                    docker.types.DeviceRequest(device_ids=list(gpu_id_list), capabilities=[['gpu']]))
+    try:
+        client = docker.from_env()
+        if enable_custom_image and docker_registry_user_name != "" and docker_registry_user_password != "" \
+                and docker_registry != "":
+            client.login(username=docker_registry_user_name, password=docker_registry_user_password,
+                         registry=docker_registry)
+    except Exception:
+        logging.error("Failed to connect to the docker daemon, please ensure that you have "
+                      "installed Docker Desktop or Docker Engine, and the docker is running")
+        return "", "", None, None, None
+
+    container_prefix = ("{}".format(ClientConstants.FEDML_DEFAULT_SERVER_CONTAINER_NAME_PREFIX) + "__" +
+                        security_utils.get_content_hash(running_model_name))
+
+    default_server_container_name = container_prefix + "__" + str(replica_rank)
+
+    try:
+        exist_container_obj = client.containers.get(default_server_container_name)
+    except docker.errors.NotFound:
+        exist_container_obj = None
+    except docker.errors.APIError:
+        raise Exception("Failed to get the container object")
+
+    # Allocate the GPU
+    # TODO: Make sure there is no GPU contention between replicas of a single deployment
+    if exist_container_obj is not None:
+        client.api.remove_container(exist_container_obj.id, v=True, force=True)
+    device_requests = []
+    if no_real_gpu_allocation is not None:
+        use_gpu = not no_real_gpu_allocation
+    use_gpu = False
+    if use_gpu:
+        logging.info("Number of GPUs: {}".format(num_gpus))
+        if gpu_ids is not None:
+            gpu_id_list = map(lambda x: str(x), gpu_ids)
+            device_requests.append(
+                docker.types.DeviceRequest(device_ids=list(gpu_id_list), capabilities=[['gpu']]))
+        else:
+            device_requests.append(
+                docker.types.DeviceRequest(count=num_gpus, capabilities=[['gpu']]))
+    logging.info(f"device_requests: {device_requests}")
+
+    # Pull the inference image
+    logging.info(f"Start pulling the inference image {inference_image_name}..., may take a few minutes...")
+    ContainerUtils.get_instance().pull_image_with_policy(image_pull_policy, inference_image_name)
+
+    logging.info("Start creating the inference container...")
+    volumns = []
+    binds = {}
+    environment = {}
+
+    # data_cache_dir mounting
+    assert type(data_cache_dir_input) == dict or type(data_cache_dir_input) == str
+    if type(data_cache_dir_input) == str:
+        # In this case, mount to the same folder inside the container; a leading "~" is replaced with /home/fedml
+        src_data_cache_dir, dst_data_cache_dir = "", ""
+        if data_cache_dir_input != "":
+            if data_cache_dir_input[0] == "~":
+                src_data_cache_dir = os.path.expanduser(data_cache_dir_input)
+                dst_data_cache_dir = data_cache_dir_input.replace("~", "/home/fedml")
             else:
-                device_requests.append(
-                    docker.types.DeviceRequest(count=num_gpus, capabilities=[['gpu']]))
-        logging.info(f"device_requests: {device_requests}")
-        logging.info(f"Start pulling the inference image {inference_image_name}..., may take a few minutes...")
-
-        ContainerUtils.get_instance().pull_image_with_policy(image_pull_policy, inference_image_name)
-
-        logging.info("Start creating the inference container...")
-        volumns = []
-        binds = {}
-        environment = {}
-
-        assert type(data_cache_dir_input) == dict or type(data_cache_dir_input) == str
-        if type(data_cache_dir_input) == str:
-            # In this case, we mount to the same folder, if has ~, we replace it with /home/fedml
-            src_data_cache_dir, dst_data_cache_dir = "", ""
-            if data_cache_dir_input != "":
-                if data_cache_dir_input[0] == "~":
-                    src_data_cache_dir = os.path.expanduser(data_cache_dir_input)
-                    dst_data_cache_dir = data_cache_dir_input.replace("~", "/home/fedml")
+                # check if the data_cache_dir is a relative path
+                if data_cache_dir_input[0] != "/":
+                    raise "data_cache_dir_input has to be an absolute path or start with ~"
                 else:
-                    # check if the data_cache_dir is a relative path
-                    if data_cache_dir_input[0] != "/":
-                        raise "data_cache_dir_input has to be an absolute path or start with ~"
-                    else:
-                        src_data_cache_dir = data_cache_dir_input
-                        dst_data_cache_dir = data_cache_dir_input
-                logging.info(f"src_data_cache_dir: {src_data_cache_dir}, dst_data_cache_dir: {dst_data_cache_dir}")
-                
-                if type(src_data_cache_dir) == str and src_data_cache_dir != "":
-                    logging.info("Start copying the data cache to the container...")
-                    if os.path.exists(src_data_cache_dir):
-                        volumns.append(src_data_cache_dir)
-                        binds[src_data_cache_dir] = {
-                            "bind": dst_data_cache_dir,
-                            "mode": "rw"
-                        }
-                        environment["DATA_CACHE_FOLDER"] = dst_data_cache_dir
-        else:
-            for k, v in data_cache_dir_input.items():
-                if os.path.exists(k):
-                    volumns.append(v)
-                    binds[k] = {
-                        "bind": v,
+                    src_data_cache_dir = data_cache_dir_input
+                    dst_data_cache_dir = data_cache_dir_input
+            logging.info(f"src_data_cache_dir: {src_data_cache_dir}, dst_data_cache_dir: {dst_data_cache_dir}")
+
+            if type(src_data_cache_dir) == str and src_data_cache_dir != "":
+                logging.info("Start copying the data cache to the container...")
+                if os.path.exists(src_data_cache_dir):
+                    volumns.append(src_data_cache_dir)
+                    binds[src_data_cache_dir] = {
+                        "bind": dst_data_cache_dir,
                         "mode": "rw"
                     }
-                else:
-                    logging.warning(f"{k} does not exist, skip mounting it to the container")
-            logging.info(f"Data cache mount: {volumns}, {binds}")
-
-        # Default
-        if not enable_custom_image or (enable_custom_image and relative_entry != ""):
-            logging.info("Start copying the source code to the container...")
-            volumns.append(src_code_dir)
-            binds[src_code_dir] = {
-                "bind": dst_model_serving_dir,
-                "mode": "rw"
-            }
-            environment["MAIN_ENTRY"] = relative_entry
-
-        if not enable_custom_image:
-            # For some image, the default user is root. Unified to fedml.
-            environment["HOME"] = "/home/fedml"
-        environment["BOOTSTRAP_DIR"] = dst_bootstrap_dir
-        environment["FEDML_CURRENT_RUN_ID"] = end_point_id
-        environment["FEDML_CURRENT_EDGE_ID"] = edge_id
-        environment["FEDML_CURRENT_VERSION"] = fedml.get_env_version()
-        environment["FEDML_ENV_VERSION"] = fedml.get_env_version()
-        environment["FEDML_ENV_LOCAL_ON_PREMISE_PLATFORM_HOST"] = fedml.get_local_on_premise_platform_host()
-        environment["FEDML_ENV_LOCAL_ON_PREMISE_PLATFORM_PORT"] = fedml.get_local_on_premise_platform_port()
-        logging.info(f"volume: {volumns}, binds: {binds}, environment: {environment}")
-        logging.info(f"dst_model_serving_dir: {dst_model_serving_dir}")
-        logging.info(f"relative_entry: {relative_entry}")
-        logging.info(f"src_bootstrap_file_path: {src_bootstrap_file_path}")
-        logging.info(f"dst_bootstrap_dir: {dst_bootstrap_dir}")
-        logging.info(f"src_code_dir: {src_code_dir}")
-        logging.info(f"model_serving_dir: {model_serving_dir}")
-
-        if extra_envs is not None:
-            for key in extra_envs:
-                environment[key] = extra_envs[key]
+                    environment["DATA_CACHE_FOLDER"] = dst_data_cache_dir
+    else:
+        for k, v in data_cache_dir_input.items():
+            if os.path.exists(k):
+                volumns.append(v)
+                binds[k] = {
+                    "bind": v,
+                    "mode": "rw"
+                }
+            else:
+                logging.warning(f"{k} does not exist, skip mounting it to the container")
+        logging.info(f"Data cache mount: {volumns}, {binds}")
+
+    # Default mounting
+    if not enable_custom_image or (enable_custom_image and relative_entry != ""):
+        logging.info("Start copying the source code to the container...")
+        volumns.append(src_code_dir)
+        binds[src_code_dir] = {
+            "bind": dst_model_serving_dir,
+            "mode": "rw"
+        }
+        environment["MAIN_ENTRY"] = relative_entry
+
+    # Environment variables
+    if not enable_custom_image:
+        # For some image, the default user is root. Unified to fedml.
+        environment["HOME"] = "/home/fedml"
+
+    environment["BOOTSTRAP_DIR"] = dst_bootstrap_dir
+    environment["FEDML_CURRENT_RUN_ID"] = end_point_id
+    environment["FEDML_CURRENT_EDGE_ID"] = edge_id
+    environment["FEDML_CURRENT_VERSION"] = fedml.get_env_version()
+    environment["FEDML_ENV_VERSION"] = fedml.get_env_version()
+    environment["FEDML_ENV_LOCAL_ON_PREMISE_PLATFORM_HOST"] = fedml.get_local_on_premise_platform_host()
+    environment["FEDML_ENV_LOCAL_ON_PREMISE_PLATFORM_PORT"] = fedml.get_local_on_premise_platform_port()
+
+    if extra_envs is not None:
+        for key in extra_envs:
+            environment[key] = extra_envs[key]
 
+    try:
+        new_container = client.api.create_container(
+            image=inference_image_name,
+            name=default_server_container_name,
+            volumes=volumns,
+            ports=[port_inside_container],  # port open inside the container
+            environment=environment,
+            host_config=client.api.create_host_config(
+                binds=binds,
+                port_bindings={
+                    port_inside_container: usr_indicated_worker_port  # Could be either None or a port number
+                },
+                device_requests=device_requests,
+                shm_size=shm_size,
+                storage_opt=storage_opt,
+                tmpfs=tmpfs,
+                cpu_count=cpus,
+                mem_limit=memory,
+            ),
+            detach=True,
+            command=customized_image_entry_cmd if enable_custom_image else None
+        )
+        client.api.start(container=new_container.get("Id"))
+    except Exception as e:
+        logging.error(f"Failed to create the container with exception {e}, traceback : {traceback.format_exc()}")
+        return "", "", None, None, None
+
+    # Get the port allocation
+    cnt = 0
+    while True:
+        cnt += 1
         try:
-            new_container = client.api.create_container(
-                image=inference_image_name,
-                name=default_server_container_name,
-                volumes=volumns,
-                ports=[port_inside_container],  # port open inside the container
-                environment=environment,
-                host_config=client.api.create_host_config(
-                    binds=binds,
-                    port_bindings={
-                        port_inside_container: usr_indicated_worker_port  # Could be either None or a port number
-                    },
-                    device_requests=device_requests,
-                    shm_size=shm_size,
-                    storage_opt=storage_opt,
-                    tmpfs=tmpfs,
-                    cpu_count=cpus,
-                    mem_limit=memory,
-                ),
-                detach=True,
-                command=customized_image_entry_cmd if enable_custom_image else None
-            )
-            client.api.start(container=new_container.get("Id"))
-        except Exception as e:
-            logging.error(f"Failed to create the container with exception {e}, traceback : {traceback.format_exc()}")
+            if usr_indicated_worker_port is not None:
+                inference_http_port = usr_indicated_worker_port
+                break
+            else:
+                # Find the random port
+                port_info = client.api.port(new_container.get("Id"), port_inside_container)
+                inference_http_port = port_info[0]["HostPort"]
+                logging.info("inference_http_port: {}".format(inference_http_port))
+                break
+        except:
+            if cnt >= 5:
+                raise Exception("Failed to get the port allocation")
+            time.sleep(3)
 
-        # Get the port allocation
-        cnt = 0
-        while True:
-            cnt += 1
-            try:
-                if usr_indicated_worker_port is not None:
-                    inference_http_port = usr_indicated_worker_port
-                    break
-                else:
-                    # Find the random port
-                    port_info = client.api.port(new_container.get("Id"), port_inside_container)
-                    inference_http_port = port_info[0]["HostPort"]
-                    logging.info("inference_http_port: {}".format(inference_http_port))
-                    break
-            except:
-                if cnt >= 5:
-                    raise Exception("Failed to get the port allocation")
-                time.sleep(3)
-
-        # Logging the info from the container
-        log_deployment_result(end_point_id, model_id, default_server_container_name,
-                              ClientConstants.CMD_TYPE_RUN_DEFAULT_SERVER,
-                              inference_model_name, inference_engine, inference_http_port, inference_type,
-                              retry_interval=10, deploy_attempt_threshold=usr_indicated_retry_cnt,
-                              request_input_example=request_input_example, infer_host=infer_host,
-                              enable_custom_image=enable_custom_image)
-
-        # Check if the inference server is ready
-        inference_output_url, running_model_version, ret_model_metadata, ret_model_config = \
-            get_model_info(inference_model_name, inference_engine, inference_http_port,
-                           infer_host, False, inference_type, request_input_example=request_input_example,
-                           enable_custom_image=enable_custom_image)
-
-        if inference_output_url == "":
-            return running_model_name, "", None, None, None
-
-        # testing the inference container
-        test_input = ret_model_metadata["inputs"]
-
-        # try:
-        #     inference_response = run_http_inference_with_curl_request(inference_output_url, test_input, [],
-        #                                                               inference_type="default")
-        #     logging.info(f"Tested the inference backend with {test_input}, the response is {inference_response}")
-        # except Exception as e:
-        #     logging.info("Tested the inference backend, exceptions occurred: {}".format(traceback.format_exc()))
-        #     inference_output_url = ""
-
-        model_metadata = ret_model_metadata
-        logging.info(model_metadata)
-    elif inference_engine == ClientConstants.INFERENCE_ENGINE_TYPE_INT_TRITON:
-        logging.info("prepare to run triton server...")
-        if not use_simulation_test_without_triton:
-            if not triton_server_is_running and not ClientConstants.is_running_on_k8s():
-                triton_server_cmd = "{}docker stop {}; {}docker rm {}; {}docker run --name {} {} -p{}:8000 " \
-                                    "-p{}:8001 -p{}:8002 " \
-                                    "--shm-size {} " \
-                                    "-v {}:/models {} " \
-                                    "bash -c \"pip install transformers && tritonserver --strict-model-config=false " \
-                                    "--model-control-mode=poll --repository-poll-secs={} " \
-                                    "--model-repository=/models\" ".format(sudo_prefix, triton_server_container_name,
-                                                                           sudo_prefix, triton_server_container_name,
-                                                                           sudo_prefix, triton_server_container_name,
-                                                                           gpu_attach_cmd,
-                                                                           inference_http_port,
-                                                                           inference_grpc_port,
-                                                                           inference_metric_port,
-                                                                           inference_memory_size,
-                                                                           model_serving_dir,
-                                                                           inference_server_image,
-                                                                           ClientConstants.FEDML_MODEL_SERVING_REPO_SCAN_INTERVAL)
-                logging.info("Run triton inference server: {}".format(triton_server_cmd))
-                triton_server_process = ClientConstants.exec_console_with_script(triton_server_cmd,
-                                                                                 should_capture_stdout=False,
-                                                                                 should_capture_stderr=False,
-                                                                                 no_sys_out_err=True)
-                log_deployment_result(end_point_id, model_id, triton_server_container_name,
-                                      ClientConstants.CMD_TYPE_RUN_TRITON_SERVER, triton_server_process.pid,
-                                      running_model_name, inference_engine, inference_http_port)
-
-            inference_output_url, running_model_version, ret_model_metadata, ret_model_config = \
-                get_model_info(running_model_name, inference_engine, inference_http_port, infer_host)
-            if inference_output_url != "":
-                # Send the test request to the inference backend and check if the response is normal
-                input_json, output_json = build_inference_req(end_point_name, inference_model_name,
-                                                              token, ret_model_metadata)
-                try:
-                    inference_response = run_http_inference_with_curl_request(inference_output_url,
-                                                                              input_json["inputs"],
-                                                                              input_json["outputs"])
-                    logging.info("Tested the inference backend, the response is {}".format(inference_response))
-                except Exception as e:
-                    logging.info("Tested the inference backend, exceptions occurred: {}".format(traceback.format_exc()))
-                    inference_output_url = ""
-
-                if inference_output_url != "":
-                    logging.info(
-                        "Deploy model successfully, inference url: {}, model metadata: {}, model config: {}".format(
-                            inference_output_url, model_metadata, model_config))
-                    model_metadata = ret_model_metadata
-                    model_config = ret_model_config
-        else:
-            inference_output_url = f"http://localhost:{inference_http_port}/v2/models/{running_model_name}/versions/1/infer"
-    else:
-        raise Exception("inference engine {} is not supported".format(inference_engine))
+    # Logging the info from the container
+    log_deployment_result(end_point_id, model_id, default_server_container_name,
+                          ClientConstants.CMD_TYPE_RUN_DEFAULT_SERVER,
+                          inference_model_name, inference_engine, inference_http_port, inference_type,
+                          retry_interval=10, deploy_attempt_threshold=usr_indicated_retry_cnt,
+                          request_input_example=request_input_example, infer_host=infer_host,
+                          enable_custom_image=enable_custom_image)
+
+    # Check if the inference server is ready
+    inference_output_url, running_model_version, ret_model_metadata, ret_model_config = \
+        get_model_info(inference_model_name, inference_engine, inference_http_port,
+                       infer_host, False, inference_type, request_input_example=request_input_example,
+                       enable_custom_image=enable_custom_image)
+
+    if inference_output_url == "":
+        return running_model_name, "", None, None, None
 
-    return running_model_name, inference_output_url, model_version, model_metadata, model_config
+    model_metadata = ret_model_metadata
+    logging.info(model_metadata)
+
+    return running_model_name, inference_output_url, model_version, model_metadata, ret_model_config
 
 
 def build_inference_req(end_point_name, model_name, token, in_model_metadata):
@@ -719,8 +525,14 @@ def log_deployment_result(end_point_id, model_id, cmd_container_name, cmd_type,
                 break
 
             if container_obj is not None:
-                out_logs = container_obj.logs(stdout=True, stderr=False, stream=False, follow=False, since=last_log_time)
-                err_logs = container_obj.logs(stdout=False, stderr=True, stream=False, follow=False, since=last_log_time)
+                try:
+                    out_logs = container_obj.logs(stdout=True, stderr=False, stream=False, follow=False,
+                                                  since=last_log_time)
+                    err_logs = container_obj.logs(stdout=False, stderr=True, stream=False, follow=False,
+                                                  since=last_log_time)
+                except Exception as e:
+                    logging.error(f"Failed to get the logs from the container with exception {e}")
+                    pass
 
                 last_log_time = datetime.datetime.now()
 
@@ -741,9 +553,7 @@ def log_deployment_result(end_point_id, model_id, cmd_container_name, cmd_type,
                     logging.info(f"Logs from docker: {format(out_logs)}")
 
                 if container_obj.status == "exited":
-                    logging.info("Container {} has exited, automatically"
-                                 " remove it ...".format(cmd_container_name))
-                    client.api.remove_container(container_obj.id, v=True, force=True)
+                    logging.info("Container {} has exited".format(cmd_container_name))
                     break
 
         # should_exit_logs will ping the inference container
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index 6e99851d73..faa16e7b4c 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -26,34 +26,34 @@
     pass
 
 
-class Settings(BaseSettings):
-    redis_addr: str
-    redis_port: str
-    redis_password: str
-    end_point_name: str
-    model_name: str
-    model_version: str
-    model_infer_url: str
-    version: str
-    use_mqtt_inference: bool
-    use_worker_gateway: bool
-    ext_info: str
-
-
-settings = Settings()
-
-# class settings:
-#     redis_addr = "127.0.0.1"
-#     redis_port = 6379
-#     redis_password = "fedml_default"
-#     end_point_name = ""
-#     model_name = ""
-#     model_version = ""
-#     model_infer_url = "127.0.0.1"
-#     version = "dev"
-#     use_mqtt_inference = False
-#     use_worker_gateway = False
-#     ext_info = "2b34303961245c4f175f2236282d7a272c040b0904747579087f6a760112030109010c215d54505707140005190a051c347f365c4a430c020a7d39120e26032a78730f797f7c031f0901657e75"
+# class Settings(BaseSettings):
+#     redis_addr: str
+#     redis_port: str
+#     redis_password: str
+#     end_point_name: str
+#     model_name: str
+#     model_version: str
+#     model_infer_url: str
+#     version: str
+#     use_mqtt_inference: bool
+#     use_worker_gateway: bool
+#     ext_info: str
+#
+#
+# settings = Settings()
+
+class settings:  # NOTE(review): hard-coded stand-in for the pydantic BaseSettings above — values are not read from the environment; confirm this is intended beyond local debugging
+    redis_addr = "127.0.0.1"  # Redis host passed to FedMLModelCache.get_instance()
+    redis_port = 6379  # Redis port passed to FedMLModelCache.get_instance()
+    redis_password = "fedml_default"  # default Redis password
+    end_point_name = ""  # endpoint name; empty here, presumably resolved per request — TODO confirm
+    model_name = ""  # model name; empty here, presumably resolved per request — TODO confirm
+    model_version = ""  # model version; empty here, presumably resolved per request — TODO confirm
+    model_infer_url = "127.0.0.1"  # default inference host
+    version = "dev"  # environment/version tag
+    use_mqtt_inference = False  # MQTT-based inference path disabled
+    use_worker_gateway = False  # worker-gateway path disabled
+    ext_info = "2b34303961245c4f175f2236282d7a272c040b0904747579087f6a760112030109010c215d54505707140005190a051c347f365c4a430c020a7d39120e26032a78730f797f7c031f0901657e75"  # opaque hex blob; meaning not evident from this file — TODO confirm before shipping
 
 
 api = FastAPI()
@@ -176,7 +176,8 @@ async def _predict(
         idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url = \
             found_idle_inference_device(in_end_point_id, in_end_point_name, in_model_name, in_model_version)
         if idle_device is None or idle_device == "":
-            return {"error": True, "error_code": status.HTTP_404_NOT_FOUND, "message": "can not found the active endpoint."}
+            return {"error": True, "error_code": status.HTTP_404_NOT_FOUND,
+                    "message": "can not found active inference worker for this endpoint."}
 
         # Start timing for model metrics
         model_metrics = FedMLModelMetrics(end_point_id, in_end_point_name,
@@ -224,12 +225,17 @@ def retrieve_info_by_endpoint_id(end_point_id, in_end_point_name=None, in_model_
     redis_key = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port). \
         get_end_point_full_key_by_id(end_point_id)
     if redis_key is not None:
+        end_point_name = ""
+        model_name = ""
         if in_end_point_name is not None:
             end_point_name = in_end_point_name
             model_name = redis_key[len(f"{FedMLModelCache.FEDML_MODEL_DEPLOYMENT_STATUS_TAG}-{end_point_id}-{in_end_point_name}-"):]
         else:
             # e.g. FEDML_MODEL_DEPLOYMENT_STATUS--1234-dummy_endpoint_name-dummy_model_name
-            end_point_id, end_point_name, model_name = redis_key.split("--")[1].split("-")
+            try:
+                end_point_id, end_point_name, model_name = redis_key.split("--")[1].split("-")
+            except Exception as e:
+                logging.warning(f"Failed to parse redis_key: {redis_key}. Could not retrieve only use end_point_id.")
 
         if enable_check:
             if end_point_name != in_end_point_name or model_name != in_model_name:
@@ -352,6 +358,6 @@ def logging_inference_request(request, response):
 
 if __name__ == "__main__":
     import uvicorn
-    port = 2204
+    port = 2203
     logging.basicConfig(level=logging.INFO)
     uvicorn.run(api, host="0.0.0.0", port=port, log_level="info")
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_msg_object.py b/python/fedml/computing/scheduler/model_scheduler/device_model_msg_object.py
index 062b591853..a6c6244108 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_msg_object.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_msg_object.py
@@ -1,5 +1,5 @@
-
 import json
+import logging
 
 
 class FedMLModelMsgObject(object):
@@ -37,7 +37,10 @@ def __init__(self, topic, payload):
         }"""
 
         # get deployment params
-        request_json = json.loads(payload)
+        if isinstance(payload, dict):
+            request_json = payload
+        else:
+            request_json = json.loads(payload)
         self.msg_topic = topic
         self.request_json = request_json
         self.run_id = request_json["end_point_id"]
@@ -58,13 +61,54 @@ def __init__(self, topic, payload):
         self.inference_engine = self.model_config.get("inference_engine", 0)
         self.inference_end_point_id = self.run_id
 
-        self. request_json["run_id"] = self.run_id
+        self.request_json["run_id"] = self.run_id
+
+        self.gpu_topology = self.get_devices_avail_gpus()
+        self.gpu_per_replica = self.get_gpu_per_replica()
+
+        self.max_unavailable_rate = self.model_config.get("max_unavailable_rate", 0.1)
+
+    def get_devices_avail_gpus(self):
+        """
+        {
+            "gpu_topology": {"id1": 1, "id2": 1}    # Here the 1 means gpu card, not replica
+        }
+        """
+        # [Test1] using self.request_json["parameters"]["gpu_topology"]
+        # logging.info(f"[Replica Controller] [endpoint {self.run_id} ] devices_avail_gpus:"
+        #              f" {self.request_json['parameters']['gpu_topology']}")
+        # res = self.request_json["parameters"]["gpu_topology"]
+
+        # [Test2] Using self.scale_min
+        # res = {}
+        # for id in self.request_json["device_ids"]:
+        #     if str(id) == str(self.device_ids[0]):
+        #         continue
+        #     res[id] = int(self.scale_min)
+        # return res
+
+        # [Prod] Using self.request_json["gpu_topology"]
+        if "gpu_topology" not in self.request_json:
+            logging.warning("gpu_topology not found in request_json, using scale_min instead")
+            res = {}
+            for id in self.request_json["device_ids"]:
+                if str(id) == str(self.device_ids[0]):
+                    continue
+                res[id] = int(self.scale_min)
+            return res
+
+        logging.info(f"[Replica Controller] [endpoint {self.run_id}] "
+                     f"devices_avail_gpus: {self.request_json['gpu_topology']}")
+
+        return self.request_json["gpu_topology"]
+
+    def get_gpu_per_replica(self):
+        """
+        Read gpu_per_replica from user's config yaml file. Default 1.
+        """
+        if "parameters" in self.request_json and "gpu_per_replica" in self.request_json["parameters"]:
+            return self.request_json["parameters"]["gpu_per_replica"]
+        return 1
 
    def show(self, prefix=""):
        """Log a one-line identity summary (run id and endpoint name) of this message object, prepended with *prefix*."""
        logging.info(f"{prefix} [FedMLModelMsgObject] [run_id {self.run_id}] [end_point_name {self.end_point_name}]")
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_replica_controller.py b/python/fedml/computing/scheduler/model_scheduler/device_replica_controller.py
new file mode 100644
index 0000000000..9c43130687
--- /dev/null
+++ b/python/fedml/computing/scheduler/model_scheduler/device_replica_controller.py
@@ -0,0 +1,437 @@
+import logging
+import copy
+from .device_model_cache import FedMLModelCache
+from .device_model_msg_object import FedMLModelMsgObject
+from .device_client_constants import ClientConstants
+
+
class FedMLDeviceReplicaController:
    """
    Master-side replica controller for one endpoint deployment.

    Compares the target replica layout (replica count per device and target
    model version) against the current layout read back from the model cache,
    and exposes the diffs that drive scale-out, scale-in and rolling version
    updates. Callbacks mutate the ``intermediate_*`` state as workers report
    progress until it reconciles with the target.
    """

    def __init__(self, master_id, request_json: dict):
        """
        For each deployment, we have:
        master_id: unique id for the master device
        e_id: unique id (i.e. endpoint_id) for each deployment
        devices_avail_gpus = {device_id1: gpu_num, device_id2: gpu_num, ...}
        request_json: json from MLOps for this deployment
        total_gpu_num: total number of gpus will be used for this deployment
        gpu_per_replica: number of gpus required per replica
        min_replica_num: minimum number of replicas required
        max_replica_num: maximum number of replicas required
        endpoint_name: endpoint name
        model_name: model name
        target_replica_num: target replica number for each device
        target_replica_version: target replica version
        curr_replica_num: current replica number for each device
        intermediate_replica_num: intermediate replica number for each device
        total_replica_version_diff_num: total replica version difference number
        max_unavailable_rate: maximum unavailable rate
        curr_replica_updating_window: current replica updating window
        curr_replica_version: current replica version for each device
        intermediate_replica_version: intermediate replica version for each device
        """
        self.master_id = master_id
        self.request_json = request_json
        self.request_msg_obj = FedMLModelMsgObject("replica_controller", request_json)

        self.e_id = self.request_msg_obj.run_id
        self.devices_avail_gpus = self.request_msg_obj.gpu_topology
        self.total_gpu_num = self.calc_total_gpu_num()
        self.gpu_per_replica = self.request_msg_obj.gpu_per_replica
        self.min_replica_num = self.request_msg_obj.scale_min
        self.max_replica_num = self.request_msg_obj.scale_max
        self.endpoint_name = self.request_msg_obj.end_point_name
        self.model_name = self.request_msg_obj.model_name

        self.target_replica_num = self.init_id_replica_num()

        # Current state is seeded from the cache; the intermediate_* copies
        # are what the callbacks mutate as workers report progress.
        self.curr_replica_num = self.get_curr_replica_num_state_frm_db()
        self.intermediate_replica_num = copy.deepcopy(self.curr_replica_num)

        # Version control
        self.target_replica_version = self.request_msg_obj.model_version
        self.max_unavailable_rate = self.request_msg_obj.max_unavailable_rate
        self.curr_replica_updating_window = {}

        self.curr_replica_version = self.get_curr_replica_version_frm_db()
        self.intermediate_replica_version = copy.deepcopy(self.curr_replica_version)

        self.total_replica_version_diff_num, self.total_replica_version_diff = self.diff_target_curr_replica_version()

    def calc_total_gpu_num(self):
        """Sum the available gpu cards over all devices in the topology."""
        total_gpu_num = 0
        for device_id, gpu_num in self.devices_avail_gpus.items():
            total_gpu_num += gpu_num
        return total_gpu_num

    def init_id_replica_num(self):
        """
        Initialize the target replica number for each device.
        id_replica_num[id] = avail_num // self.gpu_per_replica

        Keys are stringified device ids. Raises ValueError when a device's
        gpu count is not divisible by gpu_per_replica.
        """
        id_replica_num = {}
        for id, avail_num in self.devices_avail_gpus.items():
            if avail_num % self.gpu_per_replica != 0:
                raise ValueError("The number of gpus for each device should be divisible by gpu_per_replica")
            id_replica_num[str(id)] = avail_num // self.gpu_per_replica
        return id_replica_num

    def diff_target_curr_replica_num(self):
        """Log and return the per-device replica-number diff (see the static impl for the shape)."""
        logging.info(f"[Replica Controller] [endpoint {self.e_id} ]target_replica_state: {self.target_replica_num}")
        logging.info(f"[Replica Controller] [endpoint {self.e_id} ]curr_replica_state: {self.curr_replica_num}")
        diff = self.diff_target_curr_replica_num_impl(self.target_replica_num, self.curr_replica_num)
        logging.info(
            f"[Replica Controller] [endpoint {self.e_id} ]diff_target_curr_replica_num: {diff}")
        return diff

    def diff_target_curr_replica_version(self):
        """Log and return (num_diff, diff) between target and current replica versions."""
        logging.info(f"[Replica Controller] [endpoint {self.e_id} ]"
                     f"target_replica_version: {self.target_replica_version}")
        logging.info(f"[Replica Controller] [endpoint {self.e_id} ]"
                     f"curr_replica_version: {self.curr_replica_version}")

        num_diff, diff = self.diff_target_curr_replica_version_impl(
            self.target_replica_version, self.curr_replica_version)

        logging.info(
            f"[Replica Controller] [endpoint {self.e_id} ]diff_target_curr_replica_version: {diff}")
        return num_diff, diff

    @staticmethod
    def diff_target_curr_replica_num_impl(target_replica_state, curr_replica_state):
        """
        Return the difference between target and current replica number.
        "op" could only be "add" or "remove".
        e.g.
        curr_replica_state = {id1: 1, id2: 1}
        target_replica_state = {id1: 2, id2: 2}

        return {id1: {"op": "add", "curr_num": 1, "target_num": 2}, id2: {"op": "add", "curr_num": 1, "target_num": 2}}

        Devices whose target equals their current count produce no entry.
        Devices present only in curr get a "remove" to 0; devices present
        only in target get an "add" from 0.
        """
        diff_target_curr_replica_num = {}
        assert target_replica_state is not None

        if curr_replica_state is None:
            curr_replica_state = {}
            for id, target_num in target_replica_state.items():
                diff_target_curr_replica_num[id] = {"op": "add", "curr_num": 0, "target_num": target_num}
            return diff_target_curr_replica_num

        for id, target_num in target_replica_state.items():
            if id not in curr_replica_state:
                # In one scale-out operation, the device may not be deployed yet.
                diff_target_curr_replica_num[id] = {"op": "add", "curr_num": 0, "target_num": target_num}
            elif target_num > curr_replica_state[id]:
                diff_target_curr_replica_num[id] = {"op": "add", "curr_num": curr_replica_state[id],
                                                    "target_num": target_num}
            elif target_num < curr_replica_state[id]:
                diff_target_curr_replica_num[id] = {"op": "remove", "curr_num": curr_replica_state[id],
                                                    "target_num": target_num}
            else:
                pass

        for id, curr_num in curr_replica_state.items():
            if id not in target_replica_state:
                diff_target_curr_replica_num[id] = {"op": "remove", "curr_num": curr_num, "target_num": 0}

        return diff_target_curr_replica_num

    @staticmethod
    def diff_target_curr_replica_version_impl(target_replica_version: str, curr_replica_version):
        """
        Return the number of difference, and difference between target and current replica version.
        "op" could only be "update".
        e.g.
        curr_replica_version = {
            "id1": {$replica_no: "v1", $replica_no: "v1"},
            "id2": {$replica_no: "v1", $replica_no: "v1"},
        }
        target_replica_version = "v2"   # Could be different for each device in the future.

        return {
            "id1": {
                $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"},
                $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"}
             },
            "id2": {
                $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"},
                $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"}
            }
        }

        Return (0, None) if curr_replica_version is None (i.e. this model has
        not been deployed yet) or if no replica differs from the target.
        """
        if curr_replica_version is None:
            return 0, None

        diff_target_curr_replica_version = {}
        num_diff = 0
        for device_id, device_replicas_version in curr_replica_version.items():
            diff_target_curr_replica_version[device_id] = {}
            for replica_no, curr_version in device_replicas_version.items():
                if curr_version != target_replica_version:
                    num_diff += 1
                    diff_target_curr_replica_version[device_id][replica_no] = {
                        "op": "update",
                        "new_version": target_replica_version,
                        "old_version": curr_version
                    }
        if num_diff == 0:
            return 0, None

        return num_diff, diff_target_curr_replica_version

    def get_curr_replica_num_state_frm_db(self):
        """
        Sync the current replica number state from the database.
        Return the current replica number state, keyed by stringified device
        id; each deployment-result row counts as one replica.
        """
        res_frm_db = FedMLModelCache.get_instance().get_deployment_result_list(
            self.e_id, self.endpoint_name, self.model_name)

        curr_state = {}
        if res_frm_db is None or len(res_frm_db) == 0:
            # First time to get the replica number from the database
            for id, target_num in self.target_replica_num.items():
                curr_state[str(id)] = 0
        else:
            for result_item in res_frm_db:
                # Unpack the result_item
                result_device_id, _, result_payload = FedMLModelCache.get_instance().get_result_item_info(result_item)
                curr_state[str(result_device_id)] = curr_state.get(str(result_device_id), 0) + 1

        logging.info(f"[Replica Controller] [endpoint {self.e_id} ] curr_replica_state from db: {curr_state}")
        return curr_state

    def get_curr_replica_version_frm_db(self):
        """
        Sync the current replica version from the database.
        Return the current replica version.
        {
            "id1": {$replica_no: "v1", $replica_no: "v2"},
            "id2": {$replica_no: "v1", $replica_no: "v2"},
        }
        Return None if this model has not been deployed yet.
        """
        curr_versions = {}
        res_frm_db = FedMLModelCache.get_instance().get_deployment_result_list(
            self.e_id, self.endpoint_name, self.model_name)
        if res_frm_db is None or len(res_frm_db) == 0:
            return None
        else:
            for result_item in res_frm_db:
                # Unpack the result_item
                result_device_id, replica_no, result_payload = (FedMLModelCache.get_instance().
                                                                get_result_item_info(result_item))
                if str(result_device_id) not in curr_versions:
                    curr_versions[str(result_device_id)] = {}
                curr_versions[str(result_device_id)][str(replica_no)] = result_payload["model_version"]

        return curr_versions

    def generate_diff_to_request_json(self):
        """
        Write the replica-number diff (curr <> target) into self.request_json.

        NOTE: despite the example below mentioning "replica_version_diff",
        this method only writes "replica_num_diff" and "gpus_per_replica";
        the version diff is added separately by
        init_first_update_device_replica_mapping().

        e.g.
        {
            "replica_num_diff": {
                id1: {"op": "add", "curr_num": 1, "target_num": 2},
                id2: {"op": "add", "curr_num": 1, "target_num": 2},
                id3: {"op": "remove", "curr_num": 1, "target_num": 0}
            },
            "gpus_per_replica": 1,
        }
        """
        replica_num_diff_key = "replica_num_diff"
        gpu_per_replica_key = "gpus_per_replica"

        replica_num_diff = self.diff_target_curr_replica_num()
        self.request_json[replica_num_diff_key] = replica_num_diff

        self.request_json[gpu_per_replica_key] = self.gpu_per_replica
        return self.request_json

    def callback_update_curr_replica_num_state(self, changed_device_id, replica_no, op_type):
        """
        Callback function to update the current replica number.
        curr_state: {id1: 1, id2: 1}
        target_replica_state = {id1: 2, id2: 2}
        intermediate_state = {id1: 2, id2: 1}

        op_type is compared against the ClientConstants deployment-status
        constants (DEPLOYED increments, DELETED decrements) — not the
        literal strings "add"/"remove".

        Replicas currently inside the rolling-update window are ignored:
        their DEPLOYED report is an update, not a count change.
        """
        if (str(changed_device_id) in self.curr_replica_updating_window) and \
                (str(replica_no) in self.curr_replica_updating_window[str(changed_device_id)]):
            # Should be viewed as updated, replica number will not be changed.
            return

        if str(changed_device_id) not in self.intermediate_replica_num:
            assert op_type == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED

            # Intermediate state is not initialized yet. Since it may derive from the database.
            self.intermediate_replica_num[str(changed_device_id)] = 0

        if op_type == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED:
            self.intermediate_replica_num[str(changed_device_id)] += 1
        elif op_type == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DELETED:
            self.intermediate_replica_num[str(changed_device_id)] -= 1

    def is_all_replica_num_reconciled(self):
        """
        Check whether the intermediate replica counts match the target on
        every device (devices absent from the target must have reached 0).
        """
        for id, replica_no in self.intermediate_replica_num.items():
            if id not in self.target_replica_num:   # Delete all replica in this device
                if replica_no != 0:
                    return False
                else:
                    continue
            if replica_no != self.target_replica_num[id]:
                return False

        for id, target_replica_num in self.target_replica_num.items():
            if id not in self.intermediate_replica_num or self.intermediate_replica_num[id] != target_replica_num:
                return False

        logging.info(f"[Replica Controller] [endpoint {self.e_id} ] Replicas are reconciled as expected.")
        logging.info(f"[Replica Controller] [endpoint {self.e_id} ] "
                     f"intermediate_replica_num: {self.intermediate_replica_num}")
        logging.info(f"[Replica Controller] [endpoint {self.e_id} ] "
                     f"target_replica_num: {self.target_replica_num}")
        return True

    def get_first_chunk_devices_replica_update(self):
        """
        Scroll update.
        Set the schema request json, which, will trans to subprocess (device_server_runner).
        The subprocess will send the init deployment msg to the worker device(s),
            then, the callback_deployment_result will handle the rest updating msg.

        e.g.
        {
            "replica_version_diff": {
                "id1": {
                    $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"},
                    $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"}
                 },
                "id2": {
                    $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"},
                    $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"}
                }
            },
        }

        Return None if there is no replica version difference.

        NOTE(review): window_size is derived from the total number of
        out-of-date REPLICAS, but the guard below compares
        len(first_chunk_devices_update), which counts DEVICES, and the
        `break` only exits the inner loop — so a chunk can contain more
        replicas than window_size when a device hosts several replicas.
        Confirm whether the window is meant to be per-replica.
        """
        if self.total_replica_version_diff_num == 0:
            return None

        window_size = max(1, int(self.total_replica_version_diff_num * self.max_unavailable_rate))

        first_chunk_devices_update = {}

        for device_id, device_replicas_version in self.total_replica_version_diff.items():
            for replica_no, diff in device_replicas_version.items():
                if len(first_chunk_devices_update) >= window_size:
                    break
                if device_id not in first_chunk_devices_update:
                    first_chunk_devices_update[device_id] = {}
                first_chunk_devices_update[device_id][replica_no] = diff

        return first_chunk_devices_update

    def init_update_updating_window(self, first_chunk_devices_update):
        """
        Initialize the current replica updating window.
        """
        self.curr_replica_updating_window = copy.deepcopy(first_chunk_devices_update)

    def callback_update_updating_window(self, device_id, replica_no):
        """
        Remove (device_id, replica_no) from the updating window once its
        update finished, and record its version as the target version.
        No-op when the pair is not in the window.
        """
        if str(device_id) not in self.curr_replica_updating_window:
            return

        if str(replica_no) not in self.curr_replica_updating_window[str(device_id)]:
            return

        # Remove the replica_no from the updating window
        del self.curr_replica_updating_window[str(device_id)][str(replica_no)]

        if len(self.curr_replica_updating_window[str(device_id)]) == 0:
            del self.curr_replica_updating_window[str(device_id)]

        # Change this replica's state in the global map
        self.intermediate_replica_version[str(device_id)][str(replica_no)] = self.target_replica_version

    def get_next_chunk_devices_replica(self):
        """
        If no need for updating, return None
        If the intermediate equal to target, return None
        If the current updating window is not empty, return None
        else, determine the next window, and send the request msg to the device -> replica handler.

        NOTE(review): same window-size concern as in
        get_first_chunk_devices_replica_update — the length check counts
        devices while window_size counts replicas, and `break` only exits
        the inner loop. Confirm intended semantics.
        """
        if self.total_replica_version_diff_num == 0:
            return None

        if self.is_all_replica_version_reconciled():
            return None

        if len(self.curr_replica_updating_window) > 0:
            return None

        # Determine the next window
        window_size = max(1, int(self.total_replica_version_diff_num * self.max_unavailable_rate))

        next_chunk_devices_replicas_update = {}

        for id, device_replicas_version in self.intermediate_replica_version.items():
            for replica_no, version in device_replicas_version.items():
                if version != self.target_replica_version:
                    if id not in next_chunk_devices_replicas_update:
                        next_chunk_devices_replicas_update[id] = {}
                    next_chunk_devices_replicas_update[id][replica_no] = {
                        "op": "update",
                        "new_version": self.target_replica_version,
                        "old_version": version
                    }
                    if len(next_chunk_devices_replicas_update) >= window_size:
                        break

        return next_chunk_devices_replicas_update

    def is_all_replica_version_reconciled(self):
        """
        True when every replica's intermediate version equals the target
        version (or when there was never any version diff).
        """
        if self.total_replica_version_diff_num == 0:
            return True

        for id, device_replicas_version in self.intermediate_replica_version.items():
            for replica_no, version in device_replicas_version.items():
                if version != self.target_replica_version:
                    return False
        return True

    def init_first_update_device_replica_mapping(self):
        # Check if there is no replica version difference. return first_chunk_devices_update
        first_chunk_dict = self.get_first_chunk_devices_replica_update()
        if first_chunk_dict is None:
            return self.request_json

        # Update the updating window
        self.init_update_updating_window(first_chunk_dict)

        # Prepare and return the request json.
        # NOTE(review): the local is named replica_num_diff_key but holds the
        # "replica_version_diff" key — consider renaming for clarity.
        replica_num_diff_key = "replica_version_diff"
        self.request_json[replica_num_diff_key] = first_chunk_dict
        return self.request_json
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_replica_handler.py b/python/fedml/computing/scheduler/model_scheduler/device_replica_handler.py
new file mode 100644
index 0000000000..d8865ed854
--- /dev/null
+++ b/python/fedml/computing/scheduler/model_scheduler/device_replica_handler.py
@@ -0,0 +1,138 @@
+import logging
+from ..scheduler_core.compute_cache_manager import ComputeCacheManager
+from ..comm_utils.container_utils import ContainerUtils
+from ..comm_utils import security_utils
+from .device_client_constants import ClientConstants
+from .device_model_msg_object import FedMLModelMsgObject
+
+
class FedMLDeviceReplicaHandler:
    """
    Worker-side executor of the master's replica reconciliation plan
    (add / remove / update replicas on this device).
    """

    def __init__(self, worker_id, request_json: dict):
        """
        Handler on the worker to actually exec the reconciliation logic (Including add, remove, update).

        e_id: unique id (i.e. endpoint_id) for each deployment
        devices_avail_gpus = {device_id1: gpu_num, device_id2: gpu_num, ...}
        request_json: json from MLOps for this deployment
        total_gpu_num: total number of gpus will be used for this deployment
        gpu_per_replica: number of gpus required per replica
        """
        self.worker_id = worker_id
        self.request_json = request_json
        self.request_msg_obj = FedMLModelMsgObject("replica_handler", request_json)

        # Identity and model metadata unpacked from the request message.
        msg_obj = self.request_msg_obj
        self.e_id = msg_obj.run_id
        self.gpu_per_replica = msg_obj.gpu_per_replica
        self.end_point_name = msg_obj.end_point_name
        self.inference_model_name = msg_obj.model_name
        self.model_version = msg_obj.model_version
        self.model_id = msg_obj.model_id

        # Diffs addressed to this worker, extracted from the master's plan.
        self.replica_num_diff = self.get_diff_replica_num_frm_request_json()
        self.replica_version_diff = self.get_diff_replica_version_frm_request_json()

        self.device_avail_gpus = self.get_device_avail_gpus_frm_db()

    def get_device_avail_gpus_frm_db(self):
        """
        Get the available gpus from db.
        """
        gpu_ids = ComputeCacheManager.get_instance().get_gpu_cache().get_device_available_gpu_ids(
            self.worker_id)
        logging.info(f"[Replica Handler] [endpoint {self.e_id} ] [worker {self.worker_id}] "
                     f"All device_avail_gpus: {gpu_ids}")
        return gpu_ids

    def get_diff_replica_num_frm_request_json(self):
        """
        Read the replica-number diff the master addressed to this worker.

        Returns e.g. {"op": "add", "curr_num": 1, "target_num": 2},
        or None when the request json carries nothing for this worker.
        """
        worker_key = str(self.worker_id)
        if "replica_num_diff" in self.request_json:
            num_diffs = self.request_json["replica_num_diff"]
            if worker_key in num_diffs:
                return num_diffs[worker_key]
        return None

    def get_diff_replica_version_frm_request_json(self):
        """
        Read the replica-version diff the master addressed to this worker.

        Returns e.g.
        {
            $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"},
            ...
        }
        or None when the request json carries nothing for this worker.
        """
        worker_key = str(self.worker_id)
        if "replica_version_diff" in self.request_json:
            version_diffs = self.request_json["replica_version_diff"]
            if worker_key in version_diffs:
                return version_diffs[worker_key]
        return None

    def reconcile_num_replica(self):
        """
        Turn this worker's replica-number diff into a concrete plan.

        Returns (prev_rank, op, op_num) where prev_rank is the highest
        currently-occupied replica rank (curr_num - 1), op is "add" or
        "remove", and op_num is how many replicas to add/remove.
        Returns (None, None, None) when there is no diff for this worker.
        Raises ValueError on an unknown op.
        """
        if not self.replica_num_diff:
            logging.info("replica_num_diff is empty, will not reconcile.")
            return None, None, None

        op = self.replica_num_diff["op"]
        if op not in ("add", "remove"):
            raise ValueError(f"op should be add or remove. Got {self.replica_num_diff['op']}")

        curr_num = self.replica_num_diff["curr_num"]
        target_num = self.replica_num_diff["target_num"]
        prev_rank = curr_num - 1
        if op == "add":
            assert target_num > curr_num
            op_num = target_num - curr_num
        else:
            assert target_num < curr_num
            op_num = curr_num - target_num
        return prev_rank, op, op_num

    def remove_replica(self, rank):
        """
        Remove replica_num replicas from device_id.
        """
        running_model_name = ClientConstants.get_running_model_name(
            self.end_point_name, self.inference_model_name, self.model_version, self.e_id, self.model_id,
            self.worker_id)
        # Container name layout: <prefix>__<hash-of-running-model-name>__<rank>
        container_name = "__".join([
            "{}".format(ClientConstants.FEDML_DEFAULT_SERVER_CONTAINER_NAME_PREFIX),
            security_utils.get_content_hash(running_model_name),
            str(rank)])
        logging.info(f"[Replica Handler] [Remove Replica] [Device {self.worker_id}] [Endpoint {self.e_id}]"
                     f" [Replica {rank}] [Container {container_name}]")
        ContainerUtils.get_instance().remove_container(container_name)

    def reconcile_replica_version(self):
        """
        Return a list of replica_rank to be updated.
        Giving {
                $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"},
                $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"}
             }
        for all replicas, update the version. i.e. stop and remove the container, records in db, then start the new
        container, and report when the new container is ready.
        Returns (ranks, "update") where ranks are zero-based (replica_no - 1),
        or (None, None) when there is no version diff for this worker.
        """
        if not self.replica_version_diff:
            logging.info("replica_version_diff is empty, will not reconcile.")
            return None, None

        ranks_to_update = [int(replica_no) - 1 for replica_no in self.replica_version_diff]
        return ranks_to_update, "update"
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py b/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py
deleted file mode 100755
index 41e2e5cd44..0000000000
--- a/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py
+++ /dev/null
@@ -1,2160 +0,0 @@
-import copy
-import json
-import logging
-import multiprocessing
-import platform
-import sys
-
-from multiprocessing import Process
-import os
-import shutil
-import subprocess
-import threading
-
-import time
-import traceback
-import urllib
-import uuid
-import zipfile
-from os import listdir
-
-import requests
-import torch
-
-import fedml
-from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils
-
-from ..comm_utils import sys_utils
-from .device_server_data_interface import FedMLServerDataInterface
-from ..scheduler_core.endpoint_sync_protocol import FedMLEndpointSyncProtocol
-from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
-
-from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager
-from ..comm_utils.yaml_utils import load_yaml_config
-from .device_client_constants import ClientConstants
-from .device_server_constants import ServerConstants
-
-from ....core.mlops.mlops_metrics import MLOpsMetrics
-
-from ....core.mlops.mlops_configs import MLOpsConfigs
-from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
-from ....core.mlops.mlops_status import MLOpsStatus
-from ..comm_utils.sys_utils import get_sys_runner_info, get_python_program
-from .device_model_cache import FedMLModelCache
-from .device_model_msg_object import FedMLModelMsgObject
-#from ....serving.fedml_server import FedMLModelServingServer
-from ....core.mlops.mlops_utils import MLOpsUtils
-from ..comm_utils.constants import SchedulerConstants
-from .device_model_db import FedMLModelDatabase
-
-
-class RunnerError(BaseException):
-    """ Runner failed. """
-    pass
-
-
-class RunnerCompletedError(Exception):
-    """ Runner completed. """
-    pass
-
-
-class FedMLServerRunner:
-    FEDML_CLOUD_SERVER_PREFIX = "fedml-server-run-"
-
-    def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id=0):
-        self.inference_gateway_process = None
-        self.local_api_process = None
-        self.run_process_event = None
-        self.run_process_event_map = dict()
-        self.run_process_completed_event = None
-        self.run_process_completed_event_map = dict()
-        self.run_as_cloud_agent = False
-        self.run_as_cloud_server = False
-        self.run_as_edge_server_and_agent = False
-        self.run_as_cloud_server_and_agent = False
-        self.fedml_packages_base_dir = None
-        self.fedml_packages_unzip_dir = None
-        self.mqtt_mgr = None
-        self.running_request_json = dict()
-        self.run_id = run_id
-        self.client_mqtt_mgr = None
-        self.client_mqtt_is_connected = False
-        self.client_mqtt_lock = None
-        self.unique_device_id = None
-        self.edge_id = edge_id
-        self.server_agent_id = 0
-        if request_json is not None:
-            self.server_agent_id = request_json.get("server_id", 0)
-        self.process = None
-        self.args = args
-        self.request_json = copy.deepcopy(request_json)
-        self.version = args.version
-        self.device_id = args.device_id
-        self.cur_dir = os.path.split(os.path.realpath(__file__))[0]
-        if args.current_running_dir is not None:
-            self.cur_dir = args.current_running_dir
-
-        self.agent_config = agent_config
-        self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data")
-        self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data")
-        self.fedml_data_dir = self.fedml_data_base_package_dir
-        self.fedml_config_dir = os.path.join("/", "fedml", "conf")
-
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {}
-
-        self.mlops_metrics = None
-        self.run_status = None
-        self.infer_host = "127.0.0.1"
-        self.redis_addr = "local"
-        self.redis_port = "6379"
-        self.redis_password = "fedml_default"
-
-        self.slave_deployment_statuses_mapping = dict()
-        self.slave_deployment_results_mapping = dict()
-        self.slave_update_result_mapping = dict()
-
-        self.model_runner_mapping = dict()
-        self.ntp_offset = MLOpsUtils.get_ntp_offset()
-
-        self.subscribed_topics = list()
-        self.user_name = None
-
-    def build_dynamic_constrain_variables(self, run_id, run_config):
-        pass
-
-    def unzip_file(self, zip_file, unzip_file_path):
-        unziped_file_name = ""
-        if zipfile.is_zipfile(zip_file):
-            with zipfile.ZipFile(zip_file, "r") as zipf:
-                zipf.extractall(unzip_file_path)
-                unziped_file_name = zipf.namelist()[0]
-
-        return unziped_file_name
-
-    def package_download_progress(self, count, blksize, filesize):
-        self.check_runner_stop_event()
-
-        downloaded = count * blksize
-        downloaded = filesize if downloaded > filesize else downloaded
-        progress = (downloaded / filesize * 100) if filesize != 0 else 0
-        progress_int = int(progress)
-        downloaded_kb = format(downloaded / 1024, '.2f')
-
-        # since this hook funtion is stateless, we need a state to avoid printing progress repeatly
-        if count == 0:
-            self.prev_download_progress = 0
-        if progress_int != self.prev_download_progress and progress_int % 5 == 0:
-            self.prev_download_progress = progress_int
-            logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int))
-
-    def retrieve_and_unzip_package(self, package_name, package_url):
-        local_package_path = ServerConstants.get_model_package_dir()
-        if not os.path.exists(local_package_path):
-            os.makedirs(local_package_path, exist_ok=True)
-        local_package_file = "{}.zip".format(os.path.join(local_package_path, package_name))
-        if os.path.exists(local_package_file):
-            os.remove(local_package_file)
-        urllib.request.urlretrieve(package_url, filename=None, reporthook=self.package_download_progress) # do not rename
-        unzip_package_path = ServerConstants.get_model_dir()
-        self.fedml_packages_base_dir = unzip_package_path
-        try:
-            shutil.rmtree(
-                os.path.join(unzip_package_path, package_name), ignore_errors=True
-            )
-        except Exception as e:
-            pass
-        logging.info("local_package_file {}, unzip_package_path {}".format(
-            local_package_file, unzip_package_path))
-        package_name = self.unzip_file(local_package_file, unzip_package_path)
-        unzip_package_path = os.path.join(unzip_package_path, package_name)
-        return unzip_package_path
-
-    def update_local_fedml_config(self, run_id, run_config):
-        model_config = run_config
-        model_name = model_config["model_name"]
-        model_storage_url = model_config["model_storage_url"]
-        scale_min = model_config.get("instance_scale_min", 0)
-        scale_max = model_config.get("instance_scale_max", 0)
-        inference_engine = model_config.get("inference_engine", 0)
-        inference_end_point_id = run_id
-
-        # Copy config file from the client
-        unzip_package_path = self.retrieve_and_unzip_package(
-            model_name, model_storage_url
-        )
-        fedml_local_config_file = os.path.join(unzip_package_path, "fedml_model_config.yaml")
-
-        # Load the above config to memory
-        package_conf_object = {}
-        if os.path.exists(fedml_local_config_file):
-            package_conf_object = load_yaml_config(fedml_local_config_file)
-
-        return unzip_package_path, package_conf_object
-
-    def get_usr_indicated_token(self, request_json) -> str:
-        usr_indicated_token = ""
-        if "parameters" in request_json and "authentication_token" in request_json["parameters"]:
-            usr_indicated_token = request_json["parameters"]["authentication_token"]
-        return usr_indicated_token
-
-    def build_dynamic_args(self, run_config, package_conf_object, base_dir):
-        pass
-
-    def run(self, process_event, completed_event):
-        # print(f"Model master runner process id {os.getpid()}, run id {self.run_id}")
-
-        if platform.system() != "Windows":
-            os.setsid()
-
-        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
-        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')
-
-        self.run_process_event = process_event
-        self.run_process_completed_event = completed_event
-        run_id = self.request_json.get("end_point_id")
-
-        try:
-            MLOpsUtils.set_ntp_offset(self.ntp_offset)
-
-            self.setup_client_mqtt_mgr()
-
-            self.run_impl()
-        except RunnerError:
-            logging.info("Runner stopped.")
-            self.mlops_metrics.report_server_training_status(
-                self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED,
-                is_from_model=True, edge_id=self.edge_id)
-        except RunnerCompletedError:
-            logging.info("Runner completed.")
-        except Exception as e:
-            logging.error("Runner exits with exceptions.")
-            logging.error(traceback.format_exc())
-            logging.error(e)
-            self.mlops_metrics.report_server_training_status(
-                self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED,
-                is_from_model=True, edge_id=self.edge_id)
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
-            if self.mlops_metrics is not None:
-                self.mlops_metrics.stop_sys_perf()
-            time.sleep(3)
-            sys.exit(1)
-        finally:
-            logging.info("Release resources.")
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
-            if self.mlops_metrics is not None:
-                self.mlops_metrics.stop_sys_perf()
-            time.sleep(3)
-            if not self.run_as_cloud_server:
-                self.release_client_mqtt_mgr()
-
-    def parse_model_run_params(self, running_json):
-        run_id = running_json["end_point_id"]
-        end_point_name = running_json["end_point_name"]
-        token = running_json["token"]
-        user_id = running_json["user_id"]
-        user_name = running_json["user_name"]
-        device_ids = running_json["device_ids"]
-        device_objs = running_json["device_objs"]
-
-        model_config = running_json["model_config"]
-        model_name = model_config["model_name"]
-        model_id = model_config["model_id"]
-        model_storage_url = model_config["model_storage_url"]
-        scale_min = model_config.get("instance_scale_min", 0)
-        scale_max = model_config.get("instance_scale_max", 0)
-        inference_engine = model_config.get("inference_engine", 0)
-        model_is_from_open = model_config["is_from_open"]
-        inference_end_point_id = run_id
-        use_gpu = "gpu"  # TODO: Get GPU from device infos
-        memory_size = "256m"  # TODO: Get Memory size for each instance
-        model_version = model_config["model_version"]        
-        model_config_parameters = running_json.get("parameters", {})
-
-        inference_port = model_config_parameters.get("server_internal_port",    # Internal port is for the gateway
-                                                     ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
-        inference_port_external = model_config_parameters.get("server_external_port", inference_port)
-
-        return run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \
-            model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \
-            inference_end_point_id, use_gpu, memory_size, model_version, inference_port
-
-    def inference_run(self):
-        # run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \
-        #     model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \
-        #     inference_end_point_id, use_gpu, memory_size, model_version, inference_port = self.parse_model_run_params(self.request_json)
-        #
-        # inference_server = FedMLModelServingServer(self.args,
-        #                                            end_point_name,
-        #                                            model_name,
-        #                                            model_version,
-        #                                            inference_request=self.request_json)
-        # inference_server.run()
-        pass
-
-    def run_impl(self):
-        run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \
-            model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \
-            inference_end_point_id, use_gpu, memory_size, model_version, inference_port = self.parse_model_run_params(self.request_json)
-
-        logging.info("model deployment request: {}".format(self.request_json))
-
-        # Initiate an FedMLInferenceServer object which the request will be forwarded to
-        # server_runner = FedMLServerRunner(
-        #     self.args, run_id=self.run_id, request_json=self.request_json, agent_config=self.agent_config
-        # )
-        # inference_process = Process(target=server_runner.inference_run)
-        # inference_process.start()
-
-        logging.info("send deployment stages...")
-
-        self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id)
-
-        self.check_runner_stop_event()
-
-        # Send stage: MODEL_DEPLOYMENT_STAGE4 = "ForwardRequest2Slave"
-        self.send_deployment_stages(self.run_id, model_name, model_id,
-                                    "",
-                                    ServerConstants.MODEL_DEPLOYMENT_STAGE4["index"],
-                                    ServerConstants.MODEL_DEPLOYMENT_STAGE4["text"],
-                                    ServerConstants.MODEL_DEPLOYMENT_STAGE4["text"])
-
-        self.args.run_id = self.run_id
-        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
-
-        # report server running status
-        logging.info("report deployment status...")
-        self.check_runner_stop_event()
-        self.mlops_metrics.report_server_training_status(
-            run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_STARTING,
-            is_from_model=True, running_json=json.dumps(self.request_json), edge_id=self.edge_id)
-        self.send_deployment_status(self.run_id, end_point_name,
-                                    model_name, "",
-                                    ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYING)
-
-        # start unified inference server
-        self.start_device_inference_gateway(
-            run_id, end_point_name, model_id, model_name, model_version, inference_port=inference_port)
-
-        # start inference monitor server
-        self.stop_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version)
-        self.start_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version)
-
-        # Changed the status to "IDLE"
-        self.mlops_metrics.broadcast_server_training_status(
-            run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED,
-            is_from_model=True, edge_id=self.edge_id)
-
-        # forward deployment request to slave devices
-        logging.info("send the model inference request to slave devices...")
-        self.check_runner_stop_event()
-
-        # handle "op:replace"
-        first_chunk_devices_update = self.request_json.get("first_chunk_devices_update", list())
-        if len(first_chunk_devices_update) > 0:
-            self.send_first_scroll_update_msg(first_chunk_devices_update)
-
-        # handle "op:add"
-        should_added_devices = self.send_deployment_start_request_to_edges()
-
-        # handle "op:delete"
-        self.send_deployment_delete_request_to_edges(payload=json.dumps(self.request_json), model_msg_object=None)
-
-        if len(should_added_devices) == 0 and len(first_chunk_devices_update) == 0:
-            '''
-            If just including delete op, we do not need to wait for the slave devices to finish the delete.
-            '''
-            ip = self.get_ip_address(self.request_json)
-            master_port = os.getenv("FEDML_MASTER_PORT", None)
-            if master_port is not None:
-                inference_port = int(master_port)
-            model_inference_port = inference_port
-            if ip.startswith("http://") or ip.startswith("https://"):
-                model_inference_url = "{}/api/v1/predict".format(ip)
-            else:
-                model_inference_url = "http://{}:{}/api/v1/predict".format(ip, model_inference_port)
-
-            self.send_deployment_status(self.run_id, end_point_name,
-                                        model_name,
-                                        model_inference_url,
-                                        ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED)
-
-        while True:
-            self.check_runner_stop_event()
-            time.sleep(3)
-
-    def check_runner_stop_event(self):
-        if self.run_process_event is not None and self.run_process_event.is_set():
-            logging.info("Received stopping event.")
-            raise RunnerError("Runner stopped")
-
-        if self.run_process_completed_event is not None and self.run_process_completed_event.is_set():
-            logging.info("Received completed event.")
-            raise RunnerCompletedError("Runner completed")
-
-    def start_device_inference_gateway(
-            self, run_id, end_point_name, model_id,
-            model_name, model_version, inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT):
-        # start unified inference server
-        running_model_name = ServerConstants.get_running_model_name(end_point_name,
-                                                                    model_name, model_version, run_id, model_id)
-        python_program = get_python_program()
-        master_port = os.getenv("FEDML_MASTER_PORT", None)
-        if master_port is not None:
-            inference_port = int(master_port)
-        if not ServerConstants.is_running_on_k8s():
-            logging.info(f"start the model inference gateway, end point {run_id}, "
-                         f"model name {model_name} at port {inference_port}...")
-            self.check_runner_stop_event()
-
-            use_mqtt_inference = os.getenv("FEDML_USE_MQTT_INFERENCE", "False")
-            use_mqtt_inference = True if use_mqtt_inference.lower() == 'true' else False
-            use_worker_gateway = os.getenv("FEDML_USE_WORKER_GATEWAY", "False")
-            use_worker_gateway = True if use_worker_gateway.lower() == 'true' else False
-            inference_gw_cmd = "fedml.computing.scheduler.model_scheduler.device_model_inference:api"
-            inference_gateway_pids = RunProcessUtils.get_pid_from_cmd_line(inference_gw_cmd)
-            if inference_gateway_pids is None or len(inference_gateway_pids) <= 0:
-                cur_dir = os.path.dirname(__file__)
-                fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
-                connect_str = "@FEDML@"
-                ext_info = sys_utils.random1(
-                    self.agent_config["mqtt_config"]["BROKER_HOST"] + connect_str +
-                    str(self.agent_config["mqtt_config"]["BROKER_PORT"]) + connect_str +
-                    self.agent_config["mqtt_config"]["MQTT_USER"] + connect_str +
-                    self.agent_config["mqtt_config"]["MQTT_PWD"] + connect_str +
-                    str(self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"]), "FEDML@9999GREAT")
-                self.inference_gateway_process = ServerConstants.exec_console_with_script(
-                    "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" "
-                    "END_POINT_NAME=\"{}\" "
-                    "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" "
-                    "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} "
-                    "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
-                    "--log-level critical".format(
-                        self.redis_addr, self.redis_port, self.redis_password,
-                        end_point_name,
-                        model_name, model_version, "", self.args.version,
-                        use_mqtt_inference, use_worker_gateway, ext_info,
-                        python_program, inference_gw_cmd, str(inference_port), fedml_base_dir
-                    ),
-                    should_capture_stdout=False,
-                    should_capture_stderr=False
-                )
-
-    def start_device_inference_monitor(self, run_id, end_point_name,
-                                       model_id, model_name, model_version, check_stopped_event=True):
-        # start inference monitor server
-        logging.info(f"start the model inference monitor, end point {run_id}, model name {model_name}...")
-        if check_stopped_event:
-            self.check_runner_stop_event()
-        run_id_str = str(run_id)
-        pip_source_dir = os.path.dirname(__file__)
-        monitor_file = os.path.join(pip_source_dir, "device_model_monitor.py")
-        python_program = get_python_program()
-        running_model_name = ServerConstants.get_running_model_name(end_point_name,
-                                                                    model_name, model_version, run_id, model_id)
-        self.monitor_process = ServerConstants.exec_console_with_shell_script_list(
-            [
-                python_program,
-                monitor_file,
-                "-v",
-                self.args.version,
-                "-ep",
-                run_id_str,
-                "-epn",
-                str(end_point_name),
-                "-mi",
-                str(model_id),
-                "-mn",
-                model_name,
-                "-mv",
-                model_version,
-                "-iu",
-                "infer_url",
-                "-ra",
-                self.redis_addr,
-                "-rp",
-                self.redis_port,
-                "-rpw",
-                self.redis_password
-            ],
-            should_capture_stdout=False,
-            should_capture_stderr=False
-        )
-
-    def stop_device_inference_monitor(self, run_id, end_point_name, model_id, model_name, model_version):
-        # stop inference monitor server
-        logging.info(f"stop the model inference monitor, end point {run_id}, model name {model_name}...")
-        sys_utils.cleanup_model_monitor_processes(run_id, end_point_name,
-                                                  model_id, model_name, model_version)
-
-    def cleanup_run_when_finished(self):
-        logging.info("Cleanup run successfully when finished.")
-
-        self.mlops_metrics.broadcast_server_training_status(
-            self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED,
-            is_from_model=True, edge_id=self.edge_id
-        )
-
-        try:
-            self.mlops_metrics.stop_sys_perf()
-        except Exception as ex:
-            pass
-
-        time.sleep(1)
-
-        try:
-            local_package_path = ServerConstants.get_package_download_dir()
-            for package_file in listdir(local_package_path):
-                if os.path.basename(package_file).startswith("run_" + str(self.run_id)):
-                    shutil.rmtree(os.path.join(local_package_path, package_file), ignore_errors=True)
-        except Exception as e:
-            pass
-
-    def cleanup_run_when_starting_failed(self):
-        logging.info("Cleanup run successfully when starting failed.")
-
-        self.mlops_metrics.broadcast_server_training_status(
-            self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED,
-            is_from_model=True, edge_id=self.edge_id)
-
-        try:
-            self.mlops_metrics.stop_sys_perf()
-        except Exception as ex:
-            pass
-
-        time.sleep(1)
-
-        try:
-            local_package_path = ServerConstants.get_package_download_dir()
-            for package_file in listdir(local_package_path):
-                if os.path.basename(package_file).startswith("run_" + str(self.run_id)):
-                    shutil.rmtree(os.path.join(local_package_path, package_file), ignore_errors=True)
-        except Exception as e:
-            pass
-    
-    def cleanup_run_when_deploy_failed(self):
-        topic = f"model_ops/model_device/delete_deployment/{self.edge_id}"
-        self.callback_delete_deployment(topic, payload=json.dumps(self.request_json))
-
-    def callback_deployment_result_message(self, topic=None, payload=None):
-        # Save deployment result to local cache
-        topic_splits = str(topic).split('/')
-        device_id = topic_splits[-1]
-        payload_json = json.loads(payload)
-        end_point_id = payload_json["end_point_id"]
-        end_point_name = payload_json["end_point_name"]
-        model_id = payload_json["model_id"]
-        model_name = payload_json["model_name"]
-        model_version = payload_json["model_version"]
-        model_status = payload_json["model_status"]
-        run_id_str = str(end_point_id)
-        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-        FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-            set_deployment_result(end_point_id, end_point_name,
-                                  model_name, model_version,
-                                  device_id, payload)
-        if self.slave_deployment_results_mapping.get(run_id_str, None) is None:
-            self.slave_deployment_results_mapping[run_id_str] = dict()
-        self.slave_deployment_results_mapping[run_id_str][str(device_id)] = model_status
-
-        logging.info("callback_deployment_result_message: topic {}, payload {}, mapping {}.".format(
-            topic, payload, self.slave_deployment_results_mapping[run_id_str]))
-
-        request_json = self.running_request_json.get(run_id_str, None)
-        if request_json is None:
-            logging.error(f"The endpoint {end_point_id} is not running.")
-            self.send_deployment_status(
-                end_point_id, end_point_name, payload_json["model_name"], "",
-                ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
-            return
-
-        all_device_id_list = request_json["device_ids"]
-
-        device_id_list = []
-
-        for device in all_device_id_list:
-            if str(device) == str(self.edge_id):
-                continue
-                
-            if device in request_json["diff_devices"] and \
-                    (request_json["diff_devices"][device] == ServerConstants.DEVICE_DIFF_ADD_OPERATION or
-                     request_json["diff_devices"][device] == ServerConstants.DEVICE_DIFF_REPLACE_OPERATION):
-                device_id_list.append(device)
-
-        if request_json["diff_devices"].get(int(device_id), None) == ServerConstants.DEVICE_DIFF_REPLACE_OPERATION:
-            if model_status == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED:
-                # TODO: Support rollback
-                return
-            else:
-                # Get record from the first message that Java mlops sent
-                total_device_objs_list = self.request_json["device_objs"]
-                device_obj_to_insert = None
-
-                for device_obj in total_device_objs_list:
-                    if device_obj["id"] == int(device_id):
-                        device_obj["status"] = ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED
-                        device_obj_to_insert = device_obj
-                        break
-                if not device_obj_to_insert:
-                    raise Exception(f"Cannot find device {device_id} in the device list {total_device_objs_list}")
-                
-                FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                    add_end_point_device_info(request_json["end_point_id"], end_point_name,
-                                              json.dumps(device_obj_to_insert))
-
-                self.send_next_scroll_update_msg(int(device_id))
-
-        if len(self.slave_deployment_results_mapping[run_id_str].keys()) >= len(device_id_list):
-            '''
-            When all the devices have finished the add / update operation
-            '''
-            failed_to_deploy_all_models = False
-            for device_item in device_id_list:
-                if device_item == self.edge_id:  # Skip the master
-                    continue
-                status = self.slave_deployment_results_mapping[run_id_str]. \
-                    get(str(device_item), ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
-                if status == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED:
-                    failed_to_deploy_all_models = True
-                    break
-
-            # Failed to deploy models.
-            if failed_to_deploy_all_models:
-                # Send stage: MODEL_DEPLOYMENT_STAGE5 = "StartInferenceIngress"
-                self.send_deployment_stages(end_point_id, model_name, model_id,
-                                            "",
-                                            ServerConstants.MODEL_DEPLOYMENT_STAGE5["index"],
-                                            ServerConstants.MODEL_DEPLOYMENT_STAGE5["text"],
-                                            "Failed to deploy the model to all devices.")
-                FedMLModelDatabase.get_instance().delete_deployment_status(
-                    run_id_str, end_point_name, model_name, model_version=model_version)
-                FedMLModelDatabase.get_instance().delete_deployment_result(
-                    run_id_str, end_point_name, model_name, model_version=model_version)
-
-                # reset slave_deployment_results_mapping, incase user might use this for redeployment
-                self.slave_deployment_results_mapping[run_id_str] = dict()
-                return
-
-            # 1. We should generate one unified inference api
-            # Note that here we use the gateway port instead of the inference port that is used by the slave device
-            model_config_parameters = request_json["parameters"]
-            inference_port = model_config_parameters.get("server_internal_port", ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
-            inference_port_external = model_config_parameters.get("server_external_port", inference_port)
-            ip = self.get_ip_address(request_json)
-
-            if ip.startswith("http://") or ip.startswith("https://"):
-                model_inference_url = "{}/inference/{}".format(ip, end_point_id)
-            else:
-                model_inference_url = "http://{}:{}/inference/{}".format(ip, inference_port_external, end_point_id)
-
-            # Send stage: MODEL_DEPLOYMENT_STAGE5 = "StartInferenceIngress"
-            self.send_deployment_stages(end_point_id, model_name, model_id,
-                                        model_inference_url,
-                                        ServerConstants.MODEL_DEPLOYMENT_STAGE5["index"],
-                                        ServerConstants.MODEL_DEPLOYMENT_STAGE5["text"],
-                                        "inference url: {}".format(model_inference_url))
-
-            # 2. We should send to MBE(ModelOps Backend)
-            model_slave_url = payload_json["model_url"]
-            payload_json["model_url"] = model_inference_url
-            payload_json["port"] = inference_port_external
-            token = FedMLModelCache.get_instance(self.redis_addr, self.redis_port).get_end_point_token(end_point_id, end_point_name, model_name)
-
-            model_metadata = payload_json["model_metadata"]
-            model_inputs = model_metadata["inputs"]
-            ret_inputs = list()
-            if "type" in model_metadata and model_metadata["type"] == "default":
-                payload_json["input_json"] = {"end_point_name": end_point_name,
-                                              "model_name": model_name,
-                                              "token": str(token),
-                                              "inputs": model_inputs,
-                                              "outputs": []}
-                payload_json["output_json"] = model_metadata["outputs"]
-            else:
-                # triton model, auto generate inputs
-                for input_item in model_inputs:
-                    ret_item = input_item
-                    shape = ret_item["shape"]
-                    data_type = ret_item["datatype"]
-                    if ServerConstants.MODEL_DATA_TYPE_MAPPING[data_type] == ServerConstants.MODEL_DATA_TYPE_INT:
-                        for i in range(len(shape)):
-                            if shape[i] == -1:  # if input shape is dynamic, we set a default value 1
-                                shape[i] = 1
-                        ret_item["data"] = torch.randint(0, 1, shape).tolist()
-                    else:
-                        for i in range(len(shape)):
-                            if shape[i] == -1:  # if input shape is dynamic, we set a default value 1
-                                shape[i] = 1
-                        ret_item["data"] = torch.zeros(shape).tolist()
-                    ret_inputs.append(ret_item)
-
-                payload_json["input_json"] = {"end_point_name": end_point_name,
-                                              "model_name": model_name,
-                                              "token": str(token),
-                                              "inputs": {"inputs": ret_inputs}, # Nested inputs
-                                              "outputs": model_metadata["outputs"]}
-                payload_json["output_json"] = model_metadata["outputs"]
-            FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                set_deployment_result(end_point_id, end_point_name,
-                                      model_name, model_version,
-                                      self.edge_id, json.dumps(payload_json))
-            FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                set_end_point_activation(end_point_id, end_point_name, True)
-            self.send_deployment_results_with_payload(end_point_id, end_point_name, payload_json)
-
-            payload_json_saved = payload_json
-            payload_json_saved["model_slave_url"] = model_slave_url
-            FedMLServerDataInterface.get_instance().save_job_result(end_point_id, self.edge_id,
-                                                                    json.dumps(payload_json_saved))
-
-            self.slave_deployment_results_mapping[run_id_str] = dict()
-
-            time.sleep(3)
-            self.set_runner_completed_event(end_point_id)
-
-    def callback_deployment_status_message(self, topic=None, payload=None):
-        # Save deployment status to local cache
-        topic_splits = str(topic).split('/')
-        device_id = topic_splits[-1]
-        payload_json = json.loads(payload)
-        end_point_id = payload_json["end_point_id"]
-        end_point_name = payload_json["end_point_name"]
-        model_name = payload_json["model_name"]
-        model_version = payload_json["model_version"]
-        inference_port = payload_json.get("inference_external_api_port", ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
-        run_id_str = str(end_point_id)
-
-        model_status = payload_json["model_status"]
-        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-        FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-            set_deployment_status(end_point_id, end_point_name,
-                                  model_name, model_version,
-                                  device_id, payload)
-        if self.slave_deployment_statuses_mapping.get(run_id_str, None) is None:
-            self.slave_deployment_statuses_mapping[run_id_str] = dict()
-        self.slave_deployment_statuses_mapping[run_id_str][device_id] = model_status
-        logging.info("callback_deployment_status_message: topic {}, payload {}, mapping {}.".format(
-            topic, payload, self.slave_deployment_statuses_mapping[run_id_str]))
-
-        # When all deployments are finished
-        request_json = self.running_request_json.get(run_id_str, None)
-        if request_json is None:
-            logging.error(f"The endpoint {end_point_id} is not running.")
-            self.send_deployment_status(
-                self.run_id, end_point_name, payload_json["model_name"], "",
-                ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
-            return
-
-        device_id_list = []
-        for device in request_json["device_ids"]:
-            if str(device) == str(self.edge_id):
-                continue
-            if device in request_json["diff_devices"] and \
-                    (request_json["diff_devices"][device] == ServerConstants.DEVICE_DIFF_ADD_OPERATION or
-                     request_json["diff_devices"][device] == ServerConstants.DEVICE_DIFF_REPLACE_OPERATION):
-                device_id_list.append(device)
-
-        if len(self.slave_deployment_statuses_mapping[run_id_str].keys()) >= len(device_id_list):
-            failed_to_deploy_all_models = False
-            for device_item in device_id_list:
-                if device_item == self.edge_id: # Skip the master
-                    continue
-                status = self.slave_deployment_statuses_mapping[run_id_str]. \
-                    get(str(device_item), ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
-                if status == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED:
-                    failed_to_deploy_all_models = True
-                    break
-
-            # Failed to deploy the model to all devices
-            if failed_to_deploy_all_models:
-                FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                    set_end_point_activation(end_point_id, end_point_name, False)
-                FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                    set_end_point_status(end_point_id, end_point_name,
-                                         ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
-                self.send_deployment_status(end_point_id, end_point_name,
-                                            payload_json["model_name"], "",
-                                            ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
-                
-                time.sleep(2)
-                self.cleanup_run_when_deploy_failed()
-
-                # reset slave_deployment_statuses_mapping, incase user might use this for redeployment
-                self.slave_deployment_statuses_mapping[run_id_str] = dict()
-                return
-
-            # Send deployment finished message to ModelOps
-            ip = self.get_ip_address(request_json)
-            master_port = os.getenv("FEDML_MASTER_PORT", None)
-            if master_port is not None:
-                inference_port = int(master_port)
-            model_inference_port = inference_port
-            if ip.startswith("http://") or ip.startswith("https://"):
-                model_inference_url = "{}/inference/{}".format(ip, end_point_id)
-            else:
-                model_inference_url = "http://{}:{}/inference/{}".format(ip, model_inference_port, end_point_id)
-            FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                set_end_point_activation(end_point_id, end_point_name, True)
-            FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                set_end_point_status(end_point_id, end_point_name,
-                                     ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED)
-            self.send_deployment_status(end_point_id, end_point_name,
-                                        payload_json["model_name"],
-                                        model_inference_url,
-                                        ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED)
-            
-            # Clean the status in case next deployment
-            self.slave_deployment_statuses_mapping[run_id_str] = dict()
-
-    def send_deployment_start_request_to_edges(self):
-        run_id = self.request_json["run_id"]
-
-        edge_id_list = []
-        for device_id in self.request_json["device_ids"]:
-            if device_id in self.request_json["diff_devices"] and \
-                    (self.request_json["diff_devices"][device_id] == ServerConstants.DEVICE_DIFF_ADD_OPERATION):
-                edge_id_list.append(device_id)
-
-        logging.info("Edge ids before diff: " + str(self.request_json["device_ids"]))
-        logging.info("Edge ids diff: " + str(self.request_json["diff_devices"]))
-        logging.info("Edge ids after diff: " + str(edge_id_list))
-
-        self.request_json["master_node_ip"] = self.get_ip_address(self.request_json)
-        should_added_devices = []
-        for edge_id in edge_id_list:
-            if edge_id == self.edge_id:
-                continue
-            should_added_devices.append(edge_id)
-            # send start deployment request to each device
-            self.send_deployment_start_request_to_edge(edge_id)
-        return should_added_devices
-
-    def send_deployment_start_request_to_edge(self, edge_id):
-        topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(edge_id))
-        logging.info("start_deployment: send topic " + topic_start_deployment + " to client...")
-        self.client_mqtt_mgr.send_message_json(topic_start_deployment, json.dumps(self.request_json))
-    
-    def get_ip_address(self, request_json):
-        # OPTION 1: Use local ip
-        ip = ServerConstants.get_local_ip()
-
-        # OPTION 2: Auto detect public ip
-        if "parameters" in request_json and \
-                ServerConstants.AUTO_DETECT_PUBLIC_IP in request_json["parameters"] and \
-                request_json["parameters"][ServerConstants.AUTO_DETECT_PUBLIC_IP]:
-            ip = ServerConstants.get_public_ip()
-            logging.info("Auto detect public ip for master: " + ip)
-
-        # OPTION 3: Use user indicated ip
-        if self.infer_host is not None and self.infer_host != "127.0.0.1" and self.infer_host != "localhost":
-            ip = self.infer_host
-
-        return ip
-
-    def send_deployment_delete_request_to_edges(self, payload, model_msg_object):
-        if model_msg_object is None:    # Called after the diff operation
-            if "diff_devices" not in self.request_json or self.request_json["diff_devices"] is None:
-                return
-            else:
-                edge_id_list_to_delete = []
-                for device_id in self.request_json["diff_devices"]:
-                    if self.request_json["diff_devices"][device_id] == ServerConstants.DEVICE_DIFF_DELETE_OPERATION:
-                        edge_id_list_to_delete.append(device_id)
-                if len(edge_id_list_to_delete) == 0:
-                    return
-
-                try:
-                    FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port,
-                                                                    self.redis_password)
-
-                    # 1. Get & Delete Deployment Status in Redis / SQLite
-                    devices_status_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                        get_deployment_status_list(self.request_json["end_point_id"],
-                                                   self.request_json["end_point_name"],
-                                                   self.request_json["model_config"]["model_name"])
-                    delete_devices_status_list = []
-                    for device_status in devices_status_list:
-                        device_status_dict = json.loads(device_status)
-                        if int(device_status_dict["cache_device_id"]) in edge_id_list_to_delete:
-                            delete_devices_status_list.append(device_status)
-
-                    for delete_item in delete_devices_status_list:
-                        FedMLModelCache.get_instance(self.redis_addr, self.redis_port).delete_deployment_status(
-                            delete_item, self.request_json["end_point_id"],
-                            self.request_json["end_point_name"],
-                            self.request_json["model_config"]["model_name"]
-                        )
-
-                    # 2. Get & Delete the endpoint device info in Redis / SQLite
-                    device_objs = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                        get_end_point_device_info(self.request_json["run_id"])
-
-                    if device_objs is None:
-                        raise Exception("The device list in local redis is None")
-                    else:
-                        total_device_objs_list = json.loads(device_objs)
-                        for device_obj in total_device_objs_list:
-                            if device_obj["id"] in edge_id_list_to_delete:
-                                total_device_objs_list.remove(device_obj)
-
-                    FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_device_info(
-                        self.request_json["end_point_id"], self.request_json["end_point_name"],
-                        json.dumps(total_device_objs_list))
-                    
-                    # 3. Delete the result in deployment result list in Redis / SQLite
-                    device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                        get_deployment_result_list(self.request_json["end_point_id"],
-                                                    self.request_json["end_point_name"],
-                                                    self.request_json["model_config"]["model_name"])
-                    delete_device_result_list = []
-                    for device_result in device_result_list:
-                        device_result_dict = json.loads(device_result)
-                        if int(device_result_dict["cache_device_id"]) in edge_id_list_to_delete:
-                            delete_device_result_list.append(device_result)
-                    
-                    for delete_item in delete_device_result_list:
-                        FedMLModelCache.get_instance(self.redis_addr, self.redis_port).delete_deployment_result(
-                            delete_item, self.request_json["end_point_id"],
-                            self.request_json["end_point_name"],
-                            self.request_json["model_config"]["model_name"]
-                        )
-
-                except Exception as e:
-                    run_id = self.request_json["run_id"]
-                    error_log_path = f"~/.fedml/fedml-model-server/fedml/logs/error_delete_{run_id}.txt"
-                    if not os.path.exists(os.path.dirname(os.path.expanduser(error_log_path))):
-                        os.makedirs(os.path.dirname(os.path.expanduser(error_log_path)))
-                    with open(os.path.expanduser(error_log_path), "w") as f:
-                        f.write(str(self.request_json))
-                        f.write(str(e))
-                        f.write('\n')
-                    raise e
-
-        else:   # Delete the whole endpoint
-            edge_id_list_to_delete = model_msg_object.device_ids
-
-        # For Debug
-        if payload is not None:
-            debug_log_path = f"~/.fedml/fedml-model-server/fedml/logs/tmp_debug_delete_payload.txt"
-            if not os.path.exists(os.path.dirname(os.path.expanduser(debug_log_path))):
-                os.makedirs(os.path.dirname(os.path.expanduser(debug_log_path)))
-            with open(os.path.expanduser(debug_log_path), "w") as f:
-                f.write(str(payload))
-
-        logging.info("Device ids to be deleted: " + str(edge_id_list_to_delete))
-        for edge_id in edge_id_list_to_delete:
-            if edge_id == self.edge_id:
-                continue
-            # send delete deployment request to each model device
-            topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(edge_id))
-            logging.info("delete_deployment: send topic " + topic_delete_deployment + " to client...")
-            self.client_mqtt_mgr.send_message_json(topic_delete_deployment, payload)
-
-    def ota_upgrade(self, payload, request_json):
-        run_id = request_json["end_point_id"]
-        force_ota = False
-        ota_version = None
-
-        try:
-            parameters = request_json.get("parameters", None)
-            common_args = parameters.get("common_args", None)
-            force_ota = common_args.get("force_ota", False)
-            ota_version = common_args.get("ota_version", None)
-        except Exception as e:
-            pass
-
-        if force_ota and ota_version is not None:
-            should_upgrade = True if ota_version != fedml.__version__ else False
-            upgrade_version = ota_version
-        else:
-            try:
-                fedml_is_latest_version, local_ver, remote_ver = sys_utils.check_fedml_is_latest_version(self.version)
-            except Exception as e:
-                return
-
-            should_upgrade = False if fedml_is_latest_version else True
-            upgrade_version = remote_ver
-
-        if should_upgrade:
-            job_obj = FedMLServerDataInterface.get_instance().get_job_by_id(run_id)
-            if job_obj is None:
-                FedMLServerDataInterface.get_instance(). \
-                    save_started_job(run_id, self.edge_id, time.time(),
-                                     ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING,
-                                     ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING,
-                                     payload)
-
-            logging.info(f"Upgrade to version {upgrade_version} ...")
-
-            sys_utils.do_upgrade(self.version, upgrade_version)
-
-            raise Exception("Restarting after upgraded...")
-
    def callback_start_deployment(self, topic, payload):
        """
        Handle a start_deployment request from ModelOps: start the log
        processor, compute the device diff, cache endpoint/token info in
        Redis, subscribe to slave messages, then fork a server-runner
        process for this endpoint while reporting deployment stages 1-3.

        topic: model_ops/model_device/start_deployment/model-agent-device-id
        payload:
        {
          "timestamp": 1671440005119,
          "end_point_id": 4325,
          "token": "FCpWU",
          "state": "STARTING",
          "user_id": "105",
          "user_name": "alex.liang2",
          "device_ids": [
            693
          ],
          "device_objs": [
            {
              "device_id": "0xT3630FW2YM@MacOS.Edge.Device",
              "os_type": "MacOS",
              "id": 693,
              "ip": "1.1.1.1",
              "memory": 1024,
              "cpu": "1.7",
              "gpu": "Nvidia",
              "extra_infos": {}
            }
          ],
          "model_config": {
            "model_name": "image-model",
            "model_id": 111,
            "model_version": "v1",
            "is_from_open": 0,
            "model_storage_url": "https://fedml.s3.us-west-1.amazonaws.com/1666239314792client-package.zip",
            "instance_scale_min": 1,
            "instance_scale_max": 3,
            "inference_engine": "onnx"
          },
          "parameters": {
            "hidden_size": 128,
            "hidden_act": "gelu",
            "initializer_range": 0.02,
            "vocab_size": 30522,
            "hidden_dropout_prob": 0.1,
            "num_attention_heads": 2,
            "type_vocab_size": 2,
            "max_position_embeddings": 512,
            "num_hidden_layers": 2,
            "intermediate_size": 512,
            "attention_probs_dropout_prob": 0.1
          }
        }
        """
        # Best-effort refresh of the MLOps configs; failure is non-fatal.
        try:
            MLOpsConfigs.fetch_all_configs()
        except Exception as e:
            pass

        # get deployment params
        request_json = json.loads(payload)
        run_id = request_json["end_point_id"]
        end_point_name = request_json["end_point_name"]
        token = request_json["token"]
        user_id = request_json["user_id"]
        user_name = request_json["user_name"]
        device_ids = request_json["device_ids"]
        device_objs = request_json["device_objs"]

        model_config = request_json["model_config"]
        model_name = model_config["model_name"]
        model_id = model_config["model_id"]
        model_storage_url = model_config["model_storage_url"]
        scale_min = model_config.get("instance_scale_min", 0)
        scale_max = model_config.get("instance_scale_max", 0)
        inference_engine = model_config.get("inference_engine", 0)
        # The endpoint id doubles as the run id for inference deployments.
        inference_end_point_id = run_id

        # Start log processor for current run
        self.args.run_id = run_id
        self.args.edge_id = self.edge_id
        MLOpsRuntimeLog.get_instance(self.args).init_logs()
        MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source(
            ServerConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT)
        MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id)

        logging.info("callback_start_deployment {}".format(payload))

        # May raise to force a restart when an OTA upgrade is performed.
        self.ota_upgrade(payload, request_json)

        # Register this request as the currently running one for run_id.
        run_id = inference_end_point_id
        self.args.run_id = run_id
        self.run_id = run_id
        request_json["run_id"] = run_id
        self.request_json = request_json
        run_id_str = str(run_id)
        self.running_request_json[run_id_str] = request_json

        # Compute add/delete/replace operations against the cached device set.
        diff_devices, diff_version = self.get_diff_devices(run_id)
        self.request_json["diff_devices"] = diff_devices
        self.request_json["diff_version"] = diff_version
        self.request_json["master_node_ip"] = self.get_ip_address(self.request_json)

        # Prepare the rolling-update bookkeeping for replaced devices.
        self.init_device_update_map()

        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)

        # Target status of the devices
        FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
            set_end_point_device_info(request_json["end_point_id"], end_point_name, json.dumps(device_objs))

        # A user-supplied token (if any) overrides the one from ModelOps.
        usr_indicated_token = self.get_usr_indicated_token(request_json)
        if usr_indicated_token != "":
            logging.info(f"Change Token from{token} to {usr_indicated_token}")
            token = usr_indicated_token
        FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
            set_end_point_token(run_id, end_point_name, model_name, token)

        self.subscribe_slave_devices_message(request_json)

        # Send stage: MODEL_DEPLOYMENT_STAGE1 = "Received"
        time.sleep(2)
        self.send_deployment_stages(self.run_id, model_name, model_id,
                                    "",
                                    ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"],
                                    ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"],
                                    "Received request for end point {}".format(run_id))
        time.sleep(1)

        # Send stage: MODEL_DEPLOYMENT_STAGE2 = "Initializing"
        self.send_deployment_stages(self.run_id, model_name, model_id,
                                    "",
                                    ServerConstants.MODEL_DEPLOYMENT_STAGE2["index"],
                                    ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"],
                                    ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"])

        ServerConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, self.edge_id, run_id=run_id)
        time.sleep(1)

        if self.run_as_edge_server_and_agent:
            # Fork a dedicated runner process for this endpoint; the two events
            # let this agent signal stop / completion to the child.
            server_runner = FedMLServerRunner(
                self.args, run_id=run_id, request_json=request_json, agent_config=self.agent_config
            )
            server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent
            server_runner.edge_id = self.edge_id
            server_runner.infer_host = self.infer_host
            server_runner.redis_addr = self.redis_addr
            server_runner.redis_port = self.redis_port
            server_runner.redis_password = self.redis_password
            self.run_process_event_map[run_id_str] = multiprocessing.Event()
            self.run_process_event_map[run_id_str].clear()
            server_runner.run_process_event = self.run_process_event_map[run_id_str]
            self.run_process_completed_event_map[run_id_str] = multiprocessing.Event()
            self.run_process_completed_event_map[run_id_str].clear()
            server_runner.run_process_completed_event = self.run_process_completed_event_map[run_id_str]
            self.model_runner_mapping[run_id_str] = server_runner
            server_process = Process(target=server_runner.run, args=(
                self.run_process_event_map[run_id_str], self.run_process_completed_event_map[run_id_str]
            ))
            server_process.start()
            ServerConstants.save_run_process(run_id, server_process.pid)

            # Send stage: MODEL_DEPLOYMENT_STAGE3 = "StartRunner"
            self.send_deployment_stages(self.run_id, model_name, model_id,
                                        "",
                                        ServerConstants.MODEL_DEPLOYMENT_STAGE3["index"],
                                        ServerConstants.MODEL_DEPLOYMENT_STAGE3["text"],
                                        ServerConstants.MODEL_DEPLOYMENT_STAGE3["text"])
-
-    def get_diff_devices(self, run_id) -> (dict, dict):
-        '''
-        {device_id(int): "op: add" | "op: delete" | "op: replace"}
-        "op: add" -> need to add 
-        "op: delete" -> need to delete device
-        "op: replace" -> need to restart the container of the device on same port with new (same) model pkg
-
-        {device_id(int): "old_version"}   
-        '''
-        try:
-            logging.info(f"Get diff devices for run {run_id}")
-            request_json = self.running_request_json.get(str(run_id))
-            
-            diff_devices = {}
-            diff_version = {}
-            FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-            device_objs = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                get_end_point_device_info(run_id)
-            if device_objs is None:
-                for new_device_id in request_json["device_ids"]:
-                    diff_devices[new_device_id] = ServerConstants.DEVICE_DIFF_ADD_OPERATION
-            else:
-                device_objs_dict = json.loads(device_objs)
-                device_ids_frm_db = [d["id"] for d in device_objs_dict]
-
-                for exist_device_id in device_ids_frm_db:
-                    if exist_device_id not in request_json["device_ids"]:
-                        diff_devices[exist_device_id] = ServerConstants.DEVICE_DIFF_DELETE_OPERATION
-
-                for new_device_id in request_json["device_ids"]:
-                    if new_device_id not in device_ids_frm_db:
-                        diff_devices[new_device_id] = ServerConstants.DEVICE_DIFF_ADD_OPERATION
-                    else:
-                        if new_device_id == self.edge_id:
-                            continue
-
-                        old_version = self.should_update_device(request_json, new_device_id)
-                        if old_version:
-                            diff_devices[new_device_id] = ServerConstants.DEVICE_DIFF_REPLACE_OPERATION
-                            diff_version[new_device_id] = old_version
-                        else:
-                            pass
-            logging.info(f"Diff devices: {diff_devices}")
-        except Exception as e:
-            error_log_path = f"~/.fedml/fedml-model-server/fedml/logs/{run_id}_error.txt"
-            if not os.path.exists(os.path.dirname(os.path.expanduser(error_log_path))):
-                os.makedirs(os.path.dirname(os.path.expanduser(error_log_path)))
-            with open(os.path.expanduser(error_log_path), "w") as f:
-                f.write(str(e))
-            raise e
-        return diff_devices, diff_version
-    
-    def should_update_device(self, payload, new_device_id):
-        '''
-        Query the device info in local redis, if the device info is different from the payload, 
-        return the old model version
-        '''
-        device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                        get_deployment_result_list(self.request_json["end_point_id"],
-                                                    self.request_json["end_point_name"],
-                                                    self.request_json["model_config"]["model_name"])
-        
-        for device_result in device_result_list:
-            if device_result is None:
-                continue
-            device_result_dict = json.loads(device_result)
-            
-            if int(device_result_dict["cache_device_id"]) == new_device_id:
-                result_body = json.loads(device_result_dict["result"])
-                if result_body["model_version"] != payload["model_config"]["model_version"]:
-                    return result_body["model_version"]
-                else:
-                    return None
-        return None
-
-    def init_device_update_map(self):
-        """
-        Scroll update.
-        Send first scroll update message to the device(s), then the callback_deployment_result will handle the rest
-        """
-        self.slave_update_result_mapping[self.run_id] = {
-            "devices_need_update": [],
-            "total_updated_devices": [],
-            "curr_update_window": [],
-            "max_unavailable_rate": 0.1
-        }
-
-        for device_id, device_op in self.request_json["diff_devices"].items():
-            if device_op == ServerConstants.DEVICE_DIFF_REPLACE_OPERATION:
-                self.slave_update_result_mapping[self.run_id]["devices_need_update"].append(device_id)
-
-        total_num = len(self.slave_update_result_mapping[self.run_id]["devices_need_update"])
-
-        if total_num == 0:
-            return
-
-        max_unavailable_rate = self.request_json["parameters"].get("max_unavailable_rate", 0.1)
-
-        window_size = max(1, int(total_num * max_unavailable_rate))
-
-        first_chunk_devices_update = \
-            self.slave_update_result_mapping[self.run_id]["devices_need_update"][:window_size].copy()
-
-        self.slave_update_result_mapping[self.run_id]["curr_update_window"] = first_chunk_devices_update
-
-        self.request_json["first_chunk_devices_update"] = first_chunk_devices_update.copy()    # to Notify sub-process
-
-    def send_first_scroll_update_msg(self, first_chunk_devices_update):
-        """
-        Delete the record of the replaced device and send the deployment msg to the devices
-        """
-        if len(first_chunk_devices_update) == 0:
-            return
-
-        # Delete the record of the replaced device
-        self.delete_device_info_on_master(first_chunk_devices_update)
-
-        # Send the deployment msg to the devices, (we reuse the start_deployment msg)
-        for edge_id in first_chunk_devices_update:
-            if edge_id == self.edge_id:
-                continue
-            # send start deployment request to each device
-            self.send_deployment_start_request_to_edge(edge_id)
-        return
-    
-    def delete_device_info_on_master(self, edge_id_list_to_delete):
-        # Remove the record of the replaced device
-        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-        # 1.1 Get & Delete Deployment Status in Redis / SQLite
-        devices_status_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-            get_deployment_status_list(self.request_json["end_point_id"], self.request_json["end_point_name"],
-                                       self.request_json["model_config"]["model_name"])
-        delete_devices_status_list = []
-        for device_status in devices_status_list:
-            device_status_dict = json.loads(device_status)
-            if int(device_status_dict["cache_device_id"]) in edge_id_list_to_delete:
-                delete_devices_status_list.append(device_status)
-        
-        for delete_item in delete_devices_status_list:
-            FedMLModelCache.get_instance(self.redis_addr, self.redis_port).delete_deployment_status(
-                delete_item, self.request_json["end_point_id"],
-                self.request_json["end_point_name"],
-                self.request_json["model_config"]["model_name"]
-            )
-
-        # 1.2 Get & Delete the endpoint device info in Redis / SQLite
-        device_objs = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-            get_end_point_device_info(self.request_json["run_id"])
-
-        if device_objs is None:
-            return
-        
-        total_device_objs_list = json.loads(device_objs)
-        for device_obj in total_device_objs_list:
-            if device_obj["id"] in edge_id_list_to_delete:
-                total_device_objs_list.remove(device_obj)
-        
-        FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_device_info(
-            self.request_json["end_point_id"], self.request_json["end_point_name"],
-            json.dumps(total_device_objs_list))
-
-        # 1.3 Delete the result in deployment result list in Redis / SQLite
-        device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-            get_deployment_result_list(self.request_json["end_point_id"], self.request_json["end_point_name"],
-                                       self.request_json["model_config"]["model_name"])
-        delete_device_result_list = []
-        for device_result in device_result_list:
-            device_result_dict = json.loads(device_result)
-            if int(device_result_dict["cache_device_id"]) in edge_id_list_to_delete:
-                delete_device_result_list.append(device_result)
-        
-        for delete_item in delete_device_result_list:
-            FedMLModelCache.get_instance(self.redis_addr, self.redis_port).delete_deployment_result(
-                delete_item, self.request_json["end_point_id"],
-                self.request_json["end_point_name"],
-                self.request_json["model_config"]["model_name"]
-            )
-        
-        logging.info(f"Deleted the record of the replaced device {edge_id_list_to_delete}")
-
-    def send_next_scroll_update_msg(self, device_id: int):
-        this_run_meta_data = self.slave_update_result_mapping[self.run_id]
-
-        devices_need_update = this_run_meta_data["devices_need_update"]
-        devices_updated = this_run_meta_data["total_updated_devices"]
-        curr_update_window = this_run_meta_data["curr_update_window"]
-        max_unavailable_rate = this_run_meta_data["max_unavailable_rate"]
-
-        if (device_id not in devices_need_update) or (device_id in devices_updated):
-            # Prevent duplicate message / cross talk
-            # TODO: Check the cross talk if multiple run update the same device
-            logging.info(f"Device {device_id} is not in the update window nor need to be updated")
-            return
-
-        devices_updated.append(device_id)
-        curr_update_window.remove(device_id)
-
-        logging.info(f"Current update window {curr_update_window} after deleting: Device {device_id}")
-
-        if len(curr_update_window) == 0:
-            remain_devices = list(set(devices_need_update) - set(devices_updated))
-            logging.info(f"Devices need to be updated: {remain_devices}")
-            if len(remain_devices) == 0:    # All devices are updated
-                return
-            else:
-                window_size = max(1, int(len(remain_devices) * max_unavailable_rate))
-                edges_in_window = remain_devices[:window_size]
-                logging.info(f"Devices in next round window: {edges_in_window}")
-                curr_update_window = edges_in_window.copy()     # Slide the window
-                self.slave_update_result_mapping[self.run_id]["curr_update_window"] = edges_in_window.copy()
-
-                self.delete_device_info_on_master(edges_in_window)
-
-                # Send the deployment msg to the devices, (we reuse the deployment msg)
-                for edge_id in edges_in_window:
-                    if edge_id == self.edge_id:
-                        continue
-                    self.send_deployment_start_request_to_edge(edge_id)
-        else:
-            pass    # Wait for the callback of other devices in this window
-
-    def callback_activate_deployment(self, topic, payload):
-        logging.info("callback_activate_deployment: topic = %s, payload = %s" % (topic, payload))
-
-        # Parse payload as the model message object.
-        model_msg_object = FedMLModelMsgObject(topic, payload)
-
-        # Get the previous deployment status.
-        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-        endpoint_status = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-            get_end_point_status(model_msg_object.inference_end_point_id)
-        if endpoint_status != ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED:
-            return
-
-        # Set end point as activated status
-        FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_activation(
-            model_msg_object.inference_end_point_id, model_msg_object.end_point_name, True)
-
-    def callback_deactivate_deployment(self, topic, payload):
-        logging.info("callback_deactivate_deployment: topic = %s, payload = %s" % (topic, payload))
-
-        # Parse payload as the model message object.
-        model_msg_object = FedMLModelMsgObject(topic, payload)
-
-        # Get the endpoint status
-        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-        endpoint_status = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-            get_end_point_status(model_msg_object.inference_end_point_id)
-        if endpoint_status != ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED:
-            return
-
-        # Set end point as deactivated status
-        FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_activation(
-            model_msg_object.inference_end_point_id, model_msg_object.model_name, False)
-
-    def set_runner_stopped_event(self, run_id):
-        run_id_str = str(run_id)
-        server_runner = self.model_runner_mapping.get(run_id_str, None)
-        if server_runner is not None:
-            if server_runner.run_process_event is not None:
-                server_runner.run_process_event.set()
-            self.model_runner_mapping.pop(run_id_str)
-
-    def set_runner_completed_event(self, run_id):
-        run_id_str = str(run_id)
-        server_runner = self.model_runner_mapping.get(run_id_str, None)
-        if server_runner is not None:
-            if server_runner.run_process_completed_event is not None:
-                server_runner.run_process_completed_event.set()
-            self.model_runner_mapping.pop(run_id_str)
-
-    def callback_delete_deployment(self, topic, payload):
-        logging.info("callback_delete_deployment: topic = %s, payload = %s" % (topic, payload))
-
-        # Parse payload as the model message object.
-        model_msg_object = FedMLModelMsgObject(topic, payload)
-
-        # Set end point as deactivated status
-        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-        FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-            set_end_point_activation(model_msg_object.inference_end_point_id,
-                                     model_msg_object.end_point_name, False)
-        FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-            delete_end_point(model_msg_object.inference_end_point_id, model_msg_object.end_point_name, model_msg_object.model_name, model_msg_object.model_version)
-
-        self.send_deployment_delete_request_to_edges(payload, model_msg_object)
-
-        self.set_runner_stopped_event(model_msg_object.run_id)
-
-        self.stop_device_inference_monitor(model_msg_object.run_id, model_msg_object.end_point_name,
-                                           model_msg_object.model_id, model_msg_object.model_name,
-                                           model_msg_object.model_version)
-
-        FedMLServerDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id)
-        FedMLModelDatabase.get_instance().delete_deployment_status(
-            model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name,
-            model_version=model_msg_object.model_version)
-        FedMLModelDatabase.get_instance().delete_deployment_result(
-            model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name,
-            model_version=model_msg_object.model_version)
-        FedMLModelDatabase.get_instance().delete_deployment_run_info(
-            end_point_id=model_msg_object.inference_end_point_id)
-
-    def send_deployment_results_with_payload(self, end_point_id, end_point_name, payload):
-        self.send_deployment_results(end_point_id, end_point_name,
-                                     payload["model_name"], payload["model_url"],
-                                     payload["model_version"], payload["port"],
-                                     payload["inference_engine"],
-                                     payload["model_metadata"],
-                                     payload["model_config"],
-                                     payload["input_json"],
-                                     payload["output_json"])
-
-    def send_deployment_results(self, end_point_id, end_point_name,
-                                model_name, model_inference_url,
-                                model_version, inference_port, inference_engine,
-                                model_metadata, model_config, input_json, output_json):
-        deployment_results_topic_prefix = "model_ops/model_device/return_deployment_result"
-        deployment_results_topic = "{}/{}".format(deployment_results_topic_prefix, end_point_id)
-        deployment_results_payload = {"end_point_id": end_point_id, "end_point_name": end_point_name,
-                                      "model_name": model_name, "model_url": model_inference_url,
-                                      "version": model_version, "port": inference_port,
-                                      "inference_engine": inference_engine,
-                                      "model_metadata": model_metadata,
-                                      "model_config": model_config,
-                                      "input_json": input_json,
-                                      "output_json": output_json,
-                                      "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))}
-        logging.info(f"[Server] deployment_results_payload to mlops: {deployment_results_payload}")
-
-        self.client_mqtt_mgr.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload))
-        self.client_mqtt_mgr.send_message_json(deployment_results_topic_prefix, json.dumps(deployment_results_payload))
-
-    def send_deployment_status(self, end_point_id, end_point_name, model_name, model_inference_url, model_status):
-        deployment_status_topic_prefix = "model_ops/model_device/return_deployment_status"
-        deployment_status_topic = "{}/{}".format(deployment_status_topic_prefix, end_point_id)
-        deployment_status_payload = {"end_point_id": end_point_id, "end_point_name": end_point_name,
-                                     "model_name": model_name,
-                                     "model_url": model_inference_url,
-                                     "model_status": model_status,
-                                     "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))}
-        logging.info(f"[Server] deployment_status_payload to mlops: {deployment_status_payload}")
-
-        self.client_mqtt_mgr.send_message_json(deployment_status_topic, json.dumps(deployment_status_payload))
-        self.client_mqtt_mgr.send_message_json(deployment_status_topic_prefix, json.dumps(deployment_status_payload))
-
-    def send_deployment_stages(self, end_point_id, model_name, model_id, model_inference_url,
-                               model_stages_index, model_stages_title, model_stage_detail):
-        deployment_stages_topic_prefix = "model_ops/model_device/return_deployment_stages"
-        deployment_stages_topic = "{}/{}".format(deployment_stages_topic_prefix, end_point_id)
-        deployment_stages_payload = {"model_name": model_name,
-                                     "model_id": model_id,
-                                     "model_url": model_inference_url,
-                                     "end_point_id": end_point_id,
-                                     "model_stage_index": model_stages_index,
-                                     "model_stage_title": model_stages_title,
-                                     "model_stage_detail": model_stage_detail,
-                                     "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))}
-        logging.info("-------- Stages{}:{} --------".format(model_stages_index, deployment_stages_payload))
-
-        self.client_mqtt_mgr.send_message_json(deployment_stages_topic, json.dumps(deployment_stages_payload))
-        self.client_mqtt_mgr.send_message_json(deployment_stages_topic_prefix, json.dumps(deployment_stages_payload))
-
-    def on_client_mqtt_disconnected(self, mqtt_client_object):
-        if self.client_mqtt_lock is None:
-            self.client_mqtt_lock = threading.Lock()
-
-        self.client_mqtt_lock.acquire()
-        self.client_mqtt_is_connected = False
-        self.client_mqtt_lock.release()
-
-        logging.info("on_client_mqtt_disconnected: {}.".format(self.client_mqtt_is_connected))
-
-    def on_client_mqtt_connected(self, mqtt_client_object):
-        if self.mlops_metrics is None:
-            self.mlops_metrics = MLOpsMetrics()
-
-        self.mlops_metrics.set_messenger(self.client_mqtt_mgr)
-        self.mlops_metrics.run_id = self.run_id
-        self.mlops_metrics.edge_id = self.edge_id
-        self.mlops_metrics.server_agent_id = self.server_agent_id
-
-        if self.client_mqtt_lock is None:
-            self.client_mqtt_lock = threading.Lock()
-
-        self.client_mqtt_lock.acquire()
-        self.client_mqtt_is_connected = True
-        self.client_mqtt_lock.release()
-
-        # logging.info("on_client_mqtt_connected: {}.".format(self.client_mqtt_is_connected))
-
-    def setup_client_mqtt_mgr(self):
-        if self.client_mqtt_mgr is not None:
-            return
-
-        if self.client_mqtt_lock is None:
-            self.client_mqtt_lock = threading.Lock()
-
-        # logging.info(
-        #     "server agent config: {},{}".format(
-        #         self.agent_config["mqtt_config"]["BROKER_HOST"], self.agent_config["mqtt_config"]["BROKER_PORT"]
-        #     )
-        # )
-
-        self.client_mqtt_mgr = MqttManager(
-            self.agent_config["mqtt_config"]["BROKER_HOST"],
-            self.agent_config["mqtt_config"]["BROKER_PORT"],
-            self.agent_config["mqtt_config"]["MQTT_USER"],
-            self.agent_config["mqtt_config"]["MQTT_PWD"],
-            self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"],
-            "FedML_ModelServerAgent_Metrics_@{}@_{}_{}_{}".format(self.user_name, self.args.current_device_id,
-                                                             str(os.getpid()),
-                                                             str(uuid.uuid4()))
-        )
-        self.client_mqtt_mgr.add_connected_listener(self.on_client_mqtt_connected)
-        self.client_mqtt_mgr.add_disconnected_listener(self.on_client_mqtt_disconnected)
-        self.client_mqtt_mgr.connect()
-        self.client_mqtt_mgr.loop_start()
-
-        if self.mlops_metrics is None:
-            self.mlops_metrics = MLOpsMetrics()
-        self.mlops_metrics.set_messenger(self.client_mqtt_mgr)
-        self.mlops_metrics.run_id = self.run_id
-        self.mlops_metrics.edge_id = self.edge_id
-        self.mlops_metrics.server_agent_id = self.server_agent_id
-
-    def release_client_mqtt_mgr(self):
-        try:
-            if self.client_mqtt_mgr is not None:
-                self.client_mqtt_mgr.loop_stop()
-                self.client_mqtt_mgr.disconnect()
-
-            self.client_mqtt_lock.acquire()
-            if self.client_mqtt_mgr is not None:
-                self.client_mqtt_is_connected = False
-                self.client_mqtt_mgr = None
-            self.client_mqtt_lock.release()
-        except Exception:
-            pass
-
-    def send_deployment_stop_request_to_edges(self, edge_id_list, payload):
-        for edge_id in edge_id_list:
-            topic_stop_deployment = "model_ops/model_device/stop_deployment/{}".format(str(self.edge_id))
-            logging.info("stop_deployment: send topic " + topic_stop_deployment)
-            self.client_mqtt_mgr.send_message_json(topic_stop_deployment, payload)
-
-    def send_exit_train_with_exception_request_to_edges(self, edge_id_list, payload):
-        for edge_id in edge_id_list:
-            topic_exit_train = "flserver_agent/" + str(edge_id) + "/exit_train_with_exception"
-            logging.info("exit_train_with_exception: send topic " + topic_exit_train)
-            self.client_mqtt_mgr.send_message_json(topic_exit_train, payload)
-
-    def exit_run_with_exception_entry(self):
-        try:
-            self.setup_client_mqtt_mgr()
-            self.exit_run_with_exception()
-        except Exception as e:
-            self.release_client_mqtt_mgr()
-            sys_utils.cleanup_all_fedml_server_login_processes(
-                ServerConstants.SERVER_LOGIN_PROGRAM, clean_process_group=False)
-            sys.exit(1)
-        finally:
-            self.release_client_mqtt_mgr()
-
-    def exit_run_with_exception(self):
-        logging.info("Exit run successfully.")
-
-        ServerConstants.cleanup_learning_process(self.run_id)
-        ServerConstants.cleanup_run_process(self.run_id)
-
-        self.mlops_metrics.report_server_id_status(
-            self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id)
-
-        time.sleep(1)
-
-    def callback_exit_train_with_exception(self, topic, payload):
-        # logging.info("callback_exit_train_with_exception: topic = %s, payload = %s" % (topic, payload))
-
-        request_json = json.loads(payload)
-        is_retain = request_json.get("is_retain", False)
-        if is_retain:
-            return
-        run_id = request_json.get("runId", None)
-        if run_id is None:
-            run_id = request_json.get("run_id", None)
-            if run_id is None:
-                run_id = request_json.get("id", None)
-
-        if run_id is None:
-            return
-
-        edge_ids = request_json.get("edgeids", None)
-
-        self.send_exit_train_with_exception_request_to_edges(edge_ids, payload)
-
-        # Stop server with multiprocessing mode
-        self.request_json = request_json
-        server_runner = FedMLServerRunner(
-            self.args, edge_id=self.edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id
-        )
-        try:
-            Process(target=server_runner.exit_run_with_exception_entry).start()
-        except Exception as e:
-            pass
-
-    def callback_client_exit_train_with_exception(self, topic, payload):
-        # logging.info("callback_client_exit_train_with_exception: topic = %s, payload = %s" % (topic, payload))
-
-        request_json = json.loads(payload)
-        run_id = request_json.get("run_id", None)
-        edge_id = request_json.get("edge_id", None)
-        if run_id is None:
-            logging.info("callback_client_exit_train_with_exception run id is none")
-            return
-
-        job = FedMLServerDataInterface.get_instance().get_job_by_id(run_id)
-        if job is not None and job.running_json is not None and job.running_json != "":
-            job_json_obj = json.loads(job.running_json)
-            edge_ids = job_json_obj.get("edgeids", None)
-
-            self.mlops_metrics.broadcast_server_training_status(
-                run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED,
-                is_from_model=True, edge_id=edge_id)
-
-            self.send_exit_train_with_exception_request_to_edges(edge_ids, job.running_json)
-
-            self.exit_run_with_exception()
-
-    def callback_runner_id_status(self, topic, payload):
-        logging.info("callback_runner_id_status: topic = %s, payload = %s" % (topic, payload))
-
-        request_json = json.loads(payload)
-        is_retain = request_json.get("is_retain", False)
-        if is_retain:
-            return
-        run_id = request_json["run_id"]
-        status = request_json["status"]
-        edge_id = request_json["edge_id"]
-        run_id_str = str(run_id)
-
-        if (
-                status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED
-                or status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED
-        ):
-            # Stop server with multiprocessing mode
-            stop_request_json = self.running_request_json.get(run_id_str, None)
-            if stop_request_json is None:
-                stop_request_json = request_json
-            if self.run_as_edge_server_and_agent:
-                server_runner = FedMLServerRunner(
-                    self.args, run_id=run_id, request_json=stop_request_json, agent_config=self.agent_config
-                )
-                server_runner.edge_id = self.edge_id
-                server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent
-                server_runner.run_status = status
-                status_process = Process(target=server_runner.cleanup_client_with_status)
-                status_process.start()
-                status_process.join(10)
-
-                # Stop log processor for current run
-                MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
-
-    def cleanup_client_with_status(self):
-        if self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED:
-            logging.info("received to finished status.")
-            self.cleanup_run_when_finished()
-        elif self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED:
-            logging.info("received to failed status.")
-            self.cleanup_run_when_starting_failed()
-
-    def callback_report_current_status(self, topic, payload):
-        request_json = json.loads(payload)
-        if self.run_as_edge_server_and_agent:
-            self.send_agent_active_msg()
-        elif self.run_as_cloud_agent:
-            self.send_agent_active_msg()
-        elif self.run_as_cloud_server:
-            pass
-
-    @staticmethod
-    def process_ota_upgrade_msg():
-        os.system("pip install -U fedml")
-
-    def callback_server_ota_msg(self, topic, payload):
-        request_json = json.loads(payload)
-        cmd = request_json["cmd"]
-
-        if cmd == ServerConstants.FEDML_OTA_CMD_UPGRADE:
-            try:
-                self.process_ota_upgrade_msg()
-                # Process(target=FedMLServerRunner.process_ota_upgrade_msg).start()
-                raise Exception("After upgraded, restart runner...")
-            except Exception as e:
-                pass
-        elif cmd == ServerConstants.FEDML_OTA_CMD_RESTART:
-            raise Exception("Restart runner...")
-
-    @staticmethod
-    def get_device_id():
-        device_file_path = os.path.join(ServerConstants.get_data_dir(), ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME)
-        file_for_device_id = os.path.join(device_file_path, "devices.id")
-        if not os.path.exists(device_file_path):
-            os.makedirs(device_file_path)
-        elif os.path.exists(file_for_device_id):
-            with open(file_for_device_id, 'r', encoding='utf-8') as f:
-                device_id_from_file = f.readline()
-                if device_id_from_file is not None and device_id_from_file != "":
-                    return device_id_from_file
-
-        if platform.system() == "Darwin":
-            cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \
-                                 "|awk -F':' '{print $2}' "
-            device_id = os.popen(cmd_get_serial_num).read()
-            device_id = device_id.replace('\n', '').replace(' ', '')
-            if device_id is None or device_id == "":
-                device_id = hex(uuid.getnode())
-            else:
-                device_id = "0x" + device_id
-        else:
-            if "nt" in os.name:
-
-                def get_uuid():
-                    guid = ""
-                    try:
-                        cmd = "wmic csproduct get uuid"
-                        guid = str(subprocess.check_output(cmd))
-                        pos1 = guid.find("\\n") + 2
-                        guid = guid[pos1:-15]
-                    except Exception as ex:
-                        pass
-                    return str(guid)
-
-                device_id = str(get_uuid())
-            elif "posix" in os.name:
-                device_id = sys_utils.get_device_id_in_docker()
-                if device_id is None:
-                    device_id = hex(uuid.getnode())
-            else:
-                device_id = sys_utils.run_subprocess_open(
-                    "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split()
-                )
-                device_id = hex(device_id)
-
-        if device_id is not None and device_id != "":
-            with open(file_for_device_id, 'w', encoding='utf-8') as f:
-                f.write(device_id)
-        else:
-            device_id = hex(uuid.uuid4())
-            with open(file_for_device_id, 'w', encoding='utf-8') as f:
-                f.write(device_id)
-
-        return device_id
-
-    def bind_account_and_device_id(self, url, account_id, device_id, os_name):
-        role = ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_ON_PREMISE_MASTER_INDEX]
-        if self.run_as_edge_server_and_agent:
-            role = ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_ON_PREMISE_MASTER_INDEX]
-        elif self.run_as_cloud_agent:
-            role = ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_FEDML_CLOUD_MASTER_INDEX]
-        elif self.run_as_cloud_server:
-            role = ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_INFERENCE_INSTANCE_INDEX]
-
-        ip = requests.get('https://checkip.amazonaws.com').text.strip()
-        fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \
-            cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \
-            gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info()
-        host_name = sys_utils.get_host_name()
-        json_params = {
-            "accountid": account_id,
-            "deviceid": device_id,
-            "type": os_name,
-            "state": ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE,
-            "status": ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE,
-            "processor": cpu_info,
-            "core_type": cpu_info,
-            "network": "",
-            "role": role,
-            "os_ver": os_ver,
-            "memory": total_mem,
-            "ip": ip,
-            "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver,
-                            "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver,
-                            "mpi_installed": mpi_installed, "cpu_usage": cpu_usage,
-                            "available_mem": available_mem, "total_mem": total_mem,
-                            "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name}
-        }
-        if gpu_count > 0:
-            if gpu_total_mem is not None:
-                json_params["gpu"] = gpu_info if gpu_info is not None else "" + ", Total GPU Memory: " + gpu_total_mem
-            else:
-                json_params["gpu"] = gpu_info if gpu_info is not None else ""
-            json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else ""
-            if gpu_available_mem is not None:
-                json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem
-            if gpu_total_mem is not None:
-                json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem
-
-            json_params["extra_infos"]["gpu_count"] = gpu_count
-            json_params["extra_infos"]["gpu_vendor"] = gpu_vendor
-            json_params["extra_infos"]["gpu_device_name"] = gpu_device_name
-
-            gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count)
-            gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0
-            gpu_list = sys_utils.get_gpu_list()
-            json_params["extra_infos"]["gpu_available_count"] = gpu_available_count
-            json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list
-            json_params["extra_infos"]["gpu_list"] = gpu_list
-        else:
-            json_params["gpu"] = "None"
-            json_params["extra_infos"]["gpu_available_count"] = 0
-            json_params["extra_infos"]["gpu_available_id_list"] = []
-            json_params["extra_infos"]["gpu_list"] = []
-
-        _, cert_path = MLOpsConfigs.get_request_params()
-        if cert_path is not None:
-            try:
-                requests.session().verify = cert_path
-                response = requests.post(
-                    url, json=json_params, verify=True,
-                    headers={"content-type": "application/json", "Connection": "close"}
-                )
-            except requests.exceptions.SSLError as err:
-                MLOpsConfigs.install_root_ca_file()
-                response = requests.post(
-                    url, json=json_params, verify=True,
-                    headers={"content-type": "application/json", "Connection": "close"}
-                )
-        else:
-            response = requests.post(url, json=json_params, headers={"Connection": "close"})
-        edge_id = -1
-        user_name = None
-        extra_url = None
-        if response.status_code != 200:
-            print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                  f"response.content: {response.content}")
-            pass
-        else:
-            # print("url = {}, response = {}".format(url, response))
-            status_code = response.json().get("code")
-            if status_code == "SUCCESS":
-                edge_id = response.json().get("data").get("id")
-                user_name = response.json().get("data").get("userName", None)
-                extra_url = response.json().get("data").get("url", None)
-                if edge_id is None or edge_id <= 0:
-                    print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                          f"response.content: {response.content}")
-            else:
-                if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR:
-                    raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR)
-                print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                      f"response.content: {response.content}")
-                return -1, None, None
-        return edge_id, user_name, extra_url
-
-    def fetch_configs(self):
-        return MLOpsConfigs.fetch_all_configs()
-
-    def send_agent_active_msg(self):
-        active_topic = "flserver_agent/active"
-        status = MLOpsStatus.get_instance().get_server_agent_status(self.edge_id)
-        if (
-                status is not None
-                and status != ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE
-                and status != ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
-        ):
-            return
-
-        status = ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
-        active_msg = {"ID": self.edge_id, "status": status}
-        MLOpsStatus.get_instance().set_server_agent_status(self.edge_id, status)
-        self.mqtt_mgr.send_message_json(active_topic, json.dumps(active_msg))
-
-    def subscribe_slave_devices_message(self, request_json):
-        if request_json is None:
-            return
-        run_id = request_json["run_id"]
-        edge_id_list = request_json["device_ids"]
-        logging.info("Edge ids: " + str(edge_id_list))
-        for edge_id in edge_id_list:
-            if str(edge_id) == str(self.edge_id):
-                continue
-            # subscribe deployment result message for each model device
-            deployment_results_topic = "model_device/model_device/return_deployment_result/{}".format(edge_id)
-            self.mqtt_mgr.add_message_listener(deployment_results_topic, self.callback_deployment_result_message)
-            self.mqtt_mgr.subscribe_msg(deployment_results_topic)
-
-            # subscribe deployment status message for each model device
-            deployment_status_topic = "model_device/model_device/return_deployment_status/{}".format(edge_id)
-            self.mqtt_mgr.add_message_listener(deployment_status_topic, self.callback_deployment_status_message)
-            self.mqtt_mgr.subscribe_msg(deployment_status_topic)
-
-            logging.info("subscribe device messages {}, {}".format(
-                deployment_results_topic, deployment_status_topic))
-
-    def on_agent_mqtt_connected(self, mqtt_client_object):
-        # The MQTT message topic format is as follows: <sender>/<receiver>/<action>
-
-        # Setup MQTT message listener for starting deployment
-        server_agent_id = self.edge_id
-        topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id))
-        self.mqtt_mgr.add_message_listener(topic_start_deployment, self.callback_start_deployment)
-
-        # Setup MQTT message listener for activating deployment
-        topic_activate_deployment = "model_ops/model_device/activate_deployment/{}".format(str(self.edge_id))
-        self.mqtt_mgr.add_message_listener(topic_activate_deployment, self.callback_activate_deployment)
-
-        # Setup MQTT message listener for deactivating deployment
-        topic_deactivate_deployment = "model_ops/model_device/deactivate_deployment/{}".format(str(self.edge_id))
-        self.mqtt_mgr.add_message_listener(topic_deactivate_deployment, self.callback_deactivate_deployment)
-
-        # Setup MQTT message listener for delete deployment
-        topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(self.edge_id))
-        self.mqtt_mgr.add_message_listener(topic_delete_deployment, self.callback_delete_deployment)
-
-        # Setup MQTT message listener for server status switching
-        topic_server_status = "fl_server/flserver_agent_" + str(server_agent_id) + "/status"
-        self.mqtt_mgr.add_message_listener(topic_server_status, self.callback_runner_id_status)
-
-        # Setup MQTT message listener to report current device status.
-        topic_report_status = "mlops/report_device_status"
-        self.mqtt_mgr.add_message_listener(topic_report_status, self.callback_report_current_status)
-
-        # Setup MQTT message listener to OTA messages from the MLOps.
-        topic_ota_msg = "mlops/flserver_agent_" + str(server_agent_id) + "/ota"
-        self.mqtt_mgr.add_message_listener(topic_ota_msg, self.callback_server_ota_msg)
-
-        # Subscribe topics for starting train, stopping train and fetching client status.
-        mqtt_client_object.subscribe(topic_start_deployment, qos=2)
-        mqtt_client_object.subscribe(topic_activate_deployment, qos=2)
-        mqtt_client_object.subscribe(topic_deactivate_deployment, qos=2)
-        mqtt_client_object.subscribe(topic_delete_deployment, qos=2)
-        mqtt_client_object.subscribe(topic_server_status, qos=2)
-        mqtt_client_object.subscribe(topic_report_status, qos=2)
-        mqtt_client_object.subscribe(topic_ota_msg, qos=2)
-
-        self.subscribed_topics.clear()
-        self.subscribed_topics.append(topic_start_deployment)
-        self.subscribed_topics.append(topic_activate_deployment)
-        self.subscribed_topics.append(topic_deactivate_deployment)
-        self.subscribed_topics.append(topic_delete_deployment)
-        self.subscribed_topics.append(topic_server_status)
-        self.subscribed_topics.append(topic_report_status)
-        self.subscribed_topics.append(topic_ota_msg)
-
-        self.endpoint_sync_protocol = FedMLEndpointSyncProtocol(agent_config=self.agent_config, mqtt_mgr=self.mqtt_mgr)
-        self.endpoint_sync_protocol.setup_listener_for_sync_device_info(self.edge_id)
-
-        # Broadcast the first active message.
-        self.send_agent_active_msg()
-
-        # Echo results
-        # print("\n\nCongratulations, your device is connected to the FedML MLOps platform successfully!")
-        # print(
-        #     "Your FedML Edge ID is " + str(self.edge_id) + ", unique device ID is "
-        #     + str(self.unique_device_id)
-        #     + "\n"
-        # )
-
-        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
-
-    def on_agent_mqtt_disconnected(self, mqtt_client_object):
-        MLOpsStatus.get_instance().set_server_agent_status(
-            self.edge_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE
-        )
-
-    def recover_inference_and_monitor(self):
-        try:
-            history_jobs = FedMLServerDataInterface.get_instance().get_history_jobs()
-            for job in history_jobs.job_list:
-                if job.running_json is None:
-                    continue
-
-                if job.deployment_result == "":
-                    continue
-
-                run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \
-                    model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \
-                    inference_end_point_id, use_gpu, memory_size, model_version, inference_port = \
-                    self.parse_model_run_params(json.loads(job.running_json))
-
-                FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-                is_activated = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                    get_end_point_activation(run_id)
-                if not is_activated:
-                    continue
-
-                self.start_device_inference_gateway(run_id, end_point_name, model_id, model_name, model_version,
-                                                    inference_port=inference_port)
-
-                self.stop_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version)
-                self.start_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version)
-        except Exception as e:
-            logging.info("recover inference and monitor: {}".format(traceback.format_exc()))
-
-    def recover_start_deployment_msg_after_upgrading(self):
-        try:
-            current_job = FedMLServerDataInterface.get_instance().get_current_job()
-            if current_job is not None and \
-                    current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING:
-                FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-                is_activated = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                    get_end_point_activation(current_job.job_id)
-                if not is_activated:
-                    return
-                logging.info("start deployment after upgrading.")
-                topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id))
-                self.callback_start_deployment(topic_start_deployment, current_job.running_json)
-        except Exception as e:
-            logging.info("recover starting deployment message after upgrading: {}".format(traceback.format_exc()))
-
-    def setup_agent_mqtt_connection(self, service_config):
-        # Setup MQTT connection
-        self.mqtt_mgr = MqttManager(
-            service_config["mqtt_config"]["BROKER_HOST"],
-            service_config["mqtt_config"]["BROKER_PORT"],
-            service_config["mqtt_config"]["MQTT_USER"],
-            service_config["mqtt_config"]["MQTT_PWD"],
-            service_config["mqtt_config"]["MQTT_KEEPALIVE"],
-            "FedML_ModelServerAgent_Daemon_@" + self.user_name + "@_" + self.args.current_device_id + str(uuid.uuid4()),
-            "flserver_agent/last_will_msg",
-            json.dumps({"ID": self.edge_id, "status": ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE})
-            )
-        self.agent_config = service_config
-
-        # Init local database
-        FedMLServerDataInterface.get_instance().create_job_table()
-        try:
-            FedMLModelDatabase.get_instance().set_database_base_dir(ServerConstants.get_database_dir())
-            FedMLModelDatabase.get_instance().create_table()
-        except Exception as e:
-            pass
-
-        server_api_cmd = "fedml.computing.scheduler.model_scheduler.device_server_api:api"
-        server_api_pids = RunProcessUtils.get_pid_from_cmd_line(server_api_cmd)
-        if server_api_pids is None or len(server_api_pids) <= 0:
-            # Start local API services
-            cur_dir = os.path.dirname(__file__)
-            fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
-            python_program = get_python_program()
-            self.local_api_process = ServerConstants.exec_console_with_script(
-                "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
-                "--log-level critical".format(
-                    python_program, server_api_cmd, ServerConstants.LOCAL_SERVER_API_PORT,
-                    fedml_base_dir
-                ),
-                should_capture_stdout=False,
-                should_capture_stderr=False
-            )
-            # if self.local_api_process is not None and self.local_api_process.pid is not None:
-            #     print(f"Model master local API process id {self.local_api_process.pid}")
-
-        self.recover_inference_and_monitor()
-
-        # MLOpsRuntimeLogDaemon.get_instance(self.args).stop_all_log_processor()
-
-        # Setup MQTT connected listener
-        self.mqtt_mgr.add_connected_listener(self.on_agent_mqtt_connected)
-        self.mqtt_mgr.add_disconnected_listener(self.on_agent_mqtt_disconnected)
-        self.mqtt_mgr.connect()
-
-        self.setup_client_mqtt_mgr()
-        self.mlops_metrics.report_server_training_status(
-            self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE,
-            is_from_model=True, edge_id=self.edge_id)
-        MLOpsStatus.get_instance().set_server_agent_status(
-            self.edge_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
-        )
-
-        self.recover_start_deployment_msg_after_upgrading()
-
-    def stop_agent(self):
-        if self.run_process_event is not None:
-            self.run_process_event.set()
-
-        if self.mqtt_mgr is not None:
-            try:
-                for topic in self.subscribed_topics:
-                    self.mqtt_mgr.unsubscribe_msg(topic)
-            except Exception as e:
-                pass
-
-            self.mqtt_mgr.loop_stop()
-            self.mqtt_mgr.disconnect()
-
-        self.release_client_mqtt_mgr()
-
-    def start_agent_mqtt_loop(self, should_exit_sys=True):
-        # Start MQTT message loop
-        try:
-            self.mqtt_mgr.loop_forever()
-        except Exception as e:
-            if str(e) == "Restarting after upgraded...":
-                logging.info("Restarting after upgraded...")
-            else:
-                print("Server tracing: {}".format(traceback.format_exc()))
-        finally:
-           self.stop_agent()
-
-           if should_exit_sys:
-               time.sleep(5)
-               sys_utils.cleanup_all_fedml_server_login_processes(
-                   ServerConstants.SERVER_LOGIN_PROGRAM, clean_process_group=False)
-               sys.exit(1)
diff --git a/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py
new file mode 100755
index 0000000000..3fe45401ac
--- /dev/null
+++ b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py
@@ -0,0 +1,204 @@
+
+import json
+import logging
+import os
+import time
+from .device_model_cache import FedMLModelCache
+from .device_server_constants import ServerConstants
+from ..scheduler_core.general_constants import GeneralConstants
+
+
class FedMLDeployJobRunnerMsgSender(object):
    """Mixin that publishes model-deployment lifecycle messages (results,
    status, stages, start/delete/stop requests) through the message center.

    The concrete job runner is expected to populate these attributes before
    any ``send_*`` method is called:
        infer_host / redis_addr / redis_port / redis_password -- model-cache
            connection settings (defaults target the local redis).
        message_center -- object exposing ``send_message_json(topic, payload)``.
        request_json   -- the current deployment request (dict).
        edge_id        -- this master device's edge id.
    """

    def __init__(self):
        self.infer_host = "127.0.0.1"
        self.redis_addr = "local"
        self.redis_port = "6379"
        self.redis_password = "fedml_default"
        self.message_center = None
        self.request_json = None
        self.edge_id = None

    def send_deployment_results_with_payload(self, end_point_id, end_point_name, payload):
        """Unpack a result payload dict and forward it via send_deployment_results."""
        self.send_deployment_results(end_point_id, end_point_name,
                                     payload["model_name"], payload["model_url"],
                                     payload["model_version"], payload["port"],
                                     payload["inference_engine"],
                                     payload["model_metadata"],
                                     payload["model_config"],
                                     payload["input_json"],
                                     payload["output_json"])

    def send_deployment_results(self, end_point_id, end_point_name,
                                model_name, model_inference_url,
                                model_version, inference_port, inference_engine,
                                model_metadata, model_config, input_json, output_json):
        """Publish the deployment result to MLOps.

        The payload is sent twice: once on the per-endpoint topic
        ``.../return_deployment_result/<end_point_id>`` and once on the bare
        prefix topic (broadcast listeners).
        """
        deployment_results_topic_prefix = "model_ops/model_device/return_deployment_result"
        deployment_results_topic = "{}/{}".format(deployment_results_topic_prefix, end_point_id)
        deployment_results_payload = {"end_point_id": end_point_id, "end_point_name": end_point_name,
                                      "model_name": model_name, "model_url": model_inference_url,
                                      "version": model_version, "port": inference_port,
                                      "inference_engine": inference_engine,
                                      "model_metadata": model_metadata,
                                      "model_config": model_config,
                                      "input_json": input_json,
                                      "output_json": output_json,
                                      # microseconds since the epoch
                                      "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))}
        logging.info(f"[Master] deployment_results_payload is sent to mlops: {deployment_results_payload}")

        self.message_center.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload))
        self.message_center.send_message_json(deployment_results_topic_prefix, json.dumps(deployment_results_payload))

    @staticmethod
    def send_deployment_status(
            end_point_id, end_point_name, model_name, model_inference_url, model_status, message_center=None):
        """Publish the current endpoint status to MLOps; no-op without a message center."""
        if message_center is None:
            return
        deployment_status_topic_prefix = "model_ops/model_device/return_deployment_status"
        deployment_status_topic = "{}/{}".format(deployment_status_topic_prefix, end_point_id)
        deployment_status_payload = {"end_point_id": end_point_id, "end_point_name": end_point_name,
                                     "model_name": model_name,
                                     "model_url": model_inference_url,
                                     "model_status": model_status,
                                     # microseconds since the epoch
                                     "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))}
        logging.info(f"[Master] deployment_status_payload is sent to mlops: {deployment_status_payload}")

        message_center.send_message_json(deployment_status_topic, json.dumps(deployment_status_payload))
        message_center.send_message_json(deployment_status_topic_prefix, json.dumps(deployment_status_payload))

    @staticmethod
    def send_deployment_stages(end_point_id, model_name, model_id, model_inference_url,
                               model_stages_index, model_stages_title, model_stage_detail,
                               message_center=None):
        """Publish a deployment-stage progress update to MLOps; no-op without a message center."""
        if message_center is None:
            return
        deployment_stages_topic_prefix = "model_ops/model_device/return_deployment_stages"
        deployment_stages_topic = "{}/{}".format(deployment_stages_topic_prefix, end_point_id)
        deployment_stages_payload = {"model_name": model_name,
                                     "model_id": model_id,
                                     "model_url": model_inference_url,
                                     "end_point_id": end_point_id,
                                     "model_stage_index": model_stages_index,
                                     "model_stage_title": model_stages_title,
                                     "model_stage_detail": model_stage_detail,
                                     # microseconds since the epoch
                                     "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))}

        message_center.send_message_json(deployment_stages_topic, json.dumps(deployment_stages_payload))
        message_center.send_message_json(deployment_stages_topic_prefix, json.dumps(deployment_stages_payload))

        logging.info(f"-------- Stages has been sent to mlops with stage {model_stages_index} and "
                     f"payload {deployment_stages_payload}")

    def send_deployment_start_request_to_edges(self):
        """Send a start-deployment request to every edge in ``replica_num_diff``.

        Both "add" and "replace" diffs are forwarded to the edge devices.
        Returns the list of edge ids a request was sent to (master excluded).
        """
        # Iterate through replica_num_diff, both add and replace should be sent to the edge devices
        if "replica_num_diff" not in self.request_json or self.request_json["replica_num_diff"] is None:
            return []

        edge_id_list = list(self.request_json["replica_num_diff"].keys())

        self.request_json["master_node_ip"] = GeneralConstants.get_ip_address(self.request_json)
        should_added_devices = []
        for edge_id in edge_id_list:
            # NOTE(review): keys decoded from JSON are strings while self.edge_id
            # may be an int -- confirm the types match for this comparison.
            if edge_id == self.edge_id:
                continue
            should_added_devices.append(edge_id)
            # send start deployment request to each device
            self.send_deployment_start_request_to_edge(edge_id)
        return should_added_devices

    def send_deployment_start_request_to_edge(self, edge_id):
        """Send the current request_json as a start-deployment message to one edge."""
        topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(edge_id))
        logging.info("start_deployment: send topic " + topic_start_deployment + " to client...")
        self.message_center.send_message_json(topic_start_deployment, json.dumps(self.request_json))

    def send_deployment_delete_request_to_edges(self, payload, model_msg_object):
        """Send delete-deployment requests to the edges being removed.

        With ``model_msg_object`` present the whole endpoint is deleted (its
        device_ids drive the fan-out); otherwise the devices marked with the
        delete operation in ``request_json["diff_devices"]`` are removed, and
        their entries are purged from the Redis/SQLite endpoint caches first.
        """
        if model_msg_object is None:    # Called after the diff operation
            if "diff_devices" not in self.request_json or self.request_json["diff_devices"] is None:
                return
            else:
                edge_id_list_to_delete = []
                for device_id in self.request_json["diff_devices"]:
                    if self.request_json["diff_devices"][device_id] == ServerConstants.DEVICE_DIFF_DELETE_OPERATION:
                        edge_id_list_to_delete.append(device_id)
                if len(edge_id_list_to_delete) == 0:
                    return

                try:
                    FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port,
                                                                    self.redis_password)

                    # 1. Get & Delete the endpoint device info in Redis / SQLite
                    device_objs = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
                        get_end_point_device_info(self.request_json["run_id"])

                    if device_objs is None:
                        raise Exception("The device list in local redis is None")
                    else:
                        # Rebuild the list instead of removing while iterating,
                        # which would skip the element after each removal.
                        total_device_objs_list = [
                            device_obj for device_obj in json.loads(device_objs)
                            if device_obj["id"] not in edge_id_list_to_delete]

                    FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_device_info(
                        self.request_json["end_point_id"], self.request_json["end_point_name"],
                        json.dumps(total_device_objs_list))

                    # 2 Delete the result in deployment result list in Redis / SQLite
                    device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
                        get_deployment_result_list(self.request_json["end_point_id"],
                                                   self.request_json["end_point_name"],
                                                   self.request_json["model_config"]["model_name"])
                    delete_device_result_list = []
                    for device_result in device_result_list:
                        device_result_dict = json.loads(device_result)
                        if int(device_result_dict["cache_device_id"]) in edge_id_list_to_delete:
                            delete_device_result_list.append(device_result)

                    for delete_item in delete_device_result_list:
                        FedMLModelCache.get_instance(self.redis_addr, self.redis_port).delete_deployment_result(
                            delete_item, self.request_json["end_point_id"],
                            self.request_json["end_point_name"],
                            self.request_json["model_config"]["model_name"]
                        )

                except Exception as e:
                    # Persist the failing request for post-mortem, then re-raise.
                    run_id = self.request_json["run_id"]
                    error_log_path = f"~/.fedml/fedml-model-server/fedml/logs/error_delete_{run_id}.txt"
                    if not os.path.exists(os.path.dirname(os.path.expanduser(error_log_path))):
                        os.makedirs(os.path.dirname(os.path.expanduser(error_log_path)))
                    with open(os.path.expanduser(error_log_path), "w") as f:
                        f.write(str(self.request_json))
                        f.write(str(e))
                        f.write('\n')
                    raise e

        else:   # Delete the whole endpoint
            edge_id_list_to_delete = model_msg_object.device_ids

        # For Debug
        if payload is not None:
            debug_log_path = f"~/.fedml/fedml-model-server/fedml/logs/tmp_debug_delete_payload.txt"
            if not os.path.exists(os.path.dirname(os.path.expanduser(debug_log_path))):
                os.makedirs(os.path.dirname(os.path.expanduser(debug_log_path)))
            with open(os.path.expanduser(debug_log_path), "w") as f:
                f.write(str(payload))

        # Remove the model master node id from the list using index 0
        edge_id_list_to_delete = edge_id_list_to_delete[1:]

        logging.info("Device ids to be deleted: " + str(edge_id_list_to_delete))

        for edge_id in edge_id_list_to_delete:
            if edge_id == self.edge_id:
                continue
            # send delete deployment request to each model device
            topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(edge_id))
            logging.info("delete_deployment: send topic " + topic_delete_deployment + " to client...")
            self.message_center.send_message_json(topic_delete_deployment, payload)

    def send_deployment_stop_request_to_edges(self, edge_id_list, payload):
        """Send a stop-deployment request to each edge in ``edge_id_list``.

        Fix: the topic now targets the loop's ``edge_id``; the original
        formatted every topic with ``self.edge_id``, so all stop requests
        went to the master itself and the loop variable was unused.
        """
        for edge_id in edge_id_list:
            topic_stop_deployment = "model_ops/model_device/stop_deployment/{}".format(str(edge_id))
            logging.info("stop_deployment: send topic " + topic_stop_deployment)
            self.message_center.send_message_json(topic_stop_deployment, payload)
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_agent.py b/python/fedml/computing/scheduler/model_scheduler/master_agent.py
new file mode 100755
index 0000000000..2f30ae8b8e
--- /dev/null
+++ b/python/fedml/computing/scheduler/model_scheduler/master_agent.py
@@ -0,0 +1,27 @@
+
+from .device_server_constants import ServerConstants
+from .device_server_data_interface import FedMLServerDataInterface
+from .master_protocol_manager import FedMLDeployMasterProtocolManager
+from ..master.base_master_agent import FedMLBaseMasterAgent
+
+
class FedMLDeployMasterAgent(FedMLBaseMasterAgent):
    """Master agent specialized for model deployment.

    Overrides the base master agent's hooks to use the deployment server's
    constants, database interface and protocol manager.
    """

    def __init__(self):
        super().__init__()

    # Override
    def _get_log_file_dir(self):
        """Return the deployment server's log directory."""
        return ServerConstants.get_log_file_dir()

    # Override
    def _save_agent_info(self, unique_device_id, edge_id):
        """Persist this agent's device id and edge id."""
        ServerConstants.save_runner_infos(unique_device_id, edge_id)

    # Override
    def _init_database(self):
        """Create the deployment server's job table if needed."""
        FedMLServerDataInterface.get_instance().create_job_table()

    # Override
    def _generate_protocol_manager_instance(self, args, agent_config=None):
        """Build the deployment-specific protocol manager."""
        return FedMLDeployMasterProtocolManager(args, agent_config=agent_config)
\ No newline at end of file
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
new file mode 100755
index 0000000000..f3d68c1f6a
--- /dev/null
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -0,0 +1,578 @@
+import copy
+import json
+import logging
+import os
+import time
+import queue
+import traceback
+from abc import ABC
+from multiprocessing import Queue
+
+import fedml
+from fedml.core.mlops import MLOpsRuntimeLog
+from .device_client_constants import ClientConstants
+from .device_model_cache import FedMLModelCache
+from .device_server_constants import ServerConstants
+from .device_server_data_interface import FedMLServerDataInterface
+from ..comm_utils import sys_utils
+from ..comm_utils.run_process_utils import RunProcessUtils
+from ..comm_utils.sys_utils import get_python_program
+from ..scheduler_core.general_constants import GeneralConstants
+from ..master.base_master_job_runner import FedMLBaseMasterJobRunner
+from .device_replica_controller import FedMLDeviceReplicaController
+from .job_runner_msg_sender import FedMLDeployJobRunnerMsgSender
+
+
+class FedMLDeployMasterJobRunner(FedMLBaseMasterJobRunner, FedMLDeployJobRunnerMsgSender, ABC):
+    """Master-side job runner for one model deployment (endpoint).
+
+    Drives a deployment end to end: starts the unified inference gateway and
+    monitor, forwards deployment requests to the slave devices, and consumes
+    slave results from a multiprocessing queue until the endpoint's replicas
+    are fully reconciled.
+    """
+
+    # Fallback Redis connection settings used when callers supply none.
+    default_redis_addr = "local"
+    default_redis_port = "6379"
+    default_redis_password = "fedml_default"
+
+    def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id=0,
+                 cuda_visible_gpu_ids_str=None):
+        FedMLDeployJobRunnerMsgSender.__init__(self)
+        FedMLBaseMasterJobRunner.__init__(
+            self, args, edge_id=edge_id, request_json=request_json, agent_config=agent_config, run_id=run_id,
+            cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str, agent_data_dir=ServerConstants.get_data_dir(),
+            agent_package_download_dir=ServerConstants.get_package_download_dir(),
+            agent_package_unzip_dir=GeneralConstants.get_package_unzip_dir(ServerConstants.get_package_download_dir()),
+            agent_log_file_dir=ServerConstants.get_log_file_dir()
+        )
+
+        self.infer_host = "127.0.0.1"
+        self.redis_addr = "local"
+        self.redis_port = "6379"
+        self.redis_password = "fedml_default"
+        self.inference_gateway_process = None
+        self.monitor_process = None
+        # Created in run_impl; None until the deployment actually starts.
+        self.replica_controller = None
+        # Last successful slave payload; used to build the MLOps result.
+        self.deployed_replica_payload = None
+        # device_id (str) -> last reported model status.
+        self.slave_deployment_results_map = dict()
+        # Placeholder; run_impl swaps in the cross-process extend queue.
+        self.deployment_result_queue = Queue()
+
+    # Override
+    def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None,):
+        return FedMLDeployMasterJobRunner(
+            args, run_id=run_id, request_json=request_json, agent_config=self.agent_config, edge_id=edge_id
+        )
+
+    # Override
+    def _generate_extend_queue_list(self):
+        # The base runner forwards these extra queues into run_impl's
+        # run_extend_queue_list; slot 0 carries slave deployment results.
+        return [self.deployment_result_queue]
+
+    # Override
+    def run_impl(
+        self, edge_id_status_queue, edge_device_info_queue, run_metrics_queue,
+        run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue,
+        run_extend_queue_list=None, sender_message_queue=None, listener_message_queue=None,
+        status_center_queue=None
+    ):
+        """Main deployment flow, executed in the dedicated runner process.
+
+        Parses the request, starts the inference gateway and monitor,
+        forwards the deployment to the slave devices, then polls the extend
+        queue for slave results until the run is stopped or completed.
+        """
+        # Parse the model parameters.
+        run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \
+            model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \
+            inference_end_point_id, use_gpu, memory_size, model_version, inference_port = \
+            FedMLDeployMasterJobRunner.parse_model_run_params(self.request_json)
+
+        # Print request parameters.
+        logging.info("model deployment request: {}".format(self.request_json))
+        logging.info("send deployment stages...")
+
+        # Generate the replica controller object.
+        self.replica_controller = FedMLDeviceReplicaController(self.edge_id, self.request_json)
+
+        # Start the process to report system performance(cpu,memory,etc.) to MLOps
+        self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id)
+
+        # Check if we should stop the runner
+        self.check_runner_stop_event()
+
+        # Send stage: MODEL_DEPLOYMENT_STAGE4 = "ForwardRequest2Slave"
+        self.send_deployment_stages(
+            self.run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE4["index"],
+            ServerConstants.MODEL_DEPLOYMENT_STAGE4["text"], ServerConstants.MODEL_DEPLOYMENT_STAGE4["text"],
+            message_center=self.message_center)
+
+        # Init the runtime logs
+        self.args.run_id = self.run_id
+        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
+
+        # Report server running status
+        logging.info("report deployment status...")
+        self.check_runner_stop_event()
+        self.status_reporter.report_server_id_status(
+            run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_STARTING,
+            is_from_model=True, running_json=json.dumps(self.request_json),
+            server_agent_id=self.edge_id, server_id=self.edge_id, edge_id=self.edge_id)
+        self.send_deployment_status(
+            self.run_id, end_point_name, model_name, "",
+            ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYING,
+            message_center=self.message_center)
+
+        # start unified inference server
+        # NOTE(review): self.redis_addr/port/password are not forwarded here,
+        # so the gateway falls back to its None defaults — confirm intended.
+        self.start_device_inference_gateway(
+            run_id, end_point_name, model_id, model_name, model_version,
+            agent_config=self.agent_config, inference_port=inference_port)
+
+        # start inference monitor server
+        self.stop_device_inference_monitor(
+            run_id, end_point_name, model_id, model_name, model_version)
+        self.start_device_inference_monitor(
+            run_id, end_point_name, model_id, model_name, model_version,
+            redis_addr=self.redis_addr, redis_port=self.redis_port, redis_password=self.redis_password
+        )
+
+        # Changed the status to "IDLE"
+        self.status_reporter.report_server_id_status(
+            run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED,
+            is_from_model=True, server_agent_id=self.edge_id, server_id=self.edge_id, edge_id=self.edge_id,)
+
+        # Check if we should stop the runner
+        logging.info("send the model inference request to slave devices...")
+        self.check_runner_stop_event()
+
+        # Forward deployment request to slave devices
+        # Handle "op:add" && "op:remove"
+        devices_sent_add_or_remove_msg = self.send_deployment_start_request_to_edges()
+
+        # Handle "op:update"
+        devices_sent_update_remove_msg = self.send_first_scroll_update_msg()
+
+        if len(devices_sent_add_or_remove_msg) == 0 and len(devices_sent_update_remove_msg) == 0:
+            # No device is added or removed, and no device is updated or removed
+            ip = GeneralConstants.get_ip_address(self.request_json)
+            master_port = os.getenv("FEDML_MASTER_PORT", None)
+            if master_port is not None:
+                inference_port = int(master_port)
+            model_inference_port = inference_port
+            if ip.startswith("http://") or ip.startswith("https://"):
+                model_inference_url = "{}/api/v1/predict".format(ip)
+            else:
+                model_inference_url = "http://{}:{}/api/v1/predict".format(ip, model_inference_port)
+
+            self.send_deployment_status(
+                run_id, end_point_name, model_name, model_inference_url,
+                ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
+                message_center=self.message_center
+            )
+
+            self.trigger_completed_event()
+            return
+
+        # Swap in the cross-process extend queue created by the base runner so
+        # results saved by the protocol-manager process reach this process.
+        self.deployment_result_queue = run_extend_queue_list[0]
+        while True:
+            self.check_runner_stop_event()
+
+            try:
+                # NOTE(review): timeout is ignored when block=False; the
+                # effective poll interval is the sleep below.
+                deployment_result = self.deployment_result_queue.get(block=False, timeout=0.2)
+                result_topic = deployment_result.get("topic", None)
+                result_payload = deployment_result.get("payload", None)
+                self.process_deployment_result_message(topic=result_topic, payload=result_payload)
+            except queue.Empty as e:  # If queue is empty, then continue
+                pass
+
+            time.sleep(0.5)
+
+    def save_deployment_result(self, topic=None, payload=None):
+        # Called from the protocol-manager side: enqueue the raw slave result
+        # so run_impl can process it inside the runner process.
+        self.deployment_result_queue.put({"topic": topic, "payload": payload})
+
+    def process_deployment_result_message(self, topic=None, payload=None):
+        """Handle one deployment result reported by a slave replica.
+
+        Updates the replica result cache (Redis + SQLite), drives the replica
+        num/version controllers, and — once every replica is reconciled —
+        publishes the unified inference URL and the final deployment status.
+        """
+        # Parse the parameters
+        topic_splits = str(topic).split('/')
+        device_id = topic_splits[-1]
+        payload_json = json.loads(payload)
+        end_point_id = payload_json["end_point_id"]
+        end_point_name = payload_json["end_point_name"]
+        model_id = payload_json["model_id"]
+        model_name = payload_json["model_name"]
+        model_version = payload_json["model_version"]
+        model_status = payload_json["model_status"]
+        replica_no = payload_json.get("replica_no", None)  # Idx start from 1
+        run_id_str = str(end_point_id)
+
+        # Set redis + sqlite deployment result
+        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
+
+        # Save deployment result to local cache
+        if model_status == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DELETED:
+            FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
+                delete_deployment_result_with_device_id_and_replica_no(
+                end_point_id, end_point_name, model_name, device_id, replica_no)
+        elif model_status == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED:
+            # add or update
+            FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
+                set_deployment_result(end_point_id, end_point_name,
+                                      model_name, model_version,
+                                      device_id, payload, replica_no)
+
+            # Note: To display the result in the UI, we need to save successful deployment result to the database
+            self.save_deployed_replica_payload(payload_json)
+        else:
+            # Any status other than DELETED/DEPLOYED/FAILED is unexpected,
+            # but is still reported to MLOps as a failure below.
+            if model_status != ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED:
+                logging.error(f"Unsupported model status {model_status}.")
+            self.send_deployment_status(
+                end_point_id, end_point_name, payload_json["model_name"], "",
+                ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
+                message_center=self.message_center
+            )
+
+        # Notify the replica number controller
+        self.callback_update_curr_replica_num_state(device_id, replica_no, model_status)
+
+        # Notify the replica version controller, which might trigger the next rolling update
+        self.send_next_scroll_update_msg(device_id, replica_no)
+
+        # Update the global deployment result mapping
+        self.slave_deployment_results_map[str(device_id)] = model_status
+
+        # Check if the endpoint is running
+        request_json = self.request_json
+        if request_json is None:
+            logging.error(f"The endpoint {end_point_id} is not running.")
+            self.send_deployment_status(
+                end_point_id, end_point_name, payload_json["model_name"], "",
+                ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
+                message_center=self.message_center
+            )
+            return
+
+        # Wait for all replica's result, not device-level
+        if self.is_all_replica_num_reconciled() and self.is_all_replica_version_reconciled():
+            '''
+            When all the devices have finished the add / delete / update operation
+            '''
+            # 1. We should generate one unified inference api
+            # Note that here we use the gateway port instead of the inference port that is used by the slave device
+            model_config_parameters = request_json["parameters"]
+            inference_port = model_config_parameters.get("server_internal_port",
+                                                         ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
+            inference_port_external = model_config_parameters.get("server_external_port", inference_port)
+            ip = GeneralConstants.get_ip_address(request_json)
+
+            if ip.startswith("http://") or ip.startswith("https://"):
+                model_inference_url = "{}/inference/{}".format(ip, end_point_id)
+            else:
+                model_inference_url = "http://{}:{}/inference/{}".format(ip, inference_port_external, end_point_id)
+
+            # Send stage: MODEL_DEPLOYMENT_STAGE5 = "StartInferenceIngress"
+            self.send_deployment_stages(
+                end_point_id, model_name, model_id, model_inference_url,
+                ServerConstants.MODEL_DEPLOYMENT_STAGE5["index"], ServerConstants.MODEL_DEPLOYMENT_STAGE5["text"],
+                "inference url: {}".format(model_inference_url), message_center=self.message_center)
+
+            # Prepare the result to MLOps
+            deployed_replica_payload = self.get_deployed_replica_payload()
+            if deployed_replica_payload is not None:
+                # From here on, payload_json refers to the saved replica
+                # payload (not the message payload parsed above).
+                payload_json = deployed_replica_payload
+                model_slave_url = payload_json["model_url"]
+                payload_json["model_url"] = model_inference_url
+                payload_json["port"] = inference_port_external
+                token = FedMLModelCache.get_instance(self.redis_addr, self.redis_port).get_end_point_token(
+                    end_point_id, end_point_name, model_name)
+
+                model_metadata = payload_json["model_metadata"]
+                model_inputs = model_metadata["inputs"]
+                # NOTE(review): ret_inputs is never used — dead local.
+                ret_inputs = list()
+                if "type" in model_metadata and model_metadata["type"] == "default":
+                    payload_json["input_json"] = {
+                        "end_point_name": end_point_name, "model_name": model_name, "token": str(token),
+                        "inputs": model_inputs, "outputs": []}
+                    payload_json["output_json"] = model_metadata["outputs"]
+                else:
+                    raise Exception(f"Unsupported model metadata type {model_metadata['type']}")
+
+                self.send_deployment_results_with_payload(
+                    end_point_id, end_point_name, payload_json)
+
+                payload_json_saved = payload_json
+                payload_json_saved["model_slave_url"] = model_slave_url
+                FedMLServerDataInterface.get_instance().save_job_result(end_point_id, self.edge_id,
+                                                                        json.dumps(payload_json_saved))
+            else:
+                # Arrive here because only contains remove ops, so we do not need to update the model metadata
+                pass
+
+            FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
+                set_end_point_activation(end_point_id, end_point_name, True)
+
+            self.send_deployment_status(
+                end_point_id, end_point_name, payload_json["model_name"],
+                model_inference_url, ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
+                message_center=self.message_center
+            )
+
+            self.trigger_completed_event()
+
+    @staticmethod
+    def start_device_inference_gateway(
+            run_id, end_point_name, model_id,
+            model_name, model_version, inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
+            agent_config=None, redis_addr=None, redis_port=None, redis_password=None
+    ):
+        """Start the unified inference gateway (a uvicorn app) unless one is
+        already running; returns the spawned process handle or None.
+
+        NOTE(review): agent_config is dereferenced unconditionally on the
+        spawn path, so it must not be None there; and when the redis_* args
+        are omitted they are rendered literally as "None" in the environment
+        string passed to the gateway — confirm both are intended.
+        """
+        # start unified inference server
+        # NOTE(review): running_model_name is computed but never used here.
+        running_model_name = ServerConstants.get_running_model_name(end_point_name,
+                                                                    model_name, model_version, run_id, model_id)
+        python_program = get_python_program()
+        # FEDML_MASTER_PORT, when set, overrides the requested inference port.
+        master_port = os.getenv("FEDML_MASTER_PORT", None)
+        if master_port is not None:
+            inference_port = int(master_port)
+        if not ServerConstants.is_running_on_k8s():
+            logging.info(f"start the model inference gateway, end point {run_id}, "
+                         f"model name {model_name} at port {inference_port}...")
+            use_mqtt_inference = os.getenv("FEDML_USE_MQTT_INFERENCE", "False")
+            use_mqtt_inference = True if use_mqtt_inference.lower() == 'true' else False
+            use_worker_gateway = os.getenv("FEDML_USE_WORKER_GATEWAY", "False")
+            use_worker_gateway = True if use_worker_gateway.lower() == 'true' else False
+            inference_gw_cmd = "fedml.computing.scheduler.model_scheduler.device_model_inference:api"
+            # Only spawn a new gateway if none is already running.
+            inference_gateway_pids = RunProcessUtils.get_pid_from_cmd_line(inference_gw_cmd)
+            if inference_gateway_pids is None or len(inference_gateway_pids) <= 0:
+                cur_dir = os.path.dirname(__file__)
+                fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
+                connect_str = "@FEDML@"
+                # Pack the MQTT connection settings into a single obfuscated
+                # EXT_INFO value for the gateway process.
+                ext_info = sys_utils.random1(
+                    agent_config["mqtt_config"]["BROKER_HOST"] + connect_str +
+                    str(agent_config["mqtt_config"]["BROKER_PORT"]) + connect_str +
+                    agent_config["mqtt_config"]["MQTT_USER"] + connect_str +
+                    agent_config["mqtt_config"]["MQTT_PWD"] + connect_str +
+                    str(agent_config["mqtt_config"]["MQTT_KEEPALIVE"]), "FEDML@9999GREAT")
+                inference_gateway_process = ServerConstants.exec_console_with_script(
+                    "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" "
+                    "END_POINT_NAME=\"{}\" "
+                    "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" "
+                    "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} "
+                    "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
+                    "--log-level critical".format(
+                        redis_addr, redis_port, redis_password, end_point_name,
+                        model_name, model_version, "", fedml.get_env_version(), use_mqtt_inference,
+                        use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port),
+                        fedml_base_dir),
+                    should_capture_stdout=False, should_capture_stderr=False)
+
+                return inference_gateway_process
+
+        return None
+
+    @staticmethod
+    def start_device_inference_monitor(
+            run_id, end_point_name, model_id, model_name, model_version, check_stopped_event=True,
+            redis_addr=None, redis_port=None, redis_password=None
+    ):
+        # start inference monitor server
+        logging.info(f"start the model inference monitor, end point {run_id}, model name {model_name}...")
+        run_id_str = str(run_id)
+        pip_source_dir = os.path.dirname(__file__)
+        monitor_file = os.path.join(pip_source_dir, "device_model_monitor.py")
+        python_program = get_python_program()
+        running_model_name = ServerConstants.get_running_model_name(end_point_name,
+                                                                    model_name, model_version, run_id, model_id)
+        monitor_process = ServerConstants.exec_console_with_shell_script_list(
+            [python_program, monitor_file, "-v", fedml.get_env_version(), "-ep", run_id_str,
+             "-epn", str(end_point_name), "-mi", str(model_id), "-mn", model_name,
+             "-mv", model_version, "-iu", "infer_url", "-ra", redis_addr,
+             "-rp", redis_port, "-rpw", redis_password],
+            should_capture_stdout=False, should_capture_stderr=False
+        )
+        return monitor_process
+
+    @staticmethod
+    def stop_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version):
+        """Kill any running monitor process for the given endpoint/model."""
+        # stop inference monitor server
+        logging.info(f"stop the model inference monitor, end point {run_id}, model name {model_name}...")
+        sys_utils.cleanup_model_monitor_processes(run_id, end_point_name,
+                                                  model_id, model_name, model_version)
+
+    @staticmethod
+    def recover_inference_and_monitor(redis_addr=None, redis_port=None, redis_password=None):
+        # noinspection PyBroadException
+        try:
+            history_jobs = FedMLServerDataInterface.get_instance().get_history_jobs()
+            for job in history_jobs.job_list:
+                if job.running_json is None:
+                    continue
+
+                if job.deployment_result == "":
+                    continue
+
+                run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \
+                    model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \
+                    inference_end_point_id, use_gpu, memory_size, model_version, inference_port = \
+                    FedMLDeployMasterJobRunner.parse_model_run_params(json.loads(job.running_json))
+
+                FedMLModelCache.get_instance().set_redis_params(redis_addr, redis_password)
+                is_activated = FedMLModelCache.get_instance(redis_addr, redis_port). \
+                    get_end_point_activation(run_id)
+                if not is_activated:
+                    continue
+
+                FedMLDeployMasterJobRunner.start_device_inference_gateway(
+                    run_id, end_point_name, model_id, model_name, model_version, inference_port=inference_port)
+
+                FedMLDeployMasterJobRunner.stop_device_inference_monitor(
+                    run_id, end_point_name, model_id, model_name, model_version)
+                FedMLDeployMasterJobRunner.start_device_inference_monitor(
+                    run_id, end_point_name, model_id, model_name, model_version,
+                    redis_addr=FedMLDeployMasterJobRunner.default_redis_addr,
+                    redis_port=FedMLDeployMasterJobRunner.default_redis_port,
+                    redis_password=FedMLDeployMasterJobRunner.default_redis_password
+                )
+        except Exception as e:
+            logging.info("recover inference and monitor: {}".format(traceback.format_exc()))
+
+    def send_first_scroll_update_msg(self):
+        """
+        Replica-level rolling update.
+        Delete the record of the replaced device and send the deployment msg to the devices
+
+        Returns the list of edge ids in the first update chunk (empty when
+        the request carries no replica_version_diff).
+        """
+        if "replica_version_diff" not in self.request_json or self.request_json["replica_version_diff"] is None:
+            return []
+
+        first_chunk_dict = self.request_json["replica_version_diff"]
+
+        # Delete the record of the replaced device
+        self.delete_device_replica_info_on_master(first_chunk_dict)
+
+        # Send the deployment msg to the devices, (we reuse the start_deployment msg)
+        for edge_id in first_chunk_dict.keys():
+            if edge_id == self.edge_id:
+                continue
+            # send start deployment request to each device
+            self.send_deployment_start_request_to_edge(edge_id)
+        # NOTE(review): the returned list still contains self.edge_id even
+        # though no message is sent to it — confirm intended.
+        return list(first_chunk_dict.keys())
+
+    def send_next_scroll_update_msg(self, device_id, replica_no):
+        """Advance the rolling update after one replica reports its result:
+        shrink the current updating window and, when a new chunk is due,
+        resend start_deployment to that chunk's devices."""
+        if replica_no is None:
+            return
+
+        replica_controller = self.replica_controller
+
+        # No version diff at all -> nothing to roll.
+        if replica_controller.total_replica_version_diff_num == 0:
+            return
+
+        replica_controller.callback_update_updating_window(device_id, replica_no)
+
+        # Decide whether to send the next scroll update
+        next_chunk_dict = replica_controller.get_next_chunk_devices_replica()
+
+        replica_controller.curr_replica_updating_window = copy.deepcopy(next_chunk_dict)
+
+        if next_chunk_dict:
+            self.request_json["replica_version_diff"] = next_chunk_dict
+            self.delete_device_replica_info_on_master(next_chunk_dict)
+
+            # Send the deployment msg to the devices, (we reuse the start_deployment msg)
+            for edge_id in next_chunk_dict.keys():
+                if edge_id == self.edge_id:
+                    continue
+                # send start deployment request to each device
+                self.send_deployment_start_request_to_edge(edge_id)
+        return
+
+    def delete_device_replica_info_on_master(self, edge_id_replica_no_dict):
+        """Remove cached deployment results for the replicas about to be
+        replaced (mapping: device id -> replica numbers) from Redis/SQLite."""
+        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
+        # Remove the record of the replaced device
+        # [Deprecated] deployment status & device info
+        # Delete the result in deployment result list in Redis / SQLite
+        device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
+            get_deployment_result_list(self.request_json["end_point_id"], self.request_json["end_point_name"],
+                                       self.request_json["model_config"]["model_name"])
+        delete_device_result_list = []
+        for device_result in device_result_list:
+            device_result_dict = json.loads(device_result)
+            # Match on both the cached device id and the cached replica no.
+            if (str(device_result_dict["cache_device_id"]) in edge_id_replica_no_dict.keys() and
+                    str(device_result_dict["cache_replica_no"]) in
+                    edge_id_replica_no_dict[str(device_result_dict["cache_device_id"])]):
+                delete_device_result_list.append(device_result)
+
+        for delete_item in delete_device_result_list:
+            FedMLModelCache.get_instance(self.redis_addr, self.redis_port).delete_deployment_result(
+                delete_item, self.request_json["end_point_id"],
+                self.request_json["end_point_name"],
+                self.request_json["model_config"]["model_name"]
+            )
+
+        logging.info(f"Deleted the record of the replaced device {delete_device_result_list}")
+
+    def save_deployed_replica_payload(self, payload_json):
+        # Keep a deep copy of the most recent successful slave payload; it is
+        # used to build the final result reported to MLOps.
+        self.deployed_replica_payload = copy.deepcopy(payload_json)
+
+    def get_deployed_replica_payload(self):
+        # May be None when the request contained only remove operations.
+        return self.deployed_replica_payload
+
+    def callback_update_curr_replica_num_state(self, changed_device_id, replica_no, op_type):
+        # Forward a replica state change to the num controller, if created.
+        if self.replica_controller is not None:
+            self.replica_controller.callback_update_curr_replica_num_state(changed_device_id, replica_no, op_type)
+
+    def is_all_replica_num_reconciled(self):
+        # True when every device reached its target replica count; False
+        # before run_impl has created the controller.
+        if self.replica_controller is not None:
+            return self.replica_controller.is_all_replica_num_reconciled()
+
+        return False
+
+    def is_all_replica_version_reconciled(self):
+        # True when the rolling version update finished on all replicas.
+        if self.replica_controller is not None:
+            return self.replica_controller.is_all_replica_version_reconciled()
+
+        return False
+
+    @staticmethod
+    def generate_request_json_with_replica_diff(run_id, edge_id, request_json):
+        # Replica Controller is per deployment!
+        replica_controller = FedMLDeviceReplicaController(edge_id, request_json)
+        logging.info(f"Start Diff Replica controller for run {run_id} on edge {edge_id}")
+
+        # Prepare num diff
+        run_id_str = str(run_id)
+        new_request_with_num_diff = replica_controller.generate_diff_to_request_json()
+        request_json = new_request_with_num_diff
+
+        # Prepare version diff
+        new_request_with_version_diff = replica_controller.init_first_update_device_replica_mapping()
+        request_json = new_request_with_version_diff
+
+        return request_json
+
+    @staticmethod
+    def parse_model_run_params(running_json):
+        run_id = running_json["end_point_id"]
+        end_point_name = running_json["end_point_name"]
+        token = running_json["token"]
+        user_id = running_json["user_id"]
+        user_name = running_json["user_name"]
+        device_ids = running_json["device_ids"]
+        device_objs = running_json["device_objs"]
+
+        model_config = running_json["model_config"]
+        model_name = model_config["model_name"]
+        model_id = model_config["model_id"]
+        model_storage_url = model_config["model_storage_url"]
+        scale_min = model_config.get("instance_scale_min", 0)
+        scale_max = model_config.get("instance_scale_max", 0)
+        inference_engine = model_config.get("inference_engine", 0)
+        model_is_from_open = model_config["is_from_open"]
+        inference_end_point_id = run_id
+        use_gpu = "gpu"  # TODO: Get GPU from device infos
+        memory_size = "256m"  # TODO: Get Memory size for each instance
+        model_version = model_config["model_version"]
+        model_config_parameters = running_json.get("parameters", {})
+
+        inference_port = model_config_parameters.get("server_internal_port",    # Internal port is for the gateway
+                                                     ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
+        inference_port_external = model_config_parameters.get("server_external_port", inference_port)
+
+        return run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \
+            model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \
+            inference_end_point_id, use_gpu, memory_size, model_version, inference_port
+
+    # Override
+    def get_download_package_info(self, packages_config=None):
+        # For deployment jobs the "package" is the model itself: return its
+        # name and storage URL for the base runner's download step.
+        model_name = packages_config["model_name"]
+        model_storage_url = packages_config["model_storage_url"]
+        return model_name, model_storage_url
+
+    # Override
+    def build_dynamic_args(self, run_id, run_config, package_conf_object, base_dir):
+        # Not needed for deployment jobs; the base-class hook is a no-op here.
+        pass
+
+    # Override
+    def build_dynamic_constrain_variables(self, run_id, run_config):
+        # Not needed for deployment jobs; the base-class hook is a no-op here.
+        pass
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
new file mode 100755
index 0000000000..40896b9ee8
--- /dev/null
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
@@ -0,0 +1,62 @@
+
+import json
+from fedml.core.common.singleton import Singleton
+from ..master.base_master_job_runner_manager import FedMLBaseMasterJobRunnerManager
+from .master_job_runner import FedMLDeployMasterJobRunner
+from ..scheduler_core.general_constants import GeneralConstants
+
+
+class FedMLDeployJobRunnerManager(FedMLBaseMasterJobRunnerManager, Singleton):
+    """Manages one FedMLDeployMasterJobRunner per endpoint (keyed by the
+    stringified endpoint id in self.job_runners) and routes endpoint-scoped
+    requests to the matching runner."""
+
+    def __init__(self):
+        FedMLBaseMasterJobRunnerManager.__init__(self)
+
+    @staticmethod
+    def get_instance():
+        # NOTE(review): relies on the Singleton base to make this constructor
+        # call return the shared instance — confirm Singleton semantics.
+        return FedMLDeployJobRunnerManager()
+
+    # Override
+    def _generate_job_runner_instance(
+            self, args, run_id=None, request_json=None, agent_config=None, edge_id=None
+    ):
+        # Factory hook: build a runner and point its inference host at the
+        # address advertised in the request.
+        job_runner = FedMLDeployMasterJobRunner(
+            args, run_id=run_id, request_json=request_json, agent_config=agent_config, edge_id=edge_id)
+        job_runner.infer_host = GeneralConstants.get_ip_address(request_json)
+        return job_runner
+
+    def save_deployment_result(self, topic, payload):
+        # Route a slave's result to the runner that owns the endpoint; drop
+        # it silently when no runner is registered for that endpoint.
+        payload_json = json.loads(payload)
+        endpoint_id = payload_json["end_point_id"]
+        run_id_str = str(endpoint_id)
+        if self.job_runners.get(run_id_str, None) is not None:
+            self.job_runners[run_id_str].save_deployment_result(topic=topic, payload=payload)
+
+    def send_deployment_stages(
+            self, end_point_id, model_name, model_id, model_inference_url,
+            model_stages_index, model_stages_title, model_stage_detail, message_center=None
+    ):
+        # Forward a deployment-stage report through the owning runner.
+        run_id_str = str(end_point_id)
+        if self.job_runners.get(run_id_str, None) is not None:
+            self.job_runners[run_id_str].send_deployment_stages(
+                end_point_id, model_name, model_id, model_inference_url,
+                model_stages_index, model_stages_title, model_stage_detail,
+                message_center=message_center
+            )
+
+    def send_deployment_delete_request_to_edges(self, end_point_id, payload, model_msg_object):
+        # Forward a delete request through the owning runner.
+        run_id_str = str(end_point_id)
+        if self.job_runners.get(run_id_str, None) is not None:
+            self.job_runners[run_id_str].send_deployment_delete_request_to_edges(payload, model_msg_object)
+
+    def stop_device_inference_monitor(self, run_id, end_point_name, model_id, model_name, model_version):
+        # Stop the monitor via the owning runner, if one exists.
+        run_id_str = str(run_id)
+        if self.job_runners.get(run_id_str, None) is not None:
+            self.job_runners[run_id_str].stop_device_inference_monitor(
+                run_id, end_point_name, model_id, model_name, model_version)
+
+    @staticmethod
+    def recover_inference_and_monitor():
+        # NOTE(review): no redis settings are forwarded, so the runner-side
+        # defaults (None) apply inside recover_inference_and_monitor —
+        # confirm intended.
+        FedMLDeployMasterJobRunner.recover_inference_and_monitor()
+
+    @staticmethod
+    def generate_request_json_with_replica_diff(run_id, edge_id, request_json):
+        # Pure delegation to the runner's static helper.
+        return FedMLDeployMasterJobRunner.generate_request_json_with_replica_diff(run_id, edge_id, request_json)
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
new file mode 100755
index 0000000000..e8be50f77f
--- /dev/null
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -0,0 +1,365 @@
+
+import json
+import logging
+import os
+from fedml.core.mlops import MLOpsConfigs, MLOpsRuntimeLog, MLOpsRuntimeLogDaemon
+from .device_model_cache import FedMLModelCache
+from .device_model_db import FedMLModelDatabase
+from .device_model_msg_object import FedMLModelMsgObject
+from .device_server_constants import ServerConstants
+from .device_server_data_interface import FedMLServerDataInterface
+from ..master.base_master_protocol_manager import FedMLBaseMasterProtocolManager
+from .master_job_runner_manager import FedMLDeployJobRunnerManager
+from ..scheduler_core.general_constants import GeneralConstants
+from ..scheduler_core.endpoint_sync_protocol import FedMLEndpointSyncProtocol
+
+
class FedMLDeployMasterProtocolManager(FedMLBaseMasterProtocolManager):
    """Master-side protocol manager for the model-deployment scheduler.

    Subscribes to the ModelOps endpoint topics (start / activate / deactivate /
    delete deployment) and to per-device deployment-result topics, and drives
    the deployment job runner manager accordingly.
    """

    def __init__(self, args, agent_config=None):
        FedMLBaseMasterProtocolManager.__init__(self, args, agent_config=agent_config)

        # Endpoint topics; filled in by generate_topics() once edge_id is known.
        self.topic_start_deployment = None
        self.topic_activate_endpoint = None
        self.topic_deactivate_deployment = None
        self.topic_delete_deployment = None

        # Inference host and Redis connection defaults used by the model cache.
        self.infer_host = "127.0.0.1"
        self.redis_addr = "local"
        self.redis_port = "6379"
        self.redis_password = "fedml_default"
        self.endpoint_sync_protocol = None

    # Override
    def _generate_protocol_manager_instance(self, args, agent_config=None):
        return FedMLDeployMasterProtocolManager(args, agent_config=agent_config)

    # Override
    def generate_topics(self):
        super().generate_topics()

        # The topic for starting a deployment
        self.topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id))

        # The topic for activating an endpoint
        self.topic_activate_endpoint = "model_ops/model_device/activate_deployment/{}".format(str(self.edge_id))

        # The topic for deactivating an endpoint
        self.topic_deactivate_deployment = "model_ops/model_device/deactivate_deployment/{}".format(str(self.edge_id))

        # The topic for deleting an endpoint
        self.topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(self.edge_id))

        # Subscribe topics for endpoints
        self.add_subscribe_topic(self.topic_start_deployment)
        self.add_subscribe_topic(self.topic_activate_endpoint)
        self.add_subscribe_topic(self.topic_deactivate_deployment)
        self.add_subscribe_topic(self.topic_delete_deployment)

    # Override
    def add_protocol_handler(self):
        super().add_protocol_handler()

        # Add the message listeners for endpoint related topics
        self.add_message_listener(self.topic_start_deployment, self.callback_start_deployment)
        self.add_message_listener(self.topic_activate_endpoint, self.callback_activate_deployment)
        self.add_message_listener(self.topic_deactivate_deployment, self.callback_deactivate_deployment)
        self.add_message_listener(self.topic_delete_deployment, self.callback_delete_deployment)

    # Override
    def _get_job_runner_manager(self):
        return FedMLDeployJobRunnerManager.get_instance()

    # Override
    def _init_extra_items(self):
        # Init the local job database.
        FedMLServerDataInterface.get_instance().create_job_table()
        try:
            FedMLModelDatabase.get_instance().set_database_base_dir(ServerConstants.get_database_dir())
            FedMLModelDatabase.get_instance().create_table()
        except Exception:
            # Best effort: the model database/table may already exist.
            pass

        # Recover inference serving and monitors for endpoints deployed
        # before the agent restarted.
        FedMLDeployJobRunnerManager.recover_inference_and_monitor()

    # Override
    def _process_connection_ready(self):
        # Once the connection is up, start syncing endpoint device info and
        # initialize runtime logging.
        self.endpoint_sync_protocol = FedMLEndpointSyncProtocol(
            agent_config=self.agent_config, mqtt_mgr=self.message_center)
        self.endpoint_sync_protocol.setup_listener_for_sync_device_info(self.edge_id)

        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)

    # Override
    def _process_connection_lost(self):
        pass

    # Override
    def print_connected_info(self):
        pass

    def callback_deployment_result_message(self, topic=None, payload=None):
        # Persist the deployment result reported by a slave (worker) device.
        logging.info(f"Received deployment result on topic {topic}")
        FedMLDeployJobRunnerManager.get_instance().save_deployment_result(topic, payload)

    def callback_delete_deployment(self, topic, payload):
        """Handle a delete-deployment request: deactivate, tear down and purge
        all local state for the endpoint."""
        # Parse payload as the model message object.
        logging.info("[Master] callback_delete_deployment")
        model_msg_object = FedMLModelMsgObject(topic, payload)

        # Set end point as deactivated status, then remove it from the cache.
        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
        FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
            set_end_point_activation(model_msg_object.inference_end_point_id,
                                     model_msg_object.end_point_name, False)
        FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
            delete_end_point(model_msg_object.inference_end_point_id, model_msg_object.end_point_name,
                             model_msg_object.model_name, model_msg_object.model_version)

        # Tell the worker devices to delete their deployments.
        FedMLDeployJobRunnerManager.get_instance().send_deployment_delete_request_to_edges(
            model_msg_object.inference_end_point_id, payload, model_msg_object)

        # Stop the job runner and the inference monitor for this endpoint.
        FedMLDeployJobRunnerManager.get_instance().stop_job_runner(model_msg_object.run_id)

        FedMLDeployJobRunnerManager.get_instance().stop_device_inference_monitor(
            model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_id,
            model_msg_object.model_name, model_msg_object.model_version)

        # Purge the local databases for this run.
        FedMLServerDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id)
        FedMLModelDatabase.get_instance().delete_deployment_result(
            model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name,
            model_version=model_msg_object.model_version)
        FedMLModelDatabase.get_instance().delete_deployment_run_info(
            end_point_id=model_msg_object.inference_end_point_id)

    def callback_start_deployment(self, topic, payload):
        """Handle a start-deployment request from ModelOps.

        Parses the request, prepares run logging and endpoint bookkeeping in
        the model cache, then starts a deployment job runner for the endpoint
        and reports the deployment stages back to ModelOps.
        """
        # Refresh MLOps configs (best effort; keep going when unreachable).
        # noinspection PyBroadException
        try:
            MLOpsConfigs.fetch_all_configs()
        except Exception:
            pass

        # Parse the deployment parameters. The unused reads below also act as
        # a presence check for required fields (raise KeyError when missing).
        request_json = json.loads(payload)
        run_id = request_json["end_point_id"]
        end_point_name = request_json["end_point_name"]
        token = request_json["token"]
        user_id = request_json["user_id"]
        user_name = request_json["user_name"]
        device_ids = request_json["device_ids"]
        device_objs = request_json["device_objs"]
        model_config = request_json["model_config"]
        model_name = model_config["model_name"]
        model_id = model_config["model_id"]
        model_storage_url = model_config["model_storage_url"]
        scale_min = model_config.get("instance_scale_min", 0)
        scale_max = model_config.get("instance_scale_max", 0)
        inference_engine = model_config.get("inference_engine", 0)
        inference_end_point_id = run_id

        # Start log processor for current run
        self.args.run_id = run_id
        self.args.edge_id = self.edge_id
        MLOpsRuntimeLog.get_instance(self.args).init_logs()
        MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source(
            ServerConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT)
        MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id)

        # Generate the deployment new parameters
        logging.info("callback_start_deployment {}".format(payload))
        run_id = inference_end_point_id
        run_id_str = str(run_id)
        request_json["run_id"] = run_id
        self.request_json = request_json
        self.running_request_json[run_id_str] = request_json
        diff_devices, diff_version = self.get_diff_devices(run_id)
        self.request_json["diff_devices"] = diff_devices
        self.request_json["diff_version"] = diff_version
        self.request_json["master_node_ip"] = GeneralConstants.get_ip_address(self.request_json)

        # Save the endpoint device info
        self.init_device_update_map()
        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
        FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
            set_end_point_device_info(request_json["end_point_id"], end_point_name, json.dumps(device_objs))

        # Save the endpoint token (the user-supplied token wins when present).
        usr_indicated_token = FedMLDeployMasterProtocolManager.get_usr_indicated_token(request_json)
        if usr_indicated_token != "":
            logging.info(f"Change token from {token} to {usr_indicated_token}")
            token = usr_indicated_token
        FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
            set_end_point_token(run_id, end_point_name, model_name, token)

        # Subscribe deployment result messages from slave devices
        self.subscribe_deployment_messages_from_slave_devices(request_json)

        # Send stage: MODEL_DEPLOYMENT_STAGE1 = "Received"
        # NOTE: use the run_id of THIS request; self.run_id is not assigned in
        # this callback and may refer to a previous run.
        FedMLDeployJobRunnerManager.get_instance().send_deployment_stages(
            run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"],
            ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"], "Received request for end point {}".format(run_id),
            message_center=self.message_center)

        # Send stage: MODEL_DEPLOYMENT_STAGE2 = "Initializing"
        FedMLDeployJobRunnerManager.get_instance().send_deployment_stages(
            run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE2["index"],
            ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"], ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"],
            message_center=self.message_center)

        # Save the runner info
        ServerConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, self.edge_id, run_id=run_id)

        # Start the job runner to deploy models
        self.running_request_json[run_id_str] = FedMLDeployJobRunnerManager.generate_request_json_with_replica_diff(
            run_id, self.edge_id, request_json
        )
        self._get_job_runner_manager().start_job_runner(
            run_id, request_json, args=self.args, edge_id=self.edge_id,
            sender_message_queue=self.message_center.get_sender_message_queue(),
            listener_message_queue=self.get_listener_message_queue(),
            status_center_queue=self.get_status_queue()
        )
        process = self._get_job_runner_manager().get_runner_process(run_id)
        if process is not None:
            ServerConstants.save_run_process(run_id, process.pid)

        # Send stage: MODEL_DEPLOYMENT_STAGE3 = "StartRunner"
        FedMLDeployJobRunnerManager.get_instance().send_deployment_stages(
            run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE3["index"],
            ServerConstants.MODEL_DEPLOYMENT_STAGE3["text"], ServerConstants.MODEL_DEPLOYMENT_STAGE3["text"],
            message_center=self.message_center)

    def callback_activate_deployment(self, topic, payload):
        """Mark an already-deployed endpoint as activated."""
        logging.info("callback_activate_deployment: topic = %s, payload = %s" % (topic, payload))

        # Parse payload as the model message object.
        model_msg_object = FedMLModelMsgObject(topic, payload)

        # Get the previous deployment status; only deployed endpoints can be
        # (de)activated.
        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
        endpoint_status = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
            get_end_point_status(model_msg_object.inference_end_point_id)
        if endpoint_status != ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED:
            return

        # Set end point as activated status
        FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_activation(
            model_msg_object.inference_end_point_id, model_msg_object.end_point_name, True)

    def callback_deactivate_deployment(self, topic, payload):
        """Mark an already-deployed endpoint as deactivated."""
        logging.info("callback_deactivate_deployment: topic = %s, payload = %s" % (topic, payload))

        # Parse payload as the model message object.
        model_msg_object = FedMLModelMsgObject(topic, payload)

        # Get the endpoint status; only deployed endpoints can be deactivated.
        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
        endpoint_status = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
            get_end_point_status(model_msg_object.inference_end_point_id)
        if endpoint_status != ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED:
            return

        # Set end point as deactivated status.
        # FIX: pass end_point_name here (the activate/delete paths key the
        # activation record by end point name, not model name).
        FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_activation(
            model_msg_object.inference_end_point_id, model_msg_object.end_point_name, False)

    def get_diff_devices(self, run_id) -> (dict, dict):
        """
        {device_id(int): "op: add" | "op: delete" | "op: replace"}
        "op: add" -> need to add
        "op: delete" -> need to delete device
        "op: replace" -> need to restart the container of the device on same port with new (same) model pkg

        {device_id(int): "old_version"}
        """
        try:
            logging.info(f"Get diff devices for run {run_id}")
            request_json = self.running_request_json.get(str(run_id))

            diff_devices = {}
            diff_version = {}
            FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
            device_objs = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
                get_end_point_device_info(run_id)
            if device_objs is None:
                # First deployment of this endpoint: every device is an add.
                for new_device_id in request_json["device_ids"]:
                    diff_devices[new_device_id] = ServerConstants.DEVICE_DIFF_ADD_OPERATION
            else:
                device_objs_dict = json.loads(device_objs)
                device_ids_frm_db = [d["id"] for d in device_objs_dict]

                # Devices recorded before but absent from the new request.
                for exist_device_id in device_ids_frm_db:
                    if exist_device_id not in request_json["device_ids"]:
                        diff_devices[exist_device_id] = ServerConstants.DEVICE_DIFF_DELETE_OPERATION

                for new_device_id in request_json["device_ids"]:
                    if new_device_id not in device_ids_frm_db:
                        diff_devices[new_device_id] = ServerConstants.DEVICE_DIFF_ADD_OPERATION
                    else:
                        # The master's own id never needs a replace.
                        if new_device_id == self.edge_id:
                            continue

                        # An existing device with a different model version
                        # must be replaced in place.
                        old_version = self.should_update_device(request_json, new_device_id)
                        if old_version:
                            diff_devices[new_device_id] = ServerConstants.DEVICE_DIFF_REPLACE_OPERATION
                            diff_version[new_device_id] = old_version
                        else:
                            pass
            logging.info(f"Diff devices: {diff_devices}")
        except Exception as e:
            # Persist the failure reason for offline debugging, then re-raise.
            error_log_path = f"~/.fedml/fedml-model-server/fedml/logs/{run_id}_error.txt"
            os.makedirs(os.path.dirname(os.path.expanduser(error_log_path)), exist_ok=True)
            with open(os.path.expanduser(error_log_path), "w") as f:
                f.write(str(e))
            raise e
        return diff_devices, diff_version

    def should_update_device(self, payload, new_device_id):
        """
        Query the device info in local redis, if the device info is different from the payload,
        return the old model version
        """
        device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
            get_deployment_result_list(self.request_json["end_point_id"],
                                       self.request_json["end_point_name"],
                                       self.request_json["model_config"]["model_name"])

        for device_result in device_result_list:
            if device_result is None:
                continue
            device_result_dict = json.loads(device_result)

            if int(device_result_dict["cache_device_id"]) == new_device_id:
                result_body = json.loads(device_result_dict["result"])
                if result_body["model_version"] != payload["model_config"]["model_version"]:
                    return result_body["model_version"]
                else:
                    return None
        return None

    @staticmethod
    def get_usr_indicated_token(request_json) -> str:
        # Return the user-supplied authentication token, or "" when absent.
        usr_indicated_token = ""
        if "parameters" in request_json and "authentication_token" in request_json["parameters"]:
            usr_indicated_token = request_json["parameters"]["authentication_token"]
        return usr_indicated_token

    def init_device_update_map(self):
        # [Deprecated] Use the replica controller to manage the device update
        pass

    def subscribe_deployment_messages_from_slave_devices(self, request_json):
        # Listen for deployment results from every slave device of this run
        # (the master's own edge id is skipped).
        if request_json is None:
            return
        run_id = request_json["run_id"]
        edge_id_list = request_json["device_ids"]
        logging.info("Edge ids: " + str(edge_id_list))
        for edge_id in edge_id_list:
            if str(edge_id) == str(self.edge_id):
                continue
            # subscribe deployment result message for each model device
            deployment_results_topic = "model_device/model_device/return_deployment_result/{}".format(edge_id)
            self.add_message_listener(deployment_results_topic, self.callback_deployment_result_message)
            self.subscribe_msg(deployment_results_topic)

            logging.info("subscribe device messages {}".format(deployment_results_topic))
diff --git a/python/fedml/computing/scheduler/model_scheduler/model_device_client.py b/python/fedml/computing/scheduler/model_scheduler/model_device_client.py
index f397c5421f..05f43afc5f 100755
--- a/python/fedml/computing/scheduler/model_scheduler/model_device_client.py
+++ b/python/fedml/computing/scheduler/model_scheduler/model_device_client.py
@@ -1,16 +1,12 @@
-import json
+
+import copy
 import logging
 import multiprocessing
-import os
 import time
 import traceback
 from multiprocessing import Process
-
-import click
-from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants
-
-from fedml.computing.scheduler.model_scheduler import device_client_runner
-from fedml.computing.scheduler.model_scheduler import device_client_constants
+from ..scheduler_core.account_manager import FedMLAccountManager
+from .worker_agent import FedMLDeployWorkerAgent
 
 
 class FedMLModelDeviceClientRunner:
@@ -18,8 +14,7 @@ def __init__(self, args, current_device_id, os_name, is_from_docker, service_con
         self.agent_process = None
         self.agent_runner = None
         self.agent_process_event = None
-        self.real_client_runner = None
-        self.args = args
+        self.args = copy.deepcopy(args)
         self.service_config = service_config
         self.unique_device_id = None
         self.current_device_id = current_device_id
@@ -31,8 +26,6 @@ def __init__(self, args, current_device_id, os_name, is_from_docker, service_con
         self.redis_port = "6379"
         self.redis_password = "fedml_default"
 
-        self.agent_runner = None
-
     def get_edge_id(self):
         return self.edge_id
 
@@ -45,33 +38,34 @@ def start(self):
         self.agent_runner.redis_password = self.redis_password
         if self.agent_process_event is None:
             self.agent_process_event = multiprocessing.Event()
-        self.agent_process = Process(target=self.agent_runner.run_entry, args=(self.agent_process_event,))
-        self.edge_id = self.bind_device(init_params=False)
+        self.agent_process = Process(target=self.agent_runner.run_entry, args=(self.agent_process_event, self.args,))
+        self.edge_id = self.bind_device()
         self.agent_process.start()
 
-    def run_entry(self, process_event):
+    def run_entry(self, process_event, in_args):
         # print(f"Model worker process id {os.getpid()}")
 
         self.agent_process_event = process_event
 
+        worker_agent = FedMLDeployWorkerAgent()
+
         while not self.agent_process_event.is_set():
             try:
                 try:
-                    if self.real_client_runner is not None:
-                        self.real_client_runner.stop_agent()
+                    worker_agent.logout()
                 except Exception as e:
                     pass
 
-                self.bind_device()
-
-                self.start_agent()
+                worker_agent.login(
+                    in_args.account_id, api_key=in_args.api_key, device_id=in_args.device_id,
+                    os_name=in_args.os_name, role=FedMLAccountManager.ROLE_DEPLOY_WORKER_ON_PREM
+                )
             except Exception as e:
                 logging.info("Restart model device client: {}".format(traceback.format_exc()))
                 pass
             finally:
                 try:
-                    if self.real_client_runner is not None:
-                        self.real_client_runner.stop_agent()
+                    worker_agent.logout()
                 except Exception as e:
                     pass
                 time.sleep(15)
@@ -87,100 +81,18 @@ def check_runner_stop_event(self):
             raise Exception("Runner stopped")
 
    def stop(self):
        # Log out the deploy worker agent (best effort), then signal the
        # agent process loop to exit via the shared multiprocessing event.
        FedMLDeployWorkerAgent.logout()

        if self.agent_process_event is not None:
            self.agent_process_event.set()
 
-    def get_binding_unique_device_id(self, current_device_id, os_name, is_from_docker=False):
-        role_str = "OnPremise"
-
-        # Judge whether running from fedml docker hub
-        is_from_fedml_docker_hub = False
-        dock_loc_file = device_client_constants.ClientConstants.get_docker_location_file()
-        if os.path.exists(dock_loc_file):
-            is_from_fedml_docker_hub = True
-
-        # Build unique device id
-        is_from_k8s = device_client_constants.ClientConstants.is_running_on_k8s()
-        if is_from_k8s:
-            unique_device_id = current_device_id + "@" + os_name + ".MDA.K8S." + role_str + ".Device"
-        elif is_from_docker:
-            unique_device_id = current_device_id + "@" + os_name + ".MDA.Docker." + role_str + ".Device"
+    def bind_device(self):
+        # Login account
+        login_result = FedMLAccountManager.get_instance().login(
+            self.args.account_id, api_key=self.args.api_key, device_id=self.args.device_id,
+            os_name=self.args.os_name, role=FedMLAccountManager.ROLE_DEPLOY_WORKER_ON_PREM
+        )
+        if login_result is not None:
+            return login_result.edge_id
         else:
-            unique_device_id = current_device_id + "@" + os_name + ".MDA." + role_str + ".Device"
-        if is_from_fedml_docker_hub:
-            unique_device_id = current_device_id + "@" + os_name + ".MDA.DockerHub." + role_str + ".Device"
-
-        return unique_device_id
-
-    def init_logs_param(self, edge_id):
-        # Init runtime logs
-        self.args.log_file_dir = device_client_constants.ClientConstants.get_log_file_dir()
-        self.args.run_id = 0
-        self.args.role = "client"
-        client_ids = list()
-        client_ids.append(edge_id)
-        self.args.client_id_list = json.dumps(client_ids)
-        setattr(self.args, "using_mlops", True)
-
-    def bind_device(self, init_params=True):
-        self.unique_device_id = self.get_binding_unique_device_id(self.current_device_id, self.os_name,
-                                                                  self.is_from_docker)
-
-        # Create client runner for communication with the FedML server.
-        if self.real_client_runner is None:
-            self.real_client_runner = device_client_runner.FedMLClientRunner(self.args)
-
-        # Bind account id to the ModelOps platform.
-        register_try_count = 0
-        edge_id = -1
-        user_name = None
-        extra_url = None
-        while register_try_count < 5:
-            try:
-                edge_id, user_name, extra_url = self.real_client_runner.bind_account_and_device_id(
-                    self.service_config["ml_ops_config"]["EDGE_BINDING_URL"], self.args.account_id,
-                    self.unique_device_id, self.os_name
-                )
-                if edge_id > 0:
-                    self.real_client_runner.edge_id = edge_id
-                    break
-            except Exception as e:
-                click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_2, traceback.format_exc()))
-                click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING)
-                register_try_count += 1
-                time.sleep(3)
-                continue
-
-        if edge_id <= 0:
-            click.echo("")
-            click.echo("Oops, you failed to login the FedML ModelOps platform.")
-            click.echo("Please check whether your network is normal!")
-            return
-        self.edge_id = edge_id
-
-        # Init runtime logs
-        if init_params:
-            setattr(self.args, "client_id", edge_id)
-            self.init_logs_param(edge_id)
-            self.real_client_runner.args = self.args
-            self.real_client_runner.user_name = user_name
-
-        return edge_id
-
-    def start_agent(self):
-        self.real_client_runner.unique_device_id = self.unique_device_id
-        device_client_constants.ClientConstants.save_runner_infos(self.current_device_id + "." + self.os_name,
-                                                                  self.edge_id, run_id=0)
-
-        # Setup MQTT connection for communication with the FedML server.
-        self.real_client_runner.infer_host = self.infer_host
-        self.real_client_runner.redis_addr = self.redis_addr
-        self.real_client_runner.redis_port = self.redis_port
-        self.real_client_runner.redis_password = self.redis_password
-        self.real_client_runner.setup_agent_mqtt_connection(self.service_config)
-
-        # Start mqtt looper
-        self.real_client_runner.start_agent_mqtt_loop(should_exit_sys=False)
+            return None
diff --git a/python/fedml/computing/scheduler/model_scheduler/model_device_server.py b/python/fedml/computing/scheduler/model_scheduler/model_device_server.py
index 01228125aa..b2ecd144b1 100755
--- a/python/fedml/computing/scheduler/model_scheduler/model_device_server.py
+++ b/python/fedml/computing/scheduler/model_scheduler/model_device_server.py
@@ -1,16 +1,12 @@
-import json
+
+import copy
 import logging
 import multiprocessing
-import os
 import time
 import traceback
 from multiprocessing import Process
-
-import click
-from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants
-
-from fedml.computing.scheduler.model_scheduler import device_server_runner
-from fedml.computing.scheduler.model_scheduler import device_server_constants
+from ..scheduler_core.account_manager import FedMLAccountManager
+from .master_agent import FedMLDeployMasterAgent
 
 
 class FedMLModelDeviceServerRunner:
@@ -18,8 +14,7 @@ def __init__(self, args, current_device_id, os_name, is_from_docker, service_con
         self.agent_process = None
         self.agent_runner = None
         self.agent_process_event = None
-        self.real_server_runner = None
-        self.args = args
+        self.args = copy.deepcopy(args)
         self.service_config = service_config
         self.unique_device_id = None
         self.current_device_id = current_device_id
@@ -30,7 +25,6 @@ def __init__(self, args, current_device_id, os_name, is_from_docker, service_con
         self.redis_addr = "local"
         self.redis_port = "6379"
         self.redis_password = "fedml_default"
-        self.agent_runner = None
 
     def get_edge_id(self):
         return self.edge_id
@@ -44,33 +38,33 @@ def start(self):
         self.agent_runner.redis_password = self.redis_password
         if self.agent_process_event is None:
             self.agent_process_event = multiprocessing.Event()
-        self.agent_process = Process(target=self.agent_runner.run_entry, args=(self.agent_process_event,))
-        self.edge_id = self.bind_device(init_params=False)
+        self.agent_process = Process(target=self.agent_runner.run_entry, args=(self.agent_process_event, self.args))
+        self.edge_id = self.bind_device()
         self.agent_process.start()
 
-    def run_entry(self, process_event):
+    def run_entry(self, process_event, in_args):
         # print(f"Model master process id {os.getpid()}")
 
         self.agent_process_event = process_event
+        master_agent = FedMLDeployMasterAgent()
 
         while not self.agent_process_event.is_set():
             try:
                 try:
-                    if self.real_server_runner is not None:
-                        self.real_server_runner.stop_agent()
+                    master_agent.logout()
                 except Exception as e:
                     pass
 
-                self.bind_device()
-
-                self.start_agent()
+                master_agent.login(
+                    in_args.account_id, api_key=in_args.api_key, device_id=in_args.device_id,
+                    os_name=in_args.os_name, role=FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM
+                )
             except Exception as e:
                 logging.info("Restart model device server: {}".format(traceback.format_exc()))
                 pass
             finally:
                 try:
-                    if self.real_server_runner is not None:
-                        self.real_server_runner.stop_agent()
+                    master_agent.logout()
                 except Exception as e:
                     pass
                 time.sleep(15)
@@ -86,104 +80,18 @@ def check_runner_stop_event(self):
             raise Exception("Runner stopped")
 
     def stop(self):
-        if self.real_server_runner is not None:
-            self.real_server_runner.stop_agent()
+        FedMLDeployMasterAgent.logout()
 
         if self.agent_process_event is not None:
             self.agent_process_event.set()
 
-    def get_binding_unique_device_id(self, current_device_id, os_name, is_from_docker=False):
-        role_str = "OnPremise"
-
-        # Judge whether running from fedml docker hub
-        is_from_fedml_docker_hub = False
-        dock_loc_file = device_server_constants.ServerConstants.get_docker_location_file()
-        if os.path.exists(dock_loc_file):
-            is_from_fedml_docker_hub = True
-
-        # Build unique device id
-        is_from_k8s = device_server_constants.ServerConstants.is_running_on_k8s()
-        if is_from_k8s:
-            unique_device_id = current_device_id + "@" + os_name + ".MDA.K8S." + role_str + ".Master.Device"
-        elif is_from_docker:
-            unique_device_id = current_device_id + "@" + os_name + ".MDA.Docker." + role_str + ".Master.Device"
+    def bind_device(self):
+        # Login account
+        login_result = FedMLAccountManager.get_instance().login(
+            self.args.account_id, api_key=self.args.api_key, device_id=self.args.device_id,
+            os_name=self.args.os_name, role=FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM
+        )
+        if login_result is not None:
+            return login_result.edge_id
         else:
-            unique_device_id = current_device_id + "@" + os_name + ".MDA." + role_str + ".Master.Device"
-
-        if is_from_fedml_docker_hub:
-            unique_device_id = current_device_id + "@" + os_name + ".MDA.DockerHub." + role_str + ".Master.Device"
-
-        return unique_device_id
-
-    def init_logs_param(self, edge_id):
-        self.args.log_file_dir = device_server_constants.ServerConstants.get_log_file_dir()
-        self.args.run_id = 0
-        self.args.role = "server"
-        self.args.edge_id = edge_id
-        setattr(self.args, "using_mlops", True)
-        setattr(self.args, "server_agent_id", edge_id)
-
-    def bind_device(self, init_params=True):
-        self.unique_device_id = self.get_binding_unique_device_id(self.current_device_id, self.os_name,
-                                                                  self.is_from_docker)
-
-        # Create client runner for communication with the FedML server.
-        if self.real_server_runner is None:
-            self.real_server_runner = device_server_runner.FedMLServerRunner(self.args)
-
-        # Bind account id to the ModelOps platform.
-        register_try_count = 0
-        edge_id = -1
-        user_name = None
-        extra_url = None
-        while register_try_count < 5:
-            try:
-                edge_id, user_name, extra_url = self.real_server_runner.bind_account_and_device_id(
-                    self.service_config["ml_ops_config"]["EDGE_BINDING_URL"], self.args.account_id,
-                    self.unique_device_id, self.os_name
-                )
-                if edge_id > 0:
-                    self.real_server_runner.edge_id = edge_id
-                    break
-            except Exception as e:
-                click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_2, traceback.format_exc()))
-                click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING)
-                register_try_count += 1
-                time.sleep(3)
-                continue
-
-        if edge_id <= 0:
-            click.echo("")
-            click.echo("Oops, you failed to login the FedML ModelOps platform.")
-            click.echo("Please check whether your network is normal!")
-            return
-        self.edge_id = edge_id
-
-        # Init runtime logs
-        if init_params:
-            setattr(self.args, "client_id", edge_id)
-            self.real_server_runner.infer_host = self.infer_host
-            self.real_server_runner.redis_addr = self.redis_addr
-            self.real_server_runner.redis_port = self.redis_port
-            self.real_server_runner.redis_password = self.redis_password
-            self.init_logs_param(edge_id)
-            self.real_server_runner.args = self.args
-            self.real_server_runner.run_as_edge_server_and_agent = True
-            self.real_server_runner.user_name = user_name
-
-        return edge_id
-
-    def start_agent(self):
-        # Log arguments and binding results.
-        # logging.info("login: unique_device_id = %s" % str(unique_device_id))
-        # logging.info("login: edge_id = %s" % str(edge_id))
-        self.real_server_runner.unique_device_id = self.unique_device_id
-        device_server_constants.ServerConstants.save_runner_infos(self.current_device_id + "." + self.os_name,
-                                                                  self.edge_id, run_id=0)
-
-        # Setup MQTT connection for communication with the FedML server.
-        self.real_server_runner.infer_host = self.infer_host
-        self.real_server_runner.setup_agent_mqtt_connection(self.service_config)
-
-        # Start mqtt looper
-        self.real_server_runner.start_agent_mqtt_loop(should_exit_sys=False)
+            return None
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_agent.py b/python/fedml/computing/scheduler/model_scheduler/worker_agent.py
new file mode 100755
index 0000000000..bdbe5fc143
--- /dev/null
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_agent.py
@@ -0,0 +1,27 @@
+
+from .device_client_constants import ClientConstants
+from .device_client_data_interface import FedMLClientDataInterface
+from .worker_protocol_manager import FedMLDeployWorkerProtocolManager
+from ..slave.base_slave_agent import FedMLBaseSlaveAgent
+
+
class FedMLDeployWorkerAgent(FedMLBaseSlaveAgent):
    """Slave-side agent specialized for model-deployment workers.

    Wires the generic slave-agent life cycle to the deployment-specific
    constants, the local job database, and the deploy-worker protocol manager.
    """

    def __init__(self):
        super().__init__()

    # Override: deployment workers keep their logs under the client log directory.
    def _get_log_file_dir(self):
        log_dir = ClientConstants.get_log_file_dir()
        return log_dir

    # Override: persist the bound device info so other local tools can find it.
    def _save_agent_info(self, unique_device_id, edge_id):
        ClientConstants.save_runner_infos(unique_device_id, edge_id)

    # Override: make sure the local job table exists before any job arrives.
    def _init_database(self):
        data_interface = FedMLClientDataInterface.get_instance()
        data_interface.create_job_table()

    # Override: deployment workers speak the deploy-worker protocol.
    def _generate_protocol_manager_instance(self, args, agent_config=None):
        return FedMLDeployWorkerProtocolManager(args, agent_config=agent_config)
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
new file mode 100755
index 0000000000..5d6f1a4d8e
--- /dev/null
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
@@ -0,0 +1,489 @@
+
+import json
+import logging
+import os
+import shutil
+import time
+import traceback
+import urllib
+from abc import ABC
+from urllib.parse import urljoin, urlparse
+import yaml
+from fedml.computing.scheduler.comm_utils.job_utils import JobRunnerUtils
+from fedml.core.mlops import MLOpsRuntimeLog
+from .device_client_constants import ClientConstants
+from .device_model_cache import FedMLModelCache
+from ..scheduler_core.general_constants import GeneralConstants
+from ..slave.base_slave_job_runner import FedMLBaseSlaveJobRunner
+from .device_model_deployment import start_deployment
+from .device_model_db import FedMLModelDatabase
+from .device_replica_handler import FedMLDeviceReplicaHandler
+
+
class FedMLDeployWorkerJobRunner(FedMLBaseSlaveJobRunner, ABC):
    """Job runner that executes a model-deployment job on a worker (slave) device.

    It downloads the model package, reconciles the desired replica set against
    the locally running replicas (op: add / remove / update), starts or removes
    replicas via ``start_deployment`` and the replica handler, and reports
    deployment results and client status back to the master through the
    message center and status reporter.
    """

    def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id=0,
                 cuda_visible_gpu_ids_str=None):
        FedMLBaseSlaveJobRunner.__init__(
            self, args, edge_id=edge_id, request_json=request_json, agent_config=agent_config, run_id=run_id,
            cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str, agent_data_dir=ClientConstants.get_data_dir(),
            agent_package_download_dir=ClientConstants.get_package_download_dir(),
            agent_package_unzip_dir=GeneralConstants.get_package_unzip_dir(ClientConstants.get_package_download_dir()),
            agent_log_file_dir=ClientConstants.get_log_file_dir()
        )

        # Inference/Redis connection defaults; infer_host may be overwritten by
        # the job runner manager from the request json.
        self.infer_host = "127.0.0.1"
        self.redis_addr = "local"
        self.redis_port = "6379"
        self.redis_password = "fedml_default"
        # True when the model is a raw binary from the "open" platform instead
        # of a zipped package (see update_local_fedml_config).
        self.model_is_from_open = False
        # Created per run inside run_impl from the request json.
        self.replica_handler = None

    # Override
    def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None):
        # NOTE(review): the agent_config parameter is ignored in favor of
        # self.agent_config here, while the manager class passes the parameter
        # through -- confirm which is intended.
        return FedMLDeployWorkerJobRunner(
            args, run_id=run_id, request_json=request_json, agent_config=self.agent_config, edge_id=edge_id
        )

    # Override
    def _generate_extend_queue_list(self):
        # Deployment jobs do not need any extra queues.
        return None

    def retrieve_binary_model_file(self, package_name, package_url):
        """Download a raw (non-zipped) model binary and copy it into the model dir.

        Returns a tuple ``(unzip_package_path, dst_model_file)``.
        """
        # NOTE(review): this relies on ``urllib.request`` being loaded even
        # though the module only does ``import urllib`` -- consider importing
        # ``urllib.request`` explicitly at module top.
        local_package_path = ClientConstants.get_model_package_dir()
        if not os.path.exists(local_package_path):
            os.makedirs(local_package_path, exist_ok=True)
        unzip_package_path = ClientConstants.get_model_dir()
        local_package_file = "{}".format(os.path.join(local_package_path, package_name))
        if os.path.exists(local_package_file):
            os.remove(local_package_file)
        # Strip the query string so the downloaded file name is stable.
        package_url_without_query_path = urljoin(package_url, urlparse(package_url).path)
        urllib.request.urlretrieve(package_url_without_query_path, local_package_file,
                                   reporthook=self.package_download_progress)

        unzip_package_path = os.path.join(unzip_package_path, package_name)
        if not os.path.exists(unzip_package_path):
            os.makedirs(unzip_package_path, exist_ok=True)
        dst_model_file = os.path.join(unzip_package_path, package_name)
        if os.path.exists(local_package_file):
            shutil.copy(local_package_file, dst_model_file)

        return unzip_package_path, dst_model_file

    @staticmethod
    def get_model_bin_file(unzip_package_full_path):
        """Return the path of ``fedml_model.bin`` next to the unzipped package dir."""
        unzip_package_path = os.path.dirname(unzip_package_full_path)
        model_bin_file = os.path.join(unzip_package_path, "fedml_model.bin")
        return model_bin_file

    def update_local_fedml_config(self, run_id, model_config, model_config_parameters=None):
        """Fetch the model package and write the UI-provided parameters as its local config.

        Returns ``(unzip_package_path, model_bin_file, package_conf_object)``.
        """
        model_name = model_config["model_name"]
        model_storage_url = model_config["model_storage_url"]
        # NOTE(review): scale_min / scale_max / inference_engine /
        # inference_end_point_id are computed but unused below -- confirm they
        # can be dropped.
        scale_min = model_config.get("instance_scale_min", 0)
        scale_max = model_config.get("instance_scale_max", 0)
        inference_engine = model_config.get("inference_engine", 0)
        inference_end_point_id = run_id

        # Retrieve model package or model binary file.
        if self.model_is_from_open:
            unzip_package_path, model_bin_file = self.retrieve_binary_model_file(model_name, model_storage_url)
        else:
            unzip_package_path = self.retrieve_and_unzip_package(model_name, model_storage_url)
            model_bin_file = FedMLDeployWorkerJobRunner.get_model_bin_file(unzip_package_path)

        # Load the config to memory
        package_conf_object = {}
        fedml_local_config_file = os.path.join(unzip_package_path, "fedml_model_config.yaml")

        # Inject the config from UI to pkg yaml
        package_conf_object = model_config_parameters

        # Save the config to local
        with open(fedml_local_config_file, "w") as f:
            yaml.dump(package_conf_object, f)

        logging.info("The package_conf_object is {}".format(package_conf_object))

        return unzip_package_path, model_bin_file, package_conf_object

    def download_model_package(self, package_name, package_url):
        """Download and unzip the model package; returns the unzipped path."""
        # Copy config file from the client
        unzip_package_path = self.retrieve_and_unzip_package(
            package_name, package_url
        )

        return unzip_package_path

    # Override
    def run_impl(self, run_extend_queue_list, sender_message_center,
                 listener_message_queue, status_center_queue):
        """Main deployment flow for one endpoint on this worker.

        Reads the deployment request from ``self.request_json``, downloads and
        configures the model package, then reconciles replicas (add / remove /
        update) and reports results and status back to the master.
        Returns True on success, False on failure.
        """
        run_id = self.request_json["end_point_id"]
        end_point_name = self.request_json["end_point_name"]
        token = self.request_json["token"]
        user_id = self.request_json["user_id"]
        user_name = self.request_json["user_name"]
        device_ids = self.request_json["device_ids"]
        device_objs = self.request_json["device_objs"]
        master_ip = self.request_json["master_node_ip"]

        model_config = self.request_json["model_config"]
        model_name = model_config["model_name"]
        model_id = model_config["model_id"]
        model_version = model_config["model_version"]
        model_storage_url = model_config["model_storage_url"]
        scale_min = model_config.get("instance_scale_min", 0)
        scale_max = model_config.get("instance_scale_max", 0)
        model_config_parameters = self.request_json["parameters"]

        self.replica_handler = FedMLDeviceReplicaHandler(self.edge_id, self.request_json)

        # Internal port the container listens on; external port defaults to it
        # unless the request overrides worker_external_port.
        inference_port = model_config_parameters.get("worker_internal_port",
                                                     ClientConstants.MODEL_INFERENCE_DEFAULT_PORT)
        inference_port_external = model_config_parameters.get("worker_external_port", inference_port)

        if "using_triton" in model_config_parameters and model_config_parameters["using_triton"]:
            inference_engine = ClientConstants.INFERENCE_ENGINE_TYPE_INT_TRITON
        else:
            inference_engine = ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT

        logging.info("[Critical] The inference_engine is: {}".format(inference_engine))

        self.model_is_from_open = True if model_config.get("is_from_open", 0) == 1 else False
        if self.model_is_from_open:
            model_net_url = model_config["model_net_url"]
        inference_end_point_id = run_id
        use_gpu = "gpu"  # TODO: Get GPU from device infos
        memory_size = "4096m"  # TODO: Get Memory size for each instance

        self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id)

        self.check_runner_stop_event()

        logging.info("model deployment request: {}".format(self.request_json))

        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)

        # Report INITIALIZING then RUNNING so the master sees progress early.
        self.status_reporter.report_client_id_status(
            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING,
            is_from_model=True, running_json=json.dumps(self.request_json), run_id=run_id)

        self.status_reporter.report_client_id_status(
            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING,
            is_from_model=True, run_id=run_id)

        self.check_runner_stop_event()

        # update local config with real time parameters from server and dynamically replace variables value
        logging.info("download and unzip model to local...")
        unzip_package_path, model_bin_file, fedml_config_object = \
            self.update_local_fedml_config(run_id, model_config, model_config_parameters)
        if unzip_package_path is None or fedml_config_object is None:
            logging.info("failed to update local fedml config.")
            self.check_runner_stop_event()
            self.status_reporter.report_client_id_status(
                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
                is_from_model=True, run_id=run_id)
            return False

        logging.info("check downloaded packages...")
        if not os.path.exists(unzip_package_path):
            logging.info("failed to unzip file.")
            self.check_runner_stop_event()
            self.status_reporter.report_client_id_status(
                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
                is_from_model=True, run_id=run_id)
            return False

        # download model net and load into the torch model
        model_from_open = None
        # NOTE(review): this resets the boolean computed above from
        # model_config["is_from_open"] to None before it is passed to
        # start_deployment below -- confirm the reset is intentional.
        self.model_is_from_open = None

        logging.info("start the model deployment...")
        self.check_runner_stop_event()
        running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
            "", "", model_version, {}, {}

        # Reconcile the replica number (op: add, remove)
        prev_rank, op, op_num = self.replica_handler.reconcile_num_replica()

        # Reconcile the replica version (op: update)
        replica_rank_to_update = []
        if not op:
            replica_rank_to_update, op = self.replica_handler.reconcile_replica_version()

        if not op:
            logging.info("No need to reconcile.")
            return True

        if op == "add":
            worker_ip = GeneralConstants.get_ip_address(self.request_json)
            # New replicas take ranks prev_rank+1 .. prev_rank+op_num.
            for rank in range(prev_rank+1, prev_rank+1+op_num):
                # TODO: Support Rollback if this for loop failed
                try:
                    running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
                        start_deployment(
                            inference_end_point_id, end_point_name, model_id, model_version,
                            unzip_package_path, model_bin_file, model_name, inference_engine,
                            ClientConstants.INFERENCE_HTTP_PORT,
                            ClientConstants.INFERENCE_GRPC_PORT,
                            ClientConstants.INFERENCE_METRIC_PORT,
                            use_gpu, memory_size,
                            ClientConstants.INFERENCE_CONVERTOR_IMAGE,
                            ClientConstants.INFERENCE_SERVER_IMAGE,
                            worker_ip,
                            self.model_is_from_open, model_config_parameters,
                            model_from_open,
                            token,
                            master_ip, self.edge_id, master_device_id=device_ids[0], replica_rank=rank,
                            gpu_per_replica=int(self.replica_handler.gpu_per_replica)
                        )
                except Exception as e:
                    # An empty inference_output_url below signals failure.
                    inference_output_url = ""
                    logging.error(f"Exception at deployment: {traceback.format_exc()}")

                if inference_output_url == "":
                    logging.error("failed to deploy the model...")

                    result_payload = self.send_deployment_results(
                        end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
                        model_id, model_name, inference_output_url, inference_model_version, inference_port,
                        inference_engine, model_metadata, model_config)

                    self.status_reporter.report_client_id_status(
                        self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
                        is_from_model=True, run_id=self.run_id)
                    return False
                else:
                    logging.info("finished deployment, continue to send results to master...")
                    result_payload = self.send_deployment_results(
                        end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                        model_id, model_name, inference_output_url, model_version, inference_port_external,
                        inference_engine, model_metadata, model_config, replica_no=rank + 1)

                    if inference_port_external != inference_port:  # Save internal port to local db
                        logging.info("inference_port_external {} != inference_port {}".format(
                            inference_port_external, inference_port))
                        result_payload = self.construct_deployment_results(
                            end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                            model_id, model_name, inference_output_url, model_version, inference_port,
                            inference_engine, model_metadata, model_config, replica_no=rank + 1)

                    FedMLModelDatabase.get_instance().set_deployment_result(
                        run_id, end_point_name, model_name, model_version, self.edge_id,
                        json.dumps(result_payload), replica_no=rank + 1)

                    logging.info(f"Deploy replica {rank+1} / {prev_rank+1+op_num} successfully.")
                    time.sleep(5)

            time.sleep(1)
            self.status_reporter.report_client_id_status(
                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
                is_from_model=True, run_id=self.run_id)
            return True
        elif op == "remove":
            # Remove replicas from the highest rank downwards.
            for rank_to_delete in range(prev_rank, prev_rank-op_num, -1):
                self.replica_handler.remove_replica(rank_to_delete)

                FedMLModelCache.get_instance().set_redis_params()
                replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids(
                    run_id, end_point_name, model_name, self.edge_id, rank_to_delete+1)

                replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)

                # Give back the GPUs this replica occupied.
                JobRunnerUtils.get_instance().release_partial_job_gpu(run_id, self.edge_id, replica_occupied_gpu_ids)

                FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id_and_rank(
                    run_id, end_point_name, model_name, self.edge_id, rank_to_delete)

                # Report the deletion msg to master
                result_payload = self.send_deployment_results(
                    end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DELETED,
                    model_id, model_name, inference_output_url, model_version, inference_port_external,
                    inference_engine, model_metadata, model_config, replica_no=rank_to_delete + 1)

                time.sleep(1)
                self.status_reporter.report_client_id_status(
                    self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
                    is_from_model=True, run_id=self.run_id)

                # TODO: If delete all replica, then delete the job and related resources
                if rank_to_delete == 0:
                    pass
            return True
        elif op == "update":
            # Update is combine of delete and add
            worker_ip = GeneralConstants.get_ip_address(self.request_json)
            for rank in replica_rank_to_update:
                # Delete the container
                self.replica_handler.remove_replica(rank)

                FedMLModelCache.get_instance().set_redis_params()
                replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids(
                    run_id, end_point_name, model_name, self.edge_id, rank + 1)

                replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)

                JobRunnerUtils.get_instance().release_partial_job_gpu(run_id, self.edge_id, replica_occupied_gpu_ids)

                # Delete the deployment result from local db
                FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id_and_rank(
                    run_id, end_point_name, model_name, self.edge_id, rank)

                time.sleep(1)

                # Add the container
                # TODO: Reduce the duplicated code
                try:
                    running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
                        start_deployment(
                            inference_end_point_id, end_point_name, model_id, model_version,
                            unzip_package_path, model_bin_file, model_name, inference_engine,
                            ClientConstants.INFERENCE_HTTP_PORT,
                            ClientConstants.INFERENCE_GRPC_PORT,
                            ClientConstants.INFERENCE_METRIC_PORT,
                            use_gpu, memory_size,
                            ClientConstants.INFERENCE_CONVERTOR_IMAGE,
                            ClientConstants.INFERENCE_SERVER_IMAGE,
                            worker_ip,
                            self.model_is_from_open, model_config_parameters,
                            model_from_open,
                            token,
                            master_ip, self.edge_id, master_device_id=device_ids[0], replica_rank=rank,
                            gpu_per_replica=int(self.replica_handler.gpu_per_replica)
                        )
                except Exception as e:
                    inference_output_url = ""
                    logging.error(f"Exception at deployment: {traceback.format_exc()}")

                if inference_output_url == "":
                    logging.error("failed to deploy the model...")

                    result_payload = self.send_deployment_results(
                        end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
                        model_id, model_name, inference_output_url, inference_model_version, inference_port,
                        inference_engine, model_metadata, model_config)

                    self.status_reporter.report_client_id_status(
                        self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
                        is_from_model=True, run_id=self.run_id)

                    return False
                else:
                    logging.info("finished deployment, continue to send results to master...")
                    result_payload = self.send_deployment_results(
                        end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                        model_id, model_name, inference_output_url, model_version, inference_port_external,
                        inference_engine, model_metadata, model_config, replica_no=rank + 1)

                    if inference_port_external != inference_port:  # Save internal port to local db
                        logging.info("inference_port_external {} != inference_port {}".format(
                            inference_port_external, inference_port))
                        result_payload = self.construct_deployment_results(
                            end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                            model_id, model_name, inference_output_url, model_version, inference_port,
                            inference_engine, model_metadata, model_config, replica_no=rank + 1)

                    FedMLModelDatabase.get_instance().set_deployment_result(
                        run_id, end_point_name, model_name, model_version, self.edge_id,
                        json.dumps(result_payload), replica_no=rank + 1)

                    logging.info(f"Update replica with no {rank + 1}  successfully. Op num {op_num}")
                    time.sleep(5)
            time.sleep(1)
            self.status_reporter.report_client_id_status(
                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
                is_from_model=True, run_id=self.run_id)
            return True

        else:
            # The delete op will be handled by callback_delete_deployment
            logging.error(f"Unsupported op {op} with op num {op_num}")
            return False

    def construct_deployment_results(self, end_point_name, device_id, model_status,
                                     model_id, model_name, model_inference_url,
                                     model_version, inference_port, inference_engine,
                                     model_metadata, model_config, replica_no=1):
        """Build the deployment-result payload dict reported to the master."""
        deployment_results_payload = {"end_point_id": self.run_id, "end_point_name": end_point_name,
                                      "model_id": model_id, "model_name": model_name,
                                      "model_url": model_inference_url, "model_version": model_version,
                                      "port": inference_port,
                                      "inference_engine": inference_engine,
                                      "model_metadata": model_metadata,
                                      "model_config": model_config,
                                      "model_status": model_status,
                                      "inference_port": inference_port,
                                      "replica_no": replica_no,
                                      }
        return deployment_results_payload

    def construct_deployment_status(self, end_point_name, device_id,
                                    model_id, model_name, model_version,
                                    model_inference_url, model_status,
                                    inference_port=ClientConstants.MODEL_INFERENCE_DEFAULT_PORT,
                                    replica_no=1,     # start from 1
                                    ):
        """Build the deployment-status payload dict reported to the master."""
        deployment_status_payload = {"end_point_id": self.run_id, "end_point_name": end_point_name,
                                     "device_id": device_id,
                                     "model_id": model_id, "model_name": model_name,
                                     "model_version": model_version,
                                     "model_url": model_inference_url, "model_status": model_status,
                                     "inference_port": inference_port,
                                     "replica_no": replica_no,
                                     }
        return deployment_status_payload

    def send_deployment_results(self, end_point_name, device_id, model_status,
                                model_id, model_name, model_inference_url,
                                model_version, inference_port, inference_engine,
                                model_metadata, model_config, replica_no=1):
        """Publish the deployment result for this device via the message center;
        returns the payload that was sent."""
        deployment_results_topic = "model_device/model_device/return_deployment_result/{}".format(device_id)
        deployment_results_payload = self.construct_deployment_results(
            end_point_name, device_id, model_status,
            model_id, model_name, model_inference_url,
            model_version, inference_port, inference_engine,
            model_metadata, model_config, replica_no=replica_no)

        logging.info("[client] send_deployment_results: topic {}, payload {}.".format(deployment_results_topic,
                                                               deployment_results_payload))
        self.message_center.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload))
        return deployment_results_payload

    def send_deployment_status(self, end_point_name, device_id,
                               model_id, model_name, model_version,
                               model_inference_url, model_status,
                               inference_port=ClientConstants.MODEL_INFERENCE_DEFAULT_PORT,
                               replica_no=1,     # start from 1
                               ):
        """Publish the deployment status for this device via the message center;
        returns the payload that was sent."""
        deployment_status_topic = "model_device/model_device/return_deployment_status/{}".format(device_id)
        deployment_status_payload = self.construct_deployment_status(
            end_point_name, device_id,
            model_id, model_name, model_version,
            model_inference_url, model_status,
            inference_port=inference_port,
            replica_no=replica_no)

        logging.info("[client] send_deployment_status: topic {}, payload {}.".format(deployment_status_topic,
                                                                                     deployment_status_payload))
        self.message_center.send_message_json(deployment_status_topic, json.dumps(deployment_status_payload))
        return deployment_status_payload

    def reset_devices_status(self, edge_id, status):
        # Point the status reporter at this run/edge before reporting.
        self.status_reporter.run_id = self.run_id
        self.status_reporter.edge_id = edge_id
        self.status_reporter.report_client_id_status(
            edge_id, status, is_from_model=True, run_id=self.run_id)

    # Override
    def get_download_package_info(self, packages_config=None):
        # For deployment jobs the "package" is the model itself.
        model_name = packages_config["model_name"]
        model_storage_url = packages_config["model_storage_url"]
        return model_name, model_storage_url

    # Override
    def build_dynamic_args(self, run_id, run_config, package_conf_object, base_dir):
        # Not needed for deployment jobs; the config is injected in
        # update_local_fedml_config instead.
        pass

    # Override
    def build_dynamic_constrain_variables(self, run_id, run_config):
        # Not needed for deployment jobs.
        pass
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner_manager.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner_manager.py
new file mode 100755
index 0000000000..4fe35d5a8a
--- /dev/null
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner_manager.py
@@ -0,0 +1,23 @@
+
+from fedml.core.common.singleton import Singleton
+from .worker_job_runner import FedMLDeployWorkerJobRunner
+from ..scheduler_core.general_constants import GeneralConstants
+from ..slave.base_slave_job_runner_manager import FedMLBaseSlaveJobRunnerManager
+
+
class FedMLDeployJobRunnerManager(FedMLBaseSlaveJobRunnerManager, Singleton):
    """Singleton manager that creates and tracks deployment worker job runners."""

    def __init__(self):
        # Call the base manager's __init__ explicitly (not super()) so the
        # Singleton mixin's MRO is bypassed for initialization.
        FedMLBaseSlaveJobRunnerManager.__init__(self)

    @staticmethod
    def get_instance():
        """Return the process-wide singleton instance."""
        return FedMLDeployJobRunnerManager()

    # Override
    def _generate_job_runner_instance(
            self, args, run_id=None, request_json=None, agent_config=None, edge_id=None
    ):
        """Create a deployment worker job runner for one run.

        The runner's infer_host is taken from
        GeneralConstants.get_ip_address(request_json) — presumably the IP at
        which this worker's inference endpoint is reachable; confirm against
        GeneralConstants.
        """
        job_runner = FedMLDeployWorkerJobRunner(
            args, run_id=run_id, request_json=request_json, agent_config=agent_config, edge_id=edge_id)
        job_runner.infer_host = GeneralConstants.get_ip_address(request_json)
        return job_runner
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
new file mode 100755
index 0000000000..43bb3c4582
--- /dev/null
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
@@ -0,0 +1,195 @@
+
+import json
+import logging
+import os
+import traceback
+
+from fedml.computing.scheduler.comm_utils.job_utils import JobRunnerUtils
+from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils
+from fedml.computing.scheduler.comm_utils.sys_utils import get_python_program
+from fedml.core.mlops import MLOpsConfigs, MLOpsRuntimeLog, MLOpsRuntimeLogDaemon
+from .device_model_db import FedMLModelDatabase
+from .device_model_msg_object import FedMLModelMsgObject
+from .device_client_constants import ClientConstants
+from .device_client_data_interface import FedMLClientDataInterface
+from ..slave.base_slave_protocol_manager import FedMLBaseSlaveProtocolManager
+from .worker_job_runner_manager import FedMLDeployJobRunnerManager
+from .device_mqtt_inference_protocol import FedMLMqttInference
+
+
class FedMLDeployWorkerProtocolManager(FedMLBaseSlaveProtocolManager):
    """MQTT protocol manager for the deployment (model serving) worker agent.

    Adds the deployment-specific topics (start/delete deployment) on top of
    the base slave protocol manager and dispatches them to the deployment
    job runner manager.
    """

    def __init__(self, args, agent_config=None):
        FedMLBaseSlaveProtocolManager.__init__(self, args, agent_config=agent_config)

        # Deployment topics; filled in by generate_topics() once edge_id is known.
        self.topic_start_deployment = None
        self.topic_delete_deployment = None

        # Default inference host and local Redis connection settings.
        self.infer_host = "127.0.0.1"
        self.redis_addr = "local"
        self.redis_port = "6379"
        self.redis_password = "fedml_default"
        self.endpoint_sync_protocol = None
        self.local_api_process = None
        self.mqtt_inference_obj = None

    # Override
    def _generate_protocol_manager_instance(self, args, agent_config=None):
        """Factory used by the base class to create a fresh protocol manager."""
        return FedMLDeployWorkerProtocolManager(args, agent_config=agent_config)

    # Override
    def generate_topics(self):
        """Generate and subscribe the deployment topics for this edge id."""
        super().generate_topics()

        # The topic for start deployment
        self.topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id))

        # The topic for deleting endpoint
        self.topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(self.edge_id))

        # Subscribe topics for endpoints
        self.add_subscribe_topic(self.topic_start_deployment)
        self.add_subscribe_topic(self.topic_delete_deployment)

    # Override
    def add_protocol_handler(self):
        """Register message listeners for the deployment topics."""
        super().add_protocol_handler()

        # Add the message listeners for endpoint related topics
        self.add_message_listener(self.topic_start_deployment, self.callback_start_deployment)
        self.add_message_listener(self.topic_delete_deployment, self.callback_delete_deployment)

    # Override
    def _get_job_runner_manager(self):
        """Return the singleton deployment job runner manager."""
        return FedMLDeployJobRunnerManager.get_instance()

    # Override
    def _init_extra_items(self):
        """Prepare worker-side local state: job table, model DB, local API.

        Starts the local client API service (uvicorn) only when no instance
        is already running for the same command line.
        """
        # Init local database
        FedMLClientDataInterface.get_instance().create_job_table()
        try:
            FedMLModelDatabase.get_instance().set_database_base_dir(ClientConstants.get_database_dir())
            FedMLModelDatabase.get_instance().create_table()
        except Exception as e:
            # Best-effort: table creation failures (e.g. already exists) are ignored.
            pass

        client_api_cmd = "fedml.computing.scheduler.model_scheduler.device_client_api:api"
        client_api_pids = RunProcessUtils.get_pid_from_cmd_line(client_api_cmd)
        if client_api_pids is None or len(client_api_pids) <= 0:
            # Start local API services
            cur_dir = os.path.dirname(__file__)
            # Three levels up from this module: the fedml package base dir,
            # used as uvicorn's reload dir.
            fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
            python_program = get_python_program()
            self.local_api_process = ClientConstants.exec_console_with_script(
                "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
                "--log-level critical".format(
                    python_program, client_api_cmd,
                    ClientConstants.LOCAL_CLIENT_API_PORT, fedml_base_dir
                ),
                should_capture_stdout=False,
                should_capture_stderr=False
            )

    # Override
    def _process_connection_ready(self):
        """On MQTT connect: initialize logging and listen for MQTT inference requests."""
        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)

        if self.mqtt_inference_obj is None:
            self.mqtt_inference_obj = FedMLMqttInference(
                agent_config=self.agent_config, mqtt_mgr=self.communication_mgr)
        self.mqtt_inference_obj.setup_listener_for_endpoint_inference_request(self.edge_id)

    # Override
    def _process_connection_lost(self):
        """On MQTT disconnect: stop listening for MQTT inference requests (best effort)."""
        try:
            if self.mqtt_inference_obj is not None:
                self.mqtt_inference_obj.remove_listener_for_endpoint_inference_request(self.edge_id)
        except Exception as e:
            pass

    # Override
    def print_connected_info(self):
        # Intentionally silent for the deployment worker.
        pass

    def callback_start_deployment(self, topic, payload):
        """Handle a start-deployment request from the backend.

        topic: model_ops/model_device/start_deployment/model-agent-device-id
        payload: {"model_name": "image-model", "model_storage_url":"s3-url",
        "instance_scale_min":1, "instance_scale_max":3, "inference_engine":"onnx (or tensorrt)"}

        Refreshes MLOps configs, starts the run's log processor, then hands
        the request to the job runner manager.
        """
        # Parse deployment parameters.
        # NOTE(review): several of the fields parsed below (token, user_id,
        # user_name, device_ids/objs, scale_min/max, inference_engine, ...)
        # are currently unused in this handler.
        request_json = json.loads(payload)
        run_id = request_json["end_point_id"]
        token = request_json["token"]
        user_id = request_json["user_id"]
        user_name = request_json["user_name"]
        device_ids = request_json["device_ids"]
        device_objs = request_json["device_objs"]
        model_config = request_json["model_config"]
        model_name = model_config["model_name"]
        model_storage_url = model_config["model_storage_url"]
        scale_min = model_config.get("instance_scale_min", 0)
        scale_max = model_config.get("instance_scale_max", 0)
        inference_engine = model_config.get("inference_engine", 0)
        inference_end_point_id = run_id

        # Best-effort refresh of the MLOps configs before starting.
        try:
            MLOpsConfigs.fetch_all_configs()
        except Exception as e:
            pass

        # Start log processor for current run
        run_id = inference_end_point_id
        self.args.run_id = run_id
        self.args.edge_id = self.edge_id
        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
        MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source(
            ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT)
        MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id)

        # Start the job runner
        request_json["run_id"] = run_id
        run_id_str = str(run_id)
        self.request_json = request_json
        self.running_request_json[run_id_str] = request_json
        self._get_job_runner_manager().start_job_runner(
            run_id, request_json, args=self.args, edge_id=self.edge_id,
            sender_message_queue=self.message_center.get_sender_message_queue(),
            listener_message_queue=self.get_listener_message_queue(),
            status_center_queue=self.get_status_queue()
        )
        process = self._get_job_runner_manager().get_runner_process(run_id)
        if process is not None:
            ClientConstants.save_run_process(run_id, process.pid)

    def callback_delete_deployment(self, topic, payload):
        """Handle a delete-deployment request.

        Removes all replicas on this device, stops the job runner, releases
        the GPUs held by the run, and purges local job/deployment records.
        """
        logging.info("[Worker] callback_delete_deployment")

        # Parse payload as the model message object.
        model_msg_object = FedMLModelMsgObject(topic, payload)

        # Delete all replicas on this device
        try:
            ClientConstants.remove_deployment(
                model_msg_object.end_point_name, model_msg_object.model_name, model_msg_object.model_version,
                model_msg_object.run_id, model_msg_object.model_id, edge_id=self.edge_id)
        except Exception as e:
            logging.info(f"Exception when removing deployment {traceback.format_exc()}")
            pass

        self._get_job_runner_manager().stop_job_runner(model_msg_object.run_id)

        logging.info(f"[endpoint/device][{model_msg_object.run_id}/{self.edge_id}] "
                     f"Release gpu resource when the worker deployment deleted.")
        JobRunnerUtils.get_instance().release_gpu_ids(model_msg_object.run_id, self.edge_id)

        # Drop the tracked request for this run, if any.
        if self.running_request_json.get(str(model_msg_object.run_id)) is not None:
            try:
                self.running_request_json.pop(str(model_msg_object.run_id))
            except Exception as e:
                logging.error(f"Error when removing running_request_json: {traceback.format_exc()}")
                pass

        # Purge the local job record and the deployment results for this device.
        FedMLClientDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id)
        FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id(
            model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name,
            self.edge_id)
diff --git a/python/fedml/computing/scheduler/scheduler_core/account_manager.py b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
new file mode 100755
index 0000000000..61ffd20988
--- /dev/null
+++ b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
@@ -0,0 +1,460 @@
+import logging
+import os
+import platform
+import subprocess
+import time
+import traceback
+import uuid
+
+import requests
+
+import fedml
+from fedml.computing.scheduler.comm_utils import sys_utils, security_utils
+from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants
+from fedml.computing.scheduler.comm_utils.sys_utils import get_sys_runner_info
+from fedml.computing.scheduler.scheduler_core.general_constants import GeneralConstants
+from fedml.core.common.singleton import Singleton
+from fedml.core.mlops import MLOpsConfigs
+
+
class FedMLAccountManager(Singleton):
    """Account/device binding manager for the FedML MLOps (Nexus AI) backend.

    Builds the agent-side arguments for a given role (device id, working
    dirs, unique device id), fetches the platform service configs, and
    binds the device to the backend to obtain an edge id. The bound result
    is exposed as an :class:`AgentArgs` instance returned by :meth:`login`.
    """

    LOCAL_RUNNER_INFO_DIR_NAME = 'runner_infos'
    STATUS_IDLE = "IDLE"

    # Roles the agent can bind as.
    ROLE_EDGE_SERVER = "edge_server"
    ROLE_CLOUD_AGENT = "cloud_agent"
    ROLE_CLOUD_SERVER = "cloud_server"
    ROLE_EDGE_DEVICE = "client"
    ROLE_GPU_PROVIDER = "gpu_supplier"
    ROLE_DEPLOY_MASTER_ON_PREM = "md.on_premise_device.master"
    ROLE_DEPLOY_WORKER_ON_PREM = "md.on_premise_device"

    # Per-role suffixes appended to the unique device id.
    DEVICE_ID_SUFFIX_EDGE_SERVER = ".Edge.Server"
    DEVICE_ID_SUFFIX_CLOUD_AGENT = ".Public.Cloud"
    DEVICE_ID_SUFFIX_CLOUD_SERVER = ".Public.Server"
    DEVICE_ID_SUFFIX_EDGE_DEVICE = ".Edge.Device"
    DEVICE_ID_SUFFIX_GPU_PROVIDER = ".Edge.GPU.Supplier"
    DEVICE_ID_SUFFIX_DEPLOY = "MDA"
    DEVICE_ID_SUFFIX_DEPLOY_MASTER_ON_PREM = ".OnPremise.Master.Device"
    DEVICE_ID_SUFFIX_DEPLOY_WORKER_ON_PREM = ".OnPremise.Device"

    DEVICE_ID_DOCKER_TAG = ".Docker"
    DEVICE_ID_DOCKER_HUB_TAG = ".DockerHub"

    def __init__(self):
        # Singleton: __init__ may run more than once, so only initialize
        # the attribute the first time through.
        if not hasattr(self, "agent_args"):
            self.agent_args = None

    @staticmethod
    def get_instance():
        """Return the process-wide singleton instance."""
        return FedMLAccountManager()

    def login(self, user_id, api_key="", device_id=None, os_name=None, role=None):
        """Bind this device to the MLOps backend and return the agent args.

        :param user_id: account id to bind.
        :param api_key: API key for the account.
        :param device_id: explicit device id; auto-detected when None/"0".
        :param os_name: OS name override; auto-detected when empty.
        :param role: one of the ROLE_* constants.
        :return: the populated :class:`AgentArgs`, or ``None`` when config
            fetching or binding fails after retries (also returns ``None``
            after logging out when the account does not exist).
        """
        # Build the agent args
        self.build_agent_args(
            user_id, api_key=api_key, device_id=device_id, os_name=os_name, role=role
        )

        # Fetch configs from the MLOps config server, retrying up to 5 times.
        service_config = dict()
        log_server_url = None
        config_try_count = 0
        while config_try_count < 5:
            # noinspection PyBroadException
            try:
                mqtt_config, s3_config, mlops_config, docker_config = FedMLAccountManager.fetch_configs()
                service_config["mqtt_config"] = mqtt_config
                service_config["s3_config"] = s3_config
                service_config["ml_ops_config"] = mlops_config
                service_config["docker_config"] = docker_config
                log_server_url = mlops_config.get("LOG_SERVER_URL", None)
                break
            except Exception as e:
                print("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_1, traceback.format_exc()))
                print(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING)
                config_try_count += 1
                time.sleep(3)
                continue

        # Failed to fetch the config after retrying many times.
        if config_try_count >= 5:
            print("")
            print("[5] Oops, you failed to login the FedML MLOps platform.")
            print("Please check whether your network is normal!")
            return None

        # Bind account id to FedML® Nexus AI Platform, retrying up to 5 times.
        register_try_count = 0
        edge_id = -1
        user_name = None
        extra_url = None
        general_edge_id = None
        while register_try_count < 5:
            # noinspection PyBroadException
            try:
                edge_id, user_name, extra_url, general_edge_id = FedMLAccountManager.bind_account_and_device_id(
                    service_config["ml_ops_config"]["EDGE_BINDING_URL"], self.agent_args.account_id,
                    self.agent_args.unique_device_id, self.agent_args.os_name,
                    api_key=api_key, role=role
                )
                if edge_id > 0:
                    break
            except SystemExit as e:
                # Raised by bind_account_and_device_id when the backend
                # reports that the account does not exist.
                print("Your account does not exist. Please make sure your account is correct.")
                os.system("fedml logout -s")
                return
            except Exception as e:
                print("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_2, traceback.format_exc()))
                print(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING)
                register_try_count += 1
                time.sleep(3)
                continue

        # Failed to bind your account after retrying many times.
        if edge_id <= 0:
            print("")
            print("[6] Oops, you failed to login the FedML MLOps platform.")
            print("Please check whether your network is normal!")
            return None

        # Fill the bound result to agent args.
        self.fill_argent_args(
            log_server_url=log_server_url, server_id=edge_id,
            edge_id=edge_id, general_edge_id=general_edge_id,
            user_name=user_name, extra_url=extra_url,
            agent_config=service_config)

        return self.agent_args

    def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None, role=None):
        """Populate ``self.agent_args`` (dirs, device id, unique id) for *role*."""
        # Generate the suffix for device based on the role
        device_id_suffix = None
        is_master = False
        is_deploy = False
        if role == FedMLAccountManager.ROLE_EDGE_SERVER:
            device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_EDGE_SERVER
            is_master = True
        elif role == FedMLAccountManager.ROLE_CLOUD_AGENT:
            device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_CLOUD_AGENT
            is_master = True
        elif role == FedMLAccountManager.ROLE_CLOUD_SERVER:
            device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_CLOUD_SERVER
            is_master = True
        elif role == FedMLAccountManager.ROLE_EDGE_DEVICE:
            device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_EDGE_DEVICE
        elif role == FedMLAccountManager.ROLE_GPU_PROVIDER:
            device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_GPU_PROVIDER
        elif role == FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM:
            device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_DEPLOY_MASTER_ON_PREM
            is_master = True
            is_deploy = True
        elif role == FedMLAccountManager.ROLE_DEPLOY_WORKER_ON_PREM:
            device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_DEPLOY_WORKER_ON_PREM
            is_deploy = True

        # Build the agent args
        version = fedml.get_env_version()
        if self.agent_args is None:
            self.agent_args = AgentArgs()
        self.agent_args.role = role
        self.agent_args.account_id = user_id
        self.agent_args.api_key = api_key
        self.agent_args.current_running_dir = GeneralConstants.get_deploy_fedml_home_dir(is_master=is_master) \
            if is_deploy else GeneralConstants.get_launch_fedml_home_dir(is_master=is_master)
        sys_name = platform.system()
        if sys_name == "Darwin":
            sys_name = "MacOS"
        self.agent_args.os_name = sys_name if os_name is None or os_name == "" else os_name
        self.agent_args.version = version
        self.agent_args.log_file_dir = GeneralConstants.get_deploy_log_file_dir(is_master=is_master) \
            if is_deploy else GeneralConstants.get_launch_log_file_dir(is_master=is_master)
        # NOTE(review): is_from_docker is never set to True anywhere below,
        # so the ".Docker" tag is currently unreachable — confirm intent.
        is_from_docker = False
        if device_id is not None and device_id != "0":
            self.agent_args.current_device_id = device_id
        else:
            data_dir = GeneralConstants.get_deploy_data_dir(is_master=is_master) \
                if is_deploy else GeneralConstants.get_launch_data_dir(is_master=is_master)
            is_gpu_provider = True if role == FedMLAccountManager.ROLE_GPU_PROVIDER else False
            self.agent_args.current_device_id = FedMLAccountManager.get_device_id(
                data_dir=data_dir, use_machine_id=is_gpu_provider)
        self.agent_args.device_id = self.agent_args.current_device_id
        self.agent_args.config_version = version
        self.agent_args.cloud_region = ""

        # Check if it is running in the fedml docker hub.
        # NOTE(review): the original ternary called
        # get_deploy_docker_location_file() in BOTH branches; a launch
        # variant may have been intended for the non-deploy case — confirm
        # against GeneralConstants. Behavior preserved here.
        is_from_fedml_docker_hub = False
        dock_loc_file = GeneralConstants.get_deploy_docker_location_file(is_master=is_master)
        if os.path.exists(dock_loc_file):
            is_from_fedml_docker_hub = True

        # Build unique device id
        docker_tag = FedMLAccountManager.DEVICE_ID_DOCKER_TAG if is_from_docker else ""
        docker_tag = FedMLAccountManager.DEVICE_ID_DOCKER_HUB_TAG if is_from_fedml_docker_hub else docker_tag
        unique_device_id = f"{self.agent_args.current_device_id}@{self.agent_args.os_name}" \
                           f"{docker_tag}{device_id_suffix}"

        # Set the unique device id
        self.agent_args.is_from_docker = is_from_docker or is_from_fedml_docker_hub
        self.agent_args.unique_device_id = unique_device_id

    def fill_argent_args(
            self, log_server_url=None, server_id=None, edge_id=None,
            user_name=None, extra_url=None, general_edge_id=None, agent_config=None):
        """Copy the binding results into ``self.agent_args``.

        (Method name keeps the historical "argent" spelling for interface
        compatibility with existing callers.)
        """
        self.agent_args.log_server_url = log_server_url
        self.agent_args.server_id = server_id
        self.agent_args.edge_id = edge_id
        self.agent_args.user_name = user_name
        self.agent_args.extra_url = extra_url
        self.agent_args.general_edge_id = general_edge_id
        self.agent_args.agent_config = agent_config

    @staticmethod
    def write_login_failed_file(is_client=True):
        """Write an "exited.log" marker (holding this PID) into the log dir."""
        login_exit_file = os.path.join(
            GeneralConstants.get_launch_log_file_dir(is_master=not is_client), "exited.log")
        with open(login_exit_file, "w") as f:
            f.writelines(f"{os.getpid()}.")

    @staticmethod
    def get_device_id(data_dir, use_machine_id=False):
        """Return a stable device id, cached in ``<data_dir>/runner_infos/devices.id``.

        Derivation is platform-specific (macOS serial number, Windows WMI
        uuid, docker id or MAC/machine id on POSIX); falls back to a random
        uuid when nothing else is available.
        """
        device_file_path = os.path.join(data_dir, FedMLAccountManager.LOCAL_RUNNER_INFO_DIR_NAME)
        file_for_device_id = os.path.join(device_file_path, "devices.id")
        if not os.path.exists(device_file_path):
            os.makedirs(device_file_path, exist_ok=True)
        elif os.path.exists(file_for_device_id):
            # Reuse the previously-computed id when one was cached.
            with open(file_for_device_id, 'r', encoding='utf-8') as f:
                device_id_from_file = f.readline()
                if device_id_from_file is not None and device_id_from_file != "":
                    return device_id_from_file

        if platform.system() == "Darwin":
            cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \
                                 "|awk -F':' '{print $2}' "
            device_id = os.popen(cmd_get_serial_num).read()
            device_id = device_id.replace('\n', '').replace(' ', '')
            if device_id is None or device_id == "":
                if not use_machine_id:
                    device_id = hex(uuid.getnode())
                else:
                    device_id = FedMLAccountManager.get_gpu_machine_id()
            else:
                device_id = "0x" + device_id
        else:
            if "nt" in os.name:

                def get_uuid():
                    guid = ""
                    try:
                        cmd = "wmic csproduct get uuid"
                        guid = str(subprocess.check_output(cmd))
                        pos1 = guid.find("\\n") + 2
                        guid = guid[pos1:-15]
                    except Exception as ex:
                        logging.error(f"Failed to get uuid with Exception {ex}. Traceback: {traceback.format_exc()}")
                        pass
                    return str(guid)

                device_id = str(get_uuid())
                logging.info(device_id)
            elif "posix" in os.name:
                device_id = sys_utils.get_device_id_in_docker()
                if device_id is None:
                    if not use_machine_id:
                        device_id = hex(uuid.getnode())
                    else:
                        # Fixed: original had a duplicated "device_id =" assignment.
                        device_id = FedMLAccountManager.get_gpu_machine_id()
            else:
                # NOTE(review): run_subprocess_open likely returns text, so
                # hex() on it would raise — confirm this fallback path.
                device_id = sys_utils.run_subprocess_open(
                    "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split()
                )
                device_id = hex(device_id)

        # Cache whatever id we derived; fall back to a random uuid.
        if device_id is not None and device_id != "":
            with open(file_for_device_id, 'w', encoding='utf-8') as f:
                f.write(device_id)
        else:
            device_id = hex(uuid.uuid4())
            with open(file_for_device_id, 'w', encoding='utf-8') as f:
                f.write(device_id)

        return device_id

    @staticmethod
    def get_gpu_machine_id():
        """Return a hash id derived from machine id, MAC and GPU uuids."""
        gpu_list = sys_utils.get_gpu_list()
        gpu_uuids = ""
        if len(gpu_list) > 0:
            for gpu in gpu_list:
                gpu_uuids += gpu.get("uuid", "")
        else:
            gpu_uuids = str(uuid.uuid4())
        device_id_combination = \
            f"{FedMLAccountManager.get_machine_id()}-{hex(uuid.getnode())}-{gpu_uuids}"
        device_id = security_utils.get_content_hash(device_id_combination)
        return device_id

    @staticmethod
    def get_machine_id():
        """Return the OS machine id, falling back to the MAC address."""
        try:
            import machineid
            return machineid.id().replace('\n', '').replace('\r\n', '').strip()
        except Exception as e:
            logging.error(f"Failed to get machine id with Exception {e}. Traceback: {traceback.format_exc()}")
            return hex(uuid.getnode())

    @staticmethod
    def bind_account_and_device_id(
            url, account_id, device_id, os_name, api_key="",
            role=ROLE_EDGE_SERVER):
        """POST the device/system info to the binding endpoint.

        :return: ``(edge_id, user_name, extra_url, general_edge_id)``;
            edge_id is -1 on failure.
        :raises SystemExit: when the backend reports the account does not exist.
        """
        ip = requests.get('https://checkip.amazonaws.com').text.strip()
        fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \
            cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \
            gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info()
        host_name = sys_utils.get_host_name()
        json_params = {
            "accountid": account_id,
            "deviceid": device_id,
            "type": os_name,
            "state": FedMLAccountManager.STATUS_IDLE,
            "status": FedMLAccountManager.STATUS_IDLE,
            "processor": cpu_info,
            "core_type": cpu_info,
            "network": "",
            "role": role,
            "os_ver": os_ver,
            "memory": total_mem,
            "ip": ip,
            "api_key": api_key,
            "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver,
                            "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver,
                            "mpi_installed": mpi_installed, "cpu_usage": cpu_usage,
                            "available_mem": available_mem, "total_mem": total_mem,
                            "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name}
        }
        if gpu_count > 0:
            if gpu_total_mem is not None:
                # Fixed: the original ternary lacked parentheses, so the
                # ", Total GPU Memory: " suffix was only used when gpu_info
                # was None (the opposite of the intent).
                json_params["gpu"] = \
                    (gpu_info if gpu_info is not None else "") + ", Total GPU Memory: " + gpu_total_mem
            else:
                json_params["gpu"] = gpu_info if gpu_info is not None else ""
            json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else ""
            if gpu_available_mem is not None:
                json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem
            if gpu_total_mem is not None:
                json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem

            json_params["extra_infos"]["gpu_count"] = gpu_count
            json_params["extra_infos"]["gpu_vendor"] = gpu_vendor
            json_params["extra_infos"]["gpu_device_name"] = gpu_device_name

            gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count)
            gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0
            gpu_list = sys_utils.get_gpu_list()
            json_params["extra_infos"]["gpu_available_count"] = gpu_available_count
            json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list
            json_params["extra_infos"]["gpu_list"] = gpu_list
        else:
            json_params["gpu"] = "None"
            json_params["extra_infos"]["gpu_available_count"] = 0
            json_params["extra_infos"]["gpu_available_id_list"] = []
            json_params["extra_infos"]["gpu_list"] = []

        _, cert_path = MLOpsConfigs.get_request_params()
        if cert_path is not None:
            try:
                # NOTE(review): this session is created and discarded, so the
                # verify= assignment has no effect on the requests.post below
                # (which uses verify=True, the default CA bundle). Possibly
                # verify=cert_path was intended — confirm before changing TLS
                # behavior.
                requests.session().verify = cert_path
                response = requests.post(
                    url, json=json_params, verify=True,
                    headers={"content-type": "application/json", "Connection": "close"}
                )
            except requests.exceptions.SSLError as err:
                logging.error(
                    f"Failed to bind account and device id with error: {err}, traceback: {traceback.format_exc()}")
                MLOpsConfigs.install_root_ca_file()
                response = requests.post(
                    url, json=json_params, verify=True,
                    headers={"content-type": "application/json", "Connection": "close"}
                )
        else:
            response = requests.post(url, json=json_params, headers={"Connection": "close"})
        edge_id, user_name, extra_url, general_edge_id = -1, None, None, None
        if response.status_code != 200:
            print(f"Binding to MLOps with response.status_code = {response.status_code}, "
                  f"response.content: {response.content}")
            pass
        else:
            # Parse the JSON body once instead of re-decoding per field.
            resp_json = response.json()
            status_code = resp_json.get("code")
            if status_code == "SUCCESS":
                resp_data = resp_json.get("data")
                edge_id = resp_data.get("id")
                user_name = resp_data.get("userName", None)
                extra_url = resp_data.get("url", None)
                general_edge_id = resp_data.get("general_edge_id", None)
                if edge_id is None or edge_id <= 0:
                    print(f"Binding to MLOps with response.status_code = {response.status_code}, "
                          f"response.content: {response.content}")
            else:
                if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR:
                    raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR)
                print(f"Binding to MLOps with response.status_code = {response.status_code}, "
                      f"response.content: {response.content}")
                return -1, None, None, None
        return edge_id, user_name, extra_url, general_edge_id

    @staticmethod
    def fetch_configs():
        """Fetch (mqtt, s3, mlops, docker) configs from the MLOps backend."""
        return MLOpsConfigs.fetch_all_configs()

    @staticmethod
    def _role_is_slave_agent(role):
        """True when *role* is one of the slave (worker) roles."""
        return role in (FedMLAccountManager.ROLE_EDGE_DEVICE, FedMLAccountManager.ROLE_GPU_PROVIDER)
+
+
class AgentArgs:
    """Plain data holder for the agent's runtime arguments.

    Collects the account/role identity, platform info, directories and the
    post-binding results (edge id, agent config, ...) into one object that
    the protocol managers consume.
    """

    def __init__(self, role=None, account_id=None, api_key=None, server_id=None, current_running_dir=None,
                 os_name=None, version=None, log_file_dir=None, log_server_url=None, device_id=None,
                 current_device_id=None, config_version=None, cloud_region=None, is_from_docker=False,
                 edge_id=None, agent_config=None, user_name=None, extra_url=None, unique_device_id=None):
        # Identity / credentials.
        self.role = role
        self.account_id = account_id
        self.api_key = api_key
        self.user_name = user_name

        # Platform / environment info.
        self.os_name = os_name
        self.version = version
        self.config_version = config_version
        self.cloud_region = cloud_region
        self.is_from_docker = is_from_docker
        self.current_running_dir = current_running_dir
        self.log_file_dir = log_file_dir
        self.log_server_url = log_server_url

        # Device identity.
        self.device_id = device_id
        self.current_device_id = current_device_id
        self.unique_device_id = unique_device_id

        # Binding results; client_id mirrors edge_id by construction.
        self.server_id = server_id
        self.edge_id = edge_id
        self.client_id = edge_id
        self.agent_config = agent_config
        self.extra_url = extra_url

        # Fields filled in later by the agent machinery.
        self.client_id_list = None
        self.using_mlops = True
        self.server_agent_id = None
        self.general_edge_id = None

    def is_cloud_server(self):
        """True when this agent runs as the cloud server."""
        return self.role == FedMLAccountManager.ROLE_CLOUD_SERVER

    def is_cloud_agent(self):
        """True when this agent runs as the cloud agent."""
        return self.role == FedMLAccountManager.ROLE_CLOUD_AGENT

    def is_edge_server(self):
        """True when this agent runs as an edge server (launch master)."""
        return self.role == FedMLAccountManager.ROLE_EDGE_SERVER

    def is_edge_device(self):
        """True when this agent runs as an edge device (launch client)."""
        return self.role == FedMLAccountManager.ROLE_EDGE_DEVICE

    def is_gpu_provider(self):
        """True when this agent runs as a GPU supplier."""
        return self.role == FedMLAccountManager.ROLE_GPU_PROVIDER

    def is_slave_agent(self):
        """True for any slave role (edge device or GPU supplier)."""
        return self.role in (
            FedMLAccountManager.ROLE_EDGE_DEVICE, FedMLAccountManager.ROLE_GPU_PROVIDER)
diff --git a/python/fedml/computing/scheduler/scheduler_core/compute_cache_manager.py b/python/fedml/computing/scheduler/scheduler_core/compute_cache_manager.py
index f918c785e2..6247cebe4f 100755
--- a/python/fedml/computing/scheduler/scheduler_core/compute_cache_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/compute_cache_manager.py
@@ -1,10 +1,11 @@
-import threading
 
+import threading
 import redis
 from .compute_gpu_cache import ComputeGpuCache
 from .compute_logs_cache import ComputeLogsCache
 from .business_models import LogsUploadModel, MetricsModel
 from ..comm_utils.constants import SchedulerConstants
+from .compute_status_cache import ComputeStatusCache
 
 
 class ComputeCacheManager(object):
@@ -23,6 +24,7 @@ def init(self):
         self.redis_connection = None
         self.gpu_cache = ComputeGpuCache(self.redis_connection)
         self.logs_cache = ComputeLogsCache(self.redis_connection)
+        self.status_cache = ComputeStatusCache(self.redis_connection)
         self.local_lock = threading.Lock()
 
     def setup_redis_connection(self, redis_addr, redis_port, redis_password="fedml_default"):
@@ -48,6 +50,7 @@ def setup_redis_connection(self, redis_addr, redis_port, redis_password="fedml_d
             self.redis_connection.set("FEDML_TEST_KEYS", "TEST")
             self.gpu_cache.redis_connection = self.redis_connection
             self.logs_cache.redis_connection = self.redis_connection
+            self.status_cache.redis_connection = self.redis_connection
             is_connected = True
         except Exception as e:
             is_connected = False
@@ -69,6 +72,7 @@ def setup_public_redis_connection(self):
             self.redis_connection.set("FEDML_TEST_KEYS", "TEST")
             self.gpu_cache.redis_connection = self.redis_connection
             self.logs_cache.redis_connection = self.redis_connection
+            self.status_cache.redis_connection = self.redis_connection
             is_connected = True
         except Exception as e:
             pass
@@ -134,6 +138,9 @@ def get_artifact_logs(self):
     def get_artifacts(self):
         pass
 
+    def get_status_cache(self):
+        return self.status_cache  # Accessor for the ComputeStatusCache instance created in init().
+
 
 
 
diff --git a/python/fedml/computing/scheduler/scheduler_core/compute_status_cache.py b/python/fedml/computing/scheduler/scheduler_core/compute_status_cache.py
new file mode 100755
index 0000000000..a1929abbef
--- /dev/null
+++ b/python/fedml/computing/scheduler/scheduler_core/compute_status_cache.py
@@ -0,0 +1,76 @@
+import logging
+import traceback
+from .compute_status_db import ComputeStatusDatabase
+from ..master.server_constants import ServerConstants
+
+
+class ComputeStatusCache(object):  # Job/device status store: Redis as fast cache, ComputeStatusDatabase (SQLite) as durable fallback.
+    FEDML_JOB_STATUS_TAG = "FEDML_JOB_STATUS_TAG-"  # Redis key prefix for per-job status entries.
+    FEDML_DEVICE_STATUS_IN_JOB_TAG = "FEDML_DEVICE_STATUS_IN_JOB_TAG-"  # Redis key prefix for per-device-in-job status entries.
+
+    def __init__(self, redis_connection):  # redis_connection may be None/unconnected; every Redis call below is wrapped in try/except.
+        self.redis_connection = redis_connection
+        ComputeStatusDatabase.get_instance().set_database_base_dir(ServerConstants.get_database_dir())
+        ComputeStatusDatabase.get_instance().create_table()  # Ensure the SQLite tables exist before first use.
+
+    def save_job_status(self, run_id, status):  # Write-through: best-effort Redis set, then unconditional SQLite persist.
+        try:
+            self.redis_connection.set(self._get_job_status_key(run_id), status)
+        except Exception as e:
+            logging.error(f"Error setting job status: {e}, Traceback: {traceback.format_exc()}")
+            pass
+
+        ComputeStatusDatabase.get_instance().set_job_status(run_id, status)
+
+    def get_job_status(self, run_id):  # Redis first; on miss or Redis failure, fall back to SQLite and re-warm the cache.
+        status = None
+        try:
+            if self.redis_connection.exists(self._get_job_status_key(run_id)):
+                status = self.redis_connection.get(self._get_job_status_key(run_id))
+        except Exception as e:
+            logging.error(f"Error getting job status: {e}, Traceback: {traceback.format_exc()}")
+            pass
+
+        if status is None:
+            status = ComputeStatusDatabase.get_instance().get_job_status(run_id)
+            try:
+                if status is not None:
+                    self.redis_connection.set(self._get_job_status_key(run_id), status)  # Re-warm Redis from SQLite (best-effort).
+            except Exception as e:
+                pass
+
+        return status
+
+    def save_device_status_in_job(self, run_id, device_id, status):  # Write-through like save_job_status, keyed by (run_id, device_id).
+        try:
+            self.redis_connection.set(self._get_device_status_in_job_key(run_id, device_id), status)
+        except Exception as e:
+            logging.error(f"Error setting device status in job: {e}, Traceback: {traceback.format_exc()}")
+            pass
+
+        ComputeStatusDatabase.get_instance().set_device_status_in_job(run_id, device_id, status)
+
+    def get_device_status_in_job(self, run_id, device_id):  # Redis first; SQLite fallback with best-effort cache re-warm.
+        status = None
+        try:
+            if self.redis_connection.exists(self._get_device_status_in_job_key(run_id, device_id)):
+                status = self.redis_connection.get(self._get_device_status_in_job_key(run_id, device_id))
+        except Exception as e:
+            logging.error(f"Error getting device status in job: {e}, Traceback: {traceback.format_exc()}")
+            pass
+
+        if status is None:
+            status = ComputeStatusDatabase.get_instance().get_device_status_in_job(run_id, device_id)  # NOTE(review): confirm positional order matches ComputeStatusDatabase.get_device_status_in_job's signature.
+            try:
+                if status is not None:
+                    self.redis_connection.set(self._get_device_status_in_job_key(run_id, device_id), status)
+            except Exception as e:
+                pass
+
+        return status
+
+    def _get_job_status_key(self, run_id):  # Build the Redis key for a job's status.
+        return f"{ComputeStatusCache.FEDML_JOB_STATUS_TAG}{run_id}"
+
+    def _get_device_status_in_job_key(self, run_id, device_id):  # Build the Redis key for a device's status within a job.
+        return f"{ComputeStatusCache.FEDML_DEVICE_STATUS_IN_JOB_TAG}{run_id}-{device_id}"
diff --git a/python/fedml/computing/scheduler/scheduler_core/compute_status_db.py b/python/fedml/computing/scheduler/scheduler_core/compute_status_db.py
new file mode 100755
index 0000000000..14219eeb6a
--- /dev/null
+++ b/python/fedml/computing/scheduler/scheduler_core/compute_status_db.py
@@ -0,0 +1,123 @@
+import json
+import os
+import time
+
+from sqlalchemy import Column, String, TEXT, Integer, Float, create_engine, and_
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy.ext.declarative import declarative_base
+from fedml.core.common.singleton import Singleton
+from .base_db import FedMLBaseDb
+from .compute_utils import ComputeUtils
+from ..master.server_constants import ServerConstants
+
+Base = declarative_base()
+
+
+class ComputeStatusDatabase(Singleton, FedMLBaseDb):  # SQLite persistence for job/device status (durable fallback behind the Redis cache).
+    COMPUTE_STATUS_DB = "compute-status.db"  # Database file name under the server database dir.
+
+    def __init__(self):
+        super().__init__()
+
+    @staticmethod
+    def get_instance():
+        return ComputeStatusDatabase()  # Singleton base returns the one shared instance.
+
+    def get_job_status(self, run_id):  # Return the stored status string for run_id, or None when unknown.
+        self.open_job_db()
+        job = self.db_connection.query(FedMLJobStatus). \
+            filter(FedMLJobStatus.job_id == f'{run_id}').first()
+        if job is None:
+            return
+
+        return job.job_status
+
+    def get_device_status_in_job(self, run_id, device_id):  # Fixed arg order: now matches set_device_status_in_job and the cache caller.
+        self.open_job_db()
+        device = self.db_connection.query(FedMLDeviceStatusInJob). \
+            filter(and_(FedMLDeviceStatusInJob.device_id == f'{device_id}',
+                        FedMLDeviceStatusInJob.job_id == f'{run_id}')).first()
+
+        return None if device is None else device.device_status  # Guard missing row (previously raised AttributeError).
+
+    def set_job_status(self, run_id, job_status):  # Insert a new status row for run_id, or update the existing one.
+        self.open_job_db()
+        job = self.db_connection.query(FedMLJobStatus). \
+            filter(FedMLJobStatus.job_id == f'{run_id}').first()
+        if job is None:
+            job = FedMLJobStatus(job_id=run_id, job_status=job_status)
+            self.db_connection.add(job)
+            self.db_connection.commit()
+            return
+
+        if run_id is not None:
+            job.job_id = run_id
+        if job_status is not None:
+            job.job_status = job_status
+
+        self.db_connection.commit()
+
+    def set_device_status_in_job(self, run_id, device_id, status):  # Insert or update a device's status within a run.
+        self.open_job_db()
+        device = self.db_connection.query(FedMLDeviceStatusInJob). \
+            filter(and_(FedMLDeviceStatusInJob.device_id == f'{device_id}',
+                        FedMLDeviceStatusInJob.job_id == f'{run_id}')).first()
+        if device is None:
+            job = FedMLDeviceStatusInJob(job_id=run_id, device_id=device_id, device_status=status)
+            self.db_connection.add(job)
+            self.db_connection.commit()
+            return
+
+        if run_id is not None:
+            device.job_id = run_id
+        if device_id is not None:
+            device.device_id = device_id
+        if status is not None:
+            device.device_status = status
+
+        self.db_connection.commit()
+
+    def set_database_base_dir(self, database_base_dir):  # Override the base dir and recompute the db file path.
+        self.db_base_dir = database_base_dir
+        self.init_db_path()
+
+    def init_db_path(self):  # Resolve self.db_path, defaulting the base dir to the server database dir.
+        if self.db_base_dir is None:
+            if not os.path.exists(ServerConstants.get_database_dir()):
+                os.makedirs(ServerConstants.get_database_dir(), exist_ok=True)
+            self.db_base_dir = ServerConstants.get_database_dir()
+
+        self.db_path = os.path.join(self.db_base_dir, ComputeStatusDatabase.COMPUTE_STATUS_DB)
+
+    def create_table(self):  # Best-effort table creation; errors (e.g. concurrent create) are ignored.
+        self.open_job_db()
+        try:
+            Base.metadata.create_all(self.db_engine, checkfirst=True)
+        except Exception as e:
+            pass
+
+    def drop_table(self):  # Best-effort table drop; errors when tables are absent are ignored.
+        self.open_job_db()
+        try:
+            Base.metadata.drop_all(self.db_engine, checkfirst=True)
+        except Exception as e:
+            pass
+
+
+class FedMLJobStatus(Base):  # ORM row: latest status string per job.
+    __tablename__ = 'job_status'
+
+    id = Column(Integer, primary_key=True)
+    job_id = Column(TEXT)  # Stored as text; callers pass run_id values.
+    job_status = Column(TEXT)
+    timestamp = Column(Integer)  # NOTE(review): never populated by ComputeStatusDatabase — confirm whether intended.
+
+
+class FedMLDeviceStatusInJob(Base):  # ORM row: latest status of one device within one job.
+    __tablename__ = 'device_status_in_job'
+
+    id = Column(Integer, primary_key=True)
+    job_id = Column(TEXT)  # Stored as text; callers pass run_id values.
+    device_id = Column(TEXT)
+    device_status = Column(TEXT)
+    timestamp = Column(Integer)  # NOTE(review): never populated by ComputeStatusDatabase — confirm whether intended.
diff --git a/python/fedml/computing/scheduler/scheduler_core/endpoint_sync_protocol.py b/python/fedml/computing/scheduler/scheduler_core/endpoint_sync_protocol.py
index 545ba75650..91e0815645 100755
--- a/python/fedml/computing/scheduler/scheduler_core/endpoint_sync_protocol.py
+++ b/python/fedml/computing/scheduler/scheduler_core/endpoint_sync_protocol.py
@@ -4,7 +4,8 @@
 from ..model_scheduler.device_model_cache import FedMLModelCache
 from ..model_scheduler.device_model_db import FedMLModelDatabase
 from ..model_scheduler.device_server_data_interface import FedMLServerDataInterface
-from .endpoint_monitor_protocol import EndpointDeviceDeploymentResultModel, EndpointDeviceDeploymentStatusModel, EndpointDeviceDeploymentInfoModel
+from .endpoint_monitor_protocol import EndpointDeviceDeploymentResultModel, \
+    EndpointDeviceDeploymentStatusModel, EndpointDeviceDeploymentInfoModel
 from ..model_scheduler.device_server_constants import ServerConstants
 from urllib.parse import urlparse
 import logging
@@ -82,8 +83,8 @@ def callback_sync_device_result(self, topic, payload):
         topic_splits = str(topic).split('/')
         device_id = topic_splits[-1]
         deployment_result = EndpointDeviceDeploymentResultModel(payload)
-        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-        FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_deployment_result(
+        FedMLModelCache.get_instance().set_redis_params()
+        FedMLModelCache.get_instance().set_deployment_result(
             deployment_result.endpoint_id, deployment_result.endpoint_name, deployment_result.model_name,
             deployment_result.model_version, device_id, payload)
 
@@ -97,8 +98,8 @@ def callback_sync_device_status(self, topic, payload):
         topic_splits = str(topic).split('/')
         device_id = topic_splits[-1]
         deployment_status = EndpointDeviceDeploymentStatusModel(payload)
-        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-        FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_deployment_status(
+        FedMLModelCache.get_instance().set_redis_params()
+        FedMLModelCache.get_instance().set_deployment_status(
             deployment_status.endpoint_id, deployment_status.endpoint_name, deployment_status.model_name,
             deployment_status.model_version, device_id, deployment_status.model_status)
 
@@ -130,7 +131,7 @@ def callback_sync_device_info(self, topic, payload):
                 break
 
         if status_item_found is not None:
-            #print(f"status_item_found {status_item_found}, status_payload_found {status_payload_found}")
+            # print(f"status_item_found {status_item_found}, status_payload_found {status_payload_found}")
             # Delete Status
             FedMLModelCache.get_instance().delete_deployment_status(
                 status_item_found, deployment_info.endpoint_id, deployment_info.endpoint_name,
@@ -143,7 +144,8 @@ def callback_sync_device_info(self, topic, payload):
 
             # Update Status
             model_url_parsed = urlparse(status_payload_found.get("model_url", ""))
-            status_payload_found["model_url"] = f"http://{model_url_parsed.hostname}:{deployment_info.inference_port}{model_url_parsed.path}"
+            status_payload_found["model_url"] = f"http://{model_url_parsed.hostname}:{deployment_info.inference_port}" \
+                                                f"{model_url_parsed.path}"
             status_payload_found["inference_port"] = deployment_info.inference_port
             FedMLModelCache.get_instance().set_deployment_status(
                 deployment_info.endpoint_id, deployment_info.endpoint_name, deployment_info.model_name,
@@ -163,7 +165,7 @@ def callback_sync_device_info(self, topic, payload):
                 break
 
         if result_item_found is not None:
-            #print(f"result_item_found {result_item_found}, result_payload_found {result_payload_found}")
+            # print(f"result_item_found {result_item_found}, result_payload_found {result_payload_found}")
             FedMLModelCache.get_instance().delete_deployment_result(
                 result_item_found, deployment_info.endpoint_id, deployment_info.endpoint_name,
                 deployment_info.model_name)
@@ -174,7 +176,8 @@ def callback_sync_device_info(self, topic, payload):
                 result_payload_found["model_status"] = ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED
             
             model_url_parsed = urlparse(result_payload_found.get("model_url", ""))
-            result_payload_found["model_url"] = f"http://{model_url_parsed.hostname}:{deployment_info.inference_port}{model_url_parsed.path}"
+            result_payload_found["model_url"] = f"http://{model_url_parsed.hostname}:{deployment_info.inference_port}" \
+                                                f"{model_url_parsed.path}"
             result_payload_found["inference_port"] = deployment_info.inference_port
             FedMLModelCache.get_instance().set_deployment_result(
                 deployment_info.endpoint_id, deployment_info.endpoint_name, deployment_info.model_name,
@@ -183,12 +186,12 @@ def callback_sync_device_info(self, topic, payload):
     def set_local_deployment_status_result(
             self, endpoint_id, endpoint_name, model_name, model_version, device_id,
             inference_port, status_payload, result_payload):
-        '''
+        """
         The result and status are saved in the local sqlite table.
         They both belong to the table deployment_result_info;
         deployment_result column is used to save the result;
         deployment_status column is used to save the status.
-        '''
+        """
         if status_payload is not None:
             model_url_parsed = urlparse(status_payload.get("model_url", ""))
             status_payload["model_url"] = f"http://{model_url_parsed.hostname}:{inference_port}{model_url_parsed.path}"
diff --git a/python/fedml/computing/scheduler/scheduler_core/general_constants.py b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
new file mode 100755
index 0000000000..e642cacf1b
--- /dev/null
+++ b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
@@ -0,0 +1,193 @@
+import logging
+import os
+
+from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants
+from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils
+from fedml.computing.scheduler.slave.client_constants import ClientConstants
+from fedml.computing.scheduler.master.server_constants import ServerConstants
+from fedml.computing.scheduler.model_scheduler import device_client_constants
+from fedml.computing.scheduler.model_scheduler import device_server_constants
+
+
+class GeneralConstants:  # Shared constants and helpers used by both the launch and deploy schedulers.
+    MSG_TOPIC_REQUEST_JOB_STATUS_PREFIX = "anywhere/master_agent/request_job_status/"
+    MSG_TOPIC_REPORT_DEVICE_STATUS_IN_JOB = "slave_job/slave_agent/report_device_status_in_job"
+    MSG_TOPIC_SEND_TRAINING_REQUEST_TO_EDGES = "job_runner/master_protocol_manager/send_training_request_to_edges"
+
+    CLIENT_SHELL_BASH = SchedulerConstants.CLIENT_SHELL_BASH
+    CLIENT_SHELL_PS = SchedulerConstants.CLIENT_SHELL_PS
+    PLATFORM_WINDOWS = "Windows"
+
+    MSG_MLOPS_CLIENT_STATUS_OFFLINE = "OFFLINE"
+    MSG_MLOPS_CLIENT_STATUS_PROVISIONING = "PROVISIONING"
+    MSG_MLOPS_CLIENT_STATUS_IDLE = "IDLE"
+    MSG_MLOPS_CLIENT_STATUS_UPGRADING = "UPGRADING"
+    MSG_MLOPS_CLIENT_STATUS_QUEUED = "QUEUED"
+    MSG_MLOPS_CLIENT_STATUS_INITIALIZING = "INITIALIZING"
+    MSG_MLOPS_CLIENT_STATUS_TRAINING = "TRAINING"
+    MSG_MLOPS_CLIENT_STATUS_RUNNING = "RUNNING"
+    MSG_MLOPS_CLIENT_STATUS_STOPPING = "STOPPING"
+    MSG_MLOPS_CLIENT_STATUS_KILLED = "KILLED"
+    MSG_MLOPS_CLIENT_STATUS_FAILED = "FAILED"
+    MSG_MLOPS_CLIENT_STATUS_EXCEPTION = "EXCEPTION"
+    MSG_MLOPS_CLIENT_STATUS_FINISHED = "FINISHED"
+
+    MSG_MLOPS_SERVER_STATUS_OFFLINE = "OFFLINE"
+    MSG_MLOPS_SERVER_STATUS_PROVISIONING = "PROVISIONING"
+    MSG_MLOPS_SERVER_STATUS_IDLE = "IDLE"
+    MSG_MLOPS_SERVER_STATUS_UPGRADING = "UPGRADING"
+    MSG_MLOPS_SERVER_STATUS_STARTING = "STARTING"
+    MSG_MLOPS_SERVER_STATUS_RUNNING = "RUNNING"
+    MSG_MLOPS_SERVER_STATUS_STOPPING = "STOPPING"
+    MSG_MLOPS_SERVER_STATUS_KILLED = "KILLED"
+    MSG_MLOPS_SERVER_STATUS_FAILED = "FAILED"
+    MSG_MLOPS_SERVER_STATUS_FINISHED = "FINISHED"
+    MSG_MLOPS_SERVER_STATUS_EXCEPTION = "EXCEPTION"
+
+    MASTER_LOGIN_PROGRAM = "server_login.py"
+    SLAVE_LOGIN_PROGRAM = "client_login.py"
+
+    CONFIG_KEY_AUTO_DETECT_PUBLIC_IP = "auto_detect_public_ip"
+    FEDML_OTA_CMD_UPGRADE = "upgrade"
+    FEDML_OTA_CMD_RESTART = "restart"
+
+    @staticmethod
+    def get_package_unzip_dir(package_download_dir):  # Ensure the unzip dir exists and return it (same as the download dir).
+        package_unzip_dir = package_download_dir
+        if not os.path.exists(package_unzip_dir):
+            os.makedirs(package_unzip_dir, exist_ok=True)
+        return package_unzip_dir
+
+    @staticmethod
+    def get_filename_and_extension(url):  # Delegates to the launch client constants helper.
+        return ClientConstants.get_filename_and_extension(url)
+
+    @staticmethod
+    def generate_yaml_doc(run_config_object, yaml_file):  # Delegates to the launch client constants helper.
+        ClientConstants.generate_yaml_doc(run_config_object, yaml_file)
+
+    @staticmethod
+    def execute_commands_with_live_logs(cmds, join='&&', should_write_log_file=True,
+                                        callback=None, error_processor=None):  # Delegates command execution with live log capture.
+        return ClientConstants.execute_commands_with_live_logs(
+            cmds, join=join, should_write_log_file=should_write_log_file,
+            callback=callback, error_processor=error_processor
+        )
+
+    @staticmethod
+    def cleanup_run_process(run_id, is_master=False):  # Kill/cleanup the run process for the master or slave side.
+        if is_master:
+            ServerConstants.cleanup_run_process(run_id)
+        else:
+            ClientConstants.cleanup_run_process(run_id)
+
+    @staticmethod
+    def cleanup_learning_process(run_id, data_dir=None):  # Cleanup user-process records of a run.
+        RunProcessUtils.cleanup_run_process(
+            run_id, data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME,
+            info_file_prefix=SchedulerConstants.RUN_PROCESS_TYPE_USER_PROCESS)
+
+    @staticmethod
+    def cleanup_bootstrap_process(run_id, data_dir=None):  # Cleanup bootstrap-process records of a run.
+        RunProcessUtils.cleanup_run_process(
+            run_id, data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME,
+            info_file_prefix=SchedulerConstants.RUN_PROCESS_TYPE_BOOTSTRAP_PROCESS)
+
+    @staticmethod
+    def save_learning_process(run_id, learning_id, data_dir=None):  # Record a user process id for later cleanup.
+        RunProcessUtils.save_run_process(
+            run_id, learning_id, data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME,
+            info_file_prefix=SchedulerConstants.RUN_PROCESS_TYPE_USER_PROCESS)
+
+    @staticmethod
+    def save_bootstrap_process(run_id, process_id, data_dir=None):  # Record a bootstrap process id for later cleanup.
+        RunProcessUtils.save_run_process(
+            run_id, process_id, data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME,
+            info_file_prefix=SchedulerConstants.RUN_PROCESS_TYPE_BOOTSTRAP_PROCESS)
+
+    @staticmethod
+    def save_run_process(run_id, process_id, is_master=False):  # Record a run process id under the proper data dir.
+        RunProcessUtils.save_run_process(
+            run_id, process_id, ServerConstants.get_data_dir() if is_master else ClientConstants.get_data_dir(),
+            ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME)
+
+    @staticmethod
+    def get_learning_process_list(run_id, is_master=False):  # List recorded user processes for a run.
+        return RunProcessUtils.get_run_process_list(
+            run_id, ServerConstants.get_data_dir() if is_master else ClientConstants.get_data_dir(),
+            ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME,
+            info_file_prefix=SchedulerConstants.RUN_PROCESS_TYPE_USER_PROCESS)
+
+    @staticmethod
+    def get_launch_fedml_home_dir(is_master=False):  # Launch-scheduler home dir for master or slave.
+        return ServerConstants.get_fedml_home_dir() if is_master else ClientConstants.get_fedml_home_dir()
+
+    @staticmethod
+    def get_deploy_fedml_home_dir(is_master=False):  # Deploy-scheduler home dir for master or slave.
+        return device_server_constants.ServerConstants.get_fedml_home_dir() if is_master \
+            else device_client_constants.ClientConstants.get_fedml_home_dir()
+
+    @staticmethod
+    def get_launch_log_file_dir(is_master=False):  # Launch-scheduler log dir for master or slave.
+        return ServerConstants.get_log_file_dir() if is_master else ClientConstants.get_log_file_dir()
+
+    @staticmethod
+    def get_deploy_log_file_dir(is_master=False):  # Deploy-scheduler log dir for master or slave.
+        return device_server_constants.ServerConstants.get_log_file_dir() if is_master \
+            else device_client_constants.ClientConstants.get_log_file_dir()
+
+    @staticmethod
+    def get_launch_data_dir(is_master=False):  # Launch-scheduler data dir for master or slave.
+        return ServerConstants.get_data_dir() if is_master else ClientConstants.get_data_dir()
+
+    @staticmethod
+    def get_deploy_data_dir(is_master=False):  # Deploy-scheduler data dir for master or slave.
+        return device_server_constants.ServerConstants.get_data_dir() if is_master \
+            else device_client_constants.ClientConstants.get_data_dir()
+
+    @staticmethod
+    def get_deploy_docker_location_file(is_master=False):  # Deploy-scheduler docker location file for master or slave.
+        return device_server_constants.ServerConstants.get_docker_location_file() if is_master \
+            else device_client_constants.ClientConstants.get_docker_location_file()
+
+    @staticmethod
+    def get_launch_docker_location_file(is_master=False):  # Launch-scheduler docker location file for master or slave.
+        return ServerConstants.get_docker_location_file() if is_master \
+            else ClientConstants.get_docker_location_file()
+
+    @staticmethod
+    def get_local_ip():  # Determine the primary local IP via a UDP "connect" (no packet is actually sent).
+        import socket
+        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        s.connect(('8.8.8.8', 53))  # connect() returns None; the useless 'conn =' binding was removed.
+        ip = s.getsockname()[0]
+        s.close()
+        return ip
+
+    @staticmethod
+    def get_public_ip():  # Best-effort public IP lookup; returns None on any failure.
+        import requests
+        ip = None
+        try:
+            ip = requests.get('https://checkip.amazonaws.com').text.strip()
+        except Exception as e:
+            logging.info("Failed to get public ip: {}".format(e))
+        return ip
+
+    @staticmethod
+    def get_ip_address(request_json, infer_host=None):  # Resolve the advertised IP: local -> optional public -> explicit override.
+        # OPTION 1: Use local ip
+        ip = GeneralConstants.get_local_ip()
+
+        # OPTION 2: Auto detect public ip
+        if "parameters" in request_json and \
+                GeneralConstants.CONFIG_KEY_AUTO_DETECT_PUBLIC_IP in request_json["parameters"] and \
+                request_json["parameters"][GeneralConstants.CONFIG_KEY_AUTO_DETECT_PUBLIC_IP]:
+            ip = GeneralConstants.get_public_ip()
+            logging.info(f"Auto detect public ip for master: {ip}")  # f-string: safe even when get_public_ip() returned None.
+
+        # OPTION 3: Use user indicated ip
+        if infer_host is not None and infer_host != "127.0.0.1" and infer_host != "localhost":
+            ip = infer_host
+
+        return ip
diff --git a/python/fedml/computing/scheduler/scheduler_core/master_api_daemon.py b/python/fedml/computing/scheduler/scheduler_core/master_api_daemon.py
index 5cebf757d6..5876a787ce 100755
--- a/python/fedml/computing/scheduler/scheduler_core/master_api_daemon.py
+++ b/python/fedml/computing/scheduler/scheduler_core/master_api_daemon.py
@@ -1,7 +1,8 @@
 from fastapi import FastAPI, Request
-from .log_manager import LogsManager
-from .metrics_manager import MetricsManager
-from ..comm_utils import  sys_utils
+from fedml.computing.scheduler.scheduler_core.log_manager import LogsManager
+from fedml.computing.scheduler.scheduler_core.metrics_manager import MetricsManager
+from fedml.computing.scheduler.comm_utils import sys_utils
+from fedml.computing.scheduler.scheduler_core.compute_cache_manager import ComputeCacheManager
 import os
 
 
@@ -52,6 +53,19 @@ async def update_log(request: Request):
         async def ready():
             return {"status": "Success"}
 
+        @api.get("/get_job_status")
+        async def get_job_status(job_id):  # job_id arrives as a query parameter (string).
+            ComputeCacheManager.get_instance().set_redis_params()
+            job_status = ComputeCacheManager.get_instance().get_status_cache().get_job_status(job_id)
+            return {"job_status": job_status}  # job_status is None when the job is unknown.
+
+        @api.get("/get_device_status_in_job")
+        async def get_device_status_in_job(job_id, device_id):  # Both arrive as query parameters (strings).
+            ComputeCacheManager.get_instance().set_redis_params()
+            device_status_in_job = ComputeCacheManager.get_instance().get_status_cache().get_device_status_in_job(
+                job_id, device_id)
+            return {"device_status_in_job": device_status_in_job}  # None when no record exists for this pair.
+
         import uvicorn
         port = 30800
         if sys_utils.check_port("localhost", port):
@@ -59,7 +73,6 @@ async def ready():
 
         cur_dir = os.path.dirname(__file__)
         fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
-        uvicorn.run(api, host="0.0.0.0", port=port, reload=True, reload_delay=3, reload_dirs=fedml_base_dir)
-
+        uvicorn.run(api, host="0.0.0.0", port=port)
 
 
diff --git a/python/fedml/computing/scheduler/scheduler_core/message_center.py b/python/fedml/computing/scheduler/scheduler_core/message_center.py
index c7af555a99..7ae1e4c0b5 100755
--- a/python/fedml/computing/scheduler/scheduler_core/message_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/message_center.py
@@ -1,3 +1,4 @@
+import json
 import logging
 import os
 import threading
@@ -7,20 +8,28 @@
 import multiprocessing
 from multiprocessing import Process, Queue
 import queue
+from os.path import expanduser
 
 from fedml.core.distributed.communication.mqtt.mqtt_manager import MqttManager
 from ....core.mlops.mlops_metrics import MLOpsMetrics
 from operator import methodcaller
+from .message_common import FedMLMessageEntity, FedMLMessageRecord
 
 
-class FedMLMessageCenter:
+class FedMLMessageCenter(object):
     FUNC_SETUP_MESSAGE_CENTER = "setup_message_center"
     FUNC_REBUILD_MESSAGE_CENTER = "rebuild_message_center"
-
-    def __init__(self, agent_config=None, message_queue=None, listener_message_queue=None):
+    ENABLE_SAVE_MESSAGE_TO_FILE = True
+    PUBLISH_MESSAGE_RETRY_TIMEOUT = 60 * 1000.0
+    PUBLISH_MESSAGE_RETRY_COUNT = 3
+    MESSAGE_SENT_RECORDS_FILE = "message-sent-records.log"
+    MESSAGE_SENT_SUCCESS_RECORDS_FILE = "message-sent-success-records.log"
+    MESSAGE_RECEIVED_RECORDS_FILE = "message-received-records.log"
+
+    def __init__(self, agent_config=None, sender_message_queue=None, listener_message_queue=None):
         self.sender_agent_config = agent_config
         self.listener_agent_config = agent_config
-        self.message_queue = message_queue
+        self.sender_message_queue = sender_message_queue
         self.message_event = None
         self.message_center_process = None
         self.sender_mqtt_mgr = None
@@ -32,9 +41,15 @@ def __init__(self, agent_config=None, message_queue=None, listener_message_queue
         self.listener_payloads = dict()
         self.listener_handler_funcs = dict()
         self.listener_handler_object = None
-        self.listener_message_queue = None
+        self.listener_message_queue = listener_message_queue
         self.listener_message_event = None
         self.listener_message_center_process = None
+        self.sender_message_list = list()
+        self.receiver_message_list = list()
+        self.published_message_ids = list()
+        self.retry_sending_count_map = dict()
+        self.constants = FedMLMessageCenterConstants()
+        self.message_center_name = None
 
     def __repr__(self):
         return "<{klass} @{id:x} {attrs}>".format(
@@ -64,6 +79,10 @@ def on_sender_mqtt_connected(self, mqtt_client_object):
         self.sender_mqtt_is_connected = True
         self.sender_mqtt_lock.release()
 
+    def on_sender_mqtt_published(self, mqtt_client_object, obj, mid):  # MQTT on_publish callback: mark message 'mid' as delivered.
+        self.published_message_ids.append({"message_id": mid, "timestamp": time.time_ns() / 1000.0 / 1000.0})  # ns -> ms; was /100.0/1000.0 (ns/1e5, a non-standard unit) while this module's other timestamps use milliseconds.
+        self.save_published_message_record(mid)
+
     def setup_sender_mqtt_mgr(self):
         if self.sender_mqtt_mgr is not None:
             return
@@ -82,6 +101,7 @@ def setup_sender_mqtt_mgr(self):
 
         self.sender_mqtt_mgr.add_connected_listener(self.on_sender_mqtt_connected)
         self.sender_mqtt_mgr.add_disconnected_listener(self.on_sender_mqtt_disconnected)
+        self.sender_mqtt_mgr.add_published_listener(self.on_sender_mqtt_published)
         self.sender_mqtt_mgr.connect()
         self.sender_mqtt_mgr.loop_start()
 
@@ -90,6 +110,7 @@ def setup_sender_mqtt_mgr(self):
         self.sender_mlops_metrics.set_messenger(self)
 
     def release_sender_mqtt_mgr(self):
+        # noinspection PyBroadException
         try:
             if self.sender_mqtt_mgr is not None:
                 self.sender_mqtt_mgr.loop_stop()
@@ -105,17 +126,19 @@ def release_sender_mqtt_mgr(self):
                 f"Failed to release sender mqtt manager with Exception {e}. Traceback: {traceback.format_exc()}")
             pass
 
-    def get_message_queue(self):
-        return self.message_queue
+    def get_sender_message_queue(self):
+        return self.sender_message_queue
 
-    def start_sender(self):
-        self.message_queue = Queue()
+    def start_sender(self, message_center_name=None):
+        self.sender_message_queue = Queue()
         self.message_event = multiprocessing.Event()
         self.message_event.clear()
-        message_center = FedMLMessageCenter(agent_config=self.sender_agent_config, message_queue=self.message_queue)
+        message_center = FedMLMessageCenter(agent_config=self.sender_agent_config,
+                                            sender_message_queue=self.sender_message_queue)
         self.message_center_process = Process(
             target=message_center.run_sender, args=(
-                self.message_event, self.message_queue,
+                self.message_event, self.sender_message_queue,
+                message_center_name
             )
         )
         self.message_center_process.start()
@@ -130,43 +153,96 @@ def stop(self):
     def check_message_stop_event(self):
         if self.message_event is not None and self.message_event.is_set():
             logging.info("Received message center stopping event.")
-            raise Exception("Message center stopped (for sender)")
+            raise MessageCenterStoppedException("Message center stopped (for sender)")
 
     def send_message(self, topic, payload, run_id=None):
         message_entity = FedMLMessageEntity(topic=topic, payload=payload, run_id=run_id)
-        self.message_queue.put(message_entity.get_message_body())
+        self.sender_message_queue.put(message_entity.get_message_body())
 
    def send_message_json(self, topic, payload):
        """Enqueue a message for sending; delegates directly to send_message."""
        self.send_message(topic, payload)
 
-    def run_sender(self, message_event, message_queue):
+    def retry_sending_undelivered_message(self):
+        for sender_message in self.sender_message_list:
+            # Check if the message is published.
+            message_record = FedMLMessageRecord(json_record=sender_message)
+            is_published = False
+            for published_message in self.published_message_ids:
+                published_message_record = FedMLMessageRecord(json_record=published_message)
+                if published_message_record.message_id == message_record.message_id:
+                    is_published = True
+                    break
+            if is_published:
+                continue
+
+            # Retry to send the unpublished message based on the timeout value
+            timeout_ms = time.time() * 1000.0 - message_record.timestamp
+            if timeout_ms >= FedMLMessageCenter.PUBLISH_MESSAGE_RETRY_TIMEOUT and \
+                self.retry_sending_count_map.get(message_record.message_id, 0) < \
+                    FedMLMessageCenter.PUBLISH_MESSAGE_RETRY_COUNT:
+                # Send the message
+                message_entity = FedMLMessageEntity(message_body=message_record.message_body)
+                message_id = self.sender_mqtt_mgr.send_message_json(message_entity.topic, message_entity.payload)
+                self.retry_sending_count_map[message_record.message_id] += 1
+
+                # Generate the new message record.
+                sent_message_record = FedMLMessageRecord(message_id=message_id,
+                                                         message_body=message_record.message_body)
+
+                # Save the message
+                self.save_message_record(message_entity.run_id, message_entity.device_id, sent_message_record)
+
+    def run_sender(self, message_event, message_queue, message_center_name):
         self.message_event = message_event
-        self.message_queue = message_queue
+        self.sender_message_queue = message_queue
+        self.message_center_name = message_center_name
         self.setup_sender_mqtt_mgr()
-        time.sleep(5)
 
         while True:
+            message_entity = None
             try:
                 self.check_message_stop_event()
             except MessageCenterStoppedException as e:
                 break
 
+            # noinspection PyBroadException
             try:
+                # Setup the mqtt connection
                 self.setup_sender_mqtt_mgr()
 
+                # Get the message from the queue
                 try:
-                    message_body = self.message_queue.get(block=False, timeout=0.1)
+                    message_body = message_queue.get(block=False, timeout=0.1)
                 except queue.Empty as e:  # If queue is empty, then break loop
                     message_body = None
                 if message_body is None:
                     time.sleep(0.1)
+                    self.retry_sending_undelivered_message()
                     continue
 
+                # Generate the message entity object
                 message_entity = FedMLMessageEntity(message_body=message_body)
-                self.sender_mqtt_mgr.send_message_json(message_entity.topic, message_entity.payload)
+
+                # Send the message to mqtt server
+                message_id = self.sender_mqtt_mgr.send_message_json(message_entity.topic, message_entity.payload)
+
+                # Generate the message record.
+                message_record = FedMLMessageRecord(message_id=message_id, message_body=message_body)
+
+                # Cache the message
+                self.cache_message_record(message_record, is_sender=True)
+
+                # Save the message
+                self.save_message_record(message_entity.run_id, message_entity.device_id, message_record)
+
             except Exception as e:
-                logging.info(
-                    f"Failed to send the message with topic {message_entity.topic}, payload {message_entity.payload}, {traceback.format_exc()}")
+                if message_entity is not None:
+                    logging.info(
+                        f"Failed to send the message with topic {message_entity.topic}, "
+                        f"payload {message_entity.payload}, {traceback.format_exc()}"
+                    )
+                else:
+                    logging.info(f"Failed to send the message: {traceback.format_exc()}")
 
         self.release_sender_mqtt_mgr()
 
@@ -194,7 +270,9 @@ def release_listener_mqtt_mgr(self):
                 self.listener_mqtt_mgr = None
         except Exception as e:
             logging.error(
-                f"Failed to release listener mqtt manager with Exception {e}. Traceback: {traceback.format_exc()}")
+                f"Failed to release listener mqtt manager with Exception {e}. "
+                f"Traceback: {traceback.format_exc()}"
+            )
             pass
 
     def add_message_listener(self, topic, listener_func):
@@ -207,10 +285,13 @@ def remove_message_listener(self, topic):
             self.listener_topics.remove(topic)
             self.listener_handler_funcs.pop(topic)
 
-    def get_runner(self):
+    def get_message_runner(self):
         return None
 
-    def start_listener(self, sender_message_queue=None, agent_config=None):
    def get_listener_message_queue(self):
        """Return the queue that feeds run_listener_dispatcher."""
        return self.listener_message_queue
+
+    def start_listener(self, sender_message_queue=None, agent_config=None, message_center_name=None):
         if self.listener_message_center_process is not None:
             return
 
@@ -218,12 +299,13 @@ def start_listener(self, sender_message_queue=None, agent_config=None):
         self.listener_message_event = multiprocessing.Event()
         self.listener_message_event.clear()
         self.listener_agent_config = agent_config
-        message_runner = self.get_runner()
+        message_runner = self.get_message_runner()
         message_runner.listener_agent_config = agent_config
         self.listener_message_center_process = Process(
             target=message_runner.run_listener_dispatcher, args=(
                 self.listener_message_event, self.listener_message_queue,
-                self.listener_handler_funcs, sender_message_queue
+                self.listener_handler_funcs, sender_message_queue,
+                message_center_name
             )
         )
         self.listener_message_center_process.start()
@@ -231,11 +313,17 @@ def start_listener(self, sender_message_queue=None, agent_config=None):
     def check_listener_message_stop_event(self):
         if self.listener_message_event is not None and self.listener_message_event.is_set():
             logging.info("Received listener message center stopping event.")
-            raise Exception("Message center stopped (for listener)")
+            raise MessageCenterStoppedException("Message center stopped (for listener)")
 
    def listener_message_dispatch_center(self, topic, payload):
        """MQTT callback target: forward an incoming message into the listener queue."""
        self.receive_message_json(topic, payload)
 
+    def listener_message_passthrough_dispatch_center(self, message):
+        payload_obj = json.loads(message.payload)
+        payload_obj["is_retain"] = message.retain
+        payload = json.dumps(payload_obj)
+        self.receive_message_json(message.topic, payload)
+
     def receive_message(self, topic, payload, run_id=None):
         message_entity = FedMLMessageEntity(topic=topic, payload=payload, run_id=run_id)
         self.listener_message_queue.put(message_entity.get_message_body())
@@ -252,10 +340,13 @@ def unsubscribe_msg(self, topic):
         self.listener_mqtt_mgr.unsubscribe_msg(topic)
 
     def run_listener_dispatcher(
-            self, message_event, message_queue, listener_funcs, sender_message_queue):
+            self, message_event, message_queue, listener_funcs, sender_message_queue,
+            message_center_name
+    ):
         self.listener_message_event = message_event
         self.listener_message_queue = message_queue
         self.listener_handler_funcs = listener_funcs
+        self.message_center_name = message_center_name
 
         self.setup_listener_mqtt_mgr()
 
@@ -265,51 +356,109 @@ def run_listener_dispatcher(
             methodcaller(FedMLMessageCenter.FUNC_REBUILD_MESSAGE_CENTER, sender_message_queue)(self)
 
         while True:
+            message_entity = None
             try:
                 self.check_listener_message_stop_event()
             except MessageCenterStoppedException as e:
                 break
 
+            # noinspection PyBroadException
             try:
+                # Setup the mqtt connection
                 self.setup_listener_mqtt_mgr()
 
+                # Get the message from the queue
                 try:
-                    message_body = self.listener_message_queue.get(block=False, timeout=0.1)
+                    message_body = message_queue.get(block=False, timeout=0.1)
                 except queue.Empty as e:  # If queue is empty, then break loop
                     message_body = None
                 if message_body is None:
                     time.sleep(0.1)
                     continue
 
+                # Generate the message entity object
                 message_entity = FedMLMessageEntity(message_body=message_body)
 
+                # Generate the message record
+                message_record = FedMLMessageRecord(message_id=str(uuid.uuid4()), message_body=message_body)
+
+                # Cache the message
+                self.cache_message_record(message_record, is_sender=False)
+
+                # Save the message
+                self.save_message_record(message_entity.run_id, message_entity.device_id,
+                                         message_record, is_sender=False)
+
+                # Dispatch the message to corresponding handler
                 message_handler_func_name = self.listener_handler_funcs.get(message_entity.topic, None)
                 if message_handler_func_name is not None:
                     methodcaller(message_handler_func_name, message_entity.topic, message_entity.payload)(self)
             except Exception as e:
-                logging.info(
-                    f"Failed to dispatch messages with topic {message_entity.topic}, payload {message_entity.payload}, {traceback.format_exc()}")
-
+                if message_entity is not None:
+                    logging.info(
+                        f"Failed to dispatch messages with topic {message_entity.topic}, "
+                        f"payload {message_entity.payload}, {traceback.format_exc()}")
+                else:
+                    logging.info(f"Failed to dispatch messages:  {traceback.format_exc()}")
         self.release_listener_mqtt_mgr()
 
-class FedMLMessageEntity(object):
-    def __init__(self, topic=None, payload=None, run_id=None, message_body: dict = None):
-        self.topic = topic
-        self.payload = payload
-        self.run_id = run_id
-        if message_body is not None:
-            self.from_message_body(message_body=message_body)
+    def cache_message_record(self, message_record, is_sender=True):
+        # Save the message to the cached list.
+        if is_sender:
+            self.sender_message_list.append(message_record.get_json_record())
+        else:
+            self.receiver_message_list.append(message_record.get_json_record())
+
+    def save_message_record(self, run_id, device_id, message_record, is_sender=True):
+        # Check if we enable to log messages to file
+        if not FedMLMessageCenter.ENABLE_SAVE_MESSAGE_TO_FILE:
+            return
+
+        # Log messages to file
+        if is_sender:
+            print(f"save sent message record: {message_record.get_json_record()}")
+        else:
+            print(f"save received message record: {message_record.get_json_record()}")
+        saved_message_file = os.path.join(
+            self.constants.message_log_dir,
+            self.message_center_name,
+            FedMLMessageCenter.MESSAGE_SENT_RECORDS_FILE if is_sender else
+            FedMLMessageCenter.MESSAGE_RECEIVED_RECORDS_FILE
+        )
+        os.makedirs(os.path.dirname(saved_message_file), exist_ok=True)
+        with open(saved_message_file, "a+") as f:
+            f.writelines([json.dumps(message_record.get_json_record()) + "\n"])
+
+    def save_published_message_record(self, message_id):
+        # Check if we enable to log messages to file
+        if not FedMLMessageCenter.ENABLE_SAVE_MESSAGE_TO_FILE:
+            return
 
-    def from_message_body(self, message_body: dict = None):
-        self.topic = message_body.get("topic", None)
-        self.payload = message_body.get("payload", None)
-        self.run_id = message_body.get("run_id", None)
+        # Log published message ids to file
+        message_record = {"message_id": message_id, "timestamp": time.time_ns()/1000.0/1000.0}
+        published_msg_record_file = os.path.join(
+            self.constants.message_log_dir, self.message_center_name,
+            FedMLMessageCenter.MESSAGE_SENT_SUCCESS_RECORDS_FILE)
+        os.makedirs(os.path.dirname(published_msg_record_file), exist_ok=True)
+        print(f"save sent success message record: {message_record}")
+        with open(published_msg_record_file, "a+") as f:
+            f.writelines([json.dumps(message_record) + "\n"])
 
-    def get_message_body(self):
-        message_body = {"topic": self.topic, "payload": self.payload, "run_id": self.run_id}
-        return message_body
+    @staticmethod
+    def rebuild_message_center_from_queue(sender_message_queue, listener_message_queue=None):
+        message_center = FedMLMessageCenter(sender_message_queue=sender_message_queue,
+                                            listener_message_queue=listener_message_queue)
+        return message_center
 
 
 class MessageCenterStoppedException(Exception):
     """ Message center stopped. """
     pass
+
+
class FedMLMessageCenterConstants:
    """Filesystem locations used by the message center for its record logs.

    Instantiating this class also creates the log directory if it is missing.
    """

    def __init__(self):
        home = expanduser("~")
        center_dir = os.path.join(home, ".fedml", "global_services", "message_center")
        log_dir = os.path.join(center_dir, "logs")
        os.makedirs(log_dir, exist_ok=True)
        self.home_dir = home
        self.message_center_dir = center_dir
        self.message_log_dir = log_dir
diff --git a/python/fedml/computing/scheduler/scheduler_core/message_common.py b/python/fedml/computing/scheduler/scheduler_core/message_common.py
new file mode 100755
index 0000000000..24449af3b5
--- /dev/null
+++ b/python/fedml/computing/scheduler/scheduler_core/message_common.py
@@ -0,0 +1,77 @@
+import json
+import time
+
+
class FedMLMessageEntity(object):
    """One message flowing through the message center: topic, payload and routing ids.

    Can be built either from explicit fields or from a queued ``message_body``
    dict (the form produced by :meth:`get_message_body`).
    """

    def __init__(self, topic=None, payload=None, run_id=None, device_id=None, message_body: dict = None):
        self.topic = topic
        self.payload = payload
        self.run_id = run_id
        self.device_id = device_id
        if message_body is not None:
            self.from_message_body(message_body=message_body)

    def from_message_body(self, message_body: dict = None):
        """Populate fields from a queued message dict, sniffing ids out of the payload.

        The payload is usually a JSON object carrying ``run_id``/``runId`` and
        ``edge_id``/``ID``. Robustness fix: a non-JSON (or non-object) payload
        is kept verbatim with the ids left as None, where the original
        implementation raised and broke message handling.
        """
        self.topic = message_body.get("topic", None)
        self.payload = message_body.get("payload", None)
        if self.payload is None:
            return
        try:
            payload_json = json.loads(self.payload)
        except (ValueError, TypeError):
            return
        if not isinstance(payload_json, dict):
            return
        self.run_id = payload_json.get("run_id", None)
        if self.run_id is None:
            self.run_id = payload_json.get("runId", None)
        self.device_id = payload_json.get("edge_id", None)
        if self.device_id is None:
            self.device_id = payload_json.get("ID", None)

    def get_message_body(self):
        """Return the dict form used when putting this message on a queue."""
        return {"topic": self.topic, "payload": self.payload, "run_id": self.run_id}
+
+
class FedMLMessageRecord(object):
    """Serializable record of one message: id, raw body and creation time (ms)."""

    def __init__(self, message_id=None, message_body=None, json_record=None):
        self.message_id = message_id
        self.message_body = message_body
        # Millisecond-resolution wall-clock timestamp; overwritten below when
        # restoring from an existing json_record.
        self.timestamp = time.time_ns() / 1000.0 / 1000.0
        if json_record is not None:
            self.from_message_record(json_record=json_record)

    def get_json_record(self):
        """Return the dict form persisted to the message log files."""
        return {"message_id": self.message_id,
                "message_body": self.message_body,
                "timestamp": self.timestamp}

    def from_message_record(self, json_record: dict = None):
        """Restore every field from a dict produced by get_json_record()."""
        for field in ("message_id", "message_body", "timestamp"):
            setattr(self, field, json_record.get(field, None))
+
+
class FedMLStatusEntity(object):
    """Status message parsed from a topic/payload pair (run id, edge id, status)."""

    def __init__(self, topic=None, payload=None, status_msg_body: dict = None):
        self.topic = topic
        self.payload = payload
        self.run_id = None
        self.edge_id = None
        self.status = None
        if status_msg_body is not None:
            self.from_message_body(status_msg_body=status_msg_body)

    def from_message_body(self, status_msg_body: dict = None):
        """Populate fields from a queued status dict; ids come from the JSON payload.

        Robustness fix: a non-JSON (or non-object) payload is kept verbatim
        with ids/status left as None, where the original implementation raised
        and broke status handling.
        """
        self.topic = status_msg_body.get("topic", None)
        self.payload = status_msg_body.get("payload", None)
        if self.payload is None:
            return
        try:
            payload_json = json.loads(self.payload)
        except (ValueError, TypeError):
            return
        if not isinstance(payload_json, dict):
            return
        self.run_id = payload_json.get("run_id", None)
        if self.run_id is None:
            self.run_id = payload_json.get("runId", None)
        self.edge_id = payload_json.get("edge_id", None)
        self.status = payload_json.get("status", None)

    def get_message_body(self):
        """Return the dict form used when queueing this status message."""
        return {"topic": self.topic, "payload": self.payload, "run_id": self.run_id}
+
+
class LogArgs:
    """Plain value object bundling the arguments needed to configure logging.

    All attributes are straight copies of the constructor arguments; no
    validation is performed.
    """

    def __init__(self, role=None, edge_id=None, server_id=None, log_server_url=None, log_file_dir=None):
        for name, value in (("role", role), ("edge_id", edge_id), ("server_id", server_id),
                            ("log_server_url", log_server_url), ("log_file_dir", log_file_dir)):
            setattr(self, name, value)
diff --git a/python/fedml/computing/scheduler/scheduler_core/ota_upgrade.py b/python/fedml/computing/scheduler/scheduler_core/ota_upgrade.py
new file mode 100755
index 0000000000..e32f1df806
--- /dev/null
+++ b/python/fedml/computing/scheduler/scheduler_core/ota_upgrade.py
@@ -0,0 +1,99 @@
+import logging
+import os
+import time
+import traceback
+import fedml
+from fedml.computing.scheduler.comm_utils import sys_utils
+from .general_constants import GeneralConstants
+
+
class FedMLOtaUpgrade:
    """Over-the-air upgrade helper for FedML agents.

    Decides whether the running fedml package should be upgraded (either a
    version pinned by the run config, or the latest release), persists the
    in-flight job so it can resume after restart, reports the UPGRADING
    status, and performs the upgrade.
    """

    LOCAL_RUNNER_INFO_DIR_NAME = 'runner_infos'
    STATUS_IDLE = "IDLE"

    def __init__(self, edge_id=None):
        self.edge_id = edge_id
        # Release channel used when checking for/applying upgrades.
        self.version = fedml.get_env_version()

    def ota_upgrade(self, payload, request_json, status_reporter=None,
                    is_master=False, is_deploy=False):
        """Upgrade the fedml package if required, raising to force a restart.

        :param payload: raw start-run message, saved so the job restarts after upgrade.
        :param request_json: parsed start-run request; may pin force_ota/ota_version.
        :param status_reporter: optional reporter used to publish the UPGRADING status.
        :param is_master: True when running in a master/server agent.
        :param is_deploy: True when running in the deployment (model) scheduler.
        :raises Exception: always raised after a successful upgrade to trigger restart.
        """
        run_id = request_json["runId"]
        force_ota = False
        ota_version = None

        # The run config may force a specific version; any missing level of
        # the nested config simply leaves the defaults in place.
        try:
            run_config = request_json.get("run_config", None)
            parameters = run_config.get("parameters", None)
            common_args = parameters.get("common_args", None)
            force_ota = common_args.get("force_ota", False) if common_args is not None else False
            ota_version = common_args.get("ota_version", None) if common_args is not None else None
        except Exception as e:
            logging.error(
                f"Failed to get ota upgrade parameters with Exception {e}. Traceback: {traceback.format_exc()}")

        if force_ota and ota_version is not None:
            should_upgrade = ota_version != fedml.__version__
            upgrade_version = ota_version
        else:
            try:
                fedml_is_latest_version, local_ver, remote_ver = sys_utils.check_fedml_is_latest_version(self.version)
            except Exception as e:
                logging.error(f"Failed to check fedml version with Exception {e}. Traceback: {traceback.format_exc()}")
                return

            should_upgrade = not fedml_is_latest_version
            upgrade_version = remote_ver

        if not should_upgrade:
            return

        # Persist the job and report status before upgrading, so the agent can
        # resume this run when it comes back up on the new version.
        FedMLOtaUpgrade._save_upgrading_job(
            run_id, self.edge_id, payload, is_master=is_master, is_deploy=is_deploy
        )
        if status_reporter is not None:
            if is_master:
                status_reporter.report_server_id_status(
                    run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING, edge_id=self.edge_id,
                    server_id=self.edge_id, server_agent_id=self.edge_id)
            else:
                status_reporter.report_client_id_status(
                    self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, run_id=run_id)

        logging.info(f"Upgrade to version {upgrade_version} ...")

        sys_utils.do_upgrade(self.version, upgrade_version)
        raise Exception("Restarting after upgraded...")

    @staticmethod
    def process_ota_upgrade_msg():
        """Unconditionally upgrade the fedml package to the latest release."""
        os.system("pip install -U fedml")

    @staticmethod
    def _save_upgrading_job(run_id, edge_id, payload, is_master=False, is_deploy=False):
        """Persist the in-flight job with UPGRADING status to the local data interface.

        The four (is_master, is_deploy) combinations select four different data
        interfaces, but the save call itself is identical — so pick the
        interface first and make the call once (the original duplicated the
        call in all four branches).
        """
        if is_master and is_deploy:
            from ..model_scheduler.device_server_data_interface import FedMLServerDataInterface as data_interface
        elif is_master:
            from ..master.server_data_interface import FedMLServerDataInterface as data_interface
        elif is_deploy:
            from ..model_scheduler.device_client_data_interface import FedMLClientDataInterface as data_interface
        else:
            from ..slave.client_data_interface import FedMLClientDataInterface as data_interface

        data_interface.get_instance().save_started_job(
            run_id, edge_id, time.time(),
            GeneralConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING,
            GeneralConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING,
            payload)
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
new file mode 100755
index 0000000000..e2e090596d
--- /dev/null
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -0,0 +1,545 @@
+import json
+import logging
+import os
+import platform
+import shutil
+import traceback
+import urllib
+import zipfile
+from urllib.parse import urljoin, urlparse
+from ..comm_utils.constants import SchedulerConstants
+from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs
+from ..scheduler_entry.constants import Constants
+from ....core.mlops import MLOpsMetrics
+from ....core.mlops.mlops_device_perfs import MLOpsDevicePerfStats
+from ..comm_utils.yaml_utils import load_yaml_config
+from .general_constants import GeneralConstants
+from ..comm_utils.sys_utils import get_python_program
+from ..comm_utils import sys_utils
+from ....core.mlops.mlops_utils import MLOpsUtils
+from ..scheduler_core.message_center import FedMLMessageCenter
+from ..scheduler_core.status_center import FedMLStatusCenter
+from abc import ABC, abstractmethod
+
+
class RunnerError(Exception):
    """Signals that the runner was stopped."""
+
+
class RunnerCompletedError(Exception):
    """Signals that the runner has completed."""
+
+
+class FedMLSchedulerBaseJobRunner(ABC):
+
    def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id=0,
                 cuda_visible_gpu_ids_str=None, is_master_runner=False,
                 agent_data_dir=None, agent_package_download_dir=None,
                 agent_package_unzip_dir=None, agent_log_file_dir=None):
        """Shared state for master and slave job runners.

        :param args: parsed agent arguments; unique_device_id, version and
            device_id are read from it below.
        :param edge_id: id of the edge device this runner works for.
        :param request_json: the start-run request this runner will execute.
        :param agent_config: agent configuration dict (ml_ops_config etc.).
        :param run_id: id of the run to execute.
        :param cuda_visible_gpu_ids_str: GPU id string assigned to this run.
        :param is_master_runner: True when running on the master/server side.
        :param agent_data_dir: local data dir, used as default private data path.
        :param agent_package_download_dir: where job packages are downloaded.
        :param agent_package_unzip_dir: where downloaded packages are unzipped.
        :param agent_log_file_dir: directory for run log files.
        """
        self.args = args
        self.is_master_runner = is_master_runner
        # Agent-provided directory layout.
        self.agent_data_dir = agent_data_dir
        self.agent_package_download_dir = agent_package_download_dir
        self.agent_package_unzip_dir = agent_package_unzip_dir
        self.agent_log_file_dir = agent_log_file_dir
        # State for the package download progress hook.
        self.prev_download_progress = 0
        # Run process management (events/process are created when the run starts).
        self.run_process_event = None
        self.run_process_completed_event = None
        self.run_process = None
        self.running_request_json = dict()
        self.start_request_json = None
        # Identity of this runner/device.
        self.edge_id = edge_id
        self.edge_user_name = None
        self.edge_extra_url = None
        self.run_id = run_id
        self.unique_device_id = args.unique_device_id
        self.request_json = request_json
        self.version = args.version
        self.device_id = args.device_id
        self.cur_dir = os.path.split(os.path.realpath(__file__))[0]
        self.agent_config = agent_config
        # Reporters are attached later by the protocol manager.
        self.mlops_metrics = None
        self.status_reporter = None
        self.ntp_offset = MLOpsUtils.get_ntp_offset()
        self.server_id = None
        self.fedml_config_object = None
        self.package_type = SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT
        self.cuda_visible_gpu_ids_str = cuda_visible_gpu_ids_str
        self.user_name = None
        self.general_edge_id = None
        self.message_center = None
        # ${FEDSYS.*} placeholders substituted into the job config; filled per
        # run by build_dynamic_constrain_variables().
        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {
            "${FEDSYS.RUN_ID}": "",
            "${FEDSYS.PRIVATE_LOCAL_DATA}": "",
            "${FEDSYS.CLIENT_ID_LIST}": "",
            "${FEDSYS.SYNTHETIC_DATA_URL}": "",
            "${FEDSYS.IS_USING_LOCAL_DATA}": "",
            "${FEDSYS.CLIENT_NUM}": "",
            "${FEDSYS.CLIENT_INDEX}": "",
            "${FEDSYS.CLIENT_OBJECT_LIST}": "",
            "${FEDSYS.LOG_SERVER_URL}": "",
        }
+
+    def __repr__(self):
+        return "<{klass} @{id:x} {attrs}>".format(
+            klass=self.__class__.__name__,
+            id=id(self) & 0xFFFFFF,
+            attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()),
+        )
+
+    def build_dynamic_constrain_variables(self, run_id, run_config):
+        data_config = run_config.get("data_config", {})
+        server_edge_id_list = self.request_json["edgeids"]
+        local_edge_id_list = list()
+        local_edge_id_list.append(int(self.edge_id))
+        is_using_local_data = 0
+        private_data_dir = data_config.get("privateLocalData", "")
+        synthetic_data_url = data_config.get("syntheticDataUrl", "")
+        edges = self.request_json["edges"]
+        # if private_data_dir is not None \
+        #         and len(str(private_data_dir).strip(' ')) > 0:
+        #     is_using_local_data = 1
+        if private_data_dir is None or len(str(private_data_dir).strip(" ")) <= 0:
+            params_config = run_config.get("parameters", None)
+            private_data_dir = self.agent_data_dir
+        if synthetic_data_url is None or len(str(synthetic_data_url)) <= 0:
+            synthetic_data_url = private_data_dir
+
+        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.RUN_ID}"] = run_id
+        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.PRIVATE_LOCAL_DATA}"] = private_data_dir.replace(" ", "")
+        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_ID_LIST}"] = str(local_edge_id_list).replace(" ", "")
+        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.SYNTHETIC_DATA_URL}"] = synthetic_data_url.replace(" ", "")
+        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.IS_USING_LOCAL_DATA}"] = str(is_using_local_data)
+        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_NUM}"] = len(server_edge_id_list)
+        if not self.is_master_runner:
+            self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_INDEX}"] = 1
+            for cur_index, id_value in enumerate(server_edge_id_list):
+                if str(id_value) == str(self.edge_id):
+                    self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_INDEX}"] = cur_index + 1
+                    break
+        client_objects = str(json.dumps(edges))
+        client_objects = client_objects.replace(" ", "").replace("\n", "").replace('"', '\\"')
+        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_OBJECT_LIST}"] = client_objects
+        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.LOG_SERVER_URL}"] = self.agent_config["ml_ops_config"][
+            "LOG_SERVER_URL"
+        ]
+
+    @staticmethod
+    def unzip_file(zip_file, unzip_file_path) -> str:
+        if zipfile.is_zipfile(zip_file):
+            with zipfile.ZipFile(zip_file, "r") as zipf:
+                zipf.extractall(unzip_file_path)
+                unzipped_file_name = zipf.namelist()[0]
+        else:
+            raise Exception("Invalid zip file {}".format(zip_file))
+
+        return unzipped_file_name
+
    def package_download_progress(self, count, blksize, filesize):
        """urllib reporthook: log download progress in 5% steps.

        Also polls the runner stop event so a stopped run aborts the download.

        :param count: number of blocks transferred so far.
        :param blksize: block size in bytes.
        :param filesize: total file size in bytes (0 when unknown).
        """
        self.check_runner_stop_event()

        downloaded = count * blksize
        downloaded = filesize if downloaded > filesize else downloaded
        progress = (downloaded / filesize * 100) if filesize != 0 else 0
        progress_int = int(progress)
        downloaded_kb = format(downloaded / 1024, '.2f')

        # Since this hook function is stateless, keep the last logged
        # percentage on the instance to avoid printing progress repeatedly.
        if count == 0:
            self.prev_download_progress = 0
        if progress_int != self.prev_download_progress and progress_int % 5 == 0:
            self.prev_download_progress = progress_int
            logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int))
+
    def retrieve_and_unzip_package(self, package_name, package_url):
        """Download the job package and unzip it, returning the unzipped root path.

        :param package_name: package display name (currently unused; the local
            file name is derived from the URL instead).
        :param package_url: URL of the zip package to download.
        :return: full path of the first entry inside the unzip directory.
        """
        local_package_path = self.agent_package_download_dir
        os.makedirs(local_package_path, exist_ok=True)
        filename, filename_without_extension, file_extension = GeneralConstants.get_filename_and_extension(package_url)
        local_package_file = os.path.join(local_package_path, f"fedml_run_{self.run_id}_{filename_without_extension}")
        # Always re-download: remove any stale copy of this run's package.
        if os.path.exists(local_package_file):
            os.remove(local_package_file)
        # Strip the query string so only the URL path selects the file.
        package_url_without_query_path = urljoin(package_url, urlparse(package_url).path)
        urllib.request.urlretrieve(package_url_without_query_path, local_package_file,
                                   reporthook=self.package_download_progress)
        unzip_package_path = os.path.join(self.agent_package_unzip_dir,
                                          f"unzip_fedml_run_{self.run_id}_{filename_without_extension}")
        # Best-effort cleanup of a previous unzip directory.
        try:
            shutil.rmtree(unzip_package_path, ignore_errors=True)
        except Exception as e:
            logging.error(
                f"Failed to remove directory {unzip_package_path}, Exception: {e}, Traceback: {traceback.format_exc()}")
            pass

        # Using unzipped folder name
        package_dir_name = FedMLSchedulerBaseJobRunner.unzip_file(local_package_file, unzip_package_path)
        unzip_package_full_path = os.path.join(unzip_package_path, package_dir_name)

        logging.info("local_package_file {}, unzip_package_path {}, unzip file full path {}".format(
            local_package_file, unzip_package_path, unzip_package_full_path))

        return unzip_package_full_path
+
+    @abstractmethod
+    def get_download_package_info(self, packages_config=None):
+        download_package_name = packages_config.get("server", None) if self.is_master_runner \
+            else packages_config["linuxClient"]
+        download_package_url = packages_config.get("serverUrl", None) if self.is_master_runner \
+            else packages_config["linuxClientUrl"]
+        return download_package_name, download_package_url
+
    def update_local_fedml_config(self, run_id, run_config):
        """Download the run package and rewrite its fedml.yaml for this agent.

        Downloads and unzips the package referenced by
        run_config["packages_config"], substitutes the ${FEDSYS.*} constrain
        variables with realtime values, points the mqtt/s3 config paths and the
        log dir at local locations, writes the merged config back to
        conf/fedml.yaml and finally applies the dynamic arguments
        (see build_dynamic_args).

        Returns:
            (unzip_package_path, package_conf_object) tuple.
        """
        # Download the package
        packages_config = run_config["packages_config"]
        download_package_name, download_package_url = self.get_download_package_info(packages_config)
        unzip_package_path = self.retrieve_and_unzip_package(download_package_name, download_package_url)
        fedml_local_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml")

        # Load the config file to memory
        config_from_container = load_yaml_config(fedml_local_config_file)
        container_entry_file_config = config_from_container["entry_config"]
        container_dynamic_args_config = config_from_container["dynamic_args"]
        entry_file = container_entry_file_config["entry_file"]
        conf_file = container_entry_file_config["conf_file"]
        self.package_type = container_entry_file_config.get("package_type", SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT)
        full_conf_path = os.path.join(unzip_package_path, "fedml", "config", os.path.basename(conf_file))

        # Dynamically build constrain variable with realtime parameters from server
        self.build_dynamic_constrain_variables(run_id, run_config)

        # Update entry arguments value with constrain variable values with realtime parameters from server
        # currently we support the following constrain variables:
        # ${FEDSYS_RUN_ID}: a run id represented one entire Federated Learning flow
        # ${FEDSYS_PRIVATE_LOCAL_DATA}: private local data path in the Federated Learning client
        # ${FEDSYS_CLIENT_ID_LIST}: client list in one entire Federated Learning flow
        # ${FEDSYS_SYNTHETIC_DATA_URL}: synthetic data url from server,
        #                  if this value is not null, the client will download data from this URL to use it as
        #                  federated training data set
        # ${FEDSYS_IS_USING_LOCAL_DATA}: whether we use private local data as federated training data set
        # container_dynamic_args_config["data_cache_dir"] = "${FEDSYS.PRIVATE_LOCAL_DATA}"
        # Substitution only applies when the argument value STARTS with the
        # constrain variable (find(...) == 0).
        for constrain_variable_key, constrain_variable_value in self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES.items():
            for argument_key, argument_value in container_dynamic_args_config.items():
                if argument_value is not None and str(argument_value).find(constrain_variable_key) == 0:
                    replaced_argument_value = str(argument_value).replace(
                        constrain_variable_key, str(constrain_variable_value)
                    )
                    container_dynamic_args_config[argument_key] = replaced_argument_value

        # Merge all container new config sections as new config dictionary
        package_conf_object = dict()
        package_conf_object["entry_config"] = container_entry_file_config
        package_conf_object["dynamic_args"] = container_dynamic_args_config
        package_conf_object["dynamic_args"]["config_version"] = self.args.config_version
        # Re-anchor the mqtt/s3 config paths inside the unzipped package tree.
        container_dynamic_args_config["mqtt_config_path"] = os.path.join(
            unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["mqtt_config_path"])
        )
        container_dynamic_args_config["s3_config_path"] = os.path.join(
            unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["s3_config_path"])
        )
        log_file_dir = self.agent_log_file_dir
        os.makedirs(log_file_dir, exist_ok=True)
        package_conf_object["dynamic_args"]["log_file_dir"] = log_file_dir

        # Save new config dictionary to local file
        fedml_updated_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml")
        GeneralConstants.generate_yaml_doc(package_conf_object, fedml_updated_config_file)

        # Build dynamic arguments and set arguments to fedml config object
        self.build_dynamic_args(run_id, run_config, package_conf_object, unzip_package_path)

        return unzip_package_path, package_conf_object
+
    def build_dynamic_args(self, run_id, run_config, package_conf_object, base_dir):
        """Merge MLOps parameters and dynamic args into the package's conf file.

        Loads the entry conf yaml, overlays run_config["parameters"] on top of
        it, patches the comm/train/device/data/tracking sections from the
        package's dynamic args, caches the result on self.fedml_config_object
        and writes the yaml back in place.
        """
        fedml_conf_file = package_conf_object["entry_config"]["conf_file"]
        fedml_conf_file_processed = str(fedml_conf_file).replace('\\', os.sep).replace('/', os.sep)
        fedml_conf_path = os.path.join(base_dir, "fedml", "config",
                                       os.path.basename(fedml_conf_file_processed))
        fedml_conf_object = load_yaml_config(fedml_conf_path)
        # NOTE(review): run_params/job_yaml are computed but unused in this method.
        run_params = run_config.get("parameters", {})
        job_yaml = run_params.get("job_yaml", {})

        # Replace local fedml config objects with parameters from MLOps web
        parameters_object = run_config.get("parameters", None)
        if parameters_object is not None:
            # Keys already present in the conf are overridden section-by-section...
            for config_k, config_v in fedml_conf_object.items():
                parameter_v = parameters_object.get(config_k, None)
                if parameter_v is not None:
                    fedml_conf_object[config_k] = parameter_v
                    parameters_object.pop(config_k)

            # ...and any remaining parameters are appended as new sections.
            for config_k, config_v in parameters_object.items():
                fedml_conf_object[config_k] = config_v

        package_dynamic_args = package_conf_object["dynamic_args"]
        if fedml_conf_object.get("comm_args", None) is not None:
            fedml_conf_object["comm_args"]["mqtt_config_path"] = package_dynamic_args["mqtt_config_path"]
            fedml_conf_object["comm_args"]["s3_config_path"] = package_dynamic_args["s3_config_path"]
            # NOTE(review): this writes "common_args" under the "comm_args" guard
            # and raises KeyError when common_args is absent — confirm the two
            # sections always travel together in generated configs.
            fedml_conf_object["common_args"]["using_mlops"] = True
        if fedml_conf_object.get("train_args", None) is not None:
            fedml_conf_object["train_args"]["run_id"] = package_dynamic_args["run_id"]
            fedml_conf_object["train_args"]["client_id_list"] = package_dynamic_args["client_id_list"]
            fedml_conf_object["train_args"]["client_num_in_total"] = int(package_dynamic_args["client_num_in_total"])
            # NOTE(review): client_num_per_round is filled from client_num_in_total —
            # confirm a dedicated per-round value is not available in dynamic args.
            fedml_conf_object["train_args"]["client_num_per_round"] = int(package_dynamic_args["client_num_in_total"])
            fedml_conf_object["train_args"]["client_id"] = self.edge_id
            fedml_conf_object["train_args"]["server_id"] = self.request_json.get("server_id", "0")
        if fedml_conf_object.get("device_args", None) is not None:
            fedml_conf_object["device_args"]["worker_num"] = int(package_dynamic_args["client_num_in_total"])
        # fedml_conf_object["data_args"]["data_cache_dir"] = package_dynamic_args["data_cache_dir"]
        data_args = fedml_conf_object.get("data_args")
        if data_args is not None:
            data_cache_dir = fedml_conf_object["data_args"].get("data_cache_dir")
            if data_cache_dir is not None:
                # Give each edge its own cache sub-directory.
                data_cache_dir = os.path.join(data_cache_dir, str(self.edge_id))
                fedml_conf_object["data_args"]["data_cache_dir"] = data_cache_dir
        if fedml_conf_object.get("tracking_args", None) is not None:
            fedml_conf_object["tracking_args"]["log_file_dir"] = package_dynamic_args["log_file_dir"]
            fedml_conf_object["tracking_args"]["log_server_url"] = package_dynamic_args["log_server_url"]

        fedml_conf_object["dynamic_args"] = package_dynamic_args
        # Keep a copy for later lookups (e.g. containerize/job settings).
        self.fedml_config_object = fedml_conf_object.copy()
        GeneralConstants.generate_yaml_doc(fedml_conf_object, fedml_conf_path)
+
+    def callback_run_bootstrap(self, job_pid):
+        GeneralConstants.save_bootstrap_process(self.run_id, job_pid, data_dir=self.agent_data_dir)
+
    def run_bootstrap_script(self, bootstrap_cmd_list, bootstrap_script_file):
        """Execute the bootstrap commands; return True on success, else False.

        Output is streamed live by execute_commands_with_live_logs; failures
        (error output or exceptions) are logged and reported via the boolean
        return value instead of raising.
        """
        try:
            logging.info("Bootstrap commands are being executed...")
            process, error_list = GeneralConstants.execute_commands_with_live_logs(
                bootstrap_cmd_list, callback=self.callback_run_bootstrap)

            # out/err are placeholders (always None) because the output was
            # already streamed live above; the decode branches below are
            # effectively dead code kept for parity with non-live execution.
            ret_code, out, err = process.returncode, None, None
            # NOTE(review): "<= 0" lets negative return codes (signal-terminated
            # processes on POSIX) take the success path — confirm intended.
            if ret_code is None or ret_code <= 0:
                if error_list is not None and len(error_list) > 0:
                    is_bootstrap_run_ok = False
                else:
                    if out is not None:
                        out_str = sys_utils.decode_our_err_result(out)
                        if out_str != "":
                            logging.info("{}".format(out_str))

                    sys_utils.log_return_info(bootstrap_script_file, 0)

                    is_bootstrap_run_ok = True
            else:
                if err is not None:
                    err_str = sys_utils.decode_our_err_result(err)
                    if err_str != "":
                        logging.error("{}".format(err_str))

                sys_utils.log_return_info(bootstrap_script_file, ret_code)

                is_bootstrap_run_ok = False
        except Exception as e:
            logging.error(f"Bootstrap script error: Exception: {e}, Traceback: {traceback.format_exc()}")
            is_bootstrap_run_ok = False
        return is_bootstrap_run_ok
+
+    def check_runner_stop_event(self):
+        if self.run_process_event.is_set():
+            logging.info("Received stopping event.")
+            raise RunnerError("Runner stopped")
+
+        if self.run_process_completed_event.is_set():
+            logging.info("Received completed event.")
+            raise RunnerCompletedError("Runner completed")
+
+    def trigger_stop_event(self):
+        if self.run_process_event is not None:
+            self.run_process_event.set()
+
+    def trigger_completed_event(self):
+        if self.run_process_completed_event is not None:
+            self.run_process_completed_event.set()
+
+    def execute_job_task(self, unzip_package_path, entry_file_full_path, conf_file_full_path, dynamic_args_config,
+                         fedml_config_object):
+        run_config = self.request_json["run_config"]
+        run_params = run_config.get("parameters", {})
+        client_rank = self.request_json.get("client_rank", 1)
+        job_yaml = run_params.get("job_yaml", {})
+        job_yaml_default_none = run_params.get("job_yaml", None)
+        job_api_key = job_yaml.get("run_api_key", None)
+        job_api_key = job_yaml.get("fedml_run_dynamic_params", None) if job_api_key is None else job_api_key
+        assigned_gpu_ids = run_params.get("gpu_ids", None)
+        job_type = job_yaml.get("job_type", None)
+        containerize = fedml_config_object.get("containerize", None)
+        image_pull_policy = fedml_config_object.get("image_pull_policy", Constants.IMAGE_PULL_POLICY_ALWAYS)
+        job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type
+        conf_file_object = load_yaml_config(conf_file_full_path)
+        entry_args_dict = conf_file_object.get("fedml_entry_args", {})
+        entry_args = entry_args_dict.get("arg_items", None)
+        scheduler_match_info = self.request_json.get("scheduler_match_info", {})
+        if job_type == Constants.JOB_TASK_TYPE_TRAIN:
+            containerize = True if containerize is None else containerize
+
+        # Bootstrap Info
+        bootstrap_script_path, bootstrap_script_dir, bootstrap_script_file = [None] * 3
+        env_args = fedml_config_object.get("environment_args", None)
+
+        if env_args is not None:
+            bootstrap_script_file = env_args.get("bootstrap", None)
+            if bootstrap_script_file is not None:
+                bootstrap_script_file = str(bootstrap_script_file).replace('\\', os.sep).replace('/', os.sep)
+                if platform.system() == 'Windows':
+                    bootstrap_script_file = bootstrap_script_file.rstrip('.sh') + '.bat'
+                if bootstrap_script_file is not None:
+                    bootstrap_script_dir = os.path.join(unzip_package_path, "fedml",
+                                                        os.path.dirname(bootstrap_script_file))
+                    bootstrap_script_path = os.path.join(
+                        bootstrap_script_dir, bootstrap_script_dir, os.path.basename(bootstrap_script_file)
+                    )
+
+        bootstrap_cmd_list = list()
+        if bootstrap_script_path:
+            logging.info("Bootstrap commands are being generated...")
+            bootstrap_cmd_list = JobRunnerUtils.generate_bootstrap_commands(bootstrap_script_path=bootstrap_script_path,
+                                                                            bootstrap_script_dir=bootstrap_script_dir,
+                                                                            bootstrap_script_file=bootstrap_script_file)
+            logging.info(f"Generated following Bootstrap commands: {bootstrap_cmd_list}")
+
+        if not containerize:
+            if len(bootstrap_cmd_list) and not (job_type == Constants.JOB_TASK_TYPE_DEPLOY or
+                                                job_type == Constants.JOB_TASK_TYPE_SERVE):
+                bootstrapping_successful = self.run_bootstrap_script(bootstrap_cmd_list=bootstrap_cmd_list,
+                                                                     bootstrap_script_file=bootstrap_script_file)
+
+                if not bootstrapping_successful:
+                    logging.info("failed to update local fedml config.")
+                    self.check_runner_stop_event()
+                    # Send failed msg when exceptions.
+                    raise Exception(f"Failed to execute following bootstrap commands: {bootstrap_cmd_list}")
+
+                logging.info("cleanup the previous learning process and bootstrap process...")
+                GeneralConstants.cleanup_learning_process(self.request_json["runId"], data_dir=self.agent_data_dir)
+                GeneralConstants.cleanup_bootstrap_process(self.request_json["runId"], data_dir=self.agent_data_dir)
+
+        executable_interpreter = GeneralConstants.CLIENT_SHELL_PS \
+            if platform.system() == GeneralConstants.PLATFORM_WINDOWS else GeneralConstants.CLIENT_SHELL_BASH
+
+        if job_yaml_default_none is None:
+            # Generate the job executing commands for previous federated learning (Compatibility)
+            python_program = get_python_program()
+            logging.info("Run the client: {} {} --cf {} --rank {} --role client".format(
+                python_program, entry_file_full_path, conf_file_full_path, str(dynamic_args_config.get("rank", 1))))
+            rank = str(dynamic_args_config.get("rank", 1))
+            entry_command = f"{python_program} {entry_file_full_path} --cf " \
+                            f"{conf_file_full_path} --rank {rank} --role client"
+            shell_cmd_list = [entry_command]
+
+            # Run the job executing commands for previous federated learning (Compatibility)
+            process, error_list = GeneralConstants.execute_commands_with_live_logs(
+                shell_cmd_list, callback=self.callback_start_fl_job, should_write_log_file=False)
+            is_launch_task = False
+        else:
+            self.check_runner_stop_event()
+
+            if self.is_master_runner:
+                self.status_reporter.report_server_id_status(
+                    self.run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_RUNNING, edge_id=self.edge_id,
+                    server_id=self.edge_id, server_agent_id=self.edge_id)
+            else:
+                self.status_reporter.report_client_id_status(
+                    self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING, run_id=self.run_id)
+
+            # Generate the job executing commands
+            job_executing_commands = JobRunnerUtils.generate_job_execute_commands(
+                self.run_id, self.edge_id, self.version,
+                self.package_type, executable_interpreter, entry_file_full_path,
+                conf_file_object, entry_args, assigned_gpu_ids,
+                job_api_key, client_rank, scheduler_match_info=scheduler_match_info,
+                cuda_visible_gpu_ids_str=self.cuda_visible_gpu_ids_str)
+
+            if containerize is not None and containerize is True:
+                docker_args = fedml_config_object.get("docker", {})
+                docker_args = JobRunnerUtils.create_instance_from_dict(DockerArgs, docker_args)
+                try:
+                    job_executing_commands = JobRunnerUtils.generate_launch_docker_command(
+                        docker_args=docker_args,  run_id=self.run_id, edge_id=self.edge_id,
+                        unzip_package_path=unzip_package_path, executable_interpreter=executable_interpreter,
+                        entry_file_full_path=entry_file_full_path, bootstrap_cmd_list=bootstrap_cmd_list,
+                        cuda_visible_gpu_ids_str=self.cuda_visible_gpu_ids_str, image_pull_policy=image_pull_policy)
+                except Exception as e:
+                    logging.error(f"Error occurred while generating containerized launch commands. "
+                                  f"Exception: {e}, Traceback: {traceback.format_exc()}")
+                    return None, None, None
+
+                if not job_executing_commands:
+                    raise Exception("Failed to generate docker execution command")
+
+            # Run the job executing commands
+            logging.info(f"Run the client job with job id {self.run_id}, device id {self.edge_id}.")
+            process, error_list = GeneralConstants.execute_commands_with_live_logs(
+                job_executing_commands, callback=self.start_job_perf, error_processor=self.job_error_processor,
+                should_write_log_file=False if job_type == Constants.JOB_TASK_TYPE_FEDERATE else True)
+            is_launch_task = False if job_type == Constants.JOB_TASK_TYPE_FEDERATE else True
+
+        return process, is_launch_task, error_list
+
+    def callback_start_fl_job(self, job_pid):
+        GeneralConstants.save_learning_process(self.run_id, job_pid, data_dir=self.agent_data_dir)
+        self.mlops_metrics.report_sys_perf(
+            self.args, self.agent_config["mqtt_config"], job_process_id=job_pid)
+
+    def start_job_perf(self, job_pid):
+        GeneralConstants.save_learning_process(self.run_id, job_pid, data_dir=self.agent_data_dir)
+        self.mlops_metrics.report_job_perf(self.args, self.agent_config["mqtt_config"], job_pid)
+
+    def job_error_processor(self, error_list):
+        self.check_runner_stop_event()
+
+        error_str = "\n".join(error_list)
+        error_message = f"Error occurred when running the job... {error_str}"
+        logging.error(error_message)
+        raise Exception(error_message)
+
+    def start_runner_process(
+            self, run_id, edge_id, request_json,  cuda_visible_gpu_ids_str=None,
+            sender_message_queue=None, status_center_queue=None
+    ):
+        return None
+
+    @staticmethod
+    def cleanup_containers_and_release_gpus(run_id, edge_id):
+        job_type = JobRunnerUtils.get_job_type_from_run_id(run_id)
+
+        if not job_type:
+            logging.info(f"Failed to get job type from run id {run_id}. This is not an error as it would usually "
+                         f"happen when the job is not found in the database because job is already finished and "
+                         f"cleaned up. Exiting cleanup_containers_and_release_gpus.")
+            return
+
+        # Check if the job type is not "serve" or "deploy"
+        if not (job_type == SchedulerConstants.JOB_TASK_TYPE_SERVE or
+                job_type == SchedulerConstants.JOB_TASK_TYPE_DEPLOY):
+
+            # Terminate the run docker container if exists
+            container_name = JobRunnerUtils.get_run_container_name(run_id)
+            docker_client = JobRunnerUtils.get_docker_client(DockerArgs())
+            logging.info(f"Terminating the run docker container {container_name} if exists...")
+            try:
+                JobRunnerUtils.remove_run_container_if_exists(container_name, docker_client)
+            except Exception as e:
+                logging.error(f"Exception {e} occurred when terminating docker container. "
+                              f"Traceback: {traceback.format_exc()}")
+
+            # Release the GPU ids and update the GPU availability in the persistent store
+            JobRunnerUtils.get_instance().release_gpu_ids(run_id, edge_id)
+
+            # Send mqtt message reporting the new gpu availability to the backend
+            MLOpsDevicePerfStats.report_gpu_device_info(edge_id)
+
+    def rebuild_message_status_center(self, sender_message_queue, listener_message_queue, status_queue):
+        self.message_center = FedMLMessageCenter.rebuild_message_center_from_queue(
+            sender_message_queue, listener_message_queue=listener_message_queue)
+        if self.mlops_metrics is None:
+            self.mlops_metrics = MLOpsMetrics()
+        self.mlops_metrics.set_messenger(self.message_center)
+        self.mlops_metrics.run_id = self.run_id
+
+        status_center = FedMLStatusCenter.rebuild_status_center_from_queue(status_queue)
+        if self.status_reporter is None:
+            self.status_reporter = MLOpsMetrics()
+        self.status_reporter.set_messenger(status_center)
+        self.status_reporter.run_id = self.run_id
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
new file mode 100755
index 0000000000..58198b6661
--- /dev/null
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
@@ -0,0 +1,66 @@
+
+from abc import ABC, abstractmethod
+
+
class FedMLSchedulerBaseJobRunnerManager(ABC):
    """Owns the per-run job runner instances for one agent.

    Maps str(run_id) -> job runner; concrete managers decide which runner
    class to build via _generate_job_runner_instance.
    """

    def __init__(self):
        # hasattr guards keep the maps intact if __init__ runs again on a
        # shared/singleton instance.
        if not hasattr(self, "job_runners"):
            self.job_runners = dict()
        if not hasattr(self, "cloud_run_process_map"):
            # Used by master managers that track cloud-server processes.
            self.cloud_run_process_map = dict()

    @abstractmethod
    def _generate_job_runner_instance(
            self, args, run_id=None, request_json=None, agent_config=None, edge_id=None
    ):
        """Build and return the concrete job runner for one run."""
        return None

    def start_job_runner(
            self, run_id, request_json, args=None, edge_id=None, is_server_job=False,
            sender_message_queue=None, listener_message_queue=None, status_center_queue=None,
            should_start_cloud_server=False, use_local_process_as_cloud_server=False,
            cuda_visible_gpu_ids_str=None
    ):
        """Create the runner for *run_id* and start its run process.

        NOTE(review): is_server_job, should_start_cloud_server,
        use_local_process_as_cloud_server and cuda_visible_gpu_ids_str are
        accepted but not forwarded here — presumably consumed by subclass
        overrides; confirm.  Also the runner is called as
        start_runner_process(run_id, request_json, edge_id=...), so overrides
        must accept that calling order.
        """
        run_id_str = str(run_id)
        self.job_runners[run_id_str] = self._generate_job_runner_instance(
            args, run_id=run_id, request_json=request_json,
            agent_config=args.agent_config, edge_id=edge_id,
        )
        self.job_runners[run_id_str].start_runner_process(
            run_id, request_json, edge_id=edge_id,
            sender_message_queue=sender_message_queue,
            listener_message_queue=listener_message_queue,
            status_center_queue=status_center_queue
        )

    def stop_job_runner(self, run_id):
        """Ask the runner of *run_id* to stop (no-op when unknown)."""
        run_id_str = str(run_id)
        if self.job_runners.get(run_id_str, None) is not None:
            self.job_runners[run_id_str].trigger_stop_event()

    def complete_job_runner(self, run_id):
        """Flag the runner of *run_id* as completed (no-op when unknown)."""
        run_id_str = str(run_id)
        if self.job_runners.get(run_id_str, None) is not None:
            self.job_runners[run_id_str].trigger_completed_event()

    def put_run_edge_device_info_to_queue(self, run_id, device_info):
        """Forward an edge device-info message to the matching runner."""
        run_id_str = str(run_id)
        if self.job_runners.get(run_id_str, None) is not None:
            self.job_runners[run_id_str].put_run_edge_device_info_to_queue(run_id, device_info)

    def get_runner_process(self, run_id, is_cloud_server=False):
        """Return the run process of *run_id* or None.

        is_cloud_server is unused here; subclasses that manage cloud-server
        processes override this.
        """
        run_id_str = str(run_id)

        if self.job_runners.get(run_id_str, None) is None:
            return None

        return self.job_runners[run_id_str].run_process

    def get_all_runner_pid_map(self):
        """Return {run_id: pid} for every runner with a live run process."""
        process_id_dict = dict()
        for run_id, runner in self.job_runners.items():
            if runner.run_process is not None:
                process_id_dict[str(run_id)] = runner.run_process.pid

        return process_id_dict
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
new file mode 100755
index 0000000000..4a0c950655
--- /dev/null
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
@@ -0,0 +1,260 @@
+
+import json
+import logging
+import multiprocessing
+import sys
+import time
+import traceback
+import uuid
+import fedml
+from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
+from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager
+from ....core.mlops.mlops_metrics import MLOpsMetrics
+from ..comm_utils import sys_utils
+from ..scheduler_core.message_center import FedMLMessageCenter
+from ..scheduler_core.status_center import FedMLStatusCenter
+from .account_manager import FedMLAccountManager
+from .general_constants import GeneralConstants
+from abc import ABC, abstractmethod
+
+
+class FedMLSchedulerBaseProtocolManager(FedMLMessageCenter, FedMLStatusCenter, ABC):
+
+    def __init__(self, args, agent_config=None, is_master=False):
+        FedMLMessageCenter.__init__(self)
+        FedMLStatusCenter.__init__(self)
+        self.request_json = None
+        self.version = fedml.get_env_version()
+        self.args = args
+        self.is_master_agent = is_master
+        self.message_status_runner = None
+        self.message_center = None
+        self.status_center = None
+        self.message_center_name = "master_agent" if not is_master else "slave_agent"
+        self.run_id = None
+        self.edge_id = args.edge_id
+        self.general_edge_id = None
+        self.server_agent_id = args.edge_id
+        self.current_device_id = args.current_device_id
+        self.unique_device_id = args.unique_device_id
+        self.agent_config = agent_config
+        self.topic_active = None
+        self.topic_last_will = None
+        self.communication_mgr = None
+        self.subscribed_topics = list()
+        self.mlops_metrics = None
+        self.status_reporter = None
+        self.user_name = args.user_name
+
+        if multiprocessing.get_start_method() != "fork":
+            # force all platforms (Windows/Linux/macOS) to use the same way (fork) for multiprocessing
+            multiprocessing.set_start_method("fork", force=True)
+
+    def generate_topics(self):
+        # generate the subscribed topics.
+        self.subscribed_topics.clear()
+        # self.subscribed_topics.append(self.topic_start_train)
+
+    def add_protocol_handler(self):
+        # Add the message listeners for all topics, the following is an example.
+        # self.add_message_listener(self.topic_start_train, self.callback_start_train)
+        pass
+
    def initialize(self):
        """One-time wiring of MQTT, message center and status center.

        Order matters: topics and handlers are declared first, then the sender
        message center and status center are started, then the listener loop;
        MQTT connect happens only after _init_extra_items() so handlers are
        ready before the first message arrives.
        """
        # Generate the message topics
        self.generate_topics()

        # Setup MQTT connection.  The client id embeds user/device/uuid; the
        # last will marks this edge OFFLINE if the connection drops uncleanly.
        self.communication_mgr = MqttManager(
            self.agent_config["mqtt_config"]["BROKER_HOST"],
            self.agent_config["mqtt_config"]["BROKER_PORT"],
            self.agent_config["mqtt_config"]["MQTT_USER"],
            self.agent_config["mqtt_config"]["MQTT_PWD"],
            self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"],
            f"FedML_Agent_Daemon_@{self.user_name}@_@{self.current_device_id}@_@{str(uuid.uuid4())}@",
            self.topic_last_will,
            json.dumps({"ID": self.edge_id, "status": GeneralConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE})
        )

        # Add the message listeners for all topics
        self.add_protocol_handler()

        # Start the message center to process edge related messages.
        self.setup_message_center()

        # Start the status center to process edge related status.
        self.start_status_listener_center()

        # Start the message center for listener
        self.start_listener(sender_message_queue=self.message_center.get_sender_message_queue(),
                            agent_config=self.agent_config,
                            message_center_name=self.message_center_name)

        # Init extra items, e.g. database, recovery, etc.
        self._init_extra_items()

        # Setup MQTT connected listener
        self.communication_mgr.add_connected_listener(self.on_agent_communication_connected)
        self.communication_mgr.add_disconnected_listener(self.on_agent_communication_disconnected)
        self.communication_mgr.connect()
+
    def start(self):
        """Run the MQTT message loop until stopped; never returns normally.

        The upgrade path raises an exception whose text is exactly
        "Restarting after upgraded..." to request a restart; any other
        exception is logged as a crash.  In all cases the login-failed marker
        is written, resources are released and the login processes are cleaned
        up before exiting with status 1 (the daemon wrapper handles restart).
        """
        # Start MQTT message loop
        try:
            self.communication_mgr.loop_forever()
        except Exception as e:
            if str(e) == "Restarting after upgraded...":
                logging.info("Restarting after upgraded...")
            else:
                logging.info("Server tracing: {}".format(traceback.format_exc()))

        finally:
            FedMLAccountManager.write_login_failed_file(is_client=not self.is_master_agent)

            self.stop()

            # Give in-flight MQTT/cleanup a moment before killing login processes.
            time.sleep(5)
            sys_utils.cleanup_all_fedml_server_login_processes(
                GeneralConstants.MASTER_LOGIN_PROGRAM if self.is_master_agent else GeneralConstants.SLAVE_LOGIN_PROGRAM,
                clean_process_group=False)
            sys.exit(1)
+
+    def stop(self):
+        if self.communication_mgr is not None:
+            # noinspection PyBroadException
+            try:
+                for topic in self.subscribed_topics:
+                    self.communication_mgr.unsubscribe_msg(topic)
+            except Exception:
+                pass
+
+            self.communication_mgr.loop_stop()
+            self.communication_mgr.disconnect()
+
+        self.release_message_center()
+
+    @abstractmethod
+    def _init_extra_items(self):
+        pass
+
    def on_agent_communication_connected(self, mqtt_client_object):
        """MQTT on-connect hook: wire the passthrough listener, subscribe, go active."""
        # Setup MQTT message passthrough listener for all messages
        self.communication_mgr.add_message_passthrough_listener(self.listener_message_passthrough_dispatch_center)

        # Subscribe topics for starting train, stopping train and fetching client status.
        for topic in self.subscribed_topics:
            self.communication_mgr.subscribe_msg(topic)

        # Broadcast the first active message.
        self.send_agent_active_msg(self.edge_id)
        if self.general_edge_id is not None:
            self.send_agent_active_msg(self.general_edge_id)

        # Echo results
        MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout()
        self.print_connected_info()
        # NOTE(review): enable=True here re-enables stdout logging that the call
        # above already turned on; this looks like it was meant to be
        # enable=False to restore the previous log destination after printing
        # the banner — confirm against MLOpsRuntimeLog's semantics.
        MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout(enable=True)
+
+    @abstractmethod
+    def print_connected_info(self):
+        print("\nCongratulations, your device is connected to the FedML MLOps platform successfully!")
+        print(
+            "Your FedML Edge ID is " + str(self.edge_id) + ", unique device ID is "
+            + str(self.unique_device_id)
+        )
+
    def on_agent_communication_disconnected(self, mqtt_client_object):
        # MQTT disconnected callback; intentionally a no-op in the base agent.
        pass
+
+    def setup_message_center(self):
+        if self.message_center is not None:
+            return
+
+        self.message_center = FedMLMessageCenter(agent_config=self.agent_config)
+        self.message_center.start_sender(message_center_name=self.message_center_name)
+
+        if self.mlops_metrics is None:
+            self.mlops_metrics = MLOpsMetrics()
+        self.mlops_metrics.set_messenger(self)
+        self.mlops_metrics.run_id = self.run_id
+        self.mlops_metrics.edge_id = self.edge_id
+        self.mlops_metrics.server_agent_id = self.server_agent_id
+
    def send_message_json(self, topic, payload):
        # Delegate to the message center so the message goes through its sender queue.
        self.message_center.send_message_json(topic, payload)
+
+    def rebuild_message_center(self, message_center_queue):
+        self.message_center = FedMLMessageCenter(sender_message_queue=message_center_queue)
+
+        if self.mlops_metrics is None:
+            self.mlops_metrics = MLOpsMetrics()
+        self.mlops_metrics.set_messenger(self)
+        self.mlops_metrics.run_id = self.run_id
+        self.mlops_metrics.edge_id = self.edge_id
+        self.mlops_metrics.server_agent_id = self.server_agent_id
+
+    def release_message_center(self):
+        try:
+            if self.message_center is not None:
+                self.message_center.stop()
+                self.message_center = None
+
+        except Exception as e:
+            logging.error(
+                f"Failed to release slave communication manager with Exception {e}. "
+                f"Traceback: {traceback.format_exc()}")
+            pass
+
+    def start_status_listener_center(self):
+        self.start_status_center(
+            sender_message_center_queue=self.message_center.get_sender_message_queue(),
+            listener_message_center_queue=self.get_listener_message_queue(),
+            is_slave_agent=not self.is_master_agent
+        )
+
+        if self.status_reporter is None:
+            self.status_reporter = MLOpsMetrics()
+        self.status_reporter.set_messenger(self, send_message_func=self.send_status_message)
+        self.status_reporter.run_id = self.run_id
+        self.status_reporter.edge_id = self.edge_id
+        self.status_reporter.server_agent_id = self.server_agent_id
+
+    def rebuild_status_center(self, status_center_queue):
+        self.status_center = FedMLStatusCenter(message_queue=status_center_queue)
+
+        if self.status_reporter is None:
+            self.status_reporter = MLOpsMetrics()
+        self.status_reporter.set_messenger(self.status_center, send_message_func=self.status_center.send_status_message)
+        self.status_reporter.run_id = self.run_id
+        self.status_reporter.edge_id = self.edge_id
+        self.status_reporter.server_agent_id = self.server_agent_id
+
    @abstractmethod
    def generate_protocol_manager(self):
        # Generate the protocol manager instance and set the attribute values.
        # Subclasses must override; the base implementation returns None.
        return None
+
+    def get_message_runner(self):
+        if self.message_status_runner is not None:
+            return self.message_status_runner
+
+        self.message_status_runner = self.generate_protocol_manager()
+        self.message_status_runner.status_queue = self.get_status_queue()
+
+        return self.message_status_runner
+
+    def get_status_runner(self):
+        if self.message_status_runner is None:
+            self.get_message_runner()
+            if self.message_status_runner is not None:
+                self.message_status_runner.sender_message_queue = self.message_center.get_sender_message_queue()
+
+        if self.message_status_runner is not None:
+            self.message_status_runner.sender_message_queue = self.message_center.get_sender_message_queue()
+            return self.message_status_runner
+
+        return None
+
+    def send_agent_active_msg(self, edge_id):
+        active_msg = {"ID": edge_id, "status": GeneralConstants.MSG_MLOPS_SERVER_STATUS_IDLE}
+        self.message_center.send_message_json(self.topic_active, json.dumps(active_msg))
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py
new file mode 100755
index 0000000000..569f4d9257
--- /dev/null
+++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py
@@ -0,0 +1,410 @@
+import logging
+import time
+
+from ..slave.client_constants import ClientConstants
+from ..master.server_constants import ServerConstants
+from enum import Enum, unique
+import multiprocessing
+from multiprocessing import Process, Queue
+import queue
+from .message_common import FedMLMessageEntity, FedMLStatusEntity
+from .message_center import FedMLMessageCenter
+import traceback
+from .status_manager_protocols import FedMLStatusManager
+from .compute_cache_manager import ComputeCacheManager
+
+
@unique
class JobStatus(Enum):
    """Lifecycle states of a job as exchanged across the scheduler."""

    STATUS_OFFLINE = "OFFLINE"
    STATUS_PROVISIONING = "PROVISIONING"
    STATUS_IDLE = "IDLE"
    UPGRADING = "UPGRADING"
    STARTING = "STARTING"
    STATUS_RUNNING = "RUNNING"
    STATUS_STOPPING = "STOPPING"
    STATUS_KILLED = "KILLED"
    STATUS_FAILED = "FAILED"
    STATUS_FINISHED = "FINISHED"
    STATUS_EXCEPTION = "EXCEPTION"

    def __str__(self):
        return self.value

    @classmethod
    def get_job_enum_from_str(cls, job_status_str: str):
        """Map a raw status string to its enum member; unknown strings fall back to STATUS_OFFLINE."""
        for member in cls:
            if member.value == job_status_str:
                return member
        return cls.STATUS_OFFLINE

    @staticmethod
    def is_job_completed(job_status_str: str):
        """Return True when the status string denotes a terminal state."""
        terminal_values = (
            JobStatus.STATUS_FINISHED.value,
            JobStatus.STATUS_FAILED.value,
            JobStatus.STATUS_KILLED.value,
            JobStatus.STATUS_EXCEPTION.value,
        )
        return job_status_str in terminal_values
+
+
@unique
class DeviceStatus(Enum):
    """Lifecycle states of a device as exchanged across the scheduler."""

    STATUS_OFFLINE = "OFFLINE"
    STATUS_PROVISIONING = "PROVISIONING"
    STATUS_IDLE = "IDLE"
    STATUS_UPGRADING = "UPGRADING"
    STATUS_QUEUED = "QUEUED"
    STATUS_INITIALIZING = "INITIALIZING"
    STATUS_TRAINING = "TRAINING"
    STATUS_RUNNING = "RUNNING"
    STATUS_STOPPING = "STOPPING"
    STATUS_KILLED = "KILLED"
    STATUS_FAILED = "FAILED"
    STATUS_EXCEPTION = "EXCEPTION"
    STATUS_FINISHED = "FINISHED"

    def __str__(self):
        return self.value

    @classmethod
    def get_device_enum_from_str(cls, device_status_str: str):
        """Map a raw status string to its enum member; unknown strings fall back to STATUS_OFFLINE."""
        return next((member for member in cls if member.value == device_status_str),
                    cls.STATUS_OFFLINE)
+
+
class FedMLStatusCenter(object):
    """Central dispatcher for job and device status messages.

    A dedicated process drains ``status_queue`` and routes each message, by
    topic prefix, to a per-run FedMLStatusManager. The class also keeps an
    in-memory view of the entire-job status and per-device statuses on both
    master and slave sides. Several methods are stubs meant to be overridden
    by subclasses (get_status_runner, rebuild_message_center,
    rebuild_status_center, the status_center_process_* hooks).
    """

    # Topic prefixes/suffixes used to classify messages pulled off the queue.
    TOPIC_MASTER_STATUS_PREFIX = "fl_server/flserver_agent_"
    TOPIC_SLAVE_STATUS_PREFIX = "fl_client/flclient_agent_"
    TOPIC_SLAVE_STATUS_TO_MLOPS_PREFIX = "fl_run/fl_client/mlops/status"
    TOPIC_SLAVE_JOB_LAUNCH_PREFIX = "flserver_agent/"
    TOPIC_SLAVE_JOB_LAUNCH_SUFFIX = "/start_train"
    TOPIC_SLAVE_JOB_STOP_PREFIX = "flserver_agent/"
    TOPIC_SLAVE_JOB_STOP_SUFFIX = "/stop_train"

    def __init__(self, message_queue=None):
        self.status_queue = message_queue
        self.job_status_in_slave = dict()    # device_id -> transitioned job status (slave side)
        self.entire_job_status = None        # overall job status, set from master reports
        self.job_status_in_master = dict()   # device_id -> transitioned job status (master side)
        self.slave_devices_status = dict()   # device_id -> device status (slave side)
        self.master_devices_status = dict()  # device_id -> device status (master side)
        self.status_center_process = None
        self.status_event = None             # multiprocessing.Event that stops the dispatcher
        self.status_sender_message_center_queue = None
        self.status_listener_message_center_queue = None
        self.status_message_center = None
        self.status_manager_instance = None
        self.status_runner = None

    def __repr__(self):
        return "<{klass} @{id:x} {attrs}>".format(
            klass=self.__class__.__name__,
            id=id(self) & 0xFFFFFF,
            attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()),
        )

    def add_job_status_in_slave(self, device_id, status):
        # Record the slave-side job status after applying the transition rules.
        self.job_status_in_slave[device_id] = self._status_transition(status)

    def add_job_status_in_master(self, device_id, status):
        # Record the master-side job status after applying the transition rules.
        self.job_status_in_master[device_id] = self._status_transition(status)

    def set_entire_job_status(self, status):
        self.entire_job_status = status

    def add_slave_device_status(self, device_id, status):
        self.slave_devices_status[device_id] = self._status_transition(status)

    def add_master_device_status(self, device_id, status):
        self.master_devices_status[device_id] = self._status_transition(status)

    def get_job_status_in_slave(self, device_id):
        return self.job_status_in_slave.get(device_id, None)

    def get_job_status_in_master(self, device_id):
        return self.job_status_in_master.get(device_id, None)

    def get_entire_job_status(self):
        return self.entire_job_status

    def get_slave_device_status(self, device_id):
        return self.slave_devices_status.get(device_id, None)

    def get_master_device_status(self, device_id):
        return self.master_devices_status.get(device_id, None)

    def _status_transition(self, status):
        """Coerce an incoming status once the entire job already terminated.

        If the whole job is FAILED or FINISHED, any non-terminal incoming
        status is rewritten to KILLED; terminal statuses pass through
        unchanged. With no entire-job status yet, the status is returned as-is.
        """
        transition_status = status
        if self.entire_job_status is not None:
            if self.entire_job_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \
                    self.entire_job_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED:
                if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \
                        status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \
                        status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED:
                    transition_status = status
                else:
                    transition_status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED

        return transition_status

    def get_status_runner(self):
        # Stub: subclasses return the object whose run_status_dispatcher* methods
        # execute in the status center process.
        return None

    def start_status_center(self, sender_message_center_queue=None,
                            listener_message_center_queue=None, is_slave_agent=False):
        """Spawn the status center process running the appropriate dispatcher loop.

        NOTE(review): relies on get_status_runner() being overridden — the base
        implementation returns None, which would fail on attribute access here.
        """
        self.status_queue = Queue()
        self.status_event = multiprocessing.Event()
        self.status_event.clear()
        self.status_sender_message_center_queue = sender_message_center_queue
        self.status_listener_message_center_queue = listener_message_center_queue
        self.status_runner = self.get_status_runner()
        target_func = self.status_runner.run_status_dispatcher if not is_slave_agent else \
            self.status_runner.run_status_dispatcher_in_slave
        self.status_center_process = Process(
            target=target_func, args=(
                self.status_event, self.status_queue, self.status_sender_message_center_queue,
                self.status_listener_message_center_queue
            )
        )

        self.status_center_process.start()

    def check_message_stop_event(self):
        # Raise to unwind the dispatcher loop when a stop has been requested.
        if self.status_event is not None and self.status_event.is_set():
            logging.info("Received status center stopping event.")
            raise StatusCenterStoppedException("Status center stopped (for sender)")

    def send_message(self, topic, payload, run_id=None):
        message_entity = FedMLMessageEntity(topic=topic, payload=payload, run_id=run_id)
        self.status_queue.put(message_entity.get_message_body())

    def send_message_json(self, topic, payload):
        self.send_message(topic, payload)

    def send_status_message(self, topic, payload):
        message_entity = FedMLMessageEntity(topic=topic, payload=payload)
        self.status_queue.put(message_entity.get_message_body())

    def get_status_queue(self):
        return self.status_queue

    def status_center_process_master_status(self, topic, payload):
        # Stub: overridden where master status handling is implemented.
        pass

    def status_center_process_slave_status(self, topic, payload):
        # Stub: overridden where slave status handling is implemented.
        pass

    def rebuild_message_center(self, message_center_queue):
        # Stub: overridden to reattach the message center inside the child process.
        pass

    def rebuild_status_center(self, status_queue):
        # Stub: overridden to reattach the status center inside the child process.
        pass

    @staticmethod
    def save_job_status(run_id, status):
        # Persist the entire-job status through the compute cache (Redis-backed).
        ComputeCacheManager.get_instance().set_redis_params()
        ComputeCacheManager.get_instance().get_status_cache().save_job_status(run_id, status)

    @staticmethod
    def save_device_status_in_job(run_id, device_id, status):
        # Persist a device's in-job status through the compute cache (Redis-backed).
        ComputeCacheManager.get_instance().set_redis_params()
        ComputeCacheManager.get_instance().get_status_cache().save_device_status_in_job(run_id, device_id, status)

    def run_status_dispatcher(self, status_event, status_queue,
                              sender_message_center_queue,
                              listener_message_center_queue):
        """Dispatcher loop run inside the status center process on the master side.

        Drains status_queue, routing master/slave status messages to a per-run
        FedMLStatusManager and persisting the resulting statuses. Exits when
        the stop event is set.
        """
        # Save the parameters
        self.status_event = status_event
        self.status_queue = status_queue
        self.status_sender_message_center_queue = sender_message_center_queue
        self.status_listener_message_center_queue = listener_message_center_queue

        # Rebuild the message center and status center in this process.
        message_center = None
        if sender_message_center_queue is not None:
            self.rebuild_message_center(sender_message_center_queue)
            message_center = FedMLMessageCenter(
                sender_message_queue=sender_message_center_queue,
                listener_message_queue=listener_message_center_queue
            )
            self.rebuild_status_center(status_queue)

        # One status manager instance per run id.
        status_manager_instances = dict()

        while True:
            message_entity = None

            # Check if we should stop status dispatcher
            try:
                self.check_message_stop_event()
            except StatusCenterStoppedException:
                break

            # Dispatch status messages.
            # noinspection PyBroadException
            try:
                # Poll the queue without blocking. (Queue.get ignores `timeout`
                # when block=False, so no timeout is passed.)
                try:
                    message_body = status_queue.get(block=False)
                except queue.Empty:
                    message_body = None  # Queue is empty; idle briefly below.
                if message_body is None:
                    time.sleep(0.1)
                    continue

                # Build message and status entity
                message_entity = FedMLMessageEntity(message_body=message_body)
                status_entity = FedMLStatusEntity(status_msg_body=message_body)

                # Generate (or refresh) the status manager instance for this run.
                if status_manager_instances.get(status_entity.run_id) is None:
                    status_manager_instances[status_entity.run_id] = FedMLStatusManager(
                        run_id=status_entity.run_id, edge_id=status_entity.edge_id, status_center=self,
                        message_center=message_center)
                else:
                    status_manager_instances[status_entity.run_id].edge_id = status_entity.edge_id

                # Process the master and slave status.
                if message_entity.topic.startswith(FedMLStatusCenter.TOPIC_MASTER_STATUS_PREFIX):
                    # Process the job status
                    status_manager_instances[status_entity.run_id].status_center_process_master_status(
                        message_entity.topic, message_entity.payload)

                    # Save the job status
                    FedMLStatusCenter.save_job_status(status_entity.run_id, self.get_entire_job_status())

                elif message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_STATUS_PREFIX):
                    # Process the slave device status
                    status_manager_instances[status_entity.run_id].status_center_process_slave_status(
                        message_entity.topic, message_entity.payload)

                    # Save the device status in job
                    FedMLStatusCenter.save_device_status_in_job(status_entity.run_id, status_entity.edge_id,
                                                                self.get_job_status_in_slave(status_entity.edge_id))

            except Exception:
                if message_entity is not None:
                    logging.info(
                        f"Failed to process the status with topic {message_entity.topic}, "
                        f"payload {message_entity.payload}, {traceback.format_exc()}")
                else:
                    logging.info(f"Failed to process the status: {traceback.format_exc()}")

    def run_status_dispatcher_in_slave(self, status_event, status_queue,
                                       sender_message_center_queue,
                                       listener_message_center_queue):
        """Dispatcher loop run inside the status center process on the slave side.

        Forwards slave statuses to the master/MLOps and tracks job launch/stop
        messages. Exits when the stop event is set.
        """
        # Save the parameters
        self.status_event = status_event
        self.status_queue = status_queue
        self.status_sender_message_center_queue = sender_message_center_queue
        self.status_listener_message_center_queue = listener_message_center_queue

        # Rebuild the message center and status center in this process.
        message_center = None
        if sender_message_center_queue is not None:
            self.rebuild_message_center(sender_message_center_queue)
            message_center = FedMLMessageCenter(
                sender_message_queue=sender_message_center_queue,
                listener_message_queue=listener_message_center_queue
            )
            self.rebuild_status_center(status_queue)

        # One status manager instance per run id, plus launch-message bookkeeping.
        status_manager_instances = dict()
        job_launch_message_map = dict()

        while True:
            message_entity = None

            # Check if we should stop status dispatcher
            try:
                self.check_message_stop_event()
            except StatusCenterStoppedException:
                break

            # Dispatch status messages.
            # noinspection PyBroadException
            try:
                # Poll the queue without blocking (timeout is ignored with block=False).
                try:
                    message_body = status_queue.get(block=False)
                except queue.Empty:
                    message_body = None  # Queue is empty; idle briefly below.
                if message_body is None:
                    time.sleep(0.1)
                    continue

                # Build message and status entity
                message_entity = FedMLMessageEntity(message_body=message_body)
                status_entity = FedMLStatusEntity(status_msg_body=message_body)

                # Generate (or refresh) the status manager instance for this run.
                if status_manager_instances.get(status_entity.run_id) is None:
                    status_manager_instances[status_entity.run_id] = FedMLStatusManager(
                        run_id=status_entity.run_id, edge_id=status_entity.edge_id, status_center=self,
                        message_center=message_center)
                else:
                    status_manager_instances[status_entity.run_id].edge_id = status_entity.edge_id

                # Process the slave status
                if message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_STATUS_PREFIX):
                    # Report the slave status to master
                    status_manager_instances[status_entity.run_id]. \
                        status_center_process_slave_status_to_master_in_slave_agent(
                        message_entity.topic, message_entity.payload
                    )
                elif message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_STATUS_TO_MLOPS_PREFIX):
                    # Report slave status to mlops (Active/IDLE message)
                    status_manager_instances[status_entity.run_id]. \
                        status_center_process_slave_status_to_mlops_in_slave_agent(
                        message_entity.topic, message_entity.payload
                    )
                elif (message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_LAUNCH_PREFIX) and
                      message_entity.topic.endswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_LAUNCH_SUFFIX)):
                    # Async request the job status from master when launching the job
                    job_launch_message_map[status_entity.run_id] = {"topic": message_entity.topic,
                                                                    "payload": message_entity.payload}
                    status_manager_instances[status_entity.run_id]. \
                        status_center_request_job_status_from_master_in_slave_agent(
                        message_entity.topic, message_entity.payload
                    )
                elif (message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_STOP_PREFIX) and
                      message_entity.topic.endswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_STOP_SUFFIX)):
                    # Cleanup when stopped the job
                    if job_launch_message_map.get(status_entity.run_id, None) is not None:
                        job_launch_message_map.pop(status_entity.run_id)

            except Exception:
                if message_entity is not None:
                    logging.info(
                        f"Failed to process the status with topic {message_entity.topic}, "
                        f"payload {message_entity.payload}, {traceback.format_exc()}")
                else:
                    logging.info(f"Failed to process the status: {traceback.format_exc()}")

    def register_job_launch_message(self, topic, payload):
        # Enqueue a job-launch message so the slave dispatcher can track it.
        message_entity = FedMLMessageEntity(topic=topic, payload=payload)
        self.status_queue.put(message_entity.get_message_body())

    def register_job_stop_message(self, topic, payload):
        # Enqueue a job-stop message so the slave dispatcher can clean up tracking.
        message_entity = FedMLMessageEntity(topic=topic, payload=payload)
        self.status_queue.put(message_entity.get_message_body())

    @staticmethod
    def rebuild_status_center_from_queue(status_queue):
        # Wrap an existing status queue in a fresh FedMLStatusCenter.
        status_center = FedMLStatusCenter(message_queue=status_queue)
        return status_center
+
+
class StatusCenterStoppedException(Exception):
    """ Status center stopped: raised inside the dispatcher loops when the stop event is set. """
    pass
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
new file mode 100755
index 0000000000..06b222cfd1
--- /dev/null
+++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
@@ -0,0 +1,303 @@
+import json
+import logging
+import os
+import shutil
+from os import listdir
+
+from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
+from ....core.mlops.mlops_metrics import MLOpsMetrics
+from ..slave.client_constants import ClientConstants
+from ..master.server_constants import ServerConstants
+from ..master.server_data_interface import FedMLServerDataInterface
+from .message_common import LogArgs
+from .general_constants import GeneralConstants
+
+
+class FedMLStatusManager(object):
+    def __init__(self, run_id=None, edge_id=None, server_id=None,
+                 edge_id_list=None, running_scheduler_contract=None,
+                 status_center=None, message_center=None):
+        self.run_id = run_id
+        self.edge_id = edge_id
+        self.server_id = server_id
+        self.edge_id_list = edge_id_list
+        self.client_agent_active_list = dict()
+        self.running_scheduler_contract = running_scheduler_contract if running_scheduler_contract is not None else dict()
+        self.message_reporter = MLOpsMetrics()
+        self.message_reporter.set_messenger(message_center)
+        self.status_reporter = MLOpsMetrics()
+        self.status_reporter.set_messenger(status_center, send_message_func=status_center.send_status_message)
+        self.status_center = status_center
+        self.message_center = message_center
+        self.log_args = LogArgs(role="server", edge_id=self.edge_id,
+                                server_id=self.server_id, log_file_dir=ServerConstants.get_log_file_dir())
+
+    def __repr__(self):
+        return "<{klass} @{id:x} {attrs}>".format(
+            klass=self.__class__.__name__,
+            id=id(self) & 0xFFFFFF,
+            attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()),
+        )
+
+    def process_job_completed_status(self, master_id, status):
+        # Stop the system performance monitor
+        try:
+            self.message_reporter.stop_sys_perf()
+        except Exception as ex:
+            pass
+
+        # Stop the job process
+        ServerConstants.cleanup_learning_process(self.run_id)
+        ServerConstants.cleanup_bootstrap_process(self.run_id)
+
+        # Remove the package download directory.
+        try:
+            local_package_path = ServerConstants.get_package_download_dir()
+            for package_file in listdir(local_package_path):
+                if os.path.basename(package_file).startswith("run_" + str(self.run_id)):
+                    shutil.rmtree(os.path.join(local_package_path, package_file), ignore_errors=True)
+        except Exception as e:
+            pass
+
+        # Stop log processor for current run
+        MLOpsRuntimeLogDaemon.get_instance(self.log_args).stop_log_processor(self.run_id, master_id)
+
+        # RunProcessUtils.kill_process(cloud_server_process.pid)
+        # self.stop_cloud_server()
+        # self.remove_listener_for_run_metrics(self.run_id)
+        # self.remove_listener_for_run_logs(self.run_id)
+
    def process_job_exception_status(self, master_id, status):
        # Send the exception status to slave devices so they stop their local runs.
        # NOTE(review): report_exception_status is defined outside this view —
        # confirm it accepts (edge_id_list, run_id=..., server_id=..., status=...).
        self.report_exception_status(
            self.edge_id_list, run_id=self.run_id, server_id=master_id,
            status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)

        # Save the job status to local storage
        FedMLServerDataInterface.get_instance().save_job_status(self.run_id, master_id, status, status)
+
    def process_job_running_status(self, master_id, status):
        # Forward the running status, including the scheduler contract payload, to MLOps.
        self.message_reporter.report_server_training_status(
            self.run_id, status, edge_id=master_id, running_json=self.running_scheduler_contract)
+
+    def status_center_process_master_status(self, topic, payload):
+        request_json = json.loads(payload)
+        is_retain = request_json.get("is_retain", False)
+        if is_retain:
+            return
+        run_id = request_json["run_id"]
+        status = request_json["status"]
+        edge_id = request_json["edge_id"]
+        server_id = request_json.get("server_id", None)
+        run_id_str = str(run_id)
+
+        # Process the job status
+        if status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED:
+            self.process_job_completed_status(server_id, status)
+        elif status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED:
+            self.process_job_completed_status(server_id, status)
+        elif status == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED:
+            self.process_job_completed_status(server_id, status)
+        elif status == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION:
+            self.process_job_exception_status(server_id, status)
+        else:
+            self.process_job_running_status(server_id, status)
+
+        # Process the consensus status
+        self.process_job_status_consensus(run_id, server_id, status)
+
    def process_job_status_consensus(self, run_id, master_id, status):
        """Reconcile the overall job status with each device's status and broadcast the result."""
        # Set the master status in the job and entire job status
        self.status_center.set_entire_job_status(status)
        self.status_center.add_job_status_in_master(master_id, status)
        # Re-read the (possibly transitioned) job status after recording it.
        status = self.status_center.get_entire_job_status()

        # Set the device status based on the job status
        edge_id_status_dict = self.client_agent_active_list.get(f"{run_id}", {})
        for edge_id_item, edge_status_item in edge_id_status_dict.items():
            # The "server" key tracks the server id, not a device status.
            if edge_id_item == "server":
                continue

            # Calc the device status based on the job status
            consensus_device_status = FedMLStatusManager.get_device_consensus_status_in_job(
                status, edge_status_item)
            if consensus_device_status is not None:
                self.message_reporter.report_client_training_status(
                    edge_id_item, consensus_device_status, run_id=run_id)

        # Save the job status to local storage
        FedMLServerDataInterface.get_instance().save_job_status(run_id, master_id, status, status)

        # Report the status to message center
        self.message_reporter.report_server_training_status(run_id, status, edge_id=master_id)

        # Broadcast the status to slave agents
        self.message_reporter.report_job_status(run_id, status)
+
+    @staticmethod
+    def get_device_consensus_status_in_job(job_status, device_status):
+        if job_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED:
+            if device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \
+                    device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \
+                    device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED:
+                return device_status
+            else:
+                return ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED
+        else:
+            return None
+
+    def get_device_consensus_status_in_current_device(self, edge_id, status):
+        self.status_center.add_job_status_in_slave(edge_id, status)
+        consensus_status = self.status_center.get_job_status_in_slave(edge_id)
+        consensus_status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED \
+            if consensus_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION else consensus_status
+        return consensus_status
+
+    def status_center_process_slave_status(self, topic, payload):
+        """Handle a slave status message routed through the status center.
+
+        The JSON payload carries either an initialization message (the full
+        edge id list plus the server id) or a per-edge status update; both
+        are folded into ``self.client_agent_active_list`` keyed by run id.
+        """
+        payload_json = json.loads(payload)
+        run_id = payload_json.get("run_id", None)
+        edge_id = payload_json.get("edge_id", None)
+        status = payload_json.get("status", None)
+        init_edge_id_list = payload_json.get("init_all_edge_id_list", None)
+        init_server_id = payload_json.get("init_server_id", None)
+
+        # Ensure there is an active-status dict for this run id.
+        active_item_dict = self.client_agent_active_list.get(f"{run_id}", None)
+        if active_item_dict is None:
+            self.client_agent_active_list[f"{run_id}"] = dict()
+
+        # Initialization message: record the server id and mark every edge IDLE.
+        if init_edge_id_list is not None:
+            self.client_agent_active_list[f"{run_id}"][f"server"] = init_server_id
+            for edge_id_item in init_edge_id_list:
+                self.client_agent_active_list[f"{run_id}"][f"{edge_id_item}"] = \
+                    ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE
+
+        # Per-edge update: record the status and re-evaluate the run outcome.
+        if run_id is not None and edge_id is not None:
+            self.client_agent_active_list[f"{run_id}"][f"{edge_id}"] = status
+
+            self.process_device_status(run_id, edge_id, status)
+
+    def process_device_status(self, run_id, edge_id, status):
+        """Aggregate per-edge statuses for a run and report the results.
+
+        Counts failed/finished/killed edges from the run's active list,
+        reports the consensus client status for the reporting edge, and —
+        when every edge has reached a terminal state — reports the overall
+        server (run) status per the fault-tolerance policy.
+        """
+        number_of_failed_edges = 0
+        number_of_finished_edges = 0
+        number_of_killed_edges = 0
+        edge_id_status_dict = self.client_agent_active_list.get(f"{run_id}", {})
+        server_id = edge_id_status_dict.get("server", 0)
+        enable_fault_tolerance, fault_tolerance_rate = self.parse_fault_tolerance_params(run_id)
+        running_edges_list = list()
+        # NOTE(review): the statuses stored in this dict come from slave devices,
+        # yet the comparisons below use ServerConstants MSG_MLOPS_SERVER_STATUS_*
+        # values — presumably the underlying status strings are identical; confirm.
+        for edge_id_item, status_item in edge_id_status_dict.items():
+            if edge_id_item == "server":
+                continue
+
+            if status_item is None or status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \
+                    status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION:
+                number_of_failed_edges += 1
+                continue
+
+            if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED:
+                number_of_finished_edges += 1
+                continue
+
+            if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED:
+                number_of_killed_edges += 1
+                continue
+
+            # IDLE/OFFLINE edges are neither terminal nor running.
+            if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE or \
+                    status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE:
+                continue
+
+            running_edges_list.append(edge_id_item)
+
+        # Report client status
+        consensus_status = self.get_device_consensus_status_in_current_device(edge_id, status)
+        self.message_reporter.report_client_training_status(edge_id, consensus_status, run_id=run_id)
+
+        # Report server status based on the fault tolerance model and parameters
+        # (the "server" key is excluded from the edge count).
+        edge_nums = len(edge_id_status_dict.keys()) - 1
+        status_to_report = self.calculate_server_status(
+            run_id, edge_nums, number_of_failed_edges, number_of_finished_edges, number_of_killed_edges,
+            running_edges_list, enable_fault_tolerance=enable_fault_tolerance,
+            fault_tolerance_rate=fault_tolerance_rate)
+        if status_to_report is not None:
+            logging.info(f"Run completed when processing edge status, will report status {status_to_report}")
+            self.report_server_status(run_id, edge_id, server_id, status_to_report)
+
+    def calculate_server_status(
+            self, run_id, total_edge_nums, number_of_failed_edges, number_of_finished_edges,
+            number_of_killed_edges, running_edges_list, enable_fault_tolerance=False,
+            fault_tolerance_rate=0.8
+    ):
+        """Derive the run-level (server) status once no edge is still running.
+
+        Returns FAILED/FINISHED/KILLED per the (optional) fault-tolerance
+        policy, or None when no terminal rule matched. When edges are still
+        running the method falls off the end and returns None implicitly.
+
+        NOTE(review): total_edge_nums == 0 would raise ZeroDivisionError on the
+        failed-rate computation below — confirm callers always pass >= 1.
+        """
+        # Report server status based on the fault tolerance model and parameters
+        actual_failed_rate = number_of_failed_edges / total_edge_nums
+        all_edges_run_completed = True if len(running_edges_list) <= 0 else False
+        if all_edges_run_completed:
+            status_to_report = None
+            if enable_fault_tolerance:
+                if actual_failed_rate >= fault_tolerance_rate:
+                    # Too many failures: fail the run and notify the edges.
+                    status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED
+                    self.report_exception_status(
+                        running_edges_list, run_id=run_id, status=status_to_report)
+                    return status_to_report
+                else:
+                    if number_of_killed_edges == total_edge_nums:
+                        status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED
+                    else:
+                        status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED
+            else:
+                # Without fault tolerance a single failed edge fails the run.
+                if number_of_failed_edges > 0:
+                    status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED
+                elif number_of_finished_edges == total_edge_nums:
+                    status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED
+                elif number_of_killed_edges == total_edge_nums:
+                    status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED
+
+            return status_to_report
+
+    def parse_fault_tolerance_params(self, run_id):
+        """Return (enable_fault_tolerance, fault_tolerance_rate) for a run.
+
+        Values are read from run_config.parameters.common_args in the cached
+        run JSON; (False, 0) is returned when the run is unknown.
+        """
+        run_json = self.running_scheduler_contract.get(str(run_id), None)
+        if run_json is None:
+            return False, 0
+        run_config = run_json.get("run_config", {})
+        run_params = run_config.get("parameters", {})
+        common_args = run_params.get("common_args", {})
+        enable_fault_tolerance = common_args.get("enable_fault_tolerance", False)
+        fault_tolerance_rate = common_args.get("fault_tolerance_rate", 0)
+        return enable_fault_tolerance, fault_tolerance_rate
+
+    def report_server_status(self, run_id, edge_id, server_id, status):
+        # Report the aggregated run (server) status; this edge id doubles as
+        # the server agent id in the report.
+        self.status_reporter.report_server_id_status(
+            run_id, status, edge_id=edge_id, server_id=server_id, server_agent_id=edge_id)
+
+    def report_exception_status(
+            self, edge_id_list, run_id=0, server_id=None, status=None, payload=None):
+        """Publish a stop-train message carrying an exception/override status.
+
+        When ``payload`` is None a payload is built from run_id/edge_id_list
+        (plus serverId when given); otherwise the given JSON payload is reused.
+        ``run_status`` defaults to CLIENT_STATUS_EXCEPTION unless overridden.
+        """
+        if payload is None:
+            payload_obj = {"runId": run_id, "edgeids": edge_id_list}
+            if server_id is not None:
+                payload_obj["serverId"] = server_id
+        else:
+            payload_obj = json.loads(payload)
+        payload_obj["run_status"] = ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION if status is None else status
+        topic_exception = "flserver_agent/" + str(self.edge_id) + "/stop_train"
+        self.message_reporter.send_message(topic_exception, json.dumps(payload_obj))
+
+    def status_center_process_slave_status_to_master_in_slave_agent(self, topic, payload):
+        """Relay a slave status message both to the master and back to our own listener."""
+        # Forward the status message to the sender queue of message center.
+        self.message_center.send_message(topic, payload)
+
+        # Post the status message to the listener queue of message center
+        self.message_center.receive_message(GeneralConstants.MSG_TOPIC_REPORT_DEVICE_STATUS_IN_JOB, payload)
+
+    def status_center_process_slave_status_to_mlops_in_slave_agent(self, topic, payload):
+        """Relay a slave status message unchanged to MLOps via the message center."""
+        # Forward the status message to message center.
+        self.message_center.send_message(topic, payload)
+
+    def status_center_request_job_status_from_master_in_slave_agent(self, topic, payload):
+        """Ask the master agent for the current job status of a run/edge pair."""
+        # Parse the parameters
+        payload_json = json.loads(payload)
+        run_id = payload_json.get("run_id", None)
+        master_id = payload_json.get("master_id", None)
+        edge_id = payload_json.get("edge_id", None)
+
+        # Request the job status from master agent.
+        # NOTE(review): payload_request_job_status is a dict, while other call
+        # sites pass json.dumps(...) strings — confirm send_message serializes.
+        topic_request_job_status = f"{GeneralConstants.MSG_TOPIC_REQUEST_JOB_STATUS_PREFIX}{master_id}"
+        payload_request_job_status = {"run_id": run_id, "edge_id": edge_id}
+        self.message_center.send_message(topic_request_job_status, payload_request_job_status)
diff --git a/python/fedml/computing/scheduler/slave/base_slave_agent.py b/python/fedml/computing/scheduler/slave/base_slave_agent.py
new file mode 100755
index 0000000000..01c0a39195
--- /dev/null
+++ b/python/fedml/computing/scheduler/slave/base_slave_agent.py
@@ -0,0 +1,139 @@
+
+import json
+import os
+from ..comm_utils import sys_utils
+from ..comm_utils.run_process_utils import RunProcessUtils
+from ..comm_utils.sys_utils import get_python_program
+from ....core.mlops import MLOpsRuntimeLog, MLOpsMetrics
+from .client_data_interface import ClientConstants
+from ..scheduler_core.account_manager import FedMLAccountManager
+from ..scheduler_core.general_constants import GeneralConstants
+from abc import ABC, abstractmethod
+
+
+class FedMLBaseSlaveAgent(ABC):
+    """Base class for slave (client) agents.
+
+    Handles account login/binding via FedMLAccountManager, runtime-log setup,
+    protocol-manager creation/start, and the local client API service.
+    Subclasses supply the concrete protocol manager and storage hooks through
+    the abstract methods at the bottom.
+    """
+    CLIENT_API_CMD = "fedml.computing.scheduler.slave.client_api:api"
+
+    def __init__(self):
+        self.agent_args = None
+        self.local_api_process = None
+        self.process = None
+        self.cur_dir = os.path.split(os.path.realpath(__file__))[0]
+        self.mlops_metrics = MLOpsMetrics()
+        self.protocol_mgr = None
+
+    def login(
+            self, userid, api_key=None, device_id=None,
+            os_name=None, need_to_check_gpu=False, role=None
+    ):
+        """Bind this device to MLOps and start the protocol manager.
+
+        Returns the login result object on success, or None when the GPU
+        check fails (bare return) or the account login fails.
+        """
+        # Preprocess the login args
+        if need_to_check_gpu:
+            gpu_count, _ = sys_utils.get_gpu_count_vendor()
+            if gpu_count <= 0:
+                print("We can't find any gpu device on your machine. \n"
+                      "With the gpu_supplier(-g) option, you need to check if your machine "
+                      "has nvidia GPUs and installs CUDA related drivers.")
+                return
+
+        # Login account
+        login_result = FedMLAccountManager.get_instance().login(
+            userid, api_key=api_key, device_id=device_id,
+            os_name=os_name, role=role
+        )
+        if login_result is not None:
+            self.agent_args = login_result
+        else:
+            return None
+
+        # Save the bound info
+        self._save_agent_info(login_result.current_device_id + "." + login_result.os_name, login_result.edge_id)
+
+        # Init the logs for protocol manager
+        self._init_logs(login_result, login_result.edge_id)
+
+        # Create the protocol manager to communicate with the slave agents and MLOps.
+        self._create_protocol_manager(login_result)
+
+        # Initialize the protocol manager
+        # noinspection PyBroadException
+        try:
+            self._initialize_protocol_manager()
+        except Exception as e:
+            # Record the failure so callers can detect a failed bind, then re-raise.
+            FedMLAccountManager.write_login_failed_file(is_client=True)
+            self.protocol_mgr.stop()
+            raise e
+
+        # Start the protocol manager to process the messages from MLOps and slave agents.
+        self.protocol_mgr.start()
+
+        return login_result
+
+    @staticmethod
+    def logout():
+        """Clean up run processes and all local client API processes."""
+        GeneralConstants.cleanup_run_process(None)
+        sys_utils.cleanup_all_fedml_client_api_processes()
+
+    def _create_protocol_manager(self, login_result):
+        # Create the protocol manager once and wire the login info into it.
+        if self.protocol_mgr is not None:
+            return
+        self.protocol_mgr = self._generate_protocol_manager_instance(
+            login_result, agent_config=login_result.agent_config)
+        self.protocol_mgr.args = login_result
+        self.protocol_mgr.edge_id = login_result.edge_id
+        self.protocol_mgr.unique_device_id = login_result.unique_device_id
+        self.protocol_mgr.user_name = login_result.user_name
+        self.protocol_mgr.agent_config = login_result.agent_config
+
+    def _initialize_protocol_manager(self):
+        # Init local database
+        self._init_database()
+
+        # Initialize the master protocol
+        self.protocol_mgr.initialize()
+
+        # Start the client API process
+        self._start_slave_api()
+
+    def _init_logs(self, login_result, edge_id):
+        # Init runtime logs: reuse the login result object as the args holder
+        # expected by MLOpsRuntimeLog.
+        in_args = login_result
+        in_args.log_file_dir = self._get_log_file_dir()
+        in_args.run_id = 0
+        in_args.role = "client"
+        client_ids = list()
+        client_ids.append(edge_id)
+        in_args.client_id_list = json.dumps(client_ids)
+        in_args.using_mlops = True
+        MLOpsRuntimeLog.get_instance(in_args).init_logs()
+
+    def _start_slave_api(self):
+        # Start the local API services
+        client_api_cmd = FedMLBaseSlaveAgent.CLIENT_API_CMD
+        client_api_pids = RunProcessUtils.get_pid_from_cmd_line(client_api_cmd)
+        if client_api_pids is None or len(client_api_pids) <= 0:
+            # Launch the uvicorn-based client API only when not already running.
+            python_program = get_python_program()
+            cur_dir = os.path.dirname(__file__)
+            fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
+            self.local_api_process = ClientConstants.exec_console_with_script(
+                "{} -m uvicorn {} --host 0.0.0.0 --port {} "
+                "--reload --reload-delay 3 --reload-dir {} --log-level critical".format(
+                    python_program, client_api_cmd, ClientConstants.LOCAL_CLIENT_API_PORT, fedml_base_dir),
+                should_capture_stdout=False,
+                should_capture_stderr=False
+            )
+
+    @abstractmethod
+    def _get_log_file_dir(self):
+        """Return the directory used for agent log files."""
+        pass
+
+    @abstractmethod
+    def _save_agent_info(self, unique_device_id, edge_id):
+        """Persist the bound unique device id / edge id locally."""
+        pass
+
+    @abstractmethod
+    def _init_database(self):
+        """Initialize the local job database."""
+        pass
+
+    @abstractmethod
+    def _generate_protocol_manager_instance(self, args, agent_config=None):
+        """Create and return the concrete protocol manager instance."""
+        return None
diff --git a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
new file mode 100755
index 0000000000..4448dd49fa
--- /dev/null
+++ b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
@@ -0,0 +1,264 @@
+import logging
+import multiprocessing
+import os
+import platform
+import time
+import traceback
+from abc import ABC, abstractmethod
+
+from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
+from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
+from .client_data_interface import FedMLClientDataInterface
+from ..comm_utils import sys_utils
+from ....core.mlops.mlops_utils import MLOpsUtils
+from multiprocessing import Process
+from ..scheduler_core.scheduler_base_job_runner import FedMLSchedulerBaseJobRunner, RunnerError, RunnerCompletedError
+from ..scheduler_core.general_constants import GeneralConstants
+
+
+class FedMLBaseSlaveJobRunner(FedMLSchedulerBaseJobRunner, ABC):
+    """Base job runner for slave (client) devices.
+
+    Runs one job inside a dedicated process: downloads/updates the job
+    package, launches the user entry script, and reports client statuses
+    and computing cost to MLOps.
+    """
+
+    def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id=0,
+                 cuda_visible_gpu_ids_str=None,
+                 agent_data_dir=None, agent_package_download_dir=None,
+                 agent_package_unzip_dir=None, agent_log_file_dir=None):
+        FedMLSchedulerBaseJobRunner.__init__(
+            self, args, edge_id=edge_id, request_json=request_json, agent_config=agent_config, run_id=run_id,
+            cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str, agent_data_dir=agent_data_dir,
+            agent_package_download_dir=agent_package_download_dir,
+            agent_package_unzip_dir=agent_package_unzip_dir,
+            agent_log_file_dir=agent_log_file_dir
+        )
+
+        # Default package/config locations inside the job package tree.
+        self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data")
+        self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data")
+        self.fedml_data_dir = self.fedml_data_base_package_dir
+        self.fedml_config_dir = os.path.join("/", "fedml", "conf")
+        self.run_extend_queue_list = None
+        self.computing_started_time = 0
+
+    def __repr__(self):
+        # Debug-friendly representation listing all instance attributes.
+        return "<{klass} @{id:x} {attrs}>".format(
+            klass=self.__class__.__name__,
+            id=id(self) & 0xFFFFFF,
+            attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()),
+        )
+
+    def run(self, process_event, completed_event,  run_extend_queue_list,
+            sender_message_center, listener_message_queue, status_center_queue):
+        """Child-process entry point: execute run_impl and always clean up.
+
+        Reports KILLED on RunnerError, nothing extra on RunnerCompletedError,
+        FAILED on any other exception; the finally block reports computing
+        cost and releases containers, GPUs, log processors, and run processes.
+        """
+        print(f"Client runner process id {os.getpid()}, run id {self.run_id}")
+
+        if platform.system() != "Windows":
+            os.setsid()
+
+        # NOTE(review): the setdefault call is redundant — the key was just set.
+        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
+        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')
+
+        self.run_process_event = process_event
+        self.run_process_completed_event = completed_event
+        try:
+            MLOpsUtils.set_ntp_offset(self.ntp_offset)
+            self.rebuild_message_status_center(sender_message_center, listener_message_queue, status_center_queue)
+            self.run_impl(run_extend_queue_list, sender_message_center, listener_message_queue, status_center_queue)
+        except RunnerError:
+            logging.info("Runner stopped.")
+            self.reset_devices_status(self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_KILLED)
+        except RunnerCompletedError:
+            logging.info("Runner completed.")
+        except Exception as e:
+            logging.error(f"Runner exited with errors. Exception: {e}, Traceback {traceback.format_exc()}")
+            self.status_reporter.report_client_id_status(
+                self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
+                server_id=self.server_id, run_id=self.run_id)
+        finally:
+            # Report computing cost and release every run-scoped resource.
+            if self.mlops_metrics is not None:
+                computing_ended_time = MLOpsUtils.get_ntp_time()
+                self.mlops_metrics.report_edge_job_computing_cost(self.run_id, self.edge_id,
+                                                                  self.computing_started_time, computing_ended_time,
+                                                                  self.args.account_id, self.args.api_key)
+            logging.info("Release resources.")
+            FedMLSchedulerBaseJobRunner.cleanup_containers_and_release_gpus(self.run_id, self.edge_id)
+            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id)
+            if self.mlops_metrics is not None:
+                self.mlops_metrics.stop_sys_perf()
+            time.sleep(3)
+            GeneralConstants.cleanup_learning_process(self.run_id)
+            GeneralConstants.cleanup_run_process(self.run_id)
+
+    @abstractmethod
+    def run_impl(self, run_extend_queue_list, sender_message_center,
+                 listener_message_queue, status_center_queue):
+        """Default run body: download/unpack the package, run the entry file,
+        and report FINISHED or FAILED.
+
+        Declared abstract so subclasses must override; they may still reuse
+        this implementation via super().run_impl(...).
+        """
+        run_id = self.request_json["runId"]
+        run_config = self.request_json["run_config"]
+        data_config = run_config.get("data_config", {})
+        packages_config = run_config["packages_config"]
+
+        # Report the computing start (end time 0 = still running).
+        self.computing_started_time = MLOpsUtils.get_ntp_time()
+        self.mlops_metrics.report_edge_job_computing_cost(run_id, self.edge_id,
+                                                          self.computing_started_time, 0,
+                                                          self.args.account_id, self.args.api_key)
+
+        self.check_runner_stop_event()
+
+        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
+
+        self.status_reporter.report_client_id_status(
+            self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING,
+            running_json=self.start_request_json, run_id=run_id)
+
+        # get training params
+        private_local_data_dir = data_config.get("privateLocalData", "")
+        is_using_local_data = 0
+        # if private_local_data_dir is not None and len(str(private_local_data_dir).strip(' ')) > 0:
+        #     is_using_local_data = 1
+
+        # start a run according to the hyper-parameters
+        # fedml_local_data_dir = self.cur_dir + "/fedml_data/run_" + run_id_str + "_edge_" + str(edge_id)
+        fedml_local_data_dir = os.path.join(self.cur_dir, "fedml_data")
+        fedml_local_config_dir = os.path.join(self.cur_dir, "fedml_config")
+        if is_using_local_data:
+            fedml_local_data_dir = private_local_data_dir
+        self.fedml_data_dir = self.fedml_data_local_package_dir
+
+        self.check_runner_stop_event()
+
+        logging.info("Download packages")
+
+        # update local config with real time parameters from server and dynamically replace variables value
+        unzip_package_path, fedml_config_object = self.update_local_fedml_config(run_id, run_config)
+        # if unzip_package_path is None or fedml_config_object is None:
+        #     logging.info("failed to update local fedml config.")
+        #     self.check_runner_stop_event()
+        #     # Send failed msg when exceptions.
+        #     self.cleanup_run_when_starting_failed(status=GeneralConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION)
+        #     return
+
+        logging.info("Check downloaded packages...")
+
+        entry_file_config = fedml_config_object.get("entry_config", None)
+        dynamic_args_config = fedml_config_object.get("dynamic_args", None)
+        entry_file = str(entry_file_config["entry_file"]).replace('\\', os.sep).replace('/', os.sep)
+        entry_file = os.path.basename(entry_file)
+        conf_file = entry_file_config["conf_file"]
+        conf_file = str(conf_file).replace('\\', os.sep).replace('/', os.sep)
+        #####
+        # GeneralConstants.cleanup_learning_process(run_id)
+        # GeneralConstants.cleanup_bootstrap_process(run_id)
+        #####
+
+        if not os.path.exists(unzip_package_path):
+            logging.info("failed to unzip file.")
+            self.check_runner_stop_event()
+            return
+        os.chdir(os.path.join(unzip_package_path, "fedml"))
+
+        self.check_runner_stop_event()
+
+        logging.info("starting the user process...")
+
+        entry_file_full_path = os.path.join(unzip_package_path, "fedml", entry_file)
+        conf_file_full_path = os.path.join(unzip_package_path, "fedml", conf_file)
+        logging.info("waiting the user process to finish...")
+        logging.info("                          ")
+        logging.info("                          ")
+        logging.info("====Your Run Logs Begin===")
+
+        # NOTE(review): passes self.fedml_config_object rather than the local
+        # fedml_config_object returned by update_local_fedml_config above —
+        # confirm which one is intended.
+        process, is_launch_task, error_list = self.execute_job_task(
+            unzip_package_path=unzip_package_path, entry_file_full_path=entry_file_full_path,
+            conf_file_full_path=conf_file_full_path, dynamic_args_config=dynamic_args_config,
+            fedml_config_object=self.fedml_config_object)
+
+        logging.info("====Your Run Logs End===")
+        logging.info("                        ")
+        logging.info("                        ")
+
+        # Parses as: ret_code = (process.returncode if process else None); out = err = None.
+        ret_code, out, err = process.returncode if process else None, None, None
+        # NOTE(review): process.pid raises AttributeError when process is None — TODO confirm.
+        is_run_ok = sys_utils.is_runner_finished_normally(process.pid)
+        if is_launch_task:
+            is_run_ok = True
+        if error_list is not None and len(error_list) > 0:
+            is_run_ok = False
+        if ret_code is None or ret_code <= 0:
+            self.check_runner_stop_event()
+
+            if is_run_ok:
+                if out is not None:
+                    out_str = sys_utils.decode_our_err_result(out)
+                    if out_str != "":
+                        logging.info("{}".format(out_str))
+
+                self.status_reporter.report_client_id_status(
+                    self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
+                    server_id=self.server_id, run_id=run_id)
+
+                if is_launch_task:
+                    sys_utils.log_return_info(f"job {run_id}", ret_code)
+                else:
+                    sys_utils.log_return_info(entry_file, ret_code)
+        else:
+            is_run_ok = False
+
+        if not is_run_ok:
+            # If the run status is killed or finished, then return with the normal state.
+            current_job = FedMLClientDataInterface.get_instance().get_job_by_id(run_id)
+            if current_job is not None and (current_job.status == GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or
+                                            current_job.status == GeneralConstants.MSG_MLOPS_CLIENT_STATUS_KILLED):
+                return
+
+            self.check_runner_stop_event()
+
+            logging.error("failed to run the learning process...")
+
+            if err is not None:
+                err_str = sys_utils.decode_our_err_result(err)
+                if err_str != "":
+                    logging.error("{}".format(err_str))
+
+            if is_launch_task:
+                sys_utils.log_return_info(f"job {run_id}", ret_code)
+            else:
+                sys_utils.log_return_info(entry_file, ret_code)
+
+            # Send failed msg when exceptions.
+            self.status_reporter.report_client_id_status(
+                self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
+                server_id=self.server_id, run_id=run_id)
+
+    @abstractmethod
+    def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None):
+        """Create and return the concrete job runner instance."""
+        return None
+
+    @abstractmethod
+    def _generate_extend_queue_list(self):
+        """Return extra queues to pass through to the runner process."""
+        return list()
+
+    def reset_devices_status(self, edge_id, status):
+        # Report the given terminal status for this edge on the current run.
+        self.status_reporter.run_id = self.run_id
+        self.status_reporter.edge_id = edge_id
+        self.status_reporter.report_client_id_status(
+            edge_id, status, server_id=self.server_id, run_id=self.run_id)
+
+    def start_runner_process(
+            self, run_id, request_json, edge_id=None,
+            sender_message_queue=None, listener_message_queue=None,
+            status_center_queue=None, cuda_visible_gpu_ids_str=None
+    ):
+        """Spawn a child process running this job and return the Process object."""
+        client_runner = self._generate_job_runner_instance(
+            self.args, run_id=run_id, request_json=request_json,
+            agent_config=None, edge_id=edge_id
+        )
+        client_runner.start_request_json = request_json
+        run_id_str = str(run_id)
+        # Share stop/completed events between this manager and the child runner.
+        self.run_process_event = multiprocessing.Event()
+        client_runner.run_process_event = self.run_process_event
+        self.run_process_completed_event = multiprocessing.Event()
+        client_runner.run_process_completed_event = self.run_process_completed_event
+        client_runner.server_id = request_json.get("server_id", "0")
+        self.run_extend_queue_list = self._generate_extend_queue_list()
+        logging.info("start the runner process.")
+        self.run_process = Process(target=client_runner.run, args=(
+            self.run_process_event, self.run_process_completed_event, self.run_extend_queue_list,
+            sender_message_queue, listener_message_queue, status_center_queue
+        ))
+        self.run_process.start()
+        return self.run_process
diff --git a/python/fedml/computing/scheduler/slave/base_slave_job_runner_manager.py b/python/fedml/computing/scheduler/slave/base_slave_job_runner_manager.py
new file mode 100755
index 0000000000..c058d5dd0e
--- /dev/null
+++ b/python/fedml/computing/scheduler/slave/base_slave_job_runner_manager.py
@@ -0,0 +1,12 @@
+
+from abc import ABC, abstractmethod
+from ..scheduler_core.scheduler_base_job_runner_manager import FedMLSchedulerBaseJobRunnerManager
+from ..scheduler_core.scheduler_base_job_runner import FedMLSchedulerBaseJobRunner
+
+
+class FedMLBaseSlaveJobRunnerManager(FedMLSchedulerBaseJobRunnerManager, ABC):
+    """Base manager of slave job runners; adds container/GPU cleanup delegation."""
+
+    def __init__(self):
+        FedMLSchedulerBaseJobRunnerManager.__init__(self)
+
+    def cleanup_containers_and_release_gpus(self, run_id, edge_id):
+        # Delegate to the base job runner's cleanup helper.
+        FedMLSchedulerBaseJobRunner.cleanup_containers_and_release_gpus(run_id, edge_id)
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
new file mode 100755
index 0000000000..0543459dd0
--- /dev/null
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -0,0 +1,571 @@
+
+import json
+import logging
+import os
+import time
+import traceback
+from abc import ABC, abstractmethod
+
+import fedml
+from ..comm_utils.constants import SchedulerConstants
+from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs
+from ..comm_utils.run_process_utils import RunProcessUtils
+from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
+from ....core.mlops.mlops_configs import MLOpsConfigs
+from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
+from ..comm_utils import sys_utils
+from ....core.mlops.mlops_utils import MLOpsUtils
+from ..scheduler_core.compute_cache_manager import ComputeCacheManager
+from ..scheduler_core.ota_upgrade import FedMLOtaUpgrade
+from .client_data_interface import FedMLClientDataInterface
+from ..scheduler_core.scheduler_base_protocol_manager import FedMLSchedulerBaseProtocolManager
+from ..scheduler_core.general_constants import GeneralConstants
+
+
+class FedMLBaseSlaveProtocolManager(FedMLSchedulerBaseProtocolManager, ABC):
+
+    def __init__(self, args, agent_config=None):
+        FedMLSchedulerBaseProtocolManager.__init__(self, args, agent_config=agent_config)
+
+        self.request_json = None
+        self.disable_client_login = None
+        self.args = args
+        self.message_status_runner = None
+        self.message_center = None
+        self.status_center = None
+        self.message_center_name = "master_agent"
+        self.run_id = None
+        self.edge_id = args.edge_id
+        self.general_edge_id = None
+        self.edge_user_name = args.user_name
+        self.edge_extra_url = args.extra_url
+        self.server_agent_id = args.edge_id
+        self.current_device_id = args.current_device_id
+        self.unique_device_id = args.unique_device_id
+        self.agent_config = agent_config
+        self.topic_start_train = None
+        self.topic_stop_train = None
+        self.topic_report_status = None
+        self.topic_ota_msg = None
+        self.topic_request_device_info = None
+        self.topic_client_logout = None
+        self.topic_response_job_status = None
+        self.topic_report_device_status_in_job = None
+        self.fl_topic_start_train = None
+        self.fl_topic_stop_train = None
+        self.fl_topic_request_device_info = None
+        self.communication_mgr = None
+        self.subscribed_topics = list()
+        self.mlops_metrics = None
+        self.status_reporter = None
+        self.job_runners = dict()
+        self.ota_upgrade = FedMLOtaUpgrade(edge_id=args.edge_id)
+        self.running_request_json = dict()
+        self.start_request_json = None
+        self.user_name = args.user_name
+        self.general_edge_id = args.general_edge_id
+        self.server_id = args.server_id
+        self.model_device_server_id = None
+        self.model_device_client_edge_id_list = None
+        self.model_device_server = None
+        self.model_device_client_list = None
+
+    @abstractmethod
+    def generate_topics(self):
+        # The MQTT message topic format is as follows: <sender>/<receiver>/<action>
+
+        # The topic for stopping training
+        self.topic_start_train = "flserver_agent/" + str(self.edge_id) + "/start_train"
+
+        # The topi for stopping training
+        self.topic_stop_train = "flserver_agent/" + str(self.edge_id) + "/stop_train"
+
+        # The topic for reporting current device status.
+        self.topic_report_status = "mlops/report_device_status"
+
+        # The topic for OTA messages from the MLOps.
+        self.topic_ota_msg = "mlops/flclient_agent_" + str(self.edge_id) + "/ota"
+
+        # The topic for requesting device info from the client.
+        self.topic_request_device_info = "server/client/request_device_info/" + str(self.edge_id)
+
+        # The topic for requesting device info from MLOps.
+        self.topic_client_logout = "mlops/client/logout/" + str(self.edge_id)
+
+        # The topic for getting job status from the status center.
+        self.topic_response_job_status = f"master_agent/somewhere/response_job_status/{self.edge_id}"
+
+        # The topic for getting device status of job from the status center.
+        self.topic_report_device_status_in_job = f"slave_job/slave_agent/report_device_status_in_job"
+
+        # The topic for reporting online status
+        self.topic_active = "flclient_agent/active"
+
+        # The topic for last-will messages.
+        self.topic_last_will = "flclient_agent/last_will_msg"
+
+        if self.general_edge_id is not None:
+            self.fl_topic_start_train = "flserver_agent/" + str(self.general_edge_id) + "/start_train"
+            self.fl_topic_stop_train = "flserver_agent/" + str(self.general_edge_id) + "/stop_train"
+            self.fl_topic_request_device_info = "server/client/request_device_info/" + str(self.general_edge_id)
+
+        # Subscribe topics for starting train, stopping train and fetching client status.
+        self.subscribed_topics.clear()
+        self.add_subscribe_topic(self.topic_start_train)
+        self.add_subscribe_topic(self.topic_stop_train)
+        self.add_subscribe_topic(self.topic_report_status)
+        self.add_subscribe_topic(self.topic_ota_msg)
+        self.add_subscribe_topic(self.topic_request_device_info)
+        self.add_subscribe_topic(self.topic_client_logout)
+        self.add_subscribe_topic(self.topic_response_job_status)
+        self.add_subscribe_topic(self.topic_report_device_status_in_job)
+        if self.general_edge_id is not None:
+            self.add_subscribe_topic(self.fl_topic_start_train)
+            self.add_subscribe_topic(self.fl_topic_stop_train)
+            self.add_subscribe_topic(self.fl_topic_request_device_info)
+
+    @abstractmethod
+    def add_protocol_handler(self):
+        # Add the message listeners for all topics, the following is an example.
+        # self.add_message_listener(self.topic_start_train, self.callback_start_train)
+        # Add the message listeners for all topics
+        self.add_message_listener(self.topic_start_train, self.callback_start_train)
+        self.add_message_listener(self.topic_stop_train, self.callback_stop_train)
+        self.add_message_listener(self.topic_ota_msg, FedMLBaseSlaveProtocolManager.callback_client_ota_msg)
+        self.add_message_listener(self.topic_report_status, self.callback_report_current_status)
+        self.add_message_listener(self.topic_request_device_info, self.callback_report_device_info)
+        self.add_message_listener(self.topic_client_logout, self.callback_client_logout)
+        self.add_message_listener(self.topic_response_job_status, self.callback_response_job_status)
+        self.add_message_listener(self.topic_report_device_status_in_job, self.callback_response_device_status_in_job)
+        self.add_message_listener(self.fl_topic_start_train, self.callback_start_train)
+        self.add_message_listener(self.fl_topic_stop_train, self.callback_stop_train)
+        self.add_message_listener(self.fl_topic_request_device_info, self.callback_report_device_info)
+
+    @abstractmethod
+    def _get_job_runner_manager(self):
+        return None
+
+    @abstractmethod
+    def _init_extra_items(self):
+        os.environ["FEDML_CURRENT_EDGE_ID"] = str(self.edge_id)
+        if not ComputeCacheManager.get_instance().set_redis_params():
+            os.environ["FEDML_DISABLE_REDIS_CONNECTION"] = "1"
+
+    def add_subscribe_topic(self, topic):
+        self.subscribed_topics.append(topic)
+
+    def stop(self):
+        if self.model_device_server is not None:
+            self.model_device_server.stop()
+            self.model_device_server = None
+
+        if self.model_device_client_list is not None:
+            for model_client in self.model_device_client_list:
+                model_client.stop()
+            self.model_device_client_list.clear()
+            self.model_device_client_list = None
+
+        super().stop()
+
+    def on_agent_communication_connected(self, mqtt_client_object):
+        super().on_agent_communication_connected(mqtt_client_object)
+
+        self._process_connection_ready()
+
+        payload = {"model_master_device_id": self.model_device_server_id,
+                   "model_slave_device_id_list": self.model_device_client_edge_id_list}
+        self.receive_message(self.topic_request_device_info, json.dumps(payload))
+
+    def on_agent_communication_disconnected(self, mqtt_client_object):
+        super().on_agent_communication_disconnected(mqtt_client_object)
+
+        self._process_connection_lost()
+
+    @abstractmethod
+    def _process_connection_ready(self):
+        pass
+
+    @abstractmethod
+    def _process_connection_lost(self):
+        pass
+
+    def print_connected_info(self):
+        print("\nCongratulations, your device is connected to the FedML MLOps platform successfully!")
+        print(f"Your FedML Edge ID is {str(self.edge_id)}, unique device ID is {str(self.unique_device_id)}, "
+              f"master deploy ID is {str(self.model_device_server_id)}, "
+              f"worker deploy ID is {self.model_device_client_edge_id_list}"
+              )
+        if self.edge_extra_url is not None and self.edge_extra_url != "":
+            print(f"You may visit the following url to fill in more information with your device.\n"
+                  f"{self.edge_extra_url}")
+
+    def callback_start_train(self, topic, payload):
+        # Parse the parameters
+        request_json = json.loads(payload)
+        is_retain = request_json.get("is_retain", False)
+        if is_retain:
+            return
+        run_id = request_json["runId"]
+        edge_id = str(topic).split("/")[-2]
+        self.args.run_id = run_id
+        self.args.edge_id = edge_id
+
+        # Start log processor for current run
+        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
+        MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(
+            run_id, edge_id, log_source=SchedulerConstants.get_log_source(request_json))
+        logging.info("start the log processor")
+
+        # Fetch the config
+        try:
+            MLOpsConfigs.fetch_all_configs()
+        except Exception as e:
+            logging.error(f"Failed to fetch all configs with Exception {e}. Traceback: {traceback.format_exc()}")
+            pass
+
+        # Check if the slave agent is disabled.
+        if not FedMLClientDataInterface.get_instance().get_agent_status():
+            request_json = json.loads(payload)
+            run_id = request_json["runId"]
+            logging.error(
+                "FedMLDebug - Receive: topic ({}), payload ({}), but the client agent is disabled. {}".format(
+                    topic, payload, traceback.format_exc()
+                )
+            )
+            # Send failed msg when exceptions.
+            self.status_reporter.report_client_id_status(
+                edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION, run_id=run_id,
+                msg=f"the client agent {edge_id} is disabled")
+            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, edge_id)
+            return
+
+        # Print the payload
+        logging.info(
+            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
+        )
+
+        # Occupy GPUs
+        server_agent_id = request_json["cloud_agent_id"]
+        scheduler_match_info = request_json.get("scheduler_match_info", {})
+        matched_gpu_num = scheduler_match_info.get("matched_gpu_num", 0)
+        model_master_device_id = scheduler_match_info.get("model_master_device_id", None)
+        model_slave_device_id = scheduler_match_info.get("model_slave_device_id", None)
+        model_slave_device_id_list = scheduler_match_info.get("model_slave_device_id_list", None)
+        run_config = request_json.get("run_config", {})
+        run_params = run_config.get("parameters", {})
+        serving_args = run_params.get("serving_args", {})
+        endpoint_id = serving_args.get("endpoint_id", None)
+        cuda_visible_gpu_ids_str = JobRunnerUtils.get_instance().occupy_gpu_ids(
+            run_id, matched_gpu_num, edge_id, inner_id=endpoint_id,
+            model_master_device_id=model_master_device_id,
+            model_slave_device_id=model_slave_device_id)
+        logging.info(
+            f"Run started, available gpu ids: {JobRunnerUtils.get_instance().get_available_gpu_id_list(edge_id)}")
+
+        # Set the listener for job status from master agent
+        self.setup_listener_job_status(run_id)
+
+        # Start server with multiprocessing mode
+        self.request_json = request_json
+        run_id_str = str(run_id)
+        self.running_request_json[run_id_str] = request_json
+        self._get_job_runner_manager().start_job_runner(
+            run_id, request_json, args=self.args, edge_id=edge_id,
+            sender_message_queue=self.message_center.get_sender_message_queue(),
+            listener_message_queue=self.get_listener_message_queue(),
+            status_center_queue=self.get_status_queue(),
+            cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str,
+        )
+        run_process = self._get_job_runner_manager().get_runner_process(run_id)
+        if run_process is not None:
+            GeneralConstants.save_run_process(run_id, run_process.pid)
+
+        # Register the job launch message into the status center
+        self.register_job_launch_message(topic, payload)
+
+    def callback_stop_train(self, topic, payload):
+        # Parse the parameters.
+        edge_id = str(topic).split("/")[-2]
+        request_json = json.loads(payload)
+        is_retain = request_json.get("is_retain", False)
+        if is_retain:
+            return
+        run_id = request_json.get("runId", None)
+        run_id = request_json.get("id", None) if run_id is None else run_id
+        run_status = request_json.get("run_status", GeneralConstants.MSG_MLOPS_CLIENT_STATUS_KILLED)
+
+        # logging.info("Stop run with multiprocessing...")
+        # Stop client with multiprocessing mode
+        run_id_str = str(run_id)
+        self._get_job_runner_manager().cleanup_containers_and_release_gpus(run_id, edge_id)
+        self.sync_run_stop_status(run_status=run_status)
+
+        # Register the job stopping message into the status center
+        self.register_job_stop_message(topic, payload)
+
+    def callback_report_current_status(self, topic, payload):
+        logging.info(
+            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
+        )
+
+        self.send_agent_active_msg(self.edge_id)
+        if self.general_edge_id is not None:
+            self.send_agent_active_msg(self.general_edge_id)
+
+    @staticmethod
+    def callback_client_ota_msg(topic, payload):
+        logging.info(
+            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
+        )
+
+        request_json = json.loads(payload)
+        cmd = request_json["cmd"]
+
+        if cmd == GeneralConstants.FEDML_OTA_CMD_UPGRADE:
+            FedMLOtaUpgrade.process_ota_upgrade_msg()
+            # Process(target=FedMLClientRunner.process_ota_upgrade_msg).start()
+            raise Exception("After upgraded, restart runner...")
+        elif cmd == GeneralConstants.FEDML_OTA_CMD_RESTART:
+            raise Exception("Restart runner...")
+
+    def callback_report_device_info(self, topic, payload):
+        payload_json = json.loads(payload)
+        server_id = payload_json.get("server_id", 0)
+        run_id = payload_json.get("run_id", 0)
+        listen_edge_id = str(topic).split("/")[-1]
+        context = payload_json.get("context", None)
+        need_gpu_info = payload_json.get("need_gpu_info", False)
+        need_running_process_list = payload_json.get("need_running_process_list", False)
+        model_master_device_id = payload_json.get("model_master_device_id", None)
+        model_slave_device_id_list = payload_json.get("model_slave_device_id_list", None)
+        if model_master_device_id is not None:
+            self.model_device_server_id = model_master_device_id
+        if model_slave_device_id_list is not None:
+            self.model_device_client_edge_id_list = model_slave_device_id_list
+        response_topic = f"client/server/response_device_info/{server_id}"
+        if self.mlops_metrics is not None:
+            if not need_gpu_info:
+                device_info_json = {
+                    "edge_id": listen_edge_id,
+                    "fedml_version": fedml.__version__,
+                    "user_id": self.args.user_name
+                }
+            else:
+                total_mem, free_mem, total_disk_size, free_disk_size, cup_utilization, cpu_cores, gpu_cores_total, \
+                    gpu_cores_available, sent_bytes, recv_bytes, gpu_available_ids = sys_utils.get_sys_realtime_stats()
+                host_ip = sys_utils.get_host_ip()
+                host_port = sys_utils.get_available_port()
+                gpu_available_ids = JobRunnerUtils.get_available_gpu_id_list(self.edge_id)
+                gpu_available_ids = JobRunnerUtils.trim_unavailable_gpu_ids(gpu_available_ids)
+                gpu_cores_available = len(gpu_available_ids)
+                gpu_list = sys_utils.get_gpu_list()
+                device_info_json = {
+                    "edge_id": listen_edge_id,
+                    "memoryTotal": round(total_mem * MLOpsUtils.BYTES_TO_GB, 2),
+                    "memoryAvailable": round(free_mem * MLOpsUtils.BYTES_TO_GB, 2),
+                    "diskSpaceTotal": round(total_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
+                    "diskSpaceAvailable": round(free_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
+                    "cpuUtilization": round(cup_utilization, 2),
+                    "cpuCores": cpu_cores,
+                    "gpuCoresTotal": gpu_cores_total,
+                    "gpuCoresAvailable": gpu_cores_available,
+                    "gpu_available_ids": gpu_available_ids,
+                    "gpu_list": gpu_list,
+                    "node_ip": host_ip,
+                    "node_port": host_port,
+                    "networkTraffic": sent_bytes + recv_bytes,
+                    "updateTime": int(MLOpsUtils.get_ntp_time()),
+                    "fedml_version": fedml.__version__,
+                    "user_id": self.args.user_name
+                }
+            if need_running_process_list:
+                device_info_json["run_process_list_map"] = self.get_all_run_process_list_map()
+            salve_device_ids = list()
+            if self.model_device_client_edge_id_list is not None and \
+                    isinstance(self.model_device_client_edge_id_list, list):
+                for model_client_edge_id in self.model_device_client_edge_id_list:
+                    salve_device_ids.append(model_client_edge_id)
+            response_payload = {"slave_device_id": None if len(salve_device_ids) <= 0 else salve_device_ids[0],
+                                "slave_device_id_list": salve_device_ids,
+                                "master_device_id": self.model_device_server_id,
+                                "run_id": run_id, "edge_id": listen_edge_id,
+                                "edge_info": device_info_json}
+            if context is not None:
+                response_payload["context"] = context
+            self.message_center.send_message(response_topic, json.dumps(response_payload), run_id=run_id)
+
+    def callback_client_logout(self, topic, payload):
+        payload_json = json.loads(payload)
+        secret = payload_json.get("auth", None)
+        if secret is None or str(secret) != "246b1be6-0eeb-4b17-b118-7d74de1975d4":
+            return
+        logging.info("Received the logout request.")
+        for runner in self.job_runners:
+            runner.trigger_stop_event()
+        self.disable_client_login = True
+        time.sleep(3)
+        os.system("fedml logout")
+
+    def callback_response_device_status_in_job(self, topic, payload):
+        # Parse the parameters
+        payload_json = json.loads(payload)
+        run_id = payload_json.get("run_id", None)
+        job_status = payload_json.get("status", None)
+        edge_id = payload_json.get("edge_id", None)
+
+        # process the status
+        self.process_status(run_id, job_status, edge_id)
+
+    def callback_response_job_status(self, topic, payload):
+        # Parse the parameters
+        payload_json = json.loads(payload)
+        run_id = payload_json.get("run_id", None)
+        master_agent = payload_json.get("master_agent", None)
+        job_status = payload_json.get("job_status", None)
+        fedml_version = payload_json.get("fedml_version", None)
+        edge_id = payload_json.get("edge_id", None)
+
+        # process the status
+        self.process_status(run_id, job_status, edge_id)
+
+    def callback_broadcasted_job_status(self, topic, payload):
+        # Parse the parameters
+        payload_json = json.loads(payload)
+        run_id = payload_json.get("run_id", None)
+        job_status = payload_json.get("status", None)
+
+        # process the status
+        self.process_status(run_id, job_status, self.edge_id)
+
+    def generate_protocol_manager(self):
+        message_status_runner = self._generate_protocol_manager_instance(
+            self.args, agent_config=self.agent_config
+        )
+        message_status_runner.request_json = self.request_json
+        message_status_runner.disable_client_login = self.disable_client_login
+        message_status_runner.message_center_name = self.message_center_name
+        message_status_runner.run_id = self.run_id
+        message_status_runner.edge_id = self.edge_id
+        message_status_runner.edge_user_name = self.edge_user_name
+        message_status_runner.edge_extra_url = self.edge_extra_url
+        message_status_runner.server_agent_id = self.server_agent_id
+        message_status_runner.current_device_id = self.current_device_id
+        message_status_runner.unique_device_id = self.unique_device_id
+        message_status_runner.subscribed_topics = self.subscribed_topics
+        message_status_runner.running_request_json = self.running_request_json
+        message_status_runner.request_json = self.start_request_json
+        message_status_runner.user_name = self.user_name
+        message_status_runner.general_edge_id = self.general_edge_id
+        message_status_runner.server_id = self.server_id
+        message_status_runner.model_device_server_id = self.model_device_server_id
+        message_status_runner.model_device_client_edge_id_list = self.model_device_client_edge_id_list
+        message_status_runner.status_queue = self.get_status_queue()
+
+        return message_status_runner
+
+    def process_status(self, run_id, status, edge_id):
+        run_id_str = str(run_id)
+
+        # Process the completed status
+        if status == GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \
+                status == GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \
+                status == GeneralConstants.MSG_MLOPS_CLIENT_STATUS_KILLED:
+            if self.job_runners.get(run_id_str, None) is not None:
+                self.job_runners[run_id_str].trigger_completed_event()
+
+            # Stop the sys perf process
+            # noinspection PyBoardException
+            try:
+                self.mlops_metrics.stop_sys_perf()
+            except Exception as ex:
+                logging.error(f"Failed to stop sys perf with Exception {ex}. Traceback: {traceback.format_exc()}")
+                pass
+
+            # Stop the user process
+            try:
+                GeneralConstants.cleanup_learning_process(run_id)
+                GeneralConstants.cleanup_bootstrap_process(run_id)
+                GeneralConstants.cleanup_run_process(run_id)
+            except Exception as e:
+                logging.error(
+                    f"Failed to cleanup run when finished with Exception {e}. Traceback: {traceback.format_exc()}")
+                pass
+
+            # Get the running json.
+            running_json = self.running_request_json.get(run_id_str)
+            if running_json is None:
+                try:
+                    current_job = FedMLClientDataInterface.get_instance().get_job_by_id(run_id)
+                    running_json = json.loads(current_job.running_json)
+                except Exception as e:
+                    logging.error(f"Failed to get running json with Exception {e}. Traceback: {traceback.format_exc()}")
+
+            # Cleanup the containers and release the gpu ids.
+            if running_json is not None:
+                job_type = JobRunnerUtils.parse_job_type(running_json)
+                if not SchedulerConstants.is_deploy_job(job_type):
+                    logging.info(f"[run/device][{run_id}/{edge_id}] Release gpu resource when run ended.")
+                    self._get_job_runner_manager().cleanup_containers_and_release_gpus(run_id, edge_id)
+
+            # Stop the runner process
+            run_process = self._get_job_runner_manager().get_runner_process(run_id)
+            if run_process is not None:
+                if run_process.pid is not None:
+                    RunProcessUtils.kill_process(run_process.pid)
+
+                    # Terminate the run docker container if exists
+                    try:
+                        container_name = JobRunnerUtils.get_run_container_name(run_id)
+                        docker_client = JobRunnerUtils.get_docker_client(DockerArgs())
+                        logging.info(f"Terminating the run docker container {container_name} if exists...")
+                        JobRunnerUtils.remove_run_container_if_exists(container_name, docker_client)
+                    except Exception as e:
+                        logging.error(f"Error occurred when terminating docker container."
+                                      f"Exception: {e}, Traceback: {traceback.format_exc()}.")
+
+            # Stop log processor for current run
+            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, edge_id)
+
+    def setup_listener_job_status(self, run_id):
+        # Setup MQTT message listener to receive the job status from master agent;
+        topic_job_status_from_master = f"master_agent/slave_agent/job_status/{run_id}"
+        self.add_message_listener(topic_job_status_from_master, self.callback_broadcasted_job_status)
+        self.subscribe_msg(topic_job_status_from_master)
+
+    def remove_listener_job_status(self, run_id):
+        # Remove MQTT message listener from master agent;
+        topic_job_status_from_master = f"master_agent/slave_agent/job_status/{run_id}"
+        self.remove_message_listener(topic_job_status_from_master)
+        self.unsubscribe_msg(topic_job_status_from_master)
+
+    def sync_run_stop_status(self, run_status=GeneralConstants.MSG_MLOPS_CLIENT_STATUS_KILLED):
+        try:
+            self.status_reporter.report_client_id_status(
+                self.edge_id, run_status, server_id=self.server_id, run_id=self.run_id)
+        except Exception as e:
+            logging.error(f"Failed to sync run stop status with Exception {e}. Traceback: {traceback.format_exc()}")
+            pass
+
+    def get_all_run_process_list_map(self):
+        run_process_dict = dict()
+        all_runner_pid_dict = self._get_job_runner_manager().get_all_runner_pid_map()
+        if all_runner_pid_dict is None:
+            return run_process_dict
+        for run_id_str, process in all_runner_pid_dict.items():
+            cur_run_process_list = GeneralConstants.get_learning_process_list(run_id_str)
+            run_process_dict[run_id_str] = cur_run_process_list
+
+        return run_process_dict
+
+    def stop_job(self, run_id):
+        run_id_str = str(run_id)
+        if self.job_runners.get(run_id_str, None) is not None:
+            self.job_runners[run_id_str].trigger_stop_event()
+
+    @staticmethod
+    def get_start_train_topic_with_edge_id(edge_id):
+        return "flserver_agent/" + str(edge_id) + "/start_train"
+
+    @abstractmethod
+    def _generate_protocol_manager_instance(self, args, agent_config=None):
+        return None
diff --git a/python/fedml/computing/scheduler/slave/client_login.py b/python/fedml/computing/scheduler/slave/client_login.py
index c8123a717c..37a6dc8064 100755
--- a/python/fedml/computing/scheduler/slave/client_login.py
+++ b/python/fedml/computing/scheduler/slave/client_login.py
@@ -1,332 +1,11 @@
-
 import argparse
-import json
-import logging
 import os
-import platform
-import subprocess
-import time
-import traceback
-
-import click
 import fedml
-from fedml.computing.scheduler.comm_utils import sys_utils
-from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants
-from fedml.computing.scheduler.slave.client_runner import FedMLClientRunner
-from fedml.computing.scheduler.slave.client_constants import ClientConstants
-from fedml.core.mlops.mlops_runtime_log import MLOpsRuntimeLog
-from fedml.core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
-
-
-def init_logs(args, edge_id):
-    # Init runtime logs
-    args.log_file_dir = ClientConstants.get_log_file_dir()
-    args.run_id = 0
-    args.role = "client"
-    client_ids = list()
-    client_ids.append(edge_id)
-    args.client_id_list = json.dumps(client_ids)
-    setattr(args, "using_mlops", True)
-    MLOpsRuntimeLog.get_instance(args).init_logs()
-
-
-def __login_as_client(args, userid, api_key="", use_extra_device_id_suffix=None, role="client"):
-    setattr(args, "account_id", userid)
-    setattr(args, "current_running_dir", ClientConstants.get_fedml_home_dir())
-
-    sys_name = platform.system()
-    if sys_name == "Darwin":
-        sys_name = "MacOS"
-    if hasattr(args, "os_name") and args.os_name is not None and args.os_name != "":
-        pass
-    else:
-        setattr(args, "os_name", sys_name)
-    version = fedml.get_env_version()
-    setattr(args, "version", version)
-    setattr(args, "log_file_dir", ClientConstants.get_log_file_dir())
-    is_from_docker = False
-    if hasattr(args, "device_id") and args.device_id is not None and args.device_id != "0":
-        setattr(args, "current_device_id", args.device_id)
-        is_from_docker = True
-    else:
-        is_gpu_supplier = (role == ClientConstants.login_role_list[ClientConstants.LOGIN_MODE_GPU_SUPPLIER_INDEX])
-        setattr(args, "current_device_id", FedMLClientRunner.get_device_id(use_machine_id=is_gpu_supplier))
-    setattr(args, "config_version", version)
-    setattr(args, "cloud_region", "")
-
-    # Create client runner for communication with the FedML server.
-    runner = FedMLClientRunner(args)
-
-    # Fetch configs from the MLOps config server.
-    service_config = dict()
-    config_try_count = 0
-    edge_id = 0
-    while config_try_count < 5:
-        try:
-            mqtt_config, s3_config, mlops_config, docker_config = runner.fetch_configs()
-            service_config["mqtt_config"] = mqtt_config
-            service_config["s3_config"] = s3_config
-            service_config["ml_ops_config"] = mlops_config
-            service_config["docker_config"] = docker_config
-            runner.agent_config = service_config
-            # click.echo("service_config = {}".format(service_config))
-            log_server_url = mlops_config.get("LOG_SERVER_URL", None)
-            if log_server_url is not None:
-                setattr(args, "log_server_url", log_server_url)
-                setattr(runner.args, "log_server_url", log_server_url)
-            break
-        except Exception as e:
-            click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_1, traceback.format_exc()))
-            click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING)
-            config_try_count += 1
-            time.sleep(3)
-            continue
-
-    if config_try_count >= 5:
-        click.echo("")
-        click.echo("[1] Oops, you failed to login the FedML MLOps platform.")
-        click.echo("Please check whether your network is normal!")
-        return
-
-    # Judge whether running from fedml docker hub
-    is_from_fedml_docker_hub = False
-    dock_loc_file = ClientConstants.get_docker_location_file()
-    if os.path.exists(dock_loc_file):
-        is_from_fedml_docker_hub = True
-
-    # Build unique device id
-    if is_from_docker:
-        unique_device_id = args.current_device_id + "@" + args.os_name + ".Docker.Edge.Device"
-    else:
-        unique_device_id = args.current_device_id + "@" + args.os_name + ".Edge.Device"
-    if is_from_fedml_docker_hub:
-        unique_device_id = args.current_device_id + "@" + args.os_name + ".DockerHub.Edge.Device"
-
-    if use_extra_device_id_suffix is not None:
-        unique_device_id = args.current_device_id + "@" + args.os_name + use_extra_device_id_suffix
-
-    # Bind account id to FedML® Nexus AI Platform
-    register_try_count = 0
-    edge_id = -1
-    user_name = None
-    extra_url = None
-    general_edge_id = None
-    while register_try_count < 5:
-        try:
-            edge_id, user_name, extra_url, general_edge_id = runner.bind_account_and_device_id(
-                service_config["ml_ops_config"]["EDGE_BINDING_URL"], args.account_id, unique_device_id, args.os_name,
-                api_key=api_key, role=role
-            )
-            if edge_id > 0:
-                runner.edge_id = edge_id
-                runner.edge_user_name = user_name
-                runner.edge_extra_url = extra_url
-                break
-        except SystemExit as e:
-            click.echo("Your account does not exist. Please make sure your account correct.")
-            os.system("fedml logout -c")
-            return
-        except Exception as e:
-            click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_2, traceback.format_exc()))
-            click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING)
-            register_try_count += 1
-            time.sleep(3)
-            continue
-
-    if edge_id <= 0:
-        click.echo("")
-        click.echo("[2] Oops, you failed to login the FedML MLOps platform.")
-        click.echo("Please check whether your network is normal!")
-        return
-
-    # Init runtime logs
-    setattr(args, "client_id", edge_id)
-    setattr(args, "is_from_docker", is_from_docker)
-    runner.args = args
-    init_logs(args, edge_id)
-    # logging.info("args {}".format(args))
-
-    # Log arguments and binding results.
-    # logging.info("login: unique_device_id = %s" % str(unique_device_id))
-    # logging.info("login: edge_id = %s" % str(edge_id))
-    runner.unique_device_id = unique_device_id
-    runner.user_name = user_name
-    runner.general_edge_id = general_edge_id
-    ClientConstants.save_runner_infos(args.current_device_id + "." + args.os_name, edge_id, run_id=0)
-
-    # Setup MQTT connection for communication with the FedML server.
-    try:
-        runner.setup_agent_mqtt_connection(service_config)
-    except Exception as e:
-        login_exit_file = os.path.join(ClientConstants.get_log_file_dir(), "exited.log")
-        with open(login_exit_file, "w") as f:
-            f.writelines(f"{os.getpid()}.")
-        print("finally")
-        runner.stop_agent()
-        raise e
-
-    # Start mqtt looper
-    runner.start_agent_mqtt_loop()
-
-
-def __login_as_simulator(args, userid, mqtt_connection=True):
-    setattr(args, "account_id", userid)
-    setattr(args, "current_running_dir", ClientConstants.get_fedml_home_dir())
-
-    sys_name = platform.system()
-    if sys_name == "Darwin":
-        sys_name = "MacOS"
-    setattr(args, "os_name", sys_name)
-    version = fedml.get_env_version()
-    setattr(args, "version", version)
-    setattr(args, "log_file_dir", ClientConstants.get_log_file_dir())
-    setattr(args, "device_id", FedMLClientRunner.get_device_id())
-    setattr(args, "current_device_id", FedMLClientRunner.get_device_id())
-    setattr(args, "config_version", version)
-    setattr(args, "cloud_region", "")
-
-
-    # Create client runner for communication with the FedML server.
-    runner = FedMLClientRunner(args)
-
-    # Fetch configs from the MLOps config server.
-    service_config = dict()
-    config_try_count = 0
-    edge_id = 0
-    while config_try_count < 5:
-        try:
-            mqtt_config, s3_config, mlops_config, docker_config = runner.fetch_configs()
-            service_config["mqtt_config"] = mqtt_config
-            service_config["s3_config"] = s3_config
-            service_config["ml_ops_config"] = mlops_config
-            service_config["docker_config"] = docker_config
-            runner.agent_config = service_config
-            log_server_url = mlops_config.get("LOG_SERVER_URL", None)
-            if log_server_url is not None:
-                setattr(args, "log_server_url", log_server_url)
-                setattr(runner.args, "log_server_url", log_server_url)
-            break
-        except Exception as e:
-            config_try_count += 1
-            time.sleep(3)
-            continue
-
-    if config_try_count >= 5:
-        click.echo("")
-        click.echo("[3] Oops, you failed to login the FedML MLOps platform.")
-        click.echo("Please check whether your network is normal!")
-        return False, edge_id, args
-
-    # Build unique device id
-    if args.device_id is not None and len(str(args.device_id)) > 0:
-        unique_device_id = args.device_id + "@" + args.os_name + ".Edge.Simulator"
-
-    # Bind account id to FedML® Nexus AI Platform
-    register_try_count = 0
-    edge_id = -1
-    user_name = None
-    extra_url = None
-    general_edge_id = None
-    while register_try_count < 5:
-        try:
-            edge_id, _, _, _ = runner.bind_account_and_device_id(
-                service_config["ml_ops_config"]["EDGE_BINDING_URL"], args.account_id,
-                unique_device_id, args.os_name, role="simulator"
-            )
-            if edge_id > 0:
-                runner.edge_id = edge_id
-                break
-        except SystemExit as e:
-            click.echo("Your account does not exist. Please make sure your account correct.")
-            os.system("fedml logout -c")
-            return
-        except Exception as e:
-            register_try_count += 1
-            time.sleep(3)
-            continue
-
-    if edge_id <= 0:
-        click.echo("")
-        click.echo("[4] Oops, you failed to login the FedML MLOps platform.")
-        click.echo("Please check whether your network is normal!")
-        return False, edge_id, args
-
-    # Init runtime logs
-    setattr(args, "client_id", edge_id)
-    runner.args = args
-    #init_logs(args, edge_id)
-    logging.info("args {}".format(args))
-
-    # Log arguments and binding results.
-    logging.info("login: unique_device_id = %s" % str(unique_device_id))
-    logging.info("login: edge_id = %s" % str(edge_id))
-    runner.unique_device_id = unique_device_id
-
-    if mqtt_connection:
-        ClientConstants.save_runner_infos(args.device_id + "." + args.os_name, edge_id, run_id=0)
-
-        # Setup MQTT connection for communication with the FedML server.
-        try:
-            runner.setup_agent_mqtt_connection(service_config)
-        except Exception as e:
-            pass
-
-        # Open simulator daemon process to process run status.
-        simulator_daemon_cmd = os.path.join(os.path.dirname(__file__), "simulator_daemon.py")
-        env_version = fedml.get_env_version()
-        simulator_daemon_process = sys_utils.run_subprocess_open(
-            [
-                sys_utils.get_python_program(),
-                simulator_daemon_cmd,
-                "-t",
-                "login",
-                "-u",
-                str(args.user),
-                "-v",
-                env_version,
-                "-r",
-                args.role,
-                "-id",
-                args.device_id,
-                "-os",
-                args.os_name,
-                "-rk",
-                "1",
-                "-lfd",
-                args.log_file_dir,
-                "-cf",
-                env_version,
-                "-ci",
-                str(edge_id)
-            ]
-        ).pid
-
-        # Start mqtt looper
-        runner.start_agent_mqtt_loop()
-
-    return True, edge_id, args
-
-
-def login(args):
-    if args.role == ClientConstants.login_role_list[ClientConstants.LOGIN_MODE_CLIENT_INDEX]:
-        __login_as_client(args, args.user, api_key=args.api_key)
-    elif args.role == ClientConstants.login_role_list[ClientConstants.LOGIN_MODE_GPU_SUPPLIER_INDEX]:
-        if args.no_gpu_check == 0:
-            gpu_count, _ = sys_utils.get_gpu_count_vendor()
-            if gpu_count <= 0:
-                click.echo("We can't find any gpu device on your machine. \n"
-                           "With the gpu_supplier(-g) option, you need to check if your machine "
-                           "has nvidia GPUs and installs CUDA related drivers.")
-                return
-        __login_as_client(args, args.user, api_key=args.api_key,
-                          use_extra_device_id_suffix=".Edge.GPU.Supplier", role=args.role)
-    elif args.role == ClientConstants.login_role_list[ClientConstants.LOGIN_MODE_EDGE_SIMULATOR_INDEX]:
-        __login_as_simulator(args, args.user)
+from fedml.computing.scheduler.slave.slave_agent import FedMLLaunchSlaveAgent
 
 
 def logout():
-    ClientConstants.cleanup_run_process(None)
-    sys_utils.cleanup_all_fedml_client_api_processes()
+    FedMLLaunchSlaveAgent.logout()
 
 
 if __name__ == "__main__":
@@ -351,15 +30,17 @@ def logout():
     if args.api_key == "":
         args.api_key = args.user
 
+    fedml.set_env_version("test")
+
     if args.local_on_premise_platform_host != "127.0.0.1":
         fedml.set_local_on_premise_platform_host(args.local_on_premise_platform_host)
     if args.local_on_premise_platform_port != 80:
         fedml.set_local_on_premise_platform_port(args.local_on_premise_platform_port)
 
     fedml.set_env_version(args.version)
+    slave_agent = FedMLLaunchSlaveAgent()
     if args.type == 'login':
-        login(args)
+        slave_agent.login(args.api_key, api_key=args.api_key, device_id=args.device_id,
+                          os_name=args.os_name, role=args.role)
     else:
-        logout()
-
-
+        FedMLLaunchSlaveAgent.logout()
diff --git a/python/fedml/computing/scheduler/slave/client_runner.py b/python/fedml/computing/scheduler/slave/client_runner.py
deleted file mode 100755
index aac57d2174..0000000000
--- a/python/fedml/computing/scheduler/slave/client_runner.py
+++ /dev/null
@@ -1,1775 +0,0 @@
-import json
-import logging
-import multiprocessing
-import sys
-
-from multiprocessing import Process
-import os
-import platform
-import shutil
-import subprocess
-import threading
-
-import time
-import traceback
-import urllib
-import uuid
-import zipfile
-from urllib.parse import urljoin, urlparse
-
-import requests
-
-import fedml
-from ..comm_utils.constants import SchedulerConstants
-from ..comm_utils.job_cleanup import JobCleanup
-from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs
-from ..comm_utils.run_process_utils import RunProcessUtils
-from ..scheduler_entry.constants import Constants
-from ....core.mlops.mlops_device_perfs import MLOpsDevicePerfStats
-from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
-
-from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager
-from ..comm_utils.yaml_utils import load_yaml_config
-from .client_constants import ClientConstants
-
-from ....core.mlops.mlops_metrics import MLOpsMetrics
-
-from ....core.mlops.mlops_configs import MLOpsConfigs
-from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
-from ....core.mlops.mlops_status import MLOpsStatus
-from ..comm_utils.sys_utils import get_sys_runner_info, get_python_program
-from .client_data_interface import FedMLClientDataInterface
-from ..comm_utils import sys_utils
-from ....core.mlops.mlops_utils import MLOpsUtils
-from ..model_scheduler.model_device_client import FedMLModelDeviceClientRunner
-from ..model_scheduler.model_device_server import FedMLModelDeviceServerRunner
-from ..comm_utils import security_utils
-from ..scheduler_core.compute_cache_manager import ComputeCacheManager
-from ..scheduler_core.message_center import FedMLMessageCenter
-
-
-class RunnerError(Exception):
-    """ Runner stopped. """
-    pass
-
-
-class RunnerCompletedError(Exception):
-    """ Runner completed. """
-    pass
-
-
-class FedMLClientRunner(FedMLMessageCenter):
-
-    def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id=0,
-                 cuda_visible_gpu_ids_str=None):
-        super().__init__()
-        self.model_device_server_id = None
-        self.model_device_client_edge_id_list = None
-        self.disable_client_login = False
-        self.model_device_server = None
-        self.model_device_client_list = None
-        self.run_process_event = None
-        self.run_process_event_map = dict()
-        self.run_process_completed_event = None
-        self.run_process_completed_event_map = dict()
-        self.run_process = None
-        self.run_process_map = dict()
-        self.running_request_json = dict()
-        self.local_api_process = None
-        self.start_request_json = None
-        self.device_status = None
-        self.current_training_status = None
-        self.mqtt_mgr = None
-        self.edge_id = edge_id
-        self.edge_user_name = None
-        self.edge_extra_url = None
-        self.run_id = run_id
-        self.unique_device_id = None
-        self.args = args
-        self.request_json = request_json
-        self.version = args.version
-        self.device_id = args.device_id
-        self.cur_dir = os.path.split(os.path.realpath(__file__))[0]
-        if args.current_running_dir is not None:
-            self.cur_dir = args.current_running_dir
-        self.sudo_cmd = ""
-        self.is_mac = False
-        if platform.system() == "Darwin":
-            self.is_mac = True
-
-        self.agent_config = agent_config
-        self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data")
-        self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data")
-        self.fedml_data_dir = self.fedml_data_base_package_dir
-        self.fedml_config_dir = os.path.join("/", "fedml", "conf")
-
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {
-            "${FEDSYS.RUN_ID}": "",
-            "${FEDSYS.PRIVATE_LOCAL_DATA}": "",
-            "${FEDSYS.CLIENT_ID_LIST}": "",
-            "${FEDSYS.SYNTHETIC_DATA_URL}": "",
-            "${FEDSYS.IS_USING_LOCAL_DATA}": "",
-            "${FEDSYS.CLIENT_NUM}": "",
-            "${FEDSYS.CLIENT_INDEX}": "",
-            "${FEDSYS.CLIENT_OBJECT_LIST}": "",
-            "${FEDSYS.LOG_SERVER_URL}": "",
-        }
-
-        self.mlops_metrics = None
-        self.client_active_list = dict()
-        self.ntp_offset = MLOpsUtils.get_ntp_offset()
-        self.server_id = None
-        self.computing_started_time = 0
-        self.fedml_config_object = None
-        self.package_type = SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT
-        self.cuda_visible_gpu_ids_str = cuda_visible_gpu_ids_str
-        # logging.info("Current directory of client agent: " + self.cur_dir)
-        self.subscribed_topics = list()
-        self.user_name = None
-        self.general_edge_id = None
-        self.message_center = None
-
-    def __repr__(self):
-        return "<{klass} @{id:x} {attrs}>".format(
-            klass=self.__class__.__name__,
-            id=id(self) & 0xFFFFFF,
-            attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()),
-        )
-
-    def copy_runner(self):
-        copy_runner = FedMLClientRunner(self.args)
-        copy_runner.disable_client_login =  self.disable_client_login
-        copy_runner.model_device_server = self.model_device_server
-        copy_runner.model_device_client_list = self.model_device_client_list
-        copy_runner.run_process_event = self.run_process_event
-        copy_runner.run_process_event_map = self.run_process_event_map
-        copy_runner.run_process_completed_event = self.run_process_completed_event
-        copy_runner.run_process_completed_event_map = self.run_process_completed_event_map
-        copy_runner.run_process = self.run_process
-        copy_runner.run_process_map = self.run_process_map
-        copy_runner.running_request_json = self.running_request_json
-        copy_runner.local_api_process = self.local_api_process
-        copy_runner.start_request_json = self.start_request_json
-        copy_runner.device_status = self.device_status
-        copy_runner.current_training_status = self.current_training_status
-        copy_runner.mqtt_mgr = self.mqtt_mgr
-        copy_runner.edge_id = self.edge_id
-        copy_runner.edge_user_name = self.edge_user_name
-        copy_runner.edge_extra_url = self.edge_extra_url
-        copy_runner.run_id = self.run_id
-        copy_runner.unique_device_id = self.unique_device_id
-        copy_runner.args = self.args
-        copy_runner.request_json = self.request_json
-        copy_runner.version =self.version
-        copy_runner.device_id = self.device_id
-        copy_runner.cur_dir = self.cur_dir
-        copy_runner.cur_dir = self.cur_dir
-        copy_runner.sudo_cmd = self.sudo_cmd
-        copy_runner.is_mac = self.is_mac
-
-        copy_runner.agent_config = self.agent_config
-        copy_runner.fedml_data_base_package_dir = self.fedml_data_base_package_dir
-        copy_runner.fedml_data_local_package_dir = self.fedml_data_local_package_dir
-        copy_runner.fedml_data_dir = self.fedml_data_dir
-        copy_runner.fedml_config_dir = self.fedml_config_dir
-
-        copy_runner.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES
-
-        copy_runner.mlops_metrics = self.mlops_metrics
-        copy_runner.client_active_list = self.client_active_list
-        copy_runner.ntp_offset = self.ntp_offset
-        copy_runner.server_id = self.server_id
-        copy_runner.computing_started_time = self.computing_started_time
-        copy_runner.fedml_config_object = self.fedml_config_object
-        copy_runner.package_type = self.package_type
-        copy_runner.cuda_visible_gpu_ids_str = self.cuda_visible_gpu_ids_str
-        copy_runner.subscribed_topics = self.subscribed_topics
-        copy_runner.user_name = self.user_name
-        copy_runner.general_edge_id = self.general_edge_id
-        copy_runner.message_center = self.message_center
-
-        return copy_runner
-
-    def build_dynamic_constrain_variables(self, run_id, run_config):
-        data_config = run_config.get("data_config", {})
-        server_edge_id_list = self.request_json["edgeids"]
-        local_edge_id_list = list()
-        local_edge_id_list.append(int(self.edge_id))
-        is_using_local_data = 0
-        private_data_dir = data_config.get("privateLocalData", "")
-        synthetic_data_url = data_config.get("syntheticDataUrl", "")
-        edges = self.request_json["edges"]
-        # if private_data_dir is not None \
-        #         and len(str(private_data_dir).strip(' ')) > 0:
-        #     is_using_local_data = 1
-        if private_data_dir is None or len(str(private_data_dir).strip(" ")) <= 0:
-            params_config = run_config.get("parameters", None)
-            private_data_dir = ClientConstants.get_data_dir()
-        if synthetic_data_url is None or len(str(synthetic_data_url)) <= 0:
-            synthetic_data_url = private_data_dir
-
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.RUN_ID}"] = run_id
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.PRIVATE_LOCAL_DATA}"] = private_data_dir.replace(" ", "")
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_ID_LIST}"] = str(local_edge_id_list).replace(" ", "")
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.SYNTHETIC_DATA_URL}"] = synthetic_data_url.replace(" ", "")
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.IS_USING_LOCAL_DATA}"] = str(is_using_local_data)
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_NUM}"] = len(server_edge_id_list)
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_INDEX}"] = 1
-        for cur_index, id_value in enumerate(server_edge_id_list):
-            if str(id_value) == str(self.edge_id):
-                self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_INDEX}"] = cur_index + 1
-                break
-        client_objects = str(json.dumps(edges))
-        client_objects = client_objects.replace(" ", "").replace("\n", "").replace('"', '\\"')
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_OBJECT_LIST}"] = client_objects
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.LOG_SERVER_URL}"] = self.agent_config["ml_ops_config"][
-            "LOG_SERVER_URL"
-        ]
-
-    def unzip_file(self, zip_file, unzip_file_path) -> str:
-        if zipfile.is_zipfile(zip_file):
-            with zipfile.ZipFile(zip_file, "r") as zipf:
-                zipf.extractall(unzip_file_path)
-                unzipped_file_name = zipf.namelist()[0]
-        else:
-            raise Exception("Invalid zip file {}".format(zip_file))
-
-        return unzipped_file_name
-
-    def package_download_progress(self, count, blksize, filesize):
-        self.check_runner_stop_event()
-
-        downloaded = count * blksize
-        downloaded = filesize if downloaded > filesize else downloaded
-        progress = (downloaded / filesize * 100) if filesize != 0 else 0
-        progress_int = int(progress)
-        downloaded_kb = format(downloaded / 1024, '.2f')
-
-        # since this hook funtion is stateless, we need a state to avoid print progress repeatly
-        if count == 0:
-            self.prev_download_progress = 0
-        if progress_int != self.prev_download_progress and progress_int % 5 == 0:
-            self.prev_download_progress = progress_int
-            logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int))
-
-    def retrieve_and_unzip_package(self, package_name, package_url):
-        local_package_path = ClientConstants.get_package_download_dir()
-        os.makedirs(local_package_path, exist_ok=True)
-        filename, filename_without_extension, file_extension = ClientConstants.get_filename_and_extension(package_url)
-        local_package_file = os.path.join(local_package_path, f"fedml_run_{self.run_id}_{filename_without_extension}")
-        if os.path.exists(local_package_file):
-            os.remove(local_package_file)
-        package_url_without_query_path = urljoin(package_url, urlparse(package_url).path)
-        urllib.request.urlretrieve(package_url_without_query_path, local_package_file,
-                                   reporthook=self.package_download_progress)
-        unzip_package_path = os.path.join(ClientConstants.get_package_unzip_dir(),
-                                          f"unzip_fedml_run_{self.run_id}_{filename_without_extension}")
-        try:
-            shutil.rmtree(unzip_package_path, ignore_errors=True)
-        except Exception as e:
-            logging.error(
-                f"Failed to remove directory {unzip_package_path}, Exception: {e}, Traceback: {traceback.format_exc()}")
-            pass
-
-        package_dir_name = self.unzip_file(local_package_file, unzip_package_path)  # Using unziped folder name
-        unzip_package_full_path = os.path.join(unzip_package_path, package_dir_name)
-
-        logging.info("local_package_file {}, unzip_package_path {}, unzip file full path {}".format(
-            local_package_file, unzip_package_path, unzip_package_full_path))
-
-        return unzip_package_full_path
-
-    def update_local_fedml_config(self, run_id, run_config):
-        packages_config = run_config["packages_config"]
-
-        # Copy config file from the client
-        unzip_package_path = self.retrieve_and_unzip_package(
-            packages_config["linuxClient"], packages_config["linuxClientUrl"]
-        )
-        fedml_local_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml")
-
-        # Load the above config to memory
-        config_from_container = load_yaml_config(fedml_local_config_file)
-        container_entry_file_config = config_from_container["entry_config"]
-        container_dynamic_args_config = config_from_container["dynamic_args"]
-        entry_file = container_entry_file_config["entry_file"]
-        conf_file = container_entry_file_config["conf_file"]
-        self.package_type = container_entry_file_config.get("package_type", SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT)
-        full_conf_path = os.path.join(unzip_package_path, "fedml", "config", os.path.basename(conf_file))
-
-        # Dynamically build constrain variable with realtime parameters from server
-        self.build_dynamic_constrain_variables(run_id, run_config)
-
-        # Update entry arguments value with constrain variable values with realtime parameters from server
-        # currently we support the following constrain variables:
-        # ${FEDSYS_RUN_ID}: a run id represented one entire Federated Learning flow
-        # ${FEDSYS_PRIVATE_LOCAL_DATA}: private local data path in the Federated Learning client
-        # ${FEDSYS_CLIENT_ID_LIST}: client list in one entire Federated Learning flow
-        # ${FEDSYS_SYNTHETIC_DATA_URL}: synthetic data url from server,
-        #                  if this value is not null, the client will download data from this URL to use it as
-        #                  federated training data set
-        # ${FEDSYS_IS_USING_LOCAL_DATA}: whether use private local data as federated training data set
-        # container_dynamic_args_config["data_cache_dir"] = "${FEDSYS.PRIVATE_LOCAL_DATA}"
-        for constrain_variable_key, constrain_variable_value in self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES.items():
-            for argument_key, argument_value in container_dynamic_args_config.items():
-                if argument_value is not None and str(argument_value).find(constrain_variable_key) == 0:
-                    replaced_argument_value = str(argument_value).replace(
-                        constrain_variable_key, str(constrain_variable_value)
-                    )
-                    container_dynamic_args_config[argument_key] = replaced_argument_value
-
-        # Merge all container new config sections as new config dictionary
-        package_conf_object = dict()
-        package_conf_object["entry_config"] = container_entry_file_config
-        package_conf_object["dynamic_args"] = container_dynamic_args_config
-        package_conf_object["dynamic_args"]["config_version"] = self.args.config_version
-        container_dynamic_args_config["mqtt_config_path"] = os.path.join(
-            unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["mqtt_config_path"])
-        )
-        container_dynamic_args_config["s3_config_path"] = os.path.join(
-            unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["s3_config_path"])
-        )
-        log_file_dir = ClientConstants.get_log_file_dir()
-        os.makedirs(log_file_dir, exist_ok=True)
-        package_conf_object["dynamic_args"]["log_file_dir"] = log_file_dir
-
-        # Save new config dictionary to local file
-        fedml_updated_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml")
-        ClientConstants.generate_yaml_doc(package_conf_object, fedml_updated_config_file)
-
-        # Build dynamic arguments and set arguments to fedml config object
-        self.build_dynamic_args(run_id, run_config, package_conf_object, unzip_package_path)
-        return unzip_package_path, package_conf_object
-
-    def build_dynamic_args(self, run_id, run_config, package_conf_object, base_dir):
-        fedml_conf_file = package_conf_object["entry_config"]["conf_file"]
-        fedml_conf_file_processed = str(fedml_conf_file).replace('\\', os.sep).replace('/', os.sep)
-        fedml_conf_path = os.path.join(base_dir, "fedml", "config",
-                                       os.path.basename(fedml_conf_file_processed))
-        fedml_conf_object = load_yaml_config(fedml_conf_path)
-        run_params = run_config.get("parameters", {})
-        job_yaml = run_params.get("job_yaml", {})
-
-        # Replace local fedml config objects with parameters from MLOps web
-        parameters_object = run_config.get("parameters", None)
-        if parameters_object is not None:
-            for config_k, config_v in fedml_conf_object.items():
-                parameter_v = parameters_object.get(config_k, None)
-                if parameter_v is not None:
-                    fedml_conf_object[config_k] = parameter_v
-                    parameters_object.pop(config_k)
-
-            for config_k, config_v in parameters_object.items():
-                fedml_conf_object[config_k] = config_v
-
-        package_dynamic_args = package_conf_object["dynamic_args"]
-        if fedml_conf_object.get("comm_args", None) is not None:
-            fedml_conf_object["comm_args"]["mqtt_config_path"] = package_dynamic_args["mqtt_config_path"]
-            fedml_conf_object["comm_args"]["s3_config_path"] = package_dynamic_args["s3_config_path"]
-            fedml_conf_object["common_args"]["using_mlops"] = True
-        if fedml_conf_object.get("train_args", None) is not None:
-            fedml_conf_object["train_args"]["run_id"] = package_dynamic_args["run_id"]
-            fedml_conf_object["train_args"]["client_id_list"] = package_dynamic_args["client_id_list"]
-            fedml_conf_object["train_args"]["client_num_in_total"] = int(package_dynamic_args["client_num_in_total"])
-            fedml_conf_object["train_args"]["client_num_per_round"] = int(package_dynamic_args["client_num_in_total"])
-            fedml_conf_object["train_args"]["client_id"] = self.edge_id
-            fedml_conf_object["train_args"]["server_id"] = self.request_json.get("server_id", "0")
-        if fedml_conf_object.get("device_args", None) is not None:
-            fedml_conf_object["device_args"]["worker_num"] = int(package_dynamic_args["client_num_in_total"])
-        # fedml_conf_object["data_args"]["data_cache_dir"] = package_dynamic_args["data_cache_dir"]
-        data_args = fedml_conf_object.get("data_args")
-        if data_args is not None:
-            data_cache_dir = fedml_conf_object["data_args"].get("data_cache_dir")
-            if data_cache_dir is not None:
-                data_cache_dir = os.path.join(data_cache_dir, str(self.edge_id))
-                fedml_conf_object["data_args"]["data_cache_dir"] = data_cache_dir
-        if fedml_conf_object.get("tracking_args", None) is not None:
-            fedml_conf_object["tracking_args"]["log_file_dir"] = package_dynamic_args["log_file_dir"]
-            fedml_conf_object["tracking_args"]["log_server_url"] = package_dynamic_args["log_server_url"]
-
-        fedml_conf_object["dynamic_args"] = package_dynamic_args
-        self.fedml_config_object = fedml_conf_object.copy()
-        ClientConstants.generate_yaml_doc(fedml_conf_object, fedml_conf_path)
-
-    def run_bootstrap_script(self, bootstrap_cmd_list, bootstrap_script_file):
-        try:
-            logging.info("Bootstrap commands are being executed...")
-            process, error_list = ClientConstants.execute_commands_with_live_logs(bootstrap_cmd_list,
-                                                                                  callback=self.callback_run_bootstrap)
-
-            ret_code, out, err = process.returncode, None, None
-            if ret_code is None or ret_code <= 0:
-                if error_list is not None and len(error_list) > 0:
-                    is_bootstrap_run_ok = False
-                else:
-                    if out is not None:
-                        out_str = sys_utils.decode_our_err_result(out)
-                        if out_str != "":
-                            logging.info("{}".format(out_str))
-
-                    sys_utils.log_return_info(bootstrap_script_file, 0)
-
-                    is_bootstrap_run_ok = True
-            else:
-                if err is not None:
-                    err_str = sys_utils.decode_our_err_result(err)
-                    if err_str != "":
-                        logging.error("{}".format(err_str))
-
-                sys_utils.log_return_info(bootstrap_script_file, ret_code)
-
-                is_bootstrap_run_ok = False
-        except Exception as e:
-            logging.error(f"Bootstrap script error: Exception: {e}, Traceback: {traceback.format_exc()}")
-            is_bootstrap_run_ok = False
-        return is_bootstrap_run_ok
-
-    def callback_run_bootstrap(self, job_pid):
-        ClientConstants.save_bootstrap_process(self.run_id, job_pid)
-
-    def run(self, process_event, completed_event, message_center_queue):
-        print(f"Client runner process id {os.getpid()}, run id {self.run_id}")
-
-        if platform.system() != "Windows":
-            os.setsid()
-
-        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
-        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')
-
-        self.run_process_event = process_event
-        self.run_process_completed_event = completed_event
-        try:
-            MLOpsUtils.set_ntp_offset(self.ntp_offset)
-            self.rebuild_message_center(message_center_queue)
-            self.run_impl()
-        except RunnerError:
-            logging.info("Runner stopped.")
-            self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED)
-        except RunnerCompletedError:
-            logging.info("Runner completed.")
-        except Exception as e:
-            logging.error(f"Runner exited with errors. Exception: {e}, Traceback {traceback.format_exc()}")
-            self.mlops_metrics.report_client_id_status(
-                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
-                server_id=self.server_id, run_id=self.run_id)
-        finally:
-            if self.mlops_metrics is not None:
-                computing_ended_time = MLOpsUtils.get_ntp_time()
-                self.mlops_metrics.report_edge_job_computing_cost(self.run_id, self.edge_id,
-                                                                  self.computing_started_time, computing_ended_time,
-                                                                  self.args.user, self.args.api_key)
-            logging.info("Release resources.")
-            self.cleanup_containers_and_release_gpus(self.run_id, self.edge_id)
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id)
-            if self.mlops_metrics is not None:
-                self.mlops_metrics.stop_sys_perf()
-            time.sleep(3)
-            ClientConstants.cleanup_learning_process(self.run_id)
-            ClientConstants.cleanup_run_process(self.run_id)
-
-    def check_runner_stop_event(self):
-        if self.run_process_event.is_set():
-            logging.info("Received stopping event.")
-            raise RunnerError("Runner stopped")
-
-        if self.run_process_completed_event.is_set():
-            logging.info("Received completed event.")
-            raise RunnerCompletedError("Runner completed")
-
-    def run_impl(self):
-        run_id = self.request_json["runId"]
-        run_config = self.request_json["run_config"]
-        data_config = run_config.get("data_config", {})
-        packages_config = run_config["packages_config"]
-
-        self.computing_started_time = MLOpsUtils.get_ntp_time()
-        self.mlops_metrics.report_edge_job_computing_cost(run_id, self.edge_id,
-                                                          self.computing_started_time, 0,
-                                                          self.args.user, self.args.api_key)
-
-        self.check_runner_stop_event()
-
-        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
-
-        self.mlops_metrics.report_client_id_status(
-            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING,
-            running_json=self.start_request_json, run_id=run_id)
-
-        # get training params
-        private_local_data_dir = data_config.get("privateLocalData", "")
-        is_using_local_data = 0
-        # if private_local_data_dir is not None and len(str(private_local_data_dir).strip(' ')) > 0:
-        #     is_using_local_data = 1
-
-        # start a run according to the hyper-parameters
-        # fedml_local_data_dir = self.cur_dir + "/fedml_data/run_" + run_id_str + "_edge_" + str(edge_id)
-        fedml_local_data_dir = os.path.join(self.cur_dir, "fedml_data")
-        fedml_local_config_dir = os.path.join(self.cur_dir, "fedml_config")
-        if is_using_local_data:
-            fedml_local_data_dir = private_local_data_dir
-        self.fedml_data_dir = self.fedml_data_local_package_dir
-
-        self.check_runner_stop_event()
-
-        logging.info("Download packages")
-
-        # update local config with real time parameters from server and dynamically replace variables value
-        unzip_package_path, fedml_config_object = self.update_local_fedml_config(run_id, run_config)
-        # if unzip_package_path is None or fedml_config_object is None:
-        #     logging.info("failed to update local fedml config.")
-        #     self.check_runner_stop_event()
-        #     # Send failed msg when exceptions.
-        #     self.cleanup_run_when_starting_failed(status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION)
-        #     return
-
-        logging.info("Check downloaded packages...")
-
-        entry_file_config = fedml_config_object["entry_config"]
-        dynamic_args_config = fedml_config_object["dynamic_args"]
-        entry_file = str(entry_file_config["entry_file"]).replace('\\', os.sep).replace('/', os.sep)
-        entry_file = os.path.basename(entry_file)
-        conf_file = entry_file_config["conf_file"]
-        conf_file = str(conf_file).replace('\\', os.sep).replace('/', os.sep)
-        #####
-        # ClientConstants.cleanup_learning_process(run_id)
-        # ClientConstants.cleanup_bootstrap_process(run_id)
-        #####
-
-        if not os.path.exists(unzip_package_path):
-            logging.info("failed to unzip file.")
-            self.check_runner_stop_event()
-            # Send failed msg when exceptions.
-            self.cleanup_run_when_starting_failed(status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION)
-            return
-        os.chdir(os.path.join(unzip_package_path, "fedml"))
-
-        self.check_runner_stop_event()
-
-        logging.info("starting the user process...")
-
-        entry_file_full_path = os.path.join(unzip_package_path, "fedml", entry_file)
-        conf_file_full_path = os.path.join(unzip_package_path, "fedml", conf_file)
-        logging.info("waiting the user process to finish...")
-        logging.info("                          ")
-        logging.info("                          ")
-        logging.info("====Your Run Logs Begin===")
-
-        process, is_launch_task, error_list = self.execute_job_task(unzip_package_path=unzip_package_path,
-                                                                    entry_file_full_path=entry_file_full_path,
-                                                                    conf_file_full_path=conf_file_full_path,
-                                                                    dynamic_args_config=dynamic_args_config,
-                                                                    fedml_config_object=self.fedml_config_object)
-
-        logging.info("====Your Run Logs End===")
-        logging.info("                        ")
-        logging.info("                        ")
-
-        ret_code, out, err = process.returncode if process else None, None, None
-        is_run_ok = sys_utils.is_runner_finished_normally(process.pid)
-        if is_launch_task:
-            is_run_ok = True
-        if error_list is not None and len(error_list) > 0:
-            is_run_ok = False
-        if ret_code is None or ret_code <= 0:
-            self.check_runner_stop_event()
-
-            if is_run_ok:
-                if out is not None:
-                    out_str = sys_utils.decode_our_err_result(out)
-                    if out_str != "":
-                        logging.info("{}".format(out_str))
-
-                self.mlops_metrics.report_client_id_status(
-                    self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
-                    server_id=self.server_id, run_id=run_id)
-
-                if is_launch_task:
-                    sys_utils.log_return_info(f"job {run_id}", ret_code)
-                else:
-                    sys_utils.log_return_info(entry_file, ret_code)
-        else:
-            is_run_ok = False
-
-        if not is_run_ok:
-            # If the run status is killed or finished, then return with the normal state.
-            current_job = FedMLClientDataInterface.get_instance().get_job_by_id(run_id)
-            if current_job is not None and (current_job.status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or
-                                            current_job.status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED):
-                return
-
-            self.check_runner_stop_event()
-
-            logging.error("failed to run the learning process...")
-
-            if err is not None:
-                err_str = sys_utils.decode_our_err_result(err)
-                if err_str != "":
-                    logging.error("{}".format(err_str))
-
-            if is_launch_task:
-                sys_utils.log_return_info(f"job {run_id}", ret_code)
-            else:
-                sys_utils.log_return_info(entry_file, ret_code)
-
-            # Send failed msg when exceptions.
-            self.mlops_metrics.report_client_id_status(
-                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
-                server_id=self.server_id, run_id=run_id)
-
-    def execute_job_task(self, unzip_package_path, entry_file_full_path, conf_file_full_path, dynamic_args_config,
-                         fedml_config_object):
-        run_config = self.request_json["run_config"]
-        run_params = run_config.get("parameters", {})
-        client_rank = self.request_json.get("client_rank", 1)
-        job_yaml = run_params.get("job_yaml", {})
-        job_yaml_default_none = run_params.get("job_yaml", None)
-        job_api_key = job_yaml.get("run_api_key", None)
-        job_api_key = job_yaml.get("fedml_run_dynamic_params", None) if job_api_key is None else job_api_key
-        assigned_gpu_ids = run_params.get("gpu_ids", None)
-        job_type = job_yaml.get("job_type", None)
-        containerize = fedml_config_object.get("containerize", None)
-        image_pull_policy = fedml_config_object.get("image_pull_policy", Constants.IMAGE_PULL_POLICY_ALWAYS)
-        # TODO: Can we remove task_type?
-        job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type
-        conf_file_object = load_yaml_config(conf_file_full_path)
-        entry_args_dict = conf_file_object.get("fedml_entry_args", {})
-        entry_args = entry_args_dict.get("arg_items", None)
-        scheduler_match_info = self.request_json.get("scheduler_match_info", {})
-        if job_type == Constants.JOB_TASK_TYPE_TRAIN:
-            containerize = True if containerize is None else containerize
-
-        # Bootstrap Info
-        bootstrap_script_path, bootstrap_script_dir, bootstrap_script_file = [None] * 3
-        env_args = fedml_config_object.get("environment_args", None)
-
-        if env_args is not None:
-            bootstrap_script_file = env_args.get("bootstrap", None)
-            if bootstrap_script_file is not None:
-                bootstrap_script_file = str(bootstrap_script_file).replace('\\', os.sep).replace('/', os.sep)
-                if platform.system() == 'Windows':
-                    bootstrap_script_file = bootstrap_script_file.rstrip('.sh') + '.bat'
-                if bootstrap_script_file is not None:
-                    bootstrap_script_dir = os.path.join(unzip_package_path, "fedml",
-                                                        os.path.dirname(bootstrap_script_file))
-                    bootstrap_script_path = os.path.join(
-                        bootstrap_script_dir, bootstrap_script_dir, os.path.basename(bootstrap_script_file)
-                    )
-
-        bootstrap_cmd_list = list()
-        if bootstrap_script_path:
-            logging.info("Bootstrap commands are being generated...")
-            bootstrap_cmd_list = JobRunnerUtils.generate_bootstrap_commands(bootstrap_script_path=bootstrap_script_path,
-                                                                            bootstrap_script_dir=bootstrap_script_dir,
-                                                                            bootstrap_script_file=bootstrap_script_file)
-            logging.info(f"Generated following Bootstrap commands: {bootstrap_cmd_list}")
-
-        if not containerize:
-            if len(bootstrap_cmd_list) and not (job_type == Constants.JOB_TASK_TYPE_DEPLOY or
-                                                job_type == Constants.JOB_TASK_TYPE_SERVE):
-                bootstrapping_successful = self.run_bootstrap_script(bootstrap_cmd_list=bootstrap_cmd_list,
-                                                                     bootstrap_script_file=bootstrap_script_file)
-
-                if not bootstrapping_successful:
-                    logging.info("failed to update local fedml config.")
-                    self.check_runner_stop_event()
-                    # Send failed msg when exceptions.
-                    self.cleanup_run_when_starting_failed(status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION)
-                    raise Exception(f"Failed to execute following bootstrap commands: {bootstrap_cmd_list}")
-
-                logging.info("cleanup the previous learning process and bootstrap process...")
-                ClientConstants.cleanup_learning_process(self.request_json["runId"])
-                ClientConstants.cleanup_bootstrap_process(self.request_json["runId"])
-
-        executable_interpreter = ClientConstants.CLIENT_SHELL_PS \
-            if platform.system() == ClientConstants.PLATFORM_WINDOWS else ClientConstants.CLIENT_SHELL_BASH
-
-        if job_yaml_default_none is None:
-            # Generate the job executing commands for previous federated learning (Compatibility)
-            python_program = get_python_program()
-            logging.info("Run the client: {} {} --cf {} --rank {} --role client".format(
-                python_program, entry_file_full_path, conf_file_full_path, str(dynamic_args_config.get("rank", 1))))
-            rank = str(dynamic_args_config.get("rank", 1))
-            entry_command = f"{python_program} {entry_file_full_path} --cf " \
-                            f"{conf_file_full_path} --rank {rank} --role client"
-            shell_cmd_list = [entry_command]
-
-            # Run the job executing commands for previous federated learning (Compatibility)
-            process, error_list = ClientConstants.execute_commands_with_live_logs(
-                shell_cmd_list, callback=self.callback_start_fl_job, should_write_log_file=False)
-            is_launch_task = False
-        else:
-            self.check_runner_stop_event()
-
-            self.mlops_metrics.report_client_id_status(
-                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING, run_id=self.run_id)
-
-            # Generate the job executing commands
-            job_executing_commands = JobRunnerUtils.generate_job_execute_commands(
-                self.run_id, self.edge_id, self.version,
-                self.package_type, executable_interpreter, entry_file_full_path,
-                conf_file_object, entry_args, assigned_gpu_ids,
-                job_api_key, client_rank, scheduler_match_info=scheduler_match_info,
-                cuda_visible_gpu_ids_str=self.cuda_visible_gpu_ids_str)
-
-            if containerize is not None and containerize is True:
-                docker_args = fedml_config_object.get("docker", {})
-                docker_args = JobRunnerUtils.create_instance_from_dict(DockerArgs, docker_args)
-                try:
-                    job_executing_commands = JobRunnerUtils.generate_launch_docker_command(docker_args=docker_args,
-                                                                                           run_id=self.run_id,
-                                                                                           edge_id=self.edge_id,
-                                                                                           unzip_package_path=unzip_package_path,
-                                                                                           executable_interpreter=executable_interpreter,
-                                                                                           entry_file_full_path=entry_file_full_path,
-                                                                                           bootstrap_cmd_list=bootstrap_cmd_list,
-                                                                                           cuda_visible_gpu_ids_str=self.cuda_visible_gpu_ids_str,
-                                                                                           image_pull_policy=image_pull_policy)
-                except Exception as e:
-                    logging.error(f"Error occurred while generating containerized launch commands. "
-                                  f"Exception: {e}, Traceback: {traceback.format_exc()}")
-                    return None, None, None
-
-                if not job_executing_commands:
-                    raise Exception("Failed to generate docker execution command")
-
-            # Run the job executing commands
-            logging.info(f"Run the client job with job id {self.run_id}, device id {self.edge_id}.")
-            process, error_list = ClientConstants.execute_commands_with_live_logs(
-                job_executing_commands, callback=self.start_job_perf, error_processor=self.job_error_processor,
-                should_write_log_file=False if job_type == Constants.JOB_TASK_TYPE_FEDERATE else True)
-            is_launch_task = False if job_type == Constants.JOB_TASK_TYPE_FEDERATE else True
-
-        return process, is_launch_task, error_list
-
-    def callback_start_fl_job(self, job_pid):
-        ClientConstants.save_learning_process(self.run_id, job_pid)
-        self.mlops_metrics.report_sys_perf(
-            self.args, self.agent_config["mqtt_config"], job_process_id=job_pid)
-
-    def start_job_perf(self, job_pid):
-        ClientConstants.save_learning_process(self.run_id, job_pid)
-        self.mlops_metrics.report_job_perf(self.args, self.agent_config["mqtt_config"], job_pid)
-
-    def job_error_processor(self, error_list):
-        self.check_runner_stop_event()
-
-        error_str = "\n".join(error_list)
-        error_message = f"Error occurred when running the job... {error_str}"
-        logging.error(error_message)
-        raise Exception(error_message)
-
-    def reset_devices_status(self, edge_id, status, should_send_client_id_status=True):
-        self.mlops_metrics.run_id = self.run_id
-        self.mlops_metrics.edge_id = edge_id
-
-        if should_send_client_id_status:
-            if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \
-                    status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \
-                    status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION:
-                self.mlops_metrics.report_client_id_status(
-                    edge_id, status, server_id=self.server_id, run_id=self.run_id)
-
-    def sync_run_stop_status(self, run_status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED):
-        try:
-            if self.run_process_event is not None:
-                self.run_process_event.set()
-
-            self.mlops_metrics.report_client_id_status(
-                self.edge_id, run_status, server_id=self.server_id, run_id=self.run_id)
-        except Exception as e:
-            logging.error(f"Failed to sync run stop status with Exception {e}. Traceback: {traceback.format_exc()}")
-            pass
-
-    def cleanup_run_when_starting_failed(
-            self, status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, should_send_client_id_status=True):
-        # logging.error("Cleanup run successfully when starting failed.")
-
-        self.reset_devices_status(
-            self.edge_id, status, should_send_client_id_status=should_send_client_id_status)
-
-        time.sleep(2)
-
-        try:
-            self.mlops_metrics.stop_sys_perf()
-        except Exception as ex:
-            logging.error(f"Failed to stop sys perf with Exception {ex}. Traceback: {traceback.format_exc()}")
-            pass
-
-        time.sleep(1)
-
-        try:
-            ClientConstants.cleanup_learning_process(self.run_id)
-            ClientConstants.cleanup_bootstrap_process(self.run_id)
-            ClientConstants.cleanup_run_process(self.run_id)
-        except Exception as e:
-            logging.error(
-                f"Failed to cleanup run when starting failed with Exception {e}. Traceback: {traceback.format_exc()}")
-            pass
-
-    def cleanup_run_when_finished(self):
-        # logging.info("Cleanup run successfully when finished.")
-
-        self.reset_devices_status(self.edge_id,
-                                  ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
-                                  should_send_client_id_status=False)
-
-        time.sleep(2)
-
-        try:
-            self.mlops_metrics.stop_sys_perf()
-        except Exception as ex:
-            logging.error(f"Failed to stop sys perf with Exception {ex}. Traceback: {traceback.format_exc()}")
-            pass
-
-        time.sleep(1)
-
-        try:
-            ClientConstants.cleanup_learning_process(self.run_id)
-            ClientConstants.cleanup_bootstrap_process(self.run_id)
-            ClientConstants.cleanup_run_process(self.run_id)
-        except Exception as e:
-            logging.error(
-                f"Failed to cleanup run when finished with Exception {e}. Traceback: {traceback.format_exc()}")
-            pass
-
-    def setup_message_center(self):
-        if self.message_center is not None:
-            return
-
-        self.message_center = FedMLMessageCenter(agent_config=self.agent_config)
-        self.message_center.start_sender()
-
-        if self.mlops_metrics is None:
-            self.mlops_metrics = MLOpsMetrics()
-        self.mlops_metrics.set_messenger(self.message_center)
-        self.mlops_metrics.run_id = self.run_id
-
-    def rebuild_message_center(self, message_center_queue):
-        self.message_center = FedMLMessageCenter(message_queue=message_center_queue)
-
-        if self.mlops_metrics is None:
-            self.mlops_metrics = MLOpsMetrics()
-        self.mlops_metrics.set_messenger(self.message_center)
-        self.mlops_metrics.run_id = self.run_id
-
-    def release_message_center(self):
-        try:
-            if self.message_center is not None:
-                self.message_center.stop()
-                self.message_center = None
-
-        except Exception as e:
-            logging.error(
-                f"Failed to release client mqtt manager with Exception {e}. Traceback: {traceback.format_exc()}")
-            pass
-
-    def ota_upgrade(self, payload, request_json):
-        run_id = request_json["runId"]
-        force_ota = False
-        ota_version = None
-
-        try:
-            run_config = request_json.get("run_config", None)
-            parameters = run_config.get("parameters", None)
-            common_args = parameters.get("common_args", None)
-            force_ota = common_args.get("force_ota", False) if common_args is not None else False
-            ota_version = common_args.get("ota_version", None) if common_args is not None else None
-        except Exception as e:
-            logging.error(
-                f"Failed to get ota upgrade parameters with Exception {e}. Traceback: {traceback.format_exc()}")
-            pass
-
-        if force_ota and ota_version is not None:
-            should_upgrade = True if ota_version != fedml.__version__ else False
-            upgrade_version = ota_version
-        else:
-            try:
-                fedml_is_latest_version, local_ver, remote_ver = sys_utils.check_fedml_is_latest_version(self.version)
-            except Exception as e:
-                logging.error(f"Failed to check fedml version with Exception {e}. Traceback: {traceback.format_exc()}")
-                return
-
-            should_upgrade = False if fedml_is_latest_version else True
-            upgrade_version = remote_ver
-
-        if should_upgrade:
-            FedMLClientDataInterface.get_instance(). \
-                save_started_job(run_id, self.edge_id, time.time(),
-                                 ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING,
-                                 ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING,
-                                 payload)
-            self.mlops_metrics.report_client_id_status(
-                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, run_id=run_id)
-
-            logging.info(f"Upgrade to version {upgrade_version} ...")
-
-            sys_utils.do_upgrade(self.version, upgrade_version)
-            raise Exception("Restarting after upgraded...")
-
-    def callback_start_train(self, topic, payload):
-        # Get training params
-
-        request_json = json.loads(payload)
-        is_retain = request_json.get("is_retain", False)
-        if is_retain:
-            return
-        run_id = request_json["runId"]
-
-        # Start log processor for current run
-        train_edge_id = str(topic).split("/")[-2]
-        self.args.run_id = run_id
-        self.args.edge_id = train_edge_id
-        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
-        MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(
-            run_id, train_edge_id, log_source=SchedulerConstants.get_log_source(request_json))
-        logging.info("start the log processor")
-
-        try:
-            MLOpsConfigs.fetch_all_configs()
-        except Exception as e:
-            logging.error(f"Failed to fetch all configs with Exception {e}. Traceback: {traceback.format_exc()}")
-            pass
-
-        if not FedMLClientDataInterface.get_instance().get_agent_status():
-            request_json = json.loads(payload)
-            run_id = request_json["runId"]
-            logging.error(
-                "FedMLDebug - Receive: topic ({}), payload ({}), but the client agent is disabled. {}".format(
-                    topic, payload, traceback.format_exc()
-                )
-            )
-            # Send failed msg when exceptions.
-            self.mlops_metrics.report_client_id_status(
-                train_edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION, run_id=run_id,
-                msg=f"the client agent {train_edge_id} is disabled")
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, train_edge_id)
-            return
-
-        logging.info(
-            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
-        )
-
-        # Terminate previous process about starting or stopping run command
-        logging.info("cleanup and save runner information")
-        server_agent_id = request_json["cloud_agent_id"]
-        ClientConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, train_edge_id, run_id=run_id)
-
-        # OTA upgrade
-        self.ota_upgrade(payload, request_json)
-
-        # Occupy GPUs
-        scheduler_match_info = request_json.get("scheduler_match_info", {})
-        matched_gpu_num = scheduler_match_info.get("matched_gpu_num", 0)
-        model_master_device_id = scheduler_match_info.get("model_master_device_id", None)
-        model_slave_device_id = scheduler_match_info.get("model_slave_device_id", None)
-        model_slave_device_id_list = scheduler_match_info.get("model_slave_device_id_list", None)
-        run_config = request_json.get("run_config", {})
-        run_params = run_config.get("parameters", {})
-        serving_args = run_params.get("serving_args", {})
-        endpoint_id = serving_args.get("endpoint_id", None)
-        cuda_visible_gpu_ids_str = JobRunnerUtils.get_instance().occupy_gpu_ids(
-            run_id, matched_gpu_num, train_edge_id, inner_id=endpoint_id,
-            model_master_device_id=model_master_device_id,
-            model_slave_device_id=model_slave_device_id)
-        logging.info(
-            f"Run started, available gpu ids: {JobRunnerUtils.get_instance().get_available_gpu_id_list(train_edge_id)}")
-
-        # Start server with multiprocessing mode
-        self.request_json = request_json
-        run_id_str = str(run_id)
-        self.running_request_json[run_id_str] = request_json
-        client_runner = FedMLClientRunner(
-            self.args, edge_id=train_edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id,
-            cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str
-        )
-        client_runner.start_request_json = payload
-        self.run_process_event_map[run_id_str] = multiprocessing.Event()
-        self.run_process_event_map[run_id_str].clear()
-        client_runner.run_process_event = self.run_process_event_map[run_id_str]
-        self.run_process_completed_event_map[run_id_str] = multiprocessing.Event()
-        self.run_process_completed_event_map[run_id_str].clear()
-        client_runner.run_process_completed_event = self.run_process_completed_event_map[run_id_str]
-        client_runner.server_id = request_json.get("server_id", "0")
-        logging.info("start the runner process.")
-        self.run_process_map[run_id_str] = Process(target=client_runner.run, args=(
-            self.run_process_event_map[run_id_str], self.run_process_completed_event_map[run_id_str],
-            self.message_center.get_message_queue()))
-        self.run_process_map[run_id_str].start()
-        ClientConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid)
-
-    def callback_stop_train(self, topic, payload):
-        # logging.info("callback_stop_train: topic = %s, payload = %s" % (topic, payload))
-        # logging.info(
-        #     f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
-        # )
-
-        train_edge_id = str(topic).split("/")[-2]
-        request_json = json.loads(payload)
-        is_retain = request_json.get("is_retain", False)
-        if is_retain:
-            return
-        run_id = request_json.get("runId", None)
-        if run_id is None:
-            run_id = request_json.get("id", None)
-        run_status = request_json.get("run_status", ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED)
-
-        # logging.info("Stop run with multiprocessing...")
-
-        # Stop client with multiprocessing mode
-        run_id_str = str(run_id)
-        client_runner = FedMLClientRunner(
-            self.args, edge_id=train_edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id
-        )
-        self.cleanup_containers_and_release_gpus(run_id, train_edge_id)
-        client_runner.run_process_event = self.run_process_event_map.get(run_id_str, None)
-        client_runner.run_process = self.run_process_map.get(run_id_str, None)
-        client_runner.message_center = self.message_center
-        client_runner.mlops_metrics = self.mlops_metrics
-        client_runner.sync_run_stop_status(run_status=run_status)
-
-    def cleanup_containers_and_release_gpus(self, run_id, edge_id):
-        job_type = JobRunnerUtils.get_job_type_from_run_id(run_id)
-
-        if not job_type:
-            logging.info(f"Failed to get job type from run id {run_id}. This is not an error as it would usually "
-                         f"happen when the job is not found in the database because job is already finished and "
-                         f"cleaned up. Exiting cleanup_containers_and_release_gpus.")
-            return
-
-        # Check if the job type is not "serve" or "deploy"
-        if not (job_type == SchedulerConstants.JOB_TASK_TYPE_SERVE or
-                job_type == SchedulerConstants.JOB_TASK_TYPE_DEPLOY):
-
-            # Terminate the run docker container if exists
-            container_name = JobRunnerUtils.get_run_container_name(run_id)
-            docker_client = JobRunnerUtils.get_docker_client(DockerArgs())
-            logging.info(f"Terminating the run docker container {container_name} if exists...")
-            try:
-                JobRunnerUtils.remove_run_container_if_exists(container_name, docker_client)
-            except Exception as e:
-                logging.error(f"Exception {e} occurred when terminating docker container. "
-                              f"Traceback: {traceback.format_exc()}")
-
-            # Release the GPU ids and update the GPU availability in the persistent store
-            JobRunnerUtils.get_instance().release_gpu_ids(run_id, edge_id)
-
-            # Send mqtt message reporting the new gpu availability to the backend
-            MLOpsDevicePerfStats.report_gpu_device_info(self.edge_id, mqtt_mgr=self.mqtt_mgr)
-
-    def cleanup_client_with_status(self):
-        if self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED:
-            # logging.info("received to finished status.")
-            self.cleanup_run_when_finished()
-        elif self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED:
-            # logging.error("received to failed status from the server agent")
-            self.cleanup_run_when_starting_failed(should_send_client_id_status=False)
-        elif self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED:
-            # logging.error("received to failed status from the server agent")
-            self.cleanup_run_when_starting_failed(status=self.device_status, should_send_client_id_status=False)
-
-    def callback_runner_id_status(self, topic, payload):
-        # logging.info("callback_runner_id_status: topic = %s, payload = %s" % (topic, payload))
-        # logging.info(f"FedMLDebug - Receive: topic ({topic}), payload ({payload})")
-        request_json = json.loads(payload)
-        is_retain = request_json.get("is_retain", False)
-        if is_retain:
-            return
-        run_id = request_json["run_id"]
-        edge_id = str(topic).split("/")[-2].split('_')[-1]
-        status = request_json["status"]
-        run_id_str = str(run_id)
-
-        self.save_training_status(
-            edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED
-            if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION else status)
-
-        if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \
-                status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \
-                status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED:
-            completed_event = self.run_process_completed_event_map.get(run_id_str, None)
-            if completed_event is not None:
-                completed_event.set()
-
-            # Stop client with multiprocessing mode
-            client_runner = FedMLClientRunner(
-                self.args,
-                edge_id=edge_id,
-                request_json=request_json,
-                agent_config=self.agent_config,
-                run_id=run_id,
-            )
-            client_runner.device_status = status
-            client_runner.message_center = self.message_center
-            client_runner.mlops_metrics = self.mlops_metrics
-            client_runner.cleanup_client_with_status()
-
-            running_json = self.running_request_json.get(run_id_str)
-            if running_json is None:
-                try:
-                    current_job = FedMLClientDataInterface.get_instance().get_job_by_id(run_id)
-                    running_json = json.loads(current_job.running_json)
-                except Exception as e:
-                    logging.error(f"Failed to get running json with Exception {e}. Traceback: {traceback.format_exc()}")
-
-            if running_json is not None:
-                job_type = JobRunnerUtils.parse_job_type(running_json)
-                if not SchedulerConstants.is_deploy_job(job_type):
-                    logging.info(f"[run/device][{run_id}/{edge_id}] Release gpu resource when run ended.")
-                    self.cleanup_containers_and_release_gpus(run_id, edge_id)
-
-            run_process = self.run_process_map.get(run_id_str, None)
-            if run_process is not None:
-                if run_process.pid is not None:
-                    RunProcessUtils.kill_process(run_process.pid)
-
-                    # Terminate the run docker container if exists
-                    try:
-                        container_name = JobRunnerUtils.get_run_container_name(run_id)
-                        docker_client = JobRunnerUtils.get_docker_client(DockerArgs())
-                        logging.info(f"Terminating the run docker container {container_name} if exists...")
-                        JobRunnerUtils.remove_run_container_if_exists(container_name, docker_client)
-                    except Exception as e:
-                        logging.error(f"Error occurred when terminating docker container."
-                                      f"Exception: {e}, Traceback: {traceback.format_exc()}.")
-
-                self.run_process_map.pop(run_id_str)
-
-            # Stop log processor for current run
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, edge_id)
-
-    def callback_report_current_status(self, topic, payload):
-        logging.info(
-            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
-        )
-
-        self.send_agent_active_msg()
-        if self.general_edge_id is not None:
-            self.send_agent_active_msg(self.general_edge_id)
-
-    @staticmethod
-    def process_ota_upgrade_msg():
-        os.system("pip install -U fedml")
-
-    @staticmethod
-    def callback_client_ota_msg(topic, payload):
-        logging.info(
-            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
-        )
-
-        request_json = json.loads(payload)
-        cmd = request_json["cmd"]
-
-        if cmd == ClientConstants.FEDML_OTA_CMD_UPGRADE:
-            FedMLClientRunner.process_ota_upgrade_msg()
-            # Process(target=FedMLClientRunner.process_ota_upgrade_msg).start()
-            raise Exception("After upgraded, restart runner...")
-        elif cmd == ClientConstants.FEDML_OTA_CMD_RESTART:
-            raise Exception("Restart runner...")
-
-    def get_all_run_process_list_map(self):
-        run_process_dict = dict()
-        for run_id_str, process in self.run_process_map.items():
-            cur_run_process_list = ClientConstants.get_learning_process_list(run_id_str)
-            run_process_dict[run_id_str] = cur_run_process_list
-
-        return run_process_dict
-
-    def callback_report_device_info(self, topic, payload):
-        payload_json = json.loads(payload)
-        server_id = payload_json.get("server_id", 0)
-        run_id = payload_json.get("run_id", 0)
-        listen_edge_id = str(topic).split("/")[-1]
-        context = payload_json.get("context", None)
-        need_gpu_info = payload_json.get("need_gpu_info", False)
-        need_running_process_list = payload_json.get("need_running_process_list", False)
-        response_topic = f"client/server/response_device_info/{server_id}"
-        if self.mlops_metrics is not None and self.model_device_client_edge_id_list is not None and \
-                self.model_device_server_id is not None:
-            if not need_gpu_info:
-                device_info_json = {
-                    "edge_id": listen_edge_id,
-                    "fedml_version": fedml.__version__,
-                    "user_id": self.args.user
-                }
-            else:
-                total_mem, free_mem, total_disk_size, free_disk_size, cup_utilization, cpu_cores, gpu_cores_total, \
-                    gpu_cores_available, sent_bytes, recv_bytes, gpu_available_ids = sys_utils.get_sys_realtime_stats()
-                host_ip = sys_utils.get_host_ip()
-                host_port = sys_utils.get_available_port()
-                gpu_available_ids = JobRunnerUtils.get_available_gpu_id_list(self.edge_id)
-                gpu_available_ids = JobRunnerUtils.trim_unavailable_gpu_ids(gpu_available_ids)
-                gpu_cores_available = len(gpu_available_ids)
-                gpu_list = sys_utils.get_gpu_list()
-                device_info_json = {
-                    "edge_id": listen_edge_id,
-                    "memoryTotal": round(total_mem * MLOpsUtils.BYTES_TO_GB, 2),
-                    "memoryAvailable": round(free_mem * MLOpsUtils.BYTES_TO_GB, 2),
-                    "diskSpaceTotal": round(total_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
-                    "diskSpaceAvailable": round(free_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
-                    "cpuUtilization": round(cup_utilization, 2),
-                    "cpuCores": cpu_cores,
-                    "gpuCoresTotal": gpu_cores_total,
-                    "gpuCoresAvailable": gpu_cores_available,
-                    "gpu_available_ids": gpu_available_ids,
-                    "gpu_list": gpu_list,
-                    "node_ip": host_ip,
-                    "node_port": host_port,
-                    "networkTraffic": sent_bytes + recv_bytes,
-                    "updateTime": int(MLOpsUtils.get_ntp_time()),
-                    "fedml_version": fedml.__version__,
-                    "user_id": self.args.user
-                }
-            if need_running_process_list:
-                device_info_json["run_process_list_map"] = self.get_all_run_process_list_map()
-            salve_device_ids = list()
-            for model_client_edge_id in self.model_device_client_edge_id_list:
-                salve_device_ids.append(model_client_edge_id)
-            response_payload = {"slave_device_id": self.model_device_client_edge_id_list[0],
-                                "slave_device_id_list": salve_device_ids,
-                                "master_device_id": self.model_device_server_id,
-                                "run_id": run_id, "edge_id": listen_edge_id,
-                                "edge_info": device_info_json}
-            if context is not None:
-                response_payload["context"] = context
-            self.message_center.send_message(response_topic, json.dumps(response_payload), run_id=run_id)
-
-    def callback_client_logout(self, topic, payload):
-        payload_json = json.loads(payload)
-        secret = payload_json.get("auth", None)
-        if secret is None or str(secret) != "246b1be6-0eeb-4b17-b118-7d74de1975d4":
-            return
-        logging.info("Received the logout request.")
-        if self.run_process_event is not None:
-            self.run_process_event.set()
-        if self.run_process_completed_event is not None:
-            self.run_process_completed_event.set()
-        self.disable_client_login = True
-        time.sleep(3)
-        os.system("fedml logout")
-
-    def save_training_status(self, edge_id, training_status):
-        self.current_training_status = training_status
-        ClientConstants.save_training_infos(edge_id, training_status)
-
-    @staticmethod
-    def get_gpu_machine_id():
-        gpu_list = sys_utils.get_gpu_list()
-        gpu_uuids = ""
-        if len(gpu_list) > 0:
-            for gpu in gpu_list:
-                gpu_uuids += gpu.get("uuid", "")
-        else:
-            gpu_uuids = str(uuid.uuid4())
-        device_id_combination = \
-            f"{FedMLClientRunner.get_machine_id()}-{hex(uuid.getnode())}-{gpu_uuids}"
-        device_id = security_utils.get_content_hash(device_id_combination)
-        return device_id
-
-    @staticmethod
-    def get_device_id(use_machine_id=False):
-        device_file_path = os.path.join(ClientConstants.get_data_dir(),
-                                        ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME)
-        file_for_device_id = os.path.join(device_file_path, "devices.id")
-        if not os.path.exists(device_file_path):
-            os.makedirs(device_file_path, exist_ok=True)
-        elif os.path.exists(file_for_device_id):
-            with open(file_for_device_id, 'r', encoding='utf-8') as f:
-                device_id_from_file = f.readline()
-                if device_id_from_file is not None and device_id_from_file != "":
-                    return device_id_from_file
-
-        if platform.system() == "Darwin":
-            cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \
-                                 "|awk -F':' '{print $2}' "
-            device_id = os.popen(cmd_get_serial_num).read()
-            device_id = device_id.replace('\n', '').replace(' ', '')
-            if device_id is None or device_id == "":
-                if not use_machine_id:
-                    device_id = hex(uuid.getnode())
-                else:
-                    device_id = FedMLClientRunner.get_gpu_machine_id()
-            else:
-                device_id = "0x" + device_id
-        else:
-            if "nt" in os.name:
-
-                def get_uuid():
-                    guid = ""
-                    try:
-                        cmd = "wmic csproduct get uuid"
-                        guid = str(subprocess.check_output(cmd))
-                        pos1 = guid.find("\\n") + 2
-                        guid = guid[pos1:-15]
-                    except Exception as ex:
-                        logging.error(f"Failed to get uuid with Exception {ex}. Traceback: {traceback.format_exc()}")
-                        pass
-                    return str(guid)
-
-                device_id = str(get_uuid())
-                logging.info(device_id)
-            elif "posix" in os.name:
-                device_id = sys_utils.get_device_id_in_docker()
-                if device_id is None:
-                    if not use_machine_id:
-                        device_id = hex(uuid.getnode())
-                    else:
-                        device_id = device_id = FedMLClientRunner.get_gpu_machine_id()
-            else:
-                device_id = sys_utils.run_subprocess_open(
-                    "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split()
-                )
-                device_id = hex(device_id)
-
-        if device_id is not None and device_id != "":
-            with open(file_for_device_id, 'w', encoding='utf-8') as f:
-                f.write(device_id)
-        else:
-            device_id = hex(uuid.uuid4())
-            with open(file_for_device_id, 'w', encoding='utf-8') as f:
-                f.write(device_id)
-
-        return device_id
-
-    @staticmethod
-    def get_machine_id():
-        try:
-            import machineid
-            return machineid.id().replace('\n', '').replace('\r\n', '').strip()
-        except Exception as e:
-            logging.error(f"Failed to get machine id with Exception {e}. Traceback: {traceback.format_exc()}")
-            return hex(uuid.getnode())
-
-    @staticmethod
-    def bind_account_and_device_id(url, account_id, device_id, os_name, api_key="", role="client"):
-        ip = requests.get('https://checkip.amazonaws.com').text.strip()
-        fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \
-            cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \
-            gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info()
-        host_name = sys_utils.get_host_name()
-        json_params = {
-            "accountid": account_id,
-            "deviceid": device_id,
-            "type": os_name,
-            "state": ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE,
-            "status": ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE,
-            "processor": cpu_info,
-            "core_type": cpu_info,
-            "network": "",
-            "role": role,
-            "os_ver": os_ver,
-            "memory": total_mem,
-            "ip": ip,
-            "api_key": api_key,
-            "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver,
-                            "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver,
-                            "mpi_installed": mpi_installed, "cpu_usage": cpu_usage,
-                            "available_mem": available_mem, "total_mem": total_mem,
-                            "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name}
-        }
-        if gpu_count > 0:
-            if gpu_total_mem is not None:
-                json_params["gpu"] = gpu_info if gpu_info is not None else "" + ", Total GPU Memory: " + gpu_total_mem
-            else:
-                json_params["gpu"] = gpu_info if gpu_info is not None else ""
-            json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else ""
-            if gpu_available_mem is not None:
-                json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem
-            if gpu_total_mem is not None:
-                json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem
-
-            json_params["extra_infos"]["gpu_count"] = gpu_count
-            json_params["extra_infos"]["gpu_vendor"] = gpu_vendor
-            json_params["extra_infos"]["gpu_device_name"] = gpu_device_name
-
-            gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count)
-            gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0
-            gpu_list = sys_utils.get_gpu_list()
-            json_params["extra_infos"]["gpu_available_count"] = gpu_available_count
-            json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list
-            json_params["extra_infos"]["gpu_list"] = gpu_list
-        else:
-            json_params["gpu"] = "None"
-            json_params["extra_infos"]["gpu_available_count"] = 0
-            json_params["extra_infos"]["gpu_available_id_list"] = []
-            json_params["extra_infos"]["gpu_list"] = []
-
-        _, cert_path = MLOpsConfigs.get_request_params()
-        if cert_path is not None:
-            try:
-                requests.session().verify = cert_path
-                response = requests.post(
-                    url, json=json_params, verify=True,
-                    headers={"content-type": "application/json", "Connection": "close"}
-                )
-            except requests.exceptions.SSLError as err:
-                logging.error(
-                    f"Failed to bind account and device id with error: {err}, traceback: {traceback.format_exc()}")
-                MLOpsConfigs.install_root_ca_file()
-                response = requests.post(
-                    url, json=json_params, verify=True,
-                    headers={"content-type": "application/json", "Connection": "close"}
-                )
-        else:
-            response = requests.post(url, json=json_params, headers={"Connection": "close"})
-        edge_id, user_name, extra_url, general_edge_id = -1, None, None, None
-        if response.status_code != 200:
-            print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                  f"response.content: {response.content}")
-            pass
-        else:
-            # print("url = {}, response = {}".format(url, response))
-            status_code = response.json().get("code")
-            if status_code == "SUCCESS":
-                edge_id = response.json().get("data").get("id")
-                user_name = response.json().get("data").get("userName", None)
-                extra_url = response.json().get("data").get("url", None)
-                general_edge_id = response.json().get("data").get("general_edge_id", None)
-                if edge_id is None or edge_id <= 0:
-                    print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                          f"response.content: {response.content}")
-            else:
-                if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR:
-                    raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR)
-                print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                      f"response.content: {response.content}")
-                return -1, None, None, None
-        return edge_id, user_name, extra_url, general_edge_id
-
-    def fetch_configs(self):
-        return MLOpsConfigs.fetch_all_configs()
-
-    def send_agent_active_msg(self, edge_id):
-        active_topic = "flclient_agent/active"
-        status = MLOpsStatus.get_instance().get_client_agent_status(edge_id)
-        if (
-                status is not None
-                and status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE
-                and status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE
-        ):
-            return
-
-        try:
-            current_job = FedMLClientDataInterface.get_instance().get_job_by_id(self.run_id)
-        except Exception as e:
-            logging.error(f"Failed to get current job with Exception {e}. Traceback: {traceback.format_exc()}")
-            current_job = None
-        if current_job is None:
-            if status is not None and status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE:
-                status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE
-            else:
-                return
-        else:
-            status = ClientConstants.get_device_state_from_run_edge_state(current_job.status)
-        active_msg = {"ID": edge_id, "status": status}
-        MLOpsStatus.get_instance().set_client_agent_status(edge_id, status)
-        self.mqtt_mgr.send_message_json(active_topic, json.dumps(active_msg))
-        logging.info(f"Send agent active msg {active_msg}")
-
-    def recover_start_train_msg_after_upgrading(self):
-        try:
-            current_job = FedMLClientDataInterface.get_instance().get_current_job()
-            if current_job is not None and \
-                    current_job.status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING:
-                logging.info("start training after upgrading.")
-                topic_start_train = "flserver_agent/" + str(self.edge_id) + "/start_train"
-                self.callback_start_train(topic_start_train, current_job.running_json)
-        except Exception as e:
-            logging.error(f"recover starting train message after upgrading failed with exception {e}, "
-                          f"Traceback {traceback.format_exc()}")
-
-    def on_agent_mqtt_connected(self, mqtt_client_object):
-        # The MQTT message topic format is as follows: <sender>/<receiver>/<action>
-
-        # Setup MQTT message listener for starting training
-        topic_start_train = "flserver_agent/" + str(self.edge_id) + "/start_train"
-        self.add_message_listener(topic_start_train, self.callback_start_train)
-        self.mqtt_mgr.add_message_listener(topic_start_train, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener for stopping training
-        topic_stop_train = "flserver_agent/" + str(self.edge_id) + "/stop_train"
-        self.add_message_listener(topic_stop_train, self.callback_stop_train)
-        self.mqtt_mgr.add_message_listener(topic_stop_train, self.listener_message_dispatch_center)
-
-
-        # Setup MQTT message listener for client status switching
-        topic_client_status = "fl_client/flclient_agent_" + str(self.edge_id) + "/status"
-        self.add_message_listener(topic_client_status, self.callback_runner_id_status)
-        self.mqtt_mgr.add_message_listener(topic_client_status, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener to report current device status.
-        topic_report_status = "mlops/report_device_status"
-        self.add_message_listener(topic_report_status, self.callback_report_current_status)
-        self.mqtt_mgr.add_message_listener(topic_report_status, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener to OTA messages from the MLOps.
-        topic_ota_msg = "mlops/flclient_agent_" + str(self.edge_id) + "/ota"
-        self.add_message_listener(topic_ota_msg, self.callback_client_ota_msg)
-        self.mqtt_mgr.add_message_listener(topic_ota_msg, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener to OTA messages from the MLOps.
-        topic_request_device_info = "server/client/request_device_info/" + str(self.edge_id)
-        self.add_message_listener(topic_request_device_info, self.callback_report_device_info)
-        self.mqtt_mgr.add_message_listener(topic_request_device_info, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener to logout from MLOps.
-        topic_client_logout = "mlops/client/logout/" + str(self.edge_id)
-        self.add_message_listener(topic_client_logout, self.callback_client_logout)
-        self.mqtt_mgr.add_message_listener(topic_client_logout, self.listener_message_dispatch_center)
-
-        # Subscribe topics for starting train, stopping train and fetching client status.
-        mqtt_client_object.subscribe(topic_start_train, qos=2)
-        mqtt_client_object.subscribe(topic_stop_train, qos=2)
-        mqtt_client_object.subscribe(topic_client_status, qos=2)
-        mqtt_client_object.subscribe(topic_report_status, qos=2)
-        mqtt_client_object.subscribe(topic_ota_msg, qos=2)
-        mqtt_client_object.subscribe(topic_request_device_info, qos=2)
-        mqtt_client_object.subscribe(topic_client_logout, qos=2)
-
-        self.subscribed_topics.clear()
-        self.subscribed_topics.append(topic_start_train)
-        self.subscribed_topics.append(topic_stop_train)
-        self.subscribed_topics.append(topic_client_status)
-        self.subscribed_topics.append(topic_report_status)
-        self.subscribed_topics.append(topic_ota_msg)
-        self.subscribed_topics.append(topic_request_device_info)
-        self.subscribed_topics.append(topic_client_logout)
-
-        # Subscribe the messages for federated learning.
-        self.subscribe_fl_msgs()
-
-        # Broadcast the first active message.
-        self.send_agent_active_msg(self.edge_id)
-        if self.general_edge_id is not None:
-            self.send_agent_active_msg(self.general_edge_id)
-
-        # Echo results
-        MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout()
-        worker_deploy_id_list = [modeld_device_clint.edge_id for index, modeld_device_clint in
-                                 enumerate(self.model_device_client_list)]
-        print("\nCongratulations, your device is connected to the FedML MLOps platform successfully!")
-        print(f"Your FedML Edge ID is {str(self.edge_id)}, unique device ID is {str(self.unique_device_id)}, "
-              f"master deploy ID is {str(self.model_device_server.edge_id)}, "
-              f"worker deploy ID is {worker_deploy_id_list}"
-              )
-        if self.edge_extra_url is not None and self.edge_extra_url != "":
-            print(f"You may visit the following url to fill in more information with your device.\n"
-                  f"{self.edge_extra_url}")
-        MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout(enable=False)
-
-        from fedml.core.mlops import sync_deploy_id
-        sync_deploy_id(
-            self.edge_id, self.model_device_server.edge_id, worker_deploy_id_list)
-
-        # Start the message center for listener
-        self.start_listener(sender_message_queue=self.message_center.get_message_queue(),
-                            agent_config=self.agent_config)
-
-    def subscribe_fl_msgs(self):
-        if self.general_edge_id is None:
-            return
-
-        # Setup MQTT message listener for starting training
-        topic_start_train = "flserver_agent/" + str(self.general_edge_id) + "/start_train"
-        self.add_message_listener(topic_start_train, self.callback_start_train)
-        self.mqtt_mgr.add_message_listener(topic_start_train, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener for stopping training
-        topic_stop_train = "flserver_agent/" + str(self.general_edge_id) + "/stop_train"
-        self.add_message_listener(topic_stop_train, self.callback_stop_train)
-        self.mqtt_mgr.add_message_listener(topic_stop_train, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener for client status switching
-        topic_client_status = "fl_client/flclient_agent_" + str(self.general_edge_id) + "/status"
-        self.add_message_listener(topic_client_status, self.callback_runner_id_status)
-        self.mqtt_mgr.add_message_listener(topic_client_status, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener to OTA messages from the MLOps.
-        topic_request_device_info = "server/client/request_device_info/" + str(self.general_edge_id)
-        self.add_message_listener(topic_request_device_info, self.callback_report_device_info)
-        self.mqtt_mgr.add_message_listener(topic_request_device_info, self.listener_message_dispatch_center)
-
-        # Subscribe topics for starting train, stopping train and fetching client status.
-        self.mqtt_mgr.subscribe_msg(topic_start_train)
-        self.mqtt_mgr.subscribe_msg(topic_stop_train)
-        self.mqtt_mgr.subscribe_msg(topic_client_status)
-        self.mqtt_mgr.subscribe_msg(topic_request_device_info)
-
-        self.subscribed_topics.append(topic_start_train)
-        self.subscribed_topics.append(topic_stop_train)
-        self.subscribed_topics.append(topic_client_status)
-        self.subscribed_topics.append(topic_request_device_info)
-
-    def on_agent_mqtt_disconnected(self, mqtt_client_object):
-        MLOpsStatus.get_instance().set_client_agent_status(
-            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE
-        )
-        pass
-
-    def setup_agent_mqtt_connection(self, service_config):
-        # Setup MQTT connection
-        self.mqtt_mgr = MqttManager(
-            service_config["mqtt_config"]["BROKER_HOST"],
-            service_config["mqtt_config"]["BROKER_PORT"],
-            service_config["mqtt_config"]["MQTT_USER"],
-            service_config["mqtt_config"]["MQTT_PWD"],
-            service_config["mqtt_config"]["MQTT_KEEPALIVE"],
-            f"FedML_ClientAgent_Daemon_@{self.user_name}@_@{self.args.current_device_id}@_@{str(uuid.uuid4())}@",
-            "flclient_agent/last_will_msg",
-            json.dumps({"ID": self.edge_id, "status": ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE})
-        )
-        self.agent_config = service_config
-
-        # Init local database
-        FedMLClientDataInterface.get_instance().create_job_table()
-
-        # Start the message center to process edge related messages.
-        self.setup_message_center()
-
-        # Start local API services
-        client_api_cmd = "fedml.computing.scheduler.slave.client_api:api"
-        client_api_pids = RunProcessUtils.get_pid_from_cmd_line(client_api_cmd)
-        if client_api_pids is None or len(client_api_pids) <= 0:
-            python_program = get_python_program()
-            cur_dir = os.path.dirname(__file__)
-            fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
-            self.local_api_process = ClientConstants.exec_console_with_script(
-                "{} -m uvicorn {} --host 0.0.0.0 --port {} "
-                "--reload --reload-delay 3 --reload-dir {} --log-level critical".format(
-                    python_program, client_api_cmd, ClientConstants.LOCAL_CLIENT_API_PORT, fedml_base_dir),
-                should_capture_stdout=False,
-                should_capture_stderr=False
-            )
-            # if self.local_api_process is not None and self.local_api_process.pid is not None:
-            #     print(f"Client local API process id {self.local_api_process.pid}")
-
-        # Setup MQTT connected listener
-        self.mqtt_mgr.add_connected_listener(self.on_agent_mqtt_connected)
-        self.mqtt_mgr.add_disconnected_listener(self.on_agent_mqtt_disconnected)
-        self.mqtt_mgr.connect()
-
-        # Report the IDLE status to MLOps
-        self.mlops_metrics.report_client_training_status(
-            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE)
-        MLOpsStatus.get_instance().set_client_agent_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE)
-
-        # MLOpsRuntimeLogDaemon.get_instance(self.args).stop_all_log_processor()
-        self.recover_start_train_msg_after_upgrading()
-
-        infer_host = os.getenv("FEDML_INFER_HOST", None)
-        infer_redis_addr = os.getenv("FEDML_INFER_REDIS_ADDR", None)
-        infer_redis_port = os.getenv("FEDML_INFER_REDIS_PORT", None)
-        infer_redis_password = os.getenv("FEDML_INFER_REDIS_PASSWORD", None)
-        model_client_num = os.getenv("FEDML_MODEL_WORKER_NUM", None)
-        os.environ["FEDML_CURRENT_EDGE_ID"] = str(self.edge_id)
-
-        if not ComputeCacheManager.get_instance().set_redis_params():
-            os.environ["FEDML_DISABLE_REDIS_CONNECTION"] = "1"
-
-        if self.model_device_client_edge_id_list is None:
-            self.model_device_client_edge_id_list = list()
-        if self.model_device_client_list is None:
-            model_client_num = 1 if model_client_num is None else int(model_client_num)
-            self.model_device_client_list = list()
-            for client_index in range(model_client_num):
-                model_device_client = FedMLModelDeviceClientRunner(
-                    self.args, f"{self.args.current_device_id}_{client_index + 1}", self.args.os_name,
-                    self.args.is_from_docker, self.agent_config)
-                if infer_host is not None:
-                    model_device_client.infer_host = infer_host
-                if infer_redis_addr is not None:
-                    model_device_client.redis_addr = infer_redis_addr
-                if infer_redis_port is not None:
-                    model_device_client.redis_port = infer_redis_port
-                if infer_redis_password is not None:
-                    model_device_client.redis_password = infer_redis_password
-                model_device_client.start()
-                self.model_device_client_list.append(model_device_client)
-                self.model_device_client_edge_id_list.append(model_device_client.get_edge_id())
-
-        if self.model_device_server is None:
-            self.model_device_server = FedMLModelDeviceServerRunner(self.args, self.args.current_device_id,
-                                                                    self.args.os_name, self.args.is_from_docker,
-                                                                    self.agent_config)
-            if infer_host is not None:
-                self.model_device_server.infer_host = infer_host
-            if infer_redis_addr is not None:
-                self.model_device_server.redis_addr = infer_redis_addr
-            if infer_redis_port is not None:
-                self.model_device_server.redis_port = infer_redis_port
-            if infer_redis_password is not None:
-                self.model_device_server.redis_password = infer_redis_password
-
-            self.model_device_server.start()
-            self.model_device_server_id = self.model_device_server.get_edge_id()
-
-        JobCleanup.get_instance().sync_data_on_startup(self.edge_id)
-
-        os.environ["FEDML_DEPLOY_MASTER_ID"] = str(self.model_device_server.get_edge_id())
-        os.environ["FEDML_DEPLOY_WORKER_IDS"] = str([client.get_edge_id() for client in self.model_device_client_list])
-        self.mlops_metrics.stop_device_realtime_perf()
-        self.mlops_metrics.report_device_realtime_perf(self.args, service_config["mqtt_config"])
-
-    def start_agent_mqtt_loop(self):
-        # Start MQTT message loop
-        try:
-            self.mqtt_mgr.loop_forever()
-        except Exception as e:
-            logging.error(f"Errors in the MQTT loop: Exception {e}, Traceback: {traceback.format_exc()}")
-            if str(e) == "Restarting after upgraded...":
-                logging.info("Restarting after upgraded...")
-            else:
-                logging.info("Client tracing: {}".format(traceback.format_exc()))
-        finally:
-            print("finally")
-            login_exit_file = os.path.join(ClientConstants.get_log_file_dir(), "exited.log")
-            with open(login_exit_file, "w") as f:
-                f.writelines(f"{os.getpid()}.")
-
-            self.stop_agent()
-
-            time.sleep(5)
-            sys_utils.cleanup_all_fedml_client_login_processes(
-                ClientConstants.CLIENT_LOGIN_PROGRAM, clean_process_group=False)
-            sys.exit(1)
-
-    def stop_agent(self):
-        if self.run_process_event is not None:
-            self.run_process_event.set()
-
-        if self.model_device_server is not None:
-            self.model_device_server.stop()
-            self.model_device_server = None
-
-        if self.model_device_client_list is not None:
-            for model_client in self.model_device_client_list:
-                model_client.stop()
-            self.model_device_client_list.clear()
-            self.model_device_client_list = None
-
-        if self.mqtt_mgr is not None:
-            try:
-                for topic in self.subscribed_topics:
-                    self.mqtt_mgr.unsubscribe_msg(topic)
-            except Exception as e:
-                logging.error(f"Unsubscribe topics error: {e}, Traceback: {traceback.format_exc()}")
-                pass
-
-            self.mqtt_mgr.loop_stop()
-            self.mqtt_mgr.disconnect()
-
-        self.release_message_center()
-
-    def get_runner(self):
-        runner = FedMLClientRunner(
-            self.args, edge_id=self.edge_id, request_json=self.request_json,
-            agent_config=self.agent_config, run_id=self.run_id,
-            cuda_visible_gpu_ids_str=self.cuda_visible_gpu_ids_str
-        )
-        runner.edge_user_name = self.user_name
-        runner.edge_extra_url = self.edge_extra_url
-        runner.unique_device_id = self.unique_device_id
-        runner.user_name = self.user_name
-        runner.general_edge_id = self.general_edge_id
-        runner.model_device_client_edge_id_list = self.model_device_client_edge_id_list
-        runner.model_device_server_id = self.model_device_server_id
-        return runner
diff --git a/python/fedml/computing/scheduler/slave/launch_job_runner.py b/python/fedml/computing/scheduler/slave/launch_job_runner.py
new file mode 100755
index 0000000000..07533af399
--- /dev/null
+++ b/python/fedml/computing/scheduler/slave/launch_job_runner.py
@@ -0,0 +1,41 @@
+from abc import ABC
+
+from .base_slave_job_runner import FedMLBaseSlaveJobRunner
+from .client_constants import ClientConstants
+
+
+class FedMLLaunchSlaveJobRunner(FedMLBaseSlaveJobRunner, ABC):
+
+    def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id=0,
+                 cuda_visible_gpu_ids_str=None):
+        FedMLBaseSlaveJobRunner.__init__(
+            self, args, edge_id=edge_id, request_json=request_json, agent_config=agent_config, run_id=run_id,
+            cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str, agent_data_dir=ClientConstants.get_data_dir(),
+            agent_package_download_dir=ClientConstants.get_package_download_dir(),
+            agent_package_unzip_dir=ClientConstants.get_package_unzip_dir(),
+            agent_log_file_dir=ClientConstants.get_log_file_dir()
+        )
+
+    # Override
+    def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None):
+        return FedMLLaunchSlaveJobRunner(
+            args, run_id=run_id, request_json=request_json, agent_config=self.agent_config, edge_id=edge_id
+        )
+
+    # Override
+    def _generate_extend_queue_list(self):
+        return None
+
+    # Override
+    def get_download_package_info(self, packages_config=None):
+        return super().get_download_package_info(packages_config)
+
+    # Override
+    def run_impl(
+            self, run_extend_queue_list, sender_message_center,
+            listener_message_queue, status_center_queue
+    ):
+        super().run_impl(
+            run_extend_queue_list, sender_message_center,
+            listener_message_queue, status_center_queue)
+
diff --git a/python/fedml/computing/scheduler/slave/launch_job_runner_manager.py b/python/fedml/computing/scheduler/slave/launch_job_runner_manager.py
new file mode 100755
index 0000000000..3f65438f9e
--- /dev/null
+++ b/python/fedml/computing/scheduler/slave/launch_job_runner_manager.py
@@ -0,0 +1,22 @@
+
+from fedml.core.common.singleton import Singleton
+from .base_slave_job_runner_manager import FedMLBaseSlaveJobRunnerManager
+from .launch_job_runner import FedMLLaunchSlaveJobRunner
+
+
+class FedMLLaunchJobRunnerManager(FedMLBaseSlaveJobRunnerManager, Singleton):
+    def __init__(self):
+        FedMLBaseSlaveJobRunnerManager.__init__(self)
+
+    @staticmethod
+    def get_instance():
+        return FedMLLaunchJobRunnerManager()
+
+    # Override
+    def _generate_job_runner_instance(
+            self, args, run_id=None, request_json=None, agent_config=None, edge_id=None
+    ):
+        return FedMLLaunchSlaveJobRunner(
+            args, run_id=run_id, request_json=request_json, agent_config=agent_config, edge_id=edge_id)
+
+
diff --git a/python/fedml/computing/scheduler/slave/slave_agent.py b/python/fedml/computing/scheduler/slave/slave_agent.py
new file mode 100755
index 0000000000..e9c8b2fc93
--- /dev/null
+++ b/python/fedml/computing/scheduler/slave/slave_agent.py
@@ -0,0 +1,26 @@
+
+from .base_slave_agent import FedMLBaseSlaveAgent
+from .client_constants import ClientConstants
+from .client_data_interface import FedMLClientDataInterface
+from .slave_protocol_manager import FedMLLaunchSlaveProtocolManager
+
+
+class FedMLLaunchSlaveAgent(FedMLBaseSlaveAgent):
+    def __init__(self):
+        FedMLBaseSlaveAgent.__init__(self)
+
+    # Override
+    def _get_log_file_dir(self):
+        return ClientConstants.get_log_file_dir()
+
+    # Override
+    def _save_agent_info(self, unique_device_id, edge_id):
+        ClientConstants.save_runner_infos(unique_device_id, edge_id)
+
+    # Override
+    def _init_database(self):
+        FedMLClientDataInterface.get_instance().create_job_table()
+
+    # Override
+    def _generate_protocol_manager_instance(self, args, agent_config=None):
+        return FedMLLaunchSlaveProtocolManager(args, agent_config=agent_config)
diff --git a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
new file mode 100755
index 0000000000..cd8e40d7e8
--- /dev/null
+++ b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
@@ -0,0 +1,104 @@
+import copy
+import os
+from ..comm_utils.job_cleanup import JobCleanup
+from .base_slave_protocol_manager import FedMLBaseSlaveProtocolManager
+from .launch_job_runner_manager import FedMLLaunchJobRunnerManager
+from ..model_scheduler.model_device_server import FedMLModelDeviceServerRunner
+from ..model_scheduler.model_device_client import FedMLModelDeviceClientRunner
+
+
+class FedMLLaunchSlaveProtocolManager(FedMLBaseSlaveProtocolManager):
+
+    def __init__(self, args, agent_config=None):
+        FedMLBaseSlaveProtocolManager.__init__(self, args, agent_config=agent_config)
+
+    # Override
+    def generate_topics(self):
+        super().generate_topics()
+
+    # Override
+    def add_protocol_handler(self):
+        super().add_protocol_handler()
+
+    # Override
+    def _generate_protocol_manager_instance(self, args, agent_config=None):
+        return FedMLLaunchSlaveProtocolManager(args, agent_config=agent_config)
+
+    # Override
+    def _get_job_runner_manager(self):
+        return FedMLLaunchJobRunnerManager.get_instance()
+
+    # Override
+    def _process_connection_ready(self):
+        from fedml.core.mlops import sync_deploy_id
+        sync_deploy_id(
+            self.edge_id, self.model_device_server.edge_id, self.model_device_client_edge_id_list)
+
+    # Override
+    def _process_connection_lost(self):
+        pass
+
+    # Override
+    def _init_extra_items(self):
+        super()._init_extra_items()
+
+        # Sync the data when startup
+        JobCleanup.get_instance().sync_data_on_startup(self.args.edge_id)
+
+        # Get the environment variables
+        infer_host = os.getenv("FEDML_INFER_HOST", None)
+        infer_redis_addr = os.getenv("FEDML_INFER_REDIS_ADDR", None)
+        infer_redis_port = os.getenv("FEDML_INFER_REDIS_PORT", None)
+        infer_redis_password = os.getenv("FEDML_INFER_REDIS_PASSWORD", None)
+        model_client_num = os.getenv("FEDML_MODEL_WORKER_NUM", None)
+
+        # Start deploy master agent and slave agent
+        in_args = copy.deepcopy(self.args)
+        if self.model_device_client_edge_id_list is None:
+            self.model_device_client_edge_id_list = list()
+        if self.model_device_client_list is None:
+            model_client_num = 1 if model_client_num is None else int(model_client_num)
+            self.model_device_client_list = list()
+            for client_index in range(model_client_num):
+                model_device_client = FedMLModelDeviceClientRunner(
+                    in_args, f"{in_args.current_device_id}_{client_index + 1}", in_args.os_name,
+                    in_args.is_from_docker, self.agent_config)
+                if infer_host is not None:
+                    model_device_client.infer_host = infer_host
+                if infer_redis_addr is not None:
+                    model_device_client.redis_addr = infer_redis_addr
+                if infer_redis_port is not None:
+                    model_device_client.redis_port = infer_redis_port
+                if infer_redis_password is not None:
+                    model_device_client.redis_password = infer_redis_password
+                model_device_client.start()
+                self.model_device_client_list.append(model_device_client)
+                self.model_device_client_edge_id_list.append(model_device_client.get_edge_id())
+
+        self.args = copy.deepcopy(in_args)
+        if self.model_device_server is None:
+            self.model_device_server = FedMLModelDeviceServerRunner(in_args, in_args.current_device_id,
+                                                                    in_args.os_name, in_args.is_from_docker,
+                                                                    self.agent_config)
+            if infer_host is not None:
+                self.model_device_server.infer_host = infer_host
+            if infer_redis_addr is not None:
+                self.model_device_server.redis_addr = infer_redis_addr
+            if infer_redis_port is not None:
+                self.model_device_server.redis_port = infer_redis_port
+            if infer_redis_password is not None:
+                self.model_device_server.redis_password = infer_redis_password
+
+            self.model_device_server.start()
+            self.model_device_server_id = self.model_device_server.get_edge_id()
+
+        # Save the deployed master and worker id list to the environment variable.
+        os.environ["FEDML_DEPLOY_MASTER_ID"] = str(self.model_device_server_id)
+        os.environ["FEDML_DEPLOY_WORKER_IDS"] = str(self.model_device_client_edge_id_list)
+
+        # Start the monitor process
+        self.args = copy.deepcopy(in_args)
+        self.mlops_metrics.stop_device_realtime_perf()
+        self.mlops_metrics.report_device_realtime_perf(self.args, self.args.agent_config["mqtt_config"])
+        pass
+
diff --git a/python/fedml/core/mlops/__init__.py b/python/fedml/core/mlops/__init__.py
index 9ccd4d2265..a2e9fcc241 100644
--- a/python/fedml/core/mlops/__init__.py
+++ b/python/fedml/core/mlops/__init__.py
@@ -17,8 +17,6 @@
 from fedml.core.mlops.mlops_configs import MLOpsConfigs
 
 from ...computing.scheduler.slave.client_constants import ClientConstants
-from ...computing.scheduler.slave.client_runner import FedMLClientRunner
-from ...computing.scheduler.master.server_runner import FedMLServerRunner
 from ...constants import FEDML_TRAINING_PLATFORM_SIMULATION, FEDML_TRAINING_PLATFORM_SIMULATION_TYPE
 from ...computing.scheduler.master.server_constants import ServerConstants
 
@@ -35,6 +33,8 @@
 from ...computing.scheduler.slave.client_data_interface import FedMLClientDataInterface
 from .mlops_utils import MLOpsUtils
 from .mlops_constants import MLOpsConstants
+from ...computing.scheduler.master.master_protocol_manager import FedMLLaunchMasterProtocolManager
+from ...computing.scheduler.scheduler_core.account_manager import FedMLAccountManager
 
 
 FEDML_MLOPS_API_RESPONSE_SUCCESS_CODE = "SUCCESS"
@@ -50,6 +50,8 @@
     "log_aggregation_failed_status",
     "log_training_failed_status",
     "log_endpoint_status",
+    "MLOpsConfigs",
+    "sync_deploy_id"
 ]
 
 
@@ -1244,12 +1246,13 @@ def bind_simulation_device(args, userid):
     setattr(args, "version", version)
     if args.rank == 0:
         setattr(args, "log_file_dir", ServerConstants.get_log_file_dir())
-        setattr(args, "device_id", FedMLServerRunner.get_device_id())
-        runner = FedMLServerRunner(args)
+        setattr(args, "device_id",
+                FedMLAccountManager.get_device_id(ServerConstants.get_data_dir()))
+        runner = FedMLLaunchMasterProtocolManager(args)
     else:
         setattr(args, "log_file_dir", ClientConstants.get_log_file_dir())
-        setattr(args, "device_id", FedMLClientRunner.get_device_id())
-        runner = FedMLClientRunner(args)
+        setattr(args, "device_id", FedMLAccountManager.get_device_id())
+        runner = FedMLSlaveProtocolManager(args)
     setattr(args, "config_version", version)
     setattr(args, "cloud_region", "")
 
@@ -1326,10 +1329,10 @@ def fetch_config(args, version="release"):
     setattr(args, "version", version)
     if args.rank == 0:
         setattr(args, "log_file_dir", ServerConstants.get_log_file_dir())
-        setattr(args, "device_id", FedMLServerRunner.get_device_id())
+        setattr(args, "device_id", FedMLAccountManager.get_device_id(ServerConstants.get_data_dir()))
     else:
         setattr(args, "log_file_dir", ClientConstants.get_log_file_dir())
-        setattr(args, "device_id", FedMLClientRunner.get_device_id())
+        setattr(args, "device_id", FedMLAccountManager.get_device_id(ClientConstants.get_data_dir()))
     setattr(args, "config_version", version)
     setattr(args, "cloud_region", "")
 
diff --git a/python/fedml/core/mlops/mlops_configs.py b/python/fedml/core/mlops/mlops_configs.py
index b83e80a4dd..e0410a880d 100644
--- a/python/fedml/core/mlops/mlops_configs.py
+++ b/python/fedml/core/mlops/mlops_configs.py
@@ -2,8 +2,6 @@
 import time
 from enum import Enum
 
-
-
 import certifi
 import requests
 import fedml
diff --git a/python/fedml/core/mlops/mlops_device_perfs.py b/python/fedml/core/mlops/mlops_device_perfs.py
index d488ef27a4..4ed3cd1b6f 100644
--- a/python/fedml/core/mlops/mlops_device_perfs.py
+++ b/python/fedml/core/mlops/mlops_device_perfs.py
@@ -6,7 +6,7 @@
 import uuid
 from os.path import expanduser
 
-import multiprocess as multiprocessing
+import multiprocessing
 import psutil
 
 from fedml.computing.scheduler.comm_utils import sys_utils
@@ -19,22 +19,14 @@
 from .device_info_report_protocol import FedMLDeviceInfoReportProtocol
 
 ROLE_DEVICE_INFO_REPORTER = 1
-ROLE_ENDPOINT_MASTER = 2
-ROLE_ENDPOINT_SLAVE = 3
-ROLE_RUN_MASTER = 4
-ROLE_RUN_SLAVE = 5
-ROLE_ENDPOINT_LOGS = 6
+ROLE_DEVICE_JOB_MONITOR = 2
 
 
 class MLOpsDevicePerfStats(object):
     def __init__(self):
         self.device_realtime_stats_process = None
         self.device_realtime_stats_event = None
-        self.monitor_run_slave_process = None
-        self.monitor_run_master_process = None
-        self.monitor_endpoint_master_process = None
-        self.monitor_endpoint_slave_process = None
-        self.monitor_endpoint_logs_process = None
+        self.device_monitor_process = None
         self.args = None
         self.device_id = None
         self.run_id = None
@@ -70,36 +62,15 @@ def setup_realtime_stats_process(self, sys_args):
 
         self.device_realtime_stats_process = multiprocessing.Process(
             target=perf_stats.report_device_realtime_stats_entry,
-            args=(self.device_realtime_stats_event, ROLE_DEVICE_INFO_REPORTER))
+            args=(self.device_realtime_stats_event, ROLE_DEVICE_INFO_REPORTER, self.is_client))
         self.device_realtime_stats_process.start()
 
-        if self.is_client:
-            self.monitor_endpoint_slave_process = multiprocessing.Process(
-                target=perf_stats.report_device_realtime_stats_entry,
-                args=(self.device_realtime_stats_event, ROLE_ENDPOINT_SLAVE))
-            self.monitor_endpoint_slave_process.start()
-
-            self.monitor_endpoint_master_process = multiprocessing.Process(
-                target=perf_stats.report_device_realtime_stats_entry,
-                args=(self.device_realtime_stats_event, ROLE_ENDPOINT_MASTER))
-            self.monitor_endpoint_master_process.start()
-
-            self.monitor_run_slave_process = multiprocessing.Process(
-                target=perf_stats.report_device_realtime_stats_entry,
-                args=(self.device_realtime_stats_event, ROLE_RUN_SLAVE))
-            self.monitor_run_slave_process.start()
-
-            self.monitor_endpoint_logs_process = multiprocessing.Process(
-                target=perf_stats.report_device_realtime_stats_entry,
-                args=(self.device_realtime_stats_event, ROLE_ENDPOINT_LOGS))
-            self.monitor_endpoint_logs_process.start()
-        else:
-            self.monitor_run_master_process = multiprocessing.Process(
-                target=perf_stats.report_device_realtime_stats_entry,
-                args=(self.device_realtime_stats_event, ROLE_RUN_MASTER))
-            self.monitor_run_master_process.start()
-
-    def report_device_realtime_stats_entry(self, sys_event, role):
+        self.device_monitor_process = multiprocessing.Process(
+            target=perf_stats.report_device_realtime_stats_entry,
+            args=(self.device_realtime_stats_event, ROLE_DEVICE_JOB_MONITOR, self.is_client))
+        self.device_monitor_process.start()
+
+    def report_device_realtime_stats_entry(self, sys_event, role, is_client):
         # print(f"Report device realtime stats, process id {os.getpid()}")
 
         self.device_realtime_stats_event = sys_event
@@ -117,40 +88,38 @@ def report_device_realtime_stats_entry(self, sys_event, role):
         parent_pid = psutil.Process(os.getpid()).ppid()
         sys_stats_obj = SysStats(process_id=parent_pid)
 
-        if role == ROLE_RUN_MASTER:
-            device_info_reporter = FedMLDeviceInfoReportProtocol(run_id=self.run_id, mqtt_mgr=mqtt_mgr)
+        device_info_reporter = FedMLDeviceInfoReportProtocol(run_id=self.run_id, mqtt_mgr=mqtt_mgr)
 
         JobMonitor.get_instance().mqtt_config = self.args.mqtt_config_path
 
         # Notify MLOps with system information.
-        sleep_time_interval = 10
-        time_interval_map = {
-            ROLE_DEVICE_INFO_REPORTER: 10, ROLE_RUN_SLAVE: 60, ROLE_RUN_MASTER: 70,
-            ROLE_ENDPOINT_SLAVE: 80, ROLE_ENDPOINT_MASTER: 90, ROLE_ENDPOINT_LOGS: 30}
+        sleep_time_interval_for_device_info = 60
+        sleep_time_interval_for_client_monitor = 30
+        sleep_time_interval_for_server_monitor = 60
         while not self.should_stop_device_realtime_stats():
-            try:
-                time.sleep(time_interval_map[role])
+            if role == ROLE_DEVICE_INFO_REPORTER:
+                time.sleep(sleep_time_interval_for_device_info)
+            elif role == ROLE_DEVICE_JOB_MONITOR:
+                time.sleep(sleep_time_interval_for_client_monitor if is_client
+                           else sleep_time_interval_for_server_monitor)
 
+            try:
                 if role == ROLE_DEVICE_INFO_REPORTER:
                     MLOpsDevicePerfStats.report_gpu_device_info(self.edge_id, mqtt_mgr=mqtt_mgr)
-                elif role == ROLE_RUN_SLAVE:
-                    JobMonitor.get_instance().monitor_slave_run_process_status()
-                elif role == ROLE_RUN_MASTER:
-                    JobMonitor.get_instance().monitor_master_run_process_status(
-                        self.edge_id, device_info_reporter=device_info_reporter)
-                elif role == ROLE_ENDPOINT_SLAVE:
-                    JobMonitor.get_instance().monitor_slave_endpoint_status()
-                elif role == ROLE_ENDPOINT_MASTER:
-                    JobMonitor.get_instance().monitor_master_endpoint_status()
-                elif role == ROLE_ENDPOINT_LOGS:
-                    JobMonitor.get_instance().monitor_endpoint_logs()
+                elif role == ROLE_DEVICE_JOB_MONITOR:
+                    if is_client:
+                        JobMonitor.get_instance().monitor_slave_run_process_status()
+                        JobMonitor.get_instance().monitor_slave_endpoint_status()
+                        JobMonitor.get_instance().monitor_master_endpoint_status()
+                        JobMonitor.get_instance().monitor_endpoint_logs()
+                    else:
+                        JobMonitor.get_instance().monitor_master_run_process_status(
+                            self.edge_id, device_info_reporter=device_info_reporter)
 
             except Exception as e:
                 logging.error(f"exception {e} when reporting device pref: {traceback.format_exc()}.")
                 pass
 
-            time.sleep(sleep_time_interval)
-
             if role == ROLE_DEVICE_INFO_REPORTER:
                 self.check_fedml_client_parent_process()
 
@@ -201,6 +170,7 @@ def check_fedml_client_parent_process(self):
         if not self.is_client:
             return
 
+        # inspection PyBroadException
         try:
             home_dir = expanduser("~")
             fedml_ppids_dir = os.path.join(home_dir, ".fedml", "fedml-client", "fedml", "data", "ppids")
@@ -222,13 +192,14 @@ def check_fedml_client_parent_process(self):
                 print(f"Parent client process {file_list} has been killed, so fedml will exit.")
                 logging.info(f"Parent client process {file_list} has been killed, so fedml will exit.")
                 os.system("fedml logout")
-        except Exception as e:
+        except Exception:
             pass
 
     def check_fedml_server_parent_process(self):
         if self.is_client:
             return
 
+        # noinspection PyBroadException
         try:
             home_dir = expanduser("~")
             fedml_ppids_dir = os.path.join(home_dir, ".fedml", "fedml-server", "fedml", "data", "ppids")
@@ -250,5 +221,5 @@ def check_fedml_server_parent_process(self):
                 print(f"Parent server process {file_list} has been killed, so fedml will exit.")
                 logging.info(f"Parent server process {file_list} has been killed, so fedml will exit.")
                 os.system("fedml logout -s")
-        except Exception as e:
+        except Exception:
             pass
diff --git a/python/fedml/core/mlops/mlops_metrics.py b/python/fedml/core/mlops/mlops_metrics.py
index 57860ab7cd..ca41df09f2 100644
--- a/python/fedml/core/mlops/mlops_metrics.py
+++ b/python/fedml/core/mlops/mlops_metrics.py
@@ -16,18 +16,12 @@
 
 
 class MLOpsMetrics(object):
-    def __new__(cls, *args, **kw):
-        if not hasattr(cls, "_instance"):
-            orig = super(MLOpsMetrics, cls)
-            cls._instance = orig.__new__(cls, *args, **kw)
-            cls._instance.init()
-        return cls._instance
-
     def __init__(self):
-        pass
+        self.init()
 
     def init(self):
         self.messenger = None
+        self.send_message_func = None
         self.args = None
         self.run_id = None
         self.edge_id = None
@@ -38,8 +32,9 @@ def init(self):
         self.job_perfs = MLOpsJobPerfStats()
         self.device_perfs = MLOpsDevicePerfStats()
 
-    def set_messenger(self, msg_messenger, args=None):
+    def set_messenger(self, msg_messenger, args=None, send_message_func=None):
         self.messenger = msg_messenger
+        self.send_message_func = send_message_func
         if args is not None:
             self.args = args
             self.run_id = args.run_id
@@ -94,7 +89,7 @@ def report_client_device_status_to_web_ui(self, edge_id, status, run_id=0):
         message_json = json.dumps(msg)
         logging.info("report_client_device_status. message_json = %s" % message_json)
         MLOpsStatus.get_instance().set_client_status(edge_id, status)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def common_report_client_training_status(self, edge_id, status, run_id=0):
         # if not self.comm_sanity_check():
@@ -109,7 +104,7 @@ def common_report_client_training_status(self, edge_id, status, run_id=0):
         message_json = json.dumps(msg)
         logging.info("report_client_training_status. message_json = %s" % message_json)
         MLOpsStatus.get_instance().set_client_status(edge_id, status)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def broadcast_client_training_status(self, edge_id, status, is_from_model=False, run_id=0):
         # if not self.comm_sanity_check():
@@ -137,14 +132,14 @@ def common_broadcast_client_training_status(self, edge_id, status, run_id=0):
         msg = {"edge_id": edge_id, "run_id": run_id, "status": status}
         message_json = json.dumps(msg)
         logging.info("broadcast_client_training_status. message_json = %s" % message_json)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def client_send_exit_train_msg(self, run_id, edge_id, status, msg=None):
         topic_exit_train_with_exception = "flserver_agent/" + str(run_id) + "/client_exit_train_with_exception"
         msg = {"run_id": run_id, "edge_id": edge_id, "status": status, "msg": msg if msg is not None else ""}
         message_json = json.dumps(msg)
         logging.info("client_send_exit_train_msg.")
-        self.messenger.send_message_json(topic_exit_train_with_exception, message_json)
+        self.send_message(topic_exit_train_with_exception, message_json)
 
     def report_client_id_status(self, edge_id, status, running_json=None,
                                 is_from_model=False, server_id="0", run_id=0, msg=""):
@@ -172,7 +167,7 @@ def common_report_client_id_status(self, run_id, edge_id, status, server_id="0",
         msg = {"run_id": run_id, "edge_id": edge_id, "status": status, "server_id": server_id, "msg": msg}
         message_json = json.dumps(msg)
         # logging.info("report_client_id_status. message_json = %s" % message_json)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def report_server_training_status(self, run_id, status, edge_id=0, role=None, running_json=None, is_from_model=False):
         # if not self.comm_sanity_check():
@@ -186,6 +181,13 @@ def report_server_training_status(self, run_id, status, edge_id=0, role=None, ru
             from ...computing.scheduler.master.server_data_interface import FedMLServerDataInterface
             FedMLServerDataInterface.get_instance().save_job(run_id, self.edge_id, status, running_json)
 
+    def report_job_status(self, run_id, status):
+        topic_name = "master_agent/slave_agent/job_status"
+        payload = {"run_id": run_id, "status": status}
+
+        message_json = json.dumps(payload)
+        self.send_message(topic_name, message_json)
+
     def report_server_device_status_to_web_ui(self, run_id, status, edge_id=0, role=None):
         """
         this is used for notifying the server device status to MLOps Frontend
@@ -206,7 +208,7 @@ def report_server_device_status_to_web_ui(self, run_id, status, edge_id=0, role=
         # logging.info("report_server_device_status. msg = %s" % msg)
         message_json = json.dumps(msg)
         MLOpsStatus.get_instance().set_server_status(self.edge_id, status)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def common_report_server_training_status(self, run_id, status, role=None, edge_id=0):
         # if not self.comm_sanity_check():
@@ -223,7 +225,7 @@ def common_report_server_training_status(self, run_id, status, role=None, edge_i
         # logging.info("report_server_training_status. msg = %s" % msg)
         message_json = json.dumps(msg)
         MLOpsStatus.get_instance().set_server_status(self.edge_id, status)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def broadcast_server_training_status(self, run_id, status, role=None, is_from_model=False, edge_id=None):
         if self.messenger is None:
@@ -239,7 +241,7 @@ def broadcast_server_training_status(self, run_id, status, role=None, is_from_mo
         }
         logging.info("broadcast_server_training_status. msg = %s" % msg)
         message_json = json.dumps(msg)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
         if is_from_model:
             from ...computing.scheduler.model_scheduler.device_server_data_interface import FedMLServerDataInterface
@@ -248,19 +250,29 @@ def broadcast_server_training_status(self, run_id, status, role=None, is_from_mo
             from ...computing.scheduler.master.server_data_interface import FedMLServerDataInterface
             FedMLServerDataInterface.get_instance().save_job(run_id, self.edge_id, status)
 
-    def report_server_id_status(self, run_id, status, edge_id=None, server_id=None, server_agent_id=None):
+    def report_server_id_status(self, run_id, status, edge_id=None, server_id=None, server_agent_id=None,
+                                is_from_model=False, running_json=None):
         # if not self.comm_sanity_check():
         #     return
         topic_name = "fl_server/flserver_agent_" + str(server_agent_id if server_agent_id is not None else
                                                        self.server_agent_id) + "/status"
-        msg = {"run_id": run_id, "edge_id": edge_id if edge_id is not None else self.edge_id, "status": status}
+        in_edge_id = edge_id if edge_id is not None else self.edge_id
+        msg = {"run_id": run_id, "edge_id": in_edge_id,
+               "status": status, "is_from_model": is_from_model}
         if server_id is not None:
             msg["server_id"] = server_id
         message_json = json.dumps(msg)
         logging.info(f"report_server_id_status; topic_name: {topic_name}, msg: {msg}")
         # logging.info("report_server_id_status server id {}".format(server_agent_id))
         # logging.info("report_server_id_status. message_json = %s" % message_json)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
+
+        if is_from_model:
+            from ...computing.scheduler.model_scheduler.device_server_data_interface import FedMLServerDataInterface
+            FedMLServerDataInterface.get_instance().save_job(run_id, in_edge_id, status, running_json)
+        else:
+            from ...computing.scheduler.master.server_data_interface import FedMLServerDataInterface
+            FedMLServerDataInterface.get_instance().save_job(run_id, in_edge_id, status, running_json)
 
     def report_client_training_metric(self, metric_json):
         # if not self.comm_sanity_check():
@@ -268,7 +280,7 @@ def report_client_training_metric(self, metric_json):
         topic_name = "fl_client/mlops/training_metrics"
         logging.info("report_client_training_metric. message_json = %s" % metric_json)
         message_json = json.dumps(metric_json)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def report_server_training_metric(self, metric_json, payload=None):
         # if not self.comm_sanity_check():
@@ -279,7 +291,7 @@ def report_server_training_metric(self, metric_json, payload=None):
         else:
             message_json = json.dumps(metric_json)
         # logging.info("report_server_training_metric. message_json = %s" % metric_json)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def report_endpoint_metric(self, metric_json, payload=None):
         # if not self.comm_sanity_check():
@@ -290,7 +302,7 @@ def report_endpoint_metric(self, metric_json, payload=None):
         else:
             message_json = json.dumps(metric_json)
         # logging.info("report_endpoint_metric. message_json = %s" % metric_json)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def report_fedml_train_metric(self, metric_json, run_id=0, is_endpoint=False):
         # if not self.comm_sanity_check():
@@ -299,42 +311,42 @@ def report_fedml_train_metric(self, metric_json, run_id=0, is_endpoint=False):
         logging.info("report_fedml_train_metric. message_json = %s" % metric_json)
         metric_json["is_endpoint"] = is_endpoint
         message_json = json.dumps(metric_json)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def report_fedml_run_logs(self, logs_json, run_id=0):
         # if not self.comm_sanity_check():
         #     return
         topic_name = f"fedml_slave/fedml_master/logs/{run_id}"
         message_json = json.dumps(logs_json)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def report_server_training_round_info(self, round_info):
         # if not self.comm_sanity_check():
         #     return
         topic_name = "fl_server/mlops/training_roundx"
         message_json = json.dumps(round_info)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def report_client_model_info(self, model_info_json):
         # if not self.comm_sanity_check():
         #     return
         topic_name = "fl_server/mlops/client_model"
         message_json = json.dumps(model_info_json)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def report_aggregated_model_info(self, model_info_json):
         # if not self.comm_sanity_check():
         #     return
         topic_name = "fl_server/mlops/global_aggregated_model"
         message_json = json.dumps(model_info_json)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def report_training_model_net_info(self, model_net_info_json):
         # if not self.comm_sanity_check():
         #     return
         topic_name = "fl_server/mlops/training_model_net"
         message_json = json.dumps(model_net_info_json)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def report_llm_record(self, metric_json):
         # if not self.comm_sanity_check():
@@ -342,7 +354,7 @@ def report_llm_record(self, metric_json):
         topic_name = "model_serving/mlops/llm_input_output_record"
         logging.info("report_llm_record. message_json = %s" % metric_json)
         message_json = json.dumps(metric_json)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def report_edge_job_computing_cost(self, job_id, edge_id,
                                        computing_started_time, computing_ended_time,
@@ -359,7 +371,7 @@ def report_edge_job_computing_cost(self, job_id, edge_id,
                "computing_ended_time": computing_ended_time,
                "duration": duration, "user_id": user_id, "api_key": api_key}
         message_json = json.dumps(msg)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
         # logging.info("report_job_computing_cost. message_json = %s" % message_json)
 
     def report_logs_updated(self, run_id):
@@ -369,7 +381,7 @@ def report_logs_updated(self, run_id):
         msg = {"time": time.time()}
         message_json = json.dumps(msg)
         logging.info("report_logs_updated. message_json = %s" % message_json)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def report_artifact_info(self, job_id, edge_id, artifact_name, artifact_type,
                              artifact_local_path, artifact_url,
@@ -388,7 +400,7 @@ def report_artifact_info(self, job_id, edge_id, artifact_name, artifact_type,
             "timestamp": timestamp
         }
         message_json = json.dumps(artifact_info_json)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def report_endpoint_status(self, end_point_id, model_status, timestamp=None,
                                end_point_name="", model_name="", model_inference_url=""):
@@ -401,8 +413,8 @@ def report_endpoint_status(self, end_point_id, model_status, timestamp=None,
                                      "model_status": model_status,
                                      "timestamp": int(format(time_param, '.0f'))}
 
-        self.messenger.send_message_json(deployment_status_topic, json.dumps(deployment_status_payload))
-        self.messenger.send_message_json(deployment_status_topic_prefix, json.dumps(deployment_status_payload))
+        self.send_message(deployment_status_topic, json.dumps(deployment_status_payload))
+        self.send_message(deployment_status_topic_prefix, json.dumps(deployment_status_payload))
 
     def report_run_log(
             self, run_id, device_id, log_list, log_source=None, use_mqtt=False
@@ -480,4 +492,10 @@ def stop_device_realtime_perf(self):
         self.device_perfs.stop_device_realtime_stats()
 
     def report_json_message(self, topic, payload):
-        self.messenger.send_message_json(topic, payload)
\ No newline at end of file
+        self.send_message(topic, payload)
+
+    def send_message(self, topic, payload):
+        if self.send_message_func is not None:
+            self.send_message_func(topic, payload)
+        elif self.messenger is not None:
+            self.messenger.send_message_json(topic, payload)
\ No newline at end of file
diff --git a/python/fedml/workflow/driver_example/customized_job_example/customized_workflow.py b/python/fedml/workflow/driver_example/customized_job_example/customized_workflow.py
index 2a8f2008eb..b948231c96 100644
--- a/python/fedml/workflow/driver_example/customized_job_example/customized_workflow.py
+++ b/python/fedml/workflow/driver_example/customized_job_example/customized_workflow.py
@@ -109,7 +109,7 @@ def create_deploy_workflow(job_api_key=None):
     # DeployImageJob.generate_yaml_doc(deploy_image_job_yaml_obj, deploy_image_job_yaml)
 
     # Generate the job object
-    endpoint_id = 100  # Here you need to set your own endpoint id
+    endpoint_id = None  # Here you need to set your own endpoint id
     deploy_image_job = DeployImageJob(
         name="deploy_image_job", endpoint_id=endpoint_id,
         job_yaml_absolute_path=deploy_image_job_yaml, job_api_key=job_api_key)
@@ -168,7 +168,795 @@ def create_inference_train_workflow(
     # workflow.add_job(train_job, dependencies=[inference_jobs[-1]])
 
     # Set the input to the workflow
-    input_json = {"text": "What is a good cure for hiccups?"} if input_json is None else input_json
+    # input_json = {"text": "What is a good cure for hiccups?"} if input_json is None else input_json
+    input_json = {
+        "arr": [
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            -0.0100005,
+            -0.0100005,
+            -0.0100005,
+            -0.013973799,
+            -0.0189315247,
+            -0.023184301,
+            -0.0360728861,
+            -0.0392619154,
+            -0.0380269994,
+            -0.0390143887,
+            -0.0346046778,
+            -0.0257765396,
+            -0.0209733754,
+            -0.0217809993,
+            -0.0144984527,
+            -0.0118807892,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            -0.0178081425,
+            -0.0232058779,
+            -0.0298662898,
+            -0.0414395151,
+            -0.0586512813,
+            -0.0812643979,
+            -0.105997038,
+            -0.121704878,
+            -0.134457288,
+            -0.139756261,
+            -0.141562422,
+            -0.135229133,
+            -0.120246727,
+            -0.104490087,
+            -0.0870044931,
+            -0.0716699334,
+            -0.0485892545,
+            -0.0324260775,
+            -0.0216926329,
+            -0.0100005,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            -0.0132956624,
+            -0.0225936238,
+            -0.0383702224,
+            -0.0598206019,
+            -0.0842014426,
+            -0.118390816,
+            -0.154266827,
+            -0.188282524,
+            -0.219803054,
+            -0.242936317,
+            -0.255020324,
+            -0.259481423,
+            -0.249404582,
+            -0.226727106,
+            -0.200418885,
+            -0.16716117,
+            -0.134317009,
+            -0.0958717755,
+            -0.0736565245,
+            -0.0503983075,
+            -0.0269783475,
+            -0.0168919,
+            -0.0100005,
+            0,
+            0,
+            0,
+            0,
+            -0.0147795885,
+            -0.025122101,
+            -0.0381226487,
+            -0.0786317321,
+            -0.119593671,
+            -0.165704529,
+            -0.228814281,
+            -0.288620224,
+            -0.354491034,
+            -0.421140618,
+            -0.480243669,
+            -0.527064646,
+            -0.540807419,
+            -0.521388017,
+            -0.474446021,
+            -0.403948632,
+            -0.336571539,
+            -0.271580657,
+            -0.20666741,
+            -0.154539645,
+            -0.108856709,
+            -0.0677589146,
+            -0.0340327281,
+            -0.0215091205,
+            0,
+            0,
+            -0.0100005,
+            -0.0107381289,
+            -0.0260253876,
+            -0.0570600482,
+            -0.0914378767,
+            -0.143000013,
+            -0.199005834,
+            -0.266034404,
+            -0.353401549,
+            -0.450251488,
+            -0.551598332,
+            -0.647939202,
+            -0.743171364,
+            -0.818162561,
+            -0.851073275,
+            -0.83112168,
+            -0.763764496,
+            -0.659992784,
+            -0.547527626,
+            -0.439376979,
+            -0.33557659,
+            -0.254856553,
+            -0.183933732,
+            -0.126755715,
+            -0.0706477667,
+            -0.0388818206,
+            0,
+            0,
+            0,
+            -0.0134176155,
+            -0.0390612132,
+            -0.0873974922,
+            -0.133107017,
+            -0.194532142,
+            -0.27478633,
+            -0.369886454,
+            -0.482920333,
+            -0.605294063,
+            -0.735621386,
+            -0.869509827,
+            -0.989564738,
+            -1.09132506,
+            -1.13182948,
+            -1.09408349,
+            -0.996373436,
+            -0.868781173,
+            -0.717778845,
+            -0.570649327,
+            -0.439021868,
+            -0.326889344,
+            -0.235934504,
+            -0.167697996,
+            -0.0995100269,
+            -0.0479392976,
+            -0.0187851186,
+            0,
+            -0.0117322667,
+            -0.0288274493,
+            -0.0646532861,
+            -0.118956716,
+            -0.17783758,
+            1.53795878,
+            2.57176245,
+            1.53212043,
+            1.00392168,
+            -0.179355647,
+            -0.591732991,
+            -1.05273662,
+            -1.15378689,
+            -1.22142979,
+            -1.2388156,
+            -1.21321586,
+            -1.14302847,
+            -1.02018313,
+            -0.857098743,
+            -0.676706697,
+            -0.516203262,
+            -0.379287244,
+            -0.271402545,
+            -0.189934521,
+            -0.119940614,
+            -0.0556340911,
+            -0.0145752163,
+            0,
+            -0.0206611389,
+            -0.0437166621,
+            -0.0808756237,
+            -0.140488164,
+            -0.207699245,
+            3.7747726,
+            3.14033146,
+            2.28939169,
+            1.76127332,
+            1.4318542,
+            1.1313135,
+            0.679164893,
+            0.665484747,
+            0.666043389,
+            0.680680095,
+            0.677305174,
+            0.665508286,
+            0.721340316,
+            0.883661589,
+            0.91751869,
+            0.0282541074,
+            -0.401002939,
+            -0.283099723,
+            -0.194831338,
+            -0.123075256,
+            -0.066612686,
+            -0.0161462821,
+            -0.0112546885,
+            -0.0293918605,
+            -0.0484646663,
+            -0.093178326,
+            -0.146682925,
+            -0.218121209,
+            0.830460131,
+            1.04725853,
+            0.147086928,
+            0.259684517,
+            0.495679969,
+            0.998953721,
+            1.29535061,
+            1.12204782,
+            1.41528197,
+            1.4259952,
+            1.36416372,
+            1.22805443,
+            1.03395727,
+            1.40874227,
+            1.73166837,
+            1.00260058,
+            -0.401823716,
+            -0.275049233,
+            -0.181713744,
+            -0.107567122,
+            -0.0566041118,
+            -0.0189159236,
+            -0.0121427928,
+            -0.0243168731,
+            -0.050270377,
+            -0.0887358114,
+            -0.138806025,
+            -0.212706019,
+            -0.321729999,
+            -0.462313723,
+            -0.652442841,
+            -0.845524923,
+            -0.961258323,
+            -0.793125052,
+            -0.226359955,
+            -0.640468216,
+            -0.12372009,
+            -0.167157468,
+            -0.255843161,
+            -0.441448335,
+            -0.792766628,
+            1.30597044,
+            1.81460411,
+            0.691054579,
+            -0.383665051,
+            -0.26310513,
+            -0.166473946,
+            -0.0799663431,
+            -0.0455007946,
+            -0.0195541446,
+            -0.0100005,
+            -0.0186206584,
+            -0.0414986832,
+            -0.0722615997,
+            -0.123238725,
+            -0.212256343,
+            -0.331309824,
+            -0.491126078,
+            -0.687704902,
+            -0.86260267,
+            -0.939124713,
+            -0.869991467,
+            -0.758168797,
+            -0.722198511,
+            -0.739826964,
+            -0.809980626,
+            -0.911188613,
+            -1.00032001,
+            -0.221550751,
+            1.53134484,
+            1.47605194,
+            -0.273150738,
+            -0.363157263,
+            -0.252975575,
+            -0.157152039,
+            -0.0652009258,
+            -0.0335283586,
+            -0.0124209728,
+            0,
+            -0.014849279,
+            -0.0329699917,
+            -0.0601451792,
+            -0.118353377,
+            -0.219271688,
+            -0.354392407,
+            -0.523006773,
+            -0.71568287,
+            -0.862626101,
+            -0.90524289,
+            -0.831592288,
+            -0.751312636,
+            -0.762948163,
+            -0.825877849,
+            -0.930232292,
+            -1.04727288,
+            -0.879016953,
+            1.11455708,
+            1.61660969,
+            0.264000765,
+            -0.464282235,
+            -0.354907482,
+            -0.256014147,
+            -0.158427696,
+            -0.0620647188,
+            -0.0242921899,
+            0,
+            0,
+            -0.0117874599,
+            -0.0252632841,
+            -0.0502423656,
+            -0.115068847,
+            -0.235195531,
+            -0.377531303,
+            -0.547311188,
+            -0.723069536,
+            -0.848981953,
+            -0.878897369,
+            -0.826469482,
+            -0.795496372,
+            -0.883536617,
+            -0.994814123,
+            -1.13364619,
+            -1.20871511,
+            0.0000560198157,
+            1.28700658,
+            1.50082995,
+            -0.122561277,
+            -0.462110102,
+            -0.360151562,
+            -0.263898374,
+            -0.166295096,
+            -0.0568635009,
+            -0.0105441394,
+            0,
+            0,
+            0,
+            -0.016636779,
+            -0.0423254862,
+            -0.119931644,
+            -0.252550583,
+            -0.39191634,
+            -0.556171069,
+            -0.717849905,
+            -0.829516019,
+            -0.854549188,
+            -0.84598967,
+            -0.889246054,
+            -1.03761315,
+            -1.16457617,
+            -1.30025654,
+            -0.740699086,
+            1.05188993,
+            1.3036988,
+            -0.163440609,
+            -0.59058464,
+            -0.474233049,
+            -0.368789557,
+            -0.274082099,
+            -0.174264813,
+            -0.0696188843,
+            -0.018003151,
+            0,
+            0,
+            0,
+            -0.0168610568,
+            -0.0451688568,
+            -0.131668459,
+            -0.267838929,
+            -0.398906806,
+            -0.548202377,
+            -0.690077015,
+            -0.789823563,
+            -0.831599129,
+            -0.861314493,
+            -0.95681566,
+            -1.11036634,
+            -1.22743073,
+            -1.31006468,
+            -0.02573686,
+            1.14239899,
+            0.761423491,
+            -0.706825874,
+            -0.608999426,
+            -0.492457882,
+            -0.380502867,
+            -0.279282191,
+            -0.173984018,
+            -0.0767235054,
+            -0.0195871373,
+            -0.0100005,
+            0,
+            -0.0100005,
+            -0.024817808,
+            -0.0552275065,
+            -0.148243512,
+            -0.283202341,
+            -0.4022125,
+            -0.534598048,
+            -0.656007943,
+            -0.738083794,
+            -0.781657503,
+            -0.824620535,
+            -0.918824463,
+            -1.04078449,
+            -1.13391454,
+            -1.09212795,
+            0.70592031,
+            1.17679031,
+            -0.37378182,
+            -0.758547572,
+            -0.62868064,
+            -0.501492113,
+            -0.381043892,
+            -0.270505206,
+            -0.168251255,
+            -0.0784168728,
+            -0.022799968,
+            -0.0157856413,
+            0,
+            0,
+            -0.0269850288,
+            -0.0676999793,
+            -0.167498207,
+            -0.298089736,
+            -0.411096027,
+            -0.522810883,
+            -0.625838621,
+            -0.693423683,
+            -0.731704263,
+            -0.767086709,
+            -0.82998003,
+            -0.921590434,
+            -1.00562716,
+            0.0779492952,
+            1.22959017,
+            0.636500653,
+            -0.901400043,
+            -0.769630793,
+            -0.635363773,
+            -0.494618472,
+            -0.369117095,
+            -0.255794246,
+            -0.156732083,
+            -0.0783809414,
+            -0.0267109338,
+            -0.0148726634,
+            0,
+            -0.0100005,
+            -0.0348385687,
+            -0.0869311199,
+            -0.185622432,
+            -0.311777198,
+            -0.427690033,
+            -0.530457702,
+            -0.612837575,
+            -0.669073252,
+            -0.706628103,
+            -0.737178903,
+            -0.779583917,
+            -0.866698428,
+            -0.288157768,
+            1.2193059,
+            1.10500698,
+            -0.50413989,
+            -0.909137779,
+            -0.774520432,
+            -0.619405771,
+            -0.472096102,
+            -0.344822207,
+            -0.235626373,
+            -0.144455008,
+            -0.0769092863,
+            -0.0286146987,
+            -0.0100005,
+            0,
+            -0.0100005,
+            -0.0342628198,
+            -0.101174053,
+            -0.195711272,
+            -0.324606261,
+            -0.442716711,
+            -0.545960978,
+            -0.637281741,
+            -0.703742928,
+            -0.753441795,
+            -0.788772419,
+            -0.829773267,
+            -0.745526297,
+            0.949893727,
+            1.18293215,
+            0.385795002,
+            -1.023299,
+            -0.89872884,
+            -0.736858006,
+            -0.575258663,
+            -0.430322485,
+            -0.30912025,
+            -0.209889823,
+            -0.13189517,
+            -0.0731506415,
+            -0.0276674735,
+            -0.0100005,
+            0,
+            -0.0100005,
+            -0.0400234981,
+            -0.10709374,
+            -0.194645695,
+            -0.316981297,
+            -0.440895564,
+            -0.560086039,
+            -0.667605659,
+            -0.763806998,
+            -0.843535003,
+            -0.903604039,
+            -0.938010529,
+            0.763887624,
+            1.12176928,
+            0.784111,
+            -0.818046093,
+            -0.991046672,
+            -0.828340182,
+            -0.652780006,
+            -0.495325185,
+            -0.364891317,
+            -0.261772085,
+            -0.17529887,
+            -0.112966586,
+            -0.0617374486,
+            -0.0270715466,
+            0,
+            0,
+            0,
+            -0.0406825662,
+            -0.0978606438,
+            -0.177848987,
+            -0.287783481,
+            -0.412614752,
+            -0.543271605,
+            -0.671018812,
+            -0.798159188,
+            -0.916686263,
+            -1.02499517,
+            -0.773682132,
+            1.09355574,
+            1.05041156,
+            -0.498209852,
+            -1.05256459,
+            -0.870980804,
+            -0.688431167,
+            -0.523166414,
+            -0.391308572,
+            -0.282035183,
+            -0.199071147,
+            -0.13652517,
+            -0.0893688913,
+            -0.041317086,
+            -0.016850831,
+            0,
+            0,
+            0,
+            -0.0283386899,
+            -0.0765120563,
+            -0.141969555,
+            -0.232658498,
+            -0.341261378,
+            -0.469723228,
+            -0.606194512,
+            -0.747366354,
+            -0.880786554,
+            -0.729389144,
+            0.895224865,
+            1.11943124,
+            -0.105438374,
+            -1.00783177,
+            -0.859696548,
+            -0.683890026,
+            -0.531181637,
+            -0.395889778,
+            -0.289956123,
+            -0.203267966,
+            -0.14295145,
+            -0.0963532989,
+            -0.0643914026,
+            -0.0337070214,
+            -0.0111853003,
+            0,
+            0,
+            -0.0100005,
+            -0.0151722732,
+            -0.0480051146,
+            -0.0951161616,
+            -0.160643556,
+            -0.245453283,
+            -0.353245922,
+            -0.474265429,
+            -0.598667391,
+            -0.729305101,
+            0.389322873,
+            1.38694264,
+            1.37486731,
+            -0.403963644,
+            -0.77444593,
+            -0.638730244,
+            -0.502999283,
+            -0.387339921,
+            -0.279971294,
+            -0.198381814,
+            -0.135822721,
+            -0.0965383286,
+            -0.0633365644,
+            -0.0427549534,
+            -0.0257581657,
+            -0.0100005,
+            0,
+            0,
+            0,
+            0,
+            -0.0237543896,
+            -0.0522032466,
+            -0.0858749627,
+            -0.140703979,
+            -0.208515621,
+            -0.290149335,
+            -0.368567087,
+            0.334201602,
+            2.33307288,
+            2.27286258,
+            2.23777229,
+            0.0412218057,
+            -0.494890333,
+            -0.422342015,
+            -0.339048837,
+            -0.257069088,
+            -0.185534152,
+            -0.136577185,
+            -0.0860242391,
+            -0.0578259874,
+            -0.033636416,
+            -0.0181122384,
+            -0.0100005,
+            0,
+            0,
+            0,
+            0,
+            0,
+            -0.0136274661,
+            -0.0285803164,
+            -0.0474793553,
+            -0.0779785591,
+            -0.118532172,
+            -0.167201555,
+            -0.214787719,
+            2.22171299,
+            4.30500754,
+            4.03125111,
+            3.36505818,
+            0.379953648,
+            -0.284269948,
+            -0.247694588,
+            -0.205869945,
+            -0.155925102,
+            -0.116435448,
+            -0.0857647974,
+            -0.0546508166,
+            -0.0401800073,
+            -0.023758997,
+            -0.0165780693,
+            -0.0100005,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            -0.0115748833,
+            -0.0284271584,
+            -0.0506655656,
+            -0.0740332846,
+            -0.100455604,
+            -0.124744578,
+            4.17363552,
+            7.81243004,
+            5.7896979,
+            0.322149281,
+            -0.181506609,
+            -0.160333393,
+            -0.139182079,
+            -0.118875455,
+            -0.0873316648,
+            -0.0700227708,
+            -0.0540690537,
+            -0.0384297037,
+            -0.0265616274,
+            -0.0161844507,
+            -0.0119683967,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            -0.0132918601,
+            -0.0159980455,
+            -0.0207236291,
+            -0.0266997366,
+            -0.0284703819,
+            -0.0343035092,
+            -0.0410336906,
+            -0.0488886427,
+            -0.0548357917,
+            -0.0551988782,
+            -0.0469971082,
+            -0.0388769026,
+            -0.0316010302,
+            -0.0285226846,
+            -0.021736589,
+            -0.0100005,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0
+        ]
+    }
     workflow.set_workflow_input(input_json)
 
     # Run workflow
@@ -213,6 +1001,6 @@ def create_inference_train_workflow(
 
     if is_inference and deployed_endpoint_id is not None:
         create_inference_train_workflow(
-            job_api_key=args.api_key, endpoint_id_list=[deployed_endpoint_id, deployed_endpoint_id],
+            job_api_key=args.api_key, endpoint_id_list=[deployed_endpoint_id],
             input_json=args.infer_json)
         exit(0)
diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job.yaml b/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job.yaml
index 6ec64b0404..52ac79344e 100755
--- a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job.yaml
+++ b/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job.yaml
@@ -6,12 +6,12 @@ workspace: deploy_image_job
 # Running entry commands which will be executed as the job entry point.
 # Support multiple lines, which can not be empty.
 job: |
-  echo "current job id: $FEDML_CURRENT_RUN_ID"
-  echo "current edge id: $FEDML_CURRENT_EDGE_ID"
-  echo "Hello, Here is the FedML Nexus AI platform."
-  echo "Current directory is as follows."
-  pwd
-  sleep 3
+    echo "current job id: $FEDML_CURRENT_RUN_ID"
+    echo "current edge id: $FEDML_CURRENT_EDGE_ID"
+    echo "Hello, Here is the FedML Nexus AI platform."
+    echo "Current directory is as follows."
+    pwd
+    sleep 3
 
 job_type: deploy              # options: train, deploy, federate
 
diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/fedml_model_config.yaml b/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/fedml_model_config.yaml
index ab8dbc4747..6992bb37df 100644
--- a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/fedml_model_config.yaml
+++ b/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/fedml_model_config.yaml
@@ -1,12 +1,13 @@
-workspace: "."
-entry_point: "main_entry.py"
+workspace: "./"
+entry_point: "mnist_serve_main.py"
 
-auto_detect_public_ip: true
-server_external_port: 20215
+data_cache_dir: ""
+bootstrap: ""
+
+server_external_port: 20203
 server_internal_port: 2203
 
-bootstrap: |
-  echo "Bootstrap start..."
-  pip install -U fedml
-  sh ./config/bootstrap.sh
-  echo "Bootstrap finished"
+auto_detect_public_ip: true
+
+request_input_example: {"arr":[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-1.00005000e-02,-1.00005000e-02,-1.00005000e-02,-1.39737990e-02,-1.89315247e-02,-2.31843010e-02,-3.60728861e-02,-3.92619154e-02,-3.80269994e-02,-3.90143887e-02,-3.46046778e-02,-2.57765396e-02,-2.09733754e-02,-2.17809993e-02,-1.44984527e-02,-1.18807892e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-1.78081425e-02,-2.32058779e-02,-2.98662898e-02,-4.14395151e-02,-5.86512813e-02,-8.12643979e-02,-1.05997038e-01,-1.21704878e-01,-1.34457288e-01,-1.39756261e-01,-1.41562422e-01,-1.35229133e-01,-1.20246727e-01,-1.04490087e-01,-8.70044931e-02,-7.16699334e-02,-4.85892545e-02,-3.24260775e-02,-2.16926329e-02,-1.00005000e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-1.32956624e-02,-2.25936238e-02,-3.83702224e-02,-5.98206019e-02,-8.42014426e-02,-1.18390816e-01,-1.54266827e-01,-1.88282524e-01,-2.19803054e-01,-2.42936317e-01,-2.55020324e-01,-2.59481423e-01,-2.49404582e-01,-2.26727106e-01,-2.00418885e-01,-1.67161170e-01,-1.34317009e-01,-9.58717755e-02,-7.36565245e-02,-5.03983075e-02,-2.69783475e-02,-1.68919000e-02,-1.00005000e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 
0.00000000e+00,-1.47795885e-02,-2.51221010e-02,-3.81226487e-02,-7.86317321e-02,-1.19593671e-01,-1.65704529e-01,-2.28814281e-01,-2.88620224e-01,-3.54491034e-01,-4.21140618e-01,-4.80243669e-01,-5.27064646e-01,-5.40807419e-01,-5.21388017e-01,-4.74446021e-01,-4.03948632e-01,-3.36571539e-01,-2.71580657e-01,-2.06667410e-01,-1.54539645e-01,-1.08856709e-01,-6.77589146e-02,-3.40327281e-02,-2.15091205e-02, 0.00000000e+00, 0.00000000e+00,-1.00005000e-02,-1.07381289e-02,-2.60253876e-02,-5.70600482e-02,-9.14378767e-02,-1.43000013e-01,-1.99005834e-01,-2.66034404e-01,-3.53401549e-01,-4.50251488e-01,-5.51598332e-01,-6.47939202e-01,-7.43171364e-01,-8.18162561e-01,-8.51073275e-01,-8.31121680e-01,-7.63764496e-01,-6.59992784e-01,-5.47527626e-01,-4.39376979e-01,-3.35576590e-01,-2.54856553e-01,-1.83933732e-01,-1.26755715e-01,-7.06477667e-02,-3.88818206e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-1.34176155e-02,-3.90612132e-02,-8.73974922e-02,-1.33107017e-01,-1.94532142e-01,-2.74786330e-01,-3.69886454e-01,-4.82920333e-01,-6.05294063e-01,-7.35621386e-01,-8.69509827e-01,-9.89564738e-01,-1.09132506e+00,-1.13182948e+00,-1.09408349e+00,-9.96373436e-01,-8.68781173e-01,-7.17778845e-01,-5.70649327e-01,-4.39021868e-01,-3.26889344e-01,-2.35934504e-01,-1.67697996e-01,-9.95100269e-02,-4.79392976e-02,-1.87851186e-02, 0.00000000e+00,-1.17322667e-02,-2.88274493e-02,-6.46532861e-02,-1.18956716e-01,-1.77837580e-01, 1.53795878e+00, 2.57176245e+00, 1.53212043e+00, 1.00392168e+00,-1.79355647e-01,-5.91732991e-01,-1.05273662e+00,-1.15378689e+00,-1.22142979e+00,-1.23881560e+00,-1.21321586e+00,-1.14302847e+00,-1.02018313e+00,-8.57098743e-01,-6.76706697e-01,-5.16203262e-01,-3.79287244e-01,-2.71402545e-01,-1.89934521e-01,-1.19940614e-01,-5.56340911e-02,-1.45752163e-02, 0.00000000e+00,-2.06611389e-02,-4.37166621e-02,-8.08756237e-02,-1.40488164e-01,-2.07699245e-01, 3.77477260e+00, 3.14033146e+00, 2.28939169e+00, 1.76127332e+00, 1.43185420e+00, 1.13131350e+00, 6.79164893e-01, 6.65484747e-01, 6.66043389e-01, 
6.80680095e-01, 6.77305174e-01, 6.65508286e-01, 7.21340316e-01, 8.83661589e-01, 9.17518690e-01, 2.82541074e-02,-4.01002939e-01,-2.83099723e-01,-1.94831338e-01,-1.23075256e-01,-6.66126860e-02,-1.61462821e-02,-1.12546885e-02,-2.93918605e-02,-4.84646663e-02,-9.31783260e-02,-1.46682925e-01,-2.18121209e-01, 8.30460131e-01, 1.04725853e+00, 1.47086928e-01, 2.59684517e-01, 4.95679969e-01, 9.98953721e-01, 1.29535061e+00, 1.12204782e+00, 1.41528197e+00, 1.42599520e+00, 1.36416372e+00, 1.22805443e+00, 1.03395727e+00, 1.40874227e+00, 1.73166837e+00, 1.00260058e+00,-4.01823716e-01,-2.75049233e-01,-1.81713744e-01,-1.07567122e-01,-5.66041118e-02,-1.89159236e-02,-1.21427928e-02,-2.43168731e-02,-5.02703770e-02,-8.87358114e-02,-1.38806025e-01,-2.12706019e-01,-3.21729999e-01,-4.62313723e-01,-6.52442841e-01,-8.45524923e-01,-9.61258323e-01,-7.93125052e-01,-2.26359955e-01,-6.40468216e-01,-1.23720090e-01,-1.67157468e-01,-2.55843161e-01,-4.41448335e-01,-7.92766628e-01, 1.30597044e+00, 1.81460411e+00, 6.91054579e-01,-3.83665051e-01,-2.63105130e-01,-1.66473946e-01,-7.99663431e-02,-4.55007946e-02,-1.95541446e-02,-1.00005000e-02,-1.86206584e-02,-4.14986832e-02,-7.22615997e-02,-1.23238725e-01,-2.12256343e-01,-3.31309824e-01,-4.91126078e-01,-6.87704902e-01,-8.62602670e-01,-9.39124713e-01,-8.69991467e-01,-7.58168797e-01,-7.22198511e-01,-7.39826964e-01,-8.09980626e-01,-9.11188613e-01,-1.00032001e+00,-2.21550751e-01, 1.53134484e+00, 1.47605194e+00,-2.73150738e-01,-3.63157263e-01,-2.52975575e-01,-1.57152039e-01,-6.52009258e-02,-3.35283586e-02,-1.24209728e-02, 0.00000000e+00,-1.48492790e-02,-3.29699917e-02,-6.01451792e-02,-1.18353377e-01,-2.19271688e-01,-3.54392407e-01,-5.23006773e-01,-7.15682870e-01,-8.62626101e-01,-9.05242890e-01,-8.31592288e-01,-7.51312636e-01,-7.62948163e-01,-8.25877849e-01,-9.30232292e-01,-1.04727288e+00,-8.79016953e-01, 1.11455708e+00, 1.61660969e+00, 2.64000765e-01,-4.64282235e-01,-3.54907482e-01,-2.56014147e-01,-1.58427696e-01,-6.20647188e-02,-2.42921899e-02, 0.00000000e+00, 
0.00000000e+00,-1.17874599e-02,-2.52632841e-02,-5.02423656e-02,-1.15068847e-01,-2.35195531e-01,-3.77531303e-01,-5.47311188e-01,-7.23069536e-01,-8.48981953e-01,-8.78897369e-01,-8.26469482e-01,-7.95496372e-01,-8.83536617e-01,-9.94814123e-01,-1.13364619e+00,-1.20871511e+00, 5.60198157e-05, 1.28700658e+00, 1.50082995e+00,-1.22561277e-01,-4.62110102e-01,-3.60151562e-01,-2.63898374e-01,-1.66295096e-01,-5.68635009e-02,-1.05441394e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-1.66367790e-02,-4.23254862e-02,-1.19931644e-01,-2.52550583e-01,-3.91916340e-01,-5.56171069e-01,-7.17849905e-01,-8.29516019e-01,-8.54549188e-01,-8.45989670e-01,-8.89246054e-01,-1.03761315e+00,-1.16457617e+00,-1.30025654e+00,-7.40699086e-01, 1.05188993e+00, 1.30369880e+00,-1.63440609e-01,-5.90584640e-01,-4.74233049e-01,-3.68789557e-01,-2.74082099e-01,-1.74264813e-01,-6.96188843e-02,-1.80031510e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-1.68610568e-02,-4.51688568e-02,-1.31668459e-01,-2.67838929e-01,-3.98906806e-01,-5.48202377e-01,-6.90077015e-01,-7.89823563e-01,-8.31599129e-01,-8.61314493e-01,-9.56815660e-01,-1.11036634e+00,-1.22743073e+00,-1.31006468e+00,-2.57368600e-02, 1.14239899e+00, 7.61423491e-01,-7.06825874e-01,-6.08999426e-01,-4.92457882e-01,-3.80502867e-01,-2.79282191e-01,-1.73984018e-01,-7.67235054e-02,-1.95871373e-02,-1.00005000e-02, 0.00000000e+00,-1.00005000e-02,-2.48178080e-02,-5.52275065e-02,-1.48243512e-01,-2.83202341e-01,-4.02212500e-01,-5.34598048e-01,-6.56007943e-01,-7.38083794e-01,-7.81657503e-01,-8.24620535e-01,-9.18824463e-01,-1.04078449e+00,-1.13391454e+00,-1.09212795e+00, 7.05920310e-01, 1.17679031e+00,-3.73781820e-01,-7.58547572e-01,-6.28680640e-01,-5.01492113e-01,-3.81043892e-01,-2.70505206e-01,-1.68251255e-01,-7.84168728e-02,-2.27999680e-02,-1.57856413e-02, 0.00000000e+00, 
0.00000000e+00,-2.69850288e-02,-6.76999793e-02,-1.67498207e-01,-2.98089736e-01,-4.11096027e-01,-5.22810883e-01,-6.25838621e-01,-6.93423683e-01,-7.31704263e-01,-7.67086709e-01,-8.29980030e-01,-9.21590434e-01,-1.00562716e+00, 7.79492952e-02, 1.22959017e+00, 6.36500653e-01,-9.01400043e-01,-7.69630793e-01,-6.35363773e-01,-4.94618472e-01,-3.69117095e-01,-2.55794246e-01,-1.56732083e-01,-7.83809414e-02,-2.67109338e-02,-1.48726634e-02, 0.00000000e+00,-1.00005000e-02,-3.48385687e-02,-8.69311199e-02,-1.85622432e-01,-3.11777198e-01,-4.27690033e-01,-5.30457702e-01,-6.12837575e-01,-6.69073252e-01,-7.06628103e-01,-7.37178903e-01,-7.79583917e-01,-8.66698428e-01,-2.88157768e-01, 1.21930590e+00, 1.10500698e+00,-5.04139890e-01,-9.09137779e-01,-7.74520432e-01,-6.19405771e-01,-4.72096102e-01,-3.44822207e-01,-2.35626373e-01,-1.44455008e-01,-7.69092863e-02,-2.86146987e-02,-1.00005000e-02, 0.00000000e+00,-1.00005000e-02,-3.42628198e-02,-1.01174053e-01,-1.95711272e-01,-3.24606261e-01,-4.42716711e-01,-5.45960978e-01,-6.37281741e-01,-7.03742928e-01,-7.53441795e-01,-7.88772419e-01,-8.29773267e-01,-7.45526297e-01, 9.49893727e-01, 1.18293215e+00, 3.85795002e-01,-1.02329900e+00,-8.98728840e-01,-7.36858006e-01,-5.75258663e-01,-4.30322485e-01,-3.09120250e-01,-2.09889823e-01,-1.31895170e-01,-7.31506415e-02,-2.76674735e-02,-1.00005000e-02, 0.00000000e+00,-1.00005000e-02,-4.00234981e-02,-1.07093740e-01,-1.94645695e-01,-3.16981297e-01,-4.40895564e-01,-5.60086039e-01,-6.67605659e-01,-7.63806998e-01,-8.43535003e-01,-9.03604039e-01,-9.38010529e-01, 7.63887624e-01, 1.12176928e+00, 7.84111000e-01,-8.18046093e-01,-9.91046672e-01,-8.28340182e-01,-6.52780006e-01,-4.95325185e-01,-3.64891317e-01,-2.61772085e-01,-1.75298870e-01,-1.12966586e-01,-6.17374486e-02,-2.70715466e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-4.06825662e-02,-9.78606438e-02,-1.77848987e-01,-2.87783481e-01,-4.12614752e-01,-5.43271605e-01,-6.71018812e-01,-7.98159188e-01,-9.16686263e-01,-1.02499517e+00,-7.73682132e-01, 1.09355574e+00, 
1.05041156e+00,-4.98209852e-01,-1.05256459e+00,-8.70980804e-01,-6.88431167e-01,-5.23166414e-01,-3.91308572e-01,-2.82035183e-01,-1.99071147e-01,-1.36525170e-01,-8.93688913e-02,-4.13170860e-02,-1.68508310e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-2.83386899e-02,-7.65120563e-02,-1.41969555e-01,-2.32658498e-01,-3.41261378e-01,-4.69723228e-01,-6.06194512e-01,-7.47366354e-01,-8.80786554e-01,-7.29389144e-01, 8.95224865e-01, 1.11943124e+00,-1.05438374e-01,-1.00783177e+00,-8.59696548e-01,-6.83890026e-01,-5.31181637e-01,-3.95889778e-01,-2.89956123e-01,-2.03267966e-01,-1.42951450e-01,-9.63532989e-02,-6.43914026e-02,-3.37070214e-02,-1.11853003e-02, 0.00000000e+00, 0.00000000e+00,-1.00005000e-02,-1.51722732e-02,-4.80051146e-02,-9.51161616e-02,-1.60643556e-01,-2.45453283e-01,-3.53245922e-01,-4.74265429e-01,-5.98667391e-01,-7.29305101e-01, 3.89322873e-01, 1.38694264e+00, 1.37486731e+00,-4.03963644e-01,-7.74445930e-01,-6.38730244e-01,-5.02999283e-01,-3.87339921e-01,-2.79971294e-01,-1.98381814e-01,-1.35822721e-01,-9.65383286e-02,-6.33365644e-02,-4.27549534e-02,-2.57581657e-02,-1.00005000e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-2.37543896e-02,-5.22032466e-02,-8.58749627e-02,-1.40703979e-01,-2.08515621e-01,-2.90149335e-01,-3.68567087e-01, 3.34201602e-01, 2.33307288e+00, 2.27286258e+00, 2.23777229e+00, 4.12218057e-02,-4.94890333e-01,-4.22342015e-01,-3.39048837e-01,-2.57069088e-01,-1.85534152e-01,-1.36577185e-01,-8.60242391e-02,-5.78259874e-02,-3.36364160e-02,-1.81122384e-02,-1.00005000e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-1.36274661e-02,-2.85803164e-02,-4.74793553e-02,-7.79785591e-02,-1.18532172e-01,-1.67201555e-01,-2.14787719e-01, 2.22171299e+00, 4.30500754e+00, 4.03125111e+00, 3.36505818e+00, 3.79953648e-01,-2.84269948e-01,-2.47694588e-01,-2.05869945e-01,-1.55925102e-01,-1.16435448e-01,-8.57647974e-02,-5.46508166e-02,-4.01800073e-02,-2.37589970e-02,-1.65780693e-02,-1.00005000e-02, 0.00000000e+00, 
0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-1.15748833e-02,-2.84271584e-02,-5.06655656e-02,-7.40332846e-02,-1.00455604e-01,-1.24744578e-01, 4.17363552e+00, 7.81243004e+00, 5.78969790e+00, 3.22149281e-01,-1.81506609e-01,-1.60333393e-01,-1.39182079e-01,-1.18875455e-01,-8.73316648e-02,-7.00227708e-02,-5.40690537e-02,-3.84297037e-02,-2.65616274e-02,-1.61844507e-02,-1.19683967e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-1.32918601e-02,-1.59980455e-02,-2.07236291e-02,-2.66997366e-02,-2.84703819e-02,-3.43035092e-02,-4.10336906e-02,-4.88886427e-02,-5.48357917e-02,-5.51988782e-02,-4.69971082e-02,-3.88769026e-02,-3.16010302e-02,-2.85226846e-02,-2.17365890e-02,-1.00005000e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00]}
+
diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/mnist_serve_main.py b/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/mnist_serve_main.py
new file mode 100644
index 0000000000..6367ea487f
--- /dev/null
+++ b/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/mnist_serve_main.py
@@ -0,0 +1,37 @@
+from fedml.serving import FedMLPredictor
+from fedml.serving import FedMLInferenceRunner
+from model.minist_model import LogisticRegression
+
+# This is the model file that will upload to MLOps
+MODEL_PARMS_DIR = "./model/model_parms_from_mlops"
+# If you do not want to upload the model file to MLOps,
+# (i.e., you want to use the model file in the lcoal DATA_CACHE_DIR)
+# Please use the DATA_CACHE_DIR and specify DATA_CACHE_DIR
+# in the fedml_model_config.yaml
+# DATA_CACHE_DIR = ""
+
+class MnistPredictor(FedMLPredictor):
+    def __init__(self):
+        import pickle
+        import torch
+
+        with open(MODEL_PARMS_DIR, 'rb') as model_file_obj:
+            model_params = pickle.load(model_file_obj)
+        
+        output_dim = 10
+
+        self.model = LogisticRegression(28 * 28, output_dim)
+
+        self.model.load_state_dict(model_params)
+
+        self.list_to_tensor_func = torch.tensor
+        
+    def predict(self, request):
+        arr = request["arr"]
+        input_tensor = self.list_to_tensor_func(arr)
+        return self.model(input_tensor)
+
+if __name__ == "__main__":
+    predictor = MnistPredictor()
+    fedml_inference_runner = FedMLInferenceRunner(predictor)
+    fedml_inference_runner.run()
\ No newline at end of file
diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/model/minist_model.py b/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/model/minist_model.py
new file mode 100644
index 0000000000..25789d4e1c
--- /dev/null
+++ b/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/model/minist_model.py
@@ -0,0 +1,11 @@
+import torch
+class LogisticRegression(torch.nn.Module):
+    def __init__(self, input_dim, output_dim):
+        super(LogisticRegression, self).__init__()
+        self.linear = torch.nn.Linear(input_dim, output_dim)
+
+    def forward(self, x):
+        import torch
+        outputs = torch.sigmoid(self.linear(x))
+        return outputs
+
diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/model/model_parms_from_mlops b/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/model/model_parms_from_mlops
new file mode 100644
index 0000000000000000000000000000000000000000..8c31b9f85b8d6964c585c43189ab5cf7a0a7d3c3
GIT binary patch
literal 32188
zcmZsCc|285_`iMMQzRh?sU)em=Q*WPsZ^4r<s-?SWKTOnwj^7UkPt$$#hvE}NfMGI
z2}zNVkP1n^`~80Z{hdGVecf|s=FFLMW}fH0j2~az!wX#hdqjvHKjq?LcihYAl&fcK
zgy<d*TRRUsTO+6AUa>K;OU+_6Bg9>tT<wl|XrH!oa&YvDjS%xX<#F6m`>3~<lMAPc
z%ux?J8*e8U+oN7~uAZknj{4|wYI3UmUolV4*D(jX*a)ekE~k#!9<}#4b@Hgq8816e
zPN_{7&v9}2@q~%E%p`4{QgO~Qov-c3v+FA_=ktO)m!_o@r;~0Tr@T&adU@2x&cl<_
zO?Pfi0ZVbtKQ2yQUM_Y=?Obi0j=8$?gjkAjPI{ejvpeePbk@$(o%ez}pQa^0=cK3O
zDGx7q{+&FQyqsT7u3qi}JNYd6{-=#ot`6>kJNdl5IoH&*6#SpU|JP-AuH*lE3nE)N
z9rxPze<wxUxh*9*Wmf60Tf1hJ?z&YvI&0S$=(_)3!^hpc-FbJVD(&`i=iAF^?>~30
z|8HsTW7?@ooL2tl(1|E79Z!P%hEa?+eT&Uj$5864BB)<7V9%NU0{W$Uh*X;boNv>?
zEa84K^F)+MeNlxahVGavoyY3h=K%Q;58$q4Ew(pZ#QYF*$Q&OB^Y&3<{JjI*?jZRu
z=QL&N%}4tb3o>Ra%W%hHSuCAri#l~Pz?02jTxl>yc{bn^VPU4AI|ub`{(zWM0K7`Q
z3blqmSQoY9(Hx#q)wx29Pt0q~oQcQtK4k!>?n7a_KRUh5W*5p3@Nt%=Z&8lWFP}^q
zYt?{>Q7+og>7XRu^rBxwJqBvsz~23V@L2Z*tZ>T19bP)Xwo8T4Z?R;)buOzvP5`(D
ziy=)}jJ8?Zi%W$pF~`~#Ze@>w@z7sTs^_9#Dt)02;w+Aa?x$ecBfMxO#QgffO=o#`
zL+r3L<16}}I=kd4i7u=pTc(0At;-s;PUIoK@g6v@5C&eip22_Lx5L)?vP|8}YP8FX
zgo8JSsK*TrINN<4FHA|$)#M4RKjDw_TjVf)K$M~0QEZVXov8EUBet9K5P>5Ps1HS_
za56Cu!bA%3Y&joo=v@cPntoy1fHc|*J!dJT22zRqT@*WHE4Z!DDl_|e2dz6!fY4(x
zCh&G7?p-cNvH}<22`nHMcb=la<Sp{(y*LEMu~Dj#kEn*e1k3&aQvJ5t;G|Ljal9~0
z`TQ$~sOuuM;<Iw_?GT|WwrLT8C|>65bPUGNW5cI#5kfm;BCBu;8!P0PL^(l5f1x*P
zZ^Um{yXH68UwecqyTs^IQ&$jDezW(;>EI^Go1|L4mF#R3Wa@5kGwpp($cz}2*KY~K
z-}{e%OAaryL`R9FC34Y&@~1Gri<eFULx?RCVI<=-z`5`X88^6t@$s)=0Q)iNgf4XY
z-eB)E<)za<hl94sb674J52d{eNYHd8J8#ExG#M@fAB{nnEzgI|hTX_E^JSe})`GQa
zcW}9X4o<(?2D~x(nEW%3EjsANYDz2uzleVH-dKwbV-GQ)u?DKm*JIcTY5KBR28{io
zKxJ+?6-J^+?&(5G&8wQs?98O<wAEqL7#kCO{=>m(ar(}Y0i2y*MPfqv>Dc?G5UhHc
z<re)HpS4E8A){A#Azla1pE1QI=pvbS{Mjp)kAuaHSZZ2bl0FvN1(6rSu#Ub9IcbmZ
z<joWKSI__^hS$USB?Hh?7);dD7DB&n7ufvlLKhKHdPB}ztiL?LwkhUD38zGyJ)B6T
ze;$J?yO-hAzI@EowZrR)vdke%cZgec9f@QEzVpjKuWfI^QN0f4FkJL#n-_rA0hE~2
zM-uMu0oOMlq0v$bQdc-)gYG1`y|x?|Mcsk*PMWBzRF7KGymWA41xgzRlbO>Rka<3q
zW%jQIC+dgs@3POZb!#5ABl#Pc@^3~5S4B{gcO~EZ94WDj(P%T|1D|5I!lkYtmIsqa
z>I0wSE{ui4U0h7>VhR@MDPtn*E*W`}4VtswMDcAUG)L})Lxt~fy-hx)A$tb4hB`oH
zT?`ph97UtXT2kY#2WHBf@x0y<_+{D)xn{cPBfEk!KM(@u!)cVs%e&MDM=rQ=<r|8?
zA!6U|56jgXAm{EeU^S^xVKr@F?k~W)`EVI-l5$3+{mbc@|H@%h_8sQ7*h19TPWYuQ
zLw~#d4;Eef2<-a`I5%(wD!dS(6&%K(M$sERLIvsi&Cy^uSOg*$DzMB}65MuPLu<`D
zu>Hq5R2Yk)EIZGTvL<DSJlspF1>|Vs&Qx|zaSap%rDKY(6q6}4glC8QVeyt^Fj^-}
zYtw@u)X2j`__koI>Jp-0y#mA5NYGY2MQE=Ojfv?pOtC^KF5-O(d}klRymfxi{>>ZI
zbZ){ajSTEBOQw>f+raE%Bt$tMr3ZRf&~DG==$ZRkDEK%WA}52Wr7fc9Ii8LoYtqqz
z`iRZSH4ysm1%!PRq!LLrv5cKh#}9r0cHcWzSWrF=&3_8bQbE+b*~N^KTPkSI(6D9I
z8$1vx0IGt2&}>8wgY)F+-$l`|$zcxZ`_4sK4`&emMSW!R7c1((7D0LiYns)cQ;bsU
zM_5ub;gB@H93;Ae@w~|-^(#3DpWI!5ZN>IbS}}zw(?AJ0#sh1P9`;fTn2}2z=xyvp
z>MIo)V_OMGOypv=oo~PoCvKob^d1lymY}CoKjY|;afq{XM(3@|fM?MgR?|0LW+Y%7
zJaukE=I~<Lc3hNp-Lwq*jx47Q%U3{YN(TH?6J*XGy@~3rI*jpWHr}i+VgCwKV2<n$
zWBrX<2z=C1)=mM$GuZ)HmiCA$O8pBpL#niyVm)w+r<1xm6M%3@279NF%QplxyE1Te
ze*~<}yNqcTOUZ5zC%pJckk<6%g+-&>w6kOm;LsV^k?2T{TZiK=>y!9mUOL(D!C6yg
zC$J)`57eTnSv(i6qHLEFafw()?{Ac*CjzGsf7HUUlhL5QFb{6t{S9Shw=v?lK5%by
zf&&-iSe6bgFjE*s<^88X8`+;>t^d+T8cwKzR>lUnR~UjyMcb&%y*Ht;Z<=_Yn5LF5
zQ^z;MwlHWJhQqFcDDqw$^Zg`fpFa~sb-_ggk5|{xXk3~$X_ufYb35@Ne8VSyyD*wh
zip?WxOnn~MjDs2)tPi;a<c_BhhtI*#BB@Jw4(&kuDFH~{%th}#)P@2d)Y(tY+u-na
zCEAbm5yf^q#?1;8t(1Qo)2IJXCH9ZmDx0~{Czco6E6Tw^=mSgSqy!WAEEms8?!~Z;
z*WjhAHX4?!qIGn7Si|o`V4a0N4gO+u$<rltP-_?P)%B6?SLIk?6-Y$!5A+BN;k?3E
z*yCOY!RJ=cmUb~DByIq)T!6725P~0-Yl*XREH0TCqK4J2AUJ0L6y!fa{)Z=|v7e98
z+m(q*tYvhxR5{r4ya&U=OSmAzgkGj@L|ce!&_-T0xH}v0kZd-#?ps2qt!^U2g(9f*
zxe5+t`eV<9MAX)}OIEE-A^rwm(4lP%WQzQ8`Tjjn?>P&j!7<Q(ErZI9{{><dXDB^8
zFI?LxM>qUf4N4Mx%<D=GGGVj_^T(IdrQ3#B6=D4twDhpS%GCEzt$7*vf|KB@4nNF&
z3b6aCIjRVxV&0dz%)Apr_*vB(xNbj06uwK@rm?^yVhLqZz~ES~D1CTi4k|4A2G1o&
zk>}TPtlJd=7#WHl<KN&cS3cHqKgKiNOL3orER25?r`s84{G90np05PZcJDTHTb2z*
zn}nIWWI1_JJ`SB$?L>KfCoJ~>3}X#KeTX>Yo0CO+BO6I;8Vy?BcF=dW2kv-OliuAo
zNVj<!nDQJ$?b%NN32#tB*pqZ-HnAbc2E-Z)!0hB0*=sV3o5tMW=ZY3eeE~nbIW>$G
zKfS^3-#EOCTfjPTavqaTH^R@EcGUD10DY+(O6_|H23G6@!%iNiU7a5`sn?-a@+&f}
zq(Rw>e+8a9H?hnu6EyxT#nvY+*r~jNF}=KksoVJ-QUeVzW$s({k3~1BF^h00ur7o{
zQu&}RJ%Ke3dWpi5a@=cp5B1q1&}1S^n;jd(I;n@SwcrMdjeR1~YX{)GKOb5b?V}d8
zG~m3OlTc~)lJve9z=$|*c<3~XYe!_5ReEZ~d~-L<oYlmvwE?h6XcE$xN@(xBi6IyL
z$@xfqTxC0iK~q(0*DvS<uQR_O!1@riv7Zlg%r2mFdncry^Tgfo0w?8)sZ`Sk&^*G+
zsI1_lXI7cPCCx_c5&KKv`d@IY=|Q*bWvEYWfZn}aOr(4&WIl_7%{s*pmCZ)Ed(yPm
z`8@3Alc56~#29G{9%fTv5tJo+Q5gb)R3vX2&U$)d=DcON(_ez=+VX={Hq}JMrvH7^
z{SzRoQG^bOT>$oa7Pzz74bQ(}<MBiFXtb1&isDA%t`PvSif2)m{tB5p8j+``Rln{M
zFWuuKO;?}EMtz@VsuIE|o#ml8v2+J=T~LBt_ZJu|S4BP0y8un2c6cJ_2b`6aC0j@k
zsQXLP&Os}2Lh%kPR8~dv))dmVkdSxF1PzAs4&Y*E39yZBW951hP(0aB9uIj_Eqw>@
zgyjS(UpxuskK-tX1I{?zwgtHa=R?4)*Mz0>81!@nDf;t&V4>*;1tsEmQ`-;x_jf?{
z7k;Yx+;8gpYJLh7T7XZ}4qY}}$M|<)@H%1~(_8r|O>ZA8J^lh3J9rsC^)fKgoI`hR
zn5LxCL`hhb6z$sRizAT~u~70u{fIkc)4^G)PcWIfu(E@g=1DV4Oav((v(xO0&vPi#
zQ$6tgl_c#GmWi^y&TPM_ov7P2jYmeFVn~rRt-ZPe#C!@NbfOr3HM~QUBe~SJIx%|W
zZW4Z8%}1Y^Qi9~=FM%)h47z;`hPC<Jj0(35IpSgjV&b~+ZRi)&luOfh6ZvROiE8M!
zXn@GHIP$~i15SDvqixG75~m-7KH_(wc=QyQ)eD2x3nN&YB0-l52SWV#AZ|B*f}NYL
zg5Af*)Q#$BSol|(nKBn(w0nxcN{xW%N<OAq${JK6da-SBFI@e&1|of>XtNEHO!7W4
zCanB9+{@*sXXaLbl6NgEHy*{mPMr{}!^_-!t_r?Z`KWvj&|5?fCKK1;Oz=UXwwssP
zA;ia+&N~fS7u|3RU4;9t+u<eM?|9YYE5;uxfTevmV0X(Sip86ZnzK9L>Q_PPYIHr$
z)s+O7QDNHtycm=8XD+jJwI21WuMR4Yy}$;U2UK+S6HpN`0AG>0P@OTC?ii|qs6}^B
z$lHkGn~eueZ(jOcpA^$jl>%1OQk2b?VD{bThiUgzM7vU|@<bW3KR4s`j28BOMiZ~B
zD2K$daB3Uh2Y9sLGWDe6JSQi)151~mATF;GSc~6<qoDOgGS8A1lU}Kkp2eSWN1rx(
zz3BvQnHELk+bQ6;{~20;e1e105;(qlA(Ah()PxX29kxw?O~-3+vF2sA(uqRI=i{d<
zql?*RQ}VI(-$&|B|3x?`)Qi2z4QQIW8Rp!4Ln2=;U`p~{;5@PIAoX?={(AfZn2LU|
z)u^GKomfpSZZyOTBgyQDx9=$9kKH)?T@W1$Mp0yaA*{Yyij7vn^ttzw#B9e6)DDUV
zg)Pa{e7A?N_Kq^-XQ?uqIpgkfLKWo;Vv&`R3p;Y2L!%Bq!@843j(_?Jbt?`-d)7Gg
zKHq>h(&Dh)iiZ|GavpZeiqlJUJkhbU92I&_l23Pqm_kc$=xXnUk&{{=_HHrB8jFC!
zC9OE}cZ74_n#`w1^BEZ{Ros;^3=Qier~&O0kkIxD2WyQX=YBDH#<L4XjccGs(iJ}V
zhZDC=-Rx5L0@RtqiH$~&q1M3(lg~ba_8&*F*^w6=FBr30RCyWJk{DK1K`iNRtS9jN
z5WaFPf;syM{AjDgmsg`9d!Q9B%pb+KDgZlg7qdD^ESjm!XGF|H@bnT@7;qA2@~dQ+
z-Xm!gacIQj>p1Uf-BDC1Ws`Zp$M`@c4v%i6KC*?G{1*|VUhfgE<6u*sfQ9f^PJ-z$
z;$dKJGK3WTqcZb^X`xslWStDb9aZaz(9{3esS!^hHf<5Z*PKoigd89<<{^d)B;v?K
zER44ZGieqQFj4#wBPy@Mw$)QOI<FL3goT(_+kc_2&MlTsW)BP>6JcuF9-xxlQ)qWy
zgKK}-!=RriliacrC7L2J<>&<Td^iT_eghbt%|%rv`H{t<oSe|J7>oLjV9nc)kT_n8
z!`EM6K$#AfjjUmrjVeP-WjrhLkuchd<Y8M!8_834z>Ky}P@AQ|oc-B@M~0-BsG`;Q
zbE*lXb`FBgQ30k(5=i#1G|1m-3x|KU!so@oWUhKUj;t;Ni;es6+_tM=8$TbXRWC#3
zVF9{8LYQ`b$HyqDSK*1x?kF%_hG*|c&`&>vkdf#nYz>dV-R`BRcViXoc`MCGZIWjO
zwUwEV$y=erDF~9cmty9JIpBJ$35vH#QepCov7xk;RTu0}vXWxp_-{_kJQ)QlLRm2M
zJqK%#jzO!86g?tfj}1Ah*j5$;T>m~`_Q_)~SpAJkQ_P36tNEDpCEU#K1D^2v*g6<h
z5ogZIrl7IyFKn4A!UYxX=yI|j^n{jS{QNlBeMOiSvj~N))w~Q><27ijI0|kepP=~~
zXMCQQg4Kahu$nK##NIheWzYP95Vt10Z?g{6t<OO4erdSwFG(vM{|=hNJoJWe4H9sQ
zkN#Qo8#^EPp<aFx30ta<ifW@UNo*jDvle^as)HW>Hz?B?P2vsiQJbzUfT*H42<x*)
zK`$+wsQU>{@`}itT8}k5p2Pfv7r?ULhB{>v#OT8tOkesNmw$Bty5unWx^mHpHcc3>
z^PWBV#EJ^|mIMb!+hOO1``|R}2n{{~X!o}fU$D8D-FttqT)Cua5z<WdeRE|$Uiu1y
z#SP%fel}Q1Nim<58?i#66Dr=zfzj2SB>s9UCE%V(GF)F^(;t53>)%$0zGukhpCf}N
zZ!<{Jx+|!%Wj*f55MrWzU%)P1caUzliV1VJpwE^dkkvN9BkJl<ut^)1E9aw-gbMH+
zSxm9#bJ6yX_MuIYES`#*0+~l&k)3sqHTYGWwqJD?OJ>Ds`Bpx9VZRh@XrsmIP?D#E
z?`}tnjRx3aqyyGD`BaBMA~bb~(U}X%(75OpxG&iTb$O{^HFXW<DQ`u`r=9ru^LMy>
zJssy<Ch%*OC{sf}BN;h&AC)=PQ?*kDtf9J9;8kx&zWkDbGA}7wRkH~HWb_*-t!ANw
z6)(NxdLN`Xg%aOOhf(WmG4{M@hP9od#KK;bzHgofmAklUx2A6pFJ6qAQ~IF!&>4j;
z$AWC@9x}_?MOk${$JO;b%)l{A5a|3xnJ2EKI%b|jq|b5G5IF`zO1H58rXUO`a-+8U
zW7wwA3vTy^V8fXgEMw*YoN|(4QX}tTL&-ZTQMIJ}xO+c#UiQON8ke#1gCw)H){W}D
zPzQI_Tp^Pu9MiP~m`=4p%nmVS$zQ35OgT5K%}hpt%QrD9SCr&j6l8jy{zHv^QMzqI
z7OH;H!PzDjHc|gD%_NpYE!zn;B@XD;B1oQ!wX?@A%w>j6UV^665Ck|q1I2;$m{xuh
z#w9pwWt9|$^NkSIX%pO;aTX-agRx%w6`33}!|E*(%!u@3(s=PV&h{lkR^nOUv;7EL
z)`a62e`I;HE+6>n&8Oj-5Zzmyf`u-;OrWd;^Ru55#}0Gwb&&*UtdpQkp1p&zu{kUy
z^Ac2gCqWxM9E48$i?HEqGLBwo1OF>(EK9Xg*kOADuF<#Q?tN}%{WS({<4-~Tco0^e
zpU>3#OhK3g>icC4KuG9!oS>A!x;zXO1*6e+1s^W6x5N73Sa9rbCo{*oVPv@gl)Ww{
z`hT-<`44`EuSg3`cL_4P-oGZ%D^9T{_&>9?K84_*)f2e)@+KT~QODtc+t}215hfn?
z!^Aibqo4nrL^iG_Yd0C;L=_jE^EieW-I;+%Q&|>RyAt*AY^=My5k2&1Fn9Kc9g&%A
z_2Z&+?4^1XJY)@FRlIb~o#$kf>nk+%Z)VL>@9^9DWE9jt3ezLrxH?A``VD00ZWmLS
zcWV`)`4yo|Jstg5{=lta@37M62gd&WN%>@cp$gWW#^m=BOwIXZnBmSud+R5p+w>=%
zm&icX`CedcBE(F;$;7~e@8P0oJXQpF;=~R$98x|DJVTE$GtZeapK=AW1Pv0R^oH^o
z>PDrIKyZ9;2$JffiT2PKPKr3g`oRh?c-TOW_lJV-CrhXmc1HF&RbrkjO~*Fq7=#ZE
zVCF+Da^&iDQ0Lx&fj<)P`H3-bICcwqZayJ~HUC)mvRp{yUQuX{tz(5e%SVCVAE~6l
z6jsz5O=_a>5Zb+(4=TR~(Ku`s7(GY<u2<)XVR<jARo9lqrkm@ZemjGD-<wctw;*j<
z-vOO!Y4Bvc9wvSsgORlvU=dmk`f>so`#OS{Z+{IwDuV_Rr_1rwqfR_Jlmlarrs4CM
zVc520kWzd48hRdbGZ7B;ctBs6(Pxdpkv|#+3e%TS$80;etUQRSDg<{F90T=jZIoEx
zE6ggc#JDS=fTACuXZcnNj$OyJ(EG4w@De8{dd7(bVb~yL0PRcb;Ax~17NjP@b%AV*
zc`rlra4`613n|c_h=SAIHZYlyhA~1aEX_`J(9M}9)%(TiO5dYs?8?LJ*dC5EPXw87
zhsR(~YXENLXc(QAr^qjfYTWeL3nQ<TLtgX)RG2D&!uvnr{cJGR6n_T}tqw!C7r#Np
zOODoiwF;CxrD@|-envjMlx<}q2e+S(;_tsVK+MG)b&K}l5x#F|=im-j{;g<rV3G(z
z0I>ABK+;7J@?V?b93OvN*>MNjdVf;tZ+Ync$5-H@DnC8^wgJ0Rx#(y2ZE$P-Fga_t
zoU*-hlUy{F2d>THwARmC_$&G#OuX+z^MOw4pwA?{;cI}k3DY1Xa|<?F(!~0iJp6j$
ziCnf*aEKG5WjvZODCsZg$JCa0+q`1S$Z_~E`}gF$PXcBA%@;2p6JTC8CE^@OLA0K3
zge-#>Fz#E57f+sJA2pRi#oO`trmu!Hlx{`!_B3cVxrXDfDk;dxz-ezi9E<CR*%FRE
z!^2IM<cQFj;k#iu2Tum*W?}RL8g<5`vC{4)4A?#<8yzPhcVz*3o%2PvO|mfGy&9Iu
zkHgbdn^|F=@<eqY9Gk@M!{W9a912rG%kZDLo=7lv6>o#;lC_kk=^(6K{1pQ06Cpw0
z2};@*;J@74FdiNWSHez0yz(<v<fm!MJY|?AoYIDhOA4uUscGy`+zywvwPJDFW!z_-
zgB>@XgJSatzCGXvNB6~miC80iR*-_vb$7vH-vF3~^-|WoyP^8|CzP04fZ3DQ6fTXU
z^jpmEkhBz2wowXe!!JX=gCPBz%wj2A0k3OvjK$z0#tA=RlHnYjnHZ+tYfqpWX@bS_
z{~%eelI5EB9+FOnW82^<OiH>()Lg=;U3;IP?8{yp6m^BKEn1*^SdvcNcLHamE#SgG
zCrq`8hfF$%y{4%T+UhgmNW)hMGjRsPU%4co^%N$ZUZUzOH%;x4q@5Zh=(P(ubLCV5
z{uKDcI&*a{gt-R7?4zT=CpQjSM^up=X@D=&_JER>3Jj8LoOZUS#`8PTS#}A&lQ@o3
z0{=ivl!pm;oQW2f<}w3`#Z*?L7?W8ULA^dSj`X&t7#b9g-f1(G=8Pz9aYlrW3X>&W
zf%1&~l`~kjD-b&x#L*+t1QI_)z&nv3(EqX$Hum^ovF@#M5A%5H-R@Yd<fF+e=>Q^<
zBf?yqt%Lq;HYD<#FjKzjG59R`fN%c2qsVa~IyypzQqk1|SBndvsmU1wwl3DbFGk%r
zzlfQ871-|$XWg=X1_ED(*po-JVN)YNC9ob@65b+=g33QED>R~{;;+N00~}m_oS$0h
z>j_>O=Qx_pEdG`dW!~xwFoFC{WEWQt(&>gM)R#o!PMm<{p~@gJDL{{`o&;UvF4W=8
zASF&x)bA_fu(^|k(kIX0RRe-&Ne@H|D50#FC9XeO3)dI{I`I>-ohKUb_xCXni8sM-
zThHR(lzP1XJsDLu@G{*BZFoL@E;D*w5pxf30w&-SrmYSsw;XjslST)47uN>Qs>SeX
zRVH3B>BEefTolrm2Oa?)2#!pLI`t%C&X)kM=JPN|hZaJFx&&RTBE|S<$D&0{5qu84
zPqI`JG2~MgL_BdTpLJbKM)#h?;)9Q2ZE_@>x>ZTt_jp9TR*r^V?lc$}NWj}S-;>Sf
zJt4Ow9CM6>XrFV9xF#eT>Z~eJoNFueA87)=jBAvnk0bdT!G`|J%{aQa3T=+fpif&q
z+QxBFJAy9a_?|v0e{8KmeOe#X@La(q=j715rxn}}5W>1EPA6Uq#H6Z2gzKp?{?oYz
zy4k+4Tq+T}b(&yz^%j=-L2F{!CI~vCjWF+>B;9-@5*91Jq~2FmLe73?G-oBDi;p~(
zO=e@BLpbDA-=QKc)Ud@bl<mzULk+AFp^e%k>7<z!%&OP|rk|Gs-+4or-oVl423EpF
zet^*%vUJojF6M5QAlgg2W0$%l7KMDLT&N<TJ3G<ic?Ebs`2~C1!qLpBnEEOli$}7S
zLyeX@9=)>~=N`X@WztO;!!1l3`P1bk_hwk88X>q|^f6@Xb2O}_CsAM@2ji!6@=gzN
zx+kv@y}TclFV+hpRvq`@u*3=Am+i*fGLAMJFU>q(f0B}PSV4r|O_AirYRKO_1+g37
zP=OuVh@GRbBjyu?%bUPWZBH`t@eL(=oU=ywZKtG0Ww6!lFFHPm!O#CbLia$ULDV?`
zmLqQzgf*t)@Y2I9$2aZdQgkxZYK%hb#xiJE>3~=p9wtfH6ctyeQ0i{~aO;pNG;{EP
zp<o0QkNkxzhu@)2r8Mm^%Z7X&4*%XGLzkW5XG*N=F{C>JFPt%Eo4b6avU7ydllqJ9
zo%7iB3VraB833EViLf#jsi=8#L1WoROgnoL9&M6l=DqPiy*KiB*z`F(7{13YT^xWN
zeEpQA<p_R#`GGw9bPO)dk7q?CerG$*%ZA4$-|<ge434goqEYWB6&aIH1g4Y0CZ_@h
zHLj7@ah$or;YyPRtU&ff2(iz-5BtnU&}Z8<)OP&<)lEs%gV4{E+*LhL5|G5uvv$bt
z&;g0KxtMn36lQ;UKq<(I!@9-@l6RyZ^<qU(K{gjFcgBNn!ZQ@!`UU>G=>R_Ezfs`Z
z7Z5uw#T@wE%r5BT<kU;1Q4d|IiukLz?A&W`W%uE<%>-mVEPxf&|KLQcHfP-~M#JSb
zc%ktJI7h_dNKP@)S;U3e-}6}t-z6BmycT%>@&gjK485$+3Uhyipv}(`A|$ySK6zCU
z6;D~_{;CG(wU?s(-o}9Qc}ZIFoHEEdt|Lo?zG8;pKlF5ehDS@~Fko{6hl99=_WC<X
zI(IDFG50ANN0<>>{wKUVUI<UZ^I)*>HF=^K#<H5sg2>xja5~`%D&OmcKi_BA)AQco
zOnE=Ndds4?b|k~bLw_;Jxr2)RQ3$QAStQIP6K@9$LS9P<+|3q*zUP(H0`K+gGnZ2#
zL7f{DgC1gCp(Y&El%x2HE>M97SEKWl&!qLUB#HEs2JiIm_)$TQ36vItN$Drp;}k~T
z9-j;TV{TMZ83Rqax2QUMX(;pGOZqBusoF>jC{B0=K4(Oj?s9ibT*kw^e0~gXe0>D_
zpNcVlGlGm`#4XU8--${$;-J2LoJ#kTgY#A6P?*Kfr0R6SiwE1G;Cls{wax|mj|;Jq
zgU?J^KEyX56s5$u=tZyoK;wfmM5vsb@mncL{L&SuglkX0F)xW#cVQU>SuX^xi|NRd
zR)j0kxtXw~x%ve~6s%k-Oqza4(mIElNYV%!qffn}yaG9VN<t!ujFzB}+_h(SFXf^w
zGLxz8BbI36*9NStOmyu{WLXFE!m#OOcsVJ+@Z9ZV?eLewD-Rja>^1^{U}v!S`T&x+
zcro_pT;lR*5Z1jMLVk|+aPRk0Dw)?2`Ly||xdz<yX!aG-<28lz{sp4Ar2t(P!G#;P
z`(f$B%iz}&MX677IE&05Fs~{d&Y8@Do1h;A7;nJ`d>e3U-c@)V_KDm$`I#yk6lbC(
zuH)%-mSAc=&bA7_itaL8Ox0`|FuLDK&Ppj}`hgg|FzPD2%YA^FbK0;(Nu2)mdPu*N
zlgC-nSukDmgUDK_P$EtV=-c!gvvyT;K%*b|H24}XEs&yJo)Ux6bZcC)!X9K7@G$Px
z@o1f42{w18v3L_l|MXZ+#hm($Rtx4ZJg5VstAjz+Y>L>-=Vewz-^aoe_SBW(YplUb
zbHH|K19d6v4_kI+B53!E)8<(}K<yhJ-F|)xKI1V)Lx%vOuf80b!%l$L@3q9QAs;VF
zRNz#wJUzX*k=*x}ASE2V>rGH3+E)%!uB|puZtjaiZFYF3qz5+bzm0Rdy3t|j6Cxxd
zLgxrfQ3^WA6i-MDsm~|{XWr}B{a-D-<`sbkBY$ese<@V;k`in^{SFNC7_gk1f$7T{
zG5>KWYhb4o&A0d!(b%X<XIe`m*XRp$?dEX$?y=ZEkU~1HZ@~+>dr3m?KfIpL%jmZT
zf=I*#ayzvIOIE#RTRi4xI=L5s%UBrXH;dE3`6IaSViXB9xD6e_bLdTesmLYp47476
z0RBKZdgOWwStX}ObE&$Nv&5F;S?+MqQc<KkoX_A)MI-Du8OomHWdg$rBf#Id5`wyh
zQU9Y1#}C#*RZY)ev3NFRyYDudqz;f_p}lbC%Mcb0A%@FbhVwKJ^Zo2USZ;F>bv3=A
zw7eBOr1)sG8zH;(JV8Jy8wMVW(mS_TA&>h(q_|H(X+kQCE9My%@cqRDB7)4eo^~AH
zb{V@hyRlW5j|qD#MlD}-A11}aP<z^*>bF#2&*&6_$BX;O=If<8cP^&`>*mrrTOXm)
z&l;+EjWRIBU1Wfd;H1e1G#<!;*mxIAn++sdocL7`T8F0H8_WH+C4>KPIi78rrC9FQ
ziRQilP*lnRovOKDM+LJw|Ng<YVFz&c$%7kSuZf@d7^d78rMKQnV256@fzseO5_97P
z99<(spVLg>@GS|59B%@T&@SMaqd@&rXve+d+i)aAfssFQ1K!$k=3{*nTs$cTi!utR
z{;k~f_QVW4@OT^QUgc#@I5?4l^$Vzvo906(UPG3sCe<U^f-0+C;kxxtDU-5!jEB$}
zD#c+nGis#Fd|h-8<6~AZ3U#kJu}Y9mKc<L-H(fxqJ`$db<WXD_8fdgkkvX}o5X^5%
zGCfgT7?nB7Iuy4VKlNE)jPzeLR{qWUCLK=JI?tgCN`%mV=`cVWHzjocE*e({A>Io{
z+0#K7`LGnN>)sh~?HL1}jmM!-s2R-viqTHR=fS7b161pbAfWLT><(GPEDx`ye4Rg$
z;~XB}%*Gxwx8DcbRDRliY6Q(r--fkE>)_;eO*q=s&$2D(0N#Cf*vqd3V%&}sIN1<S
zmK2l|8wUxj>OKu#Yf8Xc!Wwm#wt!=SC|&wM6n+VOBokaIu(ep7xnT5y<LyLr)M<tM
zN?9g4>LBHHU=9=JmCVj>J_x~e!WeZ%oW35T#%k@jg`+X=sERyCfYv40qjv@JkI6Gf
z{O5w#-CFRr`vX$v!y%Kq7dHN}Li3z!Xn0RTUn^q)Ha8`Jz4A>kD!LAbejs|L@iCuI
zOE8mhrtn6EkGZKO!+1=_;9*Nn4xr$OitXj-b|V!pNdE)tA`6T^9uJhwK2#Y9MbUC|
z<o@N4@zW02@k52yaW64w>TJjElX7&=#_IA}=TEpJk)M%zF2p>PkzfSVKf-?5EKstZ
zWTo2tK^qGNbPf_`zMtF<iH!^J;Pn}DD8v9mww{IQ4u1&H4r7^bw#WR+eJJ3x5Es-9
zVo8ZSZTw|5PTi^nmurdm{a+Y+;>k)-`zgw-9`J>mQk|%>t%{oKFiJXVZ$LAfo4#Y&
zLPajlW1qfv9hcV~1nVCWc=n7txEEbSrNJ67ws--FI#(dE=QD9N<^{hWOBl1fa>$jj
zz*r?MIQ&Qk;X@$SeJ#MPKW@Nk%_^*&62uE0nJ`lMnR*%j5<mYCV08VjVU7bYEy&U9
zt#5I-%=l)2Ii5JV#~;5wY{BT!B1%6mhh$!nWGjItaOrVD>Qn-ju|mmV=RK63`b~1L
zK$a>~O~z@rXRx?a8O<VA;kQ5~rf}&uy!FJ73i)@1{dwai>^NNrSNE<U!GFD=fqM{E
ziAQ4EuP9X6KTOJ2ZzUKLjn%W_IN+SkroNd&{_cLb{VW5FqpU~@S%DG!^QgGf<7Dgc
zbvW<84$4MXic!;3U^2&+V$_RNeAsmdH69LOz?+|7vpb7wpibkr-|O&1z)m>g{{fY^
z6tLD$&0)*tZiQVO{8shfV{FYk3v2d&18b_atoV}~w5<z42b~Zq#CV9R*UU#yii9U?
z7Ba_=-o{DOj~Lzm8!H_7nUD+(ra7dUQgpgqZqZ(Z{jc+=-B0GRvNN(kH@Xp%1J~l*
z>i5)ZT_3Qo)~0+9DS_Ht8QO$<7bbpa16#9!|IvvVceV`ty!Qy|A08!z=9y4AdPIL=
zzzc}DZA=>czhm)-7_iL0Ksic&hr(WaNL|+lfumC((X<O<KmIOn88`;l`O48pSdZ15
zVaU<F#i@}GUof(UvqmclGR+)J9lH4y1RoS-v`zWw#Ec5OF1D5onQ-#CU@2747ojuO
zdf~w{O=R_dZI}qDXfG&D7i0_5j%#;-ai0~mQKc9f!og6TM_8dj;+UBB5mRS}!L~jG
ze7zok_<VP2etInqEI9@1$3IZHLDAs<pp1-9oI?8-g7mCbJiM7(N`|S8!1vOToqWxT
zN-S-JkNY{-yDdXo7Yos*(#DV=wwH`l1wsG&DCD>5gq|mMDAXkl5w|${X8jd#TwepG
zeD5K5>3YgFEf@B$+y-vGjnI1KJtUhIW5zNGh#2R_dE3@;I8tFcBB78fDZdOg{(Q9g
zO>R1b>lJ9W@1|H$S!n)+LEXDIAb-Y(GI)@IrHNNS!kC-3skjL>p9tx24W^{>5nVA9
z8rynt?ce7x%LHNb5ho0@?LxOBi%GMtaG8vn3#xqj45vL_f=5jQwl@S)TB#X8XRM%S
zx(Qp)ljBh~>8DP|*O7YVY7pRP7+(wrfM?flSh~dq1A0whut0(iHdTlHM~(q|hXvfU
zJB12wBskuhTb#Kx1)=um(e39qs{ZvRtXOgy8vPVV>a`e{Jg9{27MZ9MejK_ePM&8t
zi^{XkF!3Y}mp!t^_^1tdaL*><_&NzwH1g2m+Cg0UfI!56Ac(|W#k%l%Y=6c<>fRKX
z3~pvu7~Kc6FF)bKM=kivtq8{ZuYq@=2m1Q@QIt>z4D9FN>4%p{X-@^V)k`x*EF+lM
zTn&w^5=?!o4E~x(ElO<S_>`AJDbon?mmb4PijS#@_d)&PM3}ec9_k$IqZ~TUqPgn;
zHCPo1?St#U&7~DO0wT$h!P^im%7Oq<8YSF%uugCsvTB`i(U&C9uGs~Tl*dWWd@g3J
zY7Vu7?*<yjiNRp%DLi5I0Sc@67?}cPj2J4R3^yI2=4p?@<^={g6#f+limM>Ue=)Gx
zl`yH9hs)pI$C7-`b8}UrwC%UR@2V_(blHa7A8RBJ-t*BTkM?tLx)^OVmy<Kz@xmje
zU8pD02AfJ6@cTknkh8Z$jVsbLwA`hJ_s*hxT^(g^97(BM5NED-4N|hvqICapP2kbo
zR=zKBoJ?L^0Ea9CFjYc|b|0L9*}M=Cm^g?MhlQCzmn=-8B<OTyj*sP>D6{E;I*}-1
zK}Ui#bF*2H*^#)@pm(bgs<pm=^<0|Z{PQmwz2jv<hU-8(@Fhu{x`YyY^dX+Bp9~L$
z!-$P6OdY&{W%`5op&=O>G#+3NeV&}%o=#<TWuYj?8~i!`1-cyL@VYU<)Ii{2Xmyid
zR(zDFw9=xWHmw{xIXS#kYOI06_#{|F{v`X`qYYI3E@JZ<DKx*V0qJwa8OQN1a6jGx
zy@z}Oj=hJK|MW1aT>wU2a8nLObr|4%7?1tghM%JaaLW5HI&(bk8@J6T*c(oa-Tz@X
zB@Jhrvf$zN84wEk3p`J2vFYe)d^33kzn|}-6ibxg*m_>DoXmmTLy6!bd>hT%Io_GF
zrx3RJHdS%$FG{sM#Pw~9KtI9|?4$+hhS4~VpVkT%cP%Kd{KLu1n%8hVB>}*N=kRW5
z8_TD@0z5AyL9O>~V*m6uYx-^?o|L-^MizAt{bU!o&WbVWT(3}}>j}tSzef)IC<1lu
zc5vFT6rn#GHwX$bA^#4-!q?Y9C-)8c@2LcHv4kKirHs7}!ojDRqu<V!powo4@^Sbr
zmZ%>{_`XC}n*?ld?MKtG9xB@CAZqm~Qsetxmvy;TqSI0yrZTh_ZKb?1R`C%f@H7NH
zwp?YeyLX0zk@)F3w}bKVQ(@Y5x(D5rejwMZ5oE2&Ky`8*^4Ddeyul}s@RXukB-T(5
zZAD=wO`2X&R|URv2XXO^)l{JHdyIGZ!`a!ellb=Sq?*EtF(@P%k9@pARaA|^)<4(r
zO1cLXbWo6vsOP19vfh!3c_Q?AjsM_hqaY1GI#@zieqn&!M%=h#k}B*z3s3V^8Lh!;
zC_lv+ugWZ_4xd9iKVAqj{HY{?T?(;VRk1X=9Rqt`pzZ50_!HR!s$*5G{3GH_>njN+
zQ=SJ)&!<5{34`q(x}Y@`hu6<dL9&kk-X7^Cs;32*l4uzwa?d)V#xJM;)NBuA-dVt?
zwfmKoEnS3m7jDDspR<^}W(%yycLnD?S7G?!QQT+07+S2}lG+!_U=vk@-&cIW<k?}|
z$}YuSn_Y0lw<pxLux8>#-^4KYDpsekE;@Y51lOe^%$XfYIFU7qgWj&}Vn;*574(&4
zaZSNzu6UeWG)0OPG$F$5AM8K+4z@uk6+G7#KQ`!rHRsikbkJb8s@{Qp^EkeItD9JV
zVVY>X=VG>Bk_Gvm_fWCoA+e6Fr!GdTK&y2M@u43=aPAs*zL6TU>4h+}ZmSY=&2uq3
zbTkTHU2P!u75{*sY73#_J1Da`{~%>|40PAuC<}QkMOzr&M`e{G5NTWv-_I9u=BgmG
z^V?>SU9t;n^uq8`f(SI9e1+V?)1dD@LHq||aBKE+P@9^dcKbh|j6CCTdz=VeFuW9h
z3{6APRa1hy!=P00F>H<<#I-#y@%0-y#^dK}fG1qE+p_(*-Xaq7e?O&+cv4`)S~=MM
z<s%V_DMyKmf8m45BW!*s%UF-cp>xW91Jx9cn(Mg%Hm%LX)qSRTf9)wuxe$Womxr-8
zdoCDn)W^0sc_t~L4u>adF{eEetKL>|p3PIzR(J@?g~pL@a1+VR*T7v)u{dr$Mffva
zQ6e>x^=<zv%=CUwjd8eEyUn#=rBH_I?p*LNQ<Slh;G$<1i!=Mw|DpU^E@tW|H&ZN;
zO2OJ4sQ2G#Dm9#gYv!1d!Nv=CmZucwIeo|ZQ8yrVR~+*0A0kypBVpHWNgQvM#O|>Q
zEFb%WE<IP_u>L|k!j6T8H=Bsuk}{&Ucn-6D<R~j3y^s9Xzk@l3zsbZLeah=y8xB*P
z*sq@rJI}A6_<gzP&3aA{wr~n(Y`t+h>L*DGS_cLF2iT4yL*OG8O0_*M!_wY;nELoT
zwt_lJaQd)Zwwj$-FUK@Eb1>r!FX+A(V^kiG<HDD0R1D{*m#or7?Y}lCKlTiFv}b}%
z**xqmy+E0zzNhN$ibBip`_TNxj}_+<O`YZa$kx665#Rpi!~Zz?d$IIOoZ9sWGdD^y
zZe3xlF`DBoEWCpelYarOvf%ZB(;zNsLkT<cGsTdE?zt2A>&$&Dr#ODCb4plV%}eJd
z591<^=NF2_Y2F@gX2x(Bti)v?<N`0_v4V$o(^!DooV@S*&rf)6;un6n#1EqW;!N-=
zN6^dGK;sI1=wPX%_jV0v5K<%ZPI6Fi&JPU_)}ci6Ak`}ui$~2;@uQ&xbFjw?v*J5(
z;MRMPeb@}Lvg1_m{XSsxN5GQ5LJWy<BFeVKq;0SOi|(mIx62wlA{2(g_q+`PmFsXr
z=pdwr6+&h+KfUz9DAB*R9Sny(AN_(pOl-UfUi<Fh`a?44QWT6GdW#sb3M0}!-<9G~
zb0q0Ct#HOxi*<kX7h)!vf_aU3`1SN%(xaP=n%b3^wCX02PiP<=Y6U34{{Y@PkAT?S
zM*KbE3*T48U^f#=y1B&Z=RbO=c7a*ss*<BEj0~vGSAiI!%JJHiw84O&6p4ge&_S`m
zwAT^+1~sU3>vEj1`~`Kf@wm41DRjP&hkTt9$n9#xFeZqd>5&NSOZKsi?zFQ<qQc>Q
zcozon^WYqg-`RNQZP@-)mbUySNg9Nm;b#0{G+H@KCgv7lZ{TC7+#AlWwzY+po&>On
zsHJirgis&r_zC}?as2SBi_*;;Chr@5phk=ky(9PxY1?@nJ2fSl4MBX2+@~$zReA*t
z(#|25R37?Ne1);pN#Y{f0c?3udQg!;Er(uMm8gy8;nOf+e;HcONi!nf@~IGRLnu6X
z7}rO91jF8MD9A|^zt&39iCd)UX7O04+?0=3z5bw^-Z$)ZnunH(88E`ZMFwZGNqPeh
zUC7xBgy*AC+2%A3jLJ|^uXRAl_A1L%ZzagKrGq?=G;X{b2q~+*C?4hjczizzdfaj-
zq#1x~y8Yq)L?QASrUJF149C^$A;J6vv^Pp%eA+$8-EN5IKaNm#6N<nRXHkOn{gl1$
zhw?LbG%%lJz~<xrKv%edc~%KD6jk8uB^%Lr!+bKltsmmf4CCF_bW9Hw$IPEx%o?Xb
zv|LaJ>no(GLDm<Hy6}i3ya{I?KD!5F$|ex}QlM}}7>S*hpe1(qpwON)e7u66tgUTC
zt4kfAuJIjg#)avtd#dnh=Y25sO#!XWa2zbzNtNlxz%kEuu%0i><Wz~%iU;)2qt=qM
zHv}=muNTMrp5UYOJg7;!hFWuSAgxaa5~>`*wUP_%XZ0{`doYGf=VQm--!Si_6N#4h
z!!OdjjLy{tmg+`T#`1X==@<#;>`{A+vI=+M>uGh&Io5;IVscEBxF3WZp2E60mmp`O
zG3B-^2&7axkiCbC4h)xOJY>2t-tz%;%B8S2y}gT*C&$U+YrM>xb}mLrkq7<NIw5NP
zO4y`QOUZoOk4o4AbK(S$zoLxFRr<qrG_InWoz_9lU<;(3xCZuB^I7-iX~WzUV{Dnr
z@kF1I0KKs;km@NWx``ubzVZw8Vdf;*I1EGk3mXWnumsT$Ux5{Egidn2v<|riNuK49
z&8~*;S7*?6{4kijdIzh9KH>ItD&)hnd)S{TfXfY@QlF2dfuMylN+$|2PWMvbc})hW
zoM<L4U#5ZY)khGS<ifI=VwUTYCTe6~A92-`LMiD$WdF^;$K}!B7`+VH4Pp56B{!|G
zriMz;XoIy=0<^@7IAZ#|g3@Uere{_^MKxhfPEMFbS>K*Z&aYO6K&e~seB~*$Sx8Y~
zVSKdf)eMZXuf_DEJ*fC8AFi$tV3f8+V(-nTFwgb@9v}V&Ml~T=u(+HuTy>M$nQq6P
zF(`%V^OKN&zZ7jwrNfR*3n5xcjU8*)fc7yfVD@k;sHyxxQw2dL>!l>4ttSYTi!||9
z^%|VFPKX}aFc&9Zlw)kD8C4&fijnTi&?h_*RKx_K&7%&D0!8Uxd9{%1Q;YM8{6M49
z6D(R^qCNi$wBv9S`SXL|v(Z~>-L3PG+;9%h9qQ)zGHz1}@4p~-fD}fpj0b_g+z@v6
zA<=W>czFJm;JQOI;F@p`70qA5*7uw7QQu1gAzFel>Hr+S`Ws@xc^LVWI7s2FNn3v-
zrIX3oyQ|)V@&`U){<XCjvA2Qx9Z`?t%F99F1c6oglMwZqpHAQN49p8uQDqKIEZunM
znw)5;{Wc%fK6k?T2mesqVH!jV?{c_<HnbZ0hLS%yal}ZP?q_yTdsm*s)UtF){Tl@3
z)viQm%TF-sYJl$T>tM^yEI2y#4}0sKFyT%s)p}$zqz;>q`BvW09B%}{UwIjgN<$cF
zT1rHD>W~eYtj+;RocZq-7A@vysG~n&$e{rDoz0-yEJiqcp%O^ck1BkZBhCakKBvS!
zU&SrpLwULEgS7dgI6tHfQhVo;h<7Y3KAC}@qR!xQ`4-;oUIIt-7ZDS!Hyl1A1lx?R
zq0`lDl4<o2EB_q@D+>!uD0Sm-5IJy`oeS@=hL}30LS!^Y?}?OTl3s5hDa)F$!0{c9
zRxgJIoV{@}SIluyg($n)z8ce`#o=4s9_V<kN{VHsAgiDSZ!TL%TJ2*ow}_wqqR&Mi
zl$ZX0)xG&!PH*)0-8@f{kRl-=Nv68awL|8Fgd~I{A!SGiNs&~NN}8leDrrVt=h{iq
zES2<0(j-YLO)5$5{e7PMIPUw0=O1|bq2p-!Wna5%pX)um7Nb}##^}o(gXdz+m?l=p
zb(-ZMSX7YRsgVbpjXgjGra<bv-}w6UXZWQ%hZ?9J!q<nQV5_}6S>kvHhpv{84KM3)
z_J4mtGx9h1t-lSXr=CDvgA}{GHw>Faq}W!xk5|tA#NaibAz*tH$UJxons=_@LA7R>
z5*AITPgTJYw-`9Mx)@%%7vdGK3C#4YHom;da+tDNmen!aPl_cU;l<sxxS}Z%lg5!c
zDqKUqwhq*w88DVG3%uin*zQB(a9}0pSFJAOWr>MFOTP<su)Gcq@#UDGegSPV8{oS5
zGtBh$1HV&}j2m~x=?u0-B~ByAt~1~_SPH^Hu5YnEk$`4gBpR&I<bOI83mXKu_lYuP
zHsz}Tym)^AUvoO}k=jT|FaJUQ7(Ky-moMRQv&U5Y+(dM{)<%Vj3o*dE2GpK}!qu=o
zVtGdh+^a&!Ld#b8I9&r~Z|fzgwV|l+dpWGNnZzFME+dAYC$i1HVYtJ}6)NlU(dBkM
zm@h~}(RsoQu~!1C)9$!Io;$@o`G*17>FDtE3Eth3OQyf@AP)OR(bZX&DPSxN?6dFV
zzKuaV{b?s~ts)OBI6uLqR|Vy4lX%mk10d;G7hZF-#49e+_;znOKHoK;tX$Q}^M2Kd
zf0}xTFgK&>fG>D4$@o!Fn5pM{P<72voY8h2H0V$0HkD>Hg#W_Wl_*R)Aqg4JGtsU6
zHt%J@5Ll1ihgpvWS%nZUT%aYv*5A^B<G2t{t?fm21)#f<1)O@5NP5a;nA?_RIbBD1
z;GAlX+vBeAEL!9EjWWHEpLq<-+mg^}nE?COQ-aM)vBivlFw$8g$=n*Sq_V@uVKgHY
zQo8H0bNLTkzS0(CJ&(ZP&Zj7Hr-(mvZYEnb^>L2QsabHOFP9fLD})A~I1g$&W?|&b
zGO)3fWjj9=;g=CXcETnJ_TcIYSQ0r27p-W-dyj;`g#QVLBFA|hM}43_ae$_UgaDH-
z2oLqz@p0cjf`^`izUFFd*P2EZwX|use*+&g6yO_ozb`RphHxWyh`4Zuq)9}R{6;}`
z`kv#s%J%`4SpA)>Ui29AHZCUWV1Um2Cg>qU=-haX%&aJcN#9$sWb8bD<fRv=<vGE(
z%uuq$&k6YZ-{VHJk1*TChnlfoU}|qeRwva_hc}xcjXO&T{FP*KS_Ig<yNmR%%<sWr
zt-Fv`mk%4YE3spOB-I~#4MNXeg3+Q6=*o4BF9{XV&b1@BJi3!SiuQs#Qt2S#n2$aE
zHQfAHhL65K0IADT>|F7?)a>~s{Bzm>^afXxst;+XXBAAm7Kk#X2`|W$xesuqS2y3K
zM2;=a%f(M3C3Mg?69x7Rg5UgRvWxu+Gr#xJhTkDHOKTF0nJ+{~!}a*~wjAr>-vXE`
z%4kO_vU}pTp|ip_WYRowXmS-So$3wcQ-kSH^#!mBGliH(*%1EnD~@cQ0{jz`Su=%q
z_#x384?1hpfe=a7Q}iUyRHlf?yg!OQJOB+wkm<QvKzUW+=*1Ic-#ffV2d{9n+VCGn
zu0MilQ=*|=eJKR#h%jfWCozr%EAV5C5%$RZCIz<dP(L*T8Yd6YjngFAHN|~6GqeI@
zpK8HcsSbGJ(g6wMs;E*IkN!(M(E3Udmj6`1;b>u2>|+E-TT8M<(=st6umo<IaM`s2
zd9a?*0!EPv%ubUCP%apy;j8js+VaU{Z`3FypAAUR2L-TaGN4OoJ^X8RhZvEU#34P4
zuV-})7P>2eU8Xp@E~1<~n|cXzV(;UeM@PZwNk2J%u88RTxP{@q-f-%Y45Kc3ogC#n
zEAz46c<N>br_JsMyFX#zv^xy;oe!n2cTXheyj2+f=sU1+Zo|4uT*vl`A~S2t|M<^e
zFn*|?*m0Wnh3<NoUn9Y4Uz!KcwB8w%FEJspT_q5tK98rF-idBUp5j`|gXosHk=Q=8
zhm4v`y3o@CX9szMPq;9<n49J8ojiCuKXgFymJV=TS<2HHRssI3CpchN0|oycgXUC9
z!zzw~xq=&iG_f0W_#Tk;xCu;W39%LN^32p8ODOaAgDHQ6A@~Wxd1)!8@b5jk{%H>?
zHc2rNzdzB=mtNR-^%ca(@nK~4FbVs7j9)FTfSSF7P@MM!R=r7qV<I_l=vOF?ZTLlu
z$A&QAS_*7$l4j$AW!UUq8Mf_WHZ%vgk@Hf2^~D@0F}u1CMb-4Mx$+GTT%3$6%`@>@
z&p6NRPCt0cw&U|1p&*78IOW(8^h$n3EDD#9LW|E(r*taYwbTL?Y8=q=>^q*+cmoPA
z>%vjX3)tmVz^grwi+ZlHc+37h%+C^MVq%x0ywD%CJ9Gg5b~?iBO-2y4SQlp={)>4%
z>bQPqJ2hF61?6EkvGZyc+_`>>@Gh&M@dG9H`C<ts$*!EX^}PX4K|lO$&&~dA1K9sD
z5$E@8LPNnAn(Q$I2CK!`MtQCiE;tOr5~mID+;bGlxyMUBF9R1FJ;49TLfm<KH+C8X
z;oCAfHqm`I@UGV5ET#LX<mN?l8&Y8F@&91ls{=E1gc(zAj*(r^4c}ew@J5bT!<X1y
zAoi#U6*Quey}XOBqW>1ZeKN!vqb^Wg;ROfF#zDaMj)C>#ApF4P*h_c(;#ChkBKFx=
zut)zCEUkKvsg>WrB()vts$P@L68B&*+yRmzui=dKLwGvYhxGY8hUVuZG;`ims=18o
z2^Q~$pzC#@xH*(u4)P*fnx!yhQ2}kf5{^dUui@MYQKn$-9=N;Eo7bLch>xervnMYW
z(#YOo6sY)wS2v2__f|<RkJQUEdLhAn{PGO`Mhmgwg_qEM^G=*~Vlx@zxq)+`EZXE1
z6UXIT-(ssZnr(VWS7ddQsvcR^DXt8y1KOZ@^L3cJ!-X&OA&_^#JDR-sAput6MR=k|
z72lYPvZo8bV8!_=I%BFY)y?k!{oS8PPgD{;5}ccp!b<@A8(O$DyN0%N-v0BB@2Fh<
zfZ7*|u^~PY=v0t{DSaQo|CJbfW<Z9iKe87JUc5mkn|@I36TsZJ!>Dy5hPvMO2OGk5
zSB@;AS(6;VOWFp5w0DuLok#Ha&)3)y^c8H5zr#g)igI{gjG)jh7KPW|hJoRJta~QS
zM4GrmV9F!9=fD8xfBEykZySt!jl}pvfwZ@H0xM>y$Xe;S(yEF<@LB#J-^x)RzMil{
z4K)Ufm!GGRs{?RQ{TZB<%z=Q+W;`Hd02^fO@zDK8*m3YW6)V!k>ydHz=|ct)Kgj9I
z%IUPtaXPjCu172`T|$wzF!1DNE8`!#sN1v|WG()H{|0};@QROkywVdjCFNO}!Yt^0
z)eZ};ZAJacwXiR0DvYil#H%|l!Sl>e*rq2-{GL4~HtY<JC-oT&cZxBJZlz@Kpa+Cp
zmB+NabkLb?jWJTa==bFoc#H0VBV_`>{<;7`Mm{9%-v$`|kIT(GiNM@#4Sb2=r=)gc
z5rw7^+-PS<JbSajwdO9)o;sPSaGeiz|J{ZZN^o{sJ$h#@!e9vt5;bxX3-(Je%T+V5
z=V~rqI=lg+u5;e(x$9_rUz{lp^#!G>bb2}NKK#k@fsRLkcqXBP+Jwq8Sr7;7a|7|k
zJPjNfjfKK`uFnwSLj1;4G2?s*h8;?y?kC;AMcS0FMe0!F-cy>LHJu?=4fth4H9CtH
zK(N6iCM-#s^@<Z^*KOyt0VPF7Og0|Rq#Bch<v+pHdNXz(n~x8dy~Y}TA<ryHl95w6
z3&rVGa9D@sN4MOiKc4WZxn2qcFWyJRR(?n4Y)$SAdK28mrZF32_t5^NcC;ufKpFLq
z=o#b&GfSJ{evmF`h^z(I)i1$SvKV@2rh;8_9MnFV4apr3K*y&5A1A-Sl4o<FaYGHU
z4I4&xtuoBJ+JgIPTA)ND1KhNqV1b7&vD~SJy1m1ow@#4VSMdgqU++LUja2Azy@MMs
zJpr|{b?{Wq2R9y3!zS(H`1Z&(5IAXv9qWzZ@LVk-D%^%AH>Keet6Y@Oh=)~$YG71j
z!^;+nL+089_Q2g!7-2MqJbMjRtWlj!&-;#dx@WP9jRT~>c`IB$e;MS;<=L;1S<r9$
znFv_;g4h*J+-l$tUX!F)8I_G-c=J0fkaLD1kYtN{l(93|hEC3S3<Guq?{_v6o!$Cq
z?~PQz*n;{!EycXi0ZjW^36(4BK)ElH&X#EawTF3NR91vthF17V{53k7t%uIHUA%tF
zM>KWS2hf|o7ma%&@%rI+_@^TPE7r*|5@|lTcHcxSD7g*#2~~LhRTQcuwLp^hK~&kw
zW%VqkqQyl`&ck0setT#_?a6-3eLN2Jf(oqEdp&6MWl(eG6}-4ck}(Gv)_=lI(&bnP
zt4ze1y$c4=L@j_6N=#rhw#l)px+cSHwG1qr(N265|De&fhhWNnq2|Azg5Did%KJAT
z+kNe@%XlKII`0OWZ@t0OJ<@;{(n8E5CyFy#oUp#FgqHgc<54v)eqHDtTswCJfA3G`
z!I%i6SR@O5j{T55)jwx2dn0Zr`N_?V!Fa7%gv}V0hv}^mkQJMSFnKQg+kYP&U;*l^
zGe9x(zcgrR3m$RMfstBC+#1~i$D`kqN6mk+cJD7#KA(jB*WKXx^w;>>$rG=pwh?uo
zH(1vhhMw}GjH^)?UR=5g*Y2$0yfm)k{OLP99T-LI58=+s-Ly0F6B?yH$H7{4_Vm=(
zV7-7(og$^!jj@8vQWY)(Z+-^MY$chc&;^|;17JII7(&dyg41F#kUF`HetSEW)P6sP
zyiGn}uOGzANSA_6cZxy-yRl!Z84MR+A^KZ(L)a7ncBywDrkIz|V>9%yBIzzf-6rVz
z?iW0n^pS3GItC82-(x{u2-&%5Dm!pXmUSrpfQ37y;goYfPy41GG4D7-(`*Hp8_y;%
z0&6Zq@#^m&rrpY4^>G)bzxYn{QVL+=KmyG;^a<rV8R!XHiO#+mXjyavc!QK`735;>
zm13-H7GTOBra|qq&p63joXS7*qVu;OhH@{iW91*H@6fdd3g6y^H8yTU`{gZENt9&0
z7Dj<*4L1XPUxWhwLhSmzI`AvD7&XUo(O+5x2A5r>_JJYPb8!a*{L;af#vHTk@CBT8
z2w~&!BsiNWz#J6V1jWyf;1#|J6d!8`u_@K0bg>W2nOKZ&(;@*YxO23DFk`gZ6$ZQ)
z5|x`0tb(p6V?U2$_i-Mkw&^Lz72g6U4k&Xu$qP6Xy9Y8pErRtEm1&pFMRJ|fN(Hvb
zuwDK?QC<5j1-W;iy5t(nYn#Ge-n|8teU8zPcV-ZDcmTX!`S7GATTm}skw|#hVDDHe
zMzqI35AOlpI_M3bh1wx_vM(`Pn+SXTZs4SnM__mF70*QX4aA8|U_u-#@Ok${=E=Ds
zXns_My<$7yi|Y`&y%hoRc^gSmqAVM?<{(5(mcoXNiHw-B0%NgvD@1&5gLm_HKy9-$
z^V!Oj*4&-|o!@h*^MX4$*=Ofqe3UhItW5>p15YrS<BCtW{l*xXhv4#I0A->^dDlFf
zuzDZIzPLUJOWz2v@I#ReQ_sSbM~eVreXuLBhqlQ`VpZV~sk?R)+l#%xs_QMjALCIy
zr6GtQ(Wqk1{SQ412knQ8P%C&lZ25t>>8l4B?9=1Uqr!~-oZD!*%oB^#cj3g|5p0+@
z7m^xBaFw(n%3b!so4*$k%TudCL820U_sqb3hbB?Mk|>n3RN^%I6Y%j<3EFvgz(4;=
z2r6BK6`twDF84a=xAdhRZLe{;?I=htiGYVDKX9a=7TXe)nbOL6oS(6h+Pk&D2gU%J
zRdry<{4*?<Sxp7v<d{p%<rqOz(JiBzI4;;tOI1?g+#5+OvS=YDcN^%w*3a<q^CUVX
zpu%&V`5z=lbF<NG8KzlRku^LJh1Ue%(;-vN>o0r?xy1p{%#xg{>fJ;qUm7fgf56~~
z2iJ`YLV>^jm^qQagt3EEXG#vIwM(*LJ=vrlr(l}ZLwx&Vn8Z6MFjF%IAw=UbOgC@9
z2~Llp-tayYb*}{5!pX=F>ByO*8-o3lo2Y<bIJFA#rAAxj*q>`mVYccRerp-Tq#tHb
zp85l?ewAekJ!F~gO*$A6UPM=YXv5p)6NvBS7E)gwioMamY14Q3PABH0^}e)hn{rw9
z0p~La9f=_p6|zi_NHY4x@<HW@IWcd$$g#HwuXE@QP`ewXzGN6)2WVk#MH{;R`~wRu
zXQIAV5txUU!duM>Qh#?CPb`?sUWj)GNSA?4O-~>g1=y8KBB?0XVN`t{3R1HMU}M%-
z(Cd*!o%aQ}FE|zd-I)QuDtytwt(>nLl|rWKWq{M7AjsFf0f%WIHQaa!B(y`Y(tbCt
z)AFD%pT?t;;|Mf3j8n7rHd44?8Qomj!Y^xjf+jyysJN{V8{_4JWtuH;VVNYm>s}st
z;frLJzcicsDVAiY+i=fxu5;G>n)nX3!OFV}d1=Knp=+Ksd08aSUQV*cQ0`o&Ztw%%
z=G=sy=U$i_r;3|>)5&s9cbKHIJ;$y0Cam~)mZ;n}p_^h6HZQyiQ)GnLc%5Xh%x^~<
zi#V`J83LPYhltFXRXG1%0BTw|<H&{)>bAxc8&*p(i>#kuU&mi!cazJ`bNaezl@P-_
z_7=sqh-2G>sT?O%g3SrdLa*}eP>?0Xd<@q{ufw-;!elv?)iD`%|LJj9xnTsXcIlzT
z!J`;|onve+<7OryS=Oy63e6b?H|x7#PR>len!rcM4;F_vTo)rOMUsh5>jkl<)nMQs
z2=a0JAn9NqTuzr}dFfeLH0)3Ry!=F^KEA@lO)_lCX^zE{|BUMI9RbCWSJ0ZM1zXcI
z@t*AobZ|LDYVM1(4e1~F@uS^j{b~`$GIT2H-<M~4f^^BI_)?7GI))v;{z64_JE~?U
zVi?ECv=`07l|Ku?GQ<b%!<S*oZ_Zb{!)by|>+pf!7s?Y)hcWG9bU3^;2mZETfrvP}
zHAEDp?zxk=q2Hj{E6u9j(BwLMs!%fNAdGe=!wrsW75JYd7W>J7*N1Ot+<y<oETmZB
z@@y#hy&8DBv+&e8j^!CW0!{V*!Isi=@HS0C+c{cL=*MLRk5-Vn%vTV3<{66hy#@2C
zAbvuZ5KDhf!g?toCgtM>6g!?vr=2{6;-^A!-@kV9>~jtje@!FyIrjWRzYdX=YL&Rr
z=MJi_Fe0VTTCkV74`=QbkUwWP@QgdfsMmrK2rL(68teU_aNAu7Yq*WEdXAuU=?&h|
zcLnLk6`1g*c18J8?tG#B62^WCFm?7O@T0*IgasBuNaYufW%&d&R5rm@9c_$R<ctN*
zRW!dO0&Yd$#f*Q;A$^Ak^JDNYINyAa%CQTe&MyzPMRN?KF+LnO;rKc59@jQHqjKsV
zd@_C#=O-+J-9Pt%VBlUz;1_Y)c@D0=D9QF6{eqE}_i?WI1++Nt3*nXp7<@7x_p{I7
z#Su=oJ){aoo6GQTpeq!q=7aBwpLl)ZX1LxSjyga25Ktn_j8zSx&ZWKJ;#LTxO_X6*
zwScl>3vpK*<y~|fBBsetakqUPEjMyAP+yUT7ZO>%RX`w}Z@3my)MsK^gEad>OOc7-
z*erVcJAmEQhNkbt*wXQ@prIL0?}&aR{%eDvjeC~On|+smq3$WD&FX})cy+8V8N#lX
zP|UNe2CKot<V*2iZpK-S31yszCcX|FVosxI6XN2TQBbFKm>-khNsV_2Fkkh5f)2;?
z>~|An7o>P$Ven_%`Xm6gY>T0~H4nY!OE4~bmqR0I;)xtBA$mrC$e!Lc@cL8-t}-lG
zVT*;}a8Q|n6La|cmlvR|RRH+%gjoKizmR`4jLWxj0%PH}>=dzhXbX)&yd%%H+<J+f
zpR3`*uUlC8pbe9@ih$T~8qOaOrNL8Qp+T?^>`~;?sIG7llja2*{!2o?QwDgn?-_b;
z6J*r-bkOv)8=S&<V75Y%ZTM4#t+S2skE1Z#+;|WQpK#BUn8S2r*9l(sU=~rV5d_aC
zida3y^*y3fu&cI#r?KKE<k{V#!j7YGuw)mQ+1<mJb~bQX+5uioKZ&jlFKE=VZoaXT
zGpB?92JOmZ+A34QdBc$q+$71aI+6^BRX)-sTKS-^R0jL*DYLd2|L|Zp$D=gTg_Apf
z(i<OS*;3y~h)YPs<|Q{lq_ht7mvB7Id0DX3C5p)0UXH8sG|9nXO9+}13LB3`BJWZ=
zY^ZTYOTR&`XLJpvzR9wBnLZ??VjJZjild)vDzH&76uh?8lNWAcI6mhmc`v&THVPmP
z_pQM-g12!smm?EU9wZH0>v1mm0J$wJbxfE?T4*RS5Ok;W56Li=wQexy)NfM%!339D
zE5o=*3ua%uf=`N9Lx|~o)?{-Kb^W&++a+QlTqPbmUv!hnZk#7#G!C;f8mW=YPYjxS
z1g)Z2=-{sfQT<xds>p{R5qoeUxzuQ37)~jbX6Fg|fz;%0kgso!5-&Z_bkPTL{Nffm
z<6jAA%uk>p!4kN4t13HuT@A>?NFEkX1sgR!q-wi?(t>$B-85Sol9q!1dfTCKPa`Pa
zkfvrblFX)MW6<<76+}zJAWZcHtT!p8+oevB!XG^_+JdzD^-AdbVTjmLLSizbAgU(>
zZVRq~S#@eKss1pUwFoe}ZRK#?REiOQ_X9<uJ#qGyQM9sk;i)_2LWJ*E{N>V2_+9fU
zo|0!ZKm6q@DXyYttZt!uImb}-d<qWqBpvelO10P-n7ru*z0@`v7eu9^P26FqY`aQ-
z&Y#Q{Oe#hH=uN1o?2QhNapdafG00lmitbbD`3ci{q2y^Hj^IC3=Q1+w(!>0xj>uQ|
zkPOmS#xdb;0_v`-<oG^iI9(*1-qiJiPv;(D>7f>MaZbX9z!rSHPna#1=%E!J6IuJ7
z_h@3TNwqS+bF7d*kU6IsR`#^;8jLgH)SjhydqE+_ZgvC_;K4^{eb^{=1C~}!Mzb03
zxa3thT#mYcLcI-Oe{DCswR*y9JE_bwk{*MXwOPFBzPll*vKnUcPV%Iej>B@#91I(K
zL7fWj^DcCB<OJOQiapDv813Fb9G}()4ik3Z>GW+7_B<C|asu#(jRG{kD}ij61Jo|b
z3<`pxacHAH%vZfdANK#FrS<+G1V&gOn1M<aGg0i*eW*Jr#kg>JD#xc>ry?;2KZ^=6
zYxZA;>nHw@&4<IVI`syI=xu?5gNoUe`Z}n{^@+4onqX(&EJ%m@WY)ct<lW;CID9t)
zLkcqZs?!9R(1*{c6vu5^8p&m#Yo0++To`mcDTj{kwYcTEAFe(x#5y}@k;QGIn6g%u
z+4Y^v?`OxMzR+C|%TdRSvaTGD0%c~o+#`^gJA=*NAYu^rUl}jzksRYNVGK21WPs->
zSyrM+j%6<f;IrLQY}UzPzN1_>oZ!#I0GZERA8ZlLo7jO%`to3RWePcvDa6c|J`WA|
zETDbbF}ThsGea*qo$%xxeCj(2$w?bwYsnBKJg6boRoh|rfoy<G3YJn2VPtm;w(r*i
zzw&%EE2X5=B_1Rj3^Dw|ILNK!SXGMi!7Qd0XA~vzQabCw?QR44u$1F?#4RIvJ|1Yb
z?j895aRY0e61Wo+j5At4VD^Djknx@eC|Chk(g*N_=`Fl^_8tGk(J&<Le2!n=1y0Vw
z%%;0#WQCRxdoN}Su8)*}$}MUbaP$()`EM2;OYZ~M?ru=8muHr}+61%r6r!c3DlT*j
z#I8^GKzm6j|KNwyuqZ(kd_Gv?oSPoR-|Z#sm&hZ^vaitP_e5UHl@=H)3Wjl)H2iJD
z<)gdJuqK>iCoburhTLo8w>KHATNv6!3&6N^2&}?8ae8SXan37+w0eD7>5>P_?{a<U
zkyZ>6dk8P0W!U;f9JBR62JTVP$8j!yUCRE(>WPB<u^pzkYRX)WBeRgMRS{(q9*Cjv
z;#?vOF|d-;1+U#rN8URRXfRF1-TcEihs&n>+DKsx=K<G@zaYO1enNtp7ZeNjV3)oy
z<NHsH)f(`|qZ>Tn-qQ<U!DVbhqMGT2szGchl4ISx2YK~A%B;8QTXbEM19AIusL0S}
zhzt9Q#~&f7sT5}1^J|EXeFeExkcsnFSK%zHL`XXz!)B}rq#|<%(f8F{GA7As*%KGS
z@@4z!38SAV^eF&WiCjmW#ZKI7^@D0#d?%{b>3BKq6Um4aW51phWQ_Lap}|8Vu;%jU
zgC7bpe@_zCdlHW+&u5{Krz>uFq=6T<x8US=VvLlpKSVFsjzaV6pxDO=jTO4^ZmJ@<
z=>?<a(IBX~a~YF9aJuh*<FFuWglJdHfR3zuy6l=LTPFCMT-}<*uQ*i9Wd+0GxP=6(
zr0@(Ys>Cr%O_I$>7iQO&FT$Yk2D+@P7JjAO0-g1eO!FZ_NKt!E)PJ?pRcoAa-&9Vc
zZJo^KjZ1TE@*><GycvzBg+lq>G`#)pH1K5w_*NbwU_Hj=xwhAV@#SuuC?mnv<hMb*
z<r~yHDZ^YCxCS*J4#B=If7&pkfjG!1!<pG!%Tb~Zd?lYjz1L)7D54EnEgsb7rU`fJ
ze@2Ic7MSo}l8xG;gKCc+z<dD(M!a+sAI|DTd+PzPs@#lw0%scd8D)cisUWi@bU(=T
zDL`=UA^xeKCwTcP_AnGC$>v7LvK8yO43nNboA5FezH;0lU6Fk7bkT!SQFoH3b`wl?
zr@(Q`U|!z(Y82Fa4yuwKM8z--oD~Xi;yOhP=;x|oa$;m|)ClA|HWG`B5x76Z<z%1)
z9qeV8AORsVZ+H;)KWHT)l|@wZzzAfR*5Tk*OWrE$7z+4_S9@C%Rkgmu&}b|Fw7V00
zY;=VVVKGM6W)~>T<XEUWeb{wRl6Czm$GYF^$HFbGIbkVM%mqOft8O`il<N%)i*Mx^
zBe_t-&4lhv0!(#M18!e4f`u#ZA^V_*2;3E9cK&+>R*nWRX`q<Ky@<uxGF-QF!5Ato
zc!S+H(xGE^C&cgJm~rij1_I-@)b`y|GCAE3vkxgUFZNAjya%l?h~o*YYnMc&j`wK(
zqnNlSHPH%_0Z3S)L4K9~qk57Z_*^g#%GVdbzw!sfw#$wBpWT8|YlLCa4;iNJb~;UW
z^dvf*A2jl51_bzXoQv6gv|7m@*U8H;QIjg*;zdWC+#-rEW^(*d*Mm@N<Aoc=9l=41
zl7<-xpsf;#i&;eVy<T82{DhzV_A;tJ{suyd0rcsT5)98{slDHAEVtQ-D<=-(CAD@g
ztN0ft<n`l{vjvd+!5ny}_3`q%ewu6|3(xmYhbB`&M(4d8Gu23ysDw%|<#RI8Wloua
z;E_J;(Da2|<xcYEMH<?yYQ&O;Mk+RZ5G$W@ERVAS?7purF@SV|j=?>QxnK)=yM}p<
z52b1H+<UOgn$t7Z-hz$u<iPK?Ec5EKB18o51JN0?nI6py?C5U+{Y^Kq*T&Z1&ofuF
zws{4Gray4I%{4S`DJR;I`w-Xrk`Yre=E5zGGvNOU4;HrLV6_xuS}O*+;Z>*+(Tlaq
zf5L}^iOhyc_p$u?Vwe>+Mon#B!`H!!II^V~de2Q~lDf+vv-&2r{PK-7KMjPj!CMFs
zH&ISRp6z1ZfvMI_6u&YIi?6lP^6=fba{hf#^4v?i?D8<~*k$71>x=u=zr(TiEUe*T
z*{6nbcy0H%*#>37^8H=3nJL1$nHhn$D#0m|iWqbM2UxZKhNY`S*`3=*A#1t><F~0A
zXXQ>{8}5C<_8ZetZB&w)D$M|^%c2nf^Ba+iZNMwdGEDwhAE@rR#PyMCNc_SaymsdX
zKYz+UTv}TN&K^pvgF^|-c<~X2<CAdy2_g1I%vF$i$KoB1o%90(u&SX5-ha%()^0mY
z;rdiR2P?ri=P)Gn#llE>Gu1hkf@fMSApgik5M{d|DpiU7vbqb`s>VQr%zYyMBNW~n
zdSKFxpCEFH0Bvx`y>p(U2**x26ROEe<D28&S_h1p#pS&#(x5O?mZ|qVgNJ34F?mKd
z*KvP?T4@s)+yC(lIz2fS_eUrS?18#Ian?Pvln!02Cfl9I4bBu+V)>&n3|`ibC*{N-
z&M+J+|8X6GR|?F9P7AI-X^f$^9i+lpf%63t5Q7BSU*C!$a<wSi@7sii{6XmY`2r_K
z7~zWd4F;ws6<GHj;;f-dHx_?Pqox_xFuykiLRxQuhFmZBPmy4!?YoR_`>LUR#SU0V
z5&56Fvw2MrE{y#PX5u60-x>pHGl#Q}j0|G;?g*G=VUE?>mw8Pwo3ZDoAZD6Q;uyTw
zVV#^VUBA5)LjPKG-1Q#)`l{Vvmg9@pIh}uN>n|K#*^Jtkdhm6z0+fA~<?FG-n71Si
zX6Y0Fe^Mtj8QAb;+TCc#)MR*S_6I%28+Zc0UO_lh4hoM0iNq-$wfeCgjL-ch?K=1P
zLSH*zAJ@r>zx4!LUjC21um+bdKgkpML&2f-4QjZbfcFx5^i<nrTri(UCIxf%Q4>O$
zkq1Voh%usnyvXs|21tq%WIC2@K#__juIrmlwaq^g<!?WDH*cx)A~crJ;(wy7mf2R4
z7%Rq@pIZy+>!jFQN8Xc!Bb=9B>`ZFHwb&K)vUJkl?|8hwo|wd!L(Pgl&hwX}Zu4as
zYn~YWbBf~eRUCio(=57e<rvnsa=PlMC=(eL374;4raSNUpjXZx*mg4<Efv!VZ@maR
zzK!d&+YA!<b`d7!<W&$dpUieF&V`Csa_sC1il&)oKw=%27f+Cd!L&wlzo3Tr-g$uK
ziAf;vvRVJjG*!5)n9g%bh(tHm2@TVpK?b)+q&RUNZuJvldmcW;&W=cuJtTrw(QBZ*
zy9sJs1t8;t7&EC$h<eG0F@5unl4H}4(&g{OVK})5<+gJ7Y_u%1;S-m+D)GiMSHA&_
zyOOy6`?yP<>+j88$aPiv=zm2*jCMT5U-6I0c8$#(YbF4nOcbLNimPY|uL(4>s$l2#
zcYLYn0PGS^#~^tRoRTpH)qj7&1+F_$Kkye<A2<O!=Nr&Y-*<3!Z$7+{`v=X16LRd1
z?x4%$?Wlr6259W8fC(D6z*bBMs~T6}-fRixw3RomN-rV;$^@nhhd|QObX@hMnV#u8
z$WOQ3NOOn)Ti@mZ`Lr6pTMKi%NHcJJEyUPeE#ilHU&NhrmZEPHLyFR$;^KI2UyPjz
zD1ZnP)ZYd1E5z`$nG2*(2qmg}eV|6yo%0{$n92cpUj8#av^so*GW}s{{HPhK+b5#t
zhd{8_ctsD+`G^U-{!*{UPGC4ykf~6Ajc*3ru>IvTkc{2S^^x6)TW>2KvC8CE?h3?I
z3sIPG<NyQ?yd^<C-_V3<fNdIsFe8>@d?yaV`@|6({ooC=Pm0rt3WP2b^26DpTn6*X
zOOh&SNTrUL@yaVWZ*Ki$=KL~mRM=66TXZAPKh_0R?TT@_`$}B(+83M4yZF~jSHjrJ
zmoWcW85SRI!Zh%~4CzKJP^{+3<<{fB_ilWFl|hs{#N)z`9;hwn2JzoD@p6nX`{ToR
zP_S5sx8<|Q%0(k!YY+=H(<<?)tP<mwoK0nvIbYE7G0gU5;N5TrBv_us7Qt`8+nx!{
zgPcY?>oUHm_ytmTTG8;12&>QM!|bM0U~}po@n75tr&fID?a7g2!oI!2x4*{lZlWYw
zG8})gvKuGMd4YQD7I<8{0Nc1dIqqZOu>0~&ND7l6PN%kmOERa`Tz!V(Z%43My`9=j
zeM=*(u3>f0C(=wfuG38^#&_m>YUg|z-~G8uC#!SUd3+3`cF%(#Lv?&`<^k4@-GI@}
ztq|qtLRQc5Lf2JU;A}38Zao{YTkil>YWqxc7bq}BOQkSgUx3ZdQA3Vk3ZHhlLYF`d
z&MWBPo)H&tx#41d(7<o}8g?CQH&3TZiqAk~?IIX`$)`eO2Tg5S0^7sx<6F&1SUT$l
zsP{{<rc2dOGPHqwI(`d_^`pSZHv-&`R#CnBBN!*@0Zr4a$>!2}jQE@ejfRp;igOLj
zJyePA?KO~WAB;+kKXG~DGZ0($7L_u^829t?OhIe`=DvCbDf61BUGQX%gOEvd2aUmY
z&wCt`mS$`{OQ9hk7T-Me1VyJIUgsY@$ZcPP`b~$)H_IY&F+-HO6TXHtU#fs-no9ii
zrF#i)zdkNnTTg>Oe8CQm9h7wM0VW&|g<=bF=;U(@o3m1^$X-iYqbI}uozc$iINApC
zj^Y?n@rGk2eumhaPx;LcOi|bQIp|8Xz*YSl)W%Sny*>3b`2S9%i~g%a!!trKcW(^O
zd+AwTO^zZHGA|gavvNQxGtZ!7YdiIM#qra>)uSOJ#Y{YVmze(+XIuLcuuJJS^xqJN
z<E{P>@I#)hd*YNGbTt)Lae7(m8ac8mOr8xB;%0)PBar#)7(_N@fXlK}$XCBfGsN>@
zXLAwP&rpQGp=P{WQVwUP+`;eHxc(>aIl39o<nkoeXs#E9m&aFO_8o)|+gAg>aW{Q9
z+KJ&xF<7Y;54<N{_$0Ii+s;Q}X~G)Zs^EcA7ee9WA31(QxFqA3B*uER2Vjb|5jY*n
zgqn?~P(<N4Xf24w3wr4|{`VpD-jJi~RBF*g{u#eFSAa1N5e4%lom5t!hOXV7%JDJc
zpl3y*ftzd?Hm!Zn3zJ*`?h0#p$91pJ^e|ag`>q6iJIw?)S#keh7bx!FZ-rQ4irRPA
z<N0WB+}d^zmqqy+Ofv95kBwKbG1ZxD>p2Ib%Y3kWI2^nmECjudW(YC1APO=)aA~n9
z8=^E5#*GD8DK#5#PrQU^JB{l;wt#=&FlemUhJK$)Fze9;ke7Ib?>drDZqq0_1#|DC
zU!!x9qMnd5Vp+&qyF-?>3_Ce+A?~ZopaF|GHvU9Q8n2K*@=iCxg_=#kGwg)5MP_I+
zGZ98BMe*RQ0=lopj)ac&!{E~tdPYGLztCh9k{QL}unkZq<AzadqOs`NZeGRonFbHG
z&qp1@kI-@PA(T0G5t|hOxM`ycE@<L*?}hh~sJ9}l(V=K6)ccPZ+bV;Rd_NrjUXrtw
zH$>;~AA?D(0k*%nLaK@<va8m-Mj4|ythD|*yfxo|HBX!jS&JlCv!xu<q>jPaU%%n=
zS-)}r&j)a@yqc&C$P!uGQ_!qvhk1{8kmW8ijG@LAoY!y+rf|Mv>{t#J+VBOwmTpF$
zz_lozAWgQ8oyF@7X(%w1iC4`OaYR*(&B$Wdc-{4EZsIPO@9_xb?(L-OCv9c@pFP5|
zV++{uJ}3O~CmW9TJOHik+c<>JG3_4L3)K%mi|HG|m)lLXRQv(F5ITY?*G6IAYja%4
z+l?tx0J60=K<cm-w-0Fngc+@34b|3yu|OPN$bJWkYrcVfWEg!J%khx>#(8$~0$kQN
z0ls=n0R1e2=PYGFqHiMH<=&gqtTbSXcRr2=WkasLAT!_Cos5Yz!I-E8{k_K*2B%E~
zrMdIiu<!-!x?ye?(3fyBTW1-Ts?DOC<t3Qo0V_DKSA?mJYy^e76PV}Y_aXmWD+*an
zWN%H7VNVPHLxp}pX6=V1P?y88*(LMIzt{J3W+TUcHIidZhu(8q{VGo1y^9~3%5Z_r
zeU#dt08KfHtmAMTR$t-a(*A`^!t$q(+{rLOr8c0QuL$d1{z2?yA4;W#nMT7&Omfa*
zDCzToHO-=IuthHnY;eVQYkOfKmjM~7mm{&FN|0cy$Og%XFf|shsn>-%FgkvVIxWe<
zUvAgnj4{wVa*6mS^)3k6F30h8cSv)O1nN#X07D*Ua6|fTnv_0<_ndk$z2_$1cful`
zWmXI<m*?h!^Iu>$w~s_OSqV<oNU{N(u9|!HApUz40U_U#^?MHcV|<7O79RBB&P%sJ
zaB>UuCaE&DFFz8WBSFx?W!l}}nuGH!JqYTP!b48NY+S@!dVRcyN-lhZ4eWYqrS^;8
zGN8g9_|?gqvOSo5jp6RG7lX7lT7$&c%R%e-ZOm=CMpdUrfRF1Qw2+k~t=#iSXyiXm
z8`wdf<OT9pyWM28FXRGAK8M-vRbYH<HXAv|8t%Mu#5P-5zIOFAc4SbOQTwq4LOG3j
zl|&VO-Fu!qeyPbu+(`ka+y0=Luf~*$3o_rLo?>vECT5B!LF}DGa8^-cR%@QbevwC{
z*ZMChoubQ_T)hpWhxRhVH`KB3+iMaxX#*(FThA`h59g1b`HAH&IbdbDj_Hl~PEMM-
zq3*PJTstivH0*6~Zl)m1OKO2Lhxrh#=tY+|x)bBW*U4{nDJIY?9I{lG!adtqgAQ9&
zQ0_9pZ1t7Q`i^x>R(C$<>4rhaQg;j%n#YzbkYfwYB^cMIq8Q|H7FQ+gXU>lcVBtC?
zruX9y_~N?}mA2YJm~j?3m~t7@r7@Vb<SwbXz6=V&OEBTqFPv2*%J8mkL)}NpY|_@v
z@Rswm<@c(=tm)UG?YSv)ytN#<UIFt~>?ReHmSbctN1^{cX(mZF0*1HThY`Jn%);-4
z%=3GOd!zqgMuj7Pr(OV>bzFmu4x`w~X@wzD;~?s0gX*4&?Ac$h;o`_2`b#pB*3O|Y
z(Kj8cYJJf)_BHT5wlN1(ufg3iUFM&-4kH?y0@_Pi9Ee-VyjNStOgLT%wL<ZH?`7ib
zO2zv$QjqfvV=BO8(gL<_dosz@{Ez=EV0})=_Ios0!U>vA3b0Q-bBT6NA%1%sNgCJB
zr9TdRA=^$hfpeNNe$)!bkO$TDW}-N=g3I-66pV#00qJzQB&XqMsWPtw*YH(c%21|s
z7B#*p1O?xO$ymn-)oPeaedjaaoi-2l`2^zKuhlR-^$|56IE#fpcHzFG%MAi#>e1Iz
zgl*b%8;V-GKy0-VJJR0_$DO!cs;+NH#Og6{TKf>Qcby_(hn1Q6HbL}gfdm@qDzo;%
zW;l%ukd4F9{PS}T;LO%$E+<h8X}=$HT<JI%l8{724UQEaYCtM-O~Cl^8#I_vi#xeJ
z41Ws$qR3o|*WDJ7wxBDdwOxqU-xvYGf6AaTWCFtz)nWpg??AWMO)k$|fc&&gSU0o}
z<J@%Ex{+v*J$@BC7iPeUvq#}kQ#{JLYeLoMYj`w-W8%M_0~SYYKv|0Gh=hFO=5h=0
z?)r((5@)h278|i^d_qXW(}|Gl@C0_<;}{O+K5&@#1oBj8z|8ld1|d#{Y>4$bHinzI
zvo1)Yl>TGt-A1s=eKzaX5Ci?bWw>KV3}$l~Jh6k7u+_Z*B3{4XI+zH~A2ZN<U_I;m
zJ{L6`*Rjfp{<vP@0iyRTwkPTp$Fp4u|NZ_=T_tWne47GWl2A;_kBy>R$y~N>@gKg?
zpJUXyO^`MC*a{)Ly->S96|~RIMl0idc=_)(PT{}B(|%kh;CUAI#57=OQ5VV3kz(_v
zMxw&bf86tWCfgY}hL?^v<d`LtVaPxQr)yP1?S%>KKEFKT#ht^HL=NFH^KUS2eVSzZ
zNU$-7Cb9vql-S=1eYo(30q_RwU?7g$#iK0FIvq)YNQvK2!(R*sdt!0o<%w*9@CX>s
zx=%06E1}NzUg*5KhJ;5E_WGk6@USWa<EtuQ+s2h_wrd`S<;@_u1q!e#)gHPuWT1`9
z6CJDzMZsO0sAgV2DfemyuQh$h|7M5EGYU{;P6&3~NkheDKk3Pb92a|=H7z{I?dd#G
zLDvVXumfotls~<Snu}KAxpRN;r;!yDUiU}Ec?mQyH5_e(=VSJ*0V<ZS$24&MvY3Mh
zx!_sBKb5MEerr6zO>i8l-}iv2l^-rMyNbD+?V#^|7!cPn^y{_3@-d?vmEl;(`E;3h
z3O+=et4egkrD%SPU_3a=4-@@eCN%AzD5JmqEM7|p1GC0pJUf6quRs~*_yiGVx0@EH
z=j$<sMF!mNUK75ERuUyU8bLi?7Gk%1fxGWG?Nyk-G+A1~$`d{CV2c8b3MAk}BThF>
zZbRG3&-iTaKQLP$$q0Qm2CvaQ%(tx7kmK)$>d8XPs3XVIUF=WKa{L!>+vzl_Ll(^o
z_Cvl>7*v@bM_<G3m|QW6Ax&9m^-_s8=q2!$N5nz5?JHQC{0ADEUXy9!TyN#k7Oa+f
zjsr*KVE5N982U5>meZCo^<p!bi-ngV-(e%S(@20xoaYU;^?hjS1|(t2RZ!~C0=?go
zjL3%9*b*5@>~_mC5|>|~Zr}sdH!K7jn?UGc+94atXrf#)^o0Dy6!&Dj_4*NM*jS5t
zt1J0qZyeFTP>LaB6OQXHf^F%s#I!(^=^lyXZ!8P}E2zZhQXjdU%Yh&xlL<m5!B`{w
z9cl+wf$qCUC_d#j7&&n}8Eyob1#K4CEVV^J*fWUxv{}$I=$4%jxBA<O`-!jg|K9#>
zeeBTLAUkm_J1L_%g8#o>+PzG7>EdNe43_Kv-%GnUOZ|WE?EXK$%<b&{pY`JZ^U-`w
z3WYcLg4xn&%$W2GBPUH^L(XL5s-|4fDt<&1W#2*24CDWEZEpl^7QFF)UDb0}f_x<p
woIK)m$mWpKp`icgYHRrl1aaRM|KD5AxxYB;EB60heD3>pGr6zNowe5aU#sc~K>z>%

literal 0
HcmV?d00001

diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job.yaml b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job.yaml
new file mode 100755
index 0000000000..8ac9300165
--- /dev/null
+++ b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job.yaml
@@ -0,0 +1,29 @@
+# Local directory where your source code resides.
+# It should be the relative path to this job yaml file or the absolute path.
+# If your job doesn't contain any source code, it can be empty.
+workspace: deploy_llm_job
+
+# Running entry commands which will be executed as the job entry point.
+# Support multiple lines, which can not be empty.
+job: |
+    echo "current job id: $FEDML_CURRENT_RUN_ID"
+    echo "current edge id: $FEDML_CURRENT_EDGE_ID"
+    echo "Hello, Here is the FedML Nexus AI platform."
+    echo "Current directory is as follows."
+    pwd
+    sleep 3
+
+job_type: deploy              # options: train, deploy, federate
+
+# Bootstrap shell commands which will be executed before running entry commands.
+# Support multiple lines, which can be empty.
+bootstrap: |
+  pip install -r requirements.txt
+  echo "Bootstrap finished."
+
+computing:
+  #resource_type: RTX-3090    # e.g., A100-80G, please check the resource type list by "fedml show-resource-type" or visiting URL: https://open.fedml.ai/accelerator_resource_type
+  resource_type: A100-80GB-SXM
+  minimum_num_gpus: 1             # minimum # of GPUs to provision
+  maximum_cost_per_hour: $10    # max cost per hour of all machines for your job
+  # device_type: GPU # GPU or CPU
diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/.gitignore b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/.gitignore
new file mode 100644
index 0000000000..0d20b6487c
--- /dev/null
+++ b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/__init__.py b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/__init__.py
similarity index 100%
rename from python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/__init__.py
rename to python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/__init__.py
diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/app/__init__.py b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/app/__init__.py
similarity index 100%
rename from python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/app/__init__.py
rename to python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/app/__init__.py
diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/app/pipe/__init__.py b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/app/pipe/__init__.py
similarity index 100%
rename from python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/app/pipe/__init__.py
rename to python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/app/pipe/__init__.py
diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/app/pipe/constants.py b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/app/pipe/constants.py
similarity index 100%
rename from python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/app/pipe/constants.py
rename to python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/app/pipe/constants.py
diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/app/pipe/instruct_pipeline.py b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/app/pipe/instruct_pipeline.py
similarity index 100%
rename from python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/app/pipe/instruct_pipeline.py
rename to python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/app/pipe/instruct_pipeline.py
diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/config/__init__.py b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/config/__init__.py
similarity index 100%
rename from python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/config/__init__.py
rename to python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/config/__init__.py
diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/fedml_model_config.yaml b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/fedml_model_config.yaml
new file mode 100644
index 0000000000..bff517ef6d
--- /dev/null
+++ b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/fedml_model_config.yaml
@@ -0,0 +1,12 @@
+workspace: "."
+entry_point: "main_entry.py"
+
+auto_detect_public_ip: true
+server_external_port: 20203
+server_internal_port: 2203
+
+bootstrap: |
+  echo "Bootstrap start..."
+  pip install -U fedml
+  sh ./config/bootstrap.sh
+  echo "Bootstrap finished"
diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/main_entry.py b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/main_entry.py
similarity index 100%
rename from python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/main_entry.py
rename to python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/main_entry.py
diff --git a/python/setup.py b/python/setup.py
index a9d61b352a..c531f722e2 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -41,7 +41,7 @@ def finalize_options(self):
     "wandb==0.13.2",
     "httpx",
     "attrs",
-    "fastapi>=0.92.0",
+    "fastapi",
     "uvicorn",
     "geventhttpclient>=1.4.4,<=2.0.9",
     "aiohttp>=3.8.1",
@@ -62,7 +62,7 @@ def finalize_options(self):
     "py-machineid",
     "cachetools",
     "toposort",
-    "pydantic>=2.0",
+    "pydantic",
     "pydantic-settings",
 ]
 
@@ -116,7 +116,7 @@ def finalize_options(self):
 
 setup(
     name="fedml",
-    version="0.8.27.dev2",
+    version="0.8.29.dev4",
     author="FedML Team",
     author_email="ch@fedml.ai",
     description="A research and production integrated edge-cloud library for "

From 4ea1c7cc76ab0c1ff55ebf38a5d9b7130c9b09eb Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 28 Mar 2024 17:27:14 +0800
Subject: [PATCH 002/282] [CoreEngine] download packages without the ssl
 certification.

---
 .../scheduler/scheduler_core/scheduler_base_job_runner.py    | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
index e2e090596d..46f1e7ff8f 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -20,6 +20,7 @@
 from ..scheduler_core.message_center import FedMLMessageCenter
 from ..scheduler_core.status_center import FedMLStatusCenter
 from abc import ABC, abstractmethod
+import ssl
 
 
 class RunnerError(Exception):
@@ -160,8 +161,8 @@ def retrieve_and_unzip_package(self, package_name, package_url):
         local_package_file = os.path.join(local_package_path, f"fedml_run_{self.run_id}_{filename_without_extension}")
         if os.path.exists(local_package_file):
             os.remove(local_package_file)
-        package_url_without_query_path = urljoin(package_url, urlparse(package_url).path)
-        urllib.request.urlretrieve(package_url_without_query_path, local_package_file,
+        ssl._create_default_https_context = ssl._create_unverified_context
+        urllib.request.urlretrieve(package_url, local_package_file,
                                    reporthook=self.package_download_progress)
         unzip_package_path = os.path.join(self.agent_package_unzip_dir,
                                           f"unzip_fedml_run_{self.run_id}_{filename_without_extension}")

From 46cbab24068b348c148a7c7e39c7f1c458240b04 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Sat, 6 Apr 2024 21:00:38 +0800
Subject: [PATCH 003/282] [CoreEngine] sync the deployment and launch modules.

---
 .../master/base_master_protocol_manager.py    |  39 +--
 .../model_scheduler/job_runner_msg_sender.py  |  87 +----
 .../model_scheduler/master_job_runner.py      | 299 +++++++++++++-----
 .../master_job_runner_manager.py              |   8 +-
 .../master_protocol_manager.py                | 146 +++------
 .../model_scheduler/worker_job_runner.py      | 287 +++++++++--------
 .../slave/base_slave_protocol_manager.py      |  13 +-
 .../scheduler/slave/slave_protocol_manager.py |  51 +++
 8 files changed, 520 insertions(+), 410 deletions(-)

diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
index bf720515d9..25cab5a17c 100755
--- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -76,8 +76,8 @@ def generate_topics(self):
         # The topic for requesting device info from the client.
         self.topic_response_device_info = "client/server/response_device_info/" + str(self.edge_id)
 
-        # The topic for requesting device info from MLOps.
-        self.topic_request_device_info_from_mlops = f"mlops/master_agent/request_device_info/{self.edge_id}"
+        # The topic for requesting device info from mlops.
+        self.topic_request_device_info_from_mlops = f"deploy/mlops/master_agent/request_device_info/{self.edge_id}"
 
         # The topic for getting job status from the status center.
         self.topic_requesst_job_status = f"anywhere/master_agent/request_job_status/{self.edge_id}"
@@ -115,6 +115,7 @@ def add_protocol_handler(self):
         self.add_message_listener(self.topic_ota_msg, FedMLBaseMasterProtocolManager.callback_server_ota_msg)
         self.add_message_listener(self.topic_report_status, self.callback_report_current_status)
         self.add_message_listener(self.topic_response_device_info, self.callback_response_device_info)
+        self.add_message_listener(self.topic_response_device_info, self.callback_response_device_info)
         self.add_message_listener(self.topic_request_device_info_from_mlops,
                                   self.callback_request_device_info_from_mlops)
         self.add_message_listener(self.topic_requesst_job_status, self.callback_request_job_status)
@@ -436,38 +437,10 @@ def response_device_status_in_job(self, topic, payload):
             self.mlops_metrics.report_json_message(response_topic, json.dumps(response_payload))
 
     def response_device_info_to_mlops(self, topic, payload):
-        response_topic = f"master_agent/mlops/response_device_info"
-        payload_json = json.loads(payload)
-        need_gpu_info = payload_json.get("need_gpu_info", False)
+        response_topic = f"deploy/master_agent/mlops/response_device_info"
         if self.mlops_metrics is not None:
-            if not need_gpu_info:
-                response_payload = {
-                    "run_id": self.run_id,
-                    "master_agent_device_id": self.edge_id,
-                    "fedml_version": fedml.__version__
-                }
-            else:
-                total_mem, free_mem, total_disk_size, free_disk_size, cup_utilization, cpu_cores, \
-                    gpu_cores_total, gpu_cores_available, sent_bytes, recv_bytes, gpu_available_ids = \
-                    sys_utils.get_sys_realtime_stats()
-                gpu_available_ids = JobRunnerUtils.get_instance().get_available_gpu_id_list(self.edge_id)
-                gpu_available_ids = JobRunnerUtils.trim_unavailable_gpu_ids(gpu_available_ids)
-                gpu_cores_available = len(gpu_available_ids)
-                response_payload = {
-                    "run_id": self.run_id,
-                    "master_agent_device_id": self.edge_id,
-                    "memoryTotal": round(total_mem * MLOpsUtils.BYTES_TO_GB, 2),
-                    "memoryAvailable": round(free_mem * MLOpsUtils.BYTES_TO_GB, 2),
-                    "diskSpaceTotal": round(total_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
-                    "diskSpaceAvailable": round(free_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
-                    "cpuUtilization": round(cup_utilization, 2),
-                    "cpuCores": cpu_cores,
-                    "gpuCoresTotal": gpu_cores_total,
-                    "gpuCoresAvailable": gpu_cores_available,
-                    "networkTraffic": sent_bytes + recv_bytes,
-                    "timestamp": int(MLOpsUtils.get_ntp_time()),
-                    "fedml_version": fedml.__version__
-                }
+            response_payload = {"run_id": self.run_id, "master_agent_device_id": self.edge_id,
+                                "fedml_version": fedml.__version__}
             self.mlops_metrics.report_json_message(response_topic, json.dumps(response_payload))
 
     def init_job_task(self, request_json):
diff --git a/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py
index 3fe45401ac..acce17d20b 100755
--- a/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py
+++ b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py
@@ -18,7 +18,7 @@ def __init__(self):
         self.request_json = None
         self.edge_id = None
 
-    def send_deployment_results_with_payload(self, end_point_id, end_point_name, payload):
+    def send_deployment_results_with_payload(self, end_point_id, end_point_name, payload, replica_id_list=None):
         self.send_deployment_results(end_point_id, end_point_name,
                                      payload["model_name"], payload["model_url"],
                                      payload["model_version"], payload["port"],
@@ -26,12 +26,13 @@ def send_deployment_results_with_payload(self, end_point_id, end_point_name, pay
                                      payload["model_metadata"],
                                      payload["model_config"],
                                      payload["input_json"],
-                                     payload["output_json"])
+                                     payload["output_json"],
+                                     replica_id_list=replica_id_list)
 
     def send_deployment_results(self, end_point_id, end_point_name,
                                 model_name, model_inference_url,
                                 model_version, inference_port, inference_engine,
-                                model_metadata, model_config, input_json, output_json):
+                                model_metadata, model_config, input_json, output_json, replica_id_list=None):
         deployment_results_topic_prefix = "model_ops/model_device/return_deployment_result"
         deployment_results_topic = "{}/{}".format(deployment_results_topic_prefix, end_point_id)
         deployment_results_payload = {"end_point_id": end_point_id, "end_point_name": end_point_name,
@@ -42,7 +43,8 @@ def send_deployment_results(self, end_point_id, end_point_name,
                                       "model_config": model_config,
                                       "input_json": input_json,
                                       "output_json": output_json,
-                                      "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))}
+                                      "timestamp": int(format(time.time_ns() / 1000.0, '.0f')),
+                                      "replica_ids": replica_id_list}
         logging.info(f"[Master] deployment_results_payload is sent to mlops: {deployment_results_payload}")
 
         self.message_center.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload))
@@ -104,85 +106,16 @@ def send_deployment_start_request_to_edges(self):
                 continue
             should_added_devices.append(edge_id)
             # send start deployment request to each device
-            self.send_deployment_start_request_to_edge(edge_id)
+            self.send_deployment_start_request_to_edge(edge_id, self.request_json)
         return should_added_devices
 
-    def send_deployment_start_request_to_edge(self, edge_id):
+    def send_deployment_start_request_to_edge(self, edge_id, request_json):
         topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(edge_id))
         logging.info("start_deployment: send topic " + topic_start_deployment + " to client...")
-        self.message_center.send_message_json(topic_start_deployment, json.dumps(self.request_json))
+        self.message_center.send_message_json(topic_start_deployment, json.dumps(request_json))
 
     def send_deployment_delete_request_to_edges(self, payload, model_msg_object):
-        if model_msg_object is None:    # Called after the diff operation
-            if "diff_devices" not in self.request_json or self.request_json["diff_devices"] is None:
-                return
-            else:
-                edge_id_list_to_delete = []
-                for device_id in self.request_json["diff_devices"]:
-                    if self.request_json["diff_devices"][device_id] == ServerConstants.DEVICE_DIFF_DELETE_OPERATION:
-                        edge_id_list_to_delete.append(device_id)
-                if len(edge_id_list_to_delete) == 0:
-                    return
-
-                try:
-                    FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port,
-                                                                    self.redis_password)
-
-                    # 1. Get & Delete the endpoint device info in Redis / SQLite
-                    device_objs = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                        get_end_point_device_info(self.request_json["run_id"])
-
-                    if device_objs is None:
-                        raise Exception("The device list in local redis is None")
-                    else:
-                        total_device_objs_list = json.loads(device_objs)
-                        for device_obj in total_device_objs_list:
-                            if device_obj["id"] in edge_id_list_to_delete:
-                                total_device_objs_list.remove(device_obj)
-
-                    FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_device_info(
-                        self.request_json["end_point_id"], self.request_json["end_point_name"],
-                        json.dumps(total_device_objs_list))
-
-                    # 2 Delete the result in deployment result list in Redis / SQLite
-                    device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                        get_deployment_result_list(self.request_json["end_point_id"],
-                                                   self.request_json["end_point_name"],
-                                                   self.request_json["model_config"]["model_name"])
-                    delete_device_result_list = []
-                    for device_result in device_result_list:
-                        device_result_dict = json.loads(device_result)
-                        if int(device_result_dict["cache_device_id"]) in edge_id_list_to_delete:
-                            delete_device_result_list.append(device_result)
-
-                    for delete_item in delete_device_result_list:
-                        FedMLModelCache.get_instance(self.redis_addr, self.redis_port).delete_deployment_result(
-                            delete_item, self.request_json["end_point_id"],
-                            self.request_json["end_point_name"],
-                            self.request_json["model_config"]["model_name"]
-                        )
-
-                except Exception as e:
-                    run_id = self.request_json["run_id"]
-                    error_log_path = f"~/.fedml/fedml-model-server/fedml/logs/error_delete_{run_id}.txt"
-                    if not os.path.exists(os.path.dirname(os.path.expanduser(error_log_path))):
-                        os.makedirs(os.path.dirname(os.path.expanduser(error_log_path)))
-                    with open(os.path.expanduser(error_log_path), "w") as f:
-                        f.write(str(self.request_json))
-                        f.write(str(e))
-                        f.write('\n')
-                    raise e
-
-        else:   # Delete the whole endpoint
-            edge_id_list_to_delete = model_msg_object.device_ids
-
-        # For Debug
-        if payload is not None:
-            debug_log_path = f"~/.fedml/fedml-model-server/fedml/logs/tmp_debug_delete_payload.txt"
-            if not os.path.exists(os.path.dirname(os.path.expanduser(debug_log_path))):
-                os.makedirs(os.path.dirname(os.path.expanduser(debug_log_path)))
-            with open(os.path.expanduser(debug_log_path), "w") as f:
-                f.write(str(payload))
+        edge_id_list_to_delete = model_msg_object.device_ids
 
         # Remove the model master node id from the list using index 0
         edge_id_list_to_delete = edge_id_list_to_delete[1:]
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index f3d68c1f6a..867f299ccc 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -125,7 +125,7 @@ def run_impl(
         # Changed the status to "IDLE"
         self.status_reporter.report_server_id_status(
             run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED,
-            is_from_model=True, server_agent_id=self.edge_id, server_id=self.edge_id, edge_id=self.edge_id,)
+            is_from_model=True, server_agent_id=self.edge_id, server_id=self.edge_id, edge_id=self.edge_id)
 
         # Check if we should stop the runner
         logging.info("send the model inference request to slave devices...")
@@ -136,28 +136,32 @@ def run_impl(
         devices_sent_add_or_remove_msg = self.send_deployment_start_request_to_edges()
 
         # Handle "op:update"
-        devices_sent_update_remove_msg = self.send_first_scroll_update_msg()
-
-        if len(devices_sent_add_or_remove_msg) == 0 and len(devices_sent_update_remove_msg) == 0:
-            # No device is added or removed, and no device is updated or removed
-            ip = GeneralConstants.get_ip_address(self.request_json)
-            master_port = os.getenv("FEDML_MASTER_PORT", None)
-            if master_port is not None:
-                inference_port = int(master_port)
-            model_inference_port = inference_port
-            if ip.startswith("http://") or ip.startswith("https://"):
-                model_inference_url = "{}/api/v1/predict".format(ip)
-            else:
-                model_inference_url = "http://{}:{}/api/v1/predict".format(ip, model_inference_port)
+        try:
+            devices_sent_update_remove_msg = self.send_first_scroll_update_msg()
+
+            if len(devices_sent_add_or_remove_msg) == 0 and len(devices_sent_update_remove_msg) == 0:
+                # No device is added or removed, and no device is updated or removed
+                logging.info("No device is added, updated or removed. No action needed for reconciliation.")
+                ip = GeneralConstants.get_ip_address(self.request_json)
+                master_port = os.getenv("FEDML_MASTER_PORT", None)
+                if master_port is not None:
+                    inference_port = int(master_port)
+                model_inference_port = inference_port
+                if ip.startswith("http://") or ip.startswith("https://"):
+                    model_inference_url = "{}/api/v1/predict".format(ip)
+                else:
+                    model_inference_url = "http://{}:{}/api/v1/predict".format(ip, model_inference_port)
 
-            self.send_deployment_status(
-                run_id, end_point_name, model_name, model_inference_url,
-                ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
-                message_center=self.message_center
-            )
+                self.send_deployment_status(
+                    run_id, end_point_name, model_name, model_inference_url,
+                    ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
+                    message_center=self.message_center
+                )
 
-            self.trigger_completed_event()
-            return
+                self.trigger_completed_event()
+                return
+        except Exception as e:
+            logging.info(f"Exception at run impl {traceback.format_exc()}")
 
         self.deployment_result_queue = run_extend_queue_list[0]
         while True:
@@ -187,19 +191,78 @@ def process_deployment_result_message(self, topic=None, payload=None):
         model_name = payload_json["model_name"]
         model_version = payload_json["model_version"]
         model_status = payload_json["model_status"]
-        replica_no = payload_json.get("replica_no", None)  # Idx start from 1
+        replica_no = payload_json.get("replica_no", None)  # "no" Idx start from 1
         run_id_str = str(end_point_id)
 
+        # HotFix(Raphael): logging service cross talk
+        # Change the handler since each handler need to write to different log files
+        try:
+            # Remove the existing file handler
+            root_logger = logging.getLogger()
+            for handler in root_logger.handlers:
+                if isinstance(handler, logging.FileHandler):
+                    root_logger.removeHandler(handler)
+
+            # Correct log path: ~/.fedml/fedml-model-server/fedml/logs/fedml-run-$rid-edge-$eid.log
+            log_file = os.path.join(ServerConstants.get_log_file_dir(),
+                                    f"fedml-run-{run_id_str}-edge-{self.edge_id}.log")
+
+            filehandler = logging.FileHandler(log_file, "a")
+
+            program_prefix = "FedML-Server @device-id-{}".format(self.edge_id)
+            formatter = logging.Formatter(fmt="[" + program_prefix + "] [%(asctime)s] [%(levelname)s] "
+                                                                     "[%(filename)s:%(lineno)d:%(funcName)s] %("
+                                                                     "message)s",
+                                          datefmt="%a, %d %b %Y %H:%M:%S")
+
+            filehandler.setFormatter(formatter)
+            root_logger.addHandler(filehandler)
+        except Exception as e:
+            logging.warning(f"Failed to change the logging handler due to {e}.")
+
+        logging.info("========== callback_deployment_result_message ==========\n")
+        #  Identify the operation for this run (add, remove, update)
+        if run_id_str not in self.running_request_json:
+            logging.error(f"Run id {run_id_str} is not in the running request json.")
+            return
+
+        # The rolling update and scale out / in operation should not happen at the same time
+        assert not ("replica_num_diff" in self.running_request_json[run_id_str] and
+                    len(self.running_request_json[run_id_str]["replica_num_diff"]) > 0 and
+                    "replica_version_diff" in self.running_request_json[run_id_str])
+
+        if "replica_version_diff" in self.running_request_json[run_id_str]:
+            run_operation = "UPDATE"
+        elif "replica_num_diff" in self.running_request_json[run_id_str] and \
+                len(self.running_request_json[run_id_str]["replica_num_diff"]) > 0:
+            run_operation = "ADD_OR_REMOVE"
+        else:
+            logging.error(f"Unsupported operation for run id {run_id_str}. and request json "
+                          f"{self.running_request_json[run_id_str]}")
+            return
+
+        logging.info(f"End point {end_point_id}; Device {device_id}; replica {replica_no}; "
+                     f"run_operation {run_operation} model status {model_status}.")
+
+        # OPTIONAL DEBUG PARAMS
+        # this_run_controller = self.model_runner_mapping[run_id_str].replica_controller
+        # logging.info(f"The current replica controller state is "
+        #              f"Total version diff num {this_run_controller.total_replica_version_diff_num}")
+        # logging.info(f"self.request_json now {self.request_json}")    # request_json will be deprecated
+        # this_run_request_json = self.running_request_json.get(run_id_str, None)
+        # logging.info(f"self.running_request_json now {this_run_request_json}")
+
         # Set redis + sqlite deployment result
         FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
 
-        # Save deployment result to local cache
+        # Deal with different model status
         if model_status == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DELETED:
+            # remove
             FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
                 delete_deployment_result_with_device_id_and_replica_no(
                 end_point_id, end_point_name, model_name, device_id, replica_no)
         elif model_status == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED:
-            # add or update
+            # add or update or update-failed-rollback
             FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
                 set_deployment_result(end_point_id, end_point_name,
                                       model_name, model_version,
@@ -210,38 +273,66 @@ def process_deployment_result_message(self, topic=None, payload=None):
         else:
             if model_status != ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED:
                 logging.error(f"Unsupported model status {model_status}.")
-            self.send_deployment_status(
-                end_point_id, end_point_name, payload_json["model_name"], "",
-                ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
-                message_center=self.message_center
-            )
 
+            # Failure handler
+            if run_operation == "ADD_OR_REMOVE":
+                # TODO(Raphael): Also support rollback for scale out / in operation
+                self.send_deployment_status(
+                    end_point_id, end_point_name, payload_json["model_name"], "",
+                    ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
+                    message_center=self.message_center)
+                return
+            else:
+                # Overwrite the json with the rollback version diff
+                rollback_version_diff = self.replica_controller.rollback_get_replica_version_diff(
+                        device_id_trigger=device_id, replica_no_trigger=replica_no)
+
+                # Change the target version to the start version
+                self.replica_controller.rollback_setback_target_replica_version()
+
+                self.running_request_json[run_id_str]["replica_version_diff"] = copy.deepcopy(rollback_version_diff)
+
+                # Send the rollback message to the worker devices
+                self.send_rollback_msg(run_id_str)
+
+                # Set the deployment status to ABORTING
+                self.send_deployment_status(
+                    end_point_id, end_point_name, payload_json["model_name"], "",
+                    ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTING,
+                    message_center=self.message_center)
+
+                # TODO(Raphael): Check if resource left not cleaned up
+                return
+
+        # Move to the next state (rolling update, finish the deployment, etc.)
         # Notify the replica number controller
-        self.callback_update_curr_replica_num_state(device_id, replica_no, model_status)
+        (self.replica_controller.callback_update_curr_replica_num_state(device_id, replica_no, model_status))
 
         # Notify the replica version controller, which might trigger the next rolling update
-        self.send_next_scroll_update_msg(device_id, replica_no)
+        self.send_next_scroll_update_msg(run_id_str, device_id, replica_no)
 
         # Update the global deployment result mapping
         self.slave_deployment_results_map[str(device_id)] = model_status
 
-        # Check if the endpoint is running
-        request_json = self.request_json
+        logging.info("callback_deployment_result_message: topic {}, payload {}, result mapping {}.".format(
+            topic, payload, self.slave_deployment_results_map))
+
+        request_json = self.running_request_json.get(run_id_str, None)
         if request_json is None:
-            logging.error(f"The endpoint {end_point_id} is not running.")
+            logging.error(f"The endpoint {end_point_id} is no longer running.")
             self.send_deployment_status(
                 end_point_id, end_point_name, payload_json["model_name"], "",
                 ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
-                message_center=self.message_center
-            )
+                message_center=self.message_center)
             return
 
-        # Wait for all replica's result, not device-level
-        if self.is_all_replica_num_reconciled() and self.is_all_replica_version_reconciled():
+        # Wait for all replica-level's result, not device-level
+        if (self.replica_controller.is_all_replica_num_reconciled() and
+                self.replica_controller.is_all_replica_version_reconciled()):
             '''
             When all the devices have finished the add / delete / update operation
             '''
-            # 1. We should generate one unified inference api
+            # Generate one unified inference api
             # Note that here we use the gateway port instead of the inference port that is used by the slave device
             model_config_parameters = request_json["parameters"]
             inference_port = model_config_parameters.get("server_internal_port",
@@ -255,15 +346,16 @@ def process_deployment_result_message(self, topic=None, payload=None):
                 model_inference_url = "http://{}:{}/inference/{}".format(ip, inference_port_external, end_point_id)
 
             # Send stage: MODEL_DEPLOYMENT_STAGE5 = "StartInferenceIngress"
-            self.send_deployment_stages(
-                end_point_id, model_name, model_id, model_inference_url,
-                ServerConstants.MODEL_DEPLOYMENT_STAGE5["index"], ServerConstants.MODEL_DEPLOYMENT_STAGE5["text"],
-                "inference url: {}".format(model_inference_url), message_center=self.message_center)
-
-            # Prepare the result to MLOps
-            deployed_replica_payload = self.get_deployed_replica_payload()
-            if deployed_replica_payload is not None:
-                payload_json = deployed_replica_payload
+            self.send_deployment_stages(end_point_id, model_name, model_id,
+                                        model_inference_url,
+                                        ServerConstants.MODEL_DEPLOYMENT_STAGE5["index"],
+                                        ServerConstants.MODEL_DEPLOYMENT_STAGE5["text"],
+                                        "inference url: {}".format(model_inference_url),
+                                        message_center=self.message_center)
+
+            # Send the result to MLOps
+            if self.deployed_replica_payload is not None:
+                payload_json = self.deployed_replica_payload
                 model_slave_url = payload_json["model_url"]
                 payload_json["model_url"] = model_inference_url
                 payload_json["port"] = inference_port_external
@@ -274,15 +366,18 @@ def process_deployment_result_message(self, topic=None, payload=None):
                 model_inputs = model_metadata["inputs"]
                 ret_inputs = list()
                 if "type" in model_metadata and model_metadata["type"] == "default":
-                    payload_json["input_json"] = {
-                        "end_point_name": end_point_name, "model_name": model_name, "token": str(token),
-                        "inputs": model_inputs, "outputs": []}
+                    payload_json["input_json"] = {"end_point_name": end_point_name,
+                                                  "model_name": model_name,
+                                                  "token": str(token),
+                                                  "inputs": model_inputs,
+                                                  "outputs": []}
                     payload_json["output_json"] = model_metadata["outputs"]
                 else:
                     raise Exception(f"Unsupported model metadata type {model_metadata['type']}")
 
                 self.send_deployment_results_with_payload(
-                    end_point_id, end_point_name, payload_json)
+                    end_point_id, end_point_name, payload_json,
+                    self.replica_controller.target_replica_ids)
 
                 payload_json_saved = payload_json
                 payload_json_saved["model_slave_url"] = model_slave_url
@@ -295,12 +390,20 @@ def process_deployment_result_message(self, topic=None, payload=None):
             FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
                 set_end_point_activation(end_point_id, end_point_name, True)
 
-            self.send_deployment_status(
-                end_point_id, end_point_name, payload_json["model_name"],
-                model_inference_url, ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
-                message_center=self.message_center
-            )
+            if self.replica_controller.under_rollback:
+                self.send_deployment_status(end_point_id, end_point_name,
+                                            payload_json["model_name"],
+                                            model_inference_url,
+                                            ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED)
+                self.replica_controller.under_rollback = False
+            else:
+                self.send_deployment_status(end_point_id, end_point_name,
+                                            payload_json["model_name"],
+                                            model_inference_url,
+                                            ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
+                                            message_center=self.message_center)
 
+            time.sleep(3)
             self.trigger_completed_event()
 
     @staticmethod
@@ -429,17 +532,28 @@ def send_first_scroll_update_msg(self):
         first_chunk_dict = self.request_json["replica_version_diff"]
 
         # Delete the record of the replaced device
-        self.delete_device_replica_info_on_master(first_chunk_dict)
+        try:
+            self.delete_device_replica_info_on_master(
+                self.request_json["end_point_id"], self.request_json["end_point_name"],
+                self.request_json["model_config"]["model_name"], first_chunk_dict)
+        except Exception as e:
+            logging.info(f"Exception at send_first_scroll_update_msg {traceback.format_exc()}")
+
+        logging.info(f"Send the first scroll update msg to the device {first_chunk_dict} ")
 
         # Send the deployment msg to the devices, (we reuse the start_deployment msg)
         for edge_id in first_chunk_dict.keys():
             if edge_id == self.edge_id:
                 continue
             # send start deployment request to each device
-            self.send_deployment_start_request_to_edge(edge_id)
+            self.send_deployment_start_request_to_edge(edge_id, self.request_json)
         return list(first_chunk_dict.keys())
 
-    def send_next_scroll_update_msg(self, device_id, replica_no):
+    def send_next_scroll_update_msg(self, run_id_str, device_id, replica_no):
+        """
+        Send the next scroll update msg to the devices if needed.
+        If there is no need for the next scroll update, directly return.
+        """
         if replica_no is None:
             return
 
@@ -448,33 +562,70 @@ def send_next_scroll_update_msg(self, device_id, replica_no):
         if replica_controller.total_replica_version_diff_num == 0:
             return
 
+        if replica_controller.under_rollback:
+            replica_controller.intermediate_replica_version[device_id][replica_no] = replica_controller.start_version
+            return
+
+        logging.info(f"Curr updating window: {replica_controller.curr_replica_updating_window} "
+                     f"Curr version diff num: {replica_controller.total_replica_version_diff_num}")
+
         replica_controller.callback_update_updating_window(device_id, replica_no)
 
         # Decide whether to send the next scroll update
         next_chunk_dict = replica_controller.get_next_chunk_devices_replica()
 
-        replica_controller.curr_replica_updating_window = copy.deepcopy(next_chunk_dict)
-
         if next_chunk_dict:
+            logging.info(f"The next scroll update for end point {run_id_str} is {next_chunk_dict}")
+            # Update curr updating window
+            replica_controller.curr_replica_updating_window = copy.deepcopy(next_chunk_dict)
+
+            # Use global deployment result mapping to decide whether to send the next scroll update
             self.request_json["replica_version_diff"] = next_chunk_dict
-            self.delete_device_replica_info_on_master(next_chunk_dict)
+
+            # Avoid using the old request_json
+            try:
+                self.delete_device_replica_info_on_master(
+                    self.request_json["end_point_id"],
+                    self.request_json["end_point_name"],
+                    self.request_json["model_config"]["model_name"],
+                    next_chunk_dict)
+            except Exception as e:
+                logging.info(f"Exception at send_next_scroll_update_msg {traceback.format_exc()}")
 
             # Send the deployment msg to the devices, (we reuse the start_deployment msg)
             for edge_id in next_chunk_dict.keys():
                 if edge_id == self.edge_id:
                     continue
                 # send start deployment request to each device
-                self.send_deployment_start_request_to_edge(edge_id)
+                self.send_deployment_start_request_to_edge(edge_id, self.request_json)
         return
 
-    def delete_device_replica_info_on_master(self, edge_id_replica_no_dict):
+    def send_rollback_msg(self, run_id_str):
+        # Avoid using the old request_json
+        try:
+            self.delete_device_replica_info_on_master(
+                self.request_json["end_point_id"],
+                self.request_json["end_point_name"],
+                self.request_json["model_config"]["model_name"],
+                self.request_json["replica_version_diff"])
+        except Exception:
+            # Best-effort cleanup: the full traceback is logged via format_exc(),
+            # so binding the exception object is unnecessary.
+            logging.info(f"Exception at send_rollback_msg {traceback.format_exc()}")
+
+        # Send the deployment msg to the devices, (we reuse the start_deployment msg)
+        for edge_id in self.request_json["replica_version_diff"].keys():
+            if edge_id == self.edge_id:
+                continue
+            # send start deployment request to each device
+            self.send_deployment_start_request_to_edge(edge_id, self.request_json)
+
+    def delete_device_replica_info_on_master(self, endpoint_id, endpoint_name, model_name, edge_id_replica_no_dict):
         FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
         # Remove the record of the replaced device
         # [Deprecated] deployment status & device info
         # Delete the result in deployment result list in Redis / SQLite
         device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-            get_deployment_result_list(self.request_json["end_point_id"], self.request_json["end_point_name"],
-                                       self.request_json["model_config"]["model_name"])
+            get_deployment_result_list(endpoint_id, endpoint_name, model_name)
+
         delete_device_result_list = []
         for device_result in device_result_list:
             device_result_dict = json.loads(device_result)
@@ -485,9 +636,7 @@ def delete_device_replica_info_on_master(self, edge_id_replica_no_dict):
 
         for delete_item in delete_device_result_list:
             FedMLModelCache.get_instance(self.redis_addr, self.redis_port).delete_deployment_result(
-                delete_item, self.request_json["end_point_id"],
-                self.request_json["end_point_name"],
-                self.request_json["model_config"]["model_name"]
+                delete_item, endpoint_id, endpoint_name, model_name
             )
 
         logging.info(f"Deleted the record of the replaced device {delete_device_result_list}")
@@ -515,7 +664,7 @@ def is_all_replica_version_reconciled(self):
         return False
 
     @staticmethod
-    def generate_request_json_with_replica_diff(run_id, edge_id, request_json):
+    def generate_request_json_with_replica_num_diff(run_id, edge_id, request_json):
         # Replica Controller is per deployment!
         replica_controller = FedMLDeviceReplicaController(edge_id, request_json)
         logging.info(f"Start Diff Replica controller for run {run_id} on edge {edge_id}")
@@ -525,6 +674,14 @@ def generate_request_json_with_replica_diff(run_id, edge_id, request_json):
         new_request_with_num_diff = replica_controller.generate_diff_to_request_json()
         request_json = new_request_with_num_diff
 
+        return request_json
+
+    @staticmethod
+    def generate_request_json_with_replica_version_diff(run_id, edge_id, request_json):
+        # Replica Controller is per deployment!
+        replica_controller = FedMLDeviceReplicaController(edge_id, request_json)
+        logging.info(f"Start Diff Replica controller for run {run_id} on edge {edge_id}")
+
         # Prepare version diff
         new_request_with_version_diff = replica_controller.init_first_update_device_replica_mapping()
         request_json = new_request_with_version_diff
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
index 40896b9ee8..7221a09574 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
@@ -58,5 +58,9 @@ def recover_inference_and_monitor():
         FedMLDeployMasterJobRunner.recover_inference_and_monitor()
 
     @staticmethod
-    def generate_request_json_with_replica_diff(run_id, edge_id, request_json):
-        return FedMLDeployMasterJobRunner.generate_request_json_with_replica_diff(run_id, edge_id, request_json)
+    def generate_request_json_with_replica_num_diff(run_id, edge_id, request_json):
+        return FedMLDeployMasterJobRunner.generate_request_json_with_replica_num_diff(run_id, edge_id, request_json)
+
+    @staticmethod
+    def generate_request_json_with_replica_version_diff(run_id, edge_id, request_json):
+        # Fix copy-paste bug: delegate to the *version*-diff generator, not the num-diff one,
+        # otherwise the replica version diff is never produced through this manager.
+        return FedMLDeployMasterJobRunner.generate_request_json_with_replica_version_diff(run_id, edge_id, request_json)
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index e8be50f77f..8566848ec6 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -1,7 +1,6 @@
 
 import json
 import logging
-import os
 from fedml.core.mlops import MLOpsConfigs, MLOpsRuntimeLog, MLOpsRuntimeLogDaemon
 from .device_model_cache import FedMLModelCache
 from .device_model_db import FedMLModelDatabase
@@ -102,8 +101,8 @@ def callback_deployment_result_message(self, topic=None, payload=None):
         FedMLDeployJobRunnerManager.get_instance().save_deployment_result(topic, payload)
 
     def callback_delete_deployment(self, topic, payload):
-        # Parse payload as the model message object.
         logging.info("[Master] callback_delete_deployment")
+        # Parse payload as the model message object.
         model_msg_object = FedMLModelMsgObject(topic, payload)
 
         # Set end point as deactivated status
@@ -115,8 +114,7 @@ def callback_delete_deployment(self, topic, payload):
             delete_end_point(model_msg_object.inference_end_point_id, model_msg_object.end_point_name,
                              model_msg_object.model_name, model_msg_object.model_version)
 
-        FedMLDeployJobRunnerManager.get_instance().send_deployment_delete_request_to_edges(
-            model_msg_object.inference_end_point_id, payload, model_msg_object)
+        FedMLDeployJobRunnerManager.get_instance().send_deployment_delete_request_to_edges(payload, model_msg_object)
 
         FedMLDeployJobRunnerManager.get_instance().stop_job_runner(model_msg_object.run_id)
 
@@ -138,7 +136,7 @@ def callback_start_deployment(self, topic, payload):
         except Exception as e:
             pass
 
-        # Parse the deployment parameters
+        # Get deployment params
         request_json = json.loads(payload)
         run_id = request_json["end_point_id"]
         end_point_name = request_json["end_point_name"]
@@ -147,6 +145,7 @@ def callback_start_deployment(self, topic, payload):
         user_name = request_json["user_name"]
         device_ids = request_json["device_ids"]
         device_objs = request_json["device_objs"]
+
         model_config = request_json["model_config"]
         model_name = model_config["model_name"]
         model_id = model_config["model_id"]
@@ -156,62 +155,76 @@ def callback_start_deployment(self, topic, payload):
         inference_engine = model_config.get("inference_engine", 0)
         inference_end_point_id = run_id
 
+        logging.info("[Master] received start deployment request for end point {}.".format(run_id))
+
         # Start log processor for current run
         self.args.run_id = run_id
         self.args.edge_id = self.edge_id
-        MLOpsRuntimeLog.get_instance(self.args).init_logs()
+        MLOpsRuntimeLog(args=self.args).init_logs()
         MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source(
             ServerConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT)
         MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id)
 
-        # Generate the deployment new parameters
-        logging.info("callback_start_deployment {}".format(payload))
+        # Add additional parameters to the request_json
         run_id = inference_end_point_id
-        run_id_str = str(run_id)
+        self.args.run_id = run_id
+        self.run_id = run_id
         request_json["run_id"] = run_id
         self.request_json = request_json
+        run_id_str = str(run_id)
         self.running_request_json[run_id_str] = request_json
-        diff_devices, diff_version = self.get_diff_devices(run_id)
-        self.request_json["diff_devices"] = diff_devices
-        self.request_json["diff_version"] = diff_version
         self.request_json["master_node_ip"] = GeneralConstants.get_ip_address(self.request_json)
 
-        # Save the endpoint device info
-        self.init_device_update_map()
+        # Target status of the devices
         FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
         FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
             set_end_point_device_info(request_json["end_point_id"], end_point_name, json.dumps(device_objs))
 
-        # Save the endpoint token
-        usr_indicated_token = FedMLDeployMasterProtocolManager.get_usr_indicated_token(request_json)
+        # Setup Token
+        usr_indicated_token = self.get_usr_indicated_token(request_json)
         if usr_indicated_token != "":
             logging.info(f"Change Token from{token} to {usr_indicated_token}")
             token = usr_indicated_token
         FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
             set_end_point_token(run_id, end_point_name, model_name, token)
 
-        # Subscribe deployment result messages from slave devices
         self.subscribe_deployment_messages_from_slave_devices(request_json)
 
-        # Send stage: MODEL_DEPLOYMENT_STAGE1 = "Received"
+        # Report stage to mlops: MODEL_DEPLOYMENT_STAGE1 = "Received"
         FedMLDeployJobRunnerManager.get_instance().send_deployment_stages(
             self.run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"],
             ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"], "Received request for end point {}".format(run_id),
             message_center=self.message_center)
 
-        # Send stage: MODEL_DEPLOYMENT_STAGE2 = "Initializing"
+        # Report stage to mlops: MODEL_DEPLOYMENT_STAGE2 = "Initializing"
         FedMLDeployJobRunnerManager.get_instance().send_deployment_stages(
             self.run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE2["index"],
             ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"], ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"],
             message_center=self.message_center)
 
-        # Save the runner info
         ServerConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, self.edge_id, run_id=run_id)
 
-        # Start the job runner to deploy models
-        self.running_request_json[run_id_str] = FedMLDeployJobRunnerManager.generate_request_json_with_replica_diff(
+        # Num diff
+        request_json = FedMLDeployJobRunnerManager.generate_request_json_with_replica_num_diff(
+            run_id, self.edge_id, request_json
+        )
+
+        # Listen to extra worker topics, especially when a worker's replica count is reduced to zero.
+        # In this case, Java currently will NOT send those worker ids to the master, but we still need to listen to them.
+        if "replica_num_diff" in request_json and len(request_json["replica_num_diff"]) > 0:
+            for device_id in request_json["replica_num_diff"].keys():
+                # {"op": "remove", "curr_num": 1, "target_num": 0}
+                if request_json["replica_num_diff"][device_id]["op"] == "remove" and \
+                        request_json["replica_num_diff"][device_id]["target_num"] == 0:
+                    self.subscribe_spec_device_message(run_id, device_id)
+
+        # Version diff
+        request_json = FedMLDeployJobRunnerManager.generate_request_json_with_replica_version_diff(
             run_id, self.edge_id, request_json
         )
+        self.running_request_json[run_id_str] = request_json
+
+        # Start the job runner to deploy models
         self._get_job_runner_manager().start_job_runner(
             run_id, request_json, args=self.args, edge_id=self.edge_id,
             sender_message_queue=self.message_center.get_sender_message_queue(),
@@ -262,81 +275,6 @@ def callback_deactivate_deployment(self, topic, payload):
         FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_activation(
             model_msg_object.inference_end_point_id, model_msg_object.model_name, False)
 
-    def get_diff_devices(self, run_id) -> (dict, dict):
-        """
-        {device_id(int): "op: add" | "op: delete" | "op: replace"}
-        "op: add" -> need to add
-        "op: delete" -> need to delete device
-        "op: replace" -> need to restart the container of the device on same port with new (same) model pkg
-
-        {device_id(int): "old_version"}
-        """
-        try:
-            logging.info(f"Get diff devices for run {run_id}")
-            request_json = self.running_request_json.get(str(run_id))
-
-            diff_devices = {}
-            diff_version = {}
-            FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-            device_objs = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                get_end_point_device_info(run_id)
-            if device_objs is None:
-                for new_device_id in request_json["device_ids"]:
-                    diff_devices[new_device_id] = ServerConstants.DEVICE_DIFF_ADD_OPERATION
-            else:
-                device_objs_dict = json.loads(device_objs)
-                device_ids_frm_db = [d["id"] for d in device_objs_dict]
-
-                for exist_device_id in device_ids_frm_db:
-                    if exist_device_id not in request_json["device_ids"]:
-                        diff_devices[exist_device_id] = ServerConstants.DEVICE_DIFF_DELETE_OPERATION
-
-                for new_device_id in request_json["device_ids"]:
-                    if new_device_id not in device_ids_frm_db:
-                        diff_devices[new_device_id] = ServerConstants.DEVICE_DIFF_ADD_OPERATION
-                    else:
-                        if new_device_id == self.edge_id:
-                            continue
-
-                        old_version = self.should_update_device(request_json, new_device_id)
-                        if old_version:
-                            diff_devices[new_device_id] = ServerConstants.DEVICE_DIFF_REPLACE_OPERATION
-                            diff_version[new_device_id] = old_version
-                        else:
-                            pass
-            logging.info(f"Diff devices: {diff_devices}")
-        except Exception as e:
-            error_log_path = f"~/.fedml/fedml-model-server/fedml/logs/{run_id}_error.txt"
-            if not os.path.exists(os.path.dirname(os.path.expanduser(error_log_path))):
-                os.makedirs(os.path.dirname(os.path.expanduser(error_log_path)))
-            with open(os.path.expanduser(error_log_path), "w") as f:
-                f.write(str(e))
-            raise e
-        return diff_devices, diff_version
-
-    def should_update_device(self, payload, new_device_id):
-        """
-        Query the device info in local redis, if the device info is different from the payload,
-        return the old model version
-        """
-        device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-            get_deployment_result_list(self.request_json["end_point_id"],
-                                       self.request_json["end_point_name"],
-                                       self.request_json["model_config"]["model_name"])
-
-        for device_result in device_result_list:
-            if device_result is None:
-                continue
-            device_result_dict = json.loads(device_result)
-
-            if int(device_result_dict["cache_device_id"]) == new_device_id:
-                result_body = json.loads(device_result_dict["result"])
-                if result_body["model_version"] != payload["model_config"]["model_version"]:
-                    return result_body["model_version"]
-                else:
-                    return None
-        return None
-
     @staticmethod
     def get_usr_indicated_token(request_json) -> str:
         usr_indicated_token = ""
@@ -358,8 +296,20 @@ def subscribe_deployment_messages_from_slave_devices(self, request_json):
             if str(edge_id) == str(self.edge_id):
                 continue
             # subscribe deployment result message for each model device
-            deployment_results_topic = "model_device/model_device/return_deployment_result/{}".format(edge_id)
+            deployment_results_topic = "model_device/model_device/return_deployment_result/{}/{}".format(
+                run_id, edge_id)
             self.add_message_listener(deployment_results_topic, self.callback_deployment_result_message)
             self.subscribe_msg(deployment_results_topic)
 
             logging.info("subscribe device messages {}".format(deployment_results_topic))
+
+    def subscribe_spec_device_message(self, run_id, device_id):
+        if device_id == self.edge_id:
+            return
+
+        # subscribe deployment result message for each model device
+        deployment_results_topic = "model_device/model_device/return_deployment_result/{}/{}".format(
+            run_id, device_id)
+
+        self.add_message_listener(deployment_results_topic, self.callback_deployment_result_message)
+        self.subscribe_msg(deployment_results_topic)
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
index 5d6f1a4d8e..78e2527e0c 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
@@ -7,7 +7,6 @@
 import traceback
 import urllib
 from abc import ABC
-from urllib.parse import urljoin, urlparse
 import yaml
 from fedml.computing.scheduler.comm_utils.job_utils import JobRunnerUtils
 from fedml.core.mlops import MLOpsRuntimeLog
@@ -27,7 +26,7 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id
         FedMLBaseSlaveJobRunner.__init__(
             self, args, edge_id=edge_id, request_json=request_json, agent_config=agent_config, run_id=run_id,
             cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str, agent_data_dir=ClientConstants.get_data_dir(),
-            agent_package_download_dir=ClientConstants.get_package_download_dir(),
+            agent_package_download_dir=ClientConstants.get_model_package_dir(),
             agent_package_unzip_dir=GeneralConstants.get_package_unzip_dir(ClientConstants.get_package_download_dir()),
             agent_log_file_dir=ClientConstants.get_log_file_dir()
         )
@@ -57,8 +56,7 @@ def retrieve_binary_model_file(self, package_name, package_url):
         local_package_file = "{}".format(os.path.join(local_package_path, package_name))
         if os.path.exists(local_package_file):
             os.remove(local_package_file)
-        package_url_without_query_path = urljoin(package_url, urlparse(package_url).path)
-        urllib.request.urlretrieve(package_url_without_query_path, local_package_file,
+        urllib.request.urlretrieve(package_url, local_package_file,
                                    reporthook=self.package_download_progress)
 
         unzip_package_path = os.path.join(unzip_package_path, package_name)
@@ -79,10 +77,6 @@ def get_model_bin_file(unzip_package_full_path):
     def update_local_fedml_config(self, run_id, model_config, model_config_parameters=None):
         model_name = model_config["model_name"]
         model_storage_url = model_config["model_storage_url"]
-        scale_min = model_config.get("instance_scale_min", 0)
-        scale_max = model_config.get("instance_scale_max", 0)
-        inference_engine = model_config.get("inference_engine", 0)
-        inference_end_point_id = run_id
 
         # Retrieve model package or model binary file.
         if self.model_is_from_open:
@@ -92,7 +86,6 @@ def update_local_fedml_config(self, run_id, model_config, model_config_parameter
             model_bin_file = FedMLDeployWorkerJobRunner.get_model_bin_file(unzip_package_path)
 
         # Load the config to memory
-        package_conf_object = {}
         fedml_local_config_file = os.path.join(unzip_package_path, "fedml_model_config.yaml")
 
         # Inject the config from UI to pkg yaml
@@ -117,92 +110,49 @@ def download_model_package(self, package_name, package_url):
     # Override
     def run_impl(self, run_extend_queue_list, sender_message_center,
                  listener_message_queue, status_center_queue):
+        # Get deployment params
         run_id = self.request_json["end_point_id"]
         end_point_name = self.request_json["end_point_name"]
-        token = self.request_json["token"]
-        user_id = self.request_json["user_id"]
-        user_name = self.request_json["user_name"]
         device_ids = self.request_json["device_ids"]
-        device_objs = self.request_json["device_objs"]
         master_ip = self.request_json["master_node_ip"]
-
         model_config = self.request_json["model_config"]
         model_name = model_config["model_name"]
         model_id = model_config["model_id"]
         model_version = model_config["model_version"]
-        model_storage_url = model_config["model_storage_url"]
-        scale_min = model_config.get("instance_scale_min", 0)
-        scale_max = model_config.get("instance_scale_max", 0)
         model_config_parameters = self.request_json["parameters"]
-
-        self.replica_handler = FedMLDeviceReplicaHandler(self.edge_id, self.request_json)
-
         inference_port = model_config_parameters.get("worker_internal_port",
                                                      ClientConstants.MODEL_INFERENCE_DEFAULT_PORT)
         inference_port_external = model_config_parameters.get("worker_external_port", inference_port)
-
-        if "using_triton" in model_config_parameters and model_config_parameters["using_triton"]:
-            inference_engine = ClientConstants.INFERENCE_ENGINE_TYPE_INT_TRITON
-        else:
-            inference_engine = ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT
-
-        logging.info("[Critical] The inference_engine is: {}".format(inference_engine))
-
-        self.model_is_from_open = True if model_config.get("is_from_open", 0) == 1 else False
-        if self.model_is_from_open:
-            model_net_url = model_config["model_net_url"]
+        inference_engine = model_config_parameters.get("inference_engine",
+                                                       ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT)
         inference_end_point_id = run_id
-        use_gpu = "gpu"  # TODO: Get GPU from device infos
-        memory_size = "4096m"  # TODO: Get Memory size for each instance
 
         self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id)
+        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
 
-        self.check_runner_stop_event()
-
-        logging.info("model deployment request: {}".format(self.request_json))
+        logging.info(f"[Worker] Received model deployment request from master for endpoint {run_id}.")
+        self.replica_handler = FedMLDeviceReplicaHandler(self.edge_id, self.request_json)
+        if self.replica_handler is not None:
+            logging.info(f"=================Worker replica Handler ======================"
+                         f"Reconcile with num diff {self.replica_handler.replica_num_diff} "
+                         f"and version diff {self.replica_handler.replica_version_diff}."
+                         f"=============================================================")
+        else:
+            logging.error(f"[Worker] Replica handler is None.")
+            return False
 
-        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
+        self.check_runner_stop_event()
 
+        # Report the deployment status to mlops
         self.status_reporter.report_client_id_status(
             self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING,
             is_from_model=True, running_json=json.dumps(self.request_json), run_id=run_id)
-
         self.status_reporter.report_client_id_status(
             self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING,
             is_from_model=True, run_id=run_id)
 
         self.check_runner_stop_event()
 
-        # update local config with real time parameters from server and dynamically replace variables value
-        logging.info("download and unzip model to local...")
-        unzip_package_path, model_bin_file, fedml_config_object = \
-            self.update_local_fedml_config(run_id, model_config, model_config_parameters)
-        if unzip_package_path is None or fedml_config_object is None:
-            logging.info("failed to update local fedml config.")
-            self.check_runner_stop_event()
-            self.status_reporter.report_client_id_status(
-                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
-                is_from_model=True, run_id=run_id)
-            return False
-
-        logging.info("check downloaded packages...")
-        if not os.path.exists(unzip_package_path):
-            logging.info("failed to unzip file.")
-            self.check_runner_stop_event()
-            self.status_reporter.report_client_id_status(
-                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
-                is_from_model=True, run_id=run_id)
-            return False
-
-        # download model net and load into the torch model
-        model_from_open = None
-        self.model_is_from_open = None
-
-        logging.info("start the model deployment...")
-        self.check_runner_stop_event()
-        running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
-            "", "", model_version, {}, {}
-
         # Reconcile the replica number (op: add, remove)
         prev_rank, op, op_num = self.replica_handler.reconcile_num_replica()
 
@@ -212,55 +162,134 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
             replica_rank_to_update, op = self.replica_handler.reconcile_replica_version()
 
         if not op:
-            logging.info("No need to reconcile.")
+            logging.info("[Worker] No need to reconcile.")
             return True
 
+        logging.info(
+            f"================Worker Reconcile Operations ======================\n"
+            f" op: {op}; op num: {op_num}.\n"
+            f"==================================================================\n")
+
+        # If not rollback, download package from MLOps; otherwise, use the backup package
+        if op != "rollback":
+            logging.info("Download and unzip model to local...")
+            unzip_package_path, _, _ = \
+                self.update_local_fedml_config(run_id, model_config, model_config_parameters)
+            if unzip_package_path is None:
+                logging.info("Failed to update local fedml config.")
+                self.check_runner_stop_event()
+                self.status_reporter.report_client_id_status(
+                    self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
+                    is_from_model=True, run_id=run_id)
+                return False
+
+            if not os.path.exists(unzip_package_path):
+                logging.info("Failed to unzip file.")
+                self.check_runner_stop_event()
+                self.status_reporter.report_client_id_status(
+                    self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
+                    is_from_model=True, run_id=run_id)
+                return False
+        else:
+            logging.info("Try to use backup package to rollback...")
+            # Find folder under "~/.fedml/fedml-model-client/fedml/model_packages \
+            # /${end_point_id}_${end_point_name}_${model_name}_${model_version}"
+            backup_folder_full_path = None
+            models_root_dir = ClientConstants.get_model_package_dir()
+
+            # Find the version (notified by master) to rollback
+            version_diff_dict = self.request_json["replica_version_diff"][str(self.edge_id)]
+            version_rollback_to = None
+            for replica_no, rollback_ops in version_diff_dict.items():
+                version_rollback_to = rollback_ops["new_version"]     # Note that new_version is the version to roll back to
+                break
+            if version_rollback_to is None:
+                logging.error(f"No old version found for run_id: {self.run_id} "
+                              f"edge_id: {self.edge_id}, rollback failed. No old version found in request_json.")
+                return False
+            model_version = version_rollback_to
+
+            # Format the version to match the folder name
+            model_version_formatted = version_rollback_to.replace(" ", "-")
+            model_version_formatted = model_version_formatted.replace(":", "-")
+
+            last_run_folder_sub_fd = f"{run_id}_{end_point_name}_{model_name}_{model_version_formatted}"
+            for folder in os.listdir(models_root_dir):
+                if last_run_folder_sub_fd in folder:
+                    backup_folder_full_path = os.path.join(models_root_dir, folder)
+                    break
+            if backup_folder_full_path is None:
+                logging.error(f"No backup folder found for run_id: {self.run_id} edge_id: {self.edge_id} "
+                              f"under {models_root_dir} with sub folder {last_run_folder_sub_fd}, rollback failed.")
+                return False
+
+            # Inside backup folder, find unzipped package with prefix unzip_fedml_run
+            unzip_package_path_parent = None
+            for folder in os.listdir(backup_folder_full_path):
+                if folder.startswith("unzip_fedml_run"):
+                    unzip_package_path_parent = os.path.join(backup_folder_full_path, folder)
+                    break
+
+            # Inside unzip folder, find the unzipped package, should be the only one
+            unzip_package_path = None
+            for folder in os.listdir(unzip_package_path_parent):
+                if os.path.isdir(os.path.join(unzip_package_path_parent, folder)):
+                    unzip_package_path = os.path.join(unzip_package_path_parent, folder)
+                    break
+
+            if unzip_package_path is None:
+                logging.error(f"No unzipped package found for run_id: {self.run_id} edge_id: {self.edge_id} "
+                              f"under {backup_folder_full_path}, rollback failed.")
+                return False
+
+        self.check_runner_stop_event()
+
+        running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
+            "", "", model_version, {}, {}
+
         if op == "add":
             worker_ip = GeneralConstants.get_ip_address(self.request_json)
-            for rank in range(prev_rank+1, prev_rank+1+op_num):
+            for rank in range(prev_rank + 1, prev_rank + 1 + op_num):
                 # TODO: Support Rollback if this for loop failed
                 try:
                     running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
                         start_deployment(
-                            inference_end_point_id, end_point_name, model_id, model_version,
-                            unzip_package_path, model_bin_file, model_name, inference_engine,
-                            ClientConstants.INFERENCE_HTTP_PORT,
-                            ClientConstants.INFERENCE_GRPC_PORT,
-                            ClientConstants.INFERENCE_METRIC_PORT,
-                            use_gpu, memory_size,
-                            ClientConstants.INFERENCE_CONVERTOR_IMAGE,
-                            ClientConstants.INFERENCE_SERVER_IMAGE,
-                            worker_ip,
-                            self.model_is_from_open, model_config_parameters,
-                            model_from_open,
-                            token,
-                            master_ip, self.edge_id, master_device_id=device_ids[0], replica_rank=rank,
+                            end_point_id=inference_end_point_id, end_point_name=end_point_name, model_id=model_id,
+                            model_version=model_version, model_storage_local_path=unzip_package_path,
+                            inference_model_name=model_name, inference_engine=inference_engine,
+                            infer_host=worker_ip, master_ip=master_ip, edge_id=self.edge_id,
+                            master_device_id=device_ids[0], replica_rank=rank,
                             gpu_per_replica=int(self.replica_handler.gpu_per_replica)
                         )
                 except Exception as e:
                     inference_output_url = ""
-                    logging.error(f"Exception at deployment: {traceback.format_exc()}")
+                    logging.error(f"[Worker] Exception at deployment: {traceback.format_exc()}")
 
                 if inference_output_url == "":
-                    logging.error("failed to deploy the model...")
+                    logging.error("[Worker] Failed to deploy the model.")
 
+                    # Send failed result back to master
                     result_payload = self.send_deployment_results(
                         end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
                         model_id, model_name, inference_output_url, inference_model_version, inference_port,
                         inference_engine, model_metadata, model_config)
 
+                    self.status_reporter.run_id = self.run_id
                     self.status_reporter.report_client_id_status(
                         self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
                         is_from_model=True, run_id=self.run_id)
+
                     return False
                 else:
-                    logging.info("finished deployment, continue to send results to master...")
+                    # Send successful result back to master
+                    logging.info("Finished deployment, continue to send results to master...")
                     result_payload = self.send_deployment_results(
                         end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                         model_id, model_name, inference_output_url, model_version, inference_port_external,
                         inference_engine, model_metadata, model_config, replica_no=rank + 1)
 
-                    if inference_port_external != inference_port:  # Save internal port to local db
+                    if inference_port_external != inference_port:
+                        # Save internal port to local db
                         logging.info("inference_port_external {} != inference_port {}".format(
                             inference_port_external, inference_port))
                         result_payload = self.construct_deployment_results(
@@ -272,21 +301,22 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                         run_id, end_point_name, model_name, model_version, self.edge_id,
                         json.dumps(result_payload), replica_no=rank + 1)
 
-                    logging.info(f"Deploy replica {rank+1} / {prev_rank+1+op_num} successfully.")
+                    logging.info(f"Deploy replica {rank + 1} / {prev_rank + 1 + op_num} successfully.")
                     time.sleep(5)
 
             time.sleep(1)
+            self.status_reporter.run_id = self.run_id
             self.status_reporter.report_client_id_status(
                 self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
                 is_from_model=True, run_id=self.run_id)
             return True
         elif op == "remove":
-            for rank_to_delete in range(prev_rank, prev_rank-op_num, -1):
+            for rank_to_delete in range(prev_rank, prev_rank - op_num, -1):
                 self.replica_handler.remove_replica(rank_to_delete)
 
                 FedMLModelCache.get_instance().set_redis_params()
                 replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids(
-                    run_id, end_point_name, model_name, self.edge_id, rank_to_delete+1)
+                    run_id, end_point_name, model_name, self.edge_id, rank_to_delete + 1)
 
                 replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)
 
@@ -302,6 +332,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                     inference_engine, model_metadata, model_config, replica_no=rank_to_delete + 1)
 
                 time.sleep(1)
+                self.status_reporter.run_id = self.run_id
                 self.status_reporter.report_client_id_status(
                     self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
                     is_from_model=True, run_id=self.run_id)
@@ -310,11 +341,11 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                 if rank_to_delete == 0:
                     pass
             return True
-        elif op == "update":
+        elif op == "update" or op == "rollback":
             # Update is combine of delete and add
-            worker_ip = GeneralConstants.get_ip_address(self.request_json)
+            worker_ip = self.get_ip_address(self.request_json)
             for rank in replica_rank_to_update:
-                # Delete the container
+                # Delete a replica (container) if it exists
                 self.replica_handler.remove_replica(rank)
 
                 FedMLModelCache.get_instance().set_redis_params()
@@ -322,33 +353,36 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                     run_id, end_point_name, model_name, self.edge_id, rank + 1)
 
                 replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)
+                logging.info(f"Release gpu ids {replica_occupied_gpu_ids} for update / rollback.")
 
-                JobRunnerUtils.get_instance().release_partial_job_gpu(run_id, self.edge_id, replica_occupied_gpu_ids)
+                # TODO (Raphael) check if this will allow another job to seize the gpu during high concurrency:
+                try:
+                    JobRunnerUtils.get_instance().release_partial_job_gpu(run_id, self.edge_id, replica_occupied_gpu_ids)
+                except Exception as e:
+                    if op == "rollback":
+                        pass
+                    else:
+                        logging.error(f"Failed to release gpu ids {replica_occupied_gpu_ids} for update.")
+                        return False
 
                 # Delete the deployment result from local db
                 FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id_and_rank(
                     run_id, end_point_name, model_name, self.edge_id, rank)
 
+                logging.info(f"Delete replica with no {rank + 1} successfully.")
                 time.sleep(1)
 
-                # Add the container
+                # Add a replica (container)
                 # TODO: Reduce the duplicated code
+                logging.info(f"Start to deploy the model with replica no {rank + 1} ...")
                 try:
                     running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
                         start_deployment(
-                            inference_end_point_id, end_point_name, model_id, model_version,
-                            unzip_package_path, model_bin_file, model_name, inference_engine,
-                            ClientConstants.INFERENCE_HTTP_PORT,
-                            ClientConstants.INFERENCE_GRPC_PORT,
-                            ClientConstants.INFERENCE_METRIC_PORT,
-                            use_gpu, memory_size,
-                            ClientConstants.INFERENCE_CONVERTOR_IMAGE,
-                            ClientConstants.INFERENCE_SERVER_IMAGE,
-                            worker_ip,
-                            self.model_is_from_open, model_config_parameters,
-                            model_from_open,
-                            token,
-                            master_ip, self.edge_id, master_device_id=device_ids[0], replica_rank=rank,
+                            end_point_id=inference_end_point_id, end_point_name=end_point_name, model_id=model_id,
+                            model_version=model_version, model_storage_local_path=unzip_package_path,
+                            inference_model_name=model_name, inference_engine=inference_engine,
+                            infer_host=worker_ip, master_ip=master_ip, edge_id=self.edge_id,
+                            master_device_id=device_ids[0], replica_rank=rank,
                             gpu_per_replica=int(self.replica_handler.gpu_per_replica)
                         )
                 except Exception as e:
@@ -356,20 +390,30 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                     logging.error(f"Exception at deployment: {traceback.format_exc()}")
 
                 if inference_output_url == "":
-                    logging.error("failed to deploy the model...")
+                    logging.error("Failed to deploy the model...")
+
+                    # If update failed, should release this replica's gpu
+                    FedMLModelCache.get_instance().set_redis_params()
+                    replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids(
+                        run_id, end_point_name, model_name, self.edge_id, rank + 1)
+
+                    replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)
+
+                    JobRunnerUtils.get_instance().release_partial_job_gpu(
+                        run_id, self.edge_id, replica_occupied_gpu_ids)
 
                     result_payload = self.send_deployment_results(
                         end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
                         model_id, model_name, inference_output_url, inference_model_version, inference_port,
                         inference_engine, model_metadata, model_config)
 
+                    self.status_reporter.run_id = self.run_id
                     self.status_reporter.report_client_id_status(
                         self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
                         is_from_model=True, run_id=self.run_id)
-
                     return False
                 else:
-                    logging.info("finished deployment, continue to send results to master...")
+                    logging.info("Finished deployment, continue to send results to master...")
                     result_payload = self.send_deployment_results(
                         end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                         model_id, model_name, inference_output_url, model_version, inference_port_external,
@@ -390,6 +434,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                     logging.info(f"Update replica with no {rank + 1}  successfully. Op num {op_num}")
                     time.sleep(5)
             time.sleep(1)
+            self.status_reporter.run_id = self.run_id
             self.status_reporter.report_client_id_status(
                 self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
                 is_from_model=True, run_id=self.run_id)
@@ -437,7 +482,9 @@ def send_deployment_results(self, end_point_name, device_id, model_status,
                                 model_id, model_name, model_inference_url,
                                 model_version, inference_port, inference_engine,
                                 model_metadata, model_config, replica_no=1):
-        deployment_results_topic = "model_device/model_device/return_deployment_result/{}".format(device_id)
+        deployment_results_topic = "model_device/model_device/return_deployment_result/{}/{}".format(
+            self.run_id, device_id)
+
         deployment_results_payload = self.construct_deployment_results(
             end_point_name, device_id, model_status,
             model_id, model_name, model_inference_url,
@@ -445,7 +492,7 @@ def send_deployment_results(self, end_point_name, device_id, model_status,
             model_metadata, model_config, replica_no=replica_no)
 
         logging.info("[client] send_deployment_results: topic {}, payload {}.".format(deployment_results_topic,
-                                                               deployment_results_payload))
+                                                                                      deployment_results_payload))
         self.message_center.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload))
         return deployment_results_payload
 
@@ -455,18 +502,8 @@ def send_deployment_status(self, end_point_name, device_id,
                                inference_port=ClientConstants.MODEL_INFERENCE_DEFAULT_PORT,
                                replica_no=1,     # start from 1
                                ):
-        deployment_status_topic = "model_device/model_device/return_deployment_status/{}".format(device_id)
-        deployment_status_payload = self.construct_deployment_status(
-            end_point_name, device_id,
-            model_id, model_name, model_version,
-            model_inference_url, model_status,
-            inference_port=inference_port,
-            replica_no=replica_no)
-
-        logging.info("[client] send_deployment_status: topic {}, payload {}.".format(deployment_status_topic,
-                                                                                     deployment_status_payload))
-        self.message_center.send_message_json(deployment_status_topic, json.dumps(deployment_status_payload))
-        return deployment_status_payload
+        # Deprecated
+        pass
 
     def reset_devices_status(self, edge_id, status):
         self.status_reporter.run_id = self.run_id
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index 0543459dd0..856abdac40 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -255,10 +255,15 @@ def callback_start_train(self, topic, payload):
         run_params = run_config.get("parameters", {})
         serving_args = run_params.get("serving_args", {})
         endpoint_id = serving_args.get("endpoint_id", None)
-        cuda_visible_gpu_ids_str = JobRunnerUtils.get_instance().occupy_gpu_ids(
-            run_id, matched_gpu_num, edge_id, inner_id=endpoint_id,
-            model_master_device_id=model_master_device_id,
-            model_slave_device_id=model_slave_device_id)
+        job_yaml = run_params.get("job_yaml", {})
+        job_type = job_yaml.get("job_type", SchedulerConstants.JOB_TASK_TYPE_TRAIN)
+        cuda_visible_gpu_ids_str = None
+        if not (job_type == SchedulerConstants.JOB_TASK_TYPE_SERVE or
+                job_type == SchedulerConstants.JOB_TASK_TYPE_DEPLOY):
+            cuda_visible_gpu_ids_str = JobRunnerUtils.get_instance().occupy_gpu_ids(
+                run_id, matched_gpu_num, edge_id, inner_id=endpoint_id,
+                model_master_device_id=model_master_device_id,
+                model_slave_device_id=model_slave_device_id)
         logging.info(
             f"Run started, available gpu ids: {JobRunnerUtils.get_instance().get_available_gpu_id_list(edge_id)}")
 
diff --git a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
index cd8e40d7e8..6e2a03a8b4 100755
--- a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
@@ -1,5 +1,7 @@
 import copy
+import json
 import os
+import fedml
 from ..comm_utils.job_cleanup import JobCleanup
 from .base_slave_protocol_manager import FedMLBaseSlaveProtocolManager
 from .launch_job_runner_manager import FedMLLaunchJobRunnerManager
@@ -11,15 +13,38 @@ class FedMLLaunchSlaveProtocolManager(FedMLBaseSlaveProtocolManager):
 
     def __init__(self, args, agent_config=None):
         FedMLBaseSlaveProtocolManager.__init__(self, args, agent_config=agent_config)
+        self.topic_request_deploy_slave_device_info_from_mlops = None
+        self.topic_request_deploy_master_device_info_from_mlops = None
+        self.topic_request_edge_device_info_from_mlops = None
 
     # Override
     def generate_topics(self):
         super().generate_topics()
 
+        # The topic for requesting device info from mlops.
+        self.topic_request_edge_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.edge_id}"
+
+        # The topic for requesting deployment master device info from mlops.
+        self.topic_request_deploy_master_device_info_from_mlops = f"deploy/mlops/master_agent/request_device_info/{self.model_device_server_id}"
+
+        # The topic for requesting deployment slave device info from mlops.
+        self.topic_request_deploy_slave_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.model_device_client_edge_id_list[0]}"
+
+        self.add_subscribe_topic(self.topic_request_edge_device_info_from_mlops)
+        self.add_subscribe_topic(self.topic_request_deploy_master_device_info_from_mlops)
+        self.add_subscribe_topic(self.topic_request_deploy_slave_device_info_from_mlops)
+
     # Override
     def add_protocol_handler(self):
         super().add_protocol_handler()
 
+        self.add_message_listener(
+            self.topic_request_edge_device_info_from_mlops, self.callback_response_device_info_to_mlops)
+        self.add_message_listener(
+            self.topic_request_deploy_master_device_info_from_mlops, self.callback_response_device_info_to_mlops)
+        self.add_message_listener(
+            self.topic_request_deploy_slave_device_info_from_mlops, self.callback_response_device_info_to_mlops)
+
     # Override
     def _generate_protocol_manager_instance(self, args, agent_config=None):
         return FedMLLaunchSlaveProtocolManager(args, agent_config=agent_config)
@@ -102,3 +127,29 @@ def _init_extra_items(self):
         self.mlops_metrics.report_device_realtime_perf(self.args, self.args.agent_config["mqtt_config"])
         pass
 
+    def callback_response_device_info_to_mlops(self, topic, payload):
+        payload_json = json.loads(payload)
+        server_id = payload_json.get("server_id", 0)
+        run_id = payload_json.get("run_id", 0)
+        listen_edge_id = str(topic).split("/")[-1]
+        context = payload_json.get("context", None)
+        response_topic = "deploy/slave_agent/mlops/response_device_info"
+        if self.mlops_metrics is not None and self.model_device_client_edge_id_list is not None and \
+                self.model_device_server_id is not None:
+            device_info_json = {
+                "edge_id": listen_edge_id,
+                "fedml_version": fedml.__version__,
+                "user_id": self.args.user
+            }
+            slave_device_ids = list()
+            for model_client_edge_id in self.model_device_client_edge_id_list:
+                slave_device_ids.append(model_client_edge_id)
+            response_payload = {"slave_device_id": self.model_device_client_edge_id_list[0],
+                                "slave_device_id_list": slave_device_ids,
+                                "master_device_id": self.model_device_server_id,
+                                "run_id": run_id, "edge_id": listen_edge_id,
+                                "edge_info": device_info_json}
+            if context is not None:
+                response_payload["context"] = context
+            self.message_center.send_message(response_topic, json.dumps(response_payload), run_id=run_id)
+

From b3742bb710068ea85f73baf12bd14930633854ba Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Tue, 9 Apr 2024 21:32:13 +0800
Subject: [PATCH 004/282] [CoreEngine] make the latest deployment module work
 with the refactored paradigm.

---
 .../model_scheduler/job_runner_msg_sender.py  |  7 ++--
 .../model_scheduler/master_job_runner.py      | 26 ++++++--------
 .../master_job_runner_manager.py              |  5 +--
 .../master_protocol_manager.py                |  3 +-
 .../scheduler_core/compute_status_cache.py    |  2 ++
 .../scheduler_core/message_center.py          | 14 ++++++--
 .../scheduler_base_protocol_manager.py        |  3 ++
 .../status_manager_protocols.py               |  2 +-
 .../scheduler/slave/slave_protocol_manager.py | 34 +++++++++++--------
 9 files changed, 57 insertions(+), 39 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py
index acce17d20b..104dacf716 100755
--- a/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py
+++ b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py
@@ -114,7 +114,7 @@ def send_deployment_start_request_to_edge(self, edge_id, request_json):
         logging.info("start_deployment: send topic " + topic_start_deployment + " to client...")
         self.message_center.send_message_json(topic_start_deployment, json.dumps(request_json))
 
-    def send_deployment_delete_request_to_edges(self, payload, model_msg_object):
+    def send_deployment_delete_request_to_edges(self, payload, model_msg_object, message_center=None):
         edge_id_list_to_delete = model_msg_object.device_ids
 
         # Remove the model master node id from the list using index 0
@@ -128,7 +128,10 @@ def send_deployment_delete_request_to_edges(self, payload, model_msg_object):
             # send delete deployment request to each model device
             topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(edge_id))
             logging.info("delete_deployment: send topic " + topic_delete_deployment + " to client...")
-            self.message_center.send_message_json(topic_delete_deployment, payload)
+            if message_center is not None:
+                message_center.send_message_json(topic_delete_deployment, payload)
+            else:
+                self.message_center.send_message_json(topic_delete_deployment, payload)
 
     def send_deployment_stop_request_to_edges(self, edge_id_list, payload):
         for edge_id in edge_id_list:
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index 867f299ccc..d1cc68dc98 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -221,24 +221,20 @@ def process_deployment_result_message(self, topic=None, payload=None):
             logging.warning(f"Failed to change the logging handler due to {e}.")
 
         logging.info("========== callback_deployment_result_message ==========\n")
-        #  Identify the operation for this run (add, remove, update)
-        if run_id_str not in self.running_request_json:
-            logging.error(f"Run id {run_id_str} is not in the running request json.")
-            return
 
         # The rolling update and scale out / in operation should not happen at the same time
-        assert not ("replica_num_diff" in self.running_request_json[run_id_str] and
-                    len(self.running_request_json[run_id_str]["replica_num_diff"]) > 0 and
-                    "replica_version_diff" in self.running_request_json[run_id_str])
+        assert not ("replica_num_diff" in self.request_json and
+                    len(self.request_json["replica_num_diff"]) > 0 and
+                    "replica_version_diff" in self.request_json)
 
-        if "replica_version_diff" in self.running_request_json[run_id_str]:
+        if "replica_version_diff" in self.request_json:
             run_operation = "UPDATE"
-        elif "replica_num_diff" in self.running_request_json[run_id_str] and \
-                len(self.running_request_json[run_id_str]["replica_num_diff"]) > 0:
+        elif "replica_num_diff" in self.request_json and \
+                len(self.request_json["replica_num_diff"]) > 0:
             run_operation = "ADD_OR_REMOVE"
         else:
             logging.error(f"Unsupported operation for run id {run_id_str}. and request json "
-                          f"{self.running_request_json[run_id_str]}")
+                          f"{self.request_json}")
             return
 
         logging.info(f"End point {end_point_id}; Device {device_id}; replica {replica_no}; "
@@ -249,8 +245,8 @@ def process_deployment_result_message(self, topic=None, payload=None):
         # logging.info(f"The current replica controller state is "
         #              f"Total version diff num {this_run_controller.total_replica_version_diff_num}")
         # logging.info(f"self.request_json now {self.request_json}")    # request_json will be deprecated
-        # this_run_request_json = self.running_request_json.get(run_id_str, None)
-        # logging.info(f"self.running_request_json now {this_run_request_json}")
+        # this_run_request_json = self.request_json
+        # logging.info(f"self.request_json now {this_run_request_json}")
 
         # Set redis + sqlite deployment result
         FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
@@ -290,7 +286,7 @@ def process_deployment_result_message(self, topic=None, payload=None):
                 # Change the target version to the start version
                 self.replica_controller.rollback_setback_target_replica_version()
 
-                self.running_request_json[run_id_str]["replica_version_diff"] = copy.deepcopy(rollback_version_diff)
+                self.request_json["replica_version_diff"] = copy.deepcopy(rollback_version_diff)
 
                 # Send the rollback message to the worker devices
                 self.send_rollback_msg(run_id_str)
@@ -317,7 +313,7 @@ def process_deployment_result_message(self, topic=None, payload=None):
         logging.info("callback_deployment_result_message: topic {}, payload {}, result mapping {}.".format(
             topic, payload, self.slave_deployment_results_map))
 
-        request_json = self.running_request_json.get(run_id_str, None)
+        request_json = self.request_json
         if request_json is None:
             logging.error(f"The endpoint {end_point_id} is no longer running.")
             self.send_deployment_status(
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
index 7221a09574..0bfc205b34 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
@@ -42,10 +42,11 @@ def send_deployment_stages(
                 message_center=message_center
             )
 
-    def send_deployment_delete_request_to_edges(self, end_point_id, payload, model_msg_object):
+    def send_deployment_delete_request_to_edges(self, end_point_id, payload, model_msg_object, message_center=None):
         run_id_str = str(end_point_id)
         if self.job_runners.get(run_id_str, None) is not None:
-            self.job_runners[run_id_str].send_deployment_delete_request_to_edges(payload, model_msg_object)
+            self.job_runners[run_id_str].send_deployment_delete_request_to_edges(
+                payload, model_msg_object, message_center=message_center)
 
     def stop_device_inference_monitor(self, run_id, end_point_name, model_id, model_name, model_version):
         run_id_str = str(run_id)
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index 8566848ec6..962dcbbcb3 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -114,7 +114,8 @@ def callback_delete_deployment(self, topic, payload):
             delete_end_point(model_msg_object.inference_end_point_id, model_msg_object.end_point_name,
                              model_msg_object.model_name, model_msg_object.model_version)
 
-        FedMLDeployJobRunnerManager.get_instance().send_deployment_delete_request_to_edges(payload, model_msg_object)
+        FedMLDeployJobRunnerManager.get_instance().send_deployment_delete_request_to_edges(
+            model_msg_object.run_id, payload, model_msg_object, message_center=self.message_center)
 
         FedMLDeployJobRunnerManager.get_instance().stop_job_runner(model_msg_object.run_id)
 
diff --git a/python/fedml/computing/scheduler/scheduler_core/compute_status_cache.py b/python/fedml/computing/scheduler/scheduler_core/compute_status_cache.py
index a1929abbef..f224806b8c 100755
--- a/python/fedml/computing/scheduler/scheduler_core/compute_status_cache.py
+++ b/python/fedml/computing/scheduler/scheduler_core/compute_status_cache.py
@@ -42,6 +42,8 @@ def get_job_status(self, run_id):
         return status
 
     def save_device_status_in_job(self, run_id, device_id, status):
+        if status is None:
+            return
         try:
             self.redis_connection.set(self._get_device_status_in_job_key(run_id, device_id), status)
         except Exception as e:
diff --git a/python/fedml/computing/scheduler/scheduler_core/message_center.py b/python/fedml/computing/scheduler/scheduler_core/message_center.py
index 7ae1e4c0b5..dcf21d33b7 100755
--- a/python/fedml/computing/scheduler/scheduler_core/message_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/message_center.py
@@ -200,6 +200,7 @@ def run_sender(self, message_event, message_queue, message_center_name):
 
         while True:
             message_entity = None
+            message_body = None
             try:
                 self.check_message_stop_event()
             except MessageCenterStoppedException as e:
@@ -242,7 +243,7 @@ def run_sender(self, message_event, message_queue, message_center_name):
                         f"payload {message_entity.payload}, {traceback.format_exc()}"
                     )
                 else:
-                    logging.info(f"Failed to send the message: {traceback.format_exc()}")
+                    logging.info(f"Failed to send the message with body {message_body}, {traceback.format_exc()}")
 
         self.release_sender_mqtt_mgr()
 
@@ -291,11 +292,18 @@ def get_message_runner(self):
     def get_listener_message_queue(self):
         return self.listener_message_queue
 
-    def start_listener(self, sender_message_queue=None, agent_config=None, message_center_name=None):
+    def setup_listener_message_queue(self):
+        self.listener_message_queue = Queue()
+
+    def start_listener(self, sender_message_queue=None, listener_message_queue=None, agent_config=None, message_center_name=None):
         if self.listener_message_center_process is not None:
             return
 
-        self.listener_message_queue = Queue()
+        if listener_message_queue is None:
+            if self.listener_message_queue is None:
+                self.listener_message_queue = Queue()
+        else:
+            self.listener_message_queue = listener_message_queue
         self.listener_message_event = multiprocessing.Event()
         self.listener_message_event.clear()
         self.listener_agent_config = agent_config
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
index 4a0c950655..8c1756880a 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
@@ -82,6 +82,9 @@ def initialize(self):
         # Start the message center to process edge related messages.
         self.setup_message_center()
 
+        # Setup the message listener queue
+        self.setup_listener_message_queue()
+
         # Start the status center to process edge related status.
         self.start_status_listener_center()
 
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
index 06b222cfd1..1a43653bd9 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
@@ -300,4 +300,4 @@ def status_center_request_job_status_from_master_in_slave_agent(self, topic, pay
         # Request the job status from master agent.
         topic_request_job_status = f"{GeneralConstants.MSG_TOPIC_REQUEST_JOB_STATUS_PREFIX}{master_id}"
         payload_request_job_status = {"run_id": run_id, "edge_id": edge_id}
-        self.message_center.send_message(topic_request_job_status, payload_request_job_status)
+        self.message_center.send_message(topic_request_job_status, json.dumps(payload_request_job_status))
diff --git a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
index 6e2a03a8b4..ef8dac8730 100755
--- a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
@@ -24,27 +24,12 @@ def generate_topics(self):
         # The topic for requesting device info from mlops.
         self.topic_request_edge_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.edge_id}"
 
-        # The topic for requesting deployment master device info from mlops.
-        self.topic_request_deploy_master_device_info_from_mlops = f"deploy/mlops/master_agent/request_device_info/{self.model_device_server_id}"
-
-        # The topic for requesting deployment slave device info from mlops.
-        self.topic_request_deploy_slave_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.model_device_client_edge_id_list[0]}"
-
         self.add_subscribe_topic(self.topic_request_edge_device_info_from_mlops)
-        self.add_subscribe_topic(self.topic_request_deploy_master_device_info_from_mlops)
-        self.add_subscribe_topic(self.topic_request_deploy_slave_device_info_from_mlops)
 
     # Override
     def add_protocol_handler(self):
         super().add_protocol_handler()
 
-        self.add_message_listener(
-            self.topic_request_edge_device_info_from_mlops, self.callback_response_device_info_to_mlops)
-        self.add_message_listener(
-            self.topic_request_deploy_master_device_info_from_mlops, self.callback_response_device_info_to_mlops)
-        self.add_message_listener(
-            self.topic_request_deploy_slave_device_info_from_mlops, self.callback_response_device_info_to_mlops)
-
     # Override
     def _generate_protocol_manager_instance(self, args, agent_config=None):
         return FedMLLaunchSlaveProtocolManager(args, agent_config=agent_config)
@@ -121,6 +106,9 @@ def _init_extra_items(self):
         os.environ["FEDML_DEPLOY_MASTER_ID"] = str(self.model_device_server_id)
         os.environ["FEDML_DEPLOY_WORKER_IDS"] = str(self.model_device_client_edge_id_list)
 
+        # Subscribe handshaking messages from MLOps.
+        self.subscribe_handshaking_messages_from_mlops()
+
         # Start the monitor process
         self.args = copy.deepcopy(in_args)
         self.mlops_metrics.stop_device_realtime_perf()
@@ -153,3 +141,19 @@ def callback_response_device_info_to_mlops(self, topic, payload):
                 response_payload["context"] = context
             self.message_center.send_message(response_topic, json.dumps(response_payload), run_id=run_id)
 
+    def subscribe_handshaking_messages_from_mlops(self):
+        # The topic for requesting deployment master device info from mlops.
+        self.topic_request_deploy_master_device_info_from_mlops = f"deploy/mlops/master_agent/request_device_info/{self.model_device_server_id}"
+
+        # The topic for requesting deployment slave device info from mlops.
+        self.topic_request_deploy_slave_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.model_device_client_edge_id_list[0]}"
+
+        self.add_subscribe_topic(self.topic_request_deploy_master_device_info_from_mlops)
+        self.add_subscribe_topic(self.topic_request_deploy_slave_device_info_from_mlops)
+
+        self.add_message_listener(
+            self.topic_request_edge_device_info_from_mlops, self.callback_response_device_info_to_mlops)
+        self.add_message_listener(
+            self.topic_request_deploy_master_device_info_from_mlops, self.callback_response_device_info_to_mlops)
+        self.add_message_listener(
+            self.topic_request_deploy_slave_device_info_from_mlops, self.callback_response_device_info_to_mlops)
\ No newline at end of file

From d6c5a78e76d18721a634ee66f8ed5a1a2951e62c Mon Sep 17 00:00:00 2001
From: alaydshah <alay11shah@gmail.com>
Date: Mon, 15 Apr 2024 22:49:00 +0000
Subject: [PATCH 005/282] Minor Fixes

---
 .../computing/scheduler/scheduler_core/account_manager.py | 2 +-
 .../scheduler/scheduler_core/status_manager_protocols.py  | 8 +++-----
 .../scheduler/slave/base_slave_protocol_manager.py        | 1 -
 python/fedml/computing/scheduler/slave/client_login.py    | 2 +-
 4 files changed, 5 insertions(+), 8 deletions(-)

diff --git a/python/fedml/computing/scheduler/scheduler_core/account_manager.py b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
index 61ffd20988..da04fc3989 100755
--- a/python/fedml/computing/scheduler/scheduler_core/account_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
@@ -184,7 +184,7 @@ def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None,
         # Check if it is running in the fedml docker hub
         is_from_fedml_docker_hub = False
         dock_loc_file = GeneralConstants.get_deploy_docker_location_file(is_master=is_master) \
-            if is_deploy else GeneralConstants.get_deploy_docker_location_file(is_master=is_master)
+            if is_deploy else GeneralConstants.get_launch_docker_location_file(is_master=is_master)
         if os.path.exists(dock_loc_file):
             is_from_fedml_docker_hub = True
 
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
index 1a43653bd9..4d2cf3a5ed 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
@@ -92,11 +92,9 @@ def status_center_process_master_status(self, topic, payload):
         run_id_str = str(run_id)
 
         # Process the job status
-        if status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED:
-            self.process_job_completed_status(server_id, status)
-        elif status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED:
-            self.process_job_completed_status(server_id, status)
-        elif status == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED:
+        if status in (ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED,
+                      ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED,
+                      ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED):
             self.process_job_completed_status(server_id, status)
         elif status == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION:
             self.process_job_exception_status(server_id, status)
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index 856abdac40..514aa98cd7 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -33,7 +33,6 @@ def __init__(self, args, agent_config=None):
         self.message_status_runner = None
         self.message_center = None
         self.status_center = None
-        self.message_center_name = "master_agent"
         self.run_id = None
         self.edge_id = args.edge_id
         self.general_edge_id = None
diff --git a/python/fedml/computing/scheduler/slave/client_login.py b/python/fedml/computing/scheduler/slave/client_login.py
index 37a6dc8064..a4df2ccb6a 100755
--- a/python/fedml/computing/scheduler/slave/client_login.py
+++ b/python/fedml/computing/scheduler/slave/client_login.py
@@ -30,7 +30,7 @@ def logout():
     if args.api_key == "":
         args.api_key = args.user
 
-    fedml.set_env_version("test")
+    # fedml.set_env_version("test")
 
     if args.local_on_premise_platform_host != "127.0.0.1":
         fedml.set_local_on_premise_platform_host(args.local_on_premise_platform_host)

From 3e6e7d1847009f3e756baab80fb45102623600ee Mon Sep 17 00:00:00 2001
From: alaydshah <alay11shah@gmail.com>
Date: Tue, 16 Apr 2024 18:02:50 +0000
Subject: [PATCH 006/282] Fix logging

---
 .../scheduler_core/scheduler_base_protocol_manager.py  |  2 +-
 python/fedml/core/mlops/mlops_utils.py                 | 10 ++++++----
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
index 8c1756880a..e3cac7a425 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
@@ -30,7 +30,7 @@ def __init__(self, args, agent_config=None, is_master=False):
         self.message_status_runner = None
         self.message_center = None
         self.status_center = None
-        self.message_center_name = "master_agent" if not is_master else "slave_agent"
+        self.message_center_name = "master_agent" if is_master else "slave_agent"
         self.run_id = None
         self.edge_id = args.edge_id
         self.general_edge_id = None
diff --git a/python/fedml/core/mlops/mlops_utils.py b/python/fedml/core/mlops/mlops_utils.py
index 7313141550..1d6db23d02 100644
--- a/python/fedml/core/mlops/mlops_utils.py
+++ b/python/fedml/core/mlops/mlops_utils.py
@@ -128,15 +128,17 @@ def get_program_prefix(args, edge_id):
     @staticmethod
     def get_edge_id_from_args(args):
         if args.role == "server":
-            if hasattr(args, "server_id"):
+            # Considering that 0 is a valid value, we need to ensure it is not None rather than solely checking
+            # for truthiness
+            if getattr(args, "server_id", None) is not None:
                 edge_id = args.server_id
             else:
-                if hasattr(args, "edge_id"):
+                if getattr(args, "edge_id", None) is not None:
                     edge_id = args.edge_id
                 else:
                     edge_id = 0
         else:
-            if hasattr(args, "client_id"):
+            if getattr(args, "client_id", None) is not None:
                 edge_id = args.client_id
             elif hasattr(args, "client_id_list"):
                 if args.client_id_list is None:
@@ -148,7 +150,7 @@ def get_edge_id_from_args(args):
                     else:
                         edge_id = 0
             else:
-                if hasattr(args, "edge_id"):
+                if getattr(args, "edge_id", None) is not None:
                     edge_id = args.edge_id
                 else:
                     edge_id = 0

From 8a544172638f2cb3a12dc9865ce0acbef20b8dac Mon Sep 17 00:00:00 2001
From: Alay Dilipbhai Shah <alay11shah@gmail.com>
Date: Tue, 16 Apr 2024 11:04:31 -0700
Subject: [PATCH 007/282] Update client_login.py

---
 python/fedml/computing/scheduler/slave/client_login.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/slave/client_login.py b/python/fedml/computing/scheduler/slave/client_login.py
index a4df2ccb6a..37a6dc8064 100755
--- a/python/fedml/computing/scheduler/slave/client_login.py
+++ b/python/fedml/computing/scheduler/slave/client_login.py
@@ -30,7 +30,7 @@ def logout():
     if args.api_key == "":
         args.api_key = args.user
 
-    # fedml.set_env_version("test")
+    fedml.set_env_version("test")
 
     if args.local_on_premise_platform_host != "127.0.0.1":
         fedml.set_local_on_premise_platform_host(args.local_on_premise_platform_host)

From 852a3bf4b6e4d9f65e6a317579c691b7e6a36f21 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Fri, 19 Apr 2024 22:43:52 -0700
Subject: [PATCH 008/282] Update Launch Job Docker Image name

---
 python/examples/launch/hello_job_with_container.yaml            | 2 +-
 python/fedml/computing/scheduler/comm_utils/constants.py        | 2 +-
 .../driver_example/customized_job_example/train_job.yaml        | 2 +-
 python/fedml/workflow/driver_example/hello_world_job.yaml       | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/python/examples/launch/hello_job_with_container.yaml b/python/examples/launch/hello_job_with_container.yaml
index 2c520beb24..26202a3d98 100755
--- a/python/examples/launch/hello_job_with_container.yaml
+++ b/python/examples/launch/hello_job_with_container.yaml
@@ -43,7 +43,7 @@ job_type: train              # options: train, deploy, federate
 job_subtype: generate_training
 
 docker:
-  image: fedml/fedml-default-launch:cu12.1-u22.04
+  image: fedml/fedml-launch-job:cu12.1-u22.04
   #registry: docker.io
   #username: my_hub_user
   #password: my_hub_password
diff --git a/python/fedml/computing/scheduler/comm_utils/constants.py b/python/fedml/computing/scheduler/comm_utils/constants.py
index b1294181bb..f89d5640ce 100644
--- a/python/fedml/computing/scheduler/comm_utils/constants.py
+++ b/python/fedml/computing/scheduler/comm_utils/constants.py
@@ -103,7 +103,7 @@ class SchedulerConstants:
     RUN_PROCESS_TYPE_BOOTSTRAP_PROCESS = "bootstrap-process"
 
     FEDML_DEFAULT_LAUNCH_CONTAINER_PREFIX = "fedml_default_launch_container"
-    FEDML_DEFAULT_LAUNCH_IMAGE = "fedml/fedml-default-launch:cu12.1-u22.04"
+    FEDML_DEFAULT_LAUNCH_IMAGE = "fedml/fedml-launch-job:cu12.1-u22.04"
     FEDML_DEFAULT_LOG_DIR = ".fedml/fedml-client/fedml/logs"
     FEDML_DEFAULT_DATA_DIR = ".fedml/fedml-client/fedml/data"
 
diff --git a/python/fedml/workflow/driver_example/customized_job_example/train_job.yaml b/python/fedml/workflow/driver_example/customized_job_example/train_job.yaml
index e057791431..2ccbc897f0 100755
--- a/python/fedml/workflow/driver_example/customized_job_example/train_job.yaml
+++ b/python/fedml/workflow/driver_example/customized_job_example/train_job.yaml
@@ -7,7 +7,7 @@ workspace: train_job
 # It should be the full name of the image with tag.
 # If you want to use the default image, it can be empty.
 docker:
-  image: fedml/fedml-default-launch:cu12.1-u22.04
+  image: fedml/fedml-launch-job:cu12.1-u22.04
 
 # Running entry commands which will be executed as the job entry point.
 # Support multiple lines, which can not be empty.
diff --git a/python/fedml/workflow/driver_example/hello_world_job.yaml b/python/fedml/workflow/driver_example/hello_world_job.yaml
index e1dcb02f7e..e63712f99a 100755
--- a/python/fedml/workflow/driver_example/hello_world_job.yaml
+++ b/python/fedml/workflow/driver_example/hello_world_job.yaml
@@ -10,7 +10,7 @@ workspace: hello_world
 # It should be the full name of the image with tag.
 # If you want to use the default image, it can be empty.
 #docker:
-#  image: fedml/fedml-default-launch:cu12.1-u22.04
+#  image: fedml/fedml-launch-job:cu12.1-u22.04
 
 # Running entry commands which will be executed as the job entry point.
 # Support multiple lines, which can not be empty.

From 06ed07fa8c4796a7719a9882cc2086996ea88424 Mon Sep 17 00:00:00 2001
From: bhargav191098 <bhargav3514@gmail.com>
Date: Wed, 1 May 2024 19:14:26 -0700
Subject: [PATCH 009/282] Adding tags to data storage object and the
 corresponding pretty table changes

---
 python/fedml/api/__init__.py        |  5 ++---
 python/fedml/api/modules/storage.py |  5 +++--
 python/fedml/cli/modules/storage.py | 20 ++++++++++++++------
 3 files changed, 19 insertions(+), 11 deletions(-)

diff --git a/python/fedml/api/__init__.py b/python/fedml/api/__init__.py
index 4e004f07d3..3e75b987d6 100755
--- a/python/fedml/api/__init__.py
+++ b/python/fedml/api/__init__.py
@@ -179,13 +179,12 @@ def cluster_killall(api_key=None) -> bool:
     return cluster.kill(cluster_names=(), api_key=api_key)
 
 
-def upload(data_path, api_key=None, service="R2", name=None, description=None, metadata=None, show_progress=False,
+def upload(data_path, api_key=None, tag_list=[], service="R2", name=None, description=None, metadata=None, show_progress=False,
            out_progress_to_err=True, progress_desc=None) -> FedMLResponse:
-    return storage.upload(data_path=data_path, api_key=api_key, name=name, description=description,
+    return storage.upload(data_path=data_path, api_key=api_key, name=name, description=description, tag_list=tag_list,
                           service=service, progress_desc=progress_desc, show_progress=show_progress,
                           out_progress_to_err=out_progress_to_err, metadata=metadata)
 
-
 def get_storage_user_defined_metadata(data_name, api_key=None) -> FedMLResponse:
     return storage.get_user_metadata(data_name=data_name, api_key=api_key)
 
diff --git a/python/fedml/api/modules/storage.py b/python/fedml/api/modules/storage.py
index 1582788e3a..51f58539bf 100644
--- a/python/fedml/api/modules/storage.py
+++ b/python/fedml/api/modules/storage.py
@@ -18,12 +18,13 @@ def __init__(self, data: dict):
         self.createdAt = data.get("createTime", None)
         self.updatedAt = data.get("updateTime", None)
         self.size = _get_size(data.get("fileSize",None))
+        self.tag_list = data.get("tags", None)
 
 
 # Todo (alaydshah): Store service name in metadata
 # Todo (alaydshah): If data already exists, don't upload again. Instead suggest to use update command
 
-def upload(data_path, api_key, name, description, service, show_progress, out_progress_to_err, progress_desc,
+def upload(data_path, api_key, name, description, tag_list, service, show_progress, out_progress_to_err, progress_desc,
            metadata) -> FedMLResponse:
     api_key = authenticate(api_key)
 
@@ -58,7 +59,7 @@ def upload(data_path, api_key, name, description, service, show_progress, out_pr
         "description": description,
         "fileSize": file_size,
         "fileUrl": file_uploaded_url,
-        "tagNameList": [],
+        "tagNameList": tag_list,
     }
 
     try:
diff --git a/python/fedml/cli/modules/storage.py b/python/fedml/cli/modules/storage.py
index 93ce273e92..af75cda85f 100644
--- a/python/fedml/cli/modules/storage.py
+++ b/python/fedml/cli/modules/storage.py
@@ -53,6 +53,7 @@ def validate_argument(ctx, param, value):
 @click.option("--user_metadata", "-um", type=str, help="User-defined metadata in the form of a dictionary, for instance, "
                                                        " {'name':'value'} within double quotes. "" "
                                                        "Defaults to None.")
+@click.option("--tags", "-t", type=str, help="Add tags to your data to store. Give tags in comma separated form like 'cv,unet,segmentation' If not provided, the tags will be empty.")
 @click.option('--service', "-s", type=click.Choice(['R2']), default="R2", help="Storage service for object storage. "
                                                                                "Only R2 is supported as of now")
 @click.option(
@@ -65,10 +66,11 @@ def validate_argument(ctx, param, value):
     default="release",
     help=version_help,
 )
-def upload(data_path: str, name: str, user_metadata: str, description: str, version: str, api_key: str, service):
+def upload(data_path: str, name: str, user_metadata: str, description: str, version: str, api_key: str, tags:str, service):
     metadata = _parse_metadata(user_metadata)
+    tag_list = _parse_tags(tags)
     fedml.set_env_version(version)
-    response = fedml.api.upload(data_path=data_path, api_key=api_key, name=name, service=service, show_progress=True,
+    response = fedml.api.upload(data_path=data_path, api_key=api_key, name=name, tag_list = tag_list, service=service, show_progress=True,
                                 description=description, metadata=metadata)
     if response.code == ResponseCode.SUCCESS:
         click.echo(f"Data uploaded successfully. | url: {response.data}")
@@ -96,10 +98,10 @@ def list_data(version, api_key):
         if not response.data:
             click.echo(f"No stored objects found for account linked with apikey: {api_key}")
             return
-        object_list_table = PrettyTable(["Data Name", "Data Size", "Description", "Created At", "Updated At"])
+        object_list_table = PrettyTable(["Data Name", "Data Size", "Description", "Data Tags","Created At", "Updated At"])
         for stored_object in response.data:
             object_list_table.add_row(
-                [stored_object.dataName, stored_object.size, stored_object.description, stored_object.createdAt, stored_object.updatedAt])
+                [stored_object.dataName, stored_object.size, stored_object.description, stored_object.tag_list,stored_object.createdAt, stored_object.updatedAt])
         click.echo(object_list_table)
     else:
         click.echo(f"Failed to list stored objects for account linked with apikey {api_key}. "
@@ -157,8 +159,8 @@ def get_metadata(data_name, version, api_key):
             return
         click.echo(f"Successfully fetched metadata for object {data_name}:")
         # Todo (alaydshah): Add file size and tags
-        metadata_table = PrettyTable(["Data Name","Data Size","Description", "Created At", "Updated At"])
-        metadata_table.add_row([metadata.dataName,metadata.size,metadata.description, metadata.createdAt, metadata.updatedAt])
+        metadata_table = PrettyTable(["Data Name","Data Size","Description","Data Tags","Created At", "Updated At"])
+        metadata_table.add_row([metadata.dataName,metadata.size,metadata.description,metadata.tag_list,metadata.createdAt, metadata.updatedAt])
         click.echo(metadata_table)
         click.echo("")
     else:
@@ -238,3 +240,9 @@ def _parse_metadata(metadata: str):
         click.echo(
             f"Input metadata cannot be evaluated. Please make sure metadata is in the correct format. Error: {e}.")
         exit()
+
+def _parse_tags(tags:str):
+    if not tags:
+        return []
+    tag_list = tags.split(",")
+    return tag_list 
\ No newline at end of file

From 6e09a604b7ee5df016dcb4011a613bd4f1f9e103 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Wed, 1 May 2024 20:04:36 -0700
Subject: [PATCH 010/282] Fix Log Rotation Bug

---
 python/fedml/core/mlops/__init__.py          | 20 ++++++++--------
 python/fedml/core/mlops/mlops_runtime_log.py | 24 ++++++++++++++------
 2 files changed, 26 insertions(+), 18 deletions(-)

diff --git a/python/fedml/core/mlops/__init__.py b/python/fedml/core/mlops/__init__.py
index 77ad06165e..2b4dc24c95 100644
--- a/python/fedml/core/mlops/__init__.py
+++ b/python/fedml/core/mlops/__init__.py
@@ -9,7 +9,6 @@
 import uuid
 from multiprocessing import Process
 
-import click
 import requests
 
 import fedml
@@ -95,9 +94,9 @@ def init(args, should_init_logs=True):
     if not mlops_parrot_enabled(args):
         if not hasattr(args, "config_version"):
             args.config_version = "release"
-        fetch_config(args, args.config_version)
         if should_init_logs:
             MLOpsRuntimeLog.get_instance(args).init_logs()
+        fetch_config(args, args.config_version)
         return
     else:
         if hasattr(args, "simulator_daemon"):
@@ -137,7 +136,7 @@ def init(args, should_init_logs=True):
             MLOpsStore.mlops_project_id = project_id
             MLOpsStore.mlops_run_id = run_id
     if result_project is False or result_run is False:
-        click.echo("Failed to init project and run.")
+        print("Failed to init project and run.")
         return
 
     # Init runtime logs
@@ -973,10 +972,9 @@ def _generate_log_metrics(metrics: dict, step: int = None, customized_step_key:
 
 def log_mlops_running_logs(artifact: fedml.mlops.Artifact, version=None, run_id=None, edge_id=None,
                            only_push_artifact=False):
-    fedml_args = get_fedml_args()
 
     artifact_archive_zip_file, artifact_storage_url = push_artifact_to_s3(
-        artifact, version=version if version is not None else fedml_args.config_version, show_progress=False)
+        artifact, version=version if version is not None else fedml.get_env_version(), show_progress=False)
 
     if only_push_artifact:
         return artifact_storage_url
@@ -1274,8 +1272,8 @@ def bind_simulation_device(args, userid):
             continue
 
     if config_try_count >= 5:
-        click.echo("\nNote: Internet is not connected. "
-                   "Experimental tracking results will not be synchronized to the MLOps (open.fedml.ai).\n")
+        logging.info("\nNote: Internet is not connected. "
+                     "Experimental tracking results will not be synchronized to the MLOps (open.fedml.ai).\n")
         return False
 
     # Build unique device id
@@ -1301,8 +1299,8 @@ def bind_simulation_device(args, userid):
             continue
 
     if edge_id <= 0:
-        click.echo("Oops, you failed to login the FedML MLOps platform.")
-        click.echo("Please check whether your network is normal!")
+        print("Oops, you failed to login the FedML MLOps platform.")
+        print("Please check whether your network is normal!")
         return False
     MLOpsStore.mlops_edge_id = edge_id
     setattr(MLOpsStore.mlops_args, "client_id", edge_id)
@@ -1353,8 +1351,8 @@ def fetch_config(args, version="release"):
             continue
 
     if config_try_count >= 5:
-        click.echo("\nNote: Internet is not connected. "
-                   "Experimental tracking results will not be synchronized to the MLOps (open.fedml.ai).\n")
+        logging.info("\nNote: Internet is not connected. "
+                     "Experimental tracking results will not be synchronized to the MLOps (open.fedml.ai).\n")
         return False
 
 
diff --git a/python/fedml/core/mlops/mlops_runtime_log.py b/python/fedml/core/mlops/mlops_runtime_log.py
index 6992c44555..0bc4dc6b6c 100644
--- a/python/fedml/core/mlops/mlops_runtime_log.py
+++ b/python/fedml/core/mlops/mlops_runtime_log.py
@@ -5,6 +5,7 @@
 import sys
 import threading
 import time
+import shutil
 from logging.handlers import TimedRotatingFileHandler
 
 from fedml import mlops
@@ -12,19 +13,19 @@
 
 LOG_LEVEL = logging.INFO
 ROTATION_FREQUENCY = 'D'
+# when rollover is done, no more than backupCount files are kept - the oldest ones are deleted.
 BACKUP_COUNT = 100
 
 
 class MLOpsFileHandler(TimedRotatingFileHandler):
 
     def __init__(self, run_id, edge_id, log_config_file, filepath):
-        super(MLOpsFileHandler, self).__init__(filename=filepath, when=ROTATION_FREQUENCY, backupCount=BACKUP_COUNT,
-                                               encoding='utf-8')
+        super().__init__(filename=filepath, when=ROTATION_FREQUENCY,
+                         backupCount=BACKUP_COUNT,encoding='utf-8')
         self.run_id = run_id
         self.edge_id = edge_id
         self.file_path = filepath
         self.rotate_count = 0
-        self.backupCount = BACKUP_COUNT
         self.rotator: callable = self.update_config_and_rotate
         self.log_config_file = log_config_file
         self.__initialize_config()
@@ -32,17 +33,26 @@ def __init__(self, run_id, edge_id, log_config_file, filepath):
     def update_config_and_rotate(self, source, dest):
         # source = current log file name
         # dest = log file name (dated)
-        if os.path.exists(source):
-            os.rename(source, dest)
         MLOpsLoggingUtils.acquire_lock()
-        config_data = MLOpsLoggingUtils.load_log_config(self.run_id, self.edge_id, self.log_config_file)
+
+        # Check if the source and destination files exist. If it does, return
+        if os.path.exists(source):
+            # Copy the contents of the source file to the destination file
+            shutil.copy(source, dest)
+            # Clear everything in the source file
+            with open(source, 'w') as src_file:
+                src_file.truncate(0)
+            src_file.close()
+
+        config_data = MLOpsLoggingUtils.load_log_config(self.run_id, self.edge_id,
+                                                        self.log_config_file)
 
         # Update file name of current log file
         config_data[self.rotate_count].file_path = dest
         self.rotate_count += 1
 
         # Store the rotate count, and corresponding log file name in the config file
-        rotated_log_file = LogFile(file_path=source, uploaded_file_index=self.backupCount)
+        rotated_log_file = LogFile(file_path=source)
         config_data[self.rotate_count] = rotated_log_file
         MLOpsLoggingUtils.save_log_config(run_id=self.run_id, device_id=self.edge_id,
                                           log_config_file=self.log_config_file,

From d150998a2a8cf245aef030abbac7896317cac367 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Wed, 1 May 2024 21:50:44 -0700
Subject: [PATCH 011/282] Bug spotted, added FIXME Comment

---
 python/fedml/computing/scheduler/comm_utils/job_monitor.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
index 5874adfef7..fc97a8e077 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
@@ -352,6 +352,8 @@ def monitor_slave_run_process_status(self):
                     continue
 
                 # Check if all processes of the specific run are exited
+                # FIXME: Proactively release the gpu ids when the run processes have not even started yet as the docker
+                #  image is being pulled
                 run_process_list = client_constants.ClientConstants.get_learning_process_list(job.job_id)
                 all_run_processes_exited = True if len(run_process_list) <= 0 else False
                 if all_run_processes_exited:

From 4ce4c78753ae9ef3aa22b0e0578039d9b28a5140 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@fedml.ai>
Date: Wed, 1 May 2024 18:09:21 +0000
Subject: [PATCH 012/282] Disabling replica release after idle secs.

---
 .../model_scheduler/autoscaler/autoscaler.py  | 31 ++++++++++---------
 .../model_scheduler/autoscaler/policies.py    |  2 +-
 2 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
index dd6ca67706..5f0d425505 100644
--- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
+++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
@@ -303,25 +303,26 @@ def scale_operation_endpoint(self,
             # If no metric exists then no scaling operation.
             return scale_op
 
-        # If we continue here, then it means that there was at least one request.
-        # The `most_recent_metric` is of type list, hence we need to access index 0.
-        most_recent_metric = metrics[-1]
-        latest_request_timestamp_micro_secs = most_recent_metric["timestamp"]
-        # The time module does not have a micro-second function built-in, so we need to
-        # divide nanoseconds by 1e3 and convert to micro-seconds.
-        current_time_micro_seconds = time.time_ns() / 1e3
-        # compute elapsed time and convert to seconds
-        elapsed_time_secs = \
-            (current_time_micro_seconds - latest_request_timestamp_micro_secs) / 1e6
-        if elapsed_time_secs > autoscaling_policy.release_replica_after_idle_secs:
+        if autoscaling_policy.release_replica_after_idle_secs:
+            # At this point it means that there was at least one request. The
+            # `most_recent_metric` is of type list, hence we need to access index 0.
+            most_recent_metric = metrics[-1]
+            latest_request_timestamp_micro_secs = most_recent_metric["timestamp"]
+            # The time module does not have a micro-second function built-in,
+            # so we need to divide nanoseconds by 1e3 and convert to micro-seconds.
+            current_time_micro_seconds = time.time_ns() / 1e3
+            # Compute the elapsed time and convert to seconds.
+            elapsed_time_secs = \
+                (current_time_micro_seconds - latest_request_timestamp_micro_secs) / 1e6
             # If the elapsed time is greater than the requested idle time,
             # in other words there was no incoming request then scale down.
-            scale_op = ScaleOp.DOWN_IN_OP
+            if elapsed_time_secs > autoscaling_policy.release_replica_after_idle_secs:
+                scale_op = ScaleOp.DOWN_IN_OP
         else:
-            # Otherwise, it means there was a request within the elapsed time, then:
+            # Otherwise, it means there was a request within the elapsed time, then,
+            # Check if the current number of running replicas is 0 it means
+            # we need more resources, hence we need to scale up: ScaleOp.UP_OUT_OP.
             if autoscaling_policy.current_replicas == 0:
-                # Check if the current number of running replicas is 0,
-                # then we need more resources, hence ScaleOp.UP_OUT_OP.
                 scale_op = ScaleOp.UP_OUT_OP
             else:
                 # Else, trigger the autoscaling policy with all existing values.
diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/policies.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/policies.py
index 546817ec82..fd49549812 100644
--- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/policies.py
+++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/policies.py
@@ -22,7 +22,7 @@ class AutoscalingPolicy(BaseModel):
     min_replicas: NonNegativeInt
     max_replicas: NonNegativeInt
     previous_triggering_value: float = None
-    release_replica_after_idle_secs: NonNegativeInt = 300  # default is after 5 minutes
+    release_replica_after_idle_secs: NonNegativeInt = None
     scaledown_delay_secs: NonNegativeInt = 60  # default is 1 minute
     scaleup_cost_secs: NonNegativeInt = 300  # default is 5 minutes
 

From 975e53de8185777f146215f552bbfb7d7f0740b5 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@fedml.ai>
Date: Wed, 1 May 2024 22:34:13 +0000
Subject: [PATCH 013/282] Return scale down operation when no incoming request
 within the policy's time frame window.

---
 .../model_scheduler/autoscaler/autoscaler.py  | 41 ++++++++++++-------
 1 file changed, 26 insertions(+), 15 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
index 5f0d425505..03666a5919 100644
--- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
+++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
@@ -55,13 +55,17 @@ def scale_operation_ewm(cls,
         with warnings.catch_warnings():
             warnings.simplefilter(action='ignore', category=FutureWarning)
             period_data = metrics.last("{}min".format(ewm_policy.ewm_mins))
-            # If the data frame window is empty then do nothing more, just return.
-            if period_data.empty:
-                return ScaleOp.NO_OP
-            metric_name = "current_latency" \
-                if "ewm_latency" == ewm_policy.metric else "current_qps"
-            ewm_period = period_data[metric_name] \
-                .ewm(alpha=ewm_policy.ewm_alpha).mean()
+
+        # If the data frame window is empty then it means we
+        # did not have any incoming request, so we need to scale down.
+        if period_data.empty:
+            return ScaleOp.DOWN_IN_OP
+
+        # Otherwise, we proceed as normal.
+        metric_name = "current_latency" \
+            if "ewm_latency" == ewm_policy.metric else "current_qps"
+        ewm_period = period_data[metric_name] \
+            .ewm(alpha=ewm_policy.ewm_alpha).mean()
 
         scale_op = ScaleOp.NO_OP
         # If there is no exponential moving average within this
@@ -115,10 +119,14 @@ def scale_operation_query_concurrency(cls,
             warnings.simplefilter(action='ignore', category=FutureWarning)
             # Here, the number of queries is the number of rows in the short period data frame.
             period_data = metrics.last("{}s".format(concurrent_query_policy.window_size_secs))
-            # If the data frame window is empty then do nothing more, just return.
-            if period_data.empty:
-                return ScaleOp.NO_OP
-            queries_num = period_data.shape[0]
+
+        # If the data frame window is empty then it means we
+        # did not have any incoming request, so we need to scale down.
+        if period_data.empty:
+            return ScaleOp.DOWN_IN_OP
+
+        # Otherwise, we proceed as normal.
+        queries_num = period_data.shape[0]
 
         try:
             # QSR: Queries per Second per Replica: (Number of Queries / Number of Current Replicas) / Window Size
@@ -159,10 +167,13 @@ def scale_operation_meet_traffic_demand(cls,
             warnings.simplefilter(action='ignore', category=FutureWarning)
             # Here, the number of queries is the number of rows in the short period data frame.
             period_data = metrics.last("{}s".format(meet_traffic_demand_policy.window_size_secs))
-            # If the data frame window is empty then do nothing more, just return.
-            if period_data.empty:
-                return ScaleOp.NO_OP
 
+        # If the data frame window is empty then it means we
+        # did not have any incoming request, so we need to scale down.
+        if period_data.empty:
+            return ScaleOp.DOWN_IN_OP
+
+        # Otherwise, we proceed as normal.
         period_requests_num = period_data.shape[0]
         all_latencies = metrics["current_latency"]
         # Original value is milliseconds, convert to seconds.
@@ -293,7 +304,7 @@ def scale_operation_endpoint(self,
             0: do nothing
         """
 
-        # Fetch most recent metric record from the database.
+        # Fetch all metrics record from the database.
         metrics = self.fedml_model_cache.get_endpoint_metrics(
             endpoint_id=endpoint_id)
 

From 5fb500f04762f17d797ac6717380ce2a555382d1 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@fedml.ai>
Date: Wed, 1 May 2024 22:58:43 +0000
Subject: [PATCH 014/282] Adding logging to figure out scaling down delay.

---
 .../model_scheduler/autoscaler/autoscaler.py         | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
index 03666a5919..d81e0148eb 100644
--- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
+++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
@@ -51,6 +51,7 @@ def scale_operation_ewm(cls,
                             ewm_policy: EWMPolicy,
                             metrics: pd.DataFrame) -> ScaleOp:
 
+        logging.info("Executing the ExponentialWeightMoving average autoscaling policy.")
         # Adding the context below to avoid having a series of warning messages.
         with warnings.catch_warnings():
             warnings.simplefilter(action='ignore', category=FutureWarning)
@@ -114,6 +115,7 @@ def scale_operation_query_concurrency(cls,
                                           concurrent_query_policy: ConcurrentQueryPolicy,
                                           metrics: pd.DataFrame) -> ScaleOp:
 
+        logging.info("Executing the QueryConcurrency autoscaling policy.")
         # Adding the context below to avoid having a series of warning messages.
         with warnings.catch_warnings():
             warnings.simplefilter(action='ignore', category=FutureWarning)
@@ -162,6 +164,7 @@ def scale_operation_meet_traffic_demand(cls,
                                             meet_traffic_demand_policy: MeetTrafficDemandPolicy,
                                             metrics: pd.DataFrame) -> ScaleOp:
 
+        logging.info("Executing the MeetTrafficDemand autoscaling policy.")
         # Adding the context below to avoid having a series of warning messages.
         with warnings.catch_warnings():
             warnings.simplefilter(action='ignore', category=FutureWarning)
@@ -227,6 +230,7 @@ def run_autoscaling_policy(self,
     def validate_scaling_bounds(cls,
                                 scale_op: ScaleOp,
                                 autoscaling_policy: AutoscalingPolicy) -> ScaleOp:
+        logging.info("Validating scaling bounds.")
         # We cannot be lower than the minimum number of replicas,
         # nor exceed the maximum number of requested replicas.
         new_running_replicas = autoscaling_policy.current_replicas + scale_op.value
@@ -266,6 +270,7 @@ def enforce_scaling_down_delay_interval(self,
             previous_timestamp = \
                 self.fedml_model_cache.get_endpoint_scaling_down_decision_time(endpoint_id)
             diff_secs = (current_timestamp - previous_timestamp) / 1e6
+            logging.info("Difference in seconds between scaling down operations: {}".format(diff_secs))
             if diff_secs > autoscaling_policy.scaledown_delay_secs:
                 # At this point, we will perform the scaling down operation, hence
                 # we need to delete the previously stored scaling down timestamp (if any).
@@ -279,7 +284,8 @@ def enforce_scaling_down_delay_interval(self,
         return scale_op
 
     def clean_up_scaling_down_operation_state(self, endpoint_id) -> bool:
-        # We return True if the clean up operation succeeded, else False.
+        # We return True if the cleaning up operation succeeded, else False.
+        logging.info("Not a scaling down operation, cleaning up scale down state from Redis.")
         to_clean_up = \
             self.fedml_model_cache.exists_endpoint_scaling_down_decision_time(endpoint_id)
         if to_clean_up:
@@ -312,6 +318,7 @@ def scale_operation_endpoint(self,
         scale_op = ScaleOp.NO_OP
         if not metrics:
             # If no metric exists then no scaling operation.
+            logging.info("No existing metric, so no scaling operation.")
             return scale_op
 
         if autoscaling_policy.release_replica_after_idle_secs:
@@ -328,12 +335,15 @@ def scale_operation_endpoint(self,
             # If the elapsed time is greater than the requested idle time,
             # in other words there was no incoming request then scale down.
             if elapsed_time_secs > autoscaling_policy.release_replica_after_idle_secs:
+                logging.info("Endpoint remained idle for {} seconds, need to scale down.".format(
+                    elapsed_time_secs))
                 scale_op = ScaleOp.DOWN_IN_OP
         else:
             # Otherwise, it means there was a request within the elapsed time, then,
             # Check if the current number of running replicas is 0 it means
             # we need more resources, hence we need to scale up: ScaleOp.UP_OUT_OP.
             if autoscaling_policy.current_replicas == 0:
+                logging.info("Incoming requests but with 0 replicas, scaling up.")
                 scale_op = ScaleOp.UP_OUT_OP
             else:
                 # Else, trigger the autoscaling policy with all existing values.

From 14a75a93e86b90feb000b014c9ff8592bd47a606 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@fedml.ai>
Date: Thu, 2 May 2024 01:48:28 +0000
Subject: [PATCH 015/282] Fixed indefinite no scale down. Problem was the data
 frame period parsing.

---
 .../model_scheduler/autoscaler/autoscaler.py  | 44 +++++++++++++++----
 1 file changed, 35 insertions(+), 9 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
index d81e0148eb..009345863a 100644
--- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
+++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
@@ -8,7 +8,7 @@
 from enum import Enum
 from fedml.computing.scheduler.model_scheduler.device_model_cache import FedMLModelCache
 from fedml.computing.scheduler.model_scheduler.autoscaler.policies import *
-from utils.singleton import Singleton
+from fedml.computing.scheduler.model_scheduler.autoscaler.utils.singleton import Singleton
 
 
 class ScaleOp(Enum):
@@ -38,6 +38,26 @@ def get_current_timestamp_micro_seconds(cls):
         # in REDIS we record/operate in micro-seconds, hence the division by 1e3!
         return int(format(time.time_ns() / 1000.0, '.0f'))
 
+    @classmethod
+    def filter_by_timestamp(cls,
+                            metrics,
+                            before_now_minutes=None,
+                            before_now_seconds=None) -> pd.DataFrame:
+
+        # We subtract the number of seconds/minutes from the current timestamp, and then we query
+        # the data frame to fetch all the records whose timestamp is within the given range.
+        # By default, we return all records.
+        filtered = metrics
+        if before_now_minutes:
+            less_than_ts = \
+                str(pd.Timestamp.now() - pd.Timedelta(minutes=before_now_minutes))
+            filtered = metrics.query("'{}' <= {}".format(less_than_ts, "timestamp"))
+        if before_now_seconds:
+            less_than_ts = \
+                str(pd.Timestamp.now() - pd.Timedelta(seconds=before_now_seconds))
+            filtered = metrics.query("'{}' <= {}".format(less_than_ts, "timestamp"))
+        return filtered
+
     @classmethod
     def scale_operation_predictive(cls,
                                    predictive_policy: PredictivePolicy,
@@ -55,7 +75,8 @@ def scale_operation_ewm(cls,
         # Adding the context below to avoid having a series of warning messages.
         with warnings.catch_warnings():
             warnings.simplefilter(action='ignore', category=FutureWarning)
-            period_data = metrics.last("{}min".format(ewm_policy.ewm_mins))
+            period_data = cls.filter_by_timestamp(metrics,
+                                                  before_now_minutes=ewm_policy.ewm_mins)
 
         # If the data frame window is empty then it means we
         # did not have any incoming request, so we need to scale down.
@@ -119,8 +140,9 @@ def scale_operation_query_concurrency(cls,
         # Adding the context below to avoid having a series of warning messages.
         with warnings.catch_warnings():
             warnings.simplefilter(action='ignore', category=FutureWarning)
-            # Here, the number of queries is the number of rows in the short period data frame.
-            period_data = metrics.last("{}s".format(concurrent_query_policy.window_size_secs))
+            period_data = cls.filter_by_timestamp(
+                metrics,
+                before_now_seconds=concurrent_query_policy.window_size_secs)
 
         # If the data frame window is empty then it means we
         # did not have any incoming request, so we need to scale down.
@@ -168,8 +190,9 @@ def scale_operation_meet_traffic_demand(cls,
         # Adding the context below to avoid having a series of warning messages.
         with warnings.catch_warnings():
             warnings.simplefilter(action='ignore', category=FutureWarning)
-            # Here, the number of queries is the number of rows in the short period data frame.
-            period_data = metrics.last("{}s".format(meet_traffic_demand_policy.window_size_secs))
+            period_data = cls.filter_by_timestamp(
+                metrics,
+                before_now_seconds=meet_traffic_demand_policy.window_size_secs)
 
         # If the data frame window is empty then it means we
         # did not have any incoming request, so we need to scale down.
@@ -257,6 +280,7 @@ def enforce_scaling_down_delay_interval(self,
 
         # If the policy has no scaledown delay then return immediately.
         if autoscaling_policy.scaledown_delay_secs == 0:
+            logging.info("No scale down delay, so scale down immediately.")
             return ScaleOp.DOWN_IN_OP
 
         # By default, we return a no operation.
@@ -270,11 +294,13 @@ def enforce_scaling_down_delay_interval(self,
             previous_timestamp = \
                 self.fedml_model_cache.get_endpoint_scaling_down_decision_time(endpoint_id)
             diff_secs = (current_timestamp - previous_timestamp) / 1e6
-            logging.info("Difference in seconds between scaling down operations: {}".format(diff_secs))
             if diff_secs > autoscaling_policy.scaledown_delay_secs:
+                logging.info("Scaling down since the time difference: {}secs, "
+                             "is above the delay period: {} secs.".format(
+                    diff_secs, autoscaling_policy.scaledown_delay_secs))
                 # At this point, we will perform the scaling down operation, hence
                 # we need to delete the previously stored scaling down timestamp (if any).
-                self.fedml_model_cache.delete_endpoint_scaling_down_decision_time(endpoint_id)
+                self.clean_up_scaling_down_operation_state(endpoint_id)
                 scale_op = ScaleOp.DOWN_IN_OP
         else:
             # Record the timestamp of the scaling down operation.
@@ -285,7 +311,7 @@ def enforce_scaling_down_delay_interval(self,
 
     def clean_up_scaling_down_operation_state(self, endpoint_id) -> bool:
         # We return True if the cleaning up operation succeeded, else False.
-        logging.info("Not a scaling down operation, cleaning up scale down state from Redis.")
+        logging.info("Cleaning up scale down state from Redis.")
         to_clean_up = \
             self.fedml_model_cache.exists_endpoint_scaling_down_decision_time(endpoint_id)
         if to_clean_up:

From 2ff516e59877164f290e93c16cfcd43a48b46ef5 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Thu, 2 May 2024 16:08:46 -0700
Subject: [PATCH 016/282] [Deploy] Fix edge case of readiness check.

---
 .../scheduler/comm_utils/job_monitor.py       | 32 +++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
index 5874adfef7..c24dd2a830 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
@@ -574,6 +574,15 @@ def monitor_slave_endpoint_status(self):
                             is_endpoint_ready = self._check_and_reset_endpoint_status(
                                 job.job_id, job.edge_id, deployment_result, only_check_inference_ready_status=True)
 
+                            # [Hotfix] Under high-concurrency situation, the ready endpoint might not be available
+                            # But the container is in health state
+                            # In this case, we need to have an exact 503 code, instead of timeout to decide to restart
+                            # TODO(Raphael): Split the /ready endpoint and predict endpoint traffic
+                            if not self._lenient_check_replica_ready(deployment_result):
+                                is_endpoint_ready = False
+                            else:
+                                is_endpoint_ready = True
+
                             # Get endpoint container name prefix, prepare for restart
                             endpoint_container_name_prefix = \
                                 (device_client_constants.ClientConstants.get_endpoint_container_name(
@@ -736,6 +745,28 @@ def monitor_slave_endpoint_status(self):
                 except Exception as e:
                     pass
 
+    def _lenient_check_replica_ready(
+            self, deployment_result
+    ):
+        """
+        Double-check the replica's liveness using /ready api:
+            if 200 -> return True
+            [Critical] if timeout -> Could be under high pressure -> return True
+            if HTTP_202_ACCEPTED -> unhealthy -> return False
+        """
+        result_json = deployment_result
+        inference_url = result_json.get("model_url", None)
+
+        # Make a curl get to inference_url with timeout 5s
+        # TODO(Raphael): Also support PROXY and MQTT to check the readiness
+        response_ok = asyncio.run(FedMLHttpInference.is_inference_ready(inference_url, timeout=5))
+        if response_ok is None:
+            # This means the server return 202
+            return False
+
+        # 200 or Timeout
+        return True
+
     def _check_and_reset_endpoint_status(
             self, endpoint_id, device_id, deployment_result, only_check_inference_ready_status=False,
             should_release_gpu_ids=False
@@ -761,6 +792,7 @@ def _check_and_reset_endpoint_status(
 
             if self.endpoint_unavailable_counter.get(str(endpoint_id)) is None:
                 self.endpoint_unavailable_counter[str(endpoint_id)] = 0
+
             if not response_ok:
                 self.endpoint_unavailable_counter[str(endpoint_id)] += 1
             else:

From 162d896894e829e3adcded97287232307fe75539 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Fri, 3 May 2024 12:13:22 -0700
Subject: [PATCH 017/282] [Deploy] Catch exception when processing autoscale.

---
 .../scheduler/comm_utils/job_monitor.py       | 168 +++++++++---------
 1 file changed, 86 insertions(+), 82 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
index 75ef647e3f..11123d9c63 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
@@ -104,91 +104,95 @@ def autoscaler_reconcile_after_interval(self):
         for endpoint_settings in endpoints_settings_list:
             endpoint_state = endpoint_settings["state"]
             if endpoint_state == "DEPLOYED" and endpoint_settings["enable_auto_scaling"]:
-                logging.info(f"After interval, check the autoscaler for async future list."
-                             f"{self.endpoints_autoscale_predict_future}")
-                # TODO(fedml-dimitris): The policy can be set dynamically or be user specific.
-                # Set the policy, here we use latency, but other metrics are possible as well, such as qps.
-                # For more advanced use cases look for the testing scripts under the autoscaler/test directory.
-                autoscaling_policy_config = \
-                    {
-                        "current_replicas": int(endpoint_settings["replica_num"]),
-                        "min_replicas": int(endpoint_settings["scale_min"]),
-                        "max_replicas": int(endpoint_settings["scale_max"]),
-                        "queries_per_replica": int(endpoint_settings["target_queries_per_replica"]),
-                        "window_size_secs": int(endpoint_settings["aggregation_window_size_seconds"]),
-                        "scaledown_delay_secs": int(endpoint_settings["scale_down_delay_seconds"]),
+                try:    # Should not let one endpoint affect the others
+                    logging.info(f"After interval, check the autoscaler for async future list."
+                                 f"{self.endpoints_autoscale_predict_future}")
+                    # TODO(fedml-dimitris): The policy can be set dynamically or be user specific.
+                    # Set the policy, here we use latency, but other metrics are possible as well, such as qps.
+                    # For more advanced use cases look for the testing scripts under the autoscaler/test directory.
+                    autoscaling_policy_config = \
+                        {
+                            "current_replicas": int(endpoint_settings["replica_num"]),
+                            "min_replicas": int(endpoint_settings["scale_min"]),
+                            "max_replicas": int(endpoint_settings["scale_max"]),
+                            "queries_per_replica": int(endpoint_settings["target_queries_per_replica"]),
+                            "window_size_secs": int(endpoint_settings["aggregation_window_size_seconds"]),
+                            "scaledown_delay_secs": int(endpoint_settings["scale_down_delay_seconds"]),
+                        }
+                    autoscaling_policy = ConcurrentQueryPolicy(**autoscaling_policy_config)
+
+                    e_id, e_name, model_name = endpoint_settings["endpoint_id"], endpoint_settings["endpoint_name"], \
+                                                  endpoint_settings["model_name"]
+
+                    logging.info(f"Querying the autoscaler for endpoint {e_id} with user settings {endpoint_settings}.")
+
+                    # For every endpoint we just update the policy configuration.
+                    autoscaling_policy.min_replicas = endpoint_settings["scale_min"]
+                    autoscaling_policy.max_replicas = endpoint_settings["scale_max"]
+                    # We retrieve a list of replicas for every endpoint. The number
+                    # of running replicas is the length of that list.
+                    current_replicas = len(fedml_model_cache.get_endpoint_replicas_results(e_id))
+                    autoscaling_policy.current_replicas = current_replicas
+                    logging.info(f"Endpoint {e_id} autoscaling policy: {autoscaling_policy}.")
+
+                    scale_op = autoscaler.scale_operation_endpoint(
+                        autoscaling_policy,
+                        str(e_id))
+
+                    new_replicas = current_replicas + scale_op.value
+
+                    logging.info(f"Scaling operation {scale_op.value} for endpoint {e_id} .")
+                    logging.info(f"New Replicas {new_replicas} for endpoint {e_id} .")
+                    logging.info(f"Current Replicas {current_replicas} for endpoint {e_id} .")
+                    if current_replicas == new_replicas:
+                        # Basically the autoscaler decided that no scaling operation should take place.
+                        logging.info(f"No scaling operation for endpoint {e_id}.")
+                        return
+
+                    # Should scale in / out
+                    curr_version = fedml.get_env_version()
+
+                    if curr_version == "release":
+                        mlops_prefix = "https://open.fedml.ai/"
+                    elif curr_version == "test":
+                        mlops_prefix = "https://open-test.fedml.ai/"
+                    else:
+                        logging.error(f"Do not support the version {curr_version}.")
+                        return
+                    autoscale_url_path = "fedmlModelServer/api/v1/endpoint/auto-scale"
+                    url = f"{mlops_prefix}{autoscale_url_path}"
+
+                    # Get cached token for authorization of autoscale request
+                    cached_token = fedml_model_cache.get_end_point_token(e_id, e_name, model_name)
+                    if cached_token is None:
+                        logging.error(f"Failed to get the cached token for endpoint {e_id}.")
+                        return
+
+                    req_header = {
+                        "Authorization": f"Bearer {cached_token}"
+                    }
+                    req_body = {
+                        "endpointId": int(e_id),
+                        "replicasDesired": int(new_replicas)
                     }
-                autoscaling_policy = ConcurrentQueryPolicy(**autoscaling_policy_config)
-
-                e_id, e_name, model_name = endpoint_settings["endpoint_id"], endpoint_settings["endpoint_name"], \
-                                              endpoint_settings["model_name"]
-
-                logging.info(f"Querying the autoscaler for endpoint {e_id} with user settings {endpoint_settings}.")
-
-                # For every endpoint we just update the policy configuration.
-                autoscaling_policy.min_replicas = endpoint_settings["scale_min"]
-                autoscaling_policy.max_replicas = endpoint_settings["scale_max"]
-                # We retrieve a list of replicas for every endpoint. The number
-                # of running replicas is the length of that list.
-                current_replicas = len(fedml_model_cache.get_endpoint_replicas_results(e_id))
-                autoscaling_policy.current_replicas = current_replicas
-                logging.info(f"Endpoint {e_id} autoscaling policy: {autoscaling_policy}.")
-
-                scale_op = autoscaler.scale_operation_endpoint(
-                    autoscaling_policy,
-                    str(e_id))
-
-                new_replicas = current_replicas + scale_op.value
-
-                logging.info(f"Scaling operation {scale_op.value} for endpoint {e_id} .")
-                logging.info(f"New Replicas {new_replicas} for endpoint {e_id} .")
-                logging.info(f"Current Replicas {current_replicas} for endpoint {e_id} .")
-                if current_replicas == new_replicas:
-                    # Basically the autoscaler decided that no scaling operation should take place.
-                    logging.info(f"No scaling operation for endpoint {e_id}.")
-                    return
-
-                # Should scale in / out
-                curr_version = fedml.get_env_version()
-
-                if curr_version == "release":
-                    mlops_prefix = "https://open.fedml.ai/"
-                elif curr_version == "test":
-                    mlops_prefix = "https://open-test.fedml.ai/"
-                else:
-                    logging.error(f"Do not support the version {curr_version}.")
-                    return
-                autoscale_url_path = "fedmlModelServer/api/v1/endpoint/auto-scale"
-                url = f"{mlops_prefix}{autoscale_url_path}"
-
-                # Get cached token for authorization of autoscale request
-                cached_token = fedml_model_cache.get_end_point_token(e_id, e_name, model_name)
-                if cached_token is None:
-                    logging.error(f"Failed to get the cached token for endpoint {e_id}.")
-                    return
-
-                req_header = {
-                    "Authorization": f"Bearer {cached_token}"
-                }
-                req_body = {
-                    "endpointId": int(e_id),
-                    "replicasDesired": int(new_replicas)
-                }
 
-                try:
-                    logging.info(f"Sending the autoscale request to MLOps platform. url {url}, "
-                                 f"body {req_body}., header {req_header}")
-                    response = requests.post(
-                        url,
-                        headers=req_header,
-                        json=req_body
-                    )
-                    if response.status_code != 200:
-                        logging.error(f"Failed to send the autoscale request to MLOps platform.")
-                    else:
-                        logging.info(f"Successfully sent the autoscale request to MLOps platform.")
+                    try:
+                        logging.info(f"Sending the autoscale request to MLOps platform. url {url}, "
+                                     f"body {req_body}., header {req_header}")
+                        response = requests.post(
+                            url,
+                            headers=req_header,
+                            json=req_body
+                        )
+                        if response.status_code != 200:
+                            logging.error(f"Failed to send the autoscale request to MLOps platform.")
+                        else:
+                            logging.info(f"Successfully sent the autoscale request to MLOps platform.")
+                    except Exception as e:
+                        logging.error(f"Failed to send the autoscale request to MLOps platform. {e}")
                 except Exception as e:
-                    logging.error(f"Failed to send the autoscale request to MLOps platform. {e}")
+                    logging.error(f"Error in autoscaler reconcile after interval. {e}")
+                    pass
         return
 
     @staticmethod

From 0002dcc2ea9eb12a7a72e92c2f4d887b8cddb06f Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Fri, 3 May 2024 16:53:03 -0700
Subject: [PATCH 018/282] [Deploy] Restrict spacy version.

---
 python/setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/setup.py b/python/setup.py
index ae1efc0dff..b4b6545808 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -54,7 +54,7 @@ def finalize_options(self):
     'redis',
     'scikit-learn',
     'smart-open==6.3.0',
-    'spacy',
+    'spacy>=3.2.0,<3.3.0',
     'sqlalchemy',
     'toposort',
     'torch>=1.13.1',

From addd91c8c3412951eed68662c0b1652577770bf2 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Fri, 3 May 2024 17:02:55 -0700
Subject: [PATCH 019/282] [Deploy] Remove pydantic-settings to avoid PyYAML
 compatibility issue.

---
 python/setup.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/python/setup.py b/python/setup.py
index b4b6545808..c78a597a4b 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -47,7 +47,6 @@ def finalize_options(self):
     'prettytable',
     'py-machineid',
     'pydantic',
-    'pydantic-settings',
     'pytest',
     'pytest-mock',
     'python-rapidjson>=0.9.1',

From 064cfcabb840f9235c283814bf8f347de70cc993 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Fri, 3 May 2024 19:03:06 -0700
Subject: [PATCH 020/282] [Deploy] Put spacy in extra requirement

---
 python/setup.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/python/setup.py b/python/setup.py
index c78a597a4b..cce0ddb2ca 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -53,7 +53,6 @@ def finalize_options(self):
     'redis',
     'scikit-learn',
     'smart-open==6.3.0',
-    'spacy>=3.2.0,<3.3.0',
     'sqlalchemy',
     'toposort',
     'torch>=1.13.1',
@@ -112,6 +111,10 @@ def finalize_options(self):
     "deepspeed>=0.10.2",
 ]
 
+requirements_extra_nlp = [
+    'spacy>=3.2.0,<3.3.0',
+]
+
 # if platform.machine() == "x86_64":
 #    requirements.append("MNN==1.1.6")
 
@@ -177,6 +180,7 @@ def finalize_options(self):
         "llm": requirements_extra_llm,
         "mxnet": requirements_extra_mxnet,
         "tensorflow": requirements_extra_tf,
+        "nlp": requirements_extra_nlp,
     },
     package_data={"": ["py.typed"]},
     license="Apache 2.0",

From 282f2e12785de3d04116886cb0343062e7f16ed8 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Mon, 6 May 2024 10:51:34 -0700
Subject: [PATCH 021/282] [Deploy] Handle status when deploy failed.

---
 .../model_scheduler/device_server_runner.py   | 25 ++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py b/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py
index 89e74bbd74..4bcac6d2db 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py
@@ -667,7 +667,30 @@ def callback_deployment_result_message(self, topic=None, payload=None):
                     self.send_rollback_add_remove_op(run_id_str, rollback_dict)
                     return
                 else:
-                    pass    # This is the last worker that failed, so we should continue to "ABORTED" status
+                    # This is the last worker that failed, so we should continue to "ABORTED" status
+                    model_config_parameters = self.running_request_json[run_id_str]["parameters"]
+                    inference_port = model_config_parameters.get("server_internal_port",
+                                                                 ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
+                    inference_port_external = model_config_parameters.get("server_external_port", inference_port)
+                    ip = self.get_ip_address(self.running_request_json[run_id_str])
+                    if ip.startswith("http://") or ip.startswith("https://"):
+                        model_inference_url = "{}/inference/{}".format(ip, end_point_id)
+                    else:
+                        model_inference_url = "http://{}:{}/inference/{}".format(ip, inference_port_external,
+                                                                                 end_point_id)
+
+                    self.send_deployment_status(end_point_id, end_point_name,
+                                                payload_json["model_name"],
+                                                model_inference_url,
+                                                ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED)
+
+                    # For auto-scaling, should update the state to "DEPLOYED"
+                    FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
+                        update_user_setting_replica_num(end_point_id=end_point_id, state="DEPLOYED")
+
+                    self.model_runner_mapping[run_id_str].replica_controller.under_rollback = False
+
+                    return
             elif run_operation == "UPDATE":
                 # Overwrite the json with the rollback version diff
                 rollback_version_diff = \

From e64b6c6b46e0dc81904abcaeed5932b373111ce6 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Mon, 6 May 2024 12:32:23 -0700
Subject: [PATCH 022/282] [Deploy] Save failed deployment log to a file.

---
 .../model_scheduler/device_client_constants.py   |  7 +++++++
 .../model_scheduler/device_model_deployment.py   | 16 ++++++++++++++++
 2 files changed, 23 insertions(+)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
index 915690e9a4..d2093569c3 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
@@ -274,6 +274,13 @@ def get_model_serving_dir():
             os.makedirs(model_file_dir, exist_ok=True)
         return model_file_dir
 
+    @staticmethod
+    def get_deploy_failed_log_dir():
+        model_file_dir = os.path.join(ClientConstants.get_fedml_home_dir(), "fedml", "logs", "failed_logs")
+        if not os.path.exists(model_file_dir):
+            os.makedirs(model_file_dir, exist_ok=True)
+        return model_file_dir
+
     @staticmethod
     def get_model_infer_data_dir():
         model_infer_data_dir = os.path.join(ClientConstants.get_fedml_home_dir(), "fedml", "models_infer_data")
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index 8670633eeb..bd04228355 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -507,6 +507,22 @@ def log_deployment_result(end_point_id, model_id, cmd_container_name, cmd_type,
 
                 if container_obj.status == "exited":
                     logging.info("Container {} has exited, automatically remove it".format(cmd_container_name))
+
+                    # Save the failed log into ~/.fedml/fedml-model-client/fedml/logs/failed_logs/
+                    # $run_id/$container_name.log
+                    try:
+                        parent_dir = os.path.join(ClientConstants.get_deploy_failed_log_dir())
+                        os.makedirs(parent_dir, exist_ok=True)
+                        error_logs_dir = os.path.join(ClientConstants.get_deploy_failed_log_dir(), str(end_point_id))
+                        os.makedirs(error_logs_dir, exist_ok=True)
+                        error_log_file = os.path.join(error_logs_dir, f"{cmd_container_name}.log")
+                        with open(error_log_file, "w") as f:
+                            f.write(f"Container {cmd_container_name} has exited\n")
+                            f.write(f"Error logs: {err_logs}\n")
+                            f.write(f"Output logs: {out_logs}\n")
+                    except Exception as e:
+                        logging.error(f"Failed to save the error logs with exception {e}")
+
                     client.api.remove_container(container_obj.id, v=True, force=True)
                     break
 

From c6cb5b0d1c9f75de449e8f2fce8f3e3d9e48d60d Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@fedml.ai>
Date: Tue, 7 May 2024 15:39:22 -0400
Subject: [PATCH 023/282] Removing field validator for resolving spacy and
 pydantic conflict.

---
 .../model_scheduler/autoscaler/policies.py         |  9 +++++----
 .../autoscaler/test/autoscaler_test.py             | 14 +++++++-------
 2 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/policies.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/policies.py
index fd49549812..0ad2cc0d13 100644
--- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/policies.py
+++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/policies.py
@@ -1,4 +1,4 @@
-from pydantic import BaseModel, field_validator, NonNegativeInt, NonNegativeFloat
+from pydantic import BaseModel, NonNegativeInt, NonNegativeFloat, validator
 
 
 class AutoscalingPolicy(BaseModel):
@@ -70,9 +70,10 @@ class EWMPolicy(AutoscalingPolicy):
     ub_threshold: NonNegativeFloat  # recommended value: 0.5
     lb_threshold: NonNegativeFloat  # recommended value: 0.5
 
-    @field_validator("metric")
-    def validate_option(cls, v):
-        assert v in ["ewm_latency", "ewm_qps"]
+    @validator("metric")
+    def metric_match(cls, v) -> str:
+        if v not in ["ewm_latency", "ewm_qps"]:
+            raise ValueError("Wrong metric name.")
         return v
 
 
diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/test/autoscaler_test.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/test/autoscaler_test.py
index 7af1022c7d..eadc2dc9a9 100644
--- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/test/autoscaler_test.py
+++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/test/autoscaler_test.py
@@ -111,21 +111,21 @@ def test_validate_scaling_bounds(self):
 
         # Validate scale up.
         scale_up = autoscaler.validate_scaling_bounds(ScaleOp.UP_OUT_OP, autoscaling_policy)
-        self.assertEquals(scale_up, ScaleOp.UP_OUT_OP)
+        self.assertEqual(scale_up, ScaleOp.UP_OUT_OP)
 
         # Validate scale down.
         scale_down = autoscaler.validate_scaling_bounds(ScaleOp.DOWN_IN_OP, autoscaling_policy)
-        self.assertEquals(scale_down, ScaleOp.DOWN_IN_OP)
+        self.assertEqual(scale_down, ScaleOp.DOWN_IN_OP)
 
         # Validate max out-of-bounds.
         autoscaling_policy.current_replicas = 3
         scale_oob_max = autoscaler.validate_scaling_bounds(ScaleOp.UP_OUT_OP, autoscaling_policy)
-        self.assertEquals(scale_oob_max, ScaleOp.NO_OP)
+        self.assertEqual(scale_oob_max, ScaleOp.NO_OP)
 
         # Validate min out-of-bounds.
         autoscaling_policy.current_replicas = 1
         scale_oob_min = autoscaler.validate_scaling_bounds(ScaleOp.DOWN_IN_OP, autoscaling_policy)
-        self.assertEquals(scale_oob_min, ScaleOp.NO_OP)
+        self.assertEqual(scale_oob_min, ScaleOp.NO_OP)
 
     def test_enforce_scaling_down_delay_interval(self):
         self.populate_redis_with_dummy_metrics()
@@ -140,15 +140,15 @@ def test_enforce_scaling_down_delay_interval(self):
 
         autoscaling_policy.scaledown_delay_secs = 0.0
         scale_down = autoscaler.enforce_scaling_down_delay_interval(ENV_ENDPOINT_ID_1, autoscaling_policy)
-        self.assertEquals(scale_down, ScaleOp.DOWN_IN_OP)
+        self.assertEqual(scale_down, ScaleOp.DOWN_IN_OP)
 
         autoscaling_policy.scaledown_delay_secs = 1
         scale_noop = autoscaler.enforce_scaling_down_delay_interval(ENV_ENDPOINT_ID_1, autoscaling_policy)
-        self.assertEquals(scale_noop, ScaleOp.NO_OP)
+        self.assertEqual(scale_noop, ScaleOp.NO_OP)
 
         time.sleep(2)
         scale_down = autoscaler.enforce_scaling_down_delay_interval(ENV_ENDPOINT_ID_1, autoscaling_policy)
-        self.assertEquals(scale_down, ScaleOp.DOWN_IN_OP)
+        self.assertEqual(scale_down, ScaleOp.DOWN_IN_OP)
         self.clear_redis()
 
 

From 3cc9bb26ea96cbcaf1833bbf5edd9cf7ff7c6713 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Fri, 10 May 2024 17:02:38 -0700
Subject: [PATCH 024/282] Add Util classes

---
 .../scheduler/comm_utils/GPUCardUtil.py       | 46 +++++++++++++++++++
 .../scheduler/comm_utils/HardwareUtil.py      | 34 ++++++++++++++
 .../scheduler/comm_utils/NvidiaGPUtil.py      | 42 +++++++++++++++++
 .../scheduler/comm_utils/QualcommNPUtil.py    |  0
 .../utils => comm_utils}/singleton.py         | 11 ++++-
 .../model_scheduler/autoscaler/autoscaler.py  |  2 +-
 6 files changed, 133 insertions(+), 2 deletions(-)
 create mode 100644 python/fedml/computing/scheduler/comm_utils/GPUCardUtil.py
 create mode 100644 python/fedml/computing/scheduler/comm_utils/HardwareUtil.py
 create mode 100644 python/fedml/computing/scheduler/comm_utils/NvidiaGPUtil.py
 create mode 100644 python/fedml/computing/scheduler/comm_utils/QualcommNPUtil.py
 rename python/fedml/computing/scheduler/{model_scheduler/autoscaler/utils => comm_utils}/singleton.py (56%)

diff --git a/python/fedml/computing/scheduler/comm_utils/GPUCardUtil.py b/python/fedml/computing/scheduler/comm_utils/GPUCardUtil.py
new file mode 100644
index 0000000000..dd5cb9ff97
--- /dev/null
+++ b/python/fedml/computing/scheduler/comm_utils/GPUCardUtil.py
@@ -0,0 +1,46 @@
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from enum import Enum, auto
+from typing import Optional, List
+
+
+class GPUCardType(Enum):
+    NVIDIA = auto()
+    QUALCOMM = auto()
+    UNKNOWN = auto()
+
+    def __str__(self):
+        return self.name
+
+
+@dataclass
+class GPUCard:
+    id: int
+    uuid: str
+    name: str
+    load: float
+    memoryTotal: float
+    memoryUsed: float
+    memoryFree: float
+    driver: str
+    serial: Optional[str]
+    display_mode: Optional[str]
+    display_active: Optional[str]
+    temperature: Optional[float]
+
+
+class GPUCardUtil(ABC):
+
+    @classmethod
+    def detectGPUCardType(cls) -> GPUCardType:
+        raise NotImplementedError
+
+    @staticmethod
+    @abstractmethod
+    def getAvailableGPUCardIDs() -> List[int]:
+        raise NotImplementedError
+
+    @staticmethod
+    @abstractmethod
+    def getGPUCards() -> List[GPUCard]:
+        raise NotImplementedError
diff --git a/python/fedml/computing/scheduler/comm_utils/HardwareUtil.py b/python/fedml/computing/scheduler/comm_utils/HardwareUtil.py
new file mode 100644
index 0000000000..9d908886b0
--- /dev/null
+++ b/python/fedml/computing/scheduler/comm_utils/HardwareUtil.py
@@ -0,0 +1,34 @@
+import logging
+from typing import Optional, List
+
+from fedml.computing.scheduler.comm_utils.GPUCardUtil import GPUCardUtil, GPUCard
+from fedml.computing.scheduler.comm_utils.singleton import Singleton
+
+
+class HardwareUtil(metaclass=Singleton):
+
+    def __init__(self):
+        self._gpu_util: Optional[GPUCardUtil] = self.__get_util()
+
+    @staticmethod
+    def __get_util() -> Optional[GPUCardUtil]:
+        for cls in GPUCardUtil.__subclasses__():
+            try:
+                if cls.detectGPUCardType() is not None:
+                    return cls()
+            except Exception as e:
+                pass
+
+        logging.error("No GPU card detected")
+        return None
+
+    def getGPUs(self) -> List[GPUCard]:
+        if self._gpu_util is None:
+            return []
+        return self._gpu_util.getGPUCards()
+
+    def getAvailableGPUCardIDs(self) -> List[int]:
+        if self._gpu_util is None:
+            return []
+        return self._gpu_util.getAvailableGPUCardIDs()
+
diff --git a/python/fedml/computing/scheduler/comm_utils/NvidiaGPUtil.py b/python/fedml/computing/scheduler/comm_utils/NvidiaGPUtil.py
new file mode 100644
index 0000000000..66317c67c8
--- /dev/null
+++ b/python/fedml/computing/scheduler/comm_utils/NvidiaGPUtil.py
@@ -0,0 +1,42 @@
+import subprocess
+from typing import List
+
+from GPUtil import GPUtil, GPU
+
+from fedml.computing.scheduler.comm_utils.GPUCardUtil import GPUCard, GPUCardUtil, GPUCardType
+
+
+def _convert(gpu: GPU) -> GPUCard:
+    return GPUCard(
+        id=gpu.id,
+        uuid=gpu.uuid,
+        name=gpu.name,
+        load=gpu.load,
+        memoryTotal=gpu.memoryTotal,
+        memoryUsed=gpu.memoryUsed,
+        memoryFree=gpu.memoryFree,
+        driver=gpu.driver,
+        serial=gpu.serial,
+        display_mode=gpu.display_mode,
+        display_active=gpu.display_active,
+        temperature=gpu.temperature
+    )
+
+
+class NvidiaGPUtil(GPUCardUtil):
+
+    @staticmethod
+    def getAvailableGPUCardIDs() -> List[int]:
+        return GPUtil.getAvailable()
+
+    @staticmethod
+    def getGPUCards() -> List[GPUCard]:
+        return [_convert(gpu) for gpu in GPUtil.getGPUs()]
+
+    @classmethod
+    def detectGPUCardType(cls):
+        try:
+            subprocess.check_output(["nvidia-smi"], universal_newlines=True)
+            return GPUCardType.NVIDIA
+        except Exception:
+            return None
diff --git a/python/fedml/computing/scheduler/comm_utils/QualcommNPUtil.py b/python/fedml/computing/scheduler/comm_utils/QualcommNPUtil.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/utils/singleton.py b/python/fedml/computing/scheduler/comm_utils/singleton.py
similarity index 56%
rename from python/fedml/computing/scheduler/model_scheduler/autoscaler/utils/singleton.py
rename to python/fedml/computing/scheduler/comm_utils/singleton.py
index 5c76acea97..dd403965c1 100644
--- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/utils/singleton.py
+++ b/python/fedml/computing/scheduler/comm_utils/singleton.py
@@ -1,3 +1,6 @@
+import threading
+
+
 class Singleton(type):
 
     """
@@ -8,8 +11,14 @@ class Singleton(type):
     """
 
     _instances = {}
+    # For thread safety
+    _lock = threading.Lock()
 
     def __call__(cls, *args, **kwargs):
         if cls not in cls._instances:
-            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
+            with cls._lock:
+                # Another thread might have created the instance before the lock was acquired.
+                # So check again if the instance is already created.
+                if cls not in cls._instances:
+                    cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
         return cls._instances[cls]
diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
index 009345863a..bb2b59e7d9 100644
--- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
+++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
@@ -8,7 +8,7 @@
 from enum import Enum
 from fedml.computing.scheduler.model_scheduler.device_model_cache import FedMLModelCache
 from fedml.computing.scheduler.model_scheduler.autoscaler.policies import *
-from fedml.computing.scheduler.model_scheduler.autoscaler.utils.singleton import Singleton
+from fedml.computing.scheduler.comm_utils.singleton import Singleton
 
 
 class ScaleOp(Enum):

From e18428603a8e31767cef653e6b37370e68a4cbec Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Fri, 10 May 2024 18:20:39 -0700
Subject: [PATCH 025/282] Rename util files for pattern matching

---
 .../scheduler/comm_utils/{GPUCardUtil.py => gpu_utils.py}       | 0
 .../scheduler/comm_utils/{HardwareUtil.py => hardware_utils.py} | 2 +-
 .../scheduler/comm_utils/{NvidiaGPUtil.py => nvidia_utils.py}   | 2 +-
 .../comm_utils/{QualcommNPUtil.py => qualcomm_utils.py}         | 0
 4 files changed, 2 insertions(+), 2 deletions(-)
 rename python/fedml/computing/scheduler/comm_utils/{GPUCardUtil.py => gpu_utils.py} (100%)
 rename python/fedml/computing/scheduler/comm_utils/{HardwareUtil.py => hardware_utils.py} (91%)
 rename python/fedml/computing/scheduler/comm_utils/{NvidiaGPUtil.py => nvidia_utils.py} (91%)
 rename python/fedml/computing/scheduler/comm_utils/{QualcommNPUtil.py => qualcomm_utils.py} (100%)

diff --git a/python/fedml/computing/scheduler/comm_utils/GPUCardUtil.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils.py
similarity index 100%
rename from python/fedml/computing/scheduler/comm_utils/GPUCardUtil.py
rename to python/fedml/computing/scheduler/comm_utils/gpu_utils.py
diff --git a/python/fedml/computing/scheduler/comm_utils/HardwareUtil.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
similarity index 91%
rename from python/fedml/computing/scheduler/comm_utils/HardwareUtil.py
rename to python/fedml/computing/scheduler/comm_utils/hardware_utils.py
index 9d908886b0..e57b905e4a 100644
--- a/python/fedml/computing/scheduler/comm_utils/HardwareUtil.py
+++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
@@ -1,7 +1,7 @@
 import logging
 from typing import Optional, List
 
-from fedml.computing.scheduler.comm_utils.GPUCardUtil import GPUCardUtil, GPUCard
+from fedml.computing.scheduler.comm_utils.gpu_utils import GPUCardUtil, GPUCard
 from fedml.computing.scheduler.comm_utils.singleton import Singleton
 
 
diff --git a/python/fedml/computing/scheduler/comm_utils/NvidiaGPUtil.py b/python/fedml/computing/scheduler/comm_utils/nvidia_utils.py
similarity index 91%
rename from python/fedml/computing/scheduler/comm_utils/NvidiaGPUtil.py
rename to python/fedml/computing/scheduler/comm_utils/nvidia_utils.py
index 66317c67c8..0ce49e1ccd 100644
--- a/python/fedml/computing/scheduler/comm_utils/NvidiaGPUtil.py
+++ b/python/fedml/computing/scheduler/comm_utils/nvidia_utils.py
@@ -3,7 +3,7 @@
 
 from GPUtil import GPUtil, GPU
 
-from fedml.computing.scheduler.comm_utils.GPUCardUtil import GPUCard, GPUCardUtil, GPUCardType
+from fedml.computing.scheduler.comm_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType
 
 
 def _convert(gpu: GPU) -> GPUCard:
diff --git a/python/fedml/computing/scheduler/comm_utils/QualcommNPUtil.py b/python/fedml/computing/scheduler/comm_utils/qualcomm_utils.py
similarity index 100%
rename from python/fedml/computing/scheduler/comm_utils/QualcommNPUtil.py
rename to python/fedml/computing/scheduler/comm_utils/qualcomm_utils.py

From 8c8c1da3a07b7862bc7c7ba1893c77e4080277ae Mon Sep 17 00:00:00 2001
From: alaydshah <alay11shah@gmail.com>
Date: Sat, 11 May 2024 03:30:57 +0000
Subject: [PATCH 026/282] MVP

---
 .../scheduler/comm_utils/__init__.py          |  3 ++
 .../__init__.py}                              |  0
 .../comm_utils/{ => gpu_utils}/gpu_utils.py   |  5 ++-
 .../{ => gpu_utils}/nvidia_utils.py           | 23 +++++-----
 .../comm_utils/gpu_utils/qualcomm_utils.py    |  0
 .../scheduler/comm_utils/hardware_utils.py    | 42 ++++++++++++-------
 6 files changed, 44 insertions(+), 29 deletions(-)
 rename python/fedml/computing/scheduler/comm_utils/{qualcomm_utils.py => gpu_utils/__init__.py} (100%)
 rename python/fedml/computing/scheduler/comm_utils/{ => gpu_utils}/gpu_utils.py (89%)
 rename python/fedml/computing/scheduler/comm_utils/{ => gpu_utils}/nvidia_utils.py (82%)
 create mode 100644 python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py

diff --git a/python/fedml/computing/scheduler/comm_utils/__init__.py b/python/fedml/computing/scheduler/comm_utils/__init__.py
index e69de29bb2..adf0269b67 100644
--- a/python/fedml/computing/scheduler/comm_utils/__init__.py
+++ b/python/fedml/computing/scheduler/comm_utils/__init__.py
@@ -0,0 +1,3 @@
+from .gpu_utils import gpu_utils
+from .gpu_utils import qualcomm_utils
+from .gpu_utils import nvidia_utils
\ No newline at end of file
diff --git a/python/fedml/computing/scheduler/comm_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/__init__.py
similarity index 100%
rename from python/fedml/computing/scheduler/comm_utils/qualcomm_utils.py
rename to python/fedml/computing/scheduler/comm_utils/gpu_utils/__init__.py
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
similarity index 89%
rename from python/fedml/computing/scheduler/comm_utils/gpu_utils.py
rename to python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
index dd5cb9ff97..e6691b4b5d 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
@@ -1,4 +1,4 @@
-from abc import ABC, abstractmethod
+from abc import ABC, abstractmethod, ABCMeta
 from dataclasses import dataclass
 from enum import Enum, auto
 from typing import Optional, List
@@ -32,7 +32,7 @@ class GPUCard:
 class GPUCardUtil(ABC):
 
     @classmethod
-    def detectGPUCardType(cls) -> GPUCardType:
+    def detectGPUCardType(cls) -> Optional[GPUCardType]:
         raise NotImplementedError
 
     @staticmethod
@@ -44,3 +44,4 @@ def getAvailableGPUCardIDs() -> List[int]:
     @abstractmethod
     def getGPUCards() -> List[GPUCard]:
         raise NotImplementedError
+
diff --git a/python/fedml/computing/scheduler/comm_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
similarity index 82%
rename from python/fedml/computing/scheduler/comm_utils/nvidia_utils.py
rename to python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
index 0ce49e1ccd..349230cef5 100644
--- a/python/fedml/computing/scheduler/comm_utils/nvidia_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
@@ -1,9 +1,9 @@
 import subprocess
-from typing import List
+from typing import List, Optional
 
 from GPUtil import GPUtil, GPU
 
-from fedml.computing.scheduler.comm_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType
+from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType
 
 
 def _convert(gpu: GPU) -> GPUCard:
@@ -24,19 +24,18 @@ def _convert(gpu: GPU) -> GPUCard:
 
 
 class NvidiaGPUtil(GPUCardUtil):
-
-    @staticmethod
-    def getAvailableGPUCardIDs() -> List[int]:
-        return GPUtil.getAvailable()
-
-    @staticmethod
-    def getGPUCards() -> List[GPUCard]:
-        return [_convert(gpu) for gpu in GPUtil.getGPUs()]
-
     @classmethod
-    def detectGPUCardType(cls):
+    def detectGPUCardType(cls) -> Optional[GPUCardType]:
         try:
             subprocess.check_output(["nvidia-smi"], universal_newlines=True)
             return GPUCardType.NVIDIA
         except Exception:
             return None
+
+    @staticmethod
+    def getGPUCards() -> List[GPUCard]:
+        return [_convert(gpu) for gpu in GPUtil.getGPUs()]
+
+    @staticmethod
+    def getAvailableGPUCardIDs() -> List[int]:
+        return GPUtil.getAvailable()
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
index e57b905e4a..d26fb9c5b5 100644
--- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
@@ -1,34 +1,46 @@
 import logging
+
 from typing import Optional, List
 
-from fedml.computing.scheduler.comm_utils.gpu_utils import GPUCardUtil, GPUCard
+from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCardUtil, GPUCard
+from fedml.computing.scheduler.comm_utils.gpu_utils.nvidia_utils import NvidiaGPUtil
 from fedml.computing.scheduler.comm_utils.singleton import Singleton
 
 
 class HardwareUtil(metaclass=Singleton):
 
-    def __init__(self):
-        self._gpu_util: Optional[GPUCardUtil] = self.__get_util()
+    _gpu_utils = [NvidiaGPUtil]
+    _gpu_util: Optional[GPUCardUtil] = None
 
     @staticmethod
-    def __get_util() -> Optional[GPUCardUtil]:
-        for cls in GPUCardUtil.__subclasses__():
+    def _get_util() -> Optional[GPUCardUtil]:
+        if HardwareUtil._gpu_util is not None:
+            return HardwareUtil._gpu_util
+
+        for gpu_util in HardwareUtil._gpu_utils:
             try:
-                if cls.detectGPUCardType() is not None:
-                    return cls()
+                if gpu_util.detectGPUCardType() is not None:
+                    HardwareUtil._gpu_util = gpu_util()
+                    return HardwareUtil._gpu_util
             except Exception as e:
                 pass
 
         logging.error("No GPU card detected")
         return None
 
-    def getGPUs(self) -> List[GPUCard]:
-        if self._gpu_util is None:
-            return []
-        return self._gpu_util.getGPUCards()
+    @staticmethod
+    def getGPUs() -> List[GPUCard]:
+        gpu_util = HardwareUtil._get_util()
+        return gpu_util.getGPUCards() if gpu_util is not None else []
+
+    @staticmethod
+    def getAvailableGPUCardIDs() -> List[int]:
+        gpu_util = HardwareUtil._get_util()
+        return gpu_util.getAvailableGPUCardIDs() if gpu_util is not None else []
 
-    def getAvailableGPUCardIDs(self) -> List[int]:
-        if self._gpu_util is None:
-            return []
-        return self._gpu_util.getAvailableGPUCardIDs()
 
+if __name__ == "__main__":
+    gpus = HardwareUtil.getGPUs()
+    get_available_gpu_cards = HardwareUtil.getAvailableGPUCardIDs()
+    print(gpus)
+    print(get_available_gpu_cards)

From a35d433d4d2ec87dff89b3c01ff9057aee70f2d3 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Sat, 11 May 2024 20:28:21 +0800
Subject: [PATCH 027/282] 1. Get the docker container with the timeout value so
 that we can avoid the system is blocked. 2. Fix the time error when checking
 the run process in the job monitor. 3. Support docker authentication when
 launching the job. 4. Send the edge status detecting message once in the
 protocol manager, save the status response message to the queue of job
 runner, and process the edge status in the job runner. 5. Open a process to
 download the package so that we can avoid the request is blocked and check
 the timeout. 6. Refactor the status center to make the status sequence work
 and stable. 7. Change the retain flag to true when establishing an MQTT
 connection so that we can handle messages even if the device is not started.
 8. Sync the latest deployment module to this branch.

---
 python/fedml/api/api_test.py                  |  19 +-
 .../scheduler/comm_utils/container_utils.py   |   4 +-
 .../scheduler/comm_utils/job_cleanup.py       |   1 +
 .../scheduler/comm_utils/job_monitor.py       |  15 +-
 .../scheduler/comm_utils/job_utils.py         |   5 +-
 .../master/base_master_job_runner.py          | 179 +++++++++++----
 .../master/base_master_protocol_manager.py    | 156 ++------------
 .../scheduler/master/launch_job_runner.py     |   2 +-
 .../scheduler/master/server_daemon.py         |  18 +-
 .../model_scheduler/device_client_runner.py   |  38 +++-
 .../device_model_deployment.py                |   4 +-
 .../model_scheduler/device_server_runner.py   | 203 +++++++++++++-----
 .../model_scheduler/job_runner_msg_sender.py  |   5 +-
 .../model_scheduler/master_job_runner.py      | 133 +++++++++---
 .../master_protocol_manager.py                |  46 ++--
 .../model_scheduler/worker_job_runner.py      |  26 ++-
 .../worker_protocol_manager.py                |  24 ++-
 .../scheduler_core/general_constants.py       |   2 +
 .../scheduler_core/message_center.py          |   2 +-
 .../scheduler_base_job_runner.py              |  95 +++++++-
 .../scheduler_base_job_runner_manager.py      |   7 +-
 .../scheduler/scheduler_core/status_center.py |   8 +-
 .../status_manager_protocols.py               |  12 +-
 .../scheduler/slave/base_slave_job_runner.py  |   3 +-
 .../slave/base_slave_protocol_manager.py      |   5 +-
 .../scheduler/slave/client_data_interface.py  |  14 +-
 .../communication/mqtt/mqtt_manager.py        |   2 +-
 python/fedml/core/mlops/mlops_metrics.py      |  49 +++--
 28 files changed, 711 insertions(+), 366 deletions(-)

diff --git a/python/fedml/api/api_test.py b/python/fedml/api/api_test.py
index 54da088d0d..fc2fb77b20 100755
--- a/python/fedml/api/api_test.py
+++ b/python/fedml/api/api_test.py
@@ -18,20 +18,23 @@
 yaml_file = os.path.join(python_dir, "examples", "launch", "hello_job.yaml")
 
 # Launch job
-for i in range(0, 10):
+launch_result_list = list()
+for i in range(0, 1):
     launch_result = fedml.api.launch_job(yaml_file)
+    launch_result_list.append(launch_result)
     # launch_result = fedml.api.launch_job_on_cluster(yaml_file, "alex-cluster")
     if launch_result.result_code != 0:
         print(f"Failed to launch job. Reason: {launch_result.result_message}")
 
-exit(1)
-
 # Get job status
-log_result = fedml.api.run_logs(launch_result.run_id, 1, 100)
-if log_result is None or log_result.run_status is None:
-    print(f"Failed to get job status.")
-    exit(1)
-print(f"Run status {log_result.run_status}")
+while len(launch_result_list) > 0:
+    for launch_result in launch_result_list:
+        log_result = fedml.api.run_logs(launch_result.run_id, 1, 5)
+        if log_result is None or log_result.run_status is None:
+            print(f"Failed to get job status.")
+            continue
+        print(f"Run status {log_result.run_status}")
+        time.sleep(0.5)
 
 # Get job logs
 time.sleep(30)
diff --git a/python/fedml/computing/scheduler/comm_utils/container_utils.py b/python/fedml/computing/scheduler/comm_utils/container_utils.py
index cfaa5b6457..417aa7ba81 100644
--- a/python/fedml/computing/scheduler/comm_utils/container_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/container_utils.py
@@ -20,7 +20,7 @@ def get_instance():
 
     def get_docker_client(self):
         try:
-            client = docker.from_env()
+            client = docker.from_env(timeout=5, version="auto")
         except Exception:
             logging.error("Failed to connect to the docker daemon, please ensure that you have "
                           "installed Docker Desktop or Docker Engine, and the docker is running")
@@ -175,7 +175,7 @@ def get_container_rank_same_model(prefix: str):
         running_model_name = hash("model_endpoint_id_{}_name_{}_model_id_{}_name_{}_ver_{}")
         """
         try:
-            client = docker.from_env()
+            client = docker.from_env(timeout=5, version="auto")
         except Exception:
             logging.error("Failed to connect to the docker daemon, please ensure that you have "
                           "installed Docker Desktop or Docker Engine, and the docker is running")
diff --git a/python/fedml/computing/scheduler/comm_utils/job_cleanup.py b/python/fedml/computing/scheduler/comm_utils/job_cleanup.py
index ed30c1bf2e..6700b0bc7a 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_cleanup.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_cleanup.py
@@ -44,6 +44,7 @@ def sync_run_process_gpu(self):
                     ComputeCacheManager.get_instance().get_gpu_cache().get_run_info_sync_lock_key("")
             ):
                 count = 0
+                client_data_interface.FedMLClientDataInterface.get_instance().create_job_table()
                 job_list = client_data_interface.FedMLClientDataInterface.get_instance().get_jobs_from_db()
                 for job in job_list.job_list:
                     count += 1
diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
index 30182a6207..6a9afb9d69 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
@@ -81,7 +81,7 @@ def monitor_slave_run_process_status(self):
                     break
 
                 # Calc the timeout
-                started_time = int(float(job.started_time))
+                started_time = JobMonitor.get_started_time(job)
                 timeout = time.time() - started_time
 
                 job_type = JobRunnerUtils.parse_job_type(job.running_json)
@@ -157,6 +157,15 @@ def monitor_slave_run_process_status(self):
             print(f"Exception when monitoring endpoint process on the slave agent.{traceback.format_exc()}")
             pass
 
+    @staticmethod
+    def get_started_time(job):
+        started_time = int(float(job.started_time))
+        if started_time <= 0:
+            started_time = int(float(job.updated_time))
+            if started_time <= 0:
+                started_time = time.time()
+        return started_time
+
     def monitor_master_run_process_status(self, server_id, device_info_reporter=None):
         try:
             ComputeCacheManager.get_instance().set_redis_params()
@@ -168,7 +177,7 @@ def monitor_master_run_process_status(self, server_id, device_info_reporter=None
                     break
 
                 # Calc the timeout
-                started_time = int(float(job.started_time))
+                started_time = JobMonitor.get_started_time(job)
                 timeout = time.time() - started_time
 
                 # Get the timeout threshold
@@ -416,7 +425,7 @@ def monitor_slave_endpoint_status(self):
                         endpoint_name = endpoint_json.get("end_point_name", None)
                         device_ids = endpoint_json.get("device_ids", [])
 
-                        started_time = int(float(job.started_time))
+                        started_time = JobMonitor.get_started_time(job)
                         timeout = time.time() - started_time
                         if timeout > SchedulerConstants.ENDPOINT_DEPLOYMENT_DEPLOYING_TIMEOUT:
                             print(f"[Worker][{job.job_id}:{job.edge_id}] Due to timeout, "
diff --git a/python/fedml/computing/scheduler/comm_utils/job_utils.py b/python/fedml/computing/scheduler/comm_utils/job_utils.py
index 1423c3e6ab..44290de37d 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_utils.py
@@ -568,8 +568,9 @@ def get_run_container_name(run_id: int) -> str:
     @staticmethod
     def get_docker_client(docker_args: DockerArgs) -> DockerClient:
         try:
-            client = docker.from_env()
-            client.login(username=docker_args.username, password=docker_args.password, registry=docker_args.registry)
+            client = docker.from_env(timeout=5, version="auto")
+            if docker_args.username != "" and docker_args.registry != "":
+                client.login(username=docker_args.username, password=docker_args.password, registry=docker_args.registry)
         except Exception as e:
             raise Exception(f"Failed to connect to the docker daemon, please ensure that you have "
                             f"installed Docker Desktop or Docker Engine, and the docker is running. Exception {e}")
diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py
index 3dbc1fd891..ce0515160f 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py
@@ -23,6 +23,8 @@
 from ..scheduler_core.general_constants import GeneralConstants
 from ..scheduler_core.scheduler_base_job_runner import FedMLSchedulerBaseJobRunner, RunnerError, RunnerCompletedError
 from abc import ABC, abstractmethod
+from ..scheduler_core.scheduler_matcher import SchedulerMatcher
+import fedml
 
 
 class FedMLBaseMasterJobRunner(FedMLSchedulerBaseJobRunner, ABC):
@@ -424,11 +426,15 @@ def start_runner_process(
         ServerConstants.save_run_process(run_id, self.run_process.pid)
         return self.run_process
 
-    def put_run_edge_device_info_to_queue(self, run_id, device_info):
-        run_id_str = str(run_id)
-        if self.run_edge_device_info_queue is None:
-            self.run_edge_device_info_queue = Queue()
-        self.run_edge_device_info_queue.put(device_info)
+    def put_run_edge_device_info_to_queue(self, run_id, edge_id, device_info):
+        edge_ids = self.request_json.get("edgeids", None)
+        if edge_ids is None:
+            return
+        if int(edge_id) in edge_ids or str(edge_id) in edge_ids:
+            run_id_str = str(run_id)
+            if self.run_edge_device_info_queue is None:
+                self.run_edge_device_info_queue = Queue()
+            self.run_edge_device_info_queue.put(device_info)
 
     def should_continue_run_job(self, run_id):
         run_config = self.request_json["run_config"]
@@ -467,23 +473,6 @@ def detect_edges_status(
         run_edges_realtime_status = dict()
         run_edges_realtime_status[run_id_str] = dict()
 
-        edge_info_global_dict = dict()
-
-        # Send status message to all edges
-        allowed_cache_edge_status_time = 60
-        for edge_id in edge_id_list:
-            # Check if the edge status was filled allowed_cache_edge_status_time seconds ago,
-            # if so no more checking message would be sent.
-            edge_info = edge_info_global_dict.get(edge_id, None)
-            if edge_info is not None:
-                timestamp = edge_info.get("timestamp", None)
-                time_interval = time.time() - timestamp
-                if time_interval <= allowed_cache_edge_status_time:
-                    continue
-
-            self.send_status_check_msg(run_id, edge_id, self.edge_id, context=status_check_context)
-        time.sleep(3)
-
         total_sleep_seconds = 0
         status_check_sleep_seconds = 10
         allowed_status_check_sleep_seconds = 60 * 2 if status_timeout is None else status_timeout
@@ -522,26 +511,14 @@ def detect_edges_status(
                     active_edges_count += 1
                     active_edge_info_dict[str(edge_id)] = edge_info
                 else:
-                    # Check if the edge status was filled allowed_cache_edge_status_time seconds ago,
-                    # if so no more checking message would be sent.
-                    edge_info = edge_info_global_dict.get(edge_id, None)
-                    if edge_info is not None:
-                        timestamp = edge_info.get("timestamp", None)
-                        time_interval = time.time() - timestamp
-                        if time_interval <= allowed_cache_edge_status_time:
-                            active_edges_count += 1
-                            active_edge_info_dict[str(edge_id)] = edge_info
-                            continue
-
                     inactivate_edges.append(edge_id)
-                    self.send_status_check_msg(run_id, edge_id, self.edge_id, context=status_check_context)
 
             # If all edges are ready then send the starting job message to them
             if active_edges_count == len(edge_id_list):
                 logging.info(f"All edges are ready. Active edge id list is as follows. {active_edge_info_dict}")
                 if callback_when_edges_ready is not None:
                     logging.info("All edges are ready. Start to process the callback function.")
-                    callback_when_edges_ready(active_edge_info_dict=active_edge_info_dict)
+                    callback_when_edges_ready(self.request_json, active_edge_info_dict=active_edge_info_dict)
                 else:
                     logging.info("All edges are ready. No callback function to process.")
                 break
@@ -572,18 +549,11 @@ def detect_edges_status(
                 if should_async and total_sleep_seconds >= allowed_status_check_sleep_seconds_for_async:
                     if async_timeout > allowed_status_check_sleep_seconds_for_async:
                         time.sleep(async_timeout - allowed_status_check_sleep_seconds_for_async)
-                    self.send_training_request_to_edges(active_edge_info_dict)
+                    self.send_training_request_to_edges(self.request_json, active_edge_info_dict)
                     return True, active_edge_info_dict, inactivate_edges
 
         return True, active_edge_info_dict, inactivate_edges
 
-    def send_status_check_msg(self, run_id, edge_id, server_id, context=None):
-        topic_get_model_device_id = "server/client/request_device_info/" + str(edge_id)
-        payload = {"server_id": server_id, "run_id": run_id}
-        if context is not None:
-            payload["context"] = context
-        self.message_center.send_message(topic_get_model_device_id, json.dumps(payload))
-
     def report_exception_status(self, run_id):
         self.status_reporter.report_job_status(run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION)
 
@@ -602,10 +572,125 @@ def callback_run_metrics(self, topic, payload):
             self.run_metrics_queue = Queue()
         self.run_metrics_queue.put(payload)
 
-    def send_training_request_to_edges(self, active_edge_info_dict):
-        topic = GeneralConstants.MSG_TOPIC_SEND_TRAINING_REQUEST_TO_EDGES
-        payload = json.dumps(active_edge_info_dict)
-        self.message_center.receive_message(topic, payload)
+    # def send_training_request_to_edges(self, active_edge_info_dict):
+    #     topic = GeneralConstants.MSG_TOPIC_SEND_TRAINING_REQUEST_TO_EDGES
+    #     payload = json.dumps(active_edge_info_dict)
+    #     self.message_center.receive_message(topic, payload)
+    def send_training_request_to_edges(self, request_json, active_edge_info_dict=None):
+        run_id = request_json["runId"]
+        edge_id_list = request_json["edgeids"]
+        run_config = request_json.get("run_config", {})
+        run_params = run_config.get("parameters", {})
+        job_yaml = run_params.get("job_yaml", {})
+        job_yaml_default_none = run_params.get("job_yaml", None)
+        computing = job_yaml.get("computing", {})
+        request_num_gpus = computing.get("minimum_num_gpus", None)
+        job_gpu_id_list = request_json.get("job_gpu_id_list", None)
+        assigned_gpu_num_dict = dict()
+        assigned_gpu_ids_dict = dict()
+        master_node_addr = ""
+        master_node_port = 0
+
+        logging.info(f"Send training request to Edge ids: {edge_id_list}, run_id {run_id}")
+
+        should_match_gpu = False
+        if job_yaml_default_none is not None and request_num_gpus is not None and \
+                int(request_num_gpus) > 0 and active_edge_info_dict is not None:
+            should_match_gpu = True
+            SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(active_edge_info_dict, show_gpu_list=True)
+
+            # Match and assign gpus to each device
+            assigned_gpu_num_dict, assigned_gpu_ids_dict = SchedulerMatcher.match_and_assign_gpu_resources_to_devices(
+                request_num_gpus, edge_id_list, active_edge_info_dict, job_gpu_id_list=job_gpu_id_list)
+            if assigned_gpu_num_dict is None or assigned_gpu_ids_dict is None:
+                # If no resources available, send failed message to MLOps and send exception message to all edges.
+                gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(
+                    active_edge_info_dict, should_print=True)
+                err_info = f"No resources available. " \
+                           f"Total available GPU count {gpu_available_count} is less than " \
+                           f"request GPU count {request_num_gpus}"
+                logging.error(err_info)
+
+                # Bug fix: This mqtt message needs to be sent so platform can clean up the failed run and change the
+                # status from running to failed.
+                self.mlops_metrics.report_server_training_status(
+                    run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id
+                )
+
+                self.status_reporter.report_server_id_status(
+                    run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
+                    server_id=self.edge_id, server_agent_id=self.server_agent_id)
+                self.report_exception_status(run_id)
+
+                serving_args = job_yaml.get("serving_args", {})
+                endpoint_id = serving_args.get("endpoint_id", None)
+                if endpoint_id is not None:
+                    fedml.mlops.log_endpoint_status(
+                        endpoint_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED)
+                    fedml.mlops.log_run_log_lines(
+                        endpoint_id, 0, [err_info],
+                        log_source=GeneralConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT
+                    )
+                return
+
+            # Generate master node addr and port
+            master_node_addr, master_node_port = SchedulerMatcher.get_master_node_info(edge_id_list,
+                                                                                       active_edge_info_dict)
+
+            # Generate new edge id list after matched
+            edge_id_list = SchedulerMatcher.generate_new_edge_list_for_gpu_matching(assigned_gpu_num_dict)
+            if len(edge_id_list) <= 0:
+                gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(
+                    active_edge_info_dict, should_print=True)
+                logging.error(f"Request parameter for GPU num is invalid. "
+                              f"Total available GPU count {gpu_available_count}. "
+                              f"Request GPU num {request_num_gpus}")
+                self.status_reporter.report_server_id_status(
+                    run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
+                    server_id=self.edge_id, server_agent_id=self.server_agent_id)
+                self.report_exception_status(run_id)
+                return
+
+        if should_match_gpu:
+            # Report gpu num and related infos to MLOps.
+            serving_args = job_yaml.get("serving_args", {})
+            endpoint_id = serving_args.get("endpoint_id", None)
+            if endpoint_id is not None:
+                endpoint_info = list()
+                for edge_id_item, gpu_num in assigned_gpu_num_dict.items():
+                    edge_info = active_edge_info_dict.get(str(edge_id_item), {})
+                    endpoint_info.append({
+                        "machine_id": edge_id_item, "endpoint_gpu_count": gpu_num,
+                        "master_deploy_id": edge_info.get("master_device_id", 0),
+                        "slave_deploy_id": edge_info.get("slave_device_id", 0)})
+                topic_name = f"compute/mlops/endpoint"
+                endpoint_info_json = {"endpoint_id": endpoint_id, "endpoint_info": endpoint_info}
+                logging.info(f"endpoint_info_json {endpoint_info_json}")
+                self.message_center.send_message(topic_name, json.dumps(endpoint_info_json))
+
+        client_rank = 1
+        for edge_id in edge_id_list:
+            topic_start_train = "flserver_agent/" + str(edge_id) + "/start_train"
+            logging.info("start_train: send topic " + topic_start_train + " to client...")
+            request_json["client_rank"] = client_rank
+            client_rank += 1
+
+            if active_edge_info_dict is not None:
+                edge_info = active_edge_info_dict.get(str(edge_id), {})
+                model_master_device_id = edge_info.get("master_device_id", None)
+                model_slave_device_id = edge_info.get("slave_device_id", None)
+                model_slave_device_id_list = edge_info.get("slave_device_id_list", None)
+
+                if should_match_gpu:
+                    request_json["scheduler_match_info"] = SchedulerMatcher.generate_match_info_for_scheduler(
+                        edge_id, edge_id_list, master_node_addr, master_node_port,
+                        assigned_gpu_num_dict, assigned_gpu_ids_dict,
+                        model_master_device_id=model_master_device_id,
+                        model_slave_device_id=model_slave_device_id,
+                        model_slave_device_id_list=model_slave_device_id_list
+                    )
+
+            self.message_center.send_message(topic_start_train, json.dumps(request_json))
 
     def should_process_async_cluster(self):
         run_config = self.request_json.get("run_config", {})
diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
index 25cab5a17c..ef59431ee8 100755
--- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -3,16 +3,10 @@
 import json
 import logging
 import fedml
-from ..scheduler_core.scheduler_matcher import SchedulerMatcher
 from ..comm_utils.constants import SchedulerConstants
-from ..comm_utils.job_utils import JobRunnerUtils
 from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
 from ....core.mlops.mlops_configs import MLOpsConfigs
 from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
-from ..comm_utils import sys_utils
-from ....core.mlops.mlops_utils import MLOpsUtils
-from ..model_scheduler import device_client_constants
-from fedml.utils.debugging import debug
 from ..scheduler_core.compute_cache_manager import ComputeCacheManager
 from ..scheduler_core.ota_upgrade import FedMLOtaUpgrade
 from .deploy_job_launcher import FedMLDeployJobLauncher
@@ -91,9 +85,6 @@ def generate_topics(self):
         # The topic for last-will messages.
         self.topic_last_will = "flserver_agent/last_will_msg"
 
-        # The topic for sending training request to edges (Request from the job runner when all edges are ready)
-        self.topic_send_training_request_to_edges = GeneralConstants.MSG_TOPIC_SEND_TRAINING_REQUEST_TO_EDGES
-
         # Subscribe topics for starting train, stopping train and fetching client status.
         self.subscribed_topics.clear()
         self.add_subscribe_topic(self.topic_start_train)
@@ -115,13 +106,10 @@ def add_protocol_handler(self):
         self.add_message_listener(self.topic_ota_msg, FedMLBaseMasterProtocolManager.callback_server_ota_msg)
         self.add_message_listener(self.topic_report_status, self.callback_report_current_status)
         self.add_message_listener(self.topic_response_device_info, self.callback_response_device_info)
-        self.add_message_listener(self.topic_response_device_info, self.callback_response_device_info)
         self.add_message_listener(self.topic_request_device_info_from_mlops,
                                   self.callback_request_device_info_from_mlops)
         self.add_message_listener(self.topic_requesst_job_status, self.callback_request_job_status)
         self.add_message_listener(self.topic_requesst_device_status_in_job, self.callback_request_device_status_in_job)
-        self.add_message_listener(self.topic_send_training_request_to_edges,
-                                  self.callback_send_training_request_to_edges)
 
     @abstractmethod
     def _get_job_runner_manager(self):
@@ -197,7 +185,7 @@ def callback_start_train(self, topic=None, payload=None):
         # Print the payload
         logging.info("callback_start_train payload: {}".format(payload))
         logging.info(
-            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
+            f"FedMLDebug - run id {run_id}, Receive at callback_start_train: topic ({topic}), payload ({payload})"
         )
 
         # Save the parameters
@@ -212,7 +200,7 @@ def callback_start_train(self, topic=None, payload=None):
         if not self.run_as_cloud_server:
             self.mlops_metrics.report_server_id_status(
                 run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_STARTING, edge_id=self.edge_id,
-                server_id=self.edge_id, server_agent_id=self.edge_id)
+                server_id=self.edge_id, server_agent_id=self.edge_id, running_json=payload)
 
         # Start server with multiprocessing mode
         if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent:
@@ -230,6 +218,8 @@ def callback_start_train(self, topic=None, payload=None):
             process = self._get_job_runner_manager().get_runner_process(run_id)
             if process is not None:
                 GeneralConstants.save_run_process(run_id, process.pid, is_master=True)
+
+            self.send_status_msg_to_edges(edge_id_list, run_id, self.edge_id)
         elif self.run_as_cloud_agent:
             self.init_job_task(request_json)
 
@@ -261,6 +251,8 @@ def callback_start_train(self, topic=None, payload=None):
                 status_center_queue=self.get_status_queue()
             )
 
+            self.send_status_msg_to_edges(edge_id_list, run_id, self.edge_id)
+
     def callback_stop_train(self, topic, payload, use_payload=None):
         # Print the payload
         logging.info(
@@ -346,7 +338,7 @@ def callback_response_device_info(self, topic, payload):
 
         # Put device info into a multiprocessing queue so master runner checks if all edges are ready
         if context is None:
-            self._get_job_runner_manager().put_run_edge_device_info_to_queue(run_id, device_info)
+            self._get_job_runner_manager().put_run_edge_device_info_to_queue(run_id, edge_id, device_info)
 
             # if self.run_edge_device_info_global_queue is None:
             #     self.run_edge_device_info_global_queue = Array('i', list())
@@ -368,10 +360,6 @@ def callback_request_job_status(self, topic, payload):
     def callback_request_device_status_in_job(self, topic, payload):
         self.response_device_status_in_job(topic, payload)
 
-    def callback_send_training_request_to_edges(self, topic, payload):
-        payload_json = json.loads(payload)
-        self.send_training_request_to_edges(active_edge_info_dict=payload_json)
-
     def generate_protocol_manager(self):
         message_status_runner = self._generate_protocol_manager_instance(
             self.args, agent_config=self.agent_config
@@ -457,124 +445,6 @@ def init_job_task(self, request_json):
         self.setup_listener_for_run_metrics(run_id)
         self.setup_listener_for_run_logs(run_id)
 
-    @debug
-    def send_training_request_to_edges(self, active_edge_info_dict=None):
-        run_id = self.request_json["runId"]
-        edge_id_list = self.request_json["edgeids"]
-        run_config = self.request_json.get("run_config", {})
-        run_params = run_config.get("parameters", {})
-        job_yaml = run_params.get("job_yaml", {})
-        job_yaml_default_none = run_params.get("job_yaml", None)
-        computing = job_yaml.get("computing", {})
-        request_num_gpus = computing.get("minimum_num_gpus", None)
-        job_gpu_id_list = self.request_json.get("job_gpu_id_list", None)
-        assigned_gpu_num_dict = dict()
-        assigned_gpu_ids_dict = dict()
-        master_node_addr = ""
-        master_node_port = 0
-
-        logging.info("Send training request to Edge ids: " + str(edge_id_list))
-
-        should_match_gpu = False
-        if job_yaml_default_none is not None and request_num_gpus is not None and \
-                int(request_num_gpus) > 0 and active_edge_info_dict is not None:
-            should_match_gpu = True
-            SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(active_edge_info_dict, show_gpu_list=True)
-
-            # Match and assign gpus to each device
-            assigned_gpu_num_dict, assigned_gpu_ids_dict = SchedulerMatcher.match_and_assign_gpu_resources_to_devices(
-                request_num_gpus, edge_id_list, active_edge_info_dict, job_gpu_id_list=job_gpu_id_list)
-            if assigned_gpu_num_dict is None or assigned_gpu_ids_dict is None:
-                # If no resources available, send failed message to MLOps and send exception message to all edges.
-                gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(
-                    active_edge_info_dict, should_print=True)
-                err_info = f"No resources available." \
-                           f"Total available GPU count {gpu_available_count} is less than " \
-                           f"request GPU count {request_num_gpus}"
-                logging.error(err_info)
-
-                # Bug fix: This mqtt message needs to be sent so platform can clean up the failed run and change the
-                # status from running to failed.
-                self.mlops_metrics.report_server_training_status(
-                    run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id
-                )
-
-                self.status_reporter.report_server_id_status(
-                    run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
-                    server_id=self.edge_id, server_agent_id=self.server_agent_id)
-                self.report_exception_status(run_id)
-
-                serving_args = job_yaml.get("serving_args", {})
-                endpoint_id = serving_args.get("endpoint_id", None)
-                if endpoint_id is not None:
-                    fedml.mlops.log_endpoint_status(
-                        endpoint_id, device_client_constants.ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
-                    fedml.mlops.log_run_log_lines(
-                        endpoint_id, 0, [err_info],
-                        log_source=device_client_constants.ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT
-                    )
-                return
-
-            # Generate master node addr and port
-            master_node_addr, master_node_port = SchedulerMatcher.get_master_node_info(edge_id_list,
-                                                                                       active_edge_info_dict)
-
-            # Generate new edge id list after matched
-            edge_id_list = SchedulerMatcher.generate_new_edge_list_for_gpu_matching(assigned_gpu_num_dict)
-            if len(edge_id_list) <= 0:
-                gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(
-                    active_edge_info_dict, should_print=True)
-                logging.error(f"Request parameter for GPU num is invalid."
-                              f"Total available GPU count {gpu_available_count}."
-                              f"Request GPU num {request_num_gpus}")
-                self.status_reporter.report_server_id_status(
-                    run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
-                    server_id=self.edge_id, server_agent_id=self.server_agent_id)
-                self.report_exception_status(run_id)
-                return
-
-        if should_match_gpu:
-            # Report gpu num and related infos to MLOps.
-            serving_args = job_yaml.get("serving_args", {})
-            endpoint_id = serving_args.get("endpoint_id", None)
-            if endpoint_id is not None:
-                endpoint_info = list()
-                for edge_id_item, gpu_num in assigned_gpu_num_dict.items():
-                    edge_info = active_edge_info_dict.get(str(edge_id_item), {})
-                    endpoint_info.append({
-                        "machine_id": edge_id_item, "endpoint_gpu_count": gpu_num,
-                        "master_deploy_id": edge_info.get("master_device_id", 0),
-                        "slave_deploy_id": edge_info.get("slave_device_id", 0)})
-                topic_name = f"compute/mlops/endpoint"
-                endpoint_info_json = {"endpoint_id": endpoint_id, "endpoint_info": endpoint_info}
-                print(f"endpoint_info_json {endpoint_info_json}")
-                self.message_center.send_message(topic_name, json.dumps(endpoint_info_json))
-
-        client_rank = 1
-        for edge_id in edge_id_list:
-            topic_start_train = "flserver_agent/" + str(edge_id) + "/start_train"
-            logging.info("start_train: send topic " + topic_start_train + " to client...")
-            request_json = self.request_json
-            request_json["client_rank"] = client_rank
-            client_rank += 1
-
-            if active_edge_info_dict is not None:
-                edge_info = active_edge_info_dict.get(str(edge_id), {})
-                model_master_device_id = edge_info.get("master_device_id", None)
-                model_slave_device_id = edge_info.get("slave_device_id", None)
-                model_slave_device_id_list = edge_info.get("slave_device_id_list", None)
-
-                if should_match_gpu:
-                    request_json["scheduler_match_info"] = SchedulerMatcher.generate_match_info_for_scheduler(
-                        edge_id, edge_id_list, master_node_addr, master_node_port,
-                        assigned_gpu_num_dict, assigned_gpu_ids_dict,
-                        model_master_device_id=model_master_device_id,
-                        model_slave_device_id=model_slave_device_id,
-                        model_slave_device_id_list=model_slave_device_id_list
-                    )
-
-            self.message_center.send_message(topic_start_train, json.dumps(request_json))
-
     def setup_listeners_for_edge_status(self, run_id, edge_ids, server_id):
         edge_status_topic = "fl_client/flclient_agent_" + str(server_id) + "/status"
         payload = {"run_id": run_id, "init_all_edge_id_list": edge_ids, "init_server_id": server_id}
@@ -628,6 +498,18 @@ def send_training_stop_request_to_specific_edge(self, edge_id, payload):
         logging.info("stop_train: send topic " + topic_stop_train)
         self.message_center.send_message(topic_stop_train, payload)
 
+    def send_status_check_msg(self, run_id, edge_id, server_id, context=None):
+        topic_status_check = f"server/client/request_device_info/{edge_id}"
+        payload = {"server_id": server_id, "run_id": run_id}
+        if context is not None:
+            payload["context"] = context
+        self.message_center.send_message(topic_status_check, json.dumps(payload))
+
+    def send_status_msg_to_edges(self, edge_id_list, run_id, server_id, context=None):
+        # Send status message to all edges
+        for edge_id in edge_id_list:
+            self.send_status_check_msg(run_id, edge_id, server_id, context=context)
+
     def report_exception_status(self, run_id):
         self.status_reporter.report_job_status(run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION)
 
diff --git a/python/fedml/computing/scheduler/master/launch_job_runner.py b/python/fedml/computing/scheduler/master/launch_job_runner.py
index c28458fc0f..3f26da1ef7 100755
--- a/python/fedml/computing/scheduler/master/launch_job_runner.py
+++ b/python/fedml/computing/scheduler/master/launch_job_runner.py
@@ -19,7 +19,7 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id
     # Override
     def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None,):
         return FedMLLaunchMasterJobRunner(
-            args, run_id=run_id, request_json=request_json, agent_config=self.agent_config, edge_id=edge_id
+            args, run_id=run_id, request_json=request_json, agent_config=agent_config, edge_id=edge_id
         )
 
     # Override
diff --git a/python/fedml/computing/scheduler/master/server_daemon.py b/python/fedml/computing/scheduler/master/server_daemon.py
index bc02621a44..8fe85f3381 100755
--- a/python/fedml/computing/scheduler/master/server_daemon.py
+++ b/python/fedml/computing/scheduler/master/server_daemon.py
@@ -115,18 +115,24 @@
                 if os.path.exists(login_exit_file):
                     print(f"[Server] Login process is exited, check the exit file {login_exit_file}")
                     if retry_count > 3:
-                        print(f"Retry count is over 3 times, exit the process. Check the log file for more details. "
-                              f"Login logs: {login_logs}, Exit file: {login_exit_file}")
-                        exit(1)
+                        if args.role == ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_CLOUD_AGENT_INDEX]:
+                            retry_count = 0
+                        else:
+                            print(f"Retry count is over 3 times, exit the process. Check the log file for more details. "
+                                  f"Login logs: {login_logs}, Exit file: {login_exit_file}")
+                            exit(1)
                     retry_flag = True
 
                 if len(login_pids) == 0:
                     message = f"[Server] Login process is exited, check the log file {login_logs}"
                     print(message)
                     if retry_count >= 3:
-                        print(f"Retry count is over 3 times, exit the process. Check the log file for more details. "
-                              f"Login logs: {login_logs}, Exit file: {login_exit_file}")
-                        exit(1)
+                        if args.role == ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_CLOUD_AGENT_INDEX]:
+                            retry_count = 0
+                        else:
+                            print(f"Retry count is over 3 times, exit the process. Check the log file for more details. "
+                                  f"Login logs: {login_logs}, Exit file: {login_exit_file}")
+                            exit(1)
                     retry_flag = True
 
                 if retry_flag:
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_runner.py b/python/fedml/computing/scheduler/model_scheduler/device_client_runner.py
index 3c7d0fb05b..8bb03eebbd 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_client_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_client_runner.py
@@ -24,6 +24,8 @@
 import fedml
 from fedml import mlops
 from fedml.computing.scheduler.model_scheduler.device_model_msg_object import FedMLModelMsgObject
+from fedml.computing.scheduler.scheduler_core.compute_cache_manager import ComputeCacheManager
+
 from fedml.computing.scheduler.scheduler_core.compute_utils import ComputeUtils
 from fedml.core.distributed.communication.s3.remote_storage import S3Storage
 from .device_model_cache import FedMLModelCache
@@ -356,7 +358,6 @@ def run_impl(self):
                                                        ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT)
         inference_end_point_id = run_id
 
-        self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id)
         MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
 
         logging.info(f"[Worker] Received model deployment request from master for endpoint {run_id}.")
@@ -478,7 +479,6 @@ def run_impl(self):
         if op == "add":
             worker_ip = self.get_ip_address(self.request_json)
             for rank in range(prev_rank + 1, prev_rank + 1 + op_num):
-                # TODO: Support Rollback if this for loop failed
                 try:
                     running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
                         start_deployment(
@@ -496,6 +496,18 @@ def run_impl(self):
                 if inference_output_url == "":
                     logging.error("[Worker] Failed to deploy the model.")
 
+                    # Release the gpu occupancy
+                    FedMLModelCache.get_instance().set_redis_params()
+                    replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids(
+                        run_id, end_point_name, model_name, self.edge_id, rank + 1)
+                    logging.info(f"Release gpu ids {replica_occupied_gpu_ids_str} for "
+                                 f"failed deployment of replica no {rank + 1}.")
+
+                    if replica_occupied_gpu_ids_str is not None:
+                        replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)
+                        JobRunnerUtils.get_instance().release_partial_job_gpu(run_id,
+                                                                              self.edge_id, replica_occupied_gpu_ids)
+
                     # Send failed result back to master
                     result_payload = self.send_deployment_results(
                         end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
@@ -892,7 +904,7 @@ def callback_start_deployment(self, topic, payload):
         run_id = inference_end_point_id
         self.args.run_id = run_id
         self.args.edge_id = self.edge_id
-        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
+        MLOpsRuntimeLog(args=self.args).init_logs()
         MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source(
             ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT)
         MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id)
@@ -978,6 +990,26 @@ def callback_delete_deployment(self, topic, payload):
             model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name,
             self.edge_id)
 
+        # Delete FEDML_GLOBAL_ENDPOINT_RUN_ID_MAP_TAG-${run_id} both in redis and local db
+        ComputeCacheManager.get_instance().gpu_cache.delete_endpoint_run_id_map(str(model_msg_object.run_id))
+
+        # Delete FEDML_EDGE_ID_MODEL_DEVICE_ID_MAP_TAG-${run_id} both in redis and local db
+        ComputeCacheManager.get_instance().gpu_cache.delete_edge_model_id_map(str(model_msg_object.run_id))
+
+        # Delete FEDML_GLOBAL_DEVICE_RUN_GPU_IDS_TAG-${run_id}-${device_id} both in redis and local db
+        ComputeCacheManager.get_instance().gpu_cache.delete_device_run_gpu_ids(str(self.edge_id),
+                                                                               str(model_msg_object.run_id))
+
+        # Delete FEDML_GLOBAL_DEVICE_RUN_NUM_GPUS_TAG-${run_id}-${device_id} both in redis and local db
+        ComputeCacheManager.get_instance().gpu_cache.delete_device_run_num_gpus(str(self.edge_id),
+                                                                                str(model_msg_object.run_id))
+
+        # Delete FEDML_MODEL_REPLICA_GPU_IDS_TAG-${run_id}-${end_point_name}-${model_name}-${device_id}-*
+        FedMLModelCache.get_instance().set_redis_params()
+        FedMLModelCache.get_instance().delete_all_replica_gpu_ids(model_msg_object.run_id,
+                                                                  model_msg_object.end_point_name,
+                                                                  model_msg_object.model_name, self.edge_id)
+
     def exit_run_with_exception_entry(self):
         try:
             self.setup_client_mqtt_mgr()
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index 9b370d9ae4..bf476dd468 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -209,7 +209,7 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
         infer_host = "127.0.0.1"
 
     try:
-        client = docker.from_env()
+        client = docker.from_env(timeout=5, version="auto")
         if enable_custom_image and docker_registry_user_name != "" and docker_registry_user_password != "" \
                 and docker_registry != "":
             client.login(username=docker_registry_user_name, password=docker_registry_user_password,
@@ -466,7 +466,7 @@ def log_deployment_result(end_point_id, model_id, cmd_container_name, cmd_type,
             logging.info(f"Test: {inference_http_port}, Attempt: {deploy_attempt} / {deploy_attempt_threshold}")
 
             try:
-                client = docker.from_env()
+                client = docker.from_env(timeout=5, version="auto")
             except Exception:
                 logging.error("Failed to connect to the docker daemon, please ensure that you have "
                               "installed Docker Desktop or Docker Engine, and the docker is running")
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py b/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py
index 38acff8d82..4bcac6d2db 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py
@@ -23,6 +23,7 @@
 
 import fedml
 from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils
+from fedml.core.mlops.mlops_runtime_log import MLOpsFormatter
 
 from ..comm_utils import sys_utils
 from .device_server_data_interface import FedMLServerDataInterface
@@ -122,6 +123,8 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id
         self.replica_controller = None
         self.deployed_replica_payload = None
 
+        self.autoscaler_launcher = None
+
     def build_dynamic_constrain_variables(self, run_id, run_config):
         pass
 
@@ -304,6 +307,7 @@ def run_impl(self):
             inference_end_point_id, use_gpu, memory_size, model_version, inference_port = self.parse_model_run_params(
             self.request_json)
 
+        # TODO(Raphael): This measurement is for the host machine. Change to container's metrics
         self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id)
 
         self.check_runner_stop_event()
@@ -347,28 +351,38 @@ def run_impl(self):
         devices_sent_add_or_remove_msg = self.send_deployment_start_request_to_edges()
 
         # Handle "op:update"
-        devices_sent_update_remove_msg = self.send_first_scroll_update_msg()
-
-        if len(devices_sent_add_or_remove_msg) == 0 and len(devices_sent_update_remove_msg) == 0:
-            # No device is added, updated or removed
-            logging.info("No device is added, updated or removed. No action needed for reconciliation.")
-            ip = self.get_ip_address(self.request_json)
-            master_port = os.getenv("FEDML_MASTER_PORT", None)
-            if master_port is not None:
-                inference_port = int(master_port)
-            model_inference_port = inference_port
-            if ip.startswith("http://") or ip.startswith("https://"):
-                model_inference_url = "{}/api/v1/predict".format(ip)
-            else:
-                model_inference_url = "http://{}:{}/api/v1/predict".format(ip, model_inference_port)
+        try:
+            devices_sent_update_remove_msg = self.send_first_scroll_update_msg()
+
+            if len(devices_sent_add_or_remove_msg) == 0 and len(devices_sent_update_remove_msg) == 0:
+                # No device is added, updated or removed
+                logging.info("No device is added, updated or removed. No action needed for reconciliation.")
+                ip = self.get_ip_address(self.request_json)
+                master_port = os.getenv("FEDML_MASTER_PORT", None)
+                if master_port is not None:
+                    inference_port = int(master_port)
+                model_inference_port = inference_port
+                if ip.startswith("http://") or ip.startswith("https://"):
+                    model_inference_url = "{}/api/v1/predict".format(ip)
+                else:
+                    model_inference_url = "http://{}:{}/api/v1/predict".format(ip, model_inference_port)
 
-            self.set_runner_completed_event(run_id)
+                self.set_runner_completed_event(run_id)
 
-            self.send_deployment_status(run_id, end_point_name,
-                                        model_name,
-                                        model_inference_url,
-                                        ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED)
-            return
+                self.send_deployment_status(run_id, end_point_name,
+                                            model_name,
+                                            model_inference_url,
+                                            ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED)
+
+                # Set setting to "DEPLOYED" for autoscaling service reference
+                FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
+                FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
+                    update_user_setting_replica_num(end_point_id=run_id, state="DEPLOYED")
+
+                return
+        except Exception as e:
+            logging.error(f"Failed to send first scroll update message due to {e}.")
+            logging.error(f"Exception traceback {traceback.format_exc()}.")
 
         logging.info("Start waiting for result callback from workers ...")
 
@@ -437,6 +451,7 @@ def start_device_inference_gateway(
     def start_device_inference_monitor(self, run_id, end_point_name,
                                        model_id, model_name, model_version, check_stopped_event=True):
         # start inference monitor server
+        # Will report the QPS-related metrics to MLOps
         logging.info(f"start the model inference monitor, end point {run_id}, model name {model_name}...")
         if check_stopped_event:
             self.check_runner_stop_event()
@@ -563,10 +578,9 @@ def callback_deployment_result_message(self, topic=None, payload=None):
             filehandler = logging.FileHandler(log_file, "a")
 
             program_prefix = "FedML-Server @device-id-{}".format(self.edge_id)
-            formatter = logging.Formatter(fmt="[" + program_prefix + "] [%(asctime)s] [%(levelname)s] "
-                                                                     "[%(filename)s:%(lineno)d:%(funcName)s] %("
-                                                                     "message)s",
-                                          datefmt="%a, %d %b %Y %H:%M:%S")
+            formatter = MLOpsFormatter(fmt="[" + program_prefix + "] [%(asctime)s] [%(levelname)s] "
+                                                                  "[%(filename)s:%(lineno)d:%(funcName)s] %("
+                                                                  "message)s")
 
             filehandler.setFormatter(formatter)
             root_logger.addHandler(filehandler)
@@ -630,21 +644,54 @@ def callback_deployment_result_message(self, topic=None, payload=None):
             if model_status != ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED:
                 logging.error(f"Unsupported model status {model_status}.")
 
-            # Failure handler
-            if run_operation == "ADD_OR_REMOVE":
-                # TODO(Raphael): Also support rollback for scale out / in operation
+            # Avoid an endless loop: if the rollback also failed, report the failure to MLOps
+            if self.model_runner_mapping[run_id_str].replica_controller.under_rollback:
                 self.send_deployment_status(
                     end_point_id, end_point_name, payload_json["model_name"], "",
                     ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
                 return
-            elif run_operation == "UPDATE":
-                # Send the rollback message to the worker devices only if it has not been rollback
-                if self.model_runner_mapping[run_id_str].replica_controller.under_rollback:
+
+            # Failure handler, send the rollback message to the worker devices only if it has not been rollback
+            if run_operation == "ADD_OR_REMOVE":
+                # During scale out / in, any worker that has already been
+                # scaled out / in should be sent the rollback message
+                rollback_dict = self.model_runner_mapping[run_id_str].replica_controller.rollback_add_or_remove_replica(
+                    device_id=device_id, replica_no=replica_no, op_type=run_operation
+                )
+                self.model_runner_mapping[run_id_str].replica_controller.under_rollback = True
+
+                if rollback_dict is not None and len(rollback_dict) > 0:
                     self.send_deployment_status(
                         end_point_id, end_point_name, payload_json["model_name"], "",
-                        ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
+                        ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTING)
+                    self.send_rollback_add_remove_op(run_id_str, rollback_dict)
                     return
+                else:
+                    # This is the last worker that failed, so we should continue to "ABORTED" status
+                    model_config_parameters = self.running_request_json[run_id_str]["parameters"]
+                    inference_port = model_config_parameters.get("server_internal_port",
+                                                                 ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
+                    inference_port_external = model_config_parameters.get("server_external_port", inference_port)
+                    ip = self.get_ip_address(self.running_request_json[run_id_str])
+                    if ip.startswith("http://") or ip.startswith("https://"):
+                        model_inference_url = "{}/inference/{}".format(ip, end_point_id)
+                    else:
+                        model_inference_url = "http://{}:{}/inference/{}".format(ip, inference_port_external,
+                                                                                 end_point_id)
+
+                    self.send_deployment_status(end_point_id, end_point_name,
+                                                payload_json["model_name"],
+                                                model_inference_url,
+                                                ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED)
+
+                    # For auto-scaling, should update the state to "DEPLOYED"
+                    FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
+                        update_user_setting_replica_num(end_point_id=end_point_id, state="DEPLOYED")
+
+                    self.model_runner_mapping[run_id_str].replica_controller.under_rollback = False
 
+                    return
+            elif run_operation == "UPDATE":
                 # Overwrite the json with the rollback version diff
                 rollback_version_diff = \
                     self.model_runner_mapping[run_id_str].replica_controller.rollback_get_replica_version_diff(
@@ -698,9 +745,9 @@ def callback_deployment_result_message(self, topic=None, payload=None):
         # Wait for all replica-level's result, not device-level
         if (self.model_runner_mapping[run_id_str].replica_controller.is_all_replica_num_reconciled() and
                 self.model_runner_mapping[run_id_str].replica_controller.is_all_replica_version_reconciled()):
-            '''
+            """
             When all the devices have finished the add / delete / update operation
-            '''
+            """
             # Generate one unified inference api
             # Note that here we use the gateway port instead of the inference port that is used by the slave device
             model_config_parameters = request_json["parameters"]
@@ -755,16 +802,28 @@ def callback_deployment_result_message(self, topic=None, payload=None):
                 # Arrive here because only contains remove ops, so we do not need to update the model metadata
                 pass
 
+            # For auto-scaling, should update the state to "DEPLOYED"
             FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                set_end_point_activation(end_point_id, end_point_name, True)
+                update_user_setting_replica_num(end_point_id=end_point_id, state="DEPLOYED")
 
             if self.model_runner_mapping[run_id_str].replica_controller.under_rollback:
-                self.send_deployment_status(end_point_id, end_point_name,
-                                            payload_json["model_name"],
-                                            model_inference_url,
-                                            ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED)
+                # If it failed the first time (might still need rollback), send a failed message to MLOps
+                if not (FedMLModelCache.get_instance(self.redis_addr, self.redis_port).
+                        get_end_point_activation(end_point_id)):
+                    self.send_deployment_status(
+                        end_point_id, end_point_name, payload_json["model_name"], "",
+                        ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
+                else:
+                    self.send_deployment_status(end_point_id, end_point_name,
+                                                payload_json["model_name"],
+                                                model_inference_url,
+                                                ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED)
                 self.model_runner_mapping[run_id_str].replica_controller.under_rollback = False
             else:
+                # Set the end point activation status to True, for scaling out / in and rolling update
+                FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
+                    set_end_point_activation(end_point_id, end_point_name, True)
+
                 self.send_deployment_status(end_point_id, end_point_name,
                                             payload_json["model_name"],
                                             model_inference_url,
@@ -781,7 +840,10 @@ def callback_deployment_status_message(self, topic=None, payload=None):
             topic, payload))
         pass
 
-    def send_deployment_start_request_to_edges(self):
+    def send_deployment_start_request_to_edges(self, in_request_json=None):
+        if in_request_json is not None:
+            self.request_json = in_request_json
+
         # Iterate through replica_num_diff, both add and replace should be sent to the edge devices
         if "replica_num_diff" not in self.request_json or self.request_json["replica_num_diff"] is None:
             return []
@@ -895,15 +957,36 @@ def callback_start_deployment(self, topic, payload):
 
         model_config = request_json["model_config"]
         model_name = model_config["model_name"]
+        model_version = model_config["model_version"]
         model_id = model_config["model_id"]
         model_storage_url = model_config["model_storage_url"]
         scale_min = model_config.get("instance_scale_min", 0)
         scale_max = model_config.get("instance_scale_max", 0)
         inference_engine = model_config.get("inference_engine", 0)
+        enable_auto_scaling = request_json.get("enable_auto_scaling", False)
+        desired_replica_num = request_json.get("desired_replica_num", 1)
+
+        target_queries_per_replica = request_json.get("target_queries_per_replica", 10)
+        aggregation_window_size_seconds = request_json.get("aggregation_window_size_seconds", 60)
+        scale_down_delay_seconds = request_json.get("scale_down_delay_seconds", 120)
+
         inference_end_point_id = run_id
 
         logging.info("[Master] received start deployment request for end point {}.".format(run_id))
 
+        # Set redis config
+        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
+
+        # Save the user setting (about replica number) of this run to Redis, if existed, update it
+        FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_user_setting_replica_num(
+            end_point_id=run_id, end_point_name=end_point_name, model_name=model_name, model_version=model_version,
+            replica_num=desired_replica_num, enable_auto_scaling=enable_auto_scaling,
+            scale_min=scale_min, scale_max=scale_max, state="DEPLOYING",
+            aggregation_window_size_seconds=aggregation_window_size_seconds,
+            target_queries_per_replica=target_queries_per_replica,
+            scale_down_delay_seconds=int(scale_down_delay_seconds)
+        )
+
         # Start log processor for current run
         self.args.run_id = run_id
         self.args.edge_id = self.edge_id
@@ -912,6 +995,7 @@ def callback_start_deployment(self, topic, payload):
             ServerConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT)
         MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id)
 
+        # # Deprecated
         # self.ota_upgrade(payload, request_json)
 
         # Add additional parameters to the request_json
@@ -924,8 +1008,7 @@ def callback_start_deployment(self, topic, payload):
         self.running_request_json[run_id_str] = request_json
         self.request_json["master_node_ip"] = self.get_ip_address(self.request_json)
 
-        # Target status of the devices
-        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
+        # Set the target status of the devices to redis
         FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
             set_end_point_device_info(request_json["end_point_id"], end_point_name, json.dumps(device_objs))
 
@@ -944,7 +1027,7 @@ def callback_start_deployment(self, topic, payload):
                                     "",
                                     ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"],
                                     ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"],
-                                    "Received request for end point {}".format(run_id))
+                                    "Received request for endpoint {}".format(run_id))
 
         # Report stage to mlops: MODEL_DEPLOYMENT_STAGE2 = "Initializing"
         self.send_deployment_stages(self.run_id, model_name, model_id,
@@ -1078,7 +1161,7 @@ def delete_device_replica_info_on_master(self, endpoint_id, endpoint_name, model
                 delete_item, endpoint_id, endpoint_name, model_name
             )
 
-        logging.info(f"Deleted the record of the replaced device {delete_device_result_list}")
+        logging.info(f"Deleted the replica record on master: {edge_id_replica_no_dict}")
 
     def send_next_scroll_update_msg(self, run_id_str, device_id, replica_no):
         """
@@ -1128,6 +1211,20 @@ def send_next_scroll_update_msg(self, run_id_str, device_id, replica_no):
                 self.send_deployment_start_request_to_edge(edge_id, self.running_request_json[run_id_str])
         return
 
+    def send_rollback_add_remove_op(self, run_id, rollback_replica_dict):
+        """
+        This method is used when the original add op failed; we need to roll back by deleting the existing replicas
+        Input example:
+        rollback_replica_dict = {'96684': {'curr_num': 2, 'op': 'remove', 'target_num': 1}}
+        """
+        existed_request_json = self.running_request_json[str(run_id)]
+        updated_request_json = copy.deepcopy(existed_request_json)
+
+        # Reverse the replica_num_diff
+        updated_request_json["replica_num_diff"] = rollback_replica_dict
+
+        self.send_deployment_start_request_to_edges(in_request_json=updated_request_json)
+
     def callback_activate_deployment(self, topic, payload):
         logging.info("callback_activate_deployment: topic = %s, payload = %s" % (topic, payload))
 
@@ -1183,7 +1280,15 @@ def callback_delete_deployment(self, topic, payload):
         # Parse payload as the model message object.
         model_msg_object = FedMLModelMsgObject(topic, payload)
 
-        # Set end point as deactivated status
+        # Delete SQLite records
+        FedMLServerDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id)
+        FedMLModelDatabase.get_instance().delete_deployment_result(
+            model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name,
+            model_version=model_msg_object.model_version)
+        FedMLModelDatabase.get_instance().delete_deployment_run_info(
+            end_point_id=model_msg_object.inference_end_point_id)
+
+        # Delete Redis Records
         FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
         FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
             set_end_point_activation(model_msg_object.inference_end_point_id,
@@ -1192,21 +1297,15 @@ def callback_delete_deployment(self, topic, payload):
             delete_end_point(model_msg_object.inference_end_point_id, model_msg_object.end_point_name,
                              model_msg_object.model_name, model_msg_object.model_version)
 
+        # Send delete deployment request to the edge devices
         self.send_deployment_delete_request_to_edges(payload, model_msg_object)
 
+        # Stop processes on master
         self.set_runner_stopped_event(model_msg_object.run_id)
-
         self.stop_device_inference_monitor(model_msg_object.run_id, model_msg_object.end_point_name,
                                            model_msg_object.model_id, model_msg_object.model_name,
                                            model_msg_object.model_version)
 
-        FedMLServerDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id)
-        FedMLModelDatabase.get_instance().delete_deployment_result(
-            model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name,
-            model_version=model_msg_object.model_version)
-        FedMLModelDatabase.get_instance().delete_deployment_run_info(
-            end_point_id=model_msg_object.inference_end_point_id)
-
     def send_deployment_results_with_payload(self, end_point_id, end_point_name, payload, replica_id_list=None):
         self.send_deployment_results(end_point_id, end_point_name,
                                      payload["model_name"], payload["model_url"],
diff --git a/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py
index 104dacf716..482a21b2d4 100755
--- a/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py
+++ b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py
@@ -90,7 +90,10 @@ def send_deployment_stages(end_point_id, model_name, model_id, model_inference_u
         logging.info(f"-------- Stages has been sent to mlops with stage {model_stages_index} and "
                      f"payload {deployment_stages_payload}")
 
-    def send_deployment_start_request_to_edges(self):
+    def send_deployment_start_request_to_edges(self, in_request_json=None):
+        if in_request_json is not None:
+            self.request_json = in_request_json
+
         # Iterate through replica_num_diff, both add and replace should be sent to the edge devices
         if "replica_num_diff" not in self.request_json or self.request_json["replica_num_diff"] is None:
             return []
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index d1cc68dc98..13876d0184 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -9,7 +9,8 @@
 from multiprocessing import Queue
 
 import fedml
-from fedml.core.mlops import MLOpsRuntimeLog
+from fedml.core.mlops import MLOpsRuntimeLog, MLOpsConfigs
+from fedml.core.mlops.mlops_runtime_log import MLOpsFormatter
 from .device_client_constants import ClientConstants
 from .device_model_cache import FedMLModelCache
 from .device_server_constants import ServerConstants
@@ -82,6 +83,7 @@ def run_impl(
         self.replica_controller = FedMLDeviceReplicaController(self.edge_id, self.request_json)
 
         # Start the process to report system performance(cpu,memory,etc.) to MLOps
+        # TODO(Raphael): This measurement is for the host machine. Change to container's metrics
         self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id)
 
         # Check if we should stop the runner
@@ -140,7 +142,7 @@ def run_impl(
             devices_sent_update_remove_msg = self.send_first_scroll_update_msg()
 
             if len(devices_sent_add_or_remove_msg) == 0 and len(devices_sent_update_remove_msg) == 0:
-                # No device is added or removed, and no device is updated or removed
+                # No device is added, updated or removed
                 logging.info("No device is added, updated or removed. No action needed for reconciliation.")
                 ip = GeneralConstants.get_ip_address(self.request_json)
                 master_port = os.getenv("FEDML_MASTER_PORT", None)
@@ -158,10 +160,20 @@ def run_impl(
                     message_center=self.message_center
                 )
 
+                # Set setting to "DEPLOYED" for autoscaling service reference
+                FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
+                FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
+                    update_user_setting_replica_num(end_point_id=run_id, state="DEPLOYED")
+
+                # Complete the job runner
                 self.trigger_completed_event()
+
                 return
         except Exception as e:
-            logging.info(f"Exception at run impl {traceback.format_exc()}")
+            logging.error(f"Failed to send first scroll update message due to {e}.")
+            logging.error(f"Exception traceback {traceback.format_exc()}.")
+
+        logging.info("Start waiting for result callback from workers ...")
 
         self.deployment_result_queue = run_extend_queue_list[0]
         while True:
@@ -210,10 +222,9 @@ def process_deployment_result_message(self, topic=None, payload=None):
             filehandler = logging.FileHandler(log_file, "a")
 
             program_prefix = "FedML-Server @device-id-{}".format(self.edge_id)
-            formatter = logging.Formatter(fmt="[" + program_prefix + "] [%(asctime)s] [%(levelname)s] "
-                                                                     "[%(filename)s:%(lineno)d:%(funcName)s] %("
-                                                                     "message)s",
-                                          datefmt="%a, %d %b %Y %H:%M:%S")
+            formatter = MLOpsFormatter(fmt="[" + program_prefix + "] [%(asctime)s] [%(levelname)s] "
+                                                                  "[%(filename)s:%(lineno)d:%(funcName)s] %("
+                                                                  "message)s")
 
             filehandler.setFormatter(formatter)
             root_logger.addHandler(filehandler)
@@ -270,15 +281,55 @@ def process_deployment_result_message(self, topic=None, payload=None):
             if model_status != ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED:
                 logging.error(f"Unsupported model status {model_status}.")
 
-            # Failure handler
-            if run_operation == "ADD_OR_REMOVE":
-                # TODO(Raphael): Also support rollback for scale out / in operation
+            # Avoid an endless loop: if the rollback also failed, report the failure to MLOps
+            if self.replica_controller.under_rollback:
                 self.send_deployment_status(
                     end_point_id, end_point_name, payload_json["model_name"], "",
                     ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
                     message_center=self.message_center)
                 return
-            else:
+
+            # Failure handler, send the rollback message to the worker devices only if it has not been rollback
+            if run_operation == "ADD_OR_REMOVE":
+                # During scale out / in, any worker that has already been
+                # scaled out / in should be sent the rollback message
+                rollback_dict = self.replica_controller.rollback_add_or_remove_replica(
+                    device_id=device_id, replica_no=replica_no, op_type=run_operation
+                )
+                self.replica_controller.under_rollback = True
+
+                if rollback_dict is not None and len(rollback_dict) > 0:
+                    self.send_deployment_status(
+                        end_point_id, end_point_name, payload_json["model_name"], "",
+                        ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTING,
+                        message_center=self.message_center)
+                    self.send_rollback_add_remove_op(run_id_str, rollback_dict)
+                    return
+                else:
+                    # This is the last worker that failed, so we should continue to "ABORTED" status
+                    model_config_parameters = self.running_request_json[run_id_str]["parameters"]
+                    inference_port = model_config_parameters.get("server_internal_port",
+                                                                 ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
+                    inference_port_external = model_config_parameters.get("server_external_port", inference_port)
+                    ip = GeneralConstants.get_ip_address(self.request_json)
+                    if ip.startswith("http://") or ip.startswith("https://"):
+                        model_inference_url = "{}/inference/{}".format(ip, end_point_id)
+                    else:
+                        model_inference_url = "http://{}:{}/inference/{}".format(ip, inference_port_external,
+                                                                                 end_point_id)
+
+                    self.send_deployment_status(
+                        end_point_id, end_point_name, payload_json["model_name"], model_inference_url,
+                        ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED, message_center=self.message_center)
+
+                    # For auto-scaling, should update the state to "DEPLOYED"
+                    FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
+                        update_user_setting_replica_num(end_point_id=end_point_id, state="DEPLOYED")
+
+                    self.replica_controller.under_rollback = False
+
+                    return
+            elif run_operation == "UPDATE":
                 # Overwrite the json with the rollback version diff
                 rollback_version_diff = self.replica_controller.rollback_get_replica_version_diff(
                         device_id_trigger=device_id, replica_no_trigger=replica_no)
@@ -325,9 +376,9 @@ def process_deployment_result_message(self, topic=None, payload=None):
         # Wait for all replica-level's result, not device-level
         if (self.replica_controller.is_all_replica_num_reconciled() and
                 self.replica_controller.is_all_replica_version_reconciled()):
-            '''
+            """
             When all the devices have finished the add / delete / update operation
-            '''
+            """
             # Generate one unified inference api
             # Note that here we use the gateway port instead of the inference port that is used by the slave device
             model_config_parameters = request_json["parameters"]
@@ -383,21 +434,31 @@ def process_deployment_result_message(self, topic=None, payload=None):
                 # Arrive here because only contains remove ops, so we do not need to update the model metadata
                 pass
 
+            # For auto-scaling, should update the state to "DEPLOYED"
             FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                set_end_point_activation(end_point_id, end_point_name, True)
+                update_user_setting_replica_num(end_point_id=end_point_id, state="DEPLOYED")
 
             if self.replica_controller.under_rollback:
-                self.send_deployment_status(end_point_id, end_point_name,
-                                            payload_json["model_name"],
-                                            model_inference_url,
-                                            ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED)
+                # If the first attempt failed (rollback may still be needed), send a failed status to MLOps
+                if not (FedMLModelCache.get_instance(self.redis_addr, self.redis_port).
+                        get_end_point_activation(end_point_id)):
+                    self.send_deployment_status(
+                        end_point_id, end_point_name, payload_json["model_name"], "",
+                        ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, message_center=self.message_center)
+                else:
+                    self.send_deployment_status(
+                        end_point_id, end_point_name, payload_json["model_name"], model_inference_url,
+                        ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED, message_center=self.message_center)
+
                 self.replica_controller.under_rollback = False
             else:
-                self.send_deployment_status(end_point_id, end_point_name,
-                                            payload_json["model_name"],
-                                            model_inference_url,
-                                            ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
-                                            message_center=self.message_center)
+                # Set the end point activation status to True, for scaling out / in and rolling update
+                FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
+                    set_end_point_activation(end_point_id, end_point_name, True)
+
+                self.send_deployment_status(
+                    end_point_id, end_point_name, payload_json["model_name"], model_inference_url,
+                    ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, message_center=self.message_center)
 
             time.sleep(3)
             self.trigger_completed_event()
@@ -457,6 +518,7 @@ def start_device_inference_monitor(
             redis_addr=None, redis_port=None, redis_password=None
     ):
         # start inference monitor server
+        # Will report the QPS-related metrics to MLOps
         logging.info(f"start the model inference monitor, end point {run_id}, model name {model_name}...")
         run_id_str = str(run_id)
         pip_source_dir = os.path.dirname(__file__)
@@ -503,8 +565,15 @@ def recover_inference_and_monitor(redis_addr=None, redis_port=None, redis_passwo
                 if not is_activated:
                     continue
 
+                agent_config = dict()
+                try:
+                    agent_config["mqtt_config"], _, _, _ = MLOpsConfigs.fetch_all_configs()
+                except Exception as e:
+                    pass
+
                 FedMLDeployMasterJobRunner.start_device_inference_gateway(
-                    run_id, end_point_name, model_id, model_name, model_version, inference_port=inference_port)
+                    run_id, end_point_name, model_id, model_name, model_version, inference_port=inference_port,
+                    agent_config=agent_config, redis_addr=redis_addr, redis_port=redis_port, redis_password=redis_password)
 
                 FedMLDeployMasterJobRunner.stop_device_inference_monitor(
                     run_id, end_point_name, model_id, model_name, model_version)
@@ -614,6 +683,20 @@ def send_rollback_msg(self, run_id_str):
             # send start deployment request to each device
             self.send_deployment_start_request_to_edge(edge_id, self.request_json)
 
+    def send_rollback_add_remove_op(self, run_id, rollback_replica_dict):
+        """
+        This method is used when the original add op failed; we need to roll back by deleting the existing replicas.
+        Input example:
+        rollback_replica_dict = {'96684': {'curr_num': 2, 'op': 'remove', 'target_num': 1}}
+        """
+        existed_request_json = self.request_json
+        updated_request_json = copy.deepcopy(existed_request_json)
+
+        # Reverse the replica_num_diff
+        updated_request_json["replica_num_diff"] = rollback_replica_dict
+
+        self.send_deployment_start_request_to_edges(in_request_json=updated_request_json)
+
     def delete_device_replica_info_on_master(self, endpoint_id, endpoint_name, model_name, edge_id_replica_no_dict):
         FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
         # Remove the record of the replaced device
@@ -635,7 +718,7 @@ def delete_device_replica_info_on_master(self, endpoint_id, endpoint_name, model
                 delete_item, endpoint_id, endpoint_name, model_name
             )
 
-        logging.info(f"Deleted the record of the replaced device {delete_device_result_list}")
+        logging.info(f"Deleted the replica record on master: {edge_id_replica_no_dict}")
 
     def save_deployed_replica_payload(self, payload_json):
         self.deployed_replica_payload = copy.deepcopy(payload_json)
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index 962dcbbcb3..144d17fd02 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -105,7 +105,15 @@ def callback_delete_deployment(self, topic, payload):
         # Parse payload as the model message object.
         model_msg_object = FedMLModelMsgObject(topic, payload)
 
-        # Set end point as deactivated status
+        # Delete SQLite records
+        FedMLServerDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id)
+        FedMLModelDatabase.get_instance().delete_deployment_result(
+            model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name,
+            model_version=model_msg_object.model_version)
+        FedMLModelDatabase.get_instance().delete_deployment_run_info(
+            end_point_id=model_msg_object.inference_end_point_id)
+
+        # Delete Redis Records
         FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
         FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
             set_end_point_activation(model_msg_object.inference_end_point_id,
@@ -114,22 +122,16 @@ def callback_delete_deployment(self, topic, payload):
             delete_end_point(model_msg_object.inference_end_point_id, model_msg_object.end_point_name,
                              model_msg_object.model_name, model_msg_object.model_version)
 
+        # Send delete deployment request to the edge devices
         FedMLDeployJobRunnerManager.get_instance().send_deployment_delete_request_to_edges(
             model_msg_object.run_id, payload, model_msg_object, message_center=self.message_center)
 
+        # Stop processes on master
         FedMLDeployJobRunnerManager.get_instance().stop_job_runner(model_msg_object.run_id)
-
         FedMLDeployJobRunnerManager.get_instance().stop_device_inference_monitor(
             model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_id,
             model_msg_object.model_name, model_msg_object.model_version)
 
-        FedMLServerDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id)
-        FedMLModelDatabase.get_instance().delete_deployment_result(
-            model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name,
-            model_version=model_msg_object.model_version)
-        FedMLModelDatabase.get_instance().delete_deployment_run_info(
-            end_point_id=model_msg_object.inference_end_point_id)
-
     def callback_start_deployment(self, topic, payload):
         # noinspection PyBroadException
         try:
@@ -149,15 +151,36 @@ def callback_start_deployment(self, topic, payload):
 
         model_config = request_json["model_config"]
         model_name = model_config["model_name"]
+        model_version = model_config["model_version"]
         model_id = model_config["model_id"]
         model_storage_url = model_config["model_storage_url"]
         scale_min = model_config.get("instance_scale_min", 0)
         scale_max = model_config.get("instance_scale_max", 0)
         inference_engine = model_config.get("inference_engine", 0)
+        enable_auto_scaling = request_json.get("enable_auto_scaling", False)
+        desired_replica_num = request_json.get("desired_replica_num", 1)
+
+        target_queries_per_replica = request_json.get("target_queries_per_replica", 10)
+        aggregation_window_size_seconds = request_json.get("aggregation_window_size_seconds", 60)
+        scale_down_delay_seconds = request_json.get("scale_down_delay_seconds", 120)
+
         inference_end_point_id = run_id
 
         logging.info("[Master] received start deployment request for end point {}.".format(run_id))
 
+        # Set redis config
+        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
+
+        # Save the user setting (about replica number) of this run to Redis, if existed, update it
+        FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_user_setting_replica_num(
+            end_point_id=run_id, end_point_name=end_point_name, model_name=model_name, model_version=model_version,
+            replica_num=desired_replica_num, enable_auto_scaling=enable_auto_scaling,
+            scale_min=scale_min, scale_max=scale_max, state="DEPLOYING",
+            aggregation_window_size_seconds=aggregation_window_size_seconds,
+            target_queries_per_replica=target_queries_per_replica,
+            scale_down_delay_seconds=int(scale_down_delay_seconds)
+        )
+
         # Start log processor for current run
         self.args.run_id = run_id
         self.args.edge_id = self.edge_id
@@ -176,8 +199,7 @@ def callback_start_deployment(self, topic, payload):
         self.running_request_json[run_id_str] = request_json
         self.request_json["master_node_ip"] = GeneralConstants.get_ip_address(self.request_json)
 
-        # Target status of the devices
-        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
+        # Set the target status of the devices to redis
         FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
             set_end_point_device_info(request_json["end_point_id"], end_point_name, json.dumps(device_objs))
 
@@ -194,7 +216,7 @@ def callback_start_deployment(self, topic, payload):
         # Report stage to mlops: MODEL_DEPLOYMENT_STAGE1 = "Received"
         FedMLDeployJobRunnerManager.get_instance().send_deployment_stages(
             self.run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"],
-            ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"], "Received request for end point {}".format(run_id),
+            ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"], "Received request for endpoint {}".format(run_id),
             message_center=self.message_center)
 
         # Report stage to mlops: MODEL_DEPLOYMENT_STAGE2 = "Initializing"
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
index 78e2527e0c..d1cfd3b83c 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
@@ -77,6 +77,18 @@ def get_model_bin_file(unzip_package_full_path):
     def update_local_fedml_config(self, run_id, model_config, model_config_parameters=None):
         model_name = model_config["model_name"]
         model_storage_url = model_config["model_storage_url"]
+        end_point_name = self.request_json["end_point_name"]
+        model_version = model_config["model_version"]
+
+        # Generate the model package dir for downloading.
+        model_version = model_version.replace(" ", "-")     # Avoid using space for folder name
+        model_version = model_version.replace(":", "-")     # Since docker mount will conflict with ":"
+        local_package_path = ClientConstants.get_model_package_dir()
+        os.makedirs(local_package_path, exist_ok=True)
+        this_run_model_dir = f"{run_id}_{end_point_name}_{model_name}_{model_version}"
+        this_run_model_full_path = os.path.join(local_package_path, this_run_model_dir)
+        self.agent_package_download_dir = this_run_model_full_path
+        self.agent_package_unzip_dir = this_run_model_full_path
 
         # Retrieve model package or model binary file.
         if self.model_is_from_open:
@@ -127,7 +139,6 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                                                        ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT)
         inference_end_point_id = run_id
 
-        self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id)
         MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
 
         logging.info(f"[Worker] Received model deployment request from master for endpoint {run_id}.")
@@ -250,7 +261,6 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
         if op == "add":
             worker_ip = GeneralConstants.get_ip_address(self.request_json)
             for rank in range(prev_rank + 1, prev_rank + 1 + op_num):
-                # TODO: Support Rollback if this for loop failed
                 try:
                     running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
                         start_deployment(
@@ -392,15 +402,17 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                 if inference_output_url == "":
                     logging.error("Failed to deploy the model...")
 
-                    # If update failed, should release this replica's gpu
+                    # Release the gpu occupancy
                     FedMLModelCache.get_instance().set_redis_params()
                     replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids(
                         run_id, end_point_name, model_name, self.edge_id, rank + 1)
+                    logging.info(f"Release gpu ids {replica_occupied_gpu_ids_str} for "
+                                 f"failed deployment of replica no {rank + 1}.")
 
-                    replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)
-
-                    JobRunnerUtils.get_instance().release_partial_job_gpu(
-                        run_id, self.edge_id, replica_occupied_gpu_ids)
+                    if replica_occupied_gpu_ids_str is not None:
+                        replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)
+                        JobRunnerUtils.get_instance().release_partial_job_gpu(
+                            run_id, self.edge_id, replica_occupied_gpu_ids)
 
                     result_payload = self.send_deployment_results(
                         end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
index 43bb3c4582..5f4835d9aa 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
@@ -15,6 +15,8 @@
 from ..slave.base_slave_protocol_manager import FedMLBaseSlaveProtocolManager
 from .worker_job_runner_manager import FedMLDeployJobRunnerManager
 from .device_mqtt_inference_protocol import FedMLMqttInference
+from ..scheduler_core.compute_cache_manager import ComputeCacheManager
+from .device_model_cache import FedMLModelCache
 
 
 class FedMLDeployWorkerProtocolManager(FedMLBaseSlaveProtocolManager):
@@ -141,7 +143,7 @@ def callback_start_deployment(self, topic, payload):
         run_id = inference_end_point_id
         self.args.run_id = run_id
         self.args.edge_id = self.edge_id
-        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
+        MLOpsRuntimeLog(args=self.args).init_logs()
         MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source(
             ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT)
         MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id)
@@ -193,3 +195,23 @@ def callback_delete_deployment(self, topic, payload):
         FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id(
             model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name,
             self.edge_id)
+
+        # Delete FEDML_GLOBAL_ENDPOINT_RUN_ID_MAP_TAG-${run_id} both in redis and local db
+        ComputeCacheManager.get_instance().gpu_cache.delete_endpoint_run_id_map(str(model_msg_object.run_id))
+
+        # Delete FEDML_EDGE_ID_MODEL_DEVICE_ID_MAP_TAG-${run_id} both in redis and local db
+        ComputeCacheManager.get_instance().gpu_cache.delete_edge_model_id_map(str(model_msg_object.run_id))
+
+        # Delete FEDML_GLOBAL_DEVICE_RUN_GPU_IDS_TAG-${run_id}-${device_id} both in redis and local db
+        ComputeCacheManager.get_instance().gpu_cache.delete_device_run_gpu_ids(str(self.edge_id),
+                                                                               str(model_msg_object.run_id))
+
+        # Delete FEDML_GLOBAL_DEVICE_RUN_NUM_GPUS_TAG-${run_id}-${device_id} both in redis and local db
+        ComputeCacheManager.get_instance().gpu_cache.delete_device_run_num_gpus(str(self.edge_id),
+                                                                                str(model_msg_object.run_id))
+
+        # Delete FEDML_MODEL_REPLICA_GPU_IDS_TAG-${run_id}-${end_point_name}-${model_name}-${device_id}-*
+        FedMLModelCache.get_instance().set_redis_params()
+        FedMLModelCache.get_instance().delete_all_replica_gpu_ids(model_msg_object.run_id,
+                                                                  model_msg_object.end_point_name,
+                                                                  model_msg_object.model_name, self.edge_id)
diff --git a/python/fedml/computing/scheduler/scheduler_core/general_constants.py b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
index e642cacf1b..ba8842b30e 100755
--- a/python/fedml/computing/scheduler/scheduler_core/general_constants.py
+++ b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
@@ -51,6 +51,8 @@ class GeneralConstants:
     FEDML_OTA_CMD_UPGRADE = "upgrade"
     FEDML_OTA_CMD_RESTART = "restart"
 
+    FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT = "MODEL_END_POINT"
+
     @staticmethod
     def get_package_unzip_dir(package_download_dir):
         package_unzip_dir = package_download_dir
diff --git a/python/fedml/computing/scheduler/scheduler_core/message_center.py b/python/fedml/computing/scheduler/scheduler_core/message_center.py
index dcf21d33b7..869ed6e510 100755
--- a/python/fedml/computing/scheduler/scheduler_core/message_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/message_center.py
@@ -218,7 +218,7 @@ def run_sender(self, message_event, message_queue, message_center_name):
                     message_body = None
                 if message_body is None:
                     time.sleep(0.1)
-                    self.retry_sending_undelivered_message()
+                    # self.retry_sending_undelivered_message()
                     continue
 
                 # Generate the message entity object
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
index 46f1e7ff8f..0b4d47d52c 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -2,11 +2,11 @@
 import logging
 import os
 import platform
+import random
 import shutil
+import time
 import traceback
-import urllib
 import zipfile
-from urllib.parse import urljoin, urlparse
 from ..comm_utils.constants import SchedulerConstants
 from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs
 from ..scheduler_entry.constants import Constants
@@ -82,6 +82,8 @@ def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id
             "${FEDSYS.CLIENT_OBJECT_LIST}": "",
             "${FEDSYS.LOG_SERVER_URL}": "",
         }
+        self.download_time = time.time()
+        self.download_finished = False
 
     def __repr__(self):
         return "<{klass} @{id:x} {attrs}>".format(
@@ -154,18 +156,91 @@ def package_download_progress(self, count, blksize, filesize):
             self.prev_download_progress = progress_int
             logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int))
 
+    def download_package_proc(self, package_url, local_package_file):
+        import requests
+        headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
+                                 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'}
+        user_agent_list = [
+            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Safari/605.1.15',
+            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0',
+            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',
+            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:77.0) Gecko/20100101 Firefox/77.0',
+            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',
+        ]
+        for _ in user_agent_list:
+            user_agent = random.choice(user_agent_list)
+            headers = {'User-Agent': user_agent}
+
+        # Set the stream to true so that we can reduce the memory footprint when downloading large files.
+        request = requests.get(package_url, headers=headers, timeout=(10, 15), stream=True)
+        with open(local_package_file, 'wb') as f:
+            # 1024 * 1024 is 1MiB
+            download_size = 1024 * 1024
+            total_size = 0
+            for chunk in request.iter_content(download_size):
+                # Write the chunk to the file
+                written_size = f.write(chunk)
+                total_size += written_size
+                logging.info(f"package downloaded size {total_size/1024} KB")
+                self.download_time = time.time()
+        self.download_finished = True
+
     def retrieve_and_unzip_package(self, package_name, package_url):
         local_package_path = self.agent_package_download_dir
         os.makedirs(local_package_path, exist_ok=True)
         filename, filename_without_extension, file_extension = GeneralConstants.get_filename_and_extension(package_url)
-        local_package_file = os.path.join(local_package_path, f"fedml_run_{self.run_id}_{filename_without_extension}")
+        local_package_file = os.path.join(
+            local_package_path, f"fedml_run_{self.run_id}_{self.edge_id}_{filename_without_extension}")
         if os.path.exists(local_package_file):
             os.remove(local_package_file)
         ssl._create_default_https_context = ssl._create_unverified_context
-        urllib.request.urlretrieve(package_url, local_package_file,
-                                   reporthook=self.package_download_progress)
+
+        # Open a separate process to download the package so the request cannot block us and we can enforce a timeout.
+        self.download_finished = False
+        self.download_time = time.time()
+        from multiprocessing import Process
+        download_process = Process(target=self.download_package_proc, args=(package_url, local_package_file))
+        download_process.start()
+        allowed_block_download_time = 30
+        while True:
+            block_time = time.time() - self.download_time
+            if block_time > allowed_block_download_time:
+                break
+            if self.download_finished:
+                break
+            time.sleep(3)
+        try:
+            if not self.download_finished:
+                download_process.terminate()
+                download_process.kill()
+        except Exception as e:
+            pass
+
+        # Another method to async download.
+        # import socket
+        # socket.setdefaulttimeout(15)
+        # try:
+        #     urllib.request.urlretrieve(package_url, local_package_file,
+        #                                reporthook=self.package_download_progress)
+        # except socket.timeout:
+        #     retry_count = 1
+        #     max_retry_num = 5
+        #     while retry_count <= max_retry_num:
+        #         try:
+        #             urllib.request.urlretrieve(package_url, local_package_file,
+        #                                        reporthook=self.package_download_progress)
+        #             break
+        #         except socket.timeout:
+        #             error_info = 'Retry %d time' % retry_count if retry_count == 1 else \
+        #                 'Reloading for %d times' % retry_count
+        #             logging.info(error_info)
+        #             retry_count += 1
+        #     if retry_count > max_retry_num:
+        #         logging.error("Download failed.")
+        #         raise Exception("Download failed")
+
         unzip_package_path = os.path.join(self.agent_package_unzip_dir,
-                                          f"unzip_fedml_run_{self.run_id}_{filename_without_extension}")
+                                          f"unzip_fedml_run_{self.run_id}_{self.edge_id}_{filename_without_extension}")
         try:
             shutil.rmtree(unzip_package_path, ignore_errors=True)
         except Exception as e:
@@ -485,7 +560,7 @@ def callback_start_fl_job(self, job_pid):
 
     def start_job_perf(self, job_pid):
         GeneralConstants.save_learning_process(self.run_id, job_pid, data_dir=self.agent_data_dir)
-        self.mlops_metrics.report_job_perf(self.args, self.agent_config["mqtt_config"], job_pid)
+        #self.mlops_metrics.report_job_perf(self.args, self.agent_config["mqtt_config"], job_pid)
 
     def job_error_processor(self, error_list):
         self.check_runner_stop_event()
@@ -516,10 +591,10 @@ def cleanup_containers_and_release_gpus(run_id, edge_id):
                 job_type == SchedulerConstants.JOB_TASK_TYPE_DEPLOY):
 
             # Terminate the run docker container if exists
-            container_name = JobRunnerUtils.get_run_container_name(run_id)
-            docker_client = JobRunnerUtils.get_docker_client(DockerArgs())
-            logging.info(f"Terminating the run docker container {container_name} if exists...")
             try:
+                container_name = JobRunnerUtils.get_run_container_name(run_id)
+                docker_client = JobRunnerUtils.get_docker_client(DockerArgs())
+                logging.info(f"Terminating the run docker container {container_name} if exists...")
                 JobRunnerUtils.remove_run_container_if_exists(container_name, docker_client)
             except Exception as e:
                 logging.error(f"Exception {e} occurred when terminating docker container. "
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
index 58198b6661..77768da6c0 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
@@ -44,10 +44,9 @@ def complete_job_runner(self, run_id):
         if self.job_runners.get(run_id_str, None) is not None:
             self.job_runners[run_id_str].trigger_completed_event()
 
-    def put_run_edge_device_info_to_queue(self, run_id, device_info):
-        run_id_str = str(run_id)
-        if self.job_runners.get(run_id_str, None) is not None:
-            self.job_runners[run_id_str].put_run_edge_device_info_to_queue(run_id, device_info)
+    def put_run_edge_device_info_to_queue(self, run_id, edge_id, device_info):
+        for job_run_id, job_runner in self.job_runners.items():
+            job_runner.put_run_edge_device_info_to_queue(run_id, edge_id, device_info)
 
     def get_runner_process(self, run_id, is_cloud_server=False):
         run_id_str = str(run_id)
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py
index 569f4d9257..76f811993e 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py
@@ -373,10 +373,10 @@ def run_status_dispatcher_in_slave(self, status_event, status_queue,
                     # Async request the job status from master when launching the job
                     job_launch_message_map[status_entity.run_id] = {"topic": message_entity.topic,
                                                                     "payload": message_entity.payload}
-                    status_manager_instances[status_entity.run_id]. \
-                        status_center_request_job_status_from_master_in_slave_agent(
-                        message_entity.topic, message_entity.payload
-                    )
+                    # status_manager_instances[status_entity.run_id]. \
+                    #     status_center_request_job_status_from_master_in_slave_agent(
+                    #     message_entity.topic, message_entity.payload
+                    # )
                 elif (message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_STOP_PREFIX) and
                       message_entity.topic.endswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_STOP_SUFFIX)):
                     # Cleanup when stopped the job
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
index 4d2cf3a5ed..871b9026bf 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
@@ -78,7 +78,7 @@ def process_job_exception_status(self, master_id, status):
 
     def process_job_running_status(self, master_id, status):
         self.message_reporter.report_server_training_status(
-            self.run_id, status, edge_id=master_id, running_json=self.running_scheduler_contract)
+            self.run_id, status, edge_id=master_id, running_json=self.running_scheduler_contract, update_db=False)
 
     def status_center_process_master_status(self, topic, payload):
         request_json = json.loads(payload)
@@ -121,13 +121,13 @@ def process_job_status_consensus(self, run_id, master_id, status):
                 status, edge_status_item)
             if consensus_device_status is not None:
                 self.message_reporter.report_client_training_status(
-                    edge_id_item, consensus_device_status, run_id=run_id)
+                    edge_id_item, consensus_device_status, run_id=run_id, update_db=False)
 
         # Save the job status to local storage
         FedMLServerDataInterface.get_instance().save_job_status(run_id, master_id, status, status)
 
         # Report the status to message center
-        self.message_reporter.report_server_training_status(run_id, status, edge_id=master_id)
+        self.message_reporter.report_server_training_status(run_id, status, edge_id=master_id, update_db=False)
 
         # Broadcast the status to slave agents
         self.message_reporter.report_job_status(run_id, status)
@@ -207,7 +207,7 @@ def process_device_status(self, run_id, edge_id, status):
 
         # Report client status
         consensus_status = self.get_device_consensus_status_in_current_device(edge_id, status)
-        self.message_reporter.report_client_training_status(edge_id, consensus_status, run_id=run_id)
+        self.message_reporter.report_client_training_status(edge_id, consensus_status, run_id=run_id, update_db=False)
 
         # Report server status based on the fault tolerance model and parameters
         edge_nums = len(edge_id_status_dict.keys()) - 1
@@ -263,7 +263,7 @@ def parse_fault_tolerance_params(self, run_id):
 
     def report_server_status(self, run_id, edge_id, server_id, status):
         self.status_reporter.report_server_id_status(
-            run_id, status, edge_id=edge_id, server_id=server_id, server_agent_id=edge_id)
+            run_id, status, edge_id=edge_id, server_id=server_id, server_agent_id=edge_id, update_db=False)
 
     def report_exception_status(
             self, edge_id_list, run_id=0, server_id=None, status=None, payload=None):
@@ -282,7 +282,7 @@ def status_center_process_slave_status_to_master_in_slave_agent(self, topic, pay
         self.message_center.send_message(topic, payload)
 
         # Post the status message to the listener queue of message center
-        self.message_center.receive_message(GeneralConstants.MSG_TOPIC_REPORT_DEVICE_STATUS_IN_JOB, payload)
+        #self.message_center.receive_message(GeneralConstants.MSG_TOPIC_REPORT_DEVICE_STATUS_IN_JOB, payload)
 
     def status_center_process_slave_status_to_mlops_in_slave_agent(self, topic, payload):
         # Forward the status message to message center.
diff --git a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
index 4448dd49fa..de2956ad94 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
@@ -1,3 +1,4 @@
+import json
 import logging
 import multiprocessing
 import os
@@ -104,7 +105,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
 
         self.status_reporter.report_client_id_status(
             self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING,
-            running_json=self.start_request_json, run_id=run_id)
+            running_json=json.dumps(self.request_json), run_id=run_id)
 
         # get training params
         private_local_data_dir = data_config.get("privateLocalData", "")
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index 514aa98cd7..b3cd154d23 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -240,7 +240,7 @@ def callback_start_train(self, topic, payload):
 
         # Print the payload
         logging.info(
-            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
+            f"FedMLDebug - run id {run_id}, Receive at callback_start_train: topic ({topic}), payload ({payload})"
         )
 
         # Occupy GPUs
@@ -418,6 +418,7 @@ def callback_response_device_status_in_job(self, topic, payload):
         edge_id = payload_json.get("edge_id", None)
 
         # process the status
+        logging.info("process status in the device status callback.")
         self.process_status(run_id, job_status, edge_id)
 
     def callback_response_job_status(self, topic, payload):
@@ -430,6 +431,7 @@ def callback_response_job_status(self, topic, payload):
         edge_id = payload_json.get("edge_id", None)
 
         # process the status
+        logging.info("process status in the job status callback.")
         self.process_status(run_id, job_status, edge_id)
 
     def callback_broadcasted_job_status(self, topic, payload):
@@ -439,6 +441,7 @@ def callback_broadcasted_job_status(self, topic, payload):
         job_status = payload_json.get("status", None)
 
         # process the status
+        logging.info("process status in the broadcast job status callback.")
         self.process_status(run_id, job_status, self.edge_id)
 
     def generate_protocol_manager(self):
diff --git a/python/fedml/computing/scheduler/slave/client_data_interface.py b/python/fedml/computing/scheduler/slave/client_data_interface.py
index 34a7b89bd2..0e9e84381a 100755
--- a/python/fedml/computing/scheduler/slave/client_data_interface.py
+++ b/python/fedml/computing/scheduler/slave/client_data_interface.py
@@ -143,7 +143,7 @@ def create_job_table(self):
                    updated_time TEXT,
                    round_index INT,
                    total_rounds INT,
-                   running_json TEXT);''')
+                   running_json TEXT NULL);''')
             self.db_connection.commit()
         except Exception as e:
             pass
@@ -405,14 +405,14 @@ class FedMLClientJobModel(object):
     def __init__(self):
         self.job_id = 0
         self.edge_id = 0
-        self.started_time = ""
-        self.ended_time = ""
-        self.progress = 0
-        self.eta = 0
-        self.failed_time = ""
+        self.started_time = "0"
+        self.ended_time = "0"
+        self.progress = 0.0
+        self.eta = 0.0
+        self.failed_time = "0"
         self.error_code = -1
         self.msg = ""
-        self.updated_time = ""
+        self.updated_time = "0"
         self.round_index = 0
         self.total_rounds = 0
         self.status = ""
diff --git a/python/fedml/core/distributed/communication/mqtt/mqtt_manager.py b/python/fedml/core/distributed/communication/mqtt/mqtt_manager.py
index bdafe159c2..937e9f6644 100644
--- a/python/fedml/core/distributed/communication/mqtt/mqtt_manager.py
+++ b/python/fedml/core/distributed/communication/mqtt/mqtt_manager.py
@@ -14,7 +14,7 @@
 class MqttManager(object):
     def __init__(self, host, port, user, pwd, keepalive_time,
                  client_id, last_will_topic=None, last_will_msg=None,
-                 clean_session=True, retain_msg=False):
+                 clean_session=True, retain_msg=True):
         self._client = None
         self.mqtt_connection_id = None
         self._host = host
diff --git a/python/fedml/core/mlops/mlops_metrics.py b/python/fedml/core/mlops/mlops_metrics.py
index e0855c74b5..afa96f6870 100644
--- a/python/fedml/core/mlops/mlops_metrics.py
+++ b/python/fedml/core/mlops/mlops_metrics.py
@@ -67,15 +67,17 @@ def comm_sanity_check(self):
         else:
             return True
 
-    def report_client_training_status(self, edge_id, status, running_json=None, is_from_model=False, run_id=0):
+    def report_client_training_status(self, edge_id, status, running_json=None,
+                                      is_from_model=False, run_id=0, update_db=True):
         self.common_report_client_training_status(edge_id, status, run_id=run_id)
 
-        if is_from_model:
-            from ...computing.scheduler.model_scheduler.device_client_data_interface import FedMLClientDataInterface
-            FedMLClientDataInterface.get_instance().save_job(run_id, edge_id, status, running_json)
-        else:
-            from ...computing.scheduler.slave.client_data_interface import FedMLClientDataInterface
-            FedMLClientDataInterface.get_instance().save_job(run_id, edge_id, status, running_json)
+        if update_db:
+            if is_from_model:
+                from ...computing.scheduler.model_scheduler.device_client_data_interface import FedMLClientDataInterface
+                FedMLClientDataInterface.get_instance().save_job(run_id, edge_id, status, running_json)
+            else:
+                from ...computing.scheduler.slave.client_data_interface import FedMLClientDataInterface
+                FedMLClientDataInterface.get_instance().save_job(run_id, edge_id, status, running_json)
 
     def report_client_device_status_to_web_ui(self, edge_id, status, run_id=0):
         """
@@ -169,20 +171,22 @@ def common_report_client_id_status(self, run_id, edge_id, status, server_id="0",
         # logging.info("report_client_id_status. message_json = %s" % message_json)
         self.send_message(topic_name, message_json)
 
-    def report_server_training_status(self, run_id, status, edge_id=0, role=None, running_json=None, is_from_model=False):
+    def report_server_training_status(self, run_id, status, edge_id=0, role=None,
+                                      running_json=None, is_from_model=False, update_db=True):
         # if not self.comm_sanity_check():
         #     return
         self.common_report_server_training_status(run_id, status, role=role, edge_id=edge_id)
 
-        if is_from_model:
-            from ...computing.scheduler.model_scheduler.device_server_data_interface import FedMLServerDataInterface
-            FedMLServerDataInterface.get_instance().save_job(run_id, self.edge_id, status, running_json)
-        else:
-            from ...computing.scheduler.master.server_data_interface import FedMLServerDataInterface
-            FedMLServerDataInterface.get_instance().save_job(run_id, self.edge_id, status, running_json)
+        if update_db:
+            if is_from_model:
+                from ...computing.scheduler.model_scheduler.device_server_data_interface import FedMLServerDataInterface
+                FedMLServerDataInterface.get_instance().save_job(run_id, self.edge_id, status, running_json)
+            else:
+                from ...computing.scheduler.master.server_data_interface import FedMLServerDataInterface
+                FedMLServerDataInterface.get_instance().save_job(run_id, self.edge_id, status, running_json)
 
     def report_job_status(self, run_id, status):
-        topic_name = "master_agent/slave_agent/job_status"
+        topic_name = f"master_agent/slave_agent/job_status/{run_id}"
         payload = {"run_id": run_id, "status": status}
 
         message_json = json.dumps(payload)
@@ -251,7 +255,7 @@ def broadcast_server_training_status(self, run_id, status, role=None, is_from_mo
             FedMLServerDataInterface.get_instance().save_job(run_id, self.edge_id, status)
 
     def report_server_id_status(self, run_id, status, edge_id=None, server_id=None, server_agent_id=None,
-                                is_from_model=False, running_json=None):
+                                is_from_model=False, running_json=None, update_db=True):
         # if not self.comm_sanity_check():
         #     return
         topic_name = "fl_server/flserver_agent_" + str(server_agent_id if server_agent_id is not None else
@@ -267,12 +271,13 @@ def report_server_id_status(self, run_id, status, edge_id=None, server_id=None,
         # logging.info("report_server_id_status. message_json = %s" % message_json)
         self.send_message(topic_name, message_json)
 
-        if is_from_model:
-            from ...computing.scheduler.model_scheduler.device_server_data_interface import FedMLServerDataInterface
-            FedMLServerDataInterface.get_instance().save_job(run_id, in_edge_id, status, running_json)
-        else:
-            from ...computing.scheduler.master.server_data_interface import FedMLServerDataInterface
-            FedMLServerDataInterface.get_instance().save_job(run_id, in_edge_id, status, running_json)
+        if update_db:
+            if is_from_model:
+                from ...computing.scheduler.model_scheduler.device_server_data_interface import FedMLServerDataInterface
+                FedMLServerDataInterface.get_instance().save_job(run_id, in_edge_id, status, running_json)
+            else:
+                from ...computing.scheduler.master.server_data_interface import FedMLServerDataInterface
+                FedMLServerDataInterface.get_instance().save_job(run_id, in_edge_id, status, running_json)
 
     def report_client_training_metric(self, metric_json):
         # if not self.comm_sanity_check():

From 6b987edd4ed0f421cdd1dbc872b5fa89fe736313 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Sat, 11 May 2024 14:51:15 -0700
Subject: [PATCH 028/282] Add GPU Type Registry

---
 .../comm_utils/gpu_utils/gpu_utils.py         | 17 +++++++++++++--
 .../scheduler/comm_utils/hardware_utils.py    | 21 ++++++++-----------
 2 files changed, 24 insertions(+), 14 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
index e6691b4b5d..2fc5cf619b 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
@@ -29,9 +29,23 @@ class GPUCard:
     temperature: Optional[float]
 
 
-class GPUCardUtil(ABC):
+class GPUTypeRegistry(type, ABC):
+    GPU_TYPE_REGISTRY = {}
+
+    def __new__(cls, name, bases, attrs):
+        new_cls = type.__new__(cls, name, bases, attrs)
+        cls.GPU_TYPE_REGISTRY[new_cls.__name__.lower()] = new_cls
+        return new_cls
 
     @classmethod
+    def get_gpu_utils(cls):
+        return cls.GPU_TYPE_REGISTRY.values()
+
+
+class GPUCardUtil(metaclass=GPUTypeRegistry):
+
+    @classmethod
+    @abstractmethod
     def detectGPUCardType(cls) -> Optional[GPUCardType]:
         raise NotImplementedError
 
@@ -44,4 +58,3 @@ def getAvailableGPUCardIDs() -> List[int]:
     @abstractmethod
     def getGPUCards() -> List[GPUCard]:
         raise NotImplementedError
-
diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
index d26fb9c5b5..d12effa826 100644
--- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
@@ -2,22 +2,19 @@
 
 from typing import Optional, List
 
-from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCardUtil, GPUCard
-from fedml.computing.scheduler.comm_utils.gpu_utils.nvidia_utils import NvidiaGPUtil
+from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCardUtil, GPUCard, GPUTypeRegistry
 from fedml.computing.scheduler.comm_utils.singleton import Singleton
 
 
 class HardwareUtil(metaclass=Singleton):
-
-    _gpu_utils = [NvidiaGPUtil]
-    _gpu_util: Optional[GPUCardUtil] = None
+    __gpu_util: Optional[GPUCardUtil] = None
 
     @staticmethod
-    def _get_util() -> Optional[GPUCardUtil]:
-        if HardwareUtil._gpu_util is not None:
-            return HardwareUtil._gpu_util
+    def __get_util() -> Optional[GPUCardUtil]:
+        if HardwareUtil.__gpu_util is not None:
+            return HardwareUtil.__gpu_util
 
-        for gpu_util in HardwareUtil._gpu_utils:
+        for gpu_util in GPUTypeRegistry.get_gpu_utils():
             try:
                 if gpu_util.detectGPUCardType() is not None:
                     HardwareUtil._gpu_util = gpu_util()
@@ -30,13 +27,13 @@ def _get_util() -> Optional[GPUCardUtil]:
 
     @staticmethod
     def getGPUs() -> List[GPUCard]:
-        gpu_util = HardwareUtil._get_util()
+        gpu_util = HardwareUtil.__get_util()
         return gpu_util.getGPUCards() if gpu_util is not None else []
 
     @staticmethod
     def getAvailableGPUCardIDs() -> List[int]:
-        gpu_util = HardwareUtil._get_util()
-        return gpu_util.getAvailainfbleGPUCardIDs() if gpu_util is not None else []
+        gpu_util = HardwareUtil.__get_util()
+        return gpu_util.getAvailableGPUCardIDs() if gpu_util is not None else []
 
 
 if __name__ == "__main__":

From 436233b752740ddae7304d1581f030baf8536173 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Sat, 11 May 2024 15:11:13 -0700
Subject: [PATCH 029/282] Rolling back GPU Registry change

---
 .../computing/scheduler/comm_utils/__init__.py  |  3 ---
 .../scheduler/comm_utils/gpu_utils/gpu_utils.py | 17 ++---------------
 .../scheduler/comm_utils/hardware_utils.py      |  8 +++++---
 3 files changed, 7 insertions(+), 21 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/__init__.py b/python/fedml/computing/scheduler/comm_utils/__init__.py
index adf0269b67..e69de29bb2 100644
--- a/python/fedml/computing/scheduler/comm_utils/__init__.py
+++ b/python/fedml/computing/scheduler/comm_utils/__init__.py
@@ -1,3 +0,0 @@
-import gpu_utils.gpu_utils
-import gpu_utils.qualcomm_utils
-import gpu_utils.nvidia_utils
\ No newline at end of file
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
index 2fc5cf619b..ced1c53d3e 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
@@ -1,4 +1,4 @@
-from abc import ABC, abstractmethod, ABCMeta
+from abc import ABC, abstractmethod
 from dataclasses import dataclass
 from enum import Enum, auto
 from typing import Optional, List
@@ -29,20 +29,7 @@ class GPUCard:
     temperature: Optional[float]
 
 
-class GPUTypeRegistry(type, ABC):
-    GPU_TYPE_REGISTRY = {}
-
-    def __new__(cls, name, bases, attrs):
-        new_cls = type.__new__(cls, name, bases, attrs)
-        cls.GPU_TYPE_REGISTRY[new_cls.__name__.lower()] = new_cls
-        return new_cls
-
-    @classmethod
-    def get_gpu_utils(cls):
-        return cls.GPU_TYPE_REGISTRY.values()
-
-
-class GPUCardUtil(metaclass=GPUTypeRegistry):
+class GPUCardUtil(ABC):
 
     @classmethod
     @abstractmethod
diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
index d12effa826..c468e2181c 100644
--- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
@@ -1,10 +1,12 @@
 import logging
-
 from typing import Optional, List
 
-from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCardUtil, GPUCard, GPUTypeRegistry
+from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCardUtil, GPUCard
+from fedml.computing.scheduler.comm_utils.gpu_utils.nvidia_utils import NvidiaGPUtil
 from fedml.computing.scheduler.comm_utils.singleton import Singleton
 
+GPU_CARD_UTILS = [NvidiaGPUtil]
+
 
 class HardwareUtil(metaclass=Singleton):
     __gpu_util: Optional[GPUCardUtil] = None
@@ -14,7 +16,7 @@ def __get_util() -> Optional[GPUCardUtil]:
         if HardwareUtil.__gpu_util is not None:
             return HardwareUtil.__gpu_util
 
-        for gpu_util in GPUTypeRegistry.get_gpu_utils():
+        for gpu_util in GPU_CARD_UTILS:
             try:
                 if gpu_util.detectGPUCardType() is not None:
                     HardwareUtil._gpu_util = gpu_util()

From 502c031e3838526de681eb2cf062789023659c08 Mon Sep 17 00:00:00 2001
From: alaydshah <alay11shah@gmail.com>
Date: Sun, 12 May 2024 02:21:38 +0000
Subject: [PATCH 030/282] Qualcomm Util -> get gpus

NOTE(review): qualcomm_utils.py imports `qaicrt` (Util, QIDList, QDevInfo,
QStatus) unconditionally at module top level, and hardware_utils.py imports
QualcommNPUtil unconditionally, so importing hardware_utils now raises
ImportError on any machine without the Qualcomm Cloud AI SDK installed.
The qaicrt import should be made lazy (inside the methods that use it) or
guarded with try/except in a follow-up commit. QIDList, QDevInfo and
QStatus are also imported but never used.
---
 .../comm_utils/gpu_utils/gpu_utils.py         | 26 ++++----
 .../comm_utils/gpu_utils/nvidia_utils.py      | 45 +++++++-------
 .../comm_utils/gpu_utils/qualcomm_utils.py    | 60 +++++++++++++++++++
 .../scheduler/comm_utils/hardware_utils.py    | 29 ++++-----
 4 files changed, 112 insertions(+), 48 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
index ced1c53d3e..2731e51a3b 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from enum import Enum, auto
 from typing import Optional, List
 
@@ -16,32 +16,34 @@ def __str__(self):
 @dataclass
 class GPUCard:
     id: int
-    uuid: str
     name: str
-    load: float
+    driver: str
+    serial: str
     memoryTotal: float
-    memoryUsed: float
     memoryFree: float
-    driver: str
-    serial: Optional[str]
-    display_mode: Optional[str]
-    display_active: Optional[str]
-    temperature: Optional[float]
+    memoryUsed: float
+    memoryUtil: float
+    load: Optional[float] = 0.0
+    uuid: Optional[str] = ""
+    display_mode: Optional[str] = ""
+    display_active: Optional[str] = ""
+    temperature: Optional[float] = 0.0
 
 
 class GPUCardUtil(ABC):
 
     @classmethod
     @abstractmethod
-    def detectGPUCardType(cls) -> Optional[GPUCardType]:
+    def detect_gpu_card_type(cls) -> Optional[GPUCardType]:
         raise NotImplementedError
 
     @staticmethod
     @abstractmethod
-    def getAvailableGPUCardIDs() -> List[int]:
+    def get_available_gpu_card_ids(order: str = "memory", limit: int = 1, max_load: float = 0.01,
+                                   max_memory: float = 0.01) -> List[int]:
         raise NotImplementedError
 
     @staticmethod
     @abstractmethod
-    def getGPUCards() -> List[GPUCard]:
+    def get_gpu_cards() -> List[GPUCard]:
         raise NotImplementedError
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
index 349230cef5..f229774ce0 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
@@ -6,26 +6,9 @@
 from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType
 
 
-def _convert(gpu: GPU) -> GPUCard:
-    return GPUCard(
-        id=gpu.id,
-        uuid=gpu.uuid,
-        name=gpu.name,
-        load=gpu.load,
-        memoryTotal=gpu.memoryTotal,
-        memoryUsed=gpu.memoryUsed,
-        memoryFree=gpu.memoryFree,
-        driver=gpu.driver,
-        serial=gpu.serial,
-        display_mode=gpu.display_mode,
-        display_active=gpu.display_active,
-        temperature=gpu.temperature
-    )
-
-
 class NvidiaGPUtil(GPUCardUtil):
     @classmethod
-    def detectGPUCardType(cls) -> Optional[GPUCardType]:
+    def detect_gpu_card_type(cls) -> Optional[GPUCardType]:
         try:
             subprocess.check_output(["nvidia-smi"], universal_newlines=True)
             return GPUCardType.NVIDIA
@@ -33,9 +16,27 @@ def detectGPUCardType(cls) -> Optional[GPUCardType]:
             return None
 
     @staticmethod
-    def getGPUCards() -> List[GPUCard]:
-        return [_convert(gpu) for gpu in GPUtil.getGPUs()]
+    def get_gpu_cards() -> List[GPUCard]:
+        return [NvidiaGPUtil.__convert(gpu) for gpu in GPUtil.getGPUs()]
+
+    @staticmethod
+    def get_available_gpu_card_ids(order: str = "memory", limit: int = 1, maxLoad: float = 0.01, maxMemory: float = 0.01) -> List[int]:
+        return GPUtil.getAvailable(order='memory', limit=limit, maxLoad=0.01, maxMemory=0.01)
 
     @staticmethod
-    def getAvailableGPUCardIDs() -> List[int]:
-        return GPUtil.getAvailable()
+    def __convert(gpu: GPU) -> GPUCard:
+        return GPUCard(
+            id=gpu.id,
+            name=gpu.name,
+            driver=gpu.driver,
+            serial=gpu.serial,
+            memoryTotal=gpu.memoryTotal,
+            memoryFree=gpu.memoryFree,
+            memoryUsed=gpu.memoryUsed,
+            memoryUtil=gpu.memoryUtil,
+            load=gpu.load,
+            uuid=gpu.uuid,
+            display_mode=gpu.display_mode,
+            display_active=gpu.display_active,
+            temperature=gpu.temperature
+        )
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
index e69de29bb2..1b56b7d05e 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
@@ -0,0 +1,60 @@
+import logging
+import subprocess
+from typing import List, Optional
+
+from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType
+from qaicrt import Util, QIDList, QDevInfo, QStatus
+
+
+class QualcommNPUtil(GPUCardUtil):
+    @classmethod
+    def detect_gpu_card_type(cls) -> Optional[GPUCardType]:
+        try:
+            subprocess.check_output(["/opt/qti-aic/tools/qaic-util"], universal_newlines=True)
+            return GPUCardType.QUALCOMM
+        except Exception:
+            return None
+
+    @staticmethod
+    def get_gpu_cards() -> List[GPUCard]:
+        cards = []
+        util = Util()
+        status, card_list = util.getDeviceIds()
+        if status.value == 0:
+            for card in card_list:
+                status, card_info = util.getDeviceInfo(card)
+                if status.value == 0 and card_info.devStatus.value == 1:
+                    cards.append(QualcommNPUtil.__convert(card_info))
+
+        else:
+            logging.error("Qualcomm Card Status not Healthy")
+        return cards
+
+    @staticmethod
+    def get_available_gpu_card_ids(order: str = "memory", limit: int = 1, max_load: float = 0.01,
+                                   max_memory: float = 0.01) -> List[int]:
+        available_gpu_card_ids = []
+
+        if order != "memory":
+            raise NotImplementedError(f"Qualcomm utils doesn't have support to compute availability based on {order}. "
+                                      f"Supported criteria: [memory]")
+
+        return available_gpu_card_ids
+
+    @staticmethod
+    def __convert(npu) -> GPUCard:
+        memory_total = npu.devData.resourceInfo.dramTotal / 1024
+        memory_free = npu.devData.resourceInfo.dramFree / 1024
+        memory_used = memory_total - memory_free
+        memory_utilized = float(memory_used) / float(memory_total)
+
+        return GPUCard(
+            id=npu.qid,
+            name=npu.pciInfo.devicename,
+            driver=npu.devData.fwQCImageVersionString,
+            serial=npu.devData.serial,
+            memoryTotal=memory_total,
+            memoryFree=memory_free,
+            memoryUsed=memory_used,
+            memoryUtil=memory_utilized,
+        )
diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
index c468e2181c..4e6f83e963 100644
--- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
@@ -3,24 +3,25 @@
 
 from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCardUtil, GPUCard
 from fedml.computing.scheduler.comm_utils.gpu_utils.nvidia_utils import NvidiaGPUtil
+from fedml.computing.scheduler.comm_utils.gpu_utils.qualcomm_utils import QualcommNPUtil
 from fedml.computing.scheduler.comm_utils.singleton import Singleton
 
-GPU_CARD_UTILS = [NvidiaGPUtil]
+GPU_CARD_UTILS = [NvidiaGPUtil, QualcommNPUtil]
 
 
 class HardwareUtil(metaclass=Singleton):
     __gpu_util: Optional[GPUCardUtil] = None
 
-    @staticmethod
-    def __get_util() -> Optional[GPUCardUtil]:
-        if HardwareUtil.__gpu_util is not None:
-            return HardwareUtil.__gpu_util
+    @classmethod
+    def __get_util(cls) -> Optional[GPUCardUtil]:
+        if cls.__gpu_util is not None:
+            return cls.__gpu_util
 
         for gpu_util in GPU_CARD_UTILS:
             try:
-                if gpu_util.detectGPUCardType() is not None:
-                    HardwareUtil._gpu_util = gpu_util()
-                    return HardwareUtil._gpu_util
+                if gpu_util.detect_gpu_card_type() is not None:
+                    cls.__gpu_util = gpu_util()
+                    return cls.__gpu_util
             except Exception as e:
                 pass
 
@@ -28,18 +29,18 @@ def __get_util() -> Optional[GPUCardUtil]:
         return None
 
     @staticmethod
-    def getGPUs() -> List[GPUCard]:
+    def get_gpus() -> List[GPUCard]:
         gpu_util = HardwareUtil.__get_util()
-        return gpu_util.getGPUCards() if gpu_util is not None else []
+        return gpu_util.get_gpu_cards() if gpu_util is not None else []
 
     @staticmethod
-    def getAvailableGPUCardIDs() -> List[int]:
+    def get_available_gpu_card_ids() -> List[int]:
         gpu_util = HardwareUtil.__get_util()
-        return gpu_util.getAvailableGPUCardIDs() if gpu_util is not None else []
+        return gpu_util.get_available_gpu_card_ids() if gpu_util is not None else []
 
 
 if __name__ == "__main__":
-    gpus = HardwareUtil.getGPUs()
-    get_available_gpu_cards = HardwareUtil.getAvailableGPUCardIDs()
+    gpus = HardwareUtil.get_gpus()
+    get_available_gpu_cards = HardwareUtil.get_available_gpu_card_ids()
     print(gpus)
     print(get_available_gpu_cards)

From fba65b28d14d3013b6893726ea60bfc8f5200904 Mon Sep 17 00:00:00 2001
From: alaydshah <alay11shah@gmail.com>
Date: Sun, 12 May 2024 04:40:22 +0000
Subject: [PATCH 031/282] Qualcomm Util -> get_available_gpu_card_ids

NOTE(review): NvidiaGPUtil.get_available_gpu_card_ids still ignores its
order/maxLoad/maxMemory arguments — the GPUtil.getAvailable call hard-codes
order='memory', maxLoad=0.01, maxMemory=0.01 — and its parameter names
(maxLoad/maxMemory) do not match the GPUCardUtil ABC signature
(max_load/max_memory), so keyword calls made against the interface will
fail on this implementation. Forward the arguments and align the parameter
names in a follow-up commit.
---
 .../scheduler/comm_utils/gpu_utils/gpu_utils.py       |  3 +--
 .../scheduler/comm_utils/gpu_utils/nvidia_utils.py    |  2 +-
 .../scheduler/comm_utils/gpu_utils/qualcomm_utils.py  | 11 +++++++----
 .../computing/scheduler/comm_utils/hardware_utils.py  |  7 ++++---
 4 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
index 2731e51a3b..e098ce55ac 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
@@ -39,8 +39,7 @@ def detect_gpu_card_type(cls) -> Optional[GPUCardType]:
 
     @staticmethod
     @abstractmethod
-    def get_available_gpu_card_ids(order: str = "memory", limit: int = 1, max_load: float = 0.01,
-                                   max_memory: float = 0.01) -> List[int]:
+    def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memory: float) -> List[int]:
         raise NotImplementedError
 
     @staticmethod
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
index f229774ce0..8da4e89573 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
@@ -20,7 +20,7 @@ def get_gpu_cards() -> List[GPUCard]:
         return [NvidiaGPUtil.__convert(gpu) for gpu in GPUtil.getGPUs()]
 
     @staticmethod
-    def get_available_gpu_card_ids(order: str = "memory", limit: int = 1, maxLoad: float = 0.01, maxMemory: float = 0.01) -> List[int]:
+    def get_available_gpu_card_ids(order: str, limit: int, maxLoad: float, maxMemory: float) -> List[int]:
         return GPUtil.getAvailable(order='memory', limit=limit, maxLoad=0.01, maxMemory=0.01)
 
     @staticmethod
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
index 1b56b7d05e..9ab629a9cc 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
@@ -1,4 +1,5 @@
 import logging
+import math
 import subprocess
 from typing import List, Optional
 
@@ -31,15 +32,17 @@ def get_gpu_cards() -> List[GPUCard]:
         return cards
 
     @staticmethod
-    def get_available_gpu_card_ids(order: str = "memory", limit: int = 1, max_load: float = 0.01,
-                                   max_memory: float = 0.01) -> List[int]:
-        available_gpu_card_ids = []
+    def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memory: float) -> List[int]:
 
         if order != "memory":
             raise NotImplementedError(f"Qualcomm utils doesn't have support to compute availability based on {order}. "
                                       f"Supported criteria: [memory]")
 
-        return available_gpu_card_ids
+        gpu_cards: List[GPUCard] = QualcommNPUtil.get_gpu_cards()
+        gpu_cards = list(filter(lambda card: card.memoryUtil < max_memory, gpu_cards))
+        gpu_cards.sort(key=lambda card: float('inf') if math.isnan(card.memoryUtil) else card.memoryUtil, reverse=False)
+        gpu_cards = gpu_cards[0:min(limit, len(gpu_cards))]
+        return list(map(lambda card: card.id, gpu_cards))
 
     @staticmethod
     def __convert(npu) -> GPUCard:
diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
index 4e6f83e963..0ba8aa664d 100644
--- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
@@ -34,13 +34,14 @@ def get_gpus() -> List[GPUCard]:
         return gpu_util.get_gpu_cards() if gpu_util is not None else []
 
     @staticmethod
-    def get_available_gpu_card_ids() -> List[int]:
+    def get_available_gpu_card_ids(order: str = "memory", limit: int = 1, max_load: float = 0.01,
+                                   max_memory: float = 0.01) -> List[int]:
         gpu_util = HardwareUtil.__get_util()
-        return gpu_util.get_available_gpu_card_ids() if gpu_util is not None else []
+        return gpu_util.get_available_gpu_card_ids(order, limit, max_load, max_memory) if gpu_util is not None else []
 
 
 if __name__ == "__main__":
     gpus = HardwareUtil.get_gpus()
-    get_available_gpu_cards = HardwareUtil.get_available_gpu_card_ids()
+    get_available_gpu_cards = HardwareUtil.get_available_gpu_card_ids(limit=len(gpus))
     print(gpus)
     print(get_available_gpu_cards)

From 63c682ca3e64be70cb9e66e104a4e45dcf17d8b8 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Sat, 11 May 2024 21:53:17 -0700
Subject: [PATCH 032/282] Add sys path in init

---
 .../scheduler/comm_utils/gpu_utils/qualcomm_utils.py       | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
index 9ab629a9cc..38d95e0836 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
@@ -1,13 +1,16 @@
 import logging
 import math
 import subprocess
+import sys
 from typing import List, Optional
 
 from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType
-from qaicrt import Util, QIDList, QDevInfo, QStatus
 
 
 class QualcommNPUtil(GPUCardUtil):
+    def __init__(self):
+        sys.path.append("/opt/qti-aic/dev/lib/x86_64/")
+
     @classmethod
     def detect_gpu_card_type(cls) -> Optional[GPUCardType]:
         try:
@@ -18,6 +21,8 @@ def detect_gpu_card_type(cls) -> Optional[GPUCardType]:
 
     @staticmethod
     def get_gpu_cards() -> List[GPUCard]:
+        from qaicrt import Util, QIDList, QDevInfo, QStatus
+
         cards = []
         util = Util()
         status, card_list = util.getDeviceIds()

From d3c081d5ec0cc3285582ce6712b8b3ff7429f42a Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Sat, 11 May 2024 23:09:40 -0700
Subject: [PATCH 033/282] Replace GPUtil with HardwareUtil

---
 .../scheduler/comm_utils/container_utils.py   |  6 +++---
 .../comm_utils/gpu_utils/gpu_utils.py         |  3 ++-
 .../comm_utils/gpu_utils/nvidia_utils.py      |  3 ++-
 .../comm_utils/gpu_utils/qualcomm_utils.py    |  2 ++
 .../scheduler/comm_utils/job_utils.py         | 16 ---------------
 .../scheduler/comm_utils/sys_utils.py         | 20 +++++++++++--------
 .../computing/scheduler/env/collect_env.py    |  6 ++----
 7 files changed, 23 insertions(+), 33 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/container_utils.py b/python/fedml/computing/scheduler/comm_utils/container_utils.py
index f337dd9997..4e09315b78 100644
--- a/python/fedml/computing/scheduler/comm_utils/container_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/container_utils.py
@@ -8,10 +8,10 @@
 from docker import errors
 
 from fedml.computing.scheduler.comm_utils import sys_utils
+from fedml.computing.scheduler.comm_utils.hardware_utils import HardwareUtil
 from fedml.core.common.singleton import Singleton
 from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants
 import time
-from GPUtil import getGPUs
 
 
 class ContainerUtils(Singleton):
@@ -252,7 +252,7 @@ def get_container_perf(self, c_name) -> ContainerMetrics:
         CPU %     MEM USAGE / LIMIT     MEM %     NET I/O          BLOCK I/O
         0.26%     8.703GiB / 503.5GiB   1.73%     17.4GB / 176MB   545kB / 20.9GB
 
-        GPU: We currently use GPUtil to get the GPU stats on host machine since one GPU is not
+        GPU: We currently use HardwareUtil to get the GPU stats on host machine since one GPU is not
         shared by multiple containers
         (TODO: get the GPU stats inside the container)
         """
@@ -350,7 +350,7 @@ def gpu_stats(gpu_ids):
         utilz, memory, temp = None, None, None
         gpu_stats_map = {}  # gpu_id: int -> {"gpu_utilization", "gpu_memory_allocated", "gpu_temp"}
         try:
-            gpus = getGPUs()
+            gpus = HardwareUtil.get_gpus()
 
             for i in gpu_ids:
                 gpu = gpus[i]
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
index e098ce55ac..3007bc07bc 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from dataclasses import dataclass, field
+from dataclasses import dataclass
 from enum import Enum, auto
 from typing import Optional, List
 
@@ -19,6 +19,7 @@ class GPUCard:
     name: str
     driver: str
     serial: str
+    vendor: str
     memoryTotal: float
     memoryFree: float
     memoryUsed: float
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
index 8da4e89573..58c3888e68 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
@@ -30,6 +30,7 @@ def __convert(gpu: GPU) -> GPUCard:
             name=gpu.name,
             driver=gpu.driver,
             serial=gpu.serial,
+            vendor=GPUCardType.NVIDIA.name,
             memoryTotal=gpu.memoryTotal,
             memoryFree=gpu.memoryFree,
             memoryUsed=gpu.memoryUsed,
@@ -38,5 +39,5 @@ def __convert(gpu: GPU) -> GPUCard:
             uuid=gpu.uuid,
             display_mode=gpu.display_mode,
             display_active=gpu.display_active,
-            temperature=gpu.temperature
+            temperature=gpu.temperature,
         )
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
index 38d95e0836..ca55fdab7c 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
@@ -51,6 +51,7 @@ def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memo
 
     @staticmethod
     def __convert(npu) -> GPUCard:
+        # TODO (alaydshah): Add support for load, memoryUtil, temperature
         memory_total = npu.devData.resourceInfo.dramTotal / 1024
         memory_free = npu.devData.resourceInfo.dramFree / 1024
         memory_used = memory_total - memory_free
@@ -61,6 +62,7 @@ def __convert(npu) -> GPUCard:
             name=npu.pciInfo.devicename,
             driver=npu.devData.fwQCImageVersionString,
             serial=npu.devData.serial,
+            vendor=GPUCardType.QUALCOMM.name,
             memoryTotal=memory_total,
             memoryFree=memory_free,
             memoryUsed=memory_used,
diff --git a/python/fedml/computing/scheduler/comm_utils/job_utils.py b/python/fedml/computing/scheduler/comm_utils/job_utils.py
index 384cbacd1d..afa6293396 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_utils.py
@@ -2,7 +2,6 @@
 import os
 import platform
 import traceback
-import GPUtil
 import docker
 import fedml
 from docker import errors, DockerClient
@@ -159,23 +158,8 @@ def occupy_gpu_ids(self, run_id, request_gpu_num, device_id, inner_id=None,
     @staticmethod
     def search_and_refresh_available_gpu_ids(available_gpu_ids):
         trimmed_gpu_ids = JobRunnerUtils.trim_unavailable_gpu_ids(available_gpu_ids)
-        # if len(trimmed_gpu_ids) <= 0:
-        #     available_gpu_ids = JobRunnerUtils.balance_available_gpu_ids(trimmed_gpu_ids)
         return trimmed_gpu_ids
 
-    @staticmethod
-    def balance_available_gpu_ids(available_gpu_ids):
-        gpu_list, realtime_available_gpu_ids = JobRunnerUtils.get_gpu_list_and_realtime_gpu_available_ids()
-        available_gpu_ids = realtime_available_gpu_ids
-        if len(available_gpu_ids) <= 0:
-            for gpu in gpu_list:
-                gpu = GPUtil.GPU(gpu)
-                if gpu.memoryUtil > 0.8:
-                    continue
-                available_gpu_ids.append(gpu.id)
-
-        return available_gpu_ids.copy()
-
     @staticmethod
     def request_gpu_ids(request_gpu_num, available_gpu_ids):
         available_gpu_count = len(available_gpu_ids)
diff --git a/python/fedml/computing/scheduler/comm_utils/sys_utils.py b/python/fedml/computing/scheduler/comm_utils/sys_utils.py
index 64313b0864..f1989fbe5a 100644
--- a/python/fedml/computing/scheduler/comm_utils/sys_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/sys_utils.py
@@ -10,6 +10,7 @@
 import psutil
 import yaml
 
+from fedml.computing.scheduler.comm_utils.hardware_utils import HardwareUtil
 from fedml.computing.scheduler.comm_utils.yaml_utils import load_yaml_config
 import json
 from urllib import request
@@ -18,7 +19,6 @@
 from packaging import version
 import sys
 import subprocess
-import GPUtil
 
 from fedml.computing.scheduler.slave.client_constants import ClientConstants
 
@@ -95,7 +95,7 @@ def get_sys_runner_info():
         pass
 
     try:
-        gpus = GPUtil.getGPUs()
+        gpus = HardwareUtil.get_gpus()
         memory_total = 0.0
         memory_free = 0.0
         for gpu in gpus:
@@ -105,9 +105,11 @@ def get_sys_runner_info():
         gpu_available_mem = "{:.1f} G".format(memory_free / 1024.0)
         gpu_total_mem = "{:.1f}G".format(memory_total / 1024.0)
         gpu_count = len(gpus)
-        gpu_vendor = "nvidia"
+        if gpu_count:
+            gpu_vendor = gpus[0].vendor
+            gpu_device_name = gpus[0].name
 
-        gpu_device_name = torch.cuda.get_device_name(0)
+        # gpu_device_name = torch.cuda.get_device_name(0)
         gpu_info = gpu_device_name
     except:
         pass
@@ -168,7 +170,7 @@ def get_gpu_list():
 
         return ret_gpu_list[0:simulation_gpu_count]
 
-    gpu_list = GPUtil.getGPUs()
+    gpu_list = HardwareUtil.get_gpus()
     ret_gpu_list = list()
     for gpu in gpu_list:
         ret_gpu_item = {"ID": gpu.id, "uuid": gpu.uuid, "load": gpu.load,
@@ -189,7 +191,8 @@ def get_available_gpu_id_list(limit=1) -> List[int]:
                 available_gpu_ids.append(count)
         return available_gpu_ids[0:simulation_gpu_count]
 
-    gpu_available_list = GPUtil.getAvailable(order='memory', limit=limit, maxLoad=0.01, maxMemory=0.01)
+    gpu_available_list = HardwareUtil.get_available_gpu_card_ids(order='memory', limit=limit, max_load=0.01,
+                                                                 max_memory=0.01)
     return gpu_available_list
 
 
@@ -219,9 +222,10 @@ def get_gpu_count_vendor():
     gpu_count = 0
     gpu_vendor = ""
     try:
-        gpus = GPUtil.getGPUs()
+        gpus = HardwareUtil.get_gpus()
         gpu_count = len(gpus)
-        gpu_vendor = "nvidia"
+        if gpu_count:
+            gpu_vendor = gpus[0].vendor
     except:
         pass
 
diff --git a/python/fedml/computing/scheduler/env/collect_env.py b/python/fedml/computing/scheduler/env/collect_env.py
index dcece6a720..63f7e66b85 100644
--- a/python/fedml/computing/scheduler/env/collect_env.py
+++ b/python/fedml/computing/scheduler/env/collect_env.py
@@ -1,9 +1,8 @@
 import os
 import traceback
 
-import GPUtil
-
 import fedml
+from fedml.computing.scheduler.comm_utils.hardware_utils import HardwareUtil
 from fedml.computing.scheduler.slave.client_diagnosis import ClientDiagnosis
 
 
@@ -59,8 +58,7 @@ def collect_env():
 
     try:
         print("\n======== GPU Configuration ========")
-        import GPUtil
-        gpus = GPUtil.getGPUs()
+        gpus = HardwareUtil.get_gpus()
         memory_total = 0.0
         memory_free = 0.0
         gpu_name = ""

From 8b2ff80167e085f9763e44083cd7d8de95b17811 Mon Sep 17 00:00:00 2001
From: alaydshah <alay11shah@gmail.com>
Date: Sun, 12 May 2024 06:23:05 +0000
Subject: [PATCH 034/282] Make fedml env hardware agnostic

---
 .../computing/scheduler/env/collect_env.py      | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/python/fedml/computing/scheduler/env/collect_env.py b/python/fedml/computing/scheduler/env/collect_env.py
index 63f7e66b85..b2f7bd7f5e 100644
--- a/python/fedml/computing/scheduler/env/collect_env.py
+++ b/python/fedml/computing/scheduler/env/collect_env.py
@@ -11,7 +11,7 @@ def collect_env():
     print("FedML version: " + str(fedml.__version__))
     env_version = fedml.get_env_version()
     print("FedML ENV version: " + str(env_version))
-    
+
     print("Execution path:" + str(os.path.abspath(fedml.__file__)))
 
     print("\n======== Running Environment ========")
@@ -62,26 +62,25 @@ def collect_env():
         memory_total = 0.0
         memory_free = 0.0
         gpu_name = ""
+        vendor = ""
         for gpu in gpus:
             memory_total += gpu.memoryTotal
             memory_free += gpu.memoryFree
             gpu_name = gpu.name
+            vendor = gpu.vendor
 
-        print("NVIDIA GPU Info: " + gpu_name)
+        print(f"{vendor} GPU Info: " + gpu_name)
         print("Available GPU memory: {:.1f} G / {:.1f}G".format(
             memory_free / 1024.0, memory_total / 1024.0))
 
+        device_count = len(gpus)
+        print("device_count = {}".format(device_count))
+
         import torch
 
         torch_is_available = torch.cuda.is_available()
         print("torch_is_available = {}".format(torch_is_available))
 
-        device_count = torch.cuda.device_count()
-        print("device_count = {}".format(device_count))
-
-        device_name = torch.cuda.get_device_name(0)
-        print("device_name = {}".format(device_name))
-
     except:
         print("No GPU devices")
 
@@ -108,4 +107,4 @@ def collect_env():
             print(f"You can not connect to {mqtt_url}.\n")
     except Exception as e:
         print(f"The connection exception: {traceback.format_exc()}")
-        pass
\ No newline at end of file
+        pass

From f9aaee926f31810697d060cc271f5fec5a9d8e5f Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Sun, 12 May 2024 00:09:45 -0700
Subject: [PATCH 035/282] Nit

---
 .../fedml/computing/scheduler/comm_utils/hardware_utils.py  | 6 +++---
 python/fedml/computing/scheduler/comm_utils/sys_utils.py    | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
index 0ba8aa664d..1aeb5eb0be 100644
--- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
@@ -34,14 +34,14 @@ def get_gpus() -> List[GPUCard]:
         return gpu_util.get_gpu_cards() if gpu_util is not None else []
 
     @staticmethod
-    def get_available_gpu_card_ids(order: str = "memory", limit: int = 1, max_load: float = 0.01,
-                                   max_memory: float = 0.01) -> List[int]:
+    def get_available_gpu_ids(order: str = "memory", limit: int = 1, max_load: float = 0.01,
+                              max_memory: float = 0.01) -> List[int]:
         gpu_util = HardwareUtil.__get_util()
         return gpu_util.get_available_gpu_card_ids(order, limit, max_load, max_memory) if gpu_util is not None else []
 
 
 if __name__ == "__main__":
     gpus = HardwareUtil.get_gpus()
-    get_available_gpu_cards = HardwareUtil.get_available_gpu_card_ids(limit=len(gpus))
+    get_available_gpu_cards = HardwareUtil.get_available_gpu_ids(limit=len(gpus))
     print(gpus)
     print(get_available_gpu_cards)
diff --git a/python/fedml/computing/scheduler/comm_utils/sys_utils.py b/python/fedml/computing/scheduler/comm_utils/sys_utils.py
index f1989fbe5a..aaa37bc4db 100644
--- a/python/fedml/computing/scheduler/comm_utils/sys_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/sys_utils.py
@@ -191,8 +191,8 @@ def get_available_gpu_id_list(limit=1) -> List[int]:
                 available_gpu_ids.append(count)
         return available_gpu_ids[0:simulation_gpu_count]
 
-    gpu_available_list = HardwareUtil.get_available_gpu_card_ids(order='memory', limit=limit, max_load=0.01,
-                                                                 max_memory=0.01)
+    gpu_available_list = HardwareUtil.get_available_gpu_ids(order='memory', limit=limit, max_load=0.01,
+                                                            max_memory=0.01)
     return gpu_available_list
 
 

From 7e547f4ea78d08202ae23ff19b9ba6a05fffe2f6 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Sun, 12 May 2024 20:29:44 +0800
Subject: [PATCH 036/282] [CoreEngine] make the deployment and federated
 learning work.

---
 python/fedml/__init__.py                          | 10 +++++++---
 .../scheduler/comm_utils/container_utils.py       |  2 +-
 .../computing/scheduler/comm_utils/job_monitor.py | 10 +++-------
 .../scheduler/master/base_master_job_runner.py    | 15 +++++++++------
 .../model_scheduler/worker_job_runner.py          |  2 +-
 .../scheduler_core/scheduler_base_job_runner.py   | 15 +++++++++++----
 .../scheduler_core/status_manager_protocols.py    |  5 ++++-
 python/fedml/core/mlops/mlops_device_perfs.py     | 13 ++++++++-----
 8 files changed, 44 insertions(+), 28 deletions(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index 92b72357a0..8044387b65 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -90,9 +90,13 @@ def init(args=None, check_env=True, should_init_logs=True):
     # Windows/Linux/MacOS compatability issues on multi-processing
     # https://github.com/pytorch/pytorch/issues/3492
     """
-    if multiprocessing.get_start_method() != "spawn":
-        # force all platforms (Windows/Linux/MacOS) to use the same way (spawn) for multiprocessing
-        multiprocessing.set_start_method("spawn", force=True)
+    if multiprocessing.get_start_method() != "fork":
+        # force all platforms (Windows/Linux/macOS) to use the same way (fork) for multiprocessing
+        multiprocessing.set_start_method("fork", force=True)
+
+    # if multiprocessing.get_start_method() != "spawn":
+    #     # force all platforms (Windows/Linux/MacOS) to use the same way (spawn) for multiprocessing
+    #     multiprocessing.set_start_method("spawn", force=True)
 
     """
     # https://stackoverflow.com/questions/53014306/error-15-initializing-libiomp5-dylib-but-found-libiomp5-dylib-already-initial
diff --git a/python/fedml/computing/scheduler/comm_utils/container_utils.py b/python/fedml/computing/scheduler/comm_utils/container_utils.py
index 9469c8b471..4cbf642a45 100644
--- a/python/fedml/computing/scheduler/comm_utils/container_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/container_utils.py
@@ -178,7 +178,7 @@ def get_container_rank_same_model(prefix: str):
         running_model_name = hash("model_endpoint_id_{}_name_{}_model_id_{}_name_{}_ver_{}")
         """
         try:
-            docker.from_env(timeout=5, version="auto")
+            client = docker.from_env(timeout=5, version="auto")
         except Exception:
             logging.error("Failed to connect to the docker daemon, please ensure that you have "
                           "installed Docker Desktop or Docker Engine, and the docker is running")
diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
index 84723d373a..9bee76e780 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
@@ -208,6 +208,8 @@ def monitor_replicas_number():
             endpoint_replicas_details = {}
             if isinstance(endpoint_detail, str):
                 endpoint_replicas_details = json.loads(endpoint_detail)
+                if isinstance(endpoint_replicas_details, str):
+                    endpoint_replicas_details = json.loads(endpoint_replicas_details)
 
             if "result" in endpoint_replicas_details:
                 endpoint_replica_details = {}
@@ -220,13 +222,7 @@ def monitor_replicas_number():
         for endpoint_id, num_replica in res_to_mlops.items():
             curr_version = fedml.get_env_version()
             num_replica_url_path = "fedmlModelServer/api/v1/endpoint/replica-info"
-            if curr_version == "release":
-                mlops_prefix = "https://open.fedml.ai/"
-            elif curr_version == "test":
-                mlops_prefix = "https://open-test.fedml.ai/"
-            else:
-                logging.error(f"Do not support the version {curr_version}.")
-                return
+            mlops_prefix = fedml._get_backend_service()
             url = f"{mlops_prefix}{num_replica_url_path}"
 
             cached_token = FedMLModelCache.get_instance().get_end_point_token_with_eid(endpoint_id)
diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py
index ce0515160f..07c297c65d 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py
@@ -221,10 +221,10 @@ def _process_run_logs_queue(self, run_logs_queue):
 
     def run_server_job(
             self, process_event, completed_event, edge_id_status_queue=None,
-            edge_device_info_queue=None, run_metrics_queue=None,
-            run_event_queue=None, run_artifacts_queue=None, run_logs_queue=None,
-            sender_message_queue=None, listener_message_queue=None,
-            edge_device_info_global_queue=None, status_center_queue=None
+            edge_device_info_queue=None, run_metrics_queue=None, run_event_queue=None,
+            run_artifacts_queue=None, run_logs_queue=None, edge_device_info_global_queue=None,
+            run_extend_queue_list=None, sender_message_center_queue=None, listener_message_queue=None,
+            status_center_queue=None
     ):
         print(f"Server runner process id {os.getpid()}, run id {self.run_id}")
 
@@ -239,10 +239,10 @@ def run_server_job(
         try:
             MLOpsUtils.set_ntp_offset(self.ntp_offset)
 
-            self.rebuild_message_status_center(sender_message_queue, listener_message_queue, status_center_queue)
+            self.rebuild_message_status_center(sender_message_center_queue, listener_message_queue, status_center_queue)
 
             self.run_server_job_impl(process_event, completed_event,
-                                     message_center_queue=sender_message_queue)
+                                     message_center_queue=sender_message_center_queue)
         except RunnerError:
             logging.info("Runner stopped.")
             self.status_reporter.report_server_id_status(
@@ -703,5 +703,8 @@ def should_process_async_cluster(self):
 
         return False, self.async_check_timeout
 
+    def get_client_id_list(self, server_edge_id_list):
+        return server_edge_id_list
+
 
 
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
index d1cfd3b83c..ac9328592c 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
@@ -353,7 +353,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
             return True
         elif op == "update" or op == "rollback":
             # Update is combine of delete and add
-            worker_ip = self.get_ip_address(self.request_json)
+            worker_ip = GeneralConstants.get_ip_address(self.request_json)
             for rank in replica_rank_to_update:
                 # Delete a replica (container) if exists
                 self.replica_handler.remove_replica(rank)
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
index 0b4d47d52c..f40b8ecfb6 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -112,7 +112,8 @@ def build_dynamic_constrain_variables(self, run_id, run_config):
 
         self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.RUN_ID}"] = run_id
         self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.PRIVATE_LOCAL_DATA}"] = private_data_dir.replace(" ", "")
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_ID_LIST}"] = str(local_edge_id_list).replace(" ", "")
+        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_ID_LIST}"] = \
+            str(self.get_client_id_list(server_edge_id_list)).replace(" ", "")
         self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.SYNTHETIC_DATA_URL}"] = synthetic_data_url.replace(" ", "")
         self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.IS_USING_LOCAL_DATA}"] = str(is_using_local_data)
         self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_NUM}"] = len(server_edge_id_list)
@@ -129,6 +130,11 @@ def build_dynamic_constrain_variables(self, run_id, run_config):
             "LOG_SERVER_URL"
         ]
 
+    def get_client_id_list(self, server_edge_id_list):
+        local_edge_id_list = list()
+        local_edge_id_list.append(int(self.edge_id))
+        return local_edge_id_list
+
     @staticmethod
     def unzip_file(zip_file, unzip_file_path) -> str:
         if zipfile.is_zipfile(zip_file):
@@ -497,11 +503,12 @@ def execute_job_task(self, unzip_package_path, entry_file_full_path, conf_file_f
         if job_yaml_default_none is None:
             # Generate the job executing commands for previous federated learning (Compatibility)
             python_program = get_python_program()
-            logging.info("Run the client: {} {} --cf {} --rank {} --role client".format(
-                python_program, entry_file_full_path, conf_file_full_path, str(dynamic_args_config.get("rank", 1))))
             rank = str(dynamic_args_config.get("rank", 1))
+            role = "server" if rank == "0" else "client"
+            logging.info(f"Run the {role}: {python_program} {entry_file_full_path} --cf {conf_file_full_path} "
+                         f"--rank {rank} --role {role}")
             entry_command = f"{python_program} {entry_file_full_path} --cf " \
-                            f"{conf_file_full_path} --rank {rank} --role client"
+                            f"{conf_file_full_path} --rank {rank} --role {role}"
             shell_cmd_list = [entry_command]
 
             # Run the job executing commands for previous federated learning (Compatibility)
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
index 871b9026bf..811ff2a2d5 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
@@ -182,10 +182,12 @@ def process_device_status(self, run_id, edge_id, status):
         server_id = edge_id_status_dict.get("server", 0)
         enable_fault_tolerance, fault_tolerance_rate = self.parse_fault_tolerance_params(run_id)
         running_edges_list = list()
+        edge_nums = 0
         for edge_id_item, status_item in edge_id_status_dict.items():
             if edge_id_item == "server":
                 continue
 
+            edge_nums += 1
             if status_item is None or status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \
                     status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION:
                 number_of_failed_edges += 1
@@ -210,7 +212,8 @@ def process_device_status(self, run_id, edge_id, status):
         self.message_reporter.report_client_training_status(edge_id, consensus_status, run_id=run_id, update_db=False)
 
         # Report server status based on the fault tolerance model and parameters
-        edge_nums = len(edge_id_status_dict.keys()) - 1
+        if edge_nums <= 0:
+            return
         status_to_report = self.calculate_server_status(
             run_id, edge_nums, number_of_failed_edges, number_of_finished_edges, number_of_killed_edges,
             running_edges_list, enable_fault_tolerance=enable_fault_tolerance,
diff --git a/python/fedml/core/mlops/mlops_device_perfs.py b/python/fedml/core/mlops/mlops_device_perfs.py
index b258692645..a0e1e972b6 100644
--- a/python/fedml/core/mlops/mlops_device_perfs.py
+++ b/python/fedml/core/mlops/mlops_device_perfs.py
@@ -166,11 +166,14 @@ def report_device_realtime_stats_entry(self, sys_event, role, is_client=False):
         sleep_time_interval_for_server_monitor = 60
 
         while not self.should_stop_device_realtime_stats():
-            if role == ROLE_DEVICE_INFO_REPORTER:
-                time.sleep(sleep_time_interval_for_device_info)
-            elif role == ROLE_DEVICE_JOB_TOTAL_MONITOR:
-                time.sleep(sleep_time_interval_for_client_monitor if is_client
-                           else sleep_time_interval_for_server_monitor)
+            if self.enable_job_total_monitor:
+                if role == ROLE_DEVICE_INFO_REPORTER:
+                    time.sleep(sleep_time_interval_for_device_info)
+                elif role == ROLE_DEVICE_JOB_TOTAL_MONITOR:
+                    time.sleep(sleep_time_interval_for_client_monitor if is_client
+                               else sleep_time_interval_for_server_monitor)
+            else:
+                time.sleep(time_interval_map[role])
 
             try:
                 if role == ROLE_DEVICE_INFO_REPORTER:

From d1e4fbeaad73a252d4118eceeab9d3414db948db Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Sun, 12 May 2024 20:58:48 +0800
Subject: [PATCH 037/282] [CoreEngine] improve the package download progress log
 message.

---
 .../scheduler/scheduler_core/scheduler_base_job_runner.py       | 2 +-
 python/fedml/core/mlops/mlops_device_perfs.py                   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
index f40b8ecfb6..054efe437a 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -187,7 +187,7 @@ def download_package_proc(self, package_url, local_package_file):
                 # Write the chunk to the file
                 written_size = f.write(chunk)
                 total_size += written_size
-                logging.info(f"package downloaded size {total_size/1024} KB")
+                logging.info("package downloaded size %.2f KB", total_size/1024)
                 self.download_time = time.time()
         self.download_finished = True
 
diff --git a/python/fedml/core/mlops/mlops_device_perfs.py b/python/fedml/core/mlops/mlops_device_perfs.py
index a0e1e972b6..29183a6e78 100644
--- a/python/fedml/core/mlops/mlops_device_perfs.py
+++ b/python/fedml/core/mlops/mlops_device_perfs.py
@@ -157,7 +157,7 @@ def report_device_realtime_stats_entry(self, sys_event, role, is_client=False):
         }
 
         job_monitor_obj = None
-        if role == ROLE_AUTO_SCALER:
+        if role == ROLE_AUTO_SCALER or role == ROLE_DEVICE_JOB_TOTAL_MONITOR:
             # job_monitor Should be initialized once
             job_monitor_obj = JobMonitor.get_instance()
 

From 4f65467df09602d2ae67b1d89f7d41d5457c8fa4 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Sun, 12 May 2024 22:43:44 +0800
Subject: [PATCH 038/282] [CoreEngine] pass the job type when releasing GPU IDs;
 set the message center name for deployments.

---
 .../fedml/computing/scheduler/comm_utils/job_utils.py  |  3 +++
 .../model_scheduler/master_protocol_manager.py         |  2 ++
 .../model_scheduler/worker_protocol_manager.py         |  2 ++
 .../scheduler_core/scheduler_base_job_runner.py        | 10 +---------
 .../computing/scheduler/slave/base_slave_job_runner.py |  4 +++-
 .../scheduler/slave/base_slave_job_runner_manager.py   |  4 ++--
 .../scheduler/slave/base_slave_protocol_manager.py     |  5 +++--
 7 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_utils.py b/python/fedml/computing/scheduler/comm_utils/job_utils.py
index ece165e92c..bc5985533b 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_utils.py
@@ -728,6 +728,9 @@ def parse_job_type(running_json):
         job_type = job_yaml.get("job_type", None)
         job_type = job_yaml.get("task_type",
                                 SchedulerConstants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type
+        model_config = running_json_obj.get("model_config", None)
+        if model_config is not None:
+            job_type = SchedulerConstants.JOB_TASK_TYPE_DEPLOY
         return job_type
 
     @staticmethod
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index 144d17fd02..a5f2a37dfe 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -17,6 +17,8 @@ class FedMLDeployMasterProtocolManager(FedMLBaseMasterProtocolManager):
     def __init__(self, args, agent_config=None):
         FedMLBaseMasterProtocolManager.__init__(self, args, agent_config=agent_config)
 
+        self.message_center_name = "deploy_master_agent"
+
         self.topic_start_deployment = None
         self.topic_activate_endpoint = None
         self.topic_deactivate_deployment = None
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
index 5f4835d9aa..3a0f835b6c 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
@@ -23,6 +23,8 @@ class FedMLDeployWorkerProtocolManager(FedMLBaseSlaveProtocolManager):
     def __init__(self, args, agent_config=None):
         FedMLBaseSlaveProtocolManager.__init__(self, args, agent_config=agent_config)
 
+        self.message_center_name = "deploy_slave_agent"
+
         self.topic_start_deployment = None
         self.topic_delete_deployment = None
 
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
index 054efe437a..03d3fd5d92 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -584,15 +584,7 @@ def start_runner_process(
         return None
 
     @staticmethod
-    def cleanup_containers_and_release_gpus(run_id, edge_id):
-        job_type = JobRunnerUtils.get_job_type_from_run_id(run_id)
-
-        if not job_type:
-            logging.info(f"Failed to get job type from run id {run_id}. This is not an error as it would usually "
-                         f"happen when the job is not found in the database because job is already finished and "
-                         f"cleaned up. Exiting cleanup_containers_and_release_gpus.")
-            return
-
+    def cleanup_containers_and_release_gpus(run_id, edge_id, job_type=SchedulerConstants.JOB_TASK_TYPE_TRAIN):
         # Check if the job type is not "serve" or "deploy"
         if not (job_type == SchedulerConstants.JOB_TASK_TYPE_SERVE or
                 job_type == SchedulerConstants.JOB_TASK_TYPE_DEPLOY):
diff --git a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
index de2956ad94..cc7c3c222b 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
@@ -15,6 +15,7 @@
 from multiprocessing import Process
 from ..scheduler_core.scheduler_base_job_runner import FedMLSchedulerBaseJobRunner, RunnerError, RunnerCompletedError
 from ..scheduler_core.general_constants import GeneralConstants
+from ..comm_utils.job_utils import JobRunnerUtils
 
 
 class FedMLBaseSlaveJobRunner(FedMLSchedulerBaseJobRunner, ABC):
@@ -78,7 +79,8 @@ def run(self, process_event, completed_event,  run_extend_queue_list,
                                                                   self.computing_started_time, computing_ended_time,
                                                                   self.args.account_id, self.args.api_key)
             logging.info("Release resources.")
-            FedMLSchedulerBaseJobRunner.cleanup_containers_and_release_gpus(self.run_id, self.edge_id)
+            job_type = JobRunnerUtils.parse_job_type(self.request_json)
+            FedMLSchedulerBaseJobRunner.cleanup_containers_and_release_gpus(self.run_id, self.edge_id, job_type)
             MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id)
             if self.mlops_metrics is not None:
                 self.mlops_metrics.stop_sys_perf()
diff --git a/python/fedml/computing/scheduler/slave/base_slave_job_runner_manager.py b/python/fedml/computing/scheduler/slave/base_slave_job_runner_manager.py
index c058d5dd0e..80e486224e 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_job_runner_manager.py
@@ -8,5 +8,5 @@ class FedMLBaseSlaveJobRunnerManager(FedMLSchedulerBaseJobRunnerManager, ABC):
     def __init__(self):
         FedMLSchedulerBaseJobRunnerManager.__init__(self)
 
-    def cleanup_containers_and_release_gpus(self, run_id, edge_id):
-        FedMLSchedulerBaseJobRunner.cleanup_containers_and_release_gpus(run_id, edge_id)
+    def cleanup_containers_and_release_gpus(self, run_id, edge_id, job_type):
+        FedMLSchedulerBaseJobRunner.cleanup_containers_and_release_gpus(run_id, edge_id, job_type)
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index b3cd154d23..fc67ec2ece 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -301,7 +301,8 @@ def callback_stop_train(self, topic, payload):
         # logging.info("Stop run with multiprocessing...")
         # Stop client with multiprocessing mode
         run_id_str = str(run_id)
-        self._get_job_runner_manager().cleanup_containers_and_release_gpus(run_id, edge_id)
+        self._get_job_runner_manager().cleanup_containers_and_release_gpus(
+            run_id, edge_id, SchedulerConstants.JOB_TASK_TYPE_TRAIN)
         self.sync_run_stop_status(run_status=run_status)
 
         # Register the job stopping message into the status center
@@ -512,7 +513,7 @@ def process_status(self, run_id, status, edge_id):
                 job_type = JobRunnerUtils.parse_job_type(running_json)
                 if not SchedulerConstants.is_deploy_job(job_type):
                     logging.info(f"[run/device][{run_id}/{edge_id}] Release gpu resource when run ended.")
-                    self._get_job_runner_manager().cleanup_containers_and_release_gpus(run_id, edge_id)
+                    self._get_job_runner_manager().cleanup_containers_and_release_gpus(run_id, edge_id, job_type)
 
             # Stop the runner process
             run_process = self._get_job_runner_manager().get_runner_process(run_id)

From 906e8b01de21988a358687e31b4854a73885e9b7 Mon Sep 17 00:00:00 2001
From: alaydshah <alay11shah@gmail.com>
Date: Sun, 12 May 2024 17:43:57 +0000
Subject: [PATCH 039/282] Fix get_available_gpu_card_ids to honor its arguments
 instead of hard-coded values

---
 .../computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
index 58c3888e68..14d8230d06 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
@@ -20,8 +20,8 @@ def get_gpu_cards() -> List[GPUCard]:
         return [NvidiaGPUtil.__convert(gpu) for gpu in GPUtil.getGPUs()]
 
     @staticmethod
-    def get_available_gpu_card_ids(order: str, limit: int, maxLoad: float, maxMemory: float) -> List[int]:
-        return GPUtil.getAvailable(order='memory', limit=limit, maxLoad=0.01, maxMemory=0.01)
+    def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memory: float) -> List[int]:
+        return GPUtil.getAvailable(order=order, limit=limit, maxLoad=max_load, maxMemory=max_memory)
 
     @staticmethod
     def __convert(gpu: GPU) -> GPUCard:

From 3a5daf65ac8908cdbc94b8cb9fb0e4bc8b7bb5ca Mon Sep 17 00:00:00 2001
From: alaydshah <alay11shah@gmail.com>
Date: Sun, 12 May 2024 19:37:11 +0000
Subject: [PATCH 040/282] Add hardware-specific Docker device mapping

---
 .../scheduler/comm_utils/gpu_utils/gpu_utils.py       |  7 ++++++-
 .../scheduler/comm_utils/gpu_utils/nvidia_utils.py    | 11 ++++++++++-
 .../scheduler/comm_utils/gpu_utils/qualcomm_utils.py  |  9 ++++++++-
 .../computing/scheduler/comm_utils/hardware_utils.py  |  7 ++++++-
 4 files changed, 30 insertions(+), 4 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
index 3007bc07bc..c7ce91f694 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
@@ -1,7 +1,7 @@
 from abc import ABC, abstractmethod
 from dataclasses import dataclass
 from enum import Enum, auto
-from typing import Optional, List
+from typing import Optional, List, Dict
 
 
 class GPUCardType(Enum):
@@ -47,3 +47,8 @@ def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memo
     @abstractmethod
     def get_gpu_cards() -> List[GPUCard]:
         raise NotImplementedError
+
+    @staticmethod
+    @abstractmethod
+    def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]:
+        raise NotImplementedError
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
index 14d8230d06..f0da4f8fb4 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
@@ -1,12 +1,15 @@
 import subprocess
-from typing import List, Optional
+from typing import List, Optional, Dict
 
+import docker
+from docker import types
 from GPUtil import GPUtil, GPU
 
 from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType
 
 
 class NvidiaGPUtil(GPUCardUtil):
+
     @classmethod
     def detect_gpu_card_type(cls) -> Optional[GPUCardType]:
         try:
@@ -23,6 +26,12 @@ def get_gpu_cards() -> List[GPUCard]:
     def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memory: float) -> List[int]:
         return GPUtil.getAvailable(order=order, limit=limit, maxLoad=max_load, maxMemory=max_memory)
 
+    @staticmethod
+    def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]:
+        if gpu_ids and len(gpu_ids):
+            return {"device_requests": [docker.types.DeviceRequest(device_ids=gpu_ids, capabilities=[["gpu"]])]}
+        return None
+
     @staticmethod
     def __convert(gpu: GPU) -> GPUCard:
         return GPUCard(
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
index ca55fdab7c..c25c34dcaa 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
@@ -2,12 +2,13 @@
 import math
 import subprocess
 import sys
-from typing import List, Optional
+from typing import List, Optional, Dict
 
 from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType
 
 
 class QualcommNPUtil(GPUCardUtil):
+
     def __init__(self):
         sys.path.append("/opt/qti-aic/dev/lib/x86_64/")
 
@@ -49,6 +50,12 @@ def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memo
         gpu_cards = gpu_cards[0:min(limit, len(gpu_cards))]
         return list(map(lambda card: card.id, gpu_cards))
 
+    @staticmethod
+    def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]:
+        if gpu_ids and len(gpu_ids):
+            return {"devices": [f"/dev/accel/accel{gpu_id}:/dev/accel/accel{gpu_id}" for gpu_id in gpu_ids]}
+        return None
+
     @staticmethod
     def __convert(npu) -> GPUCard:
         # TODO (alaydshah): Add support for load, memoryUtil, temperature
diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
index 1aeb5eb0be..140f316554 100644
--- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
@@ -1,5 +1,5 @@
 import logging
-from typing import Optional, List
+from typing import Optional, List, Dict
 
 from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCardUtil, GPUCard
 from fedml.computing.scheduler.comm_utils.gpu_utils.nvidia_utils import NvidiaGPUtil
@@ -39,6 +39,11 @@ def get_available_gpu_ids(order: str = "memory", limit: int = 1, max_load: float
         gpu_util = HardwareUtil.__get_util()
         return gpu_util.get_available_gpu_card_ids(order, limit, max_load, max_memory) if gpu_util is not None else []
 
+    @staticmethod
+    def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]:
+        gpu_util = HardwareUtil.__get_util()
+        return gpu_util.get_docker_gpu_device_mapping(gpu_ids)
+
 
 if __name__ == "__main__":
     gpus = HardwareUtil.get_gpus()

From e57af1eaaa49490a4d28c802a844ca33cf6e42cd Mon Sep 17 00:00:00 2001
From: alaydshah <alay11shah@gmail.com>
Date: Sun, 12 May 2024 19:51:36 +0000
Subject: [PATCH 041/282] Fix: pass GPU IDs as strings to docker.types.DeviceRequest

---
 .../computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
index f0da4f8fb4..79071bf935 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
@@ -29,7 +29,8 @@ def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memo
     @staticmethod
     def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]:
         if gpu_ids and len(gpu_ids):
-            return {"device_requests": [docker.types.DeviceRequest(device_ids=gpu_ids, capabilities=[["gpu"]])]}
+            gpu_id_list = list(map(lambda x: str(x), gpu_ids))
+            return {"device_requests": [docker.types.DeviceRequest(device_ids=gpu_id_list, capabilities=[["gpu"]])]}
         return None
 
     @staticmethod

From e70f56f7b0449e9be8ae59262a1a47bb8eb2ab02 Mon Sep 17 00:00:00 2001
From: alaydshah <alay11shah@gmail.com>
Date: Sun, 12 May 2024 22:10:56 +0000
Subject: [PATCH 042/282] Add util function to get gpu_ids from container name

---
 .../comm_utils/gpu_utils/gpu_utils.py         |  7 +++
 .../comm_utils/gpu_utils/nvidia_utils.py      | 13 ++++-
 .../comm_utils/gpu_utils/qualcomm_utils.py    | 55 ++++++++++++++++++-
 .../scheduler/comm_utils/hardware_utils.py    | 14 ++++-
 4 files changed, 86 insertions(+), 3 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
index c7ce91f694..292bcb3624 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
@@ -3,6 +3,8 @@
 from enum import Enum, auto
 from typing import Optional, List, Dict
 
+from docker import DockerClient
+
 
 class GPUCardType(Enum):
     NVIDIA = auto()
@@ -52,3 +54,8 @@ def get_gpu_cards() -> List[GPUCard]:
     @abstractmethod
     def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]:
         raise NotImplementedError
+
+    @staticmethod
+    @abstractmethod
+    def get_docker_gpu_ids_by_container_name(container_name: str, docker_client: DockerClient) -> List[int]:
+        raise NotImplementedError
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
index 79071bf935..0c05b25644 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
@@ -1,8 +1,9 @@
+import logging
 import subprocess
 from typing import List, Optional, Dict
 
 import docker
-from docker import types
+from docker import types, DockerClient
 from GPUtil import GPUtil, GPU
 
 from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType
@@ -33,6 +34,16 @@ def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]:
             return {"device_requests": [docker.types.DeviceRequest(device_ids=gpu_id_list, capabilities=[["gpu"]])]}
         return None
 
+    @staticmethod
+    def get_docker_gpu_ids_by_container_name(container_name: str, docker_client: DockerClient) -> List[int]:
+        try:
+            gpu_ids = docker_client.api.inspect_container(container_name)["HostConfig"]["DeviceRequests"][0]["DeviceIDs"]
+            return list(map(int, gpu_ids))
+        except Exception as e:
+            logging.error(f"Failed to get GPU IDs: {e}")
+            pass
+        return []
+
     @staticmethod
     def __convert(gpu: GPU) -> GPUCard:
         return GPUCard(
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
index c25c34dcaa..5f0eb3b5c6 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
@@ -1,13 +1,17 @@
 import logging
 import math
+import re
 import subprocess
 import sys
 from typing import List, Optional, Dict
 
+from docker import DockerClient
+
 from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType
 
 
 class QualcommNPUtil(GPUCardUtil):
+    NPU_CARD_PATH = "/dev/accel/accel"
 
     def __init__(self):
         sys.path.append("/opt/qti-aic/dev/lib/x86_64/")
@@ -53,9 +57,22 @@ def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memo
     @staticmethod
     def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]:
         if gpu_ids and len(gpu_ids):
-            return {"devices": [f"/dev/accel/accel{gpu_id}:/dev/accel/accel{gpu_id}" for gpu_id in gpu_ids]}
+            return {
+                "devices": [f"{QualcommNPUtil.NPU_CARD_PATH}{gpu_id}:{QualcommNPUtil.NPU_CARD_PATH}{gpu_id}" for gpu_id
+                            in gpu_ids]}
         return None
 
+    @staticmethod
+    def get_docker_gpu_ids_by_container_name(container_name: str, docker_client: DockerClient) -> List[int]:
+        gpu_ids = []
+        try:
+            docker_inspect_info = docker_client.api.inspect_container(container_name)
+            gpu_ids = QualcommNPUtil.__parse_gpu_ids(docker_inspect_info.get("HostConfig", {}))
+        except Exception as e:
+            logging.error(f"Failed to get GPU IDs: {e}")
+            pass
+        return gpu_ids
+
     @staticmethod
     def __convert(npu) -> GPUCard:
         # TODO (alaydshah): Add support for load, memoryUtil, temperature
@@ -75,3 +92,39 @@ def __convert(npu) -> GPUCard:
             memoryUsed=memory_used,
             memoryUtil=memory_utilized,
         )
+
+    @staticmethod
+    def __parse_gpu_ids(host_config: dict) -> List[int]:
+        devices = host_config.get('Devices', [])
+        gpu_ids = []
+        for device in devices:
+            gpu_id = QualcommNPUtil.__extract_integer_from_host_path(device.get('PathOnHost', None))
+
+            # Check explicitly if gpu_id is not None, as gpu_id can be 0, which is a valid value to include.
+            if gpu_id is not None:
+                gpu_ids.append(gpu_id)
+        return gpu_ids
+
+    @staticmethod
+    def __extract_integer_from_host_path(host_path: str) -> Optional[int]:
+        if not host_path:
+            logging.error("Host Path is None; GPU Id extraction Failed")
+            return None
+
+        npu_card_path = QualcommNPUtil.NPU_CARD_PATH
+
+        # Check if host_path starts with npu_card_path
+        if host_path.startswith(npu_card_path):
+
+            # Extract the numeric suffix from the host path
+            suffix = host_path[len(npu_card_path):]  # Get the substring after npu_card_path
+            match = re.match(r'^(\d+)', suffix)  # Use regex to match the leading integer
+            if match:
+                return int(match.group(1))  # Return the extracted integer
+            else:
+                logging.error(f"Failed to extract GPU id from Host Path {host_path}")
+        else:
+            logging.error(f"Host Path {host_path} doesn't start with NPU Card Path {npu_card_path}")
+
+        # Return None if extraction fails
+        return None
diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
index 140f316554..a0d27fd7db 100644
--- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
@@ -1,6 +1,8 @@
 import logging
 from typing import Optional, List, Dict
 
+from docker import DockerClient
+
 from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCardUtil, GPUCard
 from fedml.computing.scheduler.comm_utils.gpu_utils.nvidia_utils import NvidiaGPUtil
 from fedml.computing.scheduler.comm_utils.gpu_utils.qualcomm_utils import QualcommNPUtil
@@ -42,7 +44,17 @@ def get_available_gpu_ids(order: str = "memory", limit: int = 1, max_load: float
     @staticmethod
     def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]:
         gpu_util = HardwareUtil.__get_util()
-        return gpu_util.get_docker_gpu_device_mapping(gpu_ids)
+        if gpu_util is not None:
+            return gpu_util.get_docker_gpu_device_mapping(gpu_ids)
+        return None
+
+    @staticmethod
+    def get_docker_gpu_ids_by_container_name(container_name: str, docker_client: DockerClient) -> List[int]:
+        gpu_ids = []
+        gpu_util = HardwareUtil.__get_util()
+        if gpu_util is not None:
+            gpu_ids = gpu_util.get_docker_gpu_ids_by_container_name(container_name, docker_client)
+        return gpu_ids
 
 
 if __name__ == "__main__":

From 33da11e4024b617085b497e2fbc7a5a339fe4d45 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Sun, 12 May 2024 15:24:26 -0700
Subject: [PATCH 043/282] Make GPU stats fetching hardware-agnostic

---
 .../scheduler/comm_utils/container_utils.py   | 40 +++++++------------
 1 file changed, 14 insertions(+), 26 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/container_utils.py b/python/fedml/computing/scheduler/comm_utils/container_utils.py
index 4e09315b78..3d076c0ffe 100644
--- a/python/fedml/computing/scheduler/comm_utils/container_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/container_utils.py
@@ -320,7 +320,7 @@ def get_container_perf(self, c_name) -> ContainerMetrics:
             round(blk_read_bytes / (1024 * 1024), 1), round(blk_write_bytes / (1024 * 1024), 1))
 
         # Calculate the gpu usage
-        gpus_stat = self.generate_container_gpu_stats(c_name)
+        gpus_stat = self.generate_container_gpu_stats(container_name=c_name)
 
         # Record timestamp
         timestamp = stats["read"]
@@ -328,39 +328,27 @@ def get_container_perf(self, c_name) -> ContainerMetrics:
         return ContainerUtils.ContainerMetrics(cpu_percent, mem_gb_used, mem_gb_avail, recv_megabytes, sent_megabytes,
                                                blk_read_bytes, blk_write_bytes, timestamp, gpus_stat)
 
-    def generate_container_gpu_stats(self, c_name):
-        gpu_ids = self.get_gpu_ids_by_container_name(c_name)
+    def generate_container_gpu_stats(self, container_name):
+        client = self.get_docker_client()
+        gpu_ids = HardwareUtil.get_docker_gpu_ids_by_container_name(container_name=container_name, docker_client=client)
         gpu_stats = self.gpu_stats(gpu_ids)
         return gpu_stats
 
-    def get_gpu_ids_by_container_name(self, c_name):
-        client = self.get_docker_client()
-        gpu_ids = []
-        try:
-            gpu_ids = client.api.inspect_container(c_name)["HostConfig"]["DeviceRequests"][0]["DeviceIDs"]
-            gpu_ids = list(map(int, gpu_ids))
-        except Exception as e:
-            logging.error(f"Failed to get GPU IDs: {e}")
-            pass
-
-        return gpu_ids
-
     @staticmethod
     def gpu_stats(gpu_ids):
         utilz, memory, temp = None, None, None
         gpu_stats_map = {}  # gpu_id: int -> {"gpu_utilization", "gpu_memory_allocated", "gpu_temp"}
+        gpu_ids = set(gpu_ids)
         try:
-            gpus = HardwareUtil.get_gpus()
-
-            for i in gpu_ids:
-                gpu = gpus[i]
-                gpu_stats_map[i] = {
-                    "gpu_utilization": gpu.load*100,
-                    "gpu_memory_allocated": gpu.memoryUtil*100,
-                    "gpu_temp": gpu.temperature,
-                    # "gpu_power_usage": pynvml.nvmlDeviceGetPowerUsage(handle) / 1000,   # in watts
-                    # "gpu_time_spent_accessing_memory": utilz.memory   # in ms
-                }
+            for gpu in HardwareUtil.get_gpus():
+                if gpu.id in gpu_ids:
+                    gpu_stats_map[gpu.id] = {
+                        "gpu_utilization": gpu.load * 100,
+                        "gpu_memory_allocated": gpu.memoryUsed / gpu.memoryTotal * 100,
+                        "gpu_temp": gpu.temperature,
+                        # "gpu_power_usage": pynvml.nvmlDeviceGetPowerUsage(handle) / 1000,   # in watts
+                        # "gpu_time_spent_accessing_memory": utilz.memory   # in ms
+                    }
         except Exception as e:
             logging.error(f"Failed to get GPU stats: {e}")
 

From 2d458a8342d7f545fe0991d0d6bb0bf2293a7cc8 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Sun, 12 May 2024 15:27:11 -0700
Subject: [PATCH 044/282] Add List[int] type hint to gpu_stats parameter

---
 .../fedml/computing/scheduler/comm_utils/container_utils.py   | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/container_utils.py b/python/fedml/computing/scheduler/comm_utils/container_utils.py
index 3d076c0ffe..c7645104c0 100644
--- a/python/fedml/computing/scheduler/comm_utils/container_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/container_utils.py
@@ -2,6 +2,8 @@
 import os
 import traceback
 import datetime
+from typing import List
+
 from dateutil.parser import isoparse
 
 import docker
@@ -335,7 +337,7 @@ def generate_container_gpu_stats(self, container_name):
         return gpu_stats
 
     @staticmethod
-    def gpu_stats(gpu_ids):
+    def gpu_stats(gpu_ids: List[int]):
         utilz, memory, temp = None, None, None
         gpu_stats_map = {}  # gpu_id: int -> {"gpu_utilization", "gpu_memory_allocated", "gpu_temp"}
         gpu_ids = set(gpu_ids)

From be635dbe2cf2ce29b36508846062fccc112485a9 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Sun, 12 May 2024 16:22:44 -0700
Subject: [PATCH 045/282] Update container creation during deployment

---
 .../scheduler/comm_utils/container_utils.py   |  5 +-
 .../comm_utils/gpu_utils/gpu_utils.py         |  2 +-
 .../comm_utils/gpu_utils/nvidia_utils.py      |  7 ++-
 .../comm_utils/gpu_utils/qualcomm_utils.py    |  4 +-
 .../scheduler/comm_utils/hardware_utils.py    |  4 +-
 .../device_model_deployment.py                | 59 +++++++++----------
 6 files changed, 40 insertions(+), 41 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/container_utils.py b/python/fedml/computing/scheduler/comm_utils/container_utils.py
index c7645104c0..2f5fa31fb5 100644
--- a/python/fedml/computing/scheduler/comm_utils/container_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/container_utils.py
@@ -227,9 +227,8 @@ def pull_image_with_policy(self, image_pull_policy, image_name, client=None):
             raise Exception(f"Unsupported image pull policy: {image_pull_policy}")
 
     class ContainerMetrics:
-        def __init__(self, cpu_percent, mem_used_megabytes, mem_avail_megabytes, network_recv_megabytes, network_sent_megabytes,
-                     blk_read_megabytes, blk_write_megabytes, timestamp, gpus_stat
-                     ):
+        def __init__(self, cpu_percent, mem_used_megabytes, mem_avail_megabytes, network_recv_megabytes,
+                     network_sent_megabytes, blk_read_megabytes, blk_write_megabytes, timestamp, gpus_stat):
             self.cpu_percent = cpu_percent
             self.mem_used_megabytes = mem_used_megabytes
             self.mem_avail_megabytes = mem_avail_megabytes
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
index 292bcb3624..bc7a3b8216 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
@@ -52,7 +52,7 @@ def get_gpu_cards() -> List[GPUCard]:
 
     @staticmethod
     @abstractmethod
-    def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]:
+    def get_docker_gpu_device_mapping(gpu_ids: Optional[List[int]], num_gpus: int = 0) -> Optional[Dict]:
         raise NotImplementedError
 
     @staticmethod
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
index 0c05b25644..34d0c3be1c 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
@@ -28,11 +28,12 @@ def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memo
         return GPUtil.getAvailable(order=order, limit=limit, maxLoad=max_load, maxMemory=max_memory)
 
     @staticmethod
-    def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]:
-        if gpu_ids and len(gpu_ids):
+    def get_docker_gpu_device_mapping(gpu_ids: List[int], num_gpus: int = 0) -> Optional[Dict]:
+        if gpu_ids is not None and len(gpu_ids):
             gpu_id_list = list(map(lambda x: str(x), gpu_ids))
             return {"device_requests": [docker.types.DeviceRequest(device_ids=gpu_id_list, capabilities=[["gpu"]])]}
-        return None
+        else:
+            return {"device_requests": [docker.types.DeviceRequest(count=num_gpus, capabilities=[['gpu']])]}
 
     @staticmethod
     def get_docker_gpu_ids_by_container_name(container_name: str, docker_client: DockerClient) -> List[int]:
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
index 5f0eb3b5c6..9c7ea21ea9 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
@@ -55,8 +55,8 @@ def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memo
         return list(map(lambda card: card.id, gpu_cards))
 
     @staticmethod
-    def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]:
-        if gpu_ids and len(gpu_ids):
+    def get_docker_gpu_device_mapping(gpu_ids: Optional[List[int]], num_gpus: int = 0) -> Optional[Dict]:
+        if gpu_ids is not None and len(gpu_ids):
             return {
                 "devices": [f"{QualcommNPUtil.NPU_CARD_PATH}{gpu_id}:{QualcommNPUtil.NPU_CARD_PATH}{gpu_id}" for gpu_id
                             in gpu_ids]}
diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
index a0d27fd7db..0062418631 100644
--- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
@@ -42,10 +42,10 @@ def get_available_gpu_ids(order: str = "memory", limit: int = 1, max_load: float
         return gpu_util.get_available_gpu_card_ids(order, limit, max_load, max_memory) if gpu_util is not None else []
 
     @staticmethod
-    def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]:
+    def get_docker_gpu_device_mapping(gpu_ids: Optional[List[int]], num_gpus: int = 0) -> Optional[Dict]:
         gpu_util = HardwareUtil.__get_util()
         if gpu_util is not None:
-            return gpu_util.get_docker_gpu_device_mapping(gpu_ids)
+            return gpu_util.get_docker_gpu_device_mapping(gpu_ids, num_gpus)
         return None
 
     @staticmethod
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index bd04228355..8d3be211a2 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -18,6 +18,7 @@
 import fedml
 from fedml.computing.scheduler.comm_utils import sys_utils, security_utils
 from fedml.computing.scheduler.comm_utils.container_utils import ContainerUtils
+from fedml.computing.scheduler.comm_utils.hardware_utils import HardwareUtil
 from fedml.computing.scheduler.comm_utils.job_utils import JobRunnerUtils
 
 for type_name in collections.abc.__all__:
@@ -231,24 +232,6 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
     except docker.errors.APIError:
         raise Exception("Failed to get the container object")
 
-    # Allocate the GPU
-    # TODO: Make sure no competition for each replica in a single deployment
-    if exist_container_obj is not None:
-        client.api.remove_container(exist_container_obj.id, v=True, force=True)
-    device_requests = []
-    if no_real_gpu_allocation is not None:
-        use_gpu = not no_real_gpu_allocation
-    if use_gpu:
-        logging.info("Number of GPUs: {}".format(num_gpus))
-        if gpu_ids is not None:
-            gpu_id_list = map(lambda x: str(x), gpu_ids)
-            device_requests.append(
-                docker.types.DeviceRequest(device_ids=list(gpu_id_list), capabilities=[['gpu']]))
-        else:
-            device_requests.append(
-                docker.types.DeviceRequest(count=num_gpus, capabilities=[['gpu']]))
-    logging.info(f"device_requests: {device_requests}")
-
     # Pull the inference image
     logging.info(f"Start pulling the inference image {inference_image_name}... with policy {image_pull_policy}")
     ContainerUtils.get_instance().pull_image_with_policy(image_pull_policy, inference_image_name)
@@ -306,6 +289,32 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
         }
         environment["MAIN_ENTRY"] = relative_entry
 
+    host_config_dict = {
+        "binds": binds,
+        "port_bindings": {
+            port_inside_container: usr_indicated_worker_port
+        },
+        "shm_size": shm_size,
+        "storage_opt": storage_opt,
+        "tmpfs": tmpfs,
+        "cpu_count": cpus,
+        "mem_limit": memory
+    }
+
+    # Allocate the GPU
+    # TODO: Make sure no competition for each replica in a single deployment
+    if exist_container_obj is not None:
+        client.api.remove_container(exist_container_obj.id, v=True, force=True)
+    device_requests = {}
+    if no_real_gpu_allocation is not None:
+        use_gpu = not no_real_gpu_allocation
+    if use_gpu:
+        logging.info("Number of GPUs: {}".format(num_gpus))
+        device_requests = HardwareUtil.get_docker_gpu_device_mapping(gpu_ids, num_gpus)
+    logging.info(f"device_requests: {device_requests}")
+
+    host_config_dict.update(device_requests)
+
     # Environment variables
     if not enable_custom_image:
         # For some image, the default user is root. Unified to fedml.
@@ -325,24 +334,14 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
             environment[key] = extra_envs[key]
 
     try:
+        host_config = client.api.create_host_config(**host_config_dict)
         new_container = client.api.create_container(
             image=inference_image_name,
             name=default_server_container_name,
             volumes=volumns,
             ports=[port_inside_container],  # port open inside the container
             environment=environment,
-            host_config=client.api.create_host_config(
-                binds=binds,
-                port_bindings={
-                    port_inside_container: usr_indicated_worker_port  # Could be either None or a port number
-                },
-                device_requests=device_requests,
-                shm_size=shm_size,
-                storage_opt=storage_opt,
-                tmpfs=tmpfs,
-                cpu_count=cpus,
-                mem_limit=memory,
-            ),
+            host_config=host_config,
             detach=True,
             command=customized_image_entry_cmd if enable_custom_image else None,
             entrypoint=customized_image_entry_cmd if enable_custom_image else None

From 9d5b54f1c0ab826e6a900def7bd26743500f4575 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Sun, 12 May 2024 18:02:52 -0700
Subject: [PATCH 046/282] Nit: Update naming

---
 .../scheduler/model_scheduler/device_model_deployment.py  | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index 8d3be211a2..ca83f21c33 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -305,15 +305,15 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
     # TODO: Make sure no competition for each replica in a single deployment
     if exist_container_obj is not None:
         client.api.remove_container(exist_container_obj.id, v=True, force=True)
-    device_requests = {}
+    device_mapping = {}
     if no_real_gpu_allocation is not None:
         use_gpu = not no_real_gpu_allocation
     if use_gpu:
         logging.info("Number of GPUs: {}".format(num_gpus))
-        device_requests = HardwareUtil.get_docker_gpu_device_mapping(gpu_ids, num_gpus)
-    logging.info(f"device_requests: {device_requests}")
+        device_mapping = HardwareUtil.get_docker_gpu_device_mapping(gpu_ids, num_gpus)
+    logging.info(f"device_mapping: {device_mapping}")
 
-    host_config_dict.update(device_requests)
+    host_config_dict.update(device_mapping)
 
     # Environment variables
     if not enable_custom_image:

From 2fcf57d9ca5b25dba4fe166ec7a95d9ea89c4239 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Sun, 12 May 2024 18:06:51 -0700
Subject: [PATCH 047/282] Add check as device_mapping can be None

---
 .../scheduler/model_scheduler/device_model_deployment.py       | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index ca83f21c33..1876373d25 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -313,7 +313,8 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
         device_mapping = HardwareUtil.get_docker_gpu_device_mapping(gpu_ids, num_gpus)
     logging.info(f"device_mapping: {device_mapping}")
 
-    host_config_dict.update(device_mapping)
+    if device_mapping:
+        host_config_dict.update(device_mapping)
 
     # Environment variables
     if not enable_custom_image:

From 3f763957438256e63161bdd5d27ff262a855d340 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Tue, 14 May 2024 01:21:30 +0800
Subject: [PATCH 048/282] [CoreEngine] make the protocol manager and job runner
 work on concurrent jobs.

---
 .../model_scheduler/master_job_runner.py      | 26 +++++++------------
 .../master_protocol_manager.py                |  8 +++---
 .../model_scheduler/worker_job_runner.py      |  1 +
 .../scheduler_base_protocol_manager.py        |  9 +++++++
 .../slave/base_slave_protocol_manager.py      |  7 +++--
 5 files changed, 29 insertions(+), 22 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index 13876d0184..e3073700d2 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -74,6 +74,7 @@ def run_impl(
             model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \
             inference_end_point_id, use_gpu, memory_size, model_version, inference_port = \
             FedMLDeployMasterJobRunner.parse_model_run_params(self.request_json)
+        self.run_id = run_id
 
         # Print request parameters.
         logging.info("model deployment request: {}".format(self.request_json))
@@ -120,9 +121,7 @@ def run_impl(
         self.stop_device_inference_monitor(
             run_id, end_point_name, model_id, model_name, model_version)
         self.start_device_inference_monitor(
-            run_id, end_point_name, model_id, model_name, model_version,
-            redis_addr=self.redis_addr, redis_port=self.redis_port, redis_password=self.redis_password
-        )
+            run_id, end_point_name, model_id, model_name, model_version)
 
         # Changed the status to "IDLE"
         self.status_reporter.report_server_id_status(
@@ -467,7 +466,7 @@ def process_deployment_result_message(self, topic=None, payload=None):
     def start_device_inference_gateway(
             run_id, end_point_name, model_id,
             model_name, model_version, inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
-            agent_config=None, redis_addr=None, redis_port=None, redis_password=None
+            agent_config=None, redis_addr="localhost", redis_port=6379, redis_password="fedml_default"
     ):
         # start unified inference server
         running_model_name = ServerConstants.get_running_model_name(end_point_name,
@@ -515,7 +514,7 @@ def start_device_inference_gateway(
     @staticmethod
     def start_device_inference_monitor(
             run_id, end_point_name, model_id, model_name, model_version, check_stopped_event=True,
-            redis_addr=None, redis_port=None, redis_password=None
+            redis_addr="localhost", redis_port=6379, redis_password="fedml_default"
     ):
         # start inference monitor server
         # Will report the qps related metrics to the MLOps
@@ -530,7 +529,7 @@ def start_device_inference_monitor(
             [python_program, monitor_file, "-v", fedml.get_env_version(), "-ep", run_id_str,
              "-epn", str(end_point_name), "-mi", str(model_id), "-mn", model_name,
              "-mv", model_version, "-iu", "infer_url", "-ra", redis_addr,
-             "-rp", redis_port, "-rpw", redis_password],
+             "-rp", str(redis_port), "-rpw", redis_password],
             should_capture_stdout=False, should_capture_stderr=False
         )
         return monitor_process
@@ -543,7 +542,7 @@ def stop_device_inference_monitor(run_id, end_point_name, model_id, model_name,
                                                   model_id, model_name, model_version)
 
     @staticmethod
-    def recover_inference_and_monitor(redis_addr=None, redis_port=None, redis_password=None):
+    def recover_inference_and_monitor():
         # noinspection PyBroadException
         try:
             history_jobs = FedMLServerDataInterface.get_instance().get_history_jobs()
@@ -559,9 +558,8 @@ def recover_inference_and_monitor(redis_addr=None, redis_port=None, redis_passwo
                     inference_end_point_id, use_gpu, memory_size, model_version, inference_port = \
                     FedMLDeployMasterJobRunner.parse_model_run_params(json.loads(job.running_json))
 
-                FedMLModelCache.get_instance().set_redis_params(redis_addr, redis_password)
-                is_activated = FedMLModelCache.get_instance(redis_addr, redis_port). \
-                    get_end_point_activation(run_id)
+                FedMLModelCache.get_instance().set_redis_params()
+                is_activated = FedMLModelCache.get_instance().get_end_point_activation(run_id)
                 if not is_activated:
                     continue
 
@@ -573,16 +571,12 @@ def recover_inference_and_monitor(redis_addr=None, redis_port=None, redis_passwo
 
                 FedMLDeployMasterJobRunner.start_device_inference_gateway(
                     run_id, end_point_name, model_id, model_name, model_version, inference_port=inference_port,
-                    agent_config=agent_config, redis_addr=redis_addr, redis_port=redis_port, redis_password=redis_password)
+                    agent_config=agent_config)
 
                 FedMLDeployMasterJobRunner.stop_device_inference_monitor(
                     run_id, end_point_name, model_id, model_name, model_version)
                 FedMLDeployMasterJobRunner.start_device_inference_monitor(
-                    run_id, end_point_name, model_id, model_name, model_version,
-                    redis_addr=FedMLDeployMasterJobRunner.default_redis_addr,
-                    redis_port=FedMLDeployMasterJobRunner.default_redis_port,
-                    redis_password=FedMLDeployMasterJobRunner.default_redis_password
-                )
+                    run_id, end_point_name, model_id, model_name, model_version)
         except Exception as e:
             logging.info("recover inference and monitor: {}".format(traceback.format_exc()))
 
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index a5f2a37dfe..eb23bf1278 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -199,7 +199,7 @@ def callback_start_deployment(self, topic, payload):
         self.request_json = request_json
         run_id_str = str(run_id)
         self.running_request_json[run_id_str] = request_json
-        self.request_json["master_node_ip"] = GeneralConstants.get_ip_address(self.request_json)
+        self.request_json["master_node_ip"] = GeneralConstants.get_ip_address(request_json)
 
         # Set the target status of the devices to redis
         FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
@@ -217,13 +217,13 @@ def callback_start_deployment(self, topic, payload):
 
         # Report stage to mlops: MODEL_DEPLOYMENT_STAGE1 = "Received"
         FedMLDeployJobRunnerManager.get_instance().send_deployment_stages(
-            self.run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"],
+            run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"],
             ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"], "Received request for endpoint {}".format(run_id),
             message_center=self.message_center)
 
         # Report stage to mlops: MODEL_DEPLOYMENT_STAGE2 = "Initializing"
         FedMLDeployJobRunnerManager.get_instance().send_deployment_stages(
-            self.run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE2["index"],
+            run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE2["index"],
             ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"], ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"],
             message_center=self.message_center)
 
@@ -262,7 +262,7 @@ def callback_start_deployment(self, topic, payload):
 
         # Send stage: MODEL_DEPLOYMENT_STAGE3 = "StartRunner"
         FedMLDeployJobRunnerManager.get_instance().send_deployment_stages(
-            self.run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE3["index"],
+            run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE3["index"],
             ServerConstants.MODEL_DEPLOYMENT_STAGE3["text"], ServerConstants.MODEL_DEPLOYMENT_STAGE3["text"],
             message_center=self.message_center)
 
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
index ac9328592c..f9cfdcd921 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
@@ -138,6 +138,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
         inference_engine = model_config_parameters.get("inference_engine",
                                                        ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT)
         inference_end_point_id = run_id
+        self.run_id = run_id
 
         MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
 
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
index e3cac7a425..11d6fa44fd 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
@@ -232,6 +232,15 @@ def rebuild_status_center(self, status_center_queue):
         self.status_reporter.edge_id = self.edge_id
         self.status_reporter.server_agent_id = self.server_agent_id
 
+    def generate_status_report(self, run_id, edge_id, server_agent_id=None):
+        status_reporter = MLOpsMetrics()
+        status_reporter.set_messenger(self, send_message_func=self.send_status_message)
+        status_reporter.run_id = run_id
+        status_reporter.edge_id = edge_id
+        if server_agent_id is not None:
+            status_reporter.server_agent_id = server_agent_id
+        return status_reporter
+
     @abstractmethod
     def generate_protocol_manager(self):
         # Generate the protocol manager instance and set the attribute values.
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index fc67ec2ece..4ff931e6fd 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -10,6 +10,7 @@
 from ..comm_utils.constants import SchedulerConstants
 from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs
 from ..comm_utils.run_process_utils import RunProcessUtils
+from ....core.mlops import MLOpsMetrics
 from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
 from ....core.mlops.mlops_configs import MLOpsConfigs
 from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
@@ -55,8 +56,6 @@ def __init__(self, args, agent_config=None):
         self.fl_topic_request_device_info = None
         self.communication_mgr = None
         self.subscribed_topics = list()
-        self.mlops_metrics = None
-        self.status_reporter = None
         self.job_runners = dict()
         self.ota_upgrade = FedMLOtaUpgrade(edge_id=args.edge_id)
         self.running_request_json = dict()
@@ -263,6 +262,10 @@ def callback_start_train(self, topic, payload):
                 run_id, matched_gpu_num, edge_id, inner_id=endpoint_id,
                 model_master_device_id=model_master_device_id,
                 model_slave_device_id=model_slave_device_id)
+        else:
+            self.generate_status_report(run_id, edge_id, server_agent_id=server_agent_id).report_client_id_status(
+                edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, run_id=run_id)
+            return
         logging.info(
             f"Run started, available gpu ids: {JobRunnerUtils.get_instance().get_available_gpu_id_list(edge_id)}")
 

From 3417f300f2d9977042cb5a9d8fb4531eec67ce57 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 15 May 2024 01:17:03 +0800
Subject: [PATCH 049/282] [CoreEngine] make the deployment status more stable.

---
 python/fedml/api/api_test.py                  |  6 +--
 .../model_scheduler/job_runner_msg_sender.py  | 12 ++----
 .../model_scheduler/master_job_runner.py      |  3 +-
 .../master_protocol_manager.py                | 27 ++++++------
 .../model_scheduler/worker_job_runner.py      |  6 +--
 .../worker_protocol_manager.py                |  1 +
 .../scheduler_core/general_constants.py       | 13 ++++++
 .../scheduler_base_job_runner.py              | 42 +++++++++++++------
 .../scheduler_base_protocol_manager.py        |  1 +
 .../scheduler/scheduler_core/status_center.py |  1 +
 .../status_manager_protocols.py               | 15 +++++++
 .../scheduler/slave/base_slave_job_runner.py  | 10 ++---
 12 files changed, 91 insertions(+), 46 deletions(-)

diff --git a/python/fedml/api/api_test.py b/python/fedml/api/api_test.py
index fc2fb77b20..5d899bb1fd 100755
--- a/python/fedml/api/api_test.py
+++ b/python/fedml/api/api_test.py
@@ -4,7 +4,7 @@
 import fedml
 
 # Login
-fedml.set_env_version("local")
+fedml.set_env_version("test")
 fedml.set_local_on_premise_platform_port(18080)
 error_code, error_msg = fedml.api.fedml_login(api_key="1316b93c82da40ce90113a2ed12f0b14")
 if error_code != 0:
@@ -19,7 +19,7 @@
 
 # Launch job
 launch_result_list = list()
-for i in range(0, 1):
+for i in range(0, 10):
     launch_result = fedml.api.launch_job(yaml_file)
     launch_result_list.append(launch_result)
     # launch_result = fedml.api.launch_job_on_cluster(yaml_file, "alex-cluster")
@@ -33,7 +33,7 @@
         if log_result is None or log_result.run_status is None:
             print(f"Failed to get job status.")
             #exit(1)
-        print(f"Run status {log_result.run_status}")
+        print(f"Run {launch_result.run_id}, status {log_result.run_status}")
         time.sleep(0.5)
 
 # Get job logs
diff --git a/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py
index 482a21b2d4..235c4deb74 100755
--- a/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py
+++ b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py
@@ -33,8 +33,7 @@ def send_deployment_results(self, end_point_id, end_point_name,
                                 model_name, model_inference_url,
                                 model_version, inference_port, inference_engine,
                                 model_metadata, model_config, input_json, output_json, replica_id_list=None):
-        deployment_results_topic_prefix = "model_ops/model_device/return_deployment_result"
-        deployment_results_topic = "{}/{}".format(deployment_results_topic_prefix, end_point_id)
+        deployment_results_topic = "model_ops/model_device/return_deployment_result"
         deployment_results_payload = {"end_point_id": end_point_id, "end_point_name": end_point_name,
                                       "model_name": model_name, "model_url": model_inference_url,
                                       "version": model_version, "port": inference_port,
@@ -48,15 +47,13 @@ def send_deployment_results(self, end_point_id, end_point_name,
         logging.info(f"[Master] deployment_results_payload is sent to mlops: {deployment_results_payload}")
 
         self.message_center.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload))
-        self.message_center.send_message_json(deployment_results_topic_prefix, json.dumps(deployment_results_payload))
 
     @staticmethod
     def send_deployment_status(
             end_point_id, end_point_name, model_name, model_inference_url, model_status, message_center=None):
         if message_center is None:
             return
-        deployment_status_topic_prefix = "model_ops/model_device/return_deployment_status"
-        deployment_status_topic = "{}/{}".format(deployment_status_topic_prefix, end_point_id)
+        deployment_status_topic = "model_ops/model_device/return_deployment_status"
         deployment_status_payload = {"end_point_id": end_point_id, "end_point_name": end_point_name,
                                      "model_name": model_name,
                                      "model_url": model_inference_url,
@@ -65,7 +62,6 @@ def send_deployment_status(
         logging.info(f"[Master] deployment_status_payload is sent to mlops: {deployment_status_payload}")
 
         message_center.send_message_json(deployment_status_topic, json.dumps(deployment_status_payload))
-        message_center.send_message_json(deployment_status_topic_prefix, json.dumps(deployment_status_payload))
 
     @staticmethod
     def send_deployment_stages(end_point_id, model_name, model_id, model_inference_url,
@@ -73,8 +69,7 @@ def send_deployment_stages(end_point_id, model_name, model_id, model_inference_u
                                message_center=None):
         if message_center is None:
             return
-        deployment_stages_topic_prefix = "model_ops/model_device/return_deployment_stages"
-        deployment_stages_topic = "{}/{}".format(deployment_stages_topic_prefix, end_point_id)
+        deployment_stages_topic = "model_ops/model_device/return_deployment_stages"
         deployment_stages_payload = {"model_name": model_name,
                                      "model_id": model_id,
                                      "model_url": model_inference_url,
@@ -85,7 +80,6 @@ def send_deployment_stages(end_point_id, model_name, model_id, model_inference_u
                                      "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))}
 
         message_center.send_message_json(deployment_stages_topic, json.dumps(deployment_stages_payload))
-        message_center.send_message_json(deployment_stages_topic_prefix, json.dumps(deployment_stages_payload))
 
         logging.info(f"-------- Stages has been sent to mlops with stage {model_stages_index} and "
                      f"payload {deployment_stages_payload}")
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index e3073700d2..8ce9d0e102 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -41,6 +41,7 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id
             agent_log_file_dir=ServerConstants.get_log_file_dir()
         )
 
+        self.is_deployment_runner = True
         self.infer_host = "127.0.0.1"
         self.redis_addr = "local"
         self.redis_port = "6379"
@@ -306,7 +307,7 @@ def process_deployment_result_message(self, topic=None, payload=None):
                     return
                 else:
                     # This is the last worker that failed, so we should continue to "ABORTED" status
-                    model_config_parameters = self.running_request_json[run_id_str]["parameters"]
+                    model_config_parameters = self.request_json["parameters"]
                     inference_port = model_config_parameters.get("server_internal_port",
                                                                  ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
                     inference_port_external = model_config_parameters.get("server_external_port", inference_port)
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index eb23bf1278..b4c5b41d74 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -18,6 +18,7 @@ def __init__(self, args, agent_config=None):
         FedMLBaseMasterProtocolManager.__init__(self, args, agent_config=agent_config)
 
         self.message_center_name = "deploy_master_agent"
+        self.is_deployment_status_center = True
 
         self.topic_start_deployment = None
         self.topic_activate_endpoint = None
@@ -215,18 +216,6 @@ def callback_start_deployment(self, topic, payload):
 
         self.subscribe_deployment_messages_from_slave_devices(request_json)
 
-        # Report stage to mlops: MODEL_DEPLOYMENT_STAGE1 = "Received"
-        FedMLDeployJobRunnerManager.get_instance().send_deployment_stages(
-            run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"],
-            ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"], "Received request for endpoint {}".format(run_id),
-            message_center=self.message_center)
-
-        # Report stage to mlops: MODEL_DEPLOYMENT_STAGE2 = "Initializing"
-        FedMLDeployJobRunnerManager.get_instance().send_deployment_stages(
-            run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE2["index"],
-            ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"], ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"],
-            message_center=self.message_center)
-
         ServerConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, self.edge_id, run_id=run_id)
 
         # Num diff
@@ -260,6 +249,18 @@ def callback_start_deployment(self, topic, payload):
         if process is not None:
             ServerConstants.save_run_process(run_id, process.pid)
 
+        # Report stage to mlops: MODEL_DEPLOYMENT_STAGE1 = "Received"
+        FedMLDeployJobRunnerManager.get_instance().send_deployment_stages(
+            run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"],
+            ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"], "Received request for endpoint {}".format(run_id),
+            message_center=self.message_center)
+
+        # Report stage to mlops: MODEL_DEPLOYMENT_STAGE2 = "Initializing"
+        FedMLDeployJobRunnerManager.get_instance().send_deployment_stages(
+            run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE2["index"],
+            ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"], ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"],
+            message_center=self.message_center)
+
         # Send stage: MODEL_DEPLOYMENT_STAGE3 = "StartRunner"
         FedMLDeployJobRunnerManager.get_instance().send_deployment_stages(
             run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE3["index"],
@@ -328,6 +329,8 @@ def subscribe_deployment_messages_from_slave_devices(self, request_json):
 
             logging.info("subscribe device messages {}".format(deployment_results_topic))
 
+        self.setup_listeners_for_edge_status(run_id, edge_id_list, self.edge_id)
+
     def subscribe_spec_device_message(self, run_id, device_id):
         if device_id == self.edge_id:
             return
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
index f9cfdcd921..332dab2547 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
@@ -31,6 +31,7 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id
             agent_log_file_dir=ClientConstants.get_log_file_dir()
         )
 
+        self.is_deployment_runner = True
         self.infer_host = "127.0.0.1"
         self.redis_addr = "local"
         self.redis_port = "6379"
@@ -286,11 +287,8 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                         inference_engine, model_metadata, model_config)
 
                     self.status_reporter.run_id = self.run_id
-                    self.status_reporter.report_client_id_status(
-                        self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
-                        is_from_model=True, run_id=self.run_id)
 
-                    return False
+                    raise Exception("[Worker] Failed to deploy the model.")
                 else:
                     # Send failed successful result back to master
                     logging.info("Finished deployment, continue to send results to master...")
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
index 3a0f835b6c..f9bc70452d 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
@@ -24,6 +24,7 @@ def __init__(self, args, agent_config=None):
         FedMLBaseSlaveProtocolManager.__init__(self, args, agent_config=agent_config)
 
         self.message_center_name = "deploy_slave_agent"
+        self.is_deployment_status_center = True
 
         self.topic_start_deployment = None
         self.topic_delete_deployment = None
diff --git a/python/fedml/computing/scheduler/scheduler_core/general_constants.py b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
index ba8842b30e..0cc6044d4b 100755
--- a/python/fedml/computing/scheduler/scheduler_core/general_constants.py
+++ b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
@@ -44,6 +44,19 @@ class GeneralConstants:
     MSG_MLOPS_SERVER_STATUS_FINISHED = "FINISHED"
     MSG_MLOPS_SERVER_STATUS_EXCEPTION = "EXCEPTION"
 
+    MSG_MODELOPS_DEPLOYMENT_STATUS_INITIALIZING = "INITIALIZING"
+    MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYING = "DEPLOYING"
+    MSG_MODELOPS_DEPLOYMENT_STATUS_INFERRING = "INFERRING"
+    MSG_MODELOPS_DEPLOYMENT_STATUS_OVERLOAD = "OVERLOAD"
+    MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED = "FAILED"
+    MSG_MODELOPS_DEPLOYMENT_STATUS_RESCALING = "RESCALING"
+    MSG_MODELOPS_DEPLOYMENT_STATUS_UPDATING = "UPDATING"
+    MSG_MODELOPS_DEPLOYMENT_STATUS_UPDATING_FAILED = "UPDATING_FAILED"
+    MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTING = "ABORTING"
+    MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED = "ABORTED"
+    MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED = "DEPLOYED"
+    MSG_MODELOPS_DEPLOYMENT_STATUS_KILLED = "KILLED"
+
     MASTER_LOGIN_PROGRAM = "server_login.py"
     SLAVE_LOGIN_PROGRAM = "client_login.py"
 
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
index 03d3fd5d92..69b69f4d4c 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -1,5 +1,6 @@
 import json
 import logging
+import multiprocessing
 import os
 import platform
 import random
@@ -7,6 +8,7 @@
 import time
 import traceback
 import zipfile
+import queue
 from ..comm_utils.constants import SchedulerConstants
 from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs
 from ..scheduler_entry.constants import Constants
@@ -82,8 +84,7 @@ def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id
             "${FEDSYS.CLIENT_OBJECT_LIST}": "",
             "${FEDSYS.LOG_SERVER_URL}": "",
         }
-        self.download_time = time.time()
-        self.download_finished = False
+        self.is_deployment_runner = False
 
     def __repr__(self):
         return "<{klass} @{id:x} {attrs}>".format(
@@ -162,7 +163,7 @@ def package_download_progress(self, count, blksize, filesize):
             self.prev_download_progress = progress_int
             logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int))
 
-    def download_package_proc(self, package_url, local_package_file):
+    def download_package_proc(self, package_url, local_package_file, completed_event, info_queue):
         import requests
         headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                                  'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'}
@@ -188,8 +189,8 @@ def download_package_proc(self, package_url, local_package_file):
                 written_size = f.write(chunk)
                 total_size += written_size
                 logging.info("package downloaded size %.2f KB", total_size/1024)
-                self.download_time = time.time()
-        self.download_finished = True
+                info_queue.put(time.time())
+        completed_event.set()
 
     def retrieve_and_unzip_package(self, package_name, package_url):
         local_package_path = self.agent_package_download_dir
@@ -202,26 +203,43 @@ def retrieve_and_unzip_package(self, package_name, package_url):
         ssl._create_default_https_context = ssl._create_unverified_context
 
         # Open a process to download the package so that we can avoid the request is blocked and check the timeout.
-        self.download_finished = False
-        self.download_time = time.time()
         from multiprocessing import Process
-        download_process = Process(target=self.download_package_proc, args=(package_url, local_package_file))
+        completed_event = multiprocessing.Event()
+        info_queue = multiprocessing.Queue()
+        download_process = Process(target=self.download_package_proc,
+                                   args=(package_url, local_package_file, completed_event, info_queue))
         download_process.start()
-        allowed_block_download_time = 30
+        allowed_block_download_time = 60
+        download_finished = False
+        download_time = time.time()
         while True:
-            block_time = time.time() - self.download_time
+            try:
+                queue_time = info_queue.get(block=False, timeout=3)
+                download_time = queue_time
+            except queue.Empty as e:
+                pass
+
+            block_time = time.time() - download_time
             if block_time > allowed_block_download_time:
                 break
-            if self.download_finished:
+
+            if completed_event.is_set():
+                download_finished = True
                 break
             time.sleep(3)
         try:
-            if not self.download_finished:
+            if not download_finished:
                 download_process.terminate()
                 download_process.kill()
         except Exception as e:
             pass
 
+        if not download_finished:
+            raise Exception("Download timeout, please check if your network is stable.")
+
+        if not os.path.exists(local_package_file):
+            raise Exception(f"Failed to download, the zip file is not exist at {local_package_file}.")
+
         # Another method to async download.
         # import socket
         # socket.setdefaulttimeout(15)
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
index 11d6fa44fd..9bb8b7a7ec 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
@@ -224,6 +224,7 @@ def start_status_listener_center(self):
 
     def rebuild_status_center(self, status_center_queue):
         self.status_center = FedMLStatusCenter(message_queue=status_center_queue)
+        self.status_center.is_deployment_status_center = self.is_deployment_status_center
 
         if self.status_reporter is None:
             self.status_reporter = MLOpsMetrics()
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py
index 76f811993e..4a55dbb679 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py
@@ -99,6 +99,7 @@ def __init__(self, message_queue=None):
         self.status_message_center = None
         self.status_manager_instance = None
         self.status_runner = None
+        self.is_deployment_status_center = False
 
     def __repr__(self):
         return "<{klass} @{id:x} {attrs}>".format(
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
index 811ff2a2d5..01caf7db67 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
@@ -2,6 +2,7 @@
 import logging
 import os
 import shutil
+import time
 from os import listdir
 
 from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
@@ -67,6 +68,9 @@ def process_job_completed_status(self, master_id, status):
         # self.remove_listener_for_run_metrics(self.run_id)
         # self.remove_listener_for_run_logs(self.run_id)
 
+        if self.status_center.is_deployment_status_center and status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED:
+            self.report_deployment_status(self.run_id, GeneralConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
+
     def process_job_exception_status(self, master_id, status):
         # Send the exception status to slave devices.
         self.report_exception_status(
@@ -302,3 +306,14 @@ def status_center_request_job_status_from_master_in_slave_agent(self, topic, pay
         topic_request_job_status = f"{GeneralConstants.MSG_TOPIC_REQUEST_JOB_STATUS_PREFIX}{master_id}"
         payload_request_job_status = {"run_id": run_id, "edge_id": edge_id}
         self.message_center.send_message(topic_request_job_status, json.dumps(payload_request_job_status))
+
+    def report_deployment_status(self, run_id, status):
+        deployment_status_topic = "model_ops/model_device/return_deployment_status"
+        deployment_status_payload = {"end_point_id": run_id, "end_point_name": "",
+                                     "model_name": "",
+                                     "model_url": "",
+                                     "model_status": status,
+                                     "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))}
+        logging.info(f"[StatusCenter] deployment_status_payload is sent to mlops: {deployment_status_payload}")
+
+        self.message_center.send_message_json(deployment_status_topic, json.dumps(deployment_status_payload))
diff --git a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
index cc7c3c222b..5e530dbba7 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
@@ -71,7 +71,7 @@ def run(self, process_event, completed_event,  run_extend_queue_list,
             logging.error(f"Runner exited with errors. Exception: {e}, Traceback {traceback.format_exc()}")
             self.status_reporter.report_client_id_status(
                 self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
-                server_id=self.server_id, run_id=self.run_id)
+                is_from_model=self.is_deployment_runner, server_id=self.server_id, run_id=self.run_id)
         finally:
             if self.mlops_metrics is not None:
                 computing_ended_time = MLOpsUtils.get_ntp_time()
@@ -107,7 +107,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
 
         self.status_reporter.report_client_id_status(
             self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING,
-            running_json=json.dumps(self.request_json), run_id=run_id)
+            is_from_model=self.is_deployment_runner, running_json=json.dumps(self.request_json), run_id=run_id)
 
         # get training params
         private_local_data_dir = data_config.get("privateLocalData", "")
@@ -192,7 +192,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
 
                 self.status_reporter.report_client_id_status(
                     self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
-                    server_id=self.server_id, run_id=run_id)
+                    is_from_model=self.is_deployment_runner, server_id=self.server_id, run_id=run_id)
 
                 if is_launch_task:
                     sys_utils.log_return_info(f"job {run_id}", ret_code)
@@ -225,7 +225,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
             # Send failed msg when exceptions.
             self.status_reporter.report_client_id_status(
                 self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
-                server_id=self.server_id, run_id=run_id)
+                is_from_model=self.is_deployment_runner, server_id=self.server_id, run_id=run_id)
 
     @abstractmethod
     def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None):
@@ -239,7 +239,7 @@ def reset_devices_status(self, edge_id, status):
         self.status_reporter.run_id = self.run_id
         self.status_reporter.edge_id = edge_id
         self.status_reporter.report_client_id_status(
-            edge_id, status, server_id=self.server_id, run_id=self.run_id)
+            edge_id, status, is_from_model=self.is_deployment_runner, server_id=self.server_id, run_id=self.run_id)
 
     def start_runner_process(
             self, run_id, request_json, edge_id=None,

From 060eef68749c68cf1d5ad187c8c8f82f2dd78e1c Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 15 May 2024 01:27:53 +0800
Subject: [PATCH 050/282] [CoreEngine] remove the hard-coded API key from the
 API test (callers should supply their own key).

---
 python/fedml/api/api_test.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/api/api_test.py b/python/fedml/api/api_test.py
index 5d899bb1fd..5a01a76448 100755
--- a/python/fedml/api/api_test.py
+++ b/python/fedml/api/api_test.py
@@ -6,7 +6,7 @@
 # Login
 fedml.set_env_version("test")
 fedml.set_local_on_premise_platform_port(18080)
-error_code, error_msg = fedml.api.fedml_login(api_key="1316b93c82da40ce90113a2ed12f0b14")
+error_code, error_msg = fedml.api.fedml_login(api_key="")
 if error_code != 0:
     print("API Key is invalid!")
     exit(1)

From 66af3db9bbe06fd13f91ead915dd4e860dd83bac Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 15 May 2024 01:57:35 +0800
Subject: [PATCH 051/282] [CoreEngine] deprecate the client and server
 runners by renaming them with a "_deprecated" suffix.

---
 .../master/{server_runner.py => server_runner_deprecated.py}      | 0
 ...device_client_runner.py => device_client_runner_deprecated.py} | 0
 ...device_server_runner.py => device_server_runner_deprecated.py} | 0
 .../slave/{client_runner.py => client_runner_deprecated.py}       | 0
 4 files changed, 0 insertions(+), 0 deletions(-)
 rename python/fedml/computing/scheduler/master/{server_runner.py => server_runner_deprecated.py} (100%)
 rename python/fedml/computing/scheduler/model_scheduler/{device_client_runner.py => device_client_runner_deprecated.py} (100%)
 rename python/fedml/computing/scheduler/model_scheduler/{device_server_runner.py => device_server_runner_deprecated.py} (100%)
 rename python/fedml/computing/scheduler/slave/{client_runner.py => client_runner_deprecated.py} (100%)

diff --git a/python/fedml/computing/scheduler/master/server_runner.py b/python/fedml/computing/scheduler/master/server_runner_deprecated.py
similarity index 100%
rename from python/fedml/computing/scheduler/master/server_runner.py
rename to python/fedml/computing/scheduler/master/server_runner_deprecated.py
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_runner.py b/python/fedml/computing/scheduler/model_scheduler/device_client_runner_deprecated.py
similarity index 100%
rename from python/fedml/computing/scheduler/model_scheduler/device_client_runner.py
rename to python/fedml/computing/scheduler/model_scheduler/device_client_runner_deprecated.py
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py b/python/fedml/computing/scheduler/model_scheduler/device_server_runner_deprecated.py
similarity index 100%
rename from python/fedml/computing/scheduler/model_scheduler/device_server_runner.py
rename to python/fedml/computing/scheduler/model_scheduler/device_server_runner_deprecated.py
diff --git a/python/fedml/computing/scheduler/slave/client_runner.py b/python/fedml/computing/scheduler/slave/client_runner_deprecated.py
similarity index 100%
rename from python/fedml/computing/scheduler/slave/client_runner.py
rename to python/fedml/computing/scheduler/slave/client_runner_deprecated.py

From 54f8b00201dad42b04f232b3d9cb310c214a1ed2 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 15 May 2024 18:12:16 +0800
Subject: [PATCH 052/282] [CoreEngine] make the cloud agent work.

---
 .../scheduler/master/base_master_agent.py     |  6 ++---
 .../master/base_master_job_runner.py          |  4 ++++
 .../master/base_master_job_runner_manager.py  | 13 ++++++++++
 .../master/base_master_protocol_manager.py    | 24 ++++++++++++-------
 .../scheduler/master/cloud_server_manager.py  |  6 ++++-
 .../scheduler/master/server_login.py          |  2 +-
 .../scheduler_core/account_manager.py         |  8 ++++---
 7 files changed, 47 insertions(+), 16 deletions(-)

diff --git a/python/fedml/computing/scheduler/master/base_master_agent.py b/python/fedml/computing/scheduler/master/base_master_agent.py
index 66bc35d96f..3aff523c24 100755
--- a/python/fedml/computing/scheduler/master/base_master_agent.py
+++ b/python/fedml/computing/scheduler/master/base_master_agent.py
@@ -17,18 +17,18 @@ def __init__(self):
         self.master_api_process = None
         self.mlops_metrics = MLOpsMetrics()
         self.status_reporter = None
-        self.enable_simulation_cloud_agent = True
+        self.enable_simulation_cloud_agent = False
         self.use_local_process_as_cloud_server = False
         self.protocol_mgr = None
 
     def login(
             self, user_id, api_key=None, device_id=None,
-            os_name=None, role=None
+            os_name=None, role=None, runner_cmd=None
     ):
         # Login account
         login_result = FedMLAccountManager.get_instance().login(
             user_id, api_key=api_key, device_id=device_id,
-            os_name=os_name, role=role
+            os_name=os_name, role=role, runner_cmd=runner_cmd
         )
         if login_result is not None:
             self.agent_args = login_result
diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py
index 07c297c65d..1827de481d 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py
@@ -136,6 +136,10 @@ def run_impl(
 
         logging.info("Detect all status of Edge ids: " + str(edge_ids))
 
+        self.status_reporter.report_server_id_status(
+            self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_STARTING, edge_id=self.edge_id,
+            server_id=self.edge_id, server_agent_id=self.edge_id)
+
         status_ok, active_edge_info_dict, inactivate_edges = self.detect_edges_status(
             edge_device_info_queue, edge_device_info_global_queue=edge_device_info_global_queue,
             callback_when_edges_ready=self.send_training_request_to_edges)
diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
index 694fab5f5f..b1066910c1 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
@@ -36,6 +36,19 @@ def start_job_runner(
             status_center_queue=status_center_queue
         )
 
+    def stop_job_runner(
+            self, run_id, args=None, edge_id=None, request_json=None,
+            run_as_cloud_agent=False
+    ):
+        super().stop_job_runner(run_id)
+
+        if run_as_cloud_agent:
+            cloud_server_mgr = FedMLCloudServerManager(
+                args, run_id=run_id, edge_id=edge_id, request_json=request_json,
+                agent_config=args.agent_config
+            )
+            cloud_server_mgr.stop_cloud_server()
+
     def _start_cloud_server(
             self, args, run_id, request_json, edge_id=None,
             use_local_process_as_cloud_server=False
diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
index ef59431ee8..b8479ebc03 100755
--- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -44,7 +44,7 @@ def __init__(self, args, agent_config=None):
         self.run_as_cloud_server = False
         self.run_as_edge_server_and_agent = False
         self.run_as_cloud_server_and_agent = False
-        self.enable_simulation_cloud_agent = True
+        self.enable_simulation_cloud_agent = False
         self.use_local_process_as_cloud_server = False
         self.ota_upgrade = FedMLOtaUpgrade(edge_id=args.edge_id)
         self.running_request_json = dict()
@@ -140,12 +140,6 @@ def callback_start_train(self, topic=None, payload=None):
         except Exception:
             pass
 
-        # Parse the message when running in the cloud server mode.
-        if self.run_as_cloud_server:
-            message_bytes = payload.encode("ascii")
-            base64_bytes = base64.b64decode(message_bytes)
-            payload = base64_bytes.decode("ascii")
-
         # Parse the parameters
         # [NOTES] Example Request JSON:
         # https://fedml-inc.larksuite.com/wiki/ScnIwUif9iupbjkYS0LuBrd6sod#WjbEdhYrvogmlGxKTOGu98C6sSb
@@ -264,6 +258,9 @@ def callback_stop_train(self, topic, payload, use_payload=None):
         run_id = request_json.get("runId", None)
         run_id = request_json.get("id", None) if run_id is None else run_id
         run_id_str = str(run_id)
+        server_id = request_json.get("serverId", None)
+        if server_id is None:
+            server_id = request_json.get("server_id", None)
 
         # Broadcast the job status to all edges
         self.rebuild_status_center(self.get_status_queue())
@@ -273,8 +270,14 @@ def callback_stop_train(self, topic, payload, use_payload=None):
         if self.running_request_json.get(run_id_str, None) is not None:
             self.running_request_json.pop(run_id_str)
 
+        # Send the stopping request to edges
+        if self.run_as_cloud_agent:
+            self.send_training_stop_request_to_cloud_server(server_id, payload)
+
         # Stop the job runner
-        self._get_job_runner_manager().stop_job_runner(run_id)
+        self._get_job_runner_manager().stop_job_runner(
+            run_id, args=self.args, edge_id=self.edge_id, request_json=request_json,
+            run_as_cloud_agent=self.run_as_cloud_agent)
 
     def callback_run_logs(self, topic, payload):
         run_id = str(topic).split('/')[-1]
@@ -498,6 +501,11 @@ def send_training_stop_request_to_specific_edge(self, edge_id, payload):
         logging.info("stop_train: send topic " + topic_stop_train)
         self.message_center.send_message(topic_stop_train, payload)
 
+    def send_training_stop_request_to_cloud_server(self, edge_id, payload):
+        topic_stop_train = "mlops/flserver_agent_" + str(edge_id) + "/stop_train"
+        logging.info("stop_train: send topic " + topic_stop_train)
+        self.message_center.send_message(topic_stop_train, payload)
+
     def send_status_check_msg(self, run_id, edge_id, server_id, context=None):
         topic_status_check = f"server/client/request_device_info/{edge_id}"
         payload = {"server_id": server_id, "run_id": run_id}
diff --git a/python/fedml/computing/scheduler/master/cloud_server_manager.py b/python/fedml/computing/scheduler/master/cloud_server_manager.py
index ed39707034..342d785afe 100755
--- a/python/fedml/computing/scheduler/master/cloud_server_manager.py
+++ b/python/fedml/computing/scheduler/master/cloud_server_manager.py
@@ -3,6 +3,8 @@
 import logging
 import os
 import traceback
+
+import fedml
 from fedml.computing.scheduler.comm_utils.sys_utils import get_python_program
 
 
@@ -18,10 +20,12 @@ def __init__(self, args, run_id=None, edge_id=None, request_json=None, agent_con
         self.edge_id = edge_id
         self.request_json = request_json
         self.agent_config = agent_config
+        if version is None:
+            version = fedml.get_env_version()
         self.version = version
         image_version = self.version
         if image_version == "local":
-            image_version = "dev"
+            image_version = "test"
         self.server_docker_base_image = "/fedml-device-image:" + image_version
         self.cloud_server_name = None
 
diff --git a/python/fedml/computing/scheduler/master/server_login.py b/python/fedml/computing/scheduler/master/server_login.py
index 3d8d1f6fc9..8dd0696bc8 100755
--- a/python/fedml/computing/scheduler/master/server_login.py
+++ b/python/fedml/computing/scheduler/master/server_login.py
@@ -39,6 +39,6 @@ def logout():
     master_agent = FedMLLaunchMasterAgent()
     if args.type == 'login':
         master_agent.login(args.api_key, api_key=args.api_key, device_id=args.device_id,
-                           os_name=args.os_name, role=args.role)
+                           os_name=args.os_name, role=args.role, runner_cmd=args.runner_cmd)
     else:
         master_agent.logout()
diff --git a/python/fedml/computing/scheduler/scheduler_core/account_manager.py b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
index da04fc3989..4b4cc9fd31 100755
--- a/python/fedml/computing/scheduler/scheduler_core/account_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
@@ -48,10 +48,10 @@ def __init__(self):
     def get_instance():
         return FedMLAccountManager()
 
-    def login(self, user_id, api_key="", device_id=None, os_name=None, role=None):
+    def login(self, user_id, api_key="", device_id=None, os_name=None, role=None, runner_cmd=None):
         # Build the agent args
         self.build_agent_args(
-            user_id, api_key=api_key, device_id=device_id, os_name=os_name, role=role
+            user_id, api_key=api_key, device_id=device_id, os_name=os_name, role=role, runner_cmd=runner_cmd
         )
 
         # Fetch configs from the MLOps config server.
@@ -126,7 +126,7 @@ def login(self, user_id, api_key="", device_id=None, os_name=None, role=None):
 
         return self.agent_args
 
-    def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None, role=None):
+    def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None, role=None, runner_cmd=None):
         # Generate the suffix for device based on the role
         device_id_suffix = None
         is_master = False
@@ -197,6 +197,7 @@ def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None,
         # Set the unique device id
         self.agent_args.is_from_docker = is_from_docker or is_from_fedml_docker_hub
         self.agent_args.unique_device_id = unique_device_id
+        self.agent_args.runner_cmd = runner_cmd
 
     def fill_argent_args(
             self, log_server_url=None, server_id=None, edge_id=None,
@@ -440,6 +441,7 @@ def __init__(self, role=None, account_id=None, api_key=None, server_id=None, cur
         self.using_mlops = True
         self.server_agent_id = None
         self.general_edge_id = None
+        self.runner_cmd = None
 
     def is_cloud_server(self):
         return self.role == FedMLAccountManager.ROLE_CLOUD_SERVER

From 75cd1781468de07756af9d194d4b77b8eba2432b Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 15 May 2024 18:19:29 +0800
Subject: [PATCH 053/282] [CoreEngine] update the version.

---
 python/fedml/__init__.py | 2 +-
 python/setup.py          | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index 8044387b65..6b3ac3f61b 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -34,7 +34,7 @@
 _global_training_type = None
 _global_comm_backend = None
 
-__version__ = "0.8.30"
+__version__ = "0.8.31"
 
 
 # This is the deployment environment used for different roles (RD/PM/BD/Public Developers). Potential VALUE: local, dev, test, release
diff --git a/python/setup.py b/python/setup.py
index cce0ddb2ca..fa425c98f7 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -120,7 +120,7 @@ def finalize_options(self):
 
 setup(
     name="fedml",
-    version="0.8.30",
+    version="0.8.31",
     author="FedML Team",
     author_email="ch@fedml.ai",
     description="A research and production integrated edge-cloud library for "

From 90dc19bb957fb0b175d5e1f75e1bc3e91c18b700 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 15 May 2024 19:32:36 +0800
Subject: [PATCH 054/282] [CoreEngine] stop the run in the cloud agent.

---
 .../master/base_master_job_runner_manager.py  | 10 ++--
 .../master/base_master_protocol_manager.py    | 25 +++++++--
 .../scheduler/master/cloud_server_manager.py  | 53 +++++++++++--------
 .../scheduler_core/general_constants.py       |  5 ++
 .../status_manager_protocols.py               |  3 ++
 5 files changed, 64 insertions(+), 32 deletions(-)

diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
index b1066910c1..f4735227bc 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
@@ -37,17 +37,15 @@ def start_job_runner(
         )
 
     def stop_job_runner(
-            self, run_id, args=None, edge_id=None, request_json=None,
+            self, run_id, args=None, server_id=None, request_json=None,
             run_as_cloud_agent=False
     ):
         super().stop_job_runner(run_id)
 
         if run_as_cloud_agent:
-            cloud_server_mgr = FedMLCloudServerManager(
-                args, run_id=run_id, edge_id=edge_id, request_json=request_json,
-                agent_config=args.agent_config
-            )
-            cloud_server_mgr.stop_cloud_server()
+            stopping_process = Process(
+                target=FedMLCloudServerManager.stop_cloud_server, args=(run_id, server_id, args.agent_config))
+            stopping_process.start()
 
     def _start_cloud_server(
             self, args, run_id, request_json, edge_id=None,
diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
index b8479ebc03..0873548a42 100755
--- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -33,6 +33,7 @@ def __init__(self, args, agent_config=None):
         self.agent_config = agent_config
         self.topic_start_train = None
         self.topic_stop_train = None
+        self.topic_exit_train = None
         self.topic_report_status = None
         self.topic_ota_msg = None
         self.topic_response_device_info = None
@@ -61,6 +62,9 @@ def generate_topics(self):
         # The topi for stopping training
         self.topic_stop_train = "mlops/flserver_agent_" + str(self.edge_id) + "/stop_train"
 
+        # The topi for exiting training
+        self.topic_exit_train = GeneralConstants.get_topic_exit_train(self.edge_id)
+
         # The topic for reporting current device status.
         self.topic_report_status = "mlops/report_device_status"
 
@@ -89,6 +93,7 @@ def generate_topics(self):
         self.subscribed_topics.clear()
         self.add_subscribe_topic(self.topic_start_train)
         self.add_subscribe_topic(self.topic_stop_train)
+        self.add_subscribe_topic(self.topic_exit_train)
         self.add_subscribe_topic(self.topic_report_status)
         self.add_subscribe_topic(self.topic_ota_msg)
         self.add_subscribe_topic(self.topic_response_device_info)
@@ -103,6 +108,7 @@ def add_protocol_handler(self):
         # Add the message listeners for all topics
         self.add_message_listener(self.topic_start_train, self.callback_start_train)
         self.add_message_listener(self.topic_stop_train, self.callback_stop_train)
+        self.add_message_listener(self.topic_exit_train, self.callback_exit_train)
         self.add_message_listener(self.topic_ota_msg, FedMLBaseMasterProtocolManager.callback_server_ota_msg)
         self.add_message_listener(self.topic_report_status, self.callback_report_current_status)
         self.add_message_listener(self.topic_response_device_info, self.callback_response_device_info)
@@ -270,13 +276,24 @@ def callback_stop_train(self, topic, payload, use_payload=None):
         if self.running_request_json.get(run_id_str, None) is not None:
             self.running_request_json.pop(run_id_str)
 
-        # Send the stopping request to edges
-        if self.run_as_cloud_agent:
-            self.send_training_stop_request_to_cloud_server(server_id, payload)
+        # Stop the job runner
+        self._get_job_runner_manager().stop_job_runner(
+            run_id, args=self.args, server_id=server_id, request_json=request_json,
+            run_as_cloud_agent=self.run_as_cloud_agent)
+
+    def callback_exit_train(self, topic, payload):
+        # Parse the parameters.
+        request_json = json.loads(payload)
+        run_id = request_json.get("runId", None)
+        run_id = request_json.get("id", None) if run_id is None else run_id
+        run_id_str = str(run_id)
+        server_id = request_json.get("serverId", None)
+        if server_id is None:
+            server_id = request_json.get("server_id", None)
 
         # Stop the job runner
         self._get_job_runner_manager().stop_job_runner(
-            run_id, args=self.args, edge_id=self.edge_id, request_json=request_json,
+            run_id, args=self.args, server_id=server_id, request_json=request_json,
             run_as_cloud_agent=self.run_as_cloud_agent)
 
     def callback_run_logs(self, topic, payload):
diff --git a/python/fedml/computing/scheduler/master/cloud_server_manager.py b/python/fedml/computing/scheduler/master/cloud_server_manager.py
index 342d785afe..040a0f38a3 100755
--- a/python/fedml/computing/scheduler/master/cloud_server_manager.py
+++ b/python/fedml/computing/scheduler/master/cloud_server_manager.py
@@ -12,6 +12,7 @@ class FedMLCloudServerManager:
     FEDML_CLOUD_SERVER_PREFIX = "fedml-server-run-"
     LOCAL_RUNNER_INFO_DIR_NAME = 'runner_infos'
     STATUS_IDLE = "IDLE"
+    FEDML_SERVER_BASE_IMAGE = "/fedml-device-image:"
 
     def __init__(self, args, run_id=None, edge_id=None, request_json=None, agent_config=None, version=None):
         self.server_docker_image = None
@@ -26,7 +27,7 @@ def __init__(self, args, run_id=None, edge_id=None, request_json=None, agent_con
         image_version = self.version
         if image_version == "local":
             image_version = "test"
-        self.server_docker_base_image = "/fedml-device-image:" + image_version
+        self.server_docker_base_image = FedMLCloudServerManager._get_server_base_image(image_version)
         self.cloud_server_name = None
 
     @staticmethod
@@ -125,44 +126,52 @@ def start_cloud_server(self, packages_config):
         logging.info("start run with k8s: " + run_deployment_cmd)
         os.system(run_deployment_cmd)
 
-    def stop_cloud_server(self):
-        self.cloud_server_name = FedMLCloudServerManager.FEDML_CLOUD_SERVER_PREFIX + str(self.run_id) \
-                                 + "-" + str(self.edge_id)
-        self.server_docker_image = (
-                self.agent_config["docker_config"]["registry_server"]
-                + self.agent_config["docker_config"]["registry_dir"]
-                + self.server_docker_base_image
+    @staticmethod
+    def stop_cloud_server(run_id, server_id, agent_config):
+        cloud_server_name = FedMLCloudServerManager._get_cloud_server_name(run_id, server_id)
+        server_docker_image = (
+                agent_config["docker_config"]["registry_server"]
+                + agent_config["docker_config"]["registry_dir"]
+                + FedMLCloudServerManager._get_server_base_image(fedml.get_env_version())
         )
         delete_deployment_cmd = (
                 "export FEDML_AGGREGATOR_NAME="
-                + self.cloud_server_name
+                + cloud_server_name
                 + ";export FEDML_AGGREGATOR_SVC="
-                + self.cloud_server_name
+                + cloud_server_name
                 + ";export FEDML_AGGREGATOR_VERSION="
-                + self.version
+                + fedml.get_env_version()
                 + ';export FEDML_AGGREGATOR_IMAGE_PATH="'
-                + self.server_docker_image
+                + server_docker_image
                 + '"'
                 + ";export FEDML_CONF_ID="
-                + self.cloud_server_name
+                + cloud_server_name
                 + ";export FEDML_DATA_PV_ID="
-                + self.cloud_server_name
+                + cloud_server_name
                 + ";export FEDML_DATA_PVC_ID="
-                + self.cloud_server_name
+                + cloud_server_name
                 + ";export FEDML_REGISTRY_SECRET_SUFFIX="
-                + self.cloud_server_name
+                + cloud_server_name
                 + ";kubectl -n fedml-devops-aggregator-"
-                + self.version
+                + fedml.get_env_version()
                 + " delete deployment "
-                + self.cloud_server_name
+                + cloud_server_name
                 + ";kubectl -n fedml-devops-aggregator-"
-                + self.version
+                + fedml.get_env_version()
                 + " delete svc "
-                + self.cloud_server_name
+                + cloud_server_name
                 + ";kubectl -n fedml-devops-aggregator-"
-                + self.version
+                + fedml.get_env_version()
                 + " delete secret secret-"
-                + self.cloud_server_name
+                + cloud_server_name
         )
         logging.info("stop run with k8s: " + delete_deployment_cmd)
         os.system(delete_deployment_cmd)
+
+    @staticmethod
+    def _get_server_base_image(version):
+        return f"{FedMLCloudServerManager.FEDML_SERVER_BASE_IMAGE}{version}"
+
+    @staticmethod
+    def _get_cloud_server_name(run_id, server_id):
+        return f"{FedMLCloudServerManager.FEDML_CLOUD_SERVER_PREFIX}{run_id}-{server_id}"
diff --git a/python/fedml/computing/scheduler/scheduler_core/general_constants.py b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
index 0cc6044d4b..347f157333 100755
--- a/python/fedml/computing/scheduler/scheduler_core/general_constants.py
+++ b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
@@ -206,3 +206,8 @@ def get_ip_address(request_json, infer_host=None):
             ip = infer_host
 
         return ip
+
+    @staticmethod
+    def get_topic_exit_train(server_id):
+        topic_exit_train = f"status_center/flserver_agent_{server_id}/exit_train"
+        return topic_exit_train
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
index 01caf7db67..921632472b 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
@@ -67,6 +67,9 @@ def process_job_completed_status(self, master_id, status):
         # self.stop_cloud_server()
         # self.remove_listener_for_run_metrics(self.run_id)
         # self.remove_listener_for_run_logs(self.run_id)
+        payload_exit_train = {"runId": self.run_id, "serverId": master_id}
+        self.message_center.receive_message(
+            GeneralConstants.get_topic_exit_train(master_id), json.dumps(payload_exit_train))
 
         if self.status_center.is_deployment_status_center and status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED:
             self.report_deployment_status(self.run_id, GeneralConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)

From 7064783779837b7bd73d68fcdc4d7ac269b4586f Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 15 May 2024 20:17:11 +0800
Subject: [PATCH 055/282] [CoreEngine] stop the run in the cloud agent.

---
 .../scheduler/master/base_master_job_runner_manager.py        | 4 ++--
 .../scheduler/master/base_master_protocol_manager.py          | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
index f4735227bc..2b6c4d3b5a 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
@@ -38,11 +38,11 @@ def start_job_runner(
 
     def stop_job_runner(
             self, run_id, args=None, server_id=None, request_json=None,
-            run_as_cloud_agent=False
+            run_as_cloud_agent=False, run_as_cloud_server=False
     ):
         super().stop_job_runner(run_id)
 
-        if run_as_cloud_agent:
+        if run_as_cloud_agent or run_as_cloud_server:
             stopping_process = Process(
                 target=FedMLCloudServerManager.stop_cloud_server, args=(run_id, server_id, args.agent_config))
             stopping_process.start()
diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
index 0873548a42..88b25f33ab 100755
--- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -294,7 +294,7 @@ def callback_exit_train(self, topic, payload):
         # Stop the job runner
         self._get_job_runner_manager().stop_job_runner(
             run_id, args=self.args, server_id=server_id, request_json=request_json,
-            run_as_cloud_agent=self.run_as_cloud_agent)
+            run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server)
 
     def callback_run_logs(self, topic, payload):
         run_id = str(topic).split('/')[-1]

From 162f7598cdd84a19d3c6d230f27985b229aaaa7b Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 15 May 2024 20:57:47 +0800
Subject: [PATCH 056/282] [CoreEngine] make the cloud server id work.

---
 .../computing/scheduler/scheduler_core/account_manager.py     | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/scheduler_core/account_manager.py b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
index 4b4cc9fd31..3491e102f6 100755
--- a/python/fedml/computing/scheduler/scheduler_core/account_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
@@ -138,7 +138,7 @@ def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None,
             device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_CLOUD_AGENT
             is_master = True
         elif role == FedMLAccountManager.ROLE_CLOUD_SERVER:
-            device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_CLOUD_SERVER
+            device_id_suffix = ""
             is_master = True
         elif role == FedMLAccountManager.ROLE_EDGE_DEVICE:
             device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_EDGE_DEVICE
@@ -193,6 +193,8 @@ def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None,
         docker_tag = FedMLAccountManager.DEVICE_ID_DOCKER_HUB_TAG if is_from_fedml_docker_hub else docker_tag
         unique_device_id = f"{self.agent_args.current_device_id}@{self.agent_args.os_name}" \
                            f"{docker_tag}{device_id_suffix}"
+        if role == FedMLAccountManager.ROLE_CLOUD_SERVER:
+            unique_device_id = self.agent_args.current_device_id
 
         # Set the unique device id
         self.agent_args.is_from_docker = is_from_docker or is_from_fedml_docker_hub

From 496732af205ca526da3f6c5fbf0c3d9ca7e77fa3 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 16 May 2024 02:47:13 +0800
Subject: [PATCH 057/282] [CoreEngine] make the deployment work.

---
 .../master/base_master_job_runner_manager.py          | 11 +++++++++++
 .../scheduler/master/base_master_protocol_manager.py  |  2 +-
 .../scheduler/model_scheduler/master_job_runner.py    |  3 +--
 .../model_scheduler/master_protocol_manager.py        |  5 ++++-
 4 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
index 2b6c4d3b5a..6831c9d034 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
@@ -47,6 +47,17 @@ def stop_job_runner(
                 target=FedMLCloudServerManager.stop_cloud_server, args=(run_id, server_id, args.agent_config))
             stopping_process.start()
 
+    def complete_job_runner(
+            self, run_id, args=None, server_id=None, request_json=None,
+            run_as_cloud_agent=False, run_as_cloud_server=False
+    ):
+        super().complete_job_runner(run_id)
+
+        if run_as_cloud_agent or run_as_cloud_server:
+            stopping_process = Process(
+                target=FedMLCloudServerManager.stop_cloud_server, args=(run_id, server_id, args.agent_config))
+            stopping_process.start()
+
     def _start_cloud_server(
             self, args, run_id, request_json, edge_id=None,
             use_local_process_as_cloud_server=False
diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
index 88b25f33ab..53a0aee151 100755
--- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -292,7 +292,7 @@ def callback_exit_train(self, topic, payload):
             server_id = request_json.get("server_id", None)
 
         # Stop the job runner
-        self._get_job_runner_manager().stop_job_runner(
+        self._get_job_runner_manager().complete_job_runner(
             run_id, args=self.args, server_id=server_id, request_json=request_json,
             run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server)
 
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index 8ce9d0e102..6149e60939 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -361,8 +361,7 @@ def process_deployment_result_message(self, topic=None, payload=None):
         # Update the global deployment result mapping
         self.slave_deployment_results_map[str(device_id)] = model_status
 
-        logging.info("callback_deployment_result_message: topic {}, payload {}, result mapping {}.".format(
-            topic, payload, self.slave_deployment_results_map))
+        logging.info("callback_deployment_result_message: topic {}, payload {}.".format(topic, payload))
 
         request_json = self.request_json
         if request_json is None:
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index b4c5b41d74..e7cf150040 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -100,9 +100,12 @@ def print_connected_info(self):
         pass
 
     def callback_deployment_result_message(self, topic=None, payload=None):
-        logging.info(f"Received deployment result: {self}")
+        logging.info(f"Received deployment result")
         FedMLDeployJobRunnerManager.get_instance().save_deployment_result(topic, payload)
 
+    def callback_exit_train(self, topic, payload):
+        pass
+
     def callback_delete_deployment(self, topic, payload):
         logging.info("[Master] callback_delete_deployment")
         # Parse payload as the model message object.

From 48c0a4c96c5ab8c0ae08fc25391c0b57feb6a925 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 16 May 2024 08:27:59 +0800
Subject: [PATCH 058/282] [CoreEngine] refactor the complete job callback.

---
 .../master/base_master_protocol_manager.py    | 14 +++++------
 .../master_protocol_manager.py                |  2 +-
 .../scheduler_core/general_constants.py       | 11 ++++++---
 .../status_manager_protocols.py               |  4 ++--
 .../scheduler_core/task_protocol_manager.py   | 24 -------------------
 5 files changed, 18 insertions(+), 37 deletions(-)
 delete mode 100755 python/fedml/computing/scheduler/scheduler_core/task_protocol_manager.py

diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
index 53a0aee151..f678389489 100755
--- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -33,7 +33,7 @@ def __init__(self, args, agent_config=None):
         self.agent_config = agent_config
         self.topic_start_train = None
         self.topic_stop_train = None
-        self.topic_exit_train = None
+        self.topic_complete_job = None
         self.topic_report_status = None
         self.topic_ota_msg = None
         self.topic_response_device_info = None
@@ -62,8 +62,8 @@ def generate_topics(self):
         # The topi for stopping training
         self.topic_stop_train = "mlops/flserver_agent_" + str(self.edge_id) + "/stop_train"
 
-        # The topi for exiting training
-        self.topic_exit_train = GeneralConstants.get_topic_exit_train(self.edge_id)
+        # The topic for completing job
+        self.topic_complete_job = GeneralConstants.get_topic_complete_job(self.edge_id)
 
         # The topic for reporting current device status.
         self.topic_report_status = "mlops/report_device_status"
@@ -93,7 +93,7 @@ def generate_topics(self):
         self.subscribed_topics.clear()
         self.add_subscribe_topic(self.topic_start_train)
         self.add_subscribe_topic(self.topic_stop_train)
-        self.add_subscribe_topic(self.topic_exit_train)
+        self.add_subscribe_topic(self.topic_complete_job)
         self.add_subscribe_topic(self.topic_report_status)
         self.add_subscribe_topic(self.topic_ota_msg)
         self.add_subscribe_topic(self.topic_response_device_info)
@@ -108,7 +108,7 @@ def add_protocol_handler(self):
         # Add the message listeners for all topics
         self.add_message_listener(self.topic_start_train, self.callback_start_train)
         self.add_message_listener(self.topic_stop_train, self.callback_stop_train)
-        self.add_message_listener(self.topic_exit_train, self.callback_exit_train)
+        self.add_message_listener(self.topic_complete_job, self.callback_complete_job)
         self.add_message_listener(self.topic_ota_msg, FedMLBaseMasterProtocolManager.callback_server_ota_msg)
         self.add_message_listener(self.topic_report_status, self.callback_report_current_status)
         self.add_message_listener(self.topic_response_device_info, self.callback_response_device_info)
@@ -281,7 +281,7 @@ def callback_stop_train(self, topic, payload, use_payload=None):
             run_id, args=self.args, server_id=server_id, request_json=request_json,
             run_as_cloud_agent=self.run_as_cloud_agent)
 
-    def callback_exit_train(self, topic, payload):
+    def callback_complete_job(self, topic, payload):
         # Parse the parameters.
         request_json = json.loads(payload)
         run_id = request_json.get("runId", None)
@@ -291,7 +291,7 @@ def callback_exit_train(self, topic, payload):
         if server_id is None:
             server_id = request_json.get("server_id", None)
 
-        # Stop the job runner
+        # Complete the job runner
         self._get_job_runner_manager().complete_job_runner(
             run_id, args=self.args, server_id=server_id, request_json=request_json,
             run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server)
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index e7cf150040..8a578cb7d2 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -103,7 +103,7 @@ def callback_deployment_result_message(self, topic=None, payload=None):
         logging.info(f"Received deployment result")
         FedMLDeployJobRunnerManager.get_instance().save_deployment_result(topic, payload)
 
-    def callback_exit_train(self, topic, payload):
+    def callback_complete_job(self, topic, payload):
         pass
 
     def callback_delete_deployment(self, topic, payload):
diff --git a/python/fedml/computing/scheduler/scheduler_core/general_constants.py b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
index 347f157333..68c1a8e09d 100755
--- a/python/fedml/computing/scheduler/scheduler_core/general_constants.py
+++ b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
@@ -208,6 +208,11 @@ def get_ip_address(request_json, infer_host=None):
         return ip
 
     @staticmethod
-    def get_topic_exit_train(server_id):
-        topic_exit_train = f"status_center/flserver_agent_{server_id}/exit_train"
-        return topic_exit_train
+    def get_topic_complete_job(server_id):
+        topic_complete_job = f"status_center/master_agent_{server_id}/complete_job"
+        return topic_complete_job
+
+    @staticmethod
+    def get_payload_complete_job(run_id, server_id):
+        payload_complete_job = {"runId": run_id, "serverId": server_id}
+        return payload_complete_job
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
index 921632472b..e5dd312c80 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
@@ -67,9 +67,9 @@ def process_job_completed_status(self, master_id, status):
         # self.stop_cloud_server()
         # self.remove_listener_for_run_metrics(self.run_id)
         # self.remove_listener_for_run_logs(self.run_id)
-        payload_exit_train = {"runId": self.run_id, "serverId": master_id}
         self.message_center.receive_message(
-            GeneralConstants.get_topic_exit_train(master_id), json.dumps(payload_exit_train))
+            GeneralConstants.get_topic_complete_job(master_id),
+            json.dumps(GeneralConstants.get_payload_complete_job(self.run_id, master_id)))
 
         if self.status_center.is_deployment_status_center and status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED:
             self.report_deployment_status(self.run_id, GeneralConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
diff --git a/python/fedml/computing/scheduler/scheduler_core/task_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/task_protocol_manager.py
deleted file mode 100755
index ddf4bb9b6e..0000000000
--- a/python/fedml/computing/scheduler/scheduler_core/task_protocol_manager.py
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-class TaskProtocolManager(object):
-    def __init__(self):
-        pass
-
-    def log_metrics(self):
-        # Build the message for logging metrics
-
-        # Send the message to MQTT server
-
-        pass
-
-    def log_model(self):
-        pass
-
-    def log_artifacts_log(self):
-        pass
-
-    def log_artifacts(self):
-        pass
-
-
-

From 525479399feae51cbfc9bdf66ce20e843694f10e Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 16 May 2024 08:41:33 +0800
Subject: [PATCH 059/282] [CoreEngine] refactor the complete job callback.

---
 .../scheduler/master/base_master_protocol_manager.py      | 8 ++++----
 .../computing/scheduler/master/master_protocol_manager.py | 7 +++++++
 .../scheduler/model_scheduler/master_protocol_manager.py  | 3 ---
 3 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
index f678389489..67507182a9 100755
--- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -291,10 +291,10 @@ def callback_complete_job(self, topic, payload):
         if server_id is None:
             server_id = request_json.get("server_id", None)
 
-        # Complete the job runner
-        self._get_job_runner_manager().complete_job_runner(
-            run_id, args=self.args, server_id=server_id, request_json=request_json,
-            run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server)
+        self._process_job_complete_status(run_id, server_id, request_json)
+
+    def _process_job_complete_status(self, run_id, server_id, complete_payload):
+        pass
 
     def callback_run_logs(self, topic, payload):
         run_id = str(topic).split('/')[-1]
diff --git a/python/fedml/computing/scheduler/master/master_protocol_manager.py b/python/fedml/computing/scheduler/master/master_protocol_manager.py
index 5eef5914e7..ca9621e41d 100755
--- a/python/fedml/computing/scheduler/master/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/master_protocol_manager.py
@@ -34,3 +34,10 @@ def _init_extra_items(self):
     # Override
     def print_connected_info(self):
         super().print_connected_info()
+
+    # Override
+    def _process_job_complete_status(self, run_id, server_id, complete_payload):
+        # Complete the job runner
+        self._get_job_runner_manager().complete_job_runner(
+            run_id, args=self.args, server_id=server_id, request_json=complete_payload,
+            run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server)
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index 8a578cb7d2..01165ff82e 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -103,9 +103,6 @@ def callback_deployment_result_message(self, topic=None, payload=None):
         logging.info(f"Received deployment result")
         FedMLDeployJobRunnerManager.get_instance().save_deployment_result(topic, payload)
 
-    def callback_complete_job(self, topic, payload):
-        pass
-
     def callback_delete_deployment(self, topic, payload):
         logging.info("[Master] callback_delete_deployment")
         # Parse payload as the model message object.

From 86852c7cd1ff65c3540e07d0f965c84382ebd294 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 16 May 2024 09:16:27 +0800
Subject: [PATCH 060/282] [CoreEngine] fixed the monitor issue.

---
 python/fedml/computing/scheduler/comm_utils/job_monitor.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
index 9bee76e780..8ae6e1c744 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
@@ -223,7 +223,7 @@ def monitor_replicas_number():
             curr_version = fedml.get_env_version()
             num_replica_url_path = "fedmlModelServer/api/v1/endpoint/replica-info"
             mlops_prefix = fedml._get_backend_service()
-            url = f"{mlops_prefix}{num_replica_url_path}"
+            url = f"{mlops_prefix}/{num_replica_url_path}"
 
             cached_token = FedMLModelCache.get_instance().get_end_point_token_with_eid(endpoint_id)
             if cached_token is None:

From 349493cefb4cf64605fec2c348874435339a1f30 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 16 May 2024 09:45:10 +0800
Subject: [PATCH 061/282] [CoreEngine] update the serve example.

---
 python/examples/launch/serve_mnist/fedml_model_config.yaml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/python/examples/launch/serve_mnist/fedml_model_config.yaml b/python/examples/launch/serve_mnist/fedml_model_config.yaml
index 48254ccca4..f212dbb81d 100644
--- a/python/examples/launch/serve_mnist/fedml_model_config.yaml
+++ b/python/examples/launch/serve_mnist/fedml_model_config.yaml
@@ -1,6 +1,8 @@
 workspace: "./"
 entry_point: "mnist_serve_main.py"
 
+auto_detect_public_ip: true
+
 data_cache_dir: ""
 bootstrap: ""
 

From ba453d1b9f0259ca68cb640f97790e19ff1b3ebe Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 16 May 2024 10:01:35 +0800
Subject: [PATCH 062/282] [CoreEngine] update the model master runner.

---
 .../model_scheduler/device_model_inference.py | 56 +++++++++----------
 .../model_scheduler/master_job_runner.py      | 16 ++----
 2 files changed, 33 insertions(+), 39 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index b8d85edd31..eb3088f327 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -26,35 +26,35 @@
     pass
 
 
-# class Settings(BaseSettings):
-#     redis_addr: str
-#     redis_port: str
-#     redis_password: str
-#     end_point_name: str
-#     model_name: str
-#     model_version: str
-#     model_infer_url: str
-#     version: str
-#     use_mqtt_inference: bool
-#     use_worker_gateway: bool
-#     ext_info: str
+class Settings(BaseSettings):
+    redis_addr: str
+    redis_port: str
+    redis_password: str
+    end_point_name: str
+    model_name: str
+    model_version: str
+    model_infer_url: str
+    version: str
+    use_mqtt_inference: bool
+    use_worker_gateway: bool
+    ext_info: str
+
+
+settings = Settings()
+
+# class settings:
+#     redis_addr = "127.0.0.1"
+#     redis_port = 6379
+#     redis_password = "fedml_default"
+#     end_point_name = ""
+#     model_name = ""
+#     model_version = ""
+#     model_infer_url = "127.0.0.1"
+#     version = "dev"
+#     use_mqtt_inference = False
+#     use_worker_gateway = False
+#     ext_info = "2b34303961245c4f175f2236282d7a272c040b0904747579087f6a760112030109010c215d54505707140005190a051c347f365c4a430c020a7d39120e26032a78730f797f7c031f0901657e75"
 #
-#
-# settings = Settings()
-
-class settings:
-    redis_addr = "127.0.0.1"
-    redis_port = 6379
-    redis_password = "fedml_default"
-    end_point_name = ""
-    model_name = ""
-    model_version = ""
-    model_infer_url = "127.0.0.1"
-    version = "dev"
-    use_mqtt_inference = False
-    use_worker_gateway = False
-    ext_info = "2b34303961245c4f175f2236282d7a272c040b0904747579087f6a760112030109010c215d54505707140005190a051c347f365c4a430c020a7d39120e26032a78730f797f7c031f0901657e75"
-
 
 api = FastAPI()
 
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index 6149e60939..cecb9de7bc 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -115,7 +115,6 @@ def run_impl(
 
         # start unified inference server
         self.start_device_inference_gateway(
-            run_id, end_point_name, model_id, model_name, model_version,
             agent_config=self.agent_config, inference_port=inference_port)
 
         # start inference monitor server
@@ -464,20 +463,16 @@ def process_deployment_result_message(self, topic=None, payload=None):
 
     @staticmethod
     def start_device_inference_gateway(
-            run_id, end_point_name, model_id,
-            model_name, model_version, inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
+            inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
             agent_config=None, redis_addr="localhost", redis_port=6379, redis_password="fedml_default"
     ):
         # start unified inference server
-        running_model_name = ServerConstants.get_running_model_name(end_point_name,
-                                                                    model_name, model_version, run_id, model_id)
         python_program = get_python_program()
         master_port = os.getenv("FEDML_MASTER_PORT", None)
         if master_port is not None:
             inference_port = int(master_port)
         if not ServerConstants.is_running_on_k8s():
-            logging.info(f"start the model inference gateway, end point {run_id}, "
-                         f"model name {model_name} at port {inference_port}...")
+            logging.info("start the model inference gateway...")
             use_mqtt_inference = os.getenv("FEDML_USE_MQTT_INFERENCE", "False")
             use_mqtt_inference = True if use_mqtt_inference.lower() == 'true' else False
             use_worker_gateway = os.getenv("FEDML_USE_WORKER_GATEWAY", "False")
@@ -501,8 +496,8 @@ def start_device_inference_gateway(
                     "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} "
                     "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
                     "--log-level critical".format(
-                        redis_addr, redis_port, redis_password, end_point_name,
-                        model_name, model_version, "", fedml.get_env_version(), use_mqtt_inference,
+                        redis_addr, str(redis_port), redis_password, "",
+                        "", "", "", fedml.get_env_version(), use_mqtt_inference,
                         use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port),
                         fedml_base_dir),
                     should_capture_stdout=False, should_capture_stderr=False)
@@ -570,8 +565,7 @@ def recover_inference_and_monitor():
                     pass
 
                 FedMLDeployMasterJobRunner.start_device_inference_gateway(
-                    run_id, end_point_name, model_id, model_name, model_version, inference_port=inference_port,
-                    agent_config=agent_config)
+                    inference_port=inference_port, agent_config=agent_config)
 
                 FedMLDeployMasterJobRunner.stop_device_inference_monitor(
                     run_id, end_point_name, model_id, model_name, model_version)

From 14429adfd510b9b95c99bc4c7318c723accd2cb4 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 16 May 2024 10:07:14 +0800
Subject: [PATCH 063/282] [CoreEngine] update the model master runner.

---
 .../model_scheduler/master_job_runner.py      | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index cecb9de7bc..4d43d7c5c3 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -115,7 +115,7 @@ def run_impl(
 
         # start unified inference server
         self.start_device_inference_gateway(
-            agent_config=self.agent_config, inference_port=inference_port)
+            inference_port=inference_port, agent_config=self.agent_config)
 
         # start inference monitor server
         self.stop_device_inference_monitor(
@@ -540,6 +540,14 @@ def stop_device_inference_monitor(run_id, end_point_name, model_id, model_name,
     def recover_inference_and_monitor():
         # noinspection PyBroadException
         try:
+            agent_config = dict()
+            try:
+                agent_config["mqtt_config"], _, _, _ = MLOpsConfigs.fetch_all_configs()
+            except Exception as e:
+                pass
+
+            FedMLDeployMasterJobRunner.start_device_inference_gateway(agent_config=agent_config)
+
             history_jobs = FedMLServerDataInterface.get_instance().get_history_jobs()
             for job in history_jobs.job_list:
                 if job.running_json is None:
@@ -558,15 +566,6 @@ def recover_inference_and_monitor():
                 if not is_activated:
                     continue
 
-                agent_config = dict()
-                try:
-                    agent_config["mqtt_config"], _, _, _ = MLOpsConfigs.fetch_all_configs()
-                except Exception as e:
-                    pass
-
-                FedMLDeployMasterJobRunner.start_device_inference_gateway(
-                    inference_port=inference_port, agent_config=agent_config)
-
                 FedMLDeployMasterJobRunner.stop_device_inference_monitor(
                     run_id, end_point_name, model_id, model_name, model_version)
                 FedMLDeployMasterJobRunner.start_device_inference_monitor(

From aa1df9922a4f50d9e6b2c372919163eacaca73aa Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 16 May 2024 13:18:32 +0800
Subject: [PATCH 064/282] [CoreEngine] make the handshaking protocol work.

---
 .../slave/base_slave_protocol_manager.py      | 15 +++++
 .../scheduler/slave/slave_protocol_manager.py | 56 +------------------
 2 files changed, 16 insertions(+), 55 deletions(-)

diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index 4ff931e6fd..1384e9906a 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -48,6 +48,7 @@ def __init__(self, args, agent_config=None):
         self.topic_report_status = None
         self.topic_ota_msg = None
         self.topic_request_device_info = None
+        self.topic_request_device_info_from_mlops = None
         self.topic_client_logout = None
         self.topic_response_job_status = None
         self.topic_report_device_status_in_job = None
@@ -87,6 +88,9 @@ def generate_topics(self):
         # The topic for requesting device info from the client.
         self.topic_request_device_info = "server/client/request_device_info/" + str(self.edge_id)
 
+        # The topic for requesting device info from mlops.
+        self.topic_request_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.edge_id}"
+
         # The topic for requesting device info from MLOps.
         self.topic_client_logout = "mlops/client/logout/" + str(self.edge_id)
 
@@ -114,6 +118,7 @@ def generate_topics(self):
         self.add_subscribe_topic(self.topic_report_status)
         self.add_subscribe_topic(self.topic_ota_msg)
         self.add_subscribe_topic(self.topic_request_device_info)
+        self.add_subscribe_topic(self.topic_request_device_info_from_mlops)
         self.add_subscribe_topic(self.topic_client_logout)
         self.add_subscribe_topic(self.topic_response_job_status)
         self.add_subscribe_topic(self.topic_report_device_status_in_job)
@@ -132,6 +137,7 @@ def add_protocol_handler(self):
         self.add_message_listener(self.topic_ota_msg, FedMLBaseSlaveProtocolManager.callback_client_ota_msg)
         self.add_message_listener(self.topic_report_status, self.callback_report_current_status)
         self.add_message_listener(self.topic_request_device_info, self.callback_report_device_info)
+        self.add_message_listener(self.topic_request_device_info_from_mlops, self.callback_request_device_info_from_mlops)
         self.add_message_listener(self.topic_client_logout, self.callback_client_logout)
         self.add_message_listener(self.topic_response_job_status, self.callback_response_job_status)
         self.add_message_listener(self.topic_report_device_status_in_job, self.callback_response_device_status_in_job)
@@ -402,6 +408,15 @@ def callback_report_device_info(self, topic, payload):
                 response_payload["context"] = context
             self.message_center.send_message(response_topic, json.dumps(response_payload), run_id=run_id)
 
+    def callback_request_device_info_from_mlops(self, topic, payload):
+        self.response_device_info_to_mlops(topic, payload)
+
+    def response_device_info_to_mlops(self, topic, payload):
+        response_topic = f"deploy/slave_agent/mlops/response_device_info"
+        response_payload = {"run_id": self.run_id, "slave_agent_device_id": self.edge_id,
+                            "fedml_version": fedml.__version__}
+        self.message_center.send_message(response_topic, json.dumps(response_payload))
+
     def callback_client_logout(self, topic, payload):
         payload_json = json.loads(payload)
         secret = payload_json.get("auth", None)
diff --git a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
index ef8dac8730..a1067a0d96 100755
--- a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
@@ -13,19 +13,11 @@ class FedMLLaunchSlaveProtocolManager(FedMLBaseSlaveProtocolManager):
 
     def __init__(self, args, agent_config=None):
         FedMLBaseSlaveProtocolManager.__init__(self, args, agent_config=agent_config)
-        self.topic_request_deploy_slave_device_info_from_mlops = None
-        self.topic_request_deploy_master_device_info_from_mlops = None
-        self.topic_request_edge_device_info_from_mlops = None
 
     # Override
     def generate_topics(self):
         super().generate_topics()
 
-        # The topic for requesting device info from mlops.
-        self.topic_request_edge_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.edge_id}"
-
-        self.add_subscribe_topic(self.topic_request_edge_device_info_from_mlops)
-
     # Override
     def add_protocol_handler(self):
         super().add_protocol_handler()
@@ -106,54 +98,8 @@ def _init_extra_items(self):
         os.environ["FEDML_DEPLOY_MASTER_ID"] = str(self.model_device_server_id)
         os.environ["FEDML_DEPLOY_WORKER_IDS"] = str(self.model_device_client_edge_id_list)
 
-        # Subscribe handshaking messages from MLOps.
-        self.subscribe_handshaking_messages_from_mlops()
-
         # Start the monitor process
         self.args = copy.deepcopy(in_args)
         self.mlops_metrics.stop_device_realtime_perf()
         self.mlops_metrics.report_device_realtime_perf(self.args, self.args.agent_config["mqtt_config"])
-        pass
-
-    def callback_response_device_info_to_mlops(self, topic, payload):
-        payload_json = json.loads(payload)
-        server_id = payload_json.get("server_id", 0)
-        run_id = payload_json.get("run_id", 0)
-        listen_edge_id = str(topic).split("/")[-1]
-        context = payload_json.get("context", None)
-        response_topic = f"deploy/slave_agent/mlops/response_device_info"
-        if self.mlops_metrics is not None and self.model_device_client_edge_id_list is not None and \
-                self.model_device_server_id is not None:
-            device_info_json = {
-                "edge_id": listen_edge_id,
-                "fedml_version": fedml.__version__,
-                "user_id": self.args.user
-            }
-            salve_device_ids = list()
-            for model_client_edge_id in self.model_device_client_edge_id_list:
-                salve_device_ids.append(model_client_edge_id)
-            response_payload = {"slave_device_id": self.model_device_client_edge_id_list[0],
-                                "slave_device_id_list": salve_device_ids,
-                                "master_device_id": self.model_device_server_id,
-                                "run_id": run_id, "edge_id": listen_edge_id,
-                                "edge_info": device_info_json}
-            if context is not None:
-                response_payload["context"] = context
-            self.message_center.send_message(response_topic, json.dumps(response_payload), run_id=run_id)
-
-    def subscribe_handshaking_messages_from_mlops(self):
-        # The topic for requesting deployment master device info from mlops.
-        self.topic_request_deploy_master_device_info_from_mlops = f"deploy/mlops/master_agent/request_device_info/{self.model_device_server_id}"
-
-        # The topic for requesting deployment slave device info from mlops.
-        self.topic_request_deploy_slave_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.model_device_client_edge_id_list[0]}"
-
-        self.add_subscribe_topic(self.topic_request_deploy_master_device_info_from_mlops)
-        self.add_subscribe_topic(self.topic_request_deploy_slave_device_info_from_mlops)
-
-        self.add_message_listener(
-            self.topic_request_edge_device_info_from_mlops, self.callback_response_device_info_to_mlops)
-        self.add_message_listener(
-            self.topic_request_deploy_master_device_info_from_mlops, self.callback_response_device_info_to_mlops)
-        self.add_message_listener(
-            self.topic_request_deploy_slave_device_info_from_mlops, self.callback_response_device_info_to_mlops)
\ No newline at end of file
+        pass
\ No newline at end of file

From 94f408f186a2c4d889eb73ee445655e25eccdfd4 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 16 May 2024 13:56:27 +0800
Subject: [PATCH 065/282] [CoreEngine] make the handshaking protocol work.

---
 .../computing/scheduler/master/base_master_protocol_manager.py  | 2 +-
 .../computing/scheduler/slave/base_slave_protocol_manager.py    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
index 67507182a9..cee91578dd 100755
--- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -448,7 +448,7 @@ def response_device_info_to_mlops(self, topic, payload):
         response_topic = f"deploy/master_agent/mlops/response_device_info"
         if self.mlops_metrics is not None:
             response_payload = {"run_id": self.run_id, "master_agent_device_id": self.edge_id,
-                                "fedml_version": fedml.__version__}
+                                "fedml_version": fedml.__version__, "edge_id": self.edge_id}
             self.mlops_metrics.report_json_message(response_topic, json.dumps(response_payload))
 
     def init_job_task(self, request_json):
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index 1384e9906a..aa69d4482d 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -414,7 +414,7 @@ def callback_request_device_info_from_mlops(self, topic, payload):
     def response_device_info_to_mlops(self, topic, payload):
         response_topic = f"deploy/slave_agent/mlops/response_device_info"
         response_payload = {"run_id": self.run_id, "slave_agent_device_id": self.edge_id,
-                            "fedml_version": fedml.__version__}
+                            "fedml_version": fedml.__version__, "edge_id": self.edge_id}
         self.message_center.send_message(response_topic, json.dumps(response_payload))
 
     def callback_client_logout(self, topic, payload):

From 0726f66c6a3a3c3ba7263983741552803a6bb5b2 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 16 May 2024 18:36:24 +0800
Subject: [PATCH 066/282] [CoreEngine] update the model master runner.

---
 .../computing/scheduler/model_scheduler/master_job_runner.py  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index 4d43d7c5c3..5991151e94 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -114,13 +114,13 @@ def run_impl(
             message_center=self.message_center)
 
         # start unified inference server
-        self.start_device_inference_gateway(
+        self.inference_gateway_process = self.start_device_inference_gateway(
             inference_port=inference_port, agent_config=self.agent_config)
 
         # start inference monitor server
         self.stop_device_inference_monitor(
             run_id, end_point_name, model_id, model_name, model_version)
-        self.start_device_inference_monitor(
+        self.monitor_process = self.start_device_inference_monitor(
             run_id, end_point_name, model_id, model_name, model_version)
 
         # Changed the status to "IDLE"

From 28984d54a8b3cb470438be98931bae50f7939e6b Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 16 May 2024 18:57:20 +0800
Subject: [PATCH 067/282] [CoreEngine] update the model master runner.

---
 .../scheduler/model_scheduler/master_job_runner.py    | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index 5991151e94..b0a15ed8ba 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -114,13 +114,13 @@ def run_impl(
             message_center=self.message_center)
 
         # start unified inference server
-        self.inference_gateway_process = self.start_device_inference_gateway(
+        FedMLDeployMasterJobRunner.start_device_inference_gateway(
             inference_port=inference_port, agent_config=self.agent_config)
 
         # start inference monitor server
-        self.stop_device_inference_monitor(
+        FedMLDeployMasterJobRunner.stop_device_inference_monitor(
             run_id, end_point_name, model_id, model_name, model_version)
-        self.monitor_process = self.start_device_inference_monitor(
+        FedMLDeployMasterJobRunner.start_device_inference_monitor(
             run_id, end_point_name, model_id, model_name, model_version)
 
         # Changed the status to "IDLE"
@@ -546,8 +546,6 @@ def recover_inference_and_monitor():
             except Exception as e:
                 pass
 
-            FedMLDeployMasterJobRunner.start_device_inference_gateway(agent_config=agent_config)
-
             history_jobs = FedMLServerDataInterface.get_instance().get_history_jobs()
             for job in history_jobs.job_list:
                 if job.running_json is None:
@@ -566,6 +564,9 @@ def recover_inference_and_monitor():
                 if not is_activated:
                     continue
 
+                FedMLDeployMasterJobRunner.start_device_inference_gateway(
+                    inference_port=inference_port, agent_config=agent_config)
+
                 FedMLDeployMasterJobRunner.stop_device_inference_monitor(
                     run_id, end_point_name, model_id, model_name, model_version)
                 FedMLDeployMasterJobRunner.start_device_inference_monitor(

From 310d43cbcde6d6fea8249b18ecf82e061816ce84 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 16 May 2024 19:18:10 +0800
Subject: [PATCH 068/282] [CoreEngine] update the model master runner.

---
 .../model_scheduler/master_job_runner.py      | 101 ++++++++++++------
 1 file changed, 71 insertions(+), 30 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index b0a15ed8ba..9b21237878 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -25,7 +25,6 @@
 
 
 class FedMLDeployMasterJobRunner(FedMLBaseMasterJobRunner, FedMLDeployJobRunnerMsgSender, ABC):
-
     default_redis_addr = "local"
     default_redis_port = "6379"
     default_redis_password = "fedml_default"
@@ -54,7 +53,7 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id
         self.deployment_result_queue = Queue()
 
     # Override
-    def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None,):
+    def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None, ):
         return FedMLDeployMasterJobRunner(
             args, run_id=run_id, request_json=request_json, agent_config=self.agent_config, edge_id=edge_id
         )
@@ -65,10 +64,10 @@ def _generate_extend_queue_list(self):
 
     # Override
     def run_impl(
-        self, edge_id_status_queue, edge_device_info_queue, run_metrics_queue,
-        run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue,
-        run_extend_queue_list=None, sender_message_queue=None, listener_message_queue=None,
-        status_center_queue=None
+            self, edge_id_status_queue, edge_device_info_queue, run_metrics_queue,
+            run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue,
+            run_extend_queue_list=None, sender_message_queue=None, listener_message_queue=None,
+            status_center_queue=None
     ):
         # Parse the model parameters.
         run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \
@@ -331,7 +330,7 @@ def process_deployment_result_message(self, topic=None, payload=None):
             elif run_operation == "UPDATE":
                 # Overwrite the json with the rollback version diff
                 rollback_version_diff = self.replica_controller.rollback_get_replica_version_diff(
-                        device_id_trigger=device_id, replica_no_trigger=replica_no)
+                    device_id_trigger=device_id, replica_no_trigger=replica_no)
 
                 # Change the target version to the start version
                 self.replica_controller.rollback_setback_target_replica_version()
@@ -465,6 +464,15 @@ def process_deployment_result_message(self, topic=None, payload=None):
     def start_device_inference_gateway(
             inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
             agent_config=None, redis_addr="localhost", redis_port=6379, redis_password="fedml_default"
+    ):
+        from multiprocessing import Process
+        Process(target=FedMLDeployMasterJobRunner.start_device_inference_gateway_entry,
+                args=(inference_port, agent_config, redis_addr, redis_port, redis_password)).start()
+
+    @staticmethod
+    def start_device_inference_gateway_entry(
+            inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
+            agent_config=None, redis_addr="localhost", redis_port=6379, redis_password="fedml_default"
     ):
         # start unified inference server
         python_program = get_python_program()
@@ -489,20 +497,32 @@ def start_device_inference_gateway(
                     agent_config["mqtt_config"]["MQTT_USER"] + connect_str +
                     agent_config["mqtt_config"]["MQTT_PWD"] + connect_str +
                     str(agent_config["mqtt_config"]["MQTT_KEEPALIVE"]), "FEDML@9999GREAT")
-                inference_gateway_process = ServerConstants.exec_console_with_script(
-                    "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" "
-                    "END_POINT_NAME=\"{}\" "
-                    "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" "
-                    "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} "
-                    "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
-                    "--log-level critical".format(
-                        redis_addr, str(redis_port), redis_password, "",
-                        "", "", "", fedml.get_env_version(), use_mqtt_inference,
-                        use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port),
-                        fedml_base_dir),
-                    should_capture_stdout=False, should_capture_stderr=False)
-
-                return inference_gateway_process
+                python_program = get_python_program()
+                os.system("REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" "
+                          "END_POINT_NAME=\"{}\" "
+                          "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" "
+                          "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} "
+                          "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
+                          "--log-level critical".format(
+                    redis_addr, str(redis_port), redis_password, "",
+                    "", "", "", fedml.get_env_version(), use_mqtt_inference,
+                    use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port),
+                    fedml_base_dir))
+
+                # inference_gateway_process = ServerConstants.exec_console_with_script(
+                #     "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" "
+                #     "END_POINT_NAME=\"{}\" "
+                #     "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" "
+                #     "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} "
+                #     "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
+                #     "--log-level critical".format(
+                #         redis_addr, str(redis_port), redis_password, "",
+                #         "", "", "", fedml.get_env_version(), use_mqtt_inference,
+                #         use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port),
+                #         fedml_base_dir),
+                #     should_capture_stdout=False, should_capture_stderr=False)
+                #
+                # return inference_gateway_process
 
         return None
 
@@ -510,6 +530,16 @@ def start_device_inference_gateway(
     def start_device_inference_monitor(
             run_id, end_point_name, model_id, model_name, model_version, check_stopped_event=True,
             redis_addr="localhost", redis_port=6379, redis_password="fedml_default"
+    ):
+        from multiprocessing import Process
+        Process(target=FedMLDeployMasterJobRunner.start_device_inference_monitor_entry,
+                args=(run_id, end_point_name, model_id, model_name, model_version, check_stopped_event,
+                      redis_addr, redis_port, redis_password)).start()
+
+    @staticmethod
+    def start_device_inference_monitor_entry(
+            run_id, end_point_name, model_id, model_name, model_version, check_stopped_event=True,
+            redis_addr="localhost", redis_port=6379, redis_password="fedml_default"
     ):
         # start inference monitor server
         # Will report the qps related metrics to the MLOps
@@ -520,14 +550,25 @@ def start_device_inference_monitor(
         python_program = get_python_program()
         running_model_name = ServerConstants.get_running_model_name(end_point_name,
                                                                     model_name, model_version, run_id, model_id)
-        monitor_process = ServerConstants.exec_console_with_shell_script_list(
-            [python_program, monitor_file, "-v", fedml.get_env_version(), "-ep", run_id_str,
-             "-epn", str(end_point_name), "-mi", str(model_id), "-mn", model_name,
-             "-mv", model_version, "-iu", "infer_url", "-ra", redis_addr,
-             "-rp", str(redis_port), "-rpw", redis_password],
-            should_capture_stdout=False, should_capture_stderr=False
-        )
-        return monitor_process
+
+        os.system(f"{python_program} {monitor_file} -v {fedml.get_env_version()} -ep {run_id_str} "
+                  f"-epn {end_point_name} -mi {model_id} -mn {model_name} -mv \"{model_version}\" "
+                  f"-iu infer_url -ra {redis_addr} -rp {redis_port} -rpw redis_password")
+
+        # from fedml.computing.scheduler.model_scheduler.device_model_monitor import FedMLModelMetrics
+        # monitor_center = FedMLModelMetrics(
+        #     run_id_str, end_point_name, model_id, model_name, model_version,
+        #     "infer_url", redis_addr, redis_port, redis_password, version=fedml.get_env_version())
+        # monitor_center.start_monitoring_metrics_center()
+
+        # monitor_process = ServerConstants.exec_console_with_shell_script_list(
+        #     [python_program, monitor_file, "-v", fedml.get_env_version(), "-ep", run_id_str,
+        #      "-epn", str(end_point_name), "-mi", str(model_id), "-mn", model_name,
+        #      "-mv", model_version, "-iu", "infer_url", "-ra", redis_addr,
+        #      "-rp", str(redis_port), "-rpw", redis_password],
+        #     should_capture_stdout=False, should_capture_stderr=False
+        # )
+        # return monitor_process
 
     @staticmethod
     def stop_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version):
@@ -779,7 +820,7 @@ def parse_model_run_params(running_json):
         model_version = model_config["model_version"]
         model_config_parameters = running_json.get("parameters", {})
 
-        inference_port = model_config_parameters.get("server_internal_port",    # Internal port is for the gateway
+        inference_port = model_config_parameters.get("server_internal_port",  # Internal port is for the gateway
                                                      ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
         inference_port_external = model_config_parameters.get("server_external_port", inference_port)
 

From f378d075d1d612f78a8340ef13eadf5566d03c68 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 16 May 2024 19:37:50 +0800
Subject: [PATCH 069/282] [CoreEngine] update the model master runner.

---
 .../model_scheduler/device_model_inference.py | 56 +++++++++----------
 1 file changed, 28 insertions(+), 28 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index eb3088f327..b8d85edd31 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -26,35 +26,35 @@
     pass
 
 
-class Settings(BaseSettings):
-    redis_addr: str
-    redis_port: str
-    redis_password: str
-    end_point_name: str
-    model_name: str
-    model_version: str
-    model_infer_url: str
-    version: str
-    use_mqtt_inference: bool
-    use_worker_gateway: bool
-    ext_info: str
-
-
-settings = Settings()
-
-# class settings:
-#     redis_addr = "127.0.0.1"
-#     redis_port = 6379
-#     redis_password = "fedml_default"
-#     end_point_name = ""
-#     model_name = ""
-#     model_version = ""
-#     model_infer_url = "127.0.0.1"
-#     version = "dev"
-#     use_mqtt_inference = False
-#     use_worker_gateway = False
-#     ext_info = "2b34303961245c4f175f2236282d7a272c040b0904747579087f6a760112030109010c215d54505707140005190a051c347f365c4a430c020a7d39120e26032a78730f797f7c031f0901657e75"
+# class Settings(BaseSettings):
+#     redis_addr: str
+#     redis_port: str
+#     redis_password: str
+#     end_point_name: str
+#     model_name: str
+#     model_version: str
+#     model_infer_url: str
+#     version: str
+#     use_mqtt_inference: bool
+#     use_worker_gateway: bool
+#     ext_info: str
 #
+#
+# settings = Settings()
+
+class settings:
+    redis_addr = "127.0.0.1"
+    redis_port = 6379
+    redis_password = "fedml_default"
+    end_point_name = ""
+    model_name = ""
+    model_version = ""
+    model_infer_url = "127.0.0.1"
+    version = "dev"
+    use_mqtt_inference = False
+    use_worker_gateway = False
+    ext_info = "2b34303961245c4f175f2236282d7a272c040b0904747579087f6a760112030109010c215d54505707140005190a051c347f365c4a430c020a7d39120e26032a78730f797f7c031f0901657e75"
+
 
 api = FastAPI()
 

From 871cba9792972cf6bd8956d3cac1fd09b4b2f67f Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 16 May 2024 19:40:51 +0800
Subject: [PATCH 070/282] [CoreEngine] update the model master runner.

---
 .../computing/scheduler/model_scheduler/master_job_runner.py    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index 9b21237878..2c48a4277e 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -553,7 +553,7 @@ def start_device_inference_monitor_entry(
 
         os.system(f"{python_program} {monitor_file} -v {fedml.get_env_version()} -ep {run_id_str} "
                   f"-epn {end_point_name} -mi {model_id} -mn {model_name} -mv \"{model_version}\" "
-                  f"-iu infer_url -ra {redis_addr} -rp {redis_port} -rpw redis_password")
+                  f"-iu infer_url -ra {redis_addr} -rp {redis_port} -rpw {redis_password}")
 
         # from fedml.computing.scheduler.model_scheduler.device_model_monitor import FedMLModelMetrics
         # monitor_center = FedMLModelMetrics(

From 9c9b20f0cfcf4f99572f025d14729b5c789e0cdf Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 16 May 2024 20:14:01 +0800
Subject: [PATCH 071/282] [CoreEngine] do not kill the subprocess when exiting
 the model master runner.

---
 .../master/base_master_job_runner.py          |  5 +-
 .../scheduler/master/server_constants.py      |  5 +-
 .../device_server_constants.py                |  5 +-
 .../model_scheduler/master_job_runner.py      | 90 ++++++-------------
 4 files changed, 37 insertions(+), 68 deletions(-)

diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py
index 1827de481d..18aa8a6eed 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py
@@ -111,10 +111,13 @@ def run(
             if self.mlops_metrics is not None:
                 self.mlops_metrics.stop_sys_perf()
             time.sleep(3)
-            ServerConstants.cleanup_run_process(self.run_id)
+            self.cleanup_runner_process(self.run_id)
             ServerConstants.cleanup_learning_process(self.run_id)
             ServerConstants.cleanup_bootstrap_process(self.run_id)
 
+    def cleanup_runner_process(self, run_id):
+        ServerConstants.cleanup_run_process(run_id)
+
     @debug
     @abstractmethod
     def run_impl(
diff --git a/python/fedml/computing/scheduler/master/server_constants.py b/python/fedml/computing/scheduler/master/server_constants.py
index 058c57e199..b835ba1bde 100644
--- a/python/fedml/computing/scheduler/master/server_constants.py
+++ b/python/fedml/computing/scheduler/master/server_constants.py
@@ -268,9 +268,10 @@ def get_dataset_metadata_url():
         return get_dataset_metadata_url
 
     @staticmethod
-    def cleanup_run_process(run_id):
+    def cleanup_run_process(run_id, not_kill_subprocess=False):
         RunProcessUtils.cleanup_run_process(
-            run_id, ServerConstants.get_data_dir(), ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME)
+            run_id, ServerConstants.get_data_dir(), ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME,
+            not_kill_subprocess=not_kill_subprocess)
 
     @staticmethod
     def save_run_process(run_id, process_id):
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
index 86c7aac992..6b5b335863 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
@@ -295,9 +295,10 @@ def get_public_ip():
         return ip
 
     @staticmethod
-    def cleanup_run_process(run_id):
+    def cleanup_run_process(run_id, not_kill_subprocess=False):
         RunProcessUtils.cleanup_run_process(
-            run_id, ServerConstants.get_data_dir(), ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME)
+            run_id, ServerConstants.get_data_dir(), ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME,
+            not_kill_subprocess=not_kill_subprocess)
 
     @staticmethod
     def save_run_process(run_id, process_id):
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index 2c48a4277e..eef03d53f2 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -460,17 +460,11 @@ def process_deployment_result_message(self, topic=None, payload=None):
             time.sleep(3)
             self.trigger_completed_event()
 
-    @staticmethod
-    def start_device_inference_gateway(
-            inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
-            agent_config=None, redis_addr="localhost", redis_port=6379, redis_password="fedml_default"
-    ):
-        from multiprocessing import Process
-        Process(target=FedMLDeployMasterJobRunner.start_device_inference_gateway_entry,
-                args=(inference_port, agent_config, redis_addr, redis_port, redis_password)).start()
+    def cleanup_runner_process(self, run_id):
+        ServerConstants.cleanup_run_process(run_id, not_kill_subprocess=True)
 
     @staticmethod
-    def start_device_inference_gateway_entry(
+    def start_device_inference_gateway(
             inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
             agent_config=None, redis_addr="localhost", redis_port=6379, redis_password="fedml_default"
     ):
@@ -498,31 +492,22 @@ def start_device_inference_gateway_entry(
                     agent_config["mqtt_config"]["MQTT_PWD"] + connect_str +
                     str(agent_config["mqtt_config"]["MQTT_KEEPALIVE"]), "FEDML@9999GREAT")
                 python_program = get_python_program()
-                os.system("REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" "
-                          "END_POINT_NAME=\"{}\" "
-                          "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" "
-                          "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} "
-                          "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
-                          "--log-level critical".format(
-                    redis_addr, str(redis_port), redis_password, "",
-                    "", "", "", fedml.get_env_version(), use_mqtt_inference,
-                    use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port),
-                    fedml_base_dir))
-
-                # inference_gateway_process = ServerConstants.exec_console_with_script(
-                #     "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" "
-                #     "END_POINT_NAME=\"{}\" "
-                #     "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" "
-                #     "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} "
-                #     "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
-                #     "--log-level critical".format(
-                #         redis_addr, str(redis_port), redis_password, "",
-                #         "", "", "", fedml.get_env_version(), use_mqtt_inference,
-                #         use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port),
-                #         fedml_base_dir),
-                #     should_capture_stdout=False, should_capture_stderr=False)
-                #
-                # return inference_gateway_process
+                inference_gateway_process = ServerConstants.exec_console_with_script(
+                    "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" "
+                    "END_POINT_NAME=\"{}\" "
+                    "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" "
+                    "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} "
+                    "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
+                    "--log-level critical".format(
+                        redis_addr, str(redis_port), redis_password, "",
+                        "", "", "", fedml.get_env_version(), use_mqtt_inference,
+                        use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port),
+                        fedml_base_dir),
+                    should_capture_stdout=False, should_capture_stderr=False)
+
+                return inference_gateway_process
+            else:
+                return inference_gateway_pids[0]
 
         return None
 
@@ -530,16 +515,6 @@ def start_device_inference_gateway_entry(
     def start_device_inference_monitor(
             run_id, end_point_name, model_id, model_name, model_version, check_stopped_event=True,
             redis_addr="localhost", redis_port=6379, redis_password="fedml_default"
-    ):
-        from multiprocessing import Process
-        Process(target=FedMLDeployMasterJobRunner.start_device_inference_monitor_entry,
-                args=(run_id, end_point_name, model_id, model_name, model_version, check_stopped_event,
-                      redis_addr, redis_port, redis_password)).start()
-
-    @staticmethod
-    def start_device_inference_monitor_entry(
-            run_id, end_point_name, model_id, model_name, model_version, check_stopped_event=True,
-            redis_addr="localhost", redis_port=6379, redis_password="fedml_default"
     ):
         # start inference monitor server
         # Will report the qps related metrics to the MLOps
@@ -550,25 +525,14 @@ def start_device_inference_monitor_entry(
         python_program = get_python_program()
         running_model_name = ServerConstants.get_running_model_name(end_point_name,
                                                                     model_name, model_version, run_id, model_id)
-
-        os.system(f"{python_program} {monitor_file} -v {fedml.get_env_version()} -ep {run_id_str} "
-                  f"-epn {end_point_name} -mi {model_id} -mn {model_name} -mv \"{model_version}\" "
-                  f"-iu infer_url -ra {redis_addr} -rp {redis_port} -rpw {redis_password}")
-
-        # from fedml.computing.scheduler.model_scheduler.device_model_monitor import FedMLModelMetrics
-        # monitor_center = FedMLModelMetrics(
-        #     run_id_str, end_point_name, model_id, model_name, model_version,
-        #     "infer_url", redis_addr, redis_port, redis_password, version=fedml.get_env_version())
-        # monitor_center.start_monitoring_metrics_center()
-
-        # monitor_process = ServerConstants.exec_console_with_shell_script_list(
-        #     [python_program, monitor_file, "-v", fedml.get_env_version(), "-ep", run_id_str,
-        #      "-epn", str(end_point_name), "-mi", str(model_id), "-mn", model_name,
-        #      "-mv", model_version, "-iu", "infer_url", "-ra", redis_addr,
-        #      "-rp", str(redis_port), "-rpw", redis_password],
-        #     should_capture_stdout=False, should_capture_stderr=False
-        # )
-        # return monitor_process
+        monitor_process = ServerConstants.exec_console_with_shell_script_list(
+            [python_program, monitor_file, "-v", fedml.get_env_version(), "-ep", run_id_str,
+             "-epn", str(end_point_name), "-mi", str(model_id), "-mn", model_name,
+             "-mv", model_version, "-iu", "infer_url", "-ra", redis_addr,
+             "-rp", str(redis_port), "-rpw", redis_password],
+            should_capture_stdout=False, should_capture_stderr=False
+        )
+        return monitor_process
 
     @staticmethod
     def stop_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version):

From 17b053cf7243860791616e75abcab647528daac9 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 16 May 2024 20:19:05 +0800
Subject: [PATCH 072/282] [CoreEngine] do not kill the subprocess when exiting
 the model master runner.

---
 .../scheduler/comm_utils/run_process_utils.py | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/run_process_utils.py b/python/fedml/computing/scheduler/comm_utils/run_process_utils.py
index e64e708fb5..05cc342e36 100644
--- a/python/fedml/computing/scheduler/comm_utils/run_process_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/run_process_utils.py
@@ -14,8 +14,10 @@ def get_run_process_prefix(prefix, run_id):
         return f"{prefix}-run@{run_id}@pid@"
 
     @staticmethod
-    def cleanup_run_process(run_id, data_dir, info_dir,
-                            info_file_prefix=SchedulerConstants.RUN_PROCESS_TYPE_RUNNER_PROCESS):
+    def cleanup_run_process(
+            run_id, data_dir, info_dir,
+            info_file_prefix=SchedulerConstants.RUN_PROCESS_TYPE_RUNNER_PROCESS, not_kill_subprocess=False
+    ):
         try:
             local_pkg_data_dir = data_dir
             run_process_dir = os.path.join(local_pkg_data_dir, info_dir)
@@ -43,12 +45,13 @@ def cleanup_run_process(run_id, data_dir, info_dir,
 
                 try:
                     process = psutil.Process(int(process_id))
-                    child_processes = process.children(recursive=True)
-                    for sub_process in child_processes:
-                        if platform.system() == 'Windows':
-                            os.system("taskkill /PID {} /T /F".format(sub_process.pid))
-                        else:
-                            os.kill(sub_process.pid, signal.SIGKILL)
+                    if not not_kill_subprocess:
+                        child_processes = process.children(recursive=True)
+                        for sub_process in child_processes:
+                            if platform.system() == 'Windows':
+                                os.system("taskkill /PID {} /T /F".format(sub_process.pid))
+                            else:
+                                os.kill(sub_process.pid, signal.SIGKILL)
 
                     if process is not None:
                         if platform.system() == 'Windows':

From b8ec6163cd9df168d315f8f019e01d5b78cd2e9a Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Fri, 17 May 2024 21:34:25 +0800
Subject: [PATCH 073/282] [CoreEngine] fixed the issue that the gpu id is not
 released properly in some cases.

---
 .../scheduler/comm_utils/job_monitor.py       |  23 ++--
 .../scheduler/scheduler_core/status_center.py |  76 ++-----------
 .../status_manager_protocols.py               | 101 ++++++++++++++----
 3 files changed, 101 insertions(+), 99 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
index 8ae6e1c744..bada84d96e 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
@@ -354,15 +354,15 @@ def monitor_slave_run_process_status(self):
                 # Check if all processes of the specific run are exited
                 # FIXME: Proactively release the gpu ids when the run processes have not even started yet as the docker
                 #  image is being pulled
-                run_process_list = client_constants.ClientConstants.get_learning_process_list(job.job_id)
-                all_run_processes_exited = True if len(run_process_list) <= 0 else False
-                if all_run_processes_exited:
-                    if not self.released_runs.get(str(job.job_id), False):
-                        self.released_runs[str(job.job_id)] = True
-                        # Release the gpu ids
-                        print(
-                            f"[run/device][{job.job_id}/{job.edge_id}] Release gpu resource when run processes has exited on monioring slave runs periodically.")
-                        JobRunnerUtils.get_instance().release_gpu_ids(job.job_id, job.edge_id)
+                # run_process_list = client_constants.ClientConstants.get_learning_process_list(job.job_id)
+                # all_run_processes_exited = True if len(run_process_list) <= 0 else False
+                # if all_run_processes_exited:
+                #     if not self.released_runs.get(str(job.job_id), False):
+                #         self.released_runs[str(job.job_id)] = True
+                #         # Release the gpu ids
+                #         print(
+                #             f"[run/device][{job.job_id}/{job.edge_id}] Release gpu resource when run processes has exited on monioring slave runs periodically.")
+                #         JobRunnerUtils.get_instance().release_gpu_ids(job.job_id, job.edge_id)
 
                 # Get the timeout threshold
                 timeout_threshold = None
@@ -381,8 +381,9 @@ def monitor_slave_run_process_status(self):
 
                 # If the run processes have exited but run status is not completed and
                 # timeout is out of the range, then release gpu ids and report failed status to the master agent.
-                if all_run_processes_exited and not SchedulerConstants.is_run_completed(job.status) and \
-                        timeout_threshold is not None and timeout > timeout_threshold:
+                # if all_run_processes_exited and not SchedulerConstants.is_run_completed(job.status) and \
+                #         timeout_threshold is not None and timeout > timeout_threshold:
+                if timeout_threshold is not None and timeout > timeout_threshold:
                     # Report failed status to the master agent
                     mlops.log_training_failed_status(
                         run_id=job.job_id, edge_id=job.edge_id, enable_broadcast=True)
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py
index 4a55dbb679..c0e1b6633a 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py
@@ -1,8 +1,6 @@
 import logging
 import time
 
-from ..slave.client_constants import ClientConstants
-from ..master.server_constants import ServerConstants
 from enum import Enum, unique
 import multiprocessing
 from multiprocessing import Process, Queue
@@ -11,7 +9,6 @@
 from .message_center import FedMLMessageCenter
 import traceback
 from .status_manager_protocols import FedMLStatusManager
-from .compute_cache_manager import ComputeCacheManager
 
 
 @unique
@@ -87,11 +84,6 @@ class FedMLStatusCenter(object):
 
     def __init__(self, message_queue=None):
         self.status_queue = message_queue
-        self.job_status_in_slave = dict()
-        self.entire_job_status = None
-        self.job_status_in_master = dict()
-        self.slave_devices_status = dict()
-        self.master_devices_status = dict()
         self.status_center_process = None
         self.status_event = None
         self.status_sender_message_center_queue = None
@@ -108,50 +100,6 @@ def __repr__(self):
             attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()),
         )
 
-    def add_job_status_in_slave(self, device_id, status):
-        self.job_status_in_slave[device_id] = self._status_transition(status)
-
-    def add_job_status_in_master(self, device_id, status):
-        self.job_status_in_master[device_id] = self._status_transition(status)
-
-    def set_entire_job_status(self, status):
-        self.entire_job_status = status
-
-    def add_slave_device_status(self, device_id, status):
-        self.slave_devices_status[device_id] = self._status_transition(status)
-
-    def add_master_device_status(self, device_id, status):
-        self.master_devices_status[device_id] = self._status_transition(status)
-
-    def get_job_status_in_slave(self, device_id):
-        return self.job_status_in_slave.get(device_id, None)
-
-    def get_job_status_in_master(self, device_id):
-        return self.job_status_in_master.get(device_id, None)
-
-    def get_entire_job_status(self):
-        return self.entire_job_status
-
-    def get_slave_device_status(self, device_id):
-        return self.slave_devices_status.get(device_id, None)
-
-    def get_master_device_status(self, device_id):
-        return self.master_devices_status.get(device_id, None)
-
-    def _status_transition(self, status):
-        transition_status = status
-        if self.entire_job_status is not None:
-            if self.entire_job_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \
-                    self.entire_job_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED:
-                if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \
-                        status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \
-                        status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED:
-                    transition_status = status
-                else:
-                    transition_status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED
-
-        return transition_status
-
     def get_status_runner(self):
         return None
 
@@ -205,16 +153,6 @@ def rebuild_message_center(self, message_center_queue):
     def rebuild_status_center(self, status_queue):
         pass
 
-    @staticmethod
-    def save_job_status(run_id, status):
-        ComputeCacheManager.get_instance().set_redis_params()
-        ComputeCacheManager.get_instance().get_status_cache().save_job_status(run_id, status)
-
-    @staticmethod
-    def save_device_status_in_job(run_id, device_id, status):
-        ComputeCacheManager.get_instance().set_redis_params()
-        ComputeCacheManager.get_instance().get_status_cache().save_device_status_in_job(run_id, device_id, status)
-
     def run_status_dispatcher(self, status_event, status_queue,
                               sender_message_center_queue,
                               listener_message_center_queue):
@@ -272,6 +210,10 @@ def run_status_dispatcher(self, status_event, status_queue,
                 else:
                     status_manager_instances[status_entity.run_id].edge_id = status_entity.edge_id
 
+                # if the job status is completed then continue
+                if status_manager_instances[status_entity.run_id].is_job_completed():
+                    continue
+
                 # Process the master and slave status.
                 if message_entity.topic.startswith(FedMLStatusCenter.TOPIC_MASTER_STATUS_PREFIX):
                     # Process the job status
@@ -279,7 +221,12 @@ def run_status_dispatcher(self, status_event, status_queue,
                         message_entity.topic, message_entity.payload)
 
                     # Save the job status
-                    FedMLStatusCenter.save_job_status(status_entity.run_id, self.get_entire_job_status())
+                    status_manager_instances[status_entity.run_id].save_job_status()
+
+                    # Popup the status manager instance when the job status is completed
+                    if status_manager_instances[status_entity.run_id].is_job_completed():
+                        status_manager_instances.pop(status_entity.run_id)
+                        continue
 
                 elif message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_STATUS_PREFIX):
                     # Process the slave device status
@@ -287,8 +234,7 @@ def run_status_dispatcher(self, status_event, status_queue,
                         message_entity.topic, message_entity.payload)
 
                     # Save the device status in job
-                    FedMLStatusCenter.save_device_status_in_job(status_entity.run_id, status_entity.edge_id,
-                                                                self.get_job_status_in_slave(status_entity.edge_id))
+                    status_manager_instances[status_entity.run_id].save_device_status_in_job(status_entity.edge_id)
 
             except Exception as e:
                 if message_entity is not None:
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
index e5dd312c80..272423f147 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
@@ -12,6 +12,7 @@
 from ..master.server_data_interface import FedMLServerDataInterface
 from .message_common import LogArgs
 from .general_constants import GeneralConstants
+from ..scheduler_core.compute_cache_manager import ComputeCacheManager
 
 
 class FedMLStatusManager(object):
@@ -33,6 +34,15 @@ def __init__(self, run_id=None, edge_id=None, server_id=None,
         self.log_args = LogArgs(role="server", edge_id=self.edge_id,
                                 server_id=self.server_id, log_file_dir=ServerConstants.get_log_file_dir())
 
+        self.job_status_in_slave = dict()
+        self.entire_job_status = None
+        self.job_status_in_master = dict()
+        self.slave_devices_status = dict()
+        self.master_devices_status = dict()
+        self.completed_job_status_list = [ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED,
+                                          ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED,
+                                          ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED]
+
     def __repr__(self):
         return "<{klass} @{id:x} {attrs}>".format(
             klass=self.__class__.__name__,
@@ -40,6 +50,65 @@ def __repr__(self):
             attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()),
         )
 
+    def add_job_status_in_slave(self, device_id, status):
+        self.job_status_in_slave[device_id] = self._status_transition(status)
+
+    def add_job_status_in_master(self, device_id, status):
+        self.job_status_in_master[device_id] = self._status_transition(status)
+
+    def set_entire_job_status(self, status):
+        self.entire_job_status = status
+
+    def add_slave_device_status(self, device_id, status):
+        self.slave_devices_status[device_id] = self._status_transition(status)
+
+    def add_master_device_status(self, run_id, device_id, status):
+        self.master_devices_status[device_id] = self._status_transition(status)
+
+    def get_job_status_in_slave(self, device_id):
+        return self.job_status_in_slave.get(device_id, None)
+
+    def get_job_status_in_master(self, device_id):
+        return self.job_status_in_master.get(device_id, None)
+
+    def get_entire_job_status(self):
+        return self.entire_job_status
+
+    def get_slave_device_status(self, device_id):
+        return self.slave_devices_status.get(device_id, None)
+
+    def get_master_device_status(self, device_id):
+        return self.master_devices_status.get(device_id, None)
+
+    def is_job_completed(self):
+        if self.entire_job_status and self.entire_job_status in self.completed_job_status_list:
+            return True
+        return False
+
+    def _status_transition(self, status):
+        transition_status = status
+        if self.entire_job_status is not None:
+            if self.entire_job_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \
+                    self.entire_job_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED:
+                if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \
+                        status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \
+                        status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED:
+                    transition_status = status
+                else:
+                    transition_status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED
+
+        return transition_status
+
+    def save_job_status(self):
+        ComputeCacheManager.get_instance().set_redis_params()
+        ComputeCacheManager.get_instance().get_status_cache().save_job_status(
+            self.run_id, self.get_entire_job_status())
+
+    def save_device_status_in_job(self, device_id):
+        ComputeCacheManager.get_instance().set_redis_params()
+        ComputeCacheManager.get_instance().get_status_cache().save_device_status_in_job(
+            self.run_id, device_id, self.get_job_status_in_slave(device_id))
+
     def process_job_completed_status(self, master_id, status):
         # Stop the system performance monitor
         try:
@@ -75,10 +144,8 @@ def process_job_completed_status(self, master_id, status):
             self.report_deployment_status(self.run_id, GeneralConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
 
     def process_job_exception_status(self, master_id, status):
-        # Send the exception status to slave devices.
-        self.report_exception_status(
-            self.edge_id_list, run_id=self.run_id, server_id=master_id,
-            status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)
+        # Report exception job status
+        self.report_exception_status(status)
 
         # Save the job status to local storage
         FedMLServerDataInterface.get_instance().save_job_status(self.run_id, master_id, status, status)
@@ -113,9 +180,9 @@ def status_center_process_master_status(self, topic, payload):
 
     def process_job_status_consensus(self, run_id, master_id, status):
         # Set the master status in the job and entire job status
-        self.status_center.set_entire_job_status(status)
-        self.status_center.add_job_status_in_master(master_id, status)
-        status = self.status_center.get_entire_job_status()
+        self.set_entire_job_status(status)
+        self.add_job_status_in_master(master_id, status)
+        status = self.get_entire_job_status()
 
         # Set the device status based on the job status
         edge_id_status_dict = self.client_agent_active_list.get(f"{run_id}", {})
@@ -152,8 +219,8 @@ def get_device_consensus_status_in_job(job_status, device_status):
             return None
 
     def get_device_consensus_status_in_current_device(self, edge_id, status):
-        self.status_center.add_job_status_in_slave(edge_id, status)
-        consensus_status = self.status_center.get_job_status_in_slave(edge_id)
+        self.add_job_status_in_slave(edge_id, status)
+        consensus_status = self.get_job_status_in_slave(edge_id)
         consensus_status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED \
             if consensus_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION else consensus_status
         return consensus_status
@@ -275,25 +342,13 @@ def report_server_status(self, run_id, edge_id, server_id, status):
         self.status_reporter.report_server_id_status(
             run_id, status, edge_id=edge_id, server_id=server_id, server_agent_id=edge_id, update_db=False)
 
-    def report_exception_status(
-            self, edge_id_list, run_id=0, server_id=None, status=None, payload=None):
-        if payload is None:
-            payload_obj = {"runId": run_id, "edgeids": edge_id_list}
-            if server_id is not None:
-                payload_obj["serverId"] = server_id
-        else:
-            payload_obj = json.loads(payload)
-        payload_obj["run_status"] = ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION if status is None else status
-        topic_exception = "flserver_agent/" + str(self.edge_id) + "/stop_train"
-        self.message_reporter.send_message(topic_exception, json.dumps(payload_obj))
+    def report_exception_status(self, status):
+        self.status_reporter.report_job_status(self.run_id, status)
 
     def status_center_process_slave_status_to_master_in_slave_agent(self, topic, payload):
         # Forward the status message to the sender queue of message center.
         self.message_center.send_message(topic, payload)
 
-        # Post the status message to the listener queue of message center
-        #self.message_center.receive_message(GeneralConstants.MSG_TOPIC_REPORT_DEVICE_STATUS_IN_JOB, payload)
-
     def status_center_process_slave_status_to_mlops_in_slave_agent(self, topic, payload):
         # Forward the status message to message center.
         self.message_center.send_message(topic, payload)

From 8975a30028b966e899872c9d53f2b3aaa17afeda Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Fri, 17 May 2024 22:47:16 +0800
Subject: [PATCH 074/282] [CoreEngine] change the training timeout.

---
 python/fedml/computing/scheduler/comm_utils/constants.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/constants.py b/python/fedml/computing/scheduler/comm_utils/constants.py
index f89d5640ce..f3fcd4ed5a 100644
--- a/python/fedml/computing/scheduler/comm_utils/constants.py
+++ b/python/fedml/computing/scheduler/comm_utils/constants.py
@@ -83,7 +83,7 @@ class SchedulerConstants:
     TRAIN_PROVISIONING_TIMEOUT = 60 * 25
     TRAIN_STARTING_TIMEOUT = 60 * 15
     TRAIN_STOPPING_TIMEOUT = 60 * 5
-    TRAIN_RUNNING_TIMEOUT = 60 * 60 * 12
+    TRAIN_RUNNING_TIMEOUT = 60 * 60 * 24 * 2000
     TRAIN_INIT_TIMEOUT = 60 * 5
 
     PUBLIC_REDIS_PORT = 6379

From c26eaff6bb7ca0fb5bd1dad5bde9ad5410d23253 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Fri, 17 May 2024 13:37:00 -0700
Subject: [PATCH 075/282] Add timestamp in status payload

---
 python/fedml/core/mlops/mlops_metrics.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/python/fedml/core/mlops/mlops_metrics.py b/python/fedml/core/mlops/mlops_metrics.py
index afa96f6870..2ae415fab2 100644
--- a/python/fedml/core/mlops/mlops_metrics.py
+++ b/python/fedml/core/mlops/mlops_metrics.py
@@ -7,7 +7,7 @@
 import requests
 
 import fedml
-from . import MLOpsConfigs
+from . import MLOpsConfigs, MLOpsUtils
 from .mlops_device_perfs import MLOpsDevicePerfStats
 from .mlops_job_perfs import MLOpsJobPerfStats
 from ...computing.scheduler.master.server_constants import ServerConstants
@@ -221,6 +221,7 @@ def common_report_server_training_status(self, run_id, status, role=None, edge_i
         if role is None:
             role = "normal"
         msg = {
+            "timestamp": MLOpsUtils.get_ntp_time(),
             "run_id": run_id,
             "edge_id": edge_id,
             "status": status,

From 7b576cbdc56fb5a6d4946a34d86a042eb41b2cc5 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Fri, 17 May 2024 14:24:57 -0700
Subject: [PATCH 076/282] Fix Import

---
 python/fedml/core/mlops/mlops_metrics.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/python/fedml/core/mlops/mlops_metrics.py b/python/fedml/core/mlops/mlops_metrics.py
index 2ae415fab2..3746f498b4 100644
--- a/python/fedml/core/mlops/mlops_metrics.py
+++ b/python/fedml/core/mlops/mlops_metrics.py
@@ -7,7 +7,8 @@
 import requests
 
 import fedml
-from . import MLOpsConfigs, MLOpsUtils
+from .mlops_utils import MLOpsUtils
+from .mlops_configs import MLOpsConfigs
 from .mlops_device_perfs import MLOpsDevicePerfStats
 from .mlops_job_perfs import MLOpsJobPerfStats
 from ...computing.scheduler.master.server_constants import ServerConstants

From 3f40511877a060e8d25149af5aab9ed0f351ce0d Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Fri, 17 May 2024 17:46:59 -0700
Subject: [PATCH 077/282] Fix run logs cli command

---
 python/fedml/api/modules/run.py                      | 11 ++++++-----
 python/fedml/cli/modules/run.py                      | 12 ++++++------
 .../scheduler/scheduler_entry/run_manager.py         |  9 ++++-----
 3 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/python/fedml/api/modules/run.py b/python/fedml/api/modules/run.py
index 120a964316..cf50ce24b4 100644
--- a/python/fedml/api/modules/run.py
+++ b/python/fedml/api/modules/run.py
@@ -51,7 +51,7 @@ def start(platform: str, create_run_result: FedMLRunStartedModel, device_server:
 
     run_start_result = FedMLRunManager.get_instance().start_run(platform=platform, create_run_result=create_run_result,
                                                                 device_server=device_server, device_edges=device_edges,
-                                                                api_key=api_key,
+                                                                api_key=get_api_key(),
                                                                 feature_entry_point=feature_entry_point)
 
     return run_start_result
@@ -79,7 +79,7 @@ def status(run_name: Optional[str], run_id: str, platform: str, api_key: str) ->
     _authenticate_and_validate_platform(api_key, platform)
 
     run_status = None
-    run_list_obj = list_run(run_name=run_name, run_id=run_id, platform=platform, api_key=api_key)
+    run_list_obj = list_run(run_name=run_name, run_id=run_id, platform=platform, api_key=get_api_key())
 
     if run_list_obj is not None:
         if len(run_list_obj.run_list) > 1:
@@ -93,12 +93,13 @@ def status(run_name: Optional[str], run_id: str, platform: str, api_key: str) ->
 # input: run_id, page_num, page_size, need_all_logs, platform, api_key
 # return RunLogResult(run_status, total_log_lines, total_log_pages, log_line_list, run_logs)
 def logs(run_id: str, page_num: int, page_size: int, need_all_logs: bool, platform: str, api_key: str) -> RunLogResult:
-    _authenticate_and_validate_platform(api_key, platform)
+    api_key = authenticate(api_key)
+    validate_platform(platform)
 
     if run_id is None:
         raise Exception("Please specify run id.")
 
-    _, run_status = status(run_name=None, run_id=run_id, platform=platform, api_key=get_api_key())
+    _, run_status = status(run_name=None, run_id=run_id, platform=platform, api_key=api_key)
 
     total_log_nums, total_log_pages, log_line_list, run_logs = 0, 0, list(), None
 
@@ -110,7 +111,7 @@ def logs(run_id: str, page_num: int, page_size: int, need_all_logs: bool, platfo
                                                                user_api_key=api_key)
 
         if run_logs is not None:
-            total_log_pages, total_log_nums = run_logs.total_num, run_logs.total_pages
+            total_log_pages, total_log_nums = run_logs.total_pages, run_logs.total_num
             _parse_logs(log_line_list, run_logs)
 
         return RunLogResult(run_status=run_status, total_log_lines=total_log_nums, total_log_pages=total_log_pages,
diff --git a/python/fedml/cli/modules/run.py b/python/fedml/cli/modules/run.py
index b4e8a947fd..f2c24b445a 100644
--- a/python/fedml/cli/modules/run.py
+++ b/python/fedml/cli/modules/run.py
@@ -184,21 +184,21 @@ def status(platform, run_name, run_id, api_key, version):
     "--page_num",
     "-pn",
     type=int,
-    default=0,
+    default=1,
     help="request page num for logs. --need_all_logs should be set to False if you want to use this option.",
 )
 @click.option(
     "--page_size",
     "-ps",
     type=int,
-    default=0,
+    default=10,
     help="request page size for logs, --need_all_logs should be set to False if you want to use this option.",
 )
 @click.option(
     "--need_all_logs",
     "-a",
     type=bool,
-    default=True,
+    default=False,
     help="boolean value representing if all logs are needed. Default to True",
 )
 def logs(platform, run_id, api_key, version, page_num, page_size, need_all_logs):
@@ -217,8 +217,8 @@ def logs(platform, run_id, api_key, version, page_num, page_size, need_all_logs)
         return
 
     # Show run log summary info
-    log_head_table = PrettyTable(['Run ID', 'Total Log Lines', 'Log URL'])
-    log_head_table.add_row([run_id, run_log_result.total_log_lines, run_logs.log_full_url])
+    log_head_table = PrettyTable(['Run ID', 'Printed Log Lines', 'Total Log Lines', 'Log URL'])
+    log_head_table.add_row([run_id, len(run_log_result.log_line_list), run_logs.total_num, run_logs.log_full_url])
     click.echo("\nLogs summary info is as follows.")
     print(log_head_table)
 
@@ -234,7 +234,7 @@ def logs(platform, run_id, api_key, version, page_num, page_size, need_all_logs)
     if len(run_log_result.log_line_list) > 0:
         click.echo("\nAll logs is as follows.")
         for log_line in run_log_result.log_line_list:
-            click.echo(log_line.rstrip('\n'))
+            click.echo(log_line)
 
 
 def _print_run_table(run_list_obj):
diff --git a/python/fedml/computing/scheduler/scheduler_entry/run_manager.py b/python/fedml/computing/scheduler/scheduler_entry/run_manager.py
index 84fe109054..b91935e7b2 100755
--- a/python/fedml/computing/scheduler/scheduler_entry/run_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_entry/run_manager.py
@@ -162,10 +162,10 @@ def __init__(self, run_log_list_json):
         self.log_devices = list()
         for log_dev in log_devices_json:
             self.log_devices.append(FedMLRunLogDeviceModel(log_dev))
-        self.total_num = run_log_list_json.get("total_num", 0)
-        self.total_pages = run_log_list_json.get("total_pages", 0)
-        self.current_page = run_log_list_json.get("current_page", 0)
-        self.log_lines = run_log_list_json.get("logs", [])
+        self.total_num = run_log_list_json.get("totalSize", 0)
+        self.total_pages = run_log_list_json.get("totalPages", 0)
+        self.current_page = run_log_list_json.get("pageNum", 0)
+        self.log_lines = run_log_list_json.get("logList", [])
 
 
 class FedMLRunLogDeviceModel(object):
@@ -277,7 +277,6 @@ def get_run_logs(self, run_id: str, page_num: int, page_size: int, user_api_key:
         run_log_list_result = None
         run_logs_json = {
             "apiKey": user_api_key,
-            "edgeId": "-1",
             "pageNum": page_num,
             "pageSize": page_size,
             "runId": run_id,

From 8c13ed2ad04b94bb291124c5c70b91c78cd50900 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Mon, 20 May 2024 17:38:46 +0800
Subject: [PATCH 078/282] [CoreEngine] make the server status work.

---
 .../master_protocol_manager.py                |  6 ++
 .../scheduler_core/message_common.py          |  2 +
 .../scheduler/scheduler_core/status_center.py | 62 ++++++++++++-------
 .../status_manager_protocols.py               |  4 +-
 4 files changed, 48 insertions(+), 26 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index 01165ff82e..09c2dd5d17 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -11,6 +11,7 @@
 from .master_job_runner_manager import FedMLDeployJobRunnerManager
 from ..scheduler_core.general_constants import GeneralConstants
 from ..scheduler_core.endpoint_sync_protocol import FedMLEndpointSyncProtocol
+from ..scheduler_core.compute_cache_manager import ComputeCacheManager
 
 
 class FedMLDeployMasterProtocolManager(FedMLBaseMasterProtocolManager):
@@ -135,6 +136,11 @@ def callback_delete_deployment(self, topic, payload):
             model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_id,
             model_msg_object.model_name, model_msg_object.model_version)
 
+        # Report the launch job status with killed status.
+        launch_job_id = ComputeCacheManager.get_instance().get_gpu_cache().get_endpoint_run_id_map(self.run_id)
+        if launch_job_id is not None:
+            self.status_reporter.report_server_id_status(launch_job_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED)
+
     def callback_start_deployment(self, topic, payload):
         # noinspection PyBroadException
         try:
diff --git a/python/fedml/computing/scheduler/scheduler_core/message_common.py b/python/fedml/computing/scheduler/scheduler_core/message_common.py
index 24449af3b5..13b99ff39d 100755
--- a/python/fedml/computing/scheduler/scheduler_core/message_common.py
+++ b/python/fedml/computing/scheduler/scheduler_core/message_common.py
@@ -49,6 +49,7 @@ def __init__(self, topic=None, payload=None, status_msg_body: dict = None):
         self.payload = payload
         self.run_id = None
         self.edge_id = None
+        self.server_id = None
         self.status = None
         if status_msg_body is not None:
             self.from_message_body(status_msg_body=status_msg_body)
@@ -61,6 +62,7 @@ def from_message_body(self, status_msg_body: dict = None):
             self.run_id = payload_json.get("run_id", None)
             self.run_id = payload_json.get("runId", None) if self.run_id is None else self.run_id
             self.edge_id = payload_json.get("edge_id", None)
+            self.server_id = payload_json.get("server_id", None)
             self.status = payload_json.get("status", None)
 
     def get_message_body(self):
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py
index c0e1b6633a..fc1c726b5f 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py
@@ -81,6 +81,7 @@ class FedMLStatusCenter(object):
     TOPIC_SLAVE_JOB_LAUNCH_SUFFIX = "/start_train"
     TOPIC_SLAVE_JOB_STOP_PREFIX = "flserver_agent/"
     TOPIC_SLAVE_JOB_STOP_SUFFIX = "/stop_train"
+    ALLOWED_MAX_JOB_STATUS_CACHE_NUM = 1000
 
     def __init__(self, message_queue=None):
         self.status_queue = message_queue
@@ -203,38 +204,43 @@ def run_status_dispatcher(self, status_event, status_queue,
                 status_entity = FedMLStatusEntity(status_msg_body=message_body)
 
                 # Generate status manager instance
-                if status_manager_instances.get(status_entity.run_id) is None:
-                    status_manager_instances[status_entity.run_id] = FedMLStatusManager(
-                        run_id=status_entity.run_id, edge_id=status_entity.edge_id, status_center=self,
+                run_id_str = str(status_entity.run_id)
+                run_id_int = int(status_entity.run_id)
+                if status_manager_instances.get(run_id_str) is None:
+                    if len(status_manager_instances.keys()) >= FedMLStatusCenter.ALLOWED_MAX_JOB_STATUS_CACHE_NUM:
+                        for iter_run_id, iter_status_mgr in status_manager_instances.items():
+                            if iter_status_mgr.is_job_completed():
+                                status_manager_instances.pop(iter_run_id)
+                                break
+                    status_manager_instances[run_id_str] = FedMLStatusManager(
+                        run_id=run_id_int, edge_id=status_entity.edge_id,
+                        server_id=status_entity.server_id, status_center=self,
                         message_center=message_center)
                 else:
-                    status_manager_instances[status_entity.run_id].edge_id = status_entity.edge_id
+                    status_manager_instances[run_id_str].edge_id = status_entity.edge_id
+                    if status_entity.server_id is not None and status_entity.server_id != 0:
+                        status_manager_instances[run_id_str].server_id = status_entity.server_id
 
                 # if the job status is completed then continue
-                if status_manager_instances[status_entity.run_id].is_job_completed():
+                if status_manager_instances[run_id_str].is_job_completed():
                     continue
 
                 # Process the master and slave status.
                 if message_entity.topic.startswith(FedMLStatusCenter.TOPIC_MASTER_STATUS_PREFIX):
                     # Process the job status
-                    status_manager_instances[status_entity.run_id].status_center_process_master_status(
+                    status_manager_instances[run_id_str].status_center_process_master_status(
                         message_entity.topic, message_entity.payload)
 
                     # Save the job status
-                    status_manager_instances[status_entity.run_id].save_job_status()
-
-                    # Popup the status manager instance when the job status is completed
-                    if status_manager_instances[status_entity.run_id].is_job_completed():
-                        status_manager_instances.pop(status_entity.run_id)
-                        continue
+                    status_manager_instances[run_id_str].save_job_status()
 
                 elif message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_STATUS_PREFIX):
                     # Process the slave device status
-                    status_manager_instances[status_entity.run_id].status_center_process_slave_status(
+                    status_manager_instances[run_id_str].status_center_process_slave_status(
                         message_entity.topic, message_entity.payload)
 
                     # Save the device status in job
-                    status_manager_instances[status_entity.run_id].save_device_status_in_job(status_entity.edge_id)
+                    status_manager_instances[run_id_str].save_device_status_in_job(status_entity.edge_id)
 
             except Exception as e:
                 if message_entity is not None:
@@ -295,40 +301,48 @@ def run_status_dispatcher_in_slave(self, status_event, status_queue,
                 status_entity = FedMLStatusEntity(status_msg_body=message_body)
 
                 # Generate status manager instance
-                if status_manager_instances.get(status_entity.run_id) is None:
-                    status_manager_instances[status_entity.run_id] = FedMLStatusManager(
-                        run_id=status_entity.run_id, edge_id=status_entity.edge_id, status_center=self,
+                run_id_str = str(status_entity.run_id)
+                run_id_int = int(status_entity.run_id)
+                if status_manager_instances.get(run_id_str) is None:
+                    if len(status_manager_instances.keys()) >= FedMLStatusCenter.ALLOWED_MAX_JOB_STATUS_CACHE_NUM:
+                        for iter_run_id, iter_status_mgr in status_manager_instances.items():
+                            if iter_status_mgr.is_job_completed():
+                                status_manager_instances.pop(iter_run_id)
+                                break
+                                
+                    status_manager_instances[run_id_str] = FedMLStatusManager(
+                        run_id=run_id_int, edge_id=status_entity.edge_id, status_center=self,
                         message_center=message_center)
                 else:
-                    status_manager_instances[status_entity.run_id].edge_id = status_entity.edge_id
+                    status_manager_instances[run_id_str].edge_id = status_entity.edge_id
 
                 # Process the slave status
                 if message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_STATUS_PREFIX):
                     # Report the slave status to master
-                    status_manager_instances[status_entity.run_id]. \
+                    status_manager_instances[run_id_str]. \
                         status_center_process_slave_status_to_master_in_slave_agent(
                         message_entity.topic, message_entity.payload
                     )
                 elif message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_STATUS_TO_MLOPS_PREFIX):
                     # Report slave status to mlops (Active/IDLE message)
-                    status_manager_instances[status_entity.run_id]. \
+                    status_manager_instances[run_id_str]. \
                         status_center_process_slave_status_to_mlops_in_slave_agent(
                         message_entity.topic, message_entity.payload
                     )
                 elif (message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_LAUNCH_PREFIX) and
                       message_entity.topic.endswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_LAUNCH_SUFFIX)):
                     # Async request the job status from master when launching the job
-                    job_launch_message_map[status_entity.run_id] = {"topic": message_entity.topic,
+                    job_launch_message_map[run_id_str] = {"topic": message_entity.topic,
                                                                     "payload": message_entity.payload}
-                    # status_manager_instances[status_entity.run_id]. \
+                    # status_manager_instances[run_id_str]. \
                     #     status_center_request_job_status_from_master_in_slave_agent(
                     #     message_entity.topic, message_entity.payload
                     # )
                 elif (message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_STOP_PREFIX) and
                       message_entity.topic.endswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_STOP_SUFFIX)):
                     # Cleanup when stopped the job
-                    if job_launch_message_map.get(status_entity.run_id, None) is not None:
-                        job_launch_message_map.pop(status_entity.run_id)
+                    if job_launch_message_map.get(run_id_str, None) is not None:
+                        job_launch_message_map.pop(run_id_str)
 
             except Exception as e:
                 if message_entity is not None:
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
index 272423f147..31afed463b 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
@@ -294,7 +294,7 @@ def process_device_status(self, run_id, edge_id, status):
             fault_tolerance_rate=fault_tolerance_rate)
         if status_to_report is not None:
             logging.info(f"Run completed when processing edge status, will report status {status_to_report}")
-            self.report_server_status(run_id, edge_id, server_id, status_to_report)
+            self.report_server_status(run_id, server_id, server_id, status_to_report)
 
     def calculate_server_status(
             self, run_id, total_edge_nums, number_of_failed_edges, number_of_finished_edges,
@@ -340,7 +340,7 @@ def parse_fault_tolerance_params(self, run_id):
 
     def report_server_status(self, run_id, edge_id, server_id, status):
         self.status_reporter.report_server_id_status(
-            run_id, status, edge_id=edge_id, server_id=server_id, server_agent_id=edge_id, update_db=False)
+            run_id, status, edge_id=edge_id, server_id=server_id, server_agent_id=server_id, update_db=False)
 
     def report_exception_status(self, status):
         self.status_reporter.report_job_status(self.run_id, status)

From 98eff5585390047ff99e7c268abb92cc9cfd7f11 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Mon, 20 May 2024 17:40:26 +0800
Subject: [PATCH 079/282] [CoreEngine] make the server status work.

---
 .../fedml/computing/scheduler/scheduler_core/status_center.py   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py
index fc1c726b5f..65258af2d9 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py
@@ -309,7 +309,7 @@ def run_status_dispatcher_in_slave(self, status_event, status_queue,
                             if iter_status_mgr.is_job_completed():
                                 status_manager_instances.pop(iter_run_id)
                                 break
-                                
+
                     status_manager_instances[run_id_str] = FedMLStatusManager(
                         run_id=run_id_int, edge_id=status_entity.edge_id, status_center=self,
                         message_center=message_center)

From f037b32d4aca2142152aec3f03aa0ef1855616c8 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Mon, 20 May 2024 19:59:01 +0800
Subject: [PATCH 080/282] [CoreEngine] make the job stopping work.

---
 .../master/base_master_job_runner.py          | 12 +++++-------
 .../master/base_master_protocol_manager.py    | 19 +++++++++++--------
 .../scheduler_base_job_runner_manager.py      |  4 ++++
 .../status_manager_protocols.py               |  2 +-
 .../slave/base_slave_protocol_manager.py      | 15 +++++----------
 python/fedml/core/mlops/mlops_metrics.py      |  6 ++++--
 6 files changed, 30 insertions(+), 28 deletions(-)

diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py
index 18aa8a6eed..9ebab258bb 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py
@@ -285,6 +285,10 @@ def run_server_job_impl(self, process_event, completed_event,
         self.args.run_id = self.run_id
         MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
 
+        self.status_reporter.report_server_id_status(
+            run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_RUNNING, edge_id=self.edge_id,
+            server_id=self.edge_id, server_agent_id=self.edge_id)
+
         # get training params
         private_local_data_dir = data_config.get("privateLocalData", "")
         is_using_local_data = 0
@@ -562,7 +566,7 @@ def detect_edges_status(
         return True, active_edge_info_dict, inactivate_edges
 
     def report_exception_status(self, run_id):
-        self.status_reporter.report_job_status(run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION)
+        self.mlops_metrics.report_job_status(run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION)
 
     def callback_run_logs(self, topic, payload):
         run_id = str(topic).split('/')[-1]
@@ -618,12 +622,6 @@ def send_training_request_to_edges(self, request_json, active_edge_info_dict=Non
                            f"request GPU count {request_num_gpus}"
                 logging.error(err_info)
 
-                # Bug fix: This mqtt message needs to be sent so platform can clean up the failed run and change the
-                # status from running to failed.
-                self.mlops_metrics.report_server_training_status(
-                    run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id
-                )
-
                 self.status_reporter.report_server_id_status(
                     run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
                     server_id=self.edge_id, server_agent_id=self.server_agent_id)
diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
index cee91578dd..46a6448269 100755
--- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -267,19 +267,22 @@ def callback_stop_train(self, topic, payload, use_payload=None):
         server_id = request_json.get("serverId", None)
         if server_id is None:
             server_id = request_json.get("server_id", None)
+        edge_ids = request_json.get("edgeids", None)
 
-        # Broadcast the job status to all edges
-        self.rebuild_status_center(self.get_status_queue())
-        self.status_reporter.report_job_status(run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED)
+        # Stop the job runner
+        self._get_job_runner_manager().stop_job_runner(
+            run_id, args=self.args, server_id=server_id, request_json=request_json,
+            run_as_cloud_agent=self.run_as_cloud_agent)
 
         # Cleanup the cached object
         if self.running_request_json.get(run_id_str, None) is not None:
             self.running_request_json.pop(run_id_str)
 
-        # Stop the job runner
-        self._get_job_runner_manager().stop_job_runner(
-            run_id, args=self.args, server_id=server_id, request_json=request_json,
-            run_as_cloud_agent=self.run_as_cloud_agent)
+        # Reset all edge status and server status
+        for iter_edge_id in edge_ids:
+            self.generate_status_report(run_id, iter_edge_id, server_agent_id=server_id).\
+                report_client_id_status(iter_edge_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED,
+                                        run_id=run_id, server_id=server_id)
 
     def callback_complete_job(self, topic, payload):
         # Parse the parameters.
@@ -536,7 +539,7 @@ def send_status_msg_to_edges(self, edge_id_list, run_id, server_id, context=None
             self.send_status_check_msg(run_id, edge_id, self.edge_id, context=context)
 
     def report_exception_status(self, run_id):
-        self.status_reporter.report_job_status(run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION)
+        self.mlops_metrics.report_job_status(run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION)
 
     @staticmethod
     def get_start_train_topic_with_edge_id(edge_id):
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
index 77768da6c0..dcc4045699 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
@@ -39,6 +39,10 @@ def stop_job_runner(self, run_id):
         if self.job_runners.get(run_id_str, None) is not None:
             self.job_runners[run_id_str].trigger_stop_event()
 
+    def stop_all_job_runner(self):
+        for run_id, job_runner in self.job_runners.items():
+            job_runner.trigger_stop_event()
+
     def complete_job_runner(self, run_id):
         run_id_str = str(run_id)
         if self.job_runners.get(run_id_str, None) is not None:
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
index 31afed463b..96f5e4920f 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
@@ -343,7 +343,7 @@ def report_server_status(self, run_id, edge_id, server_id, status):
             run_id, status, edge_id=edge_id, server_id=server_id, server_agent_id=server_id, update_db=False)
 
     def report_exception_status(self, status):
-        self.status_reporter.report_job_status(self.run_id, status)
+        self.message_reporter.report_job_status(self.run_id, status)
 
     def status_center_process_slave_status_to_master_in_slave_agent(self, topic, payload):
         # Forward the status message to the sender queue of message center.
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index aa69d4482d..a6d43936d2 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -57,7 +57,6 @@ def __init__(self, args, agent_config=None):
         self.fl_topic_request_device_info = None
         self.communication_mgr = None
         self.subscribed_topics = list()
-        self.job_runners = dict()
         self.ota_upgrade = FedMLOtaUpgrade(edge_id=args.edge_id)
         self.running_request_json = dict()
         self.start_request_json = None
@@ -423,8 +422,7 @@ def callback_client_logout(self, topic, payload):
         if secret is None or str(secret) != "246b1be6-0eeb-4b17-b118-7d74de1975d4":
             return
         logging.info("Received the logout request.")
-        for runner in self.job_runners:
-            runner.trigger_stop_event()
+        self._get_job_runner_manager().stop_all_job_runner()
         self.disable_client_login = True
         time.sleep(3)
         os.system("fedml logout")
@@ -451,7 +449,7 @@ def callback_response_job_status(self, topic, payload):
 
         # process the status
         logging.info("process status in the job status callback.")
-        self.process_status(run_id, job_status, edge_id)
+        self.process_status(run_id, job_status, edge_id, master_id=master_agent)
 
     def callback_broadcasted_job_status(self, topic, payload):
         # Parse the parameters
@@ -489,15 +487,14 @@ def generate_protocol_manager(self):
 
         return message_status_runner
 
-    def process_status(self, run_id, status, edge_id):
+    def process_status(self, run_id, status, edge_id, master_id=None):
         run_id_str = str(run_id)
 
         # Process the completed status
         if status == GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \
                 status == GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \
                 status == GeneralConstants.MSG_MLOPS_CLIENT_STATUS_KILLED:
-            if self.job_runners.get(run_id_str, None) is not None:
-                self.job_runners[run_id_str].trigger_completed_event()
+            self._get_job_runner_manager().complete_job_runner(run_id)
 
             # Stop the sys perf process
             # noinspection PyBoardException
@@ -584,9 +581,7 @@ def get_all_run_process_list_map(self):
         return run_process_dict
 
     def stop_job(self, run_id):
-        run_id_str = str(run_id)
-        if self.job_runners.get(run_id_str, None) is not None:
-            self.job_runners[run_id_str].trigger_stop_event()
+        self._get_job_runner_manager().stop_job_runner(run_id)
 
     @staticmethod
     def get_start_train_topic_with_edge_id(edge_id):
diff --git a/python/fedml/core/mlops/mlops_metrics.py b/python/fedml/core/mlops/mlops_metrics.py
index afa96f6870..c27a683759 100644
--- a/python/fedml/core/mlops/mlops_metrics.py
+++ b/python/fedml/core/mlops/mlops_metrics.py
@@ -185,9 +185,11 @@ def report_server_training_status(self, run_id, status, edge_id=0, role=None,
                 from ...computing.scheduler.master.server_data_interface import FedMLServerDataInterface
                 FedMLServerDataInterface.get_instance().save_job(run_id, self.edge_id, status, running_json)
 
-    def report_job_status(self, run_id, status):
+    def report_job_status(self, run_id, status, master_id=None):
         topic_name = f"master_agent/slave_agent/job_status/{run_id}"
-        payload = {"run_id": run_id, "status": status}
+        payload = {"run_id": run_id, "status": status, "fedml_version": fedml.__version__}
+        if master_id is not None:
+            payload["master_agent"] = master_id
 
         message_json = json.dumps(payload)
         self.send_message(topic_name, message_json)

From a0664d87f54cb4d2e3faca683f4562bbdb8f5976 Mon Sep 17 00:00:00 2001
From: Ubuntu <cirrascale@QAIECC5.san01.cirrascale.net>
Date: Mon, 20 May 2024 22:55:59 +0000
Subject: [PATCH 081/282] Add support for GPU Utilization

---
 .../comm_utils/gpu_utils/qualcomm_utils.py    | 20 ++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
index 9c7ea21ea9..88114cf2ad 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
@@ -43,14 +43,16 @@ def get_gpu_cards() -> List[GPUCard]:
 
     @staticmethod
     def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memory: float) -> List[int]:
-
-        if order != "memory":
+        gpu_cards: List[GPUCard] = QualcommNPUtil.get_gpu_cards()
+        gpu_cards = list(filter(lambda card: (card.memoryUtil < max_memory and card.load < max_load), gpu_cards))
+        if order == 'memory':
+            gpu_cards.sort(key=lambda card: float('inf') if math.isnan(card.memoryUtil) else card.memoryUtil, reverse=False)
+        elif order == 'load':
+            gpu_cards.sort(key=lambda card: float('inf') if math.isnan(card.memoryUtil) else card.load, reverse=False)
+        else:
             raise NotImplementedError(f"Qualcomm utils doesn't have support to compute availability based on {order}. "
-                                      f"Supported criteria: [memory]")
+                                      f"Supported criteria: [memory, load]")
 
-        gpu_cards: List[GPUCard] = QualcommNPUtil.get_gpu_cards()
-        gpu_cards = list(filter(lambda card: card.memoryUtil < max_memory, gpu_cards))
-        gpu_cards.sort(key=lambda card: float('inf') if math.isnan(card.memoryUtil) else card.memoryUtil, reverse=False)
         gpu_cards = gpu_cards[0:min(limit, len(gpu_cards))]
         return list(map(lambda card: card.id, gpu_cards))
 
@@ -75,11 +77,14 @@ def get_docker_gpu_ids_by_container_name(container_name: str, docker_client: Doc
 
     @staticmethod
     def __convert(npu) -> GPUCard:
-        # TODO (alaydshah): Add support for load, memoryUtil, temperature
+        # TODO (alaydshah): Add support for temperature
         memory_total = npu.devData.resourceInfo.dramTotal / 1024
         memory_free = npu.devData.resourceInfo.dramFree / 1024
         memory_used = memory_total - memory_free
         memory_utilized = float(memory_used) / float(memory_total)
+        nsp_free = npu.devData.resourceInfo.nspFree
+        nsp_total = npu.devData.resourceInfo.nspTotal
+        load = (nsp_total - nsp_free) / nsp_total
 
         return GPUCard(
             id=npu.qid,
@@ -91,6 +96,7 @@ def __convert(npu) -> GPUCard:
             memoryFree=memory_free,
             memoryUsed=memory_used,
             memoryUtil=memory_utilized,
+            load=load,
         )
 
     @staticmethod

From ae06bf312bf7feb1467a0d481557f2bfa95bc1a4 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Tue, 21 May 2024 01:52:39 -0700
Subject: [PATCH 082/282] Add 10 minutes TTL Cache for config fetch

---
 .../computing/scheduler/model_scheduler/modelops_configs.py     | 2 ++
 python/fedml/core/mlops/mlops_configs.py                        | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/python/fedml/computing/scheduler/model_scheduler/modelops_configs.py b/python/fedml/computing/scheduler/model_scheduler/modelops_configs.py
index e988c29a8a..719f3825c4 100644
--- a/python/fedml/computing/scheduler/model_scheduler/modelops_configs.py
+++ b/python/fedml/computing/scheduler/model_scheduler/modelops_configs.py
@@ -4,6 +4,7 @@
 
 import certifi
 import requests
+import cachetools.func
 
 import fedml
 from fedml.core.mlops.mlops_utils import MLOpsUtils
@@ -32,6 +33,7 @@ def get_instance(args):
         return ModelOpsConfigs._config_instance
 
     @staticmethod
+    @cachetools.func.ttl_cache(ttl=600)
     def get_request_params():
         url = fedml._get_backend_service()
         url = "{}/fedmlOpsServer/configs/fetch".format(url)
diff --git a/python/fedml/core/mlops/mlops_configs.py b/python/fedml/core/mlops/mlops_configs.py
index c8c6422d6c..6c25c38128 100644
--- a/python/fedml/core/mlops/mlops_configs.py
+++ b/python/fedml/core/mlops/mlops_configs.py
@@ -4,6 +4,7 @@
 
 import certifi
 import requests
+import cachetools.func
 
 import fedml
 from fedml.core.mlops.mlops_utils import MLOpsUtils
@@ -41,6 +42,7 @@ def __init__(self):
         pass
 
     @staticmethod
+    @cachetools.func.ttl_cache(ttl=600)
     def get_request_params():
         url = fedml._get_backend_service()
         url = f"{url}/fedmlOpsServer/configs/fetch"

From 3214f4a569e63a260000a14267e6e1f1147b7791 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Tue, 21 May 2024 20:38:01 +0800
Subject: [PATCH 083/282] [CoreEngine] fix the issue where the endpoint status
 is aborted when a fresh deployment fails.

---
 .../master/base_master_protocol_manager.py    |  9 +++--
 .../scheduler/master/deploy_job_launcher.py   | 12 +++++-
 .../model_scheduler/master_job_runner.py      |  6 ++-
 .../master_protocol_manager.py                | 10 ++++-
 .../scheduler/scheduler_core/status_center.py |  5 ++-
 .../slave/base_slave_protocol_manager.py      | 40 -------------------
 python/fedml/core/mlops/mlops_runtime_log.py  |  2 +
 7 files changed, 35 insertions(+), 49 deletions(-)

diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
index 46a6448269..c95d73f4bf 100755
--- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -284,6 +284,10 @@ def callback_stop_train(self, topic, payload, use_payload=None):
                 report_client_id_status(iter_edge_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED,
                                         run_id=run_id, server_id=server_id)
 
+        # For backward compatibility with older edge devices, we still send the stop-train message to edges.
+        # The latest version of edge devices no longer needs to process the stop-train message.
+        self.send_training_stop_request_to_edges(edge_ids, payload=payload, run_id=run_id)
+
     def callback_complete_job(self, topic, payload):
         # Parse the parameters.
         request_json = json.loads(payload)
@@ -508,13 +512,12 @@ def send_training_stop_request_to_edges(
             self, edge_id_list, payload=None, run_id=0):
         if payload is None:
             payload_obj = {"runId": run_id, "edgeids": edge_id_list}
-        else:
-            payload_obj = json.loads(payload)
+            payload = json.dumps(payload_obj)
 
         for edge_id in edge_id_list:
             topic_stop_train = "flserver_agent/" + str(edge_id) + "/stop_train"
             logging.info("stop_train: send topic " + topic_stop_train)
-            self.message_center.send_message(topic_stop_train, json.dumps(payload_obj))
+            self.message_center.send_message(topic_stop_train, payload)
 
     def send_training_stop_request_to_specific_edge(self, edge_id, payload):
         topic_stop_train = "flserver_agent/" + str(edge_id) + "/stop_train"
diff --git a/python/fedml/computing/scheduler/master/deploy_job_launcher.py b/python/fedml/computing/scheduler/master/deploy_job_launcher.py
index e4af2a20be..359c6e641f 100755
--- a/python/fedml/computing/scheduler/master/deploy_job_launcher.py
+++ b/python/fedml/computing/scheduler/master/deploy_job_launcher.py
@@ -3,6 +3,7 @@
 from fedml.computing.scheduler.model_scheduler import device_client_constants
 from fedml.computing.scheduler.model_scheduler.device_model_cards import FedMLModelCards
 from fedml.computing.scheduler.scheduler_entry.constants import Constants
+from fedml.computing.scheduler.scheduler_core.compute_cache_manager import ComputeCacheManager
 
 
 class FedMLDeployJobLauncher:
@@ -40,6 +41,8 @@ def deploy_model(serving_devices, request_json, run_id):
                 "", random_list[1], None,
                 in_model_id=model_id, in_model_version=model_version,
                 endpoint_name=endpoint_name, endpoint_id=endpoint_id, run_id=run_id)
+            return endpoint_id
+        return None
 
     def check_model_device_ready_and_deploy(self, request_json, run_id, master_device_id,
                                             slave_device_id, run_edge_ids=None):
@@ -87,4 +90,11 @@ def check_model_device_ready_and_deploy(self, request_json, run_id, master_devic
         serving_devices.extend(device_slave_ids)
 
         # Start to deploy the model
-        FedMLDeployJobLauncher.deploy_model(serving_devices, request_json, run_id=run_id)
+        endpoint_id = FedMLDeployJobLauncher.deploy_model(serving_devices, request_json, run_id=run_id)
+
+        # Save the relationship between run id and endpoint
+        ComputeCacheManager.get_instance().set_redis_params()
+        ComputeCacheManager.get_instance().get_gpu_cache().set_endpoint_run_id_map(
+            endpoint_id, run_id)
+
+
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index eef03d53f2..bf9cee3279 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -51,6 +51,7 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id
         self.deployed_replica_payload = None
         self.slave_deployment_results_map = dict()
         self.deployment_result_queue = Queue()
+        self.is_fresh_endpoint = True
 
     # Override
     def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None, ):
@@ -75,6 +76,7 @@ def run_impl(
             inference_end_point_id, use_gpu, memory_size, model_version, inference_port = \
             FedMLDeployMasterJobRunner.parse_model_run_params(self.request_json)
         self.run_id = run_id
+        self.is_fresh_endpoint = self.request_json.get("is_fresh_endpoint", True)
 
         # Print request parameters.
         logging.info("model deployment request: {}".format(self.request_json))
@@ -246,7 +248,7 @@ def process_deployment_result_message(self, topic=None, payload=None):
                           f"{self.request_json}")
             return
 
-        logging.info(f"End point {end_point_id}; Device {device_id}; replica {replica_no}; "
+        logging.info(f"Endpoint {end_point_id}; Device {device_id}; replica {replica_no}; "
                      f"run_operation {run_operation} model status {model_status}.")
 
         # OPTIONAL DEBUG PARAMS
@@ -280,7 +282,7 @@ def process_deployment_result_message(self, topic=None, payload=None):
                 logging.error(f"Unsupported model status {model_status}.")
 
             # Avoid endless loop, if the rollback also failed, we should report the failure to the MLOps
-            if self.replica_controller.under_rollback:
+            if self.replica_controller.under_rollback or self.is_fresh_endpoint:
                 self.send_deployment_status(
                     end_point_id, end_point_name, payload_json["model_name"], "",
                     ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index 09c2dd5d17..d21ab44c6e 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -109,6 +109,10 @@ def callback_delete_deployment(self, topic, payload):
         # Parse payload as the model message object.
         model_msg_object = FedMLModelMsgObject(topic, payload)
 
+        # Get the launch job id
+        ComputeCacheManager.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
+        launch_job_id = ComputeCacheManager.get_instance().get_gpu_cache().get_endpoint_run_id_map(self.run_id)
+
         # Delete SQLite records
         FedMLServerDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id)
         FedMLModelDatabase.get_instance().delete_deployment_result(
@@ -137,7 +141,6 @@ def callback_delete_deployment(self, topic, payload):
             model_msg_object.model_name, model_msg_object.model_version)
 
         # Report the launch job status with killed status.
-        launch_job_id = ComputeCacheManager.get_instance().get_gpu_cache().get_endpoint_run_id_map(self.run_id)
         if launch_job_id is not None:
             self.status_reporter.report_server_id_status(launch_job_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED)
 
@@ -180,6 +183,11 @@ def callback_start_deployment(self, topic, payload):
         # Set redis config
         FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
 
+        # Query if the endpoint exists
+        endpoint_device_info = FedMLModelCache.get_instance(self.redis_addr, self.redis_port).get_end_point_device_info(
+            request_json["end_point_id"])
+        request_json["is_fresh_endpoint"] = True if endpoint_device_info is None else False
+
         # Save the user setting (about replica number) of this run to Redis, if existed, update it
         FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_user_setting_replica_num(
             end_point_id=run_id, end_point_name=end_point_name, model_name=model_name, model_version=model_version,
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py
index 65258af2d9..4ababbc826 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py
@@ -331,9 +331,10 @@ def run_status_dispatcher_in_slave(self, status_event, status_queue,
                     )
                 elif (message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_LAUNCH_PREFIX) and
                       message_entity.topic.endswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_LAUNCH_SUFFIX)):
+                    pass
                     # Async request the job status from master when launching the job
-                    job_launch_message_map[run_id_str] = {"topic": message_entity.topic,
-                                                                    "payload": message_entity.payload}
+                    # job_launch_message_map[run_id_str] = {"topic": message_entity.topic,
+                    #                                       "payload": message_entity.payload}
                     # status_manager_instances[run_id_str]. \
                     #     status_center_request_job_status_from_master_in_slave_agent(
                     #     message_entity.topic, message_entity.payload
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index a6d43936d2..de97684061 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -10,7 +10,6 @@
 from ..comm_utils.constants import SchedulerConstants
 from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs
 from ..comm_utils.run_process_utils import RunProcessUtils
-from ....core.mlops import MLOpsMetrics
 from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
 from ....core.mlops.mlops_configs import MLOpsConfigs
 from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
@@ -44,7 +43,6 @@ def __init__(self, args, agent_config=None):
         self.unique_device_id = args.unique_device_id
         self.agent_config = agent_config
         self.topic_start_train = None
-        self.topic_stop_train = None
         self.topic_report_status = None
         self.topic_ota_msg = None
         self.topic_request_device_info = None
@@ -53,7 +51,6 @@ def __init__(self, args, agent_config=None):
         self.topic_response_job_status = None
         self.topic_report_device_status_in_job = None
         self.fl_topic_start_train = None
-        self.fl_topic_stop_train = None
         self.fl_topic_request_device_info = None
         self.communication_mgr = None
         self.subscribed_topics = list()
@@ -75,9 +72,6 @@ def generate_topics(self):
         # The topic for stopping training
         self.topic_start_train = "flserver_agent/" + str(self.edge_id) + "/start_train"
 
-        # The topi for stopping training
-        self.topic_stop_train = "flserver_agent/" + str(self.edge_id) + "/stop_train"
-
         # The topic for reporting current device status.
         self.topic_report_status = "mlops/report_device_status"
 
@@ -107,13 +101,11 @@ def generate_topics(self):
 
         if self.general_edge_id is not None:
             self.fl_topic_start_train = "flserver_agent/" + str(self.general_edge_id) + "/start_train"
-            self.fl_topic_stop_train = "flserver_agent/" + str(self.general_edge_id) + "/stop_train"
             self.fl_topic_request_device_info = "server/client/request_device_info/" + str(self.general_edge_id)
 
         # Subscribe topics for starting train, stopping train and fetching client status.
         self.subscribed_topics.clear()
         self.add_subscribe_topic(self.topic_start_train)
-        self.add_subscribe_topic(self.topic_stop_train)
         self.add_subscribe_topic(self.topic_report_status)
         self.add_subscribe_topic(self.topic_ota_msg)
         self.add_subscribe_topic(self.topic_request_device_info)
@@ -123,7 +115,6 @@ def generate_topics(self):
         self.add_subscribe_topic(self.topic_report_device_status_in_job)
         if self.general_edge_id is not None:
             self.add_subscribe_topic(self.fl_topic_start_train)
-            self.add_subscribe_topic(self.fl_topic_stop_train)
             self.add_subscribe_topic(self.fl_topic_request_device_info)
 
     @abstractmethod
@@ -132,7 +123,6 @@ def add_protocol_handler(self):
         # self.add_message_listener(self.topic_start_train, self.callback_start_train)
         # Add the message listeners for all topics
         self.add_message_listener(self.topic_start_train, self.callback_start_train)
-        self.add_message_listener(self.topic_stop_train, self.callback_stop_train)
         self.add_message_listener(self.topic_ota_msg, FedMLBaseSlaveProtocolManager.callback_client_ota_msg)
         self.add_message_listener(self.topic_report_status, self.callback_report_current_status)
         self.add_message_listener(self.topic_request_device_info, self.callback_report_device_info)
@@ -141,7 +131,6 @@ def add_protocol_handler(self):
         self.add_message_listener(self.topic_response_job_status, self.callback_response_job_status)
         self.add_message_listener(self.topic_report_device_status_in_job, self.callback_response_device_status_in_job)
         self.add_message_listener(self.fl_topic_start_train, self.callback_start_train)
-        self.add_message_listener(self.fl_topic_stop_train, self.callback_stop_train)
         self.add_message_listener(self.fl_topic_request_device_info, self.callback_report_device_info)
 
     @abstractmethod
@@ -295,27 +284,6 @@ def callback_start_train(self, topic, payload):
         # Register the job launch message into the status center
         self.register_job_launch_message(topic, payload)
 
-    def callback_stop_train(self, topic, payload):
-        # Parse the parameters.
-        edge_id = str(topic).split("/")[-2]
-        request_json = json.loads(payload)
-        is_retain = request_json.get("is_retain", False)
-        if is_retain:
-            return
-        run_id = request_json.get("runId", None)
-        run_id = request_json.get("id", None) if run_id is None else run_id
-        run_status = request_json.get("run_status", GeneralConstants.MSG_MLOPS_CLIENT_STATUS_KILLED)
-
-        # logging.info("Stop run with multiprocessing...")
-        # Stop client with multiprocessing mode
-        run_id_str = str(run_id)
-        self._get_job_runner_manager().cleanup_containers_and_release_gpus(
-            run_id, edge_id, SchedulerConstants.JOB_TASK_TYPE_TRAIN)
-        self.sync_run_stop_status(run_status=run_status)
-
-        # Register the job stopping message into the status center
-        self.register_job_stop_message(topic, payload)
-
     def callback_report_current_status(self, topic, payload):
         logging.info(
             f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
@@ -561,14 +529,6 @@ def remove_listener_job_status(self, run_id):
         self.remove_message_listener(topic_job_status_from_master)
         self.unsubscribe_msg(topic_job_status_from_master)
 
-    def sync_run_stop_status(self, run_status=GeneralConstants.MSG_MLOPS_CLIENT_STATUS_KILLED):
-        try:
-            self.status_reporter.report_client_id_status(
-                self.edge_id, run_status, server_id=self.server_id, run_id=self.run_id)
-        except Exception as e:
-            logging.error(f"Failed to sync run stop status with Exception {e}. Traceback: {traceback.format_exc()}")
-            pass
-
     def get_all_run_process_list_map(self):
         run_process_dict = dict()
         all_runner_pid_dict = self._get_job_runner_manager().get_all_runner_pid_map()
diff --git a/python/fedml/core/mlops/mlops_runtime_log.py b/python/fedml/core/mlops/mlops_runtime_log.py
index 0bc4dc6b6c..0fc5db3d23 100644
--- a/python/fedml/core/mlops/mlops_runtime_log.py
+++ b/python/fedml/core/mlops/mlops_runtime_log.py
@@ -143,6 +143,8 @@ def __init__(self, args):
             self.should_write_log_file = args.using_mlops
         else:
             self.should_write_log_file = False
+        if not hasattr(args, "log_file_dir"):
+            setattr(args, "log_file_dir", "./logs")
         self.log_file_dir = args.log_file_dir
         self.log_file = None
         self.run_id = args.run_id

From bcf988144acbccd998d4facefa379f51e847d111 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Tue, 21 May 2024 20:58:04 +0800
Subject: [PATCH 084/282] [CoreEngine] save the relationship between endpoint
 and run id.

---
 .../computing/scheduler/master/deploy_job_launcher.py      | 7 +------
 .../scheduler/slave/base_slave_protocol_manager.py         | 6 ++++++
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/python/fedml/computing/scheduler/master/deploy_job_launcher.py b/python/fedml/computing/scheduler/master/deploy_job_launcher.py
index 359c6e641f..50e4517547 100755
--- a/python/fedml/computing/scheduler/master/deploy_job_launcher.py
+++ b/python/fedml/computing/scheduler/master/deploy_job_launcher.py
@@ -3,7 +3,6 @@
 from fedml.computing.scheduler.model_scheduler import device_client_constants
 from fedml.computing.scheduler.model_scheduler.device_model_cards import FedMLModelCards
 from fedml.computing.scheduler.scheduler_entry.constants import Constants
-from fedml.computing.scheduler.scheduler_core.compute_cache_manager import ComputeCacheManager
 
 
 class FedMLDeployJobLauncher:
@@ -90,11 +89,7 @@ def check_model_device_ready_and_deploy(self, request_json, run_id, master_devic
         serving_devices.extend(device_slave_ids)
 
         # Start to deploy the model
-        endpoint_id = FedMLDeployJobLauncher.deploy_model(serving_devices, request_json, run_id=run_id)
+        FedMLDeployJobLauncher.deploy_model(serving_devices, request_json, run_id=run_id)
 
-        # Save the relationship between run id and endpoint
-        ComputeCacheManager.get_instance().set_redis_params()
-        ComputeCacheManager.get_instance().get_gpu_cache().set_endpoint_run_id_map(
-            endpoint_id, run_id)
 
 
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index de97684061..447bd05cd9 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -257,6 +257,12 @@ def callback_start_train(self, topic, payload):
                 model_master_device_id=model_master_device_id,
                 model_slave_device_id=model_slave_device_id)
         else:
+            # Save the relationship between run id and endpoint
+            ComputeCacheManager.get_instance().set_redis_params()
+            ComputeCacheManager.get_instance().get_gpu_cache().set_endpoint_run_id_map(
+                endpoint_id, run_id)
+
+            # Report the run status with finished status and return
             self.generate_status_report(run_id, edge_id, server_agent_id=server_agent_id).report_client_id_status(
                 edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, run_id=run_id)
             return

From 1e69f68712a6439344126cc6a5b0dc82de59e179 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 22 May 2024 01:24:20 +0800
Subject: [PATCH 085/282] [CoreEngine] report the killed status when deleting
 the deployment.

---
 .../scheduler/model_scheduler/master_protocol_manager.py    | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index d21ab44c6e..b65d1bc8de 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -111,7 +111,7 @@ def callback_delete_deployment(self, topic, payload):
 
         # Get the launch job id
         ComputeCacheManager.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-        launch_job_id = ComputeCacheManager.get_instance().get_gpu_cache().get_endpoint_run_id_map(self.run_id)
+        launch_job_id = ComputeCacheManager.get_instance().get_gpu_cache().get_endpoint_run_id_map(model_msg_object.run_id)
 
         # Delete SQLite records
         FedMLServerDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id)
@@ -142,7 +142,9 @@ def callback_delete_deployment(self, topic, payload):
 
         # Report the launch job status with killed status.
         if launch_job_id is not None:
-            self.status_reporter.report_server_id_status(launch_job_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED)
+            self.generate_status_report(model_msg_object.run_id, self.edge_id, server_agent_id=self.edge_id).\
+                report_server_id_status(launch_job_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED,
+                                        server_id=self.edge_id, server_agent_id=self.edge_id)
 
     def callback_start_deployment(self, topic, payload):
         # noinspection PyBroadException

From 2306ee366d8ffe574b1da14092f7271a76b1441c Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 22 May 2024 02:11:10 +0800
Subject: [PATCH 086/282] [CoreEngine] make the server status work.

---
 .../fedml/computing/scheduler/scheduler_core/status_center.py   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py
index 4ababbc826..fa43fd649a 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py
@@ -218,7 +218,7 @@ def run_status_dispatcher(self, status_event, status_queue,
                         message_center=message_center)
                 else:
                     status_manager_instances[run_id_str].edge_id = status_entity.edge_id
-                    if status_entity.server_id is None and status_entity.server_id != 0:
+                    if status_entity.server_id is not None and status_entity.server_id != 0:
                         status_manager_instances[run_id_str].server_id = status_entity.server_id
 
                 # if the job status is completed then continue

From cd84d8209144e07d18e729c0d93287029a567e74 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 22 May 2024 02:19:23 +0800
Subject: [PATCH 087/282] [CoreEngine] make the server status work.

---
 .../fedml/computing/scheduler/scheduler_core/status_center.py   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py
index fa43fd649a..97c2115e76 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py
@@ -218,7 +218,7 @@ def run_status_dispatcher(self, status_event, status_queue,
                         message_center=message_center)
                 else:
                     status_manager_instances[run_id_str].edge_id = status_entity.edge_id
-                    if status_entity.server_id is not None and status_entity.server_id != 0:
+                    if status_entity.server_id is not None and str(status_entity.server_id) != "0":
                         status_manager_instances[run_id_str].server_id = status_entity.server_id
 
                 # if the job status is completed then continue

From f7ab709a39af6981a3ae4d8985a060f92b937fdb Mon Sep 17 00:00:00 2001
From: Alay Dilipbhai Shah <alay11shah@gmail.com>
Date: Tue, 21 May 2024 16:29:09 -0700
Subject: [PATCH 088/282] Update setup.py

---
 python/setup.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/python/setup.py b/python/setup.py
index fa425c98f7..0e314de29c 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -64,6 +64,8 @@ def finalize_options(self):
     'uvicorn',
     'wandb==0.13.2',
     'wget',
+    # Need to pin this version due to breaking change released in python docker sdk
+    'requests<2.32',
 ]
 
 requirements_extra_mpi = [

From 649e42fa3f259cc892ca257b822aa844627da39d Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Tue, 21 May 2024 22:25:18 -0700
Subject: [PATCH 089/282] Remove Docker Client Timeout

---
 .../fedml/computing/scheduler/comm_utils/container_utils.py   | 4 ++--
 python/fedml/computing/scheduler/comm_utils/job_utils.py      | 2 +-
 .../scheduler/model_scheduler/device_model_deployment.py      | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/container_utils.py b/python/fedml/computing/scheduler/comm_utils/container_utils.py
index f86e9fe1a2..2f5fa31fb5 100644
--- a/python/fedml/computing/scheduler/comm_utils/container_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/container_utils.py
@@ -26,7 +26,7 @@ def get_instance():
 
     def get_docker_client(self):
         try:
-            client = docker.from_env(timeout=5, version="auto")
+            client = docker.from_env()
         except Exception:
             logging.error("Failed to connect to the docker daemon, please ensure that you have "
                           "installed Docker Desktop or Docker Engine, and the docker is running")
@@ -180,7 +180,7 @@ def get_container_rank_same_model(prefix: str):
         running_model_name = hash("model_endpoint_id_{}_name_{}_model_id_{}_name_{}_ver_{}")
         """
         try:
-            client = docker.from_env(timeout=5, version="auto")
+            client = docker.from_env()
         except Exception:
             logging.error("Failed to connect to the docker daemon, please ensure that you have "
                           "installed Docker Desktop or Docker Engine, and the docker is running")
diff --git a/python/fedml/computing/scheduler/comm_utils/job_utils.py b/python/fedml/computing/scheduler/comm_utils/job_utils.py
index 08ce44d1dd..5b9a2c812a 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_utils.py
@@ -570,7 +570,7 @@ def get_run_container_name(run_id: int) -> str:
     @staticmethod
     def get_docker_client(docker_args: DockerArgs) -> DockerClient:
         try:
-            client = docker.from_env(timeout=5, version="auto")
+            client = docker.from_env()
             if docker_args.username != "" and docker_args.registry != "":
                 client.login(username=docker_args.username, password=docker_args.password, registry=docker_args.registry)
         except Exception as e:
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index f54965b599..1876373d25 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -210,7 +210,7 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
         infer_host = "127.0.0.1"
 
     try:
-        client = docker.from_env(timeout=5, version="auto")
+        client = docker.from_env()
         if enable_custom_image and docker_registry_user_name != "" and docker_registry_user_password != "" \
                 and docker_registry != "":
             client.login(username=docker_registry_user_name, password=docker_registry_user_password,
@@ -467,7 +467,7 @@ def log_deployment_result(end_point_id, model_id, cmd_container_name, cmd_type,
             logging.info(f"Attempt: {deploy_attempt} / {deploy_attempt_threshold} ...")
 
             try:
-                client = docker.from_env(timeout=5, version="auto")
+                client = docker.from_env()
             except Exception:
                 logging.error("Failed to connect to the docker daemon, please ensure that you have "
                               "installed Docker Desktop or Docker Engine, and the docker is running")

From 8d9c8ed2876eee9dc14ff6f1ac30573c6a600c38 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 23 May 2024 02:21:44 +0800
Subject: [PATCH 090/282] [CoreEngine] change the edge status in the status
 center.

---
 .../status_manager_protocols.py               | 23 +++++++++----------
 1 file changed, 11 insertions(+), 12 deletions(-)

diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
index 96f5e4920f..e045458db5 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
@@ -23,7 +23,7 @@ def __init__(self, run_id=None, edge_id=None, server_id=None,
         self.edge_id = edge_id
         self.server_id = server_id
         self.edge_id_list = edge_id_list
-        self.client_agent_active_list = dict()
+        self.edge_status_dict = None
         self.running_scheduler_contract = running_scheduler_contract if running_scheduler_contract is not None else dict()
         self.message_reporter = MLOpsMetrics()
         self.message_reporter.set_messenger(message_center)
@@ -163,6 +163,8 @@ def status_center_process_master_status(self, topic, payload):
         status = request_json["status"]
         edge_id = request_json["edge_id"]
         server_id = request_json.get("server_id", None)
+        if server_id is None or str(server_id) == "0":
+            server_id = self.server_id
         run_id_str = str(run_id)
 
         # Process the job status
@@ -185,8 +187,7 @@ def process_job_status_consensus(self, run_id, master_id, status):
         status = self.get_entire_job_status()
 
         # Set the device status based on the job status
-        edge_id_status_dict = self.client_agent_active_list.get(f"{run_id}", {})
-        for edge_id_item, edge_status_item in edge_id_status_dict.items():
+        for edge_id_item, edge_status_item in self.edge_status_dict.items():
             if edge_id_item == "server":
                 continue
 
@@ -233,18 +234,17 @@ def status_center_process_slave_status(self, topic, payload):
         init_edge_id_list = payload_json.get("init_all_edge_id_list", None)
         init_server_id = payload_json.get("init_server_id", None)
 
-        active_item_dict = self.client_agent_active_list.get(f"{run_id}", None)
-        if active_item_dict is None:
-            self.client_agent_active_list[f"{run_id}"] = dict()
+        if self.edge_status_dict is None:
+            self.edge_status_dict = dict()
 
         if init_edge_id_list is not None:
-            self.client_agent_active_list[f"{run_id}"][f"server"] = init_server_id
+            self.edge_status_dict[f"server"] = init_server_id
             for edge_id_item in init_edge_id_list:
-                self.client_agent_active_list[f"{run_id}"][f"{edge_id_item}"] = \
+                self.edge_status_dict[f"{edge_id_item}"] = \
                     ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE
 
         if run_id is not None and edge_id is not None:
-            self.client_agent_active_list[f"{run_id}"][f"{edge_id}"] = status
+            self.edge_status_dict[f"{edge_id}"] = status
 
             self.process_device_status(run_id, edge_id, status)
 
@@ -252,12 +252,11 @@ def process_device_status(self, run_id, edge_id, status):
         number_of_failed_edges = 0
         number_of_finished_edges = 0
         number_of_killed_edges = 0
-        edge_id_status_dict = self.client_agent_active_list.get(f"{run_id}", {})
-        server_id = edge_id_status_dict.get("server", 0)
+        server_id = self.edge_status_dict.get("server", 0)
         enable_fault_tolerance, fault_tolerance_rate = self.parse_fault_tolerance_params(run_id)
         running_edges_list = list()
         edge_nums = 0
-        for edge_id_item, status_item in edge_id_status_dict.items():
+        for edge_id_item, status_item in self.edge_status_dict.items():
             if edge_id_item == "server":
                 continue
 

From 1162f6c18b266796ac958d766ab4790ce50a0b88 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 23 May 2024 15:13:23 +0800
Subject: [PATCH 091/282] [CoreEngine] forward the stopping request to the
 cloud server.

---
 .../master/base_master_protocol_manager.py      | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
index c95d73f4bf..1c4cbba4f4 100755
--- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -264,23 +264,26 @@ def callback_stop_train(self, topic, payload, use_payload=None):
         run_id = request_json.get("runId", None)
         run_id = request_json.get("id", None) if run_id is None else run_id
         run_id_str = str(run_id)
+        edge_ids = request_json.get("edgeids", None)
         server_id = request_json.get("serverId", None)
         if server_id is None:
             server_id = request_json.get("server_id", None)
-        edge_ids = request_json.get("edgeids", None)
-
-        # Stop the job runner
-        self._get_job_runner_manager().stop_job_runner(
-            run_id, args=self.args, server_id=server_id, request_json=request_json,
-            run_as_cloud_agent=self.run_as_cloud_agent)
+        server_agent_id = server_id
 
         # Cleanup the cached object
         if self.running_request_json.get(run_id_str, None) is not None:
             self.running_request_json.pop(run_id_str)
 
+        # If it is the cloud agent, then forward the stopping request to the corresponding cloud server.
+        if self.run_as_cloud_agent:
+            server_agent_id = self.edge_id
+            topic_stop_train_to_cloud_server = f"mlops/flserver_agent_{server_id}/stop_train"
+            self.message_center.send_message(topic_stop_train_to_cloud_server, payload)
+            return
+
         # Reset all edge status and server status
         for iter_edge_id in edge_ids:
-            self.generate_status_report(run_id, iter_edge_id, server_agent_id=server_id).\
+            self.generate_status_report(run_id, iter_edge_id, server_agent_id=server_agent_id).\
                 report_client_id_status(iter_edge_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED,
                                         run_id=run_id, server_id=server_id)
 

From 92b7e162b66fa09b22a6af2dcfc22acf46ddf5cb Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Tue, 28 May 2024 17:46:52 +0000
Subject: [PATCH 092/282] [Deploy] Try to convert the gpu_topology value type
 to int.

---
 .../model_scheduler/device_replica_controller.py         | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_replica_controller.py b/python/fedml/computing/scheduler/model_scheduler/device_replica_controller.py
index 667d57c4f4..ea19efb8b6 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_replica_controller.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_replica_controller.py
@@ -67,7 +67,9 @@ def __init__(self, master_id, request_json: dict):
     def calc_total_gpu_num(self):
         total_gpu_num = 0
         for device_id, gpu_num in self.devices_avail_gpus.items():
-            total_gpu_num += gpu_num
+            if type(gpu_num) is not int:
+                logging.warning(f"The value in gpu_topology should be int, but got {type(gpu_num)}. Try to convert it.")
+            total_gpu_num += int(gpu_num)
         return total_gpu_num
 
     def init_id_replica_num(self):
@@ -77,6 +79,11 @@ def init_id_replica_num(self):
         """
         id_replica_num = {}
         for id, avail_num in self.devices_avail_gpus.items():
+            if type(avail_num) is not int:
+                logging.warning(f"The value in gpu_topology should be int, "
+                                f"but got {type(avail_num)}. Try to convert it.")
+            avail_num = int(avail_num)
+
             if avail_num % self.gpu_per_replica != 0:
                 raise ValueError("The number of gpus for each device should be divisible by gpu_per_replica")
             id_replica_num[str(id)] = avail_num // self.gpu_per_replica

From 0a6eba9f6dfda93ed0df500e63b8b64bad5df44d Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Tue, 28 May 2024 22:48:38 +0000
Subject: [PATCH 093/282] [Deploy] Fix version diff function.

---
 .../scheduler/model_scheduler/master_job_runner_manager.py      | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
index 0bfc205b34..c761cd6d8f 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
@@ -64,4 +64,4 @@ def generate_request_json_with_replica_num_diff(run_id, edge_id, request_json):
 
     @staticmethod
     def generate_request_json_with_replica_version_diff(run_id, edge_id, request_json):
-        return FedMLDeployMasterJobRunner.generate_request_json_with_replica_num_diff(run_id, edge_id, request_json)
+        return FedMLDeployMasterJobRunner.generate_request_json_with_replica_version_diff(run_id, edge_id, request_json)

From e8844d389b1aa7809df3ea3d8255c6ce83501e7b Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Tue, 28 May 2024 22:50:46 -0400
Subject: [PATCH 094/282] [Deploy] Fix timezone issue using pandas

---
 .../scheduler/model_scheduler/autoscaler/autoscaler.py       | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
index bb2b59e7d9..eb9f08b0eb 100644
--- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
+++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
@@ -50,11 +50,11 @@ def filter_by_timestamp(cls,
         filtered = metrics
         if before_now_minutes:
             less_than_ts = \
-                str(pd.Timestamp.now() - pd.Timedelta(minutes=before_now_minutes))
+                str(pd.Timestamp.utcnow().replace(tzinfo=None) - pd.Timedelta(minutes=before_now_minutes))
             filtered = metrics.query("'{}' <= {}".format(less_than_ts, "timestamp"))
         if before_now_seconds:
             less_than_ts = \
-                str(pd.Timestamp.now() - pd.Timedelta(seconds=before_now_seconds))
+                str(pd.Timestamp.utcnow().replace(tzinfo=None) - pd.Timedelta(seconds=before_now_seconds))
             filtered = metrics.query("'{}' <= {}".format(less_than_ts, "timestamp"))
         return filtered
 
@@ -151,6 +151,7 @@ def scale_operation_query_concurrency(cls,
 
         # Otherwise, we proceed as normal.
         queries_num = period_data.shape[0]
+        logging.info(f"Detect {queries_num} of requests in {concurrent_query_policy.window_size_secs} seconds")
 
         try:
             # QSR: Queries per Second per Replica: (Number of Queries / Number of Current Replicas) / Window Size

From b58720cd93952f8ea95366af59cf160e882706aa Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 29 May 2024 19:15:37 +0800
Subject: [PATCH 095/282] [CoreEngine] In order to make the inference logs
 work, we save the container inference logs to a single dir.

---
 .../computing/scheduler/comm_utils/job_monitor.py     | 11 ++++++++---
 .../scheduler_core/scheduler_base_job_runner.py       |  8 +++++++-
 python/fedml/core/mlops/mlops_utils.py                |  3 ++-
 3 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
index bada84d96e..a7d5214a02 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
@@ -48,6 +48,7 @@
 class JobMonitor(Singleton):
     ENDPOINT_CONTAINER_LOG_PREFIX = "endpoint"
     TIME_INTERVAL_FOR_INFERENCE_ON_GATEWAY = 60 * 10
+    ENDPOINT_CONTAINER_LOG_SUBDIR = "monitor_endpoint_logs"
 
     def __init__(self):
         if not hasattr(self, "endpoint_unavailable_counter"):
@@ -1055,8 +1056,11 @@ def monitor_endpoint_logs(self):
                 model_version = model_config.get("model_version", None)
                 endpoint_name = endpoint_json.get("end_point_name", None)
 
+                log_file_dir = os.path.join(
+                    device_client_constants.ClientConstants.get_log_file_dir(),
+                    JobMonitor.ENDPOINT_CONTAINER_LOG_SUBDIR)
                 log_file_path, program_prefix = MLOpsLoggingUtils.build_log_file_path_with_run_params(
-                    job.job_id, int(job.edge_id), device_server_constants.ServerConstants.get_log_file_dir(), is_server=True,
+                    job.job_id, int(job.edge_id), log_file_dir, is_server=False,
                     log_file_prefix=JobMonitor.ENDPOINT_CONTAINER_LOG_PREFIX,
                 )
 
@@ -1130,8 +1134,9 @@ def monitor_endpoint_logs(self):
                                 nano_second_str = container_time.split(".")[1][:9]
                                 t_datetime_obj = isoparse(container_time)
 
-                                if t_sec_offset is not None:
-                                    t_datetime_obj = t_datetime_obj + datetime.timedelta(seconds=t_sec_offset)
+                                # ISSUE: this will cause the timestamp to be incorrect.
+                                #if t_sec_offset is not None:
+                                #    t_datetime_obj = t_datetime_obj + datetime.timedelta(seconds=t_sec_offset)
                             except Exception as e:
                                 logging.error(f"Exception when parsing the container log time {e}")
                                 t_datetime_obj = datetime.datetime.now()
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
index 69b69f4d4c..648ab18cf1 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -12,7 +12,7 @@
 from ..comm_utils.constants import SchedulerConstants
 from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs
 from ..scheduler_entry.constants import Constants
-from ....core.mlops import MLOpsMetrics
+from ....core.mlops import MLOpsMetrics, MLOpsRuntimeLogDaemon
 from ....core.mlops.mlops_device_perfs import MLOpsDevicePerfStats
 from ..comm_utils.yaml_utils import load_yaml_config
 from .general_constants import GeneralConstants
@@ -449,10 +449,16 @@ def trigger_stop_event(self):
         if self.run_process_event is not None:
             self.run_process_event.set()
 
+        time.sleep(1)
+        MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id)
+
     def trigger_completed_event(self):
         if self.run_process_completed_event is not None:
             self.run_process_completed_event.set()
 
+        time.sleep(1)
+        MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id)
+
     def execute_job_task(self, unzip_package_path, entry_file_full_path, conf_file_full_path, dynamic_args_config,
                          fedml_config_object):
         run_config = self.request_json["run_config"]
diff --git a/python/fedml/core/mlops/mlops_utils.py b/python/fedml/core/mlops/mlops_utils.py
index 1d6db23d02..8bde9e4299 100644
--- a/python/fedml/core/mlops/mlops_utils.py
+++ b/python/fedml/core/mlops/mlops_utils.py
@@ -150,10 +150,11 @@ def get_edge_id_from_args(args):
                     else:
                         edge_id = 0
             else:
-                if getattr(args, "client_id", None) is not None:
+                if getattr(args, "edge_id", None) is not None:
                     edge_id = args.edge_id
                 else:
                     edge_id = 0
+
         return edge_id
 
     @staticmethod

From 76250750358b57e572e78bb07fa03927bc460003 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Thu, 30 May 2024 07:29:38 +0000
Subject: [PATCH 096/282] [Deploy] Avoid re-downloading the same model serving
 package.

---
 .../model_scheduler/worker_job_runner.py      | 116 ++++++++----------
 1 file changed, 53 insertions(+), 63 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
index 332dab2547..831064b591 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
@@ -146,10 +146,10 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
         logging.info(f"[Worker] Received model deployment request from master for endpoint {run_id}.")
         self.replica_handler = FedMLDeviceReplicaHandler(self.edge_id, self.request_json)
         if self.replica_handler is not None:
-            logging.info(f"=================Worker replica Handler ======================"
-                         f"Reconcile with num diff {self.replica_handler.replica_num_diff} "
-                         f"and version diff {self.replica_handler.replica_version_diff}."
-                         f"=============================================================")
+            logging.info("\n================= Worker replica Handler ======================\n"
+                         f"Reconcile with num diff {self.replica_handler.replica_num_diff}\n"
+                         f"and version diff {self.replica_handler.replica_version_diff}\n"
+                         "===============================================================\n")
         else:
             logging.error(f"[Worker] Replica handler is None.")
             return False
@@ -178,39 +178,13 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
             logging.info("[Worker] No need to reconcile.")
             return True
 
-        logging.info(
-            f"================Worker Reconcile Operations ======================\n"
-            f" op: {op}; op num: {op_num}.\n"
-            f"==================================================================\n")
-
-        # If not rollback, download package from MLOps; otherwise, use the backup package
-        if op != "rollback":
-            logging.info("Download and unzip model to local...")
-            unzip_package_path, _, _ = \
-                self.update_local_fedml_config(run_id, model_config, model_config_parameters)
-            if unzip_package_path is None:
-                logging.info("Failed to update local fedml config.")
-                self.check_runner_stop_event()
-                self.status_reporter.report_client_id_status(
-                    self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
-                    is_from_model=True, run_id=run_id)
-                return False
-
-            if not os.path.exists(unzip_package_path):
-                logging.info("Failed to unzip file.")
-                self.check_runner_stop_event()
-                self.status_reporter.report_client_id_status(
-                    self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
-                    is_from_model=True, run_id=run_id)
-                return False
-        else:
-            logging.info("Try to use backup package to rollback...")
-            # Find folder under "~/.fedml/fedml-model-client/fedml/model_packages \
-            # /${end_point_id}_${end_point_name}_${model_name}_${model_version}"
-            backup_folder_full_path = None
-            models_root_dir = ClientConstants.get_model_package_dir()
+        logging.info("\n================ Worker Reconcile Operations ======================\n"
+                     f"                op: {op}; op num: {op_num}.\n"
+                     "===================================================================\n")
 
+        if op == "rollback":
             # Find the version (notified by master) to rollback
+            logging.info("Try to use backup package to rollback...")
             version_diff_dict = self.request_json["replica_version_diff"][str(self.edge_id)]
             version_rollback_to = None
             for replica_no, rollback_ops in version_diff_dict.items():
@@ -222,39 +196,38 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                 return False
             model_version = version_rollback_to
 
-            # Format the version to match the folder name
-            model_version_formatted = version_rollback_to.replace(" ", "-")
-            model_version_formatted = model_version_formatted.replace(":", "-")
-
-            last_run_folder_sub_fd = f"{run_id}_{end_point_name}_{model_name}_{model_version_formatted}"
-            for folder in os.listdir(models_root_dir):
-                if last_run_folder_sub_fd in folder:
-                    backup_folder_full_path = os.path.join(models_root_dir, folder)
-                    break
-            if backup_folder_full_path is None:
-                logging.error(f"No backup folder found for run_id: {self.run_id} edge_id: {self.edge_id} "
-                              f"under {models_root_dir} with sub folder {last_run_folder_sub_fd}, rollback failed.")
-                return False
+        # Construct the parent folder name for the package
+        model_version_formatted = model_version.replace(" ", "-")
+        model_version_formatted = model_version_formatted.replace(":", "-")
+        models_root_dir = ClientConstants.get_model_package_dir()
+        parent_fd = f"{run_id}_{end_point_name}_{model_name}_{model_version_formatted}"
 
-            # Inside backup folder, find unzipped package with prefix unzip_fedml_run
-            unzip_package_path_parent = None
-            for folder in os.listdir(backup_folder_full_path):
-                if folder.startswith("unzip_fedml_run"):
-                    unzip_package_path_parent = os.path.join(backup_folder_full_path, folder)
-                    break
-
-            # Inside unzip folder, find the unzipped package, should be the only one
-            unzip_package_path = None
-            for folder in os.listdir(unzip_package_path_parent):
-                if os.path.isdir(os.path.join(unzip_package_path_parent, folder)):
-                    unzip_package_path = os.path.join(unzip_package_path_parent, folder)
-                    break
+        # Check if the package is already downloaded
+        unzip_package_path = ""
+        if os.path.exists(os.path.join(models_root_dir, parent_fd)):
+            unzip_package_path = self.find_previous_downloaded_pkg(os.path.join(models_root_dir, parent_fd))
 
+        # Download the package if not found
+        if unzip_package_path == "":
+            logging.info("Download and unzip model to local...")
+            unzip_package_path, _, _ = \
+                self.update_local_fedml_config(run_id, model_config, model_config_parameters)
             if unzip_package_path is None:
-                logging.error(f"No unzipped package found for run_id: {self.run_id} edge_id: {self.edge_id} "
-                              f"under {backup_folder_full_path}, rollback failed.")
+                logging.info("Failed to update local fedml config.")
+                self.check_runner_stop_event()
+                self.status_reporter.report_client_id_status(
+                    self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
+                    is_from_model=True, run_id=run_id)
                 return False
 
+        if not os.path.exists(unzip_package_path):
+            logging.info("Failed to unzip file.")
+            self.check_runner_stop_event()
+            self.status_reporter.report_client_id_status(
+                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
+                is_from_model=True, run_id=run_id)
+            return False
+
         self.check_runner_stop_event()
 
         running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
@@ -535,3 +508,20 @@ def build_dynamic_args(self, run_id, run_config, package_conf_object, base_dir):
     # Override
     def build_dynamic_constrain_variables(self, run_id, run_config):
         pass
+
+    @staticmethod
+    def find_previous_downloaded_pkg(parent_dir) -> str:
+        unzip_fd = ""
+        res = ""
+
+        for folder in os.listdir(parent_dir):
+            if folder.startswith("unzip_fedml_run"):
+                unzip_fd = os.path.join(parent_dir, folder)
+                break
+
+        for folder in os.listdir(unzip_fd):
+            if os.path.isdir(os.path.join(unzip_fd, folder)):
+                res = os.path.join(unzip_fd, folder)
+                break
+
+        return res

From 9d8b0df6ae63c22f4ef95d330ff4b154db68b500 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Thu, 30 May 2024 12:22:59 -0700
Subject: [PATCH 097/282] Add inference gateway logs

---
 .../model_scheduler/device_model_inference.py | 18 ++++-
 .../model_scheduler/master_job_runner.py      | 77 +++++++++++--------
 .../master_job_runner_manager.py              |  4 +-
 .../master_protocol_manager.py                |  2 +-
 4 files changed, 65 insertions(+), 36 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index b8d85edd31..1b6d71ebb7 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -15,6 +15,7 @@
 from fedml.computing.scheduler.model_scheduler.device_mqtt_inference_protocol import FedMLMqttInference
 from fedml.computing.scheduler.model_scheduler.device_http_proxy_inference_protocol import FedMLHttpProxyInference
 from fedml.computing.scheduler.comm_utils import sys_utils
+from fedml.core.mlops import MLOpsRuntimeLog, MLOpsRuntimeLogDaemon
 
 try:
     from pydantic import BaseSettings
@@ -56,6 +57,8 @@ class settings:
     ext_info = "2b34303961245c4f175f2236282d7a272c040b0904747579087f6a760112030109010c215d54505707140005190a051c347f365c4a430c020a7d39120e26032a78730f797f7c031f0901657e75"
 
 
+logging_args = None
+
 api = FastAPI()
 
 
@@ -229,7 +232,8 @@ def retrieve_info_by_endpoint_id(end_point_id, in_end_point_name=None, in_model_
         model_name = ""
         if in_end_point_name is not None:
             end_point_name = in_end_point_name
-            model_name = redis_key[len(f"{FedMLModelCache.FEDML_MODEL_DEPLOYMENT_STATUS_TAG}-{end_point_id}-{in_end_point_name}-"):]
+            model_name = redis_key[
+                         len(f"{FedMLModelCache.FEDML_MODEL_DEPLOYMENT_STATUS_TAG}-{end_point_id}-{in_end_point_name}-"):]
         else:
             # e.g. FEDML_MODEL_DEPLOYMENT_STATUS--1234-dummy_endpoint_name-dummy_model_name
             try:
@@ -366,8 +370,20 @@ def logging_inference_request(request, response):
         logging.info("failed to log inference request and response to file.")
 
 
+def set_logging_args(args=None):
+    global logging_args
+    logging_args = args
+    if logging_args is not None:
+        # Force run id to 0, as the gateway is shared by all the runs.
+        setattr(args, "run_id", "0")
+        MLOpsRuntimeLog.get_instance(args).init_logs(log_level=logging.INFO)
+        MLOpsRuntimeLogDaemon.get_instance(args).start_log_processor(args.run_id, args.edge_id)
+        logging.info("start the log processor")
+
+
 if __name__ == "__main__":
     import uvicorn
+
     port = 2203
     logging.basicConfig(level=logging.INFO)
     uvicorn.run(api, host="0.0.0.0", port=port, log_level="info")
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index bf9cee3279..5680b2ac6d 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -1,6 +1,7 @@
 import copy
 import json
 import logging
+import multiprocessing
 import os
 import time
 import queue
@@ -9,6 +10,7 @@
 from multiprocessing import Queue
 
 import fedml
+import uvicorn
 from fedml.core.mlops import MLOpsRuntimeLog, MLOpsConfigs
 from fedml.core.mlops.mlops_runtime_log import MLOpsFormatter
 from .device_client_constants import ClientConstants
@@ -22,6 +24,7 @@
 from ..master.base_master_job_runner import FedMLBaseMasterJobRunner
 from .device_replica_controller import FedMLDeviceReplicaController
 from .job_runner_msg_sender import FedMLDeployJobRunnerMsgSender
+from .device_model_inference import set_logging_args
 
 
 class FedMLDeployMasterJobRunner(FedMLBaseMasterJobRunner, FedMLDeployJobRunnerMsgSender, ABC):
@@ -63,6 +66,11 @@ def _generate_job_runner_instance(self, args, run_id=None, request_json=None, ag
     def _generate_extend_queue_list(self):
         return [self.deployment_result_queue]
 
+    @staticmethod
+    def start_inference_gateway_server(inference_gw_cmd, port, args):
+        set_logging_args(args)
+        uvicorn.run(inference_gw_cmd, host="0.0.0.0", port=port, log_level="info")
+
     # Override
     def run_impl(
             self, edge_id_status_queue, edge_device_info_queue, run_metrics_queue,
@@ -116,7 +124,7 @@ def run_impl(
 
         # start unified inference server
         FedMLDeployMasterJobRunner.start_device_inference_gateway(
-            inference_port=inference_port, agent_config=self.agent_config)
+            args=self.args, inference_port=inference_port, agent_config=self.agent_config)
 
         # start inference monitor server
         FedMLDeployMasterJobRunner.stop_device_inference_monitor(
@@ -462,13 +470,14 @@ def process_deployment_result_message(self, topic=None, payload=None):
             time.sleep(3)
             self.trigger_completed_event()
 
+
     def cleanup_runner_process(self, run_id):
         ServerConstants.cleanup_run_process(run_id, not_kill_subprocess=True)
 
     @staticmethod
     def start_device_inference_gateway(
-            inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
-            agent_config=None, redis_addr="localhost", redis_port=6379, redis_password="fedml_default"
+            args, inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT, agent_config=None,
+            redis_addr="localhost", redis_port=6379, redis_password="fedml_default"
     ):
         # start unified inference server
         python_program = get_python_program()
@@ -477,35 +486,39 @@ def start_device_inference_gateway(
             inference_port = int(master_port)
         if not ServerConstants.is_running_on_k8s():
             logging.info(f"start the model inference gateway...")
-            use_mqtt_inference = os.getenv("FEDML_USE_MQTT_INFERENCE", "False")
-            use_mqtt_inference = True if use_mqtt_inference.lower() == 'true' else False
-            use_worker_gateway = os.getenv("FEDML_USE_WORKER_GATEWAY", "False")
-            use_worker_gateway = True if use_worker_gateway.lower() == 'true' else False
+            # use_mqtt_inference = os.getenv("FEDML_USE_MQTT_INFERENCE", "False")
+            # use_mqtt_inference = True if use_mqtt_inference.lower() == 'true' else False
+            # use_worker_gateway = os.getenv("FEDML_USE_WORKER_GATEWAY", "False")
+            # use_worker_gateway = True if use_worker_gateway.lower() == 'true' else False
             inference_gw_cmd = "fedml.computing.scheduler.model_scheduler.device_model_inference:api"
             inference_gateway_pids = RunProcessUtils.get_pid_from_cmd_line(inference_gw_cmd)
             if inference_gateway_pids is None or len(inference_gateway_pids) <= 0:
-                cur_dir = os.path.dirname(__file__)
-                fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
-                connect_str = "@FEDML@"
-                ext_info = sys_utils.random1(
-                    agent_config["mqtt_config"]["BROKER_HOST"] + connect_str +
-                    str(agent_config["mqtt_config"]["BROKER_PORT"]) + connect_str +
-                    agent_config["mqtt_config"]["MQTT_USER"] + connect_str +
-                    agent_config["mqtt_config"]["MQTT_PWD"] + connect_str +
-                    str(agent_config["mqtt_config"]["MQTT_KEEPALIVE"]), "FEDML@9999GREAT")
-                python_program = get_python_program()
-                inference_gateway_process = ServerConstants.exec_console_with_script(
-                    "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" "
-                    "END_POINT_NAME=\"{}\" "
-                    "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" "
-                    "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} "
-                    "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
-                    "--log-level critical".format(
-                        redis_addr, str(redis_port), redis_password, "",
-                        "", "", "", fedml.get_env_version(), use_mqtt_inference,
-                        use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port),
-                        fedml_base_dir),
-                    should_capture_stdout=False, should_capture_stderr=False)
+                # cur_dir = os.path.dirname(__file__)
+                # fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
+                # connect_str = "@FEDML@"
+                # ext_info = sys_utils.random1(
+                #     agent_config["mqtt_config"]["BROKER_HOST"] + connect_str +
+                #     str(agent_config["mqtt_config"]["BROKER_PORT"]) + connect_str +
+                #     agent_config["mqtt_config"]["MQTT_USER"] + connect_str +
+                #     agent_config["mqtt_config"]["MQTT_PWD"] + connect_str +
+                #     str(agent_config["mqtt_config"]["MQTT_KEEPALIVE"]), "FEDML@9999GREAT")
+                # python_program = get_python_program()
+                inference_gateway_process = multiprocessing.Process(
+                    target=FedMLDeployMasterJobRunner.start_inference_gateway_server, args=(inference_gw_cmd,
+                                                                                            inference_port, args)
+                )
+                # inference_gateway_process = ServerConstants.exec_console_with_script(
+                #     "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" "
+                #     "END_POINT_NAME=\"{}\" "
+                #     "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" "
+                #     "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} "
+                #     "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
+                #     "--log-level critical".format(
+                #         redis_addr, str(redis_port), redis_password, "",
+                #         "", "", "", fedml.get_env_version(), use_mqtt_inference,
+                #         use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port),
+                #         fedml_base_dir),
+                #     should_capture_stdout=False, should_capture_stderr=False)
 
                 return inference_gateway_process
             else:
@@ -544,7 +557,7 @@ def stop_device_inference_monitor(run_id, end_point_name, model_id, model_name,
                                                   model_id, model_name, model_version)
 
     @staticmethod
-    def recover_inference_and_monitor():
+    def recover_inference_and_monitor(args):
         # noinspection PyBroadException
         try:
             agent_config = dict()
@@ -571,8 +584,7 @@ def recover_inference_and_monitor():
                 if not is_activated:
                     continue
 
-                FedMLDeployMasterJobRunner.start_device_inference_gateway(
-                    inference_port=inference_port, agent_config=agent_config)
+                FedMLDeployMasterJobRunner.start_device_inference_gateway(args=args, inference_port=inference_port, agent_config=agent_config)
 
                 FedMLDeployMasterJobRunner.stop_device_inference_monitor(
                     run_id, end_point_name, model_id, model_name, model_version)
@@ -807,3 +819,4 @@ def build_dynamic_args(self, run_id, run_config, package_conf_object, base_dir):
     # Override
     def build_dynamic_constrain_variables(self, run_id, run_config):
         pass
+
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
index 0bfc205b34..7c700bb10f 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
@@ -55,8 +55,8 @@ def stop_device_inference_monitor(self, run_id, end_point_name, model_id, model_
                 run_id, end_point_name, model_id, model_name, model_version)
 
     @staticmethod
-    def recover_inference_and_monitor():
-        FedMLDeployMasterJobRunner.recover_inference_and_monitor()
+    def recover_inference_and_monitor(args):
+        FedMLDeployMasterJobRunner.recover_inference_and_monitor(args=args)
 
     @staticmethod
     def generate_request_json_with_replica_num_diff(run_id, edge_id, request_json):
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index b65d1bc8de..8f77f609d0 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -82,7 +82,7 @@ def _init_extra_items(self):
         except Exception as e:
             pass
 
-        FedMLDeployJobRunnerManager.recover_inference_and_monitor()
+        FedMLDeployJobRunnerManager.recover_inference_and_monitor(args = self.args)
 
     # Override
     def _process_connection_ready(self):

From 3fb45aac083d2ec7e341251ade6e0a18db986686 Mon Sep 17 00:00:00 2001
From: Ubuntu <cirrascale@QAIED41.san01.cirrascale.net>
Date: Mon, 3 Jun 2024 19:00:55 +0000
Subject: [PATCH 098/282] Make the Inference Gateway a Daemon Process

---
 .../computing/scheduler/model_scheduler/master_job_runner.py  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index 5680b2ac6d..4d5974237d 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -90,7 +90,7 @@ def run_impl(
         logging.info("model deployment request: {}".format(self.request_json))
         logging.info("send deployment stages...")
 
-        # Generate the replica controller object.
+        # Generate the replica controller object
         self.replica_controller = FedMLDeviceReplicaController(self.edge_id, self.request_json)
 
         # Start the process to report system performance(cpu,memory,etc.) to MLOps
@@ -519,6 +519,8 @@ def start_device_inference_gateway(
                 #         use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port),
                 #         fedml_base_dir),
                 #     should_capture_stdout=False, should_capture_stderr=False)
+                inference_gateway_process.daemon = True
+                inference_gateway_process.start()
 
                 return inference_gateway_process
             else:

From 8595e0f6a7693ba6cd043731d8bcf4c648288214 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@fedml.ai>
Date: Mon, 3 Jun 2024 18:25:05 -0400
Subject: [PATCH 099/282] Add fail-fast and per-request timeout enforcement
 policies.

---
 .../debug/inference_timeout/config.yaml       |  10 ++
 .../debug/inference_timeout/src/serve_main.py |  32 ++++
 .../scheduler/comm_utils/constants.py         |   2 -
 .../device_client_constants.py                |   1 +
 .../device_http_inference_protocol.py         |   9 +-
 .../model_scheduler/device_model_cache.py     | 102 +++++++-----
 .../model_scheduler/device_model_inference.py | 153 +++++++++++++-----
 .../model_scheduler/master_job_runner.py      |   4 +-
 .../master_protocol_manager.py                |   6 +-
 .../customized_job_example/train_job.yaml     |   4 +-
 10 files changed, 226 insertions(+), 97 deletions(-)
 create mode 100644 python/examples/deploy/debug/inference_timeout/config.yaml
 create mode 100644 python/examples/deploy/debug/inference_timeout/src/serve_main.py

diff --git a/python/examples/deploy/debug/inference_timeout/config.yaml b/python/examples/deploy/debug/inference_timeout/config.yaml
new file mode 100644
index 0000000000..f6d2566e00
--- /dev/null
+++ b/python/examples/deploy/debug/inference_timeout/config.yaml
@@ -0,0 +1,10 @@
+workspace: "./src"
+entry_point: "serve_main.py"
+bootstrap: |
+  echo "Bootstrap start..."
+  sleep 5
+  echo "Bootstrap finished"
+auto_detect_public_ip: true
+use_gpu: true
+
+request_timeout_sec: 10
diff --git a/python/examples/deploy/debug/inference_timeout/src/serve_main.py b/python/examples/deploy/debug/inference_timeout/src/serve_main.py
new file mode 100644
index 0000000000..5884e41f85
--- /dev/null
+++ b/python/examples/deploy/debug/inference_timeout/src/serve_main.py
@@ -0,0 +1,32 @@
+from fedml.serving import FedMLPredictor
+from fedml.serving import FedMLInferenceRunner
+import uuid
+import torch
+
+# Calculate the number of elements
+num_elements = 1_073_741_824 // 4  # using integer division for whole elements
+
+
+class DummyPredictor(FedMLPredictor):
+    def __init__(self):
+        super().__init__()
+        # Create a tensor with these many elements
+        tensor = torch.empty(num_elements, dtype=torch.float32)
+
+        # Move the tensor to GPU
+        tensor_gpu = tensor.cuda()
+
+        # for debug
+        with open("/tmp/dummy_gpu_occupier.txt", "w") as f:
+            f.write("GPU is occupied")
+
+        self.worker_id = uuid.uuid4()
+
+    def predict(self, request):
+        return {f"AlohaV0From{self.worker_id}": request}
+
+
+if __name__ == "__main__":
+    predictor = DummyPredictor()
+    fedml_inference_runner = FedMLInferenceRunner(predictor)
+    fedml_inference_runner.run()
diff --git a/python/fedml/computing/scheduler/comm_utils/constants.py b/python/fedml/computing/scheduler/comm_utils/constants.py
index f3fcd4ed5a..22cb31de45 100644
--- a/python/fedml/computing/scheduler/comm_utils/constants.py
+++ b/python/fedml/computing/scheduler/comm_utils/constants.py
@@ -78,8 +78,6 @@ class SchedulerConstants:
     ENDPOINT_INFERENCE_READY_TIMEOUT = 15
     ENDPOINT_STATUS_CHECK_TIMEOUT = 60 * 3
 
-    MQTT_INFERENCE_TIMEOUT = 60 * 6
-
     TRAIN_PROVISIONING_TIMEOUT = 60 * 25
     TRAIN_STARTING_TIMEOUT = 60 * 15
     TRAIN_STOPPING_TIMEOUT = 60 * 5
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
index d2093569c3..7894f2c73e 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
@@ -95,6 +95,7 @@ class ClientConstants(object):
     INFERENCE_ENGINE_TYPE_INT_DEFAULT = 2
     INFERENCE_MODEL_VERSION = "1"
     INFERENCE_INFERENCE_SERVER_VERSION = "v2"
+    INFERENCE_REQUEST_TIMEOUT = 30
 
     MSG_MODELOPS_DEPLOYMENT_STATUS_INITIALIZING = "INITIALIZING"
     MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYING = "DEPLOYING"
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
index e711a9e6a6..7e4c06ea5d 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
@@ -1,13 +1,12 @@
-import traceback
-from typing import Mapping
-from urllib.parse import urlparse
-
 import httpx
+import traceback
 
 from .device_client_constants import ClientConstants
-import requests
+
 from fastapi.responses import Response
 from fastapi.responses import StreamingResponse
+from urllib.parse import urlparse
+from typing import Mapping
 
 
 class FedMLHttpInference:
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
index edcdf7d0f1..fca7b81d42 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
@@ -33,6 +33,8 @@ class FedMLModelCache(Singleton):
 
     FEDML_KEY_COUNT_PER_SCAN = 1000
 
+    FEDML_PENDING_REQUESTS_COUNTER = "FEDML_PENDING_REQUESTS_COUNTER"
+
     def __init__(self):
         if not hasattr(self, "redis_pool"):
             self.redis_pool = None
@@ -110,7 +112,7 @@ def set_user_setting_replica_num(self, end_point_id,
                                      replica_num: int, enable_auto_scaling: bool = False,
                                      scale_min: int = 0, scale_max: int = 0, state: str = "UNKNOWN",
                                      target_queries_per_replica: int = 60, aggregation_window_size_seconds: int = 60,
-                                     scale_down_delay_seconds: int = 120
+                                     scale_down_delay_seconds: int = 120, timeout_s: int = 30
                                      ) -> bool:
         """
         Key: FEDML_MODEL_ENDPOINT_REPLICA_USER_SETTING_TAG--<end_point_id>
@@ -136,7 +138,8 @@ def set_user_setting_replica_num(self, end_point_id,
             "scale_min": scale_min, "scale_max": scale_max, "state": state,
             "target_queries_per_replica": target_queries_per_replica,
             "aggregation_window_size_seconds": aggregation_window_size_seconds,
-            "scale_down_delay_seconds": scale_down_delay_seconds
+            "scale_down_delay_seconds": scale_down_delay_seconds,
+            "request_timeout_sec": timeout_s
         }
         try:
             self.redis_connection.set(self.get_user_setting_replica_num_key(end_point_id), json.dumps(replica_num_dict))
@@ -362,7 +365,7 @@ def get_idle_device(self, end_point_id, end_point_name,
                 if "model_status" in result_payload and result_payload["model_status"] == "DEPLOYED":
                     idle_device_list.append({"device_id": device_id, "end_point_id": end_point_id})
 
-        logging.info(f"{len(idle_device_list)} devices has this model on it: {idle_device_list}")
+        logging.info(f"{len(idle_device_list)} devices this model has on it: {idle_device_list}")
 
         if len(idle_device_list) <= 0:
             return None, None
@@ -824,38 +827,37 @@ def get_monitor_metrics_key(self, end_point_id, end_point_name, model_name, mode
                                       end_point_id, end_point_name, model_name, model_version)
 
     def get_endpoint_metrics(self,
-                             endpoint_id,
+                             end_point_id,
                              k_recent=None) -> List[Any]:
         model_deployment_monitor_metrics = list()
         try:
             key_pattern = "{}*{}*".format(
                 self.FEDML_MODEL_DEPLOYMENT_MONITOR_TAG,
-                endpoint_id)
-            model_deployment_monitor_endpoint_keys = \
+                end_point_id)
+            model_deployment_monitor_endpoint_key = \
                 self.redis_connection.keys(pattern=key_pattern)
             # Since the reply is a list, we need to make sure the list
             # is non-empty otherwise the index will raise an error.
-            if model_deployment_monitor_endpoint_keys:
+            if model_deployment_monitor_endpoint_key:
                 model_deployment_monitor_endpoint_key = \
-                    model_deployment_monitor_endpoint_keys[0]
-            else:
-                raise Exception("Function `get_endpoint_metrics` Key {} does not exist."
-                                .format(key_pattern))
-            # Set start and end index depending on the size of the
-            # list and the requested number of most recent records.
-            num_records = self.redis_connection.llen(name=model_deployment_monitor_endpoint_key)
-            # if k_most_recent is None, then fetch all by default.
-            start, end = 0, -1
-            # if k_most_recent is positive then fetch [-k_most_recent:]
-            if k_recent and k_recent > 0:
-                start = num_records - k_recent
-            model_deployment_monitor_metrics = \
-                self.redis_connection.lrange(
-                    name=model_deployment_monitor_endpoint_key,
-                    start=start,
-                    end=end)
-            model_deployment_monitor_metrics = [
-                json.loads(m) for m in model_deployment_monitor_metrics]
+                    model_deployment_monitor_endpoint_key[0]
+
+                # Set start and end index depending on the size of the
+                # list and the requested number of most recent records.
+                num_records = self.redis_connection.llen(
+                    name=model_deployment_monitor_endpoint_key)
+                # if k_most_recent is None, then fetch all by default.
+                start, end = 0, -1
+                # if k_most_recent is positive then fetch [-k_most_recent:]
+                if k_recent and k_recent > 0:
+                    start = num_records - k_recent
+                model_deployment_monitor_metrics = \
+                    self.redis_connection.lrange(
+                        name=model_deployment_monitor_endpoint_key,
+                        start=start,
+                        end=end)
+                model_deployment_monitor_metrics = [
+                    json.loads(m) for m in model_deployment_monitor_metrics]
 
         except Exception as e:
             logging.error(e)
@@ -868,24 +870,24 @@ def get_endpoint_replicas_results(self, endpoint_id) -> List[Any]:
             key_pattern = "{}*{}*".format(
                 self.FEDML_MODEL_DEPLOYMENT_RESULT_TAG,
                 endpoint_id)
-            model_deployment_result_key = \
+            model_deployment_result_keys = \
                 self.redis_connection.keys(pattern=key_pattern)
-            if model_deployment_result_key:
+            if model_deployment_result_keys:
                 model_deployment_result_key = \
-                    model_deployment_result_key[0]
+                    model_deployment_result_keys[0]
+                replicas_results = \
+                    self.redis_connection.lrange(
+                        name=model_deployment_result_key,
+                        start=0,
+                        end=-1)
+                # Format the result value to a properly formatted json.
+                for replica_idx, replica in enumerate(replicas_results):
+                    replicas_results[replica_idx] = json.loads(replica)
+                    replicas_results[replica_idx]["result"] = \
+                        json.loads(replicas_results[replica_idx]["result"])
             else:
                 raise Exception("Function `get_endpoint_replicas_results` Key {} does not exist."
                                 .format(key_pattern))
-            replicas_results = \
-                self.redis_connection.lrange(
-                    name=model_deployment_result_key,
-                    start=0,
-                    end=-1)
-
-            # Format the result value to a properly formatted json.
-            for replica_idx, replica in enumerate(replicas_results):
-                replicas_results[replica_idx] = json.loads(replica)
-                replicas_results[replica_idx]["result"] = json.loads(replicas_results[replica_idx]["result"])
 
         except Exception as e:
             logging.error(e)
@@ -898,11 +900,13 @@ def get_endpoint_settings(self, endpoint_id) -> Dict:
             key_pattern = "{}*{}*".format(
                 self.FEDML_MODEL_ENDPOINT_REPLICA_USER_SETTING_TAG,
                 endpoint_id)
-            endpoint_settings = \
+            endpoint_settings_keys = \
                 self.redis_connection.keys(pattern=key_pattern)
-            if endpoint_settings:
+            if endpoint_settings_keys:
                 endpoint_settings = \
-                    json.load(endpoint_settings[0])
+                    json.load(endpoint_settings_keys[0])
+                if not isinstance(endpoint_settings, dict):
+                    endpoint_settings = json.loads(endpoint_settings)
             else:
                 raise Exception("Function `get_endpoint_settings` Key {} does not exist."
                                 .format(key_pattern))
@@ -966,3 +970,17 @@ def delete_endpoint_scaling_down_decision_time(self, end_point_id) -> bool:
         return bool(self.redis_connection.hdel(
             self.FEDML_MODEL_ENDPOINT_SCALING_DOWN_DECISION_TIME_TAG,
             end_point_id))
+
+    def get_pending_requests_counter(self) -> int:
+        if not self.redis_connection.exists(self.FEDML_PENDING_REQUESTS_COUNTER):
+            self.redis_connection.set(self.FEDML_PENDING_REQUESTS_COUNTER, 0)
+        return int(self.redis_connection.get(self.FEDML_PENDING_REQUESTS_COUNTER))
+
+    def update_pending_requests_counter(self, increase=False, decrease=False) -> int:
+        if not self.redis_connection.exists(self.FEDML_PENDING_REQUESTS_COUNTER):
+            self.redis_connection.set(self.FEDML_PENDING_REQUESTS_COUNTER, 0)
+        if increase:
+            self.redis_connection.incr(self.FEDML_PENDING_REQUESTS_COUNTER)
+        if decrease:
+            self.redis_connection.decr(self.FEDML_PENDING_REQUESTS_COUNTER)
+        return self.get_pending_requests_counter()
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index b8d85edd31..26c25bc09f 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -1,13 +1,16 @@
+import json
 import logging
 import time
 import traceback
-from urllib.parse import urlparse
 import os
+
+from urllib.parse import urlparse
 from typing import Any, Mapping, MutableMapping, Union
 
 from fastapi import FastAPI, Request, Response, status
 from fastapi.responses import StreamingResponse
 
+from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants
 from fedml.computing.scheduler.model_scheduler.device_http_inference_protocol import FedMLHttpInference
 from fedml.computing.scheduler.model_scheduler.device_server_constants import ServerConstants
 from fedml.computing.scheduler.model_scheduler.device_model_monitor import FedMLModelMetrics
@@ -26,23 +29,7 @@
     pass
 
 
-# class Settings(BaseSettings):
-#     redis_addr: str
-#     redis_port: str
-#     redis_password: str
-#     end_point_name: str
-#     model_name: str
-#     model_version: str
-#     model_infer_url: str
-#     version: str
-#     use_mqtt_inference: bool
-#     use_worker_gateway: bool
-#     ext_info: str
-#
-#
-# settings = Settings()
-
-class settings:
+class Settings:
     redis_addr = "127.0.0.1"
     redis_port = 6379
     redis_password = "fedml_default"
@@ -58,10 +45,54 @@ class settings:
 
 api = FastAPI()
 
+FEDML_MODEL_CACHE = FedMLModelCache.get_instance()
+FEDML_MODEL_CACHE.set_redis_params(
+    redis_addr=Settings.redis_addr,
+    redis_port=Settings.redis_port,
+    redis_password=Settings.redis_password)
+
+
+@api.middleware("http")
+async def auth_middleware(request: Request, call_next):
+
+    if "/inference" in request.url.path or "/api/v1/predict" in request.url.path:
+        try:
+            # Attempt to parse the JSON body.
+            request_json = await request.json()
+        except json.JSONDecodeError:
+            return Response("Invalid JSON.", status_code=status.HTTP_400_BAD_REQUEST)
+
+        # Get total pending requests.
+        pending_requests_num = FEDML_MODEL_CACHE.get_pending_requests_counter()
+        if pending_requests_num:
+            end_point_id = request_json.get("end_point_id", None)
+            # Fetch metrics of the past k=3 requests.
+            past_k_metrics = FEDML_MODEL_CACHE.get_endpoint_metrics(
+                end_point_id=end_point_id,
+                k_recent=3)
+
+            # Get the request timeout from the endpoint settings.
+            request_timeout_s = FEDML_MODEL_CACHE.get_endpoint_settings(end_point_id) \
+                .get("request_timeout_s", ClientConstants.INFERENCE_REQUEST_TIMEOUT)
+
+            # Only proceed if the past k metrics collection is not empty.
+            if past_k_metrics:
+                # Measure the average latency in seconds(!), hence the 0.001 multiplier.
+                past_k_latencies_sec = \
+                    [float(j_obj["current_latency"]) * 0.001 for j_obj in past_k_metrics]
+                mean_latency = sum(past_k_latencies_sec) / len(past_k_latencies_sec)
+
+                # If timeout threshold is exceeded then cancel and return time out error.
+                if (mean_latency * pending_requests_num) > request_timeout_s:
+                    return Response("Request timed out.", status_code=status.HTTP_504_GATEWAY_TIMEOUT)
+
+    response = await call_next(request)
+    return response
+
 
 @api.get('/')
 async def root():
-    return {'message': 'FedML Federated Inference Service!'}
+    return {'message': 'TensorOpera Inference Service!'}
 
 
 @api.get('/ready')
@@ -141,6 +172,10 @@ async def _predict(
         input_json,
         header=None
 ) -> Union[MutableMapping[str, Any], Response, StreamingResponse]:
+
+    FEDML_MODEL_CACHE.update_pending_requests_counter(increase=True)
+    inference_response = {}
+
     in_end_point_id = end_point_id
     in_end_point_name = input_json.get("end_point_name", None)
     in_model_name = input_json.get("model_name", None)
@@ -170,21 +205,26 @@ async def _predict(
         if not is_endpoint_activated(in_end_point_id):
             inference_response = {"error": True, "message": "endpoint is not activated."}
             logging_inference_request(input_json, inference_response)
+            FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
             return inference_response
 
         # Found idle inference device
         idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url = \
             found_idle_inference_device(in_end_point_id, in_end_point_name, in_model_name, in_model_version)
         if idle_device is None or idle_device == "":
+            FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
             return {"error": True, "error_code": status.HTTP_404_NOT_FOUND,
                     "message": "can not found active inference worker for this endpoint."}
 
         # Start timing for model metrics
         model_metrics = FedMLModelMetrics(end_point_id, in_end_point_name,
                                           model_id, in_model_name, model_version,
-                                          settings.model_infer_url,
-                                          settings.redis_addr, settings.redis_port, settings.redis_password,
-                                          version=settings.version)
+                                          Settings.model_infer_url,
+                                          Settings.redis_addr,
+                                          Settings.redis_port,
+                                          Settings.redis_password,
+                                          version=Settings.version)
+        # Setting time to the time before authentication and idle device discovery.
         model_metrics.set_start_time(start_time)
 
         # Send inference request to idle device
@@ -195,7 +235,12 @@ async def _predict(
             input_list["stream"] = input_list.get("stream", stream_flag)
             output_list = input_json.get("outputs", [])
             inference_response = await send_inference_request(
-                idle_device, end_point_id, inference_output_url, input_list, output_list, inference_type=in_return_type)
+                idle_device,
+                end_point_id,
+                inference_output_url,
+                input_list,
+                output_list,
+                inference_type=in_return_type)
 
         # Calculate model metrics
         try:
@@ -207,11 +252,12 @@ async def _predict(
             pass
 
         logging_inference_request(input_json, inference_response)
-
+        FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
         return inference_response
     else:
         inference_response = {"error": True, "message": "token is not valid."}
         logging_inference_request(input_json, inference_response)
+        FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
         return inference_response
 
 
@@ -221,9 +267,7 @@ def retrieve_info_by_endpoint_id(end_point_id, in_end_point_name=None, in_model_
     We allow missing end_point_name and model_name in the input parameters.
     return end_point_name, model_name
     """
-    FedMLModelCache.get_instance().set_redis_params(settings.redis_addr, settings.redis_port, settings.redis_password)
-    redis_key = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port). \
-        get_end_point_full_key_by_id(end_point_id)
+    redis_key = FEDML_MODEL_CACHE.get_end_point_full_key_by_id(end_point_id)
     if redis_key is not None:
         end_point_name = ""
         model_name = ""
@@ -254,8 +298,7 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_
     inference_output_url = ""
     model_version = ""
     # Found idle device (TODO: optimize the algorithm to search best device for inference)
-    FedMLModelCache.get_instance().set_redis_params(settings.redis_addr, settings.redis_port, settings.redis_password)
-    payload, idle_device = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port). \
+    payload, idle_device = FEDML_MODEL_CACHE.\
         get_idle_device(end_point_id, end_point_name, in_model_name, in_model_version)
     if payload is not None:
         logging.info("found idle deployment result {}".format(payload))
@@ -273,8 +316,12 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_
     return idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url
 
 
-async def send_inference_request(idle_device, endpoint_id, inference_url, input_list, output_list,
+async def send_inference_request(idle_device, end_point_id, inference_url, input_list, output_list,
                                  inference_type="default", has_public_ip=True):
+
+    request_timeout_sec = FEDML_MODEL_CACHE.get_endpoint_settings(end_point_id) \
+        .get("request_timeout_sec", ClientConstants.INFERENCE_REQUEST_TIMEOUT)
+
     try:
         http_infer_available = os.getenv("FEDML_INFERENCE_HTTP_AVAILABLE", True)
         if not http_infer_available:
@@ -283,24 +330,35 @@ async def send_inference_request(idle_device, endpoint_id, inference_url, input_
 
         if http_infer_available:
             response_ok = await FedMLHttpInference.is_inference_ready(
-                inference_url, timeout=os.getenv("FEDML_GATEWAY_HTTP_READY_TIMEOUT", 20))
+                inference_url,
+                timeout=request_timeout_sec)
             if response_ok:
                 response_ok, inference_response = await FedMLHttpInference.run_http_inference_with_curl_request(
-                    inference_url, input_list, output_list, inference_type=inference_type)
+                    inference_url,
+                    input_list,
+                    output_list,
+                    inference_type=inference_type,
+                    timeout=request_timeout_sec)
                 logging.info(f"Use http inference. return {response_ok}")
                 return inference_response
 
         response_ok = await FedMLHttpProxyInference.is_inference_ready(
-            inference_url, timeout=os.getenv("FEDML_GATEWAY_HTTP_PROXY_READY_TIMEOUT", 20))
+            inference_url,
+            timeout=request_timeout_sec)
         if response_ok:
             response_ok, inference_response = await FedMLHttpProxyInference.run_http_proxy_inference_with_request(
-                endpoint_id, inference_url, input_list, output_list, inference_type=inference_type)
+                end_point_id,
+                inference_url,
+                input_list,
+                output_list,
+                inference_type=inference_type,
+                timeout=request_timeout_sec)
             logging.info(f"Use http proxy inference. return {response_ok}")
             return inference_response
 
         if not has_public_ip:
             connect_str = "@FEDML@"
-            random_out = sys_utils.random2(settings.ext_info, "FEDML@9999GREAT")
+            random_out = sys_utils.random2(Settings.ext_info, "FEDML@9999GREAT")
             config_list = random_out.split(connect_str)
             agent_config = dict()
             agent_config["mqtt_config"] = dict()
@@ -309,13 +367,24 @@ async def send_inference_request(idle_device, endpoint_id, inference_url, input_
             agent_config["mqtt_config"]["MQTT_USER"] = config_list[2]
             agent_config["mqtt_config"]["MQTT_PWD"] = config_list[3]
             agent_config["mqtt_config"]["MQTT_KEEPALIVE"] = int(config_list[4])
-            mqtt_inference = FedMLMqttInference(agent_config=agent_config, run_id=endpoint_id)
+            mqtt_inference = FedMLMqttInference(
+                agent_config=agent_config,
+                run_id=end_point_id)
             response_ok = mqtt_inference.run_mqtt_health_check_with_request(
-                idle_device, endpoint_id, inference_url)
+                idle_device,
+                end_point_id,
+                inference_url,
+                timeout=request_timeout_sec)
             inference_response = {"error": True, "message": "Failed to use http, http-proxy and mqtt for inference."}
             if response_ok:
                 response_ok, inference_response = mqtt_inference.run_mqtt_inference_with_request(
-                    idle_device, endpoint_id, inference_url, input_list, output_list, inference_type=inference_type)
+                    idle_device,
+                    end_point_id,
+                    inference_url,
+                    input_list,
+                    output_list,
+                    inference_type=inference_type,
+                    timeout=request_timeout_sec)
 
             logging.info(f"Use mqtt inference. return {response_ok}.")
             return inference_response
@@ -332,22 +401,18 @@ def auth_request_token(end_point_id, end_point_name, model_name, token):
     if token is None:
         return False
 
-    FedMLModelCache.get_instance().set_redis_params(settings.redis_addr, settings.redis_port, settings.redis_password)
-    cached_token = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port). \
+    cached_token = FEDML_MODEL_CACHE.\
         get_end_point_token(end_point_id, end_point_name, model_name)
     if cached_token is not None and str(cached_token) == str(token):
         return True
 
     return False
 
-
 def is_endpoint_activated(end_point_id):
     if end_point_id is None:
         return False
 
-    FedMLModelCache.get_instance().set_redis_params(settings.redis_addr, settings.redis_port, settings.redis_password)
-    activated = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port).get_end_point_activation(
-        end_point_id)
+    activated = FEDML_MODEL_CACHE.get_end_point_activation(end_point_id)
     return activated
 
 
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index bf9cee3279..cc1901de65 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -114,11 +114,11 @@ def run_impl(
             ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYING,
             message_center=self.message_center)
 
-        # start unified inference server
+        # start unified inference gateway process if not started
         FedMLDeployMasterJobRunner.start_device_inference_gateway(
             inference_port=inference_port, agent_config=self.agent_config)
 
-        # start inference monitor server
+        # start inference monitor process
         FedMLDeployMasterJobRunner.stop_device_inference_monitor(
             run_id, end_point_name, model_id, model_name, model_version)
         FedMLDeployMasterJobRunner.start_device_inference_monitor(
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index b65d1bc8de..668d1192ce 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -178,6 +178,9 @@ def callback_start_deployment(self, topic, payload):
         aggregation_window_size_seconds = request_json.get("aggregation_window_size_seconds", 60)
         scale_down_delay_seconds = request_json.get("scale_down_delay_seconds", 120)
 
+        model_config_parameters = request_json.get("parameters", {})
+        timeout_s = model_config_parameters.get("request_timeout_sec", 30)
+
         inference_end_point_id = run_id
 
         logging.info("[Master] received start deployment request for end point {}.".format(run_id))
@@ -197,7 +200,8 @@ def callback_start_deployment(self, topic, payload):
             scale_min=scale_min, scale_max=scale_max, state="DEPLOYING",
             aggregation_window_size_seconds=aggregation_window_size_seconds,
             target_queries_per_replica=target_queries_per_replica,
-            scale_down_delay_seconds=int(scale_down_delay_seconds)
+            scale_down_delay_seconds=int(scale_down_delay_seconds),
+            timeout_s=timeout_s
         )
 
         # Start log processor for current run
diff --git a/python/fedml/workflow/driver_example/customized_job_example/train_job.yaml b/python/fedml/workflow/driver_example/customized_job_example/train_job.yaml
index e0e8f0f3be..86c9df6594 100755
--- a/python/fedml/workflow/driver_example/customized_job_example/train_job.yaml
+++ b/python/fedml/workflow/driver_example/customized_job_example/train_job.yaml
@@ -25,10 +25,12 @@ bootstrap: |
   pip install PyYAML==5.3.1 -i https://pypi.org/simple
   pip install fedml==0.8.29
   pip install -U typing_extensions -i https://pypi.org/simple
+  pip install -U pydantic
+  pip install -U fastapi
   echo "Bootstrap finished."
 
 computing:
-  resource_type: RTX-4090    # e.g., A100-80G, please check the resource type list by "fedml show-resource-type" or visiting URL: https://open.fedml.ai/accelerator_resource_type
+  resource_type: A100-80GB-SXM    # e.g., A100-80G, please check the resource type list by "fedml show-resource-type" or visiting URL: https://open.fedml.ai/accelerator_resource_type
   minimum_num_gpus: 1             # minimum # of GPUs to provision
   maximum_cost_per_hour: $10    # max cost per hour of all machines for your job
   # device_type: GPU # GPU or CPU

From 9296884402f058f9fb64beca79aad6e5dea91596 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Mon, 3 Jun 2024 22:53:22 +0000
Subject: [PATCH 100/282] [Deploy] Fix config reading from redis.

---
 .../scheduler/model_scheduler/device_model_cache.py        | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
index fca7b81d42..45a58c7ab9 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
@@ -900,11 +900,14 @@ def get_endpoint_settings(self, endpoint_id) -> Dict:
             key_pattern = "{}*{}*".format(
                 self.FEDML_MODEL_ENDPOINT_REPLICA_USER_SETTING_TAG,
                 endpoint_id)
+
             endpoint_settings_keys = \
                 self.redis_connection.keys(pattern=key_pattern)
-            if endpoint_settings_keys:
+
+            if len(endpoint_settings_keys) > 0:
                 endpoint_settings = \
-                    json.load(endpoint_settings_keys[0])
+                    self.redis_connection.get(endpoint_settings_keys[0])
+
                 if not isinstance(endpoint_settings, dict):
                     endpoint_settings = json.loads(endpoint_settings)
             else:

From b1312e1db9131aec05213ef22bd48e397e708cf4 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Mon, 3 Jun 2024 17:25:58 -0700
Subject: [PATCH 101/282] Add global env file

---
 python/fedml/__init__.py                              |  9 +++++++--
 python/fedml/computing/scheduler/env/__init__.py      |  3 +++
 python/fedml/computing/scheduler/env/collect_env.py   | 11 +++++++++++
 .../scheduler/scheduler_core/message_center.py        |  4 +++-
 .../computing/scheduler/slave/client_constants.py     |  8 +++++++-
 python/setup.py                                       |  1 +
 6 files changed, 32 insertions(+), 4 deletions(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index 6b3ac3f61b..913457b5c7 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -1,5 +1,4 @@
 import logging
-from copy import deepcopy
 
 import multiprocess as multiprocessing
 import os
@@ -9,7 +8,10 @@
 import torch
 
 import fedml
+import dotenv
+
 from .computing.scheduler.env.collect_env import collect_env
+from fedml.computing.scheduler.env import get_env_file
 from .constants import (
     FEDML_BACKEND_SERVICE_URL_DEV,
     FEDML_BACKEND_SERVICE_URL_LOCAL,
@@ -449,10 +451,13 @@ def _run_distributed():
 
 
 def set_env_version(version):
-    os.environ['FEDML_ENV_VERSION'] = version
+    env_file = get_env_file()
+    dotenv.load_dotenv(dotenv_path=env_file)
+    dotenv.set_key(env_file, "FEDML_ENV_VERSION", version)
 
 
 def get_env_version():
+    dotenv.load_dotenv(dotenv_path=get_env_file())
     return "release" if os.environ.get('FEDML_ENV_VERSION') is None else os.environ['FEDML_ENV_VERSION']
 
 
diff --git a/python/fedml/computing/scheduler/env/__init__.py b/python/fedml/computing/scheduler/env/__init__.py
index e69de29bb2..5bfeaa5509 100644
--- a/python/fedml/computing/scheduler/env/__init__.py
+++ b/python/fedml/computing/scheduler/env/__init__.py
@@ -0,0 +1,3 @@
+import os
+
+from collect_env import get_env_file
diff --git a/python/fedml/computing/scheduler/env/collect_env.py b/python/fedml/computing/scheduler/env/collect_env.py
index b2f7bd7f5e..ef471da3bc 100644
--- a/python/fedml/computing/scheduler/env/collect_env.py
+++ b/python/fedml/computing/scheduler/env/collect_env.py
@@ -4,6 +4,7 @@
 import fedml
 from fedml.computing.scheduler.comm_utils.hardware_utils import HardwareUtil
 from fedml.computing.scheduler.slave.client_diagnosis import ClientDiagnosis
+from ..slave.client_constants import ClientConstants
 
 
 def collect_env():
@@ -108,3 +109,13 @@ def collect_env():
     except Exception as e:
         print(f"The connection exception: {traceback.format_exc()}")
         pass
+
+
+def get_env_file():
+    global_services_dir = ClientConstants.get_global_services_dir()
+    env_config_file = os.path.join(global_services_dir, ".env")
+    # Create file if not exists
+    if not os.path.exists(env_config_file):
+        with open(env_config_file, 'w') as f:
+            f.write("")
+    return env_config_file
diff --git a/python/fedml/computing/scheduler/scheduler_core/message_center.py b/python/fedml/computing/scheduler/scheduler_core/message_center.py
index 869ed6e510..dbe11700a0 100755
--- a/python/fedml/computing/scheduler/scheduler_core/message_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/message_center.py
@@ -11,6 +11,7 @@
 from os.path import expanduser
 
 from fedml.core.distributed.communication.mqtt.mqtt_manager import MqttManager
+from ..slave.client_constants import ClientConstants
 from ....core.mlops.mlops_metrics import MLOpsMetrics
 from operator import methodcaller
 from .message_common import FedMLMessageEntity, FedMLMessageRecord
@@ -466,7 +467,8 @@ class MessageCenterStoppedException(Exception):
 
 class FedMLMessageCenterConstants:
     def __init__(self):
+        global_services_dir = ClientConstants.get_global_services_dir()
         self.home_dir = expanduser("~")
-        self.message_center_dir = os.path.join(self.home_dir, ".fedml", "global_services", "message_center")
+        self.message_center_dir = os.path.join(global_services_dir, "message_center")
         self.message_log_dir = os.path.join(self.message_center_dir, "logs")
         os.makedirs(self.message_log_dir, exist_ok=True)
diff --git a/python/fedml/computing/scheduler/slave/client_constants.py b/python/fedml/computing/scheduler/slave/client_constants.py
index 2e15080541..e5b3d41846 100644
--- a/python/fedml/computing/scheduler/slave/client_constants.py
+++ b/python/fedml/computing/scheduler/slave/client_constants.py
@@ -153,6 +153,13 @@ def get_database_dir():
             os.makedirs(database_dir, exist_ok=True)
         return database_dir
 
+    @staticmethod
+    def get_global_services_dir():
+        home_dir = expanduser("~")
+        global_services_dir = os.path.join(home_dir, ".fedml", "global_services")
+        os.makedirs(global_services_dir, exist_ok=True)
+        return global_services_dir
+
     @staticmethod
     def cleanup_run_process(run_id):
         RunProcessUtils.cleanup_run_process(
@@ -454,7 +461,6 @@ def remove_fedml_parent_pid_file():
                           f"Traceback: {traceback.format_exc()}")
             pass
 
-
 if __name__ == "__main__":
     ignore = "*test*,abc*"
     ignore = tuple(ignore.split(','))
diff --git a/python/setup.py b/python/setup.py
index 0e314de29c..e88788d1ff 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -66,6 +66,7 @@ def finalize_options(self):
     'wget',
     # Need to pin this version due to breaking change released in python docker sdk
     'requests<2.32',
+    'python-dotenv',
 ]
 
 requirements_extra_mpi = [

From 19160a2e2eec75807605a7431eaa342d3ab1b009 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Mon, 3 Jun 2024 17:28:18 -0700
Subject: [PATCH 102/282] Nits

---
 python/fedml/computing/scheduler/env/__init__.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/env/__init__.py b/python/fedml/computing/scheduler/env/__init__.py
index 5bfeaa5509..cc765f0979 100644
--- a/python/fedml/computing/scheduler/env/__init__.py
+++ b/python/fedml/computing/scheduler/env/__init__.py
@@ -1,3 +1 @@
-import os
-
 from collect_env import get_env_file

From 1b2eefe4c140ce86eb500af8e62945f64cf4d52a Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Mon, 3 Jun 2024 17:46:06 -0700
Subject: [PATCH 103/282] Bug fix

---
 python/fedml/computing/scheduler/env/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/env/__init__.py b/python/fedml/computing/scheduler/env/__init__.py
index cc765f0979..f157a33ec2 100644
--- a/python/fedml/computing/scheduler/env/__init__.py
+++ b/python/fedml/computing/scheduler/env/__init__.py
@@ -1 +1 @@
-from collect_env import get_env_file
+from .collect_env import get_env_file

From 3481aa8914636208ea7f152ff214a9619d212f71 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Mon, 3 Jun 2024 17:50:07 -0700
Subject: [PATCH 104/282] Write it to release by default

---
 python/fedml/__init__.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index 913457b5c7..adde71a6d4 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -457,8 +457,10 @@ def set_env_version(version):
 
 
 def get_env_version():
-    dotenv.load_dotenv(dotenv_path=get_env_file())
-    return "release" if os.environ.get('FEDML_ENV_VERSION') is None else os.environ['FEDML_ENV_VERSION']
+    env_file = get_env_file()
+    dotenv.load_dotenv(dotenv_path=env_file)
+    version = "release" if os.environ.get('FEDML_ENV_VERSION') is None else os.environ['FEDML_ENV_VERSION']
+    dotenv.set_key(env_file, "FEDML_ENV_VERSION", version)
 
 
 def _get_backend_service():

From b2ea4d0bd44aaff5378de53dde774d8e5344f0bc Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Mon, 3 Jun 2024 17:51:05 -0700
Subject: [PATCH 105/282] Nit

---
 python/fedml/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index adde71a6d4..a79fc7d60d 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -461,7 +461,7 @@ def get_env_version():
     dotenv.load_dotenv(dotenv_path=env_file)
     version = "release" if os.environ.get('FEDML_ENV_VERSION') is None else os.environ['FEDML_ENV_VERSION']
     dotenv.set_key(env_file, "FEDML_ENV_VERSION", version)
-
+    return version
 
 def _get_backend_service():
     version = get_env_version()

From 21138dd06521a91c2abb598c833f1f09c37134c5 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Mon, 3 Jun 2024 17:51:18 -0700
Subject: [PATCH 106/282] Nit

---
 python/fedml/__init__.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index a79fc7d60d..35658d4920 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -463,6 +463,7 @@ def get_env_version():
     dotenv.set_key(env_file, "FEDML_ENV_VERSION", version)
     return version
 
+
 def _get_backend_service():
     version = get_env_version()
     # from inspect import getframeinfo, stack
@@ -517,7 +518,7 @@ def get_local_on_premise_platform_port():
 
 def _get_local_s3_like_service_url():
     return FEDML_S3_DOMAIN_LOCAL
-    
+
 
 from fedml import device
 from fedml import data

From 1cc15525e2b81a22bd3589b1fc3501bd32f467da Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@fedml.ai>
Date: Tue, 4 Jun 2024 04:38:30 -0400
Subject: [PATCH 107/282] Hotfix mqtt timeout inference constant after
 refactoring.

---
 .../scheduler/model_scheduler/device_mqtt_inference_protocol.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_mqtt_inference_protocol.py b/python/fedml/computing/scheduler/model_scheduler/device_mqtt_inference_protocol.py
index b0bff261a4..1fac5a984b 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_mqtt_inference_protocol.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_mqtt_inference_protocol.py
@@ -105,7 +105,7 @@ def run_mqtt_inference_with_request(
             only_do_health_check=only_do_health_check, timeout=timeout
         )
 
-        allowed_inference_timeout = SchedulerConstants.MQTT_INFERENCE_TIMEOUT if timeout is None else timeout
+        allowed_inference_timeout = timeout if timeout else -1
         sleep_time_interval = 0.05
         total_sleep_time = 0
         while True:

From 8e0318321ac267f18106d2cdead745076ce3dd3b Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@fedml.ai>
Date: Tue, 4 Jun 2024 05:19:36 -0400
Subject: [PATCH 108/282] Improving pending requests counter robustness.

---
 .../model_scheduler/device_model_cache.py     |   6 +-
 .../model_scheduler/device_model_inference.py | 160 +++++++++---------
 2 files changed, 88 insertions(+), 78 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
index 45a58c7ab9..75cf4dbc2a 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
@@ -985,5 +985,9 @@ def update_pending_requests_counter(self, increase=False, decrease=False) -> int
         if increase:
             self.redis_connection.incr(self.FEDML_PENDING_REQUESTS_COUNTER)
         if decrease:
-            self.redis_connection.decr(self.FEDML_PENDING_REQUESTS_COUNTER)
+            # Making sure the counter never becomes negative!
+            if self.get_pending_requests_counter() < 0:
+                self.redis_connection.set(self.FEDML_PENDING_REQUESTS_COUNTER, 0)
+            else:
+                self.redis_connection.decr(self.FEDML_PENDING_REQUESTS_COUNTER)
         return self.get_pending_requests_counter()
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index 26c25bc09f..0e866c9626 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -173,92 +173,98 @@ async def _predict(
         header=None
 ) -> Union[MutableMapping[str, Any], Response, StreamingResponse]:
 
+    # Always increase the pending requests counter on a new incoming request.
     FEDML_MODEL_CACHE.update_pending_requests_counter(increase=True)
     inference_response = {}
 
-    in_end_point_id = end_point_id
-    in_end_point_name = input_json.get("end_point_name", None)
-    in_model_name = input_json.get("model_name", None)
-    in_model_version = input_json.get("model_version", None)
-    in_end_point_token = input_json.get("token", None)
-    in_return_type = "default"
-    if header is not None:
-        in_return_type = header.get("Accept", "default")
-
-    if in_model_version is None:
-        in_model_version = "*"  # * | latest | specific version
-
-    start_time = time.time_ns()
-
-    # Allow missing end_point_name and model_name in the input parameters.
-    if in_model_name is None or in_end_point_name is None:
-        ret_endpoint_name, ret_model_name = retrieve_info_by_endpoint_id(in_end_point_id, in_end_point_name)
-        if in_model_name is None:
-            in_model_name = ret_model_name
-        if in_end_point_name is None:
-            in_end_point_name = ret_endpoint_name
-
-    # Authenticate request token
-    inference_response = {}
-    if auth_request_token(in_end_point_id, in_end_point_name, in_model_name, in_end_point_token):
-        # Check the endpoint is activated
-        if not is_endpoint_activated(in_end_point_id):
-            inference_response = {"error": True, "message": "endpoint is not activated."}
+    try:
+        in_end_point_id = end_point_id
+        in_end_point_name = input_json.get("end_point_name", None)
+        in_model_name = input_json.get("model_name", None)
+        in_model_version = input_json.get("model_version", None)
+        in_end_point_token = input_json.get("token", None)
+        in_return_type = "default"
+        if header is not None:
+            in_return_type = header.get("Accept", "default")
+
+        if in_model_version is None:
+            in_model_version = "*"  # * | latest | specific version
+
+        start_time = time.time_ns()
+
+        # Allow missing end_point_name and model_name in the input parameters.
+        if in_model_name is None or in_end_point_name is None:
+            ret_endpoint_name, ret_model_name = retrieve_info_by_endpoint_id(in_end_point_id, in_end_point_name)
+            if in_model_name is None:
+                in_model_name = ret_model_name
+            if in_end_point_name is None:
+                in_end_point_name = ret_endpoint_name
+
+        # Authenticate request token
+        if auth_request_token(in_end_point_id, in_end_point_name, in_model_name, in_end_point_token):
+            # Check the endpoint is activated
+            if not is_endpoint_activated(in_end_point_id):
+                inference_response = {"error": True, "message": "endpoint is not activated."}
+                logging_inference_request(input_json, inference_response)
+                FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
+                return inference_response
+
+            # Found idle inference device
+            idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url = \
+                found_idle_inference_device(in_end_point_id, in_end_point_name, in_model_name, in_model_version)
+            if idle_device is None or idle_device == "":
+                FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
+                return {"error": True, "error_code": status.HTTP_404_NOT_FOUND,
+                        "message": "can not found active inference worker for this endpoint."}
+
+            # Start timing for model metrics
+            model_metrics = FedMLModelMetrics(end_point_id, in_end_point_name,
+                                              model_id, in_model_name, model_version,
+                                              Settings.model_infer_url,
+                                              Settings.redis_addr,
+                                              Settings.redis_port,
+                                              Settings.redis_password,
+                                              version=Settings.version)
+            # Setting time to the time before authentication and idle device discovery.
+            model_metrics.set_start_time(start_time)
+
+            # Send inference request to idle device
+            logging.info("inference url {}.".format(inference_output_url))
+            if inference_output_url != "":
+                input_list = input_json.get("inputs", input_json)
+                stream_flag = input_json.get("stream", False)
+                input_list["stream"] = input_list.get("stream", stream_flag)
+                output_list = input_json.get("outputs", [])
+                inference_response = await send_inference_request(
+                    idle_device,
+                    end_point_id,
+                    inference_output_url,
+                    input_list,
+                    output_list,
+                    inference_type=in_return_type)
+
+            # Calculate model metrics
+            try:
+                model_metrics.calc_metrics(end_point_id, in_end_point_name,
+                                           model_id, model_name, model_version,
+                                           inference_output_url, idle_device)
+            except Exception as e:
+                logging.info("Calculate Inference Metrics Exception: {}".format(traceback.format_exc()))
+                pass
+
             logging_inference_request(input_json, inference_response)
             FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
             return inference_response
-
-        # Found idle inference device
-        idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url = \
-            found_idle_inference_device(in_end_point_id, in_end_point_name, in_model_name, in_model_version)
-        if idle_device is None or idle_device == "":
+        else:
+            inference_response = {"error": True, "message": "token is not valid."}
+            logging_inference_request(input_json, inference_response)
             FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
-            return {"error": True, "error_code": status.HTTP_404_NOT_FOUND,
-                    "message": "can not found active inference worker for this endpoint."}
-
-        # Start timing for model metrics
-        model_metrics = FedMLModelMetrics(end_point_id, in_end_point_name,
-                                          model_id, in_model_name, model_version,
-                                          Settings.model_infer_url,
-                                          Settings.redis_addr,
-                                          Settings.redis_port,
-                                          Settings.redis_password,
-                                          version=Settings.version)
-        # Setting time to the time before authentication and idle device discovery.
-        model_metrics.set_start_time(start_time)
-
-        # Send inference request to idle device
-        logging.info("inference url {}.".format(inference_output_url))
-        if inference_output_url != "":
-            input_list = input_json.get("inputs", input_json)
-            stream_flag = input_json.get("stream", False)
-            input_list["stream"] = input_list.get("stream", stream_flag)
-            output_list = input_json.get("outputs", [])
-            inference_response = await send_inference_request(
-                idle_device,
-                end_point_id,
-                inference_output_url,
-                input_list,
-                output_list,
-                inference_type=in_return_type)
+            return inference_response
 
-        # Calculate model metrics
-        try:
-            model_metrics.calc_metrics(end_point_id, in_end_point_name,
-                                       model_id, model_name, model_version,
-                                       inference_output_url, idle_device)
-        except Exception as e:
-            logging.info("Calculate Inference Metrics Exception: {}".format(traceback.format_exc()))
-            pass
-
-        logging_inference_request(input_json, inference_response)
-        FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
-        return inference_response
-    else:
-        inference_response = {"error": True, "message": "token is not valid."}
-        logging_inference_request(input_json, inference_response)
+    except Exception as e:
+        logging.error("Inference Exception: {}".format(traceback.format_exc()))
+        # Need to reduce the pending requests counter in whatever exception that may be raised.
         FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
-        return inference_response
 
 
 def retrieve_info_by_endpoint_id(end_point_id, in_end_point_name=None, in_model_name=None,

From b0a55adc403dc4196204d12a596726d0ccdbb156 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@fedml.ai>
Date: Tue, 4 Jun 2024 08:11:20 -0400
Subject: [PATCH 109/282] Returning well formatted json messages in the case of
 errored requests.

---
 .../model_scheduler/device_model_inference.py          | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index 0e866c9626..8192e7c300 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -8,7 +8,7 @@
 from typing import Any, Mapping, MutableMapping, Union
 
 from fastapi import FastAPI, Request, Response, status
-from fastapi.responses import StreamingResponse
+from fastapi.responses import StreamingResponse, JSONResponse
 
 from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants
 from fedml.computing.scheduler.model_scheduler.device_http_inference_protocol import FedMLHttpInference
@@ -60,7 +60,9 @@ async def auth_middleware(request: Request, call_next):
             # Attempt to parse the JSON body.
             request_json = await request.json()
         except json.JSONDecodeError:
-            return Response("Invalid JSON.", status_code=status.HTTP_400_BAD_REQUEST)
+            return JSONResponse(
+                {"error": True, "message": "Invalid JSON."},
+                status_code=status.HTTP_400_BAD_REQUEST)
 
         # Get total pending requests.
         pending_requests_num = FEDML_MODEL_CACHE.get_pending_requests_counter()
@@ -84,7 +86,9 @@ async def auth_middleware(request: Request, call_next):
 
                 # If timeout threshold is exceeded then cancel and return time out error.
                 if (mean_latency * pending_requests_num) > request_timeout_s:
-                    return Response("Request timed out.", status_code=status.HTTP_504_GATEWAY_TIMEOUT)
+                    return JSONResponse(
+                        {"error": True, "message": "Request timed out."},
+                        status_code=status.HTTP_504_GATEWAY_TIMEOUT)
 
     response = await call_next(request)
     return response

From 2e5353609b278f9b4c906d99b6e1f38b179bd3b2 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Tue, 4 Jun 2024 11:59:44 -0700
Subject: [PATCH 110/282] Fix bug

---
 python/fedml/__init__.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index 35658d4920..a41b3e56af 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -459,8 +459,10 @@ def set_env_version(version):
 def get_env_version():
     env_file = get_env_file()
     dotenv.load_dotenv(dotenv_path=env_file)
-    version = "release" if os.environ.get('FEDML_ENV_VERSION') is None else os.environ['FEDML_ENV_VERSION']
-    dotenv.set_key(env_file, "FEDML_ENV_VERSION", version)
+    version = os.getenv('FEDML_ENV_VERSION')
+    if version is None:
+        version = "release"
+        set_env_version(version)
     return version
 
 

From b5e4c252395c0ed316fa8d7b987381c0caeb738c Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Tue, 4 Jun 2024 12:24:37 -0700
Subject: [PATCH 111/282] Make env variables override system, abstract dotenv
 api calls into functions

---
 python/fedml/__init__.py                         | 12 +++++-------
 python/fedml/computing/scheduler/env/__init__.py |  2 +-
 .../fedml/computing/scheduler/env/collect_env.py | 16 ++++++++++++++--
 3 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index a41b3e56af..c13a64566e 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -11,7 +11,7 @@
 import dotenv
 
 from .computing.scheduler.env.collect_env import collect_env
-from fedml.computing.scheduler.env import get_env_file
+from fedml.computing.scheduler.env import set_env_kv, load_env
 from .constants import (
     FEDML_BACKEND_SERVICE_URL_DEV,
     FEDML_BACKEND_SERVICE_URL_LOCAL,
@@ -451,15 +451,13 @@ def _run_distributed():
 
 
 def set_env_version(version):
-    env_file = get_env_file()
-    dotenv.load_dotenv(dotenv_path=env_file)
-    dotenv.set_key(env_file, "FEDML_ENV_VERSION", version)
+    set_env_kv("FEDML_ENV_VERSION", version)
+    load_env()
 
 
 def get_env_version():
-    env_file = get_env_file()
-    dotenv.load_dotenv(dotenv_path=env_file)
-    version = os.getenv('FEDML_ENV_VERSION')
+    load_env()
+    version = os.getenv("FEDML_ENV_VERSION")
     if version is None:
         version = "release"
         set_env_version(version)
diff --git a/python/fedml/computing/scheduler/env/__init__.py b/python/fedml/computing/scheduler/env/__init__.py
index f157a33ec2..0f71de6038 100644
--- a/python/fedml/computing/scheduler/env/__init__.py
+++ b/python/fedml/computing/scheduler/env/__init__.py
@@ -1 +1 @@
-from .collect_env import get_env_file
+from .collect_env import load_env, set_env_kv
diff --git a/python/fedml/computing/scheduler/env/collect_env.py b/python/fedml/computing/scheduler/env/collect_env.py
index ef471da3bc..84b903cdbb 100644
--- a/python/fedml/computing/scheduler/env/collect_env.py
+++ b/python/fedml/computing/scheduler/env/collect_env.py
@@ -2,6 +2,7 @@
 import traceback
 
 import fedml
+import dotenv
 from fedml.computing.scheduler.comm_utils.hardware_utils import HardwareUtil
 from fedml.computing.scheduler.slave.client_diagnosis import ClientDiagnosis
 from ..slave.client_constants import ClientConstants
@@ -112,10 +113,21 @@ def collect_env():
 
 
 def get_env_file():
-    global_serivces_dir = ClientConstants.get_global_services_dir()
-    env_config_file = os.path.join(global_serivces_dir, ".env")
+    global_services_dir = ClientConstants.get_global_services_dir()
+    env_config_file = os.path.join(global_services_dir, ".env")
     # Create file if not exists
     if not os.path.exists(env_config_file):
         with open(env_config_file, 'w') as f:
             f.write("")
     return env_config_file
+
+
+def load_env():
+    env_config_file = get_env_file()
+    dotenv.load_dotenv(dotenv_path=env_config_file, override=True)
+
+
+def set_env_kv(key, value):
+    env_config_file = get_env_file()
+    dotenv.set_key(env_config_file, key, value)
+    load_env()

From e2430fca23c361b37c2ff96614ce155829915541 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@fedml.ai>
Date: Wed, 5 Jun 2024 11:18:30 -0400
Subject: [PATCH 112/282] Renaming endpoint_id key to end_point_id

---
 .../scheduler/model_scheduler/autoscaler/autoscaler.py          | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
index eb9f08b0eb..4cab1e133c 100644
--- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
+++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
@@ -339,7 +339,7 @@ def scale_operation_endpoint(self,
 
         # Fetch all metrics record from the database.
         metrics = self.fedml_model_cache.get_endpoint_metrics(
-            endpoint_id=endpoint_id)
+            end_point_id=endpoint_id)
 
         # Default to nothing.
         scale_op = ScaleOp.NO_OP

From 600905f283dfc9d57b8f315c6844e76cd4ab4f20 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Wed, 5 Jun 2024 19:04:19 +0000
Subject: [PATCH 113/282] [Deploy] Fix multi sub folder issue during
 deployment.

---
 .../model_scheduler/worker_job_runner.py          | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
index 831064b591..370ba57b49 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
@@ -205,7 +205,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
         # Check if the package is already downloaded
         unzip_package_path = ""
         if os.path.exists(os.path.join(models_root_dir, parent_fd)):
-            unzip_package_path = self.find_previous_downloaded_pkg(os.path.join(models_root_dir, parent_fd))
+            unzip_package_path = self.find_previous_downloaded_pkg(os.path.join(models_root_dir, parent_fd), model_name)
 
         # Download the package if not found
         if unzip_package_path == "":
@@ -510,7 +510,7 @@ def build_dynamic_constrain_variables(self, run_id, run_config):
         pass
 
     @staticmethod
-    def find_previous_downloaded_pkg(parent_dir) -> str:
+    def find_previous_downloaded_pkg(parent_dir: str, model_name: str) -> str:
         unzip_fd = ""
         res = ""
 
@@ -519,8 +519,17 @@ def find_previous_downloaded_pkg(parent_dir) -> str:
                 unzip_fd = os.path.join(parent_dir, folder)
                 break
 
+        exact_matched = False
+
         for folder in os.listdir(unzip_fd):
-            if os.path.isdir(os.path.join(unzip_fd, folder)):
+            if folder == model_name:
+                res = os.path.join(unzip_fd, folder)
+                exact_matched = True
+                break
+
+        if not exact_matched:
+            # Use the first folder found
+            for folder in os.listdir(unzip_fd):
                 res = os.path.join(unzip_fd, folder)
                 break
 

From 2ce07f4c53c3670b7b88e41f9c9ed101834d7807 Mon Sep 17 00:00:00 2001
From: alaydshah <alay11shah@gmail.com>
Date: Wed, 5 Jun 2024 19:41:50 +0000
Subject: [PATCH 114/282] Optimize Inference

---
 python/fedml/api/modules/device.py            |   9 +-
 .../computing/scheduler/env/collect_env.py    |   1 +
 .../model_scheduler/device_model_inference.py | 108 ++++++++----------
 .../device_server_constants.py                |  17 ++-
 .../model_scheduler/master_job_runner.py      |  33 +++---
 .../computing/scheduler/slave/client_login.py |   2 -
 python/fedml/core/mlops/mlops_configs.py      |   4 +
 7 files changed, 92 insertions(+), 82 deletions(-)

diff --git a/python/fedml/api/modules/device.py b/python/fedml/api/modules/device.py
index 84aa42e7b2..497fde9005 100644
--- a/python/fedml/api/modules/device.py
+++ b/python/fedml/api/modules/device.py
@@ -58,14 +58,15 @@ def _bind(
         docker, docker_rank, infer_host,
         redis_addr, redis_port, redis_password
 ):
+    fedml.load_env()
     if os.getenv(ModuleConstants.ENV_FEDML_INFER_HOST) is None:
-        os.environ[ModuleConstants.ENV_FEDML_INFER_HOST] = infer_host
+        fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_HOST, infer_host)
     if os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR) is None:
-        os.environ[ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR] = redis_addr
+        fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR, redis_addr)
     if os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT) is None:
-        os.environ[ModuleConstants.ENV_FEDML_INFER_REDIS_PORT] = redis_port
+        fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT, redis_port)
     if os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD) is None:
-        os.environ[ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD] = redis_password
+        fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD, redis_password)
 
     url = fedml._get_backend_service()
     platform_name = platform.system()
diff --git a/python/fedml/computing/scheduler/env/collect_env.py b/python/fedml/computing/scheduler/env/collect_env.py
index 84b903cdbb..da4d54e7a0 100644
--- a/python/fedml/computing/scheduler/env/collect_env.py
+++ b/python/fedml/computing/scheduler/env/collect_env.py
@@ -128,6 +128,7 @@ def load_env():
 
 
 def set_env_kv(key, value):
+    os.environ[key] = value
     env_config_file = get_env_file()
     dotenv.set_key(env_config_file, key, value)
     load_env()
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index 1b6d71ebb7..d39ac0e4e7 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -1,3 +1,4 @@
+import argparse
 import logging
 import time
 import traceback
@@ -8,52 +9,27 @@
 from fastapi import FastAPI, Request, Response, status
 from fastapi.responses import StreamingResponse
 
+import fedml
+from fedml.api.modules.constants import ModuleConstants
 from fedml.computing.scheduler.model_scheduler.device_http_inference_protocol import FedMLHttpInference
 from fedml.computing.scheduler.model_scheduler.device_server_constants import ServerConstants
 from fedml.computing.scheduler.model_scheduler.device_model_monitor import FedMLModelMetrics
 from fedml.computing.scheduler.model_scheduler.device_model_cache import FedMLModelCache
 from fedml.computing.scheduler.model_scheduler.device_mqtt_inference_protocol import FedMLMqttInference
 from fedml.computing.scheduler.model_scheduler.device_http_proxy_inference_protocol import FedMLHttpProxyInference
+from fedml.core.mlops.mlops_configs import MLOpsConfigs
 from fedml.computing.scheduler.comm_utils import sys_utils
 from fedml.core.mlops import MLOpsRuntimeLog, MLOpsRuntimeLogDaemon
 
-try:
-    from pydantic import BaseSettings
-except Exception as e:
-    pass
-try:
-    from pydantic_settings import BaseSettings
-except Exception as e:
-    pass
-
-
-# class Settings(BaseSettings):
-#     redis_addr: str
-#     redis_port: str
-#     redis_password: str
-#     end_point_name: str
-#     model_name: str
-#     model_version: str
-#     model_infer_url: str
-#     version: str
-#     use_mqtt_inference: bool
-#     use_worker_gateway: bool
-#     ext_info: str
-#
-#
-# settings = Settings()
-
-class settings:
-    redis_addr = "127.0.0.1"
-    redis_port = 6379
-    redis_password = "fedml_default"
-    end_point_name = ""
-    model_name = ""
-    model_version = ""
-    model_infer_url = "127.0.0.1"
-    version = "dev"
-    use_mqtt_inference = False
-    use_worker_gateway = False
+
+class Settings:
+    fedml.load_env()
+    redis_addr = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR)
+    redis_port = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT)
+    redis_password = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD)
+    model_infer_host = os.getenv(ModuleConstants.ENV_FEDML_INFER_HOST)
+    version = fedml.get_env_version()
+    mqtt_config = MLOpsConfigs.fetch_mqtt_config()
     ext_info = "2b34303961245c4f175f2236282d7a272c040b0904747579087f6a760112030109010c215d54505707140005190a051c347f365c4a430c020a7d39120e26032a78730f797f7c031f0901657e75"
 
 
@@ -62,6 +38,11 @@ class settings:
 api = FastAPI()
 
 
+@api.on_event("startup")
+async def startup_event():
+    configure_logging()
+
+
 @api.get('/')
 async def root():
     return {'message': 'FedML Federated Inference Service!'}
@@ -185,9 +166,9 @@ async def _predict(
         # Start timing for model metrics
         model_metrics = FedMLModelMetrics(end_point_id, in_end_point_name,
                                           model_id, in_model_name, model_version,
-                                          settings.model_infer_url,
-                                          settings.redis_addr, settings.redis_port, settings.redis_password,
-                                          version=settings.version)
+                                          Settings.model_infer_host,
+                                          Settings.redis_addr, Settings.redis_port, Settings.redis_password,
+                                          version=Settings.version)
         model_metrics.set_start_time(start_time)
 
         # Send inference request to idle device
@@ -224,8 +205,8 @@ def retrieve_info_by_endpoint_id(end_point_id, in_end_point_name=None, in_model_
     We allow missing end_point_name and model_name in the input parameters.
     return end_point_name, model_name
     """
-    FedMLModelCache.get_instance().set_redis_params(settings.redis_addr, settings.redis_port, settings.redis_password)
-    redis_key = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port). \
+    FedMLModelCache.get_instance().set_redis_params(Settings.redis_addr, Settings.redis_port, Settings.redis_password)
+    redis_key = FedMLModelCache.get_instance(Settings.redis_addr, Settings.redis_port). \
         get_end_point_full_key_by_id(end_point_id)
     if redis_key is not None:
         end_point_name = ""
@@ -258,8 +239,8 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_
     inference_output_url = ""
     model_version = ""
     # Found idle device (TODO: optimize the algorithm to search best device for inference)
-    FedMLModelCache.get_instance().set_redis_params(settings.redis_addr, settings.redis_port, settings.redis_password)
-    payload, idle_device = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port). \
+    FedMLModelCache.get_instance().set_redis_params(Settings.redis_addr, Settings.redis_port, Settings.redis_password)
+    payload, idle_device = FedMLModelCache.get_instance(Settings.redis_addr, Settings.redis_port). \
         get_idle_device(end_point_id, end_point_name, in_model_name, in_model_version)
     if payload is not None:
         logging.info("found idle deployment result {}".format(payload))
@@ -304,7 +285,7 @@ async def send_inference_request(idle_device, endpoint_id, inference_url, input_
 
         if not has_public_ip:
             connect_str = "@FEDML@"
-            random_out = sys_utils.random2(settings.ext_info, "FEDML@9999GREAT")
+            random_out = sys_utils.random2(Settings.ext_info, "FEDML@9999GREAT")
             config_list = random_out.split(connect_str)
             agent_config = dict()
             agent_config["mqtt_config"] = dict()
@@ -336,8 +317,8 @@ def auth_request_token(end_point_id, end_point_name, model_name, token):
     if token is None:
         return False
 
-    FedMLModelCache.get_instance().set_redis_params(settings.redis_addr, settings.redis_port, settings.redis_password)
-    cached_token = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port). \
+    FedMLModelCache.get_instance().set_redis_params(Settings.redis_addr, Settings.redis_port, Settings.redis_password)
+    cached_token = FedMLModelCache.get_instance(Settings.redis_addr, Settings.redis_port). \
         get_end_point_token(end_point_id, end_point_name, model_name)
     if cached_token is not None and str(cached_token) == str(token):
         return True
@@ -349,8 +330,8 @@ def is_endpoint_activated(end_point_id):
     if end_point_id is None:
         return False
 
-    FedMLModelCache.get_instance().set_redis_params(settings.redis_addr, settings.redis_port, settings.redis_password)
-    activated = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port).get_end_point_activation(
+    FedMLModelCache.get_instance().set_redis_params(Settings.redis_addr, Settings.redis_port, Settings.redis_password)
+    activated = FedMLModelCache.get_instance(Settings.redis_addr, Settings.redis_port).get_end_point_activation(
         end_point_id)
     return activated
 
@@ -361,8 +342,6 @@ def logging_inference_request(request, response):
 
     try:
         log_dir = ServerConstants.get_log_file_dir()
-        if not os.path.exists(log_dir):
-            os.makedirs(log_dir, exist_ok=True)
         inference_log_file = os.path.join(log_dir, "inference.log")
         with open(inference_log_file, "a") as f:
             f.writelines([f"request: {request}, response: {response}\n"])
@@ -370,15 +349,24 @@ def logging_inference_request(request, response):
         logging.info("failed to log inference request and response to file.")
 
 
-def set_logging_args(args=None):
-    global logging_args
-    logging_args = args
-    if logging_args is not None:
-        # Force run id to 0, as the gateway is shared by all the runs.
-        setattr(args, "run_id", "0")
-        MLOpsRuntimeLog.get_instance(args).init_logs(log_level=logging.INFO)
-        MLOpsRuntimeLogDaemon.get_instance(args).start_log_processor(args.run_id, args.edge_id)
-        logging.info("start the log processor")
+def configure_logging():
+    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    args = parser.parse_args([])
+
+    setattr(args, "log_file_dir", ServerConstants.get_log_file_dir())
+    setattr(args, "run_id", "inference_gateway")
+    setattr(args, "role", "server")
+    setattr(args, "using_mlops", True)
+    setattr(args, "config_version", fedml.get_env_version())
+
+    runner_info = ServerConstants.get_runner_infos()
+    if not (runner_info and "edge_id" in runner_info):
+        raise Exception("Inference gateway couldn't be started as edge_id couldn't be parsed from runner_infos.yaml")
+    setattr(args, "edge_id", runner_info.get("edge_id"))
+
+    MLOpsRuntimeLog.get_instance(args).init_logs(log_level=logging.INFO)
+    MLOpsRuntimeLogDaemon.get_instance(args).start_log_processor(args.run_id, args.edge_id)
+    logging.info("start the log processor for inference gateway")
 
 
 if __name__ == "__main__":
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
index 6b5b335863..eb01fbb599 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
@@ -5,6 +5,7 @@
 import subprocess
 import sys
 from os.path import expanduser
+from pathlib import Path
 
 import psutil
 import yaml
@@ -329,9 +330,23 @@ def save_bootstrap_process(run_id, process_id):
             run_id, process_id, ServerConstants.get_data_dir(), ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME,
             info_file_prefix=SchedulerConstants.RUN_PROCESS_TYPE_BOOTSTRAP_PROCESS)
 
+    @staticmethod
+    def get_runner_infos():
+        local_pkg_data_dir = ServerConstants.get_data_dir()
+        os.makedirs(local_pkg_data_dir, exist_ok=True)
+        os.makedirs(os.path.join(local_pkg_data_dir, ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME), exist_ok=True)
+
+        runner_info_file = os.path.join(local_pkg_data_dir, ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME,
+                                        "runner_infos.yaml")
+        runner_info = {}
+        try:
+            runner_info = yaml.safe_load(Path(runner_info_file).read_text())
+        except Exception as e:
+            logging.error(f"Failed to parse runner info: {e}")
+        return runner_info
+
     @staticmethod
     def save_runner_infos(unique_device_id, edge_id, run_id=None):
-        home_dir = expanduser("~")
         local_pkg_data_dir = ServerConstants.get_data_dir()
         os.makedirs(local_pkg_data_dir, exist_ok=True)
         os.makedirs(os.path.join(local_pkg_data_dir, ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME), exist_ok=True)
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index 4d5974237d..15841600ad 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -24,7 +24,6 @@
 from ..master.base_master_job_runner import FedMLBaseMasterJobRunner
 from .device_replica_controller import FedMLDeviceReplicaController
 from .job_runner_msg_sender import FedMLDeployJobRunnerMsgSender
-from .device_model_inference import set_logging_args
 
 
 class FedMLDeployMasterJobRunner(FedMLBaseMasterJobRunner, FedMLDeployJobRunnerMsgSender, ABC):
@@ -66,11 +65,6 @@ def _generate_job_runner_instance(self, args, run_id=None, request_json=None, ag
     def _generate_extend_queue_list(self):
         return [self.deployment_result_queue]
 
-    @staticmethod
-    def start_inference_gateway_server(inference_gw_cmd, port, args):
-        set_logging_args(args)
-        uvicorn.run(inference_gw_cmd, host="0.0.0.0", port=port, log_level="info")
-
     # Override
     def run_impl(
             self, edge_id_status_queue, edge_device_info_queue, run_metrics_queue,
@@ -493,8 +487,8 @@ def start_device_inference_gateway(
             inference_gw_cmd = "fedml.computing.scheduler.model_scheduler.device_model_inference:api"
             inference_gateway_pids = RunProcessUtils.get_pid_from_cmd_line(inference_gw_cmd)
             if inference_gateway_pids is None or len(inference_gateway_pids) <= 0:
-                # cur_dir = os.path.dirname(__file__)
-                # fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
+                cur_dir = os.path.dirname(__file__)
+                fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
                 # connect_str = "@FEDML@"
                 # ext_info = sys_utils.random1(
                 #     agent_config["mqtt_config"]["BROKER_HOST"] + connect_str +
@@ -503,10 +497,19 @@ def start_device_inference_gateway(
                 #     agent_config["mqtt_config"]["MQTT_PWD"] + connect_str +
                 #     str(agent_config["mqtt_config"]["MQTT_KEEPALIVE"]), "FEDML@9999GREAT")
                 # python_program = get_python_program()
-                inference_gateway_process = multiprocessing.Process(
-                    target=FedMLDeployMasterJobRunner.start_inference_gateway_server, args=(inference_gw_cmd,
-                                                                                            inference_port, args)
-                )
+                # inference_gateway_process = multiprocessing.Process(
+                #     target=FedMLDeployMasterJobRunner.start_inference_gateway_server, args=(inference_gw_cmd,
+                #                                                                             inference_port, args)
+                # )
+                inference_gateway_process = ServerConstants.exec_console_with_script(f"{python_program} "
+                                                                                     f"-m uvicorn {inference_gw_cmd} "
+                                                                                     f"--host 0.0.0.0 "
+                                                                                     f"--port {str(inference_port)} "
+                                                                                     f"--reload --reload-delay 3 "
+                                                                                     f"--reload-dir {fedml_base_dir} "
+                                                                                     f"--log-level critical",
+                                                                                     should_capture_stdout=False,
+                                                                                     should_capture_stderr=False)
                 # inference_gateway_process = ServerConstants.exec_console_with_script(
                 #     "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" "
                 #     "END_POINT_NAME=\"{}\" "
@@ -518,9 +521,9 @@ def start_device_inference_gateway(
                 #         "", "", "", fedml.get_env_version(), use_mqtt_inference,
                 #         use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port),
                 #         fedml_base_dir),
-                #     should_capture_stdout=False, should_capture_stderr=False)
-                inference_gateway_process.daemon = True
-                inference_gateway_process.start()
+                #     )
+                # inference_gateway_process.daemon = True
+                # inference_gateway_process.start()
 
                 return inference_gateway_process
             else:
diff --git a/python/fedml/computing/scheduler/slave/client_login.py b/python/fedml/computing/scheduler/slave/client_login.py
index 37a6dc8064..95c772a225 100755
--- a/python/fedml/computing/scheduler/slave/client_login.py
+++ b/python/fedml/computing/scheduler/slave/client_login.py
@@ -30,8 +30,6 @@ def logout():
     if args.api_key == "":
         args.api_key = args.user
 
-    fedml.set_env_version("test")
-
     if args.local_on_premise_platform_host != "127.0.0.1":
         fedml.set_local_on_premise_platform_host(args.local_on_premise_platform_host)
     if args.local_on_premise_platform_port != 80:
diff --git a/python/fedml/core/mlops/mlops_configs.py b/python/fedml/core/mlops/mlops_configs.py
index 6c25c38128..c1b46fd0dd 100644
--- a/python/fedml/core/mlops/mlops_configs.py
+++ b/python/fedml/core/mlops/mlops_configs.py
@@ -154,6 +154,10 @@ def fetch_all_configs():
                 fetched_configs[Configs.ML_OPS_CONFIG],
                 fetched_configs[Configs.DOCKER_CONFIG])
 
+    @staticmethod
+    def fetch_mqtt_config():
+        return MLOpsConfigs._fetch_configs({Configs.MQTT_CONFIG})
+
 
 if __name__ == "__main__":
     fedml.set_env_version("release")

From c1e37af1385f2746193658ff852f5b545afda13d Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Wed, 5 Jun 2024 20:02:57 +0000
Subject: [PATCH 115/282] Nit.

---
 .../computing/scheduler/model_scheduler/worker_job_runner.py   | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
index 370ba57b49..348b760153 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
@@ -521,6 +521,9 @@ def find_previous_downloaded_pkg(parent_dir: str, model_name: str) -> str:
 
         exact_matched = False
 
+        if unzip_fd == "":
+            return res
+
         for folder in os.listdir(unzip_fd):
             if folder == model_name:
                 res = os.path.join(unzip_fd, folder)

From a4b8ad23969efec0c51758a3068590b7c0152bb0 Mon Sep 17 00:00:00 2001
From: alaydshah <alay11shah@gmail.com>
Date: Wed, 5 Jun 2024 22:18:27 +0000
Subject: [PATCH 116/282] Pipe in MQTT config directly instead of deserializing
 the object

---
 .../model_scheduler/device_model_inference.py | 30 +++----------
 .../model_scheduler/master_job_runner.py      | 45 ++-----------------
 .../master_job_runner_manager.py              |  4 +-
 .../master_protocol_manager.py                |  2 +-
 python/fedml/core/mlops/mlops_configs.py      |  3 +-
 5 files changed, 16 insertions(+), 68 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index f55eda9047..3d9db78a23 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -21,7 +21,6 @@
 from fedml.computing.scheduler.model_scheduler.device_mqtt_inference_protocol import FedMLMqttInference
 from fedml.computing.scheduler.model_scheduler.device_http_proxy_inference_protocol import FedMLHttpProxyInference
 from fedml.core.mlops.mlops_configs import MLOpsConfigs
-from fedml.computing.scheduler.comm_utils import sys_utils
 from fedml.core.mlops import MLOpsRuntimeLog, MLOpsRuntimeLogDaemon
 
 
@@ -33,11 +32,8 @@ class Settings:
     model_infer_host = os.getenv(ModuleConstants.ENV_FEDML_INFER_HOST)
     version = fedml.get_env_version()
     mqtt_config = MLOpsConfigs.fetch_mqtt_config()
-    ext_info = "2b34303961245c4f175f2236282d7a272c040b0904747579087f6a760112030109010c215d54505707140005190a051c347f365c4a430c020a7d39120e26032a78730f797f7c031f0901657e75"
 
 
-logging_args = None
-
 api = FastAPI()
 
 FEDML_MODEL_CACHE = FedMLModelCache.get_instance().set_redis_params(redis_addr=Settings.redis_addr,
@@ -47,7 +43,6 @@ class Settings:
 
 @api.middleware("http")
 async def auth_middleware(request: Request, call_next):
-
     if "/inference" in request.url.path or "/api/v1/predict" in request.url.path:
         try:
             # Attempt to parse the JSON body.
@@ -138,7 +133,7 @@ async def predict_openai(end_point_id, request: Request):
     try:
         response = await _predict(end_point_id, input_json, header)
     except Exception as e:
-        response = {"error": True, "message": f"{traceback.format_exc()}"}
+        response = {"error": True, "message": f"{traceback.format_exc()}, exception {e}"}
 
     return response
 
@@ -174,7 +169,6 @@ async def _predict(
         input_json,
         header=None
 ) -> Union[MutableMapping[str, Any], Response, StreamingResponse]:
-
     # Always increase the pending requests counter on a new incoming request.
     FEDML_MODEL_CACHE.update_pending_requests_counter(increase=True)
     inference_response = {}
@@ -222,7 +216,7 @@ async def _predict(
             # Start timing for model metrics
             model_metrics = FedMLModelMetrics(end_point_id, in_end_point_name,
                                               model_id, in_model_name, model_version,
-                                              Settings.model_infer_url,
+                                              Settings.model_infer_host,
                                               Settings.redis_addr,
                                               Settings.redis_port,
                                               Settings.redis_password,
@@ -269,7 +263,6 @@ async def _predict(
         FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
 
 
-
 def retrieve_info_by_endpoint_id(end_point_id, in_end_point_name=None, in_model_name=None,
                                  in_model_version=None, enable_check=False):
     """
@@ -308,7 +301,7 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_
     inference_output_url = ""
     model_version = ""
     # Found idle device (TODO: optimize the algorithm to search best device for inference)
-    payload, idle_device = FEDML_MODEL_CACHE.\
+    payload, idle_device = FEDML_MODEL_CACHE. \
         get_idle_device(end_point_id, end_point_name, in_model_name, in_model_version)
     if payload is not None:
         logging.info("found idle deployment result {}".format(payload))
@@ -328,7 +321,6 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_
 
 async def send_inference_request(idle_device, end_point_id, inference_url, input_list, output_list,
                                  inference_type="default", has_public_ip=True):
-
     request_timeout_sec = FEDML_MODEL_CACHE.get_endpoint_settings(end_point_id) \
         .get("request_timeout_sec", ClientConstants.INFERENCE_REQUEST_TIMEOUT)
 
@@ -367,16 +359,7 @@ async def send_inference_request(idle_device, end_point_id, inference_url, input
             return inference_response
 
         if not has_public_ip:
-            connect_str = "@FEDML@"
-            random_out = sys_utils.random2(Settings.ext_info, "FEDML@9999GREAT")
-            config_list = random_out.split(connect_str)
-            agent_config = dict()
-            agent_config["mqtt_config"] = dict()
-            agent_config["mqtt_config"]["BROKER_HOST"] = config_list[0]
-            agent_config["mqtt_config"]["BROKER_PORT"] = int(config_list[1])
-            agent_config["mqtt_config"]["MQTT_USER"] = config_list[2]
-            agent_config["mqtt_config"]["MQTT_PWD"] = config_list[3]
-            agent_config["mqtt_config"]["MQTT_KEEPALIVE"] = int(config_list[4])
+            agent_config = {"mqtt_config": Settings.mqtt_config}
             mqtt_inference = FedMLMqttInference(
                 agent_config=agent_config,
                 run_id=end_point_id)
@@ -410,12 +393,13 @@ async def send_inference_request(idle_device, end_point_id, inference_url, input
 def auth_request_token(end_point_id, end_point_name, model_name, token):
     if token is None:
         return False
-    cached_token = FEDML_MODEL_CACHE.\
+    cached_token = FEDML_MODEL_CACHE. \
         get_end_point_token(end_point_id, end_point_name, model_name)
     if cached_token is not None and str(cached_token) == str(token):
         return True
     return False
 
+
 def is_endpoint_activated(end_point_id):
     if end_point_id is None:
         return False
@@ -433,7 +417,7 @@ def logging_inference_request(request, response):
         with open(inference_log_file, "a") as f:
             f.writelines([f"request: {request}, response: {response}\n"])
     except Exception as ex:
-        logging.info("failed to log inference request and response to file.")
+        logging.info(f"failed to log inference request and response to file with exception {ex}")
 
 
 def configure_logging():
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index bd40b71c0e..d8a81e016f 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -1,7 +1,6 @@
 import copy
 import json
 import logging
-import multiprocessing
 import os
 import time
 import queue
@@ -10,7 +9,6 @@
 from multiprocessing import Queue
 
 import fedml
-import uvicorn
 from fedml.core.mlops import MLOpsRuntimeLog, MLOpsConfigs
 from fedml.core.mlops.mlops_runtime_log import MLOpsFormatter
 from .device_client_constants import ClientConstants
@@ -117,8 +115,7 @@ def run_impl(
             message_center=self.message_center)
 
         # start unified inference gateway process if not started
-        FedMLDeployMasterJobRunner.start_device_inference_gateway(
-            args=self.args, inference_port=inference_port, agent_config=self.agent_config)
+        FedMLDeployMasterJobRunner.start_device_inference_gateway(inference_port=inference_port)
 
         # start inference monitor process
         FedMLDeployMasterJobRunner.stop_device_inference_monitor(
@@ -469,10 +466,7 @@ def cleanup_runner_process(self, run_id):
         ServerConstants.cleanup_run_process(run_id, not_kill_subprocess=True)
 
     @staticmethod
-    def start_device_inference_gateway(
-            args, inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT, agent_config=None,
-            redis_addr="localhost", redis_port=6379, redis_password="fedml_default"
-    ):
+    def start_device_inference_gateway(inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT):
         # start unified inference server
         python_program = get_python_program()
         master_port = os.getenv("FEDML_MASTER_PORT", None)
@@ -480,27 +474,11 @@ def start_device_inference_gateway(
             inference_port = int(master_port)
         if not ServerConstants.is_running_on_k8s():
             logging.info(f"start the model inference gateway...")
-            # use_mqtt_inference = os.getenv("FEDML_USE_MQTT_INFERENCE", "False")
-            # use_mqtt_inference = True if use_mqtt_inference.lower() == 'true' else False
-            # use_worker_gateway = os.getenv("FEDML_USE_WORKER_GATEWAY", "False")
-            # use_worker_gateway = True if use_worker_gateway.lower() == 'true' else False
             inference_gw_cmd = "fedml.computing.scheduler.model_scheduler.device_model_inference:api"
             inference_gateway_pids = RunProcessUtils.get_pid_from_cmd_line(inference_gw_cmd)
             if inference_gateway_pids is None or len(inference_gateway_pids) <= 0:
                 cur_dir = os.path.dirname(__file__)
                 fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
-                # connect_str = "@FEDML@"
-                # ext_info = sys_utils.random1(
-                #     agent_config["mqtt_config"]["BROKER_HOST"] + connect_str +
-                #     str(agent_config["mqtt_config"]["BROKER_PORT"]) + connect_str +
-                #     agent_config["mqtt_config"]["MQTT_USER"] + connect_str +
-                #     agent_config["mqtt_config"]["MQTT_PWD"] + connect_str +
-                #     str(agent_config["mqtt_config"]["MQTT_KEEPALIVE"]), "FEDML@9999GREAT")
-                # python_program = get_python_program()
-                # inference_gateway_process = multiprocessing.Process(
-                #     target=FedMLDeployMasterJobRunner.start_inference_gateway_server, args=(inference_gw_cmd,
-                #                                                                             inference_port, args)
-                # )
                 inference_gateway_process = ServerConstants.exec_console_with_script(f"{python_program} "
                                                                                      f"-m uvicorn {inference_gw_cmd} "
                                                                                      f"--host 0.0.0.0 "
@@ -510,21 +488,6 @@ def start_device_inference_gateway(
                                                                                      f"--log-level critical",
                                                                                      should_capture_stdout=False,
                                                                                      should_capture_stderr=False)
-                # inference_gateway_process = ServerConstants.exec_console_with_script(
-                #     "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" "
-                #     "END_POINT_NAME=\"{}\" "
-                #     "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" "
-                #     "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} "
-                #     "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
-                #     "--log-level critical".format(
-                #         redis_addr, str(redis_port), redis_password, "",
-                #         "", "", "", fedml.get_env_version(), use_mqtt_inference,
-                #         use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port),
-                #         fedml_base_dir),
-                #     )
-                # inference_gateway_process.daemon = True
-                # inference_gateway_process.start()
-
                 return inference_gateway_process
             else:
                 return inference_gateway_pids[0]
@@ -562,7 +525,7 @@ def stop_device_inference_monitor(run_id, end_point_name, model_id, model_name,
                                                   model_id, model_name, model_version)
 
     @staticmethod
-    def recover_inference_and_monitor(args):
+    def recover_inference_and_monitor():
         # noinspection PyBroadException
         try:
             agent_config = dict()
@@ -589,7 +552,7 @@ def recover_inference_and_monitor(args):
                 if not is_activated:
                     continue
 
-                FedMLDeployMasterJobRunner.start_device_inference_gateway(args=args, inference_port=inference_port, agent_config=agent_config)
+                FedMLDeployMasterJobRunner.start_device_inference_gateway(inference_port=inference_port)
 
                 FedMLDeployMasterJobRunner.stop_device_inference_monitor(
                     run_id, end_point_name, model_id, model_name, model_version)
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
index 02a6fde329..c761cd6d8f 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
@@ -55,8 +55,8 @@ def stop_device_inference_monitor(self, run_id, end_point_name, model_id, model_
                 run_id, end_point_name, model_id, model_name, model_version)
 
     @staticmethod
-    def recover_inference_and_monitor(args):
-        FedMLDeployMasterJobRunner.recover_inference_and_monitor(args=args)
+    def recover_inference_and_monitor():
+        FedMLDeployMasterJobRunner.recover_inference_and_monitor()
 
     @staticmethod
     def generate_request_json_with_replica_num_diff(run_id, edge_id, request_json):
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index d90857c0ab..668d1192ce 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -82,7 +82,7 @@ def _init_extra_items(self):
         except Exception as e:
             pass
 
-        FedMLDeployJobRunnerManager.recover_inference_and_monitor(args = self.args)
+        FedMLDeployJobRunnerManager.recover_inference_and_monitor()
 
     # Override
     def _process_connection_ready(self):
diff --git a/python/fedml/core/mlops/mlops_configs.py b/python/fedml/core/mlops/mlops_configs.py
index c1b46fd0dd..1ed2e0476d 100644
--- a/python/fedml/core/mlops/mlops_configs.py
+++ b/python/fedml/core/mlops/mlops_configs.py
@@ -156,7 +156,8 @@ def fetch_all_configs():
 
     @staticmethod
     def fetch_mqtt_config():
-        return MLOpsConfigs._fetch_configs({Configs.MQTT_CONFIG})
+        fetched_config = MLOpsConfigs._fetch_configs({Configs.MQTT_CONFIG})
+        return fetched_config[Configs.MQTT_CONFIG]
 
 
 if __name__ == "__main__":

From 0ad81ce9819ecf48ee678fd51d27b01a99741953 Mon Sep 17 00:00:00 2001
From: alaydshah <alay11shah@gmail.com>
Date: Wed, 5 Jun 2024 23:59:21 +0000
Subject: [PATCH 117/282] Nits

---
 .../model_scheduler/device_model_inference.py     |  2 +-
 .../device_mqtt_inference_protocol.py             |  1 -
 .../model_scheduler/master_job_runner.py          |  2 +-
 .../fedml/core/mlops/mlops_runtime_log_daemon.py  | 15 ++++++++++++---
 4 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index 3d9db78a23..111052faf1 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -442,7 +442,7 @@ def configure_logging():
 
 if __name__ == "__main__":
     import uvicorn
-
     port = 2203
     logging.basicConfig(level=logging.INFO)
+    configure_logging()
     uvicorn.run(api, host="0.0.0.0", port=port, log_level="info")
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_mqtt_inference_protocol.py b/python/fedml/computing/scheduler/model_scheduler/device_mqtt_inference_protocol.py
index 1fac5a984b..9cd5c1e9a2 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_mqtt_inference_protocol.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_mqtt_inference_protocol.py
@@ -10,7 +10,6 @@
 
 import asyncio
 
-from ..comm_utils.constants import SchedulerConstants
 from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager
 from .device_http_inference_protocol import FedMLHttpInference
 
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index d8a81e016f..a10bd2c559 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -485,7 +485,7 @@ def start_device_inference_gateway(inference_port=ServerConstants.MODEL_INFERENC
                                                                                      f"--port {str(inference_port)} "
                                                                                      f"--reload --reload-delay 3 "
                                                                                      f"--reload-dir {fedml_base_dir} "
-                                                                                     f"--log-level critical",
+                                                                                     f"--log-level info",
                                                                                      should_capture_stdout=False,
                                                                                      should_capture_stderr=False)
                 return inference_gateway_process
diff --git a/python/fedml/core/mlops/mlops_runtime_log_daemon.py b/python/fedml/core/mlops/mlops_runtime_log_daemon.py
index 64bd982ae3..7791d8f4e5 100644
--- a/python/fedml/core/mlops/mlops_runtime_log_daemon.py
+++ b/python/fedml/core/mlops/mlops_runtime_log_daemon.py
@@ -223,23 +223,32 @@ def __upload(self, log_upload_request) -> bool:
         if cert_path is not None:
             try:
                 requests.session().verify = cert_path
+                logging.info(f"FedMLDebug POST log to server. log_headers: {log_headers}, "
+                             f"log_upload_request: {log_upload_request}, url: {self.log_server_url}")
+
                 # logging.info(f"FedMLDebug POST log to server. run_id {run_id}, device_id {device_id}")
                 response = requests.post(
                     self.log_server_url, json=log_upload_request, verify=True, headers=log_headers
                 )
+                logging.info(f"FedMLDebug POST log to server. response: {response}")
                 # logging.info(f"FedMLDebug POST log to server run_id {run_id}, device_id {device_id}. response.status_code: {response.status_code}")
 
             except requests.exceptions.SSLError as err:
                 MLOpsConfigs.install_root_ca_file()
                 # logging.info(f"FedMLDebug POST log to server. run_id {run_id}, device_id {device_id}")
+                logging.info(f"FedMLDebug POST log to server. log_headers: {log_headers}, "
+                             f"log_upload_request: {log_upload_request}, url: {self.log_server_url}")
+
                 response = requests.post(
                     self.log_server_url, json=log_upload_request, verify=True, headers=log_headers
                 )
+                logging.info(f"FedMLDebug POST log to server. response: {response}")
                 # logging.info(f"FedMLDebug POST log to server run_id {run_id}, device_id {device_id}. response.status_code: {response.status_code}")
         else:
-            # logging.info(f"FedMLDebug POST log to server. run_id {run_id}, device_id {device_id}")
+            logging.info(f"FedMLDebug POST log to server. log_headers: {log_headers}, "
+                         f"log_upload_request: {log_upload_request}, url: {self.log_server_url}")
             response = requests.post(self.log_server_url, headers=log_headers, json=log_upload_request)
-            # logging.info(f"FedMLDebug POST log to server. run_id {run_id}, device_id {device_id}. response.status_code: {response.status_code}")
+            logging.info(f"FedMLDebug POST log to server. response: {response}")
         if response.status_code != 200:
             logging.error(f"Failed to upload log to server. run_id {self.run_id}, device_id {self.device_id}. "
                           f"response.status_code: {response.status_code}")
@@ -404,9 +413,9 @@ def __new__(cls, *args, **kwargs):
     def __init__(self, in_args):
         self.args = in_args
         self.edge_id = MLOpsLoggingUtils.get_edge_id_from_args(self.args)
+        url = fedml._get_backend_service()
         try:
             if self.args.log_server_url is None or self.args.log_server_url == "":
-                url = fedml._get_backend_service()
                 self.log_server_url = f"{url}/fedmlLogsServer/logs/update"
             else:
                 self.log_server_url = self.args.log_server_url

From d71387651e813e93865fd2e864cb685c874a813c Mon Sep 17 00:00:00 2001
From: alaydshah <alay11shah@gmail.com>
Date: Thu, 6 Jun 2024 04:48:58 +0000
Subject: [PATCH 118/282] Fix bugs

---
 .../model_scheduler/device_model_inference.py          | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index 111052faf1..57f9d80208 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -25,6 +25,7 @@
 
 
 class Settings:
+    server_name = "DEVICE_INFERENCE_GATEWAY"
     fedml.load_env()
     redis_addr = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR)
     redis_port = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT)
@@ -425,7 +426,7 @@ def configure_logging():
     args = parser.parse_args([])
 
     setattr(args, "log_file_dir", ServerConstants.get_log_file_dir())
-    setattr(args, "run_id", "inference_gateway")
+    setattr(args, "run_id", -1)
     setattr(args, "role", "server")
     setattr(args, "using_mlops", True)
     setattr(args, "config_version", fedml.get_env_version())
@@ -433,10 +434,12 @@ def configure_logging():
     runner_info = ServerConstants.get_runner_infos()
     if not (runner_info and "edge_id" in runner_info):
         raise Exception("Inference gateway couldn't be started as edge_id couldn't be parsed from runner_infos.yaml")
-    setattr(args, "edge_id", runner_info.get("edge_id"))
+    setattr(args, "edge_id", int(runner_info.get("edge_id")))
 
     MLOpsRuntimeLog.get_instance(args).init_logs(log_level=logging.INFO)
-    MLOpsRuntimeLogDaemon.get_instance(args).start_log_processor(args.run_id, args.edge_id)
+    MLOpsRuntimeLogDaemon.get_instance(args).start_log_processor(log_run_id=args.run_id, log_device_id=args.edge_id,
+                                                                 log_source=Settings.server_name,
+                                                                 log_file_prefix=Settings.server_name)
     logging.info("start the log processor for inference gateway")
 
 
@@ -444,5 +447,4 @@ def configure_logging():
     import uvicorn
     port = 2203
     logging.basicConfig(level=logging.INFO)
-    configure_logging()
     uvicorn.run(api, host="0.0.0.0", port=port, log_level="info")

From 134f63e3adab71c976cfd012459fc7db6b744906 Mon Sep 17 00:00:00 2001
From: alaydshah <alay11shah@gmail.com>
Date: Thu, 6 Jun 2024 05:03:03 +0000
Subject: [PATCH 119/282] Remove info logging added for debugging

---
 .../fedml/core/mlops/mlops_runtime_log_daemon.py  | 15 ---------------
 1 file changed, 15 deletions(-)

diff --git a/python/fedml/core/mlops/mlops_runtime_log_daemon.py b/python/fedml/core/mlops/mlops_runtime_log_daemon.py
index 7791d8f4e5..ff06dc91b3 100644
--- a/python/fedml/core/mlops/mlops_runtime_log_daemon.py
+++ b/python/fedml/core/mlops/mlops_runtime_log_daemon.py
@@ -223,32 +223,17 @@ def __upload(self, log_upload_request) -> bool:
         if cert_path is not None:
             try:
                 requests.session().verify = cert_path
-                logging.info(f"FedMLDebug POST log to server. log_headers: {log_headers}, "
-                             f"log_upload_request: {log_upload_request}, url: {self.log_server_url}")
-
-                # logging.info(f"FedMLDebug POST log to server. run_id {run_id}, device_id {device_id}")
                 response = requests.post(
                     self.log_server_url, json=log_upload_request, verify=True, headers=log_headers
                 )
-                logging.info(f"FedMLDebug POST log to server. response: {response}")
-                # logging.info(f"FedMLDebug POST log to server run_id {run_id}, device_id {device_id}. response.status_code: {response.status_code}")
 
             except requests.exceptions.SSLError as err:
                 MLOpsConfigs.install_root_ca_file()
-                # logging.info(f"FedMLDebug POST log to server. run_id {run_id}, device_id {device_id}")
-                logging.info(f"FedMLDebug POST log to server. log_headers: {log_headers}, "
-                             f"log_upload_request: {log_upload_request}, url: {self.log_server_url}")
-
                 response = requests.post(
                     self.log_server_url, json=log_upload_request, verify=True, headers=log_headers
                 )
-                logging.info(f"FedMLDebug POST log to server. response: {response}")
-                # logging.info(f"FedMLDebug POST log to server run_id {run_id}, device_id {device_id}. response.status_code: {response.status_code}")
         else:
-            logging.info(f"FedMLDebug POST log to server. log_headers: {log_headers}, "
-                         f"log_upload_request: {log_upload_request}, url: {self.log_server_url}")
             response = requests.post(self.log_server_url, headers=log_headers, json=log_upload_request)
-            logging.info(f"FedMLDebug POST log to server. response: {response}")
         if response.status_code != 200:
             logging.error(f"Failed to upload log to server. run_id {self.run_id}, device_id {self.device_id}. "
                           f"response.status_code: {response.status_code}")

From fd446b0a1aed81b320da001295c3e44b232cbe4c Mon Sep 17 00:00:00 2001
From: alaydshah <alay11shah@gmail.com>
Date: Thu, 6 Jun 2024 05:49:01 +0000
Subject: [PATCH 120/282] Fix

---
 .../scheduler/model_scheduler/device_model_inference.py    | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index 57f9d80208..7b3ac1d0bf 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -37,9 +37,10 @@ class Settings:
 
 api = FastAPI()
 
-FEDML_MODEL_CACHE = FedMLModelCache.get_instance().set_redis_params(redis_addr=Settings.redis_addr,
-                                                                    redis_port=Settings.redis_port,
-                                                                    redis_password=Settings.redis_password)
+FEDML_MODEL_CACHE = FedMLModelCache.get_instance()
+FEDML_MODEL_CACHE.set_redis_params(redis_addr=Settings.redis_addr,
+                                   redis_port=Settings.redis_port,
+                                   redis_password=Settings.redis_password)
 
 
 @api.middleware("http")

From 19abac107d31439ca1c3269b53414c92eff02c2f Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 6 Jun 2024 17:29:56 +0800
Subject: [PATCH 121/282] [CoreEngine] update the version and dependent libs.

---
 python/fedml/__init__.py | 2 +-
 python/setup.py          | 5 ++++-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index c13a64566e..21da84c9ab 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -36,7 +36,7 @@
 _global_training_type = None
 _global_comm_backend = None
 
-__version__ = "0.8.31"
+__version__ = "0.9.0"
 
 
 # This is the deployment environment used for different roles (RD/PM/BD/Public Developers). Potential VALUE: local, dev, test, release
diff --git a/python/setup.py b/python/setup.py
index e88788d1ff..9651465d32 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -67,6 +67,9 @@ def finalize_options(self):
     # Need to pin this version due to breaking change released in python docker sdk
     'requests<2.32',
     'python-dotenv',
+    'protobuf>=3.20.2,<4.0dev',
+    'typer<0.10.0,>=0.3.0',
+    'fastapi-cli==0.0.1'
 ]
 
 requirements_extra_mpi = [
@@ -123,7 +126,7 @@ def finalize_options(self):
 
 setup(
     name="fedml",
-    version="0.8.31",
+    version="0.9.0",
     author="FedML Team",
     author_email="ch@fedml.ai",
     description="A research and production integrated edge-cloud library for "

From 27ad2e7a21cf3f98ffbd9443a4db6f85218bbb92 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 6 Jun 2024 18:11:16 +0800
Subject: [PATCH 122/282] [CoreEngine] remove the deprecated files in the
 scheduler.

---
 .../master/server_runner_deprecated.py        | 2775 -----------------
 .../device_client_runner_deprecated.py        | 1483 ---------
 .../device_server_runner_deprecated.py        | 2022 ------------
 .../slave/client_runner_deprecated.py         | 1872 -----------
 4 files changed, 8152 deletions(-)
 delete mode 100755 python/fedml/computing/scheduler/master/server_runner_deprecated.py
 delete mode 100755 python/fedml/computing/scheduler/model_scheduler/device_client_runner_deprecated.py
 delete mode 100755 python/fedml/computing/scheduler/model_scheduler/device_server_runner_deprecated.py
 delete mode 100755 python/fedml/computing/scheduler/slave/client_runner_deprecated.py

diff --git a/python/fedml/computing/scheduler/master/server_runner_deprecated.py b/python/fedml/computing/scheduler/master/server_runner_deprecated.py
deleted file mode 100755
index 238349a3e4..0000000000
--- a/python/fedml/computing/scheduler/master/server_runner_deprecated.py
+++ /dev/null
@@ -1,2775 +0,0 @@
-import base64
-import copy
-import json
-import logging
-import platform
-import queue
-import sys
-
-import multiprocessing
-from multiprocessing import Process, Queue, Value, Array
-import os
-import shutil
-import stat
-import subprocess
-import threading
-
-import time
-import traceback
-import urllib
-import uuid
-import zipfile
-from os import listdir
-from urllib.parse import urljoin, urlparse
-
-import requests
-
-import fedml
-from ..comm_utils.job_cleanup import JobCleanup
-from ..scheduler_core.scheduler_matcher import SchedulerMatcher
-from ..comm_utils.constants import SchedulerConstants
-from ..comm_utils.job_utils import JobRunnerUtils
-from ..comm_utils.run_process_utils import RunProcessUtils
-from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
-
-from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager
-from ..comm_utils.yaml_utils import load_yaml_config
-from ..slave.client_constants import ClientConstants
-from ..master.server_constants import ServerConstants
-
-from ....core.mlops.mlops_metrics import MLOpsMetrics
-
-from ....core.mlops.mlops_configs import MLOpsConfigs
-from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
-from ....core.mlops.mlops_status import MLOpsStatus
-from ..comm_utils.sys_utils import get_sys_runner_info, get_python_program
-from ..comm_utils import sys_utils
-from .server_data_interface import FedMLServerDataInterface
-from ....core.mlops.mlops_utils import MLOpsUtils
-from ..scheduler_entry.constants import Constants
-from ..model_scheduler.model_device_server import FedMLModelDeviceServerRunner
-from ..model_scheduler.device_model_cards import FedMLModelCards
-from ..model_scheduler import device_client_constants
-from ..scheduler_core.log_manager import LogsManager
-from ..scheduler_core.metrics_manager import MetricsManager
-from ..scheduler_core.master_api_daemon import MasterApiDaemon
-from fedml.utils.debugging import debug
-from ..scheduler_core.message_center import FedMLMessageCenter
-import ssl
-
-
-class RunnerError(Exception):
-    """ Runner stopped. """
-    pass
-
-
-class RunnerCompletedError(Exception):
-    """ Runner completed. """
-    pass
-
-
-class FedMLServerRunner(FedMLMessageCenter):
-    FEDML_CLOUD_SERVER_PREFIX = "fedml-server-run-"
-    debug_cloud_server = False
-
-    def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id=0):
-        super().__init__()
-        self.master_api_daemon = None
-        self.run_stop_process = None
-        self.run_stop_process_map = dict()
-        self.run_edge_id_status_queue_map = dict()
-        self.run_metrics_queue_map = dict()
-        self.run_events_queue_map = dict()
-        self.run_artifacts_queue_map = dict()
-        self.run_logs_queue_map = dict()
-        self.async_check_timeout = 0
-        self.enable_async_cluster = False
-        self.origin_fedml_config_object = None
-        self.package_type = SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT
-        self.local_api_process = None
-        self.run_process_event = None
-        self.run_process_event_map = dict()
-        self.run_process_completed_event = None
-        self.run_process_completed_event_map = dict()
-        self.run_process_event_map_for_stop = dict()
-        self.edge_device_info_queue = None
-        self.run_edge_device_info_queue_map = dict()
-        self.run_edge_device_info_queue_map_for_stop = dict()
-        self.run_edge_device_info_global_queue = None
-        self.run_edge_device_info_global_queue_for_stop = None
-        self.run_process = None
-        self.run_process_map = dict()
-        self.start_request_json = None
-        self.server_docker_image = None
-        self.cloud_server_name = None
-        self.run_as_cloud_agent = False
-        self.run_as_cloud_server = False
-        self.run_as_edge_server_and_agent = False
-        self.run_as_cloud_server_and_agent = False
-        self.fedml_packages_base_dir = None
-        self.fedml_packages_unzip_dir = None
-        self.mqtt_mgr = None
-        self.running_request_json = dict()
-        self.run_id = run_id
-        self.unique_device_id = None
-        self.edge_id = edge_id
-        self.server_agent_id = 0
-        if request_json is not None:
-            self.server_agent_id = request_json.get("server_id", 0)
-        self.process = None
-        self.args = args
-        self.request_json = copy.deepcopy(request_json)
-        self.version = args.version
-        self.device_id = args.device_id
-        self.cur_dir = os.path.split(os.path.realpath(__file__))[0]
-        if args.current_running_dir is not None:
-            self.cur_dir = args.current_running_dir
-
-        image_version = self.version
-        if image_version == "local":
-            image_version = "dev"
-        self.server_docker_base_image = "/fedml-device-image:" + image_version
-
-        self.agent_config = agent_config
-        self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data")
-        self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data")
-        self.fedml_data_dir = self.fedml_data_base_package_dir
-        self.fedml_config_dir = os.path.join("/", "fedml", "conf")
-
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {
-            "${FEDSYS.RUN_ID}": "",
-            "${FEDSYS.PRIVATE_LOCAL_DATA}": "",
-            "${FEDSYS.CLIENT_ID_LIST}": "",
-            "${FEDSYS.SYNTHETIC_DATA_URL}": "",
-            "${FEDSYS.IS_USING_LOCAL_DATA}": "",
-            "${FEDSYS.CLIENT_NUM}": "",
-            "${FEDSYS.CLIENT_INDEX}": "",
-            "${FEDSYS.CLIENT_OBJECT_LIST}": "",
-            "${FEDSYS.LOG_SERVER_URL}": "",
-        }
-
-        self.mlops_metrics = None
-        self.client_agent_active_list = dict()
-        self.server_active_list = dict()
-        self.run_status = None
-        self.ntp_offset = MLOpsUtils.get_ntp_offset()
-        self.runner_list = dict()
-        self.enable_simulation_cloud_agent = False
-        self.use_local_process_as_cloud_server = False
-
-        self.model_device_server = None
-        self.run_model_device_ids = dict()
-        self.run_edge_ids = dict()
-        self.master_api_process = None
-
-        self.subscribed_topics = list()
-        self.user_name = None
-        self.message_center = None
-
-    def build_dynamic_constrain_variables(self, run_id, run_config):
-        data_config = run_config.get("data_config", {})
-        server_edge_id_list = self.request_json["edgeids"]
-        is_using_local_data = 0
-        private_data_dir = data_config.get("privateLocalData", "")
-        synthetic_data_url = data_config.get("syntheticDataUrl", "")
-        edges = self.request_json["edges"]
-        # if private_data_dir is not None \
-        #         and len(str(private_data_dir).strip(' ')) > 0:
-        #     is_using_local_data = 1
-        if private_data_dir is None or len(str(private_data_dir).strip(" ")) <= 0:
-            params_config = run_config.get("parameters", None)
-            private_data_dir = ServerConstants.get_data_dir()
-        if synthetic_data_url is None or len(str(synthetic_data_url)) <= 0:
-            synthetic_data_url = private_data_dir
-
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.RUN_ID}"] = run_id
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.PRIVATE_LOCAL_DATA}"] = private_data_dir.replace(" ", "")
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_ID_LIST}"] = str(server_edge_id_list).replace(" ", "")
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.SYNTHETIC_DATA_URL}"] = synthetic_data_url.replace(" ", "")
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.IS_USING_LOCAL_DATA}"] = str(is_using_local_data)
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_NUM}"] = len(server_edge_id_list)
-        client_objects = str(json.dumps(edges))
-        client_objects = client_objects.replace(" ", "").replace("\n", "").replace('"', '\\"')
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_OBJECT_LIST}"] = client_objects
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.LOG_SERVER_URL}"] = self.agent_config["ml_ops_config"][
-            "LOG_SERVER_URL"
-        ]
-
-    def unzip_file(self, zip_file, unzip_file_path) -> str:
-        unziped_file_name = ""
-        if zipfile.is_zipfile(zip_file):
-            with zipfile.ZipFile(zip_file, "r") as zipf:
-                zipf.extractall(unzip_file_path)
-                unziped_file_name = zipf.namelist()[0]
-        else:
-            raise Exception("Invalid zip file {}".format(zip_file))
-
-        return unziped_file_name
-
-    def package_download_progress(self, count, blksize, filesize):
-        self.check_runner_stop_event()
-
-        downloaded = count * blksize
-        downloaded = filesize if downloaded > filesize else downloaded
-        progress = (downloaded / filesize * 100) if filesize != 0 else 0
-        progress_int = int(progress)
-        downloaded_kb = format(downloaded / 1024, '.2f')
-
-        # since this hook funtion is stateless, we need a state to avoid printing progress repeatly
-        if count == 0:
-            self.prev_download_progress = 0
-        if progress_int != self.prev_download_progress and progress_int % 5 == 0:
-            self.prev_download_progress = progress_int
-            logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int))
-
-    def retrieve_and_unzip_package(self, package_name, package_url):
-        local_package_path = ServerConstants.get_package_download_dir()
-        os.makedirs(local_package_path, exist_ok=True)
-        filename, filename_without_extension, file_extension = ServerConstants.get_filename_and_extension(package_url)
-        local_package_file = os.path.join(local_package_path, f"fedml_run_{self.run_id}_{filename_without_extension}")
-        if os.path.exists(local_package_file):
-            os.remove(local_package_file)
-        ssl._create_default_https_context = ssl._create_unverified_context
-        urllib.request.urlretrieve(package_url, local_package_file,
-                                   reporthook=self.package_download_progress)
-        unzip_package_path = os.path.join(ClientConstants.get_package_unzip_dir(),
-                                          f"unzip_fedml_run_{self.run_id}_{filename_without_extension}")
-        try:
-            shutil.rmtree(unzip_package_path, ignore_errors=True)
-        except Exception as e:
-            pass
-
-        package_dir_name = self.unzip_file(local_package_file, unzip_package_path)  # Using unziped folder name
-        unzip_package_full_path = os.path.join(unzip_package_path, package_dir_name)
-
-        logging.info("local_package_file {}, unzip_package_path {}, unzip file full path {}".format(
-            local_package_file, unzip_package_path, unzip_package_full_path))
-
-        return unzip_package_full_path
-
-    def update_local_fedml_config(self, run_id, run_config):
-        packages_config = run_config["packages_config"]
-
-        # Copy config file from the client
-        server_package_name = packages_config.get("server", None)
-        server_package_url = packages_config.get("serverUrl", None)
-        unzip_package_path = self.retrieve_and_unzip_package(server_package_name, server_package_url)
-        self.fedml_packages_unzip_dir = unzip_package_path
-        fedml_local_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml")
-
-        # Load the above config to memory
-        config_from_container = load_yaml_config(fedml_local_config_file)
-        container_entry_file_config = config_from_container["entry_config"]
-        container_dynamic_args_config = config_from_container["dynamic_args"]
-        entry_file = container_entry_file_config["entry_file"]
-        conf_file = container_entry_file_config["conf_file"]
-        self.package_type = container_entry_file_config.get("package_type", SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT)
-        full_conf_path = os.path.join(unzip_package_path, "fedml", "config", os.path.basename(conf_file))
-
-        # Dynamically build constrain variable with realtime parameters from server
-        self.build_dynamic_constrain_variables(run_id, run_config)
-
-        # Update entry arguments value with constrain variable values with realtime parameters from server
-        # currently we support the following constrain variables:
-        # ${FEDSYS_RUN_ID}: a run id represented one entire Federated Learning flow
-        # ${FEDSYS_PRIVATE_LOCAL_DATA}: private local data path in the Federated Learning client
-        # ${FEDSYS_CLIENT_ID_LIST}: client list in one entire Federated Learning flow
-        # ${FEDSYS_SYNTHETIC_DATA_URL}: synthetic data url from server,
-        #                  if this value is not null, the client will download data from this URL to use it as
-        #                  federated training data set
-        # ${FEDSYS_IS_USING_LOCAL_DATA}: whether use private local data as federated training data set
-        # container_dynamic_args_config["data_cache_dir"] = "${FEDSYS.PRIVATE_LOCAL_DATA}"
-        for constrain_variable_key, constrain_variable_value in self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES.items():
-            for argument_key, argument_value in container_dynamic_args_config.items():
-                if argument_value is not None and str(argument_value).find(constrain_variable_key) == 0:
-                    replaced_argument_value = str(argument_value).replace(
-                        constrain_variable_key, str(constrain_variable_value)
-                    )
-                    container_dynamic_args_config[argument_key] = replaced_argument_value
-
-        # Merge all container new config sections as new config dictionary
-        package_conf_object = dict()
-        package_conf_object["entry_config"] = container_entry_file_config
-        package_conf_object["dynamic_args"] = container_dynamic_args_config
-        package_conf_object["dynamic_args"]["config_version"] = self.args.config_version
-        container_dynamic_args_config["mqtt_config_path"] = os.path.join(
-            unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["mqtt_config_path"])
-        )
-        container_dynamic_args_config["s3_config_path"] = os.path.join(
-            unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["s3_config_path"])
-        )
-        log_file_dir = ServerConstants.get_log_file_dir()
-        os.makedirs(log_file_dir, exist_ok=True)
-        package_conf_object["dynamic_args"]["log_file_dir"] = log_file_dir
-
-        # Save new config dictionary to local file
-        fedml_updated_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml")
-        ServerConstants.generate_yaml_doc(package_conf_object, fedml_updated_config_file)
-
-        # Build dynamic arguments and set arguments to fedml config object
-        if not self.build_dynamic_args(run_id, run_config, package_conf_object, unzip_package_path):
-            return None, None
-
-        return unzip_package_path, package_conf_object
-
-    def build_dynamic_args(self, run_id, run_config, package_conf_object, base_dir):
-        fedml_conf_file = package_conf_object["entry_config"]["conf_file"]
-        fedml_conf_file_processed = str(fedml_conf_file).replace('\\', os.sep).replace('/', os.sep)
-        fedml_conf_path = os.path.join(base_dir, "fedml", "config",
-                                       os.path.basename(fedml_conf_file_processed))
-        fedml_conf_object = load_yaml_config(fedml_conf_path)
-        self.origin_fedml_config_object = fedml_conf_object.copy()
-        run_params = run_config.get("parameters", {})
-        job_yaml = run_params.get("job_yaml", {})
-
-        # Replace local fedml config objects with parameters from MLOps web
-        parameters_object = run_config.get("parameters", None)
-        if parameters_object is not None:
-            for config_k, config_v in fedml_conf_object.items():
-                parameter_v = parameters_object.get(config_k, None)
-                if parameter_v is not None:
-                    fedml_conf_object[config_k] = parameter_v
-                    parameters_object.pop(config_k)
-
-            for config_k, config_v in parameters_object.items():
-                fedml_conf_object[config_k] = config_v
-
-        package_dynamic_args = package_conf_object["dynamic_args"]
-        if fedml_conf_object.get("comm_args", None) is not None:
-            fedml_conf_object["comm_args"]["mqtt_config_path"] = package_dynamic_args["mqtt_config_path"]
-            fedml_conf_object["comm_args"]["s3_config_path"] = package_dynamic_args["s3_config_path"]
-            fedml_conf_object["common_args"]["using_mlops"] = True
-        if fedml_conf_object.get("train_args", None) is not None:
-            fedml_conf_object["train_args"]["run_id"] = package_dynamic_args["run_id"]
-            fedml_conf_object["train_args"]["client_id_list"] = package_dynamic_args["client_id_list"]
-            fedml_conf_object["train_args"]["client_num_in_total"] = int(package_dynamic_args["client_num_in_total"])
-            fedml_conf_object["train_args"]["client_num_per_round"] = int(package_dynamic_args["client_num_in_total"])
-            fedml_conf_object["train_args"]["server_id"] = self.edge_id
-            fedml_conf_object["train_args"]["server_agent_id"] = self.request_json.get("cloud_agent_id", self.edge_id)
-            fedml_conf_object["train_args"]["group_server_id_list"] = self.request_json.get("group_server_id_list",
-                                                                                            list())
-        if fedml_conf_object.get("device_args", None) is not None:
-            fedml_conf_object["device_args"]["worker_num"] = int(package_dynamic_args["client_num_in_total"])
-        # fedml_conf_object["data_args"]["data_cache_dir"] = package_dynamic_args["data_cache_dir"]
-        if fedml_conf_object.get("tracking_args", None) is not None:
-            fedml_conf_object["tracking_args"]["log_file_dir"] = package_dynamic_args["log_file_dir"]
-            fedml_conf_object["tracking_args"]["log_server_url"] = package_dynamic_args["log_server_url"]
-
-        bootstrap_script_path = None
-        env_args = fedml_conf_object.get("environment_args", None)
-        if env_args is not None:
-            bootstrap_script_file = env_args.get("bootstrap", None)
-            if bootstrap_script_file is not None:
-                bootstrap_script_file = str(bootstrap_script_file).replace('\\', os.sep).replace('/', os.sep)
-                if platform.system() == 'Windows':
-                    bootstrap_script_file = bootstrap_script_file.rstrip('.sh') + '.bat'
-                if bootstrap_script_file is not None:
-                    bootstrap_script_dir = os.path.join(base_dir, "fedml", os.path.dirname(bootstrap_script_file))
-                    bootstrap_script_path = os.path.join(
-                        bootstrap_script_dir, bootstrap_script_dir, os.path.basename(bootstrap_script_file)
-                    )
-        # try:
-        #     os.makedirs(package_dynamic_args["data_cache_dir"], exist_ok=True)
-        # except Exception as e:
-        #     pass
-        fedml_conf_object["dynamic_args"] = package_dynamic_args
-
-        ServerConstants.generate_yaml_doc(fedml_conf_object, fedml_conf_path)
-
-        is_bootstrap_run_ok = True
-        try:
-            if bootstrap_script_path is not None:
-                if os.path.exists(bootstrap_script_path):
-                    bootstrap_stat = os.stat(bootstrap_script_path)
-                    if platform.system() == 'Windows':
-                        os.chmod(bootstrap_script_path,
-                                 bootstrap_stat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
-                        bootstrap_scripts = "{}".format(bootstrap_script_path)
-                    else:
-                        os.chmod(bootstrap_script_path,
-                                 bootstrap_stat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
-                        bootstrap_scripts = "cd {}; ./{}".format(bootstrap_script_dir,
-                                                                 os.path.basename(bootstrap_script_file))
-                    bootstrap_scripts = str(bootstrap_scripts).replace('\\', os.sep).replace('/', os.sep)
-                    logging.info("Bootstrap scripts are being executed...")
-                    shell_cmd_list = list()
-                    shell_cmd_list.append(bootstrap_scripts)
-                    process, error_list = ServerConstants.execute_commands_with_live_logs(
-                        shell_cmd_list, callback=self.callback_run_bootstrap)
-
-                    ret_code, out, err = process.returncode, None, None
-                    if ret_code is None or ret_code <= 0:
-                        if error_list is not None and len(error_list) > 0:
-                            is_bootstrap_run_ok = False
-                        else:
-                            if out is not None:
-                                out_str = sys_utils.decode_our_err_result(out)
-                                if out_str != "":
-                                    logging.info("{}".format(out_str))
-
-                            sys_utils.log_return_info(bootstrap_script_file, 0)
-
-                            is_bootstrap_run_ok = True
-                    else:
-                        if err is not None:
-                            err_str = sys_utils.decode_our_err_result(err)
-                            if err_str != "":
-                                logging.error("{}".format(err_str))
-
-                        sys_utils.log_return_info(bootstrap_script_file, ret_code)
-
-                        is_bootstrap_run_ok = False
-        except Exception as e:
-            logging.error("Bootstrap scripts error: {}".format(traceback.format_exc()))
-
-            is_bootstrap_run_ok = False
-
-        return is_bootstrap_run_ok
-
-    def callback_run_bootstrap(self, job_pid):
-        ServerConstants.save_bootstrap_process(self.run_id, job_pid)
-
    @debug
    def run(
            self, process_event, completed_event, edge_id_status_queue=None,
            edge_device_info_queue=None, run_metrics_queue=None,
            run_event_queue=None, run_artifacts_queue=None, run_logs_queue=None,
            message_center_queue=None, edge_device_info_global_queue=None
    ):
        """Entry point of the server runner child process.

        Stores the stop/completed events, rebuilds the message center handle
        inside this process, then delegates to ``run_impl``. Runner exceptions
        are translated into MLOps server status reports (KILLED on a stop
        request, FAILED on unexpected errors), and resources are always
        released in the ``finally`` block.
        """
        print(f"Server runner process id {os.getpid()}, run id {self.run_id}")

        # Detach into a new session so signals target this run's process group
        # only (POSIX; not available on Windows).
        if platform.system() != "Windows":
            os.setsid()

        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')

        self.run_process_event = process_event
        self.run_process_completed_event = completed_event
        try:
            MLOpsUtils.set_ntp_offset(self.ntp_offset)

            # Re-attach the message center queue inside this child process.
            self.rebuild_message_center(message_center_queue)

            self.run_impl(edge_id_status_queue, edge_device_info_queue, run_metrics_queue,
                          run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue)
        except RunnerError:
            # Raised by check_runner_stop_event when a stop was requested.
            logging.info("Runner stopped.")
            self.mlops_metrics.report_server_id_status(
                self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED, edge_id=self.edge_id,
                server_id=self.edge_id, server_agent_id=self.edge_id)
        except RunnerCompletedError:
            # Normal completion signalled via the completed event; no report.
            logging.info("Runner completed.")
        except Exception as e:
            logging.error("Runner exits with exceptions. {}".format(traceback.format_exc()))
            self.mlops_metrics.report_server_id_status(
                self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
                server_id=self.edge_id, server_agent_id=self.edge_id)
        finally:
            # Flush any still-queued metrics/logs before tearing down.
            logging.info("Release resources.")
            self._process_run_metrics_queue(run_metrics_queue)
            self._process_run_logs_queue(run_logs_queue)
            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id)
            if self.mlops_metrics is not None:
                self.mlops_metrics.stop_sys_perf()
            # Brief grace period before killing run/learning/bootstrap processes.
            time.sleep(3)
            ServerConstants.cleanup_run_process(self.run_id)
            ServerConstants.cleanup_learning_process(self.run_id)
            ServerConstants.cleanup_bootstrap_process(self.run_id)
-
-    def check_runner_stop_event(self):
-        if self.run_process_event is not None and self.run_process_event.is_set():
-            logging.info("Received stopping event.")
-            raise RunnerError("Runner stopped")
-
-        if self.run_process_completed_event is not None and self.run_process_completed_event.is_set():
-            logging.info("Received completed event.")
-            raise RunnerCompletedError("Runner completed")
-
-    def deploy_model(self, serving_devices, request_json, run_id):
-        run_config = request_json["run_config"]
-        run_params = run_config.get("parameters", {})
-        job_yaml = run_params.get("job_yaml", {})
-        job_type = job_yaml.get("job_type", None)
-        job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type
-        if job_type == Constants.JOB_TASK_TYPE_DEPLOY or job_type == Constants.JOB_TASK_TYPE_SERVE:
-            computing = job_yaml.get("computing", {})
-            num_gpus = computing.get("minimum_num_gpus", 1)
-            serving_args = run_params.get("serving_args", {})
-            model_id = serving_args.get("model_id", None)
-            model_name = serving_args.get("model_name", None)
-            model_version = serving_args.get("model_version", None)
-            model_storage_url = serving_args.get("model_storage_url", None)
-            endpoint_name = serving_args.get("endpoint_name", None)
-            endpoint_id = serving_args.get("endpoint_id", None)
-            random = serving_args.get("random", "")
-            random_out = sys_utils.random2(random, "FEDML@9999GREAT")
-            random_list = random_out.split("FEDML@")
-            device_type = device_client_constants.ClientConstants.login_role_list[
-                device_client_constants.ClientConstants.LOGIN_MODE_FEDML_CLOUD_INDEX]
-            FedMLModelCards.get_instance().deploy_model(
-                model_name, device_type, json.dumps(serving_devices),
-                "", random_list[1], None,
-                in_model_id=model_id, in_model_version=model_version,
-                endpoint_name=endpoint_name, endpoint_id=endpoint_id, run_id=run_id)
-
    @debug
    def run_impl(
            self, edge_id_status_queue, edge_device_info_queue, run_metrics_queue,
            run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue
    ):
        """Orchestrate one run: report STARTING, verify the edges are online,
        optionally start the local server job, then aggregate edge
        status/metrics/logs until the run reaches a terminal state.
        """
        run_id = self.request_json["runId"]
        run_config = self.request_json["run_config"]
        # NOTE(review): data_config is read but not used in this method.
        data_config = run_config["data_config"]
        edge_ids = self.request_json["edgeids"]

        self.check_runner_stop_event()

        self.run_id = run_id
        self.args.run_id = self.run_id
        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)

        # report server running status
        self.mlops_metrics.report_server_id_status(
            run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_STARTING, edge_id=self.edge_id,
            server_id=self.edge_id, server_agent_id=self.edge_id)

        logging.info("Detect all status of Edge ids: " + str(edge_ids))

        # Wait for every edge to respond; once ready, the training request is
        # dispatched via the callback.
        status_ok, active_edge_info_dict, inactivate_edges = self.detect_edges_status(
            edge_device_info_queue, edge_device_info_global_queue=edge_device_info_global_queue,
            callback_when_edges_ready=self.send_training_request_to_edges)
        logging.info(f"Status OK: {status_ok}, Active edge info dict: {active_edge_info_dict}, "
                     f"inactivate edges: {inactivate_edges}")
        if not status_ok:
            logging.error(f"Status of edge device is not OK. Active edge info dict: {active_edge_info_dict}, "
                          f"Inactivate edges: {inactivate_edges}")
            return

        if not self.should_continue_run_job(run_id):
            # Debug hook: keep the cloud server process alive for inspection.
            if FedMLServerRunner.debug_cloud_server:
                while True:
                    time.sleep(30)
            # Check if the run status is normal
            self.aggregate_run_status_metrics_logs(
                run_id, edge_ids, edge_id_status_queue, edge_device_info_queue,
                edge_device_info_global_queue,
                run_metrics_queue, run_logs_queue)
            return

        # Start the server job
        self._start_runner_process(run_id, self.request_json, is_server_job=True)

        # Check if the run status is normal
        self.aggregate_run_status_metrics_logs(
            run_id, edge_ids, edge_id_status_queue, edge_device_info_queue,
            edge_device_info_global_queue,
            run_metrics_queue, run_logs_queue)
-
    def aggregate_run_status_metrics_logs(
            self, run_id, edge_id_list, edge_id_status_queue, edge_device_info_queue,
            edge_device_info_global_queue, run_metrics_queue, run_logs_queue):
        """Poll edge statuses, metrics and logs until every edge reaches a
        terminal state, then compute and report the final server status.

        Edges that keep reporting an abnormal status for longer than the
        allowed window are reported as KILLED; edges that stop responding to
        the liveness probe are reported OFFLINE. The final status combines
        the per-edge tallies with the run's fault-tolerance parameters.
        """
        total_sleep_seconds = 0
        sleep_seconds = 3
        # ~25 minutes of consecutive abnormal responses before an edge is
        # considered dead and reported as killed.
        allowed_status_check_sleep_seconds = 60 * 25
        server_id = self.edge_id
        # Statuses that count as a "normal" response and reset the timeout
        # counter for an edge.
        normal_response_status_list = [
            ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE,
            ClientConstants.MSG_MLOPS_CLIENT_STATUS_TRAINING, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED,
            ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
            ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION, ClientConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING
        ]
        edges_id_status_timeout_map = dict()
        number_of_failed_edges = 0
        number_of_finished_edges = 0
        number_of_killed_edges = 0
        running_edges_list = list()
        inactivate_edge_list = list()
        current_edge_id_status_map = dict()

        while True:
            self.check_runner_stop_event()

            # Process run metrics
            self._process_run_metrics_queue(run_metrics_queue)

            # Process run logs
            self._process_run_logs_queue(run_logs_queue)

            # Fetch edge id and status from the edge id status queue
            while True:
                try:
                    queue_item = edge_id_status_queue.get(block=False, timeout=3)
                    if queue_item is not None:
                        current_edge_id_status_map.update(queue_item)
                except queue.Empty as e:  # If queue is empty, then break loop
                    break

            # Calc the total completed device number
            server_id = current_edge_id_status_map.get("server", 0)
            running_edges_list.clear()
            number_of_failed_edges = 0
            number_of_finished_edges = 0
            number_of_killed_edges = 0
            for edge_id_item, status_item in current_edge_id_status_map.items():
                # The "server" entry carries the server id, not an edge status.
                if edge_id_item == "server":
                    continue

                if status_item is None or status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \
                        status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION:
                    number_of_failed_edges += 1
                    continue

                if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED:
                    number_of_finished_edges += 1
                    continue

                if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED:
                    number_of_killed_edges += 1
                    continue

                if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE or \
                        status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE:
                    continue

                running_edges_list.append(edge_id_item)

            # Process the no response edges and accumulate the counter.
            for edge_id_item in edge_id_list:
                status_dict = edges_id_status_timeout_map.get(str(edge_id_item))
                status_item = current_edge_id_status_map.get(str(edge_id_item))
                if status_item is None:
                    continue
                if status_dict is None:
                    status_dict = {"status": status_item, "count": 0}
                else:
                    if status_item in normal_response_status_list:
                        status_dict["count"] = 0
                    else:
                        status_dict["count"] += 1
                edges_id_status_timeout_map[str(edge_id_item)] = status_dict

            # If the completed device number is equal total device number, then break
            # (+1 accounts for the "server" entry in the status map).
            if len(running_edges_list) <= 0 and len(current_edge_id_status_map.keys()) == len(edge_id_list) + 1:
                break

            # Calc the timeout value to wait to device killed.
            self.check_runner_stop_event()
            time.sleep(sleep_seconds)
            total_sleep_seconds += sleep_seconds
            no_response_edge_ids = list()
            for no_res_edge, no_res_status in edges_id_status_timeout_map.items():
                if no_res_status.get("count") * sleep_seconds > allowed_status_check_sleep_seconds:
                    no_response_edge_ids.append(no_res_edge)

            # If timeout, then report killed device status
            if len(no_response_edge_ids) > 0:
                for edge_id_item in no_response_edge_ids:
                    self.mlops_metrics.report_client_id_status(
                        edge_id_item, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED,
                        server_id=self.edge_id, run_id=self.run_id)

            # Check if we can get the response device info from edge devices
            # and set the inactive edges to killed status.
            self.check_runner_stop_event()
            # Only probe edges not already known to be inactive.
            given_edge_ids = list(set(edge_id_list) - set(inactivate_edge_list))
            status_ok, active_edge_info_dict, inactivate_edges = self.detect_edges_status(
                edge_device_info_queue, edge_device_info_global_queue=edge_device_info_global_queue,
                need_to_trigger_exception=False, status_timeout=60,
                given_edge_ids=given_edge_ids, callback_when_detecting=self.callback_when_detecting_on_aggregation,
                args_for_callback_when_detecting=(run_metrics_queue, run_logs_queue)
            )
            if not status_ok:
                inactivate_edge_list.extend(inactivate_edges)
                for edge_id_item in inactivate_edges:
                    self.mlops_metrics.report_client_id_status(
                        edge_id_item, ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE,
                        server_id=self.edge_id, run_id=self.run_id)

        # Calc the final run status based on the completed device numbers and fault tolerance parameters.
        enable_fault_tolerance, fault_tolerance_rate = self.parse_fault_tolerance_params(run_id)
        running_edges_list = list(set(running_edges_list))
        status_to_report = self.calculate_server_status(
            run_id, len(edge_id_list), number_of_failed_edges, number_of_finished_edges,
            number_of_killed_edges, running_edges_list, enable_fault_tolerance=enable_fault_tolerance,
            fault_tolerance_rate=fault_tolerance_rate)
        if status_to_report is not None:
            logging.info(
                f"Run completed when aggregating status, metrics and logs, will report status {status_to_report}")
            self.mlops_metrics.report_server_id_status(
                self.run_id, status_to_report, edge_id=self.edge_id,
                server_id=self.edge_id, server_agent_id=self.edge_id)
-
-    def callback_when_detecting_on_aggregation(self, detecting_args):
-        # Process run metrics
-        self._process_run_metrics_queue(detecting_args[0])
-
-        # Process run logs
-        self._process_run_logs_queue(detecting_args[1])
-
-    def _process_run_metrics_queue(self, run_metrics_queue):
-        # Fetch metrics from the run metrics queue
-        while True:
-            try:
-                metrics_item = run_metrics_queue.get(block=False, timeout=3)
-                MetricsManager.get_instance().save_metrics(metrics_item)
-                metric_json = json.loads(metrics_item)
-                if metric_json.get("is_endpoint", False):
-                    metric_json().pop("is_endpoint")
-                    self.mlops_metrics.report_endpoint_metric({}, payload=json.dumps(metric_json))
-                else:
-                    self.mlops_metrics.report_server_training_metric({}, payload=metrics_item)
-            except queue.Empty as e:  # If queue is empty, then break loop
-                break
-
-    def _process_run_logs_queue(self, run_logs_queue):
-        # Fetch logs from the run logs queue
-        while True:
-            try:
-                logs_item = run_logs_queue.get(block=False, timeout=3)
-                LogsManager.save_logs(logs_item)
-            except queue.Empty as e:  # If queue is empty, then break loop
-                break
-
    def run_server_job_impl(self, process_event, completed_event, edge_id_status_queue=None,
                            edge_device_info_queue=None, run_metrics_queue=None,
                            run_event_queue=None, run_artifacts_queue=None, run_logs_queue=None,
                            message_center_queue=None, edge_device_info_global_queue=None):
        """Run the server-side job in this child process: download and unpack
        the run package, execute the job entry point, and report the final
        FINISHED/failed status to MLOps. On failure, a stop request is sent
        to all participating edges.
        """
        print(f"Server runner process id {os.getpid()}, run id {self.run_id}")

        # Detach into a new session (POSIX only).
        if platform.system() != "Windows":
            os.setsid()

        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')

        self.run_process_event = process_event
        self.run_process_completed_event = completed_event

        MLOpsUtils.set_ntp_offset(self.ntp_offset)

        # Re-attach the message center queue inside this child process.
        self.rebuild_message_center(message_center_queue)

        run_id = self.request_json["runId"]
        run_config = self.request_json["run_config"]
        data_config = run_config["data_config"]
        edge_ids = self.request_json["edgeids"]

        self.check_runner_stop_event()

        # get training params
        private_local_data_dir = data_config.get("privateLocalData", "")
        # Local-data mode is currently disabled (see commented check below).
        is_using_local_data = 0
        # if private_local_data_dir is not None and len(str(private_local_data_dir).strip(' ')) > 0:
        #     is_using_local_data = 1

        # start a run according to the hyper-parameters
        # fedml_local_data_dir = self.cur_dir + "/fedml_data/run_" + run_id_str + "_edge_" + str(edge_id)
        fedml_local_data_dir = os.path.join(self.cur_dir, "fedml_data")
        fedml_local_config_dir = os.path.join(self.cur_dir, "fedml_config")
        if is_using_local_data:
            fedml_local_data_dir = private_local_data_dir
        self.fedml_data_dir = self.fedml_data_local_package_dir

        self.check_runner_stop_event()

        logging.info("download packages and run the bootstrap script...")

        # update local config with real time parameters from server and dynamically replace variables value
        unzip_package_path, fedml_config_object = self.update_local_fedml_config(run_id, run_config)
        if unzip_package_path is None or fedml_config_object is None:
            logging.info("failed to update local fedml config.")
            self.check_runner_stop_event()
            self.cleanup_run_when_starting_failed()
            self.send_training_stop_request_to_edges_when_exception(edge_ids, payload=self.start_request_json,
                                                                    run_id=run_id)
            return

        logging.info("cleanup the previous aggregation process and check downloaded packages...")

        # Resolve entry/config file paths from the unpacked package, with
        # path separators normalized for the current OS.
        entry_file_config = fedml_config_object["entry_config"]
        dynamic_args_config = fedml_config_object["dynamic_args"]
        entry_file = str(entry_file_config["entry_file"]).replace('\\', os.sep).replace('/', os.sep)
        entry_file = os.path.basename(entry_file)
        conf_file = entry_file_config["conf_file"]
        conf_file = str(conf_file).replace('\\', os.sep).replace('/', os.sep)
        ServerConstants.cleanup_learning_process(run_id)
        self.check_runner_stop_event()
        if not os.path.exists(unzip_package_path):
            logging.info("failed to unzip file.")
            self.check_runner_stop_event()
            self.cleanup_run_when_starting_failed()
            self.send_training_stop_request_to_edges_when_exception(edge_ids, payload=self.start_request_json,
                                                                    run_id=run_id)
            return
        os.chdir(os.path.join(unzip_package_path, "fedml"))

        self.check_runner_stop_event()

        logging.info("starting the server user process...")

        entry_file_full_path = os.path.join(unzip_package_path, "fedml", entry_file)
        conf_file_full_path = os.path.join(unzip_package_path, "fedml", conf_file)
        logging.info("                          ")
        logging.info("                          ")
        logging.info("====Your Run Logs Begin===")
        process, is_launch_task, error_list = self.execute_job_task(entry_file_full_path, conf_file_full_path, run_id)
        logging.info("====Your Run Logs End===")
        logging.info("                        ")
        logging.info("                        ")

        # Determine whether the job process exited normally. Launch-style
        # tasks are considered OK unless errors were collected.
        ret_code, out, err = process.returncode, None, None
        is_run_ok = sys_utils.is_runner_finished_normally(process.pid)
        if is_launch_task:
            is_run_ok = True
        if error_list is not None and len(error_list) > 0:
            is_run_ok = False
        if ret_code is None or ret_code <= 0:
            self.check_runner_stop_event()

            if is_run_ok:
                if out is not None:
                    out_str = sys_utils.decode_our_err_result(out)
                    if out_str != "":
                        logging.info("{}".format(out_str))

                self.mlops_metrics.report_server_id_status(
                    run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, edge_id=self.edge_id,
                    server_id=self.edge_id, server_agent_id=self.edge_id)

                if is_launch_task:
                    sys_utils.log_return_info(f"job {run_id}", 0)
                else:
                    sys_utils.log_return_info(entry_file, 0)
        else:
            is_run_ok = False

        if not is_run_ok:
            # If the run status is killed or finished, then return with the normal state.
            current_job = FedMLServerDataInterface.get_instance().get_job_by_id(run_id)
            if current_job is not None and (current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED or
                                            current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED):
                return

            self.check_runner_stop_event()

            logging.error("failed to run the aggregation process...")

            if err is not None:
                err_str = sys_utils.decode_our_err_result(err)
                if err_str != "":
                    logging.error("{}".format(err_str))

            if is_launch_task:
                sys_utils.log_return_info(f"job {run_id}", ret_code)
            else:
                sys_utils.log_return_info(entry_file, ret_code)

            # Propagate the failure to every edge so they stop as well.
            self.send_training_stop_request_to_edges_when_exception(edge_ids, run_id=run_id)
-
-    def init_job_task(self, request_json):
-        run_id = request_json["runId"]
-        run_config = request_json["run_config"]
-        edge_ids = request_json["edgeids"]
-        run_params = run_config.get("parameters", {})
-        job_yaml = run_params.get("job_yaml", None)
-        server_id = request_json["server_id"]
-        if self.run_as_cloud_agent:
-            server_id = self.edge_id
-
-        self.setup_listeners_for_edge_status(run_id, edge_ids, server_id)
-        self.setup_listener_for_run_metrics(run_id)
-        self.setup_listener_for_run_logs(run_id)
-
-    def should_continue_run_job(self, run_id):
-        run_config = self.request_json["run_config"]
-        run_params = run_config.get("parameters", {})
-        job_yaml = run_params.get("job_yaml", {})
-        job_yaml_default_none = run_params.get("job_yaml", None)
-        framework_type = job_yaml.get("framework_type", None)
-        job_type = job_yaml.get("job_type", None)
-        job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type
-        if job_yaml_default_none is not None:
-            if job_type == Constants.JOB_TASK_TYPE_FEDERATE:
-                return True
-
-            if framework_type is None or framework_type != Constants.JOB_FRAMEWORK_TYPE_FEDML:
-                self.mlops_metrics.report_server_id_status(
-                    run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_RUNNING, edge_id=self.edge_id,
-                    server_id=self.edge_id, server_agent_id=self.edge_id)
-                return False
-
-        return True
-
    def execute_job_task(self, entry_file_full_path, conf_file_full_path, run_id):
        """Execute the run's entry point.

        Legacy federated-learning requests (no ``job_yaml`` in the run
        parameters) run ``python <entry> --cf <conf> --rank 0 --role server``;
        launch-style jobs generate their shell commands via JobRunnerUtils
        after reporting the run as RUNNING.

        Returns a tuple ``(process, is_launch_task, error_list)``.
        """
        run_config = self.request_json["run_config"]
        run_params = run_config.get("parameters", {})
        job_yaml = run_params.get("job_yaml", {})
        job_yaml_default_none = run_params.get("job_yaml", None)
        # The API key may come from either of two yaml keys.
        job_api_key = job_yaml.get("run_api_key", None)
        job_api_key = job_yaml.get("fedml_run_dynamic_params", None) if job_api_key is None else job_api_key
        assigned_gpu_ids = run_params.get("gpu_ids", None)
        # NOTE(review): framework_type is read but not used in this method.
        framework_type = job_yaml.get("framework_type", None)
        job_type = job_yaml.get("job_type", None)
        job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type
        conf_file_object = load_yaml_config(conf_file_full_path)
        entry_args_dict = conf_file_object.get("fedml_entry_args", {})
        entry_args = entry_args_dict.get("arg_items", None)

        # PowerShell on Windows, bash elsewhere.
        executable_interpreter = ClientConstants.CLIENT_SHELL_PS \
            if platform.system() == ClientConstants.PLATFORM_WINDOWS else ClientConstants.CLIENT_SHELL_BASH

        if job_yaml_default_none is None:
            # Generate the job executing commands for previous federated learning (Compatibility)
            python_program = get_python_program()
            logging.info("Run the server: {} {} --cf {} --rank 0 --role server".format(
                python_program, entry_file_full_path, conf_file_full_path))
            entry_command = f"{python_program} {entry_file_full_path} --cf " \
                            f"{conf_file_full_path} --rank 0 --role server"
            shell_cmd_list = [entry_command]

            # Run the job executing commands for previous federated learning (Compatibility)
            process, error_list = ClientConstants.execute_commands_with_live_logs(
                shell_cmd_list, callback=self.callback_start_fl_job, should_write_log_file=False)
            is_launch_task = False
        else:
            self.check_runner_stop_event()

            self.mlops_metrics.report_server_id_status(
                run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_RUNNING, edge_id=self.edge_id,
                server_id=self.edge_id, server_agent_id=self.edge_id)

            # Generate the job executing commands
            job_executing_commands = JobRunnerUtils.generate_job_execute_commands(
                run_id=self.run_id, edge_id=self.edge_id, version=self.version, package_type=self.package_type,
                executable_interpreter=executable_interpreter, entry_file_full_path=entry_file_full_path,
                conf_file_object=conf_file_object, entry_args=entry_args, assigned_gpu_ids=assigned_gpu_ids,
                job_api_key=job_api_key, client_rank=0)

            # Run the job executing commands
            logging.info(f"Run the server job with job id {self.run_id}, device id {self.edge_id}.")
            process, error_list = ServerConstants.execute_commands_with_live_logs(
                job_executing_commands, callback=self.start_job_perf, error_processor=self.job_error_processor)
            is_launch_task = True

        return process, is_launch_task, error_list
-
-    def callback_start_fl_job(self, job_pid):
-        ServerConstants.save_learning_process(self.run_id, job_pid)
-        self.mlops_metrics.report_sys_perf(
-            self.args, self.agent_config["mqtt_config"], job_process_id=job_pid)
-
-    def start_job_perf(self, job_pid):
-        ServerConstants.save_learning_process(self.run_id, job_pid)
-        self.mlops_metrics.report_job_perf(self.args, self.agent_config["mqtt_config"], job_pid)
-
-    def job_error_processor(self, error_list):
-        self.check_runner_stop_event()
-
-        error_str = "\n".join(error_list)
-        raise Exception(f"Error occurs when running the job... {error_str}")
-
    def process_job_status(self, run_id, edge_id, status):
        """Handle one edge status update for a run.

        Tallies the per-edge states from the active-client list, echoes the
        client status to MLOps and the web UI, and — when the fault-tolerance
        rules determine the run is complete — reports the final server status.
        """
        number_of_failed_edges = 0
        number_of_finished_edges = 0
        number_of_killed_edges = 0
        edge_id_status_dict = self.client_agent_active_list.get(f"{run_id}", {})
        server_id = edge_id_status_dict.get("server", 0)
        enable_fault_tolerance, fault_tolerance_rate = self.parse_fault_tolerance_params(run_id)
        running_edges_list = list()
        for edge_id_item, status_item in edge_id_status_dict.items():
            # The "server" entry carries the server id, not an edge status.
            if edge_id_item == "server":
                continue

            if status_item is None or status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \
                    status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION:
                number_of_failed_edges += 1
                continue

            if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED:
                number_of_finished_edges += 1
                continue

            if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED:
                number_of_killed_edges += 1
                continue

            if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE or \
                    status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE:
                continue

            running_edges_list.append(edge_id_item)

        # Report client status
        # EXCEPTION is surfaced to MLOps as FAILED.
        edge_status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION else status
        self.mlops_metrics.report_client_training_status(edge_id, edge_status, run_id=run_id)
        self.mlops_metrics.report_client_device_status_to_web_ui(edge_id, edge_status, run_id=run_id)

        # Report server status based on the fault tolerance model and parameters
        # (-1 excludes the "server" entry from the edge count).
        edge_nums = len(edge_id_status_dict.keys()) - 1
        status_to_report = self.calculate_server_status(
            run_id, edge_nums, number_of_failed_edges, number_of_finished_edges, number_of_killed_edges,
            running_edges_list, enable_fault_tolerance=enable_fault_tolerance,
            fault_tolerance_rate=fault_tolerance_rate)
        if status_to_report is not None:
            logging.info(f"Run completed when processing edge status, will report status {status_to_report}")
            self.report_server_status(run_id, server_id, status_to_report)
-
-    def calculate_server_status(
-            self, run_id, total_edge_nums, number_of_failed_edges, number_of_finished_edges,
-            number_of_killed_edges, running_edges_list, enable_fault_tolerance=False,
-            fault_tolerance_rate=0.8
-    ):
-        # Report server status based on the fault tolerance model and parameters
-        actual_failed_rate = number_of_failed_edges / total_edge_nums
-        all_edges_run_completed = True if len(running_edges_list) <= 0 else False
-        if all_edges_run_completed:
-            status_to_report = None
-            if enable_fault_tolerance:
-                if actual_failed_rate >= fault_tolerance_rate:
-                    status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED
-                    self.send_training_stop_request_to_edges_when_exception(
-                        running_edges_list, run_id=run_id, status=status_to_report)
-                    return status_to_report
-                else:
-                    if number_of_killed_edges == total_edge_nums:
-                        status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED
-                    else:
-                        status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED
-            else:
-                if number_of_failed_edges > 0:
-                    status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED
-                elif number_of_finished_edges == total_edge_nums:
-                    status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED
-                elif number_of_killed_edges == total_edge_nums:
-                    status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED
-
-            return status_to_report
-
-    def parse_fault_tolerance_params(self, run_id):
-        run_json = self.running_request_json.get(str(run_id), None)
-        if run_json is None:
-            run_json = self.request_json
-        run_config = run_json.get("run_config", {})
-        run_params = run_config.get("parameters", {})
-        common_args = run_params.get("common_args", {})
-        enable_fault_tolerance = common_args.get("enable_fault_tolerance", False)
-        fault_tolerance_rate = common_args.get("fault_tolerance_rate", 0)
-        return enable_fault_tolerance, fault_tolerance_rate
-
-    def report_server_status(self, run_id, server_id, status):
-        self.mlops_metrics.report_server_id_status(run_id, status, edge_id=self.edge_id,
-                                                   server_id=server_id, server_agent_id=self.edge_id)
-
-    def stop_run_when_starting_failed(self):
-        edge_id_list = self.request_json["edgeids"]
-        run_id = self.request_json.get("run_id", 0)
-        logging.error("edge ids {}".format(str(edge_id_list)))
-
-        payload = self.running_request_json.get(str(run_id))
-        if payload is not None:
-            self.send_training_stop_request_to_edges(edge_id_list, payload=json.dumps(payload), run_id=run_id)
-
-        # logging.info("Stop run successfully when starting failed.")
-
-        self.mlops_metrics.report_server_id_status(
-            self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
-            server_id=self.edge_id, server_agent_id=self.edge_id)
-
-    def cleanup_run_when_finished(self, should_send_server_id_status=True):
-        # logging.info("Cleanup run successfully when finished.")
-
-        self.mlops_metrics.report_server_training_status(
-            self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, edge_id=self.edge_id
-        )
-
-        if should_send_server_id_status:
-            self.mlops_metrics.report_server_id_status(
-                self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, edge_id=self.edge_id,
-                server_id=self.edge_id, server_agent_id=self.edge_id)
-
-        try:
-            self.mlops_metrics.stop_sys_perf()
-        except Exception as ex:
-            pass
-
-        time.sleep(1)
-
-        ServerConstants.cleanup_learning_process(self.run_id)
-        ServerConstants.cleanup_bootstrap_process(self.run_id)
-
-        try:
-            local_package_path = ServerConstants.get_package_download_dir()
-            for package_file in listdir(local_package_path):
-                if os.path.basename(package_file).startswith("run_" + str(self.run_id)):
-                    shutil.rmtree(os.path.join(local_package_path, package_file), ignore_errors=True)
-        except Exception as e:
-            pass
-
-    def cleanup_run_when_starting_failed(
-            self, status=ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, should_send_server_id_status=True):
-        # logging.info("Cleanup run successfully when starting failed.")
-
-        self.mlops_metrics.report_server_training_status(
-            self.run_id, status, edge_id=self.edge_id)
-
-        if should_send_server_id_status:
-            self.mlops_metrics.report_server_id_status(
-                self.run_id, status, edge_id=self.edge_id,
-                server_id=self.edge_id, server_agent_id=self.edge_id)
-
-        try:
-            self.mlops_metrics.stop_sys_perf()
-        except Exception as ex:
-            pass
-
-        time.sleep(1)
-
-        ServerConstants.cleanup_learning_process(self.run_id)
-        ServerConstants.cleanup_bootstrap_process(self.run_id)
-
-        try:
-            local_package_path = ServerConstants.get_package_download_dir()
-            for package_file in listdir(local_package_path):
-                if os.path.basename(package_file).startswith("run_" + str(self.run_id)):
-                    shutil.rmtree(os.path.join(local_package_path, package_file), ignore_errors=True)
-        except Exception as e:
-            pass
-
-    def should_process_async_cluster(self):
-        run_config = self.request_json.get("run_config", {})
-        run_params = run_config.get("parameters", {})
-        common_args = run_params.get("common_args", {})
-        self.enable_async_cluster = common_args.get("enable_async_cluster", False)
-        self.async_check_timeout = common_args.get("async_check_timeout", 0)
-        if self.enable_async_cluster:
-            return True, self.async_check_timeout
-
-        return False, self.async_check_timeout
-
-    @debug
-    def detect_edges_status(
-            self, edge_device_info_queue, edge_device_info_global_queue=None, callback_when_edges_ready=None, status_timeout=None,
-            need_to_trigger_exception=True, status_check_context=None, given_edge_ids=None,
-            callback_when_detecting=None, args_for_callback_when_detecting=None
-    ):
-        run_id = self.request_json["runId"]
-        run_id_str = str(run_id)
-        edge_id_list = self.request_json["edgeids"]
-        if given_edge_ids is not None:
-            edge_id_list = given_edge_ids
-
-        # Init realtime status of all edges
-        run_edges_realtime_status = dict()
-        run_edges_realtime_status[run_id_str] = dict()
-
-        edge_info_global_dict = dict()
-        if edge_device_info_global_queue is not None:
-            for edge_info_global in edge_device_info_global_queue:
-                edge_info_id = edge_info_global.get("edge_id")
-                edge_info_global_dict[edge_info_id] = edge_info_global
-
-        # Send status message to all edges
-        allowed_cache_edge_status_time = 60
-        for edge_id in edge_id_list:
-            # Check if the edge status was filled allowed_cache_edge_status_time seconds ago,
-            # if so no more checking message would be sent.
-            edge_info = edge_info_global_dict.get(edge_id, None)
-            if edge_info is not None:
-                timestamp = edge_info.get("timestamp", None)
-                time_interval = time.time() - timestamp
-                if time_interval <= allowed_cache_edge_status_time:
-                    continue
-
-            self.send_status_check_msg(run_id, edge_id, self.edge_id, context=status_check_context)
-        time.sleep(3)
-
-        total_sleep_seconds = 0
-        status_check_sleep_seconds = 10
-        allowed_status_check_sleep_seconds = 60 * 2 if status_timeout is None else status_timeout
-        allowed_status_check_sleep_seconds_for_async = 30
-        inactivate_edges = list()
-        active_edge_info_dict = dict()
-        log_active_edge_info_flag = True
-        while True:
-            if callback_when_detecting is not None:
-                callback_when_detecting(args_for_callback_when_detecting)
-
-            # Fetch edge info from the edge status queue, which will be added to realtime status map
-            while True:
-                self.check_runner_stop_event()
-
-                try:
-                    edge_info = edge_device_info_queue.get(block=False, timeout=1)
-                    if edge_info is not None:
-                        edge_id = edge_info.get("edge_id", None)
-                        if edge_id is not None:
-                            run_edges_realtime_status[run_id_str][edge_id] = edge_info
-                except queue.Empty as e:  # If queue is empty, then break loop
-                    break
-
-            self.check_runner_stop_event()
-
-            # Check all edges which don't send response status successfully
-            # and retry to send the status checking message.
-            active_edges_count = 0
-            inactivate_edges.clear()
-            active_edge_info_dict.clear()
-            for edge_id in edge_id_list:
-                edge_info_dict = run_edges_realtime_status.get(run_id_str, {})
-                edge_info = edge_info_dict.get(edge_id, None)
-                edge_info = edge_info_dict.get(str(edge_id), None) if edge_info is None else edge_info
-                if edge_info is not None:
-                    active_edges_count += 1
-                    active_edge_info_dict[str(edge_id)] = edge_info
-                else:
-                    # Check if the edge status was filled allowed_cache_edge_status_time seconds ago,
-                    # if so no more checking message would be sent.
-                    edge_info = edge_info_global_dict.get(edge_id, None)
-                    if edge_info is not None:
-                        timestamp = edge_info.get("timestamp", None)
-                        time_interval = time.time() - timestamp
-                        if time_interval <= allowed_cache_edge_status_time:
-                            active_edges_count += 1
-                            active_edge_info_dict[str(edge_id)] = edge_info
-                            continue
-
-                    inactivate_edges.append(edge_id)
-                    self.send_status_check_msg(run_id, edge_id, self.edge_id, context=status_check_context)
-
-            # If all edges are ready then send the starting job message to them
-            if active_edges_count == len(edge_id_list):
-                if log_active_edge_info_flag:
-                    logging.debug(f"All edges are ready. Active edge id list is as follows. {active_edge_info_dict}")
-                    log_active_edge_info_flag = False
-                if callback_when_edges_ready is not None:
-                    logging.info("All edges are ready. Start to process the callback function.")
-                    callback_when_edges_ready(active_edge_info_dict=active_edge_info_dict)
-                else:
-                    logging.debug("All edges are ready. No callback function to process.")
-                break
-            else:
-                logging.info(f"All edges are not ready. Active edge id list: {active_edge_info_dict}, "
-                             f"Inactive edge id list: {inactivate_edges}")
-                log_active_edge_info_flag = True
-
-            # Check if runner needs to stop and sleep specific time
-            self.check_runner_stop_event()
-            time.sleep(status_check_sleep_seconds)
-            total_sleep_seconds += status_check_sleep_seconds
-
-            # Check if the status response message has timed out to receive
-            if total_sleep_seconds >= allowed_status_check_sleep_seconds:
-                # If so, send failed message to MLOps and send exception message to all edges.
-                logging.error(f"There are inactive edge devices. "
-                              f"Inactivate edge id list is as follows. {inactivate_edges}")
-                if need_to_trigger_exception:
-                    self.mlops_metrics.report_server_id_status(
-                        run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
-                        server_id=self.edge_id, server_agent_id=self.server_agent_id)
-                    self.send_training_stop_request_to_edges_when_exception(edge_id_list,
-                                                                            payload=json.dumps(self.request_json),
-                                                                            run_id=run_id)
-                return False, active_edge_info_dict, inactivate_edges
-
-            # If we enable the mode for async cluster, then sleep some time and send messages to all clients.
-            if callback_when_edges_ready is not None:
-                should_async, async_timeout = self.should_process_async_cluster()
-                if should_async and total_sleep_seconds >= allowed_status_check_sleep_seconds_for_async:
-                    if async_timeout > allowed_status_check_sleep_seconds_for_async:
-                        time.sleep(async_timeout - allowed_status_check_sleep_seconds_for_async)
-                    self.send_training_request_to_edges()
-                    return True, active_edge_info_dict, inactivate_edges
-
-        return True, active_edge_info_dict, inactivate_edges
-
-    def send_status_check_msg(self, run_id, edge_id, server_id, context=None):
-        topic_get_model_device_id = "server/client/request_device_info/" + str(edge_id)
-        payload = {"server_id": server_id, "run_id": run_id}
-        if context is not None:
-            payload["context"] = context
-        self.message_center.send_message(topic_get_model_device_id, json.dumps(payload))
-
-    @debug
-    def send_training_request_to_edges(self, active_edge_info_dict=None):
-        run_id = self.request_json["runId"]
-        edge_id_list = self.request_json["edgeids"]
-        run_config = self.request_json.get("run_config", {})
-        run_params = run_config.get("parameters", {})
-        job_yaml = run_params.get("job_yaml", {})
-        job_yaml_default_none = run_params.get("job_yaml", None)
-        computing = job_yaml.get("computing", {})
-        request_num_gpus = computing.get("minimum_num_gpus", None)
-        job_gpu_id_list = self.request_json.get("job_gpu_id_list", None)
-
-        logging.info("Send training request to Edge ids: " + str(edge_id_list))
-
-        should_match_gpu = False
-        if job_yaml_default_none is not None and request_num_gpus is not None and \
-                int(request_num_gpus) > 0 and active_edge_info_dict is not None:
-            should_match_gpu = True
-            SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(active_edge_info_dict, show_gpu_list=True)
-
-            # Match and assign gpus to each device
-            assigned_gpu_num_dict, assigned_gpu_ids_dict = SchedulerMatcher.match_and_assign_gpu_resources_to_devices(
-                request_num_gpus, edge_id_list, active_edge_info_dict, job_gpu_id_list=job_gpu_id_list)
-            if assigned_gpu_num_dict is None or assigned_gpu_ids_dict is None:
-                # If no resources available, send failed message to MLOps and send exception message to all edges.
-                gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(
-                    active_edge_info_dict, should_print=True)
-                err_info = f"No resources available." \
-                           f"Total available GPU count {gpu_available_count} is less than " \
-                           f"request GPU count {request_num_gpus}"
-                logging.error(err_info)
-
-                # Bug fix: This mqtt message needs to be sent so platform can clean up the failed run and change the
-                # status from running to failed.
-                self.mlops_metrics.report_server_training_status(
-                    run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id
-                )
-
-                self.mlops_metrics.report_server_id_status(
-                    run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
-                    server_id=self.edge_id, server_agent_id=self.server_agent_id)
-                self.send_training_stop_request_to_edges_when_exception(edge_id_list,
-                                                                        payload=json.dumps(self.request_json),
-                                                                        run_id=run_id)
-
-                serving_args = job_yaml.get("serving_args", {})
-                endpoint_id = serving_args.get("endpoint_id", None)
-                if endpoint_id is not None:
-                    fedml.mlops.log_endpoint_status(
-                        endpoint_id, device_client_constants.ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
-                    fedml.mlops.log_run_log_lines(
-                        endpoint_id, 0, [err_info],
-                        log_source=device_client_constants.ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT
-                    )
-                return
-
-            # Generate master node addr and port
-            master_node_addr, master_node_port = SchedulerMatcher.get_master_node_info(edge_id_list,
-                                                                                       active_edge_info_dict)
-
-            # Generate new edge id list after matched
-            edge_id_list = SchedulerMatcher.generate_new_edge_list_for_gpu_matching(assigned_gpu_num_dict)
-            if len(edge_id_list) <= 0:
-                gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(
-                    active_edge_info_dict, should_print=True)
-                logging.error(f"Request parameter for GPU num is invalid."
-                              f"Total available GPU count {gpu_available_count}."
-                              f"Request GPU num {request_num_gpus}")
-                self.mlops_metrics.report_server_id_status(
-                    run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
-                    server_id=self.edge_id, server_agent_id=self.server_agent_id)
-                self.send_training_stop_request_to_edges_when_exception(edge_id_list,
-                                                                        payload=json.dumps(self.request_json),
-                                                                        run_id=run_id)
-                return
-
-        if should_match_gpu:
-            # Report gpu num and related infos to MLOps.
-            serving_args = job_yaml.get("serving_args", {})
-            endpoint_id = serving_args.get("endpoint_id", None)
-            if endpoint_id is not None:
-                endpoint_info = list()
-                for edge_id_item, gpu_num in assigned_gpu_num_dict.items():
-                    edge_info = active_edge_info_dict.get(str(edge_id_item), {})
-                    endpoint_info.append({
-                        "machine_id": edge_id_item, "endpoint_gpu_count": gpu_num,
-                        "master_deploy_id": edge_info.get("master_device_id", 0),
-                        "slave_deploy_id": edge_info.get("slave_device_id", 0)})
-                topic_name = f"compute/mlops/endpoint"
-                endpoint_info_json = {"endpoint_id": endpoint_id, "endpoint_info": endpoint_info}
-                print(f"endpoint_info_json {endpoint_info_json}")
-                self.message_center.send_message(topic_name, json.dumps(endpoint_info_json))
-
-        client_rank = 1
-        for edge_id in edge_id_list:
-            topic_start_train = "flserver_agent/" + str(edge_id) + "/start_train"
-            logging.info("start_train: send topic " + topic_start_train + " to client...")
-            request_json = self.request_json
-            request_json["client_rank"] = client_rank
-            client_rank += 1
-
-            if active_edge_info_dict is not None:
-                edge_info = active_edge_info_dict.get(str(edge_id), {})
-                model_master_device_id = edge_info.get("master_device_id", None)
-                model_slave_device_id = edge_info.get("slave_device_id", None)
-                model_slave_device_id_list = edge_info.get("slave_device_id_list", None)
-
-                if should_match_gpu:
-                    request_json["scheduler_match_info"] = SchedulerMatcher.generate_match_info_for_scheduler(
-                        edge_id, edge_id_list, master_node_addr, master_node_port,
-                        assigned_gpu_num_dict, assigned_gpu_ids_dict,
-                        model_master_device_id=model_master_device_id,
-                        model_slave_device_id=model_slave_device_id,
-                        model_slave_device_id_list=model_slave_device_id_list
-                    )
-
-            self.message_center.send_message(topic_start_train, json.dumps(request_json))
-
-    def setup_listeners_for_edge_status(self, run_id, edge_ids, server_id):
-        self.client_agent_active_list[f"{run_id}"] = dict()
-        self.client_agent_active_list[f"{run_id}"][f"server"] = server_id
-        for edge_id in edge_ids:
-            self.client_agent_active_list[f"{run_id}"][f"{edge_id}"] = ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
-            edge_status_topic = "fl_client/flclient_agent_" + str(edge_id) + "/status"
-            self.add_message_listener(edge_status_topic, self.callback_edge_status)
-            self.subscribe_msg(edge_status_topic)
-
-    def remove_listeners_for_edge_status(self, edge_ids=None):
-        if edge_ids is None:
-            edge_ids = self.request_json["edgeids"]
-
-        for edge_id in edge_ids:
-            edge_status_topic = "fl_client/flclient_agent_" + str(edge_id) + "/status"
-            self.unsubscribe_msg(edge_status_topic)
-
-    def setup_listener_for_run_metrics(self, run_id):
-        metric_topic = f"fedml_slave/fedml_master/metrics/{run_id}"
-        self.add_message_listener(metric_topic, self.callback_run_metrics)
-        self.subscribe_msg(metric_topic)
-
-    def remove_listener_for_run_metrics(self, run_id):
-        metric_topic = f"fedml_slave/fedml_master/metrics/{run_id}"
-        self.unsubscribe_msg(metric_topic)
-
-    def setup_listener_for_run_logs(self, run_id):
-        logs_topic = f"fedml_slave/fedml_master/logs/{run_id}"
-        self.add_message_listener(logs_topic, self.callback_run_logs)
-        self.subscribe_msg(logs_topic)
-
-    def remove_listener_for_run_logs(self, run_id):
-        logs_topic = f"fedml_slave/fedml_master/logs/{run_id}"
-        self.unsubscribe_msg(logs_topic)
-
-    def callback_run_logs(self, topic, payload):
-        run_id = str(topic).split('/')[-1]
-        run_id_str = str(run_id)
-        if self.run_logs_queue_map.get(run_id_str) is None:
-            self.run_logs_queue_map[run_id_str] = Queue()
-        self.run_logs_queue_map[run_id_str].put(payload)
-
-    def callback_run_metrics(self, topic, payload):
-        print(f"callback_run_metrics topic {topic}, payload {payload}")
-        run_id = str(topic).split('/')[-1]
-        run_id_str = str(run_id)
-        if self.run_metrics_queue_map.get(run_id_str) is None:
-            self.run_metrics_queue_map[run_id_str] = Queue()
-        self.run_metrics_queue_map[run_id_str].put(payload)
-
-    def callback_edge_status(self, topic, payload):
-        payload_json = json.loads(payload)
-        run_id = payload_json.get("run_id", None)
-        edge_id = payload_json.get("edge_id", None)
-        status = payload_json.get("status", None)
-        if run_id is not None and edge_id is not None:
-            active_item_dict = self.client_agent_active_list.get(f"{run_id}", None)
-            if active_item_dict is None:
-                return
-            self.client_agent_active_list[f"{run_id}"][f"{edge_id}"] = status
-
-            if self.run_edge_id_status_queue_map.get(f"{run_id}") is None:
-                self.run_edge_id_status_queue_map[f"{run_id}"] = Queue()
-            self.run_edge_id_status_queue_map[f"{run_id}"].put(self.client_agent_active_list[f"{run_id}"])
-
-            self.process_job_status(run_id, edge_id, status)
-
-    def ota_upgrade(self, payload, request_json):
-        run_id = request_json["runId"]
-        force_ota = False
-        ota_version = None
-
-        try:
-            run_config = request_json.get("run_config", None)
-            parameters = run_config.get("parameters", None)
-            common_args = parameters.get("common_args", None)
-            force_ota = common_args.get("force_ota", False)
-            ota_version = common_args.get("ota_version", None)
-        except Exception as e:
-            pass
-
-        if force_ota and ota_version is not None:
-            should_upgrade = True if ota_version != fedml.__version__ else False
-            upgrade_version = ota_version
-        else:
-            try:
-                fedml_is_latest_version, local_ver, remote_ver = sys_utils.check_fedml_is_latest_version(self.version)
-            except Exception as e:
-                return
-
-            should_upgrade = False if fedml_is_latest_version else True
-            upgrade_version = remote_ver
-
-        if should_upgrade:
-            job_obj = FedMLServerDataInterface.get_instance().get_job_by_id(run_id)
-            if job_obj is None:
-                FedMLServerDataInterface.get_instance(). \
-                    save_started_job(run_id, self.edge_id, time.time(),
-                                     ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING,
-                                     ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING,
-                                     payload)
-
-                self.mlops_metrics.report_server_id_status(
-                    run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING, edge_id=self.edge_id,
-                    server_id=self.edge_id, server_agent_id=self.edge_id)
-            logging.info(f"Upgrade to version {upgrade_version} ...")
-
-            sys_utils.do_upgrade(self.version, upgrade_version)
-
-            raise Exception("Restarting after upgraded...")
-
-    def callback_start_train(self, topic=None, payload=None):
-        print("callback_start_train: ")
-        try:
-            MLOpsConfigs.fetch_all_configs()
-        except Exception as e:
-            pass
-
-        # [NOTES] Example Request JSON: https://fedml-inc.larksuite.com/wiki/ScnIwUif9iupbjkYS0LuBrd6sod#WjbEdhYrvogmlGxKTOGu98C6sSb
-        request_json = json.loads(payload)
-        is_retain = request_json.get("is_retain", False)
-        if is_retain:
-            return
-
-        # Process the log
-        run_id = request_json["runId"]
-        run_id_str = str(run_id)
-        if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent:
-            # Start log processor for current run
-            self.args.run_id = run_id
-            self.args.edge_id = self.edge_id
-            MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
-            MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(
-                run_id, self.edge_id, SchedulerConstants.get_log_source(request_json))
-            logging.info("start the log processor.")
-        elif self.run_as_cloud_agent:
-            # Start log processor for current run
-            MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(
-                run_id, request_json.get("server_id", "0"), SchedulerConstants.get_log_source(request_json)
-            )
-        elif self.run_as_cloud_server:
-            self.server_agent_id = request_json.get("cloud_agent_id", self.edge_id)
-            run_id = request_json["runId"]
-            run_id_str = str(run_id)
-
-            # Start log processor for current run
-            self.args.run_id = run_id
-            MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(
-                run_id, self.edge_id, SchedulerConstants.get_log_source(request_json))
-
-        logging.info("callback_start_train payload: {}".format(payload))
-        logging.info(
-            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
-        )
-
-        # if not self.run_as_cloud_agent and not self.run_as_cloud_server:
-        #    self.ota_upgrade(payload, request_json)
-
-        # report server running status
-        if not self.run_as_cloud_server:
-            self.mlops_metrics.report_server_id_status(
-                run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_STARTING, edge_id=self.edge_id,
-                server_id=self.edge_id, server_agent_id=self.edge_id)
-
-        self.start_request_json = payload
-        self.run_id = run_id
-        ServerConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, self.edge_id, run_id=run_id)
-
-        # Start server with multiprocessing mode
-        self.request_json = request_json
-        self.running_request_json[run_id_str] = request_json
-        edge_id_list = request_json.get("edgeids", list())
-        self.run_edge_ids[run_id_str] = edge_id_list
-
-        logging.info("subscribe the client exception message.")
-
-        if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent:
-            self.init_job_task(request_json)
-
-            self.args.run_id = run_id
-
-            self._start_runner_process(run_id, request_json)
-
-            ServerConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid)
-        elif self.run_as_cloud_agent:
-            self.init_job_task(request_json)
-
-            server_runner = FedMLServerRunner(
-                self.args, run_id=run_id, request_json=request_json, agent_config=self.agent_config
-            )
-            server_runner.run_as_cloud_agent = self.run_as_cloud_agent
-            server_runner.start_request_json = json.dumps(request_json)
-            self.run_process_event_map[run_id_str] = multiprocessing.Event()
-            self.run_process_event_map[run_id_str].clear()
-            server_runner.run_process_event = self.run_process_event_map[run_id_str]
-
-            if not self.use_local_process_as_cloud_server:
-                self.run_process_map[run_id_str] = Process(target=server_runner.start_cloud_server_process_entry)
-                self.run_process_map[run_id_str].start()
-            else:
-                message_bytes = json.dumps(self.request_json).encode("ascii")
-                base64_bytes = base64.b64encode(message_bytes)
-                runner_cmd_encoded = base64_bytes.decode("ascii")
-                logging.info("runner_cmd_encoded: {}".format(runner_cmd_encoded))
-
-                cloud_device_id = request_json.get("cloudServerDeviceId", "0")
-
-                self.run_process_map[run_id_str] = Process(
-                    target=FedMLServerRunner.start_local_cloud_server,
-                    args=(run_id_str, self.args.user, self.version, cloud_device_id, runner_cmd_encoded))
-                self.run_process_map[run_id_str].start()
-                time.sleep(1)
-
-            ServerConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid)
-        elif self.run_as_cloud_server:
-            self.server_agent_id = request_json.get("cloud_agent_id", self.edge_id)
-            self.start_request_json = json.dumps(request_json)
-            run_id = request_json["runId"]
-            run_id_str = str(run_id)
-
-            self.init_job_task(request_json)
-
-            self.args.run_id = run_id
-
-            self._start_runner_process(run_id, request_json)
-            # ServerConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid)
-
-    @staticmethod
-    def start_local_cloud_server(run_id, user, version, cloud_device_id, runner_cmd_encoded):
-        print(f"start cloud server, device id {cloud_device_id}, runner cmd {runner_cmd_encoded}")
-        if not FedMLServerRunner.debug_cloud_server:
-            pip_source_dir = os.path.dirname(__file__)
-            login_cmd = os.path.join(pip_source_dir, "server_login.py")
-            run_cmd = f"{get_python_program()} -W ignore {login_cmd} -t login -r cloud_server -u {str(user)} " \
-                      f"-v {version} -id {cloud_device_id} -rc {runner_cmd_encoded}"
-            os.system(run_cmd)
-
-    def _start_runner_process(self, run_id, request_json, is_server_job=False):
-        server_runner = FedMLServerRunner(
-            self.args, run_id=run_id, request_json=request_json, agent_config=self.agent_config
-        )
-        run_id_str = str(run_id)
-        server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent
-        server_runner.edge_id = self.edge_id
-        server_runner.server_agent_id = self.server_agent_id
-        server_runner.start_request_json = json.dumps(request_json)
-        self.run_process_event_map[run_id_str] = multiprocessing.Event()
-        self.run_process_event_map[run_id_str].clear()
-        server_runner.run_process_event = self.run_process_event_map[run_id_str]
-        self.run_process_completed_event_map[run_id_str] = multiprocessing.Event()
-        self.run_process_completed_event_map[run_id_str].clear()
-        server_runner.run_process_completed_event = self.run_process_completed_event_map[run_id_str]
-        if self.run_edge_id_status_queue_map.get(run_id_str, None) is None:
-            self.run_edge_id_status_queue_map[run_id_str] = Queue()
-        if self.run_edge_device_info_queue_map.get(run_id_str, None) is None:
-            self.run_edge_device_info_queue_map[run_id_str] = Queue()
-        if self.run_metrics_queue_map.get(run_id_str, None) is None:
-            self.run_metrics_queue_map[run_id_str] = Queue()
-        if self.run_events_queue_map.get(run_id_str, None) is None:
-            self.run_events_queue_map[run_id_str] = Queue()
-        if self.run_artifacts_queue_map.get(run_id_str, None) is None:
-            self.run_artifacts_queue_map[run_id_str] = Queue()
-        if self.run_logs_queue_map.get(run_id_str, None) is None:
-            self.run_logs_queue_map[run_id_str] = Queue()
-        # if self.run_edge_device_info_global_queue is None:
-        #     self.run_edge_device_info_global_queue = Array('i', list())
-        server_runner.edge_id_status_queue = self.run_edge_id_status_queue_map[run_id_str]
-        server_runner.edge_device_info_queue = self.run_edge_device_info_queue_map[run_id_str]
-        self.run_process_map[run_id_str] = Process(
-            target=server_runner.run if not is_server_job else server_runner.run_server_job_impl, args=(
-                self.run_process_event_map[run_id_str], self.run_process_completed_event_map[run_id_str],
-                self.run_edge_id_status_queue_map[run_id_str], self.run_edge_device_info_queue_map[run_id_str],
-                self.run_metrics_queue_map[run_id_str], self.run_events_queue_map[run_id_str],
-                self.run_artifacts_queue_map[run_id_str], self.run_logs_queue_map[run_id_str],
-                self.message_center.get_message_queue(),
-                self.run_edge_device_info_global_queue
-            )
-        )
-        self.run_process_map[run_id_str].start()
-        ServerConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid)
-
-    def start_cloud_server_process_entry(self):
-        try:
-            self.start_cloud_server_process()
-        except Exception as e:
-            pass
-
-    def start_cloud_server_process(self):
-        run_config = self.request_json["run_config"]
-        packages_config = run_config["packages_config"]
-        self.start_cloud_server(packages_config)
-
-    def start_cloud_server(self, packages_config):
-        server_id = self.request_json["server_id"]
-        self.cloud_server_name = FedMLServerRunner.FEDML_CLOUD_SERVER_PREFIX + str(self.run_id) + "-" + str(server_id)
-        self.server_docker_image = (
-                self.agent_config["docker_config"]["registry_server"]
-                + self.agent_config["docker_config"]["registry_dir"]
-                + self.server_docker_base_image
-        )
-
-        logging.info("docker image {}".format(self.server_docker_image))
-        # logging.info("file_sys_driver {}".format(self.agent_config["docker_config"]["file_sys_driver"]))
-
-        registry_secret_cmd = (
-                "kubectl create namespace fedml-devops-aggregator-"
-                + self.version
-                + ";kubectl -n fedml-devops-aggregator-"
-                + self.version
-                + " delete secret secret-"
-                + self.cloud_server_name
-                + " ;kubectl create secret docker-registry secret-"
-                + self.cloud_server_name
-                + " --docker-server="
-                + self.agent_config["docker_config"]["registry_server"]
-                + " --docker-username="
-                + self.agent_config["docker_config"]["user_name"]
-                + " --docker-password=$(aws ecr-public get-login-password --region "
-                + self.agent_config["docker_config"]["public_cloud_region"]
-                + ")"
-                + " --docker-email=fedml@fedml.ai -n fedml-devops-aggregator-"
-                + self.version
-        )
-        logging.info("Create secret cmd: " + registry_secret_cmd)
-        os.system(registry_secret_cmd)
-
-        message_bytes = json.dumps(self.request_json).encode("ascii")
-        base64_bytes = base64.b64encode(message_bytes)
-        runner_cmd_encoded = base64_bytes.decode("ascii")
-        logging.info("runner_cmd_encoded: {}".format(runner_cmd_encoded))
-        # logging.info("runner_cmd_decoded: {}".format(base64.b64decode(runner_cmd_encoded).decode()))
-        cur_dir = os.path.dirname(__file__)
-        run_deployment_cmd = (
-                "export FEDML_AGGREGATOR_NAME="
-                + self.cloud_server_name
-                + ";export FEDML_AGGREGATOR_SVC="
-                + self.cloud_server_name
-                + ";export FEDML_AGGREGATOR_VERSION="
-                + self.version
-                + ';export FEDML_AGGREGATOR_IMAGE_PATH="'
-                + self.server_docker_image
-                + '"'
-                + ";export FEDML_CONF_ID="
-                + self.cloud_server_name
-                + ";export FEDML_DATA_PV_ID="
-                + self.cloud_server_name
-                + ";export FEDML_DATA_PVC_ID="
-                + self.cloud_server_name
-                + ";export FEDML_REGISTRY_SECRET_SUFFIX="
-                + self.cloud_server_name
-                + ";export FEDML_ACCOUNT_ID=0"
-                + ";export FEDML_SERVER_DEVICE_ID="
-                + self.request_json.get("cloudServerDeviceId", "0")
-                + ";export FEDML_VERSION="
-                + self.version
-                + ";export FEDML_PACKAGE_NAME="
-                + packages_config.get("server", "")
-                + ";export FEDML_PACKAGE_URL="
-                + packages_config.get("serverUrl", "")
-                + ";export FEDML_RUNNER_CMD="
-                + runner_cmd_encoded
-                + ";envsubst < "
-                + os.path.join(cur_dir, "templates", "fedml-server-deployment.yaml")
-                + " | kubectl apply -f - "
-        )
-        logging.info("FedMLServerRunner.run with k8s: " + run_deployment_cmd)
-        os.system(run_deployment_cmd)
-
-    def stop_cloud_server(self):
-        self.cloud_server_name = FedMLServerRunner.FEDML_CLOUD_SERVER_PREFIX + str(self.run_id) \
-                                 + "-" + str(self.edge_id)
-        self.server_docker_image = (
-                self.agent_config["docker_config"]["registry_server"]
-                + self.agent_config["docker_config"]["registry_dir"]
-                + self.server_docker_base_image
-        )
-        delete_deployment_cmd = (
-                "export FEDML_AGGREGATOR_NAME="
-                + self.cloud_server_name
-                + ";export FEDML_AGGREGATOR_SVC="
-                + self.cloud_server_name
-                + ";export FEDML_AGGREGATOR_VERSION="
-                + self.version
-                + ';export FEDML_AGGREGATOR_IMAGE_PATH="'
-                + self.server_docker_image
-                + '"'
-                + ";export FEDML_CONF_ID="
-                + self.cloud_server_name
-                + ";export FEDML_DATA_PV_ID="
-                + self.cloud_server_name
-                + ";export FEDML_DATA_PVC_ID="
-                + self.cloud_server_name
-                + ";export FEDML_REGISTRY_SECRET_SUFFIX="
-                + self.cloud_server_name
-                + ";kubectl -n fedml-devops-aggregator-"
-                + self.version
-                + " delete deployment "
-                + self.cloud_server_name
-                + ";kubectl -n fedml-devops-aggregator-"
-                + self.version
-                + " delete svc "
-                + self.cloud_server_name
-                + ";kubectl -n fedml-devops-aggregator-"
-                + self.version
-                + " delete secret secret-"
-                + self.cloud_server_name
-        )
-        logging.info("FedMLServerRunner.stop_run with k8s: " + delete_deployment_cmd)
-        os.system(delete_deployment_cmd)
-
-    def setup_message_center(self):
-        if self.message_center is not None:
-            return
-
-        self.message_center = FedMLMessageCenter(agent_config=self.agent_config)
-        self.message_center.start_sender()
-
-        if self.mlops_metrics is None:
-            self.mlops_metrics = MLOpsMetrics()
-        self.mlops_metrics.set_messenger(self.message_center)
-        self.mlops_metrics.run_id = self.run_id
-        self.mlops_metrics.edge_id = self.edge_id
-        self.mlops_metrics.server_agent_id = self.server_agent_id
-
-    def rebuild_message_center(self, message_center_queue):
-        self.message_center = FedMLMessageCenter(message_queue=message_center_queue)
-
-        if self.mlops_metrics is None:
-            self.mlops_metrics = MLOpsMetrics()
-        self.mlops_metrics.set_messenger(self.message_center)
-        self.mlops_metrics.run_id = self.run_id
-        self.mlops_metrics.edge_id = self.edge_id
-        self.mlops_metrics.server_agent_id = self.server_agent_id
-
-    def release_message_center(self):
-        try:
-            if self.message_center is not None:
-                self.message_center.stop()
-                self.message_center = None
-
-        except Exception as e:
-            logging.error(
-                f"Failed to release client mqtt manager with Exception {e}. Traceback: {traceback.format_exc()}")
-            pass
-
-    def send_training_stop_request_to_edges(
-            self, edge_id_list, payload=None, run_id=0):
-        if payload is None:
-            payload_obj = {"runId": run_id, "edgeids": edge_id_list}
-        else:
-            payload_obj = json.loads(payload)
-
-        for edge_id in edge_id_list:
-            topic_stop_train = "flserver_agent/" + str(edge_id) + "/stop_train"
-            logging.info("stop_train: send topic " + topic_stop_train)
-            self.message_center.send_message(topic_stop_train, json.dumps(payload_obj))
-
-    def send_training_stop_request_to_specific_edge(self, edge_id, payload):
-        topic_stop_train = "flserver_agent/" + str(edge_id) + "/stop_train"
-        logging.info("stop_train: send topic " + topic_stop_train)
-        self.message_center.send_message(topic_stop_train, payload)
-
-    def send_training_stop_request_to_cloud_server(self, edge_id, payload):
-        topic_stop_train = "mlops/flserver_agent_" + str(edge_id) + "/stop_train"
-        logging.info("stop_train: send topic " + topic_stop_train)
-        self.message_center.send_message(topic_stop_train, payload)
-
-    def send_training_stop_request_to_edges_when_exception(
-            self, edge_id_list, payload=None, run_id=0, server_id=None, status=None):
-        if payload is None:
-            payload_obj = {"runId": run_id, "edgeids": edge_id_list}
-            if server_id is not None:
-                payload_obj["serverId"] = server_id
-        else:
-            payload_obj = json.loads(payload)
-        payload_obj["run_status"] = ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION if status is None else status
-        topic_stop_train = "flserver_agent/" + str(self.edge_id) + "/stop_train"
-        self.callback_stop_train(topic_stop_train, json.dumps(payload_obj), use_payload=payload_obj)
-
-    def callback_stop_train(self, topic, payload, use_payload=None):
-        # logging.info("callback_stop_train: topic = %s, payload = %s" % (topic, payload))
-        logging.info(
-            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
-        )
-
-        request_json = json.loads(payload)
-        is_retain = request_json.get("is_retain", False)
-        if is_retain:
-            return
-        run_id = request_json.get("runId", None)
-        if run_id is None:
-            run_id = request_json.get("id", None)
-
-        edge_id_list = request_json["edgeids"]
-        server_id = request_json.get("serverId", None)
-        if server_id is None:
-            server_id = request_json.get("server_id", None)
-
-        if run_id is None or server_id is None:
-            logging.info("Json format is not correct!")
-            return
-
-        # logging.info("Stop run with multiprocessing.")
-
-        # Stop server with multiprocessing mode
-        run_id_str = str(run_id)
-        stop_request_json = self.running_request_json.get(run_id_str, None)
-        if stop_request_json is None:
-            stop_request_json = request_json
-        if use_payload is not None:
-            stop_request_json = use_payload
-
-        if self.run_process_event_map.get(run_id_str) is not None:
-            self.run_process_event_map.get(run_id_str).set()
-
-        if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent:
-            server_runner = FedMLServerRunner(
-                self.args, run_id=run_id, request_json=stop_request_json, agent_config=self.agent_config,
-                edge_id=self.edge_id
-            )
-            server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent
-            self.run_process_event_map_for_stop[run_id_str] = multiprocessing.Event()
-            if self.run_edge_id_status_queue_map.get(run_id_str, None) is None:
-                self.run_edge_id_status_queue_map[run_id_str] = Queue()
-            if self.run_edge_device_info_queue_map_for_stop.get(run_id_str, None) is None:
-                self.run_edge_device_info_queue_map_for_stop[run_id_str] = Queue()
-            # if self.run_edge_device_info_global_queue_for_stop is None:
-            #     self.run_edge_device_info_global_queue_for_stop = Array('i', list())
-
-            self.run_stop_process_map[run_id_str] = Process(
-                target=server_runner.run_stop, args=(
-                    self.run_process_event_map_for_stop[run_id_str],
-                    self.run_edge_id_status_queue_map[run_id_str],
-                    self.run_edge_device_info_queue_map_for_stop[run_id_str],
-                    self.run_edge_device_info_global_queue_for_stop,
-                    self.message_center.get_message_queue(),
-                )
-            )
-            self.run_stop_process_map[run_id_str].start()
-        elif self.run_as_cloud_agent:
-            self.send_training_stop_request_to_cloud_server(server_id, payload)
-            return
-        elif self.run_as_cloud_server:
-            # if not self.use_local_process_as_cloud_server:
-            server_runner = FedMLServerRunner(
-                self.args, run_id=run_id, request_json=stop_request_json, agent_config=self.agent_config,
-                edge_id=server_id
-            )
-            server_runner.run_as_cloud_agent = self.run_as_cloud_agent
-            self.run_process_event_map_for_stop[run_id_str] = multiprocessing.Event()
-            if self.run_edge_id_status_queue_map.get(run_id_str, None) is None:
-                self.run_edge_id_status_queue_map[run_id_str] = Queue()
-            if self.run_edge_device_info_queue_map_for_stop.get(run_id_str, None) is None:
-                self.run_edge_device_info_queue_map_for_stop[run_id_str] = Queue()
-            # if self.run_edge_device_info_global_queue_for_stop is None:
-            #     self.run_edge_device_info_global_queue_for_stop = Array('i', list())
-
-            self.run_stop_process_map[run_id_str] = Process(
-                target=server_runner.run_stop, args=(
-                    self.run_process_event_map_for_stop[run_id_str],
-                    self.run_edge_id_status_queue_map[run_id_str],
-                    self.run_edge_device_info_queue_map_for_stop[run_id_str],
-                    self.run_edge_device_info_global_queue_for_stop,
-                    self.message_center.get_message_queue(),
-                )
-            )
-            self.run_stop_process_map[run_id_str].start()
-            return
-
-        if self.running_request_json.get(run_id_str, None) is not None:
-            self.running_request_json.pop(run_id_str)
-
-        if self.run_process_map.get(run_id_str, None) is not None:
-            self.run_process_map.pop(run_id_str)
-
-    def run_stop(self, process_event, edge_id_status_queue, edge_device_info_queue,
-                 edge_device_info_global_queue, message_center_queue):
-        if platform.system() != "Windows":
-            os.setsid()
-
-        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
-        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')
-
-        self.run_process_event = process_event
-        try:
-            MLOpsUtils.set_ntp_offset(self.ntp_offset)
-
-            self.rebuild_message_center(message_center_queue)
-
-            self.run_stop_impl(edge_id_status_queue, edge_device_info_queue, edge_device_info_global_queue)
-        except Exception as e:
-            logging.error("Stop runner exits with exceptions. {}".format(traceback.format_exc()))
-        finally:
-            logging.info("Release resources.")
-
-    def run_stop_impl(self, edge_id_status_queue, edge_device_info_queue, edge_device_info_global_queue):
-        run_id_str = str(self.run_id)
-        edge_id_list = self.request_json["edgeids"]
-
-        # Detect running status of all edges
-        status_ok, active_edge_info_dict, inactivate_edges = self.detect_edges_status(
-            edge_device_info_queue, edge_device_info_global_queue=edge_device_info_global_queue,
-            status_timeout=120, need_to_trigger_exception=False,
-            status_check_context=SchedulerConstants.STATUS_CHECK_FRO_RUN_STOP_CONTEXT)
-
-        # Send the training stopping request to running edges.
-        for edge_id_item, _ in active_edge_info_dict.items():
-            self.send_training_stop_request_to_specific_edge(edge_id_item, json.dumps(self.request_json))
-            time.sleep(0.2)
-        time.sleep(3)
-
-        total_sleep_seconds = 0
-        allowed_status_check_sleep_seconds = 60
-        server_id = self.edge_id
-        running_edges_list = list()
-        current_edge_id_status_map = dict()
-
-        while True:
-            # Fetch edge id and status from the edge id status queue
-            while True:
-                try:
-                    queue_item = edge_id_status_queue.get(block=False, timeout=3)
-                    if queue_item is not None:
-                        current_edge_id_status_map.update(queue_item)
-                except queue.Empty as e:  # If queue is empty, then break loop
-                    break
-
-            # Calc the total killed device number
-            running_edges_list.clear()
-            number_of_failed_edges = 0
-            number_of_finished_edges = 0
-            number_of_killed_edges = 0
-            for edge_id_item, status_item in current_edge_id_status_map.items():
-                if edge_id_item == "server":
-                    continue
-
-                if status_item is None or status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \
-                        status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION:
-                    number_of_failed_edges += 1
-                    continue
-
-                if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED:
-                    number_of_finished_edges += 1
-                    continue
-
-                if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED:
-                    number_of_killed_edges += 1
-                    continue
-
-                if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE or \
-                        status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE:
-                    continue
-
-                running_edges_list.append(edge_id_item)
-
-            # If the killed device number is equal total device number, then break
-            if len(running_edges_list) <= 0 and len(current_edge_id_status_map.keys()) == len(edge_id_list) + 1:
-                break
-
-            # Calc the timeout value to wait to device killed.
-            time.sleep(3)
-            total_sleep_seconds += 3
-            if total_sleep_seconds < allowed_status_check_sleep_seconds:
-                continue
-
-            # If timeout, then report killed device status
-            no_response_edges = list(set(edge_id_list) - set(running_edges_list))
-            if len(no_response_edges) <= 0:
-                break
-            for edge_id_item in no_response_edges:
-                self.mlops_metrics.report_client_id_status(
-                    edge_id_item, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED,
-                    server_id=self.edge_id, run_id=self.run_id)
-
-        if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent:
-            # Stop log processor for current run
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id)
-        elif self.run_as_cloud_agent:
-            # Stop log processor for current run
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, server_id)
-
-        self.mlops_metrics.report_server_id_status(
-            self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED, edge_id=self.edge_id,
-            server_id=self.edge_id, server_agent_id=self.edge_id)
-
-    def set_run_status(self, run_id, status, running_request_json):
-        server_runner = FedMLServerRunner(
-            self.args, run_id=run_id, request_json=running_request_json, agent_config=self.agent_config
-        )
-        server_runner.edge_id = self.edge_id
-        server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent
-        server_runner.run_status = status
-        server_runner.message_center = self.message_center
-        server_runner.mlops_metrics = self.mlops_metrics
-        server_runner.cleanup_client_with_status()
-
-    def callback_runner_id_status(self, topic, payload):
-        # logging.info("callback_runner_id_status: topic = %s, payload = %s" % (topic, payload))
-        # logging.info(
-        #     f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
-        # )
-
-        request_json = json.loads(payload)
-        is_retain = request_json.get("is_retain", False)
-        if is_retain:
-            return
-        run_id = request_json["run_id"]
-        status = request_json["status"]
-        edge_id = request_json["edge_id"]
-        server_id = request_json.get("server_id", None)
-        run_id_str = str(run_id)
-
-        if (
-                status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED
-                or status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED
-                or status == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED
-        ):
-            completed_event = self.run_process_completed_event_map.get(run_id_str, None)
-            if completed_event is not None:
-                completed_event.set()
-
-            FedMLServerDataInterface.get_instance().save_job_status(run_id, self.edge_id, status, status)
-
-            # Stop server with multiprocessing mode
-            running_request_json = self.running_request_json.get(run_id_str, None)
-            if running_request_json is None:
-                running_request_json = request_json
-            if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent:
-                self.set_run_status(run_id, status, running_request_json)
-
-                run_process = self.run_process_map.get(run_id_str, None)
-                if run_process is not None:
-                    if run_process.pid is not None:
-                        RunProcessUtils.kill_process(run_process.pid)
-
-                    self.run_process_map.pop(run_id_str)
-
-                # Stop log processor for current run
-                MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
-            elif self.run_as_cloud_agent:
-                pass
-            elif self.run_as_cloud_server:
-                self.set_run_status(run_id, status, running_request_json)
-
-                # Stop log processor for current run
-                MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
-                if self.use_local_process_as_cloud_server:
-                    # RunProcessUtils.kill_process(os.getpid())
-                    cloud_server_process = self.run_process_map.get(run_id_str, None)
-                    if cloud_server_process is not None:
-                        RunProcessUtils.kill_process(cloud_server_process.pid)
-                else:
-                    self.stop_cloud_server()
-
-            if self.run_process_map.get(run_id_str, None) is not None:
-                self.run_process_map.pop(run_id_str)
-
-            self.remove_listener_for_run_metrics(self.run_id)
-            self.remove_listener_for_run_logs(self.run_id)
-        elif (
-                status == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION
-        ):
-            request_json = self.running_request_json.get(run_id_str, None)
-            if request_json is not None:
-                edge_id_list = request_json.get("edgeids", list())
-                server_id = request_json.get("serverId", None)
-                server_id = request_json.get("server_id", None) if server_id is None else server_id
-                self.send_training_stop_request_to_edges_when_exception(
-                    edge_id_list, run_id=run_id, server_id=server_id,
-                    status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)
-
-            FedMLServerDataInterface.get_instance().save_job_status(run_id, self.edge_id, status, status)
-        else:
-            request_json = self.running_request_json.get(run_id_str, None)
-            if request_json is None:
-                request_json = self.start_request_json
-            self.mlops_metrics.report_server_training_status(
-                run_id, status, edge_id=self.edge_id, running_json=json.dumps(request_json))
-
-    def cleanup_client_with_status(self):
-        if self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED:
-            # logging.info("received to finished status.")
-            self.cleanup_run_when_finished(should_send_server_id_status=False)
-        elif self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED:
-            # logging.info("received to failed status.")
-            self.cleanup_run_when_starting_failed(should_send_server_id_status=False)
-        elif self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED:
-            # logging.info("received to failed status.")
-            self.cleanup_run_when_starting_failed(
-                status=self.run_status, should_send_server_id_status=False)
-
-    def callback_report_current_status(self, topic, payload):
-        logging.info(
-            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
-        )
-
-        request_json = json.loads(payload)
-        if self.run_as_edge_server_and_agent:
-            self.send_agent_active_msg()
-        elif self.run_as_cloud_agent:
-            self.send_agent_active_msg()
-        elif self.run_as_cloud_server:
-            pass
-
-    @staticmethod
-    def process_ota_upgrade_msg():
-        os.system("pip install -U fedml")
-
-    def callback_server_ota_msg(self, topic, payload):
-        logging.info(
-            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
-        )
-
-        request_json = json.loads(payload)
-        cmd = request_json["cmd"]
-
-        if cmd == ServerConstants.FEDML_OTA_CMD_UPGRADE:
-            try:
-                self.process_ota_upgrade_msg()
-                # Process(target=FedMLServerRunner.process_ota_upgrade_msg).start()
-                raise Exception("After upgraded, restart runner...")
-            except Exception as e:
-                pass
-        elif cmd == ServerConstants.FEDML_OTA_CMD_RESTART:
-            raise Exception("Restart runner...")
-
-    def callback_response_device_info(self, topic, payload):
-        # Parse payload
-        payload_json = json.loads(payload)
-        run_id = payload_json.get("run_id", 0)
-        context = payload_json.get("context", None)
-        master_device_id = payload_json.get("master_device_id", 0)
-        slave_device_id = payload_json.get("slave_device_id", 0)
-        slave_device_id_list = payload_json.get("slave_device_id_list", 0)
-        edge_id = payload_json.get("edge_id", 0)
-        device_info = payload_json.get("edge_info", 0)
-        device_info["master_device_id"] = master_device_id
-        device_info["slave_device_id"] = slave_device_id
-        device_info["slave_device_id_list"] = slave_device_id_list
-        run_id_str = str(run_id)
-
-        # Put device info into a multiprocessing queue so master runner checks if all edges are ready
-        if context is None:
-            if self.run_edge_device_info_queue_map.get(run_id_str, None) is None:
-                self.run_edge_device_info_queue_map[run_id_str] = Queue()
-            self.run_edge_device_info_queue_map[run_id_str].put(device_info)
-
-            # if self.run_edge_device_info_global_queue is None:
-            #     self.run_edge_device_info_global_queue = Array('i', list())
-            #
-            # self.run_edge_device_info_global_queue[len(self.run_edge_device_info_global_queue)] =  \
-            #     {"timestamp": time.time(), "edge_id": edge_id, "device_info": device_info}
-
-            self.check_model_device_ready_and_deploy(run_id, master_device_id, slave_device_id,
-                                                     slave_device_id_list=slave_device_id_list)
-        elif context == SchedulerConstants.STATUS_CHECK_FRO_RUN_STOP_CONTEXT:
-            if self.run_edge_device_info_queue_map_for_stop.get(run_id_str, None) is None:
-                self.run_edge_device_info_queue_map_for_stop[run_id_str] = Queue()
-            self.run_edge_device_info_queue_map_for_stop[run_id_str].put(device_info)
-
-            # if self.run_edge_device_info_global_queue_for_stop is None:
-            #     self.run_edge_device_info_global_queue_for_stop = Array('i', list())
-            #
-            # self.run_edge_device_info_global_queue_for_stop[len(self.run_edge_device_info_global_queue_for_stop)] = \
-            #     {"timestamp": time.time(), "edge_id": edge_id, "device_info": device_info}
-
-    def check_model_device_ready_and_deploy(self, run_id, master_device_id, slave_device_id, slave_device_id_list=None):
-        request_json = self.running_request_json.get(str(run_id), None)
-        if request_json is None:
-            return
-        run_config = request_json["run_config"]
-        run_params = run_config.get("parameters", {})
-        job_yaml = run_params.get("job_yaml", {})
-        job_type = job_yaml.get("job_type", None)
-        job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type
-        if job_type != Constants.JOB_TASK_TYPE_DEPLOY and job_type != Constants.JOB_TASK_TYPE_SERVE:
-            return
-
-        # Init model device ids for each run
-        run_id_str = str(run_id)
-        if self.run_model_device_ids.get(run_id_str, None) is None:
-            self.run_model_device_ids[run_id_str] = list()
-
-        # Append master device and slave devices to the model devices map
-        self.run_model_device_ids[run_id_str].append({"master_device_id": master_device_id,
-                                                      "slave_device_id": slave_device_id})
-        model_device_ids = self.run_model_device_ids.get(run_id_str, None)
-        if model_device_ids is None:
-            return
-
-        # Check if all model devices are ready
-        if len(model_device_ids) != len(self.run_edge_ids.get(run_id_str, list())):
-            return
-
-        # Generate model master ids and model slave device ids
-        device_master_ids = list()
-        device_slave_ids = list()
-        for device_ids in model_device_ids:
-            model_master_id = device_ids.get("master_device_id")
-            model_slave_id = device_ids.get("slave_device_id")
-            device_master_ids.append(model_master_id)
-            device_slave_ids.append(model_slave_id)
-
-        if len(device_master_ids) <= 0:
-            return
-
-        # Generate serving devices for deploying
-        serving_devices = list()
-        serving_devices.append(device_master_ids[0])
-        serving_devices.extend(device_slave_ids)
-
-        # Start to deploy the model
-        self.deploy_model(serving_devices, request_json, run_id=run_id)
-
    def callback_request_device_info_from_mlops(self, topic, payload):
        """MQTT handler: MLOps requested this master agent's device info; delegate the reply."""
        self.response_device_info_to_mlops(topic, payload)
-
-    def response_device_info_to_mlops(self, topic, payload):
-        response_topic = f"deploy/master_agent/mlops/response_device_info"
-        payload_json = json.loads(payload)
-        need_gpu_info = payload_json.get("need_gpu_info", False)
-        if self.mlops_metrics is not None:
-            if not need_gpu_info:
-                response_payload = {
-                    "run_id": self.run_id,
-                    "master_agent_device_id": self.edge_id,
-                    "fedml_version": fedml.__version__
-                }
-            else:
-                total_mem, free_mem, total_disk_size, free_disk_size, cup_utilization, cpu_cores, \
-                    gpu_cores_total, gpu_cores_available, sent_bytes, recv_bytes, gpu_available_ids = \
-                    sys_utils.get_sys_realtime_stats()
-                gpu_available_ids = JobRunnerUtils.get_instance().get_available_gpu_id_list(self.edge_id)
-                gpu_available_ids = JobRunnerUtils.trim_unavailable_gpu_ids(gpu_available_ids)
-                gpu_cores_available = len(gpu_available_ids)
-                response_payload = {
-                    "run_id": self.run_id,
-                    "master_agent_device_id": self.edge_id,
-                    "memoryTotal": round(total_mem * MLOpsUtils.BYTES_TO_GB, 2),
-                    "memoryAvailable": round(free_mem * MLOpsUtils.BYTES_TO_GB, 2),
-                    "diskSpaceTotal": round(total_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
-                    "diskSpaceAvailable": round(free_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
-                    "cpuUtilization": round(cup_utilization, 2),
-                    "cpuCores": cpu_cores,
-                    "gpuCoresTotal": gpu_cores_total,
-                    "gpuCoresAvailable": gpu_cores_available,
-                    "networkTraffic": sent_bytes + recv_bytes,
-                    "timestamp": int(MLOpsUtils.get_ntp_time()),
-                    "fedml_version": fedml.__version__
-                }
-            self.mlops_metrics.report_json_message(response_topic, json.dumps(response_payload))
-
-    @staticmethod
-    def get_device_id():
-        device_file_path = os.path.join(ServerConstants.get_data_dir(), ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME)
-        file_for_device_id = os.path.join(device_file_path, "devices.id")
-        if not os.path.exists(device_file_path):
-            os.makedirs(device_file_path)
-        elif os.path.exists(file_for_device_id):
-            with open(file_for_device_id, 'r', encoding='utf-8') as f:
-                device_id_from_file = f.readline()
-                if device_id_from_file is not None and device_id_from_file != "":
-                    return device_id_from_file
-
-        if platform.system() == "Darwin":
-            cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \
-                                 "|awk -F':' '{print $2}' "
-            device_id = os.popen(cmd_get_serial_num).read()
-            device_id = device_id.replace('\n', '').replace(' ', '')
-            if device_id is None or device_id == "":
-                device_id = hex(uuid.getnode())
-            else:
-                device_id = "0x" + device_id
-        else:
-            if "nt" in os.name:
-
-                def get_uuid():
-                    guid = ""
-                    try:
-                        cmd = "wmic csproduct get uuid"
-                        guid = str(subprocess.check_output(cmd))
-                        pos1 = guid.find("\\n") + 2
-                        guid = guid[pos1:-15]
-                    except Exception as ex:
-                        pass
-                    return str(guid)
-
-                device_id = str(get_uuid())
-            elif "posix" in os.name:
-                device_id = sys_utils.get_device_id_in_docker()
-                if device_id is None:
-                    device_id = hex(uuid.getnode())
-            else:
-                device_id = sys_utils.run_subprocess_open(
-                    "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split()
-                )
-                device_id = hex(device_id)
-
-        if device_id is not None and device_id != "":
-            with open(file_for_device_id, 'w', encoding='utf-8') as f:
-                f.write(device_id)
-        else:
-            device_id = hex(uuid.uuid4())
-            with open(file_for_device_id, 'w', encoding='utf-8') as f:
-                f.write(device_id)
-
-        return device_id
-
-    def bind_account_and_device_id(self, url, account_id, device_id, os_name, api_key="", role=None):
-        if role is None:
-            role = "edge_server"
-            if self.run_as_edge_server_and_agent:
-                role = "edge_server"
-            elif self.run_as_cloud_agent:
-                role = "cloud_agent"
-            elif self.run_as_cloud_server:
-                role = "cloud_server"
-
-        ip = requests.get('https://checkip.amazonaws.com').text.strip()
-        fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \
-            cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \
-            gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info()
-        host_name = sys_utils.get_host_name()
-        json_params = {
-            "accountid": account_id,
-            "deviceid": device_id,
-            "type": os_name,
-            "state": ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE,
-            "status": ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE,
-            "processor": cpu_info,
-            "core_type": cpu_info,
-            "network": "",
-            "role": role,
-            "os_ver": os_ver,
-            "memory": total_mem,
-            "ip": ip,
-            "api_key": api_key,
-            "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver,
-                            "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver,
-                            "mpi_installed": mpi_installed, "cpu_usage": cpu_usage,
-                            "available_mem": available_mem, "total_mem": total_mem,
-                            "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name}
-        }
-        if gpu_count > 0:
-            if gpu_total_mem is not None:
-                json_params["gpu"] = gpu_info if gpu_info is not None else "" + ", Total GPU Memory: " + gpu_total_mem
-            else:
-                json_params["gpu"] = gpu_info if gpu_info is not None else ""
-            json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else ""
-            if gpu_available_mem is not None:
-                json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem
-            if gpu_total_mem is not None:
-                json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem
-
-            json_params["extra_infos"]["gpu_count"] = gpu_count
-            json_params["extra_infos"]["gpu_vendor"] = gpu_vendor
-            json_params["extra_infos"]["gpu_device_name"] = gpu_device_name
-
-            gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count)
-            gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0
-            gpu_list = sys_utils.get_gpu_list()
-            json_params["extra_infos"]["gpu_available_count"] = gpu_available_count
-            json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list
-            json_params["extra_infos"]["gpu_list"] = gpu_list
-        else:
-            json_params["gpu"] = "None"
-            json_params["extra_infos"]["gpu_available_count"] = 0
-            json_params["extra_infos"]["gpu_available_id_list"] = []
-            json_params["extra_infos"]["gpu_list"] = []
-
-        _, cert_path = MLOpsConfigs.get_request_params()
-        if cert_path is not None:
-            try:
-                requests.session().verify = cert_path
-                response = requests.post(
-                    url, json=json_params, verify=True,
-                    headers={"content-type": "application/json", "Connection": "close"}
-                )
-            except requests.exceptions.SSLError as err:
-                MLOpsConfigs.install_root_ca_file()
-                response = requests.post(
-                    url, json=json_params, verify=True,
-                    headers={"content-type": "application/json", "Connection": "close"}
-                )
-        else:
-            response = requests.post(url, json=json_params, headers={"Connection": "close"})
-        edge_id = -1
-        user_name = None
-        extra_url = None
-        if response.status_code != 200:
-            print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                  f"response.content: {response.content}")
-            pass
-        else:
-            # print("url = {}, response = {}".format(url, response))
-            status_code = response.json().get("code")
-            if status_code == "SUCCESS":
-                edge_id = response.json().get("data").get("id")
-                user_name = response.json().get("data").get("userName", None)
-                extra_url = response.json().get("data").get("url", None)
-                if edge_id is None or edge_id <= 0:
-                    print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                          f"response.content: {response.content}")
-            else:
-                if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR:
-                    raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR)
-                print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                      f"response.content: {response.content}")
-                return -1, None, None
-        return edge_id, user_name, extra_url
-
    def fetch_configs(self):
        """Fetch all remote service configurations from the MLOps backend via MLOpsConfigs."""
        return MLOpsConfigs.fetch_all_configs()
-
-    def send_agent_active_msg(self):
-        active_topic = "flserver_agent/active"
-        status = MLOpsStatus.get_instance().get_server_agent_status(self.edge_id)
-        if (
-                status is not None
-                and status != ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE
-                and status != ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
-        ):
-            return
-
-        if self.run_as_cloud_agent:
-            status = ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
-        else:
-            try:
-                current_job = FedMLServerDataInterface.get_instance().get_job_by_id(self.run_id)
-            except Exception as e:
-                current_job = None
-            if current_job is None:
-                if status is not None and status == ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE:
-                    status = ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
-                else:
-                    return
-            else:
-                status = ServerConstants.get_device_state_from_run_edge_state(current_job.status)
-        active_msg = {"ID": self.edge_id, "status": status}
-        MLOpsStatus.get_instance().set_server_agent_status(self.edge_id, status)
-        if self.mqtt_mgr is not None:
-            self.mqtt_mgr.send_message_json(active_topic, json.dumps(active_msg))
-        else:
-            self.send_message_json(active_topic, json.dumps(active_msg))
-
    def recover_start_train_msg_after_upgrading(self):
        """Re-dispatch the cached start_train message if the agent stopped mid-upgrade."""
        try:
            current_job = FedMLServerDataInterface.get_instance().get_current_job()
            if current_job is not None and \
                    current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING:
                logging.info("start training after upgrading.")
                server_agent_id = self.edge_id
                topic_start_train = "mlops/flserver_agent_" + str(server_agent_id) + "/start_train"
                # Replay the job's stored running_json as if MLOps had just sent it.
                self.callback_start_train(topic_start_train, current_job.running_json)
        except Exception as e:
            # Best-effort recovery: log the traceback and continue startup.
            logging.info("recover starting train message after upgrading: {}".format(traceback.format_exc()))
-
-    def on_agent_mqtt_connected(self, mqtt_client_object):
-        # The MQTT message topic format is as follows: <sender>/<receiver>/<action>
-
-        # Setup MQTT message listener for starting training
-        server_agent_id = self.edge_id
-        topic_start_train = "mlops/flserver_agent_" + str(server_agent_id) + "/start_train"
-        self.add_message_listener(topic_start_train, self.callback_start_train)
-        self.mqtt_mgr.add_message_listener(topic_start_train, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener for stopping training
-        topic_stop_train = "mlops/flserver_agent_" + str(server_agent_id) + "/stop_train"
-        self.add_message_listener(topic_stop_train, self.callback_stop_train)
-        self.mqtt_mgr.add_message_listener(topic_stop_train, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener for server status switching
-        topic_server_status = "fl_server/flserver_agent_" + str(server_agent_id) + "/status"
-        self.add_message_listener(topic_server_status, self.callback_runner_id_status)
-        self.mqtt_mgr.add_message_listener(topic_server_status, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener to report current device status.
-        topic_report_status = "mlops/report_device_status"
-        self.add_message_listener(topic_report_status, self.callback_report_current_status)
-        self.mqtt_mgr.add_message_listener(topic_report_status, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener to OTA messages from the MLOps.
-        topic_ota_msg = "mlops/flserver_agent_" + str(server_agent_id) + "/ota"
-        self.add_message_listener(topic_ota_msg, self.callback_server_ota_msg)
-        self.mqtt_mgr.add_message_listener(topic_ota_msg, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener to request device info from the client.
-        topic_response_device_info = "client/server/response_device_info/" + str(self.edge_id)
-        self.add_message_listener(topic_response_device_info, self.callback_response_device_info)
-        self.mqtt_mgr.add_message_listener(topic_response_device_info, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener to request device info from MLOps.
-        topic_request_device_info_from_mlops = f"deploy/mlops/master_agent/request_device_info/{self.edge_id}"
-        self.add_message_listener(topic_request_device_info_from_mlops, self.callback_request_device_info_from_mlops)
-        self.mqtt_mgr.add_message_listener(
-            topic_request_device_info_from_mlops, self.listener_message_dispatch_center)
-
-        # Subscribe topics for starting train, stopping train and fetching client status.
-        mqtt_client_object.subscribe(topic_start_train, qos=2)
-        mqtt_client_object.subscribe(topic_stop_train, qos=2)
-        mqtt_client_object.subscribe(topic_server_status, qos=2)
-        mqtt_client_object.subscribe(topic_report_status, qos=2)
-        mqtt_client_object.subscribe(topic_ota_msg, qos=2)
-        mqtt_client_object.subscribe(topic_response_device_info, qos=2)
-        mqtt_client_object.subscribe(topic_request_device_info_from_mlops, qos=2)
-
-        self.subscribed_topics.clear()
-        self.subscribed_topics.append(topic_start_train)
-        self.subscribed_topics.append(topic_stop_train)
-        self.subscribed_topics.append(topic_server_status)
-        self.subscribed_topics.append(topic_report_status)
-        self.subscribed_topics.append(topic_ota_msg)
-        self.subscribed_topics.append(topic_response_device_info)
-        self.subscribed_topics.append(topic_request_device_info_from_mlops)
-
-        # Broadcast the first active message.
-        self.send_agent_active_msg()
-
-        # Start the message center for listener
-        self.start_listener(sender_message_queue=self.message_center.get_message_queue(),
-                            agent_config=self.agent_config)
-
-        if self.run_as_cloud_server:
-            # Start the FedML server
-            message_bytes = self.args.runner_cmd.encode("ascii")
-            base64_bytes = base64.b64decode(message_bytes)
-            payload = base64_bytes.decode("ascii")
-            self.receive_message_json(topic_start_train, payload)
-
-        # Echo results
-        MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout()
-        print("\nCongratulations, your device is connected to the FedML MLOps platform successfully!")
-        print(
-            "Your FedML Edge ID is " + str(self.edge_id) + ", unique device ID is "
-            + str(self.unique_device_id)
-        )
-        MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout(enable=True)
-
    def on_agent_mqtt_disconnected(self, mqtt_client_object):
        """MQTT disconnect hook: mark this server agent OFFLINE in the local status store."""
        MLOpsStatus.get_instance().set_server_agent_status(
            self.edge_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE
        )
-
    def setup_agent_mqtt_connection(self, service_config):
        """Create the agent's MQTT connection and supporting services, then report IDLE.

        Args:
            service_config: dict with an "mqtt_config" entry providing broker
                host/port, credentials and keepalive.

        Side effects (in order): creates the MqttManager with a last-will OFFLINE
        message, initializes the local job table, starts the message center,
        launches the local API server if not already running, connects MQTT,
        reports IDLE to MLOps, starts realtime perf reporting, optionally recovers
        an interrupted start_train, syncs data, and starts the master API daemon.
        """
        # Setup MQTT connection
        self.mqtt_mgr = MqttManager(
            service_config["mqtt_config"]["BROKER_HOST"],
            service_config["mqtt_config"]["BROKER_PORT"],
            service_config["mqtt_config"]["MQTT_USER"],
            service_config["mqtt_config"]["MQTT_PWD"],
            service_config["mqtt_config"]["MQTT_KEEPALIVE"],
            f"FedML_ServerAgent_Daemon_@{self.user_name}@_@{self.args.current_device_id}@_@{str(uuid.uuid4())}@",
            "flserver_agent/last_will_msg",
            json.dumps({"ID": self.edge_id, "status": ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE})
        )

        # Init local database
        FedMLServerDataInterface.get_instance().create_job_table()

        # Start the message center to process edge related messages.
        self.setup_message_center()

        # Start the local uvicorn API server only if no instance is already running.
        server_api_cmd = "fedml.computing.scheduler.master.server_api:api"
        server_api_pids = RunProcessUtils.get_pid_from_cmd_line(server_api_cmd)
        if server_api_pids is None or len(server_api_pids) <= 0:
            # Start local API services
            cur_dir = os.path.dirname(__file__)
            fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
            python_program = get_python_program()
            self.local_api_process = ServerConstants.exec_console_with_script(
                "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
                "--log-level critical".format(
                    python_program, server_api_cmd, ServerConstants.LOCAL_SERVER_API_PORT,
                    fedml_base_dir
                ),
                should_capture_stdout=False,
                should_capture_stderr=False
            )
            # if self.local_api_process is not None and self.local_api_process.pid is not None:
            #     print(f"Server local API process id {self.local_api_process.pid}")

        # Setup MQTT connected listener
        self.mqtt_mgr.add_connected_listener(self.on_agent_mqtt_connected)
        self.mqtt_mgr.add_disconnected_listener(self.on_agent_mqtt_disconnected)
        self.mqtt_mgr.connect()

        # Report the IDLE status to MLOps
        self.mlops_metrics.report_server_training_status(
            self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE, edge_id=self.edge_id)
        MLOpsStatus.get_instance().set_server_agent_status(
            self.edge_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
        )

        # MLOpsRuntimeLogDaemon.get_instance(self.args).stop_all_log_processor()

        # Restart realtime perf reporting so it picks up this mqtt config.
        self.mlops_metrics.stop_device_realtime_perf()
        self.mlops_metrics.report_device_realtime_perf(self.args, service_config["mqtt_config"], is_client=False)

        # Cloud server mode gets its start_train via runner cmd, not recovery.
        if not self.run_as_cloud_server:
            self.recover_start_train_msg_after_upgrading()

        JobCleanup.get_instance().sync_data_on_startup(self.edge_id, is_client=False)

        self.master_api_daemon = MasterApiDaemon()
        self.master_api_process = Process(target=self.master_api_daemon.run)
        self.master_api_process.start()

        # if self.model_device_server is None:
        #     self.model_device_server = FedMLModelDeviceServerRunner(self.args, self.args.current_device_id,
        #                                                             self.args.os_name, self.args.is_from_docker,
        #                                                             self.agent_config)
        #     self.model_device_server.start()
-
    def start_agent_mqtt_loop(self):
        """Run the MQTT network loop until it exits, then clean up and terminate.

        Note: this method never returns normally — the finally block writes an
        exit marker file, stops the agent, kills remaining server login
        processes, and calls sys.exit(1).
        """
        # Start MQTT message loop
        try:
            self.mqtt_mgr.loop_forever()
        except Exception as e:
            # An upgrade signals restart by raising this exact sentinel message.
            if str(e) == "Restarting after upgraded...":
                logging.info("Restarting after upgraded...")
            else:
                logging.info("Server tracing: {}".format(traceback.format_exc()))

        finally:
            # Record the exiting pid so external tooling can detect the shutdown.
            login_exit_file = os.path.join(ServerConstants.get_log_file_dir(), "exited.log")
            with open(login_exit_file, "w") as f:
                f.writelines(f"{os.getpid()}.")

            self.stop_agent()

            # Give in-flight cleanup a moment before killing login processes.
            time.sleep(5)
            sys_utils.cleanup_all_fedml_server_login_processes(
                ServerConstants.SERVER_LOGIN_PROGRAM, clean_process_group=False)
            sys.exit(1)
-
-    def stop_agent(self):
-        if self.run_process_event is not None:
-            self.run_process_event.set()
-
-        if self.mqtt_mgr is not None:
-            try:
-                for topic in self.subscribed_topics:
-                    self.mqtt_mgr.unsubscribe_msg(topic)
-            except Exception as e:
-                pass
-
-            self.mqtt_mgr.loop_stop()
-            self.mqtt_mgr.disconnect()
-        self.release_message_center()
-
-    def get_runner(self):
-        runner = FedMLServerRunner(
-            self.args, run_id=self.run_id, request_json=self.request_json,
-            agent_config=self.agent_config
-        )
-        runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent
-        runner.edge_id = self.edge_id
-        runner.server_agent_id = self.server_agent_id
-        runner.start_request_json = self.start_request_json
-        runner.unique_device_id = self.unique_device_id
-        runner.user_name = self.user_name
-        runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent
-        runner.run_as_cloud_agent = self.run_as_cloud_agent
-        runner.run_as_cloud_server = self.run_as_cloud_server
-        return runner
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_runner_deprecated.py b/python/fedml/computing/scheduler/model_scheduler/device_client_runner_deprecated.py
deleted file mode 100755
index 8bb03eebbd..0000000000
--- a/python/fedml/computing/scheduler/model_scheduler/device_client_runner_deprecated.py
+++ /dev/null
@@ -1,1483 +0,0 @@
-import json
-import logging
-import multiprocessing
-import sys
-
-from multiprocessing import Process
-import os
-import platform
-import shutil
-import subprocess
-import threading
-
-import time
-import traceback
-import urllib
-import uuid
-import zipfile
-from urllib.parse import urlparse, urljoin
-
-import requests
-
-import yaml
-
-import fedml
-from fedml import mlops
-from fedml.computing.scheduler.model_scheduler.device_model_msg_object import FedMLModelMsgObject
-from fedml.computing.scheduler.scheduler_core.compute_cache_manager import ComputeCacheManager
-
-from fedml.computing.scheduler.scheduler_core.compute_utils import ComputeUtils
-from fedml.core.distributed.communication.s3.remote_storage import S3Storage
-from .device_model_cache import FedMLModelCache
-from ..comm_utils import sys_utils, security_utils
-
-from ..comm_utils.container_utils import ContainerUtils
-
-from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
-
-from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager
-from ..comm_utils.yaml_utils import load_yaml_config
-from .device_client_constants import ClientConstants
-
-from ....core.mlops.mlops_metrics import MLOpsMetrics
-
-from ....core.mlops.mlops_configs import MLOpsConfigs
-from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
-from ....core.mlops.mlops_status import MLOpsStatus
-from ..comm_utils.sys_utils import get_sys_runner_info, get_python_program
-from .device_model_deployment import start_deployment, run_http_inference_with_curl_request
-from .device_client_data_interface import FedMLClientDataInterface
-from ....core.mlops.mlops_utils import MLOpsUtils
-from ..comm_utils.job_utils import JobRunnerUtils
-from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils
-from .device_mqtt_inference_protocol import FedMLMqttInference
-from .device_model_db import FedMLModelDatabase
-from ..comm_utils.constants import SchedulerConstants
-from fedml.computing.scheduler.comm_utils.job_monitor import JobMonitor
-
-from .device_replica_handler import FedMLDeviceReplicaHandler
-
-from fedml.computing.scheduler.scheduler_core.endpoint_sync_protocol import FedMLEndpointSyncProtocol
-import ssl
-
-
class RunnerError(Exception):
    """Raised to abort a runner that has failed."""
-
-
class RunnerCompletedError(Exception):
    """Raised to abort a runner that has already completed."""
-
-
-class FedMLClientRunner:
-    FEDML_BOOTSTRAP_RUN_OK = "[FedML]Bootstrap Finished"
-
    def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id=0):
        """Initialize the model-deployment client runner state for one edge device.

        Args:
            args: parsed agent arguments; must provide version, device_id and
                optionally current_running_dir.
            edge_id: id of this edge device in the MLOps backend.
            request_json: the deployment request payload this runner serves.
            agent_config: agent service configuration (mqtt/s3 endpoints etc.).
            run_id: id of the current run (endpoint).
        """
        self.local_api_process = None
        # Per-run process bookkeeping: stop/complete events and process handles keyed by run id.
        self.run_process_event = None
        self.run_process_event_map = dict()
        self.run_process_completed_event = None
        self.run_process_completed_event_map = dict()
        self.run_inference_event_map = dict()
        self.run_inference_response_map = dict()
        self.run_process_map = dict()
        self.device_status = None
        self.current_training_status = None
        # Two MQTT managers: one for the agent daemon, one for client messaging.
        self.mqtt_mgr = None
        self.client_mqtt_mgr = None
        self.client_mqtt_is_connected = False
        self.client_mqtt_lock = None
        self.edge_id = edge_id
        self.run_id = run_id
        self.unique_device_id = None
        self.args = args
        self.request_json = request_json
        self.version = args.version
        self.device_id = args.device_id
        # Working directory: the module location unless the agent overrides it.
        self.cur_dir = os.path.split(os.path.realpath(__file__))[0]
        if args.current_running_dir is not None:
            self.cur_dir = args.current_running_dir
        self.sudo_cmd = ""
        self.is_mac = False
        if platform.system() == "Darwin":
            self.is_mac = True

        self.agent_config = agent_config
        # Package data/config directories mounted under /fedml.
        self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data")
        self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data")
        self.fedml_data_dir = self.fedml_data_base_package_dir
        self.fedml_config_dir = os.path.join("/", "fedml", "conf")

        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {}

        self.mlops_metrics = None
        self.client_active_list = dict()
        # Inference host and redis connection defaults for endpoint state.
        self.infer_host = "127.0.0.1"
        self.redis_addr = "local"
        self.redis_port = "6379"
        self.redis_password = "fedml_default"

        self.model_runner_mapping = dict()
        self.ntp_offset = MLOpsUtils.get_ntp_offset()
        self.running_request_json = dict()
        self.endpoint_inference_runners = dict()
        self.mqtt_inference_obj = None

        self.subscribed_topics = list()
        self.user_name = None

        self.replica_handler = None
-
-    def unzip_file(self, zip_file, unzip_file_path) -> str:
-        unziped_file_name = ""
-        if zipfile.is_zipfile(zip_file):
-            with zipfile.ZipFile(zip_file, "r") as zipf:
-                zipf.extractall(unzip_file_path)
-                unziped_file_name = zipf.namelist()[0]
-        else:
-            raise Exception("Invalid zip file {}".format(zip_file))
-
-        return unziped_file_name
-
    def retrieve_and_unzip_package(self, package_name, package_url):
        """
        Download the package from the url and unzip it to the local package directory
        ~/.fedml/fedml-model-client/fedml/model_packages/${end_point_id}_${end_point_name}_${model_name}_${model_version}
        Under this folder, there should be the zipped file and the unzipped folder.
        the zipped file starts with fedml_run_${end_point_id}_${end_point_name}_${model_name}_${model_version}

        Returns:
            Tuple (unzip_package_full_path, model_bin_file) where model_bin_file
            points to "fedml_model.bin" inside the unzip folder (deprecated).
        """
        # Models root directory
        local_package_path = ClientConstants.get_model_package_dir()
        os.makedirs(local_package_path, exist_ok=True)

        # Specify this model directory using ${end_point_id}_${end_point_name}_${model_name}_${model_version}
        run_id = self.request_json["end_point_id"]
        end_point_name = self.request_json["end_point_name"]
        model_config = self.request_json["model_config"]
        model_name = model_config["model_name"]
        model_version = model_config["model_version"]

        model_version = model_version.replace(" ", "-")     # Avoid using space for folder name
        model_version = model_version.replace(":", "-")     # Since docker mount will conflict with ":"

        this_run_model_dir = f"{run_id}_{end_point_name}_{model_name}_{model_version}"
        this_run_model_full_path = os.path.join(local_package_path, this_run_model_dir)
        os.makedirs(this_run_model_full_path, exist_ok=True)

        # Download the zipped package, overwrite it even if it exists
        filename, filename_without_extension, file_extension = ClientConstants.get_filename_and_extension(package_url)
        local_package_file = os.path.join(this_run_model_full_path,
                                          f"fedml_run_{self.run_id}_{self.edge_id}_{filename_without_extension}")
        if os.path.exists(local_package_file):
            os.remove(local_package_file)
        logging.info("Download from package_url {}".format(package_url))
        # NOTE(review): this disables HTTPS certificate verification process-wide
        # for the download — confirm this is intended for the package host.
        ssl._create_default_https_context = ssl._create_unverified_context
        urllib.request.urlretrieve(package_url, local_package_file,
                                   reporthook=self.package_download_progress)

        # Unzip the package in the same folder, overwrite the unzipped folder even if it exists
        unzip_package_path = os.path.join(this_run_model_full_path,
                                          f"unzip_fedml_run_{self.run_id}_{self.edge_id}_{filename_without_extension}")
        try:
            shutil.rmtree(unzip_package_path, ignore_errors=True)
        except Exception as e:
            pass
        package_dir_name = self.unzip_file(local_package_file, unzip_package_path)
        unzip_package_full_path = os.path.join(unzip_package_path, package_dir_name)
        model_bin_file = os.path.join(unzip_package_path, "fedml_model.bin")        # Will deprecated
        logging.info("local_package_file {}, unzip_package_path {}, unzip file full path {}".format(
            local_package_file, unzip_package_path, unzip_package_full_path))

        return unzip_package_full_path, model_bin_file
-
-    def retrieve_binary_model_file(self, package_name, package_url):
-        local_package_path = ClientConstants.get_model_package_dir()
-        if not os.path.exists(local_package_path):
-            os.makedirs(local_package_path, exist_ok=True)
-        unzip_package_path = ClientConstants.get_model_dir()
-        local_package_file = "{}".format(os.path.join(local_package_path, package_name))
-        if os.path.exists(local_package_file):
-            os.remove(local_package_file)
-        urllib.request.urlretrieve(package_url, local_package_file,
-                                   reporthook=self.package_download_progress)
-
-        unzip_package_path = os.path.join(unzip_package_path, package_name)
-        if not os.path.exists(unzip_package_path):
-            os.makedirs(unzip_package_path, exist_ok=True)
-        dst_model_file = os.path.join(unzip_package_path, package_name)
-        if os.path.exists(local_package_file):
-            shutil.copy(local_package_file, dst_model_file)
-
-        return unzip_package_path, dst_model_file
-
-    def package_download_progress(self, count, blksize, filesize):
-        self.check_runner_stop_event()
-
-        downloaded = count * blksize
-        downloaded = filesize if downloaded > filesize else downloaded
-        progress = (downloaded / filesize * 100) if filesize != 0 else 0
-        progress_int = int(progress)
-        downloaded_kb = format(downloaded / 1024, '.2f')
-
-        # since this hook function is stateless, we need a state to avoid printing progress repeatedly
-        if count == 0:
-            self.prev_download_progress = 0
-        if progress_int != self.prev_download_progress and progress_int % 5 == 0:
-            self.prev_download_progress = progress_int
-            logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int))
-
-    def build_dynamic_constrain_variables(self, run_id, run_config):
-        pass
-
-    def update_local_fedml_config(self, run_id, model_config, model_config_parameters):
-        model_name = model_config["model_name"]
-        model_storage_url = model_config["model_storage_url"]
-
-        # Retrieve model package or model binary file.
-        unzip_package_path, model_bin_file = self.retrieve_and_unzip_package(model_name, model_storage_url)
-
-        # Load the config to memory
-        fedml_local_config_file = os.path.join(unzip_package_path, "fedml_model_config.yaml")
-
-        # Inject the config from UI to pkg yaml
-        package_conf_object = model_config_parameters
-
-        # Save the config to local
-        with open(fedml_local_config_file, "w") as f:
-            yaml.dump(package_conf_object, f)
-
-        logging.info("The package_conf_object is {}".format(package_conf_object))
-
-        return unzip_package_path, model_bin_file, package_conf_object
-
-    def build_dynamic_args(self, run_config, package_conf_object, base_dir):
-        pass
-
-    def download_model_package(self, package_name, package_url):
-        # Copy config file from the client
-        unzip_package_path = self.retrieve_and_unzip_package(
-            package_name, package_url
-        )
-
-        return unzip_package_path
-
-    def run(self, process_event, completed_event):
-        # print(f"Model worker runner process id {os.getpid()}, run id {self.run_id}")
-
-        if platform.system() != "Windows":
-            os.setsid()
-
-        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
-        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')
-
-        self.run_process_event = process_event
-        self.run_process_completed_event = completed_event
-        run_id = self.request_json.get("end_point_id")
-
-        try:
-            FedMLModelDatabase.get_instance().set_database_base_dir(ClientConstants.get_database_dir())
-            FedMLModelDatabase.get_instance().create_table()
-
-            MLOpsUtils.set_ntp_offset(self.ntp_offset)
-            self.setup_client_mqtt_mgr()
-
-            if not self.run_impl():
-                logging.info(
-                    f"[endpoint/device][{run_id}/{self.edge_id}] "
-                    f"Failed to run the model deployment. run_impl return False.")
-
-                # This if condition only happens when run_impl return False in a controllable way
-                # Under this condition, the run_impl itself should have handled the cleanup
-                # So no need to self.release_gpu_ids(run_id)
-        except RunnerError:
-            logging.error(
-                f"[endpoint/device][{run_id}/{self.edge_id}] "
-                f"Failed due to RunnerError {traceback.format_exc()}")
-            self.release_gpu_ids(run_id)
-
-            self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED)
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
-        except RunnerCompletedError:
-            logging.error(
-                f"[endpoint/device][{run_id}/{self.edge_id}] "
-                f"Failed due to RunnerCompletedError {traceback.format_exc()}")
-            self.release_gpu_ids(run_id)
-
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
-        except Exception as e:
-            logging.error(
-                f"[endpoint/device][{run_id}/{self.edge_id}] "
-                f"Failed due to exception {traceback.format_exc()}")
-
-            self.cleanup_run_when_starting_failed()
-            self.mlops_metrics.client_send_exit_train_msg(
-                run_id, self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)
-
-            self.release_gpu_ids(run_id)
-
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
-            time.sleep(2)
-            sys.exit(1)
-        finally:
-            logging.info("[Worker] Release resources after deployment.")
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
-            if self.mlops_metrics is not None:
-                self.mlops_metrics.stop_sys_perf()
-            time.sleep(3)
-            self.release_client_mqtt_mgr()
-
-    def release_gpu_ids(self, run_id):
-        JobRunnerUtils.get_instance().release_gpu_ids(run_id, self.edge_id)
-
-    def check_runner_stop_event(self):
-        if self.run_process_event.is_set():
-            logging.info("Received stopping event.")
-            raise RunnerError("Runner stopped")
-
-        if self.run_process_completed_event is not None and self.run_process_completed_event.is_set():
-            logging.info("Received completed event.")
-            raise RunnerCompletedError("Runner completed")
-
-    def run_impl(self):
-        # Get deployment params
-        run_id = self.request_json["end_point_id"]
-        end_point_name = self.request_json["end_point_name"]
-        device_ids = self.request_json["device_ids"]
-        master_ip = self.request_json["master_node_ip"]
-        model_config = self.request_json["model_config"]
-        model_name = model_config["model_name"]
-        model_id = model_config["model_id"]
-        model_version = model_config["model_version"]
-        model_config_parameters = self.request_json["parameters"]
-        inference_port = model_config_parameters.get("worker_internal_port",
-                                                     ClientConstants.MODEL_INFERENCE_DEFAULT_PORT)
-        inference_port_external = model_config_parameters.get("worker_external_port", inference_port)
-        inference_engine = model_config_parameters.get("inference_engine",
-                                                       ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT)
-        inference_end_point_id = run_id
-
-        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
-
-        logging.info(f"[Worker] Received model deployment request from master for endpoint {run_id}.")
-        if self.replica_handler is not None:
-            logging.info(f"=================Worker replica Handler ======================"
-                         f"Reconcile with num diff {self.replica_handler.replica_num_diff} "
-                         f"and version diff {self.replica_handler.replica_version_diff}."
-                         f"=============================================================")
-        else:
-            logging.error(f"[Worker] Replica handler is None.")
-            return False
-
-        self.check_runner_stop_event()
-
-        # Report the deployment status to mlops
-        self.mlops_metrics.report_client_training_status(
-            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING,
-            is_from_model=True, running_json=json.dumps(self.request_json), run_id=run_id)
-        self.mlops_metrics.report_client_training_status(
-            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING,
-            is_from_model=True, run_id=run_id)
-
-        self.check_runner_stop_event()
-
-        # Reconcile the replica number (op: add, remove)
-        prev_rank, op, op_num = self.replica_handler.reconcile_num_replica()
-
-        # Reconcile the replica version (op: update)
-        replica_rank_to_update = []
-        if not op:
-            replica_rank_to_update, op = self.replica_handler.reconcile_replica_version()
-
-        if not op:
-            logging.info("[Worker] No need to reconcile.")
-            return True
-
-        logging.info(
-            f"================Worker Reconcile Operations ======================\n"
-            f" op: {op}; op num: {op_num}.\n"
-            f"==================================================================\n")
-
-        # If not rollback, download package from MLOps; otherwise, use the backup package
-        if op != "rollback":
-            logging.info("Download and unzip model to local...")
-            unzip_package_path, _, _ = \
-                self.update_local_fedml_config(run_id, model_config, model_config_parameters)
-            if unzip_package_path is None:
-                logging.info("Failed to update local fedml config.")
-                self.check_runner_stop_event()
-                self.cleanup_run_when_starting_failed()
-                self.mlops_metrics.client_send_exit_train_msg(run_id, self.edge_id,
-                                                              ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)
-                return False
-
-            if not os.path.exists(unzip_package_path):
-                logging.info("Failed to unzip file.")
-                self.check_runner_stop_event()
-                self.cleanup_run_when_starting_failed()
-                self.mlops_metrics.client_send_exit_train_msg(run_id, self.edge_id,
-                                                              ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)
-                return False
-        else:
-            logging.info("Try to use backup package to rollback...")
-            # Find folder under "~/.fedml/fedml-model-client/fedml/model_packages \
-            # /${end_point_id}_${end_point_name}_${model_name}_${model_version}"
-            backup_folder_full_path = None
-            models_root_dir = ClientConstants.get_model_package_dir()
-
-            # Find the version (notified by master) to rollback
-            version_diff_dict = self.request_json["replica_version_diff"][str(self.edge_id)]
-            version_rollback_to = None
-            for replica_no, rollback_ops in version_diff_dict.items():
-                version_rollback_to = rollback_ops["new_version"]     # Note that new_version is the version to rollback
-                break
-            if version_rollback_to is None:
-                logging.error(f"No old version found for run_id: {self.run_id} "
-                              f"edge_id: {self.edge_id}, rollback failed. No old version found in request_json.")
-                return False
-            model_version = version_rollback_to
-
-            # Format the version to match the folder name
-            model_version_formatted = version_rollback_to.replace(" ", "-")
-            model_version_formatted = model_version_formatted.replace(":", "-")
-
-            last_run_folder_sub_fd = f"{run_id}_{end_point_name}_{model_name}_{model_version_formatted}"
-            for folder in os.listdir(models_root_dir):
-                if last_run_folder_sub_fd in folder:
-                    backup_folder_full_path = os.path.join(models_root_dir, folder)
-                    break
-            if backup_folder_full_path is None:
-                logging.error(f"No backup folder found for run_id: {self.run_id} edge_id: {self.edge_id} "
-                              f"under {models_root_dir} with sub folder {last_run_folder_sub_fd}, rollback failed.")
-                return False
-
-            # Inside backup folder, find unzipped package with prefix unzip_fedml_run
-            unzip_package_path_parent = None
-            for folder in os.listdir(backup_folder_full_path):
-                if folder.startswith("unzip_fedml_run"):
-                    unzip_package_path_parent = os.path.join(backup_folder_full_path, folder)
-                    break
-
-            # Inside unzip folder, find the unzipped package, should be the only one
-            unzip_package_path = None
-            for folder in os.listdir(unzip_package_path_parent):
-                if os.path.isdir(os.path.join(unzip_package_path_parent, folder)):
-                    unzip_package_path = os.path.join(unzip_package_path_parent, folder)
-                    break
-
-            if unzip_package_path is None:
-                logging.error(f"No unzipped package found for run_id: {self.run_id} edge_id: {self.edge_id} "
-                              f"under {backup_folder_full_path}, rollback failed.")
-                return False
-
-        self.check_runner_stop_event()
-
-        running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
-            "", "", model_version, {}, {}
-
-        if op == "add":
-            worker_ip = self.get_ip_address(self.request_json)
-            for rank in range(prev_rank + 1, prev_rank + 1 + op_num):
-                try:
-                    running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
-                        start_deployment(
-                            end_point_id=inference_end_point_id, end_point_name=end_point_name, model_id=model_id,
-                            model_version=model_version, model_storage_local_path=unzip_package_path,
-                            inference_model_name=model_name, inference_engine=inference_engine,
-                            infer_host=worker_ip, master_ip=master_ip, edge_id=self.edge_id,
-                            master_device_id=device_ids[0], replica_rank=rank,
-                            gpu_per_replica=int(self.replica_handler.gpu_per_replica)
-                        )
-                except Exception as e:
-                    inference_output_url = ""
-                    logging.error(f"[Worker] Exception at deployment: {traceback.format_exc()}")
-
-                if inference_output_url == "":
-                    logging.error("[Worker] Failed to deploy the model.")
-
-                    # Release the gpu occupancy
-                    FedMLModelCache.get_instance().set_redis_params()
-                    replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids(
-                        run_id, end_point_name, model_name, self.edge_id, rank + 1)
-                    logging.info(f"Release gpu ids {replica_occupied_gpu_ids_str} for "
-                                 f"failed deployment of replica no {rank + 1}.")
-
-                    if replica_occupied_gpu_ids_str is not None:
-                        replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)
-                        JobRunnerUtils.get_instance().release_partial_job_gpu(run_id,
-                                                                              self.edge_id, replica_occupied_gpu_ids)
-
-                    # Send failed result back to master
-                    result_payload = self.send_deployment_results(
-                        end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
-                        model_id, model_name, inference_output_url, inference_model_version, inference_port,
-                        inference_engine, model_metadata, model_config)
-
-                    self.mlops_metrics.run_id = self.run_id
-                    self.mlops_metrics.broadcast_client_training_status(
-                        self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
-                        is_from_model=True, run_id=self.run_id)
-
-                    self.mlops_metrics.client_send_exit_train_msg(
-                        run_id, self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)
-
-                    return False
-                else:
-                    # Send failed successful result back to master
-                    logging.info("Finished deployment, continue to send results to master...")
-                    result_payload = self.send_deployment_results(
-                        end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
-                        model_id, model_name, inference_output_url, model_version, inference_port_external,
-                        inference_engine, model_metadata, model_config, replica_no=rank + 1)
-
-                    if inference_port_external != inference_port:
-                        # Save internal port to local db
-                        logging.info("inference_port_external {} != inference_port {}".format(
-                            inference_port_external, inference_port))
-                        result_payload = self.construct_deployment_results(
-                            end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
-                            model_id, model_name, inference_output_url, model_version, inference_port,
-                            inference_engine, model_metadata, model_config, replica_no=rank + 1)
-
-                    FedMLModelDatabase.get_instance().set_deployment_result(
-                        run_id, end_point_name, model_name, model_version, self.edge_id,
-                        json.dumps(result_payload), replica_no=rank + 1)
-
-                    logging.info(f"Deploy replica {rank + 1} / {prev_rank + 1 + op_num} successfully.")
-                    time.sleep(5)
-
-            time.sleep(1)
-            self.mlops_metrics.run_id = self.run_id
-            self.mlops_metrics.broadcast_client_training_status(
-                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
-                is_from_model=True, run_id=self.run_id)
-            return True
-        elif op == "remove":
-            for rank_to_delete in range(prev_rank, prev_rank - op_num, -1):
-                self.replica_handler.remove_replica(rank_to_delete)
-
-                FedMLModelCache.get_instance().set_redis_params()
-                replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids(
-                    run_id, end_point_name, model_name, self.edge_id, rank_to_delete + 1)
-
-                replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)
-
-                JobRunnerUtils.get_instance().release_partial_job_gpu(run_id, self.edge_id, replica_occupied_gpu_ids)
-
-                FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id_and_rank(
-                    run_id, end_point_name, model_name, self.edge_id, rank_to_delete)
-
-                # Report the deletion msg to master
-                result_payload = self.send_deployment_results(
-                    end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DELETED,
-                    model_id, model_name, inference_output_url, model_version, inference_port_external,
-                    inference_engine, model_metadata, model_config, replica_no=rank_to_delete + 1)
-
-                time.sleep(1)
-                self.mlops_metrics.run_id = self.run_id
-                self.mlops_metrics.broadcast_client_training_status(
-                    self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
-                    is_from_model=True, run_id=self.run_id)
-
-                # TODO: If delete all replica, then delete the job and related resources
-                if rank_to_delete == 0:
-                    pass
-            return True
-        elif op == "update" or op == "rollback":
-            # Update is combine of delete and add
-            worker_ip = self.get_ip_address(self.request_json)
-            for rank in replica_rank_to_update:
-                # Delete a replica (container) if exists
-                self.replica_handler.remove_replica(rank)
-
-                FedMLModelCache.get_instance().set_redis_params()
-                replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids(
-                    run_id, end_point_name, model_name, self.edge_id, rank + 1)
-
-                replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)
-                logging.info(f"Release gpu ids {replica_occupied_gpu_ids} for update / rollback.")
-
-                # TODO (Raphael) check if this will allow another job to seize the gpu during high concurrency:
-                try:
-                    JobRunnerUtils.get_instance().release_partial_job_gpu(
-                        run_id, self.edge_id, replica_occupied_gpu_ids)
-                except Exception as e:
-                    if op == "rollback":
-                        pass
-                    else:
-                        logging.error(f"Failed to release gpu ids {replica_occupied_gpu_ids} for update.")
-                        return False
-
-                # Delete the deployment result from local db
-                FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id_and_rank(
-                    run_id, end_point_name, model_name, self.edge_id, rank)
-
-                logging.info(f"Delete replica with no {rank + 1} successfully.")
-                time.sleep(1)
-
-                # Add a replica (container)
-                # TODO: Reduce the duplicated code
-                logging.info(f"Start to deploy the model with replica no {rank + 1} ...")
-                try:
-                    running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
-                        start_deployment(
-                            end_point_id=inference_end_point_id, end_point_name=end_point_name, model_id=model_id,
-                            model_version=model_version, model_storage_local_path=unzip_package_path,
-                            inference_model_name=model_name, inference_engine=inference_engine,
-                            infer_host=worker_ip, master_ip=master_ip, edge_id=self.edge_id,
-                            master_device_id=device_ids[0], replica_rank=rank,
-                            gpu_per_replica=int(self.replica_handler.gpu_per_replica)
-                        )
-                except Exception as e:
-                    inference_output_url = ""
-                    logging.error(f"Exception at deployment: {traceback.format_exc()}")
-
-                if inference_output_url == "":
-                    logging.error("Failed to deploy the model...")
-
-                    # If update failed, should release this replica's gpu
-                    FedMLModelCache.get_instance().set_redis_params()
-                    replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids(
-                        run_id, end_point_name, model_name, self.edge_id, rank + 1)
-
-                    replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)
-
-                    JobRunnerUtils.get_instance().release_partial_job_gpu(
-                        run_id, self.edge_id, replica_occupied_gpu_ids)
-
-                    result_payload = self.send_deployment_results(
-                        end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
-                        model_id, model_name, inference_output_url, inference_model_version, inference_port,
-                        inference_engine, model_metadata, model_config)
-
-                    self.mlops_metrics.run_id = self.run_id
-                    self.mlops_metrics.broadcast_client_training_status(
-                        self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
-                        is_from_model=True, run_id=self.run_id)
-
-                    self.mlops_metrics.client_send_exit_train_msg(
-                        run_id, self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)
-
-                    return False
-                else:
-                    logging.info("Finished deployment, continue to send results to master...")
-                    result_payload = self.send_deployment_results(
-                        end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
-                        model_id, model_name, inference_output_url, model_version, inference_port_external,
-                        inference_engine, model_metadata, model_config, replica_no=rank + 1)
-
-                    if inference_port_external != inference_port:  # Save internal port to local db
-                        logging.info("inference_port_external {} != inference_port {}".format(
-                            inference_port_external, inference_port))
-                        result_payload = self.construct_deployment_results(
-                            end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
-                            model_id, model_name, inference_output_url, model_version, inference_port,
-                            inference_engine, model_metadata, model_config, replica_no=rank + 1)
-
-                    FedMLModelDatabase.get_instance().set_deployment_result(
-                        run_id, end_point_name, model_name, model_version, self.edge_id,
-                        json.dumps(result_payload), replica_no=rank + 1)
-
-                    logging.info(f"Update replica with no {rank + 1}  successfully. Op num {op_num}")
-                    time.sleep(5)
-            time.sleep(1)
-            self.mlops_metrics.run_id = self.run_id
-            self.mlops_metrics.broadcast_client_training_status(
-                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
-                is_from_model=True, run_id=self.run_id)
-            return True
-
-        else:
-            # The delete op will be handled by callback_delete_deployment
-            logging.error(f"Unsupported op {op} with op num {op_num}")
-            return False
-
-    def construct_deployment_results(self, end_point_name, device_id, model_status,
-                                     model_id, model_name, model_inference_url,
-                                     model_version, inference_port, inference_engine,
-                                     model_metadata, model_config, replica_no=1):
-        deployment_results_payload = {"end_point_id": self.run_id, "end_point_name": end_point_name,
-                                      "model_id": model_id, "model_name": model_name,
-                                      "model_url": model_inference_url, "model_version": model_version,
-                                      "port": inference_port,
-                                      "inference_engine": inference_engine,
-                                      "model_metadata": model_metadata,
-                                      "model_config": model_config,
-                                      "model_status": model_status,
-                                      "inference_port": inference_port,
-                                      "replica_no": replica_no,
-                                      }
-        return deployment_results_payload
-
-    def construct_deployment_status(self, end_point_name, device_id,
-                                    model_id, model_name, model_version,
-                                    model_inference_url, model_status,
-                                    inference_port=ClientConstants.MODEL_INFERENCE_DEFAULT_PORT,
-                                    replica_no=1,     # start from 1
-                                    ):
-        deployment_status_payload = {"end_point_id": self.run_id, "end_point_name": end_point_name,
-                                     "device_id": device_id,
-                                     "model_id": model_id, "model_name": model_name,
-                                     "model_version": model_version,
-                                     "model_url": model_inference_url, "model_status": model_status,
-                                     "inference_port": inference_port,
-                                     "replica_no": replica_no,
-                                     }
-        return deployment_status_payload
-
-    def send_deployment_results(self, end_point_name, device_id, model_status,
-                                model_id, model_name, model_inference_url,
-                                model_version, inference_port, inference_engine,
-                                model_metadata, model_config, replica_no=1):
-        deployment_results_topic = "model_device/model_device/return_deployment_result/{}/{}".format(
-            self.run_id, device_id)
-
-        deployment_results_payload = self.construct_deployment_results(
-            end_point_name, device_id, model_status,
-            model_id, model_name, model_inference_url,
-            model_version, inference_port, inference_engine,
-            model_metadata, model_config, replica_no=replica_no)
-
-        logging.info("[client] send_deployment_results: topic {}, payload {}.".format(deployment_results_topic,
-                                                                                      deployment_results_payload))
-        self.client_mqtt_mgr.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload))
-        return deployment_results_payload
-
-    def send_deployment_status(self, end_point_name, device_id,
-                               model_id, model_name, model_version,
-                               model_inference_url, model_status,
-                               inference_port=ClientConstants.MODEL_INFERENCE_DEFAULT_PORT,
-                               replica_no=1,     # start from 1
-                               ):
-        # Deprecated
-        pass
-
-    def reset_devices_status(self, edge_id, status):
-        self.mlops_metrics.run_id = self.run_id
-        self.mlops_metrics.edge_id = edge_id
-        self.mlops_metrics.broadcast_client_training_status(
-            edge_id, status, is_from_model=True, run_id=self.run_id)
-
-    def cleanup_run_when_starting_failed(self):
-        logging.info("Cleanup run successfully when starting failed.")
-
-        self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED)
-
-        time.sleep(2)
-
-        try:
-            self.mlops_metrics.stop_sys_perf()
-        except Exception as ex:
-            pass
-
-        time.sleep(1)
-
-    def cleanup_run_when_finished(self):
-        logging.info("Cleanup run successfully when finished.")
-
-        self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED)
-
-        time.sleep(2)
-
-        try:
-            self.mlops_metrics.stop_sys_perf()
-        except Exception as ex:
-            pass
-
-        time.sleep(1)
-
-    def on_client_mqtt_disconnected(self, mqtt_client_object):
-        if self.client_mqtt_lock is None:
-            self.client_mqtt_lock = threading.Lock()
-
-        self.client_mqtt_lock.acquire()
-        self.client_mqtt_is_connected = False
-        self.client_mqtt_lock.release()
-
-    def on_client_mqtt_connected(self, mqtt_client_object):
-        if self.mlops_metrics is None:
-            self.mlops_metrics = MLOpsMetrics()
-
-        self.mlops_metrics.set_messenger(self.client_mqtt_mgr)
-        self.mlops_metrics.run_id = self.run_id
-
-        if self.client_mqtt_lock is None:
-            self.client_mqtt_lock = threading.Lock()
-
-        self.client_mqtt_lock.acquire()
-        self.client_mqtt_is_connected = True
-        self.client_mqtt_lock.release()
-
-    def setup_client_mqtt_mgr(self):
-        if self.client_mqtt_mgr is not None:
-            return
-
-        if self.client_mqtt_lock is None:
-            self.client_mqtt_lock = threading.Lock()
-
-        self.client_mqtt_mgr = MqttManager(
-            self.agent_config["mqtt_config"]["BROKER_HOST"],
-            self.agent_config["mqtt_config"]["BROKER_PORT"],
-            self.agent_config["mqtt_config"]["MQTT_USER"],
-            self.agent_config["mqtt_config"]["MQTT_PWD"],
-            self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"],
-            "FedML_ModelClientAgent_Metrics_@{}@_{}_{}_{}".format(self.user_name, self.args.current_device_id,
-                                                                  str(os.getpid()),
-                                                                  str(uuid.uuid4()))
-        )
-
-        self.client_mqtt_mgr.add_connected_listener(self.on_client_mqtt_connected)
-        self.client_mqtt_mgr.add_disconnected_listener(self.on_client_mqtt_disconnected)
-        self.client_mqtt_mgr.connect()
-        self.client_mqtt_mgr.loop_start()
-
-        if self.mlops_metrics is None:
-            self.mlops_metrics = MLOpsMetrics()
-        self.mlops_metrics.set_messenger(self.client_mqtt_mgr)
-        self.mlops_metrics.run_id = self.run_id
-
-    def release_client_mqtt_mgr(self):
-        try:
-            if self.client_mqtt_mgr is not None:
-                self.client_mqtt_mgr.loop_stop()
-                self.client_mqtt_mgr.disconnect()
-
-            self.client_mqtt_lock.acquire()
-            if self.client_mqtt_mgr is not None:
-                self.client_mqtt_is_connected = False
-                self.client_mqtt_mgr = None
-            self.client_mqtt_lock.release()
-        except Exception:
-            pass
-
-    def ota_upgrade(self, payload, request_json):
-        run_id = request_json["end_point_id"]
-        force_ota = False
-        ota_version = None
-
-        try:
-            parameters = request_json.get("parameters", None)
-            common_args = parameters.get("common_args", None)
-            force_ota = common_args.get("force_ota", False)
-            ota_version = common_args.get("ota_version", None)
-        except Exception as e:
-            pass
-
-        if force_ota and ota_version is not None:
-            should_upgrade = True if ota_version != fedml.__version__ else False
-            upgrade_version = ota_version
-        else:
-            try:
-                fedml_is_latest_version, local_ver, remote_ver = sys_utils.check_fedml_is_latest_version(self.version)
-            except Exception as e:
-                return
-
-            should_upgrade = False if fedml_is_latest_version else True
-            upgrade_version = remote_ver
-
-        if should_upgrade:
-            FedMLClientDataInterface.get_instance(). \
-                save_started_job(run_id, self.edge_id, time.time(),
-                                 ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING,
-                                 ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING,
-                                 payload)
-
-            logging.info(f"Upgrade to version {upgrade_version} ...")
-
-            sys_utils.do_upgrade(self.version, upgrade_version)
-
-            raise Exception("Restarting after upgraded...")
-
-    def callback_start_deployment(self, topic, payload):
-        # Get deployment params
-        request_json = json.loads(payload)
-        run_id = request_json["end_point_id"]
-        inference_end_point_id = run_id
-
-        try:
-            MLOpsConfigs.fetch_all_configs()
-        except Exception as e:
-            pass
-
-        # Start log processor for current run
-        run_id = inference_end_point_id
-        self.args.run_id = run_id
-        self.args.edge_id = self.edge_id
-        MLOpsRuntimeLog(args=self.args).init_logs()
-        MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source(
-            ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT)
-        MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id)
-
-        # self.ota_upgrade(payload, request_json)
-
-        # Start client with multiprocessing mode
-        request_json["run_id"] = run_id
-        run_id_str = str(run_id)
-        self.request_json = request_json
-        self.running_request_json[run_id_str] = request_json
-        client_runner = FedMLClientRunner(
-            self.args, edge_id=self.edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id
-        )
-        client_runner.infer_host = self.get_ip_address(request_json)
-        self.run_process_event_map[run_id_str] = multiprocessing.Event()
-        self.run_process_event_map[run_id_str].clear()
-        client_runner.run_process_event = self.run_process_event_map[run_id_str]
-        self.run_process_completed_event_map[run_id_str] = multiprocessing.Event()
-        self.run_process_completed_event_map[run_id_str].clear()
-        client_runner.run_process_completed_event = self.run_process_completed_event_map[run_id_str]
-        self.model_runner_mapping[run_id_str] = client_runner
-
-        # Replica Handler will be init for every deployment
-        replica_handler = FedMLDeviceReplicaHandler(self.edge_id, self.request_json)
-        client_runner.replica_handler = replica_handler
-
-        self.run_id = run_id
-        self.run_process_map[run_id_str] = Process(target=client_runner.run, args=(
-            self.run_process_event_map[run_id_str], self.run_process_completed_event_map[run_id_str]
-        ))
-
-        self.run_process_map[run_id_str].start()
-        ClientConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid)
-        ClientConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, self.edge_id, run_id=run_id)
-
-    def set_runner_stopped_event(self, run_id):
-        run_id_str = str(run_id)
-        client_runner = self.model_runner_mapping.get(run_id_str, None)
-        if client_runner is not None:
-            if client_runner.run_process_event is not None:
-                client_runner.run_process_event.set()
-            self.model_runner_mapping.pop(run_id_str)
-
-    def set_runner_completed_event(self, run_id):
-        run_id_str = str(run_id)
-        client_runner = self.model_runner_mapping.get(run_id_str, None)
-        if client_runner is not None:
-            if client_runner.run_process_completed_event is not None:
-                client_runner.run_process_completed_event.set()
-            self.model_runner_mapping.pop(run_id_str)
-
-    def callback_delete_deployment(self, topic, payload):
-        logging.info("[Worker] callback_delete_deployment")
-
-        # Parse payload as the model message object.
-        model_msg_object = FedMLModelMsgObject(topic, payload)
-
-        # Delete all replicas on this device
-        try:
-            ClientConstants.remove_deployment(
-                model_msg_object.end_point_name, model_msg_object.model_name, model_msg_object.model_version,
-                model_msg_object.run_id, model_msg_object.model_id, edge_id=self.edge_id)
-        except Exception as e:
-            logging.info(f"Exception when removing deployment {traceback.format_exc()}")
-            pass
-
-        self.set_runner_stopped_event(model_msg_object.run_id)
-
-        logging.info(f"[endpoint/device][{model_msg_object.run_id}/{self.edge_id}] "
-                     f"Release gpu resource when the worker deployment deleted.")
-        JobRunnerUtils.get_instance().release_gpu_ids(model_msg_object.run_id, self.edge_id)
-
-        if self.running_request_json.get(str(model_msg_object.run_id)) is not None:
-            try:
-                self.running_request_json.pop(str(model_msg_object.run_id))
-            except Exception as e:
-                logging.error(f"Error when removing running_request_json: {traceback.format_exc()}")
-                pass
-
-        FedMLClientDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id)
-        FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id(
-            model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name,
-            self.edge_id)
-
-        # Delete FEDML_GLOBAL_ENDPOINT_RUN_ID_MAP_TAG-${run_id} both in redis and local db
-        ComputeCacheManager.get_instance().gpu_cache.delete_endpoint_run_id_map(str(model_msg_object.run_id))
-
-        # Delete FEDML_EDGE_ID_MODEL_DEVICE_ID_MAP_TAG-${run_id} both in redis and local db
-        ComputeCacheManager.get_instance().gpu_cache.delete_edge_model_id_map(str(model_msg_object.run_id))
-
-        # Delete FEDML_GLOBAL_DEVICE_RUN_GPU_IDS_TAG-${run_id}-${device_id} both in redis and local db
-        ComputeCacheManager.get_instance().gpu_cache.delete_device_run_gpu_ids(str(self.edge_id),
-                                                                               str(model_msg_object.run_id))
-
-        # Delete FEDML_GLOBAL_DEVICE_RUN_NUM_GPUS_TAG-${run_id}-${device_id} both in redis and local db
-        ComputeCacheManager.get_instance().gpu_cache.delete_device_run_num_gpus(str(self.edge_id),
-                                                                                str(model_msg_object.run_id))
-
-        # Delete FEDML_MODEL_REPLICA_GPU_IDS_TAG-${run_id}-${end_point_name}-${model_name}-${device_id}-*
-        FedMLModelCache.get_instance().set_redis_params()
-        FedMLModelCache.get_instance().delete_all_replica_gpu_ids(model_msg_object.run_id,
-                                                                  model_msg_object.end_point_name,
-                                                                  model_msg_object.model_name, self.edge_id)
-
-    def exit_run_with_exception_entry(self):
-        try:
-            self.setup_client_mqtt_mgr()
-            self.exit_run_with_exception()
-        except Exception as e:
-            self.release_client_mqtt_mgr()
-            sys.exit(1)
-        finally:
-            self.release_client_mqtt_mgr()
-
-    def exit_run_with_exception(self):
-        logging.info("Exit run successfully.")
-
-        ClientConstants.cleanup_learning_process(self.run_id)
-        ClientConstants.cleanup_run_process(self.run_id)
-
-        self.mlops_metrics.report_client_id_status(
-            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
-            is_from_model=True, run_id=self.run_id)
-
-        time.sleep(1)
-
-    def callback_exit_train_with_exception(self, topic, payload):
-        request_json = json.loads(payload)
-        is_retain = request_json.get("is_retain", False)
-        if is_retain:
-            return
-        run_id = request_json.get("runId", None)
-        if run_id is None:
-            run_id = request_json.get("run_id", None)
-            if run_id is None:
-                run_id = request_json.get("id", None)
-
-        if run_id is None:
-            return
-
-        # Stop client with multiprocessing mode
-        self.request_json = request_json
-        client_runner = FedMLClientRunner(
-            self.args, edge_id=self.edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id
-        )
-        try:
-            Process(target=client_runner.exit_run_with_exception_entry).start()
-        except Exception as e:
-            pass
-
-    def cleanup_client_with_status(self):
-        self.setup_client_mqtt_mgr()
-
-        if self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED:
-            self.cleanup_run_when_finished()
-        elif self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED:
-            self.cleanup_run_when_starting_failed()
-
-        self.release_client_mqtt_mgr()
-
-    def callback_runner_id_status(self, topic, payload):
-        # logging.info("callback_runner_id_status: topic = %s, payload = %s" % (topic, payload))
-
-        request_json = json.loads(payload)
-        run_id = request_json["run_id"]
-        edge_id = request_json["edge_id"]
-        status = request_json["status"]
-
-        self.save_training_status(edge_id, status)
-
-        if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \
-                status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED:
-            # Stop client with multiprocessing mode
-            self.request_json = request_json
-            client_runner = FedMLClientRunner(
-                self.args,
-                edge_id=self.edge_id,
-                request_json=request_json,
-                agent_config=self.agent_config,
-                run_id=run_id,
-            )
-            client_runner.device_status = status
-            status_process = Process(target=client_runner.cleanup_client_with_status)
-            status_process.start()
-            status_process.join(15)
-
-            # Stop log processor for current run
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, edge_id)
-
-    def callback_report_current_status(self, topic, payload):
-        self.send_agent_active_msg()
-
-    @staticmethod
-    def process_ota_upgrade_msg():
-        os.system("pip install -U fedml")
-
-    def callback_client_ota_msg(self, topic, payload):
-        request_json = json.loads(payload)
-        cmd = request_json["cmd"]
-
-        if cmd == ClientConstants.FEDML_OTA_CMD_UPGRADE:
-            FedMLClientRunner.process_ota_upgrade_msg()
-            # Process(target=FedMLClientRunner.process_ota_upgrade_msg).start()
-            raise Exception("After upgraded, restart runner...")
-        elif cmd == ClientConstants.FEDML_OTA_CMD_RESTART:
-            raise Exception("Restart runner...")
-
-    def save_training_status(self, edge_id, training_status):
-        self.current_training_status = training_status
-        ClientConstants.save_training_infos(edge_id, training_status)
-
-    @staticmethod
-    def get_device_id():
-        device_file_path = os.path.join(ClientConstants.get_data_dir(),
-                                        ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME)
-        file_for_device_id = os.path.join(device_file_path, "devices.id")
-        if not os.path.exists(device_file_path):
-            os.makedirs(device_file_path)
-        elif os.path.exists(file_for_device_id):
-            with open(file_for_device_id, 'r', encoding='utf-8') as f:
-                device_id_from_file = f.readline()
-                if device_id_from_file is not None and device_id_from_file != "":
-                    return device_id_from_file
-
-        if platform.system() == "Darwin":
-            cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \
-                                 "|awk -F':' '{print $2}' "
-            device_id = os.popen(cmd_get_serial_num).read()
-            device_id = device_id.replace('\n', '').replace(' ', '')
-            if device_id is None or device_id == "":
-                device_id = hex(uuid.getnode())
-            else:
-                device_id = "0x" + device_id
-        else:
-            if "nt" in os.name:
-
-                def get_uuid():
-                    guid = ""
-                    try:
-                        cmd = "wmic csproduct get uuid"
-                        guid = str(subprocess.check_output(cmd))
-                        pos1 = guid.find("\\n") + 2
-                        guid = guid[pos1:-15]
-                    except Exception as ex:
-                        pass
-                    return str(guid)
-
-                device_id = str(get_uuid())
-                logging.info(device_id)
-            elif "posix" in os.name:
-                device_id = sys_utils.get_device_id_in_docker()
-                if device_id is None:
-                    device_id = hex(uuid.getnode())
-            else:
-                device_id = sys_utils.run_subprocess_open(
-                    "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split()
-                )
-                device_id = hex(device_id)
-
-        if device_id is not None and device_id != "":
-            with open(file_for_device_id, 'w', encoding='utf-8') as f:
-                f.write(device_id)
-        else:
-            device_id = hex(uuid.uuid4())
-            with open(file_for_device_id, 'w', encoding='utf-8') as f:
-                f.write(device_id)
-
-        return device_id
-
-    def get_ip_address(self, request_json):
-        # OPTION 1: Use local ip
-        ip = ClientConstants.get_local_ip()
-
-        # OPTION 2: Auto detect public ip
-        if "parameters" in request_json and \
-                ClientConstants.AUTO_DETECT_PUBLIC_IP in request_json["parameters"] and \
-                request_json["parameters"][ClientConstants.AUTO_DETECT_PUBLIC_IP]:
-            ip = ClientConstants.get_public_ip()
-            logging.info("Auto detect public ip for worker: " + ip)
-
-        # OPTION 3: Use user indicated ip
-        if self.infer_host is not None and self.infer_host != "127.0.0.1" and self.infer_host != "localhost":
-            ip = self.infer_host
-
-        return ip
-
-    def bind_account_and_device_id(self, url, account_id, device_id, os_name, role="md.on_premise_device"):
-        ip = requests.get('https://checkip.amazonaws.com').text.strip()
-        fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \
-            cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \
-            gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info()
-        host_name = sys_utils.get_host_name()
-        json_params = {
-            "accountid": account_id,
-            "deviceid": device_id,
-            "state": ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE,
-            "status": ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE,
-            "type": os_name,
-            "processor": cpu_info,
-            "core_type": cpu_info,
-            "network": "",
-            "role": role,
-            "os_ver": os_ver,
-            "memory": total_mem,
-            "ip": ip,
-            "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver,
-                            "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver,
-                            "mpi_installed": mpi_installed, "cpu_usage": cpu_usage,
-                            "available_mem": available_mem, "total_mem": total_mem,
-                            "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name}
-        }
-        if gpu_count > 0:
-            if gpu_total_mem is not None:
-                json_params["gpu"] = gpu_info if gpu_info is not None else "" + ", Total GPU Memory: " + gpu_total_mem
-            else:
-                json_params["gpu"] = gpu_info if gpu_info is not None else ""
-            json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else ""
-            if gpu_available_mem is not None:
-                json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem
-            if gpu_total_mem is not None:
-                json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem
-
-            json_params["extra_infos"]["gpu_count"] = gpu_count
-            json_params["extra_infos"]["gpu_vendor"] = gpu_vendor
-            json_params["extra_infos"]["gpu_device_name"] = gpu_device_name
-
-            gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count)
-            gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0
-            gpu_list = sys_utils.get_gpu_list()
-            json_params["extra_infos"]["gpu_available_count"] = gpu_available_count
-            json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list
-            json_params["extra_infos"]["gpu_list"] = gpu_list
-        else:
-            json_params["gpu"] = "None"
-            json_params["extra_infos"]["gpu_available_count"] = 0
-            json_params["extra_infos"]["gpu_available_id_list"] = []
-            json_params["extra_infos"]["gpu_list"] = []
-
-        _, cert_path = MLOpsConfigs.get_request_params()
-        if cert_path is not None:
-            try:
-                requests.session().verify = cert_path
-                response = requests.post(
-                    url, json=json_params, verify=True,
-                    headers={"content-type": "application/json", "Connection": "close"}
-                )
-            except requests.exceptions.SSLError as err:
-                MLOpsConfigs.install_root_ca_file()
-                response = requests.post(
-                    url, json=json_params, verify=True,
-                    headers={"content-type": "application/json", "Connection": "close"}
-                )
-        else:
-            response = requests.post(url, json=json_params, headers={"Connection": "close"})
-        edge_id = -1
-        user_name = None
-        extra_url = None
-        if response.status_code != 200:
-            print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                  f"response.content: {response.content}")
-            pass
-        else:
-            # print("url = {}, response = {}".format(url, response))
-            status_code = response.json().get("code")
-            if status_code == "SUCCESS":
-                edge_id = response.json().get("data").get("id")
-                user_name = response.json().get("data").get("userName", None)
-                extra_url = response.json().get("data").get("url", None)
-                if edge_id is None or edge_id <= 0:
-                    print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                          f"response.content: {response.content}")
-            else:
-                if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR:
-                    raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR)
-                print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                      f"response.content: {response.content}")
-                return -1, None, None
-        return edge_id, user_name, extra_url
-
-    def fetch_configs(self):
-        return MLOpsConfigs.fetch_all_configs()
-
-    def send_agent_active_msg(self):
-        active_topic = "flclient_agent/active"
-        status = MLOpsStatus.get_instance().get_client_agent_status(self.edge_id)
-        if (
-                status is not None
-                and status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE
-                and status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE
-        ):
-            return
-
-        try:
-            current_job = FedMLClientDataInterface.get_instance().get_job_by_id(self.run_id)
-        except Exception as e:
-            current_job = None
-        if current_job is None:
-            if status is not None and status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE:
-                status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE
-            else:
-                return
-        else:
-            status = ClientConstants.get_device_state_from_run_edge_state(current_job.status)
-        active_msg = {"ID": self.edge_id, "status": status}
-        MLOpsStatus.get_instance().set_client_agent_status(self.edge_id, status)
-        self.mqtt_mgr.send_message_json(active_topic, json.dumps(active_msg))
-
-    def recover_start_deployment_msg_after_upgrading(self):
-        try:
-            current_job = FedMLClientDataInterface.get_instance().get_current_job()
-            if current_job is not None and \
-                    current_job.status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING:
-                logging.info("start deployment after upgrading.")
-                topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id))
-                self.callback_start_deployment(topic_start_deployment, current_job.running_json)
-        except Exception as e:
-            logging.info("recover starting deployment message after upgrading: {}".format(traceback.format_exc()))
-
-    def on_agent_mqtt_connected(self, mqtt_client_object):
-        # The MQTT message topic format is as follows: <sender>/<receiver>/<action>
-
-        # Setup MQTT message listener for starting deployment
-        topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id))
-        self.mqtt_mgr.add_message_listener(topic_start_deployment, self.callback_start_deployment)
-
-        # Setup MQTT message listener for delete deployment
-        topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(self.edge_id))
-        self.mqtt_mgr.add_message_listener(topic_delete_deployment, self.callback_delete_deployment)
-
-        # Setup MQTT message listener for running failed
-        topic_exit_train_with_exception = "flserver_agent/" + str(self.edge_id) + "/exit_train_with_exception"
-        self.mqtt_mgr.add_message_listener(topic_exit_train_with_exception, self.callback_exit_train_with_exception)
-
-        # Setup MQTT message listener for client status switching
-        topic_client_status = "fl_client/flclient_agent_" + str(self.edge_id) + "/status"
-        self.mqtt_mgr.add_message_listener(topic_client_status, self.callback_runner_id_status)
-
-        # Setup MQTT message listener to report current device status.
-        topic_report_status = "mlops/report_device_status"
-        self.mqtt_mgr.add_message_listener(topic_report_status, self.callback_report_current_status)
-
-        # Setup MQTT message listener to OTA messages from the MLOps.
-        topic_ota_msg = "mlops/flclient_agent_" + str(self.edge_id) + "/ota"
-        self.mqtt_mgr.add_message_listener(topic_ota_msg, self.callback_client_ota_msg)
-
-        if self.mqtt_inference_obj is None:
-            self.mqtt_inference_obj = FedMLMqttInference(agent_config=self.agent_config, mqtt_mgr=self.mqtt_mgr)
-        self.mqtt_inference_obj.setup_listener_for_endpoint_inference_request(self.edge_id)
-
-        # Subscribe topics for starting deployment, stopping deployment and fetching client status.
-        mqtt_client_object.subscribe(topic_start_deployment, qos=2)
-        mqtt_client_object.subscribe(topic_delete_deployment, qos=2)
-        mqtt_client_object.subscribe(topic_client_status, qos=2)
-        mqtt_client_object.subscribe(topic_report_status, qos=2)
-        mqtt_client_object.subscribe(topic_exit_train_with_exception, qos=2)
-        mqtt_client_object.subscribe(topic_ota_msg, qos=2)
-
-        self.subscribed_topics.clear()
-        self.subscribed_topics.append(topic_start_deployment)
-        self.subscribed_topics.append(topic_delete_deployment)
-        self.subscribed_topics.append(topic_client_status)
-        self.subscribed_topics.append(topic_report_status)
-        self.subscribed_topics.append(topic_exit_train_with_exception)
-        self.subscribed_topics.append(topic_ota_msg)
-
-        # Broadcast the first active message.
-        self.send_agent_active_msg()
-
-        # Echo results
-        # print("\n\nCongratulations, your device is connected to the FedML MLOps platform successfully!")
-        # print(
-        #     "Your FedML Edge ID is " + str(self.edge_id) + ", unique device ID is "
-        #     + str(self.unique_device_id)
-        #     + "\n"
-        # )
-
-        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
-
-    def on_agent_mqtt_disconnected(self, mqtt_client_object):
-        MLOpsStatus.get_instance().set_client_agent_status(
-            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE
-        )
-
-        try:
-            if self.mqtt_inference_obj is not None:
-                self.mqtt_inference_obj.remove_listener_for_endpoint_inference_request(self.edge_id)
-        except Exception as e:
-            pass
-
-    def setup_agent_mqtt_connection(self, service_config):
-        # Setup MQTT connection
-        self.mqtt_mgr = MqttManager(
-            service_config["mqtt_config"]["BROKER_HOST"],
-            service_config["mqtt_config"]["BROKER_PORT"],
-            service_config["mqtt_config"]["MQTT_USER"],
-            service_config["mqtt_config"]["MQTT_PWD"],
-            service_config["mqtt_config"]["MQTT_KEEPALIVE"],
-            "FedML_ModelClientAgent_Daemon_@" + self.user_name + "@_" + self.args.current_device_id + str(uuid.uuid4()),
-            "flclient_agent/last_will_msg",
-            json.dumps({"ID": self.edge_id, "status": ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE})
-        )
-        self.agent_config = service_config
-
-        # Init local database
-        FedMLClientDataInterface.get_instance().create_job_table()
-        try:
-            FedMLModelDatabase.get_instance().set_database_base_dir(ClientConstants.get_database_dir())
-            FedMLModelDatabase.get_instance().create_table()
-        except Exception as e:
-            pass
-
-        client_api_cmd = "fedml.computing.scheduler.model_scheduler.device_client_api:api"
-        client_api_pids = RunProcessUtils.get_pid_from_cmd_line(client_api_cmd)
-        if client_api_pids is None or len(client_api_pids) <= 0:
-            # Start local API services
-            cur_dir = os.path.dirname(__file__)
-            fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
-            python_program = get_python_program()
-            self.local_api_process = ClientConstants.exec_console_with_script(
-                "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
-                "--log-level critical".format(
-                    python_program, client_api_cmd,
-                    ClientConstants.LOCAL_CLIENT_API_PORT, fedml_base_dir
-                ),
-                should_capture_stdout=False,
-                should_capture_stderr=False
-            )
-            # if self.local_api_process is not None and self.local_api_process.pid is not None:
-            #     print(f"Model worker local API process id {self.local_api_process.pid}")
-
-        # MLOpsRuntimeLogDaemon.get_instance(self.args).stop_all_log_processor()
-
-        # Setup MQTT connected listener
-        self.mqtt_mgr.add_connected_listener(self.on_agent_mqtt_connected)
-        self.mqtt_mgr.add_disconnected_listener(self.on_agent_mqtt_disconnected)
-        self.mqtt_mgr.connect()
-
-        self.setup_client_mqtt_mgr()
-        self.mlops_metrics.report_client_training_status(
-            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE, is_from_model=True)
-        MLOpsStatus.get_instance().set_client_agent_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE)
-
-        self.recover_start_deployment_msg_after_upgrading()
-
-    def stop_agent(self):
-        if self.run_process_event is not None:
-            self.run_process_event.set()
-
-        if self.mqtt_mgr is not None:
-            try:
-                for topic in self.subscribed_topics:
-                    self.mqtt_mgr.unsubscribe_msg(topic)
-            except Exception as e:
-                pass
-
-            self.mqtt_mgr.loop_stop()
-            self.mqtt_mgr.disconnect()
-
-        self.release_client_mqtt_mgr()
-
-    def start_agent_mqtt_loop(self, should_exit_sys=False):
-        # Start MQTT message loop
-        try:
-            self.mqtt_mgr.loop_forever()
-        except Exception as e:
-            if str(e) == "Restarting after upgraded...":
-                logging.info("Restarting after upgraded...")
-            else:
-                logging.info("Client tracing: {}".format(traceback.format_exc()))
-        finally:
-            self.stop_agent()
-
-            if should_exit_sys:
-                time.sleep(5)
-                sys.exit(1)
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_runner_deprecated.py b/python/fedml/computing/scheduler/model_scheduler/device_server_runner_deprecated.py
deleted file mode 100755
index 4bcac6d2db..0000000000
--- a/python/fedml/computing/scheduler/model_scheduler/device_server_runner_deprecated.py
+++ /dev/null
@@ -1,2022 +0,0 @@
-import copy
-import json
-import logging
-import multiprocessing
-import platform
-import sys
-
-from multiprocessing import Process
-import os
-import shutil
-import subprocess
-import threading
-
-import time
-import traceback
-import urllib
-import uuid
-import zipfile
-from os import listdir
-
-import requests
-import torch
-
-import fedml
-from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils
-from fedml.core.mlops.mlops_runtime_log import MLOpsFormatter
-
-from ..comm_utils import sys_utils
-from .device_server_data_interface import FedMLServerDataInterface
-from ..scheduler_core.endpoint_sync_protocol import FedMLEndpointSyncProtocol
-from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
-
-from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager
-from ..comm_utils.yaml_utils import load_yaml_config
-from .device_client_constants import ClientConstants
-from .device_server_constants import ServerConstants
-
-from ....core.mlops.mlops_metrics import MLOpsMetrics
-
-from ....core.mlops.mlops_configs import MLOpsConfigs
-from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
-from ....core.mlops.mlops_status import MLOpsStatus
-from ..comm_utils.sys_utils import get_sys_runner_info, get_python_program
-from .device_model_cache import FedMLModelCache
-from .device_model_msg_object import FedMLModelMsgObject
-from ....core.mlops.mlops_utils import MLOpsUtils
-from ..comm_utils.constants import SchedulerConstants
-from .device_model_db import FedMLModelDatabase
-from .device_replica_controller import FedMLDeviceReplicaController
-
-
-class RunnerError(BaseException):
-    """ Runner failed. """
-    pass
-
-
-class RunnerCompletedError(Exception):
-    """ Runner completed. """
-    pass
-
-
-class FedMLServerRunner:
-    FEDML_CLOUD_SERVER_PREFIX = "fedml-server-run-"
-
-    def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id=0):
-        self.inference_gateway_process = None
-        self.local_api_process = None
-        self.run_process_event = None
-        self.run_process_event_map = dict()
-        self.run_process_completed_event = None
-        self.run_process_completed_event_map = dict()
-        self.run_as_cloud_agent = False
-        self.run_as_cloud_server = False
-        self.run_as_edge_server_and_agent = False
-        self.run_as_cloud_server_and_agent = False
-        self.fedml_packages_base_dir = None
-        self.fedml_packages_unzip_dir = None
-        self.mqtt_mgr = None
-        self.running_request_json = dict()
-        self.run_id = run_id
-        self.client_mqtt_mgr = None
-        self.client_mqtt_is_connected = False
-        self.client_mqtt_lock = None
-        self.unique_device_id = None
-        self.edge_id = edge_id
-        self.server_agent_id = 0
-        if request_json is not None:
-            self.server_agent_id = request_json.get("server_id", 0)
-        self.process = None
-        self.args = args
-        self.request_json = copy.deepcopy(request_json)
-        self.version = args.version
-        self.device_id = args.device_id
-        self.cur_dir = os.path.split(os.path.realpath(__file__))[0]
-        if args.current_running_dir is not None:
-            self.cur_dir = args.current_running_dir
-
-        self.agent_config = agent_config
-        self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data")
-        self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data")
-        self.fedml_data_dir = self.fedml_data_base_package_dir
-        self.fedml_config_dir = os.path.join("/", "fedml", "conf")
-
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {}
-
-        self.mlops_metrics = None
-        self.run_status = None
-        self.infer_host = "127.0.0.1"
-        self.redis_addr = "local"
-        self.redis_port = "6379"
-        self.redis_password = "fedml_default"
-
-        self.slave_deployment_statuses_mapping = dict()
-        self.slave_deployment_results_mapping = dict()
-        self.slave_update_result_mapping = dict()
-
-        self.model_runner_mapping = dict()
-        self.ntp_offset = MLOpsUtils.get_ntp_offset()
-
-        self.subscribed_topics = list()
-        self.user_name = None
-
-        self.replica_controller = None
-        self.deployed_replica_payload = None
-
-        self.autoscaler_launcher = None
-
-    def build_dynamic_constrain_variables(self, run_id, run_config):
-        pass
-
-    def unzip_file(self, zip_file, unzip_file_path):
-        unziped_file_name = ""
-        if zipfile.is_zipfile(zip_file):
-            with zipfile.ZipFile(zip_file, "r") as zipf:
-                zipf.extractall(unzip_file_path)
-                unziped_file_name = zipf.namelist()[0]
-
-        return unziped_file_name
-
-    def package_download_progress(self, count, blksize, filesize):
-        self.check_runner_stop_event()
-
-        downloaded = count * blksize
-        downloaded = filesize if downloaded > filesize else downloaded
-        progress = (downloaded / filesize * 100) if filesize != 0 else 0
-        progress_int = int(progress)
-        downloaded_kb = format(downloaded / 1024, '.2f')
-
-        # since this hook function is stateless, we need a state to avoid printing progress repeatedly
-        if count == 0:
-            self.prev_download_progress = 0
-        if progress_int != self.prev_download_progress and progress_int % 5 == 0:
-            self.prev_download_progress = progress_int
-            logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int))
-
-    def retrieve_and_unzip_package(self, package_name, package_url):
-        local_package_path = ServerConstants.get_model_package_dir()
-        if not os.path.exists(local_package_path):
-            os.makedirs(local_package_path, exist_ok=True)
-        local_package_file = "{}.zip".format(os.path.join(local_package_path, package_name))
-        if os.path.exists(local_package_file):
-            os.remove(local_package_file)
-
-        # Download without renaming
-        urllib.request.urlretrieve(package_url, filename=None, reporthook=self.package_download_progress)
-
-        unzip_package_path = ServerConstants.get_model_dir()
-        self.fedml_packages_base_dir = unzip_package_path
-        try:
-            shutil.rmtree(
-                os.path.join(unzip_package_path, package_name), ignore_errors=True
-            )
-        except Exception as e:
-            pass
-        logging.info("local_package_file {}, unzip_package_path {}".format(
-            local_package_file, unzip_package_path))
-        package_name = self.unzip_file(local_package_file, unzip_package_path)
-        unzip_package_path = os.path.join(unzip_package_path, package_name)
-        return unzip_package_path
-
-    def update_local_fedml_config(self, run_id, run_config):
-        model_config = run_config
-        model_name = model_config["model_name"]
-        model_storage_url = model_config["model_storage_url"]
-        scale_min = model_config.get("instance_scale_min", 0)
-        scale_max = model_config.get("instance_scale_max", 0)
-        inference_engine = model_config.get("inference_engine", 0)
-        inference_end_point_id = run_id
-
-        # Copy config file from the client
-        unzip_package_path = self.retrieve_and_unzip_package(
-            model_name, model_storage_url
-        )
-        fedml_local_config_file = os.path.join(unzip_package_path, "fedml_model_config.yaml")
-
-        # Load the above config to memory
-        package_conf_object = {}
-        if os.path.exists(fedml_local_config_file):
-            package_conf_object = load_yaml_config(fedml_local_config_file)
-
-        return unzip_package_path, package_conf_object
-
-    def get_usr_indicated_token(self, request_json) -> str:
-        usr_indicated_token = ""
-        if "parameters" in request_json and "authentication_token" in request_json["parameters"]:
-            usr_indicated_token = request_json["parameters"]["authentication_token"]
-        return usr_indicated_token
-
-    def build_dynamic_args(self, run_config, package_conf_object, base_dir):
-        pass
-
-    def run(self, process_event, completed_event):
-        # print(f"Model master runner process id {os.getpid()}, run id {self.run_id}")
-
-        if platform.system() != "Windows":
-            os.setsid()
-
-        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
-        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')
-
-        self.run_process_event = process_event
-        self.run_process_completed_event = completed_event
-        run_id = self.request_json.get("end_point_id")
-
-        try:
-            MLOpsUtils.set_ntp_offset(self.ntp_offset)
-
-            self.setup_client_mqtt_mgr()
-
-            self.run_impl()
-        except RunnerError:
-            logging.info("Runner stopped.")
-            self.mlops_metrics.report_server_training_status(
-                self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED,
-                is_from_model=True, edge_id=self.edge_id)
-        except RunnerCompletedError:
-            logging.info("Runner completed.")
-        except Exception as e:
-            logging.error("Runner exits with exceptions.")
-            logging.error(traceback.format_exc())
-            logging.error(e)
-            self.mlops_metrics.report_server_training_status(
-                self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED,
-                is_from_model=True, edge_id=self.edge_id)
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
-            if self.mlops_metrics is not None:
-                self.mlops_metrics.stop_sys_perf()
-            time.sleep(3)
-            sys.exit(1)
-        finally:
-            logging.info("[Master] Deployment finished, release resources.")
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
-            if self.mlops_metrics is not None:
-                self.mlops_metrics.stop_sys_perf()
-            time.sleep(3)
-            if not self.run_as_cloud_server:
-                self.release_client_mqtt_mgr()
-
-    def parse_model_run_params(self, running_json):
-        run_id = running_json["end_point_id"]
-        end_point_name = running_json["end_point_name"]
-        token = running_json["token"]
-        user_id = running_json["user_id"]
-        user_name = running_json["user_name"]
-        device_ids = running_json["device_ids"]
-        device_objs = running_json["device_objs"]
-
-        model_config = running_json["model_config"]
-        model_name = model_config["model_name"]
-        model_id = model_config["model_id"]
-        model_storage_url = model_config["model_storage_url"]
-        scale_min = model_config.get("instance_scale_min", 0)
-        scale_max = model_config.get("instance_scale_max", 0)
-        inference_engine = model_config.get("inference_engine", 0)
-        model_is_from_open = model_config["is_from_open"]
-        inference_end_point_id = run_id
-        use_gpu = "gpu"  # TODO: Get GPU from device infos
-        memory_size = "256m"  # TODO: Get Memory size for each instance
-        model_version = model_config["model_version"]
-        model_config_parameters = running_json.get("parameters", {})
-
-        inference_port = model_config_parameters.get("server_internal_port",  # Internal port is for the gateway
-                                                     ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
-        inference_port_external = model_config_parameters.get("server_external_port", inference_port)
-
-        return run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \
-            model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \
-            inference_end_point_id, use_gpu, memory_size, model_version, inference_port
-
-    def inference_run(self):
-        # run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \
-        #     model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \
-        #     inference_end_point_id, use_gpu, memory_size, model_version, inference_port =
-        #     self.parse_model_run_params(self.request_json)
-        #
-        # inference_server = FedMLModelServingServer(self.args,
-        #                                            end_point_name,
-        #                                            model_name,
-        #                                            model_version,
-        #                                            inference_request=self.request_json)
-        # inference_server.run()
-        pass
-
-    def run_impl(self):
-        run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \
-            model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \
-            inference_end_point_id, use_gpu, memory_size, model_version, inference_port = self.parse_model_run_params(
-            self.request_json)
-
-        # TODO(Raphael): This measurement is for the host machine. Change to container's metrics
-        self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id)
-
-        self.check_runner_stop_event()
-
-        # Send stage: MODEL_DEPLOYMENT_STAGE4 = "ForwardRequest2Slave"
-        self.send_deployment_stages(self.run_id, model_name, model_id,
-                                    "",
-                                    ServerConstants.MODEL_DEPLOYMENT_STAGE4["index"],
-                                    ServerConstants.MODEL_DEPLOYMENT_STAGE4["text"],
-                                    ServerConstants.MODEL_DEPLOYMENT_STAGE4["text"])
-
-        self.args.run_id = self.run_id
-        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
-
-        # Report server running status
-        self.check_runner_stop_event()
-        self.mlops_metrics.report_server_training_status(
-            run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_STARTING,
-            is_from_model=True, running_json=json.dumps(self.request_json), edge_id=self.edge_id)
-        self.send_deployment_status(self.run_id, end_point_name,
-                                    model_name, "",
-                                    ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYING)
-
-        # Start unified inference gateway if it has not started
-        self.start_device_inference_gateway(
-            run_id, end_point_name, model_id, model_name, model_version, inference_port=inference_port)
-
-        # (re)Start inference monitor server
-        self.stop_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version)
-        self.start_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version)
-
-        # Changed the master's status to "IDLE"
-        self.mlops_metrics.broadcast_server_training_status(
-            run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED,
-            is_from_model=True, edge_id=self.edge_id)
-
-        # Forward deployment request to slave devices
-        self.check_runner_stop_event()
-
-        # Handle "op:add" && "op:remove"
-        devices_sent_add_or_remove_msg = self.send_deployment_start_request_to_edges()
-
-        # Handle "op:update"
-        try:
-            devices_sent_update_remove_msg = self.send_first_scroll_update_msg()
-
-            if len(devices_sent_add_or_remove_msg) == 0 and len(devices_sent_update_remove_msg) == 0:
-                # No device is added, updated or removed
-                logging.info("No device is added, updated or removed. No action needed for reconciliation.")
-                ip = self.get_ip_address(self.request_json)
-                master_port = os.getenv("FEDML_MASTER_PORT", None)
-                if master_port is not None:
-                    inference_port = int(master_port)
-                model_inference_port = inference_port
-                if ip.startswith("http://") or ip.startswith("https://"):
-                    model_inference_url = "{}/api/v1/predict".format(ip)
-                else:
-                    model_inference_url = "http://{}:{}/api/v1/predict".format(ip, model_inference_port)
-
-                self.set_runner_completed_event(run_id)
-
-                self.send_deployment_status(run_id, end_point_name,
-                                            model_name,
-                                            model_inference_url,
-                                            ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED)
-
-                # Set setting to "DEPLOYED" for autoscaling service reference
-                FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-                FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                    update_user_setting_replica_num(end_point_id=run_id, state="DEPLOYED")
-
-                return
-        except Exception as e:
-            logging.error(f"Failed to send first scroll update message due to {e}.")
-            logging.error(f"Exception traceback {traceback.format_exc()}.")
-
-        logging.info("Start waiting for result callback from workers ...")
-
-        while True:
-            # Wait for all devices to finish the add / delete / update operation
-            self.check_runner_stop_event()
-            time.sleep(3)
-
-    def check_runner_stop_event(self):
-        if self.run_process_event is not None and self.run_process_event.is_set():
-            logging.info("Received stopping event.")
-            raise RunnerError("Runner stopped")
-
-        if self.run_process_completed_event is not None and self.run_process_completed_event.is_set():
-            logging.info("Received completed event.")
-            raise RunnerCompletedError("Runner completed")
-
-    def start_device_inference_gateway(
-            self, run_id, end_point_name, model_id,
-            model_name, model_version, inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT):
-        # start unified inference server
-        running_model_name = ServerConstants.get_running_model_name(end_point_name,
-                                                                    model_name, model_version, run_id, model_id)
-        python_program = get_python_program()
-        master_port = os.getenv("FEDML_MASTER_PORT", None)
-        if master_port is not None:
-            inference_port = int(master_port)
-        if not ServerConstants.is_running_on_k8s():
-            logging.info(f"start the model inference gateway, end point {run_id}, "
-                         f"model name {model_name} at port {inference_port}...")
-            self.check_runner_stop_event()
-
-            use_mqtt_inference = os.getenv("FEDML_USE_MQTT_INFERENCE", "False")
-            use_mqtt_inference = True if use_mqtt_inference.lower() == 'true' else False
-            use_worker_gateway = os.getenv("FEDML_USE_WORKER_GATEWAY", "False")
-            use_worker_gateway = True if use_worker_gateway.lower() == 'true' else False
-            inference_gw_cmd = "fedml.computing.scheduler.model_scheduler.device_model_inference:api"
-            inference_gateway_pids = RunProcessUtils.get_pid_from_cmd_line(inference_gw_cmd)
-            if inference_gateway_pids is None or len(inference_gateway_pids) <= 0:
-                cur_dir = os.path.dirname(__file__)
-                fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
-                connect_str = "@FEDML@"
-                ext_info = sys_utils.random1(
-                    self.agent_config["mqtt_config"]["BROKER_HOST"] + connect_str +
-                    str(self.agent_config["mqtt_config"]["BROKER_PORT"]) + connect_str +
-                    self.agent_config["mqtt_config"]["MQTT_USER"] + connect_str +
-                    self.agent_config["mqtt_config"]["MQTT_PWD"] + connect_str +
-                    str(self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"]), "FEDML@9999GREAT")
-                self.inference_gateway_process = ServerConstants.exec_console_with_script(
-                    "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" "
-                    "END_POINT_NAME=\"{}\" "
-                    "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" "
-                    "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} "
-                    "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
-                    "--log-level critical".format(
-                        self.redis_addr, self.redis_port, self.redis_password,
-                        end_point_name,
-                        model_name, model_version, "", self.args.version,
-                        use_mqtt_inference, use_worker_gateway, ext_info,
-                        python_program, inference_gw_cmd, str(inference_port), fedml_base_dir
-                    ),
-                    should_capture_stdout=False,
-                    should_capture_stderr=False
-                )
-
-    def start_device_inference_monitor(self, run_id, end_point_name,
-                                       model_id, model_name, model_version, check_stopped_event=True):
-        # start inference monitor server
-        # Will report the qps related metrics to the MLOps
-        logging.info(f"start the model inference monitor, end point {run_id}, model name {model_name}...")
-        if check_stopped_event:
-            self.check_runner_stop_event()
-        run_id_str = str(run_id)
-        pip_source_dir = os.path.dirname(__file__)
-        monitor_file = os.path.join(pip_source_dir, "device_model_monitor.py")
-        python_program = get_python_program()
-        running_model_name = ServerConstants.get_running_model_name(end_point_name,
-                                                                    model_name, model_version, run_id, model_id)
-        self.monitor_process = ServerConstants.exec_console_with_shell_script_list(
-            [
-                python_program,
-                monitor_file,
-                "-v",
-                self.args.version,
-                "-ep",
-                run_id_str,
-                "-epn",
-                str(end_point_name),
-                "-mi",
-                str(model_id),
-                "-mn",
-                model_name,
-                "-mv",
-                model_version,
-                "-iu",
-                "infer_url",
-                "-ra",
-                self.redis_addr,
-                "-rp",
-                self.redis_port,
-                "-rpw",
-                self.redis_password
-            ],
-            should_capture_stdout=False,
-            should_capture_stderr=False
-        )
-
-    def stop_device_inference_monitor(self, run_id, end_point_name, model_id, model_name, model_version):
-        # stop inference monitor server
-        logging.info(f"stop the model inference monitor, end point {run_id}, model name {model_name}...")
-        sys_utils.cleanup_model_monitor_processes(run_id, end_point_name,
-                                                  model_id, model_name, model_version)
-
-    def cleanup_run_when_finished(self):
-        logging.info("Cleanup run successfully when finished.")
-
-        self.mlops_metrics.broadcast_server_training_status(
-            self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED,
-            is_from_model=True, edge_id=self.edge_id
-        )
-
-        try:
-            self.mlops_metrics.stop_sys_perf()
-        except Exception as ex:
-            pass
-
-        time.sleep(1)
-
-        try:
-            local_package_path = ServerConstants.get_package_download_dir()
-            for package_file in listdir(local_package_path):
-                if os.path.basename(package_file).startswith("run_" + str(self.run_id)):
-                    shutil.rmtree(os.path.join(local_package_path, package_file), ignore_errors=True)
-        except Exception as e:
-            pass
-
-    def cleanup_run_when_starting_failed(self):
-        logging.info("Cleanup run successfully when starting failed.")
-
-        self.mlops_metrics.broadcast_server_training_status(
-            self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED,
-            is_from_model=True, edge_id=self.edge_id)
-
-        try:
-            self.mlops_metrics.stop_sys_perf()
-        except Exception as ex:
-            pass
-
-        time.sleep(1)
-
-        try:
-            local_package_path = ServerConstants.get_package_download_dir()
-            for package_file in listdir(local_package_path):
-                if os.path.basename(package_file).startswith("run_" + str(self.run_id)):
-                    shutil.rmtree(os.path.join(local_package_path, package_file), ignore_errors=True)
-        except Exception as e:
-            pass
-
-    def cleanup_run_when_deploy_failed(self):
-        topic = f"model_ops/model_device/delete_deployment/{self.edge_id}"
-        self.callback_delete_deployment(topic, payload=json.dumps(self.request_json))
-
-    def callback_deployment_result_message(self, topic=None, payload=None):
-        """
-        This method is called when a deployment result is received from a worker device.
-        """
-        # Save deployment result to local cache
-        topic_splits = str(topic).split('/')
-        device_id = topic_splits[-1]
-        payload_json = json.loads(payload)
-        end_point_id = payload_json["end_point_id"]
-        end_point_name = payload_json["end_point_name"]
-        model_id = payload_json["model_id"]
-        model_name = payload_json["model_name"]
-        model_version = payload_json["model_version"]
-        model_status = payload_json["model_status"]
-        replica_no = payload_json.get("replica_no", None)  # "no" Idx start from 1
-        run_id_str = str(end_point_id)
-
-        # HotFix(Raphael): logging service cross talk
-        # Change the handler since each handler need to write to different log files
-        try:
-            # Remove the existing file handler
-            root_logger = logging.getLogger()
-            for handler in root_logger.handlers:
-                if isinstance(handler, logging.FileHandler):
-                    root_logger.removeHandler(handler)
-
-            # Correct log path: ~/.fedml/fedml-model-server/fedml/logs/fedml-run-$rid-edge-$eid.log
-            log_file = os.path.join(ServerConstants.get_log_file_dir(),
-                                    f"fedml-run-{run_id_str}-edge-{self.edge_id}.log")
-
-            filehandler = logging.FileHandler(log_file, "a")
-
-            program_prefix = "FedML-Server @device-id-{}".format(self.edge_id)
-            formatter = MLOpsFormatter(fmt="[" + program_prefix + "] [%(asctime)s] [%(levelname)s] "
-                                                                  "[%(filename)s:%(lineno)d:%(funcName)s] %("
-                                                                  "message)s")
-
-            filehandler.setFormatter(formatter)
-            root_logger.addHandler(filehandler)
-        except Exception as e:
-            logging.warning(f"Failed to change the logging handler due to {e}.")
-
-        assert run_id_str in self.model_runner_mapping, (f"Run id {run_id_str} is not in the model runner mapping."
-                                                         f"Current mapping {self.model_runner_mapping}.")
-
-        logging.info("========== callback_deployment_result_message ==========\n")
-        #  Identify the operation for this run (add, remove, update)
-        if run_id_str not in self.running_request_json:
-            logging.error(f"Run id {run_id_str} is not in the running request json.")
-            return
-
-        # The rolling update and scale out / in operation should not happen at the same time
-        assert not ("replica_num_diff" in self.running_request_json[run_id_str] and
-                    len(self.running_request_json[run_id_str]["replica_num_diff"]) > 0 and
-                    "replica_version_diff" in self.running_request_json[run_id_str])
-
-        if "replica_version_diff" in self.running_request_json[run_id_str]:
-            run_operation = "UPDATE"
-        elif "replica_num_diff" in self.running_request_json[run_id_str] and \
-                len(self.running_request_json[run_id_str]["replica_num_diff"]) > 0:
-            run_operation = "ADD_OR_REMOVE"
-        else:
-            logging.error(f"Unsupported operation for run id {run_id_str}. and request json "
-                          f"{self.running_request_json[run_id_str]}")
-            return
-
-        logging.info(f"End point {end_point_id}; Device {device_id}; replica {replica_no}; "
-                     f"run_operation {run_operation} model status {model_status}.")
-
-        # OPTIONAL DEBUG PARAMS
-        # this_run_controller = self.model_runner_mapping[run_id_str].replica_controller
-        # logging.info(f"The current replica controller state is "
-        #              f"Total version diff num {this_run_controller.total_replica_version_diff_num}")
-        # logging.info(f"self.request_json now {self.request_json}")    # request_json will be deprecated
-        # this_run_request_json = self.running_request_json.get(run_id_str, None)
-        # logging.info(f"self.running_request_json now {this_run_request_json}")
-
-        # Set redis + sqlite deployment result
-        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-
-        # Deal with different model status
-        if model_status == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DELETED:
-            # remove
-            FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                delete_deployment_result_with_device_id_and_replica_no(
-                end_point_id, end_point_name, model_name, device_id, replica_no)
-        elif model_status == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED:
-            # add or update or update-failed-rollback
-            FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                set_deployment_result(end_point_id, end_point_name,
-                                      model_name, model_version,
-                                      device_id, payload, replica_no)
-
-            # Note: To display the result in the UI, we need to save successful deployment result to the database
-            self.model_runner_mapping[run_id_str].deployed_replica_payload = copy.deepcopy(payload_json)
-        else:
-            if model_status != ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED:
-                logging.error(f"Unsupported model status {model_status}.")
-
-            # Avoid endless loop, if the rollback also failed, we should report the failure to the MLOps
-            if self.model_runner_mapping[run_id_str].replica_controller.under_rollback:
-                self.send_deployment_status(
-                    end_point_id, end_point_name, payload_json["model_name"], "",
-                    ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
-                return
-
-            # Failure handler, send the rollback message to the worker devices only if it has not been rollback
-            if run_operation == "ADD_OR_REMOVE":
-                # During Scale out / in,
-                # the worker that already been scaled out / in should be sent the rollback message
-                rollback_dict = self.model_runner_mapping[run_id_str].replica_controller.rollback_add_or_remove_replica(
-                    device_id=device_id, replica_no=replica_no, op_type=run_operation
-                )
-                self.model_runner_mapping[run_id_str].replica_controller.under_rollback = True
-
-                if rollback_dict is not None and len(rollback_dict) > 0:
-                    self.send_deployment_status(
-                        end_point_id, end_point_name, payload_json["model_name"], "",
-                        ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTING)
-                    self.send_rollback_add_remove_op(run_id_str, rollback_dict)
-                    return
-                else:
-                    # This is the last worker that failed, so we should continue to "ABORTED" status
-                    model_config_parameters = self.running_request_json[run_id_str]["parameters"]
-                    inference_port = model_config_parameters.get("server_internal_port",
-                                                                 ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
-                    inference_port_external = model_config_parameters.get("server_external_port", inference_port)
-                    ip = self.get_ip_address(self.running_request_json[run_id_str])
-                    if ip.startswith("http://") or ip.startswith("https://"):
-                        model_inference_url = "{}/inference/{}".format(ip, end_point_id)
-                    else:
-                        model_inference_url = "http://{}:{}/inference/{}".format(ip, inference_port_external,
-                                                                                 end_point_id)
-
-                    self.send_deployment_status(end_point_id, end_point_name,
-                                                payload_json["model_name"],
-                                                model_inference_url,
-                                                ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED)
-
-                    # For auto-scaling, should update the state to "DEPLOYED"
-                    FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                        update_user_setting_replica_num(end_point_id=end_point_id, state="DEPLOYED")
-
-                    self.model_runner_mapping[run_id_str].replica_controller.under_rollback = False
-
-                    return
-            elif run_operation == "UPDATE":
-                # Overwrite the json with the rollback version diff
-                rollback_version_diff = \
-                    self.model_runner_mapping[run_id_str].replica_controller.rollback_get_replica_version_diff(
-                        device_id_trigger=device_id, replica_no_trigger=replica_no)
-
-                # Change the target version to the start version
-                self.model_runner_mapping[run_id_str].replica_controller.rollback_setback_target_replica_version()
-
-                self.running_request_json[run_id_str]["replica_version_diff"] = copy.deepcopy(rollback_version_diff)
-
-                # Send the rollback message to the worker devices
-                self.send_rollback_msg(run_id_str)
-
-                # Set the deployment status to ABORTING
-                self.send_deployment_status(
-                    end_point_id, end_point_name, payload_json["model_name"], "",
-                    ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTING)
-
-                # TODO(Raphael): Check if resource left not cleaned up
-                return
-            else:
-                logging.error(f"Unsupported operation {run_operation}.")
-                return
-
-        # Move to the next state (rolling update, finish the deployment, etc.)
-        # Notify the replica number controller
-        (self.model_runner_mapping[run_id_str].
-         replica_controller.callback_update_curr_replica_num_state(device_id, replica_no, model_status))
-
-        # Notify the replica version controller, which might trigger the next rolling update
-        self.send_next_scroll_update_msg(run_id_str, device_id, replica_no)
-
-        # Update the global deployment result mapping
-        if run_id_str not in self.slave_deployment_results_mapping:
-            self.slave_deployment_results_mapping[run_id_str] = dict()
-        if str(device_id) not in self.slave_deployment_results_mapping[run_id_str]:
-            self.slave_deployment_results_mapping[run_id_str][str(device_id)] = dict()
-        self.slave_deployment_results_mapping[run_id_str][str(device_id)][str(replica_no)] = model_status
-
-        logging.info("callback_deployment_result_message: topic {}, payload {}, result mapping {}.".format(
-            topic, payload, self.slave_deployment_results_mapping[run_id_str]))
-
-        request_json = self.running_request_json.get(run_id_str, None)
-        if request_json is None:
-            logging.error(f"The endpoint {end_point_id} is no longer running.")
-            self.send_deployment_status(
-                end_point_id, end_point_name, payload_json["model_name"], "",
-                ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
-            return
-
-        # Wait for all replica-level's result, not device-level
-        if (self.model_runner_mapping[run_id_str].replica_controller.is_all_replica_num_reconciled() and
-                self.model_runner_mapping[run_id_str].replica_controller.is_all_replica_version_reconciled()):
-            """
-            When all the devices have finished the add / delete / update operation
-            """
-            # Generate one unified inference api
-            # Note that here we use the gateway port instead of the inference port that is used by the slave device
-            model_config_parameters = request_json["parameters"]
-            inference_port = model_config_parameters.get("server_internal_port",
-                                                         ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
-            inference_port_external = model_config_parameters.get("server_external_port", inference_port)
-            ip = self.get_ip_address(request_json)
-
-            if ip.startswith("http://") or ip.startswith("https://"):
-                model_inference_url = "{}/inference/{}".format(ip, end_point_id)
-            else:
-                model_inference_url = "http://{}:{}/inference/{}".format(ip, inference_port_external, end_point_id)
-
-            # Send stage: MODEL_DEPLOYMENT_STAGE5 = "StartInferenceIngress"
-            self.send_deployment_stages(end_point_id, model_name, model_id,
-                                        model_inference_url,
-                                        ServerConstants.MODEL_DEPLOYMENT_STAGE5["index"],
-                                        ServerConstants.MODEL_DEPLOYMENT_STAGE5["text"],
-                                        "inference url: {}".format(model_inference_url))
-
-            # Send the result to MLOps
-            if self.model_runner_mapping[run_id_str].deployed_replica_payload is not None:
-                payload_json = self.model_runner_mapping[run_id_str].deployed_replica_payload
-                model_slave_url = payload_json["model_url"]
-                payload_json["model_url"] = model_inference_url
-                payload_json["port"] = inference_port_external
-                token = FedMLModelCache.get_instance(self.redis_addr, self.redis_port).get_end_point_token(
-                    end_point_id, end_point_name, model_name)
-
-                model_metadata = payload_json["model_metadata"]
-                model_inputs = model_metadata["inputs"]
-                ret_inputs = list()
-                if "type" in model_metadata and model_metadata["type"] == "default":
-                    payload_json["input_json"] = {"end_point_name": end_point_name,
-                                                  "model_name": model_name,
-                                                  "token": str(token),
-                                                  "inputs": model_inputs,
-                                                  "outputs": []}
-                    payload_json["output_json"] = model_metadata["outputs"]
-                else:
-                    raise Exception(f"Unsupported model metadata type {model_metadata['type']}")
-
-                self.send_deployment_results_with_payload(
-                    end_point_id, end_point_name, payload_json,
-                    self.model_runner_mapping[run_id_str].replica_controller.target_replica_ids)
-
-                payload_json_saved = payload_json
-                payload_json_saved["model_slave_url"] = model_slave_url
-                FedMLServerDataInterface.get_instance().save_job_result(end_point_id, self.edge_id,
-                                                                        json.dumps(payload_json_saved))
-            else:
-                # Arrive here because only contains remove ops, so we do not need to update the model metadata
-                pass
-
-            # For auto-scaling, should update the state to "DEPLOYED"
-            FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                update_user_setting_replica_num(end_point_id=end_point_id, state="DEPLOYED")
-
-            if self.model_runner_mapping[run_id_str].replica_controller.under_rollback:
-                # If first time failed (Still might need rollback), then send failed message to the MLOps
-                if not (FedMLModelCache.get_instance(self.redis_addr, self.redis_port).
-                        get_end_point_activation(end_point_id)):
-                    self.send_deployment_status(
-                        end_point_id, end_point_name, payload_json["model_name"], "",
-                        ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
-                else:
-                    self.send_deployment_status(end_point_id, end_point_name,
-                                                payload_json["model_name"],
-                                                model_inference_url,
-                                                ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED)
-                self.model_runner_mapping[run_id_str].replica_controller.under_rollback = False
-            else:
-                # Set the end point activation status to True, for scaling out / in and rolling update
-                FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                    set_end_point_activation(end_point_id, end_point_name, True)
-
-                self.send_deployment_status(end_point_id, end_point_name,
-                                            payload_json["model_name"],
-                                            model_inference_url,
-                                            ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED)
-
-            self.slave_deployment_results_mapping[run_id_str] = dict()
-
-            time.sleep(3)
-            self.set_runner_completed_event(end_point_id)
-
-    def callback_deployment_status_message(self, topic=None, payload=None):
-        # [Deprecated] Merge the logic into callback_deployment_result_message
-        logging.info("[Deprecated] callback_deployment_status_message: topic {}, payload {}.".format(
-            topic, payload))
-        pass
-
-    def send_deployment_start_request_to_edges(self, in_request_json=None):
-        if in_request_json is not None:
-            self.request_json = in_request_json
-
-        # Iterate through replica_num_diff, both add and replace should be sent to the edge devices
-        if "replica_num_diff" not in self.request_json or self.request_json["replica_num_diff"] is None:
-            return []
-
-        edge_id_list = []
-        for device_id in self.request_json["replica_num_diff"].keys():
-            edge_id_list.append(device_id)
-
-        self.request_json["master_node_ip"] = self.get_ip_address(self.request_json)
-        should_added_devices = []
-        for edge_id in edge_id_list:
-            if edge_id == self.edge_id:
-                continue
-            should_added_devices.append(edge_id)
-            # send start deployment request to each device
-            self.send_deployment_start_request_to_edge(edge_id, self.request_json)
-        return should_added_devices
-
-    def send_deployment_start_request_to_edge(self, edge_id, res_json):
-        topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(edge_id))
-        logging.info("start_deployment: send topic " + topic_start_deployment + f" to client {edge_id}...")
-        self.client_mqtt_mgr.send_message_json(topic_start_deployment, json.dumps(res_json))
-
-    def get_ip_address(self, request_json):
-        # OPTION 1: Use local ip
-        ip = ServerConstants.get_local_ip()
-
-        # OPTION 2: Auto detect public ip
-        if "parameters" in request_json and \
-                ServerConstants.AUTO_DETECT_PUBLIC_IP in request_json["parameters"] and \
-                request_json["parameters"][ServerConstants.AUTO_DETECT_PUBLIC_IP]:
-            ip = ServerConstants.get_public_ip()
-
-        # OPTION 3: Use user indicated ip
-        if self.infer_host is not None and self.infer_host != "127.0.0.1" and self.infer_host != "localhost":
-            ip = self.infer_host
-
-        return ip
-
-    def send_deployment_delete_request_to_edges(self, payload, model_msg_object):
-        edge_id_list_to_delete = model_msg_object.device_ids
-
-        # Remove the model master node id from the list using index 0
-        edge_id_list_to_delete = edge_id_list_to_delete[1:]
-
-        logging.info("Device ids to be deleted: " + str(edge_id_list_to_delete))
-
-        for edge_id in edge_id_list_to_delete:
-            if edge_id == self.edge_id:
-                continue
-            # send delete deployment request to each model device
-            topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(edge_id))
-            logging.info("delete_deployment: send topic " + topic_delete_deployment + " to client...")
-            self.client_mqtt_mgr.send_message_json(topic_delete_deployment, payload)
-
-    def ota_upgrade(self, payload, request_json):
-        run_id = request_json["end_point_id"]
-        force_ota = False
-        ota_version = None
-
-        try:
-            parameters = request_json.get("parameters", None)
-            common_args = parameters.get("common_args", None)
-            force_ota = common_args.get("force_ota", False)
-            ota_version = common_args.get("ota_version", None)
-        except Exception as e:
-            pass
-
-        if force_ota and ota_version is not None:
-            should_upgrade = True if ota_version != fedml.__version__ else False
-            upgrade_version = ota_version
-        else:
-            try:
-                fedml_is_latest_version, local_ver, remote_ver = sys_utils.check_fedml_is_latest_version(self.version)
-            except Exception as e:
-                return
-
-            should_upgrade = False if fedml_is_latest_version else True
-            upgrade_version = remote_ver
-
-        if should_upgrade:
-            job_obj = FedMLServerDataInterface.get_instance().get_job_by_id(run_id)
-            if job_obj is None:
-                FedMLServerDataInterface.get_instance(). \
-                    save_started_job(run_id, self.edge_id, time.time(),
-                                     ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING,
-                                     ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING,
-                                     payload)
-
-            logging.info(f"Upgrade to version {upgrade_version} ...")
-
-            sys_utils.do_upgrade(self.version, upgrade_version)
-
-            raise Exception("Restarting after upgraded...")
-
-    def callback_start_deployment(self, topic, payload):
-        try:
-            MLOpsConfigs.fetch_all_configs()
-        except Exception as e:
-            pass
-
-        # Get deployment params
-        request_json = json.loads(payload)
-        run_id = request_json["end_point_id"]
-        end_point_name = request_json["end_point_name"]
-        token = request_json["token"]
-        user_id = request_json["user_id"]
-        user_name = request_json["user_name"]
-        device_ids = request_json["device_ids"]
-        device_objs = request_json["device_objs"]
-
-        model_config = request_json["model_config"]
-        model_name = model_config["model_name"]
-        model_version = model_config["model_version"]
-        model_id = model_config["model_id"]
-        model_storage_url = model_config["model_storage_url"]
-        scale_min = model_config.get("instance_scale_min", 0)
-        scale_max = model_config.get("instance_scale_max", 0)
-        inference_engine = model_config.get("inference_engine", 0)
-        enable_auto_scaling = request_json.get("enable_auto_scaling", False)
-        desired_replica_num = request_json.get("desired_replica_num", 1)
-
-        target_queries_per_replica = request_json.get("target_queries_per_replica", 10)
-        aggregation_window_size_seconds = request_json.get("aggregation_window_size_seconds", 60)
-        scale_down_delay_seconds = request_json.get("scale_down_delay_seconds", 120)
-
-        inference_end_point_id = run_id
-
-        logging.info("[Master] received start deployment request for end point {}.".format(run_id))
-
-        # Set redis config
-        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-
-        # Save the user setting (about replica number) of this run to Redis, if existed, update it
-        FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_user_setting_replica_num(
-            end_point_id=run_id, end_point_name=end_point_name, model_name=model_name, model_version=model_version,
-            replica_num=desired_replica_num, enable_auto_scaling=enable_auto_scaling,
-            scale_min=scale_min, scale_max=scale_max, state="DEPLOYING",
-            aggregation_window_size_seconds=aggregation_window_size_seconds,
-            target_queries_per_replica=target_queries_per_replica,
-            scale_down_delay_seconds=int(scale_down_delay_seconds)
-        )
-
-        # Start log processor for current run
-        self.args.run_id = run_id
-        self.args.edge_id = self.edge_id
-        MLOpsRuntimeLog(args=self.args).init_logs()
-        MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source(
-            ServerConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT)
-        MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id)
-
-        # # Deprecated
-        # self.ota_upgrade(payload, request_json)
-
-        # Add additional parameters to the request_json
-        run_id = inference_end_point_id
-        self.args.run_id = run_id
-        self.run_id = run_id
-        request_json["run_id"] = run_id
-        self.request_json = request_json
-        run_id_str = str(run_id)
-        self.running_request_json[run_id_str] = request_json
-        self.request_json["master_node_ip"] = self.get_ip_address(self.request_json)
-
-        # Set the target status of the devices to redis
-        FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-            set_end_point_device_info(request_json["end_point_id"], end_point_name, json.dumps(device_objs))
-
-        # Setup Token
-        usr_indicated_token = self.get_usr_indicated_token(request_json)
-        if usr_indicated_token != "":
-            logging.info(f"Change Token from{token} to {usr_indicated_token}")
-            token = usr_indicated_token
-        FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-            set_end_point_token(run_id, end_point_name, model_name, token)
-
-        self.subscribe_slave_devices_message(request_json)
-
-        # Report stage to mlops: MODEL_DEPLOYMENT_STAGE1 = "Received"
-        self.send_deployment_stages(self.run_id, model_name, model_id,
-                                    "",
-                                    ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"],
-                                    ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"],
-                                    "Received request for endpoint {}".format(run_id))
-
-        # Report stage to mlops: MODEL_DEPLOYMENT_STAGE2 = "Initializing"
-        self.send_deployment_stages(self.run_id, model_name, model_id,
-                                    "",
-                                    ServerConstants.MODEL_DEPLOYMENT_STAGE2["index"],
-                                    ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"],
-                                    ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"])
-
-        ServerConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, self.edge_id, run_id=run_id)
-
-        if self.run_as_edge_server_and_agent:
-            # Replica Controller is per deployment
-            replica_controller = FedMLDeviceReplicaController(self.edge_id, self.request_json)
-
-            # Prepare num diff
-            new_request_with_num_diff = replica_controller.generate_diff_to_request_json()
-            self.running_request_json[run_id_str] = new_request_with_num_diff
-            request_json = new_request_with_num_diff
-
-            # Listen to extra worker topics, especially when worker's replica remove to zero,
-            # In this case, currently Java will NOT send those worker ids to the master, but still need to listen to it.
-            if "replica_num_diff" in request_json and len(request_json["replica_num_diff"]) > 0:
-                for device_id in request_json["replica_num_diff"].keys():
-                    # {"op": "remove", "curr_num": 1, "target_num": 0}
-                    if request_json["replica_num_diff"][device_id]["op"] == "remove" and \
-                            request_json["replica_num_diff"][device_id]["target_num"] == 0:
-                        self.subscribe_spec_device_message(run_id, device_id)
-
-            # Prepare version diff
-            new_request_with_version_diff = replica_controller.init_first_update_device_replica_mapping()
-            self.running_request_json[run_id_str] = new_request_with_version_diff
-            request_json = new_request_with_version_diff
-
-            # Init the model runner
-            server_runner = FedMLServerRunner(
-                self.args, run_id=run_id, request_json=request_json, agent_config=self.agent_config
-            )
-            server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent
-            server_runner.edge_id = self.edge_id
-            server_runner.infer_host = self.infer_host
-            server_runner.redis_addr = self.redis_addr
-            server_runner.redis_port = self.redis_port
-            server_runner.redis_password = self.redis_password
-            server_runner.replica_controller = replica_controller
-
-            logging.info(f"[Master] new request for id {run_id_str}")
-            logging.info(f"[Master] model runner mapping before: {self.model_runner_mapping.items()}")
-
-            self.run_process_event_map[run_id_str] = multiprocessing.Event()
-            self.run_process_event_map[run_id_str].clear()
-            server_runner.run_process_event = self.run_process_event_map[run_id_str]
-            self.run_process_completed_event_map[run_id_str] = multiprocessing.Event()
-            self.run_process_completed_event_map[run_id_str].clear()
-            server_runner.run_process_completed_event = self.run_process_completed_event_map[run_id_str]
-            self.model_runner_mapping[run_id_str] = server_runner
-
-            logging.info(f"[Master] model runner mapping after: {self.model_runner_mapping.items()}")
-
-            # This subprocess will copy the server_runner and run it, but they are not the same object
-            server_process = Process(target=server_runner.run, args=(
-                self.run_process_event_map[run_id_str], self.run_process_completed_event_map[run_id_str]
-            ))
-            server_process.start()
-            ServerConstants.save_run_process(run_id, server_process.pid)
-
-            # Send stage: MODEL_DEPLOYMENT_STAGE3 = "StartRunner"
-            self.send_deployment_stages(self.run_id, model_name, model_id,
-                                        "",
-                                        ServerConstants.MODEL_DEPLOYMENT_STAGE3["index"],
-                                        ServerConstants.MODEL_DEPLOYMENT_STAGE3["text"],
-                                        ServerConstants.MODEL_DEPLOYMENT_STAGE3["text"])
-
-    def send_first_scroll_update_msg(self):
-        """
-        Replica-level rolling update.
-        Delete the record of the replaced device and send the deployment msg to the devices
-        """
-        if "replica_version_diff" not in self.request_json or self.request_json["replica_version_diff"] is None:
-            return []
-
-        first_chunk_dict = self.request_json["replica_version_diff"]
-
-        # Delete the record of the replaced device
-        self.delete_device_replica_info_on_master(
-            self.request_json["end_point_id"], self.request_json["end_point_name"],
-            self.request_json["model_config"]["model_name"], first_chunk_dict)
-
-        logging.info(f"Send the first scroll update msg to the device {first_chunk_dict} ")
-
-        # Send the deployment msg to the devices, (we reuse the start_deployment msg)
-        for edge_id in first_chunk_dict.keys():
-            if edge_id == self.edge_id:
-                continue
-            # send start deployment request to each device
-            self.send_deployment_start_request_to_edge(edge_id, self.request_json)
-        return list(first_chunk_dict.keys())
-
-    def send_rollback_msg(self, run_id_str):
-        # Avoid using the old request_json
-        self.delete_device_replica_info_on_master(
-            self.running_request_json[run_id_str]["end_point_id"],
-            self.running_request_json[run_id_str]["end_point_name"],
-            self.running_request_json[run_id_str]["model_config"]["model_name"],
-            self.running_request_json[run_id_str]["replica_version_diff"])
-
-        # Send the deployment msg to the devices, (we reuse the start_deployment msg)
-        for edge_id in self.running_request_json[run_id_str]["replica_version_diff"].keys():
-            if edge_id == self.edge_id:
-                continue
-            # send start deployment request to each device
-            self.send_deployment_start_request_to_edge(edge_id, self.running_request_json[run_id_str])
-
-    def delete_device_replica_info_on_master(self, endpoint_id, endpoint_name, model_name, edge_id_replica_no_dict):
-        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-        # Remove the record of the replaced device
-        # [Deprecated] deployment status & device info
-        # Delete the result in deployment result list in Redis / SQLite
-        device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-            get_deployment_result_list(endpoint_id, endpoint_name, model_name)
-
-        delete_device_result_list = []
-        for device_result in device_result_list:
-            device_result_dict = json.loads(device_result)
-            if (str(device_result_dict["cache_device_id"]) in edge_id_replica_no_dict.keys() and
-                    str(device_result_dict["cache_replica_no"]) in
-                    edge_id_replica_no_dict[str(device_result_dict["cache_device_id"])]):
-                delete_device_result_list.append(device_result)
-
-        for delete_item in delete_device_result_list:
-            FedMLModelCache.get_instance(self.redis_addr, self.redis_port).delete_deployment_result(
-                delete_item, endpoint_id, endpoint_name, model_name
-            )
-
-        logging.info(f"Deleted the replica record on master: {edge_id_replica_no_dict}")
-
-    def send_next_scroll_update_msg(self, run_id_str, device_id, replica_no):
-        """
-        Send the next scroll update msg to the devices if needed.
-        If there is no need for the next scroll update, directly return.
-        """
-        if replica_no is None:
-            return
-
-        replica_controller = self.model_runner_mapping[run_id_str].replica_controller
-
-        if replica_controller.total_replica_version_diff_num == 0:
-            return
-
-        if replica_controller.under_rollback:
-            replica_controller.intermediate_replica_version[device_id][replica_no] = replica_controller.start_version
-            return
-
-        logging.info(f"Curr updating window: {replica_controller.curr_replica_updating_window} "
-                     f"Curr version diff num: {replica_controller.total_replica_version_diff_num}")
-
-        replica_controller.callback_update_updating_window(device_id, replica_no)
-
-        # Decide whether to send the next scroll update
-        next_chunk_dict = replica_controller.get_next_chunk_devices_replica()
-
-        if next_chunk_dict:
-            logging.info(f"The next scroll update for end point {run_id_str} is {next_chunk_dict}")
-            # Update curr updating window
-            replica_controller.curr_replica_updating_window = copy.deepcopy(next_chunk_dict)
-
-            # Use global deployment result mapping to decide whether to send the next scroll update
-            self.running_request_json[run_id_str]["replica_version_diff"] = next_chunk_dict
-
-            # Avoid using the old request_json
-            self.delete_device_replica_info_on_master(
-                self.running_request_json[run_id_str]["end_point_id"],
-                self.running_request_json[run_id_str]["end_point_name"],
-                self.running_request_json[run_id_str]["model_config"]["model_name"],
-                next_chunk_dict)
-
-            # Send the deployment msg to the devices, (we reuse the start_deployment msg)
-            for edge_id in next_chunk_dict.keys():
-                if edge_id == self.edge_id:
-                    continue
-                # send start deployment request to each device
-                self.send_deployment_start_request_to_edge(edge_id, self.running_request_json[run_id_str])
-        return
-
-    def send_rollback_add_remove_op(self, run_id, rollback_replica_dict):
-        """
-        This method is used when the original add op failed, we need to rollback by delete the existed replicas
-        Input example:
-        rollback_replica_dict = {'96684': {'curr_num': 2, 'op': 'remove', 'target_num': 1}}
-        """
-        existed_request_json = self.running_request_json[str(run_id)]
-        updated_request_json = copy.deepcopy(existed_request_json)
-
-        # Reverse the replica_num_diff
-        updated_request_json["replica_num_diff"] = rollback_replica_dict
-
-        self.send_deployment_start_request_to_edges(in_request_json=updated_request_json)
-
-    def callback_activate_deployment(self, topic, payload):
-        logging.info("callback_activate_deployment: topic = %s, payload = %s" % (topic, payload))
-
-        # Parse payload as the model message object.
-        model_msg_object = FedMLModelMsgObject(topic, payload)
-
-        # Get the previous deployment status.
-        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-        endpoint_status = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-            get_end_point_status(model_msg_object.inference_end_point_id)
-        if endpoint_status != ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED:
-            return
-
-        # Set end point as activated status
-        FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_activation(
-            model_msg_object.inference_end_point_id, model_msg_object.end_point_name, True)
-
-    def callback_deactivate_deployment(self, topic, payload):
-        logging.info("callback_deactivate_deployment: topic = %s, payload = %s" % (topic, payload))
-
-        # Parse payload as the model message object.
-        model_msg_object = FedMLModelMsgObject(topic, payload)
-
-        # Get the endpoint status
-        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-        endpoint_status = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-            get_end_point_status(model_msg_object.inference_end_point_id)
-        if endpoint_status != ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED:
-            return
-
-        # Set end point as deactivated status
-        FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_activation(
-            model_msg_object.inference_end_point_id, model_msg_object.model_name, False)
-
-    def set_runner_stopped_event(self, run_id):
-        run_id_str = str(run_id)
-        server_runner = self.model_runner_mapping.get(run_id_str, None)
-        if server_runner is not None:
-            if server_runner.run_process_event is not None:
-                server_runner.run_process_event.set()
-            self.model_runner_mapping.pop(run_id_str)
-
-    def set_runner_completed_event(self, run_id):
-        run_id_str = str(run_id)
-        server_runner = self.model_runner_mapping.get(run_id_str, None)
-        if server_runner is not None:
-            if server_runner.run_process_completed_event is not None:
-                server_runner.run_process_completed_event.set()
-            self.model_runner_mapping.pop(run_id_str)
-
-    def callback_delete_deployment(self, topic, payload):
-        logging.info("[Master] callback_delete_deployment")
-        # Parse payload as the model message object.
-        model_msg_object = FedMLModelMsgObject(topic, payload)
-
-        # Delete SQLite records
-        FedMLServerDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id)
-        FedMLModelDatabase.get_instance().delete_deployment_result(
-            model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name,
-            model_version=model_msg_object.model_version)
-        FedMLModelDatabase.get_instance().delete_deployment_run_info(
-            end_point_id=model_msg_object.inference_end_point_id)
-
-        # Delete Redis Records
-        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-        FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-            set_end_point_activation(model_msg_object.inference_end_point_id,
-                                     model_msg_object.end_point_name, False)
-        FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-            delete_end_point(model_msg_object.inference_end_point_id, model_msg_object.end_point_name,
-                             model_msg_object.model_name, model_msg_object.model_version)
-
-        # Send delete deployment request to the edge devices
-        self.send_deployment_delete_request_to_edges(payload, model_msg_object)
-
-        # Stop processes on master
-        self.set_runner_stopped_event(model_msg_object.run_id)
-        self.stop_device_inference_monitor(model_msg_object.run_id, model_msg_object.end_point_name,
-                                           model_msg_object.model_id, model_msg_object.model_name,
-                                           model_msg_object.model_version)
-
-    def send_deployment_results_with_payload(self, end_point_id, end_point_name, payload, replica_id_list=None):
-        self.send_deployment_results(end_point_id, end_point_name,
-                                     payload["model_name"], payload["model_url"],
-                                     payload["model_version"], payload["port"],
-                                     payload["inference_engine"],
-                                     payload["model_metadata"],
-                                     payload["model_config"],
-                                     payload["input_json"],
-                                     payload["output_json"],
-                                     replica_id_list=replica_id_list)
-
-    def send_deployment_results(self, end_point_id, end_point_name,
-                                model_name, model_inference_url,
-                                model_version, inference_port, inference_engine,
-                                model_metadata, model_config, input_json, output_json, replica_id_list=None):
-        deployment_results_topic_prefix = "model_ops/model_device/return_deployment_result"
-        deployment_results_topic = "{}/{}".format(deployment_results_topic_prefix, end_point_id)
-        deployment_results_payload = {"end_point_id": end_point_id, "end_point_name": end_point_name,
-                                      "model_name": model_name, "model_url": model_inference_url,
-                                      "version": model_version, "port": inference_port,
-                                      "inference_engine": inference_engine,
-                                      "model_metadata": model_metadata,
-                                      "model_config": model_config,
-                                      "input_json": input_json,
-                                      "output_json": output_json,
-                                      "timestamp": int(format(time.time_ns() / 1000.0, '.0f')),
-                                      "replica_ids": replica_id_list}
-        logging.info(f"[Master] deployment_results_payload is sent to mlops: {deployment_results_payload}")
-
-        self.client_mqtt_mgr.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload))
-        self.client_mqtt_mgr.send_message_json(deployment_results_topic_prefix, json.dumps(deployment_results_payload))
-
-    def send_deployment_status(self, end_point_id, end_point_name, model_name, model_inference_url, model_status):
-        deployment_status_topic_prefix = "model_ops/model_device/return_deployment_status"
-        deployment_status_topic = "{}/{}".format(deployment_status_topic_prefix, end_point_id)
-        deployment_status_payload = {"end_point_id": end_point_id, "end_point_name": end_point_name,
-                                     "model_name": model_name,
-                                     "model_url": model_inference_url,
-                                     "model_status": model_status,
-                                     "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))}
-        logging.info(f"[Master] deployment_status_payload is sent to mlops: {deployment_status_payload}")
-
-        self.client_mqtt_mgr.send_message_json(deployment_status_topic, json.dumps(deployment_status_payload))
-        self.client_mqtt_mgr.send_message_json(deployment_status_topic_prefix, json.dumps(deployment_status_payload))
-
-    def send_deployment_stages(self, end_point_id, model_name, model_id, model_inference_url,
-                               model_stages_index, model_stages_title, model_stage_detail):
-        deployment_stages_topic_prefix = "model_ops/model_device/return_deployment_stages"
-        deployment_stages_topic = "{}/{}".format(deployment_stages_topic_prefix, end_point_id)
-        deployment_stages_payload = {"model_name": model_name,
-                                     "model_id": model_id,
-                                     "model_url": model_inference_url,
-                                     "end_point_id": end_point_id,
-                                     "model_stage_index": model_stages_index,
-                                     "model_stage_title": model_stages_title,
-                                     "model_stage_detail": model_stage_detail,
-                                     "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))}
-
-        self.client_mqtt_mgr.send_message_json(deployment_stages_topic, json.dumps(deployment_stages_payload))
-        self.client_mqtt_mgr.send_message_json(deployment_stages_topic_prefix, json.dumps(deployment_stages_payload))
-
-        logging.info(f"-------- Stages has been sent to mlops with stage {model_stages_index} and "
-                     f"payload {deployment_stages_payload}")
-        time.sleep(2)
-
-    def on_client_mqtt_disconnected(self, mqtt_client_object):
-        if self.client_mqtt_lock is None:
-            self.client_mqtt_lock = threading.Lock()
-
-        self.client_mqtt_lock.acquire()
-        self.client_mqtt_is_connected = False
-        self.client_mqtt_lock.release()
-
-        logging.info("on_client_mqtt_disconnected: {}.".format(self.client_mqtt_is_connected))
-
-    def on_client_mqtt_connected(self, mqtt_client_object):
-        if self.mlops_metrics is None:
-            self.mlops_metrics = MLOpsMetrics()
-
-        self.mlops_metrics.set_messenger(self.client_mqtt_mgr)
-        self.mlops_metrics.run_id = self.run_id
-        self.mlops_metrics.edge_id = self.edge_id
-        self.mlops_metrics.server_agent_id = self.server_agent_id
-
-        if self.client_mqtt_lock is None:
-            self.client_mqtt_lock = threading.Lock()
-
-        self.client_mqtt_lock.acquire()
-        self.client_mqtt_is_connected = True
-        self.client_mqtt_lock.release()
-
-        # logging.info("on_client_mqtt_connected: {}.".format(self.client_mqtt_is_connected))
-
-    def setup_client_mqtt_mgr(self):
-        if self.client_mqtt_mgr is not None:
-            return
-
-        if self.client_mqtt_lock is None:
-            self.client_mqtt_lock = threading.Lock()
-
-        # logging.info(
-        #     "server agent config: {},{}".format(
-        #         self.agent_config["mqtt_config"]["BROKER_HOST"], self.agent_config["mqtt_config"]["BROKER_PORT"]
-        #     )
-        # )
-
-        self.client_mqtt_mgr = MqttManager(
-            self.agent_config["mqtt_config"]["BROKER_HOST"],
-            self.agent_config["mqtt_config"]["BROKER_PORT"],
-            self.agent_config["mqtt_config"]["MQTT_USER"],
-            self.agent_config["mqtt_config"]["MQTT_PWD"],
-            self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"],
-            "FedML_ModelServerAgent_Metrics_@{}@_{}_{}_{}".format(self.user_name, self.args.current_device_id,
-                                                                  str(os.getpid()),
-                                                                  str(uuid.uuid4()))
-        )
-        self.client_mqtt_mgr.add_connected_listener(self.on_client_mqtt_connected)
-        self.client_mqtt_mgr.add_disconnected_listener(self.on_client_mqtt_disconnected)
-        self.client_mqtt_mgr.connect()
-        self.client_mqtt_mgr.loop_start()
-
-        if self.mlops_metrics is None:
-            self.mlops_metrics = MLOpsMetrics()
-        self.mlops_metrics.set_messenger(self.client_mqtt_mgr)
-        self.mlops_metrics.run_id = self.run_id
-        self.mlops_metrics.edge_id = self.edge_id
-        self.mlops_metrics.server_agent_id = self.server_agent_id
-
-    def release_client_mqtt_mgr(self):
-        try:
-            if self.client_mqtt_mgr is not None:
-                self.client_mqtt_mgr.loop_stop()
-                self.client_mqtt_mgr.disconnect()
-
-            self.client_mqtt_lock.acquire()
-            if self.client_mqtt_mgr is not None:
-                self.client_mqtt_is_connected = False
-                self.client_mqtt_mgr = None
-            self.client_mqtt_lock.release()
-        except Exception:
-            pass
-
-    def send_deployment_stop_request_to_edges(self, edge_id_list, payload):
-        for edge_id in edge_id_list:
-            topic_stop_deployment = "model_ops/model_device/stop_deployment/{}".format(str(self.edge_id))
-            logging.info("stop_deployment: send topic " + topic_stop_deployment)
-            self.client_mqtt_mgr.send_message_json(topic_stop_deployment, payload)
-
-    def send_exit_train_with_exception_request_to_edges(self, edge_id_list, payload):
-        for edge_id in edge_id_list:
-            topic_exit_train = "flserver_agent/" + str(edge_id) + "/exit_train_with_exception"
-            logging.info("exit_train_with_exception: send topic " + topic_exit_train)
-            self.client_mqtt_mgr.send_message_json(topic_exit_train, payload)
-
-    def exit_run_with_exception_entry(self):
-        try:
-            self.setup_client_mqtt_mgr()
-            self.exit_run_with_exception()
-        except Exception as e:
-            self.release_client_mqtt_mgr()
-            sys_utils.cleanup_all_fedml_server_login_processes(
-                ServerConstants.SERVER_LOGIN_PROGRAM, clean_process_group=False)
-            sys.exit(1)
-        finally:
-            self.release_client_mqtt_mgr()
-
-    def exit_run_with_exception(self):
-        logging.info("Exit run successfully.")
-
-        ServerConstants.cleanup_learning_process(self.run_id)
-        ServerConstants.cleanup_run_process(self.run_id)
-
-        self.mlops_metrics.report_server_id_status(
-            self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id)
-
-        time.sleep(1)
-
-    def callback_exit_train_with_exception(self, topic, payload):
-        # logging.info("callback_exit_train_with_exception: topic = %s, payload = %s" % (topic, payload))
-
-        request_json = json.loads(payload)
-        is_retain = request_json.get("is_retain", False)
-        if is_retain:
-            return
-        run_id = request_json.get("runId", None)
-        if run_id is None:
-            run_id = request_json.get("run_id", None)
-            if run_id is None:
-                run_id = request_json.get("id", None)
-
-        if run_id is None:
-            return
-
-        edge_ids = request_json.get("edgeids", None)
-
-        self.send_exit_train_with_exception_request_to_edges(edge_ids, payload)
-
-        # Stop server with multiprocessing mode
-        self.request_json = request_json
-        server_runner = FedMLServerRunner(
-            self.args, edge_id=self.edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id
-        )
-        try:
-            Process(target=server_runner.exit_run_with_exception_entry).start()
-        except Exception as e:
-            pass
-
-    def callback_client_exit_train_with_exception(self, topic, payload):
-        # logging.info("callback_client_exit_train_with_exception: topic = %s, payload = %s" % (topic, payload))
-
-        request_json = json.loads(payload)
-        run_id = request_json.get("run_id", None)
-        edge_id = request_json.get("edge_id", None)
-        if run_id is None:
-            logging.info("callback_client_exit_train_with_exception run id is none")
-            return
-
-        job = FedMLServerDataInterface.get_instance().get_job_by_id(run_id)
-        if job is not None and job.running_json is not None and job.running_json != "":
-            job_json_obj = json.loads(job.running_json)
-            edge_ids = job_json_obj.get("edgeids", None)
-
-            self.mlops_metrics.broadcast_server_training_status(
-                run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED,
-                is_from_model=True, edge_id=edge_id)
-
-            self.send_exit_train_with_exception_request_to_edges(edge_ids, job.running_json)
-
-            self.exit_run_with_exception()
-
-    def callback_runner_id_status(self, topic, payload):
-        logging.info("callback_runner_id_status: topic = %s, payload = %s" % (topic, payload))
-
-        request_json = json.loads(payload)
-        is_retain = request_json.get("is_retain", False)
-        if is_retain:
-            return
-        run_id = request_json["run_id"]
-        status = request_json["status"]
-        edge_id = request_json["edge_id"]
-        run_id_str = str(run_id)
-
-        if (
-                status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED
-                or status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED
-        ):
-            # Stop server with multiprocessing mode
-            stop_request_json = self.running_request_json.get(run_id_str, None)
-            if stop_request_json is None:
-                stop_request_json = request_json
-            if self.run_as_edge_server_and_agent:
-                server_runner = FedMLServerRunner(
-                    self.args, run_id=run_id, request_json=stop_request_json, agent_config=self.agent_config
-                )
-                server_runner.edge_id = self.edge_id
-                server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent
-                server_runner.run_status = status
-                status_process = Process(target=server_runner.cleanup_client_with_status)
-                status_process.start()
-                status_process.join(10)
-
-                # Stop log processor for current run
-                MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id)
-
-    def cleanup_client_with_status(self):
-        if self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED:
-            logging.info("received to finished status.")
-            self.cleanup_run_when_finished()
-        elif self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED:
-            logging.info("received to failed status.")
-            self.cleanup_run_when_starting_failed()
-
-    def callback_report_current_status(self, topic, payload):
-        request_json = json.loads(payload)
-        if self.run_as_edge_server_and_agent:
-            self.send_agent_active_msg()
-        elif self.run_as_cloud_agent:
-            self.send_agent_active_msg()
-        elif self.run_as_cloud_server:
-            pass
-
-    @staticmethod
-    def process_ota_upgrade_msg():
-        os.system("pip install -U fedml")
-
-    def callback_server_ota_msg(self, topic, payload):
-        request_json = json.loads(payload)
-        cmd = request_json["cmd"]
-
-        if cmd == ServerConstants.FEDML_OTA_CMD_UPGRADE:
-            try:
-                self.process_ota_upgrade_msg()
-                # Process(target=FedMLServerRunner.process_ota_upgrade_msg).start()
-                raise Exception("After upgraded, restart runner...")
-            except Exception as e:
-                pass
-        elif cmd == ServerConstants.FEDML_OTA_CMD_RESTART:
-            raise Exception("Restart runner...")
-
-    @staticmethod
-    def get_device_id():
-        device_file_path = os.path.join(ServerConstants.get_data_dir(), ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME)
-        file_for_device_id = os.path.join(device_file_path, "devices.id")
-        if not os.path.exists(device_file_path):
-            os.makedirs(device_file_path)
-        elif os.path.exists(file_for_device_id):
-            with open(file_for_device_id, 'r', encoding='utf-8') as f:
-                device_id_from_file = f.readline()
-                if device_id_from_file is not None and device_id_from_file != "":
-                    return device_id_from_file
-
-        if platform.system() == "Darwin":
-            cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \
-                                 "|awk -F':' '{print $2}' "
-            device_id = os.popen(cmd_get_serial_num).read()
-            device_id = device_id.replace('\n', '').replace(' ', '')
-            if device_id is None or device_id == "":
-                device_id = hex(uuid.getnode())
-            else:
-                device_id = "0x" + device_id
-        else:
-            if "nt" in os.name:
-
-                def get_uuid():
-                    guid = ""
-                    try:
-                        cmd = "wmic csproduct get uuid"
-                        guid = str(subprocess.check_output(cmd))
-                        pos1 = guid.find("\\n") + 2
-                        guid = guid[pos1:-15]
-                    except Exception as ex:
-                        pass
-                    return str(guid)
-
-                device_id = str(get_uuid())
-            elif "posix" in os.name:
-                device_id = sys_utils.get_device_id_in_docker()
-                if device_id is None:
-                    device_id = hex(uuid.getnode())
-            else:
-                device_id = sys_utils.run_subprocess_open(
-                    "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split()
-                )
-                device_id = hex(device_id)
-
-        if device_id is not None and device_id != "":
-            with open(file_for_device_id, 'w', encoding='utf-8') as f:
-                f.write(device_id)
-        else:
-            device_id = hex(uuid.uuid4())
-            with open(file_for_device_id, 'w', encoding='utf-8') as f:
-                f.write(device_id)
-
-        return device_id
-
-    def bind_account_and_device_id(self, url, account_id, device_id, os_name):
-        role = ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_ON_PREMISE_MASTER_INDEX]
-        if self.run_as_edge_server_and_agent:
-            role = ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_ON_PREMISE_MASTER_INDEX]
-        elif self.run_as_cloud_agent:
-            role = ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_FEDML_CLOUD_MASTER_INDEX]
-        elif self.run_as_cloud_server:
-            role = ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_INFERENCE_INSTANCE_INDEX]
-
-        ip = requests.get('https://checkip.amazonaws.com').text.strip()
-        fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \
-            cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \
-            gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info()
-        host_name = sys_utils.get_host_name()
-        json_params = {
-            "accountid": account_id,
-            "deviceid": device_id,
-            "type": os_name,
-            "state": ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE,
-            "status": ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE,
-            "processor": cpu_info,
-            "core_type": cpu_info,
-            "network": "",
-            "role": role,
-            "os_ver": os_ver,
-            "memory": total_mem,
-            "ip": ip,
-            "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver,
-                            "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver,
-                            "mpi_installed": mpi_installed, "cpu_usage": cpu_usage,
-                            "available_mem": available_mem, "total_mem": total_mem,
-                            "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name}
-        }
-        if gpu_count > 0:
-            if gpu_total_mem is not None:
-                json_params["gpu"] = gpu_info if gpu_info is not None else "" + ", Total GPU Memory: " + gpu_total_mem
-            else:
-                json_params["gpu"] = gpu_info if gpu_info is not None else ""
-            json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else ""
-            if gpu_available_mem is not None:
-                json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem
-            if gpu_total_mem is not None:
-                json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem
-
-            json_params["extra_infos"]["gpu_count"] = gpu_count
-            json_params["extra_infos"]["gpu_vendor"] = gpu_vendor
-            json_params["extra_infos"]["gpu_device_name"] = gpu_device_name
-
-            gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count)
-            gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0
-            gpu_list = sys_utils.get_gpu_list()
-            json_params["extra_infos"]["gpu_available_count"] = gpu_available_count
-            json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list
-            json_params["extra_infos"]["gpu_list"] = gpu_list
-        else:
-            json_params["gpu"] = "None"
-            json_params["extra_infos"]["gpu_available_count"] = 0
-            json_params["extra_infos"]["gpu_available_id_list"] = []
-            json_params["extra_infos"]["gpu_list"] = []
-
-        _, cert_path = MLOpsConfigs.get_request_params()
-        if cert_path is not None:
-            try:
-                requests.session().verify = cert_path
-                response = requests.post(
-                    url, json=json_params, verify=True,
-                    headers={"content-type": "application/json", "Connection": "close"}
-                )
-            except requests.exceptions.SSLError as err:
-                MLOpsConfigs.install_root_ca_file()
-                response = requests.post(
-                    url, json=json_params, verify=True,
-                    headers={"content-type": "application/json", "Connection": "close"}
-                )
-        else:
-            response = requests.post(url, json=json_params, headers={"Connection": "close"})
-        edge_id = -1
-        user_name = None
-        extra_url = None
-        if response.status_code != 200:
-            print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                  f"response.content: {response.content}")
-            pass
-        else:
-            # print("url = {}, response = {}".format(url, response))
-            status_code = response.json().get("code")
-            if status_code == "SUCCESS":
-                edge_id = response.json().get("data").get("id")
-                user_name = response.json().get("data").get("userName", None)
-                extra_url = response.json().get("data").get("url", None)
-                if edge_id is None or edge_id <= 0:
-                    print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                          f"response.content: {response.content}")
-            else:
-                if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR:
-                    raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR)
-                print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                      f"response.content: {response.content}")
-                return -1, None, None
-        return edge_id, user_name, extra_url
-
-    def fetch_configs(self):
-        return MLOpsConfigs.fetch_all_configs()
-
-    def send_agent_active_msg(self):
-        active_topic = "flserver_agent/active"
-        status = MLOpsStatus.get_instance().get_server_agent_status(self.edge_id)
-        if (
-                status is not None
-                and status != ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE
-                and status != ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
-        ):
-            return
-
-        status = ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
-        active_msg = {"ID": self.edge_id, "status": status}
-        MLOpsStatus.get_instance().set_server_agent_status(self.edge_id, status)
-        self.mqtt_mgr.send_message_json(active_topic, json.dumps(active_msg))
-
-    def subscribe_slave_devices_message(self, request_json):
-        if request_json is None:
-            return
-        run_id = request_json["run_id"]
-        edge_id_list = request_json["device_ids"]
-        for edge_id in edge_id_list:
-            if str(edge_id) == str(self.edge_id):
-                continue
-
-            # subscribe deployment result message for each model device
-            deployment_results_topic = "model_device/model_device/return_deployment_result/{}/{}".format(
-                run_id, edge_id)
-
-            self.mqtt_mgr.add_message_listener(deployment_results_topic, self.callback_deployment_result_message)
-            self.mqtt_mgr.subscribe_msg(deployment_results_topic)
-
-    def subscribe_spec_device_message(self, run_id, device_id):
-        if device_id == self.edge_id:
-            return
-
-        # subscribe deployment result message for each model device
-        deployment_results_topic = "model_device/model_device/return_deployment_result/{}/{}".format(
-            run_id, device_id)
-
-        self.mqtt_mgr.add_message_listener(deployment_results_topic, self.callback_deployment_result_message)
-        self.mqtt_mgr.subscribe_msg(deployment_results_topic)
-
-    def on_agent_mqtt_connected(self, mqtt_client_object):
-        # The MQTT message topic format is as follows: <sender>/<receiver>/<action>
-
-        # Setup MQTT message listener for starting deployment
-        server_agent_id = self.edge_id
-        topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id))
-        self.mqtt_mgr.add_message_listener(topic_start_deployment, self.callback_start_deployment)
-
-        # Setup MQTT message listener for activating deployment
-        topic_activate_deployment = "model_ops/model_device/activate_deployment/{}".format(str(self.edge_id))
-        self.mqtt_mgr.add_message_listener(topic_activate_deployment, self.callback_activate_deployment)
-
-        # Setup MQTT message listener for deactivating deployment
-        topic_deactivate_deployment = "model_ops/model_device/deactivate_deployment/{}".format(str(self.edge_id))
-        self.mqtt_mgr.add_message_listener(topic_deactivate_deployment, self.callback_deactivate_deployment)
-
-        # Setup MQTT message listener for delete deployment
-        topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(self.edge_id))
-        self.mqtt_mgr.add_message_listener(topic_delete_deployment, self.callback_delete_deployment)
-
-        # Setup MQTT message listener for server status switching
-        topic_server_status = "fl_server/flserver_agent_" + str(server_agent_id) + "/status"
-        self.mqtt_mgr.add_message_listener(topic_server_status, self.callback_runner_id_status)
-
-        # Setup MQTT message listener to report current device status.
-        topic_report_status = "mlops/report_device_status"
-        self.mqtt_mgr.add_message_listener(topic_report_status, self.callback_report_current_status)
-
-        # Setup MQTT message listener to OTA messages from the MLOps.
-        topic_ota_msg = "mlops/flserver_agent_" + str(server_agent_id) + "/ota"
-        self.mqtt_mgr.add_message_listener(topic_ota_msg, self.callback_server_ota_msg)
-
-        # Subscribe topics for starting train, stopping train and fetching client status.
-        mqtt_client_object.subscribe(topic_start_deployment, qos=2)
-        mqtt_client_object.subscribe(topic_activate_deployment, qos=2)
-        mqtt_client_object.subscribe(topic_deactivate_deployment, qos=2)
-        mqtt_client_object.subscribe(topic_delete_deployment, qos=2)
-        mqtt_client_object.subscribe(topic_server_status, qos=2)
-        mqtt_client_object.subscribe(topic_report_status, qos=2)
-        mqtt_client_object.subscribe(topic_ota_msg, qos=2)
-
-        self.subscribed_topics.clear()
-        self.subscribed_topics.append(topic_start_deployment)
-        self.subscribed_topics.append(topic_activate_deployment)
-        self.subscribed_topics.append(topic_deactivate_deployment)
-        self.subscribed_topics.append(topic_delete_deployment)
-        self.subscribed_topics.append(topic_server_status)
-        self.subscribed_topics.append(topic_report_status)
-        self.subscribed_topics.append(topic_ota_msg)
-
-        self.endpoint_sync_protocol = FedMLEndpointSyncProtocol(agent_config=self.agent_config, mqtt_mgr=self.mqtt_mgr)
-        self.endpoint_sync_protocol.setup_listener_for_sync_device_info(self.edge_id)
-
-        # Broadcast the first active message.
-        self.send_agent_active_msg()
-
-        # Echo results
-        # print("\n\nCongratulations, your device is connected to the FedML MLOps platform successfully!")
-        # print(
-        #     "Your FedML Edge ID is " + str(self.edge_id) + ", unique device ID is "
-        #     + str(self.unique_device_id)
-        #     + "\n"
-        # )
-
-        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
-
-    def on_agent_mqtt_disconnected(self, mqtt_client_object):
-        MLOpsStatus.get_instance().set_server_agent_status(
-            self.edge_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE
-        )
-
-    def recover_inference_and_monitor(self):
-        try:
-            history_jobs = FedMLServerDataInterface.get_instance().get_history_jobs()
-            for job in history_jobs.job_list:
-                if job.running_json is None:
-                    continue
-
-                if job.deployment_result == "":
-                    continue
-
-                run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \
-                    model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \
-                    inference_end_point_id, use_gpu, memory_size, model_version, inference_port = \
-                    self.parse_model_run_params(json.loads(job.running_json))
-
-                FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-                is_activated = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                    get_end_point_activation(run_id)
-                if not is_activated:
-                    continue
-
-                self.start_device_inference_gateway(run_id, end_point_name, model_id, model_name, model_version,
-                                                    inference_port=inference_port)
-
-                self.stop_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version)
-                self.start_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version)
-        except Exception as e:
-            logging.info("recover inference and monitor: {}".format(traceback.format_exc()))
-
-    def recover_start_deployment_msg_after_upgrading(self):
-        try:
-            current_job = FedMLServerDataInterface.get_instance().get_current_job()
-            if current_job is not None and \
-                    current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING:
-                FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
-                is_activated = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
-                    get_end_point_activation(current_job.job_id)
-                if not is_activated:
-                    return
-                logging.info("start deployment after upgrading.")
-                topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id))
-                self.callback_start_deployment(topic_start_deployment, current_job.running_json)
-        except Exception as e:
-            logging.info("recover starting deployment message after upgrading: {}".format(traceback.format_exc()))
-
-    def setup_agent_mqtt_connection(self, service_config):
-        # Setup MQTT connection
-        self.mqtt_mgr = MqttManager(
-            service_config["mqtt_config"]["BROKER_HOST"],
-            service_config["mqtt_config"]["BROKER_PORT"],
-            service_config["mqtt_config"]["MQTT_USER"],
-            service_config["mqtt_config"]["MQTT_PWD"],
-            service_config["mqtt_config"]["MQTT_KEEPALIVE"],
-            "FedML_ModelServerAgent_Daemon_@" + self.user_name + "@_" + self.args.current_device_id + str(uuid.uuid4()),
-            "flserver_agent/last_will_msg",
-            json.dumps({"ID": self.edge_id, "status": ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE})
-        )
-        self.agent_config = service_config
-
-        # Init local database
-        FedMLServerDataInterface.get_instance().create_job_table()
-        try:
-            FedMLModelDatabase.get_instance().set_database_base_dir(ServerConstants.get_database_dir())
-            FedMLModelDatabase.get_instance().create_table()
-        except Exception as e:
-            pass
-
-        server_api_cmd = "fedml.computing.scheduler.model_scheduler.device_server_api:api"
-        server_api_pids = RunProcessUtils.get_pid_from_cmd_line(server_api_cmd)
-        if server_api_pids is None or len(server_api_pids) <= 0:
-            # Start local API services
-            cur_dir = os.path.dirname(__file__)
-            fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
-            python_program = get_python_program()
-            self.local_api_process = ServerConstants.exec_console_with_script(
-                "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
-                "--log-level critical".format(
-                    python_program, server_api_cmd, ServerConstants.LOCAL_SERVER_API_PORT,
-                    fedml_base_dir
-                ),
-                should_capture_stdout=False,
-                should_capture_stderr=False
-            )
-            # if self.local_api_process is not None and self.local_api_process.pid is not None:
-            #     print(f"Model master local API process id {self.local_api_process.pid}")
-
-        self.recover_inference_and_monitor()
-
-        # MLOpsRuntimeLogDaemon.get_instance(self.args).stop_all_log_processor()
-
-        # Setup MQTT connected listener
-        self.mqtt_mgr.add_connected_listener(self.on_agent_mqtt_connected)
-        self.mqtt_mgr.add_disconnected_listener(self.on_agent_mqtt_disconnected)
-        self.mqtt_mgr.connect()
-
-        self.setup_client_mqtt_mgr()
-        self.mlops_metrics.report_server_training_status(
-            self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE,
-            is_from_model=True, edge_id=self.edge_id)
-        MLOpsStatus.get_instance().set_server_agent_status(
-            self.edge_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
-        )
-
-        self.recover_start_deployment_msg_after_upgrading()
-
-    def stop_agent(self):
-        if self.run_process_event is not None:
-            self.run_process_event.set()
-
-        if self.mqtt_mgr is not None:
-            try:
-                for topic in self.subscribed_topics:
-                    self.mqtt_mgr.unsubscribe_msg(topic)
-            except Exception as e:
-                pass
-
-            self.mqtt_mgr.loop_stop()
-            self.mqtt_mgr.disconnect()
-
-        self.release_client_mqtt_mgr()
-
-    def start_agent_mqtt_loop(self, should_exit_sys=True):
-        # Start MQTT message loop
-        try:
-            self.mqtt_mgr.loop_forever()
-        except Exception as e:
-            if str(e) == "Restarting after upgraded...":
-                logging.info("Restarting after upgraded...")
-            else:
-                print("Server tracing: {}".format(traceback.format_exc()))
-        finally:
-            self.stop_agent()
-            if should_exit_sys:
-                pass
-                """
-                    # Deprecated, will kill the process by the parent process.
-                    time.sleep(5)
-                    sys_utils.cleanup_all_fedml_server_login_processes(
-                    ServerConstants.SERVER_LOGIN_PROGRAM, clean_process_group=False)
-                    sys.exit(1)
-                """
-
diff --git a/python/fedml/computing/scheduler/slave/client_runner_deprecated.py b/python/fedml/computing/scheduler/slave/client_runner_deprecated.py
deleted file mode 100755
index 79b5697728..0000000000
--- a/python/fedml/computing/scheduler/slave/client_runner_deprecated.py
+++ /dev/null
@@ -1,1872 +0,0 @@
-import json
-import logging
-import multiprocessing
-import sys
-
-from multiprocessing import Process
-import os
-import platform
-import shutil
-import subprocess
-import threading
-
-import time
-import traceback
-import urllib
-import uuid
-import zipfile
-from urllib.parse import urljoin, urlparse
-
-import requests
-
-import fedml
-from ..comm_utils.constants import SchedulerConstants
-from ..comm_utils.job_cleanup import JobCleanup
-from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs
-from ..comm_utils.run_process_utils import RunProcessUtils
-from ..scheduler_entry.constants import Constants
-from ....core.mlops.mlops_device_perfs import MLOpsDevicePerfStats
-from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
-
-from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager
-from ..comm_utils.yaml_utils import load_yaml_config
-from .client_constants import ClientConstants
-
-from ....core.mlops.mlops_metrics import MLOpsMetrics
-
-from ....core.mlops.mlops_configs import MLOpsConfigs
-from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
-from ....core.mlops.mlops_status import MLOpsStatus
-from ..comm_utils.sys_utils import get_sys_runner_info, get_python_program
-from .client_data_interface import FedMLClientDataInterface
-from ..comm_utils import sys_utils
-from ....core.mlops.mlops_utils import MLOpsUtils
-from ..model_scheduler.model_device_client import FedMLModelDeviceClientRunner
-from ..model_scheduler.model_device_server import FedMLModelDeviceServerRunner
-from ..comm_utils import security_utils
-from ..scheduler_core.compute_cache_manager import ComputeCacheManager
-from ..scheduler_core.message_center import FedMLMessageCenter
-import ssl
-
-
-class RunnerError(Exception):
-    """ Runner stopped. """
-    pass
-
-
-class RunnerCompletedError(Exception):
-    """ Runner completed. """
-    pass
-
-
-class FedMLClientRunner(FedMLMessageCenter):
-
-    def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id=0,
-                 cuda_visible_gpu_ids_str=None):
-        super().__init__()
-        self.model_device_server_id = None
-        self.model_device_client_edge_id_list = None
-        self.disable_client_login = False
-        self.model_device_server = None
-        self.model_device_client_list = None
-        self.run_process_event = None
-        self.run_process_event_map = dict()
-        self.run_process_completed_event = None
-        self.run_process_completed_event_map = dict()
-        self.run_process = None
-        self.run_process_map = dict()
-        self.running_request_json = dict()
-        self.local_api_process = None
-        self.start_request_json = None
-        self.device_status = None
-        self.current_training_status = None
-        self.mqtt_mgr = None
-        self.edge_id = edge_id
-        self.edge_user_name = None
-        self.edge_extra_url = None
-        self.run_id = run_id
-        self.unique_device_id = None
-        self.args = args
-        self.request_json = request_json
-        self.version = args.version
-        self.device_id = args.device_id
-        self.cur_dir = os.path.split(os.path.realpath(__file__))[0]
-        if args.current_running_dir is not None:
-            self.cur_dir = args.current_running_dir
-        self.sudo_cmd = ""
-        self.is_mac = False
-        if platform.system() == "Darwin":
-            self.is_mac = True
-
-        self.agent_config = agent_config
-        self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data")
-        self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data")
-        self.fedml_data_dir = self.fedml_data_base_package_dir
-        self.fedml_config_dir = os.path.join("/", "fedml", "conf")
-
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {
-            "${FEDSYS.RUN_ID}": "",
-            "${FEDSYS.PRIVATE_LOCAL_DATA}": "",
-            "${FEDSYS.CLIENT_ID_LIST}": "",
-            "${FEDSYS.SYNTHETIC_DATA_URL}": "",
-            "${FEDSYS.IS_USING_LOCAL_DATA}": "",
-            "${FEDSYS.CLIENT_NUM}": "",
-            "${FEDSYS.CLIENT_INDEX}": "",
-            "${FEDSYS.CLIENT_OBJECT_LIST}": "",
-            "${FEDSYS.LOG_SERVER_URL}": "",
-        }
-
-        self.mlops_metrics = None
-        self.client_active_list = dict()
-        self.ntp_offset = MLOpsUtils.get_ntp_offset()
-        self.server_id = None
-        self.computing_started_time = 0
-        self.fedml_config_object = None
-        self.package_type = SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT
-        self.cuda_visible_gpu_ids_str = cuda_visible_gpu_ids_str
-        # logging.info("Current directory of client agent: " + self.cur_dir)
-        self.subscribed_topics = list()
-        self.user_name = None
-        self.general_edge_id = None
-        self.message_center = None
-
-    def __repr__(self):
-        return "<{klass} @{id:x} {attrs}>".format(
-            klass=self.__class__.__name__,
-            id=id(self) & 0xFFFFFF,
-            attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()),
-        )
-
-    def copy_runner(self):
-        copy_runner = FedMLClientRunner(self.args)
-        copy_runner.disable_client_login =  self.disable_client_login
-        copy_runner.model_device_server = self.model_device_server
-        copy_runner.model_device_client_list = self.model_device_client_list
-        copy_runner.run_process_event = self.run_process_event
-        copy_runner.run_process_event_map = self.run_process_event_map
-        copy_runner.run_process_completed_event = self.run_process_completed_event
-        copy_runner.run_process_completed_event_map = self.run_process_completed_event_map
-        copy_runner.run_process = self.run_process
-        copy_runner.run_process_map = self.run_process_map
-        copy_runner.running_request_json = self.running_request_json
-        copy_runner.local_api_process = self.local_api_process
-        copy_runner.start_request_json = self.start_request_json
-        copy_runner.device_status = self.device_status
-        copy_runner.current_training_status = self.current_training_status
-        copy_runner.mqtt_mgr = self.mqtt_mgr
-        copy_runner.edge_id = self.edge_id
-        copy_runner.edge_user_name = self.edge_user_name
-        copy_runner.edge_extra_url = self.edge_extra_url
-        copy_runner.run_id = self.run_id
-        copy_runner.unique_device_id = self.unique_device_id
-        copy_runner.args = self.args
-        copy_runner.request_json = self.request_json
-        copy_runner.version =self.version
-        copy_runner.device_id = self.device_id
-        copy_runner.cur_dir = self.cur_dir
-        copy_runner.cur_dir = self.cur_dir
-        copy_runner.sudo_cmd = self.sudo_cmd
-        copy_runner.is_mac = self.is_mac
-
-        copy_runner.agent_config = self.agent_config
-        copy_runner.fedml_data_base_package_dir = self.fedml_data_base_package_dir
-        copy_runner.fedml_data_local_package_dir = self.fedml_data_local_package_dir
-        copy_runner.fedml_data_dir = self.fedml_data_dir
-        copy_runner.fedml_config_dir = self.fedml_config_dir
-
-        copy_runner.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES
-
-        copy_runner.mlops_metrics = self.mlops_metrics
-        copy_runner.client_active_list = self.client_active_list
-        copy_runner.ntp_offset = self.ntp_offset
-        copy_runner.server_id = self.server_id
-        copy_runner.computing_started_time = self.computing_started_time
-        copy_runner.fedml_config_object = self.fedml_config_object
-        copy_runner.package_type = self.package_type
-        copy_runner.cuda_visible_gpu_ids_str = self.cuda_visible_gpu_ids_str
-        copy_runner.subscribed_topics = self.subscribed_topics
-        copy_runner.user_name = self.user_name
-        copy_runner.general_edge_id = self.general_edge_id
-        copy_runner.message_center = self.message_center
-
-        return copy_runner
-
-    def build_dynamic_constrain_variables(self, run_id, run_config):
-        data_config = run_config.get("data_config", {})
-        server_edge_id_list = self.request_json["edgeids"]
-        local_edge_id_list = list()
-        local_edge_id_list.append(int(self.edge_id))
-        is_using_local_data = 0
-        private_data_dir = data_config.get("privateLocalData", "")
-        synthetic_data_url = data_config.get("syntheticDataUrl", "")
-        edges = self.request_json["edges"]
-        # if private_data_dir is not None \
-        #         and len(str(private_data_dir).strip(' ')) > 0:
-        #     is_using_local_data = 1
-        if private_data_dir is None or len(str(private_data_dir).strip(" ")) <= 0:
-            params_config = run_config.get("parameters", None)
-            private_data_dir = ClientConstants.get_data_dir()
-        if synthetic_data_url is None or len(str(synthetic_data_url)) <= 0:
-            synthetic_data_url = private_data_dir
-
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.RUN_ID}"] = run_id
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.PRIVATE_LOCAL_DATA}"] = private_data_dir.replace(" ", "")
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_ID_LIST}"] = str(local_edge_id_list).replace(" ", "")
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.SYNTHETIC_DATA_URL}"] = synthetic_data_url.replace(" ", "")
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.IS_USING_LOCAL_DATA}"] = str(is_using_local_data)
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_NUM}"] = len(server_edge_id_list)
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_INDEX}"] = 1
-        for cur_index, id_value in enumerate(server_edge_id_list):
-            if str(id_value) == str(self.edge_id):
-                self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_INDEX}"] = cur_index + 1
-                break
-        client_objects = str(json.dumps(edges))
-        client_objects = client_objects.replace(" ", "").replace("\n", "").replace('"', '\\"')
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_OBJECT_LIST}"] = client_objects
-        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.LOG_SERVER_URL}"] = self.agent_config["ml_ops_config"][
-            "LOG_SERVER_URL"
-        ]
-
-    def unzip_file(self, zip_file, unzip_file_path) -> str:
-        if zipfile.is_zipfile(zip_file):
-            with zipfile.ZipFile(zip_file, "r") as zipf:
-                zipf.extractall(unzip_file_path)
-                unzipped_file_name = zipf.namelist()[0]
-        else:
-            raise Exception("Invalid zip file {}".format(zip_file))
-
-        return unzipped_file_name
-
-    def package_download_progress(self, count, blksize, filesize):
-        self.check_runner_stop_event()
-
-        downloaded = count * blksize
-        downloaded = filesize if downloaded > filesize else downloaded
-        progress = (downloaded / filesize * 100) if filesize != 0 else 0
-        progress_int = int(progress)
-        downloaded_kb = format(downloaded / 1024, '.2f')
-
-        # since this hook funtion is stateless, we need a state to avoid print progress repeatly
-        if count == 0:
-            self.prev_download_progress = 0
-        if progress_int != self.prev_download_progress and progress_int % 5 == 0:
-            self.prev_download_progress = progress_int
-            logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int))
-
-    def retrieve_and_unzip_package(self, package_name, package_url):
-        local_package_path = ClientConstants.get_package_download_dir()
-        os.makedirs(local_package_path, exist_ok=True)
-        filename, filename_without_extension, file_extension = ClientConstants.get_filename_and_extension(package_url)
-        local_package_file = os.path.join(local_package_path, f"fedml_run_{self.run_id}_{filename_without_extension}")
-        if os.path.exists(local_package_file):
-            os.remove(local_package_file)
-        ssl._create_default_https_context = ssl._create_unverified_context
-        urllib.request.urlretrieve(package_url, local_package_file,
-                                   reporthook=self.package_download_progress)
-        unzip_package_path = os.path.join(ClientConstants.get_package_unzip_dir(),
-                                          f"unzip_fedml_run_{self.run_id}_{filename_without_extension}")
-        try:
-            shutil.rmtree(unzip_package_path, ignore_errors=True)
-        except Exception as e:
-            logging.error(
-                f"Failed to remove directory {unzip_package_path}, Exception: {e}, Traceback: {traceback.format_exc()}")
-            pass
-
-        package_dir_name = self.unzip_file(local_package_file, unzip_package_path)  # Using unziped folder name
-        unzip_package_full_path = os.path.join(unzip_package_path, package_dir_name)
-
-        logging.info("local_package_file {}, unzip_package_path {}, unzip file full path {}".format(
-            local_package_file, unzip_package_path, unzip_package_full_path))
-
-        return unzip_package_full_path
-
-    def update_local_fedml_config(self, run_id, run_config):
-        packages_config = run_config["packages_config"]
-
-        # Copy config file from the client
-        unzip_package_path = self.retrieve_and_unzip_package(
-            packages_config["linuxClient"], packages_config["linuxClientUrl"]
-        )
-        fedml_local_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml")
-
-        # Load the above config to memory
-        config_from_container = load_yaml_config(fedml_local_config_file)
-        container_entry_file_config = config_from_container["entry_config"]
-        container_dynamic_args_config = config_from_container["dynamic_args"]
-        entry_file = container_entry_file_config["entry_file"]
-        conf_file = container_entry_file_config["conf_file"]
-        self.package_type = container_entry_file_config.get("package_type", SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT)
-        full_conf_path = os.path.join(unzip_package_path, "fedml", "config", os.path.basename(conf_file))
-
-        # Dynamically build constrain variable with realtime parameters from server
-        self.build_dynamic_constrain_variables(run_id, run_config)
-
-        # Update entry arguments value with constrain variable values with realtime parameters from server
-        # currently we support the following constrain variables:
-        # ${FEDSYS_RUN_ID}: a run id represented one entire Federated Learning flow
-        # ${FEDSYS_PRIVATE_LOCAL_DATA}: private local data path in the Federated Learning client
-        # ${FEDSYS_CLIENT_ID_LIST}: client list in one entire Federated Learning flow
-        # ${FEDSYS_SYNTHETIC_DATA_URL}: synthetic data url from server,
-        #                  if this value is not null, the client will download data from this URL to use it as
-        #                  federated training data set
-        # ${FEDSYS_IS_USING_LOCAL_DATA}: whether use private local data as federated training data set
-        # container_dynamic_args_config["data_cache_dir"] = "${FEDSYS.PRIVATE_LOCAL_DATA}"
-        for constrain_variable_key, constrain_variable_value in self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES.items():
-            for argument_key, argument_value in container_dynamic_args_config.items():
-                if argument_value is not None and str(argument_value).find(constrain_variable_key) == 0:
-                    replaced_argument_value = str(argument_value).replace(
-                        constrain_variable_key, str(constrain_variable_value)
-                    )
-                    container_dynamic_args_config[argument_key] = replaced_argument_value
-
-        # Merge all container new config sections as new config dictionary
-        package_conf_object = dict()
-        package_conf_object["entry_config"] = container_entry_file_config
-        package_conf_object["dynamic_args"] = container_dynamic_args_config
-        package_conf_object["dynamic_args"]["config_version"] = self.args.config_version
-        container_dynamic_args_config["mqtt_config_path"] = os.path.join(
-            unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["mqtt_config_path"])
-        )
-        container_dynamic_args_config["s3_config_path"] = os.path.join(
-            unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["s3_config_path"])
-        )
-        log_file_dir = ClientConstants.get_log_file_dir()
-        os.makedirs(log_file_dir, exist_ok=True)
-        package_conf_object["dynamic_args"]["log_file_dir"] = log_file_dir
-
-        # Save new config dictionary to local file
-        fedml_updated_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml")
-        ClientConstants.generate_yaml_doc(package_conf_object, fedml_updated_config_file)
-
-        # Build dynamic arguments and set arguments to fedml config object
-        self.build_dynamic_args(run_id, run_config, package_conf_object, unzip_package_path)
-        return unzip_package_path, package_conf_object
-
-    def build_dynamic_args(self, run_id, run_config, package_conf_object, base_dir):
-        fedml_conf_file = package_conf_object["entry_config"]["conf_file"]
-        fedml_conf_file_processed = str(fedml_conf_file).replace('\\', os.sep).replace('/', os.sep)
-        fedml_conf_path = os.path.join(base_dir, "fedml", "config",
-                                       os.path.basename(fedml_conf_file_processed))
-        fedml_conf_object = load_yaml_config(fedml_conf_path)
-        run_params = run_config.get("parameters", {})
-        job_yaml = run_params.get("job_yaml", {})
-
-        # Replace local fedml config objects with parameters from MLOps web
-        parameters_object = run_config.get("parameters", None)
-        if parameters_object is not None:
-            for config_k, config_v in fedml_conf_object.items():
-                parameter_v = parameters_object.get(config_k, None)
-                if parameter_v is not None:
-                    fedml_conf_object[config_k] = parameter_v
-                    parameters_object.pop(config_k)
-
-            for config_k, config_v in parameters_object.items():
-                fedml_conf_object[config_k] = config_v
-
-        package_dynamic_args = package_conf_object["dynamic_args"]
-        if fedml_conf_object.get("comm_args", None) is not None:
-            fedml_conf_object["comm_args"]["mqtt_config_path"] = package_dynamic_args["mqtt_config_path"]
-            fedml_conf_object["comm_args"]["s3_config_path"] = package_dynamic_args["s3_config_path"]
-            fedml_conf_object["common_args"]["using_mlops"] = True
-        if fedml_conf_object.get("train_args", None) is not None:
-            fedml_conf_object["train_args"]["run_id"] = package_dynamic_args["run_id"]
-            fedml_conf_object["train_args"]["client_id_list"] = package_dynamic_args["client_id_list"]
-            fedml_conf_object["train_args"]["client_num_in_total"] = int(package_dynamic_args["client_num_in_total"])
-            fedml_conf_object["train_args"]["client_num_per_round"] = int(package_dynamic_args["client_num_in_total"])
-            fedml_conf_object["train_args"]["client_id"] = self.edge_id
-            fedml_conf_object["train_args"]["server_id"] = self.request_json.get("server_id", "0")
-        if fedml_conf_object.get("device_args", None) is not None:
-            fedml_conf_object["device_args"]["worker_num"] = int(package_dynamic_args["client_num_in_total"])
-        # fedml_conf_object["data_args"]["data_cache_dir"] = package_dynamic_args["data_cache_dir"]
-        data_args = fedml_conf_object.get("data_args")
-        if data_args is not None:
-            data_cache_dir = fedml_conf_object["data_args"].get("data_cache_dir")
-            if data_cache_dir is not None:
-                data_cache_dir = os.path.join(data_cache_dir, str(self.edge_id))
-                fedml_conf_object["data_args"]["data_cache_dir"] = data_cache_dir
-        if fedml_conf_object.get("tracking_args", None) is not None:
-            fedml_conf_object["tracking_args"]["log_file_dir"] = package_dynamic_args["log_file_dir"]
-            fedml_conf_object["tracking_args"]["log_server_url"] = package_dynamic_args["log_server_url"]
-
-        fedml_conf_object["dynamic_args"] = package_dynamic_args
-        self.fedml_config_object = fedml_conf_object.copy()
-        ClientConstants.generate_yaml_doc(fedml_conf_object, fedml_conf_path)
-
-    def run_bootstrap_script(self, bootstrap_cmd_list, bootstrap_script_file):
-        try:
-            logging.info("Bootstrap commands are being executed...")
-            process, error_list = ClientConstants.execute_commands_with_live_logs(bootstrap_cmd_list,
-                                                                                  callback=self.callback_run_bootstrap)
-
-            ret_code, out, err = process.returncode, None, None
-            if ret_code is None or ret_code <= 0:
-                if error_list is not None and len(error_list) > 0:
-                    is_bootstrap_run_ok = False
-                else:
-                    if out is not None:
-                        out_str = sys_utils.decode_our_err_result(out)
-                        if out_str != "":
-                            logging.info("{}".format(out_str))
-
-                    sys_utils.log_return_info(bootstrap_script_file, 0)
-
-                    is_bootstrap_run_ok = True
-            else:
-                if err is not None:
-                    err_str = sys_utils.decode_our_err_result(err)
-                    if err_str != "":
-                        logging.error("{}".format(err_str))
-
-                sys_utils.log_return_info(bootstrap_script_file, ret_code)
-
-                is_bootstrap_run_ok = False
-        except Exception as e:
-            logging.error(f"Bootstrap script error: Exception: {e}, Traceback: {traceback.format_exc()}")
-            is_bootstrap_run_ok = False
-        return is_bootstrap_run_ok
-
-    def callback_run_bootstrap(self, job_pid):
-        ClientConstants.save_bootstrap_process(self.run_id, job_pid)
-
-    def run(self, process_event, completed_event, message_center_queue):
-        print(f"Client runner process id {os.getpid()}, run id {self.run_id}")
-
-        if platform.system() != "Windows":
-            os.setsid()
-
-        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
-        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')
-
-        self.run_process_event = process_event
-        self.run_process_completed_event = completed_event
-        try:
-            MLOpsUtils.set_ntp_offset(self.ntp_offset)
-            self.rebuild_message_center(message_center_queue)
-            self.run_impl()
-        except RunnerError:
-            logging.info("Runner stopped.")
-            self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED)
-        except RunnerCompletedError:
-            logging.info("Runner completed.")
-        except Exception as e:
-            logging.error(f"Runner exited with errors. Exception: {e}, Traceback {traceback.format_exc()}")
-            self.mlops_metrics.report_client_id_status(
-                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
-                server_id=self.server_id, run_id=self.run_id)
-        finally:
-            if self.mlops_metrics is not None:
-                computing_ended_time = MLOpsUtils.get_ntp_time()
-                self.mlops_metrics.report_edge_job_computing_cost(self.run_id, self.edge_id,
-                                                                  self.computing_started_time, computing_ended_time,
-                                                                  self.args.user, self.args.api_key)
-            logging.info("Release resources.")
-            self.cleanup_containers_and_release_gpus(self.run_id, self.edge_id)
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id)
-            if self.mlops_metrics is not None:
-                self.mlops_metrics.stop_sys_perf()
-            time.sleep(3)
-            ClientConstants.cleanup_learning_process(self.run_id)
-            ClientConstants.cleanup_run_process(self.run_id)
-
-    def check_runner_stop_event(self):
-        if self.run_process_event.is_set():
-            logging.info("Received stopping event.")
-            raise RunnerError("Runner stopped")
-
-        if self.run_process_completed_event.is_set():
-            logging.info("Received completed event.")
-            raise RunnerCompletedError("Runner completed")
-
-    def run_impl(self):
-        run_id = self.request_json["runId"]
-        run_config = self.request_json["run_config"]
-        data_config = run_config.get("data_config", {})
-        packages_config = run_config["packages_config"]
-
-        self.computing_started_time = MLOpsUtils.get_ntp_time()
-        self.mlops_metrics.report_edge_job_computing_cost(run_id, self.edge_id,
-                                                          self.computing_started_time, 0,
-                                                          self.args.user, self.args.api_key)
-
-        self.check_runner_stop_event()
-
-        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
-
-        self.mlops_metrics.report_client_id_status(
-            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING,
-            running_json=self.start_request_json, run_id=run_id)
-
-        # get training params
-        private_local_data_dir = data_config.get("privateLocalData", "")
-        is_using_local_data = 0
-        # if private_local_data_dir is not None and len(str(private_local_data_dir).strip(' ')) > 0:
-        #     is_using_local_data = 1
-
-        # start a run according to the hyper-parameters
-        # fedml_local_data_dir = self.cur_dir + "/fedml_data/run_" + run_id_str + "_edge_" + str(edge_id)
-        fedml_local_data_dir = os.path.join(self.cur_dir, "fedml_data")
-        fedml_local_config_dir = os.path.join(self.cur_dir, "fedml_config")
-        if is_using_local_data:
-            fedml_local_data_dir = private_local_data_dir
-        self.fedml_data_dir = self.fedml_data_local_package_dir
-
-        self.check_runner_stop_event()
-
-        logging.info("Download packages")
-
-        # update local config with real time parameters from server and dynamically replace variables value
-        unzip_package_path, fedml_config_object = self.update_local_fedml_config(run_id, run_config)
-        # if unzip_package_path is None or fedml_config_object is None:
-        #     logging.info("failed to update local fedml config.")
-        #     self.check_runner_stop_event()
-        #     # Send failed msg when exceptions.
-        #     self.cleanup_run_when_starting_failed(status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION)
-        #     return
-
-        logging.info("Check downloaded packages...")
-
-        entry_file_config = fedml_config_object["entry_config"]
-        dynamic_args_config = fedml_config_object["dynamic_args"]
-        entry_file = str(entry_file_config["entry_file"]).replace('\\', os.sep).replace('/', os.sep)
-        entry_file = os.path.basename(entry_file)
-        conf_file = entry_file_config["conf_file"]
-        conf_file = str(conf_file).replace('\\', os.sep).replace('/', os.sep)
-        #####
-        # ClientConstants.cleanup_learning_process(run_id)
-        # ClientConstants.cleanup_bootstrap_process(run_id)
-        #####
-
-        if not os.path.exists(unzip_package_path):
-            logging.info("failed to unzip file.")
-            self.check_runner_stop_event()
-            # Send failed msg when exceptions.
-            self.cleanup_run_when_starting_failed(status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION)
-            return
-        os.chdir(os.path.join(unzip_package_path, "fedml"))
-
-        self.check_runner_stop_event()
-
-        logging.info("starting the user process...")
-
-        entry_file_full_path = os.path.join(unzip_package_path, "fedml", entry_file)
-        conf_file_full_path = os.path.join(unzip_package_path, "fedml", conf_file)
-        logging.info("waiting the user process to finish...")
-        logging.info("                          ")
-        logging.info("                          ")
-        logging.info("====Your Run Logs Begin===")
-
-        process, is_launch_task, error_list = self.execute_job_task(unzip_package_path=unzip_package_path,
-                                                                    entry_file_full_path=entry_file_full_path,
-                                                                    conf_file_full_path=conf_file_full_path,
-                                                                    dynamic_args_config=dynamic_args_config,
-                                                                    fedml_config_object=self.fedml_config_object)
-
-        logging.info("====Your Run Logs End===")
-        logging.info("                        ")
-        logging.info("                        ")
-
-        ret_code, out, err = process.returncode if process else None, None, None
-        is_run_ok = sys_utils.is_runner_finished_normally(process.pid)
-        if is_launch_task:
-            is_run_ok = True
-        if error_list is not None and len(error_list) > 0:
-            is_run_ok = False
-        if ret_code is None or ret_code <= 0:
-            self.check_runner_stop_event()
-
-            if is_run_ok:
-                if out is not None:
-                    out_str = sys_utils.decode_our_err_result(out)
-                    if out_str != "":
-                        logging.info("{}".format(out_str))
-
-                self.mlops_metrics.report_client_id_status(
-                    self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
-                    server_id=self.server_id, run_id=run_id)
-
-                if is_launch_task:
-                    sys_utils.log_return_info(f"job {run_id}", ret_code)
-                else:
-                    sys_utils.log_return_info(entry_file, ret_code)
-        else:
-            is_run_ok = False
-
-        if not is_run_ok:
-            # If the run status is killed or finished, then return with the normal state.
-            current_job = FedMLClientDataInterface.get_instance().get_job_by_id(run_id)
-            if current_job is not None and (current_job.status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or
-                                            current_job.status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED):
-                return
-
-            self.check_runner_stop_event()
-
-            logging.error("failed to run the learning process...")
-
-            if err is not None:
-                err_str = sys_utils.decode_our_err_result(err)
-                if err_str != "":
-                    logging.error("{}".format(err_str))
-
-            if is_launch_task:
-                sys_utils.log_return_info(f"job {run_id}", ret_code)
-            else:
-                sys_utils.log_return_info(entry_file, ret_code)
-
-            # Send failed msg when exceptions.
-            self.mlops_metrics.report_client_id_status(
-                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
-                server_id=self.server_id, run_id=run_id)
-
-    def execute_job_task(self, unzip_package_path, entry_file_full_path, conf_file_full_path, dynamic_args_config,
-                         fedml_config_object):
-        run_config = self.request_json["run_config"]
-        run_params = run_config.get("parameters", {})
-        client_rank = self.request_json.get("client_rank", 1)
-        job_yaml = run_params.get("job_yaml", {})
-        job_yaml_default_none = run_params.get("job_yaml", None)
-        job_api_key = job_yaml.get("run_api_key", None)
-        job_api_key = job_yaml.get("fedml_run_dynamic_params", None) if job_api_key is None else job_api_key
-        assigned_gpu_ids = run_params.get("gpu_ids", None)
-        job_type = job_yaml.get("job_type", None)
-        containerize = fedml_config_object.get("containerize", None)
-        image_pull_policy = fedml_config_object.get("image_pull_policy", Constants.IMAGE_PULL_POLICY_ALWAYS)
-        # TODO: Can we remove task_type?
-        job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type
-        conf_file_object = load_yaml_config(conf_file_full_path)
-        entry_args_dict = conf_file_object.get("fedml_entry_args", {})
-        entry_args = entry_args_dict.get("arg_items", None)
-        scheduler_match_info = self.request_json.get("scheduler_match_info", {})
-        if job_type == Constants.JOB_TASK_TYPE_TRAIN:
-            containerize = True if containerize is None else containerize
-
-        # Bootstrap Info
-        bootstrap_script_path, bootstrap_script_dir, bootstrap_script_file = [None] * 3
-        env_args = fedml_config_object.get("environment_args", None)
-
-        if env_args is not None:
-            bootstrap_script_file = env_args.get("bootstrap", None)
-            if bootstrap_script_file is not None:
-                bootstrap_script_file = str(bootstrap_script_file).replace('\\', os.sep).replace('/', os.sep)
-                if platform.system() == 'Windows':
-                    bootstrap_script_file = bootstrap_script_file.rstrip('.sh') + '.bat'
-                if bootstrap_script_file is not None:
-                    bootstrap_script_dir = os.path.join(unzip_package_path, "fedml",
-                                                        os.path.dirname(bootstrap_script_file))
-                    bootstrap_script_path = os.path.join(
-                        bootstrap_script_dir, bootstrap_script_dir, os.path.basename(bootstrap_script_file)
-                    )
-
-        bootstrap_cmd_list = list()
-        if bootstrap_script_path:
-            logging.info("Bootstrap commands are being generated...")
-            bootstrap_cmd_list = JobRunnerUtils.generate_bootstrap_commands(bootstrap_script_path=bootstrap_script_path,
-                                                                            bootstrap_script_dir=bootstrap_script_dir,
-                                                                            bootstrap_script_file=bootstrap_script_file)
-            logging.info(f"Generated following Bootstrap commands: {bootstrap_cmd_list}")
-
-        if not containerize:
-            if len(bootstrap_cmd_list) and not (job_type == Constants.JOB_TASK_TYPE_DEPLOY or
-                                                job_type == Constants.JOB_TASK_TYPE_SERVE):
-                bootstrapping_successful = self.run_bootstrap_script(bootstrap_cmd_list=bootstrap_cmd_list,
-                                                                     bootstrap_script_file=bootstrap_script_file)
-
-                if not bootstrapping_successful:
-                    logging.info("failed to update local fedml config.")
-                    self.check_runner_stop_event()
-                    # Send failed msg when exceptions.
-                    self.cleanup_run_when_starting_failed(status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION)
-                    raise Exception(f"Failed to execute following bootstrap commands: {bootstrap_cmd_list}")
-
-                logging.info("cleanup the previous learning process and bootstrap process...")
-                ClientConstants.cleanup_learning_process(self.request_json["runId"])
-                ClientConstants.cleanup_bootstrap_process(self.request_json["runId"])
-
-        executable_interpreter = ClientConstants.CLIENT_SHELL_PS \
-            if platform.system() == ClientConstants.PLATFORM_WINDOWS else ClientConstants.CLIENT_SHELL_BASH
-
-        if job_yaml_default_none is None:
-            # Generate the job executing commands for previous federated learning (Compatibility)
-            python_program = get_python_program()
-            logging.info("Run the client: {} {} --cf {} --rank {} --role client".format(
-                python_program, entry_file_full_path, conf_file_full_path, str(dynamic_args_config.get("rank", 1))))
-            rank = str(dynamic_args_config.get("rank", 1))
-            entry_command = f"{python_program} {entry_file_full_path} --cf " \
-                            f"{conf_file_full_path} --rank {rank} --role client"
-            shell_cmd_list = [entry_command]
-
-            # Run the job executing commands for previous federated learning (Compatibility)
-            process, error_list = ClientConstants.execute_commands_with_live_logs(
-                shell_cmd_list, callback=self.callback_start_fl_job, should_write_log_file=False)
-            is_launch_task = False
-        else:
-            self.check_runner_stop_event()
-
-            self.mlops_metrics.report_client_id_status(
-                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING, run_id=self.run_id)
-
-            # Generate the job executing commands
-            job_executing_commands = JobRunnerUtils.generate_job_execute_commands(
-                self.run_id, self.edge_id, self.version,
-                self.package_type, executable_interpreter, entry_file_full_path,
-                conf_file_object, entry_args, assigned_gpu_ids,
-                job_api_key, client_rank, scheduler_match_info=scheduler_match_info,
-                cuda_visible_gpu_ids_str=self.cuda_visible_gpu_ids_str)
-
-            if containerize is not None and containerize is True:
-                docker_args = fedml_config_object.get("docker", {})
-                docker_args = JobRunnerUtils.create_instance_from_dict(DockerArgs, docker_args)
-                try:
-                    job_executing_commands = JobRunnerUtils.generate_launch_docker_command(docker_args=docker_args,
-                                                                                           run_id=self.run_id,
-                                                                                           edge_id=self.edge_id,
-                                                                                           unzip_package_path=unzip_package_path,
-                                                                                           executable_interpreter=executable_interpreter,
-                                                                                           entry_file_full_path=entry_file_full_path,
-                                                                                           bootstrap_cmd_list=bootstrap_cmd_list,
-                                                                                           cuda_visible_gpu_ids_str=self.cuda_visible_gpu_ids_str,
-                                                                                           image_pull_policy=image_pull_policy)
-                except Exception as e:
-                    logging.error(f"Error occurred while generating containerized launch commands. "
-                                  f"Exception: {e}, Traceback: {traceback.format_exc()}")
-                    return None, None, None
-
-                if not job_executing_commands:
-                    raise Exception("Failed to generate docker execution command")
-
-            # Run the job executing commands
-            logging.info(f"Run the client job with job id {self.run_id}, device id {self.edge_id}.")
-            process, error_list = ClientConstants.execute_commands_with_live_logs(
-                job_executing_commands, callback=self.start_job_perf, error_processor=self.job_error_processor,
-                should_write_log_file=False if job_type == Constants.JOB_TASK_TYPE_FEDERATE else True)
-            is_launch_task = False if job_type == Constants.JOB_TASK_TYPE_FEDERATE else True
-
-        return process, is_launch_task, error_list
-
-    def callback_start_fl_job(self, job_pid):
-        ClientConstants.save_learning_process(self.run_id, job_pid)
-        self.mlops_metrics.report_sys_perf(
-            self.args, self.agent_config["mqtt_config"], job_process_id=job_pid)
-
-    def start_job_perf(self, job_pid):
-        ClientConstants.save_learning_process(self.run_id, job_pid)
-        self.mlops_metrics.report_job_perf(self.args, self.agent_config["mqtt_config"], job_pid)
-
-    def job_error_processor(self, error_list):
-        self.check_runner_stop_event()
-
-        error_str = "\n".join(error_list)
-        error_message = f"Error occurred when running the job... {error_str}"
-        logging.error(error_message)
-        raise Exception(error_message)
-
-    def reset_devices_status(self, edge_id, status, should_send_client_id_status=True):
-        self.mlops_metrics.run_id = self.run_id
-        self.mlops_metrics.edge_id = edge_id
-
-        if should_send_client_id_status:
-            if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \
-                    status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \
-                    status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION:
-                self.mlops_metrics.report_client_id_status(
-                    edge_id, status, server_id=self.server_id, run_id=self.run_id)
-
-    def sync_run_stop_status(self, run_status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED):
-        try:
-            if self.run_process_event is not None:
-                self.run_process_event.set()
-
-            self.mlops_metrics.report_client_id_status(
-                self.edge_id, run_status, server_id=self.server_id, run_id=self.run_id)
-        except Exception as e:
-            logging.error(f"Failed to sync run stop status with Exception {e}. Traceback: {traceback.format_exc()}")
-            pass
-
-    def cleanup_run_when_starting_failed(
-            self, status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, should_send_client_id_status=True):
-        # logging.error("Cleanup run successfully when starting failed.")
-
-        self.reset_devices_status(
-            self.edge_id, status, should_send_client_id_status=should_send_client_id_status)
-
-        time.sleep(2)
-
-        try:
-            self.mlops_metrics.stop_sys_perf()
-        except Exception as ex:
-            logging.error(f"Failed to stop sys perf with Exception {ex}. Traceback: {traceback.format_exc()}")
-            pass
-
-        time.sleep(1)
-
-        try:
-            ClientConstants.cleanup_learning_process(self.run_id)
-            ClientConstants.cleanup_bootstrap_process(self.run_id)
-            ClientConstants.cleanup_run_process(self.run_id)
-        except Exception as e:
-            logging.error(
-                f"Failed to cleanup run when starting failed with Exception {e}. Traceback: {traceback.format_exc()}")
-            pass
-
-    def cleanup_run_when_finished(self):
-        # logging.info("Cleanup run successfully when finished.")
-
-        self.reset_devices_status(self.edge_id,
-                                  ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
-                                  should_send_client_id_status=False)
-
-        time.sleep(2)
-
-        try:
-            self.mlops_metrics.stop_sys_perf()
-        except Exception as ex:
-            logging.error(f"Failed to stop sys perf with Exception {ex}. Traceback: {traceback.format_exc()}")
-            pass
-
-        time.sleep(1)
-
-        try:
-            ClientConstants.cleanup_learning_process(self.run_id)
-            ClientConstants.cleanup_bootstrap_process(self.run_id)
-            ClientConstants.cleanup_run_process(self.run_id)
-        except Exception as e:
-            logging.error(
-                f"Failed to cleanup run when finished with Exception {e}. Traceback: {traceback.format_exc()}")
-            pass
-
-    def setup_message_center(self):
-        if self.message_center is not None:
-            return
-
-        self.message_center = FedMLMessageCenter(agent_config=self.agent_config)
-        self.message_center.start_sender()
-
-        if self.mlops_metrics is None:
-            self.mlops_metrics = MLOpsMetrics()
-        self.mlops_metrics.set_messenger(self.message_center)
-        self.mlops_metrics.run_id = self.run_id
-
-    def rebuild_message_center(self, message_center_queue):
-        self.message_center = FedMLMessageCenter(message_queue=message_center_queue)
-
-        if self.mlops_metrics is None:
-            self.mlops_metrics = MLOpsMetrics()
-        self.mlops_metrics.set_messenger(self.message_center)
-        self.mlops_metrics.run_id = self.run_id
-
-    def release_message_center(self):
-        try:
-            if self.message_center is not None:
-                self.message_center.stop()
-                self.message_center = None
-
-        except Exception as e:
-            logging.error(
-                f"Failed to release client mqtt manager with Exception {e}. Traceback: {traceback.format_exc()}")
-            pass
-
-    def ota_upgrade(self, payload, request_json):
-        run_id = request_json["runId"]
-        force_ota = False
-        ota_version = None
-
-        try:
-            run_config = request_json.get("run_config", None)
-            parameters = run_config.get("parameters", None)
-            common_args = parameters.get("common_args", None)
-            force_ota = common_args.get("force_ota", False) if common_args is not None else False
-            ota_version = common_args.get("ota_version", None) if common_args is not None else None
-        except Exception as e:
-            logging.error(
-                f"Failed to get ota upgrade parameters with Exception {e}. Traceback: {traceback.format_exc()}")
-            pass
-
-        if force_ota and ota_version is not None:
-            should_upgrade = True if ota_version != fedml.__version__ else False
-            upgrade_version = ota_version
-        else:
-            try:
-                fedml_is_latest_version, local_ver, remote_ver = sys_utils.check_fedml_is_latest_version(self.version)
-            except Exception as e:
-                logging.error(f"Failed to check fedml version with Exception {e}. Traceback: {traceback.format_exc()}")
-                return
-
-            should_upgrade = False if fedml_is_latest_version else True
-            upgrade_version = remote_ver
-
-        if should_upgrade:
-            FedMLClientDataInterface.get_instance(). \
-                save_started_job(run_id, self.edge_id, time.time(),
-                                 ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING,
-                                 ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING,
-                                 payload)
-            self.mlops_metrics.report_client_id_status(
-                self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, run_id=run_id)
-
-            logging.info(f"Upgrade to version {upgrade_version} ...")
-
-            sys_utils.do_upgrade(self.version, upgrade_version)
-            raise Exception("Restarting after upgraded...")
-
-    def callback_start_train(self, topic, payload):
-        # Get training params
-
-        request_json = json.loads(payload)
-        is_retain = request_json.get("is_retain", False)
-        if is_retain:
-            return
-        run_id = request_json["runId"]
-
-        # Start log processor for current run
-        train_edge_id = str(topic).split("/")[-2]
-        self.args.run_id = run_id
-        self.args.edge_id = train_edge_id
-        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
-        MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(
-            run_id, train_edge_id, log_source=SchedulerConstants.get_log_source(request_json))
-        logging.info("start the log processor")
-
-        try:
-            MLOpsConfigs.fetch_all_configs()
-        except Exception as e:
-            logging.error(f"Failed to fetch all configs with Exception {e}. Traceback: {traceback.format_exc()}")
-            pass
-
-        if not FedMLClientDataInterface.get_instance().get_agent_status():
-            request_json = json.loads(payload)
-            run_id = request_json["runId"]
-            logging.error(
-                "FedMLDebug - Receive: topic ({}), payload ({}), but the client agent is disabled. {}".format(
-                    topic, payload, traceback.format_exc()
-                )
-            )
-            # Send failed msg when exceptions.
-            self.mlops_metrics.report_client_id_status(
-                train_edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION, run_id=run_id,
-                msg=f"the client agent {train_edge_id} is disabled")
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, train_edge_id)
-            return
-
-        logging.info(
-            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
-        )
-
-        # Terminate previous process about starting or stopping run command
-        logging.info("cleanup and save runner information")
-        server_agent_id = request_json["cloud_agent_id"]
-        ClientConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, train_edge_id, run_id=run_id)
-
-        # OTA upgrade
-        # self.ota_upgrade(payload, request_json)
-
-        # Occupy GPUs
-        scheduler_match_info = request_json.get("scheduler_match_info", {})
-        matched_gpu_num = scheduler_match_info.get("matched_gpu_num", 0)
-        model_master_device_id = scheduler_match_info.get("model_master_device_id", None)
-        model_slave_device_id = scheduler_match_info.get("model_slave_device_id", None)
-        model_slave_device_id_list = scheduler_match_info.get("model_slave_device_id_list", None)
-        run_config = request_json.get("run_config", {})
-        run_params = run_config.get("parameters", {})
-        serving_args = run_params.get("serving_args", {})
-        endpoint_id = serving_args.get("endpoint_id", None)
-        job_yaml = run_params.get("job_yaml", {})
-        job_type = job_yaml.get("job_type", SchedulerConstants.JOB_TASK_TYPE_TRAIN)
-        cuda_visible_gpu_ids_str = None
-        if not (job_type == SchedulerConstants.JOB_TASK_TYPE_SERVE or
-                job_type == SchedulerConstants.JOB_TASK_TYPE_DEPLOY):
-            cuda_visible_gpu_ids_str = JobRunnerUtils.get_instance().occupy_gpu_ids(
-                run_id, matched_gpu_num, train_edge_id, inner_id=endpoint_id,
-                model_master_device_id=model_master_device_id,
-                model_slave_device_id=model_slave_device_id)
-        logging.info(
-            f"Run started, available gpu ids: {JobRunnerUtils.get_instance().get_available_gpu_id_list(train_edge_id)}")
-
-        # Start server with multiprocessing mode
-        self.request_json = request_json
-        run_id_str = str(run_id)
-        self.running_request_json[run_id_str] = request_json
-        client_runner = FedMLClientRunner(
-            self.args, edge_id=train_edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id,
-            cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str
-        )
-        client_runner.start_request_json = payload
-        self.run_process_event_map[run_id_str] = multiprocessing.Event()
-        self.run_process_event_map[run_id_str].clear()
-        client_runner.run_process_event = self.run_process_event_map[run_id_str]
-        self.run_process_completed_event_map[run_id_str] = multiprocessing.Event()
-        self.run_process_completed_event_map[run_id_str].clear()
-        client_runner.run_process_completed_event = self.run_process_completed_event_map[run_id_str]
-        client_runner.server_id = request_json.get("server_id", "0")
-        logging.info("start the runner process.")
-        self.run_process_map[run_id_str] = Process(target=client_runner.run, args=(
-            self.run_process_event_map[run_id_str], self.run_process_completed_event_map[run_id_str],
-            self.message_center.get_message_queue()))
-        self.run_process_map[run_id_str].start()
-        ClientConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid)
-
-    def callback_stop_train(self, topic, payload):
-        # logging.info("callback_stop_train: topic = %s, payload = %s" % (topic, payload))
-        # logging.info(
-        #     f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
-        # )
-
-        train_edge_id = str(topic).split("/")[-2]
-        request_json = json.loads(payload)
-        is_retain = request_json.get("is_retain", False)
-        if is_retain:
-            return
-        run_id = request_json.get("runId", None)
-        if run_id is None:
-            run_id = request_json.get("id", None)
-        run_status = request_json.get("run_status", ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED)
-
-        # logging.info("Stop run with multiprocessing...")
-
-        # Stop client with multiprocessing mode
-        run_id_str = str(run_id)
-        client_runner = FedMLClientRunner(
-            self.args, edge_id=train_edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id
-        )
-        self.cleanup_containers_and_release_gpus(run_id, train_edge_id)
-        client_runner.run_process_event = self.run_process_event_map.get(run_id_str, None)
-        client_runner.run_process = self.run_process_map.get(run_id_str, None)
-        client_runner.message_center = self.message_center
-        client_runner.mlops_metrics = self.mlops_metrics
-        client_runner.sync_run_stop_status(run_status=run_status)
-
-    def cleanup_containers_and_release_gpus(self, run_id, edge_id):
-        job_type = JobRunnerUtils.get_job_type_from_run_id(run_id)
-
-        if not job_type:
-            logging.info(f"Failed to get job type from run id {run_id}. This is not an error as it would usually "
-                         f"happen when the job is not found in the database because job is already finished and "
-                         f"cleaned up. Exiting cleanup_containers_and_release_gpus.")
-            return
-
-        # Check if the job type is not "serve" or "deploy"
-        if not (job_type == SchedulerConstants.JOB_TASK_TYPE_SERVE or
-                job_type == SchedulerConstants.JOB_TASK_TYPE_DEPLOY):
-
-            # Terminate the run docker container if exists
-            container_name = JobRunnerUtils.get_run_container_name(run_id)
-            docker_client = JobRunnerUtils.get_docker_client(DockerArgs())
-            logging.info(f"Terminating the run docker container {container_name} if exists...")
-            try:
-                JobRunnerUtils.remove_run_container_if_exists(container_name, docker_client)
-            except Exception as e:
-                logging.error(f"Exception {e} occurred when terminating docker container. "
-                              f"Traceback: {traceback.format_exc()}")
-
-            # Release the GPU ids and update the GPU availability in the persistent store
-            JobRunnerUtils.get_instance().release_gpu_ids(run_id, edge_id)
-
-            # Send mqtt message reporting the new gpu availability to the backend
-            MLOpsDevicePerfStats.report_gpu_device_info(self.edge_id, mqtt_mgr=self.mqtt_mgr)
-
-    def cleanup_client_with_status(self):
-        if self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED:
-            # logging.info("received to finished status.")
-            self.cleanup_run_when_finished()
-        elif self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED:
-            # logging.error("received to failed status from the server agent")
-            self.cleanup_run_when_starting_failed(should_send_client_id_status=False)
-        elif self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED:
-            # logging.error("received to failed status from the server agent")
-            self.cleanup_run_when_starting_failed(status=self.device_status, should_send_client_id_status=False)
-
    def callback_runner_id_status(self, topic, payload):
        """Handle a per-run status message and, on a terminal status, tear the run down.

        The edge id is parsed from the topic (fl_client/flclient_agent_<edge_id>/status);
        run_id and status come from the JSON payload. Terminal statuses
        (FINISHED/FAILED/KILLED) trigger completion-event signaling, cleanup via a
        helper runner, GPU/container release for non-deploy jobs, process kill, and
        log-processor shutdown.
        """
        # logging.info("callback_runner_id_status: topic = %s, payload = %s" % (topic, payload))
        # logging.info(f"FedMLDebug - Receive: topic ({topic}), payload ({payload})")
        request_json = json.loads(payload)
        is_retain = request_json.get("is_retain", False)
        if is_retain:
            # Retained MQTT messages are stale; ignore them.
            return
        run_id = request_json["run_id"]
        edge_id = str(topic).split("/")[-2].split('_')[-1]
        status = request_json["status"]
        run_id_str = str(run_id)

        # Persist the status locally; EXCEPTION is recorded as FAILED.
        self.save_training_status(
            edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED
            if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION else status)

        if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \
                status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \
                status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED:
            # Wake anything waiting on this run's completion event.
            completed_event = self.run_process_completed_event_map.get(run_id_str, None)
            if completed_event is not None:
                completed_event.set()

            # Stop client with multiprocessing mode
            # A fresh runner instance performs the status-specific cleanup so the
            # listener's own state is not disturbed.
            client_runner = FedMLClientRunner(
                self.args,
                edge_id=edge_id,
                request_json=request_json,
                agent_config=self.agent_config,
                run_id=run_id,
            )
            client_runner.device_status = status
            client_runner.message_center = self.message_center
            client_runner.mlops_metrics = self.mlops_metrics
            client_runner.cleanup_client_with_status()

            # Recover the run's request json from the local DB if we no longer hold it.
            running_json = self.running_request_json.get(run_id_str)
            if running_json is None:
                try:
                    current_job = FedMLClientDataInterface.get_instance().get_job_by_id(run_id)
                    running_json = json.loads(current_job.running_json)
                except Exception as e:
                    logging.error(f"Failed to get running json with Exception {e}. Traceback: {traceback.format_exc()}")

            # Deploy jobs keep their GPUs; everything else releases them here.
            if running_json is not None:
                job_type = JobRunnerUtils.parse_job_type(running_json)
                if not SchedulerConstants.is_deploy_job(job_type):
                    logging.info(f"[run/device][{run_id}/{edge_id}] Release gpu resource when run ended.")
                    self.cleanup_containers_and_release_gpus(run_id, edge_id)

            # Kill the run's worker process and its docker container, then forget it.
            run_process = self.run_process_map.get(run_id_str, None)
            if run_process is not None:
                if run_process.pid is not None:
                    RunProcessUtils.kill_process(run_process.pid)

                    # Terminate the run docker container if exists
                    try:
                        container_name = JobRunnerUtils.get_run_container_name(run_id)
                        docker_client = JobRunnerUtils.get_docker_client(DockerArgs())
                        logging.info(f"Terminating the run docker container {container_name} if exists...")
                        JobRunnerUtils.remove_run_container_if_exists(container_name, docker_client)
                    except Exception as e:
                        logging.error(f"Error occurred when terminating docker container."
                                      f"Exception: {e}, Traceback: {traceback.format_exc()}.")

                self.run_process_map.pop(run_id_str)

            # Stop log processor for current run
            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, edge_id)
-
-    def callback_report_current_status(self, topic, payload):
-        logging.info(
-            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
-        )
-
-        self.send_agent_active_msg()
-        if self.general_edge_id is not None:
-            self.send_agent_active_msg(self.general_edge_id)
-
    @staticmethod
    def process_ota_upgrade_msg():
        # Upgrade the fedml package in place; the OTA callback then raises an
        # exception so the agent restarts on the new version.
        os.system("pip install -U fedml")
-
-    @staticmethod
-    def callback_client_ota_msg(topic, payload):
-        logging.info(
-            f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
-        )
-
-        request_json = json.loads(payload)
-        cmd = request_json["cmd"]
-
-        if cmd == ClientConstants.FEDML_OTA_CMD_UPGRADE:
-            FedMLClientRunner.process_ota_upgrade_msg()
-            # Process(target=FedMLClientRunner.process_ota_upgrade_msg).start()
-            raise Exception("After upgraded, restart runner...")
-        elif cmd == ClientConstants.FEDML_OTA_CMD_RESTART:
-            raise Exception("Restart runner...")
-
-    def get_all_run_process_list_map(self):
-        run_process_dict = dict()
-        for run_id_str, process in self.run_process_map.items():
-            cur_run_process_list = ClientConstants.get_learning_process_list(run_id_str)
-            run_process_dict[run_id_str] = cur_run_process_list
-
-        return run_process_dict
-
    def response_device_info_to_mlops(self, topic, payload):
        """Answer an MLOps device-info request on the deploy slave-agent channel.

        Publishes a payload containing this edge's basic identity or — when
        ``need_gpu_info`` is set — full realtime system/GPU stats, plus the
        deploy master/slave device ids. Only responds when the metrics client
        and the deploy device ids have been initialized.
        """
        payload_json = json.loads(payload)
        server_id = payload_json.get("server_id", 0)
        run_id = payload_json.get("run_id", 0)
        # The requested edge id is the last path segment of the topic.
        listen_edge_id = str(topic).split("/")[-1]
        context = payload_json.get("context", None)
        need_gpu_info = payload_json.get("need_gpu_info", False)
        need_running_process_list = payload_json.get("need_running_process_list", False)
        response_topic = f"deploy/slave_agent/mlops/response_device_info"
        if self.mlops_metrics is not None and self.model_device_client_edge_id_list is not None and \
                self.model_device_server_id is not None:
            if not need_gpu_info:
                device_info_json = {
                    "edge_id": listen_edge_id,
                    "fedml_version": fedml.__version__,
                    "user_id": self.args.user
                }
            else:
                # NOTE(review): "cup_utilization" is a typo for cpu_utilization
                # (local name only; the reported key below is "cpuUtilization").
                total_mem, free_mem, total_disk_size, free_disk_size, cup_utilization, cpu_cores, gpu_cores_total, \
                    gpu_cores_available, sent_bytes, recv_bytes, gpu_available_ids = sys_utils.get_sys_realtime_stats()
                host_ip = sys_utils.get_host_ip()
                host_port = sys_utils.get_available_port()
                # Prefer the scheduler's view of available GPUs over the raw stats.
                gpu_available_ids = JobRunnerUtils.get_available_gpu_id_list(self.edge_id)
                gpu_available_ids = JobRunnerUtils.trim_unavailable_gpu_ids(gpu_available_ids)
                gpu_cores_available = len(gpu_available_ids)
                gpu_list = sys_utils.get_gpu_list()
                device_info_json = {
                    "edge_id": listen_edge_id,
                    "memoryTotal": round(total_mem * MLOpsUtils.BYTES_TO_GB, 2),
                    "memoryAvailable": round(free_mem * MLOpsUtils.BYTES_TO_GB, 2),
                    "diskSpaceTotal": round(total_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
                    "diskSpaceAvailable": round(free_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
                    "cpuUtilization": round(cup_utilization, 2),
                    "cpuCores": cpu_cores,
                    "gpuCoresTotal": gpu_cores_total,
                    "gpuCoresAvailable": gpu_cores_available,
                    "gpu_available_ids": gpu_available_ids,
                    "gpu_list": gpu_list,
                    "node_ip": host_ip,
                    "node_port": host_port,
                    "networkTraffic": sent_bytes + recv_bytes,
                    "updateTime": int(MLOpsUtils.get_ntp_time()),
                    "fedml_version": fedml.__version__,
                    "user_id": self.args.user
                }
            if need_running_process_list:
                device_info_json["run_process_list_map"] = self.get_all_run_process_list_map()
            # NOTE(review): "salve_device_ids" is a typo for slave_device_ids
            # (local name only; the payload keys use the intended spelling).
            salve_device_ids = list()
            for model_client_edge_id in self.model_device_client_edge_id_list:
                salve_device_ids.append(model_client_edge_id)
            response_payload = {"slave_device_id": self.model_device_client_edge_id_list[0],
                                "slave_device_id_list": salve_device_ids,
                                "master_device_id": self.model_device_server_id,
                                "run_id": run_id, "edge_id": listen_edge_id,
                                "edge_info": device_info_json}
            if context is not None:
                response_payload["context"] = context
            self.message_center.send_message(response_topic, json.dumps(response_payload), run_id=run_id)
-    
    def callback_report_device_info(self, topic, payload):
        """Answer a server's device-info request on the client/server channel.

        Same payload shape as the MLOps variant but the response is addressed to
        the requesting server via ``client/server/response_device_info/<server_id>``.
        Only responds when the metrics client and the deploy device ids have been
        initialized.
        """
        payload_json = json.loads(payload)
        server_id = payload_json.get("server_id", 0)
        run_id = payload_json.get("run_id", 0)
        # The requested edge id is the last path segment of the topic.
        listen_edge_id = str(topic).split("/")[-1]
        context = payload_json.get("context", None)
        need_gpu_info = payload_json.get("need_gpu_info", False)
        need_running_process_list = payload_json.get("need_running_process_list", False)
        response_topic = f"client/server/response_device_info/{server_id}"
        if self.mlops_metrics is not None and self.model_device_client_edge_id_list is not None and \
                self.model_device_server_id is not None:
            if not need_gpu_info:
                device_info_json = {
                    "edge_id": listen_edge_id,
                    "fedml_version": fedml.__version__,
                    "user_id": self.args.user
                }
            else:
                # NOTE(review): "cup_utilization" is a typo for cpu_utilization
                # (local name only; the reported key below is "cpuUtilization").
                total_mem, free_mem, total_disk_size, free_disk_size, cup_utilization, cpu_cores, gpu_cores_total, \
                    gpu_cores_available, sent_bytes, recv_bytes, gpu_available_ids = sys_utils.get_sys_realtime_stats()
                host_ip = sys_utils.get_host_ip()
                host_port = sys_utils.get_available_port()
                # Prefer the scheduler's view of available GPUs over the raw stats.
                gpu_available_ids = JobRunnerUtils.get_available_gpu_id_list(self.edge_id)
                gpu_available_ids = JobRunnerUtils.trim_unavailable_gpu_ids(gpu_available_ids)
                gpu_cores_available = len(gpu_available_ids)
                gpu_list = sys_utils.get_gpu_list()
                device_info_json = {
                    "edge_id": listen_edge_id,
                    "memoryTotal": round(total_mem * MLOpsUtils.BYTES_TO_GB, 2),
                    "memoryAvailable": round(free_mem * MLOpsUtils.BYTES_TO_GB, 2),
                    "diskSpaceTotal": round(total_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
                    "diskSpaceAvailable": round(free_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
                    "cpuUtilization": round(cup_utilization, 2),
                    "cpuCores": cpu_cores,
                    "gpuCoresTotal": gpu_cores_total,
                    "gpuCoresAvailable": gpu_cores_available,
                    "gpu_available_ids": gpu_available_ids,
                    "gpu_list": gpu_list,
                    "node_ip": host_ip,
                    "node_port": host_port,
                    "networkTraffic": sent_bytes + recv_bytes,
                    "updateTime": int(MLOpsUtils.get_ntp_time()),
                    "fedml_version": fedml.__version__,
                    "user_id": self.args.user
                }
            if need_running_process_list:
                device_info_json["run_process_list_map"] = self.get_all_run_process_list_map()
            # NOTE(review): "salve_device_ids" is a typo for slave_device_ids
            # (local name only; the payload keys use the intended spelling).
            salve_device_ids = list()
            for model_client_edge_id in self.model_device_client_edge_id_list:
                salve_device_ids.append(model_client_edge_id)
            response_payload = {"slave_device_id": self.model_device_client_edge_id_list[0],
                                "slave_device_id_list": salve_device_ids,
                                "master_device_id": self.model_device_server_id,
                                "run_id": run_id, "edge_id": listen_edge_id,
                                "edge_info": device_info_json}
            if context is not None:
                response_payload["context"] = context
            self.message_center.send_message(response_topic, json.dumps(response_payload), run_id=run_id)
-
-    def callback_client_logout(self, topic, payload):
-        payload_json = json.loads(payload)
-        secret = payload_json.get("auth", None)
-        if secret is None or str(secret) != "246b1be6-0eeb-4b17-b118-7d74de1975d4":
-            return
-        logging.info("Received the logout request.")
-        if self.run_process_event is not None:
-            self.run_process_event.set()
-        if self.run_process_completed_event is not None:
-            self.run_process_completed_event.set()
-        self.disable_client_login = True
-        time.sleep(3)
-        os.system("fedml logout")
-
    def save_training_status(self, edge_id, training_status):
        """Cache the latest training status and persist it for the given edge id."""
        self.current_training_status = training_status
        ClientConstants.save_training_infos(edge_id, training_status)
-
-    @staticmethod
-    def get_gpu_machine_id():
-        gpu_list = sys_utils.get_gpu_list()
-        gpu_uuids = ""
-        if len(gpu_list) > 0:
-            for gpu in gpu_list:
-                gpu_uuids += gpu.get("uuid", "")
-        else:
-            gpu_uuids = str(uuid.uuid4())
-        device_id_combination = \
-            f"{FedMLClientRunner.get_machine_id()}-{hex(uuid.getnode())}-{gpu_uuids}"
-        device_id = security_utils.get_content_hash(device_id_combination)
-        return device_id
-
-    @staticmethod
-    def get_device_id(use_machine_id=False):
-        device_file_path = os.path.join(ClientConstants.get_data_dir(),
-                                        ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME)
-        file_for_device_id = os.path.join(device_file_path, "devices.id")
-        if not os.path.exists(device_file_path):
-            os.makedirs(device_file_path, exist_ok=True)
-        elif os.path.exists(file_for_device_id):
-            with open(file_for_device_id, 'r', encoding='utf-8') as f:
-                device_id_from_file = f.readline()
-                if device_id_from_file is not None and device_id_from_file != "":
-                    return device_id_from_file
-
-        if platform.system() == "Darwin":
-            cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \
-                                 "|awk -F':' '{print $2}' "
-            device_id = os.popen(cmd_get_serial_num).read()
-            device_id = device_id.replace('\n', '').replace(' ', '')
-            if device_id is None or device_id == "":
-                if not use_machine_id:
-                    device_id = hex(uuid.getnode())
-                else:
-                    device_id = FedMLClientRunner.get_gpu_machine_id()
-            else:
-                device_id = "0x" + device_id
-        else:
-            if "nt" in os.name:
-
-                def get_uuid():
-                    guid = ""
-                    try:
-                        cmd = "wmic csproduct get uuid"
-                        guid = str(subprocess.check_output(cmd))
-                        pos1 = guid.find("\\n") + 2
-                        guid = guid[pos1:-15]
-                    except Exception as ex:
-                        logging.error(f"Failed to get uuid with Exception {ex}. Traceback: {traceback.format_exc()}")
-                        pass
-                    return str(guid)
-
-                device_id = str(get_uuid())
-                logging.info(device_id)
-            elif "posix" in os.name:
-                device_id = sys_utils.get_device_id_in_docker()
-                if device_id is None:
-                    if not use_machine_id:
-                        device_id = hex(uuid.getnode())
-                    else:
-                        device_id = device_id = FedMLClientRunner.get_gpu_machine_id()
-            else:
-                device_id = sys_utils.run_subprocess_open(
-                    "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split()
-                )
-                device_id = hex(device_id)
-
-        if device_id is not None and device_id != "":
-            with open(file_for_device_id, 'w', encoding='utf-8') as f:
-                f.write(device_id)
-        else:
-            device_id = hex(uuid.uuid4())
-            with open(file_for_device_id, 'w', encoding='utf-8') as f:
-                f.write(device_id)
-
-        return device_id
-
-    @staticmethod
-    def get_machine_id():
-        try:
-            import machineid
-            return machineid.id().replace('\n', '').replace('\r\n', '').strip()
-        except Exception as e:
-            logging.error(f"Failed to get machine id with Exception {e}. Traceback: {traceback.format_exc()}")
-            return hex(uuid.getnode())
-
-    @staticmethod
-    def bind_account_and_device_id(url, account_id, device_id, os_name, api_key="", role="client"):
-        ip = requests.get('https://checkip.amazonaws.com').text.strip()
-        fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \
-            cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \
-            gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info()
-        host_name = sys_utils.get_host_name()
-        json_params = {
-            "accountid": account_id,
-            "deviceid": device_id,
-            "type": os_name,
-            "state": ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE,
-            "status": ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE,
-            "processor": cpu_info,
-            "core_type": cpu_info,
-            "network": "",
-            "role": role,
-            "os_ver": os_ver,
-            "memory": total_mem,
-            "ip": ip,
-            "api_key": api_key,
-            "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver,
-                            "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver,
-                            "mpi_installed": mpi_installed, "cpu_usage": cpu_usage,
-                            "available_mem": available_mem, "total_mem": total_mem,
-                            "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name}
-        }
-        if gpu_count > 0:
-            if gpu_total_mem is not None:
-                json_params["gpu"] = gpu_info if gpu_info is not None else "" + ", Total GPU Memory: " + gpu_total_mem
-            else:
-                json_params["gpu"] = gpu_info if gpu_info is not None else ""
-            json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else ""
-            if gpu_available_mem is not None:
-                json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem
-            if gpu_total_mem is not None:
-                json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem
-
-            json_params["extra_infos"]["gpu_count"] = gpu_count
-            json_params["extra_infos"]["gpu_vendor"] = gpu_vendor
-            json_params["extra_infos"]["gpu_device_name"] = gpu_device_name
-
-            gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count)
-            gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0
-            gpu_list = sys_utils.get_gpu_list()
-            json_params["extra_infos"]["gpu_available_count"] = gpu_available_count
-            json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list
-            json_params["extra_infos"]["gpu_list"] = gpu_list
-        else:
-            json_params["gpu"] = "None"
-            json_params["extra_infos"]["gpu_available_count"] = 0
-            json_params["extra_infos"]["gpu_available_id_list"] = []
-            json_params["extra_infos"]["gpu_list"] = []
-
-        _, cert_path = MLOpsConfigs.get_request_params()
-        if cert_path is not None:
-            try:
-                requests.session().verify = cert_path
-                response = requests.post(
-                    url, json=json_params, verify=True,
-                    headers={"content-type": "application/json", "Connection": "close"}
-                )
-            except requests.exceptions.SSLError as err:
-                logging.error(
-                    f"Failed to bind account and device id with error: {err}, traceback: {traceback.format_exc()}")
-                MLOpsConfigs.install_root_ca_file()
-                response = requests.post(
-                    url, json=json_params, verify=True,
-                    headers={"content-type": "application/json", "Connection": "close"}
-                )
-        else:
-            response = requests.post(url, json=json_params, headers={"Connection": "close"})
-        edge_id, user_name, extra_url, general_edge_id = -1, None, None, None
-        if response.status_code != 200:
-            print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                  f"response.content: {response.content}")
-            pass
-        else:
-            # print("url = {}, response = {}".format(url, response))
-            status_code = response.json().get("code")
-            if status_code == "SUCCESS":
-                edge_id = response.json().get("data").get("id")
-                user_name = response.json().get("data").get("userName", None)
-                extra_url = response.json().get("data").get("url", None)
-                general_edge_id = response.json().get("data").get("general_edge_id", None)
-                if edge_id is None or edge_id <= 0:
-                    print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                          f"response.content: {response.content}")
-            else:
-                if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR:
-                    raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR)
-                print(f"Binding to MLOps with response.status_code = {response.status_code}, "
-                      f"response.content: {response.content}")
-                return -1, None, None, None
-        return edge_id, user_name, extra_url, general_edge_id
-
    def fetch_configs(self):
        """Fetch all service configs (mqtt, s3, mlops, ...) from the MLOps config endpoint."""
        return MLOpsConfigs.fetch_all_configs()
-
-    def send_agent_active_msg(self, edge_id):
-        active_topic = "flclient_agent/active"
-        status = MLOpsStatus.get_instance().get_client_agent_status(edge_id)
-        if (
-                status is not None
-                and status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE
-                and status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE
-        ):
-            return
-
-        try:
-            current_job = FedMLClientDataInterface.get_instance().get_job_by_id(self.run_id)
-        except Exception as e:
-            logging.error(f"Failed to get current job with Exception {e}. Traceback: {traceback.format_exc()}")
-            current_job = None
-        if current_job is None:
-            if status is not None and status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE:
-                status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE
-            else:
-                return
-        else:
-            status = ClientConstants.get_device_state_from_run_edge_state(current_job.status)
-        active_msg = {"ID": edge_id, "status": status}
-        MLOpsStatus.get_instance().set_client_agent_status(edge_id, status)
-        self.mqtt_mgr.send_message_json(active_topic, json.dumps(active_msg))
-        logging.info(f"Send agent active msg {active_msg}")
-
-    def recover_start_train_msg_after_upgrading(self):
-        try:
-            current_job = FedMLClientDataInterface.get_instance().get_current_job()
-            if current_job is not None and \
-                    current_job.status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING:
-                logging.info("start training after upgrading.")
-                topic_start_train = "flserver_agent/" + str(self.edge_id) + "/start_train"
-                self.callback_start_train(topic_start_train, current_job.running_json)
-        except Exception as e:
-            logging.error(f"recover starting train message after upgrading failed with exception {e}, "
-                          f"Traceback {traceback.format_exc()}")
-
    def on_agent_mqtt_connected(self, mqtt_client_object):
        """Wire up all MQTT topics once the agent's MQTT connection is established.

        For each topic: register the callback in the message dispatch center,
        attach the dispatch-center listener to the MQTT manager, subscribe with
        QoS 2, and record the topic in ``self.subscribed_topics``. Afterwards it
        subscribes FL-specific topics, broadcasts the first active message,
        prints the connection summary, syncs deploy ids, and starts the
        listener side of the message center.
        """
        # The MQTT message topic format is as follows: <sender>/<receiver>/<action>

        # Setup MQTT message listener for starting training
        topic_start_train = "flserver_agent/" + str(self.edge_id) + "/start_train"
        self.add_message_listener(topic_start_train, self.callback_start_train)
        self.mqtt_mgr.add_message_listener(topic_start_train, self.listener_message_dispatch_center)

        # Setup MQTT message listener for stopping training
        topic_stop_train = "flserver_agent/" + str(self.edge_id) + "/stop_train"
        self.add_message_listener(topic_stop_train, self.callback_stop_train)
        self.mqtt_mgr.add_message_listener(topic_stop_train, self.listener_message_dispatch_center)


        # Setup MQTT message listener for client status switching
        topic_client_status = "fl_client/flclient_agent_" + str(self.edge_id) + "/status"
        self.add_message_listener(topic_client_status, self.callback_runner_id_status)
        self.mqtt_mgr.add_message_listener(topic_client_status, self.listener_message_dispatch_center)

        # Setup MQTT message listener to report current device status.
        topic_report_status = "mlops/report_device_status"
        self.add_message_listener(topic_report_status, self.callback_report_current_status)
        self.mqtt_mgr.add_message_listener(topic_report_status, self.listener_message_dispatch_center)

        # Setup MQTT message listener to OTA messages from the MLOps.
        topic_ota_msg = "mlops/flclient_agent_" + str(self.edge_id) + "/ota"
        self.add_message_listener(topic_ota_msg, self.callback_client_ota_msg)
        self.mqtt_mgr.add_message_listener(topic_ota_msg, self.listener_message_dispatch_center)

        # Setup MQTT message listener to OTA messages from the MLOps.
        topic_request_device_info = "server/client/request_device_info/" + str(self.edge_id)
        self.add_message_listener(topic_request_device_info, self.callback_report_device_info)
        self.mqtt_mgr.add_message_listener(topic_request_device_info, self.listener_message_dispatch_center)

        topic_request_edge_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.edge_id}"
        self.add_message_listener(topic_request_edge_device_info_from_mlops, self.response_device_info_to_mlops)
        self.mqtt_mgr.add_message_listener(topic_request_edge_device_info_from_mlops, self.listener_message_dispatch_center)

        # Deploy master/slave topics are optional: only wired when the ids exist.
        topic_request_deploy_master_device_info_from_mlops = None
        if self.model_device_server_id is not None:
            topic_request_deploy_master_device_info_from_mlops = f"deploy/mlops/master_agent/request_device_info/{self.model_device_server_id}"
            self.add_message_listener(topic_request_deploy_master_device_info_from_mlops, self.response_device_info_to_mlops)
            self.mqtt_mgr.add_message_listener(topic_request_deploy_master_device_info_from_mlops, self.listener_message_dispatch_center)

        topic_request_deploy_slave_device_info_from_mlops = None
        if self.model_device_client_edge_id_list is not None and len(self.model_device_client_edge_id_list) > 0:
            topic_request_deploy_slave_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.model_device_client_edge_id_list[0]}"
            self.add_message_listener(topic_request_deploy_slave_device_info_from_mlops, self.response_device_info_to_mlops)
            self.mqtt_mgr.add_message_listener(topic_request_deploy_slave_device_info_from_mlops, self.listener_message_dispatch_center)

        # Setup MQTT message listener to logout from MLOps.
        topic_client_logout = "mlops/client/logout/" + str(self.edge_id)
        self.add_message_listener(topic_client_logout, self.callback_client_logout)
        self.mqtt_mgr.add_message_listener(topic_client_logout, self.listener_message_dispatch_center)

        # Subscribe topics for starting train, stopping train and fetching client status.
        mqtt_client_object.subscribe(topic_start_train, qos=2)
        mqtt_client_object.subscribe(topic_stop_train, qos=2)
        mqtt_client_object.subscribe(topic_client_status, qos=2)
        mqtt_client_object.subscribe(topic_report_status, qos=2)
        mqtt_client_object.subscribe(topic_ota_msg, qos=2)
        mqtt_client_object.subscribe(topic_request_device_info, qos=2)
        mqtt_client_object.subscribe(topic_request_edge_device_info_from_mlops, qos=2)
        if topic_request_deploy_master_device_info_from_mlops is not None:
            mqtt_client_object.subscribe(topic_request_deploy_master_device_info_from_mlops, qos=2)
        if topic_request_deploy_slave_device_info_from_mlops is not None:
            mqtt_client_object.subscribe(topic_request_deploy_slave_device_info_from_mlops, qos=2)
        mqtt_client_object.subscribe(topic_client_logout, qos=2)

        # Remember every subscription so it can be replayed/cleaned up later.
        self.subscribed_topics.clear()
        self.subscribed_topics.append(topic_start_train)
        self.subscribed_topics.append(topic_stop_train)
        self.subscribed_topics.append(topic_client_status)
        self.subscribed_topics.append(topic_report_status)
        self.subscribed_topics.append(topic_ota_msg)
        self.subscribed_topics.append(topic_request_device_info)
        self.subscribed_topics.append(topic_request_edge_device_info_from_mlops)
        if topic_request_deploy_master_device_info_from_mlops is not None:
            self.subscribed_topics.append(topic_request_deploy_master_device_info_from_mlops)
        if topic_request_deploy_slave_device_info_from_mlops is not None:
            self.subscribed_topics.append(topic_request_deploy_slave_device_info_from_mlops)
        self.subscribed_topics.append(topic_client_logout)

        # Subscribe the messages for federated learning.
        self.subscribe_fl_msgs()

        # Broadcast the first active message.
        self.send_agent_active_msg(self.edge_id)
        if self.general_edge_id is not None:
            self.send_agent_active_msg(self.general_edge_id)

        # Echo results
        # NOTE(review): "modeld_device_clint" is a local-name typo for
        # model_device_client; also, self.model_device_server is assumed to be
        # non-None here — TODO confirm it is always set before MQTT connects.
        MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout()
        worker_deploy_id_list = [modeld_device_clint.edge_id for index, modeld_device_clint in
                                 enumerate(self.model_device_client_list)]
        print("\nCongratulations, your device is connected to the FedML MLOps platform successfully!")
        print(f"Your FedML Edge ID is {str(self.edge_id)}, unique device ID is {str(self.unique_device_id)}, "
              f"master deploy ID is {str(self.model_device_server.edge_id)}, "
              f"worker deploy ID is {worker_deploy_id_list}"
              )
        if self.edge_extra_url is not None and self.edge_extra_url != "":
            print(f"You may visit the following url to fill in more information with your device.\n"
                  f"{self.edge_extra_url}")
        MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout(enable=False)

        from fedml.core.mlops import sync_deploy_id
        sync_deploy_id(
            self.edge_id, self.model_device_server.edge_id, worker_deploy_id_list)

        # Start the message center for listener
        self.start_listener(sender_message_queue=self.message_center.get_message_queue(),
                            agent_config=self.agent_config)
-
-    def subscribe_fl_msgs(self):
-        if self.general_edge_id is None:
-            return
-
-        # Setup MQTT message listener for starting training
-        topic_start_train = "flserver_agent/" + str(self.general_edge_id) + "/start_train"
-        self.add_message_listener(topic_start_train, self.callback_start_train)
-        self.mqtt_mgr.add_message_listener(topic_start_train, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener for stopping training
-        topic_stop_train = "flserver_agent/" + str(self.general_edge_id) + "/stop_train"
-        self.add_message_listener(topic_stop_train, self.callback_stop_train)
-        self.mqtt_mgr.add_message_listener(topic_stop_train, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener for client status switching
-        topic_client_status = "fl_client/flclient_agent_" + str(self.general_edge_id) + "/status"
-        self.add_message_listener(topic_client_status, self.callback_runner_id_status)
-        self.mqtt_mgr.add_message_listener(topic_client_status, self.listener_message_dispatch_center)
-
-        # Setup MQTT message listener to OTA messages from the MLOps.
-        topic_request_device_info = "server/client/request_device_info/" + str(self.general_edge_id)
-        self.add_message_listener(topic_request_device_info, self.callback_report_device_info)
-        self.mqtt_mgr.add_message_listener(topic_request_device_info, self.listener_message_dispatch_center)
-
-        topic_request_device_info_from_mlops = f"deploy/mlops/client_agent/request_device_info/{self.general_edge_id}"
-        self.add_message_listener(topic_request_device_info_from_mlops, self.response_device_info_to_mlops)
-        self.mqtt_mgr.add_message_listener(topic_request_device_info_from_mlops, self.listener_message_dispatch_center)
-
-        # Subscribe topics for starting train, stopping train and fetching client status.
-        self.mqtt_mgr.subscribe_msg(topic_start_train)
-        self.mqtt_mgr.subscribe_msg(topic_stop_train)
-        self.mqtt_mgr.subscribe_msg(topic_client_status)
-        self.mqtt_mgr.subscribe_msg(topic_request_device_info)
-        self.mqtt_mgr.subscribe_msg(topic_request_device_info_from_mlops)
-
-        self.subscribed_topics.append(topic_start_train)
-        self.subscribed_topics.append(topic_stop_train)
-        self.subscribed_topics.append(topic_client_status)
-        self.subscribed_topics.append(topic_request_device_info)
-        self.subscribed_topics.append(topic_request_device_info_from_mlops)
-
-    def on_agent_mqtt_disconnected(self, mqtt_client_object):
-        MLOpsStatus.get_instance().set_client_agent_status(
-            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE
-        )
-        pass
-
-    def setup_agent_mqtt_connection(self, service_config):
-        # Setup MQTT connection
-        self.mqtt_mgr = MqttManager(
-            service_config["mqtt_config"]["BROKER_HOST"],
-            service_config["mqtt_config"]["BROKER_PORT"],
-            service_config["mqtt_config"]["MQTT_USER"],
-            service_config["mqtt_config"]["MQTT_PWD"],
-            service_config["mqtt_config"]["MQTT_KEEPALIVE"],
-            f"FedML_ClientAgent_Daemon_@{self.user_name}@_@{self.args.current_device_id}@_@{str(uuid.uuid4())}@",
-            "flclient_agent/last_will_msg",
-            json.dumps({"ID": self.edge_id, "status": ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE})
-        )
-        self.agent_config = service_config
-
-        # Init local database
-        FedMLClientDataInterface.get_instance().create_job_table()
-
-        # Start the message center to process edge related messages.
-        self.setup_message_center()
-
-        # Start local API services
-        client_api_cmd = "fedml.computing.scheduler.slave.client_api:api"
-        client_api_pids = RunProcessUtils.get_pid_from_cmd_line(client_api_cmd)
-        if client_api_pids is None or len(client_api_pids) <= 0:
-            python_program = get_python_program()
-            cur_dir = os.path.dirname(__file__)
-            fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
-            self.local_api_process = ClientConstants.exec_console_with_script(
-                "{} -m uvicorn {} --host 0.0.0.0 --port {} "
-                "--reload --reload-delay 3 --reload-dir {} --log-level critical".format(
-                    python_program, client_api_cmd, ClientConstants.LOCAL_CLIENT_API_PORT, fedml_base_dir),
-                should_capture_stdout=False,
-                should_capture_stderr=False
-            )
-            # if self.local_api_process is not None and self.local_api_process.pid is not None:
-            #     print(f"Client local API process id {self.local_api_process.pid}")
-
-        # Setup MQTT connected listener
-        self.mqtt_mgr.add_connected_listener(self.on_agent_mqtt_connected)
-        self.mqtt_mgr.add_disconnected_listener(self.on_agent_mqtt_disconnected)
-        self.mqtt_mgr.connect()
-
-        # Report the IDLE status to MLOps
-        self.mlops_metrics.report_client_training_status(
-            self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE)
-        MLOpsStatus.get_instance().set_client_agent_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE)
-
-        # MLOpsRuntimeLogDaemon.get_instance(self.args).stop_all_log_processor()
-        self.recover_start_train_msg_after_upgrading()
-
-        infer_host = os.getenv("FEDML_INFER_HOST", None)
-        infer_redis_addr = os.getenv("FEDML_INFER_REDIS_ADDR", None)
-        infer_redis_port = os.getenv("FEDML_INFER_REDIS_PORT", None)
-        infer_redis_password = os.getenv("FEDML_INFER_REDIS_PASSWORD", None)
-        model_client_num = os.getenv("FEDML_MODEL_WORKER_NUM", None)
-        os.environ["FEDML_CURRENT_EDGE_ID"] = str(self.edge_id)
-
-        if not ComputeCacheManager.get_instance().set_redis_params():
-            os.environ["FEDML_DISABLE_REDIS_CONNECTION"] = "1"
-
-        if self.model_device_client_edge_id_list is None:
-            self.model_device_client_edge_id_list = list()
-        if self.model_device_client_list is None:
-            model_client_num = 1 if model_client_num is None else int(model_client_num)
-            self.model_device_client_list = list()
-            for client_index in range(model_client_num):
-                model_device_client = FedMLModelDeviceClientRunner(
-                    self.args, f"{self.args.current_device_id}_{client_index + 1}", self.args.os_name,
-                    self.args.is_from_docker, self.agent_config)
-                if infer_host is not None:
-                    model_device_client.infer_host = infer_host
-                if infer_redis_addr is not None:
-                    model_device_client.redis_addr = infer_redis_addr
-                if infer_redis_port is not None:
-                    model_device_client.redis_port = infer_redis_port
-                if infer_redis_password is not None:
-                    model_device_client.redis_password = infer_redis_password
-                model_device_client.start()
-                self.model_device_client_list.append(model_device_client)
-                self.model_device_client_edge_id_list.append(model_device_client.get_edge_id())
-
-        if self.model_device_server is None:
-            self.model_device_server = FedMLModelDeviceServerRunner(self.args, self.args.current_device_id,
-                                                                    self.args.os_name, self.args.is_from_docker,
-                                                                    self.agent_config)
-            if infer_host is not None:
-                self.model_device_server.infer_host = infer_host
-            if infer_redis_addr is not None:
-                self.model_device_server.redis_addr = infer_redis_addr
-            if infer_redis_port is not None:
-                self.model_device_server.redis_port = infer_redis_port
-            if infer_redis_password is not None:
-                self.model_device_server.redis_password = infer_redis_password
-
-            self.model_device_server.start()
-            self.model_device_server_id = self.model_device_server.get_edge_id()
-
-        JobCleanup.get_instance().sync_data_on_startup(self.edge_id)
-
-        os.environ["FEDML_DEPLOY_MASTER_ID"] = str(self.model_device_server.get_edge_id())
-        os.environ["FEDML_DEPLOY_WORKER_IDS"] = str([client.get_edge_id() for client in self.model_device_client_list])
-        self.mlops_metrics.stop_device_realtime_perf()
-        self.mlops_metrics.report_device_realtime_perf(self.args, service_config["mqtt_config"])
-
-    def start_agent_mqtt_loop(self):
-        # Start MQTT message loop
-        try:
-            self.mqtt_mgr.loop_forever()
-        except Exception as e:
-            logging.error(f"Errors in the MQTT loop: Exception {e}, Traceback: {traceback.format_exc()}")
-            if str(e) == "Restarting after upgraded...":
-                logging.info("Restarting after upgraded...")
-            else:
-                logging.info("Client tracing: {}".format(traceback.format_exc()))
-        finally:
-            print("finally")
-            login_exit_file = os.path.join(ClientConstants.get_log_file_dir(), "exited.log")
-            with open(login_exit_file, "w") as f:
-                f.writelines(f"{os.getpid()}.")
-
-            self.stop_agent()
-
-            time.sleep(5)
-            sys_utils.cleanup_all_fedml_client_login_processes(
-                ClientConstants.CLIENT_LOGIN_PROGRAM, clean_process_group=False)
-            sys.exit(1)
-
-    def stop_agent(self):
-        if self.run_process_event is not None:
-            self.run_process_event.set()
-
-        if self.model_device_server is not None:
-            self.model_device_server.stop()
-            self.model_device_server = None
-
-        if self.model_device_client_list is not None:
-            for model_client in self.model_device_client_list:
-                model_client.stop()
-            self.model_device_client_list.clear()
-            self.model_device_client_list = None
-
-        if self.mqtt_mgr is not None:
-            try:
-                for topic in self.subscribed_topics:
-                    self.mqtt_mgr.unsubscribe_msg(topic)
-            except Exception as e:
-                logging.error(f"Unsubscribe topics error: {e}, Traceback: {traceback.format_exc()}")
-                pass
-
-            self.mqtt_mgr.loop_stop()
-            self.mqtt_mgr.disconnect()
-
-        self.release_message_center()
-
-    def get_runner(self):
-        runner = FedMLClientRunner(
-            self.args, edge_id=self.edge_id, request_json=self.request_json,
-            agent_config=self.agent_config, run_id=self.run_id,
-            cuda_visible_gpu_ids_str=self.cuda_visible_gpu_ids_str
-        )
-        runner.edge_user_name = self.user_name
-        runner.edge_extra_url = self.edge_extra_url
-        runner.unique_device_id = self.unique_device_id
-        runner.user_name = self.user_name
-        runner.general_edge_id = self.general_edge_id
-        runner.model_device_client_edge_id_list = self.model_device_client_edge_id_list
-        runner.model_device_server_id = self.model_device_server_id
-        return runner

From 493463e3b4002edb929df966cd1dd409b3a60522 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Thu, 6 Jun 2024 19:14:28 +0000
Subject: [PATCH 123/282] [Deploy] Recursively find the model serving package
 folder

---
 .../scheduler/comm_utils/file_utils.py        | 13 ++++++
 .../model_scheduler/worker_job_runner.py      | 40 ++++++-------------
 2 files changed, 25 insertions(+), 28 deletions(-)
 create mode 100644 python/fedml/computing/scheduler/comm_utils/file_utils.py

diff --git a/python/fedml/computing/scheduler/comm_utils/file_utils.py b/python/fedml/computing/scheduler/comm_utils/file_utils.py
new file mode 100644
index 0000000000..1d8fc6ca83
--- /dev/null
+++ b/python/fedml/computing/scheduler/comm_utils/file_utils.py
@@ -0,0 +1,13 @@
+import os
+
+
+def find_file_inside_folder(folder_path, file_name):
+    """
+    Recursively search for a file inside a folder and its sub-folders.
+    Return the full path of the file if found, otherwise return None.
+    """
+    for root, dirs, files in os.walk(folder_path):
+        if file_name in files:
+            return os.path.join(root, file_name)
+
+    return None
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
index 348b760153..3c357e9dab 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
@@ -10,6 +10,7 @@
 import yaml
 from fedml.computing.scheduler.comm_utils.job_utils import JobRunnerUtils
 from fedml.core.mlops import MLOpsRuntimeLog
+from fedml.computing.scheduler.comm_utils import file_utils
 from .device_client_constants import ClientConstants
 from .device_model_cache import FedMLModelCache
 from ..scheduler_core.general_constants import GeneralConstants
@@ -205,7 +206,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
         # Check if the package is already downloaded
         unzip_package_path = ""
         if os.path.exists(os.path.join(models_root_dir, parent_fd)):
-            unzip_package_path = self.find_previous_downloaded_pkg(os.path.join(models_root_dir, parent_fd), model_name)
+            unzip_package_path = self.find_previous_downloaded_pkg(os.path.join(models_root_dir, parent_fd))
 
         # Download the package if not found
         if unzip_package_path == "":
@@ -510,30 +511,13 @@ def build_dynamic_constrain_variables(self, run_id, run_config):
         pass
 
     @staticmethod
-    def find_previous_downloaded_pkg(parent_dir: str, model_name: str) -> str:
-        unzip_fd = ""
-        res = ""
-
-        for folder in os.listdir(parent_dir):
-            if folder.startswith("unzip_fedml_run"):
-                unzip_fd = os.path.join(parent_dir, folder)
-                break
-
-        exact_matched = False
-
-        if unzip_fd == "":
-            return res
-
-        for folder in os.listdir(unzip_fd):
-            if folder == model_name:
-                res = os.path.join(unzip_fd, folder)
-                exact_matched = True
-                break
-
-        if not exact_matched:
-            # Use the first folder found
-            for folder in os.listdir(unzip_fd):
-                res = os.path.join(unzip_fd, folder)
-                break
-
-        return res
+    def find_previous_downloaded_pkg(parent_dir: str) -> str:
+        """
+        Find a folder inside parent_dir that contains the fedml_model_config.yaml file.
+        """
+        res = file_utils.find_file_inside_folder(parent_dir, ClientConstants.MODEL_REQUIRED_MODEL_CONFIG_FILE)
+        if res is not None:
+            # return the parent folder of res
+            return os.path.dirname(res)
+        else:
+            return ""

From b4cb7c56a68f4a589c9c09e5092914be7dd2b423 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@fedml.ai>
Date: Thu, 6 Jun 2024 16:01:10 -0400
Subject: [PATCH 124/282] Making sure the unzipped file is a directory during
 initial deployment.

---
 .../scheduler_core/scheduler_base_job_runner.py          | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
index 648ab18cf1..5e7a71f25a 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -138,10 +138,13 @@ def get_client_id_list(self, server_edge_id_list):
 
     @staticmethod
     def unzip_file(zip_file, unzip_file_path) -> str:
+        unzipped_file_name = ""
         if zipfile.is_zipfile(zip_file):
-            with zipfile.ZipFile(zip_file, "r") as zipf:
+            with (zipfile.ZipFile(zip_file, "r") as zipf):
                 zipf.extractall(unzip_file_path)
-                unzipped_file_name = zipf.namelist()[0]
+                # Make sure the unzipped file is a directory.
+                if zipf.namelist()[0].endswith("/"):
+                    unzipped_file_name = zipf.namelist()[0]
         else:
             raise Exception("Invalid zip file {}".format(zip_file))
 
@@ -156,7 +159,7 @@ def package_download_progress(self, count, blksize, filesize):
         progress_int = int(progress)
         downloaded_kb = format(downloaded / 1024, '.2f')
 
-        # since this hook funtion is stateless, we need a state to avoid print progress repeatly
+        # Since this hook function is stateless, we need a state to avoid print progress repeatedly.
         if count == 0:
             self.prev_download_progress = 0
         if progress_int != self.prev_download_progress and progress_int % 5 == 0:

From f76d88ee5eb6e2c4d2e9a05be83c48de1a020659 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Thu, 6 Jun 2024 22:53:56 +0000
Subject: [PATCH 125/282] [Deploy] Hot fix grammar.

---
 .../scheduler/scheduler_core/scheduler_base_job_runner.py       | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
index 5e7a71f25a..6e0010f556 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -140,7 +140,7 @@ def get_client_id_list(self, server_edge_id_list):
     def unzip_file(zip_file, unzip_file_path) -> str:
         unzipped_file_name = ""
         if zipfile.is_zipfile(zip_file):
-            with (zipfile.ZipFile(zip_file, "r") as zipf):
+            with zipfile.ZipFile(zip_file, "r") as zipf:
                 zipf.extractall(unzip_file_path)
                 # Make sure the unzipped file is a directory.
                 if zipf.namelist()[0].endswith("/"):

From 4b1127052dff23ce58956918571d680ea9c7ba9f Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Thu, 6 Jun 2024 16:45:31 -0700
Subject: [PATCH 126/282] Hot fix to support local debugging

---
 python/fedml/api/modules/device.py            | 22 ++++++-------------
 .../scheduler/comm_utils/constants.py         |  6 +++++
 .../model_scheduler/device_model_inference.py |  9 ++++----
 3 files changed, 18 insertions(+), 19 deletions(-)

diff --git a/python/fedml/api/modules/device.py b/python/fedml/api/modules/device.py
index 497fde9005..77f7eb2424 100644
--- a/python/fedml/api/modules/device.py
+++ b/python/fedml/api/modules/device.py
@@ -7,6 +7,7 @@
 import fedml
 from fedml.api.modules.constants import ModuleConstants
 from fedml.computing.scheduler.comm_utils import sys_utils
+from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants
 from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils
 from fedml.computing.scheduler.master.server_constants import ServerConstants
 from fedml.computing.scheduler.master.server_login import logout as server_logout
@@ -23,11 +24,6 @@ def bind(
     device_id = "0"
     os_name = ""
     docker = None
-    docker_rank = 1
-    infer_host = "127.0.0.1"
-    redis_addr = "local"
-    redis_port = "6379"
-    redis_password = "fedml_default"
     role = ""
     is_client = computing
     is_server = server
@@ -47,26 +43,22 @@ def bind(
     _bind(
         userid, computing, server,
         api_key, role, runner_cmd, device_id, os_name,
-        docker, docker_rank, infer_host,
-        redis_addr, redis_port, redis_password
-    )
+        docker)
 
 
 def _bind(
         userid, computing, server,
         api_key, role, runner_cmd, device_id, os_name,
-        docker, docker_rank, infer_host,
-        redis_addr, redis_port, redis_password
-):
+        docker):
     fedml.load_env()
     if os.getenv(ModuleConstants.ENV_FEDML_INFER_HOST) is None:
-        fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_HOST, infer_host)
+        fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_HOST, SchedulerConstants.REDIS_INFER_HOST)
     if os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR) is None:
-        fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR, redis_addr)
+        fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR, SchedulerConstants.REDIS_ADDR)
     if os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT) is None:
-        fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT, redis_port)
+        fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT, SchedulerConstants.REDIS_PORT)
     if os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD) is None:
-        fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD, redis_password)
+        fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD, SchedulerConstants.REDIS_ADDR)
 
     url = fedml._get_backend_service()
     platform_name = platform.system()
diff --git a/python/fedml/computing/scheduler/comm_utils/constants.py b/python/fedml/computing/scheduler/comm_utils/constants.py
index 22cb31de45..67b9d8b14b 100644
--- a/python/fedml/computing/scheduler/comm_utils/constants.py
+++ b/python/fedml/computing/scheduler/comm_utils/constants.py
@@ -109,6 +109,12 @@ class SchedulerConstants:
     IMAGE_PULL_POLICY_IF_NOT_PRESENT = "IfNotPresent"
     IMAGE_PULL_POLICY_NEVER = "Never"
 
+    REDIS_INFER_HOST = "127.0.0.1"
+    REDIS_ADDR = "local"
+    REDIS_PORT = "6379"
+    REDIS_PASSWORD = "fedml_default"
+
+
     @staticmethod
     def get_log_source(run_json):
         run_config = run_json.get("run_config", {})
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index 7b3ac1d0bf..7bc7d6f097 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -13,6 +13,7 @@
 
 import fedml
 from fedml.api.modules.constants import ModuleConstants
+from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants
 from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants
 from fedml.computing.scheduler.model_scheduler.device_http_inference_protocol import FedMLHttpInference
 from fedml.computing.scheduler.model_scheduler.device_server_constants import ServerConstants
@@ -27,10 +28,10 @@
 class Settings:
     server_name = "DEVICE_INFERENCE_GATEWAY"
     fedml.load_env()
-    redis_addr = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR)
-    redis_port = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT)
-    redis_password = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD)
-    model_infer_host = os.getenv(ModuleConstants.ENV_FEDML_INFER_HOST)
+    redis_addr = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR, SchedulerConstants.REDIS_ADDR)
+    redis_port = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT, SchedulerConstants.REDIS_PORT)
+    redis_password = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD, SchedulerConstants.REDIS_PASSWORD)
+    model_infer_host = os.getenv(ModuleConstants.ENV_FEDML_INFER_HOST, SchedulerConstants.REDIS_INFER_HOST)
     version = fedml.get_env_version()
     mqtt_config = MLOpsConfigs.fetch_mqtt_config()
 

From 2de8c370bb35684121d8f512bb2f2aecf5624eec Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Thu, 6 Jun 2024 17:12:37 -0700
Subject: [PATCH 127/282] Bug fix

---
 python/fedml/api/modules/device.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/api/modules/device.py b/python/fedml/api/modules/device.py
index 77f7eb2424..a853d538d0 100644
--- a/python/fedml/api/modules/device.py
+++ b/python/fedml/api/modules/device.py
@@ -58,7 +58,7 @@ def _bind(
     if os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT) is None:
         fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT, SchedulerConstants.REDIS_PORT)
     if os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD) is None:
-        fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD, SchedulerConstants.REDIS_ADDR)
+        fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD, SchedulerConstants.REDIS_PASSWORD)
 
     url = fedml._get_backend_service()
     platform_name = platform.system()

From 38bc898388a9cb458c8e2b8d90f3e653f12bdd9a Mon Sep 17 00:00:00 2001
From: bhargav191098 <bhargav3514@gmail.com>
Date: Thu, 6 Jun 2024 23:30:08 -0700
Subject: [PATCH 128/282] Adding sequential uploads & download using presigned
 URL

---
 python/fedml/api/modules/storage.py           | 248 ++++++++++++++++--
 .../scheduler/master/server_constants.py      |  14 +
 2 files changed, 242 insertions(+), 20 deletions(-)

diff --git a/python/fedml/api/modules/storage.py b/python/fedml/api/modules/storage.py
index 51f58539bf..cc93fd6f87 100644
--- a/python/fedml/api/modules/storage.py
+++ b/python/fedml/api/modules/storage.py
@@ -3,6 +3,13 @@
 import shutil
 
 import requests
+import math
+
+import requests.exceptions
+import tqdm
+import sys
+from concurrent.futures import ThreadPoolExecutor
+import concurrent.futures
 from fedml.api.modules.utils import authenticate
 from fedml.core.distributed.communication.s3.remote_storage import S3Storage
 from fedml.core.mlops.mlops_configs import Configs, MLOpsConfigs
@@ -19,6 +26,7 @@ def __init__(self, data: dict):
         self.updatedAt = data.get("updateTime", None)
         self.size = _get_size(data.get("fileSize",None))
         self.tag_list = data.get("tags", None)
+        self.download_url = data.get("fileUrl", None)
 
 
 # Todo (alaydshah): Store service name in metadata
@@ -40,16 +48,16 @@ def upload(data_path, api_key, name, description, tag_list, service, show_progre
     if not archive_path:
         return FedMLResponse(code=ResponseCode.FAILURE, message=message)
 
-    store = _get_storage_service(service)
     name = os.path.splitext(os.path.basename(archive_path))[0] if name is None else name
     file_name = name + ".zip"
     dest_path = os.path.join(user_id, file_name)
     file_size = os.path.getsize(archive_path)
 
-    file_uploaded_url = store.upload_file_with_progress(src_local_path=archive_path, dest_s3_path=dest_path,
-                                                        show_progress=show_progress,
-                                                        out_progress_to_err=out_progress_to_err,
-                                                        progress_desc=progress_desc, metadata=metadata)
+    file_uploaded_url, message = _upload_multipart(api_key, file_name, archive_path, show_progress,
+                                                       out_progress_to_err,
+                                                       progress_desc, metadata)
+
+
     os.remove(archive_path)
     if not file_uploaded_url:
         return FedMLResponse(code=ResponseCode.FAILURE, message=f"Failed to upload file: {archive_path}")
@@ -81,24 +89,36 @@ def download(data_name, api_key, service, dest_path, show_progress=True) -> FedM
     if user_id is None:
         return FedMLResponse(code=ResponseCode.FAILURE, message=message)
 
-    store = _get_storage_service(service)
-    zip_file_name = data_name + ".zip"
-    key = os.path.join(user_id, zip_file_name)
-    path_local = os.path.abspath(zip_file_name)
-    dest_path = os.path.abspath(dest_path) if dest_path else data_name
-    if store.download_file_with_progress(path_s3=key, path_local=path_local, show_progress=show_progress):
-        try:
-            shutil.unpack_archive(path_local, dest_path)
-            os.remove(path_local)
-            abs_dest_path = os.path.abspath(dest_path)
-            return FedMLResponse(code=ResponseCode.SUCCESS, message=f"Successfully downloaded and unzipped data at "
-                                                                    f"{abs_dest_path}", data=abs_dest_path)
-        except Exception as e:
-            error_message = f"Failed to unpack archive: {e}"
+    metadata_response = get_metadata(data_name, api_key)
+    if metadata_response.code == ResponseCode.SUCCESS:
+        metadata = metadata_response.data
+        if not metadata or not isinstance(metadata, StorageMetadata):
+            error_message = f"Unable to get the download URL"
+            logging.error(error_message)
+            return FedMLResponse(code=ResponseCode.FAILURE, message=error_message)
+        download_url = metadata.download_url
+        zip_file_name = data_name + ".zip"
+        path_local = os.path.abspath(zip_file_name)
+        dest_path = os.path.abspath(dest_path) if dest_path else data_name
+        if _download_using_presigned_url(download_url, zip_file_name, show_progress=show_progress):
+            try:
+                shutil.unpack_archive(path_local, dest_path)
+                os.remove(path_local)
+                abs_dest_path = os.path.abspath(dest_path)
+                return FedMLResponse(code=ResponseCode.SUCCESS, message=f"Successfully downloaded and unzipped data at "
+                                                                        f"{abs_dest_path}", data=abs_dest_path)
+            except Exception as e:
+                error_message = f"Failed to unpack archive: {e}"
+                logging.error(error_message)
+                return FedMLResponse(code=ResponseCode.FAILURE, message=error_message)
+
+        else:
+            error_message = "Failed to download data from source"
             logging.error(error_message)
             return FedMLResponse(code=ResponseCode.FAILURE, message=error_message)
+
     else:
-        error_message = f"Failed to download data: {data_name}"
+        error_message = "Unable to get the download URL"
         logging.error(error_message)
         return FedMLResponse(code=ResponseCode.FAILURE, message=error_message)
 
@@ -196,6 +216,194 @@ def delete(data_name, service, api_key=None) -> FedMLResponse:
         logging.error(message, data_name, service)
         return FedMLResponse(code=ResponseCode.FAILURE, message=message, data=False)
 
+def _get_num_chunks(file_size, max_chunk_size):
+    num_chunks = math.ceil(file_size / max_chunk_size)
+    return num_chunks
+
+
def get_chunks(file_path, chunk_size):
    """Yield successive ``chunk_size``-byte blocks read from ``file_path``.

    The file is opened in binary mode; the final chunk may be shorter than
    ``chunk_size``, and an empty file yields nothing.
    """
    with open(file_path, 'rb') as stream:
        # iter() with a b'' sentinel stops as soon as read() hits EOF.
        yield from iter(lambda: stream.read(chunk_size), b'')
+
+
def _get_presigned_url(api_key, request_url, file_name, part_number=None):
    """GET presigned upload URL(s) for ``file_name`` from the MLOps backend.

    :param api_key: bearer token placed in the Authorization header.
    :param request_url: presigned-URL endpoint to query.
    :param file_name: object key to presign (sent as ``fileKey``).
    :param part_number: optional multipart part count, forwarded as
        ``partNumber``; omitted from the query string when ``None``.
    :return: the raw ``requests.Response`` from the backend.
    """
    cert_path = MLOpsConfigs.get_cert_path_with_version()
    # Build a fresh dict instead of mutating ServerConstants.API_HEADERS in
    # place: the in-place update leaked the Authorization header into a
    # module-wide constant shared by every caller.
    headers = {**ServerConstants.API_HEADERS, "Authorization": f"Bearer {api_key}"}
    params_dict = {'fileKey': file_name}
    if part_number is not None:
        params_dict['partNumber'] = part_number
    # NOTE(review): the SSL-fallback branch fires when NO cert path is
    # configured, and a configured cert_path is never actually used — this
    # looks inverted; TODO confirm against the other MLOps HTTP helpers.
    if cert_path is None:
        try:
            response = requests.get(request_url, verify=True, headers=headers, params=params_dict)
        except requests.exceptions.SSLError:
            # Install the root CA bundle once, then retry the request.
            MLOpsConfigs.install_root_ca_file()
            response = requests.get(request_url, verify=True, headers=headers, params=params_dict)
    else:
        response = requests.get(request_url, verify=True, headers=headers, params=params_dict)
    return response
+
+
+def _upload_part(url,part_data,session):
+    response = session.put(url,data=part_data,verify=True)
+    return response
+
+
+def _upload_chunk(presigned_url, chunk, part, pbar=None, max_retries=20,session=None):
+    for retry_attempt in range(max_retries):
+        try:
+            response = _upload_part(presigned_url,chunk,session)
+        except requests.exceptions.RequestException as e:
+            if retry_attempt < max_retries:
+                continue
+            else:
+                raise requests.exceptions.RequestException
+
+        if(pbar is not None):
+            pbar.update(chunk.__sizeof__())
+        return {'etag': response.headers['ETag'], 'partNumber': part}
+    raise requests.exceptions.RequestException
+
+def _process_post_response(response):
+    if response.status_code != 200:
+        message = (f"Failed to complete multipart upload with status code = {response.status_code}, "
+                   f"response.content: {response.content}")
+        logging.error(message)
+        return None, message
+    else:
+        resp_data = response.json()
+        code = resp_data.get("code", None)
+        data_url = resp_data.get("data", None)
+
+        if code is None or data_url is None or code == "FAILURE":
+            message = resp_data.get("message", None)
+            message = (f"Failed to complete multipart upload with following message: {message}, "
+                       f"response.content: {response.content}")
+            return None, message
+
+        return data_url, "Successfully uploaded the data! "
+
def _complete_multipart_upload(api_key, file_key, part_info, upload_id):
    """Tell the MLOps backend that every part of a multipart upload is in place.

    :param api_key: bearer token for the Authorization header.
    :param file_key: object key of the uploaded file.
    :param part_info: list of ``{'etag', 'partNumber'}`` dicts from _upload_chunk.
    :param upload_id: multipart upload id returned by the presign call.
    :return: ``(data_url, message)`` pair from ``_process_post_response``.
    """
    complete_multipart_url = ServerConstants.get_complete_multipart_upload_url()
    body_dict = {"fileKey": file_key, 'partETags': part_info, 'uploadId': upload_id}

    cert_path = MLOpsConfigs.get_cert_path_with_version()
    # Build a fresh dict instead of mutating ServerConstants.API_HEADERS in
    # place, which leaked the Authorization header into a shared constant.
    headers = {**ServerConstants.API_HEADERS, "Authorization": f"Bearer {api_key}"}
    # NOTE(review): the SSL-fallback branch fires when NO cert path is
    # configured, which looks inverted — TODO confirm intended condition.
    if cert_path is None:
        try:
            complete_multipart_response = requests.post(complete_multipart_url, json=body_dict, verify=True,
                                                        headers=headers)
        except requests.exceptions.SSLError:
            # Install the root CA bundle once, then retry.
            MLOpsConfigs.install_root_ca_file()
            complete_multipart_response = requests.post(complete_multipart_url, json=body_dict, verify=True,
                                                        headers=headers)
    else:
        complete_multipart_response = requests.post(complete_multipart_url, json=body_dict, verify=True,
                                                    headers=headers)

    return _process_post_response(complete_multipart_response)
+
def _upload_multipart(api_key: str, file_key, archive_path, show_progress, out_progress_to_err,
                      progress_desc_text, metadata):
    """Upload ``archive_path`` to remote storage via presigned multipart chunks.

    :param api_key: bearer token for backend requests.
    :param file_key: object key to store the archive under.
    :param archive_path: local path of the archive to upload.
    :param show_progress: when False the tqdm progress bar is suppressed.
        (Previously this flag selected between two byte-identical branches
        and had no effect; it now disables the bar.)
    :param out_progress_to_err: send the bar to stderr instead of stdout.
    :param progress_desc_text: label shown next to the progress bar.
    :param metadata: unused here; kept for interface compatibility.
    :return: ``(data_url, message)`` — ``data_url`` is ``None`` on failure.
    """
    request_url = ServerConstants.get_presigned_multi_part_url()
    file_size = os.path.getsize(archive_path)
    max_chunk_size = 20 * 1024 * 1024
    num_chunks = _get_num_chunks(file_size, max_chunk_size)

    presigned_url_response = _get_presigned_url(api_key, request_url, file_key, num_chunks)

    if presigned_url_response.status_code != 200:
        message = (f"Failed to get presigned URL with status code = {presigned_url_response.status_code}, "
                   f"response.content: {presigned_url_response.content}")
        logging.error(message)
        return None, message

    resp_data = presigned_url_response.json()
    code = resp_data.get("code", None)
    data = resp_data.get("data", None)
    if code is None or data is None or code == "FAILURE":
        message = resp_data.get("message", None)
        message = (f"Failed getting presigned URL with following message: {message}, "
                   f"response.content: {presigned_url_response.content}")
        return None, message

    upload_id = data['uploadId']
    presigned_urls = data['urls']

    part_info = []
    # TODO: (bhargav191098) Using Thread pool and confirming openssl issue
    atomic_session = requests.session()
    atomic_session.verify = MLOpsConfigs.get_cert_path_with_version()
    with tqdm.tqdm(total=file_size, unit="B", unit_scale=True,
                   file=sys.stderr if out_progress_to_err else sys.stdout,
                   desc=progress_desc_text, leave=False,
                   disable=not show_progress) as pbar:
        for part, chunk in enumerate(get_chunks(archive_path, max_chunk_size), start=1):
            try:
                part_info.append(
                    _upload_chunk(presigned_url=presigned_urls[part - 1], chunk=chunk, part=part,
                                  pbar=pbar, session=atomic_session))
            except Exception as e:
                # Any part failing aborts the whole upload; include the real
                # cause instead of the old opaque "unsuccessful" message.
                return None, f"Failed to upload part {part}: {e}"

    # Reaching here means every chunk uploaded, so complete the upload.
    return _complete_multipart_upload(api_key, file_key, part_info, upload_id)
+
+
def _download_using_presigned_url(url, fname, chunk_size=1024 * 1024, show_progress=True):
    """Stream the object at ``url`` into local file ``fname``.

    Returns True when the server answered 200 and the body was written,
    False otherwise. With ``show_progress`` a tqdm bar tracks written bytes
    against the Content-Length header (0 when the header is absent).
    """
    download_response = requests.get(url, verify=True, stream=True)
    if download_response.status_code != 200:
        return False

    total = int(download_response.headers.get('content-length', 0))
    body = download_response.iter_content(chunk_size=chunk_size)
    if show_progress:
        with open(fname, 'wb') as file, tqdm.tqdm(desc=fname, total=total, unit='B',
                                                  unit_scale=True, unit_divisor=1024) as bar:
            for data in body:
                bar.update(file.write(data))
    else:
        with open(fname, "wb") as file:
            for data in body:
                file.write(data)
    return True
 
 def _get_user_id_from_api_key(api_key: str) -> (str, str):
     user_url = ServerConstants.get_user_url()
diff --git a/python/fedml/computing/scheduler/master/server_constants.py b/python/fedml/computing/scheduler/master/server_constants.py
index b835ba1bde..ebd8b2aef6 100644
--- a/python/fedml/computing/scheduler/master/server_constants.py
+++ b/python/fedml/computing/scheduler/master/server_constants.py
@@ -255,6 +255,20 @@ def get_dataset_url():
             ServerConstants.get_mlops_url())
         return create_dataset_url
 
+    @staticmethod
+    def get_presigned_multi_part_url():
+        get_presigned_multi_part_url = "{}/system/api/v1/cli/oss/multipart/presigned-url".format(
+            ServerConstants.get_mlops_url()
+        )
+        return get_presigned_multi_part_url
+
+    @staticmethod
+    def get_complete_multipart_upload_url():
+        complete_multipart_upload_url = "{}/system/api/v1/cli/oss/multipart/upload/complete".format(
+            ServerConstants.get_mlops_url()
+        )
+        return complete_multipart_upload_url
+
     @staticmethod
     def list_dataset_url():
         list_dataset_url = "{}/fedmlOpsServer/api/v1/cli/dataset/list".format(

From aa62a94da7dc68100ed80a75714b8c6b3b60e57d Mon Sep 17 00:00:00 2001
From: bhargav191098 <bhargav3514@gmail.com>
Date: Thu, 6 Jun 2024 23:57:08 -0700
Subject: [PATCH 129/282] minor comments and some error handling

---
 python/fedml/api/modules/storage.py | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/python/fedml/api/modules/storage.py b/python/fedml/api/modules/storage.py
index cc93fd6f87..e7d492c999 100644
--- a/python/fedml/api/modules/storage.py
+++ b/python/fedml/api/modules/storage.py
@@ -8,8 +8,6 @@
 import requests.exceptions
 import tqdm
 import sys
-from concurrent.futures import ThreadPoolExecutor
-import concurrent.futures
 from fedml.api.modules.utils import authenticate
 from fedml.core.distributed.communication.s3.remote_storage import S3Storage
 from fedml.core.mlops.mlops_configs import Configs, MLOpsConfigs
@@ -31,7 +29,7 @@ def __init__(self, data: dict):
 
 # Todo (alaydshah): Store service name in metadata
 # Todo (alaydshah): If data already exists, don't upload again. Instead suggest to use update command
-
+# Todo (bhargav) : Discuss and remove the service variable. Maybe needed sometime later.
 def upload(data_path, api_key, name, description, tag_list, service, show_progress, out_progress_to_err, progress_desc,
            metadata) -> FedMLResponse:
     api_key = authenticate(api_key)
@@ -118,8 +116,7 @@ def download(data_name, api_key, service, dest_path, show_progress=True) -> FedM
             return FedMLResponse(code=ResponseCode.FAILURE, message=error_message)
 
     else:
-        error_message = "Unable to get the download URL"
-        logging.error(error_message)
+        error_message = metadata_response.message
         return FedMLResponse(code=ResponseCode.FAILURE, message=error_message)
 
 
@@ -288,6 +285,7 @@ def _process_post_response(response):
 
         return data_url, "Successfully uploaded the data! "
 
+
 def _complete_multipart_upload(api_key, file_key, part_info, upload_id):
     complete_multipart_url = ServerConstants.get_complete_multipart_upload_url()
     body_dict = {"fileKey": file_key, 'partETags': part_info, 'uploadId': upload_id}
@@ -310,6 +308,7 @@ def _complete_multipart_upload(api_key, file_key, part_info, upload_id):
 
     return _process_post_response(complete_multipart_response)
 
+
 def _upload_multipart(api_key: str, file_key, archive_path, show_progress, out_progress_to_err,
                       progress_desc_text, metadata):
     request_url = ServerConstants.get_presigned_multi_part_url()
@@ -349,7 +348,7 @@ def _upload_multipart(api_key: str, file_key, archive_path, show_progress, out_p
     part_info = []
     chunk_count = 0
     successful_chunks = 0
-
+    #TODO: (bhargav191098) Using Thread pool and confirming openssl issue
     atomic_session = requests.session()
     atomic_session.verify = MLOpsConfigs.get_cert_path_with_version()
     with tqdm.tqdm(total=file_size, unit="B", unit_scale=True,
@@ -358,7 +357,6 @@ def _upload_multipart(api_key: str, file_key, archive_path, show_progress, out_p
         for part, chunk in enumerate(chunks, start=1):
             presigned_url = presigned_urls[part - 1]
             chunk_count += 1
-            # Upload chunk to presigned_url in a separate thread from the thread pool of 10 workers.
             if show_progress:
                 try:
                     part_data = _upload_chunk(presigned_url=presigned_url, chunk=chunk, part=part,

From 14bae990b9a610f7cdfb25f82355025b3d58b3e0 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Fri, 7 Jun 2024 16:20:34 +0800
Subject: [PATCH 130/282] =?UTF-8?q?[CoreEngine]=201.=20fixed=20the=20issue?=
 =?UTF-8?q?=20that=20the=20fork=20method=20is=20not=20support=20in=20Windo?=
 =?UTF-8?q?ws=20OS.=E2=80=A82.=20fixed=20the=20issue=20the=20sqlite=20path?=
 =?UTF-8?q?=20is=20illegal=20=20in=20Windows=20OS.?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 python/fedml/__init__.py                      | 24 +++++++++++++------
 .../scheduler/comm_utils/hardware_utils.py    |  2 +-
 .../model_scheduler/device_model_db.py        |  6 ++++-
 .../scheduler/scheduler_core/base_db.py       |  6 ++++-
 .../scheduler_base_protocol_manager.py        |  4 +---
 5 files changed, 29 insertions(+), 13 deletions(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index 21da84c9ab..c96d65adc5 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -1,4 +1,5 @@
 import logging
+import platform
 
 import multiprocess as multiprocessing
 import os
@@ -92,13 +93,7 @@ def init(args=None, check_env=True, should_init_logs=True):
     # Windows/Linux/MacOS compatability issues on multi-processing
     # https://github.com/pytorch/pytorch/issues/3492
     """
-    if multiprocessing.get_start_method() != "fork":
-        # force all platforms (Windows/Linux/macOS) to use the same way (fork) for multiprocessing
-        multiprocessing.set_start_method("fork", force=True)
-
-    # if multiprocessing.get_start_method() != "spawn":
-    #     # force all platforms (Windows/Linux/MacOS) to use the same way (spawn) for multiprocessing
-    #     multiprocessing.set_start_method("spawn", force=True)
+    _init_multiprocessing()
 
     """
     # https://stackoverflow.com/questions/53014306/error-15-initializing-libiomp5-dylib-but-found-libiomp5-dylib-already-initial
@@ -450,6 +445,21 @@ def _run_distributed():
     pass
 
 
+def _init_multiprocessing():
+    """
+    # Windows/Linux/MacOS compatability issues on multi-processing
+    # https://github.com/pytorch/pytorch/issues/3492
+    """
+    if platform.system() == "Windows":
+        if multiprocessing.get_start_method() != "spawn":
+            # force all platforms (Windows/Linux/macOS) to use the same way (spawn) for multiprocessing
+            multiprocessing.set_start_method("spawn", force=True)
+    else:
+        if multiprocessing.get_start_method() != "fork":
+            # force all platforms (Windows/Linux/macOS) to use the same way (fork) for multiprocessing
+            multiprocessing.set_start_method("fork", force=True)
+
+
 def set_env_version(version):
     set_env_kv("FEDML_ENV_VERSION", version)
     load_env()
diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
index 0062418631..e73809955e 100644
--- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
@@ -27,7 +27,7 @@ def __get_util(cls) -> Optional[GPUCardUtil]:
             except Exception as e:
                 pass
 
-        logging.error("No GPU card detected")
+        # logging.error("No GPU card detected")
         return None
 
     @staticmethod
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_db.py b/python/fedml/computing/scheduler/model_scheduler/device_model_db.py
index 1f43f719f3..09573a1d1b 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_db.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_db.py
@@ -1,6 +1,7 @@
 import json
 import logging
 import os
+import platform
 import time
 
 from fedml.computing.scheduler.model_scheduler.device_server_constants import ServerConstants
@@ -261,7 +262,10 @@ def open_job_db(self):
             self.db_base_dir = ServerConstants.get_database_dir()
 
         job_db_path = os.path.join(self.db_base_dir, FedMLModelDatabase.MODEL_DEPLOYMENT_DB)
-        self.db_engine = create_engine('sqlite:////{}'.format(job_db_path), echo=False)
+        if platform.system() == "Windows":
+            self.db_engine = create_engine('sqlite:///{}'.format(job_db_path), echo=False)
+        else:
+            self.db_engine = create_engine('sqlite:////{}'.format(job_db_path), echo=False)
 
         db_session_class = sessionmaker(bind=self.db_engine)
         self.db_connection = db_session_class()
diff --git a/python/fedml/computing/scheduler/scheduler_core/base_db.py b/python/fedml/computing/scheduler/scheduler_core/base_db.py
index b827efacf7..dbb322cfae 100755
--- a/python/fedml/computing/scheduler/scheduler_core/base_db.py
+++ b/python/fedml/computing/scheduler/scheduler_core/base_db.py
@@ -1,5 +1,6 @@
 import json
 import os
+import platform
 import time
 
 from sqlalchemy import Column, String, TEXT, Integer, Float, create_engine, and_
@@ -25,7 +26,10 @@ def open_job_db(self):
         if self.db_connection is not None:
             return
 
-        self.db_engine = create_engine('sqlite:////{}'.format(self.db_path), echo=False)
+        if platform.system() == "Windows":
+            self.db_engine = create_engine('sqlite:///{}'.format(self.db_path), echo=False)
+        else:
+            self.db_engine = create_engine('sqlite:////{}'.format(self.db_path), echo=False)
 
         db_session_class = sessionmaker(bind=self.db_engine)
         self.db_connection = db_session_class()
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
index 9bb8b7a7ec..19bb7e9882 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
@@ -46,9 +46,7 @@ def __init__(self, args, agent_config=None, is_master=False):
         self.status_reporter = None
         self.user_name = args.user_name
 
-        if multiprocessing.get_start_method() != "fork":
-            # force all platforms (Windows/Linux/macOS) to use the same way (fork) for multiprocessing
-            multiprocessing.set_start_method("fork", force=True)
+        fedml._init_multiprocessing()
 
     def generate_topics(self):
         # generate the subscribed topics.

From 28ff0f3c1f1621f9994da08836a5920749b22fc6 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Fri, 7 Jun 2024 16:25:07 +0800
Subject: [PATCH 131/282] [CoreEngine] add the missed import.

---
 .../fedml/computing/scheduler/scheduler_core/compute_gpu_db.py   | 1 +
 1 file changed, 1 insertion(+)

diff --git a/python/fedml/computing/scheduler/scheduler_core/compute_gpu_db.py b/python/fedml/computing/scheduler/scheduler_core/compute_gpu_db.py
index d50555d3c9..eb80c1424e 100755
--- a/python/fedml/computing/scheduler/scheduler_core/compute_gpu_db.py
+++ b/python/fedml/computing/scheduler/scheduler_core/compute_gpu_db.py
@@ -8,6 +8,7 @@
 from fedml.core.common.singleton import Singleton
 from .base_db import FedMLBaseDb
 from .compute_utils import ComputeUtils
+from ..master.server_constants import ServerConstants
 
 Base = declarative_base()
 

From c151831deb46e297e182f74cc0315491d97d535d Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@fedml.ai>
Date: Thu, 6 Jun 2024 17:03:46 -0400
Subject: [PATCH 132/282] Adding hash set  for counting the number of pending
 requests per endpoint.

---
 .../model_scheduler/device_model_cache.py     | 29 ++++++++++---------
 .../model_scheduler/device_model_inference.py | 18 ++++++------
 2 files changed, 24 insertions(+), 23 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
index 75cf4dbc2a..242501f2fa 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
@@ -974,20 +974,21 @@ def delete_endpoint_scaling_down_decision_time(self, end_point_id) -> bool:
             self.FEDML_MODEL_ENDPOINT_SCALING_DOWN_DECISION_TIME_TAG,
             end_point_id))
 
-    def get_pending_requests_counter(self) -> int:
-        if not self.redis_connection.exists(self.FEDML_PENDING_REQUESTS_COUNTER):
-            self.redis_connection.set(self.FEDML_PENDING_REQUESTS_COUNTER, 0)
-        return int(self.redis_connection.get(self.FEDML_PENDING_REQUESTS_COUNTER))
-
-    def update_pending_requests_counter(self, increase=False, decrease=False) -> int:
-        if not self.redis_connection.exists(self.FEDML_PENDING_REQUESTS_COUNTER):
-            self.redis_connection.set(self.FEDML_PENDING_REQUESTS_COUNTER, 0)
+    def get_pending_requests_counter(self, end_point_id) -> int:
+        # If the endpoint does not exist inside the Hash collection, set its counter to 0.
+        if self.redis_connection.hexists(self.FEDML_PENDING_REQUESTS_COUNTER, end_point_id):
+            return int(self.redis_connection.hget(self.FEDML_PENDING_REQUESTS_COUNTER, end_point_id))
+        return 0
+
+    def update_pending_requests_counter(self, end_point_id, increase=False, decrease=False) -> int:
+        if not self.redis_connection.hexists(self.FEDML_PENDING_REQUESTS_COUNTER, end_point_id):
+            self.redis_connection.hset(self.FEDML_PENDING_REQUESTS_COUNTER, mapping={end_point_id: 0})
         if increase:
-            self.redis_connection.incr(self.FEDML_PENDING_REQUESTS_COUNTER)
+            self.redis_connection.hincrby(self.FEDML_PENDING_REQUESTS_COUNTER, end_point_id, 1)
         if decrease:
+            # Careful on the negative, there is no native function for hash decreases.
+            self.redis_connection.hincrby(self.FEDML_PENDING_REQUESTS_COUNTER, end_point_id, -1)
             # Making sure the counter never becomes negative!
-            if self.get_pending_requests_counter() < 0:
-                self.redis_connection.set(self.FEDML_PENDING_REQUESTS_COUNTER, 0)
-            else:
-                self.redis_connection.decr(self.FEDML_PENDING_REQUESTS_COUNTER)
-        return self.get_pending_requests_counter()
+            if self.get_pending_requests_counter(end_point_id) < 0:
+                self.redis_connection.hset(self.FEDML_PENDING_REQUESTS_COUNTER, mapping={end_point_id: 0})
+        return self.get_pending_requests_counter(end_point_id)
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index 7bc7d6f097..c6e26ba53c 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -55,10 +55,10 @@ async def auth_middleware(request: Request, call_next):
                 {"error": True, "message": "Invalid JSON."},
                 status_code=status.HTTP_400_BAD_REQUEST)
 
-        # Get total pending requests.
-        pending_requests_num = FEDML_MODEL_CACHE.get_pending_requests_counter()
+        # Get endpoint's total pending requests.
+        end_point_id = request_json.get("end_point_id", None)
+        pending_requests_num = FEDML_MODEL_CACHE.get_pending_requests_counter(end_point_id)
         if pending_requests_num:
-            end_point_id = request_json.get("end_point_id", None)
             # Fetch metrics of the past k=3 requests.
             pask_k_metrics = FEDML_MODEL_CACHE.get_endpoint_metrics(
                 end_point_id=end_point_id,
@@ -173,7 +173,7 @@ async def _predict(
         header=None
 ) -> Union[MutableMapping[str, Any], Response, StreamingResponse]:
     # Always increase the pending requests counter on a new incoming request.
-    FEDML_MODEL_CACHE.update_pending_requests_counter(increase=True)
+    FEDML_MODEL_CACHE.update_pending_requests_counter(end_point_id, increase=True)
     inference_response = {}
 
     try:
@@ -205,14 +205,14 @@ async def _predict(
             if not is_endpoint_activated(in_end_point_id):
                 inference_response = {"error": True, "message": "endpoint is not activated."}
                 logging_inference_request(input_json, inference_response)
-                FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
+                FEDML_MODEL_CACHE.update_pending_requests_counter(end_point_id, decrease=True)
                 return inference_response
 
             # Found idle inference device
             idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url = \
                 found_idle_inference_device(in_end_point_id, in_end_point_name, in_model_name, in_model_version)
             if idle_device is None or idle_device == "":
-                FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
+                FEDML_MODEL_CACHE.update_pending_requests_counter(end_point_id, decrease=True)
                 return {"error": True, "error_code": status.HTTP_404_NOT_FOUND,
                         "message": "can not found active inference worker for this endpoint."}
 
@@ -252,18 +252,18 @@ async def _predict(
                 pass
 
             logging_inference_request(input_json, inference_response)
-            FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
+            FEDML_MODEL_CACHE.update_pending_requests_counter(end_point_id, decrease=True)
             return inference_response
         else:
             inference_response = {"error": True, "message": "token is not valid."}
             logging_inference_request(input_json, inference_response)
-            FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
+            FEDML_MODEL_CACHE.update_pending_requests_counter(end_point_id, decrease=True)
             return inference_response
 
     except Exception as e:
         logging.error("Inference Exception: {}".format(traceback.format_exc()))
         # Need to reduce the pending requests counter in whatever exception that may be raised.
-        FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
+        FEDML_MODEL_CACHE.update_pending_requests_counter(end_point_id, decrease=True)
 
 
 def retrieve_info_by_endpoint_id(end_point_id, in_end_point_name=None, in_model_name=None,

From c29cf1d6e6be0c231e6f2c3bd5e13e67d7431956 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Mon, 10 Jun 2024 19:58:00 +0000
Subject: [PATCH 133/282] [Deploy] Unified timeout key.

---
 .../scheduler/model_scheduler/device_model_cache.py          | 2 +-
 .../scheduler/model_scheduler/device_model_inference.py      | 5 +++--
 .../scheduler/model_scheduler/device_server_constants.py     | 3 +++
 3 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
index 242501f2fa..30e4f460e6 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
@@ -139,7 +139,7 @@ def set_user_setting_replica_num(self, end_point_id,
             "target_queries_per_replica": target_queries_per_replica,
             "aggregation_window_size_seconds": aggregation_window_size_seconds,
             "scale_down_delay_seconds": scale_down_delay_seconds,
-            "request_timeout_sec": timeout_s
+            ServerConstants.INFERENCE_REQUEST_TIMEOUT_KEY: timeout_s
         }
         try:
             self.redis_connection.set(self.get_user_setting_replica_num_key(end_point_id), json.dumps(replica_num_dict))
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index c6e26ba53c..d073533b72 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -66,7 +66,7 @@ async def auth_middleware(request: Request, call_next):
 
             # Get the request timeout from the endpoint settings.
             request_timeout_s = FEDML_MODEL_CACHE.get_endpoint_settings(end_point_id) \
-                .get("request_timeout_s", ClientConstants.INFERENCE_REQUEST_TIMEOUT)
+                .get(ServerConstants.INFERENCE_REQUEST_TIMEOUT_KEY, ServerConstants.INFERENCE_REQUEST_TIMEOUT_DEFAULT)
 
             # Only proceed if the past k metrics collection is not empty.
             if pask_k_metrics:
@@ -76,7 +76,8 @@ async def auth_middleware(request: Request, call_next):
                 mean_latency = sum(past_k_latencies_sec) / len(past_k_latencies_sec)
 
                 # If timeout threshold is exceeded then cancel and return time out error.
-                if (mean_latency * pending_requests_num) > request_timeout_s:
+                should_block = (mean_latency * pending_requests_num) > request_timeout_s
+                if should_block:
                     return JSONResponse(
                         {"error": True, "message": "Request timed out."},
                         status_code=status.HTTP_504_GATEWAY_TIMEOUT)
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
index eb01fbb599..243c197b2f 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
@@ -104,6 +104,9 @@ class ServerConstants(object):
     AUTO_DETECT_PUBLIC_IP = "auto_detect_public_ip"
     MODEL_INFERENCE_DEFAULT_PORT = 2203
     MODEL_CACHE_KEY_EXPIRE_TIME = 1 * 10
+
+    INFERENCE_REQUEST_TIMEOUT_KEY = "request_timeout_sec"
+    INFERENCE_REQUEST_TIMEOUT_DEFAULT = 30
     # -----End-----
 
     MODEL_DEPLOYMENT_STAGE1 = {"index": 1, "text": "ReceivedRequest"}

From c4a87149e3af296310f0a7ca04cd467e0bc9b06f Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Tue, 11 Jun 2024 00:06:14 +0000
Subject: [PATCH 134/282] [Deploy] Report worker's connectivity when it
 finished.

---
 .../scheduler/comm_utils/network_util.py      | 16 +++++
 .../device_client_constants.py                |  5 ++
 .../model_scheduler/device_model_inference.py | 60 +++++++++++--------
 .../model_scheduler/master_job_runner.py      |  8 ---
 .../model_scheduler/worker_job_runner.py      | 33 +++++++---
 .../scheduler_core/general_constants.py       | 16 ++---
 6 files changed, 87 insertions(+), 51 deletions(-)
 create mode 100644 python/fedml/computing/scheduler/comm_utils/network_util.py

diff --git a/python/fedml/computing/scheduler/comm_utils/network_util.py b/python/fedml/computing/scheduler/comm_utils/network_util.py
new file mode 100644
index 0000000000..13674840c5
--- /dev/null
+++ b/python/fedml/computing/scheduler/comm_utils/network_util.py
@@ -0,0 +1,16 @@
+import os
+from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants
+
+
+def return_this_device_connectivity_type() -> str:
+    """
+    Return -> "http" | "http_proxy" |"mqtt"
+    """
+    if os.environ.get(ClientConstants.ENV_CONNECTION_TYPE_KEY) == ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP:
+        return ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP
+    elif os.environ.get(ClientConstants.ENV_CONNECTION_TYPE_KEY) == ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP_PROXY:
+        return ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP_PROXY
+    elif os.environ.get(ClientConstants.ENV_CONNECTION_TYPE_KEY) == ClientConstants.WORKER_CONNECTIVITY_TYPE_MQTT:
+        return ClientConstants.WORKER_CONNECTIVITY_TYPE_MQTT
+    else:
+        return ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
index 7894f2c73e..d66c2f966a 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
@@ -97,6 +97,11 @@ class ClientConstants(object):
     INFERENCE_INFERENCE_SERVER_VERSION = "v2"
     INFERENCE_REQUEST_TIMEOUT = 30
 
+    ENV_CONNECTION_TYPE_KEY = "FEDML_CONNECTION_TYPE"
+    WORKER_CONNECTIVITY_TYPE_HTTP = "http"
+    WORKER_CONNECTIVITY_TYPE_HTTP_PROXY = "http_proxy"
+    WORKER_CONNECTIVITY_TYPE_MQTT = "mqtt"
+
     MSG_MODELOPS_DEPLOYMENT_STATUS_INITIALIZING = "INITIALIZING"
     MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYING = "DEPLOYING"
     MSG_MODELOPS_DEPLOYMENT_STATUS_INFERRING = "INFERRING"
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index d073533b72..a9205ceb9a 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -210,7 +210,8 @@ async def _predict(
                 return inference_response
 
             # Found idle inference device
-            idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url = \
+            idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url,\
+                connectivity_type = \
                 found_idle_inference_device(in_end_point_id, in_end_point_name, in_model_name, in_model_version)
             if idle_device is None or idle_device == "":
                 FEDML_MODEL_CACHE.update_pending_requests_counter(end_point_id, decrease=True)
@@ -235,13 +236,16 @@ async def _predict(
                 stream_flag = input_json.get("stream", False)
                 input_list["stream"] = input_list.get("stream", stream_flag)
                 output_list = input_json.get("outputs", [])
+
+                # main execution of redirecting the inference request to the idle device
                 inference_response = await send_inference_request(
                     idle_device,
                     end_point_id,
                     inference_output_url,
                     input_list,
                     output_list,
-                    inference_type=in_return_type)
+                    inference_type=in_return_type,
+                    connectivity_type=connectivity_type)
 
             # Calculate model metrics
             try:
@@ -304,11 +308,12 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_
     inference_host = ""
     inference_output_url = ""
     model_version = ""
+    connectivity_type = ""
+
     # Found idle device (TODO: optimize the algorithm to search best device for inference)
     payload, idle_device = FEDML_MODEL_CACHE. \
         get_idle_device(end_point_id, end_point_name, in_model_name, in_model_version)
     if payload is not None:
-        logging.info("found idle deployment result {}".format(payload))
         deployment_result = payload
         model_name = deployment_result["model_name"]
         model_version = deployment_result["model_version"]
@@ -317,24 +322,25 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_
         inference_output_url = deployment_result["model_url"]
         url_parsed = urlparse(inference_output_url)
         inference_host = url_parsed.hostname
+        connectivity_type = deployment_result.get("connectivity_type", ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP)
     else:
         logging.info("not found idle deployment result")
 
-    return idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url
+    res = (idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url,
+           connectivity_type)
+    logging.info(f"found idle device with metrics: {res}")
+
+    return res
 
 
 async def send_inference_request(idle_device, end_point_id, inference_url, input_list, output_list,
-                                 inference_type="default", has_public_ip=True):
+                                 inference_type="default",
+                                 connectivity_type=ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP):
     request_timeout_sec = FEDML_MODEL_CACHE.get_endpoint_settings(end_point_id) \
         .get("request_timeout_sec", ClientConstants.INFERENCE_REQUEST_TIMEOUT)
 
     try:
-        http_infer_available = os.getenv("FEDML_INFERENCE_HTTP_AVAILABLE", True)
-        if not http_infer_available:
-            if http_infer_available == "False" or http_infer_available == "false":
-                http_infer_available = False
-
-        if http_infer_available:
+        if connectivity_type == ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP:
             response_ok = await FedMLHttpInference.is_inference_ready(
                 inference_url,
                 timeout=request_timeout_sec)
@@ -347,22 +353,23 @@ async def send_inference_request(idle_device, end_point_id, inference_url, input
                     timeout=request_timeout_sec)
                 logging.info(f"Use http inference. return {response_ok}")
                 return inference_response
-
-        response_ok = await FedMLHttpProxyInference.is_inference_ready(
-            inference_url,
-            timeout=request_timeout_sec)
-        if response_ok:
-            response_ok, inference_response = await FedMLHttpProxyInference.run_http_proxy_inference_with_request(
-                end_point_id,
+        elif connectivity_type == ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP_PROXY:
+            logging.warning("Use http proxy inference.")
+            response_ok = await FedMLHttpProxyInference.is_inference_ready(
                 inference_url,
-                input_list,
-                output_list,
-                inference_type=inference_type,
                 timeout=request_timeout_sec)
-            logging.info(f"Use http proxy inference. return {response_ok}")
-            return inference_response
-
-        if not has_public_ip:
+            if response_ok:
+                response_ok, inference_response = await FedMLHttpProxyInference.run_http_proxy_inference_with_request(
+                    end_point_id,
+                    inference_url,
+                    input_list,
+                    output_list,
+                    inference_type=inference_type,
+                    timeout=request_timeout_sec)
+                logging.info(f"Use http proxy inference. return {response_ok}")
+                return inference_response
+        elif connectivity_type == ClientConstants.WORKER_CONNECTIVITY_TYPE_MQTT:
+            logging.warning("Use mqtt inference.")
             agent_config = {"mqtt_config": Settings.mqtt_config}
             mqtt_inference = FedMLMqttInference(
                 agent_config=agent_config,
@@ -385,7 +392,8 @@ async def send_inference_request(idle_device, end_point_id, inference_url, input
 
             logging.info(f"Use mqtt inference. return {response_ok}.")
             return inference_response
-        return {"error": True, "message": "Failed to use http, http-proxy for inference, no response from replica."}
+        else:
+            return {"error": True, "message": "Failed to use http, http-proxy for inference, no response from replica."}
     except Exception as e:
         inference_response = {"error": True,
                               "message": f"Exception when using http, http-proxy and mqtt "
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index a10bd2c559..b9b9b4c356 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -250,14 +250,6 @@ def process_deployment_result_message(self, topic=None, payload=None):
         logging.info(f"Endpoint {end_point_id}; Device {device_id}; replica {replica_no}; "
                      f"run_operation {run_operation} model status {model_status}.")
 
-        # OPTIONAL DEBUG PARAMS
-        # this_run_controller = self.model_runner_mapping[run_id_str].replica_controller
-        # logging.info(f"The current replica controller state is "
-        #              f"Total version diff num {this_run_controller.total_replica_version_diff_num}")
-        # logging.info(f"self.request_json now {self.request_json}")    # request_json will be deprecated
-        # this_run_request_json = self.request_json
-        # logging.info(f"self.request_json now {this_run_request_json}")
-
         # Set redis + sqlite deployment result
         FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
 
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
index 3c357e9dab..9e178228b2 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
@@ -9,6 +9,8 @@
 from abc import ABC
 import yaml
 from fedml.computing.scheduler.comm_utils.job_utils import JobRunnerUtils
+from fedml.computing.scheduler.comm_utils.network_util import return_this_device_connectivity_type
+
 from fedml.core.mlops import MLOpsRuntimeLog
 from fedml.computing.scheduler.comm_utils import file_utils
 from .device_client_constants import ClientConstants
@@ -234,8 +236,11 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
         running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
             "", "", model_version, {}, {}
 
+        # ip and connectivity
+        worker_ip = GeneralConstants.get_ip_address(self.request_json)
+        connectivity = return_this_device_connectivity_type()
+
         if op == "add":
-            worker_ip = GeneralConstants.get_ip_address(self.request_json)
             for rank in range(prev_rank + 1, prev_rank + 1 + op_num):
                 try:
                     running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
@@ -269,7 +274,9 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                     result_payload = self.send_deployment_results(
                         end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                         model_id, model_name, inference_output_url, model_version, inference_port_external,
-                        inference_engine, model_metadata, model_config, replica_no=rank + 1)
+                        inference_engine, model_metadata, model_config, replica_no=rank + 1,
+                        connectivity=connectivity
+                    )
 
                     if inference_port_external != inference_port:
                         # Save internal port to local db
@@ -278,7 +285,9 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                         result_payload = self.construct_deployment_results(
                             end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                             model_id, model_name, inference_output_url, model_version, inference_port,
-                            inference_engine, model_metadata, model_config, replica_no=rank + 1)
+                            inference_engine, model_metadata, model_config, replica_no=rank + 1,
+                            connectivity=connectivity
+                        )
 
                     FedMLModelDatabase.get_instance().set_deployment_result(
                         run_id, end_point_name, model_name, model_version, self.edge_id,
@@ -326,7 +335,6 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
             return True
         elif op == "update" or op == "rollback":
             # Update is combine of delete and add
-            worker_ip = GeneralConstants.get_ip_address(self.request_json)
             for rank in replica_rank_to_update:
                 # Delete a replica (container) if exists
                 self.replica_handler.remove_replica(rank)
@@ -402,7 +410,9 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                     result_payload = self.send_deployment_results(
                         end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                         model_id, model_name, inference_output_url, model_version, inference_port_external,
-                        inference_engine, model_metadata, model_config, replica_no=rank + 1)
+                        inference_engine, model_metadata, model_config, replica_no=rank + 1,
+                        connectivity=connectivity
+                    )
 
                     if inference_port_external != inference_port:  # Save internal port to local db
                         logging.info("inference_port_external {} != inference_port {}".format(
@@ -410,7 +420,9 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                         result_payload = self.construct_deployment_results(
                             end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                             model_id, model_name, inference_output_url, model_version, inference_port,
-                            inference_engine, model_metadata, model_config, replica_no=rank + 1)
+                            inference_engine, model_metadata, model_config, replica_no=rank + 1,
+                            connectivity=connectivity
+                        )
 
                     FedMLModelDatabase.get_instance().set_deployment_result(
                         run_id, end_point_name, model_name, model_version, self.edge_id,
@@ -433,7 +445,8 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
     def construct_deployment_results(self, end_point_name, device_id, model_status,
                                      model_id, model_name, model_inference_url,
                                      model_version, inference_port, inference_engine,
-                                     model_metadata, model_config, replica_no=1):
+                                     model_metadata, model_config, replica_no=1,
+                                     connectivity=ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP):
         deployment_results_payload = {"end_point_id": self.run_id, "end_point_name": end_point_name,
                                       "model_id": model_id, "model_name": model_name,
                                       "model_url": model_inference_url, "model_version": model_version,
@@ -444,6 +457,7 @@ def construct_deployment_results(self, end_point_name, device_id, model_status,
                                       "model_status": model_status,
                                       "inference_port": inference_port,
                                       "replica_no": replica_no,
+                                      "connectivity_type": connectivity,
                                       }
         return deployment_results_payload
 
@@ -466,7 +480,8 @@ def construct_deployment_status(self, end_point_name, device_id,
     def send_deployment_results(self, end_point_name, device_id, model_status,
                                 model_id, model_name, model_inference_url,
                                 model_version, inference_port, inference_engine,
-                                model_metadata, model_config, replica_no=1):
+                                model_metadata, model_config, replica_no=1,
+                                connectivity=ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP):
         deployment_results_topic = "model_device/model_device/return_deployment_result/{}/{}".format(
             self.run_id, device_id)
 
@@ -474,7 +489,7 @@ def send_deployment_results(self, end_point_name, device_id, model_status,
             end_point_name, device_id, model_status,
             model_id, model_name, model_inference_url,
             model_version, inference_port, inference_engine,
-            model_metadata, model_config, replica_no=replica_no)
+            model_metadata, model_config, replica_no=replica_no, connectivity=connectivity)
 
         logging.info("[client] send_deployment_results: topic {}, payload {}.".format(deployment_results_topic,
                                                                                       deployment_results_payload))
diff --git a/python/fedml/computing/scheduler/scheduler_core/general_constants.py b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
index 68c1a8e09d..8c60b17bdf 100755
--- a/python/fedml/computing/scheduler/scheduler_core/general_constants.py
+++ b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
@@ -192,14 +192,14 @@ def get_public_ip():
     @staticmethod
     def get_ip_address(request_json, infer_host=None):
         # OPTION 1: Use local ip
-        ip = GeneralConstants.get_local_ip()
-
-        # OPTION 2: Auto detect public ip
-        if "parameters" in request_json and \
-                GeneralConstants.CONFIG_KEY_AUTO_DETECT_PUBLIC_IP in request_json["parameters"] and \
-                request_json["parameters"][GeneralConstants.CONFIG_KEY_AUTO_DETECT_PUBLIC_IP]:
-            ip = GeneralConstants.get_public_ip()
-            logging.info("Auto detect public ip for master: " + ip)
+        # ip = GeneralConstants.get_local_ip()
+        #
+        # # OPTION 2: Auto detect public ip
+        # if "parameters" in request_json and \
+        #         GeneralConstants.CONFIG_KEY_AUTO_DETECT_PUBLIC_IP in request_json["parameters"] and \
+        #         request_json["parameters"][GeneralConstants.CONFIG_KEY_AUTO_DETECT_PUBLIC_IP]:
+        ip = GeneralConstants.get_public_ip()
+        logging.info("Auto detect public ip for master: " + ip)
 
         # OPTION 3: Use user indicated ip
         if infer_host is not None and infer_host != "127.0.0.1" and infer_host != "localhost":

From ea03b600c9709ba951fdd47e757f07c522891b16 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Mon, 10 Jun 2024 17:21:10 -0700
Subject: [PATCH 135/282] [Deploy] Refactor the quick start example; use public
 IP as the default.

---
 .../examples/deploy/quick_start/config.yaml   |  21 +-
 .../examples/deploy/quick_start/main_entry.py |  27 ++
 .../deploy/quick_start/src/__init__.py        |   0
 .../deploy/quick_start/src/app/__init__.py    |   0
 .../quick_start/src/app/pipe/__init__.py      |   0
 .../quick_start/src/app/pipe/constants.py     |  68 -----
 .../src/app/pipe/instruct_pipeline.py         | 261 ------------------
 .../quick_start/src/config/bootstrap.sh       |  14 -
 .../deploy/quick_start/src/main_entry.py      |  67 -----
 python/fedml/api/modules/model.py             |   3 +
 .../device_client_runner_deprecated.py}       |   0
 .../model_scheduler/sample_model/README.md    |  57 ----
 .../sample_model/fedml_model.bin              | Bin 1476451 -> 0 bytes
 .../sample_model/fedml_model_config.yaml      |  20 --
 .../scheduler_core/general_constants.py       |  16 +-
 15 files changed, 42 insertions(+), 512 deletions(-)
 create mode 100644 python/examples/deploy/quick_start/main_entry.py
 delete mode 100644 python/examples/deploy/quick_start/src/__init__.py
 delete mode 100644 python/examples/deploy/quick_start/src/app/__init__.py
 delete mode 100644 python/examples/deploy/quick_start/src/app/pipe/__init__.py
 delete mode 100644 python/examples/deploy/quick_start/src/app/pipe/constants.py
 delete mode 100644 python/examples/deploy/quick_start/src/app/pipe/instruct_pipeline.py
 delete mode 100644 python/examples/deploy/quick_start/src/config/bootstrap.sh
 delete mode 100644 python/examples/deploy/quick_start/src/main_entry.py
 rename python/{examples/deploy/quick_start/__init__.py => fedml/computing/scheduler/model_scheduler/device_client_runner_deprecated.py} (100%)
 mode change 100644 => 100755
 delete mode 100644 python/fedml/computing/scheduler/model_scheduler/sample_model/README.md
 delete mode 100644 python/fedml/computing/scheduler/model_scheduler/sample_model/fedml_model.bin
 delete mode 100644 python/fedml/computing/scheduler/model_scheduler/sample_model/fedml_model_config.yaml

diff --git a/python/examples/deploy/quick_start/config.yaml b/python/examples/deploy/quick_start/config.yaml
index 83479068e6..880ea92d2d 100644
--- a/python/examples/deploy/quick_start/config.yaml
+++ b/python/examples/deploy/quick_start/config.yaml
@@ -1,21 +1,8 @@
-workspace: "./src"
+workspace: "."
 entry_point: "main_entry.py"
+
 # If you want to install some packages
 # Please write the command in the bootstrap.sh
 bootstrap: |
-  echo "Bootstrap start..."
-  sh ./config/bootstrap.sh
-  echo "Bootstrap finished"
-
-# If you do not have any GPU resource but want to serve the model
-# Try FedML® Nexus AI Platform, and Uncomment the following lines.
-# ------------------------------------------------------------
-computing:
-  minimum_num_gpus: 1           # minimum # of GPUs to provision
-  maximum_cost_per_hour: $3000   # max cost per hour for your job per gpu card
-  #allow_cross_cloud_resources: true # true, false
-  #device_type: CPU              # options: GPU, CPU, hybrid
-  resource_type: A100-80G       # e.g., A100-80G,
-  # please check the resource type list by "fedml show-resource-type"
-  # or visiting URL: https://open.fedml.ai/accelerator_resource_type
-# ------------------------------------------------------------
+  echo "Install some packages..."
+  echo "Install finished!"
diff --git a/python/examples/deploy/quick_start/main_entry.py b/python/examples/deploy/quick_start/main_entry.py
new file mode 100644
index 0000000000..7c4fb910b0
--- /dev/null
+++ b/python/examples/deploy/quick_start/main_entry.py
@@ -0,0 +1,27 @@
+from fedml.serving import FedMLPredictor
+from fedml.serving import FedMLInferenceRunner
+
+
+class Bot(FedMLPredictor):  # Inherit FedMLClientPredictor
+    def __init__(self):
+        super().__init__()
+
+        # --- Your model initialization code here ---
+
+        # -------------------------------------------
+    
+    def predict(self, request: dict):
+        input_dict = request
+        question: str = input_dict.get("text", "").strip()
+
+        # --- Your model inference code here ---
+        response = "I do not know the answer to your question."
+        # ---------------------------------------
+
+        return {"generated_text": f"The answer to your question {question} is: {response}"}
+
+
+if __name__ == "__main__":
+    chatbot = Bot()
+    fedml_inference_runner = FedMLInferenceRunner(chatbot)
+    fedml_inference_runner.run()
diff --git a/python/examples/deploy/quick_start/src/__init__.py b/python/examples/deploy/quick_start/src/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python/examples/deploy/quick_start/src/app/__init__.py b/python/examples/deploy/quick_start/src/app/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python/examples/deploy/quick_start/src/app/pipe/__init__.py b/python/examples/deploy/quick_start/src/app/pipe/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/python/examples/deploy/quick_start/src/app/pipe/constants.py b/python/examples/deploy/quick_start/src/app/pipe/constants.py
deleted file mode 100644
index 811418bf5e..0000000000
--- a/python/examples/deploy/quick_start/src/app/pipe/constants.py
+++ /dev/null
@@ -1,68 +0,0 @@
-"""
-Adapted from https://github.com/databrickslabs/dolly/blob/master/training/consts.py
-"""
-
-# -----------------------------------------------------------------
-DEFAULT_MAX_SEQ_LENGTH = 1024
-IGNORE_INDEX = -100
-
-# -----------------------------------------------------------------
-MODEL_NAMES = [
-    "EleutherAI/pythia-70m",
-    "EleutherAI/pythia-160m",
-    "EleutherAI/pythia-2.8b",
-    "EleutherAI/pythia-6.9b",
-    "EleutherAI/pythia-12b",
-    "EleutherAI/gpt-j-6B",
-    "databricks/dolly-v2-3b",
-    "databricks/dolly-v2-7b",
-    "databricks/dolly-v2-12b",
-]
-
-# -----------------------------------------------------------------
-INTRO_BLURB = (
-    "Below is an instruction that describes a task. Write a response that appropriately completes the request."
-)
-
-INSTRUCTION_KEY = "### Instruction:"
-INPUT_KEY = "Input:"
-RESPONSE_KEY = "### Response:"
-END_KEY = "### End"
-RESPONSE_KEY_NL = f"{RESPONSE_KEY}\n"
-
-# This is a training prompt that does not contain an input string. The instruction by itself has enough information
-# to respond.For example, the instruction might ask for the year a historic figure was born.
-PROMPT_NO_INPUT_FORMAT = f"""{INTRO_BLURB}
-
-{INSTRUCTION_KEY}
-{{instruction}}
-
-{RESPONSE_KEY}
-{{response}}
-
-{END_KEY}"""
-
-# This is a training prompt that contains an input string that serves as context for the instruction. For example,
-# the input might be a passage from Wikipedia and the instruction is to extract some information from it.
-PROMPT_WITH_INPUT_FORMAT = f"""{INTRO_BLURB}
-
-{INSTRUCTION_KEY}
-{{instruction}}
-
-{INPUT_KEY}
-{{input}}
-
-{RESPONSE_KEY}
-{{response}}
-
-{END_KEY}"""
-
-# This is the prompt that is used for generating responses using an already trained model. It ends with the response
-# key, where the job of the model is to provide the completion that follows it (i.e. the response itself).
-PROMPT_FOR_GENERATION_FORMAT = f"""{INTRO_BLURB}
-
-{INSTRUCTION_KEY}
-{{instruction}}
-
-{RESPONSE_KEY}
-"""
\ No newline at end of file
diff --git a/python/examples/deploy/quick_start/src/app/pipe/instruct_pipeline.py b/python/examples/deploy/quick_start/src/app/pipe/instruct_pipeline.py
deleted file mode 100644
index edcc1a643b..0000000000
--- a/python/examples/deploy/quick_start/src/app/pipe/instruct_pipeline.py
+++ /dev/null
@@ -1,261 +0,0 @@
-"""
-Adapted from https://github.com/databrickslabs/dolly/blob/master/training/generate.py
-"""
-from typing import List, Optional, Tuple
-
-import logging
-import re
-
-import torch
-from transformers import (
-    AutoModelForCausalLM,
-    AutoTokenizer,
-    Pipeline,
-    PreTrainedModel,
-    PreTrainedTokenizer,
-)
-from transformers.utils import is_tf_available
-
-if is_tf_available():
-    import tensorflow as tf
-
-from .constants import END_KEY, PROMPT_FOR_GENERATION_FORMAT, RESPONSE_KEY
-
-logger = logging.getLogger(__name__)
-
-
-def load_model_tokenizer_for_generate(
-        pretrained_model_name_or_path: str,
-) -> Tuple[PreTrainedModel, PreTrainedTokenizer]:
-    """Loads the model and tokenizer so that it can be used for generating responses.
-
-    Args:
-        pretrained_model_name_or_path (str): name or path for model
-
-    Returns:
-        Tuple[PreTrainedModel, PreTrainedTokenizer]: model and tokenizer
-    """
-    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, padding_side="left")
-    model = AutoModelForCausalLM.from_pretrained(
-        pretrained_model_name_or_path, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True
-    )
-    return model, tokenizer
-
-
-def get_special_token_id(tokenizer: PreTrainedTokenizer, key: str) -> int:
-    """Gets the token ID for a given string that has been added to the tokenizer as a special token.
-
-    When training, we configure the tokenizer so that the sequences like "### Instruction:" and "### End" are
-    treated specially and converted to a single, new token.  This retrieves the token ID each of these keys map to.
-
-    Args:
-        tokenizer (PreTrainedTokenizer): the tokenizer
-        key (str): the key to convert to a single token
-
-    Raises:
-        ValueError: if more than one ID was generated
-
-    Returns:
-        int: the token ID for the given key
-    """
-    token_ids = tokenizer.encode(key)
-    if len(token_ids) > 1:
-        raise ValueError(f"Expected only a single token for '{key}' but found {token_ids}")
-    return token_ids[0]
-
-
-class InstructionTextGenerationPipeline(Pipeline):
-    def __init__(
-            self,
-            *args,
-            do_sample: bool = True,
-            max_new_tokens: int = 256,
-            top_p: float = 0.92,
-            top_k: int = 0,
-            **kwargs
-    ):
-        """Initialize the pipeline
-
-        Args:
-            do_sample (bool, optional): Whether to use sampling. Defaults to True.
-            max_new_tokens (int, optional): Max new tokens after the prompt to generate. Defaults to 128.
-            top_p (float, optional): If set to float < 1, only the smallest set of most probable tokens with
-                probabilities that add up to top_p or higher are kept for generation. Defaults to 0.92.
-            top_k (int, optional): The number of highest probability vocabulary tokens to keep for top-k-filtering.
-                Defaults to 0.
-        """
-        super().__init__(
-            *args,
-            do_sample=do_sample,
-            max_new_tokens=max_new_tokens,
-            top_p=top_p,
-            top_k=top_k,
-            **kwargs
-        )
-
-    def _sanitize_parameters(
-            self,
-            return_full_text: bool = None,
-            **generate_kwargs
-    ):
-        preprocess_params = {}
-
-        # newer versions of the tokenizer configure the response key as a special token.  newer versions still may
-        # append a newline to yield a single token.  find whatever token is configured for the response key.
-        tokenizer_response_key = next(
-            (token for token in self.tokenizer.additional_special_tokens if token.startswith(RESPONSE_KEY)), None
-        )
-
-        response_key_token_id = None
-        end_key_token_id = None
-        if tokenizer_response_key:
-            try:
-                response_key_token_id = get_special_token_id(self.tokenizer, tokenizer_response_key)
-                end_key_token_id = get_special_token_id(self.tokenizer, END_KEY)
-
-                # Ensure generation stops once it generates "### End"
-                generate_kwargs["eos_token_id"] = end_key_token_id
-            except ValueError:
-                pass
-
-        forward_params = generate_kwargs
-        postprocess_params = {
-            "response_key_token_id": response_key_token_id,
-            "end_key_token_id": end_key_token_id
-        }
-
-        if return_full_text is not None:
-            postprocess_params["return_full_text"] = return_full_text
-
-        return preprocess_params, forward_params, postprocess_params
-
-    def preprocess(self, instruction_text, **generate_kwargs):
-        prompt_text = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction_text)
-        inputs = self.tokenizer(
-            prompt_text,
-            return_tensors="pt",
-        )
-        inputs["prompt_text"] = prompt_text
-        inputs["instruction_text"] = instruction_text
-        return inputs
-
-    def _forward(self, model_inputs, **generate_kwargs):
-        input_ids = model_inputs["input_ids"]
-        attention_mask = model_inputs.get("attention_mask", None)
-
-        if input_ids.shape[1] == 0:
-            input_ids = None
-            attention_mask = None
-            in_b = 1
-        else:
-            in_b = input_ids.shape[0]
-
-        generated_sequence = self.model.generate(
-            input_ids=input_ids,
-            attention_mask=attention_mask,
-            pad_token_id=self.tokenizer.pad_token_id,
-            **generate_kwargs,
-        )
-
-        out_b = generated_sequence.shape[0]
-        if self.framework == "pt":
-            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
-        elif self.framework == "tf":
-            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
-
-        instruction_text = model_inputs.pop("instruction_text")
-        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "instruction_text": instruction_text}
-
-    def postprocess(
-            self,
-            model_outputs,
-            response_key_token_id: Optional[int] = None,
-            end_key_token_id: Optional[int] = None,
-            return_full_text: bool = False
-    ):
-        generated_sequence: torch.Tensor = model_outputs["generated_sequence"][0]
-        instruction_text = model_outputs["instruction_text"]
-
-        generated_sequence: List[List[int]] = generated_sequence.tolist()
-        records = []
-        for sequence in generated_sequence:
-
-            # The response will be set to this variable if we can identify it.
-            decoded = None
-
-            # If we have token IDs for the response and end, then we can find the tokens and only decode between them.
-            if response_key_token_id and end_key_token_id:
-                # Find where "### Response:" is first found in the generated tokens.  Considering this is part of the
-                # prompt, we should definitely find it.  We will return the tokens found after this token.
-                try:
-                    response_pos = sequence.index(response_key_token_id)
-                except ValueError:
-                    logger.warning(f"Could not find response key {response_key_token_id} in: {sequence}")
-                    response_pos = None
-
-                if response_pos:
-                    # Next find where "### End" is located.  The model has been trained to end its responses with this
-                    # sequence (or actually, the token ID it maps to, since it is a special token).  We may not find
-                    # this token, as the response could be truncated.  If we don't find it then just return everything
-                    # to the end. Note that even though we set eos_token_id, we still see this token at the end.
-                    try:
-                        end_pos = sequence.index(end_key_token_id)
-                    except ValueError:
-                        end_pos = None
-
-                    decoded = self.tokenizer.decode(sequence[response_pos + 1: end_pos]).strip()
-
-            if not decoded:
-                # Otherwise we'll decode everything and use a regex to find the response and end.
-
-                fully_decoded = self.tokenizer.decode(sequence)
-
-                # The response appears after "### Response:".  The model has been trained to append "### End" at the
-                # end.
-                m = re.search(r"#+\s*Response:\s*(.+?)#+\s*End", fully_decoded, flags=re.DOTALL)
-
-                if m:
-                    decoded = m.group(1).strip()
-                else:
-                    # The model might not generate the "### End" sequence before reaching the max tokens. In this case,
-                    # return everything after "### Response:".
-                    m = re.search(r"#+\s*Response:\s*(.+)", fully_decoded, flags=re.DOTALL)
-                    if m:
-                        decoded = m.group(1).strip()
-                    else:
-                        logger.warning(f"Failed to find response in:\n{fully_decoded}")
-
-            # If the full text is requested, then append the decoded text to the original instruction.
-            # This technically isn't the full text, as we format the instruction in the prompt the model has been
-            # trained on, but to the client it will appear to be the full text.
-            if return_full_text:
-                decoded = f"{instruction_text}\n{decoded}"
-
-            rec = {"generated_text": decoded}
-
-            records.append(rec)
-
-        return records
-
-
-def generate_response(
-        instruction: str,
-        *,
-        model: PreTrainedModel,
-        tokenizer: PreTrainedTokenizer,
-        **kwargs,
-) -> str:
-    """Given an instruction, uses the model and tokenizer to generate a response.  This formats the instruction in
-    the instruction format that the model was fine-tuned on.
-
-    Args:
-        instruction (str): _description_
-        model (PreTrainedModel): the model to use
-        tokenizer (PreTrainedTokenizer): the tokenizer to use
-
-    Returns:
-        str: response
-    """
-
-    generation_pipeline = InstructionTextGenerationPipeline(model=model, tokenizer=tokenizer, **kwargs)
-    return generation_pipeline(instruction)[0]["generated_text"]
\ No newline at end of file
diff --git a/python/examples/deploy/quick_start/src/config/bootstrap.sh b/python/examples/deploy/quick_start/src/config/bootstrap.sh
deleted file mode 100644
index 950b749792..0000000000
--- a/python/examples/deploy/quick_start/src/config/bootstrap.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-### don't modify this part ###
-set -x
-##############################
-
-
-### please customize your script in this region ####
-pip install langchain
-pip install transformers
-pip install accelerate
-pip install "pydantic>=1.8.0,<2.0.0"
-
-### don't modify this part ###
-exit 0
-##############################
\ No newline at end of file
diff --git a/python/examples/deploy/quick_start/src/main_entry.py b/python/examples/deploy/quick_start/src/main_entry.py
deleted file mode 100644
index 82ff90155e..0000000000
--- a/python/examples/deploy/quick_start/src/main_entry.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import os
-from fedml.serving import FedMLPredictor
-from fedml.serving import FedMLInferenceRunner
-from langchain import PromptTemplate, LLMChain
-from langchain.llms import HuggingFacePipeline
-import torch
-from transformers import (
-    AutoConfig,
-    AutoModelForCausalLM,
-    AutoTokenizer,
-    TextGenerationPipeline,
-)
-
-class Chatbot(FedMLPredictor):                # Inherit FedMLClientPredictor
-    def __init__(self):
-        super().__init__()
-        PROMPT_FOR_GENERATION_FORMAT = f""""Below is an instruction that describes a task. Write a response that appropriately completes the request."
-
-        ### Instruction:
-        {{instruction}}
-
-        ### Response:
-        """
-
-        prompt = PromptTemplate(
-            input_variables=["instruction"],
-            template=PROMPT_FOR_GENERATION_FORMAT
-        )
-
-        config = AutoConfig.from_pretrained("EleutherAI/pythia-70m")
-        model = AutoModelForCausalLM.from_pretrained(
-            "EleutherAI/pythia-70m",
-            torch_dtype=torch.float32,      # float 16 not supported on CPU
-            trust_remote_code=True,
-            device_map="auto"
-        )
-        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-70m", device_map="auto")
-
-        hf_pipeline = HuggingFacePipeline(
-            pipeline=TextGenerationPipeline(
-                model=model,
-                tokenizer=tokenizer,
-                return_full_text=True,
-                task="text-generation",
-                do_sample=True,
-                max_new_tokens=256,
-                top_p=0.92,
-                top_k=0
-            )
-        )
-        self.chatbot = LLMChain(llm=hf_pipeline, prompt=prompt, verbose=True)
-    
-    def predict(self, request:dict):
-        input_dict = request
-        question: str = input_dict.get("text", "").strip()
-
-        if len(question) == 0:
-            response_text = "<received empty input; no response generated.>"
-        else:
-            response_text = self.chatbot.predict(instruction=question)
-
-        return {"generated_text": str(response_text)}
-
-if __name__ == "__main__":
-    chatbot = Chatbot()
-    fedml_inference_runner = FedMLInferenceRunner(chatbot)
-    fedml_inference_runner.run()
\ No newline at end of file
diff --git a/python/fedml/api/modules/model.py b/python/fedml/api/modules/model.py
index ca5d0b95c1..a02e674f47 100644
--- a/python/fedml/api/modules/model.py
+++ b/python/fedml/api/modules/model.py
@@ -21,6 +21,9 @@ def create(name: str, model: str = None, model_config: str = None) -> bool:
                 return True
             else:
                 return False
+        elif model.startswith("tutorial:quick_start"):
+            # ../../../python/examples/deploy/quick_start
+            return False
         else:
             # TODO: Support arbitrary model creation from GitHub / Nexus AI Job Store
             click.echo("Model {} is not supported yet.".format(model))
diff --git a/python/examples/deploy/quick_start/__init__.py b/python/fedml/computing/scheduler/model_scheduler/device_client_runner_deprecated.py
old mode 100644
new mode 100755
similarity index 100%
rename from python/examples/deploy/quick_start/__init__.py
rename to python/fedml/computing/scheduler/model_scheduler/device_client_runner_deprecated.py
diff --git a/python/fedml/computing/scheduler/model_scheduler/sample_model/README.md b/python/fedml/computing/scheduler/model_scheduler/sample_model/README.md
deleted file mode 100644
index fcb51bd792..0000000000
--- a/python/fedml/computing/scheduler/model_scheduler/sample_model/README.md
+++ /dev/null
@@ -1,57 +0,0 @@
-## 1 Device Login:
-Login as fedml cloud device: 
-```fedml model device login $user_id_or_api_key -c```
-
-Login as on premise device: 
-```fedml model device login $user_id_or_api_key -p```
-
-
-## 2. Model Card:
-Create local model repository: 
-```fedml model create -n $model_name```
-
-Delete local model repository: 
-```fedml model delete -n $model_name -f $model_file_name```
-
-Add file to local model repository: 
-```fedml model add -n $model_name -p $model_file_path```
-
-Remove file from local model repository: 
-```fedml model remove -n $model_name -f $model_file_name```
-
-List model in the local model repository: 
-```fedml model list -n $model_name```
-
-Build local model repository as zip model package: 
-```fedml model package -n $model_name```
-
-Push local model repository to ModelOps(open.fedml.ai): 
-```fedml model push -n $model_name -u $user_id_or_api_key```
-
-Pull remote model(ModelOps) to local model repository: 
-```fedml model pull -n $model_name -u $user_id_or_api_key```
-
-
-## 3. Model Package:
-Create local model repository: 
-```fedml model create -n $model_name```
-
-Delete local model repository: 
-```fedml model delete -n $model_name -f $model_file_name```
-
-Add file to local model repository: 
-```fedml model add -n $model_name -p $model_file_path```
-
-Remove file from local model repository: 
-```fedml model remove -n $model_name -f $model_file_name```
-
-List model in the local model repository: 
-```fedml model list -n $model_name```
-
-Build local model repository as zip model package: 
-```fedml model package -n $model_name```
-
-## 4. Model Deploy:
-```
-fedml model deploy -n $model_name -dt $device_type(md.on_premise_device/md.fedml_cloud_device) -d $master_device_id -u $user_id_or_api_key -p $deployment_extra_params
-```
diff --git a/python/fedml/computing/scheduler/model_scheduler/sample_model/fedml_model.bin b/python/fedml/computing/scheduler/model_scheduler/sample_model/fedml_model.bin
deleted file mode 100644
index d98296eb617c99fdf257707704fa9c61c28bb308..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 1476451
zcmZUadq7P4_x>lM5C%ylj3gsr5Nh5FgCv9yg=El&B9$Z|jBdKA=|0kBbiX&cPW!#8
zNl6GHP6%;aLg=JJhw$6qzkmH(|FrkoYd!0EUdwjfV08{hQ<L-m{b+G?IEOu)|K~Q>
zd5`y=xdN*dV>vtizaR0(Gs1PcPS}0Fm7KJXM&*oyxb$EwsP+g^IZqSZUY`Qj!!qpn
znvcBGHBeYy3_WN4K+U-T%Rjn9<pCL({U9JwvnTw4YH)G;LYf>I<b0J7-fJtN=ei(a
z@x>?*h*QC3>3(qEo`B_<rKsIn36{$<pjH}<UFK(re3KhEteA$K6W&qP{XqKrmK^fK
zFM{KqQYd~}4AQ|SpgUqQQN~Q5y52vj`rsODyI6qD+jkIe_Z%wOwH|KjSEAjpMX=wM
zi=HOmsQjCpQS86Lv@Kr`Zfk15sNNb>PfUp-cqar-Z70fCi9j3}T&tS__DLzI`J@0`
z##GYs>H?glX^o0I^PsEa3{e!FAjW5^z~yc?Dg2oX+0TN|{Hz1vl!Yoar^LhSnjX5j
zCJL-|y&+!97deZ4goC;YL9j$0IAcyQS_c)-ad8O@cVB}Cw#VQ}Wg;kRheM-J2s$%z
z;(Nmgb1w`BE!R5eY+4ITs29TF7o_#|J7On&Ncy!o*s1iSg5FBv`M{Tm9jqA*?LZVg
zu^=r=lc;cTE^z;SOmh<=k(ZW-a~8-U{EY~8>+C3RsRD9NjmEtj%rN;x9xA`AMqjK2
z!Q*R;^~_SznsgNes<*_E69XLgGr}X$X~<0+4{hm<v~_JL_%7KA!A2)BAw(1Pvg^@v
zktWzpJq>O*L%{dgWN_M-3<I61u<Vlrhi*uOsyzkhQsqH(&(+e}VGA(up*@h85Nv+D
z0_|3xgYLiLpz+fxFyf~}?>x57^sI%=f|=;@YY|zPJ`rt(3{cVXT4>ctpztXg?!-mo
zt_OP9d|L~xerBVsU?2Rw%LiKrYK0HN_0g!ToZ3#whE1(2@mh@^&YiD~t!J(f&DtRl
z_qq^uN7PgK*4Z>TZZ$S*=7a6nQlVn-UJxl_X|S>hdcL?*PS-U$>2?jCE>&QcXc4CF
zzXm4t-k47sF>+}#s8*L)nE%Wmb)n@rFhPdlt4mNVFC8U6H&Wk+(Qxsa7~6{f2M(NB
z;A=UL&KZ9NP7hAQa^pBO+o>W$Wd||vsV=^bN~TLjoWR;2f!KfZJQ3eC!0yyjAZv&s
zvVap1QFs;{+AHX{X=TuT*$+OvtAH&&mKe4!8f7|Gv^$^(?2;YP>T?JvZ4t*@$U~c_
zg*aQh9z(vl0!PghHv2_@^*`R&-cU$;oKhLlr|TAct`8~hH$`RJAR6KnPCFmf2yu2E
zRV1s!hP+g4^?XG}(E?aIo&jsrg5UpM25xk+u)pFm+8vt+oak*JwRa)rudImf+|`6L
z=%9JeEFOHgSqhSAJeqnZ4g}XssM8%CeAaUX@3luG$72r-8Dt5(8;_`X%MNHM%mdF^
z1VJ|)>Yp6LEnkn~s@!d;8giGp05K@uU`%Y!eJ1T+5}+%5DS7l>h57B5p)>v`txS`_
zfjKj=?shIZMEDTN(x>Fk;qxGI3$HGHFT$;}vXJj&go^jiNqd+ldE1)=n*V9g7Kd<<
zde;bJ`j%nXs3??{?F9E?F>Zf(0OJ;ogMqtGsrYjZomeyzx&O7(T+wN0o_vnjI0b-P
zSTdbrQirfFk`DB}qPDkmV1;ud=uY1V`zCKeL;rP{QYAs_zdjLO(ilc1w-ml!a~`^k
ztBBE*f2nzlF$SKugyu7UlVqz{kbLoDTBgK;`G4ogkK7RGIC-7?cXABcY`RKdMGl%@
zcLwQ#JYl!@dC<zJrlRpr=-cT5$e-0qJVOto?0gb^tF{oObF@(M>{zwV!eMA>ABvJO
zM^O<U27HV4#B@ds@jakPUc23=edj_^=Gad-`d7>+myE;%(#hCwFb9GZQvCfc3PyD{
zfb%deh}_wCzGNy&;(rU3|0d98tx&YpSb&Efu0&}lpnBgL=$Ud~*rB@!UJXxz_HQMy
z>fSVLcHIKo{tiK>Q;DGIFq~BFNJhTiAyPUr2trDVsrI%DpnKY$s7FVUn14%QbfpkA
zbCQWh_yIH@s{zT+i@?^Rgnk&80ipg;nA9nQ)~8p6PdsuEUWI|RNj7S3i-eB1f%G*u
zp1Qb3Vz*fxDG&944gHDuO>!RStq|lV{Gc&gZ7}(d5IkEOsk})GX1wu6us#bO&yJ$f
zY!xXywjM+gxg>v+Extdo72hjYqv_ZH?6R^TOL^D8=g1&@eOf{G88Ucxbrg14nxePI
z9uQ<NB4JfNxMDX0ac93$Yq!<V{8t&3ZgZql4^UK$oeAwN?u0Wm7jo-A(PiH5IG|ku
z$(1X>{{ALxzW9acm|eqMnH0OC7tr?1n}qkd9zqf<&>$xanXo9(o@NYEk9grg@>mT1
zUo5m=oJ15h56IB3V-SZ+NX!BWa&8iH{hFO<>0FN?MwIUVRR!XS|08mpr;Jxt3}|!f
z;h?b@n!5#&?3G!dUVZ>Af0;ths6s|{sKBCYg&r7PsUZWCD4ISzhmB<oz)3Pe$(DFZ
zERu1fE+09+JA^%D<Ef#I6rB>XP^&coH7zS>m(?ujsjUH<k<}#AAPc0QmqD*q1>^>n
zQajreaEMn!$<h*w=6*Q@UQ+|X9vje2RR{6rPSW-t`;ESy361tYD6ZTBybZBLbmu5(
ziiyII!%CcNTnPR{0>HO)DZyG9x-aBmVU7S)CC0S-&kFDfD#4Q_HSoT*0K3|4z-Egt
z$m6zQt|}Wcx2;0Iqx-?>^Hxlq7XXirXM<(eO=4HqMwOzopnwr1x9Bl3vB<{#%c7Cj
zt%3Zf^T-|peRN5EL(MPN60@OlkoG!4rerHv?OTlUC;KS+4~IoFEU{1T40NXs0qx;&
zpyV_Xc#%$fY(1!p(OPQa?uk27U9r)okb-(L92{MN<twc~GApyXZCExL)Ef#fvJPR=
zXf;e%xS(R$I3hWIS*V&HEtFg{V7kqw!^tBtAlo5<zPII2_-qJD-}<ocz7qPL%jv-_
z<<PZj1|%28LhBZ!wxP|8+DIjKznlryIU?{a+(T_eUn&2eskHr=54!nH1jVkmgtyZX
z+Fhm*zMCERF0{eXZx&+I&rSH2uZ|Aa2FUQ%B2;c_fKvZ`*m}R4(e0f@wT*%y<jF)T
z-@T8?9au*MYrYdVJ%8-uQ5<)Hoh{M%*c$9bTU)e1a^i!9T2du;rKP~DGybR^|A}%M
z^BLvD_rx(H7M*WLK=%!M=v{dZtdltS<jzUFb0QsV8!Vwe{}ejT%>XgY7k;-BV&*I%
zs2>{%oHJ8N*63rn=N|<cEp`TP+dy>rIvOI=GNCSiF=~41;PJn+5Efa0!o-ZNYkzR@
zFaw2=2W<oubgZjK9YzXTeio2v=?iVXZ8YYi6@FYDiQ-*}bmyOZT-}z9hK@BLda{8`
zejkfo-__9NggceADrn>6YoPk}oIYJtf)kRiqHdKh2+S-%vo{hneY!~c_fmM5T#jzX
z!>F&;JJP;(GF#IF!obodkgmGTcse9wf!#HXD{dlwF(a_>YzP{EsDq7z^--|5kKD1J
zi*YMzNavvqD0#dETBpmPw=D?O-%kOjdkS#sXu#V3gSd0`c5GkgO3HV}LfcsnIQr~7
zs`l4HTy-SEi|wHGBNP1ph%tSf58fLdk8WSC!Ry(Y(BGq?kGD8u+<7PPU7trpyF+p4
zHVyRr%Lja?-Xs%M<FHLV2Qw|?gtKz9#iY7)bTNHNwfQPgWF(PV_ax{rnhV^O59pxv
z*=RlNDA?|dWNerOB0i@D<|8t(dBs)2HOXL_js7EdCyc^X>r*lLUm4Mk-VLO=4y_vn
zp!+t5n%dP;{z3!#<FpW)vqnO<f&+rp%Rt1F5Xb6R$kn$YJ=2VE*V+ll4>?LXr><7Z
zuRN#HgamNl-X$(^Qff1)3|?ab?AFUg-F;T1Cx>G1#thKRpF$e9>VxAg5eQ~;Vem>B
zuFSVUb+Z%{pagn`gfZhb?ZCp-bwrV&N5)14pyO^QxStY--R5V(%<C-sJhG_PsVp=X
z+Ct!zD<E2s2zurY2vdh3XUIy@ZJdYv%KbDoJqim4RpR=)p{VlRPjqG{V*dIDFmup@
z{N8K0b6gfq6kB03yF;wH7k~m*_cc50g|PNGwC+$R4z(P@bI27s8@!>-h5}Sf?j!~p
zLDb~EB^GnTQ88i~9b1`*X0b`sYknZeZ+xS(PlVznpOxs=a0s-=JEKA*hEePQp8byr
zmf!kGPYp9eP5X)T>B~kG->F8<z-@A{>Iz6k9ED^inA&n>)Zyq%IN=wHk)JpiU$X=s
z`4nNdQ!btF6bXDj5@*k9>hh(W4E#@w?=_dO^>~BH^i2a!^Pa^Yre=Y8RRi$0KV;VL
zP~y$GBhf3S4A=e+NA8C#bo)CS6W3C7njQ$;PuJ<Uo3-HZa4jYmxI$drMi6(#GQ&S#
zgutdW=uaL`wJu0dSu~dR@Me)7ml3pQf*3f%>Z>hdV{o`K1B0JkL(iE%iRbFuB&|Uk
z6F=Ew$lg%UvAhC}(UDMoI|f?G2f`2E3@HI?Q0nuQF>km{&sJ?h(V`Hln06AyOS*-n
z>OtVSpoDTh#+V1`T)@9FLa=d4G-Mh1;A{UmpmeYW{)lqq`NflF+x>9!axLD_&cHcc
zB2-K?0|5tV{tyZ5In#iP61Jn!cMXWx%*DP%abW6Kg`208;O>v*ST;l#p&<gBYy*Jv
z{xKu|5|0+g_o1v_103^Hpk<~#G~0EN$_X|2GinKTZEm3#A1+7Fx64ra%@`%*8d3Sh
zVEO6O;9XLUrmMHXj`v|uwDuB8PS0nwOw+JukvBBHi9*v5HOQ4;gzmh0F#OvJwKJ<B
zbH)Xb-1v*g$0`}?uV=|gemwM7&Ld#A5M&>cAaKtaI9nEqx%=)?^<DP~=ht(ZsS^ji
zUt_UiGh4?CACipoHTYebg1+suY4hzRAW=6Z3$!gz#Q8}$x(_Y-Z(M>o5&;&C+ktT(
zeWB}r)nGGwJn-{G)Mz=o<Db{Bws}xZd3yH3t`|GOvaA>!M41o~v<XsM>@XypeZKCw
zpy?hDiuLD6prQoy_9^kkEqCOv_(DAw3qdflnd!Q$1Ah5xcqM8%&NolQi>*rZ3se!a
zt#9b4xtb_Z=26EHodj%^F#W^?w7XmkT5mGoWl01oK9w;7W)?MHw-t<1{6IC(%Sa{&
zsyQ=H(T!&H*oozYQ`IDl3(tn6Iw?H*ht(`s;uyt0VUX>*9(7IrM@GKW$EJ_fn0!%;
zi=H=tbkTYCnVbg8UCBh1`;SFP-bNbss}?0vGuop%0by<z&`ePZhhk-@E;>hD{@F^k
zN^)S2Pbn5A#zOGpD-c$&7F&0o1W|RI#j}!8=wZJGduBhU#UYDP;3fkjn<8k{TMV{?
zXT#UbO4RmxOD(kygWSDHST?m9`*$6rMrt{vyx9(TOJt1q+HxYE&V$?=#~@&G6pAXR
zF`^O2(Qd4Y`sUrFE%z?N_6IqrTR(~R|461@Uy?!Da|O!XRsp}LltjHAi@slH0{_em
z6s>9(@($TEcA+onzJYPbDNd%I`^-VP`7N<DI0@!wj*-b248d0tfE)Ok@`8bA-)^Vs
z&Zf{Z_!M;h{6He#Z9&bjHqy=s0!HHs{612J0^<gT*S1N>&AKPF+H)1H?`)&+VkIPd
zL}B+&F4#F1g4_I47&3e`WOkc?=G)1nH8hTqEg41+46VR58hf!bNuSDRB;n&IE{5@p
z&}<5U{#U!m%RkjPzTO0NTY_P-JRiD_+mqJ8fkJ6dF%#I41f8-{qP((}s+}%Ik=AF*
z=}sX0N&Xl=HVTcu1cQ(HIk0I(*j%(2OPwU(Q6a#q-y=bpo{HX!V?cT602p2Hr?%=J
z7;&nPaOU(lY&>QKg$Z*&kg<pquPO&##SF&a@pI}MmIosbpGLI{322tN7<s9d)K?ge
zn!C18`9Lx0Pg?~cUnj%fzeV_d#zfqI#Sh)jZo<yKlc2Th3GMiih?2H0;%KlD&3x|C
zGl4Sjxy+zwbvasd21uB&40>75L2FSc;ZGJ*w=FxN|K(P!yc>o+>~}Em;5OmjcuEb6
z+1~i41~q%d==;!`u5guO!g%(75ypZ0)M$J<a{;1^-D%$x;9onNu$Mmv!M>0NUNnF=
zygb~UWsNT3!C+?glIXsVf^A<eV(H#xs8`9pySv9|uZJDB;U?q{(n7^GEeP3BP9)B&
zK<=$C+|pc)DrP>u`mKY`m&!o$&K(Sf+QIug3+(oxa7SQ;T*Ki|_<bu#(^`aq!&Bji
zmmD8y1%RzE0le$yLEGsg(CmJdc53$v9fvGHom)AGA&s>C;TjYS=^!0C`B1n&i2UFD
zA6S<SZN2Q@PhA4$J4RvuVSkXA<T67~)nKd6Arz~PM8|_%xU)YW1LcFjHrEwASD~=`
ziHz*!4#J3^SBbGZf?Q`hwoXjOu(cJ?{(pP;;%(L9`P&)6z6Zp@%mp{pgrTXLFW6n9
zAm+tj>V{R=TzecXQ@2Cn`+AhDGOtcQmk8yn)`Rj7JB#1>K&SLF6;uU)X!0cHFDFYB
z?Ky|ifvv3H5(Qo67s$Z$SnMwJh1ZcX2wSZ}U#sWDV0#KME6cDazn9kJCbK(T5X>2D
zg`9*<7NrHOudv0CUVAhLH+a>7U(iC#HEJN98|-OBx*3eCyny7r9&)sUm7T-Zpz2I*
zHC#3a@0leu`K1asy>&vti)^NUiv?k4ECgLzhxwr~xOH|pmOp$&X9_6V&#uL|!6U#a
zDH--O=OF*P9_@+yO_EbBVX5PKG*MTf=t41wa|SV;rJX|1iA8lIR%eR~px5Fmdd-`G
z-uIHB{qAdGv^5t^+s{G9KnCJkSLlrL0`FNebQ$}E_9uivo{Jr-BJ}9NniLdU9>Do)
z3PF6;idlL{1C66nkz<>OA?5d}=R6Aa$CFU_Dhqtat)NwZ&&U6koyS{mr=n5ZecG`y
zm2lO9aqpR3xFa|Y{K}=&wqhj-9I+l0O|i7;au^up7(;2747gWj5oLZA@Gh<ZgDj+y
z(TgoqrjrS;C4q!3NJ3TpP^Nc#D!SZQLb!AdY2)Qn?f<OdTXO{%9n*wC7rasIBgNH6
z%kan73o!HgR@~4vA5(Wy==_oeqEXMNqrVpX+^vr)`&cY@Eyc{*jUfFHfTd~$ATW4N
zME~T2q~c+<>BR=z{3{K%O7k(->Iao<ctnkU8G*$Amjyq|5PEv{fohDjy19NYZ5e!@
z2v5eM_o_syWfg#ClS?u6Qw(l+UI(5Fmw?N?8Mxwj16W@@LOWXuAtE4@%8Jc_qw%WR
z^wt63G+q?SA812<WGr|%)uE(gdiBpWVQAfC54>(4kQT_%b5bYida(zlO&N`Cfg51v
ziAwg{n}U+8JB-&OO)TV85?<RGFnV%<Dh377l%OJ%F5LyP++9SndOJvD)2d5<<)ZBI
z1n|B$8VvUgL-9C6q2uB!^!+P8<ap;$S&1RYO|66;`y5eXqO49%%ZK)0eQ5nyB5cfT
zBeurj3}YPueK&@H^}spm9CRM0R+M1fMm}n7Z^Amw0yHXWr^%XC)Vw?hcvW9W&+Bla
zel!N22i2qVku%U-;7P?dCWGreXYBlPoup{ZMAg(Y<aNFq%r7{IoRKCLemhK{b*&Sz
z{oTME?Xket33sR<p&I<YIKt+eRjAS}1K&x%>5qg}_|{_ua)O_lck8Bs@AGP+HTn^K
z-q?UX+tNULzLeOSAaPut0L7p4AUz}#O)Vm*qfa{Y)Ey?m-7%oyGy{KeC!u@8Hk^Og
z0S(vf#(j^Lp?Mu*#Pf7;{I3+|8w^9qy;XGh`*bioHVqq}2jNe?1iMKOH3Az@z1=C~
zJh^JlE$O1&pLc@p*<VC<xEcbl4}!w>VESyBF7{nUEUcOXmX>K?88;O&Z#*ZCrgsVN
zUx3N|86XwMqrtEnRJAt7!qin4+YkIpezCpAT&f_VaXQp;)-K?jm|C42a2CD|#Gra!
zILfj{QO>{FU^+^Ud*!T$)Hwp=LVGApnTleMP2g+)opcHm!Wv02HjZ5dnkTei%G*<{
zcN7hFlhvVZ<9Zl;Y6j|hgyE9D6try4r#5bbv3u<!Dt*~Y=9E}siRNY$$M3=!^=pw6
zH(MwXQZyD*I6T`8?_bOY`GPXAu3~HS`e+DlXu|dgJFxv{E8KOMjvu#XqJL^SmRkkT
zupf~iAL>d}YZn2}D;mCEvO!y=2W=HK6WK~@ux#{zFQ+OnV!Sq}=2{8)1Ao&K2hJex
zpLC($-3`D|#a1LNh`^5jhC$Oz20NYGg_8NNh1w?$LCdIcXl|_szL5;ey0p<bWi}jg
z&_}5t0%>g)ay0%Sk53rlij$$(rPE8)yyCI#&?b;~`eJV6P~Z$iQlC8ypm=XjWc|Bv
zT!k<4J)Tnz>RT-PA;6gEMu@bZh9{hCak3~4(&zePW&8nbz8*|f{r{t%Kg2-Xq$ea|
zt2v5G)ycSLJ8+Ijirn(m&|@X0T#H}it#c--^mYg(4ld|&*M>@`&IZ|31~i)qaeGk@
zCfC!UP<=2OKdplY-L5$1X*%w3jzaU!{h(@k%ouDx2|4*OSo3Tu@+QtB4mOji)`ykQ
zdWrRm`VRqT`dFs$U?zRe+ly^`SdXzJ9NIY^^wYosoO#X}RWd{BGEPCH|GPts)LzoQ
z<PqRF`4#nK`-a$Tl+dB<3oZ4N!F)YmOuohXBZI3zI(;g5sRCgs#~K|DPlv*?CQ`n!
zi7J22Am5IkXLlukYP{<*d}w5A&&Ch@;>6gjACK*hV@XA(Es8%n2<LmIKyJ?ldduS!
z3Ty|+*bh5V+juz$52vE<y&zm(+Jw4iia?kj37o4Fg}ZDix{T~1A+L>Tx}z1Qjh=z}
zZ-bGaF%<Oz8O(h&3*1c8XyBPj>Y%<KIy#zY?je9UpAr(-R!JSt##4OG2T$e@X-?}S
zX8&^G=B$}mJk1R?^-q%Ux+2{1>;(Eg&1T=rP#C(a0Ww`UkZYiW%^Rz+d}AS4Hb_9f
zz7gijLqXMh7OV~jgF)dCXv^Y)wbokFtdk9z%hlNYaSllIw5r$Ir~nskhar#0qa-Yt
z5!!fQV8Bq&|0fky0prM-5*1kATS%V_=A&Tn76>xSN6&HZi8NB1@Hc7zv#1yyo%KO-
zM$00(>TeovGafbPxe>+q8$@22%ecJvr7OF$(euq*NR4DO%$3dnl9%LW)OJj+`A%(O
z!qIYhF8KOgL2lI$GH^srPh>~o7(qJ5%nt+Qml9Mu%%qurq~P)=kv?9kixDGIY170?
z?AzafS+5hYb*ur~JFY?4d>IVft7dnj_jJXeM1XJw_N)pZ^45QuV98aGoqJFAejI`W
z#$}M){+LLnz7Se}-i}8e>__<k&_w=KYzp0hyy?ZlOtaDOZBi6$VEy0VCsyc5qX<`{
z1VR#PN!h(wxTq==+V|u`sLL$OP2NgMzsX^Z5Yh2j5CqPT!BJ`Z&~D)rXgxg+bQNac
zJUNQCPw}B)YNhC=+y&ZOE>n#V8|<82LLA=ipr4Y;aZ$DmTl~g=;L%)mhjs*gb9P6K
z$VTOzdDQl8Ha4>R?EL;J)U7y3MP<H>H0>qRnS7N<H>k1ujy<%#s>2cDVw4q7%A2T7
zwGIXmw+U&G%s&aX3ttnFMipcxm&22`g{T^w4YS@X#z=`Dwylv8&%+DATxmcg4f-%;
zO9iU#?-#1nGLe@HR2KISo1y*9sLbTmx68^gb4LS_=&Z!-nIT}U+yQ+@)&SF&03*)q
z!w${y5ORAxc1)Xttp&QM{BxIvc(_pI`42>8agb>o`G)#jQ$o-8JmI-7$1%v9i|r<@
zq%$J}6WoSk;K*Q%(6a-Zd_Cw)t0AwRg|WIwi|DF)iKw@il<S`bX<{&Aayb;Zp>K%W
zyjEhImIbHt*!RHXfjo1uP~wt<{U`fKeqAH%m=OUzIe$^-tE@gxJwk?+8DlHsPhn~`
zB*%F`u67E(PAY+EkEUU_xCr=D-x0w;HSN6e7yY27kC$g8V#{|qG^y19|FRX)H1r2;
zaRI(wH4^UJyu#{x74z>OHq)^SLG6_D=yLiXa25!NV5tVS-~B)qIdU<H<qCXi^FjXW
zFw<}P54GK~keGc}5a-ZTe0;wI1I+?R_2$j^zbX^dKU|HYtm2`$v4uoeUqRK;3$XT8
z8RV*K5R-kQ@yEMz+_YyY=KeVaZ$AaY9h*7`UsA+AZzae-EMR_fVqvXb9Vq88R8Zpz
zSIU;+l-tWNAyESTLAu0lekJgRULrhVDjb=bhn-HbIOl#P-rnATyhIb>_*qlXZD2gi
zW<_k_@PknKuPdg#yNEXR$5^jw4Th*>BxYVQ8*d4~`ZJpwq&}zObS}OMy9AoDk3=!8
z38l_r;+MeCBk3;K6V$-&Mu~Lu^||<#%}jO8zEOkAFR6T<ifJ9F!y1Q0$VobA&U4iR
z^WKT@#ESI;SEg9l^*ko6W%sE1R&97Q+5=znONi|~bHcx|kdFM<61VLd&gw00wZCo+
zaAWTaCogJ3?Y9fTvnCfB<TiM$c_Z?4sc`+7Jj^}JawGFLLGTDZ#*G;P8e@v^A7vxn
z$%utG%p$m=8dN7wK--LRl&*ivX!e{3!Ldea`r;PZ&F1oGLqMMOfruv?($>_=Lf@P`
zXdS$mxD^+Wra2eEK@y3%H%0KnDH%O~_7XST0N(!BX=u+0+&?M>vOgAqXW%`?S8j~Q
z<+*q&LLbGX5PiMu@jcHSyEK9^*J&Eb99xMIUHhR^`vged+!J>4>Z#Pg7_=V!q>#J=
z`p?LyrPVR$@Cpab{=>K!{ZTZ_g~(opP=ibESaHArm1hH>=fF~sPU&O*mer%*Qxkx>
z@nH9=o8pReB3_*&EPk{XJ(5^|nt4gNn~g}fG9OH|Sp75fGvQ9Y$h4)rrnC2_qFuNE
z2eW-y@J9xn2e>5e-8wK&n~2S00#Gt3hg7dPgN#c!#08qbYzrX{`o05Qu6!lD#!;ly
zwGkW(Ps6DGT-aP+i>)CesY|W`cC*=w?}X_@%1OaEjq@lUypV|sVm*zu?@7q!CBPRK
zvpI4+PKXRgOf92l3Nv7@w-{a4UWN7(dT=9WG^!lZ8Bz9ip;6H}s^72=U0!b_p6Nq@
zmv<GtM+}F!_RqAXH5@iIPRGJ1OI)3sf~)wk*c|T)PIL00vqOVO20WqTg%NnoOF^&f
z4d^&2nJ}SSv4z_J=Jj%_bE^;z?#~0a8wz5UTucJp1L@k<At-;%db%Nn5J#6lfNK%@
z)=r?!OGI?#r0wW%N{9}cQ;Fl3JOZIh;1RzPn`dt&HhzzZ<u4(2`fVZQ{*OqH?P5Bx
z<RP*1rm*%xDm(Yug_`CHTDvF;-;G>_m4$JrFkAs6-y36PeFkv;8&CIoM&K~}`Dk3X
z2*+O-i`<U~L9xaYsz<Z_!-IHm%(aJi6K3LtfdXt>bP39P{4n`RDB;<jWyJqvGY=~W
zwn-ZxV)`~{DbB?C?6XPN2?5XUN@4r8eA>U{DD@Nx=pC;Tu#QuKqad1?Zm1&%o-k-$
zIU81l=Yzxi^Yq0JFLdCH2VRMo4o$cK>NiIMFEfI%o*<;6y{rewW%K&9O~ickV90fn
zlL^nKp)$ybHjfyA0`u8ub4yAOyNy8|!!#UwKOg)484sLdM+>99evtmY9Q=FFvOew}
zBH^urt5edU%>i%_HxkF*WI33{pUAR}W6^rG4Esl<(4NALq<L;WIoe!-zVGvii@_@J
z{gq1dDhcM+6_6gs=S+LmZ(=*!7)xyu&|h^4a)aDRl#@S7EfvDfLNnsn$Oq-HwP0(u
z6QFH0@-0e8XF@*BEY$#0?{@m)MiDwxSD-YlLg<&f2;6?#;e=B<$Y&VJIiOzMAvgh(
z=fvakk;NGKpa@n}79&*e27cfX*f}#En_enGoPGcmk~75Y49i|vE<{tezldf!J8MpK
zk{<=}=<O_pOeUS4&EAEU_iV7QB!N1zOw*w1A=v)agnqfl_Ir*Ga!S4{dF4aE+1Cmz
zYL=qqu|ptHWI_A?EU1H^3GS3eAfL@f_m{X~^T~VE^1By!j>x3mx2x#h$ziDeTLj#B
zUPKw^0=0VQQ8_A^^;CvpPi%_Nwrni*w`fFsX%B57N@#mlMI~W7KsRSU>{(lm^7g4j
zdy*1Ak4i(8ARVH=<fF_xi&{_O6NiF_#A%ci3J0E0U6=FV^~42Rb2fupgAOFQm9r4X
zd7^GqPF`=xfU(w6FkLqsn{GQ`xm^N@I8g})4x3}Fe;Vt%uuMbOTx$5m8IOM;*fU5I
zTAR9rVjK;QE{P=5YZo5W9)=@BQqh0&7PNM`faUj;*xERew)ZB0O6`qB>5L%27aL)H
z-ch_W>Kg1?D!}H27T9iogv69xg18&YP<A?yoHAaB8{1r251v9x`*7$mb_Cra!K8nE
zAvCjj`uOSy>|ZjGu6k68JrjF`9s3@W?4o24jSm0^X(eV#GO^!`)ft}d=xwJ3qN=Fs
zXOCG1?N&Y5&RR<P>iub}=6SG=Hb$OQmv-n)1Km|W$T{&bOg4=q;=!Y^yLTSyikGvz
zRt*GnpF>>t7xCQkmQjpxg5;hFs7S06a(Xf>Q2!j1W;K9*btSgaHBh(T0^bb3h9TLj
z(a4FyBWq_II3|SQWBib}_c7z3qY1h{&x21*B9<rT0LNwrG_(F9pLm0AS2-0}EJf8`
z736jvqMkfWI#*4Eo|`L$H%H9ED|;2#zRQ)oHf~~>C=EunIhhQNEPxM(ud@850f`-U
z3C$x8!rui`k)Q7Zg6?O+^zdRZeQpa~CUdFGh-F>6=Q5e*{{xM1a}=Ew(mwtQSaYui
zIr-~opK2SJop*qRtXGj1p}<W0;~<%`i_uIQMc;0%1bCJQ+vaXXW%n9b`&)&L<3d0$
zupIk?mlEgA2Y_#G2WfX}P`PM0b#iM!e%k^}c#oiUaSqmYk3`#z6^yS$gXZ4!qz2C|
zV8Qg$*eLF%5w}f2UTlsMb#o!_+eom>yF<;^Unb@K6ujD^VQ$4-44E^OwB_=kBC;5V
z?bbzGHqYbiy2F@X;84rO{?x=UAN-awfG2Xvdy^w585D*con`dNlSCLOt|xEbhO!#$
zBe5NR6**-uh1oX)K>c7jIPabWIuB#PX7zHAI?bZ;6IX%bl#X9c7@@U(5STfi0-pCw
z&}xZ9?tN`&rS8DH%Ol-W=ArZ7%hA+25fVZ&@S7?V+8)Hg{2_+e(RUE2LoCWHv(f2V
zC<=8=P`1MjLX=sAV?3BCj@gNv-X0~#>pJ~xn+h!#AJ9(AOTx|-=ED7xF2h@|dJz3e
zMCaN;Xt~J-B9_;ZJ2NYgcPk2QZ+H<G@eJrIx<i+}*@~VM%Rsivm&{qs`dQ1DQ`dR_
z!{c`%FyW>>Uhz4L=H-{D#Xq5_NI4JOX`86E$qRCDmK1EH-)Q6b2pUn+Mtr~PQll~J
zp)fZAlq~|%<GclB#V<&D7CWml)3MlnC+>>R!Qa>1P;zdeg=ljh6S6G<Tqdxy*W^A4
z;PLP?oIy9;!Elq-fn$O(D9eV@&R_Z<em5D?OJcz>$r_w*E<wN5r$8JX%5btX7;9fm
z(jKjXuJdW6fxj6!t|~^b-GR3Ij{@D5yV!m17`1uI=Cd!(L+2ts;}=y6;(Z&zMaK#Z
z?aOdtuMs9rKZJ7AWgzZXfm5>%iYBI0W<@RJy0=j?Louo%hYKYmh1I_j12M3*0vz1N
zfJ4eC=okDU60RmGe3wr=E4PvU<%{6XuWX1o+d+2h2?W0}#q>i2%a?TI;JSh&9Ihq<
zPv<+-=!rTwRbN8xxK_s5(GgEvn}C+%8(42P2|L<?iG#|L+ATxqWb=UtgX;texuEjW
zMBcwMNd6-;G{B>vQ2&dLZOa0m;CNW9e+`$3Q;_p*nNaz19hyXr#J-hEbk=@Gl$;Xi
zS4<-E%nzjVa0@9Pk^>>yH6Ul_OqVB@X6}lF#wGEz^;R~RoehKGYI$I`OAHF*E5f>@
zRLuS11W$y{xLFv01~W%OqrV##W_FT}@Q0*a6h|GK`YBF73hAC1P#)%iEt{@Dh0{fB
z`A`b76GD<1r3<N=EN^SZexLc#G&lb;`7&@0Z>@|+>)h>N5MzoVv!ki?b(Y`aFJnYu
zMW~1<g64}HlIv4J<7_3A>!?S|$J7&D+h?@zs3wSpN5aV)4bXAJ24q{@z-;_O>Kz@x
zW=V^{#%VmvSs2S^M2W&x>x%LG&S;dRxm$$aVl(QJOVsj12<}*!0-cxBAz7{mGS`<h
zb4Lv<zc0g1uiap6Y)<9}MuCw~LdEmVnfY}|a5Xv@lGQS4dzTL3&oUsqVk05%z6S6#
z-J#QUCzG@$A2pkXVC$YL#@hHSbv%9&IR9!{e8F^--?U_s1IobW^kp(2V`aY1Y`A|o
zA6jF7Ffk*XSS?r%=Z3R9>oHfxz;`CgF>NCbA@iy8T_v;*q@WD<VZ%z6-B=lh#{E^$
zFKwcNdL4)>uLi|X2}#J;W#`yT+VEr^ijEZ`_wV0AxyumfPuNce2DOqeefcOq1ys=q
zMD*Yq;Yd=Hx>uHg<f#v6x*kB^z`?lmjS~DGj)ugE;rMfiC2}8)Af?`mG4RMGaI9AY
z=eiv5YFUj}HOj&B&qdTx6@o$$LUcJ{=u*oEf!A9yc3VA!jZ46z+gKjTa0))UtBDa_
zRTxv52+4Oek(2eC__%FjcLD?09DWEhb<&B%UW0+>EdTZaKvQyyzPgbDZhLZwMEr`h
zUO5CVty1h+5{~yJZ0>S@1l>Bw7+dyC!0NI^81&j3wcov>x}kH3{OL6)S7W``88cD4
z#Re3`QJ`fv49rile7aTy&FqkYq(H>X3v)&p^#{(D&y05D5p+&I0_u}|0iH$xCxTea
zdt8Wm3ojtA@*2qv34q_@;*q1HMpX-=7>9Lo!do<jncEnO5l>Y_DY{JF=bpp1$^ptr
z@~_rxmO!(#2)v)}hF6Xaz{RbkC5HXoGHJC)L68Fjv`6`hY7Co-9Mw4TZKNlro!^YF
zSzpvbtczjuRbVPUhX?vf@VB-Yhn^dZ{QK5qmz?cgXG76*+di6UQIB&w)6wB&BlPqY
zGP~-sQ8+annyd7w=tT)R7?S~u=^_lgaFIB^&<9`1O$t-?1M?~xL`0Vkt386&f7spe
zP&kTSGSy}`^>Of7TZ}U+0MBK|X{YOCVQ0t@NSLkyPJbuETNxy58R?G-{dY`f;9MbB
zKLL35f7ABqqshT|CBCK%W(+ODt3Sd(zV(^V<>@%;GC3LUtzXA_I-`VcUnYR=_+cdQ
z_i`9Dhvhe&*t#*=2>NEFFz*}7td^<pdfWvx{I3yGy5d-0VGottjTcJ8#)C@+>l44-
z2)vPA&>pZ1<QH5){`WWXc6KyqK@v!}c!9)&Yhm;G5LoKQfFySi?QmQLC(a*12fZ4~
zy_G9$`Kb;ep$CY<<QuX1c$XIDoq;cYA*k+g4n>D~plPuQ_=S;FE@f{;{BoeN#TA@;
zYN-`-1%jH#VPb9>?p!E9?j%hjW9!s=L=E`cN22EgE-9*ZMoI2{X6M&LJY!e@aPtv0
zHJ728MJB||ticcq2V$qR264!5hW@a_p}KL{WF`Z7U#w8T=7IiY4EVbSBPefDyWiz#
zr*jJKrRAXDh9=p(o7KrHooM^C5ulwMMj}nB!1@h)v*A<*x}9+(c-JVFi{irpKQ<rL
zyT;~+FBu%1g_dbIX!oy~;Ibo+u%Q@QFG*zYZtl_cIyR5>903xcCzNM2P-%-3T^f}E
z9iJ;eVE3H4x~~qUUq`~L$63&?-iVSqS5Vs;j^L^fD#J2itLhxJ^y5Lu<S$fpI^Lpf
zf-WS~>_yy~1Fz~caklYNv}1E;(VS{%*7`u|0)w%qS|)UxQHyU~3{k+Cp#P^pXtOc`
zTw4#mhc1$Smc`<Xl99gNEK(O+jXj}+_<gbgk!%6Az2*tM+JjJ~<phGxk3<wdMEGvE
zH{NWj1C!s;@bddKOr02nzGiQT(V5GjDvA?ww9hY&i;lte$3sbZR5Ea<wo~DMEc5-J
z3%PKj2)_hh#^o>gn3-`2WyX_<R^tV_xO_3TyjzYk*6%npjAgCX4FY>Le>S5i0B6h5
z@M&Nt+9t>#;?Wz*-ReO<Z&jfDN-lNe-Y0JIJCyG;kG%oPXKq@hpmjkq?b#B?bT)4l
zo}3T`^T!v$oXTJ}hxcG_XFd?qiEhyN!3X>r?@-OL`9vyVYdv#-WM&Nm$rH0`j$4sX
zwC(`u^UtSy@|v*un>TGgGadpQqtV5N&Am^?Q`4Yx$oXkr-62~5A}w1Gm!B7M+O*8&
zk3LZho(*nVLg@NxhgxcXXqI>^s;i|?7_}SyZvdV$48#>@%24&L8vMo<qoz$f>>Za2
z4jD_(Q)4z6w|)vPl#Rv_G9O&KK?(UDRd9Q52zFnIp^~vNgm;Q%3!I06YGtWVK5T>V
z@7_4{%<2@1=44d!lu`zqk|AN^S$6)fL|Z*A_@0`}ayd&u^k5g|e+&byB^fk+{5q8T
zRx+8zMJ%KKfeLnbfnv!yhBq#esoFRPc{MyC_j9)JWJUqzdfU?rGq2$9kTWROJ3`yd
z@*vqF4f3aifbeG!<n|4rj=oMXVuk@G91UQ11XhdRXv8bO2}XR?gUvrG!A~>^x}p_e
zJ#;^j7rrB$^g2i`p%fizNvq*F=+a|QF5D<|A94=uUJXKlbTJq>A#`M&#^Ef}Gc0}*
zwj6Ay;<^Vy^Uc|ydzJOYymzDGUoJ5yoeS6h-G-gaHPU|UHubJ7Aso7++R>w(nvF69
z=3^XoZR{nl+bW^FbTdfUG<K(lJL=yk1p^VwYLOSr?;|V&_;(d?PI*i@@0(ylwhZ>|
zwnTh00d=3vrET-$aCn6Ya<15e`0*UNzaj+#Urhr=fgZKKZw$8X56IC%13bfzM!{NL
zqIz}_O&eJT$Sr{Ni~gnqA|r4yzXZ+>v9Lc{3bs0IDttH_OXemaf4UEq)mLEC`#Ai#
zD;f9y5TkWkBDl$K6Qx`e0{X|`Yfb~<wv-AFTE?Q}TMH96EQiel2GQ=2ZrWit2c<V3
z5wX@Y`Zk~x0u3gBGUhVv{VN%I(pyQP(-^p)FbLlSp1`(wEEBwd<({YB6!IAj5N-d=
z^tiLTgrOcSJsg5>#X_|EumIYZzoBuhIpA~87Ry7!p;O@mwJtvR;||L(4-UeRh^M6g
zjXh+x7J}~&Kho-Q1a6d`LBaJZ^auz>)u>rQ-`Dvl%{qpn=`M`2Gn_~dsUxx4hLtDf
zAkNmOodz?6`!AHErYna;yw(7(Dh50sw$aWby5Qbkf}D_Iu$;LLc*n$y>enD*x4seQ
zdq<+^S%<JM_A~u<Mh*Xzc%xZ<J$#IfML|O>UhSyGK*Ln*^0osADj;uWAHq%Q^(Yvn
zPn;FkASBWd9{ufzowfp|XF(7|o+|~*E3MSr(VuX(U$<~9x=NEOVo-2;Jl%7i&2Hvr
zF?KiA(e}u0;B^cULRc}oH!cCqs)OL@y#x==zJy`P3sL824cK<MLF&^$cwbkFjz1gG
zdYdk&zZ?dgKlU@tyX&BRK|MMQGeu5;oGR3s7<_pY1V#~TE|-nnEk_`(B^w^JpF$UX
zDX5-GQM!COJ?Nl;%~}(vap5rJ6nf$})(=M=3O3O%*&K(1viy+{f<MU7b2+%DTaLQ?
zaO&q;M#Q5_k^8k(=wwifn)TMyeOVmNy|4|X#WHg3$zJ52U_FjK?r3Ss0hG=JyK@yF
ziKwK~ewOq5c9FF98q<^NWia$<Eci}vLeu{agXNnEAo5iwkH!Y$H=>2&$3KO<^CuV)
zQ$j(K4bp{!Aaa@rJ6J|_TWlh>dmV?3OZ4&4p+GdXsiv9JH^9iJhB(-D6}o6{qhj+o
zCZtzE5_Vn$K~Fd1eDw~o3%vk-OBnjKYZ;o#($W1w9L79rf{qhO$c&N0l0&gL_E89E
z&tx^K-(2*yT~EOME47_=fEG(@ps(>1aHl^L>de@Q<+BgLeM1$bhXuj#&ynzGB_F%i
zD)4T`dF1RIQ>}UZCzTdA2}K8AQ|`%H#z?|Ij<gmP4ev>&;eFEnY9-;g8!<gI10dq`
zeh{sAU#+9f_RDWW={C=N@by7B_u?w@rT%d7GV2+x-2ufcALPC6G8NB?0QbN0VCeW-
z*i7Rvcx(hrh6wmRBaY=omV>VgtM~j1(6q)1JX!Y6<VXREHKx<gGj}6*X&7AE8;kw#
zy9nodvqj%qT||>~_FncI)!$qN`FrP}nC+P=(Qjejpi|)M5fANs0oc0YArpODj<yxG
z*s@uLHnDnWP=5h<2mGl;ZZ^7~V)xvqaX2u13mDk!2hQH17J}YXVc4O1R&%SFOSHl)
zk`JxHvA*L__av7Nw120~A|G&bbb{8F*F-S>Fp9GbQM7a}(e*sU>IVaum39Hc<F29R
ztR9MGEi~e)Bi4Sbf{?osu&F2@lC1&YTzwT?&e_7^KDM?KZxcyAA03BRfX`1Q^bgxl
z8sGSX@fJ;tOL$J=gAd@1E!8M?1={>60$M8kiKUFy&)c@qytD$8eKN(@f1JR-ZVc|^
zZDIY$x77W540PAtrGe^B*phDpaT&qX<W307CS(XhT}{wzmp%$kE<?F^Aq}Zws8oEP
z>i<v9a#=%!9odILx_&e`|12QuhFwJd;N#HqNkY6fu^AtKEhfQ8<d_;-^k>`Qs~QU1
zM#sVZ$MLB2kb||A5K31x!qs^}V7);L18Z_f@y{99x=%)T1g^$M(`zx8E2sK1i}2(K
z9c*eHgreRKX6mjORL)Mq-4T1R>)3dZX5C{1Th%FN#YT(7IyPe;oq&5&91%vf60^4o
zB7ScL&(*x}Zt@(QI&UHBer8Z*<_!BCQsB{wAPk*Vist_L;MKwM`^WE5j^QeE)#;0j
z>@T)ItoetGjVXb}(hywa>4N5lBk|R+5ZLlyJ__oeGw-Kn;D}N?e7^1~jQp60hGSSA
zs$&VByT1xKJ8xMGS62f6wg+h)-3Z&vhv3;&B{;U=GU!+h$Mm8o2-!KAwpJFA&F8A2
z(_^%7AaE#1B}(C5RDs0(nZ>5ld8nB5pYTK4c?c9&k{LReP~wnM&D*mIZ2i`vt;HnD
zIgIef(GpoOlek4UQrsH|-P`i1ZR#nSG$RIvx~IYGW-AC8V+a2})W!4D6ESyL6`0+b
z1QGUG=&coxNoUJZ)%l^iCw(^9L<VB}rXon!$N_$p4RH3%B}pu!kS|Swq~~LBqM8kI
z*X9btmR|zZ`w7JEq$BK5%fM__yA(eBmwsKEgT`BO!Eb0I@MI$DVECAdU*<Cw`8udu
zw30}oWLSE}1C7}|k;C?DGjA!88(qe;VU4(daVmZ-D#sOWY|XtWB!=A$XvBJrN@g#6
z`&YzfQn9ozg1u#2lLn4%+2Hur3phG@Xk(N~mH%5tCVGV8oEDZVDh(!K?6Yzs<;1p}
z3%bi6P`)X9BR*q0?ul85;-Wva`LjLAydy^LsxI>3vM>JCbQu>YXW*w_ndoj<1pY}X
z(E2A6%6)y|cVju^#->qS{e#pz?-3PnRue-mn}NRBi014qKribjavTm?a4dc+Z3iNR
ze$`1JS$V*s`|2gqH+mv+`@hnNle1yE#R+`XJ_se#?^j!IwV;xzzp2YmV^~(S2Onv~
z!Kz82D83$qt(UV&8k3E`ThpNZk`oS}Q2_0S&l2-{TR>HKkzUPg#J!1E+1veSSdo<v
z+_3$GHzx+?{sFXVON7oaebiySE)LCN9QIUz)<Hw)I=7i*9j?V4ZS&AmcLWqHtVNF5
zrfP>jSIFLPaiDUFt-jyKK*;ZMB5QD`Vf{vkI+wAhje^~U0PO282aZ{%lD}jlgan&|
zT4DqW=Bu&2I)PXoxKBnGMWXsfTlCu;O8urSf)8UafaRFuAiZV|aR)CG?$kB3UGjr)
zj%X6^N1-6Q6-D_0T$*2>hOJ-pX?rP~4GXtJ;GAJ#k}?6sgMSJgc4`o#^c&RgY$M@I
zH6S?gJT~W~!N6RACnEMPsnbokRNo)F<^__zxH2#!_o%izLwesKTn$UWd9fG%A4TUL
z7Gu|i@j()j5JEB%LP*l#*-Hi?sU#sJ=_EuU31Lbnl_be@P@|Gbsp+7@vzJU6=_TYG
zLI}NtklrMG`}^0Gi)Nl@@3q!_-@k?aP)<xfG|-;ubD?FYD&nBi5HV{2`sX|(+LcL6
zTS$S9<^nh|N00SsDcE+y3fz7V10!=WzJ7Ebd=u`|xRO<9G~^z?x7`&jN)mAO?a3HV
z<XrTV5d3f<3+nn85&ymaaR<Nb#&~5P+H)ri)H*Lhr`{P1NJvCg&C?L@p9(ZI?SrXv
zlCb^M4pdqVg0P`^D45*A-&zUSt|Nxe#w>4l`Y6on-iogxjZsnex>9+Gl6*^3%o&}9
z)p^TMd}c!B(4k=ve`7v8Au+5!Jwv00>;a#m^E8&Q{JdTYOk24M<(~bB%l$&4a*roL
zE{sEPB@K2w$p)iagsiq*kMXk%iFsc?YU}@#PSPERnj#T2L}hc%Nfw}U_adkYu42Q7
zpQQ0@9JxL)99tXIQ93o2MENa2tHpL;@Q`Iimp5`M5sSfk)FvvNI+a-OEu;;_{ty(U
z#CFjqQY<S2>wyVqzFUnb=G*Zd%}vz#>1^8iEe`|#9);(oHuzLG1Ezm5!OZBhF#O9E
ze6Xe()(w%s?8^!~ZW4x*>|;?pyNt@hVsU@nWfZQs#hLf^1JA!FiQBD}SUrd7DZLCa
z|C$+y;TUhEF6Nin#^4XFnP_tNG~H&MfSMcG4E$;(dj~C0)_Xl<Y9=vlRtBP@*QoUT
zR64GU<#-Dh<AzyV(4XB={l7{v>Q5s*WX_lmFXLcPe`i#EdKMbm>-qITsfbL6ab=$D
z83k9ssKyzH6Z5|S^9P2G<G-C;jW$U~!H9Wd#l^db`1U9Z+ZUG55?e!uE{XxCv&&Ji
zcrK_|iiz=?^H4V>m$np$;N68Y=yIbJcj;5m8pM1{(~dyjc{9-4(-K`irJ+?X4y4yD
zz~3$aWj52Oio^%ZNg)+`#a8+|mr#ccA7~xe2i5)NfLC=OYVVo_&^QwZRPDy!^Wsps
z><ZHsm{wPKjoi#bgg0xji!Nbv^s%5aMgoP0INUhC8bwa0sc6z1fOrwyxK{{*T_?=X
zZ=HZUmP(;9<|P$)zN9~eNjNQiG^(XdBF_Kz5a)A|kZo~?w66^WBmckr(cv*zx^)z2
zR@ZQXY)4-7Wgl_-JeP_Z+@Pc1MPhiOn;HzOfv)>wST@26&TI=uhkL=)wmFM=xks~k
z@N6(}UrnuOFqG}dg68mHM7z<NjQGyZjU~tMM;gmg7Dzzwqz}tUtODW7No<D3vLIbM
zAb+(LY$~SMa$q)CF`awX+)C^T*Tz;omIq$)mOSi6lvn4HK#f`Gl(2~P#}W)UcN~Kz
zyr+pVQYs9|;5vk2=6(9eThqZbZTCemxgLkZme^u%R<{Sxk<c|+1QG4&&~%h}PHu*R
zk(NKoL&Et}LF=$JWeM0!QGhnf<pqy*#6BZTQTFpTH9s_sOnP4c&(reJ^yzUFJUCm?
zaBeBdITQsh^#IOtKiIK^Vq_BY%m-M&<je(VerGw|s>awS;;k@Rrv!Fv%SNqn{y1Lq
z5Nf(@pg|dC;A}S&$~TBHO??Y&zPkziJM*a1jFr&3j`1iy{xmOIIR>pp`avn1J#Mf%
zg_mCYV}oWI2Hltp%9a?~a(^oJgbe{M)Bx-5#Ni=DG06NEf%&Hyu#{!q`&hEOM*k?-
zx?IDs|E5E;<~`0js)^oNo`I5-aANDlbp31pk@kV}sB<C97T?$d)0Wv|*pD$Fyzb5k
zQZqpR`6)V8X^L%Ae^brqv%LD(NNUvQA}<)9iq4;RQkRQiEZ>}i3N2kw%=^w2dKY5y
zu2THSYoqV<ftY!(26{XS(R_)Bs2GN$wLuNM9izZNxh3j!`LHvl1c$nf#9X5Qmb1u0
z#m2)RD2`)&@&#1>cbT|M$fT~04%n5TM?zb8jJ|Uf#o0kHXVf^fvW&uw)yXJ3ypy&q
zcLb-Z0=)VBAnMF3AwJUvg5py<C+t{2bPfW{a@vV4+Y-PsQH&kGpRn0<DCwFZBwZ6{
zqlR$>#G6|X@rPv=!EGAY6w*Kh6ACI9vzfz;xv4OruRDr7=Wxr4&S1stJw#hl0Jh7Q
z;)}6TwE8!S+Me8kJ3qx?lLxztcNF8X3PWrQE(6)~uSEOKH0Yg@hE1VcK>n_V_#Xyh
zutN;l1%0t;L<x~+dV=)fD551ug^oCXU_27|+20Hebnlb+FWsEM$(bOn=-{313BYar
zeB^#IPlvSy?Ke-3XHB(HR#b^~hH<oYpfv=?F>lH^b#y!tjcO<75ntsSqTT9G<=uU$
zdGSz?{$;+cL&sq8h;ZC{au+J1N~t)sSLNqH$6>&r1Q3Rq@lG36(fCvp_`EG5DduUA
zSat|H=k7s8;2W-E<8hoVi$v~BB__&`;A@psXl8$J*MEiZ);|ZWwhe;#o=Iff8W;3q
z{t&O7%x~)+!6{unP~qBjy!B>dqUD$jAG$7~v(H$puo*<^SSDKF@WQllm;-ro{WNy%
zI7`J$Gpdi9isN<5QC~6=Y*rOO_|~Q9v@VTU?^{HQSBGQ$uC)-MTY*0FW8sqXQf!r4
zlK35};QVwNX}!<pmM0kBMKK6I!fFg_yi6AT=Y$o(LK>u4!~8t+NalxVFxk(}qOU9H
zM3$#hUAzH<9vvWp9<vI;_xwt5&7tvgr?Y$YEqNF1gkcs1^zulS$;#YM?qM#9#{aOW
zuMC0)%OZO9k0Ev(I!?tOKXXwZE8rf>d9-Jza{i>1>j>|K0&AOc2e}mph!+W3ONo&}
z2K9H@y)QoueLhv9GPe*vSOHppIz}B$SWow9FggjsXq+INX*M==**^uSJ$wb!(BrW7
z(owXYbd1&+ErJ%?gRGX!grE(Zh_p{6wRI@QyGK3o@vCbn{*(k;W*)%Ib3W+5<}k4}
zW&Nn|CKXt!fn{6;tDVNs*4YuXU{o|1hEZZGIEDjmvP{Utp`bl$8=PpgLziqDkbaxP
zhkfp(S}+znbhp6GlEwI_ifMqa{iyxz9CT@jBUT@!MCLS&$lBHe#(Bcir8?-{KM04u
zErvGxP-xts0f(y3g3D$j7_(&+>%&JuF!N%0Zu*y6dsKp^+EP%<-2?$&i_mT2R8G{O
z&S%HYhjpE4ASi#$2N^FXieytFXFWsg6fsD>=Moj&i4Z`a5WP>i*j=3p-JuuJ$fSz2
zKVCrFUx;YBMjve0|ANynivewQb_ch<AsbHHqh#o8h)+o5Od=CV<ABMe>0}uwJlcrj
z$1-qgUJnL#0vhHP4~hNKkP}5=ORNl3SMpdf?=unI4W(mj7=vW58|}=LVDF-9=y!i5
zcBVwLXZbj;pxPKdJpt>E^=IE*490h#rv@7O5GYK>(%DI%{d^yFm|+Bh#kPE8^H?lR
zTm|B#qv`0M+0dMl%$M~Eg<hA+u+2V`2K3KI-_7Cd&!<7a=uj%ox=Yf_PNGAfd-VN3
z6Hze33xi(%Ax5tvc#U@=RDZP*wz!9*Jo*n&mOh}iD{H9mlZ^exZ9!bwt1_<hJeaE0
zLPOU)&@opBPu361jK)Fh%W0^&wUflxw2*1Xree}XmL>S0M3d`jMD+JFDIKx}<IOao
zOX31T;SA8cbq);R4{3I-ClxQ>5W#T|DlW05&QY#JP})^)9(<ED4=+T)()Nl$`)A-W
zy<E(mKNvp96F_^Y8*R8%0NW1DKxOAUsx-_&@vP}+KE{}CI(P{?2311h!)|JHdj}B@
z9m_lCj{+Aq({p70{g$q7IxTKFj$AYmyXJcm!(W%-gFEx9y9=S+?h5*s>A`}9UfBM2
z0v%x+io)VkR4!ZrH%6wQR?sMr-l_oKi9B5w%e<sYmg$c7fj(0X;(py^bd8Gz`LB5-
z_V!X})GETKFHgguhyXMSVfs)`5@&V27qnj-PsG#xSuHviZ3ZyCYmS7MY*|h1lf0N#
zwbcSnTqfV1NHA_r1PBYF;lrIGsI7@*T(LB$XC8s5jDJWgKM2!Z#E^UI9JVE$rKQ&6
z!0JRU<otIL8;p)}EwqIOzAOea3nx@oj>fuwuM^d99<4Vmp+=*^=`mw3OsnH@!=<Yz
z(pUptFGEmkcqP3wL<_^l_fSWTax{%ukB7ZjuJH#F-~Wn;WWP69*G&P3vFyFyQqJno
znY?7kNrI|&5HM^K%Y$C$IxJP8QEwPAqZhEtvKDgQUk34$7;dPz0GbCMrNTe|a)J_T
z_<o`QgAyi@0d`90{9%SSt48CO`AT$nHi!zQRMAmu{85yyOShj~hmGsB7+Yfqy!q~n
zI*jX)f8#dIDLV-Rm>;*}@JN)bNdebRDFo?%AX76=V(eUs@2;z%Lt;CTF;A#T_&9J;
z9tTey#*P@$oAm$9qvnQ8a^>St)Eeml6~q1`Fzg__o?Qk;$4-IyjZZYY!WVY@7m04q
zHxuo|5#)6))0QSpCtnl9SUV;W#Md{_{p$+Rs&o&C&il~kA>Qa?UQPP155TCciI6DS
z2b*kj(IsIpjH6mOdi+_)FPH&BfslVXDiVYV%TcB?i_Rag5r-SH{KC3Ko*Nzq`G3mc
zy*Lk@FEO2GO$rhH8bQSS(k%pDRrJC3m(=ufZxqxua{1qG(yrcXASLBAoBf-R?@d=w
z)^94<Kf46JkL5&X&SyHQf_X}6OUZ4MG0cCM0zt=DqUfhHuYP(X%(xN{0*m|#YxyVQ
z-+YH#@F*CM-cM$JhB~VL_9o+O<Ulv`KvbN5Ne@X`Z~I>WtUJ##1^ag4=ru>7b-6#N
zJTd?kdn1haaSiT&PGY~$G-%NHN)5{5LGySmR~OjD?t&`lG>ZiJlHauF$OSxg6)<T2
z5)7CX2epAKQ73y2)Ww7o`I3{oBH}7YPkblaXCex=>+&jH>%m<w1hv~o(|4?->zS$x
zMpi0hQ~fz~^KIfq8s9DQMT_7_P%_$o%Y)Y5M(|GHj%~fOQM~*&FS=ksq+rZ7>n`Qg
ziUfFT%3N&pSdQ_74}x@lIREUQ<>>C21y1Ll&<3Ao8ho;h)|*U*s-<BNGWh_?8=^4n
zB7xGXM5_Ha4jbGr^Xg?`B=IL>HawjHD&kXcU2_UH&x*(I$L6B`v2!%ch4Hu?E%A|Y
zHTHVJvPo~x63<}+A(-hVVjNElA{SG2_KvF5oFxvXxzN?O4`@y{gtVj#@bCUk?xod&
zAmK;F^rcr(GN=f4cBi3XPCPV?+7Ghx%u7>Rj&7!N=!3j^>J-u!HY(5IgoCWlTAWHt
zJ1QXY)H^ymV=(5Nj>nd{iy+~nF)BxGf}TUMwB=V8HjWTOSf7#b%Uy}G>JL=nJc_g|
zKLSx<QZN$s@Uj`Ipno<WG#T?z8ek4fZg}7dzlrFkyMeDT(SbyVc<S1#1oU0TVjo`v
z?0gl1bpxHiL>!7OD=Nr3A9gQPq(HXxAUG`dfCmLfh?;2tm44eyx}<U}yAT2PnYobu
z=Lm`t2<IQzO{DIf#9u!YUMH8KG2_h4NG@d0VlC6?=HY|tA>hFBzdhFt;dE*c8g7Xr
zZF`&O;Q}^4>5PXCS3hX9T?r<M9GGVYfQlolhrM)R+|bdedwm3I{V{{Ve}|**-7>&Q
zWf)b^8~cz$_<ZeEJd<(?h1R{<cmIw?9$SUtNe;wUn`xP@Z@H&$s=$1+3Dn0ehlS(W
z>|*Z$Vo<Y(4w>VDTQ!csanVu~RfqCtW{g0i&h3!fD+)!T6nvGs27UHQi2mdGI35ad
zpz35yt5>4Yp)35V9eG%FP0qBb1q92Ez=kpD*k$ySDz6{K)5ZCi_`@6|--l5nHUslt
zY{h)Iwa|S$5o|NB!qYu0`=a+RG1zmEh`;bulA1^7U$Vw%$K%l8vl+YUlfgdeASON#
z!-(~nAdR<$y5~1Z?AI&w$$x9m+R~f!PL6^Jr_SNCh$=LC+`$PF)hputs)&*=N9%*@
zsMGbiU{Y!gs{V<XI6aSy`df~*UI`GEKNg#QrvZ9#u;OJ3wo2B(wrjz-;S-w?C#MtG
zl|$t1o*+1*23JZs=KW&c+Ho$BVQGy?3@xX)yq_2CPPLHqzYNx8s-*eBLjD1#pl)mS
zaAM7mWJg>-%nIbuZN@XAr5Z&8a@0Yv_Ak+wO@JgB^Fr^~M?Ov3f(@Qmh}-2e#Antw
z;yoq|;Cnr3njX#WmGh|iLq<{pF2lN)QV?36L}^S5FWuNhB!7p1%;P0>x-<|RR-PkG
zX(p)nluHfm(&@d`=fTKvCXtW*#9h4~g~BJPoU3Xgs15W0LFqB>5j%_9mE|;jjRMM|
zV!^yoMqg(aU`D`hbgE7V=em>7u`nNO{R>FD-C~v(^uij06zpm$h9y5)o=0fIZ_(97
z?Yn!aIQ3Vhj_VX?QjaDLYZLf@bQ2WpEha$=<>bO<=I@$xn+gicL2#A_&FPjzf42g<
zt;MMQXA_{h8x$W&hY?4Va41j?k=x4AOU5!6=QqR6b&+^+zy=g$^e5eCnJ$pnLlz9F
zL}~w_oc|&_@L^|xLOzD1U9BY>Y?)p$&YU(j><8^jVPN<r47O&;QT*>TB34|r5T9KL
z=Vgb{Uo8rzHWWa^^G&#5lNV!fq`<!R@!0TbIH>*-L;c`oz|dXL@=yw^t7AZ3?+fue
zJ`iynN41a1sc_^5PMTefEmrO<+xLU?Y-Ah-rvi*06U+a4QH<7FexNg#dDsSSvq)*V
zj6Li0Ab5lv5=};9`9K9e?!)qpG3%(?A%9-HZ6Nkrae?v27ULARg=jwZ7je22LB#U?
z;4)bc(IcC-uN@8FzYW2t*MCWbC<%2W`f#=68aAXc&Q8N=&>pmnI?Oyx1pDTKq{$dU
z&x_&S;iIt7bSIvUPs8@l8$tZZf|tGT#rz$!VaMRh%tx0;UoSg`4|*{MYRd~Qld*7o
z*B8>npTl5QYZ;nsWo+E}*Lac52VT0x1H!iFq1H58?D8@r;=mBjZSy?X>QDepV_wo=
z0|oj_ItR_J14;8|U+(Ti#uX{DC;Pt@fI+<-4a_S;>CbX{1g7Dn5+}^D3_yil3g#p;
zFWIlN06Cf1u6qbXW;!gNuFkv@3b@gic|KP?r_)`SCo{;0vsR>$4wkESxSvWKw(JDW
zk}9aTrzGlZ6X^+Pr-~u5pd4ZhZ5=}(Z1`+&`L-8s2cH55)#b!0=m<1DmQYV=7W&)O
z(H9)cy;2>t&e{RS)v+k*_tC;S`zoZRreMVeO$=t9&H6$G{c~OhQyrP-q9Y#yx_x2H
zbstnpcYw_9C3Ws+4Enk8Sf+Ue#pBeuriyAhTCxIdrE_64IRzroO1>fRD;0(Qun>6v
zwa7D!z@F{5s10N9`R3oIt-3jwkh~b<A67xv(<5x2bA%XOc*z$m3PPdIYw|lc3MHvP
zbs{%2ev%BVZGTY6S`8gmXHe5Bh4Ek*)9u51&ilb}T<9H*Cfiunu5%#Ge|!RT9Oi@i
zZN@qJkqie|Em3zf9DP3CW}L5RJhLJYKWkq?hj)xee6)hwrE?G!H<YA#c^GtDUkrv$
z1@I(k6RI3dCw04PNmzOWgyBjk-ER$Iqj?sJ123tsvXn}*9?}%=;ds?;1(w=}!-&CQ
zxSpN=H5#FyM=C+|^9!f(QvuoMC$oF!Iln^x8hU2Np!VW8XiLha{r<5->&<%59%~Pi
zt`~yy^B~yPu@Qr$Q^4rGD%4wZ%x9Iuw=B*DrGYo}{Wc%r6U!FvS3#3k(U>DkfU!gD
zP^rc|3M-la_-s0f{~5?K)S+OozCR5D=5Nvvp;bf>?EfjiH`9zTcGGK0cH6Rks}f!<
zsY3hvSFs~MhqRvTAfj#NynJgZ=M!^_M5MW5RCPAk{+SK2EdN?`R*dRNa!k7LfBuy!
zmVK~Co17ykc>mm7(p3qQnC4p_GYb?i-tn3zuMs0(eY$H$0yZxM=ra6Gl-ol|TIO*O
zre1;q_su9;=}J_rQ&D8#4AQ%2h+?iD3LYP#x``3!wz)SAXFg5EynlJ;+JUgz{{)=g
zn~cUGF=&0H0?J&~(QD;F{Pk9b-i<P_E#b+bjbRX%c>>ytc<vC(E?#q_Xt1IfL>pG{
zJx*buW#R^gZI+-A_5#Io5$~`-lW5#w*@@Y##|TY>Z5Nhe^8r_0=SLHX7KkArBo)%U
zQn7Vu4anOZXv5ensCYOI+Ed<g>Wf4$Cu0?Q+6(|FEMxDZ3ksC)%-g;kqs0f<UEL(5
z&g-|4o~s0+cgnEGj=iTU$55^BWDu@+M0ZRYgL<pxqxSi9I@9SI%J-h3cV;=F!{i*^
zd9xJ>bK3)3Uln1~!%;Aoo%?EIS=|%*hcJy1<VH+`>DNhH-LuhXTRHO|4u@cV24o-j
zmkL}8ct`zWaNjf!6??j9`!s!eVM_(ZhAfB11)s@AmRU2;VBWmjqhQL?7);yP8yZ6T
zGtPJg2<(EP*fblfk}rVVk@<<3E+byy%7v{@rjn>=SlHr%#hOLnJg$WKG7mv}>v^s@
zE`ur$&cF(_<K)2&GdS`p2JhA<;QA?%*m$lSn)4zd^k5`*HLIbtb}rhjw?@Z<W!UVu
z8=OivK;zyK#J6Y~^GqLvS*(uhtt~-eo)w8+l!I%eNhlQ6lPH$mwAMO8g^Xn-e!rG4
zK9>Rl*i4ezdZW+&5ct+o2>V#NKw)UXOPge1)ixALS4KeXo=|jUey#R43JXX+Xzek_
zrt}(uYj2ZQw?c3mJCwr<c_eG{9+b4aCi;_mL;U@D5O%W)#EGtaM#Lz5GO85Yb}|0U
zmUU2J_lMe;c%VU-8F8EB2WqoF65k!6@aa`38cr6%fWwS;^On80bC!YKrYNS@E}<hX
zTtm@+=Wz6=C@`2Og7!HqPw?~t7eBX^iyLzU>#uk-?7%nDHY6R~m5Ja`GnM$Sy~H2-
zegaf%%E+8PjBn&L!y;mV3=c*ycJlXoByC_lZHY92N1M+;VqP>PrX_%zXf<Qflyg(;
z6sTCpLxVA)Qg}*&Mwmfu@ij1gzZeZ_R<L^@AE)jM0RwR{3doO&ifLcSmBprL4iCtK
zV}(@1Y&`bdoC$8JpNTa7B-d)Um9{R-AQ6{S@CS*-6U_VUA6>+|e+b9Q@!=?nGvW>W
z)rh&-c6j4jf=PSE;OhPP?4CRZMw-m;zrhDYUwZM`!N#C+Ydg!<-KImm*PvEr1eNa9
z;6Kc;!9_he*t{o*>h!-t&3hPA>f%Krex=8mOGXhsArS<ddFnH<5*ytZ>nc@^m*|Zn
zE;)zMHESY%Ppm~ztveVyv-)}WH!7LvNP;qTz{pj}i-e5DZPOczBbfK%c`$W9oCam>
z`>{DB9E|>r;GH{az=WQr!j8AJrTQ_Ey&Qycc_z#~V8$}Ydr=*m=)Eh<U!g})u}exE
zE;`eK8L9YUdKxPJ-3a+!MbKQf6{U|NNQInvAfB_@uB?fMe#u7@^#$NHcNfl)=Hrt)
zt5Mf30j<24M{IQ=V@KrSx5p>Jw=x(%3woiO`yLQSL~uW6&O(955iH#qPl8fdz4G%t
zQDc7Grxs~o`BIGQqfAjd_yLVQeg$M@-k{#uK%~(JQMysci&{sM|7^-o+cgUXuZ%$9
zqRWjAnvQ(rTITgTK*mixj<s8k0dzMJ(LpOPZP!QHut4(SY7IEp&7xL^8GmI~7__P+
z(6CCDYuoJz*#RTLyyO%foi!ZI`+lK!SPiF8iM9Cb7YQ0>>R9$`4O;lepun8XgjNya
zaKajzUzO9HhK{&1Y%kuP7slrMRamGj!HTS0((s!xQ<k3w#gEU#zh4}5<(oni5rU3U
zJbDV-sFi3ksNXn2x&;;JvNIF17id8#YqNtpsxj<W1n9?ahN%8oL^00@+A32?o84we
zEcrndWtt>y)Nlxr#)H4ZN2>Yu7&r+XA+vuqWEUTR_S{wQWBe#oOD=-=1vWJ4t`l0W
z&ciELG*NBnMcUA^72>l@aY;!Os&~k+>QW3gjsH!XL=(XwwgiQL&het}-mLHIVmY8L
zntc=DZg~wRe5r<x%VsDpJz$}8@gxb{EWr;auVAR2A0{4rMCx~4p><{>VZ>dQ3tmj9
za=R4k9xo;;qYHow$$$r&67efI;i5isP@;8)q|q2KE-wWCDOVsfH5Vktv9z8s?9_96
zLDzOw+S0uT>OO`*>*`q$rn?4|iUClgS&RPvKR;B@)2JhUP(fYUESqH_X4ZmQ`(BWQ
zGlp|>2)=%?1IO%&MuWWmRKE5+?XoK-iYh0radiX=1?%B`>ndCo;f?~`98g%)f}f!r
z-QFdVv=i%bz_V<oU&_peUcHK0e`HvZw+b|Ov#fvgSfaf*1@F8WgyXjC#Cn^N;2;gc
z?AT3EQS1qK1~JZm^;K+MJ`XhyG5+kWSZLTx!T+X`7uXx3X#P=)8!9=Vdxmj>WcRo>
zxsv*5JtKbmS>EIO0utvd2JI$y)O&USMRxrdTj@B~r9UN!w=>Wn(Sb;FGC&w4g2T}x
z(C4m_`ZiyK5-kOGh3|vyt;`3k=LF7Uw-UDl9n|)r53Czr3Yx1w^TH{Mc!9SHU$<r^
zw3(S>{1+qI{K}F0=^@5W{n<G5*LfJB!up;ydl*A>27DfR8S9$^K(=%&4EUObrTeac
z+GJDeBgsdf4XO0CBp&YFSKxZjb@-Zb-{XgRk<_PKQPEvpnaQ%Rj%?N(^fwp%15ZJN
z;4x?UHx&fq*OKdRt8usED75w)2WpX9K#<p1*}Q)*^_eOrzh0ced&|$l45ovY%&o?r
zQX8<DSOBNZ^{^wi2HS?Kg8JAokhuCBdUj=DOAfn-7L7&~l`x31DP>%U6}-c#dftfT
zi;Q#AK@<=}1-tygn)jgH9@!9ZIssa&8mZ`}9{3pQfUxchsmR;{*T2T0Z?FD1tQXTJ
zm#Cw#Vl7{4dK!ZA%)l-y3aymKNL2V;dX43KtnW@`9OscZWG~a^j_d(T`$`BJoDQap
zT~suF3uAbEC!)>_i=f}`=;D2id@(+bMt5?!&7I~b?Waq+nda$W77Y%`^QrcYP-@b6
zi9}wL;kbpHu!C`stXGd9>vpff0=H@~x)MZ7+jPJIv)PQ1&GmkV;Ly-&P%Dr!J#s9p
zu?awNYis4S&?pqN4&_awGuSSPr_|g^mo`08koGr|XoJODUd3@AG@lQo?aELxb!aTg
zIx8VCDFW}iSz~+TIj(C*43UgGLIcJr00T~dOgNJI%nTwooY|bnLJypF+dxm73-sJx
z30?labZSv3p7uR}H$JkTeOv|C+ZSN;b!6;a##Mg*gS`BvM4c_ZaQpTK)H`<ywyssh
zplnST@qRYuzd8jfyKj)@{ZF_r#m0Cv%MfvL7|}j%P3<2!Vq;?(u)Pmt0P_#IeQ722
z9cgTz%+1POx#d`GSBztB#$bKtG`M&9B<9?zWei81$_w|G<4(SoF+|u5U|k@a5B<v-
zC{_{O!AHUA@iORVd<;8QN08Yo_h7BU8ZFNkLd)Pp;>mg?s}>Cm5Ne=PYylNNSMU}7
z^Qkx~8Dxi<hW-3AkqnL{hCi6k%V;ZLMJ3@6)uQBjFXlb^PK)+$Vm<l_xV*y!|IOWm
zmtEH~O<@>1MJu7{SuAR+zox>ytVi!Pgc`111PY&neB^6Ql#V<`+Xk{6)Bxwock#zj
zW#nNHcMPLVcGrpei+Nx$lY(&YYT7Y!5oFJ14A_5O^P^J)xOo)Iim%S%)<Hgqw?$IL
zc|u%HCZWof1nAtv*i37kQPKL)qH#G7J_o1MIq_%E^HdIzPOiYtJ7wTml}3596b!Hl
zhLcI#(O>8eQZH*LiadkjoV6CBwj!>@;WY8pWtoHDlOcJ+Ol&OArHvyT!3_`5Wt*;I
zYyJ_^(C;^?ANP<bx~CC1U(2%b_h?<54Y7>tg>k!+z*fBg)WQUyeKr~<eNF?RQ#>Ep
z;DFkqk3{AurCk|CaKC9kx=9}MI*B*PuIFqnl*2r%d&1!KE*`bJ8N=&BGTSSX15@r8
zVd<PZRItSleD^Peu%~;$`BpGh$Sb*TnuFLmo58tV+(R`ZjEPayR)~7oL9$<7ME+u5
zbTI!;H2M~T#K9I<FBy#L{f!`sWofp~uR^o;O3a@nVJs*IFdRRMX&&`lPHGl*`F<r&
zFCs>(S3ve&36w39;IQdiF)c<F3itx7=q!h#LvbiL&n(j#1F=-k6%-SGkWrhCpj%!O
zt(ZL(oL9<;fyYDI>W~esHus3ytPt?|&2kmGi(r3gDcsYLLFx7i(8{a;TaS^@8evJL
zOTFmTrGv5K*)r65=Ze`Ep3t<&g)E=F76Pw@fl_M(8ZWZMokdIou((Z|m9BiYd>|P3
zWRj`{>8QHC1h&Q1qS2Qq-u7-hl}`AF6UKz`!ic{7`#vG)dwCJKC2XcXeaZ<pKnyFg
z89yQpvDUZ>g03ag{9cKqLc9~2!m6Q8;s}nd3E0Z=oLbZ7K-1RVm}a*F{7*#D)+|%%
z8Q=<PzH3R<<2WqSIRSFrPo!aK4E(vwyi36!sN0(XP&y<RlJiZlqqT(=4-#W=vI^wp
zl;JwQ2tE6SgSh0Tg~>h%p4~)I`s*ORSbiK@Ix5iMMj>MqjY9AGU<{DiL+}nW*nRLQ
z=CfYy!AW+eKbXmv7FJRDRtYL@r_h8K5^R{O;ME3(LH<1t(2n(||3!>Ip&H?;21UV7
zPdn7?Xd%xFui%-H$>`Q>$>sLbL#3e%gU_;ODq=j#fYv}tR|xo~%>>by4l;R273Peo
zf-_-faBV{*CVF2aJtsD!=i4fD=-dsOOhe)2wICaCg0yodInVSWqViD{eD2$UKxd+P
zy!KUaE8ovMEGy;JZ}-O)CpV*+-$K+GW(ty%rLfLrIU49WQO%%FWL?%VaDR{hTxkiY
zMBX5+>shb=YaKpt`$gN1PKMUoS!D6RYOJ{Nil*CyqEvMxr#_g~Y)i!KbIS!!uL>y6
zm4ZR)6^xZKPkV3)ILg!E_(m7}ka7l|oEe3=qfN1Sz(ugS^_o^i=0IAa71XwcG8W7V
z9O*v^+wQQuX1*7UwW>k2kPhm!LLH}A#i8HS2-NCc1J72)q5jlzBK=dtEtzu|(>m_a
zzABY?BQ*zhh^xTJv@h}4FG8)jB;wB%6M-K`;|06G{i+gef1g0xmB)$aiyWF^tATE+
z>p4}&xp+Qd7e1WL{I~BP(%6&3!Ce*y*1o4ebK5?M>zxg&9x)$^a06XAcR0e7bQGN_
z=A({orT?6tf$ee+$ZVwG@b5dmqA--?n@j{Qz61)W9ENL;V0GCn?65<emv)r-kn%vW
zY6#JcSV^qk>r#{bnJh=clk)B;+}rOO!WZV1wOoPP%_lHw&Nx!)e~mimxzQl60BD>s
zot@)GIR2{y#m9J}{%jVw?c2|32i1ZoEsa~AT!cTa`QtsO^EiN=6{`Pb!o2I%sJJYy
zeC99%drqGsqQpFlnjXg6QC#AizvgkJc?6mai_!4^+TYSL6l&N|X}t*vQxAb--?Naj
z?}O9++Tyn-VeD?IBq@OrC`}v9C9?C&;MaJf{xhEVG#<h6wq@ur_6!&15em+|>S^oe
zYC7vy1&UY2(9A2tvF%_O<ZK^<Qg0>KuK9{+4veDl7JJCmMl%$Q^{5mbUP05@Jp4(r
z1<HFLqZ*&p&~~F1bPip|{1onNHkZrCX8%JUj7y_x{SSb^Y&_{|xk@}!)6q>)&1s+L
zqRt0rLsPvwxG;~Ra^C=0w=13fn~C6)zMGx-{UB*<UsO~GK^(|t{UfSDtd#|xvO-Yo
zW8Ae(7pN$EITe}pg1TE8wAH!<ZaU|pLUkvYd-;H6jt450Kj?t-256sj1f&`glrHKI
zW!{D0#Adhw1`?{7g^asb1txj3P<p(cK20nG#~`*J>MENxzF~~DTOp9?cL{<lu7WU-
zF>tkZP_<cN`v2UC(;*GE2Vg9CDzD-6$Nr3AmJjCp!%?!t9lF+>!ziZ|GQZLS<NKU|
zMVo5yW{VRx*(%UdBL%fx*#5h9mT>Db%kY1=N}=rvG543k;a?NcT7N0EoPPo&mj@By
z)uZrqNnaE?o&vW?R{RtPC+z<P(5-ASXS0w;2d&ebdWR*HnX~<8-TAmPZ#X)uu7^99
zhM}y)1Y#X(NP0^Yx|A|*p7~lh8M+JWS&be*$AH1?nWUk1G~67&lj(c|K+|RlZ2w+`
z!B<QG?fl@8CG(tp^5orn$xv}70Mt4ru}oS4EE<!E@xJP0+tXxx^2Y-gtYg`bNnS)f
zCWp>jmxYEm$Dol4+s)U=K9Al3)Uh-d+yeWeNpdYIkmoR;R}QgUKa}l1I6`-vKaL;M
zmta>xFOV7!WsLP7)cWsoP&v-tfpO3Ho+c+4usRpY;wnMA&k)8H*5hh+r^26zER?t&
zAbtVKD4ihWl%fI9zAy-k=VgGPI-|1qRRPqUsvzpKRPh_j9BLj@WuN(2nB`o~&TM<;
z-3%u>%bt;j8~SV~pgn~8RYTyRY~UYXg^ITysN!`C>FAw{M@+_|;vIW-78?-#-p8;m
zu#_%16$a<FFuzS<AbxEML1~Q*<2{T)(ZkOc{wCu=-ik!ia3eUFnu7VPB5;`Wi0bTP
z+S3I;RC!;7;?n_K;y<hhUZVq#?DDXqL<cShkHN+T4{1_^C*~hn2*T)I5WlRF4=T`t
zpv>V|x}u)MU+|+7^d_VJzbxBb*h8!|mO{hmeGrnp9)oJwd_?C3?OOVi@R>H)CEg4I
zKV2$lF5wi%3oGjvpCf%o9755GcbuZ|84aEn3iYdFP+7eOx)!^E+h0Q-`#QiEjKjq0
zYs?>U39oclGFH|~S|?Y-*E`Rm2!?U4K}xI_*h7bN6i9bFGL7B~ey%x%!77ia<?0dW
zk~|ISb)J$qmcz_1iG-+^>#32>D`HJeSeC{WJL?wW#|vjL$Gs2Rd6>oOf94hH6+(mq
zoM7kSVVL$klyvv0#%#Z_@If^J%sjksf!;~b_HHL`Lu|N;AU1ElbcEHguPt=uGFI<_
zmFPQ{d1WqcBt{d|K#dLr-?0ktQ+L32D_7DuZ#8jCJWaF<o)E*FA!xWUlm2?S7BBqD
zg6!`}Y#);+SUxJi9$z_G-4X={^<%N#xsohAm%)7T%OU35NTy#$z{|-Bbb4RJ=J~zg
z((Oy=)~X6RI*B0E`@+q5E(a^0>$Gvj9<nYp0v`zfP;vYw+PkzLsy0TTPTd~x{Qix$
zHlGFiapxhsz8Fp?FU7KgQ=lBh(>pL2MH6DVfNR6yLhsqQn9aOp_TOk!Nht~1;|f;h
z>CiY=hW=p(XqwU-8tsCJ&Z7*>E||wWQ=t`I6Lg8lh$qh5H6Zc)Xmm650NcMR(6NBk
zenOU~oZ?GgjWb5;-F?7H^%-%t@I>)EZ`^I2hUO8MsG##6@eJ31wn1Y0ZKxPp)<2+H
zTOLxg`#kDT8V-%&VlXiF0Ih`=F@5oI@a6h|f_qOi0`pOFqm{HLI-<NY9RxNim6GkB
zNNJG?+<z^@M;n<J?}Chs4`iO!g{mMbNQaulR9GivnpNU9QkP##U$y1qgZ_iDy?hE}
zzCI5<r%Lcunh5`SIvXpxCV~7;U(yw=4wBR9;I?uFuau62Q`TCjKEWM6%shee`6FnX
z`7<ITrR=Pp4(EGPQLbe9y5=?L3o4*_A(HFSKScu8z9MRuHqc?-M{$8&4pu#5dm|wP
z>gTf@B<r7cEV4t<Uj-<a3;^rzHl*}-Z%8z(WphOxm_Fw$>fVZBeZK@{>Jsp1a6x%e
zH4*>W2BH@d>hkqEF>YOlE!I~+YyV&n4!48-WfADmyM=4sGlXXCS;M&E|5eJyAE3{^
zNwMCgi1wsjrFrjJhPdBy((D*aU6OCmw#PrH+#TWYySdnLeickTW{rk5w%B;Io86)M
zVEwE=Z9WjrwdB_m+5U8@_ST)ey;gyZKTlGDTT%Jqya>>ocL7Azs*JmFkIL?MkfcBH
z@HwRd>Q=E@YONF;+IWb2a313)m_XC_IjCtr6~qBY*nTq^s7yHvllm3I?fMWD1?GTf
z(N{XZsSvMTW;<}&7Jz*bn=y|(2uWw9*s||A)%Ng&#&SKV#R8B<mBD4t7<Bj&OoZiS
zxR$F$owN<ml$B1f-W!yC2SA7AO{%<-4T)xKo+OYn&B&XpOJmtN3u?Y8<^XD)TZ|V<
z<m~(PL-j}HR9mf)_}YD;+Fw|2w^2d`_Akt>T~i>=Tn^qjQK0kVHx+Uc(xd*I<j)b)
zgr6m-TyPB37%yn$PBuT&i$s^g(_k!)1;>e1;4p(y=Orts;b<v6P|f&_%OzZERR)x9
zEJmlx3fiE3llR%*NyH0g)5M*jkgdu%x=(gMw#sWNI^+atuctw5;$RRPKW95eBB0oJ
z5Q;9nt#q2rxCD<PP&8r;IA+EJn4d!Nit(WEzRfF&_YvD|M$qbRO_D!ZAphzx>P$Wf
znr#{2Gp7Xo>+TYL)oN-jSq_R&29BBYnkt>fLRCHU9sE`2McadU0qbiTrw5>Ba3%@5
zvX&GS<bviTmL+(YN@`9pc3rv|it^%l_1qV9hBeb=C;TSjg|sr8)&D)3u~^EFgZE*p
z(P8!$NSWo0{Z{*;+uq(_d!!2L?w3;YDf+a!nq^wX4TJ|S1uR47L3InO!1+KK92jMV
z-45}%E;t{o=ZR5cMJ4!~pWyvhXJe0RAvAp52krMaQmyJa5VW2nF0a}A{m^G#^j`{Q
zcT;#^glMpnF~rWc5!F$0IIuem^^LE9_Vx219~?#myXR5GueX(Az2oRSSdaDaHspEG
z7PL`>;jS4h?`>*AOw7FD{_ZPiRdxtUZ4SfcuM{oshBBYuO!N9H$I-vLh;{{~lDfUc
zX!J9PE?p)@^UIwiST-F5vey*~M*%pEzCp9idxLLh3H81{4o!bYz>@1>YzK`qI3(mk
z1>=QbW*rqLD#1`$No*BMAZcMOEP9ZQrI!FkyY#{fcaKAoG!Bk#-h->oCZkDo2{=Dv
zOmFXS@Geioj?!=p{yqy<FQ3D7j@d*@b`T^H{n;7u52w&s&$*vv8N}ZLP>XPf%B>mL
zGQbx7Ng;XpfTCOa4^r{o1I07EIGuBwK|^r}m!>k$?y&yQu1Mg|1cu|F=kpN*E3tWk
zHEc7>#`yZJVDK}K<|k6-vt|tMz9+byd!}RaM|)5|RzYvu9A&zg2IzbEL5N^CUhq7O
zZCyfWN=X2N2j#@-{ssEu?j%%9UJ6rxQH(Vz2KDi&py~gNpOX=RM$hd*h`XuJh%F#~
z^Mz~VoYCM2%li#J4l?~zqW<p%@E>OjPNUd6{wf=^U-kxbZFMj?F#&fi41uqQOVHua
zGwR{bxMKPzs9W!0e0F9v6~!(f%9c_zOl2%a_ZR$WFCVlz^^~;yh^1<lmkFABV)p=+
zXZ;ldeQHNw`=b)Bd8Qm%J<gH%HRr(gZ8XfQzKqa462#57dBO4Vm7C{HM`enFd|-F1
zvXTQeM;F>N*dD~gR4Wh1S>eg|S5f(<itbn~#s*1W>JW)w^kO6^b?2k>p9gg0T2qww
zTn4>M%mX@=)f2aMK-+T-Y25T55p1b~%zBp1Zixm(zkIBJGY11ASwEt$hoMW;(6=>|
z$`w(>LHvxf3cL)edj{YJqeB?ga0Pq<%Q1CWDU!#_aB)C7(h8;pjkd(7)8U{!Y9(gg
zWb@?f$wZr6L*Y<XkMsz@b9^=Ew|=FQjSrya!0!|^gCQCsAiIYH*Qw{&j)!}^i});U
z;Kamb^+9YlmeHv(N1*v?9W{TLL&f{sNs#4C=wUtjN4Bd{|78|U?|&E<uV8%GxE`X$
z{h=*Z6w-_mpsRZ>x!RAh9K$w{?gewOPooQJ2&OXsO*%|X72twFEGOQ{=JpZ(p#PiA
z?F<T0d?^fL#j`=+yV^Wx{15W^M-o_#6`{iRC)wd7WgIvaXp5_&XG5pr2zSPA*<nx0
z@+j=ky8@-&YK-l!gZ}o@NXOAUY>v|-Pi1*96e~gSd=B^RbQP$7-U0*q7eo5BVJPf&
z<gJ*`ZRe&0blsH)FjpJJpUo@;=6)41Yscb)?T?AhR%_OS48+mXD?v7wafvQYBvk`s
zIA90U?U*+*x=an-T*662%>dLlHNbD<i$T+?Kgv#|Lcos*FxMJP-;6fIh6xc|y#HJ%
zs7Zs~I;Qx?z6k3wma<*NdC=VZ9v9@N0NYy;AegcM+((q*Y+A%}f|+38*+mBVZ^N|A
zebCi<0Cd_Ksl4O>4$EK~c7`V3GtU<cA1{KY#dBCzItSC6W1()@X58f&gjVPU!f|Xi
zT(k<BJ7@4`{|v$4yCHDFAenvMyO@rcOWJq$h0@>?u;V`S<E?$ety@!wv6tnrq`?TI
z98UnbXp21;SWeh)8F~y$V_f7!%uYT8^KEM|_)Y-4a?!;7g935+3L)Aq?WUThhETD0
z2r1CL26_g}vpjeK$bB35Qe_@>8#0M+xch`F&0hf9GAWu2lYonl0t^)2XmjOiD%Mwk
z^D8%)ygC`T`2ha7SAok89>8xa2cdoALX?zv!Mu|~Y=3u+OP`PezCJn3hj$S=#xs`O
zxCLan>T(p!??daJ6{E?H9P0nAgxnr?4YHVqsNb&=VsDgTWEI=VRGI`q$}_YvXdU=;
zzoT6z22#a9AZOWgCR><F7uB4=nlX$ysHml#uce@?U^)4hrz)fuIEY=(?z~6IY=^}l
z@R{_K<QK#c^;e7$_46+$#EXn+_KoDLuLts*aWWXIK{RbF_McviFxC+CGxQ+#FS`@I
zeB`tiyd<qwL9i1X(Ed&a`b?~a=IJ;1KTYSM*^Y7oaYaR&<Pr>3Q^Q){5~wgP0_z!G
zG;{Gp?DxP5O%D{o3#0y+zO@Xm^weOn`dr5M?#G^UY2|;CEjW~EP|{rnyz@78JgGAR
zqvS@!-|QzzJDWov%s7qG2hGI#NG4ADlnKkd67Z^O4GQL^@lI_7I%ZU1^UgywYe^p3
zx;voF+z8O|I7#~R%klP@7|>;WHSx;}(4|{Wgi9wu-PkaaYcU&rF2&+dHeXN6?x8B5
z#qeA;2|v!<hj1<!mhi}ShUn6&g%x<`T|92O6pbRat8~gk7nG&z;um=`K7FgisHV{%
z&JVFT^e7J7#V+vTA@iC&Hb9%{@woqZ9LqhN;_7u0=;4fQIP{1Wepq`1j%7+v|A{HW
zie~z^P!q2dPeAF)G8%mG0dY$^!HKwbE@OQ;Y6ezOsg?(q<1EMc95uT5&}h`TJPL&U
zS$-#Kq(yL*8uO-=LPL2B1Q(pdb(3q+L2o>nJlPAy5sk#Np@0nCo`??FulRz<Y<%#~
zMvT`EC5^{>({Xuzn5}UU#2%i!vO1f_+jdZgx)8WMVIH~{4@9*}rX5>8CrO)5gG)mZ
z{T6;6;x~CwC&Re_y9a=qk2P8RsutUZoWc%sTL}2O8JvzYFFviW>?!hQnrksEJrjaf
zv)ZU)fF&un3`0@N3zGeTX^z1UsQ^zCwSW54cPv9}Zkr26A2q;u&U%!luHpo3G1wRs
zKs9shsrCGQ81KKH>YdAgeb$pOJMICI^&Ca9<Zop+n_DFxF~{~>ju70HPp?m#jIJ$d
zjCX9rm{Uc(jPZ*i^wV(Y{<GlPo(+O~*EngPK*oS8C7plIgJ{z*uIJxC2rxZ^?aMBb
zP$wnKI2aA)4vC=8dfPigjIcN|1UlodVXFyaMqV1udKs1{=syTrD#Kul$_DJ{?xFIC
z3~uS{cvQ{`q1^_BpluulDRx6}K3RgBo4j#c28WyVqcJ(87~}8qB)i8O-14uJ;B86N
zeDE6PA8sLcH;d7z-%!}GCIK`TmGOp^YoX=6lIk<fAZ2AF>nFFEH}yA!7cVAafAukF
zAbmv>HXOvK+vQ+>E1z~n?qL~lDSR820HvqQK_Cb=m(1FS`o4dttaLXO@SCal6Wc@d
zvxbWgxdxrH+3fI4F?m)!94oBZ&XxoHFk3PeRrRyMdR+riA0z>-faApbM?B;Aoxz!1
zJQ^HhY|ffeQg<#L($<tAkaARMU`%>FTdZnf`$=E9Ln)t4f)A!r(L2UCUp56rqYa>4
z<OhlvPi{Z+yo)D}C8PGt!_jADz;Vg&SO+k<l5scBhGI(P1-9=j8|q9V!3F=)fo+Si
z#FFg>itQ#99xq6BB}HLv7C+rJ8tI3Ls5K;$f_VdNwI50x5{DE2)_5-e)Oz^CK0{Bd
zKIpVC1x4W#z;RCo>{czni4rH&w0lG%wp7F7>#-on?#BhZk0W}!l-OzIid!OWFv@!x
zI;~Bm4W|rgv(G+iagcehZ$?%w2+G728+B0D%KUSiOrgHz8}0N=MYRGk5j`@54vQBg
zenJ~{)89qZiY=&qIOD-zJC5cFD#W&71-O5ogbofTDP{}@trRcpN!tt3QNHLilj(iZ
zei&v{j?%CQe4@1wW0x_;?=msJ-y;fx4;*4UyZs^F*q7v21GMc~hTmr>P%VBRj9A5X
zFCH6*>c4g1d+ucBpO&G0JY&+l=|x;NM4(H=3bu#zn}z)D1`>374Uugsg2Pv?v3_6;
z5x-r@iKK~eOR0?mR4;>-X$bU`Gfv38VrUNDN(B<{a<OtU_vdQ@ocm`1DxX%7VEsyJ
z!aQ*Q#nfWH)mCVGGZwr*odH3`VQL=t4|Qn#mpA!79O`@+({!IPbv8ApZpdbAf2Wae
z?x(PGXFlrGEQ5R5PH5Qw7_=Nd!8oCjP;qDlR6VMJI=|k;UW@JXym1gr7Mj4AG#$*^
z&Guc!i=gsNIvNdFPlR0>Aeual-{%;LRtf%KV)}vFnv^rHvkMi>pHx|6c>(IaOYmXA
zMT}(ny(EHZCrpQw8}`Dm$5#obng;r(n@DyRg6MI!#kIA=P~YweH5hXi;3(sjIGv%P
zk8@z{qQ&T4c^L=n&tf{%U#b<5$2j68*zsKqE~eFxeEk>(2dCl|QjGE$o7g-~#Fw3R
zM(byNuzrFZ-m0<Pufl<R>4s<`STm=xt7bMZtrk^&mx4H>qtbWr2FNH|joR1eK=ZT_
z<VIsQG(}3G`Fk|~`g%5$HHxvv?>h08B!cFL9KJ_QjH4WOV(Yw6B07`J%V!&d=Z<Xp
z`Cta5nOtBTi-VkITLd+$`X5E-8dlTRhT(-&k_sV&5Rwq0n&ZWiBuPafN>+qa2uTP_
zXXzyAKsujGt<E*aTdk6W5JK#*5JCuT+JtX>Km6RTYiBm+81MT$_dOkN|EdCAlWM5V
z_rpuu=HVROQ;_i>n)Doh#YwURVBEeAdwLx}JZ2|N+$n%9el-}Oe-+{MX;eRzLIO4^
zg8ut5qS34f2A$z-WZ?<=E{BNXF=t%8S%e`Ua$y}wgZAGOXnb-!?F+n$I^X6(@LyBZ
z-kORr#$GTs@DNr7yMxnT#!88ZpuSV?kOsq45d0Q#W0VTfRBk)G4|Bkm_2r<sBZ>9A
z*zZ)yW)Pp5P~BttnWXT|#B+2mWMu3C<v%>wCvPNszzB6mT!z<G$64>dfcDiZ!xHOE
zj4)e{>g5r5;CU!mZYibzgt_7E`xlX%U5aTB_n}tbcFxh&8%(d(V&nZx8l**WHtW%7
zeaPWk--)RXY7(x*8GqU_kNv_@_{_Won};Xj^Sc{SMJ|CkhB|1FiXY2{e4|}^7=P+i
zD$0VV^Cu!$rZy&l7`4BpCYS}=hq3$6%v91gOBXiD=b)T^I($o-$y|>MAR!?Od%P6r
zoX=IT=vNe^+?MB~UoaoVH6gKo`H4y{Kczp3B~Is;q2i$LL~+bMu*u!Z?um;)Ao*&f
zcxDeh$M&9*1)8AtVIPR1l&Jc#3-GbRgL$=I(Y$ft(BV=K>|>96N3hP|`(TjOJ>-Pd
z>o~JZxtRGW9eXUZ;F(uCBnp;7&#KczT$NlM<8+dpPm92GcLx>oF=!cR14Ey<<GD|B
zG0mv~zAul3f!-mEDRKq24QBj-6Q1x}KLrKau2p_7%At9I7ikwdfLR;M?oYR-64jfu
zeYOCc?a!dBPYUsDPS!fngS6Qw!27){KYel`s9zfdwdEDib|i$V=+3}nq2A2f{gO8d
zqSUo{1(A)h1>2V_&pF?fru@_(JsQm2Vzi2;v@s5$EErxsO#{(MMcTSE3~X$Yx%M@u
z;ma&`_W0gOqaK*Rt<=RR+5Cf>n!Xs<-7AA%V<w=@N_KX3K0)m_jix&0*9d+IB)6B2
z#*T@K5IXE44%0b;Jyj(@mNCB7r9t51Isv4LpNZgs5cKCy!_ik*CP1<e4c4~Nw%`id
z>%18R4@K1JFk?IhnZUZBBJh26nT*xQWgNjDyz68QYBDMkv%WR}m#u^;=3-uVN-)6v
z;Vf5M2a=v@F#R(C<=F30qS7_;X-yznDjdKqK_l_DI0tgBekTxonaUmAg+Ccv$IC{7
zy`&Dyjs=1Kqvg~t+8HVg^Px{{LraHhqO0Z`qLH?pgseVE`{tj4*lY*<Z=*jde`4I9
z)C!!rJs0I1LSY}9MeFGVqxpZmv~5Nuz~m&Ndol=I_7xMK{aZjhc`zTf)DdhlA9B6!
ziy-8OA=|k;BaNYJaLNpSc$qJOPPu=nko5`ThpUq_567T7%gweq?FG|zD=-;75UYB0
zpfs%!wQa^=V&oW*ZTUvM%PzpnF^kaC{a;>K-^14j*WkLX`RpDs2kcWUz;tdna=r$b
z7shsS0uQWwk%5k5p3=U$<FI;73>M7P1lhq=wC6+~X|?#sRgA6!VEh{U?ptL3f+Xxe
zpANytdHn8=@X0I(mA`$bWB0S!<D3H+ukQ|i&P}lG%~tgC;EB%hgY@j%6iifjN^)!V
zp>SRTH%O?9vg^mV(Wj)~c)pT&%WBbL-XOGA2*WSmbMVcO1vt{l4aWx?;UTk4_}P^0
z(SGLeqTN~il(=dz&m4*xMNLp!a2_I_guvaO8rb<qLORBrfsAV`bG&i_=ydL*0nZyi
zPGKO(35_vK`80cGhJsRIJ_wuJcu9R0LH7i3Ib{rC)+I#c5zDy5vN^K9Gb)y)VA+$A
zAeJA`+ictmN@v*Fi#=ZkPgbGF(wo#}^&*g+9Z!uif^f{0U<`~fM1%PX;A6ph$@$To
zw^lmd)`$ns;Gew8ya?!7yq0ygb}?p)gu<@nFn&`hj<dM}`b$bd)YQp8J66az>TY=9
zUNpYjk&FuQr*VUOG=A*WMK$vTF1@A(z1#)ZzG)rwf0@S|y}QXFm#b*GB#uhG|Karf
z93ZPZ7rLf3QrQOPz{~3*+va-U&ju?ru?~Zxm&fs~N&vPM42R~f1{%Mxg7GjHR{J#F
zriQ1pFv<==>N|$dcyg77#jrDl?-bgyf-zdk;z?NPN{sn031pA=^FG~fq*K8P%2Uix
zR#l5e2M>UiRWh6vT*CaPJl>hCiSuC*^B7+wACF1UMrH$EzqT-D;S)9!drQAXo<~QE
zCSnjf4}|ypsA#B^%ze2A%a#U`BbHau-mHZ_TuGs6MGaQeO~Ua-)6jpsJJu@r!Thds
z04&4j+Ym-n?lz&<UPIjTaR!d)OasM9tY=XCgxB{@hGq?UVpAW<Rcr2L8TCip--q)s
z&z(I#o3p{FZxr>=Uk;tcLqOhx-Opm|AZ4mIwXr?V?|s0!&_=N++^9#|XR}>=7~3IR
zy&)m{7m<RMA827>6(}mJ!scgezAsw?T|e3F>)TpTKAwn*T9ar`?;E1_+JSEPTMb&N
zgV94jpQ=Bx2CD~+aB*=YI=WKOz9o<P-CLP|hKC}DIoKI#1fv4ip~qHdtXh2vh6~EE
z{Y?x6d}5vOuF2>UmPd`Q4<~kGw!x)o<!GMwfyxw5a0RUYSu>o+&aH<)xwwNG*Qlf1
zKRr~%kM$n!dg8mY=BPBJ06OF{z^7Fkwm!FI{6HaW{uu~KAJP!i#mu4iAJ_5V3{`tJ
zmW)a(!**|LqUoAMhHS3G!Y%8uVCoV`xsuA~Jb6tLcf6o2*DT;``Xmh6uYrx1#zWuT
zAyoTa5|Jy}2G2G#enBOh|Lfl1K+_CDFC|0B$YIoKFFXI1&!!=>{w0!acQ~8F^T^Ht
z#VAn9;4OVJNx+85AR2m$h<3*iwVx%tc$HJN)$nw%|58aCSBEkly&ZJkS_9@!S*HBR
zQRog>4Ql!Yd|q!fbO#;){VUGISEZVSNL;|xeLdJ+yhOqd`=b8(QwXEAz;qb}neiVk
z;}4r(-Wrcm&3&Y=$&O_~%!xoyX6Si&0+>Zi!h)JI(s*wNbX%XMfqOjgy~-i<9#aag
zwcki|<tUVnU&HC$eMRjCnZjge)}xK4w0&P2748TmU4ysKf{6>U?EWn9Wh@Zc4jFBn
zaFs52<$@WTZW7Z4w`j92^Pe;3;ZJ!f*!U{IAEl$1(KZ-ER{o?>hmts<mnolEI1{x`
zu$|bmT%!1-noQrva)cS5jdET$fYItX#N)^nmPPePN$^T&ow5riO~}IcFDv0*Ryvrh
zD~G;-jZ|cx$;%z+q8t9K!anscq-x7b9ACH}9T%{yWT`n{<uR1Fx5wbau6j7Sp$04f
znWMmmmm^Z@xweZ6oSztn3QIA^bQi{6&j4Z7Y$|zmk()S%W#9a-K-;!U&>H@i+tyGG
z2hS(r&NmI<rF4;QUX}(aLmm@-lT`c`T7`aeFn;k|h0aZ3=y@=Zb<^62Sapu^rtj*w
zWDDbkHTCdwo<+mnoMia9A`apo1j3#lfVm6LqD62r>)eh954QhPT%3(IIzw?=Z31*v
zsgmZe8bq*Y0$8pMBJE#W$fmK(weeDiPV0+B>F!6=u3inMhqD~mJ!>d%_=gycN`RjM
zsi<F^#1Foki;riWMFR~U-&8uHKbyS-HAZ8|s?8w%Rm1%$TZmx}jnMUQ6P?0X%{_yS
z!Sl)rxRDe9J~s+*ob^#u3=AV3Q?oH*Z4A1!&w%bb3nAr~JExcEhgL7oL(dycY7VD?
zZB1ast4M5ld7XH!yHB+SW^rM&qN&|V6Ob+mqyPLojCDg3@MYb3=uOUoamF#QFgyY~
z*v&$4T#YzRuf^upVCqts2(rwhT;KB|_>@%v1*5e=Qn4BYiw+s9tjvNn6VG7o=JS~K
zM~r!UkKjkMQ7E##K{BrCfa2YKFqUQ6IdL_7D9>P?j4YVqTLaSL&x|Dr+o5{<Sv;6I
z7a!X@;a*pk=Q@0wzF8Ov!Mmf;cegRv?PlMkmw_x3wE%V2%>?O;^ITwZEX!FKfb8ZB
zF2vCh3gv3C({mFB*2JO(`zCwlv3uL~<)Ab1ILTDqioTOCG5${@#fV^dcrqGf8yTle
zItC8dBtdI+C#mtu!e5g;Q6?EjmFkP3b9FvGDJX{M=*v*Hc@+NBZI92k`T^eZMHQbC
z)SgpDLH;&vnap~9PZksV>+`5Nb9_EH6pd{PtHGYRjZb^z;h&;YFmR$D*n~EL)5=Sz
z_jD*kY24)-hdieu-*J3L-5OAK$OXBgYsC4@NlbgpvM@tWk^Vew#?z}}yUi@rH4jC#
zL^+oIk;0SC1hhC@22ItBjqvg`w5na^oAri)Wy?d_dqx#(&ZU#qhlXHhG=X(MuMxW?
zGtg~iEH=E*z_t+~<XvGNhCCPv3M1yD{9Hd!G@3=Fxo@d(r~)*sI)~}thmEU_Vg0=v
z>?;VQLeXY^`G+#JpLm)kF5iytb6BsEFE@_JVJ`Cb7m0phB+1f9L+6=AsJ*J5JoYg~
zpWSKnde=_ui%O+YWpP|B%N|d0U4}yEA-tp^3{}=Kp5nIeyl$v19?+?T{*ePgFz`KJ
zawr8|pPa&2<roxBH^wKwMxp2V9dz8`Y;5%|g=Uul#NbapoS)~2`lSuD{}LswCzI$z
z1;A(Bir6>SkXXLi1E2D)z^(2*sPXYTNinSD?FG!Y*%pY62Quk0t{z>JY)F%gLP7U^
zI`Bm$be5QduQSVd#N~m{-jxtls7-rsDS`0J0r0X2hO)IcNx;-<(&V`p1IScZGUN(~
zj~(P98ssoV-GNJ*#P~yl{;OUXyc6}5RzS+zwOl;w4WE!tMmhiakYdbuVJ4AqL?s=S
z7wDpoz?s@jIsvk+lQ^}Or-XB5?(*M5P@Z*HMu#DUt!cm>g`4#EmL(`ysLx4$dDHr~
zN)#UUq5{2kz9#Mf>j&<HkcAWIPH7<ctk{9Z1?MoJe=2kBg%YcSPMDC+_AY%pK>5fI
zDu_B@tg<c}Yz#SYWqxk$8<|jYmHApqzEJg7^{~k`14TQAa*8a&p2OI;u45-r^*yXp
zQdIy$UfW?vsXbKhFhptOdQM`S2b(W$N0*C1&|7YT2c||~^XqizIzEYtiD`BJpJS*u
zH5g8Qsld;5ny9ujh!-XvgWh+hpcC&(7X00hg5ZV5o2`%F2CF=9Wn3p=R5ow$%MWrU
z*+Q7E0G!@44$@cOD$%WSFz<4sy{^%;X-Ey^-?@zXx$9A+&4E{^8aVzo$4@Us?3=j-
zv^5W7S#%h17VI27w4K_F-AxUaIzf``GUSxS5b=i`;}p%;+{;V}gl=fWKJQ)Fe!Ypz
zImfa{wqdZOCku4xSsdSW0?}<6bS-m3n_AZYiuEDm?EKK{U^SIaqsD6eQ^9qu70ZE-
z2bfAhoN|mh_T@w8ql2KG9m+Va6=?oq9_=Z=43;A+q4D_x+P~o=Rc@-JJ_Cn={rf~Z
zdG0Q3O^)NEb{R8naxv)4Rs>wpN@SMHK@-?~up<|>2KaMo=}N@+-D4tpz}P3j&&Y4L
z33zYiMX)d044>30pk-nm^s_!!eDpXluX#iy(|tfTz=9MGEMWW59cX{Vn)oD8h@Bn}
zBgXH*u95NBHE9hf-c~|QBV&*+$cGRX9FJ;=0_|Almwu1~RZA7fhi5@}s9Y0;4ypY2
zRVAqW;~?AL7i0C)Qq(lv3yStG5PGi|b+%ujex1wFK<@;c9-xY)>SHlvwFB5_F<%Tl
zN;@yOgU7aDR9l`+oA0`S;QQe!;RcrF8Xynx`ph$;H<{R%Dv)tQv!P?91l0;mS#D67
z#IIAM3OSR}XP*ucs(8_^$bAr2bDzZ52jKg^8qB$Oj8h*S3G%ZBVN^{GxPPz4_d27{
z+4dsZ+^HgcS_aIMkVZW}HA3MsWsLf|4?Ih55)H}|joDkM=3PCuf0m<Xi<?mVCLW$V
zZ$zoMfuyl+yvD_9=8Tz2ceY3wi%x;G7<#bIb}3hNz6rzVMi`Zu!k%vn&?;TbcZJu0
zmcuSSCfytD-Q#HEoT-pCi?No6{^8=^rh^W*80>T!pw{Ffc!oM)zuRCq{wf<KKR=LW
zQ8G2^DZm$bhj4bP4+c2o(>Yhdam3D2@X3}VZ`9ZvCv7`7^Z7_r*J&U-R8{b8mL+zW
z-=T^B$w*)6SK7Bnn+V>FBUL|sQLS_~r^-sLUL#+H_IFZIH2yJfekTdkIF>!WoC2jq
z`6vx$GYA($T54R50=B1Xoe{<tJXiv$>}UUipM);c#=&hbb_P0<1X1yf=d6~&&u5Gd
zo7QM_JoSpSOLSoVpICU@!(5oVMw8`A9KJd;1C3^RlP={8pm?m1rah>H?^Y}~ap(XU
z>mGz573Wd<`!EPxbgEltmT^Z!+Yo1_k$~;zAZ!lH<8RO-;`}8b&<d}v7?*|_1!^ER
z+g;siluHxyS>9OJj1$Z#qS}%KI@PQRb!HwXig!$jnm4lCodQS;C-a(&smmXXfpzmL
z;nU9)kQBu5;^*1b?HWxusKE@whA)IR&<4ek=RwB&zaA$UpENNPd}9YQmedTsip`nL
zPj96EcbX;gjMq74E;L)7|9?gc1^2DNCG<HFjt>X1;~-A-vWh>`kEmWY7UsK#!Sh3m
zZ}ZP&xawz(iUHH9SnY82!(WF`^zaMm^0`I1_BedQI^pJH&p^fW6zCgu5K{QP)c(L=
zVq$k5)H9MWWnU$VRaH1g$q?A*xq&gw%6N;@tn+f{G4-j=g0IaiBU1B@inouVg}0cq
zZE!j1d2B<vdr~0dQ4)!LAc3e+{upsK3x=O_z<;ut`(uqK+PLvVBp$;Z+7`}O6hl$^
z&=G@J#z!{f2p982fMUH88b53nm1LBX5S>5}UQdBTpL1}a$yrRfrofv9XtF+3Iq(}M
zVU>OZ(eGy&j8}P>GH@d?ALt1s{S^>3^A1<Ayp?&_135q2<=7t?jNex*zy~|ooww&L
zk!Zi6;!o<lX<;B#)y@U=HGir2OEPAD4#$Qv#+9>}NoEE3V~)F&>=;*$dykr9{DGrn
zz~gj$sj&*ft}Fx3&dIQFjXJ*hoQZ$rPJ!`eZ}jM?2j7k1L`=Q8oLR>~&N~sx7Bf#N
zlt96x^XL)ui%RY-#M^A{VsDj0e->Xrqjf@%#$~cxR{`1u6Ofr%aX3E#q#6O7?SpuR
z4@m*_uxG?b-I+EY`AO=|I-vBAfPUQ-i&atq6;=;p?$Q`u6aP?I?@7>!h{3Gyj3f3l
zxLVyZ8|M|IF|JP#`F<@5<jT%Lzle1d#>@cME-&;F8bP9SHp!jDSe^zg)mpcHk>*cc
zwAJ(s*WMgW?R#?2FFg}#N3nm0r>m&q6gB8j%_goNCPLQ^7Z~><88TKzfb~`zj31=}
zVWUQX-`W(^5S3G%E6Uh7cL)Ud@gSFyO;?RNiG52QY3sjE^nObWYJU%<UVm&Ms)OxT
zr-Y-IZ6PsNP^QgsZ%A5v4Yb@i&GMyJsba}bYM?e0Q#>m{yx+81<IZc+UTg>t6AID5
zzz*6r{zIp%D}d_$EqLMdR>p`pO5d9c&_(Ykcq}EDVj$r>RF^^gk8Wxs&?H7)eXMUC
z$a@+Jsns<ef9#CHRuy*|YLbrvUu&5Dj&&eLJS6B=09v-!X+d%~?b+DK`y~!Wxo8R5
zzD$T!q3nCXvUg8bHe%XxMbxSPOf82gLeN+?-!B~x%G=I>d5#%LA$z$u)+Hb~##Q&-
zTuBV_qNvQzn3Gm-=b~~a)6%cIaYIZJT7;*8TPEX;1cXBCtB+*Q(n7T1ZK&Y45X8FC
z#$JIpY5%bhXxo6$Aq~e*b@gDCJslrvp2BvGOtSl;n9VomfnA&hxSGVH-J&ZP;I4|>
z3gofRG8q*8-V^>_4YVE_2^s-mu<K?i#&Zivz={Hh`m%$Bvbl7c`B?~=7fuEJ>|Qxp
zLe~v-MbY&?r1yj^02e_sPU%8X=PJx$yfT|76<B4khUk4b1oMwD&c^aq<GXDh=$T$c
zjm+6>p50f!Mx2AL`-PNqMQE!Ir(LT=pcPvVD&8q@Ln8$JW}U*H?w6pg&l-Iwl04&V
z=4318Tn%@@8F@$MFc}QOY4%`lVZ~T39rQ8FehD*g(4UnoQ+MwJiE5$r&AqGOrRE2&
z9a*S0MuQ4>iMW8d_9)DdfS$OE{*hf|T8T>P7;_qO9DG3-`pLK~ow3&@AbpnBfDM+h
zaHC=;N}4wjyX%p3@wy7!ziB3R-P}tOpHG9>nTRF*?Emj>fpI(g`9|h7pq=7i^z5jI
zu!CJxx-FScF)QRASX{)GH7t|Sx{u4C-4wi7cU_a6ZSPM8*X@eLg=N`oRI|v98>cYQ
z+z%DrQ%t%z5H)JDh)L`i{CO)IzQ>(K7yDY6V^fcQ=DF}|=M=<Cm+0oSMJQxklb*MW
z**=icsP2O#XWV_F-GQX_KqYr#&03tbxEf<$vGe~HdGM}Yh>C8vNd)hYVI3v3Bb;S5
z-Hzk81~!+g3B%fIe-KFP7s}~gqF)<paQvpZ*e0lgnU&t?_+$WBbg)@WfISo(sKHOi
z5}=l`LMMf<!<3{M5HNNI>Z|8sfwCf0HQExJ!BT4Ztq$y8#lffjF}QfO3QFgiW61cq
zkUKI5BSuIWGt>a$N1g?vX4cbax&ZR)S#BwdWtw;2Ctd|3@Egl8+zzcr&*>{cnPmr!
z>Mp~g^9H!-S~luWn2iH=rQ)9pbufFycI<vN4LntmU(5F6H~g<+l3h9W?F^!#^g+CI
zTmo1-R$`1<33Jg60nvs*bV}TM^okn}9wDjF-T07*efq2C3?GDQF`L;L?<t9ri$+D)
zB<6P;3wry<z{BlIXuA0<3d5K3X-8Q1(0&DZvnd^Jh}pZKGmH0R{b9c;64bx+miXwN
zq+ZixM4TkybTaos>-QU6&h2C>n>`4+Plx03SvGiLc@fLAb<>!|vmr641oVS7Amn}-
zh#oxRg_+{&mL@+Mbp9arRZRtZt+h0wN{m?<(=qG}k48h{sEyfnkRK3%DIJfAxoRbP
z{!zqXFKTg7HtW;<b)suuP&C`<z<i(suw+jY{OHI=kz5MBo3;}}Jhs#3k#0m|xh8B_
zbQYdX^1#2!QVjSMN*tHPBA1(oYSK6^=htY4%I_qeSIRJF=y~$__E=nHXphy4ldwgp
zgGkJe!GvEaI6ue_Cw*Zaop&rdlTd?6r^R5K?v5d&$B<`iCn|Um0Iegcpz-@+gdQ(w
z!{^jzz!~(HXYY#GU?R&96Va^o(0n$5xtp2Ovh*?MQ92mAtE))HGH-|q%p=lGEOUQ5
zm^XUeNd#XitIpPnm}JQia@@0E{SKBpvep3s^L|PLp3^0l3vpTbNvyDQK+%ds$QdoB
zU1h-}=G+SsZ{tqX@8y!CyVhb>{Z&wsB(WUja%gN%1cMLzvDp}iWx+)vzA_9RZHmFN
z%POEVwwIPiUdGNl8u0pM209TbJbo9>oP^BF?HGU_X)NN~dKRXNa+$9ylNhhNiu##_
z(0El5h8&!QN7Y!)Exy*UXW|&rIdK?!?iGnFu$1)738gM<PZp)Uk!ZcLr3cDbhW?@e
zHk|N>rOG?dk?p7FFAoO0f*lxYkc3WK5DhkQ=;D?PL#F8BOT9dZe>#<ZvSVJAM@R*>
z+QycFc~DiZfE7_!!HWIN1>%tvG1*4oa&91uE8mJb4}(E_Q4<;1;)lXJ%Wz3d61*IK
z45cTB@KcJCakZ=-qt8jegJqjyZMWk6dyKQI&b)nNW`pC=t?=`0B%E+>VDrs(V>?MK
zb__WWT~}9tT3ji4lbXRYn9Nmu<}~_LbP}tnqgb}OiF!6<@~hlWVBhp$8e^1BehJs3
z`E}Mm{@0#rE>!?u)?e^3974UVGoUY5l~^2Kjq&MQQ4)WPTNjcGik}QY@K#o}!)-1$
z{LMq@6gETK5)QosH$mDVMcnwU3Vn2^;2v`!>bQ@B4cC(KXuvt<ld)p%V093^6G5zb
z5;P^TJ;Z-WpnZ^aSNn^(_!o!B#LYPnV}1bKBv%;cKo|Aj<YJCGQqR?9pb?vfS&ubP
z`%xrBnJs}gvSaA7GoO0*v3yVs$8%j*p|$)Nn%|)GW<eER={H8zz5duW@G4|D9EGd}
zjO~|^hsqr#L?&ziakN@>?!RUDaZe>~wNAmDk5^g0>|^!ke~;n;$5hr`x&%{)M4)1O
z6|sMt2$0fAP3n_z*7pr4iXIMn=AY=NW$~c6Rspr-E>aIeJLum(5elTCM7S`O+P93s
zZ&@)o?rtpDg&YNO_#pgo-VEhV4Mp?!L*d7V8q`VJLHTJ#;MW-p;=!ua<EuByt(^dC
z&afW4WgsW{ua_v_6@%VU0f?LCa4|E7fYz_4+^bS`+;A=ow$&$sj}hYmu24YbtSU%|
zuEtU9T->djLaT<wlfLzognsphfG9ICP$&d7%dNCG*A;v#=R&8KAGG@4CW0IDjWm;P
z(SB29s7@B5QSdMf6|F@5y$e7ssgf()mJM1d&P1xd1Vo<J&{O|{>v(mKD6OkR?V9&g
z!(<&PQ<?&uza(_=_<A&(b_s;X|8kQO^RPSmEcyM+5WhFtqyDrD)G~V@#-ASrau;ey
zYg{JkwP{1iE9TJ7C;<C22@vd(j^RsfFr*<CjC6mHsQV^hdajVz|6zBWiLsoSLJ~~e
zc?#lXn;DZ)j%0j^hZMQryujoK@zL21T2~HYNX$Cs&Mt<R`j<fSF>_=a?}uj!r|@25
z9AsE|W3yQ}mEu>@XjTf@EKk|~F&wr1bLkA0@y)qM(L)+T%C7xGTX@#%_KYE4Wz2uO
zG?v^O9tWPzU1X9On{CWv`#|kR*g8uWdm`+iM|A*^K`$qsbe$)mC-BX3c2~^U0?(hC
zU|$^TFh}1a;{z5kzk5INEOmqwI|0eGQpFf)InfV|<n(6jhNyQb__1Fduh>*z^NuE*
z-Le!V^RDr2uZwBd*^g8|ET5iR;Exv{g`w9e2e3cKSmP5z;KT%fOms-aQ`$M$k#U{K
zJBwj5>u3oqEvp`#kHUw}0<6jz4=OEZ(Q^RfDg`u9C&ywmRT&JTh&q<J`^FcTF`iT#
z;afFKnE!VV4LKA^A|E!P8XwC!6$U_`Nhmvuv{$!@CNn;`8&qk3r0Nec@tVSR+&i`e
z=3nE%5q3fMm^q+fD37w&BY2mcVf0=L^MdospeKJc>5oR#-zksVo)xfr@hLQDEymuK
zdsNUjquNz0BHOZ3AnI`zZ(jSEsQ$yet=;MT78hOA=#3<jkM1Zv_J~NM&vL11&KN$n
z5*t`gwfn?cWK4e2TbKhLquG0p#t}o~e4LVa8p_&^0j8XVn7(XK^ZiUhqFz&T-DO~Z
zVLg1+_5)uH9;djmS--0){Ea9@apxY=W%Q0Lc%Fx{p&PiQ&5P0JT|CXp8;tKug3#5o
z7PVea;<YVviKtA>>nx4~iO7?-%UMz(J~h5xScK`v%24=ZHHiPLg%>g}6w6}xk`MVH
zzVMw7+1WrA8D4;@y~?0+sT5_Ob*XOEVZ37#&D?~^pp&%|J}zXw@8zs}<2DNF><Qa_
zd7<W~(@^T<g&7an9yKPEjJ{QeEh}rleo;B~81sP&mF?L4yP;a}nw8^+T46y$6n3jr
zg2$>3@=umKn(Iy^VW*o&&!q@na<!C~tz>TNlYfo9S<k89V>QW<lfuOIA}F{P1Tv!u
z)KV}3?q1x5eeEkrz`%GgVc)r?Y{u>OHv*GQ$;?H@L5SlF;<KU<n%1-IS>pg|6mSx+
zJ9)FbaUeFVzlef61|VVc$YUQ|(Eg?v%wyf4`nMC>C>3({%FKiQi7|?lY-!=sP{>-r
z_=M9Q5Lb(OVsl^%2&`l159S{(WIfpq*ASRtj@apUn<zU>1)m+m0MZA7*oLE)^5wAi
zUO5QuxA4sqPJ>Z-D9EQ?1`Vw>q=;iZ)e2Q?OUMSn(Z6)eP(93fsza3r7r@^g=GZbW
z6ohNDIXV9jkd&yvmo<^tm>3HO`qc1b0PB+{jVDK0r?M)2FS`@2#S}pfOdKDHwS8$I
zw)@NXDO-b5Kn(6;d6jb$W}))@q1fq~NA(|Na8mbEynXZpFi02;UAd#6(VVdih1TS`
zU>eH(aRt~q6sD%{!kC2P5Wu<w3ynvjOmL5z@5gvs@oaBzqD-nLUL{t2@vK*%50Vqd
zXw}F-G@YUZel8awY@IUXls3}lzIbdt9mn$UfyS<DCV|$o$s{N1IEWQbR0oY^8Cf55
zwAPBk5wmijo$*W-eJ=&G)>2e@mkMDmG1TTk4~G*oQDSEUW&H`TL_Qu&ZF8~rIeQk9
zZ;))e6KE>Q2JItmtlQ7>W5tiToP-6iy*veVr?{j4s<rrry|Xfc*siFgkGLFuN$eh9
zpjy+8;X0Ou6jif6M$jgHwsI-Ujuw;eT3JvfNQYOwh_WTIRG@ka?^qaP%vJ<gw*eZ4
zH-eg`78UFWH}Wa=A@gI)(RB9|(3rE8>Z#o)r=#o8AwLuQ{GU+E@uNYm*9-(#2Jk^;
zwcyL{j~TzSnN!is2sMwPt5+@<#5NK=M)K_X*95*cHiGo+Rgx;n$7nkW%>j)t{Ej<z
zo>Kvf5q21{Cmt=`(oj+41=5@Y@aJ<SNdNoBi(S~9TDF%OjTsDPlY+qG5X-A=U&48+
z8W8)tYf!4a4Bm#E#6RVUXnlJmHirq&QM&*Vn*;GH^FQdntK*~o8%(ku_(P5)gQl3R
z;No|jAx&EutE<qRPQ7~>-Oe&@=m}la+am`iwuRVmYdX5r#6iyBQ1o#vM?2|YT+qJ?
zr`Mjt(4Q=`RHFi&X^TOEwseO=ChFvk26fX{)J?7u^PXjby-f@8jE;gWjD=r{6r+62
zK=f-EivMe&K1LsjT_>A8-kF1f-glh%%nr=yUO?<mW<a~_7}4;pL%~OXqn!8Juw;2U
z2&48<>qVC6;xif~`|j|oeHO8~A7jLXQpmU%fgLKPRJ~M6RK@A&ad9QY*j=DD2ey*P
z)m!m_pbq0B#*vry%va=ZO0yG~kGOIP(Vtb1b3B;md&>;|VX_elY)%8uo;{^kO{gB=
zfm;k&Pw}n~_$;%4BlWsyzdaN!^jQYyGlAvo9hemyfJSBYw8tR<M8nfK<+V46Sa1on
zg#YsHlf$sp&5U#%)Ta3g)7Z|7v4v;ra6+~J=&pC6*gwOMdYy9vvF9b0W!wl#-GvZ-
zOTsu(@m!uC%VjUQOAXvO$QYUk%FZ*P?|CH*XWxT>pOXML9K*(te`(w7Pt<JjVYE5)
zf!ls39(y9QsAeJS2ddR#l<^1NK!xoLmR(@XeLqg=cs&Z0H}Lwy&f$}R;h^v&9Qy)t
zSy#~;445&{^w<i}cH0H9M%yv-avm1U3ZZk9C*a1$D3mQ*ie5UqAvA<_bZ%SFdoh`)
zQSyhx$BIEZ@&sJ5Wb@{*Bs3Ue3L|C}ptLQyT7%6jwRG9<gUem&rB{!ia@qG_Qz+Y`
zI6&iQWzY~IX=gilrTqgjXLu&Y$N+2tO3}P{H+52BjQ;COiNPn9SKZc6^<FlC@*&3E
zT5uG!-tOVb+U^tKvm&Y`&)fy0CHVL~^E+m3f+cS+;G3>IP+H`U6-{}VxwHYLy%V4_
zyb%Pd3uwlJJTNYgMWf;WkPw+BbEDtn#Uln859vLQp_f_Lew7Dorjt<cev+}lD#r5b
z@`9tQhoboX2(Gifo~$pMj(u9H;2O4!dVW7jyu$_9QgfSV*l#17i`f41kR$v)$)oD6
zEVO?*kw&fDOP0%O@O5(t=Is2B%rp)|o3**z?6_<cG`uqk8|Vri4$M@vJ{y+WIAcWb
zIlMs{AjPSkjEFEsasjbOs>^yiFO7c<%E8lH7h~47xopN>$LU={(Emm;eoPvWwp!+p
zJx8T;J4r+1E_`QQjuWp0z(=J}Y!BH9ka`FV;!I#gXFUGM@I}P|JHYXe1bytkl7RQI
zw6kFk`;IDt^yXsP*LfM#mZgJq%zZB5a~_(${z1)DB0)A+k<7lpylgLP;hSPQ4%~ea
z1UD8y&yf*SZS@EK$+q2?c#dTgtDULW)>b+$oVhah>JxF<Z$3VynDnzVoWWY=OZVA`
z0!e?R$Sj`^S+gC$gLw&EHbVOqHv3<*i7X6v#<KUhq;2w6@Db00=YRKNUv~)={PU92
zS-gw3=KbbOel>#4g{7o#!f=|ncL6#Ll|xxRg-wGP&w~97d!8Jl&lQJbU~@EXJHY<!
zTT+cj`xm0NqdfcxAB;DqCW5FT1(l0OL2J=Pss=B32+F6f3d+Rbl{<Jl`D2P>7rhX)
z6XmbRfc~2Yq;=*9f(NW{>j4f2W+Y<4w*uOg*F!e>#G>B*2paONlE~`WyKagd_8l1r
z6?bc~UwR%)1*~T%K1<rYj-X2cV~t(jhi$hF(e%uA*l=(-rVVCnsgLH^{j3sQ9!_U$
z>0ENCDj7wS<QNCwcvXPt4x0gMf#mXJ0R1eSGCh+qr?X++)14@Hgw5<9z9E9(?VRN6
zNidJgCe6_&X>&;e2%oH?HgDHKjz<9Nzm5jiw;nL?o-4+kt;UKQ%<WtdfyKoK8Ru{c
zsDCNJt{Pq9IvCM<dNC@mZ>NqOwbXZF81|{U()MHt`m_0?)vXkGdS3;PD;&X$mw#x5
zX&y-HCR4eLAvm9JfYyJ<(v*)ysP*I}pEBhT&G=VB!UpzH)j1=vcx5OC9XXF>A53xd
zYUXD;zm6v6u{rl4UkKY;00wzE;5H~8GVfnPvG->pd9s|w4<85mjx77PUlTj6W`d(O
zW4CA%`r~gf>*4g%mIMCmJFN;Gt{16i*CpC%I}&?3tiZO3Ik}Ppp*t-JVAep0KfI5q
zm6qX?bAz$#xDjZ^+@mLCj6Wm}=kiBnVMv=AZZMWW&v?c^*enK3)+O4Xu?bxlzM*Ni
zBC+ehSy(-U<rZpVN&g@P?7xyoMSv_X`<iJmBgpRa(b)5c@`61x$=o$AsAWp|m)c?&
z7`qy$j=qZ1tje$#n@H<}{p1_V^Sa(^rCH8bp=Xecmx({{T@!hhXLG0RH5L#%O$s69
zUrFszPt<<%mo~4-#r?l)*t2n-wtvwlLiJh1^S>{A&+j_05L||g=|wbTWj_fq*$J)N
zhd~$b#VSRH#8GZGp7}q|@s1Moo!o|X%8qDgJsPWq)`RsOZ+y~S2~|UP(s+%J#6`Cb
zhbYux>wGU_y0n2rDX#?eUBjStv<S={D?q3h3^(Ma;-jahQPlQ^-n*`au{GzQbxr`E
z;}y+3c@xp-$s#D|T!W&SPl-S+t#Vv5o25*X(hFnbVA<>8xPRC_^xT(^ehEt0Z?zk|
zBz?4dF5?L*v{A=QU1<3)n!elEh@)&XP&z6c-@l7NeNhP4zkD1N#ASlH*o6$(YK7Vj
zBJliTNCj?(7^~Zhd|>au3s0FJWyLm7T#^iZ_udoJ$Lu`z{iKnN(nCHa+Ml_SGT^6o
z0w(4s5!seV8k121i+<~4Vg3meU%v{HzAozO=nL|30$lE)g(Ah#oZnwh=2Q*@fm;EI
zD*jB2v_f&hoJL%=Jrm7OHKE=7G-_s83*H_YSimx)<8J1&u2=vseS6k8Rlxy&&Ki#e
zub5A2xd%H>U7*sDGvV7k#@xR-4&$Q@A!fNTL<9-ZCG-+>omo#^4)a8%bTK+zFhq&l
ze$aVdjO$*NfLBrn?fWnagssJ(bVUx`Ec~IX-Hy1LP|!>E0O4JI5F~viU%JHjX?8NG
z9iPE^ty{R(RkNX_wE#kefl%W#1$S<_1UWaC!Fz_N)QVWf=`XYd7g~n0r8cC>VIlKA
z93U3D1>jaP0S$U4K;q4HsGY*jV58;v&WCK@yJ-kDeajd%Lw^}3>Ik9JxqyhHw;TWI
zI0vq`7C_&b#ZV&ejMtkMq2u2Is<w|gsQnJW(SLG5dqzFg{40j|32aZA<$|6Ag^d5)
zO>St#flX-#-}r9>i0-FSi#dm|t8NeIPr3m8^Q*u>Y5=`a*QlRmIT##fE{CK-BGcOp
zHX_DqF?WUD_k+>Yf^`$pk8?+VRiMwXT-K>E1Ml8MyjRR-4X(B{bt-cnN?AX6_at;w
ztR%ggZc`gM#<YEV1kAS&U^&H5((hu3F-p0p$apwWo4=C&jVh>r`wOj}&+b5>*0kHc
zoOxGwf@`z`Ni@s^&t<z|M?A{|e+XdNr|*2n!z)CfsR3Tzff(+Z&1UT(=;m@5k3Oeh
zJRuHMPUNB8w7VpNV?D^DS&+xxe}aIAhI8f=WB&R~w9oS(vH)wa|5^{iiN^fqrOd(I
zB?Xtwn(%2wIC?W^g{uKiZ>?8Dd6I-zwnt!4Xbl*B3uXH|wh!Jn2-Xcr0%@)*SF$t^
zMT|uz{(Q08N2LmclN$IPUY>aM^)}{n3<r&WMiZ^q5<ak?606V^p2WtXM>LzohuV`x
z6&4s^_mqmeN6<cn2UIlQ3O-H^!rsbsnsF%!r#q;kI7!D?kW#_-Xmyg!%1dxb1i|ap
zcG%-0;~#eiVyPm_W&e8|hB}8~`(#22&f1{z!K>ty>tyuU#%988B~X2X!>G_J)LWVi
z;K#bNgCfZHg$$dvT?=-mmV=GIh?*Ky!>nTyaos=)a(W{`2em+SNWdTM-H$J8^HF{#
z>pv=m8b@fSgJ|n2_|EduRXsE45qc6+GM9jw&<BK(N2tAgBn<huf%T;%=&7^|L=U@Z
zLl9*%oTD(Js2uDMZXum^L&0ce1T9m3M;l)i5%akw^n#BcY#Stx-P4<CXHN;V6b^x)
zyh^aYl15tPM4-0dG&^?LLcx+~(06Cu;>V3tSkOZjU5){}Z@VER)EYD@ONnCIA!w5e
zMYqx8@!!vEey2JOU(OOUx4IYy<z(YVw#T+y3($C-`8;L^f#vRV%-gk%d3G*<r)DmI
zNhAIGxq-P5L!jX89jbKT0@!U`4&D}FIK?RgH75mOSegVf4wR6Nt4%P_fbA6iT}j>w
zO_}q49+7LD4r0kcUVl2<#r3_Q0_N%~j`l%8_=~FR>N8Pe`E&Yj?Ix70E@!zK_TSbe
z@dEohT=GX}G*~_g{Cn8l<z*J}m=Fs}#$^~V_z1k(YL41%m!K-7ioXA^2Hp*9M6J4W
zU}|6n7nTP?XL>BzxSwLen2Y#!L>fx%6NuocF^Hv6;Iga<yFMHR*ETik+LniwMy2$V
zdpLCOOd>zd?_e{gRI*#L3~kz;^G5T5`iT<3zDgAgR^$**u^oJM^uw2f%(3_GD-s=|
zi1+p(_*j?G<6H&GhRtO&pk-jC%@`J`=_oMC=2EPr^w!Zzd=_27_8bea;!Po5-?Ij@
z!~xjq{|`~S)xmlG_{g_4vUkcdXCkjv54Rsg;grgFNSu&GW4>>IF4hU^^?yPti}i4p
z0b@}_{y*dAi?U7y;@WnDV$oV~{qHpyziSoZ+Gr5E8W4VwGa5~0oJlx|K21}wa-#<8
zR{DW5>!@`KqDaev5c+jSGT5ZeC03iA@ODoc$U-LbQm(T)syCZUZZpQ|s`VgH-)7ux
zxD1whB{HXOCj6~S!V?&YQGI8?wm}mwoH_+f`_yq6<A1;Dip8`g4RCTJ^F1aNquZBy
zZ0}?2!P+(O#3dSfl-SuS_JXn2=T36LDHVhj?j)n*A~lGW1D91f)c5@=T$CRL%~z9X
z)}za4o)ihDhic&Akv%Ak90Ag~J;ZdTCsw_)hg<OnF+wf@B}WmO2OpsErhTNxX$-XL
zs6gYBHJCVICj3ZaZd-#?=%18Ijgsbr`lBn5vMU%v?&V{9vLapm@(?=SWf^&gaPXMF
z24tU^`z3ZTnZtM-D%Q-6bT|`p@-~6im%jv8C&C50LHL(tYi+J%pu=;<UobI7`;H0Z
z2D@_`DqlvyIL<g{j1=PhoDjxl5Mj;~5`XV7T{3zuO6{spG+_g8=zSQ2ek)>y^eXn3
zGhS3(C3Hv41md_Ii^8h#T0dhxZk&TUtD31Ac|&t97m}X+F?<KhkzF`;3U4G8VV`aQ
z9JQ@PlUwzWy1kB_v8rLd&sxmUi^Rc}J{W&|8I={E;>r#NLuXzbs7W-LKemRmXD-yR
zmO7Rj8cMdlFGr2eJSu*>imNJFPkbEAz^ICG=L%dw^p+#73uF1t6^o(w%WPOv7=wR5
z`QewuR7_UON1MT6_&uuuIzAtOfXkn$OvL7s_vEpx*AlySOa;e_L*RCjIeN`#B*Mfa
zbnfs4*jf<*EwMGIai)R(eBg~;Z(fn>S2v(y;x-8DE<w#ITjq~WqkV$4w5Pq0#}Jkq
zmgE=<EQ&b6`kBT#>5IYV`A!-&UJYaq;!w?ICBl(B-0E-uW$uO`6^(=GV|`G+a0_kM
zRKk6)_h6WlDcHES@)M@aN6ktXh;NZVqhdL839RMJcW@}O7zAgWN1*;nmS10{h=(f6
zQM2eYb2tc5<A@lPA9v6YjbvzCRD>zNH;|m6-b5?+D+w*ILBSA&+|gxtJ8}uO_h@6+
z$$>Ov%{Gu5<qQIyCh#g&gaP;J@MXmrs5H6ExC#_b)CGX!P)jiV7C<c4G=gwInz4^8
zhqg_g53>3qPIeyot<koq!#d_(A5x)<Wt0A&QC^&Om2r!XqWzRp<eJuAG&0ttr@WHz
ztKR}l>38CScErP+=!+P3s{n5M8l&LEeQLTQAODV@g03lHaOJHJb}S4=ABUM}r1X)x
zY@7xTiDyxCQk8dP+>%XelF?tGmbo<RpnT>|jAsmGn}{OXeKnrk*Hl6KYrzm7uLx0d
z)Bf*w$7Zb!>`Z)$uK%2X@4JFPB>PRa`GlgR;s|-*$$lXl&rw)97$1&Jgl+UH>cJ%{
zvpvX3+p|y<Gy>ws42Lc&6G&OtL3JdDL8O|&Ezw|3QJ-L<rnV6Jw=_b@@fuVc@Gt2p
zVqJ|#EK7c11Z}gvOl=(4`)yh#<ee#FyB0ruFszaJ#E-+0!a9h+e8A)i;;QFN1v+|$
zTDEbVmsbK*rTrm|OL^wN$wT#w1kx+FkNNG}iO9Br>YQIfQWjspsF$m0+F46fe&hg_
zWGFOg9YoLdwlv`3cDQF73F7giKt(YeqfGuKQN9mp$=>s57fwN0R!IHsWJBBMC~OgJ
z1p5umq~}XL>&GF;r_@1|N*&ibstA1c?V~*<ceqZ+l`wnKW)#+0Fm{MBvx4eDUT`9o
zGY0RX^fLT&Jr&BL)2ZeQ4>-9@iuZCP?2h}Ry8Y}_>S5bRi?<xX%?6gJHldGrT+cxL
zei6(Toj@P=MYQeSV2Ik2Lxg&9r0bL-fy2&F*m@q)<ch%KQYMO<o>vRdjMLh7o%don
zNWI2W>^${@H>%KrIqT1YDBwHwsAf#*x3PS{p%Ku%dLsCG=F%RnpLU&C2p<B(*x;>-
zdl<K&b0I-*^GL{;tOSBTYE{<l5)8;XOU0Mo(5|X!u=#ug>{#J};$5piyuPqHqbwPf
z&8mo|g%89xpMtU+8<c&i=S-hQLT}+kh#7H)czwD<QwV#XTq)$$5C5fop*!G-ay{nc
z?}e!f)~J!<LJW>Xu{})^D6v2KjYr7kxFgs;;|Z~MT~72I+5KSt8`|$yK}Fsl`Tq81
zipsmeq~SE<=WPd>oINq$pa5wGEN^~Pg%}J;AX>WSB=k=sYK{tk9kcz|yrvM2ysyFi
z_X2Ta^hzAGp@!Z4nQz3!8_aV{LHT_;+tHtc&i?{v;oBgT=^r9$jcH(UITQNkXW*ZO
zn=oaP77q0-L2~pUT2<w<dD<X&k$D<F$L&THejWB@mcwD|DExXZ0?~=x;n+D|&e9NC
z8>dr&leMwOiwPhRx<kxF)>}QZ0u?7EVZz%C)Y+SY^V}Lx?Z#qiGg%GPwApu|GKG2+
z2|?>!3RmEy%$O+&=wdY(oM;p%%U94Zvvkl+cnK~Pvt5VQaLiC23bL>o5>;+RzrJO=
zQJK`({>2TdUXj7}MW?CGaWQ))GKttWtNKa#IyN(v2VaK+V5uENt&Z~ScXT#{EmVXX
z4HB^IG{>0YXGHy06tLSW4Os+KqkJVyE|Frx>r0Te&>#I|ti#0Rq2}cou&ta2;rR_P
z;#?4WMQ|Ygt4{Q1`$3A@df>M=LBI@oXnH;v7b|+;_k~+=L)lp{I~avO|1>ZUOA_6C
z^8^b1aWZx+`AA!T8!#@iDr#gVf{}+N+4FQUHh=b_9TvkFtM-7gm#Y#??5s!Sb<3co
zRS7?~X`)dWhb>lLXh^|iuyeY~y7$9j$vFvXG^#-ZFxNxs43f5BBH9ed=f!twjUjIk
z%wc=JS#mk3HJLF9D@IU*fMg<5curb&A7j0TV<g`8G5O+Ohx2;^U@F_!E5@+Bo%Vm^
zH?m#9L~oF6FJhjq82Trjb;dK}FhR))^`i4JXC}+s_Mbwp8za~msgM5j<>B_56u7FF
zj*B^Kmh~>IUbFlvw%<AntBjHvH~3t2^Xd^W-Il$pMm^_6OIFd=<#S-M+z31|ow+(@
z`hwK~WG+4C?2csJWrbCEO4S=5cF)C@VOwCj{7y8pI|s>|s!*8I#OVb@vYv4Q_hb4#
zl)bx2+U(EajwXWRVwnHo^>JvI{-I310xp@Oq2rzc3J!gxqJ%5_@T*Cv6+f6*262p+
zU=2cF1+M1dGK?;X!zFbopsB%lZRuCQF}03pCQSf4WoHndlMv~tc;hD?Le$h91AQM3
zVY7udaU58Jj&CF^4>FQAuDeIXG5U~bbqbU-PNSZ2E((N<J7V~X8+e|>=7tg?x~fZ}
zntqX@hxw?ZdI{IDdFR8HOmHn<3mqSdh)rz+Z3#F70aYRplQqVxKJ(Z$p%Q9^EYHb$
zW-VH4A*OyP9<*R*rVB10DQ8R=vjZ4)ialTVMpBS)uw(=CD{iZV#1U4|KY15OH(jsR
za-Kq@g=5LhEu*pQpKvUx8;||w1JU#w2gibEqL)77OfAje+g7E)`AGH*2rJ352njAL
z(!w(7BXXz64s8_0WYNoN5NcjF<_jmI<=0?1p<4pYB@03SpV>q+%>|ps?V=f~3Gjpl
zf$xfOF#5g#{TSzQ!|q`CsZkAY#+}8gmusowtq*iY;5HOF1aP9A8nm<QJ@r)5gqhiq
z_!BQ<%tk3q`{0hkIcNEv(RN@;y|DcT`+I+DVC%(qw0Hju%-m3i9v*CYwfZnSA4kIA
z%$XRt!xa163en^D5Be~p7WVEqjWas-vfe^GU+`!buA|lPtuhaL&LeT+6ENV+Mkx3Y
zg`Wx|(dlU!?2M^H-`TO)wkDO>u<yK<l^ThkvlGPQZdW_qxd3JC{_Bxq4I77qqU<pK
zkE64Xi*ft<@W@+8l1dUn5|Sj;?1hkokaQA~K?orvNf?#hB%{*1Oz%l)R8sd|GExbl
zLkMv~2qA<Jp7neF_c_O8y6?T$`hKsAyB3ZF!;9r$-BVp`6zq%7K1Sg$?RcDeaTfhQ
zOu+Tj9#D37;EAedu|~6)cePrN^>t@?%i{;kT@k?z+KAIJCkic22V#dEIo*~QgISVX
zklDSVE<!o0aGZmCc2uE<P5_fNZ-BA%8KqaB@tgyTSnHX+AnO_}Zqxb5O<E1X-Q5<W
zJ_VrU&vGGv^198%ltW=^Od0o?c4{w#mQ&kdqO}9LLO(Er*RupEWO1XN>F{(_Ja`l1
zLj082KudJ6h0lcu>tFowg+<syZz{?{>%>#3$0@t11#GSoW9J&9%k|6bp5HQTI9&oU
zr-^IlFb_QWIFzs1Db9E^4{S!x1<m?msC-I%gFG)dbD$J2J&VVN`|o&*c5jgVl;WM^
znP^}*O{kad2Y*K&(6D_d&fBQ~h$QyPk1O<irR@AcFU%W%5uV!)$Bdl=!DXI|{D)VW
zM&>JVbLcT%X?c;Sh0^`3??ABm${@KogIOM1%im5v2QE)lP&IQmEWK`uO~F@~=k--+
zcvb-A!7!|o&www(_F%ZT9IGW6<e=EX989&L;}i9a^{0R|^$TD7xe{cVsp6Q=UMP`n
zgV5jI!MtlJTpXK-+pg_I-%B@{eCRH&G&KZ6$tzw|av2TxhQZbf$`YQML!KY%6MANY
zMB^7Ty&iz!McOE`*kWb*%!N3Z<b9mF7S+n4pvRB1AiA4s>G7x%aMB1={jik13QvG-
zPXj^yYcUjA8{&$6=kfFWQpj=fg$;M8vr``|I7<3rYj1M&t$5B3)<>b!%sS?t2I$zc
z7_{n($kmg8_3xIU%iKWbNBhUx`w9q(BtK!T6%1YAgAQsU#(!4e6ysEom2G5NTHYYC
z>@4&8Qp(0uiP0foJM4+K#iN%u;c>5E^8N)dxvM(2UmOR|weqQBw1iJf(<YDRPu~0S
zA<z$HU}drry*|;Ick(89aB2ryy#2xLES5u_w<E|tToC7)=fJ0qQfzG>57K+Pg`tzf
z;HIuQWu&<{^hG;2{Ca`<c$aZ4Wf>$ZH!^*j8r~4^Nqn<~w9lk|-h3&__E&@P_8F|^
z*I%Ajund>oBu+pV%F*b?qm~Wj<NgwFcGV`ZKOD~umK}nYI49;*T!|qrN?egrKz`?P
z<}%#?MJEbNeO)O7XMBJe#9U;PuKJ>Q!e}hUA{Inl1oNTGsZYq6y-yOTyq<w=m3O%B
zl)c~-lnu338ZfXj6%P&`fUS3RAX)b^gi5}%+;I`$YY_<It}}32>IGtezGvq>mZ6vZ
zDj2n*h_Yru&~R%dy!~?wN-edSd@%X7CT)errH6^RWCF71o4MtxM9>@?McLw1h<9I!
zCjT7+>+Opm_r@uF-syr`ig4(5d@E?Hacpr{^0lRh!M}Pg8XTC&jsJ$iTdNqbYoMLa
zRc*1Qj)dpVJ_0ThBkupRnwtty@R{aKetnN%%9+8KR*{LtS<V={DI8kelgWn{#;rpa
zLC5~XY(sY`26Pnw9`?uN=KVBJ-_9%-b;HG>gRnJQ1^jadgSEp%P&|tgG-eHf+Q3L=
z+-nWjRFJDYbU1n}B$n%(3OxOJ8ru7x=Pv4v+(zG?@@~Xr+iAo{-f_d)@AILFKN2D}
zlTmbRT3L`|9gis<4q9I+xBVzZFxceJ`pitmm1W^*Z*!8)AKO7?NFOwawGg|z`+=`(
z1WL+d#hroiyfcm11Wuc{VzLwpO-@1Wuvnlii})B%!Kbvl>UA*<Es}mxmOKjFaxZ~o
z$yjb&H;`H84~F2_Y*4v(1w4vuP~m(Mnms7LZUay{pf8K}ZQ@hwLQw5c9`l$-EUxZd
zustS&r5!nmIhsp&qizDw6CZ!}p{$MjQrKyh4${>cd{_4X+SB^7Z&D=~kLu4wv-(>q
z_OyweswTqG!HY1XEeU)R0<q~*8B5p6N56kALxWl{pW=TGBv+@2^;<MSG{Kn1xE8V&
z*Vim!eLPo+tDyc6bt*;#vjK6*=(RJMZ@7B|nyx#7#+lLFa{o!zUeM09+%6J-TPbd|
z)dkm{v5@zeayr#Ah!20t)c?kT<?2QrKdn3V_`9FH9-GnIVIZ_F(}dGcO;H{&6EyS|
z!IHE6uy_mYiTNk7V$e%ouy{7M7hGq#4n2wWcNER~xPfIe^&{x#`MA^*TYqW+E!kK@
zpK;86${0u_*7l$^N73N)La}k-DlqGLjn^%_&QNv+dR+2En`Z~fN%h34JdwB%^<iAv
z{6Hwx?adsUQn1_S6u_>DAV2qkE0sN&nL`e9Z4bbVDUm#=%RG4WUWTI4v&#}r2?&Gl
zGtuKzED85QT~k%CX;MJzR#Oy)Ps1L6E}~poj3(g#gZ;u#s<lk4arYSa-kZ#Vy4~P2
zf`{O?6Lel)p^sYMgV0$2BzPZL0(Po_%#w28TAPTAbfSql+c;BCV-{HV`ptfQx`M_t
zL%{vJ4obV7DD&`-!iMz)aLS?zt!vMt|8Fnwc2P3r=Mzkn;}1={ZN>6^Zeo{}&)EH@
zZ8#&9GW8Sh3i>O)^TzFkTroU|m+slgr#VGoPGSrM9wruqTN<*=5RBg<1L^fQ?E1L`
z?6~@kDKn3V<HK^G5l*w5Gp3Mhc@XpL7lQJ`3*tL^u{#?n&)_{6ys9=(-gPbh_i-TR
zNdxdhA+dC7Qp+5i26IvDIsRWn91h)b9%j6xvr&BtD}^~=v2Z9Pga)E$+H1><DC!9L
zEd!HD?qGSil_e%rfn;)}I8rYUOzf+m=i6$G58nl?J9Bvi+X>}`biVGh1tYI`qbOpJ
zl~>6Z@#bp^uyaXbUl;kKWFWbf4X&}QCE4KNBBdPXB>34|N}nSHrw=a!wZ9u+p`ki;
z>)x?Ol_$K!g8tm`eoVHP&P^{bgWc+pOgi8_`@CEZqPmDubvNP_^#6zFRD9xiO^Kx^
zeIRhoM6|!70yS$KK<nOf)=ZtFDfT7s`TKF`y#Ipbwjah~+f?-KHxz>hhr#OjGE{4d
zVrG%%&}Eks%5o-vuVX7yOp=3S-Csd^v$bs8ZW|2#F#;{0$grVt0ep_S0?)q&qU^pk
zZpJteg>{s^5J%yGeM(~7+n0?j-iIpo!B{h=7x8~0AZzGxNXv);_i2^Tbn=M!fBP5T
ziD_K^qEu|qvVqkXZbh~E+o9zc{keNGx@%a&(856U%|3+sxvl(9dj$2fUb3JYflR6$
z!0RpqqdT!I+$IG=?N2*|>N8lkTrLj1@4)c9KEyk1ft&_&-lm<1p;j|^ho(Q)-dqE^
z-TlC6f)>m%9*xpn23D@Wj&j{+TbTazKRo&iWs{#JL(I!^$j>H5-R}ErKu{?9uP^3y
zz6J1|*gbL=Q+QG7j2ch1;hTtN+FQ%8)YuIgmSupV+d1gCxClhakr&MNE~75pL>@n6
z5BCaQ1vl#kVCgnPxHFJuLZ8B!gVRN!?XVj-yo!fzU*qBIoJ(*$E)U&q7eL33%TPf*
zR*$bv;ILr^3U_iK`JdjDy*UEYL#jbCZW`?m{lTtU52e<t%4(*%v8U(B-FU?g+QVv@
zN5WMmxqDOGm`~0+zpqTaF%`~tp>z2AHC!_1DAetJ&*~m#@?@bG8s<ks@~=dYPS7sX
zs5v7P4?Y8`V|Ibp;Nj#Tp}A2v@*$rpWY!yg3Jw<%nRVa8g4wfba;sA(Y1bkl;@lN9
z5~aaxA5-e39^yIOR>RqJVtWtsA}04C?1)mMY+)A=UHdE45Wlcy_+f;pb1_-(EEsh8
zE*4q6wz7V^735K_ETc~*`nMZFM^qrOTsU}Dlt51GA#8eZhBAhFVx_$qL=0XHr#bC@
z^W4B^cob+SN8ybf!FcV+R5bJc!7Lpe`Q#_bIO>@Ut%jVz2+<;9FqZ-7R?xkP@?lF0
zAh&fKHhtF-D)gvJWE)%dJJSVAj~xM3lYLNI6@{-}rb2Db4k!(8;(auR;~uk1nA0<e
zdA>UUe@vn<sNyI0T}+-;^#W+Y6ej9iROXO81T>ab3+;4fwwvO@o3@03bomNFnsW};
zsGh_OStQha`j2=$jpC?~Dzu4BMnk=i+(p*FGDh`>pMD3?8i>i*FdIJiI)c)Ax|6qC
z!G{P}I$yR}jq{XZ?=zt|%W5D#T67duU;3d@^LZ#u?8m?BxuSeq7?+Twy1YYyqU5e1
zNmmN@Hq*V*fO6ZR<zi8<!{kiSgDD+$IHlzTcoA!EtaCCXdwGIt<TyC}umX#JoaPn#
zdtznMM$j1$LkyCgAg7LBy$yB3AMF9D^R2SjD_LM`M=XY$TgCa4=Ah=4Xt3Tmojc!D
z!Qzr4*0HENYk6D_((;Q|k)zeHEn_dl3kz6t^nGU1Yzd;tcI<lvv3k6^f`!>w;yS0o
z=J}df8IaFpOVzOOE;&&zjfdJL=1@;Pe7gr%xUOM$ycklAzENYi++#JfpI{HxzlNb)
ze;SjN_7bG06=k3QH|I_{Fa9|@11-%~^SX$k(BR|AL+^%xVbw<H$ae<)`NvRnJgT&1
zKE1!%ud)@p<Kg;XFMQfH4xKin!f>rX{H)p?#jmTux$j79TT>2>2g?vsW1wTv6o|F$
zfsW1=n78T$x7Vp+EeFnk<q_(TXqE`2CF7YyHij$cyRRM-4i0&z1&?iexMxT?JXj&d
zO3!kHZMjfl7z7yu@AJw7p*-ZwHdHj+5qqj{04JNi(0V7IrBO!g=}q!!uMg#hLo~s<
zau+KZdJ<B4he6YMa~M=G6f<6vyP;(?nElOX3)YyT(@qNzZJG>0QJ&bgb2h|Wh+;0|
zCUcFOdzsdj9O?yAANFQ4KDwvGhT!Mi^51SOWv&1QYmcI_*%!99bRT-r87t;_ALx>6
zjP4JQv+&ERSU2Pp3!<5RXyYIpbZt7CS+0dUH<sY4qHt99A1FL&bV38$?_v#Wnp<q{
z2UGu9j89Hgqdf5duRCi4SH9?A-H8x#q|;vBJBO<zg@T&XQ!cyK%q*1ec+Sy0R4<WI
zX171sJu8JqRtU!)?ILDp!T;y)3=BpKIpK1qWz@)g2fyLl`j-N3J_Os6tDr+C9^<c!
zfTnpVD4dK(-BE7vCte0Y(~sesBD#0h2eamjRXAZlB~EXQ#o|{UFy+}P2(mj0``<+1
z+p8s@T)+j59v8$r+q0+-7Xd26_n_ITmC$+kC%0KU7rM>f1csN-fmInfd_9U_vL$7F
zrVlJzv;QRej{eQ%*5pc^9|H|3BcRrE9b{4tV#cIgR7iVZw@Wg9t{0s>r+0_@ch+Ib
zv!1ADG7F8>^0|IM1uwoj53YZmi+@jNp?`$|np+cNW!NX4Gu4R8j}&o{b4Y37s&ovx
zO5E3}=U7|)UM8Dj&D#85FbCsQ*0k*mxW2p1JPw%<i>8@5s7451$C#m7&`6lNxF1@^
zSkQNGC*~LHk}F|>xM_+b|La1oi>WT4+!I;mx~LoEnKA6MDV1jNN5Ht-a)d1mg!CfN
zh<YFx1m&WHX25DMuJTfsHfCQG$$X#pC*Q0QzSF*h*!2Y8upkI!33_bSzmw_wZOQu#
z+=qs@%*g?44x6Rss1*PZPkWU+-LF7h74<2WYccu1_qpX;H#oZ@7iDUOU~`Sm(*-f;
zIx-(kA`gP_Gz(m$6PRVe1>Urgvb)lCOlIl9uFI1_+Aj^g?aHuNe=c~ae&u(=$;I`w
zgL|gT0onL$v6n8fx&{Q|+}n95-{^o|D_h0Q(J{<FS^_o28@Wb>1kGv_xG3$j_=gY2
zLa7W(dzEsNFX^C}y8=U}1oKJLY|+lRkn2|U!#lcX!7@fbm!A@5`n3rCtyhDcauXNb
zv}CGQv)HTGt1x(x6!cq4(W@|&PqwEn;>}^;eR?bSf1ZP_fk2&^PCm%c7)uRIS$oVt
zxYvW@gU^$3t#LVI_a8wH!$)Pc@e9HKsFH6UqyPgOZNXyL1zwkVNNiplLcEm~yy?$-
zZnN117j}fB=wfqO-m8ruy;fOf5`2R_`kaTMSB#)KfpWp6)_kL1KMY7ah;Olw9FU9$
zJzfm4bz9N*<s=qddJ;^&o?uT)6JgsS>c4jlhd;kc!7S#j5E{H2V{EFKWcGdjX4*sy
ze-H<C&IN+}&?#}e&lc7;GFzDahxqwH7Wm~2v0eJK@g6owSfi4Up4(5;T|>a2k;HfC
z7Y4rH42Z{l0<M&XW2otFrn}ghNyrCoQ0T%Wa}E)s#|W-j<zdS0C@7T#@u$z^5F8l{
z_jS+1^he2%vA%%k+sm>3XaxMZL5|(Kqrh@j2+TM(934(OqS=`1<n4;4neqVQB_hbG
zf3rKC#qjc@0rqz6inqRuN4U?yctk0A+kc8bybl65zaH56w<}a1vO)K8CwScg4K6a!
z0xdc}bo+Y*9eN64Y3f0I^h6&=*Uv|%;B<C>;|}zU?@LZHGxT>^3=T)!;BA(S^8R+b
zMHtD-^|L5@uFq8c&cjsU3cAl&K^^#5Fx<Baoz`kXw_&$f=lWydVg84?e^NrX#pPV9
zJb~|s567Vgi=bu7EPh=X56#3Hid#*yny7HlwK;&+SJ(3~xqj$mp9S>^Q4n#kkz1<|
zfnnVTV&|G;=t%ROt*jsB?Wv$0*;}izX<^tlR|i5@A3(W;Jnik&cMmn@rB@hnaL9l7
zB^n1^aKpsr3lO7X1K|ZK=(NNNGkixte3TE`tybgy-}6x?XaY7S9pw?re4#F>7<7AE
z<Jva~)G7PPn|%MFd&&@O{alFux+~E5L?@F(i}{)v9G&~BVpGBlu`#i3_ER>y^K1rF
z9;hw**hd|4T{rl%)e|jsj^m#vSs-%h%UgFZhnBo~tnPdOjE%@b<)`k<qM(*b_Dz5f
ze(9L@z!o*Kqgg=;vF{$yJn4)jm%S>3#{SWuk-rk#?_A=gKYOruYclD(5QdJwhhpW`
z7;LL?=Q`i$9Wieetbc;!DD317kNz@g_7zaP2o=v-UIP9BZeTI>7;Ae_$D96JF4T9a
z=NZGvOYEx3Jf7;|iW57qLz&0jC(Q+G9Velorw^0u`pjJ%bNKWIB{uD^7QDvf;c~kX
z=yov%ot9aUQ~NV34LHeWG<#q%&4=>NFTj|U=eg2&4Ga3Wg1ErhSba$V#neG8%_IyW
za?8OeCzP@tlsivd#GZ*RV2{NYKr^cdLJiKaghkbO?dv(r;cr+c^~WTUeZ(iGTH?yt
zCsDRg!DqZv$KTQE7#i3W<hqZ<s+O0S;;1Ui`kg^Nu3T_G9KtMrx`OrBGw^mr32Yde
zjU74z(ev6w;zq<`Z6aNfoYc{FS_);$25{4RHt2EkGf()fkD<GFLbJY{d(2X0<yO&P
zI4+B+WouK8G9FGHp>t->Ct}I!1bB6$1iXWfFn5=;Fy!3-a_b^Yc9~90lt}3Iw3GKB
zpJ8YYWmeV0QQChyh-Zh>dCUeM4NnEFTI#ReiD1Wj16mKC#oKLzpil?^{Xc17Jue^B
zeprHkhB<UR+=ne~KbXbwe&DooIJ{L%#fxKCBFnjm&lAsL(6?=%`@w;w>?($t(@U^^
zcLJ;ou)v5BBJ>}<9iG#B-76tnocn4wK5R<FYWiI?%ijYXMt|7&8M$bgN1n2lrJ$p)
zgEq?on7{pBUKo288haW*T(~{jYb^(_eHq}rk#gSU6=4233hrIB!OG|`p7ZS_Q`RqK
zYJaFh_q+r|qZMovofAq7_u=b>X~f2*aBMjBuH;TUdE+SHgLF_nBSDyBe;y4TbwT6h
zc5wJbKZl4d;>z1iOx5fIH?iCXV;2lXkGkFPX8mI9Iddl(eW?VCJC*!Z!DW;dIB=B&
z%7ysEqu0n^;?YSH(YnD8dOb@;Z5=Z_YUhlD4;SKr&j_!2hQaiI&yee~09`aUgZDr>
zH_{#8(DjSx@|(VUwvEgwtORQ}O^3FtSGh#5lLd{U9$!WeFgrIB>c$FuNB$~ocsU7@
z6Q;xTS*Ky=u9M(<$AIfbonm&TpO}rQGbVqz&h##qfbI|47yfVq<-RDC3=I(1`mY4(
zo-6Er;ATAY!ww}eq2!lb3l2-u0M?ymC+_=V=D29=7}1@totue!dQYL<f~VMI)G!E`
zkp;#n`n*}afJcRAq3&zSY^7hu)&MQ6Pd5VBi?;AV8US7<6=ICJf-{e=KuMJ^zi(6u
zhDH0)Wx{b5|5FM21H;JW6)64|9EV=bR?Ix*3?@Ia0<)6g-0pTFlks~@`rJnto1ue4
zC+gyVb|E<X%uL*(wg*+U7__8Yf!s&RJZvX`@eWrW)t`R<P9$UP>QUgPU4kuN#}d<d
z7+X6p03_R<aQtzU>9uD=sZ$urzCPeCU7ENZG3g|9pEPOw%SHb*h&@uX;O(?9s2xd;
znTZF%W#d-fE&U@8`WVb3e6(TF;iGWM>oR8FqYlUE?VyZ8mbUZ)e5$NK@AhyOy?GG&
zjN6NPJ(VbniUn)Et?0UBCu9V!##ZY~;PAzm?L8BRr7o*@MZjqEolu22tFpni^#n)@
zt;;klOyT^CG8C;GY9$TgWmojQF{qjL5R_Texl~Cxy}sZ{T{z=C{;Y>#BDRg|PQI1v
ztZwCDaGw&z{aRdcMRqaXQD&g8HtjARycKNxrPv%)$~2zrMc?p5Ov^fszWsEeu1`;L
zU*BM&^5;TOPbGYRt4(g7LLM-1C0cs;L+zf6(A@7MH*R<3W>d%o_tpZ;W`%+2sl#+m
zUdg1kUxm8#2W;vhLt>EJXNtTUX1gN~>Y|&)-s35Mc3sZ8pZ7wSJE8pP^;o$0emW{2
zrNTir3v*J*B{--zY@?r%*DNhus~Q1{I|JcO`CM$t_`#%4Z3PdJC8Q))qREjoX8(C3
z1dT`nlQ9}#F<~a0lt-X%-9^ZGyAo<<*ONQ#dfCvzvk?AB8%;8fusb`>frtHe+9^hg
zRr_s%Qw=gK9!vWNUt1`jaGIRMO7V`C0&H_y1~x|vxw7rMc&riSa3@$(rnbo{@AzQs
zt3sVJ#c=SyZUgOJ6NnvpgNHVjvv0bAV0C*w4h@KhzkS!CaqVx`s=5S<-poQ(*H-Rf
z`iK2KT!BIJgV1nL89LY=$1MFg9CmLF8fDP?ui!K$c1Zx~?VG%5(M7>J<%?iYRW9^M
zS%H3U)4(8jkXQsqnC|HzykuGxR*f!1$HGv2+`bzPy(s%VNotkjKr?+;Q^?t|9f$6$
zfDSc#o^0KdJjkoiWc+H#G0TDW4IW_7;w@I5-vFKCR#EV{h`Ws5OfIo0pq$%Y=KAb7
zb9@|+vV!Mq?eR+}-?LBrd);|d^J?ZFCSS(6;Y09d+68oamV#|dE(<BoG!V|ku;lSe
zVU;;$RBMYtu_KvWj~O)UJXV@hHy>1>63-3trangz?ak_Wat&o^$I|y>TW{`rE08M<
zZd&E7+z0ZHGX$^9v*J57dgRTe|Gpw!+%a=Gs~a(!_8r#*37zexEEZ7eog@}pt-_i|
z!@z4<hwx}YB08KoEtb6>4-%1&*fLX_brbD_$W28M;Sml}{fM&UQ33q!>r*K9DhGA*
zYUrhwgG+zpW1cIyGe3pHi|@pYG8qAivIlJbp;UOK;L!El0JLw=gF)$g@ZA)5bS+7R
z8@dy)<A)ZDDvCz^>U6O2Yv3aJMl0#0PS#^}1T<>rp@F+Au^nb$x6~-=0CpjUNpEKD
zt0q`i_zN0i_pqcxGqCUuF>i-_;c3pf=zb%cmv7I2#;<k|6ndK*>yAccU&@GAr?An*
zA#@j9A@u!HflWECLPNJ=t{Fo8_T)ce+u3VTP#U5^nFp7K46w4<Gyy{&PlENqrs(~}
z1yauBVq<UOc@$p6wjCdZHkDK8Ue(S&%*{jZvDuI}d?rZWS6GRv&hXsHaj09rnkDLG
zqlQWjv<xrj$`>K*)0s33s`LbdS}p!3>jdDz)zCD(O}tz+3yrm-pyl^c=!i21{Y&{g
zd;4sBtA7?G&NsPepoFUg2gAfJzUY50g8!*JiY|34m{*!3Oy949iEqxs*OinlJ8jOS
zUypIEg<T<M!g2@+)Iw=eff%<&@D=0ZG3H<hWcpQL_c$rabq)%ejS<l4XbDA!cB64s
zch(#`7*~Gx!$*DMz$k#af9Bnx_T~aqPUrl@j2W2XMfZpq5h!i^!*4$sh88*;A8n@h
zS<Vk$>T{V(=iDz-Jo?UC9A&IncL;F>sNXKl<EonWO!Q%ZSkjt+qVC_sxW*OKE<NPi
zp3sc2Z88{z8KK#od%}IyDlDuDf<%!XdhQ<%&STDEQ~DcmNqP!-PogepJnRDdbRAKo
zHbE#HR|1*#N6@)f2-JV@14+tt-n$OLdQ|{w;cCc;ufl}fPz?0Fj3Zo%z)*E77|i_5
zEEmg|#eV`fnE915UZ=R{g<;ST(v?~EwPE6g<j0HmqHfJI)({@Rh{GpDQGYFFTqSR#
zKA@N9D(I*Q!}`0|x$m1aa4hPAUdL+1XL?;i%fE;DjM_pB=#vOlo#dYV8P6SR&WI~V
zTxG6RbHUYeHI%A%a@Qa!Jf+!-q@M>U^=C5QNK23paN_5Xb{D;?z;9S2ejArh9(VGt
zrJsPc9!J3Ii6)rgYq9kwFUWEZgwoC;=<zlV4NkZTYj3!qg|-Nd4-W>_-B+08ekTt;
zn+t-~Ih6df1>*;Z*=@G~)ZP=0m7|*YL)&6J{w9oa8^4(7xxb~orZap{>_k5`dTthm
z!ls>$xMYVM?RIVhiwpg*{oFqI^E--|2BuJxYy!%dNQlY$#EYL5aNWZyyy4YTUZ?NJ
zq93fo7h6}LLAteIeKW31G=H3x+NCa#7kC(EY4yjHc*^U?M#AHUFzj|JhyJ0A!J#|}
zTrz{fNw0)8jk_V#864)aWhp|-f4Ok`pHh?$>?LlIRlx?!Kxkd%g=VySZ>7DMNHVs>
z^Cj)0#wc0NVry{wP=&90l;RR)4w?;}0tY&}qdYo|mHL~p;FHu{FplDO=c2*wa4_hO
zpNsk}DzIJaAGB}{0-N4vKvdleIwrhly_bc-jY)Cn(ld#5_5sv?7YaeIPD9SD6jsyj
z%{mS1m}f;O9?#WB-IwH1_Y?Sk&6e2v?JT@*-3NR0RZzdUhgjuE7<7}_gKEbb*q%c>
zf6pvv_$g&%QG+Rz=a85jhw6RD(S^+cyPgZ6$xkeZ{<8xUNj6HKtQH1Pp8mkAAgJu7
z4Z2&X)84IJ&{$~7%(O$nTD^{`x97uG$Hi#+_c$1QxWaP2p5SeBp9qT2V_A%LIFoEy
z!Y{qn#ulAi@ZJ^58itkQ8Rc0Nr_I1bAqX?<zVWAP)X-+mHjqyjF=?9|eNM^I;sfQW
z%9o(SQ~>`YA#|pEBP7?j!m4Of)TOLr^15T-@G=(`#U4ZXlcy{`X*2s3J{w=_3`fa-
zC4AkQ$+%)}GBk`)Lfw*Kpq!~7KXwmr4mblDb7&{E;V`&p`*UX>1z5YOiFN3^VNxAW
zS<$uNVwcGaqv-s$sg22=WHP7Qzxnh2XTka(E07fpXId|VcuekGh;*kjS|;%@E^V_?
zJu@2i|Ik3|ga4M5E|9U#{8*+o;Sd|GPB~Xqa?Vtq1^0nc=2EefyjoeT)0=uJ2_u9X
zAB=FfpCPV2a0)g+7i6vi?%8z_4f>p9d!J@tj%+Zvrf1`Z%w?#Pmka$jT*TJFce&E$
zC`!keLermX;@0!^EQazM$JUh4b2WekyPX8R`H}e3Dh2=1aL4CIw_sfQIb2t;2R$cB
zz@TuR;P@sQTC>*Cj*UK}NiX_cosAD)WYYe+qf}w8&1F;KS%=|JWZQ;d+axX6nVm_q
zhZX2#I1#RgtU}Y3a(H@T6H2z^vV%Irw(sx<*&!4<r^tBgfC6ZWek=@&n~xfEa<P37
z<(`J<Ler+Lg7VpDDAq}5LAqDil>3479MDCLqbW?cR|DVUOkR_nefZg}apW3M2an}6
zbCI3qHtCcvvW&*g={o^t`k?5+rP8`2a#-(eV(8}&mf{s`wzxk|+m(QtPb#3>&YoaB
zYag5IH~^b3XhM*n4?&(G5Ye*|EGKEhT5WothW=nPa~9!e8#9d3J`Q$XlOW_qJXUro
zWFpmUc)l?fM!s=Gr-@F`?U#&o%qACA<z2R8-&mYh6@n>x<R$|dB=@RiKjV^!e=?K*
z%t^tCUis)M-VXA=a!~I}oYKtgWqRh3C|Y3&jfXwJtT7K}_tV59Lzd$!*BD|yr*QG?
z5*+zF7hP_Q<r)K@v2l76@m9=vke*hf*}XeRUw1FdDRu*oZIkI;>IVyG9ydR41vXU{
zuy=R1V$Iu6+;Vg%rr75}{X`=;YcUghpIm^7*V;TPO&u)?$q9cOp_8)D8aM9oL3@Uw
zucCt6J0*ecZWU~_*$y#Rwv*p+3OG4RP#0{OXkMG8WK=yfHZujegDo@2Q*za1DkwKw
z#LSGN#GiMB<G1qTXlqajuI54P+w9Ze@SXVfl${NK7YJTsiFLcW53iXW11Yh@1AH1H
z{&mz4y&Rq~x7F0q2s0J;8n*&P%ez_y4+z14ccwUOYCab4Xyp<QQ&8Y(unx_)(y!Ks
zoN;BSG;<cb3}U!nAL?%#dvMv`izsbK2YLQ$)@J0z6CPbcy;B$9`wM?;?#lW6JF#Gz
zbqw+*^~7Ygo`4T!sPSbC)MgvOmoeEWS6>c#?+0MKP6|&rl#6!9E<@As^Fnfd3XJG7
znb_(d1(W?>_|ppNzoqHI!!vZ>y|fXuf2CpYoGY+*i#Fch83Rpr)azDR!@d4ofSN~J
z__m{$q2<YUChd>>cCHmFfB6Zbrm_nW153D9mXv#?M6#U9KV11woZxX^U>$x{&~Ee(
zXk4&{<g_%Nn<ItFnC0;H`bPBZpl<DEGbT&;mswxF#E!2i#f&=%`27pHUsaUok<=6S
z^_q!hP6NPfv?dqDm6YjY2r3T<5Ru=&>^541zil9QC~RX_6Q-dub~aC`4nW`WTe*j|
z8Z=EU76(wzddnMYlzss#n+O+>A5~{fR{k*mz%)Gak@!;R54ug~v2W}NRC!YbmZ~w#
zYxg3^kzHmRu9f2Tl@~!0p(?cKoPb4*`S|08F}A-C2cJ=e;Gum8%o8q>d%l?28=Zix
z1!j0<?JSIV6v87jvQVAQLUzYh;i+jDMzs-R^RFc@^)umbDr`_(S`2V?6T;=AP%ot(
z`m({yw@+Ut*|AqhZtRDZZ@Yr4nH9jKJKU18MIWBW!1O7^J{>q3jg-fsDgQjkBg0u8
zkHm)kKG1oxod=OG-r?K-&WvLzi&F^2rzp#mzF08)sz%N|N7lORBwT-AiE_2s;`z;#
zCx{Pc?$_snx8D;!L){qnY*OI&Y5TDAQxWX648Zzz9X!&J<CulS3yui0bXa|c+YUXB
zPF;F{%fEfF_2eQLvgHCA<R$WM&z~~kULfWf=7G~xUFbb36&h&vwlH=Ub+zp==T8u?
zm?%cvzX#0xcXHjd!GNn*f@=M6@>y?!Pr>oTfO^GkY%<XyY%;`jkA+Ip6Wo4iBhQ}b
zf(FMfLGqYOV6JiqJ-(^%irodE_UkIsJRSz_cZ%?BFC|_Xa~$ul%7Wx4?%<mj1kKM!
z!_unJcqb|d`n`0<A76^GZIL^V_}T-s3I>8%;6!k5JHo9G=Are2IHue)UZ}3@j@Ap!
zar*KQSg>Xk)|x!$&l4#hpuUmG6z|!Id27&a*Gu+$Ul9uC`LJ-)cJ!&|_-<Aa+D6mw
zMyeT)i0B73%1-u1Ydc!M0I|WY5#lF)&iEmx8Vvdv3PJCrXp%MzJqxdJ<&U1=YD4GI
zzO$G~SpXDG_C`DZOR%={EVQM+;O2@DFpG=A3cWD!95){nV{*Yk?hO_j4pP_TI5`BR
zX#MXtK{oWTxS5<+g~Tmr<r6X3mY6&bbMf87llZ7O9Yl6uxruy#l}S<HFfxG+zfq2N
zR-}SXU{ADn3S)J%=aH{*zS!0C1oUR)pMS;691hPR@9$?WvN%vSk<NR^H^ri{x;vLF
zdd(Xa-{d7TcHw4vt|SI7K&_bqIA>pi71@>0+J2S#QE{xzwoW+oeiH@{IZt_}wNSgp
z5qk7lhRWiXWvaW#HD5{di2!dfdA<yiS55=Vh8NsY=tJ4%hdd`|COaKTyDaT4lxIE+
zA99YM0|cR}^H8oe=n^rUQ^03u985Vv`9l5m;C|`^^M1%cLH#VPnsnCk`XKSYvmkt-
z8Qz?ig|~MF<F`rK*w`k4ruFXP)<ap?p6Cf?#JK8ulsd!a7N}Nl4wmDBnUeZ7ZrN+G
zW{e)M|6dR4F?optyF<f;bv%CMW!AaqA1L*;#EebI{U1An`@thv7ynDFICWHrDhY(7
zCr5E;Z5&MN6M{w)qrgBM&!rZ(gwJ(n(4(go|6X5$AEs1;+$DpjE$)c}uNt9Ccr^3y
z(E}ULOlY-GfLrPbaGyH@<B!Jjw4y+0m=w!P;uE2+d?xqLDS=8(oz$E3{=7C5f?gy-
zXpBFrQ%{Exdt)K)W*|!c8N)3;CZl{vD{I~}4946#fEM?PK{P7>Z${+d&o%Mbd)0Yp
zU_aTw4Oj5io^xm)5(>F$-l)+z9dusj;_+W6&|u+4VYC+AVa`=E#TokZxf$RUw}@G=
z6neHr@??KvHzak6<>$tW=NY@>hV6(@5(J8aJ^0Q9;`T|$bLmtQrsFsTJ$jvF4To35
zgpf#dI85ArdJfyHWHA1%J!%F;V8V?{7*sV3jTW5)Yh7a&Fq4>h-6>xlvJ6yDp9Njz
z4c_u^3`T8CM6>c*ajh$zZ9gm}_wa7;iAsRvu8l0&ES)(&qC4z^0@e_?6jm(Agc64(
zDDCNm5rN*Am7fC|^J8K3SU1esKMLA^<#B~%I8*%o!V4FsLZxLhH(2E^*6mV&r=!PX
z0697>{SPqd%2mQYxE7VoL!m7&8`YlvZ(kpR>l+VY>koYt5ABbu2aH9Jcn^@O)d;te
zwxLLRn3w*kXR@~8yw~oj*!2D^KCX#Grva(_z||mV^EZc<=r`PW<3y&WlE~ME>fqhN
z^SI&3W$b_N2>SQv1&aTqu>M0|w7#qd(nI6L$0z5Z^v@l^D?AK>#wUSvv=mCmyRjMh
zsVJdm>siliXdX>mF;yirZX@pfv~$dL3(dv8Dg|XOd46`%yrM>v>w6x97+Ey44<85>
z!713(Zv&4g83#`eu0n;E4|vT#gl1DVFxAh!(YRC-N4!gg*JFq!l46A0e;&ju%5iA+
zE<*4jZ<B7<`Ml*zAKszr2=X#BuvD&reJ7V<e{BiMx^5;mMkui`jj+>qJ`P#7m3-G-
zp=m}QYyJ}sqPUrCw|yb?D>PV;c{=z?wm|7;JJ1|?1mcx1xCK1q4Ye~cM{h27b(;(#
z?~!1Au7h=aY-HXF7Z7!?6gRDQCx@U+-0`L_7qy+3y6H^}if--}`wv_J3o{GQtnD;!
ztr$Ri`xKT8U7@W~461)4py}csu|VIuAj23gagW0dSIGVG<OYj6cNQD$wZZ?xRlepQ
zUmUEy4^KC5Lx-9pyry*)7oAKJq%oFa`(Yzs_SSP4uvZN;-qHE=OA-9EPeRYjnK-mL
z0POV`*gXxvn1`F7MRJw9wQtADUJmen-yD3On2XYn4$R?8Du27~6m<lRpdslbYa0Je
z957@7+6;?A#iLT8E%6E@ckYJ!I?3>PP#SK!l!mkD9CAfP2Wt&f!Fb3#^cbJYq|NWG
zzKxB7=Y2z=^y)&c_KiHRTf@M6_kDh-&<X$EPQsr2D7yYx0b13C=#lY<54f3tMfzr#
zBZ+|sb1Cldpgc}i2>x7L4Dm)cnKHRq=+IOJjWO;h{ZuSe&CNk&w?b?%b%V{x`B1ZS
z1lJF<hT_R_Jkr-1l@qr>hqDA$C-=kE%fit6zvXbIUmV&VONCphTXFoSa+LPUEDJK`
z(9pn`tRK1CV%qo;lWLT0psaPyzpTf~Y`pz)80Pq_gV4*_+|VWx?H8Y={Y6(ksCNOn
zc3%Qnvx;HojUsGo=mV+`nt9AG7q~;8ZB~6Y1}&ZgzrQcW*j(bVZ#)6BR-6FeM~2v;
zQo(8t?t=Kka#l?Ig}N(6{E5z4)T=o`T}yJ$FI9l)S3s{rKX_=`KGxE*ki|Ich7m*a
zz^<SK+piviEvj?z_@Q;gWibbMWDLo&Jmx;P8_3;92}`FJWAT#jeE4T|)YVr&V`vl0
z`?(B;BvZyd!9sYdMstb$I_6N{$jl4Fu%%H23{EZ=M*TT}ONXyVr@U=oy;1_s`5Dk(
z*p5G~1Brc*4~iUp2nFgQXPB^hts7j}yAzA&RC9+{b<Aw=dMJG|ljar!&|ugqvBs#w
zf}ME;m})9<ir*3RI2KF$#Y5b%{}UE5jCR>CRRA~jgiZ80^PTtOt3Dy1D=J}?_uQau
zg<RalX9qD=XNl!UJ_#^lH5gc~6}!MD{^nsg3aW9G^$KTRRgam*{V5PmS!Ays%fO*z
zF1N`r1?vKD_RJv>TlKtPS?)m;EhQdE<UTO?uL{Oa>4nALGPrTqQQYrP6@=xV#7;{U
z6cwE+YbkJI?u&M#Mn47K@5l!quQE_(mHz+ks!r=ap>D}uZgy-ssQINb<LTR>@@+KC
zdy;|!lYLQ>nD*{E_3T?b$GVYA;hc5`D*o*YT1qAOXk^m8GKuBHt3!tFW{5YQ2Ti@7
zQX_dBgzD%rd2kirxIkV>jsm-{P25Hp0*)sRqd}Y|D^3i6yS`@FZB+{wsg{bBzl*Gt
zuT{YARx?xHR}xcVfjA-ODCTY*g+?xgpdH|VzgG^x?+HfOt!oIpDhr2gu@%r}nSdH=
z0$Jy5IscfbK)Zc6`E{?$5Vm_9{&}B5U7(l5VsnLG<KnQ#s1oWt>y{mh0L-w~L<8+}
z;`R&lynDO6tlN}4{#<Vd%Exb@9d3GQvio{ek<G@|UBt&w-;N%eHbRwdG`e^Y6Z%0U
z_h@ls1O8KDXjL!nH2E4|QW1xZ|Jj3q&KKse#fg}z)I<0jgQ4oTc+L4y-1uQ8v|K2G
z#Bn`w$|ZZm`T`iZF#&y3tl4LK+8b$LnL)&TW_t7j_@6!qHd8I&&s~l=s+K%0I{_CC
zbHIAKYnQjlXqOwpYG`)a{qI8DMP2Qop{J4Rby&Ab22u2!Q+BcCH}br($9_+Ax}=Y?
zieuO|VHg{rISs3StVW4<9(1Y|a^FTP)=pWvp!G#4zZr%WMf9_}d5=T7Ev7gNAes1v
z?R*$czN!yGP-Q$?kJ&9&JU<17OWSzpHOd>d=|Qj=o%ae9<dEox#-p+zC2thwO<oS8
z=x!qm&lJn8JGjY_0npKuj`Bkyv3YbTHs?g}#I-Tx;)z3r&qioz%VcIoelWr`4|2`O
z*?C6}Ip5bq+4~Co7@a^22piZJQ;Bg_7twcq78_d>fU1-k>hf<j8eFGLNA)5$s3{I*
z!wUH*1%1}JE1ALFzwGTZ>b>+{4l7Ku;cpjWC%+mY9BNsPA(Lp2?)_CLPKsjE-u;=u
z{Cp-aUoVJuJ+zXp*+-6+p7^;T4Q6U-;kNxjAiw^TW$r$Qu1NXilhN#VMJ9IVC74Iu
z8B=}=WL})RT&DNu*Gy2^MZ4nur$M>(EWimrP^g!)!G$4ctec2u%hurDQ{+XM(g*Ha
zZKj>S57%9{0E|{&#-M|Jz~oaJ*0`-<HFNV}`W!XNQjSBtMggAvw8FfniC|io5AE&2
zym{<8j5)IpzOCzqJ_g4ibKn~EbPr=qlKX;P@&$IOaxEqgea7WEXNe182t6N9K(ilm
zuJkToxw-*RoRr8i4o1VIRjKGu8qIolIe=!}dP3u|AT%+JNB@gCJm^?2$a<SeTyj@#
zJo+-S5%bW|CkTpdud?%wC(+zsAJ%O-&lTg^g{DA#CN&F(=9L1=3ru3gwM(Gel!sj1
zxe)7nUt<Ohy5vhaj%S_$c62|>gV&t|`N0Axo*f16Pp!naQA#+tU?Miy$*7~F$(wbz
zfnLR0R2AoeS7a#!RnV;C@iOpj%VAfJj6{#$xgb)1SY}=HkJxOtE)!kVVKJNka*6jq
z);OgHT2KzpOFd2etyd`ZRT7A+L*0V+A9;gG47_g~j_yaRAhtib-zAGt`e2mR&`nh!
z8k;L9d*+Fq^J38Spdk(t9Y*PzjIyV>*`WE?6&o&ObKUM<V0~{AFFvvm-W@xSht|x;
z>H3ja-*$}$)dgVN-N)h$dhrlywu!d`%}<7f!tG_$zu2ip9^o6@X75p^NUmafFEb&T
z&S9|jAg=3Ph05$<Lbo0dnNqt}e0#18pLkEj%e8w@*Kjef{awS7YmPH7^HW&4HWGbp
zdV})QEvv$*2k^;74ct^Bp!GI8Y#1*F8}TN(vj$>pi9T9<jfMDNOYTPw*2c6v=pu0R
z+R%q*^|?Ur%0kxsYzOuyx1jW(3>@uf-&pcd?EORqRC`(QmCOr8`$N!ja4$AOmWwG1
z<M4gKO0@o)48e;suw&R}P-x9$&RI0KIvB`|meRSb=Przp-e!fji9xb)6+D`h4K=M*
z+%AUL|0ngJy*!$jeDQq6@_1;<ir^l1&q2Qs8Ab)4M9Z<cEaH?YnEj{V$(d6?vUCwP
zh3lc+luWSvUI>o=?7%!_8tb+%h4s!(0FPzGtWNu>_;_>#x#vw_SKn+bO{rs&io?V?
z8w_=0&ax5r$D#DaL?Or3h_?;t$6MFsqi%#bJJ4DJ8n2hJ-7%&p(W=6xF-Ji8l-OKF
zxgg&^iCs68;iOs#Hibrt>$<*Sk3ug)A4tGk#3E}+8jBqv@m$xjH#AMo$5Hy(=p+?^
zg^e+||H%N2!y@ol?+88@h_|z>3pS5^!4B<6LF1&?+)he;%yNa`_5GZ1>&Yd^okD%}
z&~B(&GnbG2eh8y??#Jon#8G~c%p_YUh+9kAd4qTyxE4$R#g%sExG@569iWW7G6;?C
zB!c?)<7irc3fi8{2kXbLte&s5z?Lv`FzH9ES?YZ|PmiTL6Q{0WAmuzR!<^SsF_gy<
zXJa(apX-cjpYJmF^E&w0H3OgCD?>?N8;EI5L5bA~y!?18N;_S}wbOfp%Xw!sFx^1x
zh4YZ{gFHtqF{nCwEjP^D46cr4@TQY~uH8dnR$p>v{j&>gR|}K_$-`@}FQ6mwXJze`
z0RL%%xvedF-&_OM6Qn}vD9R1>^yjZGO~d)DFaG{E7kkXuh}cLz8z&QP_R^D^f6fD4
zuQTBO;Rny@*9Q%a#)C;~Bpj{WgyUkU<NENHrLiV5uU$Kt;##p-IUpIF<Eo+7A{uoE
z4d?%kA4xra85r#AP2C6?YqhBck=LA3>vTCZe~*TK%8O`ty$DsM8<}XmkCp00@=QHE
zLI3V<pn7>3Y}vORI}OYEl^_k`p<RTkfrrp#RuMR4(DQ!TPobL-#`+Dgz|os2JDw)z
z@=a2)dwd;Fl+n&7=@XA|(}uRzp%4>phcgpg@j%)VR4vp6QT-+`c(RONOCq+!)05ya
zu?M$YR)8Mw&#;zv5q$H2V$jGb5qou^dG3D;c%Px<Z^{Y5S!?q_-S8xqI-N$v>S@$V
z*MXX!!7O8C6?9cpqH@oDt~^O@Q0nod<tQLxyE=&O9fO+k*Q~}f5k~v0L#HTPu=5$f
z10zo2hFh8F5jmdc(%v<G_9Ae+kdN|g6|tL57xax;25X})!Vle}n5>lxe`Te3kaFV9
zo7F+S@SC9OD*;^<FXpZHn3dL+@T=`c*mREi`A4k<pL|W6@OuH)9=3)1$`RQ5s}Qz5
zip1nDsvtc$6?>SRz&15Ewzf+Z{Fp~~m1YAtmT!dD&yiQ|eG>9cde0uN;jz@cQhRij
z`&@{E&~fXarFjPzekY)E+FssyaXdE7>&Gmdd%&rTe1vP4!N$50^*e+4r{q#>|Cr8`
zPYr~4VjhefnTc7;%AsRme^}?1fd&spGLO=w%tB`!)Vy*A!`r%GWN;Rz)13Kb5&0Wx
z`hx6Y7~Wn{f^FkV1W)4kxy}*rqiH5eC(ahnx|)u%^KRgubO1(ubwp{XvH08*D^y(;
z0A}+SFpa>MEao`P+DZmN@&I*E`t=0G-OJpdAetN9BxdfrTU<9=l_`hp5gb;fU<S?f
zoQ`IoB;d7>SCvZ6FWR-zE?BK}B4~8`#p1u{p^F!XUPHssed%;icsWDyXE)HFQ@{*V
z-mn}MXKwg5ow<wrFfv>L)2JVyoc!2Ia-JB7IscU|mnTq<M+KB+zXkWfM_IDrL*}8l
z&t~i`N5B5WQ~uwaASi?{n@<c|hnr<V{f0u*)=d~q^XRta8hkVL4D~<L-2GPy*jo@^
z=h}AowwGL4_Qd(QKZm%td&L?%hS5Dc6B_?Xg1W99LV{Z=iOwb?tgs=kcOe*?EGG`e
zA&3cf1ov4hV9}BcT<ao-sn6}PgSaqtOQ~x$`8ux~7>RdIB;oWGOYo-sakPsY2miKS
zz_vbFLU~Lm*l+&EebrOJX>0^|zOClQ#=GE~x(q(2XM&DEd!gRVf=R<!xHW7s7Fr*H
zH7hP-HFXZnG~cjA2kANgiuSBK{H%_w*Tc}`My#awQ5f@UFIETvXrDsgn{6R5eaSk?
zYvytby<_}iCvnqhCXjR42S%h(#&2LeUwu~jzwDYIF3LiW>iazAR5g6dkm1Zox*vAg
zCN7_q3Z~nV&;`~oOHExgez}Kc=Q{9AIT^cuNW_4s3=ETx#FoHlkfoj#`*<IQZ$n1m
zhvC#iOCN!bXQ>kq=Y#V0Xz{5pOVMEQD)DOh7L?Xrwz9aBf;FlPhIZx9_U}(V|A!3H
zC-lcxdU;s4bu`-=M|+#BD5jxBoawx|{6=ggO8)NPw_-EE?76@_%^JA<h#l0QeaLl#
zw_*FCbS%@&LGP_0SZw*8*lww;D(4(J8)iWL=;u7*urc`sew11A?U39?{)MwOymFKP
zY8N+yY9q0%b_ig0{F3<6+5&u)EI^A!Ew7s}me+mz!6LuXdBt}w*m@7e1}5V4*I`~8
zj)BXm4WJP|2n@&_|MTr$T<cD`uh!c<eIj}0bhhw2E#WYsW((d{ARZGPM9JY{Tp8bA
z_;7)q2fmaSKX^jW?0X36X-2PQM)x2q%FMrAU8X3Mf}u(%)ROzsI$DEww<$vP8^lkI
zvt<nrpL1urV}%9IM7t~rGrmtbuO0f>w96UlyK6vcG5MS{%Fr>s7`zU==c&@osIhPW
z>+t-`Bv*3ShcU#W9(GW?q{mup2$@8@he9r&_M0`XTS=YJ7h>ZTV?p_sc38OzLALoF
z%OeLy$DU;<nZ7`roT-9!^ACglEd$DIMe!AXHSl%K2K+Lv5EbPUSrg8K3S#aX>vS^X
z&r88vk_56n+W2T#1lkRp3G(exf}-dwwmta6tVL7!x~1-@co)sz8byJ~G?6=1{vSu@
z9v9R0{_#N)Ck#RnMnV!oQF}cy2uT=(5JE^oh?9irp01-((?zC>bk&r~wAWKcDj^9W
zgb+d}p+o5KTi@T`o!9Gl#ol|Z=lOizZ*4XQ*kM^eeLV~gvSn<71f07m18eP;fz<pt
zpLcmGX3X=)eDm2j_lOu1j%P!_ubtSOFq6u68-oU=&=8P8bu5*n=+i1tHjbv!x6Yhe
zQ;MMuj(C2d6*m370~*cd(PWF>*!1!Sy;sO)qDP{zuR#tTeUysTxf#$hFAVHPC(+K_
zY+A^2pq)WaX_xFc>9q3(<+FC`zxM^noKZy`?o$xld1aZK`<#MYMh5r{M4RdfB>SB|
zzArd|$!kI}^m`e09lS{E-8O(H>w9(ed`IfocgXO<G!T}z5#Qc35a}SS?aDyAnWi9&
zF{H^o)1lEM1-;V-K-3Yo-$yTAG*yS6wsau#yiKFwd$wS^Rv7j=J|AVL?-Q??!EjhB
z3H2}a1ic6!RG)F=Jdce83)5L7dcipqeUq^Lcpm6`mq17BZ%+4uHnckX(xH~A%u#n4
zvL^)Lhi%L+{8LF=r$-W-ITrAA%5LmTeo8Br2V#?15jOZAhxC`1XhLEp^SsQ*h?7;|
zlrax9_NAgQt%45Y+3YF$EmuG8B3+nkgC(YVDBUv`v^K<}Yf%*jD^C;Q>UXT;ThAF8
zj|XM#C(g+uh8OoJELDf6mu9|sOzmG?BzEC_*-Y&>pBYn11&KSz++V9vHcAa5t<{|K
zP$aqsPqIBxB)9L4AGQ?5k*LQk_j06_Gc6lLg_X(p>&rZ>J$(*zJ0sxo?NUs?a{>D5
zsIXF;58qfuzIo(HUS0o|Xq{jT<wI6PSjtm}0}2rH%!M>@G_l*>3xethI_|1sxs>%F
zC^-o|gL<P2J1;e+Z3or<V(OwOB(h`zf?>K~o0xzChd_Rc-x(BKy-7LM5VVJn^xT=H
zsPL-b3IuF!CSuOH9@?<aLy9)M1$a1?qfJ%>bj-Ix_fB)FCR;cS_dTF)%f_g6N*pjP
z9yGSkM3K=1sI3VH)uxlAk!8b|Jhj5u8XngRMzcQo6*R{vT)4p>?;VJT?ujeGZCo*V
zdSMeL{c{S2c~zmH<hV-bpJ*!Sc){gHC6M+$m*H(e1Y;xn!GxCca4)1c>qyNZ%4eEj
zH17e4`6a`cK|P@MT?JLO9Vf;?39z#%1STm=(dp@AjMx^0hS57fQ8NYFydsEH|C42G
zxFx>-&jq`uU7{ToV(5}|kS!bb<GR&5G2Yw+V<u)nTlQSY-B<v*BjSiM>?}LW6%&z-
zCppDBL;g*Rp;CPgcZQaM$=+Jp&E6GSN0woXG7y@32Oybp1~bE>0snCYoxCaVOnDXc
zlm5^fjfb!?*pPX;wlY*j7cbeCz;_31!Pe&^h`2BFB8DucY|j8TGapllaWbc~fxYXJ
z5@5?*c7HBmUJ$2p=3Tj8+QmF;Gs1kJk76pColQsUX=$XrUktj0cN5baPEeV2o^-Ba
z&(y>K%eD?*+VR$$-02gAl9obfU`z|O<SR|;;|+q5QKd?QU{djey-%Jtl-e|%WZ$XJ
zv{|-;uY`M~b9)2|`!m1J0u$&mItUhhlgPG|5R}h63G&uj`ro}o{Pvys+~q}hROcKD
zrgPx3O#tb~0wC6k^*HVq!L{~l*m5fxoV5T2C$lW7vzfob(Fip@PXwDbKe|@33$-qs
z0Jq!8ETfVECP!>RZ8i$D4qDQtFMcrHMT+Y9DB?IfowO|S!G?#KjAdL-jS`QMWz|<O
zJ$Wif`(&Wa(wRj6P$g7`X+l@J3#~kvhO1^RKxvR0H)|B@n59o9X7vtW#G6z9f9vUt
z-D&u%E)6?|=5sn<pV3!)#^a8mnJE2c55fh%*zSliO5Mvb#-bnQ&YuH{wl7>II)X>~
zTnJG6piuJyo9UfJLF!aEGE)f(uTj*gYY*oh+>10UWIgupy|8-=<9;$9j?`bqRqu%e
z&h8?p2EHa9_BZHW^Al(hdV@?IlnAXSTd25YG4bSYQw>8ekhmX!83~Kg^ETthZ{AsY
z`r}p%PWhX;GSBgi0$1wvRF~7)H30H|8K8ab8OFVK2M6(C8WV30&tIBj*B)ysW*!%(
zWahQjj{v82PGG#zAKH%Uq4WGmFyGDky|P8{UV9;K`8plzC2y#>i}0;n05<EVLizR!
zATOH+b@w@JO}mJAdjoW2pM%D6dqHySE@xOopgy3STVP*=*mp9(dkg4roI%Gd(#M7e
z*I>iQ0<ck(gWA}XuPHhTzQJsE$?l0p3BAz$#%8#qITtgSm%(IG3W#rS1a)*`sd3i@
zZ2WVDm@QfbMjzQ+sC|T`z~Lwu*b(&4jDxEsThVc82sn-$%)BPsNbpBBG`97j;z{qg
z5gGw3Q@9L;?78o^IT1WOoIy4)g}r~4l9IqOcIWfI+{nBqvOMJTb}Yl4tW#pSd=a{i
zUXL>jD#0jW6g*qbV|E<tXJu7@Y-|`W3E*+Uf&$2N3MZy^8mL@hjyG!~&}G$h+Oc;k
zAiJAMefIK3AzH9=TQ-beI~Mas6L$80VrkT_pwc(ykkea)S9@_Ns7$iV9;A)#m8Uto
z3kA@0DvIJUIqYKIrI&HW=uU$nO!1IRdRT>2EJKT=_e3)BBcHr36hu-Hb(`dXPkf`n
z_)`D`ulPZA!zW<PbYt8&3K^s54X0vkE}3B}fA8u!5KK-ht$(<h>^mO=3YSw*GV(8+
z|Bd3B;SMNRaha&!?I1>DXHn7Wsnp6Rlli4(_<j~*WtKDO-rm7FdXuT=CXQTet%9hb
zNhtiM9BP`aq3D+ucn52u!zzv>e@Z}IHz6@w=mdqyY{qamf`*Lo$A4M&eYrz0l=X~2
zk2d!G`pXZ+8{bpM84hr=ei52&Q<31Uwoq^93~O$)9k@^dn-+zkb4`CN`B#RHr&6iY
z5<A**Uw|<^&S7@49~w<rhzaRw&`@;<<{n*wsY{Dsfc!FuZd;M<cC)c3^TjxOouj7H
zQ&?Wt2pf#^iQ&6*<fyoq<(Vg;gQ7p$9iD<-B~ehDe+hOTNQV0TUpdnbCFhiS4lDu-
zG3adyddB$@+g(YpXei@K&d`Q1vsd)BFPo)}c|>d3nPB4QVdxm-j+w%jG-R0+WP2{4
z*XKy)UJ55&Tqsp+&L<pWaSUVGb1T*hXB{1tk+_z1n0A5Pn)RT(Y)?>cB0|s&8g}*r
zZ6qwCe$s`w)OmpI7j{-1?8bRph#;&ofd~$3Sf1#g0OO}*qbOhmuf;OXQT;DKtK&4t
z>}5zce=b1bfna(sS;aCqpGfgUHTos*#+c!|@n+ds=xW(VtW*QAWQQ*9)JVk!uhXC#
zok*RUchJU}S4k6|qt^3ti2F@7mCY@ry<+sx!n2T$4jhZZ`_HMrb}6K-j6=`k%wOKA
z;uHe{Id$k@a33omCT*(#2jq~3hGyC!F$MAP94wg>i2W7>g8I-_PJT559^YpEwUOh=
za!(KJe9o9jht5!~Prhi_uOD<fh64K9Ggstl6jbGd;LvKzqPvB7rPT{9jSiy6TowF0
zn1cFK*$lMnJe{>efRfu%5P0?~6$)~}``ZN~)Jx;-`kf{J^p8ZBibKE$o1pD?*2&Vh
z#@tbi8Qfk8&vTBW*0y|>qb&gUmV7KaT1pzDf03dS9w5v4NP3kl!5%~N@z91{Xrq@8
zO+}&f$Z0JUo{(d~H6xVr+br$gQ1m?Tj>uj<Cto6ZqE5dJFgtP)#`>g#lQUyy3<)5o
zmX%@5B^$6gxCV3t-XOg`kT*qpdPl-Kdbx6fzvjW_zmxFFkg=$+SODF7L~NcN#=2HJ
zskitP%(^9m7B4k+zhb+X8LMgR^$V<X0GvQqn^)H~(82~4xEnI|)!@ZcV$;oCeaxQq
zRsoe(94ZZoUx1%~WMQpxFmosQ!l1<6sEk#hbKhC8?0FeBdHNyelF0aO3DB`;6S!Qo
zAfAJS5H@ftc#M+M1zV!A?Qa6xKkK8RK^kO_$i$MXacGzwkM-MJV8W3C?7mWt?k_Hq
zB%R}+sAK(XXH(i(tN^%{3NCS$G`OS}UA}K7=ANtunPwo=Sc+K3ZV$8=-KR<6ejrJT
z;(9nq&~UyIy@O6c@Wv=;JD-M0Cm#@p-Iu}cT>zC#6p)UgKZ#N^18SZZF#qvBQg|g1
zI(t<@e#K=xGBpu{k8Gl5+ul>BPo}&>vygODn}M^2n#e9`g4Gu(xXsR@^{*cD-4n|h
z-)}!JEM?vZX&|a?zmv_&(;;agW4moR$=NvDlI4;s(6Z+|h@*cH|79oPt$7mGt&afZ
zxs4E3Jr|sH&r{8kJiLE!E2f%xpuvN1sCAzVbCyM+Q=1>wv-7*@t72%~ph0CWDMY9K
z5ZFAZBhQ^!R=83EMu{nq<RXFnmfM*Z@GNZ#2_b@=Eo5omC78RnCwd)<!?PRD;v@|=
z^Ep3+XxO-cLy|8RX^g=OC8bz9=r|gMWKijQ!pZ*3M78z(Qo*xcrB2B!_=^`9<GZF3
zGP{+uL>dSVduyoMqmeK=wi2A;7lZ5MBKR^q7P^PV!6T#L7`${o3@)q0438XizlW%<
zUI<uu1Z@5M@ZsbNbUzb=a^`eWDa}CWTFqN{N@-WvR3g2cU8+Am7rkZe)Mmc{EMdNO
z>vgB8EcgiO_cMj8Z&_#--atN3#=1D(hdCqu*FirF4&}y#i@(OY0F~6NxSqbfR0PfC
zyJ+TLu55p^66zO6(?yTcFz2#6O3fBoHYFVZ>9UoU4bzu^ncEO{zRCdQ=ZR>OGYrIA
zZ&5STb663Y3vEY?L9p0`e!X)6zb=%Z^DHUuYuCh@os2=~GKuYbYRL|b)!2|5i@}e6
zkOeHuaw*9gmv3V4V%wRjCiC$i%GRO673*k?x)1Am=CgD5H!?b{KVz;0aWVXANM5=T
zAFPi@qY>#WcY1}s`j6#~#u=0Dw+86sb(-(l?gJ84CR)tvL5FGNfuG$mw4Kd5CxgR?
zl>KbR06(}mC>9$(>Y(n@YWmNyNUS!!3Uc?=Q2&BCjbG{zk6+QmZ9H2RJ1{QZmQ?b-
z?<w?kjDhQimtpPvRw@a%gw&iMa2T>4HFgFN|KaYS;YC@m_y+CpJq(pk!|5uPX>c&y
zOs4p#adfg5dJ4<w&h61K>B<_^`NEUzHBxLQv#IgK?Tp#PGE)P+h(dEcIH=#y_={y&
z>ly_yE{ye1#^yCS56MH1i#V$=0xAn+tpA+I#g6KMqO#|NZI_|`OfWBx=tJwCusy=6
zi4YXaD#AiLFl{rz+*UQqhOZ*W2hC>Q^BfYKeUwN}w9$^Vdz`g02#>^T<J!55XViR(
zt7N-7)3Q9mnOWnBofV+lpeCwsk<?~B`~U7fLrT^9xO{IkR(KD^bhCI`tGLg+J3Sfa
zaIk72n@MNrT?B3byHE7j#m#5Uv1r0IaLXtKL98pMzy1V{*1v-Kg-b!bJiGKq!UZ%P
z`i!4&))l>Xh0&-1@pxw0W(<p{0H@I(i9<#t5%2Fs8`YkWuK$?I7R-Y7P4RH=w=W*a
z@CDDjGemK}7wmdhjO#0yN8q3%_u;7=a(9}+%9Tq|cA^gmn6s+*KoKIB0I@v^A!)h@
zyjX5+n2j!)EM<Ro`BnIIa6XDxO8Hd%#i%~oLYhMv1Jo*rn5?d*U7yYAf2re{7wHq#
z$qqvIroo_U?TsNF(=cabHQL!E(5})ZDtvi^H#5FXbSsvkNGRcr`Yt6!HAlhXZaJ}c
zVSJvC!9*vU-JRO*apuiMXdT=WwFZ~7`)UCM`sYE|I!dbrDrjmwLn}RwK=q-cc*o5N
zTW_&!bo3RNvnK>K3Zn6tvkKK~hLOe<lcBpahsv4PK{Rn6wfU+?x}Ob$uzoCO;v>i0
zZ!1Ae&V%)>hoswKJN!JSgsy)tvE5w;sXx)lH>oB<%iCb`eCZCXzno4Fvfjkmfg{mo
znv6N_7c+L<HPB5v4MxYz!Sgx$`4K-{9XAJ0v=AIK*c=5dl|*S}3k?^;>B*(cE#Zym
z_TU_9jQT@Wp5sW^d==4M^Mfe9YY-2MdJ?uMfMslaiE$$9O^#j$+QUOYqAe%QjDzE(
zahf;w7>e<?*W>P&qtSS35boK28S9hp(vEH$$Uaes!+n|arqr1hn_HsDWikBxa{?K;
z6Fb-E(U@KHK(xb-i@|atiP^#SP+LlG=J?=@ie9K57De-HS$A`C8@FNKJeIw@N980F
zCTxlV#pF1?u743U)oj2;I-Bv|p}M%wz#3z^B(QJuQ7mi@K<7WjIKHeHjg7-`X!lUe
zF7<&->ki^AWqI~J#TL(W%CK?UP;lF$2~Ng8i58tgb#G0?R_hH^F~NX@C65BHwU$^{
z7RmS<$+S~hO++<+arIWC>Fd7du<hu0aI?yThCih=<Uf|PZjzH!Sq$3Uw8Pa4ZO|)0
z6HV_jZ$R1t)+^ft58Vo&g{%chS|#iDTN6RB1M#<X2SG~^^G4f~!o2|)bF7;2LvNCf
zeMki(Q^+_g<`vz46r|m(pBMLuDi-(Un)eI&j*v2%`!<@&8J`DvF~)S-gRMynjvB%C
z#k-_j(e1m$^+gi8RgDJg+Ot$RXcf4xSp*#lik<)YFt7XHq%bW99(VX*&8%zKaqkpK
zI;+8a)D2{UsV@e_q%wZDgx7I)hFRl^8JDpDdTL$<9UKh?U;jc(oDJWTqcCh<HNEw9
zFABUPXVa%s*xleH)W5pNo49I#cENcFUOAAyw#mfB+D5q4{}BE>Wr?z-6KJM6z}T2Y
zcw+Do#x}M_UCmLTYQIFDKAwT!Peq`=&K$Hd)xxQ1Y&J1tFz5N|F|nV^dO`h90I<G5
z*M4VGy*C*B*RFzdsnvLPxf$x}ltNM$V?RCI4muwT*d9p;%f|6&pwFJ2TaKK?hUw7h
zQ~+7sMfhfb6hCi`!!=vaV#m5%#Gp&)Ji7p;`-+I5p^>+~HxJIANkwJ9({R&^WpNH@
zLci>55WMvWdiEU#?Hud*Ze{s0{q-P`A0&$yKh37$E19siCx-tQg%$}xWR*e-8!aUC
z>8D+Iv!4`_Zw*E37mtb4`SVbFY6o_>B+$b2Oy*#JNN-Hi$BsWO#N>|=_>Z53HLg90
z^5i!vFt%3(dW18#hYat<mq7i%zi9eI#?g*G3axj&Amgzg>+Ds)rfr8&Bo=_E@j6l8
zc|dZbFQQ^}5I1qpQFQ3{h;(GKd#8OIJ)v0uD$@@%IfC^J<stA&Zh?98#xfU19^_IP
zkqHxMKVctie&<ED(J;i?dC>i5E_AORhVd+4p)+_g{+Pk?eV2Fet42$~W$Rb6ze<T(
ze+|S>*V!|7Z5V8=%feMt;uxd(Ug?Ga#uVW%gKUisrY6QiNy{aSsW!v%fn{L8b~CfO
z<mgr;0Kq~ptUE44NmD#E*?5MXnF~>ot_PY0gHSA$0X{ziUG9r$)B6Y-)ffskGIl4X
z>q%SKHInmqKZ>lT@OBSa56wCsKGdW$Um%;KxU7Z?fioCa?HqF>P9y<wBDBqp!Xm9$
zBAKTI)v}Kyc&#n<otz2|wzG*-emV7iB&DX;Uz1oJDHs)d(i3O2(Q5@`kFLrgq8%)=
zI&>Z4(Lyj=eihU|pIUmnp2PO8pNak#1&s_}hQV9>QGa3{RlbyS&tEc*abzj|GB^Yu
z&n?0FixFgkXa^RooQ=^B@|lOhABzeLKz@OB|NSyiyyOQbtqVinK~jvCD$(t)b<lj&
zA07)PqgQqeE{iF|%y(KCqq_r^$=F{1<pMC<cmj$t7<1$6buQ-X58CQGhPYo7Vxz`(
zV!HSTac@3LVhompB&C-ANTMifYbPT@V%d(ltu*C^4oX%OfU$2fI51C};jRuUC}bTL
zlk`&j%5sYRw}Zx7<|Y1kpZW$4!tziLY&kpt{=Rw?MYhAhXq!F#aNPqP94=FJga#QR
z+J(yH(NuHw1+3pa3^s2chST5s;@ZIhIOAIYc04nHmmj?_j4vW)|0QAEpL8twcpmES
zyrRx`!pV_{bog+RpvNIOX4-jz?D$CH|MoCe^@>Dkd@|8=FGP1T8EkfJr%t0{NNvw3
z+7@sL!&<V4bmat=A?c=`Tem~ym@GWa?q;5j?)1d12(<cr6or8^Xl|F3?z+ySB<LpR
zQgn;vX8RD?l{CIhe<tc~j3LeQhmx<-U6{b~W=k?gV%)q#X#edel}4t~W(zl7(;x&p
zqxV63njIebD}%WVOL3*D8r7@65|^KhK@>a_nw}j69oP*uPY2^)#il6N971&Z{|%kR
zQKYT;EsfJZhDpKUFeo<~)e%O#v+fmW?OA}z?E+B$-h#hOlkoI#C5Ewc*_6#g83XAT
zSu2l(MLiNgbSf9!#i6`#tvy;yzeu$V|AG$pX++CzGfleoo5q``;@tAXSc@!oz3K{)
z4H*TK9%bUb*g!D49D^za!6zB4Gjv3X;-6Dcd+R)wJz~t%8|B!}_MyQ&|B#YYDayBu
zr<TqoxJVKO>a<Xh37xQ`^(}qkpAPds&ce#NEModJ7sQ^<oU`j;Xcl+S#);?X^6l)L
z=~~7*{W$>T%(FRr1>o%yVDcakGVFRXu4yrx4-;a?yLOUr?;7S+XX9G?G&mWYhBZo-
zD=aET)2IU=a^KIFf69ZU*%hc5JcS4*TTq8V0Z{(a2fLR%qww1tS{`J9_Q1zP^=K<r
zt#)JXm4AtKyD^-)b_Nr7u0x%aPt<+R1tRu1&ufW2ne*L@j?0U{4u^IkX`K&O7+a|5
zgbA!-uBQu5i`bsh11I(lMcLQ{=rbh-m74!@f6;O*nHCBiJ+DBw^InK@RAbQRC1^5e
z9xSp=!zRHbEWWFc$NObscVsbfVtWXDIvku9_C+=Ajgt3SRKZ8^;sR&X{hACf9U`%*
zkDNr;nWON|zf>SOU!t`#8wQlh5hwGw(lrA!m#}=`opNrs*>;pJW_P1QokVnEDQ_^G
zv92)y6}1NV=H3>pEXbgP**n~56nnqQOu;SqJaKZ>;bklb&^@Y<ejPUrF`8vP_ISYe
zos^xCEqI#Xg35>9<okzQ{HJ0$`(0a8jlTU*yWJ6!Hc9EsQO8jnm0)SWo{?Ou_tdno
zkgw@fp^nvZy#FBt74GI3aX1m&RI6ahCpS!oYtT8dnHb;Sir!5E>=!c_cg;=5x`9{0
z!S@QwDijk=au72*a^PZ+8a#T;WR7OmF*ex)X~(YO?;1bYc~gMuCIkA8ref`-E5uW=
z89QtJX=~gz+BC(7DpyX!89n{bA+!=Z2LDGEus)IA6D76`5@3085xOnS1Aj4Nn#}-g
z6=g6FJL^NeDFPe4YSKFQH`R14fK>rJ80W;`_NC6)>N*;7gvYVdRSPnXO400S7<E3c
z2eY1B08xbwujtMp+U)sX`?3O>=KG*5p^9Z72jI;Nf&;GaK?{u|@FG3|wQhG2soO_t
zs(+Md-#-f8LDyiA(hbYo*thYz0oQT5jE42jLfs~||I15JRSsN+Ax&zi9;S~X$7Srf
zrCf8U7w0|62c2?KI6DI`FbX+Arq@=ZwKyFY`T0Uj(?JZLOQ`wqNZiNv1=7hjD9Fz)
z^%%-B4<@l(_e8dT$V{hZVf$Df(gqKHi~-?qwih`z0qjpmAwee@XBaZ)ZlNXCNA&|m
zsx`<QuTljov)0aMJvfJbH0t$Hgt5C}^IP_gWsC*&D`SvZ*nx7aCMcrqdBOG9mNCzI
zLu_z9NS61cwHt+?pR@{EXWt`FOu|9(VmR2p=z}JV!EchV1{_2z|Ml9>qSpR35zGmL
z`}3Be$sGlBHJLFcBAd|~E+gu^|19fOJ)mn!A27Z9le5@VPnl~EGhdbgc^ivO@6$-p
zjsx)V!)6rKCUcud8ls!-B+&ITh14fD7^0R#$gv98pAd<a<F3-yJa5Q-6Aiz;gTeMi
zC}TsImkQ3x`NY#X=%9OxKq2Ed)y9MAft^IxcseyZm_yQ8pIqX@vLt>#`F&v{uqEdu
zk>5=MBPT0zWPLeSPs)Hy(HD{w!tN(I>0p;@1ufHhK_6m`+L~-O?>?NaU7U`=%>&2+
zwrkwrngSM2wCRX5X>enB8ghpepqO6BcVQHq-98`9B6Xnt+d$$lvJh1kSLv#(<yg}r
zfHu9YqLNoCu>Y_J8pdB@Ili;t<a3|P+%y^0UA_PhDxt-621s;G!AbWMCwgmzHR-w3
z%q<V)C;Cwu?tof-?$UQ(b5P4;0CspAW24j^G8aw;M|RKFiAJ#2lfmY=81O!C24Q-+
z&~IcSYMECM(^DlxJGP9SJwB7x`(vr%YCi9;UIW^l2~?OIh}w74s9^soF1MzPOucm!
z8v=`o^9MI@Pu#_3))8P+mXGp>9ID=BLerPZ|M8>o!zK#u?;jAin0y!!R04`IjAt08
zi}+|22>w*4>hzU3|HmoVSV^$xd=E68y#yOn1e|UxCnCL_T;E=7j|gl>DeQ~EZ(6BV
z#B6Hr=70-^SYh4ADx7b??uedqsmJtXAb*s~^2OCe-I+}78QaNd*hZ-SM-54<s%YcO
z-?TkF7R6gAi99M~-2+<M8SBcrGKZ;WkG}M{ggF~ZX5b*U+pZnm2TZKaV_081$kgtS
zb$_PgRC_UAG+{lirjs<+WFO2TIoS0)le$@*h7jg7w4Wycoz<qu`CJ60i5-2I%(4%X
z$!I_G0AusaK=m#kOVjF~M6_=$nw7LtapG()$tn!Ge4f&=b2g*;M^ve<ynrZgc2SL8
zD(b}h675GykPO&Cr0%_l70X%$Z3DEfl!ETG5A@gMSiC>L4poiOs4bJBKsbd{D~G~?
zy-paMwS<1z!gf(l32){w9K{E`q3g|E(s4Z!8oh+%)e8%(cU}(G+s|WlK`@$Kl@W~+
z8Ju=!e!r89IdU@_(ic0!Fr5Q9QG5==zH9^Ecd6i5Pz-IF(fIdRD>m!dk5$FysO+If
z(@$i;a_ue5*^ms?V-WiEV>>&|J4C{IB%)qs;O^?j>F*H0?swG~Ts?u!q@Pg{>-x86
zrLnvIMr!zvIqVy`3V**3M8V!(uq1yhI$9qAP0dTdT{#Ui{`1G>D_5e*X&VHOo<;0#
zhCp;)Ip#jr1QvpWgjzrN!1|X`>noNmlJ#(p%~;l`M_`ID0b9NE=^|wnh+d86r3=Pc
zf~E>?Pw~Y}wnyj`?E`<sI*2ofN86Y2_}eiIq%((>y1%X?x@nzcxkV`auPf@QS3s;X
z^WnLzHzq|HgZAwJI^jk>h+bB4`T{nAm?H*@Yb=|zZ!<649Ea@{G1#@t4@K=d#QDWR
z5KvzT`F#=GHd+HTZi0r{6;u-6pSevI!k@&IEPJBF^qBeVT;xkWERTl(^B^of8IPl5
z0#W5$feUT6;L23ML$8Km*VOeS*|HKcmOG-$&uTKc&la4o6AP;610bn+G(63<!XK?_
z{I*1d|FCy}c;a9_CbWp^N7YhExi9fl>e9w-8%dRN1SXxC1n#*U?Oi8Df$1@@ij{-J
z>Jk+fwi2V6x^Ta@Bj*0M7}W3ga8*-BVea@WRM_S+*W)$RtF*?A!u7a%ChN9o_XgYg
zRLBUZhA|ZiG-a+Qk3~^n*M9}bH^spe!$7pMG)1pk%Dgh!@H<KmhisUP6}l|j8o`m;
zHxrOQ04Rwc!FYwEz}hjA-Qz>4AI!qK$)(`Gs~B&NiiauEYj|{$0@G7iM`9;OlnYAu
z+7-;Vs~iBLzc)eavVN%E!tq8+E@O-06BXF}B!O%{xoAudUVI!26AI7c#Zi?QSTYE6
zGj~D?`|osDdf}-LDje{{A9Hs!k?#9?FuXn*jp84XitX0;*1{VV28*fZJVNScpXW{2
z2Se)?AJF+x1<wt3;lDlDef`H$s(5-43%~r0!A^rfdz2E)4on5FeyQwtv5Gjo{fC&{
zK8zh5b^Nb}NbFu5irQVQ=c}b;#HezTt~`$#I>X^xTnf&eCP!m0Dg0wH7`Imyp(#Iv
z`p1@1|I|v_P+|^dGrv%m@0Z}O?HACwfPHV>tx4u;U1GCE7yO+<z&bJ;u1EnJSl>nf
z;ruM?XsrKojf(ZkOFOcPQDvD%73(ZW$Ee5Lr#@CFICYR8EMT+uA(2G1VG<F(nGOlI
z)~Gn@33fY$u$IWs(^UjD{aIf0pF>brJ{~KlF}HSWHr0K537>1R_q#_9C-SnPiu{*U
z==gzHe|LfnZxyV&oQk5|R-8cl9VdKR$J>v*LhAeaK*xi<bkKZ%<|dp+)c;N9B-%Ax
z$L*)I=l3FP?X3dEF&;|HucFcPQ=oP6H{BM#2A}_Bj)iGiC=~yVP68WD+jtQL2IJws
zDQ8i{ZzoX+ry-bAlL!MI1h;iL&$53=_JPY_GU_t8YaN8wkH=uZnP?0yaUx5HW?@j-
z5^P!R0ZB92E|6v8Y+odzOrwzV+;g3pmNyggY{ryxO+%Y)W1;X=Jmj9tLiNZnUh44L
zQm9*u8tcZv6N4=%59LThfgT*GiUOrqD0mhGqWH}t&hzFCa(}QT>YrnIkp=H)OT{2)
z=raNwd+ULQRx~+jxfabg#^99rLKIm~hEV|r(Q(KPGNJD&=Kh|J+MUNyBR>Ss{l#{j
zi}Q)GR}UOKI|+_t=c4k$KYWL{kr#WrfJUYtN_GW;ckEdzh?>CL-(CqV$1gz1zvWnV
z!wZGC`=I;UfB9Ol1DnZfVac)CsCtwJ1Ky{ja1ZNo+|Go?P&d#sIKi@P->K@-1n6it
zC2IL7jM;aU4s**zeT!5oqut!x&2p^$5f0Kt)%=#fO0mJ#4R*Q&v)*wuHqTGP*7|4?
zeC!udk|#v>pHk3Yb&XbR^8=$fa~WrG2=<*g9S5ZjL4S`F%)IxRD!Q+sc!dn6F6e`1
zy{d>&P5>#I&GybME=29WmKP|}V1!j4G|s&WMt^NUb<QUE6<dPZrnw;C^(|XP6G+4O
z{`k?H?JpuHg7L_4D9!SR;F2_0wX%$PTv9=L={6s`Cm)o_GF~bi#u#oxz=ZosPru)X
z!pI*~ka_8HurwDsZZcp%iWAoN{y@5NblE=f6!G|do(kNz@RIdTV50UX32pu`#Xkd!
zhn_>3aVy_oe2*6HNkW?kW?=Ut8UNmX6$dl^Mg5i&R3OdZKYUU{kHons|Ljkcf0*a%
z)J|9>vBs9^j3@f|YiUz%AyK@(g8cLpP<7VR!ry9K>{X3XRR(zLG~(a2+tGKG8eVEG
zMw<;HSoKGYO+`^?e=VO3PVhzTZE~u(euLjOggL3QpHa74j2Ce;4-?bbS$^|U5QhHd
zD<|o&^V|qxSO1D`iAzWEr)9j@Sw_4Erh$#2n9A-3a<akaiFRLGsQGacIxfb*UYE((
zG}9j6yj+Aw*2tNkZ5xeQ;{lOjkr@0(NhYKvLhd44l+0R7H6jt3$8Y39X8Yr~lWbns
z?#(__`P5CF1~lUm2CZF*4~Ct_<+tRZI90*hWo0m)el*egIGLSqcY^=!G6?f1CfWy+
zKsjv-Ejj29i{sR&Gu8|YBj$n*%ePwG|4Ho~!{O}6D+p;qP`qdJSuxvdD)T@wq73bt
zj)9$%CiCu@!;2)w?@MJ_D0iU#J_A^$umZ&2KUl(JA&jU$51EyYF!Tk7$;pwJbRi2(
zFO-q6Do3y@Uk`KA7!zn#9#_0447rRH$n=@Qx(Xve{c%12c0mx-)iA~=e+D{^v2%yQ
z2ZmWBpwo(p{FkTk@SjyKPF$CQ7nP}?(Ax^#7g-Ocb1gdaXQ_Aia~iin&GOakJ<!lX
zg_mD)f*m*Mk^FKn^VLJU*S~0B{bp?6R020AT)_|KD!3$##b(EP>QUB69rhPvauvmQ
zHdE2XCLFZXt|Sb1;NRa_)@O%4H1ua3the#h;U1fR*e#=qr6Xb1jYK>?{|t_FE<=Z}
z#%x}%3!MoQnJe1_f<r4o5bQd8BFVtQfP7HZPo<6hkI@qv9$5b`%bl4W232AXF}tfG
zo#*C4yPQI|jV<VJe@Pv_9fqPI0boRDl5HO^gH4ZRq&a;b?^Jq|GrI0hwS*DGre2Tk
zoO+J!sQMA-+9Y<~lS9wV%v<0wn#zj<X}^V4khFtCo3V>PI5nK;?u@~k@$*pS6GDZa
z>0r?|n9Su;Sr%NA)*t)MiL?4sN4*%bT{9469?7)l{VK4yzJZ9|bn*Qrp1?S>!}zoR
zC^VtEsMx^zEek!MgZYnb=lFt+$cacF48Y|>4`Tas_B|Nk2oYnlp}y%1pJNz|d4E=-
z&7@r{yUBw@&7Q|mPpNzCcQW*`8cU>^;IW+T<yOomgS=*;?@M;){ZmI)uIJHibRnHu
zT@8&Zu7UJ+F6a5+BkA@Ffrimiw4m<P+tG>j_baKe&K){rCPeJS?xA)fet1JUKB@49
zdwtSaUU4Li=^01Y9$>yh+ZRM$*g;iO)Fdn>h4ms>Ms`^nnKeHV`m>#?-F7)zm#c_i
zMY2UqKrmVHNDqgFN!WK<1^+sSp(OD!?|J10x!LzJv^`HHX0BK1wxKMyTRs&`Hflrg
zjs+lV{Esy76;SB(7wSG)4$@JjrHy|kGB#N!=ikE;T$BIDo0|;v&OJ%6O(@Iw<l#-5
zGN?No2Q_hzXwneE+#|6t;C?s;%q_rzp&L-+Z2?SWT_jg6=9bHS$aii>{1~(w)ekh_
zm0ub2B-GNbqFwM$^;P`vOO8e*0vr)F9}U;4$X{=ZapiH=$%;<|RS9!eFFOsdY>S}#
z+FDSW?jg3zvzhC&5>;O9q-J+M`sT4*VV_Ku-Q*}DiYN!u<Z#e`Z3;#=`a^opT4M8o
z%{iN6iFjr-|KR~++78Y`_p@4b`C!HqT@t`{AO*Cjmd%}C`SCVR^;Exg40C4eMsY+k
z*IjoUI^JjU!Do_i+dKAe_M@EkJCp)OM2EQFNZ?rx1SWhb0n?`!X|hWZPEEXk-5%q>
zIqm{5+&_4}Yost^2N;UKP;KoR+V<KJpRb8$yVrwkW^s&DE_uM!ett>YvUZ?roD%hB
zl;iJ|M3he(LE@U`pssm1m{bOWWZfjP1zCO~C)L7j?mlQ?3{U4LSHU^$7Zt9|rJ~lp
zTs?dtVe1IUJdB_vMM(q`BZyaT1FZNN1tpRDP^@0YZ5++Gz%Ii$7uLP1`7r`&y`?0x
z<2p^U&7)RxD`BvTb?UxrlHzAs%=tM9cDQDuz0(S)u~#vt&T%k&T1jd?t!Ljsc9z{$
zNCmGhk=obHv3esCzJ#8}&bWLOFC7k@*`Y)~B9z^09f&OP1Cebz0e{B>N)7ES)!$9{
zY<52ESag{2n%jBhTtlK69tTk`V_;`LwiglJBC6Ng<nn_=>~Lc_%JE!j_K;<$wY8qe
zmk$NS>9L$-b`meELN@<+PVT40;qp7$C{_gWhAWKFeGp@Q@f^uKwHz&~WH2y;y<<g*
zB)518KHPc=QhQW_`nHM>XXi%c{UxAd7XgwHO3tNx8YG?Uiw;lIv1`;&A}0BKx<wC=
zf3bqI&s`XIe-eBceHJ|XoTCXR!tmq5;pm|q3uBs=p!~c$Idw*bjbdxcE;#TYe+#C4
zn}8;Br=Zh<t?=xd2u-u5zzok~*0cSaZ`4`Bb^!(0tWGA<8#A~DOU5}{eF~fFH}ex7
zo`x@*;#ddg3<i7d0Hl|x?BCz?$Fa@W#WC)@oj+{25`(#(QUsY6xG$>Z%hf56@UQ^d
z-cgdY0wB}EggDvOlLNbBFeyG6sxHjLZCUJ$J*<odbY)>{YXQqUHI>$^8%C3?99ceJ
z!mWN1jM~qRgTA8>Fvk@7Egymd4n~4<@-2ps`$Lj??qM9iCQf*5Fw5CYBbwhRyjKUI
zuB8hVy}XFq%JZSa@*O8FbfEGVB9<pP1(InVwEAxk95yl)4PLOnhZ8f2WX(LHasLRE
zek;eNS2v)3XCn4kzX)Y>J`?*)Tgd%o4PvKH)TZ5x$bJoDT$Lt%cJvACTA2W9ImZXr
z2%sS^oU|@#C*S@YLF;^b$W3Ltt#y@P`u-ES+K0J(V<&*9tA!K$-k{QXr?_Db=TQ7~
zDy}k8qR)sNl;`!r)k|1*{8BK;e+^=DP{uiO970Z2yQ0ZuH5Q(&z@+7D_NxkFOc4vN
zdHr3|IhoCjOY^uNTnKI(BLl%_#<3rkPE;O0sbpLw%DbNut!Z8Kt#vA{;WKc_<{fDC
zI0u}bdeBDE2)gx2C~Dnx$CkGm;JITwDw@xLtKB{vTN#b-9CPr|c`qzqMWLDHD2+~h
zB)ZQAk#y%=5GI?DywEbl#@XPy1>s*GJ8W>j0_pD=Td4CaZSMVsDoQ<wYSm66xJ&7C
z^K>{UJOj%9N^XVT611KkN*x~DAT|?ysm69UP`x-pE#~f~?yXANSvdj(EJr8V%fXWU
zhA8X*i_{mfY{AS5zO9z^$m%@7$Z$Uqjrhg6kMTlf=1_DR!8j2H2l%Gv_4LUX#$E6{
zgRz-;P?W=bxvV4YusRpROwyoc+&XGA=nRJayn^lH8F#<uC0O3m5w(&w(YVwsoIZx#
ziJVyv-=`dnb}oV$S4w~_4<Iw~6!m*K3T;nQ{4X>KhusK7x4>Z#Rj)w9f*ew4GZvff
z=Ym3$(5TC&(cdH<p0MvtjKmv^9%-QX>IIhlR#^tyye7)EyI2M)nq>zr5W{8qpwm+a
zg+VTK>mSzTS2B0X=X5mM9s^H4PeWB@5s?mFMFa`KkR@D&!s9K}h)Rg}s1zDgybzvG
z^9SiJwiDJ{OFvH;j$I4buB>zcTt1hJlZuX`>3J3U|1t;Bk@Zx68e{dwuHaJd%b@XB
zB8nc&CitcpI)e3~@ns+Qs=kD9J_52?*6X2RId0AJL7TcYWK4A^jvcoI9m=}MKt36R
z$v-5%Dgm{r1az$m@Loe1imKa4;R{E+XBdxxiVDoPoQF?JE3mtq?P;?^SVq>GwAsiZ
z)BY0ad8HJ9SA)*T6`)0TVdk<-gqKFpteXVWi^kyxngUvn<H7jiamEI&;*>`Zk&!po
zqEYO?|K~uPvVTWUwyD9w{v&a934|gV%ig~S@I`ONa`-zEd_PDr)g}mZjK)IW$11d4
z%eY$_iRigy2c*xR2Y>W8;=QIknC}x0A1bb)1@n@IkGqD#f+5s!n-1FOtC-7511oRD
zQstvWK1u&3@f;pVNgr=iPFn)1)m9|9-$znE@;&Etp_o?c3vlR}J*YNlEtS{Ir_!o0
zPHOO-CP=5_A4|4lWjhFCg$-KZ6p+Xkk`=d<SRzVc^R6D$Xj~rlwcdpSRi~v=;~+T5
zQ>b&#CGcF&2gP@6Y5i^=5Si(4DpxNM%w=7@dINHBS~2<-CZp+AFJgV{0WnM83lmun
zC-%lAczYlV-T$#9))!LI#NamZ4GaO*lIvvZ-wKcqTZfL9%dz9q6W%rAJRbaY9{u0W
zM>V;^T<B>aIaEkn)-s>WsW8}Sa}nOzM4`jhc;*r(Xq4<lgL_j{8A(8}w3PGuAjdCL
zQ&Ht1U}vT<aA^9QR{I5^{@9;Xqu*eVO}#=S0y9{BY&?!t48WR!#vnE9TN*A~hr&%6
zR9D(aR!iohtZNQVtzvVaKM5Fob`UtdUdWi=R>aph7!-=nT<Y~qnBO}Tq>tB7$BB`U
zRhET<ivx+?-dX6@eH9e3lR1xlbwnI7h3}IYkN(51z~WmrXwowm2OeYYsp%Kc<!U-<
z9-+f|o0~HJ;wPHae=K~iD@R%K0!*295+}5?J404EsgGO)U5~`{*~fAe^O|UQ@H%O8
zwSi`3En|rlF>XLGiKv@}>SK)U*K!qVuf>Bq|B&-c=ml&3gfot_fd2HiMC;+l;Q045
z=qBukG4~tj@8f;3`Ctp}xb&KP?!6uRjd8<%Rp)WQm(eIl>`)1`cUveIn?d^HQ0iE8
z8PuJ5V7Kx#II^5|zWoBscySgQ86QzvEP>*wn=$Ir8K|mTibbB)C>}Wz!{+!gz9P%9
zn^NAeWg9g12!+gH3(4J(C{&nPfcj={s7nin?m@lLNd7nI9|~Z5q25sNc@0{QUxN>g
zuY!^rOP_2z3mqk^pwc@6>xRZcX1fOSvY+8<ZvQ0KSu4R*afbJ3oX1#%K2(0~D!Y#g
zsY8;O=(azllJ*+jVdpK<$DGH<W~XrDP_~bqlw{d+LMX;;(gTkXY@QeYiyZRF#iOA|
zu_Y}N7sZ!?<bE#w+~tYNddeH6ECL1lj&&RzMe*5Gh{<NI=J-4G&sf&Aj0=KS;XTpe
zb~N_Cz_L@7XGq8G3DEYUodz4ZgXkSA=nTD2a$Odq$!<BEU_9*63C#5c&uH6aAENf&
zO+T=`!8FFtl;7a6^W}7KD%nf;8%4Nw*?H6+$>tCrVo*BrASVvk&;8TFe0in!nNxZ<
zWZdb4deuc3_Ya^`pCHyZ_ht^xK$2uq0{m)UuwQF}V^hRf5X8<ZkKG`uPXR`qi-o>Z
zqS5JGHg(#`n2!tm!TZ(#>>Hv$_x|r_nAIcFwbd6y15R=pZ+0_AZ5MCzr3%ueN1y{-
z;mfWq*gf2r6fg%?>U{PInpnYoKT(3ev*)0vc`U8C7K|1wJM*2*GDI8mN#==a)`2WT
zfuM9Yff#gaaYWV0C)D^|J_whVaO-~9qHN(8Vx6mj?R|$}o9qgDD#k+QBX_zMS%<)H
zIPGV`_5|<x;h_UN*{&o6WV^a~>sZG8yuFlYEz|(J^;^MZd@;4X!n}43XULK<LvZi@
zr6^?X0*`s87>_0ZcG^3jOOhBw)4o!(AO(btHe{UgRG4)%2D+!LgHujI{B~H5-)B^z
zMbJXT3%<DgWF+VWFfU@{YPdI(hvYCl47jrtM=yzk3U>e7eA*D5)~55<noTgw=o}V_
zw}VsDRPH|eyXz=DMh0u=vCJ|B&vqZGwPh>_stPUcT=Kx0&K$DRY%uE1ctrHN8K){K
zlP+KGibaRny!hTTV#DYr%1p*e4zQ={35j%a{TWONsluT@C_c<hfR?#7pdNI9H`SIw
zox49)mOdb*?At8Z_oGz3G?Qgv*txm#70sM~mkO4PNms9G`sR2hS~oP(1QRum>drx(
zqwAnHy^a{ko{@D0LCCS*)r!WMtSgcOdsBJT%ZWqlhY_f|Z6G$@_Jxj)Q+(#{U@8wR
zAP(0YS$@aBQrPy6SHF>SCdI)p-Omjb|IXv@-3vweyN@*0Ist#I*o-y>6Y<dQa(q6$
zFP5L{gRhS#;J*cO^tRLi{eM{}Ai6iNQ=ujj@3}-VatCqeEKs1waf>pwQNO*M#@a`N
z;v(U!HZflNqzc;X-9di8oQU3P1zMcD05&-Xp*|^+Gg<O4HCi(mj>Jttiy?QZa83{U
z;k-AFyX}mbF#-&^Z~+4~E75L976J7qdUKs0E_KO3S79J7G%CYgEsOE_?kGHVaxN+r
zv7le*1@y`s6m$n&R_!Q7;Wlr`JRSy)H)lfOg9VHa+#hmZSV4!x28Qo*!)G(6;$G(j
zbSo=F&5X;y4=I68btNQz4Mx%Devn(zMP>7^!GDV`qPrd2o$-^gabEzDKAlQhkMzg4
zgM9FFFZSLw?jZi3PLa$vX;9be3@TF2l61d)(CSkRegBlB&L6gqZzu<Iw-Y$Fu^fdZ
zS+MqoC$@S<(?h+_p=#7oqVN8a6h-xcX6Gv6lu^aiADRj++55p`vja%I0!odg)p$X^
z3G<FcV9&=qw78xp<#X8X&8!&2Uph(Kfq86K@rS&7d<DOpWqt6twVXTq4zIMfL-9Jm
zmW2v1{bE7W_w9t11A5>v^D_SFEXD432cb4;Jy;nBF`iy5X{b@dy|@rGJ~1Cv!yV}X
z+XGlRqMkT}J41?%l5vGT&<!P4=#X6ivhggp?@|cG)4Z^Cu`4L-*xr`)KszF`I9=A2
zHFUm1no6G&(eZ;Qc`=B%tei<7#jV0`;hLx#J)G#;1md46Jv8{u<`?tzLDbKJZ(jYC
z6L-!g?hik5S~Dln!KTbz_eh14YGv3kC60tdhGThhGRSqV(vJ0&aF%s}k6%8C7R7zg
z<@#5$V6g%>E*^}Ic@B^-AB3Kp`vdzWfY2$0bS(@AyEr}Qvv3@mc`KoOX+Dg0nurp{
zrxR6OW_$T8OJPn1RcoXYqdlzu<~bKt3%yBm#CMX!y{A&<O0e5uguTzm&?#NX33{!d
z_7Al{rsAM;!9ut<^e>h#@#A$mdf*+)OITlfkNIzI@$So;IYC8lRr8Pp8Xm#85+$!_
zQrsEFkb25DTWeF%k9J}|rV6~{wt&G(1*(7dr$=I9p!#|hXq{{&{b~z9&(;o2zOyXR
zEl&vaV`seVQ?RzR92y4tk!Kz3>a_C{{cnH@MbZdv-k?DAehr{IdjV<bnF{V-ld(~~
zfhK)-g}Z@<IC`rBlH8Jrq5T{BeBmV+7nz2_D?z-VeFHIAz}`hI;{c~zgVw3DNTYul
z(OHs3EMhl-8|yGHDcp&>zwctc`*_s&5ChtY=P~+s3D)T%C=5c#mx3_17mOw%nIi}1
z|InnBQ$esIy;RjVk*b_E(6b-&ge{y;W?xKZIj&MDG+2pF`{tr!`goS#2nEe6zK}h9
z4|XJfBW?exNX$&;NBgP`)e;JaoM&U^iksBgYa`=M%?5bL7``*h_|A1Hq%B&BI=TD7
z@WC@Guy=>tlxs+@UqXxS7qpZ0IUF7a5O>Ek;`C@Q@8)tAv{$^QMQ6?u9d8A?^h_rm
zFIO^8;AMoZEp%Sfddy_bdixCuXsvonB&)_i^JO-Amseu5=Qu7gu@teJ?L#;2g;kl%
z+c1Nm&}cj9aD2)KTZF@{JG0O(I0xiGg;bW_%_}~hhTT8e9M+O?DQX$}Wo;}i8yAU6
z*NxOYcoG#GO#%z+W_oi{IT+O^VZ<$4=Hy(;<rFT!dW%m)@Y|YvT`EHxT@mf_RS<_$
z%q<d7%n>g?Y|hz2Gkf*{$%+T0?aK;qG+^i3ZR;#&9Xf;0-5IyprOUGZdN-%pyC3cr
zv0U!4N95A65vY{>CPvBxs`OOS86Om2w$C1%j@yHETM7xhYlNRN{PE#2f;ENOAaL`t
z=xF;uBAT@Eo{<3Uea3;@ri+BKpNj<kd`r#&5I1h2F(1Z3O+RPmL(zuDk0Dff>@l~k
zEDbsYeW~M$MEJ0$45j)_rGoE0XhJ|w)a*Qhg1S`8n4?Kh^j1M@#>Wz+-a+P5bBClA
zEWdg;62yIX@DD#S_Q#^J@a?H5cCBb6TaW5u=c^o;TW5v#+uzXcH-}MmdN(vlhheK}
z6;X3fEY0k;Luv=>=x@9PE**!V1yg9Sw>wqm?c^_xiGV95qtW0$9vaU7q7$}cqIQTg
z7^T|b^C7`F>B*x1<LGSTV*0)|J|zhu2@@fSLI|PGUKoTVNx~o*gb+e95{*>SlOjw>
zrUw~4>AC6bRgF|aNJ0ql3n7FM!oC0Zi(bp8bN1P5UDx%!P(C=5*0bl%VUZ^4rO$$t
zp6)RBbqUT^SEAbvHgme>p{;lr7zz9t_qmu@UC0I<yJRqGO{EV`pTXK)V?g0;!`<jz
z0>-=nCioU$#-U!!ua-)ha0z#xu$+2GFYq|YyaGk6yW+shV#~r9s9rXbbksc~Z9f8V
zj(8H9?AnRq&wkWn(N?PCumBt<mSIzZIoH~(0uAPwlPz%Oy$%fmulYP=%y41+l|EE-
zL{V&dJP!pwUJ~<zWz63+lbHWHM-%t)X#8^@)ILyh_4fimGut2Y^CE~mzZ*>47lVf!
z)hN|sb6Vh1cr%!J9eZ3wOV?{O|4=ktHS|25Uz~#Sj}fHV;sFu<oJH32V=&x?^)E*x
z!6fk<Ouy`fR$0@bz9omeG>Zqx%7Ik$vzWAvN#MnCSJ3^&d61R2^7gqZP;~i0Y8=wR
z*y0+m6Fr93t?!AGZdjqTEfwFdyu$q17vadA{V3Ew1{NeAV9Y8I4bP@07Rgv<l(ANC
z<r9lf8Q{Kq3Of&rq4{=yXgT<Y*sj}zs^+<L)Zc?BEnxkkGmk@fLJ=wOIgc+^7hy+_
zjihPFQ(nAk3AoKY2_B3W+TMi+ufacfyB}w1?Vl3Bcb7@iu1UP;<zQa);wQnp?M$2T
zAfbF2%R!t*%VioQ^T$y1epx{F$A{v~#cVI&XNumXNQB*(_H5WnUKAZem75BRQ~a>4
zqmk2jG!o1|Gd($R26=E&3H3#JkihPB7E_a;;r2t4{Za@q#_?1?f}PDaE>N#7rvjRS
z4OL5_fwe6uH>*jlYXD4IC&J~PO6+(!lr~)uLH3X?L`U_3T=yM?gZ`ZXWL;U_L7}8|
zkPh13JcSk`N3)+G(mXyGzStLoM|&Yr9q)r`r+lWFk43?>54`AXG__l42>nXqP-bDs
z3w|D^tM*iau>WVWRka>XCap&9gAxN*6+_0@63FPCO@qfV&2!lfu(p0l&7UxRetbFD
zRlFqXx94s2rbMB^grijA_lHZJm<m7AqH(vG3Y;G36RX`JP`8MpOm-P+mZ`!0UpG+y
zrv=7qtV!^k43r;pp<7!MplQ7;k^GrNU7~CtnE8<!;tvtm&QW0As~lvSp5#REVH|(&
zFqS@W!r7DZ;D)UNMKA6X_6I?oVIEMQ?+g|!OV%WsjJrp8qSdtm5>U_1Lp@Ilk5__@
z9*}y4DU7#>1i>f;HR`-cJXR*s@B=l}C_NUeiu*&;B2U({n#cNTeo_5(bC@@MG2Nlh
z_DKIOaLU!6NXo=U>NorV1U?I(=6XJ$_|>0Wd%GUJx*sE^S1YmQsXJLWN{X_u3E&NV
z;R_!JQ|`NA^)EIfTYAEHNjwPhcY@9>6Oa@vVI4vxAav9s3dXs5Y;K0$D=D~aD~I!p
z(OdC3lMd55g+~3hftg7%+AKCe^(Z0r8XQd>hn-}5nl{oVU`zwuVD9fh4{Xd`2ZA0x
z#X{{hAow(iw$0j&-M>=Sl^Ox^N(qtbBUTkM9oJ<8@d`V_PvaM3N6+q19W#Lf(@uTg
zJD}4+eR$a@z!e*!F@NWAS|(nL7{3GJr4_Vs>UOrf5@Rxr$2kp!I3qF~D=O=#-61`U
z?Nf~MZ(~5Z`8;&a+yDy~TcP9fc!>TGgMuRgykAKb9od6<-qJ>K)r&lFM#FBjo)8QQ
zokJuetqY{+J7Mjc1?V_R14sFnz~lPUXyq^%YDe{8?;Pt(h<QUA+xnnxug^4OcMq5<
z&W4t`Q;5;XQd;iH`lHT_K-c=Gq^Ux{C3g{_d=tw&ioTK-7UGaBC}q9qF)W8uMx9O{
z;b+-hK>6)$$Tb~8tq@;Oc}K9$BNb7XYZFIH=KVXL3n|=D#yOBukG-!*p7ILbs1Cu}
zPvuZ)FUQBe*<kr^65YM84Elzhz?vROq-pvcPGwpSizfC#hvr=B*{A~@7UM{{uLGNt
z27{pcHS5~wG@1}Q2xV@YsLm{oeYb7FC|!v5qr)*Ya1l0aokN?G)x_poJmZ<jSx>@y
z`nje8!`n}=oP##)tUU!y`<8JjVJtf@4~EX?DO7U%CfC8325t9WllHYawAF7NI4E2|
zXYN~)af@{~r(P!}p)n9<?}@qN3c!3F(2o`Y@chytl*zY4&#m!T{UjO%iaa9w(1qyz
z_)744D3m>B*})+dpwP4?zB1<j(m8|>?E$*w&JaGN99^}O3IAUXdhS^b@@0C^k+v67
z-!rzI$qah`sT1~_Gy|_3U_N(&80)hNNcPk&5I&<4_4?^zW8Ws4wk;ctRRcNwSuAV!
zMaVVI+XN9zTiVBVxk9BWF?{EVkC#?~@u>o+36DaH^lvov+Bq2gwKv*b+X7y3XZV9>
zSE1c76KXws5>acn^IJ(N2zM}6{oqzI@n;k^RUf2LZKmMIA0g&uzv!0T?EAQ=xme~i
zj0}{p90BH1KNnNT=tx0D)IX}Q-Gwo}3t-LQCD?XkEUkA7B-OP!__CFGS1-q5Q`!_@
zx)upH41nK<1F-h}I+CxtON~DpP^-2fP&#uKW``)TcEd@m&mInaa~EQe#YG&jbOi4G
zWsg<K(a@Th14c7<!_<Z<^eAI@l;Z!W&d+@Go?H$wGbh4yABwM;uH&)jB5j%wN+p+G
zlh^l(KyQ&IROQD*oAF?l?fS>N9&|<{i*eBW^BIwS`c2l=i*d*2qj37eC2;(=9qqIS
zlAe4R4r<jwY3l(}_c0dSPh2IleK+8ySy#~NUo$;A*Au<_#(?@@FJ3+ACH2zy%QZMM
zpZem3X!lD-hW1rp>+K5|hheA+Tt=-l8|l<;Los2>FxFx4o@B4w3cYKXxBAjt=9gvP
zP<tr|USG794eUj=?r6gtszke)aUlCL4cbR5S-0C#a2F(y-~=x`W<L>Sb?a?RMFmtT
zcc-nr%W2BWlMt<U8V5Bpjr3{=<LwdV@i+z^Y^UISj`=M6mxJ=zA@FKbQJr)DsO62t
z>>Ck>w{OKjN-f(-)lbHdn6B`$?leAV^v6ec7{C8kU&3d|;qw2ui;P>KHl7AM9NG8G
ze?R(lJS28M3ZZ6@3G+~Fgx}c_Xu3EO-8TkPziC47yo<Er6XRv-1wgZC0oc9T0f+yN
zLY;@K->#Cf?ssD%YX~fkIhBT@(3RZpTS?fa3?wbXH9$1A53LVD+EA}U)q|gMMSF5l
z@0ls+&74ASJW9rjiODqBD+@Bdq!TB31(~qM2aPS;`RX;pQ0^N_1(_>(mH7~;Uc<WU
z7OiK!Vp<UVmF;n5LZX}eg7&^c;lQAAcw{fjSJDvt$aFbL#4+w!NCx<PkAxNbGEgw9
zh_B8ZK%Sbj_hh3StgN!gm;qTR_BDh+)(Mf#)}$e`2Z3gN1W21KNqE|5h{_Hls+mS$
zTERMjZOdu1=p>3h4k3O%O7Id#W90M|Xb~Pvl$jS%?mmH<vwLgD{V0%mifo>ZV_YuE
zIM4^46}$942h&<kqv;@aUk&|U9Ay-MvU8Up!7>u<9NVd=!GpFhDkp;XMXKYY($Hx6
zXqcUo3%WPM;P8QA>@IzeF85M_<vS+`xyg7O{?UAW8if@nQ&6X=3%HzN4Ctp-P{mm2
z)@nUsvLy}qSCtUh(HD~c>_8~JKzhgQMdti~w72o-P#%Zc^~oSO_Lc8-y)Qbp%kc39
z#^=!+3l<0Sh&>Ttl)Q$FWggN2-jmVv)Nl}KDEQ85PZ;{(64<rd!#8d>Mji7d6@hHt
zGOHs~_pr{{ZGn*RUoym<41tDTnqc#51b+8R!l=yqeEwcFo6E-7c&@obb$;p6z*SDr
z9(4|Tq+deS#$MoOxC5RJT!&5NHbhpF!zl_fFu!{|W~Yg<q1YIMPY=axYbB=@8BCq-
z{NcW^xuTHmhFke)7_|-1l8fXMs@VIvH3AI`XF|n+C-m~9kvPR-HD-JeK*f?++M!=h
zjCXFvf_J0Q*GkTMJdS{o&3qVj^8%PAu0f-WL2O=J2y0@^&_^)_#r{ETHVLKc?z3Fl
z#%W;hv=kqND6nX(9Hl!)0(SaBpQ%UD_V;Dxe_~m>Yb&8+STSvRxRlskGK6Pi0wMg}
zRpRwT&dD|!6KU&8+V-15!u_pSEiNUt0&SEyD@jxOG#It_B+Kl!a7ORvL7jgP3N8i1
zwNgJ6R}Ui_(zjz<Z3goSjwgYeOu@eYReZ2$Abx0Z#cp$!pgdRucF)*}N`WVp{kuc0
zhf<=*G2q7Knc<14`REeR6U~Epa$<cj=07)s**D~{e#1KKJEs60;1s5qUIgLfZp3f$
z2uS^Mn)R?uB*v2`(#Y-=Fz<Cd>drlh;N4EY<b+{cA3eD7nB}~98CE<W2U;J;vF^6h
zV0AN&wD)ISukJZeC70q-Uk=;0nA4Co1!U`le8zXwrFzTDh&5wXgnp&0V|ysd#(I(L
z7mFcsFJ=0~IpV9G!T7vsyvyEYFpp(stz#KmIwBp-mi9x>Gn8dhcaTQLTsOKWqjs~H
ze>XLf*6CJ2)5XtZtRxHrCjHOb>PBkUF2^Lxy=c9lKb87eQ;S`}V7id8Pwp6V;+7TI
zqH3n%juMp2pU&$H8xE39<A~SPr<~9%02K%7c;BHF*t|Lh7WvIWQFU+5@1PaPuI?+2
znZ}dCDN67YPJ$ZmdNTV9>%j{c%<1o8y@~A}5bKhO*H|tsa$!#-V@IRGasj+uu?`(>
z$FjVc9Sht)AhPFAY@RJO#?A}Ca^DM~*2kB4*F2=Mscqzq{b|&cPG+8hXrlcl4pRE=
zgocM%#AVB0s@o|9*S)c%CSxhYJoN!(NpCbL8v!-C6G3xrDt1`eQ`s{!>NLKc6WjkG
z)BXhG-Bd3$)ye?ldlQKGN+ReH_Rgzwp^ZzyO1C)t^@sT=CU%ngErqCj%zTl$*Qvm7
zy2`luGMv1v#*~!_WYE6F=-xvUsy}_FQROdab(sTI^jZpmURS8n@hW+{a2jrOJc9)>
zRcQHpBI-vQq5E4o+z4Q7{@N@!Unylh%Zor=-4`6@Z2=mz1P?W)L&nEo5FGNQBcdHy
zpF<^78!|6Jm*t>yt|xIB8w1wPEN}2_4z>CnNKf2X;*3pa7_0mOcx(~VVC@JHZTrdB
zwuW(%zr%P>jVYkM5&_JlMZLd0p_X^gK$Q475qovP;tUIv4Shyja`!`R&L0xqYd&nM
ztOB=%Wh_%NowR?wL%sK3rB5d9Kta`DmDRHz%nKVuozm>keQ_$n7gOly@s8+EW<3V;
zN{Rmb46vMd7<!-?{^=Qrfz1)bD8LP*SJUZC=NvR;x(^r6SSi=8s6t#u(CQz{NuzN%
zn|Bz?qs@V9uJi)c>oRBvXl87(Tr%JW`;H&=r5VCAVBx`7!*>&?9n-8{?>q;?qYP2P
zPv<JD3-Q^yJeIvl1@%Z3h+Mn!dxav_R~$^md+o9Ib}Lcc(`3Hf6qJUZgn1!laGv>G
z@_U|zPnz*)9Oy#?hOdgN0s^oHIIwOyCeq3bA-;mK3o@6(%YV!(G%AJ4JeTrqtC?5E
zpZO+ywCLJsrmJh-;zM4afR=8<NoU=5sI7{j88*Rac4RqjGCGT`(MjMZ*aKU8UxCKG
zIW#_g2nq(AAVQs<P+Rtfvz_*mWbf&VUSq#f`I0kC1C+qk?b8tEm_r=foyR{^Gd@H(
z?HtT@fK1a8$daZg{<d>os$SqNiUMKcPonnxWm8?Y4Wi1Je(oIt&V^-gBk>3-2DALi
zt_{VidrxTidPiV)ROqc`g(+UQN%nX{`sWk#=S&<+o+?YRX8HmM@Nq!Rau>9Cz64Ce
znYVb*7Et>uImMDy<j8~hI81W~&YPMBmfsj3c7_MaXCB9<o-79-WS)n$S)le_0xdgD
zprQRO@ms-iNBjJUt6Tz&EDv|kbul_U><7~E+8{f3gBNhEB<o8i#D}HeX+Iv#n%Um0
zh-Ku5kA)_WP1MzP0J!|eW>K5tkUO>vqVoGtY1R?aI4BL|#+E3*@}B%!5D5bh`k;3I
z6A*mD54{#`CXENJL9hP_wjU$JD)<%Mp^*&sR|>%N=qPAOy$B}CW6U?h*g_}gW9rWt
zINtCGq>PJ&sf?c|84}J5H}?S%C!y9|jL4X1=D9l=596Zr(DqviMx`=eH8+4Cv_*#9
zyA<HRIYH=pjxp`Zr-7b44(f0xwq5H+rkyCjs9THK3NW8M?V5)on=eGk_|eAAJ4xFd
zAhhc?tZ(lNO>Cd-P{RB^gBC$xw^EkfairO{yCEuh1kJCkB}22bFhn6EwrLL7GSrWn
zO&Nz**G@qLbuV;X5=He^$MbDkJIFnIV=V6$0nHy3BxAS(800L6QCUUMW<Ls{C>{R&
zPY*91M~ojgb5UFSgL(w>0i7BHl5yV9<gLOO?-B|NtTF5RDL7-yG#tK_%=@<(y<YaD
zQL$a=9_OJLWo=7KuO9>5Nlw_>G927GgTQv_Wm?_clFYJZ{n`=582nI(f?_2n?wdqo
z?gnE02Pv59M?s`*CRWsL1k1&-^zURgTbJA<?MEEJh#Lj9<~eLm8VGhDk3#EdXK+0o
z2jc03C}mGb#=%pNax##3<c+22pV%((X(Va&4#I-f0ZbQVJ$@SN(Zw<mqz2Pimi7z^
z89O&1t{cltvG+C2inBDSCel57p{37GVlh7lQmz<+^~O5l^|k;K1V*ScqbH~aIFS#T
z0a&zo9hN^+GG@YQv}jdQLCQeh>r@7@>~{dnm$-sf;uP?BwuolDi$?>qeo($P44&Qb
zMp5u|u0<XN9(gKi{;eA<D?Eu~t~1S`_9AT-_+itljktTa5_2}I(AKvfh#QdDo-QJ?
zW1%+sc}KDK++ZwaI=yEFfPwa0IKjG5lxdcrx}ty=jT<oKEYpX_^~AfKEAUqb>k{2!
zf{Lj=MBJ!FWU-@2mR%&soUfouXgGT8{YPFq9|Pk}@m$;|mi4x6rcoVgzGnYxI=X)@
zTCP8e<;UbW>TCj(V=?Y_2*kGLY?}XHE$KdF5B~Ql9z#-(Va%i>bk%|)=8ykI)H;an
zp`IXn|Bhx4n?SY&i?Adu0qZ>~sj+k}xo*v&th}9s1-W2IU;+6K>{$y~1d1i!iSXBV
z>T<svQbJeak|peW+9ek<sw`3OGRssvJxqFUkAQ%|`%utW%q<@p05XHOoM}@kvForP
z%Pul@T7WHeO=Nu{H<{PSG!(=qHbS^m#q_oeY-qJ0;a?(%;_OAPp79*q?`weK^$6bX
z=|pIeH&7p+2plj;iDE}f$l8;K>#i7~%MpO6iwfR&@F!9m!ZO4jUD+<(9Xfkw!~4g>
z@U?FRng=F<^=}OV#q9k2nF<oAgczL+A=WHg))Li)Bx{x95ZiK$nV=7%k(#``t_pe_
z_hWa1#qhwMoh5g7vCK>y2{fjtu_c=Ia7}0IC#H%1%0sQsk10;kMV0+m(!jWh`95x-
zc1?plM+TyNQCC#iy&&l<U*7cS4re#1H#F&9#_BUaiAB_Q$X*shO~cqO@@XYZWZpBa
z|3vVs&IDz9hj8vp%L=UC1B*IXC&)uF4c~Z^YSu;LPH7QJZ}fv-tt{_8LyPWzzY0_I
z=Y!zmJ)6L7HRSR#Uv|FfK>w05tdB`YW9c@o$#NN(!d?{CXOg@G4qf}K1zGGk*bu|~
z2PJp;dp#mhlscUz_;o?k?!6)E<ppR`n$p_#3(Ti>71T4#sWn$Y+Il_Zr0+Cn*5fkN
zN*M+-Mi!v(OgXgCzofx=GTE~480s-kjxg7j%(@bR_6fNlPU}b3`<w%p`~jeH3V@KD
zZLFWF0w(QbT)(lblce`&A{jmd6ob2R`!-I*Jw=!BP;mx2|BV2_z+g82Ev6~{<Dt6n
zBo(y&u@UNzAbMAF$R0rv>b+P9Eq^{!W}d_W#Tw|By$((|U&4@G{Q(EvAzKYgv0;`A
z8Q3xbSCo08xWbCs&z7(qj~(QO%fMb=gyusvDId+w$7xF9YOO}M@Dm`_-%A5`tH}@B
zW9W3Rj`uh&qb83Lbhd9}OzZvBC;T)>x`c98wV5QXhVjFTH{ck*aMbLgi+{**^c-#o
zZQk1G@QdmG@dK!8cr=Fm-au;utRch08Ed0ubU>{Hzc@xiy-*W|Z;Qh98?zv^YXMpu
z><N||meY<;0hlr=9qkRTVo{HM_^f{@R2&Mz`aj{&Bb9aID#xN|b|J5aUpbGDW$3?Z
zBgTCC#kxCftwrA#fvadLSQZR{&iX_OcSE6W1%;@0!<gs&9f@C(NAy;(`Sas3Sk=im
zMvMJ0YT`0_`ga;;ulr2g?>j;0;iF)4nM1*$ozN1vg?Q$efJ53A=qPF=%@dNSR<tDu
z=J@a*K8Z1Y@)peKm&-DU!B}G&2`1|jFk{6|;9VDDQ&u$BbKqv2Yb3|o>?#h9Y=D+6
zE!6$pT*w&j17{q)Q8UFB^UDctogK%z0xp92E`4nF)Q8%C+epVv9op^gc=R{PLG#5t
zapH;~rZj?7pI<@s))vyYCNgXsbA?*3vH^oHOF<f}z!Rl;P`x|@IzGmtVXsnj67FM6
zfL)+2I1PH4zqyX(-B{LYEvQHDV*w7v`v0$han?ptl3<JK9a%OR`}Uz%>K7WirX1q)
z6R=uvh{oG&W!i`akn@H3%di**Pf=s|idYbw&9#2ET?JCfJT%(m1X8b)Fz4h7WMFEP
z9h^+76S@+|)(f~i{5S+|1*l!K4C2=xhbCe}ol?&+_LrRcEv^J{Mn71^@<8nq*FgN^
z4l0!fFfT<Q^X(Q{4;hq!&6#ds>&&{^-|vH*g?@|&-$uNO?@&jLL3o)jMvm<%1=k8;
zv7ZeJPppMM!)!3wegqB>BtS>SEVw+j9P8h_qo!xC5ckeJ5Y{GvL*;ZByl*=yJ}e_I
z3yd+wsG7R&Vk`kVlv<5j3QN=HV!-MnsFlvV)9tTG?QSJB$eXCCBpW;r%ZOdQGg0^U
z=k<M9|Iu}J)*QG&blI75sAfC1uzvAd2|UJZv4)oUnKWrx6<F>=$nK+oBlo+Z!%8C%
zd{e5rnj!9HI~?On16gihFgd<=B;z7nK!uYS!WUen9mmsg^U8SaU_FHos>+$JIs~NK
zS!V<Ffs|t&a7SQ^Ep<Jh!d8aHy1%%nyK?;g<RXd={v{f%3vo=)QSk5@08#GQe0=R4
zD%cca^I|B=3NR1)1L-te-KPX^@Y&$tkU~2FpyBBkBAcAa`TMWHHv>a(s8JFKO*2S^
z#W<9+EY9*?$KXcSgXle?lRntPL$Ui*eAgARsVfjx56}4f6=?UfoX$u*iMEPFxVOv=
zfBn!z?~U;|={n1zIVw~kT4wOz<RHvv_Ybermw2nrt`H@xK<{!5kn~Oiv11Zd3uHF>
z^#NdO*#qu%xS+wEXnKBv5-*R^N0-Jbx-r}g#lJ(q=#vi1tSgCZWhsaby)Twk-Y0F(
zU-Q<ZrV{0m$E0*)1WFmRPT0H?5)PYS)21+ffNla-=(o^cl|?AHUO|+NN}7N9Gc|gw
z4|ntZ@x?AN9$z*Gy_lCuFg(~MrrT7IHA-w!dS=o)mi{<kz+80fCxet}tHF58D!k#y
zvQC<HbYcAk?2+k$xR~|EtT~R=tKH!6Iu&Zs;gEVp25noUaA{Z^+sRhZ^13Yen4my=
zCmq~+{UW@SMxY?s2DCISp?^{Va;7n;{i6rA6l8$8(-vyC%9VQE4})I0QdF@1b>Ye}
z#KXanERZtqyR|?0HJa&n-L8^adq*nM8*Ag&F##GaRPdB#?UT=|v1u4%(fD*n-R%Li
zcURUaaH9wW*O!4)_5)5haVGiRgRu<X2ZObJ0cl=ZKs!V?N%A+wV1M<36VIt4{f*{e
z$OFdEcxTEV%1lM$G%ad;@-wmBtHS<&d$HaGO&Vfzl(>9Tl8H};qjvK)G|yW_JRgRE
zqGB7b&t`Jpg-7tPo-bOi{Y^c?d9XU#7u1uTp>+z9jwLI=p}Qrn6epmu_7zfJTLxZe
zgqB62C|=YBL{}udG<iPl`0hzNR<L}~5KpYF%>l<Q0gM-L7)-OH!MQdY1i~>ywrwoe
zcKi*s{5XQj+zXig8;?mg%<sOVfk@lL@N&&GtVlQl4#D9}Us*(yBZKG)c1F3g4yViM
zspxZcB6@Eq!}h`Up!+e1o)(9q!I%?7abq?Yv@8N|RuPtuNyb0!lh};g4GwMX$<EJS
zWK8HW443;u=?@ALle^TZPYCtt-j(s93-P;E9*TDbVy&SOG{r_>+nx+?o2EvmBnj#U
z1(G($RoK4Mk9nJz_au8wQTCZ@oGRfg)C4~wb|?1G$LAK|&kyG?O1lSoPHrR?m!(kQ
z8v_E<T&^i?54a~rqWskrJlJvu6OJY^uKr+rCW!_IjY|L~j2$cWpjHOabg8ou>ay(p
zOK~N7_8164Wg_#MZXk^z-)OAoNNgE%f!LZB5s!`5q_MXggH$XBZ1SEw-WLGZ2Pfjn
zH;EYXOav_}S<lx{wx2zInAeIfXYU{Iwq?h_`NCnWp0xp5*I%Xmw~JBk)tBjtM?kn_
z6wJtHeeEsIaHoz(y*-oIZbgjGx>%x7>=PoWjRf(~J5<uWAE%V;CWl6qf%U0N#C-is
zc>Cxa8g4y{4Th}WDr{kK+natQ>RBq--D@S1_h0yior|HZ&u1<nAs=om%SPL54qj&^
zqfu)-DGXzpYZN7-VwMlz<V}Txd(!r+lR^IN5*f<;ABx)1RJT3}-X};=FU}csEPIfU
zVY8uauMbTzSO-o9d9ZqD8tbxKO*iVt;=Je!n4>E~;eWpLP~WpS=x!RceD4E|n+u_b
z?G>zPazU@HH;BvG9NIKw9OcGM#hNEUP}PrhpkE0D(`mzD%`0{$j4=lBdsnEiFM+>1
zN<os?7rGl|V%iKf4)8pUOS^cXpZ`7(Ze5P6ekh>xWikr>HK^LwR-xZUcGv0`gz>in
zaI()BG`h+7YV-YJUVA8vn8$qAmo%Vuc`4s}#VOF)_>9Di3}n4Iqlky=c8IAi!d9!Z
zAY@&9gX5-S3lO%yJqY6Lm1N<{bNFZOAvAW;VtIh$P;_uM)@7W+nzFHgL26oWd6B&n
z{h-2O57hRZK=ihhK%n?ANMBWwjz^0a`_~anEn^{2J`L>F&jO$F0Q|UW0E$LfkOt;k
zk6*nSt==Gwn3sWp^R|Kd{5%l!jpJpfcH0EQdB}dDLA!O^h#^D!!vjtRQjs3b5Ud7i
zmOb%Xb)MQk_QRNeV`1+MCF_aQrH>g$NX8k`iswQ2&La)=I478Lm_rNW-Kg`al<HYK
z(x_!8IT&IHme#vr(w$`N^SU3FT-u7AXV{st<r2F;9tDx;88>QV4(b%dWAnD_^lX1m
zY>rFD#-N4x=#K+Fmo8+xc_HU>h-qH=hiUKU1t3^vP97gEW4$gv_{T+Ophgyh;o4a!
z?ao6}{wwmR>jkuU;s$|>CV}45Fk+FT4L^OHaP9j62%hDz(1hieE6?)ns|G;(AA7K#
z7zMU{qKJHiEe;)&f+swM_%Y=SRDL~+f!$wF+k#8@GF2PvS3AOM*Er~Gn2F{$$|)WU
z0L!5)Ta>Vre41W?(rcfHl?L;Wefwu~pq9-Szk9Mi-fb{;YBnmp455L|%Hn6ksPDWk
zXpy}aKEGvNfpSmA{~JR!cNyZIXBl{uW$xocyMid?7x`rq1yh$5vcBh7_<kdr^;g@0
z*sB!ZM!Ml@jVz4%Fq@e6&jCxD<D6_x8YS`psMUEGPir#<{mm-4{W$~XDOo?*2m|D5
z;y`3~v^eCi0nHxdL*4f8#H7!;X!_V6EY2MU9k)%O7ZOASBXY5A_jF>H9z;7vHBsXq
z%)_%qhZ-kb<eDF_J!^A^sx8_EcAU+E*0T#R|Kcwix&9P7c?hY?yRBfNvjklh9--OS
z>bQi&FxXTR1U<I1Oi1Joh}n`xnl5F-x1M3xw|XJQ2ea?fspDLmOFuHzG7dsa**7qH
z6IfJ@gN&3U&@@+oEbBC>nN@`yx&6W0&j7r;`9d+tLdB9H^wEG-tOqTb^UAR!N|SiV
zSThiekE=k~mPEUzRbuw&IrR6`NECEiU2OZVKlppOqJhCNj0!(XE52SP@GzB%i+5ng
z8+(u)YbSVzb=FT$f}h#$D6?<i)3zLi!04w$C;u}M#<RIbQVw2q)m%91^ZstH2Ak(%
z46$Ww*o{%(zsnyz8#G|$JVR9cXTlYAkzlrHFpXNpV~5{z+ODRkmM4Kuk~V2E97&|e
zdRu3*8Q#N%WguVjl0+T8*7yR(XGmdE>J_M6_LtVxlt5Ix0s16ng43*v#6kmMQ0g*V
zmcp2AxyPtt`!D*%l(Cd{P2w!?rqktbm`?URjLIJNEykOhKoGo;j4pFUZ^t#z&NK|8
z?+ZYtd$HJ2D-fFI|7Lo73YsJrfX<xp;AQof3N~EiI^}L4dv3xDS}LYF^$sUq{8?Vv
zY{EK6-%#oG4MZ#%!FFTuHoy2JSXz*cLdTvU6+2VO-4HJ0vK;I(4l~_uCiAa{a%~SD
zu)T065%2DblExum8UK>rhz&>i$_KPis|yMs(AslBD%6_wBGVVlMP*z+D*qjhva+sF
zzk3MlLOl;h7GK8N))wk4kHyc!W?^CpV|u@h0}o|3R%aIBwnr7LPlx5MI{d+T=?Rqe
z+{x?AJ4kn`GFXN>6+EVgG0)@)aNUv*_2)0qxLj?N-jx!^$~>66@+yX8*3-zUZ1C1@
zrZ0`7AwiM_w&$Fne=B2r2w(6qO}*eU+x4^=vl*;A>xXYmLqR>ug`Q}$zEQ{W&TCHa
z)+zDSwLkM1eDLS>R1&z@Js2f@KXbJ%1y~!9kEUNjU{&{MocQM?zFe3Lk}EffwdV{X
z(JbL*F*%sP{0quyA!r+PpG0tx80EGT^2rVQpBPak>_r6nD^(iKLomBxHS}N|&^LDG
zLi>tJfRT5pcz_|a&P~K2MRT#FE1Q8w>;Ugq{Xp$h#WhTv1ND|E#CRm@DPlPxzg~f;
zW#kQ!+fIR{^C_lh9I;7QJ|FMsrK2TLa<ZPWP~$>T;G%6kbKzoan|_)%y>$$B7^rY?
zpc{H^WWLbCr65$#1?7>mV6jmJuHBdYe<!rFUKS2oQGx2F6UDNk!F2D1&8T=jieSYR
z8ufk(bv_w@VxF?z9w8WCzt73ESK4fv9}M#~98o@X5Mvc9!F~L2cywkUDrMb?JT4Ag
zjKXpA93f`)*Tl5+V<@U$$=6mef5(%v=$kG^)37ok{2EIOBbn!}cQVwyDukE))6p~a
zBv{3y66L%3(0nBxj%{CpidjJ<X-NbaRmP%?O*WcO?m|<yE8w>O5y%Y-L_rT*+$da)
z0-Z!EZgnC;@fCE?Oar5jhIsUMIXb<X4JqeXR$bc_eY~$i&9KwdAcS=@Ui(ZXOU8jB
zK$mZt;s&B!e`!dU5%h!MPE_ZAE0#%piPLT~FgQ9N<O}Z8mGAvfFxZPY9D7fN&k||-
zg8rZ?Eys-Y*|ghM1!~Q_LzCEi6D(K3g_1%9kMo!jQG$B@TEsl}92qdT7zVJv)5XBv
zs6IQ6%RWAvRP8fhdeT3eM&qN<$HEQ0W0)u6W*Tv6JBik&7l|ZmD7D<-L$&_1M(p^@
zIxu!YjKUJsOTX}551tVHb*AVx?Fa<etDp%6la87GU~oGRf1MA<&I4<}U~@I~>vEp$
zju)bM?`a~kQQOEG&QZzkFGT8f6}R<YiNf;Rq{-(h&zM|v{1g?s#}0)5F_q1|E2vw=
zIZR>t;OReXCtHzR+?dl1gdg?DDBVmjniEN#EGN^zx-AgOxPFq~L+G2~`FK$j%$RJR
zjBoJKMt!^8rZ<0)=_XNFqIU@6KCnBP1M9d@`h%|DQZP5EBjeIHqrl<_H+O6y7T1*F
z&^7UpHIe02F2q}-r4Bgdr&Fhy-#F_LmW=ta8ziosT+5UZ#LjOC2{#Xd_&b@<xHAzG
z&gVmvk1wqK6^^cjmDFNn9-55H!Isx?)J4-1^;?gkS;=hl@r?k>bx+8fC^hrxJ5$@t
z4UG90jp1TBQS5Vv%rFlu_+y40Mk7J*&mHpjQ5s{x+$tKl_6nXHeG1R5Ov3v@HfR1l
z2k_++t)EqhUgP`Fe)Yq#%`XLl-o&D9*hUOYVt1GpmVFt|IvaMK!hy=AXkC02rI7;a
z5v+wSqnBdCmvuy6Lk2B>_tV;tE1aZz0JeJ`2E3;rbIYf)eT@*3=Q9sPyBT%Q6oFfC
zF&G2ON+{h}x55%4*sP`}#pYP?zz=R*x{4#iRTyRdo5)rS<4x;WM}&z6z0Lez!eei#
zeaumC%~BETms_A;4;kKQNI<{gVc5_RNoS<S;-KwwaPp)IT<{<s|CwZA(~1B{(OLld
zHQjJ+a}LJSwUBCg8ts3}QTE$|j@o(@1R5J*a}Y({KJ%EzDh=v#PC!6-I<)7=U`$mK
z)XhAMI-4EQqjDpSa%O!%`hMuNA(-2Le-#Q=hH-(*QpoMnOV~fr1+VvGJaoNDnD3X!
zy60@z{UwPz_$VB^YV>A%)}h2_eGvS+a}JwU>+`kZU}Al800cH1h7NQG+w1dy6?|dE
zhXQ=g7+kjUUQqFoF|+bb!6LyI5*p6J9L6!XecBx#y=1>bvNMS~?Fns7>0n;rPNptc
zhDzV#P_wfNO}!66{E(xx`E@bL{<{|PPfbM6fvGgRxDfIKyg^G(7oD8?Fb;G&)W&8I
zZ%uD>Nl3w?BTG<iIf8T!tAJLq5l9~;lc}4TCZH1qqpqbhCc}1Ks%Rtm<MK(|q6o11
zcLiq34&arG73gGE%9%1v&AbK>`qYxzo-*jnc}nyG-jm*%S~#b>D;jkxKqq4g9+&50
zu%R3*UWl;oK7Ul+42O5@9{8OpIgEr2R$Zb=lieY%;oT${p;EJaLImLPJW`)!i3w&$
z*$ylTT9b2$=jg7m);kkJLhjIz6<Ngi@MusL4S><>_MqzVA=>}hblh?#07Ksv0&L2s
z19lw;&+~o3IHQ8g*P9Oy%mSdwzY4sMUnV7o_9OO@px+!7cqVRu&ipB0y{ewJjC@Vo
zM-@ZEVAeSvd>+>xGh%b)1Tg<O1v+NfP}%l`;`4{%5Y4_&uaWPmBK8ppIT%bDUY#cb
zSs7@p9|5<ujPZn}8ls31=nOX@@jEXOL82Vq##>>|!%P(KnM0I=<Ye(Jb95-kLi5f(
zWP@}P2HZFbPQ6+<y<=CoFCE$N#eXmwrp{)(i*Z!!xeA|r@JBu-m(9i{y!pT&x_pEd
zI?PHZs;~cJo+k5N%Pb&NG!vc1=%S=^IBogBL)4yF-ocgiY;i}iE1NBvM2|Sz<N>IN
zw&8MrmjX5nftbmRz4-AEZQA0^1r{2j{_f!@F*?ho>mNeV)pb<&j1+E{$3o{$IXHzB
z@g5Vpf?Z}hak~_NLe2;rvSg%s$sWkJIRs{RCZhQ<PnyR%-pJS#Jb2;=7QU_mQKB{|
zPqX=bZVi>!vP^@e9G+&yV)eSkxaD~$Mzz<`-lw}^?b1p(?HG-9{X#*wum|WqJWT~S
zy4diD0$sg=XuHvMx*$Y?UhV|MMr*<1=mLBaYr*d5_0+v+5p<rJ0<{T;q5A$oI$52F
z|BYXTOC~v^-*qpD>3splpN)h`zirWcfD4rv58%BeRoEPImuTFsKrG1xr?h(VABN!V
zq1#zD<}259UzgV(R0(?jZ3kOjmLD`rfc>K=_6|FP7{GWM#^(GguNZV)D`3x$iq^XA
zrpp5^!_-YG%%5|Hs(h=7=#w6_zPw8VBeiiwZ9aBfw1k261dSfDe)um%u<O1F)^1$|
z;hB!`@jbgYt+;?w+cI&C=6q}$W(Y1N(U{@$lDv-XkFqZp;YwW&hJ3OkC;E@a&pL(J
zVf%yVbX+1<Q!|M2R13-HeDGP73gfj8K+Emkw9I}uO6N>~{HH}U!L1S^o{hnfoioV;
z(=yz%dpR}^%w^v8n>74h7C2qpNgBFJKxb?{4SzTgCPke@*`RL43Kt8m$@MoU``6t@
zF@78frVi$Mht9(47q6+e=_w)#4yWc-8Z_P42JJr75$VB+wByGWlsf5AX52)9;|^=V
zj|<qg_Ydi0{!2S~3@u&V6}?Akfbml{PwkxyDfPi1tzaH+3l$X&?}t9u*#B*&A(SVc
z2i2e$D)4b4o&OZ1xgrmps!#F1Caps^lfz)QLm$ev?ZrN<6TxyMV^p=UEO%}k#Y;t0
zHqnbJ#ysMxM-L+PzYAz><_Y$mo-1l=a3Jr0okAClA@H5u#SLaukY}G3p*fp-(}ono
zFT4IIifJdtB?5LIUrw9M)_~-g7w2N9rdI6i%3KhFv5O6{o-wsFUDQB2Y_Q3-9R9Ky
zx#gQ7IJPj}{H!=)oVN})4NL^@lYKy4IE&jcI~%HB&nKEnmi3t$2E{MR(c;$`RDO+v
zeS>FV=Q$azp&7U*!kKlX1%h(w3^*`xBzl$fBf{lX(3!glZ2yJBQJ&>eXLo~7mJ3jr
zn@ZdIo}s;>Y_X}YB?xC#fy3a{xJwA=Q_Om~XacCmXV8@HD^Z#^icSj{ho4Tdyv3tC
zv}MIg$cSaUh`aMJ<K|!L`*#lNm6UR#$WO&F<prSZI~A-G`x4U_=Fyrxovx2bW_kZG
z&dz!l33Jm!v7HswbJQWxB@6MrO9)17UQWE7I;g7X0b}8spi0*kFKUm*$Ve4TRS&~f
z%}m_AH5znU9ieUN7Fhlu2WLEEor9T2@!51;)~lny=8Zzozrg&^lgE(KS26Hz<_xqh
zDS$f~a=fvVY4D3w@OU{7GQnXkrrZ%aaXqNjXd@jJi<_0@thZtjr;eV(iF@87o)c7{
z8fgvU34dw5r<%;W%DSX-)=(82$(_d4@=m^H)UVGeYF@FOv|qVT-Z_NBu;%k93EV|>
zESZmbvw&LKnq$F}a(p~04Q9W+1eP8;#IbBKqE7~%j!K7wtix<Scog-bUh~G%Y~J&=
zGt_=NMWt_dK@@JIVdKv*Hf$1o%AN`3p>(kA_MBGh?Sj@!rn9y@u}NYq8x1$6`?VM`
z?%rx{>bHYfyE=>7{z{<b1&1N#z#hf~G2{LGazX7WfE`o%qS;m_RGzW`#T0Wc<`?7E
z=!Ak%ff0Njz6d*io?<<Fz2VyAt%!rVf#}sw67q>L&yPgoY-I>k@6d&K3m%*LWnsoE
z6%nZ(QN?~OF0Yy5HoYmh)hQpEzikKbVEgg&Sv0^U0_t?upjX3a`gLI}TCdIpYezkz
zV7@`w?r*f~yeu@x&mvu0X5fJ1lhM6s479nN0NWp?Si|R_rQH<ZM;?V|>Wi?mUX3b#
zCRjWZ0QW`(;c;3_FKHqTBd5^U+aj^`S~it$XH2`Jv$6EODONSdU~cJorWZF;Yu7%o
zb!I633sGU*pK#ROYlg<}B_!oUGK3l)2HpPxG3M40+IiL%nw2rsnE5^1+Xtab3Cksq
zJp+2b3%Ixk>@3>k$9peY1A6*b(Wj*ZdM!`F*0M~Dx!}e6s2w3SE(oO~tf`Z@g<R^+
z`d}{pC7!o}*xbJo_T6G0qOM(8F5Cd!;|xHwx(6y29R|_;{UA$8r19BGcJ7s<(q<L)
zyqm=Stv*yZaurClKah`EMK~jOG=}YByw|HAIYG=!S`(WKgI-iZMIC$AZtbH^&PQll
zdv9=nT^Oesh^F_n(c^Ix*_}KLRr4L-=$rx+xeg?W3z={CTu<2CG#d5f*SMxaC#u)&
z3U8{M%J#<_>1n?h)(^^f01K+XdWsLIWrZ}nc_y6R5CBCt*t0sH<?(Ni2HWA8aMxxf
zin?at$c|~ab%zJWWb}uvCrdGW=_%IRsi5A4cKEH*2J^SQp`RKVm(6Vo4sNu<>~}-J
zYi}j>2*1k0S7%9+^ci_^Hy4N6vF~O48`APN5ck_3#F6GMXr9{>LcSHjlJ}!=l`0kw
z1*F6J?nkiA{3gG9X*!PVxfECCF@3agG`~rm27zT)K$rDFTWi^p>OoeFn>ZJ|HV=jo
zniKG==@iu2XG;Wa+l!5VA3&qC$<$Uc1mcZukrRIz+jLb3{Jwb%U4QHW@w>i6v?`)F
z(0?zCcdo>d<CvG-VFDOC2Jyxx9l&<la%L@!B{yCtK|&$>4z=|KOHUsv-MxlJm@L8U
z>NDJ+3B_PH>mxb8M};0g-x1k%5pN`ZL-pT>VAK0J&~j!u%Oe`#H7y#NHfMnATu*rP
zX)4M~rJztMA%B?~G8V1HcN*uw>tPiy-OPG2<O@Nj4kf8`<#_B8)7cV{cn$QVPA->0
zkQNHP50}8Hm($QsGmiPrZxNmS%-=DYM+jitnePfTj(x{}GE2pl$9us$U^Vke&*2BM
z{czL1_uTuhG5DZoHbgVeuG4w83+ueeYkxn7;{L<Q+Prj3O^ibC#XqP*XCe1|U=fb8
z%fOsrJ2BjL8WfyafGKf4SRa@OvW!PIPECCw>e5|)>)lOQo6{Zbo@cT-c`rDb^PF6p
zWiOn4x$w6q=tA`vym5-<BDO@LbjEig;Es`JPXn;=pa7nK+>bXMXCj=?gSLmAd`KU5
z?X+;gRsoyyZ+xeNPWz+5vtlBgm;^PxiKON7G${Ws4K8doLND(U0?Aph<H;th95oD$
z%Xjg5_bQlIc@Hs`dvODQi81`I3+y;$gw6&^jL~kP61^wHEjJJpX7@R9FE+n_sUVV4
zOY&>`dC2JL03*hfW3#uCsskd3>iQ-yZpy$;OXigr!@i>vvS3$31zN{^rOh+lphEbX
z`nd;#fcb~46`Md3XHK(aTi_C(hVIJ~fE+T$Ui+o2E2NRO?Yhpz^!z~W{`RL5p&5t!
zCxTBW^J(w&g{Y)J7;hDUh3agyb8jK)?1#l7^F1^qX&h*eOoA4zxsYPe4Xn4E#^pm+
zqnBO+*YV$3x`y%EtM*=k+KV5k-WMrJxix_43+J%&V+Jwy9ReNs{?vTXdd4ID$4~C%
zfN|p(|Il$d>&Du_=Lh!#$<<k0<Jkhx{XQEc9lmf<hjHL4zmp!5i=kqCDiq!c#b2$Z
z%&Vw`ny`b=6m^?Qow|dq-W(8YF$dK(6DrY<A?9myX%GFO*n3ePWC)WW<8)VSQa&Rl
zP72T}*8)qQ29i>@hGnqJDBtXa9V<SO^RK3(#<Fy5d9;i%%Pw**tG9!4GxHyuM4~&T
zjdoP72J0RxX+tN|XvQuE*$4{|eN?dM#eI_S+#TJcx8X~6?{A5=0LjZYTvXU`5MG*1
zdzco0)cHPXgg?|{Ru;+DBydeiv1Ruf*ccIohst8WZXn|ge#@e6|Ae?T^BhWMHFA<5
z1EOQ#4^G8VU@%q>Mc+c8syQD*TjS8WqJ;L`&e(MR1)!A|0HW;|_)~wUqqbHuguk56
zc1QgowC@opwY~r+zj8Qq#Q>bQbr&|vW$0eVvbZrlX|{O-H#=1q%^gOAtkN1)3N6%T
zd%UC@ZfJDyKf28`7gfwZ(Bj#T3^lvJcC%w4p}hd5R)zo@nNRUeA;=Q1Q;ALrk#F1y
zk)>?DI_gDbasD<&#~DBGSPgB<jzasrn%EY02E^-@V!LA%wET);I|>7M5@3gQ-=^Zt
zx+F9Rh@`fC*u5ww57H9LFg~8;nTC~OSRK=C=Nm))wd=%qodtEtsG!YX=7Yhd4BFD{
zPqGR_F}g9I^^)JB9a*{{ldUD=&5}?*lKDflcax+(%qKK;5Z$g<fRc}Kj2$tA9ExMl
zPS1I)%Q=>8y`7Dg2ikb+E(58|>b_0&%t>IaV62+Nv!LE_k+<DiLq#nUIJvbUaV-rY
zXE%qVBCjXNEc3~mTh`bn%;RL5WiUfG7h=ZOQ<bqXmCws3s-GvpdF(Ni-E86Y%+l#;
zjWVeDc?tf!a>Ul$<23Jj5UN(LqzOM0LHOwwSt2}-$}b9VY&?TzZ?n*$>o5pu*g+-j
zC1`xIkT*D<L>;1KAWGUxcRb7o4?i!KC68lSZnm$u!)E8FjJw~LN9E2k0v1cbComH1
z*RkE`f{iGcgAk=(#M_?t1&;APS?Uf13qF&ysuk!mIE3g0?V$m05}60)zO|Fsk!HR+
zhJov<Fx>q+?eJ&4!1oexqP-F;6pTZ>Hx25Kg^-@Tm!o9wY~r-x1j(*m0lmUZu;qag
zTSL#GX5~m!1m|Jk(6!ic&Wc?1j>Xm<XHir%jE{<BJ8q|CL}5Lcc(2+CAt{bv78-~t
zMru%><%vw&usCgP8CWJRBMmRh!L2KM?w=z6WO)F-f43V24YPUs_bFKVasvvAwqSB4
z>(^dW$m>`%Q{$sdqtlsbV|?Z>IWT=X-Vp6XP1#Aj`oR&M_6~roItnvq@^~RK3~yaK
ziydY5pyOaev$xw(mBBsI=EAy%dv*n<AvR2Ryn<d4q0q*+bE3P<w>CT)beCmQwY(pX
zmu5o$X$P=<OLs5`iAIUgnXGS;qwj)rG_q2o%ep|4644cRBp$}L&ZkuNE|bbe{kEC0
zrw9%2hvUayxhPPs0LKC5tjl;lc4V;bx#!Gduy8sE51JBLFx&TyE2r&+Ouy>>%I4hw
zC9HaT9(0espd$CKoJZ402r07z_vmDzbD$@9jLsvLiQZs!G7)7zF2eDBk?5p1n*M!w
z1(Z(XSyyZdw5>9NevVQ2SkCrCt=)_BJAyzmB^uB6WxCoUroDc*qp}@`Y`*Qz#1oo1
zkmR6)S__z8s=5cZ`~9Yp0T(%eU=AO6Y9aLNor~7HO`-NtADEFe4n>E2z@X3wf+s8C
z<BtedHl2y>n=YY|xIc6qw*_7Pt%Rl%=DeHXQ5dj(5{4gUn(z7JRF*gal78lZ)`Mfj
z>B3vyekQv+%6uXF`4tqMt+f#p3wVgFgy;ULtQWzAnhs)j|3WEkIBNpp!qK2BiYM}$
zVVEF~1@k>BR>8)!0?(iHdk%%&Sv-2hXb|-!H|p3W96p^tgXI-b=;afLx<)J)QG1DI
z_%n}5Z(E{S6M((fo?scy55%z-(?J~16440{PG;oGH+OnM!d*2q33@`GnEu#g{+w?!
z>kp#$97wv4<Z|;^?_<skP`ljVB{x%PvqdOz@jK4)lLPTtP5@dZ_+edY9{fxU!0OQx
z;i1(y+{wIg`M>9}S?eaRPW9wc{&|7qbP(}KJH+-XuZi87uf*wQ6gUMv<6@4U!3@jm
z<oty~Y+>2<<{&lP4wgY=dJfcGw?~=HWsr_xnWqZ{s4VIr!xu7t*AYALx7~;iLvled
z#?rd}dpFQG4}qSO_F<Iadw%M<3n;YRij5g7!1#zJG;X_2j1m@s^mP&m@Ar&!QOGd=
z7MtO39|EiI4ODMtHlP206rF2WjP2Kl2T6!R5|R+2&_k%X76u^+NeCecNeCe%)7G?6
z(?*iiXhXCyDw*zURim8{PY5Bz6SD752;p7tH^<?FRQEmCwbt)<o=1)o8@DfbG6kGh
zl6UyvO%^lm4EbA<_?-zE;7;=nUvdapRMZH!7L}px_yYWU;W)+|7>svLb|DUkEgQc2
zEUXSdj9qI=?uAHHZQToF9nxX?IwM@ZtPH+xBX;5l`k8bZ$Ilg)p+ngK(2iEZ^7ta^
zX=KyP=9xGnDV3Fs8%*80so><2#6+C(PX%@2bK-SqtO!QiaR!*!rG!cIP6!6;yFgk>
z4yFYMLzuc8j~iBF(uhdd-uVP5LPBvLqwkT*8;Ta41J58OjGeBBI#V+6gY`hHnVbzC
zGF|Ss>@Y}DFBdnR{>{`wI)pc8^Kkl+Jkago#&=|yVB)?aW_UK5c#6xX?k=uE)gK!O
zH|~MX4pz+O%{f+A`9rKfy99jKrm;=bwQQ=oz(wj$myWFZ2d!q0g6sE>fvEC5|9nP8
z9@RMEvZWnHh7hOy#7?jZ9LNr~NKyU$iMX;p4=sZzOPHk=KaEhp#`MAHl@rRwJBC2p
z>!qMPTfq;!lA+WuTYOF<8*95&W1YN&&uYxZrVbIRCrpQm-84HK+Xc0+reQ|TVNiT?
zXWo5>vBEP2Ttk`8T4QXu=FfasGiolTj0ggm-5P%DkR08K^INw$ip%E;OqHF*ZJv~Z
z!^I4cjt_?#OIy&Wa>v9PXEssC2BnQ*tZBXzYr4?Jz3M}tPTVS1xc20g+s2@2D*2}=
zqw-;QA=WL}$l51tfu@~Sf=m2*Y<VL`d!;to{8e+=blL&TdB+=dN5R+6rnu`_B>s7x
zh5^3|(8*^2F(C$V<CjYXleS`TQ3PSb%J<BwZaBDi_6CFJXF>GlvyeMC7RUZ_CpMZB
zn137!NKE>=gIt(UbOm2`ucjSz5)|Dyf@V=AIH5|3<v+%vaZMLUHrfE;Yl$=ccoyp*
z2dGcC0p#@g!}{d<BmOfH8a|9-$2A#x@hR|JkM_XU&0_3w07Om4tY_F7qF;?Sx@4b&
ze?xQdmTfisw{0TvT*vW`p7E&rUnZz-9pIJ?F}yuY%2l<$m}FusDxd5FlmB^vGBA{3
zT@H7Cv>fg)%LXU$Avi0WM}DI5Li5pd&`Ryc{nD$U>=|P4f^goqycm^#f<dyqL3kum
zf{Dj4?5K%llAB}TPYi<g-$3@)C<OH$=E78~BrN~f6>T5c@D|NUY@!oogi8M9`fV`~
z3@T`UMcT4Y+8B&<Co0Xr*05_rm0lGJz4oDElsgkSI0$baAhbxovoi(6OStF;qO}9*
zGbo3x8W02x$wiD_XK3Kt%2WChAGtY)PahBglCgt@efi-ieZHMs1jEJR_AKbuKY(1A
zZYX`yRcu^eA|Ca(3ibY53PG<Uv1!x+p>0?!?>l}3IzQ}SzM_R}g6UE$T=Itdd!=IS
z*JEHgYaQ5UnLvuiAuNlOkbl(+3#;jKJL{KVam^V2N>5?MX$k5xV$M0|f(e@hv%cq}
z)-vK$&%DN)-iC;Ew~yo>(!)_bZZc~>^plm3SHP_9^epyzDX#0(#GX4}Mpc9p*C>nU
zMnmcU<8}oG+$_X~;a6EsB=Pso&&7-x74&tS#ya+yId!_k=LS$NcF=q_%1!}|VcyU_
zurIW=NkH*mir~HYAkR4&!D{wL@p)G^QBG`@*ndkh#20i0W1k4@d20sR)>z@Y_HF34
zk2*?@jXdNw{T#N*vAwAP+y@V(tXK`NKAVhwR;%FtK{`jjYh^XH@!&k;1It->gU=mV
zf$Lsx#KFh=QJ-fzG#1){>*8~u>ZJoI53)g2dlaR4r#bpvVWJ;FSg?bh$L`PBWu}9&
zzC%D1GTb`1XEOTFn~LgQ?(lsQ@mR!5peB)6Jx^6UUL6J1byx7cxi^@c&18w&WoYnZ
zISe~&OCI55A;d=yy`Lmgr#T!aS$ZJ2tOl1y1)xprNQ-Mqal^zZTy^Cx+jJ=l8?J8#
zKb=bOxp<DU{#RJe3Kcdj`6cK(72xEz#7!K33R|6y;z(a5#*^E@b+EVabp1ID)eVCt
zPui2Y?gWiPLm;al0G+zz@EUah_nY^LOHX|fPnXd%;nE2@sv3C5m{PX7c@H}J7NW;<
zO;n6NC>(pYfjsUWV7|!{m*l9?Vpb&H`4x?mI*mb}s90>;Ia|=pDWRV5a`v?D9Jb~+
zGtqV}>y#V?d^q4tdH5Rf+;V#FQfAn7Xf&4{y2tF~=b`CQiP-%_2DeyK28ng2p#8Hw
z`J`e%{_itkkGTz6Ji5#^+%mcG0TZtGECnE50@9XY;4ywX%L$8QCT>xzbbJNm52Bs=
z{V;$57FgXv+?7tnD2*zoyAJIneaqoT-6&N2=Gb~>Hr-P<vbxS@yajfFSD_WO&i=rR
zxA)`5L)uxBc)EDb69qO->jYy1$n&7}hHbLyg({sSW*~G2cdKK-?w!Pfl{Cl9OXV}q
zZo!A$9LT$Ulv(8%VcQB5nC_m4pKH$1T@o=dXg|27@1*QP8Z7@!Gr8@3zzhAL?m;v<
z70kyJze`Z(e@pD67zpyy2C!{<7<Syx0qG212$|)A%2QG{=JjQK@uEAL>>dlv<5Sr6
znUt3w{DwIn*bI{V3%pLVN$7nz78L%QFvFfY1(AI)$$bW99pA{<^=a62s1t9hhyzc7
z_NuR?yyZtRI)o2~>Wtl}m{0ra{g-)*kv_!#OWBUNv0!Dd!78VXhSuyrp|;mBtZXh|
zf2IXdS93Ach5Rcxbm`4Ao5Qd^WDVFXS`9B6SKtr5GPHVB2JhdNah#7_Z?`e*t;mPQ
zBjwz=L&k@=dSC;N0^MQ6A@UUCCf6SLYHJzgscvxXD?4EDmQXZt%K`ZiXYMfJ3A=SC
z8synBm}p3m71jejeKf?i)C(B3L5&(4yMQ7iRP42c7+kT3QL594wU`QEp56s44xME#
zt31iKXTTN12ebDHE8u^sx!Cl|jXS^I$sOM0ibZRu6xZiu@*#`!@sGtN>gjib#M_C`
zV)QS)8!Wj?U00}U3Wkh|aK7qE6*~8G;&KJuUl;!3A72DO$+1|(i^t$R?Oo*U4+W3F
zsjz%5@uas8N3DO7A$TffN6WV`UB~In)Pk}RF6Vjq5lyV<zZ2cJrnB&87Xkm8<D04F
zcthb$?9mY{<VhhWok>9l`D$iXk%4xUPiY%u1|mh2ARFfb_0MA16KMd}_L+%tUO`@<
zkIcs|4BAZ2;<41h_$RI$s>e!kX7@OBPU}xDnJ>KV$}t@Nr86qJU4R^gHW=QHz#M5;
z*tazWwQV%8Zk`D=DVDPNo^oti=m?_h0{~)8Y@WOU9Zt)*a!*fYVRKY4S#t_jz((9q
zT0y?b^diFsay}I2f%C9*xP5dn`W{fR+DpU8)6o^Jj%tC%uq|M|>n`_ctK}JgFG6ep
z&3t!^MtO5@cq=b~sMx(IIx!Fus{{DgSCrAY?$7@BY#H`Eqr!~!r@{Oc%?2XkaP&ww
z9IYFUwNpZ1xJnzf&zV7u-8g76l)wwibbxa)tnlVAxIAw^=5@*iQ=%qq`85ykYt29h
z>kw}91+l+(81A}L32SE;!|?5h%IYM(ZM}k60ppnDtCKjZ@jCC?vIaY5shQi`3B+5C
z0FMxa^XY}?bh`{?{c>1_{xjb1<OaNxxC}Qx+=164h_UqcCU?CuRQz^R2~@=T;P36u
z<R>VBgxi-P{ZB0FR(E0<k@ReLt7YBJY{t^iOq94*75B9%0I7SWSgPT`H}%N^P3t3A
zYidB8@r{u4>Kry3_k$@b62R)=7SQ&3$vuL)a+mmQ7CdDD<OdyrajPxSa)u?8Ie6iJ
zS4QHd=`yI>`aeNP_d^Az?%z%gaQOEid|Vd=PS*XPeyKScbWVX2TFH3lg9$nmoZ(t&
zRp?Q)m^JhxzGR9iI7J-gJ|i8$J6a#TzfhhvbO>cv^YDaw6*9MjXu6zuI-ciwj*!Xf
zd&aR*FB73lbO|=)%!S^QN<ni(IlKr9!&f81F>&}>*mX9Ix+VFj>{}t2|L6{FPTMf(
zzv1Zm;xctjoY;gxRrv1vMBMl&6wmxELesb@e2RP-?jK-^eZ<qy#OM-Q>~`SE|4Lx5
z=SEbYcu{Qn>kJHAxsv+)X9WZ90iv$O-2AdRh$4!`3bR98^}U$cZYl#ohdw*y#2@^<
z5D!lZLi0LL@(PwR#ip6eL6L`vrrq(f{UF@(CILGVhk?VuRg8-6m{r@4-0lWQTU4kY
z*co)SY=Qq;O75#l@rb=^u+C&24_#7<7G`GLYVmxQ*A@nq;elA_n9V#)`e0?E73@4r
zIlw&!SV7J(j0~TJT9%g}akwe>GrPqNXQ;8~v1pXUB;lsUE?85dL3ycg*1cCBY&&BL
z&L2~_|D$T`WSEZmhf;9h%w3r8Gm<{9hEm^p26G>i!qZ%A@y10<bf>+9#g!-AWxEx4
z54j^4e0P9mr(RGP?hnSpq69n75vb}ugyt8om_%=oaD~o%l1-Lk`H9V36_rgMKpU*N
zm&`gch}FF$1YM8R2#cdPpnP5`Sm=Hb&paE5x+Bl<(8wwbF6j-fjj`xaoB#zA4x+qi
z6fc}J96vej!<V!xv4}RIz1IRRFA0NNk;UNtF%uFur*Ma5Pk5omecDOxg??7K*tB*8
z)-0fI()kn+?jdE1teCUr5iW$Eftn$2n5pqkHgN9_oS_wmp<#h28?}yG^f6%aI|GTG
z)02D8N*DZ+J>UhmMVJ41p?TwDE-|X6_d-|vGowEqSZhOFt{_zBKjZ2!SCGnez-z>C
zNL(?88~Ll?TmDf{{W<~%{#*o)9_!%#_hZm}Es_P=27|_BJ@O2gvAqN0u{|sWEGW<<
z`>n~ER(|DDnNP7}m<&uPA1GzJgi+p8&?Z|KtSp!Ent$d2_UR2vmxp5e6nzx6j8i!u
z_u?-S6cEwdn+&0=Fn#_O%)6U}jlI;oV-K-SLO-$X+q&bj2ZiWP4sws$rQqtL35NbV
zLHD8{c>Q+_Zkd;&>-TkH(fb|Nl{*Dg{+dU5u7lt=DTx1f#RI(;G&7~_iC}xT5X`>_
z5IH8BDZYn^9da$P=|E>l4(<<LZ|-w>7Z<+iYAU!`r9$xPaD20cx~#_JEHaKqlX+_1
zF6+XzdXz&>_;Sd&7=>%b+o1Yk2u}Z-i(ma|pWPY++C8IKuAUtZN~T`-BU23ML%U5o
zx|dS#YtzoHcx&Yen57#JSsHd=7C}C0B#uP5FARw+M2Cu1JZI%;T$CFQwZF~5puPf}
zU97>jfcE|AvDhN%&fX+nM*A!E-29anB${mIt{d~=;IQ>5-@S;do6LEUNibM_(Be7$
z3wfl)8FGc)X8(Ivh@elu$ES8PFbsyK6@F}+br9A$|I1(frv9qab*@dF`L#c@pw*@e
z#BX%M!p2OtYvpMe5EYNgWy4tAm{{_QrtwOR+3<&l5I^jLIAcp3_c9s<kuwj#u;ge=
zHX!%(G8>e8ycWz}3B)}f4{g8ZVB1tpc==X_S@UF&K5`&Nw_4-N@A0VB*@Vq~!_i~}
zu?B_9Fy)FIuA?TJ&pro@+9~X)0nJsL77`=yIRCh|KQW44u<#V(29z#`Becg${J57J
zFaE~AmeG6s_6+cOl8Lht`s3ceW;n5cICYKPgtM6w=<hPn`%Vuwr+<Icw+Mrd?o%M(
z9_5wN6Tpug5Ry!WlJ)B1j>!jM)2k9_Ia`5?6qHr-%?JC#)6g|gU#tps<qz*%LesuF
zY}T8}IJcq%qyBb6iQaVneOLyX{%L1+8P15OI-$K^4VyJ}8(#L=h68kEm{{$}t+Mjq
zxWpb0*mF>IWZ;9mczoe>8UyXqz@+*F_jvdpZ*3qJ%|H`WE_S2ywt#^ph_1Wrnfd{p
zb32#et==)Pd_fSjo@k`ZGYSst`!iu^0F0bsfKQK0P~xb8?(?hQyX_f#G+`2sPVGa?
zsU+)@r9;uQb^{+5MjU|Ymb6p7$#GaF6EWnj`yIfC^6Rp=7-&`W<@H->XAxLIKHAw}
zk)H{M(|+>cia=20Pk@e@pZE&P6}WKLXcR?9iTC%fM1^D($~#qyHRP4><BK<XD-MFW
zggzqT=b)s$ui)38oFp|9`IfK%H1SxErrT^;(~|vy{LDL6`*%M^_|8IA(sZyX8Vbfg
z{xHK$hEOi7#NZr2(a>xr`f6wGZbg}aPhW~%R5J9{+{5&oMnR?X8D4*XKNw9Vj@Y9N
z?mhny6FtdNCCp0%{~qKWrp#01yKCIp@eS>!$%lAeg%10O>-*&+vr9`MAN(=I?~Sy_
zcL#^an?gm8sTizr5b;1dH=VQx3^GUanrDx>ZCo6$v(^IN%K`YgB@KNN77?@KiTL3y
z>NB2tC-xY1mhU~AN!g~QXs&w@8=3-`fv7Jt=@N|wcAU?xsD#(<)#&^Yh=Xt%hTqBs
ziNg-!v75k!Tjr>qm<a`~Dd=l4gV!yTFuy}{p{|HLcEiYlkr>MwH(7)3)plk)hn#TM
zq0nS;iyuCoi#uIQG4VTs<c~iz%S)lq!3)}nBjq<c1M=VGVB?ObT=%*k`%|Kc=ieO%
zqj=i=x$J;BZ^%QXI0l7&T~Qto3r)M1h_fCY<(6*CA;w@g29_$YIw2kF{xpk4SE{Xy
zPX%$W=s;{8-H&_B0oY1;c(0K%W<2>lS0&Acn%@suN>2oZ?FLqNPzuv;lmGijGOL@A
z4pPr>CR+1?X;_|MuEyJiwxTGguiOS|%?xpbegRtia{`Z#jY8>x6Zq(7cgkw#Fj1sL
zrLOzTY6{nb{P9v~a@_}ytqQT}`Un1ZQ51UQ+F<mINNk>;$sQff1`B&S>rYg}@73g%
ze(Ve44d+nz=pPpUDF7W3Tf}O&d)8T_#k}c|G57l63l6W;pqTWV6<&KzUG{9K*)$&#
z$-k(3y&Gg3<)G{FgW~r_Ke+s&DR}N!fRV?u!S9S1jHeKTGNy!y{__^+W^X~w-n(%4
znL<$Qi~$^2jMKk`qv@`bm=$sb9i{Yd{C>l0q8gZD`fM(nJDJs|WN?oG*(~ePUB=55
zSo!!S>zGRS+)MSvBJJJ9trM!)9o<q?l&^%$-)R_~DaBKJFW_vNb+o!TaM6!pFh1!x
zM$8|Js`)!XWaNXl&g9^(9_P?E^%zLX?+DFOJ5Voa6}ReaW73Zct=(PDGCe~RaF6z<
ztmXqjX7+$9buP0}6_mF!7P!nT6xK$agKM^H(bQ6h*R>K0YQk2qqFtlq+<lO*9fL1z
z=sjNYl=m&ognZ)wY#!nSA@=5|`FT2OPe|l@e^D3t^KsU(H-;sqYQXkM#n^G1`my78
zg7?)2Lcbod`20aCx{E?7tFQ_4DdQ^jjuvV>Xy;rXD(EgvX2}sbFiS<d$z{_Z)ISE2
zr|yD=6~3_9p0e0~(u?IPJ8)^;0yg(QaPyJ|){`zAeXVE~c_IT}T_k^P+Z0}Ojy^NL
z8v%**n75G|Yd5syuJh#d%-v!IR^@1tUBgPpUVy@=8eTJNF1KjS1JAH+SpU8Pj7^G|
z?x01m_D~UI!6?eCo6zoNr8qHj6(l%yBUi2|=z4q+TMXF)<z8pdf%=WD+a6Mu>VnWA
zKF04Xt%gE4gl4{!jrr;Xhdi?ImsKUo%(Af|v6<h`E`+Dy=kZiX2r7DX7Tau^3C%yF
z7=ZwAq|O0!u%|9aM?W-Bu3|Am)hLQT#Gala)>qRi9G=}7b5tIHJr+XP6f^Q14&jX(
zPchf{JK`_?71-CP7lvaQ*i$$1<I!qxKKg^)AjiR>VGtPa2?L)c)UBG`#`QD{VZz@q
zREoR8ajnzXU}Xpy5Wwy4O3>|p`JnYh#j;96xlzM%jC>aaj)$*6E4j<;PdbBBJatRo
zou+qtPZl4k2a%^+`F2SrM0H7rg-I%`)6+xc*m>|aT7s6T1JHb_0$PKAiydFe@t?o~
zG#n6)!!P$k=)vI6!C<WY*}&H<6p-B)P?2x|v*yT%b>Yr@$}~Z;z=!fvH<>c&xwyAp
ze>}M}7nMC1!=}*z=(jkRuYXUov=SdUb9X+f)~`p6v@(|E9n2dI%V1rp8YkzT#f*2?
znZfx$HdZ|nn~okKx93>!upUg#so`L3+F$5c9l%@ueZx9VrGswgH{!}wet_GOxh^?`
zqzhfy9Y@N?CH#+jcf4hy_w==;>|r9C&zJv=PogtZHETar3NF*hqxioX_8@68T7AF9
zGt)<6;dp<jd0og&PkmxbLg*Z1S0>zd$$;TotH6E8P{=uc6zqzRK$jllQ9Y-!*l6bw
zXlcC4Z!h17W*-7jwk{0no+NTR;>vW4lya?kYPcT}1_p;O;&@pGv`?PTM5d+8KRp5Q
z^jxqgi4-jUv~yj`(>9%o60htWf$rO?*nvGtIN!$yg9CM-&Le?EEyx7ZM05DKG8_)l
ztmkuRKgub*5M=5+5Y0R*1h-3|J$*Q_)&wpI+$lWRXos!~Y`97M1J<~BA9lVLiDsts
zdwFFuw4@cV#cj@LQJ@kEYc9iBDY4dG_du)uUAb2QaSPfrnPg6Q@hFVN{<ZV4HlrJ4
z`qTaYUstirUyu20iH8aP<IuxoAF3+~VCFbtck~+tS4SN~Y3q4bV{(GkE2TWbwI5}C
zQt_>CKB^^~*rK3FI6O5HYr4h3_`_)+A*Y&FS}ad!Rf6ulD)FA6kr-y3i)x{cZA;xk
z8P7GaYl#f@#U*0@!^D&scM!Y&PC(U@6I{zGk+omy0!{yXW7jgG$kmVuV|3-{VU|qq
zo5f&Qc7$*L5JUNkK=gY3jT=$^$u+oI@Mx%j?C^Q$zGV@}Ie9?ljlh;(XSq6}fY;P%
z^B${0asHzVm^@qqJk2kHI>Uh*%rHiyWsL6Rsp49nqkQ<@B+Po~4;^s}(V$Bg{CGYF
z7QPF?Q)M(q=w1fuQeyo6-UI#u@frT#muVdU4rV4`yksG?ZQTgo<#cZL+zl4nJ_!^%
zU`ngLAbT{38Qr1`O518CYSw4Q%KgGCx?9B0D#RUePMBccjqc1=&`yAP(^CQD$Jqg{
zOV#lCXDQsxE=9HHHkKG?1s0KinCVIj(0Y=_97^<f@5Nytaz4b2I$c5Cv6Im+!Ve@@
zE38Gc)vEKCOF^qR3bZ@}Sqr(a?;1E`%cq?ndtJc2wFPEunjy5FOJsgNv8X-E4o%xF
z*u;;A@j(AXH1KTVqqfN)Hpm;5-rZpL|B|rpx*llIe?D*Zq51vA&cFtwq57_pS-d62
zg*`dvW;$`3s!zOm5oKP7ika8FBKF7p3TV7r09xC3^5At2kdUSVCAo{S3+>aMIzhA1
z|9FN`Hkb#7L;PFnTl}<zSMT%i_<|KE((K1<X74};qXxnG*mxMMvlU&5y}a^IDprOE
zvF05^Kw2}6x9@w%dk-O}PA*{EkQp#Wp+>7d5vZJhmU(K2LtXGGw2W1v^olcxArcb?
zCxK|gK4uyc##9Gr{(Sy4j(B$*qx?A3w+h&1JrlgWop`IH6W>Gk`u|*$P;I2Ne!B1+
zoVO?e<q?5pr3Zo2H_BuZYcIm`BKl}u;gIGAkxs#wXwc3jM}yEZECidb?14tTSoY3k
z67GGK1uy;=h$l~EqNRQVdw*yhc0QR4GpWR?wPG9S(R=3ahH9LptA>w$Juojd2hN<o
z2)TnUqsHYksN7i$J#Xlsd&Ujk<lZK3sA^%mQZAx{r5I%YWN^!(>%1X84_=TDqVnlt
zFnJOL4sR2|qcE9ij0~nbY7ACeU&g(Drqs)df*WiZN@op0ljb#0`=J~@O(DnjN%EMw
z_u|vEa<FX~ab;|X#WL*=|2gR}I(MdC^3U<$D$0e+yZaHNd?CoFH&mClh_yVqOT5Yw
z44#t???0X7?W@dKO-nJ(dp#I4h;7-!#T)IW6u{0^RT!9@02gPTM4Pqe;LM}F=qNh@
zkuPsE<(+tVKkhDXBxi(4*Xh_Q+XPl;_JUU5G|CBlV|5W$VExJgWscAJUTHQ|N{iU{
zIU}$xbT`+X6hoYy&|<F;23AwnvEti@@csN@D19whSMJ|V&PxsQg(QJt`yTrG#|pmP
z2J-e)hW7PGQMV=;od4Wl_n&7&ZRHu5<rj$)X9hyBk~l}*-Jo_<ci3&2gC=wSV_sV{
zq2qEiSe)z!6*uL0Xxj>Gy%K`9gBD`roUJHpk~6usocDGJ1NF>zVxQy%VAJU+C`*5F
znWq^<{>ehEB3;l^#iHiU3t)bYvbC;j+55e5U~wW2hC8O=)ABf|i3<Uje{^AICmT$%
zAqPy`V6NO7kCW3+pt@TMYwa`@{#xy!&ch=v4S&abtUHI&{z_}r#D1*&!EjWU3DzB>
zLzsHlX|Z`1H>^u<63gw6fpPUGW*8LDj2CZVu7_5L8_$Sfpv^fH*}i2PYX+hBKa@Wh
zxDJ|6MR4(=f%tWjKMq<N1{S?+pmp_YmZMwE)zrxgzgR}k>~37Oy8vXPcd@IB2cT6`
zEVljG2QODdqt8-Lu$|~foxBXN|DBD6R~4YO<vmN(a6r-F3izyZ9P1a*eNwfLKizc*
z)hl>$4K3yu4jX{4RtJOeJbyN3nmI~#MTw;u$rzt5f%?S;bjIt&lSL6M`E&pzNr-b{
zal07yj)1if$c?q{Gds3qA3B^J0(INAfMnG!LHp21Jf%~H&ic2RMr0fxbti-P&_9Te
zwj8AYQ;WZj^+nrVxv<v%3b^fEh`s~nz@@Y(wD39rHn)bL$3``AtF}N(hCaXKaRlS_
zzi@O*=Dz)AfU#jE`2Sds9o<gD44+x(aH<%>4^%_@KDx7o|6&(I$6@?~3rtxtny)H2
zhJWr2L+_l|f~@-%>YA$Y+RkP8@1%1WnU@2>y__NWcs!FdUScs>G*jJoiDhdH!?soA
zDUl?A!gQ#h3N3{3zpfBN`T-vSSvanJFlPCv0397b%12m>;y3ZJk%eHgPln1VxuE*7
zjM;ZYK&d}<0ldV}H6R`jx*B2AoqwS2x|~}Kxxid2e+pjA3{nkukRR6%K2~y=vwj0U
zxP%zBKNmcDPJt=blq2g;x#a$T*e06@X!SK@Lr>9OZaFa+G@W?w>*P>+`;Il8bOE|!
zz}ef>aZeu%rZaYO=M!&ua!qGY_b?HZ`YYKAC2=quj*E?>l9)>{^*|4J5}WM{^ZQ0K
zzq$9g3+2olw8CJxZ3MQQyvd(kvqwDE1CI?xbkaD^?MGLlzqSlQm6T=EJ`9_Vkslf8
zjASM!-@!NT(J_rB(=N54#UJi07GQzB9cuo6=5+H<UNie0E3Z(Yy1dr<c$<J0^fR3@
zG=%yudQjQtI?r;KaU-W47~0tu{Rd>jD&1Ik+v_yQMdzV<Od>HX_Au4FKzMN>1)R62
zV8KJg&y`ouZ(J7Z@XKcnrj#!{Rt(8aK}>d(9DZeqP%Ab8XX|thZjosJ<0-c<m<#Hs
zo#ErElOVl)feGdN@H$3;>`nqVt+T_949+C$XYgz5Oi|X((bw=0^B(-3+drm^K<*6G
z?o3%{^>(q1*>$cfcLNREe&F?M93)>sKJ{KA?w+?1k6Nz5)^1;T3%yGsOBjeU^2EBm
zT|s&29M9@~nEdg?<@F4K28w~}CiZ8R-jrMWJQ_rr{t)$rymR`^T<fe7dK%<o;kan%
zy+DPd$2g<&@wW_jXhG8t0aUxrqhJ3Ll4aAqTVEd??`%bFyCN+7l!Z4PgV4n}A7r~E
z+;MyYuouJz$a*cv|5J)h&KiNJTfO!8*=N9T<Zw`!EfV^@Ge)%=b$=$D;hk9)NFx3+
zQN}wKZ&=E5_Ligl#U*g&{uR7UJ&2n1InYojXWtUmqCBIWxqXj?MOtT}N&i2wD6-Pp
z!pI*3qFu0g?HwL5Mvjq3tl>YML8#HH0WE#>z;$7=xQ8q4v$h0a{-YSEC5D7zz)aLN
zo(|XiQc+Dz5M6I`h+jgx(RdTO*N<n3kxAfc(MR0cZxrS|EXFMN&9G&|amsI|;JZsz
z*z4^EwCfUqy6=7nk}NktF1XPSXfi4n>VazIKhX3Qcu}t~Y@gW;?3ahbkma`6rdNWN
zXAgjb!EvV1uYik!Okwa-3w$+&SWd(Tu-qDnwO!{!d<5OwLPW47iu|E3?U}~16z0CZ
zFL>qNWjDepuW`hiY3bxpcX<<vLSGAurn#fHzb-m-KT7XOIX2K|revQ8(oW=pWsfAd
zVG)iO%D1Aq(?kB9cC`m4$HUx-!8E(+Dkx7pV+{{eSaRR}@T=c6^m<l+($1@eX&w~m
z8M^_yI!?uw)@U%<<B00iD}r^%F|6@91qOEByy4Dl%n0^{D<{Wb>$9g~#qcPW_3ATQ
zd7tvX8v=L}?<H;>;0U1E9qQcvfwnbDZtOh>94A!3l5j`VSeuAFwGYyHY6VN&G6&^9
zyqW1-2e2JCfmyxk520B(;I!ie^i9*k_Y(&~=r<K{3(tba&u6Ur7wR<L4#82IBQbJG
z0ISoL@SWXd=p`S5O>YlTcD9x^pKjn!@1MmxePgi6dw|%rOE+?~NTKp%H?ZxWhK8QM
zcz#k4)|y9v?Dl)+(S0ILk3C6U#q+4zC8C}{PujWaVa|n0X8)VI)=Up>-A)0c<K+BU
z8U<4*XRHho!3*~x_);eWC1clt<b^ZbEKR{pTA}#apR(3273gSR3UZBSf@0+~)^vLc
z7^}x}2jdmc*CGph52Sut-*T2?=L9KP94$`o;SGlVOn07$y)O=i>XkdlDen%2<BCAj
z@*I|XP{h_`wb1<Z0k2dhgK3}j%-~HOYYMDnFE$iHpJ6mFb-B!&Qs+Wj=S1$dRt55>
z9b$=UHK;}n;2!T5gQfN{*z{Nh!wE-SQ?nH6p8N9hPMP2^F9iLkmBFLv<M=Z!9U>N8
zK+)ovf}R0@{+x?2Vf`#Twy^}YYouJEZ_Qi_w8RSIy|mv~z`{R%cr`Zzug4}sQ_y~<
zyFU}!;v;$d>Hm1k+kE!1YX(j;15~D-pq*v~%#it@C+$QU`fr5mJONsJ%9!)6ODs}!
zj=L$zb)8wxyl=#D@z%L$Y@$Hh7Ymq6iX(U>dV&0?FN?AZhPw63;Q5KOX!^_uUfj+B
z<1bVA&xz!CG&d&4-6lv6MZCK#0wu8p*7DlTFnw7U%z8T=jW23IyUqoi9+M26YIdWA
z<|V=7+AB7Yn97oB4+zd51~p3tqRFZpF4+_a9*fR`dNCF&o~y;)o8L3bbv1na%-tC2
z;0DfSg&3KZ1Wg)G*+lmgR9x!}8Ba>lW7d0a*ry1XnT+OcTgiXxP0ak|EO_WCm}nY6
zc?Calz4H&A-eVDp##z9M`qh*fOdx;cSl(XW1C-;N#CGaS&=@5n9!eS{SI<JdF_WNr
zW(w5r&O_G?zxl{Pr8p^UGIsQjhVeO<Vb)CQl5{zM$?}_AeeYf|USAHz+OEQ%ZAb9@
z&OmISo}M(;8~h6PLS4Ti?o()o=Ao9<({zPZ&*E@WCP(XGiI@-=jaGzJm3W=9Zp%0f
z`XbtkFE>TW&9l~lRVQGYy*|qB>A<M0AoM<8MIDF`-0<!)s@GJ|b0=eJX{mMlgl9}#
zeFc`UAb$eAuMcoJvA&+OF^3mp{Gv)$yQvQ-FYn<_p{GHqY0N*YJA_$Lfp|%uI>NpG
z$JJHYaDD0&RMejo-a1_dgK8DDMeD)!^F@%cuz|m?jl&JGSMWwOb=Qxifd1GJXi83C
z2B}U=Vo}4wADd(F+j!R2H<yp8I*hJ!5_sF|5V*dr0A>eUqs!al=w}y*N&gPPoN2iz
zEuL3gyXH49Ok4r)KW~7y=BLocB#vo~jpDml6u5h6!lXx0*uFHK!O+d{AifgoC>PxH
zBbfNfL(!$%8~*#rN^I<&$UHaqA$Oxa1V7>2-8P%K&5lIN#yhN6;%Iz2W(|tZ2<Uf_
z93=xzbIWy`&`M)E+Ll^_cSN(Gp|PEI0JmA2_X2pdBn?XEA^hsG8&fAqQR8DJI<D!8
zyEb*j=3Za9V*F%QH`|2Cy>+>c%K{9(G#9QX#F9UAF39wZ!7e=#*Y-&V`JywZ^`!`O
z4d3yG9jn;MA%1AybuMIm*W}8D&xOqAx@dSn3nDVsBNVLym{r6SBeme#*)W{gH5M#a
zYC>$92(?a5W3~pNTyF$Gi|;UK9dkygF$~7=(Vp0+Vk5?Grsw41!{VVEH>0(ZV*sSl
zjxY+#Yt?A_`52GQya>)Wp7D&S2f=ICe|*Ik;*OayaN&QsdqpPb1=5T%hV!zvK3Lb~
zEMN8V1m$vHiAC16;`hWy^A7n33~V%+^NmBiOn)~f)>lKu8rqfFeqftsR^i+I!RTu5
z4*hiK4l!rH*mu`nZmRc{6-92qw!^tx5_6Tc^$G`LU44+wc4HFtR;HT&n7vVlVypKS
zNW72%<=Zl#_VgKeHm@H_a{tE~F4Hrak8o>u7-rQp@z0r6*!giLW?a(($6=|Idx^)y
ztC1MruL=!Xd!ov8I^0@!1u9);p|Nu%G55>G2CEFvbmbY`kWKxo5w57%Fq!$t$j!ZV
zAv(@1M!DNOF7Nt<sSet}=F)ZOnpnWMpU(v2JR@<yz$9#PtfgMaeIDO^D0n9dLS7hx
zsP72Xp6zso`p=my&(20sr+(rszYd^ySuq;*%0q4Ah3MChdJ1D^fZFZ3wZ_^LJa`Uq
zA7)3xo+*Hr3g~>lX&8B~<KXRTH8fL4$8uQ-cr8mpf$~uLmoH;e>Q9!JumN?=w3vZm
zSMGdqCbR4vgn?eviFm)1|35!8Yf=*5d3!X<I=3;Y&OE{7^)G%-(;PcW^x;28U7XUl
z5-esu6u)l~xIwQqn0Pl7y!Sp3hB_WX??Jg({Iwi?JrBZ$F2nJ?!U2=*=^Pe#k$JT#
zpe|!H?`Jj~+b#xjouPCll7uqV7%LnbPWiN(2e2w#fi?By0_{q3;29Ie$_dAK)6I?4
z!7T*mlS$mS^)fV42dpJ=0h3N%PaKc{)-Z^2u)hY1%kLhe-}yl(?>QPS9Xd_j)Z>u5
z_&7d3os4xi$ajz&!z)J?^0jLY<NGIbp}t2c*DvKTKGz##={~We|9rZqb`|5*CU(m@
z2fZs7a{0XiSd?81&i1|F=bEz+7@h=vt<(#$Hb&nTn!%>#@F(@9C_O%cm(D8z=bqvG
z$2u7~vgTpp;{?q5(aNOJz0fZ*h<q94aNREm<mVgdY^et|Cr&W)d*u2#f#7JYjXzFq
z$4Wyw-(Jt>7#)rtv!C<wWu@RspX;T~G2oiogWX<0&L2NDPhPPM>lY=0-0ME8Rcxf}
zwo;|+_{Ok?I%QE>STCbFf+&u=E*=4vQyy?x?0iuFufMSQW*K7UMsWFggIis_$gAZE
zXz+3{7$30}L=|3wqqzYF@4UrkO^ZQ`D&qDo??j#v&MbZeu|j(RrE8Y*guG024jaT}
zk@S6Cc?#N6zjD#ULTt@>$96?WqT_S=H+0P5R`exw_m+Wo_*lw&c!EXtMrIfm3`<v1
zui|wf_?13lKMxq=g7c&BaS3%d%Mwv`+y^xE1-Q?qqjlvW)cqvocfxbgaPkix9Q~K8
z4x7-7wuQ^55ohS@eqv9hVtvvSQ0k7w)<{GHv-v1eM@(_<c)|8)A4QjKEnGTvLb3f-
z6ExQGW2WQhqQo+%_*Hl&SpG>ur>F#;HDU?5HwHmg*HB2fT818f3t0j`3vUNp1XFnh
zSgd*?G>u#W$^B2@mr%+xUMdFfpd>-*^qs|LT;R6Lhtaz*om=)VLi?jnxcklRl#Tp6
z)qDP9A@Oek8x^q>*YaYJ#O84gYd<hOAAwc_Vn8;<lZ);g5yGDJLaPgRna^l9FpV6C
zzC-ExKDiXqcp<7~-hxNdJp4R*EBf4!fucf(=adVu<eCS%Xw%H<u?{2#llSCEHvF$?
z8%{ib5^O`(<COG#$m&rA_BYRg`BpX4dwYbpUyb74u1Vs;gN3|4a1|y`*8}-UG0m-B
zGHHCApthL8qg<mvPf7jLJxr{0_#oV@vBS?dJE808c|yGD7?3WrWm3mrn)8%G)Pw*$
zIDlME`R=g8BNqMR${?fI7Cy{ei#dflU|(Fqc2RC)-$S~qTY8C2=EV@-G*J~5m5sX|
z4Z*|#EBK2=g{ZY_9Wyzc0rz((C?`s+D8m>wNJ_oLuTNO(2LS&?1MrXj5fE8-rWxoX
zCM8zDCX*PLsy6{23|fpZES@*72**Z?r)=XYnln$Me5%g^2rW5EXVVH)CJ*QG=OOHa
zRT4Iz9{?7|{$*BDfz|5_grCObEAN^IO4AkacuOC=7UzIce8L=J3!!h%P>?Ji0ZG)q
zHojg4d8hKR`CoEhEYJky_feqtM;G5WN3!<S#jNytAQnWArd|Xq{-@3mA3dPYx$EZy
zN#+ybh~aj0|Fr?PXJ><6Y9w=-FcysaZWMGSM<M4igUTsy`Og=pz{K+z52xRaYm-t@
z_TU0HSzF2fkQdI@X)Cuq>Ogl;>f6!&*+i4xH`5WuQ`TEOVYE<xzldl4&kQ@(o<Jwf
z8us39DEKZnr2FzZtm8YFbN>YF+J$-_&V$)Z|6C05szSd@H~6xPTd>)DF1*S(fzl2K
zvE(>8)vt#@vh*MFwB>`bO`72Bp@E5q=^oQngL3QT+$%&0@z%9G`E?33{Y(%t0#5U;
z%gN6?QH#2YE<%w}AQFBHD@>>lc1jy0(hB~ziZU*aRou$3g*nZrg4)6~?$KPzC%tjP
ziE8S`_Mxt~#Y0t{<0NuL<bpg}!kdd%gMs-Rc(p(Yb#rCpL|@JOhV(_*$Y^-Gs|cK~
z1~K#Z2jR}qY;3%~2+Gqlkq&uW$A)^TPHRBqcTe2R!|~~!+32vjmDz&_#y!7;EmJj7
zqMj+XEnLAY#4EtmHHiK3h(gnU)i_ZogTC!2!TWSyfNi=M-;MHj)&6*2ClwV(BbmtX
zmzCt+D5jfT#T)h1*zM+JoRt&=Z*Tgdk!(5!Z{LqUpA<sc#MAit?|FQjLHzgfYkbhZ
z)Y}cPhetvjdMyh9|7s(0DfobEQ3Z(hezaE3SMZGAhS0EWBd;wV2X&NJy&NZC^2h^F
z<9G-RI%l&*4+fz}=c7>7k%j88El~TnJ0zb>WuG2y#oCJrSYs9rzH>J7?L8gQ>t+nT
z8`m43txmvQ8!v!q@CYz^Oj%Z9^V;ZzP;RXNyU}|tezh62ZyOGhTiM{7yoc$g-4^TW
zpBC!EV?pRsfeEzZuE|-<Ugpz%?8SV1`Xme82b@Gj=P1^?<)v7>>NFZZJ%dK$$79of
zapL+~C8*;lbGmCNn9sUGjOa@!Etv!Er-p&)gwxpjPY6uxQwXnOR$yKq4wW~O;iEJ4
zG`HSjUKLer&yEXdkQT`xFBaRcuI5kGC6GtIPcA+4iS;6e4xtnhJ>N6+?Ac5{u0LOX
zyaXE0r!hUho=}z;fzHLJ$n8RWGks4kxiJ)Ko2Y|Q>B!6j=fj&5Mda(r<r)jV@qZby
zCSITCPp6c_&f+4p{k9v@|Bl4w!po>_PrUuLgCWB(o_biWg01dJ>|IZPHvdnY<&}+E
z^ciTgZX~?YNrSpW@xqa-%TPY0l{<TDbBl$udH-GEm=o;+ZG~GQ=c|%DAA4Y|R}4rD
z=fczVDR4_2hu!>wP&BzrNc^<~D98ln>ID3BU_OQ$20*h8?T76b!}6s`XcK&aH=Hm8
zdD3y7(n6l)+AF*vr5~>!9?h2|Y(c9jKD==ERESu55>H>afLaX$;68PW>U+~U1aC7>
zNhZq2`?ChuRm{qLFpNK|26G=Z$V5w+hiE%9pE?$#y#a(MBTTHCNxq{B-u)-}2&ms7
zt#lPXyd8>n?qtKafJEXw?tz@d7Tz#bhsl<$Wywx~D7Qa>mc+U;6fc9Z69t-a5s&1b
zgXCqt3>IAy#Ip*B-)3eAk+Y9r#?)i*UMm8>r6<F%lhqivZ#ydO&tcZ&WlXxUO)%+f
z3>Ho!;6Y|OUJo9EKIW!qxZ@Oh=dK2IiVWIj=RoH#=2$!T3|D^{iiR`n(0EBD^UtXS
z`P{M4(3pTy-y{~hJr&xIwy`-=%P@b1fE~Y%^Pg+;DOcztUK<?_%D#7)-peD*rNNvW
zA(lL?mKYyD4)g1|a!dqQ4$U;s*fJUvr*#Av6c0Ab#SlE$1BdSL!Uv0~Yh1VmQoIwu
z<Uj~6)u(=>=4);hGo8*-Bbm>O3E;bOCxnl_NREyo*!t20^KJxy`e&fnMO_XCQ}uY|
z9W__kTf&L~ltb%Y#1;25#RdzFK{VlrYH{TXRId-UR<_#ntTxgesrzChv6-@lR`4V>
zIVAe*1=q-3LQT(n{&>_%yql+okGjR7XwO@&>pB!Pp1Oc@C-O@@*9YGZVSLeUSA^87
zTy{wgUI$va<&-3zv+OK1dR1~=WvcM@{t+<OF9N+msp!$=FK?X|4dyl1nV~~IPqfTp
zrmNSGfA=Kp8j%dWCs2;9J%O9|Xy+E~*96-?8@b)LH2gukjhy5D#Gdhjwz(WyKCI%d
z{r-xZfAxbdiUZi>))Vkg5sZB-gO+OrTyEjb`stiN`?TLYc%~k@nNerGdJayUO}&m4
zvq4IX*|x+!;7hZr#t#>nT7z83TTViW+ejQwJ)uN#Cd`)@p|SA_u}x4Ie|o(XJdQ*%
z<JNDi|KdIPVtz05Idlpnqa<vw!$ESIlNWtwG1sl$3{69yGLJEtln;<W@}g$e5_gTM
ziriqpu5@&b-2<lEb(rn?3w*p*KYURZiurqKpE*1V{U)X}3-34LuWDN~9siT1yo$jV
z?+jiyuY+lHNrZKngbLl8;<*6`4zu1e_ee8vZnWZqZgxSr%Oz%IxDsn*`aE7^HyFwM
z(8K9Gq^-#V#k<Ag^(_T>cIi=c$T0!s(`@3mSAzHFd)#!q2^hv50sH=;U~hDicepZ`
z9v?#;E<LzR8F(l?1ilkaK=91X&@nU*{rxwfbmDkx=^OGGcO_)R#lcXS?E^9U!_n)B
zBTAoU6*u(j&iz)82kr33>|<&&%%&ZTa`kt%kn(}<hYo{Hmv>yd>vl{m4q!f0HiBWq
zW7eA-V#a%XL9<g8YF{@4*HcR2cS8kgjXBA^+KzIMl0g`JbtJfY5Fhpob?X9tbBnIT
zX|y2+^R+^;UYBJs$TkVbpXh_yql=lZ`6W<#EXMB>tf`Ay2EI>|xv9{=T>o4UOOwuV
z_!I{%!>U-xI%4<f_GhY@bT%C~$@<Q`Xe`>e2PfL5fv(PO;#cPqvtT!yo~MaHnKYBI
zFA{$}FcuF^Lv*enp2njE#9`ECHc=nA-kg2-;J->tyq?C|6Lg9FP{(S&oQ5g-MR=}j
zGQMhx2h&(Lh(DYP@3-v5wyG^KVZ=b(e#DdJl-_*iyv69W%nKwJ2aD(RT#NB@lVPRX
z4&0%(#N?_y@b}_eRAfwpv9m}-ou7oJ_f$}O=L4@hKAc@sC!^-{AlUah6m@T8@@82g
zf8se25uP&p$Ys!C@ef+Wq@!p<o^^ZG1a4EEz>IBtai#4>EO;4%2kjPOcKZ=jd;bxu
zKgL6AWR0-&b}$zHGXq=fV<5hpHK_fQi#<l}<JwF7LA3Iqb;6)5bn(vNPLt&5RnP@H
z+vP!G=O`XI?+^D2>IbpEw6Udf40wkXLgfG-aQa{a=122Uw@eGglmQ8qW<cq`*66VI
ztGMan2yts(CCu4(1(ln(0!;IS_d4YuEpQh%KEKSBE@Ij(($8V33Ua;^2RS!HXxXrW
z-e)CTx_=(Z)y7N_?glGwC!p?PLuftyN$@70R^fphF#mQ5=9;BJb_(^yE4{#E(LPAC
z2>=V97hL*rueEqdEcop$#jBM$2o2NVT%io_Z{Xl-y@8oFp5y9PFG$`Iif=a+WBc4i
zU@=rJ=pK114s19EgG$oDLFI#5yI-<kV!FxQ7BRc62{^KX?j13;?AfdS7@uMWwn@XF
zk(EP}^HFe~<iy`<%D`j03XNSVg{-P^=o7OBz8hELa=QR%5pQHS>c^ubiCB5v=dj@V
zBCHt~kHLmhLHf^M)>_^LtZL^IXW<|>?j9=a2=k)Mqk>ue6oJd|2(GOvfsTEV*iiG7
zd)MA&qeRuH?;1>*jc~~LK^-^G>DKapA264_9X#slS!hXc!koJzdbUiUx=SY<^=KkW
zuD`W5ROFI7=^aaNIzrB$P;?v60~ZY~!LLy&OkQxE?v#hPBsm5wx;sJbbQx+M%7-V1
z&Y|j)9;->42@VBE#MU1I(WajOy2g9ho!13$$EgGtU%8BRjo-!UFgtX(_=VMlN1)5M
z&7h~3%0)N37qjt8QRiSj#`*<9BeAyI)Yj-z(#G9l58{|c3tZKy0N3`21lu<wAwKFj
zPQO+O6`QA{BwYt)R$RfUo}M^v_5ci~JJ6%4IZzsP7DVrl2y%G?6ItBlR*%xTZ;CM+
z=}LR^ELZ;T&@t#3eFUQ_XQTVV)u<TuODw7$q-s}~;#hGg${nU~g9a~X%nsy<l0q<g
zmJdxOli25^0%Axnh03yRV3>TGx1`-*XX}#CH*_&1{z~9^ohd6RgrZ-@8D?Hh`NEa8
z>}@(?BV54Y6GK6}P{rDo#Im~kjjZXZnQ(OSX!3q{!r^g6m`83TrN=kccS$fLUFwCm
zayp^v>kd#(yC=5!a|H)9D^dRaI?c51a))1wnRIF%G#4Mk?yvizp7BMLjwRZCkQCG{
zqxt;OrD(|$Q2WPzXzJitIbsG_SsVZZt8iwpX$y567}|P8acANk>3lef)-;2If#;!l
zS|FbulLqal_c4v_8T{EX`u<$30*`yc`OyI-=$!eQSsk1SUJJLuH~UK1XHkuJM#YkY
zXrJIv&<k8f&P37rAXVkOXs+0NgZpS^L&jJ`_(lxOUAt9i>-!IAWZ8kD5A`O)vhaJM
zFPhKbtOkEEhfQ(ZB0m81Bza8oHWhPM_@m-jrMUiGCHLL@n4LHuhuRAwq3rrr9Hl`l
z7x%uHmq5=}=v=^;;mlj!pZR2J;elVLiRF8XKc-#tw;akWmaYNg%}S6gISngaM&Z-S
z2-uaCj<yT)navX~6pt$b*9Vy}?P>tt>RASRR|kP^EEl{(bwKq*6I~;^Lt@rmwopYp
zzWTd7c;j;}lG{S<t=~Ly-Ft5GGJszP>WiHD2aK0Rf~?0>>WfveHHQyiV$(C0H0J{7
zzs$h*O#luvMpMQq4IfhHv#-T!bXDA==k1W#b#XSkX>bvp4C<M_ksOUj8VTZ$6)5U?
zS*!?3Ku5VfO7ne+dvBbKi*+LKd$uKNPxS|lv|R49{ycAfx(7xr3PDMUCVJnE;eP5_
zP?vX#dv-g8oq8mo$@5C?bZ;}%{2xW<9#>QPh4B+fNRlK;6G8}4ROekdNJ0o92_Z>^
zB822brJL?IU5|7*k~*d9c~?0l385i0CZ-{T5Mn}Oe(U$Q4<GHl+xuPXd7kgHm^AAM
z1dF&%ZeAOVvi}_TjRG%hOZrJn&S=(t+mbIoeHwdD*n{*Uc~tuCWx_pyICJ+r6yCg!
z5k9G4|0IHo9{m(bN9v-~o}NR;<??8ccww{bg>Ysx_1NuSjGZdPoQmBFMovemo7V`s
zUxt%EH<-6>^aIt(A3U1;KfRScOlo`qgZ(0)bk<aGzZs1V4PFq4GA7QMP-SOi3spl%
zi=AU3G*%>lR#F`Jt`4G{yFZ$oqwdU}*DTRgg4^S2afd(kx|fZ{!&Y@L@a;-8IO9#;
zeO0h;w1F`%jnKF89*Z7s!*{}QY<AtxO1BlGM&JjzAmxI^C-pNJ_~$T8ShNJ2YiIGU
zp5rKvK3}E$TMF~tzMl8a*$wrcmwBI02}D1xWU#*s?wa{x$x(X85RYY2wi1fX`YdJM
z8S<%1myz!Mks0ZHX9lBJ!mMl57v(XV1*j9l-lc)3Y%9dKY!7*@ym+9?GO(+T08wZ*
z+H6k)-v{-qYZbk}$tRh8ZwlQ1Oj^OXGS1{q*n7YTR(Z(*2l(Qs;~5y)Lfr9Z<xmO9
zz&%{iP1zZ3J*PwOp-N^D?k+b@(!w4CDc!5J;I2{(9hUi^yfYCLY9r*O<2QiOmAxF^
zTxY`$Bx2y1O<+=?$=a(a^Bonyp6|Vcf4q@Wuci{0R_a45pT(Pq=b`uHJaY>;32FOE
zn4@_)sQ&HEG)}lMkEB4<sk+T_GZonWqlfmnr9zz>Q*o8UX_T^35P%mDM;FHqKZq=u
zx}8Yl3%gtf_0|jFcAAj-8V{gs!(I$NlMOOy19(}g@~*0Ebnp#>^fYaB^-qWL6yoHJ
zo65b+bC~ZoeQ??)XKlF*N{=RE<%3Ji`TQbS(I`c6z)^5{U%+f+lX&6)O|*+}gznCt
z3}lZvVRO-0-3WRn%!Aft7qCFv5uAsw<xeUSv7<JWyV@$yX4h6`l3>c)`>KToe|{1=
z_bldt=jXxQKJuFt9E8@bE9e{K!TTMe+)&^TK4yS9O3V{@%3miiFo!Z&=fXHl&xTUc
z)od3uKy_n2Hm>~2`m*L@Jb3_RXA|X$oxAY2>`~M&zY&}oX=dI+dgTWn7I9`i@rS)w
zS^qGcBsN6BxYI&ENM?p+Ie5X>7q$MngrAe@@a|%LY|VCs-t^g!d~r7Sa?J*zy*6s}
ztz+eXNx}DpFLbp;qSyR%)+^WoProu0Kip#pQ|0jSG-U@kf}*-YzTc5NSRrNd=Xo6M
zt;qv%MPb=}@(mBk$w%9r860DVp+W8Ma;uT#6OT0DeVXZf*`Wpuj3rON%}dPT!x3W4
z-C!4VN1;-)5lU3dxK(8i?FUAKLyj#7|G9!*#!rRcc4WY8JPi`-*-Wt~8s=9Iz>-^)
zDB9`{TXtrn>nF-8O)mnGkrh)I-xqf690kfL7g!VRo$8NBSzyIrxW6z6;scI>#lu{#
zHmrk-^WxFMV;7&lT@2*~nNU84`Um$IV#|*#DA~Uo6<JyG5u?aoU{((i=}Gjx8HpC(
zOPQTc6Kn9VfDKUr=-d=8{8+RLwbK@2-`dSEfqXiFPzBL@ZVKPLnuot{@JB1VRVc~O
z0i&;^5oKQGl6r;kS6ULZulB&^dA0Oh&%h_UY2K<Zlp6#eX6=hq<f=7++;zzgOk9xw
z(R=kE%KbX7@v6p<t9dN?59-6P2_z=ROu5s-5SFEV4WcIuVPg9q?A4VxP%NIteZFR~
zl8bv#X>m1{L?>Y9!v4@{b)CH8GP!tQJnbWb!1+~@@bloaFvoK^n*2&Yk!d;j{^N*^
zIjP*lu!Xl79fc1gLcqFMgZ2eRLiKwMFnf6l`itX0wDh8-;~wIttscdv9!SFpD{`q9
zN|W|onY^p}DHkj6s%l;pA{?wj-lyznG@E&wpTBz=)9T`xxN4*@QL7fSgT{eqd5fjM
zIzn#r2X!DG3<2G_XHadt2xKQaxtC!gxcKOx#G{Cp)*Xfm1R-#43=;|pu2P@xER1|t
zi)qh~L4TPX6Cz^JIBGC-PM*TWF%wwS!P985?-3XHkmk5PiZ$-q!G7je;KrnDSedC0
zMysY``+gPfGohS^%*$bgbFaXYz7VtsJOXC9L7+262`Y6Cz?8M*X*_0#`;sz1wTal!
zzB(X!I~R{_ID)F7W=xP%ROQuE#V+>k$4YZeW;1#m&mAhotzJPOusvR>ddD74eW7y|
z%@__wvzf&~9gxvCM6qrW<dHV%7nOr|45VnIAwaSE6uH>+xLgPNVdrjF?D~F%sl3R6
z_~+Kp{!zjoK>?^nq;oa;oqs-pb|U+}vgThyplkCTCT-fr?N|`gan}K7(Gb*|QN<?4
zRHKSgFobv-fe^Rgqn{Fx2408x`n01>Q^853Sw?R>EqC$DgDI~osk>wdt8_?)g2P(q
zJ2-~R76&tl`a9lva0+X<cNLy}r*oE81-|;X8nS<W=i{Byz-GWeIJ_kgH?DL+x2;;R
z*}x1<vlCElEa7aA??O-SFz%JtBD7h)3p^ho>kd|7t~F`!cSi|EBv5Z{%TUzY5Xgdq
zuG4<zHeVE*NoS(T*f?|`uiTtL_lfpr3y1O7H%AgX$b)A@Rzrh&A@r`9$fteDL7PdX
z{P%78*r$Ay^s8EE4`=uagTXFkH-`LY$D1?!;7X!4%97^rhP4f-(w+n6eNL!2Mfp+F
zm0UWXv3gX7z%@#+@<lYh2_!wHdlGgW^1#y2B33>q2UaH>$Fy~4n3i`sE)cq-j!`Ur
zs>?$0EKe>vZ^YWeg5`gIAijw2Mrdxh4tBd&fOkbIDqYt^vD$x@R@q@(doOiZZ?Ff|
zb-#h0`4V)Pc#-rzId8rYje>_$_^T=%?4G!d)%;FA-{Xcj#KQ_#{VAi4q&nEuJ{1dA
z%mi9I!m{{6G#R*q&w7!9bH4?m@rQR@rQI4w#H}Na{SqPGx=i1$r4aa*o`Xr}pxI8w
z{m$hx?d@S;drry<ms8#=rwEl}u0wR~R;KZ54T!7g>@si<w0ciuW~R@0+IAV(584Z*
zdp82s$Aj0>3q09A4<mM-gHT`UPhGN^k3V@CzD07<3aA5DCz&`=SHb08HY|)Wz_#mA
z*dYl9E!E91_r3-B@)zUh-<P1_p%Q+S&b4!%Z$qouRm{|VJQnOO;K?!J%;<xJHS4TI
zMYA4j-r&sE_Md>tn<Q8=fcgXe4Frub)=VV+US)E|A4I0BxKDd7w;ZtqpHUY|L$N7#
zkX~ps<usrC+!wnl^uTY68pg+GvwPeI?b_aQw{T^c+am@$r$x{kvWG33HjDacXF~pu
zdSa$aalwg8w3n%+{CgqqJa7cU9#CgZ$1%P)s|bzEO`yGKJT!(U^A2L&^n88HcE1S1
z+Y8k(BGQ=HP?uQp5z?rROyZ}$dE&k+5^%Z~!hCjSF&)Vku*jK@s$VCw!(YT`wVraF
z4>h<#=MV;lgo9j@^x=S9b~b$;I!!cy&WVR{@HRxFC-rcBaV<i-2>dSB6Tf-}pFblP
zwdZWWUVev(HD6WLRG$J1i#EQ$AN~A?iO)58BQp$eN1JMMzG`(22+m(19@<_u_$FoW
z)=5E<?+<-tN1^G*2>2}~4C5_xp!>TJ=5JR(FU=ZyijDZ~co24zPA3{1B+Pn8`y00d
z&{Y}7ANu=Y|Gx1!<LD_=UH%7G6J_yu-*Rm3y9k|iL7;UaAF`o>TVKh?h)DrV?R+?t
zPoz9xLoqMxP6x@10rF?xw_xB|()+1@%RqCi+~JZt_pCU@;z^GdEt@Mp_iqq(9em0)
ziiX06oHJP2Mc=F;@%+A53`m0~K;xom04bRmxNrsNOrDFn2Xnx1mm#!0+l}346Xb^D
z<$R7+I67-DWQh;T!8oZ3ls86Tafd554*1NxDl%C6dUf`mn5Zota?C9a2irqc_<*zw
zjZwpRBKdSeeQWW({b`I@A;(FP5>S{AmOpci0L6rb^4Y{7s1pys&Kv(SmwR=v(j@?0
zkFLU&#ssdhJznmU`I7e%3;*sqOOzZ56=wO>p+zXMFt#2Pk_U#{ib9BC{Gh5m`5e!4
z+=)hC9GFgeBoulRXR+2<s3_@>tG$YV?uk15+3F1ZHmL@VsZdWqYz{xQQ30iyYoN6y
zoJpTqz?$_JP-&$fCQtm6`yDlfK_y$!z|4lXDlDO2Wf8>ExuRz_U6)^^x9h0D+MzYj
z{LezVZmF>8zXZzTcnbw5R%7d<`)pEaf7FT&=88XNa{V#!pcop#eb?tPiB~SRuBhNo
z6pOJwaXma(e2TnP)FY5o3}Opcxmcy5N<qE~k>nv)R*Pr#)HS1cB!>30?}ZLIT3jRX
z4YMlo1Lt5P@Y>AfL+b4?!6*nv(C>~)XDlwdQBM4yLTry-$nA&wLCFl-X^zvzRbxuQ
z<!l~G|JlOIM^s=9y~i%<(>XA9Cg`3Ig?{Om(0G0s+h%Hr^)hu3bT?NnS~&<UX0L+8
zIx%LiIM3GY-j5%)l|rulMbQ0v9*wK7vWTObG5mEB%9QDz6mJsxED_-3cq@FVT7?$P
z-<XC&J`c=YLEV?1naw{LT+y6b6@OEcIN785rq|RbwSEB`Y<3Q8T#mDD+Yq5w!&FpW
zwU_v@3*fC4J?qWCL1)!KDD5E*{K^<S@BzVa_cfl+ZLw$KL}D|J!jt4>4&PCSzoH5-
ze`FFUROfM>DC*Tbup6uoxuD4{U-s&e8eXbNMrXltUimtLo7og|(Pr}ZW|U%>N-lg@
znML`pCMJ;Yv{Y8v3J&IpymE~Nga=0Aui+=KWpx|#G;oC$sR1O@EVJ+81rUt(mbV(z
zfR;X;%`Fn~%+Pwwco~7G&W51Q+xfiS<1l#rSj*QfYe4&TBcRe=jT@eFfL{IAynDen
zVNdxYsJjvd3qoahK#g?T<5zfD7~RXqF~poD-^=5bVA?$wTc<?xu0$vFTzr@RAm)8!
zeH?hG7h;ZMI5rh*M(GtJHqERIdmhe$&jIAMObw#L+awUUQunH6B`AMOW+|hSQQbKk
zRKJho?nAP$AoT=<%?N>h^qv>bJZ*WgbSpOJDDmIYM&aA{biPeb0k13nvIDx&xVbk1
z(_Hi*W_&VQI2?yddnA~wJ`@ZE1!(cL5*%ud@>Ra;i2HaE&ye@scLaItLUef-&F4*n
z#(?Lxzxj{zQpmm;0lCM|L1V%~=JNA7?>3I-m03dGJbx%g7NkK$L^;?!m;<xMoxmnD
z6IeiJtxYq-aqImvFva=`T27fnIXWe18Y#ek4Kz@)`IxXzk_ZI{`@_Xy)9@>0Z(7b9
z;_@$NAlm#SbbK%8y$eP_bYcvMcb;T7G^nrd^%{u$?;6xyyb2L#*MXICDzrze7LE_g
z!AGt+xOwSbRJ8q+cTkq#{^$PKPAn9=09STPwin%vDUUd605ne81U3`sZ1kPDsjC-a
z>%3WDaWRxwH3l%IkzvYbRqB`=Ef4uL1=1GxakHu9HIeLPL1SIfv8)U|w-rJ}!Z0kq
zqk{n{@p$036f9M$1zATg4@|wy1^xdKsyfxdo<XVPAp?+nO%zUyIEjbTVxf6ri`>&=
zH`?493Eh+P<vP~_S@V4l)^lwZ_#K@Mu$y*N$Mr!?V>l{~ddNF6viMKcI&^vO299^N
z&~AGgaWJk3_fZbc=Pyk-Hq;Oc)USih;z+i77v;s*<q3Hw^?SVkhkco0i)pKtQs>PI
z;r2J_=xuZ!zco;A(H`n${<Dbf`&5PkiILFRupexD-GEw!syO$|HO&5WnQPh?;m&8{
zX>L~smkXy!zl{<mm*sN1-_}Fh%Qsy0FFV$y_{60}2VnUe@<zJc<SoPcVZ+<g@H6rn
z`85`yafvE4-HfOFV*xk`!@!O@PaJB?(BRczi2t*i*?lO-9?5K|xDtwww<=J#t_*DU
z9srBZ$xP*+G*AuPhat%sU}tv(26df5qrq|DvO12rIo9BRGQ^X^>6uSn4~OF#U_n9^
zX4q$;^P?GZ2lozU=RA_X-75n}-(A@6Ds=(>caZw}GVo0AIgEx-cy%)k1bb7#>Ivz%
z)3upGdp>Me1YpFr<B&6a7TPQ#);jTu|6JFAc6;x$XC-26yoTUgbPmLlAwsVm@hr+W
z9eX9F)IUnIZH+4Qmf1q1yC=k_#-KE2Cx~YME?>K;3e5bpp;!KZIe(wQ9VTXg^mR8^
z9(@sfI_0p}cqyuMCBfc2>Pd_~kHb@i=(~Lk<zj9yyK@iZYMU##b7DSEWQCAh9sy2v
zi{Zq)WtcbQC|WJJMm`BO@);Yj*18z#Gfm`kuZhrE&rWFTb{!nGr(>h#Qf_nOKc1YW
z$wf;dnde?&HS8I}3-&!?qV{vlE00!3S9Q7M!#mcNbe<)D%4E?^E4YP6K3o1MA2yrU
z(0SAy?Cx&j(VkVpwJx*qEPdnKM%VGThmG;zr{g##ZazkxjlwA`6)Tkkuz5`)7oY5A
z4Iath!!CeYa6h=Zr5f#i9E527%b-56hR$UP=o|l&-ThpSqVs<Vlb7V8;O6eC*@`q!
z)7AsI6LmEmDnX+~C7>K}nrVNWkIi***5ft|rFY6P;*S^*G!KNZ?0OLW{e~GNZ9s{4
zhdk<O2nzP@uM!Uo#ekzy%y-*>PTC_uvE(%yeTuw_iwc-i^B%CDy%$Go#bNn*74(s8
zCI-S#EdMtIHb*2O@GBtknF%7R9?M*hNNCSq!{?WWkk>Ymrxohr{T;K=z+*nPUR=X_
ze+}n5jok5T-)dBrrm~CW#dv>ze+>NN4?e0q4l{GfhqUPw-}G%8Y8<-^zO>i(zMY7+
zB1aIFd0{wd-QBjQa0s1ImwOgKyU`zV)$k=OIJ^%0XJ>-D?*P1U%ot0bb5QlyWodVb
z`{OrJu4#7-RpLWomUTIGl3s>}!<4TL@}Swq2Ez2tqS3%~n7RHijvH_q4YaoL<bmcW
zxIB&9bgA*C$a3x)6@wZkb6I<u64y!%=Gh5t+?DQ8srgMNeV7XxqoSFCY9VX*kG_*X
zBCB*vD!^=V6cZ2LBDeL)0nu;eRW+@ZsQSJZnrf!uUo)>_y?}wscoR@j4xxFXmi5-o
z!TVtbD4rMs#l7BGIy4x&bo)W8W)YL5gfgSw3L*Bd4b;ay0nBaGac)5n{MNG`BQ{3C
zsb3tQZkvp<3+cT4`Y9aWFQ4As#VDOW2@HycQ1`1l?z~LTPD;3?pL+yvR9(ipWR3jG
zB;w`lerM^H{(}WBO9#IuO^`hQ#AHcLLdgOE2cb4^j?*UI%XO}(N&i1@7cTqj6T3qW
zMvG>mj*S{`G@8jx?4?2zEq9RJb^wLt8%wh>!yxhH2%K>?8g&+s@2WZ#)NM{+@{+BP
z>=6ek!%suJdm9h=;fz0aZ$cNBP}Vwi4-0=i4dGlG4!#x*@tZSYRS81i!+l`eXa+{d
zJcvtq2|D~k+4)nNsFAV|#AzYiWpXl#F4oZ?DaMqVfmnKeDfIjk$6Uq{4|n1h*7dQF
zpO8+#_R)dh_6;cqcmUlp{^mY6Jisi6G=%+Uuvw5T)R^=^9?}@k#fS6cAx`?xy)BO?
z|8pITc9o(@RR>eetVh;QhUH^p!2aey6puSy)qBL1Sl8WbQI-*AS!aQiyj<CL%P41E
z#LSc%i0S#+!g-bruPgRI*%;EBI%RS=O~2PyKMEBO)p_>`M>ba@1z2P;4i@B4??D<C
z?5V(l&^9JJ7briKQvorCv}aIR$BZ-N{1y4zvP}bVM*Rv*(J6$+lhfdC{y-c&oxB5A
zBO&VCH1f#T<4pT}e4;}$&$?Rfe)R;F_*Y@dQ&;j-PL~TBhJeXb4G`QGSNXaLVe3ja
zl+1j=4F0_?Px}(bny0#PXXDAB8dJyxs{`bEswYv_w}ydTAoJ%{P;zxV8Vnx*5ly?G
zCa(~p^p>EGY7J{WwUz%^S^@`%^{-Qr%bV5^t7L2d+^nNL+UG>{`Ch<Pwim;v?PjR`
zAs9UaGf?cc2eU`51JSrIRle_!^P|Ts5p%!u!L}n&wDYapyVD1EEKb2C1?y0-tN?!1
z$%(6w#FSnpV?>t|F~!DFKT^1*HFa_I4dW2OAF#@*RPG#^#N*>qAWlz?gZ+Y0a*MRV
z=wHlN9tM}!q+-D>2f*?yURggDJo=x-x5=^i%V7$__Bdv+$c|4rQI63!`cYR|7B6g&
zVSDd57&$Ev1!>9h(s#L-yf6|EUz?6MV-N-3ZpbB}E9C|T3Z^(&O*=y~mjBBeQ;Egn
zv*rNoRoj4mLEm}%%oD;_^RrP$`7<+2NX18IYvJ6G5<K6}4|UA)q5U|>y<{)BpGq>5
znNDV2HT%(P$U`B{j)m+)S9!D9AlMyhhjzcsh48_EX7v9TJW;mnUTwq{%yPlZ9khSF
zXGIzJZD3XD1j*uetod{z_zu(sXNx#=`6`5#<Bs5vkirh#U5<*2VQi{*03@fTftTwe
zZfiOkq$3Z3YOFTo-zoyzv$d>mO$4;gbYk60bD34h12$kU`DorxA-39cVfUaFyz4>+
zK3<ZDJu^ypy~cN*6E_@H8y51d%W6R+pm*u=7xL(}ULfdQQ1Q4f5T6dT!UNJUSoGo&
z%#X{49Zxba+xrq9Kk+gyT~UPQqsJ4=S&MxS(?wOiT}&}yHan(>1O103;5+0S^U0E+
z@2PPh-qTRk=_rB10u^jrc8J|wLOI4H7w!->2eU(MaQ;81;L)}7AemqwuP=^-&VAZo
zp>&IPI-Bzk%3?GappISM_E5QP1-Ps#U}@fAXl6NxdfT1A&>;Z|e<VRi-(~0-R)>B1
z)X_L~4VU>!S>0ed&(b@!(y~9l5k}0z^~4fWj~2F$4hP?K20edfa0}JTyl_V(e4D!m
zB`>Sw4Q&k|vrzE9jRQbMG61ct7D7ni3CPbhz{;Y02sv|yXWZ1p(no{wWHO?0S`;e2
zB?~vSM4|YCT2*i*btf#S$Ag2f;mh_#IGp<W6w;6U1Rh7rns{uy9LBmIjDXH{#NWJg
zoNGm;v$UO25c$g=4X#~f+EW5C>0BiiE(u2M>p^fPlsX<A%ZO(g0prInL$zz)c*zmc
znhTPJ-I`Z;cSf?%en%;9)%wU(Y|ca1tyv&h@PWN8$)tDp8m7@1hMmMej3}yLKk`B#
zP;?bczB=>DTZSC|t$=9NN>KVkgnf4=(9gCGBv#vmS~*KWG<pyGdzKikK1)cK@`lRk
zgP^Ei88+6eg2sS)*zjyMa_W0=PVnY?KGSn<{+=t|P6wN5aX5#V&YoWOAgT_)okK>T
z@vTeTK6x~BPpjj;Rco15Zv}Ik4=CB#pSm&W-2G%E%>72)p>qTv)2IElQ#^K22d9^=
zHMrR8!x_>6nq&UvI@^l)0^uU8U-6vx<{SaZe>QTr!D`rI)W!`y>}N(>FQA1}2)xpX
zfJyhlL9jN32Tr7U(DW$uI~)jo4;r}Yz5?cRKMZ<b_(L!GP~kOw%g*XBv!+mtpQQ~p
z4r;_3zrdXa)WFV=YiO`H7G~1#i7etU3(Pu)&C}Ph)^HyveG>$Yt9L=fL@8GzeMN7`
z3HrNYOk5~~jFX#CvSXqA?}Ox<Z{5I-tq+9j!-G*Zql<NaJj%5~qoF0_Iv0yuxcF>h
zm88^#eBd&^V9O-DBn?30q`xUMS0z+vPXXCmd$75$3DTkSVXJl)*t(=b%gH#L6jXzi
zL$ZirsSM(mAB7I@qoDooFU(1A8$^Di8SWeHcQ^b7t;653<(mg$h-o^v&n&{JqZMGc
z_!o0ZTn#0cuiy?%4Rllc$)`1{lV>4Qu6Oe?KKhPmc<l%Ga_?bA;lxP)XvQsiFM_pa
zJWl3Gl&f(AYuiC+GMAW@?`J^w{Y`Z3i7lR)!TZYI@=1qsAYy_HE|-uuJ9q`#zW)e@
z<XSU}4L;l_=K}vczXtp+n1I-g*thH~_!--Rp?o(?niGPPx```uvx3JLC-5{yD{~K^
z=XJy-(7t>Hf}_c^c5@y1yOxu;d=4>w>OpZg+j8mHWK0vkU_ZX*p^JVbI6WK)FWl--
z)lke*Jv`_fmj%(pa5}XCP<-i$rR(4rTrm0&)@xDT^h5}J=*dGqO^mNc?#9*w?ac6#
z11vu2gHt9gMtePCZaOF6<b-?-j5Gq>7v-S$Kn!}0XLxex87Qs3%ZI&UXx1_j6yXEq
z5o;p3s{1$AnWfHmY-#}U_+k8?@^I9N90dM~d}#gpmI;=Rw7^W_aW0Ppo#D3NTwn`M
zFD8K~IT!yJPJfg{_jZPacO=&GWbZO?`6LJ5#cAwRWCH4ZnGTX$7x>g7$`u{7vb1-m
z=XmchFkbT??^*MUr-fZ*O@?bhGW!6_IXDTGh#}N27>KRoPNQ>OGmCf>gb$Qyhu3#i
z7$5pKS6m&&S6vMP1N{uSICc>8Dmn(fk5{tQ2NXDdNEE2fh(zZniBOsPk>_7YMu|}`
z%W#eZr>+aUG`13LmzLnx!b12^fVg8uHvBkJ1Mbu#(7mEl*xi2uQ@0NQullvT*zNz?
zwHO^FJK4d%7ovzfyckzN{(KwIQVM0tmwdr&K?yU`*5cNK6(HL98z_v%Vx{OV>t}um
zCT}HHkGvFZwA5+uoP+IFN@(+yV`}0=EL)yQ-t#|ri)}UZWSGG4vl=MKFM>vseeBIs
zcl`cq9yV?n1EteE!Ex6K-1(k*!`7LC$?_Te?T$k7K%KR`v10-4Q#|DLn^JM753#v#
z%J|x68?a?yI`7uK!-D4!lX-L%^c?j7jny})tCSc#>&}CT>=ev+LmB;25f|G`6|S<D
z!#~#4Q)HJ8vN;LNIU!%}I6noO?pN@&nwJ1<j!?(gC>UZubGVs4pV~bd&4v-1Ffjy7
zUMI^X6CZL#c7o9F&Q_T1PzXZt20XP?pL)FV;H8B+#ssHf;H*;a3+~*F{8?S*r`V3l
zD72kIyXd>?L9*_rT<!E$sO&IiYAYAh^+sOvJqJbl!9w{^Yczbg2VVcWM6;|eI-KL^
zbVM1VwUxj(lcDLTBPbtq8ueO_L21o*?wNjpX$=nHMynk_Y1>I$w6GA3|4QPT$1<VW
zWHG2-_lNe%G;aO$9M)$>qTt?kOYP|)u=wm=H1Png_c9NiE}Oy$N7B8g1`5U6$wIZ{
z2=t#Fi<0`SsPvk=OX((%t)Iz^zY9^Czlt@UKxi_4%%dljV)o@u-X1rQ^_&<4PWEZk
zH}RBt@1pES>=^zmco6ZLiqOLBA0{fDDHQ%&0)f@(5V+wo*sP|%H={+^I-rN?DGdg}
zjUtHOISBR<TWCbS1vX8H1D%?SJkq5G{d=oGcA;7L<-Z-+<3)Ru>Dkm35``Jj<|wXE
zhSFztkX1sn^?)Rtt+AUJbZ5Z%#sZM}sB-ZikA)}q)?)jZUp)H3SGn(BJv`d5p3B>`
z@K<^*szuAV*NVq-gBzJ_NJbpS_rJ!C^g@WKr3A|{mVCN;c;8M9`@$}vk3$-p7@LN5
zm4|RuP6oj1Y2ftfCHYQuU{dQ<_;LRfv|KyOf9xa;V_6=CO-M$K%AZ2B`SZd4aTxEO
z6^Q;3aj3I7mi9QD3lExO;k9JQ_fbNh56&Q&)PT~n1KGZ<fe=snbNNTw_1v<oQam8u
z*qa)V)aSCgvcb6Fc{MiO+zIyiR`A?81x3e?gTK2ghHU@DqQitxzvMgDdQE=Mp<z5e
z<~&z;J``GCraZ&AVKCVG65I{Q2Cq+l^1FL1>C74lvK@{*SaBU@ABo16m9?zr<`%eb
zN?8^6=iGB)1DMgfBGC03mNt?<`g=NY6|I<s$|3%wz5zt)ffx~bfxE9t1ewEIrtxws
zxA}*G*11xyV|)>dUdZr<Ni9m%j}n*ZI27CxflJ&F=q;+^3Y+q(4V$Y`H8~x7)Y=$o
zrNUH|aO{-^K+X+oEL9Dmo)9^7z9bJ7o<jG{!Pq)*ExY=7G4=*b!T5=7%=v&L6s(sp
z$=+?evDX>3VmE>Emqc_fzsO4s+qqqvGxPZx2%^^knEfK1_0u>HIs>h^fpH>!{W}F4
zPZ2|Hehte{;9z@Z4XTdUXN6<LXyi8(T!3cp-a0mIf;&2<*MiRYRO(jo<6>!Am2baP
zP)=%L8gIf_Yv&QxXd~ufluc`0t_3^nBB1h12w40*fQ2lix$hWqx8a-E!wflAJ1xV4
z!%@63&K_I_WJ7%88hm7~gs<FaudVJ0HkM<!*X%RAsq_--tn6WC7Ynf9ZaAo4tp=O1
zmsszW9M-gS4xYTMfvwk0GOLy|uy1J&)T~P+-`qDQ42T1}8A*`*EfMUCbJ1da2^2?e
zB_?W*yyeR^x;FNZciJB{?!9M*?>cxg?e9cW=77e_@t~oc$~!ORv*;DenZ)NPQ;gOj
zo)PVdK6-Mu!Kb)yOET-+Gz9G*&V@x!!l^e-A7J_=IPlm5HOBaYcF<`^Hm!$(3Awmv
z#uYgCp9Hl#B2nY7cDAZm7n|zVfa+rjhz@>`i&HW!BQo>JcUHicT#CcSuvUI!cm`^(
zt-;H0i!ekg;96CM=<(lF)EU#xdjb+6mN>$<h8SSZJx`2$b`e|~ov?jmEsLM|g4bsb
z#uWE+@b<?}6fAgYIp*>RY(Fy=6?U$2t4oxNKd69+57S}oA_Up0%dGEL0_bRMWR-8F
zILV?6E{bR2)_;$p{>c59youOx+lAajcQ(BG5Q-YV-{u<cyrKU6DQ2=dQvPjbELun@
zYyI&nFW6Gb4u=)Oj_^3R(`JXtjvPh0|FEUm*5sF+!|!GT8u-rxb>kcislUrCp2#pR
zhP)3;BDq-Uxn=v@Kl$e?*Fod?eC9G)pSVNka8(TL$A|lKld28yEhHW`pG!xV#4Nt{
zK^BM)FSG1w*QRgBNv>LPkhM}z=klC5xTPF}gTGb5R?`BsGWCHY$IfEO-*qVQC$_5o
zW{mz>Mj8Kp>_cD*{2F)>TaIcGGx|8VJe~wS&1XQSLwQEwIxvuRF^yN1e9@)?m{%Bw
z#UYa^BbmgEKdV8Xy@dM=&cN=SYgygb^KeQ+TKC~)yjgz<SMn~#(i)oG+RUKzZ5o<6
zW?{6&N79YoF!j^qU->)}4j5LW@qT4!8Wze<c4VW=kuWSxbEfC@D%;Z;gyHrpu;s7{
zD200C{0DQ<;pupME4_?atrgfoT@Loc9O3(htLS^{4Qp3dlkZ7MM61{%AUIY84%1t>
z-3jt>`s%aRT?HuZ@MdWv&hmrjOGry=z_bhFVV9o)cDsy%^UB_MQ?MF8->aqWQ|h_N
zCLQ*x4&@G`tDYYShd@p84_JhwXwAo}^_3G*T=Y@ck@AKEH6SRIwnO321dQ;e9-lkv
z5K$$@j+B+)V`~pybCsd%iW)>aRB-KmXTffHI;s%cV6Dy-u+w+pjSaWBQBn)PRZOgf
zIVNb-$9a=uG>Y3<)xGR;95dzuK6`u)^!(a*_T~!IT6l*ScLt-w?jj!D7AHJBj(RdJ
zI#{{18pI0&g@RRCRiZnGK{IV7E}eNC%by-Yk(&!?qZf&T?E^iH#NXcXuUu_@101f{
zh1oOd+0ZnlJ*WY68PN`9;8+kA5R>BMWO&e9LmBwIvgCnt*htIC=$a_QPnSdSL!<)A
z;*!v8-C_794FTDNA@E^9KNS6w$ugC$p@v#Nw0PCbWyO|UG2c<%t96_?{5zQ|<mbWp
zz#X|v6bn5mSDBl86f-WbWdfI5EGZ)l{XVP#(e%hFi#rw2A<aRtC=vsAtE2e2%(CA6
zE^F6V#MY0GMLW}z%=GqfbPrB|l-v{?Kw77nFpteXB_alJEdQs|5Ca1n$bU#I!fF0c
z|7#1(u2;qbA*twPR>0Ig?147xQp`Md1&s{Hf$jJLG-<jfPi&$*lJ;=aIC7rt7);F3
z%3Zj3upYj-S3vXgA>MBn^)KC-0m_@gnZPoK$6p(auiqZUw{(7;G9?CWM)b$D#}@$F
zPO(a1A+(2_04HNP*RxJT-H`<_;J5>djxG}Vm`AW?<zN&yS)y!46-Woo0j<gX@fBRb
zfS^67Q*noNL@Gni7;nt}wFyKE{DrU76)2o4#y}gompwLMP?srp`>DwVc@ErVQ!(s1
zzY3%02XX-xRf!F6%9CAxu}WeoI&Uz=?h^pE-$KD`&kj0UT%w%uC2-mBfMa3~G4I~8
z<vYl)y#68IRlXRbEnmoc>q0nxNG$fV0<?Gj!VNAT5;oPVK*2Iwq;Z*Y7~|y9Z*pEL
z&4zZTr^2R^BshHPI#^5_0>v*cq2v1LC@>!mf*LQ2-X-~Xu528B?lDA*jT2bgcgjtL
zeqoI+e$a6_mif-E1wYHN;N%eiMp4GB>ZcLb|2`jel2$?ZW+&<mUI@;&7(d<^i?2qh
zV)ym{rm2<(S{AhD(iP$Rwa4(c)+>0+JsJG0+PQsd3|HKJ!NeNDOtj+)kMP%p`1?})
zLGOFJ-N(?XP!59a+W6r>8i*g>WMzdT5r3Xz^~Lm_6(`BhsqVmwPXe(~MTB>5CLk`T
z#S-&)2w9(rQ`Cl|#3Ynu?}$bDZ<Qckw+9To3*~m!-E6@KU$ndD3%0}!%pNJ>PFE+v
z;TR{H8R{79oDO}#A)x#;kNNHy3SC8^X!~C#x7s}(1R7r|&U~c3@qPMs{~?4;2S=dV
z->GoF%K)2J)DfRPo9PAH;<BHcFn&leMr>Jtw%bB@!HCU}y>l2C98?BXFCRE@n!0tb
zmviZ|)esz*1EangVDy+jx!YMY==kq5cTu9A>(bBcjQa%)d_*%&!D5*GCLK&l`{W&i
zG(p5XKs-GYx<mT0sqd~sl~Xo0u0IKXoBE(mkUf;RpG3XUv25&{QnV7DM7LKW?)C5h
zm>o<9f5Q>j_4xz!HY7l0r8ef2&%q|?Z{Qg-8JxoBV>>bWIxJSf*#1}1&1w*IEFgXJ
zr!n`AHpB-i>ruz23}VO+Q83dKtOdc~r2UxN85&@_wiulMwiAlCpRBsS+#Z|58mM!6
z0xMlH68`(M25+Yfr%b|jFlf9ZESTL7l)q}jBP+^Xx!z~Vu73RVl`R<md=D;iKZVVf
ze+Ug08FRBgsk3R>1!(TjfrQPW=#^1}sG<+aKJ~=#&;b+E;nXvmiDsn1MxP&uannt(
zKF=H)BVAyhZU7t*M1W305^Qt@Y|0>Z?9&Qx8oL$zht;CtL8PvPzgdT6Au;`jLh$u?
zX#9B!1%h>!AMP0;P}j6q+EmIkB|_H(GdS))0Y@26#WrHT8+N<mqgKiW&^&8pG#%P>
zJmGp;4SEKJu*!a`Df>2>X<S&!BF1h2fgW|ly17&DZXCB+qXj2%8}W$6mQ8^HC`hg|
z_e!sVH$#u0+SFh!5&dLl!LiI_z(lA|kAScL#^HY->(O^pIoPaKVgI&o#z`hSu{~E=
z7?4ng%|Fkf<c~W{@^-t>;aUf4jh@9ds0XPs+5j$8$}r)sEk?wg<3<_Rnd+S#bnl6+
z)qgHac7D$_pCyAL#{rx?+qv%KLUg{pPB^|yfCAkDp~HbR_9F!`<<=7XtNSd%;tAle
zDjy9t9$+WFZ^F(rV($=#F*aZbYAdCpeat>^YpjLR{)y<Tu*Oe*d3eWA69qB%gw0m#
zdGniOZctw@e5+glhS#pagVozHV4y7;iO;~VU(~N%d6W72$e~Z&5}5Eh3RV__z|&5y
zp=%0ly2BykPz+>WyvF+N2*nl7Gcn#)4NUspu$$M8qv87$aM><}gVz(VuV*i`x+!>l
zgCk7JEThiLkzjm35EOlKOGBq(a1xMz*wfXb^tdUUlj@;cKsC$`P6peM3QWH2zzw>l
zaY6eA`HZ(ius0f*L*E7l=WS7=E*Fb4+)?!%`Mcjw1C20yhWB@Y^>=T4H<B2%d^ocw
zu1H5uGdqwKjBC#>Kux_+uo2|4>{W~4uYdH>?Ohh!RnCUa1uo><yCeKQXftZL$6)EL
z=S+sP`Q7miSlKt2y#8N>UO{KUepDDse!mB#f1QQdg0WbtABm-x+W2f|4{U$(fDP*Q
z#<V^c5Vy%d*<&qfKd~TstOe2(UzFb7giW_|(Pm}?3iepT;knx}<Xs3V|5w3O8@0Ki
z;vJt$d+NtaZ7JV*oY}Qj%kNHX0I7=`cj+IBK6|a9j%Q%9jwbkuX<t-ylEt2t;_B^=
zxK=NM@;mE<vDGp#3;LG_yPk($-31^OB{6vSnr|q{MW@a^pc8nTvLE4c)#bz*YrVn`
z-$(+Vp#spc-49~JUC^m~lDN35!FR=EnE6AB0s}Euv%LhTzhvR+`L(D#B!jmH{$jnt
zV5ZT~!Q3Wa1IgaAtlvS>(~m{6mMk6OO!>l*&)3oF+g(0iifCXy2=^?L;L>$D7_XQM
zAzm$fenb#DTUwzYaj@L?7lX;Cc4P1F*SYxPlPX)MVvPKYd;^j@=A!E4D&M#BnOJuM
zDjFn0<K5*TXw;Iskv`NDVohGJugnN)c<{$k7*a3^1)@x$&-QOTxTPA_dhS8v_^Z%9
z_Ya|uLk0FZtb>+q{fIsLipl7iSdi|C;>Ryp!PxC!(Gdl?9%*2&`#WnqLud7=i<#$X
z7id+v%f{;yFQBQHU;Y%0S;beN^TatQ(3lQ}qm#&oy<gs^fz)qynz!ta#$~(aW7>Cr
zP+B$`?MVyiYu1FwsnN8TF@}r8N9k)Lf5yc6sv$Ze+^-RZ@i76cW@8@RZ!J*s9!_k>
zP~oe*OPE*ThhEXEV0%9^e75ZZ*qEfDZNMNDg|xC=O&745c2@1ec6qji5UZZnqY=7-
ztk;Z3&-o#2?s>rVx~f@a?^w{->&pD)b?A7l81SC~(3F-A_~0qmP4vMD#TPMfPZhLf
z)H2b#_42TUSWtutp{M>0?@g4Tv+N`<$>QK(B8Csgt^<(gwB~jY+J;|4N!CZ!Lw*l|
z=R5Q63FMJH<X$B>Fsn+o&k<E#T>??MGm745LP*L6&=duuNtP~p+SA_oN-fV8y`rqb
zgeu8^VEMer38)%-f~zeaOIb?dNiAp+`tD8wy{frzNo_Mmv<##SXF2zbRKd1`_3)J5
zU){P>xbY|AiUuiB=T9ogRt6CXFAjZNh%9rzoH~_*S=(|I>Nz!l*CR_2?#FVU1tlQ<
z?knt;u7Rc((aiqjaOhFG#%*e%d7I&HV6;TSy4zEEm(g9;Y%&Qna*6G9;TBV#y9kDC
z-->DTB&^cvGs`zO!_v*fMi6f0YWgp@#)>8`y^zlZ8q?_K*(bj>J`Y5OdU*W(9^y5p
zvRn3g=u78)nf*xWKHHAY4g>k`UuUDkz<uCXpU1j;JB7DO9nn>mi&OszLDLzB(c;83
z>h&#U9dnj|0o#gh!_C3?DX})93oRAiNtUI5+F*-0&GzZaJV(6}L)bjfXov@$4WmKD
z%M1lSA6V9lT|rUMY^im!2tL;OqsRYqNcv6?@>YhPD^sBR#Zkag8|ay32VL8%*<qFl
zr@Jqrzq%*=+k;SY;gGN<IFe??0A@6B9SeJ34)vr%Y;8#Z>+)-;jgxW9;6QBeSqmE9
zZp-zeiG}n*7j_?Qz_od#fs`1aDAWYa{>_AvtbAPFFAihXGhm`s9`$2A;)5fvLguZJ
zcs4W>^X8D3ph=9c(kM?#IU`}wBHVsOgo(QoK=v*SMK^U}Ut&D=`%3)51!pm<CkMTJ
zrow&w0<1rjiWbXHLE7ruOqN^E3ts)sT7pvfAzxGC1)Rsj<wewevVwnZO2e)r`ypTL
zGPbJugX*Cz(5U-{bq%zIr$+lQUynTT-=1>^72+f>w~?#nOv4#p%4okc38FiVSx34m
z+If$F2M=X<<f9x58b-lf>O4~}T}rcFBJ^w)^X)-t*t7W_H*OL$hnHWOt&J)g?Vrg6
z3cD)fgZbd;O1<Sd<d^a9kKL!$LGm*fOONWpub@Jx(aXiem9;omY==S#UEkgH_|Tm8
z?z8uUi%Tr)yzR^jh7Td8jsy<u4@Wn3;%`)(LET9)pd%nY)0<gfBA~7fmry_#4o%fH
zOcMG?xT44qkJi#Y_F)W{2JMBmHI%X4{F#Z|cM4_JvGV92dZgW^q2250+@WhUcPvWA
zGXoc+MVlvYu25pl7TcKZAN0*$aFv_ct3YzdbvERaIeOSS;~VPaWWSHa&x;45S+@jR
z7JuZ=Kg~v;DF;COU%6$5QVi;SzCs-Hjj-c(Ia-gsj@`v_nB_ehwUScFPo4@!nv=n^
zJ&Jjl6yl_*#I~K8$>271s}D`XWz|7wuy8P6UlfCtr!8TXViPvb@dsTGDW*j|WZ%?-
zNc+))w5T&+*1DK7$^Xb_YEH(qm2u#<zms>pQvH9pfHmzq%VA|C>LsUQ*#;}z-EkGq
zPR~T+L21ysd>9UXa~X2$Gtubdb*41|F}}$VpOwa<VCHj+!^G4{*4)fzuLvf+s}_|y
zN8#)hfpEfKh&oy+;4r+Bj|@GI&NfjvE6<U*#unUD=`!zq8_cX7FG8jE8W7a|5aOpZ
zaAu_mme(Qq8aT^}KMyhQv$5GLM!0AZbzVHL1qz4YJi0axT1UZR@_h7Nlg-}FxdH&0
z#6Sq=)qmBa#JLbAtk*`DoPW8pUlJB1w==66G36DZs`tl0ZkSn4+0+e8Q?G)!CJL}o
zt7MK#qd<3A2Kgt787-gqq;XlWs!<bf=8i?_uz3)_cM4cpkB5fbe)zA~UhJO4AoSfR
z>^l8`8P2F?&3|UH&LOL@lJ*jQI^q26kBOL@myY2RXX2_WG2~fTg9mJDFxW94r`lz}
z@>pWkw*Ihm*yF`Iq}1!)^B40v8VQwKcM#)2AKYGN^Ym+HP-I?2nMYzy%dWBp-BVx^
zQi!d!YG|vJ2%c%)&^GNj7~LHQ{`L~o&?nva>Sp5Q$FmSa(q@l!vw*aNXnW%#7kCy_
z+THyo6z++_wL=yWTWSk4-*5trhlX%HUjg{lo(IKxYbZ~?42J)1gtlG2Ak*H;rEk`P
z(b6SgMf=gFgU`8fZy`3_ea+QUC&G%c!?E#m3}~EQ$~uqq$Jx>{cw;U{@tXik>G;XK
z{pjy<jgpz*_CXU2dQ9cfSAGMnX_+wXmNEM79?zPW^~g&X6=Cu>;x#DcSvsH0k&7hZ
z=)Z=#5Pwv|;$^<nSrm`WZ%%Mk<<(5KxENX|eByULEypK?7cjm13~HE$La@dq&@`n^
zhJ)9I5v&BBU%L)dAIae7x;?bpc_eS%RDi2SR)d$v4f!kI6zJ5Y-06|g5WQm^Wb8an
z%)RBbxM*YYX-R0I){hD1r$X>1^4mS+C@}oa?<FyG*}4tAYIlQe<UVj176i$Qoj~t^
zBStU!F7z{4M1G_}!jB8gaOG|VT20F5C%mU(yM-5chMeZ5Pp^XK%X6&c$OsI5PdooF
z`4AL!0ehBJqHgy_TphX$yF2Hx)|$0k=a4O*_oogfFDv2Ojzwcbd@7DFrGCK;X$bDm
zm~7wgLW%l0teQ}bV<ZJQYyL3QJ~<52y6M|-*#)n;P{(P)Be{Kygoz9as-i#qlnZ|U
zz$EY7dGcC2>VS>n2Fb}nmmzsvXXy|ObgbkyN?Ob-z=&6r9z=s3XYtjvI`I0Y&*XoJ
zvD~l*Jk{b5{z``h#Yq_bE?MqYPaKAX5KJ3A4pp+kz+}b|z`*|46j6_VtX+-EO(anH
zhFIN{{rj~)5@vjG#Kkk>vAs{3^(^;BLH0rvH>crBVrQ+}9!1~Uo2;1+B0YdQUxx1i
z>!Cr!=2bxd1PMmVhGJ4g3R<k54GuqwSt;aW(~PO$rQ^$6-{x>pQnjVdpAT4*&o+p0
zK7w{jRMB(!6Xw>>fwx`T4$qs<!z2G{Xt#-gA*zdLUqt)RxB_9*#40qieZ~w1oRpg!
zC4S&tA2{Tsf$er1<R+OjA;K&jTUTpA$UY7Bz>Yj)i_3&=4c2JuHV$o!rh&xMj0vl+
zgE-tpF4B3+%IdAKeZf!Gt(z`y-%Xi`jbbeO6pZc9Bl#h-HCS1y#_Ya~hw^==(Qj}q
zUpsdRF+y_C$lH+}9Fs};fZy<N7i9o;=t1rU1w8)K0yjHUVsG&ao=>x8<M6HE^&wcU
znO*~~>iTGA774zy5WXF%hBkxM5aquSMG+coMM)(ZwP^G9<5d{HR0IuYs6Xbvz0kKN
znyXp<$@SZb`SCpyTVq4$8aYFok~Zl|P26~_3&bZj@B-N)NM3#l)1Hn2ud*)r<as9O
zl5T}P?bWpRz9rw6PVd><#`1Q1DeJZDW*aFx-t$DvwZ|2~Gjr-OS!Kk`PFLZhi{z8F
zu@`1XpM;R*reIew6~3*eZ+~wV{Af!8-(OwKIL;LgjLJdj=30=_j;z%!3fAhXp<B@t
zHu6doY52*!w<?5q%ceX(g7O<1Qkh0E>9Glin0`_n2q$hto$R?F{ZzpJvRR0U$|F$m
zAw?Mf`X^TztBk>B)Q`M1oEXF-&>?&qB*%C$C;h!(yD){DYsX;z_DyK*tcreXR>Q@k
z<>+uE7Bt43mWTWo&-Q%?g=yzTpvK};@~7E5@W;p1X#1S9gCBB0@@4_Cu&(2?UG@0f
zTLzV97I8(97jAIPgcbRevp<<j+`rYV?AKMSd_$V`r}Lm88$|t;%Po!N4siP!_1@ev
zmuub2W@?8*h$}<+ptgK`c<=)Di_;=j*F6SfuJKm?KGv#2oUc3N8CBl|inBB1c25t&
zj!=8VjdHG#48qH^E}_wP3npEZ2B!pp;I)nNB7ej}=d>$4YDqj649Ud{F7qfuIu>xF
zKkOTE5%t~&;a|B@)RB;%bUpcz>=2A~&O&xD<w_#*SiPwjy2}_>mn&e`K?k%c$>ZZw
z#-h?QJM8G!!VO;k%Uk-%K_bf&il3T7;NNoW8mENr4m2y7S5jv|5)*$P#H{+(!qg-&
zRBiIW$EDLSuX8Qh{S^%kc9fm}p$+BlIBuF)jE27#!Mdr2Xnm>>Z8k6A&K84Nw0b@7
zn|p<sr49$_<5F~OCdT8g8az}z14EWhhZA3XaNpli;Oz2QZnysvla8x`S4T5IBVh{Y
zj5UQnP#3>#iH6v@1yDvaoJ=(k>Rg=A&1Dn{diPoul(>N5XE%^ai?MC*PIM`)#fEnk
zFg`dE?(eSvae{?qMAieof#$ZB{GDKMb&ODTVl1~9_mQ<2?g5ieY0S+vk>5K$hnS@d
zL<5Y#rp6t+N9wThCFjvDIv*3d7NcOr1-aY!bKGLk5bA*<4`Ssxs9gAlDens7r|!$Z
zVzv=Fl#K$1J5r2@ngjhb7Liua$NUZ*1C1i;2soz1pU+7of0r#QE$iVQ=gBdf5!-3e
z0A@ef1p;{ywrhDqVgE=}BOhSig|!&JbrrE#q-YYJCX{)k;c~+yRC%UK{>5nFB&{e+
z>xl#y@s+3i{2xbW9v1Wa{oz4KLI@!WAtXttd5(;PBpHNM5<(I}l6@qVREkv6GOa2t
zqjj3+$Y>Kn2=NIagb+d!e&_r9({;IMnfLOX^LpKP65M+jh-|6}wm-38ryo^<z^#nQ
zPY(i{=q|YacQh6(l`_5GXQAL^0SjyqFwIk!@aGLp^fJH0TKC2C!RPmpgDjP;d|Cl{
z(^EjDCYffvQM@*VoCR?caMy}6(6+lTv5Cvk=j|l+Y(*H9cFo5FB{MK4mwFNDT|oS?
z5MHU<p>j;4nR(|K2)(hAo)@L4y=e}drtDRIjt23n;}mmLt*~ApWw8#6Vdk_TJSnlj
z8G{NTtJxVXC0k+ihC~0)fL2Y~0B<tL>l_)0tzu)ozAxfX%gyK|_oV#55+*62&&|jH
z%E0-e<ni*7_FLrK++&S)R%dbUXD2jYM~-t_DX4F+X8R8YqW6Lr(D?98*#4&nWhctH
z>F7#u+5A-4Y(~teb^c%(Kt6!8QV=Xy3zE$DCCS^?W9#egkoDOPWrrUsQtOgY{$vXK
z&_@#m>kWl3!}B1C_B{>m7UYYW3aXN1uDv^o%{w^<&x_`wq&Zpfu6-TKe%xS>a|dGW
zsZ^nK_a`Q)`>HS%IJ099He!}pFZ{9LELf@bWztEFikJW7Fnp1a-en^2jmuH6(v_g?
z5P!I5b^<gW*DGL87O(knj~h%qL49mr@N62xDm-hMeAFAJ$e?~ib-X+v=rTr*C#vLf
z>I-im--U*Zt9SE4?I)!?aPuyxzHSS9h9+R`z!qVIQwjMGl;|g44H40DhOOz$wQLwD
z_qFo%e{<k|S|)ltOyd4;-Ep3y2-T^_XHJE%R_QsR$7|}K<y?YVkqub9p*`-xzP#|p
zadaD#i*DtMup-<5ei)sCug%nNa|*}yZA(B)V?5U#c}5}6$RqZc6g;G3Ak#GmO)Df|
zdG8bx*!h+`(Fp}b8NIVPFf)~vMc~=81C+z&0z@j={x~V_`KyIq-l2R+Rvy;+`Uy|6
zow)OTDSFKy9;9xA!YbrDOaHipn9pj$(fx=&YoZEri)Y;FJ~_ljj{_rJF6_wMg8_Pr
zu-@VlYm98>6~u>C8FhwvCe#XftSyR?{_+bq6VdsN9tMBW0@LtAtoctVbY9#Ka`(yL
z@>ox3-19uTthMGfjDFU(QpM<Q#LN4TS#s^699@)@qqri$;3FS+?dWJWdOru1L6*>Q
zI2NS&ezXJL0{3b5T691T=Ho)a^i~;pONv14r7t}P-f-8Q)R*q|OX#R$NZiH?7=GeB
zZhupZZTqH!MPnTFGpWQ^vNfoG`wjOkEyMfog}8ET3>ZZibKSt%3U}3PC?$rSjq@ue
z_3;GJ<4w%v4)sFz>u{y)idopi-&{F-E_hbf2-96u!PIsSN>=I!W338!b6fyRKO4s-
zes_dT-|5*|k<RPCD`1P2DX!SI1CwZGSwlRYFFxe_Rjpuix6DBIua$6^o;~f}&zW(+
z4y^3H3D?u!)^0}vIF8Q2xx~c2*Lx4bt$Nl;o_U))cX(t(CB#oTg<)Qr(70S7eC;$C
zQ=<2yvYWF)WBhrAYS|F@wk;Qp9+I2kD$SKveBjbE)aNK#%e;b8Fyegz#7`{8#*|_3
z^YR+JQWJ%86LQe+TLP^gd%^fUZaAQlI^LEa6rPfa5XsYUfII`jHtYbs8hbc*CK8`z
zQ9ep_AV|_x>FoA^7c~#Z=OXGb$0uS^H_8PsmMYR0AQ&H<hAvgVgcb(0x7{2K8l8KX
zY-hUAE0<%vTRC&<NPvuq<8hi9-7_@SbKNmdgkkp$AWh8_)19xPb~1JEKU_4s>Ngdw
z9?;C|ml)n!4@S9bAye0?W$}BmD6=bp0j|fP-H({1g?g~K-4T^shx@`QxM4^=8S6f%
zyK63UetHfL#_D47Z)X&Zwc}z<B_C=fLRa@?(E4&A)3vcy{EHwj<KF*q+B=zzeJazw
zR{(#_6HtElFkN>;;i_gSM(1mAkJ}Z@Gu{vDd}#+QK7p=NsSEX^g9{2S2#5Y2h+5r`
zK>E_{JZw>xP(PPkgR57Av)?7kpI4$s#X<BtK-WVz85*3U_k(dFEa>}ybssPsP4*6h
zVcUYx{&F`6dzH-6yDOn0<P2Bx7=_<H%9Rgt!+OL0#HOqO!@u^J_J+=R2hxxTSJ;x7
z1!A*u;kW_0pu8hg*i^(p{y>_8ZCy(Dflb1P7Bp{su#1PN_~QF@VNks-6<s_p2+@Bx
zG=-dnC6yT%>of^lO&7rK!Zc!xdcsY=B5d#;3~eP!w%l|Z){6ZQ-+t#7E9e>WWhS(o
zGAEY*elYM;;=$W<F;u4u%5>tm{T2mQUx;RDaVB)F-{ZY6A4Xhsj`>uN<4Xr0!o$5d
zyd1IvNA?K8YFhv~<@tm2ENJfB6(e-kfZp|P#O2%v2AkBu^T0`Po7~FVZm3Wu^(vUE
zP2@H|X2Kg{tlbxk#M)c?!D_u1EA^g&(oF+F-1?cf8)~DrJck>8L-6`C6?$a`qhQV!
z$P7<_#`$j*#(z%0!d13t8v2t<UwbS3e(Zw479WsTOPR!pKBsQCnbdd_Xg<&51D&df
z-Jb}0ZP(dpI=kj%N5VmIEO+=80*#9o!;EdzHyH7Qwaq>Qf;U0(4!;D@xEctmRtM<C
z2w^>SVn+J=<BcU3q4A12*WL1oc@vMJ>Od@-I<8`pf#bMm*%{Q92C-!?&S79@HH-D$
z2NIQ~X2aSRq03G`URUY^)21Y&Mtc-!)xRa~T9Bgovnn&V?g#!Zm%wz#KyXp_h3vjG
zlL_BW?6eE4y8Ht5n26P}Y(9wG=|1ovP+@h-mN(B-Qb*sD1=eiC#WbIjIqrteH^J<`
zi~w}e`Hu;vUMO)t=8Qc~N7LWC&s`2)Q^?k>grWN3s4IJ{5Z|2-dcT|@^^qT%Upft{
zbIHHD`zEh-9Rvr>?V;|p5>jHw`}yGzAKOB#q^2-#V>z36>ZQbLUJoa79I)zX4&-b(
z2caWqJ~`|v8ofFPc9WIN<FO8?4cmt)X|`Z_L(W{)$YGxmhBZA+!K7v<u?l<@PSdZj
zp7SFxr9Km?)duov*KB6&q>bm+UcrOAyg@&?E5`280&)KkY?}I%Ti&?IJ$=JKlSX{9
z734Pi=81<__=3ZIdbhQXf<rQQ)E_toVjpEf{HZX?#}}b--zcbmBj)0l&xLx+FT<oF
zTg;hD{FQlY(4b>Kc{`norLSKpJVtIr!CI4&L6gI9#MFFT*B*hdR6|kn-HPe%Qsc^|
z*GxBZ21@@PBHy$XxDj90c|#mXE@*L4!!0hpd6k8X*@{7RrI2+s0lSr7K!M>?zN7sF
zW`FiXt)ZXz;fa?~D`zt_ha2;Tz5dK*bsEdn9fX0q%VF)l`Pg*16s^K8@sfF@n;M^h
zZHErS8iTD^{p&ZgU3`VIpE_JJ%-BrwSIAs8-sk%#MSzQ43#&L4L2lCx=sDw?BDmEN
zerH}p>6;C_^_n4Tb4mo4Zr2svqW$o=%oGhyR57P9$q-1rMx)_Dysn!VO%E>S1<wb<
zjXwEUoz?@KdK`w>F594HcraT&LkHW}Ch-?3l%>yk&1+H?gT>H702LFg?-L0x-<x86
zVFJMA)99jG%R)~^pxu-pF8S9DoC=r0CYtFx{5b~7E-k{^Dg#~-n2v|^<1uhR1}dNL
zQ8XSfVb;61<C$|Q_-R8dw*L-*U6$dLh0!hP5#R?CR#aj~XduprOa)KPF_3(J5h_i`
zKznNe%0^KiW_N+mWOsKcG`fJ6?=z|IV$QS<PUNEAg>bKv=0X=@S@gMXd~Y_L*~BB5
z<dq4FuuFx&t4E>z-W5J@wFfR6dK!&lO}W}rC7A5X;-OK4&}f`7y(>|lq3>7T_;nY#
z0jikvt^*ic@#k7rqge1d+D*Pc#HGVOGD-9d#f(eNw2xd+B-ZVP7A3?$O8Liv^@z_v
zc?RdVIUv@!${Gic5srLC8SBkg!KCsQKYmw<PpY#aP_++rcaQV(ku*=&GY9>})=cZt
z5J=&~58f_hqTjngn|9}-^Db~+>4bK|-XNfi!ASFH>Y!0>N?^@g!dz$<byhfJc>oHY
zE`}PN-sC793Z=Ob@I<>1qD~PF%5x^jl{Q@IU4=&{55bFQhU<N0VB~m>X;<cg9bDpq
zzCOGup1jVO0ZtSCa1HxwLXGwnY~>^gehmr%tvO0qz>4v<VG(+FaYg&a4ZP^c6>zEj
z!c3D#q0_P;_H<7n*4@ft%?WWR8%f#8r;+T)!izY2R04X<DrERokJwK-s9Z8s7+XpC
z(rPaziYo%Q!Z6nC62Pni53q>tyFjpGD@!uD3OONruwJ$dYT9~H2BQeIJOA){@{5{G
zbb*g=GH^z*3^aEUZ_Z>8n>}JS=1o6}ZWBv+RuXYSh}GIWD1|v2RnZ>k9oyPt1EwF*
zU`F3I@X?!3!tbxSX!7Vhv+7Da>V7@3l}`tWqeKW*C)m6BsaU$KFQ)LfT&X7lqsKG&
zB;te(+kO(fYP#W;7rvM$E}`pU6>IbI28Ukcb1k04($rkBY57&O8Pmu_i)COxaUgf{
zq|92CBh*H^f|2HB+Lw*z+9Ulzf7l@K#IfXw%7M`_K@c{zKzMjVISRg?P}~cgf>vJL
zz<zN*aGpeGygAECS_d6N!AcEc-rwiJGbHR`Za6e9ysD6Y5P`mU6Z7H5tO0e&yQcvn
ztq!i!Gy<ESUFIXgE@Jz>2`uj<2dh;n_}*2C%71fE^~)#b8Z`!9=vLsE7njlNsTuGU
ztI)FWELO)K2KAM+`)~fISaEhAan7F$14l|x78#`|*y9CKO#s1Vmy(7xk?`lT5><us
z!ShzFuy*@la0t}K2>)E(7MYL63Bi!Ap$0z7RCx4M$}8QXzE+l$i|_$okuiXtRjcV)
zrR09@fHrXnd=c>jJ9lW|rNVJ|S4V+vI<MG~B)S)y<|$gQo}z1&&RVIZENs|!He`De
z#y-me!JSIB_HP1?RSTiLz<Ds0QEtEIR<K%HinTR_*M1xXYU}2szg{<dXD`MXGsvk}
zRnDuY2C;clD^T;zarBD&$gM7{1ob19AW-cP+PyCXw=O49Q~eSq-&%-v8_q**n-Mco
zrl9y&3N#Ho#ao?zGBIYO%k?e*%ZOcfat%a(Zesc?3eoyv2sAys#?{re!EfO*=o3bB
zto9(j>1hOf7&H}YZC0@42c>8^b|JPLGJ~akW~1DDC(m#ULn*nd6t*-w?x5V|Z81F$
zhk?q>Uc_9O2nBOPV3Qj$E!-J%s1AjK%{5$Wb~IQ@&Y<3s6Zq%sB=T<UW(E2*L!U-D
zqJaJws~QhGb^v<5{VU9}SPmcEFQVrfJ1CtS3esTWaUE&rdXa9Fw<uRcpFGc1{B}Zt
z-$U*<>Kq>I91O{-8K_bzMknJU_EoJPn!H&I>S05`pm8jyt|2c=;tJ|Zp6Age`)H3S
z#=owoK>vLfYdi3SX&J5OzWt@>lynq>-9(U{JO+HOw6cSr1NqHPO+0Lu4F5e!M3pC3
zh?l()3u2N$EK=u_iUYB&K9dLSyo{km`Ouljp?>^kn0}@NrGjCgOWY05C%1W3w<2hN
zlF9z(Im&ub!$sT4$F6XO-0Vbboiu=D&LJju%t6*VYcp?+@dDR~WN6e%fmLZDn$<L!
zZK^(x5*sVt<{rd6BYN}Luu$;JH3t1z#_*}H0>4ks$BN2i(6dtoHCKO(6(@mbRk5&f
zVIO9na||PP(T>?M73-EIGL=35k%4z1m+$HghrGoo?Q6)B(-nB&$6#_knkY=a)^ox2
zOfKskrSSasg?W8l44+CX!M^n{9Nu^a+rm#l#cH~?bgZ!bt2NWEPsU+c({N*BB?hhV
z1E2jG;PN04N0$bHlRo7h;xB^LtP|j7_m(x3CZLTM<!vZO)T2urx|rw)m4}mPk9Lb&
z+_S=A!4cqh$B*x5UW1G5=rg}|7!2r_1>fB(u%PWN&)T*Xaa$&6(k|2J+(X{j?#I+G
zB1q5nR!s1%K<lx@KOD3QJT3(BrvD~E-#O$gxz(v?vnQAL@!5)jr8>BM?-I=NP{J<<
zD|8H6hn>5XOln`JXboG*4w?C(eWnYPZoGmjzrzssJ7d@7<Sh)oPj31=#qioC_&s_6
z%AzxcZ?~l59N%<2u+|%c$8<yGB`4mxy+|RsKAqXSr{UEf8`10V6JF_5fD!GF*s!5_
zpxo3X3_eL!{ZaW)(<B75fYZcm+X=^9$D%HEaR-<SaEWIou6eu_AO0X_p5l?hXr~i+
z6URPty%zdTsRZ7UgY!+xX->KkPLyv#*_={Er*c17k2nPpb_%Ge?*p}C#1OHJa<L!t
zO<lE@gP`9nQ$Mwr%=1;ELL6{hG4*N{s+~QLVTCoU`E5LX?c>-n>P`PW=YSKpmteHl
z63(}r!&{C=(Dj}GFt`d1_NQ*TdYr<fypY?^Hw4|!jHxfU#}Cgt1`PvefwreH?Hbb1
z@H73)-MTQ{VjFIswSl$P^kMEyf%WHZa=`*S-Wt#(OmDF!_6Ju8>cUOO^`v`a4>w`p
z5=-9b9*^x;Jz!z51!a<@+~OHM`^EFIJuwMAyZ&a6-X6oghk_yUd;yr2ZR3Xp=`j1T
zA+8)J#Q<3#F0#qM$G42BE9eIHmlra-i{!ig-!pCY25$deiYk_uVW;gZv<`{Fi&|r`
zZE6ZPjepA2Pn=|<N5{fw$7Hm9FT$6>S=c9lINu|p@bBdyj2)f>zUG&}#%ntsz7>rz
ziS%CW+@X+pC-WNRS#GTw4V|S0yoY86Sm6=SUo;Q@bl1bb(<4!%e+R$f<BA_1R$=47
zMNHRdI5UwfhD`_J!O*imF$F!q$fOrv)M-l|`fmKC`4w=FAf{4K02JzGp={zmp;6o=
zh3A=MRB!#t+qSL&-5$;C%RycAVnZR~SPoYPFXh@{MXaeNj;;N<9G%)K`KZE?c%>XM
zIC%@i(zW=%x!=@4If|m&nV+DNFWp2l+ZO{t<TVC#HNOi7iz9H3^<{*?ei(5mmEYFb
zf>GDRSg_(eObT|Pcc3l86E1R;EC_<_$>%`b>M~2-Cc$R|vmkiRb!Oq7jn*w;VCx?M
z^3=Edb8rz##>>rml!rje^ad6&=N?@PLl9pO_s07M!}LMq9*&1M!7C6G^Z;##!Kyc&
z#FROQo3nP2|MnU4+8@B?n-!oOz1fK6GHhS71|+@bF(>O~VCJ_3Bh$`<bjS{H>#>!6
zm`?d0^MP#TOc`u%rEJhXH*_1JhhndIVOF9u%=f3v#1QIM`LARjd#%Hkz%Af=BANKC
z`KWbnG^<zL19|V2P}|&<8!x80h@l^ES$~JA?(xOu<P;8`?f~Ui@U2~6l-#T?5g5oo
zd4+P|suKQ&dUI>$N1<JPPk8ws%|?z;rseS!MY$jdeVnC`H9-mcjtH@-c^nAM1UNXu
z4;r72;)CaG!6l#aP$YcDn_ks2Q#ImGjW>aeU4ziGMJhDCvz>CgnVdfvh;_*#&@v9g
zAAgnLv-}VLH#HLlZpAF8T?SEo51_!_!nAt4DX;C`mp3n^vzbo>Y&dWk`_s9t{%Ins
zSVb<zcXt#siEk6T_X0-Kyh>3WgaQ6R=zYQuZoiv{^~z<i{&hEWd%T9c){f}i{|HvM
z1hT-9J>mD7eVBA(3%Xq#&g*VjlH)o~*mnkbnQtrt_emF^tlw!Ui8_PnVZ^Ap6b&y=
z8WLYHn|4+S5ct~%7Jbx43pO6h1aWZiULX(4UJt99E@0-AW7vFIlPmwe<kcTvQ@*2=
zdlT=+!}cnS^CjlF$5*a<sRWd(M2gZSxp-vRE|iD%M56~IA>PXmTgIt@(a&r}(hJHQ
z+iU^xDn};XJQ6f+lAjTK@ivuy;OUgWI<o!Yw*5sYeRvt4w1>iwf>rqZ{w`coSBdG1
zsMqq`hg(~3K>6`yAU9q_eC|*%N*_&guJfRkcZj8TtpNU~3^hv&L1T&x+Y<JJ=Ft1h
zy)gi!YX<Nk9eeQ7@g$tzuL^4aj)QTPbLi|YGqs#RXRT9qLP5N-S!^G2E$iFyqrENh
zB`EOb?|gKcF%q;kOoXi;XQ5{IeqgWS%mnNDf#3N+Of#l=BKf_W^mDm@^&w{M8Tj!c
z8&2JFMt#-G%-E|8W$vkp{q(!bIh2R%^e3TLx99BAmJw*xs{$I9EhF!uD=26A3B`3&
zxJHzW{HEUUzYc}<=`0*}BnZ8%E8w;$1XLS5F}>c3%O7iVnf(?Pd8U&1OJ(TNwnm|M
z*&p511~Y534C1IyhHE>>H`EYBy_tBZA7;rUy~B8uGz&~e#4zQOl_+@lzwTo=kG;si
zC}A}GU6%`DSq=038AH1+J<6|aLeuHHK{ezp>$E%x`Wd#&>A8}(b)-P;e^2=g9oms5
zO$58{lW<#VJi2{Vus}y4t7V5Vy2FO+_SFQFVj+n41u24u<$-SKVPV(w2+&<ed5E3|
zLEmr+^Xff_{Bj$>b#)ClQ8cp7o0Ztus;V&A;e&>AIMnh?=xi~Cm*09|Yt0MBe$K=7
z_af2szBl|gwixXO`@s8@6m$xF&9%hen9}j7ux!;)+JT=&@pxY@t~5lIrTO%=m<X4R
z+l1{*2kW=B@Wwk!`TPgOd=d;OX&>Va^?h>iQ{_(7@EyZl%>uw~?nIP+))9)nn=z5o
zNB*$SWoX*b2L{w`Mioc~1M|yZv@w+FCL9zdUO9%+=1ZWelM1F4<9SLDbp$%;oYkbF
z5MA#S*1uB+L1s6zj@5H8%OVKnrb}7FyKF3YE<veoIDh6$J3`vA$z^TKuVXPP)hf(}
zf25AYy2ng>>?U^|oW(q5?tr|@Q?U1~0_?Wq9C~=r9?Q2s0x^;$UrfN|fIA<S5|6R(
zC~uD4!6?9!HH(g++;%-H*nN&!YPf-k54lfDfgP2{qk_)Z`a@QNJnSV`+MhR*?h0p;
z_gz3#p)1T%jfB|pfuM{_6nc*AU<8VU;E5^F>g@=vw<j`{IT!KcO6px)p<dm;O>jG&
z_T}@-dEFA?5_L_(cG$};@2>%))!NLf{X96iJ>Zhq0G@(bX!To*+^R2yH?Cg63nLbw
zdg?Z)-l`26$Ey@=*4+U|UPeok|G2*1Nv6Bx0z|(^#)-s@)aYHsJa%8=O6-H1cAW;R
ztrf&X@!`5$#A3ta$OU`|^*;AN<^Grw&w-_=za@%ys?33)J%!L}T+V%}447T^b*A|!
z96C2m1IbVyMGW2Z1kS;xp*<+SXPt&d!5^9ClN2s#b0sI+Y+(VNf7c}@lIz2TwQZz1
z@|XnF+O!+oGK0V&dlmLro`>>n{=|9=Wwkz6SWv7Zbu9x>>$g2K>Q8;<cIrnC9tXM?
zhAIpW7ICTjOQD9_0_L*rJGUD3jW_NQ@nQZI5TQK??dMs7)k|ZZW_TWyI}G4Vm=n7E
z@nO;L|6>DC6_4vp!t}u&e8#v^+EoNWTMs{a&kO{^hgN7&z5rX_pTO4EYs_oWI#er*
zgLZnJF_$A~6FL&ybvT$P4dKoN3p6=$iuh?2xMK1FtO+UsZGkn|4?77hX&SJEoEbrn
zA~5x90e-c}fP$CaV0ZL2zi})Ter~2*ufLMN3GI%5d`_eC`ggOUh5#tLq=b-jJ~*a&
z8A5F~tG3sKT~Wuu@BV0L-090FWE<d<nR#fiKo9NX=3|)g01$eFg6*PW>V=6w6cB0_
z{JNA4*h<%O0_A#yTe#qw2Y1;qm?t><p}xZ*t`$#Nk@vlsa{oZ6kgfu>aE6MvB9P=N
z!7^$+RK%WuJdZ#;m|=!@zZzhm?g6kFF&+%d&w~+h3Y$KMVz71qs2phLF2)(ugU!VB
zJNH?`9vSHT^Fq1jBB+^qmU+>$*GT6US2gry%0vxC`nw=V&kDs&mU8$UQiQr$KZI{B
z^)Q9_zJqI{KoWRZk!M4>9NlE7zGn<t=Ztv!n{hDh(p+4oI|--ChN58fkP_4SZm6Lb
zz;ty2_y=N1=r8S!ottRSKcqJ{9X-zG@8;lA+yx|)f)uwtim>8j1q@z9uF7``IC_$H
zXsfoO)k7<mJ~$b?ypFTRAC<z0a_Y6sKrm%WY<7OYuQ)lQMuH!lbI-uy$$@C(oGVOs
zTf)Rk<)G=f4ATFtXEUcA#7*wu5LVC+jG9(b_b3t;Zn=blF`2ApZat5ncijVy<8j>N
zGpH7Q96}#Ze%~+yTov8GE-Rf6_??5>9I`R&j5q5H*g{?ONV7v}#0WdFK`45-m_PkH
z0ShRDE|}5GY+NO<fx4c9(sMB7#R7N%8EDWD2s183W3+5NyKyiYhRyRut%TW3q(Rw$
zJyNEoV~5at7>uNOdEWzcZ+TP<nu(R5=X4l!R4(F{4VjpdRL8e#5?5$_0x^y>ShYBs
z*-xHG`=LFc)klM+uL{N6KE$Xu*TlvCW|&RS?+pi6V8ajM&x|hNR`>ic@xXa(8~%#_
zUQ2wKDV%mqZtM*CRVr?Cko2&_z$a>;zE8@#9CJjyyd{7p)CE0#OxXIUf;kvdR;+n2
z+h0-yx7P%K^8GepKeH569Xf&j?>$Ulm0zsCDu#t^CARI^Y0wsO5+i!dgY}Eg<IAQ@
z+I{*8JEvq~b%Yo^FC_>=$3%mEnhHqYdJ7x0<3TlF9V2fa#m1|qpttilSB&<B!=H|V
zxV#uTf+)ZFcc-Fa)?Z%Nah^qViNbW^7MW#{D`G|(LN*<;7Z-4tmBP!4BrtH+ZZrxo
z!>T*OQFrPDURO5?8jBtH*xAI3+WLtP*eC~FT*>QKUSZ;%t6B7<Da`*>1c-lb0Mj+5
zs1&V0rNn~^mV25tPv{Afb0>u^Q}!{3<}iFIq-Rk47;IYA4cn6_bF$?ocX_4;1AL-D
z5b?zflIeQUv<JIW%g|@~6{!Adj4p;#S+mI^Y`K~Vg}u(v`<yoT4Ex7bXFg!oBjcbT
z@id<h#!#TO8pNaffluiuuv!-aBGYSTErv_rVLLIdB1FV<3gmi=R)Ny^sW9bkE@XYV
z!2|!>1fpUKs1po__K5Dx{Wx`kCuO7dh%nx`SpxZo&*7Gd;i%euI>@_Fwz9nttoyA&
zQC79${Ui=6iXu@QL)qOUC(+YqG+RNe?JVa|7AwEQqucW7UGJ*G&CnNSZM%XSg$`I}
zumc*uxGD^t{lWc46?A%R1sW)!>fC5l7St$GI@`JO{yp9?Vh(m*If1LDq~P;K1E>q+
z3&*{Y*nt0pdR`Y`#ZfvRNI%U~dld<B?`NUggoAW;B|cx<RHj-Q&hC97hrx|lh;I4C
zO&1+Sk0BLM>S%}U#g3pS>k5J<ckW+&8suZ%vTElvt_97!#_S|kjMxTE4sq;#?nMye
zUKS8K0%v>;qj}6%{wUNGBYKsxLrKKcYu*fR`b<EHoY;BK!ibC52aR@WFqPLIxcxmN
zuy0fWm*1-(W@<Uaja!Q@6YdDLsmIllz7e$Q()hpLKKO7z3i=r9K<6y-ri>c}2?_Hs
zx`6h$x7wI6t2a(CPC~WD^JwjG8rsKJar?!iVAY{q{G_jeo~EhXC&Y+&DkFKu>O698
z1fkz?7c{y!mRmZE2G8B|S?xXn6#X&5z%`?JbMz@ZW-=Nr2iZW@xv{YFdlH0}RN;5~
zS?JZz4UBG|Mb%5uJZq#0D04(8rCD|JhG3|g?uCjGS0Qg{2$;I7<Dv(?Sbd~Fi=lJS
zJ>NKJT3rE~5{l8#{shd#X&Bps`l>&l^E1&)$&XruW+v0I;F}qi-6NiV&p{xm5klcg
zViM1B2SMauQ}u7*c<@0I*1hrPfz;0u(=}>6IvrX}IJ=%V9|xQrO1;cg$est|rzhvI
zQ+}Umq|N}Pp9edb9mdl4&x1>Ot59Q36?55<#%_50kFVL)Oun=L&5J@n9=DEN?p%(d
zUmv*s^+Wtk@6D+7IgH0%w}Hb2*>EjrCpuI+<LYB{_Ahy>@GNWKngjZQOTrk|_(mbr
zklBLG?y(T|VFzo@ErORHZPBR17X<#3<zDI5keZ&4s&Bp6ji7_rH!vSH=5t=~YcJ?d
zpQKPMI*ql*{J0IVgrxSZiuxnHL1M9%r7t_hqx+o3CI<i~nvaPp9&%I4;1~$+vW}c?
zcyB@(+9Ym-y4&3;4|q_RIaC{apaQC`ce7esF^J4_OX_y*hJ-(Rapjy0ENV!Bm<$>A
zT~YzEoO*J_Bw|s-1*mJEjW=|}xGR>j68F>ST`q~;dr9wwo2eLqv6Qtv2B;Ox`_?Hy
z^vztDZ$A(PP9Drjn8oS`ID(7EH+HdOIF1@a&)(egY{;}#D5nVb;$Ehh(}lcU+5=%p
zATbD{W^<RjM&t%Nz{L*4ZCRATQWCRK^S}@OJ5+?n{>;RKjVr<W7`Z!^i(#B;C<c~Z
z1l|5`DN7c@U1L6SheYa9dVJ!dvunARc^7OuN$-vOOj$$pLePAYgGQn#6r`&_*K7WG
zU`8y4js3yW>tgVWbOtuZGFBZm6JyeLpwZMQjJ;jLyk1O(`ae%t!NyANmVFY^KK8}*
zwZFJs(-YotBng#;tDrTb3hF%4dA`#LobzNazSAFr>Ecg(pJgfaRX%XL_;P4;z9&rm
zJ`o)(DFc(6g)uSvu*L$JPuX;GaSI{5&q<Vh8w4pfclq4=$5G(2NTHre*U=c75ABLI
z^U>4?bz%amwCqF^mkoGZMmy50M)2;AI}Uj@9O2hXR<=9`itI*Ui_i?B-+$%$I{xsi
zyC0Nh9*5c|BbhSio6vZoh&rvQEN}^s7uFoA{oJ|F`WUFwUyju-{-E{mDf24N2K~Rs
zS%H5ouR{wk{To3UM<qAy7L88hwV`9<QMeLDtceW=pnmvg=J%hBX^5W+Tk^F*r1FnF
zNVZ1%>pl6xc^>F>dmCjO-&48C7o`r9pgAWFO+wGJf0V5i1&S2atL8$Z<8?(oW}s!D
z5#IL5g)Nlv)7t-vJ?l>#rLJdj;>>ke@v{W2dZa<hgWJq*&jIM#CmiAu+_2}Am1s51
zkav2|hBf_d5aum_<^;;s-|2@#$c<QVFbgza^?_ICD{zhbB3$O~kDp{T^Qd-ZDl5a7
z*y%T4d-?!94?Zwa<xIBA&K&D5_J^9U_gL0q2~3}N29shHIPumFto`K)txbJF9xLOr
z*vGt}%?9e;8-V0{i&_0lFQ`8g&e|k0^gkqr`4^LDHhzhTQ^p`{4Fl`%R{>W~0>ROC
zv-i34(5)pAh8;K$r~h+8*BjKkHt*ny;tE*uZ!nr3v!wn>gj{psSmHUAz+7$0rS2NT
zWLa;yuKp#VeQs~ouCIfgXOAL$%)*7?Gcl!LBgjw5nO&>_1eO@{8J{m;%q0%`JzaTg
zdROM9aRI!fkJ-v=1sJ(>34SV0gX)JXn8Kn0?hl}@SyDMu^)4d6OFj3#Q~=R!UU;{<
zie}yYv3^r9SC)NMtSX&|Ws~#Ks6CWBvGr)MX$wSa61(H>KJ;6>6!dmAFu}lm3grZA
z<{5GrJyrCep6%teMROp*KM8-#@dp=^g>V~lp(e%+B5wiO@9@Qw&Aw=VfzBi&^N1-d
zVJ+`7sqa@pY|mXh`T^y1i|4?MN5{eH#8x_sjN($^ZC2@h3VnBuz?^`t*m{?8Z~y*N
z)C>zFRs}=71JkKf>cS%0<JcY(2NXF7m_#|MB-pVGyw*Fw3!aA_qod*WBicW$wgQ(R
z6&A5r4D(8h=+8g-6U|Y0_ZXcchdXgC%K=cYZopk)W^kLIQtAYkFuO_-bcC)!$T<Z@
z*^^mbCG~#p5<4tzDs!E7lKp)c0nIvlpz(Dq*A&0z>wjj#jE)qXI7ouspQfXmqbsPU
z-C;$~{4vqR8NaT+fH&4#Vc_IMCXe3EDxzMnz>1|1?X?5FN-RL*nmLR3m5a4eD%^Q9
zx#1_53SS5c(dn%T>Li+>QPwJAEnHSWDP<Yo#Gs3O9k`B=K*Vff8oL}L4$(M>&EExU
z`%S_T=PzQt^AeEBrb3J$8JqKoU#nZjI(L?`k-sb`lTeCo<EFw%4eG5m>p^B@5qdTh
z^VW%#e9BEa=brz-H0G>=E>W4-*S05W_B{rLW0qqO@e{)4B!cJ1Q!pstA~`PlP!HrF
z7R2p`OzP1!FD_=sr^(S~o}4vl?0{&MzpTYI2yKloV8EPBSWoxc#tchpg$Luh^p)7K
zq&pP-=Z0+onQY~%NI2xY7VUafB4~v3^)KS^_j$^x>m-53q`Q3B&S9uxc$ziQ%yHi_
z`gvsdU|RfetPieY+8z&>lgSlma4u$o5$}a7X?}yzhe3OF6l?Jq4J!kCqHAs?-rhm?
zgtgas@FWFSd9@zX^^@>so*hovnSe6CcTD%=SD{!fiPclqYEpg%YRt=GT_%U4#A^dK
z=%u3NXpXLab`UXhBksXc^ts&&)EbGKKUpd4lvlFQx)3xP?*xLMKbS!Sb*7Qy;0fhu
z|5+E>*I(tXR@o@nFq!-<l&!n*nQP?=n7-Kt9yU6SMZ}%wZCTSmkgKZL9ee@HKE<ML
zQ6)c@YC-Pz)##Qr1DuHo5IEVHHEZ4Dn(w6C^86KCWS@o=*Dmuc4R3IHK2qqiT&8Fm
zyBqvWjlgZJDG1Uc6(Wb7!qL0)&}iut#3Cu;w=izEXf`z7@_@nPiKCU6iOaHL;hD<`
zDE%G}(iwAwf_Ld~VQenyihi*#6OW>9WIGeiE`_M~VHo^1mXB~8hO!%uH0L}FqD_m1
ze)Hyn{Fwt>KjcV0!wl{!Uk0%qhoIs4JPhk1VnZV%(1-QqHgl#z%Y<M)Cp8VFi_C>V
zF=J6uv4<JA27=uz18CV_h#v<<qlVT8VY})rZlX6BR4<it&o$ocuq1@?c{(NfIoVv-
zlpONbqrk4vgP^^AKq+^^cEv(AAS4D<{}zLl)k3x`XEu7OwK1(X$+%?17UBpZ$a8!k
zvL*-jXLA_hxQ}|$GO#Q$1<$m>pj?nuV*ZzMv<8`qZR&%GkC{O3iEC!J#pbv%Tni=p
zbrm(!N<ec}G-Q7sg4Q?k$zQgK*9{WDsSo*-tq<if+Y(W5Bt_w|F%-Nu7Q%ZAH?(Lf
zLlG+%7Cf2*_Nqx>Y4m~D?EjCi?4f{7#(i<=Qg`&Y9e~ox7mCKQ4njYk&wQLUA-4~)
ztcxe}(3x`T^ShS3dtHeemk!6Qml7`i+RBVdSF(OVfmpn*lIDyV+^RDH-|d`&KKiOK
zvWggMT2Fc4R2Q(9`SBS~h7ym>oSQTY!EP{dm7h36`kN^%U49nx45(M6kp-<PEzFaC
z=b~=;W`PEa`TX^oST*V*_!Liq#_K&%tnyO<A(Vkz{+!niek`;a5y2jfAdk&}KML{I
zRIXW^!&BtP_@fiC=;WY+6Wv1K{AL{t-r>sxQ3>F4d>#tEJu4w@xX{HeNa$0%02<4s
zTr^7uWJ&)C$I|`oT<ajzjJd`ZSx!OuNZP+FOlJMZUqbtSZ<%0?x0%<^6tox-2C<t4
zL0Y#{Fyr$H(BDqKr^g<O^v26j@Tin&c3BC2pOJ;xp5WDSgIL`xZD`!00SA5aSl#qs
z6i2S+UWsbpx8fLT$K`|P)d05Ib1dz56Hz-i1$4<BckhHh*n3~XPa!cl>Bbn;sBLAf
zy`>mLd0q7_S@2+X38u@8L6J|hoxc&hb$kyH7;F}5ZQajYop<BlLAv-hojk7d`|^X^
zdx23@fucba3_h#OA-X?#S+cg$9Cj)30akL!%m^O4Yy(7(zQTWh9EbM2oEMCr3opZ$
zQfK-i@=y(|-Sw5}ubsfYADx7_!X9*^R|%chWux0z0KF{&&>gi4JZ|Yh@I5)))+N5}
zzOKaD9)hvNSX9>JD`fRom_UCjIYG()_4jXy$+)d>ak@DMnr!7iw#I;q)il;>Y7UY8
zvZ3r`4yLy(1%v8&Sa)hDxJ{##Uq+%rdYv37C)Ciub{@!XnRAPJHR_PAfF_k3kjNjD
z=+i7Mt=myBb$q}l#EDRr@s2n2=?>x(3&CLGSzh~o854va6Uy&)vVUh5WBRo({QVy4
z{~WMUto>_;8;TLZX8{O44OM*TKrEkj8MU3IXpne>_L(}c=<Zc)n0%SeY+AzR8qQ-A
zBVhe)CAf9Hfc23pm@?~_LejlgiSAVkMSA*2=A~}{)eVlIQ4xj%miI-gcW3y`pXX5X
zOC^(>IVFsFlm<<)q3~j6F)@SE(eqafm-dMVOM5eT%&loxRRvxNe|SxoXm;>x71yfT
z!#uxMz#sEmw5qdV5=~-F8QOv77ZtEulE5|mj6r_143_n!PTH;EFplmURg((vFQ{R=
zsTNb|u?5yvQnn=XD9GYXA>C~mh@Kat|A_hcenmO-Fsz{2TXz%>*JJNWQgNb50p@Kx
zgj1+@^xnS^WIsoO?7|3Wb=81z6Vt%6*Le_qFJfZVUaWb}G|Iv^avAOYs-vbrtJ5H8
zxmwIdZldm+-Y9-LCkLCX$VKyD2sUj=;Yv|_iB?4{lob~OzWU0)+qz<b?G?D<b`)b)
zM1a-z1w7cO3)HGD;*)Qjp!X6BFp(&^{sqdPI|-mMj=KH#R*)k#oLu1m9=-uMIB*a?
zJHT<RuMO6GOyNd<a#$l@qIermd@ijx_UeZet!<Q0G%6Mq3y)#U@_ZC@$u1GJN@rAW
z-^C@&qeRrLC*Rg#fS>Ql$a8xE#5;qayYCRR`+WdfwF1GaVKFv({!*x}2}av(N3l3v
zA5FhivaT;r;He1Waz44Bz~}zt&`QMw+Mn1Bjb~-QqQKAA7BXngVDof56nwY}Bbzp(
zrDhM9UDg*ptJm`C8g+1yCo1ASbD?pdKL$PV2Mgh4obP9ee>+uCCROD!^+JdT2W)=f
zk1oAG2m@vp<KSPX$<u3sVbAUhCA<6a>cNv);~G1by>uZub;h8{<40U+bDnJsqS^k=
z7Vce<0rp;w%>H9Jv^rdbn{$XOP}LU<(w9SQydQtG-x$?)UZ9=PY%G<9fuL{%>ku1a
zY|p*0rh6dWPYx(Vdw0UHW)Az0=74U{Ud6=~G%sGz#$1{P3N3m>fv$rcceSUUPW}RH
z&e6kyBbU&2@=El1M@(v(4d~yhfP-tJpfGkC+GK^pKF^W#c}`+}vFV^W*9z@*b3nVN
z9@x;iSpA1K8qUfkjwL|Y<kv!vNfPe6v=rP92hl#7IPB+4x!h_!062no*I=;wycY%p
z^rm}ccWAejfw)hAP$1gP!-lv*^vTIwb=)n!EHMIAg2K?^N+GJ~E4a(NB~a}c!fOl=
z#6HAR`Q)ZJ6s3hKN8fQ#DeWuByCm><$9En|MCs8MHseDCp7{}g_Y8`l<K0Q>U}!@0
zBOP8~(-ZVo9RiVfI*dBG2~W(BlBe`6Y>$t|#v()N)2g88XdhvE-6W=z4^UJ-aK!d9
zXDINsf{O6bICE_fmNn)>i|!uE|G#1z=h5fB_h67Z=E63kwZxeo0=~!N(8ul=7S*4?
zz}bzwWXKlUKmFkz=G!T29>TNI`(aJ*N@(o$ow;58!Ws`*vckI_sBeCU2ag{PcK43K
zq|p^9u>E0L(~aEQx2O+y?h^K1yb`Nb&hg+UkC<o44gUAy323hhVSn6L;NbcFP(AA^
zG+K^Q)IUpMo)aCJWQdDd?3^HMkZl0y@qt@6#h~$KHJlZsi+2AKnOFWM2vx}fjT{G%
z*CasC1>;cTh!G@(<%8v}3(y!=#tr(pLc^J>pq|$QY;IrXP4n{k;PTVBVE0DsWl@3t
zebh1R_<u}!bGy)E8fAkY8nK-Y5jgzLG_(px;c6WvP`_pe#2WwM#%uE+yLUfgL_B3r
z>l5(<?S%b8^0?=5Vj^ms!_ek*boy1t^e6bh*j5FK`dn1l-9#)Hz8Gw#EBGF-Y_w7X
zF8?-!W86jVl5tltrNRssosGtpi&p>-k>5*{tGF>R6++J@;o#zZD7*ek5mf6BUOqu+
zSF?z?<Cl4Zjz8-;^dee@bjPvJR^g`36S!uR9==yiL#-p5*^4RmsJkv(p(%`ol=KUj
zkxY((Znv1XK5=i8$D-`l5Z?G<9|qITUH`Zn?350|dllqPESUmfmBayfDFn||kGS4y
zUyK-1g!7hPLFu=H%+&lOZ|b)ZtFDj3UH-fAVAx?){p-z(c16ISe;x=b6EHY39|Zrc
zgUy3AQM<vPyB!|E+a?A>uTwUtb2l9i)>4kTqt$c+G31N{;c)b87S_Cx6W?^H!hY)*
zto?KpR~pca*r^1&$66wr6ov<tspz(rSh3qGiD%lwRAhBqA%SEb7Bv#g2VaGm#~sn~
zWC&XA=mo(QBcbhY5<9q44~D&?*=w&HR#0sN(K!gNgJK}jd?LCTPk<bA2Yjw(fR<zO
zkcXThW=N{SzV;kkm1v{<>;rDFFP-@$oa0xThz~JB4AY-&!Cha^fX4K_sFAS{8Xn2H
z>i061AxuWMZ5b$XmkA9`k7E5jWA5^EEA6-CJnXX#NX5SuL7kBxFck5J{UxYO423ku
z3vi^*7&Kb{gJq6WV3!yVlzd$!ba+HPtclcPh(8Wqw*FjTG_1JCa5;1}k3{+V<!q^R
z4Vo4P@`m1D_~Yk@_|yPq1Xf_@{OKUu^oI?1AZAa`BUrmXP*FQb6BGq&Q50}l7@Zdn
zZt;hijjWX8lxW<mNlfz04@@@Fg5NPSz{BpVvGwz3CV<a!WyA_5T5w$GY)aX}S*f7y
ztO_kOe^Rj#!Rd<%u;?6tb;39pmOy!uOXLFkP>RnEoq(7zAy_*=#Cus&udd*|;^5{>
zQ20)SEy>38Ueyn5>eHCAa33^Yo5_rBh6-<tI*N7IlAv?WXFlLt5ct$iWxC6=S#P`H
z)QR%OHj@bU_@6b}?4us$&V}Hrua2_%Nbow~0EtW2U>DHG+4J1-Q$!A^Y(K`$y(u?3
z%op7TT63G5>o9Lv7(}-3!*^T9VOYmiexpDJvX>2fy;?N9R?bHkaw|paPk@@SS6IF!
zWr^>+V&d^1S%--ntaHYZ?|wR@?4Aw>K1HI)NmF?J&^SaZ<VK2eq2yg+$$<G~P+vuy
znB!8gH*#iqU+D9^Gg}eZd<X>3f0;E1yF%|ZhPdoQDdbST+ozj4A08f#d+*ZDanTt?
zI=y$cPtIdCw@RV$pOerjvOi>5tAS|P4z}uz6yMN3LQ_WX>$i_Gk?I%Wlf6}-vWFZ+
z=_fGjA@yXePJ>|PK4_mt&rR7Y=DD$0cw>+;hTZBy=Wj0>2&J=0)5~z$=~x`#Rs@}b
z?eKNZX_^z?5%w&ejY)L{Q2lx{Ywn-Uo{cTV>gpVB^F#-7EK|X-;wrhSPl9NG6I6#Y
zh~5A!xY7{nYE-e-@hrp^41}NW58$DtMyNI+3q<ziV65r~KT>+3>I(rFg)E1Odye8s
z3x*M)z2Uz22=-V;d-_f9g*|3%LnpI9Xh_}*sxxPzFytsUXnKQR7j=X`g<Rh$9<)RX
zm^Jz$x*R_NvOy)#etQj*{Vc-Pji%@#?!$HWUIta)L#*Cl2>+{}4U+#3Fv}!bF+V5n
za(Z|2mv+b6?h`@#X#nNM3sG`>03VlKh_2L)bBWpr5jt6Xt|t9^<1f&2Asv_RnU0--
zr5M_ud{4vYF<p5826t)Yw=)s<^$ADMvynm#?{<c^Q9RwYm^Z)t$?XPTVY*NKnQlrd
zYki&$Z$peQeS`~ZVbq<?p2-3SyMe?ypd?z{i%C6Ng)Vu-J=v82ZsR9`i}WZ+Hc7Ec
zXo6`!RM5TlC@fZt$JQD3T%2kGci$D_`omYC!1*^98wx38ewup@90e{NMT*wd-b{JN
z5$YX@yQW_(w0A9suE!E^<IS<CGB1vK(&x(U#amXpqz5xj9Ep*^v0!R4mOU&i1E<WB
z)N@)uKEFWnRd#1?Yu+-Oj%q$}!D*=Tod(t79Cr9iZ}f<{0DVWy#lGJ0&?(KtR;wy7
zyroXA!6(9+*_mKsTZw&XhShE!Kt8S%g~RDwly$2Su77zH+@pzGLuaG_t2neOJHm_N
zE785`Fh0~f2L)3H5?7G?u=l*V8+oi%D*odwvtm$Istc8ci_mkDCVY#oBDOra3(w22
z_NzZsQ_@Q07=R|qeOODm+sKe%*tpMCXg_*13+y<H@XiMh+K@Nhpesn%eo|<<&4NzN
z*W7PGHmZM>qmN@Lo;=5~!M2H4%=*kD+frfw{(P`f2}6~fTBhB80K!hD3RPRmAS@~$
z+65Cqx%`&U#ypFsAE56g=>szz=>xH;iLCNYA_i_%vg&VNS;KZ2TlKm>nkbHPQP*)o
zBfoUCpQ~VBN=)(Qt}Phd^Aso_J>k)k*Q|Q>HD<7jW@dM13Tt1-2yO5BLNI-AnuP*%
z`C`j6ANr%<q%rH^S_<R-oq#VRiZOQJL+-PUSV&1n&@}Z6G3k#$Q$iEhTX-CHxn9EQ
z!6z{6ZaKSI9f6`hwF*6-OePJd{&SC1${2?*({-7kzD`1XeR8_~mx@JNXRtPG0O<E9
zVV*++p}OAy)<|6IPd6e^KIkoXk_w>VVLVopWnu4?&KN(k9BPkbvjV4;VB)qH42~Z_
z(@I_NSaumx$Q>?zjm&mJ1qkAtOFAo!q4i>jBG@GntLIFCs#SI<9f?pt{rY7lD!A!(
z65jqTfyQ4mSd#K8NCPwBK>+apwi=P+ndVyoLhN4ef!*E~plb#>1s`X#uq$m$le)@X
zeg~sQSUA&f)nZN0G*NP7By;H(#8mFbaPf0uhU8^1(<Q|i@L?IT-k$KPGRkiC))h*6
zOyHh>hC+pVU)Z&Y{OK(pcp&ZKG-oY^zjGF&Z0|bOq#eQCCKCf&Z7kMa(Bw72r+7Nu
zH>A(PxO7Pzf8tq)ji2;|_|^n;`=kgZr_4(Pg_PAM*Y8@0L|rcpP?|a^#J}f*QK%W*
zeKs83mJZ<me#fHzrD*1NnP$JCfiR+=5GxbUV;7k{&AMX1E;|!2KAs(1Oa6lB`Ovhn
zp2?eMp&;T+Np_Ech_rXl*{g*<3%bMAv`g4(c240FvKQ<k$W0M)44vq+@NHchIUBxk
z&!^$!RO`a)%f7P8%1Ctc@4+9<7=#`17r{d`2Gk;>V0K>-ih6q~WL+;QTJP-yO}QTI
z5|PvD(maUxnah@?<v{o|%I`jN6@Dq-htV#ncz^dXEY&-MbsF|C<7YT^`wJlcngvcg
zqk+L5#GF$7UuH8Bbnnt$<!~U@Wiu%3T7ohob^2YO!Py6f<GFepys|_et?mgR^pyp6
z9ZM|EUbhr(J9T(##t^vS6AdlD7ed$_+T9)c#7akz_vkuh)JODZpDn`hAr_+-&80;z
z%!Q^=H+k#HRoGK>9BciR3d`DXCTKAc3S3UXN(~NIm-%7V^hEY{iWD<0le2g4aJJv7
z6z*@PeCm$h3cH#WAo(okUYaG)sH*_2rgv;DWi)cy&SC1$X{dLE`U*CeA$;)xj5z0y
z%J|;Gij7&U-Pi{Va)~`Qz5tvyhOySZn#`;3GnUf*5V+ni=c?+ekZ{}*W&g*~xrfD^
zzJGiWk`O{jk|cyA9p<@ZBqSk(Bn(0*QXy^Zlq8j!4r<bAR8kb3)#tfoN)kc{EwMSr
z#>R#ae)sqH*Iv8VUb~v+b9nCi{eHd7E`um~hB#m|{fteyg878SVCZ)ltbB(-)Po4<
z)kIvt1?ixk_mjoX)r4z7^Ux)ovXSSVg{(B1uQX&a{o#Fh*yvndcQA*$Y}m#OIR{M*
zVt=@9V!G4AAj|FwHU?#5not6|DQRe0tI0jJ7lQJhx8N`+3{pNe@V4W`SGzt6%16%R
z19q3=wH!Z`BpHe=Q~MJ$Ym~63<{aWgVk}g)iDS!jFlEUiu)0gnRIgdwa7G09cbTy>
z-83*jqZpkZ1Yq-Ebqsu@374;yq2W(oP`gU~kX<qS^$$&yS#+?4k`1{1z(8W_oaatQ
zCZjC$C)<AM5_Da@$kV<>!$$pl&^{|?{-HYHS(3-cEwRRR-H(Ff{!3iPxss=ZC_w+8
z8End#6p(v#XC3yX-1A5Q8mkQ1PiZFYxshq+7m#b6`lb$(#AEiyqI!=Ec)cke+kbV4
zHNPyTx$z{f2>-!T_JyNjMQ;cWSc8fy93NeY!KM#q&~JpC#ov!%!~YedGZb^RYe8@w
z<iv~L0?W?R`SDL*l)I+!WiLdiAF>PLf9`?Z1>;eaXGu)9v9Kd0A5Du7iSMt!jMAGL
ze67ny)HV9am1DP{{oJ9rtRVzu`S{?L3nx&Pp~-e1Rbu-nH3;7o2Tiz^t1sxzTn|nq
zZufEQfBP`GW0!#shCtIWM^<1S0JxOS2Pggznr(~uunpcQ*>hD8oMORt5zTy~77F%*
z*23$;lc*jN1Y3(vW7v9qC>%{)lU3uvZ`W!5#WfC$RuqH&Cwtx%*B^}MXt9PbGuYHb
z0gawl!qB0JPP2cCL##`QE&N-o^Ls4+P;?NR2ZZzTtxwpzA-UkRdJQ@mCJDEOWJBPB
zVvc+bn8ZD1E?&9NGQ*PNyei&aRwK0B7!D_W9k6ToM67pFfO6$X_I>J3G%*MVlU}A!
z^S}YT?Nji<!E7u#P>ELZ>)g-K3eM5{r11R=EXzF#A2ettP~!;dTc<#hM*wELQb0ps
z6%Twj5|+)%qw%OO8s}wmrPY1mlVKzl-ARGG&s#ClS{<E_&}OZC88klJgzAqAY3`@Z
z|6CA-M+fb}g1%=3jn`>du!P=QX6}dH%dTKUm=`FmT!i?5Vx}H+5L(6_7Hr8w@Fgx1
z4PKX_ww4j3u`CGPaSGJV$ne@t8{!L_Lwg|kHL7Bv1(d90Z#76Bll$;6$9C}>!SDMq
z@K`heo%$w;wHGURyDUf0u$+%uhLMA5bua#<_5=jJ%Yr*Ah!M506g+dsqP+WWp`p)d
z_OkzU>@3~R$_?Fl{h{+<{&_M6PP+hM9o9^A;FjRGUJZt9B|lIMWiq|=aO-=@<fqLO
z+g{Jb9B)5fV_yRw=*-ci8PB$yCI`@j|Ja*XQE1dF4UfGl!<UJNnEl#SyuPW7O%Az;
z@@LEWGMXV7lgG>cnK4t!XA6zpk7D_YZY=)l87w&Kg*7V+$t9R7r0CIi=d?9E?1)Dd
z<pX5)c7j8W5;Pi5g0Ec~Hrw_f&+rmxTfGLHH`;*ezn|QD-%9F<d9iaFs_?=bU$n_T
z4;j-du~jn<RV|Ca-ghaK+@g-x5n=-ueX>$zCx~0eO#>I#GB6NvxH{H>-r*?YH8%on
z6B4mt%SGt?5(<B->!HI0In)IF!!?hjLx^=gJGe|8CI6lfd#t>})Fv+m|H;!K4>O=`
zXFQk@-yn4V5qPP-1oS`Wg5-;%pgXmgUpTb_LvzdE#it;AY8eH$3e(Uyfjk10SD0w#
zzt$<M6s&n&A!yW}!Mn}0kBs$2jbX{yK1Ulo2V{WmfMKwAA7K~|jRIBLEOGkTWYic?
zPJO8N+-3h;-k$PE-2BW6bhRhJ+M#o3|96fpKM;Ve5o>wY^Ii}emyKEz%b}#b6l3pI
zLZHqH;-YHtf-!r+e_Ic7!%ha-jv3rgLj}!YS-jKeG?R7qu$GP3TVYmSi|P-6SkjGR
z{T>s9hAofCO*tOQjaTwEhf?Yr`atZ|1W;S(i6O^du&{tU+&i=kqw?rGBQJm{qvAl6
zG+cap_i;S&l0#QUe~{pCP=pV{z^L=kIQIy#+~?6wJr_n)D4<0)fu0NMXwsWAr{NP}
zcTgFc=N*HPDNdlu3W3OzuIT^5mCGOe5K=1QK_$OKeHaCJ(cZXa<ul=P=|$piaIiR7
zivE8M=JJ{L<nx^l|2!RxJ=CV7WaulQWrhYwhd&gg@l(ap&$+xL+zASfS@O=!)MqIB
z!40kKq5a=rvEA)`U>wrO8@#XZf>l>|ey;>{K6{_-e4YqnZ+T*YFp-HoPFu$QI*-n`
z7^qDl_uyh+s#JQO<&I-t+;`$xEq!b#{m9P^n20~iqfn8?nYV8iZ#Xvuh8ze-yOsv>
zmfvExWZ_`u9tsX0Ga$aF5;ZLHpz}l!%9|1dmj}~$=+{E<o-?2K?_-L;a*xw4Ly4-*
zCz;2)cU)hVz~m*A$(l3}Do>J&mS<9*wg_c|2MMOmbwamoDOmUNBy5b%!j`7h@G|2x
zgn|`*(4UX9?>b|-Y&sNNJIy024l}7oHW)@F^QWUDp#A$3aeS(rN$*Snj~n*9{^3}7
zeR>tvK{$`qOMq2h9WkbwJX$s(#M)oZ23?`&wEA#RUeOfy8h~J2u!?15T!tDWHIRP@
zg%FyD+FdgPzO@#uGUq^hjuogZ3a!J+_p-Z^(*SF)GM_J_F(vH+mnc<2%c}&TYvX-Z
zzWoRD_@w~(XX+hx+_QGk(1T_<?Ogc@{&HCXF<>gF!)C>FHu<3R63t|XlNU8qk0}o=
zhtAtGAynTLU5yx~B(Ddj8{gTJ^GVoxApvYsFM#UTS*Ct<BD4he7FylS!1JjZUhkwF
zn0PEMvN1ueBd5fzHUmL5bPf!=SBV`b7o+FOWgtCf%4KV{(9`TN^Sf^fw}!`pLCRpf
z=~9i}KR1KltzzC-yc8Vb5f*>e!GuH=sJp#nrq|Z~uTRbV_C4p01Fy2W+x<zNISLYn
zMnm%qI{#4S#QWd#=v1#y-fjhPUr*BY??8E6cj%RT0vCGE!Mv+QkbUqZekn-<|BgtI
z_Whd|Bz$5fT9&k@>Vqd@Bk;>1+Dj|<^2p+05V_kGa%K_7Bd!LnH=o7v^L5apn;y5{
zMjp{yRS?;H4AVPJ@#Tspo^B$;oTOAR*dB#8Erlq(vVh0uMALjgz$p%C_+SdT^9qwe
zYo<CoImZhlZ+Jl3m?NP1FpG&d_+i;}1(?)c09}VbXli$Xg=HHs-s}pfVs%*5+x_VD
zyo5Q|{Q(6JmqK8l9ptuQ<thh%VZ|m}lqJd8h@qpAJ|9pSoT$(|Jp|TOs?m8O9o-JL
zu!gymGY?tIJzVB;NkainsXR~T{JA`?YzT24M?r~(I;zaexio&U&?_ezx|Xkoy?SM!
z<_7T8jykUWQ*dUE13slWL}#xOn4lO(JFX2(s<oO)$LNdw?~msfH7v2^ojGU(orh+7
zA0%NavHF%ntI-iAyD5XBm+EM4L%YKR`Y=dLIqSU_#Hzm45S2;i6z+?L@w7upJ7uM`
zM~J&zfsv)-z=yImCg$#-LtPog_G!HHZw<CXe+7n(8OOgQrlOV4BqpEqnFno(K@sX%
zb*bOrrvK@&<wI;xUA;GC(Ahv~|A4oB+X2GXJ-AmR9cpGG#+u}Se~dNyjg016)1CO`
zc^+7vJ%VX<KgLw6x`Fq`M&ZPb<4|B5A=HmKf>Az^*jfID%PSdcI;F>OYBy{cbrJSR
zEKo6H4J){n!|MEQab52Lpr?HthU#@gUA+nrZCgQmtbHIl8eiUd`XAQ%MH>b=Cxd)P
ziFjl_^$vz7valw7{`7qx>dl;lrgg;p9_t~96a`}Cwq3A=_JSsp4}jwOWU$l8XUdrw
z!m;ugl$!n!43qD$r-e1tx1J9-($3=7EG25b3557W0~i~GnB|s%;c5|}J7Of%WO6h-
zw;KGz)Od5yIac140UnA>I1!kGRmM5kv8gvqnRpU<EzX3#Mu+gJQw_ZXNV!?)adJ1G
z6Wh&9Vf{0D;fEu|808THev~(~Qq*wKxQi7s*N+vOi)W$JuE{KYYZ1y{rK1QUtVRR{
z;pcO^sb4f5if$#Lbl4MSwOs^yj2Hw;75Ke10~5swJUl%ZE7iu~?G~DCS7zhv?&r}~
zRtj$ZfgsU7&YNtg$8^&K?2}sAqDnb7xh3-t3-vK5BNQ86Il_?Vd$FMKJ=5j%d+WQ9
zYhKU*<LaKUX__gnoiGXCy~-j^*kDLWbce3HrL6T-8LXl1;D6o@*y6rjd`;UP;~!Ht
zttOifajiyMt20nbGkm8R+2Wjg@7T_n3S##pLx*TT)Et=r10T-9tvNLFAMn27;P&&N
zR&)S#ZiVoqZ`s66t6|=SFWEwwJ+@XIWcvS+8{6I<EDr#}qXVGYv5Bd^X+i7n$IN8H
z1$4ORfc67Y`3+0To>_R)p2-Wgeepymnj=YPL<vSq5LSAQ#D;kK`z?#b-&R#&<SdGx
z)!ISW+MaCV3tuezD+=Q0*8s8aF|Ra+8*ejZhx=FKb%XU7B8rFHLt%LLs+{;44NUn*
z4M?_K=bJhvBcVW0w@iSPMFpTf;3n%oLk^l5$-J&ZPG_Dp@N_H!*;Bc7*okM%wQ(7m
zmT%_fzt&=C;W6;`>&}B`3SiNVc#^O8k>`@$OV)Tn*ZEZ_nR}9V?Vb$!>otVY<6|-G
z%n{ytvyjU-=rhgNgQ03`GB&qa!H!nMFL|*Le_juoUhQYQ8m&;1X3>t@lc0aO0xW*1
z@Iw6-oD`aicOqpNcFLaX7$r0Qts0d$1>EK=1hqbvDBm3}_N5(M-O_0cMva8}x7DC5
z3ultnLugz}Y=M7gfa;kZJM?=hPQB=l<;4S;OGq%9=J;@Rc{$eI3BlT}{`f303!7dC
z^73I-T>80IoEJ@wg#)p0x3mhP;#Gh(bHS*(1Rh;F53Q!pxPcG`PrgjXg3tvx=XEJ6
z_hf+WUh25#7;vflnYh($ENCa*<Dc)u;PzPpSS>%z6+_J-JF)~7M(=p}r5wmvN1PX=
z^TLWECVjk(&Ak$a9nmj{lV=VShZjQHlS0tX|5q%(Jd}66AeQ}g>cU1-Uz+wfEwguV
zm#QVaP_q~Z3tm|8WHYxnod&h5OTqA59GL$~<OS)^h1S)zpnUXH+_-|ADCw0DmP<>4
zmQZ+kU%>hUbJ6^x8!BfG7F=G9hUcTsLb<*-DpiZYd*5JIe_O>=*HW#0nhs)~F6B;X
z@8h~L5jX9PLY;pm;(5~*__FjIH@S5guEyx&it)sPIM;_us@^jH_jV9Ga4#w-lisl+
zk$0{+ggMEXaC6iE^zRVC+g%q>65AUZKJ<awcgLW8Wfw28wgms|T3lW;3MFsMx#;ga
zYr807dfO+%KF5*tGZ$f1^8jK}y=HGWyU{)BB%K?Vg1XLjuD?OXR8a=Z_{tC*A+Ep}
zAKFnp4uza$Kbh=S8&eHBz#Hg&c(?s{+UJ;Xw^38TG&rBvi%oGzDB_>uKQXw9x<5l1
z<!1w6Qs^e^{HP6|9v%VHJI@6F4+fw;GKt*EYsJooDj;B69HcnpQbyDV=(>kGpls}%
zQn*FkI?-=^@zMCnI7N|wLC#?q2!p`1^b(UjtrOZwg6Fb6jhR$$g2tB@nf8>;;Pg6M
zJY{u0y!`Vp6o`KFzFFI`kFXStt|vf<>NWqQJr}j6<-y0RMtGqxA7h6lp={)9ChZf)
z7xga2hG&%!@?$dE^e+W}VGPq*c@`QDp9J-<(JZva3=KMHei@#Fy4FMRTQv1p4yw>&
zXdf(>YJ+3aDfU;GGs>rK1pmQ%Sj(|s)DHCkz3(#AuUbKVta9FQCxO+_xlenn8N_OZ
zK%348?Cu$dQ<|&5qreQi{z>CO^XFq;)k-?oO#>6(G}hJU0-gzq1`{MV;m!W+)1XY)
zzL>fhtcSQdH5Hnt?S-j(^fBzC5ADap(4|UXF3y*D_*DfgyL%C8UY}vUi&glAI-BO1
z?szh14Ziaaf|@q+ADk*?`T_mea~&n>$1MhzCFCA7n+2;?GJI!L0@AM*^u3zOf{Xo$
z@2D*vwy+kB-|gnk-^-YMr6tReK4J|U)Zm5P1a!-Fr5?*F_%7veFqis?*T@l&-wPjn
z?n$nDn$dnu!+|FlPAfivq7<77bty4a$6XRyhu>hL<^KutzwKG@=?tLVHptqLSFOHG
zegDZ|{5S22hbKe%nj>82w}4xu)SKC^6sr&UgWI*{gW~)aUcKWmb_MpslGjn-VWh>!
z5v$m{TOc>@Q^j(27;<GzBGVlBoH#&vOyBROSbx_gxcGh$-hD`Y{_#D)o^l7;bf1!i
zm$903;`z5}aq~^HL1H|gOHG5ps@Df*vpWa+zDU59)KI7#b%MT6M-Vceb3>C2;Fj(V
z(N$Bh;a5KBr2pm_`e~q%BZIcuv(RL}8yftFLB~?!bF0ai+Wu4cc|$xlSlDu<nHn>u
z{FUnF0rA6`bT)IcWL5g<_+{Nm2ubvXuHVa`m*pgUG_?;o%GBY(ta&uUUj`;cg|OE=
z1nwsjGtBExG)+B8b8OnD>xbgU)D>7BK9f!V#}VJGCtuGl^3e8pCDz(>MZAUby^)_B
zAUNnW^@>74ew}uKFFMKdDMA0n6!I5Dh#M;1V4u<(6%W^A4H8@aWHfi^nTc8N4uW~a
zC|J`=g#{N5g1h}Jh>TbQO-?o--)E13`BIQZ9Apk9#1ntL7*+Fvc*|EUu}kGs7SJw3
z_uHqS#OEw_Xd^S;kpNx0n|b+*400!@gZIxa?&PP>c7~>cQ)o7Hep(HBA?wgRNC!0Y
z&U4ulC09S##)5Pfpsh(Sv@$b=j`{Q9B~L}s$0RK9ej#{hxx)UkQdF-~LZ%_Z$Lkns
ze|W%^_P#<F`L0Ho^h51AI*_-o5`8zG#bL5+Y<dyMTVCAdGR<~|ffc-jhhfv>e$0aQ
z#rg{@#W@iN!8F@~%OuCZ#ONuL{En}1B)&*X;aPG^9}~J>oaaZax8keNF!cWQLag{N
z8_RO@p<xC2Y)zi=7Op8yt0@5)tOLb=S9o^OF<9LHENtmk3L>+P^44zAloM4$dsz>B
zR~Ct{RVQ$M)Ie0zn*myq40JZ`3F0*uLAk?MJSZX#%}1{UpYcJ^tXBwi;S<>Rwr*&j
z%ZNd|h(F@S_+fk~)b=MfRCNxI4;B-PF_AlZT;$RLwql(=GSCk019>)o;8@WbbU7*k
zySP`}%=9?@Gs=1HqFJ<o4|sD16Zaj$QyM9#^`9w^ueHLbyJk{Gm#&G>KH%ncl&#wr
z58A8W@c6$}Tx9NV?VQ;gUNzKU;P)QTbn*=s1^ptQg+G99I)_KZ;<;Fks_(VLA-~DX
z4~8<gi-SQtClMObvd|{uBo<AWkCETzLik&97tjoROTjq^KOF+<^~6Gr2}F-s+qnNv
znh|Y{g`74S3V)Y_TG}>9RiBRUK3%~O<4Djn@ddNW!#JkH4t;E+L8O-nQkD(hyY`@d
zt+TjGe;&9M55vx4V{%#!#~jIW-oEY(4^FOyywq~gD5E}9uT#)+-HORBBFb9?9@-R-
zuTJhk<@6IQLVZ7FH44$VpB%hrO2EE78aBP(g)a9i`GMINQ29AdSgo0fN7q*1(B7JO
z>A@1zf4`jj4Ko3$jTt|rJ_nN(yHPd1lFm{q>6}A3`QZonxfe@O|LzLDWq|_1^Rg*V
z9e_oBkD~GG5^hYI#fEb;p!1(ac;D_Ab{WR<utUViPEWxG-wd=4jld(8w%AFTSL=d8
z>ga5Sr=G<SK<8b8XTyTCe_+Z{;!TA93B67fqj7Z~kfncQZppda4z)mM@CaslKVR$=
z4y^pl1b76wfV&sNr!xxFKlhm_YCka1ryf>Itf!))j~+<$_92q1Lg(mAcs67<s-36K
z8(nvz>$`c|-S@nueE^oME#>9it=T`H^znMHTAZ)Y!R?t!DEPx4tj%}h$R1Zv_92$3
zjkSj>nl+d%tVWHm(b%5i0ya81cq(@@#;+d++dYhMdLre3U#3FQsal9H&4+FhL=SK3
zZCNfrt&}S0{Bs<42r{F8r=D9C(cE;pA3t{W542S`Mx)Eiuw9*YNn_7|pTlo1*O<sw
z<oRP;_aV^iCq<oRBmA;23uS4GxbB0kkkvR3>=sp{Zbv=qxE{wg&N~ctBV;)GxD4ZW
zoM10^Jm%}&#MEVTV@|V({XWwNy=$q%RGE*RS;5dze+DW~%}3dP2G-ZTys_@Q8B6JN
zp4koz28}d1zH~0-l0`>g!og|SJjVh&ens)iKFe^Bg&YjJ%||7DH~ZHrh+%8WTeX){
zBkKg{T22N@<|}StWsd6e^SO0kG8&g%Wu^-(p@H5#r^zcZ(B&LAyAlgl?rY)t9qMh2
zRbl7x1H9eb2=xyaiGBVc59YQnTz{n)l=1|@BVCTt7tKQWxgDsf>jJtnzwvnIKHxCo
zI1IF&i;~jI)}Aj)LH0LgCXzq#_y_x;#HoV#mRlj<pc;nlqxWUccJ8)r8@#;rk)I0P
zid`Rlq1O`?ycCnCy#IYM!R~SI&uk>IG06Jsv66EUxcv4m^gs5PhyV0M>HHM*zv@oC
zO>zai5<@%l<b#e<K3vVI+^We0LWZA2QG%?z?nwn}Xn!E?JLEieZhOnncP+LISBcYf
zFM`VCzlx6wBhV_(AB_84hU)AB2rTab@m<eY;IQ)$_B?{O8y{s-Z9`u4F$7%xS_?}9
zXP}#)U}uW+!JIFIkgK<tUxzv9-ucCM7S!U~x~(|)@Jc*&bpy)AXo~NwO~dVB84wv}
zKpyM)khRelYZH&5mT*Zh_)U49Z)d=JO{nmV=G*GB46qOU&AsgxfreKRG&#ECxxGhG
zVIP7i%agdv%1oY8GlE<X5lp-z3!!Wjd^j-+n|-damAetQG{ur{ss^nph-3D08+mqi
zFx}HJXu4)7do|e~R~mN1L2+YIrgp(v{yc=C>vXKqm!oG81%zw-a93Uu+E3~O`i-{e
zExsjgdDhO9&AwvKdNVLPL|uOmHE1PAm}khJ@YapyLd!(pvTh$6u`3uF?)GI;yUTo7
zZv~3p4Yi7ojt1FUx<|(rK}yh2RNs7?ZJd=0x>m_NHOmsM)Sk2A)B8}dv@bMkRP)IT
zs<Cx-0P}a)3thQ#cw4xV9PeA8;MFPSH{B5mrl&Fec~SVT&l!k(vJsm7lhFLj8EB`x
zv48n<mNRfaI6W;w;{#s2vT7c(;$Zx;xg5W2EJwo#ORzUj;!E@FQMTX*7hM=;9d@H9
z9E{UO9qMn$7N291qa#>oS1AmdKOU#vh{3S|=~(+Z75(=gC3ZwVL^16aJ3^sD;~Q_8
zC>4tm_Cuqrl6URr%<Vxxa6}_=q3&ev;#!C<*oy<l7ScQ@fVvJ@f~0Z0*hy<P`_wxG
zBGwjRyIpU%^j{I?h!4U4Sb3G?#HOw!Px-hL0xL$NU)cr-7-5F~4#b3Y%mIVZabSOO
zFFF;)z_5H>jLbEL`hf=Me`Ff`WjY9pBGSq0wuv_&_1nAGa;xBh%=pnOcD{Hp=6ziU
z?tN4F>K;cBW60foV=n41%@VwQ)S27FW6Z@W6H3B!VaxXzP}HipGP9bulz`ae*H$cQ
zJVHMB$^1%~57vAh0#5J#ME%tfXwYp7b{tLu&$VlyO=A-b8SQ|(pPt1^F}WOmWTR)`
zS<u>`&gEO~iW@eS2q`nuFyylz2HxxrRzHuxe$y<({GRY{4+FHCq)uI!`U-zrH+Z|m
z0yUqIx5-cs+wz~W$RW+l!|5z_l7@g!?pfS4aSK*%q>PouT1fG}1O@F%3=N5e!Mk<P
zxI`cPG>J{&&==+=uEE?wawa_7$VZ#5MMH@@Xl-rbtNz=74U>XcU`7hhi=y0D>~^^O
zg#O&-l)I{5#IvjnK{{xvV4to9;~DL}2S-9kU=Vtjy<o53<$-L~Fl(2jEySp+;<koq
zxUw7ZinQ*ROV)nl)h)-M?yZ!0=YJ52vL~Rz+lgs=IKXvt%JwYJg|7OI(8G5&ejXT#
zO=U^=d@9}V53UAtlR=R8p^EJ@zlg0f(qLGhaFhiHR5WYPgX`6WplRp_kE)N-UOUFB
z-pd0zbAsU9pNCMUp(nhXeiq_?c+q~n!fJbAFY*)zG1-UfVx1|^dAr*can7rr%t1lS
z)eFCv`sTwJe7po4w<N&JfSJ(NQATG@DR1k$7p5nl!+(p<pxui|>f=0PHLrJItA0-w
zHawfys*Ab4<UjGH*8%9{Yy&2`5-8aAk+~Y3#qt(c-Zq`obg!;3(|P0@*c2|dN?HpM
zGh)$|`kylAtGwpk5%|!TvNe5rvIiqWpsw*4_`f%TFfUJ-Gb0^qb}s}A?Fs1d{WQ3#
z7Q$1}DG)95!^YJS+<;iT-4jzFUo#XPR-S}HRTdUR{uC>#JH>XbX|QE>4E`DGh0%#q
zQFlr_R~lA`l`#v&(wRX#5W|SEmx_;1FTkAW8m0)3;d#A@OBQ*8wI1_;lzqA2pO(a5
zkBo-$?t?-8cZIm1h<Z#*YoP-(QG2WvM*Z-`9<wgfdF=y}uG9wUBoT<>rV4I1tKjaI
zA=u*im3qKqne<gQdQ9%eRvJ_Trrc)~MTe%|>o81V4$Wo<={%~%Ok3LosktuC5^aKS
zqbhOsgE9<yvk5g1Z^sm4PpBp)V96_Qc%V26i;rbO0C6DY;XirU+1}v4;XBJ2K7&0~
z#zB}`DU^8RLXN2|i+@o}9577?*_psXq_g0CzXZ%PItGD0iy>6c4~>43zhzVz*6kL-
z!~=P-<x~=O%*o`1PpM<GVha0in1_<z-a=ijGgJL#AlRL5;PaAbj(U|mv<nTnqJ9$F
zQC)#~x8=}_W=7IUb3pyk3^w$mjLuX8aZ68zyPo9Z^@S;vx2G=X2^*eOy#PA)z2+LR
zL0D1|3BQHm*wTLv?_Y5e3zt&|;aN3rIKPlN&!SzYm4q^DB@j;Y2+5ohA#3UZNLiK*
zb`v=}^CX;dy(>V^RRg~rjYVh8eyB6)Hh++%gz10eV0b3^!9sRJe}@$MjOBPcfb!Cx
zhX{VlXM^der(%!CgPE8KxI=Uf<+ap}`S)JA)xLV}7wrqJLn3jPbQYSnH}az9P<)=J
zKph_md#a~IgP!q_wOfYjS-n6xO9y^j@I?Q>BOtmooAl|#?Nkc`|5obHBy}*Y>?*Ny
zZX#<6{K{l%!&yUIEnmClJO<9{iGI}Ck%ir`-f5DI-M>_z=G<KFQu~nIAiquD=t}Ay
zjbO^?V?uO_AKJFjeCAv!I9-Vq%0FBI^BZ!gA3^6o_lY3C7y?!)YOw9f095~43C~0H
z(fz?GeD&TPweOy0p%JB!Wit`#tMa&g$7-(C^pe%dhq1H~>3Da$3>tSS$!EHqUw0lr
zEYc}l9?~H$xK1pVpFi248HHFU>%qK_`*4wQIMi*{fu$t-w(4ocCaNXCrF+}4X7nhC
z92<sVZ96d9D*~^jlS9vKF!<G4<6mnFvE1SUDE^`O+5EYvoO+Ur91e*GucPkwmSTDi
z=He&H2zsoyf%b{S&0pvZdCw0+dD<Ru?=>1!*e2MoUk0-(=v=Y7FDod?gC~ot=;yR!
zdJE1%&WC5rigJc!qba-Gd7a5O{SxmrJ^|Goau_N-hsM1Q5hwHlD$JtMEp;@_L~k?c
z=X$39_fg(_<s>WdIEL<U5w@p$A&j35|F|u~Ro5cX%CA2(zL^7&v%c~+J^C)sGX(vq
zqw!_8xgdR{4w95(;?~^_bmkTDUjLMV%h;)K#^4H=`CNj7do!_7_W&3#U&`9UD4R2f
z=4CrVq4mKCE_MipGn6YaJTM$u(&vlIJ`uC2tiK=%@?xHg$l=nhA8~?@qjF@8AZqT3
ztxHT<$EIrZyO@LP3J>5)KXNJkw+KA9<<ZW*560h|0eK2dxa%v2_PCp3tEekzQv8e$
z+OvWB8zTf6SX*0Gn_$kBdZyW!4&vidtnsjfupY~x_}4C!tqS7y7awv-^#?&R@1F|Y
z(MbTqhJdKs6DzgUeYDHyM|{=Y_;SB3?W3Nv>;r>P%ljhtJyC%62C?{Q5^;q5;#p%`
zIB$=6%vNazpt-LJxuWyA74_ns&Ju5>WFb~6$D;Pt3Uu6Y45c;m#S^cdfvMU-s8u<X
zssCn7O{<ibOx6R#?0dZQ4=+4AY%G3t+=1<(#IyJBh5Ebq3aW3zg{6<Puw?8ATzw@E
z9hnKV#NB2t8{cw!wS^GXFB`Pq>_-Fl0K9#p0w>pA!q~1-yc>QTynFm5KK1W3^vjF^
z`+@J6R<VH1N5+6jXE)Tm{)Go0GQ;H|#E<Ts0D+RTbie2hq^(2KzpoQx@sV}sgB4hk
z8_IL`G_bEm<P1wb!*|*Upu6}Os-FETmh|!DbARe!*zXE(pPK<LGY*hH;R39&S%EIp
z3#5h%bWZ-kQpG25*#^o#T-8C-S0?}CNq}p63^5Ztar*b8sK+PakI*Bi^4|^dZ?qxt
z{RNDvqH~O40gQNKi#~1f*qL{ehspM`wp|;^EwLVQrc{ATObv#kCboQ^3AL^f^v>V~
z?PaB086ksxbH-!l5Bj}P#wx)&9xilTLg(Z8sC!Tg16}i=?}0c>nV0|pU-r^xyAw)Q
z*0S;epV=c>JYK(71ojaszG6`o+SM0vHETz(x;&Qs@r`!EmZsclzYdFE;|!-CNl;d6
zYh7;rm36Huz^nC^DAK)GVe@wg;2$(o&T?UqUa4F(UJXKSO$JBmyGR2Pnbu`J@`8?M
zlIA?YJ&#x-Z~S@rr^`^X>oGHWU5nBs-+AYH8S@BV#3Y%ag34~T(5BlPv`;6qh?WF&
z2`b~JJr{7r?rE_2s}~CTA^6-?2Cn~B(_fQNWf~~R&)pSk=PG&pzrkz-ufa;uD2$q=
zOZ`cGe%*uEKYiNy(3`W#yYSmeHE;&6Y260e@(8T=x`5IlQ^XBE-*}U39`AN-69vU8
zxj~s6%4V;{eDXRg&jb_S!wQ_H?Sj0bB4Xc^Lb+Wi%Scf|`<GQ1GHVt2$LM)0iiND}
z5lqrx0b%s4RoH9<)#iUHw4Ex!w91#MtquamcTU9bTZo;%zj3!q>!9*)32J@V3yLi=
z+Ba=wM)zePN-PqZyv-=fx}W$tGGbAwbK_pqSj|-#J2ul3OCIL1ofYYzaXbV)N1evT
z1u2Blk$~*|S!>VUA=q(z0`-^8h<7=TMUEc`k!B^(wxusLO143Z$7=SWqz@*z9>QP7
znP5x(Mf+}IFn*g2>V~~38+C~M_DXtY5NG*E3b;FrhYpMW(5}$sqN8I4<?>rx^cR-*
zySoIZ9Ug-+qkR<}$JT%<BMX=OF&Ar-PT=$?nn_Q-Eq3wjiQtgP<umUHG51pNEuO<Z
zUd4F7yEX>)tp%q8|L~e_Jz;wdaZ+4<3QprRz~kd@Ch;Q1-m^vUy8SdZ>{-SfY}TXB
z%Ly#(DD4WyUJ_rvy%tr*A1chM7GcP04=C)Lk3K=MV6&?Tv@~;Jo>LX5BENB2>_F?d
zzFTn2&vlq#L#~F^y<vNBA-3s}!<FWY-TzUdbbl&HT2=~@Y08RN`ZLNB-6~d{uSEGy
zPiSoF4~|Q%;f=W%MH@T?aaTP2@;wPDDvoV;lzhtd%TSQ%3x@w5WeVNHOv@(@&REMq
zXLt#9U=|`g_{Hn=N1|O^CYx`15<hs7gKflE(CkmY)8%)pJxs`1ys<y{eeQ(?s!1$x
zbs*Rt8H+3bDaEGrbPy(;q`giAw47QbY)me~9W(c1X!1_1q^zyhl6b*b{WM?PoJMT&
zVd%7^m7Q)qh_fyK#K=pJ*)_eD#HIfsHibCmcla2Oe;5E`bbZi5&jddo8jEVFL*QEX
zOQ;zY&LuP2c)|Eg9Ljg$n*lR%+M!x>(wT_%i!32;WDsC-0yJKK&vok}m{NK`2)Q1{
z6n(X!ZRuZZP|y{yKfDXnTUJAX!9wU|7z&D^0=x(g#Fek3QS;jz5H0Zqzob1NSy(_l
zn-ctb&=zwh_GT5asaR*U4}R0VcIAshIP;H{s5LngjXjrf?N$ZTO{iy$$B>>&^FVt0
zn(#Wm2t@8*1*hT?wy}OBTJ)uPzn?mnT}tLDJet+4kK!twr#yAB6c@Xup|)%;C{v2L
z-S__RIo}bRl6!+{(-|&`$*E8U)xgWSlcD2MH7p*J3>$4?LG@Y;8J*+FhZKUUah{;|
zVi-tfUu2$fCvfUK0i};=$EZ;Ob?Ff-zBUv8F)By(8~vCp&;YzNtI>I&ika?nfuf*N
zT-W;ow0Xqvj`a83M7xfM%GELCl@(n7!vbZOsmon3pMNOJ1=Z8vtP_=>n@^qio{I#-
zD~>RGIb!vN)!5gp04MfH0E71-pzk>!F3jG8=RPLm%NuqOSmX+a%FD1x_ZE|76j*l^
z(>0OW#RC7j%A^aY2rG_lrK~XRXRKpUr>%~wRsPMx%95efwLfMk%E_A$#-wpqg;BdE
zp#AnkSif`}Wjo&TTzb|e)DwT^aUi=hxPbf`E13HOAKKw(fMm@#!BBH8G+XC_==N&s
z)*^i#7_=4Z{#}Ss>xl!sgXVK1?ZDNe3bZYB;NWwbo&Q$nlD$2lrkem^+si@!)Hv~`
zQ?wJg8;|b2%bER*Pi(wL8T#RJFbqzl{74iJs?El*9$%Q@yjqsEDGt`9=0J-w8}yf3
z!o+!Vv5o$pw`ebv7em+h^6yNmDM9>PCV=MmLVWP<Fs!Jeu3K*hrdyTHmv$1bsPTbV
zb%(O_Yg4U7-;1paGuI=klRKT}v9W(10Vg&NJp8>dqk1W7Z79WqO)9W(K8Lz*!m$am
zK-TR6=iWy!qM1A}RXUhex)e0&{1`P<1&(fUV0`W*9|;WY6zNR1{G@f(ToG)!M03lS
zJp6S<0Oh{*!c*Tkw9?+f-#r_P{<^)Gdfz=@Qr?X`w*HptI=;*)ZYK9n&}DMXPuwSt
zILcEGvBgGzV%rSlY9I2rcY-yXsSEmlk4&fEZOS$WSc~t_XBr+3BBwvW<EJIur;MAd
zU?6#xikOBAWr>I@p%GdJ?n$nczj!TvIlu@ehnEq9-Ul+hPN8Td^~OhrQ1@sLOqrHR
z|Bc6NUQ#|9&zQ#5{r#XlC=2ut9u$x1brkhamqE?pAc*%F0QNd!IP|&@b#@&E+mWHD
zK94@*rz2qdAIC7Ht%a)&WWe^&v#|Y0F|=#H5x4YvDV}?BFV5H^prU^S%loT|Esx1Z
z{cr09=ToQAS#pAD?xUUW@%9RXdw=4muvn}gSV+C+aTPjlc3|<>PIR6X4q8UTLG5BQ
zll3Wu{--Ly#PJ+HUF?lnzs=CO<Qi87^<pl&70_6F3|+=+LsL;T#@{xDp>w0be&{lE
zPfX)^^AAym&;}*rC$gCbPGg&QGG!MG**uyjx&<iE<RNwLs)Dd3Uma?eHFD*HkL;x$
z@i|QnVu9UUkX#)CvVEhW$K;{tw!AwuE)c`W8^!p>;yiV<jKJN*6yk}!Vei(+&YUlV
z&hl1nm34&&pHu-Zr=KM#0naySp>f|p&>dO^PQ$A}eS8FWpM912JK4ZVpLAl;+H$qi
zgFz(!YN2f-#b%mWwUjMH&G*l_OY2`u*RYt$E^HB8Ix6X`-JJ!VU4zmeH--LtMx)8l
z7u+fLZ^3UNdHHT0W^cDshp42BHGIp2=G9A~Go=qWJ_uuudTyZ8!vX`%q&OvkSR*E@
zpy!^G<YA*Zv%(IZJ}0KH(<9b0P!nP`=aXOj2~*y7f$rNlNa8+KC_XF(r_L<lWm+C|
z^>u<Ug)gY0UB$1nYf<F>+gfu_G#|ac77Hk&GsV9cz^w$-O-G{D?CH#ON(_YEIE0cJ
z^%V`V8a%$27BpVdhvftJW46yMe1Drb$I<Jtyhj?g47ki1?-S34vPasjw!|HMBQC$l
zX}2cjpXOJ<3k_?Ob@#Had$tz(ZwN(KUz$z2F5#XIE~u=btc$M(uYb7{wHCgk48a?r
zTI(om_9icCF;X_*7IB`kxipcue3ci`)cw6sKG~G6;U$9e<8T<@_b1+ZbqpPD$tlaU
zhe-^+i*wZr@nvW#zV%&4tX4-<%Qptgr(03<c)6e$6U?1g`+$>f3X_aEUE#U+3|Hjm
zFvpoj;C4C~@x&$w+j)<-SREFxFDXW~kPuWZ$OHMeQ{odBicoFQBd)65N1}@`2;-T|
zv4rE`-b>MHW+FWoS3{atH&mXB6cXlT!Sf@Ac;WbBI)~*la22>BuRGS&N5I5GS77Xk
z-KaS;9{u<<FfW~is;l!6Y-&Jyeh?^6&mhJYy&sy+=cfG+i)C(_;+NkWdDz4lbYAnE
zwfH*o7N2iSWI4t9hH5DGHoJ^5QJMICZw6j*jluR(ZTPm=3-J@NyRPkkmIE~Fj`Tpu
zr2{N5IvH)>_r$X<(I{$v%3Ad{v6t4>EMW3c)M<Fh{^z!D&Nu}Qn$!*X{Wnh~k99-G
zQD|G{Pi*2#V$-4x(0cs~do(W;WaeptGKupC_d?*hswY~dZ{_U{`mo@F6Fw-<hNjV;
z%xSeLG?aMq_|1{LVN54;);|K;aF$K%Cr8o!f2}>AY{AIU2Jmu!2)aBA#8#;_<_-A3
zPk5$bUBztFbXW;0pE4eHeLPcae#FBbtOG~MMK<0y1kWc|q8GiF2BddK_4u_sEg~6=
zb7rtcoe<{j94!9tW)<a*9hm6ETk%M%WQ4#gs6XsKV!B>|6@R3lcJHg)*{M6t*!PR=
zBg43=+cd7o{>7|<XdnB<gyrn1q+Mh&)opdL<KPu;FYgHp{yv1GODYhfrJ!gJge3nM
zl%$4L$dA0G&xE{a`(wrWEBnAu@`K4Q+CaV0UtI2booOUnp!>azVBejx(^pf)@s3`^
zRdC=RhIhvwO7ao)Kg$eaBOu{p40)Wlf^K&u6RoB_*p-z`@mC>Cb)%W{$0X={x0vUb
z>!NJ=A~e-=g!rn(P}B1$6UB9}2%3BjMb{j7zJE2^zaPn@nGxbGhlyFZWi%)ctPu3i
z-WQto>G46G$FX*x5;mID;DfVqpes9sy9!TZoz6|R%wY!B<>x}4-7zpW_>YyWih`U~
znNU!q5KHwIz?YZH&^&t!+>v;qLD_ulbj_hXVJ=2KkH(UZ#3CP$%-tdYR+(5~X6YU@
z)LaPpsa9AwY#TGKisWinCc>{MT?{`$`<oB*u=mYqG}{~mlRf+6KWWoZD?eKt;%p0=
zdV$0MtAs|QJ}{;#hVsH=xoF>Vv6VCm9LxHF^)3rcTbP39r)|XYte<RX6lIcx6QHj$
z1@ELz*7Dp8K6Ra<dsaG_U5rM_*7HKVUK;i9X7Ip(FsP|ch2*6Y964w=N?x89Z~KEd
zO2h84{w`Cf$Fd(*B^^T#wLNg1&H-IzYhlc66FTFnpi^@IXytrnoqaFjWXd-g`j#=(
zsUf`NaX(P+n#!HuCh+G^H{)NPg&3I74d0)S#e}()U@zIg{pWb{#;x-}RWOp7B@^Fj
zK_9O3Jc_kO>}K8jMnJ?uFZ#}J;!=9{S{>YoDHdk*yj=`BZ4%hA))zOfErdlbm*}}Q
zkL`Nsgs%-^z^=BAX?LYS;3_X@$UP38u1~pLNGSD~zwqy^Q&D#2o>*yCOFS$m=w#aX
zw#Nztc>^;o5MXDZ92WN4jVTXASi7PZ`rmlSG~zCUYSbvP+QBpIr#{DbbiZ0PV>HV0
zoY{+>M^WQ~0jd{$Wb*<k3#ri$bgJ)g`P*P(e{F%JY5}$7$Kh8gu?n1?v-Wjw*dvW1
z4E|6`?*YWTUmnjy^(~fVhmPXO_F{}3lZ-wzUpM|<$y!zvfpXm}KDBiix=o%0`mP3I
zm4P|5+S=isf9<hl;2_rW{T$f8roBRn8HANR<2H&5aCDmlT@JMJl0l{DJ?J5;i>KLD
z=m>CsM6)yVmsTRpdt$5djhOZD5_f(+8`@9Bpvk6UUVn}JyLER2&B<N7WA6x*N{?cR
zv<f=hZ?J;#O@egbK{&av7ORWY@T_GlO13#*-r?@BZ8ac4VsQAxGQ4Iz8XNNHSvt;~
zsrC(pl#l}05T?R%${z+LE=AeXIYRS(OJW#^xO9Rui24kva4O1U{tGrS+jb>%nDs!}
zAk7NR)N=4lW8j~1km0S%D8=Id$<!;aSiv?oCSusdR`z=AIP4*@!GcGztjlK~)>ZX_
z*<Yzwl4b^L-ps>kmm<+DCJ{aN&Lob5F}hg4;I~!#u_<5!e;s#;xD5W-WW5CZ$Zsz#
z?GoD`n9V%wqIuxlQ>atc_kX+)9MqHvBE?o~<GHga2Y?W{bqP93(s1H)x>r9i5YpPx
zvF`~xZ2f5jE#3ADLjyy|y>kVdyQ0DNxh7uUkqB-!+8~KZfq*l~pjaSBE}D#nZ)Op%
zaUF<;?yhLPGl>}U&zR=u0w&v0Cx}eCEXB8raN_3#nt3I{xv$jmeqGMh5;K^5i6hh1
zKZ@`6oy3NwzF_KNLhj@F;Q!Q??&*)qolab1j~^zWPsMR;8>K|;&m!=QbwJHcRX96s
zJBHGHw_$HOlmv|hd1xVJZm9EmaU{)ALYb)S9dFGt#e~ivNb%Z+of;*u^a<T7yVdYq
zsKks%cBn%<(6)gxX4qpC$mjkN`!CPKVda)+-|rcduCrqPPoHz)*ikT7rZB5%ld-$@
zC0ME_$8|PI7@v^^>*L2_`{6ivFo{?M{g0rnu>jJ|CQN<o0eCkz5mhg`iM78Of#_bH
z)sJ`8C`$jq)-6y+{khw?13jy>S5xM>=O#+-qzSU7eyrwKC|V2~f-34X=$;Q|1yBOU
zyMtldw<0w7=R6p6#ehxbT9p00pu%Bf6mg-m04`htovyEZT{|&S<G1qKdJ$%gNM(Pm
zr}?L$0!q%*Ft{dxj}~+tk~34yas`B~sbC>T>)E*2HR$)$2kz;P!km`*^bGh%F!`^9
zTZAWowt+g#uE<4~1^TSxB%QyrlbL+kC+3<(J@gS%VCB#hD7!zDW?zVpb>+CQX9DUR
zc*>Sg%~sxzyia$}LHVy><}yS<{me1q)lN%MZGQ~tHL0*UzX~L`qboL!+lT>A!XP9i
zh`B!*hKg2e?0n$ES{vT+*<OX%=>MKCgDNO7*^5iEhN3r~hIN#!*Q$FdmcE+>Dd&kx
zu2zN8J(hwb@(gE<bfol{&%bIZQ5kO{o<Ezq2d@{4je_N<J1&J-OBZ;<{X{|An^<`1
zhshm1%&L8QDmSg43HAGGnY15W<G0T8fBUB6YxzoSCBD}1wZuz!?JxfHEDWWe4~SFV
zZl^xsJMomNKBzO|EjuwV1up&6!wx6fXV)v4eaKiASRlu9voBMx_bKb?LG!_SDY=%*
zne6sVR6Ftq<P7Y`y7xH_BH1*MYM8U9t8$^CsX@RE#Vn=x6q=|z;r7BS(7uW0cb10C
zj?D($lmzbGV=?G$+lsM4hp@kU5lG%Lkeu-q*DPvcUQOOOnfyZ9Y3I1#qAjQs9SdRZ
zoHZ9Fu}<YIFb>Tm-nA)r3Lg*I(}$tf<zGy{bdp%T|9-IFSIm@)5fmOLU|>=lep+=A
z;$^j9_p=aEhV{a_<Kz#c`GKTpJCAo819~s=p|$A}an6dsc1sdScXo-{l<}w}W{QpO
zNhmmbTr9cgFIb5Vu%$yY>3Wf3=i<@OScSOB@;Lr^&KV`O`hwGpZ|phkw$|+l!NrRN
z*l?o={r{1%@ewtcJK-XFetE%2^Uh71JowOUDd4~NF;Dqg$5s(bLiKdMuyXVY^!PFw
zv@Dy&Ar+lmGvo}2*5~ur$LU$W`VfQ}so1ht-SBGv(}*)a^41VJ*GV5mpZjPF$+q=u
z-bEe!S2&CDH&#LUt)-Cl_7IxiTSI)tcp*x^26U$cLXmwitTG&gx6YT6gJ=NPpC^!8
z>a@6Q^AV`|y#sU?&8N?Z!J@@iFru;mwRG<ba|bTK_KYK-K7Sq9PnylF{@u+}`gZX%
z@ux5={0ua7#S2aq?^#YhGAF0m+$AU#6g?>yovnqmenoeSBTUQoqR?uV3$BL2c!s+D
zd*5%z3)MNO<$Rm(nPZ4{Wx0Ir=n%AeyB9r%tOnH>1vgw;!bkWVgRT#5FnjbFG<jkR
zQ_O;CPH6<HlxdWYFt|{EDuOl7A@;!3S3F+(CM!(HM)z*97&LVrntDy=k>Pt`gKa9N
zJRb%x3-0sCzauc@S|QUZ|HdAq?ZJ*U?jS1@#3gTHxF3`8rlmjG(Zl&TWkeCEdKOgV
zWduO6Suv`l8^mgtqQPmaK3L6&<3)@d2KT7LyJil!T{sSr3uV};rUA7xi^2cLd#;>x
zl{a2LO1sc{YsFhbPzi&?qBoINM~^5l)HxKdS5)BQuyYVwY=WZ6Rpn-h+BiCl_5z}F
zz+JWQ$u|XljZMSAFNG+od1&R9*TD4yuL-wq^uvZN;aIXG2c)k%g%<e=alJSSl3ylc
z$`g7I|9cszPOhU&Kn^!u7s0n{WPoUHHtHRT$JUZKl)V~n?Qgr0Tu0OkT}<vNAXdoS
zbWmFx$@PDla><Bk%=xF1n+ExU%dR^Ny~Cg-{v7%gR1&{`CFD$~0LL#4%w%j3af*7d
zuoLUSF~Jb<$U#up6PvdCNz}LRDcFxX2GTfNaYP(t)J9g~(mz#LH{>>J^`8yuOA0~x
ztehD-r83QlH~H%8GMZm$z_?`=`0Q~4&OcC!Va$OtaQ0|(@es%_*ubsuc$jT`0gVGf
z&?|WfI(rQRMYjR0-R2MGv1%u*uHS_zTTXL(?I?02CqltG@(Q=@f!3NTo^mV(3Tn>6
z1B*;Bef5h)F7F4@S);|Bq6sWvPa+zU`&a*iEqDym0sDthtl;Tv;;0rg<G4lSj2p?W
zcZ7rE><sXK?8;mId?apD^ClmS2R!e69LG!;hgXtz<BT5ewEIX$^?*<an3DovmJVP%
z7?v$7f-e^N#8nprwd+R2t+~bhw;!gQgA|+WsQYqQ2^JZZcuN$6P9{sCxgv;rC*BtR
zSaBFzG{-_yd@N{{NoZ!;&O%*Jp?kwIn6+PpN_iCQtcXAhro`b(Eipy;7jLPY139Y;
zP&h$nw~qlB-?AAb@<Q<`ay4n!$atNFHTl+Oal7E5s4=h_?R--BnTQHFI6EIUyf}jL
zvlj(5r{8@0;e(h}OM46LZWVP=rc7n^!n!7_8@Au95W9ELE>50b-q7<Xv`m;M1kSt2
zWfmVQ0<1%!e!&=6eCrrA4*t&V-{|s=0f}Jr^Ejv`Dl0b7dEWHyd2yZYDb~5ZjV1iZ
zLnGS=5ZNRPGOzxueQbj`>#--aul*#**RCa(96(9aNcgfm7_)RVVShBueO}z+ospwJ
zZ|Y8TTrnEkA8dm0CoW;Z|8aD#0WofG8y^WtNRm`C2q7d%&AKI(BuSEl5Ry(vLJ>yi
zk{X>)=%l9ebe@@YmnlgIA%qY+v}qGNgxK%;zu)^X%{<Rq_u;yJ*TJjO&fU{#^t3$4
zA3c%vbp~Lzz*jo;1IsS!E;TWeyKf@S(4mHkwxBu16YoB&LDLfw2)`=@+4V`pw1(-G
zGh2zW?OADcx)QqWVSW9bLWu2ehD?(NsNS;y`kuceww!Eod}ap9tIR>IC0B{6!8xX(
zS_n$f>7@PUZ5~8cQ2AhleOD83ZM_72_S?baeJr~*=@N85F(+EKj61f6!tU+_Feng1
z^2P%gL(H(Hq5=4Own-D2#(103LY(+|En0?d#)2zr(Q*4l()oS_3@=Q_PR)EUc7H;p
zG#T3ND?(*eF}OS!4URt!LG<<#<ladlVvPuL)&Vhe%2C+YWr}XuO~l!L9nM`Hhi!_B
zal=T~4?nTS_BoS?PSrBFr<IDLDTesSgu>i(8M@5;Od}OFK>dR;9GaAi^2y;~f3{Cb
znby(vY6`eL8xHz5nxxuVN?X5NA)`%?ptB;odxiZ}G`18Tie1ogUOtEv{^Pk?d?M00
z`QSPyhnM!K9+gMGlm1{lo6|p=u=2YMAZaJ`Sp)*t!jxo^9Z>qud6v7AO((Bkf*-A8
znJxyX@fXJX>k{Gl&1$G>cY_>os{<j^JLGw_l5+NWFPY9kmDos>|IrBPLvrZBwa3sq
zER+1NV2Rnkc~X9vpTw-jh!?{=8r9*7cr+xKF*@(iWoIwp(Bi3ZJt_~m%nw*5vBZ9z
zb;RInDfAo1@+^})FnM??<5o98>bv7GIB)_Bl?<34I0#xVtfxcY_d(S`F?LG|rK+3<
zG+=oKweHwYx<lqkU3Z<58jMSa9VQ9T@#G%4^(F*c#mi{>t$3-zf=KlFRRoGd!61es
z;4vx#M3FiWDVhih4I5}UuLL?(G=Sr{O3a-e2ZJ;OiVx>t<jYFDFFucpM7ro_%6=1-
zbZWCT8v^bjW9+2yEGwI+ZPzdey;KhTG*gqjpAE2k<QWj$IM4gwQI1pf=i_XTQP}@o
zgM6G?4EE~<l(S<iR9=XKR3cAayRuxf(rEms!hBcHVyUHE3T@^_qw<}4sopc@Ki<8J
zw(S|9r+n40KgN=VzifiO^G4Vl?*>1gk3+Nma$e}b5$G~yjH6h8O#JE&EtTiMZQD)S
zF*cI^($-*e+d}$OGX{iV$LLViS2|Ubc>-ql@_O%V1kO7LsT-HgPam!^`C@5}GWq{Z
zWCuNYLwDTK-r^Tgx7|-$oxW4c6UW({E(*K*ZV+SsQrIw(Vo*5qG=2!e3s3aXr!yGG
zYYI?7R7l-N=RttI0qR|OO|x#Ufy|c+(eqq2x+*Z=k==06?cPLCQ9?ACrbtds0fl!&
zL@0^_o6-p2e*c#!Td{s6>=aQk$VKa=huHp4hFjgkASR;*oPW5Hk9MgLQ`3yX?eWB~
zCyMbPhDe(+(`3Gw%nQsYMgQ1pk}x+2d-kQ_igQ({^(-2@KkLA^iU<%UKco-#d7<6n
zacF-h19D%jN4H!1z&3jv81Fbk{a;*$$c=pP?GMLf-vn52c_B_~wMD-7X_MP0YFTD&
zA^12Q1H+SNFlh2qnpNiy7boR__FyJssmL%ux)@GnSHgmut1!rf<=soKL2^h9*g7$7
zu?Yv=wok?CiILc&-c0#^Ml_*f2DXQZKo)z4muXlHT#ZyVOMfcaZWo08>V??1oXrDf
zjVHF1W5BK|32S~{!2_>YE%K|7>Q=;(PkTJ^!-Q*SA36daEjkOWN?x%0i7)C+KL~ms
z7edB1H?)rVhkm)s@;mpn6TVUca00jR(B%khi?74g>^ruKjDgz<i5Pm)2wj<$^wj3*
zhzF~&Ewl=<y5fm*%|S>{j{~mKUS7~Z1IA6V!sao<AXbw-^OlE5WPUTtpxX=XA#7*$
z{47uK`9GdwODNi2)PPQ9F>qREWB6|q<m$|o3epOB#_?B(uDTzwy;6w=5&6)p&2nGQ
z3YcE3mWpni0*}%?EDLT7w2#e}Ds+W`0uYjwT1E{QRbrrb5%w*qz_+zg(Bo!=zs@Yj
zWTQB=Vg4|kifr2V;S<%;>!7;V*AV}f47mLBILptI5n~m9b{ELOfZ<B0s9J*uR*1pu
z@dO4gmq1WM9u|cy!fKyNJn`Zb%R`6<&L0Y`=Z4YYB^!}@)}JQtV;pt1$F8uo!j`<X
zIB+GK%Co*!ZT?l@dL~NEg0x|N#xDG4E}-$zMT8$Qi}z?D1-*@rsdMWd5KX007fW53
z_kwwa@B)mAX8t3k<#1#`3umjb>|j>UwnfMwbhsGxHAU1+mTMwg!}tnjN2%HFSZV8Z
z3*;;+lm>2NoX^|KXkuk3_#86<fm|l#cwLq#e=`Bc>vOU5sx83U9+GD%0(Bb>WW`%Z
zn}dBJmF2*52giWOb_+3f%LL_Tnq<+n*_atq2D;~L*j+=QgYEG;<(i<VsQ{erF%IK!
zO^~lH#~scHU)1xUciTSZ`%&iow5Y?FT_VzcC<3-Gb45!#YY^$hK&r_!Sjc!<AGWIF
zmvoi`mKFwgMqdMk=s>XA+YByC$Fm)NJmJq;F10qw00mJswO==tv@r(v<O$8F=$!~=
z%ii$7iS-_DHiE8RH3Y@8GbKd<N=EMokBq4hw0AmaDQDw2*-lKInTuI<9=vVC@^DH?
zCDsL{Va7&RY}mjwV<T?To9`uT&+G(UE4PF4M;Wg=@fI~VWc!TQKc%uk7s44}VANtO
zxN)@@!wyVB)M<c;Mki3o(HpqR9B8(#he_;t*OCUJPl+9r?p%w)6)Ql9_h{ZyYZ$pD
z5|k!y2Ze0LA6H&S{0qO)_j#ky?1mT5<G^TQYoGzAW+sBZ{6P{@r-KfE)q>DsI^?DB
zNnq(@)atGvzHf7BWkwN3J5|FHo7HGFpN(DX7mxw9_f&MG0-WQD5q{pJ{AUW%t@^q6
z$bAvE?VW(PxUN`YIu%3#B~%}Zn13M}Ql~G3aYm8oF<p#hvSt(?=w<qdJ-m|s&1i2%
zX;+Cps3!)}KgPhA$|}4>O<R^Bdy57%Xp<JbJh=E<h?b+mF;qSRP8kY;^J5sXE*ps$
z9RO~%pQ+p@Lws?q5=#b3QQ0h>yg4AlN~V)cou&Y;UB7wXj^v=9Sb^<gTtM*fG7Nsq
zrtOAR#KSv{D4cYqp4+6DKCA%D7aYOyPSIGo>>|^Gc!QcZ#Q`fz=>4e<OT!LhfaMC>
zx5pGZK5v1WjbW(yQHBPJQqZyTgugN(n6|DF1ojO)<-P$jdL857WyFB}r#;}Jc@lyi
zUBSU9ZPcCgjTnC&$Nalel*w^H+0~o&XIAkpwN&Bj#J$)g5nyO&1-U-B7HyX}0k>|D
zc<i%9xq(cUNw^%(uWvxJ@xIc;DiP^^VnvKZwP@K?2rapnp~<2Fo3kz>_wjn7vE&TX
znl#eZ+vzM<YCg|#jXtX@1WXH<!1&2ops(FRZ?|dSnDJ7~J}tq&3J);0(k|ScT><SU
zcM#|1Bzg-CQBk)Aw+hryZ&N6E1Rsa8DMu0Y-_rQBSm3_eK_YKxfXBiIM4`8k-d>W4
zY2A6ud*vyyWtyF;N_MY$QgrRiWS)C-D3~ijohDW0^VA~BZ~u|X3cax89Uoko9&7!t
z05otqi)KyVNxH^q<P2Guyr{i^6Q(wzvP(Fac@+}ec$rjRctmzI?MH6zcWG4Ta%_0L
z0n6G_(eH8t2zh6y*kB2qoob3Nk0zo1u43LM=3ko{dla1qUePr!aX9d_fDF_qKupLP
zmalyoZS5T3mAp3YSRV|#Gg)tY_zg{-lMUjGzlqzvyOi(yU1BXilX3R$lP~+$;*7)B
zpgDLM6v$kd=kNn>OtJ{o-(I55RyD+TcRKtp!@Z@A;++IfY#r?Yf?a1Zc@3*k*<Q74
zPcFN+0x0L>WnwM%Woez6VE-$U7js`ovL-IYZ!RP7EjJ#6N6bOpLN>n(59AHqt3)?4
z8H6)^iSs8uT6@=1p~RhZ`IkaR!Y1sC4<=&aCU7gcL`745U`L%0ChVVvz7_FwFy9{M
zJ8eOM&PuX6<tX-Tv4?r@BEfF`e5{k7ipF#jJ;3^eZ&l5xPR0@u)An@lvX<6o%tK)z
z<0|Yekv5;Ufx(D}R6m~e6w)%_s3ZbR+yg^HF*x#m26lJ+ORH`4!0dIKN&A)6#JEO|
zaW`6|EJ*`hyk}w51V>C=s0msr>mVa^723+p0r$oAc>cT-x=y$Z+^dDsrqFP7xt0KV
zMO#2{#T#|Luvvy_7J*~2P#Jjz9<5_%U&lni%BwK9^bBTY2hb^D%oBEfEcBX<gbU|3
zVX9D-%56(V!`%+x=B5GzN;9Ba;Uce!Z-$OGe^bGoA;SNC$V6Sdnsg;s&<}raMddk4
z05|5KtZg<8nxqMegJS5OzMMEOVl27J5OU?QAHLY2gf1s3%j+(Kp--&N<St;WhVi(l
zm?_HdR)AeG%k18Dhi5W133(OAQH$;279N_7BZZ0R9;b;Va{@tr-Ek1r|Dhb`jdj9H
zu0-Bz9IlvIha5gn@<RS9sO#H6<S&1^aS9g)Z`l!!dJi>iS%QN0Kv?%M7r6_O*zCRp
zA6FE>>q#PZ9>tSxn~gkqXDQ57jAa_N7&QCWhCXEaf7gXap{xL*H8~mhCuW&sjI%*y
zjfu2fZZ@>;a-u@TpTxE|1n_17Sl>2;%ty6E%otUtqms~X8XpXMXTq&-%1B$vP(8L3
z+QvlDttRXY*{3dTE|5~u-@|cW^C7&s^eX(v^k9)<Et09^g$hj(kh3xaT>grH+7D*f
z?!z+9{^h}cdzzRhB!srS=3zTOg2xXNG0%Du5gk@#v&t4|W#6ldvWyCA)S+EJjFvsV
z3^_$1=$wBVI@R_dKXk51Kvp`95V2fq6IQeJ9hSC^V0!rk3w91`!SfXrxc6Q)>V9`;
zY!$|nuH8ZTGtOg&!39vS)rKB5F6hr@T$wngvD#8jQ(umS8Nw8_-IvX>MNdOnH}hL0
z&*!C;F^<>F`;=ReBn>cCqd&*BK+(tyIQ_N?du9osj+cu45pguN$qfYRPo(y#%mef~
zh39-UhI;ZgVOifO^p!=B*p&;}uGSg?AJn1pn-ZFp6pyVsH>o;V4+;}*5`A(>s{Y;t
z_}5Q>J>zCRbCs}+#y}97CR4uakjeW8Oha7~3BvZ3n3W{}r;a4Z>-t1p9;q^|?p5Bk
zAD0+6D-3o1Sq54!7NKtJWtc1(hhJGg8ZgMTzzXxAU~(h+h<$N(@^DoA&7O&0XOXvw
zvG|$ZKzl3&&gBcz8vaUjzPAlJE-|h8VV0NuZUGK3ujj4%?Dq;?P7htT!Op!Y(AuU$
zTu~8}MfE6?InpfMdfuM1EQ@s#)9@~~2C>E#!gWxU#<TlMHphS$<TU~~br!^87TbA$
z5uu1KgRa|8Nr#p)XlSlR!SBDx*L5~%99Kt^$L_|U8Wq_2p9p;~^%HK^2kCb<&zrq>
z3hIVW1{t-b1J6Q;0qg%V8Ys$!DPviR5<2F_V58$6cF$adfgN)}nC1c9_EuE4Q;B|k
zlZ1y3NZ32g<~M9NA-0-CTo>hFXnQhtSzSP##X8_sc?E3!jnI655-yi-Mt(uH#Q3Tb
zR{wVxI?L+8W1bT+YD|EIX1npD0^=wdEy1?@aK=F!kD}*E(w?j&vLkUD-hUGcUL%{4
zYhHlW?~|ykc`Z>b*8=fl7qaouC4?o3r0udixU|R8&QdQlN@xN7O+BRl+DD%Ls<Ajw
z>j3WBQK--SIyX+t!Jnd3=$<@8)c<Zkj_F1+vGP1xmu?5Ww-0DfP$~%5OduaMf-$)G
z0x%IG?G7AAd+AJYnY#wQ>~ciQm&`Xhe=l!|MJ1|tS`x0lm9)U(Dqhi8fZQ2ZiPmue
zbeC)=B_nfiP&ORDyyipu=pQt6$b`itjHjGiwI;Qa1~B8SMe8GqWZ&F$T%mFnXKl&C
z>~{fZRKgh8nKR+>uQ~W9Gmz;Ng=kx*gQ`;{bc@GM<i?(YFRQAc)*uD9PiK3~#%L6N
z+RePYRdjaMOjNs72(B*H;M+BpI0s7c&?<GjTpx=IXR&j2%LV$xfO*r6Pl4z~Gfd6A
zfd9L@UxbCCPIfC%>d%Bb#c3crri0yK^QH3w<WYPh6RQ~u#n?%U=Gv5^ao2BRrF;qA
zH!!}%lZDU~6Aep^=V9K1Vv_fKGuZl4DsEQ><%#BWj0@8$kSTa4m=7uyC78*5L^~e#
zQ(Ft91LP@{mp8z?k2$FHR7~{cmw-J-4EGBeM~NRuVvf9^ZX->hkNI>unpn*;Fr8Q?
ztpWS?BC=n@63r^7z{i)7P<SB$dmc*IERE?5BQz=Aa|53!7q~qm8#Byvai?!8>YC?(
zZ{#bfX>$zn;~S(4la$bRxFv{p#>4+~3R$xZc{;~zV4koZH6P8#jyYAxZ<r3cUF>Xw
z0&-a9irMZw%zFKe$mRx{Y<RK_2j(#rN5N=N5?rJ~QBBmfEsEl*eE8CM7_pu4L}QDo
z>v3(K)-^RS3)oE@4yAx=%xPZdCvS*98xG16PkMXYQRL-?!}8}WPtqd}CvMk7$vMVe
z+;@%E7A{5pqfQe;r6f4@=L)zElYw!78Td_V0qx~=u%FG2{$uBL%+mR=ab764>MjPF
z!p{E<!$4Rrq(OeiL00-lqI5zAM_(tv!1U>;&U$Qn(N~_{t!a?BvlQf?2h)YM+1R$w
z0otb?!s`5sbexY6Rr5-rs3C&sARZFEe_BY!9UaD#dMZt1_pZA4daCaJjylvQq5PkH
zSS?q={6}^q?{EYRdiK*n_a7usVI1nClo}d^k=7gi)cSKIEm?mCWBU_ee->kBEI1A$
zhFjsqEi8ZWurZX#vAO1-HzaRG8+9G1hM{Up;2y~2adtVI^fv`lu|R`f+%Ouq&1T*z
zzbU{UO-wkM#S+=82}C?M4(2l*P2V;iE)TRueK{XkzSRr)8GI94og(Tuc|Ev=<}r@%
z4dStPB<a@gC#gZBsdDNw#+}-Z$*k{R6FLV&zib1Le56BXw}2l%g>mRb)OX2lF!&||
z{)HDj&RbyovT?A<p#ksjje^Af5Ky1x3EF9lL-}(mcAx_Ut;(VHQ~`qI!pMCcLo`-s
zA#y7#KshOn_ogHZh3p*-bC|%if<?SYJH~fNVk{B1J9({M0wWh(02d{1aB8at!T2(%
z-n#uLw<ZxHCk5e<Q!@A@ULxip3D~>Li(b#%h(jA|$h!6k$UV22_J7y~wJ~8(ooR$E
z300_GY(fG;W2n!f{qWo%3i)?fFFE4_T~RH@!NELYHhu;#^jHNbyb+<|lPjR{G7?*R
z?=WtS3$|6Up1D~bGK*i5%+>2b->8?^N4zJQV-;Y8T|Bx9uj0Z9MYu|FK58<}hQ5m$
z6ct9{p|a(S0jYtf2jbAzdMwKpafY_59FX^$4)x1fX0rDfrbDR%y_FHrxzU?wWy}Wd
z!+%N4A4A|*eKL`HHo>?BsUVBk2&<Y-;^5|m@a1M2LU|IjKD<VBH#t*#hnb+Tu$tXN
zhoEO`8sXk8qkT*x2aV^DGtNbN?(qzCoazKeh2_xtPY`fpFYrX2mDELgm2d}kfidoe
zA+tr`nz){~$t?vR->pFBcb^FNT`*(13Sj8{8Q>h6W1{uvK1ugUgy<C^5aiDCd5S7|
zu9n+*y{>-~-3GQyxV;gAtwJ$hau!nSIWW6?6Y9542E9Eiz?#(+G4b2rr_WX-E>76_
zN{<>#n=t2h3N%|T1Bd8*>|Sw@XQ<mv?bVpKJ^l)=?k&cIxtiEvs0x3*)xnIR6L|E1
z3hD<(NgI?CU}$hBI`5tZqMy;k^AQ(I`*$NhEfs@)MS-!(JK`Epj+-~cqU_uVw6vZG
z3)>cBs$2s41i6Cp3kl^<xyQTHbQQMDUWSLa$WZO(MW$(}B0(uNz$r*FnRhu6EtLuM
zXE6`w1ufe8{xm&3Qy=w;GKqbT3W4bliPlFRh?ZwaBdYC?f9?p}j&4Ez;AHCiqMKBX
z35L&GHe=o`1&FnjLN%?UeO48q|L7VjJUt7p4|hTIwsq*=>Vc*iOvA-=MY?>ZVe8xt
z^Dc{^&9NLftZF;D;UYYLAC3+Fr|{y~H1yvc3VP2q!7#Oj6m8gz(TqV9^C|=#y2aT1
z`~ZBg^JKMa2E5hGg37lA$f<kAvpgb(0qvWlMEf$~e9t1GeYLnXRRW<-PigllMYK5|
zg$vo-z~|H)aI;^@=BtUMwYi-rmo)QcI5V!ttL5PT{3rzG9A#XUdQ=D;AZ8;6iI$-v
zUOLKVa{N_Pr{M^2?uSZT)$$?C#0fc*j}lA9a_Dt^L({L9gIR~rgsb92Cso8C=hG{R
z-p>ls={KE-9|)<?Er!~z9*K5Qr5L(3h%rwV5!bm}iS$k#JiS@ZH1g2^0X*c7nk^al
zdJH~wF#liY1F|nP8t=!*AktwZJu@pE<+*I0RObec>^Cam%fpG84QMnw7X*eEutA&$
z-;*}s#i`S=bJj8VxH%tW96j(kE(C6`BCq@TLz;JdHnjOH!tNqv@b6xL>WMipG-?}I
zIvIkJ;UOsL+zZy7e6UJ#Md4OItehi<;x`r$6)+#Sjjh2%&p67hwIga5#n2_o#cpA<
zwC(gt#<&?yx#c^gf*Cg`HZZ-$&*dO;R3zaK1T4QS10weXqr-_v__U-J?RHaGe`*^J
zG2a8fBU&nA^?d9$mOq|11LS&h*<CjslkLs$kXbb9z3ZU&FBIdZ?L5?&UIQ`9=Yk=R
zJ>O?1gXY<447yN8Ke|`p;NP=AxR3dt4SpEQEsFvE<!d0=Zw|K4%ISAYRWv)?W%9x=
z1NstPkPF);Ft6`PmPK)hX3e^ShTBHKgBw?{Kl?wPV8bHf%QTglMK6g1;{~0X9fCct
z#o+w9llJ>IQB&s;INA3Qn~CH@-;oT`Dc?YQE>+S=;l6n6c{0<+2uWT`81?x^kYipW
zJ#|zD;+gy5LZX22zAu<uyx0PRXU>At!vt_Vu@1VOfAI_tG!eDUk+?#y0@HMpP&VbF
zN#81G@bz|O48iee?XN|5pR+*A<0D|hhIE_-6EWsp4w~&c0z2Qb-I-h=TzN7F4XeG`
zu4yy4-gJe$bM+_^Il`dG3$DZ#V}MZ=-8#PzYM;uW+Aa`$G~QF$@OM1^^QoX}eTjG(
z7r=Xe0b`jBBf8Fp;4*szs2^TJ_}zBU{i6bO<yS)W+i<EL@SV;xVVO0pXGrS@Ymm>|
z4CM{YxS~#i?Qh=@joHyqu-ObdPNdT2&CKK7&oZ|3m*Wh>P#m?m0yD>qgDgE;sTn^*
zA}D`A{hN1!a&8OhKYpLbIpYPQOWDNv-W3S4-v+K@Gr{^VKG<|ImY(-|FpM<Cx=c1x
z@a*NSyS){i_Kjk@)gBUEA^?}-jR0@wgRsdFT1RU@;;AN-x!jcm{O~5WA6Y*1Aw@_x
zL|7;oiJYM6G}K@u-ZMLfqJTc)^N{(2xBe?p=Fg|?m4B1=<(X)FC6%Q9EklQWd1$YJ
zJQ0U6hdYXw)y`qwDW?h=ni`5L6OGZ_^9Tyg8IzE|bg*{SM8>XqEtOTafZ3+|)JEC>
zy`kk0qs;W7PGP_m%mv+j$H_UTE4aZ;#y;O6Yz?;qg{}`&ZPOUk8ZASs^JgKZXB*rR
zCu7UWP%sY>Vb9ML;=eQ*ysn9{ccVK5jjBOwp#f1-%3ylZtwjG~1(b;7F>0bec4;u}
z*N*4X;pUssDl8HDzAJ%CLKd;KHV56fD2V*0n)w)>5&f>kjA7IQmEE&(&-#hzr(Orj
z>r`RTOdpKfI~bGGM5?4e1FR2LgMHp|;ICF?yQ}>$c>XB$l?<oqPG5-Z`wr=&rW7#K
zawSGj(m~hZIq`_Ip+;84$Q7R<zU<vKFF43{JmWx#?Wv-t$zk_Q8CABOO#9-`;=s&p
zpdaysSNioTdSvlw_|bIKV498vW0^PX6&Jd*kMJJNOo!qAS;(EXgXepA5_y%p2qSN)
z0N0Y`(8yA-)kqhD-BzRZhC+Dpr~o}?mVuu9b#ldJ5zbw)8gqEnIO4B748P=tiQ_L*
z{US5k*G1veoh>YvMhkqjLxHa~N~+~J0z~i2A*gm59+-a=4~Xi(*1Hi~hn~@`v)PWa
zGzRiUG=M_<eqx_{nWwzd6Wk1yQTF(($(h-OXdM|21}eMI(btddueT7s`zfB8Wj$v7
zy@#|Hw~*FC708cZGrP<jl<lqpCqxw5n-amCV_=*Z18fe<bErOzW_vnm<Z(A@7JE*j
zx0(6QS>5T{L5a+TX-WSmL&AzkG>aBVeJ-uX%W{d>F)EJwMOVRqKL;{Tu{^MYJ5VX5
zi(ZQeWH~>Jh!}0?tKyBgq--TB-wr2vn~wrtkrI9TPU>7_&3e89+Ed9gfxVcY_uYTI
zp7%K@zgQLZqM07}@j3W8rVv#0`Ka@FA=4;NU|c=M)m)?kCwq6Gh{yC<tLL-c>^a*J
z6&j0{2SA@s5_WDag5YMxKl7Z&8?xC2zIh9&hhYopIWiY~Wcxs*f02&wOJRAzU#WW9
zHOPz2gZ4wWc--ki(sL>bq8zJGaPSMUrjAtHmq1;*lBu|#`Nvppa?HF~+V;j1<{z}j
zr<VC(?d=R(zZv2Q5M%%4L84a?O7vFTp<G`TGD3AZR<GL$M{mTU{_XD+9zCEzvz*W_
zwHAs^mt&T36FuOY&-e(b=<Bi#8jI7=aeg+@pVdkD_p{JtFFPxqI6(D8b<l7PL*LC2
zq;G@_9L!_!UIwE0t0TByDgkR3A<gS;B+9?G!CR9u2yhLi{Cyg{nDj*YC(8}nY?7%P
zV@n#ljH0Wxui<gXh8Oq3K(EOL<pVkyZ)-VP|28J+6SrgQ=dGYVv<pu}yJC#{RB+L^
zg~9AuXuG+eR*z1^<fY}P;c^a@cEqBWd@&xFJPchQE~39Sp2XCX(coh?9(sp;q9q$x
z&Wgz@@HZR{ZZRr25Fdh$lZx=$vlwt5=K~)tna0H;i3GZY;oY&zu;*M9Iy_=IxQ?Yb
z=Xe}G{#A^eZ+>9@_B;l>{D;a;dy@_}XYP*+C;LhZQP*V(?LFE@s_$I}W6yDPy|*d$
zaVw~vZ9H=<6O<`FA@-F*GUUCLF+(;2x2*v^rW^uemU9=*%KM?tC7^G*2xHc1Ly^}`
zgkQGslg+HV_f=4Z^)ZCoewaGXO{Kiald-6TvGI3_@LjbL>IRM_>MGtebWAecFm?|5
zkI6(gh0FNDIv2!?wTZfRIhth-rwYq=LrZTYSUy<`t|t;uA-9t#yIaAIY^L`gbqS1>
z+KDdv?Em^Q?bXC!#*5OBYD71}+8VYi`Hw>OzQd?@DV`Yq@xb@hESLJ9{Sb3Z59}r5
zh<(p@>C&=gm^C?w&ORoG{NlU3M*(6OT^fOj=3ek9yap}n<yl_J4I&77P9l<5F@G8J
zujLj<WO@G*(~de&?Qn;_m`<vZxf=7VS$@;w2Dq6K4c+ZoaJyg=b{zjrM{P|<Ta%|m
zeZmK#AH(*=#auk6c@#O3e<ZG2ZoHW9DMUeL$$S%@(r<%J_>Il>&6>xtjK3J{T>hKL
z+DA!4f9KP#1D+sGo(FFJKj`}Fi_urfjLdtIhy7=jpm+_-X8Yugt_Og2bLW%JHe(R@
zPoiePbX&)^p>Ui%jZ0+rqhCMKesdNB4;$dfXSr;*>`XJ69>TTXReE(-J$5neL27I^
zwI<OhE3^PzDk9v}4Bqq-F{=LQqr$x@v=xs6XdZyh;mtU5v>4Qn7?F;b(PX%WI@2Pl
zf?HrRW=6<J<*h_?O<|ek=Y^<X`hy51QW9|0mgvlx!R#p<;M>PaJSN-Gw6XKi*X;_=
zk$auC#mmD1)(gu<IZ%6RrUyyAPBm=V4DQo>^2o6X#fH8R+Re^z^-}DfY6lRv3qQV^
zgsC=um^nBHwSN}ljGxTou<ejZYQRf^e<py)<%ZPwlpRgjd=g!&y=cra8M;*DL08Xv
z>bp-BVsi2!=Cd2in`4@E#!4@9h=fH}zF7WaF{*yMN8&u@qWeUatG?Kq8jpKOYlmmx
z8Fx#>bwwaty#gYC9t4MHmq9RFmvl@1k=_hT2XW(U!e10i+jq<$!sQL*)}3Nhx0wvl
z$3|g|RUz{4O*PSv-$yjGvY>nAExLpK&jF7NAfVb1^gk|#t^;RKW}_+XjJXE){Zb)d
zr5a`}^^}U;)nVecXjD@Rg|UkJu%~ew*sK&_YSsqW`Y{dckN+*LxADc==4@y-<&zmZ
zcOvJym$bPn98T$_;?9q1DEz6){JT7y=Ntm#v<sl;$xGU|-36uPh1hn;1G?tEpu)qJ
zFiY70`#eJ-wWbPYooGQF#|iNLhY;JYKBW9AHOUGEcXT^+m+%|%C3!ll;m-U-kgjGN
zhc7HIHHQ%G*Q-(2;wUNL#N%atJ&IeR05&i_%w#pnG2<Gcwi|4n5Ccw;C2&xi`P!z)
zn;8D&qMSQ22Hj$M`^-x0$n*lYDPvIeZ60xD{CA!0GAc`-1IfzKu=VR%_@u<Vm;UEz
z(+E4v>RCm(Q5`g6q&K#238&)qIau1Y5f$p4(d$3PM&`{TJktwcZAU12NznL@1@vx_
zCvIB^wl$vy&3x8h<b+A{dZIy9gXK~lnFB4`F2bqfMp$;D8h?#eK~Yf&P3^f&Jv#P)
z@2QjM5qW~zs))eF`~!_?9u5A=!{NY$IOIO^<|)ZhaK%!df;!72>Ulvq9qS|u)DL2A
zN-ElXkYn?-YIJQq0}9S#;qoX;RPd^&?P*_Ve^CLEU3x=0H!v26>K4K|5oZ#-Is`-i
z+Xz{5^0@vM%R&;SfX<%tsI*KTwis06rn`;kdvr1Qhq&RFunKTpJ3t0?nHOTtSjf6t
z!LwS{0G)dhK*JD_ll8hbWvT_{r97q`>&HNA@G<CDJ5N*B2N1ob)4(lNA4<0}U4^SP
zCLi=e-Cy-gqpE^=?&BfPx`=d#f0Obr-C{bR4dg}MHFWu91#lsOh*pU(t1}M1JsyMH
z&EC@HF=5aXy&k(q#FDmy>7Wsj1&+b~(7LLQF?Ue1=F1K=`&%p-RBfTI6|ubc*Cgm0
zn2zla#gJ8(MTBEll2*n)b9}xDN)I1H?mU)hBwq_{CnkZb?GSI|l4zJu&Y<X`0pZx^
z8>xCG<I2%x=%Mh7EHG_E(?#VVVE%REvD%;+at@su>hM7HdG;QJ;0DGla{MtG>`%3m
zj(d>^<L$seiRB#ZWA{x(2#VX?@YHQKmx5YJ=gO@xds-@RbM{NS_J*Kp>0=_CvziDt
zC({>Gm>zf1d(ymU0a!2Rv1}kK=&hbfv$7`fGF>m?DZwQOs1m`gi7ME=`Y91lZ2}c-
zYxHd%PkURx5|M@*<*ZyLvAfOuZx2FX_hSlK+B11oD&_b<H38orlEciQBpP}vjq2av
zGvEJKmix~9{H8l_T;h0Cm8}Q${q~f9C;%#p4x#{KrTMM`wCt)vC;4i)TYn5OsR=qC
zYm&~E5Xg?bhNoBtjjn7m)2E$5m*Usdy7fK1GG-ZW7m4w<`8CX2Z3brJxuEUOI4%|f
z+HkxCLiOHKRpokeyCVvRN;k7UG7WS$*-?d%k)Y+d0nD$3z*7A<R3Ey}m>PrhT>|TC
zpC?Ow`5olI8uq={pTePu7vOm!^R14|13x+$`B6%A%7F{0CJP7I;C^VWyi1~djWCAQ
z6oQqEr|((FGz>QM&78Ry;OYniF3QO9&oVhJ6`|R)8)VekMoj;n2C09J(M*NKIPyL7
zel4bGJ#__DmcJ|Ae`Xc)V>N?x{5d2sZ1!xm7kzajc+J;6p!(G)&|gQ`oH?1d?Ys)Q
zyf0w71amNZ#MpNR6;hOAc@B-2pmT}~QC=@29*%Q>e@KPwl{#SZhFDlxyA9jV2ZQX_
zKPLUH=_K=x3&hThg7yx5miM$BJce;e-bKbt4b8-+d?E0imh(6cqmfe<ftF(jX{5$}
z2sm(&hMB73)QUocd6{gET5h7S-;`LMKL~lpYr(_f3N-ZQquBBd8L}wA^xI{S-z<Z!
zVG1D9oXhId1w?hb7W`oxt!0XgZ~e!U>hB#(Zt_|ES0Mo5R|>b-4l_7E5xju)po-}*
zhn;0&#vFW_*@*Y^n&EGaP<$TfhMPRt`|i#(4p9v>=)MYcJz%*=3L6>2^<UES@*&lg
z-6MkZDi+*1K&@6E$9BK-&{UEEoTAUvqb45y=rawV;cTK=qK1A<6XbHl6O3b=L7*^$
zX02Ke?d+LystSY9TZOd!U@tH4=4aZq*A^D+C`Rc_J}f%df`9$8!pLpb=yT5;496}4
z7RU(N{{@3b`6qg+ECg5G4rk1z7&0S2AMy%{L7-(oGrRK$|KL$c&!z9QR_8nje?6dP
z4tmn`8X<P%h)HdIGCZ0vAD2e?Ax0g-HEOk3ecT1idPPKQ(^(=6XTN)yJr&0~lO9LL
z*<KlozAFzx<qHWc^u7p#Jxd_;T0L=08K7q4BB<J6G$?HAA>IEO@OVxV@K`0rN*cp<
zNgALq%NCUnH}dk^60m0VVifDz!hz&7V6{FGI86$eHI6Ig+-BOv9p9u^dMfZhT`436
zF^}NNDa@}pmEefiMBna>l%te`IZy@BjTh05?Tt1u4@VaNJ5fLKg}6-Mz`M5;{bB@I
zJGKS#K@P2MkHSvT5At{c#mw)Up{LIr?CDeDn)#S^nHWJ+O&WHuxkCK8aWsbA_5OEF
zfqYIwj}aU?{74yNt>j3PZ=J%VLrd^g$!XLp$fezetUi<c<ehKWjOL=X=&Byh(_=b9
zg?xK3_T2*h{Xc2yzGIa0^aOB@i9q#*74$SP?`>HReY%u!5l%C$^XtR7_CpqabO{5=
zi(@GNVhjvwZG($D&w%N#Yw+BIX~hrr(D9+(Oan3wT)b<U25~*|%F|K(*dem8NdUWz
zSa!zhjiB^a4}xyYgl5@$y8C+|hAwRZ{w8(FTyr~Y4|QgYj&G9h3Aq@cS%Z1YmcYek
zFBI75P~{W4)J;JSbl16)-qm`<(1`tp*?u_f;98W5c-VG6AMZ`_!^Z>7m})CP<<A86
ze<;R&#hdgoJ%u_q(y8AyALKX=lZN`+f|Exsbk(WCTj#O(=Tr_1`zAns@^MJ~=L$HL
zmq5P~^O1R|(Sf^Hz&&jbdI)2n<lko6&Uyvxi6@+OHzl%1k4UOd3D~aM54I5tU~qRM
zF1rwidA!H8+qH#+4vhlcpxIzt@{D|2QwAQJcaqD+GtqbVI)GU<V7B*Psch!oQdfh$
zyp1;-u)vb}rLT#|NvS`^yGMa<d7`vCLJ8DAj)Ty-_0Yd9Q|h&L7V1yF3|VFVywFve
zuxf=nc5Bv3eGaaNaS1{&ep(IIks2VltcS9N%A_xFH=fo=L6_=9kah4Z=}t01rPOqm
zQCG#@+eR8`DWv?>jG>jY5jp4Cp=(=DN(Kt4EAx+u=NUuHw3l@AjwQHu^cD2*e@5+v
zy)@>01!$SE{K2&^NY_pyP?{}+qUVf-^e_*_8^%y=oqU!}BE+s9M^JD*Px#Ar@wVBX
zM$VjXVnLrSm30o|#UvB7?g~QAzh`)SqfVYPSx4t!Gj`kGm;UFz7`Zpqd0ehH=5JBN
zpsAbSet9LdxAl`)2LT$~4MXFP3V8nA49tzwM!VOk;NPhP<8POv`kFT+=^W!oai_96
zqmuZk`lH)|8KBki7nxeBhDwri@YNt6JLlC<{nRDkf6@{<8=0@uEsNsa63Vf<Y2x@T
z6ojQ+1g{q2!qP@uY-fR;K@!aRm+AMj3s8C+(OEE>_FKG=2Ic*vZOThQS4$lPdpCj$
zZzEKyF>U1TG@3Xj3DjGHiSknk<bA0oiMy_#p6YZI*42?2r)*G{q>)Z<6}W#!2-b-l
zXn!Iiy0$DUE=d4w>Fhj|SCSa^+k^790phb?3zf<RV0hI7y!JOj<<Bf!J5hr1KJv&<
z-EGpIkjEHOyNGV*Z_@SGC769Y4mwg#FwYqCbLHNoM>3}2E}0t+`R_vgv=CI#st4Th
zjB1THgwCbD)T8Vmv46AxKE?~tpf46U$2Un+?NZ5?;c-y*ssfJrTtMZ*2B~tkHBp_q
z0{y#+!6Slc3?7((T*f7MG35-n$uS>mVS?28;|Hp6^yL4uF`Qp+z>wrZ++$vcUCS;K
z>pZ5H-Q-1ulNUhB6&2LBJdE%1V-d$Nt=x9@KP(z)V%aYb&Ovq{-gf}oEaR!+)O1kw
z^#q0cMkGBs7hE1z5WzBAxOw~%MD4kV!lz*ny!8Uk%uYjPulv#<#Wtd^HX4Vf{i41b
zYC)8ehm~<`PyTQph}S=5+MC%t`vY&JYsZx%S0<umdDbZ0^dH%1n8CbvxkO-Z&w4PH
zU;4EeUIeIMj$<+i%;R}ki}upmGr@54Xfnud$MNL*62Qfa)tr8eJ>;MdJ<sQZ&O|Ax
zopJ<Koy~D*`fbu~kwQBIlc<l=V^Wlz3G;pqM?J9!72JjN%Puhl_H9B_Usj);Qlw-5
z2}HB&iV&9@iv1g+$T-hhXi5o2kI&Po?7$SMi{=zIs|$tgfh{QSa02LG66SlpC3U)L
zgd(%s)ZCyLo16Cm=kW<J-lI=j=U*oB*>9-X^iRaq#)>H3NQ2hlrDWMGQ<V1}31`-;
zV!Mhn;F8Oj=%N65%7##MG!bp*^Ks~e7&^<VaMOl5M3%GcZ#S7`t8&5G!4=yJc{Ho=
z1eKCJ(6{<Y3qLcK_be&M%*-KcTri|g8Bem#HbPZE2&y06M9r0y(0a~XGFOo?T8+JV
z`gtcIxN<MLF`kkjmi_(87LqXfJYF0Y0T;4^usdiNo3jPO(2t||`CJZKx?hCQh`ZFD
zTZi@^5X6bIz;dw@sMV%m+0|k&|J{hj;qyT0VHgR>(8K|~xsb<dZv9np<P@7Zw6R%8
z|751s^79qjpAO-5pF9D-bBeL)We$w949A&AOVB=8g?fw#Bh^l?i1L|BJl75Oydb}|
zm^o!G^nch-Qzvn-`&%zN|2@EET{IEdU+3wX@27t$j%PbyFSa}Nf*{*eP#al;j`sHm
zf8{$9Goz1GT*I`P@_E#CWSTT!#}#sYjRb{{C$Zhw9yt0n1QH)FKw}MgP-JzUOeiDb
z?zcqg>0Q#j)sJ+}m_`QUeF!I`RorP|1ovqRNY%^n#vQhAeZu^3m&Ty%#xO`d%Y55E
zSPv5JgEl`eLSpJqx^Q+J_R2AB?})?T`!CC-8=pY=3IPZ+y}(%41a-{UW6ZrLMA;;k
zw8|Aw{U`FI-_MfF9J!9o_RkUV5)=675`b-gLNKyZn{uDMrMk|upd?-iq=DhsmC5#<
z)Elle)}eFJMXFw1iKj=F<23Fp)Z=G@T^|Kukp=b1_G6ltFxvev2b70JOQY(iFuuSF
z9J+l7LYhKQt#1*QtZg90TTfzFR1^`-iz5Ro-%%gi$&h99fEueEr)p1gK%iR;R@M1X
zQW#EDCe=gp{>yOk0MiJc6QJl8g8tp_Bs0F6bYD9seVvt!p%z!cx#bzzbifCVub-v9
zz8O4DGt2$k5X17jUyzpkv8a35jdA-VFefG-h30XjHPD7+KMzLE?Dt}q%n^{IJQ=$S
zIF!>9ijv25tS>L(iGDt#-Sh9$_7~rI-6M|hV&qrA+9C@aFnU17AIL)6QB9=(y&=<R
z9YSRl*2A`arrm;>*!AyD)E`X5ZtHi%rBO=fZel$8iYPoa#6JJZ7U_hRJ@}1N17k94
z@$W}z==QaTmi)69%>s%b-y{;H@uN^2S_92)OfQ^y3FQKE*{+q3j-mn*b74N@1-oNb
ziYa_lF9nyZ2--J?Pkje_q*3eKQE|05cDOBs<gR4EPi<6ictEOTkO97fG1ND$hD@K6
zje4h!gT-(We*BB+ly#W*;^z=AbgCJMf2Nb;XI3Nren@m~mqTi1IgxE*ch$%%ypi)l
z(D=*<s_@y8II~^#$dBhiM{zm$>MtOiGj-C<pIIH2kW5_ES@zn3Pdtueutaxx2oAnI
z$XLu*KxK3p$ZUpqkprsmVt)o$s&>-N8ac?1{Lkc6XEP))Pq8uMz42$AB^ooVP+?Gu
z`7g(UfX#!w9v0vY?>XrE)QG6Y9|6k;f2fW@B6`fzCmS>kus<o2xTG?^Ud>+YG*|_*
z%4=|wb^~@S3<59j5Zt56`aavm5Wt^6_+oiU%(-3QH*Ys?ieY@*7F(LXxdH5}&Y-Q+
zb87f~1}5HL4jl6g9zTB2MB%TMkh)_(>GR5lrWK8#b0`p=td!x@#SLgbYaZc_n@f8Q
zFG5$WGPp-oLJ8B#<lJk3@K+Jollp_|%?-u>mR!U!3Gs*zLm)LTf!f<!fI0-zV6#wc
z_+5^iC52Kw%R}((-wQ0;AqXGkFve+mujEWZBu@6fh{+EY<9@z1Cd>=Ox9<fgbGm?E
z64-lk<2ui9CG#Ju)Pt_~QkZro2YozNg6v(VN#B0PhVV=Wxzl`9RyG2)t)bv%M!_Rw
zh&*gOii6tBmp`ouq?>l2PEtN3c8vvnnnuR)7vZ;ArGO*9(AkqbQNCq2w6{#69rtfi
z?7af}4&~s9dkrXFaGwg!ev#_8t5dG_GM;;MIs}J@piW~2Dr>e%9Y?-noOFK>7`cM_
z?m84GXoK$k5{NOmM6@@RK;`Oc*z#90wmHp#R5?x9<*kc*SF(Qis{$1L%QVSPj#1we
zzS#cK6de=8sp>5$n7w#q@}7MMS=+_9@x3>Owpy@0(*uRJ`k3vLfVvS8RA#!8aKFE#
z&c0_z<Pt(hT33Rh-#i@tBm+GP4ilH$I$|haj*$<y)6i;Qy{Z83^hBV~s9)4)vNyI3
zMdR4-d^}bjg|nkg&|~r&`pHub1!au!T9AN$EG*FQ`c=@?A46gS5p*KizUy2jX53K4
zBDJx&p<RL+&oaRHF9%RxyckM`CW3AFF!<xa-rv~g)HSG<syt_T(k(o2&13Vaw3l>u
zR0<?oR}!u4iP&9cB2^o48WkG%!{s<Nl%&i;|1?c}KUs?HGn}F3?*R1QtOtpGXW5QI
zg8affo<dV9vA@UiQgaMp!Nf+4InF$urH4UWZ9#S%6aaJ<qNw*d<$kH<jqVkoqecup
z*`bf}Un{8cp8}F-c#njJT%djL>u_++AL6!y?M2Ie(*s&n;QHhd6-x=EzYNEPk-_ME
zAcQJ0ZiI5Y6To-@?dHvg)CWIF+iD-scMm1w*`8vZg9h5KVRsy5JD<`#usm4;yKbIi
zj15*NFlP3%6b%%lr%JuQ_@NnxW%>F%r=5n=A!uJNb?UJ}ebw2F1LRJOmL-99l#qD`
z^U!Z*F)Gp$yrE4{?^_&ge{Kh}pN64N@HOWD&xI&n6`lxd!C(I_#5OkDKe9-M$|i4#
z`tb>ny38C5kZF&z<BT0_vq04%8GJWa;EupJtXfcv`T~|Os*sC456VgZ2oWAw8v+4c
z1z@bn0kadgq+0W)fNUZ#rmYHy@}Elm>lQ-Oyf83pdu`Gg)ClqBi}6OE0k-Wp4*$fJ
zursZU7ju0NzT+K3J*~Z9V-XAU{8KUax-t4PU!K4-jrN74W8XmyjG5R<{*Jzc1sB)g
zlI_dT@#bo%aWi17Z8?cYxdP$5vxkxo+d=Jj4gSvuq24=#m<_L@gJ;JRPN!|1to|Mu
z3}pI<fli6QRY1;7yNq5%Az)rF1E0DbVD~r@+&)MMKY(${UnY>>Q%ljz{(<CUN+S5O
zy@c=iy%hhM2+CWY68`aFv@hBno4zo;%*uCC{t4!PA#4WLzk>H_yban9<<j#Lb8*r#
zD@;6)PfoVyq4k-4VD><T2v*EzcU79yaZw{}uq?ukY7bB_n}X?!na+-RE}F-9qjoK0
z%N=I@hRr!ik83tM|CWF^FC8sLPe$1mmLYQbf8IT7^!R;}VH{b8Lw^_)mCwP*KzF(@
zw-Q?&k|A<o1L0?;O1KAv;9LC<6>W`#aVgQD?xKe3JLO2*Q!8MAYtY$z7=jX$iRV<7
zlb9$V7Tqn_a!VhD>H5%hXc#X1CPm*38{vL*4p^Dj!v5(~QDsjPxHVj-;w{X-TW5gI
z27y#pFA%ynPGZ`o*(PqSmkB>fB;`!4FgEKvf?t|iz&Se?)W6ukI-U{6zwk!o>%V!G
zZ3sPaGazVg76@+FlRKwtL0x?<rY>T$g4M;;A#^NSG3FHK=5(=hW&?Bujliru&7|av
z2$V<pfX}B+sx#UQH2##r=QU+$8nYYUy=g#oGh3p*{1KaB_tT+MBC50U7cEHGiIxtJ
z$S|*!I6Ev3bapSs7+Qb=&V1;|)dl;$XEfquId;!_Oz%e>!9x!2s4$%E^+IDo?t%?!
zb93O)XNuKq=IWfH16e+ASuf0<?e{{k+f@OU!51-J=?r?6*Ry_DL}%z#!j2OcP}7r#
zy#W>AaefPZcqo$b0yYtzW+V#C4M<6-80<~^rJRH$p4M(Z=sn7Ek;ZT!GW!hJ7BH^l
zoglRJ+6DgW#mKW@+C<L)YNuQaJtceLWupVi{@Epk^avWvi-Td6h3Mi~OcI|R!Dbs1
z=D%D3vcv!Kd@Ak`*^Cv?U%Zy|+qBWMx$)Q{U`)ISimi$Sf=87>YyW%NdSo9M)yBd5
zwhegmuP{tounZH`bI`SHnAG(%q5D)W;J4>&u6?f*P3N6O^=e!2Ai0#ke-(B2<zwIT
zFg$m}6H9dxF*WZZZIwF)->avfVYmidA61HPPQ{~f-Y(KUoR9OU04x=1i0h9vJZ0;L
zwEs#Oa&1=;$JLB6ob;1&EGvvXgkjKVI~T`&iA49idKmO$57Rf)VdG~T+~E`sAxztm
zm>o*Hb%J1fI^$$1CE*=gme*CZ2|JIbkfq1hqx?pezxd}6{ctQDna-OyK3xd*hc=Qs
za+5K!4p{zm7PVaF$(WP3B#{FRl%sW-*Cfn=-d8bju(}2fM}DSRK})1rW^Dc$*#v%D
zF2K*RV=?ldv*f<Z6!bg6XPMR#P)v>o{=EWTd+aA3t`~syS9|&`Cj<HW{*ze#{EG-H
zCefjCKX5mwg}B`(kkffwVn5A+a$bz0{yN2=dv*h9J@$o6vMs`1`3E#L@;_>MFAF$j
zyCqw$a<SW#@ja*KP&3a7(vJh@z<om~w1<9?>Re#VreiA9IgVw@7BWtG1H$JohA4dG
zNR_WWl4jRjL^Fd`)Nxb;#2l@J!Kc@tuZQJG_iMqb$v$YfHJoxrrAcD`kD_z&i*fz>
z@JI+Dgb+fALJ0NTO9n*}CPEU05Rwp*Fp^5rK?jx8sB{ud=V|U$O-Vx1CVnBr4k5`o
z#D3TN2kgB+&7OIl`(Eq&y{?>iax}gYS|`5axB6Z{v9U-wYe)iUbvpo_8;PNyu?2A4
z9{5_m600K1!OQ0qOz&O<9+5W$x4;PQI<ybp_~sHy`M6?>CPewyk4n4g)!gHuJ{&)t
zgA-2;$2|_qP!exT4*Azy{I-y7UtEfMvpi95@<-{>c7~fjp)6doH%J2Fl!1B$?3uwO
zgadIfe&v4ba^DqIF1O9Lc1?pDav65HH~@W)YJr|^IEwdF;k%88Fn;wtb~TFT-Q%@r
z&!K^Cc55I#WiPl)xWofZ2SHd=Ii&PiMSSTY(4O~*O_iG9V22{S<r6@8lVo(ArQmXR
zPagbE4>B)uVp0b~;IuWg&qc6b;SRMgh%q>|FZ9}T5<A+`dH(DTAelpV_H<KJe<Oq1
zQ{@oZL=M6~!<pxPI@jE!{?F}7@HZ|&lOL3cKcNjiR-rIBLW3MyL$L0dHUwE#psTMf
zkAG7FQz&~1V_YH3`Lhr?;Q|wFs(|V~$NASoA2|N#k71iv@K%3oT(+(N_GvB0=Gdv&
zs>PslxhHt-8o@dgTG+VV1Ed>1a=)f5aNfBcbo-TnXtR@2KBp)5+;j~^_RAF&k$zxj
z))lsXQh_XGE2whc^X{Gom><!>bq1@0%6u*yrMZLbR0a7+k2C#F6A<<I$ePA?2v*i7
z@zXagR9sDh<XVoC)(4?(*%dxmB1LJWEBqp6P|pJ5`}O#s?9e_4>O+J12Zv)gDP|{*
zGK|MZy@@S3;seW)#jtNnh!gqyf-=j}iSqCb{6x24tUomhcRB>4TU8`)ck+PN&KKP5
z6XjL&JV6~k^Jmfkh}}x;{@ir#G2R*?n@@8^sT|*ZkA+rQIv#kakKMY>#I435AQ_<t
z*;VDZ@8NkI?oH47`h57Bs0$*~5z5F08HyDnmG`#fqD_d5>vor5P~S2<{FYpyFY_>F
z<7cKjGY#F0O3^xkILFJ@<Bk2<7-sGUkH7Ut=QsMip=KF;DYM2wYWApE=!2g{?$ifu
z<}LR|LT73&3p@5sFujw5jTRimw~CbAomKGqU%G#9)&$LcgCX1`6+ORihMIqjq44=}
z%p8}4l238q^MqKPdn(PWT{oli*{l5aiUH`Uz7QLU!>AN4LYNZ=6VY<8de^`llBuWr
zB9w98lh~f~m^noqLH}*nP)p61a_4(7a9%j`x*Ne`KG=YPO(MYANnqi*g3fUlg|O)o
zaJ8L4@3#fuG48UExs&erKNfK-a|djlr^N1bb_JCWJllI2{nuAwTxJw#W?REoS|7&O
zSb&cCaIWF4g@dQ$<FmqY^l?*R^Y!^)oJ1K2{vX%<OxfUiOQw2rnQPq~#x?&6phI($
zS>kkXU2+g^Vj0?de`N-XyV1KmSWpF)C?)2LsH0K_&C^scrl$lAe90+$p`A;<IkM7R
z;u~%cL<>31iH<$zs@u<b>A!nXKROddYUx7D_;eQbd@?s!Ifn&z9RR`OGnsFOJ<1nP
z=N)=qn9dOk*6{2Wt5WmFmStjy-*6b_&x=Nprn?|rUkQscBQY}~96GyDPSIQhjdKy?
z-sXJRpk>(cK%IAJNVt9KB#<Sx^Hv>VyjyO7nh||qfJ_~$MZL+VTMXUGj^pc|qcAjM
zGagNjgv2wO(dC{$>?#<JOO8*&zo$x}=Ef>`eX9>vd(UH8xfglE|4!oKUkT(J5Chbo
z=V|{QMDrOj*tAQmRP`GL8&=V0?>z!5pPSJ0Xg74OEF-q*Xvos_!xKeTcu)y6o1$!w
zj}NE@4QF+dFq(hX39_-#T;1XYQ}o)zzs_@^EXE=xS@uD&IUED;v^eop_XsU_Mx%N2
z0sKvS3GFFsAbw*KZ>w{~rkELAI=dTnX)g$-6Jq(ExB1vg8F|In3^;8Oimf${;IU;S
zZd<b*D{iKP_1rj6_Z<yduQGV<3d;8tm4ozFLGkzbC2%u!I0~OK;bmP8=#Lr;uznfr
z{TT{w&C9Ui!$s!tpj5D5mO(7(sh}*8!lT+G)X^}3IwvpQ(Qyc*59<Yeq%6tmb*NZ2
z5c=tCM)A&dY|4xf*kG6gj*f@n)aWr7rXy!sZ|qpg!gv&?=A(QfLder4nEk_v`o@3Q
z<K@;U&bY+YjGa+kJP|}5rc9K4gz0s07asPG#CBf=G*w%&u$FXqeLD@m-cI3>ttD7*
ze}RcV-zlC_Pxr&q)ZaUN2&<~e@2e)m_5q*S<585gPP@TG)A}g&S5g<#!WYBt)hYvK
z6d^S2h8oJANIo<PqPuh+Td!1RIsV~_TAGjaj%NmD^qgw*W*5Sfao=w_w(P8eR&Q_W
zB^SZh`@=wbbRKWIybipEy<rXOTp<3xYSz3S;GXVjZ1>8fJX;(@`Ru|eD-@vjNsk;e
z)n>*`F8Ii028wo6nn_MP5t^UX@WcmOuwY^sPWqFDlRp)s);)53{_F=@savpoj3362
z77&v2v2x}D$j?pzbV%Z1yT3E3hpiCOi`dD1{tE3g!<G7j=fSU@IhY@v#WaUd_i}3*
z=<n_hinigfM4vi=9vgUj+6sQ;?H=5jeh5`BTiNtode~s;K>VtSFsbl7*!RqXwox`H
z$0JNXFp6o6a)RJ>-N8Wln|bx^irXKAV@r`a1V>UPLC2ZD49iC8lpRWW;y3nV%Mz5@
z{3;G|P{DHCiSxcJ#jiY*>+zFdqC1cJn%9}pHy4<{whB9|U-13&qR{kUCo^{H2d|gJ
zVNmu6bX{fy8u``C;eaQK_pVVkk6(lz7Mjri%bDPLMFd@HsgwEG0{&f+gjs)%!?mN^
z(7@^%wja79RQk>X$0L=PbbboD{)oWNk9H8;MqZoj|M9x6OQGfKQr>U+ENl-wA^bb#
z3>rOn%1mD6F^e|NTgJ+m_-uZ0n<;g0jdv;6-`b4%4n?5Xdx!FD&K3A;c@7+P_F?E~
zs)<%FLyzC@l&>cxLSw=JaM?|nMcV-Q8q2Z0?+L**Mg$g_<00mm33K|&anrHOAX*^e
z?+o{1%{~QFoy!r<yr6siqf<g%U;`^n3Wezw`e;4H9ldi^pgMd~sd2D7`2F=oakuVZ
zGVm(cPjF$f4dI|JBQKu+5#qB?6&jr6Oulh8NFH>Dy7gbU%|QlD|N289v9Q|i1!6}x
z6P~V?&#ftkAbOL|OOD!N&HC%$(5nik@2!IL5>qC*at71)PlHyIGpOO<!$e9CZXsC-
zR$hl$LJiFh-j0WPAtiY5`X#(@WD^dGSd5-I$EicGlz9cO1B3pPQAqMY?M>8Q|EET|
zwTi=@LPNa2{2-n^aRIa|{rI*a{qX#FJ#@~|<vLjtxi~aP@YEHdzQO`ctO{7myeDkp
zYGZ7)31sOGgIULvZ_GG|GQ%bYuq-<ZpN8dPYl1&H-@2mn5|!ZT<OJH2h@W)Qfo-C5
zoa}(WoMD@w=jadZV|%iUC0nRZ?Z90+i7`J(f;tb$*>%qq4$AWIyL&jgKHCfCv?KR%
zj|A7qR8ZEd<NBy*G!hZtH!Pd_i>ClvhJf^DOR<k@BDvkWLB^(`Sa%?qd-e<i1H}}c
zTTWT9f<veoIRwSZ1>}Ps3k!9LpRzR{WzTg4%`gUvE16h&r5GN5ybQfOB<M^t9E~d~
z7S>sUUu&!}aFj8BM{ZH^tlgNI8Afhdaybq;hGFCCd1Ft`PrbZ`9sl#@Cr_S1v)Fjl
zaWi3}HhnYkgY(K%^H7u<tO954SxS4&VccnGC2FiYNPWZp(3JD9Qu6nb5O3kj#l(mm
zd_V>1q1$+qY%5xw|HfR^ikRkqePDE#bEsZY#l@>Oqt$`Symmkw_}`)V$gXlM&&xtz
zu7>02d{h&fffXLn@PKy9)~kA<Xi`5@Y3D*>uy^B)iql*p`7rY;)q`itGVq<30-N1p
z!Pt8k47s5|=aCap@1rAcaqq@HM_otJ@LkI4f9WpJrxF)@PQqTj%24!kk(r4!17s`C
zGspKnV1ITN&7Pew{WaYkFD)ZZVkVB9kb&J=w_r$A0nK#Y3C*KI!2gsl$YUpPSwtgy
zUOECNO+E>2#8Z%1szd(Y7|h>Oz$Ims%;GS)C$FdBnped9ahnF3BMyN2NqW}(rv)2Q
zi6#C|GR|-@MBG74gGc?rt^XZ1!mAvw)K0*sfzxre>N;xmXk+h&AhtQJz`*W4;C06e
zn#P_1%m0G$^uF_`vNM6N|7M`1H6CR15|r-M9A+5?1I=2{Ag2e5Sbi2Wo0FmTaVWa_
zdvmdGKQJ24AjFFH1-?Jn*&T5xKE73{&K@w?p+q!@Ol2J<)c;!K2sF*Wklh0?-o+36
zZ<oRN1mY8}Spe3}ltYS7g!BVdyzS|A@NC@)<xgVJq^XuQZJP{Ddnd5mlrvb_Ma8>(
zyoTxSI?PS$0dt{eOm)vA5OmuYCZE5K(=sEl@t<ovaQ95OMmc)1Ddlg55mzc>0G{rZ
zh3&(>Gnu#xN-yp*YdC$G1y-b?@e2>;GN_P=UhQQGbNms<Tm{J%+W9DHucvok88g`)
zy!!s&k7uXD)ycl#byf=D8@8j^LB!<GvtWfCb-yz&nd%L$W6c{Xse@w#^7EmXWbBUe
z<P@&h+8cu2p8-)rp4s1Lsi+el07fSwQ9Rj@nRJ+fSLXyUS$YUm4!4B)r*`Al(ThNO
zF-uSgBSH3ihR}cWe6*6sqMv0k2;nIZ?G{g%E>FC0M~pYDw9!M)8e--wL-7MccDP_V
zCZt@zlpU2^bgh=HT_ZtN(0kU>HGutDlz{j0FA@tiglXA!1L+n^H159<%QSWnL*=Hh
zKXDG~-g&`n!Yv@s=ntzYi-6|*`S3S=Ev8;Nj*4+xp`&LQJ}dS`kB9-_(|ZnR{XL_s
z@bbr)y;+!bWe*x%A-1*p5N`GO33Kj$PMJP?8*3Sxh?<X913FT^!&N5q`WAq$JQUKu
zlGA78F))gM!`t&#!Xg_n!k`!2QMw$Gdm7`4c1>&@bBooNT;YLr<Pi2h3w8P@Fzk{C
zg!slmplK2`Mwdc#c>oTmChpP1Ib4)7qImD!VtoJPC>piegU(zN_)NcJXAcED$(Er{
z*BzjwjH0dX9<*y*i%C;+P`$7M<j;>_&4O=SZ_Y6;`x_5#L2JP(z!a|T%Y(?Zx4Hb^
zAnJGLpe$`Ao966+GqTgse?S(@cNl?LvrgdA0n^aUu{#PyQK<L(u`;VqH5G})5F@GM
z)f3i&c+EQ@aN{c;xY7~EezC^tf)L(fPOdshKkj({5)0puh0TK&;Pb#MC?S?$yCjIO
zNIgehgw29<k%`&5Kl{<g>k+SfwHym+UZHnzHq>flgWKiz+<w+8rlISIBPYv=`H;g_
zJsgHQ|C_<(7m5U%ZfWe#wE#5I424nNI#}y83tNsFf!oAtZr890$j8XECX@&U{!`h!
z{|d2N4!KY@T)<a2MKhtk)JJY+`DZB;-u)ogYWdAwBWt<sL_<tpJOXasCzgh95vV>U
z@L5r3q3P^yp|V>F{O@!LZY^I+X3w7JGd~mFg=wJaG{nYndE|8ofQf6O&}3~OuNhs-
z(;xQZBTt`!UWOSM7@`EPn*9)B<Aa*Ut0_M;0jh^4VNL5ucx+Bxkj^=vXa0fd?C3>(
z!VDNwMW4fniNcX-lpRylnQcf6g&~t^$GeXBE(T+G^Q{!n?b8#APk5sxov~z|HcB-;
zUsM|q15)$*Onfu4*eAc0{+=P&qnTKQ|NAC%&b-95e%?@aiZ-C=@gHFm%^}u_EYVLq
z7rh>(F|P&-m`NQv`E;7)O;K=-stP9k{D3)2hA2hOrNzy)mk@0ZfWAi&J6n+qUTs0}
zFe3-P>b&4yzTa7!(QF*tTnuL%ZO~@oSn|dl0k2~#dBqezyn2V;`KNDlzomN6GT9LA
zUY!G-dAp!ix&WK!q+pTeb@ccV0ph{)n2!Saw(C>zezY<9VN~Fm7{biA6D$0hALymD
zFioSqV0Al^XGP3}L2+@IH3-=lwIp12Hv-=GF~W>3sTi6?S&5+|VLHul#NPwJ-ydPl
z9uHhl9gA5f`hoN3^Gr1Nwb`?^)Cs;i3C7Pus5>4kNTX~)a-~+N9_zvF_8mhzzkBQ}
zXDA+~0F#}CY)oGcikZXk*H_}}jJyb?dzWHXRUC+GgZPC68?0$5=hptjJXv&v?zC|z
z8ek&qczqJB&sRV^_2c^Px(bz}yJ7Rgjrj0ME*jQ`()T8f1xBUe!njhnm#9FGuiu%C
zLmG2yPo>W`7Itf2Mj7pswfc<2wl6Mpr*Z)uyK1gz>&8rGQ+`R~0Z&ir!Be*fp}`A%
zbXl?$7XFHdnEwoUsdXTv(7R31agEp$mso4tzufaqZ;*OVG1Hs5Myc1nNzf=<3Ld>q
zC`}CCaD&Lf;CHQ^zdS<wf~A3`pTx;{KxZQw^|pifhO4}}%?U#D7ZQKMkC+<Qux~oY
zzFF(Z8xn|>Ayq8Joq8Ev>lDU^=JK9d*;ui)1Pa?nV0G{n-t_Pc$k$9{?Tqoi_E%t>
z_f~Wk#tVjz6Vb275572j64sYhFv)CvP&Xe8dNZc6rORpeQ$7qd*YyJ*+Zl-cV)=NQ
zxm6C2g|?w7IAscPqJoFRULgwRe<~zqKfQYsPhnkd3p2<c2<|7#u&ZwfWqSg6$AdGx
zV<>Ua!f8)(H(9B5uS{sR-i;<FA8@bX?QGPdD3nxQ$GTDef}e{oN-n)&;#CW%Z@&ks
z#YQwYUx%|#i_zn46RUG8L64x{f?k0bL_ZF2mt-n=zP`j&Ut`VUl+=$k>d&RN3yY1G
z{@|uq#Pi)(fw=Jms?pA8!YO+k)Yldpvd6;L3DjS&+DB)rT%}rG0HjC_pmV1?c)r;L
z(u7rzn3;pZGG8>Fc?EQfDQ~@@H)wt+ViIW|7OA)jJ-(HoGARXH)hQ#Lmxqrpmq3$+
z2?p-ggR;HJ*wpb^S*M7AO@`4B{KFQEZM-q%jUBu`aS>g^1EC@4JM(ZtVS2`C(3;p4
zG`3EFXB)^dr5_7B{-sRDm~kx9GaL;bTmjLjpGxV?cGky@p+S&043cdkA67aTUp+{7
zsJ<+?tblDe>4Bl=%h6<f0N0<ELH+B6%p_+LT)&x#T^h8}`B)s(ukKAuXnGx99Zb2;
zA#l1|7J7W_$|}!MCvdM5zm<Ir)ce~&zHKrSop4j~Z^X%3nF6MUiTJSe05-+<6z2a(
z!S6p4L2{>tcOQ8iM5~4gO+yy3No`RO_)eDx*!D$Dhtrr`m4LE+-k>OT<z2Q`(zz!b
z9XBbUt70fhlm_78_*c;19nT`AlTf<+t=Wo5HIzRjw^zT_DE<&_*7)%_bmow&z`qjf
zjj5ljxs_RI-ea17eiDbyPdMG0j!j8vT#NR{I_BNM>(??CcK8buZ(qw}WM5d;?iSt{
za-NBeell(IFy^9S)Z20Zk7EzG$DcpEGJYU9k1=A7y9;qx{7H~D4OU{yW~eMJXF37<
zpz%*IuN<F@?NuSl5yy{Wg<~<qr!j~e6M)qw3yD9w5`t%h;ZcVwY^Is0@zA+Yr#lqd
z&phXf)M#?q1(B0egaa&{G0CO?&Q|Thz>4KuLtH>RzZD>|&{n)$SP8c&gXvhihg=>1
zadpbZxH!e5@!!W>e`+0Z3FE<h;zg{_nF7J`&Cv3oFRZqq++}M4_VuBh&K3(WwXfpQ
z2SU*P+H`V2<l+FmM6~%bj{3(Ri}f^~^43YNVDlpvWnBxvu5LEG-y2T<>`JundXl<E
z#5SFr!{tMyg5-8N^L=~`MlQMt0li&O<aS|p<KuaNckghKp}X0;mBFCtew$t0b{>^S
zyJF0Vi|oxF;^xiHK%0Md!Qi@V;=xXalsgN+@6rvndVVN$btuGB31;Y6s(|LMe&8A5
z$HMwsv$kA%*3Q2SUJfE~G#!uHO3v@U8IGpgPJy%E5#@F|_i6Zd<AK`e(C?axjg$t0
zTDL0Ndn{*5G|Tb&>zP=3CkA~76vL)^Yiyda96kS*fUCVRX1T~=$@!6}`{EjQQno$U
zBnC3O^+c7^NM%4T>gXQR0_S=oLFdG7o@|wdRf*@YWmPn&(;jG~UlL4Ls~~@Y3Xa|{
zfunS8zZvI<tHl*yT!0|CH3GJ67>y4NiCHku8LhWO0Uk+#_=A+U4-E#>*|V_8c?}c&
zFJ741CmKB`lmTk)BzFwW!Unf1W!r}_vmtuusP50N4$c8drViKo)WnzVO@@XOW~?c7
z1!bb6QLoQ77W~!{U`!gyPuRfPzldV>wdnllHiPs5;2OJ?h0H8OpA8J6qvoLK4SC8u
z?sJcs^qp=s<f7o1BCDi8_WEQZIIlbh8eVhg&Zvv#vs3Y1yA_HYn`T$6^n>bzsVwUQ
z&7TjonCUf<+fG-5?TB<i%?VDR-_s3RkL-ew5Xzp<dBK`yKUJPH&BA-xc6fD55WW@7
zB8J}y)?qiAT-;~i<vb&7ntn)GMY)>BJM%!ZPj8&@JRMDc$-!g)1*O-m6n^%=Uh-82
z;9tr@pPqFNcTTM!U*$A%JA~kKQv<Af{9DjakHLohW5GG$gfesEd8k>P2C@fdaYcRx
z&FYN7|41C@^en_U^*Cr>Tu9Gr1^i8>zWySGS;h0|c&}3zt@;N-*^QkTv_Bkb<~tB~
zqMI^NJOw3tnw1jqE@tn0m6!Vw&)|VG>!_zJng*g**JAWL-VIDF!kKCIR<O6-4AO>h
z<~}qHN<_A3++7o7LpBOvSj4NRuZH@`W)R=k5>w2BLHGMrXrmprj%5f~#d@;%@v$h1
zB-WJs1-2|T9Ti?5IcR_9#(&p<WZiy<j=Tbgx6@ATXAkIpvjmI>Jm=r{(B1eSRJQ&e
zPmbBc7`(6y;veW^nC38SPi*AIw<%Nj&ITQJ?nJMkUficg56F6a8q&Y~VbY)7isMuF
zfGGQ^GV7op;=nR!UwTgXzVkR~+Q0JpAj%{>ddoF`QHS6mb=rO`f!)SZ6m^c^|E>$i
z4885BDhxIgg|A{dYr}BSUFsySrQPDCSiHN3Sj2tF;aj~BPWH`2ShOF8uHJ#o3$xI)
zj(P^O!dPKwJXB|7F~t{wr&dLf*D{&gPZ-8MqGs@8|NlNWDS~@l!m!DdbNi)dnN`I{
zrhO}d*Z-_#k~eC+eL^M^-JELXvL=WxctErAU}uagO9St<l#embVu7EWV2qsH<YnV=
z1O2A;ZmbfVpIZqoc!t$a8U;S&Kb3mCV|5B$y5nXGM#2TiKSOu+w^vX))k&~o-8egM
zgpOtOT)BG=I>!u!ynSw{vU*uuIwJ}6L;)z7ag%B3)9mE1N=W2R=(Fz(TPVMd!7YxE
zduSu7Dv_-l6par|w_ynF4{NxH92&h~rJe*cXvcNzVg*{S9z`zJ-7vOk7HTam;LgL|
z2v@f#V8h$X7$fP<3te_%MuH`dJ5WX*i!3hR6ab$bjL^7WfgpS7%$m&Im7RP)*ryY}
zsct!H)Xd_)#I#RnZD+DhF*cfMfqkD@80dZ!R!<FsS?e-E!{9gjQDuy#5Bf7>2QeD?
zaFEzvRHBwXgpC-@6)veP#3KtEiAk?j9SgQC6H)fy3s=o}r@VLjEL0{$f{&32em?7k
zq7V}nY%4%@`8#I%%!&_KHIVXUTj8jB7)T8rF_`YKr(AMSquW_nwI&vw6FvyCMZ|qM
zbOCeX6j<+kn_1^j_i*temenf&eeW#A+0PbY+l<rjH^&zri%-GSA^ov5`~*BM>x(vx
z4&c2kl$gAw#W?aI^OqH)hW2#Oxp$DuY}a#xLjzfzp$i`$m<S!l)l3u=V>)ExEbN#%
z2J(-d<_*R@pvIOuopbl2`YvZo$uDA$=EULsBTg8bT7sqpmdtt5A*j1Bk^Ap-!EX~c
z;D*J;kahexNZz)w+RoFUKA)Vg;sCBbvIly^ctc@ODERI#hJkM63(%GdE@S+-BB+4Z
z8@Pk2@td$v?ueTc=b^;_Vrbcnq0F`xmwb-EO^phO$(X|I9)9Hd1us~`Z-0E4lK@yr
zXN%{~#d@#IxiH_D95{Wrxc50_i;p=N6su$D!xCKjVhrtbH#2J;Uz$yP=JHwBi7Pt<
zO3Iut<wg`VYYjk`&_MWS6>*WjYJvxhX9iEwu<sQ_kLx@6D66UD*^+@)uRDV0o0Xs&
zlZ8LlG1UHN5l9l^mA9ITphJH)=&6a=-hHRx>!oJisDGQ0d!5++5pcW24$meP5R-;7
zlZvOj?(19}Hn9?<;fs|GwMwQdiiOF2Q*p!3O|(NUK*uu{piy#)sq0q2u0JzSb$b%8
zz9C1iHzT;V&1t@_C%M)l_ObbvY0z+L9IiQ~hkg@gz$b6I4Elwj-kVS+x2q5eFC{?Z
z)2{F}^9**_D8Zmn#kEJ$U2@P2?7S95-{aGQOG6Kw5$%Gz?(Qc?*iLBCr!LH!fuLFx
zQ0&}q6t9=2W7y3j)Wa_W&+L3S)Ru-Z4+TE#TLskNQnYzrfJ^$)Zd&t{pt__q(_B3h
zE2>Jd?>S%4czugFL;+BL{R||y_rUqGY}o#U`XhE5z~HtXR)(Hq2B|KrPJ13GDhlD#
z{sGu(;td*alPTNkh+!e0l+p<k;oW;{y!9Xoet(jphfxkhUfT>76$S8j?<G*V9!68?
zWr+OL2Q4lXaGfRn;isj5)4R&SYEKK(@YoB{8xY!)55w@F7@Aw2g7gP(S>VT3c3?vx
zRzENUtKmx^f5H+tDwn~-VI}Ci(2k7~v{1DEgK1>1OlaO14d01b<F+Lj8a|YBBYP>z
zqpotF-^Lhr=PjEOkPcokmV8)SCJ2y1zJ}${G-na>v~q#!!{My`Po*+ls|@E)E`ii{
zk(8A^M}7l!9yqy*Ytk8WNPGlp?JbAYYCmkgb(fo3bTawFsm$eqFWcad4}NE|SiQ{{
zsEDeBcUc0uj1OntdoGZJYCCVrb5LqD5+~s9J+2is3u^3jK`+x?s3zxL!vqR3{c?l%
zk%!RReKj8I7>r*YIHLdPbec2g7H^F%1Pw<4ZgfyK_EaX*D3*b9&MCNMb_hi)4}sHP
zI+WeL$(^5{6;!Dz)Tn>T>rcmlZfzs~xhoI<%Pyh)dNh_+B5pOf0#<LFKxJHslBGi^
zN6{0Vy+#s)ZauUPJw}mdeJ+nVO74XcRyTPSbPN~3PkD<iT;+p`<nJtDpa)uhorE6D
zl}m&30FUWI`^-Z2SrFmy+_Ts)KN@m}7J;AFJKnMM7|J(NXXsWj)Gu3&BE8o|c4K^*
zco^LUjYKfgH38cfjblyO2YH8o7597eoiC#7x~E(pq#DCu{Ni~yQ1c{8PJR(w@|{3@
z^cL&Z=7d@^@|YxE&WtyFP^v0MalJY#7;ug9J-+MVQmi#T8%}$6`+Tn1><TW^cSD`e
zcc!13!E1(DqW!*iOzTCQ666Iad$UB)biT+o-zJ9g<S>@<yPc0K492cy9Fww;?%%}#
z|GI$1foYImH<;KAZfG2u2BO6>GpSl=vCj(;8gyNWs#G2J;qY3tZp+8M|IvFn@vWf#
zbT!zSUj(bN!CX3dyt4XE9u|IHh#fB+@!>yFa0m8cSYik_eYP2XFZ_Rh%ysrQ_!6-N
z=7PqH?OZ<nEt3uzBIv~}MR||2m^3>8eKanDU1dJFD0^Uj%r@|S7l{Ku`Qxj9?Xc#_
zE#4Hl2;^PA@CaWEocUW94c83BSx|~vxdND;9z@;sAslB6fnLW?;4Oz-+$TxEoYi@_
zC6)S%-#-?Q-jR+K-PLhz{C>RCc@8a{S3{w-0~$mtA${2sE?RaS9{-Q#)>C$))!1po
z_jTb?qeo^Y^iEh->xy2ld|~e*e{lO$#G9tCV?JAt<CQD!DAgaLtO=bBA`QpccHYU{
z;Djz{tbfZVO2=T}kYJdytcrL~?WUql3rr<5li=IXXq@z<H;PQYn^qSO=6?N#f%>0h
zNSsIC^UWn(9BB%kqF(TI0NpJQXM>kyB9mzkf)#tMuutzzm|oI_K@VHh_1*`P-Y(Eo
zOY<EYC9hmn1Q+SIQawD6>3MZ>l}-Y?{be3{$PRMXe!p3zdNvmg(czxNL>P0L*uw>V
zpm3EM-fBG!VU2&;K&Ld+`LPG?sg5FnSLwc!2TdcE@J<%X(|sJ6WI3H><}QV{f$@;=
z!Ww0*kCc(DC-EeEL(knh*zr6G-sooG(fDzwlXe6e(mwF5d#-@&<}+cwH!&EiBDkjd
z5U`0T1;buRl+V$J#*4*Ve)c(&tvL)H^TT<e=6R^Ua0wHvj$yglHXLYp7{fjt=dyLD
z1^p^-uyi|$KN`yr*2XZ|<B4$4D*#hUbFfK+!*tqFxqW3g{M$~n|2!QlKlB2>61qSA
zSH)Yd7P0T$IEeqsAnnsNZ0&vq|C?|D-5&hI7aX{T(jjM{#da;|A2$W7xFEJb?t#to
z3sL_590W}afp-NJ(7ZT+*_*B6(m9r}+VToywMTH-YBMGetrYC7>X@w8CZ^F{1x;sY
zA6cgkS#jwgiNA_M`kMd$tXbs6)4Xv;6kJWd4y%<qsCCHzdxww1MSH!`W>y^eaVt^x
zs}t-E%m>*&n##b8xeNtQXqM(OpU>fVNP9PGekx#7Tr$A(ngVxyCzjiQB-Dz{;5|k3
zS()o%g~S1M3l+R|YIk^GwhoJP52E$_<KSa=owxt&3Z-7fptEQjSJ!z-^9f?vUw352
zMt_v5J>-D5ZDe%;qgVsIW32uLF}q>$F!*~uN}l|Wx$Kw-8b$ZHG}ab&{4)a!UH}^D
z_%Y`<N0ccx8$s4hpxL0dY37M^@(yo=m@0Qz@HhhNj68W)uSjHbL(!hH7#{miDlM0m
zpk(z2<%)G8tbCHl!?bKcnndg_yJ&Q73{nQ&Oom@37eLF_hFPpiU^)-ux!%3Kf<2!P
z?QvVVvuKfUYpfpjdX<QCcI-ySMh;m!uS5Ql1dJ>h3qBvnSNQoR>l>E|CYtLZD%6s=
zo5c1ly98tIF$~&t3FjwOz?A)$iPcfX>exDvjyhZ{^<`#%uU`b!r{z!`vlv95z6*~|
zkH$T_)3If_9QD*52*&O&gp>PM<FM1maZ+h8w9hY8Dqch}X~z+z;>sxQGBX^+pK^Ho
zh6V85DG^#l-q7TDMX;MV6gRED4s{DG+1*MB-ik?qvPsnAa~uwS8>z2knF2jN?8WN;
z^<_2ZqR{bE7PQ`Q1AppQKw%1BIyM09LIa@t^^+LfSqhGRo6)ymDC#U4$jWVOam-VC
z#~mKQq=RVY(_<wYuR%_zv>@C{oe0rNZB`K*0;4wTqpOh-%KGOj-?g5>qqHL^ou@|(
zyg8uW^$*wfpAQ@4a&)%c3hfi$F@w6f_&Bf_Dm)e7tacq_Yriu)k8+kC{)k1Ym7q)R
zE8a470{5_q7Xo!>ArVf|W8e^M3oi$w{*%D1OC)MoPhfTAwJ}=c0B4E<QQ8pC4$VG*
z&Cx~BFr=OZ2AYAfej8JnMZ)wC2pN-taQx>~jL*Krp5;wM#gX+~v~QJ}R@4rzaX1UY
zUiv_THg%4zj}jMiJ$M|XY}AU0(6YK4t8{aQSx2rzL!u{q{d9r;yQN^$xfW_F^+2<S
zAv~iU)zVY(s4;UI$Q0S=ck(Kien?e*6E<LepB}KlnK%qxBDszCHh6w-F#2fg)1Cb+
z&!cQ+v#l2D$qbcdq5aV!Dvy`NEyoS7DRZ=00%t@=(fN5WS4}&htbZ87vv!?^6!%KN
zzN_)4>vi0kPrdCk*-GgqTb@Ur$5o^JQPrKkUtQlSyk;7*8|v$^$~zY1$CiNNzz_cU
z)M@B_j5x4^-ZO2>N8Bo|ov+j0fj#O3ROno0rrUM+dSBuSImL7N-eRtLna4b&0`n8m
zei%<;u4_DaE;xwBEiN$F&I<jEX2LL^^W?i8!CU%e!=uEDIDQ(vqpKF9m3tx&wwOfR
z!LG#KIRoWa1f0?t2cj=K%@!)d!DANrh>o7%k!EeYeR?5N)zok|=X&Z0f8~w&0jOAU
z0Y{F^#;W`ISUoKW&ke4^>Pf!PIQu3GH_}Cg#&CY1A`x9x16c0se4IsmmWISojLE2H
zFYc_vEVC|PV93Cgx3QT8VTfHVaD0FU<^`?7z)C56xhY5a1?nDmw_^t;k#o?u4<sBv
z3t~DiXC3l~j)?8hH2At;eD4M8U15X<H_oCb?Or`boEK_cvY<Bp8i*eH!aZVmsm2^u
zcAO+O!#(0LOi&8C&%B^Ex-U-uhq%X&rwjS(2jZLqj(DRW4XcOkW`V<cg8J~C;Cjar
zG<WJkmi;;Y)y0$eWOtbHyYqr+oDX<(4HhChQ(61RKp5y$g|F??LFp$2FBrmAhxPcF
zow+o(3PPQ;C7^vy3b~f4(Eh3i1QAmtXeQ+>L(2Jsf)otvQHUbjS7zqD6;QY$7*$C@
zW?+`f?Q}w6*b6$(A5MkTlL3(VITlpsBb2g-EqquVWksTL`3`MgRE>)@Q)y}|HUCRy
zF*|lL`D*I2Ra8LBCO_`=bvdi6roX$2x;_65gB>*c)Vpz>S#;?E;ZH13Yujq>VLlyv
zUOnYiAwDSWZeOfeU<)RDVp#BU%E+$HgR{>r08}P2x1v)_+c%y2z1t6drdh06V>zg4
z)BH-ZlgYmJN2Bs?pot4$nn{1u9N|xAn#tIa{h2vi^~Sc9p|H}`4bLBTKpdXN4+xd;
zzo>)wxQKeChh`E#WTV+)3(A4KPXTxG-HG<LF}1(x*idH7veFkrbW9qEpAJyQ_bCI#
zp-o&I)y&VL6E6BR0qdrw@UOcoxv^1_vU;Hi<T7G4iuwS2yUra_%gGNGLwWWbQ&D9k
zXz2-16TbzHj3`6X*e6UfwnABGnGAK8E67DtTwLBZ3uE%)n6{|_G`D=<?!pq(y%on7
zs*DibZZO4vmmsT6iaw92i#2W_$ljb0^m?WVTb@c#;{KQE+IoR$8lCw@RhqW%%v3hK
zJPY%WTm!3^N=W%<FWf65&XA^#nY@=$_)w*TUK<y(Q5nUk^Pm@Y$tCvm2wh^gI%2$A
z6gN-lhGCAkdBQ33$aX!#4Bl~8)5{$+r&pnlt1I|QLvYc;6KHU=2mc$Cj?G6>ne(*q
zTua)?nm%L;DUw3&e9@N6LT_;V&lQ?_9RWudYp|#Zq&Y6J`^6#f=?0?X5;5f|HKD1`
z00G5Ap)sAonh@eHs8K%dYdLIfIf*UKM|k|53fN>M1C5STJblS5p0cGk2)C&lSP_9K
zeS>(tULa^JoW_5K6Bqw`23Ta}anHV0OtB&aG%poET{q%gzK;Xr!#kNv(k<#a<|z%%
z?_yt)C*Z!q(byzPR^Bo{iRS-ILk;*w_of99ox1=PiSJq14(c$`j{4bL;=uo+-NjjN
z(?}EYn7A%Nj|u0|rsyHjG6T@hna-67IWST?9VRu<&NgKjb6c6no2%`i{niy80&*}d
zddu8$npkHOou55>C`E4$^5Xwe(Y~k>KdqaH^0jZ3(q~c1gf+2HS#^~gJUIf9d;CEv
z8Uy`*9!KA<1*ivVIEtQW&YP@w&JT{QQywu#HX59_q%!mQ3(#PuB{`#qLx4y{j0!_$
zFyc5lF8ULz@e&KmjfSKVGL-1q^7NABpx)P-dcGNgTS0H~#?4de&-Q|*NqvOlOAJw_
z->K{~<O&)$$;jWl6vjWO0M)JRV*B(t{P6-MHZ1$e?9)HcZ?c*l_?dxq8kyXu{2PC#
zmII?hcA#j(3$tefu7Nmh75ShSVRhSOkcP}vHnrRlRvVs&rl*&bb%Bh(YB-3s=!g?z
zMv|9h4Ky`JDGR+)Ks9bJv~ApmogP`dh34Y2lnX-T!UP`XbQSE%k3sx)SNwaR90CI$
zGLx*u;Qcuh#ybUp=C3JWyle@m-lhuaKXTB|=^qwl(ig3nC92(2Li2_$kQ$r_zt7J=
z#qz6Ie9@aShWqgNm}H#yDHx^3)X|tn4EzN88ULm8vj0b=;(-SSo;(EhsvfxVe-UWo
z$yw_RIrQ4+g2%Sa#<|zVV!V|fZ#_h8*3rL-@fC@`N~2(z{skzyM0sALau6M*&-*={
zIV)0_LH923*+zk>Q_rB)Bu|j84+fbxogv?pF#CX!%xGUc^USov!Jgre6+I7T7@b6u
zZxcXk)J~=sMC#;>B5ZL==GmviQAckOc*GW>Rz;>@5!e+rU8Igj)H!lum|)Z7a@1U#
z0j7JGK=uFr<?Xo}nD|-=)3L0COifc1sT4v5F+GgDd|{~01Z)^Kn>QSG<>HOYz(?N(
zufHHxm|Z2x7jERk?p;R5Q>EbZGl#8SWrotXAI%IZe)06K72NOmYIqep4)-q)M%_gs
zFzz45e+hx$OxYBhMLy83j1|n4mvQ0K1Qb0FL%p7<g7MiVE?VQxo4o-7J(0Mr5tv_M
zNBkQt=FmG543e)A_m`M%dp0tohcf_eXVN>A<F>=ec=@F+y3EX_o!KyMI6f6Ss>R^E
z?ll+XuTwfqEQ0%P7qRD?Qk>dx2-UMnSW{V|(%~@iAUif;XP^C$<yps#wwXiq2uCh|
zq=cAbL!oL@GQBJB^Jx8YbQv}PB;)Q1f1jKJ`{zY4tz;bf)Gb7{m5y{bQ!1}`l%xH{
zDps|@5H*4)^2qHMA%608CLVMItPEng^iK<U$~g0(&Y*opD)!bY!huPt*y>F8v<I%x
z_#+?p51NVbzEh!gRvx^e5w(`&r68u@Y1<b&taXdUI^ssU4cH8W-i|@9<<?NWEuN*f
zk7Tu;VIb;mt4NvI57PTQ;`?e(p@(Tac#XZn9gF6{U;lE@8QYb4-OJ#vb}|V2n9Z8_
zDjwlDk9OgUi^~Q|P~L@xf}?M<c%7TfdPxq7DK~t!Foo8M$bV67GFLPln|_^9rl*xa
zeN8AfN;ks_ZDK)E;mgq~0OorwK#ve>p7q-j!s-RMRdfx^nk4AD#1)$NTw>1t0HS{q
zg-I9tpzD8+So&ZYo%utdrDG;DJ}85ZI2CV-)1bR=A%CJj9fzo&MonT(xH!y)9n|eL
zov{hpT~kn|Igy73EJB_7MNrqHAAe$b4JBcpncujpEdN9w^laFTdXs;!M#mGV`W(U1
zgQQSC$sBd{Ub4w*=P{yp5L%8sfUlj`pmP^>B?LHu#<^>#Ic+Fd|L-*U`PKs{%h<)0
z;b^R*2JpZQ&gogA&QVkLIJpGnU;G6B%_?~Jkr<kplL1`Qa2+|@Ql3S^h%0`m^T(L_
zx9z6gE)vT?z7$0kxhQIUsxX2mrl{%*V|K?;_t8u_;Knudn*EdcEgcRo>Dg`=x(oY;
zhJeVuM5x!4!Ib1OsIFWJbzu*g&f=LY#3ulvg1s=sB@Ude_cA@3BOHA`@ZISGz8^z-
zs;MF1a5f4q=NqGoVK(<11z>fByq)dm1Y_E_M=vYJ=L2@5s9={+7<dNk$A9ADo7;H$
zh8a*9ZwV&B;UInavl!0yfQE*hOy|WLmfz7EO6VNg+!+O{eqBN{dnc65I*qO_19`)a
zOT0Zsk2`KZk6XX^gWi@(A$WK`Q{7q0eCrfoG-EJSA8=<HYv}H^Z!&pUlkn>P7!dP3
zq3+EBW_y=%rH}Uk*qo=kyAAX9B1VAY74X$hgQGrGST|Y<1}V`P`DhM$X1?H}qGYA2
z|5fGuT0b<NdtVT_I-5PSjm9rsBe4DFG;Y(m2@ZWJ$C~>QP*CQKj*h9!X|)wjaSH^W
zo)e*REV19+)WBwQ6L0*%!Dah==;*Z*!j2x{P0P<iWx5MIep~??`YF)h?sd@a*h@W(
zbKpBX2c$JsShK5%y?Heh$2{wS#^YZLwv+eZ@5^a8{b&g}p)#pIc!ag8je@bDkLJsZ
z!8z#|sM;=qPSg}~`*L0}KLu~R3&D<*Ih5guho4g^C$nFPc29%x$ANK}Wmtjo8RJ->
zS~SM!&EXb3j#J-jB-qA>pxQ}ow29%Makx8EXjCwHd!dj%--_#N?tpGC6<8l1$X}bz
zz~-Kn=y`80N?S#IcUdN;p4LXWbDGfptd-R${cu^&u_$jhh5FlR)D?|{MN2GElANTJ
z_bF!@Jx+kx;_djo4>?$8I)L@d7*riz&f^Q_!7~_vH5(nlpwkR!aRi#auXxOod2q|0
z{N?q5;P`46nz|EL#U_p)R?DU5mJ^KR6_jJj0KK}ITwmiXc>P`i!nUb6<zo*_-CTr0
zYA4{K)E7K-RzTRyK;dL;6^cGJC@|$B->`r>8vXJhrcjK`d^MhVIsgaEO9IWwfmo{1
z6T=ke1!4LiG?pjg%UQ>vE$bwt>|X>%(`B@aFTz)&46%3R0leHR2WNC0i%<{-`}9&!
z`ipkzlivt>`R|x$-Z?Y9;oEuOMICGkC<XgNQsyynENscjz-xE<k?SRao#)}WGWQhP
zmMHMIbqO|{a)e&HEzl#@2%f%Nj^FD-P_}Cl<r7b_&VU}6a6b(#T=Ky}<i~4EPk_t`
zF?=YkMl~zIW}FYC57A`@q|~R=U%~7C5G3T}C5We7ud7QkJZ{LN-oteWyt|7>CMkHQ
zMj<LXr$S9qDyu(1-KSy~HeYQnb_l<i$IX0H-|~b-=G|bLI>bsH+7mP$YVsXx3t{Gg
z9CSR;8$7(sq4kX}*o)q>4qFTEcO!`_DqgXU#6zs+rU&u81fg|;21pEzxJ$r#{Ca*F
z*1xU<eae2c#7u=P4+F63cPYx;X2345QdH^GGq*YgnvG2(-mn~vmNzoF`Bg!0;bazU
zb`sn?`e6L?FxWb<67Kye1Un4Hrr+n4zHd+9sh+`@QbvxDyC%ZsBa<-i@Hi~A>V|HA
z0<iX285kTnK)J4QeAHV7`gk{St8x9&I_478OnJsrvUfor>qvCFzXeq9slQFRU&${+
z?9kZEI&D=DQg8`kbiMFWuSis|6p$VoY39{IJ#osr#Eibi72fms)`)$Gqx11Y|Iyg%
z$6#z;exJ=8Hx~D(N1|!ges<2(6#ZVG<H1+Mcx-VA_UK%TmCGm4af!|^tx+hd^)kz@
zHNtpCZprBgM*EI}$L$2>nqCYYgU3PBpJ0^CTLsJVsB2)Q<n1{wkSU@2hsPSO%9o*6
z`*jvHfco3(b(vo47Nu+KZSH*Bkn3kJMXyZ`&{PxyI!0<t{LG)bersp%1ESC-xDq5o
zb-?I_IkxXM<Bl<dL2L0dE+IeTo5tz*u($%%ZzXcSn_)E18bR($U7^*Ox}a6VVNzr$
zylyy0Uc3`n%jQy7;v4n$c7s*@WoUYG0`+{yf{EE2@GCfovv<kxVd_$BCFhISJR7ye
zkJ;$kxu_cPQ82FC$?L|Q0h9fg+4N@@L2`c~t7(_>+70yYUjCCcbx#5Pk6R(DdJeRH
zl!2dn5ak*~5Hr!2*$~G~HT{Wl&R};O)|KX|caDPfDj9iW)IdCTGz5LP2G2B)pd;Ns
zTwQwe_jmko?^6ZsPqJ9nG}<3uj$&``$KcLrQ{pgdLtI-R7^oF;Q{M@E^%v?cn<$~6
zJqWdXJm8*7R)Wir<<RZGAgt4l=Ix)FxpDAPR^KxNbS9nxcdF=Vl<naQAEe==qGb3x
zG=#E9Yf<<^S4;^A<_SqSh4Q)|9)@7pp;_F^r7JdE8N>`|Ru`s!lxFKuP_ZZlJf=<N
z(SJ4Z%TIq)FFge@c{_PySu9g^<|z9ftOEJf6R;!l8nhifN38TL?!3hu^pYc$ejj(k
zvx6tWdpON>ozr<_bS6ysNi510aiBR#&g!>k!Ph0_=vn)nj}PumJxF4ZbX*22kG;IH
z$5sq0vW5JQQy{Z{C4B#U5@wyfgfZ8>K<p|7n|;S%%gQaN@y7;5&oY$t&(iR7?{Iqe
znxmm1`7Oqq;BoEiXutaxmv<k;b)P4p>b)IQg|5Vy5y8-@p2uSjJ?Fld3pJ4o;Q*bd
zzW<<JgVtQ?q-F`;bdFTLpzM{;ZRR?+j61)hzJz7Au;B;IkDfX6rqo#SFI0gn_#$*|
z+Jf~18@OUY2{)!$x!6wNOK7(cqamhQ+X9wS`Hn*uB?g--m{x@?bNiBoTaNC*p2T0B
zBR`Int_657D;fRfJ3#PD@<C0^gui*Tp9&ugMuq3FU9yyk`<ifzblUqSXM;2=viQ+T
zM4;TNELfLI^8PZFZV)^vlB3^XR}gP1RW3pmF6*0)eP?8VIPM^`JU;=a{g+8>pfId_
zKL{%?41k!}MCwNsbD7CDa31<uaFxvfEqCf(MTO(CTf`A=DudHo&tU6N@^8$r!wl^_
zjKp%*7~}&T;xcx5cN`9g%L4l}5o_ta0tTk)V$9ILOf=yuv#Pm(!UuYWENwGWJ0AjP
zr^G<v;%vARegrFirDD_TDEwVvfsL1QdFtO&m~UuA9h&Pr-SsZ}y8RTZJ3JDc+xKHk
z%tw}@Ohaw?dDbyvA+xi4!a_6V;5c0^^rAkjWOKZ7ysaSy{jUP^*Bt~&d5zL%ZW|AL
z5zE>wHsR4N#H&inf<UVTcyKcj_ZTK&=rM0pw4`uh^dJlzHXl9@KLIUSr=U>tBFwo;
zeESvFJagbkoV;!hE;$*26{Y_0PjnF;d``2IBY{vmOpZ^@iE&}Aq8)5E7<rV~xOvmT
zvd#tD?WjL;=POstOb3nQG~Z^(K37#>!?AAguhT|sxzwLcy_$x-MG6f05{X_T+@WW^
z94+4T0?F{xAX0Z%czOV81vGHIKHZ_fcR8x!UYfOpDfpu;ZsZ?Lf(@@DU<YMq7R*_N
zbtPTE$eKQ@dalfs55{Lw8_exEOJ45%@Y|i9l|L&`i#2f1a5wTWhT@bN5wr_ETx>M{
zA(K0v27}p~xarFapg-<k9zStC?ZAVIW6sUw+Cgu4wbfj3Jm^MFh$GNcZp+T?mSFJD
z$!PUv28d~AeUvhyC+A$p)SomX)}&bq^8uYyDK~cik4yVY%)Uew;?@&VjH?l$ND-oN
z9&?4K-wgn_x__7=U@sW;SOfaUEa3OLxu{y%Rau>WmB-$lgJU<Q;{KJP=uGF$RQCYT
z$sG+}cFHlGe2?lvJ(JRI-Nkr4%s!-tPv>pNMH5_5Z8H5`S4I5)xnbT3<d*7og?iL#
z%J}ieAV#!}n~vz@ozqjfWcC{tf20tC2Jc6|3x)jR5Ob_{b7k$b|7ES86`)(c3S#{C
zfpf2w{LriUXmVvE;LRkk8D>TO;!{FPaRArq?*U<AQ|NJNENUcdgRriV_`jEO>?D4U
zN#9Cn4NWC4(mmnfCJmI9)U)6S0qoX3WNwoK!1VGnrc0fhXV?2+*x>KXyH7d5lgI4r
zKQeUA)mNT2yMnD-ns}eR!RWK30Hen!Fzjp@$kwM&DE%pK*NcWJ19CAeB9Oi-Ic&Ub
zA*S4m0<VAmkD_ypi?RLM@Yp9Igb+dqMM!F{g^?r)NeD?&NkWn&VcM!h8<n;im8AW?
zX|7dGNkRzy$v%V-;t3&n*Zb}H^7v8r%za&Jo#%1%g%Fkw@-Y*62knl-Q`fS=k4jLp
zB7*)+|Kt8+{^Q2wn=vZ)G!rOo)z=BBBrdr*I}nwF(uJcKjutc9fC!fFzYdf(yyngI
zR%oIyj(F^wn68cj$gjvzGvW<D{vsXf`#)vNhpj-DTl3*9WwN?Vmhc;=%jj7>itfXt
zAfi0LDMxqgq<e<Cl^b}^`owJuo7jRG)KTKv_<U(7m^f;p#s@Dj?aBppVk!q2PY2uR
zugq%0Y}zTjWyUl+W7M&HdTtil*C8l0J?EdRX-4dNh^xo1lQj-9g~nJl5KU5KPV1I&
z1!X_@m3kGAMvue2!xv%xf&)DK!(#Y)pI99${J2T~L6jN1DBE$e7)7rJ%j#W#9r(Nr
zm39Nv-%ZD{H!DGNc@GOnn}Jhlr_x)2?kKl=fkNV1)-@xUn=gwcjzc#8H@XD%qb72^
zKM5LtToD?cqFwjH-tgNq1u_k7P=B%-pV?B2a5V&~c8*1lgGo^JK^L2oyIJvtb>Mip
z3L8SNuu~h>qvyVKE<Nr7{!e|;^Hv%@RHZKNlHt7P?r2n5(hE%b_GO~)L%IG^n%Dk`
zg8F7ZF#BhT%B%XZu5y1g3?+uZ4a$XdWWmRwDd<|bn+w_vKwC&z)>Bc?{EpuD%7@u(
z>trmXIa!l?6%)Sn1A)g#*^dh1Pf4z`Vd<v$Go8-0rw4%k4P(fEvmRXTMzO%9z91Mj
z7Hn=L!m!^-=(@TT`reNxPJtujZI8#sy*l_iHU{++9MLJ1Gc!%{YdJYnx42$b5_SMh
zF6|&+aVl?p^_&Hb^MRN%_9*(JMjlTaq1_BIs=Tm)hkMWAaf@)UYJA9=uk6H5sWrMR
zGXT-xa|Xjqr=s<sEMn-X;?Dj1(Y5X`&(>*V?Psr|_q$^L^ldYr_ue1nv0l8nyNXNQ
zt>8rI3cQ=T6P@j5;m_zyl)ey>pG^-s&s3w6kg_i&(LAI7Ft8hP9(2@F+3Unu^lT1i
z%DXIJf>u15mB#Rmwn`{{zYVPSisAm|`Iyl}@8caWSj|EosFmn~sMiIiJi-AkZdr-~
zG}gCraOaX`8=3h|BWC))6g1I|hM5Io?2u*h1v-IPXjTBC`T}M@?=tHimcp}3_dt+t
z4!bck29`<_v0T0ib>_ZgsujH%bk0ZfpkW}BodmOwTk-3(16b8WJ-w^T*f*_A=(;rv
z1ZrKvHzV@#!j(<f@?a}hW7oO$tEpf$>N-y$-$jpIHS9ld0dr>2-6i}4|D%|RH|Dyb
zj!&boBGnu99I{YsGh7BHF4*=~j3);Wum1tvd%S9ChvCmRETD|*`at>~se*Aivrxa!
zIS}{vW!a&}$cODK9Nn}Qr7KP|$&5`r(n5i9TBqSuKoqVyJQhvTBA5sDu_O<vL+d7H
z^*^S9+>Um|PBTDcM2_5M3DdkZ7iJzW2Je3*EJ$$*t8qSq-mUcfDW|b-Z5+q1FvlU4
ziJ(%`&ZRr-;0L+BukWS)U;o2kwQV)nAE0^pV_%`@WrHwuWj*_LmE7Sgb=em~Wz4^=
z&gvr~(Rpz($}ik4SJdZVvik^ADL%=vQ%9iT7Mdx{qdCLqOFVm1IoyaZfoBgqQTbFE
zo2@Mc|G$(0c0a-_{icC{&NXUUH`(2$L)alQ;Zf&`QP;9J!hlgUkN;P$eQpc74^RcM
zo}=vX=`xIpRsyprilFgX4)^78sJP4lV~uEzRo@@-D}CTn67_7Qg>Zaw1(*p=L$kIA
zv-=(lX%i1a>$XJRx@aPJ@&s-gKOA-Z4lw5&`k9m`amnb3!omgg40x_X@vW}%^g9Wl
zY&{m#4O(OZ1C+JwI**E40pNZ04o^(%gYHLtal-d7oSHrfI|m9tPgq6%Ep2j#U1V2!
z9YYhxLaz2`9(+y{VAS(8{!b|u7Y|Rx<h<Q<hdGP#EG~TeB>{%Nrmk5=Yx!)WbKtQr
z7?s+Zx!Qm+kQk5y%|pU)b)+dS`&f#7TZcl$&_vXCjOMS>i(qbb1v(s1Ltn8N+LD~1
z@XThE?(f4l+gPLer-dMw9S}bA3P!7rNZ?1W;)lt#C>~@DDi4#n;mTMbPaD?%GK95m
zrs#ipD@4s+g`J`0aNS;rs#0Q+A6bokhXsSPZw&1hd_lgKp?*7kzwdE@VshQvPIF-;
z>Xq1bUB)a+dbo>99(PdOgTG8wu<sOq{ynM!yVdS-L0n&GGpJ^w{r}^kPwD6Wgq(a!
zZ*cXu%Ch-Wvr*l`8)9Sj@CAM5pz5&{=qemQp4~spE$bY#cx?ss4=-iUW|)$L>LR!w
zp8=&k#Ke`*Mk-l~9uLdW@<ld!N&{eELOgcs_&~R*Jy?2jP)^~zB|;)JDeK}L{rrVi
zqQR&)DH874i7-2}2nFZ02nTan7`dzxPc9#e0`2v(&W;&C17XacRmtn)hJd(OUDiJ5
zB#ItL%ezO;pzMS;bPts>|2M>br@XiA?k~cn(`(5mQG)cofV-*}u<{r&BrlavMnN40
z{qRNcx>RU&NCiOw?O3Ce+0;X{r@Ebna}=rT|8_oe4mga%=Hy_E4>=-kSA)`mIk=ME
zmsV-(Kw4*k?V8!pO!v8{HHQG(L%=>d4@|CPfxW&nsz2HQjlX_!`Qu5fo4yB}PJZW`
zU9Mo?>n~W3Su9*P(Zi-TN4Wgf0(~F)W03z*P<Tz}odJCycCj7QI-bQu_c~_3qMRM?
zia;e|V7YY72d8^80RLRYTKaSJ!Y_l)<UpB%M=eghw*!Mt>A;$Jj`Cd(WPNAZgS7WL
z=xy%@9ZPDt))6tyP-IX)A%Tl|03Wl%0{sV-z-K`*=+Yda?5r~;-l4qepOqk$2Q&3r
zd&>8kGqH`IO#j&<o+isj0lekjBZ!wKc9Ur*k3!LR>V!=D!!=45LcLxUvc3mU=d7G-
zg(<?1h;S^_-HaV&HT*_I7<BVe=6Rd^L%lYGT6_<;YEy+b(Q}afNx&px#NJlSh5^0C
z;wVcaTpBYS<Bf};k=Ovr6TY#;mfleR<_w>-@f;?$27^2$%^-I92$<RDD%=jAj(h5f
zRi2%Jy;TF@TmK?>b4rRLYfTW;>y{%qXv~J6<<`&7fR|$ssFloz;?DKZ`6G{9sz14B
zRUx(<EP~=p0hEWQWB8Tf{NAKMxHm5Ww$R<`=Vt0z9sR*nZxn)U(RyY-`YO-eTmmUq
zE}*NgSk`xEGV|8;1(UCJJbidEHjK{1(0$QND_#WVn$9f$T`lyjJqQBFP5Q$JTj6o1
zAdC%Kg}R@Iq5b|SEVQk}c-4HU4~*cNHQj7quWZ^i6>#<BzQWPpL(nL=54sE;1_Jx3
zX!J`2a&KQe-R+EbjU7<X6Tr2vO+)|JKCrJ3?PHIRf=Wk!Y&+o19i7ziL4O~Rn|pKp
z?N51Kl^g|kba<3YJQjbe0`*U6LTSG%aDJT@MpBQ>|LlHzxGV_qhdMYpgu|^Advw2+
z2$8GJvHV8}rhQomH~!2<6@_xxwsAQ1s`9~VksEXExh1PV<jDRgP!?-I5%$+kg0wB6
za7#J{n=2Qx6pv(3(Ow8z$A5A)jivDIrw@MaB0qGyKPpR%nEZl02pUtRh3}Hd<Glxb
zb?cyaD`gWtzBTYsJB$&-y|5+q18e@ynB0(FqpF5X^sca6RPW7g4H|gY<YmlhWFci4
z<`bWBGQ3_Ai|*;`FgE5I|CptY>srZSfAdTEv>Vo_TBQx0fxDoidlJ7Jm`LikcsQ)R
z7Mq`$K<tMGhCXI+tBB6qb7#Tg#tUekqRqNO#)0IYi%d`wX(0dTPfk@o*(S58sC?|M
zY;0XA1k%1PKlDE)G56>8?S(vwGI1KYwy3QCfQ!%5SeN+YN5*c%4|g)~?Ra0v2=V|0
z!&&^v^(1WdI|xp8@3>7pbsPs|fW&OJu-kn;=sW9!$Y2nvnSS8%?c330);%_0S232S
z%dmE9J?s150kCl#h6>s1xK;dWXz2i<(UMc-9ooR9$@$=*N4(Q%zlEQdRuGpygdBVi
zd7FhD!)z(#!lwz1HiqyYQ;PAu3Ehzszcb4_s`zx96=+nhB!7)1h<*CQ#JCVlS1kn%
z@;fM}t%Sb)6TrThc-guyWS{lUqPMn$x9T5+A2nI<UFRGI{b#}i(}Dn(MS{__W)}6I
z4RlnFV7J%jQZM2(+<Ca1I!ojy67}$|_VdJ*uHzxSucA?}dj8bc0Ww?iuwm*|P#<9|
z-2A*2ouV#r^{Recad92-gMwt;VE`kmQZN=YxO$KqUOE$qKjIWH|HvE=#r9)5GqOSd
zM;5O>Awo@M2mGy+g!0T1W;uEx>Ne=Y&lk2R`BEj5$A!yeZ%*U1i+SKSF$zwGuEq1+
zx@gd4gR=7(pp~_QSWULj_$O3msTjcHj}?P_S#i1GYpTr9><R>VsDPck4AX9Mp5HVK
z`V&*-4|Br9H9|DBUW6Ur-Z9U=Q^>~=4xvRFU}cz$4Sf$n<IH$jW0(W1Ii7@1J3@Hx
zWgf&Yp9C|H#elBY0niEcX0gRWR-dw(X|7lS9pA%w%*zN^98-Y`r?<gqw>*5hcno-s
zt0Cu6EHN&9WQ`twsMCCoPa0T-8Fj099Pt)wa-yNbZVb~s6V9`T%fU)~6q>t7aaZRk
znZDHuZu{|)ti`L7*Jct2ZmkhC(f`xpa!x4X<c2W|0n286D7_@Xj*g4Md~FNx@6GYa
zg*?n3=SrWOvD`{W4<CP_x%<*#EVZ-_&7TsdfW8xb!c)*=V>Q~=4CU%=nb^_XANszu
z0{6OVv`p#dpHBJXO$Qa6Cagw*qXMX3zbo?}MhvFO7eQd9BohTSag+a@g{bRRu(9A0
zSV#DPBt1)(wyTIW#M2q4@~lu0{jJ<KI~OgRUohR}h$F+O-_y8{yIk~VwF7kU>AWp)
zrc45^Z;vy7n=RlmdON*;&OwLnGzb)yL)+i4-0IzW&`fA&Lu${^nPxSsFF3*LPuyVb
z%~{YkRvQ;+*rU$)IBp#u0zEGV;)%pr<Sy+E>V{FU=wK4Iq|cMhKRXShM!TWv@OUtn
z1j4xd$=KT}2&_8qvbR#o`^qPRM)hC_j48sDJDDIbc~CBLoe3?@<TEN=35^+xnfZbe
zraG;JnV(F7gvHdqu#f}nIRF~BlUQe`CuoLtvDo=XxbE~II8XBq*NTHoX(#y(g0~7U
zrVqtt`@V30BypAcB{J>Equ95H!`R1HK+P_a1$}a5rT*m7wO$V0lskVFF^{+!w^_y*
zC1{8@pgqM%nZjH%s9l!H^@E2ob6K76PRurPCdCMY1dCwFD~?UWEkL0K!1xVE%hB{a
zetgPeU(ycKRSfSwm!r^6PArdKGW~CQ#8J)TP7jts``=fr;O-i<idYK+Dh^;*P!&(?
zs0Qn=SD0YWY}x4z2T@lT#@wq8!G=kysJ^s~du9}I!MJLD=R<Mexl4e5iG33mv>F>#
z`|<8Uu{`Iu9+n7Guy*rqKwkw6>iv+vT2X+PJt%J`>fn{NlpmSlhq_M$V4oa^(w<rT
z>E1Ia{G)<Tdn0R3*X9TBQckL+TiDi>PI-%?++3{3OjdY+om&w(j{JxTGJ@Os=YV_p
zJwDC86uLv#g16^5*ylo>p`!^Xh~6rU`lk%V5sj>C;u&;z9}f*HYxr*;%5UT?#kWsl
z@cgqv$m%|g_91@UMQt5)&OV7J1uFP+&tVk3^N}r{8V5a(FGBq?2Yzj5E=q02GV#td
znTB^MBzD+={?L;s|KCDkn{hr`#hyovyC-1>bzgTjT}Hd*e#9OzhNvwzpxEI9h03&N
zY$%5yhtIs%ul?xRa*VI|TZ>Uu5-yL+1Xt}g=F}$$%&KP7Tq&A878#;uZ4H20I7?R*
zpk&oLu-GyaN7K3X%=1eSbgdc{Xy^L#MJ9HI(mw9WUxUtI;y_*Vfjbu4iBZGA#AZ4S
zT0nW&0`fGy>IVTUhM~nM4nY@{IW9d5V#`#%!-e<>E#)jRgmV8|r(+r2ktBzx=b$(R
z8;T}CEDf46%!9FXav<17Z6}_>LVlc>6F0U+LbLi=HlsKef9#9{^*s}~QI;~8Z(k2y
zG@oxND+7zy0(?K)7#oV^5cw<t_nWArXPy|;pH39M<Rj3vAr-9>ma<l(`^@T5I6S4Z
zPm|7i?6ZkJ2jt%s#13X+I}6C}wE^_QBDqE8eDbsmgRT-CkZxSS1y9U{;>tlRD0Lwh
zG!7|K8x=+jki#IJ_qANSM4v1CJ<7GJb5VRp2d6y}!!^*yeQrn5zLywgKt4YDG#`zQ
z1@J#w$1vyq4BT}-hW2~km}IdITwPp_RX-z9t6@2GJoqWJHfiU*As#HGJ5l@8Hq1%V
zz$)Q%OrgH9?PN7!i#_qI$K8{a?~23R`jeQV77q1><N4EIU+nTWW1{Ug+^Z*s@>*UC
z3+EO>%sd0MNx2L~=0SLQNeH^m?PRu1w`E?g9Q=#t;*B!mrW{EDeb*_pS6ATPVS6C$
zYYv15waZlII)dbw0Glq;a}z!ZB(I`mCuRnt=FdL7>wp_%K0gbUPYPk)$2ydc8i`_Y
z1xuWh%lhgkGxz!4pk>j-EWdg{l12qiI-Z8D*u!IcuSE~)wu%Z>;TZWO9@Otb?+D5{
zO$^3x<(a%)<1Y`E9K_njF*Li<0Iye5Q9+wnby*SI8#p*k=j2MC42_x%Y-2?gv@e?h
z#?xlvKFT+kO}WBH%!@^r6T={-!x&wsToyJ>o(INCVu&17j0foqZ=zku1$rxm;q%||
z$%|9q$FX^6o|nTb3yWZPS2DJV`-0i*(_EgOgBu@aK&9n8EWH&8E&ndFJ#PU`#@g`a
z0mM-oOaI<W?|CnWc+42O2)q(K(aMjq4MnM-xhajSZBwB8Vwgd${Q(?q6$=$!6R{_A
z7&?Yi1%FjMm`zIp(SNTE?)3=<>EqQ*K5+tnF#H_uJ54*3J`%R&$RJeSI)#tgl}%3g
zI9|WOnHk;Ghl!gK@tbBAs0L1DwI8m6zTF(An)I4=3_k!pOU~m)pJ<$YIT4JEPebT6
zWvKtRkhQsRjPmkh-u`<b*l|3zoW9MQzAuNybK$g)C`DC?C+Mh50Po3R;FOgK-fJwd
zNqY`xUY!lT`*We;%`WKe>;nxWs(9ntk=T@^h^j|xxY2lHnE!Y(M)od&+#f06I$<Ds
zZ4H4|FFm+k0EiRwK;L~MPr7~#4@{>W(6?HtZS^Lmx4|FH(}aA5K_DvjBTr1-NtU+K
zf_OI$yw;|UH}=*6TM6ZUA64RBKV4MmqS;!@Q83?njPG5w3TH-F;b((Opfde9hzmX%
z1W}&J`C2eMZ=H^vD5NgBh^yS*hduMikK;Oonx=VhrD_MtFWwiL%@aV&W<zFvz?BD0
z2teBd1E9-U14Ig^QDej$2%nR{8j?j&&`AEsV=@f;G!x^!N}+k?U{I2pg4K;guJZa0
zPx~E?)3W5?*tm|^)cwFVTo0m7{g3I6um-`YUQE2~Kd!H&4)ykL*i&5@qG~ycXm;Ol
zOo$85q@d0|x*POWL<PA6aV>Iy>Syrnb_gU)twYIdRi-ud3b~K$8TkS+aBUulT9>g(
z&1%S4R|Bkf1nO3o@PhBzIAK~H7(qH%>`a8tm{oB8*kyV*(OGn03at4~XC=yL;oeBL
z_J<5LLdF2jeZspQ(u})ZL8#yMAM4t+l)LOFzTDg~d{fUM6qqR+bnK-J@P!y2ZoUQD
z&B!G(J{n%r&iz!B0UE3NQs(>)w}1JN+hRRafKyB~SZ<*6bSzgJwI5V7qoGZn!M%0E
zp>p^I$RA?_ZGHDsuBMv%_T-|i!8Gtv%R`-86~eavZZP{RQW!Pd52p8oV7PM!OS+d!
zGr9o~6g~*vUOf+!>5iD6;K~Iba`bDOQy@ij3O%mr;J(hGXl7c5`ie!||MNlO#)qJ|
z?ysyx6@@Ay4hx)@;H!V7sQE+}90!WAHa3r2Es4e!vkhQ)7eSp1WG#{#vW`V2O!7<#
zGGr2N?f#FPVrL<6Zz(j5IZu6}3)JOO#KX>2be4=`f^S=8jjvL{(ryOM{}X}Y&Pd_W
zIO>CsYhoSFHO$`A5{^%h;w7gD>??l8if2jr?k}^jd8;*RC{u#|PYcoig&nlK%-~C}
zS)x^d9+Q?IhZec6OfSR*HP&y1`W+)cCFK+MRgD8%&=o$I9uC>1-CTQxCAKxlxz)3D
z2rr+(y-SXQpv6Pau~iDw7E+JmjEp;9_r=ONL9lT~4ti6c{Km9P(6U?uHkvxnwtEHY
z{Vjvgy9vxO@(c>})<EhmIkvsCfSqrMH*sPsSf$l5(VWdtP_2R+dg#2qcRqPu<Yn#t
zp5)$80m<t~X8o2}4a7+)#S73$yS|LDT#y{~g9#43QFnhD7CSX^m!oM=8MYZ=c?;9}
z-wLqrxWGQVCmvgBFUZ>;jj_r*nZhSz?l!f2rbQgM^r%CF;tEs;2QZttAEc`c`1Syr
zJKz2xQ`H;Hw)YH2qh$xNb#Ntnyq)q;K{6<}s=<u<p<v#}fI9MHm`<($iVrn$%ZchB
z{xuZ#Y)QtzjsEcdR~B0RGvzBM9>rItebME^7_{7y#I3j6;|KK;cyxv4S|7=$&fA&A
zrC<mu3ga4wWS}~4H>=euf)lVFE$#_Ws<Mr{|JTkN$C1NbJBtT|D5IH^H%>lNhj-r;
zBMF1ZJyl75n+Vvyy#k{?uBRFFFJ?4F%7VNGgH!J}OwhQt%rY+sJU6a_w$jm<)3k>e
zrGf03$O3uCXjK0&Ralk0nA|nwGp>EXG8R%t^XX+Y+OrJIk3{hy3g^&v`x;^4lC2o%
zwhra(t~}pmJ9OT7#icj;KwW?@NDZH|C#6TRTAq&j9#Z!7UJX9zh=w6;SKw7!Jk%eP
zaH-2vrrzerTlQsw*?^0nwO|y)&envW-LIL`4&pl4C9zuHEUsD}1CbUrXgErU=Ii~5
zgGroBhys1Dc2;ace740N)T`XfQx@&Vrf<sNue=ugtNUPhMiSE+nvP8)@9}+0%W$WC
z20s5U0mC!%;n!&$Y*T*0<gIpKN7-i0KDSu*bw8-T6#?OA-wTBe9JNA;9~2ue6O4?3
zEK}O=2F*gx(i-d^Uk(;){lGlNlDGUy=baDBp*8&oF;eGI=3*}vN0FjEDF;o|7f@b-
z-e)R)kfVDQ)oZO}qF&bE9ITBP5YH>QBmPeqiS3p}ti!PY{+-OgAitr!b&?MRSNY(g
zsj1kRd6{Vjbg|Q`y-}2GjP=7-^6W7KK<n&(ygoOMJxoYPwc#W1K{VxqC|lQi$T_%H
zN_XRcP@LJG0o}FNxu)+_p7D4N6Y16n?=IVc3IqJPu8EX=kKcfzt#|kjb7FPa&VW6=
zr5I*TJH+E3SuW*xT&C}Y2@l9CBMzlLi44rCOZ8H}5*<HnMV$4EXMEod!ifdM>{Eml
z>ZR!UbuH-0Q&GHoIuz$n|8_Jvr~MW)534gcbM9=~J&c6xPXnR%qzudr38I`g-QP-1
zf~)g%W?QreO3bIAx#wv1-%nz~{ZeKE>p5W4F9hI^DQaljfLt%R+}KKqet&sF(TiEa
ze`ztOvf?^#FiE4|OC+{={l~lATDeo82EUN80qK~>+BNsVCYLZASfq@Xtq!55TN&#P
zy2hK^T3Opk30K-23%1^2Z13<4R5(e0r{@{M*uVPZO&$cbdS=x7FN5d4Stt;+7}(y;
zlXVTg!2B~(x$UH7#33h6@r*5?e0vG8p`P(GqDzpbJ|5bl#)HW0x$JYSAD+BFAKU!4
zp`b+(zvS+~^22u6Mo;hRiNv;e*vQR;1lW7~T=Z=80ep)Pd$)jp*`S9mTMZzoBNYW+
z|H_L-+u*PP7cgjH3Yh+HDEfyUAg_=+Gr#eR3A`%GTQt@|?8Q60@!BKmh%4dkDTTnC
zL&!l;3~NkNVX)^F41G4A{7M~M*`b@&zDVQVIn8{%^+Xh;(SN@i3r~NK!4ywl(2xy*
zwiY_$tN##oI#;mgew_L=RiJR8g$b;#=<jY7V)N!_Y|r5m)S59Af~v{ovAGabA0?6d
zFAiJ}sxz_kL9Ro4(Xo$w@J7Tjj5s95%<p|LH}E7V-|NM%JlupszvSV!Vm0*RRhTfs
z2P+JW@j`nHsxI~A1+!0~{D8J_^tpv-Ha!I%4z0n4Tes+5m=2;_lZD4mWk6?-7}e)y
z%bGR*u-z{m(CE+>u&V5jZuSUfk}8}$<qG&GwDX|;-s~0K{alC(+bVm>s(XA<@~e}Z
zX)OWml4^WxE(1&bD42PU&Q@`Q(RPeKlllzguQabx2cEu1>*mqCX$PpEnug6+C=b`Q
z9uB{)!>Nzs@Y~&!7)HF8ws$&U^Pev?TO46d@?R`mKa{IJJb@WM?12PQ_}L@`Mbm$?
zYXQ|bd>ieK?nt1vGnY#e4|6O1Dd70g2vuiaX0_&)5IbTjxz?W1-phuyyzyd7_tAZB
z1Tn|HXQ1FgFFs{<4Qlv}Lgkf4l+!=Nx?_k{Z@Pr5O3s2Lb~X?Fe2oiMpmg%O1dO-J
zg2K;;ir;A$tb3RF6c=L5(7t%&i8abQeX+QygDbo=VXE`(dH$>vS=uFCR84c{!^B1C
zVR8w&W_eK`@s;p+P!w7zj^s~k&QV7?x7>T<VEj<K9u><naps^{%shD#f;P&aVfa~W
z$@sy{yFbc`o#f1HSPS=5GG)^4qdY8$ygu9Nh*=QIIz0Xg)m#w7*J{f{3->{hv;zCr
zRDyi$TrPb=K8Kde(Aal8sIJ!M&)@h{muEga4WWGWK^vK8aty<ldNAVqMa(~N7}^(C
z0N&dU>MJIKZMOrrN_fX?`{d%sn~eMtvw6puaV#<5AROFhhXJugsMWL)6W+UHmq#DS
ze3^oOACZeE_#vOCbp}6((lGI>81}u~h?Bif;uftF=&3LR)_2(BmNH|s5)m)pQyzSs
zFbr|wanRh*3&a*TxyNC0%02taVy2x3`-i2FP&O625(>fb<pq449fs=Pt?+>_<#>$S
zd6#Pq+DVGBS=}5b-|vSb5_GWmOED972cdk*51G=VUtIOu9<DfeAhuO}W4qc+v1e=?
zjC!~hEhe1>&5{V(N#}AL6N|w=J+MZ<8aiI)%0_*vf?iAZqMI9aL`uBycH31@KKheu
zTB`Ec6BEI7R46zVkA%<DX;!hxn|DkOCuXP(JCj{XOl3LxZ90Wl>msrBf0oc0NgWQO
z7-3WIIBZI>fzHRyAP}~2QPW7F?blA$MR!>Ze8_A6jRfWTuR_%)y}0V#&1|Bh2bL6F
z!SI#{K6A4i)YC`Ff-<YHUO|Ppp=vns!$z#_8V|0~Z<&`h-DMBf2rY_pps$HHn3V;A
zs&o>~XgUpcw-XOBY%FM`r(^aMF<5WA3h6`3@Zd8Sbo!q)xzC=;T&Igz_h>&%w9RJ+
z`pHlymu6&xnz?qIB{t4mK)n<Wp3jd!iaIeA6XS)VEMg+H%iy>q75tx+K}X+HAbtIY
z57pg;D(jNC-J?8AV>6-o^>Z$|N~~{lJ!o8Q%ol$zpxm!7y3BjZiWH0Re$zC(JvI?v
zT$zB2UW`GVCm9glEyMP0rnt13c#EfFq3c}~iaIk5+SjE(Y_uG-j+9V_{u47?y$vr!
zCt{(sFDm4=bHm9YXnKosHSJ$mtZX>Dc#3jh7cYZKeg;lbv%xR>7o)U4dE64t5l=^s
zVuv%rPX8-BZeArinTF9UQX4FDCWB7TY>=M|EH4#Gp~GbdnkFB?_WEErPP_MbJ8#S~
zT#6<!qnMJ!4J1`Apl|HQ%vcsN6hq2e4BiR99r6X++l%<ccnch0a}e*nnTuV6W}xdh
zF$iR0))E@V-$h8U;g2Sm&rt@pH4o37O~i1c^Tb~Wh1dZMcD^n`iB&M>&Lk&r{CUW|
zei;oG#p6>24RAL*%}gRb^S9asAPrp%0`I2_>o%5w%+wWIT#IDtlS74O+b4qH=yh4P
z@&d5ATumN`OZ@Lw%FiYB;p1H+h+8bd*aF5pSE#YMO$qoQD+S+=U4S2yQ^51>BsNGU
z#b(nbp!sboZz!_>Rig`7v8IkP#F|_@)U-V9oHso9+z;C-|8VIYHEy+HKU2@Q6n>_8
zRqVtOpiHiBr|J@%@`>Y9%5f2@36%Rk<L)c{;Q4auemo16HQv6(l#c3PD!+<uznsw6
zkoY6pl@a>Pq8WQXjI}YK?=)qQN1TJmiW1CTeF#6?2n8*rt6*NB&ds&lc_ML8)p}L4
z{3HcF&}A=57jI;t!((~FC>!$jxw6NT>`;DU4}Z9S9_re>X6g<F#7SNOI?FP6{=#Hl
zGh;qJqVo|(rh==Rh)FgNfJ?`VF-9W+luD!U-j{0V$`OG`IZb%r@e;Hz`N_SLy<yb5
zIxxQ=f#u7Io%0|Gnkxd)x-66hsb`>gr4p9ThyoRxYS48}XV$p}Af2}ip3jz{ZLNn;
zogd?dcVj`iIt7*dw4g$f`UFD`LFgtuXpP+l(vkCd+cGx@R7gc74f4BOKgGi*H*?9q
zaA8ib)7bxCAnN@di-t3{;%8nAo`xq_t9v<i=t_jTeU3ucmcdMU!7bs!emSVMI-arv
zYjA0?6$&;Dk>%-&@r}JBHcq|EyojaQVe+24I7zVn(O>=}{R}jC(6hU@fOp;4%%{9v
zhC!u|xr}%v(o6sNgVjq=;M%*4(;iV!^h@9Vbw0LNk*jyw0nop;7GCu#1FI3~OnKH*
zq1*UL=vy0z_QxW4@dJdI(TQLec@AH75D!8m29abxG>bae?X1(F@xMAIjvdNe9OW!^
z^gPTMR>Z~iugaf_(zsH;J3Oc;0NZ=VV{?Ne#Eu>XPMyb5--TuXzsk{lh?Heq4n}d)
zK%s|>`r%WfK)2xy@7Pi;{4v}g#BbMdbIX%5tH|xJ<?L1L6PAl!VU$tMy~wvDAH@y-
zR-^5+K_IWcT29O=K94?^od#(TzB*f`v)YmR2}fAsgHOEKUd+^OFY~95U+}L3%&~ss
zTC5@Fg`o16zOI`atRm-dR^&t!4^NZD!fE>Xgc#UsYrxv&1!(s(4~NvNV%@Z8sDGiv
zB|)Y#(X%3K%8LTEh#b(>x&j|+E0L|2;gCV;;D4(RoOn+8)S0D}FHFIib{C*+QxUQ6
zI7``f7Fr5_3ay6kB*%0q{I6msHgsR%;mLGYS~>_6*QMj{{EhfW<upRVY4*3I27f3f
zfaa@UNThCF_>B^oX61D7RIi2NfP7}XI{;O_(7gQgM(Ef<jvPxr+;Bb(mmgL`i{`E9
z+8)B{r_(&=#0d!BFXCgIXW$tJ0h)`vc;ewQd@821|3M9wxJ(&3d+&qTea-CE&_1Y;
z`j%x3ABs}tOqN)<6BHW5z+{RXhdtwHn6nj~uT{cmGwKYTbryEF3<awkePV1SVMxqr
z%D}Ba+3(XJj*kMxPs31EQ<HW$J;G<khta)46U2x1g1!s64_i2xiO9ua8;e21BKYKb
zgnC6?^8CHz4W4@77!`oU(|SQ$j0p>`$)s5>y+0Pv%-CNJzJ|oIqnwT4`51U=dkULx
z)-!+m;WVqf&JA5kputBMyKK|I=VlTbr<FtX5>LFci@vw6sY1&uYoTC=D(H`oM7LId
zG&wX9RX1wzuI;Co)inty-JcF74&zvcj~<i`O2u0NL$R_hoc^v^SiaR5KdhREU9Do`
zv`s-<MG;8)eCEmP{ZNvVC~W_@8roav^RD|p=F%!f!4h|FqM^@RPR6mBBYnV!y0RuY
z`V6-%f$<tz=sI95yLXTrnU^nuofm@3I}VyAUs=Q9v0!39i9N0;2J|~d{g4gZMQIaC
zmih8c_by@7%2aUn@j~~%o?!BNE}!OBiJEHEqg^lnyU$T?XToUOtIKiG1wi%apUi6c
zM`q%y#(T#UK*QR6u$!9=c8iF08A*MEp|mUgyjdu}J%`ua4<sMYds%qt6Pbu#W<OMC
zU}KRXk7_-JuG^2Y)QcIY`R;#Q>^zy>?zIDzDPte{x|F3AOaOt;Unc6EBQv=xqD;U_
z6qv@BcUTRDjsa;*T~97dJ0<~Tr?t@hWek6HAQe50&#>6NCwc6n2A-ySpG$5YlbQd?
z6e?bpLsy<3=Dt4#<Hs$-VwKg9Kk%VY$AL5497MfOdX8e6`0PdFao+wafVbqZJCevU
zY1ith)ElB!Zl%0V94kDWfN=vdF?sO>JePDDi~W57<jEi}^2P4Z0*u<T2D%z5Kza5e
z^fdK^j298`qLz3xEn;kaL3^8gC$P>4hYP`!o#-FNRSGsh%Yr+?I-OHkwR8p6(tOP&
zpa>*0dU2Jaa^fCUgZx>H(EOG%)b^tJME)(IF}*)!(FkhB`ygYW5Bu$s2BAS!AXods
zdUzZ@-GZS1aXF}6mB8eoNtAEfE>pG3BNs(1MhUmV@gG;|Jb#(l&E1CmuN2}v6=HRm
zykc6P$DrC4ZxGkZp!u*5+-e}+?$<4Bt9~$g|FOb@hj!6<rVo>)`-0x50*qLoiyu6)
zP-)C{?!UH&rxnHF>U?q^{yB`E+iYNqg)VOSFA8qHOG59{b5XWG01{&gxcNUN2y-z+
zFSAl|`u@k7Tgw=n>Q8Kj%Laj-66mXckGV%>LwSQA+G-U;lxrGHXsSVLeG#OEgu^r+
zdsN-Inx&1Y<YE)z+<t$|zZFyG!21w*4w}!>CuYKb+K16Pvj*}iWT<{uhuim)^Yaa(
zu;U8n>Qg+Kdh#j|PnBTvDhFPinGRWlgRn#2nB4Gf!iLuYOnRq)*H5KC_dSA1=?LCx
zRtQH`D)8N&44m^d0z>wxVG#Kf|K2)D9?)Q}8&L({w#cD<cNVL4I}Hh*MQD^o%>A@B
z{x+Z(+MmdH+HwJye15>>#<#icV`4Mqj>f+?z0q~l7v^c3$-eD9M7y=aLUq;qGP|2=
zG3=QX??oqr-d{yD37pDZ4bo(ltNq};4P{zx%mnA+t6+3k0qR%J=GE4DsA-@DA^92b
zCE6P`Z%qZ)Z<SzVIUHP+uM%rtLm2+e8orG#17&A&rrV~Y#ORtVqdo|=#w-DE!%JY4
z6bJf`lldC$i+Di<DBF?=*6ML^aZn_N+>1r65_{~@?qLdVr}J$MbvRHUK}nFaOymGe
zsn-SG@`KpCVUBD=XeAo&sUQ!bmazGF4rbglp;_Q6Y;84!K`{sMM@J|gTeAgiV`9-{
za6Y)F$|2m(jr*_E1d&u56rx&L``sp<nt2fWIa8!ECJc1bLSVbmDHOb4DD2Q#2!`n+
zaZ!6R3Rb2X$Q>A3yB-AB5f5bH!B=FxuEn8hR2DOObr2g{-m*4#dwAu!jr`^lK;7{+
z3n>i%$(a#QkSD>WoD3`(wgBzLXSq(n84Rks&(D~o;&G5-Jp^FPHF|H)odWy(<;V{V
zMbDK(*^jG1xOLBEv{w4g^;b)wD7g-wMxAH5SNmbU>;@0mqfQLewV+Jw9@M3-#QJF7
zrZf&cUmpQ+*d#2T6G055i!wp{5m3FY06HJP$wd7VWc3SYV#mlZt}|^VYwgNq*()|c
zxcEOdw2Yi)Y7DHGQD$FZFfl-dOc@enCJ$?IOHm-KNKZxG!f^hot`fi01e4cdCFlnp
zhrKPA&}7;>7ILr_I#<5ruRmSI%8z1@`)9E;?KUWZePEPf3|8YNGI>p$Y@g^Xs(#r8
z_P6_EWi&AWS6oGjTaR$s#t3LJS-?d@#Z1x32mP0BMU_2Ez`STY49wk*Dow{w8X;nr
z0*GyAs?TCmTUhqZcl^2b8T4Da3;TIj;>CiU*fOIEY_-NQTT^2Qx9$>lX#W$YG)iIF
zITy4|Ea7WaFXPnv`e>~;obp|(q5EtsNJjimrkQpCUQAU%m22<##;$U#DVvD7FCvMd
z{8zT%-%`vPW`TV(PV%V1-Z)BJh#7NCShW#7n{K0JItMR7i1$^nwHgN<Dt@RuM43el
zPr&83;?OENo{iEVCuU12X87D==`uMe^E~MPw-P&+#X{b~1=yw+&zpBWV-XHRaoO)n
zn0JmcBx6>g-F)J|g+3Pc9_)+I5DwN>Ua;owM&!?q;)jGNaC?;xb$8A{h&Uc3c_F-0
z<j=gbFMvQA#`|6z0Kct$;DHjoQ@=bD_8&0<U!5z0U_Z)~7zT6y$9_zlwV>Q+@nsft
z$cIU%on{s~eyCuk0we9F;Dgj`T&EF<825(Te+*+mPnPg4^*-3~emV@J?T@;f2hC2`
zf`V)eq%VjE*Tih5Y(O*ps$JAuKEpy11Hki{H%{1j78h3%w;}JVtbcS7HvFP~`ihkx
z-Z`l}<Zl7kzV>6&@G?GIOTE;g$=qt%D&Czf=AQHVV;2j={E!X&vu+Z4zMRMEyM35o
zc&Mzn+!*?gp-lA+N5FaHL6&}Icw7rScT2ge>M3Gd>Vb*cUM9U~#9Bo%)-pU^n7=5S
zc-3JBW>G<~H;ALvu_m@@VLr|d^~ILmN1)a(8`ILV@rFSpyht30p?~7x<N(Tl9n1zn
zTmcB=vki1ae!||P=yzW<of#}lMRkq)Lea@MRBnDQR4`k}8}{Vzp5`eS-zy0t$7Z2o
zCqiI*4b)AJ$J@b{sB^Pk=2lz-`O|2BIkygv&yI%XKlbqZKqXph`vY9K$r^rDfQX*)
zu)SHRT<rlz1JV(4r=y&hSni$QxO~@eVP)!eEL<J|o}yTIJ+2ZTZsTaIUJmZ-yZG{!
zTs%8)66Oy&%A_yf@i|Yu(A*)7e-~=thzEJ-c(4Kl$Ck*vqbS$6WE<P2g47Ky0&V?d
z6vh5B$ZZ^ex`%_wQ8|iF^9e#7tu$y?r4Gn(8qRD<03Fj_wCfxV;rACZ|J~EkW(6@J
z`^^DoyYrNBih_=4nr*+Mduh)Mh*~umRQsz!w?{soJ){PuBc}3g>Mq#qU4YpWyg_$^
z3#pGiiAkmj4>}Uad*do>GmWL4sRJBt8i}@-5@6H7ap?H{68fK?0L{MdnB^Zo%HOW%
z6aC!KEzu7=S48q&uESB;?+3g2(F~g?Yu{x*o4h$SymDbAygDL>+!@F4dAlK+mgYcr
zFwj0^6Dt4s$`Y-|!_x^#d`p5K^ztT$=dWpSVtzca!F-then)6pvIWcr8o{N1Be8wH
zDVThk3g#*U@R_8B?(}rGn0tbOToYc(4`Q49OTKv`IZ?(WV8hN5Y?i;_DXV<2=~)F7
zUW<YHekX{Dk|LZhIE3NLi)FguR$w-KFu2wRa?k5UXeV?*$Nt1E>T?kncjckcHg%kG
zx)x(^sqwM<63{cl2^8$8CvJaA)-m3OL-<$#D=X@r9m2BNM));w8#Wz&&qI2V7bB>P
zxB72{2aEfo?IB~C)!`zxKaCh;U!|z9<Pdx7K>U~d`DLNgFTmIg+M$QIaNCk-%6?i>
z=1YRAYZgP$p-7f-aSDu&IfioTqYPQM27jIOLHF)Qtl?D!%l4-JhBy~SH!Z@9aUSsW
znm*K*|KUdS>e!7);x%PVz{I1e9M>g-)hW)ES1kl1=EpOA%VE^oC3vQfFX&QM1wU^^
z!JyaWW+Sdb+h=9a>|V^8Ov6y)$Yf~Gd%~r6jnL{-2a|d@ah;);Ah~fFp2(|2#aZc~
zkP`>i3DIz2MiTZl-b-$wiLCLxIoq&dF`Ad`g>7?*m49>y^EO<9=96Wz{EPnhHhdE5
zSM1_n)=np$q$^9jKz<7!CkR|1r@Y}1m|f~g_j^C~EuaQ$tLLM7S|8!AMGH`5x{%i|
z3g%s3cC#Z}j*zEF#_HD}$A(1}V7qP=*v&W#iEbBo)PCas=B|b`jbQ4eh8bipsbm==
zg1OF^NM>HN6l-VE_fYImP#tN{mDbt7%uN-<ZcT&4oz?iHb_u$UX<=*LhJ!@?3b!A#
z3T&VIa@o6BRIOafbhC;eL&2CGdq=wj!#IN|>hrtw+6EctqL|<-d9W3Infkyu)+zJ{
z<<Sqg)8$#rX|xsBw?B^ZuleP*s|+zw(+tF1&Xd|KP*5DUQ1s)j(Esp7_<q<NMUR&V
z^Q+s1q9uvgnd}Ia2h%~3X3+od<`ee40?rEB*m7*Nu-SprS+7TEv1lW<`)2a=UxTp0
zSr_WJ7=ZgdQ)rnwme;Pz#e+K&FflY0N^KNy_&q0VbY6|JNyiWt5-<36zAUOL2Y%$x
z9$8WdPNH{QVbC9zaW0?hw&r5PY~uRV+i_{>0j54!i8;BCW6EJULj2$i{?{+?LhF3c
zg)cn(?>Zipp^S}9ZOr4~0DK)myIt`Hh+1FC5?ia8+4WOk`GUM5LqBnqjw)yjiG=!R
zCvj((Cr)rV58@2E$5$fs9q5B~MkOfD+QIaVDvA5}k2l&627RSL+|$huTTg8S-M=L~
zw&gOHD-LHJ1#<q}r5rr}ed5Jl$H@_V6n4pr(OD8ind8N1KPinDcdZ7U`D;P&zhvgb
ze0Un|&0eXLf&52r)L%N37;~3V(dH_u9uC7sc^N(9rrhD!I8=FlkBjGJvSVJ!So|rA
zU80Psy+JlNb8KXfJ&&T;Y9Z5DF2=quGeFJT3+&%&LhN1(F5V{v=}J%b`Kt_njMpV!
zz#8n3#-KcMmP~VS4QgeCvw1E7sPWF5_%u>sqniWk7S6>Ca+lcZk$d5;BNLks5{94p
zEc4rO5nF_cvR%Zq8avJ(=9`~I&*Kho@=`R$Q?J8w<X2vJv<z$`V`QD<=sfpx8knEV
z75WUWz{DTAX#1pE*gfGPxAtq{R!@eomcBmV>k<Wy`_I6SvM8)Cnh6~-J+e@8BM6Rf
zGSIZT$(4Uc${O}PWA4=|P`5H2Odc77XU}9fU6+UE@zp|cQlf#=$G<G_?P)q2hOpNj
zd-2|{^H9I;E1yhfJmZ2D=-W03`~UTYJ5?N&{mX^Qe$#pAK7Vli<ji$Hbn?cS2IhYH
zE)%QgLF1P5cynb5Hrz0V*p&gSQjg*6LF;h#a1Qxr2k^j4S#bL3Ow<WYVST<{KtZn!
zvM4L!X}vMWuP6L4jWYa878#<sO$?j=kLH9YW0>IGyYkjO4J_(6-OojXG4B3KjPI_2
zSCfd_FzyyFUfCbL=}au&ZY@0Gk%nK>=sUzO3h~1_?6vA5HcX)W-)drAdzYfu6%O6w
z20*l>A<oKLj&I(0lk2yhnRRM_x5rlaGcf{R91Ouw8!_glmO_SK0{spiu(p-w$j?~<
zPC<68k=#z+DK!`wAA<6Z60R(m!~!3rVVCqAbURPS$+pEPz5JG$ME9e!cp(6Nt~zig
zvz}!RqU8&~d~FVktq6p+!;6{GB3~$NCvL~}6zVMJB6K}uw%`0<!)D4e90@}kV(>Dx
zaTxw?9sZnLi7I+otSdqpL>IkTOTVv7e^@oF+w6^-LOF`YE#(;-<>Ve(DzvY=%v*Mg
znOfo%P`q~$+;j_3!zZ8P;Y_IKz0vDe9tf^|l9_DX#+)R>z%pnCwDh<OHP;^kqt;2Z
z<C#Mpxjd-F82r&x02^ipqLVC|(?STXOk&uM+I5&2atT`J48&lgbM&)}#AM1T+dmdS
zW8_HKGIB6BuQ<mtdJ{k7fE@IQO`$&QKC9j70|{;^sK4O^%hr#<CowV<Uk(EO;z`i=
z>J@HfMf=tfE1B){Ja)Q}zE8dV3|f~(fu*RDTaL>_QPV^IU@dhM1s9>`7Ijj8C17oj
z98~Rp@%;2^SmPjr5kWFc3-sk-O9Id{^E+4a8UvZ<i3@mpENnSS=l)>gK?JI@mVN$A
zR6Ux7^RZxGb`lLYk@Kay0*}Rx#2Fhd;;Ye8;%&*ObFh~Ed8CJ)HdC4Z;Z)F2O$Nb`
zK=i;!jC!pNrIT_X=x6{~=k0|DzxSYGVK9iS02&3fQ;}Bl#062{Ip2fpjC>=MZe0Ym
zQ<Jc**9q{hI|A)}UNgP<X<(Rn77om^#kQFZytn6Z%F}kSVvk_xx)=h-1r?ASdJ+fB
z+lb09Uhq)2A~ZAiA<ktr`@Dj~W~KG$_MZA<8z=F=9r0LiafiEB<gy0s=S-_Kl%Ci3
zteNJqk5?q3{m0{=5URz-CQ<lvA2Q!N+t78xK<+ttIi!~t0BCK)j>9PI+Gz>%!jquW
zaS86NEyT9Z641^39~1vmgsuU7nfSsxwsD<5WNQu}Zto8kv^WohU#DQe)-qJE=;HtU
zDYuc8%KmHCBR9o3@ceO=+2oa@*3nILcO41diVxZ1oO1M@A4l##fAl|L4`P?&LWRpK
z;nurxD9*ml@2#M$>mXxLtPrE5YLKjRdL9c?H%8BE9<cFy42;Sl*3wB&VsIAn_JId5
zBPfY|7)kxK-C~^fI1tY-RYbkOTyWu(3*DlCcH?4bN4WrHT6JKmm;<<aE&gjgfXy;X
zCaN(+d2VF+l<lKY{NXS;K|7f1`*QxRwj6Z43u&JHi-q?4#TUKafi1N?JiDO`tgi~8
z<xU~woixCkiD$8yvS6Jn<C!4QpG*3>@cd{0$y_ztWiyto!|<V(p~K@Y&loWi+&3Y{
znmBRWORM<T1EHufU?bgG)Oqp1D7I2{D@s=U;1b_7=(<j><J~i`W$9J!M=Yux<wr2W
zhvuD5wm^tnndd7%$nWI{uC{Ivo_7XruicE<!Eul_?h13WARdF?F0L6NX6FyeA;|0q
z&&XSc7bIJ;<K=I5TR#MCW%{!A>ox#Ohk)jUP;At>%UeTjLDKn}X`fHR#NI<-Onn(D
z4}BnX?|+kBNp(c!E$uw^VgV1D`GLi*`$vA$WC*g2=OJi^Ci-z`vw-?QtVMX`<2F2V
zj=rzAnQ{Lt3C^RO@7+gPSikBy8{0+Mf#Fx!l$&$W$tQ-34&D@I&JLkGqcVQMNobN?
z&edhru>AyO9+eV_QTK@T>{^b(%3fF)eFmBp>OjL`Jt+U#%Js~nKqqztd*v*_?(G{v
z)_oCdVm4v^n-H1a%Oa4kFM`mB0JJQNgMahRV{wlQDj#|+Y;0S{hwM#+j$W~F!8H<{
zEEhtcejtWEI!+mreh|9x7PCyc%U@4XN2jw-xpGATxf=3$3}uy!)|>^$@3AnLxUhZa
z=)=sNXW`qPJmP6dDW{aebT$Rbw07@?AkR4FKUoTmvl1a=;#Rcy5)Ff9MH72t4S8^q
zn8${EoGuT>{FzI*@^@!eYum`e4|y;}pR1@t_btf{PcHc@l-awU1uJ`FW~$yFb*9(z
zuA6HiXzx8<+|v&fK6ruT#sQGje-_@@zY%{o$3yyxt=M`=k<NOpyvDu+*X>-6wR<N)
zb4dW`hC4u1k^}S^7KAGgpT?P#J8YYt&O9wvf&TinfX^wnVQCI(`kO&%e<APnzpH57
zP{TG|{y&P&Jub$s3&SHx2uTPbBsnESHG3fmAtWIrQ3y#$LNYo_hv|G&qcbHVm1*`W
z)A0=<gb;ELA%qaV_5J_)sdvot?7h}~U)M#-$c|t-LnE2u<97KM)l)FcFB|i`_o5`p
zPwserD?aIy00S&+se6=z4oMZ11uUS<%sOy@v!J;vA7u{%A$PJLPuN+&8%IrrI@4b~
zd|)|<G&7*RXf+sSzLs0ReJ&jDc!<2^+RSUVBRbpmfyFw77+h4%#^%_d+Mq$O?mNw{
zXRik^ra8)<pZw8Ua<cXNDx97bje;NJO(oqokgKu^aPwj?d%6!pMv<$=X*p`MB&be2
z1#N61!n9C!?^*~Bm1SZ0E*bd72Sba$GZW4GB5&~M&g?hFLL(KBTE9K#R}PEuzh~31
zJ-Lg!wP?@`C6C?xY>o~2YM@Z=NlxPuSoVE1x@&hw_p+x#ljPN4(L|rg$y0ny!W3+G
zRO8K-p)6!w1kN8_fx7uY%%hUCib4fit%`@-Cq_Kq${jjYa+u=DA}~{-z5S#*)-*yB
zI$SHE-C-INuT7J8ie9i0H}|1@O)s>s%x6vJA6aIN0?a&;gu>4$ph5fBgvkMH!uT-U
zF~u2EB_eQoNAK63cbNK#%do~V5NkGW#L=fK(V;FMsz+oq)$Lhq)Z_CwV}2Y)jh~OA
z2qosJT>>4^nanFU4}$%pnAd@=VEAhS3m<R+?iB|^d3+$2Sd-79Z5Q<(88jW*2NvaK
z;JRxah)(X1dkvEk1EiGOAMORug^8#{ytQY}=g_D!3oS=a<|Sc~_?Ko_eJ97F=s-ki
zrp*-4`fU$hkEZjf0|PPUN`H8}iAE#cgK)`g>g{I`704_dE*{>E^>jY;E*FDmlOLGa
zHgJJg6#qEkIC0URbAxjs@NpsWr~XZmYi%0EbxM+i8p+lud!U8+sl~*lab`Dlw_|Gl
zaj?$Hmj_n|vb&4D&}?IGUT$3jqPc5K0~;q|-47q=Z&-%kX9j~|!)bZS6-|gRl;Kv&
z1EkX|HYULybPq>>Rp$vNDmllW{5Xh;k<^#*eI%4>yx=brrO=j4?8reoV4lPUwe)&%
zU5h+!_D?P}+(K=mXH7zp!dPC>Zz?Lj-YlJX)fZk*2nCPc^ReY>mRx%A5RCinf|_2J
z;N|V**zoG6aAjd7#`y+daWg#=*N*2_>Iu+4oLrQ54l?)qQr;zZhfnWLP})D5&J^Uy
zS$Yzh{;dP4d@dC1C_}AxIT&F}-GRQ(x%<)cc<uHs6dc)98a%n0HRbC-^;TlweVu{T
zYdPz5_{Iw;pCYK91o^7Fz`C?pzOX0?q$gu}iI6fpC&_d4%bUv|9tTH<Lg>`!AYPgy
z^D+<Oy<K`>(Li!{unXKwx>YFHy+SU&L<6GhzVLXw20Grdql`<jvB|EDJVq-N<%iNi
zHew)mKfakuDd$yRb`la7)?m9Ehh)DK*lu%5nA_74Dq=TbUEU&it~&@@<HNX>K^-^A
z{>#wwJg*xufZMOn!gq(fkqwT-rXTe@cc(FUT-pjX+6Qs*>%I7~hM0~*;=+6j1O4u)
z;9B7ahQoTItb;O2!)CFBcs1JpA2Sg}i1E2b5NdADWhN^|f;&4U&)gftj~Jw*=;1L?
z{3Eu)8!M2ME|q6?o`Z*4Be8mF5tA&@W^qr6hwYM&%J&nv*^~>+aa)K`a~92G+P1-v
z=%Xl@@Y7`B<{|{$^~5^!XYRB9@+O+m+s)Sk>7hVuvN;K1CzoSyU26mZ_2><1L6q^5
z=N4XL3DxO*Ulnyi^vZ>3KNZ_<EWie}cGl^ghgRSGm|(YmY0KNq(Eh-UdzkNpt(#IX
zIVTR4dPJaE`42v6s3$S5cA=gI!vi9H>{@mSO043@LlVwIzt6+gqf!=p{s1dFVhj?O
zJZ>`5jM+qI!N*HgFn+oP_KsSHaT|$s5HA9oSZ$2bkpK+12%4hxAbe6qu7qpc{?H|O
z)}V!1)ai6}2w;kB#OeLf7wQzF_^eOeu>MjRI5}HmtBE6Q_YOzL0q5nH;w;eA*c=@V
zM}cT*xZKcqGPfV>k9D<WP(8R1PHUK<tmG#32?HSXQy)wrHc@rQXfQisfri<+thM7e
z(|N{O(u)vcF(;wTd&-{cyrV9=f2qRrGgIHK$KlLlZu21rYX3-3t>rlFSHt+^bbYXQ
z^@bB4h$YyS2pPkpP<cZf=x@Hvbdv9K!`Q#V+znFl^^9QEaa+N&G7@*sO+>HOp3I|c
zGXz$iM75yZ)S*@54K>|R?LlAq3@1|Gqu9jp4*4^dD8V}iF}OSFaz&v#)67`{`Z`t6
z<zB|@_EJ}S%K*T_wa{cl4j1hk#xa8fnSe5anaO_KYL9{#JmhrjFGAJ%X-rVx-=zIU
z5|l$Ay3>rp;@>VTzZwZ^r}*Nm*=cyvYzJcLGbXA(VygUE3|mx`(RoNY2+VltCT5Kf
zuTjS&dL>xCC;rc*Crr!e2UBxC&0m^R@BHE$w&P?xnjBpNvb9wV{VJe-nl5}_5CvL2
zLwT1|FuIuEV3p!xD2XgbSzigiJ24F1yH&v$uN0I#o-J$-j*&Z06GMx6e{xk{ksJ1p
z71|}4VZ&_totCaQ4IViSmfzFGzq2`1H_D;wKtFI9y&YPU2QY<YCl^gKg~}l6)M*nx
zQk>1~DGS#=U>2y=OduZk5TU#=0M*y7hwhYhYFtU3Jj%Q|?=XO-C>`E#*a&p@7ebq7
zIul&>0(+XjwirwYwRGAGJ&FX0c_#QT8Uf0xau)u>7hi5Z!_?F@pm9|W*!{JH^z5x@
z(*H2mnU}%LI+t+Er7m3gJk7kX6!YL!bpLtRz@FsF!NZY*VfzhvUE6rpmbQ~v!o-cb
z`I)^Zer)eeIcWY&3K~vB@ts{JX1*(iJ9R^_-zPcN8NTKETNi>^yaM#?JNULK#W>1E
z9Wy=Lz-@9WRPV`0(WFVtKIu98B`ZP0z<87lRxpXG4PJd#0`7}ygxXpQU{U}5xNmL&
z2&M*^Z1Gj#g;&)Gy}P)6Trku*>;lIxxpI~G^H35nAGcl2$Le$CIDq<BNtbNUbDIcW
z4+?;eYlhJ5?az;Ik)ZOLIP|Uz<+hRGymPlD7d#jMN3IcLdiE5i8he$C-TI^ZyS3nd
zIt`EYucaNkAHSgNjo!M8!6vc-LI-d(+IfZ>6hwk7#}EwPoZyn(f93neRNx-12z(VB
zjqiH+g1UZ$Tz20I+HR2NEUZXq=M#!CHwHt)`!oE4+#TCu?y*eDAh>6K;v&5Z!lndc
zuvc4)CVRV>`|K@DN2|B+nOi!#<{f|*-4#N?<-Mgvq11V<K(G@x@eb<DT4abJ(+{!p
z?L}rdUypJQDImBpl<OQZ;bQ7m<yz%JeViG1XeP0@aW&|B&j=f*?S_V`o~-pwHE*A>
z9p)__hyFw8IbLW6(uVVVq<1ZRza0#gdgV}PnS&m+2f=;iC-zoiOsuo+5H+(94PA~g
zt4Ak!{=pfLKmIWnjm#|#c9y}a-T9~(^pF|uOAsa$O7PedN0deaFSk1fTC3hOE8^Aa
z-t>lj`(^NJv;qR7E5WQ;B+qXvX3~*)tm3r>if7G)T~CO=^ge;!QwzD_CMUp{BOqya
zgGrYU;QWbW@E!KVX@QyOFkB2fy*U&t&PLsGeF)5^{p;z0;9^kDvOdp3FjxosuTH@F
zr+!cvn24e|TX_2eE!OBc7fN2q;K7PhXek}WPS-C-%_YWI_iGltkK0-Mj_tfHp&D9J
zmI-&brJ>iVT}&};57;FMA*E|Gync{~OT!(AwUk&YI`EfGemWo8&fR1U@>RU<dIYw6
zyF>1$;cT53b=l6_qeq`T*!tC;)i)3(Zdxu^Rb9yUUMa=Ax0IJCRz`n^J)ju>O}Ope
zMG!p6hpuBGpgYDA-_1UcFT)l?vqdF(1Wv=iNlvKq;=0^o$S!DIH-tCaKj-G_15mBX
z2X2oiHrZhrli3$Q#hAe;?f;ZZ`uM?9oy92qxP{e^&BW-{si@Uu#O=Claqn;HxgMw7
z&V?9^Rxv=GDtEc!xGHbCxS!QW9>mvLaj^g6c2rvuj)q@@c=47g_;Qp1?q3py`Jzn7
zx5$TKMUnU<?>H1xRpI8Mi|D27iLQOam}*`S9MU_2`MaIL<xD)({dmH5rP!j_YXh^t
zya6lcuSOlNC3I0~XU*@2g3*l-47@i2-&>zW6f$y}z2yR@GSh08uiSRSDu@cv$2Gra
zU`vnfymOxen6xc}p^=sdzH6X^a#7+NsQ^96=eXz}Q@fSIl@>+8$xW4b@ozM$>K_O3
zkr%?Q!g7e&cLvwIFr@vaC*8-NF?;>7FtmFhihCd9%TF9f{Id*NeWSo<bPx{qi$l9L
zd0?`92q?n#vbn>f(c{TTaGdKcluWH>+8frQ|MOxVGbaHCwJpK$f1#N0-$Lei?mjn5
z|1R&Tbs6pUUIv@^#ppF0!DWvUs70IN=8Ix<shq;JLf(@9*&bZn7I5ullvfShB%H%j
zuzjaD=m*XKi^gn*b{C;`SYL8E^_9IZ-;Sbt)u8|8ICpKm%-aSzf^5ZMKJsfdv7w_d
z_+uY(CcTw4bUVl0c85W3*-9Swqzscr2hhKr9MoUx$$dA_{R$I!dQl0ieqTd(JUYjX
zC;+>{7`}Z^Ay(^X!OdQmup>o)+LEKN+ktpb|4u{QOUfDM+-3M+3w+AULG^eiX5v+X
z(7Fv2xvxzvH;#kw)A{gmsXv6&rlP;WDv%tPaew_4(E5Vzybsobo605l_Qn(KZgufs
z?-^Xx_%jFFaWGQ%AnkGv;6ocZhU`5KUcH52-IyiUon-_CySt(3G|HnMt`Z*hii3*k
z^O!zf2|W(Q^Prjv)GL*Pd4?3HTZ<@vdI|>T#$ff7{%Cg5P1xJT4-#&lMvt6Z-1&A9
z7Y)=Anq5zVYpIkCcx4X$bY?31k$}~=s<`3u32YT{S*P!d0@IX<s1!}lxJj{c!ICx;
zlf&2f(`4Go_T7efq!(D4a0s(mgf2TRc*8X6X}_Sp&{9Jz*dGmU<udTxua0Y4B5+{z
zMburE$ffr$z`{H^v|Jg)cTe4hDax_1P*)i{4JoIu;Ru2?>T<6qd0@Awkl9_|2$cu(
zV4H3~RDPeqMBfa#b^yZxX;s)1H5-CoZ{)$raxUeipg$!O97`jFv&Nmn_D`po(a;Id
zJ5YwsZ{ks`Qp19YUDlwR4!dW^5*xprZ8vkp-c9Pb{387~J7OUBVj*jOah549&jZ)N
z<vip?5awREfbA<gn2|<1mqpL!uJyI7NlctQtyGp!9}FFhQ{Zp95f*$rgex`Uu=njS
zkUGlnQ};<2Y@iJWu>okQOgJ^OB&Mw$&G!vmf%}L$UAJN^<fa($tDDZ?oO_h($Ue-k
zE(w9aj0~vzY{U-#-iIb_D?xkp7QAV1jZ$Lywf_Cd%_;ZoU`bh?vON&!Qw}ZJw#<3m
zanLvLqOeyKXl6g<&Nj5a=(h!b^q+?@rGVGY?!o#mtI#OK1%eA{811qU1Z97Ct6OjK
z25%{~Go}vk!W39|D-g3=6<}C>7J~aGvPEh(`1j)pu(!R=J)}{r_4HhB{lOcgK9jMU
z4TX~GI50e4f)f-em>WX(+*3Jh$3(=glddrGaV2=Yx&Y370-((y4gUXqsGmCylpm~u
zX%YR<@y-D#-z>&qkJeFEbRvKFl6Zk<)}o#Ac4nk`6a@NF!kjA}7&t!}T)`QX`WI1F
zI+cYl4#SlbfjV+B$T6ReX7wqs$$Jbgnabg>X>WX<dkJLoYPsl59J<&yGQqkJr6H>-
z;P#p<XiZ0u7}9sWI0GufvtjZXggI%%uCxnht)fi);Zuq8kI^}E+Iimg>KNSna0cVH
zpNH{5r_gHRJ#Jy%3(|$h(czvq=5L6BwZ7@NH^vw>g&H7Olp{3jVJNqxOy8#E@#v6}
zPwb{k!laY5!@RT^ey?7I>e2IA;8*H;hF{=KN9jIs?X|GUka|Tr6VXF6j;)@w8V&Bm
z;BSv$Y^5`p;eTDsap+mOZS_B{vtx){8qpvRyq$&ZzYFEM{wu)stqNv^OvZ)_X>_)&
zk$au)!(^@hG0Pq2dHF?i;<emj^Wz7iwx<p(OUXs`2|dt_x{iiM)A*mC6H)TfftXX1
zpltIx{5VA&-L8^@{8ckA|HYwIbd~>FRs^5w4`YY&5x6jxVOZ}7EHuf3xCmQ}aT^St
zo1M{FNdYFU=eXCC)lj{>9H#YMgZ+pf+IoK)Bt4R%X6-u2^tT1)V;t3vj)8qsdZ6LY
z0AX9pIygBp2z~P7A;#o5Dw~f4?Bfjmj$J^CdtzNfAy;*u#!I5ZAU`t}A9NGq*^kFz
z{p?Zbl12HLTw`9Hd={#P(2iwUEGYW-hRLlJXx}A<8_|^2IlK$Jwq}BoQ5YUnE<%&%
zsbEnW%OvL?urAtfzw=24!+FP;IDZdr((u4!;S}s$7K%?~sbG?^i?=KdhW)$8QwHA<
zKDAb%yH$hGYXzO<7RQ^~Y|ntf*OXCEMm>(N<cjrGz?XFLZtPf!m(#1zJ1-qvp0%<3
z(isqQb}CG79fG4q^ham6?%=v8o0+G&<CD+F$w_}2C%bau7eC|L!A>C1AkV<0K5~~8
z`*<aB1297c9+bsm9qpBjR$O2L{ZwxDuqTuE&O^@=>G<=%T68#8g&x~_LWj94Xy^rD
z#j0%>Y@H7-r;agUm<UaT*5rm$aIr)Kif6lESK1hC>T!+tU$+>WFJ?09K~-|A&p)|t
zz$|#Nxj*Wf5i7^=m;7Z^CVMwU8=KGW!y;W@sGs_nscTV}w(Ai8y|fe)4)<b*JgPBe
zb0)FCpYz`rHemhYa4sDbC2xHh1ifwqppO1mq0Yo7au3H*D7_{Cmq;0=H>W`J<pNwd
zFPAd;pH15u=fcF1MIhQ8!4J>ahh=M*;hk~B3Z1i?{JtyEdEF7PYM%lEuet2+oO85;
zBu)kG_?}1tK&_#aYrX%*x~k?7AM&qEM9z)d^}f&*mB;M!uJNc#1>iF1I@g^X2NnyE
z-W4{`&}%(Qv8sj8;v(j~=m}TciJ<+Jr||uaT>KR}0IRjUp}HUoJ(9;m(zfG}`)wv~
zb+%@vQMK45jDb+kV5WXQn~4YXgc)1+VzBHko0w7nMIO|JS(i&W2s7}yMowtG2vpUP
zGS$RzZofJdY~QcM=JMU3lc-7DZd0M4+ZR)Xy$iYhO5o+ECf>B57dQ?)2-8j_qKkSA
zy<f)4>vqqhv!*XI4;+C%^Tc?4|5{v_w2Zh^1(2Yu%9h3{a4YpfRMqA{%&4BAxVM$1
zE{dZ5{0cnQZ9c}|Uw{J5LKc)9iD{W@QD5H^zK<an*PK0&^w$e_EHOk!<>Rb%cNWv{
zeF)lj++{aY5)pQWV*L!dBkIisS)DUeXnf-@<6OY=umT#^xP!{XVjO8hj6<GC%+f01
zcgmm`6_<h0J@P562!wt2^MU)vW9Rd7{x>QI6tQWh?SG!IZ3-P+c#!f0rUpEDcMj&;
z9))FTg&1O>gyFs7Kp|~1)sY%8#lKb9>D3!<%%J<mi_2_Q&Sexk)U%E-1DY)lf>2>5
zZ%Ot5P5q}VwbwMtlzcOlOBJv`<udlhd1yWJ07y*FfwfbY(AI;vfn|l<dq03iS|m6x
zC}PcRbHF|z5ZptofXKi2B&`DKCRV^2r3_TuQ{|maG;4d>i@E+Kw~^p9Dz;e*1@>Vk
z35n!+P@M~oojLGMwHFQ^O70T#TD&SG|6#iU&&=@yxvc>T@9xKf(X{7D*}!cL8n``m
zG&8%GFyDQ{uya%ec_Sp`qo{!X>r?Q<^AOy*Gzao;nZWng#6X{4g#z!h=;G(bOw<dw
z`@nAC5`7RlbC*F_06FP&0-<P6EQ7^aw4=-fBdH0dn$h_)dM#fuKORRM-i~oAdk}v;
z61_rtK}UQzZ$6U3+U@%=qxgNq|Ieg(!ffIH?;D-n%314{=X_AA0{zDrq4J_@47bmQ
z%Cc;DnY;~@n~%cq9-C1-u@s8Bt7GcI!+76d7HU)%fgo@OEBu-QvX!f8xm^rHc9L&)
zDCIoI6Vqah6Y(?(xqZ}U-kLodygtm}hTVdNH&#x^M!(m5_B>(&eGZ48PqI)hj)tg*
zPMAix(tiGBsIEE~yaI?j=YNG+D-laU`ka?p4#jqLUHra<+<dW=w^khlszwXA(E;lC
zWPadw>LTtvDUaz#1jD%vA!y!Sg97C}rH%{k@T+gKa76Y<w9dZ-o9!>-s5;s^lZVM&
zm@aSokM5bp54i0Ea!H(P;miLd!4g3#+#6XAUU_T5U9FL?p!rJY7klnu;11U1^HF@4
z*rt85`0qZ)F=4J7cWgVs3e>`(y|)UJE}hKlgh6b8D`kTEr!$Gwe%>u2pE&HAV4rsk
zb|!>^Gg}PP2V_I=!2ovVUjnvlyeE`Q%LV1cQt(a+0%e^~JlG``cmBvmm5n3ub+e56
zzf$7gzZ5<(6yuiF=b*F66jX0afP(E8LAh=MIJ`T93U?oFICeFgye=K<TBA^{$3AG$
z0-;*!5oj=}0N3Gbp}ArK_&lD52{wztE;NJtO;liWjuNbptVWj}_n62<06(|%!^cWT
z(WR#*OZgCqXmW-%0Od9=4#YX_#(4OF4E-&x@!X0yCYn;mwEE3sqEm;UMdO8BWqKMk
zy-dZ*Nww&5?Hsd8f5?0&uPQiuQ1)_E1mqo3M}ey^dqUq$LC~90JFzd*3s6H*Rt~r9
z9fkJBrTB5nMzl!(#J`)CVu#))P&t4!<5LEO+O5)n_XFs;bd#wrx8^y~+fZ5@Aq)<9
z#6`yX{AF(eBs6d4rHh7PlpkdxlM~p8=1bT%`W;WoI}1$@iovMhEtiJA6xJEoLH7M}
z3|~<V;Wp&iT$Bc_XXChiekXTH7zbuf#2@VA1TK<Bnq$pGk*uY(df+P1`udccorsj{
zwk(B50v{B7^pQ)(s0s6*XL8w{sl3zFkxjUng4T(7Oz*7*W={Uj-M868XPOOHebbNY
zjVQ*p<<zUP{z9C@;cT_JA8M{igO0Fz?nQmYCVD6Snl1+GeP>xwP7XHf%z_Wp;njI?
zj(7f@$%EHjXGO)tK`}4jMV7wQRT+;WJ(bcodwp>2<N|EGTLxmmKIoG8fz~r)xUG@_
z^<Pi(Elz&qIM)Z`^*hnCcQy<zT#efeR$~|K-E)8CV)N%pX86n&8dl$s$9xIm(Va#3
zXjloo8%7BgFDWxSaX-`X>yRs`Z!DM;Q)>JrgZ98RaO{{firQze>Ob2+_SAv3cO7Oc
z|7l>Ibq}yUMY(v(8+5KF7H1sYpNI|r`dSz{7{>8aoAXiaR1CU&Sqx>v$O-a24ep#O
zM-Q!B$n{quZuxBTxZ2^%v<=WgGqmPlbFlFvcbx@s62HrsNP7tcy`{5PSu!ZA+kyKL
zC2%x9BG<9&DR0tT04~K7Am!{-Y;d|EOlb)M<H&PpcJv*qv0jQXx9Iok-!30)VuptO
zT)5OXh&Asx0|h=8pmEa}P=0-e)iplk4K^oW-O%yqvh5Ey+C%P%@3iY~DG}OEnG0V6
zMq}fP7kp;o4lFYEXSK<@vAOmx>okjJE!)k7?#uE)?K=5m8p4FZ5CG;wqCm95AHFO!
z!zLATF6;Y{rOe95Mx)EHae^aGd25D^p0xYj`;MK^t-<XL37EPp1Kxj}h>9i+e(RVR
zEhd+<%&|UTw&t(tN1ct>_fIBTK8Rp{gVLZuX%2V%{a)UC0-YtlKH%D;d!onjc=QkK
z2I8ZoY@2C3CioPCVNG`~-R25z+k2z3p#m%eOmY3FMJQ0KmI=BC2|dpL;1A#VLzYPi
zN)Ap3&Ff-%KAjLYbnle^+MkFqd#8eqfu>L~^;xOFD9}Xbj0*1>H52!|(nHzd^_V$T
z4Kjb+U`A&7kh>-X-tPglQN9RGH0N|9Rs|<#fyc$Q@Gzqa_4`fdmf8OBvBx5`9(ana
z=~s;wHve(yD`OBFEWnvdD$zRkCHYo|z%P5+bH)athXd``z7{fNFAHd}Z;>mH)xcs4
z4|E@}5!5QWL(7CIT=HSATyaWTnx--XwK^w($G}R^^*4cihib6v(r9pW3?N4QL^gP`
zK6zr6a<knfeA`kXHuQ~T<DM@?8?&>Z$o$U7_9Qky`3-)%UjfcsycP9AeNn8riM0dm
zQHQNYr*B?ZEr^FDt~IExD}*u9Xbjc(z|Hz3qWE1H4();1;QCXT>UkM*0?N_Ybv~Z>
z>`(kr5%2T50u_Hh2p#*rW}#v=Dz|2__CLe0=vf_iTol7HUoC|G6UO7!6Q{vy;BI)C
zFQ=cWMW{5~7lJ>OLGayc+{`_jH+-bCNp6~{<AntHKC=Ydhp*(C(^SDNC<w*dB`m7V
z5f{!EL9LmL__zU}>zB-wKboNGx&*XGd(hG9lG|x6h0OdvT%zF#3d7gzm-{-@S+I&b
z%#NVvQ7~-jUX9BREy3pwrRZNLg)WN`(5A8$+7J0M?PJDp=T->fo+^H>c@SF0dw^od
zQ2C7{VrIQfLfonecDL3u4SI*wz0!i#L&Op~yO4wOTnL>K!ghu#p|L0w6|0H4{z-=3
zf98T+EOoXs3)nN48F<t$5dS^CfD1G%Q1UQZe(#tRGDI^lM(Z7Su^YxDj64>`rl6Q4
zHqAZ#lJ~H4#ZW~7T0bMNd$SBj_7J1in^`bo(>ZK548e(SvcUOI3%@v1hQ7`<=#k<C
z9?8bgMzf=<g>l&M#}w2bIFrk^ojqB*0i8>epk@v2_@7dCpDDq!k}&)^aWra-4guBw
ztXXQ!LY!e|h)um;u%1ihD6T!nMB8Wbuj~4wnC6T+@p*h&kS>PW$(Yu_H6R+{#DhI5
zz}f8#6mN(}gSqsbd}sli4mjg?%LEY2O(=!`j-$HGK<HdL1zq=7@_U*U;28ahxja@!
zM?5C%{I1M0VJ4X0si4_UAEEm0L}5_hJ!sMOkefKpXUz{cLsR)V^pfXu!^?xfWX^bY
z{{?m6-bdnQ%FgIH)Pm7L`dMtuA$(UV)_(AXM%g@a67~j#XD{9q`jJnixU6ff0pyx5
zz{U%qTwI+AGRt+`QQrv;3+N*CX&3YyKrGp{P5ie@G^WHSQg819v+L>xlBN65`M4>R
zQ>H`pfi>udTXD&=kyxH3z+tQ(YE8)@R@ia5uSXf!M#VG3^8S#1{4`urufd7KcA@S*
z`v2Itk=YeG5pVGm6LiEFn<zxQ|9ySbSr`G!dvbU^qZV=G5?CoJM49GVus%1Oot=LI
zuKlC$+?y59?Seb?{x*O>*%gP`2Y{uy8&6m@9G#z}qFvQrmipcY6PLxJk)1EYrzUWr
zU4b7hM&k|dIMgkRV-5c$2u=L%u>U%$iC0b?zdi3+*lgmp4^cpKgE7dW<3Q?N%m_w+
z21}2j{%>NFEUpB1ktfSvMJ~}z>&OjmjkEs|>%)DMd|D7?U0jRUOPLfqyC?HKllo)g
zy$akGPCVfgL**@(T7;e7zOq_=0q)0~L9=m;Ileu{PX4h*#gV@3)}fPl_NW%-3U0Cu
z_8w@y%mtMqV?k?kF|Q7m^QHANoMK%@+>?v^fX)Rpk#9x!-hWu#4l78Xq)na{8=lK8
zLFktPbN<nu_N$3d5|fX2Zk@r3ow*nuNbFUMYU+<%;O>uq$bSuX!RI}~P{|<?bv7sR
z>eP79+K>-R@f1pEZaTTHmOCHc4K7D1MP*hFX7dNg)f=N&Q@4*SGSLUM24#Rw{&FZ}
zv#<n$!Pa{#3@b>124k8(_NIHP);{5im8Dp<J{b+)NYLB%ESNl(piA>>u3t#q&Dj!k
zo>s|(nH6{>#s^`_5sW()3ieLqg8DBL4KL}lG(TM|x6Xmqx&A0@p!5DBVk&hPVFmR#
zZ8H|5rG*@3)-FKxiI3#zcoyfToI(?ePs}-?p6k5{r)*0VTz1aGgD(%_v+B#ZQ!fN`
zmb#<YrWnkA6-|{4#uamdd2^pa5dXa;7kqOQR=*EGlgBMAeA#TY_0NH(g}%_)<0BjJ
zFbJoY27ue0D3~=+1aFngG0?IX-ql=)I_;C>>sur+eFwQDtJI)qhBnr12*Wid=ds%S
zGS{zi1lJ4NpeRm9qfR?8oplafi9`DG-7o%X{5<NwE`o1+G;vn1Q{+%jfr4dfm@=;n
zrIywZ`r!eqYraVPDPyp1J%ffT-Y~O$z2sU$xAET37NgG8PptQ1+PBpdqvKsiW_2qP
zUp`U?uep7>K{#dHR^Jqw{C>xLw{JuJj56L_aEDv^bZ5D{Qn01ZKcTqdGXHb0KL$U#
z$;_t3qVv)OuK42vZo!n(3ms=_vq%aldyjy77qO6g^rtT6VdlSz_%}=fnpM*vwCXr$
zId$hRj*&b1gc7^kcm@+@^#hMvr6@VJRxY2~4HJ~@VSGDzTQf|Ftv;r7>H11+Syd-&
z+!zhA*>{-Z!NbDB4TG_!NCR_c_XU-)86eP_B{TXx3BFKo@o#%3biCm_WJNq=JzRs$
zk0W8$Q+Iq{M{NCK6>z<HkX$K8<?ZV0`D8s}B|iJg-b^e&fs+Ss44;CU#ufOpsQ}x~
ztbwoI3Us{cD&Mgt1`V#{;z=ilR+IAR{V)iUY35w6Dgna*z^;yugjTn{ptZLYbE}sz
z>uy7sv`-?}kNwA-m&Ni-Uq7yYek+fWHgKI4S-k!~^64eg-+3a6sYY$$qFp!1<$M6<
z_sPKRcL!n7>ugBX4}to-QV35|!IsDF=;+ieRNF!<^pXg+A%dPMivq!QkchuF&WFgl
zNR+MI&wZ47qI7+pFnEg-j->hf**Fn+G>5TFn%N8d3WY9@_1Q=|vzOB|P*3eRB=mIz
zk8PLP8c8V#dN#2&Z5O~}N(oavwG-+MyJ3-JB6j74p{>q(7(DbeXv_D*%iZT7BQOnD
znI&U*Phv}FEd#;wk0$F0fGhU3059LO&`0WyP0xfpLY$4x`$wQ^clsBw_Xs<ybONXU
zJ5D=cC7wCzIaeAK1I@lFV0zgRNBgfpRh5D8a`Oc2kep!RUzBYrKFYNwh>1rR#?nlW
zpwGN8P$|#C%r~Q<CT=Yjg>3`tlxR>vVpuOLMX$B$Om@bYixTFTHl;*D(2odoRoH>y
z=JPP*K`n|iXg~f(9~Iv&V%meHIAlqGRNNXZtbaBahg*!rdCN<1)3++jJbxAnlEl#Q
zGnqGBe<0s%O5N|9axnaO1iE~5Kz(<XoS5>g{q0W3Y^mkJ4#dq|SO%*P#iPi>$TX!q
z5e2GWOI6;Jn^QFuj9OoD_c`M+Onp8UtLvc$<<8zckH*Ps$R&^%%hg_M!WHvE?5vGt
zNB??|^D<W6(KHDZBUE9cX#(2%+~#EqFQN6xBG8x5;mc{CYEn%dsq34-*SZL@Q_J8+
zdN3sIjsZ#E>pb&Z1?Xy5^RmT<an4mv@0Ci7nbN`4Z4G$K=jqV#LIk4bEpXL65q@gh
zV7hG~+&fP^m8x`@wJ-!+?qo9e=I^Y@^*d`G7l*I!tiqTXS*#^;g<NZ@6Kgm+8JqP^
zpsr&)eC(oU$>@))wbKz)msax>nrX_mOu{cw@o?6;2-}ao7TV}WVPcyI{hwW8!Dz$X
z6E4Xm&)0DEjy7&KISUMjF9a=mm(ObGg@!y*F4sw-Ovf8;$75kki#=`;pT_iS3b1Y%
z0#W<pq1t{cozo_Rz^R^_eK3|cJR;8GW;1Yj83Q^iw+Z9)B_P{c&CR;4XLbh%;PZ%d
z>=>Si%O5$Qqy8}vW&JU&<l$gF_9S$-&&GewuGmtf1_M&`Q7`Q>xg{sVRf!0V%ws`N
z9Vc(dGscdjUf_H96bQQi6@IAOjn=2A8~E%s6S+HqN~Iz4L6`C6bVJa8H<7<|ngGqc
zPjMaNHn!(t4dy4^rk+<hhCSGiwyL9`U2>UR%6-`R6d_tQX2A0$nPB<<Tz^b|p+!J0
zQ*OQp!tduGK{5}Twa&no<#B+gX_db)8@8!rgW~*H;fkYE(bcCAjfS6uI;UBn^*xNq
zVoE@4`GTK`re1|t9zXeJ2--bVW*xKRz}Pnm9f!H{pIc4P{purb|IGso2j|LL6Nq0K
zQ3gdbzjKqSHnvRakF`gWplya9&+mH?{KxonLHu;yc4<8^w=RR`CQlR+m$x&lj@710
z=sinWjMr6YRy`K#$cNLFT?ETaDFgA&23@*kfFX5kJ6{Tz#a{_XwwFU=XB9Zsd}cA^
z%xJ7KhPdJr#BemG=kGyzUGQvBtr!5lKAG_M3<s?zRm@R6MXrA{mJV`9DZ6kCbTco2
z){0E-xTi&a+Up$ok)+t7lEvF6UX<rYF9bjRNUV9g7&{ZgnZn?Wyj8oFJJzh0%T!{>
zb6qD4K5oxEBnx1qju~DZS_U~d7-c<%f>%WrpV)`KKNpHYq!`F<-wOrS&kF5*!uX&-
zZ8XuD$`S^}F})Rzs5)mBZ=>Cks`?qs&CFz($4{ZkwGwhbKNX6cxT!VyE9*WlXOr?b
zq4$t1Zm4#XdvCeV#0JEzo@OZ5e>I#5TN2?cy*DjH;V^w?5uE)~1parsf!T$0p>T5m
zw0F$r_KoJyTwegPy?xnasW<q%D}-0yree<SBoqfcU><qIi66QJZKMD3m{(cc?)DC_
zJ}{DB&#Xn8J>7B4z!V(s9gkLTPUFk6dgi6+0IOTq;e(#r$YYv>zh;~O^`lE*O9jnz
z;(X9M{T=W8-irwW_Lq*629sZp-XDXau=(&fNC@&rCC>pU%5G;Z=ZT*Y;=%-VdxhUq
z>GM09%q0Wf$UFS?z-aC=Ty^Li{<=$C0B0vY{bMnPjM##XPX(O(7?7eDk98+{Ve<<=
zy!LSsn#GTYPOC|*q2m&l2tjD=F@`^$KMoZ#>YdscVCan9aE0y*BF6<7nLiObtFH5w
zt^PtA%KrFY|HB6x(7xBC6jxgBM*sJgAhD~(x|`pbj^|SJ4M>52)HS$tRy-0{2&|_l
zL0jQxo_|2f1V3#{1v$WVvx1nWJeQjd_{G}{in!zOBLItYnC0RIc5d@DG_}%2&FS=<
zFBgDdW0nv$Nw|qSqQm5DsNI<WUYlq(X6uQY-qM}4u$AfVDu@3b>7qncOPC@V37Utj
z!8ltFTY`?W_P{>iJ$nUg^ee(UbNb_D+>L^H4y7?ElR$T{08>70qz+>aK$RWLu%SLp
zxDPoRortSk!@B+!vxa$xprYOwn;s0OtSI#iOC@YigByw#?Uc8^`^L3I&0M9p6rGc6
zxwirRjyF~D->Fn=aat_&@2-UEzRo;<*%EMfyCRoaE#r^X*J5LGHF^&=gFeyGI58ju
z?r+M)7X6X(_8)7owCW(L_bsBFp<F)8)gGnMCxwb1A59&0#-ntDCX6^E#{zjcrlh!o
zcIFz08G4+#<4UfmjRwo_9xTDe3L1XA7Ou#ggLTAl_qT9@KQ437LVB7tbRC1~v&b8J
zdn)+v|H+jiqG5q{0V;k5lsa#i1cnj;WN#RVg5ZT*&?JMKk>uAaj)#r*{ut^@tjv6R
zwk(?sCYI!WZ4L&XYYFg4m6)L2n|b@%oiO+e`6;Qh=Dk8pj<9w%-N+xj2dxBg-g$19
zWz9v8UYT~8j{&RI8C?AIjZm<XcDa_EFR97Iv=4H0{L)R{FU=6I8l0p3cqqgXAKEi$
z8vd6&7MF*u!LQF^Fn5?SD&{G{U*#I&(N%!m0bQ0En#ZiGT9|r~7+Rm(z_7{X_(9hk
z?RItW?2|pQ<BT#CcEo~q_y`D2G2`K70qE?L50-srK`t>S+e`&uw*9$K>vcXHeX^W9
z0p+k{eGu56a0LCws=V97l_>iBkEP11z^d^qGfO`Vna>nlQRT@!htWGIb^`gzwV|=!
zX2?!XLw8{x<~sHlFM7BN)c!wzIlBgmN^@b&gJkH8aDc!en^CAX33Cl5ajj=dptYYP
zYmTgfUW+SHb+0L;EF25f1IIw=m09TcW13w3S1wb}u%PpfoOvBT2dk4x3BFv;gI~L`
z80{W#h8&L(i=*+!gV|`ek<Q1mh1g*pfm?<oV~(8etY5Oh-f<HI%gu<x*pGL;59NXq
zC~Zz#57w5i<?2?=yzV*emIXOD>}(ORHz9a@JTazs-sGK=x<jEN03y0gMo}VVl>bHG
z=Wqqqy-nkaPxKDj=L)B<YGX`!Hg)tanEZ;TS-yUEzH+oRmQbdrX!r&Qjy2(-ch*40
z3TI5G&#%eK7=jNIONPA@!hgQ#k;O2E?s;?j2c!Du5iBM(lv#%QvzrsLu>1ot5Ym=I
z%EKYJe0Ux<yN#khTgzL1-4@oiTtu_6Mi8Bvgzxqwf!4KFeoZA0@k>u=3&{pIbLvl?
zs$fP;44M@)!QURhHEs&jcS>2fb6?ch7KX|`)Jghc$rP`$Aw(RBicazn87rff|3D@i
zJd3(XlqH&*!Z$jd$8#21IDLpIW~wJL<G2HO&;BB$I_lua!Yt5tQ-@zmCS&kky1(p~
z3A0O#@MqE)^!ywJd$Jdy`a7{uTiF$K4p~8db|mUuQK0=VAMzCLg+i?;{O^i4)+^1X
zoTVBo*&Bef?Bh|biyS=P$3fHlk!;2x;`xQ&;Zk87i~H3VWt$>FASj)syW=dHgwfgl
zbtI(Up?BTsvCuiMn6>BxamV#P*kLCLM9p=?<5D+NSdZpoX7$HUFGganZ4K0YAITcF
zR0##R*raJd9h1G=$!qqyp*Sgz1us6rMYTbt>iMUjT}P8i_m9P$u8}ZTgV=pNj_@Sf
z#aF8x1j{sK%y-D+;%D!<#uEBFR;*)|nHIcD<rca0`hoPv6}iA>zOZTk3I6L#2ue!E
z%9{pcfa2>0`LtJO@bnbg=ZzQ(#qk$WS)9N+eIg+LN+{cMJ`T(#-ZK?wCz@K?#gbQJ
zAEeY%*Wq6>vA;7xH87k{pQ(w}9t&XXZ5=#n7=t=)n&h=O4|*X6m_7U=w8>^bdB#@k
zyf0;535k6BSuy%I+CZmNgpH>+frnW*SiNlE5fzEpG)@h|yDku0ToV>r_+s==4U7%3
z!NW^Vf~RdHYWkgLuO}!_YibA#dU6Eqx_nXdj|+&#oG|rRTn0&RDq*L?CZtIj2+XC-
z(Jz$zmhDW?#JTm+Slkws0Qn6f)DN&??vMO<>c0Ls{Z=rfSD%LjpKiEymNWWXxeSV*
zt~@hw1x6QA{z#+&)$R7+IFx?Ysv7QmBb|GHia|-)MW(a4kfn%QnB(BJ?8vXV*!f{Q
zSUUY<ZleyNU}dSvqi2~ouUj1EPU!)iD;6=e-f2vHcL1~(hskwf7^~Dw!+y5DwBx2s
zuCqOtCJ@VazYEqyh?&nZ;%#?S;*?#bczfn{)O8`BC4H~kE${LA%@z<?ya%IZXyJu{
zXHfNB6l&%4<+64;*k3#Var0`S-&uw`6;jYv4gqPD8GPI3fZG?)tkvC#?l4xMZ<PiK
z$+3KqW)}X}?-ahVs796UQnb2ppM~DP#+y%A!vsjc0^+=y{mzg_B-fxqdJsx1C!ywq
zd}wQo=2{zxH-D02ebyxO7#4|w*_1u`_?(M}AWtrK#{;vA(Aqj0np_T(H)aKx`2V1N
zsX1t$yTY%oF9Uav3c1~>+f4DJhBY)3bA6yEJj_0e4RIo&(wh*N(@O%wr;WwXql+N-
zrIfubPbCKIZ{Z=aElU5s60UYLC3i|3=4Z@-FC7Sa?tt&^c%h=wL3p$83~U~7nQ2B}
z<5p|@(E8~mc7VF2PyKAsU}z}*yEYUpzixy~qiWWk-!7bQxe(QtYe19VKjt-h9l1&K
zE*)Je!<aXTyyadXD{!wNcb7ycR4;*|OLuwuCUSg6wVCSQ)q$`*L-1Ns0=+XcL6Q2A
z3HsE?GJjp<O^^M#qDvX$<`;uPxKgg(smyZw1>u|z=BTb!DU?{!J)&(7W@&FEB)KPg
zm)nsO>jP7aGv_}(BI?)NWwvQW{KzC{Y<KFzp7lG8Z4CpUZKXNwx89jpn;(M4(=%b)
zid5PSW-)<k01JKU06H`aRJ;&?kq13&O9WuDSB*{Bp^YAE_JO+dcedoqVKg~01ney+
zQ}8<jFZYPULkF_(jg}P8s-A^YcWGu#_aCumpgdfu43&OY!ekXdRewLwTy}<834Nhq
z&u_WL4|ROkO9dqxeAwM%w0mE@2gK&&I@I5bPcrwSj#en2_=B<xZt-$~_b%DJQy#dm
zW+&S9-wFdaY`~~3q!%jLzzv^0md^q`G}O&U9Q=+SNa}~8v7<`yP!AycD0uKFCK{6`
z_nBFNCf78%;`BhFS^7qH#P%dga%a+H^())acNvP$ckoyb7jnNA!oqoxXsKDwbq{9o
zT)8?Idu-*dZ<d1HnA!M6v<4MrJHd8!D9-YtcjnPdS%OOiX!n1@V|1s}zNb5wIgroc
z<`P&imt)@PC={p9;I3OU_~BP7bO!SW+bkChSy}<2hlUK*j(~KR8n{lk15xC4dG+Np
z?BjYf9Ntlb0eM?c?PM017_22W?k_fZKzH(JI<Q%LG9V_(7Y$5@;no{+JfxbBs%dg`
zzNiUb#s=b+fd%+2kDQEccbIDMCdyZL!?{z2VaMAsAg#0nt&`#0|E)G0*;;_2Sy%YE
zpHh_Gxgm5O)*S`sE;HSK_3YPs30@m7$EKl|dFL+~^By%GOy<Xf{i4;dTUdpk-t58r
zfzhBkq>)+LoAQ@S=EBEa3HZc_934A7L9F|Y*(>b@R+Ect*(Gc;Ny2OWFC&H@1v}KC
z&csEq-W)A-RDUA0wcmtik_Mvte44GN`{I=snb<f)3#G%pvyLGW>XRP8OwDn0cDuk^
z?#8kkBWqDtp$z>$=;Pnqp&03vNG#r1tldw~vMX=d0p#dhJ_%aPwi5SZAyc|l0M6h3
z@`maj^qkC*cj{(9Xzpn)qFtIm5H|B=@iLShkLICEG{8;w6exMr!t>r?$n;iWa~pYD
zB+r>O&FMro2f_Xky%Uv2LElTO@V;6SPIrm|V}l}8O<2M#?yZ5AT|#;KrP-*^C}a)3
zA9!$#C*=ySa8+S4YwSxIoOl!NI4p~G4P#)ft^vQQ&O_YjU}zick5+G~<Gf*l@K-|w
zzSK^q4A*yI{%R+XFOPu6j@eLJx&woqbHKzQn)zGRvE2T8V3tfB#q2{I$%)Ww$Tl37
ztBT@1T}=JT8}9t^8XsRs4yB-NydiBF7loIVdi^-Vw1}~ye|RWM`IrNS4(X6PWfu8M
zZQ<bX3LFqS0Ym?UvGwIrym!MB^@m@A`t)+}$O;4(qh9DANuT?bUhq$zffiT3@R<kW
z&|2pWix1z4&WD%47xFWRtxiMkX(RBy)g22Ld7(t>ElXHM-}{bKu-s|JKEnZYd(;E{
z!^Z*+CpO#tC}G=90I7=}SH7MIxi%$OnUD(>eIhs(jHff-Trf-}-h4~|@x?A8jkn~H
zlQy8oo*p1sSPl{e^*0WfLa^^;Uj1qyl&;J{l@kT9?k4TRGKNBvwt{Ky=m(DKljVMc
zXh)K}jPIj6nkmhnik2K_H+vbO_So$xVaaTDQw|EAXbQc2wTOqjh@8bqr5@xP9`>If
zOiovz+V2!Da&i$S3|S9a`N7aoxQJOV?1OJ^nWCeYxlrrdAz0~u3J->7quGNK!jLM;
z08Uy560a3ZL#-IXhlgYHpGvNybR7TICV_u(1#T?T#_WZ;p!NDT_be@@`$J#Qx$h0R
zi4A;pk5V)|FD4IO0Jgq81NC`5@MQNmw5QK1rfwr#9V;RC<|uF<JCF?+H3&7s&*N3}
zL91KjyE*8|48Lq=k^>@`*~belWAAYzzY6AZhPr-pkHDGiP`rE1hq5MO(E0sD*y_E6
zulC=FpN)qRmqLxWzr<!&L)wqku)e1A(IR0qWJ@liz5ZorO_IUw4<k^fRSsum1JPAk
ziusS1!hdPRH|V1Lj*$lpv#G|bJ*uEVyF-|?UxM#g0X#iii>hC{*qA{<pz|n>XZBeK
z=9C4tzCND30T!UFdx6e;{;=D6Fs6E42Hh>@AX{>gtDdxF4MG2yh21`AFc=SZ9v<8|
zv5*_xtz<5DYWO|7EJzxp03|&c46}*@1HY|kIha24#$GJt%^!{?X}qQ9KIR{p%=>T5
zMV(dKC}*_=;ucpyJAGcN+Kt?|I*le<hp<t6n=7w)!SyO)a9r&f)H3n{lelIMdc=g(
zI{{g0CB!|{gWHcoaMu7t>Gf!#(cj1XUw|2=yQPpzyouSao(iN|fWx6N5ZtE11jja&
zdVOC7f{3-IdQ*}Cy8_tj%coJ6@s6A5kKmq9g30eIq8v^z`AUg*eg2EQ&di((`WCUX
zGz%7OE+T%!awsh+M&TG@t>ss+R>vXi=Rp_L$cn&;l%sSt^8&@Gq*CRK8aC{#3O+GC
z1uy#%<5i`eo_Xc?QCbc<vNhPKn#6Er2iNL7jEPLkg!&8hc)zv?G+cCo3+BjVftv@Q
z=A3M{^u<1u>D}OxC5~LpsR(38{h;_kCgvWD<Xb{iX}&!J`>{N1pEwBwCniHubpr7l
z#KPbs!EEwNYY>ZN?A(`r826f<2X+ggZfP{$3S5ihx20jsv=Mawxow(>ed(++0nb?%
z;ggO_(3N_ey-KRY_~0}6+bkFt*l)*YIrKgpXn>Nmm28~LYV>(g4Q5qm@$bwj*yeYK
zD~hKJ%XiDceMmQ0|Je^+j8u3+q8p35ZHaA1cR<n|HFWn{0Oe}S&^^WgI-_@@RaYES
zJ2M<|p_nT?d!wOo34i%Gk6rCb1{0-t{)U}J#TD{KjP*y0BPS?7_=7202Sey@SG;{U
z3&sjdFn`Y#rWkF{?FJcx`*ceX_kS$x{(sKB`hI!q3_~bCBuDpEb#j<WeYk;h!N}wi
zC<nSjW;4xL7S9KxlY5}^#SAW6dYVW1lVfhlMHu3khh4UNVM}8?og)fSJ?bu(#Q&8`
z+G9a(GaE%p3T6{yhAFDQxLU=2ykqW(M(eW4jh@Lnex^WM$wXASy$OTQ2|z!~66Vb?
zMZ4xpu;R-F6eTwc*E{t@%N|hxs<EtPQVL7hx)>zE-MBZ6B$@}SV%^Get{xIBpI#jX
z%I?cxj@e;My3hl4R{UXYvo?VDvF^BeP6=x5+{JdwPvgdcM^L3Y024L}KtUN@*RWI!
zRrkQNGtR?vgNvY*QbTOlmvXCX3KaN$5Q=&|5?0f*T>Rt>D=$3(9)&fq-j~D3e*Wa`
zo5$NdQ-qfMG8Y`!Wok_`<(6r0<$?iup!@9v*E!+Ho+T%svnzSD_Snk}gUUHvswAfl
zd0Pt4vBCMLP~FNIx|`Wx=JYHcqlf|Z3&TLe&H=xPsN=Z5NZ8QfE7Uu^4<|pX<vJ_J
zpkd?xC_4AJ7`HEs4?>(I#OaWPN(dpT*$W|r5RxPbm5`)DkxZGAu9NO5O{t{2q|3XP
z8kK|)I)o6X;~GMo5Ps|TcRr=J+526W=lMR@#L8t`dBd>l%;t*>yz_&>EbL6B?a*pA
z;mx0D_B624L!uAbe^WNIehp~<Sp*$FDo`!J92BDM&?Y<;U-PTQrq`{4%idV18AKU-
z)B{UgR#eu0)1aO3eYSHq-E~VB3y%GXCn(wn-4>c$T7MGEZ8X8}QWSH#VhVv_G((Si
zBes{;@fTO6@X$d6+suaZi*HZkCqv4IG<eB%^hScMmnIy)7Ek-+y*&RNa*^X4v1HIJ
zx}V6SYNA1($5mqO?C;FH@dWvHl=(l#9%#OF8xAi{KviOV{#8it?U$5o`=Sum&M&}|
zD~F=cPRyw2r^3PWY2ex&hc%BA@XWrMDBsz`<HVL|l(dg@3M0Xz@*>z}UxwM~252&k
z_Kptw!J=a_M64ohe0&%>jTb}ryM3%J=aTq%FXEqnqaE6tKkyK-RL%Ot!iPQ?n3(v6
zxqj4T#^y9{pInQP4^N>ev5#rTzoQ|uSQWZ!E)o}a1Fv}Ui|wTPQrmxqu&IGOSlMBq
zabJwpvJKed5r89>#Y2<ZbQZ8S59Pxhg#Bl0@%?mSRox<Hja|8^_w2LWz`c>XHjV}N
z>!~1f%NITfL$EV85G`0W7w-!Ly?u4?Jk$p--0?-ntYLzmbvS50NQ9=a3?a|o1oMYp
z=bddfq^n$rDP~ikW!oj*HFg=aBm%q}>xF?Gf!H{&o*VCW#KB{Zp|$&DxHgacoBupu
z3cF!U(Up%4>$bzNu|8<oNxIVeEj+wD3T<1~fyMVRtj%brFpYG+wtb^m1JA;ys2v~~
zc3IG0evr?M$UyssH$3dA3`GVS;?{AG**1&a*qQo|*yu_yB-}5@uWzEUA$0~M)=)-L
za|@G1CJN8mF5`yLL$R$o0WKA(QQmVb&Y3HqDE6x0AAK4<y%p%XEfRdK^w6_BifZt4
zOnE$MKX!Qv`a{yVc{+K-%GQC%^Mue5xf|peo$MVkM2$w@WIxZyF{E9_m0sUxYRfg?
z-jDIvvHLtzTbv_kehkFxBh*nMoHI?os*Wml$Cx7i1h{xdKxx+)K=0eE^N#?*CvF~P
zES4~<jj_z9GntDNNoD8BdwuW90Q}eGPn6?wu(u_hxNab2GhYP7%|mGR<TSR#M)0|{
zg*f=aSyUPkj?&FF%;+xdr(L;l?pF!j0ancHd9aYSDifzfk41a6R8T4^z{i2Q7~eMq
z+!QZ(rgk{%y)y#9)CSDzEyR|ZjxcfADYU!cj;{Nb@%F&s#DecHRDbY59pio|`A;ft
z`m3A2K27sd^XZ@@Tx0H@f#~$N8MIFv0kN0z`F{~o%!wGu4G*osd{G@-@Grs4Dj8ai
zFvIU-XQAf9c;?W183w({!SIPCteY4%jWO5Q<1u+C`ML>2WB(S*@;(c@{)|PZDdeH7
zCa-^E3|4FcZnT{IOz)lwhTs0?wv=`D)aw-GcK%}8vsTkws1Cdvc0n1CM{Uq(>~dBG
ztFVj0+#m7as=oooZI6Pf#ZmYqcMUcrTk@if#5r4}K)sunq4R_)mpJUFel!a_rXE9i
zFDtHXAVAIyRW|Z_A;_Bac$q%Ef0QPQyHYFJo!NBOstbgOI&ECocRb~4^nv9IQn348
zGW@qi75zx_q5-{dZG#0i1uX=_LKDdDr1z(?Dme8^C7zZNvpQ5EIAzo9&&2@EZ~Os0
z=PA!Gco2L~Bu(?B49I`#MOn^aXnA@Th$fuJ);QV+?Kmoav9B01R~Mjs!*k}Xyo|Cp
zw~I$aD^Q|Bbzs10_>X$d@QkDI(4-zLw4-=a?RKWJxDT{!h`^R`bto=)#YL-@h)*8X
zMD_P6tj*I0;=SpPPsuTDot4C1B^H3o(+EiJV~CPjAGt`oZFWO#5(m<Vy!z<RbS6po
z<Lp>en2Gr6!~)80*~c;#B%|M@$+QzMtd#6E6;^MI!G*Eq=>5DDER&G5j1rV}(d_ZW
zNPHi!fNN_D;E!64PX?5t*=8?j_+`w~50iKBlSBxW<U)!|FL0WFnRS0X3L|AHaGtbz
z0m|uEe@GX#3Xj5_rcBb<Byh_?IaK3VK=<N{<ar&89UEtZv}-<~elN)EMLf|f0$N)o
z^8=#}Vqj4*3_hxhHjC&Cy(whI)p<~S)(#~1wRmU9f8wbZlA%_A5<0!2zAE}P_fYSR
zElCH-%leczwtVC*Gv2Ub$3U^!&jDidTgaPlF9byw?T<c8VcOo=f~G1luv-$yXTAyw
zw}pYjn0jd2wn$K1vlN@S9HrdM;b5^Po7G*9#>N4fKxGveUe#nX9%|v>swG&XQ-s>O
z>8LGR0(Xv;fJ18u_?XXuS3}Z4{_70)u~4RrU>ByJ`I=okT8rJ)^LWnFznEmauc^u7
zZJ>824_bBxW9#-!pzS<gaC{;WT06)So+ja<3+uQE&9K_$R1-tT9z=HUOhf;m_nJu(
zaYv4U{;)al`j>zX)AtYqn|4#xR9Dq6N0Ek$xcJKgcyi|qBwr$JOic$9um~!(X5;Bk
za#Vdo_eoSWR;&r(mfor87aN8i=cIyYLbFK@J-fpCKuoC^N7;8LdFn$w6pd{W3qB`l
z7kLnr+wxI9EedRmQ{b-aTGF<zBMm!gMMeeDjy+zGjWhwx8GF#eU<RlynFU>1IiNk%
zjA>plASNL(=hJ@j@AG1@!afWaneIo)yYrwwe?FWG&%()*ZPCy-g*$~?VU9{H1mB{1
zV`MP*csLaNZ{&mJ&cP6=T!?LLN154?Lm-P9kF|7;w;Fk&rgsCAILE;l?*z0pbzln0
zuCB;7=Jk~ks5T-BP8}DZd+lhbvN%e4=!sC=@PO%$8O5445}Cs>YkW7e4E4rm;mE8S
zDA^T)7na6h`t-r5v35OvD_7u?r{rt)+==IYP)4F!E=&168m$i2@!l{HXD9_j(xD2J
zw3eDGhm)q<aeTQV|5)YX&}xVe3j*)NQnqKT1>#NWD<1tLWMt35He#hNdhLWVsRm_*
z)Uoi5H#knr<jVcxIb{i8=Gq;&{d_69oo?oN+Z-|9V<M<Oy~P@%o^d_JMHENMq4%6{
zFcf8o-KH1t&pNpnJOlAT9^GeY&Jdx{z?AVE3>?bQVA3Y^8*&a`vAwJ!{xjP!B^&Q=
zK81}Q`yoDpc&)RmVOfWQeCH8dD|8-~E(g@HyUna_T;@8*-*VAVFKq56kLt<|*ms^6
zdJJ{pvqx!SOI{W_QC86MJ|ocFwUYmZ6H(pjIS-lohM$ilKcvq#Xuq%=Wk<?ck4_I4
zKS+UB^M7z{uQ9A{x+l86I?Ov%Uh|#{8Q@s{R>*m8j3%%8@z2NBp#L=Lt+FSAbrQ`7
zk}hEP^oRVE)+w-cZfEv(+d*dT%jK@5d+#)0iKfRvgKvVlyDd<uG8?nhH=&bQ32f}-
zXglZ@!!yZH?3F7PJ$hz}U(K-kh#XJ+I14R@@<4jDFK++04lnhejZw=gFr{r1oc`^F
zmO1*+<kHOzM^E8Cxj8h~D(CIIkfpRvfkZoBxYI|9Wm!R>s9wsSBt(O2YamFjeGr<<
z+~JL>4tl;x1EoQi*e#Dz>>juhv_GZ@##)Ksq=><f9yZw0KAF9rV}zE+?O^@3m6+-C
zoVC`RVdo?C&~>mrC}!nB;O{e_KYKE)r@xhA`3AhS7x7eiF}yD&O>63GW+<~}=8_;@
z(>?^dSL|T+3#2$Nm~@Z>&VsxkSiJarGWbZ=@_ilTL3>_is?ZxHY<J#{A&>jQ<G3;u
z|CYiFp+CMKbQHCPS4^6p1j;nGu>QRW>NcL?Kcvg>b4oNe>nAf%kB6H`^>#}DG|lQ{
z(j$vtot!dfR%`*wZ+&1^>~8Y;CE(iZx#(t;#!7n`g7O4C=B9K3{NEMeFe{GvFV)HG
zRt&w9`{9IoBUE!z!HOv-nQHkZFr4#Ru>HCOOsurP!i96U^TS~kab&>aGSmDs8Z_g5
zh_QbeG!_h_oU(V^p=38UeLMygBc$MZX9P>%b(Z?6I;OoL1LWmYZyfOejiv(DZ(uMK
zuT8+!8_dz5DiCW(i?&BrOn>u{=#hC=TpSlInEgxl@WfN_Jt+dr-W7?%iKnEk{F61&
z?6SE}H8?iDWrq6wAl!T%eA*|6!oyx@qNd<Ex?X53u7^7VQ=n__6BemE27PlQ(c=$b
z{l*7@{+{2g@m(cv4XFpwoiR+Ut&MAc`6&K(V;D-U{;wNjs5hb-JyVZEGx;pdBj%A0
zJP6S3GOPI8mbdvH7I*6SfIsyse!YV+p7_X_EC&;(YC*#Z1y^5w0L=TDf<T;u=hI8@
z^sxif6RiNX6IsHSgi;Kzde1}Dy_srQ2w2%gh@bC}Vt2}I;+h9zv3>@#-A+3^@3lhe
zhB8!y+&9%8v4bxgEX75)XQOH-<?X6cJ*~F1T(l+{9Fj((@*g^|%{?4NO?{c)?h(}c
zXK^di*R?bxklyG5EM0Aau1n+i@gKT4d{70hzp)HGoCU$_tp(cdn*g$MSH2~j<^nc}
z|DT!TIQLq}E~2cCjSmFou8MgYwy5~`BNLl?VdB<DykcGuX1CSBr#*YnZi58EZwE4`
z{+00Q?=sl`b1aJXG&1GI>F}s10WVb%mr6br@ShU6xkH!!kG&z~31wJ4+krFlNQWGd
z#)371LB<?tp4o01oKygTD!F*2^c*@)+Q^g+F?e%|G7F66;||v<OiBFAZz`2S`}gB0
z%A3WCQ>4OmcUL^~crd=17LIMr7D8tJ8c4~Cp?&5Q_A59Ax)%k&Qo}s7zegE{yGLSv
z+XZed%fY{X$r0LvP-C4Z7oB)sd2sl8>@IxBO9~5NY%gt;|M5r|F=af8nq$h(M@+#X
z6?SMfZ7)RYQs$wiA8Kxy3D&<zA2gUTt-*s()t7h?Q%%|9$O5d(8w%1*;~;VRR4yCg
z#jnmdgq_p8q4e$okS#4lwHsSt&CoE^ex?FuJMG~1yh)Vt>j3Y^oCnFI2bBi*>S4&E
zqc~%DE!KZ4gTT$iL5yF<$L);)%a5g;A6LVnKZv(&f5gQ6v6MYZ_QjC<hR|9NkL$nd
zp|0Z;tfb6}iNmsR_wmu#GIAv-|E0q6*PUgp*R!Ge^+>d(pDkbchRg1j5j$@^DBMM0
zf1!{Iv-7~=+6k~-Tm$l5n_1Vj0?-SK1d|v6I&U8bAJc7MJFtoGOz(r{&!wy_xRPZ)
zy39rYMS*VWF^pR{3lEw}K-v2|zgD~#)%U#NEx&!i^FbM|)C$Chz0#ns^IUwSM?4YV
zt)SRp12*%IVn@HDI5^t{CCZVceOv`fQOlt8paP6iO+gkkfMwnAz`$*>z#~#|jS+{=
z8Nq_yYhq@oy75S5dTulQDw7vXAf5U|RN1Hoa}(xc^2}Hae{=}t7xGY{@ut!yH3jn3
zcZ1*Y64o%YhxH#xv-}BR&|cyPYEOw{pZr}Mdtf@{Q_yTUR)ju({$h$Zm3UpJ2Cqb`
zQI7F6zVf>j+Fmq<t4EHZjXrrlole53*eE=6xdd%L=(3K%7g1!k(0HO_EOsuiXC8MM
zF`BoDr*)n~%claon)s3(=t;$e{tRs_WTcszz`Is&MN7v}C^rnlhnKSGa}^E#gC%G`
zU_Mxq-}%brL~MGq218xyQB{Y$Eol|J>{|`EhK6HZY#4M|MYGr4m9)1k1Bvs=%0sP@
z*gh)@_F8O1tM{eCvLy#FcBMJ|I$Q#d{<XZP^9qj~DhJ7d%ed1#7Gw|1AlB><>5%F`
z;nhbN0W0y9mI#v%XF!U~jpi6zajd#0N<;P$qXD?}`>Ra+FF8kUc(5bH$j!N1&#RuF
z!|a7wuu@(M_d~+5^K?ERJn0lVPPxtWN8jQ3LzK|^=LUG869+oizjMtyl#jG#2{atD
z<6%u67)kGctDS4X(5VRKxz9%FHqs8}1)-bYSh%r%6_)zNQ0A7Lj||&^CV!-}Ki?=&
zyk80Htk!_{W;0N1+s91I_OKq^b+9Hm18$w=*nPSmA0MMl*-_+2oD~n!Y+a}c(<6?J
zGSCSJO=T&0s-5uK0m}Z&(8TZ|<zT*mbTS$teA9i(5c^pUQ%Kj_Kxek`+bj(HTM2!Z
z>_hdWRnWF0i`!3_NO?HFS;x|8p!KB^WAl%)hJnhEbK+m-<aUC$JiG{f4LO?XEXOa~
zGO?j(Ip49}it^9DnND>*3fcw!aMKiF+iIFCCv0LO<4s~|cO<x_F>t!j%9>)UL0Vvn
zIh{kezsF9@yt)(IXs?>nyc<gON|0GQ<1<Go@gP2Pm-ceV+;a+Yd`K@kX`3+WfGTQl
zaAu0qTtS^?9e+FRMtKnNL}m{K?-AwT6hNBot)oz(^OJ|y3<7*K8`nMGj^5)^n2u10
znv<jP@XocAS1<6?QL`~6V?CHYvqe{D75J+k$F6(IK!zW<S-XNbwMAGwQ~}AZ<uI=O
zG=|n%qv+cKuGwM+^{*45{dhH;v0Q)>ajB_x*<kUtkWA3<@q+wVXV?-t7=QgBJxjoS
zZWdAiYHIUYk990FvR9|<>2>fTKNEaDWwV$mXRv*^lxuvQ#>@`cLh-r19JZG;+aFoz
z>NOa2?PsH3TYoV0y2YPF<l$SF$+(ELAAb97;P+6{$M5Qo<NA^2**KH@oEG3vK%U>p
z#*m@qgpFVRL=)B1%v$>p9P8(for(}4mY;zUa}uC!{~*4rk{F&D@la>s1J{0)KpXTI
zj!Za=BU@;$n!qZ}g7=GK7gJttzr|b@SHmrAw76Wqfxj6Z2-Z7pb4UMFur~IElcLEO
zw!R9L&plvo+Vb#cY$+Z|J%epe&kD`dhu@!(h%Wkju&SN1I)6{bhQCL1kA8a4Gszmv
z+B}3wSCY`6tq>Y&>QE`NAN+9p3q50U!EBBdn3YBd@(1yPejn<?*UTkeANf#~FGG9T
zCYXOJAB%O9#q!^_tb62guC}Xzd4w(nMd4z2QMnUCw3e_FIcZ=>xeA)M_MsikxYgC@
z{)^NizU(w{v9B9XeE5SIhQ4DG&vyRxh!WbX4+HZhNj#XeJ1rM%AbU0Ku8wJ7qHPd&
zoVprca0z6lj0T50#8*;iq5ZzY#Ni=6e|W37^=lO~nUjDb@4iA;rWLgO=Ls052cl1u
z{p>(9G{Zv3OIAmXgCE(w`IOgk=>lX@K5F9pNjz4(3<`a7A%6EcoHnf<Kc1?^k#2`^
z*<cr3R67%&hLeI^H4tR!pIPgpxgc#VBx6tvLVhJ$+R)ssu9lmca7<*ExKNM+A^(5B
zsB3YRZa?a4#z0r!aHf6a25&H1&*cA26gy>yv24SOpuhD145PV|?QHVU_8-LGYLdR^
zNC<ikIt>wvim*{d1KO+B!3muLh+jVrPX)zc$V)A*zwacsJ-Loq%t{B5@;v@!K{!H2
zBAOM96JGyghf^|=u;yI=S_QWY-Krt*{0y;6Z_33XD}jrq4(4TnH4x%u%M3hbQEsCf
z<d3cg*B?gAd*2Xv8a5IQC8yw=&VDqwC}Wyd4_WxIQ_#7$KX^Q3;FIHsI)imU^flDP
zBWnUPGf{x}R1hB28B99vQJ|xFhW1;z-0>h2nu$NJY@o?2CYZv{1Xnb<-N_YZeuCp&
zXLK<rgBsZhD2+*iS2GtdsdEf7GdjeirbTe2fjGXSHlV&S)isYY_-DUrOmPW9vnAzB
zpY{$fmgT`4HS!2d(B~fM`Mj+o9!fi}uvM3Q(KezG<ZtV_ZO=~d_H=>lG};}#9|?6n
zZ9L?e2XFSZA$^)RbIk7#b<!B9nzjMwdrQ!LQvhh&d1G&?QBrlzVBc5Pxa5W})_s@r
z>DC8vO=&tN?hfbsjyt36)XU&Egma_GeZg>AJ|@d)4y=}nEqe6X9<mx7ERWGHyM$|x
z@e>C{MPUv3t9A9Nu`Q?$v>iXOOe;SyY0_dc=~FgRXD;<ihbVh558M1K1jm8Z%n%On
z=kH4Kn2sqb?H&n2U-HtGn=<rU3`*;UgLa7%zFPT$Sr5`DKD?A0_^qHBp#<D&2I8s7
zL3p41hD}RmgU#nssLC<JJr%y#(;9`&V>7Ya+Zr@4^a9=A-q`rnA6vESq5JF^X8B4T
z?YbtT{@Ur3$@`jp-II;8_6<SXxs<2&vKJU;6p0<TllCVpj`@=xMD+J8816y6iu4j5
zG?|ENJ!IHoEvG&@Qb-<3e*;6RV;4RVL~<jRo1K7;$6cU)JJkpGQ^j6?n&FdI^{Dlt
z3|o4CU<Qw6faHr5tt=3)U~5qGb}#T)K{-#ChCpmQXIH7$)b?-{?)8?T?Du9=ObTE{
z4bySz-{hr!w1v4HJdSQJ%doXz7O~YoG5N>>;JdYPVpt7oT@6R?oqo`18!B!Iy2vwk
zT!60b7*LA+k4tvzLVn2;UThqTmP^aAEp?{Y0#dm1s?F##L!IZeePPN2$t#AGA+hix
ztN1b$rthMf;hTbX-dmW=bre_EY2$6(ZGr+O3XQ+Jd27aY%JNBr+*up&<{<^hRac?(
zDb-&;6|Bu`zffJ~h<BFNg30PkmU5yHj2u(>$lYO}c62<uVzdkWE}w(IR|C+y=?F9>
zBn!s>5}T%SFM4e9<oJ{3WCp9@<-<U1c3y@O=>uVuK^)$CKL*tnl|bXHcueae&8zP?
zki{DFPf=&EVcRJfcMz#|7IVd&ktms2Csd46=J_K+K`*xea%L;BpNncylXeCkamCDI
z6LB!`CQmetV>yoQ@XUd9YlU=nK1VY7c?Yho+l%E#1=Bt<2y&Mg;K~*0AgR-<G=Dk_
zaMxXK-!mF4UGzZx4^mXF9LX$y6DxFwl~9^?ksA{Ww@yKPhTEC+{PscknLy&dABW4X
zX(%!2V8OQxv1!+MX1n7ww$FD5vs-fTi(UhzOoaVOtKOs$4sCABKy~jW$~<Xdt`|Ka
z_d_|THw$oh>Q;of3Ftj-9LRU1i+h_E;<%5hsCj2StPHq}DK7<f=EDwjeRGvp<oJWE
zFju^Fbrh<&uf_BpJIV*BrXI@y%zR&(_E;8!g>?fHHK&_&n+|01W<x&E@C2rO`^nX&
z{wwxLM{Yf9JM?}p1GS<sNObY#78j53o`pAgML-6q#>b)6wFH*vN;8vprQG7dSloIh
z2&7{sfoS4AuI>F@-1+xbo-s=o?JUZ{VofWnsPJd{*@kSZZy-(=#iQA{q0pJ14LP^t
zxXG-Oe3^R)^vsXPL!K#^R^y0;qoPOy$5`XmJQmw0oogtHz^*_5tC&1N_Wmd~t~kz>
zN7tj<lq6`0EQL2X8O;h?xKd~umueHYO=bXH7Xl#3BpSDFrwr1_qtH&Qpotnrq!%<2
ziszB{gmR9OwBzwdZ)^0>%H<;_WuQ3I7UO3`!SK1`X}1?DY_GdaTHw{7(G$x=-&fN9
zWERN!jbk{<2OAF4e(1$+G;Tf$E^ftG8b-WG7Q_`^p3LlLTIJPga`bnOB~7BQAm0*#
z8frgS{e+94>9h%aIyiY(KeF6(;<$u%SMKaMLz$U~-4iT9;<vX_G_BsW^T+}8t9#3_
zXf?d?>W|qDVK}0B0KV!h0SmQ0IQ5+jrsQwJ<ZHpipr<_hx=v=5sR2v-%F&@y29I)5
zG3VAxX4RwvhRdIeJ8Qm(PuON+qKPk<IXm+hbyKX{;Rr=Gr*NF|39zoc&3+l5g{Cd1
z*weR3=u^3y8>LRhtv;dn3Z65qlJl7VW+Kcb|5M^$2cSHAIp$YR0lSJKuvHrmXA8#T
zJ~bQ6RP={DVkNY=2)y*#J)ZJ<E%eMX0L!njIQMrJXcPBK{XQ|6f{2~_-&hE{c!)AF
zM#7}=!IYhs1v$4aK-2VD&@v(hdUo95wl|Z&b#))~)6L_P2j*f*eK=aa7zfJHeZa_;
zxa~!8@Kta?kB?TYqUIPDKegj3Yl$gzWfVNQRRiC@WWc4Vx#-j{23{G>0jHD%rnWsF
z2LJUZ3M-Pqwx$@m{QrbQ4%B-^??CmL7hz?60qLJ!G9!s4X-sCq7Vmxd>SGeLhe&Y5
z8f8rW9uEJ_Sb=)_#n9lB3VxFVS&9SkcYgh3Z4rU2)b24mcy$o^nSA7mZ(qfpPo&Vh
zF%zrK7lVx~6?G2FVc|S~G&}R3SlxiKL)$$-vE_DUrIsJ299#*5E~Mh)A4d2!xDdj=
z+MsMxK9_GK?T^V4-g!s_j^rm)Jhy<HfN%)ex(dTArT9s!hCI(z@cLdQYFt?e?;TEK
zSy2+Y-Is$0lt7`^G3Z=x#Z7yiL-YODx!k9i)mU&;FWE=zmNGQXAm(Q0K%pXtdIGr}
z#Ckb`VZ%VCv?K+tiN$F9i?p-zFM_;RHQekNf#)7sq8{~0s@Df%@SjphG~7XR2+m{s
zNg!62&P!H&g?^xfvLp1Mp`(>s+WpI0kCuR$Ng|U~F6MH&v!1VtLhFXTkh#GK&gjHp
z?B^Fe+w2gsX=iZtp^2z=SRH!oq&VV64$TAKbDu3+xug3C(jWH4ZkZY1aw{C|{;kD|
z|DN#*wUd0SMkwac=dUg%jBB^7$HK$ZU**KY_8i)QT_Cnufg!*8`T*WBD+krd)sXUK
zFZBUol|~JLY?3lDz4sC;Y4T;B?C6hQ7LLWv`6gI)yAFzPBnlzg3V=H$T*>u1_bD=F
zW~HNql*L)-sD272Eg)X!@-D$}@i%rz_Z-?cpND3o?A=|Zl|E-<p!?QYFf=1>@|t*f
zkSdT@sV|s+Y~Y&>DsWSzJDLm}!&D+E%hxY~n1+9Xus#yMIpm>rp_JWpI}P&06=LmQ
zF-(8LN4EH=4|W?9yJj1J+&WBLwICAOdLiZOz7>`&a>RxW%GlUItj(b*?4U|Kcw9cn
z%*_^pnsYep_{)N_$GVyR;j_fMJC8S4GIaXX#M^Z3d1#-d*m>$Bx2h-oQTBLL>^#VP
zq7}TkJeyCpJBBYl(EN4U254;dg<IPzaVKeE-1b~2uY!z!drvxwhcEaf)gnAoO*2hL
zbK~Au74W+-7{{G7#a5dwu-U&7MLHK}J1Of!iScDn1WCB%$9VX%-VF^_3<hD;N$`Fi
z4c2w<c#GmBuQ)dy_NiV(+v}Owq`8!tE48u|hy)$`@ASTQhHG~(L-*|sG;4L|hStff
zYpphP9+knvhG6oP(Ek2G5YN|2V8!?5@HU4vSp26UWZt{M)v|54tv=o3i?cv6Y!dif
zJp+f<24MHEh1}JXm<56zY;mi@Ni$Rsbg6E;{<fkl_yTI5_{>-SNQ0+o_SjRMP501L
zko-gWtG`!}pgsom_bp_d7b#!0G7@@y(8Ynoo~_V|WX-liAlWk>Jkz$IpZ*~j=t#5Q
zowS3v*C6PxRAGMOZP7WNvKIST@``mAFbu2k$tGg7-HpO~0fAtjz7+D;2ZH|PzOZBR
zQhYt799^9>0L+@0NT;>ZqCaKxjC)^MR@xiQzpX~4U-=mDcQMAj$VaV+R4^R9jEmOB
zi|=hMhQxVFAo}%-w2CRDLlU9YoYi2M?ZmB5F9Ok`8-m(Bf3`pKFuor}?C`x?FucVU
zZ2ykpD&bN{v5W`EB-U@N2G9Aj9Sm!<KzMZlC4El`CT(HNaM~}iNXN(|$B%S|HM&?)
zoxrF5dj>ROBbX$qK%Cv}f$q}};-*ol*rB8kRw1L9Vqds8<kND{&oAPtnWTN)7|bUn
zk=N{f0mx1r0*41mX!IbJue0*Q*y4>K6$!9<c_g0sb^u*N?RiJH4L*B26vYjha4%j4
z%M)u*5qb$CGfv_e(F9ac>f~0opIE~1vp7*?iP@Qeil2M1N$n#yN;CzPZG+(K4qto~
z*O&ME7zZ1yN1&|X5<SluV$r8_rk%TXajo?cxOY44i^D#%^zjO8b{`L#eP>{Fy$%*<
zZpP03{ji1fjPkWtxORRt7mdh5HNP1UGW{~s7%~^sz6M~fT?JliBMbP&br5JA03M6p
z3YLE!=T7%cp&{Tg&#%AB{k->s{p;0ean~7oyh`BbXE83(-Gkyg$q?>XgE_SC&0q2-
z9DH;MjJ5}X(r<OB+Zv1Z!^(KzRobn**#coXb@*l?<t=$6L#|Ih+?PHH?Zv&&Ry%@c
z>J{@AKOMGaQ6##2JBLyW8S3@Eh#p5T@_`GLh@rEVm8e@&7WFx%SWpQHOBCWikVi1g
z5j6LhgT{_x=6Bo+a`p{jPSGYP`H=@rb?ubBRYMGqcu+i#0?keB{0}Ta4@Ch6_B)A%
zM@ms<=mI^%w}OU6UvRs4jwPDUVLd~x^DiZnQQPnjkZs>dedY;hm1IEDK1WpdE#^6I
z=1~Sl6VD9F;Idx5A)puer%&ruF40KF7jYwS;z3>1s5R#mD?C}qIm&b>8V4#Nv8-Y1
zGcFpEg7foFqS=+lVh`7R4Dn##p%)J==_^>BuQGi1pNo<a*TtbL`(lecl)orT!ms}X
zBY$aw+JR5_oj#_hsT#@J$d5D0^CWiuJ|?#OM*-R^9}0~o>!H138KfjtgJ}5-A>_CM
z-WBY{NS|=(S59HGC+Yky58xJ>7F^Dz@`f=#XnCy;mLnrsp?7c0`Vm95??cnXLB094
z*Z}M>O~KC13&C=KHn1gg$Y+y<qOQY2_k}(n^~(TJ$5H4uy2*NE_gT-z%V^ju2P(z}
zG3n|uUi@hTC}t+};tl&TaiSTF9%F~aGdwY56?xw6rJ!#1noD*1L&H38W|IGwH{QF;
zD^^Bf)tF4se&xru(q8t}U~g{qA`2nwIG9xWu#lX=VDinBwcV|QS5K6g@)C7ky!t*H
z9gvQ)L@ll!RD_pFTi-RS46S5RFuM4H#|9k7!}CUCc>HN@b%nASnx+to<0OVVm$MlS
zh0u6_GE=X{iR0aCfEDH7FVcLqJaOeOO3p&pu%Enfe-xK}uw$b32^H!uSA+J9Fo^v=
z2)@$qP>hQqN41L;J8vPa(HS_iat*2}1z=I4A<n#&OF6XPI1JbX`aW~{z$+n?MN!SV
z^@nhK3-Wl}59clM)Ms5v1o1#(>0i@g#eH3Qb8aJ>aWDha9=&6I^e&?I=66h6r;8hW
z8h|?X5v;xRECA&Pb*vBMLlx&iC2=dfw>yX#OD!;p_RbYzV$`YjrmT}HE{XqA*>k)E
z%51_>m9)4v4M!oVZz6tP9*m;~&%|c&EuJ&8h&9c%WrjCf#6|@QW@Ka!vNxA_&VLgi
z(NBeoDxOw;ynhL;Tn6!hcm1e8Nvw3jNnkeor`UeMR8VnMf+7Fzz{J!vYzcYGCMk}R
z@8T4IS~zG-xrA<#Soqbe7%Ccvf<@dJ?7naetQsxB{N_qJ)8(u<ei?gp?=b%>Z3pVt
z(Dw>c$JXyU<W0;1_5X@^Nzy2c{8a^tBSX03;{IrGC!MGC5`$!jBb0_Ipl36AA9f#O
zndFauNSf4sIc3m&z8uZ|*~`}`E70$)FZ|03(X)*D&9SbKGkp}#J1@b;t}0MFSPBxu
zDZ*U+GSJ*$isLjC@M5$be*ba`8-B{rWb+^VYefNg=9fZ)4%N_Ci8FG0XJxxg398nd
z0kywpuwJtj=zh8%ns2TEMYKB@6y##*jDEB)EWsZ4oly3v7_@eoVo$9CrbnK^U+&BB
zRox{}eDH(7kbS5R{(P!g9yHFZgNVhs=p8))Y)ryoZjBOlE*=LDAN4`Ue&YoTSvr22
z7Y2c~G0@6~LS}3fA8}NI8bukbLo1z~i<96%A35x7R*+B91N(mP#YV_uiL>8xyT-E+
z@j^k|u^f~aOkqt~0?3D_h!x3^+;{O*G<-){J9n1AM`<qR4LE@j9bOoIus<a3oyxw-
zr=sKfIas_V2~zhGGxAgh3!P8(#JS$E35yBBt%{w6>O67FEmom_kJ+{su`;hg*yeUb
zP;4B4*T$v5(}YV{wVZ0bk5S-fFb|KJEXM9>DNx%l0YkV0w*IJMW{S_u>D(pgo~sH5
zZw!e^Of!ejYl2VS1t#8<iLS?KR`wx^g@;anpP~`C^ZjXTB=&&Zw1y2$OMuQ?D;PgD
z7`tChW%>&pxthZ-u6}(VHrL03dU+imnvx8sZ{(oH3O_!jYB8F%^yiYKTCujZt5{_q
zWq5obZ;^B=YAim;9<ADgJ^jyu*~J+`%E7a2LmbT~sP5Ujybgc3pT)yB^RX53VQAT9
zxHUN*w@N~B<$q`48qMuOW=b%7V=pu_sfBOa2heGl0u`xOn5=h}INmTHGSiGvY3@6&
zMKzUO@e!2&DiZdLsY8+ZV6L4sSJ3}58#1ScgX`>t@bwwZ_5PrY?BXk|!JRaBgSPT~
zSv-bMxD58KXFxK?P23Z=3H1$am~VP9ie8Jbanmo}_K31kRs^G{#onZCOe{pD$6(W_
zTCsGk9;EzkWzt2(*wVS2H#|uLpQFn`acnqvzmEsydFvPsTmqqPNg#J9;d72%L}lF<
zOs>0$vci(#$g{~<Jf;X>?m%?B<-=_iOW3{tHlXeMscg-2Ib3U;iEg)-L5^1*?|c3T
zK6Tc>e6J&5CDnyb??|H*_7{3My1_C`CtXlKR{U2CZaZgy9reDTmgorKdSP7StUYKC
zI0AS4&ccHw^jvhZz<xs%yQv?A7ZiKZGW;@l=ssX;Qn%o*52H}y;&Sex?*YHI+F(e(
zBYaLo3d$Dvz@3k0a9HmcRAebbuw4YM9NQlq?WzQmf4o@7GXrQB^@6PhNg(~U95mZQ
zxpYhjw8z}!>YG(y*?00PT~`Lhw0&ajX??+Ux+~PZTLL|}AG(`gvc{|@{QH$$I2Ljk
zTN7tO_^fOg_@xvgL*r?GohQC;k%+pFx1q<|ZQv6f25pr~1*6ZAlqng<)~2Ol_rfYz
z*j$UkSsA83n}{;^wJ?7dY3@#Xu!qFlCrLVnOLM`XY8p(%4Dyro6K9V7%BKz-i6WbD
zLG@u4SZ*Ey6{53z%xlU^GuDMx!!YK0X&_1l1o3$<Q;10zh|<12{NPREx{=ROe^?B9
zyek)X|07_Vtdl|Sf#BHU0fw4XJIwH;OgtMLS4|v^C`~3YI9zGFNe|t2{fT4EX}56l
zE;}{H8#|kc^P=+y_vnza#)^DYAL<1@hsT5d901X^1|in6l__RQ`669Sl#Sd3N<V{`
z$@0;#>}U-PojL?VjJH7Ruwa(5o$B<~O7^f9>5tv^g7p0g$ZzF1{PbLuCY{BE`YIIp
zR6zLr5H2~r$y7UHv!IzAgYxeE*gGg6=DX!%_8-w;KQsV_3>bnIV?MI~>oGUC5LePL
zn)a)c;pE*s?DF;om4W0T_bP^-fw$Sfp{Jqf=W5JvHURm7H0*Kt$Qxoj;oyZxG+XQ>
z_TY=5(e5s5jgN&V*Ai&YP>TneZ7@Hlh7}JM@RUOk?Po4BX|=#p*2i&qQ-Zj3<}9r8
z$Orvy4s8J&z_E7(e0|K};M+txBThnFtPY#`wHS5IH*-7VQn0o;M82Z{)O?)=r3K4D
zyGIQAr!=s6s{lrim7%ltCBIi4K{?&)(a_rt8cHr>OS(I}nsbNkcAt)#U#p?z=4Te^
zpM_;V_MqF^MF>@wxtV&KX+!NK?y!Y=v1cYs!+bvPt$CDu1c5Nc&m1$?t-=u#OUX;q
zpHFS3S^m5{#!JUx@0MejnL8IK_kcSp)e9ACyO`sq$E>QX9<2PGp!7lzEc><{+x}h7
zyV~c2{oHQ8n)DG($4vOCnqWA68*$UrER@};5%;6ne`YHJ1}8&Aaygz_ngI?bDrgu~
zDd?0Mf?0DfZcrZyDyBK~E{+99<-Xu7GC)OA5tl952HM64nZMy>Y<j$bZ*e_|`bTy$
zr<FdGc_YWp#^ZeTpK=_0Y$cjSQdU`KQDuwAXz1Bf3C;E|xGN6hN|(z)ZZsb2qJQ&O
zS)J@^>QU6_F#s8{qx;Qq!7094s4}gZH$9F8+q1+N6Gf5MHi+^F1mX-f@Q$7ntYbUP
zx2qm-(Uuq!*X2W)d3%3QlNPe}#vEKzqmQo3-C<3G2=!Cu!_5;Zpt(>3%qO@(c*{i8
zQ2)fNioL+h(}GDJSz!xMuF2LxOtXl-W_Jnd>Ai0F@VfYYAms&U3<9SFA24%Cs8p=t
z+`5SJu!GB)mFfiMPCfF<sZyN3Xaj0TxiP2kf4QyQZ)UPyh1qT+?_X9lQ<_BYn9%t`
zzXkQ6Hu4CF6uru>)x=@NjqzL_cL^tc&cWF0AGxcA9D02{fqwt^L-^Km{<cVg{)Y~t
z;!3#q_5<?w1y-P&ojNp$qr^UGd!WnQ0hWc8p)BS+m=Bx?hCR>N$j#YUp;85Fe9l4R
zdKZ-WofB@#)}i^%BnZ5J7Mzv@VwqMhRz6vQ_I}m8<(ez=-)n&_N=97Gc^rgoAqH~y
z4k!+eWbb?BqE2)d_jIfye_ANFeSt8*z6SMTj$omDHg2T64WF1A_Bh*(^f@vflA6mE
z^gaBiWy5CO9avad2!1b&A$_-iUZ2#lWg_Lah4;lwZDR5KJqE_bk3?6^I56)u2wf&`
zLeXfciD+*hG}T;y3;wNziVhd_^BoNBeI7B{;ym25xEQ@_mcmf){n(W92V|UIifW;4
z{MFVa%sT1>N{3gI?s5Y<n!o0yT2s)X`ylLh2nL;X)=;WQhP0^>D4}4Z_eWiDx+~p9
zO0LYTXal&N@~1iAEXrt|0e6<9WBj8;FzDC@&H0<*n8j2y*a%p9$P+ZOi0N{uiWx_w
z!0-v_`1$U6+?9}t=el#z`NJ7B`OwZBx6}wVud~sR=DMYGX<nD_g>E#<>E5!Q-O;Xw
zw#V;T)_=J;;(8G=-p!b+`DncJs~<WMdn@191)K}02QAmf=?~?&%%l&one{+Eb_i3f
zFM^EF71%p@KIW@QL8_X8@7W)y6is{g7bmb}LLlyXOLvToI==VKgx{}1@Z*<D7`{Ia
z+5-ZhVLbIKL#P+_*#Wm^Nl}Olg0}_R@l|>bHVEq(=#7O}{^g9|gHSan85#~2u#_$_
z*!+sY<0GeH@sK9wE!_k0rzD`1-pTEgbLii1!Im&1HpjyNuWX`w*{4gaw(BTYi3kU)
zf1V4<>$WlNf^*FAdI1mZqObcd;@(P8U_G0>TQ1|V`%(<~J(uwxHsqD^uVU~%n#tXw
z#RliA0FL#CLd9v!%nAYV>oTxy-3!g(a+b9EJlY#PXO`os7pI!Iu4OkgEII_~Rl(T#
z*8pbNDHqEW<zSYxfva>DG5ub%dD(&l7`G}Nx-O=mTZB3I_^INAzt5p<xB~oC!dN$C
z;{rW3l=y^0)1Z^!vxu^>pI3@sG)6<n!~Q65oWLz67U5M{2rBM8Vx4(kx$jNN+t_)U
zSkk4;AjTPD7b&=-+ca_4+8pM2G83%A*J0BOsuMoF<~@_hJEF0K*vjXKNkr_ec6aFR
zRf(BROF(k<5+Avx6vpkK+_f*CSgHMWCTy#L51WUeXhWjdCntsZO(mYwlfz)(zY}!#
z#9`~4N^aZx3Rk-32s56ag}UWS(e>+io*7;M`@dYk=HIvYvhSnN&+r0<lXp9G@@WX^
z`j^S)-{KbJCwF|i3%W0jXR&1hPR_E#o-MBU*lae2%YJin*O`z~_MDqpjTXAM9ARCt
zp|ECgCaNipgYuifJX1zI3;suC%fMoOC?FLL*672;;c=MUybLE7`J!n5bCVTKMc5~h
z7_%)0@l*ljEbJh5c+m`gCq57=UKYX0DB34v<dPm}lMo(gM!VB!tgDIUhLaCs6M5dB
ze=C5l^jdCfPF|IHYuULcj%c&C8WpZ@SjA;OXlscQe}p_#{_YAmDi0ogiN&^WHN*h9
zS?QCo8m}$Q2BpKtAvp0En6;~jJzPk8Hm4fA@898d!=>OK5epqt27*b~2rjb(NYRyG
z@z|Fv!z&CH=vm^kCr44mdJ-<0PJV>MKUq@(ao<vpf%_z9luZ1}Y=6xKOaIBR;8894
z|J}j<Ya15@3^6Tkx-a-0T?vM3{(!<OTKH(v8RYs5CDv(h#%V8B?XL!dvQZ!ks}Wp#
z&4RPH3NhX{7<CeIF>R>?=UC1{rxB6FXL-T9=Cm*s`5O54=UxmA4g~hc5mfg-&!vVX
z-09s1uB>4~p4Bi+)JbM$J1$mA1BQa+y#q*AZejguVqn|M<!IYmm9-t1CG-l6BmWR!
zc`uIdUnO8{OEj8Kzs{4^??R_T3vugaJyh8l#B92<!Q+o25ZxXq&awK3n_OPaI)j|x
zxlbWBr1i#}O+VPp&5@9)v=TIDE<pVyRrueyd8k&90yfr5P=D7+%KQEyz{wmuC@RLU
zqoZ)7vJ0MAPd?vuo1is|7=c@Z@x9m-U;ijXli_ceVbnv`(2@rgYiZ}xX^u^kc7vyB
z6(&yJiAyV1;P0ihFMHk#zrRSsrLUuLYGpq(V$LvDMEcL&iGuZC&6Gb#GYWk%<V2hC
z%)he`<rU}`aFr?b9)OMNk9ncjW$3LIiH5hPL&$|}h`oE68Q9g)jAIH|ROqla{Z-6%
zavZyomV#xgs-c~9C__Ucag!EtJ;-aLZ(G5)%XKk}Z=~$C-K>2@EuT9&5K|UD=aO$O
zJm;_rH*<McX}vy`w_HeMiJhqszp)rY+^b=KLOHZ5>OiHD*jmp@Ogpa!-~jttG}?cG
zyb52qnPse4wyp?Pw@Xm@-F$F^QfSjEWIFmaOmyHIlWgo3PHb9-sS7i)%pn(=V%{^I
zWf|Oja}dvsON7Mt!_o0vKQ4K;8wwxAL1{=Z%an(+^RC-abSbmq!Iu#1IK2v#A9gdv
z#_y(G{f>dxOmp1&`!d`!$|KgX4a`-EN2MLUV4!(|>lvS<TrCC1a3?UD<^}O5Y3JRs
z7~ZHxLYPqqz8s~7hPVF~E4n6<m#ZJ!eC#mR$?G8T)<4WLgwhD-Sz?!;D|l=moe%Ne
zl`X=!exD+o@xmLW!zdGpIPfE^1E6QkeDM673X8PoVePPbyr8a0+|+c_oH%1xWt@T~
z#>6PiTMOG;7obxzaUqIoc<ZcMY)d%FTQ5fQ+z-lV=|I2N&^D%+a{vPN320QEN%fHu
zD*Y1;elG*q!V9E9qS~~(5Aj@2eC2+d?=#8TE0viUHJBf&g`IKz$qVtCS)WRPYg$oo
z&MF06t2f}m5wxFua0;~7>x*SkN1$8o0%mtkizVNDVC~$!D61{uFDgmTvBalx!QTUL
z?k%c`iUL8THP%#RZw&jOIuG+pscs$egV|p9!{n0+IO|eJz0Y~J=0!3XIe+Fp*Pruu
zJ1(L1oFT+=@PyNeQ79Sri%aL}KqhHQ)cc2GW{fgQHbp}7%$uxbE$v(y_wwM4$1(mx
z7*tH!0VypS&|AL}wCh&0*cw%sHmD4hvyZTb*K=9;0}k5FON2t{R`lMm2AW!i@usnT
zz{7bC&ghH+tsfULwCgNvwG2SB^}m?C`$)j1Sr|P`6CL|(;2yJy2Qy_2^c*5hgiwo<
zl#WBnkOkm3Mb5=^HuJC5;Hw}X@DPrQOZuciuUqr6r|2-KsaLVU1Pi>da1Q#;EXPhK
z6`ryro#}TR01<IBq}TeuvpK6#bf~35e*8J>(Jo*u>%OzOeX8Kv$_wcB%NyF-%>^A(
z38+nb%bIQt=PDU<Xs7hZB={1;o-RG?T$3s&o*g4^ZZvo_-5@=i0ySe=Sm<No7I`m(
zrse-Ky><DZH1Y(9%D1q+Eh)rKNX1j~Lh`yd^2M`DV9oyJ<TrLg`@PGs)u0?V&e?|3
z7RRE-_i60>O487oeh?J*+(|z}+LZXyFs^YYwrtx1A)>zQ(|?s%wIT*WBL8MeSvz3$
z4t-QxRfgC%6WsMesKK5IcYiwJj?$^vyIY3SCi~*j-Nh)0DB|w@$j8*8i$~IiBcAaD
zGwWaw`jD^T%Q;hPrF_<~;5jq1o+Y$Boep_hW@1{^EG)Hjg$1?5KJ%o{q}X3PJ%jjh
zyUkfhfC>C-qlX?=l-H@I18Vnamr(PvQd@W^wj?%6lfyWsl0@9QU+0<h#v1Y?z2f@k
zhrrH%@*&Yuo69`Dv6L0jFzebf($y~o6C*$9cry^(o{zxB{skEOTMv|DmN0{nk9fP}
z0DN_ygy%ny=BA$v222`_I)7&{p92eVg>*PJ(H=f$VHEiER|k(xvBJ~+M&$b`ViTTZ
zph_IE&J?*UWCA?{=Y~oX39+Rp2O-D%KV}eV0}Ai&Oikwox0-aD)kdvGZQ_k<&mW5G
zR%K&h-bs)WGjNhc25f~a=x_(nHqv1xtGDs##145QPQd0!no-4%ArHwJmXZ<6yGM63
zk1|j4P4$J=t^;7ZA92`8H&huR2a~4}%xd*#7F;Mpi%I{nhZ`^At$oDf@OdF<KfKCc
z*<NFl{L(Prb0K*6-4e_9sq?TWGpJt~3LUlyXxQfjDjrk{t_w0DweAwC+zsQdy@u0{
zX9zU;?}bwHT2M>6AP)E51c?nV`1#rOXtZ4q+S<H@F}hV~QliQu6XNjWv2B?D)SP;@
z6XL{yJ3tf~YvOjggJ=Jwd}iHrF#kZ_q-8gFqH`kj9Zzf|e`B$&jvcrrtY8M-L!p%B
z+siYmG5pyq*g9e#I(JY%<M@TUeVGn9#R6CSULwfd-1)4c^VoE~S!h_>!wQE{9;fwj
zP%^&95+_FzhdY__N27RKld1Tw)Ei%A6hi)%Vl?mw1jp^>!i;%UaQconT6&y>%g@V5
zi(k#xwB<rDiz4p8M{zjmPh|N&*oDOHNa6@I+#iVP<91<=Bk62<8^bd4Qry0kgNkF9
z#VNOkLdk<<tca>+Cc0El_>N^7u|~uKt{1ltslcX21zIjX#y{kjpvK8~aNOw*oj8Yb
zUfys;gA^P>f<a$$g)&i`xjbheKYB9^71N(qir7h>IO6{q>`|~lGnshyw@kZ3KeE>C
zP0Z+BAYZ0diDt{q;NlV^6ir(oRJ;sjfd`IL4vRa8JZ4mM8IL9%=Vq`OdjdX1yQB93
zC2afnr(oDtD)wm@$*<k=!u-z;=sL;(&dv0~4>yewZAZa@vC}b9s}h}c3Q%ij0Z68;
zh8cMmAz~?I*%C`4Q<u)R<#b}P4n&hLuUO8X#cX0dX;mvt@KwKoXmD5=z77e4W=9=p
ztha!s8ztCrc{<c7l;G9e4rY@`jJ$g~lu3|Ctc?VCBMN{^+gIZjFB!I94+4!GU+7VG
zAT47mEIWA~j_aFJ?!`#BFHFWD>T^bV(QNgX1YRfyfTTj7iPG+we(kG<Z|Bsb{vj0z
z*=7&LcTR~Dt2gtf73t{qW(Q<D*F)2JRq_g6uk6wE2anYSTvjW<^K+*ulh}z_{a6bV
zMcLSG_KIsCB{t#PF`)U$9n_mXa{0YZam7q~nstu{r!aezW-KKgS0>8$ED^UAoFtEv
zR8Vt{g%-zO%uRVGaV?@T+y5x|ys>0QoAhvK!DWcrm5HVQoa0L7i@{9FnbO-SxaPni
zT<xHaiF5x)(Yc1j{C#bFPzfOkNeCf?5Nh@+gOHp;62c&gLK2cNCFvxUWICuR>8wVj
zrrE1ZCz25V5JKoj&LM>GtmlO{U9PLHIehop_qy-TZO=b`DuMkKj+m<ELixc)bBUQ(
ziRlaSZdA5#{}WP_6kRT<dUOG8--Kd?y)zngkAyXU*W;TpYq96gVpzP{7@d{+kh+7k
zq+f%u>h(NWsFsZ$zIT<bOZ%`6?Lhwc&pEVFl(7S*hp_%rno!g`)xzoIU1rlQ8XcDq
zC%*L;8}J`#p0UX#Hr{ub$YOyo=(Pk@_P>;-slH5m>=}M|y$>4JB?>QI$VauIp)B9e
z9#_4}M1}S)Xb}~{v5B!LcC%4Bt|gy((mQ4{pE4b^PZboaaocYdpmBt<xJHXuS^PP0
z$_(OjQ%O^s^-pQ_lJ3hz&B6pb6R_D$JGOm;m5rTyK&>zcp;Q1d$wj!vp8C3qwnOv-
zO&B>eAGQ@{fYEA8_;@xLTJ4)zez$mN->6`!gO}jxLj@*v=2NFgU!M501Vjau7Tw<N
zz$Y*EViS2TWdBYHr4Kf<s^l2<>s&QN_qGQo(QI@T3!!bv9pSCxQKXY?#uiAVE6X|1
zB+b!PgYqqpK68_EE8)<=t7v5qiB5$<e1%pS1_&>4*OC!zHr+Yj{vLoLw+~A7CO2GQ
zwgne&@<dU<6lgfV09@CbLfEDeXk07elEKBw1p-TK%lCqVS|+IW{w!*XZ!5#klz{kN
z1!&LtD@=%`49}EW7SM%*|J_XRI`@T7=G52Wmxl7m2f=8`L7X!y5yHIt(|s!xwI2@V
zA8$(0W4;R*vmzGgycJ`IkH&t5<o|eH3<-f_z^<NjBmD>_J${y{OtXZkL(}lFauk*k
zCrstjLl`o<96CFL`1c*VQ18!EeyL&?w$~d$4yP>54MXaS5^?8Id1&xz156!z9wm|F
zHQPFlsRA7NZ0$5O>?UM_tTLepH}kp6Mvzy;mTwq$9>>nujYg(RQE)Ovsgb!HRV!8r
zQ!;ZQx@-pAyXb%>;bq{y@D3A;)<TS}FShjy5vFKFlOG}*LpGK}ShF9~CVx(ldKP5B
zQH-djQU321SZ}zDdM~56bE!MjpPLU28@i&AMh}o%E{7Om`YKXRz|)R!>{2`%<;8t?
zT^e<=BwjMt&8ENm?<%hCsKH03llFAvE*rQf2nuD|V1K0?tQ}K%#_Nw<<+NWYh|Uvg
zXANV6=8eVo>vOqi%R7s)6V?!yCX(x_`GUvKhurQ(D*Q6b1N|6chS$e3(XhS325|y3
z)>N=T2`3R_YI(zVDYtnQ1|FutO8=R$<og{2ZN=Y&d%~8GMq|$ey@ycmsSaxItwNC@
zb&Jm#%Rid?qtiqc{0uybkA7z1qN(Hs|2BvB+e2B;TfRKxrUdXqBuFlcs3T7R2B8I{
z8@qESs~RTu87Hj$ph6$Y)wO??fZ>oiyrFbB>QD}$#^@}6?j=Vj2!#r{6Q(_(+)C>K
zZl|lprgn0CmZn0d+5w<)QV6BzJfLl5q_Ry~CX{RbWN)9xLhN4~Yz>WP)(QnizBWh4
zbTzPTPX+61VZ_Nwf}pmC?7MXgO#Bdtn<$q)>=fdPzwzKvnMPV%9BUGo16caAUJ=9q
z*|7><R=Z)?o15JF!+0<~a2j&Ab0`Y+$HvBbK1_EcK2M?ZZb}(y%yvh^VKR1;cA5bv
zlCdsG!JN!xEaCPoZZ>Kt*r@xVQsR$BW(s2Ir$AfxbG$=%nb-6iz*SzALfOgZ?A4b-
z)cSV_wI^-k^<T|d!jtu2a_2bI&0md9@sYf3%pM5*><8YrIQo9o0j-65z)|J~i`Feb
zzfsPp+P#~1ZqLX10f!i#y~t{}hJfr$hOjB+GK)4c2BQ;(*j0HEMok`1nl$x8O}zl&
z$79f5FBIMy&%iw?XVFW&KUBGgajV2kurn#(iTB*`tuJ|<?U!(mhv$hcwnyo4$cvum
zrm(Vo7XCTNQF7^qh05u^vgD@;I+dPa5$1B3bF&H)>I2wb$!H9?aGEb3MEzsk1j?Rm
zN|5-JgKqD=@NZZ#W;{F2(=~UY(Xm5d*mW})x$K9?<ZM*ay$Y%!h7kP08l|PA5f*zw
z%XZ3<0A-v8nM2s9{t(+hz25$3c-WWY=s6(`-oBiO4G(0*6FH36G)r;n*L0k}%?$Ua
zB%t5dbL2Z+4l?TVsglW9>w`{KxLJmJ%ic5RMo-La3nxCdADA85L;bN`nS|J%YLBw9
z;trkP6W4RSTk*{J%wnElMB3TPe4g(Zfp>z-sFUmhB=qkIk7OJ&=SJgFD;df|<m|73
zC+aFpK*J^(?2HO={)vMqnHw(bI5-@>o1CVZXE;bYlay&))yV@in_l;W;J9QVIz6FG
z#+(h%w(kcEJX3}}BeTId_5wB!tPpN?IsqM~q0|wh;*#pwN{=1uz;{C=D-5XS%@4aN
zUl6NE;F<?w181e#xM|=uX8<&lf3@y|98;6}!?OQW*wQBF8u6!4@pCbC)iv;%5l6WE
z!6KfK;D#P9`Yg>q4*Jco!;}_|eKzD#&#*N#Zt4LCSD!<54-4#GScIMXtD*7YPcG>!
zEg3gy5Z<~g!<qwcm`9fd(0a)W1YYYcGUy({C+6VSEk$_BUmxoVFLKG!V_bgUfPLvq
zhldxXxS^>8UP<Km@%|amAJG*hw|iKG_1glC3Oi!AM4-o*7wm3t$~)&@QOc$q6V6yv
zO8P@Tu=oDSoA-_oqI@50*{>j<MJd+|(E$jsgodk?ykol#7yrD^QrJOEORs?P?}zZ+
z>oT;mTaE9#RluFo1!!`ofc#q87O&R$p?ur{xM*08?JYN$EI^80d6jIjg9&(bdB`iy
zMq-%pEnd4o9aqmTLvQW_GgK0K-^a1Im^jF5pN{QPAr8Mu`pb>wY*A$vUYMVYu0vO&
zqVy?C>>mz^Cee_3mBa6!{n0Lg_&<t8tV$5i)qc*yoFVF<^(lpkCF7ON+a7YWo5Z(2
zAqTHs+349oJyXFE;3L(<;tR&4FGW~rSy%9gKb5HY^Br?cYUcq5E5R;t9exy$uX6cm
zE?L>tVskUi^X&_TMa^lL(KVUNUm5e}@{7u<K`FdyY9bm+4sn~qdC)nsmPhpbBNR?A
zge|4XXw;{gA1S>^og-=dx$6!*k`;lXiL-H6OcoS+=0F{tsb=S6+24Buae?Ma6mti7
zI;S6Mx9$dW+a}(xtQvKf#<3w4xx@op0m6rrJ1~mC)Bq(Ij)=m#vxPW@v>(~)EZ(ZW
zgN1ld#(vNxc=YlxHdjsH?FG3^RKE!w4Ytu8q}amEomdaLRWtj<(;n&gtP<}D*|^y2
z8QY%|3-7-)a$}oO+~nFk=okjvGrt^#@05@b8V_}UO`*4OCPsfdfgW_<GJQtg9%5ug
zRJRI`U!|Yz?+aW0w-pVpoaQSom4egr$<X=r5(Wi%gTUVmJh}#edq*#*6Q6`hcY0xy
z?ooIjc#V5M4o0)d$C#NxF)ws+1=9|HmaSKar+*Y-*AVLFYyQsilCtsZh@DvXM+#O)
z&*I7DN3lwODj4qX4zl4fl)=q{NtcJAq0c;J#HB0BpaBCh!L$gD3|oZdH%qbJ%7D8(
z9)xuT-Qm*BC`|ZN07L&QLGe`wp`z6kcKtho;h#=HP{%s<RFa5a$U~{KDFeIyF96mx
z#9@ohIm*_R;?n*}bcdgSGl;E{+PV-7!#*+TxUbyti8^av6vf6{jzhIo>%cN_6B=5(
zDWj`;f}~(4bS(MBKEM!^|0suj_5IQG2KDqDL8aib6jM98BUpz+<*-EjHJf}m>W9F0
z-Z9ktZy7h<5sRPJoxm*PD=6@+vZybhOr3QDyK_&0yW0<8XPr558g*!|>A<zJ=}f!k
z0{+)Su;%X~?mbTz4EHM8o301YJgPq{?7Yod+>YboaZ+gAf1lN-#wq)y8R46s1F+We
zB(#nq|K#jEG|g~<%i-o2W_ba^E_~;rQxzo{<n>+jrw}*Wb|s%hlG1Us1VkF|nbGcZ
zOx#sXDEnq5l>B+awOtKa@8_kMv1=fodV_QvgP%(8aCN4y$blWT$)Hz8J-&yt!O+cC
z*iku~*;INmn-LqCd}$&E?ft;S>y8SwGD#C3o62H8AH}N+PD1lLUD#bnd*kH*CcCCX
z+4@+}q0f(GmoA8}y9>*g9Yt-2Jl^~!j%W0vUCT!?a~!Edy|ME^HOibjlFl+lPAoxJ
zC+w(N$imOZVbKSK!om8WR+P>SLa5heKs8_Uiu&Bhw`t%k#h%+}mJK~7lo=$T&B%0i
ze33VrUgNOuPy{+#l8zCD%3)cPP%tIg!sLGjFl_KuY)+AZ^$#Hov)h2GNoSxVA&fcC
zKZTx8FW|e9!Pq=si<^F0&P5APD<vzp3*TxLLwne7u4kDDrsEES$6OIN%_~GFp9rvi
zuZ0Qowea880^IQ`11>$?jz+a<U^FqG+xXvN1B+>XE_9%-lCR8S_IBDS^#`GM6g0cc
zX4=d0&~Cv#W<2U6t0Fd6?W+RxJu(tHZTCP%FI#{IW3lIzRcLdy1hhYgLuY*=t9_OZ
z(N(25-(?-nsHp}Y|9p&|>ko<aJu<Es$~7-JLw(3UW$ny33~6@8UqAin{x4zDwbP;Y
znG&u(zd+ADE9eX=W)ByUpW@dN@PBy_$)HZXWhKzmH42*p7DIUWA`JVa4NdE!A>~6J
zs7cSD_FpNeW?cl07zgT$SjWYW)38aWg{4%KqRzW45ZI;6aB*=#y)=7f^HmAjD=zcS
z`SjWHw^LSGk*`kAQz<R*VG(*2P(Ofy;=^h-@F$&bxhHr)*()w3?Zy0eC^}|bW^u)I
zUx>WUOy4Fz%F7hg{L%}gm2zgCw-q!DqTs}g#c1a#6Gn%*fv?+G$oPH&BE(TJP#}dD
zNdaJ-HG~T;Y*mhSCkBJ`+)T5g3W!^g3*P;7>F@c$5^}3pot7<S)t7-_x1$oLZ^fc;
z;+}LLPG{>`m`Q9G({k!aSM0Rt%veDE<tb2gJsxeI4Tq3(2k^U74zUa!xc$Tdw9|;Q
z=sh$61#K-wA7>M9?4T`ZmX1J=un=YH{baOOGXO=mQY^SU8l5B}Y`s#;nkJp+6}EEF
z8dk}@r&mBud^nenaG~Ah4uC7uz;~YyM3l4%kEw!DT9tzRuU<v(S`Uc5QAj!`Wy5{P
zqVM5o?!0soHl=Gr&*wXEP~8TMp^USP7#2mn11Rq|9y*U}!-g42uu3%^HR+!AdK5=n
zdM(C(I1l-+18|yP8#Y<wpnXIYlV$~A>%8+|A8Cl=doDo1(Vbvjc9IKj_bsVQw*-%-
z7S<eI0D?MWix>Uk!Koq<v|nn0%6T_=#@mI>7B)hK%NYJ=O%e)vltRWKnu&geL&UGX
ztmouCsL~J%2f|g5r}}`&oCt_8za_lp>w}BGZUo8UGbOUkzL>MDin>zv&RpSbg$nmS
zOtQw2$^Q3I*bsh!SQc4Kx4sn2kHs?kCmaoN9jx_Rg&jIO(BnriJkG4fws~ur?qp(P
zKB$C*1>~umM0b&bQdsWdj$)|+)Y}(;`3HAipO?erRj1kf*74}mSO6_&BCuII6(q-8
zl&yhlxyFx3)GRxLA3KRjc%D4+)HN9~r9Y_lm6hm!2uFc(KAZa8iF!g>gdVxRv~L*3
z?SCGDmN-)s@7TsW*I(mpmgJpzwF&Ij|Kj?VDG=64XUVda;MGG4HIg(wc8-F$Zq)nq
zIgJ@Ef5GdQ9#e|1_Q00iqd;I?OgXR!yb<J&#_4rT-bDjim>Zb?y2J|y<$}+H7z{Pe
zN8cXJ%(&_ZT>FuMlParW{lxV+$0HRQW!hYWsMxZNkGS>^WE+T&{HF&o<619*^}j6M
zkZ%nMR^7n6;s>#khd_dLDRh0`A3Z)Eg%I*tAIUBNHNkPNUHFsN2Q1^xbpcp&Ee&)0
zEI@FezGUHqDh#R(;kQSVw{*b`W_X(N{<kLxGc*!eq4i6iVc`frmQ<kml{RjBxfCSd
z0&!B^8kD^G#OfvA*y1tMz)@o?^;vIcvOgJYl%xpf&_2{ov=aty%!bspGA59?6^Z)%
z;99eKgZ+^Ji25IO;a(g89b-pwRn;f1aa@YtiNl%s*+<OyIp-P6PjU&(>iS00(aV$a
zk$xMwGw%ZTr$=Jrq(?mEmk94`=fTpCVPG^h01R|9F?%I>K{nfBeYYWCGyWH=*<!?2
zt?)*><2zCMn7SpxDCfMO5MIukhF`>~C{V3d`n!aHd1*IXVYL)%E;n;K*UL;H3gX_^
zE`loPsfE+TsSvZHnD{17l!9x$;rtQuD?eDr<%u?25ac{_fd3fm^y~*$e~!h*kL#Ja
zSrt@;g`nfSzFewS!xz>jV9vKtHa6G+vroq0?S8vZ;@hL-mR}r5);L=<g%o2+xf!;-
zuxAECVtDI!;<4;hL-($8;lZq6j7W9k4yN7EzBB`p*Ahd~XFqzU`7+V0>lWU>twHr?
z1HkPOpk{9iYdb01ek%oBrsQF2*hDmQ-v@rzV^HT7J>MqH<^kPfpv-|&mU;*#SejFw
zmv&r}qj3byy`B{p(MTUz{JMDjWm<p{Chgp{`!-gy{u|%YM!7oqEXe4(44Q72@j1aE
z#P2?)3_o}rBwKV?=Y~occh(P056Re^$GO;U=nw10t-%a>K93HQqEpmkW;@RxwGVz3
z9@uprrDscdO_dop(2ikt3uCak%XHq}Et;E3gO#53wA-05i3!5>S^cx^AbxJb8@gWN
z`pTjB<*|}xkBdr;2I9DWS`SCA7EwV&APdq!@C*-z`d#Ii5_Ac*J(t1$;xue~b(Wco
zZDg(A%XmmpG<A*s5mp2&LYt{0FvzY83P!JDj{c@#er*Fp^lMS}UNsr_R0iOR{^Vg-
zykd4UvcOSq4@?Y?!Nmb<pjAT~))t(=LYhw_j&{MpeYDYRa0qjqM=Xk$BJh`A#(-Y|
zARflhv~vQl`qab?)K`O3p9jpdG7>cOH=-y_m)Bgs0PPDm6U(%P86L_<<MLDxN5`V;
z4hict{lMSr9w5K%G6<g>Nj*|6T<#*_Pj%&J>rw#CA;kX8*vwM9wDO)j0wpV{gQKc1
zGd-gKO?er%b<u?Ux@tTTYK#e^0U}Ns5a)OTES?s~G7{{dM3|2ocO+rn<=rSv-M}Ix
zzW7T%08LtGcbXocES!6XhiK8>|5yoY?z$YNlW!sC^(DxdW&w7e4O!jP{h;@9A^&&3
z3a=z;Ba@Fuia$~xT|9AMIu(;&XW`v(B2-MD2q`CuNjFWyuY5Y{$v44(_)EAUD;aJm
zSL00Ri<@5_gWsYdsDHBr8x|x%eZQZ|0#}Yk3t#a!(MQp2gBOfGauMsgaV8R#GmYMH
zXtkPNr<haF>ystwKh!{jQx`$*JrZ(0<uTXa`@wm!A$Wf}4FRX>Spwa^YZ?{Q?Qxtp
z%;*IY%KQdKjH7<L2};d=G(QHfMGxaJZm^1&t;c@z!YX2&2gNgU9?A}kTY+9TCeR+E
zg*mpI#lW^~`0?!uuDqLx4u8|3-r^Wri{)r|`Y}K0A47WcF5Km`0k!R~qPRkV?ek}V
zQ(8Wp9^s7>$nPZ`KMwT%3NUqV0ED>(@xTwK(7EIw)a%?<I^GNc#k2;#U#*1LZHBDT
zWhXRMUcs_e<aG!ggc%jzS?BeO#J8+x;$IVmg-46ISG^y!g;_IK@c^b)9Kb~hC&6sC
z61IKqg`z$4V6pL4*63XfpEIVSIA$S8M}?rGlX8}=lX%1SY}Vt91ic!`ud#+^MZ*fE
z_laVx*qw?7Z&k3~btKNZYJ#Q%_u>>a;ug2KfxK@lcm`L4)*mG#?JL5sW5^>J>%p6D
zO~e4>{UC3b^2u=pD53YtV@0+wAx_Bz`BN0uR^-h!-v{nZNz6pUk4;idK*6}n(3Jd$
zj~u@M4KwcX)}uX{Ow*bbz0XC_?K-CYqK04pNSd>K08Z|=5Ead&A7v7QLR5vO26-5<
z=n>PGPQ?T_nmzYuaJjDrpZ+Tv<$8npYSnC<*OG+xZmF!twH&l|B939#P{`=)4jWDl
zN7Jr;pffQT7C<lt>?O8@{w+2p^E@^T25ha)<AFy@z(I2k=De8%g7F80K_|*7J4AV=
z6YWeOTUhkHx)8RDw_=2F1((IkxOBK97zbHEPpz5sIcsH_IX>)}>;kIR62scn7CR0f
zhKOa^;IwKqypZ-n@17sHM#FT{i)ikgv>K!OpT$*I#3;*&Q;HrOf~#_W%&Clqj&a+$
zuZbKc*sJi`Pch!;PW^48Js_kg3Qe}@;XB$vcPN60nZ5wjC#^ukQYECi6N7AjFgBNn
zxW`*_Vh<_UqR$mLcW@-WN(sT$fu&g4cQ_`dM1pv%26XDAa@CzOK4w!UM7ya&w2wM?
zMRkX;fl}V++RQvYc|%Dmc}YI1K>cF`syt&$B){I3q^8oG*pMM?jt>#O`j&)aU#JiZ
z`#|R8lQ<}I2<E8u0H-Cn*zjR0UL0M4QKmgHet-dLP%rxEuanSjx)Im>H3w9&l#f#M
z;BCJpy!nfTu$D4_GYZQ<cgz;hyZe~S<Vt0;*8*ku(=_O?-h|UGTA(C+dP&X2onZPq
zfS6VlFr$O+9N+uG85QM8qfSEmt4yx<jIz7BWiY8O6)MUDi0fCwRG+>1pVCEWUYQQ|
z84~gf?<x_`je)6M3!y_VlnYiAn(MY`gZ0hhJneoI46q4Dn|ZV!YbKW4#+OX=Gr*$U
zr5X*llGn<$g4;;MIO$a)+@)P%({KZ5IZw~X+Dkk?YaS+i=?l)=+PFB-iMx*Z#I#RP
z?yH%4F$@;tfX!8y^GSi1W)a7;hW7n8$R9sJ2Fe;Ilr5V8UHqd^oV}XW95v+i8W*{{
zt`+Dm&Byy))bQ5H{+O}o6BC~us%%PI38{Nyp)DqZ1+3i#4L>TF*Ahdpdq)1Le=)*_
zq0=BHI|~-xw#C98q=~0lVxPYeC~r!K<`pXCmGBS@NQ*@Qz7|SU+qe!ZM8j`FSg}tA
z5eHX6Zm(F(xfg{WD|10S%7Cl#FP3O)XfxD03UbHUJa1_jHv0Q9m03SneCY>oSe3xE
zr|9xU>wHiIt3hz>95~HB$(v%<fZ?5VG+n)qH?G*n&xFO1H)9%x)tP`|iZOG}+yG*y
zNz8HU1-7;OQFOjP6_UmN_&(bJWYfAb*Jqc(`Cb>y8>fp2AzCOZ`Njqg=#PC@C8BKj
zN9OBO!~{Lng_HgkgIxOs?KRi)zRN~o-Six;_3|QY&LW=AM`tdpjRjxZ3SvooVawMG
zQ7SnJu4&I%%0&f;KX*ZcCH{PIY#guSw(!4$&Y1dcF9<*m9lds8!zm*ex`x<eUK3cf
zTNAJEo-I64B}QL^bY^Pb8_HM++8z(4oI^KA39QCXwDacwRpG9i=b>?O5Nx-tLb-1*
zSi4~ewrURnk7pC$pGzf*yHmDy>><z_eHxCL9>A28t1v$&6LuwK&@NC25wDCv=1%!E
zlVcEbj`}hx^r61+f$;RcvzV}-{2#6lc~1o~F9z-e{ZV$PR}%}`2Fu~@>-ofdEMch?
z>0GUma&ZG=`Ql|~A*gN)Mu{^(lqH3xt0EXPb|#)QnTDN<yW=s<$=IS854tADS<aae
zaQ!&>ik>dA@E%!39aepW{?~iphI?-K^?M3Foh5-^2l`{{`T3xD+mlcIbBuNXKElQ!
z-JydQv#TotL4QXMoeBQ1<4zpR-kOHt`OEOV{~V~bcSNUcDn4b<Hhj32dJ88`0p~DZ
z6m&_)=(PnP+CE<S@lq1bzdsC7T+Y24)0y{bb7;(Z$Ck|*fs3#00abZ5e$k1;_fNgR
z+rWy;?pi`nm@l{aN_t06m2mM1<la-ZaOX|&Sfl=cd$_!1sSDyz^KUeNwYC^*?aqVv
zjS<SG42RTTp~S&@#%uqPZ|`0@*j+1T)`5j+luV!FXRAT;)=J)>NaHmFi<xM?59M8>
z!Nw$zJ#cVG{n6x8n?bCDeaW=DTLTe77cjCJ0f+467~u7acYc*n=gLAT+_aULQn{e_
z<S%o5T*JNJ7NcsyA>R051f<-PgSK7+PtDo@h3~1??puelY2Xg%EDV4I+h7nJa>C8;
z6R@>+H55ioLC<9YxcRdJq6ezc@5o{P8pNqFVq7uI6y*OMX3Y;`(RhC{(?0!MsPdo5
z_WU}6rma_*W^8xndgc+AMQia7K92b7y8w5TDsWGCKWyw64dPN;&dv|S%a>>$_~0g!
zInEb)92^5J!b#9%p@jO{wfx18vk;*c1Uik!KoNS1nY(7OP37?za)*3?1N#a+1diZK
zJDS8W8P3-{51MN=VbVYsTv|{Lug0Fhs`o>oZCI_a!_pqqKB%F5UZ*hRVlHt&<1nCi
z4=^1T#<aavZ0!RFV(1t1&TY;(>H2OI*gBX;k8*~PQ_IjJJ&Q~J#+O92Q3mg^HG5M?
z*=eicyiO|^Dwl*|)V^r=Pi})AHz-5#k942EhHS;5B52Y+3z_5Aq1n${Oz`lrg=B<(
ziE}CS%7~Yu;Py|UT$YATy)~fz5%JIGjAjut=fIk$?&#4cK`1$XUn%MBQL;6bxZ8Vl
zKy|IOL=dymqH*#Cc5KZ|Y<VV!96S0hSZJ}f|EY1gX&yh4aS4oLlOgqU8szjoiG~|8
zg--4Y?yeVyqJMLQ9fDlm^r@cZK1u|~TLV~AULFfG90_$@Gx61WH(Y;GifU_r@YHS{
zTs%5N=%F6P)_aQZ<pNtwwHty>PZRNN3uQ(0qJ?k%rQ*0>7U)cVRpZkO;l`3<xR3VD
zzEhTAem~N=t~y{?;Sy*YN8Z-Sr}<S4@@|zb0K1%b?1k9@Oz0Uv>!$=BctMZmSqX}V
zweffNPGYM5OFlzK8%;88(3^bHHtNK&Kd%Gr#+zu@aSFBHSg>YM2z0XuCI3JcbZw(d
z{k_wCrHZ^OAA;e2*-*?ai$%C=0M_SISd=^vC&q3>uf^mmx^}gsZO{Q|kLIj#eHg2s
z7J*t%$lIbxoO*VXt<54&S!6Q4x}ilq<yW}R2f8DCsinPf5q-z4X{JhGURJ~g+T_QZ
zqdHiI**R#Pl?7U_%3-a}Wvsu7ys%p_I3BU&jo!mq#M9GEa&4E;cpw9Zj(7~cJr93-
zT*W5wK^SRup4iUnJiMU*d~&Iyc<E^iy~J$FxeK`Q98atW3kKuiU)ix0;i$??;|&wT
zn9+gzEa;7Zp1qbRxMd}*-*gJ5y840RU{`cVI}Nv|k>7iMEQr_jK&Qb6(cpm>d7|a8
z?70aB)TQG4^xok3c`e|(Ug*#x3-t=Lp>xJv{^R9%oT9!LHLJ)Y>=}ZB)SJqL&u-Y_
z69YBAO7!>=2bU|Tm(9c#y)9`r-DktAS}wx9|Ljn@aV*!nb{6Y(xASj@<mmIJ5JKDY
zF@x?AlAB9~wp|9}?f;BWyKy8o%`61*$z(3LzDe2cPagb-DztO?E^KIC20gA%r%sb>
z>^N%6TOHz{<ClOK;0_i>XX991{U!e1A)eJHl!MW%u8>h9W!8e*EQ>N8PHliyX1y?j
zo_Rsv9`YdB0=#TD3EwX*<I1f`AXvOrA*jwP$?6x4tw}vu&2kH7JdbhX!+yM~!3~~G
zNyng^159voD)HSH!nRAq4wZR=*mr_5Y)Cldm~Vs3`8jy+M}M>&SA_@tjL>7-ccEx!
z0oT^mMCr5yX0tGXVM89jI*T;F|IWf<(@1vz`)Fc~UI0<d04{dk#v2`$qde}daR0Ir
z@c2_i{INL5_#BFJO*UfwmzfA|Ib3(QlDb_b{9IBAPCb<hRgz@ZWz>0$dt-vT#;f4*
zLx$3q^BKHc0tQ`Hg7u&-V7_1|)J&(n^221*av21dwo^7WsV~$$yTZg;my~*YUNK)I
zfAC5j2f0NOs43gd^mR^9r`jY48XUwUlFhMrdOo&ET)DfS6*SZY;A)>ZY>=tpypCbG
zWD(6EI!S!9PZhl;3ZbB03Uzlqfg{a~+G@gO(%D#d>^}cnK>phS<DhZyadiIhi$^~p
zU(;?Gh!#;Mudfxz-#Or;gT;_79D-`K@$CKJY?!0f3pJB1nBi(QKt1x+u27ZytC@?U
z7~%@Onu(paEAdO*NOWCwhbg}9g2j)+xyB{pKglQu3URz`s6VrI<~+AB9>S&-an&t4
z*HRbriKZop_7%MKb^y1j>J3Hx&cU2yHx%n54wy|kPGm9f81|9RxUG%{NxC;&I!bA7
z7Y-GV6y)!aLP6wNj9`Y)9M`}DolgPw{>b0EjD&)_>#%i)j4}c{m}=E0;h2>rz!OW+
z=@WSmEKT|17#AqqL(j+2)Tc`RkK)Wx^nDr2qdt>=;o}vsNgjtaiO#Go&0i_?v;(<m
z7-+qVWhrL)*k}4-)S4R$-qYQf?6V^bi%EkQ%R`~#e>PZC6p5z&s!2c1#*CS|uwcs}
zG<<QLYl*tzusLDibfA`n@u|dmbpWUkfZm50P;>qaW~6g4dUTdqZ%%{89<g9=dJ^`n
zFURji1(5sSXtXy@fMJCdP`Evo9l0ch=CGq^RY{ui7YUnW90)_`9;Z6l3+^8-0z-3G
zWl-A>-fAF%iUv*G|1S_;2NmMn;Q!BB4p7?pBtm@_y>}4@z}epz!UN8Ljo=(QFG@g#
zbzd<4MP8caJA@`5r0AMbjM^uq<ge%pO@bqE>{<xA|Nf6ZUEdYOKDJyDtiuK5v-P5`
zOwE}4eC?G8Fc>a|kqP9je$W@z?KVIo7ke-ZyUT9%yb3kh$64zaH_*2WqwXF<?%pJX
z!F(VdHCc^zw^g{qawc-u6Lc=m1*6IersuRAzJwgZIrJP9O}lF`Rp<w6y;qVi;{kI%
zH4!#XQpeeqff&7!JZlTiqagmXvQNLR=r?02`3d?!bNMEI!#)os&v$UmjvY*(amrlQ
zC6+tV_qSEAKl6zA%Wl85MTPHE9x&b(<>eJhuvrR)mtJy@WfvfFiwfUvS%99q!=XMo
zpNU$}Dh(uUeAKRmh@QkJ+dUY1?okl`xSD&B5AnIb4O)L+1?HPSvfI|fvDM5O!g71z
zgi2yH?+C`611AAgl~A`u%Eu6cQ}XW=G`ei#QTq^8y#kcx-Bl=2c=NZn5OiY8;E41T
z_2AJn*13!o3M<fg#zipSoyqo&I*1#F493p|VZ<1oiP~dEFmbgfueouEcm=V-#<w@v
z$j!&W&3+K>&RB|)*O9D#IA?ACbHSiEm$y*9!ty2cQd(RD$M6!cZvZ$@z69GXEkX74
zkg&J9A@NVsdBBibo_K`L-Emn=OSuAmw|ipeMS5=PQ1{;2BGzARG)7z;%7-2+!B^IK
z&~|$&92rSofby~E@hF!vDjQ(^+%p&`%7+t252NT0Q|>IIozau4iY(hCbd0*jt&`&5
zsb2!D5XIrVuq>?dm`d8pdGt>!gZi9nLS6l9-1)Qz8aCfoY8|!$JJ|!F#?oVGv}G2U
z>fB(iN$&hvSR&SYePVk5*+A7hCoo@ng-1M}4f?}~y=Ip}`=}dy^G6fZdS1pm&(^bX
zlZT^SiwU^zOXa$8qhP7II|dYqVNRFRXfS;|>@_XM{kaP4w=N1LhyPQ~%QQsEi{z41
zq<0vcp}vPHFN7a&Cquw8Eik<@nH50}u3DXog}=ghz{_B+WkSBhAZMJl`U>jpRWa|K
zrC>aHHCS&u%ezp=o4w{^9!Gmj@qrasm=gh)o*clSzVDgbOAYK?Oj!N*H?04#^Qd;J
zC)TaAgSWCg(#6d1jBf!h4@tsTzWJy)J_%|AFB0#VIQ6q8VUW2#w2YOq(ZR{6CiQ0{
z$s6HyIsvh2H`JJ^v$UapSaJ3=4BudnUY7T%ldE0nu0y-N6F~jm4&3>jEl75|TeP?y
z2KB?loGl5V52YV-JhB%bJ(1zrZkv$pI)`II{Gl;*GQ`Z5p@+>}2zVThg(hcs!{f0a
zOq!29uM?YA?HbR@qfX2AC46LVFc=KHOa0Sj5PjbY9){Xuz>_;{jFAdN>i+2XHi^AA
zoQYF>v+;>eHEOnuX0oAuncU;2@>#zu%*j6kTL0^hwo!KIq1CF~f4>wOS$Et<JcZ7&
z&bZt<46O&BKv^=!)DvN_d2BIk>pc@k2m4|4Jtc(s_(IiyEHM3gPxu9hS^gywBXok9
zVQT{QhUL(1GMLGF5kEj4&iln?pnTH^D0>zI;za_Ob6JSrThpQQTO}8a6q*}Hr!dpR
zDop*~BJfpd!-<`n(SM{aqGbj=y^{|$bt?ArxDq8s4|r1z$2E)GsDIA_d=I$6;_)M3
z?$CH_jsC~HXSH#Yy-66MnI>%cbc^pF8jQ#HFUQ+{2e9!vfRC^%-qOhiO_2lKJiHb=
zl=qqG^&jlT^Emj@oQI;h_xP$keQ{YwG5XGk;vY4`p~JlF|MRBo-&P~yG<;#+Pf6Q4
zUJX^zKe^+-LMHg`%SA(^O2PDf5OF4qjaf1s|Cabeo&I!yua{AMZ#w=NO#2s?MNn+D
z2=nu($D7Wu#X%X=%cKL2WxG&j93s@Jrt|xJl*O76>DaHu9owGf<JTMos<n>+S50Do
z*7gE%L2rI}WF#79xx%>1r8wms-RqwbFSySZo?uRW<a1NOVsb9Fi0H1J?JtaeFc8E`
z_9%_gt2yYElLsjhY6}*jN;;j@%~=bk#Gx?xL|tui^7_foWB#Bsu(81$+pg-e{J$Zf
z`Ph{U+(b&Xxobe#B^`WzpFyoxNqm2JAhh+J&O5GIf_dBz*1YvU<>Y1KP$mmyr(y<R
z&=(unTeS}z-V{TS6LICu!?7x0Ftoi0WR13lkox-~c%LSH=0pOst94^X^e9U)_c3b*
z6_5Vc#3Q!O1BsTevLl-MD&8HV4!BTkkj{oyZ3!QnOa2gTcc_|q3Fi+YPh43#O4NW^
zxK*H_?3YC<@g4O`$nS9lS%legSUoTf<r-J8a7H#19X^Ar`{klIXf3N6R)h}UOCg1y
zfdnxzkz_&S-8Dngx%ZR``9@Y{lm-5q$Kv(@R_NHp3+fkgls)avBtN@CCoG5E9{sU@
z$_i}m<|`D9>jKvKW55H=AmaUFuIDS_E7yhLi>9mah1e<;(VCbxX*j;N+KyfK<v~;z
znps*-G3~f`5dETE%I9Rz>AHzLva5K*-8g8g-NVML9fLOOs(C}fKQ7<k%S@ZLK+qcU
zz74-8ytgt0UDL1g11qCYaJMfuKEF#@^$M_GS;9n9Etnv7u<-ei@tB}#3I<1GKvus`
zXgc7dFz7O|Mly~HQ;+0;jqwX^9AE*CTDMr!l$8*?CmEyE64A!!1Q(z8puQ4wX7*+#
znE8ZoCx;^-usjUbtrpPgu!rewItxpkC20D4AS`En@zf9UP>zpa&F^AyH+doyD;9#2
znD%Qn-Y6n?vxlJ}Z(BMa1w-a@{rhPc7VZerFP?nOdV9S8l!L?W<IpO5&g(TF3j<%u
zLA0qsDOep+f)U<yXUSy!hQ;Fk*}KsByaJj<#h7#EFSi*HKzDoz&*;93{NUvKzW<mR
zW<M3uE{!SeX5%^|>eX0s96Wrinbqhr5ZoVv=I>^amY4$0PaPp(Z8rP+y9bJd+qk>6
zio=TW#D`f$e5MPe59%|^-Rn>hwhJ7$ZQ^llf%q`I1iuZrfOd_G(8=#S3I!VYl6a5d
z6MRvkHPa%ie=sU~7x7`*<I&r0Brmir#<|O4(CbtY8~^GobdolGX=OU@pA-j14(_m_
z-+4%n8HE{zu8=!46zt-n!Dvh&w3|O<tvB;{^r0mXH)9%V*Ir@L(Ier@t8Ba<W{8ec
z>zUCaS7@82!M>L)BSx1AYnJ|G>Ya;F@MCs~#MGKu7720b<Z0+pnk`(~J_fzQOmNN!
zVyDr)N+&f6tuHA-!*e0tR@;Eyk>i=3-T;tqX=Z6|MWARI1m7E}UvqmT+^~$qt>bBT
zR~;u5$1mZ!VZ<qUa~4gyyI=)*rlq4?*sQ*YGpyoJ`Y{JH4)^6f>n?z?;W=WpdvcsZ
ze6AyLP&k)%#;=!w>|n9bu5tq^zC2=0pYL+X1xF^TI05X8E7q?IP<rgqLA#-Mg`Y>w
zLwnQf%w9_!JobL&Jy!KW=Q2w;Rzn_Hwcb4OpeJck!9vx|QOfq2FWCI!mq9t87!}!P
zfYMgf;p@QqesM<dz5+|dnRw3cASMj;AU^Ii@J>I-UcD+riOnUhTDqH?np=VQ8!ae`
z(ZQG`U39+Vj`O$b5yv*LIImX`^{8GFu5m2GJ0B9MOD2*F?{>#-KZc?-W+n)(xKU5t
zEoFc<b#6vp5sJdrgZR}I<?(+fQGRCu_hh*s|J#Gf%qmcSQ3Y83%Rs@L0qnu13)t07
z3ssTy@3;dq{NHit@QQ;LHBAVkoubW<6fE>gLf6Zl+-~q#KCC$p8doV<M@j(?I-Ly7
zcY3j1^-64*C1=ialUdrYIVc!F%&8Zi!T_2(#G8)rkf2_~I`%^}KLVz`W0gj=?}%IB
zk9Lppx#FfAoUg?&v+bluX?_>t<SU@(Tm`1v;<@gVZrG3lXmhHPZA%Qqqc4Kdgr}l8
z?&8tC=7M|u7SJBv$h@m<!SQq+`};uwy7RVR&m>>8-66py&t0H3(*^hUIflx;nJ{&z
z3aYx$=glJ&7X1!J)1|4*i1xU)??bUkE0_&bl|f8q6nO$)2%G+Q3e09MgdN4i$QXN;
z{jAVN<6-9kmTdyL`E;gn#~5pFX5#I%QZ(@00uNsd#ho)3qbhV2pRp<oR1=L+LzxE?
zmxQBix;?mV>P@=)=MpVN5FfaNa-BOz;IuEh(Dk(nwPQ1thO4N{oidH<{I1}N|Am9p
zHxrk89m9;1C6sN|V9r**_^(yRP<4<g)q<*^E&4Js(ZWG&c9021x+y$8<G`?2gR+~d
z8fE22;n83TydaMA_jLoYX2Tc=>QYLqWh>DBv;wN^lb9-D8=p+f>edi%_*qTa_c>Wi
zre`30e&{sXb*onv23o?w#x10^O@uo=F5v$7Q($q;2_>>{;g+6ru(jnov$G|ZU5y#j
zE+>YN4A65(Axw{<|8shN$uD0^tna=9B_Vmj)R46>)wu}tf1d-Lvr<?(h%~mS3}y3(
zD53K<AL?S&FL7F^4ddx|*C`Fb)pZ?fH>rTMKgY=b=mn*^n^D`yjm=hi;_k!LLF($q
zQ)f=3PU<Bf`MQ-gS!Hrl7{P0c)p3wjG4>p)0QvhkrFf<VjQK4EL$!%K&}sl$cyR3S
zw*kRkEB-N}7dHK^Wo?st^1E*e!0}fU%D0BI_pdFXqhl82o+(AGaSY<N8RFZ_GEf`W
zA4J-vOk~sxtSCSBw)G5b*w_s_-ppfO-_qgETzzZ^B|i180$7?5LLKnhLix=ECXTg&
zI{MB_FZ9O6;U`$Vf_#wUF7Tk9KbY&1JeEGJ0$0B6gU!lD;iMs9kdi(a<?6Mpo;0kU
zg7qjfILBPm-m@9!<sf#RuQU!kjUAo?Swh53=H>8!We5xLv1mS)T|5Ur7WiP*<W$zU
zzmcnE(;oF;5Epw#un_|C71G^#|J`J8y-pt0xX&y)vkOE#p29cGNC4G{A428LMCfq)
z#B8qgK?B7JZZ_#W-1=9H?n5eh#;upUxz|gjtNH=zTMuO!w>WnqAFkxEsZutsLh183
z9h_DVM7;y=n6`~Jk9L~^StDg|m^eK-m7%!p@Ch`TEn|iHXV7fq8Pu_hfsS+QK_!h9
zN+(}u9XGGDOV-1%%OC2Bnid3WkJ{l5dJWsN6kOq`1;d_};r@AJ&^P!f+K`9OskNBv
zessm=xCyN4s|r3+kHI8jvuXV=22vXr!Sdmu_(gIaf9^blwLdSS{LD$n^6QV9@6r&g
zl-&MR2#y>;jFYG^aC{`@AkGIt#tx8FTSIi0&1ij?&fsY?LBpjV*6)3#{O=Rx?Zv?`
z%s~c6rl_EO;C7Ig>Vn0a3bgCl1>6m`Qg>;PMUegm)_lH0X*gX5&BvRS(N^kcvveAB
zs<#CHErA$Oa1ykyqmmtPK)s-y&^(|&59}sDJ?gU-e;U9|W(Pt*L>BK9rScb-&k_^D
z37S`yGVfFOSZeYlXkQt`4gMF3?F-NHh=!i<@|_BQEy*Up)G3}2D`gPV$WlWh5R9Uj
z-SoS{u6<&`T1tCOADZ#jg>r5Ab7iMV6<kZ7h5NQ2L+ex5S=#C{*tR4Q%LksrjULJ9
zMOtXcob}jrssh{kmx9^z3J^s;;AZ(}L6S2H+Sh0DYi7wf<a-uME2c2>Pd;dxB<9|>
zzxnt-gYjuX1vD%j2sKxCLR*D5Q;mwmwO;eEFv1Li=5^%_<Zr&5vX1(`P4UR*NF4Kq
z&N4rL(2Up!hJ!sp<)3cRq}mPEPe1dFRsIk&E*FiL%X#h)f83YmgR(v9LeE_WC{?+!
zoTj<dcbvngbUB4<#W^Uxcu;89-2k#KQO5ZYc?DlDz`}KbY^p>Cjvt?JbJa}h&0Gs!
zql57K>?FK!_#BQT|6u*ZiTt;&3J2U4p?G)^_*_xP`uO8Kb@@oRs-XGSZ9BiQk~$}E
zYy{WPv3${vFyc)<<0+fs(2g|E`_D_k;DQh|90fS*z&;EsxX0`_90Gx4<a9loap0($
z15Q37_}}x1s4PmsYui;Q8&A8ivP6*olJTB>{ovL_ee^!dL3Cs>2sZY&kc=y{$QbR&
z8n@d)NAPIK*qD!Af3GulgJBR+;|Ix4QnBasYIO6ZS$%074~Pqaxl7F{_Y%SXrtPI1
z(Gpn3E6`_J7^HWLLDRY@VQN7TZ>@OCyg~|~CT%yDYL0{@4=18pPZ^ZRvatPv3dGxJ
zPvjQFw6C=(-#(2a-%20o9CU?A><7Z=rR8Xz`iuEkQHSC8XjT~ao_l)^0w|jfstKpS
zU#~Y-#jnN1e|m!#WriJBBx6|cU1DjO!Liiiq=^)wW9w)xIb?@+e(#vz5J2$XvG|1L
zp~CeVlPywXPJ#zql|wu0D26%X8<;%nB{y;11ol&p!N7gw6|`T(wdOB^uR8|O+?5Yr
zC0}^;BO7#^5`)s4qae^I6gvz*Q;%pC%`krWali#oq~7QC+o`wAoBYqOIxRGI7h>+S
zB#dc~gGQvY`o(vl^#fwmnyYb{{RricD<#<U<`-}J`kH42UIw${UwKMW4vhJli8iBF
zGL0YI(dLg2Xs;%}Zs-DV{F#6rGX+dGEJZ0L#-g`F729^L0A{W^hvxpvnP+uAjvjXz
zpE@aUNvJiJoyi4dT{)O!H1n|1Htw5D9asmBP}iqBdiwZ*wzx0%cB$ef5gO2Ha1wsM
zGr>*|2k`pzlF$2ZIkq;=fNvQbwG0)!>Xk2)HD{qJ{vFdV^{0K;Q|>S6i;l;nAd^Y>
z?tUw=&$J@=@5Eu8Vwr<;tv90ic~>lqf5t1!kHe(p7oqV(2AIBEOPsl}e5iX6dSBW>
zey?!Wo)8Q=?@xix)<j%f6~miX4ONPI3Hi?ZbT>PoLaBQqHVaNF1r8R%M4dE{xx8eq
zEj_?LGZ{Ao??lOp`$Cg>&v``392R491qIWUiW=1&*77ojr``<4I%2bHL%6ak<20KQ
za{}i_l8^G07^+^ahQflW5HhSBTS7KM-o9*HT4I40|AnCafjrPWw2W9r^=zckJd`~g
zDqKu^eb@XtE^hd#G<>~~Ef%%$o(pH-QR-VTshkC^D?CB1g|zp|aMBc)qvC(Xu=DyN
z6v?N6rsYfC@W}zZtxUPWvr*{QmWJOi6kuB5Rj97=L_^-d8$QIdY1c2Ksy>9PUW6+P
z|D5ASU;pDna))E{u*s<PYarx!pF)?Op{O_a3lpS|Eom=YM|`4{sM;7<^14-smRBdB
z42Ljp=ZQGD+g#L$O~CpiH@Nwep-{NKo`pQKK~KALsAf_Etu3RVY3XG+IWikz<u=O2
zFM*Ujz0sZYU6U=oxbSZxK6J`NW14MsTh!3J#fj@b4gr%<#a!)u2EI)+#E7wHAi$uS
zWo#*7W-0|YC5C54ay1Wf_2Cz)mf-Eg80?aSSij^f+t5L~kZ=AR4SGRbM<Un{GXj^5
z<gcj?<cbAyzW&)p)TS=WuHEQv(V9s;$YWSRauO;=ykQm6N@8)--T3i#_?R~kz0T-C
zx78O=Z+i$V<yGY2X%=piuEF-1K}@TdVeW?!q@UPeMz$JyI8`e(sDD`gB^MUEF9i6X
zoHej>tf4x9i_N2ihA)S(UV7`$l(%t%B5i0$KLzHiY+%I{>O7X+5w2U}i{_JeVuM*Z
z$PJ57YW$EE^mZWFen&B5br3{1MM8#1iy3^ZVn+Q&g2Et-+0+hVg$L5OxMi`@qkIzo
zvOt34cdDbxY6Vw~+$^kmo`<rN3&A!z3k`@<81bGorL1^3Yh{JfXA#i6;~1J>b%$+l
z%RrWULAd3v38qCBl7}OJ8);mGl=;V@Yh^st+gd^EjBJ*3-H(lE%Ru{=CqU45JviQ_
zon_ntRPUFGF3k!w2@c@R8y5@HKh41DGYYWWYz_WBydJI3k=MDnFKc@kCbYTf3mw~z
zfuMbAN!SAoF#K94l&xRFP1Q``ne|1q({SPm12v&=-4)i@a-BJDTnK#(*P#7(OYB!f
z{LXJRyz6%A813i^>Ms>oH*6rh-$2@}dPK?d`Df9gn&zToLEPzc0yn&=f-gFi*nDp;
zx3`Fg@7`75(CC0KXI4=rDgx{}BBB2G0On(N0qlDGV=FHD!O2a-P`B4$Q2&sCU*45t
z{_ZTy(2K^sPbQ&tK_$NEMR!a0DvV2<MvT}s#G<!_+dK(Hq4U8zlUP@4&OxXDVOX?p
z9C?xELNx6ly=}9(-OY9`k3R`HE6hN;O986SCxjkDuXCLqnbd2sp9vOsRg95UquRR=
z0LL3loKz{)xT=Nw%aS2up&1BV9<lvGCANl_!|u#8Xu5U~bc`qOk@_WUe6*2o(+GzZ
zFOxvFU&)$ZR0uoR7?vEJgc+}Buj!^hJ6A{iw`3g}sSm`LeadlVmyxt*brT-BO@3WR
z6T0VKLO2}HqUSk7>#cntaC)M&>+}XkD;KQ4SHXWjmtxL;*O`IlG|Crk<=$(@K!;)+
z+hs}jP~SV;BQ_Q8y>&yq)@AIAK|1_LGwYa@XP_}Y7DYal&`g?T$A7!oF{2g4t0}=3
zC*3HYo@b#rq0O|%*9u(&iAnuuH_U6Agms<q=$;hCeSdF4>0KqZJ1+sJ&~O~Rx<Ab$
z8$i?M9+Nb5^1HjM@b1G*sGB+qBK83f*<gU;*p*zcka2JI9$fLalsCPm*}Lj7udA=(
zhJJHcBU6JNerb?vY>z5^GwO3+sqFSC2%D#m<u|$wMCuem)BX$5cH3nT8#N1K>+&${
zPY-avHI+I}FQa7Mkdhp?m)vJsI==lsj?O)<#_j9k8zBiv2q7sYMF{n*i6kUpBP1b&
z+!B%`?2;tuCaHAU-AFg7q@Fd)E|m~Egb?=WgxotMal$))@4xuqQ|-0p8gq>AxE~3{
z=f`o+nc?Jb2x5uPCSml!-q<<S6~f;R=5KfoTKP6vc3z;o){_8>*Vqpgjx$-qBazil
zy2Ugi)WLB>8Plx_g4U5mV$JS9%#rpU-f8R5^?EP((&s$fd{}^P-w(0-?<m`>6C|iA
zoS58w0aVOQ!_0-jD7zXX=JurT;q}cJl~@VqGuLC_Z4p)Gt3x;PmoyZ4@z7n-5FhW2
zQFT2b<EJz99HfQ)=cLgLKLYAJ7C~NDAIh{ePBkA@3H7rZShmI~u9&k=SoQEEW(z0T
zWruZ`{rESR-~=(iaTYkvkLGnVT3F73QWWD;&~J_bMm@8G%58V~!m~+~zrTPcb1ra?
zo77D*ek-?)>CU3vw_(<jIP_GzhH9fS;O_HqRBc_#GBQ-?K9xaWn1p7wyG+q~S%4+0
zp+nNfZ1*ol*|RwA*vE@S*zLpc4rAuAxEjo+Z)5E~q#L`96jYLRK$#FBs{DG+b)Ez>
z>6Xj9<HBNQ<aQohSB0>3tL*UO3*xW1pGCi55BzNa81C#Rc8n!9;o|GK<L*L`>ZM}M
z03{2LwTDq-!|-ryHhi+>Sd-l!JRTfDOX_)zsZg*Wn^gQ#e-3BNi-$9=(fIepa+EB%
z!6PZ()s}Y})ML*vt0psWG}7UXk9@&d_bF>wB8P@_Ywl|251R$@@bvx0nl~qtuYRp~
zSM?&@vAej{+!9`Mc@gb0`*T_AWTtLD6v{T|!iovh1-ac4-436@4%a-^I{6A$e7Yv8
zZ?yso>d1g&d*M`A6$U?1Bi8R4K~{G|XuX*UR-bQJT7Q?|QriJY8ky*EaXC21A2933
z7cgUX2lYYB21&#f9v^%J0?M=TQ*tb{Y0%6wRuiPoq?gDz`%H|%NiIjhLW{TwLx@@O
zx`+>1vJev{pTK^}hB#_n297ybjoN+oGTXoHn2B#VxE?<Q+C|ft$ra9iQQjEBLt*jv
zY*Yysneo61INHSq+bJ6|;nQraeRCXL%IaC+p1IH%cA2>yk?^Naw9$HK0JL<Rhss5_
z*z+g5ap<*t@}eB!0X24%F^T8R^Zw%gjlI!WvlujAOvUs52^jt+Q`CKM4x>+3L+7bK
zTq7+M+eU?eJvd<HohH_3zX2B7gkpGPs1W{cItJ`0$Bv;@DD$}`W{<OjFIqX6rtyq*
z47O$^tF~k12!CiB{g62B>6YsDw6}B3Vj5<r#q+~1f?+=O;G8$+i4D{Tw)qB=^{fE(
zd*LAewFF#tkA_!jmvPI{?bx>9B0P}$VEG3bPOS=syE$=a{2~V&YS*Bf%0<vor9IZW
z8Q}Wr6<3+312rd{c$D685`Fz4h-Uq)VScDOp@W5ZZ^2>lld!OvSdZc9m>2t<N&9K>
z{3RoZQ#KLKTTa0DEfb0FBZ_k;#)FrwjP@JDSfyVTh!qGyxgkva>W5(&SMWn(0!lYN
z=PM_yCVzA(3a7O2=$2CKF|sGN4135W$;B2bW6w}ltW-$ZrG`5`rsJ9_5A0?5zwGi6
zCjA-5K5VLErlS!Lxgeg}AB+}XSHPs`V}K*haue%lkkkZ%Nfh-ux6+I-aW4<}Hy5Iv
zT+r!53|@S#jkTLAL6pfLZ(5Wfv6@=!lU9WVi(>J^MY`LzcEKJRC&A&}RPec90rGKP
z@I}@iU$&0IsAxHlns}GnuqftcRL%pY>4QYeNifPxgY3ccL2joa*v}tC%zTme9c+t>
z)?dS<2PLSU;sdsO2D4|&i*aL9HY!!ufirch8(C$+HXX_seOkhYgi<a=vnSKNst}hH
zpTRc6vCy8B26;ObVr8faC|}wMl7EfC`Os}9*WAh~i;($QWTNvh25w%>?7xqiIOxJD
z^c}nw0_I4eIN1wA9HV&kydo?-TE^R_D4>Q1V`Om_WpKz7E%e6Il(APxOQ7;WG;fn#
zqch;GFtbgLi(V&z#@x$f|5=PKw<4L@>gD`aZYT_IRiOR?OPG`z1~xTEF~qNyS-r{-
zB$F4w2Fh(m-lLw6&Vwi#7Jx^;RG4*dJ!X8Kz@j>fF<^@+D$@S3v=3APv&A}KHC3G{
z(i|AB%Vo;%10d{?J|_QJMl;M6vHM2iL$0Cx&6aCSIxhvr#_z(%PHAWpstQqS+$lR{
zhBZn%e%F=WWNKZ7PzCV>RCKx9D_>sQGajqIN%2)K$Bgz;?A`kuzMDz?DSa}*sOxz)
zd0jY$B$lC>SsokbO`fyO+rcEQ5`u<0LdJju$eYp+PS?_&v-fdsktO4^|2vO*Ca2-k
z(^Lq6*F5)bA@S_JP<C*TrIp77QJJ(9ta4k#if&~XeR~_WX7>d%mp?q*=>U8ieGXm6
zR)WGTN3b=?<^M$OL>I|<C{hmrqg|^p<^DOWCvKA)7%=Z%{#X-rn41j=puEm#=GNi^
zo(fagmP@(q)mhNo?;NPA<j{Ui2I7zs6gFIkjNOda{h0(ea|}^2{0__Q+m*airv*jV
z0o-3L2)-qgRz8D1+n#eFe0LVQn_Hn`yA0)<hlt9Nqo~XL5E$1cgFWf5^(6;sKhzh>
z=`Hls=s0-)kI9xPMWdnvT>auWP;*O$y50lf;d|=%c{T<7pGSh3X#scb907&}erP34
zuv~H`7DJrFc&J4hr1ss5E~C1`y$J_Uamb2wm}cUEbqqafGMUm#lbdBEi;A+<F!x{>
zL{DCeO4&dmX`(*1&KM3s@xw^(U(VCUf8Z9QO<C*T=LM~*sUSBx1oM8BVBPqQ*pPaM
zJ13jM7W0XCKD|GFIT8lYjr!#_DY*KMWuPSH-yl10%%&We)t@ZT&piX)m%UkS!YVkX
z7eu}Cxm+W97OK7}1j&*wV%_dn!h+G)s2e?t*@v72$@9Kk&&d}13Mue7e+a4{tVRnZ
zW1ljYp#F5?Bar@477~m)WeLpoMk=c;DQDV^{XvmbZz;{MBzD0c_RsP8SRb!OSzBZ(
zW9-0ZcM`~4`wGQ!ViuJxVzouls59#aX^$6}ZQ&s3lRN>hC+xx0#XC{?X_M%_${7c2
zIf%LjW}?MzZ+LB%4KG&i!j-F6;l>B!s9#V(^<PyyzWP0tXzYbDPwHyARmyPIBH|Q1
z7i|BbEa3SK<kLS&yU;Go;7B2cbSAPMdEul7&43>2v0&9ZP>`i-bH}Sv_#Ju@^yv(i
z{_F>CJ$EsUK6k~XPNeG}8wOLKhNJgCsVrpeTF4MKf^628l8mj3;6-Erq-h(|-TsL0
zSbKvzc%O!5FMSYWh;y(0f$46I!9}_kz;NUUyrR5@9hMcu<5z<wb^(`qWuf8r%P8g1
zuni($t~Y(26MyoKxhY^LI}Rq#kAaT17KGk7fz2+&P$<w8>tq&uWUwDLTjqo7XJRkR
zmErM)))@D7GS=SCq_@j_aJYQ{v?A;w&eRSik*)xdO}ubT6PGBj3Y~QRzBVvHt9_)~
z8knKfzg&nq5)8WQiG!(Y4R)u>q2<*i(D`KmfnAA5s0?M&r*m0a&qQ?Ttc1Q-!ZC47
z0%l6}(C#`1#}Rg1fd_=dE>+m5Hy?W2k<U5z7rQA;$9nPr;NY1Uwc{kHG*Esv{gz-6
zH<SDRnSir`_M>7RfO&xgq=QvJ`C~El%-<2ex}{TB-*M1>(3O?`@<O`->S&Zu0?m5q
zP(ixun+^|5-Xce{Kn+mjsEYNU0+?iyx@E|IHz>L_hqBs{TsQ0%bD<6`&tK%_FaIGn
zt*Zc&>=iIydYzaaHlPQ6(R9>0>^RuO%`R-Ad(xHFFSY}TyigdrmH4%R0n|%l&b4<F
zFK1Uaj9aL{zWZmQ>XP0}zS~*Qd=m}6)2LT<^A#cczwRihS`15{bLcus9rvh8(CuCf
zvoEp5yw<a#V$LyaEgBD>0!~0iB=J<M6d+lcfUU>qeAl_l%~V6!`svoF)G8r;ek~XO
zq5e*_7|8w`;ko}+Z1;=Cl;tV-xJx*0Tun^q@~u4RyAQan9L_gKo<lc{VcgGY5NZWo
zf}pjl!Fk?jT-2u=SL&@stJ>Z?JZma{l|LOTA8doC!z=OfxD4Ep^@%$SsDvgfI}Es|
zhVlo-to*G8>hFEd^2VK{{Z<WcJ7xsN%T8e7%K;E_CW5VwUXE@vGU2)2Mm)ciIu;%g
z6X;AbTnuo>AkQeMJ$eOXZ-{{qa747(6oTr0Q{jW%67=ln3e8C)pwoIKnp|#V9v7CO
z_pFf+J}pR?yxNdDZdc>g7zH|a2|$Y%#q7XeCD?L4n7im{GJDfo+_jG_Y-%>g%BP=s
zw(WW5VSfg7eMb|!(hFKOzlggR$f!H&4yz9h0%d477)>4++a=?8_LLkv@#zST*y(^P
zC)1nNwF+ZgT)F$nQxN}}I1(RcLDaPr?s7gI!cV>swLg_ZM)U&SvM8NpHz+`UvjhUP
zO7PO~tN3hsC9ZhQv2f>6@NFrDD>p6izAOhM593Ql`O*xMYzrDA0=VkJF2q6o#`C&_
z39er(!Fo6C$}f6C2pQ%qUbHidOi!?xvlBOG4Z|9%T&7!_3k{b}@h^tgz>z$?*<a4^
z&VT}F850YA*O8TOtRHBNmjACWg`0QCVRPOoFhY7Wk>4k92fd9$eMLvrZisucnN?#y
zZc)0DeKAGqJD$TD>{Zy9C)v>Ott)XDoB2PVDfePKfyvx%S-N`ch00X&EzT)tZnqDx
z8kI8Y$2EkF?d#FVcPq<~9Rm$Ndzd;t66^jni!wD`%S-3&DffGcOYF2urj8nkI!hv9
zQi%*V3}X0GkqoN$lbG3edkFVf4(78N1WsRrO4z}I#+&h}#8UM*_OF<2au|bN?SpUe
zg=nkh3VE)Y%=OE0zHN3Cv==GRN+k}%xAfu~#y`XXwkfDhy-6m!F0o_8y3l+&9~;Uq
zl8@P&s}xax`j+o}xM2+LyB|dPk1n9xwT!JFa0xfux&p2FdwFYdtf-Xj7pf*N!o%fB
zDBYdSgG~LwyRRpm&#}B=-DObKFXu6(gPF=OZ*&e?jh!0|!83j{buDcLWkFwv`mh+=
zFC>G%vnmEWRE2^rp=c&K3HuCPP_ZDmq~m%J7@eHXTDJ#6jprYhZKenJqnvR0U?<G{
zPsDJ2JJ42{z+JN|uuFO>zP{@Rw=X;6NMhsFeLTpl4t@|MpXRaL!F@0z>JWeAR0PK*
z)KQ+1&BA-XWAY;!;?JiMs4?>pSR@-VbBkh{C%<#EtaZ#jIEU%c9$7N*jObr{9^Ho6
z^R}(Wpdq{f44UNFW>f}^!*jUrzN^G{TZ`g!KfnS7bee4i@0V|w(uJ~U4G+1|KsWZ(
zixcmVIyi%eaHP9<`f~wQ)0&xG!dcKic!#Ss_CVDt;4iaW(8li?$X55^Iz!w5avRw5
zEjlQX90uQjy<oMvPRunafX-FXDDA!g^=>DEnO-C)RC0Np`Zb9DeH8T1^#FO20k;x<
zTZY$B?yDn|`zo%09N#hbHAxur>I$UI>J8C>384PA1d7XFvAmNy9DY>tpNA^(LLB)N
z6tB3K&<)M@&0--vW9joobL-}1pr2+BX+6e4@mPfTU8g`c{-Tg}B?)YXhXZaR=Ix7Q
z%=-}u+7d0$kc<caqCj}wWhmwq4Cm6|F<hpRB987c6d&~rfR*vl)MXSwIi+7r@^%)N
z&2A~NPpTqcVJ(;E=&{XWCOQo53x6{OzJ8PdYO16?{JG5A-p_^X6W!7CL^9~+w6JkQ
z^iefsA5Qw?hk?>jbT=44yvapYy6_;@1Z9yv#+cRfKdi1HpSSKkB^HyG+%&%{I&@9q
z9@KTTZQ3~ueO(1sqwa9~utrw%buTkzebF&$0?&Cs449*yI8CHH`5`5<jVK0vtuD~`
zwGYU$+DazRCjaFk+MDetMOm7_wSx0_6Ba{2LJDY1=@8u<I=HG!EBEN|0f(r+d2>uV
zR~r_Dy4Fh8aH^Q=%#Y%8-?*dFM@uwXx`Rn)JZ2i5PM}LW%!sn(#J{iS=E^(FW7jln
zTlR#h8WZR3Kv$?si2~g=P0|{jphd5pDXO;6T$%~;;m3vI@z2?&&@<R-nIc@=pNR${
zb$55wvD9xZq>j=eti9h#a|*r7PWKS95;x)R-jVp9Sb{sWoFMUkXVwjOG>Ug;Edym>
z7m$mq-R0QeMa=R?Z-u=RcA-sgdQW^8xu<Lin5<m@K5ZP8pPiXxKKYB5I-`}^Yu4k_
zOx!iSgm@Oj?ae*S<)*7xo#+a+pEJ?Q#Yi;rsA6WScSPmYJ}`aVaI~_|ESYe=8W)v%
zW8F4Wma(LccPz7n?0?h2!XcVD^sUCHk~!$pbt`_H<A#}iN>MU8fO-Q1K~jI9xJ}%Q
zVdM+ed6R%K!8Tm#X9bOoU$}|)2550IhO)gm;BrD;sB>`wWm+!?O3GsIx=%$jA%MBP
z*F>Xx72IJ_5iS}iN86nbnf-w{(q3bQslDRS$3GQ9^N!*m{dip6?G$xj{l_GMtDurH
zMxVYHfmQnqaFoa6qMgn-J}m@fUKd2Qf=c|Uaun`tF~iqq3qZ{$7g|&AinXWJAz32~
zgO*QX&1Ykoa*Zx;Uh9WG&K~HJcR(~W9E+{($%5Wc1?J8=3TAbEFxn^wi<D;}W5Fq|
zF1bLNy)rP~90if*sWDkM6{F82;)l3O<{Uc&ybXV_D-I`d>Dd5G8J&+Ctb@U^iE>=A
ziNzZ0PqQOgtMTqZ7u+-a1O~3m2S?n&EY_MslVK4kP1OYD#GYtX>H>;PxlntSbg>Qh
z_@Gl^&{|N<i$i*_qR*4i;TO#;JA81<Q+HI{y<OtHbTL+#5%=Qd8MGK11ixZj(B+eX
zkZtP=gFan{9dFH{Q0+L=9$UyZn5KZ$y^$c-v}FyF^Ste4H0ruaq5JwkjA<Lonl|KO
z`>s;7C0}c0#7X$JBOV_wScrCk$#8dP9IP0&9(C6DLir<4X4GRclb*5WQDGi<k}^Fm
zkB!9j{~DoV?iA4eNgS>&Te-9@0$PJKXkYOc6KF^B?}I5gBB~r3GM0kj{t~oz*#*t3
zI=SwtaoBsjFSgfRCeM&1$WIsv^X_cKJwsDq+B0HoxYEm99*A2#&!dI81It^u48jH!
zqSR(Pcq9q<_x-K-lz2ReH;3V)zh|Nu5*Ipw{MldHSenN!@-{T`14f~!T<}zg4Gcn;
zUxrXuHWt!8C$kUJCEzJ!G5zXw5Vfa)DbDO>d+0v)JdueKS2ZCrWejzVYC)n8&0($8
zyy3$&(DKwp>5yZfSa}#%-P(r5$qvwLy&l{i)0;D@H{bG)9<Dt`?<5lk2;Cb88WT?o
z8dkRilYY6(cg=L{)yIa|wP(@cWE86WJjd7kBp<>S;<wI!Uh<N5gI%o1M|R`0pw;q|
z%YIc*R{W%(8~0dzWD^0yqUK_zc@fMt63}epG5UI!z-EVRaK7sX(>qsS$JZk;x1G9_
zmY0ZT)(-^Pf$pMi_H{U;Ih1nO>ah8GDzSpC*oug7bSX{;tAAtB{`*ZHK7BNLzUc`?
zHRQ*gJB2$6kC@b|Nzi&03tA7oX;=M(MNKT>lg`9J%<6jPtoxh0q$G1gnteR0_rZ)g
z%dw%~5B8^j22NXe0;f(m1J8S0#z{BRsOKvTwyg_;FUbqgr~4(4PaGi(JUt#ewM2e>
zl`AS;+{AaJtt!l}T6(Wr&Oa^m2My^ikcZW>3B{{0BvP9?Z~E|{m(JkwKovjUABP_O
z7eo4m9310D9VL5-Bd%`?F0%j<ddFe*yDZFJ?hB0)RmiO?M%S(i*pZNj{a2o*&Y9Dg
z(RLX^`(Hzq$uR(Tk=Vn=!Zwc#7}G>L?5TFH8&@To_?&_IHF?~AJO`(tXQAmEy*K_@
zFASmnJtNI>G;_HsTHBU_tZA`mGUx)kcQ6+fAD)Q^pP#@jd&c7I*lpOn`vN)!ePAoU
zgy8>rB#cd>Kvw2p*>-;&#3-*)4ss8uzq`(3ZY04N&j4&0auTh@u9&y?D|4EY4o#IJ
z#z*%d&1x;wg`7qG{33MeR{=9<F3M0@&75r#S)fT4BnD=KPxl<q53T~oy$_h}m;R_X
zjI#M-CzFSAt57+&nd|44FvXfQp*Wo0K>O;2Z1a<>{)8FnZN1>E`Vf3GmsrP6Dy%uV
zH`==&fzGP)%yIh%)@-W|)#T?IyZ;g*u7Kc>Bs~67P7ES{G@G^^(oP4k&X1MM_lyi{
zWy#ocawckw3le1mwwL5)hCyYa8jh$wi5J?;u&m)C^{zR=uFcfV;kBO!JD1V<@JuZ1
z-y5a`h<N0EJUU$=z2@OOu({k1YEG}@H_Pm?)g?%@)r`ldUgU3l*N4fSdUELmC)Ro>
z7xu5Fu9*CX4ErC0l}5+$QcE%#wrb%L%5M5vOh&C6hEOT`^R-=RN0|DF$*L9#kz3Ay
zd(B1I7I+e6{Ti5V_Xt)Sk&H$?o^#hNf3toLS8;<{1$w`+L$yc#P&F|G2h|aaOf?l#
z(&O>P<zZMs{!IOM>Nu?91ZrG~K%XB0;9;K$vi;@E!!L#1Jv|sh-DR-veGF<TuCvyF
z6j38*8SkFxjm7QFlpRw;>CQ>ui^QewI+Vx%Cxs?O7;tOq#R@gT;r6p}%>GFz+N+^s
z>rY-htUt&?oMF(%T!=}X4ENs_VMy9!aG5ko7@N8frRz462C9JcTa)O|zh+{(2)Jq!
zXsA`dFZ#NR?r9(u#_?TDK-m&!c()}0Lo^E5mv4da?Y#~T^;Ez;*Xy{kGYQky+tHmf
z5JKnC9!AX#HFmxctmdy}xg+9GRW=zF>;Dqu2hRb)#BtT7OQ`!rAHEYe>3?jnpf^Df
zGM0Euy;GRZzkl=5!&YIM)&U6riA+0Kz%Lg<v3c`wVu@_WtVQ7{ZF?eC{-uRi7TaTo
z^$d8Idy2H1Cb6NGL&gwK(254ADIo}(UNq`C$U*nkZ}Ed{7-YBYA}`GveuwUQ&DJEm
zJ$fhc>4@X-_!sLOf1C{{jlfoMK8RCCqx-$n;OY}aIfZ|je${>EzR(IAzwUvM{}hl|
zn2yVT>!Zt>?V_#0Tw*&v<__V-ylq)FcaWDr@+I=y=}Wn+NgLv;FM#sg5uS60*g03i
zz=-=pv&{$IqB0P&{E2nx62*Kc6tlJ}QwSO10xn;!!q$%`P=)k6d#g<-S(0xt<3t1;
zwuqu$PGj&N5JdgDIhM9hEqS;}chFsU2vqk(vQEPyR>%^-ZK)a8tt%lPjkkFE^eJ@H
z?#ki@^v5Y~xmaiC$y&b|@Q`QYA*T2$)Z7SUijPBB$IadBe;tK2Nsn3Mu|!m@KM%gj
zRLTV&Va4|QKsj-&&|>Ta9!7!WQ|g7TcT-r0st*J{wnF7yGcb!0SmXY;Tv^~EHl3{k
z=Sx|<v)l#DtBScN+46mKO2K*K74}HW1s^&OL3<M)csoQBH=Fdp*X=r#dA<y;CYxap
z<@_?o$>3Pq0W?aVM7a&hA1UN)y#7Uu-+GoCEeiw1vn&{uW`$<`?(-ky<(RRu3+9a;
zjhYj)!Rmj^bLlxO`}-(JSM33%u@!W@-HP7Ui}B_D0`$<E3yVIGZkRX)(xjAe2@Quh
zkM&sjo%+YFJHe(=W@tt198K?Ym=t~vwX;`&%iJ@l)Bh1~s3^sjy47&|?`ar~i7<4Q
z9eS^v43qYz<A|5hpi#3Jg12OV@^}==EBcBaiz4|W7t%Ig&IOwaSL)G}3k?~xcafHG
zmHR`9E7KhsD@b#G*%QL``h!(Va7kJpYmD#fNBv@<+}z&*^lQ?vC7ZI}ul(RduS_(t
z?uk1r62R<pIoz0Ag*GwNX*|dX6uYU*Xbgh(@(idyo&nbFY53YQ6B?w$xQ1)HIODk-
zeh;U2`n|8joXit6#&?JT8?S(yVgs~VQqFb81-^92c6=zf<H{;}OI}iE0R}3dofCln
zU8C=D@EYO`^n{)tMpFKt{7H$!#h0BPcuxB;;z)lMN*Tnun{EuA8bj&$bLhNbDm?8@
z+E>}YlHb4R-%&H<aoe|Hdxt;D$63Jd(V-ABXBd2$Pucx{9`YG0Tu~CDfhx21fV}4n
z!Tb3`Ht$m~su$d1@`>i)tR7D+Tx45T`(wuL;kbUp7!356U`A{t^?N<ycYDWRive}e
zShX-?{{&DzeJd*B<Hf+P7oo1L7j|?JxTkF{t1az^A3NN!CGtI!3EgQA;|q$(qqyT?
zAD($R5zS*#LAP!TXrE!+{aPs~@{IZMrAzT{KmhS7E?b6=$>Sk8C3rY@IrjUQguVvE
zTs;%Y?A0Ww%=KcbV}3Hz+_h-+2B28u5XuHkw`|;*1Oe}&vDRM|>{n`G>rg+QKPixs
zV`IU253vU?uS1VbA6Z6Y7_Z*$fvv=KDze;*9|kXnyvN^|OFtRYePzQmZcm0W?<nW?
z>nzV%wh}uO)X!39MLCxq80|X<6+3_Ox*uD?I(`wx-6w|O)%T*@xj&yUYZ=OKbqb9E
z-LZ8Iam|bRqWZD7TxYlpBq6f|*9Xbqus@E?>ZD(vHUgC6bOfD$kH9YL0<5Jx;jPUY
z#DKfUB=>{F;;0&~C_RWTe)&PsxGHSk)))31FMz9!M^G_}_QQ{}m}Yw{%5M&U-<yx&
z?=vD6r*DVOfH`<&VlHM(KF5B05eGiy5Np0_1}~)X5cAWLopTt9{<*Q#qo)NG2g1Q5
zv4i^tPY1=vA||y-6Mn6|h%#qK>av>7J?blkf3@7v`PX9D;iLyOn`gjWvqCV>3}@d=
zt`N(^5L?oxvxHan^xjM<F>$<v+F`1oI;s%#4SZmA>?N$(<-vVd6oFgVced<S1(x{`
zFL9_G`~RjK@pnD&8qIJ|Zyl74i?du=F$vEXmO|Q=5N@R{Ldnpr=o~x+9K!`Bb?C!o
z^D2b*jk9PTo`m7r)nGhF4OO3b1K3r7PwPeM?76_kyst!%K4kg}=fK$4<Y%7J9ix1=
zVhpj6TUD|J?U}1!&hxGG^WtL};~l^nZxH`aPk|GB+|l-$J@fJlfwm%gPaZqRY=hjP
zO|k=wvPz+Y`c~^qECKyiLeTuTylav*HtYH^qbteaEuG3hD}X0eZzCqu0m@Pkqfxer
z{w!rb$n#ORyH?D<7lQIVgT>|`Jy7p;9F#R1(Dd3wOw>LBdCPOe`X0xjhi3q|_sYe9
zRhL1XJ>*G#0;(*%&!eYag_}WTs8FoqS?dnso(%HhUdY9#ifZ`u&m~Y+wD7+AM=>V9
z8x~*r#CBO}Vz(Y)=<q{?g`0`BvSI=>FFl7Ho2g$??J7n#7K2MqHL)vw|Iz;|Z+ZEV
z$%d4G$;LJw^f8{_H%o`?qxl%WwvZ_!MnUGwSvX!L1k;wR0`2{oEIS>!eRezsIYq;q
zepMJR=?_K|chI?e$1;46oCyarAVU2(S{x0*!n5(r+j123XuTFPo(=<}nS0P+FzFnH
z4)hsV1&Y3ZS$ZGzVHPnfp>@t@)}mnw0Uor2go*I1AsYAPSK;z|V=*VbA6hj2;MF?R
z{k(!^@b3qRCAl0m4xVJXrrlBH^A)V!a|zo2B!lbr?O^`i06!RX#lCh|z--KFZj|<(
z+a5a#wSyyIr(P}|j~t5@N32<${Rx;m;4sP#oM888e=eCwx!j?q@F=7J`o3R{L6w0R
z@ZcH`R9uCkqB5-bbq4<QKx{py%`%b>Fo$QE+#~s$;GE=$fjtJH#*74T=^DX|7B2<k
zog;Dhjs(2;rw?|kPQ(~9HE153kMb?I1&202Fx-9trFVeGXxo7QjLWcvpTW*=YQ+2r
zW@gH<pgG-(ykh?1i|)jD{}18a6A60HNM`}GGp+sSBClWD18t>RsQce{ZXT5cjcdZV
zuCR{<xo-y-F%e91b2$aR;8S%Tywj(g(XnPO(|^MiXI^m0yT+34Q`caS%K(6#rf5U^
zst>2+{Nymw!F%_E>~U9lYxs3ob@d`P%$HKHS`dUkW!#4JjOyMB*bqqk55))wATEZ|
z*G1T$-52}((!mWgufp2QSZH5b30ATVLiU&IEI1|#6(8eDWceW_QI750zh5MH|M7wm
zvLKkcCKWxb?f8y01HgM}4ptsJ0{soHqhD$W*4+vg1YOd&jz8zt0edm{#C5PD-@$<t
z+I<<8awUv{{G%>t97ero!%yOe=pwMFB;Ubv2k@}8=3oCBk1jgb#X-}r;U3FrSRb;9
z?)yCS9c~0MLk!_Wd<-^}bg-VUH{b}3Jn-ng1$9;KvV@XVD64xT!m5L)YU%{Ofm&dD
zyM=Z9e!-Sx_+ftK87xdm1i$iVlw4aY_T6WM(pyRvX&r*DeQxl7ckRTQ2OpTKGK3Y*
zL3DRM4S#LVK%GA_-aI6kdp|K{8Y8|6Mq4M+o}IipL(W0G&rsBl?F!a)92;*>15fA4
zbk>%Fp|dxR{*Z)w8>`Ue)mxz>s~Qz2p0bQxBOoaH9gCqiSJ{FY#6jG}l^a*U=M4j}
z$CUFBGtB~I-&=TpyYc9nycHV85jS94Ij9bFf{Z@4%*c{B67G?xOYaVQ=Ux~;))iad
zyF=RjOjh|i1Ezc<UgG8BT=`K;FsY4Y9qFrh_LBrAx9!K(hbA%oQ+A*`<{j@ixE<Ei
z6yc4J8__B*nR{L~hu4pDAnmXrx`c<|S&Kk)JZQuv2CXFxgTFEDJ}1ELSTHJQQ@@Q#
zCv&uyK}fqF+FF)E&65?(d0_?`?MY{nL$e@Y(ibjkp>VKx4BbXo;y`N|He2k4Ew{_i
z;a@fKGKPWbK2NZ>Dg>+0p@QPHLX@iJFc-5GqLX|O^>Yk^@Z`C$O?4&uruKjrrDuuF
zJR63-RZx$h8}(3T!q2UjAtY@Y?N5WSQ%w!D4qpJ+X95mIUSNK%nEmyQ-YFA8(L2tC
z*WcR%I&Y3~`A9=vyz~$+FW-;q>7_heO7mAlEB`DF!}@VIxR$yW9MW*X@U^?Rtj3f3
z^E@1P;Q;DBq~5ZNi72bu3mVINVxHDyUe`Ji@}N5m?N&zYE6O~O$Mq8;xLq;fEjC%K
z;c)<unUjx&sYkJ8{~BI)=sFzV>VYkue>03Ipw7g<dBaFwX8Y<i)173)vm<VDmqE9<
z^R-{R`5Nh;hF8F-cq!<{_QE%f`%sbnRcsr76qD|n;E0*Dze$}4b++|_!@Ou{ogX7^
zpH_kvvp1vcoq^?3%agG64{5I+dxTN5gHZKm3zv8-Eyj}HT=(`8@cq;u1D+j%&3;MH
z=1*CMlX9rZTnsAr)j@855xf`CO!DRsb6b?kL)=nWYl8*A_*3BUwt)Ojb3s+>0E_y3
znC8M>*!$v6ilm<fZNFdK;MP@?#CZzlGdF^HLkBN>T8YZ90+;wdv#g6Crh0Ezo;HT|
z1Orz<>)G*8eXb80X)R{@gOfq>)T*Rwb2mKFb0|t=8>f!HcM3z&!_lnZJb#@N4UOjq
zLxAiQXrDL+j$;jBTgL^^k>6t3Q;x!<Ka(lnxvnJmQZ;T0a>bb2eVKcFAx@mw4{eiO
z*o9RZD4D$iH@KFd{)}y){bxJ4b!+5G?bBT27qF%|WvIJbhuLm&ft1BjfWt|DiVrUF
zzre7j*%K1yB|=2V8GNzyDt5h}hjY79=00^7jD9&6KkT>)7JGf+QN(qSZ1^L(hR*^k
zy%fRd&nGrnk64*Afc;%J2Cw$GgddcS7`jBjhPN~LlOZ8EGh`{oyR3oDkMmIYT>;OO
zBT=6i+2-YCQ2izob;k|H@1w%e#LgPV8_OYhXEDkoIplZR2@=(@CGwsUsI3p<njUGW
zoBM{vt*4woZIBQXIh#oaSVK{QJGPzv!(SZ8foG@lF=%UdF1z$jv_07avq^WH`#Xr(
zd*#@gMjTqR@T+5Xq@s0oDMY7|p53}jsN}O@dIFu9HZ#EIh78~JIgXAq$PYAJm+PM2
zFIpJoLT$l1FdJMh%-GQf=WR1U*?@Jz;h-`&@aP(GkpE)!!AGF+a3-8h5wWIS3TCZW
z#L7EGSbepCJn)0y-UVywTJeUQOK}(#4j|RJ4)LcH+=RaGbGH;gn;~&4*IGd3<ydZI
zcmP})PqCqcE~50qcwYSKDr7?#G(8!I0XpP6@7D)ke~lwHXCc#GwV6x0d=nc+#_^)a
zaA<u#5!xoLhL|!PK4MH3+|>&Or#+PabbP@srbVKPejqlg<Z=07%4~Jp3$BO$aC@7(
zTwxr~bC%HElx@r=R9Il+#XsC3c`z22ce3ic2Qbhy22_Lh!OoHkXi-qk>h=#~?+!$h
z#+5_!VlpVk>rj`}axULsC3^Qf3m*TBhLF%gkR3DN{(3QBq+$zJLt-JqeI~v;oCp6J
z`=Melty6c~L5<NIKB<2`s<v!E$BTZTcO?;Ot1iIpFP7N$YZH_e=R=3-B9N%;#>2zb
zVcDZBVq2YIquOlHA$kN@oi@T;AL`8fvxN1S!BF4QnEHL9pl;D~Znee%Tz-qpas5pG
z#eh1G1}1`$^JVIN@&?)VGfeGZ9Jq}=%I!~61}K@lkGF?`n=73k_5EQ@lr81&By7)Z
z1)LsHf-++nGiE)}HTNEqb<7kOpYy`kUFj@*+6P{yTttmNnjoKZni*+D;n2S0vH8SO
zFk2W5((h@Ae_v&`Yfo_Rk)y%u(G?8x&|w*=#~~xf3B2+PVa$rdxS=)!G#0iHU%Ubf
z1LBw>zMc8@CC-QUh2o;Olt;|(<kI22*_X5ete?LZ`UYHt9aa(S{B9BDkxTdsx2s@A
z%)?F3NK2j|@Y(U%7&nj3jm${M&gun|R&sPG9#1n~E4K|O#iQNwanZReP_ukAc3hkY
zGLLrg`Oj$V@chk}F1rlco`ImemL*CT>w>mgHTJJ{!B4hJ&>}pS`ao_ng*UgXd8`Uc
z+y$HzMd#}w3-Il^5juR*`Mc4yr%wJK=jA`9pu02Nr8mrdb&Y(}W8tZJCUpiI2xeZ#
z+3426i1SHD?iGWt`{YBwvqV-o*&llSE=J|R1To-fBp$x6f~}Wh#j;CBXt!O%YHv`F
z(x!zRcc+3!UN*Xa3&N&~T=W>E0f{bQ5R#XHHAY1sS-soRo;uEDpG&#a#sZ=Yk2B}!
zcw$}(T>D}S>K--_CFA3YEgs95+3<SS`sOXy)jbT=Nou&FOD?{itc{vxXVKA2m3jQP
zpS9kgto;o&ODSbW3V-FXxvz4N50s&jb>WF7nW)tEfwc$I@lo+$R8~A^7RCB3ZFoO0
zpF;h-0|LM?B$s!5o66cW7DCjCUhrvO8LT>)g{>0m*3!Dc@&>Gh`YFUd3w$AD4pzsN
z$F!+~`T+0mWrfyF>F})54mA?~u*RYNU}u64c8QI|K;b05rd(j(1#(bTmNK)XZJ;o5
zVH!Vd$geeNnx6X{T-Kb6EgwSA+AR|84XyFXW%74l_XhK7FO;a)TlPL+gLCt$L9@&c
ztk_L)(BN(O;z1Q;$7q1`)jRS+RI*}LhV#RgP`0EOrY*FBD4Oq#zXyWqP+}uZ-!8P=
zt!Ik7+LG1;cTg0jTh>Lb;!}bQaN^bobUjy!l93aH)^puK(K8COk3{mBA=TKSX2~{g
z(nY2vNBwWs(2(lKn;MJIyyPNtco~O{UwY!cfs+x)x0Tqx0&Bi3Vd@6RGBFfGRzBxi
zp#iM+tqva0B+qo=b)nRexb9UP46~fkvHJ>;#qJi(e*P!M*F7M9*hNgUwg;X6=E9Q4
zq~(vw6Kb1Op=RDxo^fM2EFGwc_3u(}$+sd@9y$vhP9^wf^&<SXocM8$5hWh#KHy?^
zUR)b%k1Cp*(EL*#F;)_wE|u~re@=sGS{8<1oWjDt=LountHiFk+3<T#KKPEg%O$Hz
z1m`7c(E1`46f>FxlaiHie$g^?{KZgmXRzgmbPX^$cZVswbcJ=(uA@fJK*6!KgS~9K
zh}Eej*g7o>dN0yLsgyXK6SDZ;&JCzg@c@q?DYLp|j2pd9Vh<w@HV&aYV{aK$){-ul
zHc*ISU5LH#u6P^Gr!D?rq$z0f48P61(taU$FTcw#{|Uo}jib2oL5OJaaWvmip2|F*
z9>o{K&C%9?_<TS0+1>C&*#BTY)|T!B>kED`Hia@+o(9mbn)<jxi3xGJ0Ce;2h-*y)
z5Nl6x+o#u{edckvr0<I=(n4l6Z<DCJbc!X2yV2xVBC9tFK>MktFg}a2cnkXB|N87@
z7m>B=yWyzM<515d5q>ukx5(C4aMe=5I;Y>l$g;g?ew}g)Ju=XExg0e|$}nhcFcTZ(
z*z_X<0~XU;a?~>DcXm6L>&(NJq3P)H<AW$Y62Oeo@3BWmHeg0KE$Ej?489A2;CnI^
zY;-D^#{Gq?sJjU7NGo!03Wl16u`Dm<G%x;g1a#f>MM<)X;MV>Rlc!sVwtY|YxnKI?
zx<L!EYri~z*4xa?>64gdNbFnPeh|JU1X}AZpweifpuLgypV_xq{mq#G!Yb&|GaKs)
zG?@B+Wa`_ZnX+`;|MwplzdAuZ7JXR1Z(5k!auPz44`RnJ>TF-Fz#4thtRsF2R=W#O
z)j=K9f@07o|0rm#r5%DrFLA`Ibg-rQSa+A5P}g1{9-ML%C4avY>Mk3zBWp6Tc9#t-
zJ#UJtDz2cl{16nn-(ix^8!bOqmJ`!IN~pa(pMF0TdM>CW?a3D8hDMM-K^1$e=wtDz
zfzU?2ewDOXR8cO3hQ;gH-L+@H>1ZL=zB<A3WWAy09tX|C;b6rhz;^je&<MLGI%k#O
z_yw-Wd(w<B>=<aZ?}G3X8QiF87;hiCA9vQBMGsRqKHWDOTXr<Fm#>ebv*UH_SQ3m@
z6Hc)DVHOx<x{>&03*pQ0YfzU&8QPbWiS6qTrl(SI1MSf+9n{AV>b_C*H3pAU)a4bN
z#dLR%fX`O*DH{;XkL+E7I^(Ob#r8bt^qL87|4u{IIZ+sO@day8{g10l*P_|U2SUuM
zpTuyeWXfBY#60OSaGG6?kNaj}P}o&syw>u>XyTJjivmT3I{zI(nU<-W_{IlWcr?@;
z)Bd78|091KaC8KIc}qQg4yVAA7edyPDthA|XAk#PqIabn@>YN68_uL-X9;r2CufVo
z?Z?s9{BJhmcNXMzUy3$eXg9_G;(0$kz@&O51fH0H=EV`@{ohW0n=tA&{=xhj3-FN7
z8I&tE1%H}pT615qD9IrR@>a5h9-Ht@lP$eHR>F+MrSN!IB~}I!gYnB9?ozu%2>EV|
zQD-7hB{YP`YjUjgCYI)`IJmqo4{PT(uu~_?vF_}881Lo}TPc5Q(a)6)|7D08uZDx}
z;nza*>wNB87tcFq*K*~wYqX0wgFeK-ah>Q3t|hczin|6=O>;pj*BaWyMDnVav6|<^
zQ!8BoHEsjA$I!v7zWpU{Tz7)C{Ygfx$)V8d^@asZ8w(rqbD>?HMO?8eEMC@!$KCRS
zpv$C>JiNj>_$cPOfabEp+gP1n68g=Zh;x1ASTo!ba^x1|m7Kz^j5bBx&1FK!y=%~U
zq71^D6++ay9%%7oBfE9zDw-D*F-hq&OOshD@RxZ!M&8+i?eETF#q(l(bV(JJ>ASeb
z!&cEv&w|M_s-W1<hUrYShe0;}sC)f6yFPg-?U7<YG5d!wkGQ)E)%&8f=`c*4zYHsj
ziLW&Y__hTyP+#kfM@AK3v8@SfiBW}Hx6eT7_LHdC_tLU)3bDLYDtYtBE951-4BcX`
z;G(aQSlv;DZh@`L-r*FVb7?KA=6vQJ%PU09ze)G@Hb?oS9$fmLz+0YXK}HWV@J;xK
ze^}KIRZZNPs?#U-bPweVJ*B*3VkAgy_F`hs4Df%gf}Xz;&~uInnk2<R-jjo%zDkO2
zD|SJv>MZyXP>D8L?(oQQ2(i%$EVHk#hIy&cc<|0%^k4fw22Cpbv<n5@ibig`SPD8B
z>p@H00m6eTpi`_%eYP=7mT!TTUcu1Y#R0Ycaz@*k@3^XgAq3fvM<+T{jZA@$zflU}
zpjb@kMqYBYHNw(erO>%P88trsVJZgM_-M8j%6JUD%~#X=FNn?h?1{roFJQw8707Op
zqqOP1aOaT}$8Ylo-Kz|=yQi|tyRYMi+$8WSrP+O(H8vWk!o49$*fhEbR`>=&{SF0O
zyi5F)r4p{uDB-QklTcM}Ge6s~4qHd)fL6~C7PM_FGq-GJvLibLiScFTp*xW0`QGE5
zVdr?^*Y$X^A`2Zt0W2Rop-O@}7`@*BY11d5cmH<wHzyBwxAA;Tzhn5_$rp`AQ@>!2
zFPr^U#EeVhaMIXZ>J6wWSt6sZolZOE*kvAQ!(4>@?OY<OEvB*<Q1-tmC`JcCQC$%X
zIG&AG!~e0AHJX)VEc?WY+s5&4O;<2)#c0q_tl`=J9)`+8gCR8S3VKZG#_|r|5{z67
zq4n@}N)1fL`fp`WcPg5P9QA@wM~-C|XeZ@00Ct`!qnwS5>)THP`@N>H?4~`s2hhH-
z_f|BWy8-D(22z^kFv4CAb{etRSWk2EsX6fCIr%n+DussDi5MPO%G)N4MoGg#(P}<r
z(ih(2xs$HL&Z&`@;c%ailWfHo2mGLPWeP&;75poDJ7(YOhUQL=U>Ib9xAp~qTq5Di
z;`E6T43>3O4y^CO6zGiWPMl~{9zvRnj%Fx$|G30VG|6*l-VN0I%>wnoHSFgze@Gko
znB7uaK$)~~(Ao5iNnhS&W}AMAhcN-qt_nh}Eo;%~Re!WlA<wN!9!L^KlmrhtM_%`X
z_-WBe_`NV1TB>fbJbi&{Zc9dol;P+(WB}<llTf+0S}4*ehao=waYv3Z)Ew!@7ETy~
zg;g8i#p?pfZ%>7In*ZyTd%@JbK~PKYv+VAqO&+Sog-zE``dSL-kA;JCWDs-M-~;);
zqQS_ukx4zfGOJ-#BD|)v>zEZvCW&nE>MG(LbpwS*S7CJ}b<b?cWDj~wrOw(ixcgT*
zw6;sxiviT7bFB+;f^Kk~`^MxAAI}}X^Z=tnQ&5uq+d{WeSNtFIr7~<2sFr_XAJopV
zOz|ibnlDFBR>oHq1)|YL^2V;R;F6wt!mjFEJQ=FM9#49rb$A7I|H}jWuRV>e{yCTt
znSntC4iK~_14^b)#=zw>*ELwhHB7bX9ugU~>_CRzUbg3L56q9xhS8ghX#Z}Eb4r77
z{ey|vVVcI21J4U7oyW1yuy8b&KILh*`V#*{9o(M3<Bts$SY|-yaMLd_ZD%+O-&rko
z{9cH&cU?k<@?U&}b})K8C}iqyWz6B!R&ZL7h>iodGL66d_=d0u=o*_3E|HXPBY?FF
z`OD7)Ct--}HruRJK}o?LxVnhCD3?g->nP_At;fJ@?PI}h#4>O#8xEF_<fxmf0dlKm
zzS@)WUz60)CNdAZY45<=zJ0*#druzqAP~*2$1&Xqf8Lq?oGHuu!S*X**pX+7&0BWU
ztm_B5cMVWu%T3ZLi{ZoVX!J<@NBn&}1MKHaM_FJX@46!m9aHl`6_26mq6&=fqM&ZZ
z_dI5a7bs(Lc;U$H5Th8xEZ+2D9{x9&zE6J$SR;W-gII6@hHg`5aaFCa%-PTvoSNdn
zA+jG>XaqBPs-^IuWjl<q_QBeh61=dPymi;i#M7i3w0N1(&Q>Z)X3c}Jy~+5&c`n?Y
zGZ}lmtR`QH57vx~2c`Q#X0zxW?>wCi|8*d4PK?CD+dJVxEOm+RUJ3SI*SSmbJ?@gX
znRnc*0HG`j)D!$rcgGPJLG`s8b*AEu-~fmpc9l8IMwoE^1X|tC7Hn-482-8h6vtkb
zz^ej?(iw~$xpq)zlFnoiOtiE&rQ8QGOn-Ib<E@WF3*{<)jFnQ)^%CrsYJzTGZqjG}
zFs}c41ijCxV0IBBuM>5!jV%|7xBXx*KI&ok!}ZYWzXIB>Ql_B925R=b<EiQ+Q1)?(
zrSjljVfd~>boLxfY{fF_Vm>6?wi=Ijh)wC5WDC0QR)|xMS)(o8o5dd`U~jMmv#y;-
z=N^H4|A2Vh@oX}T8<b3E9Nl5Vl6l{MGI2vj2sRF@V)1>JK=J7aa6aLVpWen`#`XzZ
zSBKbA7S~~44MXR=7_M%lPH%#BP#CcmH14g0_diUqEbth3y$M9c?Z=jj$6d#|?px`N
zKOFU!?uBAE+Uxi5gpbt$IB9+e+D<$Rcei)Nw33;y@QoKb4=jR=Z>el}G@aQ^i*Xw<
zM0JC=3D9>ESnS*gMh`8R-ow+xqjQ3gfHIhq7=?9TUvP~bH@R%V9npevbDz2<K<MTw
zu<h@UZdt*6!_Q4<;;aw4F-O4lktICs?21R+x8jGOqfGMcdr7ca5%qnqLaS^C+LxWh
z#L-cZtRI7}(Wfx*Y9yF@r_vmCpIdCY4iWT!R_U$*4u9%-_};7Bk<ppmznEq8ybSkS
zD9f>+3U{BMj-Hd3qjZ!D>~h+Oc_SCF%;|EFE}0H_GwiurTM6cE2*A1?BJ3MM9nH(=
z?@@okqkbPofA8M-_KGQf-m8Z_b@!lTNH@Wz;{~hK$OOI17tniQ9_#N^iblZ~0sm^|
zo(kem;u@&i5sQ0*5a)dLB!+Dv#MBRA`gPZ#-XR;qqa5)3!I7x==cC|1U;uGA@<FkK
zv6QY(sP4TJGXIVNt35w0Tj}q4?=S;<>s-U1UJ}&3bV^LqpzPKD5vXZ*4U}vX^9Y&G
z`u-D!71NG`*ZVjOu%O+`*C9~9QI21>B%|`_RZtB1!LR#}XYXY@cl6zeuD9ob?5_oa
z<h5h*!#5%L!7~%I?GHgK>8?-pD_~F)LsoPaJO4}p`Fc@UtyO}RJ>u~{PZ68lBlyaN
z@o3>%&vk=8GFOLl;I%as!e`{uey{?H=iK75*bK|}I@57bb2_*kyTZP0JcS?JGvPG#
zV&=Vk!S*~X2ZwLQxUwK0wel6PO>!2~W?!Vt=s!ZI(`m5UcZ^5xJBp9X4RD$)74K_Z
zz?6~uP|FJO=jJ^8SRanz_p;dR#%1`Ze>{5T=)%|Isc1i21=h?j#!XS=GPkErr<=VI
z44l9!T_G-eB}IJ;7xbu7!^aahpoxDJPms(<os|sg2AtyIUW@pc14dZ$;vAPI*9+#4
zi}}yy7)aY!$u0hy0iR@5@aXh<Y=3kXTS#BZD-RWFl6EjN%{Z~e*?<WPasmDYwCzc~
z7s3S4{(Ox+TRahWm;{1~QUhPRrqI5jMu;)a=Ifsy#KMNI&@bjBCM_C@8G(tQ*pts|
zWTPO;<0`LP*q?ma5;PNsW7{)1mzZU;oTb)SyWj-nPrUiC$_VTsoB=n}1Nbg17Ifd{
zGkB&C)5&*s^{gq%OU8*aL#CkOuM)0X{)HW~C`Mbi04@t!Cpe8z$K6vlq37rT(2BXw
z`mOiDxVzC9GAIdBoQ^^Ep%rkMW-Mi&uFSdQK685_a^2GbV$TWls5kc~>vQuQ^>~hE
z|3}fe_{F%rZF~@tWGE!bAS59~dDe|0q#cqZ2_Z>BND5_iU^-x`kxFVhkEF9q&$?wg
zY7;^d+JunwvqNYT-u3<ieVUo)S%>SsuJ3i6=?4DYZ%Aub9IA|KNpF@lSiE^ap3jbf
zZCb4R^2GnB%MUS*c0b0tQLaIWt`K^XHxj!K$<%a87z(OvNYuVc_MR&t>2nR(ON_|p
zm08fb=`|-G=|z+C%aJUyLV-ns(z(A8ZMN?Vlb9yxJJKF3zU~6oZx*0R+XAj12z1-*
z2I1yZPV*kid#?MNc>QOKCYnYtjm=8~7v`d;l5ra)%b~z;BG`v#Lg>ij(8Y2M1+Ht!
zU9V_3ZxhLQ22y-J=_KOaB2YA)g8!Mzapd(`xWM)TejB5NXFuIh@?kms{78(_iF2ua
z`dRLNG2>xeJqnU#j+nTB`5<yw&vR1<26W}4xcnm#ru;{bD8ezPxf0hjm!LEtmlqg2
zD)l!H0jKvTpjP7~G;d_TgJ>0AcTdF^EllSPWSmzp<7zuM<3~rvKi7=FYFRMJ7@b3O
z@1DV=3O9)V;EKY#2B1<8o<Cr1D2QAZ0@S>r27`0K?om1ZQ?wZa!y_1XaxSqnw}QGw
zALx3EEm)_w9C8ky0D}evOzIepvQ-;U5OI)8dbt*YPUXX&;TKW$V*+W5)B(f7_3SLg
zvRZS(@L%l!Y?){VdUj>dRifhSHiVP6#S%0VW)jwQ#CSfLte-ZXI`1)IdZ-y16}S?4
z&mcV3zYwLqoxIJf26D9Z9OKWO<qIqrgKvTd^q6l2b*%;1bJYRndaS@FHx6TO-yXWB
zf!#+KB<A;WZPrgW2{cEapmN4^h_Y~l-^`2FS9l#pc2uDteE<9;&!KQ*j0*it&w^B^
zookC7h~f!7{L`7kv3GVbV=fhtvpPkLZTy1zYe!=T$1=59D%x^%2a59=h_GM)xIVp4
zB!-M@k-QyUHT>b);C1NlJpk?{MPp&|R_vT&0Nom_cjx&RS^$e^&w|xZBnX9NX~n?3
z$wuRsIiSP%`iGaYY+3U)`0Uyj`+dtsx1)0*aSxlxt<(jv;SxS~Pd|L~bvHV>#X|f1
z*&r*kfSjpm%-j8pcU-XtZ2A^}Lf8+Q-R9C`r$f-jaWU8#lz{IzF-5Bz@HQj{a;O~&
zFE1wOMN!&+9LvqNa84QV@MB0SILo$@w+hCsX8HSLhgoJiFdjSiCy}1Pm8^5}Clybe
zN@Y_Q!-M!tG&zxpnoS2Faa%e{k6tAYYYI^``vn!*Z{nq$T9BK<n0Wmi!G8TY#`|NL
z7Ml#PIQN!z%~!x7{Qz`!2_~fj<e1jVn3>s=(X`JtkO+rC&zv4=w&xDnP<<RN-fGg8
zl#67@@K`k2C<2{WO5=XBGr*Xmw50G9DW8`Bb?<`c<mc7U@9HVM80?BE1DR&@bu;Z=
z*&iIk)`0654qpw)K(T)r*n6#mo)6negJK-Iu06``2l{v{T0zH@<f2$(Jzs!DeDk`)
zw4JT@TbAy`Cu^e6vUepP<SWJoQc8Vq7J;LZea1y&IHUUWByL?MsIE3~Zj%myZN^n-
z-?al=i8ci9Vm$bB&ZzT-Kx;ouH0^mn+pO4m=-URyPuT*wPh)9u$VSxIo(HQErO+ak
z5ktGX^mm_V*buZB_r6So$*t$n<{nSQwkC|5p#e4fD$r7Poj>ND3?;1Vyv?E(e5?`(
z&SiOpICJcCI}e|mP#D@3h0AIg_F!WvjIu39&GA;yv*|8z^E!-z4t<NZhx>8vw;k9O
z)xfnEB(mICB565!m*|h2hAmUAV0b|$W^5Y<y6d|Lq)mX(J1Q!8YRuV3%J{`)=^(to
z?zsnFb1u#|pzhIani|LYg%9nAjM1Z5j(q^EvFBO8{b=H%ei@DYGoWdcHuk?}j7JQv
zgOBDydTpl@R+(%@M?MFfd&AL3R?KzOUB{%30q}PBK$Kh%P^g;<vf*RF=Iu-(40QsF
zD_)>HxeDUF)xn7MovUpe4!w8E$enHxe%^czVM{H*%7ZAr&(0~99Ypov{`}Q_Y;enS
zC%okw#@O}7V0xQS`!O@YqGADkI%OfMgyH-Hhj0*dnbPk6%7G6!jqUaU<eTC;7-t;B
zj^&rY=Sc+bY~4i5WgaM){;{mXV=?}lTZ1KY{cysh>-a{RgmUj`+=0LwkTjqI&F-}j
z+u&5F>)8kyQPEHz?g`5mva_>!EJ#NTfaQHAW0vtb9RK|Ux)M{^Hmw-D$^ZArD_D2y
zXA<DZ?#?yLztg;d<y>z7E0Y5EZ{FaZF%1efjHRu=>bV!M&9P_P3{dQ<W$e8(Xyy?L
zh8vt<39rPOXis!+48u>$Sbtc3I7kam@g}23!Y@na^UAHFo-e(@Oe&=Vi^8$aX)ozb
zDgZ~@xzPPG5|+L>z&d+Q;@+*AsJ8M7y6;%T@?`yCe#<G05G&Cza0JNgf>2lY2Hr2<
zg6&aj;O8&Kt||RVGU6Faf72fL6fy^s4_}7%ll?*M69>)B6m&Oe5q{Z4^gI{~@mDQr
zUMvsN>>XfPbGA$+4(IPir$OS)7%XyPOv<sWJHyx+%+4=`pqN;YxdcP!&Rmw`meE|-
z%V7Gjlt`DpCCBDDqZky7^<>FB=vCk*Ukaw8OkRFJ0hSFcLU_3e>t^_YVJ`DO2<8%3
zrs)+rv+s8GaWt21#8_KLY*A&BIChV__F*;3UYrES1_!V%e!$-CZFIb77K(HYXzuey
zqM>z)<%P$V3HZSfdOj9yuKtgh9qJDaYki5%qd}l?MiWI3_VD7s`&4tII;@IOL${zN
z{(wKz0nJoUTxf{hV>_tjr=ehatAe}ov>fZQf-yy)zz=n0_;H*BbZRwG_^OxK9V&t{
z@eMR)`})Pf3FsP-2S&~+82Noap1kUh%~?ZX+XR;5cPi#Ok8XiZb!TXqG!`J*9__A|
z!3gV9Xn%eiIiMzGoY!)iD_@2d4SpcnIFZ-!Vhr1LQ$Vi%g15+iLo9cWp@R4e^CiKP
ziO!#NsMgv6vWVxj-ZhrK&bGyS+r_BZWrXc_ccP78KG^#tlXyMG$TqBks*l>JGIyZ<
z-z}l`Wid2V#*n~)m+?o93G4P#U|ek+$Zk`__}%b*R0zr)6x`iA>)7`(kV}s-MHM~B
z_l8KR|1x{bP_enK)f$+$H5u$nU(hkCV)Qj$26cO?Kyzt0mGo|df?^|@<K>9bjz3(~
zh&fPn^CUFfF(-=)&x29xa1i*aD}6V{QTz9siQos*A*!F!UVa!zA5X)!9m^3-)<VJ9
zY|`_C@n2ISvHGTr-rlqUyEbYv9Y&3|9bW+r&KJ0S78d9|iS<Ej<Uy4Ei1^#t)2#Ax
z6bRKVL<blL>7FC;EItn|-K-CyNkJ`I2<!8^O@^B&LErp7&39#c&(xJ5er*9)_j{p+
zbP5JNVf}4ebFksZAXv0-2j+gOq{{V_cI!=H_tA5xk(L5JZ;Cm0w>Q*dI^%!*+6(pL
zk#=?O0-kjd^ean4`z4=v-3ghrC#Q!Vd#nc4VRCxmT@AiBQK3MVVm^LZH2$(Lz})&=
z5J!}9&o|{l#)e=_vSHrgwJa-f<TSsK%?JhUOIU94Dw^?357MaO?4s1cvF}Y<+7XJ*
z!eZ*=kOqU?uHlrRQ)qO7`BpX$hW)~F+*NcDP4Xr}uF)#6xi*32^|WwJUK$P^R*JG-
ziUQ}E;MX+_kM-LMuD>r6x3HzOKB$nmeqlXI>h56rF@c8K%ZTVw2DaJl1}~FRaLdvF
zn}{>ex#TtV6dWL~-a_=DiOhSpjMOb3gIuHpMs^|^Oy{w{V=-|bYK|+c?D64JU-ZGl
zB<YO|?iuaD$#IFyPiA~gYvd$6e_Mi$QWbQ0Aa~484@w47s(M;WUY$w@WuFg}j2?@s
zykuHgk_&E)nRKprJGMt^ff>{BJ>KucR^J@{$yzOxo^v9dYxY4q)6?MgEuvFcNG!KB
zbL*CvqVZ<-Om+NAKmVPDpSEts4U@uAv6gYbkN?FoDF;|T9Zzh&A0+Lk`-1<YzmN*k
z@M(1-HU=lM9-CjBV#;G$xBMJ+Tt6CYtD><dv=1D(eHPI8DoO<>XvxeCpuX9Ph$lWH
z(%FVk+L-}f_j2LDizIYSGXbByZ+XwF^;9@bo0zRnVm;GKK=ew3{~UD^g|oJS{h}fe
zcp<vSzM`rT{UIQ11sY}LL#KQPD1%iHs+Y(z3L#Xu{T1h3vjdvr#(`go1iU{ohDPvJ
z&a&<kCwM)bbKem~qIxw!ecTY@)47=%d<aJ`8S7e}Iuy&bv(Pd?lX?3Yi+k5@jQaA4
zigui!ADf1v<(5>e4w!?B_(Y5_J_Baol~mBPpXiv1X{gH{P}`gWU75L9Eh;3<2|2Vm
zbp&K|SD-NN8R_(iqJO_TipSDaWX<MH7@snl2;Nmtqpj+s#dAFLtv<_oILA@zFGV1J
z*~srWegc2jCa`myBP69X6CWYVZJM9ty8`35ruXN-qWueb?=MEXxq~3?xC~8GFY`Xy
zUgW3N2yC#r%6XbFPMd}fG#lAK{p2{v3r|GRp@Fp9!It?Uhod5ebr&4(;M)Ro=$OBk
zqQB)~@<5Z_wenO@BIQ7Ee;KqX<QOnVh4zOp5wYbGa9vbGa<$FC{Pi&Gsg{vrBZG-b
z>Br?=VtIq8NZP?T#1jrD!ppieG?^mB6&Z7|&BYqzZ^E!@un0v)8gMIP0{-rfhUeAn
zj&;5aJ#SWn|Im17f6+;0+e2_w+IrNfw!nyVIgDJ81+#Xt%=I$XJ^$5_3*JQ`Gu9jb
zBMP*i(?GOt1cT43Na)_M32Zkozna@-PNcVpYfhg^R2x#jF=Z5c58o1t0Ba&2dlKr$
zc%kItVc<JMVf1DrlrF3VvnyIC!mId}v1gpWj0dOdg;=oa68s5Dzyfn6FXrx$R=rFv
zl<7IrSC2V+W5$|V)I)_A%)qiEk2gLUfkG1%{T;!$zHet^r_C8QL-N4fMRC-+B803e
zEJC}(q2%h1xmeQML_)1Iv3j<IdQaH^k|8Gak7*6|?P3i7(I%`+u9LQy@4~3^pVWVs
zClT9EftVtuO&@M1z6YelX0jznTv!e+P6!t^v7U$fNnrfH2(UKX2f{{kqMZ4V%$mF!
zt&Rqw;msG+UF#x>u3hJY1|`CmOTkRnl%k-^$HHd!5HbJ)v1LaEF3Xa`(NXJhjF%IN
zZq;)?l~>SuMJd|N9|rb!#)1i)M~`<)QF(nls5w<(tGt^eJ$yu-KTT%Z*AH5~jtA8X
z<T@Jjp{K782(_0$fgpny#`{8EeKh(kJI}fTt5_%IT67;9NjLV-LfMcg(!1&eo?p5K
zR}Y?x-!0;ik6MIwl|?MqaEAF^zf%6F0Y0!}-DtbE;H%6E(A~F=obi^S+4T&%uX#So
zFMlQHzeS=&68rmSHQ_<uIcO7fmzIoDkc@FD5XMAsi~T9|XN?QGwwj~ig!R~*+aDy~
zMu27KWX_~z7=-NKkESxloSD>0>;HO0S{L&mu-RemsGJ7vIv0VTmIkB0TB6~jDX{m~
zHOSc<3-v8M)OzIrHrM%^vl(>`yw;YYjrSemwnD}=7pg(WB=-IWZ{c;BueR>zI5xMM
zi*cr<m=1HWSN$4RIQZeWht)W8B*lW&O8WIqBAY|(B%$4oC|S0ac5cg}f{-Mt7I+Dl
z28>5{$wQj4N=l8GZ_j(dBp9xR=sUQDN}j!^IZJn-ZigN9+8PX=^<${~cm{|UP2__L
z6G4>JLo^-P`DMjzqUtl03-n0GRq>3W_$L#G>K35$$95tfbBDA1x`Qh|tYqK0Bj2v=
zjE<L%V&l?8ki{huDQZ#Y@nWdGSO}9tQ^02URmyL3L2Hj-+Va>Qdpb`s#&Hg0n3Z7r
z;^VB-G6H;;SAh85Z(`Xy3sfJ1_{kSef_>yPFnE~_&ZhgQYt(V(D@h`~)?etZ!J}~T
z{bBgGUKyHt4TpJp8Q}jv1&Z}fmo+*#K>dVBQeYhbnMV4U<XuPY8izpZo6Vg1N|t|a
zZl^xl@nG6_KOEEB2NxDsqV8!^u$yrR+>!{de(Nxda0Rqnf0!5Fdc#fCn}n*0T&P{y
z9|gucl%mqBY;XC<H|{ltW^;C@&kMuW)7tPOg=M<#YM|u;HL$$#g0tB&44TcB(Zh^q
z?an@Hgnts)52)o@`fLU1g1$tlp(IM{uQb2qFdFHxuHXyq(AWM3wq|eUjml#{{XO#@
zd~*a%Sq3RMu$Uz2A43D{a3~2J3Rxb^5543FjJ&G=!)dOd85_a;Z6#&;56W<8z#<%w
zQ4TZenIGq@J@<6-Ie2x4b;->cMC&vbGFHb^s>nY?!s>@(3hcs;anaZ~ZarwuyZ~yV
zU3h(J3T}Oxgo{?~z~JpZIDO|j4BA_SX0Klni?2mwKuIM;O;LwNUuTT_ScMyy2B((F
zx(6<f0r`*xpd5Z2UJc2Go<SEN;)(+Fr>0_q{V;Hwu#LuNxq*eozvS4;<B)kJn{j3n
zKz*w#$urIb!Kn4zfuc}27F$i=?<pwX^&e^d_ZF>wpMmy2o>0lHV6u6CH5xwH3(~>G
zq|N*f?JE6Ds%M`=9o9KjFm4^2uLoJ|A2SDC86!z8K!8b}%yY-qyw<>Q*#7khx=f3Q
z)0Zre`)e{@58Hw*c{@Qop_mgtu;Aq$A$-B?c#OKb4`wu9V?1l-cdrdW$(R+yXdz+0
z!6Po-fX({Blb*S<0OkGna%5Z#ddpbOFpyP@Nf)zs=O11$wu|@}c!IidfAXL)5>$3x
zM6GcF<9x5;l~ISlS>i?u-ZoIZwG<yR|J8uirQkh<{Z_`?so}Z+v`;JKfA(V@Nxea^
zuwfTgzfHjA-%@B7SFyg{55#lwK=ix20Zk25uysNOKCll)xBlsHfN_oL-Oi%S?G$7j
zyGB`^23RmIh>46b_Ab}JG3W~gdVR2U?NsgnAb5`wV8cFdxM@>_BPU5greyo_^o!J|
zcLB)fpN2srobX!AMtn7=68Od({H-oSpYw)vL=fv9uwKh@g`LFnN+0wbI}l(UVs~pL
zY1ZCKEGPWtY~06yTYf*j!NwdW^cjE|YRqH2mqMa&FiNknPRGgR@M`%0Y*?2IKJox+
z6S9@~S~-Dgkuk(Sen~yu`(XT#Y*dqcpv}!EXr;>)NJbS-)|A8k?UPXX-~tGwzUGE#
z3GHLousm`+UK{6uKAY!&&`tv+GeRiue+hf~sexiyfB0@1hY_+(D5`Ye94njXgtK<2
zH}V|XG+L6*#pgid&vj6ZNh0EuFY`s-H;J;+5af@ZQqj%@B=dzXiu(+t7xx}U>F)a^
z=L<q3W4`_}n1!vIllV}^4zo<ECz2luC@7dqtAB=4Ya1u%C=p=qrYzFOKM{4<Az|g9
zE9e<11@)h5Xc2#&=yheFkrF{{tj1k>KaR~pe^MLuMRfbea`s8b5!U&PPIbZP9#%;1
z_?x2s<?U?t;ZA)YpTzEwX~d>?AQ8|&xchGv`aN8N7HqA(V>lV5y&Tn(mE-wQ>(Ei#
z6`eL`VdjJRX#Y$JCIKSYmdseZsSj!H5-TwE-c6piUj+LFUx`>o%1ljC(3A|Mvqw3h
zGXENCx3gKd#02g<E=B#4F;Kcyit*ZdAc(W3Cnp!d-wU)*cEA#ZL6@*#!*BW}BoI{t
zM?p!(L+0!Hn+l81(p<|>i2Nr6yQ<pA&V|`1(d;H)TDG!mb1K(m-A>*z-o#Y9dDy1L
zW|&_Kz|@evUp-6F)b<^9F1`RsyF=kh;&g1-vxPKdCebI43Q#qv0G*tM<C*D<w;RRu
z_5zBkH)qOBPK6>&4gk}Fdz=HCtrff+O4m<6i1F-hqtARs%HRKz&sGXhpR9!3*i<55
z^X=K<lPv#V!8gRdC;m$X#B#_{PL+Qcm2Z%)3OWHkN#jU$&^zjJEg!^zgNW#DJBh0}
z18wUcP;0*>FiblNv6V3}gcew^mc6$|gTaPn(PX+i;n--_ub?<U_1n+kofJ2exPGS1
zOJA{0F&(HX+KzIm8K*XlWp4`ZQ<cLhK5KD4_+Ilz-{nu~0D}u4VdJMN!CJhSD8~Pd
zcf<jWVG#2p1w{25NY>s;%($2Ve)UK2*upH<`A|S?hIJ78j}lJkz&dpAR1-n#Y|#1A
z7bH_%$OL9dVV-J&-`7G*TQF3<ECl=1PU2t8ymM8zIpMEJUVbzZG|%@3-80!#=V?1N
z-+mR9y<5RcYa1qe8>9NfI--0%h={WoS8rtnXK(PC6NZ(;??L70`C>IB9b$a&n*nex
z<`TLjoP(!VnGQelI28xRU{}pEzUO2CasFdU{mm~x;{1y!u?ql^NiO-eOab<~>|I!_
zK(})vX`MFHtIiaHw7-xBjdR6}aV&d3kAp*p!%+Q<9rI7H4vM&3YCX@3b*bCaq-S*`
z_kamJ>c?ZJ#YRkWx=pg(n2W__Ke5cY#4CJPgYtnUxpmJ8Pft0FNv&BhGCTv#!Z*NQ
zkrU9)bvKCKr814dm*p|iLA3o2=Od6qkHcajU$va;d2d2*ZK{Em_H@`)e-3jtrGnwy
zG@5fc0;(gl;Z)F89H0-V)EYp$Lt}A2^RAx0%h-fBMl%LRKCjrI0;k7DXjjkpnPdGx
zhvj!xGmZ-CUk8&vauAsOrz~RgsD<<f#5jauTKFn7dF&4b^Wvbyu$E-|Y{%+y))nw0
zjJnpHrOEA;xKLgI4IaDcwgc%HwdgF4pX-a>%bAYQQ_i>|Gl}ZjecHHiAZS<B;+U<+
z(ec6sDu4HdGq^8-jFbzs-t#iZjH+QqbqVTc%)=y)Fi_c#1pnC{&^+TBq@1i`8jG0k
zPQHMK`K6>ycOxkNor_?&AD{IZh20YZpybC9=sMcXn|8Vq{r_g;gi~|SKI}gduRaZ*
z>m^_Vo1cjOx0~NvcLL2?LqN4h!Db+rNqKw%>hCs&=kn9o`-=G$^7r$tv!iin1&=bH
zD<nQtLM*J-lYbTVY;EhuX3|5TeN2DidfAW70eEm%tiYmK;g}Z0xb<lPpcq48pvfle
zKF<0q&3Vjl5R>+ZB*xt?oUeE2JbEr?{^7Uti0AzVx-;hzT69eyuB+0q;d?olt{+63
z!t<bg%}nC6#|aWHW}?8m6n&cZk<>qk_JwVHc;Y_nioVN9BwDP~M+Ev_theok65L5X
z$hI&oDK-dt=_B&lHv$#Xuf*HH9zt8#?CJAAMDd#SrS}@ak5$E>(oN*VcD}^U%@*bj
z7UQVmqbS!n0t-zUd!Kb`i|S)}pZ_tv@S2#dgDgk*YCBoHQ4W@~F3%s?U5r7BE70VY
z0H#-)NLMW(KAYD<hq;(#p7UtyE?Z7z(8UW>?kL`LaK7MY828yV2S0>wLG@9_u(^K<
zESZ0dF*^)sxF_TFgm7T9a|gzK(ZD|6VsJYc;9G`<o@f|>J)81K*D8NFtab?x^krQB
zv;Nqaa~g5rA<!4jV!wF}?Mdh&njbgA*|YYnzjP2)d(YPTzx7ad+!iIbe-Z!by71b5
zAA0U_1pPHiVi{*Z1UvsBmEW(kd}kDh)~SJg#c^nM)gt2MreI=_3aeXYW93<AR6H>!
zfA=dyi&TATzQqIWdoP3Ay$xiF%}&fac@dPm`?A~;n|DlH$oQJVvM~)6DA}h8?UTNd
zZO$wsrL&h47@gq7^&#_*T#SQ__1>uBTjy78KZ=fZjH%fb46VC|Qo)(qyy@yM#8B>r
z88_6xbiflLU2q8uA3r6Q&354aSO{H-JE`tD3C*h9fDX5F(9OS>Z%B_JHq9R3yN_jL
zQXR46LKHP(TB~~TY!v;u&u#s%6UBD6Sp6j*=DpLzsp}V^ATNLp5yxYqT>{M07UP#R
zmL<8!c%2@m#BfCoD%SNu!8fW@Y^;HD&1$?AvIc!rCgi`?cyv&&g!rU(BG?tdy}H5r
zW>(#%K6O5zc9&x;wOx2l!x<lV6=T83C1AKIAKErWv%cLv^wAg({MagmTQRw)-W`d|
zc7wv``C#&lWsUpfGsd(l)bpF5LeB*YuD|Dnk_*^1`zp4l>e5CwcM!}9;_j@D!fQQF
z*c_;XnP|;2fV*jgF6-a@@PKwCB0Rkr0u|v*8~wKrUGq;Y*1702O@BSP+kFv4ho{5;
zoIO|;oM|49M??C`?W})33ZnvR=;*3#_$!ZPbt<heqj3y2KbZwlgHBMl`)s~5*9Y8R
zF#bT{W>mE_68WN`G_K_$20b2#ilPN9%P*yaBQD{?n|hcrKt;ReMu6R4Q7Bk`MR{Yd
z6m_KwaX<uPDR|2`>)$QJ_h=?Da$SM`LmO#p->=k=KY=R>IrN@(nUz9h!!+SxJX7L;
z5=UL;-H)NFXJ4pv*hk{+Y7F1ZFQE9bJC!`KpyIZnsJShZ<q1w8Ha;hQ>FJ<;f`Z68
zn;yCzfQkCd^LR5GrfRZ$x!o6%dt(H|Z`(tY)@z}9zCP0uGkC+CI9U9Q<!2|Rg4xkA
z(7x|3$*9u60qzJ@<8ARFSBY~jTtjgb<9eD|(Y^PA!FT*6v~~`mU52Az<4-Hh5uF09
zDt+ue_nPK9JE85Z>mb=Z7JmOOf(MQLu<fA?N)EA(VSUD5n=pc9A2vbx$3n20#n?a+
zD^Tqm0WW8!q2GU(pupxLR{s?Udw1MG`EE}tzsNydUn!kC`v_j%F%4U%##7(^X7KsX
zRCKaWfTs$^0GKqJdMntBGw%VFTclD!(<L~tzY^4*^U!ye1jGZiiT&D@aBPJD`hF{b
z+AC)GbYv7bYMckNhnC>|wSjc@$Z4opfR-D-aiyJCK=k$;fA5qFCN$WgILC^w3yDCn
z@>Q8&^j8b<ry}CGGZLytC`qH082a_v;J4ri@RLZO;1<(=Q@vSdH|t>;9YDmRwv()W
zWhhw~LfTa~$<XW+w0K%a1R24!Z3)u~Zk~hq#df%AwJ$E-l>*IUuF}uL+3d0GFkIQW
z0_AP$oY$2hcxy*E3_5TFRb$erFmMKp_-8pTGmZvZvwV0vJqn7P8FRk86s$k2LHFrj
z>B-MB6hB`A$t$XHqQ+4abg!Ck_G&FW`Fs(RwuDg~Etae9j$qPvD5xxMK!LqAIOAP<
z`?Lh5Kg+?hTTB-pn~O4GJ}xSZ#I^ZO_#?dl@ZBg7ezu{yL$_hlayRs5IlCnHjgVAU
z34#MLWs_^eaMiAGFbt>x!{bcLs0>2omlsri>mhg2uNwQkjzi5=2bkV+k%~sUK>8~-
zgT6zkcxenTZ)5wG{UwA;A{=>Q30}{>j`^1Za7Ta)#{?8J%|{3JTC-UK%WK9h$N?MU
z{ZxJ-2euj4!p7ryD8KNO%JwSZh+P#7EiXf%4f6{JS7CuqDsQ2+8LVbj;c!0@cIOB%
zURpx4I!|G|<#b|QZ31fEl(w^(pN{5Kbg5j1w>D{_j*AnNzpjOfp)9M~G!bpWj7f=r
z1K$CsXolT*Ht(KB%BQEHVyYXlP~0Rh@)lyH*bs$Ff;o6=1g2yvm4E#o(G2tj%W^lc
zqjzY*hd3ht`iV*t+lb%HOQ6)dj@6nuv_U_X)6%L&k<5+19en}Y5{`mly@aSXYV+mK
zEKn!z5fzz?<>j7ce3Wc82r_=sTTPMR@Q8K0rSAbvaS_N@y6~b|v#GA5F`K(eQQ5N<
z;wDJ(;h-bf)FQ*>Q<+aZf_XU|!l__#2-&Y%&R84a=oaZkT3fTh$Fm$uRwG&lrbFZN
z>#$+D5{IV*V-MSR(=;S#5&eR0Eq21+VaITnAPdoMAvi4_fy1NwA<bHZMhADnU%{Cu
zw8*C_+sRO`7KkB=1pMElFnr;@0^J?r;a%2IJZrm(G1O;JQEDvu*nZ)peI5{Db`w`U
z|0Jz(iNU0ae?jE`E}{Rg-=w=NlBk`2MRmLri0ZZ(r*kfusM~F%6CWm_%vA;|>r33Z
zbK%T8a)&qdJ_nL(_enclhnw%l<LRrH&~8y6Ouuf4uayfC?u>@03Ke=6q*2Y$`53n?
z2IT)e<3y?)K5AYK5o^0cg3T~gpWy<1CPm_AHU~DTUk>Ug<B3*@JGy?~3zHjyp*hbR
zL>8HJvV|CXHH+Y!`czbXwC1Fvn)wIr?AdBJhN)R8xG*CJ^&e-TY|e9{UZ)NM_pLBc
zScX0`=Mc;LU(2+Hgu*j<JRZ<MaJU@{n_KO$<=|HM-N-!jcg6U8j{v=YGv2Fn5?8%s
z1Xwd>y6T+{xO`H_&biE|uT???tC~rvp(~CoI|s+s`a$oh<IuV;209P+g+fDr^q)AB
zs5_|>b;dkhl`{xGdWV2~$zNbL`XJQa3}sm;6}G?arLD4R5}CksrbAkoWkWG)`C$;7
zj-=Zbp9c4TlIe*LE3k9DJ@b!U182J<#C{07&;0j+bQ_-noqp@UAfgtIyhs7(OdEQq
z-V!B*z`9qARmAdu&S}=r$0HY8;v&GZEghTJt|T$BGCVdXmbOk9NA|8`p83dPeod|z
zv;3HLS{Vf6hS{OdsRXF}&HR=RXF=++Ybc5w$p>kk0_Vbk)UhWW%H~~X-v3PEn!z|w
z#a1}tO%d7`J>eFcPC}#ctOvaA745mA25yuCACJ>~OIa)lvO9@xnuCdlPc42}I~w&C
zMZj#ou^7bkQmo$z9zNOFIqwXb%oc+rEEzg%7(?~+XX0*^M}2;Uarq@YmRwv3@Z~F2
zE&D^fMn}WUo!7Bo00(0kZY52qVtgtuKs^QhZ<Y`*Z0B%>IE{5e2lD>fD(acPh)VnI
zgu8WR5Pz+U@f4VEF^GBm`fNb?yI4+sL`8&UK2%m<2%Y!u6V)_B*lQohW`?=M)#*Gj
zA99KHu3s;k5#f%OZ=Z9LK=yqlj)VBS3B<Bpi-`K&;~%*sqv*m;Jg04jB}*K@=TS21
zMlJ!LTmNvmPqd(WRTZ@HLm|p*KkV%~gP(j2u;KDWXq+Gg->Oj3p5p*&T^qs8fSo_8
z8)(BY11K*ogd;9um_1txJ=JQUS(E{p!vj$EeGv>V+{d`%4!n<ACD(Pv7vw&+;FHE=
zh{@S}+K(GhKK&F7cg=%$3$@VgiwCi3^#k{oQ0jjpg!T@Vf?r?=zRQ|`8mVG5+7?Kn
z#<G5tx>!2-A0BK5Ntlk?2P}uH!zV=)D$I+h@Jk;s9Q=kJYb?Z%8x>&bfUIvjowhs2
zvrOkyNK+;;4&NT?yxED?E(w9!-K)`^b)<_t3E$JML&OJD(0R|_M75l8<!L>se7hV&
zg=G*Vjs-J$5z);U3*Me=%|4Y)nrgzZVBIOs|FeLqxs*{wf(unQ*o1b^96*w_lXUg+
z{8s<z=%`o*UG7)7f)rzFGewPHzYXxC^90yCOa<fRCt=W)JQUwMNCoO6%iKK^U}pkj
zv6%sUTzm@aXM`~(xi0N_Rsa!ywxUUk0o2>1!;}6_Z2h`K3btGVO|$FpUn)BbPcQ;2
zcHj18EVFW-a`4W2NfR?FVcA#?dzU4HpOF(|+bhUZ`$0H4dN=C-*}?8LHT1cmBU)yh
zqCFLAbcW^$`1G?3tvq}gS7$CCd@C7pGV{UlXFl0|m#zO+^B|5n?Q}k*!+Nb%=z*b-
zZ8aX{p#+UCPazrWmcz05<*5FriNNq25LZs*JYv1Dt4}m(_3Q_(6Yan<;0Qc6KZUI?
zW)Tx{Ae3ng$0)|}IxSv;f`2NM_n#%=@$c-JxAb6Hu3DTiL;=k?Kk42P30T;kg)LDI
zpn0`}woI-hljbuvKtMRIKQ|F)OkguspCEGe^97XTJfK7JhGFiJ-?a1lOCq{+f}dAJ
zK|P%V+nwhzVqh|8W^98Z=H+@VDaPOLr=nR~2KnAK6IbNfp+x5<RezO42JAcyf|rYU
z|KtNyc)|nbE*OrTTW^y1mU!YmDV{3(y`jw;^+Ba9CPsZbh}by0Oa~3Xeq|0-m6kwI
ziarV_RI^T-An4q(4NTufaDuA-X#FJ<svQBmpU#6Wmcy~1picEQMuVr#Ai&pGiFW>a
zbaygm?@BZ|(8<6nwk_myQwGR)52I3fKFce~P#U-x3SPb71%1OnK6)~!S+OpgzDrRR
zb_jan1$3cG3VGx-PJDU*6&qH9=$}GR9NA3$V>Hn-;wxj&XmM?iPlN6IIH>Msz3oin
zQJhGjjk5-0wQnp=zZZtsWC)-4pT`kXi*aakG<G}3<DU0<sB`N)^r;(*U7uA%wSEX>
zRE`IomzA_&YA)-d^T44|ws=xc48oa=h4{x5>e>3PcD<guYdC>VU^ShWc^+T4C!@>b
zODvo4p4bN~QSOq#IT}BsZ#=XZllU{8r*Q&vf<wUF{|~KenoTz|rder)f@PPiK}#w@
zwI?lP?w1_&HSq@%Q+*IGYcG?AUE_{T2tncWYWi2yMpVs}5TE|*>Bo=WXcn>(k`_jx
zOqPNl`xP@^VFhSvh(K+905reILxbH2-yc=6MLmZK2JmR`H-&F^6B#4sBG)tPIkkDV
zm1t>9##U=rnpY}@HbYlv>rrEUy%v0EpR-Uax`+yHC8%7d5<zhi6`zU)kyA9WT%*Y+
zsk@_dA43}7G8tfY4Ba_uBK|Os08h&Yv^81GXGx2ollg^JMY+`L<x-SIGM&HqS6M5q
zAtikc!CLDI)<6G4J#~ty(5C_&?`^?jWt*X{rjB%aunv>2S~&c<lJy&H<&!E-psuzR
zD4Rs^<4!HMeOv%OjD@|VCIegQ9U<bK2z9r!{7sZSdR1gY^&7_ZSQAU+E7NJn;)|Gj
zJ^_z!D#2veRPesD4k|xf1C!}&kNyw@<<2K?ufGyzlrX0EZi<0T4rpVMiL(cUfX{Pd
znwSuetsA#;hM`kX(OLy_$FuIVh<f5{%Y5#-ABbN+1uP$^Kve|e{(AMJ=&=-TGT(;n
zh)XzcL@*u;90`(XdmuOZ91*2w6SeruX!ma#G;L>RZrK5-Kg(wC<7N@z5f`Q<mV)l`
z;oy0G9l0Af0gq@fk79l}{%=GeN|vUOgNxmfIV3@%45C4W3Y_EMh@tU|P{-jg__2+_
z=8lja^i`namve-#Dg=XvXF%Y0+=6+}`PnT@@~n3Qy{{}kQ^bA?mgQ@mIFd$aC4;4J
zZ&~-ke)!M(-8k2O8m_vDkl|fM%<S{A`}75Py7fHFRk42Pe^;WN=_`72(JGcJVi^Iy
zEihxB1Z;HQ(vs!l0K6)wVLJ2ceYZ#X>maz6Z;kI49>jarURbM#Sls_QK0nI#&){hg
z5*LR)edkeOPZ?=7Kf|l%oM3%8%Rph<O#8mPfdUg<nA>kSN;5Ku^WnGDqF09)jR_@|
zybd>96wL0}p%Ahy1slB<q4$SxR43jO-lke(jCwRkM}8t<70c1(*=3A-S`BkUMx(3c
zH>!BH1)hG-#?L=;;O*i9RDKjwxqA%S<R2%(Tfev;H)SAev_Sd%FveG|fzFO07;$Mh
zT2A^vI+cv4mb(!>r&-Z?`?6R@V<UQS$8nyeI>L>auqI;>_Fmip7Rzjjb>d3Ud^8K}
zt|d{^BYxE1cN&Q8GD*pRS)lxH6<xF>1?4}AxxIl}C@yZHs_h-*R|7lO=f3A)Qxmms
zokiVZ{HbTL3YM~ro4{ijZ9QjATaSJwT}MkH{$L-bwf;w)dm723)e0=w%KS3J#UT2;
zoy#kVft8<zp)zG4wY>17Ot8!p<9Z9>bK7YcdVMo$X)%wu-;enX&q_i4%PrCtz`B(N
zi>U2s2j(kWTP8ZG0h9Sy=#tKb15!WMwXQ;Y@ln#%%KTvZ3B<7TEJ**zNU~-qb_Xv6
z>8LfNHhvBYE<dI{GYZf@G!HCeuCY9g0g<<JypG*Ts2C$h?XCH&msAr}5W{ukx#EQv
zjEzyA1S+X5G*A6Z?IX`%+;f)4Sv?rV-Cv1Fa~~KVEk<eDA{dsbz^5;AvG7F&z7W>1
zZq`|l@VgXU=FP`TA9>6xIt5lrCGPygcud9K5FfS*oD)Ow$({LV8mI<qQ`1nLbvUW?
z{+%xhYvW`#4&XnmiW+)sg#KMi@my9JDyvsB9YBJ$cIC{&?T!XX6*y_M6?Q(|1jjU&
z;_H+uR3E&6HhoLOV?X9#yf<rk-!=!neNBP^iFR1CtBCp8uR`v_i735rh&V0|CF)at
z(PNPnRG67YE;H7Eb3igJ^3TGivCC0)EWNBJ-G;vCP~hdaOV~ZSgY)cSp3V=8LBH-H
zQS7h>%Q{caIXeKHMvTLUZ3TG4?l=}ivL4H6XHX$YgPyJ+B79%Le2Kv<TW*g9$sy3>
zmjvzqw31HWBH9}h4%tf-*!g@dJXm`kibKw$&{&T&&R-8UQHC&_`D;wvUXuL<2T?a<
zE_`feXNZXg=w|<h`h2)eEE-dBLtrF!EeqqGU#x|@^NU!A-*^mAB%|Dx%`}-GqoD8!
z|6$1<G#qq>acO3vy-7MLFwa8G+Y!{dsQ`_7Lhwa{K6<L2Q#US~@zO72+KfwRvRMy&
zE++B=Em_WT>>qOT?}ey4T$?`EO#}XNB`h6u6>T(!kb=xqjPhs9AEV1u)VGZ0Mu^eF
zF&>kwJE-fIT-=vfilT;Y5}y{tnDX~n`G5s@-;1Qe4GH8;vor3F4a2?*L(y})8=YMx
z!G0qu@TFuOrpPv6+psj$JtHIX?_)@WT?Fg17y)J*nuv720r*<8^ZDv&p!41v{2vC8
z)}T(#{hEZGT{p4)H;~TNEyCLTJTyrw#h#lVXp7ekvY2&T3N0Ax@zyEY<duL;CI)zW
zI%6b%istk~H6Uch1U$L#Jb3QDLPSivY|tABUg4*4ZM+?0?hmI?ZE-}Lp+(hAr$FC`
zVrb4vhJrtvNW)K0D&AGiPhi@F&$Ke)b95TymOmpVmoH=WYAX;_XDKsU;z8Jp#6DdK
zZX+`IB!fvXYW8BxTCIjH`{If9zP(_SHitwt%m?XV71y;r9G|oI&+JMr{1tHn|Ni$9
z^V;0y1|BNI$7TiCAZsL{Ara(FN(!5AuEB<~5EO4HrfnLm3uV+OOk!u5u8@~p&Aa(b
zkGRc6w~j|<vjMlKU%8~e7>j7(I!>9r7$j*<Kw?U8vG#gYcW5M{gL3e%VEtCZGHLbI
zuhc}Rjc)DAMzQy=GS4$xsp>0-+ZNschjMLf*)|0&<Ja*G>n=kV{~s@n+(d3VRN}U8
zVNg`503(+WqJJh3ey%gX#+qyRe3}9ztiP`A$12#y{@>B;bHp2NLtSnXk!&=BuGe#5
z#1JQRXWmxDo&nI;tO}pz+F;L=Lo{a>+y9gHa)u8UL-hkksIw^~)s+XpuY-9f+%lmc
zV<DZot{8>$ZgQ#vZ_0d<D|nIr0KWG%(*^flpu5A3QRUu`^KaV5*zeOhf3FnsV^AWR
zjLbmqO~ph};s+{&kNi`1o-~maq9{6(zR+YDajQOfrt~7?T>YZeMs;+aA>-w)_5w}y
zQyBLs4?2Gwf?RtUw7wq3$vt#=!}p`%-~VmG_T`b_8k9f_UY4UM?l$qvTtmk->SEHm
zC$ud)mfrG7ffc_e<DaEt(0bfL(6>k+)80w2`TS?%>J>?EF;=GU_A5m3u#CnhAE9o|
zHe5q)Et=`Cf?v~YQE<kOs7~CPUpajg{;p0!L9nI8s5jHG*ORSJQ?ubDJAW9*T|hUl
z^Qg+YQKoZcI=Gb*a4ZQ2`RoenxcvsX_E3&KKA}YUPDKj;F+o00fclS4LXR!$mu;B}
z^_A5yhb}-Jj%i!{%4z(SjkL+}EWX%Oi9Q*rJWgTnO2!T1F{zrdXe;1<${;jOIE}7j
ztzqcN5$KlJO(Y!)NaZy-Sh$ps_#Pchy%vB4Z#(&d3IWzPicshyMAL<<$m-f`tRAxj
zOlzVTGa?!P9e4!&z!&|rEwJ`Dhi0Fi(G~Mku%p}@byizIo6T{+d6}sAG!>FMa_H}+
z?2NgV?XiMdtUEIgS|YvScR)P2J}V{7#pm$uLn$1)ITo0|lRo%)96bkSqO5;9wVcbi
zjsw$3-P=lVbl47E(>#dovnnjyy&esR-X~KMr=TdrlZ&v-1fyG)BoC56JboBxXa=F}
zgR4;Eej1${{bBpQ{Wx(4Lztxej~5*o&$_e3Wx@e>IQi&}oK!jz#EHYn;!icumZ%4B
z{BB_4?AfeGBhFmXQc0HmVV=@oLQXEor~2EG%?47yUAh!JeV2gBaR|<sQHst6H}F#b
zOl<DsMaOTvj0G`z@W5&x_MQlW_8qIBp^*87UU!qt&(1;{*$&nRqha>k3)r%-ind3)
zU{_28C*3-W$i9bDzB`y51uuce0+tyx@M9aa1?HZvA<A20iCWer5W1|TA9k_lcBvKM
z8UlA?q+lqzOU~TO!n#T&-RfSBjS-2|*I^iRPj>{B-aC>Jtq-O5qwue^60C4Mg@Sid
zWxae7Sh~KX3xAeCtKCq7lV?EM-5d}UZJ96f^rGHbKd8n1kK~xuYQ`Yn05aP~awR(!
z8<MA!y`RPC@9Yef)2^@?d?Bp=S%V$>3(>!d<%EVVp!@n{VeIo_);a1xEVG}JmIe*f
z(+)wibq|T_y<k)<y?`0h3ZT8ph%|V{(eW-<P&a556_3NRTen%S@SeLw{z}5w?!F)n
ze@m?AhU14{2T+vykt_J`918yZkFQ(rN8Ma@)37NV%E$S@?@wVMbkGF%>FgYJW-3_d
zG49{dNf19X7X<O8oRLQZF}OJZyJ92BObcrqm}iZ-t+T*p{cg1De}gP^&VcsV-9)f*
z9LarQ3y$`i;p6X8)*0dp_BYyD2UabZ1b(NgKab0F3kitKw~%(hI_fn_4$ylL9d;+d
zCigQavdV><8~0=L`fb!QCV=mMG8N;keX0LCH_|dlMVGhEMnj)ewAz(|GfGbZ9}<GP
zF93eeV_rh}Y76h<FUaHj9_S)wKEBp7RMd2ebROS`?O_9GpV1{Ko6tkQMVtUB^W7YC
zyhu&z*`23bgYn+hk=D){D$}mQWMveFWHEL;^ESwmH({r9HA&q#9hd7HqAGPQ;KV>^
zb!Lo$l?3W)*Rwll9Ys<{ODs>K`l2%=x=e}t{^yRXI!tgso1<{9GnrQ}fS0Ge;uLFw
zVDFMM*wr|T_>L$biKW#b&b6W&+j7xjfdM%~S*FC@gBR_UFg|-9+VHHCQxir*s}8%f
zZu&#@8zV?pnj`j3Rfq1~IrR1OF{p9BlI@WzXm4K!c=V4uM*Y1DjF|qgcNcr!7rM|c
z+Qds6J2-FOEb5+a33ZPdn<S}%@Ax+ne@&Z*UJV1$SMrh=?M#JE<w+uP)!?HX|Dn3h
zZ;3)XoVc1YUctl*@Syh^_<rWeL;f%}t*C(v#voOV2%;JOtD)6a0L`P*$nfwi@Htut
zy<2`z`Hxei=iPCdw^IWxe+VGCZ#7CXQ;Bo<60mk)&*qp%^JDsCgSs-9b=a1p{jgF<
zGSmjQvGe#}BYaVq;!BbSR?%<sqoB@y9IR-eXkoaUHfJxV_HPMY(HV^gTn3<j*M4l7
zEysx`W}$tk4eKJFik5<M&hmBo{G~P3sIx5?j?P|yt<!e$-FvmbEjx^5K5MCNND*|M
zR;Mo}>f->_dNljoOvhzL<5{^9)g=oU=j#Go`8XbJa)aRKG;_2xS_$>vN=f;Y@z|L=
z2s(kl`zaCFw#^iVI~735$gk9Sc_1trRD-`85<nDsl%Mlv6e=88{%z}DT)ikAX1JT6
zW&eS=keq`8MLMt6OCjrJ9`=153$A&mp=Z)8qPm#JvH|U!))53B%YQge8S^F9kB1)Z
z9awbaG+N}AU|o3<5&rv!TsEJ8z4vud6j03Dzg^0h-5yjJl*r3Q*nxQIBG9aRK|144
z(xl65pBv>$-6sV@S<YV=ALzvT_k2jY#uZ#{v<WR#dSu@55OjNF&CXgJU)6At<<8gB
zl6ix%@8NP#%gUo0s!CB3!+h#V2C%ba2jd~!FZ<Vuc|X?XkXCO1>ziv~w9jT#%g)Az
z%1fN}h8%Q0ACLd8X7|fs_8hM5rIKSONv3xu#&3E?a~G`x&1VUW{S*&57sV(ujv`}<
z&tq-(PCQ_TATcb3a`&w$O1#1g#?PE@Y5cS7b1(CYe|bgg;4SIawFIZntoLH=JW|&h
zK^nTAaG(E6!QETup+P6(;J)Ob>()qm#;+JG2i}0T$%S;=ZzJsaa}<gWT!1fqeX-|+
z1vS}tpL!frp~!YE^hCSB&o9?lrX!NSy7UH{-8NC(tMlj$T@Ms3+XwGkBG9?D1YW+l
zj^fGV%A(9xp;Eh?*8TO7n4I1W{#Tz7;rAP?yZ$3rcfSU^UUYM<lh%{xOQaxDAITXk
z34^`=3q$43zp3utV%Rz-71f8Ckp3<Xs59UNQ7-bsNlqKktx3kWTK@$`+DE{2n+Ayf
zTFbXHmR{TOJldViqaZ{$KcN?}#3Y(r6J5is#6o<v#0{JO@n*cbma=b`)KPrdlZ(Gs
ziu=mKaoCL|SdhCA<(>;s<8Q{w{FDQ&dzyGV2|F)L)hF450x@mVd0eF(hPRY4Xerg=
zd$ykf9B~#@-KAK!=otObk&kXM%Sg<xDv$@h<$2pMF!Ju9mbck9o4E(ouf`Ccc5R4L
zv3%;OQ84kg2lfu!inV_g!1q%uL-6_trYViEH(Cnap=TjJr3_SKPqLzlKh!5Am^+|R
z37S&oQ_X6pUFX7i-IEFQT~ZQTdkUd-lseOp3yCUUliHc7h=5FhF&`?h^^OzuezO*i
z$ghLo*(6?fi#KV1I{^y4YEe&P9g2YITOah{q?ru#pD0M#`9f@1yA*;i^+n$&G2o*t
z#tdh6hFQsahw8&1aZ({R-dK;%*}G_GjkIk;KC5cEN%iyN=-WJVtn0sqb}n-OyKNO9
z6BSa&FG3LavnCBzFKGAfI;u<^NF^!kIln)O7VKej)$u)4)N+TIbj*a-^)sL;I0V&|
z7s)s`#;<-`27(t)NJjtr#3a0d&a)4NzgJaZhvh`v)TzP;jZDkv-$N`vPvvD(YG`_c
z7(d3ufKeIaBgbZfd`vIz2-C2qcMw>3jsX9xNHpvUr_;@qIPT?cbUS#ESL|XP<n3pP
zFwh8OZ^Y<hrb`7YD^PD%0_$L`rtQD2h;y+m%hw$xuF2_Ca4=&&?#ZN<iHuig{Dfv{
zSF?=r0kF<EO*<9a&~&N^7~Nfh)kXqnopO+9Hk9Mgb!qtfbp}?i@Pp1zo}gA6PLw|<
zkRxsRFs!~9P1jrm|LPK`6UZ5xE}FKy;_z+EVEnS0>A#h%H{keddf^lEDBW^|;9u@I
zfZ1~{kJiDuvwmPFErk8MucP9eIm)gWLr!uw>tWM}*zs{_dtf8B%1(3s3;&OzbB~L8
z``-9S2uTP@NRm)V!Zdp!2}wwjkPM<I2_Z?Q`#s$om6|RpNh)2Y&tBE&CJ7zi4spV`
zB~F|WLip|9-(GGt?Y;K1p6C6{f|l45AbWFAFkELlng?Z|i}^ODfgdB1#zD^MR}+58
zN=4!SyaZcqYnVQD3N)&#K_u6wtJ|blWEVgV%;6#Ls1ixlx&blmEHB1oaW3phuixSV
z!p<_mVcjVh?=l<PPiWB#F(KG`FchA+XyD-TsnD`Y2rrduP|@cJ0}t2G<=tW6u>Kmn
zaoUIRj645p%v$W4(g3{+vLNs8E1<r>ow}ARCVh_%63^~C%#Yk6EvXOW6gIIA^_0t~
z_n{tMWFeS+xB+>+v3Plf7|q5^5saTHMm7BtkWj?>H{+_HHaik{yX&Z4tTEW{2*j7~
zWGKCJ4fid*j7D!4Qr<6}YR}?WIykkQn8?ae-v3kJ>RbjL;r-O%xCT}CEJK&onW*{f
zJJni}g8Z|=WTsOx+S$wo?<^kbWvLM7{O2@xe=vDAF%w4}K7(In0dOri8coszXr$s1
z=8^wE!dk|WvcDo>K$+<icL2PjpHY3YC?X!y0O{O6q}h(~9@^`{`S^J}P>>HpnMUiU
zn}M&2f+6@a^P8@{fC~z@q1{APxH(!EH<nyLRoj=O;8iU;=2&Cwr#h-xW)C5^T+ua4
zg5{4Qu;|fQP#!uIH<=tq;n8AF_Ka3T0pm7r`or}Z|4m*0XFyxbr_sa@=V81<5#z7)
zQEC3aL@c>Yq+>X^D{Fujj~y&;?}j;6_IOAbh^QTmLd9<K&l<+bSr{*PIVfWpe_O&E
zNF$B1$8_OUw#WR!b`=NM?q%b9+Bot%eNY*~`U7&n*SDG~9Wfw=ccV}^?imqhj0R<v
zTNLFdfIc!!DL4ro6qHFHMsfq|=g>osCZXm0Go-n#4l+(?phdGiBwoJD*kg>P@6CEW
z3)JD&yF}m{cGB$@K^Qx?g^Cm|a;5g_$lIq$jac^dc-ApgKjcTkgkNdxyKs1VrwVcx
zjKm%X3miTq2;ASWJ#*#`h}ULcqnZ=oH!2TzeZ7cLF<m4qt(o+^+y-8J9q3;m1DoS2
z@Oxu9dfFX^yG&Pcm^=-(1;|h>Wi(WLT8~CelW7-Y9+iws1hY;RXs!rh8rmP?k!FQK
zm8U>$gA25$rwMWdVaVfV5yy$=;I0+B+ZhMM?v4TaJE&t=hn&b9ClRjC4n_VeSzob}
zpoC@JH6u>J5ra|`s8T2@XvSZ8EAjkFBXmtOWm(objCE|{(tGP*O-u=tv`qlXt$CaY
z(=^SMgTZ$q68W)jg0BDPmfTXR2A|o`*)oQz>W!f8na{}PsjP>qZUMIBo&~X+CRKHi
zQ;WlGq$fC$d4{sNf~tIW#;gaG-)Ue{oJ`hyW?JIhBCy<WjOgu{0oNtPX!K?k^c`dS
z^e1)LccGEJucg@X(VuvAKWF@m21wlSgPPeg2H4mg)#{^HGCplQYWf+VlT!$mXb-2J
z($&OdaSr-yPr&4?Vz7H{jKZGzg4jl57|B(jLst^Kp09_Z)ENT)DMebcxfrbeoW_ZN
zuHb|>`k0o>ykZmd$i`{=*#B`{b!gIQGz#2C#wPk>ZbK$k-dv6Lky3CwCPmS?zo}1D
zF*GxMDa45}MViNg>-ZQVd0m1@>IsO0wlE-)Qtu^9=au?Xn;Rn_NwW-t6o+Dgmo@wz
z69vb7>Y-#&7NnPGf!^$G)G4S55+D0xWY!6$zm7*q^A}Q3FQ@GiZQ2{Mo8{ou!SP-x
z<9_QxiS{Y5dGwx)U_Qg17m2j9;634otU&Kkfnd_=1)lGwk%Fm*;ZV5*oZi@>>g{k&
zzj8h17AV4wQ$-MzBOyG)Lqscq?J#~7U}UBvx(b6(HhB#+HgAHl8Ls&CRv>yTk6>B3
zBOpAdAvmCr0rG}R)OUguR^*IBFRLKxm(BWmm%BpL4sWoT7L1h|v1n%Zq`K(WANsR|
z`Fjp5XFljIBAq=REaKc?@Hc=gLzjYMJ_PJ62T{{q)1Dj=Y>bKm*`Nn^z&ZtjCbEw0
z6)X#KdxXIErz!HX3{BORDWRgmXmDWcPV2ANX>408cWvh$G(T4lUw2z#WAqJbvttA4
zKPtiHu5MU6b|3Cq1?;`Sdc7S_f?tIuV>fFF+Pg;Mn^qNce6$a29J2(TiCdXJ<r3$y
zF%EWV)Z*+Ve%Prn317%c&?$`h>+cOv?|)1{oc*0JaUMjA#+l~6+|7DE8S9bni27q{
zz|z$j2A~#S$$wMXYd3H{cn)}P>ZN8cPQ#xyYp|~@Mo{+8Dm41~opa9$hW{R}M-jJ@
zO7ET|{EL%e(^OlmigLq2Hh(G`90NXmNM(0)SzaKBldoC_JVT0(9*p0XtU;~z?!l?M
zBT*zMf>&djKsII~cfff(Djn1Sn6U(IPo070g7v8N{W-PP62j09TbwjJ8T-bZA-uIM
z)v`Y$1h2*}0j<1wf<Xm<*)BKGn|1Vs?I6^s`WM%y?*#pE=Ro*=m7sRB1Y-M}1n#C5
zpcge0tcO&BX!Z-v(|u5oTQ?taXR(Z(z#6-gnKlz`%=Ldi36o;kd~qV2CjH95fmJ+W
zK0%7g1L`oucLz!){U+YKqo|zi92QQo#(b6oa)_Tpg;y_w=-C@n;r(2Wce;;rlpcTr
z+Xu95XddlLxJ%{i*L@cUpggdjS{;dpo^MZx$%HA;Z<J27^1lj%O9KR|SyohMq&~3v
z0P2<L1{3r1P!@WY3^c9*a|M=lecD4_eF%p1ZJC(*cNGW?7#k?30t$F1pj$l^eEHdQ
zUSTc<H83V)kq`uWVZci_K;@yiAob26Mh`MruhVlu-{e8U`v9E7bUzqt-^kd6?A$zX
z5uwr(@ixoNGoM{wumkaC_jkc05p8?iMgKewW?e7g<U*tvZIXTx;ouj-AC(VfH?E^H
zFNX9p{+09SkJQZ0L!h;CEeUAm<DA&57(49-#9LkirG&X;|Ftrd>J23}vkrhPZ@ZwC
zy(b%IbP}=VJz_CwBKX|%hZTnxVyjyKDBEuWGd-r8Sg|gdMSIXPy$RbxhLJwk27xQf
z>b2#~hq@QqX#0n;Rm0-Y)+h;rg8w49dr#oU#vI1ni-6cc3HuZ^;(en=Tpm#ceG%ti
zUab=P?90HCuYU`8HY-e@?qi-Z^RI+~x=4$nI=sFXjP_4Ap^>19GqN2||6hO7v;Rp3
zJpZAEbK?P6u1jy$6QZBI1dnyjM2COx(Y_O=ARg<-n0fW6cl#45`LqzsR<;tsNY<6Y
z-p_3ZZc$4+3C)-p4GSk1;NaS5;@VnHy#3dZgRNIlvSBk9YrLIIJ3Iw@nhT-i^>^yM
zIUX#d6-bt)6$W%ifY7a;@UL%!?#n6QHUA`d4>0acN;DBU&ovFwPC%tDE7EU#o<{v=
z2^OOa!G3!qR2?Wrzo$~n-L8i-#_mBKbH=JEcY>wNYtg>&yuiL~FDePQvHVaTiWh2-
z!9|aV@n9aiV=BN`T11_#gV1q@J^Pt|PQB=1(77KA(o`onf&u7Iy98c6iJ>;Bd%zOL
zf~O@Fkbn1~UOMx0Ud^C<rIGZUwm(XK&IM7`f7N=Ycal_Y7b+2BV(u=5M%7yIw9_Pg
zK36!&r59AYlw|?jZh)BiVeCRwG1YxC_P%>g#Y^?5(w|16UvCNeO6yrxXcZ{mE+b;`
za+c*jjmExlV01s347+?BW46Trt0bg;caEZHj{<RwV!5{LF~sPMKQVU7#iG}vFr+8}
zVpuj%c)t{%hHGF<n<_3(-j6LQ8926l9foAHF1N{lk&?@y<n05Btzjkb;CTp!z3~Ha
z=rz>8&_uKzvP@U!AnSfn<HCw^(fsK(G+6D4BHx4XZhH{8n8#zrlvucYY!!l?5iHoR
zgV{rN<C*Sa3}Ek~@xv0CpDc+=Mvnm33*p$_6^bhDff(0ZiH=Q6z&x4tD@UIrgMML{
zY@3X#t2?MFySJnt_k-i}>7bn}V!HN4ycB#Icc`pp9^UbwxPB_AjE=!Q51O&EBpDwS
z+o0#Ysqn(p10}my25!D4r!+F1%5SC6K?f<d(RoJa-Km4Y8>XoAE)xar+t5<<FZuM`
z64hoQhMX+LqN!?ZhN>g4D?IRBqY6HVJ`d_y^I$sb9~#qi5;fS*A4EGtTTwRaOSPb8
zKd;fqUd9O5%!F>+92o!SDu|b}j9Ox<pm#|#nvLBIeN8=Ngt<2w^(2ACuSoLB-2|TW
zGJo55Uld&$Puny6L1FY*oH8>W2P`VUDQ5!HpA?9v!Z<WC`OG?ilF8jEOiw&5qWdpd
z;@*L4D6^UlLg883ZYl<$`*t!|vj+a>Sj_Y%mi|3=U9d~D8mIj{i#@H(w=3L)`roq9
zk7b(0345urb{XuswhDKqCtv|8kioVZA`Pto;pzzPa_egJiob~Cy{vKht`wMDk7yo{
z0y=$a(6zJ~^E5d~{E-i{Y~%1le=T^s3rN_posj)MJ}Tx4nP--H!f!n=4cHe2R+IV6
zN1R3mnFro+Mi*6?n8Id=|A?3QDVTkx3GJ?gkRJ`>nC`NO^sU*)=3?ZOf2+XMk>@Zd
zJr(urhd`8Y4%jVkqZr-<?IU&(dCDrT-q04cq5wuzPsWV`AM8FB!!iax1Rd)ln71R6
z>3UI6{^=6wbbCaneuzh-9ZXACNTXgijzTRv_q*PGr_vrfDm<A2X8&Wk$ytn(tipo>
z(^5ga$Q#7J4aiiM>ljT+(f3I_%9MXrKQLx2%vIm1(%I2qaygJ?eTAgtPLV+CNGz0e
zmT^MEi|8oS0sd@V=$rVJ_BI}cfjPISqVs3k@LY&y;h_Sr_Yv4JEQ^Y{OVDAz8KMIm
z(OWGW*D2{^<+^MzdT@$_`WK_n@IAS?c|CT_dqi|5<f65`A?)6qi#C4`fp;s`GQMB|
z_rE<yuw(2hNPl`B)N0D%m7X&AuaIF$&j;dNydLC#?vvaNf0LW8CTOGLM6~7_!-baB
zcq3pIzFN-o@a@BiSpjfAG??EqbONA_1bkz3prwW}RZAGZV#`GO(K>_8aG8AXtIMIU
z@(hTg_6k(Xr_z>areg_8$@>y^rrN~`^tVya?C@jz#BCs1A5N`PJyAK<2TC>taB@Do
zCsr(h+%1=ge&ItZ;x$uPI29)8g@Wu;HFyTGUB2NtDrpV|u}KWbY##_L-`6m1L@8u1
zrV!h@7Ie&>6Weo$lCLL7dh`{L*9T$pbP-NVR>9&Aj(8|C5(na=LAuTsS~U!jce{z2
zjM{~cD@|C>PY;#6Zsa=ts)eUl0&wBx@u;<M9jDc3%Ke;_kN2K<qi~KPF<R|Q8z-=i
z2hla?7;6L4Rs-l<%b00CCSWt=CY31e5Xk=M35@chvGL9yn)sv%!@kaf2~}BGqeNJ?
zGK2PwjHf&|J`t-ArxiA?*iP+eS$7U7hgMN$M8Q7CE<wT4FI3Yh1MmB6$5{Dx6si3%
z<!_eaWtJi{yHdq@?lT6z))2s<im*`{hng0XK$sK_{Bf@-@0&X3My{e+?f*<$CO)Gj
zKS!{g?Nu7nvIeaWx6qC>BP_Ueki0PT2kT*5Al>OKcy9hcls~V98$E^SIr0PzYY2x=
zb?l7FnhK5&&!XhAfTCtLwa}n2a7BakY%2xXw@EN?&kQ$?ONHKT=FH=y1QK-_^ZPyE
z%XW>!wr>})v7we)9H_<#{-JDk5<&E|a^yy2Lg2V$);Zcj`DzI`&+rPme@UR#>}%!u
za{`JQ)To%BM~#%4;lY$R)T)ia!6nYnzHATFkGICQs`cpmmjT!XO-1Kbk?_Gc0C<6i
zS!Do%>XF^Rw|Ar)_9F_NLb3fnH<p>H1L12=IL7YiMt^VU`LzW$PS?i%9SuZhbrDHk
zn+RMC<L<V3(w4zXWYjs95w}Q&tadF-{>``~`BD(6#xXC@W)Q9yQm^u4G&7zCwwjvw
zJCgNZ9jhkZ=k|cG?mZ`+u>reYXQP&&kSiE6lJq9*0d)l@$R1J;A|FAu-s%}pzVs5!
zR%3tewPtW^odUhFmQZqgIUNjU`Nj&S^D~X@)IBS-aXkiF@)dN{n<}&n*p07F)nJ%w
zF)?<CKo#>ixa-8Su486G?(<|SRo5mu4{~U3iI`}ea3fK1<tWTN%V}M27r_1)TKGeT
zo;L4@?+FzM@=ic+{x5PfI2Pr->**`wN^*Wy3HJ2{gXh?2g65A1K0W|Z8^2LY){$M?
z8VjDkQU$|1MW}wom0nMqh9z}5AgbI?_2z1!jhxM3yO>AxDbvq;E3w>><!|*a(%$o)
zz)zS4wSf|-%sLMt-nEdN$#zRle!x@k=FFTk7`G}CEn})lW!hz2pyh}Mva=yVfWY&%
z<Evjvh5tf4QQ8-Y;mkAQAnByp3%xM$-V$(gI*qe4s!)^ZaLW5asFG$k<*o<7sC)Sk
zGd&0n-3dg;Yv<8&jRhXI(L=L=8PJib1)|q-#tGM9cXK|xdY+E*_l)Va_%h6nLDn@n
z0itqZP|xfNcx@hvzaJIibOGbWPh|IWSuXW;3x@h^Q}hilz=EJT(6Yst#I8?fZ1EA$
zr>a7Zn@F+U#|+w!1MLe9ChGJp<t=<dQLi2%jjgaiV-+Y3`Vh4|mho&D3VCOBQ76_F
zT2hYUz0sv;Gk*@0>6>9}!aW+hV=3c{o=3-u)2K9ZF|@awAP%P+5K`r&%x4&m*JW?(
z!)b`7yTI(`Z{lQF2@iIrf!2~Hc<)w$SHI6fciU`QGI$=XzA}H$TORf+Bw%jjFM3S#
z66mNuCNC;-z`e)?K7Sg6U1=^L&R|~CjFr_HOR_<2MiN~Tw-a-D&xmqi2^hzQz?uYv
z@fBA=60OPU%=Tbxs33~Xme}H@Pi+JTsLekcNV`P<23RPgp<6ldq83P7>@4X1{ZY8u
zc@A0_H>3DTIw=_Nf`=cfQU4(O+;=0qQmvr{kuzY?X$^EuInMg)rlRJdI`EvSz<Sg}
zs0`(%7KiVV4b11xi_hd6Nh<J=q5^h~a3q6|;uwE$6Sq7n9QPj0ht8n|ApXME$j9t4
zHa3CqM4f_c;*K8-bkOc~6co*3OqsA?EIY?Mnd-Ml-=!!)WgVNp=B}gdaVenWSVg}E
zQ~_7f1btO5L}tzSRDu=)kMl_Vf*O#rec8+MW)!J@r}C|HNC$aDd3$$4^^YwW*0K*0
z_h#dZIpH9;%M<L}EyXgGkr-+bi5F6wSx)x=l;1o9%dTgko2wqSeNaHr&h4br#DEz6
zu}9BkeAahaMa&EnO$*}A5u0^S8F#)3ysd|VXpbAo_>{<U^m|GB30o>R-%2g-?Wd;C
z=3vIIDiEDwxp)_*z1v@he`lUR=`S%2x~c*WxmO^{X(V)4OCZrpo8=xaz}176Sau)+
zcv%I42bEXha185xv*oDVayhr^BFl2?<&dzk6@Y84!S}yd%)>y4`c(#lTO+8=Cm~g`
zzl0vuEHi9<1J3?bLGKg(^w$3zQ29h4@f<#rTDLP!v%5O>vaEp3egih&tbu{^XIK|-
z4V`9-=;>yF0qMm6ZqqPc^*luMUjaY=R0v68`)(sGfc+Pczu%FT=e?))JTXcbTAHW)
z59zp^NlWf_3lb-AIQmgK3d3T#FJ3Ia_rIxR^#rD`n6o)!aXuWrx*dCd%mDtIJ>=`2
z8ZbMTK#fLEhGR!^p=24$YnN?98;e7%V>B9npGb!-_m-pOHg7P)Mj~%vzEzV<V&uv8
z{=zWOyt50Qa;Yp|)J*)GMqq~lf#Rbj$h%x2@Vn)Y^BdVcw(JVz9+sfEQ;8nbc4jQl
zOyKAAGiJ{rTA+0jBGzm}t;6pG11lYggD&&_r;fmuC&#Jh`j1>sZ2(y3bkibf3Us9B
z(Z5%yW6NeE+Gv*px!-hf{naC^hwLpaxgiDjL)ic<gJ|>hGgZA%Dd@`oLG@!O6kD)N
zto)f^;LB0yIB^6LS27K`k4Hqwp9Ou@ONrID^Qf+Wm2no@Im5W?*mqkERC{%xOcVpc
zEKhLyhcS|u7qUG4I)O=81Ng2Oq&E8q<khF4Nh<RZ-%!MT7dPUl>dUD0pqn#3xr#9j
zR|C)Fm>_q00qd3K(Itfi5URJ4^)&y*smWFWzdMG!>Q_Q#i)bh~>H`lJMuTvU1DRu*
zg!ginvY%sP7G}5v%f)pdEgXj7y<uoA3c^6g&6qKs@rc_FQg^Z$#^jwr?Z@ial@bfR
zdJWVpg-648M4{(kvw+XEih+<k;u+ye%MXX+@5@QBY?20EbBsiF&)KB6;4dPLnFo?J
zLj=nT8UL?B2?V<$K;O5LOt-GVO1%(_y}k(cK1+b!q%PXilMT*WACOy5R-*zWfd8!<
zc$9U^**=biqFeh>V<Y>%lNzbABbx`h)={bFEy6o}8S@-sLH;z4)?ZJA-r(6_b7Vf1
z<yBE>$1(D0jXz|cVs}uL5Q`?~60=KxOwH6|sApvdQ9R;;xgVpUWWhAjv&szW<O1x7
z-wZbE7m?w0%&VcKLdsTTgYR#oik(l1`?)!6H*gj);V<I4xQht4pP-nd29uO(q3lB}
z^FBI4dxbA3YUo0zeJ3eCTnT?Ni*SWkBP#O)>8A@ETAbPe4qeF*6vsNxt!gp%Pa){C
zd+jvyM(ah*rMxNb5H)-}np_CQFFS?k;~W8nn#C}nZ%Rxp3b0M90cyonAc&3zhuCmh
z`;Bp4p3Fw)f095hI7}jTi?DB68R<;k2OSp!$e3;^zF_luzef&CouH424>GCZEiaHC
ztt6|)=;Mv0(@^PS0rar9u<ULQxAC?VV@K2rh8+&XA4v@myW$|*lU(71Ep4>@^<mIE
zvx1g{+Auyf>lJ;QL5AOv!N!sr^j)V48%(Rw!jh6P-`Shn^_oDv$pZ(aj1h2Bneh;L
z$lu*cUu}&c{J$uQX0NX9**T0varR&oJRSynTUqxIV})4CfS;!e-M5aQA(w|X71OEj
zKkun5J%w)ERE3^mGvv|e>cOldu&H7(7DSiQ7N^Z%{HqT2GbCgjnU5btwYZ$`he|Ki
z;DJUpI2(-w`KS5RTj3ZCuW164cg3XrxhvIQ(@1h{|D%$(M)+7K7t%L9pjy{=g72^E
z<jl}(i1*@&uy4K~F*_aZ);D9x)IovcWaf{$l>#3Y79lU}qQKpzk49uMuDEF-6@8sc
z%I^i?`NNF)YjBJ@y^O~A-Fc{UAs)(?Y-4#`Yb>apMD|pcpuF-V?7f(RmqhF{=rS9W
zW@o|ZEm0U&k`9^=!jXS(F6%NifQ}WXA*j$FgZ#3o`Z6o(Q2v={>M@N!=m^n|8i{*Z
z2N~~rh*a&|d7?Z{2J>#2pw(ds$eqm4^INOH;=cgkhDq`3wuvY^eV*&|i6e_|<zT_P
z$>1G#0^!DRRJgktrQJi}-yP=odb|yGIn~0+wRLFrC=ZxC0%m566E^AyY)>viy{Xff
zzibXyQf*Gd2SYLOrw1f%Pa;vq0tk;-jQ#qT;N8C=pm%aU#-11nt#0GdMod_4Cl-W%
zZeUTW0`@$<K}!~_04-sTK(FXL+XXprs<-cw27|q<laA$8My-ROq)4Jq3sLXmD3Aw7
zV9!53AiGaE<x$s&?7kuWXLmg+pMFd%r3%m&FQ8s~GuVCnnHUA1rla=hqipj`0dM0e
z)5^vKNczlXxWrnhR6C2R<Mt3)oDn?~Uk8%8O9ZmRzJjo&tLU0ZQH(RN1RRqzVd9K5
z?Ce}a#bXYDn&)*?`!1ym`UsX^VLpeRN~yIO+dcLFM`dDNY-fHav%imn&gI{<rS=u&
zSuQp8c4T|IC<4NeD1rIuOw?7W0`v2W(W3q!jNg=r)13=ogh?b$UgL%eqpIQMmMd8A
z8jQ~$jKS3JfjBR(9vZJ@qx08x`uJxBNK<A3zbk{PXl0`4ov~o>c>rj|Rp7Mi7twlb
z6+EqF_oPN3u_#n#8ptA+^|J?i#zE<HeocfAY?v3;m70uRgJI4WiT4yO?7S_bqKf+@
zq$d<1;Sg9BABLoNGtlh$5KgAN0y3C!p`c1eWA!;m?Js3LYlPT5d`aG}G)BKA#o&9&
z9E1ZoTu{toA~U`Lyyd$%yXzFbc$`Ji(Gl$Dh0Svx+(7iZ*R-U>o`fx10XALCueN3_
z(Rnl(9ON9T{bF;LeLV5wGxjb^;Ny!{YPP9IkQvN+q<Af)V{$F@@ZXa5yE8~oY#gi+
z`=e1B^T1Bw!CuEgFdM4C-Jy%H+-WbCj3^~~TUaOLzHG4DS_h8lC(scKh<cPI<&JN~
z7WpuA=otyh5k-t4umF^WO)%Rt1AEtrpxv^Ws)l_h`jMsJn9Mj_M+d0dy605$C(Hj#
zXPQl76}7y&nEsEaf>Et9ESqx`#XF6m<?Rm=zC?ymyE34wZ9E<`VDmEXEj2#Uh#A{i
zXJzwM(!OE{9b+4YA7<x4W5{D-(-9(QIl2l$yV;v-qA?Di7lBPp&e-ud6LatTfr~!N
z?xcBQcF!@~IdUc%8iYgN;ZkCxqXI4OpU}$GOL%L34Zb*;14-Rlcy;j>{8D!acV?QQ
z-S%dXo!UX$a`o}0)oQeMoez2szmS%?Mk-D^!2UgbMEYVh6kW4`GB5U7&>bZ1|2`y(
zKODo}xs#x0Lp;%ONM{VGe+8o^*FjR51pC(tAyM5QwixT6=D|SNQ<{jzxv^k$vtICK
zb2IuKjfCd@BG#>tf|WYs@Qy0Mka1U`UX|U?UxtAZ21A!z4+cKIpt^a?A7<J@jF`XD
z`%Ewtjm|?0zJ%mv#ZlK)D^dSrB+Wfn2kr}X!Q@~GwA5*lplc^!W<)ubTdiaMPnIEB
zkb&m=r=fdm0f{X*!}7Ju;p#1Gyr^l4?e}L>%MUCkw7CiH4`~AHOAEkg59@Xd*axCl
zHk>4*6nTHU5x*M-C{oa*EqM}LICKsU3}d~9cK=il>`?&U9!)H@vc~@OT%2_2DrD=2
zVC~!skS;5RfT%(=^k#lH-HD*JYb1yVkn{n7c$6XasWn6)>*AZ#7L4%yB2mg+K{bu+
zp)>LfNDYm^)`@v`V*V2N^{~2W#;qO*O{JnbF>O5)jj*>6W*JOC1GQ5q_YlDo;)jo#
zOYr25THsk7sqV?=sP*3)kri>EXGfdh*E$ZpTTg@Xp4+5V%yP5}zv#fZIO^=D24b^(
zVw1)^_y0B%g)jx|b8{eF2cA*knDYRPVPW>|CZ}juLF^g~K}jM4#~1nZj};%~$2W3D
zuQG^wx(4fgb_dNB$>jX*Vw6AhB*|_y;L~~@SFA3^N|_kEKb|H3>K(=IqO({Tz}QDC
ztU!`{i+dHEPwk&vf*zMh8gzRv^sHfBMvx9#mE)=19}^h3FdL**qiAQM1M}82vQCI7
zFk7^Y+>5io9%IH_c)S^OEG8lU!T^!*XM*Ulhk)-AkD`x@VVw3>T$Wdck`X+{h}s1D
z@R9O7myxcZ5b*zZG3o}L#}Q?E_+RpA9MFmZ)t%}rKRp{lj}q3W#<cVYFRA!*8R)i6
z$7`s9y<N9RiJJkPF-3~1(_%R30s-l-8;U;!mx1@=iRplBCENcTfvcI#?9L41v`%?|
zZC^MHn$JMn92E?DxfouVr;zduYH;Ee(@;y?!Q)6Yh`+MloUM%i_df-cCm)5;$JysH
zF9otxlCY#RhikLg4Ntd;@Q`gGnjDM3J^$IEb&Lq?Hn~B+@-7H-JVMrZHiGIFRS?R;
z1xk%`N#~Gj<im147W`6S+_A-^^JzVKdGI>+-l#$xh{GuP-{dQsckE(ZplAAGh$qcp
z_r@5O{7A=vL&M=c%V%rt@B`1=bs$%cqGx^(c79#Nkja@~#IzXkS`T=rGaf&DU4(P`
zLh#QK3tYo%hEaD$qr+ln=y%B>d~+dw^kcL4WGmP>l4%v|Q}K&Tg0;+>CQqs3WV#8Q
zJg=KO@JJQycWuJErRA`9`Ye=H_Yk9V&hVJc?XI_UsMa-eNX*hlTe^W|Q%(6{as#Rt
zSw^R2GB(JS;ZSucAJxXj;J!IR99R&7&SDeLQ&J?Z<1W&g0##IqXMD*FJ$P_{VtKm`
zNShzg`!!6f`ZyW+9^24%Y8cxEA<FDF!uW~F(6)%>L8k6wy!|v(eXE0(|GHA0726?j
z%o!AlE4U<n6-GJKL(k96<Ri;1w4|rgy+fN&Pbq+QIoCt_V`or3`;pVSK;TuVDGD8Q
z!0{663!m(WX5OBhx8i>UFE(NiJ3q~pbMcj>23R~%LT}wGM7Aa0box3m=xt|Sh0sE2
zR6d(Mo)0Cj-WE{rNPXga@)jL%3x`6L6dW~{W8F8KV0%&tzCC;f8(VGQRo_r-Q6Gt8
zb?T6$XS27`5Y+CUjNZq*q3j>_#{FOp53(3PnO6ZF(-Mi6#}rZ!E(7D~*&ql=MfrzA
z5dT}m*gQgF^aF^in>{)>?IWrOr8H^UE>uoVK(D98%u8ViI&c5bc{fzhhGiPVj>cm5
z!Eh|e`$O(;tO483B-U4P4f8IiW6xFg=X@~3P9B>B4_u_3+qH@H)gsLMdm*-jNzl_F
zgR_jSB`%58IA5TJy?radkTRcRx-M|TS$B=(E_J=j19jhdpj3MjHWv!f%TtB%y^2t=
zE|a(}UrEAd3}rnEr?4cn3^xpFq11mfXk7P)eJvhn@Un?9n?8`=8VfLPf&=nHkD`rM
z4%&RQCSmPOcqMWTmLy#P8*^t!R5Spq{2@r5Gre=|FQRH?4RS9j4SMVc1Fi04P~;Eo
z@jt03XR@jDXMf7umB+VK*Cic$0?C@|`!VZu3<e$y!cprJP$eK9Vl@=0Bhy=SUu3|<
zT@$b~ZVgpkJ_;&sM`DY?0{YtMBHA>#pr%6xs833QyQ8mT^?^k6DyRU@4|xQKC!n(J
zd>HO?9_8z%LBL}}3}`8Yj=%^|DD*_`&1Fzpd=>Xr)PtKH9}niw!R2Z<KyB`Ju&Hh#
z4ri)hn^g|BjjaLYF=jAv_I1XpoCVH~$60SeTeaf-V$|{-5ELBmA?ZV!L4DCw(#5>W
zn{~FKsK<boTvsJt8WM0^oe1vq1T3>Zhl<;L!1zQKvV(|bXf=UGCflda{XuN>j*<UP
zFTva!&1A>Rt=L&O41}L7!0y!*kbny9(?1VEbGvBJw`|%sd?yBNb^+}bQdFC^2qsy@
zgYecf^2Xks?ft{ar3MbQf|Rgp+!>Z<UW|n+&5)nY-aH>df%kMlwajrNxMmyz{!0})
zTx$pODY}E*wIX_M^9)qJ)<lGwhe3odtL;BBo}4ku1q!#2!2Vh+Fms~q%h<bICxL2h
z3lS8k<<ODeH)HJXBFanGu3o-tI=XsrM7T?xnk`7EwqZ=14#gIdElt7Ck6f7tV;}39
zoB_Q3j4L`JgX(OG1v|69Y3$tHRBPf~;7V&yzw#jD-g!c1&UC=Ab8*1Cd7bJ9GM`Z~
zkMc&DVtVN{@OFqo#b5sTuzMvMNrxa$m&b2Z7Snwk>wR^JL%Sh?;FuUl`u6IA(gPoA
zv&alQwljX(z)(zEE5X!B_1Gc1MtiogH=l4f#ZD6lT&sh-lOwTr%3hRRGUhaGc0!bM
zF^LTgBvPB1;Qc`z>di-E`-JIq%HIhrU*1ZyO>!Z-_B=jvxQeq}6Y$CZ8rU0q3G<U>
zgT=;R)-B~gRg8mCYo95N70xCia*2qq>oZ@_Pr=}=dcuVlq5E)uViC9o?06=Om9~ep
zxv-A48!rS#<GxX~)>JxpG#>b-H;HKUE`eWZIO?b}@0_h8{b$QWd})0Owf|zw5ONsI
zwu#~JJUv`uUx71KPN7a*DjgWSPpk6}p<Rj-B&!yn_gi-=owb<Mvw8CT?lmZy^s4%N
zaw!(<y+#x#rl8{U0@Pprgm#V!V%?XP5cFaLctm7_&X*g|IP4MK)ywkUg-^(R>vHgn
zv_$oh3E;_vb1j7ukYv9mmf7A|Me4Ay$RB#&vfiWVY2dB2oVxGvLD}~O^wa2Q6s<_d
zmF?4+mbHX>4z1$khf1m3;*VfqvMciBp@N|6Jn&LA0qHPyww}8HHj87C*Syu#e;@O<
zTUNpL3R9fzz7ySVl;hW@*Fm$=00vFc(DUJWjNPR~wFLc~jn@nEb@x^1J)Z)Hv@Fo!
z>SW+so*}XZ6@k9TTIgS81CBndD{FxrmPBkIZR=A>yPF7(Z7hbM)R)vpD~ff$tgZe%
zv>M!vR)C&iDEmyv1S7BK;ftm?*r(x&gE&B2mUR&C^x@cab25H&3`OZK1v=@32y^uK
z*kbHO!<;VS!0=C`rdAalzRbn+FLz03B<licUI3j}7K7|V6(@P-11$#b#OA?T>gsX@
z%r>t@>5m5VG)NVgDOU^ha%yN_<}blrGj}X3XFdfdW#kuVL;emUR82TbZ%r7B?K@VG
zg4h>SZ}e2Kc{q>t3k?BP5(Nidtw5ux8YFym5~>TAk%Sk8*pl5t@|H&6Ft#V|akVGT
zzAc2GA4^21(#ZJ9C$Z<~A#gJ1AkVo9I^Jr+<w%N=BdyVV^EK#sx{`R=l!3?ZE3m@M
z2xTN6V=cDOfn)oqYPVc4u8zZq4<Yz!T_AN3P6V&Nvmmyn7O^Lhibm#eY4j34Bx~?#
z&tY^ejfZUSKrlO6$~2Y&IPxFMLC?qoMGpy;nAHfRBgawOMXb+MGMt-b%Gf1ewt@a5
z2}Cp|gKAhL<zF@<-z&DGGQEL)bqRvE?A$$aqmk_>_K{c5)?j&`2o&eDzV*IiDDGuF
z4ay3FlD!ec^XMcn>U}}9G){3{w*yGYlsFpNK(J?^7**%8eB9?6LCOP`;T|1DKi!>z
zr>Cpn81QD=c_YdP)u>Xj6%{=r)KPygm^nP8d)fPBa7r#M4P?CjiL>cbZX_yeUV-+N
z>tM}4HMsuE4b}k>#qT(22?Ghg$okr=Xt+NC&Mm5B9b^3hwcpdAs3e%!t<Auo*TX=b
zbre3#oPagVA6YLp!SIDkFiPzN9L{AK`YHM($j%>RzS-64woUl8JsFEL0-%LXrxINQ
zfs#58dryjBa{El=XUu@s<xx;_s+G&=7qTwx86+`%Ih6c#fSxV76gLKA`{QsJ?itMZ
zno_WAWp`04hb37{X{Y@rX#dttPByY$n$_j_U?<ZKFT0sqyQhNXVO<z+cLn>x6d?Y_
z2K3t+3m^JI@op3I_Bp*~e7;#Q<iFWi#<Ck)<K7EW?XG~~mZ8{K<O!PFLx|3iIZ%6I
z4vOTd1Y;WEbFLVBW`3X(-&JFj>Uq-V(ZC76s>3*P41)~Dz<+~h(8g*F8CWgEpj~Hy
zH`>kAWY86IH4BL=cL;JV^ugl!E$ZlYod~8eHtD$&r1^d@nC)3jQ~j<$kvij5iC9)k
zvKhpWB0yfcU+~xNY?QrtO=Dv`K-J;9K;-O1r-@Ia+5R+B%`r$FT`VELgyl7V^bvEO
z4=V8zh!X1qc^p{{s(m|%q;)cQ9%>+er4`|T+=z(1c9EjHyFu~RAhm2BMzwDi!jfB3
zu<$&E<TuNKyhEb9`y%7&m2vGahBF51T#PD=1hwq-(0=hJNpx#sUAT@gd*(^j)ow&@
zud8SLC_Tnfe8}Caioo*ko&e*DvCH};^|xnC0{cWz^|oYP8VUHrrw)B;8(Fux4;V3y
zPk(qFWItu|&aq>Z@!jc2Q%$x9QKlY)aj<jQaeOdE1p3_<X-myoEU=kC8@CHV^-nDn
zc&6d3N(Z!QyvZKBwKQ^MC7Nm2f|#b0`hfGey<;Or9BRh=N@t8?{&1s}uZbG(3W;U=
zip1VR+Hp0JIDGA+BArrG+1Mol&zcH>G;}@8OJ;e!J;PB}9>7>iwwP{zlkVJ<fX<br
z*ywBn&Msao=MfM6<#BX-l{))7y|@LgshB>AWu(0)3cQamrLxWpfh767Kyo&Z^oMz&
z&(=$L`u%!rpJN1)+b*PZgc+89jwLdAB&fcS(j6o7(D%YN=%hOsV{rypEJ~v#K`9`A
zlEv|&-d8;wUV!bJYe`_X8p`GOxvm)mlJ+@a<*sNL)jy2o3qEri<8m-!MjaHn@nHYk
zXvPkg!qTHFP`L0jClXHP!g_s4C*!t9Ca%Ecr>`@n*mw{!pq|z<CHOwU8?D0D<EuXu
zUan&I*SwNy_1b7icYIIHYR{sh<7lYe$2^2bOj!naKd?g@9VpW&y5iyP<QOn>_5;3o
z7<Fbl+;~ehY?Co&aK|XPd?W%@>jwl1OUIy*e;c)X6^_Zz^T5l^3KU;c=x<&I>U0XR
zIe3tDrc8r^?JU=DIRv!FOvlq4)6OR}2?p-R;ogQKfQ>`JY6#;)JhA~B1u?cCJx>FC
zMNm}TOnAI@j;DJKbWZ8QxrFtYes49@ebZ<CKYM9$&}C5jGausg8yWZZI4MjqV!5zM
zboS~7)SgriGx9W8pFt5n>~#t23b-ri)*p&)Jb%pN)k0rqE!`03fr@{-g5=3C=3n~C
z_05|JZ`}lFTU^chjSth1xDxRGya2ra;gc&I+xx_PU^(nRMCj)WV(X3I*%(hp?JWh1
zj?vJ%AsTvEw%=yQXpCJnz<EF1M!aj>N%;!4PrJ{21TQFRS?_`N+FWS$^k*I{J(jnB
zz;#XUrC)=puq$&1lD}9du@k@qt8=JO5sx+{Rxo*qHcE$|B_2BLOgpIxU3$N1(cDB>
zTiSq{k`PF4nu@}e?o>D@p0jutMl`=Eu`nf@q0R-;K0{Ts8Q`E~(nTD8W(gYYzaiLC
zF&mAprP0axSJ<0P2#h3&eZ$I$a2DZasg`21Un#zlTm@B^Tf{3{PUb0J0p6&|q-TE!
zerFzgoYsUTqYEKw{VudfEua-km!rempLF~S)<HK}KzKLOrT2exF#KLMUMb&zC*H4R
zvy?vPJ&gIP3#+Pa&&V)D&lKJ5W`jj!JP`%mru;9t<c`w;+-$WH1+^Ez_4IzC8ed0w
zwv2mmxCzcYu15ElQW7@50oKg$$Mh{$;ND#e)z_Qx(DFD4^3jHt+GH%vYeI41B~sF!
zLL`GpG^0-+0oc39Z3?K*2%$!uK?0)-6QN~s1&wW4Ny|0zX_Pgkyq1%Ey-my$FExfQ
zfh@<_Y7LFE7D8<0A5!9KOMPRLVDNMesXbJV!qdZH@Zx;<SEUv;e?KB&N++R5o<#In
z$XWPRk$N*fxlO(vl>Gje233zhy&Vo9dv1+deQiV->d5(iyAFx)H`!pXgXza4q*HAZ
zDE3~U-s4BI+48MG+%z8A71ol0{7Ili#Kd(2+d*ilf-Qdz%2hUk#*A!;$!7b%OCnUg
z%={FC6XC(LXfSG@3cSz3rY0l^dRit@YsX<&(%8iXv@zc2^Dxw!@rV<R|JStd;0Cmb
zI*3M^>xq_WIPHn(pnA%Ds<b787K9qm<8!TWHDgQ32UiLVC$mn|S7P)!8;2@O8B>F7
zBC69(NyocDmJhUswoF}co;Mrh<s6Q>7=w9{H*nEAOXT9&`KCKsAbvQB^@&HYJX|x?
zygvf&+p<jTx6`0gyq0Mqr$EUn5ZjL*q)IpEQ7PjYnL4u_xWWxoo{)`CUzdaCIz#CB
zkptN>H8ix|jd%UhK;@1MC2C6rJkc)7E9f@Oy*dn&m=@;x!5=rIvb(f&nV{Ti1$=ui
zV|`wQ#PZ@6*wJ_b{hmZJ|LQfcu3HC2{zXKwvzwkvnTQ&ZqtWp><01Q(&{3mmuq`#0
z?kih^&d=HGAMKCc2Le%Xt_$RDJpjM<q+stqYrv#3jTUX{rt^y@q2Z`9c*1(O^&<As
zFqK`P$yoMUubMd3?t>^T$RguDmEmBaCTyQJ4O<>&5t*Gk+|%~L;RhR_oN2IJO%3P`
zJwsJf)u>0?1(+Uq1A^RMP;dKcn!nWn`+h8dzYa~t&S%NQtY|OmcPZtBdW7@xH->*g
z=P*ra8EkSDBY%D|h|9J?OEI6wZk006z688q7tye<jG4t|x*#bJ!pBeyySWDv&6y^z
zy@WJAxj-zllEI<pEfGDi=W?SGNxF#jukTf()@j<Pza)f4usx@2ybL6^--+DxxIpa+
zs~NA4MIFO7diRAC>f-mI(Bm(G>P2rVt?VUcKhn4-eYL3i&t<6mQwb`iIp7>oj7lAs
zK{qWDT^?(ptd{wHyOYr|Rv9fXSK)8gRT=g?3ZgfNQIz7sjV#t@yO+O6bPVf2S{;IR
zMZwsw)<leJYf;dhjwSq+Wc&BAXn9JHR+jl;OIik4%deAi4JWWX*g)D9hLNXLq4+he
z5qC7&Vf&}Ez|TEPF9{a1#07iroYf>tQgYcISqxgs{?HdIV^|KrOz_S`3=T6gsccpf
zC}ah|bmD`mle4)8?qYa!?F8oXb3nRlG#xbRr~C@U*vOB>qxu5$jiH?AloqiYFQdgZ
zd@P&R1WNfX#QRG!DfbCP-V`N)p2`YX6Bq`aJ`>=|(^cp=G=mzBj(~pQI+z(E#h}ht
z)NG@wKxyh~;_&?<9-qd#V9qc`*`L+y9udMDpJepd8-f1jaX8=_4`zjL$Qn^G7#o#=
zsv;1+`~PTM66<Wp_)dF%4JUV5XQ#xxU+|!zfz3D9DNlP9@mg=fzMjkvbdLE^4OvE7
zlp;ts8Vh{3PdR0~3$@HDh|WHD=oux!_GN3S#YTO`gH^?@ADdzMvS8MO*h~!$HR6Yf
z!|?IvRF;!<A)bpeVgI*f$WN-HzZV*z`Hlq8VE&tIGYdR>Oq-oEZ5;oIF8H=KL(A{+
zD4Mg?l&>lT%h@c$xT+Gpp18rMIZ<FSwHo=WgNbEe67)JP0(!s|M;^R{t8>$kH{wvW
z;`WCmsLGR_$){Q0&uOYQ)gQ}`ClV9uP~sdDh<_D2;~b$W%4g()YxG(uEG|T|>y@VB
z1={G=y%b-e1K6pttigXrq1^W+(K&vXai5|Xiy#bJt`qRSCZ&UYji50+9nUl|4#(7G
zm{5Ka6|#$AhM4)aKj;y&gn??w<&y-&3B=TAGKRd*z%6VxojyMdm!C=nvlP~mIO7BD
zdb$`M3a+#M4<3>I7e_kQU4?nF2DBD02YDlMN;!?Jci?Y<{QiDH=Yff&<jB9&B3}{O
zR=p&}RSr0Apb-lv)PdcO9q8d+hh>3oc<A;<?9!Itu&MPZ=8nQ6ea39-QQ`)_u(uyi
zPAn`hz|()T;f>#N<d^OxIYZgIYd}hev){uz+rb~0=SJn}|I!7!Porec7xL@cBIY5T
zMeWK?qLWfC+;?fh&%tSE&b}Ac78f9PRyrtJT7hiidqI?6J#jud6uORnrrEmM*m-OS
z`pnA&@rbd+^FLkm^zA0n#sD08hUGa8W>5k1BNt2x#C<|7%=xnm^IDtnB=Z^F?_`}}
z&yjqeaR8M>SI}zs5)^8<KxINUw0vo!qpk)(;f!?fHDvzYmyH~6TPb&J>J`>GnSmXB
zh5$|5ac=)nR7pI6VLAVi<1FJQ+M)vwh1FO#GX@`C*oyL03qirlV6=R95L_RL$U#$<
zfph2~p1Y4h&7M%?E9^kt#Ut2yE0N{MRe-;w2#mM@>^rjwMCxa&CF48D5ZH*dhpf=C
zlKDp(5@D-nB8piD@K^Tc@E0va2gdE}d8ZC2FaT}FWfkig;$6K0)X5B|=|7ksYk`g6
z{`y4p=q-a>nH?yG%!Tp}KM)!|BxbQ&2tURE|GLAN56;(V*t#g9?y!z%Z_Gs>%~V)p
zvI~u_W`M(wp%C^igyvsck9y5L<dM>5bY*X@Hc1OnkNH5-Eg17u{Q~NpD#D)C=i%gq
z8(`aDj-GuNIN=6o(!Rilbl#Z4&dB*>?$t`{nU=)*)KUrmVJDq1CzA2T5;&XepQLH<
zDC$r9N@X`Muy>CYsE>02KI<6yvBMQR=PV$TwTjU0kS?Sv8$jZ>EFzk|Rq$y_2FSiI
zg<~7yuwV!4W~F<f<;_lTJEV&}duPDj7&~+@QUUvxQ}~;`Z;Ku$QkxZXiR01zpmr<~
z2HuUwu+InBdzg6uGG;+Z-YC{P5(e=u6cjJ1W4rlxVsdCTY&<@SeYSqk=Cx7Kdtedv
zh5q2|#TQvWh%!XV7o(Bl2Jjr51$J2zP`G(G(ch;HWB!CQmf$P0ekNdIlLE%vXZ@@X
zEvQ;UAQBTVEZak|uv-T5E62dwqLqqf{ie*$N;-!*Lit4%m^iBnC6X0nm6aGrbzHy!
z(InD5vKB*hW6{+@3}H)Hhq%W{BF{7wv`;DH`UkYZNH!i+<2df;L+0ULrcGSRC1{p7
zkMjSPLhRWPPW!tuN)wVvw^|_<nEyrZY%{@$otoIvvWnOwq+p504-)jO5#UKOY4ckH
zdWYT+PrXL0oDvDHhgGN~h4EfWrQp4{j0S&CN1J;rlQcDkI?US!-cN>tnSwL%ta>H5
zF#05VFB?V_BaT3re-Stfqp9%bK01Fo%ahG2N3ZBR#DjSr?k-||r9ElPt6^Zui%Y`6
z@;n+eq!FH9Xke_a^%(HF4mC6Wrg1EkZuia*jc(2*&^!zb-tqC-z$7$s%_f%(7hqX_
zDp*vVXAFiQm^;oDE54k@D%Bdi`(PI1nLME;)D69w+?hZ6HMNVnfNejja8h;}wtJcq
zh4I;-`R*!FCnaF1R}Qk#0EN5>7%LlzqTey3r1X!VBiI=(7dW7%Qvw9?*5XEb0S||B
zC=yea1-eI~Ha?)d=v>^mzY0wLsiIfb9>j>yWtcQD1`DFhK^nP=guST()fFNPN>c&R
zIv*%FzY7*y97K7CHFPqc)Yh%*vE)P<G;W9^|Hiug|J_0?ng1gF*%0{6zYftCr{k$j
z-q`YkgMqatXh%&dZDTz(yzV)Cqn4|*{L%$<PD-Y(o^Ppku{lb0j+5Hm%#$@`i>Xb1
zzaVhiezf-UV!yW<<lC#DWR@ZJN!F6(k~}E#D5nY+<3WD{^Ty9!2o9D^e<&P^yW7oC
z)^L;tZOWmx*B$T^J4YPz9ursAshAsb4$>o1P;%oXSF*Df&0IH-^q&bh_vlqDjOCzU
zVHLGlnh!D~6M=qqK8l_0Q^`mz2+Mgxdq1#zd8`4IPdm&NXQqP4#f5WMs2~Mbx=C|;
zGZ@{wNmZB1n1{v@G;*rYU2#4<aXyWoE@gt*hA2~zuZPa?;nc=Wft>U!#Je6l(Ksv>
z!$zyXn7SCOO)o;>)=xyL%+B136ZB)a1=g*;gla8C%#*1F7OD9(>>=|Z%hy4ScPyx)
zknTDqVtVEuD&Ct$76~t4_pKCE|DPvry`IKcaN4wPniXTg25{ah`-rG4-!xq(6wB9N
zp=vHFXfk>c$hKc1E;?7xu04qMY3f4boG@^FEC9_5E70^-HQMZ345HGU>id#9@Q=&H
z?uD_S2gc|*fpM4rm|)Z63wWqG6IZinmGJdmSU$`c4X0M2g3<qRbgpqVre7c52q6h+
zLd=#Vgd`pIwGcuQLP$agAta}e?9xd^=`599DxK6;slD%O(e5B2#KeS#h7dwSNW!!J
z&%1dspU(_+-}iN`_4|FllzZwujjfrKkEed5!G^<w@zt^f9G0enfqt2o(vJ9+?q5E1
zHlQpa9DHyRKMafn9UhE?MBzT)1hD?pp`7cu4UP7%g|=)I>SI6goMduwz<TH~S;#~K
zCRi;i?SbVJjcK;ojmx8_@s8X5SyyTaG}N~<la2t0RVznn<Wr?x&Ls$VYJ*aTS>QVO
zu|#+N2B?}}%wG4<#ZQ3+ka5Wgn;&rAH8_T5Z|Zcu-^iOPLxoE73S35cv#S=w;JV=t
z->bFJq;@5jtsB61%}qs{=R@IG{7H0hP304P7NPgQiTtO<0*w4Jmbc6q21}p}SH3R<
z?J)s3^v5P_3EBz8bK=>37Zp4n9YlAkne3OxEOcLSfW0~rgR%@WNqvex>RO)SS0&Z(
zG%Fo#bMJGn4;3hy?ZvD9_>0eeXMoQxkfZYaLcUfx4Ap1N2Rs%@IrIq7I<5`(V<ut9
zGK7&&3c+e=7>c_#u-+l5D32S*tZtpb!N(RMjLv7e&)>2JjaVi%+9_%E8vxSo>P-Kb
z6#kvE2gT!q$mubX|GWMy`h_pT;?c(`6Zw<74sr#z$*Gh*c`1b6KY=E3Q=sbbP}ta-
z22!hA61_<(=v$uxt@*X=&^6+B{VazY`c?Q3b!&2lA%v7H<NB#)*rnAA6jo)ZPHs)7
z^ZWU!VXIJ-Gu*PPDHFV>MZwvm6L_O+1~v`1gP1X^F?||kwx8-l{6H}_Q3t-`_;c<)
z_Ah>?do0Q>=u6TY7s2Q+&iK6J1m)9cmlaIBo>g{~8NVb*6I-}D(}Y24#ncP#|NmUc
zf7e)Gc*<BXcAXE;H_E_#OfVRIm$01=D^b^QAZ5RH(C_as@)IZ}z5G)_TGtC4&X%x(
z?kcnyS;o9Q(r8}bBV6lgh!q!)<GhHzh`YXWll*Dq80zBtG~e<H+i|G3Gm^XRn$O<-
z%)#cRw9lq~OVhUjuq8GR;v4qjlvv7vl@@?grUw(1-4Rr$#gfjnZkUid232vM!UOj+
z=<$lOJ{=v(>?B9LGd%(IxA=koTjH3t-eN264#5%GL-E(GOl%F!!H_@^*F6x%Dn5k6
z+oEeUlbOt17A*rfp9nKvOX0_l`REpOiM>_~z@|H=vHA0FuA6lgx?Xm}?ym}<?&Jop
zb0Q66#uj3e%T8!hsj-`1FM_517WA504jnx?n0(WODT?DL?r92<PMhG9r8agCSAgri
za7o?A(GU_lkJmX2gVFEOapRZ1nCF$j>;BGVwl<WRil@Bdt)sBvBYBMj4zl$A<{<mv
z4yN@R&~^DYZk;(y;?m;*H(Mowq?=2@e8@~(aEyL;xe9g+tU^V-7(50V!@;ws@UQa&
zaM0EqY&S8-5DR@2T}u&qrBj|VLQB%(;>i16t494o>dzGw!%pf<Scmq&4=D-o!o&&3
zUkt;O9_z3%p^eAv`G<`!sRk^gdum}WovE*JCGk;Z)hnRp<rU^$`hpc7(S@nH6VdpV
zHK=l@OB_;OOIp?&fu(f^(^Gjs^`>6fMHvi_Wdi2$AjsQt45}{cfl=K`?mh7mx6=_|
z*I42_?93H9L?fUJZt;~LO7YcQ;>O5A&}8fhi4pON9N(XV86i&Cbk$q9t#rbwhqswq
zQxczX0b%i{Wf-TH3dV!-*j4Q^Fd0RC{>dL$Q{Eq-QcISsyOo12zso_^=Lm$|9fen8
zRroNV3a#<B@OSTQ)O|ob{6`yvS5t_~*KEPPb_ZZy_%q(5n2S!?x?F2yI#|DYXXV~U
zmla<@w)^yKY<g@8f3MhyENeXKDwgxQJ~x;_x;?~?-hw0AE~0TkKVne?fz14l&|jZp
ze846AvR(&gP_O0Yu5cK{O2M1<Fj^|$-lK?d{xpy2?k(YOpVK?!g&tpTw;#KDB*2t;
zm$9~W0_<{*L#bw^Rn#*97HbfD?5-ejc;3oe?#;n~?)3NLQOUji4Z$IA1XtMp;;`W;
zNM+{)kMHMsmfK9~Iv<9Wsfpk)Mh#_&!A$(_Z;ATKFf{6nWqAR?pqpDvtidJNP%@MH
zjDIUVweAW{2dvo06=_gq5rZ)np-kknUOBsU8D)hgaYy|^W?lST+2|R|3u_V};XhM!
z=uPg27e&mnd=#wfJr3(f(;d<70I}yqaAC^?<UhiQGcaAU?%!Of=)D!4b?Cf=c7jVI
z@y$CgfX>A%SX@*9wo`WUyfGClXM`VU(5&y(sSr4@X%*@o-Nl{ye<b#b8`PV};L3W+
zPP`4rmKr_eow_t9eI-~=S;fs>Uj!FRb!h6O?!W&e6kC2{Zxc-L<<TN+Pb81*RJw09
zM&pdRl<RGO$h$@l0sU1G;QlxS)A9#nTUIf1J16C~;q|P!CLJ0kb0#^GLOjZ&tT=iU
zly%#M>_|8^jQGX_S}UOG&VS5Ix0yHV{?7-S%07CL53(g4zK)=o_`*Ozw!E7Fol>kB
zBBj|$8p!SY^R`=WCBFI@n0eSA9HPb0QKx{RJtpEl$l+QeQsKP4fd78ph<`>GV3wu@
z6W<};g!v_?9&j97H`j7`bv2W1@Z=3QTY1B7cX%|`5?!-ISUdg<SV!v$X=!JmbLl#C
znIFjKT@InrtQ5XZ9E;OVoF|uTF<19hq5EQ2tRC7MTW$UEuN&i0J~K^-yjub_ld3_w
zG+9|+J06-g91_MUGf=&Ocp%*`f_ln$cxaRgYd3kLPenYNM;UW(cU!Pj^kFaFP$z-z
z*`mKD3no5Bpz?GOq$l*qk4sE?k4(5l3=;=~`5?P|0@}|^0PFRGC2y9{yJ)8#o)nU?
zRyG6Lst=R1^$55$`{A(Ma7^+ahB>C>#rb%S*Y4ef4jZPzwDCIVbT1Y>UxlFS&XvOS
z!xcEr@C+L3orbJG5?RP*nw@2TmN>b@f^pGPzEh@y4$H1f3}5v|Z=an1KR>GW#&VCJ
ziSR5#6J_6zvf~&1$dOF%ix<W);@u^zh_8mQJ_eX)5e!nvd}a2acvMWA1g@VuB`v0z
zZ0NSD)Oj%kcX1CcI~2?e3o=0AP4C0I)H@5CL^GO$+`6YBQ(3li{l(evY+)6O^ee&P
zN}Qy{_8Jt{MS%3iDlYG%3pNXNpmx_e?vS63br-yt_V}^jnR8e2KWBNkst=faxWbz3
zj&NzRjZ$Ipo2hQzVM)~jTX%qFAL)6}F>OD5`EMQ?*-Qg#-BHZR>>wmgyau@&@=<MX
z2Iy9j!!puQkY4%3W>u}kkm%#!7H}Asd{4!j6`QfQZXo{pPXAtBF4ML9N;BifJkKr;
zx2()WY292wrcL>mPD3Uwy2EYGmY`^Nk-{)nfxGUcL)OFL)DJ&HIj(eA^|un=G+e`B
z*SBKF$W-RPc@l=cL1@fa4PIsg(WB-c-l5&d?JpLx5;I3^ZrB35$5dfvb3V!}`b(1P
zV_4IV)2wYncd&Jx%T0v6+;S$pBlrJg^#NXR|G{}^3_8!9vPbY4GpniBy^+`LccmV}
zNH$ihl%9V*ZvDZDe=r+He3{)a_**7Up$vW0#3+yt$dEKIj$$juWnpB^9j>!Ti2+CJ
zS?!IpEU8xpU}ZAe4(*SH>9nUB5RTRscdhI{USe9gf<*h%ag+|wQ)(?EHsDn9v1iA?
zs|8_@Ejxr^4^N_Qv^nfN5JfP|QTUr?$-7@KM)$c=rroXvUGtu?g*t0+-i~mrYpdh>
zx#v)=mcFHzzYAU4GQn~|2lvYlM!hcrv@cf!H^ct$#egzXbLT)5Ju@}kl9^{0g8l6*
zJYL07bZ<U0-zo>iKp8Wl^Q3LE15BnPmOLeb_VM9h(x3;6hQ*@8!er3kAzZ6d3LSfY
zv&>7AvA+B{7u8D@mV-Q@wJ@H!Zp(lq-NW!jEgD@ryYoicgVelv$i(fd;PKA^=ySLm
zNVTmc(&>xY!G}KBp*Y5EXRQRgRaxNgn2ODX971xpKu3He4ySCb^^{&*G-Me2=h}I+
zp7Wpb*o;c*XD^cILooDztA{SN<j0|%hhA1NhAYp5uIGA|G_g0-_kPB;7MucG<#Es*
znG5!JM?t`r^K>7ed!bbcZ}O;sM#_I{-9_lwdY;=3H0CCeS={B^PH>xk6l`zU!;rct
z{Ps|VK3>EZ{5ujRmH5%jIv5*gm7!<a6~R&eF{^w@fA3mqnC>0Grp>R!cE7oh5Z)h(
zmLA5e_b0&XWIiSg(T2C*DR1)kdg`v^Vv*G;e7j~miegV$Wjcg`SbrY6PM9jR>K}p-
z%_9ExW)ZZxoPl>wH&Et@z7<9jd7YRtL8saTPpwmu=^l~jluQc}+uJ<ct_U<u<FNCW
z9JJ>mBzeaI@0Ws)$DM=r=u~cOvJ(Ov1G&jRZv^*{esEw(9!Op%gSJi@&!b(z3`cb|
z=n6uU&k8}*ky3i~TQ4*?I13W#xoDX<2vXXYqP)`(cCjI7G+`fzW48+1hbUliO))gy
zw!w{OiqTHF1+{NbW^nIYrv0)4^$xc4sp>JP(t9YGl$C*7r&OUGoB`>fp2Eh3XV9*9
zE(ZK6gQWOeO!?Cg=hdWu>&YkFv#1Q*av!kI>h$d1Gy!8}FxX5d_My`g<(+jai7o2R
zuKy}P?}3fHZc;l-8f1!>&acFd#jBuc?JLQ-QAVhnVZe=^tcFoNucBo;@#mcNxcjOY
z_L5biw#k2d7CDamj*P~|v;IJhn2DHpftYq5mol)E@)ny^Y|q>eO~gL2{u*XwI9Z0O
zuft)u^F{QI9Sm)sjX?8=KV@gs!0zrzv>n}t*=Xy6?5indPW+Vxd2X1K90KYUd!gog
z7IqlTX5##j%>AEemXksIqL=qr`0xzgw4UxzLG3&#C6;SEp9^)lzO)BV#P*(3;7E26
z>eXH1GY=^+jdnYRu9xtSwSH*O6o!r0iy(0;xe=`1^1qs*(R6VxTow1l8Po?>Jt>f=
z*>1<={ZomfLC;=~L(n(Cn4Ik!;j-0ElpTA|+$>f@gD<jyd&Kx)t`hZjTcNx)fU9P2
z;N91iW24J@ICg&;=B-pm>#z&#{(VFI=sJ|XfrY$Y_W=u|S!}KOAlOqY!=zKE(O}6W
zI8a;)2Ip;{D`5hdw^5f;b=ay`bvT}+oIvD)8BibL1;I-xPhoCs)!Mflo(u`WCi2i#
z{0v6Vd&Es%b(}oG{V?V887wYRg8s>K=#jAntOveOiU&)C1iiVe<0^x&%5qd25eBPe
z0ciXzhkFE2FGKGbkDS)b7286=Ilc@MMw|e5^Eu4>bUJJFKF$~HIz*nEJ*=a50ycT)
z(?0XNvQJSCn(X_@bPL9V4|Of?SO<Xjww~k-z6|@0UE-Ai`sgu602|pEUaMvSPTo;$
z%jm&~+liGkryp7#Pv*v8iP__Pv9Y^97gPVj-NFLQ-uW@b@IvrzHf16GJut}46cuB2
zdH;=x=(Q~u<R@!|NGpBTk^hRhMg&NlbpHg0k%8FvVlOm`-^6?jf*~oQgsI<31E)LV
zG2!$P4A9F2)1x#;$>K`K&3hnJOtXsj?|DMr0SHgo2q``#XdIox6@6sPq$igo_?%(^
z`WM-I*A%q;x|i=eeg<l^rgJylIJP%oAx?adfjM3Da~oKVrs6Y@Ug-!oX(pERwI}MS
z1@qAM5)cV16)`34+*+2bw2W0l`zht%S^u4Vq8><<?+~yzSApu-S4r@4DTXH;g1AWn
z)W4kq2VN$FXxkKt%XTTou=6ZSR>}q!t;J5CXc#Zm#-NtzD0TO=^4<{2=WdFoj=LRb
z6c;kBmv-RdSpbTy^B{Y#59TaTf&6R-Yq{PRq_d)-Ze2Eiev|s986(jpLdx7vXS25(
z0w7`VQS!7+<;Gh*=zcJUMSctsHs4x=ChJGT^Ue&AR?UILdnZvl!4gW;^wDm>37k-<
zht_-#_g<vSRjd8b`rB=edd)n(DGg<}HcA}t%m>F|#azC+0EcHR#d|boYuJ&GW~Y}x
z$N3J{;Yf4GTZ6%LZWXFIN3&j=bKt<NY}8zog$eidxb|PEaMho__ul%LGFA#*x_jVG
zupc^<3c@h`i=eh#5B41z3u{j8MAyvIFlgr(%Gpc?#knmY(_h7BtaL}MyJ^s#wh8WM
zO~-&6)=-81ILqGx>x66I82^NcoHZ@8^2fr{j~DPmRWND~oC>4k^3gVh_|ezr3J*Og
z8@j5Lc^oa^iupaE)@&>+dFPFfs}k`c<*llwox+d5RakKivHA5%tX*~<<<mz)?cP72
zBSo8=?O@n>yE~5ep{~lTgI0=y5K#Mf2!zb@U;%gXxk;>wKm2b6PPtx&v+nN2Cf89s
zbK_8y?b^gT&xXO4Z3%!8$H8cMATv8N6r@jQN)kTmuv!~;&>vg?o@=i$jZvFHL&E?x
zSBAiUe~rbu&8C=;Hh{Tb8V7~b$yKs_FX)cFz}+V}Q!h^&Y!CbiCbAqs_F#qJ^!qZ@
z%svLo?qq{EvxQqNCsAb54JKL{p~h$_ZxB6Y-`!pDpu+}&IcM`EEt<71jFOZrsf3EB
zr@$d}A6_260Y?TF!TbB+<iyEAjq4Wh!8{U9P2eD3eM+MCcm%xD)j^MKvBbLE&0Mc^
zgJr!-$#c7$Tl1T|q$~iM%TICh{g<FFZ2)*H<&d%^8vja}jbB&T<2cIQX8aq9eG|8!
z*;HNBeG>t;VF4g*i)60TkFk)a&zQ@~dU9LYK^+^)4RW9GhMF{ffw(T>frgORUP~Fo
zJ<t#gP?e|$_2eDV8ak1`DG9}{<0;t3>mZu1pNaF{M_@WR6|Kj~pf1Y~^}fI5O-C)b
zPX*1DeQmJIxCo{CAC!%{=Xp0z2aI!x1B-X%*g)<~hqM0)k#lJuTXIvPyYM?V`8f{(
z5wue-)7fn?svp`5F5Rkl$jepC`qiH-9Yf)2Ps)kRtK+^C0w9FWtk%D?B}N^Gcvi$9
zY`Q6ttR9q!`v3cLT13I`9umxRn}_aG#(_c-%*;Y>F)f2F5PHH0L+*O;#x3Pce`zs(
zxonB`YkgtK@)(pGxB{lfvoAgK5f}W!s-DI{(VJap-!TXBlB4+Z%?nXH(@vrmnhNRD
z{BhUyzT`$};-WWJg1@T__Bs9Lw=WDuojB^pSEu3QTZy0`ze{+rH>`IWh**-2{|q)j
z(RwE|PwkFQ!N+;aXkD0bjGo6KH~7up6?h=U3Vm-Cp=VH;V0bqRJU14?tPiH>*gKhd
zY}0_ReNLj!xnyWkb7G=aBS?ZNc;$~w6ubh#J2HoBY)N5xpA5nEY69$fJqh1$3CA-^
zXX;l(u(-%75ZT|c_-Q-?k39#}Se=W@Z#tqVF<&rAo(eNwsbfvQmC&?HgSQQ3yy4vd
z>SI`dbp9Ifo>0PEUig97Ww+$%uwy6}jJRvp4)9bLbC0DvQBmv0O~bOlY;XyW3>wb!
zQWinL&q!A6eFTf+H`DxTJ8OQGN1U4wN&U-{yk*f+-sk*!6e;Pz-Ey3z=+W*eDvXQ9
z?os}#yN0QO#E*(!1}^?ykhQM>inZh6-Y*$y7IeqN<(D9G(GkA5Fcn&qNi6)sFv=<X
z<OTJW_|ttp8i<Jvd-^o@F)ZOp!vguF+m!tqbq++|=5vkDQtoku7)3*);m-g%FZcPz
zU8W|0#kL?kg>sCH(Id9oNiLtA2@URkC_1O2<;7*NUhr73H3R-8b`QoFpMn{i{b82o
zNxa#y0Ph-`V#ce9sOqLC{JU3%fioF4^{{6F1C-n`_ysqaO>PlKf3T!%Uc>V<+~l1f
zxd#%!zUv0F)tL<jbGySm^DuOZ+r+KMjAwaw*Wm>6t;S5!gxz^0vW(FJduL+4-cebZ
zigR#AwE$W#f?)9Pv$)UI7Oc;Ik*Ma-UYSDB=dY~Ciq)CW;lCQkx~xXM{rTKzwkBAY
z36kgA$O)9~hxtQ`pqX}<YP;8h$k0&Xa=sfzY7OH?oBP6cX$5S{M655l3JIbR))Bvz
zi&=kRk~Z}c{YElrr>W%f-%`{YG6j_My~`h^LW{#jD5lwcoo@{It|C7Dx*I~1sz1DP
zCg*a8C#JuM1=r^#65p@UC~6!ek^XunsJ-#Q@lOU&m*p$BHd+CCb=l1O?h|G{{R)WV
z?HHPwg7~77&?FcL>YvW>y!2j>zvK#dcV_XJ3{TLMpG2*Qo)G<gC*~aPhx;reApBJ_
zWDOh6Je(i0W4$I|()Cz$$qEC-+s#aPw+m~!SF(L2V*#f}Gd&Lra7-@5!w`X2jJKld
zWt||dTQ13%KxeJocrLYw#W_!A<CXwBRCkIc7E3BT`c;Xemj$Es<+n=p+v=bxU4!D=
zHo}e$dvuwagNn(%e8#aWv}-zs2BU)^Vb^3P9+1QN^Z;~S*utcxUs%3b0x@kmg~<GT
z<~Yp(b$c}P!#fkGdt}Y#)%t-}pD+xy?}2Tm`Y``{2-cnmWolJDDO>!ASB0sd#xRUr
z*5CO37t}eIkt^blG_aaX*<Inf)#JCx;IYFN^;tenSw{ZUy~m)<>L_b4*~v0WFJi!S
zn(frQ=4#QEJk&A@9$2U0ZP_&Jc<Bc|&V#`4&2U`u`50<UDPntd=HScQ3oyZ}nSJXo
zLDMQ3$PUewtjgSgI%RSQin$D0I`^4(;dl_%Mnm)Ri@dgXcMz$&EBm=^M9-xQpnm5R
zV&ThJ)56nuJvte?8mB{RMjX_IOk#G%Wl$4y9-b3Rpmk(4L>UvCpz;IL{~n75u|eQJ
zmDqbJaWG}-pD2_1W9wnsZ=8L}e9Nlf%YrR<YFHUexiSHNXL#eM?WHL7^;houIvtWO
zSc2!F98|Y`#7!oi7NU1f#@@5Uc>HQQ#!UXg=hhEIha<$^zifa8y7cZ*t7ER(b6L)W
zbNFMEIp&X#qCMgk6lo8a)SpR1hozOUwBS5udPL)4-K&tk0Kw_#CT6z97{(6Bz)L-c
z;MGCbFv%^2X-ruR_a-XopCh^Be+u@hsR&zccLV36N?4J&2t60c_|4N5*wpuc;FCx*
z@%@H^x3Yk}TpW#xx*@PJ!Ve={GcY9NK1*6lY_Pgy(0j5Cq(RHM>-`DbBy>4%%Rb4p
z#(t4_Hog_20)o(Miz}+Wg@79z!~64kW0p|`uA&*gcKvC7^N&BNOKl3zmMJmgwgT<C
zoq}b%RjA&75y<y@g6E}r-otb*s-H_{q7nL5ZQ?y3N@U6{i;AGwDF|KV+o2^SmbC=U
zfWnq)=r|C_R4YzOg1&H&?ROI-;e{Yh>yH^PhhWHFPR~$@vZ+-YB0S<zOj*1Ws=2u8
zoC0_38HvYJPU0#55b|#n3wP#5V*2(I?8viJY#1!zo~@j>oH2)SbS|&@J|5&!1<fLk
zLK}5a(hJTq`Layjdg%&$K1)5ikV~vf<PXN;zgc^MD>!aE3kFZNle2v$Hq?(~4NLce
z{Q4?zUr!yu{7uYBYZPCgro!TPyWv38GE9FGjAkpBLbcln9A7|rirmXk`ZEN<+W<po
zwhzl!gT}HIVB+S>+_sf-lck4<C-H_Ah6g~zs_AGuc^D*I`oTulP-b*UEtk#I=Ylwy
zvg*DN@T5C_tGSHwI&Elk$b`hOOW3w^CX70xiy!(c;Y>C4I|B32Iynp$oIi%Ab_8SA
z=VK5@8FKBS-tgmC1j<T#vmJ-epstfG+PL?Dx~ewj-gc0;B@-*)RXT^u3o&cyQt0YN
z_w}H~(B}7+%eH1P>CC$l%Z_H2HPxAXwZx?Iv<Lf1Mwm2p8@gW~0h0g9u`9Yi>inp{
zEbRci^P><N(u{b56T%0BU8s7O0&!gh@VHh^$5vP7(9MMAQ^owM`$Uu$HwjHg<e-^d
zNgP@)Nz<lJY+rdl@SKuL9OB+=Uwsjne7?=MjNr5f+XP4*<a)!aoXaQS7pn;DIh(lF
zj~9XYuZ!3=kPGr@i<z~4G}j*A3v~CZqvfof+`i96@G#m!Jnt~RvOFGF`uIchjghGR
zpb)p0$HCcA;i#bZo%*^Ix+iGzG2asK{mwIZw%ibPh)vlbq5j3GM&8&;ZUUDwgae^W
z+_zO2!nUBkhZLr+r=9xKWMzYSHptd%gY?RMiRs|J7<teL5>NKP9<>p;_*NeL^sGi4
zGaHQioMyEPHqh>Sf#j@3F}@WF&@6llMD~m0CVL-qf#wTSBdI^?T!}8dhk=G&7F#lD
zH~smtg{ntSxVDQP=<!ybaPI-L{~5&`Cn(T8r-moAm%?E+>fYMt5_8&*7apmGq@(K4
z+7v+DLv`+X-wIW(OC(8u0$dGUi2YutkRKILe=nWYa*a^7coA54-VwU?9;S%N9Nz0~
zCiJ76zchU@Q(X!~jj{qJo@*kpj<OJT`5?CITw{sm{qdox5}eM3;8FFBXnp9t5Mr6c
z!xQr1*t4nFU84;1bhSXX0|mvLIOtfW4K7#cA-__>YEAc%=5R8m$o#;1^9%|T(YNZp
zGiJFKfo^tBaIv;SjRg_pKsUjP;AF@?N?lrFwhe!rh0Qk)z(G@YeEMku@eP8Y<-u`o
zOgqb(&dYpmFJlz7`$;6LCSi-i74l)FF>$}+61Vg}LDplqq*k9gU%&fd!;StBIp813
z4v0fZzhb!5I|s$rqVRccGRhqv^CmBC(9>NBK3g);>xLn^?zkn{u1RyZE>}3*IvZ<W
z69?603KJbz2_Hn0&{ONYP_sb|t+nXxq}WZKTN}{Sh=<5p7j86m0}s7ZMQkwg+n8%H
z+o2*{+?<O0?#RgtnGG+t6{D(q7;9{?2lKhziLH{(HUy2p^lOp)&Y|<DKFx|%g<E2k
z<`d>v5C}@Uz3A2$$&E$~0gW%QxY5=W{RTv$^Ljrhd6<X;E#$b`*#;j|Pr=jsA+H}x
z_ssLe=Sm%iHa#=p`N|TsewNFl@6itB-w58eG8GaO=9s0Q0`*%@vHs5XnAcIromvi{
z=P%+LbQ=q8M_;p+36x9vj3Dp2E)>N_qguZiU|y36qHGnMIC2JW95{>LPn<xr%3k2M
zXc-#K`@qw}&VYAFI@dGX1ifQ6qRl){D3Jw%!R9#T!?nSKxH1~{fgrP5jJlnxFm|FZ
z8gFjku?laLs}uLEsg^gLv=`hz+~Z>c7h%W!|F|P%p6th#gWRn@?P_i@T|YUbZ;C}*
zk4en<b`%H$!$D*6KA893fV^B+nNMF^h|!OMnlo$pGM5V2m3IYQWyO-N!3)9Bwg~TU
zB+rcZ8*%_o$8ZmUhaB9E3hg(%&g3JLb|{JArVhGJO4flPpcPOin4U1lb*D-}bb1RI
zKA1>6bQP<ND#fi|7GaNfVvMZogS%cI!F^%>Qc0i;3<mgsdt@8477Kz*m4=QJbwKaj
z8n7H22-SxJiMR8es|~2<9<zxf5OqnZ%B&aK=azGlOR<A{HsS0~eqiW)0wA=UUA0$0
z)2JvGwBa(SGOSqB_AK@g!l3n&DQrAi0xnZ^!JsJzG@c4DGMTuQ-7mwApi$V_#>weC
zhntFnApMm-j6Zl5?G9hW?xX0v{qP5GPxeQzGV-o&P%xXvpJ>;1i>p??5&CEk#{;f3
zBTJ~{9!}qRcijX~dshx`0}|2PKM;%W-eOG?2S7h5WqFs+1M^YDG8nP|=2{k@s5HjX
z@FTfNW)T;!ZGfboCMV{~RjxI*lyZRUB|g;sGU_)Cp4_d#Q(pq$t$R7>28$u4d=~EM
z?uwq*olth)OVa6PiIE>ZOUzc@=Asc#gu-DL!M=YA>k#jN@Sq}ar^Qz~hC@*Ru{kQv
zp#4EPd1U)AMP(2i=qm%Ko)cK(PeeE34a5aiVcjra-md8h-TPEvo1Y<Qm1PQ%54*8y
zzj#bf>J1jN_3*-C#1O~T@HX`XhJ>oabIO^`h`$V?v}_^eIX%l26L@=86o0!S5j+Fm
z@ci^lQ2a#h|G5I(yxhU<ZWWzt9`QP7FY+p!V4_F%%wag=ho}01fhl#&tj_VID%wA{
z)7fvHGqmj9&ejE%L;d;$uDD^w+!qkfvj25S>tzGjW_bZKudPSDrIa1~kq@G=dhkc&
zQOufnm>>1B!@ZN%VexFr3n)$r_vVC9zq%(hP{%g$P64PSJNSUsa9n&p2m}9~hO$aG
zL7MpoXtkJf`@$9Q{!bH(US5tZr!S$`gD{l#O;CE8)^V+EZ`k*Pd-1}Sa&lq~7o>_9
ztK!??Tx;WZiK~t~bo`tLS|3j#D_@H#59!a^_cD)b&&8zbM0EHO1>r?rz+=2odsiS!
z5EyoNEe59>^zn)I1Yw;&$XAB4BYo(<`=ylEmJfg*I?=={>&w+Vukfa{8bSW2AMwP`
z@{&VoVCJ-*c41d=n`<ulaXg^I$B}9E^2WgtSr~rhC4WAKdh+kaGn;}o?tGqR8y6ZR
z%a0nPeaS6;v)6po9;X9Rv6I!T%vAiZ?+kP}v;cG;_97O`Pxjc4*dPhNxyGC%==l8)
z^ZJwltD|XVL*G0@<0RNOH<jy@gu?4e1!i6B!L^6ZBA${zjx?g3DjUI6u4}>j<56%3
zo6c(&{KfvGb9%@-eK0;N1@Q@MJnmVE_pQRA`)hNQD$SUpc{h*Eqdo58Ea>!-V9eJw
z;J(m-9XM-+Mqz0@5*WW<ACJH6eXw@B3kWARW5e>FOnbp0NI&KaYNDUKZe*@-a*R24
zg=mn!Sqcf8dqDUG$^|XAWtLaMFvTSU_v{Trht1DeRO4z4-*=Cz1*_wMiXx0MNe6$*
zj>!LTgq?2G=~+CEve^RLemw;h!*qFLo)om={-zw{C^R%mfh%JJP|q|SM8kaWY*#G$
zW~M@~lQQzd4a1q+0?|`D1Y5Jmf#>XZOq5R?zF&{&4(h?0mc3$@A3B+5ML*h|DOn47
zH;U#KV`pg|ICd4ng*hisZ_fuV%h7{GQzTdM5iI#H0XEL;i}mEqE52L>jkDLGr#J!}
z>D}U<7|e4It5EfZShG`pa542}{GE=HqsmHn$)eE7V=X8=)KR9#z+`+f>WDMpzwMXM
zd{l4TelQH|hj8AgNxe-Qf2cL?PMY9elqoP}o?Gb-o$oEQ1!dr{;xLFDdzB^5%S8X4
zSr}9rfPz&N3|d!7`FIT`(lb$<|0fXTF8#sS{Q|cf5DIPN9T9o?@V1+K!Drjw+*Q;K
zbiZam{;zUqza)djsRGz`MYE7eC!t+=l{-DnM!klmplshoxv?8erCtI7(W^kSQi{bn
z+o5KU1T1~la>u%B;G`tZw5UmFS*gP%b-pNyvA0Yz%LJu-D~9-6fltzWc>O2{JA1g}
zw#g-!Ft8_XB))XhlJSB@oFCY&mcs4}S@_sD1D5U-quFX=+1AH{T+g1FD6a{!p)qjh
zKqlrLt443PReab#w0mgvk=Pe=7E}L%N3Ix;<~D`cc=rK!s*D7OJt3g|?F*MXaS)BS
zu(EicfnDX9EFqr3D^&^ZKbU~(7uvYT5-B>RSJNK;ELi8x1UKT8czoRrSFTciuJa1>
zoLND4ZaM#CP5Hk|TcNf@g^3ALXzLXyBvtMK@w+MT_SQx88k~<VfxXG$zRvPV&m62?
zL1*!`aweW*#6F)*!Rft*;6|GuP+ZsM7TYS(V+wHtyWQaOJR=@5$ri%3P2o}KCF~XE
z4@u_Nxb}c#R+T_MFXjrC3)`46Wnf!rhni!T2l5-`lAB*9(cIsO&DfQS_OT+64vS_h
zT{Tb=O#a{np_2M>4ct;j&Xey6!chHu99e!5Lf4c)ds!jdKCKWI3~|Ml0|Uu*JBUe-
zi?Hiu7Po0!2ETqJfNQlJ3VZfL&#f_BG(Q(QK5szlvonaRG!l2+q@ADP9xib%0>e)$
zG4o3-X#W)rn&UmuF^TTh^>n71dl{UT&W0avXu$z<={$0kb$6VK>BTYdH6#mKqn*&J
zYAyNtkUgD7*_r<NIBeWp6p6D--0mg8sii7d9}t5k&#bsgu^ncdz6ziEr()mV#Od2T
z5U?Vj-?f~Gx*`XVTIX^hSp%hJCj^ILSH2}F9)|yU5+7vC(WR#wTuQ%y9XGzPq*2je
z`LvQ%ITvH=584I3EMQ-2kHb)6v-~;N5;eoGf$PtIBskj`+-zF7MS(6ltS*7>#piM2
zkvw7#5Gy&kJ093njt8ETgYaAlb`2Q<nH%!(#Y}T-&8OYX>m+7`6XAB!acp{WOA>RB
z0d}MJo}!S|23UagMYMYSAs5`&nDE2&+*>DZ=XIOLu(}y1g{tx@@EN`t0v`O$XADwD
zxA*{b`x3+4=7mA%w>)B6eBf#;9l^8zWq#z@Dq?jK2e<qWrYXOI@@w1CIOqX)**^tZ
z+EOs&v=ttr48V5R9GG<L9L{`^fk~ESAkveTHnmuB*{03V;YuveBgbK~g*Q%8o`(GC
zSIMn=MQNEF4lZ;)SLypJRm%@b(uSwN*K@hxmOT(g^i&{rtV5}n3Z&&<ng2*XbU$bT
zEm!t4)2$5McXzNR;W$@Mif7`)uUy1iEIlZDr13nNkDPdc&gNO5l~Kr??p|T4fTvdW
zfuYcK(;3{4ePDf`2cl@Nm~|<3!Sj+!V6rfdN3I>rcTu0Nt3UCx`b=iGhth0%X^_Ny
zjXz~A(;$55U^x6zOpISU@cbCcY}QhSzF`71u9*f&YWp$f1V>#ze;yE4$<>8U*44KP
zn~PF-ZQ@YK+qMI=eceEMZ=bTa;v9-3QbnC{HSex-2#x5dAb#+iw>5{bRCzqwUt`d=
z;=SNBbOsLyA-B+lx!6#n1nVUttcpV7gdKrNmmE>H{7=ezAvso_TB*$==H}4=G+8kV
zjI)x#;r<{9f0Ds>?8`>i;d!i}mm_6(k=Rj_n6x}x>HT^dbU4;Ct$|l1T0PGTj@jG5
zpIE)#C-o6_4}qdm>f@xOa(?kLX6BH4<kNi6y5`P`b>@R=O}->ySv1y>+uCRLBIwb)
zk-VeN`Qg-3P*~`Jc5W&JXnf(~dP}f6LZ0OwE6_UL7FCnO@!axd_*{0Geg=nFtfqj9
zW!w3egHp_&M|%mQOG<};-%LBI4;WPrgSkIXQ|5R*_-Gemh;lcGr!3???;JpLNisV3
zK95z`-g2YCQXKHp4Tp@}fU*=ne9~fwH(rNh*2i#+kl164c`+-IC4qJjWvE@00EHEx
z>KwpNS{2|1&GmTo-c*dfBEiVHE1;=)GqhA$leeHZ7{z~Oor|d}KggaZ^*Y1C#40q4
zAny4^D|X<@S#XXXiJ!U!q08`b@PxWhwS8$G_^F9$obCnn&-TH-{eF->Ae%S8I?fW_
zYI1pG2x@DbB{oujUNd|>w>DkD>k5x^XM>|?Yq%IYo@?@<#Jv{>ofDMD!qGD~6+Pb`
z0qtSLanSAoaYa%%{+j%t;c{@<r{bP7OnIbw3-_Efp6|=M%Gxav4(n9H_6KJ{nt6dI
z1kHt-%){Kooih5*pRv*8T(i+JB_0@}WjUSC-c}2p)7GKW)OFzguMK#f*bj|=O#x3k
z%CV;n!YpY9x-YQg4r$Tc=s_MEd;BWqUs!>!wiMy8c^cT7Qp?hPx5JxeZxj`12_i=S
zzo|6mJdws-m2U*c086koELF;khl1OaFzykjhQD@{LBFX}P<+G@I>%4IUK=jqC-Wls
zT(TL{mlN|oIR>S_!?BXyy{fcp%8r{epz6L29?YDH34Ls#?c8am*!qs`I=Bn>+ib_C
z3x?=i>4vfd3vi90j?>}-*7SIi<dH=oaW0#=`rVg&|Lcn=J#v9Ry>k*jRm(tQ<3*-w
zc9qx~dO?$_Oyc_Xk??Cn0&W?O*tBsKZ<CD$`@usXV99P&zR)Fi#R;D3ehzI9oCo=v
zM}kqCn1%cQ$Ma8L#rF1vSW~eZYon6D^JpwjP<hiV)QE@Y#-PjSh2Xj(Nzyq#0_q+F
zvT~1nEY(Otw|SAQ?fMMXk+hG658MUP4r58n??0I5U2;lhjNx^EorbzW#Hg{;Vyy}*
zux)gO2Zxd{XeRA(*UHiCK`{D6#zXTk^3=^w!J5<|ApP&Um3y~7@bX6yI*BKOagH;5
z_%jDA%gN*G+r|_3Cebc651bZzves`l;NtL=n;qVb4O@@#t8-VQ<?vzz$x`x9r=Z;a
zgV16)5o*Jnp+Rjcc$XZ4ZjOs_-$O5`n_~`bIWoauGC8zzc0=LIL~uJ80Pk0(qbhVW
zGyWxlz8*zzfBAV__4ZHd*bNYpE*s&>zJ;i!{hF`)PF$v#<>2H?-wf+WsKaPU>zIYu
z<`_-$Y)@?VqYUveCG{*%gX3CToU?)y0R22r`lC)bu_g+Oy@$Z+GU7m2&*HXbMxgcC
z7RI;~VBCFTvIIP00V>Wlx?h8?Qh(-<uLn`%dt*$$09@9iFWPDh=e2jsq4rZ7Ulk|C
zN8Zj@zYzKLOJivMe1`RZ5s!n6_TuLELs3rWuXgI+*<|>Dy(4h|FTGKQeCo?9Kd;2P
zEu~QUUo7fG=YY}6RWQbqoOh3XK%_ZR(2S^p^6lgV`apcFJ`2EUdLNj!YdF5nrn6if
zW!*xjF{fuvT<2~j)_kjG_a-S(v@w(Ajl7DrOVe1_E*%hR0^v3F9K}|h%;v;Zd_RRU
zQl@g4@F)-SeqO^7w|vpQGXWGey0pJ~%o@huW*&b$WX%Ukur%W|dOJwK=k!`gic5m5
z<<uLe8NBLF2)N5;Lc)19Y#p`$Me_a3Y+yP@?&^a*Svlea6A0O~j<-+fj+qN-v=Xe%
zTtdggo-re_vt5Q8k5TM9R!1VHOyu(sr^t&OV`ck`oR+iQLH$8Im#ue~*dOsl!;_w<
zwe37KDRbDL348GLu9*l`mAt9*2Y;)v6fK<<Y+H;AZZbQEp1u3?G<F<neOH02mXuo(
zSj@$)7j=k3U_!)lw4Lt5L_JSgdM3t^Cxp68zCT!-f;#n+kHYgu0(kUO!IOv7wKLcY
zFGFZ{Z5IJ@KRM>_jRBb?O5*zSnB?dnJ#<?T2t!Opkb5N&8+OMr;Q@y>y9q2HFoYS5
zJBu}I=5v!3#u&Q32#g~c=D)MS?O$i1xP2UqUZhL=l@&~0Jey_p9s=SdU)J8?$Fc^V
z#h8p|{N(O%%q&TUr?S(KXL6i4<*txk*9~R;vLWsYaV4Vi5&qYO-(-Mw`8T-huk);1
z#Cq(Cw84ox0b4d7<l$#N^VzR2L&ElBlzrF2?Pf91SX_o30|!Fe-?vzXwJt*PF$hn;
zg6<}3**s(F|Nh&`H$6a{?z{(gQI>IgX9XDdwE~gaMAmE@gXs-QHlbw{dKFE;fNjJs
zn3KiMst(}T%_q@%^bKKgRRsA)4}tpaBh1##6N_UxxXoLFk{czkORfS<nswGau8=%l
zO>?oxvl8_mnxH$>iFbEPg)DR085|xADg7^^-B_fq4t>iPSb=kE0TfEk;)mE&2w8E5
zUyKXD%o-i^P4fq-yoC15r&!Z#F-SdHgvbGlU`mS*mXaG!Y<U&ihD?P9<IUi8YCSHy
zX+*iw8DNv{2K&Zj;}J6tGz>lgqPr4!X*dn_Hq<g5uVHA={+hqN9SQvwF?{drKtHE&
zX4-2v>a7__{iH-IQRO|syL1Q`P5;U*yHi(n&jo(dCk9FsF;Mk$C@38IF#At0xO%t`
zb2{@U4AVaYGY0oYm$ADc;>Q?l-Mkt??(Bx*ia<D)??ykrc-|R6?#s6qSktV>f^oS3
zPElR#s!0s$DTjj3yYoyaEx;N-S7<x(g?pA&Ls!Q|F7IC<SbO?_;mahrvOfSrgUEk*
zH<Eb<#|xj`wb8n)npr1qfrO3Mz;kMl#CzXmZ0qwv;&^cvh;O8$^~-Xr4!<7ECZ;zA
z&pLsv=EOMUwLGCT4AwN3<Lu89vE%bal==2zl{Ny_XcaP>bYF1O(1X9bi2JhWI*T-j
zfh5@qh<vdU{BL9upM$ypw{75>CG8LO;#ixJz|>~0gRE{1%(H15%-bM^hywD<dXzG|
zoQde;lK{3m44@)49*oP=pvid*w^z?$I-Q}A_xL`;`&(g;kv=AvacEsg9)VX;+%4r7
zH@I{NTtc&8-##svZFCacJw9+F;$T`1OhwCa)c06aiIH7j1%-MaW?&b<EH{k<<Lppy
zcbN%ZVqa{%bP(NRDDN+nLH4>xl=L`+{@W{1c5Wq%;p9Bt5Qhmq(;?54*u8m!LGkDl
zf8Z!bkCY=IOIg5Ve%281%K>!P?P0TH;;^P&kJS}!WFhnm-!6N>&d1PRIJOXKcJ^Rh
z=hUGsEC;l0*T9S)XJCKIZan(W5bU;ME!NYyNj2iEa)xO?w4byTwC|I1s-qWJ&g{=b
z=O;*1$-9~R^rN7fr7LVcn2YcJBL3W~EH0X64rSMtq15%aVEqSi|Ar8c+W(Qn?O{Fh
zeRBbIcY1)+dv#Pbrzp)MvvJGFa&TDW0&jRVUeZ~INi_>#;T&U3+1VFWQ8p6SSwkcP
z9rH1=dq2vDPJz&W)lu|bu8@855%T`*0euJM;<z8@(8o{)4!mB#Zdp9XD<0RjA4mP)
zv1qzo4&yhg<ECpf(fzSKH``kRHp#irr8>)M3fDs6m7Z8yU`EV$0V6hF#m4)^Tvy$Y
zSy$>q!ssgIeJYRtHT4v_#M7P9o;a92u2|{&g<)BIBGybMwtK51?^>=29hDnd$MpiN
zdv=d?C76SJZKTAFJ~IKw`ooAZ9NY2(B_W!VAX1)yhkr;>WHuVw_a~!g)FSpOB>-0L
zrYupQrqbm5e74W&Z}#BPSybCO5mXOea{KpX?2miXv8Brt#Y^_E`K@JW^r;U9kY7iU
z|DIXD<alPXKgvhV1e{6kr%7-4AGAwb+>O57`j>D-`5!3CULz5^`l5#0GW1<6!`1g@
zq4aPY^G_`Y^FOHH=dcLi_606Gx`<bO9EEkW641qa5BzZFxIH=w!+rMP%^9XBN*fC4
zC7U7Dd;y-Id4K-5O0XOqfElrsc&BC>`s*)7*A0Qt_={X6;AhotZ3^u>qhVa)UhGTH
zulnbyID7C#bn)$uqRl@f4SmlcC2nBZCN*p}DaHOYuU6%agoY;)$T<)Qmg}z3?)EQ+
ze@_6*WpCLqwK81nq5L1eTX64_$i1l->8&{)1{Q0hY{NcmwkzbXe~!dP48r!A7ockC
z3I1;DP86E}k9^PzjK62Yxdq9n^^jxc&vU4IfqKZx7t#BgTyU$FLsUl;Xs>sI+9$f8
zu-VD${#}Iu$^z6lV-4y(X5)AL)98Pv3ibcW2I=y<+~B`S<n=QHmvzhW*9(UG-o$}%
zn?LWM9^v!UV7%;e5bHE1LX+=crvGm;Zgw6;uJ9FDmmCje3(Ro_^}qK=(60VUlF~kP
z0K9dGg{*-u*%se&Fo?a+JYp>%=f!c<b-u@q`k#bn4q}`nx5R&h3FtkGeBR?%@${7p
zdTNmOcltJ%lDiW(-?Boze~vTx=YRRB7a?ePfqZZ$1zy@wj<V-<-0f&AB#CM`?&}Gi
zN6$gT`1L3q)hwyi3}jis<esUnh9>W8pgDOZdi6;K^*grQ;YTR1SL{X0%W<q`+iv(|
z5)2s`(@@$z$jUgiJ1FJ}+}*4@I3B(T2Y$uC`nxAk?ZRs=UzRVC%1f2@qEar)qwjlS
zrI3*zM~&t4q0TP?2Iucb@7Z)F+OQQw&mKzjhW+FXSp(om(rLV?NI{3}olGGK;qn8{
z@MKm2deR;weUJ&|QG;O5x6x>7yBjZ03&DYuPu7_g2a&F4$w#>b^4=Fg&GuC=jre_a
z;v|XGYBlRs+YfziUgMH?7tz79N=SO(Ou40EiOtA;l!r}4+qHASz(5T0upI8G`yZ=6
zAORhzAEZpY3~(cnWnEN(mrfB@-3mpYQENcLmSGED3FAAjLCTi}*!EbPohk@|g!}ur
zgLQzg?8|BJzLQPfAzgMqy#jnH5PM}&r@gU;S<j=c(8hPHYWpB)x;72G4-I9@vWB2=
z=p3FpKznxoGRXy_9E?9wiUzO%vh5b&hnzDYeP1FZtv$t(ntZ|JP5=nUC^xi5ioZsZ
zb8$d0$U}*DTR#InrIz9@Vu)s}I*HiL6L%e*jQ2Iq!iT0_XyQQbyck{n>T3)+x|$?)
z-wW9%PXRR3gW*&UGptf8;uRXFVC;S38yWVdo_dVZQ$Gj8Hyr@^T>5^_8P7$BI|S=)
zc91Tah!y=xVIK8OwD%Nom(}U4iH&9n(T*TmI!;pDM)|@umArpTJT^r21+%IPEb@3a
zW->lUxO3bR+r-m(QsaDZ=<NWzP>iBGw4+vYhqKo%<EN%cxbH|3?@QclId2qpj`l>8
zoo*<%lM8M6>Zmomnul9nVh4TBP|v&@K0HuPnK=Os4+X%k6IbY-u}UGI{7CZri4`hl
z_T~=vV~GKIlQ}3q3WkfWpm&d3OzcoANSE{ktpWS7fzE18+m7;()@@(_Rv;UsE$n@)
zLX$oR!2ZoVuzqAA3<{-hNBiIGpEG6XmS+spO3QKH!wVQ*zXO`968P+AgHfyRZx-5f
zDjII&kW@Sot;O4g;hP2AxKRZ%n{#-W&ZP51a`J6!QP;+fiDztryl0QNgCz<h_a3L5
z;VJN$cMgII)}hGfZy}6&EY^1|gqsUe!0pRRE?)OiVzd7s47N2w|Nm^TeM|%^I#h%(
zRL&!x(|y{jKU!K)FJ)gFD`@k?c9cR*b1>iVssJMg`SHfu$1Ej251i?FQ7c;vxBqa*
zPTDc)=j4*x;UzJ~>8yOMg4c{X1G1MxF<+4iNhu-dadaR!X1RbO*pT{4Zg?l+GT7!_
z1@$@eSo(YNSxi~YeDX)aUmbq<)aN8PY9v72$GfaLArp7WmSffq^5;d3L-$8ZaNgTe
z;`(00SBVa|`;8d0_+f~=?*#oOU%~HRUGeFbRM=-sd4QS-Si7bIZ>)Dimta5U7F-36
zM*^6<=DtLG?@kb{T`ehEVS?^Ab9l~w&e*Z-U*6XHI2Ugo2`77(pmcaA+y0F10FJ2w
zc9}t33;m9y74YWDaZvesCrSq9VS~<eP>W6^m&P^fB9DVDiZYDki9*BCi>O=joNLz|
zfL>MuG576dOz0VbPGjdm9r@y&LLab{mQ*yhISr4+=OL-?DHC_s6=Z$)Nj`iHfvize
zQ2nD8SbwRaJP0wz$7Di#z6nUgMX=t8X48M3<gPt}Bn_p%nEIJCnD>nOdp9od${l5>
z5{Gl~0Q%l$%~z)Ok4M+pIuf;UeW3K|W^8MyVrF{RS>MD;OwZ!bY=EG<L?0#v({8Ua
ziC^DFp2>^<a@n4~5|hSEI3hK~MTW$zE>B|8_;lXB+Xd=3Ux3He=dgO40voIr^Tan1
zxL~gbcGwm&y}~$t^VV^w8!#SfJuKk&lQS4-JrN^)M{wCHZ)nkQgq*Jbqv+h@VocjO
zJ`$27gped5X%j-teZ?Sz5JIT56_QX%LKvMy2bE4bBc021P}6f?X*zC85?We9h;6O4
zEg`hL*ZbdoKK|O7>3Qz^x_-a!mwLt5PcZe(xo~1q9{#*G7H!vDWLYsaU~qLQ$bxiv
zPV8!Ib&ccFDLIvs_4lD$@;siITm|ciFJb?1DeZ0Bm~758&^ShZzo;o_s=W|ahnB#r
zS>zX7l??-4?m)=8fT<QoaMPA+X#0mXbPqbhvYLqd9=L+9vd%(DojaI(3lp2v&E=tg
ztmM*PN3aFwqH%gh0+3T0G}>-JO3WvYql&nzR|%FAA6zRe63ikKaY14j=&99#=J{gc
zcUi)~@_fwf9E;K$J1XCwkAp<hCf2bagJ-n&bBU?c+@zKG%=!yB{QVQTMagBD`Ui`>
zoX%2I>5SuAhFv=bLrZuOoTQywpN<EA<Cly*@rSteEGO*!poS^mqoHr|aV{Dq5yH1p
zUuDNN>^)%&R_hhu>}7!`IxF$Pj}Xvuzr@3EKAt-q0}tGD;lYmmD4kPR*^*NQRvR4o
z1UjFa9J?b<o2G@`r=M`^$PD~`jPB!dOR$n|V#hYrf<{I#w26tYX_tqRDf=L{bQCnr
z4B)b7iSWY06h(8Z%;X<+VJpqb7bnj^S)?~)JZ)xvs&{$jr%u*B{UkdzI}=A$C()eI
zl^y>^@3HawS@={>uqz$`+24tC6!OvBw>cL6+f{<%+w^&v$arVkI?(;aR?wJQ3okw^
zpka(3?%o>@V^b&#9!>o13@4V}zYm?}u0vbZfly{T7t0L7VAn`uE+-`rKjAU!7*fKc
za&|&U{u;CoJ&0HSFhS)c@<f*0<oD7Mb;eysr)Apg-GCGrGN>GN?@+fvmWBmKCHN?H
zH(Kr)06lZwaGRyEtXm})-dwDO+7%a|?-y5&iM6QN{EYhzS`N0#w9ltIR?v}Brj|Mq
z-nu1$kw+x9X3k*$_sx~=Nx}Xjb*$xZ4YraKt8+&^t4SJ#8DZKGs#njyz%nd98bEHD
z<6M!m6g+R95yxo}%OUtHquw~P(>LVZ#~hgHx~bs1X%!xuo(EmFI-r+MeYo~$eCw!$
zZMoe%G9niZ_)p;XV<bCiKwK<!ax~m;Ag0z3W+c1D-u~_eWsYUocFG$1l^*lvsl*A;
z9V_5#F_f?(&?}4u&mF;Vu0Ir<_hsS=aygp3|A%Ra<AkWRlhBpF9kl%4FwZ$zv?tk#
z)#3~2@wtqJ>+C@jZGSdDJQYR#0pcbyMTB+^pzcP(!n8}*q4<y!dKibWa04gku)T^M
zKT_Gx<F#mJI0cQ^Iac6CyOleid3%~F_^oPUCq!q_=E4Lnrt|VqrR}&@s~&S-bF|a5
zWHp;gP_O?ozHOTb8?IYZHgGEFu3XE)Fa4mr@j4KFTw`uD8{ko10r<Ti32i?FG*rEe
z;t|RCzWoer`9wLh0nfNZpx$oNL|ENG9gxM&V4zjWB}QpbvpO36*FEOmNd>63m-x4l
ztzu=$c!i35p~vx}*l|e;*ilZUJ6)G;s;$8D9=X(WY-5nW5*%p0Ec!l9o|hX1R#O6?
zTiXfa8#8gwU~=I6DBzOkvzS^RV202mw3V-bEyiA0s~-b)N$qUFB<ev8O2UIKVfcVD
zuFdv`>7A7hUC*j{sOL5OA`eHc8-uyuR=Trnh~b$Z61c{fBL36SXtWDZg9oRG`Ezo*
zur6aTwzOS`dFQW@CulP(`zwR)Bz~1qw<tIDA+=HxS}Q&|rVKT6v{4aKEOb7vVY*MU
zSk$eR%yNPN5a>?t+PiF`O#yZE^m*3<P3n?d;2X;WVcD{1-2PWBlq{+O$?(nm*W?RW
z@yZK(PMk;2gAat}v%!4qrf|@&G>6#29nh3f4t8qandyLu;C((HFmf@tNk*gYiOIa<
zi3Ga(4#R@C!_g+^meBXtYPMx|B51w4z%n-d#WHM0<1Yc!*Wc7GL>=H@ptBabvP6(H
za3KzG)uw&-T<R*SgFNXlbF$nECw2s4+2Jg_e6JCbt_6~-$(sDk5*B_-3(m|Kf!+T~
zQMV=t<j2qA&59^kZJ&#$R%}7#C6%yZNghPy1c=>6Re(YMJr+J*A15pqV5P$hEUlY~
z&HfqKe>ecblhv><@&%KupA5bWY1U+uAx^wXo>A*5p!-`dm+r2Ir?M<KCME8Zu9!O3
z{^G<;I$PvwRECYG{$Wfg^sEmNhu5iqXn%xwMUn#C^@H*ClqAToj>o@49kJ42I;z|=
zg^m@EneX-k<oSHV`o*-TsF)~-o)^RWoJ2@+xCoj*H}Hbn<Dfa@CJQWa#io^8dDE-M
z{I2;vl-5sy1=TduHFblp{WLTER711BLwIBAX|O4(L$zt%bbpNpC-Zcnd4CH}e{_`Q
zq=?}?QtS+@!PEA>Xfz}cUuOnFP|RS=_`DrWx?YO$Wi?CLIufj(&W8S%Q^2}~eBWjz
z%;4Tpp4mPX-%YH--64UnHjjLOegClU!IXRXdV$<%gP?WfE~Zd&<cd9ImE!RQ;GJEC
z{)ecKF+mNcRYss?1BWQRBw@F)6elE}K!c@+dFJ6Oyj^J?e$kzaU+2X^?}ljRH0P{f
zWGF&AZ7K7axCCtMZxP?N679<pdBN@$)+&t<*9@J6HdhU}{!wRGacVD`JJR#F_%tp(
zQwxdx2^eLn&bmrUu-hY$C5PD4+}(@!b*X~YkJ0?dxJ(2U`cCyjDQBfEt{AU}nJ%P@
z?O%)SeybqCN`WrKhOkLXgp`mNeEaDYmk9~t-sqv=YAgbE)pY3oy^GmBFoOx1rBL(I
z8f_LGVm2>rz$8C{^{n2&GD=SJ&H-!0iA!`qJ~an^KVE_@FJm#M#u(N1liRWWHkY^-
zi$z|w;QKxw6%lseDxCr!-zH&W;UKj8E0up==1cR2CT6|xA&dR21j_MOcq^*{{eOqU
z^p!QJH()-7Se~VsRsd@M-T;be{^D-CBs%Bho40u!M$N;YxYXo0>gbk1r|}!Hu1*vC
z^2Y>J4XnkWht=F}BXNC<wuy~jgh0pLvEZ}xEJy=BSBe(pi$6V1gB;a?>|kCsl4}Su
z4T{0Oppn_@ml11>_Oye{V0&gG&WI?$*4PO&3%Y<orOPP?zEEtsM-R%Modu`TROY<r
zCmguE1f8t`eI`bL_k{rLZgnJ{WC)ZIG;;r%DLACh4$Jn_J+vzybu~i81&+$-th5rH
zMu6C7(>>mLX(-5PZf1AvBMaR(6Kzh}Fs;SYAxCzfw=InVznXm1UiOe3KR61bqt{|i
zfd}&!7DG<e2(J5Q0F!!YiA4!#(fei)_7k_`)Wb<Qc;IxbFD+!+*KRX|8U5_$Hv!Qj
zgr&9Q!B~yc@Oxw^8cj>#ng4Bpf<+QAd=dgCp#>~QOBq`(s*~q1fcfb;aP0-{-1v?%
zzHpR6)?QVNEevC#eOW@z#>pU|^WE?%^DwcK^Pb^K)bDeoZVK`4wCI@@8Vk*_st~j4
z0EWibz|!aGpmru4LVB-&wgk~R_y+u$R*bUuhD_02%Kcv4<!|$4++^%UT+*={-|0qy
z+-?fxhi1XOy>#}EDHm&fc3?f3_n6afyP<xCG7foKg3a3ha;puc{QLf^cqqdQXW3;E
z5A7&uHWa|>bFp|~cpZ9n4FUsY1@SF7^fwQHrg;aU@3J0l-@XEWOwqvCZ3U=5`V6?g
zv&YU~9K@!j6_Bz&pT!O)o~pebKcAY5zkP8>XQwK-eu&No&SjNlaWN=OR|ulg6nXuk
z-<j^cZQ_6hbO-VG1%E9guv~tYbz7t{m@*I4GDWZ~HW)jWr$WB>dMtmE4{w$gfu`Xd
zZs=QwWwb*qSdfW5iz@l_PsEqZ_?acvOW0-aGPL}qgPW?5Tat}sWixW2)6g4?GfH7@
z$TbjtRKdw<8K}E!kD#7LPK&7<pk;(AiYED+`@K&>lXFKIuG0atS9C@nK9zZO$%%;|
zhf{mPF)3v#b~gl~-MutW{%IbVxCF!c=~GavD*-KUw(=hL6_hpI&IYf|1+_U!@P3~Z
z{_oqAEc-{St>FcOF9n06Fa+1e8>3rq8FT+V7o4(fp)AiEdMqPZda5lZu41fDS<Jqj
zbwRsuPpJ31N6&Xh)OfXvD?ZhTqenPX_C*Bxa|~h6l{M&oDgglVnOjyMi`wV}OM^=>
zu|~rEZ&HuzgbI)9nZb;deqkb6h&*Zq<#8x~v>`ADttS=|CoBmz5RbflPyj@|GUY-<
z1a!^HWj2pv+3!u+`1S9h7*G;Qj-N_l`-}6~wLSrI!cAe9b{a1FT!88C%+S`WmseO8
zqxGvLs58(T>%o^80_9L2W(d;XF2HUZM=VIU!5z~}D2H?vj;&jZK2>=bls_6oBg!j#
zj#~0~?b8rC(wzSp(}?ej0@1Z}0%$qdg4}H?7^+_akyNr!bJ#KLO}l|}>CD^lULW9%
zJ?&IyqqRS=qt1l0z94s4dd~vwhO1+}MH1NFm;zdBs^Bk+2-F?A9gR!}pwnUrF{!1L
zFUz40$6@iQF-hpT$csH0P>1F2rJ!Px273*NN!&afTl%O2s%SLxDK`iGF`f{6<UI59
zoW=$WOF*kETX2IB5H6h#qKUC$$z&;*yxGrxtGS4l73aW`yv`{qZqS*QD`<GU<h@lB
z@YjdeaQC5^C^zV5($pIKJ|z?FE>C3TCd8gTc!b4nIRh`JIAZ%#Lvl);p-y-KVAXW!
z9~=)DeV^AoOhva9m$>-AHPA6tfaJp&oas&bfud_bi7T<jqcCCioN8<>)nsmVXIWyX
z0krGNL8N79X74lvQa2vM|GZ~otrc;P9@#OqRs@6UYIs|f2o$AbZK4fe^07=Ty>?IN
ziJr=APUM4==pda@r}CUfG!s`2#h`b&5OJM&)KNiP<Lnbbueuz%v&MjvYJ=Fwv4VH4
zdd@oIesG=Z#MPy2RmNps@Vm2#93ttk;!Qn-JSiauRwmcY5`@UkSbUIj2HU&Ou*6wQ
zc}s>dYV=Knc<;T~ZD<BYCr3i?F$aA2t{Pr^sDZpwr%?IHCEoVV8zhx6Vw*GDSh!OO
z>#JYIB}>hO+JbDzc(#%AJ=rK*Qz2G-4H0a8E4lXh4bXLC0490Wfx<q}y#B5(Jn%b=
z-Eqik*M>lwdla};bg`(Xz09vE4LpAPmwzrGR@@Ha8+9*&QJGTo5nLcDSps$!Yq8bk
z6hE+nT%jKVn1Rnn7Cy5IQa+`_BV}@TEb|7xxj!?Hs~bT58+ir1BC%V)3UYo>|0N&<
z{+O^H|Gg7}M)P0s&Jok0d-Nd`Nk>v%OIxs;w+h~jS&ce7k|8oQ2^?DY;6mXLO5Xnv
zf0g}z_dg8A7nER|+5&oqp5X02&tmPXKC=Ez)!4gnCd6zHfk#s+;NF*EC|6p<{0qv#
zNScl7^s9)`NBQS@-tcNdGCDt?`PZvjp7Hokp{cZnE7!Wf+ap=L?hfVsn{Es3lgagd
z^*j`Kim}i2|9x}=)Cg1IDfTI#^6w$=-0n;Lle0X3e-$1T6R#^EjNAOWlh<!3CQj2L
zrc$vDOak)6u{%x4QCJLl30GmvA_ZD^*K*0xRnR1z2&$ZB*v=lzJ*X7qN_Qy7_ew~d
z^qn^^nvbKL-7#nELO8J31SeZ+qju4H=o<2!`yI5VU%4-{BBsH=PaN?5`)rK;5e{13
zd9crA0_~WU#LeBI%;3Z&`tL?Q^6W|C$~<Sv`<KIkr_s>;If?R)HRARWlbDHmD6`Jl
z3|qd1V^qou!C>kVRzdr?0f~+ny5k@`zk2|+B1`EW8%?~4qb!tmhJM!%fM!<!k0>Ta
zvV1<e&rbrg|Gsb?dMCek48_*6It(JBOO%TpO#Vja=14ctq<o@i%~R@A=W!){RkVCs
z##_owv3R}$KMJO(H!v03eP@Ec$z-s4p2uT$(Ahk9Eoct8%`WX+i}u1po|Q<PM3xHi
z%?ene-H3LQB`nG)Nw5uV<TxOjoU1|nQmY5b_t)_U{_9XWNDXp3YfwK;47$aOQFiw{
z*GiJ3;`j6BHrJbpSsBWzn}|(3?E*w3TVsIcI#h_lA${33)GIoVGbc?$kw+QZxUG&@
zJbBO=u?@^D7BjPEbr9AQ|7xBF?`0)SBn;<DwdJTkkUE1(e{<VSAGn+OD^}l2y@%?Z
z#H~<*)z5B#&X+Z4c76w{rKWSJJB>e8FT<OUqOoB9WYF3ah<_dk!H(oymS!PGi`QC|
zzyGE3Xyasz-M~ROa~dPthhyE5Q<yz=0~$`r0jIS)pmivnotD=yH<uJt^B@mU=m^&L
zX*Ogu{LE6i0%5hf9{zN@0Xs}Nz8$?2lsnd=WY6E`Mz`AdMH3gaQf}eSi-|{k`UkW5
z>i~;Ac?y=z9fLt;UHtV)${@V|8B#VLgR4DfG4%<c!bD*n&>V>Jj@{hq{d+#VXFVRE
ztf6#ML}lX6>7YuvNvZ7t;ZX}GM%-(@x<vsVlZ~J^F@#6ikDxoP3cT5&fPyT6ryTsk
z%hk!<bJ$-{T*?KLbz1D@9pX!Ej20`bW-{pvUEyl63Fe%+j2Z#?5E`_TH5skqu{X~^
za8)%9DPE0!x2Xrz`4dQ{C76d_+Xzv*!&u~pW2m%(7&SMWSdZUekOaxN$o4b@r3+AO
zwi7kW>5X;c6lzpV<h#l<u>SaD&<LsLk&|g|-?bH!x6j24J9qvzV<?DR$AiR2Q@myJ
zIWXAl%1fskqhj0*LF6>1LK@OusW<&HXiS^I=O)F`Gb2*4NgK}E=Y)Xf+!n6z(ctxi
zlQD6RE;JV9gW-!H;Qga;`ilzcYibZ@1bBg`4P1>PmT<kd_*q*Cp1)FqnU_~!)W$!!
z*_%ivTJBTnyu=3g-zAQTi7(fkCljPmb1T*E6!9Zv&ZyLq2!Ys$PG#*}SvLX4j;_Uf
zRm!+gbv!neS1`e3Bi5>uXLm6%+HZ`44E;<Prb>O%@cCl9wMm#ez5;#h@=*;^py}U(
zAkx{(Q=ZXx=R4WVbLLH+zF{<mI!}f45g~ZY;T)c5y@1_!y}+qB6qVgSa^Gw8+_9qW
z@%Jt!8EgcHLViM1h=5C1jIhr`l`UI(6+=4%c!pF0HWnkf-J&miLG~r+T=X9^`rOVv
zy=F6T4ubKok7M}o5DZtIfZop6(d5xg-t$?R8`npI_4gU@&T9io7YUV5o~=c<?_SI}
z`Z^l+B;Y#{aXID(abxXuG|PI)wQg^K#T_!7^Ab@y;kmFaG#!Jq2ZQ^zXsjQ05xNf@
z;+}r%xm(8#uu+YLSQTRCTn^)Jj?&-!L;+(A5}?^%%4A)SxZn2eO!UBtb&hri14mD0
zdz4tzpU&gRU-n|hlS^PWZ9Cn4juTI~7M4&iBdX*yH*4OB8av;!vG39$YT+vunw^ca
z7L#wrmRve_bFi~M3}m;9#Jax@Mv=C&uvteF&HV3hfp-3O(<4Fj@+Sr#v(W9zK+0&0
z1j+p>K5AnC?HjJJre#amZ&_v-e}q`Wx{>f^z<K;&IS%LM|4dweGqAsB599x$8KLEG
zTqWO)>358S@WNzp{6z++(ZS`CG;XkVI%{qkM|nd-p=ae~{O`mS%sG7-!`Jyi3ya2G
z)n`%r(G?hVGzn6cTEgG0Vr(5ggGGrbS9*@T7$rw>?B*LFBbG;S0HW2<ciid<WuD_~
zxz-*#?s6;<KjkE1bRT7%$|)BcJRcW)uK+96@l3Sffm!>gtKb^UAfwY;Ft!N?@!fi2
z9;eV9`2)GgT7)S3@lfIx3`2jph+)D3e32dpU&5uR9kz#jZdxFl+b(4IJYb&pwlfRL
zeuN&@;x^HZu;5hyXrFLFEqj{J=J?@U;}r4;Y{81$dOY>G6yG_Ji}-jHsEpNNZOR_7
z<t6cW4*24SW!q77#7O*}%Rwjf2Kr7)#HPm%+$>-r6sQ$aZ(ssnJ6(>-EC%C;36Shc
z-jJ#o{#y>si#k`M=b*FT^M=@%3#Rf9rp_pRatuVy8^j$0SL4N1yU=Q2DGPd7hh{&t
znbdzBm{sP1+k;3peSHv=?Rn0cKhWplqYfIKUfkpEIBXfP5H(#on17EmoLENR>y%Mk
z-lByAW=l{d_85~i9TGQZ=Y#%?US1ZM#Vp>}l1rW(;U38>Z=96=7M#qzt5dP(>O@eH
z4}uf*spO2g&)=`2zL1NUr+nH7+CQ3^hiX13Txciyu7w3%iDMbt_VDh8JaTPRqwMGj
z@R?5Rn$5~$*ICqyy%mq&MrhE!_&w(vh`TZ(f~#2>L(|O(P@oaZ&lRMh@FWRjy~%<D
zb%G3PCNp{1b?Duu3<)`=s1|Y*wC}mYww&=;@|zmww-urx@qm6F;)(qZ5|~#(e3pjc
zVuOQ5-2azx<Y+148va#aIq(5<n_&vlh|gl-a1N}VT@2=3JF$LmCL~(5@ENy8V(7^%
z)>rl$(=1&Lm0=q(%}EB2J_^`%AQ$=)?YRBtK(N+a3(Xqjey*^GC`lwZ90|uIUl<w&
z7lU87BWP74xQ)09A6y0OS^1UeuYJp<$KMe12Qg))oU0vM4U%)JJkKlv-i%5_jp=0k
zeH{a7k1vAtnJW;UIunWvOUOTvOSy-8%#8ibQnlQ$>*y$W7gP>TeZ+z5F9KQnEavK4
z2-#levAg0M?-Uy{y<v+{WdF9J)P>xB?{<K8Ke<>ers9hoQBc>Jk8@h5VMg&3P*em`
zPj@e0(|8dHuEOHKU%@T?(U{_2&Yg}2f#rO1%}sIwx%>zJYE=ohM&CgF)g$3=w|)51
z!Um=DEKu2OGI$hFhc@yIH@NQrpT=B+P)`%E`^%5FKeFOILCcxX#!BWCo5<j6JhbnV
za_1Xopv_<xjG+Cf-@FFc9##)kOKfq%KWR{AlEDs}*J0APVQBY`vfq~tp+oN~M*c-k
z-ovSM->?Fw375tGi(KKE!+KouQ47^So&$N@aTsT?7hm3&;qcmPn7BI{WyemF8!M5U
zx;aDK>zOEuzHVMVie@hv7g&yPluxIe(i%1zP4f(4-KPcE#N4^o@WK4G5oNW`F2#p;
zrx2rT7xvN}@k}tC^)}fvmE?1n^5!`crC&752#Mjke~b{yhEukqHj_0?3dhiuv!L_V
z8FABLXBPF!2<EQJgfdmisfZdd)Jut3#gB*ej=5-k+!*FMZ$RVTFlg2{12x+irg3mI
zZ(VVc%hp#3k@>`HFTDubI?14mxq|=tP1FHW#j>g>mZ?qey@9`q6)Fe$=-4bwG8~Q4
zl?TNSrpX{_H}NArJM#&DC1LnH7wEBi&fONQ;{6(hJa^)3Y)&cyf3;fn^h-X*=$C@D
zb`FCJ79iR+u~MR>CrEM@fTi~f7Vg%;4j!t*j^AhosOJH)CwW5Sa_V#X%@#{F_M10}
z+PT|r^mlNJ^Hfgkyt(9Gcym!O`}~!OBun6r|IVWP)q3`3U^ez&vxJQ96a4+iTDWmw
z2)4SrvU$lqnC45)(65_8cB_^b>^uVDMaQATeJR`;aRk3!l%Ub$TJCga1js*a;%-HG
zOt;vN^>}!~&>{ou`IEW`vlpO>mIe>kihxDgwO}Kxzzi{YrA|?o#CI56shy85CXwU<
z&7eGEy?K<wC^UQcl<Su?uyZ?WuwfzPx-w5gKen(o%1%ao$be63h1h-b5I8KGgo>eU
z=9SM!qe%Hr;e>$;DvSInT<lLff7wFK1(9%yy7oS{8K7F20!tdVVv&CdzRNX0H;X8+
ze>o2&r{aZ1n+<q#I|t*zwIFY6<X;~sW8$)Ed}PyrqQSXld8SEVcRUe-R@Q?JF$p9=
z73PMkPoh<ToQX0_$`uxJLE?C~(tkranyxeee<OsZ4_2(_>sfFZw-S|(W#DiR8BS8K
zz?)Mq5lf?gp*TGWOj3X2I>d<XGx(dSTswye&k^5ul|Z?7C3UzDLsq#3j(STxi%q%Q
z-hp<)=jeMo7{w$5gFydqJ1em+LdC&x;ueEyY-_v6qF%0N>uRo}WZFCP_Bu5PQA>pf
z`!0cvoQubVCJ?t{4m3S}%2ix6$xjpvJIpIk!Rv%2UfHOonF}%WH}BdP4bFz)#4Xc6
zvw(rnu|E&Gk8T5_*;$ZixfE_6C62>5J3OVe6L-u=CMV=fIKS;WN|SaA4b9~7@u~r*
z8UG1Rq36KyKrwj!*D1t*r{2fyI(Ys_1M3espe-vQkD3}W(H{tACevYhE(dXLEtslE
zaiZ>J)MCAComB{YrM%at+402qddYu?7UOrb3p6L5BDlIIpaIN-$(~`D^xgu0NoYWc
z{R|=Mem~`+e9+C$hELo+0hRas!J9XPqxsDm^juOcD0VcNqvk9KY6;<Ln(z4B-RU^{
z);hE=%!kvu<jb2+eU{h6Z;LR3vTu~PmQ2RPGxR$h-zi8{XR&*~Cd2M|GH5zh4Fc_b
z&k##<<3HQcw>*q~Z(t^ltb+6>3B=!gFC6=n2E$gK#0~A#vE5pWCw())@Vo{6Tc}ui
zbQg~rPw(==bTm3%0yc3cxM<TO-Z9A(Uefb?@9hf8(X1BhXI62I6XD#cNS_;LhY<s7
zGqk(x;u-FmTpqX`?mk+9#}BQ=poz`g>9LVGC&dd+`b2@wUlF*_a3VIzo^gZHF4p?C
zTC7DKO-&1O5t{~!yH78I#7cGWRO?{{jW2k2Z3JJaXhhjB?htA!K!o=gbbGGC?3GV5
z+4+BYSJHV9J$0t{;!Ckf)?)apbT-~tGZ-Cx=h6A*HM5`O2hIPE1cl{#!E-D91`d~T
zC(~ZmR}ci%qvOz{!T}Ozm~oR@8MpeTi~f<SIA`E?bb}P^d}$$;1Po;c2dlW{qH|#S
z+zkSVonU!>4fMoa=1(&fbWS^Hj&-rDtiOuSTkMb8oilNGD$Tve4QGoI&(d7}3DZ`O
zV`h2%%+j+8<z<vh9{VR(jJ+wCJ&I$wZK;%HrTgFjV{Q<q$s}e5pgO0Jm^JRgc9&3M
z_32dx1uTMYs+z9YN#FOrvx0`hM={cTtW9SRD7IXOsdn+G8AiXeL{GYl_VT7JQz14i
z5q`bB1yx3j!UqL;aO<iJ{oRkj&AHVuYFrr%-WCG}gLJS=Wiz@4J3&s{If&gqljarQ
z*v}5WsQdaLGY#KBj=qy@<|TDhd!<1A9bvrTupDic_j6H_a^(fN5xyK5hASLTqrvPp
z9(7R8d*%|G$n_mFJ9Uw1b`$gJ{$@~JLHoFc#L##p!}dNmn6&B|x=QY`PciYR^x`bs
zh*HHr0;u<OG7?NzUx!a)4X_~J3+k@xVW?9n`KFpGW42v@*vZ6S$(BOiwP4&A?uOlM
zIbc_C4&EHn#KjZEXg}%!pKvM~dM(KRH!+Xf&P-(Odt<<9?p5L4xm@u0YbIPss6lD5
zL*>4M1?aTWiI-snKCh`ljk_|?9j*ZZ@4~TNhj>&^o!Q%8*F$s1MlixMaxA|y6PZZh
zNVW{smo|cD&lKi4xl3$WvKB<`-3z5*RxET<34F1yMY%W(T0@!yUEOe4SV&xvt<E4D
zL;GX*gE%O{2mQZg@}718NW0A5REts4Iu-1`Q0^L@@z~BX9yD_^+?bGruPDcCuuldV
zYP+FpObA~Z9)q$TEf8%^7WdX&1Z5vf5Os|fKc@VAzhXc31_nUi(CL`*?G1N1I-a_x
z4V3|vE3xUE3zl}0yGUa`G<H$nCH+3jlqjLf7bWg@@eFTnL43MB7#A&eM0LMl?2HeF
z)|o%i+3yQ=C?4~ao{?aA%L{^@(F&H{6>Yj2;HS41E;<kwYMK;c2A?Btl?KT2t=V7C
zCZb)08rSaY<{7KaQTJgB*Qj~UMV%vfOEW`{ts&sq-7Z$@3&K!sFQ#u|4ndUNN$5+(
zjPi1}=Jj&)A#kMhuTw&|(N`{gn<^}>UyK^<G2&)@a_1b-hVPrD;FGbI=0|ftd&g#w
zY(K$jUhYG~d-JfsB!OKnAZ}B`FO{wpr%^NI4E$~*#}w_G<bGL$PI@Z^{rwSa!G{ar
zv(f`{iYIf4cd=lSmcSIhAyZ5_P5ebW_TN++?0!#s;HUYlV8|)RS;=_Nuz&emn{;Nm
z<vUkcUaKr{(L%GxD0tgp1YN@)a_`S!Xf{ZNcg)WP`zum7x3~lp<KoRdy;{U?)?b6)
zw+g8FhTPvuSMbQT4D`IPnCUg2Ma}F8E+Te;=teh(`zs*IG+XRcn<SL&)h5Sb5>Kh|
zh5MdIQC&w0o_hA+_RR+hO<d9E4`N`oBrvlj6T#2Hfkh3tDb5%iDK0olcj?Kmn0v|`
z++k--oXG!Ju$>;7?#&=3VhP-{kYaFqF`7-g4o?jt&?E+hZB-SB$D>$Rehyd4)x;~~
z3Q&Bq08}f}u<%Y6nrsQ?o`dv2=5LCB|D}OXKUR^;D%m{LJe&G~v^QMu#6vBNS;wUT
zpqX@*J6~n!;BW&69E?ZFl1ak)jhd*@_(5!Qyn!z%tH7`S<Wq*HmYIzT!U^tyurVYR
z`<CQ!k=;u3UbAG>E}H}eMZrul+EjR(Rlr4YCsDH1MW{LJga7&pIQGO@9Pn#2E(o55
zZ&^4i{c9dNU8D}GVJ91#6@}rh<!D`*!wNMHlT&XYc+B36FFv0EiRB|MaW64%->{9}
zr~9|>`Z&tpyfe#|AEka{F3!okh&ozz&{Sa#Dk?`%D}dOrJ+qmm_%c-dM17WGO&olY
zItd4tg6+XP{-7cpqLPAz^7uLkRl5PI`cm|xEt_4DHnvw9fI*iY%lVn|TiZIw8zSWa
z6NnXJ;f!jl$3kbtB0=S`I=q@o`LMTMyncHrZ?9Maol4qx$5e(5#hWO%w+;H77J{xe
zW%e)iGX46kVEy1O3*AYssDvgi-(11)j3@f4L_=9Ubx=PTfXQ8A5+5l5?Me#>4eV#b
z9%`V>GZNZ=cj7801z>cgh_&Qg0R2@BDBe&GD$hM3o@Sj>OsrA=#%at68OYB)%!Rgz
z<I!)@G5Rbd{H0|L&K*v3fz|=w6<mmm2V`Mo6Fqm7M}ur@3G>rz;_Idv;iI4&$TH2R
z9D9|yWs5Ha8CimSSU3BzXe2^D^|tTU2$=(B0Xon@v{Ds1H@y~rIko{cx2=TOy|h;h
z*}&r>rRdh`!D8*NGSjyX;9oQZY$kZ2rhfvlYgC}9G#e&mox-yt!||zY8tA^wXKx0P
zhsV~I$y|OBZk=3&x^KD!?FB8Yez*bHq|~C6*sqe|ug&G^?|H4O9D8<WLC?QdV4pY*
zj)wn4zQ9->F4_qmx#wA{$5sgaa}~%cj8Tc$fc0DEK+(%ka7s-OpAIrd%N`$a*eyf5
zz#Dwy86A{IUF2=6FR<{hl$mt2VZDa+uzg|!u{e*yo#hSa;~@gKFLKEATMH-L%W3wB
zaP|+{(?J+|E>#F7?>*p<jWafr3!%Hd6#F`?S=WYan4B^W;m0=UTsK(2FLlrnMEv6y
zRUH4mh!RDt*ft;pg9_uAXxb7;p<XLrRfflw>0p%31LpiK4!iYQ*z683;-TIUOkB1w
z%RgzZSUnpp|EmSv#XAJuPID;HsKpaw0?{;J2)s}UfdiMXQ+Fblav1b(HJJmM*RQjv
z2aBPZJR(skt010Vf~9q#AnDSFbvki?&;3ERYZZ&#Rsh!xs3VgXfjPs|`GM7is8AV+
z;fF4Q<!A-BGat<xe~yQ*gYzKthBKairjDIw{$_7aMpNhP5Y$$#Mz;s)+<3$}n7cy;
zcA;~bZ(J=5c~XtXA8X@`OKG?)UjyHLB^J&|Z+7x&J&Y>J#s>|ln0VTSxDU^G6ewXQ
z-5u|I-G!}xPh~ldVW1HfMUl|8TvLOxE5`@GqPdz_b^^JF`w779Cm^UQ7IZ1QYXlN_
zyV()KIdQCC%!EdzO6a-pt5~h|KA-ZI@|HCx`2TsOFD8;#ZjBm?@){4yzt4fzCvIRe
z!xIz-=rdWeL<k-EjQP;rx2NA3!ecF=SKSQmTv4Z-E62c<L8xd9A$IK?Vp3fc8xJ@|
z@6xBt$@hbJjLaEjSCRzT9b=G&y{&9E?c%Njw!`?p=c8xP9NzaqA53(w^4^KLlo>e3
zMCX2&n_SElL!$&T5;uZVmkLPy{}iH<hl3T(fwXTIv7jOuC|-RQI=b^&*^njRchUfY
znu?+0&}F7~)*7$Tok+XG2I~6O;ibgg*zB+s>Q7Q<kzD6f(=0GHVIo-WjsVNFe0(1l
zkDba((bBYyzbn56icKjf54wb|3(etYwPHNFh4>X+r_p`a6^K-nL(e=5mXmG_e#AD6
zeLNmoE(}6ln$s;v6VbWk*us+EhoNGD4_mRU1mwkQ*b7-H=3F=8PR`z-c6|+;bdLn(
zr3D}z*a!>XOvFq(+Lup>1BGOOVARvZMAP^3cAa@FXF)cKin@8C_Ea!=dKA7I%TSMJ
zfqrf=@u?<>6{d}qyuk)-PXEm9!hSGO=ca{0dr!dNH^hy*ahB(t&;a}@FaxJ`e5863
z`uDxyouI~A4+%m&1Th=KTEVAM#`aZ7Fyqrdto6tojMCc%hS}*T9dZqma#o;;<9+6`
zmzc7%)0viMFh5s+8Fg*-c=&i~wce$vP-zMHuQs4N!yf25dXG!gcVWf{7oqFdP}Fk2
z%z8ea;fl`%Vi%2F7&720*p8vQ{&NL%tXoaY0B?9$cnEcbq3CpXH#0hYm6_afW7XX;
zIQ)u$MjPU|WLBm)F=jPu->1TyjB1GeNI8lzHcS{i3N?N$6*R_8hjreUKssxX*kr;I
z>^v|J=HAJGvekJ^^+zGp_i%o9zaC{|>R8eC2)sA0235W<=5lY^y+qcVCpK3?^U8zp
zig}^j^dQ%N3Ls5gE97)C=9+zjJhh%6b>D+sdXaq9Segm`V~jed8BTA`M)$HS;8$>%
zNmP9KsG3Qb<2o5Ew`>H_rRPG%*EUx3IUR4(yyK^JC(!2a4bT&<#g&UL;2#Pv++ZD!
z(u^K1s*2#9qAE!IE`b}T&fra(GniZ)gjP!yaqHC!Q6+E=x9%?H26e>#BmTAJiB#ff
zT@-(R5e{e0?82BkwQza!9Q2%OLHwl0tVeW}JHL;^W={9?6ARJic^B($I}FMRGBC1C
zhpS#v%)d7cb7DjAlG0)HoN!L;FExbs`6YOt1!0f>dZzpQx;TWMPYI`^P_(a2-1#I`
zoVnc;oHSQJ=YNIZ>PnrEwK;-~C>IPioM+x?<>bN}4vFo*veE5(@vgEl*6d0^{R7{)
zb@oQspqqs%H-GSOi;K`x@62@zDMvBJOo&ZtfUKi(n6G>pt^DRwzw!w0nw-gOCPcGI
zeS3)OeV-d@`D5&DDSu7ex{SR+=zQA%H!Y9EHG9vao9{Nz5Y9p2br(EooQmOrvxv?A
znC+V;$70213|>urFi{z-R}Mfehj(nji$-XYZv_p5V6gl%75tx^gO3NRFw^w{rZ`fD
zN^u>#jhw-Bsugs+Cx+krSupkp&Ebs5CFBzf!$K%Caqt<li8Wy=<x$vvx1Yal$z?M2
zPke526xi7-!yAv&&|IuVeX1CmnFM2jr6aVq1@WL$?OeOKfp0vz2BVe?fGDGb81={>
z`nm=~Z?*t3&r+c^-V)vJWMT(Bw>^52cvH_}Ebp*K(N%4ze}0F&wDW}UEJM0?P6OGC
zgD6$1G~a!{4%`yf;@hPYQKEebYn3t}{_h*0$n-!_leKyKk#i7AtPruqO#JucDl{2U
zfX&Zp@b%9-aGvENTt|5}{o&M0eV7OGyjQGi#3j_w*a<#+FN6B5YjCbB0<}MN^6)Q8
z;5NPr=5C0@jLqj^roIG)=nK@<`Hh8cpT#`?UJZ$XgPCsnZcq%W;eAhIdB*m0;*4MJ
zGu=~aVze!xS#uIsI~)xLvLe>B>SrGNc|R<)LsUw+gp*$_M9&2QV!yBuW^>$tb=0I{
zZ<sgrW47^5&m+)2+6{kzBL?h=L!X~)p+05^^ysOxjQ{@S1|hNR_>UtPWr<=Na|0eV
zH$@Pgy)Bmfo5VY>(etia4Z5}zqfe_hSZ9rf_DL>GUhtJA#{bRUtiOUcW|GTSZ6F9n
z!v8y%ymOD0SbqgQ%T2addKSeoH}NSjS~DMFT&z(sW~3Oc6HxSUj9@}ponymF;6i0O
zI=Lk9Zo21m9;(OJ4<jK%WwhA3?miRFQ%+1Ir}MWmO8&dTQb#$Vp-lyZSLlIUYXsoQ
zjo`mImd%>B6SK~%VCyMkUSRk;xBT@A_|GMF*N0Ku^^i8Gg#F1JT34gQ`gilbHAlcS
zEtln7FlE0b?Lwiv3_Y_N#6bb><ZWyK<&cZmz56SVX{-nBIqI0=Rfqj&6IsqUI(s;h
zW4u3*iN2r^RJa8Ct@F6<>{DQq@6Gj{-C(v;Et(#A$-;*X0o_{(LiubpbjlvdqmTQe
z%O!fAc;{i+4qK3{k#NPx6xaZlaqK<?y!zt=_ASi=uR<-9sYhUYf<A9|+|Si}azUm2
z61djq;rHTtcpugX(jqa`WFeMC9RSaJ_FUIh3|hl4^JDf>)E76vg87+{w6_r2-!6h!
zuLKD1NClyRI*%n&A$x5)8tht&@*g79DXKtA&m3@Ev<tQV6?2u;f4I}r01m_o3Tjyn
z7LxH;p09>YO$$Mlx_G@694;r-64RJNty(hJH|(SR!7NB=zsG(UreRXZSoArw30~0N
zN#PdB{|Q)0Ug%fcXn!r#|NE62OgqjcGa?1MhX4m!OCV*>pIqb^&SY9e_}d-2C&ncU
zO}Qz|X=Et+U6}zpUOQmZ-}6|wM-DGCs)eoIC$VLg0N<}r&-TMUkbd*$Dr-*i$0dQN
zP3$dcS+bz@S2a|%RO7c}3Jh%<!aN6EVn%;6%+Xb03m(?P+XJ_G)cXCrXEH#d=x5fW
zTfzM^tLSr$hYsS@HjZnA3(d5H^lAVZ<<BfT?4c)?GSe@<nCp)^4MtyyVQTva>nm<!
zWy(XKPfZDvT`!Z1%tdTgEW^Im6OchHi33$in5$QU5NiS8xDfx-vL+`?IcoO?<EbGT
z2>L%k&Up{$8tI0^EF5uN%vqET-vDl19$<Jk8JllLK;N}0_KGqzo>xow7_B3C=@GfY
zMQ6ntiJ|OH(I|{9jl+Uuay=jJ;_WY@LB8<;D@!_siElFbf@ETM?)fOrnX(2B+F4=j
zBR_n6CmP#-TEzPonc{)0YA`VZwmKmXQa%(x`_lPf$OGYxm>5bM>*2-KOei!h!h*UW
zd^0==nl#<`PtA!K{&E`pSbh$l&I!N=xh~l8;5Y;fO#!j35e}YYNUYIR^ckECp|b~Z
z5$*-EU76JH@fBQ)&O^pLGhXFK+2*--1k>--kB#?-<K3m$pG@wY_KQ64Juxm)zcGy$
z&&AVqLgA|$b+?=X;ne+cXw`5K)Dl#&HGU`aulI(7hX@PseG@u$Qtvf(9B=>C8@mJT
zC{MJMiPpFYl6lkMvR5^3_3}cuk-i`)HATCIkzCg~TiEFqjEd2_U{-@28oj;E6Aw;f
zW&SbPKzl7Ci5Hl!(8KO8ea!E{B`!L@sj^`BC|LJ2m^??9vCpD`mFdt<LPFfS+nr4H
zRv6kg4S-3Sjkt+;9U1CZ!Q|`%-u|o_^p8JgvGS$R$<B*=JY%8#a4(lXjO9JP4!p%S
zo&2>CVoPIU8EO@ZPtUBzveC<-$IpNj-9hRK=i}{Pit$VAB(!=J%dLBAm^5ft<@PJ7
z(Ejir*7*;hkw+|3R<kEI@+?r9_b0<E=NKxjhewwwqr^Xn+fNa*R9uLnJ|+3LH>oJU
zz6-D`mZ>i<LydpbdDNa7HYBMMS6?Bg<jEN58@G~QUUw0EF1yl<=$P27-v-Rgs!*}(
zAia+^!oq}7v>LS#<~}dN$L|he&@h^>c~0T6x1xEgY6I)>q|RBjJ?J(pVakOm(A$v1
zweAgr_l=h^<JYs)wasHtRLxA&PV<xzgCN833}zbXVVqk$J~)0FKHT<3m51s~wK)&A
zP$sms;u_mKX$r2G8-^+~CvX;FgqL4Yr#aA@I5(^KJ(Udfdz{1%JUI=WC0oI2)@}&6
zmIYB4ws1GqiO?&*f}-j3LF88<cb-#&22n$K;&VFpKsFO?Xg1efXwRJ=66bPv9mZPh
z0G~L47-@}My0V?Cyx+={Urzvu#&h%Dz0qiLns#JCIrx2sFJ9NAtlOG$w5bdfQi3i*
ztM+s5^n5YuYdb*D^+q`MVIP|GcC)gAW3U{O=v<i$FD6%@)#-XHOr$Q^5L;e9wVU^T
znF3FbOHke(K)fc}&95UK=+Rvey8jA`E%1TAGe)Cg>QXTh%OdoKJC{sSLe=A=kUCG?
zHDf(El^+MQNeWomNV_d9DbJ~ykE^z8V#niaEDI=8J8+{=y>cT4*=x{UP6HM%Hb9le
z1zd&OGR6K*_UT$ayl7vK)$d(U+s^=6Gb!^44XC)I3uDhLL`m;r*1OOfLZUS=^xYg#
zYg$9^sq3g68UdOME;7HD0o+I&$0U?()M>9o+c`4U^;Qp_PP9c!Am07<6;Sjl1w~J2
zKCi3<_&6Olb>(8erVYOBB!7L~T)45^fLNI0S&9kmSqBDUont-zxnd+9*cu0J#pk%p
ze;m`j@rpIH(K7&N0EGO?L>k%h_TCz7&%Xk8d#Sg+D3Q1+E9nkOS&p49Omo>h=qg?W
znm=ZO@s%4`-v{LP6>-rWiO{V*iHAORfemVb7(PvgTUOM9-+toCv>Tf>cPxU?p~S;}
zYtG##hCq|~8+Ivg0qVCu<8BYDcuq?ZD2}V5r}sbH?QjNvws-}8btNv6^#A$fSaZ*b
z-<Ws&MI5HKm$)}Kxt83Q#XZo#bwx3lmn$U)1&6ku=MrcC0S|JWLJZd-y!G!8abU>~
z)HFzDzvye?y59@Iek<)v9j(QlUsl7}k899#q6DNnJb3q_Bh*2gS?RGg3?vratZk?U
zEYOid(}^lDng1)-xRN0DB2J{!Ka-*Ji!OIl8p)dq_dsjMI7kZ01E)i7!lXY=qEfdU
z#9xbGaQ85rz22IbBD3)U-T4xO-}6A3ExzOBu=L<y#B1IV8JvlQm5rE|nTo&oX``mZ
z306Dj!{DdYsP^$R>#U$o)apIF@j5XL%Wh!K*?vCY!!pW^&c>{%bWcA&0%Qs6xQ2Qi
zeHX*AYdGb6Ml`^ucL=S&Ca@b%W@66hS&*^k3O*iDh_=&KVR<ci)st4T_79Vpbp9Vg
z>}Bd6&8J;W@SiLr%n7<IYM8O@4Tu~W18;ZMvG!NPq1|aHzw)FW+XMb$9w~0ny6P9^
zTN48Hic+vin*f(iW#KyFVfhj3pg&{><xL}C@1e7ple_>H1(!fnLXSAphq|@_J|Len
z8*FTrvSVutF=@a6bcn0Muhd6X4AK>wq}QNYzz!H;ejd-C4#l}Cm2m6~y-&tI5j@H+
zaLF7$?$b)%!A0GCujXb9+T{#^i!<piPIH;|&0>>XX3W@O6Rr!7gFcr^_z+uyE7o0r
zvCl65-)+UlFFK23AC-e-XpUIAA`8Z>UXLQHP2d?31XhI_yljsOUi_mLwT@=Nta%E=
z9#>4MFM_D=d4LJS@O4HF2GN{7WzY}+!!-WMZzxKHLU{eb4kOhm0N0^TzTi3Frdi8%
zZrh^O%p2UomUgJKw{y{=On5)*I%FoCg!F@3&|Wi-%Xi*l4(64pUF8i=B}EW^MaB;}
z7DK0_1w2T;0Je<^HuY`;hM+Ra0t>m?tm)7+V-j>Im7&3(4LodwK>2FUwZ{#C!@>Gk
zGF*mc2gcDpU>o<)(*f5l#Gko&U0m?`47!g}fLW&+T+OdQ_tI!Enx6_=%!5(6HkB!i
zX0YVhPT2WxpSXKHv6x0IFgI}G(Clmt25batEtrXk3-m$Dc?xAA3n5RZ77Z5WK*1!+
zyIMUJN_JP{eUEH%jupaNm*rp-s0qtYHlpTinlU;RGu6l#9NiR3%m_1-8pW7z(w%~K
zM?5e?WXHQ+ZpGLFRd{O}!3?dV!OeF)SWyD?w`woipC5y#-ZJ>fW-ZoB!<dd|8nmxn
z$VIKY1ke4~xB|ijk&Rqj@ai^uJN5zBq49QCR3Q#EFvhdl-uUim8u;Z5K)0QyJR>Gq
z@VsyxY`hb>+2=|eGHn<3D}@nbkNP&VjbK4`JW9XB2|fE2Lb%C7F#X)d+=35)*8bm^
zRhkW`t!MaZbtA?H<-%jSqYijpM|%wpR~MZ@8$HSp4Gw_TTH^9fH6?z-0&^#mWN>xv
z=CS++^H&EbxR!&OhL8B%#9?T9@D=x57lfTt>v+lZFzC^60>__%!1L)CmPi~_U6mcU
zx}Dq^m8B5sTF;DTJ>=SJdinW}326KSF><>c)n-nGp1;gNH2gH4nY#u1*FNDo6RJQK
z8bl6&acpr>7;)TAL%nhtw|}$)V@3yp#=%JXJg#w>hAMovIEx0J<-B$Aeg0`_4z~Q7
z4x*ifm7Sx!c>J4ad=VOeLnAcN^l1dV-CfK^cb!Ei^+u>Cr>pY*LtwM=PjQO>C+@%L
z5@?Uz3UgTo$R8JjO4(K>pSKYPPfi81n;&@Rz9`}Mb0yeWXwR%Q&+v%HV=+{%7-ZK&
zu+6-ai+1J757d=G{G=doYY)b>f@%mpx(Unn%AxCO7GL)3EG!V>(K@DrX05(pIcO63
zoYdfZZ4ADtx`YdcT?ON(6)@i-51)>`f=e%EL;UeT9B!12IT|!Gx}{(SR=3!*s}s>L
zdmghoPzydEITN>3L2d8}ye=%kgj<JkVoC~1Z_fwcfUDs2J(=0g^Z>P4`j9<)7D_CJ
zFl+DA&~ZHqw2qGkjh|bX?g<|u=*v|Y_s1Dj+xQQ+Z7AcmCD*ya-$-2FYzH@|HvsoI
ziJ>+DD4QI>dm7&IpVvp><=0YByt&B?hFe4acri+khI50FhhgShVk<{q6ACPnq2_%J
z8egk`v9E$)=rii`c_gsz#-V7JtH#a;A>ChyG2(v}N;M);$NK{2S*C(~eJDnSYyqDm
z#US0XP;4hz%j%COprULzk8el;!^#v$^fG1{`{d{s>c(DwpMc?#-?-mvYl!}DBFYC8
zviJ1=lNja>3r-9|qc@$*;G{E;w1@*0>y5;grM!7w09e-)v-Ye*Fd>xu;PF?Pna&oj
z(ol%+%aZ^NYT2B%+i~5a0k~#F4MvTNfj2+pfbzA0@WMF;f>aveXe70=Qo=BHJ~`o9
z>bXZ+E-2r0g#!)gFrgs_kB_ORd5aNtha0l89CAzrH^7gR#53{?6yJ-x4rxnjpu>=y
zXLEXa!KYOqUl+@4$7rLU(sJnkB^3f3$}y_+o-lt%BkB&D%wNCAfzKIEXtkjMIx~U=
zUC0#z{W8)2X*zFxyPsvOp9u>CZBg>_t60ad05$elf#Kd_`rFQfxpD-PSOf0;=`i;G
zm&+3CZh%>Q6>v3e%sEwp9)FpE?B8atA>INvWyCI<zZ@l9=l;jhx%kDny={Dugd|Cl
zBuPRD=`icwjD!$ElC()eh;m9Q8J%=)N->=@=^*JaYO0xM-59CFPC|$sV#f|)wqq#X
z^?Uz<=`+pqtaV@ab$u_=-J1!#&p*g|n|!pi{hJ@%mP<p}J;&pj4P5Fyg(kYO*f)b|
zc&3R8$?0dbZAuH7t0hJ8v+q>+-i)Rk$$`DmnHbYSF~ZCRMagPlx#R<Nu3`RYn?XdP
zYeQU@G5*=sKDy{$5k9vSqD96jA{40e`x&1`n?bYCKJ7U8=I21-_BybQ8H>D7mM_a1
zN5j8`;C%BO<nJ}YNylRFu}=o}W-*5UpSD=^<1Eqb2>{=5gTeTVIz*Wyq3fJ{u(vG+
zY0xyXYJnM=O;^MH%@z3JR6eHV$x*T(5CS)3V25EUXX3#jV<&JzopU5)Yb{K>F%uV=
zAIE5|9Co*))HgGkTEz&Vzn~gyHKswQ`y^<qIRqM2ai}CMcJZuZ$nzD;<-DqZo>b;@
z9h3&~g>r0B{O0ttEKwM>OJP3#1x*ZMe#AD$2KpEeyTVy^E?h*hz7}n^Wul;VH{G|@
z7k?=R;8wG>sQdLi<lnqQiyav!Y2IRZU{(d~nb(MSwTOsP^r>4-3cioyp*{=-wKW4l
zP!rEs{+V{S*G<O8z|o-kIGvjADrI@*omBJUS>nt5{mP9s{ERa)+;t)jhP@e%Nq@}3
zo`SK|Gl6Bo<4-CKq={&GA{R4qgy52TjdX2VOFI3Uh}qVq%yVSKW~q%(GJg<i`lZv<
zHLh^_ehx+_gps24FKClhA?Rne(#0Qza7vm3|7^2GOFic0J;71w`$;I{m(YyIrI2OH
zcF1q@!NY|0Iy4ReTiFPRcQJ*n;fvA2Qy12xRYP-o4H_y-n3uc`jiw~vxN2iGOIwaU
zinY+`wvur$G|AbWiE!eRHqKxp?i_y(T>~`Gd4ZU;X2y{t7RD$MYzJqvzla0VsEY2U
z!JFf(GxNrI&T)+aZA#Vvr*Ffc^Xq-WRg|-RVl5R~IPg`9gT(t~71dVPqZ%E-(Ep<n
z4)0x#P5NJ`FmVf6_1YedyMnnLIoDXm&&Td)N;n3-T@Sm~=i;Ui_Fl@n2rCxt##hV_
z*L5eDItFS{eXT?E<xB~-@kf!BZzf{?<q&W?aGHE=jD~B95Oge#!eeZ{iheN?gqe{b
z8h42Kju+5qP3CElL^0^$6_8U$ko6^zto;|z;~u4tcQKvqlOYIQf)vW*Wp?sq-ZUba
z-RV*vlB&{b40)CdLoO9z#54fsH{pyY$Yy()6P3=rM}!lnDrz=HqMPjo`l~q(yEtE}
z=E>Ol_tul*q!j2MS_%$W!1U$g9BA~={midA`h5;|4>%5||C@}<K6s+!{vyI;aN0bi
z5b`HfQqk7`I6=iWPPu#nU%qZEec=@e8M?W+@^U>G7v+M(pKK?iwO)Sbg%m^ob3<F@
zQgZhd^V5W?V^=5pKL>oK@Bd+5(z~^MFCW(7sFlWc3x4o3z#83pb!nHs1=V<I2oVnh
z@c8m#6ds?8#xW<s{>~wcCO0X6$z|YK`paL{pTYb7r%^RL5R|4PiBQTq?tNWp?ERrQ
zX-PHu8$IEAo~IKZqfjv2IujziW`LW6l<+1d$sK-;gCao(4ZC<Al>b!o2h0=U%Gpdm
z=TW``<7A|YYKgH!7MVS#3_HgT0IMej;Nt=)l%7{;*Y|LqNAB~hEn;Bk;c)bP+|4h!
zav8?_ti+Im|4`n4gY1e-f6z&z1JTK63(I#gv;Jld8agt;^X)0>={uAtclknfR66`V
zIvn?yBx0)~invu|f$?L;K|G(Q$hXvBUe>kfbJhej+|t3gwHW?gYk&g(CnQJ59gTZc
zggmEgW=*hjnQaZC^8RFFd=<7CpF`)Eek7ni7F()aiD<?j;D6~m7tPpi5jP|-&Seaa
zzY~w1FV$)6rYyiQ_ISpN=@>H?W6p{RD60&>p3*iFIO8DroYH2Tj5rc<R|tzGj@bAD
z*$krK{DY#&e~yf`_W3=nNIwa?hW11KHF*$ym(r4<-L#{H^{i@Sf^eidILrwI+iph)
zy&HzA*hpfmx0dgdH=joLOdu@lh;2Ec&~Nl*y#HbY%5wdI2xC~Mu^rVE*hA~M`=kwn
zAa&L{*c);Q`<sZtH~b*X{k#XY$5F`FJVQNI2Z=y01iXC8q4%%``YUZ9|H~RszB8oU
zgwZIT8A}_5YS7lIM|n+i$q)Bj;0f~V%nBMnJ5(P+SZ*;|z-K)GHIPy>8GYpuM60a;
z?7JB^phAM%SJps><4V?<!FIr*2dRZ#6$q!+D(af-(Emd-?bx=AEPa}Q+@lh>Sv43{
z>ISUOnPv0FZ>7BH59Dh$<b!R2CKP3_C&pcY#I&t}s2wf^`+FB4D~X-e-oFU<DFaPA
z5z9_hz&4hfGh7r7HM1sTVs9c8&OQxODmUX}X%?tr-gC7fVW4+R37gn6(BLTtjR20k
z85V|uffB_G#=;OSkPw|gC!tGji3e*K^QuM;#^(p4`5i0hpLGRVtoLH^ALsG-Pl`Re
z=Mk0tO~!Z8<f6_`!#9!=u-ceFLz{E4p)(yj3g_|zJH{~%#al&lj1W_gl~ZjOV+tnw
zz-Zl8bpG!ebW!#jiaZ3o*~_T3<tDKz-2loz9&&Crlxpb@LXW?+KzVEm-*L)r`t(jU
zcnuSStjHGX#iNk-=o1mXvEWh{#(?i12&xl9$RI<u$CRvx%tI;Ib!i+lTv&vgNfHVU
zzOcKg%|VK`5~S;XQl0xU@D}7k+geX}lXMl%E@b@NQY$;hh>2hur$Etz;plN_1JgJo
z`QGYhNPgEIrZa8E_5-o#a3hjhc0HzT*^yNK)t6qAc%$X2V3>1+X@Gr)VDG9dRC<*u
z9Dlo!v-e9FL&y-VI;)_l=puHUe8fH8%^0w&a?rR*#)URrMQ_z*a%O!P^2Uyb_}Xf;
zb&tbaUxgSf@JE#*4zgA<&&Glf*!xI`s&^KkOvt0?a-ZhJrek)XGs5)Kpk0>3dX^I~
z{`P5d;MG|S{GLPfEW9wRPs+M=M^S%yCF<r`!#{mju*<jsc(M<QZpK0Q@HQXYqG!Ot
zi}BFpy%I$uR*@O`3GixAI+|~1_t6dS`I1a8?A~((^A^`&du;_{pOrBV%m~I(m_Y@;
zmq^h5YoMcd8eDkBaCgQm)c!h@bTi-Dx2>}=QOP&}ZTIMxJ=v_Y{5iiP+Jm<8P7&pc
zsZ`%_4;KA8Ok^x?oLcaJgd3g1#4;ghWPKw7{v@K>TnPOY7tzx&k<S}?i!ZAe!HnPe
zDD;|OC-I)jHD<VhIG1s!{L86ks2*gDjziVe-C+K%5<A_lVEd34#Q5baDo|=b%e8v!
z*fI~jr=LXsz|C-T#3=}gzQFiwebnW#7D%6$fGDY+>roz~>(*VsUxU-on3O_PNEDdq
zN-_G$E26*HkjT=@h@*i!$uIT=@s#OQu(6Da4~XcmU1AK>ze?_|uK<l7&!}YQ9HN^4
zop>Cvf>C}ch?@w)5=Zc#p1|ph)x;mi6=+%PN22W(g8$DKMCek=@9#GTrQf>vZ6Eeh
zy^~7jH>rdD`G-)j-;ZxNCJ{bgQ&H7EA@_B>9yY5vpw^3JxW}ystA<`f$7k`7C1%{(
z!37`@&ZOBfQ(0Eyr9y4N85$B73QBzwaNIDHF}REEY;$i>5YB<vS*M|^*%3aDli=#3
zA}spCSQq7UFk$yD)Lp&`|C(WfvrZ)=pXF)_n<WsvT^-!Z{jq<#6kYf=&^7c9xly_f
zZQnG}I;S?G>Rt=wM;+0zG#iba3sIEN%Nft;<JvQWP|z064XHngt&hgD_g(^5>`(+g
z2lkNsU<al<7SP!NDQxGFMSk5+!14R{V(3AZy?9Yb-*2hHvyr**Hi)9cIgZmXt|nb;
zM$xeNv)I`j1JpYjd1s2DZc8-PE#3**a~|?FW~Ec#{V~|ptd7I0=c3P0XR3LP`SUhg
zaz>Gqbv;$WuGRJ^j+{f~=^|+Su0<sk)=<xQk)N+*V_hBNzbvQ#pI#XnJ!a1p+l9JQ
z){p6u!hQ6M0T=c2XyL{7Yl^GD3-PyGRc4C*`sG~UwJ03fZv=*)EoR!<Vk}#l4!ov;
zw2T%(Ta_hrwXUP9wz%Wgx2y2MUe>{}C<A(vS3qmvMDXwZ#cv$`owOMR)6iHBl*4oF
z<Uv)?^?MQ(8BHeUuV&NM(kn#Pw+A(UHISCC!-<>a4U#e`oOPIfApVk#U~Ff^e|>x*
zYE)$aj3~xw_9iGQW!)1KGNEL@2Gn&YL-f%o!lSDc$6F8LC|b(0pUE&OBM{&JEQ1pT
zlkh-x16n2x!Ca<m#<xEp-$P1qlOzwDn%Q}?I1N5*JBJpkjlko7l%I0V2jc*Hm~e)9
z$WE+coo+8kc~T}W7<LIY$Bu&1o|D)y*?^S9Tm;9@On(`c$P(AiXxXm@#9yY9AFDT^
z?`}g(=8|yapbD&;p2hOarx^qFICpCkA0JG=2H?-~(s>s+SJzmkr(UBHws(WocPTum
z#;l(4P_UU}yq^(Jnrw#=zb#Ppw2zn<9*2$b`_RzK7M$=jUfWlNPgfY?<&07YDKKN&
z?HKG?vH?o}j>5}F*p9<sFgjP?;-de3O05>_gR%R52pg4)$NFzZn^-3_tPF#`OEOf=
zn2Bn9)+b-gcnbWNcCzU;RJdaW$ZV3pfBSdNh?4>f1H%3-EK_=G1zK)7N|(jg;w`3~
zb$FPP&$9s}f6OCyW8_ergU~%O6t)YQ@5}Bf2^`!H1w*nGh0$z|JIVOYF`4|Y5(Q}A
zIR;r(YS8m#5N(Wfrk8`u@n5$P3=9nbt8+;pu-!%$_FAC+{Y313!V%kb3-I*_infXs
z<fwNh9z4xFK+Llv3uSwWi$|!H2LXf0Te0`kRH*ZQ%$Q{LARf)Ug{!u5#<oMS!)7tx
zxHOG($r*}4SJOc{Y$A#nv(59wFTUlGb7X~@0+l;Fq33H5#LaTWsLzZ6b$Au}PQ6Ec
z9~aS-|I!(^LLGXCW>QspDk!U_bB0V`2%P?zXg9=izR&iuZi9Zn%deGdm1>~q-+J7X
zQpPeBBS5`h4oYsu<JLjjQPuF8JHKNrZVsM-Pb?eoWm!4%5xIl$sC>TFhEfv7m4Nfk
zgP=cSGW_~3#u+2B;MIU^cuQGkYQQ8+KXn01zDMG&2FA3kjDcNEa^T(E4FA*?<Aodr
zKGj`;s^0*aN3Np#5;i|1>;X%|GRn^=fRu-<EA!Y`(Ek<yjTeVN^!ouMqtXOzy?cmt
zkQF;`M}uSgKnPWF5cu>0w62n4UzeP5Ge)t#&S0E9P>kXY7Vv6&3H;u97VlgzMAgZB
z#slIL&k`}VpQ=Z<mReFbR2v<Vw=xgWC%$Gpt3IXMQ981o3iqz#9GA~P7l#GV=BFf|
zvlpX!Qwi!t=wN5}Kp3#z5GAhu{J?;%WM!oYwAGJ6abyNcKfEBkr4MaA_YL5Cu5#y$
z7qH#voD+}~7K+EtDv+0b)lT~sV+dDM@^s2swEQ%XgkMv#_or0erDIJsTc?7yvmN}t
z#X8I{MNs35kEpHnTq+$N&9}AgM>UFd$&#xfc=($>^V)~d*3H(Su}u$}wz1um&Or33
zABv?LQ?aC-`3^fO$itoKc>R1OT3NSJf8P->dO3%(FT0?q@NW_@Sqy~(!|~bm?I=0v
z3n}*$a4<^;`-^3uZB&oNPioM+@dRjHOox!pL=>j{36K9f16y2}o?UaD1a8%*ZRNpi
z9}-E8YAe9yRU&-Ys6gNEjKMR6b$<U>NpBbgp>Z|Kiz_|QqAv*_KW%`=fz#2#l;uqL
zKH#jdfZB-zP|AG9b)rn@&`KsPTHi_P6cN=@oP?Hn&P)$nstC-hqz;eAu-Q*eeEMvO
zJVi#jk1L7vRUW51ubB$eM$k9en%Lgr45=|G=)kg3(Q1gINs(X>V~&nChE(<TU-IY}
zdyaPKVYaXmyB3@!frpmTfXQJ{!StP?R&&_?Boj4wEwtk(<La1yq1x<O>Hg;*+B?-5
zqNPJ9f&_J9hG2<JA+en{iwgbC6p~0idhj-ZHtl!Bh3$$1cdJpST$cHs^^?DBd>Cwb
zY0dao>F75=g44$z$1GC`**tU=TFW+|G`1Ez7IG+@uY<kIPJ`vrJ>YLC;kU-0BO2pI
z63J3SSU<@IKOPc;xRfzz|K}$e^B0|WKOZibRN_w2QWWf~pk*7*0<ycjC`y8<hZzqe
zBn<<73jmCCL68izS5b$~zt+Hv{CFsH{{ubiH}Ng9x5EP)6Z~g$9cEdVz)-(>rU8b4
z%dIS2m2m_Q{<#Y(_IRPPbu@pqeHLyTf!J}-fsA%zJ7upSd@`B!%c#C{I>~!kfBR7s
zcV5H(zf-YSJ(u~FG9a*hIrHK?p}Z$+iH|Y{Z{MAX#<hgRH#5(V&m)@nrwpE7XJ;ex
zuWt-s{Xla<`Mf!DTRpc3)a*J##I0IHl)D_VW^c#tm4<L$6^vOtDIGDeg5^GEp!3KR
z#4_R_IHfs3N&Y2#A&EwVz7mM>65@+}k&yp12*xGuX1U{7(3>o0dHfp4A3p`g1TfBG
zQasW2+(tu$sn}MqhZ>m)VYWBxqi-+f3!fk0Yj@<(SFb!#HrD_;J)J>hf10-auV?<t
zd~i$PgX8_-pz*IR{9?Ocn8&>Bw>IExmJeyZy8wj~Lm=>4HJMs`8uTZfgm%A!AWLv&
zj3Ksvv!4!gA624Eo#i@mhq2i=o_{kY11o0afVAuuzooW^_<VLC9T`G8iM=x<yS|dH
z^Zp=R63JNWvq11!i2ff_p?mEpc$&-3c8$xZeKU=JR>e5%k`!p;{|OVumE*MH9k};<
zHXN+4pwEIu*t3taUBhB@^U4KP=nH-j7l_;DHK5Zm83}SK0!#iWS`;vdhS3}l^xS10
zRTnfIS;@GH&UEOxNoZFlL-|{z_D?I&{QNY=VR%4pY8F6X?+ntYe-(^>uE#*n8hAfy
zC5GFu=TE#CPTtW+1NRzOc_|H|vj^h#e8xJ{ngA`&4v+^-S6NoF7~3r8qw>Ez$hVS%
z;fMi%)hy%k;5YM1FQnd82f(;Mmxeh=aejjZ3Nr)A0td#;s8ZQ=WSemz#Wh6OtHJfY
zh@pq???%IEtZ&047_FAdh?SQco(=FqZ%^h+*i(Zg+UudUA`|_iw}2pKG1Lx?1nI>P
z((T?xBu;X^d7B?t++T-3m}gA)Q3}4EFdcQ)C?R@PBoT@km+NsQRCrFskU`9MA6AN%
z;Z>}Yb{Dvn3ehQj8HB!8L(hNSlg{X3y3YA5nwo2a=jxfnym0{KIhDYRXHj_b9n%ap
zRKbRA0yI=_rl<beh-#1GiPx-9biGvprq^`9R%a!3KUWXtBVUrk19kAM=Q#9SyOfsD
ztEe!DWWC1!5-n9GK3^w5;{q=>3su7!YYs1|Ek(zFbI?7flI_?Rf#&L`)a!m0-oN65
z{-Y)lWwR-Nw_qA>pC64*Q`(97$a<W|n7z(p-B9&_?U_P@X`u$oms<zXyIxhy=eG#A
zKM6&Z#Y0YeWDUP#nJ@Da#DE~jm2S|Og#K<007ZL|l|bO5dMj)%yGk6{@3Z6AGww(G
ze6-!61wwJNVhDd8rXGl-&W6)T>Y8aFNSn>I-Y8}Hm7jc#KlgxR_(^tNrc*)nIL<IP
zjmXUVsOZRXP-)h22k-6yNlXwdJCp&&xBlj<o;gxw@;{2x3F+9fh?4Nlr*ZF%KhSi-
zX3WPb{JeWVG*%>2!N(GX!!Juv^LRp@`o%)q`d`#jU`B28uTsHGYre<%GmMLKnD|?K
z<2wIY3TmbHpf4?_(N+=EJ0%&7ryB!jxfC5L&GD;P0hT=yeC%=({##mzs>)o>cnG`m
zq;(R{Zc}dMP#Jg$%HiPavqZV2m2X&INh1b7rJ9+#Ab0AA7DpYSXR|I*_PYY@L;nDm
zH5tS|EsOtg=1{!%MTk7vB}hF|362kspxWY6=qi`d{#r4RF|!(ce=fm*2{Le=;{-dp
z_n_P9pTy868v~clq-R_5u)}#5G5a2kraD>l(0?Pbd5jGI+r^mFYJT)@=dGywSq{Ng
z_Sm7f0nZLvj6)t=0ds@dr2D1>st-kDZ%G{~-F*#<H!$DB=Whz#_svwuwG;oPd@%NZ
zPNcpEpe11qxcn=`kkVwDTA*Owd<FbIQh<Wxnw-AYJmP!T5Y!bcMbUJUL{*l-cDBdu
z`owlDEK_rSSPkm1ob%pn8MaK!=7y-pK+~@rT&^rYo@k`qn>mHxxn9NTd+UNS>?VKL
zQ4cn^4yKl@i_^mFGEFKi$L^rH5c^REYUf9R<JFVs(iBe{)9+DU$6RW=-<k-zv}j4_
zTiSoiC1?+*1kLRdu*PIKlh3%RB@D&D?w^e_YJqEI-Ykz{XqwCBphG@n#|I@!=r%ZV
zJ_++Hror*T0Gz_I2)pkXV7@^G5y*kD`%a^FRw2l?7La%^IX!yP9gFsEr=ExXNNjfj
z8tTT;+@x@HeZjJ^Th>Fz_8`6>ZYvv$B-mR~in7VO8CRehy4`(2a^ef;>@I_aQ<PXa
zcnlf^QTStZHm2N3L!odQ%M)j?eT9S@FscwI$pi53TZsNMQW^jK4c}Zcj9T(JPPhCL
z6`v#|iOpYkb@QQhgaveUPDaVxP!L|Y&mFUtGVb_#8pm=v0%0)8h>C&di>>k18Y$Qs
zW>bwpb<p`xOx`_DMe_O@?yX8f(c~~bPJU0H?qIvTx#hU@`bKP?QiYPyY4pwFXx6Eg
z_5br)tHl-2er`%l^ip8qQ#qDD9)r)l%2}T48vZdW16!owEZe_>W&QK`iThL0Rws_M
z^@*vceH`~d%)DYhD1@*cj*iKiR2lr8mh58so|_|da3#c4d5QHhYr?q8tFW=`J#jwb
z0?zBdapjw`@ay<;D6xyitid_7ZTJx4)b|f9Ol9|^xCKx-YCG=ls$-1Y2vAi?sDLb?
zf%PV&Ev11x?N<vOwTp=Oz*}0-B}A8Nd^}}+j%muXxD3}q_;`Od4h+`E7aAhgTPNmc
zHI4^<U^3QzJk0nX99(&wfq{EQgVop;@?%~KYL0wDoX#8uw}>6!H-Mt&=Ep=WF9#2n
zYl6k5HhMJ714F*AWgM6zSiSE8It5%JZiTI6!1Y43ILLOrtfsK(s}p1}4(8>~c(nDo
zN`vIJ=sZx)x11ObfmXTTczz$fJ;)iWukdhUh#Pv2cu%CqjuXk$Kj=;^EezXz1bLH3
zf$3-)s5@9l{6|H@mUn6Bc_@K=GzmesuU|>a2vhRl=?UQdwH3xL(n94BH7uHW7CtlG
z&@}iI-p<p;j)yXWCx?KG&T|r-DxsekTfQTq41;Ua(5LD;5zf%(dgY73^XW^nVXHfO
zs68Tj{nQz|Z7-)*&_o}s%K^z_O@7y$;p9S8F!H{u+trQ#o4Txw0mq1|uqpl$K3!CX
zX8!@Ub{;_4F~&KyJVmY4%RzjYo$<yeVRXZ6tbHs*<)#$4_k0ukuCGw1ZH)iE=O%^v
zHuCpGKF-v-h7Q~IfNn@22m*_U$-)%qRdv#mn2BJ2SRF%j&Vlxc4YcJ*8Q;asAAV5|
zQu`v&_$9!@HFYRwyGwn>)8PHFiW94c(!lM(VC?ueb=+Hz+ChaNRbzd{8GL9Ov5#e)
zrgFMXg=FJ4%D7`IiTKMX@P7S>Z2M~xMy+NugbmYoe<V`d_mspt1VaB@cCXi8hGsfS
z6smjko#PUiAKx7_p4EbrawfRR578kjGI2$_E#4~A!}>=8Y`bAeq?6nDgFVBscWoVg
z9DWY8JUDRvFN>Jx)G&P@51oFl1GQi5?A)=A3m!NDKj+Gb<1Z=5@`k{K&GS$*K#o&N
zm*QX1%sZBANIM2!MYAh13>jJqs&}(V^!lSvJWw5jxKkiWiU8^6i|8@?6bUsbh9Hv$
z@cIx1(x&ayc=;5*>b`*LDhr6uvL-6LbCx@2vl=V?ve2yBgFT0_*lchDRUYpN&orEC
zcbDSblq3ioKOAgb4$z(wAFAzs4FnaTT<S6jI19q)#Jw9)iO=|dC#T_Ow*=Nd<OmX*
z_ni1vHS|hdAaK!A5Kb|&%h>9G{sC3+DmV<fi`_sFYJrQzS?D%?DHS_sqS}w;cq*OE
zuX%m+eWMu6n)6s*-k)__|4sDRcfNw{-X%j4q3c-+Dc%~7nk)nAvg8Mix6=XjxG?Cr
z%h>G`8MERW%VJvJq*jb;6K2HZR)a25Hh3e7`{m+9ZI&rqEd=RxXOeL~2V8wMvHIf)
z)}{ECQ+0Kc)|L}szq<;y7i6IG>Q^MfXaP7@?gXa`c^F?(22Qi4vTXHw)Ew*z<^%Q+
zhYtl*In<0RI>Y?gH;+@R4948$4p7<s0Pv1)ppSm8K)vN@FmFE7i$)9}PoJD%y37Zb
z6N&}951A<XafNGBQ!xe~%Su-J!&8&RH~~gr<C8^n^6LUrmHy3FKJX%&{uD!7f)_Qt
zt%IMpJSD|ur=a2p>)l!4iy7G!VBdEVgqv7L#HkPb*YlVr-SLXN={W&yjTy8_Et#&2
zDo2Z^BAThJMbB6r8fD4+khgd7E%ig8>&gh?@2^8!2QfWYZ5p`msDXXd9lPw;L4$HJ
zmau)AzSA@qp|ZwySvAIYyrqHV-Ne*dL3nGkxXu_OXsX$Q8CPZaWo#xcBlRdKN~9Vo
z=V-h4Y^L!liGNrcsSP~~`tL@;j(~l5IA|w!3o?n%D`)7|vj+bcVQ_j<GDbJJ!Q)9O
zXx{1#{^k*+t8gLud=Wu({1VXj<dY85bU0g9iJPhhVsxA?*uGmskLzXNfejXD{NNq=
z{E(85ClkOj))G5WOn2C@2<Q=(iQX7Q^|r)=#+hE~HMs%gG0})NYr$#B1=P<RNaEkK
z-fz|yA^B%Cccg>eN1knmv;}3@({K?JjR+>GCu3KaH3<8xXy>L(vS6VYl}o?aothj2
z%JI%<9LgA=5(#L;FCyLJ{-F`QtHJH_GI+LXBI8cQ5%no-z8M~mz8QMJV~pg!Uo02!
z+nGAfxd^Ej#h_i2LPadsx7TJWHul+o&TcD=-h2W7^{v28K{n=3ngXv^E<xSfmq<}=
zGeM(?(7T@pCY9$Pf<FdIE<|8R`3Z22jzafCYG|2y2!(mm>>M5ah~Nkx0(E9H{%kI=
z%Rg+8>_Lk$ImCNf8MM1?2Hx7a#L}w>g+t5~p3<>g{?`Wb`ArGETXz<+YNM&p#}CXU
zXQ=oeHKJ?sgxq3Gc+CZ#aP2B$sWH>6nVz)zkQGjvzX^-7kE7(lE+YP$?XGRoxR&}{
zE|9(=WzTfcv?+pc-Ql>p;2d78l%Z1m(oSR8M*6#FFQ#^R!Q*A;P+UBaxH$|0N&F;e
z65b<j?tYBZY&X}u;Ww$fT?ON&*Q4mrJF+{8p!2YAq~F45l<Z%`vaBKy%w7U+mkTiD
z;w~_Ir->ow)9DZ$4&#Hak|MpG)N$uTNGpxRAJ-Xk|JQUlIHr=c6nkOn?7O66OerVY
z)5|w9;G>Zu5Vb2fQd^aSZQ-McTb(8;%9u==#?Hm5pBG~beMeLcpGfrFqfBcvz`yjy
zW3+ibg@Yo(Q=LWCbpdote8X-3V=&GuIRjhEim<yN9)xcxzu)JHXq=x-o$NH>>*LcP
zS+<I#{z}K$xkK=#&1sysA{MkQCqbjnpQu^Ky6jS}km_v-xJ6vSb})>g(3$`%2V~&)
z6notJAqEVSx@b!BbR6w84ljw6Xkt4Nt=eSZV9L(L;l}~GBEib#C~OQ1#=6b(!Rq>J
z`gMc_@~&KzJ7ou>a_1cQHHCGV$8EsqiCgfjaSl49+#<YtWp-d8CkE1DaQpKZ<jze;
z-vLGl?uo=QKuQl(x}YTF5x?VoKTg0jpEW!S6a)psFZDQVv#KG^uic?-+kLtuS&H7a
zmp~NkOnBl?3f{LHcEW#0a#?JLbFFbJYWDTVXju+4MhJ+^y%c)d-C=6OIqWFbqF+0c
zu!qg;o!4(rO^tH2y<b7J%1(o0VHs#U$Z^7*F=!PRNd@<xb0PE2kRi$G=xMeD`s8Qv
z8<Xrx`Yd7K;6G4bo9V1#3D+@NMC&U3!I=KdU#PExjd@;Ra_tm+tRIDNqLjY)BxHT}
z64oP9N@AE^*U3`Ep--~`gJV%LVF2G^STntx%J!yqOJU!-aoG6M0xWMw(K~?==-KZ-
zBHS@V;k=yV#x|DYkNcTe;T#8(+u8bYZw5|cyR^^=@i@6N8-@2(q-EwaE>jtd-K@jR
zW5`9?(#M0sv?3JP4X2X%Ke_gC=V|oni^R77LAZa7al||FLG(>PU&MN&(&r~pZP^AI
z9jsgFQX0<6K88#ifHQ3WAdOF>hH)d|yz61Se@>3#uPO8aW<q2#`<vdJfGn>{>T^!T
z*yTUywY{D=MQ=BD>%~#YpX~Rg=*N5mrV!IZSWY$r!qWV(W#=+z8){7Ima}YV!6k5B
zgW&1ipD!6+$vwVb4jL9!AoICQ-`g-g=~Y7ze>J885!=%o)1>`IX|wNTEVM36fY8)c
z*v2_B-e4&#WPHCKrs0jfRDc#F2BXrLpRzBBA13of$Jk@gs~H0w|FMkpt!xmk(c^Sj
zUrW{y#%k)D1dpCCMBmAusH1xYk+128v$u%RR%i;BzYoCSyH8<P4yCu+E75IvED=}9
ziN9zO<ew`A*%Bj=b(Lah{z(*FAB0^6qtWw6B~e|OOA;*@yK>l6JaxAYPNlLt&A(Hq
z<BnBubG0G1SllA*<w<CKo$U}M=JZ@nF?Qnt#sGRor>6G9?qnC3d)Nk*RSgQQ6+`id
zAKP*M?FYAB$}utJ40Oa<(~hl$_zzA-4FfG`D>{Rt?S0YulmdlDX{62CoX8d@lIp_g
z=vg?0bdHz@*4|g)BkRtP^gE8iE&kNz`ciEBa0T@57SZ7f#$|P4Jd(C>w4J66?|<#X
zj+=u?_m&grwIdGvCreoG@<fsvJ{9w?n7}qpi4Q)@SUxffeMhtYd3;8mPr8h}>#V~)
zPsTXOxzN+`n-u;o0AahIUDJ)bg!l8f+&#OLbps6qXVo@{xp)abuPq<~eRe<7t%d2g
zim>R#RciiY5t{k;!_*mz!SpFRyNARR&p)F0v9Agsd-i2~cY?9!hs_2{+ZklXND)fX
zMv}Hw<H0{_FSKu82NC5DsiCbanmn!t&Ab%w(b0wjzho#=3x?>X<0R{{HFPFY+Pm-+
z@WwS$gI+bfdMyW~eK~x|gWLQxhso&Qb_(~a*Wj4mTJ+z}x`&n57~3}=Q(KRsKfOUp
zZsrl;q*>tXQa~KE-7#eU4!B8T!MkWCs5Zy)-F``N$yfGnbBLr8nJ;8iFh2Xrmt0We
zcyu;2L2=#*;%4!TTHa#WrT3cfW_}_VYY*TqXfSRX^NBS*SPkv7*nF|Dknee$vC#w@
ziQtefNTQ}gkIDwUb;?PTDa%e~jVE<YGW0dt37R@FU^ROysLgc6<?ljq!pTz@{d5<x
zpIHlQ+4~nqCKCO1%c0~;4#`T*1jU#JRMT;Xkn!Hox_%&N4!#1JlUUE(8E2F;)*uXR
zAj`utamOvzJ=yilj;C0xD4M++cpI74T`J_{ukVmY%H#O?tCU!_UMI}Hf_qn9!cFr_
zK#=>9Kg8fT<B<;_R-HS6y8-C9xef$PGU&F*fcG_is5UPRTmNh#jitxYf+mqhyK{u^
zQGwS!8Q`+nr<ljn&#oeVBSs8j&)J)KkhRnk@`|RRXYO0lvnv*5TWhIG-bgmH*-YA1
z$6u?(^5T8|F!cU-Y-c`yRg5LoeRUbMTf`u6mf7+84~4fp=26p+1L5!>DxT>>ORVBa
zn@oiFR%oIBgW=FVdnDU6P>>`HMi`L~0e`dK>!yR?{7(uA8!`bM0wR&W#2!0quTsrF
zV!(D^BekD-8QW7%L0nNjzIC<6VwOJ&o4XPvZwJ!04_T})w~4;_QUkJ`C+WJ}3Y7PG
zvW^ZJaT~mgs6D(&F?%hF-j)-c`H|pXlZNoG3CpE6lK7f@qIN(4%L-W^#i~Lg@SH+A
zZe$W;elaJ#Zw&2y3Yd2%7F+}qaaDu^I*(?os<kzC5({%MUbCCFemjTeuCHlHF6(`j
zlrm33F6oWWrxFVTP9xR>yc!EZd~ho<FJ3^*cf2E{XexG}W_#VH*|dG4AsSg0Ab!&Z
zw+Fkabv+;BCzgYHL?k-Sc}POu)MLx59O_y{A?i;N?)|KRRzq%5X_X<d;8%j-e!_U5
zQP3i;LbZ)n;PQS2>+`liyTw!3J9+^)q;udo%?U!JOpEz!0!5)Gh~R1mk!vkuS^si=
zye-0giz8T-XM`4`*&ejxD$a@s!11fgux*VoNcX8h?_d`UjwxfAIv4D%J40UlOhUuy
z2H4pz9y{LHLC-jE^zj%CjZT_Wbi0IhZmVV<#B3~ym<hF`^FcWyh@LfM9X4M#fQZfn
zJ&kBgEHA_p!|HKPe=`(>FCzuB_3^LcEQ|d1GTvf*>fVM}<an|8*R|pJ>iGbSe&7S#
z$Cb#tI1sd*&ha}mlQ<`vb>L#gIOkGL>f}5hl$ER5=PzUT)``#&c7*dkHj#EN5JS<n
z&&1n!F_K-ASVoz3@ys6$WtSSDV}yeF10+<*&Na~jjs}XH!GFqG(zcRu_}vOfb50Q$
z)>ObUQ5sm6l>qE0Lu2)Ssaa|)_N?B|m>S9G{O~gR-dzsHEg4kj=s6JnUdHd;!F1up
z6M*+7nYMnYqaDXTQIBm`V7u{Pygc?iCiZ6?7*Cm3cUdb9s~4i4R~mjh!e+N;Rh-dX
zF`9kLMZvCv{9XGi;a7khT&->J^Y2I0aG)-DoIOe9>q|gh%7J$pd#{aKL(C<E$eXd1
zsFEFE+z%bF+RrkdXO7r)PIW}B<}l`?XZcFkW%MIg4c^bC(E82*>fV2%`S)j_&PSHB
z+!aN%g-fabfraGJOMRTzH5*fW=b+p4y;SvdD0%y!2<2rXh_#^z{5nfeYqS;mb~-`h
z>me-9G#0k(oPZH`8Kdc+A>iC~6pXi=qaEP_`YrGb;_5YE=y#W>R=y!2;V-DMp@4mU
z4u17F!`79IzbI29ma8)%9#^1Q&Q);fw;2NNoX3*e=ds=S9`lzUMJ^#8U3P_$Gx{Z{
zu3*|g=}K69qzb$)<bbjc*?v43+|2n@I3mIBrp0JXJrhNp_3N33IFLGqZ==SGPxGA<
z6JTBF6;xGR;M`wJSWiPV7^=(Y*$ZqZ^w(>mIjNDVoC{%7b2@JPXBL{gD1=X&CL!<L
z4X));IzRCIO!8ykWwtt*N?Hb7AkrP|Oz4Z?1j~#$<Fzt!upy87n@l7R94gVS_&gS=
z4J8k^PsRR6S7M24J;^#DB0FAe!H9dKz<F;C9_=5DlJghfOpPY)-gFX!hgPFmtUi{W
znT4C~Bw}ahed_1So{jtFwC-Fg>t`&1)CD&QIDDsJ!*YN}PGbCY1!~Vpf{N8w(4L=-
zP5b0%zSbLOd<{jP4=J?QemuUse2n!C)Pwz|QfSV~WL(POpw^qlvb!=|ojxD4cTd8y
zO$E?3KACa1#B|eKImU~;Sr1wr-tK<^4YzvXtlV1E@R>x<&v(VoVSyl9xEZYq1<+<W
zi>7y-$Nelj?6Z$?gcY7diOe(J=FjioW^ziV@ffyU1M}VcKw})AT3#&X<eyV$phqzA
z00Z!wehw|qB$MV1HJ~+m8TuCfiI+!}0nhoDTz|nXRBFwqf-ZN?r@jUYpJd@OmUpS^
z@dg9yY_@Z^hq9S0Tk$plLk8ccZtXGXGmrI!*>1v?k-p&5JP;&m7U1m{jTPk?%sZ_O
zRtCCIZ&;4)w=%$CZ#4}X%KRML6*TMapD?d28$F**A-+jk;54<D8o&8Q%k6bh`Nu)f
zj^&7%WdeHVMc|MT2T-{64u3_zTFibq7V~ly*gmz5W@&t-mi-2Uc2_p{``LI5`;mv5
zKU1K5Wfp$ABxmow5c1#|<KgZNRJa9>r{byxh&a$nB-88pwImvZ3VZPUY)Omyj)Cp$
zTH@=c0ldO&yOu)**m5bKn8vpg4~_lse1jI6l&?kCNq#tdd>ua2Sb$pk_4sS)0hBx0
zQ`zI;F#ACqgocJ;M|~Xj%$fxYcjsZ}AQNmm;RUu0=Gbz21*E1pgN1$xRnEA?uMw_5
z?ilOhb+!jp_W#a85m9}$#j>+?*b#3`Dhn5(Jj0*ZzpaG8zbj#dmLobh?gmwXgm{|v
z&>oh_{?lhR7WG;~>d;8M`YI9M?+nAw@#$o11oJyurc=}Jtz@2V9Lq)xg_TRKm{-sp
zyi5k+{t;~MHyR0^d*Vr`pa3r0%)#?+KFC`a$=6M(gRBE7H2R_q78kO9;Je#^_ud3O
zc5A`!p0mg^^yh~c9A_-_-Ow>K0qX|2L(}m>(!IhAZ5`#Xv{nZN8xQc`3@*c$FBYQi
zg89JTHwIM#ThjF40`=ag2iphLLu%be%9ZDX&rx&Ai@Pi5Nt5|q#j~JWHvwXdyf7!C
z1lx}vhO6_P&>><B8qaOxyZ_~cmwy(q%vu)Oj?TmAq#e*TVmWNOAOq3DQ+!d7DeWnm
zOj=%VA^xxK(U`$YP*%#aKRXo)o_KzZXUqrUNh_h{(|2ypk^}f-(;3v*$rzhGLV3X4
zDAae{1lmUiLw>?}*0qq%mHZ_^r;K#M>Jzwmf6hlWwHHLsJr8a&rrRJl#v!RO#Z<OO
z6`f{G7t;r%J6;<;-YbHL+clWUd?HwJ6trfPV3+b7WBAHY5?crY(|zz)RW9lq*+65$
zE7HlB$l+_4Uue)I?y>p?v};es$sHH*rCU7Ce{77dB^DSs%LY3tkHH*vKXc=|!-SFv
z*xP!CTDsU1&yD(^5jh8VXELc+G8<B1I{aJaj-4|Gv+lAjq-2N*bjOMT7#|?>7V~@^
zDYSJtl>*W$E2-kA7fO?oQ9boMI9rAB)eY*wYS>w<IE*098VLINlW^U;ov3<vg05Vg
zjGA|jL+4H*>3x2j`X3tyW!9Sbbp1H|w^oV`ch%9^c?AU4#$v;!22gd4C5@iuuyR%`
zgiSaPzc!eok+mKM7EeYOc>}JzdlDB{B!SBh<}K>JOVVxTqT9@FD*U#d%w04UJ@VzW
z@%BgJa@7Go+Q>k@(HNpL3NbL{42Fr%fN9rBXzlliHnDu6?Z8xU7oR}!&7<(yES6<G
zeZc#q8z_6O+V$+IrcNW)LB`7rRK>|b|Ck!+KNN$|_>Mxm(}#4$PbOe89W=cbg68nQ
zNc<rYcyCMv7vC!8ds>f9-csoPY5<07;)r>=KD4=C0gGFwu+IG|tqng7og3ZJot3@Z
zVmr(t>lZZED;+xabrW88rQ#fChmO_fXw-&MD9Jv?@}qGWt;m46<8@K^)r*_3aWvZg
z^O{86T8P~X>Zr>uXW(^M*tT12hkHvrkar`H4*hxog(G(ppRh>eGORJMD;7MmUXZDS
zPeBjMN_t9zx&LDH(Smscoev&{c1;;*K1qhlDFyf;Xax?k@y7hWQ&F?O7W0d>aUN?d
zU{0G9*F5)Oot+NU?c5$>eESm3Ogw_o_cpK@W+1lZJHm|43~W8K8iY(+oY?1pmJbwg
zXwhuOM7G7|g0rX}a1wRLEXTDO#hCy0JmLMCfxKfJ<vG8wv2MtRuP4gDVoy5#u}q0s
z!v}%Sj3dx$GaBCKo&&SIRH*n~gtEUbpteyYr(EUB4bQE|tDzS$f9@*y@s}Ptl`1eo
zJ)Z18SdQkp`^g;$z>*O~APgQwf`{6m=pVxR((RFV%Rpf~Q-*<Cj}qa`E1VYd$*Wct
zL0KE~6FE82H~PiE^La-+F0zj2S9RcdYa;FcX$@*5<$+5f%YI*52fWxk)_Y(|*7!xD
zdBqe|UoORf??P1jPy~6Z3n;f-4!+VF@<J~dA4V3S6z9<Rjs|=>Pzvr3!%;Kq2)JM~
z&E6J{AvsFg<>o^}%6L#bZ8-*B+Xec?HuQ_h9t>bTBQ3FSX-X~A!4q@ffc-jjJiQ2A
zo-1kC??PBrx(aPo-x)_sL?x@&GnRG{>k6F8pSVaL<HP)5X?zxnpU1-Sf%EWCP!WbE
zq@Zo#84@4Dn5V`&s$0jj6WNa8EF+(T$Kz74eRV56XnF`d#@r`G4Rc|~5kJ)Ucnk#3
zCvja&4=mlc427;sK>ZTS#U7aftvV&-;Q1@W@=iAu?7c%J+UubFsUON-@5bnFWrXLE
zPHoe>s0NsW@`g-dkS7O;YZO|I6j7DhSZeia7>ZX-qR#mR{OuxFlytqJuI4g~-tL7V
z#Ti6awib{V4)$LaaCb`_9_fw8NZu8^+gAnVF?-<PlV{}m1RYGN<Dt(=7g$&z!RDVQ
z!1qcqDE$GP-e%(_%_8)Ccb#~4Ca}H25#kY;ML&hG?Bt?A(j_i}{c<7e=`O)Ue<^B@
zb*4PgHpRT>5)iR7T0{45vNtLUO{D1%JyZc7#xf?YX$$p`vCq~K)A$(!Fu`CN@>COS
zrOQ`>?&27jR~U-A`dM_|_ArQK-sdK!tN2Wv3%#RUpi75OzU9_oV98aw?&m>_p0%0=
z&QwvWaC;D{uJDUhjED3wmbiW$j)59EB+!&~H-39ZJL)F^@24v#ecH_5Wu=MB#uTt;
z_a)(J?vxK?zKWLG6ym*T2^8;XfR8`L==t53GcO-V9YVudA5tkS=nZCQylmq0U^r2&
zzC}DMmypo(1}Nj}Ao?CdjcZ}heme=4d4yoswj8ROe~+(me=_AMJFOqp8X_-#uB}GE
zK+yQY?y~295aq;moaeu*IDUv6Rwk}TjSWup!Ex5#V7(I5{KvzM)<ksMtV40haWb&v
z6t>1GsdoGY*mL0kwj4Tw;)yCMce;j#?eX;V?JR8Em`{E){jMuH22wX&faqDCL_Uta
z!yMzGld(uO?5iR1sU}KCH*pz-)xf)uX=~{`7i={X(fr9dR9WBQ#C>}B=7|aRHg1Jh
z=S(Vn{h4cQ@Ff;E>)_44P}cKWg-?uAQRw|2SF~U(ghqy8kMlK>))$8@D~?mob$NWt
zsdbDIJsgCI-qbN(4#KBJ==(=3<bP+m&F8Z?zmGF9_0uI%aleSoCMk9rL4VQhJ6MLS
z_7~Bu`4f#RhVwPA9;Y6^**x#&!9{2!633IBB%qdM1AiCOv-TNKx7`i|RH$hA`kQlf
z9YD7Cia=%N3WB-$w5}%>RLpA{Ju?)7w4{KxF_<&sC`yl|@q>I5VT~Wl;50c>-D3yf
z`9doc|JqCN_ZHkXcO7aUB%n=qQMcfJ@cC6PXgo@!_U+X;FESU~D+<Z$RfKs#&l8Kf
zRcLr*JLr0y#2=?OW5Cc-Y<8#cd50^^o0`ndJTc?G#$n}B9o%o5k2;DN=x?40a@RzX
zysrkM+s~qCMJ=~^C!0@n18G=a8Sb6dfVLV|Y<HbOJ>7zdMNmEUtl5Tt>oG1`T@0>x
z^#_VFrNC2~62VRb>UmWRYDxu29u1=%K|H>unV7~s%RuiPQ8eViVU~X@fgZ*a^&j9w
z9k=;`R&73Gz08HeuoAEhPoPpoEAh>YLlN`esLe~Hy(%f|et1bGi_5sCH!cwBaTYo>
zp7BGE2B3=?>*5I30#BWZAaox<oMt$HeOMCiMRu<%jpv-Z)A^E(!Pt9!G4!{n#CT5u
zoLa|6;kqOuI`@$r+_npBGYrYE)O66+PXXnEM1ITiy&#y+<2#JYhC8R(t{^K*(a&9h
zck@%A=}81E%RdYJ8B6g&_d*neY{JDMO0fEO5K2=L_%8G90HpEQj3tc4+e~|#6VN~R
zCn+<JKs|vJQdM$jPmP4Ya((EyKA9eVXNabwe$wbM%v&)2In}6nM2erY&grf^{5ARl
zSUva?w5^{J;iYZh;4pyI21kOqu@SUx(trwsB6L~14{BSBQE(_q@nzgRv<*R0)VvWZ
zd={Z0tpK;K-Iz1T09CWCh?056G_0Qz+dqov&fZaI`L>j9jz5f}MGMi}u#egv7>t%~
zLN30+84PnyW77>2)F0ym8iTIFz{)smzgs~&^ir7r!-FLG`JkuxAc=Zc4BewAL(37S
zFQ1Agmvh+tS}l$9aotRePG5!3LHDWl=m03Pv_wP0D!hARKI-<%As-kA!?=R&mqHcH
zUy+8@={6{NXihc0c+;-=3DiHQg3XI2RQrb+edQo$tg~YzYj7fpyRBhsZW6ATy$*$X
zjL&kqkQ%R=Lp?<Mz~wI&@Vx3oS|j_z=AR<ABN9M7n|B)bo`#OOc_?U(;?MV!U{=aJ
z;J+D)KEH#=sq%5CJ-mmTUlPcAlrjiTSpia&42IlFhJzaU#3FbCoHN~z{@YeT%c!Z;
z)Gh)go6o@<CqDLBHQ>(CPN+@n@ZWjsv2D<FT6F9lxp3DNy;C!A{$iFTljH(#VS$3T
z`?j4%%W-V|JRFoq{v+nI_;6D<40IQXi20O{#8|9LQs<lp$8p0jWKJucnwbsi*%#s4
z{z&``=dmJkC-Rm|;73GI6g|97T_-X|UD!!DdBOn~^vlB71h#uVI)Z<X>AM#D8HcXw
zHNWUW3fxILgz{fAAnH~r)@Cseid8?@^lA=5=r-^rB6wp_!!#sq8Z!O_uH1VKynjW3
zB(RC|)|v{IzGwLn^^LU7LP3nL7ZAtaY!BL2MSOvEN%|+U++-=58EB%<oC&1$%NUrm
zIT=^%s9^8n(|Ggma8!8g#_3I$@nr_v+sO^kGG-D)1f2!VtYV6(Z>VZU3U<sK%2|5-
z$ubCsK>Ae;MY;cyk4CjH%$1^|*$5x?P;{PcMtLK~a+d73-lj^${Y)1(fiiIWVn(#(
zCrN(EK4@xZ{<zGY)JfPtevD%*it)j8`)oGXn3RwX)lVvZ5DATgwTS6-Pu$%UkMA?W
z!M|)E2rfUMqK~`K>R}|NCSSu=-Y&>*JO(_A$2OB@oy0@lQta(i1IHQ6Z=zODX1_GT
zA5YBjD9aq!ZdeVL+J+>o<TA7zo(wkg&LKR%%KVupYR0zs;;~spC@Bx3v&LuO^T2Va
z+}B5plV9)!7IMX|I(z2H7K140H0lm|Ld}+*1J55*!M4i`3_h?K|3^6JD|D#KC<!he
zxDNx0ufcy;&Y{-&CD`%2oNs=#9J_R{fbq2bkoVFAjrWDnQtbl9nY&=uBxOv28|?mO
z8IJ$DTH@B;3+U*%fjZ7VM{(m{r2A_Yn15nSfZhnO91~72xaOkROoVcWCE%}d22_4Q
z5Ttz+CF{G174IfJeLokkpIZEX6rF28OxqjA2O%V>BncxSge0k1uZ$!iNs<sk2uTt`
znC{cnbib&K?submd%bE(B~FJ<ZlOcykb6i%_^<z0AAFeh-fO+j`#iq~>;8+z#0gp;
zjXQ%K4`zY9fI|COId*M63g=5TamLWA5Tth!`9C6P>w-_@Wq&NZvS+^QqcWmvnngXI
zY{7wyvnY95&iGinz;>R5%d{6^MkC^U9S!C;Y9_k3TIu1%>`ZA)!=%*nn0sIu(f_K=
zd>e)A`Ah?ys;eL$J6oW!F$w~E*P}|;BHBMb1UnMfgRJ2z_kCj!8dR`8s=!~=v9yYI
zpY+GLDKpTO3}WAthunhfBiPGh_kgj#i7V4)%ND((GP7!w#_E9UkJX@*x(5zUWS(1L
z2WfSPg1dsjXg#|eod03n9RDsx$0xN^IW8M2ruyOW_w~T5o@v_MjMQ~`4_Rlp1U+Vn
zke|LqU|^5{J;qUR{>Kc~Lw2}2vXssA=N@s$vXZhqHZQN92mKpTK<ijE<kvN^xtKz~
z<tGx*n8-TpDX47C6lnA`z_StSP-qxft$SL?e8DHMYeO>)A2E#OeJ+9b-;854=&`BP
z@eJ4AGKK_56TngTH%((ZDc<at#A>ZEMBbM{mUJrf*=f<%1}6|XO%Zr>WC|STJR<TX
zm$Ch|0@2aThT+Vo;S`nuK|{~u>L6ru=ctM%!z5G+twra_OuLIK0@M3-82va9b^qi;
z=aMzpKCYke@+?XH-=&yRH53dB<Kg+wOK^9nKXIx|C#ivzXn$S}<Ym6l*fbpjx~74%
z(S6#qq>g>NhQg&I*68u-4ux_8X-C)$v@6rJ{liUaa#jv4quF<8Ib%0}j3(WTGu7ec
z2R1kAFzl5zzIHx@T^;Kg>rhv)Ddqosm&?$VITZ%fr=iE(uQc-VAe{Y=0d`xI;g5~i
z&}YgCu&AtslZ^LXsMtb8<@?Csp`qxpI#%GdY9a^~Oo*)DGW6eLIWWUS6bZVkdCMY9
z<#qE}=aDZc&#VM>g;^j_@`wI1b~ntqK-+C6k}AepzFThrSTqRgIX@irZ71VOSTf$%
zDXN$Cf*h(`hPO(y87q7Z*g9N>`k#&1@#PWe>h^`+E|y_5+5$$?H{t%P>+!R45On;r
z5gx^GD9~Zg(7s$cJvR$iOPH^tXg_prtR^PMhfwjG<+S<eOt9|=2J3r_J9)vJlaFSa
zj!8Pv{leHub$+bx=OpW0m_;NSu4Hd+CF5O-AfRD0C@ye@@O`P|_rqeC(Uu1*sTV3e
zoPo;EQqVP&^~^us3-Xuh)JMY(4yFcUVMQ8R{fMKotP*ZmSOsXmZXw$bH^AMB8H_`*
z7Zj6(XyjrIy5e-m(JDornGtCFo~L)eDnt0d4v_dU{ZM`tyuK9Tk3X!taXj<kml9%S
zr_S~=Qrt7Zc%=PzXxG9pB2wu#eRF*d+u<z$-oxbrS!NlCHi?Pn;?dauAc=-QXn?+U
z<`Fr@r#;VPf~!SRtm5*icYz`8{A&pq+&72LLdr?61(0{sszBxIB$hUt47z%*(7o|F
z(VFOwt*=+XQK=sDNd!>oWG%3;W*kcAM4Ee39X$51UL&bL)mCjGWfvV;KkODz^#4VK
z-=)-M5qqEhj6&OA;UIo`4*3f$p&~*AN*XDUe65->xmlla*Y9dSK@n6L)}l&W8&TRD
z3*C&V{5+WXlMV?WMsFX}gD#LkQx{_a%QbsPYd}Li`zHKeQO$l_<Td<b+Rdk!TN_Di
z7y5ycxD+z`Ey3{j6|}k(hJMmon41!ZUDG(mzE<U=OIOh|57wjZ$LsXTDsSZ4*nX<^
z7zCJJW1K)8>N+PBd3*H)qIGK3R>7x1lih(*R*istEf-Mf?-GoTIEC*z^1&b{QlMUa
z4OL%{N7u{Uv_WAN2-s}!$;2AHe|#YZ+kNQd&M0U}zKEwQ%dxL34qA0GIej~K7}#&j
z?lu7ew^M<1U#>a!o@{_cwIUomxD@znr$cAQWh&}7r{qLFK2<2ek;V1k7FP+E+bY?P
zWF&06Jr*}C$VA7sLnOm19@C#J#1`37+#zQBJna}_(WMK+4i&-UqAgf<N6h*dj8OS5
zV_8`zfcX74vZa0|@_t)ZwO0Alh7-x?arhc4ip=3pu{#>*902(>wjW6t2Ce_?B(I$!
zF;+ejvd^*H)odeJT9b>mrz)s>v;?<lSYy9eJ5?H+4SUb{ppsQ9Y5i(Sj)#{s=Gkc2
z!m@3xvyQUfs!S?7e~B~Vn9k>94TD_5Q1nun1Z*>6eF|Dsw8*?Vbp9F~DTsjXi_AOR
z#+d63gV5&AXmnoIO;o*iqV3KEqFb{B$|?YR#woyqR*E(jEMw3npu*IvByn9GJpOnM
z#hRO_t_ovLTwwoxYX|j;W^;4t7kcw;J}zN<T)niB7$1=U%Wh1?d9#+{c&R1o?x+Qi
zGePKaHXfWC!vy@l3<YCz*xCP419s16N%BEPC`rns;>9<J=rZFSTs}|rTJnivbR+3|
z5rw(RW>o2=7Ysk)gnKgApl!Q|TY0Y$uO8okojRtZ<dqsGPuhw*j^(0klo@#It^|9w
zn-&cnZR+mLxa;bHU@c>8ftN#ZvW-8w{$)mL4j{Vv&tm*j2l7Na1`@X}gGYzidp!OL
z;hXyiq?eCz(i!6+wOJoK#L47c*nW6d6AlnP9PTd>qrvAIaF!h<yqN4N#b?_Ye};7;
zZ;(RfCl^rk8wuQwi(s-mnOL+j-@utbZ0#IPWQ{|i$Lys*ve=E@vA0L1eNlKLp#V<~
z&BGv96SO$fMt2!VLI0H|oXrS?sCjlMJlhCm+hs6tPaCtR`=LbzW3_tA(0kul>eWAl
zb+YXw2FdGb{ApkCiL0koQ?|1Xpz)yF;R@17IdR^9l6^OS)5279C><uoFIvU;@C54z
zR^*_2>IwGU)dSwXG855n?dpz<YEb{&PW#qqF%IY>BB~XWGs>&5^I{|2#^$~sPa`qt
zRSC)(v&ph4%dt%PIxVce48})mpvU7sf%U5Z@VY1^n*U~lU-wzmpZ1c*CT|7DNgt>*
z^ES<W@Pkw_rnYxWC(Yh10?lW~s8fDC@SEA4{!1atE)>w|7bB6&uYz1P#?+j`d>Y0<
z;M@{RyRwtewkr(|q+CD?xg0${gwyVq%c1CZ2`*|niq@|x851o9Mz#gwis>cz-@Y8&
zkhK~cghsGo!bv>q%zD%Mv+3_QMfk&D6AJY|a2u#6@}D08O;uNtd!>eA;$Ord?+mo3
zjRv1pt0B8h3es_hsM9S=<R1`0XY6m<TbBUthgl!xOLh3_zZ1oCvOv3ZJxEp#&;X|O
zTb-y!4VF!GVtbpLe`R9pf@N^*h#>}K+@M$YMW84z6&IXSM{oc0q_C2Q-dq0AG&jZ)
z-&Mpopx!81bXV|sVkXl@{Ncf4)@|zP0!Eh^yL9ALRM77v!b|Eb8#IF3Y<~!^xg}$7
zW)l87Ud+6a0>M3*C-zI@$cv*oSZ2&JFvpI7`o02s|6@1|+r0weYXYg9wE+c35yQ`I
z0##KroZ(!CX7d-|23N-Q$Y>xE=EH5z`G<5Y<-@>DKaeX-AaWf?FlN8)o16;Zw^-8V
z%R_K@>M@kOJ3&1B{b`HJV3gAXocB%(*fZT371G#jK3za0N-D%jpaQ-2?$G*}G0g9e
zr=qALyxaCSIp9+RL~}A`>ap(F^V4bcz0s`i>KkqUG6@WPGr?aTiZ1<Ef%pCl=X93c
zYow<@WcibmoYMp633JIlzbcloT3KB_aD<)TyGd{25=apc7}tIQEhk=M8jC9Vu}Ogc
zRcfN-p%D@BPnnJ{szX^$Dsg5r<~q?_w4L%*5E;z&plkMl#l%FAfAm79XC5q1Ye2M0
ziWvjAnQpyQfjmc^sp{rXv}2y?syFd;WxN<&i`N3&7@*G?%ZJG9@JJoowOo=y{iiH2
zHZ6w7Y+uC>e@n-Flj8<MD|{CzrBa&#s<L(utiGd-BbFIpYh5+Cb*jPHFhAsf^x*Vr
zOF+G8De!Gp5EaigFz*Is3U&?Q+h0cAy4~Qa8w=J<<CUxP;XpX+2^$hkgj+LBl?;%)
z>Kcn5?$$F6@!tQtRH$ZIC{a2x7-Id~=)3oOk+)W(dfeH3v@*&fcDJ%X`Y|6{M-7J;
z4r8$Wx<9e?b)~&~vmtay8Dly$;O>@OJRl1Mw*eXLEl>k~pqxk^h5<irjKF#MR_YfW
z0d}nlC~a*av43J<_tcFzFy$~7J=u-kCz$u>`8jCapbg<2IYjM)GL{*Nfq%V_t9m*C
z-pQSzS^S44m*;`bkThssG7CmuRl&pI1$eanG#<UT51-dlv|CUKZu(8AXi<w^jL$K{
za5hRj-f~{c9YHB3hlb9}MvHF~aQif4Z2rEN^jl_-`<CZXbK`KLCd);SV=N0V@WbR;
zY@ew0zPhuyoXw~<TyDcW)V!fbcxv{7lkaPBUgAa6kQGD0(P|9ZH5u1#P(`%`#+aPU
z-hV#pna)ZflFA~k%;N%v+Ju3-^#TlA7>6b*p5U74ihfHOv#dtK7&8N8?eq|=h?9c0
z{tn=OQlQ<3R}q(e<KXiq#)@m)$aJ9PP=7Cj@e4;2W!KU8K@th6`%F-!zmJ3`e4-w^
z&55GMYQnN~wCtlVcrR8U+TPZXs4<5++7(dV!F<!chqLjD&=+Ih<r2ro%&W9_1iAXv
zo6Yf-0{OA4P}diW{rQWa>+S;BA2JR*M>tZwA|F!!q5&Tr9fT7Ol%P4w&cHCH6WFD3
zyopMJwXgT%*4xTB_hA!0j<Lbd?~@^YAPg@4-hwHPVff)hIjSU8aiT{akbNYd^?4N1
zu$3&2?ZGtX<wperGxO=YsVQ(gy%a27Sb=M?65btSh>gq(o?_(*yW*~Z%E*~`Y>Xbd
zX<h`q=YSqN2L!`5gaPlnwtzQJU}{^d50aLDsJuXtgok~j&S855txead>!YEV8=ePd
z-P2KZY88Blh==HLX;^;72_*(e0_~l%p!-ECG%wviZS!lme$UOIV)U7;HogozMH!hQ
zyo$O`-BhwO4K>s%82ccLC{H}Xw6hO%rmYhu%OgNxDq{;inh6H<Da%j|a9zD-;5vO3
zaBSumE6pHbbEabduPper=M?6u$HCAmF37vb7dQ>s4g>nXX!39|)TEq43^BvKY~NJB
z^eQ~HW(>l?Ur5uR5?rNq3G+`Cf;zcMY9A~`+a1@rPIVrsvYG>hiD^`u8w=4^$>1|2
z1!On(QoGY4)O*XGe;mwqqV9NM{6)N6vI-TIS>K{*7R;&Vn0I0mNH?$LdXgQ9SzJ0g
z9@C@tx33Ye_m`kGWEzpTOyYRw52L%h4hG(oQQe|GI;mqZPU+u<BfDatK2rnt`-}1U
z4VFvr9R#_PuTWRV2~c-f6GukPN3YsTRAKB+XxVCrUG6bZwJ{c2`}#Qdlh@$?GhaL>
zLh(HdgoBZkH%b`{;!dOZoH^`FpMsf&uc*_e{oq(_2bL@wHbPj7ZlO(}aGb#3qw`SI
z8e`h;xDC9XD8Tb+X&@QR^yFKtADbUQ4V4P8q;?|S-G3OQlSUB5k$#|4I26ktu`_)9
z0ot)&6MF9Kg<`uPm?O$UK7R&yc$*RaH+OLVE=6AJy2=1yELaV)B#A+dlxHw`u~6g-
z!AJ9O_~`~TQ2i?C&RPt58_lV;-$=B*VF@iWw!`vq3-Gze4jg^g4NbNR>3a9gC~C1c
z?U6|ZWuOYKuR_3e{bAbnF%R;eR6vLNaqRhNgx01B#5L9#V5uCOm72NB%UNGC+fm0C
zhJfwZji@v=i|HvVF!tkLq<`84A~6Z%PBAa4yU!3*{B{zXowt%!*hg#=GQikI4FgW|
zz%e|6w#}^u>wWvcRb>>NSDp%sgnDSwev@`LFCfj&4-#CN$@=a0f%?5t=DA~i?c@09
z?fIVOdK8lz@6TaP-cqa?m5N4h#zHUKIr4TXqQ;JVOw2POrrRZ`@|Qg=o^uV=<p$uP
z;tDo-O=v6qi<5omqqaMxpjS4Fewx1pH7CVl?AimM_{)JzH@Sj^Iqg)Il7#iTabWOZ
zHT?bNEb?TURsFy6(BNStc9=8~*)|FD<J4FG>feOB()>aC+Z=w|rGVHzl&Tm_qYWNv
z;N5qa^j`|5ip`hc--2rPn@uNcVr6*ma1!cT2BE!GAp72p6u8(af$-!Ix>;6$1{NlS
zlQe?lzXq<WZ5bZ8ehI@*Uje!H0wVsa1{7sJ&`FCpmo2AYU@Bu2NY+rTZ4}>iWuQEh
z<^QH_zz=2xpm|0HUCUy?sBbXlTCiNpC_ccK2%?{x%^30C@LOvey7#$YkhC0vG*+U0
z5IfILCc)g(d8oXK&A>4y;6Pj!c3619r`v!|KSEF%B!bP-a_rf3iSQ-oIIZ>wc-6cB
zjit=@zU&ahl(^v1^rIO5<r?sK^A?3NpJV>tez<JhPTZ9j0D8EJ?)>{4@|?;mg-43v
ztvTx@ZJ8-Juv(03(n_$W$D5YDenzL?Ex~!!jbQ1;xRAvSka9N$LP8|y{HF>{HhrP(
zfBNXV1CeBKkv|&u4#ShV$MMU_m1rdOC1Ll>P&Bui>X8&s{?1sE5s{2PUdHD457asL
zJtwL;RNeYdFqb&d6UAy}Ak4l9iT&Sb{L~6`IYF>fkb~bNIILfii8ZN#*q<<kh-daw
z@3SMw?oLgVt}hly8nQUKw;J5-PG^~ogG5L;+LE^mq^vXCY3E?z@&7T-YOz2OcSGR!
z>LR@9KZB=P_R43`cRKGMmI2B+NpyyY;k~mNiZ4_W=Ns-^CgY3#FdBvh<7DV$dJIg0
zc(i#n>wsY2q~LS0nA1E9?Gx^ht-=`O%^yPB(^DX`?+Ni4&vugYLWv&TO#&XLk<~M*
zpi|SBsxOL!#KGCnnzS6!7cy>0bhMzGF;!ceGwHD96x4&0u((MTt=3G#O)Ti`%x@9|
zu)BTx2M&bwH>&y4B~bM?3Kj2O1n~tsV!P@!iFIVzmNEX|b#p85%#R5iGmFW{oGVB#
z@4|hfSEE7_yWgGI4N?<jFnhy12zCBsZ4cAm&#WOatjD6&mZS2;lR=b{OvJa_$l8o>
zJeLxKevFf*5-A21!*b{;`%0gTl|n3r5hs@8<oO`i<f)1F%kR@f2Pcx^^OH`Spw9B$
zZKU1lGWsxol!)g-^gP&i@B2)M{g{S&r`)LJT9z5utP1k;tEtt$UNogAm5Po`2gd{@
z_B>rCq0jPQ*ViyGdJ;u?I$jdJwS!4`{sFe9sR1va_f%5R!$n7nP_pM0C#|w!y&p@V
z{dxg7st9mkQX=rf=AwoNA59347%%k4!G#fUT#^Y@UaT8u#UsJGVbd_Bz?b<ca?#uH
z7IDugK_i`dDh*JgR=Z~sPe~GZ&p1pi_#A4V%AwXfe^OCc1N>HFJNfQtSQF=i!tzRL
zr9OsvOX|>gv?{_a9msl{hiTJl;pK!x+_#1KWuk<bqO1hXPOLBJ;d;oenM1^`D)8ux
zCA$kYa1I;eKos?jzFCt1vU^YI)1i%M(EfsSiAI6Yw~h`xFaa>%2FiJpP<S!ET6;q%
zy+11n9JXe|Q_oAR*ZnpZX)K13qe7u7Ae3&MUyBCR4m#hr)7`mZ6qi3Ca))+P2VOk<
zk+D3&r#>!qkR^Vy&c&1v0!yWc%`wA3ySWU9G5)M_G@H3rGF?n;PggI?1f#*>)Gg8*
z95=K1(|9Q{GOVWM#VkX-y^MtRPDk(K&!~H1Blwh^g9f7{s!%AS26K#w{_E@X#`rmC
z#`4KC>e-xo5(Qf}MPdK-ag0a03)St`KxKdns(i>1Xbv1A-gCp*Zu}VycW{QceUVt`
zB!I4XM3o^?pzOrHbNkwfq~4xbU2P|}yBE@`&#L6^$3!xR?XH71mVxW;5ZueYcOGk=
zaUtUxP;~yd>4qy!*bp!lPB~a$H|wi$-OQ(8$Yz;8=}=Xbf&ol}Y>!Px0n6RXeM5+g
zr54<sycKzUXTeBq9<*I60_iPns<8DA<t~lE7V{wT$$BEnt)!fLU;*+HuF$Fpmtm=4
zAd1hJQ;#9nX`(EYinhlvmVzG+J05`HubQb`VFgMm0;tN){j7(R?G@Wz5r?KY==yj7
z-2RRPt0gv&&<E^Xd_}KaJb`?zJ#@NvDc&rogmb&LU~iK<W)v~c?m8t<biPXe^0vm7
zp|`0)dkLiOc0<SSsc`>D19*6<QDY@3wr=`LTRYE#gNX@hPZSf|Yk{PDha%YidC92=
z$1=Zf0{wqh#3OJuEi)2;!5t&8y&eocQL3PMcrnPM!~*APHSW0!n`5nAAm#TmOx7^P
z&1_%k{B#KsYJcO5LJwmKDFV6L9ZuI?j8Cf5A%15p_@u?d*MKbcd;5WC(k{;EwHyXs
zMWCxd52nxe#Hx4qh|}_opnLQ>am{C1>6n|;+AfI(vW$qPuMVtPz6JYkmOwM};tIq(
zRKJ%(J!cMQeFbdBHy#2GAHu<Lff#x<QZdQ12{YG*flcW}RNs0I+<N`!m$ws8-3eJH
z^a3sO@1<qi@>$Q=5;*O34!55?5C5&7kAt~DP}jDE$d_kPe&Rp!X+Z{?$1Xx=ybF~4
zScRIhEAVce8spbF2=<0ap<zDj(|O{FR#A`1(K$xg|DP@C_bw-*hI8Q2n+NG8tmhL<
zV6t8is1AgHdt@EBw#-H8nF-t!_Wy`3JA<TwN1T~|R6TYnd}vI<zvr>p!SNdW`g{T9
zX=czZJ4sp22n`-zrS$@Te6BPSe>0!5d~BS+DI*v<dqM?fQyEJ?tXZ%mhNAc4NcL`j
zC&IL45PRtpW#9m8Q%Q$Cu@RW!6TxQdDuI1(D5;2KddiJGC=uQwo~s*i$>ng&&3a06
z_msi;lTBE5+#Wp4evuL3mr#D%5!%utA<~k~sXn7X#Y_zG$^JO<MLu{<at2=0VPd4$
z07^qAg7ID%zC6M*_b;rdV?i#(c`H~BFb<AQ+ksAHy3qXXHQ}%B<65><Lhq3lqWy3t
z+!=^Kp8JAo@2DYAC}^a8djsLk@><lsBc&=H#n2-jhC;pjob$N~&^v9%`qr}`q}miy
zPFKOpL5NK|v)P?_DF%GM#yn`6z#n43b(*M>Eo`6Pxhe=kZ%A<6!JXK~o+XosYPA1C
zCSF}R8I6)$p!MEbXlN$z=D7m$ckrlW<OhN0UI{8(8xLxx&ZuD?jGF@G*we?f*7J+t
zgIEr(BVEwbH4t82m7>w)a&S3`$T1d&_ZtIxqpl9Uvu;uAe~Q81Y#(|C$+6>1B^lU!
z4YQ}RPHg?H(4$5<k1t1vt<eauH?aW4k^WF+D@8*^rhiy|;MB7ZqNTbbYF;gelx>F4
zGT9X_%oU^U@GAnN=t|Nsi*e@mWDue8CV|0oDK#(5fT|N-U@|a)&<WA_YW`wu-*}f;
zsqe&|?n@y2IiJE(c5Wy>Fhz)lZY6h!s?5Wpp%Q$tsR{KbKB2;M$3gS^W}>~t2%P+e
zgMKvAUlsZ}BbG~hcgP<k`(ucWY7W~0MnmR@F8U~c6B^5dF=I{;x*G1Hy{pcn^`QMI
zE4HECUF@!Rf_095Oo7fPZfw_f5yJ(6*f59fLD!}W2DUvU4k}?7e?SrWroP1GpFJS?
z8bul%#$u=v#hQ)m?(!*<)SZgKhU8u7#{SL~Ppe?q%UqTfzCsh1yeB1Ng21O{6S%%u
z#JYBpI67q;ru>=&l5#$nJZJ*$X9O;dnt^JIB2eX;CGZ|MlNHknao(p$Nc)4(>G4$1
zqJEclpM6jIvyT#qa5w#^Z-OQ_qmVapX|-3(dHS<SfXeUZVE4{OlFRhk#6T}_9=U?F
zr!9t;yd=m-m!M-q9nEyy3-4}EMUSELp*&FyL(2AHXHJ&D<fScL_HrLS8VH9{?e%Ci
zDu;H-8{uWPA1eGc4qV^N1YXet(;W5j*hvZ?-@yc(@7v&St#a_bI2^8rsG{RK8~o6(
zgs5=>_&68VHEhHK?+d}n;TUAJrJ%>yd9=7^9X21bgw$FWT#0hT_ZQg=R0j=Af9`*E
zjvA~Cz`h+pApa|Xgr{>b@OeLUp9qH7KYlROg>jtLpTurEPihe#0ncBnVf&;gI{bJU
zR$G-~?14SZbNQPjjb`jbi#f3I=u`|((*@h%uLRxfoNqp}i}K#i5O@_&MybYqPP%RZ
zwJ17>uCvpq^_rj5Hf;ucd2j$dZbiY6#|1dmky%;n*j<vG24U530e|)-FkDfKk^h$B
z-Hq<xQJPNES~I})-wgb`jgOTfk+?@E9*^(Nf~!yRQ8!W#ulp`TTg^XQhd~MDeP-YO
zxO`Zzor4lxAKWM&&wTNW!CF|4xo_W)F<dOB`N&x|EFQFC;?OEN8kA2qfubeL$iGbm
z+c~4r)$t5f>`B7$qz<he2dI-%2vv+}C5n-?(3!wy;V+Bn>({K;b`tXt#CeeK{(0zD
z*h}R<cYyO*4&2JjfEV+pTC^?_tLs_!UQHxbFHFU6=dPgq#sq<3qbK%8O$RlmHTX@k
zLj$WI66r?4+xa^w+%_2+><K>cxQNYeE8ubdSadvnk(O2L0ndlVjH5k|gx|hGoJ-Q6
zCI3EEY0BYro-mKrr2C}6Sp}u3<<!T9oj36bg5!P->^|Pk@rGvO$dV$M?Z>id<@cz|
zs=+X&WeWO`YSOhz4gq5igU5P3($ktxpReAHx1;4Ky!o0-o}h+LyrP)adzS{>;lsNF
zxgh>}l5_+GlhMEGFy&S?k^0Xi@-GFP!guET)+k{d8YwLt9|IfixuNEW*>sm$I__P<
zVZ$y<Ty!TBHNH#Od7s41|2Y$zcISb~hHLQsbv7pO7>9<Pp(|d@!4%6xxVyQOUjKL%
z#k1leM$WQx(Sd-I8^CUmoN@aE<haWf7*)Opch1zu#M7I>d&dFVRW=tpw_FjZSgjzu
z7aJkmR08f{s%ULJ8F<r%aru)|5f&vvkE1S(J7kJ!Q*xoGm_un-19mZfpzXUcR5mje
zj6`x$y37~ll@Eyg<|R|vgd8$+{t3p53WM)P=do<y4*RaIz}~%)bf9)WY){Ms&OHgn
zD5jx&VpMg<7!G%_yN=P&M$#O<j&i-TaM=yUZ}?>`sB+&;q+8yR)omq=vC&S%HAU!{
z+CWA29Rjnhr63u!jCc%agUQ;TH1*~}^p5JLW<kuW;ClwFS1&~Q%h}XhH<t#SW!aAr
zGVqS#kT=^@U{acnhj`2IK6e7E1}`L?pAxwrRXHe|7Y8Xi<{(f1E;v_@SZHL$GH115
za-{+Jej5ZY&30nlic(aJOCU-^VnN|p79D7cA?Mjqt)9OWyz7P#XP+O$D&hsHTF^(M
zV~e2ma4eRY68J}X8+K3QQF&_t)e2_a!_4>cZuM9YD?Md-_Z-uKxE#>i5Db#calqGe
z<l0ojVeq~P$emvYajc)&Cqafr6Awb<D>1|tcT$7j8=!rgBkNq>2D$Nuj2~l0dr(CE
zZ!};(^QSmlKI4>_UhLC#2?TNJ;C#IZzIH4?r`WNed?F9^f)0TLy9>?u?FXMXUj_T1
zFZ6)TB^=o9N48fspuUD1^dAidR~zP?%f86*pGt|x;Tl1TQ560#X6JVMZnmckq&>+A
zoc^pV(%F#;`e#<N9%>P;3OtTC?~laY|JY)ub}~4xVcnq2D=+zYg)7uq0*aaHC}ez?
z-1N6}M~oSE1*JmxzndUyqYYZCE&&zCSHy^M&T>P7=_?I6hW~eoM5vWuqEZchEE~f*
z;;KpJ<wC-{q5zv#Z%2{JS<@MhGN9tWB<Sg@W_v#iGH_TQcFi4ud}#&9Wlr2J#xM)R
z8hn)E%=kBd(c$;mJl*X8^$zP$e)xR#M3&>Q&`SaXdm)v*xXCrl*a)Z3C1BS^5$&wl
z1ib4urdGYD!SR$AUA(6X+bSJUB|4n)N<2&ieQU7l9`letaHduH7s1(}o(fOTg^`gB
zYz}cG@`IJ=75k7X=02v*UGqTqN)f2fO(l9$1Bg({p}hJqbR8_C&mZ}sO>Z#j|2rF8
z9hiS<eKtIDi@^umHlw1~A^Kdu7CZlQM@5yDC?$JouXjA*&E8|uE3bus#uv2y9uEz^
z1=EJ~KlJg_LR8u{8vUXYL7(;S_H6XV7B7|$*-=J2b&62*u#*5UibiZYh|WtIXo_DE
z<o>pznxl@<hU?7VD!oNLmL(FW!T&$UW<a<^4SW3Z&|}nWs*st^eB@R%w7Uckcb8*(
z5A((JWYgBavZ%-8S?rk@gO1B<X=nEqBKC|VLbauEws$SAHo69uJ4)eAeh_58*^VBi
z5*YWKVrTSEntNyxh&z7~^>b|I8ebuZT%5x?+uZ~TWBlk@#}Le%B&YMHM&kD=`8e<l
zK;^?INIVpW<_&3}{yZLrEp9-)V3x5vzX9rQ%|MUx51ddduiEj<T$U|bLh5G@!$FTu
zqRUw?kUzBNY?te@o|Ai~zCUJR&C4**SfGpVoT`b+C1vn3EyOzqwDH}x2Q*nB9}O1&
z<m43%#I2R>ZMzllmYFg(4-N;*>XFEUeW-7jfzo4a2bHsndLOHXfjPTbrsWAWIXwZy
zr<V{zNfKJz8V$b}R709P3=Kq&=t)Z{M#fhGZy4a(KoKf7_7N{FL#*K?u?~yRoLe)S
zX<{u<^L9E8TbTof*J{Cj^=@eOTMxx47oZ_#8Hmz9P_533@M??<!*$lffJO*RSaOB^
zJ`F@~VgXbXZ^rIO4U}wmAQ{f(m{v7}?Z^qLKCJ-WvtOn;ij~;n+618+OYq&py)g1;
zG`4TwMkU5UR1m)v6&ITktIGSd;rR|2@^vcqIE;q_#ql7YJ)ALg`-!wtSI}qL2&&o$
z^R^#GJwYi=>q>>|+al1ff_2Q<?xJ!n4em#9Hrj?$ykfTuUuf9i%wNVh-%ba;h%a@_
zOC{nbOy8d#%=#1eb7NEN&|>Rta&{Q=@K&xw-p@aR6jcfJ*mINUW<H{-hWT)}Ns)CI
zXy8dNJ9J>objka(;1zfV9RFm1S9LV=-p&A?nhm5_#{vJBn6&RZM0kbYxS+x&Ouu>&
zx<m44)vECjo2LP}GonDRWGr2sQ4fx;Wz;-32^M&+M30_q=t(kVoU;W)`%e-TC3R9y
zJ(er$X1NQG=fwL*8R`ZbK;}pz@bkHXo%c(zCt4&}=~Rnh52Eqz{W&nabSLh9REIv-
zH$iJ~BMlf|1pFe#3A(Z#Tqb)%uu&O%eurVe*kPEf<qa~=I%pqSOWZF=8G|AOXCya(
zL4Y5|L|nn}#0#`}L=ctdUxGBxI`#}5rhTu%P^o4mXn8e3;e;H>9~=VVp~3K|zZl%a
z9aL0&*HoDLhB(0(h$tP4>K4P`uHpr%^p)jH+;4O8J!etHGfLnyIe|8qjmG<f(@|<5
zrotyqAg>HU-C}EyZDV~U^@Y?zc#WzuR@UF++54Bh0#sfbgX58P7@1HCqle^TCd(pu
z?XRGnMSWbW#U8X>dmb8ltI%{%4O)LuhvpfV(RsHrwsj-(6IG!3PJ;WHA4~paGs}&)
z3f610b8Glk<cF5PlF&$W4zl6~lv1eAX*NGj%j8CAjztO6<2*z%PJXsp(0PIRZX7;w
zCL8>T-V$x#hDD;$wbfv|r-|xKj>mC_lF>g~8|AfPQ{moeP}SW`jQky7>d%GP&^86+
zYt-P;pG$D!%u*C4Q)=B>OL<uf1S%{Gp0ZU5o|+tDoiJJg-Sz4C@4f9<rDX?b+DF}|
zvtE!*so?E;nm*s}j3Bo|lRvRUldcB77t@KGS?67o6%CKtfKK7=V9=cbO8YWUlo-R=
zj~<I2?^&zIdqwc-H-__jMA(}4l`dLihsIaffAze>v^!b>LLL;Nt8f?OE(@iOix1P7
zkgfPAsRGLeXMthEC2*~&r`DU=Nr}RF)T8rJGPerKY>KdJ!D8rl>?4vbH4xyxknzpS
zIUUyDvVBP*Bps;1AHqZwZW$(cxtDb&Jq*T_^^7^Q<ukqInu`-X*5YgH5G-T7DYMEe
z&?3yn&NWIP(w@&Lb{c_-gAEinMZ)Wi)p#o@6M1&2BsOUw1YCDU&8IocH)&1zRbR-v
zPDOCco(l0Ie~`~t7I-M!;~cJEME%X#v@0-^UK7-!s-OXS=P5wu7M3|}pG^5D76_a!
zjz%ySg1W<O$bG=LVj8;`TR4Y4aw<dP5IO7hdqS>8>)?mE!6=$n%*ik9gxC+6%wwiQ
zMCvJ+d%lo%WU%?SC<I&A90Bd@N|>X>bVtczPNhPN*#2{u@aE{j<mgzqV162Vv(s_$
zCdR&49*R|U-ek$OR2(*D3=TQqjyK0&hIeiuppsUO`U{h3=UErQptt9++B^zdZ@lM(
zdoFUTn<d~;ZwamuiPUSz9io?BPdXNKQd#9fs3>DQgjY^fYkd|bzBGc4-+AC;uZTPS
zm*d?|FXneHg?9y6kWw}XG{+qwXU1Q{W_|z}>1l<o{|#Xs-1DLA2%oX{iwU1+4c7l^
zg6-`Xs;hIKI87=hBAX?GG&@g}=ak}^O>F1KxEaoh6ihbP!;iWGd}EM}{kz9w|JDfn
zw`?YMx>QnQ?MpbL#~l?6_Q3V_aKxu)(Bd8kovl*@hdP-D(rgZlx*doe_XCKY(LxX<
zXwraJ|B?$Q%Q46(2zjoJoZciM`g{!{3S-WL?wVTS@%|lY-a8%D!;-+hpaGhLwZUV~
zZ7%MND=Nk2&?x55>?#gNU9~Kd`#^?;DV~s6tbiV0R#M58YdG`aWK2JE3Dh4B26fkP
zB0udT=>6l$*ip>u?W4f)Pf3V|4$~b+><4eJk#r{GdbwSeLuc)H+L|{FHstA{fy+@i
zJd*9Ebxwifv^G-xk305U@r8<jCHRtMR)n|NyrGZ+lHhV;6dH?f&a(XTSO+ZIn-BII
zn7;P13Osb(AoOx7{t2)}w~{*0GKq&m#Rs%|LLel^1^hn)L8brhrhIc>e4sNOA6Qr5
zF1E`zxS&k}GG>Cr@CNmG<_U%i3h-tk>+6~83EfL$X|7TeJos}J`HfHMQC$JHd<cTF
z+or5PIS+N2XT$PDDLZdZl3xjq__Av->RRPs@8`!<Wq~2|G=`Id9_sjmaYrPxoLMKO
zlm=|82BX|5(6A{E?N+D2mx`UZU4i-ASw6!1iaVq=YNO~&8T_<MMgICDL^@;&9kWG@
zr`i}FDn=q0_&Xh}{_<vbS_Ry7ClGs1Wa5#}Iruy#6rF#5<Rnq7*DXT~mp+|9?~j_a
zYmyS{W19h0f;2Q4yb>Hd)38P-nXy7ffU_>!i-Uyg`CTq({$)(85?_&)*H>9KO92w;
zM!3-yhOYDLiGJXFs^}XEy53gMp_c`*DxFj{CmB?-yU1(Yf_D!)fpMP*a@EUe!7`RP
zaGeA_qFqFEz!071uZ1gNA&f;b6!cX`fXU3;^wjf8uvn`BS&S1cZFk^!i(haiEc;%z
zVG?Sp`mwBA4HkWiK(|T+`%@Jtd_7VS_+TNrRNKHjU@UH-B6>^1ApBb>Jmlg~du<^*
zkQl#b<UPut*T@Z97Q~((9V*mI7np32(VkKBNN#Bbr08aWTgpf%dU*^jO4fo&^>Vg5
z*CWqj%P{%37__WwnP+xC>HJ%Q%|^E1te?kKw6Lz=%NYWj5y?<Bg!R=}|Kb#qCWB<I
z6DTlE)YWwj7D{J<Bkv06yADvv<3Is_sg-~~WCuRkkcO_e8tL}6Mfh`DBPNx~P?#NI
z+B4RhNP9IYZ}UylGOY%x^pokrcQomJ4R)?vttGABRH3_KKI{)(jk)tify)bZa2o9i
z;eWo-QM$QkaQqcDnz03se_**3tMfG4rx>Q3uf~?if72?tD&ckBs#cgRL$`f@sFx@V
zIu1I+O@~-ePnid{kve49C_Zj~6oxYAY?y_O$nV_%#_kuvXCV(2H%ukG`Wf82MXZ0S
zS57>qiNRysQj%Ld2b$Mi1oeJba0_8)%p8t79*9H9=2ou9yNI}M&WEvrXlyOn0ap$(
z-`A5ZV9<MwI!-L5Peuw@Pt9SL->Aa&YiZak-vZs6u0r=!Ey~kuqte*|!Io@-UM$nE
zpS&I{r}#s~b#<Kao@KQUZxFNx+km|Fvmp1tQxaL12lkfwu&-|d8XQa@__KhBex+3N
zt#6YzOe0ElNy4-V%6MslG4ASd#?PA)KyQ6Kac0@8;5SS!ksKzHP1iYnWeMI*@kQ;;
zM^JaVoGR)WG9Pgsah&yn<}bVe7GZ}Wk@2@GzE<FKfe4P@y#T0T3fjM!9&q0XDi*Na
z^m08S-m68TH4$2W%BbtxI8t>YlQ?ToqPOiTSlr7blE^HW`Ij2{yq=G1<}&}j$v@R6
zg49tqa1`D->x0JoXykph5FA^!4O`gR_C&u1>kkD0-|`@lUz<RkeaEoQ+e;)@qDGY#
zdO+enF*g5Rz`8sRQqS}pY*nli>`6duyR{FkUKoI@`Ekf+{xp-ZNyMIg6WhBB!BMt}
zM(5X|nR_6V?YO`^L3fDgYZRMtR{`(Saw?i+4%QRJ`0F*}q~<hGKQ0=+#MPtu8Yzf|
z4+xwcMv>fHEw~X9iwBC6FxKG&h~AGOyaZ(^+>}OiK9wT8`J1YG`Qx)Y{@9(uy!;DY
ziS;%oIH=HsExqNi@($DTR^KrV7?Vic)N^Udvse({wIV(I7D39t%m)@c6WW$?IONq4
zyniPUoJ<;Nk?TCv+RSv0Vh1W8;ftGka!_c)a<bo!!jx^w*nNLJ(Rm)qc3j`+IfZSA
z)BLDA+ZW5>B^NhWAG@)eYFjOV{tGNW(6)*?D%ik^Ir%7i?h6|=8LMs9bK*GHf<_-m
z!<HGD5V<`AWG@pqnbCKyzkCNc4mi;cw~6RzzZg`WaP<7)b=bPa3c5ell3f{0Tl145
zf8A7g_rnqN0&1yqiy6r*+X`h32~^^G75cwTW=t|m@cwTz(OhZ;?}ojmt*72mw+Stz
zYn(Z}(`6kBcBQncE}xh@4Z(!uH1zVkhWeA^A^s5aUKO!klZNA5ac~TPXBTzWI!<G&
zMuW|+9F~PWKs^(c(C5HwYOrPt^_k7?A=CDdqsH1O9KlCXM*$vMu8Hq&GYvNSGS|O!
z1vDr<BIWG6z)C+*dSMmU+G7drVJm5Fz!5s=KPkSukw}_tx6^j51*ElN39&0`0{aLa
zZcWiemzNP}e_}C6{XA%Q<Wy>}z&dne4M=le0qsii0MDVUmwebbV0%=Ez1TxzoysX+
z--PqBii3ON<7jft55qOtnQmSPzt&7g{pvJGywAKfW_w8YKsOQn4kilYSeNsQGsHl7
zHgV}JNAEbs5j`sd51lV0*DHbXgr5@E^k3xeoZ;ZTJ{y08AH^*Tv!MU>MQm76K=pb=
z&@&~QrbN|Y>r{1OYx9HH4$h;8ZcM>eg<8TpsZ_0Z;{`Q}WV*F}7%peLUxTv==r!*Q
z@D6<x2(|M#)$l6B2@}CIgO7ZX18r5>0-5*aRN+n{R3vCI9<TzHPTh@<n-^j6ALf1c
z48mb;yOFnlc(s#H1qLkl1{Ezn$P?`ZHJby$yt4|6OY5OHVFqfBnoly9G2cN-h2W%~
zE8a>;MaRkOpeMnZl=`m4G0Q{HPoj=l&oi;C`~Z9|y^K7M3zc<$UB;W0EDsZvjy>NZ
ziNeJzq<haf_;AMnosa)!e1QOB@Lh|DO@_g{Q7oH3aX*~BoP{aFSHk@rj7#oY105qL
zu;1|lcHSYJt%?j>b~)jUH0E<I52rLzfStD^XztE9FkG4kz0oD;e9jQ0>m3Cuee52j
zJ{GcONO9rRwHW->2~9Q(Cc6I>W8wYRRQ*E%IMt?scw-HS&(%|p!DFf7Ni)E_2BPS8
zhwSUyk8yvNqDjPJ+LD=0tdrE4Cw~g#SB?T}-dxa&(E*hljtnfUg}de@xT~`aTF37r
zu>)!#8JG_Hb}&woQ6_tLF0lOTdg9TNh0>Kxtk<5MAw$fm&~O+`+n<c9J2GJYf^&HA
z!zyNu7|C+3Au!Jqnb(YQhZdznTYDpPD=dK#gB`HD*qrp+x`A5K2Gk?Xgnwf$eg7*J
zWe=+1-0=c@q%fW70KS}TYy#KUE@3RTE~=5pG9)8Yal`piEapo=?#`Jy$wF9%;UJow
zOu>rpM_PwZfRx!5sG@*G^}l3LzA8fPjzrq7oJUd&DyS9n+IKe?k-hC<pln?Pbso!5
zA*PG^KW4lHXBV*FUQKz7Ri_|qCk;n!;gUfqo@-;>hd0w8rE4*`@2Y}UCnd^zzE;4W
zH;Git^<_ImmeqR}M#Wqem>#RgR$%v7op>VJRtrB)u<Y|TUkFO{hu&TLVBk<TD7(v0
zNj-yFy=Q$)Z?BUSg)C4M-=KQ`L_ms*9tfxXL-+^c(b*#o2Njv3cjbPtpI!(D0%qWZ
zzc!<?<5^6+BZvNmg+yh}ZZcSrv5=n3APrS{#N9Iue(9e_S%Cqx{>kNZg+h==4kPmx
zG6q{y3YbTi!_Me(<mqIRf&3T5e$X6vXSW#6&txpl)|piMF7x(3(}j0}6k4(_4q`N`
z&~uHLc|h;e0(M?&EDV62P#f5Mt_ttIpA0@<^H2vvp?%E=_`Iu(Ww%+jXWRr(Il(;E
zxm6$>mjIK;i=d*OeP<>%QN>XgSx(lPXjb(QkJ{~U_!?tVHC73XuB8(116Sx5Wnk~0
z6x7e%Bkeg-#`LTqUw_tN;mJ63>UcxfuU?IsXLpnEVa&fhqX~^q4rX5IGC^n5X|DI8
zHB>S0u1Y_<o9aHKLCj10ZVS`$+E{M8cD4ZL7K41GDc9P<^pD+Mkh>rZY!{4%hV%1r
zchz<@YG@)|TJfk6It8b$u0$ojdxY1LRjoN968ksDgY{5T7`>$y6(*#EWB)?v{WK4{
zPx<1pes#1>x(aIi)tDTSh*wYUN4?1DMDocSEI#qT;Pp_lz-2G%$SI(S2hGW#v-+s7
zSOTu2mV@J*!{mJ;V>>-CV?Ka?xz6@P5ZWxER&y^xr+72*WV_?=vn3E<`;$GxFR6*r
zN!U?Wg$hH`2=7uJ^1SAeE~aG+1V)f9-Muj5zeJ25TFdfsOYv=(Iy(ET5!{{b1y4Sv
zgSYV_2$;PUx<eV;`^0*9xuG5m$X6ntu1r-G;=szGj(DU-plsMf&RVmbodLPfQhNe!
z-fY0O2Y%SWn1!|G^~hH^A+Qgwg)X0HqN&_SuZ~Z{#H|f<3L0Q;!8FE`l#pFl6;SAV
z-qe1<8MMEgO&Xpy6WNV??C)niRY|vLqV7q^-WbXDV73As?@Z8ZnGUVXhg1GVUkagX
z!RWUkjGGgQ*}XNebN*E9(3TOC?VCZt@&?n$hr^b+A*g@Ll5t!aY~bH$lvj=AiZ}S9
z+%`lYF4Y9j3K`JX3-RfpHK=J;Lxl66nhN&}h3WpeIKatqZY0y-uN8n|{yAt~`HJwY
zi>qXAbHK!9A{=*Hggk?~#Y(plsiWsCwA~PhqW&zd|AGu8M--_z;{qzDTtvIbG{{!d
z$I+7yqv-8h&iUU0{Ig>xwlenhxqN3_=V*WtFWKL=CmibS*&R40n0SfSK!Dd;@G0Gk
zRwL8NO8wKQdnyV4S(wH;7fK=hK_p1GR|{M*1GO2W&hM!T3jcBCyr77zRZd5_B%a!-
zvK|LD3b7TVq1j)Cstya$<W~v&XJ3jc*I%(NJxiiKeFThijm1CyWta=c$-CkNJU%EC
z26mnRnfY^~j(POV`FxCY-+-q6CMc4{BR`n1u7;~r^L8GYsV_$9wTZB5%TY|(o(Ja~
zO|ajpnYLWYqvPMsK$R8S!QRCRdgmt6w38K>@L@5=P6&XO&m0|djgPAP)$!e$HgcAn
zL+!7}z;?i2(EA`365nj35=0sw8498y|8YtutUxj$3KQ?WqN4j-IhDSTL=^2JSjG0+
zLQ^X)xIP4p75p&YgQBMMXtF<X8y>iu3$cOesQYRHM%k;QO8jDQl2)+p^d@*$Du=4c
zduj3bRVe<EOoq{7urXoth(a~U|9x!QeR~Hz?wSfuzwE@XdBxb{R)`i8U(*A1`QVbs
zn2HNWaGmo*1S3CIz|sHoF{)AndHqd-l-hc#ah7E!P*|;gJqp0>1hy+&fW4bow_6<&
z=|oqWJ){J>->6dC$m>KnqJlg6REqZH=Wu9U23D_KgT~*^Gj>BJV8ncs*EkZX^E=Z1
zO%L*4v25(5cB*N-5=|~AVCQEF%EJrbf{r(e4vUCz-Xnp0&0J_!ZlN!iq~RCMJQN={
zCkkJ7K<id*=-XbvawHkBdhU7fV`s3FYbl6yTtWCYuX;voBCNCKSdZHz#%e90?(SL8
z+OdanY5_1iW*1tWPbd5#n*_hcS!1VNFa3PZ7do<T(1Fg!<jKN9?A>Y&kR-y)b_$bk
zm*S+_gQ%VxLUjTov0YM){o%u4KoA2xE4%2cn&~)Oox;?Z%@|Zw38J+1=&^!z25wIQ
zrHSQWK7Ja?Qbi~&^~CdP<FRCTAUY(8;S}qns%m*eWP4NSjzC*9`z!(5M?xyuGML)r
zrs3<JK(uacg4|3aT9j^xL8rwSUCp86Q5(8uZ#v!(*P@;MHB?bd;l#h1X!6@@(7sL$
z3KJ60qA&w`SRUQk$5p_Kd`3MZ3P7S4go-JnNt+#Gxefk8m1{&`?;C>K45y&(PsTwS
z8;?TUQ&6Sxh3;*?$e0(yp>uta;D9;fm3Pb|W6ort{lG~u`Q1i@uYYh2Yp)Pd#%a^8
zWBw$K&5aXBGQZpMFo>OSi#&Q=2jW##DDl}roJVk+x%nw{`705o`P$*j!~`sSz6~Tc
zZv+Y^HE6W`BE8QsA1d2Q_Ph)c7_8BP4l~BHtc(*_+$|zvk6}dXLp`XEi6ecJBQVk9
z5)qvkRNWc1k|g@F`|j8n)<MMbKnZ$iw%`KvixnyFYoBS)pGFwWW{yIgD`4xH%7uST
z!iM#Gu*;*7bPhiZw*M@o22ScU;OH5sxhg|VTR$Sd!E~wN9-PZJ9~|~69ecS7s-3Tc
zF?&+bE~)?o4F^%?Fo!IDDMqzD$=Gpy1H4&Q0tWFd#N?Tn&Mp?AbwCQ}D$N3}Z4KIO
z&IIAV+lcf<nqZ6eVf?jI8*~3kB~7K9kxk;jg$F|RW-FAaohGj9wu5NE8#~Xs!pnV`
z=u$HRHFI~7%L=Tk$u}2GT18l##%3Ak0MJm*!WK4zXR@yNmP=(IUHhG6vS&m3U;$~!
z-VM5AQ-Rm>yF%A2g7wT-gTksX5H{|mowuih%??jguM~hvj2>y<n#_7z_duthhp4i?
ziXP8OB5ywrr@l49-G<p9=iQ>a4#k4`hh_NEQ5ipv_d!jbFaD3BGmopW{kr%;6on9y
z5R#B2q3&xVNkS4r2q6if2qB#2LFH(YCZ~DOB#r0V)lo?ZA;c3xJRzQbgb?2S{!jHe
z=f1CDueH9b!Ra;E)XitAlnJrS>kpb)>!EqZNAA(P1Sd@K$JWa?gjAz#u=>XaY`vX`
z$2PA*<EK|y!inSLsr7;Hm4&$MR4}x+T5+q7RWNt$JbDkQ>n3kHh__B)F@4pbWw;o-
zKRWWrf3vxH{Vi7TtvCF9JRCjrDf=<g1;JtoG=DjRCnyJDZa5P~>}%Qgxh2@0o(3`3
z2N55_R;ERsGp+BX(Dc8z+;Ve&(E7F#+I_;|eu^qOzYB)8v!lUife*YtpNRGgbin@C
z0r-73msq&p1dDy9_@g!x9cP+?=ZR%tdasUcH13BETd0#Nembaqd&(Q9<;d*&DMS3m
z5ZF>3fWl}o`8}6XpHn*XI5rk%P07LZ;#ycMO2mR8zo>6E0ZkcselxR)S*-+}_dau%
zHtHW<F$+2lH89^{a$I+V`f1$}B0c|b=h>uj9JFV}3v{rvuMe)@J{$+VrF<RC5*lcK
z^{w}34ikewr709feNDxeKc-U0s51%&m9a>Z`ni-<pwYb)R99aGj~)~5KCP5@Ha!x=
zTDQ5aECRdQ19`A(G{)*(1AR{3{^Yq(x9I|y+J*^lABxGVv>v{$97{Qp_28j53_9k%
z<p$Ss1vMGnHJ8kS=qQ@)@5`2HJt$}KyL`bj!w|H5&kK?NCV~5lZeCX)f$a;D!8>dR
z>6GQv%j79*nK%KoUub~Xaww}Mee%_<O3+HV#C-pfuJL^%h_dGhF#0BUyJdp*nr&R{
z>BXdfP4Ql0DCYgyhW*xDhUfGQwHfCD9*hPL{J>VJ1*49I9jYhZ;%Zf6L3NcnWG?zZ
zd&pZ8-J24w3L*44CK$)`F9VO8eKE^>3)Zc=hE1ctauw}fu(3}S2KFsy-P`<dn95dk
zFggSdZPU?dT`YDlE9OR@+=waXidGA&U^Pb347;9*Hq;1)TYbm}M%+Vjtzi1fRAyT8
z&?F?l4EohaK-8$ySpVW8#1qTGm8Ei#F=?%D&w};s2-MW?1)oa-@I-eB7C$0?Sb-OY
zpSglPAz`3>{T_e%DFd3v#DjtUJ-)6)N(_5f^zCwoljJ=T{jHOQMP$Q4y%3oGVjfl+
zY{P}RoAK_@a2&MwFy^j5jPl9Gh`nb*?AuFVx_J`QIq8k|&-_51S%OW{uiWYf<<$y@
z<1d>*DAnUK72j-7caMhP>(My2_k7%8!|}+tad>&30b2GR1P=lt@QnLjv?^Q+mR>&m
z-aK=B^&ks$V~&A=>rVE3W<0>cEzqWai5)a8#cGE%lsu~@HpN_~)VmHG(3^YQ@?&vu
z6vgX{S#9$b&^Ueqf2UCAZm^Q<$bbtNF)|Qc{gXf(H&FPsP!F9x&BMcy*HG>KbLQcG
zpT~?D$(!`ggVFv-X7j3z)!v>?S-fwun9Tu{<7g_kIS_{xdo4)w_s7sbu^9iq1P(ni
z{FqRNuKj%l+oD=bJU<PU8b&}_Tp!RnSj{7ezvnrs7Q|7bgkL`7@A7&k%XyH<isnrQ
z`+qOMme0huIL3(4WC5}DbRQWI$Sz--j-64&e(_r)vu&S9tUi70dRPMwk7#1Z0YCQe
zu^i=T{jv3=IsFa^E>n_2OVxI08;}K0@dSEwF5)U@DnZxnB&+=`<)<WLu<YexrtTdL
zmItEH+))EYN7Y~q`3;1DgAo!6xTS0%t6G$Ws?)=G=jerqn-kIYkuFF_ItiPr&S6e1
z<;VWyfl=8t_TtKVY>D52+U;7<bIS-!y^n%aX}ZvHe**P71h8X^qEWQDq}=IH0pz4T
zWgR~%S>D7Q*y($Kxi7be2B|h=etE`HWs5-5|0y>uoh!2pQsM^j39{yUI?UpFAI$2j
zimt~pSo`%^(DY~>yttQ#sjvEjJbW*he>nh_t`oW5I0<x5ng9X&hhSr#Kbn5p%WTiu
zgZ7nJrX4pFTz$SUhwdLV6e6zsh^5%@_a4)qaSr02z2>KuxM3qs18LwIW<MF3_D%=z
zcAbpIp08Q!_<o>!I~WY?Ygxh9wNSQ4lNGT!&{}WJw3{BW_(#MV%5w)LbIO!&@MUl)
z5|xJ2on-+~Cq*cmu)hlKMy8|d%p}Sx{gf^L6NN9%?nb$~7mMsG!Vc9h{N$u~v=?@v
z$_HZz7?y--j@NKcO8{iTMt*WI%{sE@foAA6;<`m(tU(ERzMnG;=?&u6gYfzEOuYZT
zFLhc?mpR-QV7?HH&4*X>gBj~^^Eo3_5C6_}-uQv)e}9<XDC)B^HR2NQA>3wAD|=7c
zy7-os;H|cZG}FJV^@KC8f1*rm#8Pya*2Nm^{F&o`o!C=556t7MS&~9JYJGjg<oUz+
z!ag!wn0f`ZZjKj3!$t}t>WDYG!VApD9tZ7}gQ*`_SJrP-3{<*Dp}X!PcrQ-}bN96{
zXR;g{nwOL2PW*rg)6n@jc@}?naqHo};630F)-I#@*Z84at2h!}Qw`{RwFL^grm(uY
zP-rf^!W(;aaL0<5%;0b+NRC}0E!j<Is8|o$l#MhED@BwBvA3dY&}%4l#F%9>hnK#j
z$qk@={Sf8eB4w(jAB6KK^3ZsyDL6c|!<#lapn0+valVK0K+4dl9kn5!?<L4zU4tI0
zmvDzg2W75z<k0J~1MYgfA4UI;;>N=)naJWNab)rYn;HGNLRk@W{4fXN#|#IfnKBsE
zTmsz}=7VU*qjKG0yK(W&d3f=WE|&e1&E@yXO<aqenbdWRELNTkUD1k|Ftrf1)Ptx;
zR6&T5QEuk+KQf0Y-+8KDe=rFDDohQ|hRe~xBoa3A)+^0|c;7a`v$G6;xQ#>25)arS
zuA;fQn8y@F@Cot(xF>1=cE=3{54TBN$?-4K-67|YL$k!C@yxtl9n9vZft3UCTi^LI
zRhm!5ym`aE#g>3ZG-bcpdI(OKhgzE!Ku@hPbYI^GE9dB-Wb*^AuHlF+gKD8MBY+<&
z&cWcl1z?$an%AtzMAZc1tr#J1DHTKflvt2#{mu&VGI(YJa*>6%iSxMGaKqUjC5GFf
zv*{Xln54uVlE*R~y7Slt$Ut}dK)y!T4W&J5g6EESn$=hmdm#ie-|U4(iy9sn)xfsf
zQ{H)8F0;8Y6COJ+K|@-@4o}KMg(AvY_Ljn`U?-esV2uhMp)8QE!Lm5&wEy`ZH`s7n
zkdK%OOVl=@`+YIj-Tjx@tVm@A%Z4-Gu%}#oj5F)_-zG4<-pN|-S5apa-NRC;8)?~D
zocg{HW`Em-8<uTEwd4@kb3P0lbthrS^9cBscL}6wr}**1Vl<8Di}h|LnE7Th*d)e7
zG-ZX2W@kac-tSyY`>XNK36#A~;<qd(q2<`+%yji>!GKt7jUy;i+%z7BdiB9g1!qwd
zZ6fP<*vREO{beC-;d~S2<<*Ywghf5X)6&i0t!Fla<^2n6@akH$S$z#t&gkNXfAX;5
ze>G5caWU(DMqIM7=`y3b6WCLk0gVmBS@k)<6ZK0-XZasfeQv|%$7Mh|&EAX`)$nJ5
zL5O{QxgAbH>t=5d-T5Gt(|N@~SqX+zacng6<6QeV-k3KBBQw&ulF>=<w$(tn^;1E-
zbwRmwuo<2IJNT#C-8fcb2JXKb1dZK0;N#$x7!xy_>1-H_9lPzIsbB<DdP*R2IQ2VE
zIKoF+jU*03A=uvC0lgb8fsM!qwhr__&y%lM;?YEK73_KM=1cf_9IaiQ#OHm|!kSK9
z;qn(!@Ti{z5?I8;hFk_X`~NdO4PJdQ5v3RRLemGz*IoV2mL-Ov#?eZe?eCYRoF%4)
z3NadQ429<B$3Qi&K*-%wg7vElxjAWrfqk4IB!kYQw^l%A&va%NzJ+yeK1dzX<;?Pq
zD&$8cV&o(faErN$tM*l(NM{h!eK3<bI6h&j5!t*dk@oWBdxBK360`#L$jaUr!<>Sx
zIHDsJK%tT+U-Lqzz7mkP+Y!%n7^vS|4|kRQQSU+$|M+(dI;8A|Ws`j2=b!DUX<f@C
zRa(%pfHJ?^-I%sw0`#;~77dE{D4IPzFp0n~U(RFP=i?~()QioYdjxfNF&_E&7E7l+
zyftSv<SdMWFT>6ekHHvRHS2iprBYO_JpdJ|fjD#|aWID7l`WfBi<vJbfzrTx%wCd)
zk@pMu@jWN7saZlDb3I(aX)G}lYeCPFG8|LG(DacB*ZhZgdI2S%kLNM);7fKSknXQE
zTQah(VfH#vD9ReoC6nfYL(ykh`vYaxZuy3lEndfFU7CY-UbA_P)^S>wrVtY<4eRDa
z;@sRUEH)CTLopl`7ASx`(1^=pExGQ+bs(F50c72KD3=$+&Gip}NH!cTZEo}UpLe-Y
z@nq0bG=%FO!5Dv84=T5&W3l;A?5%tS#i@yGg{~uU1nzMIg}rqCN@n+a*FaXk@%VN-
zeFysC<*73p*}@qC_;vju$_j=-_N*8<w$>8MbmvpQ_f5fGQ4^d;+~o3W2f21<66Lsu
zf@63jrhGk*;x&b`I?8i)Px;AgiFX@3b13%fx&$pvzG$0H=SI@|fX-WE2cHA|zJ*XQ
zwplRPlE-|3x|E(&L+h(Qtnru&anC=<3Vw`dstUyS4d;UE*GSg6?lRcx1mL$xeK5yc
z1ZA3k*{9A})Xf@zRwtDp_<SZbCH&;=my4MFyk!`#euwwpLYieUaR=t4ql-m9DA=AL
z(;Y@UkqGizuUL<2IguzD{7j~>q=su>dchy>AdjL<i?vo3u~m)3QT5$LVO@42-u>?i
zI$wUxwmA;Rd#ClWz4;^<WO||H#qliW&MO8p0zj@FEvV1_#bTbQ@JFvx;8Fj9DE^o&
z+iseNMcKqM9!KY*Ky&IfnF&K%V^QSq2JUXn%ygooVEr)&Y#W^M^M!Oc4m2ax(<V0l
zFB7PVW$M(w(fWG?@n)Y1pJtE5)IoCgexV$t#hV3*#TC9O;2g515jeQpiI_|8%H^%6
zpi!rV*&XvFPDTn(HA>|=+lcX>;0=eJD$oJkS>Rmqoq7QGSpFX?dsz+lZY{;OXZ=7!
z;S4=X$Ap%~y|DAdDeUPEX4XHe&`K`D>;773Dof@bBN&3VGPp|VJzidm($J%_BU-UA
zVkdQJ6pq5$B?ln&{SGj{O?f=p3ma!eLFfHYZg*opzg08?efRW5alAQ1oOH)bcP&`3
zPlnnDYI&`EKWxklATQ1%Uf`Jz>No5`?M4ymuF57hNf5dYG={@v94)8T@%JesF|maH
zR_gk29cl+Hi<F?}(hP9S55Qf=X;X!()ID4QIn{Sr^RaM{t`-DQ^9Yl9C2<(}Fp-5H
z*1)bsC(%Ws4h?HWaBJ6TY#W|}4f+h^TPg(6^m%anYd+M6T;{fYhCoG<ChDCV3tA=_
zT%vpuGM^6N-Ji=ars4$gu5)>t*Dg?5<OJ3_)ZO+#n``SV00Wb9Ox2nMhK4C%L+tOo
z{9tq+@{{Y#A#UJ>$z1DCvdpOdBFEGcUSB&0cWy{Vhp(4W@i1{Ef-IPM5`7+U9wb9H
z@>5GIA#c}yx~on>%l*ed+-p3`5VFwW_#W;k4`a{Ytw0U=A#DD!pSyIdMbSS=yjL3i
zyph}Z@A?s_Wud^d#5!O}onFmJ&3w9&GkzGojyl<N%6~S{JM6L%Vw^jf_|<T3S9=9J
zC_nkA(g|aZxkFFWL9hr5f=Xrb%8<_Opy)2N`@Um!!}4+1{Ee8oY$DiZULf9n1Y}IA
z!@#nOtU1MutGZug&y@#bj`lU)I$jD>AC=P`nC4@`3_(16DA;e&03&~YIQfhGKAF9#
zpFM;h8ITBLc`>*@*#Ps`tj0d)EYbIzGAJ3Z1eb{!&}kcvHfDCjuMK0Wm?4yyUW42Z
zK4{xk%O73U!}y6W`Ndtt!wFmnWhcv+`tuHEr_l?=ulvcIdk<lIj3gl4dr}A)M0ra4
z^Z4D8K2xQu&|~(4w~M;i$Y2BXEG%GK)&^s=*C~weCM}vgb<)m-vLa<~W;(4e{ylvF
zD=(jioXLsk`a&jC)yWZb+jp?`-=*BaO%aO}W!&LSk1Tz8D#T122d!D-AnO@rLwsk!
znu3McE^KBl#WdSJzmFN*U(e>Qv%-XJNjTX~6L*#-puJf*#KbAG51%Q=Zm9x2bE~*>
z^maaH{zBCK-ongJ1oELP%HY+iI53#Fg$D|ExMXrAH~sGz4D$EISE+$0TC~Tc%iuH?
zEm{W#Vk!78c+4&>T8g=+-ErMYTf8}gwEl?0JdpffT337XQ`;iI=Fe=je6WZqz0YSk
zT3K9fuOSpXea(!csEau^1na&eU=8K9?dl$KN#ZGPZWPJ2j*++C!Jf}Ksf3zL!qnyY
zynaO_sK4@r`fZcApizchrPc7@G-)9;-|JlVQudMdXZu+}<XxHwHX6hhzi0qD{sW15
zuod+NdSlUtDln*eB&a#QWOCCe;gvxx?vaQw-(x5?FS7?pQvz4_3*pYEpL6lm8GMd6
z>EUVj__Q{fpB#;nseDYL`APt9h`YfT-^oP#tI4=w=`8HJcZNGwxq{e8%Ig1_$(;o$
zWS&0^4iToTXU}}NPQGCtpNkeBOwm;5vx$Rt1~ZK*6Q<12rG4!gv)j?lb$5_fOdK?&
zNmis`sDY{@u@Yv+v+im8xqGKSG(QOALOyYY23%(+iUZOA_DIZGsSRakF2R!Csn~MD
z16;;fl7H+X(>|#M8hPI2<tNSS`an>Kr)>NX1&n-Gz)QbRCvMUl3?42AOi$q*3G=`-
z`LOJ1N)2ZB@*>aSBVPZ@iO%B%5ct;*{GL#Dr*t0I>!#V3-Ek(rGM_mgE@4s)F(z(G
zMTg<v1oeRJ+*oT0jQ{T<9(>*p6$hNgmezmxhC^}as$s^S_N%4>Qd4l1kA$wB$*fh^
z0v-Ic;79uu)XgN#yzfQ4xI>JcuZED9RgPyv!m&F=7vDD+;_u5=sJMvcsnQ4`E@3E|
zx9<k$G1g3>vyQ3irOEu0`=Y{qHyH582M&vg{dPVD&Yrl0lA0PpvNBOnF`NkZ{~?CY
z*LqgJ&6Ccs>tXDyAP~=Ulv#fweQZfMYb|=iHNI~{gNisXa!X(aGg4#?!Ogr?!xwe;
z*76Xoi~RljZK!f2g=_1FGYjnjXwb`vNBpaaOJA$wstp&ggwb9UIE9O1RamX|RsJ;T
zGB}ScX3_(H1o8aMOgDWoGoED*tyWIZl&1_2%C2Dq`5fDQC-N<uiG4$}In!QCgpT>Y
zxoO#J6SMXZ44bcut$u-g&I{6*YE#fG*BcZ*9EDPaD0KcZoxKVZ;h)8np>Vu}F_U7k
z?N<#~nE8aq?|I7=o+v{7=G#mZbci}7#}XTze#Uf1(CL(e>F-XVWB~b1D;M%DXHsy2
z|1Pwhkp~(Mbk6ZV2u1OfX}4?^oHQhm_jL(6+&2ZiPg$@m?;3VRtp-uYv+~Ht!=O>J
zfXn0C$}?_J*1y%5awq`98|c{_G7vkQZt^1P6<y|Y9!+0t5ni1MLAhfE)|x56FAd_O
z`v#!qK?jh%I)oyl(z&LJCuwn0#iqYvc;_p_@O?#eN0|Yx*8RctS0q;-Tm_n=CP4AH
zG*sBxAH~IrCIxPOAU38Rnf!AgKHgZ~tP>9V60>m8M$(^8?*>=(Rj_SpKCT-`?3?j+
z%=zzo?nu5)n_m6V(2#ijM>m3eyqMe0Rss)U6|Yx`<nl57xfc1+yMBy?SIbLayU+(k
zZW3CD`eN+M%V-65AX<3FSe@86Eem%-*uyx`i6(yFI5}#)oel0bwcxvkd?KUPW4%r^
zH14StWGBbtPQ6g{f1QLaic;=y=p8#Qs>QTTBT$1>S5eqA6XVZ{*tqN+JGhZ_{!2Hw
zxOALguPy`8iUhDE51OY$1)i@<!RLmt_%k~lVks|TSuzy5>^pet&laKf;1yVy>xWaD
zykJ<vFub#?4*Hy3jA|+o+}Em{Y59KOZp)m}*mx>CGAJ7~3+OWn>ILFa_k`?$l~Dgp
zfbFMc(0!{nIiO7-q#^>s-HO0-Vgkf`9|@A?NS5A|g(|~J;J?~+*wVk2hh;=!`-M>u
za_j=WTIop~>ttrM;SJw8BOHB|X@)TSD%X^*f{w)d+;MjcU-a!N%G4*LxUYxoOeA@m
z%Ca!^+!1JaJ_EXzJ>xc~SHSQ!Cvifb3P^ttiQN~8ht{7XET;TU^$G60eLFW_n$Pn!
z>@h~&jMeu!%#1e-0&mhG1KhH3YDzdtb~kd<O6mxbDucoFJlQAPP&`+eLQHl!JlVbt
z)qhXG4K-s?rQHb>=U>F`v4^2(Q!+Mx*#r%j;!$_d7WVVZCPbA@U~qE=xKDLO`7wR&
zwAh$9SUbQ!*ca_}h{Il&B75IRbG)$%g0ZFvIJ`C#I_4%}<5qpvZG45tZ&8NhqqAV_
zXn%NmI1ZeT8^P3ZIm9H{V3L)#0$WY`@W$*QF7m&@jt(}$)q(z~{xJw={@97#=K9QN
z;4(-o41jdH7wA`cW8t<a42e683Umhb7!PpjM*?|mCqQ@D7xw4QC1}{^&x&53qb@I7
zFkL|z`*(?`lD7&XmjrR`EB6TGeT-(!OQ7-6AK~nLJB-<&$feF^f^MmZb(ijkz>b}u
znX8Qb^W3m?)<_;>@$Ub*4<;g`*Zj2NYW&hZ2OGj4@wQwa?*2Z22O2G7c0nbS&o6*k
z^96Kx`dAhzip7Q@yC9JC)n1Pdk$2ER=>Cz9&%P&;-@$|1bvc7`#bMU+CkXDBN<i1q
z5ml6iQP1NECeA%z;&QV;Wf~k&@=jnK{o{yXKCC=UJsxVlRHMg+{;YBQKIj~n11h3*
z#9S`s9u>JDQuZxZJ-Llv`d>C$Ef42*{hzR=L5e6@x(k|bP68vBBUttmxN}A*TFf1S
zI+8+k%`zbFw=2Dyeq4#gV}a3CLE%y+-nR2VX@3{ut3|;k^#W{PaSeWcOor3Rb5MKy
zbf~rZ$;;+{=Wn(Wlg;0Zscf`>q7{>%Zb~c~#-+oVkjrQ+k40m@+dOqa30w#gqx-K2
z7+sNytv`2w_^!GzKrbDKrj>x!<Ad0+v=m<KjKlA)$>>b}pVfhdpm3!GV|*@B&W<u5
zA2jg!lM7JCNV}-~Dwxu2ptJWNF5Bk^28q28+{5_q+Vl9<Dh@=ajLKJj&PF_*f{l)O
zP~A*7UzcH6FVv#<`XHD_J>{L>vV}lhS9qT04VkM*FT6_koSO&X)a-PKvs#Y$a6ZiJ
zn}<F@`w-5j;@iJvp!Z=dO1jHIbfqs&cOOfB`~kfEqA^&~x<z+s6MLH1$uAsa1}*le
za5@B3+*(<8P$f@4l?-(`n^96NVlR`@Q6ZEwgS*s3)i53U9pxZ;8eA^^HvnBnRdJiN
z03K734%0(Raa#{!`@8jEcBve69~5)3#q;tbX@%H4V<poze83LRUPyDh{#=xrI9J0v
z0z*2y+15FQ_<UmxsQ=HB=hW1J+(=Wle2XXcJ{OIigL=3)dWp=)zl*$8D$seII{k{H
zxZaj>XjFX1-D`+bFm*83o%@y9uKL0I{1=3)cZtZmT8R5c7olqYUsl(kh!F*$*ls)>
z4BS35&zR$2WoriweQ)w{`NL54RED6vJQ6&T4Vmm+2uc(G2w{V+LdSt(Xgp%UttxGx
zv^gL5wOgQyEFMG`sfSVKh3#VovCne~P<3?{zg3Wmmkv+CWNjbv2dI~emPrN4-7fMw
zsR<fABhaG87w<1lgxCrM^HD)?Y?mIEDXDU;-U8~Bh@toBT9k~oX7N>_fGej!=;2gU
z6-7eRR{=Uhh&gUadHlFR;B4qbK1LO6ExpZizIs4d-$eM65C?OeiSuLah0YVR_`tj?
z*k}>XWAutxLvJ58;z<>G3GG>;Q8?(Cw)65QmnkPc4Lws+q5a1xrkm3TJxz$q?0uU>
z6_RJjREb$0Fyf?@V3=kQq|IVjty==<)Q>WPC%D1xP#*0v4|B5vur0<IjD_E<`M*G3
zlwpCExvROAoW9qM<geVNAuDqXr8TaBFL!rA)hcCBSeV4Z(u2XwYBXvM>I3e2hcNiW
z29$160Hr>uygAGqnvQ(nnzaK#GGHLM+9)$MvpO*D$>)+iC;5@AFw)n|(D!{8Z=XDZ
zg}qILgDW{~kxWJx+({f8smWyXOQ_?N3R@@Dpy%LGVER5A3f_-k?WW{2R!L)<^$lqb
zI~aoe_u}g1<q#=31KBglheaG`E$e)=`rl*L^^WElPhGeZb&HAJ#>tv~{loR&%%$!|
zM2TPr3R$+Whce{_diuO4ONz!7q+|b%<hVN*4ARW8Y=02+6yIi&wY5x|QUW=8$laDW
zWA_11Y`xc;J2{kr!oQO+r=tej&-Q~sng;l3P#ERj8hKZjIvVRt0ZsDbikn|DQRxU7
zIPau8x+@cz3<H~9t9aVbDX6cXgpo}X=pHu;C4YQTb>+Xz>h(>Yc#is(_lSsJwU2e0
zf8+{Qi7c}fnCf6Ml=RBunTh1hh)Wg9dOhb+m$%`d73P>ZhWPM5m$1aGiR6uz3JwFK
zSm+;jw3Aiv<^3qTtru*vZB;Oaj+u^{>T%Fqs?A#Ze&bWOUIibwT=d9a3^v1Po(ul`
zjEg%auUdf~$`&kdQ5ovhU!~raR+-JXo!m8UEEgXSF3+NwN9t=ih$ck~VuiaVM<x_N
zw3;tU?c{vkVL5&_t_I692T>YZN9R`%8vo7Z;zh=A+<7R93k8$0V-+yn!Vkw8t--GS
ztDsvo59KQU)Vp^Y+LlL9??oo%X2!GCPZ*9c?}Z1A$}r455bY{=@vAqzac-musys5L
zKBWxVw+aso-M<ulcZXv~j2A>6$%I5FnpK{j2%lxC#G$9mozx3n4K72~0P2gJ<qvhV
zr`KO~WH;&TZ*K6AX;p86n#5A{ExpbH{m*mL4R2(U+z3c*@#DT9iooiDA3JcW78?~w
zw0|%f+y_5lIWMfCd)HEKwelpi^d1BACChQQ$9~MqqO8e-LX2K`6=L@1@q*^du*Z1@
z#$K%>j(7xycb0&vK-{ag5P1Kw09<06LA*Rk5Y5gs4t$+}F*9PZRaFITzEc-;@<lXL
zJCBA36)=&!;?ikyQ2DM7jpO^^tHeNTxqP0hkuPhh*I+cAFjg>dHeh!GD&ddEX4LIG
z${UuAB<AUu*{PRHnAXtakYl_Na@I`)g?E{}RlAt`4R*#JSA;UDJ(yA-W35FXPkxnv
z?Y(<*Bn=?uf1aRf>nCf=c+DI>7&5I?%3p8ogW}|EEaZnT_uZFEI#wPo$&jNZ?P>9)
zyU_Q!Kim6oJ+{A5;ifT_g3I$TcvYN+sV|-3j&mRUM!sFk($B188qIs}ZUFh`8{GFs
zU$Bq8&m1~+c*vgPphjmJ(UK$+D`Iau4?M`6sE2yQ1Ru1>8%mC@e0ZQX4%;$xp~hAh
z6OV+0owYq0?4fh}mJ&Q_bOi&CUjWq=E@1jt3g0iMV97mt7yB2;oM%pEpQU10>`lJy
zXMTcfIC0d9r{SK{vrsS173z`$KzDXNj0{|ljl<T!%XKnTU8{m!#N-E0AG(v%S!uul
z!LC2e3NNaIsq#x%ORqqFN|p}CoXSx)KN|k~?1rZAjp5<$L@XadcOSE7to?fe>pcF5
zYllX_!b1zN<RSSzHCN-~w9T0L&IBez&qL#YL}**wz}bxoG!NT_k(K1x=`|US6T@09
z_&m4#K&&#Sr+m+eYSfDwg2{SS@Y=HwPc1EjvhQE1Q>&799yUT}^JrFP9?aAYQXuog
zUhWo69%2PG6QcoMyvy$)ljb}U`t8rbKd(bz?4>C1*l$1{!a~`%4QUXtmd-*g_jz;3
zdQfkC!#d|&7F?bwV)GmuJXSd$9dt7V)jAGlZ!%Fwkvy&IBFfK1>!XW-7pq^9hm8;J
z2>MSI@LK&<wA|~(!SyURoU;tK^a(?ym0MVhsTyzIwVm0T++xyQy{NNe6|-G?m)BEH
zM1EbJoy#jjU3CNIi|bK$nGaKaNuA7pmHCmIQK(>b5Sn82;G1J090{d9n`=Lr+$SGB
z7iF^jw`wp1H-c8keKxyrI>uKm1!-s@h;z(L+!G2ROvJ&tSs#=Z`J%&mV!*y!4<R+q
z;8YrqF3+m4!}0-ByTw7;{gav19>qw$0Wit}+ABwbuH+|69X|xj<&U`OoW3&aY2l!0
zSk4=BugTmlSED@7ND%%dcGo5!I1#%FN53e+f-EO6KJEq`IcMQT%QgJbcO<I)k>K((
znb@@BDRbDI#f}=T!$|clU~h7bIq<Pi_HHI<I^E#q{|rP?-Y?@2JuwViQHT1cW2pb?
zAshV32;08~gX5?&a9?$gmwC(BI3`2m?cbQ?wm6oVP{a*1Q*pvVA5hA2V4Y9IET?q_
z^gCY*=1otTQuaI+r0Rgye^Ox2_7d!xqX;&0<xJdRB>TI>2tD^-gT~dwW$zaW(#BpO
z-!(+Gy0i*zt7M~x%K>f^Tgz7usX@ucF<dLdRp{Qilev#5hPlP$8Hv;e7uj4`7jX`{
zM_uPGf9Iey`8Ly4PvDN*spI&^3vPMiFJrCcIMwbd_S@?Xb!W-zfj_C2xt%%RcEmYW
z))>zdp{J{W$Itq}8*l%V*%zB&>fLfMHF(au{VI9PtF^44`<!4b4~Nc+zgRtab43@u
zK-EOXNdJZ-C$^yPc@dgAWP``&DQJ*N-I5aeH_{E|niFdwBP9sO=+8irNU>~;^8rF|
z6`=Kr_1MbS%C_wAMW@42P!R70mc|#rc+x;{#2>tRnH!4x?<x0~>W3DKQc(Iuk7<YP
zhF))HqS}Uy@Pc^Hl1t^NI(`H*o&ARWu3eAO3&i-SwGfAZI?Db3GpU`v7}}~MSfXQJ
zRKK8tS0^t)1A}5{8<h`#&jPMlR*9}R2J%~T!tllDWBBxXJ_NddXOTM(;}(s6G{-9v
zI`7rXI!Dfh;?deTpYr~`pCT~#`X=mJ;Rn7CTX<l-6?o2xgFb$TQK-F2>;7dU>CP)G
zFl84vw@qfYqF5-HI1!{RNjNbs6Ma>xP}t><3KN#VzuJ|kWv+|9<6WTOQ?g*{aId`S
zVgi#}mz$LLjY8?$mBPy+z>kWnP-)nG-g$L06Wx#+*9S+l?ElwnHysola{e;uTLVF5
zPaqiBRS4_k=6K}7STxQAo?xtv5j++PCS{;)?^~?1+zIS;1)RT(cq^Km?(Z&WvC0Y`
z>?1GW8!2n|e9okEx3Tf%l%M@(0{TIru<T(ut&7!QEOCTDpA(?4p*LXN5_p&EfKA7?
zL$|0ev`?gGxb6pYxNOEch=~(;Y6^SuaVka>)!@E~8?m6w9y7naWJX5h`7Wi|T(cj@
z91>thq5*1t^X78xFS0*M#ZYoL1KJ;6Vd-~sA$sKuEK`ofH)mteeq%S+))-6~z$H-N
zrUsfdBS1~+%XKJoZ)xBTcN8wbp4(Lzt80Tjs|>-w{0BEus{kVp(nsHV!or{VI6HkK
zik`gYZ3hsF%mSg-{xG&z#AE!yAncew2dtl`<H3nj(b;(%>LzUm{m?wP_IM{&xijiy
zROO-xYN$8601ujyZ(zMTto<|}MKhlnr>^rtu_%}o-2BPH#|*%%w;Rya^BGgCz67Zj
zw7+{#ARfvEP_RyAYYH!;?`_W82G~OrF>CDx+~y7+J!R1kY3}|0EUQ<!#{RQChpVfJ
z`SPL{Y`s!N`8h=htmtGIZw0DFC1@)}Xo+5mKY1E4<P@<si1HS}QBYRfAL1|EWslnS
z;(@(m(Y(Jg-7_NC)J9?wEm9I%o7chq6NQ-Pl?<Yo%<@Z4GjPkBa}eWh2?parFfH~B
zex907Jvqhnyqbagp;1t9yEpjkszlAd6QF40K(PBa6u!z0(Z#8h2R|%^H-&|`=f4Cj
zd>MtG^<tpix{y~VBw}g?@xtSBLG!gTe0xk<4S5jm&nShDA#^78sRA|Q8Zb{)0yvk-
zERWey4n0x0sb7J`-{<3)gM;v`Wf&ZBazjV`Tqx+1&#x~_L$S^|X80R{F1KJjVImZ-
z&&SRq5wcAC7&Kl-*+<zplQ(mBqU%vJ=Gqeuiu0CY$WtTG`&kQ0<0i6@WAT*7F=Fmd
zrvN^fgz{Nem?(RKU?0`YItM;u!+IS;tzMJZGUqDtGWg?`BmN-j3M`dK{bZ09i%$0b
zq|L|Du$Q!E9g)!RA|3Ll5)XOHVa$78ixbwwfUVvgCMqxI7Br8z*+TjJzy9#b*$-ML
zx5?V9wu8>(a`gCT4%Zt(d*qOvCYu`0pwY`?xZI3<9s`H5x6L^)Avp&nzxD%`^Z}(u
z30$F%6od@{IO=#l%|`<8=0ak=CR9O>#Z~qR$pgPSAC!JoW9zHAyysI4Zz`s|(cKYn
ztKA4|uZ@MoZPhgQ*JI}E%GhViL=cUYvq38};Dxjt#W%0Wj($3V4*O!@r{Y@d&KU<q
zi@Le#fNDXDJP1SD1JHYVCUy^b%x=r+UXwPCiOVktf&c3Rk-K79_pE8aOGEIKvNz7(
zC;_c2cCh+k7KYj`q<)+T=D2Vbq+X}l`r0$#dWf`nvvpvsaDmx<Bp>C@d2nNZDV9Ac
zMb`_|C9GtL?{^Qwi8*KSA9*cyZ71Dw(K)b-E&xnPL0{4^yNO5RwB47u!eOZJO#`lN
zvOu5Zn^CyG35{o8=aHSanCGeqaOJ{rT&q}#KT7h@a@rYGh$M!gv7cbIl$eMc@<Cxn
z5^uLMX9b2nGOc9N{qAY9EejIC^0y{-lh>(h&=vS255k+7xgg$K$uF{%=sdoislq}u
z>_zOzVpZ7Ic?Cbt3Paa>8`$l*8dU$lz{);{cm6XOL;(|dYh5Zxy0fv*PKKF&rO-Np
zI9#v`=8c(!%k1dcay-bcuAD=?#e+bvuR3`AQ2|lBxXgUVNAB=;onU;JL(-G!D7Uzb
z4(}t-pfw&H*Z1%gr(wAHSq$|Gw#t5v*Fd9yGZ159fp3e?LhH;9q3y^lY~GUuyB#Xf
zRCxz87@y8HL%hIX{BOayjxt`a>zU0G8Sl(K3Kuqw!$0JQ>$2(%np-E)-%<@_4~PeO
z{eP@8sgcF+?ch!wUQl*eolAmxgcCo5vB;tjo{pyuxmU|!e;+yKt9zjGuXA{Zz>S-K
zZ$OXZ-ONgLJ9OxKv498O=xb2TN*=Ai7pf~Te3utW8YsgsLBw>$eL>5-7bN>rztnH?
zZm25bn}lK%?>1!8vd669?p}DMVvW*M5paq!e?~M<U9e>(UL&rO=8ItFJZ}w%b)&e+
z_++%PiV)fsg<$!FQqspSu`ic)&|Ll_mp<xZ>)v~y`l4%CU^x-w>mM<7s~(oD91PKv
zf3!YX0VjIT#HHauc<O8h?%zi_xX<e3pFPXP0h%T;@x4IdMIygP%-if?DfqwV8?k*;
zB$~$-upi}l=s)N(DlgoCPgBz|@B9Vgl3xRf)gq>OVlh;E(H?7lnrR*O!D=b-#yf(T
zRxf)nI&mJlhiv6WLA18Tzh^zY=b`!=GcL*@#&uEv6e=abh(-kK_$^pw-OW4g)0qDC
zP#hOjg66Zt)Q8i<i@vL3WOfwfk5<KIJ3Zb}QOau<6tK2~%FumzGj#q=$8k}GsF#<9
z-8Gk?;1`{x7TSYXr~~%g@c_fE<h7~khiXnCAf|cIk&O)7tb%yKJ&CM4TLL;Syr3ta
z*5^mLIR5e#%%aYwGV>w~alV8C-D_-|b&2B;3)w~CpmcsKv!Z;*bFB*?*6S!=*ESM=
z4Oxhr#V+__d;(6W&BgZ}XJK!3BK`@QLZ0_VuCn7IiW-J6!&?f77d){`KNB^6T><f$
z^$@AI90m`ez3pHa_I0*HiKaa-*!P(?4D3sO$P&m*nM3^U?GSTzGuNA#%u*-k!qXJe
z4@y13Ju!_>Xr^A*uH#UUv6j1kYvtY7PC%>uIbr`^6Fj;!8@of7vZC%2pt>QCONLen
z(o@%&NfdF}x>oSk)3eyskO*`=xeA<(%<$BpNc6Ref$F_tDA1RK`SCI2V<*l{Xdf_4
zsKa)%hwKk!L!K@U0LlNlxJWU~q@gZ~hiqC6p7DzKYXND51L(aT8v!y+>VcZY!G3QW
z|KnW(A2NJN3n~F^?=)h#r=!7=T&5a&M_7};0*!qyLHOAVV6GlcdQPltUH5#H|G2=v
zkdIAqodFhnJxLzoh1~JOKRo+9c_qaU*?!_~b(+>OC#wX~ObU5QZ&m6(*CgI!6jOS0
zkN>O@<4Lt7Y<g{jYqE#o9yeb|{doY2^i~085W5%#fl)ycZ;@r8n(!Z&G#_OJ7bi1=
z>OxrmdMpZ;W5LtCoJsoUW7qox5I=b)D~Xh$%guC9sjK7mi_XLBh2+ayT8*jmj>2b!
z)p*l28dq7;b2B|061F6wf%_NPq&6LFKavCu-4?_%xz3G?13~pxFK|f>gL=IKXgOg#
zbNJTIGN$X`v|+(GlRW<rrVjfvr(m033g7jl1h?cSV^N|Cs@FMlo0LUpgk|86kt>K6
zL}UC$x=%j1FATa%Yv<32VDR$;Gxk{tlJZ{Qd}11`h}}icS~i?Ibd`FxW<ubqkx*}Q
z9@;ZaKr6Fc=9-xe5UY$!D%Gf4<r;|Mj+QIUc4gL+DeKWkS>~a-oG1P<!}~co(Dz?s
z-1#aDyH=aRyc#L0RGoyplQe(czZW7`E`za&MbQ4rpQR5kggI}@@%_Op=stLlIqv<z
zLj(mB^=}jEZ@p&HZTd1(n{7OhScWn0v$*TDQEb>wD{PM;#z*c5P{`PagC}0bm&<9c
zu`CEQ{}o|bXEV#|&_S1%XW5;sN^I%d#GBUDvZA8dpk)X$7a@b0@3{!-rjcBHbH2=<
z_JE>|+CW|J<h%XD_0p;HQST~g!9Ft8p|P?=@(wL*+>J3u7f|2H2j;4^0nX}^4^nUi
znU@l}ZoLNGXB6STG945ZO)l%*k_48_8R~v6pxIC}ZyIp`bhn&iEjpZezDvS}jagh=
zU&-da*@sRF75LmH76a=JQRg&bm)>LIVkI+}6Ulv&ywNP>C@~Vln6vpsl#V1lY-A@t
zl{WxY4@;To)^lOdnhIQSpE_@z7eU!>@-|wZBClBm?-yDL^0>eJz~R}b`I6W&S1ZW_
zd<Ipm?yyrQ$=91ArL{N?3brm{2B!-!`uc7>;wHizy`_+;c!>47pNICtp72&U$zQou
zL7p@YGJi?X_QZA8m?D$4)XT{~>BP)kqxq@Rz93@#@W3h?%&c)`J?n=;>%BFU{SIYb
zkJ3>0k2|+t5d_0NSmAx@Dmc|$2DMirL9<o^wI&$y{P+UmR8<R+=EUB7a9p^eQH!P(
zlLQQu^8M`-ux$QG(0Z{BRAp8?^?V2h9v;F&?v4i=xjI*AyUb1WY_YlWJKOan2Mwan
z!N>^)IA1v&ex}pCJ5T}P?_BI4&PGM)I<)#b7d!{kdCb#NkYAOQpJFMnn>?-Y^Y&9e
zt3Bm5GSTvXmMnyLpL&ZAg6F66Kod#cFb%nfq88W<Yv!&cVbJlBbH%su=&`hoi@L81
zk-rk5)5!r!EV7`*C6ViXw&Zq;hA_)d6`=GVbxSsG1(BYXNua6)#O$!f-3LQZ)Kyhl
z{g`Iiwr{z$b1-NUXTa{@1g5v3nb%nmm(x9tGU^wZo^=*4$`4?6|EuBy53R?X?{Qph
z{WwV6UI-8QD%2f7W36lju%1+o&nC(-?xPgdUnelpt~8lwa}qPPU%~|2B-C0%4Bj);
zs}njIESFx!Pm3?2|KmVxKUvAU#~k3Ay(8e4sDd;uU@@xWpv$lfTBVtS%84Rw_3|b=
z;6|Q6_g3!Mn!xY<)Ijx(+rgzp9Yl&Bghu5WHZ;tOa?qR0T_;z9-UBTty9Q`HeF+zt
zvJ!RE^X%!04Ej4S@#?pE^gU;SW9&q1PqgBr`|U)JXg6+DBj(mZCMZY+@PA96ON5t9
z8W1OZlf|LbDM)7Xb2iPdU$Wf>bZ5^F2CJt2Ab!+O7JdH;s`yhEa@;d6R*;m}tM33M
z)Q5d`R+OJ92H)XBse@31yg=k1nEi*vkf%-(y9Sm`$)YYT(n$6%k>y=20kba&AY2F}
z_TEULBVsM|8&HZ-M#*TTLOPiHN+#b_%oF|TEJ^25l|y~7p+W<}zm&DV`G<dx@Icig
zC%Ng}4&Jr#0H!L=fg@Mr!9+6+d#4`8Glwj3_Yo<+ucU5)_!cg`<0Z@;mx6m~#&ajL
zn9lO&VM}ZRnC8mL14kB6-_$PZDp<!Ig3F;Z@r$f0&K`{YLcrZF8`~U$*{7qi<Y7y|
zp%>!8|8fu*EZo8zD@eaB4Kn^Pjbp9;GafW@9ZCl)V!PoLh?)0|ZMe4^_lL!R^V3-F
z;SoXKvlZ;o%zz=$1sK!Om#M1xa=qQ#!1B&L*1e&MmB9=gwb=o?+pD4N$!#8@D1c|s
zPG||d$83aY7&yNYI!@eS{*8-K+p7ST{9>8Pe1H6Vyd3{K>WE3cx#Sb=Vcj(v&|MMD
zq@*{BP5%*M&Mm`quM5~>HUhdP%>+-YeIPPWF)>%a1dYpqwJFE2`6csFLz<07s}`dj
z^g)|>0*_i(gz^2CLF<M-f-jwyx9_T@JfjcTzG&e}oV*kJb_+@ym(m`qf`2s&5q0C4
zT2_C^ba3L@JRe@)(8rj@aS(Xb5bTUjG2?NRp;c9e4m%fUT!rxO^j(+}WDM8mOhTpk
z5@vH@0WVWi<#jt#FwbZK&T&=1W^n*?@cqPd@ZoiXqW~=W;kPN&TV$*%SWTJ@F1s(n
zozv;)F>@8vX1YV;jVbW!^gb+_-W$cAgGqm!%>sx1VTl$9wSKmcvCSIW8}qShHffzb
zN__d+8k!Z(gzAxA(3V)oR8I8=)wDNkXlXWVD9J_ZYklxmxfXe5PC)ykBECpigW5+(
zkM%#p@2rZUj^%1-8NC7@JX()|I=etYm-<UQQlTxu0>%yp9P=Rp9rS&;D1N>mn&t%U
z&z8f`$_h~3Q$oF^MHqcbz%~qJMJf_dIYo?zGDHQ3+0b&8{MVM3WRB0*0O*`Xn;vuM
zE|RiuUDY7%*$ZaBP4UQpNthY%lJ_+@hx?sn;IVNB@0{Jqv~2ufY{z*FO<jc2N(DZ`
zF&%y%o`VNFhvJn$jx~-|s2OqqB0Ju4`I&3*_p3kNRxw52rN6l-q96SqIcqM>#H7bT
z_`b&*9Pe(1zCFjVR(T<IHu&(3d*u*t+X>gbNP{-HC5UTWVPGytsq+M|bC<%33)|4h
z%?FO3s|EcuDb`o4f!ZUSOBWvJsgt9@K=MpzuZn>`b4$VXL?DaztY_k<3PP+Mbx{}}
zL3zwknM0y0cTC$2u|-vwv#XSK@0-oaa(uvZbSPJUHVC?phC!jpWk?-$21Tce%a4pZ
z1M*xwaG6gzzAxvv={1_4SzabRz!&yRzX-NZx_QZ5%6?RK@gKd1;itzOeljl<WtIz#
zo5kF!-vxGf=LM_}Hpg{O9jQkv4z$Xy3!>9gv9sxtpgosE$1CdIDZa{k9nZ$Hm`~g>
zCl-spT|>JE`?;yZ2NSd8TJlQ?;GoqDr<I4GbN6QMTE7V7GhcB_&7CkSWGi-Fl?a)~
z+Snx0isWZ@f=ACoZhzAZI?YR2+)xdi>*9|d{$7-qFl3EBPq=HIK3D&D6zd+;%v>h|
zSML+c3$_;HsCj{CRc;Jzyf@4%5zu+uG4^xJ3{>)>Zpb88=GHu)vM;gB$ioK=JqDqC
zxusBmYV0>VgL8)M!=aamvHWi_SQSy0ZTxa3UTnqfCjDj}2h>>VmL2ef?mRW#d1w$O
zVe%<`F!1J4*5Vw=EEn}g;a3_y`j@z^gETSg=TS5mbBYh_mxv+VQQSAS1{7jxueqrM
z&R5r<>Elw2Z`=o3pKZXh*&WKFuHnrQDbP6L5EmsE3u3?ZLd=&!G>pgtm1&W@z%53W
zMB0?};Y>cbWfTfy_F=-g&3Kf!9PS??QDUS_d5{PwvTf$J<nM6ZJBYjQC*Oc(e`Afa
zu?SVOplIM|kR)e9?aULPeSaWo`z%3oO?B`o9gg+u1NgeRg_!+00VH?&g6rZ2nYcR=
z@(RYGKIyhaYDMUBZZiaGZ$a1F6QOJ9Tu8Gp!Pxp7RIPVmU$+Gi<59-jJ`4d^n+dx9
zMzohNB351?7xk>gA`z{rV^+e`8p>>#AjGd21X>H6WKVrlK(6(ayG$xZyS_)5`@=_E
zbZ#DMKU)b6<2B*n%Xla|HipHxxUi0o$H3&`2t>-@IE*#Kp4G%;JsAYo$fuk0!xjQ-
z4zupQwann5KF>M)n5&+-DtLaO&-Wh90H61;#y*!p_U9B{{GE*b&w1lE%UEpQKwfbZ
zSBM{x!J9*8K*N9<?r&5Ks#$-SZN+qORc_$w<BpS$dMfCC-4E)&M}R~8H@0ZHD%!OY
zS2OG{`)28lKA$$AOzRS6zMaCN+^?Yez5QU>5Ko<enzFKmJGs1YF8Er%<gYKKqTPx>
zre#<GrsMa*q2IGG(T)0N<b7f5Uz#yGXF})r?N}5u0GenG-kq`-`)NAj=Od)kUR00`
z_z;4No?GCahpW(Zuon)9OTwH6j%Th>R>6HLs9y8r@{ggy+R8X|Z1;gd|C|Gle#ET2
zv!E>I(izaOje~8Isc+hE6kF_>gBDHXA&(3|gM^)E_+vV{MH0VTAsSpux3H#|MKDD<
z7@u!drF@ej*WA_zGz?S0<&Zbp?Cs$Tl4Ph$e7B~iV04aiW%Gwpuju(p*tzOBo3dgb
z?lK#LyHd{KnPC$ApQIcI*j|C%<h9KH6o9JpUHQiOUNA7U4%@=E6BlY7H`?)ye{+k4
z{niW7|3@mCk4a<F2TDTl*$WtGnGBtO!dYgS6G{?;AoITnsEbfU)j@t3^J4{@qcs{E
zUH@gqh6ng(?QF1yAUa!qW{n@CWNStx;eJUq8aiLV=o9ham^2?u`=pmQDO5t+f)^}r
zuo$#&(YZ|L9M||F2jkQ35V__8TAHnAu0{gsb$wv#*S$E~+815dd|?AlGZfhzkUcoP
z0lWKqfMu&6T>2V@9l13SEu}qB{kP0^@H9w_2tbk0i<`!G@rLi~p`d9UZ!;bRF>WJy
zgJKIe)$=o1VjhHL$^BUEUmwi5Z3d#9FSw{Cgo~O>sEg|n>)AnkzWNy1hc9Ya|C>CZ
zUvx2EJ^`%d-Vif#C~L1!1a-CT{PLk)Xf!hmUa-X&IZzKQ3uAE7dt01vMHfv^q?L=V
z1T!t8YGLp<eaxKa0JY}+aL4W{`7bg-`|%@g^VN@gZVbg8V-BN+br?43ZU##JV|c%D
z*x|kxY~0d#!7LXj95@GKUid+zYZCeF=V0FWEa+K~2)-kfq3p6R7K|}uH{Hvj)%JqS
zb{x&x<bV0hsw&LfdyAF5{U7t$NBl(lzR)sb8FY9Sfv>$jNWLra_J4cvGjvY-KZ?%9
zA;z_f;)4*9I3Y<wk%W-cJR5_Mgd_<e#0lvnAxWlGk}jj0bie5?qkHeO)s!Un5JCtc
zgwP?x`S$k@pv=7Q-g~X}TP^jp9~c&aV%ZpCyR2u{m-CpLRuk*_@C;PE5M$y8UHlnL
z+JtrqZ+fH!juk%yrMc#iefc+wT`?Gz|Mo|n*&|s>+(Tx4pj_zOJQm8^w=i9;gQ!}k
zg55ifutS5qc!NJM47ksXE1jV`^eP|T&z!PDGHg!C!iLGiu#I;5>>F`Bd*WZb>ApW&
zSN#;$w7H;yva3Nae{)OoGPLSA2GbYkQ8sB2R4$T(`Qu~Y=uLZ_njcr*;m4(HAFSAP
z5?fw(vU4ej?WE-tPOO2x{m){olMJ>0(Z{SE8JKeSBKlrUMCId~P`bif82vh#e2&E7
z?KO!R{$+_Kv+U4QvWll9FA^Gh+Ch{7-F6g5_<U(G+OIALql6$KDJBE0_b!4yr1oDQ
zmWIpIcVaFvBj!KIqn_4iaos|M{wFWNxV$si?6wrDiX-ux%0OaA1yZil9oj65=zcH9
z^2Qa^!MGH%)>NUhegvreo5Q;=NWtj&3}%~Ij)kv-&|(Bf=fF@9N!ltrzcWy2c*2fM
z-G$jbhJr>V=h-nId2PCYH>_pE%4;`G3Uo!)8v{{V)CV^|55blwsTiHk^C6$(QBRL_
z0ryL6=Vj^^O<D`@7pP**r;C_eI1OV~Oh(PaSzxtp2Ryo63r~sBVya<4xt+&+NB0U;
zOTH%PY>DGL9nPW3Kz|-pU;y$(k9ddUOR*$1PpnAIW1HV?!@bjoVYf4aL8Tlf7Wq@Q
zA(cF=<Xs#0mUsPY4ukG5!2f3)G*hS#7OUBR`vXz^i#s%YED>AX&O=dKCYt8G<pz)Q
z!7X(Zmv4K){liv(68f`Ho0;g}t1o=KO@6!2E~r9!fYiN{fl~n9atg$IgIzFIbu)8#
zj_CG2mon00xX-`Y;E?iwRmPBC<2tdq9BNo=X9RS04`qG_3qjIxtD=0AJu4_7_ITwa
zlw_!iMcw0Bw(n1V`$G}V%AJiaMG?IDObBW`O`+~`>V!DhY^K$PAbeR!S-1!@&!|)g
zq-R*ZRw-!x^NM?ztAnT75>(h*u-D!BAb04)=Drz+5&dKF(v)D#)yaa{RmtSh{U{`P
zRsrGB;p1-wwp5RR{(Ux}WvM^7t_cR&lnAl(VgV=@GR!dV3o^M(tUI+3zSo9hX4N^A
z+rAf+CYgfL#AMvO&ID!F$^6uOE!0`&ES@xcB+A^<xxBy)uql9RRhOXuwKGihCP$h2
zX=rY;C-yn9`ZnL^F*{4q&SVz0zB|q>j+=9nc?&6{(8{~1A82HH6lMmVhU?M(y!_=_
zbbT=qU(kYJ7&r*7)2&2mR09o%L#dOk60a6h_h#)_Zg7UYSu3)|*UvA2w!LBC`F<RG
z_w5}1_ks8scatDLD+=mAgrlcHHBSvYM%lFxDED$@_vb}{PQqTX+nQ>@IxH3~E2H4~
z@j-|a)S%f}3~4&y5cNF{j0Xktr1k*J9!_kPp{5Yh`v7sh1e(M5LQu{gl=+ND!@sK7
z>D~*lk8>nk514_i+HzblbRl*uc+YJdE<%f+mGDq&BPwp67tFuKbHAqtagbLDDi-8c
z<jqJ#@2XnrmCJ?peL;M%(%&dsBSya$r6Bp5kH&iwX?743$5jobqrMBfRNT>;O@k@(
zO|T)s4D153FkEXC<(F20bI(<*{mm8b`6rq=Hj{@`R4UfEdYt!_mgBVPN3f-452WNg
zXKHbGxjaBF25~k&_$(5$^e$o6vQU)nn;@3Dj{&Efx0uNoAKdqpW{r_$EcUB8nrqs@
zyJKe{CsT%V=Bpy?SPs|sUu7WyD{$kpak#}Y9RE1x!IdG@WsrH$Ogghja0|U5wg|E0
z>QisB0ry4|GuaK~kExR+F9H-R0$74?A9S8*2G>u!p#Jpj+|7GF*Kv2|y+xO>rB8ym
z{=UGxL)W9{CdxE_u4kV8zcS}!B{W{m$Y(l<i5efy+7{@C{Y49~g`Rr_nsNx4D#ObY
z<k&TC3UgX>f!qIZ;<cwIW8>a3E*(o5@g)mUe<8=}A3M-xY!-eScoyp{62a}=HDPr_
z77oZO#pVq&K)G!LUw<YRqqLIAXKZFxUN?Y^9hXd<;l234)C}USX~V{nXiQpn6rA_C
zaGB3gkQZKKM+fc0eSW&=FS^I3Oe7!v>Uk_^+dkC!I0}lPg1O$1qT<<Lw#Ax!yC+L{
z$FFyS=&5^!+Ak?XhiRa5{u`^*E`qm5IJWuN@?U%siu&GU-A*N}MLdWb|2PD(?V&uS
z@gr|tV1kW33wV3&CCJe#L!EFL@rC>_#ZL;?SIoidF~#sRr4MQiUkV$a#Di^%Kga^p
z#2zD0v6io6VT8j<tpAz?iqZ=pTB{4rop!wOLOK*aPsQ?UnmlG{1dd#}1^+A{U%J;|
zE*W~q%t~n)c<i15DUl&UUrjx<8cH+6;zU@R7K5|r8==v%Auz#kGm1zzZ}&8W&dfC^
z3E7IH5AVe8W$R$-$c@-^-<bP<zDpdID6sB!;n`KWT-=Lv!}6tIvCjzv2fCvKq!P1A
z0kMZ8nUF>u&iQ`4`@MqIH}8jzDko;zw-k-GSo4<04#IVh{#cNakBvpStf$p1e0}Qz
zY&a{&9n^(r{J5AkiB*}>Rq8I$DdI+RM)2N4FJYTYBD)iD2AW=P1Fb!C@QdeJ>W`Q#
zZg_f{jefHiC1)Llpt5)T?7(!)u{emLxnIjQN)|%H_gT=`E0RaN<B;}>_*TTp6-8||
z>+p18x;+Ycqf!wpUqHM0jiI39=PK;KmX2Gc0?IdU<uQ6O;COSi*xL5KIOCBGI?2|9
zZfhMp3ZTx_k@2i`_6fL{hQ#*^0IP^t;#h9LlBrebn{x(5pWksE^C@B-x~;c&$k@uL
z^H?=29?Nh3VWyUmkYc6{xCHRcL1#><_`+qoXYh|Ukx<~Jgv(bK;OCKs*k}1!ShYlf
zz3nsUnW6*D1LtFo*K90s5>UuS+(JEO<%%O<T3tc>{B^KZDH0XKmNJ#8*Z2$7G6=Hh
z3-%IsX7aR-cARNKfYV}Zdc1*UouK*dWfI>ypbo94JY$B6ov<;KIJS{zS?fvq{Z1j>
z<A^$+W~z?&iimR`QqOfJw6hlJM<%^u3_<qo%qaYUAa^N&+++cC8Xq#p552+uNI3tN
zLLT&2p^&dew}Tey3b6EpzJ9bLja1>P|K_5`jZ9{D=n@)U*@qd2%Tej291BBNqJt~t
z!{tHHXL32{^gl~J6cg$Z2m`&FGTa$KIk`UDIS`9M3gIYn$P`3p(#0x|=fJ$a>6n>$
z6jNTcvYYzk5wn|!!>nR)slFyQ<>#=EFVoSmXB2egN5RLn)Qy#G%-3y=gmXJ<XvX})
z59E)<Era91@!@H1w5nU&>-h*Q9Nq_SjJZgx;{tTFD`p+58ie)g71(fO8+9U<G4He^
zko5crik7E~8`jIgXjKDiTXc?#{<>xs)SsS5dmgh<#M1B_O5IyK4x+W)S3W3N8|}lo
znO*!P+&|zbdQ*lisAB>s#+5@#D+;2{`ZBc(9R5mMk5`iwP$?z9(8fy;>)6Sb$ONo=
zwFjG@yysNW3;r{!*+~=XM>jhPs1^mS=cU+C8YYaXh{MjkBcc5c?Lzw{jD9hEx!Nw=
z7~2cq-x`Jfp`=APM}T>rHl$26<uOqKu&=8CAMNu6-NaMe%HE%})G*@x)rv=!E=1dH
zJwZBrFZcQt2!-Lt!FjeTi#qBK`FbHBX&x?kL<V3k?Gc@8wxdOoH)z=1;5f+%`WhAD
zjPK>BeDDZ#J<8<?X_MgM<2rnzV^2Nz!~$KfiW|Sj;R{U>9(lMChb$(&NN2q8^ISFP
z<m>?1jF;Sb;UYFaq6T$OMnmb)WUyY5iu!BTbNv~ru(@sn+GqLl(?-)Vc8xuC+qMX?
zHPcwe&NHa|-$`8ac|Y#7l7pk;F0s}<fUXBO_{Sg4n9)NAKKAkh^^@6<Taf`Nr6+_q
zhw*6CzfIiTtqDQ&@6Z19iNpDD%E@N~W@oVRx5{CckuF-rjs$6ezFC)c1;ox8$~8)}
zS=Z#=%xFTb@PytW&6j&(jadO)cb^2UE-P^JEprrKS3yxbb!Hp><O{!2zuAccV0|=B
z?0phIw5Ow@fxK%yvW-w`c|oii>Wf04AH3PJ0PWWHM(dI1petL<o%_%{5gS=HYwu<>
zbc)CN#wWa9ITb@%irM@oKTzH@i5Dx!vBF2xA0Ffm&eQ&I3FTw%)@NhdgY#%qaZlW|
zK}<Ic>Ve%gTO9Q~kus4%pz%E!^540D3iU@eKimsO4|0W+=eA<S_+5haVIp<(F-PT(
zMzDW#A&Bn`z$IS=wB9ff^Tv3fPQ*kpfrp^5B?2vmpMi$zUeIB^2geW21A}|1;Qww1
zT8|#a>&MliMW-2@%_za8n<+nW|9C~LaW8OqaGE>``g~(!0miK)f3S};MD<DMZbd%4
zz;zc!cvk^_En(*-p1_uLOHj5N1U(z)q0V=?xFxinc?FP1`qOmC?idBn{m-EqWi(HJ
zDa9Zd1I`OB;5|zT&eu(VC7BBJ*igp}RFh#)U@>alTnc7BW6*!n0cca4=jK1C+rj@R
zy-Ur-xArjnWUfs&a&JC=brH@~NkPY-`a)dUVzSW@hrIO#vAq*P`L8)#B9wvsv>N`A
z?k_Fb!|=vuE0o8}Snt~@DE|^8{Fp^u;je?l4o}3e<8lc~#_Zwa2WNuGodDuq>oA#B
zk=S5w0oDihpnUSmik4Ns#q~S<nMD6Q^-~Hk$z}q29;o5YMgCB?l(eyKVydqC$>hu4
zGOZzccy&`Tm{gY1JESLTShxbrb5?`&My!x6|G?W`xN|M7TrO%%H`BW@0b6fJ<Gjui
zEMKsa4elF=I!>jqe*03C%(=!CRbybJnK>#tl~Hhr0?~^Q!DDm~m#wG%S#J|q;IJ4w
z0t(?>`bHFO9m_g~WQqqF(Qfj&R;d5B6u$h92FH>*;me^E^p@G;P2WoN-Z2^+9OL1J
z_X7Itg>retW3kq+4Cc@o!lZ69D(WUqM8hQ<WL~eB%flKL^lAw6$vX{_0MgO#)7{lu
zj`pLi!1Cr<Y#-DQG(PMjzG)~MRv3u-Q~I%BpMJP}Xf+sr4CeP%m7-6oCmJr$fFP}>
zthPHFLY&vZ$L4Y}uEy~E!+yNu%zD~4RnW-s4YPShKcj7mxMTfsxTlj$H}X(uF(=OZ
zpaN!WHUoN@A&#H310~W>R{5?ByAv;fe6&B79^;_wK|c6Pqruyz6lX0Y@A&q;-0_FA
z;I&c*i(lq5r@`I4UxhzzY>h*cyYXQCd^}v&Itgl8tH33bLF%hK42n5Mj7d##aR=S6
zZnX-Y?KPnM*%t5aO-4z;9l_(}G4^dwC5j@><7=8JTp}&Nu>WB!_S1wmGiR_`xeF{_
zUf?Z*!iDCcU$}exG+dW?3hR3B!=piq=smNNDQ`32Lr$H6ZFf&%M*r>P*U;kDTJ~UX
zS&WL+iDolahNDio5iteNi({?b;o40FO13Qr>4UjuO9F;q^YKGy-dh5tb}86zu^axW
zDh7pklQ`wfZx-{TlC;NX%z9WVR1a`Mcf(lnMsHw_zb1jgD;-Qz7VtF}>Cf1oPgzOg
z#w?cNx4wR;824Dn-$e|XW<xM?_5jhO8nM2iD}<KJMA3xTOyYLdEIX!~nJT`qv2z4$
zd3ut|PAB5uqqNZ^a1RzI7Nb)?P25R*mZa?)Q4*SnLA8-QO=BLK{@lc}TMHq`N{tUx
zy?~EC<%1AV4GoVLfKfp$h)%3y`iG~&qt%Nsv2-=M+mL_$;V1Fy$#Pg8c^3AY9EZlE
z(Xii`ZWsR*v(CsW@VZTmRG)E_A*CLocY4@;)|aPsQAeF!KEm}P-l$g#+6zXY<HW;E
zW1BY9IoB>;ery0bCTa<X8%CjM<_)tf+d8zZ|HV5ZBAK(g5z{RS=Q7he_;TGF@4lnV
z`sQKaw#^D0n}>=;eFq7q*<ZLrzl$JxSBQTal0XijSU5uky>IV>Wv8-Hbn*v(v`+=?
ztNVgHwMyLlaTVNduEJI|1vDP6f!PL?*sppNHuhZt<(g4E>KWa<e>L;jvJ2?6RR)f;
zLs(abHWvb@H@RB_w-Q6h$&^^r7G9JGpU4JOtU)E|PRQI84DE4s%qfxh$RjJDezpcT
zI@z1K?YzU=)B!XsBY2C(4hSKQH8$m67Bcb~H))!IF*j;4_rQJ}AE<)6-^8HF`U+5+
zFdCgRLYWA#ti>deXEf{4&s7SI_K{59+RY?q$G~+@^4qPTtWBtzSoB<7+!B*2JnHs9
zjjjZKC1^KVNfx2ejUp`HP|h5Pw{I1sf{Wd4(EHDC{`ze=bge5#^_%BWH0HQqMR)39
z=gMH&!vNfrb^z-ay5MR};-kCg<G0`o5YbuzF8ACZb1U&b+PCrFACIHI^Jgwyy_D%1
z{NVom55m3E<S91QgWQik==t|pcKk^pQnVB_o}A)yP9@S!ay0MIZ4_I$Uj)dx0Np43
zA^3_Ehs-~PwSA6);%hcnKAg&obB2SJ&qh$Wvkr9J=8zv^5_ecwLrk4PkWc=`*ad2=
zr+y~r=mbFb=-$KuYGwNB5nR1*EP5Op4)%S{gO~Om>XsmXNZ)r%*;N@EW?vSTYz;!3
zRDeo`$BALwAKEuGvT|018lx64$?0E0#`KNgf7=)wR3y-NARc5xh%>o-5I3Enh5jr0
zf%TeeLRfMsIAv#niSbN)*RKd{kEBE0QTnc86;79x(F`UbzUFvPENm0*e%D1?6FrzR
zCJ(Kykss&zKvpiD0-d6>V3GfeTaA>mkoOjlFqAa=fFDdcWC^I(pJdp(i7BSPHA`7k
zCRhuJ++QsX54|tO((h-$<D(p+w$+lCeLlPH@|-Wfor9)q5Hoyr6g29$6StdoB-ux<
z{5g`joCt&?Q)r*_EG?H58$(`V6}nve%54`{z~oC_I8h}Cbq7v{_MynkRv<bkQI2^Y
z^&X@az~-xmFm}Xm_Aa^xtpD?bu8tni^}>oe{rX_Wy^HKkhdVZ34TFxmE4fN|4fnm9
zfzFlT@NL{_${KZX`+#+PdWSOBjH8ThjUBvwJ_@5^a=~1p1LJ2VfRbSWIIhkT-}xsL
z#%|h%2HP*9ypg;KqmGO7iRoYA9gGU|lW^WR1~Z-oqEYNRrlPqJYuOvhm^p&ZqIJRv
z(|8<TZ~|}EUc&tPL{NP}UD|=KdCVkVG$qY`JmrWiuV#Reqd9q1kBj9y-V5g_gHygX
z6_k5^Vs`nJ5OmWSLQ^NBMd~UH8kB=mX>NAfrvi!rzL4@KA2P0o^06f$C_Qk<?8T*1
z(6px)m}c8RwlsptybiL(dQ$A8e-a)3{lZ)ClJ6+qhB`QxVanTHf?9|tSntTAoBn9<
zS$CE)71p41rHC7^?guGtC%Mk;G0;LR1r5`a@MW4Y%1-nrekcjPCkEml-4Jl$H+Wb7
z`KY2A4);IzLecBh+~Pn7d)7q0)i4K;e)(die508u{Wbta&qNMi&w@YQ*490BN3|3`
zoFKKuk->*ib|HmXD8-|?b0OZbDuZ^tJTU)D1PV(JaRkl#+dr;EnaF@UuTy~9woV~s
zO|dXCQ^1im0eCQNDtfPa#Vs^kx%JC8ynXIpv}4W^x*jd&{z0UhH?4!|2E<L*E=03&
z<ME~4Ok6|R^HDCutTW<J>`}=c>^OzR>q|kA(gzCv^2Lyb=iD~TAE9R{bzj(_^i!bN
zXbg(2%;rJ;Tm^snPX_wy9f$VlNg!MBKxq3zoglxmKu3QlOx}=>mgeawr)QOH;u=2c
zK`>f<*aXIJEy45R1y;9jDfXW=44uD~VAoz_UR#?3Q!<ZJ=e;}JtFgg)tbo?-w&3_S
zRD7*-DX#6(r)=m;?y}t+Om7Y48#fQe=E9R;9#(+5r%%9G17Bc|mSV^DYr?R{HTdhV
z+1MBv0dr2}W9yq<*pxgKzR-O-Bc1q3SM?w&=r=#d51`VE(a`^H9Nsgl!X5>_sL*^X
zl;3V)@(G(E=+;br;ZQP$dlHkj*GZ;d9*G@a;&~nAmO4m}Ec_7(Ndp;nR^@|e(_=yL
zaS&Izdkp-~>O$k12(E0i2xK4oFsqH*sO#&kQ16_@q^Y}MvXKIvQk+3Q*aULM?<SAq
zDZ!t#rp76*FzS;g4sczGo+0}o$W4W~Maf*TYXYbj<g>P^!yx7OUtHu+SCP9r0e*ft
ziWANrL`Q2CJas}r+=4@_$*dn+Kgf*w{HB7?bPN)H$e4cVC6v@o;}*@sz+-(DDz7|&
z63uY5?%A7%W|Ho9rxr!7X=ZYr5}~`J5S@R1<D#TMGmV!EAY-IHl-uQ@x6g6Xkv6f0
zecHHY<|SOd=meO3GD9=+54((w<RTjZvNuz{DS8X%8Dsyy#|Rz!rZZXE1fE|W#xw#t
zxVPjy^jotTl?xoPVW&3DvIWf3Mauv52tkVjQq&ppH+TEu#B)EzLA{z96TS3gB4Sw9
z@=EZS7sosL9Au)akINO60et1rmFOBM2YstJR5=h1d7HPR+G%%i+B5?s*-Jp>(0QJ+
zr%|x^Cj!<D3<mj{;UH;`1^xeafJI0TXvH{O#v*a=f^lf9M?b@zG``y32VGM_vH6x8
z=tK>`Ci!vJMBh`t%6y0t`hs3m6-rJb47O3=uz3?vr{{06L#8^a>^KYNy{te*=QzIQ
z#Jpc#z=G@|xf<#B{uSBq<9H-Wn&t}9e&@hGVLI4;PX*_)yKKKlHRu-`;r+Q;XsG86
z-*#@q8EXK`sWZXY&Kl~kFzPdipxJd1PiQCwxy2J<N^Leac*$Wr7pV733BNq=i-tkp
zIh&M)F7!y6dBPW)CyhoE-ym3INgX_U$C@4VD#nTJGf=&~3~rWCcjN(p+F`LE>ivL$
zFR`DWW};%<bvE=wCGI>v5<|Y~LsZiT4lb2^{x=i6kQ72U)rqiOI}wXbPC#2j7A$Vc
zB4)>Go;SS^W&cjZ9dhC$Z#yA0s@8y!#b@T+s{jW5L;Iy<G)s9fQatIk14?u@fW1W&
z*6N=E(V<UFbO@NMN-E8<dAKMd9U7}!c<Z%mTpp9e^iw`Fx0OG6oR1G?cAUn9(KVp+
zdL+~ja^SCToki!#mb`z66YXCug5>o$aQ>yrbpD+R?frLw&gcMf_k|3eA&+F4Pirtl
zrHcRe@(Akxx1HD&gT+A;-8p?$Fm;JDHCm-m_gxkSHT}mtQ-*Sxe*o+H@Hh20edRsN
z$wR-jO}w#u3YMHcjQOuG!V%btPIr#sygP<idFV7s(k=-Jul1n*;C#p}qTIUOG;m3y
z+z~s<)fJ@}KpIQ=l77%`8iBfbPuaOAs;FD5h7v?^r#a;<{{1e#`QsRRbp=Cz2hy+-
zCETDf1A`}BfXD+Av9>{gAG_A0$@oObUOxp~7x*Hd5saQ27qFoQ9IZul;`Z5fOm1a|
zD&r=CBi;AZn)@R8d`NSn=iBc!bW868k2OxB|48DUR1X2V)I#gUYNi!(3Ti)G<tfVy
zS;RPHlvX3pTu0Ay!|f2dM+^UK&_*3eFQN16ZSFXFsNfiOnP>lOW3nX<I5V;Wi`#pn
z?#6R$ZO|ojyh1#lMXN!jtPI*7L{KDD4%VBpnOfaHln>43p7Bbc=A4BmRwrS8?-pX(
z%oChC`$FTCIdJ_QvCl6?kQeVC-m)lCFg0st4d(~rqv#-X8{92+46f%E7kaXn<ZBO|
zP=`aJ5^#i_Eq1Rx#fQ5S<Hh@-u(8%3o}TWBjr;V$$mXKBX<;l(omfD=@Go5bXa>B_
zaKJq3N*;Tt8be0j=aaYP;<W|II5zqsJi0*)gme2r@n;H``_?muN#D7$#v5KbJp!$Q
zlp*nDG@kIZ$L=g6sJ2>#+IqdP_L(yLRtdqb9A9R?Z3L(ftKpLyPoeFp4}5wTu~BB<
z=LzBVp!(DwHlN#%8gVC>O41xC-lhbTKE=`Jrp~M-GKD#;dd(YjQ^kEgoCcjAPndF9
z7V|voz`rC;qx@AIs~<83>`zkeY2{`(ub+>jW(T3;hYf<tWf7##jlm;_m!hL(G&c1P
z=H{2X_)dEp;%m+2L;9wn=iF$n-1Le`!|rhB#p_u@cQ)3?Xv693T4H%9z-K@oY-TxF
ze5snZ#0`Qt!(?c-QHRZeHE4Gx1_zH^h~kL~Fsj*rcJ>7@tUCny=Pn`kGv&#Ab1*^u
zII|7B&%&|=R1*h5%WiLIiCQR}yLSv7m+of<KADiRcrGX$hcNq}lfb{|Av^GZcw>_U
zd^2YR#^$=QAsY)(+W%LD=Vv|0m=nu2`k8{=yUFO;^cU^m$UU|$#S_!ZaB*%JdM>-l
zTl;9C!MTgj#*M+~i8fbGCcn<Ulf-n6MBQQYpmszB)*mW^cRSA$r-<`TyC`(9ItyKK
zeW3GhUx-caMe|z?GwS#*d|43*8ym=LE7}C)`+-I`890e5xZEcO)O$SRPtFBk!%aQ7
zxgr{5N8ShuRk7HvDIBb(ZU>(uzL2)f1YJy@@XountbP7ibSPMeio&lf<yJZ_B@MQ9
zyDGdsRzNq{RlGEUSWcfppnG;2|0edt+K&iobk}OBFA}73Qx@do#8np1op9e{E)mJZ
z$KDyEMchW{-aZyuzZ`+Fq!CNaS2F98HX-{?Df~TmH5z`~4kqM#PET~F@4gk}(JZO5
zeh5x@S&hbpYoW9~6MlpS;ELiksIyp$;lp%j(+XhO{no;Ys&HH)r9ShGH<`EZQFs)U
z2l@{wJGbbL;QCHL<!`5$#53QlK^iJ}jZ?t`)|;qrZ4%Fqpx^zYP>@D>Rp`Wof$2>z
zetpFP(9aRL;zcyv=}Y<Pld0Tj@+GJbQvu}_gP`R^G-A+sTz*X-yJjCFZ%-IF>?4i6
z^BB*csYfgr>esRy0K@7-KvCJjw8#sX@7V_)y^%r)>6OmKKF}rn#itmif#uIj*s!~a
zH*LMk+kV|(gX7&%$HoKPm!C(!j7sn%{i!*23^Y0K<x))>u+PbbZ?~(^e3g`U-P;45
zuT7z4QY9WXrEKY-!Pw8!2%oLDM4ja6+_PT_Hr}p=&h9i^?XwrH=$73y+L1C5<a2L$
z#?Oitq1Hh)H2PYLW}8f~6pOGaSf4#j*nnM+43Hf?jGpn&d5`glc>VP;s485KL1Si<
ze~jL><G!)&KBbtClR=xZS;}_qOyp=(>UQy%xP9n(ZaO3l+cZW%{+DBH-O*5pxDkPl
zE^malgJu%{QidH$&sg{Nqd2gS4N9iJ<V6xS%q=+qHy=ep5M{=?DvbDblT!3dvxDu1
zx!`6TCCEqo7I(&xr+!N}4{DkSy{87FTd$4au{snxjYom}&Tgz9nu;xrnS%X%eYVaZ
z6U{aTV(_UkXzn!|T8zxF>_sl_r@0Ag&$0*MzSuFiQXE3Fg#&3AYM<^hw`T$3kNqhl
zH(@n6O?}GS=ZyyaQ|oz(#7fZr+|3O7#bW+iXK2ulA(qh#=9M1|#?GtZXlF9&Ui8IP
zvyE~0b}0r7%cXuCN0_rF1Eo)su))`o#cn^r9W(C;A=hhpoPiZ4=tuDW)IryBZ!T*&
zF&X^RPvHE$a!8=J`1Na($Rm_FQ~AL{$o7~GO-_eUny*yhDVRW1ngi`F2^H7l)v533
zhLC$L2coDOsNvdkv7_Dq;m7S$7?j_}Tz1ExR?ELU=L*d^mS;ebK{MKe0IV%o4^gj-
zK=z_ZkXU>YBF`Ma;y?+woKS=No~OunQOz0#GTK+<Ow#*dg?Zm_FwjT>n~{Mi(n>W;
zc-RN-Ijlj4-l^y|Cj<4q#p6ixa_nzVMI8>l&^EP`_bfV!QLD6|LAeg&XM3P~UOaUE
zs}I&&GQ<J5Y|yp39MoyPa0sPNyRPXhkbGC`Z42l*LL9>31+0GQO00?7Mzy>TgmSH8
z;B%80YL~@O=a&kK_K|{POn)JE{7r5-MF8W0SDD9|%RK!dF%Pa+pks1ATs^NrIlnnz
zI%hh|K5NSzdj_+*iUjOfW`NSoVkmw~{G6iMtodg$44;>XmhZ#KcM^<w<T*E*B^PeJ
z&%yNP2T&SkC%|j!_vyO^8lKU2y?8roxc!;w<i8h0BI}}?eS_esZYVstS3*46JK}W<
z$paZL1_$!Ts%b8#5RN^@y^sNX9}UK%NheLv<`(zHKxdBvxH&$Ox=GiehjATdKbpxr
zt@^NTa}Vl7imSLj*AbeG)Inb(100<~d4t(Sao4wS9CnI!v%a?2l5tfW`(q)j?=Zpc
z*VHS&kvy>@s7H3@FxWC}31*t6;^VSh^oVo@|7D(_t}_X`$?s)-$rMKPutS&CPnflN
zmRRk*1Y$?eVtZ#q;O>}g^j;hXwMkOq9m~=9V=Zcq+=c$*W@5v_U978h1Jk~@6z^KA
z;g@y9Ue^rbyTau7b#*1${=Uod56~TK*i2M6&gDiC6ZnyW(Rg`<0&6S7+5HhwG`r6?
zYZ=y`xhYYv^ix~b&|@2PbhNP`x_c*#@J83EF{tQYE_~Y@4*OltL(|9r$goZ0*3mL-
z$zkHS(+I2AFU9V4gW$}cJnX7G!gLSFLE<`;sgBJ7vHEJ1o0JM(S0nKA<Vq~w1fctV
z6$F*<W!8C9nL_QHc=x}0DEcSJ^x5r^xIKC#Y92j{lf6l^D(w=~RFb*+hn=9}mPX9~
z^Sq<BLEP|X1Uxvt7MGKM&hxej_ng<kQ(~9#!onP^cci`sr%bM9+8d-j-Eh1^9{%|d
z0q>&oXw`^;=1=~Zl#oJqX@R-94h5}9f?|1%gz_qL-nEf9xE~_XL23c&uVzE+7i<0q
z{UKJ#gMU9c4E6UFGRsvLz)If>q{PBhx_cH{?%PB9pW`U6`Hvgl*a6nDFND#g;pCey
z0`v8L5ZqmY@&Whh{!;@R-KxMMf_Atv;;ekBW|>DXqDYu*_WD&G*vYL>VsF9*nPs8l
z*d1ci^WALwM0<2<--I}aJo9o}fbzbeThhrj|4xQEn`0q1JA!yk#2I?Gh4Lv!AmR^c
zT#F8KUC$UGL>5e#JQ<zVx=}}1GjA-r%m>8%g%Jg(K*_%pN*f}{o9k=Vt)zz;!*+q>
z!)>OivmZmIogv@oYG~Cv#nSADp~k@^kS*&?ogy@I+8l=s`)Ti<dXaZst^?;x%Ar>8
zhWbnb5x6Cx=Qdle7n_7W&FA4|;vCf0sD&<ib~W}=hJQ9W<EyJ1P%y}aA#2RA#UO>5
zE{zANbrsWE-v=U8l<;==5;R;J!F21sbFBwzAe)>hlot47lm~euPNWLTV<{ut82~cB
zN9<!1hb)gvh@vk%czqFArK^%pyC<Ay8d#d)kBy{NO2T&ulDs2elrCePPQ5@PS<aQO
zr!#4X3ztNEVHXP$P?|Ul3~f}gz#<lmi$`Pmv@uX?X9B)O#Ce}_isAbr)@)zSNl?K!
z@o`L<<}Zj0{+Kl^-UylDrC@x#lD+%o3)%y=VcLm}sFo@LT<Hh#{~bXmwY{LT^(Ql)
zmQ3HNL@sN|VnO3gq5bJQUf+<#tU`+6;KE}VtjNdyYXcCrS%T<q-HL0D+wuONcxX8E
zLaa0{n4cP+ijEe})Lmc=4vG0_q4tC|yj5cZe3qh->1A=tg~L#|I1>LlkcN71C!^8O
zC`?lgB>vzfv10555ZQlVG7HN2>Z$XV&nF-wWEM!4t`vNR(2eqTASkwPg7z`hZ1}@V
z*mHCMntRq@z1|w;pl1$B^j$FOn<%z_^@8)+wpi5|g-*A!$jf=0x(NGW_5|`5SqBRm
zU2ADaP=)UOXUH#dftzhMLl5(O7FA~iv6C<0^sY=WNTT<ce=`$WPeO!zJ37XP!j}cP
z5VC0@>1!RtsSm*Z3oD`O?=tGPeZ~CpqTuL3Z^XT)sf(r<hdBtuuydsQ%VfN?av~k6
z<FL<xSn&F`8l9d}Cz)X|csF0++0?b-w{R7T%<9;OTfz8_YhwQN`B3GifksgwtY)r1
zHroZU31bW~#N-Nlt&;<i&R1;I-4j?`kKD=h44Pk|3|~<+bgw)LuJdRP@^M4sv107(
zRR;TM5A}R^fhR@wqYOzQm(3?HUV0WdMl5D6gRb)S^h%URdV&6zMO+kMB@|vShIiVj
zU^>hn@>}+!ZeR*^p<d#3gNZTH`xF=5+s6!^mw>EbBG0e-!!x!og*no*pisIfjG}o<
z<D?<?xOa_rPV{Bcf1jAGOwT}%e=R^VJ<BX*x&|2atmjYm<)F92PLQrsBkx@SINU2n
z!LAbj6V9TyUMP3aRR?1O@`3(ZC(MkTO+NpzZ0l7$oV+j)Tdb0>>zFQNxV`1Y`2t*2
zli|m<OuFkO!=ny*Kga(kgvZvPB>tqJ7=Ko1T6K)I9d`%2437Gl^8uDEg6jj^r~_vS
z#6Fo0UL(ptl#*Q5m|o93LLRfZ8LC*anRrjZ%lR*pDd_fi2h+LVpS>TakJcIYSqSq3
z@BRWz@r;0|A;f?UK8bA|8<_FidY-(|2X&%9iN7C6M(1W1>PS!H7E)K%A)M!3+cq=t
zsYU4j_5e1MFQ8%WKqy~g2v^S}L8ne{%!oP0?)IQgtWg)B&!lP?KaTieZN%j=oy%^P
zlh?1Rg-`ucNgY^`*ke-&nhtx)S~h#|meS{9?|!>Lq1Q@Y#_9O4emSbOh<JJrDH_-&
zqS}(LVx6P2gyOfcTxYni@J&WqMIQ&;x#lR2|1t>2tnovwoD<A@kqd0R=?i3qg_|~e
z(A;Mm`pX)*LPCD}J><iWuz{}fYpkutnkgCuP*Pb1s|TM&Ts{o61P#*vqrvZ640M`t
z^a}_Fr`>xXO1=bIRF{(;JEp?<^Bl0gtj`@?c0tFIF!o7E#ICFPkX<r>_o$*<OzdMO
z8c<#FuWJc)5)|N<UZ+uBX3Y{Dj`Hl&-<j^lPuw`OA5JyV!8XlB^o$HcbrU)F95Iz;
zen^04k-=yLi7@OUaRpC}XWb2f05x@J8C{IWGHmg}9cOHNGJ)&JKL`_+67N^&5GwyJ
zgI*`}P?lf>%3l<08sCp!l!L+i`V>$tIfl&<FS+%judJjb2mR3uV&3|q=+kh1Ve(1T
z+I<0YXBFd<|3(uNhj^$1Y@s>q6!e`Ih>9WI%)>C3HF=DNovUV{k#smWEp_LiGj^fw
z^;ytodJ<|aP;iqzbmQ2Q&PV!A#QL{1-^>^erVr2Zg45C1PM`nsj`5&H9Q$UG0jPTA
zQvTK+d)daLmi7aN!D*nUSb)o4Av&%#6)U^;g%G8S*kq;&iWi5)2D0UN^AmYLj;#Ub
zP4?_`>Mjhn(Z~9+8(_$^7%;>IkW*HOA3vS}|CR#CeN_hz9tm(|+Zg(8kRM)q6ttO)
zf`6y%LwmL5eE;hj$PS4@{jPZEpIwETnrEmFCqz)`f1iJDNyX*EbI_+Sgm}^GfwpyG
zxz54+TPt9YN)l?n6BD;)i<xzXE5A-1gpTH)c&B|e=d?db+WdIy%UW#N;>P>?o<Wy?
z4nco<);toW!seU9Fe>8+c~k<+SCBU1QI(F?rd5K!P)lBgUAVbx9V+zm#UXii#95xr
z+#D%C@&ci~G8@9r&`t5_BL+JEa*6XvvzjMmXs3~njxR?FIg`d>kaB;HBYNVUzXsr<
zUykVZWidC2kH^<bV!&q5Meuf7MBYmyupTgkZKsZ!jQ2p^GJvor9Gp~cbJ5c>mfdDR
zto*BjM)`VZG+P2F)&!4)0cf(}IO!EnnW)ABn${Fy%0Uqi6MCTYu?Vob?GM&Q+hE+7
z85os7*{7U27#2>mUf2VsW3^lGtR;SOaURpYrG&NvByi07EdCr<3eM)Eu;AGx=onlj
zc9^yT>|08ib4&ux9AJvZ!$V<XKsF9{UyhO)TR`n+KDa(`#INpkm@uf6O}MFr#b=je
zNKqPVqkEyem9+h&GSK?l3M?L0vkvh#a8pmi(p#tD?LaeZ-a7%MTmPv@nEQtZC7W>F
zu`ie+tAzGRA2UJt8?#21p_c9~W_l)=dnJ<JF?t@8sipE1@?%JDR14}({$P|R2#TK(
zSUaH?h+Kb~stu6iu)H`(dF>2x_Z_5T9TV5TCq5T>uKs_^hz(x{PGisDnHjY>eQqB4
zAs3*A{RMEaDQD$-Buu+sJ{I~GQTOi|P&@ga#bpMf>UFyNU)hPZ3T-gHc>oNp!?<&p
z7#iABvE!duL8|?qS=8(}2-@leMHSZg|GR#h(P=O~=L+phXoo8q2G;Y;@V_3p7&pla
zv%B8&cG7}%qWcQ!I}U;4UPJ!v-!h!LAq#c>&J=gWW<kzf>MPw6&$R5%Fs}{K<o{m;
zZVxx}>_|qpPvW<>#Ujzpxy$GC&~e|FW`W;KCnr?gl%0)|hmXbbCDwd>7kR6OoWX%-
z^0E44Jo-gsppr=l$}|&k_H9+_;hc%PeTJi_T_`{7mxz~7p28jP&Y@U+DEeD0!j9dc
zVp|r(r2Q(yl07mARXv9qk1QeM@Oc>7WQaQthl7W!7L4+mfX*{tbLm4hE=@Q^x2?a$
zO?q3%&*R1d@^Y{+sS+F(k^jjc91BBEqna-HFb4#4$C4N*Fuep-mPb+8OFfk5FNzg$
z7tNYIZDIS06~sU;#Ei+t|L2YIrUP}na=>!bN+y2K%<X9SeL4BOX255)WHg#I143TL
zup#MHxbL9_>PQ=TgQ<dxMrD^f&Y292<n4EHD`&}l>An5I54NPo5QjTmT)b{J%-_ER
zQw~26oW@<@qBUOPY^U)M6nYeOw<NLWH_P!b&BLOxYfZfdW<bNQf$Z*PeJl(UAaWJ;
zM2=5KUH7Z3Yg0J;)7}doo!x`oT?wexHy_%AB_R4Mi8-8(fpWd|+<%8I=!xTS%eM>E
zakL$y9&QJRTorJ&xqzBALHOgi8d}R|fr?8gq>Sxk@}*<hU+)j18tLQG35NwYmD8-Y
zxs>;p=cD$GeyDamg@px%fNpO)@IPG)&cY%#ET<Y;Uanwr-51<zMlhtjsu6d`Yhmib
zFtpa(#*_B+L)p!R-2Pw%-S<dWatJ{6_%MiiA;njf)VJH*DDH9!!i`H*uvqTT74F-F
zs84-xSamvBoo`{W<#n)JJpdh_*$G7t0x{8>-Y1X8FgMGKBzaEYV?4{KcWxp3II|3u
z_enuI=`Yfq)S!M@4;a&3jo%){LwPVgZ<eIM-YM(Q!Tuz-JUs=Srz^3^{x0ZzEt;7I
z`S5$KI;d(<3!@$OqID_tKR1?u;Sm8fY@+!v@_NQTUI%Ha)3DdIb7<Y$m5cTsLStn=
zNYR}s%zH41c;dsjyGa!;w~ImfY(%Y3#h~kQ5M&*}T#0hix>uqhBm68B4|f97yNs24
z9mJN1P<8}QW7n!C=D+haWs;)NBBc~WjX%xwKPb5Fgq~12o5OYEMc{mXA*gir#+1$*
zTxaKVvC)}m9GN{9i-p<bbvTEuv+LQ^;raMw?QvZ9NeZ(D48@O_559Yn@riO3_6ru%
z=Vl=q#jnPv^Q73Qd;q@n48k{g2XJsNx{an}qcrjq%-c|h&12?5Sz-?AURDO>djb#n
zL|vxhe|Xy>S2*%!H%84(fkoX4(7TPa_XT44vJP%Aqze3fEg{y*1$uhSLn$+3?FSpV
zd|@0&UucW%C;72J@^u$y4~8!N4mN#RAf~NPBhKdlel>%9OB3SIDCq)-56Qrf&B1x$
zo_IL&Z%ir567P!&#*W{qV$1zQQR8PkM-B25*~W6)$FxJMuYwm3q@=SY!^>(3DwfR^
zOe@sDNv)KdUVX(Jtxbd{-z(8W^DB27Kw7;t8f=%>GtIfvuxM8S-E_vH4Thqz>KxLn
zN?_L+>U4g$R){VoZFuJkt`T+)ulH3a-MA05=()1;Y5lNY%|x_f-tewI6++Z*^ZGw}
zOp*V_thkl>H`Z)~{ON)GQpRr7B)^{WwK}eEN4}`D$nx1F>?~aZh7XlMCv%**yZ0Zi
zBHsuuk_MKOn~ZLk)mVxn@uf5a;o$6AY&yFby0%d+bM1NXP_}_v4~<YVWD;}wPYJZF
zGGLCECfdxVUa)<RAXPfRn{C^8N0qN2o0}yz7({<(y_z7BRC6PlCbwAooJXD5j`^*n
z=-&Sf7#R^a5Bh-7#TQ&*f5}WXa|&B9!I(Ne6Pd$uANYMh8-Kmyn0dw*thJ2TFXf5o
z_Q?jF(<yUmM@+(k@mw|{QLyf|1p7fW@9%xZ?-b{vG<&U?Mdon+OLHdLMJlj-L>xE%
zQi`Gm_cD9R(~Bk>RIGVeh^B|5&}FJATEzHsjYp%v;L2+3=$Zx}p6$e?+9z?WS2SLA
zOed!9u36=aC2(C_j9xER;gyHU_;b=^lqo6T+<q@Ce&B~)Px9~=-O!1u!adWLv5rxT
z7*x8Vsq1E#s~C;rg931w=LF2;#438G&05;(z`J}eZ0eVauh%Ex!16KZFya#rwuyu0
z$RzG~WUXMMO+WVxe;jyuI%=pO_+43p*AHy~lLy5hk_DAU9WjFJr2b6#(gHBuK8s1)
z;>^r*CPR>_I;>q`kIO9#aeBifJiK&14lkICEKQ8~D-?`gEAf`=LBitb3JiL>2Gq+B
z;JORMfBvS==U9|MN=Xz%(K}J6RU~$MyMk*!Ifa>eML2HN7;FmlVSj2upngn0u6$!J
zx8HmeLb7c^<g6)f$-2mQB~fSO^<*wY<bdeSEi=!@SJ;;FIB<*d1h+;5?hy5dXML(b
z%?t%tzgfdJ2*KEDNF2I{Wvr)x95j}fa;x!IxmET7@*8w;`DYs*I)!@DVykJd=?UNO
zx?s4`GUE2>gLg$a%-Av&b*IWvW4<{HZoY_y%}b!O>?wEBsRr$Dr_el?^t1b?d0m<>
zKK+WAw_Fcf57j~Y$KLRdz9AmnIvP9b7lG=Hi;(ds9edVfLjR4$C=H!ZAzCyW+9xc;
z*f*Wb=}J8PtQVQoXSLYA%MH4`%NZU`h0dMCISRDp-jp@yxPOYbrgU?&;lxJnIw4p;
zUj+?cD}-(#hvf&H=6QDyqsPsO;ILvY)T~SbT~7{fIiBp)J@Q)Wn3*(3(d-pCfLnhm
zg`XKGpvx@-zI{CpeJlm2$ce_CXTw0{K_qK)ECT0#Iy_|uy}LUfRyamP@Xy6gSQ~Yo
zW@hSzq&=yri+oEFwmd;6g_~AeKxg7PILj(fB3dZ64oMK}Pgmm_Lys`iRY$pZZXyrb
zt_vZD_e0x1UOay27;L-P$V#^*LQ`mO-sQOyyptoLdGbV<6%dco_p0DFqAy4{%&Smb
z5(HJ(FqlDE9+|fld{nK#h^*e|Q0D?lx+m~eMk&fn8d=4QQ<&10gthwuz+*}svmN<_
z%i3JPbj*HqnUVxP?zy101%<}>k9gPX5wy$hXKM3`x#`ya{M0;-O=A<Fjt7GJQX6<0
zl@3+JVQZ|9g~n&G;J!@``S+UGm#~BQanxB%aWrR=+-;cu&tB+UQOgo8@1T3{V32-i
zLXBuWHr*V@-`!3?=Y{LxH+2*RF7XF7&kNwKp2XtLSHlTw;$<q0;g08D@GV_o`1RWe
z)XDmrH|*nJW4aFC#l*rB`EgVz*@$C}%i!3uQ`lUs3i7_ufQe2JqH&)EHlG9kL;b-q
z!%%EpP8pcA7+5wV8T~IC!%ua3&x|A1sTy@=&%eT^x2MARYK}3pCD@XxD?U~ojA^Gc
z(LUOo8EznN%*i@%r+4ZEn=q8o-!tC20z}{cW*sZ6a9WQ<T$VvShu2qOHcQ}7XUBlG
z8wdUEKUnRtaL{-a!h(W^@Vah#W?Yh%XX;hJ4pn1RzgWT(w&*}cw2U|J&t=(RU-?CO
zIi?(4C!~;;k4;Z`e)3s7R5YEk?YqoUT*H_}LM5wpDF?`X%3p7qjqcA8T3RM>4e<mf
zN=y^1FRf$J?Hiy?Hib!N#<8?wn#HEp3hfSk!Dmt$l)qjFwQuKu&evpCeylf)_@Ra!
zTb{Gpqw7HQsl-fsTM&GlFUOl($v?jSEgu(@j<3%aLp^bun%gT`_LZIBKIlBOlo*Ix
z9!*Elt~S%c^8x7n<`$Pyx2@l-K=3X+h*7S2OlfE^ygZVIx}$&d_R$jX)~I4?u4_oE
zNX84hsH1wf7OL1-P=44BW)~o4Yc9h<uX9mtJ~0|z6r!Ye3XF)~id}0eAi>NCW;k8I
zAs?x4et9u8b${dQ^DbfBL|<axQm5{_O{Ae^qvK~Ox}4e#w(~2P+i?^ZEz-qZVN=mj
zIuNejNratgDY%HTQelfug8!mm=y*<<zNzOhcD@mltuGN2gK{cnMS5XR6Y6ODRK#Un
zUzyfCF~GRZ;AX3d21kp)_?0K-$4rId6^YDxzL;w({=()w0}z?5;?fH~Lglw~P`o-L
zG!GJi%7kjB(^V@x9g$58-A&N)pC_NLa}?F493tj&1E2EOY50_2iLWnQgrJB2&%5;F
zvC{<1ems#mk8=jaxp~B|iL1!FY=q964na`gC0xa~5U%u|L;VT&x&5XRkh<+QQ+qm*
z<^NTQ+r}J2(cjd8khB;E4+ua-h7D*;?O>LM+i=~~epvh64_ZWO__{6<)SelE&ZpB{
zdgDQb(gs&($kn3mf;)oSkl#$YI7j$s7Y<&!J5gmG@$`D=LHF^U5T$JgF`7NdvppO%
zW|Y7%uM$X!i4>;=97Bn6Tg8V0SB#p3Xmq*&G(zL~hgMDWuK&Z`9>u}dWjn|Nsfmv5
z7Oc#{4u_haNB51o=sfoztFe^eb=6E%rHs7!e_~Jxh-Cf_8K9eV7(S3z>uK);BQG0b
z)YWyw441*v%kk(qX0hNa-GhDat-{(B;mq1`4k)<5#+3S_EP5C-di<MR?RgB!SFU4i
z+oE~r-wZ4kF<!3!jjh<=jq2wW&}qFG#M{o$|ECn(NBg1l;x4B6ErX2lf5DukdvM-r
z9W?sn24yemP`30an7;UjS>HRwtp_I3=iCg^=9fU4z8)rQBA?)hhZQR}S>hkPRD_-P
z`584g)X5!%Ru@ZP$KML#nvA4Q+j#C>+`-#Rb6CfjXrXg`D|d@N0uD8WsAKtt4M>`S
z+0nh2k;zA{G3_Atu6;xq+8=C6X$X8-?})J(lbBn-oshrtGk2Oe6>9ITLs6F{>yy2k
zc!~NT&9-C8J#<mOp9}kQ_ZUVkRROgu>X6--$-IdbCD(NTmBGhYc6eW&f4`h{&YuXL
zSz2(XAQ-YAPX^ih<KXlvjCgW%`*59sk|S;6lp1Yu+tw7w4~ho=kpH;Uv5hqTYLI8@
zL-v}xtmBDPSfjF(x+!%9d0rJ0UrWFV^vrJ74(4+T5ITLvQ7^d_PbiB)$5riO522QL
z4eJG+>CeQLsWs5?u}sLgOup{vnu1Hq60m-o^goWyJs`&IZNnoWl_ar42uUb~q-H%4
zLa2n0gd_<eDMFaeXH80_gQoLzPNk-IJv7ynkhIgbLkJ;u2qA=T{l35aM{0WCwVvm`
zuZthSD@R-I72j=o9$wKrj*k1?nXl$LU3I4nzXm42C&L;rkF(&*d-(&;Dall|V=ly9
zW<J93qkQ#(Qex644;p>-g#GTAmYO#4jml!EnzjIYAGH(BAs6X;Q5m?_g`-P<Bkk%~
z2$GdfP<N*k_fB9%)axSF0oYBO!e3J-F>v;0;}~z(3&tK`Im}r#T>IYb;C6Ka^`8)p
zo^|oqHB6uDihV$Tu=6BDjRxDGr{pcGYL#AX#KK=w*?sRO$)4DNjbDah;-B%*KCT81
zM5<!1LISQHwt;!y&O=-55uEsD7kUi@LTSq;EWL7@Xl)%$CC2CQ{tDLV%JRX%oNeH{
zTSl9buhQSkbI@vD1+^bHn;v+&8*h!4!$fT#kcE1qiEb9CS65MgpC~jp@8f)jn1fK;
zpIUZ&CGB(OgU`G}%z-T?IOZCD-OL#4UEZK+S5F?k%Lk{)2T0)&0p;DVG=1J1iG#o7
z(6(;@Ju>4GCM->2o~b{ac(#N{Pw(b-j!wcy`}d%R+hf+3;Nz>!u`qCHJZL+{qVlj3
zy!6`%Ct95cGQJpx?6AX>oSiV*s}4P0grU=k7#x0$u?xLDKxI}c1TSQai<h5h(0hVB
zd2yBA(P_|B(?l(fn1bQZ9OC%B9L(!lXz$@yw5f0{={hx++7)>)W|Nr8nbp92Eq3SI
z!}eaD7pY-QDVlW8q;4kRSiB<#x>T>D-tlPo$9pwqeNAEf9yz|ox-;n77zZ7d6yhv1
znD-zRTf9>k({L8HI4k1z*`+Ab^(6^D>lh1qHt=$iOb=YlMuF=lNJ^4|hwma<X1xT#
zSA;$PEdx9LUm)Lc8Py}gFnU-jj=B(ps(;jp|0u@!^w&bQK6WP1eMwpmT!fwX@}V>8
z6199E3(|$-7~3cc?29<C@nqZ(L)N>V%tzKWM2{aQ;eV{k))p)1T{a5;7|+It)$15P
zON;8RQh_YpnQ)&bgX5_gpfP?7OwfwO0hYs<ERMx9rEFI`t{zHt*w0&dh)9BEm@_RO
z71mV)Tq&kTif6&`oDBBOScctSios)OJoGBe0>js<P<LW8>Ay7xwV(T8^ND(D-0ufg
zyE(RNj>J>_j7KyxkXz}y3omG;qQ~SCqT9Ng#`!eB9NmLBv)z{M!WM#Xn<gE0I1SHE
z%0+!$Cp@r_o%Pnag1KE}weI&tAn3bAY?Ub5nI41Ar*>dl;m<eMoB&zb4ba}7Lcf3Z
zN1yShz<BH^)V$M8l}lD({oDe!3l)IxcuR;}z`6~e7Ng{G1QkYSlWnu3@d@t~hH3;s
z-^gRM%jGw7mG7tb6pPSnkmdKEQS2<)2CI~=;=SD2Xpx!=CPFzXF-^o2)|XY*iK9-(
zJ2=gUe&BpD6~8!+##&oHRQpQ|3ifR@UJ?)HE3KfkekhrK$_;HFBy+8Jn*FRqMEGMS
zh2JBXi$@QH3O6{B%mQMEGM8m%Jv2>xOHwx8ry5SJ#P)G3nctI+weId{?mmGtShfJq
z2PeU)+EB)Ax`NVwt4swM$<+hVdPJSMj^$5h;lQ70#<~wAlPCU#^Or1SdH?U^{jWR_
z-S_5754<Iw=h%ExaXhFRFQC_rD&b~&JbE;00yiiGyZH;Cp0V7#mXt$JxhL*up21jK
z*_d%C6zoqaL94R{vA-HkOs>2m3hTF!EPv+Ilbc9w=`QF#CBPAnozU|^A*k1EN6`Zt
zFfn>e1iU^|-V1Mb#;%0-SM4$OLm|s;{h=Ka<^{^S4ubb<X=!3Pl=gfiPgw5ZlYn4f
zwu~Ao9s(o5U1FGZ3Y?-1iAZ#lUoku#4_;r2iC;$2rYHJTaq}XyI9NyQx8&fHl{2ur
zh;{MqjKztfaOP(SAclq3^j3Ke<oFbWc<3Q?UlstZm)C<+$$GwU?Ja8mZyy;<JOe&|
z7J*ddd-adXEDV)n&Z|+$h$iQ7ciJMHIza^;@702j+6X#%zApaoh`@~FO4zM528stX
zu<Tk1@>XtR4)W!+sbd`QtZGd>%r=myB|FiF$J`hh*+klLnLG2qmECdbs2ndDKkjNo
z&!)@RyR4s{x>$x?u2*T-g)96@xlB-<vjjB%k%RYZYe0PK1_c|IFMWQE>?}{k@E3((
zU}uP-Rbepup9$z4u8o$7RkYB3Dg2#og&sqY>+)nhjsD1LU+0-%b<`gt4cDXIpDtp%
z<`ZAx%5W0aGaEz4vkU?6G<cdV!%E7!xILEDJ+~7<M_q~<9|>bJ*>X$Y*y4a*KkeUD
z1-kC~V6o>6n3rksW%Zdb+^Pf<(wDG~mnlq4iv&-`>Uj8l0%Cat(fptS7H2hJ;_wXU
zGFw7b&CEbS<vZd1$m80>Mqq|i06r5ALe}0L@Leq#U56xr;iOBP&&X>;$R9?8o@0Qw
zaRuMQcN62UTanhpd!%dZIx;iR1XbnBI6bR2(o_&fWqx9M|H@QcD-8wFij}n2&=xWd
zT!T4}Ss!rJV>)HE66SwV#a0qQUsf?!+HeW3^1cqvIf)?qBL)kF3)E`p4*206fOb`h
zjNNgJEG=ZLD)VYn?Ir7>CHx8o%w=<ZuP@bln{2^;jtcWUI+EHtFVs$}fy4*%K*J>*
zMOX4b!61{k4J##GmTUN;BoC_Fa~5wIv%K|!5wPWeA843{pg04;sI>$HbNs3;L+q)M
zKFix2W?ia&2^}B57k&LrVP8QJ!nq$5ccejJwg|@i$AgB$UfMCBPI|Q061R_&>Gf+@
zL34ZwcG+Cw!%!JWJ{_W`FWX>Ze=X6OH5x}LoX0~BPN=(z<y&S1BG37|Nq9{j=uA~Y
z;b>c!yY3=mG3+xnlS$B|<py;$`<J%P4C36ZClT4|v!JzM5fLX|B5i$2*pSZdys|LP
zxz7*WVlTt59+oW{>&)4$_khsr3#r?76_7j<z{!uBvFqs}zLrlC%3JX9Y~@_k{P=?&
zo5*-@iaTL?p&7pSx&)^ljzAt`i+2oPMHhulLML?K_ZoE43*&=f%`Zph0WddxXX=A)
za?hxi?k;{3KLMUcW-({)MpMzrm&7CSU%us%43t@ILW2bx(Ycd#MDn^N>bKOPsXhbs
z9t{V<i#bHHYZ$t>aD=}q6?wxO;rUC}n^k0czCB4W=s5utq!X!7b_iOV!a(Hh06P~m
z2F8sotmoNg`u+7f%v!h>EUfFWH=OwwA2xz5^(1y_@faB$NqD305{tz*iOH%%EFXH8
zCXStjatE`~#s4n-{e{h`Z&wpVmfujAJ_+VDb112%q-jVTWQ-_>cK1b~{YMjoy_9o|
zUI-qaC(s3}2nJ5LO5K|c(8I2rdoqsVi!3vglm}2jx*j-A4<>y|owTq(2}W3GqyOj&
z`0DKrRK52X?eLAH7TTkrg1!5tf$^9!cMKTLHNmP<G5+>H4_=9m*ni|bJv%!ZHl61%
zzc&pp5D}XTcMz-TvDoACfo1q?u{bIk>~BY)N!u~#*|q~PA(eEcPbFSEjG&@%0esuO
z0af;HM3cA55XE@-6Mm&(sm4(nTX>1_q%6Q;e*wM;*2j!Dx|meX?&CV!*g5SMsW8sL
ztW%f4zp5NJr2K^<%?+H7buJtk-GHy^)?!C;ATd0<6f9De(0=Vva+}-A9P*8vW#40}
zYuQOFqlB>fTLE^uMNs!IF?d}2GFBQ~g?Ysn7_%~ye%%}brByqjbNCi`d?N-s{6jEu
z3+w*`8Ii2fc_h*%7Yz0sL7vN_D(&5wWZ?XHVqW)*Q+U3bouOXPcDw(m$LmhMQ&JeU
zw^>Ac7X@Lj(<R_V>f^mzLiF(K;|sbsLZ5+{ijK9=>17-=(J|C2dI~7~Hx}Hd8eron
z0c^V}!`Y#gXsPW1_C0fm(W6Y1|9KF@Mpxs2IEJWp-sSpe24}VRF&VFthGvV3aQL_B
zm@t1G!noBSjnCy&*9lSL!7^gK^J&K>2Pm3ffbG>GaJ|6~6iz*(+louj?U5Gv`m+2?
ziz3W<H3xgq8v2LD!IpC(aA5NvLnUwQl5?ONA1udqb~iPD%Y&iLd$8-S3OF$i{rk{d
zIKSiym`v6oc7ANPzDvsQ(w|2P$H{?QZ2+eJxPrwy*_=*sKeoQE;b(14!K^U}MDuS?
zP|!UNt!pfy@Ag+}yK*n|dCB4QBYrUbZa&K8E3nLMH<vhDov8ksz;$Q~iIb-h|9ig}
z?YFhj;Nn!2&<*s*O!nEZSw}eiLs+)i7E?~vg79fR?CTH3u6-G(EMGu*sTXmOv2A=S
zOQ2(^8kA0Ez1~_2(5m7%!S4_n*ww+DzEYNVlcJNbh6Gyiz)*1`(K{o7uCXO(|KlU!
z&C%fVghNddLoZ;|b1`=2{-)~xtwxcJKD-RDMyp-DbViK{@=PY18iyXlHpe5Z7Z3sA
zp0!|mwvrA=(?G%fG2uy$aIFe^sBHXF2t7A~HkbIpb)B8)r1+b7*(Ok*A1nt{$6SsR
zSnuq(kg~i0CrV^~fPH&lz+??|tXvNr--pA%hk8<{oXY0h@=)v=1;U07zUnpuFdyqm
zmY=Y|*^6y)N9!i+Ik*Or^46e5(IJ-6l=0^k6+wE%I#jpV4Zfw<u+riRSewRRw5gPF
zH>Pm5aeIhuL?n42V6zlsQ(`$Wj`*FA!`AFmc$~f0T^e^_(-Y?H3EK*yWzNtk%7XTo
z2r%5e4x&=`APkq0Q(-JavZof@N4SA_@l+aTeT}gwbopBhST;PZi1zL|1k1ZhaQ33*
zXgKNxX%DJElMz2iZ`wPW>7$1Bn*Px8w<4}`8iU6tF-PCH#aKro;nur!sOT9DSt0Xa
z-aFQJtr!pX`}3i&Y!|-Olz@2p7~-~cfMkv+z|B#c&~71P@W~&<T2Dv(kX(z)AEZN-
z^%;y*V%)U!DYVl{45FVUjExwK!#3!mq5pc?=VnK)PdkVOW9#wPhO5{#c?4Pc&p|{7
zF{HRL4&9k<;+=1eywiKBY|Ah(pOry7J|4gh<&(?_up6YK-cq4z7V;M7gF*x2LEpH=
zdCX`)i?_2OOKgRGB|$_a5OF-&7e0SP5UhDnkMG-SAo+_OdfE8EE_QE}Rp&xS_#*gt
z-y6-F`JDNJZhoR_KFD6EB9C{!T77ILRoz>`eL1Ouy21PZ-<2i!_=_|8*-tY^6k?D1
zb>h1w9CWW~(3R(<sP-}+GkyxNLo^Et(}%&a<;hsRdp1u07>E;I+OxBYE-ce`!=ph(
zXxj-y$ukB^&+ny$k8`17!#oVtwME&^OW=}yowQEqB~BG%$Q2WR)XZ@OcZFIaUMYd`
zo04#Ru?D)Uus)>%Ql9Z_;vmY!<?qI#)(Ri4Q;+4eGaC3U>H+BfIULo6p=d`IL6&GD
z?EO3$-M84FThMV7=Ig-U@_QKb>Ij+Qz_QS>j?~<xf~x$@&OAZmiRXp_aPCNF%qk-=
z%4s6Poq1eaChM%dn}MwXzlgZp2omdqz(HCLN!@?3yv9AMr^o}(lN9t$od%0ZO;lu8
z2c>^M;a5h5qO>U+n<QUJPsC%Y?(;Vp$NX~lM)FZ(%N5l9ewR3nw&mOB$3y!`GZ2OB
zC#Tqqxb=((eG<Hh<;M+VqS<+L)E6@M?IE}>mju~{d(a~)o;irKXyZy5nCG%Qg4`C;
zwO~2vTB$%Dl<vf?-F)aACM2a(e-Y8bc)mi32FU+sgOaFUB=%DY2Kv;1Vq+PUzFLLL
zPby(kn1BdQ<PaaZbk@seT=KsnXjj=OE=yAZ;;imbkDfx9*B=YoYpSRT%XYOGrD9v^
zc@Qhv5HaKGbU9CeNVYdB&q~CKKQ}06nZ>ecvoLVgIT-6)j#_)v(R{^Pz9c4`W_cSy
zO3^MbHQ0f5N26hCx)?W>20)Bw1fF>)Mq3Eue`7nQKHe$_CaGvH9||+xuxD`C61pvZ
zBRchOCHE~Oz++Q9XBVu1XH)#qP2(`|mhXm6rFXOi52AEuv1#STIT*9d4P`yCpq7^m
z!(U#3wyP5C@X91U6R*H!uV_er<bV=`MzsHP2IP&@(PPm~zSGT>;32w4G%emxQI8H^
zb5b%4q@>X<>1a?lj;Efj^$_T*z}WNhe6g7I>W}9zU!8!sREnTYdmjoe8gUlW?7+}v
z2sIo@@XYLFwzG}If}t01+RJLZ&+<BR>ijSuFBO$_lAw86G4wRlVbl=Bx;qy@SEn8|
zJA$D(`ZyRpt06K!Kjsmc1><`%pmAs^^nPm~x16t|`vMv5tqCHMEn*@)x|fLBmJy2u
zGofohm7bWs99w;p=+RB4_<kZEbM_{puG<$X@H$O21njJ!8p8kYQ7QTuw^Pe(8@j+F
z6tog|aJ(2BNUDwo^`hMvcmE-^Rh~wChK*!-5MOYJ$YpLccIOi`5{0b-`ZPd_Qmrqf
za2Cs%J6hnt-Uw>{)Sig^rD$Zt=7W~8;AMkUqSHt%Ng7ULz6<*kh}x%bk-=qk)V^De
zU{V^j-O6T5&xLp!a?xi5%N!)0W*jnqC=Ox%!94~r>3%Mj{xg+yMjU|YV#edD+Q}E*
zYM{CaQ|J?=MrdT+HHk$${dS9w!<j2wZYB>)_g?~AqXNc?oNrp#(1`8b4Itg42DYcl
ziI$=vr+RP{4yPjU2@s(9jdB#eUJP2ep9++=a1)n!p}Aur(Y*JNI;9k#%aQZYXjX};
zE`yv&vVhG#w4gC26wTiif&GCu<jHY1U+dUP73PJ4y24V}dU-vX2mhe<84t-z#a*bn
zVLrI}IN$@dTAWe20gs0us{e@t-b!Px^~W_@7~%uGai<_R{yKj0O@zLTJKWozwWwe@
z2L@eMq0?$5GJI1E=06jHsI8ZDhOwTrQWR==oPe_CNc=iPgx^~1Q5eE{<?r`H)$KZz
zIk7&$xje@Fccf!)tU~*uY1IF}Txen$;M!*^(M`*M+D)r~)gRdV;#Lx0^64<K-EGD4
zec5RAx`mwdT!cRDr$CZx1S0+4#5P(?BY7><z3M-z^Pd1dit@0vQI+2Mm+=cHTq1&}
z!%a^tb4L4{4+#IB0Hqm(UQaED743{addUv69*2TnO#_JbM!=?xAvo=tCW?biKw$Hb
zsPC#rlL59zjJL=2j#5-w6@>FAGv@jf=4@>~Nlb1klggR?5IW}qf|Va`Sa1bXTvtFx
zt^o|tr7-Z`5fV2EiBZ5QXsZtZ%XLLym^Fi{3%o&F^&)w`O$wXFTcbplOnP73A{zGd
zNa(`^(zI{~<Mpvj2FsLpwEsgAU$MKGMkWUTg&;pRmRP)~0Gp#V_@VVA`kZE*>$%Gq
zUn7KQFVul~))MrTX2Zq}{y6(9+Y8#LKwxS$yc#10!xR1_%j*i&`uc`0*I}8C+(5~Q
zq<GY|7zHy-%CSi`n7B?6gZkt~m>ydL+DiMOxAYG64p`1Os1D5cafm9^7eb&)2t4?3
z9<9Vz$%G+ksQ0>)s%u`SzDhMXGo=<a#Emew%N)H%T&28-&T74_E5Tp>3W_uiLV4~U
z4AZK?tZ5Xa;lGJb-Do=gILmvKCQz*>=lJ4rV~Ox2%Zy$-h_|P)`J(CwVxn^iG*+>h
z0#A+2y8oimNtZ!$n>=$^7|_dOMzSty1cf#K^EgQHqv0f+QlO2kyTzDeR1Y5yv)<a-
zuS6l;9@a9pjzE$NMS=`W{}zo#au4YJf_ixMdOiw6uRwT#1p2-SK>X$>@jV}ik&hYw
zAwmidTY^xT<)g*AhaqPSb0oHYr}D``SRv1WNbWPS`XL4zt47e6xshlGjfT3jDa^+*
z4sJS}#m=koL={`8wowWy?>Pqc{VQn0NN;@GmBTzDlplGNIl={EqCWKmwz64s<97jY
z8!ga?@#78q3ox)J3+~s)g8K`9OxbZ725i>T#7}D=P`!oPYqK5P^EQ@Gae!WbWoXY5
zkvX4PSKYdS;~n1uj{Ta<31dNY=WipcVpt9^To)AIv1gBs26NXYf&1ieB7B_yrPHFg
zI0N?mv_1)knD5D!=+lmfljKy=S=3(JPE>1p_{H&o0NvB!{@q~cNny|81Z!;lxsG(c
zxl0XuglJ?l3`fm(Lh<~eprtmF-Yhd?&syf^VQh8ruW+<Gas?B2lz`D+POxEgBCe6u
zp__|0$hMlnstYHv{~Ei8h3q7T;0`Z+^RV<<6^LhwU~Q%jmfruvuM<Y0X5cN>=VjfD
zy0OG}+5fmUO5hNi1meT$;Cnoo@KtNTgWa1-W8<;O-5>fwwW#K<77{v@bvop2xUP~W
z&V9Ftc(9q%@3livwjiH0Z`4FdbR_L@J3|L87ed=KKYTEx3>UkU<It{Z3?F_TOW!s?
z{>5{+sxTM7eXqjO@2~mN#L;v*>(}&zDbl|VtzbS*BkXPoB3h41pxN>QG2fC1U9TST
zE$@hl!}@X*te8p6!w30>bNb1EoFnutRmZ;PN?>lbjJ9qnM`^<fzV041GH^$WEGUWr
zh57QJ_A(T!s%E11$tkFvY5^HF*HC*Ag24Dtb?tD*@rg`9<74@#owyCvezUxsi2>hy
zL=4&4p2nOI{e1TmjKM~fso$j?xVcIln=kmm@cKj)E0&_K-!u?Uk-{IfnfPOc7+hzZ
z#J2}iK;fJ!hynt!wc`Zr`5{5C#ArGsHykx)rxAGV2^#~aq4mpf)C*u)!1Fc4@T8de
z3Y+OPuYEYPkL_S5R#MS?_Iq~bExrFU8M0jq@cOz4JpUvWpIwSTkFqcPev=S*Z$Ad#
z`mvqUK?gS9w<m)~0?_T7HkI~W;16$LyGK=DS{RxMTh<Cu#ikZCC+CA;ogZ{1OE?uM
zK-q^X;$;07C$qRsyMBaXVwE)!te<GQJiY*o<_&{xqYOx1mx}fL3D`B*fZBT*<2n8V
z$$h*C4;+p{vwnhWs$$X7jQJ2lig4te-S}#NeGQA+sO@z>E>7)l^7Ow<%obN-mL!Wv
z_vM&|>a)-6^M8qO_W?TF-<`cXkmS_wMm5drV3au(^rRf}e(vW)o0Ez1%n2-?!}^Mn
zlQ6lJorzu*l0|Ln(V>&MGxTSWUd;s9vMw7XJI9l*=?&C&-3u<tq8tnD7=Ni}3Ru4L
z0qY{hhfRyZzBgA<jqS%eqK!$D-dcbqC9pzY1?RtF?B8oL8n=7}NV_Arhedg);dqY<
z7go_3S1K`P?+JX{o`@3TUSgsZMR;M)s9~%b_>WY>fzOf9K7$7-TIX@g$z-r-(S}>=
zB%q+T0CW#*pzb&4fo?$q^DOS7;$nSBK2OnRY!T><O=0&2D=Iv*1O)s3GHtld&XD>X
z?K@Zu85Y&>{#6lbHQndm-<yPSi<iKwTQw+ERpORcXF%VO5|G5bq;|*WfXR-z@adKi
zT+J@yW!9OMRUZeY+@LLQBf-|8hnUo{UgHr@@_jHFp9EDvp>h=Q3hbj!Z%^>MhQB7F
z{u^B6vMI>>9bsa-iw6p$v*1az1WUbc@X<X84YzKhb#r5YH!agN)PF16<?Dk@r3NNH
zipOVnr(@-bNYJ&_p+z?u&^Rz19-d8w<q0_~Lt<;{AQV8M_Y;z6U_&mSmOwvgryhx7
z4jQu|XXsh<+8xMvUBkGZYv0NKf|aQ1mkAcT84tEM6Z3~DqFz-t9mZG&<7pnqMi>BT
zW9RU2H$47h1qyZ94y)FHE@0e-uE8tZTTOX1|K$k5?r9L`B_)x(XvomB#Z%weY)&Ns
zEbfiKtU*c~|Ct1aHabN9a08U(1Yy^-i-gk*2TO~!)X=Y$ZzmiEZPl@Oc&!pXJ-dN9
zuxjX*JTbb|FrLN1i?oYn=RYk^L&t((qL;A<k2=-i64zoF;atV~TenH*=}BN!GZ7ra
z*qJX;OQO8Q2txi|g~ohEjI6eWh5q|7<;)PQV9(f3`s^-%VYGiu7Gw4>&$50H#+{6W
zGPejY!DM*<_8Qz8&)lycE>)MlnGE@Xj2Ad|90*>G#GWBBxO{RV>o%<g#}(J<`IZLg
zd65cPhMq82CPMc~C&7HQCn{zy#*MuZP`bK=^#_B<srG9i$?GHGjTWeKCKJU^>%i?-
zJ}KD4a!U8#5MM_DsytQ3JO>%3)nwqA|9wsm)p28cx1sv!YSJP1jdm%k@%!3TVPa$g
z+tIC{=4O^;iGDH2Um6YnT1BF^eHbn83&WYQLUen^@(W#jY912CoMbEmr1RL+KQ4s%
z=nYLL7BfDBmk31(ZG44i`@wZxJx-O+LrdFsqF=cekNZYrmi;mC`C1R@WEwVC&4Kn=
zBDVLQN44U-q3`27_+P$+M~9kfR9{E)DY_Ig-_yIzIvDz<3fz<mQQLPG*sNoIm#3^7
z#s2R3YT@YrEgRR}*TI$h=b>OAg7mg3<IH1;=<ZSi{e}p}+gblR`aK=6zDT;vcMun0
z6506!AztMo&aOL$h5ARJ@^U>Wr!sbE3*)OSu>yJPixf^wLG|bak}}x~e4bcQ+i$a&
zn{_wbpHs%Tc-N`6c^$gm%)?HfEwDpVjBdtvh{LjL=pPXeg@%DRT}=<WS#L}JZU*@i
zHWw4hO;Awj07l(0bTIK4G}kM^!lw;*5!T}U?|IM~x(?nIU&pu|Y(MhC78DBKkdC1y
zH14H5^6mtgzMb0$GS8vVsTKyt+1KFZFbh288-yD5k7-w&0~r3<3WDu%)kaCnz<g?m
zX|F{cRV~n?uK&fujfw6Ud%y%;LIOdjtPnIiJ;7$BE2ab)!GT9?4x1grbrs$r?J0>M
z63*m)k4eCtKN~=!VK-nb+kF)$fvV{wdfqG&`^P*YTDOXc@Ye_sJtiPOwHOZW%tP~l
zjRc=sfOOXy&iC#zaQTO2x=qTU$>ISK?UT}bWsR8gJ`p6dRf*QxeCly|IbhmUxNkKP
zd*WvjCwmpH_v;eq8Tf}hd>O>{Bz0WlV_lq4w-t@v+29Ve!wZsY&^7J>UuT*w%KgW3
zB1=TrZ4^kI`b2ne2*DjcQm~7eJCwEa=&?CcT(hSRrQ@!`oI2)hnwP`ebW+Ti5e99G
zFM>4gyQ$^L4>YTBCX^j#&hk<X?3MimaHtTbOiKqXqrH6hgEvUUP6_ZVznGYxEQTV#
z^*CnnF<d(-9fmi@fcIYJ@LT_pR*J7c?{+pf;`+(QPyU#cQ-VXB*k`=8j~+FPM)Q_x
z0uNu1q}zoMH9H;cYaSBOKSq4Z)$2jQ&YPZ2m!d%B9si<S4r;$-_w$SAQ95A6_hB3#
z)9;9t%q>|__<|lgACCc=>oD@53@kQ=Ks*(Ih09IqG`g1Zo_7%sZF6us)W{cHilfFO
zYruWM2kP~pgl7HfAV2pQ;W*s_=Gg3{XGB{t<9Qt%(gw7s=Y!a~1Rs`M1E(r8{?`xX
zEQefz76uhWn9rEhGo-M))*4$n>ToMB9xWLQxqhY^+S=#h(S?q<RKJX|oOGz7eF_|o
z%7w12GQL6-+Z&~;(#dj@@Imr4)UmEYt#z;Yrz-rh*5eZTwxnX#!K<MBGKXM#Jh(b0
z!_R9caelixCf?7a3IYoV{1E}NntZI=DgxCy0W81h&t}|0D*NRDf)OLQz|C1yljT)b
z*9D{P>Oj8Jf+IM6Wg%{xqK6+oWTEn>?dUc>oN(V*{&l@0>~6n=o`&oUxiE&PClAG5
z6UJx`3WxsSjVSHd2If06$#n+&&$$-~GJST(G1^Bm9%NuaQXUF-v9Civ3Y=U`xZ~<c
zV0j`9Hko8$X>d39tNA+SmCQlYdwDo_I&(aC-loCa1hjmXNnlq49eBEu3SPSM8%AHp
z{<?d_Zf_9Fm51@2ic6>?*F_}~b?kX+KzUmxl2vu=4js6UuFxdt+?D}~`s^Ovqyl<-
zjbQ$$MBI4u3RoBj!01#kjIp|mYnl7d{>UgOoKg#C49+s2rIZR5k0Po=<ru%A1`Ly~
zb6YBFpzD8(l%<VO9GHn+%^`H>n}sOwDB{Z)yUaB{2)c3#;q>3v(BdOUvtFP0pZ6F`
zBOlS_TLpOQSsG-OF{et%9g^adPunN3dzFm>N_;DDv{nHc#VkhAj!M4If9-^KHq4~!
zmm8gOIDp;T()k<J%D~*CnOZ~+QlFRB(6h!6bQ>R1n=R+j%!tjhO-GY=Hxe1oYy@8M
zTZ=yYo!B@x9!z2ba1P;MUCkN1@kxNCuU=EjjgzqOb~T;0;R22u9f!QEVZ=Jg06%?F
z#eox<ptrw>YX4^q7G)bDazr5O^hM!pc^S@=+l`7pD&Z4jxW#E6gVwVMZtPCi#`3NM
zlP$q#{|Zz-7eEKJK2wdy_HdcKr<W>;(28bboaqJFb~qeE?=1uMC$DMyZ^rUD*h91h
z4V34hwAkb2DH7a13O$QsQL3qehP<^@<Ln12O{3Lyn=i87+6Q9dhJYDC*zH!z-pwb$
ze#tK)@39HQr}NP3VmvfQ`cczCJACW51mivpVI6B-+FaWR3;!y|<q8Jq_GKCMa-0Hj
zfy~oLZK40^RcLBC2HKwpw7*~;YX^OksY_T_Z2K|j7##%T$096X_Yt*M%&mE}5;P)|
z!FgB?I7PJ5pT5pmShx(@H*JMgzY9U`T_l?~c<`&)=k|HP1<-yx0=|w;hTv!HGkR7D
zv<%izBjveJ)gO(`AJT}+*Zp8}rIE;A7y&l?LXh4#LQA(0((f(@!b{9A{ndqPKVWk#
z9T!yhj373t<~Z(RFnY?@qUw4cs@(8F1*OTv{ACQ+e>ed2)T_|_mNmlArI5((0~T*1
zP;2V~u1(7aS1&a~$4!o?w>cF0H_c=2={ZEO#1*<NF5$-v#w83shM}WQ(w@1SVE^6*
z#5FUZ)+h_ty|TiJJq5VTClm$OV^Bf9k6ajBiW)^Z5PEnn^x4}H+m+d%eXpJx?nnR)
z?O>LhJV`Z-I;hJ@9=O;FX~&mRniBAs-CK1C?_mz)|C<4;YxA%>)f#U1v+n5UJ*a#`
zhAkUppt`LdI-U%L{ab^vUqKF5-%h{`Pj~$JG7LLk@=#WpLp`1zrh8Y<V~pRwIqAl)
z#7SR)Kfu_7ZVNWhra>tlSQ80_^U9&TA`M-*Y0S_4gSaYXg43FC%ClZz+7~~a2Ku;y
zo@*e6C?3SPx5cQnd@Ltb{==0W4#K_fFJsdm4LZI>0-<W7h*q)<w<3N!rgV&i?y7K9
ze$V)dZ`gb7Tq9_EU8NQ6%oX!!7ueptz`te5I`1o(`=}<BKRv<%-M80KhmFQ)yZjRE
z>;}d(D&U$oYk|Pv5P54=3>b4AhSufdz>kx(tHG0RxcD{kotO-D+rv=s`aGFo$#Q5T
z7{~8yCBJ=H2}m<zsJ2cU)yj9{_c%SKw)$T=$8<5ZVB7^!+*a<ZEC|PU`l7{#v*>9W
z0HK}}=?@_XmZgu0OU4-BIZP#%bxVn4L?qZN=Yjad5?GrhhxT)ZK;MT*@_VBYbFDN`
zw3E5oj~c?oR5dI$sN;OU+e7!eS$J1&4GvyoT!T>=sJX`-1WoTqZ<IgLn!6b)KA51+
z?l^ebvl=V?gHg1og9sLe@J~do$B+@VIN2!=Z#)BR-djqtnx}!<9Y3gf?T8-t?~>BK
zd@!`F;VazULlmsP5t+^v(#Fo(_qQy>ws?OW?p}`hQcIk2zZy*jOR=@#Uw+(&Ae?Zz
z5;Zsz@JQ1}-gCw_y0en&D=MQN-)BK!BM<JI`eC3E+p#?wLhW4`yWQ_CQCU+57q(Y}
zg=!dCek2AXr{&T@*8?aS`-N(qFXHna>C^5CmYMui2D;vCS0*tdPPbQ}n|VEGFJ+t^
zA#*r-XPFvZY@`(j<Dj{J6O>lIB$&-I|IQrf<eb4=kr!IErGx3H>3H{2G^#ooQ{Ch5
zh<t82`6p%u>c8+q#{+fbXF?fctXV<UN5=NmaRQ^hJJiZM1iaj5qGNX;T_3a-O|&*b
zRJ0JKH@b<!g*0%t-Hh5z^~@{AI+bU)fZ^vS<m`9^k=Atns)jtU^}E3hyv)Oaj`MiX
zL&`Xb6-0ICW99@tU~2yBJ9W)cMEmsh(0ub5vA7+CBZ{_Q#Y8219#sU>_g6xadJww*
z>ZX$Pji3?oj(HV@s2Y5Y&$C$psR2{j*~-uK!irolPqbq^lo+sO9En@?K4{)5Pvne~
zp!EG4vN!M=`rZs9-DeYt*FY<6U6)73D+l2Bd|edl7lBby1<7Z;S&hSN&iI_Y&*UlO
z+Z;r}r99}=o(nuPt!i`jb8>pOl8A!^7&to{+*UUd<r2md^^AbQO;f@5pHw;^aE2>J
z6R_jVX=1WyDz^N?@+cXvBwD#c;Oc8{yg%O%>1z#)AQw^Swwh`@-3b#<XF*SYCXppP
zqrBX&CY{F}nZr54bj0dOc+24`s3*-tjkpYG9#ssT<5{L{$|4x@wG@wOm!hXGkL}Bx
zshnC4dgE1G`y>%<>k|m?v$<)gmIi(6g<v`}7bAB{$+$n|SZB^=8f^+F+WjAi)t-u7
z2dnwh<+q^2+zRl>oItD&i-|<*7m-doMh~;j<QL`1sQo4m6_%TU$~r|%HBH6kZIN)n
z#~wvqO8jokpHx982h+o9u+{WDzoMGmKW5l~g6J<A7kCL;4>G@G#(LWA>;Q2)X5jr3
z6*ylOhl!nhQi|E6a1^_{nG|5@+F`IRq7a)RtT4{*H0$_1rjiAD(Dd&Y;^~%!t*(Fb
zeb(pGw7Cu_=z46bK1YbR4#nbPz4a(KbGllA@zXR}&+hBq0u-!H14Wk2_&2c;`)8DZ
z$6bmlW24YwQYLYF^o2Uz3j~kRZ}=WcW2s>IeuCd4QS@~t_bMX=JK_jQ`9@Kw5=q3@
zV&VR=>G<K946o<0S@btHYmJ|RnfF9^FChxu3r1khB_%vzW`QmWK}4`;EA5%G7@ABC
zNbAYNAPSD<8b^y6qfQasvZs?KonbVPx$XpIA$)atwucS<PR#wh=wAL6Y$hhS+V}t}
z1hy0DkKx4J@*7b|`$TP3b!giX0qk7DJQ;GOuw|5pb<~EElT((WuHHzT(!(4|e;<Lb
z$Ex5!Vj*hA?tzZ{TDtLWI&(kvlj2Nwl!Z#crtK1hI#%Ie-6A;0YsCFuuHZ!PEI3|&
z5d;670dld-ojxoJA7}2tb%TtFP`?TLv?%38sY1M6Dw}aXr%ui-T;#TMP#nfwn`)i>
z(trdicru4SvYk1kgjPiR;sjLgu7Km^jC(hfaZJXzL&vyD_}iH=iO+h#;FwvU{7I3q
zw%nmrv6E}wQw+S~AEtfpPeWlzDOiezurrP^^B8=i?>(YmWd0_UzLMjl-yZRKTnOtI
zuV!we1z=;%0k6#p^bA?vWAaqS8MlB3XX9}&Mh(P@eT3iXhbi`opcrxyWu_;<>T)A!
z9dsaV*R!E;HQP%Bj|Ictu^32&Bye02lnw#v+v>@>ZzZ5T+a0Y|JR-^yBI$o?ISlNN
zA#VTrgZ+dDv?stBc+>Bg&QZt%Eiubuu4MU}7n#r%p2=DM^A{Kndq|+}0<^4A!BUm~
zh|{Y}_}YT`q<N-{OD;gebjE`X)B~@7qKKMxHZ=2MK;AEa?lW18gTvUFSV;+&r>26Y
z(Ls=ejRWs^J=6>QmuP$Vg72wRmW5$GfC5zvo%EIn4ro@J7HFaIJR!tXoJF6xLts#i
z!}Hr&H}b$-OzS8_bN#1$Vc-WcaY_yAsy^fdKclK|FP?!S^B^2J`I!Pa$>uVPK~T;3
zdc9An_CN#LPIycjlJ(JI?l|bWJQ8gAb}+bh9{74s1~=nVV6|y7OpBO<MvSF5ZiqH2
zn<azvNjp&p@n<<5c8ALULsVA|(mo$G`jTEkmxTp*D0UMT#@Mlpi!yvjPD0_{h3Gpe
z6qYx!_vY<kC^)Ibi8?33cvrStpWJGiz@F)DEN>)WT{=}S1K9X98127g!m`9`%$t?U
z7wfqY(bT8p!A(H53kUIQYbgYD&cRdpF|a>565|u1q3;|!ugcaEJEi+%-osjK?Jp!h
zhS#HN+e6NA{2}OMJ&nT`VzIAs7b(*)!MTNX=+%)#wI)!`XL%SIeX|kG4?m~IQz^>7
z4xsY?RAJ)+mJjn(ppvr_z$COB6n@TvISvAFIvfn$ml@}->MTwC7KD0vBf<A5V@UkY
zgr<LF)M%O|hJK61CdDblWuy%Zy=H_ahj`%pD-G&I?A?D)0ePa&#3XY!_UN2o`2%}2
z+R#S_>|(%u-Z4n&|A#T0(g5pcqWMxG%XnV_t+VnVd2S0m${h6Z#}cP27eKY;GQIO_
zGWJIghtMyV(6;s^y>;L)zQaP~eYN2GvUltIy%m_3T!SjK5Hu2AQyKeOe{WOA*3$u0
z`Rx!;ZCwL~d)|{(%9X6o-9&l8CfK|z7*d|b5c4l7RQmZ4;HMvS#LZ;v+s<6U=_#yu
zmqa8{8=!UPexmzzG3jcrp@C}@z|rIaRd5*&^2^xV@7qi;b7tL@CU0o3Xd<UGSqARu
zdw#opItu3bavs+Es9yU8kocy<mc<#6yDAhvE@ngWD3*~Qu7+KU(?NRRJzwdJGa3$O
z=ftOxtP}Qx@@Fi>jyhj>n3ajYvzeRpxEhM@I78(N37mdu!<bx7aQs&xs2Ie<KxG>G
zmIh!bW6(Ms{lfK|$e1_9g^TwWz@qLH^qORVUDLe@Z&eYl-x!A$?B8+pPKKsvW8hz|
z14p_KpNzkTt+~(mpT|3+k3kSr7@cN+s*n;rK&QwQYV2POz86Y~y$<WP8eZm0E$bk%
zzzC)iQw*QY&gyC=;Jf@W85?tm-Fr_%@#$jxy2cFqV#=wCZzD8YkB88ICqbYvl(;cR
zqD(uLm<!%;WtXEs*>nkLjPoVO4~U@t^K~qnJPW72TaAha^^oYljjq2Jh^p(dIc?_x
zxWx+vV>8B9`Ro7<>%#F=rT`2-1`xM7RV4jW7UQ{%0?i;1Bz6Zf){-wL{4s=h{FlPc
z$Cak7L$|}mt|(BqW}Nl)%%$F*MBWY<qutwRVv*iSPiF4G(Z@Zpy>SI&fhEyy!ysxE
zrVWF_aO@CYrGe*e65G!;d>=VKlBI2lUoV*Aq-&;VqT)*|cT9j#Wc~fNX#A(m2l)rC
zK&Wdz`7t6MJQ+`$7nV+v9$rE|n|+8ZoViuo@<EbwiT16r1N+XIr1i^u2(8M4kAV?*
zwQnp+bE_ck(Kb*Wdyhue1w-rhXy`dECQiIE<{)!KRl^YYd~X|0H_JpP^?trfG2=z7
zEdiJtijL!JL3KnLHUGGm`hNBV71>UdiDjVt!4M#|9<v4#Vf?@0u!HDfo&Pv=^I?qV
z`N`1Z?@L39G_myG)3kd?4CzUiQ1k44XrHkgc*n|2cK+PV93(FYZyRU2;A;i8E}4m8
zqdd@D%w~;nj__u0=d99pLRYzzGj6IyUY9jBdVd0J&t!t@l@k2dxDW;GuE7gf$yJ0L
z!C$3}H#W+al+C_|8o7t5k0co$HZwl;*A;MnLlF9;7LcxoK190tZ`1qXmsodn8W>ug
zq<7q=;MnPl&|_#azI0fFzC$L$wJ(TvgU3K4>k=-W5{+7!^N5FG37NH`3a2<MLf*+3
z>bWHnI@|tGpUu|fJ>xZi*dGcL^st>{&*Ujj=%0eoIQr)Sb|%{hrH9@UjjS{{D|5!r
zYp3Gh{eT*C90=ZtMAbeQ>N9yHnE$D%Zn6(V0h`Ob)YnEwg>|U4rkDsOmJlbFXHe+&
zh1S5|<UhHEIN!$tL!UaaOyP9YI=zlGAL^ybR~d&Xia9{bDnWlz9Of-thT7lX(6C7p
z&|--r__nFR0KbM*%yI_tff7=Aov}bn4p!HiI$@lO0o!A<SNjn;DjXS&bz0$!aeJ6K
zD-+Q63ww7}`H}b5MG)7j1HPsS@aj$qS{}PfI!PnxDe{6H?2I`u@gQ{Up9<2%wS1NJ
zSD_0Qk)zB3l+&1oc2=v{v7v`FnVceBUQ3~KJ|8SDRx##wA8{P972hwuj9m|=!HJeJ
z=&{8T-VX`Fc{i9N_0%$c-&A=tbeKeK?PpV=aSh+7BpkoH#^M%(XplU8O|6=gp__M~
z*k6mG=9h19olk;MvOS9ZUxqW^bs+tnaT)Km*y29J!{{>W5%CVFL#^}f#84y$_WDKa
z?|jWyT@goC9NNLYmp%Me)mg-<IE%R1#S)=g3E5W@g$s5E!GCirP$)it6>sDr{NNQB
zuva9_yVbzhU>Zu|rxBBvZ;8342CP*rfJ_^fcleWuW$p9OzWqJvu8SeC<SdvBYk&&p
z`QQ<n3ew$a+>#_)Oqy~4ouUW%6^ipf`jNd`s>(Rk$zO=A^#v;3|A|Qd)<=)Iei$)n
zG<NH(hq}qh*jkW+!h2DCUELjM)tkp`XFq7yiU`6xm`~q+tA|@3(@>{_^_s5F!8pcU
zUfOGpqCd%0RbhZyF`krpu??~Ky^{Sr{UG>11ypwr0r5Uf>Qs0F8xH1UlwmyM7#`;?
z-@CvZ^KPcM+5NBm^?Rx*-~+F38DDyP5$XC>hdwrebbEvi9%*Hc*^kK>yN<&dHCkBO
z7>S`R!>A<lEn&eZtV>|seD+?rtXB&P)@HON*$<uaqPYQoMVQ!aiUl?G*rY8bW18}D
ztq)<0nm5$Q_c`(D97kWN%cE9H7TZ_&vH7`xaRJiMp3S35o3r`;J2&HCQX*Jg8j0F(
z#UM+6N_%!l(8{EWd@Iw#=0P=xR7eE*ejdE65~4!RS!%R(BsvX04o<ZmM7EgC1f%(Q
z?_LoaYE@H>KnJ2)VawcC=eZ{C8kG7O5bc68Q1vh&GscX@j<I*h-jE!$S6NKYHb;OZ
z-;L}rc4u=~?`n+=ne_Re2ypRwO(pF6JSv+xYM=U&!p)1oR%MX%9FBwj9!m%<SPkBM
zOO)<f&FP#I<5<~x?7CCT^{q<hyuLlBandyWtnI_*>JzEJ>>THE;R@Kwslu6A%u#<`
zkz~0XLZNC7PX1Gak0->T&xK-)%l8J2l@`=-YYpux_{Q1JoQk>&_Y;pfPl(TJ6B22-
zlkG@1qnPd0?T-;SzNj3XPQN8$^;2}r2}kVsnFzP$Mqo-q32tOv&JSO&W2sFA6`OAY
z<9U&AioM?z*3ZDD+vlNEl#K5?<uXLcmf=CR?RAqDgMptaZitd$R;(Y!>H313dM{~b
zer1<Un_$&AD~!BRjlS8(K!IhC4lk{R%Ra2XbSsHy*|NR;DluPjiscFRy`-Pd7om&q
zD3nfK3$`aYzAMYH81?K%%lp?*+2<T%LTk{nk73xQQw)D!C`7%N3D~tE758`$toguZ
zK5Igu<L5h~Az2K<jT^}7`w95vWETFtI1HN%heE9vW8=D)Fg}nK*SIDFl;qesY49Ew
zr^jZ3*VE~`HRG7x_#}BE9|y2!0vN2C!Dfo`#4z?Hk?wEd9{74;-#?|qsG5C-Rycre
zD9hR2x5S%yyAY?zGhWFL>T_s2s4g3aBYzg)`!4pbZAheoL(5G?S-<)2j}Nfk&nW2n
zy_-Kg%#8h?SJ7sVB&s0qPtVoNK(TBjd>kBuS(^`m`Qkhpw@JpFoaN9bxWx?|7Q+Ig
zWVTz@2gknxux)uIIMGI0>T{l-_<9^X+@AnnXETmWz#%Aku@Ku$vcM$Vl%&67?t}wu
zjm(9k5y!gCS($9lDo^b!%0c#19wJxyqetsJ5MAox9JRe6@s%z)Ws<`9;-k>q(v9RM
z8R6aC#>}6QM{cI1piFfwwD$2xr%fZ|xa7djuk2YHE~3ldhQs|8tmF9ODHphIGMf(#
z1%(p_saC{wIOih5M3X(R_0&4_+NJ{)ZqMk;zJ+K}@|a|;J5L-jnBwIUENs^RTmMfa
zu+$$8UR;G5HIZyyQqFfew1saw;w3e-w;-3lRD$6hC(>bi2_{j0)c)iQHXYeG;m9nO
zZ!YC4JSqlJ#3gWE!JNbPMUcB~GWPVyQ}08r_}r%ybA}dxN^%1FdY*vRCpOR@mWlJ0
zvRuOE3HY`$1$?%zhrVJVbsffbZKkK`s;C;&s^Sst%@;t_D}~`EVJw$vLDU#$(r98C
z9W#0YTFuxGZM9J-_!&oeimcCab`)nQTnTMMWzf9wAC@<+rLL<Bq3i7u#=W9cd~Y=A
zjrj|j27QP&7YOD@1_)>7hqin6knWdZq*aLzvND!4Iro#G=}#&-GL*DVnh*W2J6KjM
z($rjj5!k&6gfYR>urS3Ii`e^<3)jVt*2mN>#eynKK1lzBp2julaj4ZihrdHG1=Bwt
z!&G+H7Mj<B-fbB;`fLYA>ITt(KPi<;XyMQ|bf4x?JRcFu`j(?eZ+8GF-)o`qvrF)6
z9&^FT3aQtAYbc9kd8w<p(CvH_Hclvo{kf~yK7}zJxGs{CoQ;YhwWuj%eu=xgpm1Lv
z7_qsii$D+Vze@udW1EGJvZPuyB93>~(A3uM1?S^!16nNi2*%IIi?bQX%RS3=h4&H3
zD?b!2DkGm?#$#LDMYvn-j24Oyh{=;eVouk<(V;BQx*`e_->xA4J0GG~9rcNQMLgEm
zLeG0OP&hIc)bKi3#J{3D<k_<=#Tn`{3vuUKKXi+*gwlyCX=sQd?R^{x4g86?scaVG
zC+iV4e?EFNJR$ug3tsQ9La%@|P<T>Ec$b2z!|tBIhdePJE{#E_`*oa$K?T3};w4Zm
z>LXV3=R&iM6U6vj#U9aaqP{Q=t|%_Y&3TpBedZ3aq;pVmXpqQ;i6F1T83k#7S8p5|
z%kGJcv-v9+>-x@v`nXsuo%WPFp0^x5ru@wxG@HPlp`H*L8w(z{82@32H{%CZfVn^j
z*~eF)Yg;6)T@VYlp^Q_<ItS(x_H$Z8!x+PmdBFOg^E>L#lAhc8pr?8iY8=m?Rxbx0
z8jlFazXqyHdia{LSJ-`R6!7#~P2wJ0g)7;isP)H;-=l5~wu^kpU&pQS&fBBd-S?SR
zoEQSn<NQEbzlL;up8`(ilqh_ur1LDr&@uTX)wXlSKUb0Y(yA!0ItSlqOhid*2D%Qd
z!c|Hvmtwh*KW>scI+fic>wU9W4&fIU$lRH35i+VNS4=fdjRoDXd@6ro0T>3coXYY)
z<llNW|2ymr%|A~-)naXwO{}5v1J7w+VLu;QoIqp56SDl_dFc44!T7DaG4YKb82Pb2
z)%)e#zza1vCd<ZEx+&}|eUtQgYSDhTKq4<{Af<N?gW-P<Ib%20d7W(plEe_ma!n=s
zM^oIaF&(qGDhw>jC+DkVAkSv*p-wf#DxnruR<jIw@)=@w%!0iarh{O^4^!23lZeaA
zdT2gTgrXVcY#^RX#rtJowq-gh_unTEzp&?sl?QR^S0GK3eZa~ln>ZORLCfl7C>&Q!
z`s;_FMvoWRx?V*4uc1WN{C^akdt8j`*M|oogph<}kR(Zx)Z7cR6GD=Nkc1GD5JDK~
ze4KQe&Lin89aVELHB!kr=McXTLI|O4hj+dI?~nbVr{}rvwZ7l$s=>RHV<Fu+5nYQ)
zF{FAr`Dg=hq%F;74wBF9o+&8v#$sE$44Nteh<Qf8Tg3&$$F|}J(-x99Ojo2l(IskI
zoIri~MAW*l1BADGi*B3`hDPOY-cqXu6+POyWL^$D+&l#<jJ9z{GvaRB&<^5?oC!}2
zg2c%(^vfDc{^oiM*P=|eU|S(%rymBKp9#A}_2@-h_~iJ_kQ48Q71w%kvHb<&@)bh!
z2|du>ahGMQ3c2fr9w=`%W8$edc~E&eD?XG4h2P(>C-h!<)2{-)u8v2O(ShKiq7Fgg
zG?u9s0jj6wf!D=6I9x@~ax=McQ={=8Jclp8mqE;G+QB}p!c`k;z)r^mPJP;fo_)4b
zZ>9*%R;|GMZuNZc|6=g133=9%_M(?pPsm?i%NxgJLBLf_%+qng#$WY(JMl9sqC?=5
zg7PHY!7zL88cf`h1bKbxa9vp`_Mhtq-MRl@@hTl`Uwe}^o*BZ6|IEX#{RiRYc*JJg
zGtjyH3eOZzM8`a0fxe4oTgz#8aBd6O%s2+kt2(&h_nn|}=>ROx+=w0HvJ_T(yigF<
zuJG=26a;Thvc+cuQK;*K!c`*BPaDZ?3oD6_RAjNZ*byTJhrun63)ns>9ncv-B|8f{
z&g_B+JL)cldJ-Qh0A$mCF=wYNFrs{i?EX&XtC|3+eaUtA?jC=9+y?cWj-cV@YfQhD
zSOvcxi^K+$%d?yUb{9`UQ?JYD(zlja42QYaCQCHAeFmgOZxn6`)0wd{Ph@TJ4<vh#
z^D%^W0<EvOrItFn{7mIRSstJ=^dp}&d?Ajc-RGcCI&Z9q0qL2SBJZj0be9^A8s5{u
zz4s`5({KcnOG3dtJ_}ry>L`>&D@BV0iRfWZcdVaZ6~a}$xM0^F(Zdy&A*^x~Y8B7n
zsZ*<=vvLbOA4@Lt06SJXkp9f0#G6VCWlBPdY8JR-K5?SF?W5U7%_Q{J=n2<$m%;t%
zS*&ApK6N+ftoXnTsxx*W{1b<c;XAp*nmBZStA_|>(CM`ujCb?_gFZEI|EVun|4$0u
zr!BabUk?!diNs+7Z#2$L;+g6DP%_2`&a6lx)(iCwivPzVuFk{Jt-1L0_;Tvbk7Xi{
zbZl{?zS-4nAW4;@%Z+jP?C}_MvZOxHFClvE+z4GJOW5EmmRS3-0_{{BnKVC=p4)>k
zq;e`YUi-x}{M|sdVHpTkc~vyd)Z%LuC&2xF0B?^=pnS*<-g18ed@Vdr?we7Ne&{4N
zs8#XBpYvhjzj9R9Ndbq60Z<WJ!Q6s6S?w;`N8M}Zx?d?r*}awZ7p=!Rd&$|lY$8an
zmxIkgWJ@|sP`p78&Fz!WA!ar8HZOs=X9Y7Z2}MzYCi;ydzco9Ex;B8!<t*Nd3PaBo
z=Xu_@wYV5B;f<%Z5cDn)4jie0#4DFzkkbk3-VCq6;7Iml>v`e||4`_v=OOGCg8H^V
z>@->eZR-ax-6KidX^1s8Exf~ak|WP__AMSnJk6#N8Q80E6P^k#gT^mm{66(Y$3IR+
z!Q{&rR7!k!v!SSJK8~^(Hasi17~2a^6GLhRpLx3+#cLfE;-lGUX(&hS`_|BK*Ot|Z
z6=?0MV4nM_XAu98Ki^aaT27NdWy=@tD3iji`U@zwS|aKid4!ig^+fNlL7=wSpJPEX
z>s&P%t$XjJ9)1RhkDp-bm9$&vS;ED;=Wy-o3qWJxMo`Ph2iZ%1-2D0?4mfI#E@rww
z;%F{86p7jUMc~<u;1%B!mutweyJaB2^x5EP7{a|HEFiM;GIkctN7v8;@JE&h$|K)s
z$F_mB7cKy&m{9OLZ3lwLSdq()BxtH3PwlxB@Z1^?pL8;zJK!w1lnvxUN8p~GGua1&
zd3d-~6@L?d)xByd&9%>gY%$G$A|5m2#1d$3?+ecN&wyv!T^7(w0)=u%h)l^s<yH@V
zqw^x1Puz!=<A_7>wJ(dgO?yXTrc_O;Lfy}6Kzfqc1eDFU+UWruv;E-Lp+s2xV-n57
zS7GDv5ZwR2xu~{fHVB`kqO>iZeK~fJoIr!XuD1nPz5ByGSG?kqy}o$1w+k*P3I(gX
zLnw2=!Ebm5IW;2Di_RwU&mVZuQkr2u2xgtH7vuib<THJEiQinEgT`%2u-A{k<a9^q
zHX4trIk8X}M8DsX=iH~+0q~Fx;MVzIW>07BW9qC<Qh=;z6!nz0laF>Aj7wULU%LFz
zYIq2jeCw@vR%M8<HAi8SXemCQsE%`2AHbj$-`T1;8Q|!-f{&FZVe;eQ@Y(1frnMDe
zdrA`5o*zJ*967TyzRSq*PK-YTzF55+T5<~E$D2yDyPl*_%%Qt*bS!2_GMH%4L~M+j
zLJQ<*)-mNYPKhW&$FdL1vR@UnVQ;Y3mP2j*BGBr;2HZ}2aJgo&NVt6+ciAR{jw6rw
z0*4ZGv}<9?oHT`-{uD?uAB$dL)VG{hg|-vZ0LGhQ=a+q`jMB!k6?N!nG?I^6n?@{|
z6mU{HfM<m!dvYxb<g#&!Q&)1JrKzvNV4H+;)PbNuonVQ%7JeJK4>cC2KwH%Y(0VkQ
z39XNc92#rb)u(|dD_sH^PPWixsLtI+d%(U~l>7Ew51osm*uSWa0Y~Dn{$ns+d^ZBc
zYo@WpXu4a@*aONB8lsp-H(8){G-fZK3V2fs#J#RnF3OmLE*WC#c?_vEe9_1y>ou@@
zK`{DerDBdP-4$$!G3@cgLd;_nj_>lAjo}(-wJG3T>y^AMd@X$XkOUJN?9p&=4A=Z`
z5<U%!pzKaEy3but8HxnxqAs=F(>>sAL7l6OXCQe%4CZ@0;XN0opgPTo+>aq|*tmu#
z*XzRLb57`9Q->`{Rf?92iR@NzESk%vfwABkmppkYvI@I|63JB7sdtXGteqxuxL^hC
z_B|na^f~OZDB&#+$m8{81{}Lm1%=~lp|>I)8>B*hCT1*tjJSk<W|c!z|1(USREz5O
zYfy5r&En;QV9-Bn2g=YY(MRneG`DWQ(DOtJ6d3Hmu;M~)erqA_U6uvz1GBi(N;BAL
zP>RC7Ql^o8l3n|B8k$m9vkJ>D=6*E<?;j6FaoKu>*X=U?$UG4ZkDMU(h7GIJ)5D8p
zIjGq$5*%Knv1{v+L2kKE<i3w~uZ^F~B3gs7zs(si{#TE?+*rq?d0SaZq7bF-9g2>3
zVIrr?b79ke6=1&DmM1*(#HM31?s{)EEMAm`@~OIt&TY$~bdQw%?+ZQedv7S*mONl>
zFFKiAeKuI_J;@rk9D=rqK`deX4oq1+8pVI!D@t3|fU$b9!b?99Z}=PlC0s-g&jPTW
zTnK%yyJGB)wP<DWhBtj&$1^Ie01OgX+q+(@<9xfwY~50nMor_-chZjbfjX;tmO?zY
zjS#a>8*F~jIe#d5b6yksaosFv=~Ob=<VG%R8VfG|F|7M4F@`9Ma^ul(2=zaKg>&-2
z{QDVfcr}}ux&;vjfOrNkCWD%-3G9nWM`N3MMRs8d+UgL;z>)S8R!=yX5S#965nFIG
z4on6F!Go6p)cKbwbO(<E?G2O%%&7<eRrQ#E?g0z3Rl%nw!|>}%G3_7gxQ5_g?tN(t
z+Z56ZS9YDlS26?a_#y%r+@DxS`J(%KY0vDr2EETpp!H4ySEWwGs@4)PvO5RYTFHU_
zAb|2%jiR;R4Ny?(UFkR@pL@e`kZ&}DX1%K{ERp&#y%xb<(QuUOBCB0jhu!0&q0>8_
z8#+*bxu+i_Y@LWYK2b1#<s6){${yRCHh_abfE};8dBs;FJTN{ST38n9#hyU-T_eD3
zM+F458sdxSL=^m;1X`B;G3d?*CjGn_D(pTJM++@HYZlRNxt{CJ83R5a%6MS49`=7y
zj8*zF*yinpH%6y`@Nl+b=cshl8;}C}L#m-m?JNr!>Vj=ywOkUrPt=r<5AFdHNbY~0
zW;G+QJO6*&+sJ|$CDU0b`Uj7(iQt#Zhv3;y^N1r_&i<SupNIKLk+nkrd$NUc#B=*G
z!8sAwO->cb`Xbb{7lU>h^$q{-L-k)jMe@dASiOa^9>ju^ZW3_3bB3G;4p361PI<xv
zP=p77{eS&X`raM9A9Qj3qVeD@+{4Z9{opFU+_B^F08sTk%+gJ=(ad)RwwR=$M#6Bo
zaqAP$IG)OcU+bCrCN)q#OBFedwt?!<G5BnBJjzxCGlATCR?M1{tik#$H~bh)o8k@d
zL#-Nij>*U1<s6kgPl!5vzbGPGa<QZH61eOZ@XmS*i0N$$_YZCXiLVxOiLPVYA5BBU
z-XX+Hq+aiZJ&?aL5w7hh!6Dz{(CYau_I_v-JTH#}+3F6~IiZMmr>%ju^HbOutuff5
zYQ}BVByj8YIfyS;;==#Np<sP5F|<M<em|YH=l^2X7pAg3Cdqi_VjSv8>LKYf?PRz6
zq3VdQ%rII9^G9W2g~SIuE0+=nkfX`)9CUmc13i}(;>PJy@oRbrc-0W+VG%hqs7vCU
zTS?4|NSInT2OF0D%L>O#1W8dYQ~fXmvR64n_FzY7PH}`b`_&NV9*T)(1JJHoF7n=d
zhxuYETGt^HZ?+W~9ghRSM<xo{9s%=|l-(T|3SoP;qKhy2gysstBRB*t*XKd^jxkX1
zVG8>EcaCSzTSOUZeNff-L%GzU3XRd}5SW#Z?%y{<_b<A~M_V(!iBh=cGz4$FO@TLu
z!$2cw9B93=<`rXd@rLn6+7-OxUH<~zZ5X_Nq>7sqXW^J63|rhx_#p9e%y#wzofUC7
zzNZYj@-{J~oS@WwJRAv_h{?MPncb8Dydi)%QtN}*rl0`$u_lV<3)7(GnjdCQwqx?r
z%lwK}G-_)!@Vjlv;PyF_nKt&oL03yaT_sy2jQJxH7~icFc10^()0(KWphbM>QB1gX
zCDU9;9^tyb-1hw?NQzvE(BTg1J56Z^*b^FFo@OvJh%2*I;6{8O@`v}Jb8`ha?e>6?
z#J`Gm7=Z`<O;K9t!z%_!A*46;Ofa9z6ZZ4h`t`UW(*-Xax=c)*(V(-;0tLqwvmlRi
z@Z|St3`!**;m->6RLp_ieZtVC@E};(k}uch7}|Rur|x?sN}JB|hJp!9VE3gw`RP1#
z|69OiTZ5QrcLb)M2?Z7UU7OPjK|^`~v^sZj7s~=Bt62ka6V7ADtxv3_eI|HjFNQVo
zLvZm+Vzgz~vCL2nw5q(u0!HOQW{n-X&OQK#P3oa}i7J#I-iWpNAy~d}IX1QpWKCZ#
zvhMtFp1r9zGd>u{YVu^**h9e8zZ2`y<}_0|ZU=_ssB~<M1mznK=r8p}nL%H4{8P?k
zy^HYvJvtAYq=8$_WVj|<g;|rfVRbCWy(SqDbTOB`6Gfs`(ONjPkfY8}6}+!p1Xuga
zM%5!@*}0fX;;SwMhdtyH@Sn&fYNOcM!xq?~OaR-r;rRF0SUkK@f-Wc4L-W^%ytZis
z2ozaR7-z(q)6$s4XaTn|R>cdt3otKd7@E$?Me*|U3S~=9rJd-Gs4j;b&zkSJbW)b6
z!Ei4W=3k{2O%QbWYl2Tz5bGK>6PmUi14%%-NINza1ZuA4dunH+onHkOELn%y{ob)c
zjUpHyS&0?e`n=)Y1l;qBlVi~dH&h(Lds^k_a`*uc?Mg+*{Nvy{UjsbPWpQ(pD(+_Q
z0<}&-@NPvBW@iuN$7!~ynl+8riB0kClU$r_7lL}zdZN+JL#Wif&RTmK(adl%KABVq
z8;PCbxXY49$TP{6q6t>I&Up9&ebXodb3}6%@lY>=X;TtUF^R)j-U@8-_^5dPrVa)c
zOhOf>c!sJLkUBCLE~qWXuR|38UWXtqI|)`KoIzP~8p~Jj;3{giVD>(jy2ATW??)JB
zgsD(wKNA8xdSi{6Hr5xPhSrc+6hDi`7`6@^$nB9Zo90rJi3|EpByuy;M~BG)OwHRB
zq!Vl~Lv9Z($Jc;lfdRCru7dvELlLHWfjRNOD{iHN(0c>8+}Xjzeex7-<0iqJ_mv>Y
zJOSE5f98@Ng(Lo<OaU7Og#(h9i}fCna!w_POMZ%+gaUZkkP1$|6F@)8h;?m`g0?;}
z*r}4tEE7|~QJRWFiV9KHXd2x;X0jh^Pe8{0B-R-DjkVwh2E|v|)4(ir2@!Kw?P{Lv
zu@we=4FvUM;yZa0Up&GN#0M8EBv;O0W(wsp2F(FOsXdG>4Z-3Yv7nJ_MBkVXOe^jc
zbK12RqL!3n^Rsv^b#vvph8%1nzw)ku^C0GW4E7&ogf=}-qN6;RcSpAHjc?s>(YuLQ
zWlKGSqD3J1BNPRVPUn7;M&VwKRA@hZ1lV719IwYw;{H-`uF3_yf7^oO&Sb^~$#^l1
z;m<5f@*-1)O*k4GebW@{w^yRO&2bP0bLRTW1$_)IGs(K%Ah5Hoc=CXHpPd7s>{tl4
zWCp>nmIADpIUSm<BB>i1kI8!-VO<Mx7!-XWA*mkmkb=do(!#b?TJU?!XzGBwg9}Sx
z`duR-opR5@PaC;0{mx{RO@k#r3Q!`et+Z+<M&H-j#B4Btq59;roY<z=t3z{yCB^JU
zdjY?9{vhg2_rj)`ovi-)6bvs;MbA@Pxyz5AEcM<Y^s(3go)b26^Ylm-bcUXXM+X(s
zYd;kZEg$)cEJSbD)nLNPK#^7qpW@@NapwotbkzWCoV1|%Am?tIR)W%JJdUQ{IlFb|
zFj;mHoz75?Z!mF6W7dj<9UY=R+9^1{EE^-HQFe5M7ffx_K=aSV<T0rQo55PpU6IXV
zLWuwJkGaDBMG<Jb*U-C1QfX$Sjt{-Vz&es#kntS4bAq_b3olfS8^V<T{$$`x`_y7`
z)fNR;)_3*BhZ7?4{l>{S)-(^N9je34mqJjoU=@3L#23{|e~Z)y>9TGk^6Tke<huKw
zaYxGCF6mW;Zk8{&(yBpWRriFIvcZ7;%hB|-Ex4o!na#oZP~1=rJxBV1BC855R~KSJ
z<Z0|m&WDbWV1=>Q6H$yggVL~S?C@U&6MTuCXkg6LhFxP-4s+4;=y)*I+ypHuOW;#e
zEczVRf=+%4)8Ed(puQ{M^1Xv7Twex{{l)nFL^K?j6_2-wFJO{i30|*8gLh0aPJFl+
z!v`i{`|%Ls?sl;u{t4Kf8_%+*9OK40C&2p5AULpa3Yvz^1ffr|=x6v`)Gn^TLEYp!
zZ%9Iow$YH9IuM^;R^Y1bnYgM?Hhf)kig=C{Oi)h_D48E-_K||{{dm4Tun=8Trt+9W
z@hH%|Z_%(=mE|8e%hnm@;6TqnG?or#eui_QHl4D<LgLwv>|isF?Z$e_cPM)oV5*%A
zye3w$=Jqh|IMad0dU>N#b1xHkcA3}y{U5t_xhKl{)iQa7gxK}xpm7xEZ?DNPOkK(B
znul?R?=&-`&gP)FC~z#9MZNREe8I1aX#2+$4;-vPn=>WMYx*C)>qa^X=MI3I!)Vvl
z>VUG%*O^3WXc2Z~BV=E$W}i=$(0x1st$s&<u5=X5t<FJ2HL+RBETL523Hakul+1Ue
zc}Jp1)s`I9V|~GA&{<};athar+>f8gF*DTV1pZkjC+=1j3RtB@P_{qIFN*^2ho{-H
zFO)6cSqBkkioonZ6l`cMM6rAYQ>*C*4r}{hlS)5~7#0qi3A!|o`z?x^oPx&kbr5&u
zILdEmGMy3SsIQiXOD`0o=J!H;vi&rjAOB-9XX)LOR|dnpC*!-Zr_e_ABvX(7tk7;f
z&3jKEC&|Sid_6^ur9YRzR-JPIr$cyqIq|?3z7olAP68LVWJRC8C-Kqz^*EjQ)}GS}
zn6Fs?p8AKlMoTVM;?~<-_1<Q%s(8i3m2$<wHP!fi^$uKHdlCePve*URI?!KJjNQas
zvDVdx(iS;62RKj4-HzJ*qu{9Tauoiaqi}RQ1Xc=d_&IMcHb1I?;<P9j(epAkHO&U~
z&!M80(r=2O)s(Gj>Vq+K7xaGgohLlx#1c+}Yd52zXK^gV>}laD&o*K9`4{Z6E~5L_
zbS8P4jgAx}eW?`#UhN?m_GStuzmY?nel?n_^@C+=hoZxQ;n3Aj7q?qYz>cRfUcd1S
zDl>musK-h~@}p}N*3DAp^ldXVY;B{==@^WAnM1k8S(WlR1x#(D4_2Hz0t))s$h$6y
zB%ddO@8NLxGjuFEof-u)mvAm_saDAPX@l|hA@E)-1H0XcBI)aJrfdioiT?~`*<xMx
zqiQJz#gpIY3b8?xvzQ=b3b*T-CMs-D<DTC-L>;SMGM&(DX#4F0s^;gIPybxvvGnDQ
zgXpgQr;N*AX(%*`9x&f2RiG}4g`Xd3zrJHC*6JODreH5#_h&a2GH+-(wuZS4?u9BR
zA25Alh~>Xu4VUt1zLV+CItDk2+TvD#VIlbjR@8y__$1!u^pkD+L*1a|15pDe0~ikn
z!RBifDicP7<A-nT#>9=_YIq7OJ_xw{3C%}o-~KiGG&~<T1ApJK!dn{dC<{x%KX<C(
z?)@HkY<@0!5?@VrE`S+pWm0dcg=wqwf}s)QoSXVY^ynZl=SF0swctAI{Qi{3h=>Py
zKAG#8a+EEoX7S<SSZGeXv27PXsd|{X{G7`TQflGW>hoY%(55i&Q^?i*O2JDv6dkGK
zm(z5a9BY19xZZ&a#;y|y1H-w-epN_T_v5Peqe1QZBW8R;3&IAxV53@kq3f?S=!oA2
zx_dW+<kH+qL$zWUM&}Xj>ou7AJC*#Tdqi@Y;kSJ_iAb5+py^ReRRPQ{DTFCwvn%cU
zh0q-5AMBFPh7XcCD4sG+u{U}sy0wNfc>wvYXE{Kl=qfi}Aq1h<dqtB$HIFa$!`nM8
z&^LlQbZ@o5Gk6Cp?bQ#vf_g%A_CYkh9m9?-CkB3BW6bt!Vs^7{iyTynA=_mmxcq71
zrkhe>Y(hTmB+bZ$^^rFj%mDM5G%L<N%}cM}V@FN|;?6v0Y@p;zMaWiWuTI_0_tXQ?
z*bkmxK63v<)9|m87CvuShsjA&{w6Afe4U-lZuJ=+^xTix6GzuRFp`$kwd6agfr{cW
zyusQ6T#s(U$A2==N^S<9j-LPn=|6O*I{*r=OlbC31@r#NFnIbfEN#(%{HzkNXK^Ss
zjRcqJN7%duKeXHSkR3TW7T<WCM)gS%+;m3*aq%aDR(d`+=o<>H4v%=@^b|JPVG;@)
z8p^$0D%hQ?HF)ROIJ}UWjZeC2!27Ki$R~?okPf+w^-~}wB9l*1BNl>jELw)<5Sxec
zQ+jTgUv~lQsW&Q^kfpdgHWo}Sk4F2qy|81U0;jhaW02iHpp`{_k<|;a+H5y&ix0u}
z9#&v8*PoS%_MnQIA1!!9#J!G(p^bqky&j@4F4Ez4UxUbXU4~CBmBfzikGAW>aKm;p
zY{=-(#f$XN`uz{4Ya)ZfXDX~^>m?9Q7zFMkogw4nb1o_Wz;bmjqRR0!P@NRbk3FUH
zx9Uz5+sP}py2N73p^2c9I1awh%+!3<Txfey!&a@T!%XU;k58^4AKzXO)Yn;5Brn8o
zkBBWx^SWcN#-Ukk9V*3D3Tf_XMaSA{tn1`1cpgvwhod`j@q~#O;(dm=?`FiJYpVR^
zbN~Y#>hRaka&*&~0{&ZSQPsN&p6@<@#{CXJ>Vu8wnw$yydnBP_?mq5vEfSqxN_gh$
zz9@bjUb*$3Q_wTa6eaL47hjHqUmpTMns-s5K1_s9rfQ)zWlJjhpJQ$N;^CNHDmgO_
z!u$7qP@y=9^4?!rP-i)pC8|Qi#_^QN+zEEC?Qpi&cvPQph&(ZlAR9Oxx&kb?V!ae#
zT|Mi}2?6uMo&5j36EP3dL78@yKM!`p4z>WteqV?%Z87X~T!*I%E}+A_Fy?6Whnd|k
zhFRt57-=U$*~$>yLEZ56M%(aIe<|8+%oPdW(!5}j2-e>zL6z%EA>)yP_9c5D!^i=R
zcNT+i#ZVahycqX9c0}XtKe={@5cIu9GG*&G(U>w{R67_6sc|)!bvzwC-pkQk&;x$X
z&_mEm#O&>Z*pmneIE|@9pWE?#Tegz8>*cV(CZ7DtCoAFmY^a#+4BB50KyhjmSWjIE
z&4!2J!p;-$(!3J>Waffh!~_`8Zb+Gq+aja=d0;sz0`@LWMysDXuqaH1^F88F=(3&d
zZIOZJ#5ao8gF<xiumq)cKh`|J2z1A&LySWe3Y-U4YW)p@uPS!fsa4NPSNDJ?E6Tz8
z)hx(<-OUbk9>t)U66mit2o2}xf_Qr=d(>+V2I(;r1twri24z7rc5~e;h;y}fpk(fP
z-luK~YAg(8FKkYs;AkM+GCu(eBs8mEcpm!vuLM0Q#^3%dn`>lEz_rl@sJhz-mY6xC
za9A7bG|dH*tLc<Mv4F-OX2hf00JS@VKv|K<u2xg_?aU<Jui6p|lTIqE&jsVH0ksJG
z`@{Vc<*=d22p?`zL(>aidGzCW^y(w$E#H@cR&OoPe%2Fisu6o(S`pYh4q>N~BJe}t
zFqB>O#;p&+pzwhWXgB7AUDk96AFDu9Gi&&MOo^?6SpLd79#6d|K1Ti&ES*!1VauXe
zhtWgD@<0PT?Qjg`A}g+DP{4}<lhI_c40SZ(@$eOM>MJJjp${&Dy>%dnmjUw#Dum(^
zU(o7UM^38m%rRgPm>boDajFiuzB>r2Hk!PEvUZZT15D>%>V_RT1G14iyuoG@cu0$(
zNp%I-%}eHEEH~hVG9An|@rR~g3Z}D!zLU4>KuhHV?|Q!qru9z1j9%kFvj2vtVofqr
zDg)SgC(1FMkt!NblkdK21PXTT25Ao!$euF*ZrD&pA@T&};Vhx~x)POM38I;6sAv4E
z44nSF;TJBZqfgd2$S=&o1AQ}LqskU+zA_wUtf<0yw`m?<aEQ50a>C3d$)NliB+3{|
z%)a(p{PL#rNKjl<$r=lIaUOO0qaic98pCeqa6j^fh~L<O*KP7NoIDNYqs-xrG6j1#
zuf!XV+Ieg9U#|6+zVk<cxoW3#!Gi{cMxZZ(B%G-jKID@3Zxl@>4oon!Q(=%_021e|
z%u}flnVh<aU6)KDewP-iPELW_XG78NCwad-ek%65)u5BgS+3l-gWHY2sZi>UgE`7n
zNRP{g8+Y<SznPq3M|=P`_5=OblwUNkhg?f1#MDt>nhc=+DT53C7F5(dsYI260nnb>
zA2L3bKyO2B6pv`G?3gtICQWw3K$Ek0|Cx+sjivh<*g)yOd%){3gGn_x^sX7lH`qF(
z>9zCRuqYN)d;Vqa-QM7Iw1QbilY?`~ENnMRLDlrjyy^81?jon&`;Q%P)IS;j7rF>l
z7i<B+3ona;A@m(f?E%*Bt9f$*<uA9qR<ziQATP0!_WKg1aI2+RoFlf)tfBXFA(y*-
zWIm=^kZr$@tGe{y3$9XrAn^diJPW{AJ2qmbdNle}X~7?ldbm(>6dgYVGRN;qNXws#
z(_ZMKVe%*LsI!a@nmrp|7(}B(HT7sW7sJu|OL$Us6OJD5g}*kQhqfO;KJzZ#NK`nN
zCyQb60=ipI>dEzIYJgK@8kj6jrM+=~_OAC_y#Fp1TwT(6*Ps3P%4-%rw+w>2B_UAk
zA4BJwLe&304u6bJ$M{J_)M?l#YIYg|jRxVO?CLy@hJ{co)&Z^S#AQo3S=lsbC(8~m
zMTc#PsDIClC0Dw@M>khg49o%>sV{$u9J^+#fV_B*;_EgQRIR7}zP5~e2j^pF|JU5<
z^i!t4dmUEX5HY3QA<^*3M^O2n8#BM82kxUAxotr*IJ_SR731l<eJ+>FL&u3Sh%bNV
zeJ$RQ)^pV|&U}7thqob)=u<-X&S9gmb%GW&^t%lCPDjCO!ypKLq(wcr6OdaT4@&X9
z${@RGn0mAlg&VVZ$KaI^mKq2b=9L46B4iJt-XbyMg61yb9gf2wYzckmqNkx;bH1YC
z@O38p@t*xNZa9iXQOxSCBV`@R!S{mz_0JlB&|)4FOV^1gL&sjb4MF493?_5jg_0sB
zY8kv%q4jnXChp9_q}pSsPJM0T6ItMvN;CYLm3SciEc(7lC$4}lYQBkuyL&6~<4+~V
zs?}r5fH;`(fLI)J@>%9qZB*5zZ_0ToYDUK(c#sERVY{Mr)dP0_mjtw(Uh<#YJ<z@&
z8gzWqp}koGefJ+lv11W>Y?ndUpQkMKLjrowNrL<LUoz7z3HW42KJ@EhhuJ5ZnPV>r
zj)7C;0?I&dyAtj=K)^D-KH+y>%t0TiCAJ5w1I5AJ)V)3k<~~YLonL~D5#JR~-hEMj
zJMka&o!O*7#2}i{Wlt$#rAOAlV23zVDygfLMov(7;!Vx25Q!zes5w3#*M0~GzYDaR
z*_j7gi#G7$tWZ$<-zbn$7N%-#B)pjzia&0J)A^<bV$yTKMIOMsH>_rc^R&_G*mc_X
zhlBgxQ0P7$2BtU+to`fAfwdmRwKps-E}4f9(<Gq(IS1Uc9`ZwD+)#kI<>~$s_`KsX
zKJ~Cd@7N>g{?r)mV+z+lsteJ*?eLN!5e>+reW;uKPOjnHH18iY-rxjwomO1yWG`^J
zdYd;5{>j=)hk~*xuu`0E%EXC6$jd9m8$0Jg#r8m`T}htZXZGyVcj`Xa%Xxk|-3eOy
zz>Oz|K|N&>(;xkg%^Dp=XMAnEZ?ze+`%MN(<RA;dnE4jsEw#-1eP13F+Q<y|Uxe~7
znn#3Rul(Fzjov3#gV1Fys7q$!sFo9;@y!vmZ|Z|k*dH8%Q=sTb7V2*tz<xSsqabZF
zEDt%1!aQ4%ma!g6O26_ja{r#V@(*q?Fh$kJQ~7TLhJ#l~(MFwSLd7G^HAd$U19&B7
zUfhp@y#p}5+7Pu~O=XhK$)XOcLD;%m6*^WMK*O#yDEgj{QG!+25-Ak9ehR`K{zA0W
zKSM6hIBv<)02Ozbt9>DONH}<p$^r5DgW&S`sK{-JHfvpU4zx|o>0Dz>-^(GqljiWB
zIz#Du9V0R?p-#0=8FZ5qb(OXZoW3svW9!T4<Te2kDNEuCfm}7{Al%=O#x8}($sef$
zvXUykZun|!Ppo2bO|&ca|H(3PHezAVCnD=(wjgzC=fw*HQTg$wBDunl&fnCv`lJT>
zY$p9TYrrTc0@PPufTo9(-Fl`D&VM$MCt(k+B42^?!2ndXoxxjH?ZZ<21pdBfI5;^9
zK(=%`uf{~Q-g1(wca=bf6)}2Jl^|H1X}<VX5k3z~gm?d~LYen4w6CcIXGkS~<|;+@
z*lkc)TL;$Pc0k9IWpLn98M)&FxLnNwGAx(EOT$F`<d+ZGG7G5smIO)1mSfMqQ7G(Z
zrSR6;3k}1mS61aA@?BF0g&OUOpyIKdUM75wdM(~{8;2+4!?CgZv&dY_i>Xpy-RN#l
z6q=o9k~Bx~k<P(2uO?#s2ww=KzoW1_mM8oy!I!la<Vb8{vXxhPo7H>va#kSfW^V$y
zpSvjdyCpV^oeK?FgJ6+eB6e6CGGXdo2IHc*;QBp_v&0ayTX~MU5(hwYM+P)rEN2eS
zqp-t8or#Vmk%LbLFQ%HJ`r%maHf;eby+j_df2cbWb_%;b*}xHz5IsL_!=_~l81gC{
z1wZ>Ll=tlwTQzerY;ggHQGT4bfuL5C#3cF`EL8s`H=)6Hu-_R0XOmZ;aEBT`Zmh<m
z&1EQ`v74Lk8NvID>rgCd=ljfOqH_e&Ts%%>*e4Y~QP*0T-(Zn#L9@AIQr!FP5*k<5
zD(0stFwjm51!p<f{TvK}3~r$;FM((Jr_goaOkxgA=VG1i%0cs@Q1Rm|ju@MXcU~+a
z&d+UL$U=BqWh8!e55V!_P~s>nFevQ_>x`B0M7|wM)ihDwKUtLh#u>tXAB9cChutJT
z33`t8;Ofh;A^a<Q-fKV3=^=%B&C8hGvy}O`<}s(b^I#mSgZtLgd4ICALa==Uc2Up0
zzAPC3jUSE43!d^%dn!So@}Grz0^KR)DWdiY9cWSfRD{QxqRXH-xEpm5)GG5(J~R`O
z{R42Ge+rJ>HyqP9(~&MYkTv4~=yEs!dUMGY@UMxYVSNOY_ROWZNCh+s=-++nN)(1P
z(+n(QmaN+x-8P1?)?X&z(#sOmoqvk{=d=!|Xir0pn^8<N?kvy)hO0h`Fw8p>9KBpY
zxUT}*%L>S~7yvFQvw7&yTGUBB2ZFM-mC^}8;H8qrg~Icq9mA`LUmYuI7*)dCUIMm9
z>;s4Gb|`RKU6DO{EcGrLSVf<e@P2R<Y6UxD)AmEmxT_3IEY5@em&N$~b}YIzn=p^o
zEyPcAA;*g^K3NtFx<kHj&yF^BimrDNC)cCyoo6iOcMbDw9n6I<PAcThoxCYt9p;$k
zz}>Nfh|xHdf0DW48k6B@mJkW=4g7Fsy*;)Y7cd9EQndcHmD??!!T8;1bRE>d-kk1(
zj+OIyVde$uiAd1&)h?Q`-sYl!D#-s&iESfJ!jMrln4j0k@4twI9WRqm<<cAOb&6&L
z9;dnP&F!GM;R1BfIn*>g0BlEAgR?fxf6iP2t0m5$Y#?`f=zOrvPJ=mR5tzFq7xWw7
zGL@;xtV`1o7SU`#Fmx{W++T;@o=d>Zb_g4NS`#l6)nd@t<*bFeLgKBx72}iq@Zq`)
zR5vSURgK%x&+!(&urv|6kJK^OeM7+0IElSqlni4E^3eO(4lvzd4URu|flGKl(c4eN
zU;cPUkt+;_;<#AobHEn@*5^X&4R!DyK;Q4ta`2Cwi_*$nV7>GsJ0;kJ?+q(qZE6Y3
zDK7^5Oa9=wt6I^OQOjMt6r%gDmV%&tg}H=$=Y=ifSbs+)R-Eb&9YZgoaeOOtxV;Z+
z_U%DC`AOdJzz@}`i19Ga5|)-z7d7o0Wj?c@!M&ZiADRH}3v)OL4?*o84UqpA&g_0}
z00o^<y}#03>+3frHjc-h1LL8s_W`bcDo$ZsGKv4MEFSyMw?xUJAr^&)Wn8(nwX#j?
z7Prp~1f>z>-L72bF5B<$?uKNr7#xf02fet~ogeJR>Y*^Fsv7n3@*vg73WuxBLx-eN
z__HntGfl`Tb!h{%3k%qfMF?9p&VtvUN31j=gB`v=`QU~g=;gf>lKWCGW$9DJ=O=r}
zVX*?so*ls_awVh;q5aX67;HB+hiSHlFzl*63Z6YOPtqAdovLL__I4Qi^{pBLE}un5
zVv$|lb^<kq%D8ysUlz1{IhcG*A&>1?&^<XBWQR5}>t|n?;N@&{b@e;!wDUY%dNmW>
zD}q1}+8@V9OYq?h>OjqV$~^_|6}wI@M>D%%@R|<T>XXUS$cfxN$`HqOT|ya#^U*O3
zyZz!Z$Vkit5AC?IRULP;`oS`auJ9XIr(sC>84UYE-lf_AE?E6mbbr7Wh<jZHPHX+3
zPhBsREDNqwEllJ{9ZxZ_Kf4+ZGt!V_@>BrV$PyH^+Eq+-P~c;Y1K85?M<hL*#0DGD
z*=Tqf{(nZaYm_m$%zLAtKEGn-JrS-gxCHo45A5dtPc%pC2&%{a1E2O)qtx?X-Xa(b
z;!CzHx#%X_{ADgNuJm9{uqAf&ImlIS9Avv9a?na|56E4UiHE$HXIyV)ZZBfs_)&6X
zMOLyO4^KjjSjLPx&%%Q{b=bAuf|<vwatAPj{SCqB>F&$!&KibVA)U-o^FQX@dJ$SD
zQYJN&c7X;Lc~4?lJ$XX>@AwiloaGN6<vRG>>m-VQ4P>>~_e0_PW6&}B7|6AZA++i&
z`Y=Bz)U6gdDyM<Zk$&LR&j4I3D$(=#b(URq32Z_qfx6AVTyWrWh2Ca=4BS$T|GJ#Q
zR`MhWS`Jm@`!5IHL`;I}8kTX_9ju4bSy@5(Hq}9>VQY<6`89O@It@b4uiWTOIheN}
z0%dy|7#{h=<*i{N>7>CZ8TnlC*Q*MB=q?fVej_Fuba3&}!-|eM)##=X1IBxzxjVgU
zyz}~VwW*2FD2-DDRSjTWRe@+lvn9dK7i{92N%*2P2<P6g#=V;((6vtlH13YX{+h91
zcX}hnwVwxvbPM>NK^(?JOK4qN4^90N;ZqRtJ`%$%Zq}G$%uFeJadZU+o!SbAQ;T5c
zrwEjMcViuU>X}vF)7<<|HCSJ&$HJe_n0)gVa9X|_Tl&6d@*Ba5tox(U(WZ)Zm3l#A
z!F`dP(_pqXHWV6(J2}HvhTl)upmoMdFnO<uP2}@POo+z=vjebW<5?!Hxgwf1E*gb0
zh3M7UHK;r5IJjkO1v`faJUR3&7r1<-?D|E8OP3Eed~^qy8F?#5Fpvl|&}C09X!Ro|
z`@Y}&;==-TS!Te_j9N*1zTpbN?+50}$GVCj;e4>3bq-qAoaV_}kHh6V0eE9dDHptH
z7dhGsL9b^Nh_6gXfq@BheT;<96RWY%#0lO_u|Ut-<!s1-3S4>98^@i|$F8;*XzXc>
z-h~=qmb(W{4=x8KWwyE(U80}E_e%FhKi;^`j#*i$;&XWvsJC$TY6*2}w*_!j*>7e{
z*?7+>UNrYM0PB8E&~sZ3YMe{PCvJ(@=&TFt?CJiTVvG*A^Dv_>l_~e8T3onaiIS-&
z;6BZ_>Z0@T!n}GYtl7s(tBd%6{TgVajNmP0&lTSG6VO^Cfv?R7#QU}_d}3)Kc85EI
z_riLR&9Y*fH0kdtsAihVY^=aHTpRP?LP0;g*BF8gBXYUmro2*9XEf^HoDPTc%hAD2
zfH8ZWpw`<G#0$!aiFE;zBM)QkbIQrFcGeK6#st43Eo2ontbNHG5Z6s&Ezg&L$L%RN
zP_`ChoE*VqCCz(3Oa^zSnRJFK=Q?ZhVA4;z`xVPjT(p#Fc+k$;<p64a3<BGk(QxZ!
z21x%Vf#Z^W@KS3crp_9P$Hs+2m~#m#i+wGW!+Wxzf7Wnuk|ysS(;G%LDzTKFBL^?y
zmS-;(J*4cu*PX!-rf!NAgI}`?)2iUc**;+1t;2&#!s%V8!7{3=`3;c{y2xIO^tXQJ
z?mxUh*~bUGZ~tJm!zg>Rco|zovs*2dt^CJA$}hgA9?QqItk>AxXuR<>=wClh_rh>i
z=upO0#sspivT^V$T?Jd{&nb<J;@KW&hy(e7we9Kw+Gk@x-*^VR56{BGIy7gwcar)P
zDNx*#-Yv5V=w11=a*6FyY&v5IZj&aW>78KS*&L6N4nY{?I0*!UeW)jG1-f^M8{WSM
zjMGcNgC@tY{qi$rNqfGzGYe6$8Ni5G7L!hR;62efY||{~8%xqDc9;eaCn@2-pJnL2
zA(or`G{A3em(b942!316iOaeV3p53|=V$~*cOS+D^0WAQ0l7q~6If%?Zq_o&fFD?y
z4SI7Dpt45{ZtA6l=0C^Kv*;;OKH0{#8<Uu^_cu{?S{4f5sVl@&s`*-z8uV*QMt!J(
zppt!XY3W&XKCcAFfGphm^C+6MdSmT;Tab6eC|q7I#rhAKDBgtdR#JyQ_5^^_!E4OA
z?_>x(bRJ#z`hm}Q$|qm=!n5Agqv0*;;@-N(Z#<m_s>c|1lsph={z!!L9h7PBA|~8Z
z73PAitTophVrFQ9;%N`O@sl`6$6oQEQu68*xqzm_GV+GhkpD=i7{4zAPNfAPF7xLK
zvt01%Qwa@@2cTi*TUOz`5ggNgFo$AtYj=d<#Ajzw8n%E<YRN;d1t+j$%z3_JNFJp0
zE5+jY4Ak1~!o18Tz?<;}V7K55^v@yoUBZ7%vicCn>*d^faTXWvU~pl70vN+HmNRt<
zW~Zp|uFfgEk0s@XeU2z5@^#4Sx1wiCJ_wWxS#4k`WmWGp(~VZpxH5vf%v;WHdz9k3
zNkh<k>`v$$qysjW(<l@7jd|M4fvv?gV16&3ZzfNX&DaDU{I>|RZXLy=*WFNRJ{(k4
zRiV;IgIrydubisDeA9ZS^&hdycM^xrwVrx@-J<8k@z{q+;d8-8Z2uYu?q(ZN9Oe%`
zX(cSoZ~z)N_2E`U&Fn!-8BR^g#P-_*(S*4B9RtW$rEZAgVN*ml^b84#kHOyQ9=J-U
z6mG0M4bq`f&_jAQLsGd@QZ4VwIS((t<^p7`2RrTc;J9-xxCynH_ak%sNju6ale5tB
zU>9p!G!B(|7b<g~OJGaD3H1I%p5pil@O*rnE0f0a>^3)W*zuZw`XDEN@+O+$yt8QZ
zJum8iiMV_vCUAge>*g(6p>(g5wP*N)pyUFG@7++;?$QINM=x2;J247UJ1g{u%!Z-W
z7eOL%2HpRz^HVA0W$hS1KMQ@{aqYUIWx{-sU`HV|-44KJ?}?~#dMi1x2cg^k|9Fe4
z9Z2sSq^{>>CLR~a)csx%RJfVnr~8EY7d`N<8w8OyrZh|TfL1T+R~)hDdXG+kv|lBw
zSY8Cys@IuX@IbH{HVrzbc*Dxc1}Lm`hoJc-eD*{sCikfaLG4N=`BS8D*}Q;v{C9_`
z8f}L#CsUYpEfi%__Cs4@Z|d3SLw-Xc|C1X^{Qd7-I5R;M_U<aPrvIl)cn$cJ{pG50
z<M^Z=Be7+}64B=0SvXte5?+2V6OET2fmIzBz|pCI>E5}-yLvw5_=KDlHkG`mNQV9v
zfoSvdJ{J@$5IGt(alH^ZDBk5k=iyOMV?vo0+mpQDa|FEP$#C+~SyX+B;Js}SND^I{
z+vhG;HpL7pXr8OL$QPEg3UsE~tMKK2Oz#%4p>CbQP?;@e#MDubtr9#tC?A?}9$EwS
zp|DRlXs@efZD|2uT2My4y--o%+FLA!It6<lRibI+Y#el#a@u!x<Di_LDDotZ)E>&e
z#a-eZIfq$mubt5KSc8XoSAcb|4B}1QVm}1rGMK6d`V;rFiu0i$+%*Whu2I&DoR_jK
zbcb9~$O=W&x$hN^(~Fa_VJi8nIt}1`U+R30Spu7gp(_~MVXitL7@R&e^Foi^T<ee)
zv^GdUt1tt9ulGgc<6lJboioW1_#bQ8G>?UOtH7h22$U!6h2}5kVC}JZR53jbRh?nD
zBiR{^4HolZPxWxTZ8{t+%E6Y<eCU0dW)uFmSVe<1WEj|@D*0%hw3LIl=p)U>hl9<%
zyS!`Ce6V}57&>hRf>heXD})6wvyHxelh<?4U1iu}ErNG{E6}h=1TB-JMeYs0Jb7;l
zuCn(>yGh%*Abu5VN&P6=pB_h@Du2aZn>-jw8PpL=HSu|`5-=XugL|4~FwZDMZlp`^
z6;prEe;NXY8Qajmj|}ULiYdbwg-ZDe#p0L5MqYRs-$a~;<arOc&%>)6PSTvB%bfCs
zTfw`Z6ujD^D5F&+5{`btjqTow#P9mU;{$VX&c(ji)_Ru}cLt)WWEZP_X%EkB&Y-~Y
z7~H;1E}iHPJa4l%>XzSNA%lY;`Ndjr_v;PP3{{vjE(B^)=$^Z&0!;sx2TSx6*p)hp
z3rsT<%a0qN>&NA29yt}nlN&0dJZsVL&u1PH5P*K0Lx}}a3mt)L!SmiZP(j+W{NsU#
zC?DW-BoYlPq@c`|Til2eV_HiNN^-Wq0_l0`Ge&^Z%ro5QiW~LkH-OTuR+RJj7}`CO
zbGO7{+*o@sNLUyP`YR+x`fYxjR%!W?Dp)u|1M8}^seg9_4z#aEyU$8R&_Na6;?$*Z
zn`}t$J0sA{)JEA_A3A$PbDup2!FWqgCO;@r{I6V$83P_O{k}!ed7uVr`pQw#FrB(N
z#H79y1@E_9!pu=q@yWPy@Ytus-$S%e64qO^Ip`qTy-bI#k#aCvbq*xY;w@q#zj4pU
z$snCQj&_j|aR0>#ID2{lP9HQ9FBedk{WG!Z{^zWi`CJcsdYs1mBX@YygEV&g-Z1Pq
z>V@7r%h;7wZn)vlMeMGlJLREskT^CII-9AlG-$u5?Nlrun=u)Khnzw$VGLJ^Jq*$%
zy8(W?!kvnJD46q+TvT7#;iq-PlDMz9KVc@A=1{+8L<u;XF2tc$ap3Y<1-jc7!Tph0
zklUezSL1U~IQbw*J;Hdw3K{5!YvO3iXKdbHiO%-nAo+bkQFx?*sX1jpMnN@8Bt}id
z{BrD;`|^md5|l%VqT;q1cBlvQ2@TZui+I6x$2D@nKhrCx8dl?4;~Z$RPy^39bv%Pk
zw?S^B=o~+Sn<pLu+3|I(@wq-Iixu4aZ5_=f?^)Cb<iLS3awz^zt`ptWknzMB=F_aC
z)#EIN)jLCgoLp~bX0XPK0_aFi6z$nb`T65BK;SeDtV8y)V^99SM<(U6|L5VE6_V?6
z9d~UT2fFfHKKwFqnsyQ^DySZ7ugwBcyA-4c--w)sIsy=n%-KVVjg}FZ?d^=J+3kFr
zWe$c-UkRfoI-%-+f$Zkfqv&26hT`aE=H2cNh4ocvI5B~RjI2hb+9{qmqZ(T2?74q!
zI<DOm36k0*P#z7eOxO2^>{@lO`u7%}lZq(os%95oq@&y<NU>Ns9zUM-!zQg#E_KcZ
zE%?q(O(ACTDPr;1Y10gI9M>ieY^V8Ic6b=g4HITpx}TvPWy3P2IzR^&Zpy?4hcJkn
z7Xo|VAvR<=fkBCoo~Klf$#1!z`aiH~UkNen7K-G*JGq*cH{4h_2TXe8U~SYo_;=rV
zJdt(`&4ae^#l9NY<g}CB^H_{JLzM6^Q4h5p=0f=1d1!Si5d?EYENZI^gX#>}VYfOA
zQY>KJeL3s3Z#8C$v@!XNCbR{Ff%_>l?3%U{0~W+V?wl}eE^p(K`3oz}!5;MQsH16I
zU+5fK1@7}kfTgG!ZAwy@_40U((N#iE2bvFg6jY|2%0bUJnJ9d9nH8QFFdsU*dS3a$
z-u=kJyrZYE=05>;dP<>1V#bzSF-4ts(X_Mn0dcl1%X<-k<}PR1fdk{wf4>2?2@JS-
z@J?v2k7KdZD4&{Q2XKd&|6z4p&AAlq@2x}h8v=!!lBh3`S*iKH0KhjKwf5zMG^7Ru
zIb)b#Q!03xorf6f6zUw@<LNSA-0M0WcOG`e9*4J~>{JHRUE~JxECq}j8AdG8zlyd@
z8Pg6U4up{or_V6Uet8nRC#EpjyZs>F)KBCWxe)TtmO%2dOm42{2Fpg3;Ld>scvE18
zRt{%z!j~xGW{rj$UdiBf^)gzY_rm(mrl<!Ba5Aihrjcsg-H7rQVto)#_`<}Kd_>Jj
zom_HmCo9dLj8Yv3@KLkJhdPt-*Xb}625K_{SA?4$NqDV)2y`C}#kt=8=yLA0qR`u&
z$1I3u)~ZvWb=-15`~8q^PG_&19^@yYnS0JjtZ+&P;ll(bcclz<>NeKa{~}l0Qw_OK
zb+F&UBiQVt3bJd*&^_}CtC?2_MNW|@`&P%VW~QJ~ofB&PEoU(*cbW1hiafO9-~r4+
z(_QlcX1`>f^&HEC2Vq@{9eVDL1znX~W>=hszsoP;sHtZ__svE)Y+MH}Ybsbp?Ru6k
zJP5sq&cJM+>Ho*knFiF<y>0xY5JCtcBs~csggWa+5<*BqNRlLkObMw|(x6e}DNULu
zNppMM>S)qK2qA<JLI@#==e^(m2Os)k*!EuQ9<J+map|&eeBGXTn71JX19AsL+Xx3H
zx?src7GGwU+$i%%HV7@XBADL(Ae=R?FD@G9k8}U7K+m$?czPXq<*rPI&cP?4&^aG$
zRCfxp2{#O+`(@ay=!DTtl$)XJFE)$$B=&z$fl`gtAj%1+94sAhdFqSn+9*$9{|Gcb
zHyeUTcPmLNGq5uW0B?H<#=otFoN9MekMZKSlqRBT)HW{Bdr+o|X`nQr1p6CDf#$hL
zrhAcr*??z4`1VZjr#}~Z`Ql2IOmrFCk2`H10nuLvqR+<}INY0ZIQNquX-i))oUxwE
z_xHv23Kb9!&W7o4uYy(BeI}df4$AwL*pl59sQvF9Zt>qDXxX=xD^6-;1L?f-*cFRb
z;}$^Yl|0n*iNt`b*I9iiXJfl#QF>(~yXUtQbw17l_5bEkJ@lO&zavIkyn$rEXWqEx
zI4G&4z#A*d_=z?GtF9Zo?w2#@T#bYQ4@)q#+#7YP{8_Ki4780eMdzwYd^mA2>Xvq~
zy!Vx8Q7Pr&=k0O8Wif`EMxey}jM(K)Kgt|l&%C~qHalAZt%Nc-*MBQz#07Azs4#3Q
z`NSJ9s6fcNOv>_j&aIdCN2jY%&^%-n7@E8^kTqOo)ng;@S#B-JNW+#;bP4<XcLChm
zN#pgfklp&Eg+dZ>8aIArCauqzXyAu3-Qw{qWat&KYUVi}lD!}6J0~GbkfHwUB1pWI
z4!R{S=;9m9RC}j0{Vg}REioRIt2gkF1QqW1LLXWxGlaLKHE^6h0w$XaC~i89>7*@C
zu}cH1fywY@7tIN$-e5tKrJ&l7!a7|4fd;uA=27O1bA2wr0@oDi8kGPISwkSCe+-o0
z&qeb|l_+w4Q<`RZ7U~Xf=Gscd5PCBcPu*I8c{^6J4x{fv4(<ir2R8-RmLOi&5e9mx
zUMyVnk(<8j1J)nrV#+1b2F3NmtQBWKV?s72e4T(VG)&Nbh#abICEv=YP;fLZ<);l#
zqsT6>^w_Ulls+5^&+MvUuX-pZY82vVVv>CAB_(a&Sk{rdlG!A$6@Kgvr#zJ)SnU*m
z9nTKK;@#8mhq@e=t_>u=N;ZnVy%nCGB|c<IBD&uqucB{%@cTr4*)V<L&#8cBX(;bF
z)W%$Q?E~Mv3vr;&cx>@&=NIOW#2kltP-HX~+tU-+lRjy9c6T7i=64y)eiDG;CKBeb
zUydD6Aie4xF*~`H_^u-+GUff5tZrX5=7iFYW>+v5&D7?K3yBw*I-R^@RzhXTdAPN;
z6kPg?Sxfd+2)J$v+EWYQhCXEym0v<f^5uGEJYcuz8L#-6i~7CDqxa%JX7=w_rqbID
zu1AMJtI<_%yS5m-PMzg*^&@cCV|rhQkXGW4GNg<Wc+(#ZG<oxit+EQlmb1Nho}NDl
zqoS#nUBd+PHK;o;6-1X^%VeoCp@088=($}EvdjA1TIvtl+B9pnP~=MXzTDL_4$Jzj
zp{A}D+fUe2zU^=BOEZs4BP&sxc!b0E`CxS8Dm08(4qx0W@o<C%=BP}A<2C!y^$=x-
zxsGIs&wROwjC>EFo``*a@McKEwhx88dvi22ef!FaLldCHG6=I*%teKBF|4C<5gsY<
z#dqBsu{GNiyr!qX@kR4cJ!u4}zh;8+o^qJ1WQ;QdQ}J<YDP$baWSw60?i{UWf!D<-
zD=iaC(+r_kjRbF*o`$B4zRZ`+U?T6n?9O^bx5<MsM#&c<A4Q_t7H_n>u^Lr1HCVm&
z0T2Z>3!;IC#a7`L@uc?@x{SQIdhTA~V(+c^fHapXkFUb6bt5oCe;HpZzX?SZz6Nul
z5YEv2&81b3r({+`bEK61KQ3cl;}~xK%L{B`_JHr`2h7QOIb=k|@L0tt7}n;2tz{wL
zb^R3VS#$^=_)4*J_+pR>3;5jXG<dT#1mcs(C!hUYtb4IZ{APt09GzK>yZU*c#w22(
zX`JIS%OHNYv=9|e6*9@R(dZzd9VhK6ACH-f8RtsCbu#5&o9Ey$_fWJ~T!Rk!+rag7
z6^o6XOFfYyc2{M?ty8J+pf8=D`GbXyJps5jE&_Ew*7GU97o$!mWzY`QfsDI(XjD@U
z15Tx)S9})4TPK02b&gP~{e)XRoJ~xt3ovQQS{(7b6rOdYLYw#`RO=A)DI|#L5eLK2
zdkI@DAZVV|1cy-;eE-5^G!9ZGAAd7zZ6qyyts)btD>2E-?P9|%zgW*bS9q;cj=o!W
zK*zWzLU|EoFf5-7aR=jZ=}uxfjEP3I-W;5s=i-d(bE$7X1@FqJj*2(KoCir<KVTB`
z`%y&m;LFVGRtC0T+yxS=5#l3$x7fx@7tlbm2pxQu^PrS+XsJ~e9(Phd;#G~Ell(C1
z&I<I^9|JCaW1*sa7zWszz&D2zC<@Lmm5ila0_hInU|bAp&B+9(^k)7=YXi1rXaSy?
zhQDL0QRzkrf4p=Dal$7to0ZLi@hHmJeA~)=Jv`7Yu@FUqb*ZMx1)lTn5Zn=J@fmEw
zM*G<yUZ;p+!>MRvHig9!XF-|t_KnhXo;9%-hOc_Z<70?_bBz)`6jRYEb{^<;<ulc&
z^VEM8qJQQx+_u3BPifJ<xOgDQ9L-R{cL7+I<fG*LbpuJBJ<IDqk=cyg&U8jPV$hPS
z*eD(aEnk<4b+?p*`mAg&{uT<l>)-N*=x%Y&RzI#f&V$Qj^5h-osJ_??61RqlZ~fSW
z#VM4TrWV6Pl82#W!SJ#p9cOrhSRW+cnuS8e5)i2t&0Br*B-*JQf)3X#Fx2=c9<hKl
zS#|rEONcAa`*ewY-Z+@D<HkX2wuEcQCu5I)87$67!s|9;(DmpAFmzF+Txxgd-ET4)
zek&Ij*AGDBx+d=ObT%;_=0WWo56E#%<<dD{S({oJIGm_tzba+OtLC8or&69vbD%Vh
zD(DIe<f`426SJ=uO#XX>*!7)E`)@di#Jy4FXEH7vlaDPHe_3m16Ytzg`l0X)HX|w)
z#n)0%+y64Nd-RCu=jkx(9RlEUcQ~>y9m}=o`(HB(ZB~y!GsTV2KY@1KciUJ-pMk7i
z>k5~xLW2k6u3*bd+KazmhbB8B(f9Ie-m<@2C{Z4b56r_zLza&RW~QQD%1Dg&%!S%&
z;(okL6>J`z6-z%%WipYXfVR0HyV)WBbki5?X^xt+FM-<-58bzcyvV<MgY3pQ)^znM
zxV)PUqkdRp>G`d2MAe7S*`AAicBez9!g}a(-^O;`HbzBv9j4o76*1!#QD5Z@Zw!xS
z?0FP6Y`n<d8&Gzp`V(R3sS-$YI*&GQCUA(H4!*mGbFX*jIXp=Q>%AiCAKoyR)1$GP
z8KSgjGHI!Iu}weo@!yEOxVgj&P4W&i!yn45YyVarYFz`NO{pL~kc(}dejMB;gOOuA
z-nbr(CLtV@_HTm5x2s|L;H_xhUk+RHoS^HBCUZTj2j<OJp>)bD<}x~hwF<PO-9cJW
zyH1|BEs%SS6v4L9r?BOAHTF5hv3^z+l+Q9ptL8%3H=D96es?o<Ia{&U6;G!0Km%Gn
zsxskZBFeTdVdDyG(Z=~QWGE`|TTcV=+Jwa@1bSfgd-4a3o(Ek!&oh1RE*43>bzf?N
zhn^mWy2oU~nfrm*@Jt@|xQ)fy>;9nfWC{)?U-!DzRoHss0jo>>$c{cAi7yXtN1Gjk
zP|@0t-x^hjv$8Wl(sh{S-weaVnn_~)n-f6&*j65Y-3vN4$DljuP_#42+vmFlU0=%Z
z*ZV+h!xLy}x0>s8#h~)gX>h@P3o4&EE4Fb=hc<eLrsx$=U+u*|&7?VG=|+9+?wJ@g
zvm8X1WMzHK24c-rs%_?{bMGF1aB{lAQdIA<LahjRIV=<=+>OJiI_h^;s&R{J4_V@W
z2E^o9CGJHUSG7O~4848@jN0PS<u3Wi-+$+J6^&f!MG_dj_Cc3jcbGN#C$W1!w)c)l
z#X&P!-Ij3nU?A<1TMlxY<~2+d^s`K<r<<z<1@iboqyaYT2l^u;xDw4z3%ldNXrCv1
zeUJvq8*Eue7>dni{1P-9FLTXf)US^2F=$(A28Wc%bEQG^*yROG)5HYNUOorkjwfN^
zkyxfUK84?jPD8I&ng@mI;5hO++8=bsx&5b-|Ir@?b|ALw{wtnN+Rlb<L)KuGfHwDc
z!<v`7(9v-zn0<Q90@lRx+PGlQt%=~{0;1vj`Q_MBWCzucD~LZ?ixvu-;O3ZF_+*nO
z20RUgD7}lAIFfik^NYaHA()N*8jB}%$rAwHpkY%3gLapJO;;J^7|lS9c^+_qdWzP}
zFlLx}hnr=kv$M9*7|Ro2^J&VTihs<q_Ke4bJ}0pM@pPyh5d=x8A>g9r3~Bp=N!QTB
z6jc{-3*RlIQP?ETyE}!Az9>b}$(d!^f$vyo$|9_s+81-K&<<x-F8Q8AP&GIjx?Qfq
z-Cjl5kfYCBE+&JUw*}suABRqBA2Hq8DO|S494svR(~NotJark4lQ&aViDjgC3Qxyd
zzw^NO%}aJG-34pu`!SZw=0*((n9+CuBue&z6*Hwi><}#Y83Vtn%n%ApVF#Ui6T&Xz
z)$D3ixI;O%$IgP+zGKAK%3wFYZO5*V?|e_`0u)awMX9v``;ip^wUL*gIi(Vtemvtn
zXMLb7Q;hS=Xb-tei<s~s#g4Q7<u7lIL(_Y=SmVvnlzB&cB)N72yB$XW|NYE;cbG8E
zb;Nu9^M&_HRmXZb&l5kz2$I?71mE^BtS63)UOVw`f={#J6N@m>lQP+Q3P`KB57ic|
zMU~C_P<x?-O`W$MTdYci@XHrr>#&_@<*q@yJ{|m!yNEJ!&hqA?6`=ooA5^{{iMr=g
z1?Pc<C>|3>teKU<e=|2yw&GBsqqUf+JRJzWG1hoGY#3%(C4%WY4N!Rz%253|b9#CL
z6f0(O?MLs}2<J2KeAhvgZdwVoz9m@v<tVDY(_}+G6+`Py1yIa>&3reGK(T!q9Jxi=
zLhb(eOvJ%?rU&{OjK;<fu~Y*LgArM__$b62&5pd{>N<yE$$KLlzKZzF?{0DZcMDji
zg994&oEINTalp4rr{dv^cyu^+k~Obe11GW~a8_^#%v!t(k1kF_^J;a}&h|r`K^Oao
zAG{-RD8F=&vLH?KL3Q?A-mW?hXQmIqF8}x3_x?n_;u!Ua4`q~#7>Awei*fVFT*$U`
z!6`O5SbN_Umo7odm@*XiOtyg1Z@p1fUYVOvUa-s1`|QAT4Vv49Vf76)NXZG|y5E}E
z>r+|8Z7IZ4Q8gH#SIGjm_@a0QX}$jG1#09!t(&(Sv_zrUX|e`p?jx;O>QDniwSk~d
z@0Rk^cWe>$1!{Y`cuQI|;Efa(FwqKqb;q)M#JJO)(8?9Z??Tbkw?c!%52o>`0K8)6
zz-dbhv<k9^zOyy(lgDJ-`SKK=9UO`}by7CfyBab44e0^bgM~s24iSw<!-GZqZ_qIe
z81KUqAEhyum?ZA`>I^82_{I%QRm8b!bO!8oM$M?vF#0@#YQRKhX5m9j&_H&uml;YX
zC1ca^0v6vdn=)4Z6W`jbjo;eiQTga*$lHAhEk@7`Wt1tn%^piNM7;R!bjs6A)WGnc
zCOD^I3?4Vp#6B<2V$R%gAihf;x0}&mKAQHaE1vV#kbl5p!6ml3APx-85lk1Q(*D8$
z@|+{ZB8%eEpvppQeO?9~eVz*X<9cJ^bvbOD{*Ak&AK;?cQ|xn^4u<?{7Vg6wtn1p$
z2fa!}#d)Uia!?XTj1Ph2<`b^y{FiC?siVt_S8Q~^HgxEwJNkeQWiu3^$;V=-^F9K#
z<IbRHaWHF;4`%!G3qc{2<~qOE!SG3|k<YF{32>qMQYy>Oq#dSSK6L4qu@HPDD9j@*
zO2jZQ+_Rij%Ef}(sME}D<5X~KrnAm!RoPaJE4b$3By4z3e8MQ&Lyq}TR;pkPgHxSR
z&o><uZpSnKM~mql$mPYmBT%ZP1k%6T$rIkqOHHf5g0zc@T6<7$P%+p1o`B2$b3<kP
zEB5Ms$3M~hG~+p;krdKVnr6+NM!po}1oCK}rP{aUmr&oP2zLY0pzG~sP~0({cg}C)
zZO-LjyEPck+WCU|*`Z=l)WWjDU5UJYW(0HEzm+8{HNgIENl<EH3O4VXgg+O;&_(Mn
zlhv-|BY)<gq1;$jt$u-{^$ggwjovNqEaDo)fsI&E+-RSSO2gIgB@cl*IgU+zK61z$
zNG!a^V$~pBcF5=)v@cCzD?CcjeJ;%bhHXUEi56)4r2vR<imHDDnBr=VUZ!QNex4t*
z$>Yp$TW>-1qPG~w&&57YPNbU)hvNJMP^CO{l@FBRpsou$o))9a^#japYzf%EiX+|}
z&08w7g*SgQAWb+A(lMFvEIAnZt{_kRtbQ!zsviS+szEpZ$01-oyfQDr=c4)8O;7f^
z-y(dd<chQ6$TR-?AT#W53vw%cv3g4dpSY(OQ}*BC(Bp%172+t*FBR-2Tp+ID0Jywh
z1FHR0gTzfADf>8zJ$ybL-^v}s-*b(y>47aX6@@ZO+kbg+vM1yY^@9yDy>au6O!7Vr
z6?A{MiCrdBO|<G5)>Y=CQG^U)?~Ddf&tk#QXC0vDbm*uTScb41g1U;qWN#<GC$|;n
z4)=pDA5*ATEyt`_OB|7z1Bd44gC=prhN=ZpKS#ZG^)_OcPD1T(a?G~604%N*Gac<5
z(yo6JOy&{)x6BBf>#v~KIO4^9m;{Y9U$nD21C}X=q37Bx7#N$2_5H6f!-g8L8|upf
zmVe|HYa(&Mm$j&Tb}6&a9t1YF0J54J!qaW(Xxcadx_;$BNL(jVwfm2EEUDo=Rj~{P
zoP^lY3n0ph1$s#-qr)65GmgXLfqK~FqRYD0TxOEJwc^sU9N0{n(Zfdzu<OE5$k_Ux
zb;vx#Qn%Nv)ba{y2isxu8wsf7+kxte3P}8SDT+eeP-n|9SZHz>V_qcV6saC=7LH&S
zk45*i>BI?8#g<MfH$9fadven;<eML~-Km7^C1z;7I365!kQO|T_=U6Qfp%FK{%2)`
zien2wc_DEZ-u`9|^+UjgbQS@tc0puOUz|&vm6JtlXpZj*pQ?z3uy7dIL^be?=^5Z*
ze+9EfhN4Yz9oHW;7`Dr5v7t+spUntCTS++a@dKcA><hka*A*;vn9m&?CPQK?7fVWY
zh_&>9$x5Qc<~g+Av~)4}a9fNvU(bmpr_>A@mX|TJlOOq$wpi35ww$ufPcBM`DV4qH
zHb|%MPNMax%uC&yxXR<X-p*baBJmW5KaFOVn>3k7@i0pidojo0To$Zvh#%-Vwq20}
z{eEV=ct8y(dQ1U{&DFAJ53|8VHUM;pRqr)G6Jl#rq5khm^skzRv3_1)n3@PFi)>*)
zlMaqpQbfM`i>U9Wz}j!d!QCeUCT~l@vblbE^Gq&|Hr7JBxrZTUiG*@O*WlCfsu=uA
zhPv`=$j*}u=}D6?ZgxHnck@9@Clws9Kn0c8?PF#WbC~?~bX@#x8K&Ner<uMd-1m{8
ziuN5IQr;%iIS*vRretACy+2OhJOq1!r-JIYRZwblpS^6of|k|-PaH;ho~D#-`g$2c
zvIT_ip<P7s0jLcPfVPfcj*IqT+qVc-`^gJluUU=~&xZ!z=jWmRLJt;FdVovixACsU
zK1|<#1r$>DoM)d1;!<qpi6<tCz3K;m?o~lh_}mW<48MSq&I-)BLY#f=as%li5woFP
zO28D_ZKd56N<Uu#k+C~BEYbznKBvL*f)mrbT*XC7gY|p4O~*P14alM$lysjnvGt4D
zumAke_x=i|-G2z}Xk<+P>`tCELW0sep0GbIme`n6xI|dZl;pyA{GUx|LiM)EgIehL
zGn?t?`Cx}#5a7065VUU|DsQ^TLmi{Qx}Xa0&@MpR#SWB}X!kp#O&rE2picBfIQh^E
z4^)<*d7BOO9K>Qi_t!x2VlZXB74V78oAC(UE6Q);1u=O5p8Xq+6%~FMkViVv74dxi
zwM0~q-r+a5*dp6G4Nc6eKt!6D6kB%?1-vYY-aZdQJ5pez`302MPDO`)%b`|f7bYsY
zvgD}q*kd*y+7)_JR`)pUbGLwUbWic0E&I?jz#WgAu!e<;%CM1i8peMX!ox9Bas2UR
zC|X-$(6ss?S6|vD+<NGbTU~3g@e%z#aSNekW;FNe3Lw_$Nhlp*hfSe-z;H(ci+m7(
zx;b9V{=r_<(Vz^On!Zrn>5Y1B=ST~~!6r9=cb!<tdk!Q(Pa645#$OVzp}t2?;*Cqz
zT|`@AAeQb<;16#u!s$7rHMtbRWuA+eqB-THYNtY7kUsAi>xX}Tjl-tW1CVb~1WivB
z_`;i2cqlRk9O87@!n+Bm`|oDZ8K((G-~CV)?ZR!`&$1Ey&tX76LzvL$g{HfELHxTo
z3|CO1eOxZ69XQK$vmOWzfsvpsd&@Fn&w%qdeXKTLgxYs}d7;a19@b2`X!%Cs%Pq(8
zM`AAME|26vkA0yqMHj2XJy_><Uzka3KLsOC=CmRX6wbzS<z(QYPM)aoA)PWFUoy<u
z19`tTL86TkcnvjxmK*m3$se;a-DvXDsWwu_lmeP+2O*=&2U?xN(b&3xYdD^UBj+k`
zc3*XzkyMD1LwSPbC_PYJX-_q87<1WV#WMD0gWX4c2sq&k`q#a==30LUzkCLU{_zCc
z*~GNVdBQEK$3oAAyG$Y-UZ#4qH?+vMf>x~np`j8`hqYpb$rHe4H|?ia^`n_x3R=b3
zL6hDtkbYiBo)TM_b+jA|`+hf2e^*19v`HXP9j^M>8Va8}Lg5fw=&6qf>44#U)mbDa
z*&c5El32cTXttNGAhuOU?8siq3ziw7!&M`uEFA@_2Nz*Bc@sn5c|yab;b5qAjdVpL
z#nOzkbpDg?&>#^!zZJm>ADTzGHS*H;ImDpV0?8y#v0d~wFj4=(`d$jb);|Y8Z!*ov
zNs~3ArjRnL^uY1SWjtG-1+k5fxbDno%*qf*qgsvM9ZsRxvmY*X@CS=~q2TT1iIYa1
z$Cic)Si4FSm#Rj=$<PAK$m+{CUyDO!|02QYsy|rT1o3h6FVfyP46OdGWZKI$L3@7+
z#BRL^>UAJKapx-ZswXD$fk(Xg%w-Hu4`Y@-|6t3)W|lZB5{?{-;<nR^P|rva?E*C^
z2TBV*Bnaqu`ZCo$fB5JO%2?<h1lv2cs4o6dDl`06c339~jh)nRl?`RH6q8Qs9x;ts
zDyZE#%?vw-FqOz1uwwgFP&(Jje+mhx7?y(41&g7p@EFsp3CFG--<evD5_Wy<3sa}>
zM<ZoF2pMpR<t)hHb#H5#%qT*reoUEB6LpzR8ugt6C1BU`FMGE+9a~O(6%qs9FlqiK
z<~u2eemQW5hC+VO*AzdMNTK7S6B;HIid8;^KtM$XoBn}x@+$R$WPN|ZxZ*ZTSXhZF
zlg}~9RRx2NCx3(!PcEV4m_Ou&#6sQaB`nFG7=V*Rn7AoXoFejoWqNBc>N<It1`L9c
zoTHXyF`a9E;`o`TfgmD~qHP7wKh<EX`)%IsTmY53QqVqe0X7Y|&Dv@1<ho@rY;KH)
zwum&|p&HDM<6^i(S1QQI8{_(k$!P6fg&Tf^g8Cy9@!*^?G+F7uZY|-U>M@!driHME
zl1iArf<DiNfoN(p3@m4F2i34#;>G)L?FSp7!|6Ex+?kCJttmg&Dw%Dbavq1CqP(B$
zr{M5~D_9clk2+ylJbJnp$~Ma3p)rw=)E0_{H$-C96G7nMLC^Nj7Vt7B{eWpIxI`DT
zyqm-hl+0n(a_V3=y%+=2voU^X7POUZ!n`%7z&I?5y?T2B$CU)3>xdLyCSQtIh?}GC
ztRUbBDJsrc&mMRLkmgEXEczB}Aepbi<qUGs)}6!jopN|%`&!yhF66UvOY!VU%4|>=
zf`eko%X%uWOfv2&mxPFgXLjd7^u2?j<q-JLQ-*eLmS97Z0vNuJ5jVcw3to-I>}9q#
z8pnpC>WR;61F@tGBE8XZ);xe4bxiX=(zI-d#6kbBCvJ%oH=TE8I(wR!diXZ6rCS7e
z&r5?7Mj?=4y@@%zv*nMUWkJ)$#jv=9^aJweA(r^ChKpY_(Ld+)MY=|1e&IFPZ^I^3
z?itO%X#veR&S48FYm^>Qy{4eW>K{|Tb+w#+4&Y8vznHwp2^CZ6-xk>)pRHL&8DVc}
zuXR^Y>Yv7JW{SjJVbmA?SSy}KIj+(%bGZX$n6%&Hpm=mFsy#&bylfY$4JPBexd;F6
zKOgkopA8q9XHqV04AhEJvHpt!dX3S-Mnz-LpLG$GjGRz!VP9DFWiriQ52MY7*Md~J
zFWA)SvuFK6!AbomFO8#lnmwJ}%E%p^)<Q%dAIx4h3UN;!+6Ik9WzBC)b9w?^S(%A>
zJ)ThJOXm%Fn{xhCvGX@qp;enV*tPo*3F8b5o_ZBC!iIy|$9a@bJr6ooU10%SiVpAp
zauhna_Bm}(s!K=fneK?4hly_#4ToaRpry$jZu$5**DIR}7FPp6YjXy?47vb&R&T~~
z)i`h&bQLTg2Z5(Av6qh|bGO18c)*FHm!J=wze|`zs{-$?9q>ObBN!V&3eC^g&J0F1
z?G#XLDufohmG~n(5jDkQA^p}`%ySi((t&8EuCSKX^>SekLc-w4$4FFMH;_wy*@*4@
zG%>%g4_KsS!;)9Y7<hUy@`0Y%c*YC5PR!v`FYBO|iy!gbcB1mR(O{w-gK858LOBgc
zGUmAQ89_y;o^pqUc<o?LH~KT1!-`<R_Q6uUWKi$b2b5a$K`Sx<@4hGQcko_xoE^pm
z^)yibPWNtxgF#vo`LH)fiQA@bVyc1hT;;JNynZns73_{<()>VFc5LD~E9pC5I1!TV
z(-4OpfqsX`fc#Gcl%5P@b+__COJ_Ddq1jDW%~V+JJ_i+#mV)}=6sGWS8MIQ4eaPA2
z-0*BX7To4&*<T91CV7JE;^`3H<IQCC?FKhmQgIV$=QNusn{DATXmk)mNF_1o-cRC6
zKXO1>ek^yeTF6zs53#@X#E$n@U}F?a(FT68ww`gIdwL|Rc28oC=gu%Q>l$HV`Ei^S
zV~lvg7>p9i;m2Ci1146AyL_Wqfir2ywD)1p0UuOb5(<y#?6L^5!uGC9Fj~tV#b0#M
zZ`K83)*Oad`-#}*)r+gIH)4N(m!r~L84fa~{P|($X|GKzlz`*h#{UW{rP(1~TLvjR
zbGh1SU9cJICJtGhh#&4y#?VZ4n10C{opXqDp+_9BswQUYIT!?4EyTQ&U`QGYJz6<T
z_UM^l_~DI^cpv~O@{>?)_irv8+{jIqm-Dr&D-eF{gFHgJ#;)=~5oL(no=D%)CT~7h
zBNtM-mxFHZAV>sfao%_t?PJ5(t<sek+xsC?mv><_@Wxi+go>101ns?rVB9(nB6rHb
za9t%+P$>e}<4we5naD4PC*x6*UD*1$2<qnk<!knPqqkfFcCYt`rE?0PF{+fu&R7fK
z_p8C7$`dxsI)#O#k5_)N8r*JrfXn<vpkm<w7e`$|hZU<(c7K5&GHETF??`pl5Yld#
zJ>nq+_DpYS0TYeP=k<RxdCxluC~h0epSsbGH6pNV<z(VNt|xZTwBw-a6UmIWGo-8}
z-m_x|sE4M4Ol6XwI@%MQ=8flGP7zEN^i;sWMt<4W1kDxv(7ay+I+^W+RiF3a_K_FS
zJ$^JXAZ@`)#RVi6S8(Z{eActW9q#;^fh`l=x$2%#Fsn}<n4d;SoOF_ZT@VK<+d{xL
z$qQOu?q)FJ6u4NOht`<;Tt@Q15UbTZZsK?hxKBS=8p78CF{Vdlq2ZZT;_%#frv5?>
zItt2#@V*;BCQjnrHJ-REMjwqGsE(V|RE7iNS>N?@P-5!|Ig37X#YM}pa8DIWBE3(?
z3t}xhkzVJ96}WckLE`3Zg3`b((9*ILV`(ndu(X+FOrHS0Z=#^lF$+?%J)yaO5DuLd
ziEWcE@?Lrolu?-svDf-yUBWA7lu&^|(dBR}B$#-?sod#yx6nLM4=lxpQS)~e*Kv{I
zn;{Xz2R;hVPgkL~bOqQ9+6tNx&$#iF3ihL_2IXXKcp@PKdwU;4r}rWre8vsya#n%#
z^+I;X*d3+aGhlF-4k`{<1(*~J4QGr%O3z`0S031>CBVqhPQ;-NgpNVJVDV-;B<foW
zMnk;OS4jZvVoz>mz}dpCLpXnF4Nhn$2F~0-aET>0#@AECR9%H4RR#SPLtU}L#2VJn
zX~A_Lroxit<(N}25e$1qvgTo_Xft3jZ+*WF>IPjwHX#T5{0T?V#qHvk6Z_%9jso0#
z#RsH^7qGfW%2truhVR~~W5cRWrar=6Z1^auOgefLFMZLA8x@6NfTc5<p3eg>)kR>H
znujjoQfQmmADcZh&}`3eo@iewE+1J8U%si(?9v1?Jdg9n_X5@K4j_7duT0x-HmK4a
zq}O5!l1Iv7UBeOlkHr|2T)V{0s;+=nxi5MtJmEvvo`u!4D?N8Z9e*u4i>h_wAUIYN
zPk5dIk4Za8-?t1DBMN!P;itlgtXLdwZiVSzd~t$C1x8w`;G_y-C$*jh)zjaZ+25}`
zq;vo`Q)}lU(O7-$nm)vw|3IFLK9GG#4Lf?vvo`qwAZdQbCwtR5V6j&}aq>bz@0JAo
zgJq~)cNM0DMZoH7945wJKuS|&(<jMc_~i~>cy|;8{MH9!4OLM5;Le`3FU98dRm6~v
zCI<EZFmztSMjs;0cJ&W7t?v@dYa#~z#0qiD!aNX-D>SH`>xKOyC=af`Km46m0RgSV
zQtR&m>RS|9Mi$kPBj2+F2Q_f@(p6|Sxj)!dB{9iRC6>3)m1eLrnC|u_?)o_dJBEir
z8`a7^qxV9$;%N+Mv_SQ!>8wvm73@D12!%$KTr{vrKScc!o*S1-I>HRFxc!Ik920@o
zdoE(Q`xek&MfKE%HTo_Nc2rmO;x0`C!JU)7cHbT_p}It*&{5V9`9Z8p`V5<U)ndia
zB+%~J$QWrsSFVf3_aDyT3u{EnvHvnHD**`z2=x&rl+%zzOnVu#qb#wyuStB$5?=^e
zS}HcL^~bxLJz*frM}_B?hzFR!9nP2V0NUr|Eo|dfFXh1FvJZv~L7?#phUbzOZ=V6c
zmn5z_@e+?2P(*qtFQz?Q;KrYczp9lBqD5)?#hM0~)~$fgvK&y9mm`cI-?6s0fKHMN
zph3Hdzas;nJvxQgJx+&34{T|MOnxIpC)CxhV=oHR@NlLP-p!hhS2i19Y+foT%{vMs
zK6ruhbz8oOcv^W?6=L5t{`^(27TSED4mu>)IuOXQVaGMz<u2hO{eLLjmvVV_$MN49
zTTrZLfQj1P-0~l1raSYg(0*IS%})Gf16DDtyhNP(ge=fi^u(#*>X_1ta%QKE!oTl`
z(YU5RNMmfV#WoomrnhmGU8*ps?jY8D;+Qda8+iQI$Fp_G*s7Ak4Ig!uDIbla-a`fS
zO=g1O$D=~ZauskaoCC^tN3n+VK&)O@%j&kZvw-|<&@f01rp&TN%PRwz{)`xe-{--`
zqYx|t@>t077Gcz;Vm!TMEFRh<1r5~%oaGk?rvE%=x4s%<g9XjZoEJifjvG^+T}ht3
zDE!&S0DIPFGiiJ>v@D3hjb&=6uBQSryWUJz><V4$)OjU&r7V5~GP{0$OhQD`f4vfM
zsdo;x1s`Q40cE(FYRfSLi!t9m9a`>bvw)gcTozaYE_1gq^|v9y^AiqOV;znyO)XsX
zJ6ZgBN*?}RdKTCGI|Msz4WKoSc%YA@?1^DEDmzDr^;ci#0gdEy>s1ZdoCftKSxkHO
zN^W@1Uhp03!PG1BxW<Z5__Qkp)Jklj!$lXA$Is$tR!pM$g|amL7odr&J;3WdkhC%#
z^i>k^aCbiLKNSWJGp(6Ys||SFOaXO;E5du)`BmMILD|2R1|i$Jn9K8}EF=P0`<iQ9
zb(<AeZXeF1Nh{&{=P@{HtpR>q6NK;O<WZ!OufL&wFa9_tftIl+Sr^?CxrLL^RWude
z*i?gFb04U-EnsYAHtFq(A%O1L)}jTtv?z-BGYhbB?o`@&t>JUaLO~>dhC50YLQ1dw
zlnJ*Edx&G{6?2QJjq>LqU%rZSethEj4p-q&2?B<fGbJE)LTd)seBRBKTb%G?tqvL`
zl@cR%9UteO4~j2m6Mv_&Of8x8tcS|bJ$eX+fAIjxUzx#!7sN0rrpdj$7XCgMjfQ2<
z*^{nPZ0@XPHm`yq<^kz&o*sqn^e|#MX7FBSvruwRLtJ`18MMVaxbg&5kgVJ+DC%wl
z!|gp~vQ~Y;=JP8aaLybOr<X!gGimCd|K{pTPJ-E)e*94OSu{^7MYY+<tZm5y7BW~S
zUhQ%LB$;hYZLWlC2luDBbgfucR>%?${L4k#E`TO!*Fu{1!*$x@j6T*IrNazCV&5gS
zH6Q0%-{|jAQ-n5kI+SC#L)`Z78R9GcVSk2YqMi3b(w#jKHp}NBR`mg!0oMfuU4O8C
za0#pyz2&}(U15K(WYp+e1{tScGxaf>Kxg6ske!xenq327=<zF%`0X((y}Os6yf*}G
zMz=HYlTh|$DYW_YLp${hCcKHj_GujQqRhb6UWdGpr=cJ@4J93$nEKvIP~LMzaQU6h
zl#PcGr_&pLdYTZgB99l|vVxAhKqm8vEtA<slzn|z3M~WYGAD(tVDSp6-f;rivvUTz
zFACw1c@<jDRE6MYr_fM0o9Fe76STfER27Y3hO!+XyF*OG4o|e+*O&Ao=FrP77St_I
z!Pcd2=zDhw7<S$^cuR@P`q?ea{^%UcSbdDOFUsd?YF=P<zm_}Q(Wf1jKenuzAw0+p
z26Z`QI5(HPi19i2*ZvHEizoj(XewUVNq553y<Bvtf2oSgJ?3z1C>uv!L4)Qhbh6%t
zYFesLH!lY}yjoeu;j7{a`-h<3*h^4++YAj$=8H9xoX~2uGTshJLCLRJ1C#Po&=W_`
zazip7{gs2!u5%b((an9YmZS6eXiObfioJ+4mpJ=9SD$3YkCZRP$jL4^FJ%v|qUUbr
zeMg+Q7{ORa3)Up&qlTtDs=X#%`-Cg3saeF^H1(jvDn{(J13>fVZje36!#vX<ak#?|
zZkKr(HLYVnz2`fVtbQbp3&=&yL3deu+*D|&n8y^Kd|;UYme`s*5Lyg9(9};CT8=*!
zY_9&teB~0^#5+6CJC;~UCe1wQ>mIyyFCTn|Z3XGKYStN3!AzbnfVx*-nbYp6Oh2d(
zd$}SNol?HAYsqD(JvWps)i0tf)n(lAw<74iNMub*R`c)A&ruH2LLM}!64Gy6#>&h<
z@N-*?ZL^Yj-NF9MgnY?k=U&1V>t^omO4>N8lSFR+8pOzZgHu>Bgp*cX_HrgYFMCk`
zwMiIxZUXV|TbWw?JTQGuHPqGFtmXKAP;Z~Z_51?==Y!L5OoflSOYzmjEG*Tw2DeII
zv@h9%Dhr1|-5X=bQ@kvAbp&zAQ}TrjOe-_}QO=tK^oW;5Y?Shs2BUNKVaPKMdYd_#
zMP!S+g<6Qc@tCQNt7a~AW{4#7^;;~ES@k6*UaUEcRHs?%l7TF$#tj!P&%!>LSrD>a
z3k>g{z?mN`ux*nqb`YPnCo_%z*|-=F73`(+#u!{)TS6a?DAaWs2O>>hXj@YaXTq|u
z<XJLCCX*KKFELPC0x|4;IPo6(fTrGa=F#ns`LhCWP@e?sTF2lq-AUK;JwW$*p>Xp-
z4N5PpgMc4Jysr0gh@YPdFOC$T(t|)O?f%GZZmwZoks(+uJZCEE%b|4NA~tCGMidz(
zmwg)LgFgO#n0sRyUe~2QA+?<;2jt-_Ar2azmWzj%$nfvF71%R02eiMP<Xy7`%4nvX
zLx&)i;b_4Y&^|4Ge{bw^w}8I}<P-kpfx3~$dHg<kEUqQ5;Xkds$$22xKQ@B9BuvMP
zFR1U@RctVKeG&$U3vi&bKeoB3u<FZ+&_~S-e;o25-cAm4nb;c~tFAGV#DCz(fHdAZ
z)*8x1moOxnJj^eOutYA5*itek$=QgVZymrP`T+LoTMd&J@4@3!-O#}*6&9{az`Vq#
z+{AAQ8Upnt#A|UXS->5mcR=Hh90rbl%t0-KJ2#5(!A~hPeI+g9=*5`xc`%wxt>Tid
zAQ-7ui7UeC9&K2QULyiYXR-+zHk`mwhvIShn3>p?wgVLLFLzW<;!3s~q2JpK)E#w@
zNtQ>!7`w$deR3%*rCvsLUN(RBI~k;x<hf4Izg+A{{rapu7#p4lHpA|5)g4b*q4#C5
zaf}6>U^kdLGywB1hT=W*LX7Rc0E1^$W1?gznw{(vs^xOP^4J$v8ckYl2Oa7kRfNuC
z6{P)C#1G{`#2^|6ZNo2PN>4d#zIXu?hF4;J5Qih!$P$CsKuGUlOudoV9o9-{p6-g*
zJGNn!{3O&_Q-)6mq@i9@B2%8Wjq7B(!Z?ovV%XhaE(-Nr+j<>b406E4drr_WV;47E
z_l+qHKf`>drtl-VC8XsH7Dl|d3f_B{q2Y`vpy~FIH!aGBGesv+;Z+FTb>Erpz>mB%
zU?_Q%;>%nf>VWsvLMVtmijA!Yp=bX&+`ma4GlGNow6$V1xF3o_LM-gNq>QQ?$G{n#
zENt6Z%3Y5|qRXiO+z@yg5(^RpGjX2yb-#s}*rzY(UpvIr&-k&H>d|Pu#GE`A9fGG$
zAgof8qGZ1Zyi1P8jGs%mm0Bovk0usX@8jauiZV9NoZ;?um8g&=2HoHeHlSk$+I3yv
zx$|pr^Me@FO<u&qyB_e!gz@O0Jb*_!`+?!=Bv!bObh#hSf%Y0t=3DWJ(S69e6!o~f
zTO^u&GJ&|kxrjT{!7KMFH)X`cKJu=_<&hqQeon_VQxBkVCuueM_89!mAck7J9djH<
zY^0{meC!_bErh9|%q!L)hx9;qABWSPawn7S>P1|{3b4?o`6<<~rplDXr#ceU4qk-)
z%W}}v^%(e`y9#FOW$@x0`A?3Uf`!@?a4b*)xk@^Zc3ZGa%?nufHIu22y2u)RDNpQE
z1hk$ZPSwI6LIcR7!7g?DH1jknuC!pE_YB5!JB`rmRVaLo$VSh?<yii60Veuw<r$=@
zu^$=^d%f-O=Nnb@?QCV?7L#F2ixkafE+>D8z^uD6ar?i<*w(~h<PbG%NPN#!!h<2l
zm^|)x3()46FY9@+ABwGOF@O6G>^OcBK4mV)xc8K)^Qu5>TQ5PA>BI;8l*c@`k~jMK
z4m@gf83itd&RJQ|@<YlhYRu4eJe`jrDR6p#fXZu2g&jwT1zh08BCiv#;>8mF_`w!T
ziLYgn=Ne_w^{JTh+8ov%os5Z3&XG@j3uVgoVn^=I0o65%kY#fPb-YO9ag*-<UF*2+
z^f_$L+gy0uegPFM`a^vHonh1emc5DQq=oO`U8mY8lOPa%FaF}j8!ke1A73o(jOC8z
zseEciE?U{d@eR=ln3{7H_fytx4<7`+k3O=ewHL5vT0ABhkHr7ppT<77d*O=tL3rdU
z`F&Sug7ZrbXD4TanqC7Bs4xRN%IvQF77wCV`DK+SazU^ER#1HE%Jz>-fkT>7h;gj~
zOMfeFcBh>8+&0F<w^iW1I1=qPc%Zdn40iX9#IEm~L6+K8)@J#RDF)U;cONg>VNK(5
zwZzc5CMhf3>COV$)0y(0r%Y2x5$9Y!hHn;1Q9AP?{4g4e`rE&=sK!XF&=`l;gNETN
zXOia+z6jwlWjtLv6!oG>cat*-Ri2Ch&9_Amus;T^rUzj4`BTuKs>r-Io`sv|D>17d
zWllczLD#KWbUzY@;L}~^<anLg`IuwV)6dLJ-VqZT%P3E)6r1t{CVE2WoR<?8@&brW
zPl7tldrbY~5NNmm2NH*Ciu)W0B;DF;!KR+LP%D24o1glUw{0&d@^u(sQU>jhT%q~O
zH0T`lji=}l>nMo$IgJxp4gDRSEOke}EeFwb#~A3MS<1)6>1fz`%Rm&;TH2|X0?LDG
z1d-)baQJQkvI-BBt^LKCCp_he!#&}dJk3za-<@(a8{4BjnU(W(9`Y(ntp9l)@A}u6
z9sWu0f?@$khUb-OO)bH;@8h^>zq`!#5ost(MuADH4ww~uVTK1pU?o?~**_InO^y4B
zF9Be-<QsSRcovTY(LFE!4>r7h#_iJoaBt-mXx9HSblvmdLFeh;opD{P?C;Law?x9Q
zkH=6^?jth`G64~>6Kc;5#t~JvsHgFhw{=(Ziupd+78-!6nSa^i_bJeBb`nCSh(K}U
zRG#=H61F7fqJ?-h$fDGtVYVyg>80W%9rC?cT^9_)eV}cn8*eC*;m;rA@zdjS&>yr9
z6%II)j;s*8-BL09(_qN>a-7M^=HSafFWA0Y4aY6M0IC~4@w$hh5O8}s-jer$J2^A3
zIWLUMd~C#ZF*z_OgJznd0KAoW3A=xbvHkl2aGm!b_a#l5ueLeY=_O?^RLh7TE@K-K
zi{SXGsrbw%4<%a$h$BfqZj))x_06K8{>3V=k_NN%z454X;SP^KBF3FG6SrTQiQ<}E
zY<%KHzV8V_Pfaav|ENG79pjRe*@y8G<xc8N^Wb?C=7Gb8V_YcSg4)+7K;gRMV3<-@
z=DP4W>I!L~m5~PUSP$GLrobZF?dd7a#V?0AIt&}eei|>qc)UV9;YvXzxRlt9yWq$S
zUl=-pW=gJSvGu+m*LFS4oA<q<+1?B|c6I|wUTy^21xl!{mB<YTUSuQw%D~U~DvHKF
z6nDjqMMI@9K5c~*O`(`WX9-ihK>Fc@JK(0{Sj=(tguml6K{<!&AoITBu-Q9N|8Wgd
zUn~(fMEXNvL@oQh7Sa8hA&ynZM2ig%xhrXTtzKqADtVv&eK`ur9bh2Q@x$3|d(qi=
zEvo%U!v%*6DbqEN`#KHe9b!w6Y#T3bZ<a9Y1ySIjX%2Nh?ac1(G0<3)fT1BpX!(?O
ztp;as&#m*A+d?&0EU`Q<RX}G>CbleCDxiuDd7y`KJTeQ+lm;>1FIwQ#nJm<17USXw
zPc+CHk7kdugqGLc;-Y~kQCwSv5~Eac=ggDP@F|gLHtV5>fdw`nSPw!qouP(FaK~a4
z4)i~T%D2oxR7Sacqe7W<Kpt<Jyr0Sbo6QZ6pMo>$MHo5mGM;!%`-o^u%AuajHSDTD
z^f<CC&%BL^zKghQ>msqjrDz=0V~dFyil}r;6O+`)Th|^8y(_e_<G*$x;`SW8yWte(
z-CD*T>-a$b-_|IltQI-j6$m8_%wa%0*lpr`UWo(3@Z(@HNC1Z!bGh-WC7{0OAz%I7
z5G6-;8QiZLh8?eO@s!pgJRzez+l<}h^_f8YCtto<*MM@b*BWHpCO&U~HmXp5sc$M}
z5{=k^O_z^SZfzKwJIWI)Mh0TY>Nw1ExFfdIq@1dvFYKIS4eF3S|HV)*RF~BWqCwSQ
z;yVMn{|N)xFAuTmjm>Owh!5(;Prw+$CoG`5t+RbQy{A$GlSC_Sm1&1t=bPf)BqhB4
zViM(ihhu9@EQ7U)kf$5LOur%vZ?}hLi&Nx_{UCOk=?qS%PlBfMA;?{`A7eTzAWt%m
z-5gen(}I>^gJ}k|zg`VmResnSV2DaZ5v+y2n=3~zV%u{S=9UzPdWLgA(Z`#$%#s(2
z<o&_qpMM~)aRuh@kA#O-!zow93GVt!Ve$2uXns2%;FuE_{yNJ=Y3bs(6Qfb~*G33<
zS%rn?YvKCqEEsU!5Ajwm4}Vb3>SuoDg*l<1@3WmxE}V*TbrS3vj_e2zL&v}Zn7z>!
zef!vhlJ#n^Sk?<<s__PqhfAR4GJs^QrGbq#7w&D!#KeBF0AsFkg{-f<VYNL(%K1a*
z>1KvCdeC%#2L!&aq1}3v;5y|1l&S{eI*(y!cvzo1gjBK)$6ws@W-&<R5!7^hVZ-M{
zu=fuK!zDIC2D<_Q25a%b?|FEuI|GUobg{nt9BIEvTb-~BB?tTR_q9dfL2^U&cc*#N
zP8YBUiy<cGQNddphM$sVVYQ(SsE+x_L&dgK6B9R)i($e6>PIyqS;hkwW>}yFZN+M!
zNh~78szX@kb`<O`$K#GU>G=1uA1HTkV*$inG+I0#o3?qOTFyqWbovKuOb4Q?_Cjcp
z9}Vpta?tYjIn$rxjSltYye@4BcQY&lS$a3OcPNLchc{y@@kvL`oR1|&yRmxIb7t7s
zk2$<eLhV4H46SG=O*7_+vDf*e<@?Ym!2z9bjKq#B|A6Jgcr1M44#Gh%=sX_<I$z@O
z$mLjQHd_TfdPC7`)_rc)(8|X(^g+EYU#QqI3`L^t^K!H|fa&N$Y@WOV>TZ&T{Mj@(
zk>-I)HTA4$r+_wlbA^^2$0#4}0K5Ld8yfcv!j?J5;oe|Rls-4$()CC9`lc|9HJ%Oy
zN8>SLQ9V!Wl@0rDS7Ek#EbaBO;LW{ka1GD_>6%}x^ivNXTUdnJFCX#Jl8G$NbrK4`
zq|<An`BYdXGpiRsGkgmSa7#h;87tWyb_pJhJBdd&&>2TPK!?l;0=j#HNnthQ^-V@6
z7jM3@_bII29moRqP`2j8Y_3;56kOy*&`kBL=izktPn-N5LEreLb@M29uFgPo*9j%L
zXT*3emUpb`5v-O7XmwZ~TtZ1JVkd*H9&P9|t_qwxC?onxiIC<-Jm91h82U2@bR~_<
ztfvTcAMb{~6Xsxv@^D-h;EY+VNHd-+s52hQv}>1v`W!z&r2VVJWz<Z1*T#e1MQ=c7
zE4ch@5Mr}FXs6xa(l5Kw<xM3hR*j`x$J=H8H<QRWqzD}|EWvJXEf1J+4mV8XAj-2S
zRqHl^yVb$a=R^grw4aTp^176*vJe~}8-thC8NBy`yuR0;GM(u6+~FTb_T%p?RNpJX
z_DK;4o}^QbI|NF(<I$q1ndi6<g`ZA^c<=ymHhL*DwSkmld6;yO%W8R&WeV06NN~{0
zWT;J1#f-08c|%kgWo*(Ozw8|I+Hj4%(piny_2bbZC7f%=C34gJ!w@hck4av==RbbN
zW78LZZ0?y3YCNC6^vJ;X>Wi>2Z8YtS4uau;Hl{FA4O=_Qz_d#OmNt2yvm=6q{5``z
zL<eH$hg9~yITx?bpm|B+5Y|!M!sY+Qq4rui(n&;#)dyHXLw+J_xOPE2AY4is5p}5A
z7fhVaLu{50&D?M#-0mBU)ql&_x4tzPJ8L6st1G5jT^_A&P||niHcY#@8g~x6hyl+z
z)RPV~WNIu=x!wm_w+UEsRtqx@hJ(!gvq54%dEPel0Qe|J5DPK}uP2wo_loTpE}aW$
zR)y%MF$(i)<S=CRVbJ;H51~)vAotT!w4SgD*N~sj#`7VQ9n^vPn<rs3c|X*y84w$z
zhS}Jv3LTnFyn%FMl5ftuxyTLTwi=+x|50@AaWSr47#@TWk`O`^iZ&ssc@~loqLGj!
zAtWJ$BuoeCY$7_>sHAi-9dvk~l}0C_O^Dql#7;uaA%t&z|MTP5Oz-<VYu)#C4J-lu
z;yhMG?9)2qb7)?@8uu#G(SC$0XeHOPfBJZ1;HPkW;8BTf@SJrT&SP3yV_4pc3iSA`
zMU4Jd9zAChqOJ<m+Khn;u|G8Zq<uxL12jf^kUw`tspqRw(Al4WN?{jjE=`6}8ME=P
zp+z`gOcAPYdBoN8k1){+e{5W0%{Kja1z<Gofv3IXlg8(RT+bSI`JIQp+kG&j)s$Hs
z`OeQhD?sC^Cs=@n28w=;5-Z*N2?@P7f%4-%NQfT^AFK~Eg@qdaEE$J&cc#EYt*hAP
z-NoKbi-M$K$=Feo$XdS@0gTgxHtLRh?mrG;)xm&=&cc#i3edeu+}bc>$WA+t#+3b3
z4!Fgn!O?KZfWFH!8<^_L04D3Vg6lNeamQ;(d|qt|?!J8y6}l$?FDEkTywRm|ztOp{
zQKPiexEC%pF-Dz9zlETjXkNl*K(^UAZtf~(&Oi6lng0#-m;vU`LR8L;!@8}LKz8qf
z5H|QKM9%X@vnSaI{wgrnW)u$DcL+xp9z|K7O<>ulja!y|=K%`JX}viP7KTyqaM4I|
zAK3C{%DcEPI)y%iGr^ED4bI)tQ8{k3Fgb2GruY%(qcjF5X>Y-nvpJBObPB7wUBuk~
zI9UA}$~>NqW;!7WDBW#fCR=wL#@{T*=9*_>19Q6nPff;%7;DVFl!$G{JE7&;c+i|4
zhmB{eut+}@{8y*ocHN`sYn6reY!s;N-UA5}Y{2jD5jd{zDd^;(;C?)neczKozR0~y
zG&hw?|MM2qMo%K9O*wz{-(0K@i(rahjY0*vo)a!UXHmqb?u=Xrp2^pkYmzQkzNj?w
zNGKE}+SEDvL-)xED#RKZ!zDW1nA*c*P%@o?#x?~+)W~s@WeUp9j)vDg_Thm$QQ$&z
zP}T0v-13zY1&w?(ra2)DxW{sX%HiP2vluwHFV1>mhN8~Nrjmt&nJ}gZniI{jalS6U
zu_hN}>4*7`iwN%G0JM<F!B(09CJHrFz2CqsZiZ7vHwn^Oj)G+3VD7QjiW}<<#;-4k
zE52hbmyG`^{(WE<`Wshb?|%+s;m%;(MEfWg)e0t^zD98Bbsns~{9v*s9q0`04N6gz
znc`dz$ZyM}{l6)=Wzz5F=@7y5Yz()$G7+@@9R#U^!*Pg_4@%!u@KwZaeK1}VV@mv~
z%l?x&o>&Z`S+ONeIx*PgJsu)d3qcy4E^at=o1bmG3Qs<TLt0Qacfaih%5{6iQ*UfT
z^&8)q#2^**<fR}p)#Z2ZnPX766`#An2VG$>w_N28Q!6>PXL%#7NI+w{Co2A3%dfp0
zgGRTa*+su9d^DJx94&hxAm=`B8@h=6qpP4}QW$7dkOO|#F>c@c0<>(jg~Zzv&_}WZ
ztQ{r-{uPK*Hqd>xhb#Z>dj##5&jKy-e5$r~al51@@&>7Z)AeNtA=O}~dy|>(C=xnu
zJY}6dY<R9$H7d)dLdJvhOl@F1D@;@}@rzKJflq^rmrugxxMP^ze=gYREP%!hp<G9_
zT<BaTf*FxM=-adeoAU&wx+R&}%1(j&F!}K`Z?j={YS7O%5d!*`g8CWqtYy6C3wy<a
zG-x3V*ISDp{nBuHY%Hwl&r$rW3_M1rLyOvGjJM1{+0(cDi()Do>z=^J2|KZKhdV!3
zEZ~FnTToy33v-%KOgoTr5{bBj>smSfh@@v@ZZ0nz{r}&YV-R&Rlyx?pV$vtEEbu@m
z?S{z<A+O-pL0`DJkvBZq77tq-uHvSdIas*oZ`QM`2<xhd$^X|uD7>5wUC)V`b9^rg
z81|C?EH1&cC6AfPs^LJpL>Ayb2cmW?M9VwVh=n*C<i8`CX4Y=_QF{_Tj4u~kKPNv{
zTqhebKNK3CmlOMe`c1TtHS`Yztr6rVfoNuYD2ZQ&44gjC7hQw*LEXhb7IXYO%pNPl
zT4E`wO<NC|L4WfwwQH;+V-|fbJ>Y|OHxORL!=&HouuJ0<+%oY4yW#%i6Cv-gmmeg)
zIF9){&w%K&HmIk*WTLkPB`@8q(R}qcE;0B=F#qt8Lys)-lP+Zg&M(9P!KK(Nci^?g
z$&?dPfq;Rk*fLTH4%!0V(uf6}+bdYcgj3vL;%cmU?TqHZ4@;x>7T|+|3T%#b<{pQ9
z1(zSxmAaAxO~dFNcH1A{UGk$G(;SpKFXXiiX6W9YflX`NA-B&|R7?yMwDk63!s(k_
zuJVk_?T@2kS06}QRD}tYebZmt9Smc<U<vJY)cxa$yW#_V_tij?HnH9HRPg8?8=3|9
z!H(_;IJ&(U)Mcx=vZxza+z&w6N+a^pD&WR+>c7XE^7_D@=&{ZZ10Jt{tJ)h-&F>JT
z{UNt;)<(42<HYnIm!ss>C>~il75m!s#J%C&@x~n$bo!6jB+sl+?!OsK;yqC^!x&b#
zUq)s3zPN^FMXm)3ZaS)fjR~i7>0N6uk2M9A?t38D=>(`Bc+c_*PoY&Daq@$vl`0!P
znTgJ1m8y>+S3}A#uJB*aEqz0oN8i&J^XMzr$}I!sWO~*H8-S6D3s0C(18N3ypu~73
zlxj{y>CX$LLdX^HDo$mmt`R4?%!jvMSqokZrCg;llc~%0kOS*){y8iP#TRJ@TH3|s
z2h4a`?ggxMScD4wBrwds&6=*Rg|y1cT$n3^3%%yj+$e@Q`JIH9H|=os0ed_=)e$LT
zMfsa1=6duh*f}?_vMChmd5w_s?GO$VQSqZ<5Qt*Q>0U^_y!AosNoEkviH<;#yPc`_
z?U_*3mvi&=HD-Z_PGDi^RrFcS;O;hZu$-YBi%}Rps*gtPWk!%;rwP|j%thIvJM4yj
zFI4}M3x(;qc-6)a<&}Gx^j3~ponjFDnW>KrMX`K^&OyvRwg-l#PR94sE~1iM;8!Mu
zg7;TnFdQ}s^wk2{!2}I#)0l)c;l0r?N&q+O5-2o&$Ls&=&D4hkvu;C9VrO1I?!dAk
zEb|;#rB~r<;)<z-kA^S0wBP*V#!r2U!nwv%u_5U>YdJ1wc2|mdl*1dY^gbj=Egd1O
zbP(c4%0e`J;rZc8$bKKhH`YwTd>>z&V-t=;f5yT7t%q>XQtIXnI}FnA<fGnx1?vBG
zf;blw49I!NG%vp9Rl-yp*ncU_>f~m3Rg1vrErQMLV3htN78>)YKX^Tz&2UP@<#L*n
zygA0D^Di<Tvq|Ep-aSFN_ZpAd7ljc(bK*D`G_pcrd1e`y@TIiVio_y|98}qq1)C1r
zqD*puT$oRJuPZ6oD>4J>TkkSE%TR2nKgXPtzKKWgErPc3*>EY?8_g>`put(merpfI
zlo4gn@I@Oc|1Cl1dp#&q??5|&U##kB4O5PnqvCWd`QkM&aqV_Y2(^Nw!Nj$zRT8t^
zU7UGEf$o=U*gy1K^?PNF?X5xJwVir*uf8$0$7{f{-*hM}+6C=hTJYFA2Y*)Y$HINy
zpgSQRV|L8LjA@LyCkEo%(77mz+zaYO34+j<W{9V?pfa_992?bOza<!e_C?O!j)=`G
zL+KoIh-?1(o3&hB0JU)^u|D=7VxBMD%An8L-xbVL1I5jEY_V(D4JJwPg)3E6U^F6D
zEdOKAetT`ehWsNCWkQ*^u~IM|W=fpWC1$mXaopN&F}XqL*(s`EwyTJjG(idaFX^sR
zJqPp}PlN5})6|vRNu1gBF#2g3I0^^Rre6^@@0`jTx_UxP+8FM=dlz2Z>W8NL4#I$C
z#FJe*7sfQ3Vd1+KXf#1CHX$a5`btv(3vKY)ycR@1c4Jilb-b=LaN`jpAx$%%JLP0y
z6H5Xs$C=;~M)UUz|M7xPe__r&;-_jIgok$$L2Bv+7XRg<){7{Ru9BE-5vc1Glg2b>
z?uPnpr<qQEg4kxA7R`M&5Nqy-SW&bLY*O~)9@;5(Oj?3Qo?f6xA#Rv$e;7M{6Xxg9
z=lT5<Yz?6f+@y=F#W0!uQCW?%bdKWmkX-07&kD6Soq*<zl$G4CO+3yS@L|U$P#Y7*
zrkLk~<3!?6sak=MMZTvQR?yUQ7f5zZqTOIQ++VDZ=1ccM`<VO8OMg0(?G(e9%h^~q
z5YV;0m1X#^XL;sc)NyjBGwDfY{NoDla5{t;x2m9_jl8d8!rAJOwP=6b8<bnO@`=iY
zn7L&>mdslO^+O6^xt|Xz_1c-#W<CpQUIJkbCs?Ci6v+2&CH8MPw^CgOdXpM?kIl~L
z&}}TuT5Xvb-+-da^}+#tDHu-7U_PI;K-=U7FMOu=|6Cxn_jW_eYh~bhiE`qiqu8jk
zhe_O5GM@{7vtr^JByIHv!RaD|HHNd%+h`BacNjY`!50+WON81@#84Z442(3ch#%M-
z2QS|X5VZDhULr~Z$D@-WJvae|wu|s-&=vgpAO}{@^+1{G8G06aiYsPBVDp}K)*uT2
zV--Zv7dNa*HpTJn8fa;qMoe!P>VD6{qy58hA?1%1soHGm=ZRRd`~bwo<l!ydM6_Sx
z#lpXk$G4pN0;^B3q=m((F!;k9M;U{$i9cvE1*F9oQ|E%b@ZYvlu5SmZwd8}!^L>!v
z5W_^bcfvTMQ?SXT2AY2k1o_I9f@U`#@JP`BEw`79KhDQGc?UOK8A5l`I2bJpg3+3X
z(ML-HfwSXihjf|0o)iJHI?BoqpnUI)S!PBPH-XiA8R#574^7EUykz@g-aJbzsIAIm
zus8~8zO1Ewa}a3tXlLzrGk6Gne(gMi?MbUdy*Vkc;<-I4r!8ZRn&(jQ#S&D<{NIon
zL3`e9D0R^j;<5sWm#!!F*g!M$se16{c_2PF5kR)HSon}Z=dnr0K+zQq?;=CsLwqEd
zN4*rEr8}Nzk2CwQ$cVeepCb49SSI@HXR2+m2K8Ed!|hocQL^_4ESyqF{*pE>|GrO<
z->V_-l^tt7-W$R$7=ud2NT_eH1C?F(`Jx4BI59Q@dugSD!-+)b5l_5eV?UmfL*C4E
zJ=hvogjIhYa<zUZ@!A@4$x=5l#-Bd-)8AOVuMU%bj0g37B4R}qifbNLq2stj7IDiD
z{Yr?(A^*lT$Bn1wT_EwhbD?v<zr3UB0<JGUffM41SKm_(L&c?#u!Z<TJ(EGf%bC)^
zjH|AB!LpOy^Qd!1v}d>B2{VWRDIUuQq^(1Z+?`mR8HaPUD3^Dl7-jduL0WDLij}tn
zr>_MNSQdcJW8V^+<2c9{yRZ*!`(Woua=1*7p?uOT=sPwC>eci>b*TdveN7h^|8oW1
zC;OnJbQ)`7D`Cl+7)&G2Mqw1?C*Boe>tDgJ_d*&}y-MURB^UU~>^z*ZHUMWkoW*;s
zh1mW~U{wXDVY*EUlwJx$ujCNy;K{Jibs75n31ICO3E(nHNj&;1=qa8LBhPL`{V`|3
zi@NVlOQ;KAUkNI`9O3)x4Y+#XMQFdF2T_j{(6BZLeIhEsb^dZ_IZR#fwO@s2$F^a8
z@Mx~rvkXOpE|pBb7J{b|V!(dICT>1TA)Nb4eZRhYAi_EnpS_^-dFfo1R*}rFzpucI
zIdXJ-s$^QD^69L&oqx#P4a+<d@$5PU=xz9$U(pT&we*o-x6&8dBuSvJSIlJ>x-qT5
zsc>uZ6|mRY%^qYVg7!)caMg`vuTLg`zO<RE6l5?p;?HQ0-wf*ARbl&Eee6i4Ovm67
z_Ofslrdi%(ilkAX@0AX&Thv)ij2uiCgg~j?G1^%eKtoIcORyb)O+T-)lEH()ct`<O
znb31E>;>Cp8w&DX7Hp221^KT0K=pS!tN)kk57fEWJQswuZF|Xw8!5I@PhnOggIKnm
z1$=T21U<_J9=R(YKeX(Crdz~-9#sIQ)9BgnFaY;xB6?TKA>d^n2ul6ITx(poRrf5O
z{k11FCeC51U#@aJmkHqMOy|egFlP5F0h;%&q1|(9X_}1sc4rqbcl&5w>uUvzH#!mj
z=A791{wTOJ=q$?j)H9Wx%Q5{#1}fa2i?f!I2ifH{pSq-)JP8v~X)=u8BQ8!?)ByCF
z@gJW=-XksBYh3q)IR*@t;fU@2P<U+{XqwDJg_b`aPV~nn7=-r5-N4B695%fT0Q+f{
zOl8k7u=ERoi_a;u-7TH9T18=7pfyxkRA5sQc}T^*F)D5aF0J%I?Oi9p=!qwoj+C%~
zu3ucmhFtNH460^Pw)grwK~+=AQ%rPFnjb5~Cz}w1GoPJYnu5*WJj9y5eKCKb3`A}V
zVTN7=u6byVnj4HjEtR~M!H1zEQx5`)CxZU=eoRdK)@!RwQNGat#qMF)>0ZXa?>mBB
z>Dt)7CJl`S$l+Q5N27dK^g681_fCk$Dfw~mpdt{p{9f>{G7k)DvxX!e>Rd~RJr{ec
z)ETJ%)!T);uU<$jHa~3Hbc5+U*AQy$+F55;41A8I+}XcVgpt(2)=@vh-HLqBes4I|
z9x!B@e#GV`H@&;tRi?S1mOJ_{fv4JW=zn(`YQGF(e>v#mYx+Mqo1W&M!iJ%^M<@hs
z&Be53*SWOrFiUgRgqG>mAgOsMmj1UI0y<-u^k*ike>53ur&WMT1Gz3=x-%Vvc(KIh
zgHUBY1x+r-gQO>&L&BO^J^5C(^=OaSmVvH%ry%X0n_T;L3YR}|<Uy}&xqQ2YAP*YO
z8Ve6X+Wu>-ZL%C>uIA+V1BeZpj=hSNv{T6kqZcEX<M<MqSy5JWgq9c<mZQsgBhWd0
z6fB9~VybzDHGFwYnaB>Fo^lGj?(T;6ew0(VRszd2iM0~h2X<;M!u6caVdEp1`oI;S
zvZS2Loepu!ag*><bYFD!iNd=X+34Cfo6B~+6t}%kf}{@>7`Y%BV-^kv^B8+*@~H;@
z;40M8Im4Zfbwi^)exP&c3T8zRi|WEC9KDjfC!6w^{_rxGu#ht3e1~wPeG+Dm^aN)v
z2cyub7_<3brkzzxjAem5$``QvmQ09$7C;Qa8Qg1VIcPO(;!#2w<tVD~!~VITT6Tg3
zJy)f>p)NG<>%%tPF2REbY34GmlwG`Y8Wc^Ju`c&CJEJp#`l$K1{ChR%*9_vK@+W3J
zClWvC$S!oXj3(!HhF~|)6uRaw#n$LzR1Sa2KLrO97i}DC8a5fEyt?%6t87?NyB6!m
z$l<Qa8C<=We6$9yn0$2(Wl#M}(W{7M-}%gISA=u>v@`|=bQh|jPS4CY%)7D_9h;MR
za{~y&?xi3a+~+c%D5za^oEf_X^ES0v+&=yof4yZn)@ZE7fYk}0YVXT>q-Eg?Lj!F5
z@rOCzJ`EA2(O|1ib5IXr1{XVlTSW*=QclDH(yiFoAz@vaIUsc;c2BGo+>rU9;++aB
zepi5DRa0TvyVGc$5y=b>xACy~G8}6#g|cA1S<BEWuFyP*(xjD?A3KDS<iA)@pf+oK
z5QJYBTG9?PiyTXzxYx59>RCv@*h+vlz6?6*4j?oKLeWclpT<Um`PV!cni>X6N~<B}
zo+{f=c^Hje=Hu79j##&40XbicaQdRl(El!CSK$O`H2umdw+_LExq)E)EDlt>w~?!7
z5Bq-H19#~qL0^Lm^c`~+_2};MC_feDe?JfkSKQ@BNo!!?p$pixf^yV8?^#z~CA+a@
z1u7esh%KXzL)+t{Ae(H<j824$JKQt)6rb~Gv{1o^-ampyV-jJZdntYxt7IX=ICfFL
z@JT;EoEMUe&V~;-W~DMa4>PbUDTkpSgQ4*}<*ZW+K>f{i!Rwz^CSM*5Ro&?wToudm
zzmj|WW)3zxFBfofJwHpk7=7_BXgDi?ar_lj4Q}DZuZl5dSvv2~zsdBiLz!{!onV-m
z%5SVCUxjc;XdYWG)*E~j?4}PzZ6k7akQXY!u#*^Qkzm@rg13j1GudCe!7{Lh$3@Dp
zIYEP~tyzXzYriu~Qzxh=x3d;yfW5;lDO32R)aO|#F`%Q&qUKy;5t4G)^dKIZ&y?^W
z{bMX(w!lR}+NIU@@#uZ70L%w>G12f8ZhlHnY}irCCBN_SmnJ6>+v3R^qYLts?ZSy!
zCsFawFgDaIkvftM+_OZ4ot6`z^Sl&RJy?ug&*<E?VTL$hSZ@qVDBzM^w}tvSeOU7W
z3Dem$Q#|qfAj}s9V6PwDQ90QPl)+z1JA?mau?D-*Xh#<>s6q6KoCV278*!j)5e8A8
zV%LHi@cJ15I!X_5p}8u^E)@#)_rpPF?n|*+g)g`Nbp|^XsqBDvB&y$31KH{^tmcpa
z&xPq2lBEPY6YAj`PG<Wni%|bfCu>-%0TNLbuLx7cBbJo4b@v3rJ(}P;ClHRR(XQ0T
z6e5y~pur@S%gzi13{pqiyQ|P^_k3_>p}eu@M<)62EpuLJ3mg1W&_gE0l$dj1c-aNp
zSvKnUTjSyIEYxvb#6{VL;?A80yumOTq=l<X+s}le@?0z&P1M1Ha*lRG9MM{23E6rg
z6naY0S*2b`+mZ)UTnw>olQz>Dwi7ennX{_Zaa?ihH+Omw2q~rhP#;^4j|Yy&I^}7|
zTVI0-R&`wKdKkzK-()e%)<e40Va$E6z{@9#P~<j+m915v4zU&9xzIjvR1iM=d;xWS
z_v6<aG|=c#x_C~PF|O^LfwhLzdl~G@)sJ|fv+NZUd#K{aZdqtJb{(de7+_4pNeER<
z$0vrwtyukx_6#$bxsQ|ZV|qNS&eK3|-*xy+l~{D4aUi<hWH#}s6IR_n#bsBCV>P;r
z9SYirj(!E;P)3}>am3@TP2*PU#Y{SKHea<>K$Z6H;NTU6f$BYR$&h{cJMIFge{<m#
zwDXSHr2$sc%Au}e2`m|Z6>J;g!TU%oIcmDZw=e9+eDQH|Bs2<XWBbEvjq`B(*%nM{
zT11}qIA-Caf>+*>OK$W7X1M4)4zyi?1ql(j{nK)6sj1^(D(;Y~t&N@^gFroZE$n=`
z8^eCA1k20aA>dgB9uM;)FL?<xZQBEPPsBn^V<t4&CbF3p6VR-aW*)YYJVBhy^oE^+
z;`gbj`BxJ^dtDAmZlTokJAj4v_QPR&ZB&9GpWs#rF<$koEqfxU51z&)P7*lNHy$@N
z&BOEe5^#+64)m&g%o7atAmN|c*mB4l8YK&uk4gwik7VKE_CdJ!_8F`@n9N7vS#%xI
z!0NuvA_vB49DF|-yJidntGjO8e1tV-EE)hVH3}AV$po_Z2atQGi^(fi2|9gKQ9W}7
z6As9+s)q)2g=UiPLWQfxu7XYT&wz6vb!K~<2j#y}rT<V@#=Ka@&41k#+vmPxRtrvp
z^WFyLd{k51@hu-_yz@qfhsC&1I}=^MPv9=qxy(|%AGD=BWG*r5AlqmicpRdBwo@2)
zH?zdgW2;cKdZ4M#G=y>c$uV@K0P3Q0$tyhv?EYQ{H}aLJo+|;7zreMcuQ9#;C1Agd
z-nmaQOWWG4nY!jM_GwQQ&D3r9Icq~Sof*k+q7x|2{}iltbh7pvv#~moI@|xQf&oz%
zV9L94VjesaJLZLQjkmqgZpBQ<5j$YJO*Y6a90kK0eKD>149yfZg|wqXVdg<421ILt
zspUd&@0QGKd#mtw>*<^}uu5$7D-#{G^)WA<;gEZ@(`=5xaTSO0k`eKen$CmsTW40Y
zg!Y4{9+ZX+%VY^2MxZ%l8?1TmgI!~cVVSc7+OHZ2%27p>aRO7j*r_0W+eawTRIvMz
zTX77X3)(cF@z>S9^s@-(eRuZ3;%8G)+FFICc}KCH{D-1D-_0a1-Qb<(Y)nu)&aWJu
zj}KGnuCSvD0*;G7YaVf(NA&}*1xCzs#!qf;Xd+h5O*ISh3c&WrEuh`L66_<1xl~7c
zz<+CbYxQK}6&Nz*jeRi3eHOm1U57@o!{}WAr8w9TYHVY0Kp!dkY`p?1-=gT8l84J@
zoFS(m^#di8$!s-X(T>z7NXp_xBZxm#JqnDJ7nt<R5Z0934Ln*IxVY$pUDN}5R-fmi
z_auUkgyvP`L-IACJk4MmRx4HKpA^JJyVDOpU-ZJ}84lRCWHQKfXLHp))FmD8ggZ0Z
ziEInRw2FP8x%Vc29Ucr>VHy}S5Ye;Pi(Iw40spPzspH3Cl5I9Pen|%Tq_N_<-rCgL
zZRZI#zEC@E3&^_SVM=r;XrEJ~vyDBL(DT1j9tuYF;i&g65{&t1*tcmkW>3A&`h1;^
zKVOC6mOeVD`t3Y~4jPA=(H!00MM1;cfnfitjZ3!f<MrFAcQ<D+bY^BlP_!CX*-^%A
zZ|^{5qhPkK!2=!s$%BKT^YOt6eJtE{37u2Ma8=4ZrVLC(sm9-CQ`9su=-30U-v6Ok
z&IdE+ey>=`p-|lTH6AUDHL>llzgf1uKW+^t2mK~u|LmO2b&A@=_L3Z~+D3DiD=MJu
z_zOtH1u<<Sp*B97JRvn)(dr43_&5P4rSY7{Q5f{JmDPp%pw`4}u6k!Fw2ceL!F_2i
zw<H+N7jNLQfkgt{RZ+gX8(T7UIEKVVf~&(%Cel1$8f9+>dfzu<TcHxdM(faS!<jf-
z0_A7sL02#8Hx~S3w(mtAy4A~Y^l@914yF9d8BM|J>R_(cbe%Uo=;W<Ksju%$v*gXe
zxLQ&KBPz~<+kVQ$jd{lv_SJ&@FBkBb|AR?aXq3t|4Ve47B9>x!1~W7tFwwK!V$)?N
zysboyZCPK5o|B2iU1kqSNny}C=^`FIMc>P(Y0wfI&PCU|gQ}<mD+Z23nTaY`Zo5oK
zhzI=Aa)uF>l~~d`k3X3kjH(4c*oS5ZX4KUp?iw`$o!gUnwox#j{@5N1o||A(z-6R$
z9H<{Jhv^PQ*krQ}zMc(&gaOew$UllYAI<DOWmF7<A27?u&FsVKk4$GkU$Jy6-M7W@
z=rL{rbUpQD@4OacmCJlE%`k(aEc*L8vCO&0X4Y<bj&&TLMl=6fW-cs&>sRKX&QKr0
zIY)$_XRN^A#|psc+jBu=v8+UA)LrqgkLLJQuNPW9Is|n;^}%yy3iajx6@R#TT9Ea>
z1aI<*Ir+YT$>;V2`P83G|9vqpv7vc{GwrwnEKBQ7YJ%gDi_CFm3|!2&$JhU)g0y!#
zk2q_N&BwR0m~$cIQ+dObhEiT=9m^`drNgz&mr#AV8}VHo#iCvgX6@Umn6@rC3!eQY
z?$;v_t6LXiLsu$_E05rHCv{ACbP^P6EV)j%BKUMU4vJiAK<ROmYi_y2T}Nuchw3)s
zoBif(zayCEG-3sQSSXCIzKpXJVbllp1p^}k^y*)ZmS^UH&h7_-$-th7^TXKPkr|)}
z+z#~-^Y{Vc!ziYtiycQ3t0S-g^as0hyMPj;co?=A<>LeM{&=)-C$WwKpz-w(C<*z%
zg07u{#%(5OzUBaQbVp`Ukc2@kXIV@`285|aqm{-~t~mRHpS(h|m^trwUB!HOzMhze
zEh12#`h}ezLV2|{|FE#)G_L5D1Op0lQB$iF4?V3$&qL(vRjY)XZ<e6rs9wNy1w8p>
zF}kPq0iyyNuD@g(ac#{&a~Am*3myucZa&2JrOx9g9cEbT3w<>Tz-8oE_^_4k6gqn$
z&GZ;J?p5-WBVzFL?DeQRXC`Yt(_O4{M#dDDbFlARgyo+pZ{kWkmW|}}7_yB^%Hy#6
z<y`7QnDEhbPku+aFAqTkM#nXo{?>6!^yIFY)H{(f-Mc|&VGp?LLV4N9;oOTYXPfpW
zqosQ;Tq}>HS>sNaR7ZQx#l)`2D@Bhc>S9)eaFsc8peVN*fBM^_N4G$*OgMyVQ?wCV
zy}^9~r&;WKA!yJ=Y}u9sn*W`F#u*V{@AwzD>6VBqG}qzsV+eJnDzI<t5j4x+hZz#;
zq|7R3(*zqd{$>I%mg}Kpyeou7-{Z<H%LV0gBZ%6c3?>6jP*QP9*l^JX&ki_)KE9do
z)$%xeSWXPRzVpEKTOu(hCcylhYFwNefKLw<;uFCOt%jWi^HD3sUb{HTa)RLEmeu&g
z@(emp>m_8*Q-?Zr5ttUPK)v16Rj+q|=G$F@R!BXwEW6CN9-M%xz0#RHK2K08(@WKo
z%JIwAO3ZsV9WSmrhu@wLz^(KgP;~u8{q<@5>xF1+$ms>%hZJBm@`1ShBk?kcZ67eZ
zhM9*1miA~WN7XyMSm(KmAkTCZYnn|!^Kp#({7L39eeEGCb0XCICKgh9iP-Q>EU0XY
zK)vtVA#Uthh-yt^P4%akbxR^PtVm|A+p>rwQwZ@EeyH<f4NR)6!ClYGp)0?JG-c;t
z{dxyfn&&XI*bVYU$HcA&f3cX)=dn$!&)!Xr!_8&MSa|X*#Pl2oF?xS^x1}NI^Er$a
z9`tAHBUWJ5n10YW>@m}~pU3L2+VJKh9|gJZ5gyapi^Zcqwz=G4h0~Lv&g~v+oKFss
zuI0>p=0Y|jWe~avXEB)0MvFd)5l>%$1GTF#VaH~un_R?RC+0w{XgqZcby)MScyuiv
z!84BZLXDd-XhDAJl3#O}&Y(NOyY<<ywK5DQcjVyNKd~@nXac?#<q}sSj)``kM@8#=
zaJ_Mp8JS$<dL=s`@b@nKthE=D&PBsrT!3>mOK`=`d>ocT-j_p4tbO&E4c$Sx%2yj8
zZpv=-zIPn;2ft;a+Qs00j{FS?YUF3@gYp9@LQ6po80FZ5W?C#*9tnl~Vq)#5szab_
z9Msz8z=N6~Y+JV<GA93F_5XMS6lWu5t^#vE>SI3M!rPbL<z9a@nP4i$Pk)z#QA;sq
zRP2UR$K~)@ehN3hc+@-P4r(XeAok%3^rCs$k2E=2+GoImL09oL2vG9ZKxTfAiL37W
zvX=LhN!@pbOXoTBuE<CL&1#<VDjH0WMZ<@|Jwd182Y0<?4X|@Gc=+Zq)esxrCp8LH
z$67LrS(RX3KM0x=wzDb!MZ@ccLukx0@yV@l=onQ7JH^!)(Hw@F+snc8YCTi$My}vJ
zLtu({2v&8q@Q4xl__Qn@@iFyPCY_?$Vj4F}rSn`bJ*c&d=4n~U;H?@0sl}rZ9yBw#
z#UH^B2cnuMxxwDN63e!EqH;tf&$l`aTkjBqmMes^QVAZ+*o?BnzCw3TQ?!~>fF0wt
zL3FCL#BS*r94n<e#KT0Wi7TNT>}xJ684C3yH$&3slOX!Ln%BFQb7!+Pp!P!zrLPj%
z<xpp=|9VY`dTfrStFAE!xy>%!I*6__%vs=nNl;|r4;isp%&Mn8&v%Q2`~{Kd`RfCB
z?@J_|o*~3Hb_TeA8HwD$M6sX6<R*vOf%#l{L6_;BM+lof3iVd*0-q*E`#gQ9x@&|@
z&t7x+Iy#>YH9!rw^MF66K#Oq!q^+Sn@ys{eb|7WZ2Sj4kqy8|-I}TxV5VsHf$qIk|
z;F67E$etF%4+y^GTJB(<TLZCK(C6+G&!JLh20Ixp$F0)Uc<Xa9Ch3qDA$SRP4jlxq
z6EA`5B+AlQ=)uAan&FsU6-)QbV^zUDvA%yhf7hOl9ww)uRWuUK?eoNj1^N7}n<IW`
zPGb8fOR#BSEw5_Tg#!cDpm|dT7&$%QVc$=&Os@>oi7DVOPY%LuPfJn$eUwnxYbn4#
z^*o@}0<0>C(_yd(x)x1^!U<8#aPc1EYoub~rGMGd?)1HV@{!*tCXSS6A-L?j!$q2#
zAz?-VeqJ#VWhVn6W_AU9h#Jn)j-6%^{;_~>Kl2?y+bKVJM(|V)2b118pio&3ij!MF
z&+9AkQcts>9AEJHLkKUk;an@+4-NeoWUSo?^}dIg>q=kDe^m~xABh(jum;Re(L2p)
zKZacEL*GFa*ws1%2RsUc=KYt!y66$hKXe>qVXEBp*MB_PCj*CeR)bg76_7r9!IouL
zU|VPnb6IH&!&YuY&ru)wF8x?yH1s8IVmM6wl#4Di^T2MIgm8jKz{-&3$CSfO6Rg3v
zhU3Dir(omTD%4x6h8-sNxWZ3|wP+-QXRHIb?#N)vavV`RER|-nBca)Au@JxW6zV=x
zqVx1h?lYJCgF73Au$*^1O1B@)jc&18z5C4Rz7*wSqnOmu$;@M%jP-j<^P7heVs+i4
zlrJaWuhT8AzF|7gxaS8^b(Di}?oS<ALaK~%1oJy~P-Ai$C3g<+Pus~G@cc5`4R8RF
zxss)+Z-t}lFA-<9n8|h*^X4xc^cEDeQ=121w95`OwH86UD&0kHUn)HvRfe7BHr)A!
zjNJ-}g1VAc9`w@|Js#g@h8tb+8qM05m=vP=xDwPZkf6hdGH9O~04`5Qf^gz0^=YEf
zkj_y`Q8X%o%DME@c`?9bsCpj@9s{SKN^J~&(25a%GfBd^9ZH<DnX)whWMjSQEv}<K
z9dZn`Q8uKW``to_?5T@tKh&XS-at&BbQZzj6#F`|5<H%{!eq&CRMnkGySPjo)qf2d
zQx~l{dMB4jTDYWNH}rh$4^dTx?Dmr?%=j`G>S(7t<mOc@G@pZsVIxrH><(SCouKwY
zGH*&+0itfU;=&#`xyb4{A1V$;XV<=<7)5-GZpJ7&IK|BH<V)TW{E2@Za~{853&w)o
z0#<3S#os@Zamn;Nlnid*rkR7`%N1K}3(IFNlV^Y;G6By26G0ifK(xF{875c;*{0W6
z_jA+@dg%?)snIO-wksZNDZ($O5Thzm;34H86grm7>ZO={$)AmypNakXUn<{j=!jmr
z-J#LXoSW|q0@LAMVAZrABDUv)GNnJ99yS_-ZtRAyU!p<cG!;b#i^a`2j1Mpzh#67a
zKsuH<8e?*~G?anT`=goBCkN<0GZiKGdUGdE{ESTr%>3;P*xPy)4(v<;nGlNYYZK9R
zS|xafOSvb_h%+wPu`8*CkgstOzSL==gYIVh-piJD;UQQ;eIu`R=i%;)RK%c2wn?J|
zRDZl<`uF=|`_w)l9eNPD@2h})OH5GJjXYy_-|^0+8<}DWG0o%=;Pgj{(svc&MmOqq
z4BrNwD+&>=3e3m#44P}mc*)n1XjN7Yi53EitojtYr}f9$>pDFC`5w$8)=Qnn0pfy=
z<~qT(VndsO;FnPVjw6<VV%R~DI7h(txJqK2eivL1QBU`BFHmj|6LyS4l&yQsVorpB
zs(v0Ta?XLCyE&>pm<Wm?fy5I}=kBBOp>SFnw>#hr@|rE;pwy|TV;_urEtQZvFdr0u
z)-VUf4iw29#Cv!GxlmJ4=iNiG@`0N8^+xjgbY3hK>juE<vFow>X8|5g4#Vt3Z}{<m
z?q#huT;~&I-8UD5y^R^Y2cymE=QIfE(L=G3kK#_ANpS7SNgTR42b7cKVi%jIOtjos
ztkrihD2L3VS<+Ib^3DS~rcH&A8x^RY+7H~eZoqlzDJXgJmZ$wC!zGUnqxKXDs3%?k
zt@jG3n<d99DQPHID-%E>MMtANFpoIRtzWN!`+C~g-)<h>r7Zi1OA~OwKFUFy{3&>v
zkrV3zx$!ooKuftl4)nZ)s-7Jz#_9lfd^`wDP2ExPTn{u%N>G}oEz~Pl12p8r>-qHa
z?5m04TQZ4Ja*z#KHXGf&KXYzB2hp&RNn(;%%j*%8RX2piggi2ynGSb_Oss6DY}mjA
z=5jC#uHGDjM~jI8G2}IW++#i_c$PE!xKiTwbu+6sixKPJ*~N4^VzDu26Zl;%M4x~T
z{)=WgI@h1Dw6N}=F@!QUf*XX~p-!~R32eSPhNapo(DKy~bo^b$Kkc1`X&L4q`&7Z}
zJJ<1|;wUg2)WoL_IfPGVxM6)VG2QnZpm}Jrsk_z!7!jKX?XSmDkLfmpH(}iBOAoGh
zq?QGZUCp(EXR%GmD%e3i-qu}mDAA*P(#^%>-};w{B->_b?fT9t-j+etlU{JbnR0^t
zMx#9C9kZmljr~V8xOOrTQ9}1rZw5iJ%VAg782lNRg7P8rSnR)aXLP&E%zrLtUVnvx
zPkRXO6`ct&27|FRGah_SnL^Z)DsU+~1NH->F;r4O^OtTyVL$4r%v6DxY5ic?T5ps;
zxXN-i#^Az}r@@MTF27a!;glCwutHn{9Vd-p-hN#?H~b=H0#5UO1DB)G)<F1?Lw70v
zMjqgF8Oo9(a7=+MsvnCG>uuiytDLm43PM@ycFJ16$bz92IgpkZ1-Gl$<L)OR_@;Ug
z29yYV%8h93q4dHkPhzd^?*`}g4aOa|QD}cH7_27HJo!{l)B$=X{ppFD7AN54-|G0T
zN&`PQAt<KN?~#ff_FaD*m5FPFoU?w|>!}~QUNUCRPuzu9Ym3p?*8<uXBx3uLBrr-P
zeo~SKdM>}hGF;2x%jQ@tJaCzbwvH1w+_nHy8|oS)mkMdS+IURE7baYx9*<26uhsv-
zM6WzcCGqhfy2eWl-<)91?5UufdPe~7dLCn1!#ba~aP5vVX5Om7rZ1_1gzo2g24!T;
zHwTt}*t!hbC}$Vb%^zY`yYc&c5Vk1ffX=Jn9c3xK`mALt7Na4yi_TpOjD>g`V~n|>
zML*lO)a5+PN8c~N1CQx$`$ECnmAX(8unroBzF+~z{NVVra?}}A4F<;5m{6nvM%ghS
zN#4d+CLTfekH*k8UBOGPS7P0P9bkSh*(~UW5BSjx#%S4mCZ3*+o$0Aibu^04eC>|X
zB^SlJtcu`RLndnX?ThtG27{?t5bFJ@XC4ODT-p08JJNSFYHlrJrdD+KDGOxgGsx|9
z?+@)!uR`b8QBaoRkG5N>*AVfJOPv1^-)&N&-T4TpKaxspLYf`;Kj7x`-9UTOC6EdR
zW*#9GY~WFv?JMj-`%DlBk-oSo%m%$8h|S@#nct15hPT;O*!glk>lZKJ`dH$Vr!R+|
zX1=(-v>0oHSAnEe4$rT7p`m>}_bM_0N#8!GIe!vVBs!q>n#<@i_%0hYe<AiSE~oqM
z8Q#)8gj;DGWRCaKLG{-zklj&X$|?5*^PL|{wYyCN%e(4e|Hcqx&e^#4fgg568o{l$
zByik!8r!{$p{^gzx)dj&V!}nRyVc3hT{FbH%Ok+yJ$3TiLcm^q2<YoRXRcvaz(;0>
zYDXf7Rb*9awC_8!Jrs;{wx5C4<W#s+$MO7rdN$cLv8J2={(hzlcHBX*Qs$$Jj{)tI
za-b!;2l!r_hw<mmW8*w`5D%{at&35t{`@@F;IIRm>{h^uzRsw9EgTF!((LL&0CWim
z*{;O%U$IDh_x^G47#j`pLvNV+y9{PDa5PiQ3lXyC7s2w73@}<g5IlBoK<T87LX%q+
zb4(1!P4B8<(k}zN{3H+Cd@pk4AyuY#V=OxArto`@=uX{w5KSphr{8}FGuFuA<Ba9t
zYFh|idTMO!gB2KrTfjVND35s@hvvJQ1)XaF;+W>Opf|@1oO?avRT4S>Fo||cLk4my
znG3|NG{GBU`nO&#0GGmP(C{z0crz35;^z=-_<ot$Pwj(>Rkf_J*BXqAi-)32KX}k_
z4D0`tV{2k4WcQc{8Pmyyy0eN0veEcJG9QDEU*Q?_ecEU^2CcuH=Jn?BVv%4{;-pG*
zwUA!e@v(?|>1Tn*T7mY5y}5R`adakjFaz_$<mggx`;mV^TzMZXy0;WtK8*sE+)QRJ
z9R^dhPJr=J$^>4gyLKV%DC|BniTZX})S87Q|DV0sZxp&V5cj|EG#s0gi}mX-iVe+t
z;C1q4RJ}(#+Ce8lvU3)1_#uJ900Fknt47tP80sDeFooYZ?0G#FRf7(~ptcjl-|!cE
z)ONBNy|l0;JDYo(*MQ_x1SAv(!GOt>HMT3{g}jFO-yTor%^+xgGYF=-YhiQP4Y70X
zSQav}n%E3^pdFhGvK3+C7WWlcvVJUka_}$)I+sHVeV_H8tplw^Q$SRdDHg$PGtHnZ
z7@3(t-RcY;^!W#~dz}aF(<5O?pGvTO;t$%hmcbQ^bC5!FM#B?}Fl1sl=&g%Fx0@V|
z{I3WGL+P0JErEULaaJt1(idmv+kr-px!7?xj(Pr_#k{j)uy{lUzCWUc&g$E^*8VJ3
zDFkD$*ReQfd=cn`+6tFOlw+aB1+cqs10KffSyjy_c>3RI)Y6_x$A?tjJU0pI_kHF@
zJDj2T#B#J(2t3HvowGA3_+i3je(i7t#{G<fFS|Uko4yh~deXak_;MaxScaaJXYuQ=
zNhsGY;&Rqg@EGt_ET3l2b9Y6;uZU_)9W(;7Z7ji7S08ajDMN`N^jeWZd+bCE`s2$)
zt)8WhXC*N7l^>`*hyzcXWN1I>4^^Xdamr81CiYX}VMl9J>~UkcYBlI}T!ZQ9QT9fj
z0?RwDLPmrMR8?g`{t)UjYi5FbF|lsj$O(2QlE)klhx#okFnZ5L7!pj3t5IR7`l}E6
zd`)K=J&1kZT><rz_29y%`KTg<!8NB*)T4jGRkIf`!-aKBKSzKrk61Lkc9+$D%?CA?
z(=3c~U>ReIcv$c0)T0~=*4~u83V+5rPDDW1RqC4VTWRLFL_+*68PE0i1E<y)@Ob-H
zu&9a!>)InQx!-h*YInraCT%o-PWiZp6Zv-PF~lFp!Vw<|QMR7=X{YAl^{D-*o=@Mk
zm43X*HjY>BIf748V$nJG3Tr0zpwW;C5cIqTrD3L!J)P!sFRXd3b59m7+kw`PAG2L!
zQ)#EM8G~Yrq5JO;4EkcvTau&EHO7j$a$^u_ZY%W}5d^h8)u3dS0)u0wp!@L`eDpZV
zsqehYL|LYSW6(L|&eTD53Kj|vUt=C>9;{&52pp3;h;qTi7g+DhyoO|Qt({)rc0!KN
zH{^hwPYJZ&j^dhwrh!k~6=<EIz}zjT(PPV3VeOOC7;z#UX*<VqM<jr&dJ|W)9EVPq
zC7_zQis`)Zf{XzfVE>ak@}hR8W8%b0;>N*-Iun#SsDd^<CyZSB@(*V(364Lyf#1+o
zsBSie%Z((=Fl9H?%?ag8_61?o056(}|70^NmSa%oI@AomN_i04!}`1zoLvqHn+K_*
zR4@ngy*tEutu5T)pAqQUq>G|-V(V6RNAtqBTsCbq`3cUHMtGkEdDkOBtxN(Pp9>*-
zjDoMwR^WiQq2v;*XU*TzK=R_7*ebCn+_We_{rT#wg_W?@WSXDvpjpJC-Kc8n23kR5
zp{qA}6ml-HYA*%44)2doo<@?#&x6mT_m28_e@HN^!rGPf%;niJaQeOz&0qH9jjc;j
zJU<t^8J|I|^DRto;e51yID`CI_n1s-0bU*bS=sZMXzlorDMrSixy^Ze^k^NzTo16G
z8o`?#Rtjy^4CY53$GXfRkkB5+w5MibW5O>UZ>WPFY85<V4smH0kelfV@#%EGQl`O0
z{OMpcwoF(AAMS1ht?`>VUgJ#kkPB+M3@%j6LbZvLVfeFLoZl~qJjA)65gbJGsx;Op
z>S8e}U)ht?3m^$g5*uD<X4$6sEO%dTOf#kT>w{EQ+t&bs$|n=Qrdsew-2_V$QqjaW
z6B~A<p`MN_eo&8u_ueIF^!GMqI(Qjhp6ZV!nigm@>TjVqoOXr5`$0`w2?;cFd2h1|
zRaYf~x$}4CS~>v)n<R{?>V}#x<ZQrgJq)U$^PuY*;sH?(WHxny3`D&7EX@Ts4+r~I
zd%^wSBktKS7#n0ou%-7Ae10YsO7;%pVZW(&uMq;-xmh3^;0fke2D3adx!gbf;5vI>
z2tlEi%vGrgU#}(LhYe=XI+F6ZG7Vlb-h*H0Q%ruINK|gS!M~5$fH}9PqO8Gzdn`SP
z&ZB!`>#GFZzdixmJh!vLMHOH-DFE$n{>OX0A>OuKC<e4ggMu;*f86PwH*sibb88@L
zGFeV{&m78*o`8~b#PvB)j*0_cxzyT-Nw#;1C#jvq6{aZ|Wf+Ioc4(k9GYR&J=V7kJ
zc?eqIz?JXz12*Ttk?!<NSDnYl&nm<YI|dzLS<p&z?k1ZJpzM3jOunfnycswR!yGA3
z_@{#(_(XH-Ep^<Zt4?SiSHll{J`a+-K3u)-xma`5A@JT@3Xeag<IqWR7-W1F^^@}e
z45Dccaa&wq>5Z}obx5Kfkj@zk@W`_#chd^cr@Mk&^MknXHaQJ;UMJV@W@h(P!AyT<
zfmF3ysrKqnl*;va&%qnXXKjsI9Xmj(yAbNXlRy66RZ!@~3U*VbfsW2*R@yQVtG+9t
zA>$2)f1fi`yKPL*R~0r}D^cN}2;C>AfTeR1*Lt^=Tl~t0+(Z4b$ANjcrgt3n?UJCu
z1nScriokauLc^&i;6qj#c33OG+g%$UAETY~G#_x9cZ}wVRt($ypzF##R{ye!H`4Ax
zec%!17?2Gm4lkJv&AhU^46)mGdvb%c^RlJGQTpV$u=I@|ih5EeXx4wF9`KAeZjXis
z4*sZnNE5|(<G{3{0;U_pgI>x`;+m+j3hi>pFTaQ)TYnI_3=>O}=J8|WucG?JFM@ve
z3n<kMFe^EAkZA?mG4tN!2=)pA%`+8{5x0w5(r4DS-~##<9Kxkb>H85K!A<*k!QXEZ
z(OLfu>snFF!i_V@<^7I#5pTLguZ!1w-ijvcig1(02~?WaiS12>@E_l=QlGU1RE(>b
z{_0I^;}mDSdG<K{`S;9yXb!W!cn()RnugjbwM^k=2dbur*qZI3<ZmdW_un|;8;5~b
z++AL?eK@L5JI}m6aX#<bc@*8dD4sqcn({ssTw7YpYi;TO@#sJPscjb8_KSqNKg8GD
zrw%vlqCs?czo6Rt0xyZvg}<j?z#;7`FvUf{im+lFJwy{T*L$JXdJCMmCIgL!CW4$8
zpS9nYbNLlNKAPBtYKI$Hk5B7qmbVW~^Qv*u)NsnMy)X^5$N(JY#}hXVMT;@%m_S*z
z<+1=cu%Q6cCVuCS56keR#d)lNo~U)0d}bx&TN!H%o3dzLFgp%>o@Ihh_)K*5-vti2
zM~QDdiI?=d%uda^3cIH1<Gn}QaYPh(w{amWaqG!qzV~AF<J3Shzf!0_rNv~bJ6WCK
zTrljm6xFlK#Dx|;QTx#X;@+j;lp+PZ`>P5T8|UIm<NavVun3fE8q7Yp(z|S{8Okz?
zxvJQe7e>)c;f5_Z*VJIg{b024_Jw<`lz%yz56#I31XJsLZZ2Ld)F18z`bl${Tz4zV
zHjc+`<h^iC*asM~829G)!~WA;@Ma_Z4F2g(xgIm9{ayy{^9A4r`8d$Y6YZytV%87c
zpmw}9{CL$LM>G<<Xm&E_aYrzU=G<_MDo6~|IV`xrjPr(q_SbG`D09Tg>*XlzdyqG7
z9s(M_FQIkdUa&r414Z8h(e~pxw9<-)D`lCGa{nYY6jSeK^+4z)Jq}sR(owWwx)5V{
znb}1cBYswrJM6MJeMlv|9?7xdR0U|KS96D%$ryF^64QC{lx?h{GkEKIkVSjr*4Mr?
zJMGI3Iz^)Pur<`*JPFO4Ex2}T0A`OF1#tBQx1e5+_WmeXHHBtCU4Qt2**#GH<S?sn
zkfOs&FVxO`#h0x2$F$sV>PGb!kDjT6F;O18mlnB2wiCy1Q4iL0hb|gzOJ#+5DZDVN
z8#t%@5W>Diz}JdsNSl0|$*7F@KZ?#hF2=15!y`%BMU+ZHk|d!B&AT27ArwhSl8_`}
zkc2QgNu@J7H=WPvY<kyIjT9lYi5)@+u|o)L!neM^`S~%L_g(9G?)$nD`>ueNnUL&?
zWsH>ZN+9}t96Z0cp#5HUZdf1(cAt{C`wCV#IM^FsB&MR&_7U}SO(P~&)wC`u2;)i<
zLHcq6?JUsYL<5b?DIACfxsAALygxpg9tb}f&)CF)<wLYt|NVO!1pbN!>A@Wkwrn!G
z{)mGMv#e3=p(%z`Ml#0$bK~YW!HL6R5HD(A^VM;Xb2Af?{#t{6sci0K!Ca~Pr=wfw
zRp^t9$0(N(Akg)PjL!(-KSfmfa~vr=u%2Z-%CU8bh$^nHgq}GiH1E(75Pg|X+<th$
z1&1YA#P$$(-sxeVWHHpwZp25gWT0WS1NCPVL-#91kQ=0oB}yS^xt7h(|0{(?bb#(t
zY@X$#jr(^=F|RQS?T%fc8}Dmi$lT}D?O-|R*`C3lR!LZTQG@n=GXP1N2H#%e2}-8x
z@Sa>TYP}ytTNmll^_wIx_?|wBhAsr7M}Uj+;^6nxK**PmMf1Kb@JJR3F0*sc92l?P
zyNk#^zo+MW#i(4(dd_;uoSsDyJ}j)muXQmfJA4X6$LpA5M-k-p_Cud{Bxy@H0p`Ig
zaNmDXc>VJwEZVUU<UQS?zy3JeJqe)?+DLGRAvShqLh0}dPT4e<2tq!SB5o+-Rkjn4
z3vX##u|3Am9S5a}^FfjI$!}b!1e?@zRJP{}6+4>22mUmu8W=+Fi8M~*Qzl+gqbTUQ
zAZ+|=9L^ZZavoLdpk;XzRlV_rb5%^?LY7I;W9w+ln_r2d-V$7}b28d}3P#l&1K4AG
z60dL0L02;!`c5kd3&NIT;GP&*zuyKYv!8ttCuCU&G5UTU0j{5Rf${VzHv5&qgc18u
zXAg4`**dTsYCJW2atX35kHV=T%=hlK70=&^#*GbuXxPp8W;0r-K%ycPBunQ#cvuN5
zOxf)B7Gr9g*r7*5Bl92*BBG`Y?%kmjQ2ft<_B>(i%1=?GM{OuZJ*?;Zhm8OXX0r{A
zm9#itf+i1xi0YOWe#QGMu&C$?^lm*3A`5>U`eq(JWm!m*JF}_FyfV~%{xA1tX$D$e
z9pEi}_R_g4V^BAg<vnNrLv22E5cB88#OqNd*fmI@Tb?oZ&vw_mx7J3H)qO6IEqaE#
zCBwm46R>z-5#Bb91m`P3)S)sACRfcxQGEjk1rs1K@*+4_-=ZQz#(RC64O$aT$epEU
z&|5Kp_%2VQa{KGib7}<$7dSJ1?P1RR(rD0XV0rzkg`f{PaOjve>PWXhNbX@&2w90y
zN87mYEo=rMON6l}5Up>_MM?Kj&LNX!QC+RcDnCS>lpXN;I1iRTvW0kM7L>gR$2B@B
zDACYBMfV^KHJ5@@W(<@xM`C+Z7)s9!hTnbiSo_%*JFaD5>FB{=U7tv0Vm+!_Gn4c@
z(E}Zp!(F=60JXA!xXu6)d{7L|<0pbl#&hjkFN5OlZo02*ISK|(hZRq&5Jm=oG>UaR
zzn??Tx0(2j%>~DIOQFVmCmMSP5&7jiVHAl2WZ!Yw#Zu;{UIfLSVr;y_foPKzL@l?c
zzWq5E71Bah1m<A4XdUX@Xdr%>z4V5D9QF;{1HLb#p?5l)b2V3iV)#cAPyeI1VHUia
zKOJQ{hV-h1HEJj`r%S|O3^|nn{r<MZLTMqgz$&`lKEN4IXZzZoK=Zh7->LnK1ZcL-
zBU*p_s6qJ!aMT$D@_(~T+l6dA*CfWi?o!ern+$@=UnF?DI(|OLd|`_y6>F{FHH>Cs
zQT{5_(mF~zmN74I-UKQwpF{gM*U|NR^B_IF26gVRcV@r<x!AV_509!qwGSJx@opu!
zKD6d6Z@%Qsen!x!Delxpb1mq^TVY)O8RDn^jf#@Kavq5*;b*lzwricI{SyYkqsm<v
z+?otN5mIP8e;QQu*!TNv8E5He1TZRwn!jZGk5f}?`hL%3EV^ajs=tfZ9-Icv10RXw
z+7J|PT?K&>DN)#53x=#Ef#>x>{O>6t{Jc#S!xTa5!C_*OrbLEyPeF$TWyE1@8*$nY
z%Xm5gu<qu5)E-~OGCx@$`tyS`2|2~-y}QKnqnE+eu@;V{tV84H^^6mug}qBxlh%s|
zu+8`$8NaufaZ=t9v2+J7SeDDZ2#AJv?=rxS<*@rq3_-Oni{y=FS-*DHZ8IH=SA7)G
zQTHnJtdIlMS0%ivLpI;6^@qINItd-dh%xLxCmdzZ?uu+?FmNXio?c{K%(onIG`~*<
z-j~DOlwg?OZ--rTUQ?5yd2l<NWnY2<@sjsBEIkuOvvM-&aE}U5YAnDXrcL1WGYdjy
zG0x0Ci_peh3|CvqP^qUL@@y1{*G1-^U!w}r7lrg*EJ5Gt_0S$R1tKj&vBx-tggTDI
zj>V}c>XeY~-F{#`{u*gt5Qi;^o1wXKIpeWxCa#L_c`uJW&}tAzKYQhZ;mbJEs3w8@
zJJT?DY9iFW%)%S1DLmSc1l^wg=sqGDRX085k3O1(JrxVUWZhSCw|xP&{$&TNY*p|@
z{ZVY&sRL(Xtx#O?N%-UQF>F80qwlhFP%|wbSG={x7Yo9%@?9)?OjU*g0fn6^dgw7S
zfqFdhBCS)bz(ac)ED^Hoo9^6s65ID2jf%l*9_#STRpwF8RKuu3#=-g-3Oj;}kuOp~
zPc=3xRkWm)E1A<{m<E-uyF)9lDMCD98RU=1&xqK8Z(@S6x7`e^hs}ZepM+>Q>jA;r
z%b+GQ8ubrr!0G{itTU+xH}wQ)-87H6AeUe-UqA*f%tYh1r$kG`49#2hQT)Xo?7L2b
z+ly%|&%)t1k63hl7Yg!QE`sFwG9JwjusifQcD5P;%5|ym74pzE-x0J9L{qV8zVI3B
z#nP?SytT0{>tm!5#~*!+k(|H{W?hl_k9f4-Uc&ZQgJ`-!A-0&c5cnESBtDMZ=jH$$
z{bMY9pR0fwSB-=Bh*9t)mFmA0lelNksg~PdFgzSX96tz%iFGg>OY_3-?#Hm9Jr=~<
z=97Q}P3W<siSo0zp!M(}pgydS9CBq`zN4#A<}!?{tkZ;ZI^!_pfF*i2%mK~SW3l8z
zAO=QG081AkZ>d!Zs|yR5^Cbx!=YAnMx|gx_@mM%FN5Y=1hBQ^B0SyH5C@)DOd1mc&
znS~i{Q^|*1125FgIRTaLvY~!W0-9^~l0WWE*uP$aM?UUG>8KZ^bjJpGAgl)c+Ii5T
zQo!y$<FHFUjy5k^NNfbIV0G9DL>HKc%XJco`a<~*&2rRdbD9YGORT3p57c*s(CsQ}
z*k`H_)n`tlvrRQPKE6acj?aSdD?h1oXE@2?8Arr-BS_&Pnf+rb%9{j&%hzPaq?Xe5
z{20`ln1(s&>WpDy3@Q8bQA`wSb`DyEuK(~n+J=JV(Q4rwE%qKTPa=H}ub`-B7=->4
zhn7hNU~iNI;Z+=Vt~-fZvcW`gu#DubN{8})8bR0eDc_Qli`v^8Sl;S1iJ!g!+74bP
zCX1f)W)n9tzi%w+epjQvU+~y&vldj(KciCkN$e6wgW?j#Q^W&M>EO#))e3k-yBclX
z+)$k{4CH03W7MUE(&_17Xk7tO!C(06aZ>#7iS-kd^U>JZ05Wd5qIk<v60b9sv~Lu!
z8T}e4X}Ewl+^SH#<2lv2m`Hk#8$ppz6FE<L)Y8o)mMY&k!GG_`gF{s)_*E?RwM$0l
zrd7}todT*Zqv2<d1@;srVkPT*J$Yq|BF}@g|7`@@?*_poUlVNq?F<fkIvJaAvGD50
zLVS2T3bO|JgF$L6^f|>5Z|lX7QI-yNP8wv{WyUoQIzj^#SWjmpU!&~*gPJ5LgEPAi
zbw3yl*1HCXvTh;WbU~fvAEME3+A?s=oDQOs1H^b_25L<>M27^kx#lYlW}S=2lK*lM
zF3doiWGU%e+(sp<o#C}(7$mgEplhiL>ETy_cYro5Z`DG1)fLdHd5RC;u^ZdvDb1L1
z27CJmD9*G6iL{vBm#=0BYCotOauI&}Ek@UrgG7IN6}9vC0dzYI-6O_AhPng{`6pEO
z#u)TJUkh$u+1_q-rLfdH89tmp1*O*uXz#Ya;PGK=R5f9L->*ny22$e2SPSj<mFbG|
z1`x+AB4U{tkyQ84ta%ow8m9vC9Vx`}i8l$HG7bcCN2NJ|XJJJ`Bg!^z;$<rpiATLR
z>s{X@vPsIMGsT*-vwO^iyH|p<^;<SO{!Q2C6@r&*Eh(LFgE!AGf>h?1^;&DndSI<o
zw?+|_RxHExIkVAli75`)%1Gp;`FJ*m^@C2-K}Xm|NItj`8}=4rUf&1e=d=W@tr_d%
z#075L&JsM(FanLz;y|z@j=JJX3`s5}@h_hf_n!is9l|m-&-Rlpc`<oCJrZGAEG>A=
z^5ZPa?RtF}H~e)C+6AeDD!Rksq{SG2ErKpsbrp28x=7FGa$Fb{iGTX`;L~AOK_JT%
zDg+bs7W*Syi>7u_2}E&WD$Uxo0Q7?AVw$EWI&T{dQKC=Wk$ai=Xjc>-h+w_?@k!9C
zpi2bfhk%cj8aqcWqU#wutuMI&J%XwrZfPh9bSy^kk&YV8A+>0y7fUSfS7O%fV$@99
z$<71`biUjP+~B(tHyVvaoBDEca7;A%KHWg_2MX{M%a%IStb|c<XK|G3A$<0(5#0|K
zLGbhf9Ca%hOS2Q{g(Zzx%I4;-AI9T5>vGt$b0p^3kEgP_b2XRX82S#sLJDHC+5P_k
z>2i8at)DcJj`2(2>gEJ^F`9?*?vd!WWeRrxx1We(m4!BWPAHx`h_gNwK=tJspy#nA
zgxds>w(U+Z+0zl7+KcdP=^-?#i34;0ImFvB7ED^Uu`|~Wlr}Q%%dbBE>GL`0aNGy<
zj^;q$(PUD6k#U~>FqT?R4Iohg%O~QR)-M<7fo=I{I6M}jWHn&*E)^3O#e>emE7-GN
z3tEiTknhh1EAtvSux3A2ZeqCwrGuax6bqfZ-Dz0J1caPRm~1c@^^;v#ENCK1a#Emk
zT`iyUAP9!vOhBt8>sa64jM$y(;&lR5nWwm!_-?TvqwcZy<c)FM>+)*6&AN@V8>eGY
z<UGjOumeMmAXrZ87QXhWgCap8CSBTrqc*Y6So1cObqDctt!hAic_aOio`s1WQP4QK
zmGpZ>fa+y;@bfB#%qu)*rO1IOYd$<(TL-hRkH+pfdDKm-fK>k~g9m>5(84Mb%@(M`
zwozGVW-vhQ-=7ADB7e3!%fwZNQqXHCgVu3s)OyJ}617ef3^j7$omv8{8hsi*8=iuZ
ze{9iwfjK1i7lA8d0(m^%0Q#FJW9MAPA`E8Td4)hK)gBIgM|@%X({K!bqJs+CE0|;G
z2d5P>l=k)8f#10TxZ3Cs$x|8MJZ&+}>c2)6)-W!!JKM+iXuz%4nrMA;E(j#&$jDEO
zk@j{56`QHmNUB$Z&7VEczOMvF<V-|C$rSVFx=|=)4q=mtmUQ9`V~i=z!1&VN^u&}%
za6G0#{m%ZPf^k|kjh`Qog`1b6I?JHFsym56A4;(E$rVzvCmKbK#*oAMVq=n5V9)6U
zjH78J{*5V74Ot74mcv}gHr6x!z6>3j;-K_U8M!n0Dn_PM;Q2@4fRiOuul6GF%UOr~
zeFz2qAe(i)C+ZVYsB(Zih#9w4aqDE#e53&eq$;T2xeiM053|0r9_h5Rqvwxh!k+g|
z*x0NG)^BWKR8ui?=Vq~YP$3t0FP(}5`fHw^Nn-u_Y*<vE3SZ)w6Kqq1`IhBZk+3;Q
z;0`&|jm`wi?=ixS*I3S|YZJ<+|3m&=KAZ8_4%25-8PCtd76gs$+$f84usG8cjUq0>
z3nx!Jf3Jb%24}(9k|XFkin(R?t%JGS^if6W9JUQhg61-o_xYm>L8dGN<G^#<c11wk
zrV65F+<;jzJh?xZd1V@&QQZ=DH;~^=Iv1tWbvK2$fOTb(1Km-PXC8)i%$a*@C+n0I
zq5X$!bQ_StIjbgYcfU<rZiW%1#rg0k<|yNt8(?1gdQiPNi&jteN3Z*Tv+PL^*SGm9
zS#Tr(P3wcO^!Q~Wdv}wcw_yQVj||1|rRv})n2oEhpTan&?T~jq3676quKMC?n9$EN
zM@S8}o^T?)w&heTJxVT)-G_Y}qS@ZHkqEdXQlK&qbpvPe-R8+`pZAb-*4gv7{Sq*0
zd^hP+;)!mvA{ABt&2q!+bN!XanQRRLFXbP!yJ9zK3ulZ2G6Dq)jtecPJ*ZK9yo_|J
z717EGFNvaKE794u9D=t<q1V_B+@Hi?vW6HqtBLr+Lk3yO<?OE8O-M%yI_rC(Olcp#
zVxScNv8Z6Vor55`8%zhBTBt=C;~YfBa&@YVw;U^i_v0s^))#ii8Z#PpU#`a$O=37O
zwh<CqmZG+eH`~iq!Ke2wSlS}MS-%;lCm;eEAC3jRV=1Wo-vZdnc6ov5iy^H4pm?d5
zu9+&u&P@)~GRuP3Kb%U>6wgN3+e3XmB!k1@0XmC$8Dqy@LEfqg#6@go*S(L3)h-g>
z^;03Q>n>fAXN);2#mra3z7yV;(cE@8#3kjC?lckHm^&Xu0W4?r_bB>9Hy`*x383@f
zHf>eDNi0uX=h`;gfFOIh&}DT!1g(=`cuNZ76SFzZtB=(6Ydn~+6YO+$ue~ER!_=c^
z@OU1}AosM<(c;PIG1(JN%hxl9U?}snoaX#Pf*7a&5EU;u$sJRm*j}NAvOnEI8xPhU
z{Hu-(pK=O%?|Wm*ts&6Cx+Tjz3sEwCBR7AY6pLIu;a8m)|4#75i1T51Z_gC8+;g9o
zbsYq^KV?)k<2>A+$^4zo74R;miRJz>p~vhURqqW4IO+$2nPOhNC$*;1Q2};}^RPQn
zj)pd+L9gR4YPK&0tEX}3w>KVr2bbf(90`ce8w%TPa^PbkJKuhZC%T#yG`Ok}<K|oi
zH@#F?QB;QE@nUFsWC12AFNv>cKLqw>5qY*R4LPOBSkC%rIlmlVF!$>1h38?xl_K!X
zNkvIQ0QDBgVet8SD0-OzYQ=7Nf~$m%j2cp6;(!Kj0q{Uo8?$<Ovc8k;<&G(F#>)$F
z)L9ugHUUgX@<JbPG0fT-36>kws7|>MW=u-Qe)UmMJ2Qfvkw$>(mSYg|{R5FnYKZyR
zLe_(3Oq3m2;4v<g%2sdW#xF_*kzWPpeR2mltkJ@{ZPqBRJ}$JLZBI?k)ZyLHS1|m<
z-^BX74)mzS)81p7px<>Z^<&Q=murY712K$8d=WZ6%|bz(xiBuRi#oFNztw*$z{#Qj
z#qxTr7(X6U#@8d9ze73q%UCKX0M(~&IjyBrAfmPb1cDmw?fea_PoYQc-VOo9hl_}0
z`#uhCx`MoTFk`fo<1b3EL^~d=S+}`4U@*%5YN5_yjFna43H?@@)On^8bWB?fyBFzT
zY6GJFS?2lP;tOR;TQT?)<LLCHu=ztSSYBd17{_wnyugHnf4xtXj~|B4wjn^;GEjE#
z7jIk`MgN_720I0xSeD>2K8|!kV@#on_uZ(QUOoxl%JxHdFJP}wLfX7o9_8gOl5Jdz
zK6`@UbA2={F<Ob%tK(_%sUE7c?*Um#*}rd;J=a;)#ziT2aF3o8q3mZkZb(G5v|dNL
z$J$c!0R%z!PI6_Z2L`gwu2gvtE!f90@1?6b>qFrXUc8C1AK9MK{vtj4n)PII#$rro
zI*Nia+1%?E(K_2pJVqP?Rj2P{t_y{6&m7T3QUKn$h4jdnWCS7W;*OXHf0eo6me<92
zO*tIBWry(FyaMKVd_nxm8=;ubKzr3~xY{8KU!_gOki;xlr7VY`LH|EbH-X09BADGg
z5kvlpAV*7O?EkME8ze(f_Whc$vf2)H%Q{Hjr2}Z(tPZ{ZUFXV|YvNsnH0b)BOWyBr
zMVYNH_zjMSWKS>VK$}PNzm{W;ZVK$ZSBu)ibkI4f3<Sm-cmwxCXf|sZn8O6{>nbCi
zD-$`vI8ULKT_wHNxs%;5ZBTh`F+_cM$#NkQQ2j>>l+$*h?u?Jr{Ov-Lv5fV9u1Byt
z^KQr*s{|fj2<%lqgHe7?=zK#FJ}#@k7X~S?!Dt`OvX_B(|0HN+OgWQ_{?uv$b0E&0
zLhU#K6`e}O{{P}g^V;iFwm1oTz6WC0kF((YBoahX9lY&$ZJd;xi@xm4wDd*+<jhF{
zr8nF0Tt+pT2VA4NulhL4TN9~!Vlha3ym*hbOTle)G8H}PAq!Z}=SL~?dqo|9`RCT5
z;CL=oe%VBn1zW&j`~!Nd<|L+g*W&&1E9~E8L5e;rgY_R{;<+pb9}SPfzqO{LrC&_V
zI!opwjmby#QfG92DMK@XJ6H~mt`SGC;E$+W!Kj>4@b2`3mmMCc?U4e{VvErF<2cIy
z$-|*)Ip`7gjdZsk0If|k;B!~y|7YlA-SbLplZ8-0dKwED#}Ze)Z{&~}bHcaJ1&8BB
zG_Z~BOl~Ql*PAOO{=q6ZzV|5BEO2G4iW4L}sf3z(Eyg$7j$_X6P3W7n1da7VA*E+E
zh977lPi~gu%WFbxk6lc}Q|<}>#%feg2#40KQT&IK8_;#$X|B0y4vomFg3hm%oNUSg
z_S`y0_?^M{b&Ds8FFV)dnv|h+%WUY|bOB^<)5z~>aaeU^KO!#&g58%9{ko`a;9S%>
za{|QS3Vd+sCD6&ZMLv^FC@?6e^7g49QSkv6Qh+*>*&fAW5D1d*O1mHbCMJEE5W3_t
zhFn)6v~eqb%XdP_GH=c@DnO{8QUaNrBV*PvCvwqwV)ObP)#`gmT=(zhEjQn+QG6Ck
zolh2#z7WO|x!fe&TD=#0wP(>O(sO9jRR$}c7hvEEbr7jGLD~vL*VD_0YRMM5$WIYl
z4%{SN6%=peok6X`-K3%Q5@RhZkrRtAFa~H6(LzJ2JArZjPQT?G&sUOt`BIdm>v5Lq
z5xi`38SOpwiZ{mV)M`9qN{gcTPMxI?z99>&6Fq1`xjni%=zy-nR;ZfHd^@}DP=BpB
z2)w|7mfQkj?0TP0W&I+nk^a!JPfE3Ghp_KYj?n&>1S(ye*gYwW{<2t);cr;!^@;_w
z&6x|%Iu|g0>P~oLu>{YZ8iT#RZK?H+snGnplJ&xJfV-%IQT4g};K*9^DzX8O>+8W;
zZy0I`7s8CZC@gdL#U<JMQM%6v^jbr(V=g7FYce=h{gsgQ^)TEUW{!*O{aJQ25H=}p
zLTU7H(BBz_-a`|xQSA;{ovMfBD;MGGp$L=u#cXz@L+s{^qAd>0Cv_#7S``&SS86-8
zwDRH>*~Ot~b2ZW3H=6q~Arks%FgE9;GyaPWf8b9n_=Z2Afh#=G?ngJ59#@5`>G>#V
zzC^`7&O*B+mJ7}DgVl#NpzOdZ&Liv!#(0V$q233T3=_ce)t8zVhnBNBQ8orVJA_t@
zLttW?MmtwHgIixa72JC$6b-Q6a8NjYDmaX#bF0b04Ydf58kr9`n0GvA!E&zYyz6q-
z@i@SqIWbpJf7L3iT)Kz-{kHNhvq~^$0sFh|H>0}$M&nWUO4NED3zj#mIIn|@$!lT(
z>P`{NKjln~XVyU8iQQnbGKZf3oCT%Qoy5*c5t5aQ;O4Jb9QahvcBEO%bMlE8CM~BU
zCtSs%oE;$Gwowm7ODI}$pL&-~grlc(q46tEven%&TCterq#NM1!X_MZyn(TEXOhax
zLm3x(7yY_I6TR(6!|j7nAd9D5R?{&MY-u6J6H^%5CxJG9-%Jj$Y`DK|DTwusLs6zJ
z48%-=){JrB5}bp-BP+50aU3KYoknH3k?7(s!KYo9SwH?iPBe${9(B}ERXQ8fXS-wg
zCq?#o-Q$mrKMU0jS0T})5nay(qJt<8^X(DcYp0^&%+n}Z8A0o2#52DAX{g?k39rwO
zM8zP+tC+yfKd0pQzUoX=UcVgNCir3Kj&10+Ko{4)WPAL%4G_}*hZr6l4G%As;Begv
z3>=vO=2M2CLy`y-7cV7qk^`|csT>`-2B=&<2J%!E(?-@ynmc41%U>I#Sg(WAGH1@#
zRW8JRFzb@s&7{@04AK147~nbsu+d^6dQ41%oGO+rx;?|Z)6@d3i=2r_qlI{@wUhT<
z6VPh-De66;37V^NVE49i44*g#Jzgur40a|^do>=vPh%ZerxZT*@OgMK%@cjP4`HAk
zn<dn};sSkY$XHP+$`+)O9E&=%*^rFZd6%I5-*9Xhd5#zxq@d+Gd0u{ZEVaa>nn5%g
z+j!>l+8W4frdFfLnN#GcM+hFN%)-D-o;F`MAyJY+SUtG{=B=KGrShLS@wRHA!2NPf
z@1!GS!ump7>A|>~pIIkoRt37HpF+#y-)aWVBoX-_E$AD;c<&EbFZ4LF9e5zz-kc9I
zt4S!?8A%il=0VX4#yQuxz?T^;#bt#Qy^3pz?k@H$-BtqfLpOoFoD>~;Cc$p)v)D0&
zCw;w)8Gqm&327CRBX=@U5O$zO7C9CK=RB$SV0%sJwj3U&){^KOY}VStGHG9iLfg5O
z(0{R!Xta+<9bq}S9U#W6g}NXa#JG|39}{Pe<-G0PLGXh02^zN$>&OpegQNoA?>4{}
z(hyt{cN#~13<s0SY4r8B2<RrM#L<;yNtL#Mc-Iozm_HW8VOo&%uoTUD_hI0g5)$&{
z6ZLnSgR<mF%rRuiePdo}d9Bk#{5zw@JAN8=7G=;zKNlF-HV1U&t8lMf6N)ZcL$X#U
zwfwr2sCSrxoyiQc;xfCp_xgfh?Rsi8B8^1pchak8M`6_aA>>KQAuQcDj@(sUhJoRx
z@biuv)*bzShEpB#eyZWG3=<SME#UjL>>=u~DcoKejb{zRu_06keaq(17=yDgE1?n0
znfotrb1lvm`Qa+3Y>fZckeVn7sgg!Md){3I=}4BJD{!juecVLjPsM>w70aC*Rw7|8
zE-3hM0CZcMIsdV-(6)``fja!r@vQ^x);&rrCp&R+BeNMpCxm(ZY5=~D#Cw0dv8PW)
zuB&9C^lcXO+l_@#r!!#HcM(jB7*FcKX`#AW5Von;LU5BFN`JPKe8+H1c78x+4Y9$c
z(E^q${Z8eR(!qDf16uLS2Camu;1{O@D?27)QR+eP7$$)5Mb<D&Z5OJh)WY*Ywdm-#
zhWajF435tyfp_X4=DW#<Q1kQH`Bg}UWlTr8y_Fz%62~3*yAD?chT!JR3s@<iM%`|g
zg5P>EoL_MTehu4;{VA$oW;7Z~)ppU=@ZY?2^jzp~^8+m(89W#o4~8n9q*rw+%vf3u
zb@R%hLv9fYcJvdo&VIUTdnM!<X%n{-li}zeBa}U9<yuy2L$j&^_`J)6K0WrVs$i~O
znFg5bdq)fpiNQM84%(|f(I}hq=#_j0rniM*Ki@_7edDo2+=$+X7easGKFI6IqAe^>
zW4L`PnIH*8N%0X#-tB{3GqhmV!ON@@#r9vj&O*_=I*@LavS;peP*=^xBF7crCRzy+
zRhE;8S0U(hkD9QKYy3kumdD)%x|&L?XOfFu4mFJbo=)Zc-qG1?CZhXL5iD;mrDBya
zHSPNezN`;JpOI0}mXt$OibmsYma8{6t)uNj*}bR90UEbm!i%>1S<lrOKFrPm(XnT|
z_gWr1<<wxrL>J~SND%g}W<BETdssGg4K$99gz+O2Vfou4Op`x>vx~;!w{4-&*?AE9
zMYCbAnFnSRM`6_4qf|?!f{MH|c(KK?8vR5~w6n_PX0tO>GTS}I&0PoKzIM>s#TXc3
zHe0V;!W@O{ym@CAV+K2btoki4>unQioKeR8+zB+RVyszFI<)COAOpkMeR0kgy6)E$
z><@5-;#-~=ZV>_vZJA)#R0X)6b$`75`RMH-X#Lk^uz4TJ+*bP_OFy0JL^*)+AXSY2
zkV^*!EkO6lr_rW#A}IY`1$`c|uySr5+D+1c?t9(DE7uLof9XS3y#sW1+fdz?uSmzZ
z*(ej}@iJu>FyF>O=LqI`G7aVhRV*KWn1Dx^fUyB%*jcupv%Pr=6LiO*xv?_^mgmW{
z(xn0Sk72X^QWE%c6*eBYNK#f-qst4%7BMvgJb8w=+NtxU8addz_$T%I7)}Q@GG6PD
zx1=*Ij4Rw>gGK>%C~otF@(`9$K32ufXj?&?u1}?d+0WI{YczhI2JLW-!`Uge7+7Hs
zdU~ZGT7QCg8i#}79u+iPkqC{lbLbXz76h+$GB4i);xi)*P(A=wOl93y4c2qKHy?cM
zrjkiR51?JN3CVjuntGTqr*HuK-b~pB@`r-yZxu)M3rV1jntMPYH4eUgT*hWW8dxyv
zGPY-k;PZu0kQoo<28v8zX_+$eS4zR)ejLi&jA`2mZ3tAZB8@(wxabjc>P)Y#QDFUI
z-H($|Ip`^|eC7d)txu`t;YOO*6as<z#ca>#gvrJlXp(Nk{MwZ${`P_GAoFN=bRg~f
zHW{4vmqE$NV7Sd#%7XZ5!Uv_f7^%njr^fO`{y)ZYEx%604|fw2cD{SFX9Eh}+!eOG
zT?@Vjw&=QC3a$H6pks6><AxlAwA3){VrRhnMw*zb&hp|@pVY|4tf6Ps%Tb_upKEks
zeR>y7uK(35QhMwdX?j(MJ$fpbb><YFw^@e|QkvlWA=VS!%L@f3-w7Q)0^^!r0o@Pm
zXP`|=RrlZGPkkuB<bcb>-z*H|Pp`*3?M_<Ikpeb_zp2NzUfS4o20u=@h~f7WKrSyG
zx&N+!Y$|igZe{n@J6_y_>~Q8gNQB-gqsUSP5vs1<fF|2wAmHt63>;t1zK7M!w<Lj4
z261?Mat^B2uz5ydNX_SCBT>A4h0vMtfLs5#F{Z>2R8?bp{_ZJcF8iDWEv7Ut^fHy-
za*NnbGeQ#!XVCXLj(KxiiJ@*a4RkvKcAd%)c*dLg!plj>*>w197mfu=%*S>5DQ_tD
z21%<cw7SG`=v)ix+Ql&3Iu!FQjIpWN62rGBq3-5vE-G<2?R>t8i*t-1QAYbnglj0f
z^Df4yN#5)ZD+fizR<Q7TJswl>K)=~nuzh_cl^;6`uq79^^)QbLw+4(32g2OT+2B!;
z2;W&wGGpK(DBB+aiK!`eJPKy;hGX#0NC%AjI2ly!_d|SU3st(8j#<)1(rum#MX7c8
zd4v>ngx5(?z#`~W{!JXFhB0<Z9jMlxpz%BGsn&OO=niK-j#(PKd|NiywX7o5RfjR^
z&?%~ZE*t!VN+5Z`6ME*p6V7m9UQoJ(uZ#&HPS?*ecVHUi8keHC%UvqY)uetKndiF0
z7skXjVIRBKTfP|w4!;X&`$WcyUOpS_exKqpbi^q0Y2&~3Z9vDT1vKzy6Rn(O3ngEo
z&}OMQ>po5;5pCh%ZQV&!uegBD<+Bi_^p*D4Bx5|uB4y08*s82TduAq~-J*Qh%y=B4
z=Y4$ZMtulbyO4<870IQhwJgW!FEpt;MD~xkiX!76{4JLXoeD-=R_qu!x5NjB4+_F$
zzi{f<HI^vUQ~ai=ir(!PVZZ4FteeLCP@6PREW6LQoymYKcb>YZQIuDCOxr)^pmM=B
z=*%!gd&cGz#jsh6>NBqYbS8b^8xCF8Bbb9r3#8$DnVV@f(cQd=8maojy4eR<=b!Db
z)F(r4V=Uhj$<&AU;vmcUGSNt6Ok~q??6GE?P@P)z7)S@fDpzxT16#mEU-tV4)5bTA
zM0rUNv>Tfc_0=rL_A~<z92$lK!Ff*gLJRF#&$5gsj)3pTIB+y#Ij_3KMBuZpCj5Xu
zwhc}I?V}gLUnvP~W^3ZAx?Jel90&sW)0nyVEMsZxqK+Bkh*&h3(_=fn-X=ibrV&JK
zQyN~V3&;5A_hdvIV_RQuL`{=vsGr(NZ0>r2z))#kTfiI$RZB&~zpLnmf1S{~F`QoI
zs!?njDU^QONG*r`M{w<5;IU*WJdqk<)Jc}h-cUm&OSQl?z7CBervb){0xzqR(4l%3
zZeM0@U%h6|`&tU@eo%-TU_AEjVjPg;CqOo_kLnjzVcxtsG-^c{cnpuBvIZG%Kg<M`
z+e+#0-!ssm{1D{LlhC}lFen|8Kw5`|0JQ$1t;M^*_w!tm-BXMIGB(=!9SG^F6i+u4
zpn1$)Vtf1?{{9evF1sYC5mE|ZP(YOr&xa)G6+CBi7#{_0L`6q+@RkJw9!m!6;ZpGA
zu43SZQ7|S*h>j-8sGKyD{n{nw6f6bv+YcCbHvp~AXhIh2*GQ@hIsdc*sE;(k5pg>(
zS-%|P<3ph`dkVM~E#~7F%dqA9W11D40{wHAkY3?qdd*Z9t??n%t*fENKG|@nn(^BI
zx&#iL<!tX(iR~Lbp<}TGKCkg-=SB^T@r?rqI17`DkF%Y4IQ=<|y(@O;!0bVzvF_9<
z6tJwi*~$Q_*)kn(Z}kVqp5yF!XifWb!og#j3V6t3p?A{<n(R9kJgSvoer7Ubv&Z01
z&_=tAPvrdI&G=^##UW|4u=lJj%(l}(eIF?*>9a`ax>4YwmIw<Dvsp$%FjqfG6|D|l
z1+R`03N^!^^4U$+T^IwRT{A(lcoSc3%sfmtTX;#tVSd0Qf_7z^z(0ZY%<H=q#&@qo
z#nI^`ygm>NI~r;0J{LA$Xa0@bcc|KeTHLeB5=GrQsQ%p?%FRmQ+~YA=<Q_sFHjl@u
zPkD@=z8nn4OW>WCEtUsrqjhvBPH9_*LlVtU(BQ}$?ifnUrPFBC4-xd;Re|JR@~kub
zo(fE_@ljbZ#PY#v*gxh9N>1G8lm`;nye$t?_Z-2V@^SE@?jk&YpMg{V)nh!8ENSWV
z0y;E(Cssb(0{9?<_WsM*LPsm;<AOtIcz!Xot|;N{1{z6w;zqiZ&t-l<XUHoM!;P!;
zU>1brdV3~Ls@KDVm#?6qlbAlO{|jG#Dg*!AI7GMAuwa}C4m=Kk&SBNG^nNYZUuQ!s
zvoc7hdn>19=tpfL)__g>BQ`Jc!0rq-uT*|SMoun7!S#pcz4qq#kUh8g<3{*yqZ8}>
zad7o=7(7^=g4@cDVwF)2N?Lco&{eT0ODq%0KU0E%&3j<Ba6cMd$-y5T?C1B_1Tb9t
zi8zU~G3LTz_KdPa*AoL|Y`q`$PA%fK<_6Q(x6eW8N*@R;aDk4QML5dW3HOFE7F1*z
zhy#O3d-DYdRJaHt`NO0mPy=N7Cj7wKvn1zUIOvXNeC5c!_>?h$1;=fLZXM5<*F6S&
zR%N5(QC;l%dJ+a73c<$2YcwO#7~k*Mh+W$1&}{yTbY2aDkrE4h?wgIq(pn;YkpRjq
zEcf|<hswRbsWki`nvJZd(xPgbthWtH>xUw$e4!dAFF~coWvXjjg){cBxsBXg-go{j
zVjLC?16}Jtr)4HMS6#vuH5bUc?F@?2EDZT|kLc<Sg7*AvR9`X}^cVRP>lq37woDs0
zl%K>q=TmXj!qXUdF$U~*X_DQZr8sbHB=+J;5bRN?X;_vAU1QdP$)_TGe1-X!{#s51
zU2CEAQ4N&7yv{2sGQR1-HOw=Th}Zufjg~3-HTu^$>@l!H-G4_C&%KPZZu}p$`J6)9
z?azW!c@tQ36KKzz@nAXs4D=HbW1y%3MjmBMUwzW~do2}vr3!6Ylu$O6vDstU9Cmvw
z=vrA3aqUHJ#yrN;VD~-4he{w&n2GvdSQdCtKJ<=dzKXzX>R8+e_AWdW?MQ-e&8Gk=
zlSrjzCFr&&aZbK@SiRscwA7kOy@NJ^*{Au;xv+`$#4)ycWEJO;Tm@3&La=+tnD&>V
z8UIWh%uK472VH{S>^I`^P}ZB)PGvlg+nmnjCJ=kN)$pUrz&WxJ75q;F8nF5QqJL;g
ze-*lxH*yb&2V))DL$X;WqVH6O=&OY&i2)R;S%LMmV)8T~7Ua)l(%AZ4*i~r_YMaI(
zXffVvr3cMYJp~<XzpwDe0(U;%hWgVlptzNFf?Rz;M*)fTa}kMSS+%GoW57Qn9I7k~
zv0y5XPx}(U?!6*y9-l<6eyeA$#qGp}%{3(>5I&q_S^r&QApDyP4rD8U=@cnO7$>9s
zQxEjcIs<J+&QQ7O5-r_S&&x$c!kxTAv|O-~JL?&RvVzyNazE=P6ZSnaJ55b4`oLe!
z@n~L^h3yBGso>dKkSQBeSX%?pE90?lcsdwdVlz=e8nr#U2|K>4V)yGZ`Ykm9JjNT+
z-rM`}ZaM3ry1(JY^G%5I^h#`=qJUOT=}<Y?7i2v)B&H)Cii!qfoYFAN&^?YNA;lOH
z9?u+{n#BCx7FsmI2mEZ=e!Kk$sJ_#Jn3k28RVyKK+ZgA?x_|`i%0Pk1FwoWU;<SFX
z5%GsjLXZ7>=(<}MamtKbeD7zAy6y>d_-0EKTLIzUv(92$4QlLUb4OEc$Y9=p;Ai#t
z)Q07Ol@1e&H(B`pVhTo(FpLk~1=9VcaK^h9T}S9J9=sJWqgjop*#uhuRUu~=CgTeA
zBy{w8M?O4`g~y#aD05f`kEbm|*6Afh`~IP_XLq?p7jnV-sV3-djH6Ls^guG)mt_Jz
z60_9D)H;u`7iK7+d1wbsuqnVB^WE_P%Zub`wv*BuHO$*0Bjx1(GpAl&Q?eT4^)^z|
zg0(1`d5VAfq6mZYHF2Ks1SV~)MDzWpNM3Rqxv;N^c}e8RHnS)glbeYvTOv{Nrk2#)
z-GC7zGr>%%1%eaTrMpJg;k_GsQTE{(HI(Q=+DZ@XzfuWa$2NfI8_TP)UX>5qJxii|
z_?{E#bjHjEjEYO(gI(GE)g{4P>besQvl(|c_7q&S<Wc9~58CrIlf3A1!CP-6SpI7Z
zcDH#DlX=m2V_O2`r3zWLMvv${-;KXRl3{7XW%%R6qfEhpf_5$nPMCoqd(S`p@)zny
z%!jdc5salimVa@VG3U)zad(cHV5^-K$ZZS2{!7!yntxeO=-n;)W>7imCf_Ab9y;Kw
z-X(~^^Fdm(9p1|Yq4?HazAf}LEY3GYLD5!d*%$%hYb8|sa0rU30d&ET+c7N}oivAF
z&&X)ho?e6|h1n24cOh*tRRQ(MexR%PA16O9g7h0Vg1ku@ITp)uX=x8>_{#?RsJ;;6
zmwN)PV>z1{y_}q76AWnNvi|98BC_29{_G6PGl$HjDXe=LbrQ@+?1A{+kEBv<E8Df1
zF&CE|bbLo>^|;Q5Y|95!n-3v&Dd1q%Lyu>#NA*!>sC-c*_@4QPzWTBmU8fJDevgp0
zHhNIqwJ9|9$!W+GoyGR0*XV>d3sCje6vld41R-iR;P-R`WQ~@`W$#Qe%Jw+p57mQJ
zZxvZ}P6-Wfl%v<kX&~~==Ux9b0BOD#>p@DOHTw}2nX^Cp<xq}{oPaVGjCxgJj;&kb
zNZyfJqH{%!G1Z2E_TnLEzT1MdpFaxUo#)~}Oak+hb6{RqM&<Tq!&kk1Xk?QOKjI?b
zeR(b(8exvkFIX?{a5VLBPa?h>zf-Z28?m=7hw*<h;CHSVeWqT)7v8GKTNgrG=2NQ5
zdC}*q^HHudAKDe-nA2@1bgfXq9EDS$Kgb7_4^D@@S^1bbzZCr^7r^hY5}0;>3aZXM
zOeD`u!7;Rg1dn1FL`4C$bXVZ4+54pEzoYQlRE_oa`^d;?*{EA*0Pk{`hwb<Y;<!SM
zc18se50{;|eVY_pkFq|iBJ(Qqb$sKBPgK>*lQb4mC^FE7?&B5AAv}$!J}CfEsuadY
zCPVAX0cz6I&RJ@l<O`0(;Hd2~gu)<D{TK_A_ZeaHB`x?jW;A}7Qh~F>Z14%&d#tGS
zXZ+%9^xb`gz8cB+kV77E9<7sUV?Yd8S6Gm`h$__85^^jaL<8k|sQE%uI{j=l1|O_~
zcD=V$ayW<o0+HCu=9`XPlUP2~2%Q^@a9~**oj<#Yc~fhsfqpI;?zTW(8#CHo@rHQJ
zI!2U#M^WiAUm~)<Nv>%IG7hN>be1cibfgOuai@r(>l)g==^0gz3n2~HSf*llF;4B@
zfqk|C*eM*#cSL-p3+w_h|2^XOOvdW{m-UxLno#?f3ntvOWak0};!zp^=%o$rTgRc+
ztm|ac<(=3ZkVW(QBj~^*w(lKeO&w0p29f%A5)^8Vm9Cp0{+tm>s+ouj?^NOCg!AaV
zj-AOLZ{fRFMxpvSJLa1-h6k(zrT_6IZ7v@ImcG95<2VJ$!X&JeT_gc)|0y#rsd@Nq
zFIFvZ!OrLAY#y1)Lys~v`$}<XwKu9}{o(rTmqCy2Y1)%k%Kpp%+U7SIjC8`mu|bQP
z|Fe~bwDpmN<Idu%E_3wU5D0e}Gj>wH31%h5(eP47=zMvY@3;_)oyHfqH4*difZqY+
zH^zcG_m=i`yr#}~(!o5QxeT4ggKd~C{%dWG0#{$6elQo*t?EgsnIY39WpL6l!-yr>
zNV0abz71t(!_Y!BoUR7z-=BwGkHOGU<_%fV0(g{^1{qd`U|6n<z8_@lY?j1Jjn>k<
zKVOM>!wg>juLQ;c*Q7^BW}#u2ITh_Rpvu{%m@%Uq{MN9(Y+<&rcbg>%c^gQ?!)EYs
zM-xRVMKske7$k=aAS;>uzkk~d-b=!0+sXj)$)yU{_+G}Nin(ySl=ZZ;F0q;QPvJp@
z#i*{!X4p@b;HkG4(VS&Ay^1apNq-IB+%gVUOpFGJnJagANG!IdeCIuqjX}=nB(~f>
z%6Q*TiMlWmTK)b|vzQSu{0!RzR1Belg=IpIfigCiMk-E<6dnroMl*g4DCX@2k1;|F
zF7}620TJk884hBjO<a+;77X_rjD6GYkby6ZgKV>i&q`qomXmA9FOM|T4jqKaB_b3J
zunyhPXLO)!0`z=d#QN2*X#d$MjKy9;dWT=<1RkR}o11;q`m;PZi&;<8?JO$q?x1o)
zEwuT0lXhJ3$E647p<9<H^SrgvhJ9hU4igaM%s}h00eH&i!}@WTLGoY>b$+Faf%Y${
zokJ|u@hKs@oc%EM*I@jRRE8d3?^E5yCwbSBOI%~kPH2pk$7??rFYMt>qC4G_&r1JB
zODY3kz+)LC_V2-K6%+8RM1<-G%HXc&Fcb%R@{S!{bh{nvg)CZG6Pj#^p=J%3Tzr+Z
z8Qr1PW&7~O>qICln2xPQ$W4(Og+EeF(cEneDIcPW%8j)sfBy<JPjaMDb4!TTj`7fp
z95}BS2U!V6aDB8t-1V=3KKW?UI)|qPGg;4gehGJDJ9Ex#NfTDa-=(7TAYLxd2YUl`
zNU_CsTy3)v;kzkl%wrtr_5{@0RR?!tav8pmgTCaE;2X6VTW5|%)tyRsA~zW2mnWh8
z-S1Q*D-`>b3W>>`c`##%6n(qS5`n{5s^1+>e5$1I=#m(ZMnr+#$;VXMBnQ2gZqR2D
zft5QdY3b6-ocQMkaNT&C>k4Lkf_E91XMT~2<`i&uos*!iJBJGF{fO7OV7Bj?imE{=
zjPpI2tRAxxCr+G?&VxH>^sg+`6BohJ(h4wtQ;&{2wc*%x5ekxz@$1L<gWa7c-0NRO
z(DZX5`Y!!V;<v6NU3Hfr(I)_VCe8rIz9{%m9*I`lPvUG9#wlCH_BBtx6PK@-!P#IT
zm{)kxZ~NIk>CIqJ?rDOh3o@bTW+Q3d8wVkCyg>iYP>^i#;8in2aEwVLh8Hi!JE<|)
z@@O132Zf>c)qjXd+X3)(_(SZ1Pl8}-s=4^qq8e+d8|aU=1`{PIz#|b37&7O;!U*cv
zJC5}g5#tyKD&+n=5+1Y|qSmpz(da@5i7N$}MJ>0qrvMZ)GpVfn1HtEep>p42^3f?D
zy_T6nPtQ2)h`vk}N63L>S1wrX3nZ0Ely!KbS${p2Mn8ANZlAAg4-rMOhi^k`A<N#0
z{t>EGT*i*6XV7Kc5R^tHkSm|e*k>7!Z8OX0pM>$KKW`e0cZp*2@H4PPV*w77ZGgP%
zCqSg5LTqXmVUSZXN*2je*SYKQ%Gd_%`C3gEo!3LV_&l`gDxrQ){z4n8ks#|_j^@@K
zMA~OVyk~fV!G&@-n!~a*nXBmo+bR@3wMWNcdSKHz1}abbVTf@Y3EwMV99JK-Qo2vy
zHJ-sUhuPkK-x1J_`%DE!Dd<-gLDgR+Vri=hRWOQ!-qi)X?8(a-YsQW8syUA>yPgvl
z%^JKEIst19bWm|~3hOTa&DUv%!O20J(Jq~JFV3$A%(aG$x)Esp_zekOeh#}$jOa`M
zVsvv;M0uxDy0ne?ZGI_(mS-eZ4xI;bcg8V?&v2^qq7t%)sbg!h1rZd7AcKz4ez7YF
z9O?_0pM=)mKNF*I=`cl-g_qv!M%TMfiH6dAe044!uN@0Vx9|w)vP&iI+&Z*Y90f;y
z^0;t(9u7MxLB+pL6M@rt7~)rnGfrN{Ua#w9baD-2{Z&9IJ8StR6=CPGljOiSKgN^@
zL-`*Splx#*wa9d6b+6=1+Q(skry|PAVj$$}4x+X19erIG2Z}X^sC?9E@J<^Hx>~EL
zT@!NwBn^eTLY9pydPi@&U0|J@3!r-9FA)0-fd{KDLhvp*v<bDw&z1pLHa!9jx><hn
zmp|!EXrPtZJ+$E8P!RkcLF^ePtx!#b50j^$*8D%TQ=yI@-DiQzX&gEQviD$i7#F+s
z5Xvs@#id_ES*G5IJV-5JjtO;$D^H^KI#<xu;vg+`$Y)M!eW79=+b_&v?=(jXG&pz`
z@|43sY?8ta7*wE3wKfWlRYGs%Xpl@ePDF~6$fLh=pe>4V$(%xQK^tS}<vbw0))^p}
z6(lU$HwXN#C6c~{?AH%<a_=+)pjRjcuaIDn)iw#YPd|$C(`vzW#dR)}G(dC9T4)@;
znB~Q*LF}xAL^c;o@|R-clQTGXa3KhS`$*jW>0qCfhEG((QNTL0rJnDJLt7=aTlA9*
z7nGo-Tf9*9wk0~AW89@~R~SAx4C)`*;NY@Fh_&W0d==}J_)UPK<F`p?)gkDbI1*Yf
zRN;@75XQVe1-GX7;(@n}@teAZln%=T-`^*oPvIEG9bSlw|8{13yz^)i;Q;=B)?!}T
z4$z&L0Oo@<L6Gp<ysU%e={M|RKHWDQybnUHx+WMmi9@L=b9&saqlPoO=&S^PkS(eK
z>5y(>H8KUPMmEzt^A_TFtCS@1At;~sk{nQvfH8d(!!klKaES}}RhWbMSt(d*N{NXp
zN7NNZg7>39BHCaAZoMV+$GlMRUjKp^&R9i;+-t<14?)l|_7fG~T1E9Yo+ZE6F2wk(
zyR_R&3U&*Gq?wK-Q-t<7E1JC@m);Tf&cDW~4vyfW*4*Vwo4-+g10Iz2hoi}BMVcaE
zu7&VbF#ebn@89!cp35?*J6eu$V_wj@C6;WbJPJfUGpTLK5<D<k1v{76a#7#^<(gam
zX5Xcl8vW}^kj{EIzDFald6OE+Qrrmc>E)28z8f+uhoQ(-5oB#oY6b=>K>2?hooiT(
z?bpUfLQxDt2qPhc5bC}b1|ftHLI@#*5TayCdzH4dH|;k{`%H7Kn5bkQLJ~p<p@$xN
z2=Ds8-+Xb@(ae2c*IMU!{?5#9(9%de?1%k9cmGe;)^QF@Kb>c)Z7%TYU=}=?P2Ql^
zekj{jsr<b33^YFp=2v$gN1Yy0tQ}O&|2-IwcCu)p=FBBQyyy@QyKxE>BM{U-lLv9*
z9+X@@z;i~Bcf5B9Qw(^+6^27Vm66VU%Ir~c{4{lC60`X08uk}vqwe}|EM>u22n0(|
zi;)vUumWs`s53j?a-~|WDQ-6n1$WP4oZ(Knk4<%abHD{$e0L~m1>3Rovj;ZMuSDen
z6EwJ8h^@UObl2H}KL@E$-8UR?k{2Agu^ZJ+&%&l<rRZ=Xm>HW4$B@HExXrVlJmUk0
z=Cbt=J)Jb%)B#XkX~f@_g~R+Q=Rmdbn2>LfiWiof<Af!p(DSDpy@q(A-xWLPmF9+v
zf+`UF^szl)Fm*&u2B&v=pq5__t_35}vvw3^U<Xk*dKT}!zArikT}782Vm|+PGz54R
zL%$JMuyv9c+Rts{@{ygDc{lx;MHy#(FCWK;Tl~@Gk&H{yr%;#cKW6pyNF3@aeo#k=
z2IZ8yvPs6TAG=~pe>XV&I}<HOhH}#tYx#l3Xp|qN`+9d{w9|?d^t@9bW5*TrJR1w`
zzY8(%l&@gCzc>GRJ`rRNQ-z~)73~mh`IvUf<c(UuUTmC;PR^5gZ71R&<rpgs*V;kE
zhHQAQGD3|@ec{S$@=8yR;>oRIG@M2H)WB5c9C;KB2jxSG26fE;>I2Pxe)FHbB8f$z
zkAp3|@WQxQti5RjS;;zRAP<MSK8sk)HJY1SL%C-BFsyvE8R5zUcJ*&1f<qUO&Fse3
zeNoYF<ur;~zh0EbJ*AwCD@ebNRc80orWnFm9#!9ug}iJRx;#xogK=xXX6sH^A&;Rx
z!7E_eVZmF!y<|2CXVER*8#P{^=c|5D57#XQT$F&$!_%>&I3G58`k~*l2{@<mB(%pQ
zvn`{8Vg1^^m_INFo6>1lIho!!DVM?2UIdq-FF^L$+pPB2Zg4rT3mLk9*iD5S_P*kV
zF26~`doZumZ*U^)>`ECRLjlyi!_oK|oiTakm@)7-8|O#eNRm%{X)ZBR>#KOfju3En
zFU7#wS<KpB4Kiwsm}rU!mwl~7$-aq7yG=ipnfJ^wb8cUh_q@R}YAyMgg$K}Sqz$B8
zYUlFX5lXAy7loy}w9vBr5o;e93#ywVxz?ZW{KHItiuBuJ_3Wce!|@_hyeX&Wp%U+&
zK80<YAF)lTd$I6T0+t*LB9>oo!RnA3*yM_s`QlivHQgV+#!^P=Jmsq&IDy5c6kcI+
zn>9Wp&)AqCuJ_MU>g&n^-7b^(_0~N!pT6dzt=z2fNHO03mvTDp7eOnl7s!_^LC5PJ
z;QZDP3g`Qx{UiaV#ipR?iP_|-T1~vUH~jd#NNmtZhBp`1VD+~Pi01vEwJaGc$CTp>
z?G324zXT`5mBShIz_0tWsna4&Xw}r_^6#z68zs@;GNw1!SxtsfC*@eWITDTgmh&Cn
z^KpM2Jt|(8na;T+u8MCL%#8tiRx4oSV)Af@R)Im#MV9e$0XNP(15N8sL33R$zhXjN
zB_$`A)UY1_J#!~>!ZGT^SzNrP1ZxjoqOQ>em~z=0B#kk=q-$qX{5a31&Ypp(ZwKSC
zj2@_ylm{j^ec^gfTSSd~W?r8`oSto9PyOHXf5zfJ?%C)!rVuQ}MfhOXIvl@Pf~G0s
zK{Cc!>F{J3t25}$+P_Fxu(SXS29{!@PP#JhS{*n2P|eM+N3pyO=LJ^{Gi;8(isBzK
zh$^4KMxT>noo4_vw;kf2LuFXp?ub^W`tsJQV5l8l4ns>uVUv?O^{J&#FDleQ9SDJp
z2Fk0V9@ce7E?yr3;)|<UV`vN0_@ob_GwV&QcFpADqEx7nGM&l9_e(4CXeQ_}6;%K9
zQ;Nn^nH7qAp>}B@w&ZwnJ;MlY>7Io?DWULkWEQq-&En0Y4dClnF*bI00mV}>ylY*9
zOQSBJyt6Y@8+C^oNpIrO-{-a32B2{xomq8>M%#|PAbMv4_Ww);_0wtKmvk0Rj8mYj
zeHypee1umFSpx}LsW9_zBorljLSvx1Qq*rEs~Gd1cZ#dP*8RXNuBn)#(@<jfPeq-G
zcf7He88%cH!m&w(U~%s%!cQe;G|QOISmI;N@fW(^(nIkQ1MZR-$;3alFn7m9nBboZ
zH8nk$@%!oALSD%y9Y{pS!rkC7UxOFyr=K&UjJFM<-U7`_U^H?a*q+rwyd{9;ywLyO
zA7_sY;=5iZQ=gv(etvoyn@Q(4j6Y3#r1vba(h|b16G!uQ3Tj&zp-ts-XfBLmah+FS
z!h|^TfYVvwK1I2<^HJ2O*#w(LQ(s}69Z1lXa!!8~@{-X!VemTYzAIG9UNs44^Dg0r
ze@m$QY$JHys3L7`1GM(p0!G9I5VhYB9+jq}!Ool9?dyMBQZ*f>|8m2BHv(?dK8$&H
z)&Z>bfgd_y`0|1)j@x$-<s}8gqB4db*CSD+Z3kP{Wx!nn6O`;43uSA_i_tn4$Bo>F
zE|33V5<R9|tyY9j_gux)km-~cGZyw&#Smx111_D*#HfO$V3j-*YX5V=ycq+rK8rka
z<c~S%KMKdZr*GfA73i=ro!Rsx{-c!_j~Qly@8;T~+k6G(mFoHA1?f;Nk7cs?UrRHx
ziJ$%t^}(d8^B4y+4Ajs93`zjSp#-R5SCnI|tKjR3O3)p42|mufK>4&yD4p1a)%uYC
zZO^t+NoV3EJl(>oC5u6CcNYjFHoO-7`}3<D@YWM(Ln23c?k%Co;ux^^bMew9y2D#o
zfKGNGPCl3h8!pD-jd&lN_vHdQfEky6@rO*SDE$3sA$A;$;pz~_?UW)oqw9g4NAJdS
zGySkmYyr-(H~EQ_GJLk|EJ}Oifi7h&Z#0*YhId2A@N~pQIg}mDUWok{TA<Os-LQ7B
z6jyE6L&flan4Nc?vh`=8U^OxuP?AFTwUuCh%ZpjM`f%ePsVE&k1xB1-hIt3iC_mpV
z0{sKDV|{TNDjsX%gveYt{s?f#w8?10Pr*{N^JLqlJ?Cv-w6%V~tv^3y6?A@_P!Sj7
z-$db9jvP<^i9#Re6l`zTW==B}^FKzkpOVjFafv;_;Xf}X^+2X`Z7cD6elXea?W~4$
zGpCw2%J%hpc>C}YKJ`vKe&}=+<1D*D#+%i6ZOT?;(<YFgXe~1gc3~P^1RZMUnZukh
zOn2)%CfZd<pJ5ttuQ;AL5d>$piLuEr8}I7%!!7IN<UiU9)_szhxZ50vdViPo|2G@O
zPuhg`xqo?^ixG;x6)8*`I<vDEdZMV-o0nVAeD2tXJL+yi!^(DMaA*+JzOaM5K~tGk
z^Gl`ZT{<@YTrM<fp90HX<PD7tXOgt<?1;8EHahkb>i33#>_&lE<A`;<eZ&M-J@*ot
zxZhwE1*`Datun0ModkJy3UpLEgl3D&&|Lp7f9_1#$g};yJl2--HyONr{0z9ce>(Lx
zJMtN<67^fiDmjpJ`<gaZy(kE(=Vh@tee$ik&m>>xF0`Ie0P1zKKw{_%66qvCB>T72
zk{2<{2f38(dB-bv4?}CaE4(XdfQra`-lidfkba@e^N%(81t|NpXAP6B1+$;o3h>=F
z6L-?Rw(Qs@H2Gc3)D~1h)cKK2o-75gYlh&knD(4$PnozZg*Qe4w0kXq&9)XewyQ5X
zTpt3x!_%=)nu*QiVVP_~pH0d<=<9S64QCi~zdjqmuSEnlTlT`&BS9EAI2OLn34?~A
zxqQy3D0I-=4?gN=!LOQju>&W9!vV^)P5)ggdtEPx{jZ?9YY$9O^M;ln;&qPo=a~_6
z(8xs$jxWM_ca2nBv3ePba@N4_BmR_O*Z|{9JkSujqjdQ)rCRe^^r`TNGxd3(WoiPl
zW$U=#L%MIh?Sw7{!BG6K84iglN39-8t{!s%>(b*X>yQJA=<mE`Pd-L1?}e%>>S$sf
z&09BR@L-7=+8ww+J?E4aJ#m0le+<LsUUpn{^Ql=&-*KScJAn0%o`oTSk!ZSOAG~l%
zLC3lC;ZoNwm@`rbBl>IxyOKaIV<VNGS2eN!s05hra0XnotGK9bm|4ravvBmkVr*F4
zgZ2*HgyT1k<7T}eG?Fmvx7P#J%@Wz6OLP{#GZJcowsE@!tC%72x5tN!$E2(2P%}P>
zslw;rds7AW<i#MG@u+m^>@#R?NBaN!V@kcJiQIhUK;jC<APjZ{y+2CU`2GU3`%RN<
z-A>lCMjMkBm!gK}DbOgM&iejYgmFF2!I7d6w4Z&9v>-p$y7;Wprq?Aj)*8&}Q3LHx
z-4gasEuj10IeL$u!bh`LWB(dC6wrQiS`QCcai|#DiI02IpL`_59qCd@y%%dk(XU}P
zGdcc&t9pjO{&l7(&dG&a4wUmeJ&Zd&cV?}N`oQ}K#LvB*4t0YzgGia9=<#I?ro^a0
z)WlLWEZ#vn(h#t3T1>m_PGJ1-vheqN0$LyGg)W1_Swn|{iH9hKrMfE8YGsr?x(v2+
z<&1qlgSxLxxu}OeF&sKr&%6q(Y$D%{peEQ43jjRm$L4H{1o!ZZVEbY&_-LvqL($pH
zZCfHZX&itXnP)*K_Y`U^seo+DAvj1J4DF+LbCZEXaauPe4E{}-g{>2rL6$aRycBFN
zhEey1f|=}-kOq{%uRD6<pu?0m3!jO1jW1%YlP)Cu<zS?x;Km*iAlbba8rN+WWWx`E
zgTWrKdcKG?R$mdC-pwHoK}M;TMgjy~o`xCvqq%KJF%-Qk#PUzHb1|h^u<LAQzT^@(
z)#V6TvnF7+Mj1N&jT3A-8$!l}9+a8N1<?dSSx^`Qy8jBqhuX~S^D4orB7sLO|IH3O
zi3h`tabT#m18(*9hClyA;9H6WD4y>HC!07PK|fpdWe=;iIfSysUzqy{;;8QaLc8)T
z)X6+febCOJU+DoC7aqjo_)z?~uK@9L8dy{>fhCStvFdU-YB<hg7K!1|H0c}@?Xlq*
zf$y29^iQdVF}-8H0VvO>pq5i8SbW&Rane!fSYZTHi7z~1>_Chf5DIzAJXl1iJ4pX-
z5Jc?`g1ToB!pbZzYVTnt`ZWin9`#%rp)j*PdXKqg%)$#3>0LLXABd_yn%cJ<0QvOq
z+-%YYG=G|mSuw;&tFVW<6@k<*OS(&b09t$0GSQj6u)RnI4ZX8?#v97?56Gdu(ivPe
zKD5-PWd<brl;JLYx+l$_4ZV-8B9F^oUgs18Q_GVmZ$-1_*(F@><7IGMe-dhUoxny-
z(%q6iE3Gd4C&+B}mx>Fh&-Dp|9nS{f_w%P<;+;rz-5iArZd77!YdKDvQGw;sgJ>|H
zGR|+F^Qg%opiXyxN!1RedNA^c;vUrhrlf+t5dL}eMF^G#qqL+$=|wZghht|jV>Jie
z^CDtk+ktL#HiXnZ=Ide|&_eNr*=_y8jAu_mtsoy3_<1oXde!m3H&rmd+84!^;rvSQ
zDdL2mfwpl;V0)6XS?|yC&`p*2F31(5EL0#Qi!j(W6SH(I(QC>%HvJIsEpQvZw9XsT
z_9)S7yfYXdY@j(+fyN7TS^LLDELfulnyyV{F|QBeoQ7CzrcRI18N`pg-UH$m2k^k<
z_S9DyfG%DKnbQV2n{_G|qjWM!Gnzn*awox-{Mcf>CxXj9g#Bxgm~L7qzar;7wyZ$u
z!L^`zagB$Zkb{zVl^HuPf<;{})3`bWURPw}w3S7$J=zacy$Y4tH}cr$^@Zs2cK~@%
zow@Q^1T=2!47U5a(0iOXQ)S7_G;BA|-IjsI!+tWCno4+h>jK`eIEyfIIM{tm;38??
zlH=tgQJQlZHrlzMsn13JJ9QT}NKc?)wht?wKL9&F8jm4*%ehr-IP@Dg0$uCNuz4PN
zMCP0UyJg#1YyCW^{u>IS{#%rdo3g=rpqwj?Z2(!aD{;1*m9{(PQ9s@*9$^s;gQG$*
zE~Y13BF~pJZwg!yCE|uz*{GA)39POu@a4sD;>>?!5~p22xn-q#dp>p9%twVs4DTZ)
zo@|7O$(?s|c^_jsGpB<{|FpvB@LUKCd(GPyh?#WMU_omwF~t^aq>jAVn7x_26YG-U
zDrp0fiYG$L0V&M-S&S~X6<joBiLh$i2^^nw1#K>j0a3$cQ~iD9&%2e!i@GiaFfBl>
zk0JPBR4G=C4+g_>1zS;ak^17}usbJzow_F~rgq{H-lM?TdJl*Wip-=7=E2^ZerTXR
z4IXt(LYsf=nGKUb|51^UU@{-ow`rm?+~!()o)Ir88N7$))Bf%{c~YyeM(34Ka=HLz
zHt+c0NrN!(oB@hnO)$GOJP%TO{@^p6%OGxT0?Sz3m-z6ZJf%++8>C(U<4#<~Ch8nB
zSyVxuf;CKI*#?-XCPUNIS|)iUQc4D-DZk!|#I38tvG&_a?C4a)CjF<xo|XA@1`LE?
z%?LQPsuIT}Pe!M=YtUe5DXdv>8nG@F^3uzdwfr`7nG(pt9%ZA%-iCL*TMG5Wvij^k
z65Dj*7(7p=o#sQqaRT|>q872e!~F3UWkWu8S&W{U=fTwODi^bGrSn?`hMlYVs3meV
zpbX{r?x9fFD+aUk7s6Y!TxiVr%$yyMf^<ot&|pT(sf|NHJH7-z=blHuj*Cz;bpb2z
zkz-DA7@9a2!NLXWQ8#uuv|jkl|8*aOBT`4;(m5+oZ8`bi)ZEE`Mj0lXe&95Rbb#P%
ztONHzVJf1t`T`IcpEjMOxd=supUi%4NXC%==7RV@C%AL264FvHg6z|9v+Q1*Vf4;1
z=y<w_-`PoX=-|Q3W=JWlTf!kvJCrT`VTCI9-XN8YQZ|B{aBXQe#(YVIwCU9CG-xmC
zzsdt4lh{`&#c1O#<6Y(^;iGNK&}vA5(7ZE&avXyo;$AaP_1%wFX-43DggjD_NOxfq
zs1rvpNn;!nO*R7M@=O#34isumY48pE)6v*w0keB=B&ZfVE6we46-5amGneEVuIuE;
zWbT$Aio0zl?q11i7LSAWXZ@L&xFmCW(X8!6{dFs+qRrjk?8EbU_^PRjx{g{|-MgM3
z(w%;>A;JUp(oC?lz7$ve@8^8=XSX{IL93Wp)K{GZS?NBZD6cc<sjGp@)GbUhaSl&5
zIgj<fi60qfjq8d}!7Xo^^H;rMc6W=gn|&JA9x7rzzRyIPUkA7_AQIw8&)e%A2kK$v
z#L)Q6)eGn@RJ0P@HunXq@de6=i6-cEc{_@{24bC_2gtV{pl8+rFAb)dqx>IMw{I7?
z+$&;LbVoNj8N^|yCfJO&r0(Ox7&mSTIE*A-w@)QVUar6+G}C#w(o1$b?NdB^D_$P%
zg(0C5sOzW%yLdlzShyTSQ`=ac^9fM@9?n%)9|-Cf>30s?4o!>6w|d1ysdhIDFV%TN
z;QBiD@O@vbNu2<LmiW;QD1kKj25vY0wxAob6e|DQj(^ieV6$5R&(M6%$F8TjEe=sa
zvkpJm6AeRJ_$dDh{FU7czc&?t%PAZFHa;J9LpiqGI}fUD3~HjSz>PA83ua}adgB^!
z4*td)f{1^a+aF-UV_r>c15+U!XUcqGT6Qsb<%U3{uOYf@P2z@I^8d$TVs+XQX1r_w
z*uB(-CXp4|HtFFEyHd3OTm)YK&<^|WA&|MbbNG*ZF%{lWG?p?tNB46vp8-t=IE>wv
zf_ZsM`K>2s`Al&UnC1)y`EFatyRaUH=}*U1!wz9a)llwoq>fjam4TD!ld|ni5lrwU
zPS}%dLAuxirrL&Lk-i_7Vb@aer<;75VLs^ENgys~J9u8t#dhrqY_Rfzroc9?8<>H+
zqU2!JF^7lD3>M7OpD;DaMR;-90kg+@f#n_-aPe`3Ye)02{vPQ<dS1{-Jri2mQz7NP
zAI!Zt3ERB9z-sqiE}mZsvqn|n&Cb)YHE961kJ1F&-uuD#<XUu|6AKMvL;26K(O^3;
zk6n2i2d^ejNAB5JE}njkSB&!n8UkTb#bwk8WsJ&?F<2)Mt#&U23o|*a3(SQ6|7L=u
zRHgKKe+leM=YrPdQeN}ojiBDims@i+>Pcu*s=C?=E}<1r&}}jr5VOztINb;19txs6
z$)$d$mveU~AGGjDMbkAEuzuPs6o0b?*ISFQ)%OJ-9jlN0@*<4*_X1R;>7jAdcNX!6
zcq36&km<J@tvq6R%8th@B%b<$?`Pw_6H2_ZawM*JP=p4@{kX>XJJj1}O6;1WN(cX*
zpy4Ru4)3<{=ZQt+6aAxX*^rLY_RyWM!x%o%tbdq10gC#oFjOrR^yEFjepL+0CqGh#
ztgC=7`5Vw$J0IG0^T>zsVtS41O{Lej@o=E{D0!G(Ql^J=z=txd8F7M{OqFx-E)!x_
zjKc_P4X}8B6_5QTAIm}NW%Jd9#!dA7?nYffTlK(iyB=E<d>Q@|tML9hDVjR>1IZwF
zp+)qHFMXCzp3akC@-2d`z7>qhd(OD%EOj|ge#~lnuBX0Rf5@s&#f@cYpgtl4EV@1C
zFS1ow_bL|dM|xwz_#!yavkEUi<oL#W3a-kG#;}8h=;*o^rI|a0`;@<tgln-yeJE?w
zpBT;_GOmbh<xbO|3qh04VPkJE^i@v9yu_`b8+w~rQ1@O<Z!_lpBMDj$M=6ife62Cp
z4{Oshc<;{3(De2JO!M&v>4z6eqjow6W^Dn>pqD)TR1Ri8?Mk`0#UTB!kC3Kyp7sx4
znEJ{EAYROQ-icx;Ck-uY|4!1TOku2=54815Mmzg?+&Z8eRG&DF&RSP_T(JkMxHcLx
zi2ZYGz%nomT+A00UIDi`nuzHQJi}%^lYKnJ`gWYgpceA;ZOX=|F6*IX?J3;amFDg?
z;>nD!V<OwL!nV8gx9|!Ar+Hjye=-(y4P&@`%o-m1P#2XNrI@n)4Ubwkk=acvgVLc%
zsJ=T31G{a8y%#ToR=hyH!AbZ~P~c&Ui`2)Lteh{9Ct>6|CVexVN9~%7x}pOtdB8?I
z6Pt(wNvo3zZoIz<bs@;-gP%=baFJ0)VeLCscR!EMc~A(hNu$w`yvNPMyD}q9(n95q
znAH6oXdDV<ZX>j4m$U{WM44cJ&7ZZWdxP$W3tTH91DwqUfY+H2xH#w{HjZXYemM?G
zj-)|~rY>xCNkHk;1C(Q%3|IDgV@)OL$&XK>uFD`4Hu-|ZkXc+~!wfbtG8pq5@|0Ch
zY3SI|83LY0!F;kr*`4bFJKs*mITufY@wPnPnpCU&<xaX|#&71XNjn(J0bpf10GvK-
z<)Xf8l`di0@Z;)H+-em=JvZgbITNlzuh*sITie7Yq+Z09R${0)g|k+TxBSK{q<ud5
z<!!g2-l9{C=q{y!ot#0kJs!3@d85wNRS;El46GiV2e0!@ym9(oK_ow>__-q&GNzib
znyraUx_2GRfL=^~GDvXVa~M?_RXoDO6_(B1gEMu5!B%$?R@wVQ%)gXXxi*qC+&J<)
z&Vf&JH>1omp;YgC3e=R$1lhkwsWU18=R*OGoJc)kKH;d7E91f!Q*0VQ9*+w&GvPq&
zK42N~mAybRejPKtJqUg-rdh<hGpxTyyQC{J)-tdUC}>9)()}ZAGw=Yf_C+Y4q~c=f
zW|aQ*FSR|$FndHQ@3YJk)}@J1WK(H2#d#Y>EWQdG^oL=?!V<1YECY){iL84<8F@0&
zN;_WUgGSOVrYA3jx|nL#W$SX3cZw2R#@K+N`a6DiS0D2F>vFlb4+IpS!#v%G%-U}w
zsFxJ;I}hh#UV;txB25c>bw`^K%OE7Dp5OA)!m-6^*eU2J%Bf6pvGq!v-?bEfcRP&@
zb^BrCFb@#_bVvPc50p9#<VH8PL&caX&}zKPmUj)qk9U1BesVw5HNFCMCr4q&yrbmr
zBo4JA1Le<-uxhJYZ1TEPC@o%$qH%IORWX)w@?qTJ%NaCiIssYa_tUtrl{F6?3=OrL
zp;})I)p0TK<lQm6LR@=`>(qU_k<P<XKQK=7;V#HoRa7cWJ79r(-ff`nizi%bq8`|v
z3&Y0$$V=6GIk8?U;mL_AG+Y+XOoko?j}TMLaNWTTy>~#JZxGb)N<?v;30i*GiD|Uw
z$oSC2B1$B1e?&B#xs!;io8;Ikv4F*EE)pwx1ZaIT1p}`;%vqp(K*mF*%(IY77CD0X
z#C4Ez_!4XV8z6)njmGSeXW6<PQ?d2tGamES0e$V)U_hY)+HFR$6#YDC7G815mE(eZ
zpg&VDG^8$ziC9y2U07Ngg^slX8VW19)pgRtin2iOeF8)qOh<L8A2E?=2l7zLRa>$^
z7IW3?edr8qevkxaOB1M<$Co-o&x7-!Ss+`j$rmB<^|w+t^9077&JJJ_tYyz_Poc@?
zyBwYevZ!I?pKJ7II-M57ysqc*_&--sWsnaRwmM**xD49Dhy}i6J#{R^K;(j>XmhnE
z{HFY}mC<A7<ZuF7FVTCv-V3~X1`_}85S%f)3bNh{g^=fBF4_^N$hda~o#OBDFS~qE
zomhY3o$py*SS=552uACx3&7&PEnwJt2Q)a_p~0t9%=&{bCd|DAbrm!(?}}2IJSxP7
z>=b6#FbOq=C-J7oL1^XjOzC4&0@k01@j`#M=Q<e}@@gm=_6UUPK~gYP^MV>W$2vz9
zV|(&?ellk*wwrkp8{Yv|_X{NEO;<4fN(|y%HvI0FEYu3P#b$vO%0CQII?pKQPf92b
zs~ZQVp|tNlyqIRPrC@h5oLBS;Ag|MAF3w99B&U1xYj%{c)URVsUY7u7#<S+Pr18iu
z@X#<<%p;A!e8F+Zxcr9W(JL%(W;$wIGY8$h2l=R91?V)$2(RP^!-U}))MfOWH<ye6
zqetUm+Ah)~-tJeb=ASC<vmhASygbpU>o$7c{gsm^cEOstIZRcXfIdAUz*(OG@f?{{
z;tE~IMSy|PEEw-g?9pezN+<HoE$nWEdnZO=5@|BsYbr5f$xb*jiQfB*OL<nenb^$#
z;kCUfuU>W-B|a)4#jKR6KHX&%Z%#wPnaeO|VPDL+y@ov*fY|i-8XNgy56;bW!PWy$
z*q5&h@q?d?xLnh@Xq?E*@N6zr*hhn6E%nfzaHSoLm@D>drfz2qXu9bG-=e8w+fW1r
z@1r5lRVB!@51N?|%Hpz|-@Ku=kUxKxfHt#^u(7IOaJtaUMMj29ht5#hJq2EB3Pf3J
zCj2vb9ol~>g<3nxa&}GyJJ(@QKw6l2q5)Jt-^C&xontzK6EUOV7_UpDXR}`pM164u
z#gBumd4w7&2H5jy=kww7krJ$V)uAj2EJM*JBQt4PKk)0=#zhUYOH0W2*@bk+xWnh6
zWXodQVb9QBxXg@?5NE}{2OoPR1iV&U;^ptop?Kjp<;jOZSd*g7%&8xy=8+YTLh84_
z8w{oHTTy1#&1_=qGVHiN7NoD<vyj6^Alb;2ljddP&zaO~IBgzS4)DgJeo_?nXkts6
zDfBz7kGgKftUici-cQn9KF;Oxt`$t@*a@`!FPRv6<nb%0XCYIQAuub3cMOrh8S0!g
zy<pD+clE>Lo<8VYJORXiN8#7*iI{tRDArA{XDj4;QCEK|cwfDUriZ(7oBILa(%*pF
z?D&_vQQv090NODw%Y-l8-N}Qn7~A%ag$VmOVEp|Ek4j9%rc^DEyeE%``g(9a{+qk3
zS-=9hIb=U|g+JaQu$nYtYt0GZ)zSqdj<KY>I<lIuwY0;xVvW-;qP1cuEO;7*)yWdn
zyXwW=-ly{F=3bzvW~^fRO0ard&1`SXfuE<%@R)K9wgz~C%<xU=u5^x34@PpAFQ#0V
zdavAG(ylCXmoRF+F`9Jq0O|GJ_$g>PR$o<M$WcvVV6JBKHtfOO{%%+l*aeS%2*Os&
zT@dv=jJNF$fV{uom2qEAgTdNV?$GrxH>nQ=_4ftbYH$e?b+O~EgKe43p6O7%YBtn8
z>kjVZGn~Lw_$q857Pt;Ur@(0t;F<=lvU08xPvEW5vr)t@FgTR}=3h%#<H;2u`>-GU
zDt|GDtP@cGC4(~E@wmBX5bnC3gmE1M@y3f0*mD{2%>y=bZ_=(5JE%if{*u`()D&EP
zj)D+BO{`fz1a!ytgwHRdG4Nl~GroCosg(_{n4t{<^#V7`rh|`j7TAYe<5tsyxb-N+
z7fQ;wY2IP+E+rVT6QGf_x|JSh&}}Y<2DTJRX}2tzyVk6F;!IE+Pi4=2$Y0la3M<-B
zz`stU?m4@9{vu#E{<yaiRn;H)WCs^)u}Ol7%@+8xcoS(ugW1MiV(iwi7o86shSQxK
zaB{z8xR~_cyfK|nG;utbu1a7=Gvaw@=|I%+od|Dkt1wDyBA4uI;t^YqfQI>ch#?)c
zH93}de4)?an-nCQN5SrY$74~V3I>>!;jN(=U^n2LV5dJ*sr53Ni$imXMehe%Y5tIM
zjWTkYp3q=Kxq9Pkie8UrVx2=Gv`?`IvCeJ~`q<+Gi(G6Ok^)8scbV;cAJ7>6A6ql8
z09C8UGSeL*%uI2^z;V}^lRTJzcU^;$GB2e(?+WZO3Bp19==pXH=5fo1L6keqKz9}i
zMTJgaSNfja(!S2CXW8=R=U>@~4+ZEq?gU@3xC(ET(T=*vfp|9KKz8sPv+DT5tM|q7
z@7F_cXln*q4<8P}UoW6~FBMGtSqjsZ%h0hsf{8mv^H=tliOYC`%iczreRx7yiN-O+
z0a?XebS6X17(b<AV*p$nAV=H1!O-Vbf0X~+LcXiE(yyt2gX|-q!_6C>{K`U+RivP0
z8h~0eC7{6!aenVokX8K_fEfHu4@$`E(+OoFYcrSQJDJ0YDUkQ?A>OpPj!E+9x$!N8
zPE!=PYC{#B4J#mCE+OqN2ga-{#;|)d4~u^Cn6o*s?)Dk{V_OCd{~>@S@l<bmF*1w7
zF~dk$>KcWs67|vW=tWeu<(Z{i?}WNFX6Ur`k}^--1s23+VbjhSw3_)$X}f+eTIFi-
z{yAYN?Y<axNf4A~MNqIU8YK}7j%hif{P$7bHJtqK?Hl21q%TTa`oWPohA3-zpd3nh
z=8eRH_KQ3VM#G8Ko+o8`-?zYuE41&(-KT6UozK7T^P`SNOEz>O<%MD%3!U>T@W*Y^
zs)|)8|F}#r__7jgj&Ww);?FWdDxl5O32g>+u#v=(F$k<eKaE(f5gCaF{;@ox&lf&g
zb0~^^Pb+1C7dh*kjP2uQqT_>;5c4{lx&{2WM8}x7oOsLI9+3adXdl>|_JDwYsY|)n
zMEGOsj~P(OH^xv#Klut?KN^7IPa3S#?Bk@Njus5~H=Z>(5LyDLcP67J&-;;%(Y`6z
zbDRjHN;;V2nF4Ip&w|18Tvq!BdB!bu9v-#?I}aIz>Putb^9MP&RCs`%C*4u^4oB4_
zBcZ5AAqJ-xf?NJM_<4RKif?s-JeRkEq!anXKNZ7CjU?RXpv2Y0?Q@dM<`owx8^0GI
z`>sDX-K&j-mMgF)n}-#FkNHNcEJ(=asMhB!IQM?ZRZB#$*^l_g1r9vmqYr$3N}i;}
zEll-MrL1{%oCW3;WA)2T%+a_QJ5ABY?AAP9^1~cY{0PRBqf@{nKa%CV^25YtBh(u$
z;MDE*SmSd6?%yrL1cz{R`u71hvLS!L+-%ah2l6e%=_q>ko0qi9!AA(gV<&P!jrRMp
z2Ksysr$Iq*DRn!msCUd0WG6N%D{h~qZh#5gNr<NRY&x2p)&(1u&MnEOIwGbQ8vb5@
z5yMD(x68#74SR^Evmb_ZJA?8yp<uN<iaGhNU{2SLaJ>;hOzXx;_CVJOmuPRrYTJ`c
zr#zE#4*Qwi)K=C}nvLztw?KPr2K5Ri1Ljlj`<)^>m-}G*BW;itZRK8fJRoL$6j<fl
zW)FY%MQPJa)^k=oJfG%=!I8<()J?^Wn)A5qP74beQ>fINVF8VQPQcW_6DZVP0Jo$F
z$dEI7*yGSK?KlK1kVBN61oP$*ll<#7US!nFUAh6Px)dra`cdCR=v!q}(<b&^orA|t
z3mo)R6AcYY*oKP!*xGjom}G0wKEGASv+Tn%BK&yc8ZE(il^AO8y=KOT#-r7<YN5zF
z3hckP@GzSTSTS-f*iS5Bfir@@J0Ko&UQ6-7)mZo;E<j7C18{yVdAO!3N@JvX(03%F
z*sc)NmFDp0peK~9(L^iW%FS=;qp@8eG=I&3>L*EDW1|K*@V+=H=>R$yH}iE}gTQ^(
zIXHYi7uq)ZGd#Q*EPO_?3UzhZ-}4Hw(iCXbqDFVZgG~O#M0lh@9Ita*VC0_VxT`P)
z+<tq3P5gbXch??TllsH`=u0SBnaLZz_@ULEOJFyD8+RF<3N6d#!Mg6zsQRi@s#<O+
zBgpS$%*h8@AI^(<%V5jSp}5V;6{RNXyx?{WI+U7mt=;|5F61bP6r=d(u?oEILyUmS
zY0T#SNxUxeApX`qwmy{JPof!IuNP?pM=gkFUyhq^Mxd_J2#p#Kf#ti0tZ7&ghb3?L
z=Oh($-*y=7gfiB$+zl>^sguug4=OGe!@B`P(IG<2wElU_6IPJF#5x{iDdBJ-)CWu^
zCzI|t7MkqNGQ|pSenoj6E6lope9ks*Ro<01_nORQ8{MfNEeLLzS<{@=7gQ5cOZ$hG
zL1W4zr99U_c&m3BGR{BeP4#~8^I0&;>NQJi&$_~;8N)G7cMP~R-{jrKCDNSqOSx|I
zWq94!77y8zZsXQRDO#U}r@l?4-h_HZOMVXCKN1dZ=mU)>$pgPloh7d3#4>m0A%haB
z6EvDTJ1ZfSv<OM?D<LoA1G^~<M=y(O+<AI1^|z=ntxGiMbkNi#KFL(}waVuAS}@be
z6AqkqrY!qGWk;{Mup%K0nogZzPNB2$(UGZWE>EW}X!67T?1#7d=aQC1d5aGzV7O#5
z`}AfOTJ@q%nD~B>y*CB*p2V@{2|u{#$aP^<cYpAivJ$0*<?Mn_J{Sz12C5lDgw{X5
znVeq$xuq`TebMK&uN)zG=tWq4H5D4m#US}zDyTLD3$oR_q1HVW_f{>(&oilOhbutQ
zKa|@J(*}86JZoR~nTv`lp<N@K>ojT5y<|_R>{usWt=h<MT@3-tzQ-V8Ni0lsHNcpQ
zUg*{8k3W|O!1gIo=rpDm9L-pYI)nN{N8bauD2=*rxA!OCiw(H_`<@@KO2S;y*)=Y7
z1*g}ixW%kXJZ$Jn%zi{Uoqb1HZAk?8BfiD`Vh<Qg_og)g8Mt~y2{zVFS7vQ-#25Wk
zxbnnO6df`Zn(rTCZA(``eSre?UTeW#$1BjNN`eR%fRIu(xUV=1gXlRe68Xa9S6k6`
zh%3x|KM(=uLC2`utogwR3<)EBLKq5)zsHH&+y%{3iT|OK1rs(U!e4e8ZM9RtZk-1-
z%5#K>3NH|k5wP}NEth_xuGP#cE<UpZRZoU0qqb~;w%9M!o0Jb3E^(+=lfdqN%s?ks
z>JA#4!VP~naN`*o3|5_Hc55S*I+8ThIB^8xmZmbtpXI1If_8`B&M@z><Q@EYlS@A@
z;vjj%{ObFI+GFZ@p8-5Pb|XI66h-{*{oJqLK)5pK9MpEThNR`jcspw#>i&rZt4TZf
zm@YZkO8JC!J?3Kj0UxG2A{8y}UIdq|Q@HlFKB!v!jC_x)vBff<dfN5S>E2YZUcp)J
zz+<Rtya;;tPI6&V2K1apxvbj@nB_Ssw%yo+ZRzj%iyetL>FrRQT+|s|w6d77l`o6a
z_U1pdR-)bNAYvF~@SjgrVD44Ptc;yN=T!vs%=JX$ULS;`FKJFZFdHxBr@`|1h`DwS
z*t+4b(o(AA(kty|?Rh<5*!b0`VWkGz8E$A}=>;yGZBY6$2S$fwVZp#6%>FwF4YeOK
zRx<#P_Z*8YUnWCcO&nA&v*9|IM^eu>>6UZHVqJ7FU*xq0a|W%)AuD50!(k_cJYigB
z)<qD_DrV*#M<L`*5K7aRvvdEXpmc?<a=TMkyz^u_-j&Uyj!uQp;`5nuI@_UomKj{%
zF${gQDSH&X1|mKKIDPVkuEb4@8()RSJQe0uoWq%2z0fmmFaGW|4()e5;Vr|`DBpM#
zB#$$des`L=W28T;`{c;7Lp{+caSy+xRnFgbrp{Bj8IzqFAlOirS?oC;W?J{a_aj3v
zE3+I-9u@PMq-$BYtfr1oL#BC$Q&-D0c)o$|29X+UI`I`Wehpwnot0Qa&zkPbk4$Cd
zi*egF!I?-uv>6U)%}l{*u_v!^Qo>$I0Cl762Jx*fFz<Of+6?vrzb?D^SnEhINGN5E
zcRE<b%dQahOU&$l&@RAg7R&25ob`O22722ZK=j34Su=kMv=BQ^HTRJ4_TvEZE*L_~
zBl<q;(A{Y!W&DR;z-#0;Y*j1)3*zf;FEK=UWfIDF7b(pmE~1)OD!e^#6<p75LzhLL
znS&w$H=M7;^`4d3G;ci&nw1F?w#v}PN*nY>l<-#D817ef2nJ7f#Yd}7g5f689!t}3
zk+ugh`)L+G?1ECaZseW#PigPwPmBZ`=(=ACE;hYc$3rU+Hw*;X_AX3zC<fg0Bf0UT
z@%+NZvrxAt1P(OEf~DI~(0jKG20YEDzWGe9-nNl4aQTR1BALyPTnx}!jy3tOxNPY(
zh$}Y%6LC3oDLIZUf8y}4o*Le74}`ja&ct3B2~7)z<JzH8REOT6Fy06)^Qgmi%p~3z
z^oUD;nFvk;$SV}qlly&J%xj1hHmH37`5V^p-D484=YV9qTsRG9Ihvzp{V<$u7>cbA
z$dBvPpIg4$3N1Zjv2o)-s4kHM%x>b%<4&@U%&T1U;0|OH@=<)wo2TqA=N*bSJmr@r
zHXfctJsYRE6D)#mE2A(kX$xF=L!8y-pNdx%k)UEyFs!x&bJFc*oGV4+j6<MjUBGNi
z9H4$`5~%EAVY_}6E;>w1fYk?as4@v7Y@%TD>_{}+RL<<~jA4?Gj>;oDeNon{0%niC
zf_WpU)6=bt`tlN)-S+N~@vI1?YEosy3u0w2Zz|Qbe?vV;!&%jda}cLp0S%$|xuk~!
zvpd!qLiSx|PD=~9=~H#kJJJ*Sw|V2!l;L>FQ$c;BhOE5{fXHIs^wxXVc%DXgkWSph
zafK!P&s&-~Dg)2cZpzC@!4EVOXEEx!vU$d4@Z2VYq<|~bTOP&dR=8m9eknS{#Ii-%
z{%9~Y2AW&m@`nreV3+c2yjmu~nmcq4EKu;gF5yb^qqV$oJb4DX7GUF}L@;U{0@f3v
zx#j-1JSaR0XGHf$E4ATxJ+=&0MAFuPzg#svR50{$0$G-W(D=^@1W7SC1RvvnkH#Uq
z%Y;k6I+M?8Cs(bY%>3v8Fh1preyy9pZFN3u*PDp9Mr8s1isQC-W`ao&dAsylSzxj?
z^vWv2mX|@yFO@RA5A7i;I*0GF5aAgY7i=FC!R$P?3KsJj=y+~|Mfb_)@%Ra8GmFR{
z>;o04W3i^tg8Jnq!K=S1&^FK!EhD<3)}BZP{SQFI)91{xcQ|)c$*}5cCUoU;us;-q
zq7X~v<W-f>F(-sCXjqFID{0^KD<2A0Ak^s{<osYMY8?B+iL3}EdQqTKbb>z07`g*&
zVfjsVXf(eJa~|wP!{IlW)j1J(ec1_({H?O0iaO}#cd%W=UR4E_DHm22p}rYuaM4#;
z$d+GR|BX8u=Iw$I;sxYMe({W}>RhH5A;e`p=e2=-FlsRM#z^<W^D`o}pXd+jj`V$5
z(hH=$^|{g1C}@3f1S|%Y!>>GVyuOL{?OpO%%99xO;Xmr-(7DaJeWcEy&U@MG8GA5$
z#xsUy-?{0yTxMPwPx<?8D2=fYR4<|gx1fG7-%pL$Z<KkxQqQD&7BG{^1@PQy9|qPx
zW!2k6%=JEbNv`jJ_nN10&ExYZ>3cxw)0&P4NY}kSZXd=iUkfhx2f(i%*{C;nA@%Q>
z@{k_S*__9u&sds6=Vhd5T#bU!kE8IvUpDAgy%lu$A#DA%5abWJ^2#OwjZX}Nnpf)F
zZTWUMV@i5Sg(<7(lm%|_)Wh<9FgE*=KD>1cx703%lv~MMF@FaJm&?#gLLQHxUeGke
z1ngQ1l)Ae{aQ{KM7|}!?HSbr<o%|+FCPus|D;`V_uOU9KgR%oXV70Fj4xcRryZ_S2
zTbzul$L>5&ClSMI#F!U33snv`1($((OqQkx&Yt&}V$&$ltE5>hYdZv=4?%gJE0;Zb
zFQ{E7@9``TT>UVESc98|{@0VBx?ByadMZ&fF%8w`q~n%fboUIrMSYi#_}JdW^Y8sz
z2$}woS$(Tv1|Mi1)}Ve)VtND*8i>urqp<Z!;Z`MN&pQ~+j2lKkl%kZnzb`@m=Dz4G
z@_@iU-?(hoBVo~sqiEkt4;p{%Rvz<JN1Lgk;MI1XHym)o8|~$AX;(629N5hp9~~2_
zS2c6>mjUqVmj}+<vITd!&ZcvyijPS-jk18u($?^yV7)m4?=QFn^Y2{+$H)`VvMz+_
zCLCf7<jL1{>k5WVTKs(i<tMj?LGyu6ETWhg^edNPh<mbNyBb-R_#|{+7z?riOW;n}
z1YGJsnvUd^63(4N>7tdO_AwUSdgy@7oHLjmdyZQ?S`IyGEph2FPcZ4U751Jzh?Xb!
zgLIt_KO9C~_LsGo#5)NCG@?N=#GLQhYlTK{E`Z?(F`qbmF1ADzK~wThXx<l%VzYOG
zo8KLN?4M}rYaxAK9E;*VE)Zh4i+?yB1crLA*-!0IoS{zLnm>t2Z*!6B=-UIO!UgN^
z3G8F`8R}mbV@qZ!3yE3ARn-G|YkewQTX+FO4(jp7fg^<sgY_&#Em2Te4-=Y3EClDh
z75sB!Jf7|dLf<|i=wdsBw>u}WXcyYyJBOe|IvQlh&YJCYz5?dEZ!qfzUd-^dKD4=J
zqFvYoZsh9%o?$s){+U6w{Q(|xM!?l?eXz&EwP@oE%!GVH&mX43D32hpd*%YhufGT(
z-}gYnGukZ-&trDC;+U-8D533M0+(lUK}N-a6)it_byPLC-LMIqXuq*qKMS(Ig~LXD
z(%B;eA!S%!a3+04td~R2xf--Z{^8=w5*D^314i*2NO78hqAn3=`y>fYjrYaL?K$-S
z7UQB_#5iCIrkgbbN@RH;k{ni;p6tT4q@i4{rp^r((Hw1eO(88a5)2Kt^37I>Xc?XX
zs)jRg>)=GnRYwc?B_)V!&cV72x}TRh@<Y$^@w<5{*!(B}U5j1dEZGjupU*?s*GhCt
zyTw}j5Jy9v3ZmDvH`$)5G&k^K`r6rGIu_8&@Ex&xhtm0)$WI;B$Ec|<*mLy&a4Xh@
zKiSjCeNZo~rO%;y{x3Gmd=;vW?EuyNbXb>i9(~?Lqm9%8_QfY*<Jjkd$aEWT`BckO
zqKhCUv4lHy^u$kdrl2BgE6llC0FteHpvMN9i}QEVUcyW%3ohm%@mlB>sieNtSz!Jx
z8Kcq;gQaU0Y^e{%SG`K0748U&ZYNV_%bnTvJA|&D&e+kiAC_p&#*Tf|-Rj{%zQBGk
zESctN+l$z(E*t;MK1rVLbokbpIvShIpeQ{KrYX1Jg_nt-yCRPNDLD<*2ZzGCxO})?
zm5JiC0wJV)25$|hQ2w1T8)cGdXxBfLWe-q?F}1YA@1}{ydVhs&;bmz0d<|}Q$V2rx
z+8CAkk=1m6!J}yAeD{Iw6U8sM^NKW%d!nG?{2Oj`s+rrl%ww%#2~78XDi}qcgS@m*
zOq-L6H>5JOZI|=<7fVpAoes8hs#)6?Uv?w67(}^A0{mJ8Rkit8tr^BzJ<dYMl2~?O
z*Hw_84pAyU4aa>mF5#qsCvgAmA!xm14g|YXf8T-~e44Em`Y9g}b0iu|e8S;lCEZEY
zqVUSDvvBWa5o&A>Wq~O(A<sZgoX79%?5m}yp6mq?vy*sShnxow3#K`u3RF}4z)#y2
zthT)8_kV{$`$l6Jph<JaRTb#{yvN7*#^B83h2ZX2h8rGGwsyM_uWqY=nk^;ZwZs5)
z$`)coNhFJZbcA&I!{E?-iyPi|<n>7@U}1R_4c*k>j*ADJaX*Hx_ejGpIu1@t+E|Dz
z80@9vAmsX8Ueoij@}FLF@yPi@nAMZ~?Eg^5k1Piqo@8*x?=B$nf6g>U(>^&kmhM#4
z!+!G;in`HG+AfG!?~riSVHKDv0=Y^VVP;#Qg*&>(q4@A2R`=Tm(wEEePh2>DFNpzX
z%P44iwjQc$ec1NXKG4*m4WH-PVDK|{)a5z&x*zpMwTy(fZ!NKHLKt;81%ulBOCZvl
z$3vFh=Wcz;)8}$S85Ot_9DZ|j7`+1Q-UJF-XUMmym_}z`J<of&hxhR9OMMDsKstM~
z@YbaYyy~t(ZRd2hn;l2LIp$pVOF8)T$O3EkNzn9n8u=?sOmE!q#=vOmb~F9oyK}nW
zVwwb}XOj;kAd_{}RPfW+N>P1J3?CynpwkN-usoxQt}WCLD<8~?9OAiOi#~iFk_Cm6
z&SUymOH8rcNjusi&>ZE28QXlp_`R6P?st?nuJUJArRjn!Gmm9Y6T{{^7f~L+461#`
zv*!A)>}~d096B!=MEe}k`m+If<~`6Q>jf8w8B%U66pWI(VC5WlyuXJSl|P6}rd7e4
z=f<JaqyUtT8bn$r^~rk=V5-QBQhPI3Y><`lM#^woM;C%s%4lf&&mG%)L~*^Y`_NhA
z9Nr-PLoA%)-j@o{_}D4*-sp|~X-;U_ErE?`UWLXv&B_FKUwHCq1@=9^5ChLXW^KbZ
z!kH>#)PAjxamP~G)AjxE>#Q<}-|L5S6anzOjd&H#+5kCw;MUt_o@K8@X-ZEhfHDl!
z)`!g<oR}Td)uj`|-akEnCi^*<J9a^>BZcVY;mwTid}hIq3&62F47P^`p-yQLt{7Jc
z{x>ILv*a;Psd&w`-aq50xf!G_H_cYv3BnGC%P4cXCb$)yfcrUmXlLGEX*y**7`Q9B
z19_n1W>+%B_UBA1<Q{L-y^PalrlQr9NG|ztM9|%N23rrG<PD3qK>enCXzP{+S=VJ~
zW?P0&YB>(|A&$+m87yT?B~uL(NZ*jbu=Fgf`f(bKXNn*pUO`;#uJGX^`LXJw;oyc~
zT>6qjMrTuS+gZs&T$b|BpNYqBa!FZKxebl-h)p<lv!M9b6;z!=$<sTDdG07gbxBX~
z`&>fr*;q5t>yT3WiZ?tWdj>QNQ9!P06RtNshgx)Z&=4QuqG{`4@hmZJ@m4_C$Z%-v
z{g?W1)(fJ*&4TKmb4p=&1$1)@#!L05Xr?0W#mpkgl5fCit4radA{=j7Y=MyHiRgM`
zEq>^c0<_Nn^}%%J_CEqG`?o?)yb1%C`EvipMX0LVr}R3s0`2d4@H*SoAWg4ky32D|
z%{B%6l$SzdP^Pl|b0BoA-40(f9r4EgcpRh`kK*%vA!@K83+p@&O$xqq^UibO{L(oX
zv2Y;hS|0{g;vJT?)DBYu&S8sE&W*Q<!E>WF{_IA5Y-MR!p{irA?q#FNt`*cV`F|Xp
zdq7Rw_x87hBncr@LI_DnYRw@bBq0eQ2_YmQ2}!z1DxrJPR;g5Kcds=^wN;Yq34PNc
z<er2OCxmzW-v5ttj@o;zImdXO&&QV)O<P4fE*imoC5P6kXq2q)G_lh=1R>K3A={Ee
z#GqR7ueKZmFE2q=>lk#>Sw!VB;`}Xf;?@ln%&I*KlS7YVVY@G@?_<WMI1rEZfig6{
z3&o$4Pr!ipn^3dO51L|ofYO#8Ok?wL%3<ka=EG_%x?F|M&j!Jq!2%9F83>!@g~ZO;
z$|tQmj4~}<6mM2S>x5j;o3F&`@3pb<y@OD9SPe_3-n>)uM(Uu&)7fgH>7$(`SZ`E`
zdMl}eL(k$}uH;jiSIO*@FEc}rBv?gPP4~-Hp!$S%@(~>0)Flv0u^j%Tyr;Un53KZz
z#%1?u53aD$wCQ*}lRut-KK(~w(i1-%Q$}}6k3c~sr3ANFS&<VJnajN}4DMNuUlRO5
zL0bt8QcmHa>C|6veI{!9WrAzxX|OEZ3kntFdv<bSRmsO;5ADgjFZaZ4%2gC=6|v#M
zZQjw}08E;4!OPkTY`<nfXp<t?DCRRY=>YC-rqA3<VyU~d+W2M3L^N0+2PHQzsCz%3
z*wYiJV^)L~F~(RoBM3g%uA}U8hH2udC*0&X@jqt~H$!7ONL-dc&9GX!lTb(Vmc3AO
zWjHj_`N6y^fprZ3zzrW0Z*0vJAvU-KojTf>=3wG)57mR_fF|Z-<p|9_S`g&rkM+2p
z4f}B(9TLYubNdy3<oHB%QaBE2CM)m-u{)jnOF(YbBDh=M7FwOtnbl=wbWT_Xi*7DN
zyTZ@>Z|x~u7Kh~WDdY<lgre+FI@ryk9N6DTd><K)#g~Sl`;{)HvF#6Emr)9dQ43+p
z${N&fh{a7yDdTHQgdlM=W_WsmMRz^d`x=U0cWL4Jc^lFEdUToDHgd--^F<0a!>PTc
zSnA{q^X}$BowhBPtY22<RdRrL8lHlIbQjAUmW{0&Jec&s7qMgLZ0O#&oA%A4L7#Xi
z`LiYX(d!ERZhD}dkARogEJLfr8uXfrU^I0BWX`18;Iv&3X&Mb4J*8kWwUKGujb+&`
zHXwP{!QjdX9<?<JD}1!b#WM+8-zkCThETYF;|zvJ=F+|R3U?spwr>wj+`c`7^4j;y
z3P&e$^9h;EWwth(e(x-tSs#PVPWn{KvgDRU(P;269ljYkqMLCts?~Q1q3eHeshXy^
z&maPGh83amlTr+LYz*ctSxl$LVQ89If)S51$wghwbo!4GOV(b*=B=Ya@;_;*Q>Yi%
zZg)bB<8N8)33o30<pZ0~jKWF!DY&v*#2+$W>=^C_z8lPi*_#6JPqrd{FQi>o>ujcB
zR?Fr0HMkJ644+Q*#eRuP(D(LUFzirea?M;J^~Q9{nkRx<(`g~#?GenF7RyFcuHVKj
z7}O?gVQL@F2_~~CV?OK}tC=wuENu6o>a1{(4INC+7(Wi-mGn-jVaa2UqoJH!Q&#r8
zHU1h%=7)$%;b%co|J77y%vUjp`GNU<1Lk(^Dr8J6Mep?~_(cC28f@!>&52puE#4RR
z6Z7oJABILFC(&6@D$Hvtg)|p`)LHBbisN3fpa2A!&oSs+-y3fKdzq;~4^+EV04Wb0
zP`)by>OVV!t5hAD5>r4^a~1SmZi$g2^O#Ph8bpLH0IzrcAln>7oyHQexh;cR-%$fK
zEfaoNkv!67xy-V^KR8)53RzWKQ77#Vzr9Bt7Ip`NZ4}MdN<&4*R@zmL4>L{enZZx-
zB7D!~SUB?txbHp;PtA$jE*T5Q`>)0Jj7wnYq0Q~!75~5fkIs;H?C`>3lq&1N2Zv#p
z=u5mR!$eTpRfX<$<H)1lkLf=P=X$k=d1{4}yfEXa=Ml%$#+jlju_78vPQYQKFsOg$
z0>0a&(A8#(9V=&p^A{OwEI$pt6{~pSV(Jsml~51Ii>2%-MDa%fNT0>vrFBQpW6o=S
z(=-ep-*d(K9AA(hPzSrmjqGN2GLF7D3!mN^h@&4xqRYUA@V_=|w154P1^j5|E``Zp
z_$Xe8*l>^iEIyB~{?PzKisVc7_zUmCmSRi4LI1~0VQX%wVfKy!Sh$iHj%a}OyN?l*
zdnw9}OaRGpB|dK~-OcJ2L3%1>0eVMM7bgXW?ca~`=K1t{wqhA?K5)&SRjkJD8vnky
z0s;&Mfm428&^cNR(Wg=%_Rkd1ZZbeTE5*9Q%G9^D1pUEu&U;kO%!l+QpKdFU4b-7o
z&kW)MxI$=g0%Zzx`Nz5~DB0Utdh1svuIxDsf9B+(uU#H@(s6~m#EICqHx7r_%F%d4
z45W_Fg5Qai81#At>gOha$+$)2W%`FVzb|AhiuZ-gyJUoK2!OHwZNtXK7|gxC3gOTr
zhUb;=zkgydYH1~U=eokH5jjv7w-Jm}qd`96t*8*bAH5O{SmY^3xZog#gI6jsdVVOh
zWE~J{qR+CH759b0xd&0H#1HIZ`k-X7QyIC=g(J25F!TdG7mkkM7v^t3&D%*(`X`5t
z-6TWx?iMEb_`<~FwI4Pg{l))P$U++(MO0c&bH*N7T(jRZZk>0MZ{^cb3@AY&=Rh#1
zL+Dr)@MV)hr~8EXdKLADm95FsUX1;f&fwm2mvHDHnz^g(hW-VsQFG-*R9T`5V~2`p
zn5qml>owqZ*$Fs3mZN1D<L#Q;AhLKI$bt^=jFSJDJU$-n3<Wqk$_LX{ZNi3}N$_a;
z43xJG7R~oOVs<@d!y8U)L%k_5|HW>!32WgQ$NIz0^@XT2x|69LH3JVLdWMbIX&T)l
z2J5Gs7LB5o(Vbd^;L1FZbxlM0&r|5NVJZYK|HGVjJ7N9c{vhdl2u80V$Ib^EF8N;)
z=9y>WrK7}MJ@wwiMoAB{ujasPb8?0aG=lMQ0a%}y%560@Dc_VQ7&?As1Km!bNy!?n
zu3Lun$3HNc&rlZi-$)!3Cql3IFdVd67rXrEOuLZ0BNunjJj0L6UJvChPtG&n_A4MR
zm&5mC@wD^0#-8a_;^VwLx>tQ<vDMQ+Z4G7jyXwR`y^Fj-*2P1=Ed>>`;UHZjhh19*
zygxDyJrY`Z%Zd<Dwx(IA`}Cb_(EYyD<rZ)68UibZlXrPp4wo-p&&(@#3x+|LneX#A
zZ0OfKSQ@(?6AP2k$(Xnll4Imu=m|i(o_SX@G1$2XVkr~lwT1c$+LxeLj1hGU!g>7s
zUMQbRER)&SS+R2{$k)3H3NA62C@;iX{S%lr`5dLPkHP7mnP@ko6m-^Xq&~ll__SVh
zFH!<Un;DR?Y7i?PV1j49665xF9(ZfmLDTf3V4uDQw%n(8V8sl`p0Ea1TLeR@ehf2m
zyu=JMo-x6nx;j>=@I@m49nK~5plLPG5g&?DUk&hjbec&|Z9t8*QA|CR_z{1#1-^O~
zTK19Re~E`_*6E6xkE&Vgl%sI>(G*1U`~0Uy01WW8z}l)x@Gf5u-5=<l_$-Y%{N4=`
zPg77{aROaiPJvBl6|wNEL2Z}=G*ho?e<itcRwu%#?Tcv7Qcb^CduX_|2jr7ph%Wud
zfkydi*nZ3qvx_4sUpEC+j|PLyxPN)*TosIH6qzl#5gHo{$-kO`ss(c}F(`$58XO_l
zMmjl3hKUiMN1?HE5P0>P$s(1C`E;){Am6b-h&~fcKl`ad?C#!NqP^3^Wx^@wsMY6I
zs!^bS=>$_#o5n33yTRBI7AV<cD`>TeC`oh0jxtSVR~f_ai2D%KlwcB-aQEo1M3<JM
zP`a!bhJB(;U!EItuHwwy=?|ACuEvO2x$tVmJaBPb0KwDj(RENWcX3t&*{XirZ-64o
z0=A&1njzZnu>-vXMczBu95dFBBClyaSJI<d%AQ1M*_<K-XlLSo#>eOxs!g882+$|S
zP^3HcPrQBEtGFOGCN~jE)14`k(ICj((@fj-Yq-Wt>IG`;hiBjIs4F}Qe2B9$=@4}b
za&v@BnjF_PhvDkXIPB>2ovC%F<4wN`u$0V$&>^A3iLPU(|G9?tk9E0T)kNOqvIPn=
zi0Q?=!I6hR-=9eNy@{-IQ7RMurQN`uaTwh9B-n4KEN?pT+{aUIBI_Uz^`rYpbq2P*
z2!gKV<6&$>5w`4#5bL|G(dte<OyBH)Wp3opvOmEh4?W`bR;@h8nA|<-mEe(A0-|3o
zn3?TI%ZNa<SUL(^RS$x-Cv77&iL=++)wK1@XEtVMA_^a6Xt;I~3)n|Iuopi3xye>E
zE>p!vg><KBw}BNwo3ZaYH}uWiR+jp5D;zz#72nqxpl{zlV#mg1ylH^|T`%YI>(jIF
ztTA~&-&|rH@&PPJo$hJ<$1{}>6HpqGOOABTzNdwQ>(Euul4%RedQ<lx+5>FNLm_<G
zT5?XE!iK{!T<24+pgAs=|27zpb)*0Cf7@5$sMZ{me}8EjTd|6H%*tc2zinBa>k!J@
z*`vdCJv8?p&Oe4<#f5*eu=|+@M6M%ez(K~YEZB-Q?_-(jyBz2sew5{xFWg;gA%^-r
zV9ufecyFH!wE^+)p6*Y<#fG3V{T)a6z}t6H?(zjO&OQzRd+qBi;+RZOp&tI5$w9EU
zC*8m1PsMTV<hf|r4u$hZ!%)*e?C^feoNSv#rMeZMy)+GnQeOGHZ6H{ls-VuY2K+SE
z#n8rOXw-ioYJaGJ)TLKYvZbud@Zoj-D6kBTwX?w-l7)=hw&dcRi#s1BVd<w}Cams>
zaW{vc^R7dXz90^@trH;oQ3*4@zZ+6}RKYo$vFM$Y2<Dr{lH*97OODkG?K4Kfrs5T7
zUieg$+I$!6Tx$6>J#yi?A3*;E2UOMxfDd=3qQ_(o_QwW6+mZlGev(RV17d%1KX_OX
zhth#t#lIITF~5>{AT7~cs`P>F8+!tr)b9z|r<XwftjidYI2PudyNHTsB4Ez>Ef`St
zn%BH@fHiY1QCh3VWC;<%cim7(Jdw}uriH-0(qz<GtBIw`fiznm1A{aoq2X)_lY|W@
zGukJG5lPhm$DF~=Gn^QFu{?D{0B_R#$s_2T<NKqLeZO%My50Q2QO6$(b*UTI5Cy*K
z(^%r~b5P=Ofm~pL=w!bb)CL=X#P>WnZR`V2yPPm$$}-SVI42stD`p;QerP_bpI~4y
z0+se2!j=@7=n@-)nnF75sji5oi8iQIdYba|7F=Zsu?(wPO<LY{vg~J`pmgUXDqSf=
zq2E?)|EUl5-^#e$!W`dZ*x=`5*|>OQIjUVCE*`T1-C6NiwQ4j9Z8F@p=`2)ytijcv
zhGSRICD1r_n+L3LL?ahsrn+f~;aUq&YOM|3Hh!qOvnNV-m&3kkc_2CABx<O><e8tz
zQ4rQ!DELE8^9f13yPDVmRsKw=?_B14`GBcOng#0`9Z8;3a+~bj2S&=NXp<oql3P#1
zo7Xb<NuD{$rh~%kW5Y327=-rDwBxV7Zql~A7s}2@!ieb{JM8ANzeQ8Awr?TYyZ1#e
z`DVzDlX1E80&&^r5*Th<hQU^jY*WfYbX<BB!k!eP&nyefShxuveXWMp3qx^|{V9k&
zyo|RzJuN7$nTa_MiT9ng5;p&i!iR+=m^|nzsQu&3n#VVB>-9BUHemv{@bBQclVh=K
zDme{q(fhowoVgqbW*Q0(;QJ`hRQf7ITv~kv4=wRRNm5^UwO|&6zIB9c4Ka}JSd6e>
z3$#Y~fcKVMR2V^Q{<x3KXZ~fd*hqIY>f5*ZC}0|$AtPt*2Y34C<O`+?9XmI|?rJZr
z?{$LtRxIaK@xs*Ed0?L$#l{R##E46Ef?;?N>VPunG#|pQxJpQ-y`GJ?F7sA?&*QGF
zMyF8?Oz{!To#VFSi3?}Z;mAWas8<^Cl``16aj`H_r37szZGxS><<NP_7n%;dW`3I!
zs2kA2OsWopZ|+v^Jbw+u`ln;_dGZ;>d}H!K4|!?aan#JSz}7kbERuc>%~MZvt4v4K
zwuwbe?OaT%w?fr%9K0Vla%s7Xu>5WwM!1|5CQ+_Dq=T|7eU(|PJPhXE4ne2+m8{f(
z?!8ODh#RH4xU=~p)VF4`<YC17bj-)w3bW~)Gm_Z(rPRe!!OtCDc$b>9)60n)n!XyM
z-(J8Bx@WcYf5x2`DMIs_i|BRMkuocKywqSMH(Ie9qg(u7{i0o1_~svG`y><$tWTrj
z5c=K>yNVaWqQSXx2Xxmx;=A)GS7St(s0YiqMQ9{QCW)pkE3XRj>zr#|sY164MPQ-T
z&6^sD`>bRI7oSF=-m)L8n3Es;e|oGw#tm901w+$cV*e@y<D-TQh<Hq$mVlL9@53;V
zov)%?%>elMCJSvJ^~5?WB^GhK59?Zy3?q)6z^2#A?AVZ<xGdcnegB<-4e4t1{pvC?
zgaq)WE~iIG80hE|r(U%`(=ky3>xblMxY?6jna_pW1+`$PNlt(Q4|M-<OH^w+&17M8
z&$*KWt|!K$Hti&8dWE2eC-pX>N?GZ;0pRQ3BwW}L11omZJNaP}Wr54UTnvVrK;P@9
z+9o6ghT!L8xv?|{I+tGIw}XqJ@SqPgtltVXO=U1R%LirFo4C2O5ASIzqV=x3EO^Fb
zc;LDRjX$Kq;P3RENmXK7iqz23pbvf<wjNt-v_S2A2sWKx2xH8XKzd{%1UKwvI`7_-
z<DOXi56nRA#W&WnSuQkspGW%(a+Z37f&Tdrkm{79>_wPZ7t+S{mzzVz3<FU3dLFIw
zVp!CA$~|f(@x)G=vkX222ChN${E?!?`(myk5_@;83%DPM1@mot!P&n8wTsdqA}@le
zK99h8hL<4mA4TlSbw{J=)T491fT=!*$hTI>7t4vo7f}GJ^t{zQbA`v#xxQH?9)g{u
z5KR2RklvwC8WYEK4($_Nx6Oksu8UE9Vjnu!6=0Ws64of6=Z2qi*vyR+F*4u{x4D=s
zIM2@JZAO{cu22lIs&850{a6e&oB$Ec+B|7&1-3*uh&l=ZVpGt3c)OI`Vb_5ls}@jE
zm<HBf=gIjbvaZaxY;Jb}whUh{%6jhNiPOSac1<Mfb(UC0bZ?0LDdkPq`*N)yFEmk&
z!^-=K<myWnLLP;Hs#-NzTa!~h=`DZJI{@7KpB3Ee#sS#nGDb`SNq=jYap@AK#+CAx
z22+^&bTfKA-iU;rVt@8$fG;tGJ>#k&LT3fDDPPH?iT%kvN3#y+Q&8Ju6~;!ovks_a
z|LnIwmxbg@S?2=XI{V?;qm|h4cq?nrO$WnXhe3LBFpM-ULc;^L#D+-cP5(}Y9XaIt
ze7-@rsfbY5=Qo?rSD@s0L0Phz5~@8D`Qd=oD4F3@7SJah!ak~@??}39Rs9esrp~?Y
z81dA1V^Hy6FiVV!2Svq!7_cpi7tMA>=Wya&HdMnp?LOFD(Z+2a>_e~qmE0)|v5LMY
zGRJb%O{6SQNj{d=F6NS9@fhLO##LrI0F>t9vO7c2#V7^!R5G!!fx-O4QxFz4QE#ja
z&F-W?tM)<SANjN2K8w*yKL(yli_mw6qiLHdalKyd2V?5fTrW9J97Sh#xO^l!JPBb1
z7t&DGzk+hw!JxPI01OB@hV2EjVQ3~jdvwaV{NR25V|o-uX-mo3Lp$BnC&KMz1)wyw
zFWg$T552FE|IC5-ICn4MX^#^8`6(C5zvrU-k%sVEHxn(lY~p%7TDWuKRN}h-WLFLl
z8+nI2svS^cYQK8ZGjkfKtV&~sseYo4{b7)~1<<+UVVT9jzPPM56y1(^Vdu$%DF5XG
zP5cnMs=5cA8m)x~UbC^(^Bwb=ev*BBa0#yMJx5)<Q>?y!8B3F=g1OZt(??knG~IXw
zTlKQx_r+28eeyzdFI)_genUi^Uk|veU<xi1c0v8$6R2_XD07(cjgLv74Cg;Ln9avL
zR`>KZ@1%aNM|l+AJXI6xYr~k%UdnX`^=4N7JF)EjT3kMNGwKA6fcjD^lwFeWSmlje
zWff&1#+#acl9Zz#3xwcx)u6F)2=mgMgN1($p!C^sE;YU-wB0BLS+yP5%=^n4YOk_o
zm#=`y-*3E!%2Es}y#gNbxwvp*9_7i%;i6t8NPpgB?%zW|VO%n}mJ9{!`!t^zrp@KU
z8wJ7lG^*BXVB$=A&j*nYXWJ|8d;KuEXm0XgCv|vmvIH}>Ov5EU@mR3qC}k5{`1%UU
zyeirANn0o3Z><vSXt*kRHq*X(Nfk4CcNjXJJAtA7To|o)4o|(gfK37KxNC_jST7Fb
zNq-mNm+}I%lz8(ecKMjOIEC`K!^Bvl6tL7t1NoVitZUIo=90e{Sa~=~uzc2H?Tx7B
zA{T867)X=)v+Ku?p|tLUs57>jezrr|04H~HCk=$+v~XOW;Eyh)Bf-}?O7N9$F8lWL
z8d`cDgyZWs5%cXi`w_bl<)M>V`!niPd=O#io=AuYAIM$L(~SM67r3Wsv9o#-wAcU4
zI}|<$9RWG;N;HG%(}~|@wFKN-l0~^nCY0JGf|FJP{}X%#lqVy!Z#fUj!9CGpW&|p4
zT!p^jdj<33#e)2FFhu@IgJUa+yQ0_=QtgI-i|1IFzl-=!^cj2ECdHmU0ccZshPtL!
zFn$C%eKr+9O^G5j84rc}-Mzs3W;L(aaRv6h^@VNEWyB&G#p~Dgf#p$_=w)*jT*|5E
z_3aLuqM45Ig*20)XM4x=B=T1niOyZ_)Up1>bbq{LcaImLWfg*nN`I!-BUy;>xx^#p
zZ3UavGq~rdOv;R9Vf%h9P&zjl;!|iJx6mCjuV#UTVI+DMhJwcRc!-s3WWJtj@q580
zRD4tb<|EBOW1NgT?a1e1udS3TS_HoH_X`d;v|wRUCLFfxjb`7-TReRLGt<n0+TrJM
z@2gmB{kt9_{&~nON4jyF9z(eIRy}w<@Dw(0BR8+p5RBX&5Aq?rOx>IAE0^vvyS<jM
zf!JdiFRydm_%zmgn;))=CRg(>>Kgqxh0-}a#D$^f;LCo0@{=z@L;tfFU4I&G_q>c>
zq{+a(P`+n_hUh-0kV_tJ<-uovQ;yCBgQ&Y*e<2K7=1&(2^G|Y#rhcj4GZ8Gt?gr8R
z1Za1Tz{rj(V0K&vXHQ<G9OPL33LU8Xo+j8;M1f;lAV!A=L3jN!P_uX{Snl*>3HrvU
zIH85L&z}K0i_5`YqmiZBUji*1V#XiZ54HqOymIgwxtrwBe$bS9Xs5*5xPut284K;V
zN_bMv0OUS&j(R+!ETVD>w~Rgy^{?*><2xL%v?z-&`+5$g88aESP;cz{Ilg=J0&ML;
z_g|w>;&hqtN_iNz%vmYe{yGOoHMfzcawq7I{lqn9r-0+=BxovAW0q%kGFj+s__1&`
zsybc<1<Jke49^Gq<tp^f3k2w$iuDI0$-C>%>xw5}^xZP@lssp#pYu@t13e3~BVgLe
zOw?gM!p+`k_+#}>v@`7oo(nReE^{c~lYbJuc_>`m=#4f{9l<rc2bN!+h4DYL$wjAY
z;`vY?qfUEb12M3j=zQ-q#hMvDxyCd<P6DazYf%y|=Pl>TK~JHT_1&+EN~f+u{N7XG
zWVoCw=v|^d1V_vZWlA}NVRya<o|(NJqe`o=#_Iy8MMjB_Vr)@QZ8`iXN+d>#1rD<}
zz=#Vu*g3)oWr1VCaQ#!^)fy{^sEPrnv&)%lLJfSsm59=QU&W4o)?9T|A}lho!-hCl
zc;HZs$`M-;mgk^DuR?A)ZVo%@G7a5dujTFsnz?M#QP6Hp2cw?rz&z-pX+=Eo-<)2T
z1n)I~R+pDt@1_lWE!dCa|A|7c!6sn8dm>K>_P`OFQed*#CRA7K;_cfl0gw4X_qGd6
zQ?>`L?sCH0?ZjZ+ww8xR#Dip?0>+M2fKz=}qWhg-$oMgyd)bl$sgq`yeW&8Vqz&km
z=LFK2E}=9$fVb7UV)FsYAL;pt($;+LK-{%tzn%D}trSAK_v71N#%TUvuh8XU1uu!2
zs9_z)3Td9HmhR71$thz)Ss&SmhfKDh7wU}lMVbD;wB!4aje1;wURrl~gI_0)?m>PZ
zzYlD2>P8HhXAN`c*_BZ;mzAC+&)%SsaB@j9W)&8qjgtx7{us~SADW0(yK12IeJQqV
zrnB<r6d|}{5o-P%2m8sTdBb!xD)n^cRwpXJcflgT=6wm6$;;qZbvVwp%16tzKCpJy
zKzy}lI4J6?f>G8qXnySkohkv`iFO9xR+0<mKP^yy%0a>566LZFGo@fJSmAz*^1s^5
z_~SBk+Wc8?9v%x7)3U(ZXF9ZRRz!*Izoz=W$TaR9<NKv{Xgf0vHWH)pa6l?COe(o@
z#WiqW)sNg&<R#Jg%QT*4@&MzVpcLne^-i1N>bmWy7@Z3$F;j?TAo8DiUWiFIS$5c1
z$e!T<=H<6dBcEM^dB?I~3~|z3WGOJUpEEjp5l?tmwdfwZ1`ZtAN&5k1q{b$6eV7Zk
zu0^6@=M}-dwop`WD&om2v%qDj52|ZBLC9{P`=txFdA5(+C^d2Umhs$O=Oh@WWa2VV
zMLYe=5a&M(&E8z2{(BK-(9Gxvb$c7X`$OTSM6`*j1lzfpu=18BrtY-{eYf3kzwa<K
zbSMyCM-9hdmBVz#&1Bw|oIP;x!K-hmUzdCsf^Qjt<%W4MV^|?hx?K(lUIWnBhW0@x
zhp~6=4midq0v)V}z|DWIqVqSJS3NI<{7>iL$V$r9B}{?DkL1NtCO&gTPpp1A2UEL)
zz+`s@FB-4~TkFSw{(enxQqd9vT8+RX^&>ajql|@#S6FuUU8c5UAEW<iP`H^2sYZpc
z@F{&KKG2!BGn#S&rlN!Abg-^ffV$c=UjJYq&z7jcLBj%+|I?2=ET2pYL$*S@%R630
znPrbBo!sP{D_=b=9h?oPfXdHmG;j|Cebaehy)YCVj=F-*NKI}L+Y8JcE(tzAh<$VM
z2CrR_kMbV{V!WOlVlzH-??o}h{S{bNR0Z<MR;XjF&c^Nz!j|px#KP$&bjLjb(ce#C
z?``p5W_kv<-Z_O5u>u@sUIV#hiP$MC!}Sv`p^It*fAPKqd)dZem(Og>SaKd3A5TWb
z^%}%63x&{-6qebScI^g#SYi1x5RYHRNNYOp=H-eKV#2v+neoiQ5m>Ka%Q}Q?L8)aV
zBbNZEeh-BD_<Mq8xd=1=Mquc+i+t^;9cZ$UTq$FfnB29R<y$0!%79@YEOkJuBUPw8
zJQ!k2?lY+kb!A)TLCg0vm}DD*j~c7tX5S>bpHRQm+#SvHNAb>#cxYPL#nl$<!TS5t
zAz&_f(PsT(JGY;wJ@I4ael{Iz7REwI@Dy?!I5O8aC&76Zpk;y)hH8!gozHtk-xXHS
zur3SDBZ6W7lLG8d&rYRd)x2fgTy8(!7oB5$p@r_qdsBwsws$4yb~XUyfsff$?;@-)
zCGYB%#qi3kjo;S!&RfS0Wr}NRnDZz<CjA&8_V}+5^Dz{6Q=UX^>2uNio;vk2l(}PO
zCBEqs4+W>mFEzCQ$9&3yS=Dn<cGH>f+f)dazP8|;sR`!&qJ=S?JyDfr0V-Mbtl54Q
z^c%Sszn*o)z_41pJ5vMU#!fmb_u}pkvW1TYDVXdkhu|CL(DhfHe8B<eF?ACLTpNm(
zsaHWE2q_Jk#s9Z{J4%XeSmNz?s8UV_ofWO(B<V?%47Eb_k`!iFQNpqG1FwiKgU&I2
zpqMlk?ypJ&8+8*@G1&^q*^{x!@INjeRto>Ft-yWdDG;%>oB4W}n##{>f>gFwEYESl
zR@rXKr4tik**bP3M?`jI3I-f%;ib>I_+{r}v>uhgoul@Gz1t`zE<X=8J3{foyg>Bc
zMEUYCT~r*j7+Oj<2@yF{AZJx6nxC2g##fUdqvQg}b?1OAuL`8Y!^9S~Yh2gR4Sd#y
zq9XN$H_z;Y#XBqThsia}e^dl7rqW#Uiy4JBW`W@Z4e-@0G!5%2!gEKBFygQVv-6Yj
zHl6=Fo9c5zVlkb1k%o;I%AwQ$1q)D@(9R}MG#{7%jY%hQXR9{K%PRS^j-hB0yAdw_
z+>GWghcn?;3iP%K1p6R=$i24&#X-ezbXzpqD9c2XyjjSVkDxm*=H^cvM2*_>%%nFl
za+UUr5x=Ie&bOvuZhw$JRU41{9$Y}FjUUwi-T}VK!%T0l4+XDDK5&%&{N1TE4|9%!
zhBfM7Gm}{9D?%`-d^om`Gl8l*^Kq?t1nPUlurJAHLHXqw7^chcbZjbyjL*k<j>28X
zYhagChK2LvP~EGT-T6-o%@6vP8GfPe{GJjheX$vIj|8%UmoxFhKNC<X;R4j#`9an~
z3#@_bO!C%S95Yc49bX1gHf%S?lL7qvgXL(n-vHbP3ShIW6psv&q5Z~ZtVVq<n7^;3
ze<$^uqJHw;`ZCP<yb7O04#IhX>Eul=!eyz)QCIxV!_Nev(u!GJr>t9?XI=oMY$Q)L
z%Yk;AD|l)14vg;(!tAM)#8J&+zR}fqBsK_TD;I#LRuP<CZ-RQnOlZy9!N(`eM&~6b
z82q;aA}mt`N#7M^-JON-{Z%~TcNMU!rJ3`nFw+;|_T*79!H9b;Vt3OA-miBQPC8eE
zN(Zxf^q(tWot?uL5+k<m{TgV~DL~zom$ByGWe8E@L3d^?Zyoi8`3G;o&V>P>A=$~)
zH}t_jYtnFP1??w#EChYk-#j+if*&*1K)soP5U`~W^>4Cp_1sMK8F>{m)>ZR2Zpi>g
z)X7a!!wvBgOSFB)`tm*KzS3Wm-slVU-&TNO{c~}}>~iem6@=40iqY894kcs8LyY}&
zH2JN}olbogeN_g5m&raT{dkPuX+4AXT7Iymjb;JDJ(dv{07I`+|6%(Jj=8jlqu=Z7
zuZ!^3ohsBIeyZLO>SR9|%{(vZqQ=1<pdzoJ?y3%$E31JIo!wF^Y%uh<B5aGv0K=$<
zy!lKBj|kcdA&r$duqzpZRaMc|cr<#?&VjB<GpIkM#tP_+Wt6Bz+^8aGki;=N|Mj5f
zP{MtCv@jT)hB1lZnBCMD?4lDOV<Pn?9?js^?wL$pFa(-POTbVT#5xz8!Sorm=ty^2
z$)hRYnCpzP?2SDCR&SJ7UuI_w(qLTuaWq;r0^H|rVMA}FgWBp%(4le>KYpj*Tdo=G
zR3^UovG+o^$4p4wXNFqGHeq3|AL}B=vCMc2BtAXPtZ^y5!=Ey*rk!X~H=bqEXUk=<
zC$~_T1;GR55cbaw%wBhjuN{_viKSIwo_GP2uF@>5z8qlWU8cAH8mKy!!>8PJczoVC
zj2)~2Mq_(|{I(bVmzjf3tCR%S9mMSWUlh1BZ-iNTdr_^%6e8%e-gnC?jNDhwyOt7H
zb6FVgzkEA}{d)zYqOwuu(a3Jko&=$`Y3TCnAOt(RvqkFaXz_X^cr?_2O5Z{z$$uy|
z&AQF%f1MOE(l^02>VV0=Ob{&dRLBwcg(*F$z`}2F`0o2Y)J{2%3sz1-mzV26XWbOk
zTR0W$A06Vz$P=fK#*|~y(5d#IV4|Z8n4iZ!K2*Wxv`DzQ=mf0KD5cNTHgMQ}hgoYT
zVD&;ReD-z_p0YiMdTU2>jjb*GLgF>Z&OL*AxB9_13sdy#n1qjKW#gv5i?QHM2{CdC
zAa$b$WZP^8jfIw6da6b+%)Tl}?u<dTCEBd6%!5hvB1+7&4}nTJ9WF-(K=Vr(wCp{`
zf6QEh72z3Rv@@9w)s8ssRRXq7l=7Otlm}m4!as-8U14Xmcsr$tmG)%dF}*i_4KGEP
zHYE-xnpw!~D=<EtxByd3u*+Z^dJUWg*A`R%|NS1(e8pNmX^SsN`D&q6Z6`B~Uk&=P
zSrD&KfReY5dBhI^e_f}}s8K%LPAF#oWiXWfxXtv0O^m*r%+T~8_gGyC%G1iB>+&C_
z(s7lmZJEY&rKh<=EoCtl1Y^hT^P<L|pX_!ILt-D#7i~5kq%7QBQ}f6BM3d2mtO+@}
zUUFy#o(@%mE<k+p0aQQsn0Gdg2CptvVtd`?_EQtU#Ww*)8IC4?_7m2Uc1mcQAqRsY
znHYUA7RLY21e+UsL*nv}e1v{5#4n^?*3B?<I&BD<?~=g9>k!O%5sOV9AG5+w`$0`d
zirtI*KxVxU3X{%)?8O(>Vv#1A1b$}0mBgPi>xV%DC*dFCzIec~6gA=&az#h#B!8F<
zFV<y&lMLB8x5L;{zX3{pTzN~{HEz>56N0vcQclkYt2WEQX~0k}B;~>D1Aa9Bj%GIA
zQ<;3gZWb&HA{J5#q&Z(jO=4*~e9qz*cs1lTWug8vZ8+Rn1r9%n(Gndj#6G_SFLY8c
z^_2vTo~H2pG`cf7o)wzx=JTrDC|GUYlg?HBKxNhmFt}cYCS#VO=)50m7p9@y++I|y
z>H`tN0ao940nBvKMXAXa_V8c?ZmYP4Ci6bA=D$^dzC*zLf`K5vktZ5!ZAR(;Mu>B5
z1|h_r=MCv`V7Y}@)#Fcdo2-WnMvnpwdQNFv*M{u1fs~QThFu-hVK=k_#Z3coo!tdg
z$-V%0;|sB|Ed>tAC*!!ttLPQ4!Ay25b8qPbF4Yn!yFLx`b)r#cvpNg@GX_e-mHF4D
z)9}?dEojWt!K(ShGdU8#-^DLNlf|B}xb*_+4p<2-bF;X0Q34e=$H5n!P<U`83GuKK
zc8i64`XGOZ%t+(y^?~S>I~B}N#h2Ne_;4@FgJ3i&7~P1^YOtb`*p2PXec~#xw92BK
z>2a{hnhM`^!_h_U8|zYLOzY-m496h+_pl9Cgk^(L$48EP{^jzsCW8FdMsV-`!89$s
z!REqTcK5?+a8j7avX6OE2Vxrd2Ilck)@M+`DjBcNoP;(v-g5i8|MA4NIT+2tVM}*8
z>RjlF-6|sd=#z`n?eb}lSR};8oMJ}RHqcH^-#=}+(D2p;UaM83uP-slAAc2Ws`?YN
z@dbCic^Z~4o{Z<lU&6?8AJ8#w;^WU(V~#=~SW|AcYn2ZA{#wre?|vGQ6wZ&PR-=ZR
z3-+uXiq;L}>2$t^&B8g*_(NW?M-<hZ5>2y93645fg$oT&La5I^aCzs-OkSwKo6HP2
zH7t$p4Kc#d--*yZ;Q@EQxfONmCqv{6PE4`s@Wvn&9-i<<w<2$J3eg11ai_W6%8k(U
z<04ppc*Y}+)QFNtE#l$QFlbLd2NBNKIKi4AB5?)i2v=Fft#k}d2!p%s3-D%kHatHA
zXtQTFXeyD*IGnf|(bKSAx){0=ySd|xLNFZElW812!lfO5_@EyLP%YAqH?>9MW|jc%
zx97tju^93`x?#w?SvdciKAMNU;~8VWG8dz{Oro|%TsXrE)z2zGO+yBk-P7kS9?R)&
z=MMIH6%b12WSQeVb|IT)s?oi84xJsKPYTl&dV;U5U0HA=<xi{*z-oGz+73KTbMG^(
z(TbdFlZs%{1_RXDttEJSt7B?Q6}XSJ1vW1Y-DeJg_PPtuV3&%HA;j9y`(4(&;0>3U
znVA~Z|I74#+QZJ#aX2X~7^JU$G4)=N(3up3X3>%0)Ho0tSC)a1RwcT>Uk>8}M&j|t
z6ZmR(B)G0ig0|*p_)Gk)wCm}Z)=1fweYHHeu#Q<?n*wboYCxyq9G$~r_@Ti@*mBkn
zy%SEO)0N|Bl23Dtc|D+}IFpAuR$}{Y7qnUWhuFt4aC6K~w0;o8ZkMM+Z9cJwkDcM&
z_bEF^o_43<iv%Ob>n!<CH3Z!vr`S9*p8D(@^N|FiVbdtw6=aJ?)h}Y|;7QQBe;}6x
z=a%_3oua+)9`wDx2kOquWxTIHHtwh0)7xxvDmseKnm41#76Ya}lzeZtw^^-YIJWjo
zViuOac!aVd1bByn#qM<CPSWS4$pZs!&4+zoQejd8v8%0SLWtRM*co~n^t?l`;eIb@
z^*5%jxuh)nWFud9igthgXQ1P*9=7Zc;8G_8;fqdR?743v&hz0I@%9B9qenffSudF|
zVF(64i>EI5bK&=lK)hCJkJdM4V{H3ep8g>Ob!IQ%-IDuEwo}5o2CKtua#h4{B6gBz
zrT{a0f$jf1h>dHHhTk^8pKXcI<LwBVX=Mq)O4~uQqIa1r(1~}_yC&dHE*MQd3;(@v
z!sqP+ao+cn@UQX!%-0v8ZFMNv&q-&K;>Y2vU**_XcOKgN9Ru}Vd!ePiO7uNu3hwxr
ztzB7#jXRG+)8=NT|JWXyy8~Iv>oN>zi^ryE1G%s62K?-5iNpTN$?LAjn{S4*%~yBe
z?!8&){DJnkISa7k`zNs_Z6!IN1~N;_ZuS%t(aB*DefBP~P_<NaNa+u{Im8Xq+D*BG
zjWE`&0;Tz!y;7p@#>W`mZS4+k%V*;7`C~C+^dFXe!w;NV=Wz4i$60tz4mQsX0eQDS
zm)8tu&S6)X+xN35IeCCZ6w_RLmkjQV)JEC5tzc2Q3Fa&kQ2KW{bj-2<hZYXWR`l78
zA=b*b)BIB<L#1{f9#9YjJ9^}xj**q1HpLB&%*;epR|HAeLuOHS5hO)zrSFGbLfCwX
zW%OIY3m00z(XO)?T%E_%jdPh$K=}t1V&o<%Kx;@7-<OpPujW2udQS!*KAwS2+Y^Ms
zlBF1XQ5)(9hhq=Qy*F;64!d7E)_MeB$J-g8d-xhR-xyAw%sJRIP#@du<H2aDoNL6)
zrt>_V7mvi!^Iu0aKlX%e?z~2O#R<^Tu^&2GhO_Xs)ZHx4A^*v7Vb3&8?ER()vkNbC
zlXnsPk#!v2Ssa0_DScVJ&j)b?Iaw97vmivf5<*<6pmSFlYnV<RG$Due{^N^k1A9RH
zI_iHJCBjs?zuT|RfZ%uAAU@9*_a>3v@>~y4dp(i0+b;o!uvnUDxRv@Y+`v8Z62R-*
zb3RqY0Ie2WgR>U~qVKS=;Gtc}5?5Dq)ej!%ypr}IG#{*8H4gLJ<*4F`-0=Q>HofaK
zxR02@e)c#+p1%=-dzvUn&RUuV+|)yPz7b0D-U@2^S;C;h^z1uO%{r4RG1#n$-PTdZ
zjheOSH2yrQ-&+P}n`6Ob(QDps$u3NcHU-@Q*3gks$qL=5kGhOJgA<ow>D0ljeQPK_
z58j6vzLYh;e^@xQ(gsy2Kyaw)8aCWr2~MFMT*4f<&GS{bW#Kkd%lCrURT~jr^}|ru
z1B%<o4}8@a|0GSoHn(!D3;RW-$cOBHsfb=5e)6^nr>HwqK>5Mb%*$vQm^k)<#JV_4
z%8$U<lhZ-N=PkQ&FBLN^m&4FLL7<oz&Iig|$v^4O9&VB2iK`BH%)<;_ViUl3x^>ys
zuFV*viqP9R8!bPNh3*$y!2PEK^zJwZ;W6g`k4Lbfo5S#h7I9R$E6NNiMBRE0`FcqZ
zGVn4u^_mTO3l+F?Z3Z+{kAU$s15kT$QZTTO!~J74Fu&gnw0J<f_OedV_S15#?Nfu&
zn0P^{Z4fl44n~Xk1y~m`74SqI57Df`%<Hx&@fix@j2h@sT8W8E2Z8F<5_mN+mwE3k
zf;2;SoNllWN3Al!$SsQ?v!oh>mMCG%_=iF&aWT%=Z9u1gK}?$#0kIqIv%v7>m<X#-
zHq{E<oBMMIW6C4d&*qXwU6ZTl)}h3lTrt0PLe~&W*7xxawE5A*Hrg$~{*At9_thEB
z%0-APqY=cF81!|zDb!9FhF$+|fGE@R7`dRHcX@wf&YMCZV^SVu`ctPovYy-j$iZPb
z!%^q+8Bj1fk3)t#p;y;b9^$kKB~9Z>RTk}rnin+3U9T;^dMPC@<YLNW1wz+<6H)Um
zeRgY8xkP2E;1@a@n=b{hzJG&om;G35F{!~95Bs8%9l0dVT;O6Bancv4!ottFps_0m
zV!cPAx%M{Z^l7|c;1i6Kht5HhZ4a3Hyye{b`yJk5wHz+23C4hu#dvpl8rJ)^F}p5u
z7K=N;RpUCZo?DI0N)yrhSOtVUFM#4CKm2pE2tyC*g6H;V_(wT}I0T6x`HzF{Mj5^s
zGYDU_4nTKFkf0Ei3=gkq5eI1`aqI?z+@#yooxC5XW+kEZ;(Ofi^?zLYFhv}!y#TBK
zjKG?JQXXGPeAlvTAo-eYsw3-Qk_6(stbfexF71RZHRN`*+XvEV4$#@V04pj(V9db+
zXlmI3&9_6*(d-I1u1P_io0EkPeP`i>8RhtFgErc%Nn<uYPqO&cL(ol28B=M`lbSq*
zx|BYY=S;yz2i!2*td6brq3n_7IMA_gV-F16(JC|*f_wDk8NXN3+<P>SJi8FwiI>oF
zUlhP^IqI#pV=ku~nYD)tm>8{R@0X9o(vyCmn|+o?w%lb3(MhP2y$R-~6=V2nEu1^U
z9@SQe_*y8(#5BsN?F|R}w-fof9j@qmg66E}Lt*5I3uvr97QI%PGBvgc^xpe`_kSVa
z^u|~0-JA_JoBpzyCKl-SPzq>X$CO4NVVfd%WAM8~xO+Yd?i&ZvzC|Ri`4t%RGz@$v
z)2?UzP&9h*hkFfm;@WFPaxx^a>=o5u?iq)cZp2SE84sylKJZ?fVWL_w*q7;nxBn?P
zpGq$8S<b|Qn8EA*{>v5Uj#_WN03*H`GIJ$s)-co%)Gs!3p`i$Lx;^>q6Du)sL<*Oz
z63fgRBZMVgRp_!@2EhT#nR(P8_?)JNiKEv-eYQ75o^%22mO`jo^NMZWB%^%ATr`ZQ
zx$NB{;!xMZfy<Lnr(O{J9$taA`2~=eM}POl{kUFnB5y6|3&o2Lp!btApfezvEA@ZH
zFG&w#DqaM;>gQadYbZ!f#)3g9ISdzA!|F2ypkcTGyU2MP(fcazotH_^k5KFkBhJ`=
zQ~0n^Zs->k4Uv^aY;%+x-0s#O4s`?TMHkV2dl-7WT1{u=CZ;zg2TVT8dA=kN7SFH7
zV?md3@gpsa=o>DMIhaBnsbQ3bzQYm^yMVRp6COF{F|*lUgVy&Dtj)HAqi+$K+xv@C
zE?J_xRv<>!oCotWzeSw{Gdd$E(R?ZtUEV)n@`J^u=GKb5VgGQDwR(c?>4^}1hdQn=
zeTiFri>XIO;GkPU<c*#VEuSW_mg4D5^4nZ=?F<5J<XmO@Zb<nRi+b1HKx20vG^IJS
z+fNVT;|Hhlb!Z;yS4+^)aXG#kMA?ykK4_>g6LvCX><-?JMha=XZtDjYv0NrrXeYoQ
zn%$^0Co-SGa!5Q@!Pc&dL~H%|@XA078(bf<-$j*ZxrlQ7il@1H?G5HsA~NmLGoY>-
z%cApwz%nC?&pf#d(~1;OP2#`?oxTDsiQC|hDdprEY`EIUxBSn=B6KaS;a*D}A!ETk
z=Az$^IqI*)SNhla)wynH8M+-bX*Q?yLz(*YNtk%(0W+fBbhAbjb5B-;6-Zt6xb1M(
zpb`vS9Khz^4sx~+ff4N|u)gRxND@7TU^#IU?NWHF)-h<^+!M^*=77Dj7Nl*VIkeFX
zkW5i8>!kUn#u#h1uJ{T%sB8!QJ%cfIa}AoGJX2<O{t`Dn76xZFF2U?G<7htH$ZEDz
zH-zhpzpK*F>sln#XD)$1PcFmZ>w)09fcV?F2;(;q+w5LCF@^6lNm2-ytlR)!zfm7F
z>oCN6O@(&qSLqHhBc6pR*qtc?jpM<ryJiUnbZ!Q5m^!wc4;PjCgi}t0SQBIEo<Hd|
z^UB-DhubG$*~}P>NIuF`&LyE-tTMHTbA(b0b5NJ(;NtibXxL7?y3wZ8qlo41H<Y1a
z_<dHR9|`krpMcTp!qLEtSYu8H#J(A}xGpIeaE=+ipnIzcy9gb1rXb(w$qW=CAURhG
zq58DrO^-sA9|fp!qzt-pyZG&0g?y~hSyT#eV`i_S!2L_6(9|X4lAIcDcY)^4|4kLW
zD6{!A?lOkg#DW_AHyjR~VRh^GfV<msHnKhly$oo_eX9Wd+;cE(d>lv>hKq6YMxm2>
zF|m${@vvDD*bf=Z>qq5_(n|xxV9)8;opBhp@0X#zBnUEsT%eBjqO&{);ul#UWQO!Z
zdEpG^aNz;J^ywVRXQ@!e>?~7EGY0+XLqJKxlZS0iLz`!Tf_i%qJb9#vKz9-At`rD1
z*a8WaOVRR5G4JX{`LPj`nKya2*8FcB8qA-G5vgNf`mZ>;cO)=Z^Q+v4_|?iWeNj5j
zO^m&|7tTIjgFUx-qG8@e<~dazyKRji;_4qZw0Ard8~78)dmG6ARfeu{#G1eHO?1lK
zBPyL6M){asbPn~Peey-_B2#Cr-fsNn=8F&sftY({GhQ55ff+%CtSWU8>df6EO6Sz`
z3+B7A>B@Mhr}uch-W@SqAqnokr99-Sc_8~S43yma18Sx5<ax1>*q+S1f)9hy^TEV7
zRf2{;K_FeVmFbUu#y7_m;+IiHaBLTKiL-yR>jm4eL(Kx(%}+x|b*$(LaaeyloLe@B
zL5JyR@R%gyde?VD?DB6+ii1#+zgYbEei1%@n}Dx=4}{<aGhuurWf46`!=u~DkbBo2
z2YefkE$fxI%4lDd+TRk!)~)^jjPUMR%i(=h0LtG?gA>Fvu^zq@8kD7QFS?ZSqx%>&
zDVcBOMJ!$F3eFCtpd^|=uPue>9(;_)UUh)NUPatX?*OE3?c^iK2ctW(H~ts11h>vz
zkLK#0Y+H^<jD!U8UoqM*>_exu38J$lar!WWy_~cjohC@3?wtaP&#9A_{(~v%_Q#A~
znfT9D8P@12gDfjaP%_TLQb{PLdVc01_l>b{{dR1>8VsklW|Oln8pa=`&iv!`#2tOa
z^?+PS>Z#<R>M+H|5B&CZQ&4|B3O4p#g_qAbqr<v*+6|o&2mDisH}9Q>cMB^~t=C8K
z`_6Fqt3e&zK?||LC<5ilCrr)Fw-PULj;XTUIlPusg0Fgq!*{JKP<t*Bbw`edhb0k+
zuhV(6r4l?{B&YL07-&Y5^S4sQvzOjw@lXT$i_XFIG28H;8I)Zza6`3OY3MhLSZz~A
zLZtjS6zUal!`>^o{)g+lIqoFy_<01RDjwpSkjpg3Aa~O4T-FpoPTBJx1TbEUktqWD
z?rvqHa%cu>ZV0w!smPnu{=cisXx{(W>gW^DvUDJ{mOW=ypDsiD3{&_qf>?Rk89?ql
z_R_!#O<u2K(M5J>*xQIZS?y*2cR%tvD)5fh5^>7mN|fE04I5t%!o_A|i1T?y&@9;i
z?NJF}zT%+J^tA$yt{F}&;1D{;UxL*I<X)H;kA~j;QTxalNbXI2<l{={q%xdk>zZT7
zq<TTej|m3Xlkx67${dkL#$ix6X7~}ebaDWfEWJ@SKFl7S-57{;KEI)|1BZRBMlS~^
zo)b}q>YM!8YX1Oie_73K{uF}jVLFStri0RsbY`4(1Uu_?gTu*5U~CbDD`=;vxxp9j
zCu?K7!*K8#XTmyjDC_;Sf@P1W0`1Q!pb_4mxeV3fe;)Z0d-ocb{Nq+;FZ5#_ZF51l
zj|J=rnvQmD7NEntK^}Hf@O`|MyY$g!MpMpUUF#Yqw^$^0{ocbLt*3LagPb2ed=i|7
z%Ruu=9H_p%OpLlSwD+6MHTEnA>ELysaL*qiI*Xv0GWB);n+jfWFPX!nS6t;EWWk<p
zs5v5-t=8_1Yc5*g;r&4n+^?NEXx?DDs!Cw{67cYD8JJ@h_+IR3`sfz%PW{iZx~vRh
z_WeJO&OI!~_4~tvBnhDsLP$agNzJn`2q8&A5=KH1LXyOuPCAncmCB^jQ7WmK_gU3Q
zCB%;1rfoaqw1p6o5Ps|X``6{tb!Fy#pJ%Pl=e|p!Fz5h(oj)FB<-?h6jTXF!Tmnt?
zJL&Asg`XQ;QD%LB*SVI^?yLwqU#>>$+4anpa=sB$cjJ>6Mrb=rVD&#rc*FWw{B<`O
zf-hyE%c~mD5q4u&xdl0a7V=vsW}%caXd8YIC$xWWNSG!8Cl?=Xyg3uT3@#_;&^_im
zQ5U>7q{C}X%7g~{vz(>5DEcxOmM%F%z5QDH`*|oUAs)Er5bEmo!ei|jpncd0NGAf#
z)vwu!G21bDLo<8YZ9M8kYe3+@TvV%lz!X1oS?huAsCl#-+Kk+UUCGPg#ju;i`lda;
z|4hg)^dN4_D3Hu1?vsZf)7IS0rIwGxxkz)@Z(YoCRtjsH7>+K3ui&Se5U6w5%D|(D
z|8Zmkx={D4bB+;A>`1}X8_p=_Z<xEv7}_g~&~9@$n%kY@d;cL0>#DQtEv*C=?si3&
zIhCNFevf-*_re{DgV^9Ok#CC}jFSJZD)omcpkTfVVv1v-?NUE5U1|fCdMUhHXE8Y%
z62U(;6)YdEU=~@Y;n)01c-S%nqRsl@?VGeyou3Q8p$t>c(y&_n37_9Y+@We+VnanU
zhx%=J$HpBUr|!iH^GZmXy%YNtJEB2pca(ix#lIh?Y{Ra1ysK*peD(-Ix&M4-wt>zx
zZ`VRf;X%}|)`0O_XwG%sL74rp7~8T{P}sZ(m%IxB_X9gXHNHt{++F|$8CCG;UL1x!
zQ^IS<6pRgsq>PRy8?sJ}zWa&y-HS1&afQtOTRLc5@B}lRudGpa5G)s7;Y;@QKrjkm
z&+h;>Bn`x5(JCk#O`oBk)QO564E40*zik`@6V!%a^TKwq{bOPzeAoo`bIsUnNFtAz
zC*Q3)iOt`qu&sL*VEy)q;NKXCEu8^4skbM_n9DJ&=Rp2IJrklv>Qn!!KT<9e+aA=g
z+=lbyoV9?Kp2Um3@rl`3QwF3x4S)3&p#EnHR1c?~G~Q*|LswCswTfK3j$HQZEHp0|
zE{>VHmN$;Q3aT?@!n={Haca^IeA)j5hOs&>TdN_CqTcd{L#I%4Oe7Yrxx}5w>(jU*
zhTgB0tavjq<*V0oO}!YHed|2Phh7)U>g>h(YXkT;ogftdosJqtiG2F;F(|6iSEv?V
z0pIf>_@eAMD=(L$S>#2rj$JCWCQOC*Kk~sZBowu1A5}Q&wm941HJ4eR0JD+0Onb&F
zaaCh6{yI|$j~k-#*u4!X>-Lho;=7@@Q#zWCTMzB~5b}&_F^k;MfkD-vy>>HdzU_ta
z)N5}wjf2}IGPu@%0)`Fyz&`yE1L-?!P|^1iW~9)&{h*XP52%5g7ZoT?)&$)Thp=sN
zuGlQ&AOxtC*eA{byKo?woqodGZHONu^8kl|Whk1nhS^-~M$EuG?qWl~3kMCRGdY3!
zh)W?jT^|Z}Gz#`+VOWk!vCvHp4JU^}U^F=dW?Tl7kqe<I^EZp>RVN&Ny8(|>g+NSr
zlJMvjam&r^v89^h=+aya_fVmD(-rE|xUix5F*tH9F<}DoI0kRRs0Z2P>zR$?R}$OA
zXC{=KnG5!Q?p)MwAxn-C!J{A5Xt6W~4D|xU2G6sYS;ujq^O`;<Oh0<{C`IkNi9(1-
zz!rz4Of){#%5eDv;iz~4s`jaI!;E#@SeyF!ri0-|MFdW+n}*(JPvbz-RA?zUh8p_^
zf%;nVGVZ&8b3Z3xu4n+7UD3s+16x?cg5%iv)QpAgf5TG8(a-tvl?8U3g6e(h;QM0+
z*RBe}etRxqLO~h&>W#x0eWOr8JXGoT%i?RdiSzBnl-5tHShr({#yQ0ppt%;qvqs}9
z=i%gEA}CYtS=8T?&+ldnfYK~J+9ei8sLDWVc`cW0-Ndy1(Sv*BPK_x+ct%}{_P+;#
zNm~T&?WCD@ex2p6FE$v{ryn%h&x9e9bFpquDVUW#7uRL&;I|_Ruu3FHOQ8Jpq_u*|
z(FQz-F*vDY2i6sh1{{<QJ)0%e+gQc~aSvQS!Wn&coCa8H4_n_Z#Z%vogHw42*SUTM
zRK?{=nPCa@e6J0a`=}RY)6UL4J`4BS&Vlu1FL=0dC`v+V08F(YCcRi(7{7?U8*>qT
zPFSGylO`+B^~B_J3(3(GD~vu`g!=llyWC{RHT?f%vdnved_#Yz-f)^r>?Vj;ojynY
z%O*blrZ0MawSlg%SiUk~19`EIah(U55EIcP)~cs>UCA{T5x*4;Bjbp}sQ~*dYmlzZ
z1PMuWZ9fG;#m{JTwW@;Z3eIDSEg_6@rTK3SV8t6p?E9byyE(dIi{?qNpjn$qTn3tC
zrLolZ6L6z%DP)_kWf6nUq2uM{P&QGChKClx_QgpMQ#g-H8WJJ>Oa|t{MY?awpjF%*
z-_^OHsM*WP=^imuJSvr5!wg}YM*&J!A;{aRAn#W->YFZrhS*-jD=*}QkuKnOFdfVS
ztHJs8DYVQm0sHh?Vb1A|s5qYp_U@tL+uMkZ=o`texZ2>}+p%C6JC8MJSwVxz3?6&4
z53cCH8nHVA>kFC8_(>kf$tU}%+!<pB<P)#hf*E*Kf;4Zb*x6+!IXbmrZSH;y`!o+$
zjGs&S=}^>8+Xl@hJy@NY9!nTL110@Lg5=j3V)>H$WS28oPVd3a4+(%7(lcltQU+mH
z5}B=6Dm!;597I_^xnn{F{MhD;9|lat<aK_aN*yDFg^b~52TlnZx3;oY>mjiIxh}T9
z{>rnHqnM&`0Wm!)A;EhqxUI{@`{tRL_VWbJTQdm@C(MMU8}rcV!)0`pC&Gl|a*QdB
z6l;t2!EOCGRPRax7p+*PSZoQaZBlXjo=}k4H7Zx;j6pLUnmy=g@_#KT8{cgQ+i;M>
z&Z@bX>=3~<+rz0xH~@?$_r~~pTfu_3b6RslcvE#QY}Y-3hR=F%Re-rT0~SF1I}On7
zlY-6DIV%k6$6Mbn0BO-uai`j0CW2S2t!5@i${K37&*!qTCaV|gy+F%t5X6hlVdus@
zxXTdXZe$v~@(Vz(1!qx~x`C_K{v%%0GX<sZ-mzV)_oAp~zo0*@id>WB;>PnO;Cb#4
zc05W$dpm7*Y&gvp{<tByEO^5HJ9-6!mxxeO+XqsJnP$>@7XG!Jj;@<mqlw{ccI@~m
z)asQClE>4b=yEX4O?`N5SprzU+XmmR>tpj=SD|u+2Rfd8$eWUWvX*giyh(9@ds1@n
zYPB3CbC!eXW}47+NDap-m*V_^Cs6xVU&vUi0UnVDa8N-Q$}=vA2R|E+gEjvk7o9u$
z@0t%zm2>gbv;+*JGo-CfE$F=~#e?e#&^~_!dppkweXB>ZobWv;n)y&c1zc#lugAyN
zhhp<fN8-w^5RxXEpxGV;8crF+MY+qY?rsmnR;44j4VVPIZzzfBo(_^p50s82sl4sg
zOYSy25<0IrK*huoR9{hshCSTCkVi2uF=Y^xnymHvFud_r3rF>1DBTw=Og<?CBbf^r
zd!#}{PdZ<X@yE^RiQ#j}0q-3SChZ-}P<=0-bNmX9N^-&WkP!B5$p!3F9fOPSWzc?W
zA4o>EEAP7|pvz2hU@ZjDwus~-CWnLN3wn+YE&-A1tk{<_2Zg_XGC$Mlh+7=NPjfFm
zdzMPM<Q42!UJZKnnho;)Ht>ES%}+m#0Pk5Zn8OSD{ylbrIUlp}{qK|bo^qF>wNX|H
zK4)>}uz57oal{#i&tsi(At>g>qucXyEZJ=~xLivFc<l@;0*cX2#6eW~KzK-9)_Uik
z+-Yh7^YS<)*1p;w^2@tp^Vu*adNU2Q-Ytg)cqy3J`0}A{Rj~h|7uvU|Gn4K9T(Nu<
z&$Hc&ZTba*bfzh^S4V&yeP&x5kZEbHM^&2(1a3}%mVfNP_e~><C>@6_tx@>G<}cQ?
z(}I_{RpGlY&gj%z2hLxhUh0g4te7%Yk|7b|Cr7fe{nbE7z8C<r9=o9I<_xq<F=K&w
z$H38S56E67!m3!x3X~X%C6`n9pQ&zGdnyU{xTa&%x4j@`-NkutCgAKV>e%#VI-HM0
zTsD3hu2v4i7ZWI-xg#0%e-C5LdZU?k)_91a3`WU`MR21z3G*+}^MU5+^u^;YPy|}@
z3#ofm&m=Z8gr;`7zZmG@a<e>??%~`}o(vtG$si032g8@!g;mRvF#gp87GD#{|FWKj
zGpM^HpBGEB(-++1b`a*DzQOto3dZFASHS5=1{1nb2CgOsz4Dae;GeeO7W<mRz(H_+
zY&A&JZVP2SozXUx&WKYznU~s0Xs^+P;@gYyIHY64!f$;1b~-O)HghAFvE+Ri29I~o
zL4R2|%EAo<Hy>AM);S?=?&92k<ye4o-cUFdL8e{Ea430%_+*GMry2Avy3<Z6#xu|M
zV4W{{zf%>k%6ut`JCt-c>Vp*~HQ*K+1zv40n9j5SroK6edP2j+jk%eqI`}VJeefb>
zmxv)C4d9OboS`oG6^rU0hVivd;2Bo|@^Q<>osC{FNUDR!FReq{Kht?g%y7K%EeicQ
z$@3x4g4nzx=w<nmY54|%dwdknrp#N@lmTczLWV*0)#z?yfTBxtz-@~f6#nc3K@0Lw
zlyX8WyAy6@(l&u%SQ&KP><Lk`mCVa`1|+jgG|-IXg`rRR8!7GX&U~|SJ2Dg`WAE|0
zHwVa1E3lyn#6r0}iK`B;7F6$FGuha;Vv+k=amDvo*k^GPovbPQFx`*;Iz1Ro-pjCg
zX`t9(;(y$I;|AvBpa=8MO~bH3#aMpC3-PN5IDf5yF8?wxUzg9_MSFSSwiT>uTp>Jd
zB1V5ycg%l7d-j1v;JeKmx<4Uy;h7L7EuPB_rs_gNND;Gl|IFj}t78d9!fiJd+F$4c
zby<sen`r^d{WAeVbki|uPb%tc(}b9P<P#}f3<;||c-XqBTw_orYg`w^v(qEM<#-e(
z(|J?+M+w_{JD;+sBiWLd$;44f1ot_od6P#2lNldkVSA$44}E_$Ft7&qqZ6?G+i4U{
z{+DHQ;==_@XCnW>Y%t3wu3c|D?W%{dP>x<>2gB$2XV9i!C3&jn@!y-*;ib@2bep&f
z0uT4cLDE_Tz3<#pItFA}L#({`R2*7Iofvv<YBz@qMn%P}t9}4%sI3C+JbnCOegI45
zN8#_fP1w}Cfjw%Pf^+&E!G`rYApepDmXG(bFmEGfx;+OA-=uKai)TvFjR5N79uRAB
z;-}?Mek9ZwZir%WSgbax5npAsuN+NQB=O|Zdgj}81v{&Z*)fe8%xoA!f4&c<*E-?W
zfNp62tT&4fh)0`i1rReX3Qq?L*tVezUfwu~X6O5Z^}=$nw3G2)b%=_Av_m-XmN6cN
z{?ZIIt53m}Qg7an6u@^hP*yZjABO(2#k!4$dCcY{ZoXtP#P3~%b&rT~H+3&;?VCc*
zS5uh624LI7E8-<RBEi~}_?bNgmi<5*u!+1Ui_Osu{o#C^H#*WD+b(Db`ut^tDF$QF
z@JDa4?cY7%$Z!?NZ{)!#W8&O&ivsVbm$_spW#^hNS*g?f!1vQ=dRMw}9qVy`(GC3j
zUKwb0?FYlKk3xfqAs9|p3Zfob;_ti2Ir@mWMY}gbf!{gGdauNHvFT`iY8)OXu2P-<
zR?v2^$Mto&cufByde1K9+2c-gyiv->#d@RZ<S*PWh<r;0Uzq8|NYt7*6Ae8s5DP*B
zRPnFbYLjr#&U(T^_3hDo-e%eb_Jy+F>A0G*6C%&2R&IT$KQ{3UAAK<meC=JhUO+6^
z>JEiCb_#djn1GrV8DPD8HkvG24{qa1VUTMOnx$V8)OFO*b6N?ss?#1QHwz{lqr3AW
zCD`~Yc)Z_oz~y=TVA?M1jPd0wWCF&&SMZi}N0#?x8QxuU8j|K5$G^0*5Yqm~!kYrk
zs}Bn?8jEoH5*50CcIDYi9x@Z3@vK275sOw`Af9<8^Oop>?E4t59~XoV`#oc3y1lsQ
zLb9^k7bi5*vO(EGO{<>9DjZr(yzcSoyl!D5&u$=(P|rx#H0lbSuR`E{l?@j3N(F`N
z3pdF<#s8z3NSo<bp)i*ion0SU;ei1xX4XttqR|%%MQ;V~FZq-`r<w9!!C>v~2CF|H
zNIQtf6BLiq;#{%8n|xF}8NvG>orP%wV(|9UzBqo-ah(5;8EOwB$IA!_GyM0o(5YSq
zPhzXE-I5#~63XqG-r^5l&qwKcUm^S38SLC}4ty->E1nRAo_&tNkQ<k<-I_Yg`&+qp
zS{8Q-j38gS7e-}J=RKXe2Q{O4;e<+j(cupjtqCYK8VbG}4}j(O?T{=A!z3$zeDnPP
zI=S}3Y;SV0{d&n~W`tn(2RTrGFCGogrhzJK6Fg1~#s=f(VpV8~mF*Z$Fuzs~cb^}@
z38PX#<9ZsnPwrxFGkZa5at65fO=b<(S3t{{K(;7pEmo;%q1oRX!LVEhUr=T{KCdUX
zbtXVy4<&5BTZ3(f-il-LXb*Ay47RPH^JJqf&fbuT+TC-(=zkpXj<?LvBa-X>v_pdr
z8W68{8rq+mg8zWQF!4J1D*jo3?Z4N;s!ApKOqQdY^eTEKRdM;JI<ek_GF*6wx`4_c
zmK1aWMG6gw_=~bRPDhnf*ZScm3Bx08R9|@osG{D4^js^eHuuJCqj7N6zzYSnC``&t
zhvLX;EMFRnhH1HM*C6u0P5Q{&jiLZ|rsBS?3mCj^D(teZgsAaWU|^I-Y$`Y9M>->m
z-t1GFM{a}q%;|uKlkkQp0z~zO(0oT-?6p{+^WJ$BEePbf>bmF{LY=T-<8WK<CCvVM
z6zw-m;z`wosOp|1?py1K(xss=a5OPwa!WCI{1Lc9y#<G#iQu${!}&%njF0=9KZ}cj
zoNH6?MTjd5)kpx7aLN@-%OnQkPj1rX#aiVJ+_3E!&j{ZK(|(?S>hCISYd!%>7xpJ^
zR&O-@o8|~<HwAYeTPAr{B0LyDdDAU-1zBw~yLZqVg=`t>)UJRow|5*I3Yq9%BhaZ%
zgwx(jP;0UwxERr1pemLazHv<V&=GubVlCQ>?0CHHatKi`#mRl?ofW^8+pm4aMZyL_
zrn8G&k)wcS0uX<%2n`o~6AUMWisco7V)HlSK>CJe3hM5VU7HL}Gp<54Gll;eDVN_G
z3_7!)a4+J=+Rs@A_uk1-(@Q`f<81U_v=pr8p8$jKUa;)12s}o!kf@v!;5jspSsI<^
zW<9lG+VmoHdmD+>?ccbu?n>;_D-2<Z1nM1nz&-0MkanIIFY!4EhBM{}`JPHt-$VJ}
zpR>UIj~a-{0H*i8A6nBH$@!mHFe&@ad;JK&+&?ctpy5Sy|Bq$^?-j6}a+PL<a=~ff
zF!)EZ77c!$VKS4?R+i>R=<Xf^uKjC?6R2SRFVkV|cm>WVJBVRJx5DYIv#>5dgw_49
z#5nf}XtX&8mWQ83QO-a?f3^;@Jr~Q4-loj(&_RNwWf(kK;E%~C%R#$xB0v0OGdir0
z;%3)P7(0S^iBGCQG;%T9J|z)k3wwjfksMq-GZU@mbNouVhPn#}z<nff=59{My1!Cc
z`yM6CJ3$+-kp|?f9EsjtSD682CUY&4Kpy8Vw)}^Bov~}c%?P=D_xX@8vW+!HS3%T#
zJ7{vE)mHfgEDxp}!<G!}-y<D2u(QNq5TnuP(;z#N3)MprQuJ!D$k`0rtcF5qb~zZ{
z?ZXqEpXE#ZlTbRkoM$KaW5eL15WM3Uv!9tLwseT*--5HS<CzSr%Okku{sXYYHX9!H
zKfy04oUyt8I6>u{1cu{_gdbh8n9z3_oH#KLV`e`ThpJQ7^OFT;omz&fLoUowP=oyE
zqseU$!`vN1U_vtS0REZ87Is~Q5sTx&>vB4{+)w7$E+?Skvj@|f5y#)V#zM*Xa`;sc
ziW5eY!^AfZWm)&EJc5>?we3uNF<?L1PFw@O-ev-<xeDsN!oZ*MqoVUg3h7OI9`96w
z4PpJko!m(lbca?m-3iYM6R|FmGIV<wXo>y7;Zz2cuB-vU;TYKTh=Zn@6D<F45w?sx
zi7q=gqUrpB(CK@Ed+$%dl1(qUq0MuCZioQ#hy7sMyDX43@3k`9BEzqHHsh3Y5jdkj
zpZY@{Fw1=@dS83WoU975t~m#6UI#*9eKoIrpAGlE5Q900yzjCE?yjo~zSr&9-N7e8
z6!*XLMZBP&dI?MZ9txuNo$U17%eZ=f1j-f{T3zfPM+|&Rv>H^2z9+V$TI*u0e|rUG
zTOy!woE45x%Z1KCwp?qtjJb#I2G75YVei7r_~Atrmb@zACIb?9NgeH_XZb5bzY<5!
zuaNntQ-|8U7jJsjo$2;lh57T=<ATTWC~N)1+BaoF#1aLntq)*f>nW?aCl&s_y9low
zT8WpmE>fm2jJvDT?%LrRH=LQx?T6@+ZruStU8e8xe>u>2(FKN9XTh!?6(GE%Zby_F
zcYlA0Cmh*=YRk5RLt`ecjw1f7n?xxp)gboyC*I+B2~4+L=61Am+NpLCwP>d%yHG1u
zS!c59Db)Aft3t)meQaS3<&IS9g3W{<3=V}tu2KcTOAq5-pBy-USq75(9)i~w5r5_v
z3xn%7pxjYMynRg))c=o9ww@T(Bixx~$vwVN=Md&kBv;MNv!Jz`APMCqe9HhQJco%8
z95@hucxYp@HNc!Vl+`lK<G&`ygZ-nq#I^}y-6(IMm|O;49wyA@b6+qNWz+dW01G58
z83lD;nACBjRmRZt(46T@)iK+_i%a@ev&L~T@Niuk+&xHpviITS&%D6GqI*K{VGW4s
zp~s_JJW#urC->qrz;k_n_?NOlhIDVUjP7Lb+WMhUYAAMZUX1QV!&z95-^||Iku`tb
z%5XQp={u?DV{rx_22aHP1v9bbQYUx+wt-om>&6SGwy^50ula`~w4>HDW7T8R;rE&}
zH0e$Zha1PZyV*Wgy(0~y_UV$lXdLB}p0cE4!KmZ&h)*1F7zd7}9q?%4(9SCc>6pII
zW}S!f)oSqSh$(7(yUhlUjsQ=isnmb&i_Mdd3k~ibC|MfJUj+An=D*L1<yW$?A!{+Y
zwdOF}o#aa^Q=;a}v*6OLn3w?h*tGB)Q+>*UUK=S()wv4fyY-l?qo;WJ#sbuPPAnHM
z;#)7Q6`R?ghK8_qUbxW*%(q;`&YoJtg1n4#RRTtv(vE7PJ@laGj`Zqzu=pN`^6FvS
zP$2`hoW8?NhoGg;el$^A#7ne_QQMNvPe+RBtR%(e&fmO#WdUo^C}-Ms6)-Cz8l572
zp*Fq%wYG+0-O#(l6za!K@@3eM&epQ1Vcg}ybWpbo!snVrIB-Z7%<?^ljV7^7wEkbQ
zb2Pb1m!0EwKf9x`J<Vxa{Xu?(_KTC#!Bp`l^SZQ|XD@SSg~47tnV5+ByLN!Ps2oKu
z`Bs*<ebFWF3zx}nvSTX{MczFvHG*uRc|~vXPXTd(={+QRAog1JhS~och}9)UTyiO&
zds;66@v3QPt*rq4D*D~49y1%&3T6~kfxe4USYhRAF1@=S9S_>Vsks~IoYWir`<H{w
zq+K{WXbWC<Bj0Og2rnre4b>+tp);WysNFw788izn(cL3Vov{{Ga9-K*iF)1c*-WRT
zin@;4Lcb$tz~Y!2I@1oqDR(<QU9ZAv4^Lv%%}TJBQI~4v5{&XJf+?>mP-ePa5T!0w
zm>uchm>7U9iPqFtSq*(x3?MH@3hS2_g$*CPKsxPD-cmG{cZ6KREnXMs+3yJrp>%aQ
zxdJ<_D_Pw0-RNi#1i|~25dYvNYx3)XBmODF9~;Q=YP|pqN@$*JEfYsPr5=^3H>mXd
z1h3T}nc*p4rf0qe>rIPbLa%i2OC_F{<6NfGRf-73T+7G~6M7i3t0y*NU4%WW{TV=e
z?JvBfaSjCEyun9}-hg{Yg~0HrXv{Y`4DtbHJiao76&{KMyZ5J|(0Vc$nO=k=Q?5e7
z;e6iYKs*+MM&9{8lE2?M924G`GwG*WLQyz*Tn5|`3OY8Ei`zt5-+UCpuZ_ojj|(v0
zYYCXQ%Wz@9aqQ8o#G^lIP;~MoH}|**_6<rQwXPUn>>C5}-%j9mM+JsE$AapsSg`Lu
ziWwUD@aDm(lwqftGxa_@OZAz5mIjV;%tjrZe|TG>9y6cl0TTOMW@xCx=efs$@4T5{
z@(4lDqW;_!iTGGY9~>&(jm96sA$7|I=(lwxv976?wwN5>2Dt#_1i9<31l57lu;@4a
zpU%}(O7F%9PDd9oRry>P9-~50?~lUWbBSnPaTua*F9Yufhp~EaU!dIte7$CiqWq!C
zrbb8T{3!z0na=oLrvmfGHZouTRoqQMOv|g}6bTLlU(+YNn!V<;(|hB?larwTh*@a*
zlQK(}f?)EDXt3^eofYaVg|^rsEQ9!X?RT&7#R*j?P4$L4kujJ5T_Uy}DhHM6EJ3s_
zL;N^X9lyL<i-z^n;rJzD&SZ2EH=DdWN7jSJCd!($xKPh|3TmE>M8{JNOmWVKH7xF8
zwvTqBUe#3y->?ovKUOG{$6f|+`2&_PjTj&|cA%T>43HGxXO7paxX7J$b`2fk3WFSQ
zzwnYNGIF4K<s_CnI0~+$1)^eM8qY}e17X}rXb`!R+c<@7H7^5`ju0lXGlarPwZblC
zKB}BL*<`zHV&?*iM7=H>bx*^<Y5`_G-HT0cDWmF>2Tf}yGS#k&V9;8Q@{d=9*S}7n
zXzMev|M@Jiztbet<tad?btpKNhBJ|H7WQ~nqmcpiVWWCrx@ajX7JXx_#NgHWI2Zo&
z&&7<KqhPo+RtRe`0=da9Xtw%8>{?w84&CLTm0g0qkAARMWB#C7PLSCA+#gWV+ln%g
z%izI<V)D~Vc%7pW6CJ)lgM3ej&<{n?uO|w_tIGsY`bucyeqeaxrl5Lp-72QTguOUI
zce*#;xW6L@z1ewm=y4Tv%xDMS{I6K_%Tb{=-V5$TO~8kB=`1EJ3Bv|bFX`}J+$o|g
zS8@~6etbdPJZdm&dF#f1B#1HY+6wGx=!i!u-7vp!C8%6z76&@8Bbxe2$0!dNJ%e8i
zIEBF)G7K|b#}$W4h%-DI8}_dO`+^RkedQGRno@!mSIfcnp@L`Ach-L!v5tgTCSQ`t
zV(Bv>{H*{tkNMy%PQYr{9`GvF8#`NE_{vel`ph0A*lkIHC_#bQ?<cVQnN#2fWkwAn
zx>%HQJ`~=G<CZ(#F~_P0JVBJtFH@%{dCg<?aPn-BEKdgaX>*zP*wf%&?E*GyCZg7}
z)0k%&jlQm#Fyc%m`TtHa#W-?#B=mqxGbfCkau&B|<wI+*5zhKiiEBa~vHtrLUVq~b
z8yPCcvlX=as(Z(b&&LvTsuXWzDzW*@Gv>I3SW}6~Fz$;5>i-qSY-Fn;`SqXNYsxoq
z=@b|A>q-MB>r^(aNdZYV3NUb*1_^2ztX(3b^Ku1RRp(;D`16pxaxpY%o?$I_k?s0V
z2{DK31^>6MU_WLwWH*iD@Q~r_2`6!veG2}npjqDIbIfF$E*!0iLesZ8uy|xGc1^zm
z?o$|l@?ap^zC8n7(~t5hy98Kp_cE&eP4ipm5Z4;4!RoSb@<=X+{Mr+ke}6W-2rlQt
zh@&gFoi1p()^opH#Ls%W(RLkWDsp_mrfvf?Y#Gn)83qBGk*B6Li}uTAU_|#H(O9LT
zb>DZs=06Wy_d!615vQSJu@Vee8IM`84O+^_;+eRaxX_>&7k+WUifQ>M`<eCszK!?`
zvAk{CNb2GJ0dB)Pc<j$4u)ZA+3Qcd8cgO`Zl$J2z=61@0Z9|nafT}v0J)^mJm~;X8
zCGK#W_d8I3$v$Sj^9IkooCpP*Gr?&7N$60JKVZq<Vm*Ui*j~IAL}mTBc60>%SAtk*
z^FZ8mf#bv5dLY|=QEZoe5@k<Ilsd729EaZGE&5|1XY4`z_D~<Sl*LR{=pl597QyN=
za^Cco!otjxke6eQ_9j=w+B2P);rgM%PDL>~-hImx&V=x12c#(5yPE5Cp2MB-A?Uq-
z2H5{Ko5{<FjT?9p>+4VOhOlfnSZ0kH!%uMS0WrK~OBNR{XTp|5e|&a16IVPVzSZf=
z&~@I6cxL6;`O}C&{8={E9?@Y;DMZC7nAW=90PUwiJ7W#D-{?=dX&ZX}Ayex&7^HVC
z#HQK1VTn&QWNf4C|LS&DSChfRdMt;;TQQV*oCr?uJ;7-;!)86Y4>o6G)YHqLvY3Gx
zq6y%WwhVQ;U4i<tG}a_O1O6HrkX>&M%`=)=<3Ve3s5*k{!Gl<@69k_;EYbDfOw9gh
z0nx{D&~VpavBl_O80fFUu!vOFbjt}|WbH=ZxRX3)XA(-{y#=j?7n~wXT=A+89C%WQ
z^BRk=UD3kbzLqgB$2PW<@==}NFF^d)eejg#Md{>V5^Wm7w>qW5c7p_PoH88oH1T7*
zZxxI7gH@~SASx;}S)EY`c6A*F-{|AmaY{)U?hGdBb6zO?v>eQ9%GkJRu{iw89F%?D
zYn8An93=S%K#`zJ9-7^7<oF2?DKo6(S9-x`yD=#HG)Ad&%>q^>(s}RXL{`@{41>GI
zg5^~!-u!)t*yZ<qX5yR4&F{E@WQ&zpzqT7|@3Rox-;IZnlO|*H{#jxtyV;bNd@Q)^
z5|}J1gPW#DgLZx&@Jt$v+1>rXEBKqZxo$m#O(Bk{y&s(VSOX(2Uq-X1`#>gM4_>D}
z@M$51pt&^^8dojGLl>O!`+S<Qbf<ISt&vz0c!*fZ-C=P`3fg}z!wF+2;xT>#!px^a
zUmzw%>qu~ad4!j|BHm7rF)X@MMV=%pZ2Ekhj}FLzjHC6;J>xLI{_C_q^9GHAO{{Z<
zoc!l>CmVGc6NItc;As}~2|NuGI|DKM#XHu#_lQ_gm;*6w61FLOBl)LZ@a(^rGL!Z$
z_T;~D*i0Sq>g54YSo)PUJlO`)z4L_8l2mZocb!juUrm{<RCIc#;O@WdF#m=O-erfN
zVtfFsdQ81OEgPXlcN+`#3}W;1XJY=7GJfPZ&63Q<u_rUuV0Ne)WH^N}S;SDbciuTj
z*g%=6>2V-h@P#)Y_=`7Q=>xKe5G%{A+xdFc4vh4R#8wA6-=&!j+FPx7#;GFK*k*yH
zw{$Qf;3^cFIP<&jqaoGuB-~a~XD$3b*Vs9lzZnq%71!xm<f-5;ZNy!Cbzd2AS_uV_
zeR1)-ov0Za4ywaP1!+l>*nZz0sIrR$$(-xLD{>pWp=Xf&fi+;*dpzqd%Y=Ut5i|6Q
zAm)-aINDMUsdhCKj?55`zz}#gN{$*UO+deJEPwc(_)OPw$VXVmI;YHoZ(cTde&2C$
zbFPH2qi4upu!tv=jesq!BWS0+g~h(sM4boGEcv92HKrOs_TzZ!*2*B?tr86*KrHL~
zggn#kV0gV)To$g2&8OYPqKc1}(#H<s(MO`OsYn6=5doMVISlI`t)R0by|<1jnTnmm
zgtINI+3l?Ov!*WE&zZwTPrK2(P{xz370_%`E=aPg#hv%<$@$o7nLF+b%6j;~5zBJw
zQAx2I8-({xk%RrnRM4<nf+bQJ{uir+Y3s7kea`=SI_to9++=3o&zs*M){fe0SMc5W
zk+=LCg@%D+;C*Exd|$N>C6nnaZs5sdG9y`f&;*<uX@Kq8u3#`}7I;m%BhDx)=FL-P
zLHqf1p1kh>bUwNWsy~zQ^_Fb3KQtE1ESy>QPjc`ju3lr_ZC+qdjtwK?S=hvOcISQ;
zs&;B%%@b!dEI!EJ|13w1`?>7RD)MJq{wr>-A?~&MKJL+PBAOqqgeDt-OEm51-8v3z
z$fqr9xx^X-@=``Pg7-!}=(*ksZx_U(^EGwa;rgIFUCc9PIfH0(CDcc&k!OdzA-{<4
zca}0|M|Oh#5+|lVYAtKt;x9HZ(T3v8vDk}C(881QF#E?bO+gOX>rOJKKbL@_H4w~l
zX%4et9`BYLfEvPEHtypIH2NzWT_%KJi<cKG3SEzJ?Q~8aqF~SM^-y)Hp2@Bs;{9h?
zV65$Aa#)@fo29G(on?>N3oi~jr5ePo)xk5yhp@@ppLT`Y(fpGge9FrJq3IZu=!W3_
z@mKMMojUv+Vui^QqL_RtFx!;j&~$kx1ggcssRv1LC+#YB&5OnM#!AqcNVDv<Dcq(?
z4J37rV)ve(SoNkE^qpR7C7QEaX**iMrA2a-Gs;@AQ0x(?hcV>`>1TC^wjtxec(xp!
zjnmL!S8w8?^khR~7okqZdmcud$!u1L{nUdYx+((wb)2AXp$W*AgHRY7$>dKDve&dn
zxa~xDU11lPr4D3;s|N!87C>|`-)fFhj^;kG#Ff3qjFbVe;YoMm369{mixIPvYI(vw
zEn=Hf&%DwG<cTI&_{D)2R-NZM>n6b;#K8J7G!$L(e{<7VOVIYX$+joQL6@OFWQ;Jx
zr!8mjR9!5}=*}6~>k6th5}Vp06m$;K&YkX#n$HSQ<L)PxyO{i;t?H0%pMe?Fn=$l>
z1E)VnaMc1+Xnl5r*hHzQ|8Nf56icq81;@A(1aUUG0^hGcLOkgqT>E-AR{!lP>$kTO
zOyX%G{Z2qnV>j5UU5l$7g0PkLsiU7<f|!LHnO#^SG>bQgV^&@hYV)aYk(6T9^=>&{
zeq@C4H{)6Jvj{=roB@qXGNAXQa%`MA3q*eogXM9qxV{%LNvY%O@;wK17W~6%iKo$b
z$wib;T+8)qE<>38ZT{apU&`EW<mQ^ypx5L=9IQAoR8)a}|EVayD~9)BIg}v_<J*-%
zSoi4yc#%W2$%8npz0)XfF2c#hWyE2rVm*4tqN$OBB~W)N`<ylCj<ZA?6E!C5wNmIP
znTBSsXF_3{E%TcFin%T>L@kFvrg}OF+ME5M>fK4Gj{nSSAEd!f?KBjnu47GS=fks*
zzNlR^0&Hi=aF#lqdG!;xeDD{sr`3JFFKY+-=>_BD!Bf#)e290R+K48LkFpm11Xg{y
z7EelZiN`XWHFr9IMx!gunIot({fOm-?!mTLx;J{aiAzlt5O&fZns+=FtfP-X*O|Lq
zWBg{k9W@Wn^*aS&FXk|{L;rHOH?h2V%rT*T^<=P3DaOtZ(U@II4#m;Lk}4+;_ug`p
z9UekHn*(4t(p8XsPE*R1LzHte`r)!Qbg$6fflKZ!LAUB;E>rKxJM(6+K~rjRsqYF@
z)a1dEff6)wrk&lV3N+|(0_+!f@SGGIlnfx|&Uzhje$^Ey%$dZ6_FR--Qx_Do9`kvB
z?8jhlGwi-B8H4Rt<FcyFI1aYbvp<g;2VS5|?%kOU`G4^wwg2@abm8Kd0?bOGoMCH?
z_|M{eluR2A3HOJ?kp9OJ?Wn_$?8?oIiACaA35FeZ;Mu(lR7dwgLTfM|CM`x^D<$L?
zXYo21z+~S~awol$JfSFpWxS<*;P;Qpod4Z>ZNkOUxd!4F<rxq&^b$1EUfuKlQE02k
zVf98~*fn}Kw3cSGyVuh&{?{z<FMGyCZ!(qktF&1YF%8v?UC`gC7dnZ2nY2QSWDr5v
z<a?8CI79p&AM$d_IWCM%0xb^%xb}#nYZ&=+x}I~BlGCWOzMKQ*a-C<Hpw{|}$5eYW
z|Ig`Yakdh!@7jP~b4P*6psV0AdmChre$3<S-SOO#KpbTfhBmir!NB-$Ubn%X)z9yT
z`mw~f)$U|<aT0Pr7NWz^KDb176ckGj;KPBT@N9Gdm|2#IN6RmQu7eUizpujPyy4s}
z?F(<5J_oz)&qQ;{5D=Fv#mApYLG{s7kYr^ECO0%$)6F?xV3xu>BZ4sdTo0!4q%VXW
zxW>LjQ4Y_T_HU7;On;Fj$e!Hbl7cT*5_=0l+G8u6nkK@5mrB8}=S94ldy;m2K<r+7
zdPb^&$k`hFTg>2*-Y`73_%gZ~|Kb@rH<<04OKAS_Ec3aOhmN!lZZbOpEyY*(vK{5<
zw7M5-SiTSBFAt-=@nG;Wt^w_%k#Iaw7n7gpW4-GVkmx#t`P?QpFDC?A$424nHil|-
zDyTi5jh)w&^x3?r(0(!yEW=Cro9hYaLjQK1umxJIM`CfaF$U`b8tkPVva+4KYlL#q
zl=Xsbco@^#tB%L0KU<JjhGq%TXtU=rvt#in@eW{(#|pu`Je0{7Ckjn8&$f6|0#g^t
z@!9ibIMkv9g4(jk0b~f)({@9c;VLG5)WJnR2EnJ!8MrC>Jibc2iuE)Tw46196`nsK
zF8u2cs1<d`kINk}CVnKJ*+Nm7q13+%-ewi*M}4#F3qTe=jy;}~g)ti{!9d3yhxZ+b
zD!uJuz4I|xHzEoCBL>izfA^r_t)J{guR#7Fvj|mIX5t}gK`6g50=o9a!ozFpFpPGh
zvVoi6g-ahW{9rG3$}h$p^gaJ)T^W}CpghB_t$g9i!T42UI(FWT#>HieusYTda_BvE
zb7(v|%(;MKWgZxoZe(FU61c{piQskZwRpOx3`Nr{#V0FNxM=$_EIECJ^0uvFok=S8
z-Z~kwKP&hV`$FvM?*Wh=1JVWLxg{Uh3-KEsrP>V^@mIh!whuHvUL)N0BQ8NoEshOO
z$2yx%u313;w*zNj+yiek(9Pt%TXtb#$1{GnD;$$&KW7Qu1%4y<FqY`)0p1J-8@j{4
zGSEl7ag?=Q(ST&H=S(zqsW82cn5=!)a{Ei$*rKvpY&aN!+HtEOXmu5q7G8w`yJK)@
zLKS3`4}-3G`=G^U0@!|e!t1{E<Dz3H6zxk=Kt>$vI~$0bYO4uzXl68MnF;<C?t!q;
z7j!;Rmt*r+RvS_YhqVS^)0!dhwZjBM%yKclhUQ2oQqf^-I8-Oafac~zxZcAB-S1w-
z>5nhq>|?|!oi6Z`wOMFZw;LoTGX+)3A;Hgl9i5}MK%wVvaqj+TkUoDRo;}PQt=|m+
z{{@*~squmT8kL6je;xsDegYpv#bS)=BbSV@1k=9Js4?*f^O`w@e~>Rijn%t(bHP7?
z!*ue9W_RaX|2&0Z66&l^^}~$)f3l9j7Xe?IgZ8?Q;_PlZtnv6BsQ1Wcp3Ch(Wcq>m
z)>yJ)Ne)<<5r<Z<oc(cm2v)t5LA-o7xO_dqovN>Jd;P1td37XZR+7PNZ!<64z8)`p
zqH|JR4On-M1K;X(&}8evgz_z@U3r5yw${MYorTzL@sOJYcE{Pybbq?I4phfCvZ|XG
zP&I#*(Abp>KNbx_C+SPJU!I3W1FFg6mx>Kj=P+5nb}RS9!|=sr0$T1m1ZIa{iVN<C
zVvCY<$(XBF62FnW!d;3rby29=o5-!AN2C34auqzf#N}i1nA~Y8R$q_6wsGUg4ZIi6
zQ6JOrRTaOrvJmxejRqy<qk{XMhoQ-ZsE8Q>35Nuhx*`CbPX1uleJF4J>@#cmumFNx
z1Gt+>2{S*W#K>)OjBhvxeEB3y9(Nw59VtVv>-mEF#Bbd2g%;QUFrT$-Sr2QbQ^xS`
z3^+$jAG5Xj7#!1!BAy=5`q~v-=B7ZERv0QKTp<Qn05qih%Ujm2W=psjzqt;^rp-I}
z-X59g+&2$w&XcERS}jmC9StH~LFztTF#eVW>;EAJG;RY(VZ`Y^EPgm81asp3ajOG|
zx-FE49rA`l4;>KMCo{K<$#86DB;M+uhNEygb@St(dk;CpyrArOSr*ivf5$C5?BT%@
z6__lq;BFOCa5A0?QtdPzH6av4juk@lCkYRGn-31#11Pt29ySm+p!wTU@a_JQxl&e6
z5p4w7Pj*B9TjSAYh6Rg#YD>=BQdAYiSSf^)FthCvX7(S23du#@(0etM_)P+n+EHMn
zlLk`aOUs?ynfGyX=&b$C9o`)$rerkDCS{;c8I;E2B;sp~ByM#C_!{P7X<!LD1_tvr
zuT0SP>|rRGHWswEPhuwj1~Z2xW#Bt{2b*V4cgw|wVzXse;hcX4j+~x|l9}q@Wmmxt
zzoI*4@CUKZ?nvwkj%H#|$9$IwFgfikNbkGBh;?)E?GkI$Q>4L>LSmKuz5?-)sciUy
zP3U}d2=@CEp<c+qE+l{7p9z>fIF=mGtDsXa1x@<qvIzsvLh?VWAYnQ^=ld-cYo{a%
z*NrYw$N!UXypuAT{lBusm)$X8I1QUl5es$FY)IC&pmTN<?G!>WOoMv-@5WFrYL}Hw
zM=;Hm_pt2wr}0ivDN1bDh_{~If_onQfr{fn@b^~oC=I&=?!gCnhKmZ#H%sWA`<@Fm
zhA53*0GiIRIKy%*HrziBD|A;7UpAZe%M+m?XB2Bz_2CM?B+Q<&0PP<-L;R|IZvSIE
zuh**M@*h)p^Irw<<Ng_p$mGyenMuyb6m-)yhL{lAJv{rHi@HDL3Z(^D7xg8s(FHb@
z`mVmZQRuaJJO7<S-*xhp<fuj9i0zb#UYG+DY|awvs|RoQnFTH$0(<p@crj<Yv)eN%
zhfysCjcg}2VBr~*tc_y!J87qO1;Den56+NM@2F>$xNGNK-s(OQ-fx|QhT~$mzfl?_
z+h2tI8Q*xvo&szJSFl}k6lE-u8_t*U#`S)fzvLo0(NCiz@gS|sV^P1m1In_uD7CX<
znWXu<cte~Dz5bX0W@ie~yZt>MtxIRei&sF6cZZ5+5zrF5mfe4zhi3=PMcc_|;n{Ts
zx+&Mdw{91yOM4Iu$Ms=HpAoNY5X}v|8;EatiJ$R^!G2exVS<SaJnd@PM9Wdcx0Iop
z+z1L2cJj{cbav@lXXTWh2uIsp@!{PaQ1WO9Na@~E?M*YBuLb0asAKkK#jNvY4j#!|
zfijIcW;;OwyKl`zuT*lzs}5jPw*nT{eIauzN?{_OhvNFz>mg;h2O5NWL3J|mTPH4n
zjQP*mi8p_s+0<E3xOyi_zkC*+&Q_p5@e}G-R<Q5KqCjBfm|b<0Jb%qhe_akteh>#b
zJJ*9rZ*^8uQOEx~Q3M7C4q!gJh(Df}g!=2F0q!Q?jXy5Koa5=}QW1!H^G?Dmmnt0j
z(g{WEj+N~qDvV64gopFO!0W?j@iF^iFnJZl3=RQQzl>rH6_16x(i-$NoyvR{96;lT
zo5)j>iyP96Fh4vP%pUGwD|#j3v5a7pWRxk*>Q!8Od>%1!6rlA}0>RD&@Ou9&^qrRm
zL3%sTGAIT_$M%VZsv2}06a+^r^suByH#B)Z2s(<wv3qtk=y#tAxr3BIOKHCJpOGj#
zUZT89{SJ+9POQHE7f)SR40q?MV9^I3bSrW~*=su-Jv9#7rpyQPL!n$_(HyoXhWunB
zqPU61AezZ779{JkG1=!PGrB`r_MYSq-d_!2<vCbyzL0!&u23k-0^7|o{GYd(_^4$e
zwy*Jm1Q&mnuqzb&|M-)2?oH<M3?GPU4T9O`kzg`sDfN*uc*b`LEUc(R>+B@1*_S+l
z6$_YY)hu#G5>x7)3M7^TnJ{_={=0{m4djaKjMHV&&z#X!w*(h<uZGc!W6^ko4wi&d
zXX;88Hyp_@Cusm0ed`4yPNjff*etx!BMDs{b5WX9&Gi3t2if^2R*k#!;Z>y%s`d;N
zw)drZPu>;KPG1YM%R9ODgm?U5MgX_dPXUYEK*}doLf`Er=<#q3=HJ!8p0>qwPPDU<
zZL$-n^}rm@+LL!A2b}KsvwTy^SkN3g(a#Bg2>~EP7C?4GFo+_u(J81q_nNs1y}~~T
z!9K;@a@b~86-<5eFU?}L;d`KcY%#<vAue7<8sFX$4Cea>laG)xUanu2ikwVneQ=Wb
z`Bgw^<WcZ{#~>_DpUVRrxtoeOIZ0v=UDE=KbzyieIT9A!7>u_29`hHIj9|uAMBPVz
zDEimWN@GMaoLE4+&b3i+E;)pdzFp)+s{-pDMyOH#50AN00Dkdjp*6Y>N}sif6PCo$
zynPtAe}97w_!NgC?V-YuKdsRI=^ieb|I^C7p8@{cIujqB$YcKQ6N%OGoVSLlVYb~`
zc(;poyXC2T|I1u#)40XMHf5m8bSrS5;0EqZL0mX`2;FzqU|qaAvXpStpW49Pdj~<w
zwMyQw<0ao3H3BX57ciT|EVw<!17ob_LN@J`j6V25VfkgQ{V5siHf`nUnH=3?GO_Tc
zF*H64VFee-#b-j@s81za@_Rj(5IZg*wLh9pk0Q5a0f;n*S^l?;@(lU`@bFSP-g>ke
ztM#(*Ttx(oAU>4g(!M-lO%%M!q<nq9#k@&CG~?-9m3hnx%lrUmMS{_?-`sNc1?W3q
zH-^7ngi`~Fm*V3Jiq+?czwJj(KR{!3cih#h96voLKLKn6(UfekFZ+XG@69kHU<exf
z7Qll(BFxVl4qqy0r{B8=;N?K*lJCblUI`7|2NH`h2&EeiLbc8z&>7PgU)(PSvk?{C
z`9=v0i19)Hz!qLtcZ_MTmWh9DM6C2lN12+bQWp9~DcN<xN;0bmo5vY|$<+Ypdz|hI
zv)_puSO{vT#PPWcLUB~3KaRggt}53y-ufmEj|I{k!Jw7-3`>SW(==gG$1L=8KZA{+
z4Ne)8n77+qE)a*!%xffeH%|bk9?{hQ8;L`wT!I1<^4@3bLCh3AG|In#8MZat#D58w
ztcVAj#WYhgT?Ue~I%3h^0hZF9#b6OlY%IMD?DN7JrLTUAC%njn+K*vi_FubjlJX0x
z=d{<qO>CpZD)wo^I?SR~WBb#daKC3ilnk9ANC#B2sNbA-1kf|?(pM(8H5VuA42G+{
z%g}Xv3_{xuXt?)Sd`uOD%binEPLN^)t(jQRNV7B_9p02cyrrcRm|=Vj^rCy%d&5$&
zq&aNDDSfcFiWjmyiLq%ZP<CV(<nR=1`W1yf|3<(+dWTShX8+Qj<GD2HD6<|?$ux<_
z=vAcz-}OmQSoTbK@?jPJES-fuj~E7;&cga$BOzF(0@+t*Wy9uSaQU7dTBP;HfoH?f
zM$?BoUJc_hKU}!us9^3SZRKT8rD#@tkSjILkwfb!@st8l^6j-)Wb)3^{bw<M=dVZ4
zV-<h!H3|H77l2L~V!W?8$V*p(Y3V~A>b?&j9!TW!*ZrXR_9$^TgGAh`TST6wIcT;u
zgeUnfKu6_e2-Yt^Ctq!vQ8jbz4(jEmU&Tv<s1GkkW|^7^kNvbTwT1H6!vT7pID*NE
zExb)0Al|BW6>^^DW51+8Xt|}!RpUC8`X)P=N+jWyOQ^?Hwi?=sMz9z)XO#M!L-Tv&
zT6%Q^5B<D?Mx%y8*~t=Y<NLT8-OCyV4Pv!vv`1A{gH6*?m`(Ru2TPiZb$qt6e=g>t
z1&fu9-DYCrm#_RqrxbiOMsk1Ojj-;m5Bhx8zyZ-VsQ6k1FD_i?%Pc(6j#yqI^&zwe
zO-1R>*|Z0`#qaJR#&Pd$aQhIQu}juLmvI`~k|Ut?t_UVoR}-W`H2=*_<U0P@T(s^#
zrR<gl)NVM1?j0JePRA7F>bnH_(7(h+cYWd5C2}u48cd&yAjl!^^No!(Qwf;^`E(9a
z%?(9=k7N*gs>lm(!-r&ELX&^@gZ98hkXd|{c)VHs+^LH+=hg-Yzrh+FpBMg*qH_<6
zvFqaSpa>yKiew~7LMUqXia`=W5<-$BAxTmp8J(n3Bb82SI#1~=HO*dXbP_@cp*JDk
z5auO><lEoBF4yHS)AQ`T*8RJG{5W^se~fWEvDF$C64`9mWG>~V?1%hp5$tHH#N*u6
zSTsi&?UmVHvepV@IhQfF$Oe1uHh_dVm1GvXAQtk#T74GuEK0_NePML^6*f-}*CqV7
z7s2MkRXp&4`TM&zO506Gz@5Bn_~<KBAFW!2Dbv_)df+4&4>yJ|c@CKu+lT_aOQ`g`
z0Qg_N6N`Z=;Qn07vlpe4kSi>!=O&??fGVkSR|Cr4yAkn9AoeTUN$;^0VCV6Sy6fqI
z-^yjwzvBh9ZAfLa$g$|Sx(@CZGA{p58BsD^MvIo|V))Mxz~DR_@$DSEYA8glH*GXQ
z!-36YR!H5dMo~d`6v-JK0W)%ZkXtktIwnX!^7}OHJnt;^n;lHVH>=@^U@vwa>ZO9d
zV&33cH7NhcIB@Mt=y6mC62`U1riti`|4Ps_B8P5zcpZf*Tj=qA3SZUS(c3N<^lMnh
z`PVqI+nMoh2ft9aoAKEBxQpl|UW78Acnnp_M)}oc;Js@<h~7P-5BDbGgu6M|eN~Yx
zyK)&bn)9Guw}Y-5kqGT`gTb_~mdz}FRJBM<NT=d>(lk{7yV)#QKKU1I94r7Yg({d>
zR*J)}sv#$kbwn;}qS~88;5fRGb$zPA;jwOb{ZtKe$7q7sGy<(GHi65kyJY_wF&LZ}
z!@Rm-cy&=V&Qdst!zNmy_!|$pcMe7wDWv0St0BLq8q#k~N58MhY+gQs*fjhjRi2U!
zO8-{U3wOkL?{xx}-j-p>_;woR`hx`Dip9AHm?QesJ}SGK!+KW^kd49`?60$hC*zmm
z<C#~$-+v2g8;!$no_W|?=K`J%Nl<2W23Hrbp4fyso>HO_v0gR|zDFhC*o-46znM<u
zMzPYqx;~=Jx_P=bssVS{T<Nq52ecnJNMNQ5`VGvbhxHn8V!H^RpRPbBKLZ%hyaZO}
z>hQL-5}V^4z&=S2n&z#7YkworAWaOX!jka&$Z}|_I6?ip;-J|h7e&WpaJRY~#P4?i
zvpQ0hfFY#w?KI*yW+7}Jyd0afUQykejUbwQ3fkA-prLs=Fn3fr?rN%IJH;k5?gL|<
z^l3eI*EoRZ+WAm&^&<~8$3luT^Ah#5euITmiAHk-gdI5q<*VkgjOP;C`Am<OS-Tx?
zO=8darc~*k2^r|8X$hU_i$Uk;AmkM(;}ktFtj`tG_M>7@+r)*kpfjjH=rOHZUyKeO
z*V+A_?e6w$AnHvAv3pex^>3~R@ili)i91KFk|j|1R~xTuG-9L2RkTzXkKJ0#-<J8C
z984=kb)$G_oOBi3^i}A=Rds0PPz;_kuR~`{3XOf~fSr>HsL2`TA8^s(afkazrGkk#
z=%0OzX~+fs-5a#+QwpwO`R-N)#!<P~vO8cUNg2bk-{}@4MK=?EwdRA$kSbL7Ekc*{
zFnGK_nVnBJGah0rm}Z`V@jG{7-cNlLG1qBxLky9(Po;cL952A72(9edEJ`C<>Y$JW
zysJ@gntEYno(v7zby#mQ%Xg@6C;kb{fnZe!*Fqz3W>gK@S+W_`FNEV95!~{bfSkp1
zrEB+{#97JeIQX6cieD^&j^?H4b(^`59+|-ADG0ICFGDZ4gO+c|0sZ3lRO3?=_CD>S
zf{CSOg3`M@!>d))Wz~OFtjOFjSGc6@{VjSwn{}febOZSb4q8MN08=V}(#u@bu8u_A
zC(P}1ubz~IOoHa#C$!}84pQDzhq+!2=(gqtsdtzQ%gxz)I&Kr|1Q-UFlEN_Y@>tY(
z+eHs2$<X4KGjLCQm$H98wciyAzfNYuXwG7MJ6Dgn0`AgLJBqRKZ!SuHZ^AFPO7PB=
zsVt9XVpg(kDs#$26Zb)dv{&gW?b;9n{8pAv-)_LG&W=FIwhCGxZKK6krnCNw@wlxr
z2#&wFgxuntyf*vMaO%H2wAEo*kP)h^v!a2vl`IB@Ypc*{Nwl=>%^*nl=mp+460lic
z0kaCzSbsG|b;kl!=zLE@&qsihSrh3_m`D9W*Auz=X*jXG0^wa5G^J%jSfL`yY=m^8
zatUgB%kY?QAg0{k1oc1CK+Q8A^}l~(K1k+YyZ)DQ_EoSQDRT)vy2KpFm+4`HQgq56
zLM2l#!r<Ha5bIG&4fhI2XKDb1vhxbJ<P7QzN<o9=XTaNrvA^t2?#?_V9nvc3KduUn
zUkVW#Gf=#zhjf0LOM2EW1JCRdXde2K+W#`ccEg*5KUo*8-t-f$xwq8aGa6;t-%00^
z%WS{T*m0IG9(_j$N;bn`sRAG0eq=Lm?ixD!Jo|l&yKU`IgF8o$;l;=lOnzUD8s<mf
z^~y~cCbuB9HN$Y*uA%tM>N36$%!2#j78sYLi;dg>jI|lYW)hhs%j*@<cisa|8fKu>
zR6$CVE}{k{R5>kyIJE&-xgMk%&aVjfXOh&#EF1<h+1c}ClytEwb0$oiNVCf4f%oxq
z(EZP7VyngaO}Fj>?t-geHMN8mT{#1#1!lOrzZ5TB(nhoogj&5CG?dOmd-{#&H|`{&
z17msLYuGIB<pBuxnujqP7<+JS1Dfgula@zw=*C4JsE`r}o$r$A^kZ`|W_}PjZBzo|
z3**7^ayb?ZiN^jaO~~Rjz>6<3Y~A~q*t$l7^3nS=Z|G)>(}=~$VslvVs~+3VwbEtw
zhNvaKgsbmoV&|V|p6uZm=*erMPtK}f*wxEKD`zX5yFU|iI5ntz%nS7chk^GvZ;<G_
zP^<f@balpA=-o3M<m~hMZdt|NpCVMAZwXJT>+!zJ5~g6jUzIp35Vz*C&u7M5;<jci
zwF@pr^p!z@p&BmS!+xGh5}=pOwL07O5x#9L&uSlYgHPW}|2f9|_S?K6Wzl8g={ke)
zY7?<dbb~0r>>!<d_IYqdLf0q63FV1c|869-`F$sbi=NP?7i+1C-3qXpv!BfA-G`mA
zdr5zaCcZ3VtjHxX+8qt2Q~z<nve%hl@4lbf98-ipHA_)t$z0O8T_3xibyI<+oIE~s
z8MR}FAV*D=#J^3#EPv+pu9^&@oM<x3YYkexttSs27h+Z7B6R;9L^@w9z(z$0j;Kul
zr?F3Y?X9IC3cEw%d3n%vsGGj$M?!z!R4DLK$6LLuGa>pSFLw1sawXRc-NsL&fpd;x
znQ<N#*nFbeO^n0&cbC-v;t$ICmWF+0Ay{(#D=|7yD?KyO8@FXgK)HYi|7OPF_j=Y3
zsNjqr4UDj@iTNvgieSH>0qPk_uPZl2k;8kb?u~e2w>5_rBq8bIEeFv@0lDs5hb4VN
zddTD;sww)TL|jZdmx)Q3q6p5u%SE-P!$7}nA?bFWOLZ<DL_;<sGCcH(Xe7^pybbKR
zyo@0C<!>_h`Fa#cCW9n<46)E)J!}&KNbCL6bdp~_J}_fFi%$}Ho*ROseNkhf!G8lf
zmGH=y_o>(%tqlcdk|AvEBqH}Pz=|XR7!6rO4B682p=LCS`TE$z&eC$`n(8YVL-Yr-
z5Of$1Hrx`{MO}qUy;10%kk010!Pp<pT!zL&;8s#Lo)xb_L*Ep9aJT?U?#QXla6OQZ
z8b_C{%g5EzS0R`AXSQt~iheQW%qLe5g*8`E@QDjr{8J!n67dYvM__#TNYu5CWZ#2E
z(%;4hzEd-;d||@ewZ6pbbTAnHx=VsvS+4fDR2uyLIdSsZN;z%IB%2pUVS|}IX8oEE
zF5kG|b<q>MKRAQZ{YawbbdQQ@ELc>ZCN9(N6Te48v81B}SB9{R{jZZ?SA7_)+*7Gh
z_FeiQHV8DBFIDhkFDXCa4fDCvQ8wTWHsjw)6+D(e3GWQ{zOsNpFALF-F(%`J0%16B
z6Sgd5I}J%DtX3$7slMyb^U8TJ9r2M?%&!GbBg@7d%$0DW%Q32}0HY_SW5j{=xcuxG
z=Bp2*UqYf_H7-L=({jQcyi}T!(??4ti_k7r4Wd@mpn6X}crP@8Fo6|(PNggZZy^=8
zdx1}GIsCtJui1lf%k3Y@z;EW5?0v_}-js_@<F#SzhU?gBf0N29Y-wlQL#b}0H~6Uy
zg3fD^xaH0U{Qgu7ZTSbOU*8hg!20s|U)GanPeoX}kGUC_=`hY!hpLf5C>!%d+8w->
zam<6kv~v+ew!Np-PorUUcNolHmk*p-ro_$B2*N`4Q5T~;RQ|03PR2{Iim^bGP6#p3
zm2ojj7l7M09-KZT5r+pQ1m3WGYsEQC7(Nbn%s-3LXTwqDS2i@Xmf)F*jK2$B$n*SG
zO5ZRRE8(*$v0;os-wNia(w|1)^J>`4^1=4UOj!ShGq@~>0J|&ttp9BR1{aG_^m#aM
zdUp<rZ4^P^T?kDkKS|iUd@zn-9{ySc&&2CeLHQ!+Jrx5B(;3tMpqL6Tj|4Y`MmlF$
zBWk(Vz=GqfxA7u{`hD%}8CXxeJIX+>Z5RqYSdL|xC0HC?3B6B`VfMrVbo_5Px-HTm
z+ft)|K{@zNScIZ0*)Yc=6$kd7f$rYH)M5%lq<0ljyE68Us|+WROU3ZlXbpO~M#20C
zT<jDw2F&6J{91At(LII8wNK$a<{c4~%c$T{n6%G(2H5QAAQz_rHVqii&b78E^H1Y(
zavLNVbBeh`r$Fa|cImf>>8PT*f;v1y@+m$J<o|W>>;g_Rui6G_e?S^!-n@dtegtCg
z2`$izIR_a>XQHxhG->fj<jIdO!I)=ptj8?}t>VvuJgyjxZl30G`NzO7&y5Ons>}kG
zR%1!BGX$>nLc!)ln3uxE#l?9j(5W=@W|BD1`7)LjX77<oO02zNL04J{4&*kS|2Y{{
z>hws-DpR82OVPYm1N(ocAn#@>d>IxAf{O2EooVZ+ivCRO@h||{Fhl4JRG0d<?Pi^@
zVoZMSfS4Zvh5FZUxxO>|99Z6CoHA&fT@2pd?C<lb7Ti3}Q1=PPV35)jd{tBjyLHc@
z%<($)I(h*eGu*Iuk00>gm9o5YDr_j5i_YSFj2w{+7E?|@SnEz|$rvWlj|%Gfi=FrM
zPry*ylh}D!PU@9C!Fe8q=8kC6c0f)n%$y<eVjy5uKP`DO7(8w3rJ~Vl#8~?!{f~D7
zJtJaxV)G>=utE<z7w+NpCz^uH_?@)n&0OHLClSBjY(MWdot7lM=Djysf_9^>5zmU<
zMCf~ys4(|n%A1c=QPmf}hKFEl>^-9JHW;!wVb~+%p+xqM{CT?{A24^zes=F$$G%^>
zxph!1uET@`Teg2p0eSpnDpz{WD~;u%U&U+)SX2WAtw|_z2nPGNdqK9lpOh>4LwY^y
zJYoD4r$flQ)xkxL&syZg^yS$2X9&8lW#`hQ(<Gib^Ok$*q8sySmDuaU@du^g#K|E&
z4<yX_W<eXx!axJl3FrJ?Z1^=1M+6*4FqFbJ#SLgSr2$7U9*<kHk_wjFQi;)D;;a$_
z-GvIQt8EMnbxT4#eGv49UV_dDJLb{!g}8g_7`svtJS}~o?ZbTf>=et@y0e|v$?z&S
z9Xk+Q2sR5IQxAe0<yE?$7@xcI0Dd16j+_H7V7+MpG_BZ=@^@pwz3d_D7BMFEE7;D-
zSOr{amDp@4k@R8!$o-_e`EHG%6fl%necnNh%BvxG)G%CKA%^;(Mhq@g1DV=%i2A1n
zjZC{~hGHtq-baHtU@%==m4i;VRp_maXV5!dfbuEqzBc9+X?uJf`a^c1$S(*r&J1FI
zW*Epuu(?wM0B6-wc(A$wOKfL@r{)MSF3Z8L)$BbuYcEN0W$&l)wIr-j4XjI4*?F;?
z?en)H$2!mK!HOVEkVMdMqbkT6A4lGQS%Wr@hXQ|&7C9=sfC*E0P%@;6m-T!Pl!v{e
zs%v(kyt_i$9<duu$NZocu39kkJHbeY@xWbLg!S2$Fi3X{3W}q!zt9<1<>q1D>T-<D
zTZ}&W67<ud;Irm9>Q{w>_mh{@|Hgjo%%2V3@1n8%V?0q&KMBGYhNK&&fiU4X?RZoU
zqJOHTk_j4U{gs^=&oVyL^^w%5E0v@z*^a_Tqlv3YB|EDpz?X~Tv9qXC%He*O4*Qq+
za(3ON6Srl6WnBuu;R~ofED^rHIE_s!*3y>2D$u<-0o!}Ch(5hTKkFaCoZejUd4S06
z(*mo)4sz|i3Tkamr`3B;V5^Edc&Z_J7SRZy6<5(`?Fdv1uSK<^$@EgK4Bv-lV$Ap)
z5Hb`-|3L-qybw#c>E#e2+lC3IEC(t{B)l6}Fsil+#@SYZnnViwmsg@j?h=ss{it%<
zzK<$Z@mc5kZ=&Kg0Tf?aVfYlpa?v={%`Yaxo<F=_f<WL@L{j;eYryXtkanjmB_;gL
zV1KHZomZ!lal22W0)~?Cfg<!fGl|++v)D)z`<b%1PTbEOLdP>gRD0kG?M;k9IGl+S
zKSY2$+74`6N-*|B9V(yWfU?tm`10u*@(uqbS*sIh%M7-oO+{#VXh8eiufQ2I#wlu^
zVmpt`G-Zg4oM_pFL+uNhGvF?jM2k`TwlaFUv%USYbmWhdfQX%YOjTo`to$k%>-<Br
zPcc_YRXTL*s8a8eFl;$Eg1DQ!q7RGg(73dYTKuX8qaW|+1fxoHI^W6@KbnB=N6f<d
z+F<tGISsO-$-E4fN%Iyg2Ac&A(4ssXTTfjBbq!DaqgjGa98Iuqk192;8p(Y7OGv-4
z5psNT!R=@_k^Oki`<KmTKtlr*o3Ep%&H(G2jbNul2~lfKrg}%x@YhV%_5M5;#M!x^
z%D8Q=zdG5;xMWn#0gL!X=nHKjJx^xCkB{Zpd5*aNt<OTow^F!aT!^i?0vLHp2itx;
zqkSZUNb+=`_e3znDn2A{53@6Zu#g1b9|wC*>SFifLqu@(2i-g^77F&KlehPZVB*&E
zU>~jlE*`<GE31O1zC{Ty{a1(FcdYAj2+K#kj{^O@v&k&R%GiyY1I<quBlFuGCgdN%
zzPBSF{%#8M7qZO8SZ(w>bpS-KOX!=cRfzHb(q>yb6jaxk^*?(}11ob_F1wG^A5J7?
zy)l?QgO9B{uENN%m#}_*0`agCVr<YxC>V4Rns%t6+Hkh>ox0sj{%s-nxd)?Z?Rb!k
z%_EYeSaj`i#FW7sLE!dVI(?M{CAx{ARW%EY%+5*m0;i(}>nwIOOaSYp7jOjQ#N9Th
zBHzYMTDQIyzf3&?Zp!sU_WS{9%bP$r|Af%2j~8hF;|S1cIflW4Kh)u>78-h<rJikz
z;qr^c$X|V>N-+JAG{$>7&i-+h-Rm!)jrmq6c#(}|H7PL9JsXuSD+4z$4A!qajr}R_
zX^7fB6jjfl-|t3Y_}OGod|HA2bEksg9Uq9bV|Vy}_mCB~e%P2*2~J8=q)vMI5H<Nc
zLM7{?@6ZM7SqD%QHI3IASj4z2#_W#nrTK#zAd7XDC|f6h+-C&qwPP8E+HAV`vMpLi
z^ia+?Z_^&upVFknX2P2np)q3|66{0>+6$QvvIM3%G9O#jR4CEzlD5=$lQ~~5gUx@e
z*FIguI`=5;7Ak>!cNv5&*p5nUPFcH64Z9gz+SynDZhCH{=~*+aPYP#V)BE(P%|_HH
z4hP|bVi4J|yp8i5=pio9bdq^Wcd(4^5N4LxyB7w2CxS&67mQZ8Fvl75wf3LGycgP7
z?G=K3ec`BNrH$LdnD={)5mDC^;4>o;bYGl-lAZHN&&xRK>^ux58}y*xJfHNfVC;Lm
zJ_`TQ1%6imF~08#V{V?poV|rmzx5bw_w~e8?oQYjQ3C=tGn2Orkfuq&5LqQ4x0Gs8
zT*}Ux-yhO8zY*{e^U%gsgSKxv4kCknC~kO1I`##Dg3ukZjs&v4$XL>B5)18_d8og7
z6WQ)L38(CCKygPhNuGQeTjn5b7Rm6BfhF=;=ij0cE@)qrj?PD>F$ei0Y03O;q=oyI
z^oie*&bilkDT`7eR3R9udc?SKwJN5Zbb_I$x1!yT4Y1hcIt+hm!?L9suz$mDOxUy;
z{IzP)&~_)hJ#qoO*RKV8mObI~I_TN4i`nx(9&D6WQ4w?Cb(aaSeBySzd^iq+?P@9a
zoEJ=AV~4%#*}Z+-DxBRCgkv*<QDJBZ3}%i{t6(8?<}HvO4-JH&lY`i%wh>YmXycYe
ztiSJK5rkzqklnxZQDSXR6~fe+tNsc$w`qXUfF_J)dt1Siq0};_272!pK<7+HqC87N
zYFr9%4ttIb`5nV%A-bS1yhg*40*Ii(+Dt9P9&T+C!Y#`%+(dn`w?++(PKbG4;eM#f
z6`)N*9(mlE3!FDe5~s&fNR3NFAsJ4c<}<D`zFaEFb_DtNtGri<yVyO>M=HA@CxW;h
zo}&6L40l<IUF_v2)=vhGppXn5GLre%=kpqUGx4^rA?iD3!RqsQV6mwLoK}95s<Z@v
zz-BvD`jCM_n=UGsjRbFx8k*)f6}d@?><&1IiiYbjhcCrcgBsxTy%Eo_EYu4j2x?~V
zy1T<cu+@@vX7rLJ4<fN-h&gic6i?|db8g=&1X1jM-m{m3G34V)G^U|6>zjaZ^R;+w
z|DA-<g{Q$Syo;0>Tm?e|=GbdgrJJVO;!)p342#bu`rRJD;%*4nYoT+}V448;Y16wv
z((#w^KTToyxFQA@EjW(fkDZ4p<&B8*XP|}&0c-!YSm7{_?L{1=9NQgI{-6$?(z0l3
z9Iz0^j`zV9=VEXw;4uHyG7=ZM4eO`uMAHRYVDoD!V_g1`s)bXk_1YX=`cx4vD5BB5
zg_PSmm{(G72s<JV;=~D3d@$A#janxY7o~9!TQn9dWagm6+XLM8W}efB`Rr_J4KT5b
znl{#;{fM{J&Z&~D3R=PL3yC}%);HF~@&*nCXF#JZp4k6$n~wOn0b8AyL2RTyL`R>&
zrIvG$bBb$bgPVw-;SwTqerxvXC!1#-`B$ps7XnTRRXqM<Rg6z#uKnj}5R#XOA(sB=
zUyw}St2IEvA$?F<u?A!rF%a`46~yN(h(9-r<pkqFc>DozJ9Z7ZswYT+>^$i-asn>*
zHf_BdNgcAc!;0)^6xrs{6z3AKLM3W9EdrX(*g@TgD0FFfMgAD)v%NzUW~rOAK8`EW
z5j!1O2Q=$78he5ER_B0SW*<FsR~whT2!pmeUx}Z#oK}YhqlF&ZwP^Z~gi5xDGxbH$
zHWT80WfNK*4@0fq3dZD|1pgv4I6Jlhk9*cKCwv(8o)Iw7+%i-Nmw~4in;%73q5h(C
z&{VAky*JiCIWnGW+*Y32`Z((J))wsQ?a+BtBXYFn^IFzFr?Imu=|d%Uj-rJa=^sR&
z-Ot3YFUF!Qj^&{T?(#Zy&w(Iu4e&o?VQ1J*DmI&epS~Mn`-v;SZ@g+&qIHd@Zyrf|
zrV8*&ND(v@Phy<zRCZ7QS=IOV4iTGp(%z5D!9R+z4=GD%`vywG{EDe~M>Cb(f5!`(
z90Q{FI#R(9e`yqNHEJ19lJs*CcGeA}1Nlo(m$5uy9)85)D<wlo1-ATimV{p^1}ly}
zb!2zuClB)RnCf0EoHPdGkJm%b#sB9xGjPrx))zM~9JXwA!#3@Q)X+Sg>X)3wn{m!4
z_bZXwcoc%S<yNqXF@eFl>(M)ODU{b7f}<;Bcp=6Y1uRqBtT7Ar4mpc=CuJa~XO*c=
zJM&MZn}J}ih&K4_M<a0>FKp!jkc4otC;UF)t`rcXbGG0$jdkvJ3DC`18)9#62RY+w
zMhB;%`TkY-WZ6^<Z#;_KxAzdcaU&rt;}7j&dldh~%cOad89o~uhnE(3qxaV(YzMM}
zb+E}miK7EfciDYeG~V=k?@CN4*+RR*)1mDM`@Xryv7Wuf=(*%6iT6=NdxH?-+9Sd#
z$#!VDZUR<h2w`jb9L5Owvdo{3#A%-d%5o#{qShXa{pUJ;r8*e-#f#9!_%#u>r_xc1
zbJ5{q4a=47qNY2B(_1?$;emM!`tMYLyRXu*;O;HTxf?FEf{D<#?;7j<se%#1-LSbx
z720N8X7htEI{5Q3)a0q)ESp-)*1Ui&qoT>z)ss=koxu3c4&LV2wRkghIyO@i2re29
zFO^eqRi+Aa+j~)NxdUwqiKYE_f`QAj%z`(2QE5~HStv6=S^H%i{O~Bg7^r4HD?g}l
z7xNG~MT4oPHQ<~iQ2L$)DG?T+XCKbanYU^DcoFD67E!y+<>(!^1#aDEj+CfV&}EPY
zre_R+|KcZ!^FPaa@$$%`(Okrz?7s75F4$l$imeAo{lp+T?amhL?7B<C0*+GSPqU%(
z(q&%6vm<Ezuo_L)G6!59b1uI7N-i8=z7~-+7Op8`y}vzFrZ|Ii8hg@&m<-?#8vzE>
z7~AIYQ7XjM<l()GC~rB>b9!3{V)jft@X8b)pNnNqt<O|+U4xd|Uc{t}XHh-a1g#aB
zmtyEtNZn?MTDNCFwPz9v6D`R0t2TJ)_A(T`*#Qp7{CitG$OktIbe~6{h@T9dtUt5(
zuNvAEONm!<IrRG4L2FY!$#`mlrtK8FH55UTI6yg~Ka~&otOF#Iah1!9(8s45gpwOl
z{b~bHzOWd));co2ARnAxO~S`l*d5;~5H&Y$$9TH}l$YP3!m(qZxkf}~D^)>qYYi1$
z?BRXNX+RZ|2-?kdF`s5=p=R$=T=EPtsjwVeS1e(3Um<RaU>>F&*P$x67InkQXw%H)
z@P6k6Jaz0cy6wxSFe8BR8{;6;p5<gi|B`v?V%#;~m0j?vv1#ydu-S5r>|AgNW2d-+
z@(45R8g0t5Q-xGk?N7Rtk74iUSn_s7EQEQ7lSd_DmIZl166R==wxTMK>$B&ze*i8#
z5d^bR{-0;dCF#K`IHs`*MNiGKKWY&iI&ut8d)DBgr3tuR;)^yN91zW)0pv|Fs;oOi
zI=e^k24!5vW12Z=3dJNT<RWbFH9~!*X;ePa2_D;-VN(n97<~0291RUqvlY?Un{f>c
zhgN}Yb}@vd`mr;LBK!YuSmfiNg&KBjcHfu2$R$z_e*bmAT_aS{#wLjuIpQFQ4rh?C
zN26ff%?ftLJqer^yG09!>_ERNHF7+m9Ce395N;`RO^?1n6-y&<#yBDN&pnRTCI-Np
zRDibGLe%pAP2DE<QNzAjpp{cZ1RZ+3pm6p~_id4eQ6ngSUO)nuB*Wl5DRf7@Cqwrs
zpoeB0l%#vn?}e<pwB8KlK?iB`zfw@vzDq3zV!>VTgX&+5B1*A)X^-A@=sZ6hx~*O5
zk!g--ct8buJ|0Gx_LsVMy`lEDZ0At<gk+@Fv);trsLOV=vLrq)Yu-5M?ujS2QdQB#
zc!0)?%tv4UP<-Zcl==JqQr(JYq_<!g_?gc@N7-OZdXmd>Wy~$NCI@bf4#B|JXHl5>
z4=K@OcjtGt;JdsU+doHB=d0z=X~eoGKMtp3gc(?-5sa?0E`fv3RxmVjfzIudiG0Bm
zy3Du|w!ACC@+ae=|78ei(;QE5>_O<$jKd9XKDhr%Do(X8#?Y(5pwk)&k-xQ|^JW~x
zdt3&c)QiySKLdn?%v<*193Hza!^+onSo>-p>kCc-Z^MU_lfIL7s%wDq*VPa<mOz`L
z26B>`BsMNSaBKEOoW5@|c7EQ<<BVBT#R*`yjsuJrv`qkmMXD%%SWdspm7v{cJLa1?
zP14sN#jIEJA;b4Fs_gaviK-v=)%#QT;h}^ds7Urtx`xMJuEN~M9vFKp6P^d9Vd%%f
zc;~DqK6Z@;uDO}i)o=oK=a&=Ff<3fLy_mT+xU_$L5UO#5Y0CQpVCNSH-MY?1aP&Rx
zwu^?8!#lyrc?E2_i71rj!oBXT45koKt8M*c26qd}$5->RYShv6&j2Z1I)-(w@8IeG
z;X&`B{ituQ#kwK~5q|3tsgeuZV;ZDFmo$aCI4=N~_EPw+&$_0bn!(P~%==@-{H7gk
zbX=q(KDK9>oYl6ZJ0KXk*06WRxnaDp?HR<#tsV@9RI<EnHAdcJF4UZV>BUKvsK2<E
zEWDV4GUqHZe|{ZQ?2m!HQ|Ds$`-RvP*GrVj7Qv^=K#U(<#QeCXyhtTygu{a|=I%k3
zqa6>uieWTt;cN(g;>z9)S}<r>EY^>TV7oDvHGXD6u3H)*K4Siljvb`)VlvM(DIJVX
z1)}bo5~BSl8HbMN;6bMgI4L>`|L@3A_Or)bKG$&mA0_0B+QeI@Qw*kWYw-Mqt9Uyw
z4G;cJ!fNxg5cWX~^3~4R);A6mPMLs2mWw@62S7cv1XT`kK<VCkGV%EWbZIw4`)_Zk
zb1mzj(GjBii*#_{j{)n4X;i$I5aHu(5O;etmP|be!TQ%AG?uv|jPjuE^>t$1Rz~3V
zc?c0tLfP(6p1m<2gxDh$SwAIQi315d!#d?1>p*2zCGXz+S;$#qM@`S2r&bvkF(tKv
z%Jj~G@swgvbG^=bT+*oM`507U+}3>GOVF1X2liU*KDha()JnV<C1L-O@}1R0d3-)J
zMe38RcW1%+r3C!8Y-jFIYxtdb2K#-qVag81oHm?9etQw`{KOTg8+nGvf?RmLv-ZGf
ze+5kXX@`~1Wz1LK!*WQAvE)9|ey?fJ_ID5c&hCWXP9wlOWewP$>7%e>BbZjLCH19s
z%pn$ODyWK}<&`5a?R7fte;*4x*O{o!dK+r@ML}$$0_ID1vYB)SO3z*a<@S7VIQkzI
zE#3hGDIVx>v<`mj6{6{lZZbHH`NJTMF;901cXI)6(k&B|Ji7op?oDLgvj(Y@5n%>F
z5h!z753{rUQG<5|lHWPvYE24#rfCqoKtlS9qN!-^f26a20_9qVQnTmGdz-ff44n_a
zaCI)Wy}CwRmPSEkbp!UC%>tz--{`*Go|wIuWwGa61CMeQymprD!xY&JXmSunF4cza
zu_uUC<tLU=mjbtNrj)xbLF%<1A$edCdQ2%}KBx#%qGCdw!Za95`<l#B$ip+cS+;JA
zGu(2#gc&`>EVIV;lt(#I`A9DaTcSmC7iMCCM<_c_<f7|l5yURr3dV{n!2g(tWY%5B
zu1VpfbQR-S29HBdO(yt9#}KDCvp}|T4-V_s$6ALdwA@z$+|DZUD6;~~)*ix=RZpa`
z#$qDSx?<+PvYK`UR>I0(*|^^*2v;6=L%$QN$jYVlD1TE1I};-?ka2R&>)D-`n~kD6
zMV{{N93tN}A5uDwf~eb+{CXb&Jy?l06HigWf$wHx;}2oa))%C&)PR_}|DnBJFX_SA
z`Is593AH6DY(6NY`Yu|O)AC!gjqM@0ZW1^?nC;rLyGdVV0_!=qg6WypSeJl0`S)%i
z7VOvqdgG#@dH+f1^cw?3qBwBkSrT3ICuCvzajf6!%~)kkG#G-wS0hB_P6Tzo7K=@5
zH&LS%ZnUU@<>rq)lqP>Zh@9oRRi0<WD06ouOM_idG;%YQ%(y|?&7V?!W`K0+Id=AG
zY+%oiT;jh#1Gi5LMYlO(cyKm=Wx+#9`Rav0(yh>1dm78{m!rshl=M+yII4~7q#S+%
z^*0t$jf7k(8b6(So1X;T2@W{7fca%?|MK{gb->SiJmagBX@hzIy4ioH^25#24=dMT
z^S@!x!g>HkGRLV(@jIT2-V5qi&OFO2|56JZ4QRcVM=UPogMDQj<fNrRa(W_ua%0)O
zy*%_k|Czo@O2xZ&N_bXtIogFbg6jKK=x@nb!)F6he&YZUE$`zE&)bOy9^@jwZ8Z4F
zm54xHiuKkb;jU92lrwL3eGS`zZ9b2O3K@sF>N3^e7lXr!W}xX1Au+91q~htmXyNPu
z{q+|hMSCJRR4zlO?_z0*&jX&%jAS%ckAf`^mou+vKF@nKC8E2RLFw~;Qjl>5^wZqn
zI}Kv9&?W5sI2sD9jKGlH=Ud$05Sx7|q{l6k^cBZ|(vLKv@$@W|W=qgU*%alrRN6Ml
zf$+nMt3>~@=gZqn@LI!mKzgO1x-9`Og$PmjvlRG?2Jyx{D}vr*j3>$K;Nio|1Rlh)
ze6j(xJLU*YzsgW*dK#-uVqkG$1k0mmnORTt2RV5q{X~-R_c&L4TOW;);}T)r;}A5=
z%0~Nu7bNWS392#r6V3nA00mn2NL%?@Fua`!PX3GmUA&WOJTxTAoieIC$r%iMXJKA)
z7A81zU{op>!xnxZR&%b9rcmbB{Tv0Ql<fkT-B;y<5p8}HLd8h}^ix%$ze18wx5^m4
zu$@bmR2S@uhk?7}JjTr2<;hNtmTn)%MWtbTXy>Rp9_M(EX{QEbPJ4OM@~o@W(z^)0
zIEvxboOp=b+d`lH3P8?{1BB-r3DbTgpw;CW!2hq3s`Py1#Tv>;){5yMbll3AI?5aQ
z(-m>L3W@|;V7bHst&={}uGl>GUOI>elcRCy{&;9V!E#amt~E1F4gj|mMl>hK6vHCN
zL%^AQ95+k=t~o_`<IZ@NKga{|-ILUDW&=1aPiFb^A>gK#O6@&@sO2k!IaafAXVesQ
zv#zJd?<S!Z#{vXO3DW0z%)_M_ONCRn(I|-wG#+ci%oz!|ubySQ&v(+!M-ynloiOxA
z9<&=~Aj;0NJeLa!11orfDr0ocN`UH&1n~P1NA!oU1lP)|pjCZ=ioUk+WXCL}K}#<)
zE;*QfF*^(5vB%kSc^TIGOat*IU9{g>2BuI%7w(P*E4m5lZcb)A*<K#s!bPff5@6j4
z#>iAMY$3fGHmFQM<5`P9jO;zCFb}edzfr%Ep&%SyNMuJcfFti;D7Tq|rlS~xa4r=)
zP6{y9E*}M1n$$t#0y#7`AKSN=V?#v)Hm`LCqb+rGjwPb>W-z-mjU+ZN2a%4~ArL#`
zA(brO4q9*HP$=F+Zfw4SroLk!;H>}@RFXht&}QU#=1SVDXF%{P<`rLZiS#Pmq@pBa
z;&Qc&^5f%qoaJwL1y9{kw`&CBMh4?%-xT<BtO7kZUWFrPx1h@O3Bdb(30mKehPI;1
z;6En^3@vlX1N~^m$<~5kxsa+9F(<7_4>f)#1i8{nsdyXX4;>oFh$RvTJ|PA1f>7Xu
zK9b0Ty?~qJA(e0c!Q-jtqQ&rSU?thd&eK;>Ye@xlQ=0@Ij^<(#J<HrNX>1Q$B~>p9
zL#NN@r9-b9VCo71gkcQHiuMQHz2{-^XIG4#yACh=3ov%oOg6Wl2?gelsn#NYaQpd&
zPS6pcI4%PMi?3jeFCR5bub@?~0p&J^;5b<rv`#_lR=fm~cx=a!U_!ih=3&ap{h;(m
zNMsu$p(FG<iZb3(WAk-j@ZkvBUkE3HInice3zEP_*MpWDrIBf;U0Ls}5)OTr1D0>9
zSocy6Nm^D5kGr#=>?re#*Lr})@@`_!IRq+dEVQmCCRPy(K<PM;_aBp?wjl-jE?!2z
zo12M#&k5SIeK6Lx24acEL;817Cbn-L4Ncop$iOhhX098|*c~=&4Od6cN%~a3bThhF
zEd`e^nqYXs5hXk3lG?g5jGq;crF*oo>zf|HnDLl(Iulw>go0>I2Crqu0MA})0vJ_C
zK<$nx*yiLya>rL<ifTQ$t2NPpLDg(;w3}#MF93^22Ou{k93#zF;;mpA$mCYU$afn2
z7=8vF(ozU#&^w|(&kDHTh7gC|rI5zlMJ1^v(ARN^O0#uv^6OHxk;RdtYK55Ovl%;~
zm1Gq~!+`WGw5cA1{*57U{8ttlZS0|);sR*Be+rDp?SZ?IVX#zfE;c15F-B2DO$TMu
zroywN^Xfk66n>z!&*Cud>oF|QDPXSL$uMUcW5Z%yVe!Hsuoxdil}1gcYMQ1{z?d|(
zRTfb4N|6*E%EnNy3fOC2fWqJ+;2Xw6)Z=OlxE+L?m?>5ASyHKR1|`DERa9bIiuJLK
z-|&dVh;fV2-?<9=TCJ&qkt?+Ld?MVYJ)q?311*cvu_bma@MoGbuD?OrvMosZ>+fcK
zLD_Ti&kEpf3+3q^9t@oQLwW88UXia0B60MKJvi7!4HfS*58Uh?s$unwy4cKz7?s_a
z%FeZXd0Ex$70jnWuY%6xM6^>MjW2Fn;<GA(^QR1EP<uI9KFSTnmd@b+upIky?olT{
z;PF2d(zip{oz5$rmM^YEp;`j*8WxRihh)_0-3F=tv|<uilY;WV7t*GbD#AY*Cmm`l
z1d)Hgbk*Zsn39=A^W{~*_E}X0-p^>6Z5r4`9Ri~!KKa`hgL@B-M&7C<;B@hMO0(jy
zEwhLY>bJ$kL&BjY+7?RqCD8M46kVuz2}>YYy20TX8f{w!e7!n)<H;gS%ZSAU=90-U
ztiq_9L6}md3UVtE3P$D=PKY6GsgU#BR+~_pM_;Mw$!%!FdVW2{SFj*?3=F6;Psoc(
z<i-#1y8K<hseb~-Fb=u4n>|a%bejo=UZ{$#&cr$9E3nP)B5YYY9W&!i@$Kt6u(!`Z
ztt5B2?NWw+uZ%&xRkP6Z>MzEpyrcGUUew4b6NSSSA?oLJELd3w0v}J{&W+-UZ)wor
zFkfg&F9XlZn_&CJ&6s%M7}__Mfy4M3!r8Kp^{MX$L*|gOaUS3uVcm#6p;^#<%8fKm
zF9MCUl{E6idD8qgi{+bS=&;TVQtn*@>woI7_)8dSIIg3=SJt5mD^2G=@&%g_moeZ*
z1=ywLQMc@)5WXn}H1wn3!u@(o8F!b|3@Je`%?nW05ea&mtI%s&85qubLp#IQOCw**
zL3h7ttXr|Lia|&ycu{9odLSD$BsN58Yzwtv^EvbWwK(kLYSg(93BjRBtm{r2?5o)v
zwb+A1ChEZX*-ohRW)77-j*z-9I1Kvt*)wooSykYG2%Q|RQ~wGn>(ZRh^QJOT%?!XM
ziz!57axJLM)rG%J=UHy#Bk@Z_7<(fLU4NxR&;2R*^2ajNw{`-vJ$h)mMF>62=`B$U
z!f`F-z}Fc{x^qr|!sI6M%ruMT`Rj4;sTz=_e2^wYslcFbGtnj{06eGv;7ML?XH~%0
ziQC>}BCr2Va&9wczWp5Po+}`R_myCAQ31=#R1rg?$*f;EUE0xeiB<i1LCdM_gde<<
zn9lJf_C=QHy)zH)wpT#dsylSz#xm?JIM3Y1=5*cb2ozT<f~fDP^u0I_Y#iO_-yh-F
z!7>_dXCml=Z_IVjrT~}hlUddx8Sm(Az~;9N#6o2y+??f#8a_8^*EV<14?IESnuln2
zTNM~hQepmEBWVeH&Kaw$1{;>?GQYe8f3Z2xnRJ#}&H6x0U4y{NTL5um*<5o#1Gy!>
zr1g3{)%d-LWOhzsd*Vpw``||UYMf~Q{Z7hX`<s}K+(}BDBY2!6MpB1GV{yx612j&f
zM8DJym)+lqQF$S_cHb%Hy;ubf%=z-`Gs|#iGOyIM1~8f&!4pnH_T0aO3YNA2OYabF
zZY%_xmq531BoxQ1;KC&4<mMDtrrezaN5;=UzVTM*F-s;&$C*$5>t(dsI~@WJ?`3-u
z#s+vvc%NPf+1_vntsSh1C871S<*y3M9u6X(c@%NlJ@V%F6!x4O3g*A!aF!99bIn(P
z?)*BIo$Ri96ULm1Woj@XuK<lD?$GB@PpmHZK^^O35nMegbvn_^lW%V$?Hg}U%TDIB
zsV4wlr!d%WB5=xDr43zX*rR-hDxAv#o0-#5_cf~-{!a~CS_HJck^MdLX2Qi%HEg=C
zNQYiYf}8wWTs)Myn*y~=n`L4YuH_^5)Op6h4I`bA7BHBkLy<CbMooE3WSSGHeF>Z8
zhK5Lusvk(Lq-WT(OB03vR)ALTTxdPN7f#!qLXU_;D5%w@lJL!hn>C7vx9lb!dJ1S#
zW6U}?i>2!tVj=uZF7!AsM^?>KnzCRQXuRzsX(hJEuli1GjLl){izvL6!@TSDjMMy9
zMjs4A;BGCF2CusWF0aO-S`~9}bx#D9!$!2{{yNyWeHQ+%K7?6c-C@Dv^C%10DqWor
z2g+Bbf(1LLYK=tF_xA!w7qd>=FN?r0bORX9V|^qoUx_@hQp#~LkSxngfm-1O5PvbF
zdxH<*MB<6P?0q30P{cTUiZKHhL1cavoTj%E=Nc27yFwjHYOWKfIUjj8+KXvPv^ywI
z*}^;`|B!%=b5L2t*o;gGeEFz>PDa@*+j|}?@-GvzEDY7^Iw+%`P=BNb^j)u}D$n&v
zY}0G1VlLzjzwU<Nd3t!Ot)6x0s51}m9`@cbN8|Vq(lDU}1-5=>Zu$-6*Lpi<oV-<K
zw@?Ax%2io6^m;RX9wq*^eCEI%jLXZeqsoijm^9NKhdUMFvZvE<ep?-KW)_;c&Rl{T
zugytHa|*gyjiD)Cd(m8)g-7{w(LJUFltNR%r7;kVm7=InUx9Ms9nErAM6w>ZS)@yy
z<vRZv1_g;Xh+cdF!lpcQlr{qYwHZP|3av1^3ZJyu^Jde3#PvcAlwV<-*um*kaB>w0
zCQmgJi863<MiJ)eMx#xQ4QY41L|fCVh=9#jSF`zd+sQ$oE7^mS>gHn$UdC<SfhbTr
zS(Wzj7>c9!5J6Ic)P>Ek_)YSvyHip@5E5AB<TsWkC^CP}4K8(4WOp{vK{8=7<E|KY
z=AcrJiSJxdO*9z<W%H!}9V|sH-FNixfp|36uSZv%?YJS`k<Ih>!@`gZw6$meiPR80
z4P!BO#Z>64McR5Q9Ylu9;ZSicHfg4Sm!3a1Sw_$&>kne5@?y01U>!uuhtN6CH1OT&
zQ|SBf3fA9P0K$etV3lRfvh+jY!iGrXUs3`WlX~Ef%QDN#@dx!87f__Pj~8n{0%YCI
zQgKZVy<1wwI<1a?)gx7Mf<4c8X|dQ<ZVK;8jZn5_6G6j&scD7>l*j48Jx(rT^Jb&d
z(M>#kpNCX9Jx<#9<s8TkX-Z8WK4rUTUl{u^6Hkqlp={o-DwkU_obt>Kn?ClClBzvm
zd_0z#zWYrC!g^v~JrQkw_3$=tWG<cZ5z-ewh4?Lq?RHY1G529IY8n+|_0K$5@gfTi
z3iD7qQWeR&Nti%6w9WSq^&6E7U4c7b#Ka=Rz5~EX)u<XDUXM1bS5d!!vBZDZ33yL0
zApb%vPv83-^hunFf71ovjcr7ei|p6KbLh6anUFB~E_Hjl8(RFkNx_(*EaSi!NoNys
z_vTpSS655r9fpJ-^Oe2{+lkyiCbap^A;=OR1XJe@D&O&$sBJn5UPBX6-(epa&E}w1
z?MKN$UKr{>L=aUSk#cpliD&!*=$|?r>IVRfLjNVLcIRM3l|AbTVlMM04Nyt@B%NHq
z^0q%Eq<c1pIB18_^1e-=mGO<1t%^n=FO9hm-%2yvR${O_WAR4)Lkd)~i5I<yoXDE0
z*#G|0O-l?gGIJ{XZm=!~qmj69Qa#&yE0WvUQ&BDMHMQ{_O;W6vf%CIGFb&P1F2_sJ
zOuP+`BnRLcfdjg4JA+Qkb%@h(6XYJcB7MIj2yg9Kf>&9`R9MPeDw=PJJr9>MmZXG6
z4qT_hN3kBpzYQ?)kuoZc-oWmc?`cyy^8w#vd%+rW;<TEbwe4I{@n8`8Y%@mD<DaBV
z^)#wEYeUOWz_2QHVhie6nl~M-hISFt>9Jt#C&Vj_<tT{#R^@b`1E)@&!?XWx#|U4x
z)6UDq@J{Bstm5D{t>KsjmDIgvDs;QwAr67L*tFy!<>m~dPQ7bD`<ViMDHVXvy9DH<
zPLNtKu8(=1pn6{^^p{y<TdOCbgM`RWlYsDX4Q*by3i%|6id-jw>>od=%DV(!-bw-T
zuRP(^{)|NR^-0*Vx&(ALpMhb!#8~NIhvQZSGH**XgiXn&Re}gqaa4wK)nMoe9|DWa
zDzUx0oES|!4Xz_&A%XQ#s|`fp_{U4pIJz2&-fYH%s|uiBZUS~ei$HC89=T@GfWkK$
zz}+VoBiFwohDD}eCATMsHOf%n@5@|~COp3Cb&yY4PmNTgczxg5jQI2#oVIm3j#OI9
zX5h^2^{*IO?C$Z-PMd-Ided<42y^6*Vvc}61!Ut(*4LyH3a_3N0H<DT*5hdj`x*O+
zZ>~Z8`@Jw|@o1F&iNd~POTj)l2qvy!_u^UKsr}ZeBtO6dhjZB(+M<H0$lahk>M(Q%
zG8gx@t*j?69h!(UmEKCgz<*e;hX=dU|4C&#o*d@An?#HmAI~{H6AZuBfsNfAUZ3|8
zh?yG=qJ7KYTy`>cPYQ!=`eN|hG#Yw8S+P5n7>%Tfyv}2rc`cT~ym$`lDIHu(KdIMa
zd(>%Eym<mUlmj7BhwTc7+k;IzW8|kXZ_3DF7~>ENWu-c(Aq<5SuTkvtxJ`spqj;kI
zTctwzcwD?j1QR+;u>Ss0XdjwKU&$*$RASBxQ({g|8Gu>>b8IZPA>y)c#Mq(;+y}8e
z4JStWpqqo72Rz_5dGfaFUPO=imoQ;q2h^n1W7R%YY`heVVLRr5^^?sYFt|x97ZjlG
z@<b{;>qDcwlAz*MBI~T(!Q;d-f5lZF$oEbF1;+rmv0j9dGHa@&^dGsVKL!0Iml=!a
zz?1yEKpf)|;Lj~R%(Cah;+8-(tZV>|n#`<oturrS*iYi(s}J)hjzpJ<Vj}xxMD13x
z|AYUOh@s{QaC<U~d1KP0ve|2;M&)~0r*9C-%h<f2)>hJ-W5aR|o2x!6RpV$U=7?YU
zhsw>TfkgWz?bmH&#b8k&oNq_v#(#*>R^|+OP)=2L8B^};Agt&*4_^e4psTG3oo_9r
zC7Z%{+~aamv1L2933q`__hwR}A1)o>mVrP&8hYDjpxA8|8aDgDm)211G<;6?-)aPD
zeF#J`50m)jRAMvo8n&J*qX#>gd#mCUIu9O+pS^Zq13iy#I*uV{fdj$zDbW2R1bT}S
z@RSyt1-$)3nm#3Aea(7kbjroUHqq$CxeoqUB3MTE0Cm~KdJ5`Hh;Z_CXxf-eVi+Id
zrm2cQ$6rEvB?!C3EGM;RBP!_)LuowgUV8t56g)mhQwmvc&Hie7RxJPve%gZ5!wpi=
zi*h35#L=6|)_AA292MRh<4-Z`g7(-6v3YN3Qk)~!h<%Z}Y&Wznw}GtRgP`#o>*;J%
zhD8bo@L8w`#Y_D_dOZsM(_4gR*R4mJlLo{jQ4{Sy<-m-$YfvMgo~(-sgt^TDs4_!<
zElaKt?-dt7u;v_ApU6UoIZ;%8#scqcvcTC^0@l}?Np&n6z~jUMY|PAnva!JscrKDL
z!M~|lV>U`QCBUtfy4b?)Bop<H<J#AjEY6jRJGU;up$_TTceIxE4jI6kBlf7n&VR<u
z4(w;0dB_cNNb|RgFq)k|?8oHOh-o`fe)1$<q$e<r&7d}fZbFsugK5fY4KPgIfGTP{
zIIu&6J_-o!o{TZvX^7WSmY}F2owmK-i#c)3kx{@paK33l>){-l)V>Y9n-$=>?poxV
zC-7X}&0!o9>pyiXrvkM@q-)y~a=>mM3Jh#O=SLK1-7Nsn&;^Yi2co&k61yA&K_UDD
zRrHQPw`u!{if8NpC_4AB7}xKO50WGaAr%H8Bq7v1O9mk%A%rkUl8}TXOb4CMCDTDV
z>s%@w-e<{3lEWskX>TEfwqxi^PWi3hU#`o=(9ApUyPkF5_ve<VUHC$5<nID|aEN&Q
zy}_M)r*8YNO005v!Qj6Ycv(cfP3;+|U3`bhR%(OVIoeaceaLhcouc=^8LU@j;{AQ8
zG;7ZQ<JGlr%c}yS8YYA1j|{G*O8U^wFjRBg3Xbbnp!m4~mlux*v-U^q_GMLk-dPMP
zGqRw;dO3_-N;;KfC|5hs$xPBBz)ijbI&lSDk<d=_Sqry6NqNg{fALs>&MRxSL-Utr
zR;9C&Zy28iT?*2o1B;mL^~=J7U8dN*gnEsN9|#H;5vuvb!|jAR^v=<S<{))qu#V$R
zBWv*22vg8r<^fTkHE3qpitYCH%r%3~C`P}z_uy&JO><mCzgRrx9*f=j<m3E8J4%xy
z&~WB5D(odpd`6AE_&EnND~NM*P>s0BF--C`7z}C8P%^rUHC+pUhyTT*bA2FNwfH2>
zZfhYkI1mNZHMlpc8rHhcz<1rJ*-3XVOl|ih&KUJbo_WacKc5Q*OZ!01;G<mmd4K>(
zq0Co^gzr#`@uO66x--ozT};x`%^x1#^uc%0s`xI6e7`IF1YNHhn6g^HTm56P<oj=y
z`mhMa*Ah@vJr7zUKX4saPkg+51YY{G5S=QrF{@WD{`r}OLC5C7a_#fD<-cUKDk9BT
z>m+z8`tp`JI&eIWG}`wekglkLlqWR*E_uNMzN(?i*j!9m5(m;N!zJUBHe+Ux7VP~m
z2^!XgfNp*?Narx_c9lA0>wN%MlkZ@<F_WDii#{7uuyqN2w$+qbyEz7L2^ZOz0ODMY
z&1G`Wf2&pI9ES4kaiB7`fLR~dkEz`=m^g7EbNIOxtkM>OvL(5?V?&T6_5M$mx<Q9E
zHar$Oi)d%4`iI}B>V*cqlbOC*A!y9%13}P(mp{#f7Y3Bu3_ppR`mIFe;$4F6>nXfj
zw-Wp&T*l-%{h?9wf?%^d8f`7F@OQ=zcwuG|dWTPkt;)stAVChw@bggB%%NN{7er^0
ztL&E+@=3YG1=8Z?j(>>X{x}A@FS%gOkijhVPrJ~(d@NT!b_Mw`UH-vr3bDwNpu^&h
zguNuDLze}{j+=uA(+6YP{2ID7U*uW)uHf{%c^G37h?<8Iz}ayvJpMcbn~X?Td7{MH
zo$6@VJdqh(yw9SxSMV1bgHg+}OBhpL4}&!1c&%vwnjKruje3e;?9ZLpb<YdNy(aJA
zm8r~ZQ9mxe)JrgUG6d?bpTh2+<W24zNV9Y`yqRx^jb%lWl&{3W676LQM-h*D6DMSQ
z0_K>N^ZSX<d86BQW-z9Lt(_hV-N7YT_?XTkvx=ZOX*W!74Z@Vk=OC{(1a_q+;)BWv
zu)OBS2X;o|1HFUT!JQbM_W{fDJZ78rQ{vpmk-u4V1%A0rfy{#^*>D#(v@f{~ZniCw
z|JG3Ub2{lPw<BR=y+4Ys6rpmW6`H=g#7=s6;^_V}aq@&>?Dwn|AGGIyxmga%`|M&>
z@mt_g{}9wr(9c)z$0BA>=aI2J*I8%575kzgV;C_ddIX{!c?aJgkHt5~d*kM$=@@4j
zh4Zs(aNtZUlxePFa*O^@a3z6Xn-Pfm{x?~d@(_4_BQ9<2S@KOC!>B{)ysj~r$uns;
z_&y&hj+}%oLmaW>r30*elZHoBW@798JA!=XTZwqp8ZNpsU2>q$T71{3U^xeSq4G&?
zb(@hR==3G);NN~QHgF3zu6ZWB-dv0pC+lF;W{v~=CZP5SZ+NI<hAx)mM>i!_ilz_m
zce@6Is!sD?6>Y=<^klY*ldN^8rbGw`LCa+vmgOJ7sNX7(OqqJ)(MPeX=X>^)GzJap
zHfAz*HxrG%Sna>x4%}wT`N))+Xi#?<OM6t}E5qq{%Bvh>{v!q>o#l0o%?6R41}j*7
zlUthH;@{UrVEZj%Hy`u`yZa|#WWPKZnIi}Lt)%<)JBgwji_8nR9H;N@6Z1ifA}ODE
z1>CyFvi5;LnYU#%N?Y$U+1rcoZ|DYW)!odqkNv;qIRf;@1wixiiA*z@gX>f;@(J|8
zO@?Es4=Rhho;ZO{%jmVYHkr!`(_up&Z}iYgfdY*K(57Jk9lQr%&mj<X?kvmMcp5VG
zDG&A`0$%M7gsL<>)VKPZ`(;s9`0PSfo#})#XB45qf#W#gY!;@SFGKN@-WZpY$o}PL
zK}q+~wW^7f2eibcbl+;IKgeHE1}Rl%2=kt>3qI&)l0VNL?q99u+A3AJ^Xwq%aB;-~
z-%ys)=0hIoG#DCEjkZ6J!d~4#h;cN>uGX^<77<0Cc|B|W?ISeh`(k%~BshN<1IrVx
zVz3Lb<a_3F_b&mEzBCm?LmvvWT+=Y!QGrob9x&(5c`TV2gTHQUr|(7|UbMR!F7~KK
z<;TO2Py5Upoj$O8=R7=r+5=simAp(wzPxJ@%y@VW=`@L~{B0n2R;aVorh}|?)LTI|
zGy|nBABE8(hFc=*aCF9QoHgAC^i98U*@52@mA;oCuCE@r^%%%RRx`~!2et5I+cWsD
zZZX_Huo|2KmSgJj<0xMlY`(l#8h(DC4O+hI;qy=O>^0he+|!!*Yo218Kla0(iXd30
zor8_l19_M6UhG&;y@b?ND($WTHMeQ-qnmgELv)$w&WGwoy>k*rFFD@#+l9JXa@?g?
zh$RWRY)$wEls;RHV~Uoc<zr3id2Faw-k#13pQkV@ciICMoCmFn8fcx}n>$y%;X`dM
zgZquMsMFgF?4J~Jm9Y!4r7)j~^^VZJMAzK*SATH&D+Cp@D|vZMDt1>~0*BH$uzp<%
z$`k4ZQKogZO5{OE*R@0ky|XaE-wo}8k}>8KF%E2$XzOT$9{LZMt8o%)_9=ze@d0?W
zcPN%hlF%`=6hHi+=ke<sX3mnSta0LT?wn7&cH<(B&-M~W$OU&?^}^>5DsX9DFt$%~
z;-YI~s|yyB7I}3qIM2O|YZr~j_Nk{ZVo4xoZd-vhBgr3@wG`d1-IYA`+K+L>I(1v1
zi>{dgpd(J@Dnd{4Zk02QBz^Fnx)9zK=cC(#t31l-g2ckq02}Ui^X8p}%<8Bs9G@x1
z*&}RGSv!n1{82FBZ{qDvp2gkY)}XX8K~k6bjF&_M&#{;esVSqlv)e}|?fJdB;eH+8
zKw5AWd0aGBuL61hM3Bz+7yg}>3y&8hqfbFBD%I1?)&6Aj9ruS}L)8&j`MCh!ebK?g
zXN=Iq<Co;!a81alcSWVY9Go5IaHZ0Msf{M@_xnk(?%YwdoM;DP(_BG$Z4uksl7uTl
z7ogL`DrjZ%xqfa68?@CIqvi(Vm}7<TdAS;z^;R-h#}E#iY`~#y3mmskK|9j`Fum8v
z=A4;@<sUeD*r<}fM+qIn-m|u;br`ZRAE(iIN54ltn7HOj%3XZ%&)1!pQsIQ%i%TK%
zpXYquo{OjzR3qrW_9LzUF@lB-hGzrK(b}SbkDTI<(-&o6m6&?Tu1A2Z{W+8QxPjTh
zlQ?&LERH|qh4SMuXt-t$Sgj1=Bfch~bnbbsZb#hT1#}Ks@QxXfZ#C|yJ~a1r<#^W|
zL?wUCy74IwI{M{Gd}mV+<%qppt@0$clc&1<$qw$mW-HDJF2jt>Tx_Z=1^?NWVBNPC
zY<o-inCDTbG@?F|5;{u<no5?{ti^M^iFHpo$gZrntR-|3cz4lfxqdZ|d7cA~gKEi-
z|C6~cs-ql|sw9H?CuGwjuw!k8M9%k_x7{}e$6jtAQ#}LjO$ShG(j|#bf**J$H}G3=
z43xK4QXkqy%BIZbsVV~`tBW>a-U}t3xlA1hExvS3^x_86a;8!gOPc0QcK@Cq2;>#g
z{_Ml&eYk-3A*Z46TMEV(kvC}w@LxI^{PVROh!}PX#Ge2hA09+mMi8^Ta+tNgTqO}t
z=*eR?p9NvDg1mvbFrPHSuzsoFzv>KYC|knEZVtla$sB?X1);+06SK{_2nyMD@NB%!
z?e3Mr_J{pZ=<R|QBNbRM&<0fced7%&1L%9R9Pa;I3|5Qv_@Mw*)OfKGWc`Vum(~Yl
z>@`1o&lZzQ@-a$J73zMdgXNB|yv5}xNC%p;;(94sTp)&#S`An0LT*;{ff==B;;*dp
zJnGmp!B`~|>=x7Otx#ca6GPA|v>M$`tz}Q?y?WUz9*uIgz{xLp*gV;idro#lyBE*-
z_WqVw^~?z*d)-m`CXMB-tA-60m%;OQ8{g@ijR)MTP;L5Yw6`4!k}@@POd!tQoLC;V
zirAJfyQ}*4+Kp;g+u5YzBv|#IA4-261GURqFi&xr^oGOcBSW&V(ef{e=w@zJOjaP=
z-b>fdrqK|TMLq?O>k`k5Os4im7g_?><AykO+?bw#rTbzrC(RZ*8tS<AzA0$GdmDH-
z29WPH06zZQik7c0!I-x_Fl-xXe|L!4zB>RKr-yU-tT=Q}$cOF+r?9P#{C>wbL5Zgg
zgb$$S+^d5i9QT2{l>!RsDWI7}ew#6r9hsQ}t;<G1a)mD5|27fko7tmN0qr#+vQY0j
z<<$=NfbrrB=&b9_yn8os*~(3V)ve|D>qsH5dohuApv~OkJoT7=AiZ^LZ^;t1c+|=w
z2n%%(jQzF+U+S;MtD#eH%2|CJl~IMtfISi|&1{L;m?)4sJdw!TQqX*22%Y<gO*XNe
z1@DQ+((ZMr78eJf?h^ZBLKsAC>;bnwCStdbK9)R7fvLkRQF`f?AgyRK7u@@y&SPR{
z3F$l{Ar_>=dVyME9&0eAjzq~}(zSYl0~e!AFAA4@ScSq2+C>yc!?fTPNNz94+n){h
zBQ}9Dc02@ijm9IYS5WPrT_8Ui3t#uGMC~;zh@ZS0Ivpt&rsB<;iqC_a(<7nrQn%#$
z`#^NlYGiGmwQSby%b?b}ANOCN?Ej~6yeid&k1*aw{cgVmwKU4xZLWd|>#ecG`x;Zu
z`6={19fUQ1U&Ps?uj2Nd@hII`MLY63FwV+_kQL#Ox-CUWz59XfJH+uvN;GwS^@j9|
zfoL`U3R9lkEpggd464NntZ6!enX2o+SMNNE)HhVMw=8Gs*TX>T&}5!ju@kD&Z*Xy;
zKk9#q<Uuzt@_%h&;oZsx)@rqxs~y_Kq7;Xj{J&6Ex|#S0Bj%Am?h>p0&l}}Z8Rhdb
zxH8|F1<{-y)7}gHyJQ3h+6zxk#=xaK%DR4=$z%`hXvXuQp3<{Sao>m^TBnBUMwQ^<
z6i=S8V_>_=16(7C>k)d4i)Y76oIUqo&l&09pe&@BxEhR>UH}}5u-iHWQ;G_)!)F;E
z+m(+ckMy}RY!jGG)#U9-Ke${gPGZ{M7uNKRMm41mdDGuWireaNT_1nEcES$NAF9UM
zh6KE>XNOU}UNe7*3q-{Z=O<VM(t(KoKfh;8T`-;f7|*PaW{vrV++=v5<d!TLTPtVs
z_OBKwI)AgOuJS3@_jJP&WgpO990mS;ZtzCpT#Ou325on$m^A$kOFWVZrb|bGY+Zlu
zHsq&d&!1`x`+f!Pdj(?Yk|KCwnE<z+ltIDpx7^ct9PC^YgWEkwqkZ5G{skwQ%HiH%
zlB~<#y$%M08!n(75(BNHLnLv_&*IlVJ+a#>0QAyyQB*Vu+ZI{^ydI2-@mkP5u?BM7
z`$4KzKJBCivm2Co6>pd(G(Pd=A6^U8KX{F`&O8crJ5^!)U&)x<LS2ZCQ&`zUJ<RmF
z3SNI*#=U=((4tAKh<^gHO=Ba7oi~7}zdrL-)xak@2`K*HD_HKj0F4LzgjwJZ$5z#%
zRId&;>j>CaWi*<WsM39DITve9lr$fh$`uk|Ev6-K{rL&>{t`*L{S(2YyN&&&z5z=H
z`=h~{dhVH?0ZKIkXb^P3dC6rst4aNfMe|W~@r#*q*KmnUMNjf5jo6hNh-tiqc0n_9
zLDt&J3qnqD<>)Dr#*7G_DOCZbrHP<jKN}2E-|!n6<e7{b$Sv0-arJ%|!G4cFnyl%;
z+PcK3_19GpALs>Zvx1=@ErYEYMJ&XRg)m(i0k<cKP@HQc(WWz2J^h{NUs2etR*3dt
zb6|{T2$Xs`kWcvzSMfLn(rZ_$Q@NZghWo;Y!Lu={uuRa_%V#Ewi7P#!KhAu)99Kn`
zV@G}tTR57!Q<fIN1D(T|9K9U$cbtM*Pp4zHZYr?7#W)~19t&Q2g5B?UaNeFk`9fb*
z^#93ab&lNf#&cdNPQ=?KHSp2$JPw|dhIKd&PRv??PmdJf`-|jz=<dtoT#I<}Bvp*s
z`BZZ6WF#6E0gT*I1Q*NBp-oHxbRI8-xXOoIR%j!fRc=Jt2nR{*oXglzNHc@^Mi!T1
z2{-!EK6e1!*FA1Bvxz#)&}k0XJvs-CmJ?y~jd?h2unFqe5-a1h11_vshN-oGcwAUD
zqzs85#@Zy%ubTx%R{kvRMKhPqdLc2JyO6wcQE=<uDCl>3FqYpt1m9=uz=r?z0v(5)
zT&KX8w`TlDJ8n0=d3Xi%_*sURN7kX+y<kxLIRNaojfUDgq0qci6ARY%1cSH(pfxgD
zA{I~MYUh$!#mWNWpPz?glA+i}8FKOZ>3r-Le{`O6f|a*)@vZ*hm^ed#&3cqS8BJct
z&?;{Ck#<7|Ord#g6Vum7<rcT{u%TBXjL|QlE|*|5T}e8<qY0Sl1Q7o)0ki+E0ROR+
zY3@-Z*}E_rYfr?Ye~}EW|9;B+`}GEuwoiQFcG`daM+~=(o4Io+k=c95Fzd!jOuTgl
zjHfO@X}A}c_jlvJd?IkP#U9*7d|11Xvs^aJgX<f_GWV^y&{AUxwq0w9$x)4=!_$a4
zPz0$zh@Z9pB<beWkovVtNIZU$_8Z4|t7R@I3=V?z-5_qyK5{Ro9N5t22cJ?yF<Co_
zE0%xdc0=kJw-||vnn%nzCjy+`ctSyT287<R!LaAX*gmQ+JW(Z&+4Kv<O{Na8;$7G&
z$hotI4!BRIGo?W*vq~Gpz7HFXvm645*ILVNT#Ny+qg>MY={ZYyF&=FPt6});<0wl%
zDoKru;rc!tFeC(|qb%S@->cZ&5Dea5jL@;6kVO>Bv8lft+ZRq?txv7MWbst~zgT;m
z5xWO1m->>Ikn(987lK>)d9F2TCL8M050yLoAk#vQEib3R`qvZDbJYd#Y%quR-$QuU
zR%>{dHxugr7X>P+#G<@w3v0IRM(q%q`!6qr{>M(E>_`R7GefMQoz;-fmvP4<(vFKA
zK#M#|%5-8c_M^Vt-Cp2*rJ0+IyDicEUIDG2IR4rj#yz$r@mmp^&|3MOOI4H1@2`%+
z28}EjG&>#64=~31xtZwvO_QEuzRbce3^%20$9I;rH;kv;V?iWa9Or=5^QNNyDe_&c
zQ-zk^)}Zc{1vyLRu+|yrY|<mz6@V`9_~#<^;@lDJj~nrNdQMx3mh!S+ThQd(MP9!5
z92aRlGPCy@4b5&gZ1V9`w4O)19W4b!5obz2v!3g`Tmb%mpT&7!%+ctf1T^kWL#g^o
zQ1K&eKCl|j>5*r(YyuQ5or?Y$9<(ns0(nhowIX#L)2It!8{&53XGJc!JXgVX6K(Ff
znV9zvlDPcCqH6i}fs&^xi_u`Gg6l0Agu@>=p`zbeHpeCg3rdT?!Gji%uc&8$PlwGI
zgVL;1SmK$<-cJ}v9?4iNuUwAvW6!|Ic`0D~^ncuRIQ2(H%w<nJufiZZ9~g6q{49S?
zN<?kFnR6*=seC&mKRN}X!LC9<J$ZDzY{4%w9HlRd8Pw)rjI#o}S7~5<M+KysJ`%!$
zYI##5aUs{Ifp5tIbS74jz401w*mRNI%2F`3Mm?5Kou&7;L_*YxF+Ax-4T_h9qIcUS
zu$nraJNJ;mimE!)dX~Yg&t|cJ69ZA}+D(=?I|SuT7Z?`TbAxM^@cX?It)IQ%#{*08
z(ZfhQ|1A~|K66Lgb@O=Dd+L8I=?%8eeNmbh2iZ4IVedC5aB$rj%sx>CS%*q7E_*I?
z-_HlbU=JK*-jn?J0EgPBd*SJGp~|&@%U{H^u0cugNSEfr<%a}OYrDCzsyiybxJpbn
zQD(0`7vx$0Nes$zdB?Xq{NfoiR91(uUKXS^jxFG)7DeIeQJSd#b}H=kj)0K^65wtb
z?L^el;FL-M+UEG6srFi^@^)am^-|Hk%nTee<?!{y7Hogz#&k-0f~M~_;%-)e^65hX
zZ~O9g9}kdC-71k^U0i+Z(FxeI#ul||=}Z`}KpiV*xHY_t8Ec*bxnc(Iky3@GlKZ@X
zdq8TEx<ss?4EX1zFq-zR$vT%9ejfqe1NXzYzn5eC$Q4lKXw2@2h%vUZ4~PdQ!L0^$
zXqYGAAIaCEImj36hbJ)Qit(`MXdg6L5CC8Dh%0{b9!Ezh`0hLdws+!$YS$u+E3d$l
zPXjS=ry1Ib+gRH7`S|ouJ<1g-)oZ>R;sV;&H}t*0l$TT`8uKn=cKv1Wc)K4W^#wHR
zTZd*>>Uh`2Hyp1;g8kdG5GURXhIbP94udExdUh4V;+8{KTrrnT&F59k#DSP3=jIy^
zpp*Af?5G>dSM|$*uoZ#qYriPU)w=Mhf#oPtc2+3vS+LXfJBfV~AT%t!$4oq_OL^2@
zLD}albS;~}GEbkNOm^vPosWH?b=xLxHkY0U4ntwr_X((S(GXr;2q5ilk#NW^6FPjH
zK>xuBFe$hov{v+Io?ou9TV1h~l^@HsrUVPG&O|_VWCS$yc){fRKUM2or;KQ#5~WXx
z<Ga3yDW&D`%1nwK{r+K|4~=2l!7J!6G6WPEIgsgB3|&LMay6?hklgbxw#rn53Uf<l
zT}rIeZsIje=)tXh&hoaOS6KOhXx?tS5k}8nj#FBEFva&AIIhVCwbeGnobg4qygock
zk<E6M+hNnLQ1a^*u{KOa<zM6AS3dD556A?q6`|m8KpW;P&O*`VrLe%P0u{#xF|~rP
z+-Y<f7}5^7;KM*LtXa*PmlQE)J5O#|<imYd3K(^2FKbU<31v2yX-}3;^TitI%s2_|
z9#yC%{>4;un|SS|0{Atw9JC9lpP6=^Chj_*;vCE(ju%6dLpi*lF2YqWDxk_q3HJxm
zwfj&_@@>|6w7l3GZQECa(tU;Gv5y66EukGx&oQvXDhK^jDKB<+5(f(j)VZJLo`Y1G
z?uZCXtuhnbz6yfZ!4&Xx7r54TJ!nt-hYcFI4egeAQ_o%%ij!9hij)#uvN{%<S9NfC
z^cJo*Wf`+H3W8acCs4W8l}(Kx-Ms8QD{oSPUnxQ0bcT9&tW(W~y*`52z8kfwcXP9U
zhx3`WOVM1-6Qdox(CW@f*nCQdxZom;8o~KAi_vIe_?#(5Tmb{qLfAWO2!=SAq1$66
z+?qE6lke;X>7yF+Mj1*ppHim%?sInkK@{XpFu|PCd2lYx92dXbkA~36TKp8~=rb2w
zGAILOJPy@@7J>c90B${}2LBqKfYrxlpj|t0LD!rHY0WN4Yo-QEE`Q84u#-DTsRMgP
z9M~%2aG%W{Y%F&KJMHJ(*@sx@S7z|K{~5vy>Vv3!9*X}pjYN675&v~^8)Sal3E65P
z==R4KMf)!EJ`2n7^V(?`$;)x|<S;6Xb-?LcVo}XW2HNMgLg7|3JpPLQ9V1O{I8Ki(
zTS;@dS2&6-dP&}SZUURaWO%*N1moVVhq?(RtZM;f{HkWL>`z5_VD35`5n_*-XUQ9Q
zD_Kzddx3ZoG*2lfqwOp`sOoVX&NX?_{ndpHbvpxJlMT?*v<DR#)p6;|w`@VOH}*SB
zoiVG&!SwT$;8GZcT5Siod^&<jpK1Kn(Dmpxq=FCXF#}~|ZwOyT2snPBh%_*z<kxT7
z(Ypr<ZUz!A(;f_qNUMuDOibtwTl8I22Cx4DjPfdh)`y`&cPjZG2GOp>ZvgRP-O$<M
z3y)IUO#Hl5V*gEnhE^LUUZ4O+8zpR5whZrtp2ezB<9I{(K2Y0lP2E0wu<C$<FB+bO
zDJtIhy=yx*rRQSp<Xq}@B)fs748s1pz~gS|gVcAfFm%^Y^v~Z27<>wvI)-CcRUiJL
zM*$d!nwZ9gI_^Bi4WIu^2K~eo$P=PrsERAfevSm&Lnpz7rDJkG;+|&J@CKDZ5OqCI
zVw01A?Y}CyJog*Btf1%2ljBVNCG{7mE@Jxk)7jh!O6)o`oxAKg4{1BgaFfG9>eH~r
z{!GBnKLep`Qz7+(O{-SS?qphrjd{!0EaH$<f>vWZs2mf4a%iGN<hn~@vb$N3N*u}8
zvmZ1c*TL>xq%jO#!*jf*g4L8NHjUuGbq1lVS;Y)hyThR_s*cNpB+S<5x6rudABm`b
zF!dD}2_KJIV$Q|G5bqKUV&gPWET`VR0}_z=6yTzQ0_wVcQ{D18kFQ=#9)#;(c)8DE
zI6j~ZW|6nh$Ug?gjGu)Lnw6l;oDHTo)0nnvF#9nx54<OgN9(M5X7TSOTyO3~oFFZj
zu|>dC&=K69oT6URn{3kCKIrUwoy)QgNo@CK2qm4jn4EHrUw&m{-p^Qw`qL$x9<U3Y
z_eX-$`%=*OCI;6P3&3E)HEyMI6+Bk;$L`e}+WUEfID!0R&*w82>k1UDa0S<*Jjk3>
z&69)Z%z8{Ji7N<W=9&d)Ni&kkWeu)Jdu(xiPw2cwXU|dD+`BRas^<BCsQ4-uyIZ3?
zU_4jJJ%pRBazLxLNm8;PS;#2J#;AOE*fUXyf^Qs}9?fTa|M+2cZ(oSZaDmnr<U>tP
z#`c|E%y#`-roOTQX5|*6XmF{yN#j;ne4RAmHL4KPD*;QE?Sd}LPdv(4$t~^5SmJa!
zW!VnnQ)9~Aw-77f`56@5>dlg#I%4OtQuLhuo#kcC!p7%M*wI;ouygz@@V?;)^JV#z
zHK{^>*#S`VKa2+YlflB1VJC4KJM=2K@}n5U3xmKYQ^{`6La?7`&5jQYhpg?|*le|k
zRb8g*a`GJLIB-GoA%8RS@K|hJyq<^UMS?8jHMhTbo>?ho@*B;ml;;n`muGh3jjk-{
zzIqA5w6C-NMtgA62nAlfv<8jl)uS}xv1H!nL8#Cf0|nPr(AiHHLe2%lEF&*Gm>rDP
zCzrsxTSH*dgM4T<{lsK$o|4-OvWUY$*HsnmZWI};>|_v{9-2@4krY9;>J2OT8Gzqb
zyQ1x8KNb|Q0K7XAc&Z-?QE~&w`PRam_irL!-ZGw3bQ%|FM}qRpH)eK{u20DoCeQs^
zeR0x8>S)*quH)!=AEya!GuMKSj|7^PMcgxXDepLQU8w6xI?mv$xXRiCn_ai@Ii-h*
zIe!^Ub5HSxOLO7N-L<&xaTI!5{b8|mF3)k%;En&Blavi9N8{S7P(Q8?ZV@BOxg?62
z=7qz8&D6u|HJ&@(zQ*hV7sC9R12Fk(85=yj8XcSeVcx3#C~KqtFO)$Lap%=8Rzu;w
zKrFd;oBep?4};A5VV#8r>Yf%bFDewGI=p!vb>MF9>xv?qa*5LGzA$EFD5x9cK*MVn
zESWPAG!E49sJ<1fv8M(sFA-zUSSe~19f4N06Oz`Q`a=K68dUDeXC=%2aFtmjLEJMM
zrEyaw9dDO#joh=~yoUUMrxh^ka1!l*d!YQauDNtsAaj&#!JuAuS^ZE2j6Yrpa_w*C
z+1^({o;=K4r`m;gHEKg^)+C9%>#})jpFYHSeb3}|p@QkS!*tg42QA&n;D0lWbv0~-
z8v7#XdbN|oxq1+btayp>aHhIxILeNi!We@LXihKVPk-1@7IYC<d=CZDl-^)^mzZmb
zG-D25#oZox!-%(asNZCc{m+j?@yyYb(Y}o19G0P3mo{v3h{5GWu~-uj2$sSru90O+
zJIv+P4cmUO++>O@=ZylVYxVH-Q5kwWsKcT=AuvdDHFaqH%S0K^1rT<E+X!2@b!aE`
zCl3IR)KN6&421d0tKhj7pyp>dczl$Bw5Z#>J-P>VGd8i@L%!IuNd&cfMJO#O1Gh7|
zlFn}>80FU}tk~j+Ci4&Cq9<wiF8(36k<KSSd@Z+>g)+5_RPZ#>f?rDl@Iy`>bQjfw
zZSqoZGmI13<EZlVV*#kej^|yy{^3epJC-gf$J@*OaDLCzpi{RUTwTXw1I;eYRWrFp
z3u*Nmr-4pF42GpegU$c0qW%LpKDnQa6EzsN?>deCnpxBrcm(Q>Tfo|&JUIMmJvRR@
z7;7!dsgpKUi1(C(1I<<T;f~B^cYkbuPHf70j!zDUp!m)vUa<2hh^MOv=QZN+$ebCd
zpR*Z~>y1I0(^0+K>k7U86E2s%U?m%YH!ikjX0ao=tIA<$BR`7PIX}tMq7wAXG=lb<
zbNR-g2vp8q#S~tta7bPWb#Ym!<uMtY+yl^d=5%oQX+-^VKGd%*Vp`MoLea7m7@X{Z
z+p^}MzxN3+3~OW#TM?`bYoV*-B+uv^ipf*<K<4_F+_pAekmnCIH@fkQ+b@$tp=KEV
z{UQ&)c3eR3uc5r_#}Jl!=#=0#@iS{%?Z<t0lJ9)_b!IZf6*^E8Y6pgb@0n___6f)4
z<pa=gu>qIv=>^=|8M`VrvBt~Vuz5=`Y||!o<)A>QHIAZl0CffUFChQ+85pvBFUmE0
zRI3=N!u3MhkKEV<{;p}<=z|g5DW8F_Xn&lV?82{1h(~1%$GYX?DYteBe$&iaHC+li
zHWrxKk;{Iqp9jjG_s!+un}o-cW3Vk(3zX|lNRHB3*0z@}xBjvX{g?d14lZUiL&h?%
z-FvWL%0Ltqax5>n#M?Utfm_F0HnNWsN~4olbNy`IYP1Wa$&r$Zh5-22D+km*9p*Vr
zjM#}wd1~GT*06dde=M)S9%rs#PKpDPg&mw5yjl7(UDSF#5N_G3gS}R7R^_#bt=c^s
zt^Oo4<&9}z`sXTGmG;8-9|F*0l`rocMEgcjpGs}FL4cHz^@u7%gR5yElD1deI9w0%
zi!&rGejm7abtL~VGY1V~A24}Gym@BAFzS~$z+KC3aZ#U%)!QeSVU^qy+MZoxHw*Mo
z`_3U~bD9Si_xa$N#WPSVSd}I3AWxvM72vN~Fj=n_?|+VGR`w-$_SG4@_3j<l>IxUy
zRO*@O>Hd%q>5LXBNoe=aOi=oMml%3)r0;B!WW|93>@>T8Hc1&!a_=qMFed|^G&`eJ
z<o{TQ-f$i>lfL(s$xPwo03C07^MjiX<IUSjl!tV(-_ugi%5e(>-!njI%vgzQLKp;P
z%;ZyFuEck@wLubC4Eo0_xxHFG=-jNK_iiQgm><Tp();7T{jWeY&ADG+7QwHcV`0mY
z>6m=*HiL-40F$m_ouUk+YBzaQSQDE$@et}iEWmQO!?t-Bp!CFJUb0dyaSKp|-2Z6b
z^Fj{~xRl_J!$DY|6pIGBV?k6j+|0u{0ctixQx|y*@%g4O>rJ5`%@4y5dqjAIxY@r0
z&9U5%gGQYpST7F&(U?AFL5sg~qs6|Uzho!;Z`XNLJJ63g?)Hax{}c?$+Xa2y2BVfi
zDTw2@NHm)AsGH4##eEJ0zX6DwFM87ZaT(gi^@NJbcu1+Y#fC={q4DJhL8LR=+}gi@
z%NqSK`C=rhU9V(q@jjsQ`YQNSUM6U)2^RdxhK?Q%Fmi7SXzwbAhK~7Qw!0ku&A$k(
zmM<i2?=zTN(*J}$4|VZlkC~`7G=SY&oXW%rGl(Ppi3?`s5Ee|GqoE$~Yh*MJ+9_t^
zt1?k!w5dvG@GQ^@dntGniNO8SNDNt*08^?4qmD}?{BbVA`@5VV$88TN)(q!%9d#%#
zG?A?R%NMN2QP!&e0%-q!h>2IG<Lo<|aF9zZ=-fTTeItnNo41-B>VFj)k1u0QtLvbu
zEDyWtU7_G*HgAk>6~11S;L>k-IN;-Y6lsc~=8GC??JvSB^^_$E3qlj)QnvHsP}IGd
z3#%(9QSM_RD#wKguN{UFZ;3i<Yz`3H)fe8Gaj^GNhn|@N*q_}Djq1)!Hd>2*WM?K4
zg;hDGkylDPirXg_gW<$%v@?FppE@%fDL#N2p2;ler9SignvN;kIlPL9g#mvw(F$g8
zx%@qAT_s?Iz6a_IbOz)7H6Y)u3pu9)_^-*7e;Hej_LnrMdww%9+!Sc&bC&HLlL1xY
zpRDvr4YvMkE-5MVXPJpjY-PA5np~<Bw5I#AmF2`Ow)Y0B+4>y!jex1|1x!rzgWZe5
zQC?ueq<7x&uJE_a-gymsy<ZnQ|JNV1-$rAq{(Iq#j5>hYcA@vXe#9+U04D!SgI}&Q
zpmW|O-0|K9t@l*3PuqggV|*>lx|0Idicz4Gr3qG!$9VS;B~<<Mo~w6LR^(AE%M^yg
z@gI4pc7GCV^AClRqI(iyT^8mTyK!+ss32;4SXFQ-0lRH0z_`W+Ew+wA?eRq*e>u*)
z>!S^<DWo&3*DJ}$vIXdI(HqkLIfI$$h2TGzd?1%<!C~=DmalgKI|qN`LpPG=C2A>a
zAE6FfHUdmakHJRy3P=~k=tbP9uj{hFAUBVx-m^xX8x?HTxmXO_;Q`81KID@a2lZhJ
zFgcme-qe@EjU_2q$cN!`^<>&RlnTv}&zSLnllXGtY*c(5$_E`Ke`|0t3hp`>I>G{j
zOh@wPcYM)xfeSnzNgkj6rfhmob!<!_&PgHlLDt()K`jHbqM3YKR3Uz_^u-Q|Ccybp
zmVCz@humF*EjMp6N8gXc<a082ddVRzVJvFs6=9C+RPfCD!xSa^nc^Jfc$aQ4&yif9
z&Ik{5tBHXDQeuSKJ!EQoO1RE1KPYlOiXF#q3NEt)C||jqa%ooFd6km%m2Q5Z^#U=n
zjH^>KO4y_~S=8-$7E9LzVnk9Mh}K$32JP%mylHoHovTk+7iE+*PV6S1pbHx&R-jmP
z1|oW1f#y&1nRVE|ta-eQb%a!~OM28HXO<6EGk>zvqbB0OWpeywyc9$G&BZV}7x~G<
zu}!5Ex}7TlC)e`Kwe*^or_eq&lo(?EnA6%9+@I~jrdM<iGz+U5G&vVVidEp+J{OLg
zSAt#3M9`Wzkx812Flc`eii7q+&~smaqZwdYRZF=zZSJc5jXVFH2_vSAL+7-)T=k6|
zwm)lUMQ7vD&XsiZ&p;W-yDaR&N@#3a174%|V^i}cOg6a2^|z+MtG!B4Z>@*8_?{s4
z{K-NRsq5{tg~YbQ7r5dI%FK&#^zU@s+angUBPQW;uZ{TkZaZ`{C0_oj6b$*KhRuaT
zDX*7mZZ=;5(-)NB45wo36?z139*IQD@Qn~QNF6;^R)b;LHgGu>4%M9jIKFTI%8zU^
zA2{L|hV&nfMwa(UPacI@JdU(s1%EVrE#3<9<?6qw>!Ilg*C0M*=l9d>?d}2SSGNc?
zJ1H9|slj)uq14&qjj0ogA?oXHZcm*)O4DVM%R!X!jEREfPKVGo_aHNPR1EEXI(Xf*
z^-#J-gg-TsP`T`^WNp7FxJ#ania|wS{38@Hy+YA`_!`PIJ!j58yx6Ah*_17`1jDJo
z-igM7XXak0F-(BxhvUHRVl@}da}w-R48df|S!US07|h0O0JGvVOlKE)Tue7`#ZWce
zqjW@>mnpQbKg!jf`9jy&fB5~>67;qx1<%*iZ#Zrb^&2(ulKIo&_O?<8dg99dOq+=9
zQ)mxg$05_W2%8@_bDd%KpuV#JwO<l9&gzV$Vb(#eHF6y@8|A~=pM`_gw-5ZeMI`mx
z)}s9Dx@zzI6z&`yiz#1d{#&#`a2_>>6?m?M_LGV1!tZ2!Gj=zMcX$ac+IrYAp-52l
zbYwNDmuRNgF3j&BukU~m)(khfski_pYggmo<;D0k*qrnL;>BmrgXZ-%tW#0S(<fg=
zd4-W+zpR0YZ=B-W$IZa`6C*$|k2EvIImmot1tM21>K8pHlmrpGxFr+J{MCtBqs=#!
z&qn9|m*9o#Bpm&ic91V}z|wXA$ezXs?aQ;lAu<JYX4k?hClhpkwgOc?hVsl)hd~yu
z%5#QYV$pp@P`^+zu|7JuA$1L_U0TZJ-9u@wJ{>Gq7ci484J_$YHpbP(L$gK^82;Ab
zM!rels+)xJjQ6bhS|rz8lZ1^TPv*5K4_bb>K*d>N2pB~3yaCg2ZO$&V7)SmK(>vU=
z(~oyf8U@|I&VaLg0TWB3sZiztuN}1mJtzFl|0WK$>5w@fUgRTbjo-@M{D`yYAcI9n
zpGVGD^HyVb$)!JuD1MtSZ0;RNUE|aF(C|>uS=NWw6`f%LiCeJwlah79K!BDVAll?4
zn7*Aze$(MNtKApe#uVU&o(edzPl?)^+u)LPGrCt4fwSu;;<OS6tKSbcDs?ew+F8)1
z*~kk#kATz03sBP27c|^sLBshui<=n;_3oqahhaF_Vhltbsg$^Ajz=eH5d@r0Mcr{p
z#06O<wA(2$`R@%-{&6|jdUi1nS2@knI@L?FXg}O=7{`7u#<LrW(D>3hc)#;3n)To;
z`FjQ!Z>KZ8K@Jy<re5l#3(ze1Fz>e&U`y9R=7E9Gni(t<R2o1{RSuNTUjS`g0RW)~
zAcnju_%a5zY~6{?>9={zYr0>Xv@+|gV)U*F!}7_g;QCJzmK{z&&lwZo-Ga?v=rs}?
zA3tVNCxy`K9|>CQJB}s+Vc=^N2z8w<U{bLUyh%SUtuMw8Lq_4R>G>=)nS67x+K@Rw
z1(o}If_?B1Ze-F6i<)E6-BpguyvL$C`Al24=}SDX*+IqKTu|R`iyHpKqob}Q_PiR)
zBdVd||IcDE=b`;8We-PE|F2y#>S@lxuy=-F7Gy&maKIwZq@i;}5?4tIX3e`^a+!j5
zta_WEEtazf(VNj=XbdDy_s6;o*LhKmA!@j^@+Vu<u-rcw8YQ&bnWD>vjwKB^F@UM|
zEkm_gvCug4xnQM7og+KK!7?-uEgwz<GcP@^Q4oXT!v4^?X8{Bs6wv$Vc33}c3-K*3
zKp0emM?fBSw+=wrV^`iV(wkXX`+>{V9Ps>)Jc}z9N@{e@;aBx)*gJnKn%P}tqEkc6
z4Ndm5=5xbg>5q62|8c_RgV&j1=Rx>I9D6rUGp=TK3hjo3gIKx(vt^W(RL^GKO{w6P
zTM4#d-e61}h6kxn_};s#_`J;*J=cn0!SX6J8@+>TbW~!T9dWY${>)2se@c>7buj8{
z7x#`6xJu@HNS#b<u=9b?>O4SVTU3g(z9vCc%zYN~JPmC$>`7~(&MCWj*s5s=6O!|>
z;lL(lJjD;Sbni1AjWN*Tw-@u=y(w3z4;AZ9qw-imwWT7Rr#_h~$?ST;*Z!ug*pzhg
z7HEMe-h-D%rm;ic^HG0H1FP#1%rt}|h#K;NnYLR2?w<&owx!@&-(2{(F#{i^mSNZ8
zBBnd|0_Z!dqxtw`Tsfl{#qOpcYDO+QkuKCVR5Q_xS7t3PrMzTQJ@1b8L9ukGaJ%p-
zq)PXq^YaVb`amL@Xii|+Z_a~nKo;7~56AjtCD?DcB^J@_o%#D;{!V=vab2V!KYUFH
zuvKCzJtylt7C@_Pqfju0QT8DT6Nv*j=u!mrD?i|iw*{e^w;oUKM_tE%y_VRmHKe`9
zNqAS@2VN2DWb2hl=#Uf2dXABxr{op$HyTX8?+<;n7T`da9F&Bf!POt^@u~j>G<@L?
zs!ww;B{LbT!VeKI{c&|r>ICNYoY?kr;&E6`G``rvP|p2?oH_uvBep#56?x|J7n9dj
zi`%Z<4ofO$V)@k5JdS*oG8!A3EuM12JFe{MB_ouUyGjg=qq*0gP?S|&krb%Mqu;0t
zC~hr;Owv{qk0Jop1u*+>F8oDS3OYyYf^+?Fu)SQs3RZUWUfGFgI8BxRwdXMYcys~G
z#;0+)XodN<IoZ%MKnaPY!JW&CL7njJ&^&V}n5qP@oTtlpz#qy1+7$>PlwImf+6-;P
zXH@Cl0*+g|c(N7!S=mnBH*z>SPN>9p!<)Fl%4F8~?23@No~}o`b-)r!i9c6^-UIJ*
z{W+;Tp7dEo$Rt#Lst|IH?BTLKrZD&w&Fvn~xH4nDMDc*~4XPDf^eu+6?K!;l)_P&^
z_Qfc&BHz2;P+o910hjNi=dRgR^O8@uc*Nm6`1&OSRFd;Sddat1{3sea|F(zI`<J5C
zmY&SPWFg2U0&iYEk9!~e!2kD~xNPIBiG#R_{aaT6Zc=ORKlm$8{rZ9#96ib9=JR0H
z`6z5$_K<r|Fb8Ab^DzG;Wq-{2v&Ol<nO)L<e3G~qKJ4v*amBu1{O1BHRhP1I*DdI|
zQ4{3VfpGKl0CYM`Om!zysO}emmZA`5Iy?`ZCnvL&QXL#yorhcMvQe&cj-@n|LVzNX
zviHs0t+EfQkM~8hYG69+elw*_AuKwd52JTE(5`C@*sq%hIi2QopIHO`Yu!-1s25ZP
z?c-JRR<O?peDJ`GXcTYu1A`sQ;NLs}9K@Pnkk`dGmn=c%<AKtgnOrg70uH<xitF;$
zqqflzP<0~?$Bq!LP4_+T>N)VFtOs_7CV+fiB?N`60N;m&pp`ZmTpNzTR%<Iv?fA(o
zv$w;$Yt-eSAyh{Vy8usbx?{6?U%sMk5&keZ1KR(~MCqvQLc8%2);4D}{80}=w<WsF
z_TzQI@cSL^7BLHP?H1yGxw9!&fw&@Z9Gc(aI9hu(N@-Sa&RY&~dhwvu{FYgMUksJW
za@3Bg;Nq7V(3W0-(@zkS(T4K=UaKJInGM_gL<JM-+%ULf9E$9Jvf}1!^uL^l9z-n3
z9PZC`aza3s(=0JlY-EkU?ZD|%B(z!$mjpez!raCNN}&H%Q06DIJ7uTPqb`zlJh>yu
z=|^m4*PiUdjvVN;_|6agB0nwVK(`D%NNn7LLZf;#mP7|gqQzw>SFWyJaz7u-r>sKn
zq+pO&27&Fa@$6U`@!ET<V5;;ApF4dV>P)6y&ni98>2m=bz63+f#z-)CH^cc>zL=Il
z*{$|gNlVxTC|7ZW;;h5?^2cgyo>T&6z7D+A`ZcrtE`b7H>g71jx#y=7u;CVmupu*G
zxpOjh_3dUbD2?fEPk|uwf0+6hHGJMX2Mq3|!HpTSF?mlh+V=pER&J8?7$D%%C-u<#
zzk@iq?<8C*xe9H0Cm_bb5=Gx9NZi(1aPMsc&}wirmks!fi&qZAaaBqbD`n7B5QdU3
z#H&}U6nbupr|({GbbJ4jo!Ux1F$wVm_FiQ>j}63<Y;)os?<Ma}5&nBwffYN#(Lj4X
z_&>>k-QUlnv&$U@htC7-z6g*=-sHhQ_|SDlAX@m=y!PyA@)Uk#8j+Jgi<oEji{CI=
zt`r*V3en0<#y(~0VD{A0n59O!^Y<AN6Y|+B$53bG(8<+7i#T+eABF6`q?L_0Dxr3G
z{3=^avrZ0|xsn${?<i;~Uov_4TS5PJDYm#Zv3{-z*jf_F3w|wv^0flC&iN#<TNlXH
zM%6-DZ<_xc=p6oY3uN9~59Nz8ndram=Gv1dLCNXWux#CGELrqPBKiVMp-f^O-+Tqx
zo+P0h7Qwq$8t6gH2Q{N?UX-naZe8Kf5@QWZT2nEFzBf)C=V8)@e8hzUlm2s82<#V1
z{U9fVrs8xcoJd)?b?bQe@#FYVZ4CBLh{VyobMf~*XMAVa%EYq^1pki?n8JDz^UlKD
z>Q5?r{_`~Lw#P$B?nP7^I|@cUO2-|iPGIV+k&>321a6R}fy<T#W1CkoR~{WeeSmpb
zzA2s+mY&Dp|Ak}6iekV|Ht_8p<-&d^gS>qh>^)1~Ez5Hd`u7%WT9pmj1K)GMLD48`
zYp#sq?^wHO5nrD>6SbSE?}YX_+uApxNtOc4wWeX?^fJkTr^`^|DtU+Ai2=q(@lJUV
zKNVw(eRrng{rY&;uzNdmnj^zsMyF75y$>|`#DMhr3L*Iu-FF`p!PmST>~Uuz8q9BE
zQs-6{_n;p{_Zv&!^N&?BgM*T#7S#3Oq{D{|&Bv8@<FUcB59mFmd;g*#{K#$ci>!8l
zj6avrp_6{q-bopB5EiBlM^PlP!)oHpoqY!}YrS<~clsi1UXlbJeJF2qc^}uZ60t6C
z>MrOIAYlqI0~MS2tdDVE82L9J*}o4eUY*9a;=Le!mRX(QRfKL=_pz9;NOZNC3e|PB
z*w$K%$A?ij!0z8{15ZS)d)rvH&RKj{9{^^f-Jzo}k-tlHgYrLC*s;@)xecsg0|T7!
zRs{8yJ*~jKJDkvUWfk$>gSjMj6lsW4ASJvA4ww$ZAFd3mdIv&DkR`E(kMW#!{;;Sb
z2;SX3OFBS*xNl?vjpN=6R&Vw(scnbQc0-09Yje>4=mjPc=grRC<3r3VJ9gtiKlF%o
zf>yt++-2ESc>QWUnr!>Ta{fC6Rz=<HQsGr}vwy~$+Z%Y5I0%DC6IH3$O81Dzl8)9W
z*3;kul#WRS`M>$XXT8&q(S!IC%F)nTJQc2NAkD~n7*>tZho~JJd5|+{*5nP;?<)rR
z*iOMPE)kL!)If~l6lKt)tXG;9>iEid<}1?MRq{}o`qTW(OGK0u;jykjR6kk{`jnw>
zE*`|4M)^XHEBSIt&ERo_1A4j~0o&D)LhY-u7`A5@h{t6Jt0t2V>7)^K#hwR~J;NYw
zp$#;2*|J7|R}g(nuWI%sR+7~qHsrQDX4i_a<Lhn7_mE07{kjiIzn;P46G<-|8NwR3
z{*U~u#qiFJI=M$hNZbxxlZbkrgoYa>{CjaYb(9>ZpUaD>dS{^i$113|tOV&YD|TyT
z1_bqd!o+cD5~BynDEk!+)f>EU)d4@igZ)rG@qxLzJ^35@X0g=sw^(&i9a=OLfGvsV
zziwGU(4^n&?$c~sBim2==SsnK=s=MD?aV9<=sa`p1USCo)O}paJ)O?u-IhzJVltRK
zxPdrTEeUgemGBPhli2V;3(R!mQLZ~p*qV}uZ^_eQ^`R8X9WRhq<+Nmd(<ZcfvKfB$
zeaSOfFoXqEL4&3vhyNIe5BFx->PqTU{vUTcp9w){KC^e9859gV3C&aX@^=Fh@i=Kz
z^WA7Z%sS5F#>K*kNgHu|S1jJR>W;${<VR#N%;TFG*v&hG8V%Vzb@2;{N<SNj*)BuZ
z!{l)Z8xE-twhAH-4;DXp7_#@9@SnOo9N8lr{EcFmn$aXS|Dg%GX;X$skMXVzgP7s(
zBV4&&%2M50n04HFFf#wbHExUtbe)Uxy3y6;R~6X)=rXjJ1>>%F9(cP)J($gG<9`_r
zp!YR{yACF{gPT8d_E=0kN*3JyAMzu0#Da2EneggjEOb=NM*mnPc--m<viUnDqjuzD
z_#b2RC^`xLQwq3N|CJCwiTo=I+@P0f1%9(E!CBu*pkS9S`u9D@6umw$A*3EMKX!A=
z{9T}JG65FEAWDzl<+a1(V3uhl{OG8HZ0ji~3*7-xVSAWz<wJ7~-6OCn@-TID^)Q=7
z3?b3o@z`OvlKQDHLdjFcTh1qgIPATkYZ!*JTw>7d`XJtQp8UeeG0?eI3SDOwa)b12
zZ0dXY|2R6=fEc&8jZX<7BuSDCLJ~%jde)63BuPk$FbE+@k|c~yD#>(I=`bB=be7cg
zth*X1Vkfafo7ina>=0sy@UH*+l~2vg^Q?7W_jUa)+^OS>Vq5mvG&7%m|AmYVlT19s
z1++J%7FN1UV>;Gp*5P9YMs*>?U3U@kZgbFIv6_9ZHE3km4{G~Lpsu0}0-};pd8C7M
zzn1aJwPB>TokuHgBgTC>1OnAxmc0l1Q}ORiP%qBGl2=D*-?10;*fAf}v#)~Y^L}t#
z6alFNQ^>f<i?A%g6kMi{Me%x9c*uH!opxA2$3}PPyn|qtF%mi>V`xwBAl9cT<z-E7
zXpv<F-0~!RJ6Q#7Gy2mL?^uoc2W?m(nT}VptWa%1J+b&{Ow{~qprCXHxbF;vA#7*$
zY0*jy58FcCFJk#uy=>+mF2o#fe`pI2fgP7-;jdSDATWPtsc@N1H+(pOPyXA1%AvKi
zd#aQdZdRf9;_Pr|E7NZ_J+AaO91MnKi(%J>LhSP#!+cEnyz90F_-oNA=#AaVzQZfb
z10Rpgp_hr*^Lp@Zl<+c}-MqY?l>YBsEqJ>~(Ky`=0%~2cneiO@He7=}e*uc$4`B=p
z4f=k+E5>h41FK2jD*Z+c!0W5@@Gj#HO8@XB|ND)!B~?(7d=hDyQvqe2d7zx@4lZ5C
zNzg|h6nFV>?ndfp)JIttcYvj5Vgqq5FNF6jx2=7)nQ9h%=lmYb$FlTD9KSFVWfLB9
z!9Vw4-lS~&^zj1RZYhH1D>^7Eodv9)5QhA<6y5unRuh|v%H}-FvcJYbQgH!vpW6lv
zN4^nFl~Www=Z$KegD~)|BPfrp;3Ue=)Tmz$S*#((_MTyMn06?7zTO6F3}v9muqU$c
zN~-1-iv9;55ewseL=llk`W~dS-m>Aac8nX!yi_sY#vBzJ2Z6jU7EZMq;rEqSz`<@1
zKJ4CvNu$~4lVA>xCz83PG!(zuEJnfcExfe<U)*!cndq-O0!oW=pgCz0mK9D0H!~3=
z4Gp6$;}4<ckR?>(V>6Z5TJwg5dSJ@@8sXE<(`K_|T=V5Tuul<N=FR}6`wmW^9SM8V
zL(p^0ZK^v@oyfXXp}$uizT0AsAJXbjU26;ujhu&xAA)gK%zE^Ebp)HHGOupIL^S+k
zFYB~o?`*#~V!l8D8jwT0dnPh2wj1xPp9i|%4pYzeCv-ub8VbG)sWfHz5j{@@Xf73y
zjwODuacLy_`Z3Ma^<Ua`Y${$Im57<Uj$`bm8%!_lk8OJvKw$kl8rc5|sJQ0ixx5{y
znS7gQ+YJXB!!xA$%mUcBrk-WGN?DiL8Jx)OLvBCrlKpOsv$!LVHolvJcAqYS?y6`S
z>*Iv(!<&eFhaGsXz5sKNp2lIGiLl*$If_De6G`qq+RD0B>^qW(-cvtRj3VGNIE3Bf
zt0C1bn>N(11!t{r)Ce>K>7;ma`SxVgkF^G?oe`E-7o0;8;i!xEdJ<F=1}0ytX!n)F
z;2fF*vd6AiJ+T-*Odp9>w<#Fzk0DKiQo!L}A}H^glg{pZn)IE`;`=1n8>~icPv~Ov
z&7H*d5X&=OxB#o!Gm~<X<v8YO5&7ovB>Z(Uee8SzhP}*yxM!@hbPUTe7~dgVnZ(?7
zBZ?T>vHXP51+;v9ig}P_G~{Chgv3}wvG@w=F8D|~6C%KQ=XF?Da|~yk=1_m-1q5X^
zn$-r=XU}(`_h3g*oHYdPvGr&l-$r^m&O-d~4*LGhaFp830e>ayr0{r53)=U<7%In-
zWA&V4wGH)QUOCsrb|{=_4JC=&X#8zmY}|E@?d3kvI<4<?J7aSn-o*5<HP1<27|Ssa
z)}sYq+NkD7OR#8)Vtey=&cAFqXz2%|Wc~=tx~?3U{iqCk7>iuF^LJ$qTmXl|^*F3^
zEZQ>vt7PU_;(hpEuIE=dNY;nb)+g*f`m~8Rcv1pv5e+ws*Wsrz5}3K{EG+B{#`mk0
z5G<?2+x~elKr4Znn4Z({^b^qTJQl2iU+^Ag*&tP3z}c2A2y6O5!)`XKRAyuM{w;J?
z#%z?SlwzfOG+Oyxqq2@ueD5;GoSMzUBTGLNZLQ~5?FvQlraInYwkc6Qsiq$5&BzY3
zO{h?*V;R3L(si8iO8;x%+B+jS8_`w}Y}m``8sDVwqmcIHv8=XLJ_#2GfPpT%-wmpy
zTK`@JkD_XN<2jE%C4fqsOXQr+SqvC;9S&wiph9*O{NMkiC3m0D#bJTy<o7qVk1a)|
zP?gKM!+LQ01%T+!0`z~E3;paI*xwY!_5)8WAHI!%w#OORJ@7FZ+OrA!YVMIs&5Wfn
z_6>CcDRCDj<6jo>ShP0_y}dd~Z+-z8{7)##zn=ur{xrr_m<|?}OUcjC5$ICEF^1X#
z=pVih4MY{-dU^{MvH7bxrjD_Esu>sOCQ;|qNYq_>m>-*sq9zeW5g}B3kHw(gOIV$L
z3}xR|G2Ru!+pDjIDEB~6&m2r!^Ye+(@&(jwUVp~+O2HPv-$c4pl{A^EqB6I$vL}$S
z!jh(e%eHgS{U`F0X=^GqN4Qb9QH7A{Q;5AUmqTiW1OxI|CsT1LoIe$S)wj<=YLyrk
zF-EZB{CME^&PVkvkIB<4O*DJ38_G_V63<9ua9<TcI@ZsH@ZnlCwIv=T#|K$@N2}2n
zA6d8c%22d79|>ZEOu92M5#|gF#+2a+@ZYK`c0dn?Ui1XT;@8Bax<AX82(j&&2FUx*
z0`(zBNr$9?nlz|_k)bshJywT$6*dnANWec}3ULM-gs491{An9@ZH*_gxjS&<&?MZT
zZG<=GiqSu-iazQbhK`DVc+G4BvQrt4U1}@AV{s7r$=N<rQp7meJ3)6L)21f1P}wRW
zv?&gQ<d6gJZGHj5*M-to*7;Bw&3akq`>}5MeqhqR7G885!1mZSPWZ+j+ZPQdGxsOM
z=xwv`MJD6zUQYx1jBz@2e_4vo9D=5dP}pxj54&II5%(O%ZMay#+438yG;Ig8=^BFL
zxSxFct5&+w=@53TOveyCb-c#(zB`eL=ut2SoE9e0*7Lupf1W-y)C+)iZ7bL`cPut|
zt^<ADQ&5^*ic_vQVP#$l^S6qqX3%)j<g<?L5)$yMS1c+`dr15G>6~WtGtRB?Ptv*G
zn+~0+MDN1kwB!lXG^d1u@^oHh^b}qEUUm|FSD1kKyeik4y%xTjxMD}65fm3wEHMow
z4Y~8dbKoypGG`VV`V3-uFbYQca`2tx4I(?{k(|35g65XO#L<_SuWmerw@aYi^%N)B
z_@48Ap8=({>5wE!M|H-Qb^2Qlo=2Hy=GA5xbYeE!Nrhoa>quyjZlR55N{HjT7`~n9
zC7oN2fp`o1{Rhm%7a8^VQLqv<J7akD*>S{Tu?~#+62>?%>tI&IUi4l%4SY*mXgBL%
zGqorI`|Yt*k-#)Z&t;tMOD8IyV@RyVRzgRmF?`_}4?5NgEM^=9+v&H-_v|?E?2=QX
z>#NDR{_O7Ft^)e$`7mT~CGKJK{fn~62-!nffAc7?pDiP{yW}XGB&2U{#$eZ8)~RZ~
zg)}b8MVF61Xm?LIUDue2O((8`ZN*vi-rGbs-i<>W-PvIE{3<MCT)MBLl#I8e0jbvI
zbU``m=<~Y>C8y>=?6x#iyju?w{jT8zEkm?$1dx0>LVXVJWqsGd@MY63+%SOk)ihrR
zi$ELj*K{NDszt==>VCRd8HUGt((v`ld^CKZi??0nkQKnZ>?S?5XGni=QDOJCE)5zu
ztQx!xu0ZO?1hjoH48PhXgTSxTa!$&2v^r?Yt+@@@?&S{c(HUS|Iv681FmAbF0o`^;
z8|5o<$hXU0X!P^~cJ9|99rr?*u3Uz;VM9=RfEY}=5?Q~7g5TN_gYVrq?D7gD_EkF2
zJ%1eS_IOD(U3b6{wsVudVm{{8t3j?|3%Z19NQNh%>GLq`c%4hD@7iE^U@q9-tbrY$
zj-u*6C*k||a#%jl9}|C9L)o)|aFflACW)KCuH_OYmAk>J+vAWI#-gYz5W)_w$0Hj?
zVyJBa<~Xi^<n={p-<boF@rSs?^j&yMIT`1hR-n(2dQ@aBqzk3fu)sEh_A3g(6X|Cd
zZ{!|T%*e%pNhdHcE))dH-IX(&dF;*91mA>g>VJj8-7!~C{CyuMwm;9iz0rqlUXxL}
zcMkck5eW}}rNhotmJRhf%RK+*&@eR(?%a~0Tb?yzz17es2AgnO^<~zV)WPO2O8X=8
zK8-p8@tJH7>(BNQ^E@!Rq!hm=hJo)V8L_CkMC+$!qv2j15GEurUyB_yH7Y^ut-{k6
ztYg7wD6Fsz#=Tn16Ff1SdFD>SqRue9ZxM!q;T0C~hmJw-mSS+qWZfYyLGx}WIHT@G
z9c=%7lFV$3!CQ~D(K$UEw9_V|BI*znEC~j!)JyPg)-qJ6Ql^VuLD@_r{^g=M_<V5&
z?AtvYhi@=Nul0;2S?kJqJhO+Dd1lzn6wI%_dg$!Hqy1=gP&9w19j}5x<9Hq#UO!9C
z!zIkG-d7o7?g9O!xp<x&#11#cZEefqedLV0+j|1_1k11~R|j=J9HS-E4QQ(;>&H3W
zLH>+NK|L47+cn(JIVI=Ol1JxBa2dPTc2;2c%5vKAl`+t!O$T>&25TNn<@K-sN0oAa
zUfs5ha!WGsg4`HIuRd{4*R$+)X)&Zc%mQ1FsZbJ7$NJo#(C%sh>8S{#LQy;kGpRz2
zO-TLwCWH4IG4xLR!g?k~f<SC<u2JX-Z@e5ZqM{mQ@v-3YrV3@vxA;2^W@tNi78tD?
zM(=Yrtc&9v!^_lS+rcLKPCFUp#wL(FHidODe4|F|N1$Yf5u91P79AfgCia>MsD6DI
zI9Ut=C7$MVqqIRWEgiGBA4ZGSS)^n{3g4F{p~6#tk#LUr2USz3!QBFwRlu^DX)&aC
zL_INOcioJQ2{_pE3TU=G=EYl=u<UPDrT&Md&^f7+s$8+gwpgAxHjF~=-4$H*!dVzF
zEe8Eh??msWE+SPv;*=q~IgRNXz^L7p2rZZM(kt<7KXIPrRK?UJR~x<@RAN_@D>fKc
zk)sb!q2b?=*dkd2bR3(@va+aV=rtnvQfnzU`ANi0QqHBuj4GL5fyv0AXFCF||J}^G
zWRl@&XF4kGF@L`N8Ye&Sl*)%Mh2zU3;TXGDS!dKh@7hJQy`qXQX}2NTCCvXdAPnst
zr{cqRnPB+E7;pmfkbwg<ng>zCk9yF(WDyNZ<3aq=m|E?;ind2miR>!tRfra`9d12T
zJvfhs>|M~cw~J0u)G=SoZ@$yPoAfoECN9a0le+984j9XxBmH7%RvJ=YM>hKw&gI^H
zILCMbKWWGBFVrPg48oI{U}bsUa^=0<*k3am8yaWQ@&RVpp|C{XicHX+kwv!WT}4HH
zF?AT5jkddI6UVF7q~zT&6zqt`=0&SX_svOEG~y*`wN*iwe*`kTw&0ES%b@p?y^jrd
zfEr<XsIZ)~(h3H_$s1hfDqms~vJtlHv%9qAOfq=b3Uq6ogw30Up#O-#b2XORSRuvx
zKY~Dd{2tMLWkigwUxleMw!3?=6IzoNW70=0P|VFoSp~D4)iPF1X#jX8hl0&wBRD(W
z9R&xZq;GQ=T-~t*y_5=wToVcf4v}D^nuEf+Dq`h1n}$>_fxu_k)Mnr&*tR(a9DgWr
z?Uxgb4XbV0u8~6aYX+j)#~Li&8o|7IB9LmN;;nBh@%!sCP=>Fk_+vP{El+|@<4fSV
z)CXK%vR!b<8)|m$F>SctO@#IzX|VrpglTIq{G%S7X_Cu0vLi{t!$s&A@QpV6eBiE$
zMliiI6qD?x5vv1x$;?Appm<I}a`6u5GCP)T%<;oX+K14kREOBw{7Jf5=C+47g9g1#
zpm{Zjf_VhBVVU9Zc&4F_wg$f=s@SYq$hS1vf^wV|moqXASXmuZI9Rb>;9$}=)fB8(
zGT+zx0NQYT8R@WKxxc$n_^R?8iX(M7kCCTIj_!3x9q@+kJR6E3cEhp%&#MTh8)($!
zv)J{|5cuz#4Br=Ahwgj#IHS)^WZ1zBER*&JNl60N?aX#!KW(VRo_N~)pcYNSr^2Q5
z94z=Vo%QOif!<F>*zoEgkx$I0wpFRb+ou{jH!Gp7Pz|)TQt>nUj3moqA$;XtFfx5e
zgWlBQ+;)l*pS5I*uo(CL8iq~6P;9eJA*Y|YF(#%3aaa`&Ykr>r{irf9sk8(YH6L_T
z3&Zcb2^#VK(5veUsVmEf)vA@$C8?CnTa2q!siYFC$>7v86C}yEx%O>O_<Qq*<9}0*
zqu}rhOB<$5N4jP}j&3rYZB~a#k7Q7jo{dq<Cc=lbQy8$GN5jj{i0{Rjke@jn{X>?}
zEMR`cDW6G{b2@q35QQa9ah$~D7$>;vMs-JSqPDX_Na~ghd{!sJH)USf`)N6ys2j!3
zzfdk<c^JfOEk{qIbTGRWNey3Rz}7e~EPCdOC&$gk%CXB(9FoW<eHuhHt<TfZ+nCmL
zJcsQ!RgulQ+`}_rpffKI3O87y)`RPeQ&S8%{Wie;&<ZqMLa<K0o-x@J;6HW_3QklI
ztFp!X{^tv@(_uLgMItZQzNS)eg6;NWmy;zM%rWlr5p>KpA_I)s?nTOYS&<d|?TeA%
zUp5$Q#o9!!I*sT)I73BxFG<>j6=>URLSA1yh5;&i=)Yw<(RZXQ=bZ#C*#kk6-%;sN
zxfraH_mjGQo{V>q0@Z&8gRtKaPPVO`69jFcef~G;+e#(W%^Lv=HQ5Ly?xpSbJb5e2
z8qm&;2E+CtC=EPC#nWa&$0C;H_*_Yo=GqaFYzL})ou`@y1F5ik7<L+3)6{#t)ad06
z(r9Fa<_?VE)l+OC{=S*0{#k?`<Cw?tKSNNCsV26+uMtI~I`tkm6P2lzoSi=Nl<LNS
zeo-x2r?4)8z2>O>@HFvA$-p(s&ae&}8F|ya0X?m=A?MN=ba{4;mYk9E{D8}7vi&tZ
zpud9c84J*UNGd426XEgCT8MvB4_^za@qd3?(%3d4ah`8k^L7Ka`YK@Jms-X;Sx#jA
zZt&_wfZY+Bpy|&E=*WHJaApKmQH;QlaU2*Gy0cCNV^F7aiQTSK>@GDM95YYxGE+}p
zJ+z2?J$n+x-A`y;T^y_p+m5w&mAK%K(YS`Pj<M69iRk%P(xsaWZp9iP(Ep(r#g9O5
zYi$yk!=v(70NWEUgXsC8pi!a&J-hp3_|10mGAbQ~F@~feI-HKbG#}U6#h^=X6gn-<
zLaU}|e%JdG_-evp><S+P5>X;Z-|mK&`evAP;sQ!{=n#}ULdeK$niJ@U78lK+zsYL!
zQb@sJTO@cy=i<Lhybynk0z18npiF(j38LaHx)@^$$ZS|JW;Wvh+fwh+PR_4yFgoa0
zqldQ%^=%mif@S&iZFDKfFV6votxDYRE*AWxZ1*FL$KC@2Y2O5Y%rd_YAw%-1tR;x|
zo_h()hw5N!$bP~Xv$@|c7cAzfk-C7<AfJ5_r8g!}k5X4^6?N0H&tyDwe-33_#XouR
zC}XbGe=6e`+@*rAR+d(dNt|%Q2oRi$VY=aBGJ2XW`mBwC8<(6=<8T^$DXz!wZ>|A%
zz5pJU?n2LJtdG)oGt0|f!KcA)s5F|*x;B!qeeik+8GHoZ5(%u?kpn415%n{QXuF3s
zZP3rf&0A+;`1^CvnX?x}!`PYQWKYa*o<pa@*O(sDOXYiWiG1g1@YGMo#@pxEj%_!t
z?yE%`-(X@gE`%yxU1U3tk=!g<Hg4FIfH#KMV!X>v2xpn`b^})|3C#of#e>u+-yPq}
zGU2Sx7M#5+55@iacu^bcwWwk5JjNMoSeit%9$$dQv|4g<hY|Mq>M<s}B_Vp((JBf-
zcy$gh7W_q@8wP>xl{gX@dIQ>9hS5Lr*%_C$j$WEsh04EQ(!Ud((5i4dmF|k<nj7n2
zi*5maZ*@iCvNgPKCSyHzFphEWSK=116jU4bVCp%>-8i|1PtwyRhK*Aobww&Z-x>_Q
ze)FMO762OZ{iykw-6y>kli`vT7+skPvK{8|&~H5|&Ru3bUj`Twxd3ecJwZfYyJ(2*
zRB%~Y2UdrHmVH%+=CA{7pFa_*w^xCqbU)qy!iuqZH-p<k)^WS-8-K-yu@1$D=x~Fz
zD0zN`8g*ELY+fo#p53E6*>i8SMH3=<9oAJ91&{ZJV}qfT^p^Ld4R@zN&&;26L4*qC
z50azllZn`VzLR)mF~-Z`DENLqooScGT;jeZxQ5N&rgf!QvLu%@E@8|()f#H`dKGk=
z0Trw~Oxz~Eq?1^Wtn9fu-#yBmc|q<e1g9!3bq1dU8Sjkuw2t9kYh#ux<w=xFJjQyZ
zf#{$ewYTD-x9J3SZ5RMo0&6h*&vP_y{5g#C+=bRLO1RJRg33!vVB~B&6hu~8m`%xJ
z{{3aNbLwW8xH19*ZNe~c*S}QKy{uBR|7wuLXYrCtk1UfkQ=n>JEFRCeh7JD-An98a
zY7Ekb$7MwzoNhoM=>h4qyGzPys=#Q5ABjJ_2)0c<gD<~bK;MCbAnmdrdi!J(_mP@t
zdw}I46PLlapUx=Q``p55i8lyNbyy13iaE`i0njpgHZ-RuV4u<j3>%Yy@au4`bPa~I
zYQkXEBHY+N9DDDn!B#yzTv;hcL3A9qP>p@(e@4-quH&HiV<@VfP*QvAS-`ciyZM7P
z(3OSYC$9&q1%J_2uN*3D)}&pcAz*cM2>9Qh45hXdQ!}Dau{xG<B!c+TFM8;>hjm6Y
z`-4Gh1XwG|pzIdYh0YHqf=7>e_qJz5H;wu0|8Rikp-kJH%%dRgw!-c7E#iAOgIaCB
zMLt>g#{mPbf@6LPFRGMc|8<PFy0@D8PFIjc-^Z~omtw{H<0Ys&L<kE`R$^7kSQKQx
z;Drwf(Vw>jO^2PtrL(di>q8NU?H*FMz+~8VDFsVqIS_VvDAv17#-3@HN$09bcxJCQ
z>Kl|2k$oXQ+^qsz=DFf(71oFD$aazsWNZ)ANd3=@hoF&c-!jz}JlfRQb6`%AW?7Q>
zZgog<ZKu{t3h@56G;q8%58nMzgAdxgvHRXzPBJ}&nwe#St=4wx8F3sM=U%~wZlUl=
zIS~8ooIz7xK|7b*&?WbU*krW=wf`A`>LJTv!srwX3#!BR8JV1>&1SUP&U%t8;~+_v
zO2b8KX^uU+N9bl#kIp+pl%E5tIu-EAYZmq;dgIHwN-QA-U~%&hM42{`YtJ;X!0{*D
zIAIFQWhp^lv=Uy-I)PSQ9-O$TlvfLi21)s0Y#xz~iwz@i%ly^YK5{<q`68FPPML>-
zGACX<<tjEGI1OgBg*-ek7B$8>f$aBoF8<(e8YUl)+{ueD<9!7F)(Awy3RQH}{Yktt
z#*y;Dmq5KU8Iqi?(oae3jM<O?_EoGi<yI!fzZZk<Mr+o?HWQX!VBYGyjUaju1SZ@5
zVGL*<G#byseQSy#%c21#DR@k!08f=k@$H{^(DG_I^pVTxB0P)oCSxl91JS7TFll?O
z3u5JZ>UK>I@~XMaYsuzB)-fnu&-NT&ng4#i4(&f$h{r2)VL;Pqrt2!e>{>1Mth7Pl
zk7?YG%q^JYD1oMiOU#4$fC}!<#Ij=>A=Ygu>&j{)qT&5Py|IHD_dkx^tkck4RL^|F
zg)GbXlsMk@1J83AaO2SpOpR+Gp9by4?xmX`)cO+Ri2dU2A&xk%45GufGjI3$IdqV6
z7vk)6cr6@@UpGa9)3^WF^P>*6S5mO7yNFn5#=x^S1zcA1KvD91l5R8z-NPgRF<SV{
z=PGm<Pll9GDe9)XlQ(g-XgE8D2#N%j(gIJ?b=MNay_4Yh&;!^P$g@0&3gDixa62Ix
z>dKCy`!^Tp%w(A>8&44PKHQiLDYo0Xk}#IN3SS_{ysiS4FG~b%J7<*7(uX#mHd0mP
zgI2sYud0y?3&w|nf#GHJov;HM-Z5T{Lkahs%*F7`REU`}0sF0Aiq%XPu-9aa_Pnvw
zK6EI&jV^})U+Zw>&}wW^4=36za~T~|4AsRlFnpVXE{2Oqs~7WCr#axXg>sZmSxO3G
zEudSt6gAJ9v&{E<{<}dfh|dn@aCSLLJBHv~dml9YuO5W<!@$F#imKm<C%!wkK+@Au
z&|ct%ci8UU{u0xl@8^SgP&`Dj9qy`M*RfRF1gBmxV=M|=e(-IU?OG|Jf~}U7>c4Ii
z@81ASpCg+3jwl;6hd<)77nQ4paHx;6d3^+VeX9rs*7l0(@N>{!(#9Dj?S_&&Gl+gw
z3Al$e(#|o1L3Y!XHk5y1-Bm3_nAAnv5-N$P{1<&E0Nng%8XOO+0!u?3#(6NUEb$yj
z!Z%NZqK~0~g>mpukIfnPPeYeIPX!uc=)Sp(#54YcINptPEKH!`YuG(&gELWo!p_9o
zpNU+c2VS*IztZc69jBU!=bnq~K6wtp|JVuAiN;`@Qja!n>^}F~j4OK)$huVQD^D?h
zg3`#Co$V^H`a~7JiVH%M$Jx{=)e20$d4f`YyHd<D()`t82z*ltd1`5l@y^&CLTj2c
zbuyI1?*qp-<7tC%3T|UNvx~E=QTsm2Jb9XP=4mOQ8S{o0{cPYr>qOwg^YKu8xD=y;
z-%^Jn#yBPCI5*~3EA-!jDyy{6-CPNa9tGimTshP|&mwu=r&xC7CfEES8NullH9tlf
zRPF$D9URSih1@EOSiYoHd4#d_#&U*_hG7ZIs7~uJLYD<q*d29>hG#Iwu|+a;IcH(_
zjtq{IF2IXF%CLRKTXLl&61xLyLFYQloR#e-{#Q>xV}2w}`l*V7i64mL%yZB&VG@q>
zIfF)>4m4b!ac{1cTe{IQK$R#^j2{S|-M5KE_bBIQSB&a4FGzH52FphI!{tR6n1`$e
z4D;(yQu&GM4t`6FAptfVtU`}|w!~^O^ZVXkMkQN2ERQelk1ZJz=$-3;?S2(Bie<1(
zjJIN#Rx&iF?1q0PE75g0)4q=?z#t_DTvk*PtJSrr>B@4^|K`BE`#W%Z?<9;+IfH^<
zd88$D0*IvE<kiGeaMJGr<KkV1z`eEfo5MwHsq}*zpR+Jx;aF5(agZ21(PONSMZ_cY
z3z3blrY(~{)5;pAffgIU-oO~tH#v*axzR-PM++|q`$dOEIb+nE{m}RM8r8p=P2J6{
z!A8A_+K^@JeAa;w<3{rQ(PA{b%0c|@1lF(dk|ez!3ZkBm#7J!~4f*FZ2sOrVcbZ&L
zGW0S1dCUjftz)Tra1XUVwi&f=+QRN>3FtoJ9x?eXMQ6s%vtKHQl9BN=#FIUTPs*U+
z<SF9k$+GAZWL(MmO0L7eAJp$WAV;R1KuQBpbw(Q4vVNt4@8^j=^9R<!M09?Y4>B6a
zYewsUN1O_H>nwwOrjIzPPi9#kIa*ZIkjtHg*zUHD_dfc7|MX`L=x=uiRrjH&Y`F}*
zR@;e_s0Oi!aYkE)5T&XC^Z)H&9#jD~KWXPvc_HL{TMVuqrkGhY7d`%Vpnhcv46oIM
zJ#%UxpzbgxO*lZBzfYr9l@%y)oLi~2T@ycP&%(1$b8x{7Io2O5M<c5+bQ^OBl2qnF
z_^frHlZ4<XzD%@^sNkX6V0^G95W|1EKzm^b-xZnyy1N(9P>VDS`E-Gn{N2X)E}09S
zk5z$7V1B7zuX#gD4!Q?qkij`}2>B$TAxj>TZF8^TqN8PKdYCaBWz+GkaU%GrL;`p4
zJp6jX?vhW{DxD01NXSFRnY?_JsHX@(<~fk6?R!ssRTvBF^;s&Ow1kIHwp)o*g|H_)
z`m6X7oi*iHqWO+W9gd=O+X!&1)@7d8WIm*Y`R*o9ATDQ^rgqc|H~+!%^82!>p@%6%
z=Z8WV+le}d6k=;#ISN3Zo8rC{RSb7xS3wzS#3oVU*ce`HcLI#3<b%mAeRw<|2l~R#
z5>ZYA&sCqm=K=fBW5Oh2Y?uuDL>Cc)BGGvw%iLXY1i^#VmNh*JH20?XJTeQ6zM9eA
zx%<%MR3?a3%NaA&n)s*wChECqAS|6ut!h8=e*Y#y`I<CnK2T0g50vAY?@BNW3Pv5d
z0&GVDE$~|nIWz-YrU+OkP@ZM;AB=spjdfXc&WD**jEy<yJlgnoQEin15`HQMjRsm{
zm!2lHUUs4(H%%ZmCmswB@uXFK3$^wO#;B!sAR5y}dgvGsyS?DOv(I6;MF@zF%_I${
zhoPG@o5>brW8cnkV4^-4nyq${2D^W0r)C`_xfMa++l%zw(hM9@n~wLB(r{1ZB`AA&
zn}k+Hu;H?ZY7UA6IM41)H}znI&sdDty+m~F|Dy3%`l#30Ja{~?i0LI`QNC+1N=ER|
z@UIlSeU`!d1TnhhHc|PHji9=Xv3ag5A#n2rvRX6~6E1GYlHYfDfnK4d%M3FTJ~@#w
z=r!r8q;lqa9SpaIFT;|)Eg;;UPEGcFp$&0nG-K#)v{>Gc%GP-D5@%1UZs<gNvbw4L
zy~{Wz$ps%KTtRQsC-gIa4TZbJ+*wNrO8kSdYhn;uIUJ)VF3el9<S*KAZv#ymxfOjI
zoS|T5F}r7s1%KT+^l?K1iXKIACFvEk^@J(}&UZ(@Bj?x-Ka1UMx9~xVe6;#2fmm$&
zNupZDg4$6F@P9XoC?2k-HdSo){V<+W`e|`)|JGpp1UcXAWlxM9E@9yQtr(=Dihha&
znCPyABl0>-XI%*sKdWKc`Fh&?AqP$*4#&<RTF~65gx)i))P(&V-AhIjjiDWM!$~Q$
zk4}dO-WNyD(nqfi1>ih41LJ2k(VmKh(CwW=_slp0o@+VQ^%4UU^%5agTmgR%OvhhE
zr%?0MD9D+13A|WdN#<lq<$1%2w^c9<zq1l?mI8WxW3V%R8+g)_^pUG23g|aZx<m$A
z*Uy2Sku{DVbq((ePvc*B4g<fNLW1TIw9WAY$LXHHrEI`YQ6X5&_8*#IVj?Yzp>9><
zA?e5f&}D(K#Uogs$mAU$`^OF%e^a`C^bIzPNSH@)4K=K2p}s>ZK{H>4j{Z`Ehv%lE
zAS8`19nHL{V^@*XsrRVxWC!W638wNhM$kAyhF^&dermSC-p5<8j`fDgJ2w-BiY=_@
z+l!LBretTK6w_}@!Dm?nq+WbMf2|6`yu;_fjrpU!C!~W+@q~u>YzO@V`KY%^hUISy
z;asi)>wcU^QB*Fkc890i^D@z&?d95AxAWptA-n;5emj>0(TuD*Y#G43<X0tB^fQKO
z-*JGC%Sp>fq@Qmd#lDlZAo_PXh;!xGZuFLRGX3rNHpZpjI026jILSIS_Hu&xYTSYz
zDbs$uV8y%`^c%~(XR{Dun~$O})`fZ-rI7CBCpmw=XEdPi0zO+Cj1_wBC>1{C3j$&o
zgZdb>h9!VY59@4@?<e<1B1jK+6Pdv;;`l0uw0##tgI6fAHG4zly=?Bd9YX%Aug34r
z3ef*_n|kGjVd(}L7(JoHcR$h{!Sxv48jNLSHfUt-5B>x6phayJ7@ZP=<M$IpHKG!V
z29|-RaS#ZMH$d}IB?;GxrRgivprk1X-!XpS5f34{%s&gC>gS_!$Z%fz)sBXHH<Bg`
z8MZwYF^zN}D6<un9}VO1cn1ZYFDIEfh-Cxhf6#`PL!s{CeX6?RGMI|?VDGu>bko|g
zcur+E{?bZ7)unN;V675%>ak3i=^Uy*-W<A)q+{T`iD-I#EMx4BrQIEc#O>+_8l<U@
zk|+&M(mDyW-C2&)&7Jv?2g9K9HR%2KZ6bf1jb14du#KNf)%&}EAbPE$t??nzEqh6A
zt_9%GO>&GM`I@%cOVN96ItJ`H&AKqx!><`*Q8B!Y7`?hcVIb>)wO$4O|1i&8JwVAB
zedv5xL3#odQ1Od()cxEFCW8YYYHB-m^J6=_Z7)gw>MhulZvl>41r!{ZAMJDo>8Uvl
zdW@+W{%sIRO{pQQ6$MICZlKs8krQ7q=Cv7HyZz-V`1<_{2*zB*x?ndb*=I+Ld!wN}
zbuRRl>?JbAcGzaadf<zPgNwQdG`81a-J4k`4z}Z278)G>y$In}svvUy#EoI^M89{e
zqpO+qxw_ZEls&oNwlNyE_CL@1yBqj&&0G*F7L(NCaF_$GcqRT2CZ(@Jqkti-3$h4&
zFo(^+&ZI}(2kaM?Fc#zpxY#in)ukO&V6w}y;FU32^)_<S1HK?Uvy6CnG5y#*3H1N2
zrn0)5+?x8GXxrjW)Q?A@$mbU)%pOa_rDv(imP%~Txk^eBPLS#kYRsz_zzgD^aovNZ
zP$yzs(uHHd?RhZ`ns6E3ZV$!u9WGcmDii+9xq<tHmMB`bk9LP%0fUxBSf$vEy{DH_
z?Ifma=>H%|+Czy1yQvHk@M&%=${(sws~>BiJ8dcSOp9l}j$&v#a2;2%|Mz>92==DA
zP}H2u&Zwb~qj3eTbcXX$R|BEDYX?zw+7YK;^;DrlafL3^u}=r%xE+Xj|MkQ4hXc{b
zz@AK<aGh}-`qQ#Q$>i^a8K|)#4V*lA(pj59mhLM7jh;9v-WUT)k8;bI207R^yqfIE
z-imK`*rVo*7_gae8h`D}!TJw2=p8eO*bQX4)HA<n$J*<py;+m9d9n*LazoMXWj+d3
zr*qn6L%@lh{jwSQWagkKNLno-B~6uFV|peoF383BW$&m_?-um1UrycbZwGYWjh=Oz
zSl``Fs&v@{kw-70nJfy_n)-pi=>>>1ScdJZ_Cx!o5IW3@N1d_}D8I?Hy0h+_+HV^$
zb;!b`&r=|F@p*W5)d`Kpl#<rD7eIV4724@S{;x~MDA_XuH6qqSiSa=4MlFbaA16uD
zVg&@A=b&j|F}zSMLBCw4DV}F}Oy4=YX5n!%emKi{`^ND}2V7w0c*ZTgmy1psq3~`|
zAvUI6L&b|(D4i;YqrOTkJ&*u`nW?B*?FU+iqhZ9TjTkQ#fb!-E2vHvjn70Xp-=?6q
zp$>h!B??q3Wf;X~DqpoyoVkEP^P2|xpnnC^6bZR;KMyqVH@`4z9yV^TBjNw7XFI4!
zv?%zC_*Q!1PUi8lc(xMu52#{$-$^LB5ox)<l+8d><7mV3UD$pp2*oGgla=O^(Bk+=
z`q(cE%%ZZHhU&m~2U<YtnQhQkTmWhh+KE9&3dYUZjHSwCD2W|Jt!}DAOQI+EvAJ|?
z1AA^C90Pff6H)a|1>NX9jC~Ub%HKw`<Yg>p-c*Mr74x9mFPyi1S&w?s5EO796ot?9
znUDQA@h)ZkJXMFVPV)wQYg&S(JrWSky259D%)yIIk#KOfId;FXr)7J?iB(aXrOwz$
zY>tkFlMTyJkmF4gGh^YYY!j|y-sIr<CV2Z*COY<NfN0qnnq<fB&%0+)3l%BY+(|^a
zYZkomsX#039!}rwAXsH)b2p9ZaAMn4>|;JT{lUdVxm}O0!YtGq!nzip<`8ikCE?G0
z5sTw#RKeJ<Hpg?I<HlX0y*M9)*B?^v6Fpqc!*ifi_;MzW3qWXjo$qV7K=oy;5A;z9
zTHM`6N)r8ueA_O}nJPnJ9%I4pO|xvDHG~%^Z(ddPPlJvN%s+o98QwHWQSx6=<-zD(
zXm#&3{UbIF1AhXDF03F!CLBb6`AjP9$8?LqA4t-C9nk#e2j6!i6*tUEMgCYkXk;h-
zZzqq>MqC7Y#sc$a3?Z}H%`i*D2DhoCK#$57nj@A|8@FXp615z1Di*`%de-~EGOtz!
zA`&(F1QfdGV6VnhmQfHxLHS<r{!xV%eM?E%;FTz8@8EK?+1_N5Ke4}64>cauC{EnQ
zJ!aZ+PG$|7?Yl(lSY}K2WDOPXABw$KXON^IH-PEsAgMpd^#q8)+gFYz`yy$oZ!HZR
zPjSGl^EkS0E%vxxhPsYi3{;DNT07=_U$=nHSZIt2X9+~v22;WER^s#}2J5!X0vih<
z*k<{IX6ztH9TP`XH|0Uf{fnS;KM_kxw^O4BmQ>L*4oh5YiEa(Y?!e!OcNOD(v;CHf
z<uD8x!TM5JAB>B_o(RV0(YAm(5+bTa51)nf^wb<ITaZqa-KW6f^jaF0E@GV!Td{4(
zc$U!_0+$jmqGODND@h&&)1FMhjv>Ktdl%!meA;Q*X)qcjN_$Yxs|4+xY+tc18DFLx
z!}67EpFdRy;UnIY*1MGP@7H7d>}$N^*HgUi`W$vwUjnA_|LbMXgg)1aAiXqz7EBrs
z^M1#p6XQtc#K#hqUC}80CzkhK`-xaxKgYN4TTLad7m4FMH!S=ei)WLzVsm&c{%T{-
z@H{V2UW@|w>+|sRjrEKz^~l2J*$xmL?jr#^S^n+tDx#fT2zHaNfz^6vBKgqC@d;<K
z!O0a|9<%duCd;mFpFnJ1MZ=pnNvNIyWbo%P*fB7H-LG{j4IU?eO~^z{9xTV_uZQ7b
ztqu4+#2KxUndf2oM(As*0;TCykobvsi+mGee<uRe#`jV+mowP@*JUnpRw$S}`a+XD
z7mza%WvD+@4QHO2fXZ7ZQLSe?%(P0!D92JFNc69?ahrtg_EY&%#<*X(CJxV>5#nTa
zuW$LX0&`w(fj?gCL>tX1Q0Cc2+C8sPx3S~VcgbMJmD-Hk4=~-CKI1NgXyM%e4(sA1
zbmYzHSZyDMmf52*c4s;c5359rr7C3U8rJa^{vYvlV=P|7pIrO1O0aPV!hIP$%Oh=v
z8y!W=_Ye#rA9i4;x-~R!3?zYZ-)YqFP}*}*j2#U_p}WtP9zI}$l8NQ8?Q1Ard76r5
zwkH`!{s^u1C`Q4v3!HlB4`Rm4prmjC^HwuXg57jJTzwWTTbl?imno4xkAS6lThV^`
zL#jLTKN9`=0_y5+p@P=Ae8E3c!CYMpeOFn4?M^4EmT3+4e$0=tIGR4c70$B!_V8Nk
z1ST!@h4ghHXg@fU>VDoy<OAwh&sh#O6-z)osDyfSzb5J%CNlmL`)%-_NWuGSwA-^9
zlJ*<}t%cFhYUNK=2L^-0qmj?D%YZh=4U8|F3jSXnlO*AD8fVUCM7=Do{htE9=g|!s
zVH1xHt(ru(yA-U#?^bH74}n)kO7uANf#m}4Ql<K8Xxw@Y{f8e0jnUfp>P9%2>1I(u
zK)0pPCzW+pWsvUgX+)4eq;i*0F&aP(;2p*V?8(K*Gg;u?J|4y#o{5LlS)Y_<2{f!d
z17=nQjFEfQ()=-FbEM?6PDOPT%oxRc>~#dKzES9V^%s?$9?mEIy%1~%O@z$f=~yyK
zOw|hq;1A8i*fzi!WR)4T`=fwbkT`6P_(B`4F3?|o60D<u&X*7*|NgSnHku5LJL8G+
zYX-E3g!60ZLVWm<onarNEdQ47N26mb=WI0sW0hx66taxiE-q$8x~D;O@fZ;e+C{QH
zUxmjfGZ9zsgZquwvDefA8@yuK`%r-8vPjVU`h(byyGPnOYN@y5Pz-wFgOL{*_id0d
zzou&ox-30SoyIJHq0#4&>s^N3lS09adBEM3wcy_^AXbz1L*1pbuqG)VH+3$>fPD+m
zG4DEX)2=b^(mKYiJ_b^=PedIyL!D*{?JljR?MLr%d)<ejQ07mj>n~<~vYEVO?Oo0=
z6EI3}g7Iv=5bxy@>gKv0L;G7}V5kH)*s*Sz@jSF`ilQCo=ELzdn^0Q06NWMt_E()N
z>>2STo`b5$(izoYQEyHJ$K#=wV=PWlJt&uY5tl#5lJ9pe;yjE%r_EQv**gLpV*<#>
z>2Zw1pKK{SyMdmRQ@jv<8Y@Lotm!Vn?xUKRGW8;Mlq`ofK7)GXMbkD7Yp@k9qIs*&
zz=#9veN?05t$Z$k!Rk`1f42^8)-M5%Ew4$#Lseq5=rn1xa|9zRmap?OfPNtXC|o6_
z8@yA&#(FGhI7iYh<tP{)S&jdNZ$xGPL}-(jp!+{_!Ak1|b<bRfgPgL^mFY_Eu1By#
zw~F{j`;pWLQ&?1AfI}NAP(Zv%O!8JF<2=y9b~Mpm8B0BGu&l<I220r=8NB;e0frxP
z2g8VHs@Um6i)W@_v)c}MMWVr}a6hI-j%WQSGuZEUKGQeWQt@#=h`-GGMz6+VX<rnG
zGc!5C>rj5k0uBqtt%fLrI@tCq0n9$tQLlsn_;%O=v@Mep(e0nSc6&d}%`(Ma!B=8o
zehq5RFUCTfO5FH17k@g5QFiSo*Li**eQsWeXjurl@iJ<s6%LZwMpSL>SWw@)hnBSL
zf~W6`(Mpvu`?i+Dd#g(*mbcN?%aQak&)B8AD(ULNI26~f=KDowqSJjf{Q5B*pZRA&
z87+sL>fKm(tDXvOjj9y71!K=*#t4{ji?<%m`WZ}=yhUIsY0+5<YyDi<dF+hka2)&C
z&dXpc>*w5Y5<lnVp*(OS5&5kmk2PvRI#9~(*?bN5Zr4ZdDq~=uJ_OCvi#eqZ)B7!g
ziAHTEyblQhAEO|YRoe2(5JwU`)EwI#ys7v@E$9v|hJgoXqh|)PoKGRq+!9HI=?^%l
zsfIS=8ju7U6Ujn%Jo#}YN>5zjTI5fOrt%k6EO<n?%(J*C7;)m{Dh!YWp`)oe>gFMK
zmd}Thm3jEcp%Nd@tAK9HWAN-pHRw22fKc6*7d|e4@B1`S`%lJKn0=c-{1F(FbP8qB
zj9oF%k3{8{(eQW|{I%;erj`jn`0yJQpK+(HbA7;RWC=@5O{2E<Ct=SmUF=}~;jsCY
z;NTtw%g<a#`;_POSywKb6)sQ<SrW*vEQ10s6VzohMi~fUTm;((j`t#-n+?%%BS%dt
z7J<)*YP3n443}Fx@cm6=)EvmXj5GeG@pW^c?%_b_W*u8i|D}R&s~SiK)>G|W$M8yG
zB7T-{#a`KCs_In@zmx{JdSwZotBptHl|a6=_d5}c{jW0o_%*oyxSI6=ET$Io*_prj
zFWz_83h-ZZg=kGU1seTSA^cx0(st)0d{eW=AY~BCLg#?HJPcb01i^y8OVMLsF8%R8
z9~{%!ZLZgl?&Oo;rBuhhJ&U2wy+4>=&x7{E3!!C>9XOo@Ts4e6cSqQ6F>*2q+2{&+
zZ%$*2?FQCykp^uy=78gd!`OcM5VdN#&OaKTkCM{G;F)v-I}e6{>{>Yd>1KlaENZcH
z(?PPPoX4b}oy6Zn45F!vA>l^>%KQiON0+lK&PQjOp0yd<t|J&ksA69>+iy9)BYzIf
zMXkw2ko1E+NBgqSaM)Hbwkw2^<7=sqOy;jI%10Zjg!d@N?g}?z(ws{tjpwmsY#i<T
zYbkhsj3SE6Ww30}KGsFg^O+CLus1)J3bsWlEKJlv_Kl~pjjV4k9?<@(I_wy^6OUHN
z8Ta})INdo9#=`aZ^)lP@FYOOvt>2sx%Ybh=v<AQ0hoY722(h0z1=P21$9=LrXs?$+
zY#R-T;Q1Dd?#y(4_GQ*1<DCMM*XOyPi_Ms>z~<ccbL=jrNj1kwa7s}sF33&=xsdfe
z1RX^y<4Vr+M-@r(*aY3HUy@`=5lVx+=;95Dj3JQ<{ZG$BU42c`USz|?b?IQs!O36|
znL?8;O#@+S7+-9@9)BFMV|s-TbP6Yc+pb{fU^&>5(F;)&$24ZOAZUML20K$up*8c7
zDNXMZ%}+tJbWt{H-PwcEQwNAm)fx~Ar;>fFd(2;5gb<!Yd1nU{GG5H{T*Su-MPRFM
zg(6r2!zv=MJRymB<m#!#U;RnLi0fz+^o|;4>q8w@k(nbC(OI1_#T+!K@XjUby@csO
z+m?gcS>`p^A%%^rr?ISP5&!Gg0#uZJC2D*MQPj<*W#2B4lHE6{=1E(Yy&FJks@a}r
zQVzFn=~6skaSZkEu>0G?EvP7vFuv;tYQ8)fO{b;c=)zT~+#FfiG~o=q8|#QI|Kw1o
z<Sek-s!4^}7m<sV!On&-)MVO$Ex!z${>dX(m>*2i`jaR_-csSu2r|)&?VBIoqwRZG
z4y<Q6crbom$mKMQ-)Ic&e*}Zg_jGJqt7PvGAG$DzpaspRlEE6htbH|4t6k8iFb%S%
z1Vec3d1CI7gpwFJ@r`8PY2`xVbs`D`lRl7$^g8ri-a<P%C`q;hl#j57p^i>iT6rGp
zrc+$Ay9zsNufm9(7w}Nu2z0E8p~@NQl~J1)LD5{suD+WFCPzgOuVw<8g=bLw>LeNj
zUWTU3>(E?h1WxZcP@aqh*}JXqUB4E8oGrxX&CKig^CB!+bq+J0okx#B#o!xZ42G8}
zPW<YLVJ#Q1+hRZTTRj)QizFBbrJ!TR*s^BpQFnD9Lf|#jUC20-b<wEeF#+|*yJE}Y
zaS)EnsOHivPLTMSh$CH~bp1Z;3p-1;l(JpaoJ1NOIT0NtgXqF3Ouw6G!x=4I2ciT*
z7K9dn(eoAXw=^HcZe=WEFo*3{dDt@`3R3z!uyyJ<*yZNJ_Wx|RP{LHj&8dWMh=P#@
zY_`~+2SXg#?#7AvVpbcl&e$Pj_#ccH&>IIL`C!ohtDHn`&tklmJ<#&j57hS76PfBk
zE<`<wMh-lNZ3Cqwofg7z-xSE(q>2jnt<XCpfJD7#eG}1aetp2W(hEP3hB9@qDJ;O3
zlRVMMbU!+Y{vyrA-)JV=Yq*?BM*k`IsKcHe=-AiDDHa?feYb~$-r$93pH&5hGbyc{
z>V?vb8gf3q7JDBJrA9kK@jg_e8=eE3<fYhJkVfhb*}_A+Ixt%#`u}%pYD6Sy&mRao
zcj=(t$Q$UWGZ{5T6oA{&lkEPu4vkh_r2g&s!2W^l3a`-(wwX|Cb{>+qjb<$7qeQlR
zI`kwhfo6w&WZOi95RGC`U!P1`9ZbMxfje{!W1WUFDNG%mhb0eLw((~z5uSa-w{+*y
zM%V}zlh;G<XjkektDrmnNWu0$c}`-<_$m8>!D)X4^(+{Q9iA1$MVBM9EaqahYZ*AM
zKTq@xYO(gPFRIC!s3gON_1L*nt6(GE)*7+)Y$j}edJX#iEnt`Zg{YZ+o%dCZz{Vx%
z6c4OLmC(_w-=r9ubyz<6MGd%*zXYMq=~xqxh3(^qla7)zq`7ny$|NUATYDQZn6Ci0
zyOr2mScSeUFCeV0=eyTmfD-FqPBK3oq-Qt6S-<P}t#2$Y`jm!svCpY@*F4fNAOTFL
zrGsNa3spN>O^w+5&jPQI)R1E!bbiEXT0P)GmertR(mf&?=1B}~<1kgL6w^1}fD$`<
z()?*1ef;VS#Fxs6gd1&X^;8QCTd$%1F$s;lDFgYg7<6(|Li^=;)XHEi2yZ{(ZEtoE
zQ8P;jyKVrNIyL&K<P16n%fL7@4Ftb$(AquGz=;zuG-NlXx2mEr;V_jQeL)>sVxa7B
z6fIlf1r{kOAbJ$Wd3>2c1s7fu*|t~w`!ixJVf=CBr8%^SadAcc{-w%GZXn>k)2<8m
zXlXo;J+l_T|55kuQ8j(xzyB_kQs}Om6qO`|r1pH)E`$(55|R*d&pm{#Qt2k?CS7)<
zk|arL&u8r}lIjye5<>1t2qA<sKj(}y#yR6RerJ5oIe&b|*<-9fYt6mZ+H=kMJn#4G
zy)F0%W8I#GT1Oc>zb0wx0BF-tlzJu|qmA^k(Ej3js=hf}n5*vr$rn!&i!gI+yQBey
zt5-vh?n7FfAqU#-59w=}HGYd@8J!>5oX&WKZOk)J`t=^EZ5;<omNQM8;duvz8&zab
zp$9e}DFkn|RCI9<q{`xfr1sqq+J0*Scx^fiQ43wb$?Gj0G|(7rlJjXw=3>;79)~j>
zQTR{UP&{=%7IoP@Qkc8|zIyN|{?P1Dw6q-Nn@d5W?gQ#?Ca}G8D5#q(C%ut5@VQzZ
z7ffA;>a3pGGdh@#_B{+b_2;n2RE+ALr=%70`+?W5Q*c@}1@)kk=sCH9W>gJMORIpb
zhi0JKxkOT<9fI3z8Jn(t7Wy2&N?O%Ju*sF_cm_%!^}Cd~99+*dq>t@8cPztgN~KU3
zJPrhH>#j&_&cXQL2o#iR!Ly?*pSULk`YE3PG+cv47iY3QVKGthj>ElYPQ&XsrbjG$
zPEGUn;D|{~f1+JPRo5h<=u8qccgLXLjAfvvtpW;bMnYrVQR=%hgT%g`1SeKhqd{&N
z<KDcM&TLzPdH%z(i&rOExs&1X<dgXAUMQ~jWc&1|VdxmQ0>A08n$(gXLa~Jx%h5Sb
z76<B}D2(;g#pZUM<N87I@8b}CYaYJ*s{vJvBW5(v44&-00E;4vP~W=<@w-3xIVHpW
z`RXW0Dkkx2m8eNCV=n6pr|ylX&Wnza&5J{E$p;<G-xN(N{O&lkd`PDOc28(*LA<o|
ziXSl%440Z%D+nD*Jn?7KRj~AA_4zF|w0?g+_?^=Szi-CibHWfL6NU)`;_7H&Tsx^S
zae~xP)~Ee+07e~^;(DDRus^k&t+!L@k~u3du<SB>&n-mH%dEefa+w$?vYf)<Osm`P
zG=z;@jEQ_DsF~a)zHc+A!uEJN-YExb=hi^Z`4}t>K15;z8l`Guvls(+po8`Pd1zKv
zL@ZMR7;~%`3z}I@h?p^wRyx4UoCN4;9ZPcOM`3sft3yY>qgh%^6Y(q^{4?^Uw$15O
z;X3Po22V%5%5FNoNX9hb*C^l59P#B6_@kZ-OZsHMhs;7S3d*I{Z|jMR#t%BKWCd=0
zSB=Cg2EC-ne2Lw}C-?^G@(Y2zW5U46pi!#)cLXtJnhLdR^AKYih{Dk0sNQy))Q-4L
zoT9ft-m?_&eOF6E9cxe_xt{2G{h_`a%c%Rdo1||`3dS8gf!-4@W9_EZAozZztj+%*
zbR@{YFP7<dp3EReQa7MhvJpCaF9(I)nGjGlA9|fnfrRM*oGa|n`NSi#c(VyUv6zoh
zY7UUq)DJ=&4nW}~rcrMlMql<F18NhW(Z-)ANWJp~$XSTEZSy|#?79RYSpwAa>`%sK
zi?J=<TUgT`jW=pDvDS4gdhR+zd~;vXkbaxtS>*_9age8KHWA=bwwtQ2)uV#fKBcAA
ztnQ<5vP@z4GUnHqLS?g~F#ftR)b_I_ue((+ckdJseDSBTiJM_Qt4~(E=%$kI)5*`a
zd?+o-h1cFPczpi?&dK0G^5hD!F$yEEVuk2yI~8(k)3CrX9zvs}I3bSdwjQ4a|5eA~
z@<It#OuQz&I8Ouh?kZu($zXu=Q21b&j%~|Asmo|71lX6-ZO4NdzhI@bqk`S1Pb-AV
zM@EprKPYysY#}?2okKXw`oD@A*#7hc7G^SD#4uM#k!(X;$NJpotzpI|Hm^MEkctP_
zIZWO!#$#J0$kGUjdfg+^vi2;*`aGn9+AU;}#TC>%76$6N_0pTC;=#{z1}(H+0Lm@;
zq+*YqG`960ssES|bXz?Jx?-Y)5~d5l#851lrGoC($LKtVCD=IFjdU?SPYbK1xfP8d
zZP%I3x~~j2d^(5xtTikHI;t$_aV&Jwd62w!3{^L8AVpWyv1Q^QFm5rx*1ZKZ#ySO5
zuLYyt3~jP9css_XCrBG>BpBDA#QdwFI9;y_qkhK_wN;m>#)%{x@vlcEJqBMq%))Ao
z3J`aj3b(z>h4sB^*cI}N7GLb3!mo_;VwT`w@^L&Af8GGQjvT{QdJN^nhUj!ZNZ9JN
z3dBE@h}a@ZSYdX6rZyJRLdj%eRW*xco-#c_|03e`NCR4D>0|wbtI&~k6=UsY!v?MZ
zqqE}iUHx{(k<&qOmoGKF>W{h0Y{2K#Vg$1wxHBXTF5fr8=Dgz=lb3)!&H)hn)CwSf
z80amu2Gi{aFkCs3PX4e74J;ENyz&q68Fv-5EoMSoSQu`m36Pt31lvYzW9*O`^fcT=
zmo`@8s#Sw<%=B&O+P@lfllo)TFV>r?pUwE229RvOlQ>!Sl92w+U?bg*uU(CBn@2Jv
zUrM0DbBy_%*-QdF<ghk!7uLnE#<tndX^!(Lj0jN2F!2!_70P-fR)oF_@x}+*SMc+!
z9Oy2a3}2V9_f2ZnmF|W-aB1*^tYvSg+t(VBTRb1qGgZ;WX#{+@YmP2h04>Fbh#=I?
zL2U7q>W#TaIvujm#m}8SZ@Y{K2QJ0H_xU941LKQ|v}t&#9<=mk5MVWFSFa@yemMxV
zIBiT~I&u{c=6z7i6e_0&!HG*o=r&h_ehpuSacBDD)3w&<eYOJIhg%czbVH1|qJr9K
zQna};4<cWVWb=LjwLJKOstEu6y_}HlzIp(g?O8qfVuy5^Q3eR+ggG>)ltatyc-W(r
zj+WoHgWAwYXi<rPmS-g}$KfQXP6MXP4u&gsF{rnA1-PUggP30So@<FH{o{HK6s-1u
z^+^lH@3w@B8#f%p2|uJJISW8M^_zp<)(X-iRH9Z&1!%iC9s4?*#LOvf=#*^0`iX<7
z)+ff6T{@b?`)<P8ql%FGm+3E#ypy(UQG)Zz%+Hf{m=-b(m0$2A+D|<IFAdX1Ge&n7
zT*xi8!85G(7A5UshIV(iBrw~!AD0*FvW!=jk5%dqo`-%ARW%vf9V!RiLjf48@L9O5
zo#}^SBU!dmI$S8sMujyY#KgUd<hCUc-z`^Q{#iF{w7w}di#Y(R?*`&mvz0ixUkHTT
zEu`IvE+ElZ4a&#VXvK<FqW;MOKjlut{W<CwZg>C!ZdB3(DU3DQY6n58;h_0B8?4W#
z;>#@`=_uHO{`0FSt7U;;$_i?mdJ!b-`Lo@w2s2-uhNpXL@T=GfBN{`&$MzP@`u>Kh
zw#-7euLB_|BoU+b@Sv7(7%V;>0=2*2Xhzcs9PQ0Klbgq}IekA`4)y?R#)@uPmxMRJ
zGUnFE;VgshDC7m4gwG={pyjw5bZYb-G%MAG=1GZA95Dj?lhjeurv@y&ieR~}6Nc13
zq*mKcf$O`e=wHs*^QMES+na&pnTjVC`6a=#yR3)*d;>fwJ%%yY&Vo79Dw^ymg_$o_
z<AE^)QGDZ>Lu%SNbbBQhO2!j1$?X_gHyYxBVdns={otki1sZUE6gXLLB8&3_@kIO<
z^s8A4<)!AR-P=eCSvH6uWi3P)q=An@udt$ZG~9dbiK|BO=$w>_ewO!%>$g!b;nF~i
zkCcO^{iaZ~sRn+YI|h2iM~KbsIS^~03M(Vd!B=}fj6bA?de6(q!JDHn`M5nfr_S^k
zCCeGJ?QE&r&%VsR!*r<kEQrhWXH*<mB8+uhM)XEyqo6I$&cx)D&=NDKQON}Gf3}d=
zXc>}_$H|~v!x&3OFR1uxABO?I&O&EWUs&IG2p=}5z|gyq5dPJT*pwec|FNOcMwS_)
zKDHYDMl6FO=Km1hhyjTnP~UZBpe*W({x$uH+p@6`e={BJ_MCxTW+6B-nZ5h#w!*bT
zr%<DH8_Lhl20`mgSig^PLJF-&a<?9IMHf>+h@sRu+yc(Aet6!23P?5D0Rn@a4#(8D
zVo=?2=(Jq{6KYxJ<*K<fU~D0=kGx6c?<9c8ibdS7N6fcUz|vQ$kjnVdTPLy%C<lES
zp^yz%?hnJ*O$8+S%x3iZSq+U(g+%*cABb-+Bw6RLz`dG1_-A@Gy7_lX^I1*4Dj*$S
zHcv!+X9R<FMq<p^P^i#5BJ4U9N7ra+V1C{S6p_p5yE_mT1+lzGVFHPId7QNMJ&J+_
z`LySZfbGNdK=-Kvx~nW<e0MRPQ>esba*fvY;m}7k8k_@)vGMkAiYhnhy$vkmLzawR
zG`;YE;zays-BoN10Wx8MHO7}yNVQSGzNWf3KD_`pn#K}G?cM0+whGM6&afP{7?ub8
zo2q!7K%LQ2#%5xiZvE?Y=EzWRmdiog9u3C2+e0d>?I7OG7>_ewSl}r3ThMPM#VV<^
zqb3oo(=$OmVI#iWz6}<z`P_Y83Ar121O+#Ph2%#J*7d8vH7SgR?;%ew{K>~p)@&_(
zp$7cc7~$iXNH)I~0b}iwV);u%A!H;}RbBvfHx*L+E(mHfU2)ffN*H%E1Al#6g<&(#
zV*ck#xTP|i`NK`1G$xAW3jT6PG@OI}zP_Zu_7u)MoCFh+uHtRRKMDGL4Z8cU1&;X`
zYXi#AXtNjeYb=45V>9Rz*9=hSU9qyCEr!cafs~n2y#4ed(kZi;c86sRE**)M@k}?{
zOogqE>%fiGA=WUhxaQ>J;21a;*9S44>#i)UI-HMw8LZzm={sFG(Fs#WZ-kz`2pgZ8
z!kU{Q&~~d#*!;8<q9PxWitEu10<W)S)|*X0d{zqn-4(PVavAy=u-u82H-+Nq;bq0`
zreNaxL28obfVQ27sBh6|P)|nYXW`&w@(1d-btv6d!16LXRPaiP7`46FT&c7U-7;&0
zzN&?g-S-e;SuoaC>5&6tN}%1Vk^IzSxyaF{nb*Hl=wtSY+QsBU)bH`2-?{}y+nz+V
ztJkUW;Z_<lgVoO@iZsgU1a;CsjT<xbAlZ}&$O{j_)b1!$JhC3`lTH&s>~z+PGl9y(
zn^Dr4O%!xSLnDz=?G=L|V0t=qm&VfJqOs`oWed1P&!ky5R$!llRyeZG4E^2|K<e)n
zl5}n#+T3|2o#)K_@#=k{u-{v$?RW`D?P@?RTL)yL3gPL_EIiJ#8TKlwV}pJZTaT-v
zyPOg#=v||O6ACf8ppn);Qp9f)Dxt7F3dZfc3Iaz>lEj!lUO9|qF=-Zb9lt`-3Kn3E
zy&@Wm*?Thx$q%_qtU8~BiE2ABpsa{`O)7=fC1P|<<VnImC$Tjoo+>}u1bf*Uaa?&2
z>K_}0pNFyclSL1i?9KW{NzbX5^%k(#bjFf&rqR|P1M$=o(BBaRn*WrQKA8m1);XhV
zLpZdtT7LVdP1G>86n)m^qGarEa%aqH>|XPP^v;ZgYlGLaY)7f^PEQ0F|4Bt*FS7m3
z0<eiYL*35Q3jM4Gk&VM7w0=@7uF^Nb*m60dyfzMT%OP5P^Au*^*TQ*AbJ57=FplY$
zhhK`~P<;Qg&~36KeAZ)q>*L-KJ3SHXQVO8>@JJYJREDzSJdViM!Ea~RV#E`c$8h5?
zdFfFF6$_inY$jO}w<U!jRk;XmtPT@3?F^0SsKRYoGcaaYHW(!pqQ7$s-KE0((7ib<
z&#-~yUmO5VUwVnX&LeVjB&%lyX<|YB73fKipyT+(xbYj)__b`qQwh1?xulZ><P4<(
zt&e4HTgH>)kC|>|)=?1E$zyRx4j8anjnheeq0a&(Fz(60Wv^Y(S!E~K$Cg0uHy(Q5
zSL5dEifCC=PGi=mv0S<JR4AW@6MMFz?utCLEOw@X&=aNNR#mEZ<p9=x^`Zy&Pe7a1
zDbO|azHmbh>j|x_pdG`57;|nRxF+VpP4{YWvCCvVo+zdtIEOZyna+6jSy~f&9E$fw
z(w5BKOwT`sX_a+_F0w6PteSwWTjs*8un5dg7!RQ*L$M-FiL81Qfrd&$SzU0Suy>;;
zR3z;bW~It-@+y|;eNzDb!y|;w58hFU<U2{fWr}e#&O+;gNZ1&jPu)H2+4@I87`tKv
zvA0*DHcx}lNKguGGj~bbqB^DWxj7(o%>uK`DUdpC0AwFO3R(SLQ;#;rirRjWdQSaH
z^ru*%W5x`0R_g-?8?12Yo6YF!CjqBx84lkxBS1~*1gJfIOvCLpV9TmVY%!n3bZ}v1
z2bfmYW~nK}=6n{8JhBHzU&_MdJA~%0XSspy%opdF12#X_(;ai})7&SSIR5$uY?ot9
zp+!z~WrqZp&6$WVXXrx2@Ipl60_GXr0w(Az&5b%sC3;UuuBsyRXsF@wF9l3*Lx|wx
z1ACjGu~7R@46Uy}g5_#^aDh%b+Lok&`ic~i=ft#|89Rl6jKdY1rvp9etj477O~TFe
zK|VAM!w-EWDNeE2IwhRARZb^@R~8N?j^RWSvz6#vAh_gQJU(Ij#b#Aj>-iD_e|8*U
zp1MC&-1J@A(#M4PLc^eq{ccMu-cjeDzR<Ey34JFRGF|m0p>WD2=7XFLa_MvM=h7;0
z9yk`@4^P2mJ`~55L9I&`c70}fbw#?^*ypcMz}6N|q9qV$$1*MKN1)G%XsOs+C_S+}
z150-$K*QW7vQeiPD_&c{`dL|Ez;x#}Ys<*};zOv}Hxe43u{_vuK2rP3j9bV0GyX4v
zq_tf<6^|7<D4idQrFwZ(GI1R({@~BNec1$_tc14iUa4Pl9SKa|2Jdg4#m`~vdTnH$
zlX<qn+<QMr(ZqPBS5cJqJUvg8o#v3yTrt@0$S3LN62Qpp4At8oL7p#SUP{-c;Ns~8
zr2~ht48Ox*^Z7Cg?1W`z4+&Vkr5I>=7Fzaluu<<SuKbjU>OaDS>e1d*vSOn&oUufn
zRhFZYi4>lAM`Bm`AE8sn8zO%2R$47Q3Khkg!j&`%dKV<In)nfDS$Gl>OLk(X-EcVY
z`YP5iR<7@vTz1brL9;0aq7Mb4<@N)xVbof58+%6@VO$K#r(Hl6%IebPTgr?!upF|5
zVw8QF$}(Knz|TY(nk+j(`?ghJ595M&J0bjiQjIS5D<NR<Z_=~xDs7u^p5!-sf>-+s
zvetMkrby4hpXJezJbnSZ9DIv(DTdQnkuC%-4Z)B{cFbFV#IsosPIXkHVEF~9ATbQS
zUA_$e3?7M+ErH;(w34*H9|C?52IpN%(RtQ5i0-G42hNwUocSS)Gr55@rZovm=bWH!
zci#zZemxhSx}6P@N0rjHm{oA1Ef!M`E=ScjJ{a|mW3{MU()Mr=*cyDNpR{68uy`YJ
z^V1<gjpv~vT^S0C^MsSXmSWYJvtW9_93%E$f{0gFpjSJKnC%uow`Dmk9U~=IChWrA
zrISGrt`D2GUqhP_JD`CvC5yTWp=m@Y%b)cIlbA-Stt1S!_RPnw9hZgq|HPp0-(if~
z4$yM#JX-#p37s}bVy{Yx*KJleYE7p~qB2PKttYy7W1#nRB8@$_lrf#35zQLLZTor=
z?wp9nkbCcGzNM5(<USB98<vq$6pG@zkIKfqIe|`JJLt$FR>I9F!Ir-Yc=>k@7S1*V
zRYiAfYN%k(tReX@B@}xm?;;y>`k=q^6>5|&!<O|vG}?z{{G7}Mw+nx0K*e9;K5GVJ
zZ1|G!XdBW{m_U|{V65?>Glb!c#p~HO4r~scmipxY4cz#cK5se?od&jGR*_BBpQH&5
z8aQmb#tQ|OYp7H612V|66cscsL%koXIe!04nvBkpfVu^Ao5Ep~6o(4~-3zhD{3e;u
zH5)f>@nLmg7h06YJn+wsVuMdEW86F=v4>2k-?wIh+QYHY&4C6uU8B=H)Ub1XJ`EUs
zo2VryqEWCNs0`kUt=(T}VRE4`w&^!H>dIrqNPQ5*#?s#OWH1|$hjC93I^%<A57P!|
zN7s=yLp@^ftRG%z49D)LK}4W%4&=9pan~ExQ~G;OYIXh)%Xy1)@ODfBlZE?*-fp2R
z|I~oITuF)DZz+C|`l0x?rbG1BGZ-~uDkN{r0ZE;i&_=M56#w1;>U$<qgI$TRrSLR{
zCtjlgOdl<ARV7Wwsz_BRW8<%jXBpP#&@q_J*Sg>7uk8!4C!!i%E!Z>mS>jNydJ<ZS
zx`^^#9)#UtEbq>d%sF-n8izO2)b+7ccs3l2{>FjZxOm11yn@Nf!y&tQGWvh@qu%9*
z!9dXr?-}c1!^6+CJ+GLyYdDfSIu~J&>LonzdmDaiFvaF;h1j&XKW6Vj5RBMIN*C$l
zO~YdtD11y@7^_v=G>aHLI>_F`0ay|sLZ7AIh0UEK(dS7sDU6Gx0afR5<A+ILGix7M
zPJTez$XlWJCdOyIB!Zt4m`rbeBz$gLfhOmElLnOp5G!AX*!_%UEBBCY{OkhuQ^o+g
zS%Ku@A8EcX%kZ)<hVZqYiRbZiuyji<UR}5u4QidxXx)A&-SCd=Jtu|On&GrfeK*v9
z*TjaALC{!Jg+6cnL43iPPCs)7Pq7+an1&JVYL5iJ!E(?x^`lUISP1P2J_8jeCkaj7
z1=8f+dfGm8I+4v`+3M*gWOIEK=s_s4elZX_<pyIrorkWA9bx*8Ma)O6C495k8!c5=
z(oxz*xPQ(y6v*z`ccuKL)(5J<jAUXv%PkJryBh2Z{Rj(gr+G6E!=#)TbbA>|Wfm+K
zEVGxur$~tXO9(uhfvKJaERUP<mHOI&eLs%e!BpJA_ElrOM`7w|mgPG&k7Ws*hiX4R
z_;w@>?WTBR-!;rDnk|JwT|e5G+z-r)CSibLEh(LUnFyw<!saYRtax8nX4x|uwyCFs
z=gp5)6kNn|V?w0vx1!Ocj`@nKMv-wp#8B~#vAlAkspZtspg3q9j!cTikR4aSJ+u;9
zN{7=K_bhS5i!=DJBOf|mxS{{RG^wC*5IV2eiV8kAXrn9)TdtPkr(1~Ku}LJNARmqA
zoddUUV+aWQNe?Zy$5Ll6aIp>|J@+R;gHtYyzdaiT?c>lzu8i~y=|kEXJ8i@Kc=Wl*
zYGAS}n7W1ek%U)Cw^KK@Iei)1rp5>x-#lb`(up)%;XKPgVE58xIOwck8^3KW^ps~Z
z_`Iy4?Z?)WTq9%9eO-y(wh`FAxrh3lqNG7LjF=QmAl(a6*qTNMGj2PfSfKzPPrQm1
zd;5@iU*@8#?M$dQOGSUShqd`b(axq2#Al2FrM_&hc$IjKBydH4CF*_J0lHzSphdKB
zjXY!J6%GT%{>w1p?MW~;T7m-iAau!(hr(5tAi$bwY-+ZUu7p+e-p(259iIiwtcT$5
z5i0Fb+JNqblNh&}Wl3yd9<$g@-~%qKrxgHQd)JdJ)ierCDfm+)z~{@x;hd#RBm1Do
z!L{#85@RR<g;&KG)o_c%C$YNuh75;jmu!?Ms7u3-)RD2r%W+6p9F~fise9%GIAiRJ
zj&3_~h@lrYv${mTI98**kcgg^SIFpHSr8l3Ng6dKp*v$+%?|cJ_8@|+Z74>ZV|tyn
z*BFcBFcd#;Aj&FXjOlh5Ur!Cg{LBGh#IPU(#xae;MGAuG64H`v1siwQk;d)AY0IT4
zP%Bd-%9qn1VCP{tIU)}|BiGPIg-X_o)`8GZ5zuqKmD=9%hQ!2Jtld2fimf+b7Gvz&
zJocrzQ%up|S1BfI?}qrp$LNI%_AGbrr!c>FEd-{|WtkfZ2z6p=9UV$r78TPj_p{j8
z$@qx$612YQ1LLDupMBYFq00|vwuTLdz{Gy&vSdD~IM`L@&DM}^Ok*@it{lB$H$dCd
zBC<x%2YqXFQ8(}$T8~HqwRyp)>=q2M%9JW>xJR9>{K5T8BxF1bNA2_07;x8@*cP26
zW)}?bd1o3Vot}cH6<4A||6PdbPf3qf45S^Jj0qdZ;`@WU@$|G(Rtp=A>yI(Uz_1t^
z{^307vil=_W95vwmoDKpvJ!2!HxuujebC3>P}rhz1|H>j;1U;AY`dSrH0fbwjm77L
z+bS9BKx-^Cn>b-fpL4J+^DIUk4yLsO822-M2ly97g4xk3mXEOo^Os+N(r9f^->{S_
zpF0VbS!Zzj;1qN@I*vT3n#Xcowo6THqor!Ek0YzELGOiUWL(ZgOu1GLUW;0(!KPHO
z%`^jl_Rbc6^(9>oN5S+1{z$J{q2#U#%UG;}V{Z<k`0)`e9-xW?11Vaoy{5gEOwZ?S
z2Y#U`B<o8k>LmxDiT+~YsEcD5U&{zK`b>a;IWI}moe(Hi3I)%fj-=<?4#tPQ$nqf9
zgZsToc3vx}cw7T%4{|ZCIt|33yM$kg8MD!v!#@pK*s%8kv6;*Ay;oi&i5E+ub9z47
z49%r(`}4@y{edWWGekOEBNENm&0yNsXsoc9!~9ZKpgHd}(?VT_z%h~Fw6NG=<^tB|
zw;xVgUhbrz!!o<K`hYlPIQerq9%uJ=#)#7~C|S}gb=t9;hPj`>l>SHXYJ&```(=V}
zy#oAxG#x*uEJU%3I%yO&6HlW;^lYr5y<aEdt{6n0#rvh34F%XTYBgi<mC*bDSYec|
z1)3cCEuGa8jSC8cFlsjk!d+7IT*CZ2|D0vHb7f^33H#7;VLp8{&jPK=n65^#kSNr2
zQIpF47|Om^my0KvwxJvZ!D?XAeU<u8mw`^`8MZh5K-*{tRsZorifU&;lX<k7ZSBzM
z?rLeccRtgE#YsyaA4a3-wGj752Qg~_T$r~S!&z6OwEhk0_4`W2*Bi=eR5zobH;Cfb
z3Q*U)AoSQU0tLe&piQfawasS32-|q{6mP)fql^*xqe1G^WFXz_#d?Gq+L)e{3#o4|
zgNgN8NOmni`H+*~Qnd)3G&TvtRV#^Dq)ERGvBK+hrWkM7K#d$Pg7H>mY!$SS*eA=$
zpO^V)WHcX3{ksXDX@IVEEN}N@8QpS!JXSRP75>X}pSk4(25vrr=jF_>+U!2HFEs(b
ztuDC4GzD`bkAU;vSoq_x6T4hlVu581UHR$+ZePyo&Uu$0>w*X1<VLb>>LGBuZU=(C
z1K{I<Q5f_)50*$XutHt|b*Hjgz;;)ufiCO)7v84b8b083s|0*L<iQv@b9A4+43Z8F
zXW4E91mE%@q(wqYBI2Q79AmF1JRlDrCqe73Jle!-Lhkeg#`)?8f}BZ0f$0v1_`4x!
zQ9J^}z09fT%VE@uDaG~`f2jMYZ*>046&RwZ04*!!KtQKJV;&Fp?_{8LR5C~}iQ!Pb
zE^gd03swj0#p`#?Sa!+^(7lifJrDY!dZ`m_yz5B|FH9g977c<YDr^o&ralwS2rI%(
z;14+pMVUEJ{iPDz(+9yygJ75%JP6kuRY!?L1A1E-bZxeTN(M|K60n0d<L^Sjw&mD3
z^Sw}9#QL&3^RQBbJ^S@m;3G$f&z&rx<l!Zlb~_iGZcZbuJy%K4vmN+hy#f|>7DB;w
zDOP97gJ7YCgX_N0kUHl*wdp&Fx)m=ZCcEBH_pV6N^eq#em<|LTR>Q8VMJ(&x589VL
zB(?dgh-CO$+PpUyU%3=uK=u%bOU(hJ4h@hDts}=RGgyAS6ErT0myX+?$b4(rG`30^
za%0)wukirC+J(e#!CorannD|gEu`8XSgxv!y)#OtgWnT#Jh+AFTjl#x$0lQ}K5Yws
zC$7ZU$7xb!n}N`Eual0fTZ3)y9EFnoo28OpCc@O5t7PNXv!GYI9(o#B)_{%^_#BuI
zfl5gbRS^xcZHx_A5+Ur=j{&#uQ)%r30f{9`VPgQ}c|~T~mn>j<hn)T}r!bxAkXR1L
z-NSfs(-N#G`ZwFLgKo|^34tn(VAMMp3b%Q|#}&)aHrWxx^XJo0i&D7bHVzZ!@54sD
ze}ta5mx11ct5h7ZugrEA^C>S8fRDC374&>0L4}#nbnYB<Y0bfWl>=y&EQ63)V$6*_
z22Ib$!o(oPT-<S!4$;_xInRu-)Xad)+))gLsn4agA6Raj-zyTnIGsH3JA}6++nAp1
z9%*@~2x_e<B(}>I|Gtu9>lr^N%wG<%&B|EtOCFQAWYeDuFN4zg8n6x<2A)P`;QzXV
zex9p_p7X9UhF@`+S?*Hsf3uzS(U=~`{}1(yT||A{8mUe6FScg2pq>&b{gZhVY^J73
z)sl9C+pNCIQx48JE9g`a{Cl{~>Z2eKC@Ty8KOZ%Ll3>#o-_;w(_^#NoV$9fSljQ|V
zmf(neqW-H@9oK%abX2P|FBdL)BT}tZcNE*-rec$K@N-?8$hpi?=43rfCei%`roI-A
z4N9{`sk04a($k3|yOuYy?Dpp(v2X`xbkk9kHP@oNuFIx;nt@gM=3i$;lWST<<K*?q
zr8+-kY6zlFgH*~dW@pI^=8Y?Vd|*(y`T`Hp!%r7wRe@Ea6Qz%25ASD+wq}_+c0Ft7
zh3$#5{qDBqhhOM8hP?I^J@7Os_mwOZo&1_AYrOxQFIib2bFXv|IWZdiCuOU0+nPJF
zEd4kUUYjSg@AH<UR}aZjXN@X<Ju+8R5_n29=i(dDoDULNo5@KC|8<0`xNrjhy?!H~
zhc^i1*ac(9fB5>1`)}88(tSa>a-c+J(|4?F-wQcMw;_8tm-b|?F;unOJEu^VtQ*87
zDH=ICuTT-iJozpQihd+}f{#S%(!YGv$5G`rFG~0sQ-(YCd#YJ(bNZFcL-~|wi6BSh
zBT(hu^&9H=RC|!)6XPb4<jo{ruQrU^F=ediz*Rx{oy`wKAHB^T{~iD2YT;%v8~(q?
ze|Y@k|J(T4Upf9~9{-NNCegs;e|!A4|9$-IOaCW+{vChX&)h088^gcHfB5=O_<wx-
zcK?0+|BpUD6aV}8*-iVe_pPt;x~D@||2uL1d;G8Nqv(H}NA0Ig7P5Kt|Mb&2>N<by
z{S|)E25phW0|}??I*_}(%1~BTB#;SKC7`4|4gBq=aj%;-Wc}{H<xT9pLGrZ-ts>9y
z84I*T51n7~GZW)EQMHy#b8;h>*Y_P~Yc*bGWrm!CSW$F)tw8j#;R|nSrzV=T*g|Cb
zNlA1+cce^cwwiCJZrtqJXZ#qeM|@~?Ip?}SPqxeCI=5)0x#-0L0};MdmU)`b;Tkvg
zmyO=l$yvV10zSh+)X%+~w_P=cleZ}0oGPB9Qp_iQlx`y*;%LYBJXGdPmLoV%93Xq$
zmBINWydbM~$;k#pRC3j|yZHE9eYh!S#>t%G55g|XXMEJXOMJkqdECC#(IVwTIjrs}
z1NS{UxtW@K`BimeWSKLKL_0pN;SU_H;rzba@M6uK*fUL0hVS?C7B94921RRm`vr{A
zz5hOM(e|5PTQgLqb|8?yaeIWUW|EB4G1Zpk8cpZbqc`!<qP{ZoCT&??=RPu7a3VLR
zSV0#5bPgX7)JNu<JepH~lgTHJ`OC{K_=i_&JJ0=SFcXcPV<3tvYvr8g*APR_Mielq
z4vfYe;k-S5bD4o!qWI)!F7d$)PIJdiF7^}Qt&2>!y%#gMxPz&j{flwjm9a{qqOon<
z&#oeFXS<nf==l)dbWAWWu-7aTU(~?(z!Lt$*eEVmZzT?>yUESr>p8z}eXdEXjd$8E
zM_OKpIKQ$d@U8M4FKv-<;@0=zd3+xy5YMvzymYjz%+*X(*EfbAD`y}w+%`gF;PQ<h
z{iT#EJ-wClOw8m@xk<U0P2IfD{#SJDeR<L1S%-M>u_@dKn|6Mdpp`dyX(_w4ZMf)F
zzkwpb01u(l&H_HreI&nk_ZZok=su#4=SIo=qn?5FS6i;}&lG;+COxjJ*c2Np9k65Q
zO>UrLU(o=+%becoPX6}wa9(EiiL=>y0w1n!;s$luip2f;kYP#|qON6WV6WH35ALHY
zJ2mDJr?r2$Xl~ID{&b+C=!E4ES>V7i+~$k{vRI~FUk^!~&!iwe|C$?LBhi#y>liNz
z8q~tIt?8z6#ZCOrMjKf{=2Px<Od7Yk-~`_tF_U{XMIf4^o5C%vHy62GRFaK<f1dAD
z*XG-XIHK{iPyC1QuY5aZg0-89Y?+mY$n*3tS*hJ6^gH*P-#qU(XOZ2_b&gP#+20w)
zTRl*ct$e5{V`dl;-#bn;%{-5Dg5x;6?<d~xoj0d9isF&gDzb%LDzc79Dc^o!8h`n#
zwn)(Dmr!p|5kJ528Go_VLga6{gHx&s;T*0Em37+=;_5#%aYb6U`GCG@AX&VXt8mP~
zZ}soE87}goRe}e6J73FsIQ@CDQbFVr;KUajd2t6uMDY!^Yq+);IobId6VWo8VWL;w
z6}-olv;5tO`ZDpN-^BLtD_(Farfm9CLz&Bl2(T88<XysTWvQG2H>_AuG;8%K{!NsX
zNL+uLYUm3@Z?benR!2w4jy}B2cfH!gIsdBW=Ugz6Y3#5OInS}>%TF7NUR<{oUA$e!
zhtE}&WyNUnf>#%D=;jAp)Fd}9_mL)-Kf8?gxfw|(2E_2SE*3J+wtH}FqnXG=@ef^K
z_M4ZxpexIJGe#CHu$JAvqAF?+{sLL|)p&vCN}<WIZiq~llO;b$#o^ZkqMwsLbLJ&B
zvR;FuoKfaaUj9WYx6SAx*Y>uVrcC(5FVS!2JPI1QjWP51U5`)jN1R^q8|<6-n@;^?
zf&*qC(z(Gq3#RhoA58o0d64h9bsC>N9V$|dA0rZN>o4nX@{E7kr6lXN)08cH$exqM
z*ExkabN=R!R<73U5ZeEm!d1_+mPK1Ciy{jQMBY8k+>%qavfC2|%RE;{a?}2P=BmY8
zxQOb}vMr-jMQtOl;(|VdMB{#%%0~WG66O1R=TDdEiM$1UWz9=dxtfu$Ip>#p+-9p5
zzER}|#>n?_v+|Wi9o`jO!;kT@<o@kw_2UT_6|$X|eZ9qP?6l#=L>BXs6-OYp^f_cL
z(2#YR4@IYS&#B*I7p|((OjfGZiK0wx*%2*ES;ne?q8%IM`38?k{OrfcywT-Z+|J54
z-i6BXjjNq7Fjc_Sc8}+WU5(=lPo-k3ESYz?P=f!&HShs({beS8AE}L}A=mHHc}}8l
zhEX%*_|G0mT*b#j*qAXNYrBqf2Q$aZvSizNhYoF#(<w_*I`uf095jL}J*g?Hf-=tG
zhPJ4_I+OF-w~cG@?#DNdn+th=Zt*=o3-QCfYEICf)t$qV_=d0Rcs|=qG`LYw<T<4d
zo%9;0<?l;qf3gBvVkYrn?jvPda}7l<Aw6J1d+5#n+1!xS4E~{f8>hYZIDht`mT16+
zTf7P50go6mR`%<>KqM|b#@W1x$AwPvqBkpyL@$5K%WRG)@pA_M<W1&vqu6#Ux8c(W
znWW?kuAh|8!J8W105xPA-200rEKm~tsk_GQkIdp%kAA~P$qQso4_&BC?+35cXSA%l
zubgbnKZ9i%adM*C@eZ6*KVuTIQeD<Fc$lo>wKoynY2*BMYRZy7>&vWEJ2^v#0kUGR
zfxMifk|^cwH_pb}l=~Hy!#_GPOcv`I%QgDNa)sSbuqbn&tawKZ7dC#Z=+v~2T*tQ^
z{K#P=W&f@phiZIT{$CdzY~k_0T6g?U`!|RGwtm_EWUR>V<YV;c?Jw)jUd;)fD3G5$
zJSRIlRyMOJgHzwI48`S1!d=G($R-|*;(xm5^3g@(MW-%2<jTGe7X5qvmxag1JFzv-
zzsLXo`TGkL|HpODSet2+<pfLqZ}$%Wp5H1<@cn=P{A>gJU(e45{eSWK_xyINqIWv6
z|MBnfAOHSr1NfhMezt-8Prd$Z1NJ|B|9`LJe{B8SU%>rO&k@@QIsBh5po2{fIsYje
z;hXd<{K_@tHaHFDD%?H^g~kJTh2Ax=^W<ROZJ{k=y6N*K0_J1v9sn;E_2I>TilwK^
zboiR9FCp-TKCk!49KLi8=C)-I;8ipSaXybSFv9*e{?spHObRuATc3~c5|prL*$?DJ
z*U|XSW7v4%C*JXDK)<`MsB!y87>~obGs<dQi);_{E6ri`xzkW`;{|U0sK|{k9>$w_
z`a;2N9%Rc4p>5RyI@lx>-$wW4#`*Pv^6E$Q$LTm+r!LQ3cQfP2WnFHZPZg~2Q03fI
zmVwjkU3BuGx%l(ZC6>|ohIlmo#;ya+6slrb2I~fzGuM!>A9)r%4rgOji4d;Wne)fH
zEqKdwWwdo@2)1i`V)Cl*ME&eS4A`7V+`fLNR`SQ-sGT0CboUGV)V~bl4F+@Jr%%G(
zU4F>?JZLglfmRG$PKTK(aXL4zp=_)?Kk?Ea?!n-8%spYkYuIb>R!QG+@jDgX*e(N>
zW*x&kg`4Q~{s&Gt6NBUAPhivZOK6e*9>c3=f{$=Lb=tj_dA-$f)V?0ndpHocB;=yy
zicOF(+<<EvqD4GYvPtJn4Swd9Dllm(7Zxwn1^25ju{)jhh8Qy>rT99msZ{3o&d!I3
zkd^pkWFrJ@)CS`L&(PDkn1~mNrQ0W!qvbvc4R@i$DZZajkZ)Yppmi2{O+BIbVjB+q
zeH-J(`(cHW2kq)HBW;X_uC;#{Um>4_3ZMJ%PPLqLss2fv`KJW#|I*~u8t>rg%f|eo
z)erE`(Pikbl}h`?Y4TlL1F_hB3%q*Mf<7C56M<Qr!~CaTAnNmNJk>c1rLVuhxD_nV
zt(P@Q>-+FO<vKum>IQslFyqygD_Bm9C&<^`1<$(2nAcnhPWelOT`y|sv77HuaKfYP
z^#<mVy=70=&r{|@2ZjP;2?;y*o56u4pHS0v67E;9;7Fwf*X5oKUE>c@weq95CCPyI
zD1QlRo(3$}LJWz+{=oi&h8zxOKHH0@g!cPRQcH<0ysXoK8AXPi?+hV&F|Nku1C|)G
z;~k84c3^wXc&Xj<VVKJ@@Gj0(=Qh^1-~&5TF3u|$#=O<$F8UAT571s{RZ_%Lx|30O
z^$lE^sLKiLf=ivW{zxTbKM4h;-zR3-s^c23OVHqY2#PdML&dgb==4K@7RJ<(UFuRO
zaBhUefuqrGp$e}d(B*noWDvnBRS23dg_)1L(L1jaD~2?Zh_}-)ccLZu47)%tOjqUI
z#DCG);}ry*T#pftJ27g72JdUU54mT9dD?8qwGFsH#HPJe;Oko&<K%<(KW@{Kb?UsH
zLL6Lrn1&y!AH(P58=zFL!6#QRE&QKf;F3{KKYGb?yH!m%4~JR!S?e!sw(dapU;ANb
zJ?pbiXr)bSn2&>LY%A{gk+^>v;Or9%F0)#RGfM8qPkY{nFY{93-RL^{>^IBB(OQQa
zBGtH6hjsby3o^0bP7UPu4C4Fcjl;I6qZlX<!`Q=Se6R03m~lbCaToe>JM_%)N$Xz_
zG^z3Z$_#n&v;kz>LIu8DDG~jDb1+%q5FBTGQ5W5CdS~ZNF#L23GvtQzena;{SHGjs
zR&L5Sx7UIDa}kKGCrQH}y1>ezpCI)N^XKeHhc3%t>eLyCrhoN#yeP+bLT@qUcr$!^
z(+%C@gP_!FECjq(h4}p%_~39F`ptcdV$c=l43Osy`d7l}f;*@e_L6S4zK!0u7%Ot<
z5MEt9l$_aV!nqvV4kFuNTr)ZctJXB3=cQL<@ko6x9`*R~`yZlE?<EK(l@PsTb6EMj
z2Vx@;e-HeNZi_z9l(^ljw`c=;f8t=<F2=f$UZCc0RQcSquW4elBCi*x#4D%2U^xKV
zD1JGK#5Vmy?u0Opn29!jjk|<}F$c+^sithNa}>QcFN4X<108F&io~8AhBf___%C(>
zZl<6f?Do{*y#dEj-t`VFIj{{+Z;<B&SIqgx8x=VZx#cKac?lmV#bc{I2i}qssFESz
z9@#7Ko`!d+cWx~_P(Q=+pDcuHI%1$=<W-?G=r=5MOu&<YN?b#lF`wU8g}<W_gsyE&
zYj$V__y?=PBpqdLrS2p6{w538jWgj>(_XQ>)zcttxQ}&nRQdTIY8ksZk8a!64c=`Z
zVNTT_@T^?{Z@z1DpQpTn@ImIh-p(=LTAhGC3Nh3q<t{31AB{bIKG0)N<Cs>@2##`U
z{Kb4Fu1y*VYbu_h*4r37Fs%aSEPcW<D2L$CyNu72e-DZ^^r1$p18fpKNbRv#O#3;Q
z-}hXHmlaII+L8=v@T&?0&j~aRnF&#OTOeg;3g-Xb2g&c=65#<=-aq|5dG1z^k{&G#
z9@>{HsrJRJam6_0ttxl&rU6f4j-bG)exmqOlS8z<A%FL~Ja;chp7+mUIs(Vp5PMpc
zAA9r}25wD<U2hv8*K{_9?DWJzzjgR^T@su+=otDosq#)^U(#!~+I;M?)x;-X4g$=E
z!E5n-EEs+svUh6nu?8OS<aIpgFFT2brY79+UK39E(SqMKx*lg8(%|eR?bwx^2(uj-
z1N~|ZEeL!GAv5ly(xNVw;V~O*m}cg67MqtsePCXe5jW@03X}^~<dlAy^K1JI;@h;e
z7&ki|e2=Fg_(j7SrMocnOe=K%qsE)EXI+^pz{AU)81v5s`1_>>6RoD9-AtzW$yrGJ
zMTs!<#UMV)PXTJR75LY>9dPV@A8umua4vTLP_SS44v)BK@T2?ug)aRJxW|55y|;>S
z(CKol*abqF_DvKow~)rjn!)M%AIdK%#G=NZxG0hBo$ovpPDnK8#ihzp8}CY*)HHxI
z>USG2xR;^*k}T>&H`4GM&#8SQ<M}Aq!}JX$XnRD5*G>@O&*~Of@-qPSl8)27`in5}
zem)-FsKf2;ABr0+hVVft&*1%IBmPdTF5lJ4@=4acrY%}}jC-{YN1u6%S{<sK^B3m%
zED_@It;0A!%UK`_VHq^`+0f?Ay!CsYLx;<8SbMe|)w9xRzQ=rcf9NjCc^Pn$hiSyw
zX9so*&tdxVFsS-=4;H%{^92sfqjT;Dx%u)IW3t_bd@miai~WG#t`?%3lqa=1j<7!L
zIb76G;3~9Ase%4|jE$N_POz*#g*lIK^3?mF#tr9RjLB#3(g5NX`He{23h>A)b6#n|
zW6<lcMC(KTU{P@y0+O`(3%>gN`eXyX!hfKUJCTN`EIOE`qLb?WErj)X6_9DF!(EHW
z!bXR|RJ=K~OgoTiP~<i-R`@J>hGi=VyPslL_9HYrX2Ok*41#>`A87UZAGo=q3=9m9
zf%opm%qy`DEduIU?v8|>ZWVB!1#cnt`#BJXWq=^az@bayJ32+2pn<$P?-eteal7~7
z@*masZNpdS|I>&w;fGO`xBWTe{#EFo;Xz}&jL~hR91XWR2s0O4g3^%DjMsG>`oBoW
zxd%;nTZtIrTpq)Qk(V%UR4DW?{#Z_d0cV|N4^BgwMt{~*smWT#JyuMn?aw=@f6!54
zf4va1TbW16zk;ac%mvAkufm)e`kY*A1QssNB$CXp!cw&}^nIc{ca~2?jeDB>m4xBk
z&R+&xQQ;Hxe49(I=MU$kC6V}HK`qN`UrLFzKcDUV7<?M#_^8F*ctTN`qesr-#tV(O
z>VY=*C|rz2pEB{u$4JN*jKTTAKA3rHC|}{00RHLaXg%JLH~XEza_Y3<=9{^AndxCv
zZVcrLuUF9)FJ+YO>0;R%Pp~lUA!(GFk=Jv_;3>1KaEft{*SJ{n*@iOsU1i31z5IhC
z1H-V%a4;mUzJ%2a0zmJ$19nZA0Bw#bLebxFFkyP)N^>)=@#+E6I(i98>R#dRHdC&1
zk2#*6uFSjixkXLfS3<{l4ZeD3Fv@55<)0qv&!Mk2zVsh}(Txhccu4@cv*jZ0ygQKV
zzM%zKo4=#x<+qFh`wCqGUXgDnt<mM_6BI4_gE}^5T#Nc$B5skAFOQY@AEG9V+x{6W
z_w?mc&a-@~D~fnv#SgHui-%)-RJh#9TF~-w9=J@8!Irru_<XPt&s~p3g_}e9^sX&9
zZEhQct9PP@hZ9cAJAykNCZb!U9jUFmNX2)P$fM3!<nAbOvLP){KZ`=wDka!iSA;XC
z+(fnRS>VxOgH@iLAPSy|?=}qNJUumVq&N@rSYCruGmpVqrku(AFnVXZ14g`l2O)h{
zKvw7v6wIzG6CYnB_2hDh@uE-|VIGTa{Sz>;i0O_dtMj_%|3FGYB_0}8g)2ggc!7Fc
zS%J|ftV&>+1PfKkCjCsDzLIGPuYI76f(5keK{MK`-6P4powV}kAWq#)kM|e%Foxbh
zhl_hOxLOl+m?A%fw~4(*b)$9o(a+z*z9YJPJbOQdD0SniT4m1X-5%n$B88qbQ007K
zH&ku?4nJfT+~jqG`GiesywPr3mdDzR-78kW!u7*Av%<eDQ=>?zJkbHQR~zF}+jP`<
z--Q11CVZ{>W~SwR0ACIN!G${nT$G<Yp3)40;^JWN$W~#s<dM`_-39Ipe2rIR0?t45
zCAv)iP9Cesb2-y3x!o3u+=uW+_;!G0SBB*ZyP`LsKKqUvedJLhmE+Y1^`&o&x-q+S
zKHgbVi^{!j;2J#&KGoGBxuVQTX6dj@Bw#t7^I@FMZ5ZBvF#q%930U&25WH*~QRkfr
zXWeXo2eu}Iy|@v1%S+gHR2!Z4+R#6f1$_L372tHWSK8?3Lw9sk;IAo0oD=y<3)38l
zK+8<no*W5TaV(eWMJ&zUQU+`G{)N)=Q^fH49gOc|4wfNikh^<4dQDW~#qb1;5{L8Y
z<~#%#7=YYYHO{K~21JH2CSI%wCWbY@j-})9w3iWY5V#)=mV5$(^-mz=!bgm69t7b7
zKhk8sDd1zK2?}q5aDw^}uFt07TpNByVP+Q$E@98-n`PK^A_^{j7{t>CdG1T98t1d;
z0~%%aLv1x9UT;w$&70f_9ovpUSIT-wJsAOsM*o2P;?K}`<Pp3@%y>cQ8)5eR+gNej
ziYAX4&PNoqqFW{78>DGVqjy!~-hEA=z2^~*nW@K#?++n;wTwCY5Ch)(><uUxb{=-R
zs&nZXm8cZf2z!6_=k)>~k@#mrfgkb~QC|YFpY34T7iE6et|eGId?9{{$-}fj0Vnay
zB@V$#T+OIjIP<;)8;5G4{he>LQ0*9el2_x_jNl<|r8nZOJ1AJUNZP$~0R%pl0|&PY
zXkq#VMClY33IyCLe;(C!Zlm(1xiH`O85(U{giGJmq209pywk<8(wZsNkTSmr_UR4e
z6Hdx;Z6h~R_1<SVE5w)^e)$X@^-01j@3gs7kJ7=ay8%WPs_+#BM}^*BZi0Hp4Gd9v
zjp)7?+V?#m6`G28j%6>lSE+*Fk_EqS;wc<<+>~4Jt{-PDQRV}z7vQ*v-ymIf6%UUY
z%<cL)kl$R>j0L;d?|bQeM&<p0Uq0ybpWqt~+N{A9EvN?ddH%TC!jdapHiU1!GalvT
zJHXm(9Oj<Rq9&b7sNj)0_}DYfY?%c2xmDqTj<e`=_BU!S4hE@f9>^v<fs~4N&`9db
ziCZ?1^t((u=b=x<_wpQyU3Gb5?k!BqdW$8(F6jS6j*}fv0QW_Gurvzb(GdeaWV<Ki
zo=<`A*=n31VLEm7JB0$DnPq#FhjX=$-_n51+rey!5S0JSgwe@%C`t8%mDkU~qrN6w
z_}-l$Gp~oGHp939wYNci?GSp!YZ!NIlLYO|RrvU6&5XrnLVjL(!g38(L9_ZT_-UAf
z{$J{7vD-nEWX`0Cdo=iIm8;RlQiV?L@W2|YLg<__nBRDC0QzuW>C`w~?tN!A(~`bJ
zYg<MBW6B+TvM7?}%m1X|IW8FS^#g7*8pzu}yGQS+QRcI9r5%#e|3cB3IMnogZTxE1
zcpEeiw>cysse9H=65?y<N0N}tNs>8)D@iH}6{0~zNTQ_fS(}iACL~FcxuO9{hVt(B
zFQB^joW0k2p3et8vST1LFBVpN>l2ULUqJBEkiT1ELGM(gL6^Kdc^`2bBfjiMi?#0{
z-CCWx9=nCo+dI*HZV>E#jaa$V9}+)(<@3Le!+%3;=;+S(;Haj=oiDN^vzPUNdQTk)
zW^It}y7&|8qq}jTz!}QB5-?yvC-^KL1*tl09^_ns-hoEg`F1@D<Us1^i2S}JZDK#t
z6NSfw+~?`9Q29&+*ts47t+GKhi~Zjw+x!FBRc6Fxvo3hvi9nlSH^Fz@P;^w?fqNZP
zh-l7$xZ;Nl=Ku1+l}Eead4nFQSKS9aojI&etOy>xEEo1~8GoQ{FYeM&rCJW(P*Cs&
z^-5xJ&uCq`(fbyD->yyqg?&;14J*+)KMG?WZ-cl9LlPLYn$xIJA{DXaeEENu;PE|k
za%HSGHJW8gj+SeZ(hJAXG~+uioVO0bZ@=SuBnr?JEk~zWG~t3Js^oNA4qki3Sk?D*
zv0HBnpM1%fwp_a>ZctRCqNEGbYmyFJbWfcQ`mRPK^{!&&b9bOZppK5Bas1TDY9w=~
zHfK3QNY-AGBND$k{7Q%8;5gHodVSQR!nGU(O)cPpC*`2P=#p46F~1}>tq}yzFMwy$
zaFC8@0+#|^jMoap?ygCE%Ysu<!M5O%))NuzJpBuuPgY~1hAy4C=_mM%`5(^SUXODE
zT5-C88N7AK#nQ6|(!{T`IKg)>sb8jq_ZiuaMO-)htdJ*y@U!DwdM7~sqj=c$Q%FBQ
z)Fy%QA-vt9K=7M>kT;w#A1iW2RK7=+sO_hy+G<3HeS8an6Mu>awHVXlzbqTsHI18j
z@*m!L?TnaUMXju+FgEZ<EcMip7IxTD;|UTNdB%W-p4$)cEYqI4?JkIJY~+$M#1N2R
zOf?gp;gf_;=43E~2A0n}C(@#at4`wu)xo6tO*MQBDg}de=A<a34fOMS_?kE?`scDN
zS$kTU+SXf<p<qidYbuf<cn^2kTG7oLjmiGYF=*gvMTGuKI4D;|NA+L4{xe5V3O&Jp
zDmSM;gGI#W#!l$nl7<hrw!%eeC(10T0pG^Eu(0APE)JKc+p_X7Yxh$)Q(;V!4YkOf
z-!16#%#=0_n4z1`O&r*@9|fN~rD!OE{OOE);j&o#%+7?IYY)Ne7c6L1i5jhcTLpFj
z?0o)kE57Sx?|HTRd|1XLuo}kllNB}uPb`6Nje!_`^bFr^AV+IIupL;k0*$zRhOvtO
z;JP>qI=NK}()+L2XI7h-8EKIob7k6nrX4lMnNaOu3;MV`9*1f(AIjlq&fRu8r@s3k
zoN|bPt&IY*@7@WFiqru~vofU48I7Wasp9bUxm;%kLby*n-}F@uont5{E;b>e!g}$h
zFc-)X`@)Do0qN@isL@YD_o!>q$*X#?(&Z4QjGKdDoi^0PUIPO&BS4fej1z=G@qJlU
z@`W1Hnhj?#4%LWv{Wsp*kpr3FmFRx)G<+Gop7GDL@mgIg6l+WQ*>75~voi_Kj&p$K
z|B|6&tpy2oeE`v&{g@Uzn7G=TkYRTYL1b_q=7#oSY3MQWdt#0*5vhFi!A^J-qfP$p
zR3=`%%J{$23dFAM9Urim@j7=Lg5p?1+IqW{ziyyH5@p!A?6ETBIi82urf=we(pnmu
zQ3Wfn+=QMz%P`QYiz_;G2l8)R=iI;Mai80aiR$+8=rYjEhb6H2xswWQAf9ZtPz_U(
zs?q269?V}R<Uf=TBKjdcygS(ff6g-2-6iG+)ISG_rw{Slo7sKW<Tt4MxZp7D0yw@d
z4YEZx^zXLKSRZyDV&a}~b|Y?~Zq`#22*l&ufAc)btbt*@-!Wo?HZJe{g{{7k+?vwo
z_$^9?>ek8A&4HINmA?)yGq3W)g*wE=HvlB(GF_k9u=_*(Ea)ECz~wkS$1B6NNz<q+
z5O!$`bV-y+exM084rFJ+9%tx}W<6l-4P5BCbeLT74$gUR$8^Ud%p1<S)CViE_54)$
z=+*{zPbreV7sDak;1g$(vKNQ#uY#C0iJYV_rsQqH2j~k4$IO~?>3`jdM4$i653kpx
zpS~H=BSGKr=b}EG8>3A0!~X}0gVczw{aGCG;y4D>Y12*>6<Yh1v6~h@!{B{=(C+*P
zekfU!Jx613m~sKQ5A25M{T&dv)0A%fZa~X-{6^@tLDxA$NNW0B_BB(XUeD7x!H^mZ
z9dQT;G+LOyb`JA8&4jX?TKrah0y115GRI&k9REl;>%_-+Zv7wdEgD3fzR!RQnrcL8
zx(OW|W=h5^i^awh26Tap7WGS!@}<#^(zN5>G5i(J53YZTZ;UiZdQTN39-qYb;8|$c
zy#w`Z*d6yvCuD&+wSNB--`qDPbF^z2e`X^VKZ@oD$^-B^QKEtKc5#u+ix6cu6x3JP
zQoA+%D6ei#w)cMmuWd3k@L{ibNxv+uDv%*zs?B`UaOUoqUdTV4Fo>M}FB58;_JDBh
zVGO^N&QDLv#Wdq<n4@6;AKPlM?(=>S6@KUcqZTyS@djqx)+K-6Xk*cPMXIqwhW6$D
z#+%F8*><!gbx#WAmv7htw}h&s)oTJMyZyqE2Q}z_HZL$_^9~fg$>K!o+qoF03eIew
zBKek+42cyZ_!U7)#K6gh)D?|^J@+Me-1-`ZZwQ6pq#jtQ-v^@^TeLH%7)r<IaH28K
zq+h-sg>j@B1EL>5`o9a<>cg{fUlEL4WlR${g|KY(FaCJLDemE6#Gs;lJ~Kv>`kH<N
z<-xtIZ&JW_{wan3vHi`T5HnKADu0>AHPSC?2cdMfA@x_UM^CMG?26N*vyb#a?B)B=
zXY~@J=IjP?WDr%ldW5^JDMKU&CA?NN52X|Ar2?H>uKj94GIM<s`0h_brEyQW*@3s=
zVrc}b`gwrD>~0JQVEt~lWoZA-kSf{K@(~$xVBe7{j4ReBJEj?uq#9r7yZ@1w|4*4-
zTfL7d66C?J?ipv7Tg^l3Xb7y%<3;Hyd}rKQP%}~>$>Z2O`|Lz`aZQ^lHtCX8)QB`J
z-vFz+%5i1ledzu*i?{UFAhD*#^a|UF>9*H{)@}m|V_f0d;xtH=2}g-+ovZucA};3s
zWq#5dInv81(Bsw4;MBVej^lBVn>`2GRgS=sdks(;W&<hv3z<tg39MrcNqFpHm{#)`
z?Em*1tJ69m<$t$8+;I~QR49<NMT1D%7*m?adQB;_($M7t&j-k#1e59vbgyjZozLa6
zPXA>HX!s3he(O`a!Zl#oC_@A`_r#GOkHMlNY^O8sD2~r%@6!YcdRzVCV=7y@cZH8o
z(sZdrFrWj64!Ywa1wC@8+kh5Jr@_4c84$_tQ+%f8R{nTeE<ZTKng}cYam7z&fbO-Y
z_-u^=6-@P!My7v)^6O=gKK2((=^R2z_n$=f(nYZBlstJ*rA@|K$dTfTN_1b_jD+2*
z?YH|vW6)2?+Lr?Cuf`Y|67Kk|B3`m0gDZ|Sp<92c(Duk6G~Im+hjpc5OX&(&9%)V8
zZc9MxSREEJo^z$eS$un?1{_vNKrkHf&3k2<XgQegduE8;x~_bOzZt1D3PO!(awN4_
zi8_&8FuJT7lvlUo@=Q5gttvxCCmGVz!RNr?=6mRMXo1NwA24*w6?o*JMkjnfigvQo
zpmo<R&PDqMH)WnO35-_Z){Vc3EkTdO(k+kS!G3k3*J(o%b{kRCcr&`C_BD*Qe#$uK
z9o+PH=P}-N6X@4|;zM1Sfg)R*infGuG4AOQRnde?Pwqv{%Z5bhQaXCES((C(bUbsw
znuyo_ge4ahNLgb9Bpw{kPrmgC>;fP1qvhw}<h={<(lr5j%HGkfxk;S;-4CehxR!Ya
z#>4TB0j_1~BQ7!T7N4|wCHN1z4-IVteB{|I6#Y{MNldy_|JggP{mw`f9kk*ft$B)<
zrP}ns3+A(MXFP?>O8EJt0z+<01^1^4T+4quLHeNx1VJaE^j?-YFn1>WUE>1{OLQS|
zoC@^zT?2tqTCw1UM@eGTQcif}FqB>O#;jvkp?26_m{Vs)#$3=MF`Gm*zTFH<-PZG^
zmknXH-4n*MGRAZN8raVD2-a^oitfJ;iM8(c!W_9bxaqqV2n1bxcK=;8R<WS&87IY#
zC#q4V`!NWTt{3ZU+sL|GaeNxpAtO0U`ulYY)G{t$?YkN*7(0ag*KS5Od`*T$+ttXa
zte2P(FH2?|e2=Pk_Ck<r8~1zFW2o&<<=pJ0u)EiQh6yWq%>%`_A@?VUVzR|`cXB|I
z(1u+zY-!kXZE7E33;CZ47~5qfC-FT7m6kbhlXiljE!@>OE(Sy6GU40(mk{1B#*109
z)FbjRtkg~bFSqG%+&2lQ|5}cjxd*uQhgz_-x`G!K?iKrI=i&JXT`D-;RZ?!-08Ux1
zU~%#-fQ}&)9@3>2@pW+Yr4`vb`vCrto<xtRpD=!gJV_q42Sq&=AbG1vqo=)M`yyWa
zz9SE-l;o)B-gT*a@B*>KL4~uPl!BFuBf$6S73iLLp0N!QKr>s7ER&ZdhHrM@%s=b!
z@dZQjduADAq9xz5>@C-4)WQFXDZ||f7G&Fg9&gJUlCYVxxtetfB=X*B)Yy}U5#gq=
zcd;h1-fK&|zGU(ma=Ntk&wDP9pU=89^<2YO4Z3vu6FBW=N@5E3fwfKw*19Os_Ge%5
zxIG8I_cx+7``(4b)-&(a2baVlid@6LtEd_;4`*#IgY(~?kegzQi!`-}t*17LX8Wp(
zlb+zBKQhGeOcvjM=P2y4x1uhq)aW7qP1w8S3`oAzi-qAzRR47h*M7AG_HMH#5_Wf;
zu`!!DBZi~%u+NaWr<rq~;DYXw9%;6!26e1g;pHyJz`0}_4E|V%uF2NKN7kJA9qhzo
zcPo;?a%LoSK4UMf{E7m9${$p=BumznU|F#i=%`zQo&FL2>3U`2sMCezTYkY?{vYJz
zD?pz4Gl-qR9C%0f^9sJdVDyeQ>{eaJrN~}}<XwYkqjNNfs%CR?F)JWnPKFomx(E~Q
z$dIh9znHTz2851#!8)@M-3zZs^`~9oHYO!v;Qs$O@A<%m?J(tSydI)9>o#gVuZ3sh
z1+)Q88Q)XFD@PW>6-hrPx_UsSdIZc&ZveZtXfC)mn=#OsH&8kg*B@f%P2Emz;7J3B
z^tRxH3m0&E-v!2LZRSh)I%(R=D69`CgyotU5R-F`_ivqmiOKVrr{e%G&a40@nJjSh
zAJ0dnc3|zgR{rm*>EJQ<HFPKD!s;$nV)}X?n8$bE!^TGFbgF}}%U5xdy9Lpd$kEBR
z>^$}CCg)pkK~E3?nKwj@)Ve(4H;o<*W4~qL@C*@gk5LmJfBl>jn0vU+OpF45r)#h@
zka-Y`hC<@vLhjKhJ>rsA%<GSFLZwBAxj6-c=q4*ATC+cj`RzvXEwBKt`(8(DuqF|{
z3N-RR1?o2MDnv27z_F`?NbQhDe&RSCqVOge8W^`D#o#5h8tURY!+Yp-(Sj`uSPrMj
z6(rjFG@??Es+-(FX<a#lbIqLZRfL?e&d|7UFaEotLR&tjN$aD!;NrMq@GZE6>K7AH
z)YQUHUiKMcSYOv6^C@#m9N}j_cmeLOy1Cw?4<PTBJ_Y?a)Dk_0IvYEjxgre`&vx=Y
znXJ#3WKBym7Koj{)<9j?D-06<=IgeofZY*GI{tPBHk}v>d--@&^VT7Wu$YS<6w1!S
zj8Cz!zj%1ADfKgX&$~8LobKiWb#r#XKB0)ZH-t$WH<;40qb#$ar@+4p$ip@IhcR=3
z3U_GDaZq^GhJ8l=u;su!E_Br^&|>rQ7xXk53^OHBzZ1c(awk|%RwR+*KSB5FY(Bzq
zKmO--6ZQ3V=*o~8=>9O0D@^!>;tS^V;^fcpKYzy4not8DR7`28rYz0ba~ZuSTJiZs
zGQ1bu<-U!G!MERDLD+TVj^ELt?(GRuL*-`h(6|hh;hxOd+9;j0R>*SD_8?(9dqF`y
ze<s?5%qr@`nKk9Os$QE09%<y8k|%@U_!6nGoz238v7AO2+aHA3L*A=I2=tES;tNk>
z&;QSL-IH;{`qR)%zc60g9p14^NE^b=^KvKSz<c#%PJMF;(4*0)H1jM}$Q<Gm*G&M=
zZBv;mHx+EXl!=_PFCK0bQIimLGHad{-I8(^x-}Q@op)22zg?cn1h;|vE*&i9CPThS
zIsa<QV3IIfg$TmFi=Xwd%*M?tT*LPBurTvDELWccX?-I4HZUK;17_f*{pRG(+*%NA
zQ{eq&E<x8TOH%3<Ctk)fzkwNj((t{{vFX-j5H3%_tUtzdC#uoa)i*G~(40E?d_aSQ
z54dx!EU9((;~%b&;O`G@P}iJ;;<IhAoPECi?XH8B%6AmyDWI)&Ic6>4!A-FR?)z&I
z_lKhpuWm;D<{G{t!xL`*RHZIIu5%Hst}wg(F?za7m_OYME3@Qi^O9bO``;j1{q6(a
zbyFt)d1({(Q`@A|dLF=+9#fh*H3tLr_VH0Nj4S9cPAn;2%sWPzVEp54*nQ;<HZ4m6
zuR06bTK|)GcL<S|2I$fGGY6A~GwQT=+<6E~o6U!{4CTEW+c~SaK^XHg6V%L1>4P(x
z<myCOV!#~P4S{KVMTi^JKC<Eh*=){yjy18{tWHB;)q|jKCREF7QiIx7wEv`qoBDzv
z$!HeL($ppIioY@5)o5<Ck%$V0d5hPM=)~4bZQMA4F}e4(3yYrofG`tVZbSTc=$<^E
zC$idfKbM4?z6F9yt_NoB$i&#^jiB|p3&D9Ad>ru(yyw0{VZku=jPT_$U8<y><pa!7
zpN9c#CQ;<Z-bJAWB`+2U$ly(;G<d**{y3#T>TkRQzZ&KMSauqsc3gtXT8ebT{3=lY
zMB(!R1Cn~3`JMIhAn@g9?tO;^72aUGq0TZ;-ue)N_e#Nsu|0*8KVqz61?+m538Qak
zVQs=rZv3M<eDFhwbj`2Dryl0CA^9PvZ8DfnkK6#yjMS-((+gO?*MQyaW0)JKkDsq;
zNWQqcLT1fM?$Tdb(tF?&+7)DQwP9<yl|5I%ef?Yh++Q=gX7LcZKTwlAsr>`7PrBgq
zi7Irj`ojkeKL?^OT70H^zVziVV{-9iBwUrK5aIDS-mz|gzinzw+#BVkr4L5)y#bHl
z;4NdKBv-?2c9J8*_C18j-@ieN=Mv8U!3kKF{sk9SX;7ay4im+(oY6d8a*wcQf^z^b
zxGCf%!@o$~f9(=?j(QGD%N1$l+&QRZy$-&u%Y$iS)}uu6qw9naQxdzJ-9Lr9ur%`p
zI^B7P-{yV+$(>j{EB1!BnpvPUcs6v=L1b{mHIy3@h?25d7-6v)%5yp}j?coJSwDEW
zYYO;ja2g6GN?`axed;nMnd={^O-$({tokcUt*SiH;mmi?<yB~4DVw_;KFgJ6Zs+RP
zPeY+|7Ay!bq^7USz*?e5?UogAt-&_TVKs#Kr7GfwMk6BOc>cFi9DZ5;2~B;9V7-eB
zwR1<V;c*05?TAO=Ff))_co{O44nb!84nE3JjqdO<A>p3sp!7<KPSY}_8wy@R(&GSF
znSBTB6E=eTxTRcl_!4wiTQ6Pk!-DK%99co97*@J4X3w-LZp!d73?OlM!cCTxPoWU7
zEFT5ezH#d9Z*d@g0O}ve((_x5Y37JL=@XsLFg)3uXe?4DWi$%j4rMv6CR%*?v@P+j
zxX9J+(c=0{{_^Wbu-%=v5#KUGmg=g?P-X3N;E~{f=gxP+z@Tp+XiVg{n*`vNncqMZ
z^ou(%`92o$zL@p(4g~sSi!E9%LecRGh*>@lnqpPxtRz$7p4^B6tJhqF^HC_fWsUy7
z2EdcBy+#wu!`A)}!XH25F0eU>T#N=S{VztE|9lkA9Jv|8l6Uc=#@6G~+z8Zd2}6ac
z?&w&p$EVi3fmY{rILAeW1j(0iOMmsSpLabjT)!5&`-j6Xa}DzHpA=7jRwq9D*MN?A
z3oM=X5(;#TiOvB6PoG<n7uL2UaCaaYA9;fkw_C2qkKg9K#vTBP@?y??(K0r}Er9eN
zlhLo)6}%QRHt#|Uy8p*R^grJYEoVY;@zr>=S@8`uGMOuyu|LELwHO(H9$k#D^EGN^
z7-OmegU^}LYwa~48EMYg=YRNBkN)7yozw7g9=pTVm-E6oaeT%17kshdJwE0zV})I6
z=X!?>#rv&{E5gnMzf)^KNjL~4>yyRr9-9&6<}*P4CrithJ%OeZ0{Z2o8T9$|@loT~
z!L{2jprQIUU+9_+S;4c=EAl3<RgSP$vku+g7)jSXwV*`>^H5_HyW6*p<U|i%@H2wT
zG5z!t@UcD*F{SIF%rK0->rV39tc^*cxST61ufUpaO%kNE5Yjv4sr4}xidVkjjSe%i
zWzk%mw)#2bCB1~Wm@lYXb`b(b3_#Mq!C-Q)9FGb<qkHiT3}2&&l~V&)y!izbJ%5BV
zV=Ljq4gop*MMNzAP$=Hm!+WK?<%^H4fbA<4ss7|W{Ii~)n0Dv_c7IV}4hmo1e_0}w
zHeBWxE!vBvKZ0T3lCNmBd;y%lCQl-kcA@9w!|>TH8ds;@#dUU;L}04IRb42=p@F9K
zdajVVbMwSb77tM{`*N{Y++^S<DbSXopIAQk4j%JYq~Z4ua2_3R(Cb4OlmrYSW8@Tw
zB%v3p%&bVx>8pH;<gd8Whi7i1G+uwsJ^tzuRXW=<2*2fAhq*t_qfw+B9jSf*rz%+z
zuUc!GcJ?)jZq4E51{}lqmjQ6ZW(+26kA}!~9_TCh2Li9*u4JbYtzEkoRIjYVQm_5e
zO+S)ZJ}8A-sp^8$p49+l+2$?V9^)Ms3Y#L2!0KfJGQr)5o>dx+fz`EAsgEfwJ=!EK
zI{g^F?3o2=HZ{1xZ#V9kr%xVDwj~WG&%v!{rD%LffQhS<I3K$}Tu0MbZp;zISv7EB
z*mLY3tw@TaWvRBY9BENCMvt|9;P_4(9Tt2*y%<FjzH%Xi+{%Qzvf3=uP|uw)O~6C4
zjJvu~fC0ZPP;HSpedcIQ%j}imZ=ex8j7^0<Hdb^)=TWFk5JBgkZX7ZE3rgzjq`OLf
zFlOgGu+q`OTvZ+D`?!yn%o)O8Kd(odKF)_d!>ve!$y_LF(tzlo9;iNi8(PBlg3xdQ
zzar9>w$6IT36<Ko>MifU@}dz5c8SLUr3RFYR2N@*C!`*eGBIf28v0~AVuX4eD%Yhm
zzKkL4VvenOdrI)eW)rgZxi;~eQp+!8`KBI?Eg0EUi&>RtK(T5FU4O!u1YUm6N0qID
zLyJ$KaFjk*IwVKD*G7xfru6e?rgUS+rbslpC?c`nwCRQk@n}D4C`9R-;ODQ&xJ!%8
z@zh$OoH19sCJD%=2TCOWdp6&a7ss{Co{R47su-pl2Bj$)@b|SAj@B)NnNHcb%c}rl
zwz1vCoY7$1u@@GOwWFpD7a(W)L6EfU<dlqbX#Vyw5d6)Wwk$t{fjU31z4JLX+|1@T
zvaG%LkZ1_?{s)o|A4+x?QjC}|2S#{Ul5g6-p?0A<94HWxuvjB_zkfPRwJXByC+nGK
z%#1AU2*&Qur}+kzdw3->7Q4$lc_r3=(Enyimrm;fAAt#|I_uHS@z3G-i*A-H&E+zk
zwU~eO7@W{c#h(pBh>KGNAAdz3N3*-;kMGLF+un|4SWKjW?x9>YV@fq&834PhZ}_Ee
z6=<pJV)5S(%q_gVjZgjh8BUeXf-psS?pCKFk?~o9N-J&YDnmOm>yaGU{-7U3XQraf
z`qjAZqZwJ}#j+4yXJDMYkeKBt5iUuOIKl^nHd_ozSq9j@7>1R`LZqMxrh7VKP}WVZ
zSn(S#taIkG_6?@hZ$87ohG&q!HGvb&)SxXRy*c4Zb$FvLM-tnna{HV#NZp!QpygEo
zvjf=PbA}JN?Ro;W6W((NpQsV{;y&^0oHdxTw;Ple#<6?NYjl3%hxu#QfW_#QY;Ive
zMRyKy`WahMp`Z(XNi3<rxCvX1=7}4u#rWlv8<dV*3Pn*_;L@ZI)Jlg2D%;{?Sz{8F
z5D&w=hvT*F_aJ^mD9V{zvw2`BpRW_l&z^G=F0ZsA;VnwkTtkaUPF)kHzpKPVlj&TF
z1CLJbqp`B+7(9M#O&uRcaQ%Kd<iZIX(qlg!wsobUbmdDlEx8HCkBn*ZHaT)eVosi{
zY=&8TZsNhkBC=z+C9PN(&DTaV$M2m#u<%$F7JRvYan-r#U-|;d?>B=We=OH|{WWMl
zIES~*RLI}%Rp6zP%niLgh-Sukao1k{0O7s>P<oomI|jRQzq$vJ2R@fEbNE<TWpxGh
zXME+fB<1k0K|n@3{)D2FGBkK&5*W`O3A_4_K*#bdbYCCK?<RKCe(WVk%~*=Jrt6V2
zt93}%Bq51hSq!avH-V$*2k%#8NE;4zacOG?letq2$lv}36xAKzZ79XYjVymL_d0XJ
zt>KKKl&Bo*Z6y5(W?l{pZc^zG;?B5@-qjuaT^AF=dZwUa{u~|r&!Ka^8Wlb)<bFM}
zCc9X6bLk-Fu5d^Jr<VEP{w7m;d|EH33N4tq`JlLac>oV@j=_i(SJC<Hb#$vc4V#Tj
zNT`P|j=X6|U#u07#DDDGx&05O%{HgXt~`8XzqfBJqdeW&3Y510;5;sr!nCkz*wkl9
z!`>a`RRT_-cgI2Q`#u#SS2-B#{`6v&sw+C4j|aivo36zL@=#qFkMCO>QSX`pS?Yt}
zd*&__yPm*hCs_vMr#yY$#XcLJf&A|!-7wgi<<#GPgy5^uaQssNetv39T;>n(4eEuQ
zRm4W-xUCfzGRNPeRdPho>*t#2H4$4Gr$#V0hmZOxgQDyCoTj@9>t1Dxy(f)eyu?KD
zOq02|Ph?3lQ#^S)gVVf3>oR9rlZklK1+NT!h~YiIaPHW{ECaX@zFiQ|&<hWsA*C9>
z*|ORA<OJ9<Bm@V3CgEy!N4TnHK@Lo3^TYP%m{YVDwokeX%hV0Y`=rINMfo;3dYkc?
zJ3YW^j1vsrU5d^PQm`7m704DtvNWd`;+yRtIyMOdmo4YK0}{AIg@sT(=Of&@t3&!<
zpG1Y-kD$-uC0E>a5cBUiagHC<__=9%H0Ih6ym0pj+D>3RpHNk(@suN3jfYrvyM`Bh
zo-0l;8A58$j^#q+H{z1^yLd<^3K|w{fWTuf#Yv@}V75+?Wd3Z$_md{UTX_eTZ<j&C
zRvWrOF#&c~GEev_ZA@a%%=O9^By-9r{#QGj)n=N}9v^d%xOH+4Mjt@U*qk`3{NjXe
zd$?U!&O*XUHk%a)Y3uCoT(Mp+7xYbtw@dYi?dIzky8S*(R(c1cUg?w8Tl3iL;RFBI
zUWnn_SO?_5VyI570{c0SQTbdR-rU~=eq(?0ZW*jA5b_&j4jltG1rb%^exYg(bM6#B
z<R^XAAo^1Uuzdx~-G3j&EB?|Wxm9dW#yG`tuYK`ywhEd3JRN45UBxB0Sr06w4T7fS
z^6Is%fR-BA^4*Pd6OF^|UsP$gz8%+iq7ME=Xwkq8rF`c=6Y6d%f&8jgF27irhQEm7
zI&XEso4+>H=tmS*7$rl5`F3zP+KJs6hrs{ub+pq+=9OqM*S%NB8M6Cuul{{Fut}Yi
ziS=L>n-6^~O2X)sJV@uoz#5htciFfJ)_<0v`PWh~cWef_$f|=#VFFBwwxpAXt58SB
z?Of=SN?7^PoMv26r8Z`-z+-9(gv^ttH^QExe4;fGw1{{SJmtquv!TMKt-ROG2(VeB
zO}qDopz5ht=rklBGta#jdwV%?4vTK1V`?)7JShObV~JdtogGI`suAT#Rk|%%n?&92
z#QYx}{7tP7_)X^#oMmTc@4X-S-Dj+6t2+D1*6J``<~#6VSy1IUhcQ0g6vJlra0ANY
z(7CY{m##Vhp9(JH9fR}O?Kl}+mb3lbdOJQftpJh-X_2JBFvu&Zf%x+()Fq~z*YSIf
zr4J5ESN+o?vHl0ZXFK!kI`ZN>+&vr!VY@-sRs7*ReLBn_5L15jgS*X6ZqUI&wA}hR
zgc%LPeZ!4N1tf8a!E(Ig?|yWzodpY$nY($I0XT(y#+kRG;L)uC6s~xH4yNYxNp?1*
z=$cT;<r40SrUF^N$CMVob735>7RJlH%G+y;aBu%}TsVfY(8elD$NW5i=xal}9fRP!
zlK~MhZ`^FzALxAHH&n6v#Kq6Wcztp_iWUFDgb9YkG-@5{e79wpibprvMw*gL&qV0+
zi{$-xAB2(9Ze!qQUH;3JOlWaf0AE*{()?dS>@VMoR=$_u>_8RDDf>gvW*hLbz61}K
zRRS!gSSgN!mL?aFEQ&9gm8L;N&mM5=Z)uUW-;^lUECo^0RIW!p58ACi!Bp1EEcY5h
zZO>fC>)RL?LEDRe?SCJ`*}wPag(eZckaCjQ6UC=GS3qWOk@(49IqJ!}G3tM6;m-4P
z-1x?r28%9YW6g1Bh{}N$cNhN4o_%OmcM;>33(3PbfUmZ*{z760avh3ftyqpsZwvtC
z%`xzi@nGc!O#>^9L<}GAh&L5SU~2CT*m@!xN5}m{RfAh->=Xzw4|Kun>U%71`_6Z-
zN=CmsQJ_D`53EiopfscuPeqwhyASFp>5Fp>S|R0Z?Hcf!GRp-#UB`tDvggb_WvGq0
z7CkgG9fG^=LgIUt#i&`2?iOn3vh@OpEW-Kf8yBIz`UN-&0a~3t@i7g0u)xWLc<(La
z;{GOJO80MU(4oBkk4DzNNrx(yJqa2;1>Ppz#H8V>aKXZcj9h9+M2*_$es;f97|3xp
ze*I8ew*(i;Y{Ztqqo8^9Cuq3PjrM1fq52n_6JAjSVU8_tPe+63y*no`k)@M-Ov$zt
zT4Zl*I--pcnEv50U8w+n9WF(!urlc0bdb*)(+<^!_fdCBJv5d!f@E&FbQ-g`{+`tf
zRA58iq#ng-6FSjn_h#r`^%Bo^>w`|BEcm9`qlNEf@ZPb4^Vs(pb(YQtt;et7wYDbN
zm&v+Uyb($wuemm}F4==hOXAHsDgz;JAaq(X_|A$4QNSo($@DH?_3{(WyQjl?iNirL
z(Sj&eA47fr<={2X4?_>!fU^m6@zNX-@jhtEPYu-}Ilh^U8@f;Wsac66MpbZq*)REM
z7bjfOH;9CmJz-h<Qf|Dd9C^5)792IwdB4r<`DUp}|E;zm@$(KtbDk#-S|Y`@uLqIu
zR)5jA`8T`c8-r-iPfjGS$G0srp>hXB^y(!&a$hJQAMdTj?%&J#ZpSKKMaPuP@D!0+
zLwWo}WT~Rqm>NXu5#Jj<xM;R9Ia2cnn(v>7+r1{Fb#@P@lw8GmFZbb3?PjdUeZ44J
ztH(J!+J@=bZIFGV4<pyx;hPd;;?{Z>kBkq+&)RI4p?#gte11SY&sdkVjtu~}<*M|W
zK@^ydx(Ac)$ddO*Js?1X`3M?jL*$NC_`k3rBx7YF?wMsu$4;-s7ulMm*Y+382#dy*
zvj@{4^W`WkT*<XRxC+sEH<`yp1O3_k_S=u|AR6|RJ95?uCB^IbPJc@jt$ZzA%AO^9
z)y!!i3P$Zgk?2_g_-Csb{k-)Z&iK!WR^PLvKUnrBwYdg{zHMQS%@24jVH*1XX@|C0
zB~s#~P8u!$Vbmv8`eus(t(UQ(6Nbo>rY<YG>~s+-O}~epm-a*9mX%<eEJsFI3+c^(
zk612wj9vHi>7{KJ^tkm!erl%$X^5@i!>?sQi_==({!%0?T(=$%D_zGUXD)+=nk6wD
zl8P<|>p1r_tYexy6?gmT5j&eAzMLzCW<QG6ZFlfiK?$nZA3}E%0b})#67T9hkA<Uk
zA#d70mLbuF`K~I&qjDjRs@EX?iURsQ$%x!{9YQR=pFx>7*KtLT4Dn*VX#wM4C|ok9
zBdb)2%?##Y{V^WyEc=JkLWiQ=ybMtEP$A8qV_>(TBDwm~hQ2m7BmoH~^qfN{1l|hf
z5=|d*r#B2DTgU+9>|23?iyvIf`_nPFx}D9#?s6Yk7D#5X9DTcO9+oqoS?$c(usWOd
zhP96K+-eQ#Us?e@uPjmjd@pbK<1V;OZw95iQ=oR~YCbY81wPJ*WDL?RC4sl{IKhJo
z@!MPrYO{hly0RZ~qQ)zH;P+L`3&wcO?+-!pU0YHg_!LH~Jcf97PcMBCBn5L1*mC6)
z^r@?3>}6|e8dnH!KR-s%onzv2)_pK`atrEvEX1C;77VYvj$3kA|N2qB^xAKE8k8sk
zLB(9&eq1s3&K*LR@121+hhBxCGCxdOYzXFN=0vpSJ38)}$UmNMP6V?4B`a8kKzVH`
zc#18cv~IrmunBwLcAkd?g!NX|zQ$BLS$h1(5Ky^dP9*pCLab9eoY|*9efEt&_xWF?
zU&pABxhG}m45CK9EK$Md+D+(g{gVs(?=V-rUkX!Z4<ZuBGA_U<7lcn7U|8w^#GGSp
zg3zaY@2v{Ryx$}BI&hM2g+q8KEf3eav+PdjR!n}QP8yOWT;{=*yj^SsACYGcs%mZM
zcUYEI%#!d9N6Ii-_AfqPC{G>kIH9q!B3-C|8^zy$VNh)nw96FZ-_QUUp<zP$RrE*!
z>o(RCA@y!Gq?yLuSh(aDIvP#qreX$$Rv6LBFEcPhU58{YUIQ1#*iern_aWki8;o%@
zCLvE6F+7fOwd-ed@-=2e?x+Bbe-6gOI%d?(?JNW@Zw0|r57&Sb_aP}?9>PXj(IJ*r
zWEb6rUdK9k%~$F)@Ix`Xzsb?mkdyd2Lr6r=gRxIBft&U;9Yco{p|^1Xx4d^Nj2$o|
zV@I2i1YLPLj&&vyeT=zvDf(p769Kr&vh1dSupGZ5|8Q3_R{gUkN};#;bBQc_IMx_{
zjjhAA-Rea2^E<z&(3t9!Oa?P`6*40CBfbtPMnCy?yhtmQ|E<>!y0yBr^>-hp=j+ia
zWvpAj5Ha$`UkK8w=e<=<bB`aX5uJtmQ5dul+mepq^$pp0e&7m@Dpezau?CEBx(}zU
zyN9YFQ?aM=2M(VpPg|OAa^;bI;QLcVx6JPZ&2t*$$}R;Wj5x+S4NQmJ3Fh#?&xFJl
z9fDzB?D2}uPsXS<#J&j+xHSvo@kswM3}89p`(xF}i@n;U;0HVFPB;j#*@oulSyI18
zWn3ESkr9WK$()HcM15;78V+NfD`6}~^qEtEt4GO9;~G#Gb-**B7GqW{M46hI81nZx
zPIilj^%)0nxg(FEt~Kx^`XjuXtxi(YV&UH4T>Nq}8v^~a#Wz{cLusolefhl~?RVcr
zkxdsb2-9>8_h{n<(lYM(XBqORRgH%IGvxD}<6vPR%UlPY$I=BO`5KpY%pA`cuBnDt
zcq$NKt1^{jjNsq9b>grW*-&}P9^9@LLQ77q^zR9Mu>DVs2F^|9#{av8XPO)FeP1s|
z+3G=Ulq%e-k)wOVdA!0pO@fYzB}d&Jp-n0eHVRJYWo}5XY(9XcA-eQQh?JeNYdDXI
zqcCUcA$(GAPOI0-(vD{u<W#c>?ieQ^dyR}px|t_N`;-DE2%vv?4BC_*0QsoXnE0lI
z-&A4+(OarOlClYd65F|dgH6e%vN<?^LJt<%$3ec!Le8<uoKDYP16j^y)SYFs)D`~V
ztjSj~<WwNUHm8B>2SZXj+XySqAAyLnSXiE_38K@}p~WX1#+JsQrja?RjhV-<Ow^;@
zTDQ21%whO^>mYK~N`~HOGbKj0cQM3qCm0t@#AQzu$=D8SQm|Wvm`#sH??KDB=&N1e
z89D+kt}Z}7ehap)8w9gFjY+P#Iu)*%hmP0gfPRdGvv^t$ON$0rHh4LDS9)+M_paip
zy7wrvVFxPKJjLE&x8Q8dXjpi{glY|qz$4XhtgFRbU*8^bv!AH2Os)Yf4IVFbN&CvH
z?`vd^kayfhOBvGIstrxEcR-<6J02f36F<#SC#}k#If>uj5~mN$yLfgUwA$R}&N1&x
z=c9h~$h!r~SLWexMO*su`yU8&n88KdP^Q5)J@77CmK=R=MXTcENL|}fI5hty<6fP_
z(n*%m)V3lJZCM6N*JI($VI`V-$pF<^Z|&3=fAFqL<H~csvS*wL-7a_vp0U&5&ceee
zh@CE-Y@kTVAr0y^G#i4hf9BrK8eqH#A?EC9Ln*tv*l64b!A@uXciC%b3H;4_FFDQS
zr}lHj@RvKM^ADz&SdqT@f4RTTqv2xoZggIC5l)AG!Wj0hIX5Z?wq9WG4M`*)wrMLD
zhN}GUPbu*5Ul}MKv>}q|0;z)k7YNVmM#0Vqspw9-biq;+av<sjKG1uH0(~uU!(}t-
zxzL2hD*OPa2<BKSeJVXsra=!qZ34x+Mx^EaB(A%d=eny#qp|rEn9Zl6Kw*)qTg)2#
z{=WkDjAXs0tdrpPE{~gkRf*JYiGg;q72g)NfYO)w5Iv(Gll8Soi_bGI_qir)nq@+}
zUB7WRdu~Db%^L8|)8i!``^6#a1R&{PJ(1)VuoFsH$Ipu3!^2?EZc2u_=o7z#@-%>*
z$=^I-d2`P@u#3&`<ICg0J~j+GqAy_Mf4^{Tx&@WX83gBcHp0<^6mvU|qcY2s>)!qV
z@xFTW;+AL#W4R0CLwi^UNtsrrPJ=Fy97!tHr|xmncuCmZl7JD-Ad#qXaq0?0e^n}Y
z<-Fl+Ps-Dh(?)cnoGdYNvmy%y`dALc76$tY$={A;APg$y3Qro~GBb5jx@a4(pQk~O
zf9t`)aVBgAy&D8yjzKcZ&=g<eV4C%Bm>%d3^#WDurJu(CPt$+|4&Tl@zbb^471?0g
z{}SFrnGwl&6Yx>ir^1<qeA7Hd8npBdzbZtX_Q@rJcS|JqBzgdXl9i~g$`D$|?o+j{
zTe-}aRb0>8Y?znE&UrIF<DJvV&?ziK<?#x1$n29SIBp6tvsZ)9uBG_R?=IM%v7~mY
zfmrw635K;aK;VG^>6X4fP{DuUW{gs%D=pG-r;IwC+4%t#N>!+!xDFjg`~u0t|A`a-
zbL8eVn2^!FO(30f1Own1=xdqK-kciDw0|a@9m_JCF9RViK$n<_&*6suRk1VSB|hKH
z13vmbgMjASV7j6aoTtBnb-8U=sNe>|@*!NU>K2R|Q;&6_<C#046BEOY>HE)ZI3(jD
z3KEBuq^!z-riB6EZu^rnt?t41)2Bl{<CGk&X~Dmf0&#DxB5~ZU&pS@sfFllSl57=u
zk^;wJrT;{HbR!#M*8Spk6+Om4eKC}5yu;3!SH;yo%3=5JQ>dCMfMP{W^nTILbu0Ms
zMLQp(#h&LdY<MT${K>M!Lz;P`3@IM%kRj7w3<a0{inQ2Flm0c9r^_Fo1ck#pu%Jaq
z&o1<ZI{7i+Hu`_)5L6F_3J2i+d1WGOS<UCH)`wLuWocx21K!%IN<P}KPS1~{c*;SC
z`JbCgY~Neb){r)|xtk0BISe5=i+oUhZ39f1#_~(a9-uSGj&+odaej^E+^BtwrLZUr
z{tn&^VK-DcNzk5>_geDQUo6HIUr%D?K@+-ktQKwg)Q|C_k7D4E3*yRt0K+fCaDz)E
zTE)$Us2T4uVxk%(I@)ro&-+lM+b<QE2T3Ivz0#Fit6@p42I>3XMv&YJmQD$grHeb2
ziLTiJs6B9=Uw6-l968nq?rECh#{pL4$GJ8X?&{~&wO)ZzUk7)xQ<-k?j04H=Sl3{;
zEYP<&$S4!(th;asr?T#B#r{LwOP0~?GX8<4g|^uF@GR8YX>xyFCZWpKQ&_rifSbqe
zY2I5)`7sHMBQRf{hFtT;k4qC^+HL_AN#=mZ`(YTez!iJfzk@_uf4G>L40om!Vnezm
zeVT1VroWvC&L#6uiGR)SO;8}-$G5Qy+kDvkdN4V5hV_3;B6;Qf$vFA|&salpH1usS
zX0Ng$4t<HR--Gp66xTrPG;O+Sx(P9h6_T>rr+CEaF`QbV1#Sjkz~TKfkVFgug^SFU
z++Bk3SdngDKN9<HMREIn8j=pbqj;b71zvt)sgJGJG(WMFGazhdd)*kPg)hVaA0sMK
zAIn{qwIa6jSr3+-r?xv*;aiJ7^gGAiH|LgP+FLUc_@+T@>)D7;a|V;bKwH!aPzTS?
zMl^Aj8kc@ph5ljR`GR9QWYiRjqg8ux(F_}s_<I*;nZWvleQ(h7f-XH5Z$n234afuU
zAw=J*9n%(?Qk%z3aMwVMTwi?_{q)wuQ^-Y+(n1)#MuAABA7QNWU>ZE^G3<LKAnxBM
zNfqkPf_sz_cQ~pG{olE>-CiE#FWAX#xtj+rgTG*D(|D=*C0X+FwmfM!z5rgLBfL`l
zJ5H254&8T;<#OyCusmCfy8Shw9}Ev+s!}ubE2)#A>~G@}za9K7>fys=L$X9unJ7DK
z!kGmu|N7!6KXJYZ$r%V?*};pP%wZ`U%V<Ztu1;R^<P7(A**{P}$UZwh;atKd9a7j5
z0n#a2bZ{eMFw}OVNlOu`D;2{(I|FijKjkL&AIG-;v`Aq@7CPocfb)m<?7V#otm<>&
z%4K<qFE+uz;Cqm7XvRtItQNa8U4*r_4M}Y4Q&6&g%6s1N!EGCKN%M#T7}m|^Xj>d{
ztL+D@_Grf7+Ji7`d<7JDB!K<!A`CaUj9wb{F!9D+?Af7AFVelxeN2N&TK~D`?<nQG
zj?TdL<~*2nMU}KGKL!auL;TB|<>Zvlg4T`?s1^MO^+N}+b3r@CxhYZSxD0S=Go^n^
z#Sk_~fQ~=5awD!4VW*5f-Sv$^qyzJj7%W1;ET@v?+b!wPv4crbYa(L#YK&mH&&<b&
z-4~i!e%F?IsYOFqQU%uSyMe09ebBA47+-I`gQ5Nzpl)*r2KuA1scr|14LgX+|NTI}
z%?=nn;tICkuY`vNEAXSnCXBzD0&^V{$d<#c;JVkE&OUVmL=MwfA9oZAE~J;}Y&C|!
z+Y;&g{ex-Xj<4KawsV?3=Q4iivL>~9Q~5GcAoO_pLF9u!aDD$(T>j-81fMBm{E^4p
z&0s^iOr%W&#a~ODRUd%wq;hC-eui(^%q(V`28xdHQhlel*jbwemGZiD*~|}EF#RD0
zZOa2C<@Y?7uS%ib2(nn-w-0~uatoMm<6<grcPbKGh9BXD%z>#CF%#P2&f)Zi=@_`=
zviQ&0>!^QN1_KZFa<}Z&Nkl#CRG1_{`r_{x6rBSp-aeR^{fKY)>I(WSPceVK5=|NV
z6WiO*VuofvUL5}kXS~oRvs8?zYwHyZ(eVfMWz3Q0u1*B{FG^IFX^^0@iQr^lz+Ae<
zuw_aM^tH&L>7)l}|36bInKW6f;=npamz1d6c5UkUT8N61wTQrXB4=@NHRjpehG^DT
zNZoKA3ievix~yQ-OVuE3(#f@6TntpV6W2dcrIL+Z;+%TMCwclx>^SBL;wS|wQo1ay
zkX%J~?U`KZ3tv7nzDrD6^$5THHp>1pCZknyp?UHyoD!`{*D+R`d;eij@2r9&2N`=T
zpc8UO?Zz}O8?y4jBP`x(hVCC<^JeBI)Tw<ehDmqui6cK^NRS)^+cPHMlX{SA5m6T>
z2e3PD0dM}i!-^CyHs3<NcDXxeQmjD|O4P~BJzXGVGx$Vbwv(-k7jLili-B7A#Q&uU
ziTK=ANL}BK*UEn(JX5CS!pZ1S*#r05;!$vKphU3cr}TaA0&siAzEg_=Ioy#2bf_9>
zXFGb)ix#Qq{u}(b=OdJI&eC@|RoMCd8hC$Tz4q4CoV^X>-)w8Z@1=%h`DYbex3~|Z
zSs&{8SY6WHupAsq)8WYJHZY&BPSUfAz}dSTHhg#oNeQ_qX>gU^?iSL{<!|A@`b2D7
zuS71YUcjWnd@MB((1)UEP%ufs>`g<+y2l-u|5uZfzO2T@DGXovCXEy4Uj+*l8EO&w
z1ib#Ij#p0ziGjB&c~N3S<bw+Fh?)+qb+qI<G_PV<#9#KeGeIXgb=+ha0)@k#p)e;G
zg-OG?{acHe*LN=Gp~s#ZFZY#PUmT6YN-M!nZwDTmD<VIir=Tb`mv{g4R9ri!gX@V(
zK(|5u_#k@_6$Cyn>Q-mFv5Ql<kIPv1gscF`<X^7h2TL&MNeXk>KV+^obK1l9wmR2z
zSik8!7N|bLM`ta`j>;h<zvChVCQs)lzhL*GSqJ!u8Y-l8%rscFRfnvU*Pv0m{P2;{
zYs}wV#kaVR=ho;8N$lwZSlKZhDt^C5^I(>P@--k<k4Jz7iNebLi(t<#C9?h+n**Gv
zh9PW^u{m0W9QxV_y(3s2Js<*NLmoiWi(zp2rUt2sHzuXWcW`^H^-0g<ov>-)VfdGo
zj23g`se6$J=U;UZKD!%}e~-#>($*`OQS|_|Mpfa_ttohKn-X!+Gr-K9nbIxEoe*+L
zn>LRB2<I1>k<_YOkX(1*k0+gm{6)L5bm$*(=>~0aX~q(+K%4bslH2iQg$iTy#z8nA
z%9J|$(Kk6ChSjQ3$t`mZ;%?yCiJI8zVg`bZ2gH#+6b>~i(A@R9v`KjvtevDmdY@#Y
z_g4{3TgS5K^aEomD$!-{wWyZDOHc~z;hZUB5!vYSrIS>|F+=9SI5rQ;JieII+;|xc
z)($3)x`RMxt0J{?zQsM8tW9bR)XBZ^`XuK|BQNoJU6NtR?j-{OFuVUN9=xnU78Mv0
z!HE;x*_7>Q_xl^4ceN2*{dI_Lv=RMx)PP#9X~95;-_pEimh@&wJ~->%VBOL-Je;9G
z1EQy~Ztwu#XqgZ1EKgw3y<BkIsl@-hSBSpAgJ(l3820;PBt4G{cL?#p`J-6Bx)E&I
z-ZH;y5QH*5*vAD|L2D}yGxiCH@uPicxqS!`t?1yGM~%<-d&g<5s)6sNET5Q>1Rs4Y
z=t`d~)`6Drcwrx`Y__IPt`(rW%pGn_%@Gt?<)gKs9C2Ha53w)5fzz^jJh)bktQ2dq
zSze5In*49rF<Xa}O}LKTQ4?UVkv{p7twX(h-|<Qo$2f&~QD_z=Adg-e6N~i~;KF{s
zA6=}Awb&FRrW(<KgnB&MYD!0|)I!)!eaKY%iyO>Xw%|esA5a#A>g*Y0TYLn&x4*`i
z)aShWLu>4))FCqOW6>oGxmW*3(V2(U_;q3UG-{sb(xj5{mrQluwFx0}G9@HQk|}ct
zN0OqHBuPl65|Si!-nDZml~NKCk|arzR1%VJe}B6!!|8b6z1Mo4`>s5OA|oHJdz35{
z`dk5xvR+O#oXtbFEXJmdXQA2T85+OVB*AsDFksdK*7NlUJ+Dva6dF>umz9tsDZ&ZI
z8=$jwG>#Lk0{=VR5Oi=ginjc9lx%4iZ*?*zQ(O(%Y-KrSY&(sD$bB%RR+hNPHnNUm
zvC#CyJ#e!Mf%Q+7sNkDk!HXO{ayRxoE?CF*Kl2bY`i`+YRS&kFSqr`2vp~Hr9}VkF
zNW7slzE+rmu9il8g^?Ivbl%6laksd@VHuoo<2wkOr$!S#-b9dQ&f<5vAbR$~@$hTJ
z+MkCpBK;7Xvs?3>hX#ZALNZKPm4rSfKH&QO9M^R80A{--afK`s-W>4-?H;L6q1Qc#
z9xM%7r_14ojxp8PGY5v3UBda^M{(7lGbq0H9%P^DfyPNoZd7$Jj@)(_M%^)@N<P)h
zE7ifh9`_DhH#cEbZ5hs<pAO}x&M=PLcy8a}Pv|gK8LeK5a7cYFj&K){!fW%fMtuaj
zcSiG0bJK;dvIo&WA|2B9ss$DuGbhJZ)Z?lE1M(p94|~Q~@|~0Av29le*nK+y0@Vr7
zX2k9{92IvSyUb52vZnb`Upb41M^I+IHfdbbB^K@L7cWh{3z`}?aP5AUmFtq>x~BGm
z_{<Z0_|k^NW-bEh(pU&N8U>Y(Y9zi=2D9-1)IC4TSsGiRDa$gg_cJE8nqhqXJC^%i
zl*Ki?sf54Be&B@xc@m(s0#A;KgWi>|q2^yTEJZ~+y-|?}f<4%h&X6u$(h3@rdia%$
zi`zJQHSc=ejlZ^l^+AjF^X7eTxW_ZbL(hW>ER3iD>Gs<&x_ve7Trvsfezii8(mrwU
z$2V*)XbuxIWJ$m-b?)=p5OmK8=gjRBgm1Sf(wklEd&72w-tG~wg4d;WLz1}EeN!O8
z<|*rFjmN8Ywp5}~D$c7Y0^yqmwEeG!KX<qTAFh`s!$gWi^2b;>i<2VhuioN;wW?&x
zRyTAUrAT7iSnpun5!h->(N;kY(#soB*+q+nHQwcS*_Y$-J?q%+;5}}){sL{Q|6t>_
zNMU@S5O0N<lW3ORc;@#IBf@4tSilr`cSDEd=ZN^-_qFM)wOZufynIZEzluj4Y)O*W
zLe{mBBcYqtqDuW!jMlZHxdKZ(c>Wgrx?(~{Ghbr)m}*GhYefD{v!SccTGH(JV$S{`
zg`{DZxk*C~U>He+_~o0yD)119I&Ktb6fXm5GL#ZeW1^S%2Fk+6!_qS>i!#fK|Go1x
zwE4XO59vo>Z<z^G9or#(_y^8pxFVa&n$e1V*YI$XfX=NZpfz+Tt*z|^@1?h3obEQ@
zeRW8ojVdi_RU!Kd{s4?Rf<}w(;l&Wfid-_399Q@Zcm52eT<~d}r*jjhsx)Ft)+-cw
ztMIUJJQnZM!2CQNx>9IFAEc?%K>0R)^s~_@XD?ton=sz=+!(x`yA_k2AEJh9Bj@({
z7DQL|a~&fuz^gGTL`F}A)K2^Z=E=S|Y?&n~D^KC0yInA|i=7=4<v8c)2~a&go^KTV
z;dS&Uvb{k9D6(Gr-@9jU#@!nrtGfX*9kpp=wI76iR%LgJSLpDXIXlIM<h|@rl6x--
zhC8WJ$8Eag;O^TXkYB@#uB{S3u$H6qvsN(faufGkTaLDGw1C1lRggR>8U=8d&l_V#
zCxkXZ*rFuZmJx<I30LvlHDeN~rcaErZbJ1<P2TIXKewf-7o=a9(V#cG@!*qQ6n~YY
z(Nh4VJY?wv?h#ZJmSb^U22NLHvxSWntmCZ0cyX@_GSXI{)`hF=?{m_j$4Y_i+~S}~
zk7Y4?|Ks-`wjlgADdHX@1?EW`_{mRn$RzbX7${wbR&z|~<a=+>zibnp8B&G;kv*LE
z>r1d{;SqL!IUo!eQN#%<XG2SkHMRU40-ZBWx$%q@Gh6Km)-i6*{6Z-@`_6Sd<uZg6
z2c^KZ=n5R!aSPRsXi|mld+5QsYE`K$^KV@Z36uUo<M|kVQdAb43zVWx<L?Wd#Lhyu
zMT>EJ`5<~}i6xo3;33#uX~My?8K0x_3NOB~4$Z3@c_r-+a47T~zH3&XD;GJV&xk6#
zeMy-n*#3bH6&j33(+AS4?t<q-#swM1aS>&VL-@`aMl_ldDIey8aEj)t->%_x^z`8E
zUt`9!)j*Br2i)TBF7)qZ-T6<9N4RPNjGC!S>Tl$sD#zFW7QtMMLKg(iUW2WxPICe$
zB^qvJLm2`MV)f5}PRCB{>bnW%YRfs3(e5CCYiynt&v`X0g-+!qc=1>vyDzgWpw=$l
z$!ai_j_ZNX($?g0<9<{L|B4T{$Wh_2G_c8FGVR;|l>93e>P^z6Ht&1EbzU+5TkkG>
ze{M`dtyeJ)%`Wc0*J{L;_r*GAO9;%+fK}Jkslm^9{@<~On4PKx(T(buDqv^#hNGY{
zb~-PJFXcr$OyI1p3hn!<gh`Fpx!bQzNSwtpbPYYvy}0)SC7@4Nj8`F@%|?9Ru8}ZU
zVncHpa-nCr6$Y~W!u@UL^tGxDUGh_zl+GATN{07<`KBt~uC;-?GSPsBES186&;BTQ
z(#vlzz6w{?Yf%4fs?>ecWq#@T1{glO6t|egf#-`FaP^Aj;(`x@<0AtyB2<MmDX{+3
zb9Jg3VMzjB{^sAwC==08Z*ildC;uzKio#J<TBCjq+bt{5(>)3v-Z7vGJ*v!|qDK~P
zPsfgdW@!8`1vCtu`E{x%(Kg{J?_@a}8ry^U{O&}4v|KHAecXb+?gQ8^Hw;$aHz2EA
z<cXc3Be%CS1RvCXf$DKy+{zD&L34=$>GX8Oguhp?`o%QRh`tO1<KvkA^99%`{=qeo
zs>E(tBC6hF^YO`>c)^4`?u&ytaW^r-<dL0F_g9OSZ|*|1K_XB|3<8JeJ)A?xYji)^
z$+gX{0)MSam@)JhxLvORNq0Sp<9X<f3dDHb!6?$tcf37cnana#B$l#YpnFXj=0=-=
zsPL_DVC-8+$oU1)!;3i2z7`zmD-E~PRY~WJ8WdTa<uY1MGf$~8w02B~(=3l2DAUaQ
ziq@jWdP~MN`+<sL88Te|2b83}N40z-YA*Fvn9iObhflCBe%ej&uWJUAU1?a9!SFm^
zKEiyTM*R0wo_s4nY}u_r16ID}dVVkle4iz)F_VCw73CLBmm{4QLz%<cJb(T0(>UE=
zFnP3HhbS&*`Ma#=7~Hr7v&6qZDN~W=?TJTw!<X<WTbguDIfP0_rRl&j4r9e@z<iz#
z?JRZWW6VB+LdI$ITbziGdIpi2RaH>_t_VaEb-5ABGUQNv7Hi?F6VH|J;rO&tXbC+G
zVc$=4PES^G8?*Cqei~yhX~>{cqA3ctIyge_6?o!%6UB{Xuz<109ICqb-4lO-%4C)w
zNLr1@k`&3oj}$I%W&1`)iLmU!ZBAorC~saghHL&6fj3L^sMp+X&N%Ea<|T7zk}M7D
z@5$4VYj0xUphvu4`A1$>YB%hiVN8D<m!f;z6v;|bfh~;bqnFwQE7M;wPSr?Amg@!2
zrI$cE>J$c_=!55L1XQH$!6!4HkmyWhL0Io*Sa(W|%wV09ORKGDM|TChX5F4Z#Vk<O
zGNu8$4)N}-D)7=>mn3$`gMfE;REVnQOb->oa*~Ex;o7u}?Y^?+7GvE5Y5IfZ0`yN-
z<I#((PZ^fYSHE`UlXfXWwAT!NRk<-a^jeSBN?4!LE(uhwrNTI6=A@44fW3Q7$ZOUc
z5AYZ8g2~N%wRkjtoHhJ*R@vj8>53$q{TU|9&Cx2e6Lrp<gg1%W<W0E*uQ{8N(a$@<
z&-E~0d+0Qnb1a|WB4BPTGwQm_gPZjG6Tq}t5YplW1ClCC!js|&SK}d9bs<cx_=nX8
z-{XgU@}viJG5TsGXLF(qvTaoO=TC-G!O*ST`>m!#bfXSipTFeK>)BF`o0rjcPAMly
z=o5ZgW<a$}9;5JeEQnhB9WMl0QAx2W^DcLTZ)O|x*q;Wq@FuwD!kB8t4dC*41jww=
zBu*+Tm}~e9v?S_LapXPlFeqX^*&;Apbq1GIKENa6jEIxnIkDKDbue;6(Z%jOm-OFt
z;r7^0@XDFT32vzs_&pm2ADr~*CA~!SKQWYQDM?Y=#X8V=i*Zyf#xT}o4K544fK3xV
zf^Z*VW6)O4DPkSQw)JDtFa_f7_mIyXYrtvPuYpH{Sw4HN7X9z54%srZ5dC+sGyKCt
zT(iv<9OC#8-7XiypFv7A%14WIRwwhxU3u{9EMu?uw(*^JpP?XQGVl2?8A4v|fjSvO
z_P)IV66@<?pM#7$i;38JKp$a&J|1H8&E`WwXwAyx?4H`-fA%KCZmc$cQu_`xC5=Mo
z{ORE7*9dXTuVGlC2i(d^!sk+!aF}(@=I?2O>%-mfy+EH7pH-ouPXcgtx(&5#{ehyu
z<$TFEcDIa~h)v5=FfrI3O8#Vn=wco(I64;GlWW<q)(~#i3?-^F4an#vR<v>XYVK9X
zaa`RbM_Pq)tWW$I!`edmaqkT2xZPVBFIkT^T9o75qLo+`aU27tPvg2nHA(avfH_eT
zEE}^Jf{kB6SLII-d;kn-3da2}b*RMnJk+=Nz>;lRG;w+vmZhA7;0ng*e7y;;-6_P=
zHTSVCOF$Q@nX*jtdw#jO0jYY|0VlMMqunsp51UvamW*lO{Pv`CkxvE0zoi4898@Fw
zTMS7+HFG5Y83s2pH3^DJ(aX~aj;b8PfI?T!cMUt+?@7U!wtq0{h8$5EVS|YuGeNOj
zjY`;VY`?TE5#=NmG)C6&PIqSuA57~7IP44aI8`#F)P^i7k|yK2)aaWK4HDgK%&|=m
z36#Ie=YKm0m6b9iIVzUT@w++6*-K!)k@ff;?YXf#Z0M$$HK;S$1;^zv7Eov=y6+na
zN_jj^YW~gkuPUT`Vh`xqo`UUL*sL>Lnuc^YVg6kW)+J=@(bWQ45U))X-n*fa^hao2
zumvS;w&G{4Y}YBTMza67b87n)=&EaMvNdW9=H<tN{$p+O++-I%VLkVrF|i=f$Z_br
zbpv<a*@aG}H-!qt<y>#S65V$#8I<0C0`JgQ*!WnA6F#bfywWcCwem0)nltyoi|44P
z7zZ=7KVsu0HF`$29~-|}a>J&Y(<t4cWZbERVALa^8?sD^7iGO_jcI&mlz{fE_rvp>
zl}K0JdrWfE=7ii^i1N`Tqkbjff;Xn5(;^Xa)&HPf(@;z|YC>0^QT(-9MWQjGlKWL@
zP82J&nD48Sd*GV^QY~+>OV)@s2hIZ{#WY;noQN6I9`mUSG;syXYj)1o<T^%uhdnjs
zBs*M<pB`vTx>-JcseB$fO3RX<MLzhMdw?^1dLjB@7bg?RcJR+tvG!IZRPm0OYncv(
zKaYWAp$>%okfj>pq5RUGtDyF_4K8LX&?vo3Hg6X39upPl_k2q-l4#NX#IxXhCj#1r
zGp4Px67_m%ilg;<AbnQ^hPW4D{ZunL?W_j5#F$ahFO&I9(NMa3+))fqWSNDaAvDzb
zJXkqJLBMy`A8mQS@6sGhCa~C_W!`J_^ex33r(>~g&P8|?m4UBYJ~4*>Q2JYQ9=i6P
z=MzVLLM3ZweCr`gDpzOX#`SXaoT4tZs~X0+`?8#Ufji_ZGNd=TY}`Iak2W>6F|YO%
zzIbZ_T=G4I55AbviS_!Vtlb3<X0on>f(pcqS&o*w(!j9Vkkl4`gt;e-S-+qj4qVqF
zn(N<V^_U7yAd~5sO|$qM=FJ&ttx3)O4+|%pZiRC#Qbg{r8Iin7;hRzhQEoBoIWK&N
z2W@^Z#zsHybxa-X_NZ}-9rnVv%gT&{_8d0txCnxYH~CsA=GwM7hhe@t*ueg-qw{BA
z)D&})I`S-r#E7u)KMCZ@FNde}Bqj?lf>&<`e|kzi&YG%3-XF9k<&3KvJw=~SvhCn*
zupBP*{Xvb1?p)x836OvA4CiEhNbG6%8|0JY(WF`yG74;=*Zes+X9j`i*{fi=<``_P
zR3*@+1ZfxEL1T*rHjee>tC=s*^;aeznPxzozOUqp9_f(>(_+x-!y$}YW=<vdPl18U
z8-CtzE1I<R07#rHxbfSx$l|XW)FeZVF`td8cD5-&-ia{{m1v)bHa>qg2;)dUPTYST
z!N&$5LKWjbdZ32SWo*mt!W!itplz>AB||rW%np5$Jmx!e9R3LzB{_U*>R8Y!49An&
zs<g){75qB4K;Wx;ocpIis2^iPk80dQEh{$+6-z)6_rTG$QjYbVgnYoAa4sNVJSSEB
z1V@ZuGrzr$VfZvTDry=K3OY6TkO*b8T;B&a=f9);U^8mo2z+vG1H^s32}`BJpm`(v
zKDh)#wzmg&G)<GZPtQa<?h}{NI+%5)`|(QWQ4E%3;itg{q`%LE?)+Sc9*X)@WEH{-
zl0$?E3wUsw(+D=xmta*X%Sd`ugXCZeuAC)L)l>$+O*t4GuPYM$g-_5czZL`I4KTzh
z2%k~`iG8n*|2aQG*SlwNI%h$2@<QOwG;8Xm6~ql;9=k=ZsknJX7d{^H1WP*aLg1K(
zoc@bKJi!<MNsDWFukp8pOIy!?8W#&aNAIAy{|B+o0beW&R0FlEZ{Sgh2z@U6!5KHQ
z!R|&0*DL)GCXBrc_v=kbVCrZX@E5b^^k-}od-L&u!LZ9fpX@Vv3FET|(;CymjJKW#
z!vlUo*DGWktA+wW_??3C*=4wa<pT4L?Z8UL{`G6f;m`49nBCCBWdzr7`Mnt!?Un#B
zRXecj4x)xwg@3_#dDW2<ImeNM$V}Gr4XoFv(c^}3x#QFz{n`(_Xz>kg+diXga60t9
z3dFy1rc~kDd;W3w5Sp~TLO9~5C9xVC$acZwF-1|I<ZP}*w}npF<Wmnjl}^J@_CA-~
z+0PlQWL>K3kD-)x1w}=l_>pY)dPpi0zvQyJn*B?DE!!z|Y4?Lo>Ibk&m!%^b1mp+n
z<5kX9AuGQO0+ZI6EHeY#TrD?D^;3q8))#P?`6qn7atowovD|*s77#f~bAo;`*A!C;
z_F+S)Y1(J7%SSFa-Vz1-KRf<uvm{Ozcfhd{6_yu^DR2uEgOkJ#?Ok7@M$J4fX{jC5
zS=vF+MwX4KmxrXsTe*xW3!vG+0bUpxljo|oRF`p)GWP_7%L^ayP0fXqQ(0!?t0nAK
zxeAH&KQ;>)5K7%)S&*ZQ&unT+XVg5!M|p#28K&^9QLHO(W*}^w5G`Ksr$Ky69|4R6
z#-W(a*Nu(k?`~uHg(PpBpIeU)8?vy~UyjDVI?o#jzjFtcS<r#+JUVG6!3jln$E5DC
zc}YLc63LSB-Ghj2fiz9}R?4@qK9{Z0eLlwF96IO>p%qya!;EEUg_H;v9qB-o&-*Z}
z=N>Pa<IB(fHWg?0o&&d-*AV0?OH)km<LGrPGpJXJKP<Ir2_MNyHoa(`{*kYIX-xXg
zmhrb&zs01_o_x@+5;O>j;y$g+LLW94Q3;!ifvfNGyqPtvaMGZ^F6)y6t941uqjh+<
zPmN3!T|hPjCVsK2z)(SpyenYr-fuE6*jt_GR0rbH$sbX2-&pLcb`JcDDnJ;|7*KAe
z)Ttz1tkL`u9sG)T$+zRY+ok;|lK4Tjg(dWSnTX=be85X<(Cu3z2!CfoM1&`E(qv<w
z{93$Toenz~GoY>b8|)jN3TfWHXw{a7t@oCIe+k<eNX`~0u$=nNPwn`0K$bMKo#9|>
z4dR{_4(>`$%rCS92G5lv*Q|z+)|3vejB(fk6i>k8DU)%A{9-&q!ZD!a3Y+hz^Vc6q
zLC`yC*v9gr*A<n(tNA>)!OfQD_sU^ONdnH_#<+W><AsqQ<VeLbE7Ddpm?k(DLu-2m
zU#l|$kFmVA`ILv^A;VRuJL$p9XJ(Aso6Rfy%ix?UW(vEMzr%BvVvOORqqa>ZUh#c^
z74@tmd@7Z<oq2)J46Oh+#v`4t+y)cYyoJZw{;;rHpM=~}f(G+TaQcchku^LAhTY~g
zrB#VIpGt@5m$SH#IhGWD4rUy^Q#gaMFCx0G!)}dhP;o+s2yZ-tSjSP&%J||bqrWpB
z*ILk46A;&MeeC(5h|Ztcy!~Ma6c0;ge%=pQYbry1cqO{pOP08?ciETJD`<Y6@S>fE
zdGCo$xLx@a{KsZK&p!;{7OfkQG0lQ%73$L&J6B=nltO-~bvWqMtK$59CxBj8BX0I*
zK=ujaFw{E>!%k#Et0V?T9x%q%-%WhZzh)5sm8UUmKP11c22HC|Aba!~Sow1rX5n|d
z$ap{(v;8oBL>q^tjM3#22{yq~F?ho%oU=-gE-NsgnN1&no2^IUJ=LLol@#5!U5&O#
ze!`*zDe{@In*(l~z-rca5qZ4i{I08GO)8+3s}2qP;tVsS3qj-`!Pk#`gDV1>&`!sa
zmvT^|2d~zF&sTy?iRUrwnJ$er*n<*te?G^c87f(S{ONB?;%b}81=dvYf?r!W&w!5@
zI`<;(nPNk>b0+lUpC*`LnTmGLSO&iIG#q^{Ag0T|fI`YGKFKXl*yT6`WmvvmG<+<(
zn=(g5+;6erH(fH@L&7@nAB5TRySVrh*DyIso(@+jgk<J#xacH-aoQE=N-MaX9cy6O
zRDJ5QcM#5qT8FJE^RQ}LBQCr$lu&;KqEys_Z!L6)?2l^v@;DB=W_<;*QWHk|YlA;?
z9(f*!0I%#wzR{zaYpH(+RpTYBQ(;U6b88A(Rah4G<$0K}Vk$}wlnaH`?a<<02O%#t
zz-NLqt=>NcY*fE6=0rGSaoR!2o(2qNJy*;4j~F(y82x4ML!HVUPAK;tB8Du7ozhdG
ziOpx5CQU~L`%w_smcyG2Pk{C(k1_1ZILK6G%mMcE)??pOl@bH?dp_qynx};dqf__?
z@4G>`bO1lRlOZqu@tCo(mlMY>#>U)8l%GF{3hH+h^hLL#9i;JXVODg*Xf479sB~sS
zGunH;gU!b5|L&i&AUO3EjXpiXi!;k0U~w7*{5uJ&BNa*Y@nA@ZV%!Gbr?{sSF@N_&
zi1^5O7~@-EMl@rW<}VYMd}i6*_9JLFSDv>zp$gfX<v{pZp6(v@4U#^*5}M0dLBhv%
zIIwvta#d=?NBRi#$$aIsziLrISP>sPZV4QkrBCGDb!qf4OU@&y7x*KxM0!9#FH|T|
zmCH?-H|z&QI~Vh<GS9i>_9Do6{S21LbYc8Hw$t-*=3N8#F}D0vY_L3qqg|sBEVQ7p
zXene&Im!*;U!zUa2lV=s!z);g1AEUhXm$FCF)S}zwN0P)-M`EH2pd7o>M34+r%h^G
zo}o{nG28!0AmqI)h<r{Jh_?I|R+k%4(Z4jYWbQig^x2xU`rIu}n)yMqEHWW9x(GaG
zu<WTQ7>^H3LZ`yD;?Bhd{3xgA7?iDp$(3y=mir5IkPdClEam%3ZgAdRCbZCC87|9|
zA@85)lhFrT!RH~O(1rOm%Gx>dN`s7W(<M3u^B`g%7JP5y!)7i6hu6)*{HQ6=F@b{K
z=bw=Dqg0sxv5cQn+>1ozADgx5Qh}bISi|%HbYGPvUAp73$=95EN|%Cd-w0^9#4@2W
zs_-LBKmwRcYM@&{zb(3tCR>f+#ES;@9eT`tVa(8DSIkLg@G(sBJjt>Oj8U2U8&<^|
z6U|U<;(BH>XS8t@KG^#TS{S=WX)8NteP6+eM!ItmyInw(^n?HX^aaLTI0I8B8PIq7
zIwUVK9YWu~!ZwBfu)b{>99-6hGja-H_TT3)>6I$Iq|10Y&dY=vQnq|?za}KrSMVwt
zb5Ky^?WpdiPDZVlC2F3((RIa1E_+)g7k@ASdxM)GKcO32bKmeI_u8YN`j0T3b<$Tq
zG@%MvdqJTh2J-MP#=rW4)<xzt)g}OMuujmzNv+tn`zEA*TZyOhw8^z4sx;nN7ws<X
z;(hyeVk)a|Y^|^)jeCP((%A@TjGGC;ho69X$ufTN2syBxa2c;J*o+Q)H^32T6SDk9
zF%D0%q=9WmxIBX|;5A|qS2szCroYi8uv(4=jSWJ9)FQA{`U0|JopF7H7WMzy04w*W
zVb7ylyt8L8saq0`Zl#Q!nR*^P3g_T|@)pE?>{YCu&X~K4$_s{9-vQB-5TPU}TR8jn
zE!fd<8(SxS#2+L2aBtXPa{j?pJSa4zp%X4Zt8Oj+r_Q5UnFZ^*$uYl^8ZGnDpkoh8
zk#B|Oq&4IUFZ$3Wtlsz)Lo&~^9;y-XY8UZ=Nt7S=X%L)wE}*TCOSwT}RnoEWIjm+K
zQOU9?oV|4-)O&NVCDn#X`nE6*qZ{%+O=x@dD{OFM`&E|BcOU<OH|f`bH_7JI<ytbA
z_IE4Z9<5D+`8=3-N0DR|7*j*5LBzkb2)7VdjPra1g?~(G$IfoNX~#M^Qd=A`$qzgt
zUct$%7vN@YMe|L<_|%iES9G7vJl~tc?pbYM-gaMjbc7C7m|eyT^4Gx2={i(o`a~FZ
zeI3+fcEIIDrc`ZKJ#4>cKz$OjV6@jz#uSQ!*~~>zbN?RWkTI8C#truUi2`x%Z5XO@
z0~-(TV7Vz%6unu=HLf$jxkD7;(xkz}X^A}8?Mp?a=^vp^q(*h7Tn5Q3q1fF0q4;Pz
z%lI9*$&J4F0X|vEkjz_ysOQoaSjpZo`Jos2*6awbt>OT4GZpb+rL~-Y)ICg|*9JzB
zk&u@rpv`@<6h;ie-C++HOL#HADQ*)Sp2OHX@1nrn;VcS;g;2cW8Mb=;<u}b`cOI=5
zSX_S*7KUk(y>BhZisyO+x&z_0$5r<1wc|&vV(!T&r!f8y<Cy7x=gac5IoVf2FbKNL
zR|T<5ueLO-o%00yYMb%Ma&=OgK7>>+T*1jlw?QrQ-u(FQC{~3v!p5t|u*q%--d>T(
zxQ!|N8hu-Owb_DZFEXHW=R1Hva<d?2-cg7y$mJeyio`o#WAN0KTS$wUZ%tkr0yaP7
z{?BUh>NCXMYzF61SqK4JH}at>p&$r~6rP`@NGs|sNc?(3+88?)YU+leQ+Byn<L+!e
z|4AHYp4f^`dYkw%iw&T9ItTOXqPT2J6HwlF1_x%a{O9?l5N{dB1%xWW(tLXiZMhFg
znX9=N=L}d{kpS18nUdWFQ}CpBIXDDJcysyTyg=r?IPr@Tv|W&3b$1UJsQZ?SWAD$D
zW9-=zr$yiWwkFXqg0J?_;dQQAfY;!=Vy_j9ssD^+=>(oA(F_(2M7=?u|5WIJ_d`f!
z&XDNK>lugkr#N4!oUdGJMTX~o1%XGUFj(dw-iq$U48~!}@O{J;KB)r(!-t&03NxB?
zxLe$1bss%`d_+A?ho-zUB^R@X<L}d}u;;rqh?DX`G*m!i+&1D9PZ_EcyAxW1o}tS|
zW7L?Z4CuZQb2`GIv-BY!dqM+chsnTijfZge24lIt%f?xInbRtc^#eawLg$to)M7u^
zvg;DAvuGjDYg-ZW`Z>0LJ_Os+jELLiP?*o$TXyHy^0TQdl?+?UFAE$*e&-CKuY(>z
z!2EL%u*4L0UU5hFC|T+nN-+8WyYpGG?#kzpxOu%1sppP^K<|A)!G5+^Upfi|8^n(G
z%`UjCM~P(YIgUy9=kgg(M}lP{o9Eju70xoLM`(@&``cY0F>?_n?`#7XBgD7g%xQ7W
zIBa?P0sR-Af=zk?obhZjM!%`!qF3mnTCExVTEn=c?=8^>RzR|QJ{av@i-Gzay6D_T
z73S`n-PI0#j7zLxIh!x83kQY%+gyt6XY}ffhkYAa7yVosRP~j^pRdM@<7Z4aJ~Sj*
zD?WiiNDgOvN}8Io*+WR>CB|daB@1U(;@U;=*mWcvz4`?ZC8tFOb?cGKmD0q}M3!C%
zXPlCZKWr8`8w~z#L+8iqvG7v|UYj8x&aOjYwY>^?>@y4NFNk2@qIUof7J<mnnE$Ot
z@m3;xzXXH{TN>X$!DiMETHecLpZv*NMka%b-G1n+je(-#NbK9GMvJ=>!S&lhK1Zer
z+_$Gf`<ca<r&j=nV=Rbvp)OIWHmChx{2^v|0NRy*=Ra&_?2C3IiYGH5di+z~cS#d?
zyx)r%M(25fTu8xRc7F@74Fo&oZ`f}&1;+he47uV`RDIBZ?Y6EM7C8d!XWWO$art=n
zd_6|*ljD`nyW`e3+QeY*D{iTNF$_C4guELsMUPES!@=^!=w-YL+71?gU(hG~9k~{h
zx`%^zW&?Q7_eO`T=Uhw8e<<82M)?W#C=z)K|HjDC)u9$7XmcPGs`Np2<{~swe*u0L
zN_gF2G5ptMOlmI_L)xKNa8Q$ngHK8zQeT5UT9$xcG}94V!@+l9J?=jB1_u&rU^|<!
ztf>8ojZbPAqq<rAZyn228LEh@#cVz*{Yq%HQw3q&0eEfdjJNESNocAbof1@x8jkv~
zW1lgb1H1=Gn<>71CPjZKsSrDMM|+XPGO44#31PScwDTd@@u?5iXQ@(!`z*IRQlDx`
zeSsv$6yZvvV0d*Q66J;sCNa6HH0f<DhD;g2xq8e^mSqEN9SgAJ+77fdVBC|uvtp-V
zH~6l{b&zt<oRqlUg@Du3cm<n}+=|Wj@D0l+>sSO}#ExsAvP7BsJQ@ODzNKU15(&3T
zEKLUOKZ{Pv<M6=(#y4xN;~K*RoFGY+@4fT}+kabv=eletKN11*KCE9Q?^n=x;fOGx
z^9;Xz&LmWIl%f9^2m6Ab5>1V;pjO_;Q6<(7IuB)XXD&Wq`@4DwaG1dd=B@(e#gS;a
z?+>V`%>y&WPOIzb<Yu-}JTX;{WN%r_dsmm@_6ucTKWz>=-&%#eCmzG-Gr8z7y&KHf
zxz+XQD^9EbDY&>eLFiM)hpjvfbx+2?^(ZIo>0#cOa3eBM@Qm@STVaxiIh7Tk1h>q3
z5Zo;0PdZBA!r!B~`IQ+lKPC+?53`w(xCQSi4I;a}u7jU)EcaA3iTS3+ih0IN^omr*
zfq4bM>lqMbu^!ns{U5lxwef-QihFV>56vxj6g;2fP^V;!CL3qNf>1-!^x!38c@~_C
zQzwGi8y!V=6?lP;cR@$?BdGor2@!NKEv;nS98+7U&4`7~zgfq4vAMV>$p*X(q%q%%
z=i^c-RL#|)mqOKutp7r=3z>~Jt>NgAcM5u~s^DsrG9@OAS+;rr&-BR=Cz<1L*3z5^
z3LiTdtX%@G{*O^)@J5(F<_6blq7DX!lOgq72hJT92%%Q5!69Tnw0WD;gjPokE-wVh
z<u1<l*er0mtj)*mD1{8YG%%383X;ADeBbwrP`>#tME?o~^ZVC@`;0Z{q;u~e+bx#c
z<vfI(=xD}Z*LNV-phR|<>65COXK=7-DekOYfazW(=(i~iW$)<XuX!bS_`D_6Rb{=f
zE4A40r3NZO<cQ~s5V-NU2nWJc=$!rXBwjs>TiKfh%a*H<s?85^_}iOcS1QMy*&!g=
zI~+Nk8@hBy{AUcR_z%kN`D5^rci89mm|yEVh$Q~|h%=_eL%)6m<nNKA3$k>n`e7y7
zFyS3^G)+g5zr-;_>M%sbYLTkAUy!O}!u+CXX!U3X%w{vW{3bJ6_o0YC-|-t?cbn1d
z)wj^|!!>}H0Nyu3Afs>)O*<e<16sAYvLH=#SF)jd)>@L%a0TKt<c@ens|6K*<S@Bd
zjrzuRVUr#OyXxJ1SVAM(Ut|2<3LR>mx`msxq!Cg!>XAmf%iyAdoXbCTJi|O>3VTiI
zK32TjV_;3Td6|*+;94yFcptn9Hh^TS89X=E$LBdN5T{cGPPxOy^A+>ZXYM&n{+)m>
zH8J3n@ml!KQ<F3*#Dc_Cj+VXG#n^X8Fn7)X9*owe0ml+yG0~?J*u78V*Bw;wkipXx
z$XwB}m=n5>G0<LO&9OSH-Yn1hqpbVm%)@{%57wu$`~7#e*IO#j%PnHQ+zb~!up^Rx
zV{Ao|_IPtn4V(B(s}d;D8-PH@4G9vBMyJU^yotFJ4BVBcUnd(-X{&gYV|SMz_W2vX
zV9&qWq2hHP%Q55NM=sb2VdLmXoKzSBXNL`?6Ml5S1&Jo<36X-nZE+CY%l>^Ck2$9^
zGugXzH@Zwq=4ZBBlcckHm}#>K_pN!0VYVS$PIDDJP)!9f`x%{_`58njrV9n)Gh+Mv
za6BuUj!A|$#h+CN6MZQe(m8G&)D~_*!RTX<;U5nYaVS^DJ`>MTX#i_A5jTIs>f^fL
zP&N%EuUk-Un*>8d>L@uCB^2sg(VdT0VA#qWyk55#`erxs>Ob$`DNO?+!#JMphcANp
zicpvquS~p8eF5;dg_`bKY^$#Zhj)x&5grQ{CD*`1;})c`ooi!z4mW&i9bk?sV8LZ*
zv$_wKY!BxotP$EyH3u1ARWj#-HIWjik;cSeZsox=RAp|nK%+>$TF#JOW_+C)um0li
zgN$k9=L<Rk<FV0vDetWH2_H1&LVvXljan~9ByaT4q2)4nce@!aNiShe;33@O@CR7E
z!I&xp*wD#;1?2dhItc8|<ED<Pg?Sq*5ltB*u4D@@YJ4OXX@<hZxQo!Wt{Em-SHOUL
z3b+(~=A1vNV|C&P-hJi(x7<pL>XfWUrMs^&cxorsan=0sj$wFym^A4#kHPx(nZW5(
zV43Mn?)BIQY%Y5e>Z?XT<K>rPNnx!xKJzi2{AWpB?k)iJ$S8DRJU`WKY;W^h42@Hr
z`8-J|oVy@J8f#O<t_QdCy_&DUWNj82$aUeAv+}eszYjky)F%ql#oWQk{owU0O1N{D
z8kYaK2jfcYS&pWki+(YR?={N<jrtP)Uxx{4Va)oHL`Ax>L6P`2{KDBR`<*tS0gZDN
z$&XC-xy5W0hvz7hX7vadxAz+cXtbi?Mm1`w5DKDz{(|5wRnTBq4<3f5bY$mIhz&7<
z;OmbdTYocG?w<!YCuO2&T1|oF8s;jqO@yeJ`)E6SJ`^dakde2RF&23a3cd~J=1yc8
zorY!H*|B1*JgH9te~bpV$Z&AoaRH8oNmGZ1NBMDUbFq4jF}A&W2D|SUf<N0ay{nQV
z<~hqb+>nNy;}ZGA#cFtTq#;?PV?@_0C=iK~26K1B;QFb$R7PEvs)fr@`+0Yn*SZ=-
zO{_nzVGdb4PQ$$KgNX*?IV34)LdQlc8hv*RXy;nc3>69Yeu*C4dCLwBOD>_Ej0*Rq
zoXvhu#qlrubm_=L-jJOi32iOqU|^I9!>f{@c)m59n502O-TR<jCIais()l&tSdYv7
zC)CfEVO_sov3JK&H0()2<60v+Tlo^S-o1h`yBS*<{P~-`MkHy;O8(cJRumfd;)+j~
z(Pi&F&gphPC%e=Tb6&gz)d1#5i62BmrnTYk<(=4OTMUc()QMxy9Za;2gtLL{OuytG
zuV#J)3$t9YgWf`O^G>m7)nu{gSA<X`tATaFclb^B62bnH7Z$3%g?sES>KT5Rbyow#
zofoh0q0j!onSyj&a>RsaRO|AK-KA;dIOZKoAA^zwVVsh55%}9D<8Ie}ywCE(l4<Jv
z#y!TwD8m*F7N6nTZ`p&JO$fYmR3IsRvgDq&F_ElyXFch5p}=W5XE)~o|6oiGRJc`R
z|CzU}$J&l1(su!N&Vl~cUnn{f$cZvXiYKv|teqTVn)Pdt<>#4^{(u7G#I%Xqbm}4V
zaW_VoO@mb>Dd_#P4A!zNlWhX)aN6I5)*V)Opx_kx-F=FkiHo`1c4PPyT!GhjH(`9u
z3XF46rP3#FKvu;|+}`^M$C*2T!v=lqjLhJ!A11J+<`3j2$zWjb7e2&G7d-b?!v4W>
zWURIcIm~+LUMqfx$0Qk%46U2o$rr}d^A6jeJWT}m=qO%6;R+XM^qSA8D&QM0+~uQx
zPKIb50oZOB4^N3IO|;3!4zv5vKWhaX_BNw4a?`+Qp%HD|_M0zfXY`|uDkOZ19(~~Q
z5#2}cXWis<e9@eZ53Vp5Mo=a+Z?Hx`wfp?S>$lM3*%cUEeIDa36hOdS8Mwd|V_EPY
z{<m8qWbYl$`!{?BiT?`m6_<a^hhs#)1~E7B5M}Cj&kFv`(V%e~8$ep61!{xXJ@lc3
zGw(4#OXZIcUA+hz9#9CMsZ2dHs=+BgntK`{Ae|@Ya{WK_>D(A)I#82{{Ue8f(SM(D
z!+<oAo3B7ehU!AGry-;+7|Gn$gXt`jS11@KOZ!hILPviI)ckBlb(tcx8&k>+Ew`ql
z@BYQ-dP_j~<tvmdzYCV#Icz8U3X`s`<Q~MmhVGzDbkN=hnTr^|-t7|?6~vq>55%0h
zttGixZ$ez7mVwgWN0{Avgl{x%70=gs3<kP9ZvXxjoX+(LJDuk7@^=_txUfJh>7L3t
z`K=I|F{w=9#2y^lsZRa&i||*07(D{mzV}otmlUCh@dYWI+n0B+I3yIzC%N$F7BK#C
zV=t`yY6UaCH9?8jaK^W>BA5G($i(r?PjsUXwKEV+j?M?CW%69YLnZoSgd+8v?13|E
z8nAimDfDr%#{BM+P^SHXb2T~xJ1g1T!Tu#4obrcx2o``rUkta&h&^}oLNWh!7JL2N
zhT7Q$(00`u{S!)I`A17Kciv0%)eK?W7kf;)w-u&po<iZuH~6gY1lp+n28X-Ve8@SL
zm(G0SFx=oS2>O?a8&`$#SIex3nWq|Am99vOYRyUhl(T$Z!xIQA7IW=!vp^7*<se`k
zBRhAN&uc!#ryV(t8k-s!uS1Ipul~m3ZPW2b2<y*3Qz0o^PO>xqX^5_rWju}(P%71k
ze(hUPJpD1W=B)w|*#dnA$M~H0m(bZ~25e@#Z)A@0hV%d6hSDe8qInCx$Y_!ITiT&m
zU6BTV{RX!EGr;Mz4o$C-B})~5!2s=t<dq|E)5;?-HTfYXUz&{1z8Da(%U$rV5n+GI
z2mFuCup>eXFkYsZf7~+(OMITg>^=3kg`K$;C7s1Rfz0Q!J`{Eg4aG9&7u=Ga%q!tL
z5;N2oUp9|rgE%9)Y0d=9<$_@QQgfO<*Nnct(S(hz&A9K%S6rTNMt-iaCMmA+MDqG6
zH?C|x2sXEiPnrnvw2V4gcj6Lmia830ouo)`)OMU7b`mvY7J^}J1vcc=G7sI|0-uAa
zFw0emy#8?>=Asxa8&fdvLJi<8S=he*BXicOQVsP^-q1pWc)Tx%SiPm-I8&dVPq>NC
zes$mtmff1${21NFw}ERon^V|~K(+V%uq0TAJRdBF$;OxAXoV?x{%s_jxiOfUZ2p3R
z8-@jTf6v1vrxAE!yd3G+txdaBhGCzZG>ter2pZ=*FrSPLb(&Mp4e`+?&X@`+(iW&^
zkqe)$8_~T<dgQ#R5;=5Njh3b|*L7G6CZ9B>hko^-iL)Bb*xSl$^sItuALYpX9>%Wf
znd|5>D~0Q3{t}ljH{h*BEIJu)5W2higUP!OSfZH<Ar~WIbZZ)19DW{arn#Y{;+#-W
zAraqX47S*uArSWE2}-tkatWF3Y`(<i(>{(+u}Fp_OkIYa>+7K9&|A#f8Hcu42n2MM
za`SCwq228ba6!tJmS6jVqAPC;gzGXfMOi>1Vr<baMF|8i)VciM-}vP$_g=rk7UHI#
zW$g5iD7VRi_>KL>ds;+5t!x#Ty;3Lc+5h;YdE4>bx=Z-sOfeR)v(cNIvg}Ob0J7D#
zaINhf)~5W!>`|NHjJG=V${z|N>rBCB^D9tixzpilUqLd8a>9iVAZyZNSn;P61=~D%
z2!4&CdR1ZdZ5y!p(1aTTEQ$DR3R+7uzpvpCoFuPDb7n?E&atBy@a8tIPiw`$-%g{?
z>_d<d`I7S<mIrIajDfrA2J5!SVt11a$*5wS=LOTyptY9k+h7LAo~W`8*)}L+UfndZ
z4EuMyz)2-0v~#)w+Mf8rDekc(akCa+euD;#oJX;Gu`G9Cq9)n5I|KjiyNc$?M?mMY
z7d$gmB-fKG@htnbdKVk0F4p8zMvHN@>JOmodstG|4A#k3#JoWcgWp$!d!;9sPt*aq
z?FM9ZG~;I98v|C`*TQ70i-^X75dZBLH!)R~B;*#L<a{@Zw(1M7XH15Pzx1d}fh8`T
z(!kiX`e?pEf!?4t#AoVEfZ2>$azlmX6K|qc%m?savXgb*vxMEj5qSHuA{lDdfXC`o
ziPw5rx`yS}oJPrWX|exUzE+jbOl`!{r3vUdubnqnkwM)e6_Uw#o|{fgfnPaV#5RTJ
zUd$BG`D%k`XWTEe^}Ed3h9!XAT-GDjJS?`ix`D$h-$2984Vb;8ocD8hf;LfltncfI
zRq;PzV*-ylmDfOH!ep4YQi&d7`@ZmEih3SH=%~tMEdRI}zp;FGUGWRfd4v=iU>s-U
z!um!>3D@?=ie_#84%vZ=xcK25%<+~YOSd0}w=Lo9{<;VfrrgHE*+#VP7<=AsYXRG9
ztRM9(7T-r8db9ag<sn(J&fkV;ywK*o7CLinm)GFp_-`2DcL^l_PVgC1bGa(ZZV1};
z6Wu53f|tet2%OWyMa~&SoZM#%bN8lUd9M#@-3-GSf*AZdJPB)uw1b!QYhlu_2mH8i
zo{%_3A8fV5&}m^X8Y1Jo7d*koN4!z6PahLG=GHiDfmKFDST&1vnckT~<4RqYX*VT}
zWlx1$?tg=f3QHI`HUKdmDztsC0?fX^`p5i3s2+I7KitXq+lI>gCgElbXEU#=T;^G`
zSEXAc*#25oin+}V=+Z7-T9qhI-Jk8ofva-VP2QRcvh_Hx+yGw5YbE~fW=y-u?Ytzc
zMW}7BO$NHY;y>ntJ+hB6(1Y9|{=#I2vS<b?zx@!j*B>N1HHH3vN+F<LlU}N>!@%oz
zxZ0|hFtx4*^Aoc8FNZ8h>mMz;{+<a*$h(M6cenEq<!VszY7jNr6O7m9s*$k0yYSsx
z4PtOang%Z24LOht4HL_t>E%Q4|I4^6>as*9*#?}n7r?t81~jJX31m(Qg?qD%Nn!LE
zFxq|=3R!kRX#WcGy)SafSDT?a*pHV#^&R4NC&IYnvh<Lg1qr<)MW3y!!i0{W5RfHJ
z?+-4+tNs}%>iStAau9Rpij+xVbu27>8HpFC7Ber_Pq5HaByD}cEOYjPD_%APR)5H5
z?v2}EUy=){OLxNV6DMI3>(->N(4k>IKY5L*93<cR2QL52Xk+*<vGCJoOe?*Cg7p2|
z*tKTFQ9X`1X&Shz53R^li(6n9)W$XLisTaQqT$&KmYvryg(L}Md~ywF^y~{1OjjU+
zjq}CU(rjLJ_X_l%HvxNxDhPa|h@w4*c(3nlmlvT<!VZ{$-TASw^T=SBV7wNa!*sAj
z!1#f>!N9~@cvjJd1aR!R^jVB)DNXoW|1U69q;RRI3RX^(r*@`FR1)vQ8GUSo!P}JS
zhb<=5iE)#rPc@~DVS~BDk6Di7$5P>>-V$(naFmk>tc5o5d9bNw1~_w?U{WeiBhJ+_
z=Ic$i7L+2x7O9fP@8-B@dpJ(MZ9s-f8Pe+N1}=KHG+%za54_uYAj3Y5-?r}}3MY0$
z-Y93ZT^I(#7}w*6cQ;n2uH_=)t?BFqvFLk>dAt>C(ERIFKIK1k>i#l{x1D~8zr&xw
zFYNQtxHO24RFJ39*P1bK9P^~OtU`sAb-Z`fAnN<42(sVE(fsSyIGyohnm@>Z&*B}R
z;i3r77P1}B>iM92+khTgKa>RcI-;U-8TM{w-^0dMF8Iq|_%BhHBn`PJe9WADeWtT8
zF!ehZ8e0#kJvTt$+Q0{etilB?cTsr^bH0&}T>pn*a8sGRD@tO-f<fs8+qIhU&yT^x
zZlgA?t577RD~-sB@kX>jX9y7q>k9Uj^nu+(SMJ4HeR7F0@<Yp3;j|61B=Gz>?sS)c
zROTzwtauB|=&a#=ZQC*3&w?}tMF9O&i8exn^71F3=gTurK_S`?d}q8=bw1z^W3MG&
zM#-_c+%Y2s(qbaR@^owSoGx^T-Mjly;&fIVUAmforICZ%MwhUB&jm2qvmG2}Xwb*$
z?jV^{CG0Fxqrpk-upp3~>5YbQ8N+sg*QN1LmEnvktJr%tE#INFXcV}f*5C)v)T7R)
z4e<6%3%Y#20TH+!5_$wV;nR#~7;W+f`&sVLWq~T)GgphOy{$!a))#=*<Oulmf$fOC
z&%^?u77={xbKGJ&1SfpC0A8Cygu%JL(Ei{jl%F${?!Tc(M&ur5-XkfbtY4VEUYV3I
z9(sO86gUl`eAwx5uIXb97^)n_xoZu<F77alP6)=b>TBGhC;?giMw%R-t4zHQu`@-p
zDw|vW!CmDKu=57Myw^pHMZp+~W3w^aW(c($eh0(LnlXOB2#04B!qo0!2wOf1UB?cA
zNn1K#H3Z{|K}KZtWNlg&YC-c~$3f%M1c+yTazTbI7oW~txbZgh_18z3SFb=deQu$5
zT@?g5?uVFNY*%g_h7QZ6arw_mY;kPB%!)`TZoUe&3Cwjmmfhi;wS}fbcj7xIGjiaY
z5%oJG52OFy0jJHQ!98;&X0FSDstY=_c{qC(*InW#G%>cG)ei_crH<WZa>V#&FouSI
z0uvou3WEqr8lGVM<N$2A<b~F2<jGk#EBYhekT~CF@4g2rc)uhAU3|BILDFg7WByOD
z9%4i!qYQ<O^K@X}%b$=Sbq-qF^spzu7kB??gU8p^vEL&dDu${Ox41BH(p(^xfAR)i
ztka=$jdn7}#(%uVnv?jKIc8(7e8io9R>J6cuOaK3CT)-3gbq@{{EqcT^l|t~+@QvM
z7>q^GNC#v8M>h9f{)P8HR011#mLU)GfW+PEP)wJCU1%z=*OUP!9ZN7~UM(bjY2~e~
z)lfy(4$pkbz>s5Jm~?zR_ZZ|*bV)^=`LF>}_pS%w%Dec;Lym6Ql!_C3*j_#CG0S@2
z=W0e<;YG5O?c;l)Y?>c58^+?}f>U5FHI);b)Bw*-kuW7mio)5IsK|CC4$B)j(b+^{
z(vlIFsO1QcbaY7enN_?Xr3jZ6G2i96YOeaUAv`sV#0L@XxLr-1cFy_Bo$=KoD>pFL
zNBkt1I)QZpJ2FwMUklM7#oQ>JpqKF;c0Cc0gfB<HdqOM<BN>}(p%(44Ji#}InA2+?
z*<M=e0+(#}5*b|t7Cez7V7VD;jb6a+Y{vH5n#-APZ-Y_tGW3<YKFRVZ2d9`)Vdl$X
z_|auZO=Z{}FLgY-vzgH)U7BRLCF?}&lc51iia6JZLJaEP3D*yuV)NWIm}zhiW3LZI
zmxBp>S&$zTtdJ!=><kk$YdMr3W1ZLI=Rmao04OOp!>>()$QMnD4inify*!v79qxfG
z`Pm@7(i=CONrH`GJU+MD4~K8c5G5fSJ70JUlh?32*3ONvrSA@eY?sA|pYd#Gw+WPv
zR6uTl3zS4(g&?<sVEI3e&OEHf@9V>-d7kI_Xf`F8a-Ox**N`NHkSUonmN|2!ktQJ_
zm5?L}l{(MbB$cF+QbG}mh=eFfz5Dn6?YdlDr_S^2z1HV*-~2z2n;}h<H@w7BH9cxC
z>&uHzd<E|%cOX#J6&w$@bJti$_EuU6G+f+`!?yEKWu!~9;+S7aZ5KS;QH;l1t;pyl
zSFwAGA%qQk4)1LAY18Gs*z)kGsQ+*qMj6LK)+tRom-Stz#$<sm*9n?Cvmv-C5BCVe
zF}v|O>u@%}#jOg&>W2btw~+uwTg8<f7S!lX9)=2^;f{U*bHW_M1&?KE`7sr0_q`E^
zMfHQwXFF&fV!SBZR=B;^imcyMg5TR-p+HFy^>co3%XUS9pPv+&O!i}PoCNh<RD&Dt
zsSw}cIlS<|EI7g#D7`mk;V`Qd7`C(=1OeRz((M0_QNIr{nQoZUFoeXZS;LiX_P1eu
z7?WkPAv{=-L?mB;j2#cS#5N6_qMO0)aiwrO!-fQ&x-hY9V*>aG3nAi%41JP*4a4WC
zQd>n0I{8#LjQX96ubIy%?2wSIVf~reL9am2_34hMNS`LHRf2sgbx`CeDI%=<Fk9s>
zy6R=}ouN-5TG5vHs2<{q`ddWiM;KRh_-$<aQ;MZ4ne%X1zbHFm5K`B)a2GwT$g+bo
z;lMjp2wZK(Ez#bHsg4RDxF9a{+GPvb6~$1k_7*ia?8gIU>eTK(Bl?l~I&7wwV$GTr
z;FkIwWIiU~#<=$o`a+&;%@4rwy~kjj9`n$y@e(_E-vp87UevxZ8_K=Ss4DBGB`ocN
zb^8VMP{VF?c97yWAJYOs(=d_n(<VM~O959i@&&ix*Eu}gB_KXyH-WL@ICyuZ3qwpS
zarH@aa$i-K^vGCIugm8kxUdE6U#q~@E={`RXf;kgXh2IMwLrSi5#t;@_}*_Hxb&q}
zpq?HKVNq;{@Tr+^eY*@MJ&`1zCA8`H{j#+GlNDXC=@(`!`Nqd7s_`8A4yLl%n0;~s
z_pT@$^5hLjhm{JsG*6LEEjR!*t#;g+U-oGKR+?Wk#{;DIXF&Yn8~FN84ZexArqgy-
zVa^aEnsEIdOj%jS_QV6Cz=*nu>YE2Sf0F>fF(!~!7z0%@;b{MFDRf4J;p=Nz=$g3(
zYAyuAz{zhAw>FU9Icft~OjjYX(Xu2eg0XWao#UL1qC~nKx4_5Xtk^|38RdQ2xSA$)
z&R5Jb<|fLVjg1jKS$7V~&RF5g7qt*JHyr+M9YTH@Dv+uCW!$poFFv``g$6e@NMPq_
zF8M?^WM;+UBt0wAMrzUT)<ayl>jZqrXvPv5cTC+mpF8{hBMuC0#e4Y)*l3^0nZDhP
z>xL@Px;z7tqh-f6dk0})<_q!Ja|BcWjbgqEd6EAiIoQYU5I+RgbotT-EIYdnwGPXY
z6UW(EVrqz^=i){9W~mC9I-Yfx&y7Ka%G+$d-@=_rF{a9Si=gb98jU+;jEd6QEQ|dO
zbDETq&XJ@e)+NA_vZvs4WGEW3&gZzvmgH{J5fHc~@!>Pe(6LcSzu0@i^sQ{~@wZVV
z=pG~T6uiJ+o|@#hrhxkYnFYIODR%092RJB+P6s!lZRB}=*v*Gve`F-z>u!W=K55WL
z(nYv^l_B+s^g{=!B1k01_|Xm4q;+))n1A>R_V%oYts#lS^=wJgu+`xIUo2Kjv%ABE
zF(PH7)!^mQ%3Sb?Fk3wx%b)geHE~-xe@kCD(`!hoho)oEDCQCAdkOsoJ7DLsOYCj{
z;IDoeib`*Bn)#3MhwE^hD^wuQR!P$)doR%Rtc09xBCfXJCM3n!!sB=$Rnuj&zcdxd
zx~GYcuj-Q7*9XBiu$y1mDnkO#KH+uNYS70KHq<ixG`8yM(318>j8-?N3neRng>2C`
z^eFGADI_63ng2WQ2u%0r#DZC8A<NGj;!6JDs#_mGpseF)esl$Hjt&LfFM!<3X7H}8
z0+vR$V$5T<cM*3$?uzXoSk=q@4wR!Ay2@DF1FX-t5?*B~ld_r2aqE!J%{zD*i&s5`
zrDH6q>+Un4G}e;rzrki)tCg{9wkEH{dX2Wf&cnb6=1q(#WUQ}u;N<rW=BLP!Ww9(j
z_0tnOQ|dtQ=AAh0`FHq|$>!gw|3UfMHm>XBMr?_16VEDErGcuZ;;}z1sNqSLmz6!m
z2Yj{x_m)72=o<@j)(S|UOCsywHgHAl31UT?tKidV1-T0rqQ?;{KJ}0p=f3|Ro?9eC
zM2&gyEhP@Us?_l9=6IAnwE$~e`?yWJbckW|7ns~#0gVzDx$k$_ULaxu2HsVNcge3X
zz^agYRPYlOcAUT$_Z7&2gCfkj+=%kYE!@m&o!D&m6*V;_X`U0sqp9yf!lV!-1Ip0U
zu^VavoA{hISFS+fH)pT84JW(x;GsuV@QZ4YuJ(ys2?(IV<q!C5S<d<1xrXvAoIg7_
z5+3BS?A<XvQaP9H9~w(BbOmDo%WUI?x)zXAev<2W{}#`J6}D`&;&QUj^Fu%Dlk&B%
zadUtS=xorX)m$DNs%=N}+tPII!u$B$O^kbcv`J6>OV%s%$GGz`Tr~{@SNU?jogN3>
z_W?LC_cyGwse(~!lt_rwVrW`xfIph<!PZ4GG++1w7Plti-iy{Gq<;uR{Q1bmuWN>6
zzb4qF!gk{S+Cgw;IG<J}q+2VmLFE5trLJQ{HHT;OXX=dU_62RIuD*ybU(RyFdk#Xh
zxh)xzC?wspzv2%10h@LRF+S`Ue%a89Zt)kPf;siN_FaWhRT^acgFc+lXh{YS)nLz@
zix@X*H?$`>fQzUDoFmy@ys8OvPe^0Q#w!qNt3n41T0t<TvM}U%Aowc>a1pl5e|C`d
zEWh>gGP6`k!cI$^e6AVtbE07Q8wt9#z=Ya6o#*7sy7|=WN8#`NN0^p-9p=gP!fDk`
zoU6ums^4`$XF!7FT0cSl#S!%Rvj>+H8`FZ`25$LB1+sXIDfzM|0fr8gqShW8a_p)x
zX*$CA%C!dg;ExU|D^RAwqJ3P+sKd<X#_}JcA+$mC40gDL;lm-UUz+(I2PO1rR+>7?
zN*3~R|AeH@ONF%GQU?3TVAkj2v2(X7tr;2(W*@I%_9(`mEqV(MlN``~&qqubkHc))
zQt(lT<fl%lW3zsB>|7m+Ib0oAFj@({re=YbcL*kj#X?T~H||xH7+bU+aV7I?!Gr9>
zZL$T7|6Iy<JzK!9c&R{^xb&a`%lk$Q{fGq(!Mx9_G;zSy8t!C(9%!W{qrY-E|51Ga
zQ=6Um+JP?Ud&byNQzFC?tTW<xg6AFLa-iQ=njUAaiHunmj0Lk8(?{He%_F11{qb6`
z?H&#9T8mLUvJjG=H(<ljJaA0VqRC&rz~+b2ko?~}?04uzZ?pICJ5q-puF)W$U#Ed6
z_$#!lnXvOrtSDQ54vG(rLbecL`P*WVwZ~hiDC@z1$vUv`^$~DudkO`-7MvWv7qh2T
zv8?IAiPxg9;Jjsw1)4bl?7JfQx_lifyMu9!Pc(|Z<`@wH^I-jKQ>Nx$CSu1Mwy!>U
zj^C~F4Se6GLWJ)Lj93%F*W6+mRlzg7?lFWU8~=rccW=Tmmdy=3Cr`Bx2x!>xw~*TY
zR2&_{yk?#j>}NM3AFPgp-feASEKYzGWirI|<TY5vKSjL15mYaoz)JR=U(7zQc{e-2
z>p%tfc~dCs`bL4qg(v7)=MEo_8<3-nnO79lA-?CXMY|srqtBKS{&hkY`0vmJ=iS<T
z@=zX9JKu|pU+x2mbV*WH5rb=o$<c_S3T)pK#Gf=?0q%cnAtL1^SCoAU{3rHtGA^Mg
zIj0X4oRo>_+wbr-qz!H6MBvC?IV#0=($k#`$-!&Jq~F(sy1xFyA9(Bs#V*Bg+F%G{
zL8L&-Zxw!3h$>z9=q6?nJ+wZcK(*(1gH=20gxubbu5R~v6$@q3KT3%6hTj1nznPr1
zT@5T|ZZ;!(meYa`bO<%0%a00avLS+^*GE_#Xhrh5XAp5?EeMjD3L3Y?@dGgk2^Tt`
z)l~~y9+Zj$lHK983CrqF3uigWOT7LOX)64?4mONYAlt2PqMy7vnRovq%#*2w!eQ^R
zrF;jc_m$;pZZn2v@H%cvkUTl2s7i;)wS%JFIp)R4<TM|&!xXlgnde^t=8KCkask^V
zp&U1hF(ths3VHWG&bYg85tgqF;nm-E^W|!3uv@wc?0#BO)1hBkE~QHR<9;m!_C$zY
zpKpT&cOPJORv6?hP$D*085?a$k$B<AXD~eD4*F~qaMj5MR6b!2Y~B9^_s(x%^EXp^
z+UzVo^O*)AIWs|j&t<r$_ZN5eN5NJ*Ht(2xhBK{r1G3W(Bd7WiGfY{Q>c(fT_{c4=
zPd*PGqbIVy@H0rNE5LH6(NJ^Okk?&*11GvDlhl=6{MYn{sQ+D?rdsafXD2s+*>Ght
zaJ?Dwh(4LThdG4)nvklKD%AJQJ+A%xK6vAxN#t)`MbOZK)ZLNXT4PJ%_*{?8q#DJJ
z)86ovECW$_Z4&da?Gvw_t3YN?Xo9TaThM#c7!>+1gQOqVSVzzYr+7xNJ*6!@GF6|{
z`aXm3?d*HDHdcJ7<tX;gSOtpv4q%C#kY1mmLYB|5BIyoaq03nrTCUFIx{f>Zw&4vp
zb6OV`HSgmE&d&?VR6J0=(Fx-Y6=T4{VlHCgV(i&vOeBpeVM?_LdAar&7OkEQs>{p3
z-tHk^uVh0a%pY-&9wp*V#=`ONp1`Y4xdQ6%$8rKkwTZp{mQ>lH2A$U3gxG!W@%V5J
z`mIr$Bp<wi%h~TvV7&y^ungU{09_)x_b2?V(<4!y_Hf|-8Rjbx<L}N2EO_#YpA!EB
zTSg4wJomf6yo3h4@=}_nF76Tyoqhu4C&u8x+<f#rF984Imt5e6VdD1vDp0WEF&8Ll
z%sD)N4qngLJhf~qH;|qOtKtMS@Yfk}$mJ~PJ+cT7`G~M_L_OcQEuHgn9t~SqPbOiS
z27Uc87H*xp1p8(_hBEzj#@}Q=1NGzt5j%My>;2tw=)i)!Aav{#pxr$OT*@5W+a#r^
zWIkb@Ok;XHQ-v62oq~7cnjt%!WwX!j;uAwQ;^K3bv@K^4Rp)$xcEvM%hT#Ri=n>ne
z7iw}2tScL|Pe80$Mz8vC1GI!~;$mtNFtuPCw^NpJQ#l)czJCi=e|!z`vI08DGFMl&
zJi%2#55ZFR2L=ipF#bUue3q*R=jfwc)+$LVy?ZHyJ?Bu#atN{C+t0c6Y(|gy+Wby8
zQ~c0Y##l5R+=OHe3g^_(VOt9X8?(E)=?4fJ{S{M=$D;FT4chCX2q~e`WYv;tI2BWf
z_Upz$LUS|bcfNq9Ki*(%S0?7oQ774-Uct+IQnY||N@s7Dq&t@|MvX8A!q|*w!Pa-E
z(8{>jCr#+HjsMVF=_1Sw6;jR9-B9#RpK7!y(9}^wdFR{lB14-eP<gwCWz=-o9Qp_p
zYM7In&xTZf-#xzZ@O55<w;*xTSh#f{2?9s8hzppn#@R$ywDz|d?OHzveD_}mUl~<&
z(9Q>+q7!gzA;ri9#uu)5&2PM$#9VWRq+z!*Ewz&&>UvQSXntv8pqeU{d#Zx<SPA+;
zrwF?I3^?B<JzPh8K09AU@xmkp-gm)aG_AM@E(Q0Ya>;dUpK3&-6y9R+$&2uGiV@km
z_&R(&Ye=^&QzAh#1VnyJI3`{fqHkq3A3}G7z+{`Kd!-}Dyg!PGPiCUMc_x?YU&7y!
zx`}FeMnw7aZpeu_57PRhL2y`}+j@#+WLZbC<D52mZQY39lvp=QZJT%?t`xj2bHSZ;
z%cF)*#}iqnk(B?&@eQ}J^WRZ$T2KP>6jkWOAan8$7`x|>6<nERMLg~hu1I}4A9aak
z?k<_5;wE!y(W^@rvO8|?p($W^su*&#j`Ox7doXuR1%6H`We$cgtkGd?%L@<r-D|gF
zQOR%dtx5TicKjlSOuB-OE0XybwlA9c{S8JPS^;8f=8}CJ#2+>1aOeje>N(~zOxq++
zT1@8goyvhwc!Bl5=Y)WdjV(7`YX_e6UIv+wH^8SURy3b=m|ovAW{e^^D*4Wm2HuYo
z^{w5E@@p1BQGo(4Sm`p+KTQgp$I4UJ_is=j=fGv8z2+s~4Mn#lZy;opIn=o9<h*7`
zW6~-|9Jclk*iRkGl}z<O=c?0K7Wo^TZsdUCn1`TP@)MN27{eoR8UJN*B+Kc?6=*+@
zg%)KG5MvFX-Y%By{SVcp$3l1#^GYdNlj`ml&^0@NmyCVG^6vIr-_9m@d`p8w?UKYI
z&8b*a5e@cR9`owPFSsM`w5U^a6}%^GzPd9Dz5Jx;X;%&M{32rtI;<3b7+c3!Yize4
zlZ(N5w;(>@F-X2X3w><f?R$78lv_)qPx(>)lop#o#ifZx9E=A==eN+K%J^qz0=du8
zD%6=P5<NskD&m!>oUA#^Rs3}<|B8H$f-Nt<!VW|wWl*hJ3?cCun0dX8DHsf>@Ar48
zoFawlm)to=6@bbBeUR^Tg6<bHA^plWtZ?f^L77cq>dj;>S+X0JjJ*v97RllvfhOa@
zJ%i$oGVH&pPY?Xof=Oxy<iJKFaGZDmdL=$`2U0b_K{6QB8@_Ozj$#a~UM}i-U(GEu
zYQQX0fTMRB;KVaFb6OOFt#&g(^Y$MUc&8MMf0YN-vwnkHizj|QGYN~HiMRrf-<;sI
zT%q0UFQ8AO_?)N^-lSZGgnbNU`-woDS;c1Tmk0T=i4J(zt`173`a|ou&7gNej`+vg
zpwAsov>&k>Dm}(w4da16`x${=C6~Cd*KA<uFEv^+&I?o}f^p?^V{$`Ln(V!!PHI9m
zY0x5d^19a?MVlGRQTIGQIx`u62fTyqp@Xn><R37<br06P4@GCcM|{-eC6K5W$;J0h
z!YOXLRAy-`x-9Dj*9kVX)>neMhK~hf4OetHcn=QV&>}LU@1o?UXt)_wjQ^Z1$=2%)
zP)!%&^V2C<H2)f$Q+|c=!O~P#s}`172H>b@b8>di47AI-0o{&v@F&BVd<(Lo4Tt)%
z!0iNgvSArSR0lEUcLLuiP{G?(hE&kWc6GCN@Tu+Wz1vdHoK2xPX`?b(;b%>bzH2~<
z_b+iUE(_&{AK}uD&%is&g*35Ki=Mw{K!4L-+<5&f82kCc{mlZx!f@z(@Gd@*Q>QsG
zH@Ok_tmv_YjC&p=Ul^EY4A$#+V;(gmV!J;epST#f%k7xh+{M?dsN;1iqERrH7J7V;
z0q^0pSQDPf-0zP$)wmlFI44`|V{)4R6DmhmTiB4hD_Mu9Zwb$T)hC`3eK;#xi>wkY
zz$GQ!7^n7}fAw9Dm=~}tNqG;tkDQFDdxrCZobBA0AI2m$Sx6FJ^rI?1fPty4VEB0g
zo;-0GdP|@4Lp@CBIt_pP8=^_q-qs~=B2B1lZxML3Tfy^!U=#%H;|6*j;GHK$xc{*Q
zIes@4+e=oXvzikA)u=^T=H77n^9v@AEJMYr2k^m`xx}5;gYHrV+M3BaoZYgdcU>ZX
zEy$P%<Ueup7nWk{(y!>>uMVko<>G(`72K8E>FAVu563t4z%Iu!eD|dk>hhkVz{(4K
zrULglDF~0munf|$t*Gzzh>s9Fz|$FOB-k+<8t<RyTdv1){u88VfSeC1G-=U5Kh~{s
z?!czmBOuVr1vb^+#;7UMP}@BfJv!tltU3kfvYBf<+Z!ZaY7_Of3sB(Uz_skTEiw((
zr_SBmIL}B+>YIOu7bplOHfdXc@0uchO_3?OrM$<fEKjrZO%O~G%af8#hA_qR0!nJX
zW3J7!jw4<$_vI%!GR9Js{3})_1%6L?xe6&#JL4(nEoYsYwhM)$Ien_vBu7+TW@1^}
zPY8+B!gA*St=VZ0ElWE&K}&1?IbSK#>vj^3n}p)N)76mF{1AurII^?lQ1KWc`yS$S
zSRHCe9FBKlAf*$1F9q?U85sz%9oXV_lUuV(mL@9H^7jk$NtV)Mbh@4hmg2qW;h2b7
zk?X)~XciY+!Lmtr7DG*V1K*;n4^78Uz_6@4cs)i)HhzkQ?fNevEj<~OKaYnqC2UXF
zsZOQY->UxgFFbeEgluy*r>4&AtXL=D%jSLpp|uO<L>NL+zdH39aa6R<^EJFvdI3Q;
zy3{40m~p-ip=tLk@cl0s17|;<xS>^pq)+OCJN0FFw4ohrU+Q7YR!0bCKGF0UH$b$d
z5|(|lg4qV45b=B)Xi5#@n2nFnt>znJ8!m(wJ1nWHc^YgCz6s-Z-DBCbj)_w{hfvJf
z42_%5a_ah#AZ3_{Ey<<aI_Vdnr6Wg=MO%`2Y!~1>r5#(Mmx9Tz3Y^fz=BLXt@HH=n
zjsZ2|^zkqJx?@Pk`g}%veizheTXC6TC14uC?!Yo<F}k4>H73{LtBC^QmcaT+cLs6q
ziigO>+K^!?fl%}>mb18IKvT7i=?IT2;Cycb9~4lBfkaXya4?3rR|-`6%^G+pB}H<*
z4N!377p9)N%-a|$5o^W>T)R$|JgZ>*7iSA>XfUU&=mgiiB}il8c1((}!aZyj-MAnM
zWqZ4!wl5NfgffSB#Rk;Z@&gmlpm&u*K)v-P>W@h0yH02Eo(ahq7^MprIbCXdl67z=
z>C$bh&f!p3Hgo@MOym#8!-#`R@rPs~dd~fbBbJ&_N5&qk_ozm-lg9L)-YMMaqX$De
zbI@7!IM=d(bzEPZ$D)m=xIra$&UhnDbN3dY?>Q%kI>=_dnwc;*Z8d(9E5O~ZinQJ5
zE5F+F4JKCq;h*{H)6|Jt(4`RtZp8|;Jhq&(n8SV-{f1m=fD{?AYb6NwNYg4AbDA0u
zDDIqm2^thwU${tzTRvJqTGLhGx6B~=P7$%Wi5`XY70~3AhC5Bt*>15AW`Zp#VJx0g
z3MTa8TwPMWbw8`Y2<RNDNETNKh){P4@8SC&zkjbKt=|6xwl`VP)G<eZv-yql*}FPF
z>Kp4dmxA9fW3onR80Ngt!WCbCp@;uQws&Mb*&F>Rm}Vz#A3p`XTqUtK<_CIxdxp=F
zro#Ijw{iIWmzbBJKpyN;AX|1D(}ya|OZ$rX{5-1oZE0-QHsT_fFu#avYbHMuzhQBz
z2`y(_sZR@yN%_$rPPVoczEpifyD6_Bt^W+%wl$(-^FlF7%L88i_QbY4W14eGn=et)
zWbDu_j0aPX>plW=|E`2DdDCFb?z1?xR+2W@J%;O^|1faZA+dAN57bW4Mc4cH(fP?0
zk>}t`G&V7Zne{Q~;T6W)R~kV}Qy8YVwZTpm1JpMehlgK0LW9wkWaVp3lJR1I`STX@
z{)#D3(7zH)L#n~|Ru6x6#6`>)_|2u7cJV8w8qhm;bja~K1>*W9fnVGbg2G)9e1(2A
z^96=8u0b{=c;ACnQjBZe5e~XpA0hC$qqzOtLdHf><w_LU?A+y%$W<?kb1Oy2cq7DR
zGiN}!0pl8o-iyYXsYCtJ6pWfa1dQKa!2(&fFLa*Hd0n1~B}a77SjHPTQzf$9eI?e}
z7ooc3C0>#Fm;T#+6r-X}VuAN582UCFBgTliPR9DM&eEg-Y&RtBX+rBqv-|8I>sB9O
zfU@(tq(o{FXzr@R2i?+StkV;$_`VmlN>ypmk7lvYDAuc=eF^0K_F#Mea}0{HqVn6;
zLfWWW@EJ?_TE>D1SX;yA^bX|)OrF7OD{Y!BcEmuh&ls@I4!uR^K=1TvOg_oHZ6EbX
zQGG8quU4gAA(~hb^$<Mn#KXbAx3HDd$ATX|P!YgmgjFhUH(?j<zh_2$4=S_%+FQPU
zK`IWP$}(P}Bb;-|QgL!lH)9E!Qm;*mVcz+T_)KvqSP%IQ?wl6oZFI?tde+lBc?$m+
z8q&b7K=I}SE1+WPJe<=mL*#ik+-i6aOy{y(pH&`u3|HXPjqCVJHc#*j^9?1k8I10|
zB8c9gNA_e~!9ATi#4(_Wi#W;pRFz`BedZwNak-u!tGpQ|A5){LYcqM*4b~|0+>B0E
z>V)D+_u-`6No?t@5M4e{i4!Xg$o?&gv}2?`X>3K_-|QmXSZzTE%m+abs?O!QTfy&|
zaxe~efUapvx!z%AoMcNdPRUj$j)gjq<6y)4i)Aq*rW8Mq{0UPubcuP~M6^{%;BL{s
z=r>iCD2JL-`&XlRlfSI@xMCT5H^)rOt_^`}nu=tspb4)}R3UNMiu^a7Fzou(!Z=46
zcwhmKLFuiS^GA_?d0`S-%+n%AmA=83S1*xO-+<KRqmHCD-RZ=^a>g)q_PNXF+#Sjt
zhh&_qr9`{#^>9xz%&27FKfJfM7(<3=F`xY%-1Nkfj9uvufeY2CVDw0F?d%>zp#;ni
zJ&8xzPF|Ilq?zhHtQ#_g{}W<B?8ElKS@UABWiBE8(Z+Nja{wH#cXN}rDbq(g&SAPT
zb6$*L-TVY=QjV6?>H9I9|DQb7zYhSOs<hvV<&V1<lj(~zEv`O+`>N`|T`L>Xdmh5W
z_gbXn+b0~Byb<FTwsHfG!B94bafYfS>8o)9B6=DR)jL0d#Tpxu895!N^lKA`9Zw<o
zTpt{LQVF|KRfs@m`9!~sDr9{tdoH&wgMuAjdBJzni5-3xB%tLsA2d^g)SL`p+0yT5
z7{s~+_6BI%I|YB$YZHsd8f3p2`#V+V`Ey?cq@!M*To__PhP6IK+g}&Cv)RjV_xDt&
z`IZ9rwTF<G2Xf%&v=VfA@e+qG&?HHhjA%r*3zTKtgU+>|plY@@`SGC=g0(1m_Y|O2
zc{84W^AJn85XNNOi=Hqae7|qUXxn&nRXl+;&;KFJc7+|2<caF1PLR~J#ats3NF8Ju
zi^J7;ZSX9{JeZ5KH`T!{+g{+8)#Ij~T)dk@p(y1Xc<6O=IbC)z?RN)G+9Jz#z-@5g
zK^ml{25@C8r{^CY11^O}!8|t_RPA%vJpC6}^k2Hju9nSn+5Bv(R|@#oKj4NQW$Z=I
z#W4G?3f;c&4ZJ+2L?^lFkUqr^P^`gbz+-=7si%NA`-j1US4QLj%Xmj!Iu1Efd00_+
z7~>~*!t7hBv|7Ce;^yaJ^7vecTJQsXIhK7YGNE02r}8ORRmi<vjEgtL0M#dJ!I(t~
zM7PeA_SY?dRoRiatttZV8hn5nha#@~L<Agg2?62OJl?a)43r(z@j$l@&ATH>^oHl6
z<L_Ub&#n2~_LQFxyt54cW0^<I2}dD#Zaen>5W%*~T4crD6nyKc#OBpqoN8_n#G0@j
z+wYz5#G)7FCB;}iS(Wbj#^y8raT7e~Q|^p|IT^mgfc85FV{hV4uyJBe0f|NE<n#gT
zr2X;I!VWA=uff@d_u=)m2uMBu4h5_GL<^(ap?%g%6wY{zUNsA$=uSOv+h7O2m+QHF
z31jL}?*~4CD`9MEIwYRRhHDGbaM-cWY({=u>^^1*1j_%wUiH(wVf8aejsD-9*Pg3B
z)e8DW%fVJvj}93B0(H~Ne84{~x}`^zHYRNVf#-aY)1+=t=cT!bQTcr8bmnotR*c3c
zB<XgYZ&?3|ox8)%h_cUb!8&S0A5|$6ch^7o?~x@*KV1P5Z)BP0ybNYny~2u-`4IJ0
zmTo>;i-*Dr!MF?HL)&?jyyb@O7hj<Is#vZvI0Odrf}kQ{Bzj&jpzZdx=q{xI`!+tt
zh@DC>SIvmL36mx*KQj2{6M?J?RxTR;(26*Js>1~3sTg&`2Y;+B01uDF+@U`=A-ky%
ziUWFJMD`HSz2kx+wdFYb8C$ETU*`jMG6r;ACoCI23O<Z`ia#b*!OG)ruq<vQe%|&A
zDpq9SM#&|pKgt9?(+Id<$ll4ma(s*4YJBPBkHP~_#4`=9qprRomF&-l2Maam2iHtY
z>?uK6sbZM&Lyd$-nbYI#!DyFcO;abU@xik$fm<yPg0_F6+TKd&Q|t%l={iuey_9qQ
z;lcUT4Dx>^qS4VdjQ?^W6$VUBfyciXDC@DNA3qMEv0*7F?XwDPz0dLsbx*>DxDGs*
zZcZ~U)nU_tNvNAAO}(!^2eEf3G#8lDf;XojZBaXvf7F0}MOT&$34<ka3vllVbE-c7
zFqe~`4SI~XCB5PhV^*##{1NAiYva_&8aowO>SIdI+OjOb@~ab<T~?u*>t5sH@7l!e
zOdK8#lqVZ3rRbs0vUJ-SC7K!V3v6A7(9L12vmWLLEoZKxHFFv?9-Izwk-1#dyAaSe
zGDDNa6t~>}fPxk8u%`JmR4e_)07n8j0~h)9H|1bapurT0z96+xp4Ja)QT5GBP}n<?
z$5}>LWE~{-&5h*c4azuk#TLkwH^aF{W3loU+wb?)!d!(yba~>2ff43>@!ov!R_Vm|
z9Lw8x*76O31E9UplrH&K2+kcxxs01C;I$|pbL;klOW#@a8j%Xl=?_GXk=H@2e+=Cx
zZvyMHFX3+eH|Waq<elg?+`2Fi6_zT|x2Cpa*l;h5=y;DOzi2a_Ksp4E)1mI_d%#Yr
z9#V7G@GmtTuuZ5;98<>O)XQn$9+D2VP2Lzri{KKlPSBS+H2-`N4|a#(VdEeaxIg12
zUTDVJ2W_}CtPj6*%!Ax^HH^MsMSgrq!PnyNpjS%KNBJ`7b_av~FiZY<auH7J(qZ>L
zc0XNl3L>%=gGyQ~u77qDV@n!Y9&s_cK38H+qe#fPX~Ep9EDIaM<}b{p;(u`$2xA_K
z1V4g!|0yFm!JKeWi{$~B?9~IlC5H5~AP25)vLI>lAE3<aErd74W0IPXrd7&P=dKC-
z?1OD^)sgLFo?5^zJt2XhC^VKfK%d07qU!ZgxV&WmYeHx6zn{K_q84*e%+p$2r|<(i
zlmCJKVNLKi55Y3GCvfICW4^LGifYp^d|mn%CwV-=bcIihu`J2W@@9SXk?v?1J{GOI
zWXV0Y8}p2QgO}K@HQYdjX3H}^$z<lwaAJGU@rK;ke*n6=rQkG<y|0C@u<X_o=1LP#
z*)_%N89J9&cUs5&*!2K^PtSu<ChXjm#GK^yhr!mehfA!x#EtQ=B*Av~pvz(tcrW+I
zq8C|Q?bvoG(t0bJH*5g5xg0^e`o$=?J6u$`a68N})gU=uxt#38x46Q&0DYRoBA2bX
zFzu)rsTA!%mx<-D#Z7^l%zBJl_m|?TgSWvwSB8c(?g#ma1N_<pp{O0Q9ZXMc$G*&V
zSjF|h*YbL7oe+ajTcvQb^m^EPx(V+xU+B+3394;rMBVm%g&|$WL}H{eQC!dq&WBHm
z1g;Ge9~!XEaH<*&p70Q&b+xEqHOm6N{VVEW^D^t^Pq62cEYVeAx$}pEkTYJ9o)DeK
z_ln`z`{NfExGoWR=E!!+%z~8NtT)m47BU38aGqu@wllxxrNZ@~v5(CKwp&v{aadtm
z&31fd;fd;}f}s3!HYX@Q4gLYOyspbVc$30*)Wa<}d+{=8d1VJ<DxRS6+cPkrIvzKF
z-3sNmHE4I~e_%e5<y!q-fbX7ZV0ZNd$cpk=rjSQZH#wxW0vfV)3wYE@^W~>RaKR!1
zhlG8>93x|N-M<+A{>sO(+kzo3{wv1y-sCHFw?pAMBhue|1ggUgsDFAa-x>TJn(nWJ
zfFb`l+t?7!&@UHV^QXW*^+ve=UV_@W^})ON7C3WIg|4z|f?#vT^vcWNd@qF~A8$n-
zv2L>F8O9pH+tAAT{lbSwV8v!>5?uce&Q3O<&dUQh(bIcizw|VJgyrqME^b8kz7#0e
zd(GXtqE9z`QKBmu$E4tF5*O7ef_WLoalxmLczqIM4CoF8#q8Vg>puI=tox2WyQXq+
zs&brS4C9JkafBq9L{L|@;A{KX?j`Iq#>Jh&n7DNOB2|d-QI)8-DIVR1GiHm@AkL=j
zvn%g{AMd|Fqh$%dgY5{147|hEwMRi?l{y*x!g#@JPl2<iJhyIm4CeckGoQQ;C(wH$
zmSGtuAC{9^dP15^=~ku7pDWX-le+NinmKLrlp(k6K49EVb!ahL!53+56veqab8{Z+
z6XQl{+O=Ugmy@^>)Nh@}OWhN|Bhdna6BMcXigWzBo!T@53;C>g)|XhSNDcagwB^@K
zOt^Oi%B#g_ANz!h>viO3Kb5BcIZKiGt_nm$QiDvoW=6&+{loS(A<PeD#<;h$xR{DU
zIC*Ugb49#`t&WAzZgvQ+{!}8t7K4~!I~luPhrnTZTe8|qgH~4F$4cf7eh|W*M{Yha
zuY<YZ%g@3y`W;uWj&O@t2BhA-3SWL{LvUmOzF%!hUY=K`?!Tr&5qU0(c3|GRk}jx!
zrAfV`p25%4GNfQgA9w37o0+`H$9+2(qtTYJTKv|6-7^`QIV}?w##e*7+;xbUeU9fW
z^6=z_G+6AaN5U1PiEY0Jo*HIKe@G?4hx1#}r=D@P-=AfkdOa%CEf5F2H6sm-5nMh_
z9`Zf3sBs%R>y%nk+or$VlAERQdQv&|hGv3u`DE~&ZUl#_C|Hbd#<Tn&wkH+h!h_AA
zzF3Bf*sn@sCq2NT1u-Ify$zh}l1yGP;US#PH6fo1KEjT?5Zp9Il?t1Z_y}byjN3jM
zmpy)l-E!a1{IU{l4mKisJB-MpjR~00{}1jO3aEm17d~F4O}5zDkXc41RD3ZNe~XjB
zde#@15pWuHwKwB{^c41PUIOMzR%6$&E_53c3FGg|(DX@vpsJ3Yl?>Izdn`Nf_23}H
zo5!R6q#V5VxdOjSTM!>{jHvzd7@W6WpK9rb;-RkxaU64Ywd@js&R$jWVdyE`bWn<1
z)CfhtBva!1ToS)-)L~qaR9t9T55GGTVgD^5-EH^;G7_eM?#3DrD7Nt1=f8(1Swb@6
z@+R0UodPkBd2sE2!=);k(xLuO@Cf7G>D<?(_N8hx(fJ}blI_rL->t@xtsQ7{Q<Y`i
zV>#dCJGl6DJ+_qXVDI1rjFGzoDHgSO^#|koEGz@fmeWwP=mGysVhlE2x2DxT39wl~
zom%9Zk+`6<@L>$=2Nd?8y-yWyd;AsG``|ctC}$rI*`!4ZhAss0+i1)*+=`l8E17pw
ziFE~jz}~fNzqHhV_(%+jTNrzLmBU+@tnviv{>o9!!F&)muzlp7l){Kbim-Ec8VI&-
zp6EQySX63r1!oioVy>qVZa0nw55^!0v>YwE*(6Pb=`CF2D&`?Lmsu!U+6!|mLvc;O
zSs1G#Lg&axUbse)^Q4kcvUwURN$3#orHj$F=qo3fXU$yMAF$Wf6>5cF(7ZMsQh)fv
zti8HaINY3zxVs&af%T3rn~~?QFJZS+KI$*4`hN%Tv-QJ4*)9p37d+<<=?_3g@p$NB
zyvWJ7zks*B8T}GG1y$ynQQNgvU=b0Af%78@FV?Hj>h>mh(o@5F&heam)?U2W`T$pq
zmn0><F0lXVEnM|i%(`<?U?ycvWb=}tF6B0QDBIF=|9a5W(hE^A4z)|H;dP5VRbRgv
z7E8wC*-wp7eBmdKKfq=pYt|v@H6qS-mLjEtm+-<&Qxf<22Jftu&Buob=&pyXpW5*n
zorU3iTJcSoSN#ZsA0%Vg;%3IOJdT<>mf_QVET^p^K$Vp`B$n;1_01SV&7~X?*Z84@
zMmk3Cmm<Fx--Ii7SkAm!fIcI`#Tg4uK{ZzgRr5~c_Z+q-<?GPYI~OvTBOq|~NU@Kw
zPrPyeO%T-m5rr;KXPy5bUQu)gcZU{&@Om0=sbUNAK~-p<KatCo-UDSTSVw-^C-IL2
zYh0f237yx6g8l{_NK0XShD>b;Ejxu5GA)R!`ZI1#N+_m$)+9c6CqpXBVGSJohdH0(
zxc^3raar&@G&n9rpRZ)OxVcB5^3h(fi*LX*y_Yc5SdvICOaSNAyT!f}jcH4q4V^rn
z<!86vKsT8O=*L*CmRIsnAX6jCF-?ZB_)5SLN3r*+8h$Xj2AgfdA=Rr>bU@`4Fzy+*
zytfuxZcl^M4~`%adlG9sE5MEU2Q`Cxz=Q4h<=L#jy4{dol4Y62g?l;kU2MlW;~E$b
zpNJoSOVJZ8awNa27W;&)AhglIn(Rk>OIfnWd9$MUkxC4<KmEwXnGwEAe3MUHqC@X7
z&XKm?Se(p!kA>s4Ntc{F7Pwo&{Q4}63u@<EZJ7@&G8!vX?n1~AW%wp3PyN69LFVbt
z(6wkOw7bS)=QvjsOwkap{wqm#U*hnyWjBP}RY2$Edw9_<#z*&yVs06PqOF~r=$jp$
z*j$E&o$PPn_;3`GHh$+UThNRxf|`C!mg_prU*B~aizX}*sh!lMt?|tJBV&v$Q?l{)
zOe^y14P#yIoDH4D<=~-I#W_cNi-tYlh^-Cp@K-f^=H=YP<Piw^2j228x96a6-6~ME
zD}-^6hR}9FJ};jX0)p`yCfd!MjWuL0m*^u)IVU;dbzvVgCLe*AP3y5yE1kbPhviLE
zSuRgbjvl^eNR37)kz}3knA0qS1L|xyI$RpA2AWZ!_@?Ny_dnQQe*?E?$x|Ww>~dp2
zVe9c2=x<&NK8I38akD6Q^^6&DSlS1t=cte+0bjAgnK6%_Uj*2nhFQHy*uBS^dPEVN
z`rH(1C&fY6swl4QX&o+pX-&do|L1I&i`kd%Lh6apQ2y&ampFYSd>fmNySw#iyBFhJ
z9zD))bGe3PCGD`g^gR?-X%eTHMD!iE7;mab5vRe2kT&%#jC-s?{2u?p`yCfhz;6^6
zh^qNwpFeml=@>fSk`Z_H9^tN)KE-V-E}*}G3g6}X6^n+w6}N{ba`H1Wv0EvU<@4nW
zn%*i>yPC(i<(@RzeIp+e2c!5DUrFL*-;Mfp^FVleJuhu&3#Ox9Vw;T?$+gs{Lslq|
zg=59wHGhDcv4mw~uE@c#dwDR~y&L9@Hl^Q9*m+_4Ke6--OXwB5W6q3eXqfdGTx_f1
zV^AiZus0#Hv!6iXjaoje(U30ix`U2?dN_5de|$qvFNk}NgGH?>`F`mFepp(Jf?uk<
zbGQlbAz8<_+=v!^UT#ShPUT}<o(k=Rzi2X>Wn(njaVFI#a@Vim{2~R?wU%&#4Ta(h
zUp0tFUo_@~zT~wwNRm`zbI_S)LVRv~6<?9kA$Hd*pe3i6j~HeE5q}GKA7y!HR82*1
z2QhRX3Wgg7kB~U(68xZoKh@Q!Q0bK@qh5?Xrn<!A{v^)wvKoxHibStv?7e)h34F&Y
zgO9jIw9S!av{t!8(J{iUX8-PX?Hj-Z9=}9(;#faRI?`H_ij2xpx0o^MJ!gV{bPU)^
z#NyG3chOdc`HF}5f{O#i(rzWPWJfg&9y*VOlDg!#+DoW!G$aT1#N*g`%fVOc18!+i
zrUf&v^9iq;F)+tQG;NqMxof``uhp=7^nbQA^|2D;Ju`0Zf8G4=%^lF7!a8;1ma=<h
z9RDumFWhS0h8tZspo`5LP}ls-sdpP;JnOA^Py7lyPs-B81&p6~<09|4<|-Zxh{K}g
zV_>DHDQy*JLW`?_d7dv~+gLMFezT3=z2YY5YMln>%QrYd+Ka+bU&2s#$^$T-FbnM@
zZ1F^9Hx})5g6y70AeSRU*8f%~r?^`v@0h`Ta#tpz0+!%8_zMIhW{NZNYI*I1T{z5d
zD#o4q%ln9e`4Gls95v3C#IHx3J^v*73$JkTLM5skkcgYUNs$|O*f}`V27-Q;;?lP_
zFnwM*)NVKnJ&y)4AUcmHO3cglN)ZOQ+03Ku#D5R7AVtcpqIF}K+t8HdV%`ei2=h+n
z1Tu!x+FJbL=?{wYU!#j!A!K>(f^QxQ<j-(9>i_O0j>=UbY2ycArgb5Hs#K?LKDppq
zz6w6PD}%+%xoMZ33gs*hA*fUk4O2V<O+N1Ev%QfI2;B!aW8-n-fhssrt3ZCv(<j|i
zm%!s?L&%7a#~^u?ES)+r9qN+LVOV}4>*$q=+qI5>=B7*>78io46AJhbH7~%)WiR?i
z-sU&$kR<-2bh*h}hT}&!Eh6g9heJiR5SKI(oMp{LE5_K69}>mzhxN<%_1}c1SI^Ps
z=_JlXL6z}N<3M4F8X3Rz9|kjCdFq|{eD<05I6n0qBr0_CjWU6JOBH~ucRogaTZvr(
zY_`4ZqWG_`61~szw$pZ)l1pYsaCt^CR`gv#XSc^9=T~K1&5dMC($2&}cK%&9`5?M#
z8bZy2J?Pc*itmn#0l#UQbaHbrbe(mFj#IIC;PW>eu*(DkPgNqmk^^^-e1nh5=aHIM
zVB9rjOck+bD!V(?86L&Jq|2h;<Nm{nCCw;<+N4$L2zIXAiNn_K#ek~u@Ig_5y3c)%
z7uBVx;jcR&nVATI!6kSm$cF6JY6s6gmTOqAOfPk4(M_z=*gHWT;tnli47n-rqu~hl
zCbV&ze#fIw_BMB9y*kmjB1LMN*<IkvBn%r73JKR4$GtBcTm$cMYWCVRedr5Fv_8W*
z%sT}>vrmgo{xgP_C*Qeklf$q_vjUTjt3la^N_4)rPo!wO6ZI`Z!0~|$eKoNP#~=L-
z4DALR?YqI+{U#iesDy(2GJe&8e8^v034xmTc;||XqQ2eL5dCKe^>`A<-L?==Th$d1
z!RcWj%;2k2`k^M?iPJpU3};4|lKf$hq1}5E>{quW5syCb3pJjCSGX+NH~-^X(sZa$
zP7QoUUK08C{^mTs`Ehaw+4Iuw2fvu*ay=D&u}0sW?=Iee>TMgKeAZLm%Wx@<(KaEu
zgA<@7e<lCcDg`^W7<($rnmKD$qff#o&UH#3KZiFai~chsf@=#3S1H{AXG4L=_^1TS
z+84mq{I8%l#F%V9U_!IJb)a&(7vy{$$}9a)p+9!F!JYrHLaKH{!J4fw^()J3XC8&j
z{O@q%suI~+-iG)0N|3<+sMxvLPMp(~!rL2J!Qq5=ctcu~TxI;8-!+anxE<Md{4xku
z+i;6y6v@^}1yB^y$y{7kM0uAV*nd06ZPw7Gm*48r-*Sd@hnzgMo!!PQGcCc+(tYSz
zVF!@`pK!tZCT#Y~Mh`X@x-wdiT>5AYBQ`11YiCTTPrn4c&A6!CG81aM{~6BX=ivd*
z<KSxalFN9nLM!HXV)3jRJYAN_m|jVodT}Zo*k=#J8RumFb_=q83uA3`z7VyzBtT>I
zKi*@EB%5_C<S;dWJ+rQ%b>1DQ@z{*HR30r^7w%wFGb*1xiac}jY0k8y&%SZYr)<LQ
zV%ds_e9HT-vVk~@t9(1EK-@kZPIu-F5X{|&vMiJ7HnJ1V{eOax>hhDrLcwrV5y~I#
z!RA?(bkfL34D>b=*C?&$Wa@N?d!-H7EY~5s?|lRR5uV^$AWe6;GcQBoawvD|<O^r!
zqkYL#KA=mRG7lw}u<j0|mPT=VOif4xbMgAW4n*~X4^UwH6`dwmGB(QqjCW-Wi*Lao
z_!I!%8*f95)iXSKbO=b8N|0oYCt$y54n$}7qq5;+^t1?v$sxtK{bvC@+vWh{b<cp4
zSsFZIJ<pA^pF`@|0$h3_AEP5pX+r*ccJ9{{M_8T5r1_t*;cguo@AHO$zTMD7nMX-n
z2$nPKq3c>C4y?Y<W|Y00y>U1<;<P5VH(ur1dSs~7twu~;qJ-^k$z1OKy&z1n;{(lZ
zK<_juwD$MGMu{5`CVT?|qcbAgd|5aY+6A=>!eDiMDe7pJp;vkruRLCpzF(<C^3sz~
z+*1o$8L8;;XF9Y;<?;hdhvAO0LcH~2BdQ<Y0XhHsKTVVP9Gfjr>FWsH=Ff1M?mJYA
zNkIGS8SwkW5V}yK3fisN&my|ah39?6O#7#h<d+T)852JsVH3{%B2V5N6cV3~F#ZtB
zt&}sLOwSxO+8!LsJBr2JxPS7*{DuY{`>GRfJU5`brpMstrBN{Kbt$_46QXM7OK`XT
zjxkAIIF@l+3xv5`OW#%T_k0!V@GBeEOI~9+%D<Re*^RE}OZdoPCs9jVf>_K~CZ4Jy
z%qx?mZml!1Ao?m~|G0n&(!bzy&288kash|sNm5tEK+KqM4_EoA(chA-=rZ{abE5Tf
zf|1_}`!jc<sQD2dS<0SI`zo-u%^6eozY`m7v!cmL_u=+XJtBD9Hu2$NIT|(?h%KTq
zK#2tL;Jo=&ZoYVh^(t2=DUqBvdR*ulJ(4+x`Hvo{<G#Co$Qdx6@4~U%fevXXFSDV!
zA^}WtGa)0RlJN7Tx1i4L<=W*ga24u*AW*}HADOF2{(fZlYzaNo5T8O}!!_Q5<)rq0
zI*NVif1p1w75<x~MD>p|U&6F#{CDdUi<u2#?dlHnks8lu;%o2_D>I%+3v_G9!k1`E
zX#Jpums!7a^mQZZzhMmowk8z{J74hPTNJih*Mnqi10+axLW|p3&e0;4OECEdt5)Wr
z)4389MEsi&>M2h$r`S@H?=oa;NC<2n!k&3T#)fWi5tS79fNj!Kt~pJS23=*&Jj*k1
zV0tHdu|4m?oJqI^%JIl(Rk}f1i3pMB)1~C-)Kfpvh<#>0)70_Ofpmxy>OiWV7YrPe
zq`sQr{Lf(Kg)C!xID;sD^&B&jY!V6kz7@fj8yRra>j|Wnu^HXDw_MIm7cS!AZg@7!
zjcF~-$n8#9GA#c(>}&iC5N!mF{$-qac{>DX4`8{S4hkkn(3)mVzAjOT>MfF`I&80+
z?midSD9wSUPx|2hI*T_P8~_<TQ(|?^ln6U+i!Co+fee){PT+8SVp&5f`iywT`o%}^
zoIxLko({rG!UG`g`T(Q%4<Y6sLtt@-E@coB0GBa%tE2`dn5ogCzf(Y8ILPPBy2xG8
zF{X!@1L}cqA$oL2!Xv#*EaG>IW%XIludE!pc9?*R$ugEvzbblJ6%LJ?9-^xtoO?F7
z0)7S>kfwpX=z4Gf4KiA=RF-8~ZC+3GQjn)v536vVdnxp>yMg0acZeOsK8xyNOx!vN
z%=WNs-MCP0yHJ99jLQa_wQ_W)(rIu=C<ocHIPf3i$~&segb}Y|@#|UU2b`CPN!QoH
zLh}%C34Z}4S4P8s%k@b6eswUk6wtBtd!X>D1ub~e%vd>JvGQCC-rdV`JEy)u-ZTs1
zJM9K{>B|!wWLbmW5F2>e%Gka~NAqqGcc6A&FO1)j1U}IV`DRxs;+s7I@BGlGF*kp~
z_wB4Vc<m{O=X`==DjM{i_g#>&R3yT|J~5h1Vr(28#;lMg%PtDhc^=E+E9+4@PlGxO
zq-oBv)BJbdnEJlZ0mDdyREdlH@U%9@+Ih}xefS;RJaV8-R+S7^s}tp78T4>%;G<ba
zyGtt!)<3(AiK?+2yTQQ4Ol9Ky(2e)<HihiAFfglS8J_DB(B7$p&M9SF;D>zi`z957
zZN4q}l3b6MzWc((S&Y^0?usP~m%&TF2k<UyI>v39ipBQ@G%@)-f3jl}cvuU0!@^!D
z>OU_M4!jdTes9b&N76W#xxC&@t_SzS=CJVWHxO`zT=8f%+Bnw@c77ZO@&8BBnYh*X
zbz%544=Sg*G@a(mnNsInJDD;kgvwNg^h0DUnL?5zNzsI43P~jm=Uux(QVB_ugd|Cc
z5Gv8Pzdyi*>%8YZd$098_ucjbPaZjrFQSvM{z4!`udV_))8h!L>u}+q%OLG_6$)8?
zi+5q?^W%N^exDq<D=j25uat56uBv1~xgOE&QlqxJYx$%f9T=53ftzc&0Dd!`#})R@
zT0W*1o!b8Jq0>8I{+Y8F+kF;4PrSr_!;VRo*v3O+=^PL)O=3CzMKD@dnp`?8MeZ!S
zjmuuMoOyT%Uzck|F5j68Jr}(2%)`GJ)hvU_oF!yBJiw+MW&8<M8#=F4Nb`l(MBu*C
z!G?8MXy7F@-Dk<%hL+r+AuWsrS_;*<i7-U93(maCg4TqeAR4F;x2?a2H_tE@dFED>
zZ0N-<zX<M3N;LZX?1i?}kJ$O+Gd8*H;GOD}VZz*cbaEdgnWL#ng0yXD>D5`dsZs=`
z=1XAS)B!Z?w}cSe_2A>hxVgdB{1sUh`t|>x`=N)}<y^>Xcc_yYZ^}_*HHfxHbwE$c
zH!SvdW*yt#;*C}!5`RXQ%wD|=O@^D1u<O~p?ebxevy$B@%=FM{z8dH1xfDZvvRU8m
zFqS4fz|7~GG&9PAW^5GT4)eK~GwnO$!!w?6^M0JG$#w{-65g&b7`<lx5HDM#Ns>=)
z<flCP03ZFCOR>EVQ#b)#>Tm^{<@&&N)GJ<ZZ#*pSRVK1JH_%|2E}8X^d7Uea@R5}v
z5y>v&zo$*cdta<cwTBtHdb7+!|24kteJ&>m^yYNCOo*+s3ERbuLjOs9xFc{J6kR90
zdwvW)FMo>{KN^tas7i^R`c2+za|6HO-c|65-_1M9enPANT3N5|B4$So0)hQxbhJ1H
z>p25@%0P-_6o}Ap^>N@FKBH`xh?*^qfS;L0bX37$>NI{ZC&~_hprdStL0)0&<`T$S
ztxAWNND=3X(~!J_iZk-%u}7)^jqghUMViR1G$%Qk6k^iDp>W$F%-WfTi!K__#+?B$
z*RdQkO6$>x{SI}d8KbD(gd2RZ2xVO2@RpSZDasG!^sa12g$b%exW|%n`OyHHQ-q|k
zVh)bq>;QsatY`6gDgVZiy}t%#!@i5xF*P`Xe`q$Cu3h7b$(kNq@T~K^!Lc0d58nf8
zi)MmdXcElau!`mEISBgq3q%n!_yuAM#<Chj1zY#zmx%7;LgvMYGuJ?!kZvf5PXXp4
zr(TgE;-td{wBavf)_Od}?3r@l6gVJm-lR?iS`K+T@5z#x;!<wn{%kZ}`wbrdi$?#5
zpXl^yucXxZG#tAB3feFKLdC5I(UdvwlYPxOdyx&LPfy}=EjC+w6oZeh#-oGG1FW#s
zhU!m!xYlY2q$;IgNZwRfJvIww-~NNaGq&?Z&bksu*HUm#{ED-Q8D9U(<|Wy3P`Q2<
zglZ1p+@qzq!6pG#{{Diy2Cv1FJ&#f7yoT{y7IV{go0AueB~<e8H}1J6Pu?1_+4E#M
znD$M;*dgy>_MCJmk2R+`Gw(o0q&k^BLyuOCPlAXAAuR7Yh)$fv@;AOsV0^*@Jry5f
z_|^(|g94iT+ZZ;9&f-sFE#mX{4Msd42R-kPLA~=aF1X{g!+P(tXnR1B@0WRm8nX`J
zj650QG_6+r=FxoI<5Pg+))|vWwU&hQIgTRPcdWnqCtr4UD0APn=1-hwL<D>H*-t)~
zh?n)l@b^&@BE{I|7y9HFo9PiIyS&6YnRDnBaF_FAJg0TUisYD{fFTX}7{m6gKTjzT
zyHx{F{AW2vPP+^K<2S*!uJ3q9r5L*=-Q#T+E#}T1W3C8;P_VUOeS_z%U>$M_<*5a2
zdLGF+I2#fLBldmUsEYL;^l{T9HG~m8SU*D%`bM$+z*864bibH&l7{jDIs%d;GX{RY
z&>^;x*>Im1P^a?gyddgP{z4lmdV}@3^t=oBU58%ad}k#R?r(*5V`K2&l1xx|tw7vT
z{ovq2Giqkr0Xu#uVFguyo5Qt9&c6!g^VFhEda?ZekjoITJ`C+-SK;^UDp)zR4;Au+
z#IF7`<UKgbxw7v-*adSicAScN$uBVG)eyS(=45obx)pNHzXz{o);m0ugo30kAWmon
z6@vkk(N`y#tEK4Nk87d-T?}f4S<#2%<cM3QJiM?pBw3y%;J+}MU&s1J!$;{bZ_|0U
zFZ+nb8;ihw_5fFO`>Q1MPamkyks^7&Zeb3a%lh8f&-QN$v~ICBnc|&={d$-1bk%1R
z^$NkxoOy-UY2#iWW0oU72oa9EVEw`jRAguBp;0pA+<#Fl`?UbhdrZc{zgAQ)fN@XK
zEBO4^O2j@&2Nm8JlMfd9G<)BF*t2vI&KoC71R;aPeQ5*8052$-#?F#flR>yf8r6na
zQ?vJ4^#A-r7lVf|F-V?_RlfucFRJ0*{C9X^Z#aIstVboxiM{+?E#9~a=$5?#T!m4r
z$C8bTPp<LrQorG>0LJOsF_`ljQOBDu7(`B=kfmNrQ~2t+ikQBs9)i7&F@_P#*gqJ+
zNbL&jJfKfpy=_3k@~@8U9$9oXi6i9-q^W%fzrErbX0W~RrHyA91EUg)6O<?wKg4T7
z)+;b5=JcW-@Pf;49M*R**N{s9S0U>InlDpO<-~RnrH|!5F(#hR^*jtN87nazAx)Yu
zF?QooHx%6Ua}YJ(<eoAAhWj*A9P*XzQLj|+g?ZZ4!gw7>U8%qYi<oEhV+EJ7*%2Gc
zgj6~GB{&t^inE`3z?=^(J5a`0jQh`VP9;;tVXYQaTAFcZ7gKT2EPYy(T!jbihR`Vo
z?n7<%Wn3H6hk_jrk{M|x^l9!3*f81--|p6@>0vBCHDUwbGUPcHX^E)IY=V{jS(yI$
z49va%9MjiS!=daNFgLR#{zcZj;fgC5DR=?Gi|RD_S3de}5EA7zpK*f`;}>m}!OF_T
zTxCHFcnsDeJ;C*y<0o0VV(ewy<<SlPSH7U?FN$?5r0M8BQ!-4q1kANGnR~ec9uG^w
zX=e^V2jg>kT>OgT)>x2|<Hb;aY(C6<J`IHFF5J5>4^Z>B4zaXjzF41&d5JC><kD9&
zQuILy>bu$8C?O7}4X?lvclW|VS_@02UV+B@N6<0Cgz9%NmnOS6{VGa_4L(&6*Lw|S
z+n<Kz6+tjlubfxsGO>!WNxCXToM@5<zhF6I0RD>QEAs=mV#jqL%JjtgiS1mQ-z`4Y
zbufKoasvH!KSK{TKVCPFv3mPrC4+7#6NM~UVrgJb5+c4py0;jwpJ2a_>iXj3t1H3t
zq#;cYUWHC2?&2n^R^CXcM}&)0_)T}0!`G$CB<`as9bd5zEsBRj|DNMmQ8<`-ww8hD
z*-^1mha6YwH3Q;)8qxOZT&y0ZL^J0o5yh<nT)~(TD&=)}a=kL~F3iK-?jcy--G#&S
z?}E+J380^GoH1$`|8PY+II#YJNtG6<zoUfWvb#_dZb58oIZiiThqx8ip`Ra%r6*qD
z?7hzNU;f0w=9zDxgq<y%nNUaZX&1g2Y($1=oIsuB0oZkLI>dD?K*zgzklv|F-;~?{
z>8`c7G1i(K{9s6YxF*#2ZjMFbn^-&N9M&$YWBhUhNmI=)uKq<U=l|t0$Gw&(iT^RT
zX9VjaW^RF#`XY8-TEy9^v2&ew79TvWLA;su>6|V~Bmy_HyfH`8F=_V|G`ej;ye@v?
zY?CVZ-+Nddy+D>uzut+}oot7{%Lg|owSbZ4Ad-{U#j;HldUi!}IdeXMm2D?f&(gxu
zALab-;z6`tumIQWFrf2&S&xBbr}i)I2F>@Cxcc5T7~`TqMCz}3w<)v0c;G9Bluu{;
zmu^fRYR?Nl$a5>csnJo%hq3AKU;f;!RP+c*L!GFVxB*YWo}n5fvrL=r%4NF~K8$Oc
zvX!}tuX6<zR%mOH#_f5dO<xxokf|kiF)N}06U6y2?9M+7t~?_7dQ^|d6_}IzpSoZY
zbF95q`irv;6ruSreR5y_73?US3o$BVV6L?t`d1Zl7ROxB_Kzn!$CRSy4=q|^+X5SY
ze+4VCJazmR1ESTc5UXuQY-Jd4A~8i0JbASEvzG`e;uhk$1yAtX*ap1Md?}la&%s<B
zYe;ov`<lU~G-LZ$+~CdLlinV1<CZy1Kk^;=HuS@SX+`*xvms@MDs<yfMPj?@FW<&^
z;&QiB@yCENZCuOF&xvf_J8Fkm@J(I(gt?1t*}H9N;ZqcN4CkGmZItY-PJtq?Q@qvm
zBM_pr3}n>9uv~*NY`)J%gDd)^X!2}Wte%TpmmVouwG9i5ywG@}2|W{014{;^=$qgs
zC_c3b?mU(zajMQR`hbwg#%zaDa+4pFehYn7c0gl*D>yB8hQxOo#E5Y>d<P!{L7ju7
z<orvRGOY$IWDWtC+F{#)Snh?s0hzYa6~1>i;H;1&G|vAGE`>>${LY9g-F}pt$@11I
zmNj5EdIxsh_TpWG61g)g^FaAvDYokE!WGYwaBGJ&y;gJ?)#D2AZ=4S4%FzPz!GlTB
zgjR84!6EeWdBq6?ckh~sSeAEYCO4x-m23@HB^wzts{U^<-~Ct}VketWrCD<1PYv5e
zo|wx`3ttW2<)T3lGRwhh;!`fDQ<eVe)}{9&uS4Vf8Nk+TV87FlrtqG);cW-J^iw6J
zkuo4~A1jGsu87nSKL~C+16P*c!BdN@$%q4+Fjz}h5?MSQo0g_=acvj47JYBDX)ojC
zl_{h;o8ieN$|PV&3T|M`zz5#ZIGOQ~zu#s4^(*Y2QTY{=aW3v_77!mhEm|qEL?6Kb
zY;shmnZXITKR*t7!me}mQbpY9i~3|kL@Pv}JO{PwwdpltMp~C1hRuJaXy9;TT0inH
zCm1&wr>7_pi>nbBnPx?c{~H6d_AoB4yaHXLc>&jIyg<h*6<}wS46FOh>Aln{v_D~q
zwoBLY&qkd@>4zO~x9S;&-YLho)1}EK+el2Cx*Njmw}TP;4R^fv7BVJnW{e*1{6+;O
zSnc=;Hv4~tNVhCVpIidP4^P1Upt}$l_JIrCCqsqm+Wg!b#?&qKCYbC$f|b<?SkE?*
zoA(aIu3aXW&F%`nUKW7Mk$>Rt%DNZ>)x3CIE!JN%0sdSo*1UU-KD*6O6qe1W|6%T5
zJSi!+{RZorEXgmq9#GE80Gse}_;#)d(b}>P3>6vMmAi@=p&eYI8{_t7%2SgB4U#_O
zAFzuwTJ{g2wYp|h%eDnik1-=oEB(Z)dat0}uuNFK#26h%$<ujPuHe6WFQ99OER8yx
z$Q$gdLP5*%{PLB0)c)T&#-RDg|2HoWrQO-Rq{1F&4}Q$p^V_+o*BQLx&kJ1fNE7fs
z8wS6}6r!iGB6YG2gSa$Ju)os9Cr`Q|`Flo4HhAPPj-))5RqjFc3D&Gv9E+a0%rkrT
zs^rUmy%4wj75D9j8p$}*$c<$zpU8cWLCe#a3P<ec+~O3mz+Hy*Nkh5im(t)zoigJn
zo|cSwZcg1_7V-g&io~<+4`c|9=w*#LkoCS4dWur9ar$#imYL6KWPN~=laFzsO+Dk3
z%;#cmx1+DY2B`n<6Biy@3X0|`^sX_>u>FYP+x~6@!9VZ(^P+YriefvqlbhkjO$xqu
zLm;m>iJLs39)Z0#REZ7MTXKSnKXw5>Yj$Gr1Swus(U^KE7mBxa+<|`v{~#!!7jrag
z;k)%x)VS_~Z&v(;sMkKs`zXi{bWY&9Jfyjw_ZS!LPc3Tu4koU1w{RoYso>e%OjO^l
zOXfuBlfnB`N!8<Eh<Mb6&t@r*@pC4@v!TtXw@H(luQ#Uc^LkJ;^oV4^I8&m=H8LK}
zbFTPa6^=J{g+E3a*js-JR}1!IoWW2OKH9=vz9da|`8-BR)dL9r!m{d9**oTy36V7(
zgqB(gG{|>1{vM=2oyG_xj(c808avm=r{2Qkw}a5taVMXwk|j=_Yeu&|)*{OktV#Ka
z8gRSbfTPyv&?4h?T-TJlnD$hFT@4j{!MtWre)k&^wY8Wx>i|4mQ3r3Xmch*W!I*YA
z7@AHo@5VxFYF{`U1X;4YFS8O<PCO4)?Z=^Q!yNciU`?JJP^Tv~L$IkCU`&H586h&p
zjMy$rJ-QHHo-(ETB!)y1a~5KrJiwfyzmQ2ZNeJWhci}e9H~t7`ApH_sER4`%nKX26
zWqkd~n;>U#2b|r(o=HE(8~W{m6?^M(zp{|JFUjLZypM!A>*Z*wGqXe#tj0Hb?3rO)
z%Rj@UNa-e1s?vM`ayJ{(Wc6*F;INomzG?u2|21;XPOKYK`V!lMa(F>ixOhkUVRld4
z4;!Bwk-B^%;#_+h4$qXP!EFCv?|O+l>tjVub+Q@6ISU$Z!dSh=TQNHPDSY{$ORc1?
z!OPcDB)NG&vXSL;6=MeYcmoAeI^_cIB-JZPvs#R*B4e^>izShjbAaR1bjZ>G8B*lN
zxG0}Opz=}&S5#WXEv;51&fc!*<*^Tx=IM~fQ+ZUhh~>Ag`GpxjE$N1+7KrL`!gBvI
zNSJsXvfHJZdu_m><@_X+{C60l2G+q!^9S(bx-pIX8P8Z1nxx;4F?0V`ajA@%FWRlc
zr;p8r;uBuj6!VH#s=SU_v%Z5lDw6R0aIEuuhDU}DB437FWL~5Qs9!z_1fgeO+7cW3
z;k+T~$d@AcoMkxM0`S{=71Cn=41Z^6k|jS9;c-5D?t{KS<;EghB7O)x_jCBrpepcv
z)P}zj%W&=o0ZyI%5<}K6$B@QAI7S{}K`6`9Ue~5G!;ENpOCjvF90NJlML0ZMNSaq!
zPz@-6;D5Wo(Ipxb<*uNYW*+9<Il?K<ILF)isnN|_TA}lOE~YDnW9rZf{`Qum_%`q|
zuD?R@@p)5X8>0;KwzlCL%X_dFXQ4o+$|1Gm0w1WJ$IBZ$hvO5}=`EHGT;M1mdNhIi
z*e6T!-xi>Qg(9(WwSr%@so=XMlsjKP0C!rNaqT(}sGXos6_Ye*H~V+lZqVkqX-7~m
zB^-v2RH605&v0J)pSgzaY>2vbo1eIoxu+UxF;5f=raPpGEcX%J<}-Jrp9MV}X-&GK
z6rgZ>1$ueba*I~5yVa3PxOXetd3-U1A}5LX{?5nP)0)n;hcn;y1eU`e)hO;Ox&kkX
zROyZrC)x9<LJp7ZLoe?_TsiA8>#JSL7d)=yo2J`BrEnVmX{t0y93O-Ikp);1X-L~f
z48ba=&mfg%NUUsYaCyK8%u*`@uR34;nvoLqIZ%N0>|91qvS+t9j$51_jxL-g{kG;l
z+G-LwKE#Yvm8i2h>m|mnwdK5q>(Zh*qs8u<VmOO|;pi2@?zGcPXrrMg+CAyUUY80?
zb3FoWUJ+1u=n{JBHbeA;GF*~(0M*$%CVAC1zLdYowa&N)u5TyvqZpG_v|=laAJT#T
zj#`{x?peNyKZBwZEn=sDWt<|*ZI5)IICG?s_CL+UJ}=g1+SDTHb+jf<$Ik$sa%P@3
zeZHta8v`{PIIB-SxR7Nb!u2{~+1h{D)3*Vf5B7ke>QTsc*$Fji5ol_nL!0h5iF<Z@
z=0-4XOI17%|IZt#n<|ja)(n7tY%SC(Z^l#BA|mSA&yUc2iKS6LxhHSg{(jXM?)#z^
zyz8YvoRq%sTNuBx?c4+?&tkKqobyl|D^1h%L^O6lowQ_J0d2!yX#061e>Xsbv~DYc
zFwJy!cT2!wr>{fXz3GtmnPn0tY7>cw<scpykyN(<EV}3+Zh2*c{@D|_uSetX^j357
z%&Q3B<aa`HX#nHn^l=6kO0eCs4lk!#(gx;b`QacWQSp}0D}098N|w|xQiF!g91U|H
zPK8S528h)94a&x?pw7HsPSJ<NDblr2Vss7Xg;+7~rZ>ykxN=3tyD*8~z$)=+2rf9~
zP`t+)yR268<x>CP#ASKXKUAMF67<Q$=b2bEZZ?<BMsKy34nf1iM<DDO&IwfnoMKK0
z@9#B-YrH99KC2_VN{9&eb%{s}n{%zQmm=H08W6()JGh)Y9-Q{hfj}D(noah^$T`QL
zSML~_`pD32_pRuHH)*)*u_}GAQkx2U$8r~3Qc>lx8T)O1#wj``bHZb1z&@IFx>gQ|
z1<KLl=B%k$WcEq&?xqI4CBv8;6}^%VQ3YsZoQ%o!$0b?~+Zb18D;CvGg{$8uVM#$Q
zZXSFO7t`B#++B(k$hR>^`y|MlC<{l~eWz`l64w8y<%6@Q^Y=BUV8|l}oS5^G_4q#U
zR&C2L@K`@CQj(=(s~S+8w*>{yx%^lSYf|xj8Hfhu;L6xk82?v{t7SE)!A>g@c=I1$
zrf<PG6>@YEd#@!+kHi`0m>23pDkxug$1)Qqp!#tHb8pph8H&Go!8L0bGAs-(=b6Fa
z=x->}jTWD-VXhwKDZH27I8e)EyU$tjL~waA<Sn|)iSC~f_YHQ(*}Lz+@<sAsJC5Z6
zYr4f^%~F{1yc2SY!XWtM*Zh%bbr}BSIUZZ}8t;Y6lPGZ&@9{;K%p4kp{vKwqIN}bf
zu>7-A#T@a5gI&=2><oD6cHm_>S3JgC4`#<dpj*pUxT^jJM#z_;#V{pYeMF8L-~9_d
zhnb`9L>Q_+i^q3t=Xk1#J@d0X(A8x#7uwtdQ=EQ+Q{@Xu>&x?CYp%za4J~2pYXN<%
zrbuq4nNj!UcQ~h$wtSIWvt$*U#}!#z1FzE`II|SiBWIb#?MolxjiX7Bw^e}(KU;GF
z%za=V^n#oD;|`a!_%l|ovV(bU=HyP-Tg+<w1va(L@clLG^=9UC)BJy+jgB__?pLKn
z<~#75^I$S%%u75|Cr`C>e8FkvVzF?NG;^44<eTSD!-9bqkoPc)k2rP?rGgac*dx`r
zKk`1z&Cr1KS#weL(^ounu@VM6EQ#R25`MpX9AjnXam_LxFp;r&Jx8@;q*O9|Wv**M
zLNE_{R+xOy1iqZVh^3Qv<K|O;!PMB8P7+CzoYx_&^CA$7p4M|2-ZD@=PzTQyHOX<-
zkvOHv&e1`2VkL*~=v2f0-`Z1n{|D^6Wy*SW-v#J1{t0~AU`&FN9-^Xc8E+CHr0d$$
zX|VAV@!uV6PTpt0N9br{soW9HYx*-Bd$kMxO6Q<U#5DZk#pZhxd-?P8UPIoqjS#8Y
zgEKeEfsNuZnEb<n$e(%*O#+EH?5zw=6btFUoL{gdP=h{Qe-~XIUVy`KEStY$TApy`
z9)8=aVEo27$0Kj4ksU5>Fzmr=)){%j%?^qM8_obFUmKunNk9Me-AA~x*^C&L44_%T
zeVBdv3wl_}lTnZRQFf0G$e-MTA%h(NS_aeLmP7d>$DfjY(yWiuUj~llS<r2%Mw{Bd
z;LF5osJ!PC#`(>IRGZZ>V`2`r&9|hh+p;0B{WAA=4x0;I&4q2brJ(UA2v@I9h0=f=
zKF0kUrY~eno4xiDf!rZj+j$pn?E8nCyn~=_<sj0(HWsQcZpZau%&~Z&9$cr*1A|Z*
zVp>~{Uws7BtD;?8eZ>=wOpC+F8g^DeQ%G152eGQw#6GW<aqQo7UO%UDm#?^^Ouio3
z#fP)Iq7qFwWKElPe3Q6;R>q{+wy;v`1FUw*g_8CL&|9PmgB6TO{UROkTBgM9V4XqF
zQwU8)>$xHID{#!4SiCg<0z(k1&`Og*RA{-D+vjCLjF<d`S1Xun@2wKGBUZG)HW-Xj
z&Y?f+dOj_^3;#OU-M*ORs}!fg#M^_&Tf<MdP^JS{i(g>X^;IZv9L4ggtUE92;1rXB
z`5OxPSid_Kq+5HSX0JNqKJ=lD(_=o_$AN$T<s7Q!KSh~vW#XjbDZc;Nj8>Sf#`a$s
z=r1kH3Hru31Z)2m*E4U1)1_r-w&xude4mM%be2PK?^E%hyh8M{ILg_pB=G{{5#qT!
z1@zFa<DhZ665fQofSl+;Xqgg#m$%!Yr^pVkZex71;q3nN;TbM|Zb*Z3>+`b~b%6Y;
zPDse;fDOK5u;lX}xO|&+6h<bnvxX9B+t|xRbLsf<KXWqW({2=>T7mmo`_XY?4~%#^
z8lU`7BEbd8ob<1=5Xbn&nRC?X4y{DIboDtlo$?3H@FW`kG=eE<b&xf*3A~1X<l9#~
zg*DDXlD%jXyeNKzqVq#J|Dv629$zDFnQuZBSN+59vCm+(;ZvBY@eTc^OA#mK<>Hja
zbKw7JCl}E%0TgHHV=vVrXQ!P<&FV57(2yf4hH{V)5(&om41~gSpz!=Su6UtE4D^ji
zevBIF$+*k~FRbM1uYF*<0WTD{rZCR?P0nQj<HLy<Yln3|jeSJ4A-Em##y{uZW=fML
zLmfCXtQWe3B3=XzD6iTMHhvS){%JZ)yqb&MKBoA~G#V-<lwwIvEIfQ;L=tAl;|RfW
z82g_GhAmFxGj8<r{ppJI_IWc>cjqB)8yy3F4`s=RUxs9O0Nc||I*2Z-Sf?^en(qu|
zcP+gw&{l8&(iWV>w(BZ5qd|%^OzL6#8sP2seZ%g;J(#ER0*m}p#JsBt&2jGl>E<SU
z9A-*OUdH27<`3)2UCQrWWQupYRfs{bh#X@MlKBG??AzV|`&Tx>`A>%-SUHX#BHaTG
zjWw{>=_Ag(`-&SLs7cyX2Do`ru{gQYfQr6^arWC8Cn8)O1&W00*|!KLPE;Y!4;@6&
zz$rL#Ntv)m5LT$DlY6U#)I@18X-W`+Z;}g4^y<ebyDqHEn#3>3T!J$f-shwKljfu3
zs=4f+ciH(b5gbqK!-eHGm^SPhBwu@h85@fEA>tm^PiDU1O|sO97|`|)7F3WMATbiR
zU{81%cfr=0=;<8cx;DwvPt9gzdHEsC8^^eywSD5?8K-!;_i^YxjLn<Hf5kaTIrt-C
zFfpFk0Ha>Nhonh2p#I5dFmBI;f7#j4ZLu6Soz;Y#ao?b8XfW>j?=kF5=|ffdJT%z&
z8=H*+P?cpJ!W6o>8+0>!7wIxD+*kgSAQpp#yCnY<QqXp8E?@8UnoB-c&r3%=fu4nP
zp?{$XB*m=*!Sq9TLMIph6;y+kN*aW$JP5)eU-=A|DSYzZMsbSUKX7~c1J#6I;e5wK
zNE7#9?-nI8V%C2kE_A}WZ(q=`suyhyEnvo?3>1ww=Z-MO@y>i>;$51Hn*vtB6nG0I
zm%fAlz9`OpP6sYd%)_FPAL5VqPvHh1OZ;B(4(!be_(>7g#Ol=t_;sG`YdGd>khF6*
z+21vW%}}HRYa!x}B6Tahh?PrTaMF>rtSda7ImDXL^Fc5+u2n~s23v?swni_;fE3QT
z24eS5;F8!3;w*ET;#dHsPeO5~%t&w)gkgr-7;edaS*o+df|l!NLYjv%z1*fu^A5D}
zHzsXC8RkJbGA$IJ1!<Gu3p==T*W<D1{BCjO_E28*#~DX@Xc6(?Xo!5B2bv1zbc0?Q
zY+B`tOFl6EM{zZ-jT?f(6>2!-P6MvC`3wGS-*}%yKjy}Hh~x7;!Rg&H&ek@axr8Qi
zD<8C?=g4>{={^rDt1dwOzg%v1UpaiJ7m}?$zc7DXBidb$0LAbY#tCSFP4ZIcWT7G1
zZz)EdU7?WWSb}QHJ~F<AGwAWf+)gQ3GVhr>DN1eUxd*IkTla&r-FB1nG6;m#ex2an
zd7F<9F(FQWRcKeGK3|}m4sJu7L9$<qKCdl6zn|qOly?O62*#mYJ|4s3j&fa5-TZ3b
zZTL&+AoB?8(#xwCfYZ<$;^wI@V0hbIe8l!PHhxFB{@Y72f8Z<TZ34KuvIX347UIld
zX0XUbnKTXRmh{zSL#44Y%X(kOe%IIN>7K%N=rVLonHe=?og4_2!ZO~PBqZq3%TLa+
zd8dwj;Icg4%{YL0)|CM}jd9?n0<qE9k0(cHlKOqcyw~(1$-eZ@Sh|6E2xNYNb1UN`
z{t1Q_*8lo`A{cU4o09_Rf8cza{icMB1H$fUweTA?Z%fl8*>He$*U&Fqp3Y9c3)ZWz
z<D%Vcm#x^x+y8a|?;t&L^<EL`G?rnJTqd?ey26Z0EqHW&2A+PP!I+^%;QzrI+;4}o
zj)xp+OE!VxTnqYw?SDnbjrpQ;Tg0n=GIx-R8qu7ajb^t7(IcE89n1fKQH~cO|EL;q
zY(@+#9gq8-tCOY|viz*M?DK0hBF|@u=na#DAgfk|U$jcGMopX4d^RPwlniLc&uC1h
zjS@xwM|`DW4Bz1;Af8E`kffmomm?goY2F#ht`SwR!_5ZjAEm<V$XRI2|HigAXShgH
z))oDg0_|muaAK1#$(Xkp1DqT2Y2<hIS!$CItpuFu-O4G_aPYc*3j9r)aP63nXg@gy
zN=H%NBEg!5oy$ev?JaCSd<*`uy<FguNN$AoLM-e&jOyiV&vUDjcg(+oS&B9kJnrL%
z^rKi5ISQpC8e#R?cd$gi2n^+xqhNh8=X7U{*f)szOdF)=zYVqUWrm2Bk7RDONhaL5
z11y8GdlPK`coZ}JB<TC>65MCJs8*f_yvDLM(Iq&H?KT8=pP;l^Hx^G^0y`8_q0=-Q
zOBYJ;uY){0BW%PR=>cdX@!alLQq<jybw0AMgVw26SQ^9565~|CTcZ?zpVy<p>J1?L
z@tgCq-_KXhSqZ}aV(w&Q1X?q$%9o6rkYZ;}qb(@J7BMbESS;UBrb8S2KZ1IbC5=3P
z491##K`-W$?Q*T>#+>X%pNhwflYSN}dx!C!dy3)hI1@UE7!m1-b&&Ks0<FGWhp8Ed
zaLTO~SfbDektY~KWY|sUx>m^7?7WZeo{w?t&jb+Gn9!*DRNm>&Yi`K34jgjj5roVm
z(4;UM+Tss#Pw#vOmt|j2_<1UqXJmpqb_y`j^d!P1V{B}@j+;9fV>sE6Y72hj?5rFR
zd^VGKZ_30*+a=KLGZESzL_+kl!{B@MEHov_(S6K0?0>nByLmyLLc&c5*=qvy@(<i1
zP$%PTrHR_$Y*1wRTKjoXoM2K5=VZHBlAW{?j8gT9Q1vBu>60#9u|PoN=iLFr0~c_o
znkDnDBtpZL#}Jf$278%X+)H{8Z?9xVzdKw5p|>4`sqMj>1=pZw%S^Z`G^92OI&{3u
zDG(k%!>v{T^cm2iHg~%)E?kfCgvRj68z1uJ;)yuDRf#mc^kfVw1K5_63BFU*@vcTb
zI{CyxRNO(XZFw43lRSt_ewBwIOD2Q-?2n-T&zhL~Xp-qajcH2kZ-{%d6RX1#pnb0=
znig4+<jQAar*G55f!>3`sd*?rxR&uRh9-jjp}C;<>KE_iG)U|;zEAwbrXAbL<GAMZ
zT<}?$1-ql=NU7Na?)?%4qGTvb=~-D491@>D_rw~o^v%a9r*rX>Ums>a(WdF&-a+>F
zR4`kWg~J$Az;ChvdoMMEqE-;-^e#hsNs+7)Ymk1EXeh_u;B%)O8pH-P)kzxt!*V&H
z>uCIyD*?T*7=BEB96R%`hK#~=5JYx~N376=&YS9_$1t5Yl~_<``~$r<xwyok9Tzcf
zp~Hw8Om^^th6_SkxoH(NZ(&{VyIyRUzn<|^6U1Hqaone#J}f#kQBoOTNDCU;(MA0=
zo(X>k_2VK?@6c%w%zGix?J^=cBeP)Fo$ct=(#203`w<PpeDKL~9a3E;N8^8+l5vg(
zq*Qhc%pRA5#U(><rM&`mT`iz(O?NP*trvsk&TuZBpP)XZ61&f@$C#Q6kYPK8|9)yB
zs%%xn6s22mN_r6Si8iK3pU9BtPUf;4UkO&qF?eVM%S9Ai#3q&>T(7N1&-`hI?c><@
z%drqL>qKNlHpL--%kiVzH+*F&ON!#kB)=9pVDJcI$)mLv<lXG6Sly?NuRLUERQzH1
zd{!4fF)xRr3-jqLJBeL?PQq&YL@4~j`pa9+!D-h)B(L@iD2{!`HEHRBAn1C&l(!CP
zqh(y1Y$xx@<{X}P7~5#14&?1s2b*(M{MJ{6EOUAsgEm~irZ>La)0}HCguY~%hVhU%
zT#gK4JA&@}2H52p4wX4$`M`CB+*t2Y5bl?PN(~V&bYh*L>)W9#U&Ix!^}@>|M??1#
zduaEog6tY4T2NmL@;+8n<abumXc7l8-tW*fQ-kaZr~}`ikNkjh6uP}B!M28%+)Te@
z-Yds~U!FT0ippeQ(99#a?b=mzvectqTJ4h5wrzN3Z6Y{F6hWPi1g8|0!jAC@pw?qV
zWj|?Q-G5IIlUmsx=Mpb)7z#~g0$y<Thr=N+9qMtI&AKXo@ibb644Pv=F8xYGZx1Ch
zcink3yd{G@(y82yn+mk;=@5*&$QTHI-k>u50-P6D;qiyk#EX9{j=%Z=i#n7!`K1>a
z&pIEUEYc=X7yP)?zHc})@iA9pJ&2misFC`GH@Q6{Ix+b;`}3Y<5UySUE$_VGNofvN
zf39a||67umo|Wj}UyimOi#ew$2PA=sSGkoAl~C}l8f><o;kpbAV9OzGGHvg5@c(m(
z3qBv>uzLAzIPhARWE+P-wXqR%?V3s6xG@&a6m=Tp{1={j>_jQ6!#KW-gCcKv&Z)zX
z>q!a)oByi#%I10Cd;BZEI4%kGAIZ>^r@1h8syUUq5rx}kj6o~tMY(2n4=hZ?5&r^l
zil!=EUe(E1m;Kx$e<>0bs!2B|zr*oKqo7X9nkY_A<7*RFqQz1-h+kHTx0&lCbY3;;
zDA&NtV_KxsOoqg)n1~Nv8&G-i8_=t{#P8po555YuxMNv5wy-WlQ-1*8Wfsh52kb;&
zxi8!tK{&=P7ZTV#7kmpY@VkQ4n7{QW9ygVtp{7;9D;6St@Q1j2TcPQ*4sVP3+-J(%
z7=E|FHhDMx^8Ai}s${5-K#YCuR`_C=8ELw<2(G!5U}cyAwqi5uqwUBK(Nd*W36CLf
z+ixzTn03gcl*I=(X^^rv1|;`~HTiW*mHG#+<O7wC@Cs}4aYKVHz38S+hW=t5AipB)
z5uE0<di*eX@*aL|(@JcL-vU;i0r(~67lecgX_)3~)QR1K0R=`Rbi*@nu4x2;M+0{{
z*@UPVOap(N2LAJt$q;`@hur!uBwjrf_g$t~>G2)0I~-CwBl-SWVpMDX1q~VhP$g1^
zzRJ)hvz2>6m$7lC1pNb*y9+^clNB-EoD2IWr9;usLeBNEE}ALM!;I?Vcz#nObnUO?
zyuT`v@kc{IX|Xx^$vEq4=Q&`q@epj={fHM?$nnkpF&6)EFNkv2;_KNSzB_0Lb09v;
zxBaya#so=|)dAzN<V6H#r@6qe)j#2<uOca1W<e@~q_Jdc3yj}A4nOoLQ^A7m`CY2l
z_@&=0=pGFr*|EwDL{$fHSbP`?8ssEJb+5(0g=WNl_;-ANNt4K+G<mJ4O1m5$aE%8U
zFFofBm+&kHQjdIMo_cNUG79H6hmXbkYk3F`InQnSrivwhYv6uoCA`~p9oH)8&|`s?
zWW25>v~GC<PZMNl{TgiyHY{*hsQ4Gg21MZO(BIIxrv!UqDtSW(R|p<o!#|prj?pe@
zsN~&_ipg4-74`$t5_9o*btu|yPs2xTJW3hrQk!9iz{yvWn{uEIL@j~5z@`}sG9RF`
z%pm$ELzjj>OoIs>=B(?S$RE<Gz``{r@XkLWiQ8pOD~FHb1fyNyO?Vbe%iaq6Cn(V<
zrLlai|50#%;R|+pHJ~VUo6D4AuF66S!uQ_8Fn=LTV~jD$sEe?ju#WW1W;kYNLzaD0
zCpO!)8Jqf6e$$`35}U6HD2}XQyO*P=&A0^a>@&YHga`1sg;7T&oN>PfO?~(iM{BE*
zLgrj9-g6N0j>K>av){w6@C#t`DVpa~Zla!3C8sg*HF|szk+c;DQGJ>b8C&=P)K4A3
zka=#P_uw)wxc?|Wr6mkb&N+=&^rT2^x)$5J>#;nO8J8R`<iZqha!vPEb52KP=>07n
zSRQf}<m>Elfsp~Zw#=NoJTZtg`Fs&asy=`O;U9Q3fpHyFa?vT_5ES~b&Rf8A9C30q
z3}$|CsRKDUQ=)`3Ki%hVoQYt}d<~kySTdE%Sk~3=sJJ2f8|I9D0ejBfL}BM3Pz!Ft
z<he<l&C-)x@SDzj>DqhH^05-*MA8`CZ~+ooUg^nE9ja$>i670Yl92W1@I^``j`?It
zW4^Ks$cA1%^egkVyQ$#L#Ay6!a2NMa_r;hmflzH#hV@yCP_XSW-!xJ{S48mmT2YHE
zl9M6HYmy}vu0im$rUFWGr=#s&HM;!IRdAe^fO})Q*lZ~UZEnPIo4?G)h)v5OYN|6I
z{?Ckt$Nz$+o7H?#YL(b~o-zrY`4Gyx7#D1tjAYDJLn5A#4O4q=<FAsD7@6GzTjxE(
zwcoU$GOroqRNwRe-4PO-<(3fo;|$zm4pgs!RLK%&c{<ARIPS9=ME37)VOeB15IkDv
zuyS8FOfT)haXCXs+|(h|%T<f7s=E(YVx>u?^(YWj$#e2wYa#5?a`3W04^CktC56*1
z$*2x56bL5e)R>l`okAkCO$o=Of5&mDE8~}My^H@HHKvtos<|g0%dmfi2u9s;!InT{
zdT*Z*@jWyK^v1{W9zCYyCgb>bg}&r6qCFs#o%3zmeR-osDY9!~Ed&e~(#`7{!AA7}
zIM-i5pNU<NtuX`cE;)-;+H=qh+n{mjNmzK`Em#Crq0%-*vUb`GoN(+6?n^#`dlFU2
zjf+=d&;jP(*qjOL)eWeZP>*vdX76_4g8cih#-Ov&Q!t5^CVRFTlC2k|sl-^03Iy}T
zS!3#<jxpIBKMPomtQSgO-GKB@`4AB!L!&?b22qa|y2e-VmHRhx{$B~7c629f)5rrZ
zLPW?a5qY*G73mM=l<5G7h`WF~(_?{LHYb(Q#k_C$Xy~Qw81(ZFG)Ol?{vRpQwAGDE
zniGtEzvW15Uk28&UXk9@FIZsLfyPR|A+pVd?ahyIQ$O(NyL&Gb$u8rZV{1`whZ@K!
z$dXgu%quwP9d4_tftJw^Sm&8_Cu5tqqW2fXMJIg4P1WD{GchkveC`wa9~y$2?M&$m
z+aa_kqJ*<M_ZALRe?$4y8rCt);o6V=gs6l#RDLI-GSP~pwJjbS<E`n~-1|VPWJyZg
zN-WU{15wxuZfDLRw8>ls>t0;Pl}33mHmDc!l;?r)*93^_{mMDLe}g~uK4Z&smQ^v-
zqf($js-;v}UivoQu|$Pzy<dak8x|DTw&E3buHURwjgjlv^SL7nLwcq`V@Dt)PShuT
zVb##TJPC?}2h(c72#C~Cq(|Bqli9TxZ<)(b+fnZL+geBhejAWrpR*2$Z!O6>N>OmJ
zn*Vi3jh5UjXFfB=Rxe24D$Z?ymPcW*d`AH`jQEDhr`~e54THJ1gP*w8>*`cgki&Pq
zN#K%(Zp002rdQedm1`)IrfwoTa0)Kxk1~d(`7~?d-w?`4J4d3;{=fLPh5bIsNkPV#
za#WOe#)8DlSlQx=;U4E;ub99%B~{|}U7hn&Y(s(H2gx$Ve0i{{4=+fW(BM_3yk0>f
z>I^-Ge|8%Y53}nisPDFSy5j^Qw*WqRLoP=7{o!{FV|;W;3GRzfC+Puuus-D>U+o--
zRypQ)q{x~~T&znVa|p_6DMFNK6#NQj%n^AT+N~7|TEU%o!d9Bbh4!LI+nK+eABy2i
z*gm4{H!Pg~1zx>2B{!~W)2Tbz{UIR*H4H*9V}TZa4`5vuoh5vG(g*mqL5IqEJ;Ifp
zh+Q99SFcNtI{)|zDi%KQq0@@&eXomM8bN%Loeb>r*CwSYk(hLFBgmV613hKNYu<kZ
zo(hMveySO9>r?>2)Dw~??E>;Ns|$+W4v0UVVot)ObSMexz&ZWW#AkXr<2=>iNtQ{-
zTcZj_wX*cqH4$mb{Kyq;cjOxK!+^bLaCUqwNM(pfV8mZ;X7DcfRKw;vyA7e==mQG#
z%DE4Fo}kada0t^6gayM{|84Xt7~k%VBDG3%{uT=b*Xm%%9RUr#km3;cz!Zv3K9!t!
zC{35%VR<UIU0nCP9&Achq|&4TWCLn2!ut-|Z#~LCWM0?SU0JZ|>peW?ra-b7%k-YV
z616?KoD+J#<$ChV`MInY?qw}S3p{?}%8$43<e;Y*T<F4$To#T~Vp*2{tB}^$dxK%8
zJUo3aMTNOh+*_3ksLe;C^<E(<+N8*Pt<Of&<tn5nc^UZXZp4h|6X8Xvh$1`p_pEB;
zm)}&vZjb$7GDAoL1*4&UU>!WkFruyFUa>o+Hpyzr1!L8f%nhEw)oWyM&+lmxr`|1m
zi=024IqHXU-w&eA?xURT@;ojqQ6D4ne8D1jK056f18P%jF>2ir-tN39wLAD7$}47~
zmcm*T7>wa;E$?zis&r|L|8YDh(IRnnKX~u(OmuSVk=S>C=EG(kfN(n{`uNcUbj{z*
z>%2J(V+xs@_1=6;GMb9R8XiK_g*l+I?I`*$90hlWXc3DJPdG8ogzR-xN2jU-5<?f3
z{T($G>)#~vu`Q;=o^@lQ1D-%e{5gn^lqZTFJK)h24JzOI4a(IG>15We49v~ql%gZ?
zNmnYC9*gBHWXf>e?{J)(tb*STFxJj`b;hK83g5zYNUp6B{qjac4@t3HTjdnE)@VY)
z$GwC1a!*l9$sODlUB>xA`m}s^D`aSY<ct23p<@p+uHcsde4?*JcCAT+1GhElR`o;p
zc&!GFGk(Iy-5G*+im&2W73N)28-crz_u`X>hGd2~0~dFwQ}0o_<YkXF$um)a%ClOW
ze>v;d8Fs^Lmr%I%k}=+1S8+ckon!7I3*uTY%f|&uQw!6BFd}6W+^vydqn8qucJjbs
zQ=gz9>VU*aNuF*qVm;WJUf4WmFrDNrBq{pRv{G{)pRIHm4$b=kMbQqD*gNMj!HwYS
zS6z5++Y@Xm-vrO4S#Pi5Fi1?U;Xk<?IOWDV^;HL9Yp^NNTp~k@xLtfd%W`-QGo&+z
z)$td%vU&X1c~Iil2RBE?WAUZMpr{(dcb&23Gn}tO*Qp&`h=(#34SvM@3{%0cFBKgJ
zPsSGZUivj;C^jWM#w)Q#L}cg)f?)RDJj*;(ld}0?X_MIe#2$v(Jp+~Vp&**v#7Bk7
zz}ST;=y~BPxa>|xGE{|bpC?5E|CMvH+1Z%kQpPXbr$#fU^ElT<1HVP-kSTJ1Kym9n
zsL@p=<-MOlbbAG-sV+stvaPUHIu`w9OZZHG4H9gb=b*(n$WhEsJX_KMA}KZcQd5CC
z5ATCfoIb65$?->+Yt!C#fKT0XiVuF3oPXj6>&kZxgWPww7$23QTaXw;rHi=KaYbB@
z`bXSxR0T(~9#3}A7I3@q8@qIuaWiopbbI^<&R2edOCZ|?sut%9T&Lu<_8x)m?mSdW
zRj0!7vAlnG0!H}k#<2cdaE9$V20E<B$Q%O_7u||ZAMW$3D|>;yKaKaqCbaKZGaTYu
z7;ES-Hb$z`&^r~lGQSc0Bd2k14BFuOI9)P!R|zy87NEQM9%rlbh)a%L2-ALS#TVH+
zbi({IsHlDuGfkNn$ElrL_#p&iBJCl;ZX(hrmNY|k3UIpWB-;2JY;6+~fxZlNoqZLq
zA322bMseu!(wr{nmm|sfsd%++9yYCh##g$gajV@g!d{<#bP0P4QVO#4ubc(7^-BaN
zBY#MjI*r2wP0%JWhAhiNcyFW`nJ+(xG#I#ITjv~1mir+IJ9`8&EIx8$7VFSm&pPp)
zyg6-()Q1f=EU&qnW#5+GLC2D#u<q}16c&1Lajv`3b;mVu>-vZGYb~j(V;3KovI;jZ
zZi2`QEpTO83644_Pp?=X#qn|?IMJj<r`BncB}%=}#Ii#fV=cI@I0bIWNN;RocjvsR
zg?x;{PIyz*1i@2xat~*)oRPB$z4)XUE4q!LZc7oS&y2;E^#W|0`HB;@$8+UuzTDM%
z3Evcd#`ilkh-Bj<P;VSeo{q4`1}~QV@G67|{i|SW{eiFV)?z)CRcNux6y#sN28A3W
zx-9oSW_!fI`>S$PUhyx!|Eobpqzr|De#UJzEr(k9{g_-T1uBVE*p;oopMUL(p9eEW
z<6j-l#lW1M2}>pRM&qz*UIv&ON|6z*dg!mW4N@+i1>1oseAI_=TvxG>S3KZ@gDeeb
zbZ|L5VIDXy)hNl&=@u+UnhI7YBp|5F7OSX6LD&t(+Nq(O?($sJx$c3!xqHAp;|320
zDXRRig@<>Vv1wNX{QKJjQMCblO8+YmPKt$)rZaG?=`My%`^TF-t^>O{#UMDmFhAMX
zil0}lKrHGG<M}HmaEVVR)IP5R!Kk9VRlcUw_OTLwxa=LeIxU1K*Xz7&xjsD*ZcdX&
zeit`|#6z;qUw-NMzqnuj3fQl`!QqGmOto{s`rn;6qWUP?uTFx6HD{QA$&$`7)Sx3g
zb?DJnX&S3<Mx7(sc~RGwv1Eeb(9IVJ`(sd%^?Fi`7K6cOW1`o+0Nm~VaN~xtvs`XA
zA9g#553UKr7Xi;O>r)4u2|A9hr*8Ac(s$rUY#N?)GNCV={@{owz7WaZ!oT02V2RT+
z^k+Vmz|e=h`^ZvW&nb;_cdy0UWft^JbS^3!c*A;fjwq<RhC8aZ!okn-BqnqdG#(p^
zpQdP$l(2Y|o_!x;%6XhNY%@GN{~jkR51_?^|8TyADsgIj%(plOVv|;uB&v2eBp2n1
zo9f4dRGdD^`=^Y0damHvb{Z|}SuWK5DB6AIq2;y;^z1I@ZR0b!HO#4(+-M8CQZk^s
zRRh!iL_+Gacz#Ec24-0KL0;WsE@2Uc<VR=u4U6A`XrwZi@qQ!s-zdi7OnlEzF=bAI
zD#je0XaaKGpV)WnGekC>fNgygTvuF!N)>lL!Mq+kqjYKDp8fFHQJxH9pM?oKhxc0q
zfhX%D2d};flA@0gC!5Im%s9?+ZS8O}Js%T(pM~a8nc&}ig)`Z#OGaK{>?=}(pYO{9
z4q{m@<s`5&=woL#0eP>eNG{hHLXA1w$*qr<oSiKoDb8Qu0pkW-J~x2<57NP@x`j`>
zqz0+S=V9?e0-NG>ars&==ovo+lQ&gxTNa$dU?)u&+AAWp7zN_FrgU<jHB}t03QZSh
z!i2QvC@pNk(%bVeX#HbQ8}fts+WI*0*%b)p%OQ2ndrnzlJbKAq=6l5nxV=S<+&}(`
z?U+mW4TIWn%;YR=Qku?<npOlC*2$2JkKTOXL+0V=-vi+nFQH=LV{WYRT%2}G0L~lP
ztgk)@?JMu1^Soy;V(KCgfhBdWIfGegUtxEc9$DDILl@@qyX=y{w@bo@D;U#52iIc)
z>-7yjBt!OpAB9n6F<jG%OZ<hsnb_sv#JN7TpcC;0X7OyVQ52QCaODw9SA7psD|N{s
z#|U(~s7BZ5DHGS(kGW0tQaJWTK8#u>OS=tcK-R=J5Sw96hRQOZH{+vhvR6XUw?v78
zM3)S=li+OG!?5V7KC#hcjP#}dVE7jUT5++3?OELa$I*EQ)cC%C{Ae$AO4@t#Az7*O
zTsH|xR#uXgot>3r9V97|wp6l`q@{VD>*h!$A!Q^jAtWJLN%FhDzrQ|z_~Rqz^gQ={
zUGMknB`NT*rsu+}sLz$Bu=Z&(Zen-M`W1{X8+Qn%x|$I2!;>WzyC1W66OZ)~OQ6Oy
zk^5$piboEd$KH?^EN68P4!HdQ@zlqnh-@}b@jL<I`<M6=;zAs<qXzvW&0(2V98}#r
z1l2tnwD|ZFt_4|6FUXCPelg(kw%q~2u_2OHvq*5vKMl{*bcn~t`|z5v$ma{3!9RK@
zx4+AZhF-Oy{3Io!GP4wT<{=eF%5yVrE`k$|PjC&}t2DjY3ep@iKI32$h8$^t$OV?1
zQi21->@}sObw23QF^`L%Ye<DR#_-KMGtege1uj@EM>0OP;w1%b*a^$QEAAS!DcFF!
zTLQo7_F1gXzl)(Ctm)&i?cg8oi6H^LI6*2;q#Z0%$J~2<=BD)Z#U8ffu%%|fY|b*P
z4_hDH=cLX1z@w-T*4q7mXqhwkoXtX2+?rrfjulaV^AqL{H=+ApTEOxmH8N?83TfXH
z&I^26xB&Nhc**)0MQX9=xK@QuRTxMD!xqAB(H|T=T8|_=9Ek6J%G1czGr{Z5P;`(h
z;>O7Jq5t_B^u5%CSE8zM`-BVF&%Q6>#d{>JQ~J5d|Mq~5X)}OPIi?PkqYlj<@SX8r
zoP7EiI4V8G#n07AqSAZp?qHmhn!6(L#86CZd<BAv7b5R(K^VExn7YIp;)QXAaQnRu
znU!+|9k(#%Qrij6=hbH1Z>UQ`jgE7|wYo5L<a-GGB7%yEWuRms<-{-RMK!M+nS;<v
zVs<tPnx2~BW<ADIO*N-(caDL-`%i3Qe`m4tby4fM3nHhpM=@rU8IG@Ed7j)$T;8fi
zsMBP<_VQZTy|@^w`{!cQw83D?`acd*2b}ThB3{r|pyc!*>M^1TR01ZUqsLQ-&|^%K
zRkiFL`-jVyK4e@71$gMfW_HQ(oI|S|e<RR<XbKopBK3?U?|mCAn^Fezj7yn|Vi?Zw
z(!!K^BT>CL3phVBQfg>U_da@z+3a2%xp)D$c2YfrW%lBDnRR&V1#>;>gmMCwbJbE}
zd6^%&lt@mZlJZ3`&#|T!MwT?-2!-}v`qbZ|h6@dN&80pb!^IAjA@xozoOseDeo}`q
zsc}%}4)2YJS4##EC(TmsijOtL5>JScAAk+;1Y}YT=;Bv4L|w%PMMDYBwe3eapL7)0
zhC3HJ-T^z!>s(nT%lkYJ;lv#gIC;J*s`X7^d&5sKbB-?QW8O_ACr22=dI^=A55jXJ
zUGlU|8-9c&phK29zw%Qjo;#{f9ki_=efu|FyzPc$?YRm#sVYN;NnfD1aVlS<SIe0N
ziAA23Lda)#he}|M2*o5!A6m!-xw9<eoj??Z3o+-9J(qam8qDpD2G>JpVaesmApX0b
zW%^cNPh%%HJuw6aowpzyEyfFhN3pK_CM1NnanCIV6SD>Zwvnq4sOZRi(U-BNGKafa
zoez7K*b?_e54i{9Qo!C*j|QHY1)i+;Ye&{I#^nH!!nPs|Oq~JpBKBRivEX!94ItB3
zu%G8$Bm@nAi#z3p0BA4{^z9U!TULhMi(kS*3vKGNCkIR>^uhfFQD_{`{9F~xDPU(o
zqAtJ3C@;oW@s@BdXFDVs6BOwyPi<ne<O!tfcJlsXS7T(wRer%%U4lB%V8A%&J|>-5
z@uva)E;1$iFDVhDck94m)hA9~MT-j3I-Go5-!M*ODL-?g73pkAgPYfyQ9a}@s+t`}
zr%dKX=vCwUr`pnj<nySPxE+c=Qm$9-545IKN?Mn>bDsMSfxXv#h%0}AUMBZoT7&^f
zD_M>O%OkPQ=OL)K+TsJVb>Ow26a<E05~;@z{E!@q^Rt6-O1TE{AAXKYP539OO^9Z>
zlz48XLLZD8+6^6p+d*@zGFcT-isG3px7VJCE7%>W{%$$e>T6J?rT4hf_?IX?wN>)-
z%{+|KxB;tPQ@rxlf@+*GqD%Bts0*Jas`&Q_eM*8*YZH6-YK4?MO){XNr9M!3&J7eS
zYw=<BF|1knm1}Tg+_0Iz2QAA4r*ZlG_uCGr;5HubhiTB6W8+Zyt~x!==7AxNzrf+V
zF126T1wA%TIR6D(@Z|zA`b{{@jXxlPo6OHsU9Cu0o4SI_&PI{au0b&7oEAJ}Go;j^
z$hov%5$V=H!#i<CWMqvhecf;x&Gx<LsyEc&+_S-Oo4Lw{4#|P)%hfP;ZZew24I+Ba
z|KUf~AJ}xu9^D@P27w?`bR)rlbUl>g_1mn-jmk3IGN~JUU!{qb-*3UyEz|JrV;edt
z%9On8Z^4}Y1zcqH7hc;@i_P9_c>g>j)LW6o{K01Y`u~bh>fOW(e24Sg0d3;9tdC21
zbQ8wr-NO(v8pX>O@u7NWVXNd2#=KFaP7B+4*T8!a^(GgBw{6C>mlqk6!xrAJmtw2K
zPCjH!BaApF#>BPb@NDQW)K>qAPAi&m?(`N|l3|G6>2t9Cr9O>|WVy{}vixVS7;w0J
zoPRXzK5kDLMD46@aXFn+xW0ROkkqjR>bv4N4_#X-h}qyQ`%s3aB(Oba!C=r6f8Yd5
zk2?1}JHuU5)h4~~yFeT|iBG+CTU5q&=PN7>iTsp7#BBU0&Rju>+PpjpJKkE5<g4u2
zxuh2EKGh*hZpLEafGT*yKg1Pgj8ArW34|rE_mrp|VhVPF;QbTlVlp0;3buptNkt-L
zzvD`$hrp3p21NX&&bhT=1=qxlz-RaeA9%&!soU(m*xk(OF}A5mfjj=aIFRfc$<Ddn
z=EN+{h$~&21WwyVA*!!{Ex*|8N@`7NCW`o(%=H%&JOpZf-r@+`wf-A=6pM}+(Vw-|
z5HaBvj7=QOb}mX#H~j<jJiG{t2Qik-k6PYS7zo;y)tGba12=T25}mPkHx73!L|5BC
zARxc^n|Dm9&6~FnmgI<gkA-4(1oPj?+LHPsGqJd8Fg@&70{4xtVnZrp|D9|>`~GSy
zQnVv7`?5jeAf!I<5Q?*oaRo7hiQ5exzCJyKQklnm|Kv?rwK5BbCR9UZ*DFqZOjS~G
zCm9diybfYH1IY>ZXvlJtf?qegL;T&sYyTYv?_C%zvfwMGH+|rGHVL6!+RyE0j*`CZ
z>M&#!kH(hfguV9p$gy8AhYX@tw-<o&VF|wSRwPq$ZRl`ieJU0|lpNNnhf$K-;FofO
zzxz%|>VEtMuQnO_VX*-{D={U$DO>o{6Xd8gy_0`4LY@w2)+gz?*Ep$BBWBhFVe)4~
z5;*?`H`+~>;8!7*UVnmmIkMn5QH!>imxAxbU~bE4mi_+|jtY_j#*!%qslj$GZR&II
z?>!CIA621Sry2czdoOgY2X5xh4qWz8j!rJqq5;3O==5Y2@+9sNo>j7-Ed?L2N&Ogz
zo6m_tkF3VRo$272yPpp!k*7Vg20?wJA=)_q0jY<aWJAtfP%p|xhfP2Dy{sR1^^`g>
z+UL)*M`aLMZp^E!IR$?mqEM5T<CK$surB-_bg{q3B|07*FAHd#p%aQ+-a_xr6X<(4
zLc%>#psgc2xbkzh<ogUq5X?Rxa+_8TpI!)w!O9-Sb}wM_h9<08pTN~jIK>HMbww?%
zw;}asHxDO_;6$q&O`P3|^)+w#;4f^BA{)-RoLSB5kGhDSv&Zr`1&`qI%tToEEE@vn
z$Y3<fu}W)o!T+NSzbF0yCXBUZEVKLQ`}-)j)8_y_UdK4{3xA<kumY7BCBe2lU6QcC
z2AB65)6iKC7^wAux4DyquBl$kDVQZ{y>ed^xp)=dHGVdClgopeKZhWotp;01h4J>K
z*&y$iz+C5AF!xL?W`<nA@7zGzqbf&Nd#nK?Lphp7t<b`=2{!6iqix_doK>VujhY9e
zode@y_#cJmF>D6A><--7p+PPlsKCl&shE*+4cBdagYhXxF?F&k9#xhlPt8?8a8X(E
z_;M=vZu}>bJ+4h>*2Q4zQyor0PKjQ36%e<Q7tn4{&Nt-jLK)wu(9>DO+prmzbo?;U
zh{dX;Ywvsz{@%p3u+H?T<u|Z#tv>l|-wDS&6v>S<iI^LdgFQ2u!+pm&Zr;8E6c;~{
zSi8KyVp}7+WsDs44>rM=qG2HQ9wb_w$FeY0Rp2))h3h{biH^dPV6S)|+ya|m6UhR(
z$rBl`%bG;%#bRW`V?OegCm(!cAZ-q{Ar2M?`1GKseCC27P`1|uFRi|Z_NRVeTWKg9
zZi#@!D(Xb)<p$^5G>93??Q~>!!pyry)N$k~bbryo3xf#y8X;Eh+5r1%6QTOvA8?p(
zo_m`sN2l*PiWkDuFv)BeE-PeN4)J&FR%A^5>~pXzT7k}$%YlH&Jc`j#V%8-CmpAmW
z_qZulE13s+{}RDNS`LEnO_HFA>Xff$JO_=ElJ@5b;A&NY!%XF&RiRZhY|l-U2CRpV
z`kHj?EIZ==aW@K!PV*)H68x>7i-MW=c<u1%Y`39F%r@QNsyaSE*^e2hwrCYJbgagv
z?;9}Zn>iKEZ{ys0hvWB%RP;6xLgo&}E?pnV?Hi#A_HHuN-*PT@OkI`u8ZSft#dCP?
z-|4(ADAR=DyTPp74<$2xL+XlmlIo+cfK{2mPr3^<KFX2*(sXFgH~}UveS>m=3UoR1
zuids}-=$rpqUW#7$TIz2h@3J2!7Ua91BY^1r4}@2&}uI9P&M!DXhC{Ss!*qv^_aWd
zily255}mA@IDl~l{SUK$kH&S9fa~wUAm$eKPs@k=kA?W+wG5eJZb~8=mSERy1A1g|
z81rr@pz6wdXg04<B#r%#Tj4GsmIG3eXUtrO-HhXM=NMLwJ`cV@mpQv%s@NLQ!wc43
z<MeuE=-EbjnpM9a+efDJ!jB0sMt9Bs++G-aU6<@vd4j&#+9(?P6*K<51@{qIyiIN?
z+|6Y9L%Ty@?5;&7FG>U*$4ZvLmjktj3vkXcAr;hK0%_q%&gb!W=#Tz@I5iQ+zEh%e
z!<f&$^*0)%euvOQa-cQY3RMhK(J?Xy8ka@m!2^t^v-l)5Z2Ez-R<J&c(Iftbx((ej
z=R0ILv3vRwcNjC`E3O_f2uxPC!2Sao<a2T}=xHe7l=-PBTo}u`^jC5Gb|vcdYaMz#
zxed8p7R2g_B8tPcCAa<)lB!J&uxaEjybx3eACBu0!HP}JpG>Vu>ym!%gfjCVoD-6}
zw@g?r<{N0s7*OjKgJ@7j8{<RWhBC9Y_@kN4lQ;bnIW7pmP1TI?IA}7stiFY@)1`Qj
z^=S7QOoT;|VR&q}EIs&u`72spfb_uw$^N~Tq-XI-?rD!K2xkxAe939fMQa=Xb@EM?
zLuuf}sb~1OHf<U>Tpz~dg=3uyLC<R%-0O-W$amZbJ=Z?r-nMXrK-RH&^9k$=&FSrG
zT@pE?o};4MSS30NcDo+%Q$jT9h5agY*>?|w(dyu`VLRF#j^Q_NG9*Dh=h*$v8T4=!
z)VrVL^;**S1F<jR-r4_k3*I<cbcbVb?m)Uswhf|#gVE3S4V(S$hGpY_!TSpW(ka{m
zi|c-4&B48V<e|%u*!u_g3kKxa9zD`sr$##hjOo&&T0|*!1N46?W^+AlE`MVa2C~m^
zZ-WW(if8^tch-;icpar-Y!+F{^M~(8!x>JSJesOcCe9U*sD@@(K3|RmO=n%tpJVyg
z+0SsemM#h0>jR?aTTp#q0vx=hL<ApCI)9T@B7a0|ULC3qzhs-(tW%!&=A97TdeMn)
zy=|~gc>w)6b{_tXuqG2*%*owpRwU!fQ+%XmNFpObIN<?BxLlr$E4N3$FK^}*ZEr=x
zHSH+TZebvgx7eBZ1jU<rxZ(;;>^`qZ|EtxZF?G&3`urJG`L7y`CK!Xw`E-!>sL|Eq
z-r@QsxhRa1W!|E1kaT$<&daqVla(AGQav8TFZCp$cekTQO*#l;b$O4VY&`nYhB~v}
z&&Cry=)3F{cb@&6#eZ@+-!}t!v+Ek5hG~#@n0dtvb?Fw_aoAQ-fjN7D^P2w`_Qx_l
z4$D&de_V`CZHmx6`!CE`xPy699-)IwFmxST!JW=yT*8ZLM0)OwD7;shUYNWdg+JHh
z!jlx6Yh_8~^;}#!A{GVRze*P5nb6?bx0xHqlD-J}jN*>1lA7W;e!E14ia+K{e1$RC
zm!pbNs~*4~4HNRyPMI1c{ek4oI%MNE9ilW>mKv=vL%sRhF!3VG9X4oSj<N=rue2in
z)+i9UJ(;lY_ytH<d504gw4zL0KQz2iqj~oKm`^VV{s!yOn;U!4q~STFk2is&*ZaXG
zB^i2y2G9X)_dByupESSAN2RZc+@8&cuutC$Z;mMhUD;%ebT8q&l}_;w*q!z5sey#L
zsnW#z5nvcDPr4p#g{YKUARH18HX}@^)jM@`D$ztkWc$MjRb2OyGz?w65Cq1}&c4?_
zOCCSTWqDW+2zs3deh!oq7*sjE`;iM<Pn|{Alq1mPdX@JSxqw?kG5E>6=6e?8a_>iL
zkPU3Uy>0=I#fx3R+Jt#j?>>OZbGq<Ct~{j&^{KZf2f^Ju+AMwymv@>For8m^_Tj1Q
z9-&WK3@X9q^&tq_z~Y$NhuGQEg`am_hv28R=u=h)2U}IBpNAY>VI`oB>sdB@K^FVH
zsRhdeazu;!3BKFQ_^Z=i;IUt97B#|zH(B-yo&_6{gMqr#$xw@Cy{-hm0f~$&a*^LV
zML=%7G$+#T(-L8@5#+G$YnRMAuAX&kwVy7B3$fv#d(4~^d(4ALfrE)-cOKY%y2H!W
zuR^E(=@_O_$#PHMaLh<GTKd?8&YC_MvoqxAcJ^Ppy|G;6xJ%rcKe^bqaUL!SF+c;W
zPt1cLg`E2WI%i`DdKW(8qgf9nXC>n*SesL6+!apxDO}RM-GHXq)_~EkXZY=_64_<Q
zKD+PkTuNp$hIZ9+|Kscgnueihg(XEg5;vId#|sZGV5-Fv$+M|8WXv@?XuYMypOAlp
zKP#p1H9?M;`&`8XuDaBJl@f0?Ru@xivPGHBYhm9WTMUb1--#8`;NTI!M|uxL?YRP$
z*OQ>x%&U@om4h(G%^kR%8uatuYPkM`F~VJAad`z}>evTDLf#0Fe)1Ew{6Sn=qeS?H
z14!POdhkE5zy}(6;OzxpQD%D~8pwyi>#Ro*v@`<KeE>?!m!SX6k$m^TIXFDaf;Lsj
z<GRK7(aV9c!<Nj)nt}nGAn3ib{H7LmZzzOMs7hs8&GBWuESXjJ9M)cb1v3vQQjtY5
zSjZQmS=(qRU#~;NE2i`AdEuP*$}Q;0Z3AC=0Q{e<!sNfFa2F^L-!US1cu$U|aC0#7
z$#>+PSw=Q%HCOadnKt*R63?#(K$^FJcj)rv=6x+^9il(5Swo9>+f;IbE3(x0*?2Cs
z@EKp+sE@&c+7O*#M841U1`ErdkUn)bx;IQj?+tBSrRM<j(#wO0vw=8DCIk&$nbIBZ
zW;8FR9On3{leuf7A!*4L)T^*W|A04~gnNa0CdxEv(pK;nX21ZWSWFo8gq#0$3w#Q`
zfo8Ukxg~FvakY;b92NJVsOtr8WVyjv`fV^|eI^!f`w!Z+Z}C=>R)fv4Dmc2(j5yBi
zMiXr{nvgvb{dJPKm?_C<Jj#MxcsUfiMg{R*<(=Gabpg??AB~j<mVuyoDZk;;Z`f^T
zPF?2?N3ZAKVE=bTT5VbePMZ5sa4=mYYW{?mShiI#_%8SN>2sW5s!w}8?_-ic9}B*i
z(<_hSQDAn2TNYOify@hyrcrp{R2%CM=~1K6?%>dU5I(7wVasZ^dv^K?8AI$b(BeL~
zWV{y$2HQIKmfnHMI;=}DwgR<JsDa1je9%gI0hJl4e9&Z992Hdyu2*tlL*qNf>G};a
zKk}jaz6p&Cn+tQ$hU7FBaC35sFg2=GA{aQ?xzfazD(^=e?ypF91*?<goduXZi)ElU
zsnefP>0n!AN~0N5xHWwRFSzYfl3@RhyXs^`g2$+#AZkKMXlDh-r~tTqX*n40RU-br
z?^yorG)ix9Aet>pRn_CLZMqCC{yiJSi>)QE*o<*ncok-xj>36&&fyoY3X~52#|`^_
z6GN&ACWzA+B{Pg)lU0N}hcZ`Lg&4Q|`vD8hwa898H>jz6j;mLxQO|K{TutD45GQYf
z&`HDLX8m5YFK&T2SGEH<qgJ9mqa96zUTE0*78CaM;85k;*n74Ys@{Ks4<{<HHMUU_
z`J|BVX7}swS6E+X$u0i-9Yb38?+sYFUxFKEa>VGOJtQpq&RKmr2@f``z~P=kTIHrj
z2Yu5gzQ6zBZOH>%yR;PFO}53T1?E(SvDY$O>(I5R9NZ>Wv1i{O?v<Y*J+hExC5PG2
z*5C`^Yj<1ZUV4-pb5sfZo$cZC-Ddn7qeC@cDiVv104!jP-|lTU*si-4J^PQr-)H({
z{y$SH9a<!bnX(j*+YBUzyV<ks?{mqnWNq?2Vlu8iYRYD4u?V42ptpoEjLctw=RL-P
zuU^cZeXLHqdp|;bSu8IeSSz`8=Nj&~!#Eom(N1EEZLIa00*5@A7i4Q1wysX%J`HA^
z#%Hozd+<M0$?pW6*e4J<A_bCa4N=r^79H3w#NbXYB>t|4ssoMSD<8>UzRGxgUNxNK
zj%*ZbRXZPeG!6^(TEXt_K~NZKiQ+rcMGw9vL22n{Xdm3mZ6s#o@aDhZlA{c*tYiKp
zMTV>^7Le9^me6U|gwy^ilFY>u;quy4RIXs>3>{VQOAujWg)W^{)dh=OwaBk6X}Hq9
z4X$*vF6+nNT>9MkaDaL4e7*a)@zqCh)rcGnc$Ee+TqjC7b9lc<j`Zut!q|O!M7+I?
zoB!z^mbM<n&sPS~pH~avu}3%B-|T}<>s09bcnMr`8(FSUKwAfw^UGG(Ly9N+Oil)Z
z%!;=#u1<#-{C$C0jExnheGS~QS$^o>SyABLVs3c+U}CgQ2jiAs2bX=FqH5(oP@QoM
z1Gn4YzM<Dp`h5^CF=gJ)qdG8o-aq`Etxgo;&cniMR&+|{U|RhCC4M=XjF(2`pmz9m
zY!wdUlkNpV&FwentsD%-vH}vFr9n1D$kPhB1{iXbb=5*L`PUc3xVQE=Ry);$L%R#i
zF-gYE$a>U@JcI`OUgBj7Hfzfm4B{Xg5ZHL~J;v$$s|r&Z5N|>s9DRw7>Fi$17(!#+
zl}OiY6MprJ)gWrlfTwN~L361D@7_0{^%FJNoHd_w2ndGvs6zS*hTz(f&p|C|K1QC_
zq|$r8c-NpUs4TRmI<aTq`BE*?KK(GaD@dN4w`Km8m`L6)odbpFWEk|J1S8{8K`;F~
z=kmCR&yxKEMZp6}>qF*&J+l#<wo}gScmq^~)<TYD4KIkP#4s)x+q2d2+gauw-W7r~
ze_UZ6rJv5<)0J`XT66N>lzjAzFX1l*6eIt45E1z8FNq0aeZXuLqFL34g3O=%VV1kM
zld%Sjn2O@zqqxGkuQ6gq5k6b0M^r$L?irkgDW2KzVpakA+Z1u-ai&B(=Y}M){WbG(
z*b?Uw8#=4olKSqj;5RqO5r@IYdGnUT*!e&PJ>Sb<#^YPyqMpd^%bi?~lMGiifMu+7
zZlHdTA_?j5guvugurN0j<tN&buXY1TJ>!+8&Whu=^(&EOLyAC?dEH!M{Y2H%AHa#F
zo4{|uSTL9&q+a3^FgNrSxO;Q>LQ;!=4ylt%YX@Nca2-11_E31R*A_g@)tQsX1h$!J
zlc#wXAm-9AaJlk9Quim9?dJD!(@~4K92n0_YVzQ-(QJG$)tuHO+~zyu%%EK<0et&(
z(PvOCwCvHP)+{F<`0oHFUoj%hg#vPg-5-<HZK?4q1G+Oa6Pgspp{G|7e}R4nwLOX8
zlhq5SN2w8e8V|$N#(?_w)0k3Mz|KtDxoP!T=#t+nI;MRY1p`ls+D91B$9@#f8yVAa
zPV8O(J_A-Bn~2(<>rfc68FVJF4vJU-LY>Xv?#n7PNwuOISdXUyKR{m3J-FjzKt^5S
zVbZD)bo`@9<3}se%R^O2!=YBl|9u2pb&8owyNMS+UoT4ieu~?5_Yc|$?{NN>Io!bk
z)^r!^KKH1Vb5c1kKH}y{%vELV-V6)kQ<4uiRT6OSNLyNYcs{gVKO^z?oW=!9UGS~-
zZ*<J>!CO1kNZ07+xas*;{5*09hN!%UOGiGT%fBxYw>L(#?5;edJ^qL?^~EgvcoTg)
zuEUA6omd)dK`Z}taVO_zVBgi-_<XS(c_UPzJ<@Q_Lh~_ry?Y4bZ>!M9@ES67??N#>
z0#>8-QReVhm>Ob5-p#%V|Lj<<yq@*!^aqjmRqPosOAiHl=_S7JBDwxl=5^CkC4%H|
z?!dVODE=Q~zAOuz4VmZrIgfYajcLB=76>qIh5e6CV}yMO#0d98rRFEjwe<{ayDUdy
zYIfnI|J14HsErWQvJCbONJ8thPdG#CGQ1t5O45UL!7zCM5p1t=hR8i&sBJ=r?GA;|
zUgnkQYQU6?8?f{1KlYt8rP=2+X-e5dymE_u=R6}I$5nuT`{Ph(piI35pE#+0mMD_R
zEVgXcrutc>=((etw_u;C`?EG~z&l+c6g6RsHoN<uPGUSLE$GqF#C6R^WJaq8<Oc+S
zX3!(-6D<b0N!B!Vz9vk{&%*X{dwh516I}W+9^$2H)TZn=d>AE91~RVDp16T*W?96o
zzO)1nZ0rOa83bK*Tj4+wW8drK!}aqlCvP%}m!3yae<EYL4Avrs?*~)y^!2E+{3a|r
zm<m3}PvP%hx6!Onm)2Su(rpusiQ@(v^i8Odw5(6SWDS;6Ja0*>P927<fvqsRmSq|B
zcHrs3sw6l;43E_dVNaPEUF6$}={JUgaHo*pF)a+OC$pbd&lc0UbC9Un0J9XLF`+gD
z=SMDrelfz(0>(-neVAMJ=MTCJOyamY4btFL2dI0I-D$7lCsjTAkYzaA`0e1Z_$+^s
zaS*PaRv=<kEs!pIDKeRCO66ji=Z^JM9th*{d5H$uKH3`}jP65KHiPN7bf2-t9U-Tm
zF_YBigMVHZZ+9n$o1uIKT;@IHjvkfZ;cJg@Tb?XwymuVG?SF`7Ew@EZ_Kf|%Nrgma
z&*5$cTtO?P^HA2k6ILr5VVB-DUfoEM{`L#S?e@iR@2@;5e9;A8ixi3ajM3=Weia9@
zT&(x-Pkf)<Y52VJCm5vt1hv(=)Yh2wp#FtJt7DjCrBf+PHuXTEjU#5ptcISGUN|vK
zi+DD%=h$pTD#%xL3O{c~6WLBDWa)eG9A?TTJ?X&WKkV*x>^MI(QG?EMdk-D^-Owt~
zigp#4^OGMnpwrz$T+O$0@bhsjv|B2H@9LML$)5M{=M6pj)%q4zisLcCX$n}bmm%(Z
zuka4zoOq|QR6g=I`<?xwPs63!BsX7%<pwiFp{DMT@L>mh-u4I{bX<YjvB$74d=d&5
zj6t=qAW#xL;^sfn0-r4l@WS{A%n@3Gw|+cq8?8><R|bHb>JAWG-&yh@+K~RpWoPu*
z>wNeic_P+(QX+d)NQ}mx#*WBj?7q&<di*&U7MTf)dz9z^^-_$w?2K`eO1SgXlK9dG
zA_cP_Q0HC(?HjZBob_KYz;7689lHoa^tDOG{LffcqDa%eDAHw*x<Py(2;Q9Oz$dzi
zn5y@cTOH13EedJq?O_VdNkZEE^ep=S#~9Z?nU`yg7FT@Rm<B&jfk2xEeuk?awdKr+
zK&uMX$MI0S{xxd<I)-}+Y$!O-hPL8FXf9*_&rD5P9kvV;r(A>|-m-+%i!e(r5r&;i
zf!~7rm>TTF-F$Ht(iD17?(G&B_MEv(FNfji03%W>VY#kvGho3h4f12Q7G1)t;E9<5
zkn>BIoA^tg%o%wcyJduQ-KRmcs-+HpOks10#x3AsErqm~GE~X?FeVEM@ZFO4;B?^{
z^dFyxJAa!~r7_w3%HZ1|e&hh_*vu<oQ8qvEtRY#sFb0=Bc!yHu>7uCEZ{Yo3HFso|
zE;XsM!SMw(kQ&s)Y5!BB4zVlv?YBOFu$)a)d~Im{d6uOcuS|y=mM7IQHgsLqX?!?&
z5N!eu1RnFa!&l_!TGi2LwlGSfV0H+3*3Hy9@DsjrXR-0L1_@J)fY#%0xCq%Q%(|9}
zE606-oIy%hy#EHzt&PUbGQC*$;VP`G>4Ev(Y(D5GBl7EL;s>;7lg>dtkRNSMhddla
zRmctSp74afU|bLPThnl6p*}HFR^p`XKSgoI5}4#Mh{jF4171hlVbKW{a(gcGW(~^W
z<KCphz-t4E(vKvV74-yu&g=yB4+qg=qzoPWQ=VM4QKs7AYSd?KGmQUy6jqLyjYfKA
zpd^#QrLet?iLEVNea{mLdp^S4r`8xc;3{ZkF{am$6zCn!?j9~h+>9(C{*Qq_G0c{@
z$*?=c*0p@YH3RD2n8e)+mL*#bMPS9-UogtA5eHbR5$lLSw5(BtH8*x($WRM>Fn%7`
z{`VATePjPN>Px|+ZXpic)DQLMt^Af{Ik37*1;S(0$k1tF@NJ3_d71GJpG;At!`4lJ
zExCKp;PhbXp;?HjlP;mw+<G`+JQxRxvalkKaUV*&ozF9NYh2hRRH@4Yz3E4=)6WyW
zEoTmsGb8w#W$|3iL?OK+Ye_vPp5)a(_d@-${UB85vG>?B+`m(fBu>AJB$%Bg&z)!4
z<|Lj5Xb_nLC-HZ_37a3k;im^gW9af&PNDk_dY6^+F8_@ZozcCEpHuUo@J|?QPSqg&
zGg?8X_%Hm}YD6MlJ;Gi0%}9ZKD4IP#%j-AU5|vq2w7cUCM26IH#pm2%?!ao86rw?9
zNK7%TU^%w#OXIRkib4Bc0*e1`fQMzrFzD?Xj9rz0eZ#jxwO=hB%4VKExwFLr-;2dI
zjT8<X=>_*6>oD?-6<>G%EgGHO4K41C@KiDkbhJ*wNdxAWx@+v5y0TLe@a;IJsoKzC
za}uGIY{kW=86SMY71(Hh34Ik=uhh<fn)yjNqy74*XK#W6hf3$V>FJ<!y_M@*JQC!_
zu1BAJZ(+s_Lnz1`Nb-j{fcgY>o|!+0M#~wIeR|_DU|J^Tf60W$zm7q8NeXKGwWQ*P
z!IE^LKFD313Vkm(V8cppR4A~-R@t>^zTT2HHSfaIgZH@`LUkgj-Yi+tcM{8G^O4jH
zrgk%fq0xzXPKK9)kp9OFR!_tbmLd4W?lLa5nw;687F64*L;LsL0%^Q4e~ryjd$QtT
zrulbtsnimQC3{8c?9Y5vswGB0Oi>!wCz(FOglhk3!`{?;FyZ=O+Oe?>+oYAuX|)|I
zr^ushvjuVQd5iXgqrmK?H(#HBgnQTa3koae;W7>8*SerfJs)0%&pZz;BLdN2Q!{X8
zwq)hlt5D-SgR3j6K)0U~h-J^$fK%m=MU?5+<AX@foB{mY)(>#7!G<gXHS%i1S2T7^
zMmuFSdULBk-enBYt=k&#SDBFdHJ{^m7n_sRLElA{XUjP+-wNp1a0fQBGn;T&JTF%E
z;J@@~6IYp4XkXNb1;QE>d#skMmA+<q)~_Iy_vDYn8`8=m2`Bb=%nQeT!ZN25pdbOp
zQ2EPwj`8M`<SgmbG{&dgbpcY7_VZ5*bFicE0VG+~VtBCzY43G{E**35e5e3Ra$~^e
zOE%bTyUyK|Jpo;2i?Dq-`)u=Hqucr);FdE2@3Y=zjnJ4eT;=IvUlZa~v<prC{DWr~
zObJBI#;Ws=aO`9u^;G}BpLpT~zQbZg`+}mGgX0wx&nUpaA$|DV-kR38KF5wa8TP)=
z0$<kgXtf;6d7t>mWsS7LD3&2Mn|EH+lQ9amF_)@|WFBK3O7YT!A<(c{pDNB~XR(?o
zd>;Q6F0xFyz+snj`?|~A=+9=fbDaZZ&Rzp2s2Ai<X~Kl6KRi}h;E-O%DUy55l}8(r
zcl8L~v(7_kOD-x{<bdZiL;j7kF7a732mkA&xTrN5pLMW|XP720RG7}KxK)RR&GnEu
zL>Gc{Goj~rHrF#CobR~CGJ7}5dFM51B*Xd}JS?q1mz+fI-rop}uwh*Rr(E!4&WzOZ
zG~RZc26df!2U4HBk@Rc4fvOWNpvBH&SCkD%&z(k2{QR;+p|TcdWM9Dg*V?pj?Q_t(
zi`YNvFN6>t+h_gYgc8E1-)8QkP1|AMBw1QMoB7@g2rsCx<UigtATINCC3(N@Lzm+<
zFv^_;v&t<nBm4?_eO|>f%5r?D<SWNdS0UDQjJ-4M5$e`jkig{?ymVs*x3u#)dUjvt
z?v4~=+ua=ykZ}t-{x-o`4O#Nc$cldHH78an3RJCsHBMwK(_ae(q@}n8l_n_A$_G;J
zt>tMXpBTfaav@e}{9<g<XZ#4}Vs^cH52MzHV`}7MkwCi1X;xi65F@s$8Mz(bKUbzr
z8x0u?Q<2NLYRqXAh_P$WR=!8R0_6uP()+R&g#RogHNCI!fb$T{TQ-~Z47Knhjlul=
zn_+&{IQ%lPAKjb0LGWz~FO9e3+>fX7X6LSQ#hb#p`I+;u?eZ$v)}%&sm{%@zVk}nn
zAHjx%Jb28qlkPWKxg>u94gR8y^%*_5$@@S2mSjc79Eky!gBF~>=M{eKx{tVSoElwx
z<S%;tdj@uAZ*Z-}1)Ni78lP8bOMhRtA@4j>U>?h|o|<4n9-$S<`Lq*M85<~9(}H}H
z|AL#;uVRPE84ygX5xpECpjJgD5Y;pT_v)}*s`VR@OZ$A0%Xn2Tsc0Nd4Yi~#zc=EA
zZB;N*#*{d2d%-3i4^di@4prY?!uI%TSp4+_PTPJKhka2+kL~*~t0@|Gb(zpM0s9QI
zjzLRDC+ICwM~zL{sHdicy)(Tq<(3@^K5IFvTs#f?#xG^O%Xm?OggM`YXGD!JE$Ggj
z7f`&woeR5k0@Jq#;m*ztIB~2YNo#wD6TQyke?HMzbK)33YI7?*mCZr#o#nizl@WMN
z-;ILenK1T1HQp&=zMj2ZSd)2#v;XiAx|{mJ<&2FeW%O&PtM~>1<Se=u8iV$RCj60k
zA72c&r2)tLF|+anNG;b(9$$Kfk(Xn@bJrFS&r0E@<Y<$3{!=j1WDcmk=tZNFd6@IQ
z4P)00q*jGqxU&5n7`O}Q)7G)zXDxuKr>*IA#yUzLQ-XcWjWqJPD$&bY4E@#3Af@G8
z>w#OGl+8#Ri!{l!_$)k;02mtnhJ!3)+IigoXKWvUOWd_Vw@H?y4!z4KF|S+Gt7r)L
z`5Y2Qegy|FSFS_N6~_cc!cut)@@2R-k+yxq{(GGu&<=vgUlP7$bueVFR3TwUexO2Q
zHe{!N!nK|~;4!lZ(hYR!nJg3HySoAN{2zee%PY~}I1_4lN{h^$6AIVXY7(8QW+<4Z
zMm8+bqGltmz%Q0l+@SIj^epDXm>AZjFo*zJXiem;l&QZf<6g?jqnG~=ICEG){I{>+
z-aZi$mnRP;t7fs8%*CH<aOH+kHchCtu>g8CKSAV>P5jP=0g!ZXC)7<IiJL<NG~?}6
zkd{vo{cumk{gH}9)<~OdW52gKVN<xA_Y3)J*?Oe^<Zt-JcKS2T4QYl~2k2#d=cMlz
zgL0KQ@$P3#BGlxo%%f1}SufOnQ6Oy*5f~MH7<$~SXi<X^d8#oC?$0o#t)0=L{p&Aa
z9P39-NJ_*B12t$;$87daUC2k|9YHnuK>VNUuh^)A*BQk+eIp(D@!4M?VqFRZ#lM8}
zVRFQ=A)952s=;$(JI2k^r8?JG2W5OBvqgvE<+-ePC6~*&j0@({4k*y?lT`6$cs~4a
zjl}5(WQjx50q(>(83?^{2s%ICLK*F6u<y14ykTcU>Ex9XL3ceD!{)X%R|msBgK(S}
zehY7Y$c5C+UA#dv>x*x-z_NSqv0~_5?AXHG<Evs}DwvW%%o|h}_KNwEy7|az1z!Be
zRbsawl+Ur=1IjPiz5jtMjk{?|(+|AH2SpR$v)OleW%e9>b+_?mp_=rT7n|*<ECM@{
z$Pf9PjBox^BisLkVP9qn$}LHSBB3%db~Pva_NQa<&<d{b>sIX2e!<mEJql}ztccj(
zQX*9Kg=)sXb7(jY5F>`ye{bQ{MVe%FR4|;RIXKtl6LVp#DB1bJ9zwg>uAA`~BwDWc
z`WZrcWeYaUjD;<uN?>JOEA~I~Mb!W{pIBJq?B);*wY$Hg^jI7`C|0LBimyRpPzq{M
zZD@PuFn({I1S7`{pbACLLAuuvPEXP%{_ke+?RP97+Qoz%Zaf1@`%Cziu!nH6LWU&Q
z=u^QfbsARJgl4t7_;DYMh+UzO9yd=#J@suEJZCzDhD-5t(<RW;%jec?IFIQb^_(zG
z4HE<%e13W;tTHpC6O)<ywk(5t_s*31rd@-q-7@resWl0HunQyZC!<pzbF{>;PRqZ|
zAoYCDxy;?l*+0mH>eGrezrzI6M!bT^tX>qq%aZtc_`&?~MhInFAt%q6bJ|+UiEm|<
z4D(Ti`h!J$!rlv5y!SdZ<xfZZqu*e&njSH6y9Mw1!(onA0j_O-ftk)S0MT<;=k_KS
zJxPzwThWQH+5B9mZ#FU*9-4)%;dhSjLxt@hAZf!vRA{<}K@H!q>SG%CSBv42)kthm
zZ3getBJljXksG?L6wTQ#a&n^_T0}}<^lDX7&KxpLBc?Gf%t&}=JPsR7qETT&A?C%F
z<2aL}7+?38&9WbI<G~H955Hu6;{TvWDV)D+D@%qQRYL!%*FkDNgO_&hmPEY$hKDWc
zz@>e+=xT#9sWV=PuYQ%V`*jU2SfEE567wO{|1keN@G^GX3&0Xp6>^K6L&pt2hu@c&
z(gO{T!N-90aiTLgpMgTUFERw?Drixu*E??QdTac0NrP(ckR?q=cY_lx;<iV&;I}vL
zF?@zOIguyA^Nhi?_nskHVku(&=Um)!E*WN7e!;4!PAr=>fNuDFp6y@WV6ol>Zi`J1
zyqP3R##xl$s+&6WrsWsbbzX%^Gi!Mi#G~O}9RksMbjHFybbr~*Z6Ed&92V+hWz;|_
zP&zfvz3Lq2Z}E{oQ2zy#_uWJH5fyyutaWIXZq8W;3&<>|TdV^Vz#It_#bb_1_HFZr
zeG|J;FiQ_=FcY5IEyj2WyDwIKWzT7II_}6oBK4CK2_{hq<01e}vm}|<kK(;+GQ{ji
zGDQ7Zf#m|`uKjdXa_i7QYL*`c(l#O8W~@z$wa)MtO9s+&S7eEDnFi@P?+jUTC*b5x
zYf8QjAP%IO)63cf;!y&)!X3iLy1}eRatSMxUgM0J>DV(n3+fmvT}4z0d7&2|Ap9Si
zXPm@aK5Tz;z>aVCtLEz)Z}8PUEx1;_8UA%x5j#ay>i9hi+@Cz+EAE)nH48M!m@YXg
z@HOX}UmFo|;sx*{r(pZ!8!+G32kgcuqxZld^erltRDUf6e}iOxcnr&MxXuD&#&U`_
z6w<sM^_bFXj|Ya$#ybafNa&{P*xwk5sWz7Iv{Q}_x^6-RYnnK3%Ocj(3grSXu7qKO
zkHHdu#tQh71wNyGLf=y#Y_A@STf;O+TihP-iJ6Y+>8D`W(J#31zgt)kuS*t~+mIW(
zjEGs&3N9+YA7*^p3{(9ElJt<1yvoN!n8tV}J!fM$=~5dAZdnL>)-b2-sC&@6l66S*
zf1+1D^PPN&<-1QtqN05g7Be4B-p3R0`2=%#?hsJ3{@q+{yb76H6pG?UKe!8~EnpQg
zm{y<t2g9t6Kw9cb)VlNlL*_6Jc;;SSe03ai8SB$lqgT9i`W{K=Xd(T~c#S=a*x6!0
z9{TSe2xI@UJzat_xx6(3t+%ngTmA+~)`mE6<4)pI|FbA~6OJx7-6Sc|X7t1OG)#>S
zlQ`+QL;9<as7`~?ZJ9d;SXt82@%h-~JRIo#tEe3lfmMS)f<W}q$^XuL&hIzR-+WRH
zF-{_Q(r-gI4ly8R+7iC?Zh=IhJ_?Sge8-YT9g@^B16`7yi@q)oMY(}v(Dkws)tqHZ
zkBnj2o4EBb`LPj3&fkL_{6Bd0HyGWm7l1(iINq6TMwUdL#=9wPEX#XMB51S&hd1GT
zXy!r?21jx~{hveXwIy6e=@G=X9iWr(4qW$+#(&!`<Juk%OfAh2<$q6x)_2FC#&R3K
z%HjgHwl3$wZ0=xAVm3N1oW@*rn=vT$GnDT83PZnd!D*jzar8(VS{WA$!Vod^&tzT~
zJ{^AhW}@)r04{D$1#Ev)3Z7TS@(ICzxVC`{Q7P9IS}N74@4FzbZb%#Sy|+ZO2fRp*
zb-&^48?0QF$G4x=qKazTbXNXuocctatQz!;JvLl<cZ-Xh-n{$VHTL`H$=*j=<8*0o
zyC;k&G$#VFLP?FGh&=;duo=TSPG?^s95xKXcSTo`QAyFVU6%}X{{?CbqfuaUvDiNG
z5QH4O4Fednbw+16-q6g&P7mgxNcjRS`x*bda{$^z1cTe(v%vq=AeSGp-d%Dd<JAu3
zvqBk*-a8NL2L*F3`#DKraSyKhlE8Z3YP4=i9$dO2PcwOKntr#5pSM(t+8n$HAIkKp
z=qJJ)+ri}4lt*~C{sL~=HU`I8yu<s?EXiZLO1z(A{Qul3Y2I<k<;SU5c{2!xE^tHN
z*cK6|txp1aZ(+dfCm^n}kaR_t^I@ldz}`JAIAoj#$-K{e05{e_R`^fYnQlerPmslB
z16Sit6HQ!nuou(TJ%zXwYigz{;8y0f!P;Zv@O5Sc<gXt<m(*u~tL7OD`>9C--WI^z
z6_;Vq9|fwi;SB`ta)m=jjfu;0jw_o}hBuQt(DC9CJREfn?5#_nZ^lM2+twj!D^sTF
zM}|YS<RRlwzr<emK7g(>U@}>qF5g{=bUS12M5V&rd-6oDzm{9tW=-m<Y7rMGLjB-+
z{>dxGhUsIBM$b(c5}5{}B%Bl57V|E38T^)_T)5byM;>e51>Y~hyv8(^HJVfl?hC5e
zjLwexDkDSM>wfZC$%T;1vhP3FdScaOOEj@$dF4N{RNed+dasb7?#5G5xMeoVu^Ip1
z3`25$D)U!=I3y7i)bpGE3<8C1T6Al|8JxX9j)aDoLruLNt-qFmt@mwU!!2*dHq;bV
z6!&9K&rZ}DMPX0FL+tmfg_?o0V4m9m@;&JiR2pxD0_F;9b8<o@+he@e-vSW69D%dt
zl!$PQ7lbYQfgTO-v23OixUPSJ_Y4e3Z}wq`^wr=)k6q=p2JONn+2uGk*oJt_{f4O?
ziSV#Om3(m5BjtA$NEpkqv?>mwE~7O?o^yWk^I9&TV9Ljm^bg}fkUWteGe;M?A2;LP
zAXCy~<O=&(o=dc?6&CMgyElO$A3w>0c$Ava>eC%iY4I7g*7cwtJqa#KO7xAcCAE7r
z5u#4((`(Izc<Dzn_H5b=f*d2flJywVGhTA{CdT7_6AkhZWJo4^R;u6WhjBvIi4B<u
z9v>UQ<N?B!Dj`{0I2!kiHlurWv`E=$1@v?4M}NB&AkggPjf{??nIsj$(%WG%bJ+OX
zY=S*ax+Ha4v#2v|Av`c;-(|P=+-q?RzAFg_n}AOczSNjRero52E!L*HXULK|;cKuu
z)rl^aM|jVngXs4i3baj!<rn5@Nu2!N@pF>oNSE6Ye&5i~==~y`8zu7&zHZIMy~_(w
zFjbqkpUQZAlZ+%u7d~Uo(iPavcpWi}DquI3IZr>8an*|DVAmbR=U=g;I;V@kYp4}=
zbquDdCq_fUxlB$dwF554oOoXJ0GoVvzYp6CzPaal*W(BZW+%Wqu$Md8n2dtb+ZeZw
z?Zh@Yfv7hJRvuz=<@Y~C7KQ0xFP(xGU7@g{lij_YtwoEzGhW>2pO{;tNpiLf;MLO4
zgZ9yQl)CBgDJ=8eeWi#wCJ%82?Hbf?*l>(psYd-1F7hF(n&8rf$t+j6mur~O2Zs}n
z;O;xh#C~%l>xow~-<1~S*2`19#1ovJ`!#OT29`_K+Rmlce-o|Up+al4;$iO8Dwy);
z0v@VYr;1H%PCi?PSfT<c-uQ*r+I0mMXPMH8%a3DY?tL6T`yNyncEGk+H3ANC%n2fk
zuPY;<H<x94OnUho&0xN3f<4z2Z-{~8wt?Q`Z+y_oZ{R#yk^D$zyZqydbj5B3>aaQo
zeOEr@UMUNx+J`ypE^0td>nM|;tDo^byI=JDwW2@8&oD8w1jOC{@qs%PpkCn*-mkfW
z7jlXry~Ub-&KiXR_auo!D9>5UW?8*!cOdZLDXwRsKa}0yf|2=(a9qKL`cEk50vQ8V
z`hFaLDSHxjO#pWOUkYx&UZFU}n5!!1FfODKEDj!Ez6}{zdd-&Bm21&!Q8rZYFr&ot
z(=^U@`BfZRbP@amEs_6aKo;IEN5}Ik@W{adWcTiR=0IP7#b-`&gN?&*ez7t1ERFz`
z#rbUilZ?wS3i30SquEql?)Y@ZauC&mbfS<G$Co=xF1N$a>pTvUwIY6>-tu-WiqvKu
z%h0VQBA*0v%*?%r4!f-BPsR+|^kNMJSH!@jCF-=XOpiPaHy}1_reWXx4}G-X;Fwow
zShXY+GQECaOGXG9)uw^jzSk0eSH`)`UxMpD|3+)hl0LhA0Y!UW!~R)!(XY#xaqR7J
zOb)=%m~NPNM4uQuN&<I_SpG!Ham;B>MgKi@yxTu3bSXG3YE^iKH!E`4S=Ir=Vkzs&
zY;n%{Pm>Sb@rw`FF(jEn85k`*iyh-%VDHIBIBU^`Ue~XJt-d)?(0K(ZD!<@w)j5=2
zJ-{E8x23h)1mu#hJA8Tk1haZ;L9ougM5v<=O^?j!whwy5rQb(X=_3OwcgErK?P{bk
zUye@OVM!atEX85$&e%Piy_ffl=VVw1A}p*19GA;c8M$tJQo)$}x$OD>cpN)d7~&n*
z!Sr&v5*6kYawn##gVc5mG+BqD@5=<PYZUXO*vfI%+BUT5uno0}(56+v4Y1~AD9$bz
zOhbS9K=tU8P}S29m2rXK+s<~Xr;Uj;t(6n3k#f@>6=EMh0{3|eQ8-o+KbFgqfQ4Vc
zf4vSLzsQ0PJ^c`cD@r)&<!a9Sk0nW)xD&;zemIYwWlVIBtB}L;+0YOikN2IWXtpj{
zl+IkI0lYgdDrLOTIDfQ@7zg$bX5+?9)@0e**UVieWgd!n$&d3!q}6RE`imxV5AzMF
z{w{0Mv#pWy*ggwy8LG3pgdE+XFpzc+>jt&OAJM938af$y!8GYdtj%TF3&S?fO8E>t
z4IT}t@s@N`vJn`azKH(P8CW_a0!1=OEax4;YwB8(QGz}wt1|)>;TQ1r1W-L@LIRnS
zD?7lJ)O1Ya1<9TzHShJIuWKyk9li!?zy6P+^Ny?WedG9P@4fd_A!Mb_bKPXe*GN`&
z2-(TXK2l^QN=tGisfc7$=eh1AElDa#NQxGcR7fSi`}emOucOZMJoj^5pU?YU&e%A|
z;qfd}vb>M&2d`?=%i@!`G%*(X9w`y2#}=d}iS5sGeuMv@b~wM@l-vnsxw?f^5;yh)
z@LlZeS?UY-r>M~SS%diWhEgt1io!3)8Mr>^IczMpBhFi5aMGWA$UXE$Qk%Ai?K4MX
zRcSZw`=v-e%`zapw!wHl=oKnEUjV1&nf!xR1=7_mPiulVfVcAjppoo5B3OowJ-*<V
zum@+%OvS2SHn8Q_O{{eN12wIFXy|211Nz!Iaq>ofh-oXt>QrO$@2h<I`Wn7#x&^UR
zm7=lC^>)oQ8dJSqfzCryn)>w~^en#4M@i|Bb*Gi+G>uzOT#(OqY?OzdhTptf)=1XF
zpNP>HU-9FO#o)gv5ayNZku{0G;P?d%dKE2b;L|tod%%YM4x_-EWl}s7Dnaqm4A^Qf
zOJ(L*k)LwsuwHv4%StqIjk~VHmgXx^GgSxgj<+G=pK5f{?z>PpB^cbke&B3B=%8TZ
zH$3<x9H#hv#;~N7fR7x&!+IoI<`oJ<5|puK;36Nr>JGp8kq9&C4fNAn4Y#&l2lezE
zC~wxmBxjaKJ-&jg56|PnhD7m=QBL4D_YN19Z3GRY2NA;yBVgoS6<S^Y2K8>Llghu3
zadW;kO%#h@slg3M@0$UpTrNs$DAGhfA-ks-;s{GimX*B%+siblvFvYj6J3PHQL?nM
z&xnSsumwxiz5J#HS@3^0YM6?Ik6EWe#(X~kmNxxdV~HV%m4dkPvfXe`E(nj`nTF@t
z_jU9-b9zDX2NW)29rgID+{y3K6oRheg2D;#_&fW3<%jaEi*+#fLJenIG6ROhJccVR
z+Qi}ko1q0paUPq3xgp_opq}iC3u5KyzFs-%aZI0!PwfP!DI;L2Sd&WY7DG)vn-jH9
z;RSxC*@<#{peV|gxW`=w>;0_vu%v@uK72J6A7W?jPsb%)Q}v1I;x-h|)x+}ZzHqut
zjjZ*LX1@9_u1*KyxU!&HjMPYn+o9jUu(1v2WIctz-fN(JC=0@^Sq?2y6CZ!f$A^;3
z(A=*_PqX>Clj9i-xj6@RZZV}*qs*w~5*<!t^ha1vEaCV?Mf&XNAd+;<lKMNf;<$|;
zFl@5|mtlAsy_NbTrGNU+{r7T+JdzH3oo{2{s?X@LMi(nO?TO&%L;Sau<*yaXAo)!z
zc5AafP{LB7m){p0#oi6ZT=#?v?<&CZo&j$<-4Y#+j)IP=8K`l^jucn^<i71H#$Mxa
zB!9kR!ohZo`_znehShLbrV@0kcB1}@Kj6JX!u5Do@>>PwL@_%71Rnyq@220FkLnsX
zv+X_mj63*)FV{nNlPqoem5siWJNd>NW^~iHN)R~}XFIeCao&6zV!-+a$(J>0N6#>{
zylV{u)xTl*sVeY3n8_9EMDrbnmw3w|rkwb%0Q!#DkP90c!PBA>;?xwecGPwlGk+Ph
zIV$5F8y%8SQ48~C84>XpRmk0-2eL9F*t6}QtKPcDIHuA7v~mn+*{GA~xrZ^jPWPkN
zV?#J|Q=SgF*bD;aaZtVHDX2ew#`33gV8M;`V0lw4nHjYV56v1xzf85ITOP1%r1Mku
zE<2hzfkaUMK%Uy!7K2Kd4DnE_MX#(sm^{6L^W<}&ZA&bAUz(2NgP*|l%>ugp&?T&I
zcqja$mX7gq7obO3it1Du5=~_O^NTw`d}<?zBY*I7l=9)@=TFezd<gj-O(KeKkhD~q
zk<PFZ?8}!UL)P7aIagyrZ<i8H+7k=Ha#b2o*o9YI*?ejFanQMBN)G&HTpHssSZ4bL
zbNQK^+Dm)t;2eyGyRGPW;Xl~EO+fm~7NJF+44HOefH68}!<kH3+U~a==PZ7X>M#X&
z9dLkOr8}{4-xeJ8ycjg5$rHO<#bEkD6Wg!61<kk4AmXj4XwN0?Oq&+QpE9B&D=n$J
z&LOaN{{zk&d!Tf*Ep6Qz075}Aod0T0KK)Q4?Xh-vRIE(oqpH9)$d*VNAHnoEIpQ{a
z7{t9#f_mj3NsYBRJ$2zV+|V<jx7*I5UBD}-oGMSl_B-&$XP;!flPbxfr#a}mcqSZN
z!rp!7XECRYA&J|li$jy;NaZ34+UY3Mj{NQXm}TlX!C0Osm-Is7szKCG7|S`Fuj3aw
zf54h63UpSu8u-cXz*d)T{=Qur9DFbb3u}4Q=0icJhV?|cZor=Gx0pL)lu&#rL(;Zl
z8f?C#Mc1?I<-<Mp+-$R;4K??0X>%fITe)FYLjz`twMf$iMY?{vEiQE_fkT_MiNJ3G
zx7e0tKfSg1{4zxpA1*+CuMvH*MV0KjZc2M)r0Mrk1)}oc2`2p~PX(eA!ZGU;AnS2G
z=hSwB^SJKL-f7sw)7BXqHdW#Ov+hHdNnm-~NN8d1yBzm;Fr0B3Zi+2QmX0#2Cd>h^
zWmmbSp65U|*nsB6OVJW{Hixmc&+dr7%6l)c=C&rtkotx5pfmpiEGUw~kI&iO>;})P
z6cwW(V`cb#o`(MXSFlvpqe-?0ai$vElc$Mb?-9mGeinsg$2&k(b2qpTQwL-25tQfO
zf=yQ{&@cZaKlhOeS+hQcacLrP=EdW%%US`BIat!r)Q8}<It4#ytCAAUe<<zYhIW4^
zqIZfVbforjV<t=ka*X{ykA*|GvJ^_sTY<)7S>`79?l?O(=hEz+4R?n<L#KvLjBKrd
z=behgxReLst8xr2egn$kEF&NP6t+4U(z#K(q+L>qx2t+FCsUiMfBgsUN6zD<&jZl9
zz#e;d-D5jo9isVZ5dS;If+}4o!d=H>!1>ra96dD-4fTI<GA0H1oL8XL!yIvPk1;9a
zKj6b<Wvu^J$(5L}zGYn}-+gxsT#d7$ZVGb3M9FnL+nb3$hy|(j@Pp>sNKD=F2R{b2
zLC@DSD7|I?Jei}j?|3r0%SG^^ubRN)iz%nHz<>^%YJvBYOYr{UC9p|JkLLeQ#gH{J
zj1QwvG<RR;oE^ubOMVc)X;Tn{%^1sDXjqYQ|9CEWMlj|~y#OnJJ_qM%8DRC5W&H>A
zTw6tXyu~l(QhD>2ANLw@-^faAVVU>(=YJ*pmS~fKt^!!Ta6jx2-^G)gPT|$AHguAt
z<Ei97xT&NQHtZ4*>(_nIUJwEM2APwkybN6=2**n)8syS&3o_0@ngr>ofyg#OGJ6VR
zCC`)NeocLi^>(&YC}{zIqX!_+iOSCKn~#U9Ea`)D+O*}G2t`^IuF>1y@LN-5NMU>@
zcnv<!I$PhsC(j#uUI~H04&cTLBYGg>9tyO+bNuya9P#Na=AKcZ7g2^zT3QclqSa|x
zMK^l+F^_Yv8An?zXvjESsEPl}kF_!*Hph&~L@iw+x)cnShwFsqx$<;kydL?%&L#D+
z`HWLK2X0mwQpeCOD2nakPQS`U?_=^{ct4EyvI^tHzov8b+k3g%1B>9h(PtdeU4?E3
zjQDni5}aFPMU?}tqOI)?nENLUoodd4=%6ai-9C{Myj+v5e~3LJRye_d+eXwwWP}cL
zjbP8_tB8zcplNLk+h#oguTQC5Y$wZ)_QybnUKifjCQWj)UZcijIa13UYt?sa**PSG
zt;<7TOn^2d98AR}A*y6zK{zhnm5PeLlKDX!4T#xrbsCVe8Qi=wC804bAXh9)vg9%_
z_o*Q#3h@Tj?7^tt+5jhejcMeB`!Lt%B=(;F2X`&l-aF$xwjJC7OB+~cdy6bA9-%`Z
zelM1*C3Cx(>)5%W9nwEN;+AW%UfBX0{^fX8;{4_}1QgF^JIOLInXN@5&KuyY#2(yV
zya3%+Kjy09@1Wo};kw-wpmOtjuvUGH#myqv`t<?Y9&5tsRT|`pl`*k0W7qJs*Zg%G
z4PxwZ5oXCr)7HhGd2e|FhwK=eSyIeL{2mG0O#Z{R54&)^>Jw<}8^YM>al)xf)adOi
zDX@b%cgK%p_pA*?yyDX^P)`x!S{;fRK~EusIh4u+*nKwY66mzblTAbGAmW4x-4k{Z
zT(12GPgL0VX>=0a8Ej78O^QLmU*_Xz{|2T_`sfzgB*|kli}H7mxFy?|OW+i9+qizf
zQ|~)K;Lz`q(Dnz0cvs`>uzHMGW{RS8NB-iV+nCcQLH($UAbPcquatd{VKXQ2%Xb^V
zqL=^-&mBbbzfOfjpB8Ld{u0f)rO14#T9%Kg2GP8w+%6>}2-+?K^)YkNFnWMD8hD2T
z_lBa_szD;C`<Oj(mL5sUs>G$^%VFqPX|mkn0Bm8*6*@ZyyH6@p*&!~Fdaw-Ye3oK|
zAc0@LXE{cGVXRn7iO{zq0#bi{fGICEG1>MMS0Z;7=3M>)K}F|r&Fx!Qzw@|Ipr#MK
zSJ>Rce<s>~GlIG0#^ivLJXt^76`u|nz_4?Z_<F<fTu~LvYZrQR&PUk%@tQ7%tZU$o
z#j*2^iV3lMbRH&UR)bibo#{W`<(g*~L7^-U(boeQGkg?&V=f=>kT9ezq4=&!ibUt-
zq3x8r=&*~m_099qQuBqdIQ$uZa+)1I7GXul3>*SEB1OKbOOb88SD}9MV!nIYB*xR0
zrShyR+dX{{)vfS?<|FL>r_c7Y?JYQaRTq}89E%Q1KJ(_2igD;D8#2x$6O%UHgT)Io
zvBNu!zp&^qirRvBQTIe)?7s#GXlduuG)r)OQ6A(TohMX&(|{FYRLR=;@o0N!9t_E?
zhItL_tmF}oYuPn8b&-IKy<kVSm<vGlV-I>}mEpjz7a+1wfvhJ*T<tx!YwfMYu&xjY
zvk`L>x=L{D8AIw@&X~l1e#7^Dw)C~OJ!${(7)MPggV(opXjt79h;BH?nJag(zFIWD
zGtZtpd|3dU<Gk@D%a4iw&gK?(vU~Z&81Bj4TloKXL%$M^kF9Qi>W|jwt6IbL%#_0;
zV%7y%Je$whTnE#(X2N@O3p!`Q1GqndV{YofsJ`wKI;}0`FUOh@!$JZw<2A?>uMkLZ
z=z|$ArD^WgM9Ig@d*C2_0vf+kJltnZb0Z8Tj$`C$^0jK-I$<o%YCZ|2hqj<p@;me!
ze-f3y=Yz4=1Bh500PgFiGOkWOnl=8wUq&x+mE1jyo%k1gPRY^r%;yv8sYjzrlX&&s
zWQeHR1-@5Sf!KBrq&+jCokP8GBfF1V{+i8uytL&T-i2ap(M{O7N`XvpI)Noxg^+%s
znJYSJMyzdrG52Q+%<w42$;AppeBiyLeE)v#Ro7E=8g`w(zh@0@$d#qJySE6pGvAeq
z^CSL@-B{S3@dWpOYQXfrx-hJYIn5Kk!I|(tn0)dcuGlS4dJbIVy9XUWO}DGuSNmr4
zSiTPqE5Cydw=h2O$`kYyJw@fOj0Ko_#bxD7=AQfiPE)CbbCaAv(;XU6<9(Jl-P(_X
z7&k2U=nO8mdJNXHKJUcQszgp0i~^%X*ZbKMaL&B<FkmH3+ioO+GCYSQ)@vD4L%@5*
z4V<>$l=|)S1{Ky{vMDhnew}N;xT*rqJC&oG!ck7?ksUETr-?9WDEOqX{f}?}tA5S`
z&1G_YPv%DO9sLpi^Ei)6x@`ZDu#|Uk(!i|Q@eqIE5s<ZZG?OvKSF=5JZ-XPgeGrU$
zihqEmWrpx(haU0C*$dYeSrZTM?a-;#iFMKDbe}|%Y<T?~w`n(HwQ(n|S@;y2gU=vn
zCW7;oX{dN2h0D0EKt(Z`+2)VF!$9yq#@vg1U@TP%L1J^dDLNP?RHdMkN-}5dQin4q
zoPa%r58(M5#<r+t?#$Jm`1%#gozR7tJI4gtBIiTztUHkF{*)W%#+>CZkhct4!|^5=
zn7r^b8cw;+nMNq1^!P2f={|GPm`;I;7Xsqm634Zsjsds#ap2Z>8+YO@Ji~HL-D8E=
zeR&V_g%eDOzlz!HJ1rhni4Hq=K;gGyn3FEV>k(>nJ7ez)>}G&vZGq5Wh7$SqMUF(O
z^n-(f0X<YEO+sr=vTL01humdJ_3tT|c=!)KEl?u*<DWtwd%XKC&cr#>8EePnFZZmd
z5bK<@8MoaF>XpJIUYm|Vd5JUH`)JWZowv}QA%VE9;SjLaA9T+ef{Ro$FF9EQBH>lu
zarkmD)Z7kU2lY^VtD8^W^cTy<bz$6WFX%8#;l;LD5`m9hmSp>4j4joqQcB9?<IbHZ
zccTjn{X${5nLW1M@JI9Z5~#E<0<|Vx8noUS0^X~Gl|dKgw#x}~7c0@=c4-n>UI5NM
zGjUuL3nsN%fRp8T-t52wJg`ue7^*zR)2q&)(FRkpNbW0m3})<(z@M;PON%6ApG3pv
z_x!;gSJpXg!PhbD=T#?712fuiZ4K+jrIhp2PQC|s@hI5${0xR}Edj}$68P6DO?o^Z
z@;{!b6VZ%kl8|_F_U@%fl>(LNg4wzFgY{0sFI$q?GgZi~&C;~vwinAbo#hrB(Ln#V
zov8Y6HdH@!#HnoV=#3KL%AW%;fw}$ESGK~!M028S5)6gXsW4s^VX1r>T(GYMIp*=s
zGu?w->5MO1@ewmD^r?b_2?=ETOzH3uSXyI5jS5-kdw2vd-hEUe8|n#8A0&MGAvNkd
zmPd#4JTH}EK%4{rLOfKk{;vYLR#k+`jCUEWdYg9&Gl4SKJ6Q2bh2ALngX3p~fu+hN
zs0o+Bi1~I<ZoHTCvwY46i9#VDK994^|0((VR*|^wdx|CXmtfdyZ4zoR1+|BoQ<sa+
zd2bD4F6to5r7yLhQ*2DZ>a+=+E-)d7ZZYO=l{wL4J=HO>{_yxKW1q5TpP=S`c0?s}
zlMGGgx@9;2zqig8uD=V9M%a-rF@3=07}15CCehGuMsMA0?$07~vhcbb`K8?o;>j_R
z$-hl$Sc5Vjvn&J2T{GISSf7**R)>5qfP&LT#K&+l^hdFOf1x$4HZKSF=4+g0pbBRz
zeFi4ju;)nJFqB=Rg|f|NAUOX8JN>fZ$7LHLz3n%6=+EQ3-DcyJrK)6ajuZ(kivvI5
z5SY-RPfnlGrEjDA;oCoLl9=lRsUbWp`0bCg_9&veMJ6AYy$nPWKU9=0<~>4UIG1rk
zC=T$3J6`IP`%j9T5*@+RC4u0!B33f({yWrP&hlkzO0z#EKY~a4JjQM-fvlE1UL^C6
zlOErUp-G+a*0=>S%I08p=6wwLZiF3)@A+n5bDI6Q0X0vCV*O-&e&JX(dbYX%=RGne
z+QBujZEyiRlzxX{p-J%B><^|do5XW$Ml1cR3C_+?C#i1FA^*7*Oh|u$ZsnsSQ2ZYn
zTIcZ_5{<|Q7s`0Pv+&>-4G3^O&mEjR6<;n;AwOfK$caKJI%bAEgif4~BIyY*c9cH3
z@mqv`l7;-C#ccQJJW_bK&Y0#bybnJ38GS#=!yLbbm|`PGZ!j-Ze%mAnu4c2$Ei6~>
zrOP>XXris*1$cY562>R`V0VNpPV730f_V!3r1{#^>rF6ZC~d?Z`4gOF!6bAz=77m7
zjljN8gES<V60hK4P}7phOPpnCkI7ZuXRr$F^7x8{#-UidW-ylLDRI{$VsRd0KV!5q
zbAp_P^lQencFI^RYjecAO$t;Tk|_~PsLA%d^M>0duSt(vk3&I!g3#eaG%q-y#n1Wh
z3x=<F4+Z`@q$*?^-Y*S<eHw<O-OB@4G3QQlnFJ^1KEc2_IjBA-AN{ySZhNF5X+5tC
zH)0gYvzhiJdC5k&n_^63&C>zz$f9>Z1YBm^n>A)^CmE>*+d3wrw*}>Ijc&)l)C&+9
z&Yn}HM`6vle5haYn;XcBhUZe&G~n`FP=4PH2bb-Fj&GVUhxPAvE-;}<!x@)WiJiR;
zGv@QdCJZ?9nOmP|L<5$b1h=Qbyht!ZqBye#YujI<LyZY|mrdbB&l)*_(GmW;D?4wh
zsgeCrz2Gaz;}RBjqv!2a7`^s0zb)K`w&pzHBRBWLrVmv(D|H!$zGb`q>sEC2Wya~s
z$>M6d+W7X*9gwe}1QD-dK-sGV{QlF$OV2gP-$OjxB|pU5{S7eZ<y#!~JPB{7wV}SF
zA0{gob8o8HJu_elgjxIZf;*<J<{$K^-`kN~)8in#{aJ{EKeK(0D3$9Bm%x5e87}f|
zgTJB5WW9wHP0as-9lwLQpt!S;xc3>``#$A@0%yWU-!r&J<2;7gNjR_22$W%w53BTx
z_;;EtNnb>HPjL|}y2;+5*G?0v-_OGpm-T7Qo({fVbr=|jpFsB)Voos8gZDF=hNt3X
zD7DJOew9h!1&0yl+Ca{%*C74q38v)O5cf|xtebIxxgWEIJ6|x*l<j)->*>L!xz<#_
z<|d>o4{!xT6H(4ejpS^qgqxq$Ncuo1C!=_WIbEm06=!=gB|R0={qOU__v~|+p5wdM
z9)aTgGJZwuAS%+iE!?g&fLcygbZD{-?KroI8||q@Wq&xqkfJcuoNyHdtS7!){0J{?
zH74l}($q!vG9=VBLi(j(&d@#q@<*{ARMbXp&+mUQJ>w~k-@r@;_nYukJIiP7su$kf
zEJfde9u*#W3)2Uyl0Al<pxu8Ds<)M)=VN#5XfmXao)lr&Z{)5Nu`J;6Wx}XPZCbzf
zj&Sc{0S!piqbKiZ6XlUVVY7lRS@~=!h71kiGeRgt+%=@vSclQAQ=d=u`h;a^A{2<T
zT{0|!8CNzIIxiIio%kKIY_?+fH35-fGt>Nmbi|ltX#ThyX8Wb#QXxCD3zg^snO0mX
z_W`}x>@hLf8u=wXC>~%uQh8gh-sdrQ{2SXfuDT9(qwm4rmseSSpY?Q3>5(dT3n)=F
zpnLWyQg!t|5N)0eamF^Nt&tCf{!Os%2+w%@_O$WGTy(1Fz%g~Z;LF-Oux9XCj9AG!
zeEKXGmcAYxPYcmepbdhaOQ3$d0(*`fg4=$Nfq)SWTpkVsHP68NN0vnCtRZzbwxthe
z_Tv86`H(-c0>>YdrM17Nfk!|aKW+OA9Q;FvjPoc(%VEQ~1-8sbHg5#VmRDj}su3gv
z9mlC*rlc<W3Ajnw3w_Rn!qO{jcU7Vf4$uCgV!=z^H|;2I`27bzYvL>r{>+7vc_!57
z@@v#BoQQ%mm$~69ROu80#`|Ku4QCJLm-!oo-B*Xgulw`S%XkHjVI1|!aSiCOyPun!
zsz5|rnq5OiZv~OPnD6u;SSbA-qrYpxsSm;MYx!%~?aDZ*MGBbyW*PT$NC7Tz&<1De
zY;b4&7Z0y_y!D+a$jXLPwN;EudIjW*)prnuw@XI92}k4nDtK)tO}wHEaI=95k$n>a
zxorOO<;ozM>8MN=8|hIGZW<J~8)1SZ1CP5Dz=Pfwc$e+<bB$+k4YiL^ID9#p9J`Kq
zss%?XYEkj-t-R&LI{wAcSZw1W0lMu$+}e!MKT9~6-Heuh)$!xi99VZqgSJ`6qU{)E
zT;IL|D{`-6Q2HvU*Z6=3-ILjMLQu4bmsqXRBszww<jcD_xa4{QePTDD*ZW!Evr`*P
zck4lH&j5<At8&H<)4}4K0+}>djSf2e7IWP1;Mn{$EWMQo3udl@!e^{U`_l_1r!s!*
zhbt1*6E^VXswoxK7V#Z<{=8ypH($MZ6h``IvHKz08SR_RE&Xjm3syxVtkuKQ`~G0n
zRSk5X^N9<*G9R<6bm=H-HUaIjrk>|t!QuW4{G=^Q%L63b;#utcXY-Pu>(6GMf%l-d
zEggOC=z&0-l=;#*4;72AK|JgGl>JwaKkY2Z0>c;B8g>Vlp9+G2(SzB3w_Vtn8Gs#Q
z6L=So02B=vNZf7M9(`gMsy#o7Rtv1iUgj-#G`#@==`q5biT~iq{s-u<XHV<Km4e5F
zQr<JV3_e0Bbb3$1na@73U8=8SW=I&!3e$r6$^l8<w@<KX$|&?am<`^G0{O6ch7f6W
z6}`>OFgJGxge)|{<r@gJY6NkTi(;6e(1V7fIyqMbbJEQ65MjakT=cItKDtsGa#xIz
zEcl%aIv3LLw7L=rk@|z57+*Rp=_2b6+0ti6b!lpqI(<6Efz;b<g&BpKBrVX0lwN#?
zUpHjpO8-dwd57^RpV*SZc_HAI>I1c-w4s(*<HuQdaB1FKoD;YVQ#x;APVP)}`1cS$
zsMr&4-)dpWGBGqh{fJ8+-2sh2ed4vZj|*#);rz?mpkud$7rT6y%o}tQd!9Lg-H>jq
z?@HrkV+>$)f;ySDItTW|y@Xm#M+gcS3dMbPP`~96jsgjcTxCpJtLpixrt2`JVJDbL
z8In!D&p;9$fYvMzw$;|0x;{+9n{%(C+ou|dp|XTOzu$!9OqHY2`#8)ljl`S{>@}$R
zJ7@4hp5(^(NCs9Uqh9!P<_Tc?tvo9#AMq0UM(dM}hbu65asaHaNyj54DL7{HC=kWC
zahG{%vPZcZBKGgYW#$fa{pkB(d3h96?yLdjlp<I<B^>ICWatck=G>n0A9K8Sf#1GF
ze3B%fv-FuKB{qOB_ig3$3gxI#=v@@QZ4genQUSIl?3^2aSmIvn2EJR*!+@zOwd`ES
zJ>^(;N$U#k(96Ipj2AZ|N0%%#QY5|=rJP&SQ7AT;0xg#esl%sE+|ySL(kVxA;iM>h
z!uAUu!;Ns{WP6sUZos<Tm5}><m9TkJA>8}ILVy+1aUhT}`!slo(?31_c+P1M{AVLs
z>dL$XKKeq<pbeb2a1=K@xD|5GkL8w1-Ghn$tjNyI>cn-zKTN%#Pk(N*CkOAX0*8`^
z=rT>29@?x*jcvmL85?8BplN7y$B2Z_)+0_g6EN(H6|HYH1>+wTP(Qm_!e-@+Rh<a$
zMyb){JI7GKeR8e8dPeAcvLBZeFbCf3^}Jg|i_kabB=0`36T2?kk;|J5$Zf}Vxc%WL
zI9vQ<e(GUx)j)?Xzc~Rtul|A#S9h$tZ9`R$S;3=;iZs{tr381oLFNZ5vbs}-WO%N{
zume53uOGYaf23!NwwxEL?vkfB({*Y2(Jb!2LmFgypDBrDjGZ$_W$;wWZ{SPfF#S<9
zFLgmcimjq~7+Z-IFWA23@=CsEb0qXnKF;Q=Z#cJi&Y-7ZO!9~aCTeov5EII`t)2pD
zLzsVeqc!dQmI`B^wXuCz8w4HP1Z|gABCXLOJ(};h9#aw6$z((R%iq{PjpcdT)^c7a
z+HkeE5u25rfWt3e!;h&0sM&6T;C%|ti*BImTnq5_(xshYPasLj0q5S+pljl<z?|YZ
z^c!o*8(!{5ey$<eWBmY~I!yQ)<wUN0bu@Z7I`fVKdH5VJO^T;)ge_SW_$&v}duXNv
zma%-!rFSgDw-(-vQ6j32HvmR3&b-Tee7abK%@s|Ua)fbpb%P;9IRGse<OmaHK7qY6
zHEHqo_nfcUc|Lf)HSynUM+cf)kW5!6XK5o=JXEFo$|Eq~?G0{R)m_xxFAevr0>Bb(
zNxF|Lf`0L43~@1o^z{k+Y9BK)!$N^EzhsGc**y5h{1#OUHsbW7Jl2k#hY^=l7-QlV
zU$|}>`gM-rGt6?KI_Ej)YYxVpGgy!0+XJCnloy0;PU93)bGTyPd|uRK#Y_A2;{E6(
zD16xi0u^n%lq5&Qt6xcK)|}xThnmvamKLOH!h9TacP}=V+=P<y?~rE8x_7y+@VDw+
zJZHiDAJ<QC7Qv<XyHtjloc)ZwjAgxx_3Haqo1?FK6JKw&pWF7r1_kncuG*@Mi8J9l
ztg}%kryftmnJGU(zBLyvICsL}OdB%s+aMB_FqvOZ-#~hv9Vfc$!%uopjlG)tFso}2
zB$Rw)uJsw1G4ms+&F;peg{hcJbhuk1M}TnZ8Su3o%9!KJ@cv*iHoiN<7z<I{ST^s8
z9d{l6{$Nb+m9e~njWn%3{u6pNt!Ve1gDeBBOUJB`gXA;jG<I($L<~9)BPwh0a8x}`
z{ZBx=2fyN~tutX>i#+2I1@Of`-f=$+^ysK~6`Fpcp7XSS4ubk?`0-9YI-Fz9tdKPP
zrQC{9cdUu!qse?_&`B6Ko@KUoThms#1TJ~}aX!jB71MedYtL#HitHQt)VF?^u;2rH
zJEB1<t16-6`4xQTWKMd%N1*8!ZS1&r1-?itQQ3z#p|~Xvb*gHyVRkHvI42bBcXAyz
zU4t~eG9)Y1zhlLQ&sY^A%a|%Rpizl+2{#Ucys@s>+~kZF7p16AWCQA+>t^|U8_qFm
zGwiw10kceWX<<wOe%x9FZBuSRg|8wJcjfRADGGR{!HhKDaKz@<>eNqVJtx`C7zR#k
z9uVk=wr{!^|7$(>jGE9QmWA>!Ta3e9zCp$Y#*~nH3mey{)8x~IT=eB7s8ZaGzbuF0
ztXOGkwKfw!j8vlaZ=|8STowvd-$3MskFc-9faL#Rf2O2M^Ol>^v@a2;nKll*PXO08
zJQ#Lsn$TD;<|*^Mie@gl)H__4u0NcIw-ff`5bOI;zG*IOQ|*TKx-9tlP>o!03CCll
zazs;08pb$$!|37Gc;`P0;x|%@d!l7Rf`)6NdPpW%c^K3EnV(?bb~2bfmL++AZ(?m~
zEDC2y@Eg%2e+P7FVuk@N(eHyXPo!X@n}BRi(I&-)MmYAaKGk=B16TjblU=^2z@@{T
zJu6~dq_tmycjZ}L)Vzup_&(*!7k<K|{rMQZ{v>CuPy^0UpCE6|8T|N9pMKaZOAh~I
zPPnsCP+uM)EIDx%eJ;m>24fD7`L-UWM_STmbuId&R);1m4F=KnREdV18By}EAw`TK
zZhdw(Zc5dp^OqdQoXxt_>$^Ox-XcY7*4*PQOHRWS#xi?h7>j}jVcD51e=fSzBD7{*
z*yQoL=;SF6vMk>s+FK-PWByD3Z>$S($O;MryFsw34K-QbP%=FhO%G{d2xGYp(@-Ra
zuj<h)ObOEmsdHTcrevv8CzL*E!P|;`Ac%jFEg8m`{^s@U@AZJzXm&<#5)fHi8_;po
zC;$DirumHD_jdaLXucZD&a_4xGoXX*_7s@E`WeoweGJhh7dW+y81yf!fLr<pV831-
zERU0?4||dzf9EJD50ByOgW0+2=pv|WQf9o_RLHuhfdfmsaSQIn?m81HSTiR(^RyKS
z<BiZF!;DNjn$I}2x}>;&5ENXl!8QH=puB4mq$`>80k)3Nv12&=Za1ToYzx49oGI##
z(!sqGj$?;Q3TLE$3f(UJkPOX`r<+!Uz+$#r6>s{(oN}l6E%FB;GqnV*WfZCHnOP_!
zox^rbwcN#%o!I#qSQd2`=IT@a@XPm5<vJOg-X@}P@KZ>9b{sBUS0yVIrr=ji6*6o8
zB(zXGjmBj|P}P4G{An^KTbOgi<-tnE8&Q$`%qhj8x8%uGe<hOOp99`MJB9Ld)Tnrm
z2Ra|`L}$|oERpMkfW%ucT<H^tFbPt%5n{sBnA@`pWlGsTV~Y+I8IA)%qrJ<p+9+IA
zU`TenVL7hf9Q@aM5ht<EvEasZDBfMj#qawA$x2#av`&_E?Ngxp3rk^9>MsZ@-wxHO
z?&zp0kF!597PWK*j6*$Q$w^V;(LdpB^(p8xmS8r!C!BD4i<6cdL^lg#EPJ3zqq<LH
z#mbLpvAYQ451fI3`s>_$mdTJ#iNbhxe=``QL+%HdLD)?t#wz|KBpXDi9exQV?>~Tw
zB|E!WMe+jkC{XKQUSRCta(sGWD&q}WDlg-1*T_>pze?@~>q)!dFTSTGm7CN44^kh!
zf-wKNoZpo7T%g=N@U*OhbcaOV!A=Du-u_1OK_5V~GXih?Fr%IRzPRoE9K5?xgflgM
zVDGRh$nE>W_JDIC@Q^L`_;jL*f+Y!kH4i;o8rU3|ofod(<W_#PryKIK@$kg|SWn}M
zM8?#fsNArpmTGOng6&l(nsNre3=~7skKLHQ=PuVaB^yVjY0-y?9S|lk1iuAaIm;j4
zgg0lG;?fE?tWuLl+Z2}N3je}QU#CD6x^Ls#@%P|Ki!p7LoaP>`RHxI`Woby1nAf?Y
zKpG<yG4^N$)C@V#jj3dvtVn%g(Iihh6rc0z7q6hZdOyG9nHa--(m2QY4)j#sAhex$
z9bXMLq4Is7Fd+RSKc+bd{O0L`TgF3)|H1)yl4?$yKI;&_CEq!(FE@B-sY16Glc1zO
z9&%4x3ZJ>FP(#l)e7G_X@{V1Ee*A#q*H<NlvN!RR1qH8KM~ujy4uXggAl(%Y<DJIi
zVYy!THS`H{s<NzxQx>-Fn*!pyD&X|cjmwBrq2tB%;1y|x3gbd>tp<DdKsjn%`x6~D
zJmdE|@n{*fgj?|Y3ha5oxU(lI)Q3)ltDGzuGTW5u_j%%E1ue2?t~&kJXh#+WjzQ;3
zHz4LaV~yFF)BLy0DYrTnTIW;Fe{mNCm@&@eJ!@V&uNsETX$Q;wArgV+bFPT>G#lU4
z;obUHRQ;lkhhupNY)pcocG_fZm=g0)f8oq*Gf}oK1YAlZ`M`QDDrlO)pMED#wt2Qd
z<#)ybIo5{($$fZgdo~omQlPnqXK`NV_CV`L4e))Y42yni(biFGVb{ZI%)Ob)r#)nu
z9l;vrir`T<W;oP78bqrHJ%|0872v752F>kGVOWI|7xru<U!<Z+Rt>i&nHy#4tmTSW
z;ravDCu!4<ZOoItUqBB>x1i;6Q%H$WC%+g=C^*uV3{(AsaW6$6o!1W&T2jz|Z5EVW
z{RSTMwsW(}<)Loub-d7d0~Ga-!>RO3u*H&PNM|Hw_dd&q!@JId*U^pO^ko7#(K~#@
z=|SX#tSa4XtVxHjz5*vo8LMxrJ@r0e#MLlI{Q*7ZM6Q~~bucbMM#c!n;AsQLCTUFf
z9El%>AIDld6S^VNoE$tb0<Sme(ZObR<iipz>Zlcfaq|@*r0XPjR`<iMKd;f4w1aQP
z8!q4U6kOh?PadxBfnvoXPV;pc=WnnMqgS8j(!z}BjYEIYdx04@aAX(mRkR>3hS7XN
zKoRrLHiEac6517X!7#=~C=V6Dh?y3o%=0t$Yu2LaB2O5IvcudYSIN0tmLWK3P5DV?
z<nQKN=sEQcCI$;2cT*e}swYDQ;x3`<R3myYbOMN$3Wa6*jPH;!4(kh&IM;h({PKb2
zzC4d)A6z*BHAm<2Z~v8oxB5D6>r5ST{7xe#9R3A~$qrcAp-;PY4Pj;Ceykrdne&cY
z!9A8Tp{wjI$((wGKe~EE-?9ZY&Z*F>=r5Q&JevQ)*!`X?w|&|`hV<{*h$`wQv0YA`
ziXv--f&@8<Y&Bzsmv6_wk~T2SOTk~~PXY}pKvSPvko$E$_vn)r$v-p%1pZO{k{Qyp
zSoZ>^8-#ETH78h>A(P+Eav6iXwdjoPb~K0k3FVWofd2dEc<-bh5sfcLts!dk*-ur{
z-qyhSgd>C=C8hZCnhv$xx1S$>;}W`wjrp<}4(K6ZyY;j$(82ae#Ooqz){p0(1<DYC
z=^RLpHbeE<Pau~2!{&y!aOUstSR8i*4ouV~V|vta2Y$yeE}56{R3QHEnqWZx3Sh7;
zt!fwru>%cgYtVwBPBkzzOpf~Ms$s~rGr0NpW!%61D9*JzhKrLJzv8_zNgmz6@6P{>
zHG9Ij$BnG-#Q09W-FNZ!r!tf!dUVf|x6mQJ#vM;$o(lOG{*06e%Fni7=?un1FmL5-
zF9$=i?hP(0x)Z&-)cKy%=eSW;0vfMA9#^G&!;s1S{1rWGB2qV?&4ZuA@##f)t4)SJ
z-208~37GdQfz28BThdbspYfydPxv*z4;m9+U|oR{mEN!kR~gHY7q|3Dw2?A8JYbCJ
zohOBX8h<e9W)oO$3X<$;x4~0|aWJX21<o-xX0Pu99IDH>U`u<T=Up;Cz&(VSclz+B
zt2CLNW=(FDJVKlOS_EeNhnuY!n=M>{zE`s%R;hBN;hYlb(F)-e)r8>lcO1m6ZbOe1
zg*?1F2+rw|kX|oOZKprLx57G*{WTi)F0dzg>pS6Jx;~j(BOtFErOBDMKk;DgO3;7X
z3^zw9(|s2W$TZ!@;BqCOtI|;bgV0E<Tig#@3e91<QVr_0TM*eI4R8uE<p0x0bS$t3
zeMYoBJAwILf?1|jl!B6Bi~;_fIfOT)plGKBhRrnw!Ho^V?&aF}WS$&xpBm1Ey^RMa
ze*y2UX2quqtT<x@3T?CEp!jMMw;)81%8xLllJ*#|4PhOTdDYqHkDbGZy*03<&kXk*
zj(}ZJ6EI+AC00LA1g8!K8g}b5zst!0=LG0cIVBTP=$i)GcUj&o;WOtn`Y2ze^#<2R
z9masH``qehO62l?tcURL24oHFL-pEw7*IAC3eO!wlrT?1YCMXp>axX7`e0CC!sf%_
z;C9%Q_r9?mOpDy%nv)d?-pP6e!){`r-ClGG^5Q0$crhMdfJ8RR1Kd8C;QE#nu*;}|
zy+!&Yz2qLh(qk?Lp6iG4QJs*d9gQ89R=jG(MwoQI3q?P_3F9g(n5$tJ=jT$(W!aT-
z<q^y!w)G6e+-k+%iOh4&&J~CAL*VX;7!*4U13|)f?uT^*J`YkLZ)&gMjE|;N%nP~V
zlvCW<+EQ%&`<FXiph}Jla^X@qMeld=tRD)T*UtycO`8Pn|1NPpW708b)^c<Si3LNI
zJkC^iKWK(O=lnhi=*+KwA*NfM_;!wl{Bts}=RzZFd)fy&hnfHI@olaH&FH<2Js5EL
zIPYi>1nGO?cne>B;x<+q?HRi{c}4=>F-k_Qr6(}@=z0vmyPUqvc^F<*2C1`7!YQ%~
z6*kI{p>qW^p(qEMr^G_ewsxFQDNmA$6{*7`OYC`D#JP>g5+3(u*P&3F?wD>u+#k1a
zd58Z&)?OPJUhn~8yZhkfKLK&Iu_eRrsnF&HB^c)H!v!g?0Gk)baNzI@NIiFt<@L1i
z<{vwHFpqgk*4U$9mygTvt<?~);50ZbosLG~2DIz4CK+&g0Ym$>X-DErZjOr;4bfC(
z=c+%PPk<J+o-r9cujJ#cof@$8&jVOE<RwOefZ`8ZtR0XA$;ncvT0RO@qa4A;z?y;J
zf<PEt4V9~U;HhH>9$2kF+`hO=Za*%=bPZ>&?A}L^u1^HjDf_@%)FC`^zZBDcN|7xc
zcX7z<mtdQC5)Nu9(cb-CAbBSS7gbq!7<m-9Oy+CZkk6M#JmcU0s>BVe5iPZkN$OLt
zae_ouNbJ(Z_?mCv?Gl1wjr&}5S^@XdTZv5gElX4v+tY2YwxV(w%L<(D;c^UKqSwVR
z1ewPevbPWOhKHeVdo|~{uN6gm(z!<OKd4;Q38R{2spzH|U%K`rh^2LDj}hx5f4>gt
zKhwEA9s|%jnEl-DF%H$+hiLSg&7^XQc+D5@xS<ACv`yXz5-elDaEA-7@@4!X?HD#L
zRiwN0zoHRy5xBh$5iXA#jdg-FILg@&ANM1mSu=>Mk9^B%`X1u5zp<V;9|(`t;xO!}
zfZtN6i_PIr;qe_iQtmj2&-w2G*iAH{4+!G|eeB8F#&)}ogUhh9XA%aV*CDcVy0BPv
zELM#i54&cX!$jW;n9X_{Dtqimyjc>w%ra$D097t`)dFG1X6Eivlp+zg_ky%iJU9#v
zMUll3UUx%1`dsbA;<tv-+U5(}UNbMikNfy;s6OewFdD~gd4k@tl*@|Q0(C>yq2If+
zP#rS>m1g#|zWgX&PP>OuIa<`GNQ-{k6@$x7!l6OhmUbL%=LBq5<Efel9lt+w4k~6;
zbX5!Myv1-cuNkujD*17yY9wo<G;ZF-{AAHTdGG7JC}mcJcfVMYP1|gk`*R*Q0Ofd<
zIm6?sl&Rw%#(1ta;~e|U;ib0)>DP<L5&!Aa`kT|Z5ivSsnsqa*9&b#&Z@l8_<AS*2
z{-^vezg#pmP^1lgpYi9<^O!zuEga0Vr!9;{aeA*hX&=-B(jAOB)!Bs$MtsM@Z{^_Q
z!Dg|BchQf1->bf9;ti!Uc<ch(|BhH8vGk0=wsFiau<ko}g*BpIcLMjE?a&<0C}2`;
zDJYD!qh^cENqvet%6*a})t45qJ+%oKW_;q>#@~dRoafxrLgp^JQpcRo`8a)|DV^hc
z4{mCtqO!tL#wsub6UOH5=)4ARhp?{GtHu2CKU;9hvq|WANT2R9jzAZcGH!XrYUFP<
zVn=`j_dCUg%3rF6w8l)FSIqXk>Kx|Xl%fCil;R?dr?B%$1DlU{W3K54tP(3xfsvW(
zf{PuD2P8`qz759WX}KsftqC9d+tN9_2%cu^k&flse3hXX*1ckGq_3Tr*qM%xrGKFy
z&0P|im5rx|Sd*e`E85z03-Wi22I(>J*z<uo<NluGAJBgEJo1t`rN={#_Z9eakMVkU
zNKpeFBQm!642D-}P`?&MTE6iMw@qJ)mTHH<=qMY;2a4pbHW`vFzVf(E#NKC+G1t{1
zIG4~L{HyIxvC3mP&Z@Z$qeigoZ`Ey~N1Hn@Hkt~4>3*F3`#~gb)_>?d<DO7+bPl$&
zd#SXRFFNJRF&6#+XW4yUGVh5BA*=`C@E{U8(jA~UFqzBh%;CK2Jh_)=Rq0*7Qy6_B
z5TaKVa>LUaq24bJX0cA9Q&A4aU%U?qN6uloB$$U8%(eBt0P41VhG#Kpc*w?>2v+>a
zPAX8O9U;3QT3o;_ye~_Xg4vuw-G|MPv-lge%opFD2=UKy!E&fI)RZYwm1So1;Bjeq
zd+{Ic%G1W&;2xxeb#%Q{X-M2#eq}-xn2ULct!=@qNfV)dm@&)e%fp417Fb=#_9`>u
zCFk6j%TZm#3+xnxe6<N(<m-fE$D5EDtJTP$buU=f_zbsZkQS9neu_7f1F^z1ndPqK
ziKpgXbY$~Ir=~_ueRvBvMYD6|FdNBzlNGoncm^ys)h2%8t+2B=0ZVeip#~pt>n<kX
z#g9CC@PqlryJoo3P=qUD<w(;`JDNWJJ$G%R6_Gx62ez$AhMt$MVCjEO@-W|=zLOQB
zi_s5$$0l`B_~IHwKCEJHkO0ng9lJ-q5b}QSPs1eLp%^lMI*xrKMXqjsgPw7bI3huX
zIBiYhU(Zq}$JN=ryu=2o|9r$&_ek#GduggY>n60fGN$(>KWIJimk)`0%Zbd>vgf>G
zx%S-Y+@|fU1K^|2ce^ix5}jP=u9Cxd8OCJ%kYA{s5(llJ-P{_7eHhN(#d`*2ak>3N
zgyUxE5PAPd2yy$$*QXDllY2U6e?*I(>V66iH_pNVH9K;jF(K^Od39fa3dz}EPism?
z!8&VuBCK18VT)_{UCEK?yEPLHe&phaU&rvlrGJe3BZ2MF3S_CuCbZ1-fX+l|+C9yb
zuAgj!zO&~+`$1Wjw_(gr{r6mZ*KfS7@)pDH%J4P1FZj^ApI{K%m)V?@;D_*I=+&Z+
z3tqc};o4+gBn*^@-^FmbDLaG-<%e<GijR<PQpksW0ccbh3Jb>kMZwDX!u<8R)IX>R
zbKUbKQ#PvMOMO!^NZXbq=yzc5>ydClrWiykRe5p!KmKUCJ{=Urb}Uampzgj@=y(~2
zdj1NezR?pO9t**e@UfURFOEyN^A+CQJBJ?rB0kr{n|m3mPSSq_a({D~(|Ly+=MrlM
zxs~c5KDvwB%DjjV7Tb{0y6M=n(+(cYPr&)d2eHge4d{Le#<K~gbdg>lCOegI@{31c
z-{?L}t0}<?Ys64CI1?M3|Dj-$j%)SITG)1i<=jU*<03Z4SA8)D{MI{hmN!!Qf!Sx_
zO}r&_U1>maHxh8HKZxn$a=G++dvLn(iC=NVj9NGv5ktKuuI|-QaP(Bcu@yNueB%HN
z+riuz$BsaIS|51s55uF1VYp~f15~^>r{y6J`BPKaT>RlO&c*8%XnfP42h|dwHqRVx
z<tIakQ!v<@FxP#v3~e{K59w*uoT0W4T-tRTt;hbwy0DozW%wUdnV~`++HXO-i{Ej`
zUI|P~x&t#d7?1<XTBIcXK5ELg^BY*da=-gO=s5L<Yv1I9tqLKy_|SD+<5r4uF0g#{
z_b9L~pkQ>vm`=N-Kr0>0sMLmRTo`{3tsKta=+aa8(@~q0Mi-;+<L|uTaWk6R&RDp)
zZ-tA7zhtb{Ti9$C2YbFfh5Awnm#Lh9N#~d1$f>gAenBW4kG~JEds((R=$xdZWHB#g
zWlFHj1P`d05QX1oF+@v&X5SK!U2K1G^3OX=ceLTg92*CPtn2kfa0^79#hg{hV=VX4
z<7Lc@i0JnjiT%+*MBC~(2wZ;fzLTec(`F_9)VFn5^jnqC8bijghz4<*AMf4MA{4A$
z#fyC!`TfS}7)RH^@SQIqc5ea9`FjFwopq^G_a%5`E=78he)8g#*8IDR7NoaZhg!e?
z1>%-9;C}fwceuF#%13(x)w_&-SF*W1=NobDNLezXRY0<;GPx=40iZKki-zS`a(`?c
z$lbS{ctTN=DtA<|y!BVf)8R72<u5y5<_ck{nlVk+NW$8HNz5G=410$$5@mZGh{qn{
za&P-ca`QHW^VJ0WV60E&cl?5CzZ#V7*Qc+(sF3rZNEBambIb30fnjDU)X!)a2Guf0
z$wytT<tFnAv0h5<=1_Q8_Z5yS-NogXq#6J5IWH)Co>jl4me0^<9lNSZ-i#=c5Z49p
z!&jLImIu0+nhRi7oi)UD2g4L!MlX=jB$`7F(052SzhGnszWbbm<v|*pc)2a@t2~M>
ztMt&WzXy^ozrhRh+u^@UcW`e)5z3pzV@eV`m+h3{i}pOkvzO!X67y?pQaFXn0xU?l
zjWs>QcIcYf7kJUNWN!OG9b&z>m%Z~QNV<lw{B(p46>BZ!1vbxkd+Rdn&sC?o<Kp4i
zQU&^aBy(aFyTb5Ae;{`EB~amSVE<fCh;+VyqPze2hqaXuP_2P}PMzGlV@3Ft=+fms
zSHb%sa@6~*SmJwkBYa8wgt;Awm}V<QjxFv*alt_t`+#Nj&ott)!&bz-zlp1#me23_
zp-gV3$dR2Zm5JMUKgrEqk8pTsJb15)f;G2afw$IEUeF}Roxh%j0p%U&^y)gxO6gPU
zG!FaLDU+}zBY4BG-Qb<RlJ{6=Lp}MckTkv%?r%-UzakMjRQ7S=$Jb%h+%U!m9^hPF
zH1KP_bir|deQG*ViaKAfKue1cyrAT`u&3uSda0*#1Ii0=<)|<y3Na?z@(QuQFcp{e
zM&Q1Z6IfuPK}^imi1!O%oXSdEq0RC;hCd-^K$GgOWsIQiBS>nsspx%*u)B_B1P|?i
zbmL|G`pNQC{&W$Dy4Rxgc5V8+I0XatrNFxL%A|FMn7c4E1Ds}?!0j#-8Wx_&pH$4l
zIk9t5haW_8``5r3dq?aza}O6xQJ_tXMLs6bnq?lYfPcw7u(M>J<!?V2kFcYM?dM`e
z=rzX0Z;||uqBC)*>FdJq)g;aHJkK&^u6x!dQ@>;iAyYCXnKC5_S3)HuNhL{=BuSFG
zXYGU}BuS_wX`pD5B&mG+`va8foO91!>wTX`peh3a%KcEJaS;P!7JyxNDp!PuvFn-=
zJ?l4w+<I<8J+t<~+HpOoH2f?UCMRIpYy;}_C?BS@orKCQsr>jUIwY;<4iqf?E&kTr
z0DCka;r4s+P&d^d_7zgJGiDBrQF(AW%^kg;X~Lf8m8eq1y7mKC;nqL`KF_QGpCtqE
z{fGfcVRIzGyjD(KM~wu?K1097>|UI`l3%yB07h49(`3I$d@UhQMV6Z}T`m=Ms+WOt
z(><=Uu!%o(uOE!ZSrS884eHCWEc7eJzBmyH{-gZSca|*I+-V0Z268}Bs804-Xp;Av
z1ax<qIxVh`=Mtx8b6E~=AuORAY-L`<z<pzqd3zGBP0}DmUp-JXW2(@{{w1`1W8Hp_
zIQ!Arm!RC{4+#1t`R!8k(WLMYI7UZv(oyU^_p5;Wa;gtwd`G~+GiIcI%{vrq$;dnG
zP>$2wlu2FE4%nVm58Y8Yj4_mlwKW%Ed9fS~I<ygMTG_mD{C-G0@LM?D{tq0GGbgo`
z`E2(0nJe4pgtg`4aOHk$vN2Mc6a>5RAz$-3=cm8<Cu-7U<4Os-CBTv%*s4XMCQZZc
z+d5RkI0<|nF2kFnl<531=ODA2F-4O9f&Ut1oVhdxQlJK=T)B;%8)tB@G_~ln`I^L-
z<u<FfuzUa44Q}hL8JIus8)Rs8fZm!D@P5Wcv@eb4m!#-W73TNy`p}HN>#o69trBc#
zm!(59OL4`;qj=<$ChK{=gie7hR~BIjWu=OI5e<SVyCQ)<^bm1X02DmD0=8y;cw)#^
zT<3fOJ6lY_ZuBykQoR$!1J591B!xHitQ#Uf6-|CT7P@7!-)FuI>HO5fm$OX7)4S)<
zVuKOoSvMg5#C|xfb`0tmPwdTOQ-V=z;3*59Kh{wq<D9QROt=}0x-7yGb`rEeWGt-n
zv*GqEFu;Au62x-UF%&+gFwXKd>SXtDPJx4<ozRLp1@C#wdB!xUwFQ1CTM?CjDx^-=
zF=l%USJz?<HIzLsynl;bca`$xP3+(A63|PZ^y#DP@nC=C8=sin!FhjUoO$ml=<!wr
z1x0&=UyI|=@pu3?nq@NDeC0_7+n+v;Ka1&C$H2d+Jj@z>1fsUAht!qddB=I~Vg;=f
z7(3pE?9G-Sx|fZpi)jNW9Ge6YRt{{g?+)V%-$KCKZ%{YBMA(&l4<_U%zzX;2Sh;iz
zm_)AS>|FPw(7gx(jU}l`a-P`ZM+Z91*~s#gsm!e!0|Ec^>7Cs&Bx`09cE4vFIjiY-
zg>?`fRfI#-v)K?>IviEl9xLNWCHl4~p~1x4;9D%<C%P#TfnQZ#5dRHj`RR-q&?KJn
zu@`S0xr@cV3D70^7}mbgp*jZ}arCedh>raN`q~0gKU|td4rYVz{Q*unNRyo7!`O^Z
zhyS{nWlZEMp!Z!IpE>*rKW?xFieWAUbf3oW8LW@?tCY{$AVn3}EUZp%1n>E*TsZXV
z4TMe3;PEO1Dt(S};XjPw%>oVbpjC$4=~E?+!llB5orY96`XkiOO~caMUue#}L^@*)
zY3AHmSf-{)j(@0R&;3;JaXx^hZ*n0b>k2roS_T^jQ!p>#1amuW=A0R`M%L2+eMNm(
zdhRn6-1;u8Q@RSh3(_FxOA@T=oxs>eUwM~?nRtXbcDJ!zvgCQjQdu<{Dyu(X+_eqp
zTj|5?dVd#khYOfr?kqZPf5`RLvGYv7DJPJS&eLDhkD@hU4wcx<n6c*^qVs3sabGbQ
zNZvz7?MjTAAWzNqmqM<*76iZ&#!Vf?l^$STs(=sRsYtM-F&T?BZey^$B(=4^kGD!{
zFjpZF5}!@N?J=vcKrcu5F++_U86`=*ZK}9-$x0YH^9Z`^?ZvY!#~vNdn8^uUc<m?i
zfjyR|r($w(J7Xzker3#~M?S*1z!>J?`o<g@UzrPoaSz}8f+^Myz(3+L_rWb17qpv@
z=<~Tyd!!AP$v?$iQJ=x4jOVZJRwH$f7Na<Txp7<qxsA&i7btcLc+ZLf|LVVZ|0>I?
zNc`kS98snPx76U!jPLN1`7C}MWV?$spHLxl2Q;rb44aJ@^Eze*A9+0uWG9b@k!P>L
zz7BOF%b0W}|D+hZbuXO#r%RUV_d}eNC0ZUBO59dy(dk}Q=;t&RTV0K5t*;w8FKXaj
zE)K$V4_P9*mf|4gmWKrvvN-+Z0Dk*Y0mq77Veh8%JT9LL3(iRql^N6cXn{9MTpq-A
z$vL2BT?Oxg&fvEDw{awY80vIWg&xHTQ1@mvFC6<7a<2#C0hI>Kjwr|S6>K)3pUa7U
zs-W9%H@M)dOB@%j1G?-de*4z~qKJe1R?$T$)(C`{1aoR4u;hJoScXFS50|HBN@V?|
z5#Fm%LBbK?0)=9nYkLhVrfoox`YViG$G+B$>3p%#GAzlA1lK9cVD6Za5Ye(3GBT3j
zCG$5z7jpuQILFIQ{tssEO=C=|OE~Cz8?=5JQw?)fI2tTV>Xvd`(Y{BxyF{62q>RFA
z3v6h?ToJDQE+7lo-X!q50hqXs0rlxYIJ2q)Yr<z@;+89%*`YSf8TbZfJ#ip$ekF?J
z+Z`-pm5J1BO$grJhBNl(<C=4<XWD4bIquNm%dY#t-6mBMv4EXj-z>xIhvU(J?S!LF
z%muB=SnzqtdT$P`d_p+m^#sj=*C*q#cUwFx{yB_14p5{j&rD%!QV<4!Ayr9><hJWv
z#6?zjV1-`+_^nF=QS`OE?H$^5UZ6A`ee5KdWJuDR-|{iDaRb&RTQRRhEw~o!<3SJ%
zw?bH-<?~Taq`pV!=fl{x<%M8wX+p;N4`33TRR{`;?8EDK!ZdSh8a^=$^h$%lWw;Qx
zXIapLqjX4bs{;5(Z3h2YpRjSJCXI9|hH#c+PMfbqw@=ZalQt$o>-B43>%@2(-_Jtl
z$Y{*mZOe7{ZNk+`?YOe>6sj!v#H;9}^8yt~oG?V0u97)~p9V6}YT8_svTwix%t5R-
z{Ug+{S!)O@`UrlV%+uZQ9KC%EQ1PKGUHRYy8fXn+T>EF@&4GqgaJMcmOX(#<f3lzp
zo{P}+6U)@pe&r4&BN%AR$2E;|#QJm&N@ZTcQ=vM<(NQQo@?Mq<{}GLUWA(`tv$GI$
zW;Jx3zXmPMg*exX<&AYkywFCD?tCvv+8MtnDN~n<tAD`Wp(?~RiGBVbXv3M?_M*vJ
zYc9@h2Fp&!zzriqGEy;-`Di_OS*cWTV4l^}r^0ZW_Yi8fM+|K}_pv&k?KKQ-!Pn0h
zimSf!=dYCEPFoFHu+g83aLVQdz2kFLA0NlyCmvWSzQ=!g_#I9k&43I0^ojj#9~8Ks
zn(T8-h5C5PQGszF?2dVcb{lP>(nFa#yBJ~ONC9`fQkpoqKLPvl%Mi5iCHT0!2GNE(
zzQ%|3nY;Jn+_}au^7sLW({{&h|2eSa?oeVqM~yE1UC#2FQq*&a7Vo%YFYi+?MUCn&
zL(Mk!dvKGdvMbZjk-Xt@lPN#?b212W6NLc{f1t0?5k3Do@}}%Gr<Y#|`=$q@FKWZg
zGHu%Y(*%=>vmtQ}Gljb<5!X!$Xc9Qc{dZ*;O_aMXRG*_kMd@ew(EB1hVa0sw0d>%Q
z|0gD`EQ8aVt?1-+Dn#!02F!TZ!!mF!_}u6w1P@{N>X-(+5&IQ8bX3XK(#5P>$7Zx^
zkaOGT0gnEzymv+#L>y&qnj(4V^snI0#-^kD&LS?zstP9mkt35%iXptx95C<(eA)97
zcJHwuSMqhqik#Olvui8_$>+o0f3n0$-jY6WSEJ4CAGoeB>G<2wlBB(*SloS$yS7)3
zxbyw|qY)=@REsY4{`QNjRxE?^2S2blKo`5mX@b||Z<sm55U<Q`!YF4W+P>p2)IaC}
z|KS{DN!NqV?dx#7F&$hl@8%PqnBxuBkNGPpOa4k*k*GO|V9Z<>)dzmVgtimdE;#_D
ze&^7$Yy|Y5(V{&%KG<jd2h74YqW_PjAaS?`n_tM#i&iScY57TP$T6haK@y~AZwaV8
z9m@C4J;&{w6pSK=qnw>k3yv{=wG!<Ft>|`Kd{U3@laVAfUzft!em27loDZU}l@1y)
zw{UpqS5!D`hbqUP@{bPGp#FMm^8S$pEmoId?+6}z*d`vvUza20yKck&NL|w9HHfDr
zn;~P%OSBnC<HLU3!}Pa|PpfkaOf)WuRW}SFnOp_(jFBKdbPV+B-@>4n<>cD#^MW19
zc~-20Ets?x1bcsA3w!46QtgBqUzXP}e}cbt%t>&n7CrXMnhaZOLDZYi;ObBv+AD70
zoi{IodGAe#k;XF|86=DIKfQoc@@h0oDjG&DW__Jyld+reMYg*?2CpR@u+_>4#F{4{
zxrpt5^B?h9&QD<^4u_sUtpD-(9=}dgovs-53+?Po>52y}aD3<+xENwZ_Uu!n3fWpX
zbH!*lan6$LYhYZhdn5BwZyX1UiC?iXZwv?&mT+0BYf#`4;}CIf9+$&-p!+2UaW62p
zmsvB^-Kpk&xl^3R$&tMtCFr?0UAV928czD}7I<Gz<Xsny<cu;W;d~8U8sn71cdpaH
z__OR8HMJeyv!DM<>pKX^|G;k+n2^pn3A{l2yMtSOFDkm9VEN-Ld-W$KL~Hy%I2L6|
zDlK<m<}N9ivg{i+SH<ujBaO*5RHq(--HhGMGJFSIAYSPJ7Cbb=#DBHC^73<-_~Hv6
z=9CAk{M70D`&zX3@F`xfy4In4?=(zZd;sh&xN$+Pd9X~yoM_#80f%xkp*8y(=1hMB
zA4Z$dZAruE)kSht+2B0$25EuU6oj55%;P;-ie^T&@LtTp70&Xii8D&YJ^3oM?~XD}
z8^JP1KMiP@Ryc-N6Np^B4&zF%!_Orq<a?|oS@&)Ty)#ypI)<l<?aim*enBY?Ir<p4
zAC#j3gR<1LT7ydLdjV&pbZPe!WjLO96A~|d7S}XcL70R$+QT}O3u5Qd@{bOsZlf^K
z@-%<sk`5_&%!pEs53tbj0I2UbCnD>Ac_%9pQSz=ad38^Nm~6Qr%(N=zGtV(5>7iYa
z?iL8~FO=y4H^ws`F$RkLy7(`;cR@pXJak%RpfLV8{`u68Xe&$Ck1!{y^=f4Me@3*t
zco$x<)22^CP3WTc=@7T=9@1_BUE5|&-QU)7K|ikG(#kr_{$fg>7Aq2I<uXjPKF{Ts
z_d)cB7vQ|D2UEjzz<Fy6KT)Jk`^S%fkY6J1*imiLnd8Mx9bJf?TW$+ACf;E=nOnlG
z$@=(cQ72rvc@w9c42LmwW<=<43_}_sprHDdu*xI@8}6CW@`xLZLDmY!7cGdp+z;;e
z5?KmuKG=D<gZpG^Nb7@NGFRMXDDXA|-bI=OXD2bnTnQAdYX_}2ap3sNnAdgxgTv?D
zN88Z1AhW6vdiJNV`$G~H)#C7lxj8vsc^9YLnT!!nKk@@NO0gwXfg}#S!aHv35c-w`
zqR!O-R9aIG-!m=A#8Oj|!x;NBe@bIWgbhliI72h8!^0Vtq`RpJllSV<`EfTv^q%9K
z#wNjZ?e&=RJQ-vD=%d}jzr5p0j{mexmRc@lT#1@0aD2N^Y~e0V3$}!!N9Zb?zB3p1
zjP-*!*5C47H=K{Lc*iR^Du7E%EK?`3Gu2Fex?rg+$y~n@Gxn{->J|OWeOQ3LQ^(+t
zDwb_D7z9VPY+;>z4eOGdL1ge7xYx<%F-@7g{1JH~toq7+FP)IM>5X_pvpgN_7sJ58
zQrxwlaXa?OQqidee86yH8lIvK?tW?9o^BPI<9iAuSH8pvx?fP>Me`gt4i}!7#OBkh
zZ{fleD;gQx3wgC_<c}3&va(#Xf}1RK=sdzdfp?g@J3X%?b`GXhUxB6z`Xpo5bnIMT
z!Ap~iD6gtbe8T5o*Gp3@>MjEHYI#xyXZXG=jLFe`3e*b>Nc;~A_PJt4MXxPk>#JiR
z5XFiYPa4FxFG{d<Wj3C6&W5JpN+h!AHb@npgiVauwP;Hx`Z$$isbn9_sXWG9WrxN2
z6PR=UWDt(_Y=@n3%_!#)4zfDW@!B|c&z=93S9c|no1e-0=<k@XaphaFd!Z3%UXUl_
zM+RZA^ado|1|;XK72~(;0Q>kEXtGh7idI_+9qnDX+wqLqlyyY7qE|qRFO}jRs|0v^
z&Vocuu>(Jw3Yc9SiVY7d7zg}1-?if=q(0U{6&KcR9rK>wz0ZO<1=g}X8tXpKD}XrX
zCbYjb3F~q?uyHTD7tD|IE^~Qk-4_R*<DT)|7HaVHKX%`L_{b?83<dFwP#C?%4Nte;
zzz-`gVfgZISR$H__g64)%Y~V6d7~xiFVtqwm#tuPMGmiSRHYTaOHg>?8frg^WBX-u
zTD&@*E4QkLRNeWoqP-kk&rf3<utkWDgLvGt8f-Q%2IrfqAp3JE%dLNaUbWq*Z+Heb
z4rTsFbw_bTS0}D%P$AAM%&6?>F3$hK8i>oCheu5<$g~SX$i#KOaL=OqjK6A5vzD-&
zahxi2>7QY)!UDdmpqrEX-3K8G7eQvs06Ujg^X|J9X`8Yf+YkIjKeJW{?=%BnyOVtC
z5-HdtW5l}Y7Bv3FFI?f%1mm=GVYd<6+YV8r+}-1tQ*TV2{#s#eat2iD=b`9sS>Dhf
zHR7^731TX`_~e?Y=q-5!-W5Cq?>XDC_>DIDFFDFJd$iz`k`$Cre~PkiFYp8R8gcV|
zGpbZs0Er3SoM^Z%H{sd}uq*K4%AD5l8%M~H!1TMQ^4fx*ym1J5z0{Oc+Oye@f*h>d
z6b>q<CHSnIA=E$aH1F`?EbDouf>+yX2=NMnA!^FxRiG(R87JftcP!;B3y0F<iiO}3
zdK;bQia}Is#8<A?fho~pVB~okmF8Xsud;89#Uab`CKF-J5O%#+FThhHn=qm10o=XN
zj}dFH^KV$rFXhsE5c=PQphe85JFp%{4iYf_5Q~FvnTOZ26~2j2fkCJWu4Q}A_|$8-
z`hpe_NxtU9m%5;PsyFN1I-&794H{6o2P<DVLHE8*j5j(GXJ<-~ZGpLHAGDoy=zp<X
zrZ0XRDoK()wP3;PQ@n&^0;ty+692Rhyyz|AKJ;6W{IW>wmsX~GMT`M8&6xI89fK9(
zG)xqPa_-5O;8<uc_OgAd)KNp!3^O2qei=}~i)XyawY$RfA7<d{@SXY1v-p~S{}2@^
zhUaYo|Lkvk+O;N}mZCs<o+d;1?1iXf?gw@$GF;{)PiS2{2Qy73bB?i_`N7SVaCm|)
z#imCnj5Ei8`pc*o7K$b>SMy(Pyl3tqH?$uW4O$-}fQ&nXr*DSgqDRl+70W|SY&53P
ztGD2u?p{<j)Mw{hjxTHZ%ey{U&B+ej;cWRl2z$r6TnT(`SJgXcX54S*4Z|@cKb+5+
z^p^eK<x$Dw2&l<z#IL!G0W0sxnb<{v>T644Q*o4=v^E{g3S%M5;5jCLSEdWL>ydTi
zhEX44hZdG@(DT-cUPzOpbEhzl*4BHRhP(>P<@j)RZzy;3)DskJ9iN-&qC;ycSHdRz
zi&jgNsV7~)YlceE^92%Ag~&o?*fp-C))xX!RKWE}OB#Q3A{5OnfQaiFeDA)Upt!mZ
zZ39N&gaupR@kKT_Je{2<D80n}8!1b2)?3r>s*FXZ{}xJ?8B<~8Q8;^&ZN&C+ctEZM
z1I3EC_@OE}k!(#|JyvjuUrcZoW8*|~RbY1}o?mw2EBc@Rh*mjqSk)v$gofu}^!Ohz
zS63Cjc-4SNMujU-YvPKgg@fZZOM17(n$*=V<fbn~a8hdoa7qLB!P(HlasiE*o3TYE
zmGLu7`QZ~K$+{=C*wyg>=CmEd?u{GSbFqc*u9v}L#YisudkT6R-r_y&Z-By+lbDI=
z+~~R-C>XknZ`>M>UxzbRSnEUH@#}cLns%Y!=~%40c@4vF?!^B5aS${4B7Zt55B^B9
zT(_4NU1WCy&Q36;Ve2Y@mMhT&NeBGH_A0x^+{dFoEr=lHt=RKph4A%0E7~*b5!k<2
z1T`Hsn0EIvZrQC(1pZSv+tr7#*;;_{gEt{?Q7y_vSYX*uTYT|QowypQLc!ifbo2KE
z1(~g|X?h~A()Neln^_-5q5+foDqzz_6LPCsn!K*MgP)n7RqcWk+J1Qnc7=WXO;aWM
zDNlze#B7Ef=DfRTXFz)5^=amfbC|-sWc@Oi@v4p{sU6V{JsU;rY*)`0dnaJyU=8**
z_wldTd*Zxewo_TRi&r=t2L&JZ3Z)sBb%>@8sfj(xJ`XLy_eTPEYpN9St2L%>w%foV
zO_oe5NkiYE&Rp8H7CaO)l%AM)8U==r9qbZ@@~e;M<BKjacG*T?9Q$kt-mQSJB_5Jo
zk7Atz$4z*q1A<YVdG5oHLve5rmt)omcI@xfUiTG-{TW6g^pt5&(FN37Pz}3(TNBZr
zk<b<W0$k_Z;4(U;Y1m74F84ks^p2Ono)PcCE%pRNj<CYZ)_<^dM*z-C4#mW2pSi{<
zm+|9H#(C|&ll#~7ADUEJQ<1g;*BQEt&t21p_8(60ESC*6Q@^0-Kap@}{z(-3TGB9@
z0xE`XpzUQv?Dy_~u!o=E&l*z_SzHS}g99jY;U@&1X4eK|>yF#|7`yz}q2g5LslWaZ
zyNsuxS+z73jGD^l1O?&GY{sLQABc&u`n-GULhQAB$2&95P?m=o9hq|kWZiU8c=9*4
zZP2AIf3Ks}#4ae;$c1SGY!0w31w@6@#2SsP-*N3bn8rzx^kbv&N0$VBbK8{g-BxtX
zCQEvgS7I~8(a;kuM}Lo3C8C3`^76;(Q+Ha8<=jIwt8ByKJE4$ilFIp*?#73``lQd~
zJLnbsL%x@JDu1h!iHtLS=iDni{wo`rwFB_<w|jVtYsWf;WG?49g3eWW$eicF%~+#D
zT6VG>aL7&g%I2^UyHYr_jVG}bzp`t47TCM%;%y0a8nv?p-Ck~j`9}GmQhXkIY#zer
zM~1}LcM9jRJsys#E08~Pt*Ao15~N0r#bcRwFttEH3!aY?J1$$rU1;dVob&g<NiT=Z
z9}H-(!Bt2+J`{5U52N#nubd$0|3<-o%oAk}StFjH{{1LSP^X}OR)XwUcmZ$EJI_3@
zv&HGAnkZ;85Sz?f41z&(s@Z8tir?E~$512cc!}kjXe<ieNAbRI_QL(ROXz4e986Aj
zif?ct^qN?NS(iUSc<~Hy$^VW{Z}Y(SkrX{JLz*^(XW+@bcd)(u1voZr<Et)7k~>j`
z<fW%MY1$}3a@-GKAkGKwyC#uoZ-<CstFUj*2S~~I2oa<AfR|e~JRGJ(_OiJ_=F+1e
zX1xie>_p7eXAJ5Iy<nfEM@2tcITxu0X!$Q3x97A%ehRyWYx6j>a+de_S0j8oHyjI&
zFelKv5oqVKlh29`0>S#F4x9dlp?6>(mq=U1HV<u}al8izs(i$9)z84=g$Oe%?6};J
zU%W={eyC5Vh3YESn`}G@n)Qsi!rYY`f}Kz$<~IL4`VS<Wi-ReGGbrlz<SPnw*v}Ei
zDF`QmM146l(=Fh>WjF{f<~leom?G}~sYW}b4C$kEWok0JnKNI<+!S5Skk_O@W!+V1
zz~MVk$47|QbTE$7rG>a+)_XX#OrE+Hoadq>wXk_pFrT~p6|T}uM}tspsOA5HtN9MD
zZ(%#6O7-$-cp1~5UB$u3HWa`jq3?(D+!m{s=$iVHYnUn^w-R3B($XZH0yVfS)|}d9
z_%YwFuCU-ky0DWw%cTb>LYCTPe7)-wdg`Weax<I2jm;RmTlQlY%b;|5oC2GJUWgkz
z&|T{)Uj3PguNZSlcqbV4dk-ZrhV>@he!xAq-{@2_fIHar%AaTVWRC!{OjF=c>~|<o
zJS$#gz&g+Nzu>OgR}9?Vfyuu9aOV?Z|0M;QHu?nWuj*yz{9t>}1Lwi>Oga9^S0^_e
z85ixP1PYobfr`ye?(kDRs*rORldklE*W7a0-x`5hpCVvqSS<e6Zb}ok@8Mh@?BU<*
zD3jo3=7%YI2@!WLatnvZkoc+cbW5`p5uJ$SDj4s;@tr<;d;ezM<z7gyIS(G}JYB7+
zOjQP|(P`{FOlinx=K~G6J6@MWX`9gCS4$y0r3GiEjfQ=G@faCb2}*l|!N!s2ryaVA
zf}GV3hpOtJGVLEHQn)PCIKB$UW#&Qu%(ak}aUaf0YS6iQ0pL5Zj}JKf0pf44-N1`s
z{Kq|)*=Iy!-l7+*8@R+CUjMM9hqdHLWa1roAIj#TrQzJmZHnaIHzT69xfN#rxq^q|
zjfk%xjvJM0ME3g)Bf{7s%xAsfMOLNo%8u>YpZs8btW#XzKemGnuY!X4PD1Oi576q5
zHXWJp2fyvF0`F^@Alh5TIYlwY$LbB-{OnqA)HH>)cEhO3-|2i3<3npI3?<+6Ph!-h
zujre%oKKRf!9Tgur0mmTeuH%d3VO@M%LX*5Q`KHHT`xuJLh8jiV{%}$u{-+36`<VP
zdDyL-22Jy2$wkK}*r|4d!{ySLt6IU^Rc{3cyoo97ESdA=KP<662{zK@yy@~`L?){l
z*MzhFJmafM8yJvG%RPLF>Smn4dfr+;8H>g8B|AfF!Xx90`1A|&JMYot>qtAdp>_^#
zvQ5M2jXXfI0ZeFmkC)jyTt4lA!PP~0sD`o9uXn@aJt^oo^8v5I+wkFg+c5LDBInzv
z&(F=?3FGaPQSEslh*}dJTylP)i9@q6!TJ--U2cJ**<<)A8{dPT+#o*EsQ_nVC(sn>
z(0ejkr1{<#?0r5599s?veT6;z>%aQMakd_0{+qycr!D|)WFEfPm!c;7a`E{14^Wc0
z19Z+RLc)a6=z|SV7G??^6WMc4o4tdtTLUk75h@i_!4Aec7cZ#;i*4qN4LF4MyQxuw
zoK29#-Nr@fPhrkFbuu$R5<1njs7v`0^o@$ech}1x<y$HwCRg#Dr^j=nMbB^tHKE;y
zPT-`XYmjavO|9B~Ge_K9IDK0{bDtPs@0%=)8~q;I$HX#b)DEbRZvq$XUyyU=9!4)!
zr7jul9D4XQpEZv43GcE_#+hKe#&}~oH8;TT%~SB79?p3@>%qX+*7!+Dp3J-{4TdhV
zBtCc=Y}0;)D`vg}=NZWLzWk0)yk4T;!+t2}+RHUNz2Mj7zC-23*O_bn40=~hf}Q$`
zWRFJzOaOHd$i#rCcN`~bX8i(<ZE#M7WyIu4^F&FL#F<AQa)YjAu(g_?RBi{#Cfwv!
z3Hz{oV>ueEv&E9n|6%jU5crvT8@*H-f%Lbc(LTmF$gbsbG<(tWOA~Hgb&T<x%(37e
z<+`Isq2h;L>|TBoTw~7Q{Y@q$!g4borW^++7{JwD?J=Ko;~H$vFeH&LvH^D-fpN08
zV6wY5v74?>$LV~=(%x2Fwv;`Kmuuslf6uWsU_0AmCBimw297t=C3UNI^Whc%vagHy
zQD0Q(u8Y|)Yw$ScR#?&Ad|8k^mdO3vpg>ND^vM_hgN!Zkhm-6m!~%66E}3->R`1j#
zJKc=w(G&yXn=_7ERj!S3zyCs+j0Uuo@;K`@%eOQBm#W+abew-%xT`3e?UH^86O`9r
zV4x&iojQzquG8oLHd&CFe*fVznN|$iGz;Cv+QZ0I*Ks?Wi$$gFg8gIl$-49HFs1PZ
z6n{W&^pz%%owFDUJiU0McxkGWm&fV)J;OB`F=*2p&U=;TfY*E@s+QA=gNe0}@c2Gl
zJ;t~-A3ws%5f_=mnDqy4sX65MF2i!cecW)CIW-R5<IlD#P|<b)BnIo^SC)S*yXuSa
z%VkkmHxjo`>SpZNQ{0UERwOp{7*2@v16L&--tN5zmuVr-ZDH@}ZBK`in01jr74*qT
z=?mC=x0CDCl;tB{S#mKm<Z$4#5s|R3z`RX}#(K(RamgLVa~(pwJ=*X@BxAwun}=^>
z)v4Orm$2?=2&_?)AQCs5VeCXHvgWG<_1X0srVOuxzMw{U`Ihm@7nCzQUNs0L-($SS
z0sLb63qt&5z-2x{#idh_zvv%+OCODsb5Eh8MS#$EeI0Lp&5C5krf`RpqR@MC9ArGT
zqJ86A!2Sw5d-q&L<I7g$@q|VkF)1H~DU6-B`Z)OXI-{pJkh{K}`DsoTVx`Gte*bYr
z=Hd%Sz2K`DaONzAKa2z~^KvZidB#~3N8mTw45fQi>AVm*;=3=2JJoMYItMp_*Yhm4
zCsxm!+HOt8busS0;bELSqW}Zf%F-LH<}_wv4L3=?50hrqLI0owh%W!lYhPCa(N%w-
zJJt=f7BoTTAn?};EJ;AqWh`19j1ou6QB_->I!i|JLDp8(dF=tnO8NmG7~4?(QwVO$
zVXi_?3p#K1GhE-KNwmdUWcUDkXKLCl^k07yavy9!&w&tO*~*ig;C%%b)U8j~$E9Jv
zSrR;ox(ZqGRiL)~I?A|o!oI(kP&{W7u5c}d+3OpzehTYNCw#<7tWQ}cVaLh&vU6%s
zKI#nagv7`kVVrvfnmD@Pbi0S>8Pd-s*RO;Z@}XFFS%$wPX-e;1(kGYm2Qf{>m^Pn3
z2)|e5;jk=YvUr6mu_#{*y8mg>Tg&q>G50s`<(vcEOV49n62(;2MJxyEfN4Lzfyqk3
zU-+p>O`3<$Mfy|F%jN-eDi`r1rb`mv$OPW^=50==FPQh#Ix3$2kU4cmiNUTSiMP7Z
z$GY?#u*>}~kXfO)kbTeMk!ShNH%+|vsXzP@Z&ez=dKq%((!l<&B=#P;$jLC~oJ9T$
zs9f`jm&h=rzwffM(x@`%3bn?VOB^JO`-|&l)H63@Kl%)3-qK44c)|Wwp+&`UICaK|
z1cs^6E3=eHS)7pbT2Tj&rqp0SST)=6zT@WFCE)B!&oK0xHdWED;&aNbLIn4fLl;%B
zw>E;1!Od`nagcB1v3DRBE%bc$fgAW}NK~Snxr}!r=w^)cp%<m->5JP@#a)LRanO*Q
zdmtbg`-0$2vkbkSnT$%a?qC@^^ZAZj$b0@_UB?A>aOAH7ku3`1L`fU@pdC-x4Err-
zE3Sq*pLfE5!cZ{rxW~ohhoTK-&*6ndY|oX1QyDvQ(LOUYIkkox9L|IPY!$HoR*&|d
z8n~feYV^>>8c?}_oKCSeOdV@MN2|3%NT4}p*fQ_%9RVZ=U2y5y7K~>suS3;gpq#Bi
z6&kFd;6?_Hi%mkogNNeOyQy5nyLSFXHO0%1HAqcaJId}J<bqxgLf7X%xYauwTa(PF
zT;N`;4SkBzjGLHf(1<okNBGPXz=TU2?yHg`Q&yHjL%D!Dj@c)Etoae|{m>;5>y<ej
z-!?uvPe6^_x*#Olong0DaDr`?d}l^DMhzN4F3aiJul>lYW*)^adY!QG`z?Gs+nDAL
z)WFE$^3?bF2EP3gd#85%C7yTlJhl$;gMyz*;^}rt=qWXWi@)Is#`Q|HS6GcFFJz+U
zDJ`zy{vAwSC;|hmR21|Z=Xov`p<))}FkBaKGZa+GaR+uD{kDVi+EEDhF-LiiLe|T_
zKa6f&m4h}Dj&NJ=%mlgB4e+Ds9Xcqglk3Ba(Dj@it#v$vHo=jc*M2#Q@+puxbsRW5
zmLoCDK<TY|q$*K_d8=fIQ_oGPX_*ZwiEa3_ljYR9%y3$r3JD&J$8SdeU`DDLxewAb
zZ;3i_tjOjBhi7oLo8F*py99kMtcB~AEK7a<HC|6OC1#KR;B=KQ&~@E}Zt;ALflFDJ
zscjE<zP~3<Dfh#=4h#OB;4{?JOh*}w-w>j#3&&p{gZ7Im)b4Z<m-HbH?wz%!r{)@w
z4UGGnR(c6i-krs|4@R7YVhaimIEs%gSE0dr6X3>iDY95vm8z^1Kxf=WKJ)l2C`-P=
zFHKRPK_kkbU-mj<&XwbTsV5kR`Jb>@b_y2US^%l(LA;=&)8X)6S?U4bAmo4ql}cR=
zk4(d$IWdgez?jl{S^40VmWR(nA3({wH`r>XKqG&jgS{SFL~#3Mp2+_VC!>`JD>i3B
z>eejo)h2cFxm}fZ@0<q~BWJ+}4ILVudzU%<K8O>4UKZzC7jeZuuJLvX0uY2Ji`N%q
zW36Z+>I78tZLLYnTj7H@T1;rC#}@b=$T9`5q<M|4*_b-(D>ppwK6<`9CREak!NmGB
zKF{L=>YQu9%%lH7#-Aw0lMr!CtVd!)OsMhU0vzSZzRtTz{FwZ3bRDyqd$_@vY-kIH
zl(hRG`8Na|9?BAUup0s{PQ;&$k|cbw3N0`$=HC77!{ay3<8#Ws{*Mb>Lq{Cud#}al
zh`->!F%UmbH6k-hHNbbO2EU(b5u1VS?7h7kXO2~%J+jT<IsG)RHuo5O`y<3WD_t_K
zCL8jS?lF(&IIdu&BAquf33Jy6K<-K8RHp2Pg7~pQ3wH_dteDNYX3hrJ#TPjZ`%%zo
zevGTo-h*qWwcxj;YoIvnDK;)V0HI|nL?{sj3mK>CUxfjg9`FU09?n70_yyP%C_&36
zjb>TeTwZFrEOwroz`L!Iq#NGPWaQbQWZ_6-@+P<e6DKNgMR1)t^=88LYlG<LSOShW
zHEFwaKbCF?f}xwGNLGgq&F1u}&V+;5^jVt34o0H$uay{8FG*$2^nxKLON&-o(1>;p
zL@9TK)|=JH!&lMh`$Cp8aWw%|4LM@6Jb?53n<f@(S>yI+WAG1~35%wd<Q*7qN|NG2
zVdh;wma)kPfw{6c{lQ51Jz0lF`Cdl2ZG^OD7@e+Egf7|-@VUZP44>wUdd7F*^<FiS
za^4nw9}b}7o$-9ZkQ_d^EgAd+A97b?O{vE&GjuJz&TS1ijjqP}v|^+)))_YNcy=N5
zJnn_w(2wYRVm{dY5b(Y)r=fduK3Z9Q#5kol=sh(K#;q>~r7!XDGWQ}jU#R3qjL{)#
zPu@YDZJKb{5H`=!PQhF9(&RqlGpW7MqymF!!Y#!tyETX1M>}6as$Ljx=cB}%)XUI<
zg@?G-iSO{Pu7Ef#dJdw-1HzG)EEuz@%fac+0N5P~=4hcY8KueCR?FQvnUWrGG+x9<
zFDrywmxhoV>Ax`R730;F1fz4qNj}=_HJ%^NKL4AX@q@YviHf&{6-N!}_F;9X^I4i^
ziYoaDBM!jQf@aXlY-27+Df-Gj9aT$)(Bh{XA#P17<a>`o@=uE-FLB2B8DFvNw;5;)
zlW~!ACLA8CNWLU@K;W_@kUwThM#jj4+r5*hY&?wUrA@#Tg&yb-vwOwlF<#lDK+1(8
z&^ceuSN?jyHQTIZ{EJPvbAc|AooB@M*5zE_X&pGhI0eEDdGIz?jyNCvjj1a<P&~|>
zZh4|k%Vc+RSLVr*wB?Usgn~TLc^M29T1BY$p%_QTP6e@IEHng5k#S8tJo+~oeSJ6b
z_H)mJqNfy%Iid!UQPpf8@l|-5?e#rQ++=LMXCN?G#b->Nijse<2q`fl{mxn#P&onL
zr6plH<K~Fe!^MIK^*p`Fx8R-MP0XEegX@tup%KYujPHlSR5z9}bN+&#qgCmzJ&f1*
zN16sP7QRyW3taign6&vCl8i0Y_+>;rgn7ROp~ElOH0&1^I4l+yOk_Rn?W5r09~m;{
zh%prf^*hX3z~<o68~N}>o53bNg!il3fv!p$`7yonjDHdVj{Ev~+4OVpf_X`{-e-BZ
zp=wa)eir&ZFo&-7d2!)}1TdHr0j*I}VW2~anw;(y?*8$P@$w}xCg%zlyD1G@YnQ>z
zos6eCWDmEu!GLxSl<-PV(;;<X5{Jz9mb+{$+}29Kc1vBVbng@Hlo>+3#_}j#uSntT
zdsGQ8;6;a52;Z=7+{2<w%+UM9vdJ&_8x8X0o6Re<@juC1RI=IO@e(kp(x7_|M?l7H
zwga?%geq^=@FsgAg)96%LdQHEaxk2EEQ?~%Aa5Z|V>}u?)}eE~DM9mJd`Aas4bm2_
zN9wK&3cUwTa{gP!!c+NSq;&mnNc4#m=5Mb-X#*4Te<nl=iwsBim19BMWUipvoO7dN
zV1t}9v`&<u(T59R!vPJf-`Wge$IbE4qb9s*5{y$+vtX@c3tstTMf_I|vfsf~G%U5G
z_3DEl+pJC169jaH(osk@p9D4&SHaTs0&tyl4raOP(4Ze@!Fl{UZtEloOnsw|aU0vg
ze{Czb>r60I+uTA6XLX!3yAm+&AXs#$(bNy&aCxCRJ^#g!xL;obi*A*`sBNm`fq@dO
z)3oOvy6O@EvCmr?T?eX+;S#9ZjdnH9`235H!Qq!Od3ULZxv|CwBh&AKT74@_tIWe}
zLlfpJRwY&AWQgH6)+f`Er)h5=gF)dz6!aO3!^@@dJ9DS2oOr-jTPaZEl1i3`?&C!Z
z`?<uhU*bo7#W0Z1-mOQc^1+Lk`@JX<n@=3&Hz}~5+))|fE{T;W4k^RzV=F;*|4@>&
zWCpq$KIT2AvW&_^Kd@c%13IR$K27jkuGl^dCKVoL^S&tl$u^dmzO(>!UC!f#uUk+?
zRf9GQy&=T#4sSQ?JRf22$~SLj?5$Q4u#)bB)Qh=XW8ZIlHhw5AN@ZR|x)xXWXwwJG
ziQ_wBCRb#9m~rCPpttsF5LoL8M|y2WgR`e##hK5L<N5;nY*`m=S2T?Fyn&@Nhq2s&
z20D(=K|A41?%WnFy1V=$COOE{h!59M%0h>ti8QTd8M))o3(kExTz}nSh=^9@Cw<I=
z<FaAUH}(r$xuQ*`SKNb`fA{%;4@%4+d=}-F-+<M6N+eW175Ar^(bD{KsL~ihp1fi_
z0;5~t>SE59otK09_i^aDW*L9mDjj1A!uiVEH^31yIB6Z$XUlww&*xBB^I3x!c&Om`
zaq_evu~$6aLY2UVK)mY6*c;bp<JNRGi*&WlOVzxJ+qafO*0v(>ef|k`L5+xHw9rbm
zA3_qBV6oj#)^B-&W$pI-I@c9gyPm_t4i>~~!zWnD7=I??B{-L5jJa@c8{%XSkRLjf
zw%6=NH=!K{&XdQ`Uo3kT?7^EnG34fQTTwLaxv*@>WjO871+Mo5T$;pVm^pJ81}$Rw
zVu?eLxG|2mS&_##Zk`Q^*-N-^T?@KBI1=g(wd1=dMeO@*#2*@JWU*l@7TBJ^*DW%1
z=HuU(@^2{~nr}*vpDF{Dt=c&3Rsu$z`v}u6+(2XYerRIRgKIj{uxObSwZDFYvu#2E
z-4P&MPC*c{C(rX~3NP?{=}_+Y9X?+8i4*QiLp95dCCYu~%wOKY{gZi2vaV*&(o6W*
zDS_qeLqUHG^OOa!_n8w;aC*;c{JAzCjrbOv%IOdZ;RWzk8-ZmCDSUtCDR}nKg3fX%
zL*XPg8!C4M#~I7SMrF@oVS6R6e`!FzJdmYv2N)aE{~n*ha?Js=opD;NF12-cMbF!6
zXczI9v#MjB*SZ#*eAj>&=te-U-2#ZI)@0`h6?pwXjXY=1ZNb7wevii-5S*BnH+_~e
zb$T@k_b)&!c9x;T#YQ9|FqDg(Crt+beFin=M|#BAYc6e!JAcp=7cyp*?7@><Dr0pR
zvF=~dR5rgUv?OkUF3`C)1U(lNb4~{mpb(E?S@I0-zxjH!e(4T8Ek7M^PtzldmR$rp
zPKgWFIShekHE~Q>Ixe026T%-G<A*4H`u0u;&Kx!oB=;y0!K57yNzwm+D(cea0aehk
z|H!ShmLQ`WITRg>5@z<C;s=)qsB`;jkhOk->RrQVxUU-R4Nv5T3@~?#j~Vf8>cHKu
ztdCZ479#GZaJ#(1!Kx$^1Xq6Z>nc*%>|i4=_$>v3Rs$${%42`LFYB@oqX~kyc=~$*
zRBwHZF~bhxA*Z9@INXg3RW+o)%=C%M*=KBqWJ-M=n&YDWZ{V42$-7Tm51y-8-kIf?
zAAe+Q#b*zNnIjp`R=*27JNx;=PQ~cBQ&XrmQjsR#&u7flAjny{4aFC$LHO`Ke%Vk8
zUELp{u2dU@M>-($eH|yo_WN2Z^1<Y9ve@y%IIg;;7Unmt!(FTYL6XrIs2asw_eaIh
zIbDJG_WHvGI-kc*gF$}X#yqe&{DwbOD@962v0hJB6mwljb7k$)T&~gt@H+4sbVP$(
z<$>+6RL7JaF04oA{r9oJ##WdeD?vqLwQ*6$9mWeV<vW?{PBizUu#xuT$uK$U?AODK
zcE006Prbuo9Rk|uH6BEl^o5HIdmwtqEm(D278=j9U1MbtCZCOk>^5z>piGtIvUh~B
zPDZS#lmYI}ulZfyU&EPQ#zbc7KbZ6B4c`BL4NKb@7yf2F`p*pJf3qBCe~Sb2*<RqV
z%@AjYtCH#cwz%ruWiVP@2WBjH=_nt|_4<VJC)xAf=3E*sdioO%i9+x^a{?GFI0eoH
zxxB32I9R<`f*75di5+t!$R28d3ghOZ(fDs*TV+WddpBUAsyagJ2q@UC2Jt2OAoAHO
zPLM#9ODlx9ld<TizgTQC!U!rjc^bT45#wfhpwt{Q*v76gvl|xJ^U?!7S1>oEJMw#P
z)}p$a9#yJkyt$GXIKEkeO804!lcS{Qqs!&6;?#XS%viH^^A176B4awC;Q%}<Y{cv*
z)8Umf<AJ@9;PP+F(z?OJ>~G}*|MzF~?v?<nO<&+q`D>WM_5x{#uEP;YGg?+;0G3Z<
zuz16KEOyA|W_`MaA&S$XA&cd~4m9#M!dJN8`T<J%n36NYw24f^U&cz&g@b!zQNGlO
zzU!2xZr)Q-u<Lu?Z^uZSYvhL-mwU1B@lq7sk{4@m%fWvCSTH=QLJS5?XvTyN@Q!)L
z>jYebC;w7WrEC^o#wqXxItzrS9k0Oo(|0i@{Uev+@)9Om%8<0pw=rE{0Ha3tqr-C<
zS`nuN8#eo*aP~iVcYh4(4b!E?{$_C8vJh3iH*(Xj+z0oukGSJE%`xJ$73`B`J|2`L
z1tm$GsPDc*=2{`x7?kt1r*a|7r5UD<V2+Nl6Ch-DEiYSe1?DSvKy^(iXr?L=jtnD1
zea(qu{1ooT21O#Ol7xF%Y_DV+grg=|)23BVaeh0?WGZjsCf~k^88tOf<f=}k+`Ms*
z!#vFTXiWc{)h6Y>&!C_(UYt;T1tj)#!4JtR_|*Rx2A)_97xpR<Dd`nZReurvrsbkE
z%k!pB@`vPyinROVR>+<>ABH(AkxjcVU`f<b$huYtWtmerBU?k7FzpZIvd@?5F{SJi
zwGFO(ktDgt!g#L_m1t0GPV3p6DkFL{?oqVHsBp%IGWpN`ptljx$@<8xvW$bo-|}?A
zZGBp2?TMp&L(wjU?K+Dhh4yW~xDCcWxa=k4$rTCcVR{djwtj?6twUV2#tC#}`J3Fs
z4QTge7}{Kj<0351ahcle?7F21`j>TKa6m8m{oaDUvH=|NQ=sl&nJ0JcKR#6>gBz4b
z(DN8VFHMpq<$WK(HGy?5f6CC9hR6JZx0<w3C`WzI%;4;wz2XLs^3W;2gewa@3p*Na
z;AEEq?5x_vJHM&Lw+gkWGQ5}fpP~-?l;Tmv+>qPeT!E9!3~Al#RAJ|Wk=(?E;kd3;
zK+B@!xK-Uduwd|$aK{Qo@@<PT6<;WYOowuA=Rg~NLRC8b{Apa95`^rgrDbar!BBzi
ziY9&HLr)ALS#{qaro#v9tdjAnOd~pewc&F9+2YJG#_SnThYQ@(&~<7pCw$NdxnqCu
zak6a?BUj3KpG`*Bxl&O1p_R|DlBRc;GGE&AE=WJ34uvZ<u<V`{I4)emdm8-^iq3`b
zx>avk)=`-_j_(rs4J3o@k4#XUrA5kP2GF_cJr{HFEAQ%M#^Zo1Ok?lCiE5EJ@n|8&
zP8~+hf5^br`<i$({SvwiQW)6MhO&`MpyIg_*tMMIwB+;Q#Rh$9d)tVPF4=~O(NV%$
z?Q+&b?1j2?!n=mX^Y4lfuFVvXRN0T*^wbV;nf44a1G+d7^NmLjvaZD}j+=Vbii-Dr
zhpe;hxOd-B>O0GrcMaUZO|{O#aSO(yK=_n@UUUPJ&MwD2Q7jL*BgSF>iAQ*Ok_}O~
zy#nU1NrVonNA3JnxFU94d3t8zzpxAx#k+{Vj4Z{uZ==Eg&k-~ZV3~mbuHb)tmbCWa
z7|goJqwKyHe8Iy!;op@K)Z^7B{PO)fEb%ZTQ)epE4Pg>&ez}TEj#9=D4LPhZV7&8m
zbDDQsn|3NsL3guryjBnE{d6uyFmi>}j}1s(_Dytq>IlUGNoo_H!>6Q7$36523N$y1
z1$JTf5l>b^EV~|u+sl#)myIxfF}uGEx_I&MCTJdUny=m50T0>FlECl84FxN~Tj3RF
zKJ@}la4|%IT54{|-wX)9c^}7lb%LjBEH_di8Pdmp#++&9G?(>43>uB-5|0<Cu3$yG
zB!A(q@eiO`QwApF$bn5|19xn5CMs_3#67;7VDe>SB09AVJpcNktjl}GP0Qko18(s}
znt5>JO&o5|Hlw>@TEVJbh2GhwOZ6Ky$d+U!qUQM*Ked_B!{ON&aepuO?&SkGx3>oS
z>h5BJcNdo#(8mX!NCn-qXR*G2Er!olgOK%Rbb@pmsH|AXEmi#sqmAxDNV^$Cs91A}
z%J0SU4a~K<_6>gd_y7cB?mO82;Ni-Q?^qVH4(FWD#Nk&g=$TPNY3I#x{Omq9M|pD@
zD%LmP<rc=2D#?N*VJmp%-{stvHlqsLVL#U#N^@+cq2Ulka;%T#(-~)4XUR}3+Q!)H
z1*Ryw<sh7vzY8_-#x!iH487xQMMh8P2kClwnjvb2CwptLXj%~VbtK@dczNPqD2<t*
zM9bFOgYA@haL&o)Q-(1%&pH9v4NZbwy%!imT#veYsezxiK3!C3MC<3RMA?~9oF{Jp
z5@UaYMXC-=>SSkvzbhf*zgF;lxdtD|L}PU5ZFnA&4SjoC;Fh&4asPt6>=j8`{%tq=
z{4C|yXPJ-!aROHq(gk-}|F(MzyS642b8d4az|G<)><?0-5$-nJF&{a4Qf&z79K9Rl
zn)=Y`R}Q=g?m?&>f=9+FlHStaST}ePbA%tT?$2ue*$pjPU-2E1cE5mZemG8CK9oMa
zZA>OuTES6$Q{sO(nG2cxl{@-co7g9u<+D8(gUs4O*uPYo4DZn)Iv09*laop)xlNmj
zWv<{xtK+!kq5*ridk$yr=i#a&3vlRjZSr**V)kDhly&!m;}@3VVEJdLPQ3`LK5ElR
z4oxt|+?2GPsKF5ZWbT5xIVr!F05K2K_%v(g$o%m^=;c)m%aSZ;mE<rIG1Q4KpIicA
zZ6a82D@hdkb*OD>7i1j0j90EQ7Rw9^;<4rlbPloQH0Co`iu)Cql8}yUhJ?0lf6&E!
z5qbx$29tPinEO=?#{_j?;kg#<v3UZiG7s^~>PzTwz=#&Td4-KXcftQCI@7Qkzb+1+
zM$Pj)&m~jlI?vikLPA0^)8CkokR;(qk|aeEQb|Zc2z8#db)=GnD3v6cDj`XdMDKn-
z_~znjIM1{9TEE|Y3nohy23~MM!RmAuf%7Bh5o4`smofW4d1bm=y_j{SH*htHJAt2W
zM_dg*W97LR*d?n(nl-m^)0u<IS#A&t^p>~`l&F&vr;{<<#)3u`u#U>H9PZ5ABe-nE
zUR3teCvMT1{PVo~n1CzctD+6{yqCz2>$(Y50V#a5X+Bp_u@sz@Tllxr-=pRw4PrE|
z1uobO;Jc%tcr&RJ8S@oZIq1^Uta~b@ZB0!Y*WrIpf1t%cj<{`7DJM87;L6KmxLx!N
zYOwp(;=xs@*lR`tcbZUthgwc0!j8UsR0lJcE0Pf|R^)2t8%$?&zRc)X;BBr<BN_X>
zq*j4G*>fCpszzbTuL8dMo;umhxD5+8F$Zqt02H6QhRx*@xZ9_g*IcQM+q6fOI3>u?
znszJf4cf==n|d34EOO9aVGH<mmq6BJIdJ>N@~G>~akZ%fk^DY_d)q8%Md5jn3=QX`
zqInc-e^8jAt<I<O=EUO9V{T}JHYDYZVEdR{DEK4*gWlWR-G_jpsH0-@g(+yalkE<S
z3xpAGEoiC4iXMDlh%#YDR3Lv=>_6Fv%M~MTnA(OJK}mR2GYe)u)+Ig9e!{0g3grG~
zb)q5tkSojD3qj&u_<ElCF5S;^0)+`KZywkZ$NhCE%+1BluI*g&c3F(uBZH5fHAqJ3
zSTJO62|@XiLYeFOw4FSJF9{Rjzk)$@m$eFU$UVqq{A=cXvP1Z)%rX2DnR+Zf`yMp<
zMBD>Ewy&&sD!db;NJU4Mi;eE|V(0R9+%?&VtX^+S<_tEWaTVuq9P^>qT$I95OKKs<
z`!(1mJiw>=O61IYT_P@K>}R)4Fx^y-IEomvO-`GJE8C$>h!TiK{c%a@ujW_n%EIgu
z8L(jVZ_FNL0a2G{;3DTvDA;_L_isPX`7de}=F2G4<93F0rQL6gT9FM;u5Z9`f11(r
zRU3bI$`;JuupFEg>(Z{aIFMnC*uc%LthYNE{#G5w@DpN4$VuZII^XbP?BuZT)Gf#u
zvY&gQIEa{!m8UB&rC@XFC)6!s`O}9xAX#)B{H#8qsB|>MMQG8U%^&cgnFYN*OhDXZ
z)x{}aa(J~M8{D|Wl>FM#g1Zh@;`jsRWZPE81-%d@RA>(X-|6hL6|TxP_rDNFYj(hz
zE4diY`eSoK&g0!O8M-h>mAEgvjgt8r#KPbG&|_VVZr}cjBU^I8tyhCKY)c23HbWBX
zD8kO^r+I_=Ph9q1C3Ji>jODc5`RLBq_(Z7$x?X(2Ri(=GQX1>vB`ZQg<1{?#@)upZ
zvS47A0;%o#3;Anpz%6tQcSH6V=p|l<l6OBb;It`yde@f58@HjawIxokGAC<9H=*kq
z>($GYVtax*y*$c_y1woN-HmL|({Bp3XBMH|tk)Qxdk==T+JIWkcl^6Sh01q50e71$
z95R=^vul3xO~-CR_F)x#9VSh@qQ1bBVRu*uJDE!=@(1DQm+;7gaY@h2K$Sx?K#)>+
z2Uogc)+ArR!Mcz!XCDU1+S9rp$;_Rw1HHHJ#^M)~vAMQRC`eMq>=Em*%Ks=Qw7H8a
zefw}%pd1nK{|Q5+Cg6lYMnrt48VsiwVePbuI45HRx~aQ~Uw)GzC+}&{8!s3$sjm+*
zN7g}|66+m%eSt95J3PK*p1JD#9FeZW51Wobhr=hvw0^?ns!d0)qYt5~=NCVw@&#52
zRjKE(fBffVj42Qt3Eh$Za2@ji+XhIH+;C+~(;AOo%mW~6RW*)uR3`}&1++r-0>sa=
z!GUH=niBdGhcA_;%0C%rAnqCOwlj(sRjZ3L&i+KT!yj=$_#?CuGB@L)CCo`^0l!2R
zM5AE|KkqAJyq-VKwK0FObJBkhW2H&ceKpC^7-_0->pE`sc#OlZvR-W9N^meUM{kwi
z5S#E5GH9x>d&2>+xICIK**6kT&M~A3rpqCQ@xDa^C%EkPIDWw>St{~5Dr{Dq#erNj
zm<C!Ct*dX@u6QwcF1y1&)=?$zRy~3zFH0dKv6bJMY)7UTbwk>%xp*P28{X^@kc>{Y
z&)l*Nt}jl(-XHa-Ams%Hs`@Z0z5+IFFd*Fnt6}53KD=aTOKhT2VfFUUD6&{8mV4ER
z@&)nGH}Mai9A`}rKGGsZg{*_a*v8E{uQ?|VFL)R$OU4g2BLyqEv2w>HDAH9WZR8H`
zv}HFhd6y;}^CS)(7{}KmSp(m!RUlK77`JzJG^aL?b;jlzu$i`u%bnow7}L<m$+Ek1
zU7-?nni9bC#t%_6u3CI^f+g)Au7smmx1(~N4YaD{BX?AdWWVX-jP{koqG7oxc<~MY
z4Y8(zS?3|f@h*Q+M~5~W0zbU_4EhFL$2iAt7}qr#e+2!&s%@<6<9(SMnPyH-9sYwF
zkyBvkYA>+r{{-QNYV-q@!kma!5EKvN)mqrUW2p(AE<M~yGhLE((i+?pmouLCb$*Fb
zAG%I&z$3a^v|QPcO0sY8(^g7w%AA*Q<G*!ybBr2=5o$EN=o<6&SmIrol^8QM3x;<x
z))pTw?p&7$7xXDCF;sx<$4@cVrU})ja-{snIV_vSo)5=<z!=F8=AL19Pxo=?nS7NO
zyS;(tsdvR?TfX4sDT>6?F$fc~tytDw0R;1$`F+_+WO(WwXeoUSDbhT*ptp}-JH8uz
zz6|Cr{$!QJogHF%WoJxKUJJ?BScZ)`>5opxhn*-*jz*k>&gSLZq1%i<GvAEyB=>?(
z+<Jb7s|-Y?e#eBoB+l+^1x`LEM>p5-Y<@EahWJR6k|q9-v#tW|n!kfUXTS5T3l%tb
znlb$-Z$s?c*`0dkN7PvB!~Nc$f-e@y(LeKVGp_-=Ukjb_MUWBgnezpl{%Y~D8$D69
zSre8RM>EcoJx=i#(BN`KkZ8XbfB1a@Qol#Q_oXR#iuI=)>%@#>Ud_+kbr&U5cM4s*
zmFN;nH~jZ_F~$^FV9Mps{FC=hcw3X8#svj>t|A6!C^2WzFAX|$!A{6)oCBgB4{_NK
z3ggeSJNBU)c+P7O=?Odvp1!rbaygs5ePs^x33`~?wG}0MP77-SYjHtjI_%%T7^tCo
zV4>*EzaM=LoA*uUwf7yy=20=6MZhl1%c|iPh8oaueR(jS%9F|;Q(=U=3Gr20gp0Im
z!06@&930xh`rgt|cEb}_J=KEts(3W|)d+e^c<9*Jips+tV`udqKIzePNdBNfBUR%u
z%pd?xPLw8{Lv!)a2V3G7d>j0#Sq~-E0guj;fcHuTDu|Zn9dAAd!83oMT`Oab!$Qs?
zG=p=h+rT;5On{M_%&E`Mi(K{)33s+ymTXCA#N28FmY;i!7W-4gDZ#b859a}n<~N{i
zeJyvWOP#omeTS9I3-$j@d|c)cY?^!(j6}%hsIEev&~xx(fhq~i(xyI@tN5U0KcF*Z
zDA(WM1fH>te1q+AXo^X|jn9>d<jN7T*X=rpICuz~V=BcGCr3^_+>k_l)T7A`Cn0x&
zIX$t_oUUW%Fze@xCt|vk?{v7#cfU8q=3GVaV()*|xpy$dxQy#~>W&w_--Nt?^W2A`
zu@Lm=0=DdkXROmq-g`woB&C)x&cS4Ky3ClR{{PTwycreUxh;-aWkn0@s=2WX*gpHX
zEDh6;gL@^`BsbtSUhrWY<dJ$@s^SrNRcKG<w7<r|`46yjawhLv;}89ndhl-G3-~p^
zhVsWJP~%4;<IW?7CiKJ3G%4z~Vk(y%RLNytx5b9o>+sFjlrH`$LtCZOaZKq6sK}^-
z6#f}!@VJiiS*FW<Ngn}GD&08l=zhqv*J2%t$E;(ZO(pYJ3nlsI#bIM@Axy59+t9oN
zm9qa~nR+XH-Pw)@(o~5jD^%uX7Vr*!jAJy(16po%f%Dvjpmp*Ty6(7!jlE{z71f62
zm+$e{2lH66bTst5ErLbUPeJp)t9+011IX|?&vkBFjG@;)fv5T$a2}D(7hRz^;i42#
z$f@US8Shcu$&L(dUJOeP*}(RBg(&^^7O>M6_$*fDv>sf8PX8PH%@A8ESq3~_jswB_
z$-JJ#3*&!uLe=>cUNYt-CfIM}?DSURaCW9C?Xjgb1E;{pqMNTWONW;ilt|v=`S3(|
zn{n)~3BxYFLz|*g=zzv7H+ex^pH<5T6fA@C1aI&ulLpy8o8e{31FRdRO*<a_#d-&M
zlq*#vO5-f(kavS<z=>fvip>+8n2SviG!jIAPKzm<OWR~J{+LG}=eDg->{c^J?5UT?
z6%SAdl6nqXwx+>V?;kL&!-&kYG9?BN#z635FIeKT5&DW3<2L4&47*s%CA6&JR*V#Z
zb(SgdV$UbA)WGQbzrkz!YGle;{4(w*PF^cTPM#MrMu`j!e-H;DKTk4-ga&P@9*nc@
znG(6-_EbFj4fK9E!lk62<7fC?!F!t+FJb&VOr6<-Do@5i*n#t$sr(O=<jRWoPFEo#
zf2tCXvm;?rpB3rcFU=jY&?JHnGsQL|R6qmuSdK+3em9xraEg7!T=03^B(F<eEoR=g
zAM9*>mUS?C?%_&y_FZJs0_WPL$QHpXI2y@#GOrBb&{!!lmAyw4Vhw0sl^#dlDpJoz
zP1<SjoU_bTATwhtG4~i_ge;QqQBy?F_*{bZ7q((eZ96)RR)h^-qEUb)1&(Znw&JiQ
zIyNiQhVd?#lGuZ-!<5M!(^$N5=od_KU|G+)E4cf@3A{W`j*4rv=^BkDEMJla@g@iv
zdjI&KueI<`=?xxWe&)zYO>lMGUKl-s?P5Y^L!H$*RG@*l@Rt^;Ix?J(R8wKbGZP}v
zKO@X}mkv>(M)ckudFsWTggu$h@W4w0^5ph4csMl{A1YdsiSK@)#<n16oZO7&b}}^M
z)ETko+2goIw+MH?`-tB|WywoX3s!x7!25q@?)061IUnvXmwtjV9SS$|2?mjzv`C-o
zsHwxWvoa){x!($&RAZVh+v$&X<}{3|c-cW0AbWfT`n=ENS~T<E?n@h5x$76kvi|ao
z?E<P;s6#rpSU{{<E2z{jh5T_YsQpqv>hG#h8;K2gM_ba~VTJrV<5F}x*n^qd9znd1
z9(}8H3KvGG(B6(1bX{bHi;7rIOzty3d>YFO-rfdAo8Ln3#17t5VFZrY`3ODhbHHW0
z9B~^^ry-ZqFudX>F1x{DNt`0fb+TU6Egd?xv<1C|2=DA)!~G~@VtvGf2C`23XvQeI
zJO2#$m$<WD$u$ffH-dZ7U`iZT#US}+MU4h*sMn%WSe%lNI`3ZM&v$b43p=&=OPaW<
zWmoaky(aK@osKu!dtj}T11Y~5#U}{~WJZ)hra>31n3s*W42{WI1IDn?HK$LXGG<qo
zvv|T>Ln3`zhXyUYiXJaysm9%5-0r1-$8!uw9zP0;dqkLCp+H~nvL=7RpJ3|2SXh}O
zMZN{Go`m%eKD_7!cxJQb=%tt3+8}kZ?0zCtnF-*^x=NOxJPpROmZU0Ho@*RG4Wc@%
zvGFy_8V$X~2eM8^<wG0lGkY3HE}syKhCUQ>w`9l~aR>%)ILd!Ms6t*UTac`*C7`&*
znzZ%CVdA4E>^<LrMyWsHTBI10T^FI1UkN6<T)|w{k!)bSh{j7Xu;>zFMYOu&gVpT0
z@>-urf}4dN!&UImEo16_Wg&*meu@T%r^D`d|5$&zjGZyob6K1jSUle+j9&g6OP`2Q
zbYz&&_p=J5+$`g+%#k9gJ}dCHWB`&f<gs(!MgHayLn5oh`mJH#z~<OySm5Rhohu)3
zj;FfW9o1gEBCr%pW}k)DTc&jRln1yZItRKWEN}0+9}gYTBwu)4mN`3$_3PhoZ3c63
z&I|TgZ1=<>jZEy7H-Zf3*L()O$KOviAl?y$*dwWez*YLx`RXrj!;0e&%XUVo!}s9U
zbFxHz=_SlkGeg11EUuT`!#=!F!SJ3>7&t2sqb6Ly=eNtyBSi%Z^epKls!RGqHb6_~
zPjLEa&HoxxiQm{gUiyjzKNwG9OxA_C)gux2#L1Bl^dZJgI1jrvnvo0<;{bfQ1Wr%b
zu3^hRVaA^~;+Sm~G{Q9k%Ln^|({<+BYER`-V&XV~?{R0pDQ(c!p9UUDN#HL?#Eb(;
zLP5%IxVtMEyDDD8wZAfSvp(C)+YA%er}2FDUVB=|SOyONo!|!kmf(Ufy<GlaMVkDd
zB{g7qfqKu&Fx$X}CNI7Si~S7ghqjw2T9{WTP+a3O=DsG~e(5jsJ?sD_owaD8`j?Yp
zYz>3-nb0YBhX1%Lo^hd{2rZ_u%Gt;@;C6ThxS4(yH?28?NeZ2~&&-JSeZLFItBXNn
zqYmmjm!e%3>opkufwl?NxZ(GA96gBTL9fn0C&7Gvn89aEkS*uE2i4(%{6>89HX2V#
z60s%ZJ$4rNaEYH1(d)rO+~Ph1n>CZU9vwZ#{<#V<sr`K0<k=wb%i;_)5yMQcadG!N
znOkBpG<d%O-EadMo6Wdjr}R*wHi+-(mZ00-W*qma9$Lcsz}u0zbyHlRqvJL<=Y7M{
zTV~XR?FEAR+u*{vT0E^{N=`68am|ox;KTOz;aN4Xb)hcl{n5t<uNjPT)AWhpY6aJz
zQwdH2M-VU$)uYvaP%S4POg2S;Rd6UapWe@QewX>|>&ei!fw|NF>*aP#HXv8F%F%`D
zwnSF83{wuV`OO3$@$i@TQCc<+ialDuceFB09;`+k?8C6>iw%yO(}X7ShhfGyb7=b$
z56$xXdGFX(2xYurp@RZl;ip0cPmdNRDqlla#Xbm}8wu?jKf-f3327^;pz8ima5}aD
zES9h0POK1+<@>B@{NTqB<_(M|?kjvg+<>aR+y_tX9LVPjMs&vaA?Ws2lQw60aPjZ7
z>BUM_(x|Eiu2#`_VQVz-?Y5-pUp@9rx52jjq0pCl75XmZ19N!MhS*71CQ1dt@JO-7
z_jdeu?E<`Gd_OJqI>=Jm4NHEUgng%~uxJ>Yk#AsK>fT!3%;Y#~CR)(mwr$Wg%?&lQ
zHTjDwibUP31h2+4!&=5HeNk;dZk&IEDf3mSgT^85JCULyodcXf^*XR)_pnQYPNRd{
zAY5n5_Vf0U;sZ}@i6HuxP-L7ZmR+C-87~_+lNFcY%w_hw%rP&*x1B(8-(bQU0d-t;
z3^bNT@~(QNF#hI!)EUr+X@Av-!m0CoLEKGFeTgn9|9Ki}tJvp6O^Z*l>tp?lq5QWr
zQ=%l(il){I#DDHGu$k@-M&@HsGV&*vcX$N&A6~|1OdsH47gwX!?mYab9S;khw{gx_
z2&5NFlZ540xG5Vx!cm1mRR6gY>FOSUZ(UMkR+$P(D6i)I_jvIOKO50?o4RntP9Cx!
z>Y~?|DlA!+2@{ANt!=shL1}M6XU10ew>BRvVhXsAVS@-^dnPBwraBeA5o?qVfJ5^|
zUPDyFdP8*}ZEQiM<(gq}u{!Zl5b-r31jhMNs2cl+`DMHdBgJ(P7rq1}8#K85tL13r
zb`>_42#DzJ1+f?F@qAf%39FP3VC2O@JZzyx|D9odJHNfW>d7n=RC~grt|rJmqJp1%
z4T<NUUcR7Emd(L?xeF8OV5sdDXsY{)?iY$t^kFTZsAfp3@7a<NelBP!s!;zor@8d+
zs?;m548pn$P}XAuM2s##%ULEg^r{$w^WXA1vMe8K6oc7}qw1Ld7n)yMLDbJs2(B#Q
z@~af6>JEDn=ysnm6^+4fygdzxQpX8LEJ@t4`&jZ?3i1@pSzjR9rD@n|j6IbL&MOY{
z<5}J=Yk@lI<fzaGBW#G|`f1^`JIchT_AA$YRS0r74akUSdGa7ip8Cn_Q6~Ckj>qG|
zcjG0{;r<P7ySt&g8Ov|HJ}3OJ*_LHLl(_IC^D(<$k7_*I$_G6C&3*A623%7#cAv<_
z2EDh)TO^?Wh#1s~_Xm-T5vuNdg&xzT;HjEDiDNl!!Rl&pg7;Qft7%4~lRm>pd)8l(
zgm5tt9h{%nAX=_qK(p~1e=)F%`Qy(BJtOAh<}IaI`Pz^^aVv)mZ9_;2%;BW1j)Ews
zP<ZX49L??1fQ*P2yo2jM413Fd4|mC;|4HBvZ&asVSO<~=N&E$NCUiDj0~v)G!j|Hj
z@Rhmh`w~lF>o`4Hy3>LzoUTZm7rw!b$8E^$LS544{DBL>Uziix0&bo2grY~yykDRf
z)}{|Z`OYq|Zjd5R9~qI^f{VBXO0cF=i!K;j&CggaK#9&L@rRfxIGE-9&ULZb!`xW0
ziMSe_4zrxsp%*~IO7N0~0V%p<O={ijsnk3L(s_0|e2Te_z2iD~NwuPQZjBkW*ES`U
z)|=4Lj}e+zNAWg$GhqE(bz)Rs431mgK;f!#)?Y}3@?HV9ANd2F$%FBfYAiU=*L)-E
zXFTjxCldZXpSmmyELL0-YS=7>4ISG-Qm~y1*01BsCrHtV^d^)nSSbuCmj|~I=E7}^
z6`HZbnwG3&=giGx`3}DX=(I4TIx+j8BkejqdaOxqo?vr9(HNI0SDG<+ksRM8cOAib
z96C)H4<+ixXrTLySNVRAodK%x@rqOEMxD@qW(e2Avfvq&SH!xthNQ}9CpV<pj0SnN
zp=Q1ok!*1lPIxO##^Ej4xL1(~-sN%@fu0Z~k8o@C3EUSFjh<yJ3%03|3zG{5%~A!j
zYRwniG}x3hsU3xY2e)`7mSMGg`3;?T5x>$^pE^l2xZVG?L9O;26i#60^7d%%aaAN1
zxK?xZL9=1^>pRf;P=gkH{?1iS8idl9UV!#0TQa$>9a~4*F@IQ(c;IymdJko}tm1TN
z-l@m0V~mCLZfTlwTM6@f_ra+d;mj*_6SFqi;+NLNn3Hf9dZjbCyP=BI>E~$ff@dQr
zePz8Et{y+!U>u<bW_0ZG27Iz_Fa}<mi{sLlple(Y9L=F9$yh4(`60u1-Id^dt$vL8
zUctG}Ou`w~>U3xufqfln^!f3AGzy!84T1r9Guo8+OJ3mJX^(O4Bm-(L`v;%$EK9tz
z0|ZLJLW^ex*niUpE)BOL<L0M=_C0og2x6b17<JeicM|0rzQMTTKj2ZbA@ToYfTPY<
z!Q|g>aj%*dRrx+1J*I7jA$96>{4WKXZTW!9NJ<uV4T}Q@|Bt+u&oh{qqfY$dpP*Bx
zm=6u?f?fOU$%0Eixv__AY4bcOyglw8WM}{6#T)aW%KspLv%rw_6x@aut5<NI<yN8#
z*i3QkWZsnd_8!kwBPpGCF{yMU$VYF&fJ5t{?2ZM^Rl0)GUxV=OHpW%D@su-Wooabe
z65|p$qWcgP>SuThbH+S@M+3~yIE69!$}YR4Wxil+o<Cp|asY-bm8B7D^iXTmBWSkt
z;kBwVAxLuoHmxtfnK4>K=)v}|3)PqhIt_~F?7^CdH<+4fh_yZcusLZaEE&EDQgZA0
zeD6BMWg}1*S)a|t7K0v_F^*{nxbYRjRl8Qf-mng|>ez-}#uX6uCzDG!5XSAQQ6`-&
z2lx!T-JG{{8C<$$O-zoP!m6>V7<gkQI_b3Fz;9I=GcKOF3}ontCQGu-?l~sEvBRUn
zJZO&D&d=%mh)Vumm~lRr^JE!{gyJ~d!`vpG>4U+1w=UTeAx|ulKl2A?n$gUPPAHmt
z2K5uq;K;rs7#dIs8a7S5V4Ef9`hP~P);c)sD#El6<xrLWkJlQKips0xscS?Z98P<N
z!AnE9z}^ASDYK`ya;0h0^fTZbGX&goY9YY(2;XTsiSOTa7C!7_`90lX@S*fPo2TyK
zGk)u#6StXLwB`@QPGmDrz59^Kc(`dX>o7->5A#VK=3MLnz21iy@zR3sDOV>|XZ#?l
z<rL1begg>=5&Vqz%;y#Qoo^M|5hM2oIMpU$9lj<m<!3*ae6<6LKSVJn=zgees$@M+
zPFzwehaIf9ka$&^>K51F*iT8=pv|($cD*3U8xY<rOGTrDo#0k9ha3M~n>G!Ohk{x0
zc&Xf$4bfPBlCj()yaK>$e=lxmUBH|Q^Mr<W=EN^SlR9<m;pluz(i7GNAD7)=d+kmf
zUn)zwUY>(;-7J`KXaNKUltSxMWolZiOtc2qLp;|CaqC8~UQ9HEIl4nZM<TZ+?h8mx
zt0OF!3EuW0?Cv$dWt5hR+m=4%1NwBZyC@l*mW<&eANMj2O(vSv-h?sSdu-nQ7w0#c
zkd1f{=O~WBz*0H7^OGFezRZ%k{}9lO!J3@+*-)$>5dmEa&EUD+5IpZ0P(R1Jtoy$j
zyt1@te#a%8U~m?j)7m)6v`V33C+jSP7=X)LeNy+a6Ju+B!`?=FvQTJ6(lpb-=II@D
zGa1Y;Fp@$AJ>;b`b*PS}6%3Wng{D|JI`@Vd>CANpU&B=(x%yFD@4J_aYn1{~*QLU-
z2US^DDgZLNjJb{>%`o5CmQ;=P<7;gnKvm*Kt|nv_=v`rM@PnHCf;BO4`-Bfl5`uZT
zDJrx%IUd4Rmf`1Dy5#FuV`9}@2`wiMW6>DKq#FMlF37my-Kq(gGl89-Ufh7Hqd{D7
z`a&o_*vvT{;Q5-WHq^`53o2%e!#BkoX80SzHvfz0U(X9??^PldANIqF_%3ieeN6b@
z4jmfCOW}>hFJbyN8xql+1y@&8<H@g9WWvEHT(vV0mgP8L@3vU*&+ZXc>a7A#vo3CU
zdmgw(*06ibZ`OBQ0pg#%nE$mOZ*02&+u6PMzgZgeuK#&xSj@wCOLdx1vYH!yd>Q8b
z_`s!kJOq`Mh}jd?u=~PT+^yP;zNRkh8L<a{$VB4_!&7J(Z9~jQsE{)wl;{_q@zBup
z4J!^@XYY|*FjIbwJF<;P%8kpA@0|c`^gE~WQWxsfjc5mB>ec4IW_(J<b^c>Q<7=-$
z)4W0SmQ55!IJ%>!yamQ6m|>S!32bt)Ctqd=aE+8HHJbGX^wUj<<KW-0=)+1}J4}b<
zx`eSi-T*B6kFk9MtQjvujl<WC_~U^FnUnJv`oEi^(ljZ0v^W*i;3IDDv?ubf>fmVO
z4CbO_yCB2a5Xygnnp<Yzq|(KACcWV_ZawDmCl_IpSp!I>-Qa@T=WuG>5-^Ef2|L`L
zqW|s(VnNwNE_&{J@Yg*E8C!z+ceDSY#nwzHWV5XIXhudqW|=9zA01N9@EHTvbZD6Y
z+Br?cPGgqe(;f@6Z=J<ijK@0cr4cERHNZs$YE)2Yj$IM|!2i}n=vvM4*3s_#?B7Rm
zxb8PF+?t2MSM8|Mp@R^qR|$Rdl<C<3JF>j<HgW;XJvl0tQ+wG0{twUMnV2HnTqaKi
z1CIPm#s;suu0?j=JBxy0$%Wy+KSTTLeVCSg2xqKIgE*^|m_2VMSXF+2A`eUI7O6|a
zqDnZgfP9o~a>1~Lv8bT(mKUFjW$(2K=rMF58mtg=L1rv#Etvt|Unr8ix4*b@og=()
zu^w4<jpZ?vtNCETJgCmUfGf^_huV)qbUXD`yhbGty&IRIAZRh)F!>I|l<vdM?N(Tp
z%Ur}>MXU>u&AOmQm=JoGvt}%9$C!^e^PeK=xceU_A#(|P{z41QPu%I#jBzP^2tH$H
z@^yO$kq57&Y49pPZuh=}5OC}q7d&nvWR_OI+d56sxt;MqdUK!{%W<wL>(kE{fh_Yq
z`9u!oVqPdyN99j2`WX9r539K8qil#(q!=o^3*n=_2yJqWssH#h+{zn~n8Vx!^Ee|S
zUvLbx57^L<U*lj;u>qC59LqQNoB_eBIh=*ze(u;ME7GH|5X)0fpkz@y$1?{~L=f|#
zlt%(~GDiAlFL6O@0w?%$q0sqpBQNah!tlF4p!xVPu3*<0`0!~EuJ!3d!5GKFz9G5b
z>t%r5XU(zKr=Qz)%Ye?F-HCpMtMT8=DcF_CTsA9HA$0936cue1E6_wPx%?Ca-?HVD
z{0(W|D|H&h+{crPOi7+yGFMgd20g!Ka2XpL`RtiVeDmpIu7$BjIt^R+ZE2^le4i@f
z6%UwRrA26x63O~H3s$K{gR0*{bY1lxPAoct^^2<b4KL;Czq-57JnJhLz+3|JW~!1O
zH%w?r|8@wQ*vPv*o6oELdIY-lR-{*19cH5(k@Y$Q5|R!F-W|g&yEo$FmIT~(<q+1d
zHK8shGUWYZeUdT#qd1$*lj{{5u;cJ`n0~~T1Shf`bhtmap<*UjY^W2fNN!;L)jM2T
z*n3c2o{r@^oj{K5nLYh)a2s;j9AiGq1v^e<*}5i|{ELa`;4mFta4N)K_)Yvv{T?cW
zmGQrsXC-<40p^pd!WHKp!~0bfe_yP|BO|29J#TiOAGr-YH2vY#DjTBkVmes(EoVR9
z3xx_p-lD9d4ragB!21JizhcXAB^$3mkj)uzS|^KhOwXg<qdQ>0dJBO)|DZ|v4P$1k
z#u<}RK;?-DUTw9cEm<}g*pLKS%wv6Jl`XAHupyxx6ndt0gT(#1Fkpfl?*42|7wk6&
zr-ETDTP%$R@mrv{ya>CWKf(atUyM<#&57IqR(?03K3fmL^RbKZp1VF3jrt_qWMxjM
zyaDOE^bIWTU+0J3lVwE<ZK5i8iW$0<{GD7Q;^u2iOFpi~1YHYg{%ps0Ug_s$PyS{c
z>8au`8nX1sBvU#@G64fatKjAeEppb(j4YpHPJ+fi!N1x%WMGgfZG5K$;yiZm9%+OF
z2h6CmX)^zZxvy)MzD29<8z5I|LhC(tpuxVKP*4yD7x%~F)r<R}VR;+mo$BF>b?(52
zU(C&*{0R@U*-(t%YtAikFTh!KYT|E+@21I9lU@3BU6(caI!c##3#;H=ej#XB$8ar=
zu7UppOWsLxmJ>QrEZBLC|MS6~Y`djS|I4!>8F&#Sg<Zmmi&9i)5zAeeR>S8*b|hlI
zHyrk`C-s-Canng-8Xl7gQ{KnIvUOg##rX+5SfoG(1z3<LjrY)kV>ui-S*qH^GEgHg
z3OC6!W{+Y8CzzVeuh_z}HPWZ~JYNN_<qrGVeQgDjas(xxT!oo8&FEc|VmPR7O_VT&
z^_UvO8R2%IVRnJb8@3!Y53&Zsk7t;&?gV)1?1$JDY+m790(%+Xc4vPY`WI{ml59o-
z5)3hUW-6TUIE`OeFV8)*4otO~C#miYzrjx*vwf#SoU{Tp_>=&nbF|6eQX`VtXGYzc
zO8E67Wr(GLfVisu!HiletY51_`|lY-f9pgrxoD1B=h^ph{S<CS{7)2_^@_(l3c`nb
zvQV<?j=1AcEzkf1`um9v%g<IrP3{zsD`c+HoD6)qTAzG=s!4*bJ%Gw339z?WfwtHD
z1flp7`1u}VF3Sa+Ut2OXkCfs<!>Z8l?Q4jZDZ`gjw8_jznq(t$Fep3>fFW@zB=5>b
zzUrMlSQOU^N2cf!chwl!KT(9P<0CQnJ<mHjf5WcdH^GU`OuTQthEC^f&Nsz?b_!~_
z<{uBaOua(v{X#j*RTk9tqX}8l&9X{6*q^j?6c;yu;Pn|^%jG77GCpBc{cL#fOPZ8V
zImVYKm!RmZrTF%V4U9FR#20@bj>DzO(098!{MTVb-P$e)uU%Im_x#vAVPdB+=kO&^
zn*Ijj?ed`aj5Lj3{s%j*|G_Yob8zyR1##KOvj0w##GMDmbJM2(#YGACk$<0nr>A7$
z&h^L9zpj#xX7`(oA{q9)I06|{d-2c=8Ire5fsWh%9Xbl5@P_F^Y!ja6!WR0#lG90$
zcIr2VTmAzJgK2y!F@mGZn$Y0&Ffg(9#(%$`!qNMs=rGY5ZB8A9u73}pWk(8{FR&&u
z)P_6^wIFVG()_!cBq*DT@U=*v#z_8hMp4XZwDK$8zRsNfZ80FtOBeFZCgZuA7j;O}
z^sm^lU=TG3C_>q@pV4zzI)C8kISf_QrQWht5FO);y|KE~h|L|Q>uFPc^;8r`yoCBP
zefnc#8JdPjQ<>SO#8n_hc%(wtoN_?-j!2NicJZHrDPFx31OJZv#E8ysY&I|lL*?1e
z$G=tVc6KEvT0cyjGpZMB8rfOtlYp+szk+#EySdDJkub`m7w$fc1Fz5e^aR^Kzh`sj
zpEEQ`&v_YIyx|cR?lL4dBCp}^o5m!2M++x<$BDnx9l@R_O`tPM7wp{`%Vf(l6s<Ss
znw2_vD<>xu>1hbn+V284UXcVDti#aKR?M5v_P8y_ad+x<HjDhlzj)1Zp-$bxr1NR0
z^UaXjPt_!WF|7a-0B*T{#N1L_DxhP9Yr-Tj3iiQJB{S$BErlfBo;=U~%HC7K{BDO@
zn36XM{c=BpP-KfYhIN4;rqHEn+zm+B;SX-ZUkhJoNYkM`qk&XelHfBeKegVTE3jOT
zTc>D|gr`MZqnas>2)c;YE9{8VX+19Mz!waws^{BcD!B+4h5F2MWjQ<wtDLuSBQGAq
zz)H3YI^4lKd#!;bt2aZIeKnSS=tqg*x473hj!!Dt2r=ya<94-0SY^-lb#I<?9!mm1
zf14^j;(8bb)BRk=C$Q(}(He}D-;YMI(lp$r0Smnk;lpiG<g+DXBfG>w+wWx1(lMiE
zM3yF>dW4(m49KYZpV+o|3rsqyLq<*g4vUh$L*(Nybe;AXdiQ+iQeDL8TI-B$0$sSe
z<SEpz`NGc}WKN%Rldy8B2Hp4h54P$2;uS{e!QHjdXfMk;5+jxAd?jNd&@^@NzxIZc
z{5M7H8MBY;4q}Xm+ploGgMc=4Gp^~mf8t5GY9x^_M2C43&_dyW@K?Dpx!KI_e`{EN
zkMrY)hsHv{%XGM15R4%YlyQT`c#vHY3OBYs1^L%c;dX)s4Sx5K6QsHdza(aX+pAz<
z=ZHN}ajF+&Rr4?*iFx}JN`(qb9`h05cTj%SX*AHd&3*JTCL`JlvE}bjl--kx`R{A7
z^Nc1s=wHN6c*d2Ms*&1n*U(cp5LU)B|3{ROST)ar{01eWd0vSwWap4i$*Sa@oEgnl
z8wDbB5p*`0@{Ug!pL41zS8`4X`=`c$S92L0K5a~b*X;n0jb0$5p+};kpQFt^E6_Dd
zK(|ZA^yu+E5VTGz{IE@oDzjAS_$YmHxRT}C;;qrVMnFC*OVKjZB5;2Hffp<pBffuA
zg$Bhn!}?ctv?Aya&=Z+xA^9w<JmLWR{B6mP2etU$H(O$~J`q)}o3cBeIk?X|1U-t)
zFf}(D7tAi;Vz2(h=UaY*Z@x5r_KkVg;)O!T)^5yQV2YXJYCxIwlmEO_C)5Af6QvVX
zAlc`{g$zr@;J&x~zke+Ms$7E`LZ?CP{Bls>?(lgPU-*h*YdpQO8v|#)0YPwM!5Tk5
z{5e~n7_c2vL%<UJR(cCtW?zRzQwA{2;R<M7`UF!)st_NqgPi>ZIXeH33{fx@!8})%
zkB{GhTT=EzOIjr3_#4qsUq$M$+5@UiQ(ofsl?z*I4k_0*g39$DnB9Jw<;r(J!u>Tk
zg>}-i&x`rwN2BrS1`9G|q9$43tO&Ps<OuGnK;x>z*z2Ijo;~|;v9vl_r6!MV3qA_l
z|FE+j>$inw?8bsQI+(J|0{Rtm&|7ameDPEQ&r8=iL#aj_zWX+G9lHdQ(#71G=asl)
zwt&=yT9ced=a{E03*xu3x#S@^s^)Bq%f2#~_SANu2kv2iOfBonXT#S=x+G_CD~y&G
zkcMb0bR1zo{o$riLnjmhtS|Ag)7h-6*_<|h^u?g**3@#%F%%7{E;Lzp45obCj9#tv
z(ERi<w>w^q<~{nw3j$UZ99e5dLOeHtnrac$+p}Jy*<0Sp)eeqG2}tXOOzhm;%B}w_
zOZ)=9f@@9-G#pC;^^J-&eTy>H3(Ua)Yb|t}xDf<3W-dd{*wPV(awNV(mvLDXg<XuN
zp<()r_lXuk^Cd-my2YMiy*bVBbK<j~4ug=OY=|4#j>joOK6t3{3ZwsWO(`=mY+E{a
zwwLi%<}TzPpR*=r>YY$w=8SnJmLM>f;|4oxP-nG7mccQ{`axQB+R{SwN$Kacbnhdo
z2SJ|9GOq7Z4TS%E1|i`g5al}--8zSZ#XLjCyZp#^1Rr94h-+fA$A#c0UWGSUN9o*G
z8=A5wn^RdLk2kdy$f>1iumw_~=!6xGvo%78_&<EtttWWb`Uljq-FAxFa=65HDh5}R
zcne)U)|Jzt*FQbOJOf|MvYd{|+cR;_{!&PJFc%CWzVi|1jG*iBDePq(-{B!E@RN=$
zsYp<ziF56lzxFHiQ^rA0+ylE<24mv%DmFWnLyZyxy5FK6dmk6@fyK<Fa$o>+te>*a
z$9)&)J)y8=eJ|FpGl0>8gY2$tK+~#k!BEX)xWu{+PG{Cbm4OfDZW@D=hCal)Bq`!n
zD+PY8#mI-N(6u`8Skk9M4RU>1&n;T)ozex0<5>pmrHwd4&4%l~vXwD6O8Elzb^4AJ
zz~5Uo<j);b61n9QBn-a9pYxL^)&KTlXy6-=w|oqeWIgI3$Ofl3BCbE(2y!F^u%vVd
z_ODVwNxY6YdPgJd->6JuR!zfDg@Yg%SL<@&RvySr6{FQFEBrS+5qhOk`OmA22~53&
zDf2GzQ(mz9CYx!*yTxPZ@EnjEB~JshnZNRz4Q+@kh9$R$f^WMy#wsP@3bkAuM+YEb
z{eN&xq(F5QlCaNhKXdo=a2kg;@t}1WO-9dR4i!Vx={SXl-xw127hyQ@y)4<VX%?1D
zP{h)$nj|$Q0ic}qgS6cE!zL7U_RNGFk0N;9(+oK+e_-+#E#lMB#l;?|2a#JAmsPBa
zW4@?S(cA07A8(#w-uDIEn%)9PnQ4td<?pa&mKNpTN>QgV4t!MfDKtBxMi(!)Cp@oC
z8xyBPg4aAQdG$@G(Fua!q92^m?GMna@&->_lp|UzGT<#chks2rr54sET!Y+5>|tyy
z3x^rt8s3F}r{`c3<J>IMD~4=|G5Wh6;<avdfJ1Q}FYoXfv{c@+yvJ^cU|sH0rVX&H
zaRFLgy$NoN>6tb{j>?a7pehZ07<pSj>xW$hrT0-FQm_{a59FZ5Tvx_-z07%zJ_Swj
z3UpC^4Em*ChcVmF!{0!4y6K)S$q0{tslzNO+?E0RJob!KSq5_lOlV{4U{Fe~2Z8L{
z!eI|tPFCIp752n|QWe`B^$(&~Up7FeWHbgeY({^dN8*}<^&mcd4T7x%eCU_)Y{t~b
zzhifw9sXuyPRaw=Zu1$>98o0o{=fLap1)Bvrd#+Vv<6-;lcAl*ZE5n-7qEhqf$4V^
z(oQs{4(H2wgN{esLe0Gxd~rMm9c0;p$(dZ<vVEMh*+YJ&uLBKRdXcvXJIrUyoX<D5
zW<X$$9<Bd64+YN0#FJMkQD^B?2x1-nSu8*7|1cFM9nm0??JaytMiQTR!iGFZVs1Ob
z46bSRCJa92$c^)O!I*Eq#EdQjZCBN3u0LalSq5=WZDq*r-ls6RK$?VB^fF$2l}l*G
z8;Je*5)Cy3WJjuqu``W%NtPl#YMlU4tG7dMqBaP<Zb7lzH*g(u0WOH1fm^PJFeags
zFZfc%)$H&<w6uiIb_LF1{S3?w`pS>vtm(=YYg%{QfW{nV+{3K`d>>{&elX8p<zIOS
z_6vl!+K(~XrVh29YtZDI`c&omOAI6|ckt&QzkQ4yS@M*zZmk%XI!+xl<o<Bo54NEO
z(ZGKnt02rBAY)xEzhJ^6{I~2DOt(wNG>saNZ8F91geTBAel1vZE#ZCJhd}$vU8n*|
zSY9{=A8IF|8_O1~sTqld>#a$~lK;4njZrw%jAiqdU*xaKFUFlYa-?vs0&ySl6inx_
zUT{-8PVZ%Be+LD22UP%-SOXBRWi#fUKHhER5V2@!GH0-EE$(GsgWDB}&`n*R&z$iA
zS|4bWV_$Vi=3eHV96pFYW%d<9-*!QOZxbIA<_ajwvTiZQVL?R+Z)foY23oX9j(#oV
zUpJwSi^{?O>sPKm*_NgR{o)#CO+u4`cu+|-hBkG!r|f$O-ltB0{~D3_q_%+c3@L*-
zKR&{eu(cRtqzV?T4MN|wt3X<wXWfMu=QK7`ZIXAyysJ{U<)bm3F|q*dBfeqprX?V4
zl?}Te6v8>i4~)Dx28D~qp-!YBbWC{${m!EyFK;Ytb}(e0uXy;g+>E6B+JZJd=h1C`
z1`6cA39o%WjW!0d^!CIDusdKIy6yfYTqEiRXT}S7^6?kU=~;mvuj!CmTL)3WTMw6^
zmdjvx`hBSNKaHPEq^aZOB=B(z;9^-uK_YXRe+Zv(zyEcNSTYL!r3_}A^I0xF@7*D9
zc^vo<Y1D1eC(lyi(e#EC(YyN*;(Rl~|3xWpu`7tHvS{TqOIsmfu_7;0E^-MUG8hZk
zJjmdD6}L);Wv|T+itFvSgRj$QXqsG(IqZDn)^`k&W#7Szg_p1-We51k%F&owA>4Qo
z4;c?ua2w{`!nR=-d9h*%_+(z>YlJbVI_VC^bTpzz<1c)7APGfIay;ZJQ-OOVX1xE)
zujt8U43H|QDb~fV$N|h)&iIvAO=+k0EzU2#90Ce0@Z`oEyu$LY9}gzsYj!?TX1y@+
z^=AO-dax=w9j8{>kiI1qD1G8Ige%s-qdHl##^egV%d7yWVfOrklS)KXv;<;W6``H{
zfUdkx`0oFi?B_qhPZSF`E-O>{&l7RDNhSEMy$HiQ9x=xq<H&w_%PlRjV*YM~@DB(=
z_i~tbP>F=btU+<ne#nqIEba|x;7Wu6V5zP}27ByBssHC;i2-WlJmg1R`3mk4e=$RE
z6W^A3miO;VhV0UEOc`ZLPwjmO3;Y(sPY=ejQ<W$ApVva(vDJ+8{(>uPH6rWA$dJD6
ztuW}L1l0z2gX7!D=r*O0x4m-^{e6$)hj1BaJC@07Oj`xTJH4?pejMCmGu)ygC7M_%
zM}A$($H4pVVUh9)%%H|loRA4FktKL~y#%E;9%g&^xp-!xHt9Yw2OTpH!FXhQ`xFtL
z>iY{g-T^C8DqvWEfN045<URNQ=A`7*=#sM&p>z8nZjECtY_Utj<W{zOytiFEY=R8k
za&iE#D*OS-gc-uH+xOAn1?yi9Yr&9dJ3%nG(M1sdUU>4JJ+X)ziNEZxqao{(1PiBe
zmtH2L;7L87<!K8?QwyQBUY~rQW=4+fI*2pE#-pgSkn<hA06OaY@d@i{mBgGtAJ_{5
zjfU~A^7$ax`l>Ln`!3`(eSn&uAuyS}t9y5z<!(z=fX3Q&Fs9KJAO6SV)CK0GATp10
zvRuJmt?h-fqrahz?G_>)4q!vfUmSLTaq*t35cfNpH2Y;Zh#4QtFd~`tf^Qby*!mjg
z-z`OhlLGognw>qjWuk`V8@%sy2rcO~JkRp;&dS=1T@o*Rvy<iFea<<1dG|xg>?D4!
zsu>B)?1r-MQE0gT3byEfgINPIWb+Dhda9ni=WqjGq4AtKUT!ho*dy`XZ|wK_=>Tr8
zXC6?=ahCUb2H8&wxzaOY)Ojoo*)dn3vwIr!@U}G9Oba!pc*7{Ohp?A9IsLY=&(-B@
z@ZDC3^2Wa~zj`XZWbEEokCRX@aXU8jRzphH0_IS=2p_lf;ti)0&=~HE8izVi_M8i(
zY*WAn-`6PnWfHa|-h!OoW}LENKQ5T!0$wE#@$)`y@~DRWUf!#Q9MM<YF<e0Oc4b3`
z<qEF(<YHc&lY^Q1-4K#C3Vdo;!ian$;+go4dl|&K&61yYwyk9Di|sPpg7s1;_?J*H
z-!vV)v%f%Db{t@k4m#<Vb3RqSxr_k;-XEQhclPebN|vh-JpAAi*pUy8Z$eNsayP#v
zs0Ze4(j^H>3-N}!En4hb&7E5)OP<Oq(xs+a<fOF$odfAma(69UooPeEvhMIZXJ`@k
z7$4NxA;3aSE8@JSnp?ljk_cRDdAEHsd{KT2F4}tsu1~ioU7s~*duJ}{2=(caq1L2o
zt|KQRXA94E>_w;QO#a5MXK*yS1^pJb!vf6~Zq3wibUQCgn@e8_4bHKjPwr68PS%Eo
z|52rGDnhZmYBiiujK^rZI*1<g2`mn}^WJUm;P_x&>Z@=8n&xi8Z^jD5VpKc7WKaME
zl$_x=WH>?I@F4EXAzS9SoFg6{V@X%|vToy<Kb%{!03#;J(k+n$B{{md?C>E}Slq^4
z{r(q>C0Fn~GFF3XKG(l*CfiHcaS7EzNL{2&BX()ghG{Rs>e5?`UY>@M{2<7loWf6T
zGb8cd`ytA407F_WFoOl}E!KVF7V8zD`>1>1@4z_mmOrrjpD%={4ug!#TOc9HhRbWn
z$EdyU&{y*~E?zE6gY`CYohM##L7RU=z$6{WXm})CUM?UqF#=M%<}kQV_yMBT?9O{h
z86zGnf~Z|1u{V8ybJ$nJMP;yi>7pLk<zPmR+WZ2GnGP`J^FQ2WZ%M0Mn=$iDHiT?&
zfauO&xO)-%zLU=w8`RC*MCyf+YfrPU<+nKe_92$Vj^!3bjls9KlxdxXJPDFMjna3%
z!4q8-I(*&$+;_L36_d4Tdy*-=xw--+Wma(ab0^4;JqFvI<cTDxoin_sN%JD#avmdW
z=tJ)$oXWah=<AGjaR*rz>jyZ<X5g;bwdnaD%gjAG$BSkfQ#Yvv+?;0>7`C+?%iYI;
zpt?aEl(q_u?mU9<G7lWCY(%eLOvbP?N3gl-65sUNk|wz4@CSFYXJSv0xOvMj?t<E0
z%xx5+%oPoq;!(zlMy(RBi2V$bP%2DVtBOi<8{xVko83xvVR6xCJP;*CghdfJ#Hj@r
zxK70z4q1>F;tXz&Ot{_Wva#z(KLig1aiWf~T&3P+JbO`*M2QfSSnvLMKl8voW~^1^
zy(r0Q#f&$%dAW3bLY^BC-+QaUZKoNGm1c~p4T{3l2ifS<bB;@AdznKkS@$8SgRhay
zfqABE-=dc$P8~J~er}IK3!OK@w`~70%}zkP{~o|eQOaaZttB-Zau1q|*K;9PcB5MD
z4-}Z{Q{PRcxU-|1b^4ERLzf$1?}j8e`nLg+B#W2}H-TT?szwxFR$$y=ON=`yiv~K0
zV99cGQzk#g`w>bc<Z2A$mORJQ1DfDhdk2i{>}Y+|0=P0#n$(PP!nSqtG(S=cRr1Pl
z(b96LC^`eiq6ADaFXn<pQ!%f@82T3$;I1QO=q8e&-mWq@Dk=fS@x+QbLzwIA3-7Ps
zhwge^;L$$|U4PvM!S}5$erw)<TZ=0F%U?vR$miI@_A||s-wAKBS^fT*j30bs3`DKm
z$?}X2RJLFZDCo&Tz%YB-EOS_#aDehoOS7QwIy-~vWH2AQJma691HqUvs5<cndTn}z
zGYYin#zXR?Wa>c}`YIY*YLlURmo2DeIbz7{6gYG@8FQXogN^t1;qfRfa{Irp;Br8l
z3T~zIgH~#jjFKAu|M{c8hgrv%9OjQE9Yblh%Q%+LI2|H(=CwM`zg&7A`}Zi)-`^YY
zsMG&AI`g;~yZ4WecC8~V(?0E49zs%cUnj|yJ=u~i`%V(FkA%vWY-y89k|auM?&~CN
zLXspQ6_O%}QX%y_-`~HF*VD@~bKmD&*XQ$ozk`SKh4h}E867cGgQ#Tjd~}lt>jUTU
zxuxaM=6Z}5G0sU{!Fw$KVgtujbxBNw840+21eA0VpvEQvmX|Qz$&Vk<eM}Yhr)$yG
zvDQ?syc)_MpXN8bVBZPDpPX~OHIbXK3Y(hzAXo4Os!kR|$?ngbZ~q8jVm34$qy`(9
z3wP(oe=zg1KAm7X4&}N|K(Ki`@2L|H<20V3OG65*xf6`Wk=hv06${2c(hyS=XnDv%
zZp^V_3~&1efz=66JXMEw#wCGWd<8py9zwBYnoQ-r026!kK>8_z6a1@@jQ*oYtj0$}
z?tpTP=={n5&d<c|vPSMxjxIGYK8E(b30&psB`|V<5#1=3r{cv&`JJC9VY%gFE`DP>
zh+1uFo(D*h2MOri)2vIR7s0)|#b!gnXJIW>A*QxNaq>f3=2KQc?T$xK(eV_Iog7HU
z3iOEfYPS3Po+&f*Qliz>=lLX?_mJ{y4|<FTPH8Cn{pvm8-4-^(+(0erx@)r}wD<_l
zW_j7#zgJ++PgBy}$Fey#)}&$I8Wb)W#_Rnahac?T!K~RrBCd~u{6(FxyD|z>E@#5V
zQ)#%*_Z*0fL%3&?m<MJr<BOd9h<6$;K)P}e2SOznengE9IcP?Ms+d19hIv6{OK`Ne
zn0Qz^V)<|x7kO7m!w%@sX)j*kva(jJyHbDxpJBYW@<&j8Q3!kT{IKdb^Flu;g=q10
z#zhm6=vGb4w(jC9oHOC|Gb`GY69s9a&*)ze0l_(}V;vC5uaeBgw5H*BDN2f_XhRL_
z+u^XB0hMEHgToI*#3%NiY@Y*TfWA}XlTKd)!@d_F{dhe`(4Or)aH%P6cd;OL!fZ}S
z%>xrBMx*Arr!c?dB3Az><*K(|<qDWL+|W#)2JKU!-JKfr<DxPgHhKZJuIU3)D;~Cq
zwTO-e%OA$q^N9}gAWqD9LC+ldj%q&~yng`odtt%mKkqOr`U2}C1aZ}mZ}QTeaXA(Z
z6ocEl`QG-kP-rfOBOc~d5PB?U$c0v{_0pxqsrSKg)h!TD=Qv|`5xuoThfLU@1>9O=
z;>mK|WrAREGyect?0)XHGYx#Y22!j4mV(zE9c*5zM_mpzK<ayckTiSX36td@dUb+x
zVSe5!xeC}4(u3zS%?Tv_z<|%aeE$LyvT9cn9B<borTZECZs7n3{;C9Vfo*)kY&Ft&
zy$CuOYa~7`7pDIE3MbHjz7JF&)B7gCuWCyons${dUwxAA`R)Wn9FHn($C$(B1Nud=
z{C#5<XIgp(D?^5YNH+>R=X8Ke{TIefz0KY+yZJ+9l~^lRh4H#yLD+W%E@~EF)>cjC
zFlM=|3Vj+@vXYyBk-5LWcj3?Ck8mnVL@(|vM}eSUj_ZAWNQ;Puw$H<Ob4z8?#QG=Q
z<8`sze<Robm6){Z6{CuEAm=yl1(vq%g-8Vp`g)2L9ph4pdC4Q7>C6*ux@seat`pMw
zzn!vkW3_P7!D6so+K+6an&9)q7Qd9u!$PkH99dijSw@d=dddP+a30B+uv*mdRuOc5
zaKU%IEDzrQFK9-w_pHu-eqGHOKH!xuNS4$>$@EA_FG}Sz1{x9_?`SZ1DkQtje&Q+d
z2}m2B%{YM3{Jv|7phOMmgi)U0Tc-=DXV>D%t)@h2YCUrpX-K+<cky*WY?m}{GKWb*
zRDUxQ#xah&kM$lX8DvW5vV6*@(efmVab8v^A48|v-k|VbGnO7?od*|ll6Pn=zjto}
zZg~C-_L&K&kFF+Pwjdd8Mw|ez!(Vxeah@2ro@YDIK0bcjXuSJRk+`WV(&T6H(55|w
z^ReoXh4u8~w0quyvyCAcbzGjTX8q;P^|c@kSOgjmE~2>eESQhgB6pw5)1(=0m~|zE
zWkvL4Z)gsz0|f|oIga^d*H9q7pPd!35ySeTc#o?>{P~6P%#==X<e-qadM@YPCo$Gk
z@EJ~ATqM&myUrX%@+A59A^!V6Jz})mi1g1jBG$WY=}_Z%9F%5EOx>>Fo2+9Hu=_M<
z53!+-3ZG$wcPa0Y{*@~kNkI5Wg$h^Oa4vIeu#)Y%!>&!_xdd|(#;fxK6c|rn*kexW
z-@+#kJ`PpU>>jd02v2LY$p+0ED7Leu>CfY#`r{uyJ9|F}oOPY|8>$g7)JFfo2B__o
z1i@>^;oZ-c)M08g^O0ZRTsJB3LNbhNdtwGYRZB2<@H0+&V*<A=_bU27v4%DAEVqLa
z?$n!?_+VNgjx1%|$U_=5s??a=c^d+rF1{EXgPdUNTFJ25T5vB&guE>)Anv3CALr--
z5e;GR+&Ucn$1cPko42_7g@F9bo{i-ppZN;^A7Fo}h10sCMb?NsQ80O)vx?F~y!@H%
z(pER}(se80)V*I2Jz_4%=`FzZR@qoCsO4Rcq)7@`7IsOyB56DA4kdvt_%1<2GDSMn
zy<|0%Rt$mR*Kech_}B1poB<J-^v}6^GacV(rJ}pNBAq!_i_)S0aGzTm`rZ15)o-5h
zeqnzgE^-6r*{s9GQ*5CosvT=+FckOu4H{+|RO?J49z7X>hnPn;dckt&wpNDNSk^~l
z9k+QQ1Bv_Vw-_<)HLp@_59?nVu|8H4bDZ=@PX88D<6%1?(^m+AXEWeH_<aZ{e9E_G
z--40zT`=ze$K?ijqmf$}{ynEf*2!4Tc}E7fCg?EcWzUrfZ%TQ;9Rukj7sm3w|B^3j
z{RD!?p1gmvGQGYi4BawVZo61c5+|C+Pn{PAsnsTE^C=K(S7kGIe3xW+Z5=L7wLtq5
zQ#tA8Tl`@UT{=~(3#E^rIM1DW23m8sAwD*Oyzn%>L~{m|4bF!2E<Kn$)_@H7T7<Px
zkKp4gBN`n)0{t!RVCnl1IO_2YeQKX_^$)tx@!dzDwME$fhbg&hz;^NPwaA5u@c?#}
z7^j-e7e0E5ftA|S!$b$%+RE|GtB(+ueuZD*n-A)oDW+Q-gv?cacu`ZHj7-RcG0&q}
zepi#6m?44Mh_`4zXEN*>bRN&GF$ZTh19N@;1*i8{1K|P*XvZ+u?0FT=;!-ipN>`!*
z5o}*IrVqSk^}~*dsxUvk1BbP+nZSgNFnjF)k~T$xGynXAIduiNDM$~0`n<vescg<Y
zJkdFaWkKy~ig;^R89Jx3uI7p1lIp=`_)cU<BMNk|JY_uii0!#m=4!A#^&*Co76>w$
z!RAKcpr>j?TYP8ZgopDWZPjb|d(nujy<klC`3a~nWC2>P)g+!fQ$fACAN6f_1;ML4
zRFbfHbj1$f)-cZ66FW@YG6t{x;xSKTg)6=%!}SRE|9YR}EBFD_A#^pUFo#Z_nw-p|
zj&%&r_M^)fH*@wvJ$j(=GQ4pdK>LT3;FQCfR2V#&n@N+wbwaDeMoEGdth-fh9t5W+
zm=nE(KFD5vjN>O+5vk#~oLztVkvuaI|N4LmQ4KDI><;!kXDoq)R4pRs{{SM6d0=`k
z%Wm=mc#n$-u&?4csDI!<Q0vaEKch#EF(3UIHq)9bT8lfc--7xkL+p8Q1KyA>aJBE`
zlpmUqu<#}Lc-J?`-b?vu1vgQ=_6Dasx&g(!HUHd6iF}mxqm$&=9sRl??R|R}?W!C2
z>0XZDu+0nP)_%fE+iZyJb0z34rx162H5_>V9HgT<(AYZ&{&umfb_YAN=s$vb@439w
zof59Tc^s;rFrz<51);qI<Mu}$;q<;U-pIOgXqH;i<7WntoMc5Z>7o*~{Gmu@{cFQ6
z_I$1i`V98dq<m@CP`u+zq2LwsXe?m&lT->?w=9S#(HGQy8<Ql{+xX!{2rTV-3oXUQ
z&<mwd3ko!E$V7?Ls55-N%Lo*E{gstH{ER8{`#@mY#g_#Z!?16yAWj>~EjxS~M=*Ab
zSZx%S`Cc75heV=o*cBA6n8)^lv+>CvQ}U8!3l^#giOuT@STnQ<UbS6jKl7?g_BtQz
zUpDfU!^~iD;14WRV7<1>QJjVRJI3|ZrRAD&{Lr6Lu+{EIssevQ^$xaUD?r#Cp-sN1
zTmVVz7qHXH<+;uQ#Ct#x=ufpIPX2+M?;&;mOF{+|Oj98@8Q*%Bq9O4aXURK9F$bia
z1cc)+bC29CsGq4BtxXR@DT(Jxx9i{<mssX5FUO~>lR3Lfo`mnIL+dUP&9XUz&xL9D
zGRc(u^-v^wR_{QNrX<t)B1fd^Q}J&Mn|Eye4eNt#iNNgzSMrQ;IGm<|(K{pZVlnee
zKDi0&d`F>qGGj-Knu`mY3~8eKG7$gI!D33eCiDK_x@Rb7vGNOU?tO*bOM1At*KYhi
zdkIYa`v`=S-gB;u`;;HE9h+h;v3leV@Qq{K)F-3SY14F&d+{80YBC1*eTt=ydLWn2
z{C%-1bmam|;&E#@|Ng^sByY+wc6TdyS=4ck&0jF`TQLY9gz$bXtdIHmE8M*|kY0AP
zBAp&GklvpR+Vwvn((WUO|BFL+f5v!z+mPe&b_wtEQ<;8Y+|ps&DLcPi<K>s8qrov_
z`gN)vQPY2lDOxX}@s<q@lb_44YK+5}N37#fArDcVPw`i)BFWN;!401uGj8o!C`x9#
zD7{38UHuZrY%wEq9U`$|^fL@vlnlP!dvVV3G)#BM<owqKfm7CLE^==+lw@k+0oK>e
z66L_2M>Vi{1<QZi-T}`K4?!9}3)G#Mcc`xiZY&fKx3WIyUL3(2T!}`<HEU7&ly{c)
z+r|0ZcjUwxTS0#k<4M>Da)P&&&U+`Ql8fx~cXPHCi4>fIMKham%dHF)%<;$xykbOC
z2U*Z4zczFoS}X~&`;O(~BKf{n6_PN-m>jJ?2I3tbxbnF<eDnh5%v^MU4|rY3Iy|$u
zqXr9cbGj*U-!vKg?3*DpG8V)-&roxRF6FWgVfl-AXzc9<nMoeRo#prwpOom~{0tl%
zG8#XgV0XtKiV)1a)T8S~<kS@QuK5O_-JJ>hLX8+l`c1ZBn=StRkc%Ai?qoJ<F{VZn
zN|k+N;!6*CaqCOT(%FCE-Y_*Hc8}unVw7dN@0E!w&5+gm+~$2YHAw8<Rd8xWN<>gC
z=M*bDiuR+uxSoG*upqx5Rq1aB<(X;VD!2$Kqq;GnOM~&wPw|Zs9(I2<Bkm?EVbjJP
z(5Ti4ZjuUU37QHClY-d{<SzU=XF$4HZr69XAEdV%LawzBezA?gbl+ND_`HDEdEx|(
z#o18nb{VJaP$Mc!6d=v-890u33VY<581sH8v{=ehmDi_n?rX+ry{1Y-kGzI3#>-SO
z5VH3{n5@ld4n&;U1%GauQP<as5H_ce7kGN|bB+JuujQ6BUHvpSsQM<_%|QNTV?0JU
zE5pgREE~Vig8n>v3d?S!z@5MEFjMm~jAEaEiz5_pl`aZ`9Gn~eYQcYd>#^mh5%}zI
z0D<dzu4p~W%1rB!v?N7i&r@r<^nE0r?=>I};}|D4+yJwUmawel2RNA_M+`j>;<Oti
zgWe`#*z+pBK1+>dIv1lLc@pflV|_?(!ZL09P_ruz1e&j$)!ANY&!9C}v@8piBgM?^
zm620_BShlD+`C?rw}Q@3=I9;z0qbWP@ComVu<m3s9yrwl$%Wte?7suxPE9r(C>cOc
z?$RbZMlsLr;|Pe&eunN_b@4*WW0>nR6^?o66S;q_XnblNxGO*9(k}<`k$PuQvapZ^
zS9gMcA;F`sI1Dfd;n!{G#gj@VwDS~WX&+2N!J<T2o@xg7CZ-ePW-bBEoptd3lMxv!
zZ%p)O4<M6Sj{E`%gk;kMyr64MZwzH_9k(EE?oK6kN6F?M8^pl9iS4M<lL(HUD=={M
zcwF|%kW{~tV;Sn_lHx2Ix}SBwMqd36n}+G*)x9b-cuhZuVeXfuE&@92`$Z5x4#sWY
zlt|1MZF+flHp=3wm>c6GUeOlO_pepxhwRhf<o_2lcd3KV84*<*tV>2_#l!j&HpFHf
zo2@FhLdX<0*SI?zJpTL5ecjfNI0_i+Y~y)8Fi(x%$thr-;$Dy*k$~=GYod*xAi-Wl
zYW^BQMW#Mws#VPTF9JLK?eWN55vjVcA4j^4#6|}jym8T-c>XURl=_xy``wBzmXq;-
zSp~NKv!Z!loxtOb9`AG0mYUuCj@k>$;K~N(WpS}0yY8xx2QRf~myH6kdz8!Hwb7?-
z)h4{>$J21HN<_@ouzdY}ly&R2a1TZ&<6~d;XBCEGc62qLvakknUYe3>ZW|=qbaHDh
zoPfldOHh@0i1n?mzzj2Mx?vjYF*8Par|1BN_Z)@08jP{~Uo}6x>?vlw{QzTX+4Fgh
zm~k-8Ve+q22#tYgQTG$O-^+tIr5{9(jb~2!1kPKW1`0y})0;xMlYu;n8}#u2`<h0s
zd4t~@WGEOlQ1(w^Oz-r+39jmDjAvFM@vRo2MTH(+HBb)HSN_9+r__j`!=Lj{%|jQD
z)2Nbk0A8jWlI3h(T7PgLpSz<8ruKBfhtOxNe{akUULv6O)PZZ-R*yLWBAQJUX_n4y
z{P&OTG{=0wJS9_9die}aoUo+zHHna8$Ywf|fANAgHK(6*wllWQ5vX_nCLyIF^4OZg
z$rI(sQvWV+cz+sFl0q<YNe5)#I0Q|5tMJthJpxOtz|rj;Sj0VqJ#EYzz2iFPv_=8`
zNbf<RbRjx3-bLJWj_=lI?lULCeUfPqKVSB)`=HH7RTeQGNH5q;Jq|V}E`#{j2T5Ft
z5uWPqg468+(q?j!^NBmg?JyTJZ_^VlnK4UTOK$TT)&pooL_LJCURScMKH5*Y#Lof?
z8lf(r7Hc1ZtLb}OlaPR!fhX8=$wF4X^fIhuY>9%~YP9wS^B|8pg6VLR4|P5dts(bt
zQi}rhWJUDygpb@MlN`hzYk2igk46Z8aYr!&s%NeMRX65-c{3lKx}V_WTj@A)=~1-1
z+8@u=jfU!dwfxWg^I)}qGK9^uV;u9_Fzo9SNY0rAe~AW}zvMM;n#y(|ok8HSfpr36
zu5h_UGto2TA7f~wbEV3F^^d!_*jP)d>Cp)tTl_%qP=&TLPBBTcB{L1)f@_L1PP}vl
zU6=#=mky7AO$CJH+tO1~9!{`qtlpqB@L2qT8@^dU6tWaxwwe`*c#zL!T|I^?ir9Ji
zl9Mb~w*{?K=VEz;Jy-Cp9b5W0VB?q&(4JlcrBj2!^GYS`(7290B98f&s(8_eUQYV;
zF4C_o)ApnZ+H#k2zLpcvte4GiJ9}`!4ofoB+JfaO3!wjGO;Z0UPnLho35Tj$LG@J|
zNWXms0@>NjF5*1C==*^JX|b&B8+%ubt#lUlPl3S>4Y=1+k4(MWjWX?IIJCf$mTwN^
zXR&NgzsXu;WCah>S(_wEy?4Rx?JB5X+?4v*c<>r>0rF4hfMCfo&TG*u=u|3%jW?A@
zj)OkC*WZIJZ7MXgknt{E{h-b-3Z=RcIeAfYWQD;S(9PjB2x8Vk+<`o_OH6<vyH8`e
z*&O~?fsl+5C82C|Bh-`#@SlY_{clEp+C4~u;lp0DzQ{F6*t=-n<Jx7;t3iZ~k27GO
zO)3OD{l`uFt4oZvPGbKxiqvPyb=j9o*WlFsKHQ|c4!7UzN22<xlA7%VH?91O`XlRb
z|Fj~E6W)Z~uUOX{`?&NhTc|JE$Om-)<WdF9V{~&4Z?1O~pKpyq`@N6&hSuxs&!J0#
zuk`Tcfr>cyurIiKxPe#c2weIu0(@^4qOx;;>XoBI&wbg3Dz{p>=O-*kzP%}RaLQ*4
zv6DFdDC0@1GoE|lY+2*}0!Y=VLhG#>M0S#WFNf<&+8*rUYG%pd)H&5Cj&TRaBY`MN
zGXP&@0eHT$#-9bZVEG(2<Ji6%rhPt!d)VG|s!AzLT9*w&UR=U<H|7Mom%#78&(1hD
z=h!@_4JRcRz^rwrQM_s>x~>_GUO!^-%IYTUJRzh~%62Fz?dX~_h%4U2&Q0CxK|Rj~
z26<}}m%eg1zFV89I0&eca0teGSy91t1z?<FB7XNtmR)&_6VBNI!FI>cvV8!ZUd))K
zEr+>qgK7|eq>_5WR7gwi1@%MQVClmMD1RBvz1Op*`!thr@Xrvi|I&|sUH1<Y9rf|k
z1LigEWOw9=_Zf?|fqxmSNa{b1=cYHCK)U%$%)`BK(cmhMC#+ktsv0br*Z4fLpOtGx
zy~dn_t`#}x!&}1}hZk5R(j*_|ya9tMZ2~)v!J~)zWd4y&DEM+KyB3f!RO7)SX)b2>
z9fm2WNwx2fzz?x~khr%G$x2o7(AkJeecPQ~iqE1(i8@)B@*8(2<fD$?T?|~QM$2&`
zj$mFsogns7J?eyct9Y4c(+>D#DkcxcnG>Z2hrq|>J$H0p4Xi5i#8dwyc+{{SGJQ^>
zQMG{lFE^g-5W?s8kHh+lbHOIP11_jL;S1AB#9gMadyzbK+;S2&PZBVfyefU}Kae~e
zl7!)Z4ud%T68|{Rn5K73#ywqAFlNqST)#<;jx=TLxtFWC%PTbLsCU+6*#`lMOi-rp
z1Z~*ZSO*golQ6d54cPHfhj_4l!PxNw$i>&s@lsV1Qudkr8<K_ELBU{^4$vacLEhp7
z*?;9mw7E(^(>)m*xY&g5X=RzANu9h;)neZ9^iyzc8OwYBYKD=}2K!8;7+%Z#oBi)W
zlhZve=eaKRd9|M#+sXO`r><bttzyQBy@WXqcEt036?1?{p?hN^@85F)dJGM)-P4lj
zm%c>>ekX+I=0kthAqlsR0=bc`5bdvzS2nTEQgkR@xg3WcZI1k#J+Hv}i4jSwF{dN@
zsvzk0RPc5hiMv0Z!_96&A~LeU=&~=k>wr0RIer;#m1|LyMPpXlS7^HKjKz;8Vs?}@
z&D*pY`XyS^poTH{?#2nMAM{(cGx$0D-fKqs!Wo+~l*cuxJ@8LikCZ=az?WrdICEzu
z#`ek4;p_FtQU3@ic@qoWVf#?ne>rZ@>rV^MZh+(ICdB5@6WCVGcGy|+T-d`&{JSzO
zqORo&g0T@YVe=c_e(xM;(hlRDEYkQn<xfzMxP>b*YsAk17IflaRa&!839jlDVAA+2
zC|$dUFWEQ;bUp?{dhI#*Gim}}lvAbB3N6V5$D1%y`zL(#>qq*}Q6)pi>SBc@g%6zx
z_|rNW-l(5rp65ugv)BwN<6pq@It?=Jk_{nG#q>1e-=5oUNPRK_c=b;M&@k&eD(#tu
zVZ&^Bvo(6eaVigMiaKG<nOMeZ^26Snec-h52e<Z%0jbM+iVFht=!(7Fu&(1SW;iQS
zoy2a8Puzof+wC}&?F3534gtAEU%=ne6=sYOlm0<QR5<P@o|v^B7P%>q(O07|{P9gl
z-+K@oOedpo^I*JrSc9DXPmy{pLT=P<B|5K;<<#$m^Izt)!SOkH*i!HtC83$Hf-!93
z<S$C*Kl8xLvUDsRJ)8BSuJW5&4nSA4Es0w;3D(MKlRuA)sP~Xn_~p(+2vz^hx(oy8
z_ZenntF1A~TX78dbjE(cuiSr0&v1g@V2Dc`&b&`|xfnqq$}SdRwPzG;A1@+1uf0J*
zsj95{c^?<j)`IRiZ#gFeJH{U|;6`gPhc20e=5N^Z_n9S>W{t#vSmsn7szb}Bq=IYc
z4VbPK1=saWXsDqL4w_`f*vI{8)D|grcxuDdxzCVXzJ@YiGb;bYiZ~h1fN83FWc4Hw
zEi>AP`A_@NSseoMM{5gerS&7zEA#LiI~zsM2nWwo`H*(wEbIHRIaAYZ-cj@j7H218
zhd`N%b)9k8*HG*oR0mu9E1~>^BG>F-OBVb6Lf3k2$qmO#tn+5Y`NR+4uebLjkDoMx
z&5(z%Y3^U#pTT-N)t6x8!0ot3B&Gq6)o}PUA=QywgLwVnxaS|^ALo4Mn!dfjKiw7Z
zWN-oIItN0yO)k8C&pH9WC$l-kSBQ1I0BI`SFfJwxj|SA@liQ&f(4xxDgmXAo{+Q&|
zS5>mLHyoF(uqGAq?eMhWC5G3zU~Bp#*ke}+3mL!b^+X%ucAL5ROoT+rX~?(@w=jK(
z8lH(ar)7b^Al~~j4%VHF_B|ckpj#$nV3QnmV|NN0mG7ur_ZC%}7vPPhHY7Oy5*NC!
z4-6|SaFUZJ&dsmGmcW(ZlDQh)$qe}8n+ii;3+amSjG3d;%e(NmVO+B=dzaRs@mIz{
zp2@le>%4fod_$0`u9Y;6W9RftWlS~<grPf3s3^z?qeF*-pk%znKv6*dJUE0}V{C{|
z`a(Q=#+1xEY(pX(;u)_?7oAWURHTOJ(SI$clKKsIs$Yir!Rd^>+9VTSaL4xf3dHh|
z9<{QI##Oi0W0H3ZsJI{Dv!@^BdzGHSYF<SCtrilkV{zDYY8hNrRwE@>pK#8XS^s$c
z70&0`F|@nVzzbxHLC}1g50SGYo6XJXp1NdQGJ|nDwU6QcsRQUu?|$@WWeD>EAH#>6
zJJ3FAIbXOp5~g<F1ZjmtmUr~JB<(a~$bM$7@5NS(IpxijM0Iekk8-$KhWa!mst3-!
zJPuM(hh$DGqF;SKns-ly^L+RM6t16z)Ei3JchZUuc%exq?2-rRvS<)HN6Q9R--YWN
zF5sfKLeesh!<0{qyr5KshJDspUG#~&V?7c}rvd8`T!EuyS0Saa4?i=1tj2M9`e%>~
z_B{B5qF@b}Pa80|@&fRzTXp@s22F}zj)5^5P+EHn|6F0tAU;jva8(J5)CUr?<BYj-
zTuk$SZA3q=3xq2T(WfU=HY(ye4*Md7J^NCjn(@(Hp$<dcROySspHShbC5<f01vlfb
z_(SD?jw0qvF)%|9Ut8Y&=_yXmc_3ZaDuKoWPhnQ50gYX^A2mPUhF1m_WY3mc;Mvg(
zQIFZYjGb>c6ug8@<NL#p+wzpPf1ux273N?rz(3m$V6gE%^xZX&8@41GJ9kq|A6(1%
z#vJ7p>g@1Dy9ao@^uRNRt*GXeJjhzCK{t#pf#f=G)N#~>@z<^()wd;sDn(?SYARpo
zx(^1AW~^sZJz9QgH_l07ck~JM94M-R*XIko<JL+L9AZA0?ALH)gFfl5@6YDT<2gO`
zm(cB=26wH_pqdHe$L0#DW6A>z-Jwji6J+T0Uo>}0(;h_{d$C7%EZVOO1knToYzZ-;
zc0V?N`=E<Z@I;4H7U<EW|GuH}%19Lc7tZ)#xsc`c7QL-n(Rb(qZibzJOr4bkhiY3f
zJw1X8A4Kqcr4$X$$k9+$0o`NNjmd8+dF_~0h$~h@s3kiy+8={eYwX~%u7C_Z{s1q1
zVcxoLb+~qU0eWllSal!=)|4ee^`vvKYq*%S6<W}wo;wg1WlSS~F{fd!E=9vPn0@au
zUq6%es|Gi5+g@9d^y7pJh@1~sSdOAC`4EON2ZYa)+g!{K3!3(65Y|=cQ>D89ur2Q#
z=1pth?L${!z5ia$B6TU&Z#^yX&OE_#ZU*pM#h9MdQ6gTKcOcxkf(q-|{jtMIwv639
zy9dv~PNQG2aa=JT_K3v4?0mF&EX({%XD*+Ox?F_WFD!I21gGowIPvt)c*mFB6*Hw=
z{=vT}%Fg2xn5XA=U@o2<Bu7WZ7?bJBo{*pM5m&ny6940?;Lq-RP_hhpq3=W9!`X-P
z$`w(APuK9yf_3<Ln+1_RER(E_Fd_9h>WnG4i`#ho4649=&f4CH%JqGQKy4Wa>=j{u
ztTu5<D?l%UW1O7Mb9mCNN^+k@<C32uV*5*%)*V(P)vGe`EBl#VGn4twb(JvvkO~bO
z=#1VrW>maq3KTJy)ZYdJvVWH$ZQL9R7Pqco<mn_xchLY>-R~0D<u|dlDF?EXOL+DF
zHnJVkcxc=0%h&(wV0ltC**Zft@QAd+cCilKyCf60<lcwy-&J7vXfxU^xysdZBlw_i
zy-0V;5m%iBO!ayJXU)_j?3Rg2tq%dSmVvZtGnZLE29#YMqtCty$>2(5DyVUgIJy4h
z^Zs%&564XIEbAgIWV_w6CdSf!+QZeKtdTV6PlWWxN4TSf=dt1u<E_+oK-YKX-WxlR
zOx-^m9TII}t+5GR_h1+}tvLi6o`1mXq*rJ)Mu3Czg{1mq2L4Iu#1$Jo(2je{C%c>j
z$MwVU{V6NDYLf}B^LxU-`oMTApSN(2uQJEk>pax?nu_3C!*X)XaAL?5@E&ytL?J?!
zr)U0Y?Z4cGurb)?X27lT6@frcfuFkY84M^JKxbB5MU^{-FyxUuDWzj@X~i8BBv|C=
zZK{Thms&(Rg?$&6nxf`#=4H_G;A`ifg||B`NPUSv@4h4zH)O~lKfE0#&J4k?nP;)*
zg&A$QKMxlvTG3qpNLbinKpIU;q1<mFinnsG^T}5jwl)*2MT{k#!{*|<&w+}4GJHtP
zgr%*~(D|bQcV7rZLBK#+$$~gOdtC)%*X(m%JlPHnz6*)f^bKsDGKja|VaxZj*>TE{
zGEhJ2jQc0(QKKdcvRGb&{%5B{@*e78!Gjz$o^1pHbH%8<_0j)x#p>T`Ky}9t6j^vd
zOPeCLb)DsX+O4?vbA)tEx*1K>WX>G@Ns<=kx^46Pjfrus=n(iH+t;@77v4Hxb#5`=
zEBBT;+~T05=M6qgVRxbRllcQT{y@NsD9(DPJV_hR7){<;lBhZhqCLD7XXR?q4Hd_5
z-(|*M@_K}>G9x}tVoF<C2HR2V5o}V@ruOW8T;jS3eOzt$%BRj)-(>||yUwHC@11yD
z(U?e#5^=_gUi=eXflJH3;;F7oSleMu)~Q=Vx9}oN7*vPL)gR$pEw<CMI1d#muOK)=
zgF2MTV|{KS*VR>nCwV=R{qGtaGag759bw$N_ajmHP#~VfB>Xr21x{Fc5Y)FOpwV^_
zaeh32WaT@f;KD&(XRjOhu2tusZR$tf^$1By>lM7&X+|dVX}C(|7-~&r1KQ+6@Xkw-
z8nMi4`eBa$=fd7$qrZT=+YP>_aVkiI$8+^%=KT0G0umOn5vz<WsY=pI&Z%wyT^1rw
zer`#Jr9)a7H$|1W{m28skk9D8=PIvu;WkFjx51y=%iz&;EwZ(rDP0rw4_eFTq3fL*
zeo&1faj*@+>~$A;!Iznm_h*9f*vtZK-1`fB9GN4BjO1Ov7{iu1<}@W!hgMywg6zLa
zbn>h;ET0s{tCSkhV6P_L(`zB-<sLyRYDoQ=D`0D|F<G(n4D4jzz5B$H)Gm1pw^v<9
z!PFv2T!Jh3(HAhG!US8FH{v82oBb;kLRwxI+*|$&H!pgKn_l%{{_g?Q^`f#wr-sd!
zSDb>!PqI<^gX3e>Kf+uyb-Y!`xQzKcPS|Du4r~tdM|uu7)D=SUyQdJk=`?<4zlXTd
z*Ey+erew(?TM}=Z12*f+!6sPBc4fh6@KBkaax|u!oR8uvV|M?j9faDSSD@hT6-oY>
zYarGqyr+_YhOv2nUR(zEsc!%o!)A%W&ztxQ;mosiR)ycQW*qJbjD)-)<N0aqJ}(uN
zbAJ|B;h?)3#Bo&u1obV1+4oq6scjzq9CZynM=V0QRlgxhQ-MamP=(`;Z0FBBn>m9t
zsPC}(d~~W2dZoldzsqdaUDAx@Y`%4>tQX3SUh?|6W+Y#;4yG_?sblyRSRTl78LB7v
znyzYG;-f-`Ej$SUlRNo4V`^E>=n5w5mvL`TTTo9H%ScyMMZ0SvoSnp61NG+_SFjx3
zjZ`CZSG1%1Uqh<Bt`A<;h)Mp8z4+(dUuZYEh5ReZ+|PG7i;3wV@W{wH5+x*EjDa@G
z*@{lTqY3-wC_(X)LeR4*gQqct7~lIHny1~yC0WMgT(Tk!FY0D{ZZWE)zQKX*8YDaE
z11j7|gAwM2#GcJ{+<k1&#!#MKaA19@0})WV;4TgjKg0VsRH?_YiO^l)4r9_7BW0!$
zNeT=G*8!_|_Z2)JQ>jFvYXEoE_9IgFLT<qs9THc2mUHzz2)>2uxP8;s!_ENa?GbK*
zrWxn?FYnv1xity@UQ5RAsKemU9)am&Ja`Yr^ND`ii(AMgI4@NsVX+F}^Kl5Y^=5FX
zljgzMJ{`Jnoi3ft7#l*yRkoP<4({6+5If!DEW;hjFS(~r8=c?b=GPhcx>=5Rx!2<5
zFNUP=yg8{aH{w3D$Wc9)ED%oVMxDk$h-0&+PkI7U$y}GwAFr`qn*jB?&SF@tCkhRF
zxa7&RAbaToDF5ornLj&@ercbviRG8$pFD;;Lvx_-7IU2k2SUy#d9o;+u_TPo!q|m3
zvB<PP-Pfao0U7Vmep)bJ3!~9O(U`6&d<&))?J)SsNE|uz1(c2V#Wm4AaKQW(PP?f?
z?bYO|>+e|E>QXZjb5W06`CWtuUZ+C^W0u!v>hpt^E7KuMs~KNcf%n%l0~OIph!^KV
zbI~QdbGZhbN&@*dwIz@X2T^q99ZGn8s`W^dn5sXAyg*-QKii*deiVsELQF_r_F>7P
zPDLW`e*^D2iHLOZY3>f|Tsz1JD9GyYfdTWC5O#K{P3Ft8Z-UB1DZDvZ4IT^5qvbdS
za)g~{md`Y!V}I+@VM7n1e1#banrjDZ-W-MR7nO*DrwhCrYD{{PYB4U(h%ekOhLK~1
zv^;1Tp7b`S0kg030mCFPYLgUI42NL2#UJ>$%z`<<=0NhkM*MxE6`k6Dae;03f%?_s
zr|vj(b&QjQ=;+cP?A^1{BAxk)e)C}#J<P@20fKZF=2UNxXzetjC0keFN-Jez9jQyx
z-7MkjtABWsJBt<cCwN8(>A^J?#PD(mbeNdn`MhR~bod5-FHd0O9XHk)8o{kQJ_pAI
z9mmE+2XR}bHF>5pfO;_=<I&7tF!?i^LEpbDQJr@I2Cuinea*Mgy^EdKrq2SEcwO{Q
zG=NNvBCPqf49j=-<6S@4N!+HCVz=UNURp91jhETs(&Nz>KK(Yt#iXOV>?AMP-sU`*
z&FsQ#X7H(dSA*c-N{O7{365AHB(05=cqr0_sE^zWd+Hj&Ho=O-478@w6?T%Ql5hAZ
z?>4#$vU$J6%g~{}0OV@tV~>0R)-PENZ*q9h-tYkg`lDoSqpM)*%tJ8qs~PnSKMWpv
z75tZH0~vd12n-pcPkc@ie!>Dns<D85UaXyPLY+HwoY96U+Q!s%>ut%rd@btyOrO+W
zzXDeaDpA#>3jV&ZBH|n&ydJ=qkcAH*;gT(}nVo^MjA(Sbu1@1ysu}NgFS_m%(F3s`
zVdVA(2n!D97f<2QUTXvwQIpI^?+k<w3m?F?G9#94|A{Vzose0lN4-NHK=M-?x@U3$
zln0oDr~P@Tk4%va@qddR1?K#@O!m1lb-^|rYdSfM-Cw`&M~@vxxx~#yXfMzF3;yii
zeykR38T$@;rk;m`twzLu@j?)afSYd7i}=x+dhV`Z+}7_{c}NP<xQ$RBR?fdIG$T!6
zazK7)5{uNYSZrGZ_F+A|o<=V2<`F&iz2M?@vUhXIESdC}xn$qiRq)gAJa`O!%Xe;L
z4vwBaJo+F7!u)1q+{nL@P1on*pl1f;gZXxBnPLIeN&{$c#d(m%T$lLI%f)qfDDQY;
z4BocUBzgV5N{XJD5@in~awNu*NJBYTt!PSK+nvBYo}2KnotPL-`U^{@8In?W8CC^!
zp@7q(y}MLs>>lQA>?nX?f1hBA`~~bzn#UMU*U%&E9A|9#7?aN*fuEb3K>VOpwx#h4
zINnmCmmH5_nEG)LEg1}zSy#~Up*nSZV@SIr)u7MIm{czi(YRlsaG$xY181kfL#Zj1
z{a`FA)z3J%xfdd)+hcyoWz6i2L+Q?JS;?|7SUdR<V?Y`4s$W?b<LEa@Z0$`5eshBx
z#<J%#(HmoA@}w^bQ6_5zSNG*y+`L%s7<-;KeKo<eodZc_k`kKE`U9sN0wIhy;DT4Z
z;Jo@;(Sw`HwK+#adK=}nxk>1~!Gxxj=E2viTI4650r7^3T=tsh9E1&le|gM3;2DeC
z^yNt3A_2+C3S_z2-+ZyAfcDHVfzZ9|dDxsGDQ;elfBJs`-+z1gxGKgSn0XYuA2Wu&
zS`$C~&MiddR*VgB#QM?E98Ks)toBC0ZfzYRl8fL}G@o;>f3qaz1C?mo32ojvD<8{~
zLm4M4N>)3y2R7~80!Clid*{Ms&SaMwT{}mPD(Uy&tQZmT(lx~Q_L@|@Y#t~?>f*TP
zS5WnY3caKvM*?<D0<)`n#79XES6DKKNa{)O?a<>&^dInx=IRklK8UBxZ(`i%NZwnc
z13HJlMT@F>s7?O`ZwBz7k!eIC3(moR!;OfKi3<$7Bu7nD3NY`5D))@-9L7cupqWp{
zvF^(c2!42*8y66db#`Y^;PzYQ)Xw@0`_kcMC1bq>yFx)<F0Qg=zxMI1^UoxfJKwzn
zjRm1#zsigYZMz3y^O?_OhATheKju=e3<QD3I!L?H2k{R!V2iOf8npDFkk3L#yL0$u
z;wQM+ZcS9OBQSc36{Hq~W8QoVe8t?A?m^GFA5NwCXXhWdyQBl1&#-f)-A0z@(V(kd
zE(GJ8V;E!e5%mr3p{$YZ`|To7Wb&Q6EWC#;-ZFfn@B$jvq@vHtQkkJ#D6GHr6s5kE
z{Cua+_(AdnoIIan?QSbtxN9xEc-4$fb>*Covmw~mYvDQPB2@p*G3LiG2p@hI@_e`O
zDS18U(-Fbt9shx`&86V$H5p2K*&J7EJhnZ2#ATGKlCEMSI%=5_%^lGQo@_?)BkmH`
zdp_VFSPi5l4f))bF?Q(cF@`(b7KwI~(|A+XZ4Yqt$NC+P{IKmZoVLc4_Gj6{Bdw*3
zWpoAh%@a|N)gstA`x~3<3n^)lBl4E&bb-5&gs*uIOJ>HP|Hb7X{bnsgze8Z*IT0&|
znV?;_4~UO1;QS>0>4yzR;Lh=FIL=3x_}A`-&}nht_aYWrBCOEEwU3Lq8^>)b491<T
ztC%<9yDaP*%gy!t!w&_ISnq<d4mDX1cr|k_F%D4o(RyyZynv)ne$LH$%J?P;Eg)(M
zg229YsBzASls9hT(`&W4J!3<`dXo;-oBb22w#Pv%yA$k-6(D~`o@jY8zI<L8=W$V;
zdYoRtJ30U5R*$@k*9?t`*wLC!+V%?t`C?w&Hj?Y!6b~vhOfYQRE?(p6DU{y1&goj$
z;0OL2{1Do(ym~BuN9h#q7^6aZ6W+r9X9d`^-vvDJAo{MCr?rj6V5~ln8e4CIYaiul
zQ_Wv~Q_~_)EpCSkP1~?x>KhbtChYGa0OlN4Bc}rm=>(Sr5aV|f6ZNh@;vsvmFZzWg
z{ii^=p%+-Z?*yCJ2RNy9GDeRx!gV1U;G}8J@*Bz$hgWHEru#g$IzPaxI_hNb{SMYk
zkur|fSxm3^jngdRaBk57i0jRlh28qjrzW_;x~Ns)F;$bd6Ib&pogut$=0NViY%A)$
z=LMF}>*TbTGbWB_8w^QKK*KpPkW>|idK=h#DJ-Agn=K|`+wSqsz71IA(+h&Lk8(Et
z3&+Nfx1hB?3-SsqAbFPw4Zfua<+C!lFA2Gjf4Lg0y%I34VISCpQP{A#2+sdgriJ!9
zaMSfuP<{O!`YdKWEU^t97}ks_qQ~rcyPlJN)5O~)`ef6TB@k!%lq+Grgz36rxW8PT
zZpdkcuniXUpI16Y1{}pddt1Rjmvxf@D^RmRgDx6&2p7k#L+Lba6v{lgWMwt<T-}bf
zYuiEcs|wT(o00T^ww(FZ9;7Q2so>o<$#Km?m@DLA$PXows5c49*4M(Y{>~UajUg#E
z#NZ0yKS<Im1MSKRcvWCQ;x0z;4l7ddB%5h(t+OS;U7FNg@|SZNy9J{&SjU(8NTjfy
zyU!Rp-Af*Di4PY-?7G>g;HS#mdJPyb=L}9`-LE$)ERX4_M3$~T4yIn6FufS?X|p~x
z-eO9>D~4hBC0{(e?Icz!p5tH5k*9)1#W`Nv6hJg0iFXVtf|Q>hA>wu~+Ba*_mFF2p
zyILQkc|Cfa|As6LNz$DML71i)<_0qt;lvOeL(Qm2W)Alb|3&-B_OQW@b+5Z$gS&n-
zpLLmKmTNXbh>i^9R<6hPZUv&BYDwI@Sw42{L(aZepLhM|Dsxz!27{-cM&Eaeke0C%
ztHN1cp+ASd+9$z#Q4U@{EJuclnFBDTm)kP52Zv=A;hjw<(I)l;PI|#IE-~r6@y~%+
zeYTFjwDdkMVfU1w%&Fe7QUSVk4)IN<Iw-VR!X+mL;GmJpB(F^sN=`{RhoT6yGl}E!
zGVEo>j5Y0M`2r?0wxjQ&$z0M%d3wadnwX3_gSGG9fw$F9{9wV}(?=#@Q)LwAASSSR
zyMWZa(Itx#t%*m#8s61=j_giK62#Sv;0(S5qxiZm7e9ja#2=31C2~FReWDVv9~cU|
zvcIG7<Vdck_BmEwX3y(6%OK3r6kf2IONP*bcpiQRzTwOH=%H7kynHOEzxRM$mOT3Z
zx(Sb0vis>Z<b=vDpe?MyO!xDc5pF^bY`3J{jE!;8L7P-BRE6x`Pkhj9Ejs$N0=clG
z6q53uFdl;k<7@80Zi@&AY@L8(51CS7*GbM%i+M=me(-a0q$sy66^4y_1%6RoAo8u{
z{``u>+Le=0T~i2o!*#H!qnTg4?JeF~U_+gm6VZFzZ@APi76m82O2iqaeCfS;Al(ot
zaZT%yx!M-V%p{38{a7~E-zev&RuzLbn=kr2)aC^4S&|EijAM2FI!sp{3gxmzT=s=e
zd?&CuTVEX-J!0JF@dkX0=~x^ge2pz^PZ*{s<MoT#+*M+SdXgY$E0w3JQ$%z%%lg#T
zB;bKJtsuOUi9ZJ?!#e#EJd&YB1Y0X}d}f~GU6*Zz;m6HL#HUjJ%d}3$O7`YDryPR`
z_A^-zJBWK*-+(RFZj9IUk>8m2iQRL=Fsg>4heZW9u=xpk?`2HfMYX6Bx*lzEY8X%U
z9N34i=Y#fd#)M;8*br|48G{9+<*F%cSoDJBnIdt@Oj9bFTE?YC=itq4iX_~<7_CMP
zgos*Wy4Ay!xM<%4kDk{!BgB}7zUzYXui36CrXP=TdZ@Tkp3HS04eH%DVEi!l-;MLQ
z!N0@NOPI#hOqmErZ#iO>^Fz3uB__gyfqb6MKVGzbIdnt~$9Hf0krvxa@H1A9W|#JG
zOZh>lDe%W}kCaK78S8YEeZ#iy6fW+d8n1iNl=McLP@k`=bi%$?<aUQ)vhrLIIS0ed
z3<V-x!}u*Tx1nH2DnH3J6BaM*#1G});LI64>RC1pOO3VRos|aZ-5LsOEZb3hbtQLu
zl^Ok#`w#*|f!H?b6o2Elka$$*alVmXIsTJ1>3^d?8CfM|JWEe*r`mJyF)Wj<Q0{`S
zVJwfzoKVS&t9je|rc^bSg0#Mc`(RQIo3%`c$6p;Tul<bV;2IqgAM_D?pE#pGn=y1Z
zJ7QLOB#34W!1$L(Kw-l_#wiU2+m)}e<BLBC-=E+&)Kr1*qx+o2txmWvtHFwb&lph{
z&F%4i!klZ{c$MEzIbrf1PB`x+e%_!)2fYl%hK7rf_dG?WJH8(oT00#+y-CI`UCkhr
zt3#*M@i37&8btFya!JgUV)v&P<DM7u-mMi7Gx8~}WHYlyRW_H3c_~|)q)%=}Mj=RT
zsn^qZaQqqwD${Z}*~&f$AD;<}CWoPui6+%$jH{29nP{5Zf-3ARJH`4DZd`6ar$1o)
z`j=ba)^G)K^ime<R59Ma<}Ii`(#@qv*=PCOV=m&fE!{b!2<FSD;iH*6&Y=Cs(sQTr
zRR-e(2KVuW^T)%&Iy34r{0@|;R3g;h1f{S)I5(~iS2+{R`j!ZXa;)fb*7f$T&VvV*
z6!(V@AXC?GK!Irj7vK2{zFRAh1JhrFeT+PI$iG12R55d4o&!6j7F@9haM5IC^4~54
zI=A)+%(r-i>ITbjhW<bjM$Nd-CAZPXFO#cwy2$xTSdVP&C4S;mF)44J%)hji;+_ya
z+EM2Sd5KH-krl<D|67N2XOD*%MdmGP(7^+>zcBLUeC%q~r}>Ajq0P}JAQkHHu9G)P
zre&PPw$c_}d6qoUUXueK%^NZIc_%2#Tho+RPhk41boBXU$H}W`)53}O&?zsF@fTSp
zg%eZP8H^45gmEkm8pwX^Ql(*)7x|L^#B|9bQzG3n5n`Qc;SpnumYr6kAt8F?)B-IU
zSM*nwwBb8wI>+OkZX4S3!V5>#XwvGBx3IXV2<oRim8nj91Z8ENm@g>BJzn>rZB8?0
zKa9ua|30IGpqOPMius%0*j}_b6S5}?!Tnwg6n@<U5l@pil`*%t-%|teQtE5GaApF=
z^_K8AOU~n`fh?nR(SQam*oUqTOC+Z+Sd**!m`meo1h>}Bng~i0bB?x$V&N+Ropj|r
zk}G0jr(;OV|NP^P+l2Jbgh}WpW%B{WUDz;4hi*G9A|FIk@Pqg-yv&m)zDDC<-*GXB
z_YDO`;pgpxCh-Yx60vYf53UOv33B)g6yB}`Ps0yj)~iRv6Psk0`mBl5ia~5Q#G?~G
zkjBNdO2SW7g42yD7&vMsTCG#2abg8NMT_0_jiz9=f)Rd+$%f}$TGXZHH;P6F^8)EZ
zNp?to8gQwdyJvP0H>Eb=ufrxZZ;2T7Zq-0jbq}{CDhrw>-e6sUyQr%93{;MYFnNq8
zj44qhtrj&f+anCi(^A;`WHaUlwnNJeA-=F>o`74rF#MS<Db9?Bu-Gb2a8Sc}vE(-f
z-P;92o7cjl8?Uil+mQV8kRv@q&Eb$;3%ZWo&$Ugz!|gp~Mw=Ml&A$(E{AMxfUX%n2
zSub4ARtC}U{sXy&W>noi3KyU?u^i3vQ?Ek6jO`hx&vFOxiIIHJsuW0{xsv}n(U5*n
zcn;Ofp}3dLKT1l=Iqj&4xVWDUX0m;?GV?jk+;<l}x{J7sxG!iswFZONvAkEEj2p69
znTSSggdaP#$UkpuqJH{2dp^HoGuVgRrlAL5%5^KECp-_d+kiCJDX?C?I*0>pU{CU0
zTw7;GO0Ic<K;;DLXP?6KuOqn$p6j7v+*{D`Wv+|NR@{@+gG-&8z;^C041TUfqt29|
z@5W)!7{r(XOOC>f9tGM|agFc$c?_jauOyGN7-R3nS%3fyNSf$|<IP!ibJzn}k?H_?
zx=x3BH?-pqMRg+D<p<M@)JU+%6AWU_h|kn!nUDKs{Pr^f<(|aCw8!l3Ga(vg$k-lJ
zae2;WTU+AMHi)mirceJQegom~dHjZNKOnY$J8WhS>YzW$^mCaueYE@ue)(Mi(ix#L
zaZZ+G$}KjVk{bxw8D%UN@C#FH??J%WF1}UuCdkozkSbnuPKePbk@78Y-Sig<y}WQn
z<wp$6djs<Ih9qk|+nHRS1F_EGxUHGFDqhTk?iI<f$LlZp`la#~U+%(=6(%J4^gT`x
zb3@{OX&6YaJ#iMU9mPv0uKz!d&cv<8?rp=nIn63*(me5oB-DQHl`T`oH-uh8LP(Ni
z%9JvsQc2Q;gi?|uN$uxewM!C0l7uKpk|aqI@~z+Z7wqHMwDwx}eO>1%oY+D!XWCF`
zbH9miQw&Ks<CTvp>4%pr4{GCo8Fb^0fzl6WG#PnLtSI=(1)ll=N`{3{Ff9%`^roQC
zXl2?w?i9aIX#{8*F)rlucKok)C|Plo$F85>Q0tgBsk!qR<xOs(V7t)KW$0wwovK1s
z)ULrgDb~kp`XJtYgk?V`eZ&pp<;baEQ*ub8OozQ5LfcyQVa+lb^sw#aDt>EFSki>f
zn;ye<xlT0hl_p+ojAa-hpn{EK&~4!z&P9RUoBr7V6V59Ujoem_eeHbQxN7i`F{7SK
zpTd|Ur!n!V9LYB=056MSyv02MSj@75gQHpg|8@dD*YG?v_oQ(Ktc#y1a||UsClp*b
zQ`|QC3Vc?+g$5%us8VSj9CS=VlNDCH<|<%5j5jFyG)cJaVkS!8W3f${NRYe?;q+xL
zf{sLl@%jJYy>vy|?edY6Og(`iSucvcK@~z?y9o2T5A%M7>o7Rj3^Na_6AwK(Hk->4
zt8AQ#x3Y{$!L4WL^Wq36dGuK<_>{#5ZC(p?&(mR#)mL0$I26ylV0{M<XD;N9CHh!A
z<p&<KpJ=fvr0SnQpFbu1<8A}O?U5!?UH|^SBT3f#3aA7quo-nW*OSaL`|)4#`%w!b
z>RQ0%PCtoO+cIGNOk4akfn~@dj_{8bti>7oKj8;iX|g?%qVtFe_<EZO^;z+rzah_f
zL|&;v^QVfm>7g`<4V;BK8wT0gTPBRUB?I+sukhGN#_;%*2ll%t9L^sBzyAoKa?4*(
z3NOII;S`ium%y<w7<~GS@Oz*l3EJ?8F(T%|oKOwIC7O`d8Z&D3CJDA~*Chhil%h73
z5{Qat?+4XP9Cfb==69BZ?!q)qwqKP-MeoLWZG||XR10^DPvM>uao}~JS!jPY2Wsvf
z<C`;0__!lKVBgNGV8Hz6<~?kVEIN*MpM3eqJzcnPem*EPT2YPXMwm<gfVCIf%}!Jy
zf$I$Ee3c0NQ$aEIo*68ir%ytzI6~TVX&T~iRCuAH1x4TYaVxU4sYZY;`1+TCeSRWN
ze{MzYjSWGQaR)e+B?)Xi*urtuO4R1^MC|I|!AoN=*K$G(J?~gg>pb%vWc0zx&U=g%
zYm3FJl*naeGh%6IMqk?hf`E4$AY%4+bTuvGHQ9S4Z>ATdgj=DYcZ=}f2*xJ+RmSU#
zD}-?8_c(j%Cy4a0!8>=PNq22LAO7S9zj)*cj6EC53u;d}`hK*+$*jMd_Q8@4oy-_G
zh55oqms_!D!zHe=q!o9>Ge@bg3-wceL)5YaXmdFWHO(1phG+%jV`RzRCyb}ERu5uV
z{Ks8BV#wS!8d$*Ir?y|5V9r`8I#*T?Ja*^v9gi|`cJvr*2wx3IkHfJ(bF$=s8_o_^
zqz$oI5K`tY+{ZitHfP>*Zx-G|1JfS#__z`KS83o;YD~K2hw;)U-b1MA5TZ9(mHs6v
z>~A9)1CL*XkU!TrVOR~M7w-fS^PCNrQzZ}CeXBi+qD$-?Jh5Dzw8?jX)ra#??wAK&
zI-~iW>pnoXO(-VMF{QVs%8}dyN1^?lFDzEnBkyulY0#xQevH(0l&fuJGtOLmYGp>u
zxItVOWkD_3J25cXh)Q%)#5G-hAV22{jLR5C#>BF&{oc3yDCXgMQ`iM|8{|=J_MXi^
zd%3)|8{mj-4vIGy!i9;yz+>Mj*85h#b>6jjsbU%mX7m*)_nH$ar&m~`eU*0u6MnsJ
z6104C!nB3a!oIh^VLHnLi-NXss~MNt{JJ%9{>ZwSW#93>9h=*=O@!~E%5=vkP0~LQ
z&yCd{PKHQyNa~^rFbLG34?NV!gKP_;J0+IiBdW)>?Dy9x)0E`xi00Ds!k}+%4+i@h
z(6j{{$d)MJ<pt)%wKolu(@ja^-ig?!YD|k}AWHsO$EW!+hU8cor|;9%$uF@ck)0R}
z!;>E4s}Zs!cKIx>WX5^0Rn`Va(tsL&zGBFMC_adJRp<t35-B$ykC-7lD>lQy6}@P5
z?F+U#IHA9p6`o6ah|{FpnFmx`Ea{Nr3}?GQR?u9~C;gDJ_8i)%rE|W_6Md)aE4tVi
z(YR^tP_{A(3ruaOZ=NZ>K4?PPE6?-ID@w%!6+ZZk-6iWL?}dSOE!vQO0XuYBFiZX^
zZ+o@@Yc5RWwH(Z8n1Kde64U}a{xc)(`*S%#$YPv5`Yz+TDA6+SEBNtk9*$b|4F<No
z0F^uJ{Wr`Ayj=Ek(qYd)Gcp6)5__S&eKfW^cwn`%JjkjU(L?iUaD^({{f+75Ru4%+
z7(Nj!JQQhHOgZHLcM}AT`NdXyD`2fegE$XWq>_ueh~``1w;VfvY>%U@tvbz5I038N
zhmeYmr%>uQ+xt4?id+7$Gc4>T*LZOc7|F`hW&UU2<Uj*@*dGB4z1Ju;atJvyVF%n|
zEatYM3bg*|V{n$a3?=HO^uL*Uq$(knovjx*N#=gB-GDb2GTVWVpArnFNd}~Liy0nx
zn1{|jKOuoVrxxEaCAF^4z;pORxY9n1)NSua%fT2d%KpR~JDfy8;T2x6caU?dkLI6d
zgyM%vCE|rQd8=7Uv?}W|_s~m|?FJ*z-n9eHsLImdY4@<GdM_#rYLamQ!^p8dmVfJ4
z!?ABoNdA#_h|)U*1B_!WDqSknC@bNP{W2r5-I<UkdxoERpD~2?FN9GlY?tQ9=BQmy
z#gg4)xdS#Tq}pv4gdA27@3qN8zM~XR%&;PkOMY@TD_gi&^Wo?orGY7d=OJ77CLEme
z4RU)s!LY~@_Q>1^MaH}^^3$cxhD~sfw<PD7WBTH}UwB2ng1Px3ptd9qHGWOzyylPQ
zY*YlWwbqQ-{$lPp_aUHzH=(X^Bf|KN?5?m5E*7iNsvQ<6IQo!#Hm?MC{*Hmx<HKp6
z=O1id;>+F*Lz!RhBmOIIO58(lVZ-S<u08r2DxRq2!mF*Qz-u1Az3c!kJ|jo$v=4y~
z|B!Rq{26~%8IhOu_aHpl3V&`iqCf4Q<K4Le=Hu0(bEB3+K&lxBAJf2V_#sXz`5ngE
zZN+%TiV;Z1a1FoP@SUnU>Add`m#!L6Vagd8DWy-=cbHO(Q;x8_Hyk~mzQbNQ8O(KJ
zUIn*CVPC}u>`a`DCXbZB`Q=Xx|0f<(*uKr;=5%;ueIHEc-@pWyc9t(civdq3absgz
zFu+Zlwx`tLg4wm`ZY>AAyaKsVH-uQvxQ5!6BWTxFIa>7QB>0-MeRonh_k6AqsdfH?
zW*?eB@*+aKZ`o)_A2)=0MT`Xb?t7rDIE*Bn7~qZArJyG057)7%1GU)iv&ZTTh}YN%
zZoP~pb|4#_T@<Nk(I{a^%rww;*Ts30O~@ENkNGgafLmoT*Enx23??A-*zd<J2b9Py
zK{8fsHUYP?C){Pmnh@+*;`HWnE4W%lbIR9EXiCgqkbFDBSDoZB`Q}@UZ$1Gj7hiz<
ztx8Dhy28asc(9u_3d4VzV*ThY=wBNNBPK^;)sh`(bMZ8{F77spq}s*tnvM9$$&f?_
zG{7cy1{;q4g_X5;;juo;-CzQLWzBQk`Af`PpXyZF^ffHZ*p0$#3239_1`mqvqoboL
z75!Zxt~mG*Bgf`}<hvHlP5lBZO=L-P@($EX(kE9AD3iDctRwMqB<pb);`PI3<Q&UG
zP5F8iUD6t$d&4@cZkmQYm$M;j1M~1CE#_YDRwcpG6dL`#u|8lX%KcCysylcr59|kJ
z;d^vAYJoEcm53u(z=xhWgr0e2F#C)poo1qkmG>P{Sa}$H8jgVe`v>6TXh1VNhZDoH
zAvA5ZIhRM5K%RjcCWJhPhV&nt<NiW^`@f8TzvU~6TbfW9vk8w6A3_Y(3~`5RH5ND-
zW4f~+C~m&U3yu!*0u6mmrG6?X{?VcVE1S95Y9HXXX%Xf%HE?g86^Y<)sE{nYj?0qG
zaG{wYUDo^?)ABR99AkNWa3vpum)A3wD|7ZaF$Z@U<E*UQhCXk@d7q{7;GFRQ444Nt
z{g5IwH9W<}p9(NW{yUssWl1O6RX_sw6wFS1f+Os2*kghL^9}w6=_x)KG)#tu-1{cX
z{ci}BTstCs?rlczC;ot~1_oq3eSxBRSH&R*uP|<U%l{n3u<$>Y!Pv-l<==wXT+c}?
zSe8{ZX5M@75`7iElT|0<Gp&f|_h_+8ryPwnih^nV8!&LZ1}!&YJ+Oh*NL9|`sk4!o
z%=`j1+5$euT#j1%)uE$m1{YQ)psw;8K=No5XCj-zn0<G#eeoyG-**e*_z&p(v>X2C
z@~nM(AJuJ)DXiNMDyOYjen%huR+>?XYm&HmaR+uJi{ae_3u<B`;v^Yw#fv+i<C?=A
z*nMvpc%Ey96aQ9WiP#MP)Ebh9|H{yo5ntd*zCLwLtl$r?4S?<)+qosX!a!w{6pq(b
zr8g{9X~=X_E;p?JCVsJ?;nS^Y$h%H)(%#p6z`E;Pz?T1b*%eFSz~pN<eU%>7zdH!B
zJ->0@K{n%Zv*+Ti`*E(GCF~BhB<*GQd1bX@*nIe*(DV8y==C(9&1UC?uG`dT@cX$q
za5)#kQ#$$W8rz{+#|=V?DtKZ)f_%-BWnHKx!a(oI7<EGxlJnb8^wkeUddk#{vChf%
z5hQ2Y5SlfmgM0XZ^|`;Q6*qbrp@z?M3=h7_8E@AhuAOV~$HAdA(q0Tvt%h)NqY63l
zPM;2FCqZyVBxW(!XHN41=pVd;X>wPfpX=u&+fVRQ;(lOctvY=zRv}zv0v-svi}_8T
zphfErxLt2!?E83L5E;t9nRE-R3zf*elO1?riUN(5zQe0-y@8)UDiDt~pRn_$57=L2
z=YY*b)Nd;W--Uay{(UEGp8g2k-mK<&Za0Ih0%O&^e2i5FxA;?27!xVR1ANLva44P4
zmjZN&{Kx{hpTTDE_D+1e;4ZiOkOhf5ABhwDVj+YZ;KtmtprRf1{G8qL<iwq7oXuPU
zm9MUWhdawjEIY@AuS<g5>Ost#6V2JBnR5cGSx&CwCFp2(i_?E|1|GRGkIjjXT)fe0
zh^u`B`K5k1Q>;J^SGvH<D~5E%Cid3-{)-PBDM5!oJJ@HWgi&ua=v>CriCLgRC%@4p
z6J2^?;c+)K@6Sem))TO~U&_UgjD@Thx%`+rZ{X~{5;RF*kHO**SUWoc=7)WT!R^~{
zcef(3^|HhZp*P|6RCXS?ez>)|RGa+x%ChnIWYFzl6`ygt4<++E`SvHLp<&V;&g5m4
z@V1j8<Alrvea6we(yvDHkLuFxPeov9X-LMf-0)wozo`1ngk0>F;FC@xnvghxE{V$o
z-FF*7u>OL0--kfZ`b}|7d_G7jLdDItO?;L5Nib4;4JW*Gh->31+`^o_%a;x#M(>_O
zcX$R^m}bKLO_eb1?p`qLx(FGMuHetot0+o61xeu&&f~Hz{H&HGGiH5<#L285++s>M
z3YyXAf)q_}Ve{ZEmUM35R<!7lgNkj3;L48y40ADO{$y?JaaW)jZ!VxlSQno)<}n{E
zvd6As3A`(`CU#3xq4~f@-nGtx)+r5v_~0Mt{csAV#p%HM&&wFcO$Ti+0W7+vL)JT-
z!1iqy(PSV}7%(9i#vELL0bly~HdAk8pl7gEal^Frv%s^X1%fZs!k#*|n{NAo+ee$z
z&HG#N{LOsK9W<b3UthqP+3fxqe;2na4ab%CVpMGKfW|TpOiZ;VzYiMIa#0F=fAAIs
zJ2@!FOEBA=Q6E(qPvV#{Eg4{J#>i5XXeRNW*_=&Slmc}v8z6Y$IkX$`jvu3c2Fzlg
zLS9EGm$zdMXD8ML|7k~{d!;US=%E(rC@`RsC26o{Y7vy2{DM&nhS1rA1yHp>%=hY=
zz()TgnDcxsRQWWbC{U3Xn52t$Rz897O+Wdxl`Z1w^_A$AJB?dCz_PLxsi?D$@ptES
z!8gWLnlL;cE7)1N=E4*F`O%EZZm<Hujj230(Sm69G9Jqn6^M&ar2G@M+j7>#Wi$Fw
zGWrtg9jeCkKO?dKKM`C_R3c6$`t)4IeSG;T2lQu3Fzw_6uGC`~$@=aC9r*$paLb?X
zH@*fn=|TJ-FB9t8$#|4Uu3*xXQp9PaG1aSuF{W<|_gsAqU1j}@X|2hpJs!a3j(xmV
zwG^?mH6Y&)g<#t830%qD=Pc8<2%bOupNow34Nol)zIW)s7h)a-M~tw@DVA#&odAEg
z3helM974wY5C(tcA=C2<+T^|Cd^Y59mzFd#$6YDLTx~(Y@jXu8rfZSr>C)UsPetmW
zDuwZ9n{mGkV{)vj5N=|hvtyAw(H--d5B~fI+;(%k=N5N#Y&gSxKW9c>&WwUfil^~+
z$yC;ZT7x#xlfiF-Jvgv8gmC#)tlDsfk2}~6r$q`hK{Fc|sFN{dqQz@QGge05JT6-(
zN9}^^_=eS&FpPCej<R0i9am|R`e!I@{t_x&Vip1kv0G3Oey4c-)hLM4osBgfKe&lq
zhw+?FDrRd<M~`D|oOzuQS!u)A%l9sVP|^#{f0n|c0t?c#SB_jPQKX~Z7sEmG+c413
z0Hc_LV<MJ9`Qbe1He|c@w^Pw$lQu1Sy%<QUGU-qI2bvS&`FFmE?db;4ppw8lO;IJE
z=I5Z@m6d$=!d$pN+YYA|8IWCTjmU<wGgviOhW0F|2c=nJ&|fD_J#W5%WhQM<6JE~s
zj8>&SEit@e(_1dhu8QwJ<_=dJp5fP}&+vKJQ2M^lgktM493^7+=ZVSu*@0**-@~%#
z?nhDj+#cN2uS;-s7IYMt(Y&pD`LHFQz(4*tNY%=c4}FUCmnA!cH?M{fTXcw$`F@nF
zI3>ov!x(FC8pe0eW4XvDpj7$|TpdpE(&D?&Q1lo>e`r$u1wJ@SEKRwr`>46%8aiJ)
z3*S$glTo9mWB2|a{P`<+7@)EcYKJ}si;X_eebE@KmYu+5CXuM|L6Ng}cnc}Bd{EMp
z#|hR%aw=;=!Sm}?=A7ZUrLJnUxo8SEM)C%d2ZzzmYb;5w)KFTqI143XO+oVbiE!f9
zI%qMV7*Hb5wfx+ROYSm8<trzlSCSg<vM(8mb|2%inP2YG`r)+Y?o_Olu11sl33%O4
zpFG}bPM(`skl0<f__0GqklFcnFnw?hZV1c5dF4hV$f}8ZzQ%}J>7_7#T&K`=_%@gk
zpNcM5U&6w!L6$Y#!5dDw3?@nbyv>I}T%!;Hqk8M`=shhW_Wp>5o3r3R9Lowm-H&xY
z$3RuZP%yc)3YSgEWBIf#=$QD6Pj*x#l_k*-zvmr>j56Tg6g~ruOfSx6h!##PYX#}d
z&1m%H4=C<kjj>z4;o{wjbk*~>%nusJ<IA;Jz1Wa$7cYT<*qIm-Wdjm>WpRPN8=5Sy
z<}?g>es)MQK4uP0$-Plr$a;O@l~Os9WLShI^0W9KkMzmCkq7b7wJOliNZ}V2wW7GL
z4{kfi)87Y1V#=zI(CE7u?%S7w_sK&r_L(Y4D2T$m!gHJ`QImRYo6O1EHKL@VS}b@q
zjqjdf12b0{)4{u?AP_8P-uvgEsmH^ZmG|+{r+-m0^9x>bI)cx>SyO8E89U-E@Z+uF
zBxJa}IIj34#$M9kLR-zrqAGdf=lTycrbNPSLv7Mnb{rf&=V6O?88q`n;sCY1{Mu-i
zY4JNK?h;eHWPKVvhn&I^85z1wYXp(^R-;qdereYVeX>HJLR~u~T)4qexY3z`7m0v`
z?3gS(Juv~(CYo`QRuw$7mhHtOJ@HkaEUB6*N4IL)(D|1Zpx=z;AozTOJNWxH)a|<j
ziu;vmC}CZ{5(&(Gp#(Xpo*=n(UTkLn6r7kBH7hEcZ!c1W1MJ<nr0zDD+}g!AsKuk?
z?|F>+<j;8e&(Ojq8Kv7*X}Z&7n3`%x<3bFmmwg@c@*HHpALi7`wHoqoG@vAYCpYh>
zF*zHlPdq+FaL)H1!qQk>ntm%1MJL>NlaXTHXI&*P`Mg!Up0Ue4q`xE9WWbMbYnnFa
zHZM&5j+1vP5H8w^EVtICZx$+3+rObONhF~CA3mZ^S`U1#)+a6OE=R8qA)lk-(fjB=
z3@m>F-Iqpk7Q0Pwt|5o7u8tt}&1$sTq!rIxP=g2~BN`+95#pEUqFP4-)CONgpSD3>
z(!7}Oe2@!0hn|4Lfiy_yjE0mQo$$?l7=0RX2D6%+peJq|2K60>qN}5sFSrgpf6hX&
zT_4y4S%KH2G;R)K9C|H{<U^V#KuiP=?Tu0>+_V6*#;L&?Gb`FS*92RNpF-HscnI2}
zPBm9b(|ucm(4+Poex58#1W%^%CG7J|x;F7U?@Yn$lyHbxcm<7qHex`i9Lh)kLfh6`
zESFWl{BOq4)?5i6D-@}?z8~H-S&-4`#v}xk`TFt>Fr4iVhe8Z#h<|sn;^<PGHDE;(
zwp@Ur-nVcIX~jZ88%T$B;+J}PQq(#bzJ6hD#1J+Ynv{(iB5f3Db&6w;Z^4jy=46ak
z!5ZeBo>ay<ViPsk45LKsa_t2+I<Ld)J<JLDz6#y$SaJ`lhLQ~?YP4X=IS3eMjdu$*
z$)!sVu|{PZ7pgByI=yAkE9xxx>@uVrb4iSnw8E%!{qVcvEM%+8(OPjozKktK*~g<%
z`|mag_}a_o7-+$HYbj!Ist|`ixrpZ$x8vnQ5h!{;Rov!(9W?Iz#jh&ySf6(TY#LX>
zzMKGD_pc=>3a{eiwO63V{GojL_!6jWX#fZH)3|eME;M-S(^y&7>rKrU>hAC0B3vxV
z#&y?lXq_Hi5PBRd=BJ~q^BTxv`NeK;W!jJ)!g(#aE|lE6#aYR-{Y9xXE#r)+>bhLa
zWcT!X%QV=qScX(`aiBXygFemFBK<@Ij2c>DP47e$wW>j^k`8_QLxbK_e~9-h-hu6w
zMNsu|Av|)Hr@^9XcqVN@(>gcs^NYX0nz(GRa2*FjRv0ly>QC@1|A;CzBf)w#>qcDa
z;x^r7{fxA1c==MA8v07nz8$uB?X()H>bc5#4`<P<>Yy-H@SW{7lCU~!JRI92MTk}i
z&fahu1O*}Dn{1YKhw)p&`brqH{R2+9VNC@JtAz$b?;xDp2S#m-x0|X+2iInz*VQE;
z>?nlN{c_an?n^H1+AKaRG6vilOR>&g85&sseeLuwc-Sfhyd;{?r#uxe3RsuJeK;zu
zx&op@2N(lW3iU4(!lR|-5ac`=SLBAVy~n?dF}cxc$xm6j!Cjv?2&3^E7mG8(Ucu=@
zX5^5QF7YiEL%-imK49lmZq*TUvfh*3d2fEfgi}x9e5Ms`Q{IXbC9P;^rB0tlv7PqT
zOz~K^7nuB0hAf%*4LTI#vC_bjx+s4HJ6UV^;jBqwACz(dfmiu6hg7J;B(|^pG6pm9
z^oWAgJ+#xZ<;TP~!A8avedGHBzD<y!K@~NeKq?;e!&jl?(<$N9)z8p%+g5nwpihVN
zji5z;EB<$X<O0t%!Kdv4QXUtC;iI$p=B2*;^WQ8VA=k+Hcy2{6hh(Au)<#%1s6pF9
z&T{-|Rg!-=9hPe{-*RR@#8_wIq@s9i&8$Sn?}wrOKoV%!IPwMhRTyvMiRIr|C+nCa
z<kXFzg6T)_FY~wxtj37bMybL4j}%_LRU)Dlfx?A$k1_r}L6eZNkhIJUyc+wsu2U5_
zPNGf%&xfGK>9Zhx|2q!bYC#`_s?o!%cft8VwskMAfRN1vPI2maaI~Zj<NQ}(Lvtmk
zv-~|Y|F@SD+0DaUt5vDYsbqX;bPdHu8nnN^iSr23LoIgq_{;j{BMtRQ&~h7CGB^ks
zpVY`;-aTydSd6Y(4(M7K%|CL}p<~Ls!Q%5A>{c-3C1%$2W{4(H3X6le$23^?_&=xl
z%(HQ=&XgX1QHkCwEooruQwVTq<I=OI!=&=zr0R|$K05yZ+`2TmTLxBinZZL)c*u4<
z$3}6@4IZ4tHd`35s)djIB}Ys6JKz%h2}724i@l=H@j8(caY2kGNe#M>k@HW&IbFsV
z%246#=g83JZf&}veK}r@Hl`lk%ndZzn&=mP!hqHsu5RCJED^B2QjjO=`#ylUyz@}A
zw1ac~)y##Q8Z7oM%?0monp8KoovYe64gB6|(njYYtOJz8+2xJn+y5BQsf*bh;?o_F
z&2$An%2@P~L7d>}XpkgNf+VeFpx+@!*XS03=F|b05UWleuPwy<?aeU5)SOm%uZ2#Q
z+o+p=1T-z|P;Y`hZ4Uhh?G`TPH-9b0v~g`hgW1-kdB+Q}ugi6~FnJk9rEh>;y9`L=
zp^X@q)(5kD+;E`pK75I1e5*&6)KS%hdQ@aX(b)0OO)dENyz#J-y-(&aKY3c>ckceV
zS}>_-;oi?@jB@P`DEgDgEh#$;f(w1TB#IN7GKYt7ej(iC-l2um3yc|7fP0JMFj6}R
zf4xv6Su1que(x$A(o~LTT&r-W6+&Rj3%IjYli2Rs4{6?E!uqU_kmt~ZsWXyMiN3-$
zaVIcs*`Qea9%I?$j)hsainM*tO3<x13lBHy(PbMxgUyMpus*g5iuMShHDM`Zw92r)
z$}P~yIl(m<8ItVfhO~F@28{73ggiIkf{!M{;kbXX-{dXV9e0{5nVgSdK8fJo8IP#3
z5Y8+WprVH^m94kI!=t~V&^8pJrG8-$3)8LjP$sQxH+xvy1N=ByOc2^o!8pqIot33k
z9YaviZ4UkS7_T7U9DFFvM0NFRnC>(ci=S5G(dRluI<OK#zA1A>-^Su8`+Qt_l=+cT
z4N2afV|?X7Jt`bs3B5iip^o#%zYZ*~7OzdUFG^7_NwT=VDw}VNPlIYh4H}r42t`93
z@L`x0*&wV!V+FP^F)swGfY0z>)dSpDcLAK8dE7R(6_uucfG9V{o7fx;n#Gxr{6U)j
zR#c;2C*SbHT?Mqx)CI=HX%kQB0cbS~VBC+_u-v<!eHV$0pIrs3Ys^Xcozu8=hyj`T
z-i(UtjH!dS0{zuzNzK%~uq*8$#7<RaezrzOL1vJX?xZ>R`n5bs3#;Y=J`DqJF9lrl
zq60K!<UvwAo>S&u;B{4Ls;IpXX5Sb}d*A88^wEl>=1dgtrTd8&9i9kp-HR}3dLY#A
zXoM@*G^wE3V3K`MIV=s;Cux_psiZJU=vLs03sRNH?R*Pjvrme;St;;SLm7)ebVE3D
z2jg3;4d%VLNZjLe6@BX(nFr=9SUlT=ZU0^a!(AJoTlYP`!X_2=X6lnQ;W5x%Fbf3P
z7deSlsMvVA4jB@D9tEX6#SyEeiQs^ilU>g#zF+qy$NNi@-HPV)g{**e@D!+H%3^Rc
zJ_+)&>(JI(07YJ9oLz4=n66Dn-*L<lwOg4MEM?hH*D(-O$^2#FD9(ejyY8WOZpzww
zs2D7Zk$+2}?($~P^0KDk4%)E(Tnl)~l=11y9pS|sBgOz&iskPayIEG1dT}p>ies)o
zKJ!1tN}4${gSU{w`0oi<*#5`qGw-Cr=6$Tsmai9wKOLn>hiw#d%|62C=33-@1>-=h
z9mLryR^eR-GqRF-O45`!!L;uVF!#<<2)SRy?KsGKbUEo*RS?H3{@H+rik+yMCr90W
zl<>pMx-n$+d!Z=gH(pqK4Fx*xaN%AegzsGjl92rnt24lbO@0nlzm`IW_AGSmzkwou
zMX}@SF3i4E4Vp6w;r93uq|nX{b#Hv+0~Eh-x<>an$Ala(*%J%v0=0<YA}hF{Du&-X
zXMxwtAyoA1uUHr^M>Tp`cj;dqmyTmOvX~j*yZ#k;AHIfTcj=I07A#+yYEnGW;REQ_
zSyQXy<|yxyj@?y>eD>m67;daUPUlD9hx{m9o-QETRsJCIT*W(uT9fX?1irC412tSr
zIFX|lH%(6&>fDZE&6#7kI4K1eMf^msjlPg|ScF|l>NG`r03<)ng^T>IFgL^v{HbR^
zjM~0ohQoEtdZ>mWQXD@^Bu$S7sgd<Td61E2$e18K;+jicyxo=vZaHK0OjIraGplGQ
z85lwHUXSKAHdW$6{mU4Ag>`C2PsW{p4QQydCe1$LgYpxaAh;tIyUMSD#>PN?H1k<l
zrrpL-fsB)F@dAGP8{iC^HdwadI`$s80*(p?vE{Wr>y*h-!L~WYN>&oKH#o;{3TAue
zO*8npH`$%9jxkSsnWM-sTHGUShQj@GagP$aYq9&W+gn@K!(S{^XEUGXOf7oTA%n4n
zKSJ)*JXpUy3tYEG@sf+r#QtN3Q8#zw-Y^&C8{-B@+?>MRtXHw!MUS>-H(?7W1|^&I
z82;fVzvk+D#+O`y&f+T&GSnY;iJF*S;40s#zMAb}7ee-|F4)w52P0=a2AhkYar<<2
zoUX&VRI)6y?)}3t*4vc#-}eSy#`xlbm+@HUWJ4#)BZ$n6gchGz&x&zO1C`rx+T|TM
z`GW@OXABU>eF=QI&NK`;RmnfL9!3sNu*WC6*<M&a5ex4Pg4bVlJ|b0@y>*U5YGopH
z$2D=W$4dCYX+7}g9AlL&5%P;{;xKE8DWv@lX0Bd+2uhoPm$${?yp6g<<av@Scy0`$
zokny|fj`bqD}pS&0PuLU8Gq>vV>6a_P`5K3WQYz~rq&M4ecLf$MGh~CJphNc$r8JX
z>$!b#iFk9g3~|=EkB18mK!ejfY?)My60KmNxtt>Nv&3?0*0&%n`!naejd4zXKIfVh
zUva6kPQ!?oX=rc#0g5Co{Oi%uq<jM62=8R{8`Po+<37Q<Oe2!_dl;Ycp1EAyhSF({
z1RVF}LIKZMXBjb0-8+o=yfFkKgKom0<!B5k|0QlaeF1OSS&(Lx4DQc<Jz8C6N_&(q
zVZdb{-v7s64E*>EHGh7BDyxGm6QoWQpM2r|ZM7ioQ~qPKqqEF|kOO<B-GuAbb?7rB
zlCPfX00!@sNN@2J?8~|gyI5A;tu~Ddx!TTmep$*ofbYQ2X&Aoje8%{jF<i;leE8k7
z6)UFA$Evzlyn|H(y6T58hT?E|^Y=BzE^5KTTggz@5RZB3i?P9x%?+<+z$9)sy+2q1
zHVOMdiMcofd`%!c-3`6p%hH1Rlc3`IcPzK8gKo#wyqm@??o88hCg%+Y!NslE-}VdR
zitRD~Qz{H5`ylw8M=zHcUgx<M^O6|j=LM`c@Ch-gC!3#@W<)$&_38S(e_^+p6bZWZ
zifh-^p`-4%z%_v(Id%Luy6;|u&rb{^Mf2}*wHKSgc$)^bpLGb+Sr(Fv8BR&HB9RgP
z#Wu#NtBrgP81@@`R6oG!DOPlGz8nn^ZQ`>wF2k$AN^~yEk9wKu)3lH2d``(r^sMg2
z6#p$KJkSUY!%jo&!vL=Q`djQjKMlOjE`s{MMX+(zBaFXe3Kkix7c}D$A3I_wA6$4G
zmfNshl!6*LtH8Rq?pr`{{vKHROM&=RjYPZoqrqgeIv+fLJL_zo!tUSOIKGvglYfk9
z(3-cr@3c;cC6>HNRxVDxUxxLMRwL|mfiEn}taK?B{GRFJW=@?vtzdj`sZ;14+KGiG
z;W(~DgXCR2fnEoX3cDFOJW_Qs>+<g4(&EQ*Z<UORhG#sEacu(^Idj}&mJ9NScjEQy
z2^h8C6WjGg*y}PBr>K|_lLyJd%S+VBH-!?Em+Jue+sw~>@wU+WiybcbBp}u&D)63t
zDo$H;2*N)9h407Z$%U9CSj(vrrRD^<_OA-@v+l+FZL;)$|4?eH@(lCnG|uk}bC90-
z%v*odAljKJ;FHmZu@_``j|qFxC4N30c&kSp_5?B>KqKF1(ukv{3+UqjMOtdBO%h+R
ztet}%jh<#kbS8g5eVgOZP|}DA#*5HXxeT6)mFU@nmZXk#hEt`A!DK~;uxj^Nu6DN-
zJ@MR#2w4Zc-^~})`<XkS=?LGr(1`k$dZMpJKlD9fJpUE4C>ZS~?yd9&kMU2rJLC~o
z9yOx>-4+lNzihtS#E<tVc*<ptJ%e`DZp_^h1!`&ixc*2Vl<f?|*o9+x$5xhCTKi7;
zGT{evm6vc=*zCWS-K~8J6tME79&Kl5#f1rz(9ne4-|F5u{)n(3%Qtl3;Hq1oxn(Vy
zY=0&U*>{sO5%vou?)!yaX%D$~?wRN~{y7iQD)doAIl8#Eg2s3!-1CkxD1U{b*{?Q;
zH2eq|>5aHm{W-o{F^qUOSkn`a?%*r;2(+nSuH^39Q1pHl?3{ZFWfvtv;=hd5{rMN9
zxqRZ*&Q_+0t5k`|axZ6N{Db@WdKis}*QF+oyLqX_?0a6l3#E!wNQy%^MwXv}m<bXP
z{!B)Pc|*}(5C<BYZeR!|bNeqFG7r%yh&nqB?P_jv_jkR7-J3$tJ;#`ezJI{N<^l9`
zI*ieI$>?>27t1=&gK4j(!i(q3g&kOr{*CS^`&S3ma(XeuJ=Q7B_M`Z@y)k*)BuDID
zsZg0D3*vk-AF90^;DnJSdBt|lf!ljQ{QM%UlaeDQ@9M-Fjwbxq_EN_0ZYaz<`-;2q
z^ARS^XUw-mBV2d>8V3A0&-iHqFnFs^G%nv@pQlG%zImem<qPPQB?~r^7+z+(Itj|k
z;^XU5pgp35kGW(`TT2XSb8i|Su=q1?k~@rB-}M?sY`=_d!`snqiz|0?ggi0Aekfi%
zj4VjIgCnQQ(Lbw?p~C|;5PZ2SZe4H;G6RiC@VGP3Y^=jyVrTt~X)Ig1vWAx{XI-OQ
zIa(1q8pbvZp~{2o9{lheK$s%5`L?0!MFFkx`O8fm%J>=!&+xk5(O|>&PZ2W|$V@ip
zp0AY+f=WrTThuh}?oD%Qcg7dH?oPqLd9T4v{|_h1lwtqp)ajdjM`7;k#d!Z(4Mf`Z
zL#K2GXw>ZGCk%UmCL39Yu*M3%e=wkiw%_pTI}<W{!AQ)$x)qDdhLE&{5yIAa8F0LG
zC@CGUKqh4C(XpEfupuRfGs)SCic?zA^~G*Dogc>b0;b$^Co7^BejTn}H6Rm&x+ELV
zV(Q6M{QRFfd0u)D!J!gYIJ-jVFlEwtGZIwlM?lT`9?r!f39O%)lfj=2P*Ao7lYZ$?
z;m{CxEY*Q8PRh|n!C`D0T8bWDk{ORG3z|E#At!n__&Vmoz{{IB+I0v`=-7_09w?I+
zj<PhuTaoS`WIpv~88Uno%O(c0T*0{kwEb=k^^5L<;aETLsmf$;{w6T%JC46xGcegT
z0S}#Lvo@JnX#d&CxlMD0=K44YSvrwV`sXb!&%TXzi)Y}D5L2o&@eTA>1>mb$0y>!g
z2xOS^3oFln(GGJO7ZHe^UANKav;iHn(ToO(7r=%1SQKb7Uu5uN=vLXlm-%TEo8%s@
z;bbISmeVAixi8uG_fc4STc7-F(4%vBGrGL>DxMZR#(<syZkfo4*8kR`CYuwv_$kA|
zW}y~{b_8)65AX9e{yLC#tdhULGKKkvnn23UoE&A@DLt_S4>7L7qY-knrgIyN{Hj2%
zM#_;%Nh&1PjK@83eVF~D374!$1<jvVq2n+?&!OzMdb&LyYw?@+stOlJZfOU%Ya_Wc
zO55T7r&(B?JqBZs$)j<H6s=1?1G{;1V%PtaAE~5Fe0!{LWqSl_!#iB;Doww?F2*M!
zmYul__|w&xBo4@uoqJ+%=dM`nH4TN<_tMxi+<|#5!#Tn03B1Xo0Y3J;6&Jcwj<n?+
z#u}|+-bYIpoZU~ukO6rjx;GndnJ~V({}sqKk)`u>+d#DMy)eU+<+n$i#cGW)@P0q*
zcP4i5{u^D<aeps<d8S4VM=pf^{7LxBy$Zbwsj#H024cIX!}?wCK~;w_DR0Ga4_U`@
z)Qq!WQ*@H=PHExXo(gF75)nxE?!{j3p=_Qp1byrK;56fi`jwA?d#saw@Y!2vo_HE&
z&&-1Y_Y3%i37o%gQK9!LOR;L@eBAA6N@JqCV5js69QI)-abG$VMkVm5Gv^+vt*?jv
zb+`HM3%WGTqC{A&tBX@Q1jH*am!I<J5PGc-7YdS=IvS}0N?tDH1i=>_OM7|rUe<!?
zh8G~<(NeB7L5m!`69FM9=}yaK@=^A}Bp9J-ME-eiL_}jIiC?_9hpB7c;>6iEK-StF
zJjC}ofvFyj`fkR~<NZ+o_#>MoAA#p*kKsQzOvr`lwb*E-2WJ<l(4eexzB}PxSmV<Q
z$A(yvST2Kg-v6NJ%VI7hD1=M8YX$q=SjH+$no8>Tb8(@@wEO-6mdC2*GIuMGKB;sR
zRcNEQoMqce*YiK#9mmsn8&5>CdDJOO+PuLDO!8;q<TPv6_2|Q(m{Z(O#(nGc@q!qu
zzaVqakQAg$0uR+os5q{NTYo(mbJJF1owor!^`A0z<yqEfk}@xepDK)(^9R3g*&t>!
zj-De=!Q{jTbdOV^2j`E$kZNTvq$;uabYUrWJu#xOt}}VFp0AM6t-!d5L+SJ@M{(vy
z#(QKr5zR9{7$<Np^APWcBDo7N``|0^y(~{#>b`?#;1N)$lqR3oS(AuHJ+ddL3Z~38
zBRwx$;H7*$TzxA=rstO7fm_VkS2_$#4)em<$p%zOwi&NBj38d+BRQ`kNAAKEDH?h3
z9?O>X3qw*>Vd4;V8ou8YvYu6=To~)K-j*Sk3V$)~=Rab>;SG-4r@Uc~<gdcn)gM6R
zgEnS)wDTJJ)3E#JYhFr9g{De%L;mar=K3kefOS1w;DTPzXt~c<{qv5qa1FpU4g(M?
zXAenjv!MBv40l+4E2iyj5=sUXK`?B*lX1HN5ha??_ifC(FvX3hevBLb!3Le71DCaw
z!M@2?V5W8`mY&VR`7g|=ZrN8pI?RH!FfVqRS|NY<ZaOG5=~54!JWP8e<kolI0v~qo
zIP+>7XbUcaZqOu1tNkgQXjcw<>RI;dy({;*NtbkVT*9J@2cU~(-}km!(EUHzo~cEX
zc=3<9Z)cc4;LuPI_)me5L@n-LcN5YevkR|DtI^u&p%AvAf#rX8qC3kR2!3wj2ZFnB
zzGVV%YVUB(0wvmK`xagWv)$)=3%ae&khG+CfN;nUI6Aiz$4J@G=JH^E%z}$BB+!D`
z{(A}}ip(uFGXYv_RMDm9Jnrjzi^ug&<Bax)IC8WFQB3UMcHU`%vc)!}ee^tZjZflc
zfFku<&-U`CGkA}xBd})NYm7Y0Tm_+}STlPbFF$@8YV1|w1PiMj6SObEiUq90(&rAX
zhmU~t*gA-7xrKgg$58_#=-J)6q&3YG@)<**y(<%z9Q^>I+F-8d(Hp!sT!v(Sn1(7R
zmcb2MQ{rl^fdBTY5%-jX;C$g0?0fYugfx9}T2_1nNKHCQMaYw8a|bScmo&XGav1SU
zIS+T$6p4uO)Grz2LAA+yT&k+dd_7nc^G$(zHdh1s*+YNNcYL(v5ag|4cM9(tyy#IP
zccG*THas6e1nY)S>7n1@)I)a1|5^wc>^_!uq7_}Ttf(j`hO5c<;J+A~l0BFRdW9Cm
z@|P(QSh6|1%t<KtK9qI%72w@&Z5rJzPhzAbuvuG{YE1Wmw1@;SV|^n(hw%_w8Up*X
zFEPJO7KqlCi~D7B`H_Z|><#uA65M{X%%BcV?9IoR%U7YXQ5Gl7WOL$WiCE_<0FkVk
zuxM;PS1ap}A@&P|3DO~WFwU6PbSd)P{u&r?ErvI#%n=8)jN>Pm)?iNCA2iPHMNHMA
z{qNHu>unilUta<Hb<~JJT1^<2{~0yDZ{eRZCwE@j1U@kD7tA>N6T(A6xva`|jATsz
zALG?%qvcaHYN&=sT_b4sJPqhFt%sVXS=`RrC@^`}j^V7!uyy?)UcF;ZC)*9D8pDft
zx9=jbIO7PKY_5Ohi7E+Ss{;x68@u=J=I)GFC1fXa5BQjniHnt~#KKD0_~i+n-y}s3
zcIeaIZQH?uWh4T|7}M`(ji}czS2Rpggobi8nm!>6${0hvuKPS7yHhqy)C1dGb$aYc
zGWP$@<!!nudFM?vAeuN^ys%$^zH})EIdvOiwR;#96t5C$*=duTrm3hlKNlotm+{$B
z&v1rbEmrQHgCX1haEoh0QGeAh&`6cxL)u0Q8wF}q;Ga=^dXF_(Qkw;y-8mrrBoyUY
zUh2g)dE&qHIm~?^hobBD!q=t-v^&}a$9&{rr|~n)8d1-0e4<M(mnoCdx4NX+NQAo6
zbs+tSHbgSFT<k7QOc@-2v3p~%bHE=OH(O$V)KQ4DIfp*!vS7e?T!+%-Nq4LP*6UQm
zirLF>Vc!5gx_%lAf2rfO|8z+O>q7^8&BNH#&)9H+WgX_)2&-!+!O;;)WRAH7i9Tsf
zD_0JJBMswMd|n2zE9)>;EtJpu)`21G`o$9S_k44g1&uwtfbA$UIeED{B#bW|p3c|;
z)8ugR9to;cjYcoMNQ`YWrfxg8@gip#@xhUWIF`Mm9REy!&Ce9cSSm|v)w3|qH;N0`
zW(#gpLU{EJ=EU#zD6pB{!~Ohr6r<DRi9xg?3CyX*zn#Wp9qY61Jj%{mg?s!^<48<l
zd$9s{DV(!$1Zh!yiYfMY7%L@FXz?rp8tQxasw7=FsJ;*Fn#^f{$4*$h&x*{=H)NT$
z0#2}_2iG(TnI}CN)7xa=`U?YEFJB2-(JI9M)*RHP2{7!v0_j!IN5O6Z-*TuKdiYon
ztzQqndQ`}gehPXkBTylNb;Em~VzyNy>i-;zVZ9%qpvZ_eT#n#YZ<Hlbb^qaZ7cGoC
zl8%Cnd&S8wPNT|P=6YkU7x=IqCF^$ydqQ$y@JBPGZc4||Masng3wwuZwu;~GRiSO(
zjTpk_0w<ZLOzH1E@Ty4Xa@VjKn6ZT2y@Vj~X~Xx@j4!9?0A4}6I6>D1r(v&5sP4le
zESUTle{3)%bA6?-c}0TIZRbCHKkJzJyw3v5WF7L!yAs`6)lpYwDrkfxa#qLluvsBr
zc&G3t9$tAIyhjhCzdxKpiSJo%&HEF~+ZSD|T^b8s-`8;SgFZso@-SG-m=s>uRIsck
z10kXpZ4Q~C=)pKnG-ovi#^l3!wx{se>H^;8bD?>;GhdSR2KMyyL%?xMSfASrZes}F
zwdyYT9MD6_zkYD7br=l_YeK80NOU??jj=O7^5Gu}xwuWYAU4be7AI<uTs<~7-gJbw
z_-abu>{);v59=T`Kn&eG<=~vX4vk-|2(2GILB8}izWU3~+eK`SkY9&2;<4PQsJHOI
zn`H|YMscDqIh-IlNqFJ85$%3b&E00q9w{ADdXMF$eutRj`dS{XTx@98kxfvw=|0YG
zdJi67lQ}n$EYzQNK!M{kZq?lLxGeK43|BIvrAIYMNSq(9tM-X|^KAsJa#jE@k02CW
zZ4lbB-eHk?DE~M~mkI=3H?w<_FfZ#5XI~Qm=YQ&wg<eshsFKes&KydgU9uqSN43J!
zHO53|^9r=vmcp&}yMeiTAHsb5wfHJPjg~U6npxRR`14YeL^|g{wUaaU?@8l+olqmP
zepb}Rtp#JFoVi<`hD2647S?=WXXV&iDA}aVmGP=X<?wkJnPf%q<|Jr#a)l&?H++&P
z9rRR>;joMm#MU?s(%=KX)9)-yo2!cgy)vQh-ex{#sy1z$s7XEFy@3tij7Ut|8oa3|
z&30a4V$Yo{zf!mf_v-1Ay$RvCyH|raM#>{bn&6VJiIC$Q4W;xkYJB?5SH&!cX1Q_P
z&+7){;}cC%`*HwwF6qIIvo4^h>%DN1i!zBU>Bi@uW$5A}0d4nkhRRcy!ROl$=#345
zX6p~Urf~|0MjpnorB<Zn!CU-Z-j8PPax|aKCcN*C!){A;&QOoQ%Rd$9xI~HiY@3IY
z7ad$$>tjAnON#C_%*WOkU+@|B56Tazgf}hGm?Q+SpOu0+U&?Te;|=HuXFZW^w_pX^
zlf*8c&Wr2}xXk%dMA3-xb>)s?Nm&KsaGk-nU4P)-HC5VG84rb%E$HE*)6g8FK%X+!
zRh2jpY@{jgm7B^(vH8@camaZ3ZM^Hft<1+a1!_(f@fOl2fMIsgt=gE&tM%Yw3->ao
z!U*6Vo6_EOmeg;d9OV7$%Db5wLie4&{J5YvtPgz$4ZHKX|K-V(Cf4v0$I)V6jRsi$
zHVSo8ieZ<fE^W_?;u;T5!2axJu4-cw*ZRy8oGh8+^2J+m?v?c@@JNTvrfd!;o55wx
zPUTc$4Ol++2C7_FN8{;cWYo0>5U{SFPx<x`o-;;Oq;dgFi#Mi%e<pK(hpLkcFFMgz
zD;j)z&O_%vjo5g{4xWwFAc{{N;q`P?Vi&rKYj`t&=aiJ_nb*2hLqCE~e>(yK2ZY#^
zn2ya&6SyEPAs!zwobIdFL|>10X!a@<@^)V1YFc%;GlBqEpOpuW_jW+@m=3Xb#xW4(
z7w|FhYBcZXTJU-Mi`!{<7fg-_Xxp5V=y>)fm(yhh%WMy#zlJUik(HtUM4C}cmWdKg
z_{PmAiST$J56kYzk{<SNK6axOsjQCzm#A#$c<>*3#IXKOs{=gfW}V&dCt!)yEtr>4
zhoXXBan>1M*m<`e1P%*|id?d|us0XsGRwV=n%991{`Ke<c9_?VJBus++QYZ;x6ml^
z5yVXY4P(wRhQfi<?7iNI8b{~wD=t{#p?k_i*HDd4-e*We<AcTR&ch+=+bi^)#^yXt
z7qKwzDXglJB1R71pm|F=Z?k9##HcUDDvxK}%XNPtw}FQj4dzsvI}F0F^&qX83U5y5
z!;}!#scGJXsqWhJc3TAI@bgh8XA!R0We8Q5rD^n69eO3sjC|9&j5^!5V7EQ+7lzkD
zLxMk6r^r&fz8tv0vrNn9Cm6os3~%H14hK8uqicjW2xg7sE!E%Q%-F+t(4_|@i6$6+
zY%11rjISEqi#g%TLH}<DY+A<rQ$}p}=U>fNsJMXSt}}WrIt7If^{D?*3A)K`<Ry?T
zlplT-6O=cjf@>EBODoXyVi`<!DnY@}*^uQcLmi70V0P#!$ehFG6E#b5{BX9js+-3r
zKTxHc3iOD0_-hE>@&;zqThoAJa-511hwHa$QQMiRnErMh>RldA1n~=a!I+y^KlfkO
zuk?p!2i3@x8>-~Ai7|=Y!k7^AOv&z*+QjgIC7ikFi|J1m;_{_ZWE*oYg#CREEhoN%
zTHi3LD^Lde+6Un6c@cWfBw{%11o2Z{>bdJN)bs_w8;$R9f95{C@mPv@d^roggI#RL
zxrs~gpN)=wTOqchoe$3ubIoo2e0H@3ZfEb6@|Em&f8+;{d#6E7uNf2Wt~xP#+zU>(
zt;i-L4PrB)ioY-Zi`_ThaqV`OIKg5c-tM9t|CD)0dj_}RR{0);-$!sIjYaQm>R3MF
zIyBEs=F-gLghAXzu3>r(m)095mi!+_XC6=G_Wti}o~OtZ+95Q_r;+{Ks~o8uO&U+B
zB&k#?&5~*-NkT%Bga(ojlGx9^GNh7(BuSE#5{goi#BY6nf1Q7NoqBEd^Q?8>@Aq~6
z?%^wL_`<6$O)7-{!QDNE<ol{F@O>+Q>hGb<V?P&{-2Ve*HzWCno5S#T33D&U{s3c^
zd%QSTf!b@Riz1#nz^3SasA0LtuVG%8_0tSL_m7}4B@3|6bryQrmvIMGS+{eXx2SCN
zJ1l!O88;n^LkFW~STIVTG`;VHs7C^t@AiUk)K<cbG0H5vmoFOgE)d;09hxLJ9D_SQ
zapO;^LHqnjEMq(n_m9P#zj6eu*mxUa-GuC1Un~-jYLlF3HYRWLjfl^TV=!jWh@^))
zLS0V@j87EOFQ@dVU6%~eHZUOCP0TN&$aWzGZMZ8~o=DX9Vpe$zFY=6o=Ks!1a=e4k
zHSru~EflccxFR;Tg~Qz`fhdkm;BTsDq2=d5+|k6mDoblXaJ-JMoR$bfSA^tYVi<&7
zzKbz>cGx}cE#JPd1wLj>VRNts=1m^5on-tJ3R!RJtHLN6UmXIUuNhLCOIx|hmh1R;
z#|V<EnTQJ-jmiJw)re@e4vo)$fWC?b)bDN!O7e1G;xg9ddv%EOdvP3>S}D-7DwfH5
z>c<)NN5Cm<D<X~eq3_>JUZ`^bT;sN3(plE$c~gq&7C~sbUzr9UItJUSw1|?xluTX5
z`h64XxIX`L+_&}^d?@~jPsYj-4JkEZx^^F0+@6V=Zl5{t;(VOeoeWXUYBV*#99A*s
z+P7jA8d8*n7e6MjY`|Oic21Xc+sE^@{Bf{)^PLMaErVRU*&u#qBl`2rnED-j15??I
z%_1@vc3w!wIG*jxJN>cFq8sj>zJO_mMkCHqhB?zu;IB(YB=w>Ve4Y}4QpT)THn~-l
z(EA$GWZ#0~!6$qY|C(=O&-d3m3fZ#<u<86qNS>lemt8syeo>?88A&p_jGV?5-jgQ>
z{$);TlVEh<ZbO;tRqp;%OWIvEjkhI=Ah*|;ivRV)+UQ-td#X{xYk9D7f;t(`^7=(x
z7bX4MqH#o|Jee^z7nZa4S*_bh=uS1G*B5KBEW9$Bs0e7#?pEB?@ec;w`wcB>^_Y{z
zvd<YEoLyfJoAZ6)Q|xX)bb<x7nA;4O6!eIF`XjD>_b=YL^%yQ3c>*U5dkP!UmB{02
zT@uJ<_148*=y=Q?4A{@MydxX5n>0welQI36@(8Sr5ekoJk^ix|u-NOO#C?nv?VRQg
zH4B%a=G#=RJG_AxSXT1xQ{MBrG!@I9vAOi8J)ElOB!0ebM&%dX$7_9t)Wu{ulr2%j
z<L@NcgQKZWg)ZHGRG#t7Mbk?L&8hImLhk%dWty~e7iz3!4pPZGEQ%i^u|Agq4~r4j
zoLR{732h>;k(c=_wNTE6UV{4CQryt5Ljr$G)7Xi|wDFt|s>(ZJy&c;jH7J1cB7GY9
zFqwJ!62NIdj`p5;1e&)Na{jC9QPB5sdeKE|k>ZJI@L*gG#_pSjC7&8WlMLsS|1^P1
zzK~mRJOn3FmKmI~h-(=efeQ`a;MAFUtP>T6Hk)5_Te_q0y`nPFGcqA-Dr6xo;4f%w
zH=&{9oFR7TJ)gDZU(QCy1=>fKfW<7<fl152kgIjz74e1ZS9}k<#}s4m_=i}-KY`}p
zKlsJ(4(3hyigL`!XkyZUt4a#dZ+$mjd*jOf&S-wMuOSJoTmVI@f<*of%)!_5AMPGx
znf*9`A(pjqSK0@n&(@6HoC8=pjLvD!2C?uF=kj9_7y7&aw;tbz&fmU5ZTU2)8mUWc
zJzwFLA8m-215t$UFvV>+ZOt#hQ~OM5O=&HR`m9Rt9eWK?+wP)4xC}{uY)(_3pX9@)
zYLgLTPNB`6Le%DsNdNRb=$qchuRX6pmd!oHScP-Zv35HMHk>c8mnst#6(C<wszmIi
zI7w5&56JraiXUDtP1lq=W0Asa-cPw3J{?jZzAu_FH1;xRI^=PwBRaX}3J)%;Y8xm0
zmvQJ5;yA&aqyqbSJ4IQV>73JsY3Q^)9;>^nQL{zB+uYZn6Rw+3a+qR$!YjVIJezAf
z9>!PfK97fbuVdeOX%J{m7j2voimQ5cX->o-ME|*rp6C0~Vqyq$yLJF~)qqAA$iSSj
zosf1k7t8L}bD_cUaP87A3~z2{e9L+8x?PI25p4)A5#vgqK;C8OH(ueZ;9ANCbcqS&
z^jd_}t(AT6*ByhLU*Eu~v!54CtKqE2=i<+Os>HXqmLC}BfL@E+FhY7MZ+lD$^hfW)
zoOS)6eR%|Vd^`)4sw;5s?YSU6(`(lqbOHuC_0jo2Fy3an%RAS9VMS9Wn7sZC{qw&-
z{JW)aYKA3=n0$x(PFyf8U=Y>wuR_|`An>_#oqPB66E^cr5|tBXbbm)bj_B7SIs4v2
zx|261L9oPrtAV8LPy(-VGZ|e~{(_|WI5->sWb?%~&TGSXaI60dZs`diDt-yiGTA+R
zbus=pVU5bpHz0*^ntTFfxalmnu<yJSohUn!>W^gS@BQsCR<8iY-Z_lfiv(17FPQsn
zFp7$@&td?tPqS8O;woKPx+m`;3hya#?v*B(oy7Qq23B0;<tZ5J8O}vV387x^2e%<!
zi6lMj$Ep(sWNE^EEb?0j9zAPtO^`E`F)q%w*fLx=PJ-jxHEGkURoH&62Fl`^xpedW
zVDl`I@v;f`@xlPSUOtR`X77)#h75Fc?Ze<dHCW0zjM81K^RcH3bYw%IV3z{1w;#ux
zuDdXQ`w&Jn3~>=gdwK0z#vpGBgZf!Fc^7{dXgqQShpo*8zrItj+lO^|-}^wI!(#Mh
zzmt}$)yT6;GDI%#HtNkCMf%K}IAO3FE;CJl*qpggwM&isSQHNK``th$Pl^U-7}Bbi
zR<yUUkp%CKhsm;8xM8pY6&^EB(*cSLqEu*i;B>zGn+9#mQK9<zBD}SqaYI)0@;(mQ
zAgj3v29L43Mu!F6AAb*AHjP5l{0=A|lz^}GP2RoDpX;8cilsALfcC4B32d*E^miW|
zd7Y2Ct8)SNg<|NzXk7lnfOK=ixcVn*c%OCsvXmL8UdxcykA8`7jXvN^Hq*J=9e_t)
zo6%c^CZx~s1uDf4qNkHNlubFuy=0lr38GfUM*jmjJOxzAfD5x(*Lkp!x9S{$0+%t<
zU#}ZRyT2Ue-E|5@=?ngW+3Y=O5GNo(w><G4%R%k^vJoQ-Z-LFIXPn2%IuzeHX}4y0
zH3q)4Vh)y27{7WxWEF1%<tJ0H;>H0;k{c4094*Gax0Kh@P$#!q>QJ%2k5inJ&V8({
zfa43~NSo(&*ru*QD#zu3o^uiQ%b$mccgvWUUX9mW!|vz6c(N=foY_|kQ&a>{;?C}u
z4==Fn<2x>{{u=tqcHp|(GE~@BE^65^56k#oe#i4ZlsczC*Gg_+$n`zge=HLQ-Y<YA
zFH35(bUno6cY;$f&$&<gg)Pk~81HZ#ZFZmF%O`c=?%a2HbNwj7RjK^{-q#TsMcTfr
z7LAKXlIAz;j$*TNdh?n}t~pQ&l%5QuE1bP>)01A%REdLox{B1@Sc{9*{LY7NW&577
zA*j<I$NUXDY`^HTY-09zuEqNq&Yw9Ca?P!13fo^CTV+A=lWMuIi3plE61bO|j3qWH
z6{45Q(v?#jAeQA<+A18Nne*p9%AA4Z2c}ftH;!9l9|eLN9hgwr4cNIEvo6(fO^Yhn
z9pV+gvsRYqEHI;Dn{>OT@^1KZkh!W~G_kpVJ_P(ofxy7AsIX0joGMSlI?WUiDsAMp
z=n5!WZiRcvjcC6t9&C4><~>j9<M|>rlC|R$=d1GteT)og(}4yUtM~#w`DWwK_ru8c
zFy<<ERzr&)1gjHvLdnhBU~)u@{yuL(%jWyQ`zUQ<e$N<JhpN#?2YDKDCJ&_Fw!zqT
z*0F7V37L-Hp!=s1=ouxV!~IIIcz7BI4i3Y)?B12wCZu6Q<}_{PTO5C_18eeL!|-XR
zFpG6C1zennjFu;XZ+dXj0e5WaYX`CE&jJNDD_#|-O%BR1@6+!IP&4-xyz<v$ob>J7
zwHa1Wz047o<t_)WEw4GDi4G1v^Tt+W`=dc8-riXs*D_AQe4U%{Q28yyo)dyjqc4W+
z`328~>A2tk<H~gfL8J5x$U9LFnlEOdVbBBU$}}Reqf0RLmI?J&_5y)NKNp`Ii?aQm
z=)suVxywewwX4S<v-l}qNIZeUxG$oChUxII@GwRjE6`0orI5#VPQKxhyy^RX*!t@(
z>kQAu5!Q0#`6NT)|7#bDPn_Ya)wXlCo#mLMG6D|9GN0`h0kPI}#O5}06qq08gp-c~
zJWGUb$3ououns*Lr%Q?y_MnT*A@qMC4Ril7B-2^9TAU~lg%9;W>gQu%WO@r-616$a
z_C(Mb7m963@A1X0O4RQ+z=95Q^3Fz^*evFFuT6<urNJPaXxAsvQASX&n9lDnV_8I}
z8Z3M3jb4^hpl|a{UU7mcwwx)3nz#bAf9}F-2I%2d(<4~gasifUnLwPTAsyK$LtS2p
z_|ntHwESi`Sg&V1io7eL^;gwM<Hijj5LZfCB8z}uQH@`7RB3NhGThs6pLq$N+ST}{
zz@MjjM5wFCTWZVE$8JibDBfR`790asi6(TcNjWS&bO*B@_;N*6OJUsG0OlLp0_L0<
zmOUIoKa*@|jMxsnpF6PaU@QM2l<g}9=K-r+U}X#2rT#g`|NdxB)_hrkUUi$$`J^ot
z&(tMiGj*)LaS?pR*>S$(-(hp;4#tH|<;Bx&xUC*J(EHUFg}Xd>%M2MJ9w*JYk66ci
zF)di$QiG{Hdk-93gqxx%tUOWASzmjCURFn-dg))3@7sYoydlJD`k;%I5*4qrgNXlD
z^ZAYb7(O}&_IPU0Nlvwp<ZsA%-HPJ{r>@%N<}8CKb$Plh@;nIEv(L*VSQOu`hvk-o
zFk`R=ZRS5jdn<qL19O?=UJOR*+u?8|&>LCATXM8$G+j2wf?nN~ipQ&2k7h$LKbcn{
z3BpI<G{X_BPFx4yj4*Iqpo%j-{A51Wm*7)8AFRBub2_KK;xYwSJn@`)sJ)W;Wg^BB
z{l<K6j?=JFDIRrCG=QnjPq2OS5%V5>gNXWA<_UepYkqR(pM)#Y8^=Zv=Or$9?`;yE
zksC>p8nifZv8(7~_(aC~iGh{>UgoF9NN|Nq5f+>>CYf$u7;C-+7RIxV`%eqZ`Ci6;
z_N_Q|niQ$FbL7i6y#<4cOuQmE2U-7Ua^lcB%-tlQBIyR$>Lf+Q{tb2qM=%!CzX8zv
zl9$-)YH<<%yLlCT0X?N=NdzHN3L-+3IQz_GQIwt$DJ)6AWsPS+QSmyY{%+&HSuoyh
z#C3kkj4^0(HU}IMS)L&J1~jNG!p{?5puT(ozgAg?3MR#H*A2Vy$cQZT`0o*%T4O<^
ziganY_Cu)NR*efE*1#5<SiG_FC1y#l;eXhyhC|k7WYV8F2zGG=4@-CablaHtHMPOX
zYDID?kiF}F`ttFICPAR$Xu9I{d~CiP!jIo*4My4R_^_lNcK`bfm9O<;X1pBp$*$&Y
z7mS05#X2;^uMXU~TwL<!1c+jO!rdBOVspNi^D%hMXWbkN%|jvFsK>=PlI4o-Ee?Y}
zZF#7BIv>`)W=xOWFVU@|2s})hVL6-8xmLY^;NPAghlZqU^anIcFr)9UlwpOdHzq{O
z(Y|*JAyw%b*PRx{Jv=x7lbGir&np-P4#Yry`$cH=Fr&YiGqLxxEER_t@PcRK(Z*zu
z3;SV8j64>DiKv8iyZ7+3EpkvfiE&5DE){sxjHVSnMJRgk4ej;*$6c0^rFCyJp;_Y>
zf9trAl=@DEq=mtv>V>*|`JxCAjTuc#F$RUdmvUhjSpRr@WI=0$4p})TjX%?tjSUap
z!5(`pT7BRJZ*i-bb&OgWH+C$Vyl4SQ(<ev`FJyUn<Z~KKG0654G(ETv8qP6TYp)H@
zb`7W2=W@V&{&rkmHHvto9fKeLTYw2MX&8Lr7-$*|<Ko~F$UgCfq<ubIeY*m9iy1>C
zPK8*p%$k+l2FSf0hAQF8B<0p#wC?@|KNVHTzg!0<{u)V|u04RA{Ciw@DHm3yvV6c@
z3z0)yAw<L_fpw54+JFCyJ$odWDQ5klS2IMlN2a6agZaqdskk6mowz5lj>~X2`1(l<
z8}k?|bk|v4CD4-O{#^^JKZT<pdOuH(nNhP_s%)?L9(UeByc(uN`#=G$4Bv5eH(OCK
zv$4SHjxF4F)uYkLG3-v>1hyF=TyR$p_D$^IG;E~Fox@t>LwYg@%FRSRuN6^xO(Nqx
z<f27#8=Iedz<;SZcqe~2b^k2F?qPB~Rxr-x245J*xPJp<w?pPf<_Omq27<Z3ZwikB
z^?grZjlT{}U0KA-UrEH&Ljx!_jN-&(3pnY7LPh*`&^)LN-I{OsKACTvpk2)E@aP4b
z*dM&u;tapsI0oH+pOjQ|D5J}da&7}JMfwJV@%1wkl36zhORi)>$-+V?o4gVXHeJLx
z@9TJt<-pa&$3gyD3v}n%L)N)_oX>V;I(C~SUHY#voooFX1sgxxxks?wYmPg{7U)9`
zd-l1#kl>z+3gq6!3g+?tz~!4oa}!vnE4<|{#3mkvteAP6iDxgo^G?O27xMgkweNV2
zaZe`h(x)FDZ2-mbhTzh3gx5cri%DunxsnMn;2Raq`B-0u10M|N>iz(1JA9e<^*9K#
z7MfA3kGHVIWB~H($6@oS_0X4boO_4nRFYf?VpUy9aPMiz=QBBoG=wdCjHu7guUzbY
z1KPAqNJ~XtaDSBoX-z7_4_9@uYq%Vp*J((7Bck}qgV&)cuM2|SvNPh*NJ;)ho?9EB
zMedZ3qH&&|V2(U<adqi3$8sh=r@04pSL8DXV4&#8h}%dLw8-AG3e<eMCzNh8qxH2N
ze7p21kQ`wD&fOWDX=*rR`n|=ztyj1!Q{F(ZaSQ&C9uDpY$8rO$Q&4Z!J#@eMlh=%0
z&nJCv<-Mr}cE272i=`|{YJ@U=Q27_^+=i3e8A`PI(=_}yE(0UKHNvdDrZiHa0n*jB
zVo$z2Ex)Bgg&X`qT-0gjxy2L0cBqp{UhH|T|AtH2Ean6HexuL2<NOh;@z}roEH<y-
z#7pw7u(Q!b(K*cql$VMJ!E+zchn`*Rx$^_Ps&cS-X)9NBWf@;TO9kd+9K-BfS?aqz
zmvjF<mJ5w}i<+8N)S$c->)j{9IF?hZ{62*DJ>Fu<6a|(gNCT^#jMx2r8Ab;Mfc6A^
zQuAmpP9(|f_gMpD3@GF5Fot8C7Urj)gs9;UF@Cf*%Z1DEI{L?Oud^1U4s~+3SDwN)
zr7*7f`zl_4^>1F2Wpl+Jg6-ge9L)AGC;vHGl6@<T$euDwvZMYwl=_Z@`ncbiXJ3W8
zhMCbuwQOuDVP3t~H+=UjBVODs1mS2|o;QDl-PW5yZ=4CCPHObt;V+<Y_9hnP-<DJz
zG$ny4##Hmz0{*^~KJ7FGT*%Ijj#i9akU9d6B>Y9kwcpY9kOp%YuzPmXKfKKQ8z>%i
z06jme!#8tHqJK@Fru^E5p>4@9c0w9fPo2d*xc?X1RSWP>f{>`WSP>bC4()uMjkZVc
za2=avNy6Uixc__woLK)0zZ9F$HP&Ltn|cd^#`c4BO$~0&Jc0HrUrM$rB%@889v>kZ
z0q(annO}4#*2i4no98`}%-^_)F<cr&z9L(^d}<h7@OBi@r_n5<ktAxEn+@?b<`BL6
zIFz{hVR0;Ftf%dw!X5g=<@hYfT5F8af2Tp^ghq7N$`BpAFp?&kJx90Qz2IxZa}OmC
zVEHI5LYfqaaJe_P`&U1B-Z>1@+4EtF{Atj+@;@lrZXxlW%=W5k>HLa)>EPb2!dS%~
zoV3$7_$)e(u^CSAFryL@3lwR}B279~nK7N$zr{@HTu2}CW<9{~&={!zT8tC?E9n+i
zY|x>3#a&Rk&<<v5=#k_wd0O;72#S~|^!%QDR9&wF$CeJG`*&u-&`v{QRCW{g4-3Rh
z&vy{u)r)b?1CYXYz5~0Kg71yLyg=`tg2T+^+_O}Nd<eJ-5l+uhHPjA0RNKM+ojI?v
z<0HB!{gxP2Na4D}A*kG9PAm0ZfHJj1C&wgScS|(f+kO$g&di6!P1V?4cZ829e8hd<
zAV*`>fABe<*#F04rbPT@xv2eNDX6^}PU=_ggq6y-dFu}d)@k}QfH6@2w5n6pwK2G*
zPKyW}=1Lwu?t~?OKEW<|6H<BaDQrD8n)b?Hg9Fd6VS8yjwwu4fkJmp#{sLKax%V2I
zpK5Vo@8oIZlQ$qgc!TvrhNH#1elXEmiz|NRL16eFbZBb?0rMma9_Ht_9W{YABgVw(
zk3;+MhEy=|08U&`fJTphWBVwUW9xp3cTN;x(TMjVzvv#uMr9168IL5Mjl*Dx>rGe{
zmx<;dEU2wF&zqXxMBAX<cv+-Q#5bb2q?cd0zW>hgse2uvDvV`(f87H^nNJwHRD;Uu
zFN9LRo!Bha;>NFB3x66%P)GCQSX7|^I*N?>>30(+W$|#{O@;KhHL#B7zxezR+o5YQ
zhvPJV{1G^T{d>3bOH%DnaQhmRe}4*ZwB?BMl522eK_9j+i9lV+Gl;qNoOu=vc<0Fn
z*bM$P79ATUy5jLajNLjGmhI)B{dhFgmr3}i%y*c!_%(B{mWyP4=3?$)Q%JinNA2&=
z=Jc};gS%t{mmhqRtLavvD+eF&|B4vz@AV0=zxz-m_8re1G#yS9hjsCb{?Vq>(v0ba
zSr+7(N;jJLhJhX9N+cWV5QS5$3t(b^rNO(Q<fJ9*M90E~33pJq+ZiOjn=$FXM!uGD
z=icwvrq|r&U~`Kqqz-@2CkZrBabiAq&nzFjn7gECk%gq$@Cu~lJ_3<NJp{=y2FJLC
zTw6~*FPK_gV58~JO*Yq~CObOtN61t*a}u-N-a%NhU!BUyH{h^wGcl{ZjE`@f2m6QQ
z>HR1jVm9$IKJaIqlPlBM4!jft4rYM1PBgAQT7j-j(o{6DoB2r{!KUskXL;li)`l#F
z19u+ch-zaZ(q~>yO(Wu%#`sr7?C)mZp`K@QB#LEx4tdw2(>x(YIL5%eJ_&rw&>#^Z
zA>5Z&hNM@i7JRlZ;fhw7bMA}ob8+vO<2J$oe{5p;?HMyAla{`M1@o1NAjZmWt$PB7
z)Hg%XmpvlEy9EA9#VYJhP{ink>tTKVO*CMg{U#CfLOLi=@t0MiHP;#Eyki=gWV1ct
zbR~LgNQ#8*Hl}G;UPI}!oj8GI-))w&?|+p#<PPam)hK&NtGxwkH3K+a%K;Y08q!Kp
zKL~zawX?4{&50%)!$`Lhkp3?lQr4fv0#7*->dIK~(HeB<zAW9uIL0N@UxU^&1Lkar
zfyWbi7~6a`@6322_7xi?EA^6K?583KGBu?^><(z;VFY>0vLMTG1btDKgQ;&cX@W*E
z3ho}jstxMQpY#)xVoyn8gQH>1|8Z3&{y_`h9H6%X@%d>((n%6Ap9XRQp-I7cmgO}v
z>%*a_ICPqhyw%9(T#@EZxLJJ>tu^Io*$G2%&dGuh#a5j6#FA!ZEX55?1(>Q@%}@Bs
z9IFRQc*UPB+(^bQemmBf$Ys64!nX>Hu{Vl@n(o1YG<UGFn~(BWu7V(7qQu9S!*!Jw
z#NMY`G}J0j?g-*h{rh(GKEuw4?7l;-4ag`ln=fu{<88vFF*NECewg|VrL6;D)5bJ(
ze^bU^o{-CW?W%myNF&bCawJq-+ywPb1#Gtak`u%qlvF<c1lGHrLB>C3q~YU9*tU<c
zMn=RkZe%i8$@qaqMHz?=_d?+3%V0Bg4JhXrQ6Xa#4VxuPt%fIZ6)bnuoSeXQk6Fn(
zE$`sZ=E#v9-jT438pFrEj~Gibm+MsMLGQ7~G-(`zH(E8L;8B?9{AT8k*<=iEXMcdI
z-FVo-k0kEQXYc9uA2i=-7db3cqo<z6pm^Lok;T0M9C!EyPD=g(MV1+&>OE1sX6RX1
z0@+yLa{=f3ox$(h4`5=v37w&`00Sy+KzP+{FmsV2b6gY9WKRdW%Rc6IEA!CsI|^KF
z+1YU5I(DCm;D<G^|328lviA0Hka70f4{A^ic0MR;KF9SYb%V?35xjVfIsNhX1#Wut
z5T11EQpYMGI9g1Dd15)zS;@}r8u#I$ek$Y{4gg-xzz&H%k<B^|3nK5MkI;nLJl4nH
zhX=uSOa-qrBv0zt_st}_6Iym52u;><WyQ@r-cX~}my$W*i-o+6av1D>>Voa;J@j5t
zip1~I!``FsQ0%@4a^7SB1gyXy<vZXXxf<^M$36$vAF1uJ#l$^Tn6>vlzm`go#x5al
z@~y*PPAcT#<QJII9RZD}CxHAhPsAY?$oY^1JKA4B$)q7*W)@y(RUv8_`xG?KujZvo
z8DC;SCLew14_<CNjehT%AY{vPP}i%$2+o?1yw5s#&r&7c&MD9wsl*EwgUEMS0v?TJ
z=UE;0EI9oHW_791M*R>FKX#OC?vW?E9ZrB{rarZGVGhR`N&F6(bXa`;5%$f0!pFLC
zpl`^!NNM*Z!m-==!S`&x*~Id;UJK9#(s=!sd%#09fDygHP(9-|x8HjYHp<A+WsFO-
zmi4)84(i~cU<=Z~&WcG2e<Yt5XcJ4u)!V}InzzQPP|5CFpezW3hxh8hZ$0C1=YQjB
zOyA*INM-D(@gTN2%rajApgC&;mvk{hR2D9eS^u--hJB6z*R7V+BlrX=TpCU8KTl)a
z&HMbY(g8SMY(!fv7-u8Gm2-KJ#2L-}9~2wPlg!ueV9io*cIUsruXt9)@=oERy5bl3
z;*=7x>0HSLeCvVvjw<kl2BUuAT8L<R%n$lL0Mo4(pnhr=7wORnzVg1%R+7#|&=G8I
zbq-H$P$KT@_Cbi}3RIl)mJg0T51E%{WAYX)QuZhqlYf{KL2McyFCPp(x23rVi$H$f
z6eW`S&n`%hNrEs(mT4A-b1`Rs<LZuQSl#E%ZTs4aefcjqrzJhSo7+if%svcjMu;Ha
zKmh}_7$Zbr2I5#%QK`foU+>W*Ne;oH&Pn>T+N_8#GFmUOVmp^QI-K#DHiGsWMY70R
zgQgrFO~-CO4<+Y6<Btg^aEWph^wq1QpBKA_2*bFDU93-b^ccvz)FL)L&3vJh8M&Ej
zMCfm}Q~k?>%L+Fz8Bq>JgU+HKiS|(E!FbU5Rp7yXcj_(T@Q{)g70CV+xg~tU{!kg}
z_O}k2t)fKZ-Mi5x+6Wwv@z}M1&5&Om=Wc0hlcpTTygl&<UL4UUJ&T1TmU-#6_CJ9i
z`_n=2!;g<p^5wnT)4}I-6Q`-RkCRd`qbB)v=u<O^-w+^8vR7u~ShFk;{#C_Eqes#|
zDy+w&?gj3$tFUIII_(~%haM{$u{@P^Iaa+C%`Z5~7>=Vr-KGY+lClxKx3k`(KK~*6
zA2>KLoD@{Wqt_V^h-){;*-K<d&=R(P8TKH5kY#d4_!yI-d48gVdu-RSOP<zc_JiWa
zw;17o{D1W$sq*PDI9T}w92)OH<82{KcPYWgWuu9I;bcrD?|Ib&%b}?HglMdL9rVU`
zv(B0Y+T?!WhaHN9oT<;4cV!+Nx}!~m=83$cxjt<<T>$<^ZJ}X77G(BLMyI6}d`vmx
zx8#nZK8ISkYJVZWc+F+Za+Tw%eJ}85*Q$}l1zA|$VTR9UnUkRQCn(k|<&LSaoOSso
zmj7MHOH_=heEuUym{tK{i$)NkktyVS?8gY^NpsTJjj`;G>}OO1ULS{e&*zI_)9c5~
zBV|aYmaxu<Y#}rUU6Y*Cl_fq!{rFWO2UfS3k%i8yF?iB5-edH0IA5(#Mkbh%q(>5w
z@W%+Qxl9fFv@*Dxs^3sQC;?6z_2A)ga&(OKHT0NZix=<el7~f!C{+I@YPj(c7bO_d
zv-~ik`bmIE|0YT7$EeV{x*sqnbUCgV@d}E<S4lQ3GNKD63@2I69o()8S-K&|h|HV7
zIze*TxFj(U4lZQ5(X&$2VJqtl=bnP1cX5)zX9B8Q_Z7VY86$WTn~7e?hZDEuNwq(L
z&g@Pc6h=UMjU~&Ap5XJYe*{ta0Id2?o78o-pix^2bcZQG#H|c&?4>FU9MXZv=fA;r
zfe9Wy#y;oyFF@6AJ-)mnB*ID$a5@_XE9VVEf43NTV!-;6W)JypqQTiTByjUso?~kK
zaMD*o`Akn&bPUX5yWuJPU%yD4aySA?7c+PFbOS0JN{856X}ph|0{*vwITM~4U`Y|%
z>u*+|nRiubjrc7<=2Xa8oehR9524E@09~_U;q`43;`uTOWE+=Z^Kl2T|7#^#lTwZ;
z^2OlF*tDnOR7urG#%$Z52E}(Nu6;0^T!|XM%`<iAwh!sJL+=3?e9$89;yc_@wv*R1
zFXnA`QOvGWB~f|BxW;`W^e_8^Q`ODsPHTDM`NI+}u4D62t6Wa`_&dm|H^AInX0>_5
zvLq@o{C`(2q1E$3j&_WqT11olCR!xlTYxc7a>2gIR5Yie9X&GYVeh95$Z!@A`{rIr
zmAV3noiPt<JvM+au?uT&YJktA!<_$)0`!=74!u5A^MebAK>na9U28^hUO^zbGKRCS
zkp=8iH6!J5Js@!Yi?=p1_M)ydjXccq8*8uPl$X(H{3sI>X8nOx5;lkOTmtX<kK=)V
zOo^oRD7q!qf?%5^AG-4}x(DS*isD=N^79w)yS)uEe=%KFX9o#ue?UfA39fv4nNK>O
zFKISXMS->&icdGPo}Uw@t1ZMy>a5eUn}UB#3=R$rv0Tb^yRVFka)+_I-&z_GuMR+$
zW@C^mQlkGnR3~{$i$J3D6srAf@iz0RL{`59!Fd0I5jCU8`_)w_I(Zcq8a+qBW>vd!
zie_Z|g(Fzg*8t;uHOLV~WUSmrNbQefMYkB7mz;;@!Vt-@h*RMHEg9A+YSR3`zj#Qy
z73JS<#y+pjXsfdb{TN%u-qntGV{UW76LEoK%s()n^F|M=AMkxvDPAnA#-inGz-Wah
z$nKV<Es1GR)0B)$7QX<S>H0X$n8%P@mQ5~v&W#Q=AjeBhiPKjB?j{nr;iOG-XQ@HB
zL+by{$@n2ND^he)i8+0q<Ht*{A?wRVZpsBUI=@nx_J5b6c8hdKuxcuQ?|l_|zOjXk
z8HPk)eGmVz9rx(Hie!tlE)|Ac1My97$tJf>XfB%!$D;djM_x7d;Tb-KxpO_Tqo8c{
zCiu~lhEI-<qDg;#iF_Qc!XYOe(k!(VDitEw?~(?!n%vLb(i%mr^xa_6m?mgDEx@sZ
zP2l7<5qAGN52_Bc(4`@gD-8<=-zV%lsMd&ktFL3xx;GNzI|j6Ob`T^Sk)vz;0vIE}
znsq~UsOegQW7vGn-EIhfGp^U9W2|Qs<IZ*e?BKsv9zm1OWtgnZxDYlx#)&>ah|3@}
z?>H}tm{7^bL<(rX!8a(HyavQ`^+gB9>yw1YU%)f&YugP?Y_!*fqIm}J!NVFo;um23
z#t5!|;b=NcrxUt;?(%^ztfBWv6<8F@(+&ICuK43LOv%s1$j%S=<#-Iso~hB`KkC4p
z(54gGjY!GjJ`|ovmwd=JLSLI;&fF>;W33u+#Xd`%tS(KIH)Mi#oDuy{dlqoP3fSuW
z02+=Ig4OSfT<W`Zn7Nnb40fo{wornhmhAJNum=WjR6xPck#rNOfV3T~dv~ajFY1`Z
zw{EMzqdvy8EyEo|P!G)pH+XrO5p>y6Aq0(hh+-eHoxQuh$Y$k5J~ht^-CTd;FBN?<
zqPh%I3@ae~mkVayxXI0#@&&5h45-_VR>+&@fLV2G_%qXVh?kEBx?Isg-F=!=-i}~d
zMI|ScTFp29w-;Pz-oZwyMLSi6luk7#+G<L~YWooXuVV>5TB1uTH}%2%GwNWabremn
z_M`jtbkT9frT+f?8tgF`PV8O6xS|(7Ma_y{oc{LHFyX2hc_H%`_r48B|0^-*vE>!a
z?=xnZ!X`ATV6z`;Ok4e=+4FKHn_G8cOzUwd(q#OrO|{(0@oBu~+EmW3vmV`ZH1LeL
z5XYxGV|eLj=7G1xMuqd>r1F_}YL4ce#w-SlO--PreU)WX-f>Qz6VaMw*OJYK(dMhE
z62*{CPQ2rr#Qe}45IUEL^5?(jEA?Jsbp^{;4GoEgtvG>s1%<2=ZAE&zjYv+672CrM
zaMdHuagpQhK%tW<aZ8MZW}i8n+BtP%ek~4mW-}MUsul>J^#$x@_DiA*PDA&Mc(|Tq
zMp_<R!hFL>_;XI4sDxXP-jTha^S{eDEB6|Hneq+|S3SiK+w7PFEftF9r-^!k^-2AT
z&A9av>wkaU$S;ft02}w6T<-Q-+;hN$7z`gy&Ls)y<~;%`__=@+juP`ebB(zq+f;}+
zm%w!_)}+lUYjGx<n;bDd59C%4+8Y*&ek?tSTPOEm%92ZX)L|5L>OTt~3eTd0S20vf
zU57u9$<jXePR=M}JJvsHK<_09U^P0EchkBLV^=jmlUWLW$Qp?&E#DxXj|VpeBYJm|
z8j;jw;y|ea73f=VHz%l)SMs5lzwH8KnR|jQ^CQeFmLd*!-@;N0wqIMkA9wekLHo7|
z+{z6hc=lgQ>ezDyG7n6|*ky@)>F{xowQCVHTs;eg%+pgh{}0IhzJ^_&bV>D?-<-H+
zlw{7QtB`lS6bwamAhB&k)nPVR^tX{)HQb1_=CLfVBfICVTn2)DBVncfJ1*r(7r5Q&
zg4u}%BskiHW*sz#z%NHJVOJ#zypy@Ozw0sSXPn64$3YZdb>nVNQX#z|tP8Mq4JX$7
z#5I1`r8>4Ob9wzQdM)eW=e@{9r=rO)*@(Tf&mDq*tzXgR+Ac2XRUj-JUInS_`=)!R
z7@|M1%-Z;N(ao|f{CKwtZYYl;d9EK|r|c*q>0@^f`+x0hC*A<@qzR%>!@Ibjdxo(u
zmtpWq=AGDLK>U}TgX+L#oZ{~1ctpPno|hX_?-EUF{||6WHV=V&Wh|F<U==4bFp5~c
zkfFhi7rAEgQlu3ipzc$qLgz*qsycEW#0{PUpLOg`dDI*A(|BGum$C0n!g#lM9s1`X
zn}K%4<47A*GRO881bgT3&Gx0-#hf!3$-X}i1OLH*ZC+^ml({I*4r25DGEr%d9Ltl9
z0C3*K?#<`%qh$`r1)fA>3msxB|A06062Z`G6&l&_93<PUQGYM%2Hc6}O2pM5{&U8z
zPVax%{2+)Mn{*L^=Q4(CQ~{=xR<L(dxum_th(;91L0{=pzWclY)3RHk-}*YZ+};cg
zululK8=E0FFXse77r;k#IDh*W>oR7B@Of7UAZdmUd{}-A6oN0HU~+MR)d3OfTgqa>
z#9){!sz+DLI_9CM;j)bOfvd+aSSH7KP{MNTjy2|NM!w*>i!wN=66TsHP{f2=tZNbV
ziGTgVnAmg-aKTp&qPy7wUU1aJu9@8dcCQKsSGLdj&ip|Vi%r<Dw-K5L3OF^dk#ru*
zPwC`NMB6n+5P35j?2RsQUVG-EU|Wl1;R7q&e=Q%z23pcCfewlIt`0N&hEZ=hDSYOj
zN^Fc$;pYn0xqUy1oug)Oi`hM4-~ERuxjF>-A$1s09M0!deq_v<a;#bU5vDOlci1aw
zIz`ff-ENiK#1tdqtu_+wN2?N}amztgC`IQ8jH!~6IZ3xthEvhQ=|<L3I(4KB$rU*w
zUtA7B+up(o#|!8r!}FUpKB2}J<`I7vjJ6tk(WapWcWZcH->XF+xH!NEII7bO)=6(Z
z)y``c?#EEqtx(81-a5)sIH#@+4NdMs@0&*4?==KjZy0CWAO-8xeuKdpMZ(W7#YG>C
z$v9gDqS$zoGgV{H!8TcNIk6YA6!fUgDhZ#{@*LL>&%=j%`XP0T9(CL*gYiBmarQPH
z;=3e;KUExqnhkGon!yFkTImboj8CY3O@mH0e1$9K9*43m_qbQ{l*u?%ZPM+(3pV_7
z2b1!5!%F%Y(<XGF!=brY*Yq8>8!(sloILdCY+~HP09+t#O8gZ2LF%R%9kt(-+N?<7
z+O!lwbb-wWUwMIW<^*o$0DB+qc1EY-=Ul_n+t7CLH1i^}em8RmKij88#956Zap4|*
z#z=FzT}h49zrMwtoA?QnR%DAlW_|{tPqf4)Y7RFiJPw6l%%P%a4}9H_0Z(FHp_!Eh
zJ(?DReXU`zxypnnzD(e%%L*{qp^!5=W(oVeRq3sX=P_bj68u?Du_0?QwgmUXl3fjG
z`iePTZY@N2wtJet&IW8|PvVR|%7KSE<M0S?a&H#N6GycS(23H8y`oTfWU5Tt^ZUVZ
z@&%Z*xf6n8mx1i&Js>PFVtltc&f#(=>^}Gx3`d=ZP-z*cDIA3IBoXdj_z{$U$H7Ho
zJ#u5H6s-oY@lLZZ@hPkS!LIFk<lfg9NM(GPCA+qwl$!;8@K&DWz3hUGJ57nKoE}R2
z-!tZGI9z$;heG{QPQK3y#axc$-(AvFe~U3qn3x599Ty=)_c^Sc$+~&}okhjzXL#|L
z)7<MY17dRUHw2ArfZCbcz}?~%|9o#M+Ke2_yG1iLY>Nd^8B!&@UN+lxAHz|5w8*VV
zEQ^w~T=Ko*2`KJ40tY86(#Y9`_~<5^CwV5rH|yir8}NqtSr-*J6i9$rvHlpFwZy$@
zgR_b3c{ZG#ANglEw(SZ0R5K?H{Y4<WI2y!LKNN7cWyk|(bK*018v2}B$ghquVY8&E
zwnL_DruyzTdR1D3qEjA!YAi*S5&by0!UBD_C}Y*_M`$yd!)YD&SPt$mHZ}Kv-=A8z
zeo{zm`>%4RJV#S|J4-s?GaGgC#PDRf0;%rx<^=0!aVb)7px)&$+!!(>Z7WStCw>~Z
zS$+mXtw<2ecv@h8GKQB5P@%K_C=!dlTo~4?LyrXwCoVe=ab8*np@?<eHqSIBpWL-c
zG3(`YYh2)7d+8ISDFKWNB!lWpo@09XOANWlcn!v4ZeT|^?0C}v#vn_qG>dp=zh)F(
zmGJfnvV3y-B@8>5fx@llIHA5e1oo<dy?3i5+|>h{(}Y|(i#L}kgfrGP`<#;*7hz-{
zI_f3B*Dt#eH@L&jt;1>asc>G?$&)kmz7AW9DV}B=z@NRQ<g51+FrN|$)9miT^#5gI
zvl`?6I~bAf+sb?}|B92$)}w}lKf!0FE)5;cvftM$xVo#4;M%-cjDNBex4p2WftD&L
zs_B6@Te>jl)_YVv&v;11-?*Zw(|GIZN^H*4hluMd_|!|${O=~ldpX!FG0f?Q;?HK}
z`pRK6`4aOak7xPyMc=Xh>sgSRFp8MWzQUZ#s_6cHE?2f-AJ($|@7?qf#I`D)@5p8@
z=r?D0t!g<^7I}i7Kl&gVDmBB`t>@wC8s@<DWL<=<z3{nMjrx!K2c_BGF}6Yxw>{G&
z$`?MsKVL_Z)Lu0#R}2B=u8nv;(vq62Wxnu`Sgc;7PUBba#9D26y5~ndnkW^*@A03}
z<<(al%2ObM<_u2%Z3b7nydTBg3D~J>O2q>$l4^_5(0QBre`iVX4g0-pez$-NkyoV3
zMZK7nF~C2+!Fr`F!^x31Wl)iI6V#QuV6Vp*(CU{WNy8swS<(W~%n0X%(k~=g`R_T~
zN!Q?Mr8+TVoC|^9O8%bY1j=2l!hfIXkiuv^a>a%DQ^sm@;!8=eiZPhdRy>9ksWs5)
zeSyvCUT}3IE%0eWDC?z<<%1s9LE(&W?0r%I+gF*>W~nYoleRfkVdqlcJ1JaL%Saj#
zw3XX6C?pN<+Te?#9LY45fO`CSXl!5{=Jz#x^9eoP@mdOKUTDSOUw3&EDOVKS8O1$!
z(j{(xSU*-_1qe(Q6*QdDq&FU)z_eYx@HLG`?<)pyHA0?l(bOVM4|kybzA}lQtv0Q+
zXu!Vo4Qyv{5A)0_z{g`bxE?a1_3LkN%D3;rZ)bVpvNoBEnb-i<|5(y!w%4;b>5aGU
zmSB>X6fYPTX~!v7qt6#vKISKa&6_HIntUM~2uj3p4$*jk(NXM|tBD%kT|@7v4D^)N
zgd^#rs6|{0WSo&A-I7%9nXU<~=ry9BIB8<<WG|{tdWtR!)p^01P|5upMY#WX0n4Lr
z#9I$#$OY1bbX_dUhg(q1Z2_G3^24A$s|J)<7CODx9L^pQ68pbeFwChJzmH<?uEh$t
zB$?fFZczRz>(JU8mZN)wh*xUVq)XWuB1i8H4Bj1pkId6$tki-TC!S;9f%TwwGMMel
zYq`?KWZb}bpyQcarQ9VOtR#l?MwA}m6U}G<W8JxVw1D$+c^Z($IMf>6AoeK~r6mlZ
z+nRisb!iwO8~ZTCFc41uDMJ6<U$Oet4E}RLB<khrl9r{{u&8=aqFnl$@!<ZVmF8L)
z)_MmLPY!2q?ol+eNtH&dFXX$oPsNW%_hImrjj&+34yjshNMg4r;<L14SQ<#!PB~UI
zma#Zq8=t`M@B!>ctr2y*UE%B<MnUL7HCi5d2aGDOz!HULP;|(Ji`W|p@?G+@?Q;@8
z?ZIh~9c>R!GE7KbUIAFFD2F2>>cDZLB6PcTa{ndBl62K05Iv^}*Y6UNBol3}wsSai
zO;RV_%U^S0;g+;Nv>Pl9lu5JSQLgCR6G^qe4`OHsTwgSr%Knyz{GHyYx%e}8+*O`B
zz%5u)?+)kkgcQw9(B1hFZ)D8)Rsz=DiZA7DUd(|3DJ_i7j|H;IlB6%{Mth+*_cJ*F
zuYWs+o|6uM=fp8^__zVxl-Y?nfuCVX(Hp>}8d%yWhMi$7t7P#A+ArP2ywk0WMPx6E
z(JTh@er34kF%bfOe}#%<ZP3qJhhB`UmA>#I76lu?rxPQ|e0N1UX;&IP%PmB&L3M<<
zC)iun2||GmuPajxnQl>_cIOiI%Duw?&A%|1GzDE+lK9!?otXSxj?^`_Vyy!Q^AlIH
zTud3P+)<0at5xWTZe`k(?T<m`0-8NRmmIMhhXcFMVESc!^t@pSE+@yq7<tB={lRi2
z>b2-X<$2r2<6xvJ52H2@Bf;Ds^fsG-zMb7@b<zr2m44&=eK+9XE)CM;9S29XHR!$U
zS1@PCW3V$bAmi00!_!(*x)h}8f?dz>8W70q%7=Q(>uB#S;hqI)k-N;HoA&N3bgRU3
z+4b7Q_IVipFuD<%)~#oIfj&N<X9;R<KF=BNE5W_DRFK{}jtBEbkgaz<qjTAO6hGZA
zv7B#4yu>}YY~xMrz1jpl@0rJ{YAfIWs2yB7Mqw?#4mR-vpvVH?nb~0&k`Ms@_NdW$
zTTO`cc(y;jH6CubN>k_5d#nfb3FU7Uz`#UJG>(tNE9E^P_dgYqjcR27A=X{rz5-I$
zzvV@4k09(^2THuB;&opcQgnN!1n;bdB{^eRzgU)Yd4HKJkIIFApUM$I{Sn*8UFsw<
z{0q2moCswFeo!)8mR7A7QWuuv+x%FOjJwx^1ri>M_S;KBB6}E*NrFfGQsJXLb3xDl
zz%uSGcTESjqM^)H2++QQ1~u#+Ki87q`(-Ei9KOl>Bus!g7K3Qtc@@qJhLMg*Nf>CE
z0P)%SR9LVWik!dk+UhA-I&LHu?Th4mp9b&(V<GU>jM+2g5=OpRg^j*JFj%fZBNlb>
z@>mP@$w{KIuJxexArMWyo-#gIGp3Hb$Tv!KQLRmfvFOHg+xL{=zV$um8T=Bh`ZaLV
zI!kJmB?UIihjC(sF9iaXnf%?^jKSJ=8!v=QlT+JA6UW0WOUN>POJsJS+--f5nf?{|
z^_JwUO)<t*$HD6HhNNh-klWhv1T5<esjL?JoK#Br9Xr^}a%QT?MPD8g-IZx{lRV~5
zE5}Xd@1f275pS<34Nit%IQevK8tL~JeOpg*>W5x}hNB))94}3y(&cGPbQ9)VtV4I=
zkB_>9RMPzpBHUK-x-W(x?1UC=(EN^?r^3K~p&|Wxd<1o0m&f&lYZI4G9i01bO}+x2
zvVPhHxKCL(c-LNj;t)GCRqjByzh$`FuN<8F=i@+?99mC5gryaQC@eGun?1KVFKut!
zAQ?tq3RTEep#srweZsfzjsq9_em<!37j)Q2(EW)F{xlm+O!Ad!)*&NaVjTc?pPP}Q
z0x>UpFa_I#PJ*-`9ev)cgwIp;NJz$exVNzu)Ym%T>-<l6dg){Ax4ehWm%1S@u?k!d
z6yjxDEAolcB1w~~CFbq`if@-ghphprZ2bdHX)I$Dkp%BEhZFJXJ-koXPj0XOMVQ`w
z10O#$AWq3*IOi~&E_0d$;;L3mwJqZ2D(R7;z+5hq<@`IjnXp`kd79b_`Am}yC^)@V
z(ovvFW-*p_1SE4-?=0y@y-IAW>*Bq{l$+(RK=V{;@qZF_KipQvJ$7b2UE@$RP>aN`
zZLR2f?j=-b&gC*=&!c4fcWCyX!TpHRqkA+gNY-8h+B`mmZ_T;Gc$x3PsZh*&DP?g=
z<1|QV<58%Y$~vlv?-_gIH70#L$VEC7z-6&AwG}A9mCFt2zQ>hUR7~X@QzwG7Lob+1
z>A|U=qsb}jF4Va+0`EKPQf=lTytkwr8}}N4{Ob>(!aRj_3Kt>!ryOwsElwMhNU+Hx
zUUv0Na7;I&%>#ZIwc<K{{g2&0Hm?y4?tTK#KVC<dbL<TK>;d+be?c_-A0&-l5A!Au
zu(M7SXLRmg^t0`O#<|H*FTT#Hw_F3`!Y;I*GhNa=!;Zflt4A_hXQHy|0nBkZi2{Ea
ziJZ$@blTg&$-Ie0o1<I#@K|{|c$RfPeibtJ<W){zb1TcN>%g2DEQ7wikMmMA0nJT9
zXk0Q8ylf0<F0YHPZ>y6cs)#AqyHLXVmwETwLFLvn{By;ER6L#vCG01bqIMX^G<M;&
zA3m_Ga6FnH90w<YjL6o^KIr?@#+gsq0KdzOi08X@9Co4^OH@1HmPHCq9j`)9cghkM
z4Gpd$Y&ezLAB(yn8r0{@B+li;3jQ;5&aSW1Cr@wWqQ8<W7M*buW%2WQ@#%d$jGqoZ
z6YugaQH;?&E)lD*e?y1vX&7OwL3Q=Ju_9R&1l42jx_<M;s#Z;!H7TC|>JtE~vSdj(
z?S!J#RPJn<0=>A$lJqw+M|pNHta-ALIV8WMSjIx~ZDRnopH`r|qC(Mjyd{2D9zutW
zSHWZ2Z^q^b!f0z_@bn)8Mk-Qh^C}K|W$(Z=w^tx@PK%_)-GQP%(?v-?`p|lUD{3SN
zNUWO_>UbLCk$H8n=*0j|o5i^B$09*`Q9kRQCE-ivok=R)2KrwQpd{!6G|!t2mX+6;
zKP?jCvYBJ6;Ui`@u-x|<FNoyd;FXu`&rS5g=E;Vfrs@yg*V-2TX;&nI<wbTz4dXD+
zkmWT07swxP>Bn)rEOjk;j!Q~r;s4|4Ov9=C-Zs3Gnanepr&NB;v7dXDW@(Z%Pnx7z
z^DL=k&XlB*5Ryt#v7dXHqJ)rygd~J0N)n}a{ogMgM_;$SpJ%Q6zOM5;bbks)ZeIk=
z)2dN=%xZXIACIMpRj|$62OHxW(B9`ZL~nfx0dlu6o#jk_zOKS@smU<qS0ZMNv1P1~
zt9;?lKfHa8FQ|_2LPsr*t9VofHD&%>$^UkwpNH_)&`46mGH;1&4{0ZQ$jb+Oz)7oO
z*`4_l_(~2ZFMmprdT||+ds&*a?QP%}{dWo1?lL5QPVU83J0xlTD<!&O$4|VIqDQsV
zQh5vJBa3k^<c}?T$`>}W`CrXlu6wf$R)o~y%&NB#|51v0xPZ4y78kj%|BrLH<ptXF
zbjUmAh>8|}0-!nyzO&4I@L^AEXB?oI@KRwD%j_3^OyVn6wSxl7>>KFHQ?)`FYI=#~
zbnYSgd<?`%Pv@Wq+l{#Y31nhl4eB@a8OBaAqF+`jQHf8wL{!s^E#}%p!K?umA83Z#
zmu2Z!FLnm7zJ#9Qm6?WZtz6sVGT!HM4Mt9Wiv?p<=oF6+p!7W+Pn<5p;3$e>>+{eq
zDFySICu2>(dv1bvDj5A@ZeYD<oUW85Ip6pIvwYLoOz$W6VT1`;{z;z1+<Pp_5o65M
zu@A6xasp}<4&hA4Q+RQ`2{FvN23nie!X2klj6c2ycFfnHeXgBQr#uqIPYYwrB<3RB
zrA_U$-*RhY#b}#mAQ$UjiihSOgUFUT&P93#Tnf-20gVIL)b$-{S0q~BiicxctGG#n
z0_guh@O+;MCRw-Q>v=(FHR1)7i|+6~FC?MFCl*^2i||<D8t7`%AcofmMV`*7TruM+
zq^|5miOITjar1kaH`Sabha|xvX$9a*tI<>aQRb;f^Fim=2-?^A9eq51^M<<^GjDqg
zWLypA{^*EN;PTM(lL|cXHlPEJb~sOkc}ILC=^`oS76>@ZC6rzS?`5AM=4>mk;Qj#;
zgv>V-c^7<I4@1Qywlmyn0$-*X5~)gc;$pUdi$1MRv$yMzb>4BXb+jwYR49eT%l<;f
z(^}5;L>M?2z5~U=`*>@vF&UZ_1~q3Uf&R({_<5DFD+IS;g={l)9Qlc&o4>HUunaR8
zpI%x)9d9t6y;^b=W}MCBB3X~u=gTq7JF^SV^PQ-*SPNQC$`R#V%EWu|OW5tQ5re0U
z;%2sYK;P^+xLfW&tm*p2`R^Y_++uGq=V?6WPB&o1l{`GZ)fik<@9?VIbco!n5v(Vr
zz}TcdV7V=j_ey$$h710POkYfZQxyx?%rZil)!dEFP6?PcdK?D)5@1G>8N~dn6;`S#
zpky=S4lrh7t7SX%>THGEH`Hm;wQHPzwLebFdI~ZY0^&TRLp?jfgjID8czOK|)*Vh}
zcVb`s`OSo;teyp@4k%KAtr=K1HR9cnK`j4i%<Y<@NjBV*qFGfJ(J{AzYYvWtN%I%N
z*u!RIx9U<@8sCVaXA7ZWN*oOEu9#l?3Ey1QBA#7S`QGAk{GDS$W{&89yy;DFaOp_e
zeSw{SJOON!=itngH8@SKh3!`A@hkH>3BB53rp6#_*ri1qC3)`H(P&ODF&{rCuy?Lo
zJ&tYHA&Z*_V7-(YF;vSF6|Z1-PFsCEwYm(+FJl_3Zh^NQWyxH(4-jx@3<mF>2R`Rj
zsoUguc+xO{cb^zDF6nvUqCo|ktMClxkJh2S_R?e^{XMi<zu{L1zG2QnBRHdLLgDiP
za6eLkbFRL^6F%P|A&!0bSe_w4rx(iobs@)hg)l&3IMo^b9A~z(EK$N8IAb`Rq+Y!N
z50&CDxJ;EhU#f;x58J^ZvKao1K7<)QXF2yGeW;bcfu0gn)F3d%xm`tIsCHiH+K~fq
zTog&w$#-D)A%k-^VIAU+p5P((3Ys-W;()0e?y}J#=0{mp$1sTZ>Zpd|Mst$5m-YK}
zJ#f)?F;YHkH^kSnZu*ljeqqTN?6tT7Q8u^X9Xo@ZP8Or35sala%Yy#LI0l9f*MlI&
zB(uitDUM?7aLMm4U`^|H?5Ouc&s0um7^=!!m<HmAetohxISQQj2*BP`7VOs@L7)4(
zK=5Nfs=eQarPH2*%k}fZ>OCst$l*cky?+c0$Az&2tP=*Y%wYbqAkeJ2jq#DWIET$T
zZB{(sx)&F4LoK7w{~9|_{OiW5v~-BsbzKy_G8p21zrf>@KB35I7)?5#i#f9kM8|#v
zaNFK#P<NxhxOwXxoODJS;+vT}Z^0i_^IikiC5Uon5+vwsJ7dER3LO&IvxQXRqtRx>
z)6|zYV(%=a6B^vu5OXRh`e(a8OqG6g2}8@%|G2F6{uo}r5Q^|W>}1@@WxdL@JIagy
zF7k#X_x<2A^oiS2@c=inIZqD%TqqE%<{Eq}z%kkY)(%8tlyxu;Kdn#1Z%aY5?`b$U
zJ{=>U#&U+fe!`eDjHjc>@@<L3=siajs@h~q?HRYO#4ZxD6uZ$yZ4bBb`Al#QnE?_b
zjmX@;`D_OkhkueLfnoP^Zh@zO>}*Yg=J#`<+}(+5aG#BNwdpX&v<zFKwCFXK4U1&l
ztM>0Q)Yz0|6$;ivZE_EUm9lr0QYimtrxV<Fy$F#rTKTnO*>Arkg0FreMvf*clh11m
z$x}A>GetYd4{gRXr?jX`g*Mm!uLDbZ7UR3E3PkqQN)%WO7b<ubLz{Fm@3VD1dgQa;
zPOmiebj%Y8RE`K|G{<1-g>}5!2ga<{ngf*|v|tss@Y37Pflz|^f5OdBAREJXlq^Jp
zMCPG96$UA5yRdI$9y~iajO2MYLFzXRE;7H2f7Bx&laeDrvX0G3T)ScOv-^OiGvKcE
zZxn3aEJ|H|1*77s*nU8n)U_HwVO$`5zp@J>gJ1ATi=%m5I|HtsF{dgQZ{jIkcK>NE
z!RWm!@$@IQ+g|Y)bDkaM^WOBLAm)K^%?~45B>V_phLX{Hggkv^ra;^$hI3Z4TOp?S
zm1xb&_h9UF0X06n!p4?VVdMl=AUd~kw}KMw2u#HrL9eiz&ElI^NV4qFDqdhdfsfhB
zII?qYL$}&4u-o4%vRS$tzRJ`>-Jx~pZcxKH?TiMi`G1*j_7z+-XI<nr%G-@vFVbJy
z4(?lB`N-d)+@!y5u-dp0Ys>mTY|8)?b+mz{hB1vPn98|NSi|>$9ok>N1)i4cVE6ic
zEI&D%rkhI8X(9J;lJ6?U;B*A5t|S<qWJnvnJ;FuRiX@PokE-7PfV~CT>@&U2@|=f2
zU!w{P{Tqa0VJ(n7QiFtzHlf@9$dK5EH?UGtiq1GOj1)gtCyhFru!7wIRduvUc=l#!
zQW-?0?(ul<vKoD%Do(^janSuXk9YGLPIuL7kZI@M;2?d0wSI>&_nR&Wm$#tP=2WBl
zF_s0DRKqDn^HJmTBXCi#6bjisK6m$UqI3BxCgm}Ipk16WzCjaJB!0uP-KymNzFc^|
zAr0KNxWVPAW~8%y7##XO6a2c~gS>|(6eqpJ)2xdblMydGxz~u~94X;4W^BQtbJ?)7
z`Vq`H!CV>6Rv53-irw=xI4j-=CI9-u?0PROR5$=R_SHha9WUWzkqId*+67Np2JL`0
z+e7{e=0j6i#<rzF)X>!nXY*QcCd<Z1A9rOAg&_XrS}l?iZUEa(7?2$EHNsHGQTQj_
z435X1XHFO)?-HoZMH3VHuuMR$mL%eVV~?P+N(lwhkx%#fhXMaRmSsHT$frqa<OIJ1
zJIxdzLOlUXMC>eNe-CmtE)i}>^G9*PLNJ`O3EJl<ktjAx-E>is)aWjP4W$yalzC%&
z#gwUioEZ4d5huMr&OobE4W6=@0%Ka3gS3rhQAQu<+$EVC?hm{FEPW+>FknC*5_u|Y
zuV5Z0bH29hHG0kp6qdtrt|6N-ccrhQ;x=<~M#&eW9*suZZ~D}6`#<hctqduz>foy;
zw8Nn$Ioi2Olb$p#!`OSpj8AzJoX1Ybv3+d!EanAQQ|0N+=sTE*-Yj!*fe(8ffRf|&
z>GnZmqJ>>NzWadH>zJ>`_8GXuU*^MZ$`bpD!)Qo;7P_2|;O#6|3ug&Ti1>}GENfV8
z^Je-;TCP&h#{``fzF;}q*Kf^8|Ar@Av`#d-&}`29S^!8ze!@W23Y?}M1{vYnv`Nne
z%R@Ku5=Js)QEooYV)G^S+u~IFI=j1>o6(^iY+vmDDAQ$4m*{?PD@d2M@IiCFfJbUQ
z-n<-x1%(RKQ*=GEe8)`ACDVy_QJuw${JX&R&oI^v8bM84W@F(!O}H&{6-CPI`8D+>
zx^^(HjGhK@nWI3>$7Zq|b}1Jes*8q!MWU=z?_sg^U$DO84b`az#L?(DR9SaH4SmZk
z>Ng;NG)GYUD@o6uEyJNA5!5%ap7O;=P9!{w>a}<A(t|%3;W~^S5j3E9&nXa)V4+}V
zu5ivb<_w%Q7XMV6qx~@t*!?OD5=bwG`u~RQ(UQbwwInvZwZPh@pBOatA?DaeLhDiw
z40Y%RmtS!rjd$_j_RS0}lh1OO&7b4k{BjtnX-X0Y^+?E9Es}ceHs`(f4Tv9-Lamaq
z(7j_K7n_s_Z3~@o(SjIEj^2ZoDIKW5aw(CZ0dus(h#>|EJztIGJ#CMI_ZQahC>y}6
zH_zaNlMYoGdJE~dQ(*VWc9y}gu}uv!hOZhUsiDw-YrAwA2J)j&YsGI)(41=f-$?_y
z@hD^GC&=Il|GBuuS&=ariedAPSCBE$kk$p5&_&*AM6^BwlzxlxxhYEY(99(;)wmVs
zt4fg{tlMK1!FZG=uQGEQYq)VARq53;=dfYUKd?SZ;cM<P41S)%c^**`j$Ow5tDmP~
z?2|s|?|I1YJ-7)4*N<{>yVYrV!#^(aOEVX;Uz=?9t;EV24G3fV^x$iZe>AlMX=X8|
zO0MPB<c7d^l|&GP1Z6HuVP3XaZ|qKY;mSdp_q?9MC-*TWLar4Eyx;L#)=D!c>pNI8
zwh;OcWblT6cJUH(6VWqQ88>Q~l0jD;VmDq%*gD3D#)_vyzt<A5zV`+#Gd}XE6I%E+
zEwa?Or40<f{1ElWDC6ByInp+8lixk&0J|4hqM_;sZrMC(I(KXVT<O!H(OWxVWVtCZ
zsAN0JRMwkFm!jWR_@UMLn|S4&0eQCWC)!D_<u`5}P6TT{iPRP)F}}4n5$9(^^tLKo
z7A8jA3Muci^AQhgW`OC8I<#UZ2gmdtE|Kw-OxfR7LHiaQxhO@;XXx^UrT%>1zA<>J
zmi2R<_=>WYFc<I1K2+Kih2o5p%TH6J^<)I8IKxBJI(Fx)N`?GcYcO^1d8lx%$Mq+E
zq4%wP*jO(?%H2-$tELum<D3WCyw9A7*uJ6GIF>P^IQ;$gBI@Kuf;e-or%j%Lvulm$
z_BjE}S#t&l1G2F$oN+y!y<okoGAXGxrk!?c;ct&T*>>|cy6A^MRpcG!?lYtw8EkI1
zpbVDJ5GULGb?GasbcBD*SF<t^tYSo<apeshS&##IhsLsAr!?8RK@7$n@8>o0eu3db
zFHyz38&Khv$T+-yAm}f!sT0W3l7-CE_?`7xUW|bJL<Raqq)2Y-X%eqZKLPeMGuLn%
zFSwe^O9pPhcy@-#x$DAdNQFUbVGQ^k7o&%4{$ZBr9f;kMq%v)(D6YY}?nwtgiSdcz
zM(9(Yu>qWo=|^<C!5lgoX3*iu=2JzgBKzN;xrt5%I9L1^+KtS^`1kXm8(sN^yH?oe
zn+jTs&+~OX<|q(U+a6R?qemH!UuoVqZpG7QFmAOmJvBuM#+|vr2dz+~jiUWve&IGm
zM$~bI=5}D|rOakSkNMfT<55HQGFJ4k?7-feqV>{s_<bqchg`eGM+Ij=j_{Z$-76A%
z#SL({k_j=t_Zp6L{eVvnDnzTPmmBt&v8A^siMlNnX~DfAbl<ZTbuEUI%;OJOpHv^3
z^xxw@*267+q()acTETt0a}fNp4_yw=h17CGKF7nAd)LC|h&{R_mA66pr}AL<c{F3%
zwc(c0YzDyG==(Ms5qxR_T+eV~cj*-;HeQ>mZhC?4s>vL`(u6k0O=3JtbrfrS3@<dz
z=-k6M5T$jf^wmgiXy;56Os(cT!e^kanL61w{|$ET{STUI??B5Q9rE(E6uny5j^#TP
zVfKM`G#7sh{_|z%_xPVMu2zZvJns!2@P7v<3<tqRs7mAZsgV7Q`4)Mj97pa}COuCz
zNy3sBkW;M4i{<Ij+#EF$+1$XpW_3g1`K$c2FBhOOM;*TpXEV63>%k|(4Yr)tCXbm{
zr@L$yB#vQcp`~Kj-Fpt#8-!wTOd>3Hdc+*Q(tOQ}>)4$+9nPFmp_5`N(Myr-q$6d(
zBlIOmhwDJijXJKj?iY4GbbwIdP4M-QBeO3_GH3C3(9_eT<vDX8UfCSAh64DByH!Y?
zWdg2R`<#ynya&9tGTFJJ9rKQ|?&}O|(Ni690t<AgMq>ju%N_>lQBly*?gd2~THs8!
z6j|lw0iSl86T@e6T>pwYT-D9DAbR=(V#L3p)B5+YC{Bq+`JV%Kwaa`!{3SRTsY_Pt
zKSV`G0g;S;1|erH$n5df(3TL-O{px0##lYBuE30{ttAk+{ToJxhw=g4$KZfdHY8nK
z4O<iC>56byoS=RRg9k1^@Riq`om0I?-=Pk6GnYtXFcn>Qwnhh&V02Y20?BnH%!NuI
z<5LIccxVRaF1wV^OP+$l-UFyp_7Y=d7NO_VT-(Vn{Lq;>)s)C={#Kd=NuJk&hQr74
z!CFI{-Hdf2#f|LTxRLcGKgPj3d4IGvm7`7kYFxLYj&-tD;KFTsnDSu?#QV0fZtqS0
zSDiR1tW0FR-kaDF%3SPc#8BhhL&%W#<s`3nV%y_+I5SYh=1&5A9-D@SvrV|2cWSU`
zi8!5dqZYbqRHzv>C*h&$IOTXN_FI1BUJl9;t8F!~XQnbaHgy*C*xW`9rC2tz9L-I(
z98UU2oB-#o1}Ob{kWZ6tgtsYa=$_uorAQ@VxSck7ELx327u#X4ni<U<txgs`X~D83
zENk^;I%;*^<2}hiZm(?<=%k83>yautdPG3ZuL>@$nsJLD9Kq=@*bLs|?CXqZW05(^
zoj~kjd*9i|mxJh+HdV8j3NlTqgd1!?!NCg7aoRm@Xof7!QC^0_A1aYKpV+RZJ(o|P
z$)R1sDZaNif^}hcLdW6?$mvuRb(r1*>0hyYsdNp5J~GB}*JWYtO9P^%>IO*-kNFkB
znP9xR5#kqL2YfOHuAH<WF>@KyV{S24-yBIgCO(6hg6I60D_S&}-FFXe3&S;gMG&H@
z$NW6&Ilp@!ApF%O2+#I}15cmhoo*fCX8xQPpT8XQWp%0U3O3tWG7GDVbVx<AGkQNM
zfZqj@)I8M}65leW!Pa_+x01&GRVpCgCjj?{)x2{2CG5HU3k$bj2XAcweLa+oKMNG7
zSj<CM6<fsROcECgDs^qnk39w9vsq?#-%VVT`5vbE>Qcv@HyEe(2S4(NBvHxF28Sc>
zVboMpGTJ#1cd!oQqH0}ga#WrCIIKauf+;+^C{8`5vS+}fR8Bru0ypbKL*q~g->9$_
z4Ug?b_tBobAm|tG`8r$pK|PBxle}=}?o7B}PB75YALswqg>P7|@_Sb)I7V2)D-RVC
z^`i_HjqyjrZhbtt>;-zdod^489Xwg4O?NIRf>Z|?{{Dz|C~s-t>|1>Krre+K_nJPD
zdM-uAkFX@3!F?h}iKkqqcp{!s@5G*g$7o}!O$F&OnRT-m<7~_uZtKP%@DU8B6RMNY
z=JjFh|IB>sN2~C;s~5f$y}-Q>VzIyBET^;8lp3G>glZ1w;9$Hs@pOMIv~yS@8eKbx
z7Zc=Z<u?~RDJGzky!M00>l?mbqD`JSQHYF;<{s&fB*zk7aRq2WHrEe;%h}`n8p$Hq
zKUtPKx&!uYsYjK`2hbt271{(}c+VUUoH|#J3dS$sle>R|W6wnBVtgIpLvO}Ok%NC0
z>}>2QNnPqcaW5p<JpY>&b{;Ipx$`?=u2eCs(P)P?*9su(!V?IXnT;|ljj4gIABKup
zpUGJoY~sYQDw5@4S<Y!(Q#~L3^Bly;whR3t_hDz$Qkc1-7A9_&B02x0gywqlale{2
z{f086Q2Hf*)b1-P{fXtCTU5cmrSi0};~}Ow>C%Jmeqohs9e9pd$vYOiaJSgLGKSyI
z=jHu?=)PcB+;|dw3eNLs>pS4ct}fX5jqO09mT<4B5pj~M!*%CF(eU6+(S1=C{F$x-
z`3FN7gH(oGXRcrsmgN(uN5IN>38GNK7$Zskwoocg4QF5F;#=ft<{m97*=j;7lyqr+
zv^<TqIgYNM9>B_6H7ZP8hi>vyVQ79AmMp#t_SWL~{*VHRUYZRRafR3zk}S0A*u*;?
z(1Qbv@i%GQIr#OMby6Rl2G8A%7(7*hdzWQF9OoO*HJ)J*J)SXiH#~#9|140U;4?bN
zg=5lIp4)uJ5C3Lm;lh3`a46#;*`p8|Cus4GH|FzWkIIq#f_gl3XCk-<yx{CX2|BP3
za?&Q@;6XVOcu<0bsIktM|1T~&>La*EOb6$<X#AxtPpqdl!Q>1LB8YmxHJ%)aBRZs5
zwlx~nPw3O%$0SI5Nd(G=sX|fg6KJh1260<MD0!d@<IFQSeSPLSo@S2YUTg6#yRHdS
zW<F>9MQ1S2Zi1MsX5n=GaP(aFfq8}e;OoRRh)Mk`5;PRq6uMH*ZSDvRkH3f-2M=JH
z@_X=l$ynG5dYE-UhsyL<;-t4GI97)BLciv~L2+FwY(!XZqeQnY??4NG9%o*PhuxZy
zcq(!UEPBTrXi5^i-=ep$f0!Q8+wlR{vK*6q_c>hsKUdaaJ$h{G9O(ajmrqyjL(gy_
zH-&S=#wayf*qq0kPMi-_fgB!WPCljM$$Y_fL+bPA58qbw6oofeVT937@G+VS1r3i;
zdgn~A=`ZCcPctBSZ^|I1DNeZY3Nu2n9$|eZn-Sagz|*ZJWc(TCwmZ`dIDIeL|J(<S
z$#q=0#ah1ioeVzfG9opdA&9GQfy35Rh^)KL`;TQgflZmX_*5Hwo$w4C*WU!~)4FuS
zuO8Iyd5Ic6Kj8yAo7*4OMW6EtU{|0;*Ug@ZN@H)Zy=OBX+L(jSL&{;}4|C%FWfm7J
z^8|tvV!+TTkstr`FNQzff<`-}sbE<cw5)oF9w9R@ub~uzcW*}-57yzVQD-hJZ&b^*
z$C&|H@XXm4yN%Rf_8tP(B#`;-jxtyCuuS8E1Y9sif<y<EK-+9PF#i1!N*m+Be7P(w
z-<SyTSu)gdiVE1PY=@8P%4E#L*C^2N7rM+!<Fa3|UF9)(C{37${gKzeQ0bviyl5Dm
zct(@-r6|yTLvausO0${aCm>0=CHw*AE>B<Fib=V5A!;P^H>-MqUN*a1eO06$EWdbi
zv=V8uQ>Pkn8!=#02xP!5-mvE@KNzP$V{>0X_ipBD{;NPfv0O-bi8mj!@E}ar&}O;9
z+gNB3%`aANgky_`Aqg5mS7{yvo7`NkP1^;II=#Tmn^NSzk9C;(yPWgbx*x~=_2L8-
z?`&KZ%_s|<!wHcxwKS{ZPUjhsV4I1&cjf?od&T^kPrr-0&Rxb4T5=>kx*B85)`J~L
z3fozyPfJA~$NjM7+^4UG=+zx?Q{R%lGq`|RMwPIpow81sH%`#{0$JBX+5hw5-%f4v
z<E;YSkoXvVkL!^J`xdB6bOb50bZoXrgw75<dT7Ny7^bX7y072j*L}AJr}zKBYT;vC
z_tX@kW@yqY^R&sewP$gn_eT_eH;DS~k8r?c5$+9OGjBarDhdyUQ=K9-97^S5>+7K{
zF@krTDGgTH%x~m*5!=QEp;pQ#jAr~2W48uy$##OVZ=&#Iw>B*v;fLl8jA7_ApO-w&
zvSxXMFeQBjPW@#>4sTZ=d!oECH(8%3?m%=A%z%EqL*O=Z0DX*np>_K{oED-_qvd~M
z=C0q&edEE+-QNPn*>@NtRT3``L}Jj%cu0(7emCZyJ~SLy?pq3?{(gY!u_h#m`Bo24
z_QOLF_hHiEFnk?nO1zdlhL77@FhW=durUCH=L<pq!Z*ezNC4A`nrLlv2L)HpW*YsI
zp`W|d$@^9#vim^|9=j3-eT=u@!k#nt7Z<{NWlgd&?lboF<w5G$NN^dm5WNGF@LQce
z^=5%{$Is9C>-{G1tzVv0ok;`hxqs1}&G5!4hVnX5xsbz4@Ozf1(X^4Dq5n@3w?{#l
zHg>h5PC+GDcC&j`?jioKf)Bc#Dq;Pi%eLBonD>0(5v2U^fwQVgB-mvZc>3NG)^LNI
zg7zbDd_EiR>%0SDp*yCXjDp7AZvOkeepI<z0p-~SaIuae9Amtb(f7GyT^2aMCL9a>
z%|I&eA<j>If+H0T$=wb`()MybJUQ(To(*?|$|Zi7q&2{I>&x>l-ddP+XFfRdr{Hqi
z2;#nc94~k|U|Tb77L@mj_*EOkAg});b4H)X_*K0aY%+tJJN!C)QJ13%)0rEl$DdC-
zQUn!-pK-RmEgbGiLr>is{F4H97GC<9@9H!r7g)c@czPtnWPZjqJDOn8%xn1A&Xfu+
z+Y4iUzT}QQaD+F*r0JH?zSy|kl&`$Y`2TgKoJ+|({(Sj%bPiYs!$<0q!&AjcO@1^N
zvt%P^*<|yte`%4SzOUePQJ#Kg`!q+6<+?6k2K)Fp&O?rI3!{2iM*KZW&lZBD<Z=|`
z_XrRCcnYbCA5h@SoU_K+Lb0b`(21QV_7@jp+w@yF!-&m5?d7O<Sr=MJhoGU1Bz+YY
zizHNnl)RFG_|TCw>rWD>^)CnRqc>XK?&JQ{EW?*yCFvin$0%G=4SVOX*~z(K)WND7
zwlBDeUzU7E!GCKpc(^GVK3>EtU9;y~RbRuZ$P3^z`U;oxg}KOw-G|$Z4f>yx1d$Iq
z2r&u-#;w!C1sC<`1C~8`eN3HLzSqUADatg@<|5P|)}}||rlG@(Vo*}^=UwFP2wOcf
zF{iPPyAhOym5LYOyKE-7N5;Yt^$gUR@)gGCX_6JWJut>z9J)2;@Xa&w@W{3|P&4yB
zUsxT^XXsDHwi_Gayg~pZziB{+Ik6B>d=#cNl!0R4Srksa3|o7LW1GfRe(%f>uw1$w
zQu|hMmM6EsUb+;W<yJC3*CsCddLE8y6(<H&swC$m<MynS=6>x`B4^GeqxA0sp!!gg
zzL;xCv)hzN>Lp9wCsmx5Y%m7%>L%2m&<x$qX}tCsC1U6#!RtI;i+V|mG19JyKk(l>
zC>tY3Cj5Q_8eao3HvSG8bqeUwU@cPQybaTzZ$QudlQ`+;9t>#Fp@qs`@L!xbp#>CU
zFMS7%tFE~I)NQQuSq7$wHFzN4J4`wjj0b-*uHbe*oHa~~n2r^IV0(bjxyq3}lO?Fq
zzU|!Td2*z{Uz@&T-DLiHGP?IP<4rAT@;r6`Tus<BP5Le$zC9EMPBB+wAYkO{TYRd^
zLD+OBAB9GFY<{x<mzABvu@f1KIL2dYdz%3{7_LlR3<gCxnfp-T_E#`hZwGyc|8VZ#
zTnMe2gnD`@kl$`f3x5ajllcnt%nm^3cgi5Q&zyC-87ruo^;Ac7+otY1%su*MPVXL9
zBfY_lIlla?aCG!#+!ciA{=pj@m{;qziZVOfa9m8*4LDvIh@SReIIRqSaC&qQlL8)c
zOZ%iq<CE!phwWX^X6J3U9gpx6^FU5pwFBRsHYQKZ7|+A~2#l)mLzlwWBJbQzkge*1
zLnV6P6R;ajn0LbWPk|`#YqMSO^(h9ta>XB4jLEvAIv7q}LAfRd`}P;YUpA+;TeV4a
zT&PFZe|UtrpYdR4UKCYD_CRMO%X=|)fF<mKfb|+UyU-i&8}vZSUtMxVtq^xUPr#-$
z6>2LjPh9rT;rGn&#BSE-YJ6BPQeu7y!N_~On{XtXH8RiVDQ*0tWl9}Vs?cTCR9?8M
zknJp6L{`B)u&BKV(`Cj$=JUIF*t-?WjoeUTlL@&oPLh1ky@ylB*I}Q5GIhK+8@$K#
zVjeN2sYexHg`oqg{3^h{oxAY9au-N4xAIi2r}*=(GR?7N*^4!AGktT_$fPT^Sh(vr
z_PU5MpZ5vjlV$Z_^5zka?H@s&F@BqtPY7&0ZBC*+N1-RF<)r?Jk=l*H;2C%=Ga>6C
zcFgz%2OoRmhiCdkDd{nP$9Xsvq&*fYW#;mU`RY`7zXMvQti#-P72>2^iu<+A$O|o1
zQY8Kct{I)jzvjB6{InguW}mfGiwud86Q?fA3V1<YkVrJ^HW(VIgR6@IRo7)~v0GO7
zK3I;F;35c~aDcP7e}VRI89&zNCFp0k;O~7w_)K6y-Gnyqb_Mg)*i~cV8(nfs_Z99e
zY()2crqp~?7CN@dz`n6JP%urvIS#1u0++v;COrmZTx=uut#1Vzl`>wh;{uo`^n=Zx
zAYOMZ`#wjk<pnKG!fu@|KE>xAG@V?9J0JB!%=i$@oya;eEUzJGlou^5UJZ6vZ*xVb
z#$amiY)&b67H7A3INxumg!#8k>0~!u^2Jb$C=_Nv!D}xRdRD`%X|Xu$trSsc^T(l?
zmm$9KE1sV=0w+`qz_e0xdM-|r*0iO7pX?uyA1+QEk4@wR^LJ&Qh|!^bw$gNpQ#-`G
zH{~K~Z18rsDN)K0arWn1_~k*GB(nQH$R&&<yC=Rzm5#^Q_NE4(xk;1jQF~FDWu<oQ
z3q!@#F<A6F3AB{2@s73TeEAs{PO+2m>)(n*q2s^9{5^`a+%6irOV+`gN(HK9{EG9G
zxFU?rXU+wSk1(&A-3L~w(VDEAps&L2=*k|@?5G9zzvRK%8M?G_TrEn!$z+_$`M6G9
z1FQ~N(9|IdK6teO|A(Ei3zY*f#wS-eE>MH>%Vs-Y$&L)6TNygUeE_@VnW72m{t%XF
zMD(`1;%sAMd|zfj;`hYj4Jk!Z&vLTgrb?3k{&wQxKhNM;X&^5v7((@2d2-2yag|h0
zqVrb{9z2#HyNlSn=;(PE8>&c-UNa<)viHEo{5|%oNAr_b1mdwVS-gJgH+cKwI(}ZQ
zM|W>ir}C$@8B^l{7vLHV1HtW>)G!g0HgJ62-*y=HpE2X#mvfuX7C}VE8mwuJLH{Og
znxhlU#iZ4XoL2mVf5$Y5gLWHqhmGeITfSk8d>QsJkJdlyr}#YLBdC0ujD{XRMdhOL
zT=TL?V0!!x2#h!LE_WAkF3lgg&wg5D=cfYbv`ob}^VG<hXZ~3Jr<w08(xToshf|>=
zb63l+z^vy#87r#>XHOpv2cxBl>--_m{t}Cau5AVz$~yG>m2t!NG$@F>inL9iG})%Y
zrG5da9621LqTiwC#kC@>QNi5RwSid3dSg3Vs{lSp&{<ZNv}-Z*gPzMm&lmSHx9AmM
zdh`b@j(veUt^eYx^ifbV?FBbRBOZJ#6yaEIFYCkwW4W|AKg)|^tI}{P-hC4m#zsS1
zZyYb5V8G5Yes~~j5Op@a0iQEURO$VAUU{wr$&c*ANeidI%+2*!ZqUHnJs@0YStc$m
zP^C6Illdd34&mO-JO-7#hlO5KK%sXChQ@pX?a*8-&%4R#2lc@04d)=zDU53!@r<n;
znlWq9E7&;Jkow&}f`xZqaZhG##{=CT;M|TT6!XnzxtC99cTf+OR{EgezP0U>-Cw|O
zZ6VIl8$qTxvh3NLr7+HYDd$MO;<0Ue;M5}>>OTJ~USgkdQppoeMr;I$@aVzU&r5LJ
zNoQU^&X@5>7IH~;bFpn;Efg=*CbbcoG&tojpXJ1QF)L(+3s1&@XFey2eEXD-nDY=S
zK1JY!v!$33>;jEX6NO4YR`YFJZ}VDW5g=$@lhNS(3>C`XLC0MY#EWF8)By=P`;{TI
zJv_(tos^=U5stjz@lsA@VIbZxkRfKbji_U3C*HcPM4!hug2R(C$b0r0jeo~O`$SdZ
za4G|ikJlod3lBkIdpf>nJxC>^J-m(65T3uxW{f4XVa#o9%#eS`_Z5jzR};279)1$c
z_y0h32URM_IhPr*!UUdbYSVy|=Acqr16NCj@T0a0*)1Cco}Y#?YYXzwW?n0o?W;<z
zTiK(UhdA54-9$UBd2sT_eI%dN=#imv=(O7fHXDn0=}Q;+19P8%_>PsJc7GlWGG^Pm
zJEnB;?Bf^{Q6;K!n~4pqhdg005GEc;#9?YF=-GQhRQTl)zff^EO0Pc*F6TZ87nQ1#
z&JM=-o}~@jC+1;LWEsfG>X61yy3~Jl0v>xZ1uBmA0J!;})6u&iAL9dlUSC<}O$ctT
z`6x&$%lPXiLcilH7^`vyH*<{vmAg}q0Y;iIcT)oh6<Xm~@(}N2^$Yz5n{k+-CCPMB
zAfk7tae0LjIW06Ly$hD2qj4M`d7V8|r?v2UZ?$mJ*yT{E!S;irM4ZcnNBDI>jLuBV
z0&i+g^FIxzl6gGb^_3+Tz7Hc;m+R8<yjvjM`2_FiS&+uBm7M7J7f{|TL2^773o~9a
z2K|C}d}m=O<jin}@=Z?gX52H}uyHy%<)lM->tSv+<CpGeRig<t4ygP}pSfXgK!*c!
z+KlMp++R{|<Vt;#ljOmlnzaui_Z$aYWedN=&56!qHlJF06s0%2fR9ZNKV4ITBwQ(E
z9fph0HhCnlfD63L_<@2Gk250U<f-OsX_8(t3015@@mE3;`t3@E;5%BhL_H9a>=%Kc
z+mz36a=~W5i5PZ=b+m8DfMeMV$XR2}o9(=a$F-%Y_tQ^Mc)Xj}kq$uXCvU*!#V;<j
zWIM{+jfP17C;X++CdBdaEc`jY0xj!<c#SK$j4_ss+HbT89`uHXS*pa{^CM&V|HH0U
zC0ZRGipB|<Smpg5%C~OkUCi=0Lv1T~xj~KOPdN-p&sMVxb_j~u_rlPX0q{8T1JBI3
zfQy4oXu*5dTkP}UxOI%ts#t(F-6PQ@Jws^f9|!SaBS2*L7rN&Uurv8mP;&uXxy+m-
zRJ1_w=pydDlQId=ug5mcFtCi<jIHmRKu$-5HhMRqkN*f5s{uNFld)&30adB|1R+$7
zC@q=GId&RgX}K;Pw<HmYR^9`_xVclq&v-+brV%})b_w0~j)zdW9q9Qs3E!Dr!F%Gy
zbmG!`=w5pWEE6NZuKlaX*tHanFV!R8;`PaqiPyj*Dgv%XUc!VW+H}36JbAc8pB%WK
zjGlJ?gmn^Hj0bQ7W5E&kyA?2}y9D*R@E4}Ghhfa?c+sTn1Xv~ioZGvR@n#j0q0Pn*
znwI;6rrK~KJW~e2mt=7M3Dz&FDCW=AS&;Ihs`PsjV_O_K#FtxL<6MTT(ws>r_?n}^
z%&oHkK07ftu=5^HYwdad+FoB=E5`Pxy4plcvj#TzB!b<l38DdqN*ubK3Ck8Mk+EYX
z2wr8bN9R2V(wbm#egw_=wGz+FW_>BipIpYw6Og)XG%x(K3GcqvCGtN%WBvwvv=jP?
z4(6JWYg%fgfH4DhKX66y8^h81N*x4WmE<CpH-q@Id<^ai;F=$*!&$boNxCcIV#M>g
zg<fy*+t^FY4;luGFB_xa%VnXwRtg+DAdQvfOQG>xChwf03^Cy@Fx=RLWHEQjNR>=n
z*K!cMqYk0nj)j~;bPDrC<}e1y576-)PVJmDd4r>R#8Du}`4qk5>aKagz7qDk<F^Pu
zu)FuNN)_^t?f2}XQ&GBjGQh-<D0s;@q-qVE!O8!z6O^YO9H>O|v8`x$))JQ8{e_Zz
zEj(9j#hR7KZ5XP=4TFEMzp9?so6Fww+dNR9`$T9QoeOW5q@eZU$FP8!vcG)^9$0f0
zPsZHE&HlO2ary?R{hf<*jZATn?e4u)Gr;p05$a_|f_FhOjAHw`t{_>ezD<>gZuKya
z@i?gHZNu;SbztkQLI#HUK&kFW5N*hU0SjX+dXoz1Dvvg$-Kh7p9gkW)L953mSkhFA
zL2~tQZ(ulS9F*fKGnRq7(h~4xnW3QTp77c+h-F>Ggjzm7`EgJGa>I0W*zQP`vcx34
z-^e)48yY#K4JWxi_S}pOTZ!(=n)z}6u0T$9Kc5qNit9ethF6$xq`1kDxDOk_eU%!7
z{z5s%o(_P_Pjci=QX+18t3k57&FOQiItVOgnV9O+kh<t4-jOmUMkcNJkFg(YmKJkc
z7Tv{ZbJ*SG#8@HTWOt;qihKlfjpEMn;Qr+$7vr2I5?9&>m#6vR#j2ZV`Ry7XIWmPW
zx1Yil2bQ9p!!MD}&1QI(t4q5(M)Uk31A1h50_5oR@Z?o0u3)>yaeFuLeg|xE=u!c+
zYOlcU@8qc+Q4*FLJ?9-IhjCZRv`D$E8Smxn1*=Z&gs-Mz^!9JY?HO;)3(A)X?JAOm
z3q$uoL>Yy`;&v`3M3wFQ-hsm92>5(Lg(SzaeNZNI8OIlc;iG3<*y&IlXFQx!daxXg
zd`-xG*Fiub<D8F}0QVsTmiZb{!-j5=`4M}xUw#YPihJ?s7kQfT`VP|D%B0*b7SGNV
zBcqyF_S=3e*OzID5ndvQDQp%w#@y$ptWN;zW4+*_94vB*WIP6`B#4$P#<_FD!L#3&
z-+l2Hiq(g}{Ax22w3l^kHd({Elk*`&O@V0fp735)nnpgpjI+Ko2X;vg9`?G6aSu}Q
zl3F6>T`5OtyY-NAA`I?6&>>0x?r@%4Kq%Y!73Sh!a38w|QfH0fB#$!Ahh&4zyfia%
zp!x?W{q$n20a>*B?={!x;>!EkZ^8BvWtdn07F_PF=LDm-aKV-m+<)ncq;-u8c3*Ph
zH8e(HBe^6jWnEKQmb;s_<~8hcA3{mn9hhbO8aTGINqC$CU()1B&6_~zsPe>dN1Hes
zt9HK7U?j^VmvcJapJ4Zm`=C|lkI$#~p!At^UiQo%=v`%mMcVav>^Om)cU5U}b2Z${
zvmlp>bjj3Ib+Y&dLP&rSZJJjAeopUkjL!wwFsuPeo!DING!I<`#w0kolehP-<et8e
zrk&+ep`E*h$^Ac}ehb^Rs_D|wKuub;BODHiTtQ}uC2`C2Lz`NC5aCmNn^1-ayL{2!
z_8A(x%F;kFT{3hp5H_-2ke$6eP1s~iji(!96gvVe93zDeMY{BIiZLm<z8d!|^}&F4
zDd;*UOAB{paCWzngm!6N!p;3HutmteZ_VGpyebQeJ~Jk$|2(cTT^$>aicx{~Xs$7L
z6CdTY6C1Cd0Nmb=rzaRtkL?+7NVy)}1WWO$8|%rg$$)JFF(NuALwoHKvG)i8CFbH;
zmv#|s7T)8666$f<tWv<ca$vdqGio+U5|_!H!l{;37<fpYgrCy|&syGAZJZUfxw6h_
zKjYGvcfbax9`LkY01w7Uk~Kg6!u@*YRr>b~CUwPuzW8@gQf%N-)#r0Lsz3P?X%8TI
z<y2gxYfN<*w`1pnO_;2i1tEd=F^x}!VJq30QE!lUpFD^I#~a}a&Bi0^V?jmLk`A3L
z234m9{Bc>4Z1*%HMJcag!$E0kz54<PZY)JlwF9DWA^|b1xWqU!vxHIS+0ME902ppQ
z$;V937cO4^6teTAsTP)F_?RTz9CZ%t(&XVulR9;B?*~zI1RR=p1;<NB5W6I2T(77{
zFQ@;;?!W}T34cPNj6M{4Uf@z|*7KfkV_?7GFcP<l^+`7@LOUAIc4Cg)vA}11iK;gJ
z`u-!FId=_1*Ti9^&Ieq9uVMaSEu#0;0#;PBu6>sjbvdet8$x`*#Uz$bIkflxJ)Gx#
zJV8P01<aN|1HH$Y*Xy7Hb>dz^<g^@q{R}x0=~2SzPQQdl?yz?zNwPK2(IvL5*PF3)
zGSu!pi!Om9X>PYRxnJ^_^`OK_h?5Cfl&nn-WcQ-o5lK;M)iO9@`U18cF{2Azm*Dx-
zC7^Us6+M#MKx&UU30SCs%_#y*Wn4Y?_{qHHpgFZZ_!-K_1apS>n49|6DfC)(lkM{N
z0ViWj9=v7uSB1BH?Xw3^WgZFEmIH8Vj~tcRW<e~MM}c971R6C*pa(l68D@v@^K>uc
z^r`CPL9YQ3?~Q>I*FK<6Ni#eCB|<do_y;FB!RNvYxTjQwtXm;Nz5naS9K~XkommL$
zc1T0a!o|=C;+*xZOL!_H2%4h}VO3lg=X1atY~F-$v5$MfdKjBgorwewH6Iu%W2_Nd
zNvf?YNxc7M;tl3viAmzQoVyhw$FrGy+eHVw_)eQ-<#wPTy*P8USS#yD*xQUfor&;m
z1)Nx44NClFKAbT~1RhUq_OcHC)@k$5qJ_ERUMFF`)GpX=p+Xj&)+Ns`zlDy8-Eio|
z42=1gC`yQJ1pQ$@uu>)k#ed9(#o6spa6zBAYAr;Ui2=NS4(s%&zGq&ZKy0-C#%-@x
zA+ixyaFSLO7^(GOWmq6+vpGqmiy=tYY(<y0GGVv*evrQ8$*tJ89vkByataM*bkWiX
z%=ul#IrDLF)ZKu%ie<vW>%*bHY%+Y67zCHHVm|Cn8E(5>ieIlKgZ?}p)UoS>LIai^
zv^y(IPm`rFduw?|^&n2jtI(YKEyDb}PN3C40wq|s>e&KCIy^^~Xw~NMK2Lfu!uclz
z+g*p=N@aNR&JQh*x}qSblY9Rm9?JqOsCWtULg<%b=6@G(#4L4srIww!ESZP&p%)*d
zBA}k>8lZJ&HVVYf@Ta<UaP3<jr~cC*F&FANfz3JY^#eJwGvqpqW1UQ;9X#g|ID&S|
zEraC&QJ7{B55L?lV1C{naG4Y<H2mTVGKXYnOjnVx-}NW2KmRlOb?32JMJZTr(Z++p
z#>8U-W7+O6$8nm&sNn2qkz>0FTp6iG8fxCbI+bJS8D$6>gKIGHo*HepV%fd6CR`O&
z$3MUI5A3Afu&DDN)N2_N8P+|GnjMKbYZ*JX)tu@mS|I*655<~h;rAu0uy*nv)`4an
z&Uqt<)rJ6cm;H+CPO?7e>vbT%+6#lOtwTrUqn!S(ayUP$2`d+G1bed#e(&2}^tkf~
zwY=s)!uCE`{JRB$^q=AkHe0VNz5thO&SLlVMc}aJH!ST_rC)RQ;yA@WoFMmr&?YLI
zziVSeqK)pu*QswID>4t9KiA@>RCYhh)5P@)#fi#<Y>=MI_)>GNIcA|E6GETCh3}>`
zXk$G@OiTi+{nhxzhn@K|ck)W&gvShJ=pHo-6JIl)lJGY)KEK2@Uc1Pj8fS&=WA%tu
zi6JPh9l_Tf(7=HFBRKRn8w8iixnLW2@cWX2N%@Vusq${HW9Ppl4-<Obq8INncHCgA
z8F4w!x*!g18EHf9F#EMRhA&m4IV+!Yqef+;(|#XFeVf7Olxc}Pqdtq0K3(Ovj=TuH
zJ9SZd$49Q|5@VHLHKux1W0))UAS@0}0q?;!NKHEr<C*6}px|ZuXGacp>>k95I8*Ap
zJO(G2x?pnKcZ92#kPj3i-Hc&wy)P0Ss$PLz=NaLNykDqO9tL?`9U#yw&eWQI3o4Zu
zL#K2jb}IkGxDyxfh(adgOR!ATzn@&s8-1EqqDckOD>=cxNZTX$2{Y$vQ`;VW(sj#}
zrsoW!j&+Wl^{BVF-GV(6Bm}f$uo-e%hlG=sFn+}Q*`nsi!(eK!0~$sR5dLL12#SMj
zT{dNKOF03Va`zN!+sP5JdFJ%LCCuTL7K_0NHYib;iwfd>AZ?ifF)zQ13eOwk#=T`|
z+sJqj;bOS)k}jG3DhQHCi&2{;AGxAuPas&n5Nn?=#=$o%=d#I`Uwon&_TFQ;vJ<D!
zN8pcBy#*xe=><$`t>fLd)ZmL@UFat@1h#EYQ1_Y%QQ_-AM@Eg>-PpkOs$YenX`VPe
zhxrDB_kzt5O|bfKjCInc;JEkV+_6vFz^6~dT_wzeskS`xx%*#ml(gq_LiPxoR~y2L
zUHwpLxdo<Pe2xxc9hi<GEI;nX3*PHx7AfblyyFPy4O;*nArnzc^$dDXXvE8%QY2&3
zd9>H9#>kM%aQ~?n<Ea{Pj<cq5A&ga?b17Do{$(?mS*g;rxw+sOun1qj7{rDp<IvYu
zpTtHj!8L6mU?<}XqngBsj$AYJSz4pA!(~igI1#+>Tt)BFIryfq2Zt6tVbA6kWdDL+
z%SVjS*W)CYW<tM$E9B2prZ->8(YZmV&{~?^f%h`@PO=WY$IejJKkq`g=mEAJ{KH#H
zo6_gisuVM~!Nt4A)XKpO`}?hNC`6j_E1K}-H8CQv@wT!4kq>$fYLJpM9vmCLat^OQ
zfThwW&f$R(?woTN&2$ZE%-5U3R+Ts4o_3V^qU1@k20McvzY5{z58y1x#x^-!nvt5t
zS3YxxETtIu{^cxWS*O8viD4usIGWFSk|G@SU7bv~Qeh0G4=`oKTYw!~V6veF%X!D}
zc8*(Fm*%@LQc4nnI)1>lvOfIh(~FIHHJqci3~&F?hmSq811q1X(MR)#5lKsFYI>1z
zPyHTpE?*z<bJKpqMkQJDQ`MNhRS+Wq^LJpR8OvQ)$<nnurRas*fAQ2DSFj&-7Pd?-
z!qzsn8$5hgwCW%S(&7W0p>7dE;SA;qj6ioyb&RpBLBpHJV9}5`jThIWGRmKDlD90C
zC=SJ)znNn+Ab=OI)Ink4dtAV}ley9ZxOJHt7^<7lRf^So%L@Z4?p%bPX@~hgGbL!0
zNf3w&_u#7SE&SFOqrj&51g3^)(T~h$Dia!wCs%6F0n<8I%e-v0!G(~Wtw-lqsuG2Y
z04VP+!@$`k*s$#@RIe<=;Jh;sS+yJX$gv%U{zJYxhV=#>dGaswM-sbMH{toM%b+3o
z8Kk6Ng18EE(ycZct6o-u@jETruj7Q3kFxRXbjCV+a|LXsJK{R8mC%&<0ut;*@K8~X
z>Mxd|Bcp~eW_%__Bz}T%ToAnaWlp%y(xmRf7UcIU5$U8xuJ6Ap2wtSfbzRmazN8*=
zoR)DaTf4w;z9r3dH6<}#+Tb$thNy6748Pd^2tL`uc3qMZWS*TRUHaIR5XS8l^sdNU
z#ySEqw`2L~<MoKc$6QF4nFh=1zF~QSB6q;799Ar3-1>`3!V&{NmM5#`k23yT)d@G$
z!f@{KlT4g+bs`+97y-7-rKF@9z?tJenAoRG>}M~76VKTk*Kp?4-P-GMe@Zq^Eq;cA
zN(JGzg?d!;tt?sC9R>${hEp%aK-h-5B;e*}ynJ#bnbbTU&mRv$!No)@IXw-RY|@|_
zUL|0-&48CyX1T3{#&p%FM&4)lJ8q0#7v8UW1joLc0m|ASe@ma-9AJD_12fvF8U@R3
zuc7{iO*sFM4!K^QjPd8ELs7v1ELq6d&t8ExSu2*Kk3=|leCUFKqgydF<tCKBHlhiI
z;UF$^1vYev)3XCgG%{ZsqFO#fRql7JeQSl9?0GtA>P~cXi{>Miwc#}5|KsS)!)pBA
zx4oN3wHq{-G$TGC)PC+o=FE{HbLNa+LkLAmNt8rNB}qs~64hS!B9W3xND`7HNu{VX
zkazvwza4a_-Sez<-`91X-y!$LYr61MGKK^?!2FJtaI2Hgk%k#drxbjG)ZMS(&)w5#
zpp-^yVv40!^HqVoP$v=LS72=559r@|9XXtU<sY;n@=H5$Pkt6&-W`oSs+lk>M2Wdf
ziINyv+mN#1Z>Zw2C}?cI4Y&HK5{b-1NOY26n~rf7(dQTpP&x`VufEf=39dM-svL^i
zD!_l=TW6Qm-z9kU0g9HXQ<-c7R<1o8E(V^#T+w^U^sE}_`Yi(?6?X;C_IUbnmN`pp
zNaB2HbucK^#A8aTWRC4={OG4nN>@LD8iSLNFhiX+S<M0UUSG^ytOl|DdCs@J2v!$-
z1FMQ{Xwc1j-2*nl``;o^mW@N%qzD*UsX?Mr`m^;er&0QayH9$%aO)d0QuksJ@8B<T
zij02%-&OeCw(JpTuTF!M4Yu(1vniS6I|JNq<Wfh|5SZ}t6<pcPxk_h`NQd=sW`e3U
zXMW$sSU#r`^ZiCq_ysWN%@=I0yx>mMKz_ISCK&bQ!rqa5U*D}vx~k8ENO>9ce3v0r
zq;06Ht$<tp%Ax0)Ug~!8JAOLJ&~fWHbOkkx=NT2zjg;(EXoLUjAo<Gje(uCC!j#M9
z@M!5J^l#FauD;R?h4ehMejh4izxYJ&TrwlAr#f&bs1fByrcl+%{m?Ipr01ma#L;Xh
zh`;QHE6HZe+iN3Mw>83Y%Q!4tqXW8Z77RSrjmgDF;WE#gW`A$Pj6G>EOqu~{Tlx9q
z+(>EDE<={x8H>)%$8kx|QxMH-b`IUpgX(i{q1A|4+=-$i5i7e3-p-NuYPmYecz7J9
z=U(SNn@==x<ydT4%$Z>J-Vos|FF5%866&vWuI?ZNueGP)&LUHy92SK&4i2!2yJ4;k
z=Nvf6TvUzXUuQA*D|;LRYtHA+mQLn#`{Q_H;C~Q&FAon|RYG#qO3aR&4hDPe;D4WN
z*_9!8pwQqa9U)2szkfDFF^<AKk^$)pzX?m<rQ#m%a_%SYfw>yI-xAUd@tY@ryQ(EA
zJGYfK@*I@*m^gHdUJAoXId6QfHnuxyvYniJHnG=$<bJ*@cz>D(GcKACbFKAYaXSfa
zjkYCYlli~1Y&<;U+~N<8n&kZ~YshsOgbjPY(SyMmVAzM4`1U5<-ofXL-TZq{b4y58
zZ$sl)Ya;Hopf4XYIA47p?~D6jum<n-c+Uk<UxIT((Kb*Xvkwk?8nK$lHo?vM0Np$2
z0=61m78-mS(5Hy|#kZ`4pYl6Vl+YvD!MoN=WGca9fC*|{H6*zM^QC(?^e2t0Zg9t?
zKAF(g1uVjfg@jIlpC!G}7H!71y?F@-IlEfUhtFchbs}7Sj^7_w<Ee;jJTXKA<?qX2
zuE|<q%j(;>$Gs9e&wYSx&%c11+iSX$cdp#DP;h%*EJV)CglzB4ATCX!75Rl=@Z=?J
zpSl{(K2C>h#qr?(<$y5O_X{?E6*D{<1O1nB-<+KhTHNY@9aGiVco`#B835p0)rKqn
z{R4|;L#A-22GjD>Ao0Bo^Ndx8vr}rZPHrvq)``i=Qd<&pESz`!Wx<R4UL8JkQrq2e
zC@#NGwfD`$^8<~D%pLyB(*Hv5CCf2a*Hr4+Ttb!aai{Z&HCXYOvry_Hv6JWA%l@+z
z)(>~)F2@m)l>17ovUCKvL>+(^x6@HwP0ae`7?9><o_U;gP-x)!<n3onU_5Kald>vI
zee`$E+EnAS|8}S?y$1gZP1v464S3gxzbm_p*!}VSnajfwLiQLc)S1?yyXOVG8jy)%
zg9W(hsVr0bt4<CNz0Bu|J3z~{9(UG%fM0@$*+tX{r6Mcr2{vU8ZL@?J(@8MIQk_&w
zFQO><x?s+E>uZjykS{g1Om@M0IGnFcs@xe~f1t`#j;N9~A4DYE^PKSJqct1qm4zNI
zfx?7w+!1K>ohR?_Q=M3CqSRlPdES03IekAC)w@;D^V?gj_^=KS4(2|G))C+_%^haR
zUq|`)abVM;!K&2wjHRC;{Fk4M8<$(NG_MpKb6k&!)-9%Y`1>K^*jIezX2No>j+S_=
z)`W!*-@+5UEXZGX3AZcP@SUO_=`xXHk5kL>dUZF>ic=!7ZZUZOTNUiD{scz`kAm4_
z@}amx#Insk2(dqs!C=i1df7^aBn<w5=F_*)RK8Phee+l-8xQo!9=^Zb^*<`AJ|iqV
zcM=w~$dJ{K@1fsd0~S`J&&tbo<7N8+<khM&{8{z@-npufLnSFFnjJ5^F)${g2zjZ)
zv^j#vuOr7~aXgmZe~lKLvt+dKA;Rh5pfHYi{hEyEgnBLZkLOeUhr0?BuAPBI&R{n9
zD9e0ov?0d4AJeKaC54NI^Sj^~xLKq}6ipj3rehx-byXr2XJTMy%}N|*unAjY#^H@!
zGr*+%0FKZ7iS`y}P~!FyPTc8_E6nf1;IMkQaMp<RO$tJ<?M+lPuUIlBYamH9Jb<M`
zFF>1*1#ws-74%+|!gvKslFB_3=4<^xHVGl3PM2->%c6b<wMfx{pU^+ZkOimRhMM!k
zAoJWnP_GSxX9HVM>O2kG!^NOF<UFLejiZ11zsDTD-|HK(7UwTJg*6|dXxXG~P@b=e
z{=0I7wlf)+UYdz5dDZZBY=2_L*|<uv8l-glO;ncb0+&Ulf^10_d^*5)nnoY_Y&Dhs
z^Rp(So@o+~tFN&uUkNt1o<qIK(J=GBe$1kcXXI;l!tA)wD9ZmRG~OS}xsb=`OT{L<
z%kQb)O`dpV5`|@#3h+ir7uLko3*xtHsD{~FaJViHHiM<msud;-*5sbOlNK1UW;V@U
zvlfE&v!PD60mbA7{_|2GdWtQ4HeiOLLc46w_@{z{$4wz%YcsS4YYR7~?BdMO-7snl
z&m1RclFF%Z^xi;6d}$R96Po(K^WAjt@m2(p(k>x{=VUHl;O;uJ9KrviHI^3q!`_bx
z7`fvYq|ES#bem1!v91m^1>UuCS}TcJ9WS_zTM44)vXD8h2VIivq;um$?CxkKq89NK
zJ%6+cd7k+&!X^ZgPaOjFQgd9O$!CF07eM<(35-`&BqF<#oGt4WSdnZrjQM&Cw}e}>
zj8|EZ8(kn({re4T$^voBVlh$AI0$~+F@NZ<HR;qU0jben?0H=eBkE;X`f5sV#6@9)
z@f+%9;}7C2IhxITaGMish<9=<OrH=7r4^&_p!Q$TlDB0B-=0u&Un`zfNJr-l25kC;
znVi|34u;CMB=^-0+L}0ohHojt*ubmcDyxQ}UCpq~@HFJ^dML4Y(ggk^!*VJbK66LP
zA0cAS6uPQ85LdMp<C%%i@QnOKSQ~GTWu2oTail+vy*UXj_LqZlYO&zrqRsw9oI<Zn
zbHV3)ES~)M5VsFMP18GEgw-bjf8KqDrh}hg#;yXaH)_Ta4})>p^;AeKX_fjn>=PQT
za=}nXhD`MFN6TIEEJ7SlOKfe($u{n9UQh$iFB%bl&gQC&n2s=T9<JH1NLJ@%!qp|W
z(4p@?VdJlK)R=V#%8o9fZd3Gy2-op6A@daGpTCH9n}We5r3_V<{D8sF-+^igpI7-t
z<NfLraB$p1h)~-G(&_?mI~0jq%d|+BQaW|WaS^uLy`&c%vhk&Y8B6Z^jk#SbC3R)8
z81dseEnHD3DDqwZHjVp`{Vq@NdMgkA6xB(?+mRsK-3xQ3a)(B@GD}(LPwgJ5;2WNm
z-T%Oljr_tHbZ<{Vd1M&(ME9ef*SgU`wj7<tyhE2cGVDvmDb%j$hWUeo@tJ-WK2Bba
zqZ$>7$?z-qq?5mo<&UFl{|j{M2+m^N@)z=Ye#6W_1Gp63i<daJ?#l4R*tsVQq!am!
zKfjuOnj}kdP1Pjk{T@nd&%c1cdK=Q7XTg@fR3T$KZo{wb{Ya`sH>4Ze^L}rlR9-a=
z0)!AS99@Q!o_Ap1^)l`V2^R*3<>Sbws@yG|hTA6?<73w=nCfsBTxX`!dCx;()ZSa@
zqgE!2Z7af>f_UL%K_+HSQfB)KdGGAQVX!klf&p(ido<7nCQjlx>QUCDcts69+AJcq
z|JT94D28Lda<Hnp3C&t<m}#yFd4GF1YEQFbW8Z(mIkAQ$FX$)w@~*Z_n<jHnULzRv
z=(5GioZC{X&t!kvu$LJ<(0z*E4gMN4_MOjW&z#CZO<87^_5})dp2n7nM$FY)inCSc
zVGuOqp0~Tu-~Frfi@pY#RQCZ_3C3iPqbi$y!+=>8it)|_ea_`m!Q6s)ur8G)+3ip1
z7XCG-Y<UG<*{5lltsE<RY9{ntd<g-wv(V1g3xW%!P@ho(wT=bo^PHbSD&#PIKrLO)
zXD1Poc~s}yBb*ZR873G%fbgOO)Es^ZV-~MO`(5jCKA+7v{IjB;j#XnR&4ZwP&OrI!
zIdt3GAh>@lM!g$P;cAv8yI(z!NvE1IkF9CcfAAZLbm37{clUv2kp|f^+6MySzhdii
zsbCrP0CnT!aciUusY%=`Og<@M?k5J2>!wOfx;`G4y-$RObI-8k>s|cHa|Er+ogjUj
zFRhPyg~|h5VEwUj@W~4oTo;{z{DslzKgU9tG=3G7@=onP{<VG$&|&fK#hj-R2d#aj
z^w}+S_9rov&q!_xRR=3!*ad|2kETMC$0q1+A|@|(4J0Wheb^Cy3Dd`lFeb_io$95+
z1}`nrmo^r|cRc`?Cv)hV4f?D|C4t}P);qcRe56%u8-LzDrIReDz~jRGnB-}~MyK)5
zKy8KO=j}|;wciaMydU{=s5W`qEl+L^%SP8ge-It{PD5gv=uX=nTz=e)-0<Q~w){TK
zl_pb{pYgbTWH)Xz+Q`|G{4;CxRhnHe7I5hR*7dU$v%O={$?*wIS;YG>pDSrrwHniI
z`wrQAvuJ3B8fTmDK{}7K$Y-6!wbmcdF|!h%6c>Z;>x<}?IA7Sz&x50GipZRwQTX(s
zDRB+Af>noQh@|%!=;odTgNHj|eNzq`n0AVDUtVL(lW^gNo+?Ve8nc!QS+I2%=li-1
z;-0B4G}~AY7AeXsG`E(|N%hd_-$giae;!r_$3tX$1}bv2*V;l2TqbXZWm;neC+-?7
zwp<D!k1GT#{l$<s;Vo1gsDKr-f1-b(n#8TKS~zKI#<W(-5YdRE&h}HMV0s$&D@?7G
zX5O2M;hInI<7o}@X|61BiCKUF+b@A@y&6`oG=;G;=itUJO=hOF6U*v$Q@7+enCs8`
zo)*2r!l@Z>?T{hyy)y;shqc4d4I(B|^`_B(PvX9D=EUTN4J2MPWOowlFj%t@rg)U2
zamh1$>2MOeu1toqOPp<z;?3PLTIBn1F_9mi%DaQy3$x%9L<nJ2gWpHKkIdq}=qgAo
zJ10r5Gw1#GIY5KWn2W&*+I1kGMqCJnYAwDm>dgd~wfm(j%I?AXZ<ir;`B6A!FGrO2
zy~l9gZ%|Y^jx}`?1pjXXgpIEJT`lT&PkD$giySf<mmTFiq>>-Pe|kk|8P5MKAX?&N
z*+g?a?IaQ7chc>X4$=+os*GK*Cb|{v*s(^7nIZ2&OkYa7Chx;as{?R9YzZcH{J<+4
z_#JCYgAn2n1T9y8L+;gdA>`kBSZAWf%<m^b2k!#JDE4Rey}E2}&>g&)Do6edp9w0j
zUSOgOzkj-Yq5IqlacPMWTTyR^A*a7l*DEtYzFNeNzqpB$qpo4JDevg{$zbbdRahL|
zpWL_*5C7Eqv8rC)g<d~NkZ4J;^@p#Z7xn=SOt-=W&K#ef{h4>DjM(0ljhONM6gbv+
z!n_m5pk+cIWQ^`c^M|U!>5(V!g@FOvuvUrqEI%fQ3fE_AC*;7=i7F)WTn;3B>4Lhc
zoQJzz%=TR6@A5UJf)nqD_VJ#NP1H#KZ``5dyiM6nr)G5edXgTz%zGR@RXF8JFqFRf
z3CSs2aQ7ZD*>-FR`m$r7vGxESk~Jb}%b!E`owq{ZVq<2=S%^*gfnfGM9QU8th?;Zw
zzF_KkVdtJMP!|QGD5}d@R`mg#@0B5*!f{DTZl#dDE(CHiG#LDE8n&djLeK9{;PR?o
zxZ^5EWETx!lb(t&H|+$hQ!Byydw#(@>!0v;wk4BQ4}<W+3Q%`E2kQ5Ec349WT&5^s
z&*Zz%utf&Um5+nn9~}_4pQR2UN0;)x^8Otu@Y3xEhL+i)`SO1B7vK9v^S=JzkbjsS
ze*#{{YqAr?3ixH56;b^92@k$%2BSBI%y*J2X1))EVOc*RrmKc-yI2Gzmi>s^8V{jU
zpZj!firAIQMc{oQ5zVcfX@9j7_{3=pzH}~xlr&kC%74esk-X<TMuXX(<*cMPp&<Wx
z0*L(k!KU*&@NQH;rk5W8rM?AlaDEetTMWS~Y!IkgcS6`QF*6T2N*(9w0`hEf>#l1!
zY5hD94ZiIB++BsF8Mfh4YRn8oz4#??0JA@mjGtH;;v#*vM_!G&One}OMVXV;laC<8
zEyuENDneYZEHNm%Lthy@#|2gjL`hDKtv}rhkAt1Dap-Z}w(mRIsip`^a`|)NFX!n+
z{{{VfGNeK;2>ieLNE2@iV8O2%pyBr|8Z+&$uzI8>+ZuHP3)9@7^=b%ii0(%s7I{+H
z6+UR-SV8anvSn6}&EVx&HD<TtHh$);npxs#e*b!niRH?4@jo>(-L?`9YYR~Or!A_l
z{fBzPr110<-|eQpq@JBSAtdynV7K)M_?Z^tNbW9cfB6}ApXg7-dOxVtBnqBgG$uvW
zoHusoGHMms5)aK7XwPA|&o3OebS#8ivKc;G2BY4XBVa~8!`+sz=y;|M=cwxu^ZjMi
zXmc3ojlKl6L!W_-1E0t8UC<KU2ngRE49OvSm>m}c2YReX3C{|a?)eMzIvb!o?ILK^
znvt%pChU#Z2~=@t#^2#jah}^#Sf3gVjyr<DbIy5bf#E6K{+~CDbk`$6lhw&<3oBAL
zV<md~y}-(;O|<v9HIdFMf|!s#>iy4}MI6_IVX0H_%BD!fk+X69Aro@^fevXXQGl{R
zpXsGJXV7n=35ox=4A)Nig}$L*aShMN7bJ_wp`>DzzM954>w_e|1B@Yi)>JUxafV9F
zW3bB59oOHig-1oX*c!b~(9=r-X>=#l>)Zm7&dr>LIX_S&eoMukl$thj@7!W{+VhC_
zxu?p5rC6CfI3^<bW&OzB1{HRH_cMqYJqkkj_q%=6ei*(;pE#@?N#83jgQRJvU`bUQ
z7(AT|3tY^Y#>*548LPlVU56zjc6@@_i{EoT+6kzuSqHi0kEC0E`C#GLzc}njJ&3Pp
z(_hElVSRNIM4wb9iQ69vUgomQZv9<4q`;c!zZ^i`S51VgI;EJq!dbe@F#yY=mkINl
zYf*$L();}$LVNF6)H|64Jv@u^_5K-bp%20Lge)$etI2-+;9T*ymh{n_=a@S!7R~a%
zLd1=0n8)+*wH8Wj^up_W=e>bW{;f_r9D>oj)<Xyj3de{n9ei8+2+c?d)N2&MZVy#v
zcexc;x5Yura~(dflHq5@-$K5q0BbgK_hw9gT6aMYhU`hen#xA$s|&Z#;&(9yhhKyI
zMisKrQ<tTjO%Q@)6o^xd9sI4hj4P^dV(PzVkbGAJDY>udOo;(Yo}>={HR!Nu>dNeJ
zv?XyL7J|#z3GS!9B=kMX!=i(;(DjlkdzNiLboCrigXyrwkSa7-<qS9U2IJvRT|8^~
z4{DO7f`7j<;n?8g*fX~o#E;wr&vpJ#W4VY%h^Ny*qtbE8lS(kJZ@`v`84$sl-L+#x
zEauQ#{Hkyk6O>I@gWpB;$>P2B*DcaYV|Osn{EP!zREccodpI3ijf-a6ke<@_xV?BW
zO`x3HB@&~aUIdzd9|D<ru6)j@172UQ2_?&(qW58KR`l%#`0u&r?0In>-M;t)xU~JC
z*VL}!yvHX|{>4Jb{NW1n#V0|Wr%3e<mP6!*YRuI!!ezI3-#R88W(T)G*V|ll*}RbI
zmmb3;eFf%qzz;@uRHK>j5JFi#Zai+r64N>`!T18~lple^Ecku(+7_zpe}i@zN6{R9
zw;41x4*TW5#0cTG;J^7u&OTiW7GSmlGi4g^n5{NjW)K6SJ&X8f{VHc6M5FQ9N~F(i
zSXuBzaA^K4FjOane!M5XcqjPYGQ!2ps$_q^kI?ru8uAA6d-~@#T)S0<ZGNIdh92Pk
zhD=-N`N<&UOf;?I&bP$JSyIo3-Dp)M%S`23@Q>mim}_;4pQEEGge!w{(?Hf_b^_P_
z-i{G83@%5fVQ$q8>T>A~^)*g`ngij~WyDV^)>gsxjp1Ol=>@nXoso)P*VC2$6csD}
z!_~jG;FIOo!2GF<(3Cw0EfzFFZcQM%bZRn>-_v1m>v%j^!}%@tPH2DB6zt;Dsjp)b
z76uLi!`W{!WTwCHn9n(<H#y-MtCNuRE)iY!Kaq%UJ;9RM+ALvkJ|+_l*nL2gC_cFe
zqTJ2dc4q`ITE=(6bJR%Tgn2N3)KA<zLY6dO7v6aF5@XgyQ#aisLPN0y4D0s-UElqn
zubOgDwLAq9wXy}hN%@dFXRX9x_5kMEYt7HqFJbMl91P|8N_)<je7DP*q|6PWM>DmE
z$EZ52x_F4+|Fk8=pSFPA-6ATzjBsGC5qnc0N8Fr4go91V;GpA2=N-xb&usw`(V9*g
z^GlzN_*VyG4_M(&&fe*In+mIsMdRr+TBN177`<(M!Tr7gJ2{ejBAkuLrtnC(<i>q;
z3$F_4)BJ>vu$$QE*#;AAd!S-A?;Us?fW;ya_dW|SrR5rq#DOGo@h7M-c?NrqRfE58
zET{$QGJ`F)pgh(WKHlTm(EEGw%TJz{uF+$a*MHN7%0TFUOp9C_orf*rJPg24^jmC5
z%-bhI;JN<9C3^(KNFoL6Wx0IkriIIc^U?4%XI<5C2L8Vy;TF#jyAIZY0!zMI%$fxz
zI>yl8+(kXQ8Zh?Bb8ra>mexdU#FySZ;NtUJa&REe7bvV0jvkdIg_$uh<qd`4HHUEd
z1&X3|U4m8Ud`N|2aBq_*TQBryv1+-nsU`z<UV8<t7u`WgbP{9CLSWaODD3vpW8sYh
z@oVV?6t6i({qF`zY`QmM%Fw+q_uez?+A$8wk86O}pLI}?){QZqRIqCb#)?2W*1F3Z
z61D}SgSbzyOG!d6&6}|Ai5jW66AfF2R-)*aRn8+~O%y5($jez*;aJsGtUX-}qH!ao
zuC}xB%&JRRJnA%e1_nTG+ChoP_h`1-o1eJQ^Ex`ZuYns{b(lIwgS~N7=Zv-^RAlJn
zJjqTKbT^-Y(GU-<m;VYHzsKSTV^b#549QX9zw3uS^5^u*FuYZ5%x>@JJLcQo!nXSN
z@V~t>#CHKf=iU6gQ)5o{?!Skf!+wGR?}@n$lA(=fp5jA0IdbZ7KUU-WL;CB6IT_Z)
zxj#L{H22L&%EU^{Iaiz1R5jC@)l~3|+9~+F*N0uwP88(~&9Qmbjdu;XGjBKVr-o51
z;Q89J(`&#nL=G&oP1%2kAECj2mSFDoODa-b3BK8Tu=d?|G;gSoetUfrlZ+~0(<6k@
zHP*~HlJ_J(oWe;*On44XiyfY>#Ez{KlbbV7qr+A;>e(?H`qvep`N(qVu>>>Xl2jq}
z@4YRsENxQ%<SG357XWtmoIxyqmX5l92G2CR!2C=C%A&70rpuCfK4ub;y`PiogHf=k
z;|QKny@rW$mnF0LZp&wZ8C<$-PM!|eAqh2wAf5dcEfl`MLlqH8+YtdZW7SzJ=Q;GA
z&>@WvDmizu3bdd8f$bhHG+;n2G}tcS9Cvf_E7yd1ncf0wtxY`r<w4o>h~Vk|LgGkg
zK#W_c@OUr(nQgn9Gh#>uKYy69upm_?;cSYjXAPKMuPL*gZbJG-)<VrQ5tMdvhFNPA
zF8p;EJ7yooJ?axMNJ)<D4HGl}^JAq=Vlj)9eF2TxM{#!LG|c_@LvnqOJjuOvkyaf{
z;2h>WC_X<4k^&Z^Q(hq5w6p~KwHT3UlP;t3-UvZ<@mtuW$oqt!cEgr2B24>u8%{0L
zX47T6ppoxiBO^*-`z|#wU%!A_?^k1~CwY#$^Ngfs!UvkmSsh~M96>eXJc<h+(UyQd
zm|mR?S9bC~?>-Y8T+<96{546<UPEE<E=A6e9V2+g*3gE5zXhe`3S>ewLiY#*qWt)?
zux)J@>Pb6b!YNB;^nyQABa?(g&LH-m;D_g=f#{I13jCc)B<K8Ph=H&WL@s$*Bc_z1
z)pTQUd%Y7nD`l9)Br}#9`;6w96~bM<v)Db*hMldc2DcZ6a55x;-*;?;#Nuys!Uzf%
zY7|&x@@&kH*CUQc#b7t%EUmHS+=YJ$!v4}6*h05qZ9yEYAM^-jPiursqknj6jhK5=
z{?MCe1F@lT6ne&$3MY;QKnwRHI+mH>c~*rLyien~I+ZpJJ_(76(*^rvG5G&?RH*D!
zW`TEYh{a_swq}eOTj+?;ujnvpf7fLW&$yML;1am_UIguh*I=>H|1p!Mqfc`(<hk21
z(fAKi>Y_x(K70Xszg|M;r+ZMC59s)G9GdiB11T9tgtCSx?$cU=smn4!9IA}D@v%7T
zM;Jc?xZ}bL5#Z#Yf#%gyVA@JsvPtzdw7Mn<BToE=^z}-Reg2am%6%a?>TgD;9S^Am
z&r;uie+~K!dti8TI4-Gr1T}U%V_13~qe3!q`<sF2QS?FBy5|u}xX)wL#1{Z5$Kkhe
zG)}rd4Hg^`u|eDU`*=%~V0^@YjMaVyJ5&5{@X%l2KmEJpbnAH>x4b{=to{YMOaZGu
z4#NM-V4jLE!?QLSkRh(adc_BD<wp=aTF1}%2S?;MA5|f7pZb&OrHxR1p$szP$721E
zBJlj1POo3>&$fO!igm8Wn55B&3q{2+``UZx@;(n4p&gJu&l+6*W0JFe3sHI7P1rL=
z%*M4CFwv`Inmc2?Bm|$KBhNM&BweL@H{C(q91m1Zdn?#n;v7rEQ0`Q1ppUamu#VsH
z1U^fjkTMa^nVYlR{WGMVxiMJ!*b@H`PkiFc=V@WjVbGqpm^tw`cJcnCG`R`=duPy^
zx|?+Ac)lMAc?{xre`sCX29VEK3I4lQ2rde*CDZ@DfK0Q|Fh13mvl%te)8d)r;OAK0
z8|a0GUJ(l~2?MKAD_lJG*Z*fC7QTw6F{4#boIi*j@jnmqN2s&*o{cbK>n#9JJskXL
z8aA!nh1q962_7Xubj8EJczNakQlmA9=X?8LzbG7Jq$;H9h$V{&JOT3;s-yiQebx{k
z1Wv*+^xp0W0eg6^ufHe!?Nnf2J8X&h>dUm$mAmd+Ho=TTMkM1#GgN*22%^0rNvrKh
z!63kZ{nZM`++91RdoRe6kU&}96X#A2^<{X|O_8K)@!y#{_oOX%{h`IP0?+lC5qq&7
zn-pck_D{JB4x3wql@%g#B+nVnsqyEAjff^#AdC#xBra}$F>_%k6ip8Y^Uf$pc^obz
z9#<6NJh}UHqbF6q_@0(&oTX`jeE(hGmlL5TM?XB}pFPC_*q>Jnc1^}Gdd7Xwj158m
zUlYOh3}@ARxJKuV`Of*n|G@l1hcx2oD2TZ<fDI09geOVI;oB=Yvg(~V3F)(8)ngxn
za$UTzEO`j@^%g+%6m24!?ku%y;@*O_d-#4RUzocs1QX8%NaX(*px9U<$erRm<Nsab
z&yc5JD4&5l2fxKin-yU2n&<H1Utq(r2I`a$N9XW+o{@qL-d$ge>wY$1XPYmwXXnt+
zOOx2xlw;TI7JBzD=lE+GF<b6*ci`NG<aldHmwhPMR~W+p4Fi&GQjZ4t16Z1m9GiSa
zj@7?<hMJ3>@jLQ?Y|$=b!R3jC<cUl;K4>>(8C!Zm&Pts>M?=Bq@NdCvOcr?U3x$|7
z+*?*W3T__dT#pONOn0~oMEGma+;v%WVdF_C!V4g(x#0ZzjtsLK`-o<AbC#CDBT4v%
z6QH;-5R#o`LHBDA&aa;U$NBwt-cbt0XIJ9;eIL-@rct_OPCWV>=s{<OAxpj5if*GL
zgty(i>z4OIvTbV}oNwpn^4Z%Uul)lY*c^*Pk4Q0b%U(Khx*6L(>^LkOmH=j)B`-Rl
zjfsc+1leE5;S+!Ed#aYwZ416a^*`>2R0+YUrUOX2dlE!^zX2nY^vQ-73gnXeXSC0`
zhgvqbP+3EjC2fqudEZ~a75z3C_Fap8eI@1`>y5PL7ti{>uoaGUt7FFOBFz1JQ?gwl
ziki*(30?;K!9B&ABoMw(uQ&of2mb*3A@1mLZXGJmeJvcU?nR%^n_<4G1)3Wh(i>Y&
z;_!RgtaB9KE3e8DTAPLg{ga00Tg9X@p;<6rdW;s|tiZHiGHmpn8}O`1j`&AE60Sc`
zVB@2YV&dl-YTkdI5Ll>6avfX+o8BZmZeT~w>uZypH+nJob}za(4-!u3^7rqfKsec9
z$Tm*qXYN%~X?y5um^peb{CJSU^R7pQ+WS)YwtE2U-z+8u&$ZZ{j|Rk!yP$3P{wUS@
zCHNP2J3sJ{W$O3)fq(uy3cDA>%F}mHamF)nnvqPkpM1ulPI6@Rs{pvQSB1Z4PYHE1
zW!bea1IeQuS8xaKI)2tMB;HT-z-8_~VXf(O2%FCthE6=M_qzkXZ;>bVZTIkFvpMse
zn@&Byo|brcjKSFt0Ebo_$JqD(L+j>G=(Z$6c*wh?xeslm!M;*ZaERx7|2P!MZ_HVG
z(T2=yQ9$vW`&81u2KS2eNXw+hFf(};xC~XL|21<TrLH`S+!qGR-tECxN$2tGe$F0W
zdI?r}C!rl&z!c4FDo(#m?d<>2&stY-_SP%h!#)Hwa(W>8wmI9+ed5C+R`Ffl6tt=#
zAgj0p2m3vRlJ)uoFNmNaat!*#XptMi+%s`dMaZepCnNejhY7rcmA8H@ehf2WMcXON
zR+D2pBKiBO*?`5(wPEea{qcT%8&oJP!ew@UvCFJRSmJRG4{g1GomSc4)pVAgc&&*u
zC)wbzv4=4{{x3Y<7lytkg5WmKCaMp5jw6rilCkMmaQ-wUR_rYU-Jdl{f<q?Qev={h
zyl-+|Ul=AXx=$Z8SP|FfGK_mz>2Wd7o~-yqhwRWNR%IVh{Baj;Jsn0DJ}CzKcjXvv
z{uV@e%Os+!Ma~WpUMTfxft453iNn)AsxIpf%Z6O%4l>@&>p=R`ID|V!4?=CfYG^FX
zg)1N5K(0a-z8bGj;^{$XWdTC)>>qrd?M4&am6&)2K<u3bn0`KiPARPcx6+S7(duuI
zeb|Pn6q=HOyY*Sf7kO+7e~B~8J)kslJhsYZ3a3+T$v6JKU$Of%XE9C4$$JpVcWOI?
z9z_%OGvpl>mEJ~?sl@sH1Tp-!S7KWu_|N8?2DA8Z1jH-L@l$mq);KT2kO3X&m^1@s
z?bc>LTRD>_V=#&wourB5B|^o{XgKhNv!PxVf@_F?wUZQiZm2(wG0<YGpTB|w_w~rS
zLF#Pu+zX&TEEp3eH1S;6J;7mlI3|arK$>nStVoIn*M#SS3-3sIehZ>!9ZS&9TAmE$
z8H)d%)FsI)HgUF?26yZkkhqfyWX?b{c3nr0WV^ReS-l+0{=|DCkAA~;>sxf|BYwA7
zUL_R%@dby~zBHx#1{A1U@w0Oh9=Y)ulTUxa>Hli6X1y+aes~KNWu8K2lqGJ7REL7W
z{mI=~Mr3EsIrwv5fo;voLXEdc@W1yWrs32Anul*;8C^wXDtN!`>K6!^@kWTBI~Luh
zQHocCA@x@j*iL_ep3`LE=e}w<6(5Az6-GGEKN!Uq`>5T?00_^nz~uEi;2jkax5Mp%
z?wJeNlFa!Zrh)Kb=4Bkb*n;KOo)C(3*5Fuj4eosigMMBnY>HL~@77nMP4+Fwu}Q+j
zUmq#Ui^YUTe?YY61$E~xi@kgJJ?_b2%r#$4M|Ec+w(kJH$>!upe-U$7o(P`1)P(iT
z{@@;MO^ysC;6J81$D`l{?b#q;SJ6rCkTxKacjYi|gC)LiodyQB`!VI{DZwT01f053
ziHocBSxvFL5E5gKAIepUXw|Qr*5?Y4)zyeka&JSDg)K9FCC^0C2hxWt<w^MoYj&>W
zBCh)#kMLWUP4BJ;eTf3QzXrkl$uVi$@;jK+^%nx3wLr`sGmNbIiX$yFN$W>Do{4(k
z?CU-ZT>me_bg&bS+@1l7-&-M@GqWay^B#Wc1EF=3m$1Ko8B9p|0^1DKSh8UNCJH}r
zJGlkh<8}!dW!&eS-U6RbSh1pcAK_aWcVsPffhBJmaZHaPS+ZJ#tu0VO&57EqtmqPT
zJSIlF(jLL5@(2~@Kch!p*@El7^-#EnGXRDZP~}6fh2OiZnQGfB{5jz{$eqqZqu<5w
z<DQ84xEZpGQ`Fh^6}yF5-}|!(+7digW6VNR|ANa!S+?~XcU~>w`Pe(paAXOe#V4J`
zoi%rX_rXb&2k*;y_|wEy5-N(i;N0-;3ud3ZPjC8(NW{uHf@Y98k#((xo|<3?cHE3D
z-^w97zXLNBKI3ri94gzrPAE9mfi+!YFw<Qb3cI2Oo3uK}$Q+2TcqeqYM41FfS+F?H
zC~v557ED_#*mkGYf_aJ`{Leyxh)dQ}Pm4_9Y=s?WxNyHpiof8tdoT@e;2o*4*Q9-4
z`oN_)Nc!Pn3M!B86|~76Q2Re;ktXLZZw{BdOX1$rpT)2`?gQ-I#Ak%BdnE4^WQme$
zKN6<Gd!Mc!@%SQbcCX_SB%V*9G520#Ucw)6*umM7>q>>R@2|imEm{c5`hu&+ac1+J
zClC{>&So#2isyLWRp-!QeD&6xcn1f-!qaU$H*m$7-|Em~(Eq^U*G=e>trHYXl-b1n
z{LZpfn<YN@Nn?Wiz^6iol^5}zaN8Sci(4DSE{wocBO`HIsxF!E!V<qlJwyM_z4X{)
zo_mbThVl(tAXer!#7tDi_z)*>;%@TW{f)`qA1a(N^$XNJ%)yQBz_dLK;`^?^p;sr6
zW*C!otQ0dj8%!lxlWn{88Y&hXgEv81WMH?L=<qDxAK!tje4Rc_{)Slps0(7+x5M7u
z1Bsa(g+<QhJa_Q`%RXt+#PS@hnK}#19~RQMZcS3if?-*!H-@PC3)QZzSZ!VpDYqi&
zmh+mJlJN@v<SKz^e|t`%_Atnvv5@<A)4BUeo2HvQ7yP#8W5bL9>bq5duH*Jla#fL3
z$5r5>rhe>wA@8*-Z-Fyy!!h^g59z%nO^|T$6VQjrxGd=k+Uc!@=V#2BOH>$IW)2{4
zWsJzHp?9%ya2tefdVv{@TI|Y=znJ(=4(2&@WAn`ZWSPQB6n}7`J=4FS?%flx%*%%N
z?aoT(_E?an0y8wc6Mz@1jo9Y&+i3h-i$8x9VYTZym;`>X?6U@QJN-wfo+`(5*<n!S
z=eWUJ3!xmv%$8{q4fYz^-`cQ~bGooD@*?#6Pm#FQuB6}I-{EXUMuR-K%hB98tCi<y
z&wFtP#)3P-6>Pyy5(S>xI^Zk!6J1tWvgdI+q}xr0DOZ`Z@#{>;{teGT^q@j|F*q5W
zrC;&UURfe*b`f292lZ1lXZoHDp^KBmBzyL0Fex_ymAUC?c~6UM%rPS)-7N`To5P)h
z_IT*WMJ#>$4`Odwv8rWX;6-&9c1>}@<aue(bh{iyBj?lBZx+J+FSRf^Lz7JMPJ%N!
z#w?_(9UtEPi$?vQLP$ibuvJN)nI{~TidQQ$h4N#VxYt;i+F(lJT#Z?ZGyfUS2Jqh$
zFI2=*o|(50`de|9--a?Y|F1!~cQOmh<_(3cQ)=wnc<y%H=Yi?QpJ@&Y!@VEsP}TPr
z=KX7d^iw{9yOs=5E<1@c!%V^PrUMr9&ymB|GHTO(7`i$CuKsld>^Q7P)G`K=7nekw
z^QXs*!}UmtM;KLH{1i4?Yp@3+lJLayV_?6#290kF<nEGWXdK1wF@doVY@CZD_DR94
zAPNFy2NJEtsw8_~o1i==3i=Jn!-(U37$<MWIrUTFZ>~Hupz5rf?^(C!yHT$>Cj^IX
zHSCJ47ucnXxQl0ZT^Ei5zvh1AxRMP?-``8GpRdEuUjK0Nb7hhqTqZQkS7cM>KZg4^
z|G=YbHYDYU6}DSBV)pM!!Q3}Uy7jg-+uQUU4L@2jpS~k>XNMK*y=*|16dZ*U+U9V5
zWh_=2mkP<{l~^%K#MZr0V8Y8(EdMzhtDPobLVyZ$Uw9nb&*!1t2!A2jb`d-^P+>Rq
zjL3mos-!AwH5#0_DX2$|Lc>5kvfTF^=Uqh7=bD$%)JdOhpEQu|8YxE_Gy;SIJ3H2;
zIv!sJo#+18O>oIYmrQp^L)Dy6aMj!ZJ99a+r>qXHWb@3Y?;hGuy&S8q*l<_kVp!cb
zfLVs}Ui+%&aALG3Tf1ThO#e`YNh7|ae_aUv=I6QG7gcz2znB^IG=uNkv6w=(fWh@~
z=s)ngkezx1enBa^?GZyn*9UAp+A4V3T?2zHR|O-_3OH{Pg{{XY!>}`zux<5t+}Y8D
zq5&<=u2nlQ_Fy>ovG!-hQEs?bPm5$9$Q6<{^<v7mrO<M`5JXGIOD1jH1nF@v1%Kab
znANL6M)mUkmDN&6>h}rc=S_snL;ctxcM<V=7%FVvI0u}r{h>~xo3!g!Dx6q#2;HV@
z0}mxZ=2{-#Qald5TW_KIPcN(qmP)paP)7CZmTc_IUhpZgX4&I+z>q)Y%;UBe&)jYn
zJT&Z~(#Hwxe{yeom;>*vSfaPAEL8qZptBlI;i1nKWc2JLz>(X)BUqO0S?`SHe!IZT
ztPaWz>|pRO4R)cPbL_s|gT%S|EOK=RcblF;H=@aMbN`bZ3=M+f(ikYWW*DMth=u2l
zqvxs!N#!RkNSx(K6XPX<?kG9@`$m;mx+t;j6F&-R7YkrO`zPG{Q;o@k61aX$18Y70
z>|*b!`0WM=TW-mYi>ye)WCOOnqFbofC;-RHd=A+B9a{Iv;ITl?F!C`aAyzkG$)+DL
z$>b%*D^2H20}-jIvx7&|N_f_)0Ir@2Mvp~2%a!;6i@(XU_a9b(!jpfPJNbj8YDy=#
z%$$iKX0ecXdj^f@Qe~r`W@7N|HaIy|iA)--373Xiu&8WpQtF|~)-K}Qnuj{9VnPnK
zyYypAcNnt~jyL&k@u>8Yi460JRAKW6*}?lEL7;k9iFFTCA^T)Axfi?~%xxD7CT}93
z<XH^vRj?pEvy|BSr>1P+L2Kf`=hhyHp0I@QPJH?aVOVf89%P@Pb*idRwfZrLvK8P>
zoC!M{REysG;vmXGhm{7E!or<*A>xz;NFU}w?pilk-Fu&N%e26SIKo$h80@RfLYsW<
zBKW6+=6_VN<{Qs*Sn>J&%nHfa`3lT@j0>(P3I!p9=PA4m$%^JT;G@%o@qb*<;;;@|
zYcd=641UdBA4h~5&5O{(&+z-2PUG;O`b@mu7sDT3MfLDQuq=HDzWn(Yr+n|g?C<Tu
zdhXLLcka*pAAF-DAGTxsLhiMh-V8#t3YodY9vocDsK%V{U?lB?Nml>Cl<T~|QJ#;v
zJU48xHV_N1UK2zPwmFae6TsiJK%y)h7KWURMX#(R>OXb2#AVL{a2hfY{FoMbH18^o
zReFlr>Q5li)E9GaP|3mnr10_j08-eRC+u7k2{u))adg5ph_#eujaHhhd4L8{_L~4D
z_3>EO;RHjE)?!S03ocz_!0O-dv!kImjN#q>I(s9uPkxRvPu1C^5gK6sW-_`?HbS|_
z1K0}BA~2UI3f(~iSm^O8zRQ%t&Y)Tl@770$-Y}?<A<(Y91kx3zgPkewprublHqD$A
zrPM?0{Nr$AtR-vT&F4xRR3XLgJNBLN;4{T%f@k<M!TeJqEon4jW>1t@+8xg0eX~`v
zkw0I)=jn46%R?Hus{@+8_o1OPXR~|>l?=`@U?uzx;o^CgUYTjfoP<E!$j|ff#Q``|
zGys;^oJO3@e^)yJYJ`n~|GNz5(VDH`aM%E}d+tK)wC7-Obtcbwv<Xe$>+$T$CcdBi
zMMV)Z&gFjZ&?qDoMs6`C0XM(HYRzuckbQ<W>+7NWfC}6ALXSM=+!DQ$VYuaI2-Ngm
zk=}g3ox0;osLOeY^phFyB&c7e<{8{s;I5pLXs<$V|B>N4TU~l_j24^u=NOp(_(KPu
zK8<P*<MFbU23car?=+W1(6cR+GxU<^vo7B2`0zxSTrW!!-_>GaLm<ENp2dv)U*LkZ
zDN#M0&)@lD;OIwxTyi!8Eo9TsCEl7oT%bzgv!c;!PX_g}dPb}LIxuh8He8{<1E;q}
zK)0S9sqrtCdKJyX_Z6*}9^g%@I&GPKoH<+H@(ItCSD}Bz3dzp3{BC(`5f%=NLy30>
zcwfoFp2cn8nR|`4=R{)2mI!pX&zUo~KEwKy5{P-$LLdJz!Q519n)pvi@K9}|%A=2>
zH^hN*=o6e&+<*i0HA(X)O{UD9YyKaU&}6FztkC-jy4T(DhP@oT*%HNflic4S+lBiT
za$r`+BYat}#ysnSrSf*1_aUT8_rHGzA>o`$qInNPIltplQ#_{BxL|~g4RwEV1`{hf
zX+)|LlMfGqdkx&>MV<<O)Z@`0PY15tmuC)92{hMwio}1@5`5`V4R*X|TJ>6sr{y%5
zSB3-}P4&^PavR<^x&|q{+x%yeA;z`HviZ7W!2i0V&^1$;MR_uG%Kb~tw``!X#&=*{
zub4<$d%<+@b@cL#6q<arS^52q*e}4G4dcBk(X9^W=Z6Q7hSSkP+PO#Qx5$db?~uns
zA{C-Y_-v>(0=-;UfGFKZ2zhjy`ahd2^+^sA%-r8Yd4!m~AG!uQN>zzc^>w_wNQ>OF
z5V3O#>g2?csUX=l5*LP>u!U!>nf-x<@Ziz~T*m!};&NXYtWW_CY$Fx%_k~zVj;dVm
zM|N&agj8b(>|XK&x4T`Vim{uq(CP@BsvSTcZ_z~MK|!3!vJ?8nsglNF>u~wW0vs~h
zjJzBg4MFV+WE0PcPcGIVW#+C_^%H{woJka$UJV8ZtYC(Z275B558Z3?uw#Z6QJm<F
z=_k}_XOBP3>s^Uc{!7O?yG7vsK%2PDoCfcCzb#^c9HcEh2l;l^#N%uu^<U>ANFw=X
z-7ixp{n!o%r{#m)+TQ|I)?jUyc*nro3e2}Aa@JF0PVSu@v}LFUOB7lqstZ0qRbd|_
z&HDpKB3HxYXeHt|*^U@IEXCg6A!vT~wq)vbQ=)rZjfr9urK_s5IY;cSFfTn1vdf(y
zci(A=;~CB;y=DR80;E<QhCr{%5{F-bLZN#a&9$kbXUxXIY{h(#Z2t|4_LVRzRF%MT
z?#Wi40q6f0k6i-|A$RC-iRbV;5)Z8aVQ$-Ntep`Idlb1V<j*{zkay|~cN>x?`gbs*
zm(MS3C*zU&)sW4zqiL$;P<rYP-?Q^xda{U3_flgH3wx>Oh)IHFaSRqmeZ=!0dvMu>
zf$+#r#EL4f@eH3L3%$Ms_td`POppdFlaUjg_W8r`2O4B)TQr{8x)t1Brb5>_MW(z^
z4%Jm}z*L`hJUlcG2hS{luCPx+zeZKk<9roAk2ELA@1kJ5mnj){iO&G;ZNnq4jWKOh
z5A3*Q!)(*}-XX|}#rB*9uK{vwd!`mM7##ph`j5hdjJHtM_eJp3c+Gn&3&76K0B!8~
z&+CH~yXTq=gKw2_H~K_~*ByzKqlZG}wJ7Qmx)3~@(`ZV%I@muAfDzKW@MPi`bnxlJ
z0GT{+Uu!@f*B$`%noVGtmVwjv^ZxjuPRzA(qMF~+v0YUbW@N|`&nS^ldnE$u`Q7+P
zRX<$#p$R;1YqCB49`N6dQ0~Mv=DSfVD&NlUl$xek=_x{U?;Vi6?kvWR?}nJ`GHA6Z
z9JW3;C!=rk-O1k@^hKgFaqvtNMDv#C#EvFd9%zbIJ7dv)gdTfYeH(Ww#X#k%TXg5a
zqY#>N6gwaNhk-KMr0U{Ra61|dmu-VFujeJac`QQ$Cj5aj$7L9OlY{<Ot_phpsz5z&
z2F#xQ3oifUF39F?!QEJkq^`M+2mds}<GR%luqgnlYl>lljU1cNYRPPGDYAoh^^o5B
zRj}8Z3N=PX%&Nc}Z)g<Z@Zp^6(EJ^9Wvc{Tnf=f@M~ZTJrp&ic6VvmI=(svLQa4Ki
zUjyaIGGh<)d@~a~3z{W6hzf}*;T)R%P5kd>NG`rnBGJxw(BZ2Xf0-JRnn%L~7xw^(
z!?R^rkYr7;Gyt+SvW1;N$MC^J5m_<!1%~c_i8Ge-|3k72<V>|CwUQQGA=8cP6}_-=
z#Vt?^uqF7V3YW_%6E}-k`p0NJ=qxs2tr{KFX-Eq#uF+xQmL*`nMhvY1r9uYJKQHG`
zY?=R3uu|C`Yjqaly^cH_!gD~G9Sqa|J0{F891Sj$uTl-SKCpk`1J@HPaU}0;?Rurf
zK1Lcdv1GMiIQa+qCxl8}JEx;v^jBP=)dksst}slEv(6{4grEprR(tCev>%>}si(5g
ztH2)I5AYmJmb@^T`-a{ty2DPb4^ZYMkE!$-<c?03mU9QkTLpc#e7!CSY2$rp<FVji
zRE+n=hQi`ZGj=jH0^jqyka*$<Xx=Gi5rq~k1yQj290fmL`~~<H1RJ|~r|**{EV=g)
z%%|2%<zG#LkY9tq;#3cG>dZ&~n<bJR^UYcP=^vo@{1&J<ni2W#UaTGU5OlZsV9mcp
zf@{}6R9%$~?afxqsD`tJ68tdH{j1<R^B=nNPW*9CK5yxq2Wy_`5Z7@@VE4UMaN7Kr
zMwIP?RznN)rw8fnAKJunyA=1`mL*|3w2ApNZ<^>=Mui<Rta#yNXty)O1J?q$OS6lP
zAJKq2jaxyU-xC@Z)zhi!+$DdQVrk}Y@YHvtMKhm+wofMnc)o_!I|@O<xx~}&uEa$p
z$8e#%0kcaq0?}<v3G(+s%k>wSlKGT|D)avE<!d~P@=wygM2B24ON88~3iS5FM|igR
z7S;_9!-IXF;dvd;TAB6<PJNwpTBAPkRLd8#w{jkoqY8%1(?z>ICt$68FUHoJFdOcs
zp0{EninLQf_4pXf(+<HQR`P`KyqbTR72T#Z8UtsZKo7Oecu%haPG8a?GPAg2B(fDt
zYl~r^lQQvUhrrH|XTl2aIxQLc8W*dolUILrNRr<hh&pG=o@ewYqoaFaZfq#dPVK`#
zs=shffg(A6mAixE>T=elUdH*Wj-#&C4A?S10=!rdonX=ig$tsfBTtqj8-K?58y;{`
zKN+vv8Ii)_A@C`g_qjAhL_E?`_;iTRGzTc*zxPT+Wh}o-c)ZLRw?L1Seb|kW?<zq2
z#*uqpylF(X8gn|VfHsAJ@Z+}%8!*?B)fA>k<Ej)$&ZBFXo$y8w_1w??q^QDTjvlAe
zrsDsj=*+`v{JJoFn&(0D+-XqBlqq%IwaFZk5JHlKWS;vOPYFemib9f*3?)hGylYn~
z%9JD|Nl213NhPUoe}B8KE)C~>_g?FH?i=+w55toWkDxedI23T}BnGQ`8PjV7?Ant7
z)(?imgu9o}Cz9=rm}9N;$1?2oWHb5EmwCVPA22eI_3G4+%i4Y%h3(0FL5U%sJShZ(
zx1UOq9jY*C`$2AFku0q}*@K=35QRg3aY<7`B)?s4h@YPmj=ICT6?;B$cIm(QVAkmo
zRDHVdvLleIa4i5MH3^J(`VOkwzv87$Md+lt1Zrwm!U)@Kcxkp0&2Sh2ueWJ1hcU~3
zy+6k{7JcKk&2vZPA+5Na?ZU6j*$bWBx3EXCo?m?R25#;(ptXy%Y0qo{?Y6NYd$R9A
z*IP?!khB!N8Dns(jS)FjXhj596mv^h=V2~mX3Um1B9`SvP?~Cn?Qv3M$$>a57#hmw
zEj!74z-KvaPbFdzEg)eVm63m?!S1fwG~=@`@2!6jVsD$!r3&&ireHAHGJFv)JM186
zdK`oiMqjahE$jIFF5wQ1)1-DQJGp3IGy1IE4^m&6fo8Q0ov^P8d(?YacG-z*KB-KD
z+B5KFyA~PuTZ&eBU*sI_<Y8G<3rv{B=B!);)Hvwl&aK8oXW1V#?K7ZzU&_&;V~y$S
zTTk)z0y`4b+Qcn6s7w~x4<c>;bGSlBF^GLSFhg!9&u=#({=^khT{eTD?*dkyo&qk>
zr@07AQ%JgXUt*tn7m|iLvYzZNh=33n$L=|{D>R9hX*uWeFhyc?Z7k!eB?9`GVs_<Q
zDEX*K|9pIef6q!29Y=SRwPJtc*Az+kE`4&f&X&ZjVE*zAKhbw~I!tx3rL_vjaJ2Sj
zOq}-zvT~%Dt7{#~G=yVgESrrlk|xRePhj<(TvUJ230_5VsJOZscQ{^vm?#<QB)NlT
z6-Cexc?7?#Qzh>MAEJzd8nsVI!yPkZu&FHu{yTOK7qNb;!@&)3aH$maQ>w+WwHfFe
z_?(;TEJGEqf8kT}bNJ?STe0q$Dz2G68#fkR!%%hxa~scGPcjnz=@NZ9;$#}mWc>_-
z3T^t{)0{kFU9^vz^YPvz1JYgp2`wTG$*Fu<wpY{OZhD)O4KKQ}3L;Tw-z4UUJ_9|s
z!Z`m=Y%XZ*i>{2B7rvT#9DGuopE!NMhi-#Oov9)><2lPLlld4bH4*i8H{!<1!DKaa
zzNkj^V()rKY`#7p0uy`TXhc3vJYq$BeN%al704Ax$<q;hH(r*Qk^T@zOx+O7%alnG
z@%J;l&8~d(cYBH-_9~JH<tPyRT#o{eDNbek1XS}S<JOFfb9UIffQz45!k!Hyq2uTW
zSau^0EtavbKan|r<%V#A;3ScmDf5Gmm<v;kHRy!wU<eAjj(tA>3w4Va7l-7&TxCnV
zo7BMNz+6f1OlNEiP>0bshak>ZrT$hj)Xb(Bf2kRg6&9u>u<jx_{bvkM^1C2t-&G9x
z*$DFQAHnh%eUkLCQc`?Jo;u4}(;BbMcy8e@*!;tg2ps+Sw(Q~Di5r<Xs?3;lf3>1|
z+UKxs<ws8Xz5;#ib`1r)mUF}3+tO!84OrGy37!?(gW0!kXy|(m^$Uz3eL)^L+!BId
zW4J{A@o|iOITicL{jr+ubV~DfLx4>x?;<+{4745L$$>(2n0$`wV4pGJ@=)%Co-uK`
zJpnu>jl~wP131S?nRd9eqD`AMHL;wFc1pi_pBZCtr{`g8YLur*0&T`gZqN0dFpT%J
zU>^4reX72s1un$skeDnRnkst0>8WYcB)>_#r(ZIha=d`EeS@*C@(S;sD@*?>=V5N@
z6I5pW7^5<_qgj@Op^1y&&!TYLIai6K#)R{W$7dmFGACiV;aoI(j|pBr=EZ}%MTNmE
z^YJW@E4Q1@d*9K3f*{7EDISGW!xLe{tzhO5HRD{;X7l%{3GqF5m0vwdid0Tg#zR95
zXifZdxcxzvY*U|vO`0s1F*Y2#KA#8W_yX4BO?JX%AJA>WbCix=jh?qPu!dzKKWm(Y
zbcJtlKhvBD3ZCbRb*FRLm+nCArxwU-(WA4RB+&BY5p;IR!-k1x(Vpc@y-(}H{J<2@
z%!}etG6N;cc42v_6nA%P0q!#Sgo>p};Aim@68xmF@@)jj9aAT^MSsw-Ef|wOvv)Aq
z(z#0NB<k!)ZdlVUXgnDYH_kS|ueIj%kgo-E%D167I0TwZ|G+ahT^y@o3U$r9d4rS;
zzG%s3jLT;8bhp=##JDFB(#z18^_OOdo}ugb9)9P#WW4jpoIFdlglY-nqudjM!I%*c
z^HZPx|Ldj}?!kb=@A2JaTQc`R1{%Aw&;9<tqGxjl!=rhp;drDR$=H7gnvOok>ZtSB
z5z!C5?uX!6^LFgssZPX(izR!K?t%4l9X!IG-*b|$<6l=jnqce#k8iR}Pk}zBJo^A!
zzAfagnaa@Gs3urxYeNm(^3Xb96-2935S{)EV>343<6HWqcvvjloG_S1Zu@~{=gMGy
zF3Vn+jN}v2lfb>_CR{n@f$>u;r~~V-SG9iTX67oA?xY;lG!Ni=Z+F3th6dDBmg07A
zU^$dqcKpUcD&)!faL5sdV#iq@%vpF6Z!eC=h7e0CUK+{GI<W|!nEV2RE&Jfy-(FC>
zauTAd$MRkOu7EghzsN41*$gvmi0qZG*tNeD_I}qV!qL+p>GE^TSj2Y0_QBwC`IyB1
z&`G?aJfE>~3xTk1nuAXZCT@HUeJSto)IBS5U~eaGzi}NkUuW`#>Kn1s&=8ldjD`5R
z%iNdb3@iw}&$}dk<!gisplAJm5SpRL9Lb|GiE+S%QmLrF_da%+G{aA4J9>G!Je{>)
zlNQ*?VL)6R_xn&5elIbl4lg!<;>15(&cHR?;C2mC$AsgykZ4R#yaquLR<J9n1j;WJ
zbGhBt#KV3ZtY4-=(^@o1m*-#D8fikNuk^wH@E?k;E<;OIBM63R@PioRIDcO<`aTLr
zmrHjf-`g!oR|}7A&qwikkDtLlxn3;jEtlZy+t@jJ4=j5sON;F-AZ*EEbkDJ*{@OcW
z{RlfcdglNN2A7N6cOHSZ13|pt%L7g@W0t7!xgQ2hSqhq;zjH$`7!iBFd?5dnN!caJ
zT)xb+n_&bWD?{*xTr-?WtcKy=^YBdPPi!Z0)UF_z3oQ5v;!UqP^%c3`JM{%OmS<eo
zPsg~i_dOtE*b2Vt^H2bpcev|X2bK&sAT_puAnuFI71W=WjEH*!S4LaYojG#U@m)Ja
zc0@w=j$0UYjotfa?#0pO4<TcuKh|e&h0e!2a6a>=7fz{w)NMP^$z>WC&bxyfYbtO^
z_jzo7pNa=ZGmeJVFUEplo{2lG6aDriC$thkbxIWsuCyWFx9KtF&tLv$vN_%S(2TzE
zehu4Qq)BIo16&W+CwbW?;Bww&ynKS~Oa2VzS~k=(Ut}FO(_ctx>P~>KPO~Vfr$gdB
zT8J*Fj-q<zmL2L1?fG}H-&z80(ZitA;ylE7GjHJB4BYp9E2JbxWB;0P&<~L&+15XB
z(rittEb|JVZd9g;QE`xc`aMMb2>|IyCCqXD7OeF2i0fw|eN#6Nr(enh(b4ZH=!lR+
z6zNji_9wXTfih_}m!pb-3J|iD@pX^>1%G`4{yuS7arQf7(A~^Uxf}}{!luCu!5=7#
z_zS@cZ=&lP9qiQF0KYp`h|$${@DMsd@x~@}*Um>}zjQ30U_f);+~YM>r7-%=85pv`
zf=pInf4d`c!1(iEl2THE3*KsxY%&*JV?>~w{~jKxT*A=FZ_vz?Io+;jiVC#mbDmEh
zU_r(det4#kxCb4_KBH;uJSd9}*2h6K@fWykU&Qu?4|Bix8_^pFAE4Np_2-YZbK;BJ
zC1VeE;?D3gOxMkZY-x(VzsK-nURhHu^>M7fbCutms!y__*`6+X4Ng`Wf(l_qBxuHF
zsCjKgYm^7!<`a*x{~=@iMLh)BN5*I)_YZ&g-@)_^6`-m)3KEuVLV=4Y*G0ulVpGlj
zFCji6kwO+c47i5%BTixPcWn|kNI+a!XSgT*JX*ffra3n+L4d{{Hm~dBvZ(^~y7z_O
z){kH^?Gx5YUt=8P#aMo~f-jTQU~}dPh_|@J2gc}9NCX(xyB0Utf5U`KGurvt84Of{
zVfw9BSmtO%2JO@&-XSg^SUy9d*igb1{xGE(_9k?jlm;EjypnAWZ?T7UqOPRR2hHvI
z{N?F~aNJZ4T6#tYY6g<PW5Hk$=P$uhPko&At{BWRJ+Njy0r%0)C<-`>?u8vtwQ4qu
zlhPyGcK!h)|1R(~6+)}oSycSG4+3HX8CR(uM*h&G(=G}~3hP@<za>Mrt4WcmLl(e+
zqdgF@rxT?!9Wa0|;yRy*!SvH$;^)wSw(ppKxo<d*^*D>e7VU;+ok&m;8xhT;Yj__k
z<}C;i&~Zy-Nu;9^{r&p_R>iUX)r~wZ^{_U`O!<K;+jVKEyfz&j{1-<59DoTUj$=uO
zKH2s{j(Yt$&Tp)hr8A4zS+(;T9<geNobT0qrcMt=RZrosZ!N;}pQ<rsa3e&GTLXei
z6_TVC2l->}J-EjIHZ<PT!<DlYNqNQuuBRo3i@H3KUsN9nYqzP<+n>#-d)#FZdH#Wa
z=e0<Vttkz9!FtqTjMpPS1uW-2g`yN|Vjn3(4IVo~US$Sz9jWv8@8_Tj+bsrt7toen
zjS#ojhDd^gF>{0(?Ml7ReC<IJE1x2~F}xHyE}mvQiv=jRLV=VX*$0wp3;gqE5OJB=
zFWEiPjCgCQ!&{a+nED_B4%#TQvwA*fV!a#k>R!UA6_MyVavXZMoq@U)%ptI&4qezh
zu`4hJ1{*Vu6Et(;r&0XzTqELmPl^g}8qsn!doJZnC(K(@jN*=dw0wDo`E9G9g_gpK
zX*zVP;5RCk1CH3lvcT%vcz&8XDY&Z(9}RWLaSL|#%b5d;ZsTEux(((1<ViumasJL5
zmWg598S#u`BCj?zsycohoR($o?`^tZK1-dsu@%tlC(oE>(HPxS06n8v-{Pt#hCTkn
ziSJ}f-YowLmApCB+!=xf`?s;(=T-j3L?OL9)R-DY9)o`eWk^#{H=CIzK;`**bog1!
zpDbbX{3uy6#fRmnt!yPb)J8(soe|6%;14GvwCR$hM>xb?jzmp;jMiK%JnLazWTRTn
zC36ZYd^IGgZ?C~Xq6raiYRi4UT$A91qo^srk9#mW2{l#6VeletqBG40>?00=Ap5Sf
zLFXaP@l+1T_r*bb?h#ZrIEinn-a+Q?9xP7o#4Ynu`GQOn$+1=MAin4Vj2UJ{TBj@1
z^D9y?$9^H`AJHWNR!Q8TIVNOko+C8M`9VcbHN>yiqhBAsM%iWG(7vG<&V1`YYZqyn
z;V;Da`V8K{Jq8~ntFaCm>k60fnEw3_2s8`EPu}$gQg<$bd77p)r|~Cu9E)*XwhU3s
zD&ThocQOX;XefUc%a_LLgOGZ|Qj6>G-pYpPxVS=dj6LpfJp*p4-OO$K7KbcN!^x%j
zuz!susk@jfa`Fk~>jQ<bbG(p*mno22pN#OnzXcJyZ^k^`*SK%2G3KXhlE*8}iPwhX
zoY%#n=<#hjFU<ZcN&NB=99Buez#^9OEN~M=?#;whYb(roxB-tH>%q)M2fQL4f~95z
zQnfzfH$M})&fbRl%Z>wiooq}rN`tntS=^_^dZcQBfEK%cz#LtDn%(dKN5u4?!;Kew
z)T+b$LG~<v+FFV|9W7kWx_mBgqBA}osY3iGnqtO&7v49fMKs~uc?=0|g;9)8E1394
zBAdhZ<$bYm+EhT(?%L2b0cO;9M<sfWQNS(xtGFJ?F|c51E_-`bDwuLO_qfS7JX@?!
zrn4N2?)ofTvnCh1*A6DxqonAFriU<g*>=`{Q4m!gzlYna5>c%Bg4a^8qNy^@psSIG
zE+^jO9Yf~GI=hW=Sr2owKEGi7`w3XRrXF0chw>V~Sg*D(8F~x8f@Nz5P;J&_5AlY;
zC3#S&b_YIxeFV!;NanFw+WmIMli9z4-~4zm31F=7l^331O}!@FHm4QzH}L2?Yzeoh
z^ERX}F9++pFL6n?0{QH~a@o6f;yIIhX!Me~nz|oC?Q><CmK}|I*=*=Ri!zn+Dq-(H
zIXco#p7gF_o}B1POq;|!Chs4B<{?8`lO#{iOil%d;wJQ;q(T!c4Co@S9PoQsiMmbJ
z#4@oIHZZnAQks}^-1i#f|JvirK`p4Px*0omw?GTa5ruatk&FPwFNh9;q#2A1c7XEX
z%T$SERwyKDH({9v%U1lKWBR0*8y)=|#tImdVb=|(-JBYEZqkHq!QI$8hjmidTT_D%
zzj4@PJq&z9L2U5exv8iT%=VoDhtVr|$N9EYCQhDCuFqi%tdYFA8q1W*e}UFvCUj5k
zZ*czj0ArKB<4r?j8dRRftIAAb&+C9(O(QpSpQlHkzdeVMf4gAG4o&7{_~v|Ki5~H@
z8H{@u2&v$#vU5gj1-Cbzv3p+*11-4_oTcxAUEbwTH%-9Z^h?IMLV0rctvR(>{}O7C
z{lOxk9w~Gx$J9|e7~)oqlO3YL<?9nJ`%NEAxmt_|S$Fo+P*dU=rUaLUSrXxY(qLdA
zM@zow(KUS~ECVEk*Uo5?#Vh2=C09c#;XgtAJQ=!c`xe}EF(3Zk$iVOII&|bpU9w~J
zB8c3qL>;@o!Isyy^x}SV@}p9QEIDgK#A_}(`{ZY#`9d4ow)8Ql$>>EbMvvk09&2*w
zpDk5?x(rQkOK`~M8~Am&9I?#2i09eY5+6Fnhl3jZHSsa(O}+vySF9yQCOWiKbOked
zxANMlmSop~PHcZJOE&DTMT1M%c<)ux^t7}J>9^>F^VMm1{|57<=sbeaBYwgfjW1xg
zdw{E-xDj$nhJyFOS>Ss%oh!K2$6U54k~tl@m{?hkf|~dop=~pF>0S|PZl1$s4}A&&
z3k!MiGLBDWeLkg~YV0{>h$hcjXS+&|hWjX!$jcW%OYIS~^mwBC^-}PP*$>s92GHhn
z0_G~5#K7GvF-q4StIm$V-V?cKR-#0WR=fferC2E2tWUr4Ds+cJ7^bJk<Mcl)qtaXm
zQ@K9OkqYDE&%|=Ov|nT20vXJ9b;27b|6<6MS15j}PG|2MMC+>MXxB$sYS&-O3%d{U
z!@H!(F0){iHdCi|g!KT`POx_d#e#{4A#YR<%FPeOs&B2_lU-FHm?4p5%w5iz{22js
zHRFBLA*f}ePF?;c!ll;J7*zD0U)0tIhsNJUNslx=n4?X0BwAoacrW;?tpYWfU+8hx
zi_cKl#wVqeNIDo><la+ha!_7{ER#2+CdzM7KPMEOcB@g@wQ*3(G8DHY&1jbp%jR)$
zBG-9~usCiG2xd>rO)`{*q9HM;7~zB=(NQ?;S2FhRM(A1H%tuc72YcuUEI1h^%3|-*
ztn-HGkgiP|$_}FYfm@(h!7|-ealBXOEd=ITNpec(>I{2$=WW60BBe=lhIVnt_M!8m
zzMxjsWylFL#$RR%wBYwmQA|F?$w@_68{UWttlKR$ph{E=M?=!_0}%D`16Ou`fbAed
zq2+55M&;Py4wFz2j;-gO$(e)06dSm<Nta@@6#bUQX6VeRI_Sq>+BZpw7RK@r$mS60
zwQW$mJqP#H{e#`VG>Bq%H1}h-0hzUDH=fSdCG894X_Wl|&|C2y`~p1j{8dZRFYkd$
z%tw8fc?<UpufT%l6izYLgl^4ZT%PbW-oqsbQ!CzZG7dbt&;yWPZ9tqZ$TP0IyJXRq
z_aGfF0rA)dXXA;A^meWsoli5Em!=Lrt<)!NuG##KAWe$X<k8Gpn`$pRj)P;)q1SjV
zbp2Pvd1q)+uY=mObdfG5v#g(gwmen2BSq^|<Y|V%Cmbhc9kGqq`8iT-=5_rZdOo~_
zrw^-<OHN0zw|g}nE|nwcr>EiNrvh^4dp%<dm%`#MLvn1FH?F*{M?S@ClLKQJe_Hlj
z?mu-^+7uB7Ihu>%N<}buO_Qd9Pf9`PbD9%>lE=FK4AGz(37#|9iftW3z*Q-M*CC2<
zzh8scA9%tT$0^t#{*Cg#zQJEbZN`Gz;@mbvgY#0er9-@BiH}I0uK(DI;YI=y?Y|#y
zJa`IkMVIkngf;O}SEt1$%wb|vl<TiK7Q!F19e4O{^lB>RwtP;(C*GMLXjFqjyQ}zc
zh>)zavnEf(eXy<P5G33QLH{ZXS~ut<=Q-v)>vFqr<-9I!(_RJ7vc6)?(iGrs8k3yM
z>mkYZDW6ie7M)H_hsZyT=rppMH>nC_9i>dp-L?;0<{XA|`+}i>WhAr<BC&6%fa+~Q
z$iA6{VHsKcgt2yL?XQh$oh)bk<}u%S*$da~$pP0JU-?`1PM9wB2M(&gU@p!f5HLX>
zQdX2eYSn+(^`RcNDZGX4TGue5C=!E?Zv+>%_&&Gs3OtJb1qzBnDpz&}qsyOwCVs-4
z)3>>I%VV)}jT{`8HYc9bH-r6Sc2-~f7?-YZ#q5@Jh*BGl(>fRj_*Mir`imLWR1-pK
z&;;1yBujTpVa#VcbMocY0~~*hb>t(jf!(N3PBZT{da=7%*o*7nq-cfZ<rcKyM=9^M
zG>-EaR*1<vbHPi!pIbeRb?;uh<n~hbJ24&a{IKmL7WmssEM_Uu_tpmF)fy#YIEQ83
zROazwG3BHcDKy+Yfj1+KNI?5uC}d~!o^dQE<KT_4Jw9v>AWdhyu_c^W4VJ91B-19S
z5SL+>IQ4;29F<s%;5HsbIDo~|SHp3}_|7-5CYk=(;21H0uO%vE(UB%-5?z6V#|&v(
zXddUkay*E=RcOoM+gO*ph4qyc`6QP<$(ac)P_?j?>z&*R8Mnf5@txBsn3;-}Q76EQ
zx#%3W&EWsM)Fki4x+Egb5@O#c!encf(P=ry7hJ6pWdv-3a<&JZd%%z^&ygod!#YKE
z9_Kmd!-Hu-l^V+48^nBe!$k2Kceu&(vLGzj0A-iULt}h4cmIPL@mw+!8seSMH_VS~
zOK;{K?p34H!Wl4V&melm<tGZZnmY^pt~!fXD)8c8w<Y7)d3VRwd@NlS$MMr|prF`Q
z^7+dNaFI&n-m=WXiUW)(x$z~8DKw#WA2a#hk$n(1U_q?A)TvL&ABY$f1X_JcbSdll
zdzni3%m}tiO8JJ?#cIs`RfNUiQs`hiiJLH2K<_B#VAPyXoN&!Nn1CmsY*`b?pFD*T
z#sbLDyUEp*P6hd)Z6G=K6zj?=P}!#eetQ>TLEvnWA!Bj4td@o_!=GHScLEIGqeJWE
zWvOK1cj%pb2o|#p%{g%vj2&x&e({&^=2LYN$6PbM!`nou1M|?au?;)<9q_k<c}v;}
ziZ^yTySLxN^`mmpjb){O-!>)3-5Fc<VIvoBbe#(+W%`xHOF(CQDjX~b!7={2<RbGy
zK6#`~UDzJ#L+CvWSQ5%t&kaR~j1Qc!ZXQ4LoenwYI0f4b?(;XV-@u5vYOGM$gu>0<
z+%x8U+m-ST7Cp*m95J?=xi}j?r!X$jflAJ1tPRE8hA2>dhCXZ;wDP(R4GUZX>Q@sW
zqwFc~Z?*y#vYbkS<TO^+4Wj2|KBDD_Doj(dAOV9}S4KJ<#Mu@SS?>jqwR$GHGZu-`
zEB0L8ki%b*VwnubCiL{kMN!l)Ok@77bGJT#k!b_m0~s>z!XP@`;Sif|<$=d0Io$fd
zgj{rfhyxzl<PE!{3wv9!@zH$n+Vq}_iDP`h!<DefI|W_hFXIj+3yjj70+IvfRI}?R
zYE0Cn{$bN_!pt+E;ATynfc5Jqxnh}a6?{rCrUge2K;iQ{Y)|$aFFr9M0&f-PTNbvE
zu^Bnl1S4p0y$3>P6Fw(47oR1@;gg4#@kf6t79CY2j)m8uVACT^n(78YJEQQfya7oK
z2!LDOmTdoRLMn9FE_zfP7k_phIQ|WTLhp7s_(YK?9I+rd7nP_|>NA|Ia}Ucs$HR5;
zIh1WS0__tfWbEru_^{B9SU=v15yu5|eeW}zZq$nDx7qo)u0!<kpETKfM3YQUZ$mTR
zZ{XTB3KqPuAcof-;fP7gajJn0jl3F$2^Gts*drQDW~yNE)Ppd`IS$v=zQ%!v@+9cz
zD{iE8Fiv@K7)yL*$kgmA%o5fy&UmN9<*ggP#d<TSCtiRhYz`h2Ucq7QEXdR@XAXCk
zTWzvN7t?c)YGp=+fxDsXS}}y0>;$8~KN;V7E-$>H0Y*0mz{Nh6-(^}2vy0iiax%cK
z4}IX#Fcrs!nBoUE7Z&bW#rMD|)bw}6QC~D^rq4Yn=I-L2`xy|(v7F3<qaYSd7Ku|7
z>7(M4Aaq#C%Z(XK_DQXVV_S|xP|g8(wcdvOWB;Dv7X%|%9=_5~7PhR+<9dg_V0r3h
z^f0gFyq0=kqRm}M{qUSG_$87=>OBO#s1n#T*O&_LUlav3+0ZbR#o&<3^TM^OVC`HB
zdh<Uk+Q!b`&)A-vVT-Vzu>0((LA1~AHcp-%k831@smf?G;<iE)_i41^n5C*@u|Xqx
z{&9w@BlSqshEb4XmIiUsR^+9>Es<V#4!#W4ByL;up+{VR@pCV52C~Qaq&HJwr?Vx|
z@VJHvY3kUXmX7i4`w6@F9|i>~!M-qcoK?kKE|<=VGJbx?(M!HUi{%UEVLJh-U2{;h
zBnBGTInE_vzDV}=czDG!6#eZju=EPc<GeDZm-~*Q#&rYY;W`o9uB$V~xFVYEK)AGb
z5IMAzJ$t2(L2RWqD6}flZUt@X^C<*0OUH4(u$2!WulQRYtUJH=JoX!ALUghz4c)JT
zj9SbI+?Jx#=K}6T_+U~I9D|APD2$w9$N0?y+^97=G^w+n;|IwQ<yF04=Cc3|98x)$
zG6{VrjK=A|N|<Zw9<Dvsg!bHc)T-`>67LvPb7g$UX)07KG~$vjGP8?g5pJ8DjYes{
zSl1%tN1e+;%ZU}RGFzUE4Wf)!_X@AuT)={-lO!(B|A?NClOdrEFHm|+KE%&AK#yPr
z%H2PKQm@&3Q1Ak?-Y<eBiYIXX-9E5#e1SWMX_1+Kb!ozlV`xy(#C3evpgZ1|<K@-n
zB=y5JZb^rLSe<!_KBAjYaPv36<Ln1aYTCzbS!jU9EbG}acM~q_R3|}=-CV)fHJtPX
zmIY+L&t7#!`btxYDo5Re!JtOOCTk?69g2(r*U7upBFlUyfkDzMu3RGqdItVr=sO=M
zIQN>Le<cr`UsU3XRchp{!&M9$nZUiaG$M~YzT&a{9jIP(0;d^2LOYX>-2DVSYCZcl
zEHP80^CAZmkH{7-@=q%Y2M_RJal5fIt_?-&D!}hq4pf=F!JObMUa&Tfn-wd8gVm;_
z`S(nC<lX@_xmVy7>wXCHC-OTQWq=&$M?vG9-13}m&ZTlApPUi~IRX9LF_&=0niwVV
z4MWaksU~cw&clp{Le9%}4NO(hp^>r_4353!Yv1=kkbyIN-y%b6Ht5hU#>f|!rt_P&
z-9+8nCsD%gd}r!e&wiaqB2K)Q+dsV?92QLCH5)SUzk&0}F^;q627ow)o2WN&Blb*5
z<9#8J=cWmX(T=m|v_=P=l*C+N<q=RwRwp%Swp9FL4?apb1P*66@T(cq>+xfjHTG@c
z|Ihz1`6!EpWxruNRia@Py0GUG+XFg07tP*w5)Zt40QY2!$kC-KXwP_a0b4XtxSBl&
zZyLi4TYciAzZdVuD-iJniF47fci7Cj$e|(w*uduVf&Hl{(d>mWpO0g`_CXAIc9gxG
z?_;O=c8HR{&Dnhj;~ly-@!rxBjIY_odTc$s$MIS|dRG&egxy8Aa|s~uI?bDS=A-NU
zBrw#MCnc*`r+~SHuG|&S#D}a?{WTb~h$o0TZsO_3%ps9c!R6SS(nHc_<YbtDjw#k4
z-^-=R%QD9H-mgl}e7D5~hfkn7b7Ymz7z&L==XgP!Qttb02IMw-_x21y?t6bZep0xB
z4hy7sNrgW~G_1pYTV_JGy)|Z(TG5KbdEou941E>7FmJy$tucwkH~RarRkak)kG7zS
z!|QN@l@jyMsAB2YYn+A9n53Tmjjx*7ZY%g6^lp=(Z*KmA+A-{%?)e0h7WqKk(L>y`
zmgkr`jm>kLUU51LhQL<lUwSiMNCSF8dATkvB9l9a&L7TxCbynQLL}?4Y5rZXU7d#e
z2JAs`+%ZTw*8#3Cm%_`5?3s1x7+26ZO;XoYh5KSmsqsl|y5USRzLYa0kaPo&BwT_I
zu}xU-J{$$hbaVYKuvtdxWmqN%$2t0PBw*rAC=I&9IV#+QsMx6>W()?~S!!hEMH3=C
z)_~I(ucs-#9u^w1{_d|V?r_#G+_!HZS~~ZG*4RU+z3DB+@In$hlifGpod99dNp7%h
z7#>PBCTD-;WA|z;`bA!!dbioJ?1VMVS$CQX&|d;V;)7d+HeAGPfB2ECO0t(c2d6!;
z@OvBMp$EO=j}II{oHPdu{<?#}Z?CAcHWGD}ng22_l26-XMuH_KWZ$bOw5}P3SH8;7
z((OJluu_LSD|3Yyw*M+z(g1~wYf{%ep9_j-dEHGOsIc3X(owo(`jQmz?a1dH1atVz
zo$h$~`BD5yaxweRO$Z%`g5GN80a!TzpT-s9*z@IR#@^o^Ltf#JXG-*YMKngW>!8Dk
z>6}TgImob?*~*M!48dFwH>{ORsA>VPEIsNi3<T(&2BSl|Fw}e`9QtZQ8keX;=_5;U
z))KR<^lJ#TxC>Wc3JQiP=lc4`!{Lp282Bj)Jj)E|mo6=8R}#$|=+tt%npElQWrpMs
z^RylKcoIUV??I1II{d}Wa#Zpv50-8ZhoGelL1h|;55CKi`}(X~p|6BJb2eg3usYE?
z%l17YIo#qli7Q{ia?$JSCAZDBNbQ`HEFUF+-?al+cD5Xp|673(rL%G5Bmogv&4CrK
z^{D>p1T@KAferDOAoEoXPW^crK5e!pWtMGF6W{@^>KFLVuQO1migD*Ax<F0jI55<Z
zCsl7L|4-G3Zu#{e2W&Uun9%|{7eaB(+y}5QT9(8-v>=TnjbHv%o@8bz(t#Xn+I(v|
z_@B9m64O^OwJZRuf49PgeJ?O*2IIQ+w!qkUz$0;IA-_z6bz;}Rtbs=mqW2b7K2{;I
z2~u<*+mK%QD1{dPWnsiPC9rbMK)+#0u+hMTG=I^-+r)wt9O#n-xQO8?J3sdnxP#4Z
z=5p9|8#EWKVta-}JkiAVg!zxS+Joouc|t7u7FX~BJ1W}uuM-v5r^BDqKQS@B6*QwX
zxPa*qTv_BpXdf$0lulU^gWr18_ge@|3pApF2o;g<he23+#E{OrTY}TpX%gM@21H!(
z-Z^)%EODM}MO<oM^4flE4khStghdD7)<<m`7GR9hhg&ger$kcf7>AX6RM3j;G6b`-
za@&vF(uzYvFzmqxE_C-otQEJzbWfK58a0x?F#ZIlyX?S|?zb=~Y6W<uwDbEUhu}rJ
z4QZaW3U>AjNx<Gud}wnT;*2DWmQ4jGlPbQh@|?&bUz+?jvmzP%ZtjMl4P+)sk-B^D
zaqTrDl7uHjzJ;osO=C2k=wS2IRXX&fq5&}~>OiOeE~8+`VOU~rN1Rd^1L5I&te<8`
zhdi_;HtU~3$h&ALT>KH*+<G{1?`Zy~sTC1l%i_hXKfbKTkb335=cT>boZ9<5p4s^U
z3OBunT^pD~Zea*a=&yx9pBotWkU4aI?&RmpkRpGt>(U#KGC^9f2xkTAQ1Q&!oFx7M
z{NJhUHEk1P6u0x0F)Z(&&%W2LOFZYQMV`HCL~kQ~)&<dmwqd)tsY>SHvPsCh+%x1V
zjJ`rZ#~JQRT^^R7KFvMVV)uFLJx~zYBNFc+knI@?`V)ktUlfchKH8F|GILsXLW5Sf
z{sx8j@<brF9^NTh)4@NjiSXcMs9u)<yxdv5H0Tt%>9Vi+(iv<-nb6*4Lrf9?nip%M
zA)9#*K4wE^^}L0ioYk<;MHarZ{o&riV_0b4$C&6h0S-<E_12ppwv5RYDQCg+V<~vP
z-+;)lS$4;~1gH&3fV;yLN!|QlZlJphTQ289ijOMQj#4LYE*F4{MG}ABLx~(vy$T8y
z57>O;G?zNE8!u`eMz5CV-29^BXv3Ta;vp5Btfv9>8qvnJ`4w?upLfmyL8i3v=~u3~
zQXbxQDG;-@iy2b_oxjy-kRTr&bXWKR`^OoOs{BxXxxkh@SIxuzaX)Ya<NdZ?VP}x}
z?4CUF3{2j4m$^0Mc)t|Z<v6?%)Ve!xlWq_6XI#aG$2Z`esvT+DeTd7joP*K#-=jrM
zJ*sPbgyt2>6s4HE-awkxC~8rUku38YlZr0J=Xq9I0Hdv4ptePs93gMPhcMURga}C8
zcN3f@g@9yz2W&iGNrnv=V3WWbYfp~DSL|6_CN%~#oEAay+8UUy>WNnB0%EsHh{bIL
zT+|Y<z<h#enx!Ev7AwK#jck7K!IraGYQ%WGhTuD07RCO~oDIH*r{;smoiBHAMzt~h
znU{&uyS`x6+XfWe%yAY}jdR+fc!s~V)C<c_6@t?#8JL*D+}(CTIJNr(oU3)mq=&_v
z@V+$W{k38YVjIToVdu%&$5~!xJvWtck`{q3UHw^#sNZ(Oy3~_UzO@kRxK|R(<Op<g
z7=c@YhJ*GVY4W&9n)uCT-dX=POp2C<sT(F^+m-R0Ahw&cyDg79edDmWGaTYug)}2}
z5!MBGab}ax<6&Pt+UY)p<svdr*tQ-9#%NNlIlnP}-dK25qei_v16VGN<rUUH#%8VE
zkls{-Y1hqYrolw?%UX*9`9Y$fFaP+&hIAa}uSTzgX~N-*lW1961I-S;s9t^u0{ZT9
z7ORfnnw`(_R<9zuG%S`}8rzR5>$QmY`|BV$tdVQSCGpjJ%3$G_A~Z=|%zC&3;F@cM
zoc2lleMFnK8CLOQe;&ucoUPay;R{=?=<;W^TJg|w9b&gy4GKmU@}ZX$Xg1^Q4F9M^
zJm*>9FHV~dd+rEdiWN!M=*zITvmA9;e``^7D#$A|K*DZMD0dfw-{}l2Tk;-tassh`
zOaW}&Zcc0IH_(|r6c+wxLnC@(Q2fSQq|aDHDP|2I@K!?Q{GULZq)21<5PTzZ4=RnW
z!sp@#u<1`H?3me!4qfa%)3gLMXXNvum<#aI$CfBR-GI%}L1_PCGw$o01&y=Ug41Dn
z>hePks>2Im<t}NO`qc*>XXW6mij^#9Qv-&d42aE^yRi4^A#_@Q3)E##LDxE#8UOMF
zOJi7<YO53x++8Co(DCQRDpGVadu~j>5QSL>{jfTpU~%JWEOX6dXa73PIPrvAb2$>?
zXUFmm{g&AH<uV^8r-4^4y2IPY?I`<@xo#g<LBz6g;8}bYvjuNKFNE=savfp&CL6N&
zr!A3R{RA9W{D%1^<&Zl3BDx3?V0!#(JT|%y;<rZd(M~cnzW*L)o)e5+N&+grtHfJY
zSHkRT#$;T9fSfK@BK%Za+Os8u`yQ!9_epHQe6lU|idhc}A6>@DSGqyC(-3s@w=*x7
zBPTd;S|T1A0Es*A!Q#_L@%ei*5<N<m{!mvS`gWn%n`wi>U=O~|k{6+>CA@EBoFZ>U
zS`cP0TFUsC@kybaFkvH?&-|2A_r1pcj8Ztf{VG0Y+)|_Oz<!@y*dIR_TLhm#{Hm6N
zH6tM>R~NU8Ukgc=Yj~mN3;tYpIo9x&tXC~Xy(Yfp1x-cHM@p_ikIHon3Ae+_1S7oh
z>lfrz<bWI54tqYc=U>A|sHG{;@HiTl*jdmm2Q)ZwS)NEa{t=Xge1m&sSMb3IW%9{L
zib_9Z=kTC?PO);EF!j?(DERi4Ph)vUAKMaG>#j|I%{3+BDKj`hnukdLpBf36E)AQe
zwSwFjwyzEQ3bosmsce-k+oOAdiJA-z{9Xhe-f3LAxis}KZRPHiC!&*nIj<~Pj3LT>
z@F?mEgk-b)`s+OYptT;INp#r`<)zbNO(pUt{4XXY8gsYe4nvYrnkYeTBz#O^Ii}!e
z@J`75z_Q6u&z}Ri6(zXmXCGFs-GkEG*>fy(BQEaBLw$Dc?+KQnQ7>XRuVI6+PT{`D
zGcOo-<eT8=(i?2QR0%1j%)O9x6u*NK*^(BFeH%t#eO@#i+uP0jtO~TlIvZw8(WbIL
zm9XY+IAhy6f@Aj)e0SQK3^FmMZ6kd7rW?(u#Ip2ygNxvT>L8kYWEgHv4#l?VCKzC~
z0*wv~!>ujGRPbFQS+Tkfr5BgOr#>z6o@I_YCmO)Ed^4JzZ~+Z_rRjmw%q{%3PvWmN
z9$j`H6ooBF=NkJNmvE#rl<4b|N%t8e^ZhS4m19IbyV<PqULN>_{{n&hF1~cF46W;&
z4`-e{1))FzGPmb|&Wo=&IVTCrRPsT%){HT_=fa-r3RKYN$KSgD2{YI%sY->h<7DKi
zt7-#xd9;wMnKB#ia~IH~vYos7+kyn1zlM_XFIcXS1p9(b(9X{vq{m%>^#3lvO|1v)
z=a>m~Uxf6pr4$J>&f!v2enUgxRV+QO#k_0NSdLDP7SJxf;DHu(Q8mSNM{Q`*#RR@!
zS`V+eX)LGsNCAu)dn;5u6yIIaA-?8re1?({I#vAR-}Ete=Zglgz8D6*nYy(6xIaJD
zhGiN$pX8LAtMJrOh2&)&2DhGRFg9I*`u$FXpqhBl@_Y`xZM~?JUWQ#0D)9Of4H7WC
zftMMShmwa~ur9k33${%a>4$to+h`-2)wl)h4SKL^*m|^^=mWQv%h9~O4twQaF~)SR
zDBOW@MQ0B~)%aKVsI&q!FL!YneV1U(e@)<H-i+HUQb9PRhzoRVz<JBq?l1W^H#w;g
z2V~UAo`R<=?<FmXR~X>l8fp_}mcSa}Uc3@H0qjCeuxCsHj*#I&@O69cjwNRyWA!k2
z`C~Bg)o}%vaBoS_G*gysGK0$LLQFCk&v_VY)8lrQG^3R9NZOm>*6CAlVU8u);yWC7
zkx_WmITgEn{=+)vP)dGr2i51Dhp}%jfa@_!#)$BQx<TJKwFC0x7h@X;T7QW2?pooj
z?mrMl%9#^%8H#5&<qEqjz@;XW-(%MU%Ogrq;50wCQ82)XKVQoIZmU6z+3uiag(($>
zOG~_j^Ki-dGdL0%&+usiTwZBLu8j`GZ|u%9S6PD$-Kj?mJk6ldK_6}mD}lgq=V9-0
zA>Ags6yM2&;9ZtETx}*p+$#Ry$1-)YY`7{Zm~t3HV()<Ket%f1XN6{;B(SvPF7Cf-
zMxC;mN1!JG)xQ@*?(lE8e@!NC-u(opUrT4*!6*1SRf_h8zk?@dS@-Qg4lKOLa#0ht
zaWh)e6$@2qv1SpjoTX3t>NtoJ+yJlmGyH)DmQz1DoPGAx`6$JOT=IX6X}@X!`r6X4
ze)u_1o?8URsS3$)lEtzMb>Ji|O~)Qwh%RT%;lfm9vNK$P2nHX??H!X17LBH~`PwBo
zcl{3d4v!X9w`75r&I7E?JI`3z_c`M#0h#ai3G8hr;-*xVf4^IbonxfYAkBa}!~=iF
zPLUkXwW0mmRX8(;-El$-xZY(qK}ldlrn~h)!Bhown(~=XRyf1nO9R}qcZQ&;J)4tP
zNQ2DhO&BZp4r`zG;nf#4?5roCNo9jL1KWB$Q^)q1>dP?o`CV>BvK}e@I1Zc`qFd~{
zj@@}J>6(fEAB)v-1)&nIG3hTSaGB#M{q-I8JP73vv#glQ5nt~4b{#T$lr!!aA*8P7
zF7iG1qoH8wGsy(!G}uxz2v=LP4ri!2T{!P1PDr()o1`kiXY>~+mQkZW9gOM58V%yD
zZ3~Zcm1&;TW0(_YPF+b6XZiXyR;@1O9wjio%;Zk23cAY~gfD|N?%y!bC?4*9GAFg_
z8&Eu@OjNbz5FGkwLZfc4f`G~P@KVZ-XueAVw-w$nZs9$2A72W8X3LSjBo!PX^^7qZ
z4stkk6fXHH$MT;;B;&7}5yf3r)cURn+*(ah959VrlCMWx{)~i&TW@0DrhQ;HKZ|)D
zABx@vGX`J9du~EPHWajv;oBBA@D3-;ctQCfzAk7nwvF1uyFQqOH6QoD=+;PF7#)rw
zd>#nbZQu)bBq5%^1ZP-pMe*$uE>blQ+MJtEFx?mCBnn8!&uq}W%D7xbrCj(SeOg#G
z6l*RVM=6%mGqP#JO`hIZtyl%ycd#8rfjU@@SE3I4@8PQ^Q+g*$k<3|q2BRjrLPud5
z>|j4DmzO4@o`iPv8fl9^Y!0J;+ZX)Ec7g706%f>_K*O5T`OY=%xJk7Tr&$>hE|%^8
z`pXz&i!mY}b;H_nb(-*YBgUIs(mO1RdaijB`dZJ0&GoGF65q;sD9_-crtJi+mA}D-
z%an|&Hzv>g)o_Gc2IC2ik-YV_Bx&qiRrtmmt>bj5thy|yB!*+1`4+C+V+RUT+a*Se
zE1*a&7mLp=g+V?FG&A!XRJW|a)H4Y%_DUcs>;1qteQXwf=mj*cv`2@pLfW@`63q2e
zC5Bm2WSMRx7R%{TyX-^!{F2MK%R-xOSz65XKW_o0jw(!z=tP6R65cB$4z$_lvPWq<
zI*wyqk-2FYFjtP={GdrcZ|wn1;YmFEl<kCvR&kXPsnA&8hEoe2LDg8A78f<+{OK>D
zP&Ef$PBN#ylft>GzxVj0ewI%=+>3)wm*Jz{-HiKx6znph_z}rDY%VzpHXP8Qb~o9x
z<mCt0WT#0zHXp@7U$tmmMUTjF>^Qvn&VZckGp1du`ardDD431WrjKthH}L?^d3Z<h
z4tt6*iT(b=uHNEOTJvzj<jIU7TFNgH2g7IYv*7Y$IL7=DqoUteUI_+~u#Cgp?uOfF
zZel@>xR*nD`894JMvW|QHzfHBZHY&AI0&-#!j(gVX&Cb<HpIPvd4gO_4f@CRoXO&B
z`;~~Sv<*xhnuXcDnIJ3~#d|Dz!`Dvyjf-B}&?nces957L-?%T1>v&WO_4gvdOH|9{
zuhAsOhS|~7K3%%y;1up?cR02)HxnFl2m3%|o+U9R{$u_i`U}It+i}mECJ2CPeq)*x
zDcqg~7hVq{NypBDuUD}oaE=607n!5iUzVMCHXkRdYm-SoRLFwMM{)H1Rw#UJOp{8o
z`1Hseu$$70z8@ob@w5LV3*W@xREOKpJlhe}mAYVf@F1e!s!Bz@pRwZVKS(m4$QAc$
zL(pgieDdZUXx<Cqm*2mR%|@CSTat)D#f)=MpU8Q*EAWEMNG@Qh1!L72<Aj{UAYR_e
z*FCv|-<1DhLB&@|c=upxA2tqCucSfsNf}xn##j?mj3pz!T2qHAS?GK)9-Ia?flrT^
zy?+#l?;PfF{_cwH;Vmc?!S>9>ar~a1LYA-BCAyVoP_R2kvWR&A<wxW~^4%!7b4H4m
z<6T~?;e$sizhcx^F&}Yy4cHxb1BcVf^jfn%sb{V~oyWSg<cuku#XSe#=CMF4O-Txy
z`F6>AV&?O^@a&f<6gReD&b1ECOIi!J4D5nL+lwHSKQF4+Jpo$Dzd^cp8A_jegPLX2
zd9P#@+!CI{{dBmDbFbgT;#J!~eVZ6E9^3P!?;r6ZuQ~`)e9M;~oq^j&E0Mf{GcdEu
zoXVwjvv<sBnEJwysxwYT-WBGgKAprZW1LHu_g5qiZnK~{OCOT=|H0g%`)HpNg#)`;
z-u~k(7<Iyk^h}6Ab=K_>*FMB#=F{skvBWJK?(v1E*5aU3@u+lAKpW*!_#$VP30mmP
zUyYO|nL2mifMY8(OzuP5$%bUqJ_Ax8x&qd2vL!toYv2gu>G~cyz;8TnMV~inP^}3%
z^yvh49}U(Ob(~Cs&v|K>`63-5@HbArcM@D90zh!+rpTo_P$IbXNiuf-ZV;`?f!^yE
zp_zGtYPZK@(%*lQ_@kS_(_kF#IH*DYhM7`)y%UBUeu!z-=46=RYitWPrq4_DiR8g4
zNIok=e-C9dfMO*&w(~sHCBGDXoga@?{~d?O##&Hieol*f(!|zOi3m<aa}lN*XdI<P
zj=jx?(9xb?Il2M{E3#~Q)F60tZYgRnkRn@>3b|ze7ck-YV^meF!62(p{>NZBvW?w&
zkA>fdi&MifQ1>5Ze9)!^E00Jf?7s)<*Uf1R^B#Qe{fX7P6lm&?(RkyP3@tF1hH~?v
z@WD)nagj1$Q=l&GLvJ{;F$QX-U&DwkT`=;O7{@Li0?Q%{iMX%WdCZ$a9R9+9ye?z@
zcv)Xe9deEvC_aj+t5v~=aT{G4R!D4~4&WQ>4!q2=cw5|NL($D!Xmo!e7OZ~Arwng`
zoQWGC{Bk$0o+2dMp2WcC^W88ZL5mhFj*vW5F{H9aT4;HD4-Rv`&UO>KQTU2==*LDv
z(|;QD$)&;Q-ZU7u%u?a|R{n<;&-?J~J+?clJPE@sR7usFMy}*m7#cLN=i)#TUtk@B
zA?C5rwD2h8953f|%ozKJ?We@QKIDe=CUZ66O&GDd7NZZd{6u*PZ^qofej7R=s;`8%
zS98besqt`Sn*|EKe-RbzWvr@K*|=;TyBp3`0AG{syjP=`uQXxZN4ewtKIu!0&(;tA
z8X9zJ&oJh9D&doMM8oKaK{TvzK4UEYzy~F-urZ~B`!tiW7~S-UeYqU<Gx&(Q!%yMT
zTis~Pa-$C8uW=D~SL3n==JZPWS`12@jCEUUBzbWQm}7X8q+Nlc-SwlqIOYu8;9A&*
zH6CWov&9|L;vg~aBdUxyBL=&_aVG{3CIuPmL=EX_`03F(REm$q+__R@bn_Pw{u}{j
z&1*5LF^9`@O@Zt#4|L71;ALGlW8;ui4EXOH_pe2s#5$iq-#uS&pN<lPpR#$Xln$+V
z#4@M`7qK<-E=I3-2?F)6&dCoBU}C8}^_S70F}4ZVxcn8r<?{^~#rUxfYtMqP?kGg=
z3Wn;DBGg>5jq9^o53ZNaL1TI*x2QphPQJ?AIL|a`#hh4((tL`(Ch3yasZa65dTnB$
zeVQ*#%jcrcK8DXxJWP(AjG6Um5VHOcUcRPFdZulJYm3!Lfq9C^ujUObX=i-(loxn0
z-G;)^gV1kX1GhTTLGdqR%_ly_E0dOjYJ(5f)nsw1BRFt5*vl!bxriM{WvQn?9o(<I
z!z0ogG4_cr_?r81kF9j6@54P%Z}bPX_BJ347hq;@65J^;rlGg5LbPE9++40i8$U|H
zJGKk%k$TG&I7h>uPq%Pep9D$^m1yP0si0Z2hf6w=%kP>H535$%(lMLVXwtuP5ZPUU
zd7VA*ksCx5RU~|5=3me{@D83`6w;q<hGazb91MBz3Bx`hH(}%?)N%L5q>IL!eFA%L
zxr=!}La;;S0W`54yUDdel+U{Xy~|~3;EWc?k&WOARVWDF8^enUSJB+nn4I_@MQ7qr
z)7ORJ+kBgo(mapmObz#}on%gukbZ`QkSQUAkff4ILNX<Dk|a~zvvzJI6)F*xG)a<F
zNJ8q{-(Ns=@7ZUs^}f$DXb8Jc=~H32BFmIrgIz=8(Y5aX-sNm;Nh$!riRmJtRwN$1
z^cUk7yaUOwl~83UM7v+3!6P{k6YsHn&=>_!VcBPI+K+d$g>*xM8RJ67=IS1k2F<nr
zO#ROq#vaIKoVs&x%U(b`9p&iMO~y2Q*js>vBRIC~9n?)~ggp+7XQ8(h0>A&l*A@Ec
zZg3i^npw8%Q=(}8YIff?7=<1O>(E}e6LRfMNUw_yby2>>h5c<2?|aJjGwR9QlJ^tf
zUw;%jmm+8+>2uOs%JBlrR=B4vhWm#qFwA2h_jt)f=xddssW1jKMp*L&I}buLu7IzF
z*5t!C=9LvV@UpA-qgv*3*rNXimdv>c_g)@E!HT&~Mg8@>{u)E7Q&o-unzB^iB?n)d
zoWtaUI{52?DeY$Ow&AS15I=ARJe|$C8LuC4@)lAwIgs5aRphDpx{r)e63Tz>&cfTB
zW<)wyn(9p~KwF_U+Sy5?y>SIp9Xk&NkMyAK-8*=lBu~3`2)U1aszg?PEhu=W;S13q
z(pOo7Zkiu(-yRdH^7ta|Rey@|ClAA>AB7mbFM}^yaE7~j-hyr$qeYA_JV5W%R}jsZ
zG&fSD$u@V!(4QCup<$lT&1Tl;x{XQDEqzQ{{T@7vUUFIsQ*hD>S*mrzm2n>f@arUF
zn(C{}Ir~xwKk*U8bKgPQ>05mBUOBdlp29iS2VnWUr))nHg(-#XT-FE!mmQnMtqWdq
zl1G}j>A5AXoTx;Hm|p^6!g0KlZ$<8T+7b!tnknrw!*JC|H16DoVRz;D*4rcSXwe7s
zTk@I{{7&S@=MTqY50waRZp1gEJHc_U256j;=JPcyNv<+uc)RJ)<g?aTll2|^ZFWGU
zOc2WMX5S@;TcQQ0jETY^)+5sShvj>Q@$JEvU|4fI=y^uqsN40hahfdc$}oU?T@<<A
zOQ?3e2gUqHkTx=*3ztce2p3oEz2S^SM-Fng!(@quGMg1ANgyV96`s_xAlj$9aQ>`#
z$i7+x9(IOweY*nb&6203<2EsdjRWtyas*lyG4|PXc}!)T!zo>Pc<{G5@q53D>-tFf
zkXr?uV0BFHoOD$>rQIBxCa=Sn>}<8W{2%vxl^!kqkIn9^J;X|;jc`p-mpqEd#-P+L
zZiwU^d`=uh#3mVFH&coxv`>VrZ5O#2m(?)-D062RY14*UDVjO|EBp?ZU}a|t@<)`&
zoQGX3lW>Zcc+1m^Q|h2X`xquz=s<*f5NPfUM$d^^{F*B|<ZF-xncA#M=A<wlhmDoE
zH{Jx)p9+bKs|Oe6?T(VD&FHyf0xxN8=1r#-i3C}*a_ap)pz-)}Xx%MO!#3KBjICI2
zHnI-SCoxCxgk`+IkKRuWJH`DQV@4BO9)QaYS#gds&zYu&@e)Htq?{57(buOAw`Ga)
z3lo}NWkl^>oAajh0B^cBm-9<xZlftJ{3kZAZ=2c&S<|E8h>AMl)?UD+?Rq3I|0!e7
z4dHKWDM7)8AC5wHR#hE)$;DmPBerR*8`G{%EI;bgUlVj`VEHRJJ7F*t?ayT#))H<A
zyWb|H{^Cz>$b^DT7IehS{V>|@6s8@l;J=Hu;QUF<<?QksQcPRn&*Ej6wBQ^D{W7Pf
zQ(F1e5_fctdkAAHt>}8@P5%3y`8C}dxV_G$5ND`L<aK_5$IkZ{l6HodI4EL!K`<H|
zlOcOI+<{u&6;8c1B3WPhxvsE8RNK>mhiA!>U31rChNeC?_!vQ;!651=uSu_3o<hry
zd+|-$Y#gaAOG9uAtY$8;Py=s_Hrj+<b?qS0cq3|?{~i(sqCn6)OjNE^#Lf9_K{f<>
zfo(|{@2TSp*0b)x_#2bKr{z54Y+VK}7IXNnyodb#9`<a^&E@wEx20jS>a3UikDF3i
zhR(;YV#8O$H|*%)1ikv4yzmt|`d`F^*S@^!3Mn$t*^sz=br5@>Z-Ahjt^6mma9sKA
zCgUCN6yF?VO@mrlSjm_15!V~i$6r-QSL}W;mOX>1XIe1yz)Dblq(f6_1y1^N63?5-
zksG6A$;A;4*!1@{<d~P>^Xf8OYs<Q9=7WgerjuOx!b4mQV<M>Rk!JJredsnU4JWTY
zhFux6`K>7mL}id3SiYVMvS-TB?rjG*cLd{54!sU;#jOy3Tao%4zsS4lN5SrMhUDiq
zQ&M*@4b?-R;vAMW$;~goZMWZH<}#LZWS_jOqc=EZMOm6-eS`B_a0?S+oMDix6|vqa
z#$Wqv=&E~t=%8moJu@`Hv@4xE_xK5x3~xoR4>#b&7)^4~;2mV`|9~!Sdg8v_jEB6p
zp0h8x3kyD;!byxt`^obzUS6Jx3+&WLykrdye<L8;DH^0qSBZ@Hmx<G&D<LK#6rZiu
zphEpTzGw=|W~?5?4{Ps&8Jc#Gr|gGwbxf$UswMX94uhaMA)MWgBHnU6<2|K?L(7O&
zSf0*yTZwI=5Cwn0`>cBtG#t)eu%gMI?sM}41+<aPdV<+mVCu671Yi6x%wRcJXj%ii
zM!#i!5kG#@P79*@JQiZj)q!m;&^mZ0J`X*D0q2fDZ{=goXZ?B3zOov6eSc$7Od>8g
zq(Z-M9)}v59{h+U^I*oD*L-jCTKJIn1|?FOVqr}Z>iAld?ydItY?cLGJ?}cIyjjaQ
zFSqctohh|qIgLP{Ztyc3%Io&IvRt1jeRRDTa<V6&qOK9?)4UAw+h2f-U9QM{Fb`qY
zV)4yDOJej>hW`DeNz#sHamKMlP*?sJH98IWy&w9aA~+J=m#R~fJG!JL_dR0~M518K
z3a1@x$1Dsj<XU^Ip#R)KxKj8T{mQ5Ee@rD%qHKi|pS{NAm*mOL)VHWL)CpQ^Q@EUn
zYHs8ZBf7I88eeH;qf*8<_<K#AuJb+!(UF6(t1g>6)ucwJjl2u3u8gInVvRnpS#GUm
z8tN)lani+~An@EjIIb+f*x(>YwkqX3ZKiWM-PQcf+#_f!I>zU0kjAu<!I0+jiA&Dc
z1wkF-hb|cjtE2uxfUuf>lM)YIG5fgZ%zN#(_>;Kwdn_Dxv!p)om$!P)*mdK>Me_aL
zxaPQ!e%x~gZ)`Lqj`IjSDbXT1s{6s;bRVp>w<IF9LA3CyDV0sN1i|Nrxh?g>Fsl0y
z#?xvD8J)(b9?KU!pHao;D${xQ?XqAi%;#e#$e_zUHPN4h_2_<-?OfFc!Kw3u>8KYO
zVET9*Kfi~$O>32?$y*~@b$&m%`D9`IlTWynF(Uj2tpgj@-`ZeqhRc7k^VEJPm$Ud4
zpJG=EVaAr=a?XWwnHA2d4mBW-u{Tk}XAtKin<joZn`L5B0&v6iMbI+42yVU7CSMtA
zu9|%pWq+?jtNJ0t^RO-U4BUc~JCktK;Oh`*d<%+peB***AMx4hDpYs=0WLk`8i)_Q
z#MjpD@UTRQt~>9|yl!roZp(6P+1GMj%^6JG%K~ss%VC^)UzQXL4at(}ei(3e1_Uss
zpmo<_(3{Ad@y64E_cx~tt+fdELW!MM?7mw4ikA$QW=x?+5b0D8Tc2d&)w>AW_J(1N
z&n<}ETY)qDpK_=2ub}Zu2~-XAV(-3cuIkGbZvS#!dZ)mE3a7otuJ8bUR9Y?A-93hu
z+ZLhn3)Z6>)5;yaz#O4(VtI-BJI;0TJ=jwH3Z1z~JTWgE3*)|`|EFA(>^TPoHd+`Y
zo5D9Z=8Gia0xor(J@0<W19V4PP{9{Zw3D)caCXNFIPwsKbNW#}l=&~_CP7DHHzu(A
zc7y&w@u<ufpep!_Plm}*+f{D(zTr4y&n)D7{a^4KJ55RY_H~S1E=@9H`Y|Ui6P?)K
zyrf8)y83I-=LybGSQ`r^-`_y_xEe0ZC<t5nFXJ6!eJa_zmXi&+2FU~4pjYlN7j&?L
zi?)5jWqp0f?SEoGJi{_Li<Pn@#4(I>T)P-XtY&%E@gH$R<9jSGx(0<;nxHPp5sj88
zkh-`^2oBePR=2}^06yi@eQF@d;VY{B_>9kuhC!I}JwDkg9>RL_a$EbifNt3r?5bDb
z9r}$(DcgY?XbhrZFCs;5<?6K2$AUB**)NW(8$y#O)Nxa9>65CbH^KP?^Kp-1^E8#w
zFluHKJh6<x?kFjg&U?)=)N&%hr~6_RU6yOj(`3Cdb>e)X4T5uJfQvtXp8D6J;lLBo
z;-@+!Lis9c1Ra4#xr~b|{Tq7x6F`*t2150d@%tHezJy$5ytsLe`La@ENEf?f?UBVN
z1J?AghXVcRXF|GEx9~NO?|_b%B|S6l6zcE4h^BJQxNzVUnsEvw=TRKX(n}L#n^IWA
zc0w_8AHw=y#dvYeC-8Ha#Y@smsqvbA=$GyR3nf{q7WEt2&W6Lf`svv05)GP1**Puw
zj~h~a9Y%DSGRJZv2;)-Wz4skVI2+3QEIi9MteU|IzRu50o2>+Pd^@(~eSoYT*I`3m
z1m<s0CVjirAnIugwme^jVP2n{c7_Zl-v?De$?+Q2yZ;1^f@a>CZo`m{6Hu4952N1b
z(5#+StXr7DS2>tNVtgCi$&{gk^QDM!DGzh)Y{|mf1k^tk1>+cxGf6cU%=ee!E~Sf5
zx910D)obDD;T@n``+=Vt&wSFyD)`&+HsoT|L-2j{74&zvg7dp3D2;jsLBDqJhOY*}
z<UP;vw$TOr_|TkmO*g^9lk7cd<pt3^fehI^kdL-9F>tnP1$2En&qwcOPIre9;?(04
zxgg1OzG3TQ(GHdsJhf#AHNW}~G-kALvmdIGge$82x?M6f(CZ<r`C>uF#0??~oGfXy
z`c=*}&x#+WZbcI`V)>ULY)9sOi+dq>$T(KEj6pRNHLU&k-L0mSuq?RCYi)kXW|pTZ
z55ue|DK2bnrg+_jR}k~<C3H1=gUg4FeCW76@JH7lFZ&4KK8!~}?~xoGH+7P6XaMiU
zsuA7o2eBdV96GU{V%FJZkg<{A!)X@OwLymVZqNi;B_wWKFPN;BCzrR)hY`0lsd3W<
zET0wy-!7OF$;d+@!_(&YlyL?dhup;HHyog+q#P1=6o6=54P<4_h1_8lq$|6J&$Y|N
zyrZgg#JpN;Hc~)`kjv<}uz^pv--P;{26Z{S9~N!C3vHUru@Zh9^%zHV9P4}AJ^RFI
z*x$j-nZH5IGIMxF9)j-u!hV?>sPRJz)4X>=x_=`ij9knm^hvmpy@xR1M;Z4j%#?Z!
zjl)IPYGF;y3%vQmg5KJrMFrl;jsoFp6sDO_?{$yB{>UMS92<=7otE_GyF~QqjO0^C
z`ayq665|SbVYjIp{LWJ$&u6a00RNA;etr$6DP{mIQzf>GdU&<YGms!<3PpE{xGi07
zxRT9sm1}jWyU+kO9%#f0#$Far?q@m92)J)~4W?I_)A?2U(6QJYsR`ozEx9;aF&RxQ
z3Pi2t=OO9EC@dIljDB$gC=@^E&gYlmkWyu8MpN<Ae>ZT!=t1;ov<i8!@f()Ni}Aj{
zHqD-u1W7mFfVIH~Xj@YWPgyp1#DSIg^oWoY@cN8DYeikuOvRFdW^RG=F`W48H}1=l
zC4UOugJ0)f-fF>N^ktm<!15^2Q0d0*l^N&|t3wW*QYSM$M)GE73Q%*n6~zYv%x4@$
z?<Rs>i{A3h@jo#sOOIBjThbA8)<79!F6~%;8o%!mu)k-%DE^^6V^~#S<YpE6!E+EX
zpZ^~QF7?E_)5oLtkk`;Ol!vbd6?pW@8GPgX6Fg*JLDYd4=s&p!Ps$jQqUm3`YD`Ao
z8FwMW-VCN}R-;l?mL#nCy^}$T2@zb-&)vCe5D^$FIprv|@CWBI7iRV$SX=g)c@%Ep
z<qM$@`f&~1JSiZ$-(~2<isRriWiI#(8UnT!6VX+*3j8;*Y(n;ZF3IX0ev?<FcQ-DA
zpc%K|L-85RJkbu$N%@c!K9r9yXZ(mQS9sf5BJjVr2HehMfmq`#_#Ga_2k-TOpn(gV
z&+2QO!!~o0Z0?43EoZTOK$G&j%&B0{BK~}*kO=Z;I(g<?;VsLQ@vkv+CH$TV&Of%}
ziu;G*>f9Dc`O^T=&${^7C`<6^Kg1P|`wV9{ZN#+UU$|C_aIR-d9bB?dBMZix)9umQ
z(Pgi@*yqtfXb%JS3>pW8+izj&vs&($izc~tQJxs9--d*-E_g`YgnT!f$UHjLsP?1~
z=AAL8!ZIy7MM{abYet||o-DNpG$e}?pP|N7JAPD4E|5bv&>+>21iyDh4VMA_4)Z0&
zov6ajZMV_CpXG;`L$G0-Hy^y?JVqaU%2$jWgQMzfFe~vDZyVRcHDz1@zo8u>mo2s;
zftIT%VWTnM;>O%KUd#nOF^^yBu1j6+B*UEM=U6bV5^X~}xvZHZxj#z_VC6gm`lVHc
z8X7IZz}8#{o;44wHmi{K&{JUXz=Fhz6~M)J8t*u>hm%xZ;)2@e<B84sBxu%h?jz&D
zr-lxL%|)7|_x&`Gbarwf93ua05Yc_z&#xbQ3qK_a>B!kOblDGO+Tdo)H&xc+3R(sW
zA1G0{rhtOO!yqyEIXr)Ffa@N;0Ehf2jPLG*)XKqp$S*^jdgLamBpJcZTE=D^Wy^<O
zvZ0dL{a7cPfGZ_Z<YhhUYj`hV94rg;^ZFxt{?rE>7Mj5{lk0F&ry3<c_lX{b9fEy>
z<w*ajaLDo;3!4|}k()#6k@1!tJU(5-p}A@#D9{5c9zSB)*cP-i(&u!S>4D(EYp3aA
z6B_nv5AR@bl({?&osOPJMq%M{a5NU7|L0v$5mgGf!vh^(31OVOJ{eJ=ic?XM^vP<|
z=J_9Ria{oH9aG~{XD!EVwf8Y(g9xGzUgM>&9tP8AZdmi~3`A&z;@My}%gXHH{oIAT
zR=EWgjVOTZQ_LgP%Gh7+mbkk9E+mZK$aSgb^KVNm>7&$R7@7SNTiS1fscZ^2qum(#
zUbw;bCFb-+OEmUgJOEjS5BYMZC=^Iph^+5k0m<QA_~DQyk-l^T1#h0^{`ss)yEoiN
zVO0lu1pP(9x^mua*bgWiUJMt7&p>|QCUbP$$n^`2<8#JV@UH&XP)bva21mJ}N4W_V
zPZ@<_<F`ZJu3o%!-HHxavioC+A`w5g#)RiNxb9{v%bg7YyUcXd^nJoO#f{Lo-I8dG
z5MuYd0OaINN%p!gjQd^+Dg9C4vfW&qG}neMzZZ`UBSWBInI+vTV}Ua~4s%*-qcF-|
znl!6eVldH$fLt}4yG@C7dFn%FyCT_UWJBKkyo;|poS^Wk6!nligjol=aaaWV8LGk@
zK6q&p`QCew+j$Ff3We0%^a&Ujyu&%!gK5X253pj;F^KWIg|#OSfOqjl$S&%Du;aB(
zub5Ay@Y{LzxdFa=;xJg;ETrcaDUiE6Ph!>hS6s!vES$Q_j3lgL&pWLt{K{eSB<xG2
zlZ(5r$a2&MsJgcvL#{<YcU2JFZ(_6jgEjba@DuczcZ;*WTLIBk+USxd0z>yCn6%%7
zXv}x#?=QcGepBK(IG2uosRy|y`vwt7y%4-xUjkpw7~Iz{z`F2gIQbw81Kv2p>${~W
z=#qDO^`jU&PTHbw+e2QES1tZ{{0p|7ssVeIKd_>&2$u|(MwgD!5a!g$>klNspMh>@
zXnV|+pDE*3=iGq~t>4hZc#IDIDn#nRIc!KV<M-^lf^`cs!DCZ76a_Csf1kN<$Ug$@
zm*0hXR%#@m<_1?Z@-oc7q>c>>im<7NaaGUG<N}2aQ10yqS$`Y3x!uj!+mZ%{yR=F1
zy}|V64o$Wzsf8JfuJa9N^RT0&5f-MhcQ0PVn;*Ll#rw{q#2)a1j}A4l)FR3EQrYhR
zAG+$X4!QYyk#}W3_#S?XH*~Z~o=YbTv!{^$<O@{lS(9<w1jOa?PB<*5L?*ddlifqV
zW8qYHtX!o^#uh5mwC9QZ`FT<JXm2OnFG+;p`TMcUT8j@E(T>C33u)?!B$4I#2!LS5
zcUiZZt8}&{eHG;x8_<JScQ9{;OEc=Km!n<nOKyJjEA%<h#&x-E<r;>vySZ@}26X-8
zPc>`P4ZJzVzh?WK?@IVy+k~jBD@V!2tB_T#&)pv;OZ94%$u<L7@-MCoM@?>p?U!5d
zvyU3B8}bZ{fBk?2oj`s|%u9^T-H55xhJ5;bRho15Fe(VONsrM}mMOC)EfzPRcXb>m
z@tVvBjMjnV-^XCX{$`wE`k70}HRM!^;^C`@GMO}3mk7_F2CvSuFt_gjR^3?+@fzAR
zrbS4tOIgPA##B*xaWy}rMwYhvKjA&^+T!FQE0S6$2PSH!#Pz@(=sP|fw=z~;ul^3W
zI?o!DQhq@Hi2D#W+M92PW}Q3VOlWJ;pgIfmNYFAF-gH!f$j{9OsHHhw{v;N|W%9sZ
zQwooT$`i+%E5WZR2^^J9GY*D5IGXFB!%!81i_#$L*Awmq&-ytp4T(VUtvKoESFB)}
zy!o=5!EbX7x+L|8%zuWXg0>bdZoPmX!j);b)_0IR9S?yj%5?R3#`f`Lcgsg6T#KX;
z?7q%|%k`n4@lL`U%~WFS;nyNRoy#ImUlZIip#g+5N;ttl93Qh&pL(%(E5irxalCjY
z2wdhkn%35c=4qG`VXhu7sbRCTz-GA8q(=Dc>lmUY1w~t9Ig6fum{j|n&1<(p*XLQh
ziKaH|@npl#G<GJISYg)PM*g&33A(f|7s+z};ZHX!`ropPm~yWlTE{ZpSkn<`b~_HY
z1UJwkV1UhC&%uLaDPmE~-nn2WNbWIin|&7MsxZdt*CY-%DS@w49!!$fC8rxBA!O|;
zh)a2bHFk%v!!H5WytE|aMx>)_U^dK(A55oCy@RBi@hKDvu<D`~o9#w%ryXy@#G?gh
z=lu>t6aJxFoCFdp-a_i3_ncO`CcF{P!jVdHq{;mORN5MoZK`@StVxbP6Qe*Kelj0@
zgDI7yO~jY$&FTG@FVO3GBg|&A+^YCqUh~C2Ou2cNF)j^6t^?cgEqiyg(~sl2<E?4J
z*>_xY<r`ign8BqDWb(h|<VcZ~CT$&7z#BYeIi6wmuwwNNY`@A_P|G<{@i)e?`^)kV
z@nQUqZ7QVY_(xb+qff6&Y11L&Z$nCJ4yYM^hG#mAF?yqm7tfA^)*-KWQ=6eM<Y64V
zNwlOc&WCy5C5y3Ptr4yB-HVsAA}}bjmlvovizG@N*tP5$M15nv71bcH{e6iW#h!I5
zZ$+|a-$59|xV9mG@AGG;Uxja9)v4dV!(v~<R~UGk`PgLNVEe>++%=5Nqi3b^rhmku
z*>(oxzONb`1AlO){85aV9fM9WF(~o7${A#7kbpC`I839DaV8$ZGl33K+wl(Ghy1|=
zS9?fpzXtzUXB;|h(6VJ2R5sS(7z;Ie!2A(zVb487KYd!?n8waR9^vpAhz?ERj<WrY
z<y|LqS(eMS9yS60R4a68{=;=_c?=^hD{-!)4e46)n2XU6(v?L*QX2FK1^m!l;|H5C
z`mdM^WlZ&fHWgx8a~3>{fAak+enG?edVUl#HcO*``t`l%{0<(3SjH)j3}7>(6vkKd
zv!<R^5t#hFm`ku$;9}pL!{I-qh^T|zd!`=~B^Yeuqeo9i<Dr@8ziAMxxnW9M**m83
zm|hU5m^!+oMT!17*pkzs`gDlmHkRT1oNICI7`m0#z{Rl-Auhy}=uPZHyM`5<hExdu
zx=TRQ-@byZMiDRZP33m3Dneh^Um!@lFJ3h02)NIm2J#06aOgXVHP3rd|9T7Bt9`((
z87zw;?F!GdwW+O)JA9kkjogJK{C#{s9=^cNpiE8XEua{%W&vdMDMRWfUkLeog3}qP
zO#TdQf-}cVsi%QFZPI;@H}^@=;Xn21V%87y&1D?cv$xRY%M)?<g|`@#f047Px{J42
zr~W&0(I*(Xa#`Az^utm)dg_QFIXBLncxLpWF?|Zz1J&?jrV<sdcSrA~anRtR&CN@{
zhc1s3c_DKI8@AcdEee)c)b|7J89y&%+ZTS{Yyr7=D;4Hdo<PZ*L3DiRJ`j11#5g-m
z65_I-FQtueJWmxyjkyJv7gwR@-y{Ie5!-q@;N1{I^3g+|B=0%S|0kb^wwVr)8(fH4
z{R=^;%{Zj|1a7ZOE99`uK)@OW>KHJD&X!jpw}>+7QugHv2FxJqT_~57AAs#*B@~`t
zg{}d?*r2En^UBT1y``nN#AFj_>}2d}AAc@XH5yXm!nlLF#>6%A8k8@e4Z$nqp|MY&
zn6}r89V6wTTy+bV`fwV@z93M#vlkjB9^(Cv`D1bjd#1DpfYVuN;&>~A%~%F_VZIWr
zD?5q9*gijcr3$PcA)w2zGX~|0X?(im2X5P-POk4~v(r>9{&&PtbeN|9|GThwwOJQ@
zf5!HJ6E1PveQ!bfp$4#-G>C+X$KlxYcjy^76juJp!dJ^x>Ep3Oh*XIN5!$`uGhZ<V
z_4n=IJhLBu>hsJoeP865dkR|O>aqUgU=mj)M}B@`=a)bct{gAM)QTsf!x>UEM?Z>_
z7|#$3hI)v<UwRE{+h0Rka}nnk3%p=S469u}K#?EwM8&+vhnyvefSKUB=@s11T!oq8
zk0Do1gV@G)^S#@{`9DjS;l}|J@)2!FDRbPJsT{)Fbyno&XA^SOHV@h=KH-)(y^v(@
zhJt_3#i{8V(S5!*#FlKpH<e-F=lzf0`tm$(Ej@x=@{hT8?O9+qRe@XA{Tup0j!t0v
z&8l1h4UM^ni*BBPS;59s6g?OJ=wE^v^Mb*EIjMy1+B9+aSnS@Q%HUdF5ISQTwA}a$
zrb<$<dx|Ux*z<$W`~C-J6i4!3e+X#9%d;ZBU6+2lhUlGf4pqLK1>@jG7(oW1DCrv{
zfBnF9tbGHEU&;}Gc2_me-Hi>I7es2?Q}MMkd&k{;m(P79OC<a5@}1}9h{LCISn%=)
zT(yq`<$K!9ML$eTD;4RV<c~0R-wbq!|AmY1sgQ3XYhs!03ZrxuqKoovjOo7z;R<zV
zdUpc1Z;v|jgG6wXytRpN#1iO!X@$p6Yole4G@X*T1tT9_f~*Da*p56;yn#9NhSsZ*
zvvE71e7z=Lb$cUBcq2=tXRSrsxvOE-#w?sGWkbFjW<fXO9L!Nvp;Cio$P9HIxWD8M
zj0oz)7^8#mtx<{=?Yhb*&0mg0_bDFuBSHTNZ}jZ7=IS(<W6SU1gYaKzU>M$ls*DL{
z^w^9zyi_2SDP}ZSE&;kU)^nD#8D~xR4p@ivV`OL!v@VI}CpznqV=rY%_T_Yx&l!oP
zm)C%PB?}9!RHB1*3`ki>ILoybau<H8lGL(R(Fn%3YV8PxGoA(L?DP|sOdSV46L?<6
zM4kNE*$U+iFR>x~n^>ba8D%Wk-D2ScUbDUl{M3uYpCa{XdrB}?DR04*j3v$u7}Mm7
z(cD`^R7sOYOSc;+YpM$w?q6`b(l3bpJeY1d7l`-HS}^|2Ine)f8G;O%zvVN<<kA-Y
z!$S?aS}Pkaj{5{Rs#S>JLpjctF`^6JXwb5HbJ8-ykQz=<hrLcx^z<76eV)GnE{;*6
zRa-(ZGdmW#FRH<Y6a}jOQjuu94u|q$KkWUU20;ZPNF6f{f-lIR;R(hmzTd)m>*PS{
ztD7Q!#`8^A`~VYL)riX-Dw1z|3NLo3(?@&yz<R}D=$dsKpNE{mp1sU>8lHu{?f>vQ
zyNl)ZB!h2g0qR;_fpPwWi1hArm{qWwIWTg-xF-~SM}G%9HoL^XPLQVjo%2#Vj?1pI
z_nYDOIYH1!N83vRDsWBb1`lgO;|zIP6%`K9vKJS`$`JjL7T7xF2q!CAk2w<_b2agb
zwBbhxH<8VV-Bz)??|&Q7KSTz-rhmZjGJ@q>)VPBZ8B%-H3Uhj1^T+yRNy5iju)Wlp
zhWvQT-~J&(EWn(Ou)4r_VZC5@lJRi|xuN`_$(VFo0=D%^u!Zfu*74J^YUoie;o5A@
zGiw=0qDr_Ki%R+JYje=)hzU)z+zYZ76|lwS9;l9tMPG+F__OB<#GN*yE_w$<dHt{P
zQ-%s{O;6*yd?s+o|AxUj|D*W-oO-5S0^B(;h<Gmj#&6s#O+1ec=dQg|B6)Y#VR|9s
z3A+5_oCV8q#0?kFxT*zPo@IgJG-+ykEd@#kz5%}WgaT`JCmsHicPlFa!QOPIX?{t}
zi@@F)#?_#zk$@aN{27aj^YBsS9<;kXiXWSQ4*Z<j#4}~$G0@(U-kmcPsz@Uj{KX5J
z{<{wAwwA%7L+^ps=uy#>YSjI&6sPGbP{Oza9?UuRIOi1pncM<t?uYS}nGH$$q)om2
z;=#$pl7yNw_l?#pIPl~ahFL%63nUaBlC)@Dx)fDgZ;!%DL-6uxfB5xVmPB7tqfM2?
zu=nkC@ZA2GA9e38V<`#9hL_VZ;nxd3?DaG*z<Lfi>|&p3R~;HQRvTt^Nzt7<2a|4n
zX|%De!THmdLdJW2JTsa3oJR)YxcYp&-K0$-x<)|OS!L?EZ4D<5%Z8O5r*X9?7X{0A
zb8qGqLfyx=s5a?0jJWm+cQMX5e}p;4V%V(f*)~ycY6oAf#Q3SFufRQ(XBcy~8)rFY
zp=JJLNV)VCV&8?q6y~zJbBF!C{%qqcFB;*#fj0ETXE@F2DP$eW<x7nY;?HP#@_CFo
zDgXL{-8t5?ynzXx3Oj&4f6~ygK7(tubiiAwszkT(0Pm-)N?U8raGI%gP}DsIQy-O~
zFd>&awOXHq`F#=((`kanP6cu^Nt%}1uHf!i$dGr8b948K85B%7hXFoou}k9y?`h`F
zH>@b*gPy3<yT<}i&QzB5@lUZ#OgzheR*1qAE3sMSBBpz^pi9JD(b;E;*b&u*I>k@0
z<3bkd?p=#G`U&eshVXwje*x)4F{C;@6*;;_qsuPVE6AS#pIj8^e@=sln&c!p2C!bW
zP9JzX9D~59{g|251XaHW!-N-%jrX+;#NU2k*!d@XCz_G_nepIm^auSLba6<pA9ge`
zhoG!HCjXp+0+~|Lw`MD%@#Gzse0LAH?DXK`r#C~Q+Xui}FZ8^$2>kXAg{R}$E^+!u
z@o^PZ@SDtw8&Y101hy;rz<5M=n`IC&xe!@H5DV7c!0mcNaa!1C3{~=gR6i*w%w(=c
zM=4x<T7^!k)FF$=Q>^mt;9_QUVyJ@>O1ch0--bJQPoo1pb>?H2S|qQlzYVOVR^jNY
z*Kqq5HqRL)je^jBxlxn<;5lC_`h1oYT-zi?-ZJO6#;J|Whnp_axm<@8@5PX?DhWzP
zrbC7II1FI(^M6_9M7TMFf1{s^ZCg8_<dPYEDmEbP$4y{f^AUV~XDA%M?Tuazji9mm
zE^k`VDV|<(6qTHgLQqOC^Uj~=rrETEYON{RER><E4Ljjy*frM8Ps68w6{vUASJ=LY
zht{WNP%$qb-gHt>9HmJjbi5b?;w9@^hVwNOU&0VM3biY(=%@7ps=}BiOH0ZyRJ;w&
znOwmV&I3oCsmAgLi(#zX15E8Xh~}$OQ16u-xjObESg|{5xxNROY>vgUEg9&4IS7|{
z=)<8P10ug`7OFU|z@y_5(Cy77XqlCW_GJjFOIZH$<4112sxb+>qA7lJe;f9$8H-^r
zUghps--&`G5t#WYlI}5L%r-Qkp$W@zK-reAGYQ7{KDI0v{tj#A_ri<`hoH?M8pPMK
z8QWwnKO?M)Z%sbRM<*NN*>M#pGVI55>|Jzzqbj<2q{5N0;ka8)m2Ag5FlD(Bykgv>
zFsBA|c@+tlLzz$c<SVXDN``g~WN~5IN1SHZI>CTU4`!~ehqdbs$gZf*5cJ)gb2hjO
zVc*t>cNLt5be5^iF!RI+=|D`~Y{zF-mBSb|XJ|P)fSX@xk%kCW6zKJ0+xT?Q|5uNJ
zAqG@aP=M2gM=;D?iyt-l7Yg2);LyT!JWl-4WsxNfUZ?>9Q%{3&nLbsWDo^U0k7LcY
zw=iP6DtN}oV>ZS@YMv<<t&#y^)ko;H;x&f-W4*F}`TPP!E83iE1U=3#V6D$->|frE
zjR$RL*K!@+EAuA=y|~Q9W<JJ-Jqft+yaAEguSeFzDUpmGC-BmJh>f8_;<dX0BPQ5D
zxK%Cu=#nRPyLWLRHu-$*W(5$W49|VEp&XTunUI{dQ^2f%IavMPVcCC%Wc!lGc)I!p
zdY)X&&la}gys75oP4QpwH7o-2(l|KT`v6}VFn5dmDyT9WiJ#h-<DvR^ZdlBv+zQ4H
z6to?23><wO`p<m8<N+~f>6Og7zZcMF6VL1V7jgl~P59pfOEMm-F`;D?>o`iVd2|qV
z?RdhEjd_R-YA(FxO=B9A^qya~#gdw~l;hh&mZT}w4Et^xVwTAzD0`|#O{+8b)>mgZ
zj`e<}V+3^n30+cRp9jffWoiBD988n7q3(16^Xm=*pYm!PH{F2vd2i;PuldIIr<O!Z
z@&Uco?NPVK6H^rw`LN*6xxznUUZQh`Gt8U<lTTd0;V$O1EMJaTO9^TFF(v3N_`@H6
zJr3T-%97I!+0Ysi3PH9vVC&eoXd0Lyt`1Qk8X;12#G)$H@JrwYql0tzCd9zSfhG92
zmGu_?wzK}xFV6K~A*{ACq1P1TiN?$MAh8Y?|8BM*rX^7<zqtVWyl=wF0)3Kwz>rGb
zP8I)}r9^gpyAD4FG-$VD5_o|p%lC|eU0>e8*91#iRHz0Gn_4(e&8wVGAkUY-dC50i
zvd8q+@7RzvoKru38wc}Z6zmCvPiqCFJVh3C8g<C*@K_w;?uQL#PekU+9q?YD8Cha^
z4FwJrNG|9Qf!#q^JkprBdA`Jwu?T%l(_l%VCfau!(!WN7NJnWI%sv={N9!#~OKcuQ
z4!sSjPj+xC&7b4y<LTHDuo3;P*nxgb2HGXta-|U;vC3KrdJa9pDI-{)X3$$cV1X-h
zj`)MQK`X5CxW)Q~Md%jJ`j}^bVPM5pG~N4>OLe)AvadDK{oDyin-PHCiy5=bw}rEv
zEe#g7N+f50G1oTaE6!IK0kW*C(6D(47(ZBrcOGkzh^2biqpC%3r0Wo$)9WF6XEKy*
zvtYe_JMLA$5b}LYEi7io<L>}v68Z2ZW(E5~T$}|RCAiMIhTu5k=pTN{GG&yNv7kDQ
zXVB$9m*`7?5|K>x5&7L)4?)F*3yX3T!!<qluAoCD`VCyL7VDQfEAq)(<v@^UDspMf
z7u7UOz;$zapy~P!=Eh?)l-3gd#!~}2UO`AlX&SKa%~eOOQCC1s53o$vfE1}+g0L$i
zAmLLrj$yg&xq1df(h$wNOgSU|&HS+^7~efeXvT-#9Ep~NDiF<;axTvptIcN!u?yeK
zjf!{<{wzmPX<<Qn4aMBZor=WG;|sh}I*b+59zgChJ)%wY=)_MJR3mpHe{G>UU3x%|
z=&qIrNx3toT9$LQVZP9{z6>8JtV4gjH@HGY0&NKs(EZp&7;*L>JYbIOs~#>m<lGks
zwaJ8O!A+1MiNW^yJ?Qv5lkc#Og-gZ}xMoEMx;{MuH&$xW)XF#b-w0_sBjyD!2nu)b
zd*g%w&Np!EvR|lbsZW!pN<b#y3HB8)1Irs6Y>;ljtn#5;v6M79J7^*9yrDx<`kNpR
zm?zufHe-vQ0K-M&u&cR@&D`SPcZ?~iUHcKG0|1W?C}6njC7jN3+9Ng<qRPB5=uMuA
zyAq#+jX<3&TK57nSr@e}_8F89Rphpe|B8Ywa$ufa0xpjRizSC@MfP8QVY7n?3f7-O
zqtW`LFX|lRXWI}3SxYkX|8Am2C2Dj*gIcXICmO6rI7L^CqpNChNwORrKPCuD->cD}
z(RTQ1R}L;&9|ZcJzrpD5a>RbyNPK1^MW(O)f(^%_`Lxe{e1obXw9R-1l8qO{8k2rv
zg#QkBJyIRtuQVchiUP7`Bb%L@e&N*08M8Dl3>)6aW4E>`-j~T|8Hyxv%8n+`IJAt*
z=5N8@FUjb5=Q(4%R*5z~&>%@tW6?rjN+cnB_)Cnf<M*EDu4USgESA$XuNA{emZf19
zNw5>L4)CPs{6Yi75(O!m`n!p5_4fkf(khTO+z8JHGvAB$eekrL#m|3v7UR1bAz(-b
znl2Rcnn{Lq$@K($zg(7t-@F19zcOJ)kTx_}o)Z7J?Fb$h-o}o+1?YUY8tT}+Zdh<8
zdg(TR=@?he)a5!zOYTA1@yndG;0auOcM{egm1f@EW#Wat$I<6;A?m(+#BU9?Ak*Ft
z;Ir#rFswO0*VN=PzwUuK?Ov>e5zfZgH&m5|Rw>hOda|U-Z4Sm7n_=wEQ;bFN*GY%D
zXVd;UL&~{_U=ui)@MBqTAkT)x)I5aAlNE{UuyeR~*+#rNQ=R7fsnShrBp9~#CALqv
zgAMtmqA_PW@w-<YxV##PmkoW{Dl7o|gj-<Uqe>`ASEEHP+fg!1NmP2|5$nCOJg>hs
z?$T_8^}D3VG#L|`x-mhV`rVtiyEvJXSm%p<J_du+RW)M&q6_2Le&VdH4Xj{{Am=b6
zn!Hp2b`8^@-wj(~WB47+K5-QUQ7+)87K}>YQ=nw}LkOt21noXbX!62@40(JT>%~Uo
zhpQ>^YHfzo)$ITLvXpP~*o$6;-H^wf0Nub>+?Hl*Y90Fs+l!lUQRoXu9Fhhp_nlGo
zm@-|q_&ml+q-jL=e3<j>DGDRjLU+tmG&L&|N&EJI+~z2htLj1}t`n!7Y=E>j1DZTq
zp5C?`L|U{}X`T5aa5dAW8N;;b-iQD2UN*}r-(3Qt<;5`hrZjCzx5QSz1kNx+1OALn
z08=#K&N5!K#%&>#^U7R~?N3gl<1LqVd^n`+yb4<sOW`p*z?h^QD4s1vM($T6{n8Rh
z2)_Z+6KmjBh8fXKF~U(J|AA-gQQk7k9ZolvKtj3(r#tQe=N@Q5t-rK_zy2(cwlk*E
zj-N1W@ihK0W6583XvDb*i8$WtC?qdT<Ap0ObAs$7d^Ynn25eai*5QbkR~Vv(U;zjs
z*?73j0^HZ>vhL6&K63ms{A(M9>4V>*ozRAs*CcVm>ISa0a|T!%>;}`F^1R=+b0VjE
zN-W>>4??D^WB=T9%;`29Jp`;<A5xrKGiMBHtsTp{Jrz*qBty11E74%y0Q{V_`Kk^r
zblY(hawbdxza@5@)Tw9a+-*RQr?`WL?FgXdQpEMc9u$@yg_Po>u=TVWX~6TMpk)Vm
zC4WmA=5fx+Z(*sZ!{IG^k9~|iN3X!P0_K>P7{bkv92}TBnEbl=5yx+A!GObZFt|XL
z2<6qOXNVyNkw_4Ac0-EK5_a~6V%C^S=8vl5z8_#aqjW9tith$ASM~~aZM?=!@@v2@
zc*LLFtU$x;QFL0#ls?xP4+*U@=)YH%ewWjtpS~*65Kv>>%RBt$VY0-wWikZ)mf~Zl
zsiD*|8IrtS2ICG6rkCS`S^w@M?h1R3Kbg<Kwqhwv&oQFI#jNABD<3?(PvVpxo7j7f
z0$S~VfJ?V1lS=2mSbbW9oRE<wpQf47_j9#KjejNN)mkzCxdGiZ`z{oj>(jS0!!Yrv
zEQMlgX#Mw(3vfP)qgQK_IqbYye%+e7th>N+8d~&L&?lVq*o2(@a}-uxA56=`>^O^W
zQpCv7ibRZE3BF%ypl(<;yk~yyXtnper13KsU~-r9dv{(`t#=ih<L$Bb`Dfg1pie#?
zVgA+9!8CryReYBwLte73U-r-{$S_u<X3dN#QYpgJ*(bPR6K{e?=oxrW_yD!ePXVQy
z-PkfO2p#jL!Ba=Z-Kn036Wke}##KoE<i5oIYhPeuttPp!Ta}bPtb$?OZsd=uk?7IO
zS)TqcI+|y4Uguj;@-9)lGsuu6PyNM(j$wE5IWN&?{|BzS@i@B9&}9ym2<TZb0N#vQ
z@^y?VsflWav_EmYWaTS<9_t(=xUT0LX0q>WN+IO^ssrnB+Vt>$*_iuCmxNot#N?<X
zjL$xcE1w8R;oxj|CW*idlLVMqQHdvl%}LV40}v8f!-W*J@!bn#n1{81Z&2KhYKMNJ
zhKPA~ry`E}dLAO*1>@3IeHzuIMRxhL!MzoO>9EheAh%7LxXej~@OeCBO={-V@_%CF
z27*q@Oo+MD52%Pd4g${;PG%Y=^w%!|3EDoNdvP%id*_*9khM0(=!b)lx%f<ty2NA0
z-@;>uv`LJjIUc>GN{Z(z)10Qy_+VoPj<TD9316!@)h-?4uw0R}FINI#+b=GxvzfE|
zDP-=2X3+MMrH;HkR)lz<-|**rmH#8Y`cM~sj5emHS%%Hj?u{riX%>Dgt;L6F%r8XV
zLB+UxpnTyVHm%*o?sWy?@_cVFJ@i)`a3h6pvt`{a#gD8<BLGETS>mE~MdTtk93}@U
zkaN)%WY)EZ*n6)K^GXe<?4*-$@oNJtai0f$YzBA#{vD`{wxB1=uHlq4BM^*=bgY_V
ziBp=5>B9%iAuz8|9OssS_4kL6_iV4QVb>w_I?w|RR$AOUEo+h%d=V1Q-G+vhWqiEZ
zEVipjWF7O3@L?o#QOQJzHI~ZrKV&tj{Jd%iOU@PDIM>6N5q_A^!S4E_D>1EpKlDs5
zgjA{hP_HN?Dql?@oV$l7D^H=9d_ATvE*5pBD-%Iltw@kLBiAqJ0v~o`4jirCkNz&-
zaI%yJ8B{Do(%P*dC9W5jomL>;;`5Mfc!ksWoXsElX+d0{Ho{*G8~WdP9zE~qQ9r9m
zECZlVqn+}&$+68C$u~hl_8tCe-7Rn$FeFd@>XP#O@%;B&$8p?EU9$akGPFHgi<U>M
zp>3}ewJ=a7MI(y19n_Re?y;n@uJ6#%%L49nn6o*!p4hj&8*u1h=Bif&rGKHAwyu=>
zvVdhnj+o;}l?K*@kfps&Jv@${jaxSf3H)oqgnh@j_MmFCVrOocR~NpyX^x4vc!-%(
zk7GA)z;jh*RD*2iYIjJ%(x>_)y3`F!Umq5QX-oK?h&YfA$cA3aCO+iiR<P}#3zA<G
z#Ii30SkrwM7KUBNIW1?=`oe6SJf{zXlN9OcJLmDlZ8KV{>;a3D+b~+|GZ(hBNvyEW
zhLk@ARN1lwlI^1a46b5batRuHoPtw~o1*Za481n=Ihtjfk=atrch|20Dd+pZeYPA*
zf_8CXGkJ09+hM%NA5Hq7l@7U6X+?r2^zp}ia<M7(Iyn3FLw@>UH0KA?v=$Zc&`SZA
z`X{*Lq7~Q|FF}{fleuHp3}}q+PkhS$ErWOsBJrNU70ESn!dP9_@j4CGLUxyGsL3rk
z@EMG=4q-*M3auH(vfvMH=l*{44O^c6gx2D4kc_Y7R$usz`TExMe4q^#shofur#&Db
zyy!}$D)};BgK7_4N5R|u{Mu1wWK{_BH4d=3tCbFwd2tsNYE(%?_;?6$8Oj*JIwW}@
zox^2rV7SzhR;~TaUwza8ubqcb-w+pU+**W>ihn`hrH$wq^$q98oJWCkwqux4YHkH%
zJUIql0FQbj8o#R+67CJd@=O!j*AW7<**sHreiTgiVO)&FcbswZGn5R}ic^HYL8PWm
z1MFU7uu=d>bZ_xRpVRp0l0TSydpyWhe#PkWGn`~uB3%7!g&{K=x$OI&VeyRtOnH%j
zcDWh+iqDqxCCl<9rd7e4sccS_t;M-T?T22)uY9@IVN`!=Oa{B@QAwPUDAm74RPWf0
zIcFI=Q8I<wfBi95oo3n1t~M-IP$EKwo4mwiF(*4>H|`(LzK;$UMUmpWFwLELP2cXv
zAh`v6L47#rw^`EO1uR!T-4|sC24R|AFnC`4%U|`V1(_v{c#sH5eX9(amByYeE>ZUz
zQZ|6CZ3)Z04x)FDSm3B_bs!0w$h!uGW5TG}z+ht}!77XMH#^7P_l;3d=`E6PPr_x(
zx=}6mA2fLvgS_f+9DkZaM|O?|eIwk;Fy>UP5;J$IIzC+|q-(t_$;UPUu`XqNVyiVM
z+33p4w_FCbE;D-c<PhTPUkst2Yw@CU7z$)(<=pZ$r;;5~{OCGGazEl5@&;DKo6QYv
zm}l_;V}f*K<bbW=A@1yof9S-z%wwAlKx)l2=$LvHoJW7cf=|{kE&erJl#`;DE^CmZ
z-Kw;3t2vdwu1Lq0b)ps9Nh*14U~>@%aiZ}vcrDUlY_Ju4#pQSC<(bDmpKtleXBZ#Z
zGBY>Ls*_**Lzfu0)WO70&#~d#U6HiaJsj2e12(vPL2oVgJJ_@i&%~U-zBqR@tQEqG
zK{2eaRxJ89!IUJ7a^P+D%Mq2$0*oG53h_50F*PTj+od|1G4frwb+faer%sQKh;U+a
z@8yVY(I~#ax^**_@rSF7iBr`R{4m6txVrSA&{zxm(o|TtTMSL|uc5e~xwz(fai&QE
zUOyxeik5vxm#r~kkJxOO;$}^I4z@s|MFKqfZcgK`&%xHtcifmZV^Vuq8|I%GgjH3W
zG2x3c6=a8SI-81drwq$ce(lHY-G6b~<2pFq!E$Jkdw9PGAH>^#N8lzfr2NTVTy@Zf
z1e`a46;B%=A+4G_UTy_<B8`Y&`Bl#U;c4hhwjuU%j{xP~VuH&U-sNnRsD0{Q%#-9p
z^guKJpmQ+Q(NL#Vo~(C!J^}VDM67*e3tPP3LfL**Dqq34vaApFXY?*yl4wO6*85=2
z;W{q9f_W0VHt=HkXvo^U9o(A4z+IN53z@qCb_YXuml8Bs91<r)W%B<V4M;GXeZF`5
zj6qLzxU1JUpzH2Rw98;W)2MQha8v~EDc{Eh#W?VhUw=XTk#dN<pO2c?{V=N`64U(0
zz*(sfa4BJXo07-;yt*tbsd@l^SJ=?ZNsCdnm}Nq<{g9Hu)JEQtCRx3Kd-0ZJ&Z0rg
zvGEf>pVlJf)~4Kgv%`2buo~K51%q(=H?Bo*0Q{;op>Q|rp9kOO8Xkv&cfB$lJ==yH
zw|R^!2j9mV%%4@b_8zRuxCaT-bGUK+3dAT>irfqql9Hwe;Cu2t)U%F9evSfJq+f~y
zUWz2F#)EUep$uBjSK>;KFZe942n(yD@kx*k4SU<0yL6-;F+Enzg<q^f0dp57+`rH5
z{eA#)ZpDGJM+B~5%*sw3w!7+Q+`Rf?UOplVG-P`D_$$RAX`c-NiWBh55DTI)aU_3=
zalk@TSf60)V{}{i45CdZ!~5nZsIjt|AEd;(Jjd8GZNVC`(X;DV+D2iiT00IOeHFLw
zdJ36_KLI8&56m$`T6pjebpMd21@=Nz)G;Dm-<ELB9~zN!JdJmk&cd+Qow;g{nfv#r
z20dE(6m3^E^J&SdY=*Uxdm*bx{zuW722$0vVfc`FJjP@^X3d(Ev!7L(r9vfXmP);i
znj}r9Nt6mnQb|&2FjNv}KMM(oNHhouNkWn&q59VMzaMXJXYak%^W67!z1S@z-hFrY
zg6td^^Xw`%otCBgH`(*6az1mtWh^)c&%y!^53HQ}4Rdmii(8yO!cNCmn7;fMTx4@*
z<04s5l-D5hB~8%2_8kZ|o#Ev-$<vE5nj~ibPgr?Lg;<t^qw>gLJpU#I#gU)!^K4Dh
z@!~g^JFXMs*0R2Z&u;$E!W)==QI+<$X+y7B88pqAjb)vSd6AbB7dY)YY&u(wOMC-y
z?28!i+oXwi-vmP4(^tHSohdD0`=;JQS&%ralJ{vphTnvi<iUMwGG|dc{$8d;`%_lp
zkNsApUYLa*pWdTx;$}ST9}Cg(vtWMV3tYF^l%&3!&CgDkrGl)+jQG`8Va)&)TJ6i`
zqvK-uEeS8dW+mf`)lcTedMV)i={)RC(<4LXpNHEz6f*u9QZG>fdeu(CoGNv`?6DQT
zzr#2M0j3cD<P%)&F(zq2b&QSh0S)6mfo`D&$k`O*O^J{SKl<VK)KYvIphXIg^l%qv
zXcIwezqD{q5AR2Zabv;<P(j%ksgAXPM9q2tQ@yol;Ls;f>e&K^!@I%0hw^1EuefuH
zR>a^;JSW%gfP&Snd=+!_<yl<CgsggeyVjV#IHyJ*EMQKRmy5vfzoq=>-nWe7txVQq
zD<Ae)#Pw^Mfz(Qg?y|HYPOpUG_K~ayA~zJ)E*BBAW)WFxY(k>Xb@OH=B06Fr<4C?p
zgqD3@aBNN$yK~QEZmNf1(4_zoeoOH`O?`67tr%0QRjKO|w)fK9p6RlsncZ1k@Qq3|
z>i8Mc4S9FK>E|fv=15x-`p^VqR7){Q^Ef(8>0`gmU|#RE5;;GIu}h>|@m0W42(D}6
z_ykM3{#gju^KKtFE0v<l<kwK!_zmtd7T547)?3p&#U*?Xf&KaeNY3d5ZpiORc(0pf
z=*~#^*spRF$C=T(<aX}l19LKZ??1-&e~zloLFhlD4H9fNz~A{Sr@XWS=KuT%Wd`Lq
zc(*x~Hn)J1bsr~CsB)AS+L3@Y`J5!hP3-;hqxkm5R`76_hp#_x;D)cA=za5@_~<YJ
z<dhcj6BcNK_o?OJbY?s+NevRmU4Dh_)0}Vzs8N;5h1d}vi7Sr0h2`}cr10@^?vRxk
zEnLWW!n1g;rKTU==te`ZnknTSRB4sxX%wAf{?N|ZXsNDE<u6#0NSgrI$okX={<Xmh
z<s>MpO69z@>&2t@P*@+Dhu&^Kr9-y%!_&EJj-jte|DOTyKD2@#EYcwV&*@5@W;oih
zd3iw22h`lnW}t)rh`lmi@dk^9^h@Ru%m~ln+-)Lxzj!nLtV0kkyFDA%h=#$4fM}>o
zj+OpABA|t;$<Vbd2>w4Gxb^Q1{*)a+vb<Vh>Fie2&JfWjrj7XZKOMS8xfQc+e}s<e
zaIRwv+s)i=KpR=c$h>!7yap$LAosg?>O5;|$@VbG9+vp@nKn_$VZE*#5A6DS5|(MN
z$CqqZB|GUb*ylgv0t#kho2>~%8ytda@&}`hvbfe2Ik>~kjO>{zONRNGFixf%>&wkT
z(ZokQ43I-7xgznaRXZ_f;&0wNX)?xF+=jGHC92}`6|XE{Jgd4@d`5Z+SAIi{N?HVb
z&k>dzx|xCNUYd}<9Zw;xcL$sl+`tA4DfGs_g2M6z;OQQMo(~@2F%50f_eF~i@e|Vj
zejAfS)q}8QeJcF@^B#hhJ;bcxEVs1iJ?ad9iziIkH^FNgoU*qeDZ@&zFO+dg!r6{x
zW)9B&BcvA|T9MeAeE8IFL{@!JB%&-u3=F&vIo>kTLg8dMZp^+@7LGXMQ#EY9eHw@D
z&|}$n33?~wi;We;D9~Rawl92*Ib|Ef>AL+87~Tn6%Lb7*<~b1OehJ@&Ct|6akUFr8
zpK-!<JSS&Ow(HcwvGXj?ZZ`?{KF}r|ch+$oF>|=mhI}Yp$^0q$GdTCIE&S(pcCPpo
zg*xv|NP(*X9X)6p7TKP`ymQB4<va^=bMQm<Uj4w%yM|=%I<_l2p_aL}L5X%&et|iw
zV<G*#2kNVLfFLYi>Q(OzagAx>fmy0#!u;t#51NscE!EhT5`|F9{1taJ=pG-|o4HiO
z4SC-OOBd>pjC)OdhVfDEQkfjcI=?~hftuL+tp!d82x+a`caT@hX57r{IO2Rb%c~di
z8_#v(ne&Y65p$d~7^n#IU+L5FXRJxY7I|v_=PR#V5Q6<@N5VoG6WZ=zOXKRtamkI@
zAiAbW{SI66)$Gh5d30MWJ+4D1SUl#g-6}xUm(Orrhzz++qF^Swk3QM`2=j-j(YiNB
zxOwcmfB$z07{q9SmDU*8blL<RjywiKgHlLnGJ#LKWk`&M51tx-0(A#mfH0{cZ<;(5
z?d?v(s~3j=3ae0&abd+_%%NsGlh4?`2HgYIAT0PgPc{!Cp+ZME>U$V^WvAj!Eh|`G
z;m??6y)e8j1gEk0N1_vR5It?<L%emUA<M9xuPDQ#Q)R(?rw(b8y5Sv{dfejk9Dba)
zrQU9fxzY{U829+K7?iC*UBR4;(Xb$``>U}(XB(_pYmIKXWjMB955i`cQVZcCoTOw$
z-NNKZ_y-%}Ex(8}c=L}pP<zjPKG}jw$#bAQwjV{F9H%r%o8CQOK;`mnz$r9=lO){d
zvuZoA!&?Z3mw%!5K{YzFn`N5<XM&(;nqvs#ZK<#w(bJiCQRR9JoLp=`Q>h%vMFxUo
zi7#K)yA-CqT?3oOPR8JIQZ8+f5xDx?VSTzV%u6~5l3)d{bc80It^1Mjpeoq>=UK*u
ziRM)0b~Pxs51?kdLr}$LGa4(&(dp$vLZ&LvjO62x^rr~RTgGAhD+Rh^fCB4K#B)gt
zm?x`iEy`r6lVt)G<`34T*)`8GtLy>PvVHHqkvHM+w^1m(t;>7=0M29f37EgU7OJPo
z6Z}1t@dR{1G^|J}(NGgl^l!whs~;hv_yc6g{($ZG&ci4@<{vs*1uG1zVc5&5aO8|3
z8SB0hHhDP1=^M#dYWA4*Q6@-rE0mc7>^aDw3K@|w4W~36#4F1!Nq#$Hl=RF&!3rP1
z<ra`Q<SjQRNDcxYNI|6^!TJ%){yuI(&n?3+cbprJP)LT!N94&j&kVHR6vkiL!FIL{
zS~QF?e%^X0(or4i#LBu7<26sf5}iJfj>!N9C7|B^+xUhT57@J+?5Mx-3%*EKBN+`(
zc)zjRxB)L&HZy!AcX#t3eAASRsBS>rC#%!Kb-Teu{}0yce}tJ?F=)2jfSiytGhgK&
zu30aeTQyXJD5yTiP4R0$CB}-@GDcX-1eTi$eCOyl+lrH@tVQGR>eQk5Fw}{Qc;mqr
zU`THXy#8WH8_atlr&q#ekSfxG<!#`4s-Ce1j=;l~L+Fq?30)6cqT#Z4SaL>=)QP+}
zuL_<UrN-vf{?E8EA3vbg%rO{M_zl15>5wSxI}qK@+>)b*!`u>GR@?7lF3bd678K9C
z9=1|HEgwEEJx3a?S<6?t>Epzm4`4|21MufY;Kx-FC}YGjY3t%~&gOxb=>8W8XFz7?
zT!Ai^cbHh)&Q~(-X}=^EM(?@+l4s%KEkk9gqH8{`+NwyROiu%88$b-w_oJVRkQ2G?
z<dnw6^Z(_Uk-#%oaKR8|I(YLZ?D<!U&d0v-BP<)yg`J-rwtZ#Y#N_dJ=UPI$Rv9~=
zhi7chyUwz@Nf36-nyY0UKhtRgsm@R(x}!7(-<>t1O`m!o?!+yWt(k|93QwcYrA6rT
zb2V0}l){EKbLu!*nVh@C_L6sG=u}5t^7q$nbaq$)72zgyAmbyL_HV$5q3<E@h7S~l
zNg&Ca&G;<-;f&e}=p5e;n5abU1HJgXBSY~<r!59=e!zA9XGTBm`i1aJjNAP_qH9+M
zgz3icF)!q(=;|lFOlJ^#ANcZnY_*6aW(S=9(SvjKgf!=51{Y|24I*TUFxK@aIBZOX
zDOdi$9TPTVk=q0RHp<ZUFTWussvDhtR!G-Bk45K4tY4Fs1~DwxICAv|+$q(Dv4d5i
zCT$TO9eoyrGp_I}Mt5PA=OdP5E=9E}Rl1b*09<~X<Kr{B#2s&gxAP?4Jvxr>jtRki
zR(8zMSHvrM%0TfeSz>)66NAS-K*N=pV4Qy$LyrVNqWu>>^^-h!B`edtVFq-}VjUu~
z6XLF;N<=a(OB}XGh8td(hsBH4XmMZxiWYRC)3&+N32&LN@U|iA5KZTEu3dtVr-v}u
zKM$3v7>|DO4DJ}qqOEkw!ksVK|KDKd1ZMpyi<=KXXmpE<9wI`Cc^_w=rox|*6_D0=
zbFx$93n7EGNOW#C%T#_7H&>p5r^O;7soy4kmaIz#7-eD3%xmJ{_uagExdM)u{}dvp
z_F%woKXiY8g`c)!8yphblGh0eM8Q1`c-gx+WBEVKm}HA_H+4a2(iPr6a}VCX(+%VI
zYLosQDzr}bjMVQ!AcQsMa~6@!Fs*Yd%P4yB_tT4T&k+%I>YfZ!+Dxg{!M_-|*BiZi
zoH1*qEe596K~BHB_zhPBkz3lBd*LNo8!_%)sTmnH^d`<w)}l&}qwo#OdV4I`58sRq
zVdLZsj2q~KE^-pII;@JNjk?tGs4gj;@d36JvYq~~A6(U@QP|mFMdL!xi60r?#E;_y
zL}Mg-|Ns5Sea$i>+9V8ees7ab6MDePCx+x++D&Zq5n+tWY1V=NC|xu49sUrR5QoWG
zEW7Cd&;33joLU73zuFMrq3U!>#S!eH%yUwygCki+`*d#rDt#XYX&tLkFx=14e{2$n
z&#~EnyPLGXz#NBdSpi{s-*`QVJe56K3bS{y8J_h3F>hu}^Tx=a=czMTGLv=M72m<;
zz8rkG)s{${H%c>2zoS*9KK(8lL<H%(90fn`g42=9V(&N&KJLLrELwgOcGU^#<QqDq
z>_<NTWHRHtgxT<dANkyuhx$bQg915R+=~LI`(l-<GH9(~4lX?l`cg}mBtB^5wO?A$
zV0Ok>I!j0<D8=xeMb(gEZbn262cYlRLmd7{i-z8b#?96`RJohwZl3<)w%AlcdJ+#c
z%Pg_M{3u5JDTjG^+9X)hg8CW0;(9yc;i<18@px5&0TIq9T>61qH&>l}FjArW+SPHa
z`9zpl`jk0tx-*wm?1zScemwGmF$AN&@@d=6VbbgGs5Q`#cqy+18%0C9Zni#EU1dmq
zta*Ty$5cQtwV0D-&gjbXUpS?x9Il};0R;!rCVY4Og|mMhMVm%DR2r}yUFM}iu0b5u
z2{!U!AwT$QQ!|jkfx&&MA>#-A!{axVNOS)=-qr0gOg|_`$7k!1R9D8Kezq8EYYH*$
z+ZpNot;LvhOi1!pA4O#^75ba`Cj@!VGV4TT;@JP*f|fI5*}X1@%S&|0LD}EftDOo>
z{!vg-Y)r<l{)X3Qv+S~>9%jf^aKe&U?!kp3w#c)?r$>wlW=(;K@9fceLN4ceF9ZHC
zC$E<%2}`a1VA@W07oXn1jaAzTp31FYqv4OvHoL%Wn-x{NX9Vre<8iC2CCODuf*k*m
zQl|oeIJ)#Q1ayDmwkp|?!Wa$s+^I%(zF7s0dE4NAa19#!H()zo4lnnqlP^y#r~}I=
z<S`zlQ(=~LAU?;)A?&xl=&0CxUJbwB<qT#h=kcbDdui|SnsZv?%L}~ZsgH9E^yn9I
zGu+A`AZIabADavJKcB<gAEzN9dllH5n?S$!XAIsl8@z26bJIFxsdtAK#y?V}GiH8+
znIA7>y2ePftGUkS$o&+rpT8RIQ)XdC<!Vf8cf-Uh2SMN{FMT#miTL^b<Y)lfLmsfj
zx}C=%&D0dvd{stmmggxy_!UKiw7_+f1y)rRLg4*25Dfa^cudKJlsRc|kAAkHWPvP*
zbPsT4E}QwC{xiUR*&r?@QIDKq&Va916p2^G16<6!Rf9D($&dMi$mFrtal;u2j1IgE
z#XmHOgT*AgdW6jj;;DGu6@B_(jex9lV4SgAAGizOElAN>9V$tCm-#@!f<}{L7&S(b
zKAgaK^<P<uGv*^o*4l!|Vi??>8H?>!2$JIGnJao2qw115B>pgm_3GEaPTLgQ(k7v(
z&5g^6V>`e!=8AO<!WPpc`0*?T>o!``^u!jh*?JNT5`)-1+YHM0SyAr?QC$B9A6V8p
z0IGbaqjLgNPWgC%)1Vlb;U-TlTy5w$eh@v$&d-U1KI5woe^7j`4%R=igtANi(3gG^
z-R4B0)9*;B@0uo<!v5RG&K-ld+s&wnx(TiBu_T)P%H+F|HZf?9<nNEohUGRd(7>P;
zW0|*s8>mD4b)_(n`9K7MyBQ6)-{Gsl$DnfG0r1|a4Qp1Wpj^sYs1_Izl(V3cvhmXL
zC*L7wn-iy5qDT)E52RL;S+_FN2|Tx-hCo>t?7BV{QdK9yi<3G;vU`Jcwz~~^`_q8N
ziRZ$-vJCY8vy%(bFM|A(9Be&3kdy{zv)yVRRJxjD>$((d`#zRsem3EW1wWvzax>aI
zeFB^2B%;uroiE(1xL1W?u*J`mx_xDLYh`O<IE)ALS{0I7tAt7Z*2HLmHJ!`+Y|>qq
zpzKB~_t)?t$c<w;a1}R*DBXe4hrV#$9Rx@v%MA7=aev=cL9<8=>@wA<q<A;)$9(nc
z7o36Ddu7O|`m^|_A_>>L)ub10yg;Lni|Dsm6O!xqpu?V6=rTQv-fHLhpNbk}_<0*@
zAG{k6nEeAM{j=gfddz=3eInfM?}d;%$yk%Q5e&TaV9Zq|n)-Y=H_5?>hNf81#n(b{
ziMSA)H(uq121fh|@2lwIcpU;%zhJP+SswqyLfTJt+{|XVIcG<q(-}F=$CkNC>e-I)
z$ztj2wFMZDO;A1Emh?``29FjUFc@)_i`OZ`c`umb;bI=lW1ZB$H`n2Gi4F~2Rg9fV
z>G)<>B{bd~38^dF_<rkTRAu+`>jMO2xsMSETTsu7BGy1t+W?xqSC8f_|AEazhJ$nK
zShU=tPUrxZ4-eF#6N|E8$>gVSk99OopWcccpNH_P-q_H(FiR{hmnCmG3*yzd6!x8{
zLWgtL(MheAeLIcGE)Q9v6vE!YwoZ=jUlJkdtRC@~`3Vc!`tYZ`B5BQL{F_TGA0@cV
z=Y-tkkH?qcxf!NJ&^?MP$~^-STYf;<Wf9#Ip++EZHV7o!r27si!X?!bRQi;~+kagN
zenSPc14Nw2s6{+Fx*wbr9HiQwv9R1xiHN40fSm2RkbdVXb8J18!afZ&I9iVrK0e3N
z_&9LLeF#bwQM`e1IRBTngTJFZ-STBWdX}=>!{Y+JsaA$|xB{0L%eunz+r))&bzGy-
zIq;gGK!fM0Vd9|>KDNIGyjNb8x@(*9FWBBE*~*&TLsP(px1~-$CcxsIj9r&B04B1G
zR@fUu3?Ac&1)6|f_fPOH<4a(b*>61YT9cZ$3?!>=+{cht*3{eG6bv|hc)8Gm2s-CF
zmd+bQ=M2}!X1`mUO?@=jZBe0;egj^>l2O@GQxcYVk5|x1XF176@%KfqF(bK&n}4zv
zl%Lk(mTNKKBiqJ!s2sNWx?%<E#hmM4%!{Fp92crg2Cr5mx3AV?V%cTz6jsBhXk+5D
z*b|RRFJNUx1MhTTl=#wLGYFm-!p9tDbLiGXn5k+;1`WG`kC)p}$(F^^88_R(oy2iX
zPMUP$^^LfMOvGXP2f|LHL@;5`pVc@67*(B(x%^!iGA#|Hd1+u^eU97S8UaDWe}KfP
z*0I(v1lEO_l5<nEXz%G-Sl($ub{Y==L0fue`|H<?->6A;|9g%~62_|<$nNh?*e)Sg
zg&yVaVaxp~C|!03{qB{Z;Rzc|9n;BmMD69*{9X!a3zfnB{a-$yIiGi;>Ed+PIk+PA
z48(1l2|=nVbWkJ9wmNOVz&u5|>9Rg`8GjB0E=#$kSJY`y*&*l{UXCX!KS12_Yy5;$
z#e99g8Oa&qg#0aQqB0^A6SiN0`xgJe&M^}!xqc3hO@g_%^;n*HsyLIeHY8<h#Urgt
z&}hGi6t0ZoFZ|?Dr92H%hU%01)p^j-ZNqsUvc=sq4aoh;i6HuTfe$igJe~t;RL!6D
zjqiBzHtA`QaXN~x8lXsTXi2bUx-6v5zRB&?sld_I-(g~^EOk0Bpdy1yynFC=?$yOw
zJQn{8^U|0<r@jf+-q0q6bETYdpBJ>hp2#?tFZt9d>$w|>oe--j1#7h@IH^OKbnhRA
zdz7ExkiK7VNf-)WpKFlRqN|)BV<7MTEQLpzE#P^A^}!a|(8#xXF#2;JHnNPa^S)%h
zrfxrOk$DNCO9hzmAe%ebdKo9pvZGEv{iLtMo?_LR0ubq;7-ww7IqH@$@}L4qUZq5<
z{<fgxS0CS*kOn)C%Y!&L7qSvP&_2$QAA9p7W<;AZA8QfsJ@F0Rm26-g)r;5_JO$0|
zRH&dbWc;zntJu4#6V8A&sZCD-1#uIu=B^;GszT&kqaY{Lo)eTU$~YlCfaP8jP&}jr
z<Z2ASL#T^IL$1R*eLE6<;Uf09WO8*szKHDz&wFhDh@D?5*x5Y<HY#WlVR0)zV__G5
zmc59dI73>tc^-eUI11y`gT;Y_?U@Y~#i6GZX;0oj2v`}-Ih_p?D-;}pY57}Warq|{
ze)eN`KPhK%nDuftPZevsoB^{)Df$%)c?Fg)+(&d!=sAkJy<V33u=8n0z%TTc4gj<{
z455;%ke=6r`&eE@puAo@=#vqdv;R7D_f=rc*0B)Uq=ib=r?_dGyHQ<-IlDYEasMJ~
zI)bc1i+4LPjd{xq{&w*T7~3N%D+AWMm+^PA=fT$~b+Ygbo3CD%;kwH7p-X)Tw12pO
zr_0WQSGpd}WAD&c?u^s<_BU5JJ&pI#nvRLdG2Dc<2!2K`o0DFd3CmvXMY-E%^vY!e
zB21nR!}_$b%YG|6FO{RYuNfV;@frGE^Wki!_d#1jE6!ajLvP&|k}Lb=$q!#6!lx<F
z?PaAfemDC~pEscTLwg`&`w%=>FHa29nB&CY5ePi(#X`|SUZvn6<Q&oG;(UGizso+s
znyUS<C4My8Pt-u45jDUq)uM-d6{&Y`Ha9`H0!tl|afQ|!*e!2IXB=$6X7)WaZB0X=
z^%pL1R|7b0KPLXG<c_4^7^=$4ke_4qXjt4e5R@dbPF?_x$~7j#5@zB}mcL4w^bebl
zush{GO)x!|4W(MYpy}y$EWBtBsb*ujbTJPxsmK4%+Ck^#x4AOqA>8)ba!6}F#dc-~
z97CHr@j|;cIemo9j96AFb&5U&aH>>EeiA<M)h1O=pP+t&DVe)ZkuEEA0|?j(pB43q
z+`q|?u&NLRTVG2ZW(jGkMlvrt+a%4F>BP*rdc;FA3F|)zN%QzPPEa^7WBs*v{B2SU
z7h2gFVRj{~U(WVA)mjuLsG;#@57@KQmL5*bg;#@vp*(yj=7g-~BPT2aVY&$()x8T%
z@3UZNF!Ps{*nxfgSzP?<6L!?EgOhtpaN0dF__gNq4i+cy>8?l2t5^a5;!3b!#$?=L
z(g-WsJ+Oy4M)0m3jL3P2dTVrvzkLMGVtWqV@&7^GLJ1cwGKWtZ)<kghsAKJuIEWqA
z135Ed!S6^5KPM^%>Y5WECU7e5Srvq{E-Dg-d)HyPuPSjeX~z^ZOOhkAPkdeHC90-8
zM(Hq?NB30Yha5AZCORg>Z|f!W9`jOq(?Lk>&sB1T=X~I=++g%}s1-MB>tfzYA+}s8
zXIT$r^oyCytAi}b_|(Bm)}PE&==coMX%Cs3`ZHH{@FthzRL6%}Gk-{&1vrF#27&00
zgWgvivg~dp+dEwlpVxYU3I);ldHWso^_YiiWACCse8928Nd>N3M4`iyOQ<>ClC=Gs
z2$}0t$b{yb{419P)Mnq^`Md9fV0=Aiud$Acvz-lXZ00K&n<Dj5Yr|ujMx<kS4DWpF
zFu!lleu(?|OFZV21jE^H8J>7TgTq;nIb%aRF34g3{B@A{KnCmPeqdfBQ+)esAkmLz
zIrvX3Yql{2ZEHnDuiBU%avn%h=br^9S3Bv_buH+$@C&C^Q-T?1&*KgD{92W>9ovGf
zC^`C=`9b>l)K`0<C*w1QikN?9R|jvsSAi_^WWD_PY3O;F@iRP3Nk@kp=RY(K%UMn&
zxIc{({Ta+39XlEXA=@(z?;gWd6FX4gsV)w6w8D-TwtSOoH)Q&n6S*ZpFm3!{$n}2(
zOO{rnsmlO*#Z-X`uF7$xYi>fNW)dfgP3LAGP$E|qOh`mLW2I{u)0wj8vA&(1*H^Xh
zaW?h*&SPc}J3bNA4cWPH=v@f5S4Y`D5Ae0^AnLG;y<>fMNGngh<`y=Z6G7S1@$Mmm
z`PWs+xM%Eh^jp`Cf}+9Vk>gL}9bI`UNv3?&q5^pE&4Bt_GRN-0V9aY^yn0=AG+$DL
zl>v{r9W^#o(5EftAE?uLDRC&u+`@%UnE^h6?U?NT3o<g!akAG=U{B%(uxG!YXBHxo
zSEmi`=l=4kOLu_#o?IMJ&fbC9S!nPnk6X~ILXTCbkn2s#q_Hvr)|{LS`QH@C2H7}>
zG0uUW@OTig9Qe<5+B7J;0Zz39VMf|nj`JTtjMqni?}2KtQe1)dJ>l$g`V8D-U!q;S
zHC4IX1vjrg#hk|TeDJnp$QsMs4lF}bo9~M`0dF|t^*2DFl?UOKaDIBmYizpo17$qT
zsX$4V+iAQU{MOj>-OGQ#fq9DbwTv;T3)O}uea1Hszr)c7cA+6V56LqJdt$K_7#&t8
z+3RdbM`sFm<KZvJ8J~`l8AEve3-55qPv#iQzQ@-l4#W2Q-*CczN&NA@gXqrYU7-As
zu@mOM;LQyOkuj}FEbD22wVA=_J=u=8HysTx-U*2NgX6I3sx956G>Gahalp~?zaf1m
zMY$0Xkhgmb-q@AO{2Eiyz@(E?AIY5C-+w@=pDE{eauWCRF~!D>qoBH}j(Jf1r56~x
zYThbqqU~~xu~Gk^)7xF*KK~~e=z0?GrUfyFyEC4yet{8eZu4|e5tjXm;KGM06aT*_
z;kk($+?{d`KI~E^``*@JPmLbEHzf)~SVwgIU=!Lf_z!x&c*Og=&cac@m~V9SCH_*m
zfaY%h3QdpF(S8ME?y+pZ9F;Dd;qVTk+=k+l8w1IP>Ie{YtD;l$d91!^K~}J?!h|?y
z^ka^}mbq-_=I|c7)U06GWJ3&?eVv<QI}u_SGiB8!=2kf&;a4!u)V%Pki1OQDM#ErC
zxHkcvHtNIjUKJ86`xLyaukxpvb6PTQ67M(qF*i7Zd2#Nvg6A!kF&lP+M_+wfa#5K)
zFi<7M;}nSdBxT0`yola?C0v)q0(8#|LF;`iOX?JkqSu{LAC}A8a{nuo9c118Dh+U+
z+K8Rc<f(M{N62YN5YGux!L8dFJ4(@v7x_QsGV86W+#ns=6H(6BXESf3wlW`AStE5j
z!-^-z?P31zZgBdK7vHFU0WY&`$Oy{j0(10fW$}4#QeG5BJKp0b7*2+*5$d$z&LK!2
z{{z<C{(}?P;~wX-lQ~}W==4WQWP38pM{MqpO0El`?%Ze2dHz$*WThD~$n${Cg&!em
z?@P$39>Uyp3DQoxeuz660P~mw&UIcS@OOp8FVc>iGd2+#T<(L@S|_pB<}xnWum!tq
z*;%#sHzvzW#kzzy;;8WN;J2=XUlCRTUgNVk+aK1n<th(h{z4E;v*vS7HQ?>ZFHxdg
z?3g;>0WbWzAJ+w#lJyQ}Ikg@u#>|cZ&FwbCu=g`MN1bNuBSSE(s)TQWro>x#0_=X4
z;YUsOZ}et6x`m!ag)2EwA7)Cmw|oP^;yq9ow}LmlR}Dk_UVumcF<cQ^gLqdC6%+~n
zTv>_tY%R!k<qjx2z7}oSa}|9(iSK#03)d?|@Cj26qWA1Us8Gnb(H|axiBkc3o_~hI
zpV933ufXI}Qk>{5r1u6IlV0^!xO55t-??DS)Wfi&%aD+WL8M3_7NdWkLwR30Dwrkj
zDC*h-;?fR`3_gUu%R<nue<0@fYtrXGmx5K?W%xr1Fv9vF=-#QvpzSg=J<AgZTN{u|
znd&&ysu#1C<w6PL;R@!ea`!*qgQE+L@#hso8ohZTYS%Ld?WPatU4NM?I?xP$a<!Z|
z`T#}_e2bmed6=24N`BTzu+fjXyEkZ3L(?CSCO-gfrYVs9V^|)jPe|KrOwhxEd1rE;
zO&F3t0n0Bwg>ze@(B(fLjI$Sm_wzLIV!<1heZ7pMd~-45g)MD*_y9ZunIoq+5!@~u
z#l7A7^kSkVncDsg1$7t1bN-x%u83|FUwI66W<lV=o|hco1Jb&s5(r+M&gL*@G45a<
zm$v;b$QBiViAY3TlC|j82aM^xwj7jlPH>^6%qhk8W?|1BaKpL<c$sG`)1_k|<&^@v
z`|A+>Lyb`IH3gd9wxiw;d9rlHZ=7iV6?<mp@B*Qa^vYyavgLR!9K6toDJD5+d{G4j
z_O^}z`?~qWvuj}3pL+C`xeLN=1^j_!Y=7gw61Kc8fPLyVxJ-U7#9d6_3>iNl?DRC4
zz+K^=AG!$ftaH$up^x@sMqz_-6&_==q~E<3w52EtHaus4)?gX>MH!&z(^sjaiLuxh
z`*L-MbojdHb=)D@Qp|`Q4(DEqh=%AHYKRu1Ah9cRz{3i(*3Lw?*UWG6%?lW<mF>7C
zIP-i3t_iO}$!In9j#I+RFYe*3c6H)(zgR5zc2In{w*drs`QnhafyA?H7uGdBLJP+1
zwJ*rzCOl^SHVb*GXQWH5-&)dGHtW$aEQXE)O3>jy2E5BC>vw5!uRYC3d8av*tUt>)
zPEUeG`He{W(lN4d6O8IMB<`=*Lb`Mk4hU2royCmDZEK5_M1|sce^4rl<$c=nu{13N
z6@C>%+{h*DyO0kZKiA@)1~y|`b`tJx(?P$ZU-^*SPK<6`2(zQD$^L#@BGkIU`r6D1
z_D%&$v6FQHvOp5oA{Ctb#=SK^gSXXUF>%iyZ0563_DwAm?QDZpwlQcS6;RVp!C>iX
zNR20&(C(#GI6t`*GFHCf;@)=hUf;uc-HVKO(yvd9`5n+zyc~132*qI*@m%COIoNXV
z3GA@YA?Fwurf%$X&frok_oUZ~ED)=ZukS+f_sL*fG2{z~_k3jUxJv2XadI^H5ZkGa
zcjiUejP)L<O3S0ZLEWYa;O?tO=WJODrG2S5`dTsSZ|w$?e<E_W(u9Z>zm}F;{RZKt
zb==TXvSfRr6<sed2l<$%81?fgdbc=0a!v%~oOKdUo}o;7<W(6bbg{TDwS=>;9Z0)n
zg26$9<=UO3@Yu_iWHm=ZXMYK5Wtfsqr9Uuqwt!4~G6s!qs?v?@-zoV~5D5Ri=8tbW
zgu;*{IMOnJP8)4bE7$7cY#S{y+e4lNnhvCi<@dPPyV~)-d?u`5J+}jgo1n~m5EYur
zU|AA@3cUez@di~AXJf~A$XwvL1S4|eMKjjL{1k`gn9^_aZOHGB3MB5^3*LZrWRx05
zz`E%sB%mUWpW13hMX4Up;4Pr`^(LHxTOSOX!ukg9^HK1ea*-KYcp~;SG_qZFIUf$D
z;+HT{trqGws-qn#;=N+8ag%rJl8H|~LyBc8=490Hyn{ZyKUkBdmRf`B^?qFUM3cxn
zDicA}txWHM1yY3&##;_z+_Cvtm@sPs1`g<hgd4x`;XVOLW7$jn><wtJsfD}ug0Ue!
zX2blVNR)iO0(16XWQ=kp*mtTQzx=YL)$AE`uDJp}t9l`BSqiE&e#BarYEV&ag0T~{
zsqd#B_}E{8w&?x^!FQ?Sf{F6v@>Z7d9r6slj)rrBKa3x?I+7o|;y<j@U&jp_v;!w}
zT;qH9{(=U(OAxGW3!x{Bp({t1^#*&O=XnS;FTV&aYxBW=asw}@kCNsr9LQZ+5{?FE
zra{ee0bT5@NOG2rmD;>qhWAC<w8lXOQXhWe|D`jJFtB^)wq9N^i{kJlgGh&LAvf}$
z6`AzVk_OqH2CJ~)&|UW&dM<zEQ}d*}T*Y?0Ybb+$Tkn7`UjsF2hv78KB(|^Yh51u%
z;TpHqAe&Hv-Wy~1QC$>+4jhA3%Go%f?L9iav7kpURN&Q%L97Q_B`%wr$S2;~j2-G%
zApnZ`y2i88&gYZxzC{CcS{T!g4hLQ`x-|2!_G!ppr9dRTr=^nVY23;S>B#?PyUm_E
zu*r5k<|#~q#?!6LlVe94#(V-f7Z+$e_XY!J56AlRsx&pSj~~<=g>Tg>G4}d**i&pm
z+*V#kgNtAIOEeCeOrOCAw)?r`XGWVRE91IZ%4GYt>kwITAIsVO!XV==X80yR(Zd=L
z_AB7m%l&K*z8qegv;C(n2O{h3d~U}#*mYDy1lbvx0d?2-a#0WT`<{St)0C-b{we9t
zzIc@EmauN59DV&Sf(a5lU|$8m_^HgV_Og&4t?~`CR)xYzHlOJqbQ8wbUxZ#ZGc#1a
zhGDbSIj@9+kYC=9vfUvNl6MoWuf0Y;hmqX)6lFRtQAFw*61eMPd1}`@73%*B#!=gZ
z<an?uDTztPE&Y!{skw&}ZZv{VEZ3DkRf{wWb?H${U2Jq%0^24i)07X^r0nn%*xX}8
z;%8-p!_H=WH&~J6UMK|xmY+ZD9}m_kMpSfaAX?1L2Q&6=n%auEu<kV$_>M-e0h2KI
z&~bG6^8lPy2aElKV<GbG5Oi)8zzuzE>i%*dC-kY{4sYy-K@*Rl=(7$taep(6YG5vt
zRoS?$wik`ROaake18#;wJ|z2;VU9)@=R8uvg~cA`1#Cx^UVI4z9sS%4;SD^cgQ(SD
zOe+WL!(Z$17}hcZlzyM(40}Vt`R{)4H<YE-uBvqX$v&<%IuDh@m6_b(54H!c#w6yq
z?66V--*XRP*vTRkjcb(d>SD7Dh4tJU>0?}WYB(IYM&Vz$Drq@a3O`s6zsvtRbiFZw
z;udRiVy6^7CEjIy;A^lxAOsRO$)Wbtzi=Qo1H8xGhX2wP$>v3t^zE}|^wN`qJsD}Z
z@6L7*M1JPS`PAc@;tkL<D~KDs_bhhUc4JiF3z)+g5CIvLyl_qr8hNPDM~}5=+>%6T
z(}y|ux5gHiWJH0h&M%x2l7KG%!XV8~2g8aR`LTD$<D2|*kh9)ETw^d6qUYS;J=eW}
z&eUNzy+DKXY}^G^v)Fxso!|8t*L&4rS@P_k4GGLH1HTcc;84au+H|`LCZx@Rp5HpO
zH>DB`HQs_bb9G8Iw=zHBCU_H23d8Qb!_am|s0>U%%O%mc##^7JOA@fI`U$t;u^Z-$
zxx($^Y~Z<q8`^6MXw0Zau!``;4IlF`r$2<h-egRY779r8n`@k+izfYMz`Wqwa#8qi
zBPSS;kQw`shr1<C&>`lyHM6|2<G38ZLH-+TDt3d>IbYdqgmTvs)k*)&5nx>H4IcLg
zF;`VYX3(W8F#6#qG~H1JL97cL8<>S*y%wCn<%{F`=*OJjdR;hixec6}4XEfdFJ8Eb
zWke1R!p+2thOnNa!?;ey%B{$BYr2fWrWug9{S0TI8jF#Cm*KwJg>c&G6~1@3CJED2
zkdx6N!I!SFJyanV`kA?|Y})zK0mq;)Kb%XA9mTibtOiR~@|V6V1cT%nZgWf!7S+j9
zqw5Kn{Fu!s&)<Omq7u>R{ul8h{rfnNy>D%HN${L4+nJTDVl2&<(!y;u+<~|Kuz3LE
z%soj2wIiz3%drY?F;79==f`}2;$w`?4&fHPyoNTe*BBdsIki_F!!UzOaD>+(E5<Lu
z;TJ85%eiB457KbYB<6TASB8S`XCZL1D`xvKrov$6`O}ppj~3cefx<sWG24%JF>fPj
z9YAjEj>hN;Lpc6KpQiT5qOsj(G$?<<4_vK9B5O`y<mfRFm3$9mjxg@U15FtEjz`Ij
zQQYW{<C&L5L@yd!k`v3>eR^Iimh;un(b<7@(o<sb<)^TQ%@@Vm`C!$x0W~5Kf_Bxi
zzw=XZOv81Ue~5XE!(%}s(Gee8>eGzpGSvON7N6tNjnylzp>cCJ8g6L^pFgSa&8z?$
z4~>Mw^P=H&yBB&d^v1~rMx?ChEuUf0%T2o?V7bB9nK`~o#ODe$$sAK(u$ZAi7j3zY
zL3=ylZ&U;J>}tm7AR)au{RTGchjTy7w8$J46G*X1L=PQ(I_4$w#7s>^xt)fX<=%^l
z>lVP8p>ovwL=ive-vIJqtSn(B5h8i1Dvg^m1Zp1GVFjCcxReA#gq0l~c4sc+KbNN)
zV#3)N_$sGs(}mmj{DqjYBQdUp`QmE+aXt+_FjQBQ9C{}rMU%fW4>e<Sd+x{6+qz-D
zcQXDbn~0HVjLo4^#TcALoaEI3>Eko1Br&*vKk8<R=2vCOe(nYWbBw>bI}FA?5ix%V
zFBMu(gdh{fpiqnC#xo9td-Gq;$x%(L!uo@{DyC?>cO)8A1#zMk=cQApYSELE29cUs
zQy|Kcxffa|!3)-h<TqQ9I^$)WQa-x}dt`F~{Xcn?hp)hS%M%W=-{Y7I##A62?ihCF
zB)9$g325uE#<@C-mGJEaZ1AjtmhN~=pWg?+4_T4hCqq!OOhj#}nDguEOK9%>%A1Zj
zh&Gv{u=Zpe)Mcegk3TA5Y;6H(sys&TH_E)@Z!<EmOGNzsGL~rNA+WFCf!=5IVC=Oe
z(8<_|0?$L@p`}`+F11`-dc>CIi^K7wr#x9=Qx8X7Cxb_M7wQhz0y5KvYUuJHKSqN(
z&tlmap&?@!-xxouP>pPf{tem}-$Uc5&G1K{Led}SqQ?V6sC;#p8#XQ;qPj1DO(MJh
ze{aSM<Ftu;-+tbH-b^lTTcp%$I=e5FBtoK$g!^;IoU{ckhrnkpxagxLX*{EcQ97FR
znGBC7m)MZg`=5i;MH!6U{22=C{&6<@$D?2SAt?NGgOmK4BEEP_gGlym;$O{7hIMNO
zkVxYr5FNG(6h^q?if`J~Vtoh<@0B6F`ZLizVijl4*zTOHCe_(iiA$b*Wvsh+khreq
zmmGJ%jQH=IX!J?x{4;l9;?ziR`N3QnrJKcJwqgu?5CVe2PZLHk-mt*|4oXE~5E;j2
z>%9m0uuJ#3)Eiguz;VVo>dS_J`{B${CxJJP@1V)K1kT?6i9OLt*x|68YdZS{6+V4{
zdFl$J@G$cKR82|c?pM4ZXumk<z8$*HljqB_xAGJBv25+b(J*1%X}-lE94%Irf$NM?
zDD|y@uNv3TX?mns65Q@s=NkgC9X#%QEkb{*FOVv?fq%dGD2@^s63f2Z7#I1Ov-2{i
zpH8vOJ4+F3yUoPI+%ssAzJXAB8aMB;B}>W~13C9F+BsFTPDlafv~A(LlGd{OUJ^)e
zg+c(P@Hw2PSl~Ai>W+s<t5SZzw(~#Gd2JH!5YC><yPeRzydOHc_Tu%*=UCxxPE(z?
zb3@$1K=`p1JM(SO=s(7TZTZ1T{uVl_253;9?<tV~g0aV?h9oq6J7j<ZIM46nDjVB*
zgGyCMd&2tqhn%@Z%>5$0%<}Im{$bKd=H9qIRQhE~8>*6bsHA^`|B<3dbl0ANg{>;Y
z-r0;sr(WfEB-NnSrW3g181v$GMDtD!iPF$}9x%zB`4n>tpzNa$*EAs?3VZz-&%%sy
zJ5<=WqykFjGIu!ZAsd*z;Zt`X<;K1Wfy1kQ;l5xCI%LlYaQM3&$K5uixfPAr8em8b
zCQgFBQOt$X7mlbj9+Hi!(A(69AM)=Jo^(^9iCVFI<kn8~i%8_0Ivz?5Gz<C1x;1DT
zs!xCJG9c>`Oo->e9PlcU@bj<e(weSTq^HbDfQkj${Yrq!j!T@wxJ1|hY!?`E9Qx;t
zf+1nC;J@G}<b8?)Cyzq@V*h!xIMM}qk4!<wpE)e=*z#0Hf#?mCBmUW!QNkRFVJx3^
zWnd@sNY6pLA{Uf@u0?K&b;<81HYDfUcCjCJ^G_dyqid-YqBo}TlH#r4XY`z#6|GBW
z?0*8s7uu1q&x)L3J9F=NbJ7FVPw+X7Lnk*C@w73=asH$iATivD-&sa8P*sh3?oNTa
zwrPBil?jc_WWFiQcwE3bMR%VahYjl@(Pk^_&^~&?`svm<^1LDu{oTq5_8j64F!sqa
z!sZh9gPBYAmw1<!1`!^+$c4Q)#Kr%Uf++U2v@))gtBcn{zp+Ym?I%OxJ^Z<J{(#>Y
z{`5XN-y>k(l*~uiKZ7Q3O?tws0-rgi<CrZhH=4wAlFSX<?EI71+$BdH^6K$FXG603
zt_q3U^PaEUXn<m+TwG;oLj9(`<jfYcZ}Sz#-JU#@Pg}*~(e2~GsimLqR=<Fwm!5;3
zvRZ6z*?~?L=b>`iHVh284(@y7dBLku;-}ZnpkLZs-X<&?npd)3%8v$dz{wMQMYe!w
z5gY2grb`_53An(^_0Sz20UaZ4xmw0(gzwg9uQ3*8)feFJMaJY{p&9MS=;J#Wb6b?9
z!KanSpr_Lvh#P)Xdi*MTzvnBXuF@n}ljh1eVYgvoGt29wUF6G-4dz5^7@KZc12(cu
zX~deBkZ$0FqSmpzoQ(uT3o`iw%|#IX+>$wcmSLNkHeIr=2G{JH45L_1cf}nZBxV{k
za@8i>qFs&M(bagFaXi#&ozUvlXwdub3x4Y|BD9&E`;&_>b+{QdUD*!fT9wG2DR$)2
z!3EH@%N(7Q&3S>{`-}^+H}H+~H*g-Eg5HcbY#}=y^*?FR(Qen^Z}~BZlaGS_tzsO4
z!_Zz>$?p!eB?9$<;N5!)3U*WI)U&4I&H2z4qC)*MUclD*8YECV7$)buK=smi#`*6M
zOEdbRf20M3P4tAyfH81(tR?jveI7hEYvR_oYNY4FUEZUh3U?1;UXTYC^qviy(<xCH
zucknZo~jW+q$nfD*j_y0(H#id-wN*2U!gbi%1TZw=lws&LV5jj7+9c6PCp8V#Ld&;
zwlDJ}4u8TYjZ-3q#=YS5<TYRKZ$g9aNI?%vP*|~=U%JSOE^zpPPAAl<MneeJ$;N_&
zgn)8+DEg%6(nvNd@qZi3?#E(4h08Ff(GdPW7r!yx4m(;j&_pUA&i(peGEJX$B;SAy
zCWj%RP8&D9^9I2se@D1!33heQx!XxexX4hRG`bD|tv7FR#>YxHpcDaCKDR&=YCs2V
zsKNqn5~hEon3a?TrEwkDw_2BOV*Ol=3!|{??`^KScK|sZ<&1aaEJ=5%9WD;mBTfb!
zZ(rQNtx-~^={qXmz@CLDG!sMEhaGV3ur^8cxq!_~k2Ov*kYve(VZ-r{XgEI!79TVs
z8~t-JGI=koFHYbr3Ui@(Qaq#$3xM|{s+n6?fsWj1N)o@H;W}h5^K~aza&we8ykGwv
zjy?#5I`=@Zw<+XPJ42v+G4sMQkIG~}70RDt^B(4WbKd=h6W_^#gyqZ=d`zDdOx=yI
z%C+cIEfrG9c=AT;O{wtGF+Mpy9LL1o!giN-_-o9bTkTRj(<e*vyNfW<YBRjQBqF&h
z(m^e;7b9GgVWqn%(L0)rMFULe=WD@?2j#@yJ!A-`ng}-d50a~MAoNHCRzWU!2ggfY
z{&j=k-yW14xF8OA@`S%yqe-@W_=zTw1~hHFA}yU|&Ax+oxY9+8A@f?9DiywD^9Td}
ztaUAZbkQP-H*IK~eLEkrSdRuWVPAK|Es&h!aD($3C>olI_l^EQj;5M;=|m~sDv=?H
z&br_~q8gl2qPVvnML2!qTNJi1cZrQ2O}VE<E^{x@zj-pExsYNg^Z2ct2d*i_@K35q
z62H9Ug}Ntsf%DnS;Zrn7L5dzt?p};{OKs?irJo^pMkw?*j)A@IchPEyK67vQaKGLv
zP`Qd8jMQ*KCq)HX7|im&I&4mvAjfB1iv-<U!(e_rW0MWJimTkyaLeyObY86~scpUk
z;RRM?zEeIN4Y~m-NdrmaxJ<_QQKgrnE<*QfZE9B;$*)PBf$paBAxifHY;dxp9AnvE
zT%}GvI;l|abGO99=d*oWqZz_4LoB^(MFZZ8=<r*u*mU7LKJSvHyY^a<+GUwAX{IT?
zW|WP_8|?5^5Wt4T^^l{;_5l~0p*ypi@ekB7<MauxcH&t)s;<nkpz1X0^D?xX@}4`)
z_e1u@HoP5Q0m24%s2OO4oe$69aCXm-UC;rpJ9sF+Y>Dxo|AN%|7RF7k5f2M?ffWr!
z;M&ey?PU~Bhp_!mWiNQIeu;^<M=<8gzYO8+P}n(k7^cP+ao+okX_i+7RE1`P;P5|w
z;~g{FSEou83=vER+=MG$wRmQE1P;@WgMcYt`SOXG=*#x4QOrTskb__}!JN#}u_HMh
zhCoL|;hHU%AvACkHm2IpIS-eB+~7>i>2Bxbs<twQ>rZj6?Qk@l#lzp1<uD|GopYpj
zSl97C@h0s^sO<a3#cI4p$wqzdez85GogDp9*N6$#)8O#&4%Q!i$J;)!pm$4r!RpaW
zv`I<@Fz&`RS#u%d3}ZM8e0dAkOeh<&4!pciL12q2b<33{*>?ogVCGpqDC-w^zYfRL
z4V!o`d2_6MJ^*5@(!st)%Aw6_4ALEs&KG*|S*!sS{!ZXLehq`l9mY6xu`=rdvAM~m
z1Zl{OY`iqJ4})~tz*<q2G0jt8!6ggkhUB=MM-%wMsp0(X<2)4XxQ?AoQV>6T2eMcH
zW1NmRShLg~(u{_}DXK=k<*Ja79@cx^U5$-*mSOG2QE1gKLlgRj;F4k0&~hk_<siyA
z<wiA<o7@AE5(n}6*Mq^_T8}#2y)3=3mN5!EUSibEH?V$7B!5{|nQ;0yFn8V>%$;!r
zjuKfatXTnmUelngQG;eFy@krdy<Bw@`>o!O;}UKt!AQ3ES)^=E^Cl~SKq=_a#VW=i
z-`9;kD?%YHT89?B>Bd)EZ=yPX5-TN^Ah0?wp3>0=##+CyFrtp{PCEtm<KJ_`dNrwN
zfgP{=Z6iAiGFDXoYkVcQ03RKT!x1tQ(SCR@-*H6-O=bQ;clQ?%%(Ca6%+?`s+p@$n
zjTH#vU_-~BS&+0*nP%vAajAvBIGcY4bXn_iJhAO5v~-Qadv6Ak!+Ar{$>9s0|D{L9
z|6#MvOJC8c*q2Z1^TG8*4wbZ0aYR}Q*5%%YU%zyTc5y2>%*#Wkwf%g*sUme>F%3_A
zaAORSxxB#PkMw!PJIq*7%DZnn$?dy512#HXkgl$$czDr0SoMLiI3yeR(1xoZu=~Rq
zmo>8tk}4goaRfp<+1>iY1=LM)g`OA@miP%t+&CZ9i83Z`?^)+2d_J$?oCB%FhdE*N
zX3k*SRcx`*rU`7vb^7EU{B^7ayGGXHI(F`~`tlnej4&n^J$cNb+Qa9JTP&9T_zZsy
z2GC`zCxC0XCDmTniJFhKiPv~t9Q=q!=N|@m@1HC+_#FdIO=Ec7J*p^NrUiAr|D)(k
z{9^3dI6kF)Gwq9(DQyVJ^3>edAtBjAb`e5)EZKUpWeN$Q2&E*XRVgYp_jO2;q)3|-
z2}za`QYqy<??2#^&&=HSIoI|3eZSsZ^5;x$!pIybR`futd4^>6WOs0oDTGy~VL0%p
z3j0=k!1zJur9~lYaK~LkYI^$-Tr*K4=Kq+n{dzHfAtVdC#x+3mICjQQwE>TtshD7<
ziQf*Wk?NZ>AoD^r@5-2dD$|u{LE#>$@l_Euy3-7f&z|u=dg7r~KZz3sJm3VD!<<y6
zHzCU9VS&Lju~teq+&e2zWUSPP8vTXEj{@-C%O@Cbp3bE`vLJ$EYdIO#Yf`aEWqv<f
zNEkI4X2=?l#y3|ncB4M@l<Yu(#}ux0;{+(I>Ve@a&OpnND_A*^v3kqqVy^uVTCm=V
z4@=Rcsay%xhaZMBkIk_)=q)}CsDg<eA}UP!!NuMkV12L#6byCD7f&~%`W^xzIq{Rr
zla0rh8z1ARJwq7VmN9al3AtT=I&dH3FLqiQ;Nr$NxH4!8Sfso~t@3Kn9<4y?(~`i=
zU@}~+XW##>rJQ2acl7^z0t>#4<L#wCx#|xm*mt0byL<L6*e{vDJKlN16)jY!oqvpQ
zp3h)1YvX&|@UITFUuwyJTQY=(Pt~NOLVn@vC?&EzLY*uaEkh&!W!d^;!%%1MP}b`$
zgz>C1q3L-6?SJmzzIFp{=~5)0Rac<3e;fGejsb(}f1zVd1Z?UtBY*8R=n0S~TAF=u
zZzc0MHk;GP7;T#URh>F`Nug`xU2r_8j)J%l&TDxiuWT}ibTqJxZ|P`$Tt^aYG7m+G
z)lhy(wI!MSV<?H2S%*T4ahzYrVcuo=2WVkgoPxM?@yZkrOxO~GYxn5VMX`6#<l7*s
z<0k;0xi2}txar^^xeMnQGcezub&#z+VHa~}S1=EA<h&wmUCn$_0iPh*$BGJ6%f+=>
z7f}4-GUVp8V&T{qQ1-7O1`hoKUc1e~UFwfz#*UcZuSzr^0WPx)$RDvj%?vyQm*w>6
zS7`;V9J&l5?-@ap=|V0wArn(a+`=ZUYCOEV1uJ~U;aGLV{a-G^n_zQt+$o#6+Ge5r
zkPuX4d)YC8Y@S<s5t_wu;OhDVmIKT8G|Pf@$4~IzvI+y_MuBv?9d>HVgZ(upK=YN*
z6Zw=6ozu>_-f4ux>rX(q_bzaDz07&mrl7E36PIz~D2&TH1Rm|PF(8F<FZ|k&nky2)
zT2=5Fdz)*Y?TA|DC&4}K5}%~1PGaY}W8(Dp7(UL1Yof;7PEB3n(sCUOGD<jkFJo%!
zt4O0ZSz_&=_Yl6;72dV5dG&E8zVnj>6;3nZO#Wz7lQCb=CM5@z4&A_KgND!#=DMVj
z`EOO(vsNwXI7AKni+W~Km@C^2dzEg$$Tkfkx8*fvzqf#r^xK>?ydEY#Zih?Lhti@5
z2Xw10<X*j2!t6gnI(N`4d{cH0tCQuZ0Y2tye=;}Nnq~|YUFR}K^|14VE?3(giCt>p
zD7<LNpYcq=#E<o8pE3q+mVd*!{fgAD`ZGUokriz@a}>g+8`3%*11jlD&3`uH7<QN{
zag0)l#YyVaIH?zxffjLeJHh`7)T1wyEotxm252385Z|QM;Fzm=WNp4VHL8}UfqFSG
zG3zh%w9Ui3|E6R7%-PWT<`ifD-$(4#VXnPnZJ0bPn15Buc14>R>nnE-JUF6Do_2S_
zde)ykG9V%r`jPOvy$%!e*nJ^28)Dmr(59WcA?%_S-N9T;i}NSp@uW@I1OI_**=_t8
zJA|b9E0apkDu_2Ml>S?zM21aa+{{^jdHL^_q~>}I9@k~rXw_NVA$Qin9eWJ?uIaJ-
zc_>u)S8?L7(=b+f2x)I*4EtjiRGJeCRS#Idxe+jGSs46Yq(jG!A3`Om>#^wd5VX&T
z;yXPIaBgK0ba_9<33@ZI=R+qa6|uYTW>f06C7Uz<BSXsnO7Y>5HoPCCMqLEu%>8~J
zB@v7H=!vqV#U_yD`GW8ldx!a+_|4vJW{$^L-*2Mu1th&UqiU;E!0&=3?<Xq2Bm2zB
z0xN4;VS1n2R-{P$$Jc<}0TIUE`N%n}$Or4zKj3<=0B+1vpg)q_(Tm!_(#N6L>%ej+
z^A6+FXK65KNCEnWH$w81IMltNL~dxyf^GA6?sHZkR)p8^CUSa2$vTYds;&WXdM;Rp
zR)XMTYyN-Xs&r<dCDsR2p?r`QStA)t{*<2qxvV9Scj*#WH|YSFT=;+mUqbm)&Tn9)
zZ7t4c3`^@ZFEGseJ|2xVB6rIpVZiAF#81D;DYR9Ck8CNw`AZ164X)urLmRPeO9WO_
z+2P!xKR6Bs(U%VCShU-KKJ}5OLMtQm6Tj!f7ddf<RW#vD5@S8<ZG?UKchL39RkX+o
zg7^c!_#2TbG)0>+gJ)$y`Ft6&^kEKX0k7cx5kr#m(2y*eI*24?%aceIW7<11m+g|}
ziRESi{wmU<XPJuvQ_QHaFoYMl7CDX%JciaG2-VZwA$p`9?M6A0&7OlVyfuidE&H1)
zGdWxDv%KG?R{qr&H;hppN{p4h<Knn?SZy8!BG)L$o4gaBPc$bqT%C9wkK@WzJTUo?
zJLuVW!}8lzu%=s!v^%|mRji*K@J<fHOQKNDRD%kt!}8|H>ygT4c5l_SCBmHd%vqDk
zxotnh8y~B}pKEQ1bC4ldwkZN6Aq&N38p<T-z*T%#hZwfC1fO1h2p$*PaW#aauaiFY
z>uBQDF0fsb+J5*t=09kbM#E{w;u7Y$@OgXyXvM~WBqEga+pv?f%GO}}g>1+?E#d3~
z)G^ohBpCh90~Z@b`YkX8mD1(m{GLj9SbYQqk!t+MAhzE+RxNgjQ)B%0KoHFr^3`lc
z2|^?6!f)_i&XlYlZB4v9^r*(SSIleS4T1@8ob*O#LG|KMm|szYxiaRo!$O|dx)%oL
zk7-dsYy_8KYfE8p0y{^U(9COXoY1h46RO*B;wgU_|6m*#u)AINl(V>Wk~3)RABk>P
zRZzt&7E10mb61E7xo@3=f?j8SmxB)|J?!B$9Suo=o4h#u_7*O0Q92hq*bftzl!L?A
zBWU2f1@j(n1FwiK{=jfsN=C{N=V-<O-IC9H$^GE>_gmAG;|xh?W&=0HZV(~KX(%dN
zz{#37z?_1?WUAeFT<cf`ZhQTqYrZZ0>p6hVet-EK_0RZF#fT1yW%G3fJ<Ou%SnzWj
zKW?lQ6+N)w-A;@_FOwhQnO-F@&S4E2gl&T4c7I&X_&fGi@-RPwv8X$*<JUK-%!Pa%
z1`g@a!OR6PYW^R{lf;63**=I{bqtz|7r-6XQwdns%Xj}^PP}msu$B3px;$>7+Ghd%
zc1??}Vg2P1zNYjhb60e1IKgGA3Ap<73Y?v_6%%Lw!tmj)oX}<&XvFK&kuD}IH>5{|
zkvaTH=A0YQs)8`T!BEG1dG%W#g8-K2u>mgmY{Q%#m386;pSwWMO`FDkYD4#Db}-LL
zfp&zdbN(%1EU$l_6J6d4!rA4VpOG#du6Pb4s_*&A&TrURIh1CPnFF63m}?c5p(<LD
zQ?62wEU}ZeGd6W~z%`r@$vDpgS}-%R7uUMX#dk?|m^ZA1@0qb2E9&O)&c7VR-t7SH
z$G&g^HBWx;$!eJWdJ;5^IVT;*c4!@{g}ihGI}a=x1wM&s+{k0`IBl&0eex&=k6(EM
z>LPQ}dp#O=$!ODIn)0M4@-^pn>nFF;LKow;2E>wNTkv_NiGr<DFuvg}uGCCnd*d)P
zi>ts4)*1C$6$zmOPxz1t1yJxqhh8g|BYxSJA@0~bY#BZgN4ne4SZ_}p^<n@!GX|sn
zzv}es0!MJ(c7ZQu*<8o%N>pLL4n4JS1xDRE1eUxL7@6Dw>#o&kJj(<Gt24Rx??2#z
z7REz*r_CpO?t&0Q_ReLjh->P~^vzmbQmj}4>T3tnt2KtycDe}!|Mv_JKfVl(Ilpn&
zgd}Lv3xSu{%!n7%i7V2Lv31&2ZvC$kbUeEq7vC|aJGMN7cd;*FiGmrG^mvFr%M3u*
z1`l-aRcCX^0kOucw`jUYg3D#Tf;YP}=SIrVORh%b#+LDz`cDVU{`ei`j*G$f&L!09
zdj>^cSq9rhk(5|Iz?QprU__P-afn`l;d2*4`~?YkZWa>jh#OG3+!9xOx1eFUi=p@I
z5wI^7a}tZTe6Rfvqy<(Zxw{KafuF}}6cxF0>sKg{iZ@#z@7`5D;zkbo6*3QSzZ|Wu
z*#aFa-|~&chIEiT%ep_zgrfVJxbE>woEQ)TEv<LpftEI1zsZPne3OCLxtBov;R8gi
zK8gRBD3Y=ZhIsT6W4oku^0xoFK=|V&Tp#1x8pYLP{6)4CdP(@|b2@l=*jZfp<U0Bs
zwM7y02EV%6fnP(%gWto&ypZLs=63%8<xQ&e?!qd@8|dS*j!1C$p}P>QZ-WBGt<anB
z0J?&Y!|5sA_#<x-7Pcx-LGDyYy`)PuC+U*&i+|&*pv`dcco&5Gec~2(snOWeM?g23
zvDV_h;<Pfh?-=@;pPBFtGMBPFn|P;mKD$2z9Qugs<nH1XPjxal;t2NNPr(Iw16Y{-
z3B~_>0ejAoPiOn@Ck|n_t%=PYe?5iBpQG{MlEGx}=Q5BqD1ppKA-S3#fqv05I8p6Y
z@G1$D{uU;oUkBT-Bz188Huunbl{&p|uSQD0j6iEeRhrIn8s819NvF3d>^PMRE+H)A
zLE?Ga+l!#qZZ&RO!e(wCzrv&3q15lu4Ng>Wo}bWr0(=JIxcEJ5*^Z@zf617ZhrbAE
z^0i~2F_HBij6U&W?PQ2##6vE%4dLa19$aa^2nx%$;uwh>@$x)@w}lF127ed}JDOpl
zOfN3^?+Gq?tWMtfR)gU5I%$)S0cUUO#V;LqitE|$4PL({ajD8JaC)>gt$nCKSF>DF
zY#j5+<oiI$cw?08yX#~w6_Tyh&EPU{1Y3XCa(jP%2jTrmyjQm_4KBL|@!lKx_^BFP
z{(jc!bj*d=fBkV`fslYv0NT!VMB!w4PSDzWN8rBi&Z-Sx(PTmd9vq=Wa<=G`-Sf-X
z^Gg|{{xzhn;}bx#{hTzzU!UF^&tqtM2{-rfaoCk1%lf75XmwPJOcibgwS0A0YX6M)
zStEjMhghh*#rC9U#&AdaL-2{bCGnEc#;rv~jAiSOe&^(<yMZ?+Y>)wO3)Vf?F(UD;
zFQwseVlG+11Ts5E^BHr`q9Ah+jM|?IqT9#09>sf{t$sO|`d~4Rv19qnhdKOL+a{bC
zbQG+1{AJz3-w^IH4TBXgp<eYZC{3(^-|A2BDB~GT@UMgTR>tcIIL}$FHzu7M`*299
z4lUz37-P)#I$9Jxn*{_*zoSM{GjxRUTuqN9Nlxuxd~n8XkEr4mk0_DJ6V=I?uzavM
zc^M^c@lM9-E%@r$S$sI%oRsa%!+t|e(#sf2(=`%V4~*@hEGscSMv1dOH-ophcpq2u
z`50VPf(y}@G?s3{F-eRW;%&^ijeN%$pZSD>6ZT>s<0Jfyt+LSczLzs{`U3MtnA4Vk
zR7eAsp$KTlf7l%V-i2VsHJFVrJ7s81DPt@;v)N?w5$OMGMN;k>k$0Yem%rqpd%G(q
zXtR{E#0l#KTn9;ao|D(rot&#tI%rLgfdVHVaRCkCwTezcoH!NYhpgpXESsTD6bUV#
z%E5c8C0sUBq5D|h-)prx%aGOZTiW$#d)Q4jFS!X@2W4UXwM3L$bcFDYu5iAg10+`;
z@LMx-A<sd;_6S#`&t=R=$F)A*-Q*|l%%{VCI|&@}v?7Q6O=-4B41$LfoIr0G-W$(6
zgt9`C>86jXb9IQUqYvtQxrNS4&hQ`X)6p%dl~;Q&4^CT*N$bW0>^)Wr)@-jkwf`x`
zg$99EMn8MsZE#XC=|mN64H`bahL>Y~+)nm>%IQ@j0llBm+*OghVqI+CXM?D977rC8
z3b_r~hJ^-yAhUQqXSDhP42))8xX&qK$9fZ(_w@=YOWxsaGKiF@Z$PoOFWL$o^UboV
z_&!OI?qJz}r<-SR&t)|_;o1Obf4YGVZ(hOjP3m+|oGd9_txYTASeK<$M_RK+L_{pt
zZX>ZG(tE{NF*K2TTg&dI<z?I)-kb=P8W}_Q81Lg2h=QL>_+23qxH`WPe%c!n8G9Kr
zY4~OQ!m`ee3*&j`eIa~P;Tll0U52LB@6pybieGMV2V6@2fLF99zh<ut{cXxJ%eqh4
zx#z#UqQ$<<lfq`y2Vdd!xh6!Wl{wD${ecbNf8gRjEW_s|i?b^&L7^fXGQAJ+-*ls}
z)MY2`*wF#&vxm~wZ+U3@^(^)bS3}t=0_yg;pBs@7#rSkr!ELoWuHGJp3#{2^eyW<Y
z<HliW@O`+^s{lC{;?XZ`C}wzO!ZmR^KJtpkGvD`O!L~IpQ&vRl*A`-Fp#-lbm{Ui;
zO<=L&J<K#VrnlCmqNg77zNiQCrx|;CoM$bLIb%U89$tlC&)E)7QI~FVW4V__cNu$6
zAMH5<PVM)0tQ+@~6Dj3#8+9I_V=|AjA%~zsV<A5>(wM~W+60v)EwIPPm>9CI`<X2n
z;FtZ4U-`=#d^LWeieDKx{1*)c108(#cq5WpY>&1F!??N0FBx<GESD@71VToUs0@1v
z8e+yq)9i$AyVQwsmJ+ok+tES%1#T8w(za3MSa76@uTOdbZhyzZ%58SgUONIM8o#9?
zP9JxEQYL@c?z_-95k6~tf}++Nuwd{f5H?JPR|_RLOPY);lMEsBU=}VIRSgqf4#3z@
zHe)<=gbRyV0?~15B>d`N*#DNXCUy^|>vUt8|IQiJYBS*HEe)Eyl6f;SL{PFs4s0jN
z;5iK~npwYz+y878{u-f02C*5@#Z`5<<Md@P@_ddOjIrE%t`8)oC!7k0|ApNftVpo^
z7jS$q7+V*8;P$6R!>$R1xM|-|a@FPsly7>3V;b3RV6O@9ve1AIZ$1H`+Y@>JyC1<>
zQD5xUd4LPwF$?8vBf-mMBoB#oxJ_S;R+@f<m^y93HE5C@-x(vF<(HlN#)z8|<v{Sl
z5;dx;(Nbap6Uu_2B`FQ00c)^Dz7o9Vew1c?IfaXqv`J;%ZAcxo6-)R+E^6>teAg+W
zqP2rSr-=EfFTBV1wegr@t4adQMd*BIDHqbhT#Z>n$?PZ&em+RSP|YB4U+|anDUrir
zH)Y9kt{5bDWAi8OT!f+{|G*a)_Fccf6L%@^1|9u<5EW|=hyObT1?FG4S!LGLJ$My5
z%-@3-^=m=&&vEJYu~Dey#PT7t?@7rwQ)2j3hA2Gz$~x2?Sk|`%`|5OvWXC$VwZ9ua
zomL_<SEs;%$7N{W-^0IBv4{BOLEQ3`c4*|zLCP>yGIPy42v%K)4hI}i$?hv38MPcz
z10O&gb5zC86mbcuO4MOZEBqXK0`2xOE=gz<R?LV5Nyb=dpz8!&{G}CRKUq>g&z-y@
zAIeF3+NB+n=JQ6wk3%o1hPV~&u)STEm@Dg%3j<no>$Vh7T5}F(hP{Dl8F?relFTpd
zo6r8PYjE2^hh%R5z&A#P;?r&(8hnjN|8!dt_4pU=e5*o|Bieay)=5epZ3S}UX2LUF
z#_h=Z$n7^X#(Le?XfenELzX-Pf4fKEF!DM07(GB8wI~?8M44#i9K(-?&tlT^LBz?_
zoIJ8nCW)R!5Ky*+ajD8VwX3o?e}^ftpMH)P$gk!lmCn-4;R@Ka{Tphm{DSV&qfvOJ
zl1m#QOLAuGk==8db7k}$zHUhZSMjL;^A?PPm8Z9XL!A%)2zdj;XT4<kVPkB{H{$}1
zM8hnWiHLZiN~%X6fuX^%cx_ZOp1h<$rIY>fC_9T(7)A1Kc9D#mvybIu4)Jp@<$)dx
z<@e0b=B(4F;TiU<UzjUHD-WNB*lmn$q~!<M=bcy%APVe0o6~ak9jtx*69-8m@Y@NN
zxqbW|vizb@5NycB`Uiv8l56~pN0sPYbQtQdrh@PBGPZC14F11=!Eqy7Tz6fL+!`MP
z;Ssj5X1Eb~+pI~W0@+!2g*x|R{bz7%@JH$PyCAq>%^BrpgNr_MX}5pkR_4!#y)CnG
znrs=~x>y5`e%TP;g}Su$W*TRi)`;(9`eCZU3fQ5w6#a^-`JJk&<WKw~3{l>Nu8yqB
z@ss88?H{0E|5I`N#&~X~K_<L^QHs(9QaE_<1d81Lh-<srv35!$>{dy_g~H3YCVVh$
zxzY-beJQ-5s|kT}Rq8ZUn|S!?<MQZMm~FNlE)CNsCmtJ;V~aAds5KGs(_5CQP6FGO
zT2wKZ!TNj`rB`=!!oAajXhD?~Kl}M%&{^^W*UYsczkh2IqYFh4rIm^97V+@k=rg!f
zW-y)oRtFbtI*gge^x)N!4G{apiWVs!hwJVJ)a|D}_N!PDse%&yJaslo^pru(*9hAE
zOK|a!0eC1?Cf{~PqpA22Z0<FrMZsZUJVqP6rzl}TOf>g_F_b(X$k2CUOX}F(!K*x7
zi_vLAX~o-2KGH>zj=Cg;u=xTkSSI4!qgUa?4c)MNp#iCWy9$aQmcd%)_%-NJhR9mh
z<<3^4Rf|o@+eYS+a%SBm%^}n)VGqu5QK6x*7>2Q2K;SQyhmJGG{M$Ojt@92)F8&1O
z-fRN3v;<IWGG(66NY1sC-Mc>xL-$uJ_~mMAP-R078eB<-*1Bm>FbR1&(~25@pMmZp
ztMQ@+n_IEpTxMc1mae`JP0E@4%GR&A|HnyCsma6kuy)wX@|6D?Xc7B3#vd8VToQKE
zxMVjOdUZlCybEszgU`45^3?*;b886-&y;b2c?+@C_$%vO%!0RXgk;f*2N*h&c_r-*
zqf6p991=g2cs^7nq2}wM{+JCt#5yLeSC|)LuQ5H9{Tlk%v)|*82=C7dLXl&<xI{7*
z>~k~t2h0;>aAtt(7_kqFo-wxJD{E{rZQ$1l?xNCNS%|$C1kQt8xRqN1At07HjHJ79
zm*pdDxnf3V995&Anv{u`J3z<TZamHAMc1uDQE}N2((u@dGz~irbs+`JRZ@t<?{(vb
zlT{EZ=|>%Q-VmR=0_)kiE6rD%2<Am#!NLpDS~qj*!glVp)rHU&VL+s^JbcO3CEIOv
z$k|LQq8ZItms&%}@%A=o&I^WZP6{N`btI?Y*9(!`XF&GhQE>iAKV)jTGOkSp)^wTD
z1*$#p)<m1QA2Nb;j6=cYq1<jGd1`Dr2htrih)ehdcqO|Ma`xq7?rz3NeozDjQGMdV
z_&1nZDg}>OZg7~gd5fqKFF4tf9ZMv*Cc%m%n;Amaatpkv&0Ng8o}=biCAv20FbHx#
zp>&uPJ;{2)1#atjiRNz{>%o{;6&m#2iZpPv?8HBg#n?3awKUQs1kSY#p<<5+5MI-!
z2jWh#Jg7O`+4%)yv%WHaE^<4&%!#w#My}_~A^5(^oSyyu3^xzzLl<_ZdcNT*V^W;t
zw06Eg+XF}WknKE%%vr$L1}2=dp|4bQzl1yZ^bh7d_=8s#%hHVcDiEF1qiTAappE&y
zoyVo}yJFSoj{LW<DC-F({tbgYOPCMAD1|fN?Xf}2kc7W@%5A=&NOhXFqgTgXezlz%
ziJpH1XJic`H)i^RkwG2Hjj3ShtX;TMSB4C@{Q%ui)&V;`4V4$GkTa86F6FPgG;)R?
zBrg7p`_GRA>yxj+{l8(5_ntY-H`U>(fQM-CDxCY(ra&`VzM!{HJiIZ`BS(@=sNiFp
z)2e&7@u7z$J^5an-qN@Wsoni>D)$}ud&Ps~?HzvE1{3nS>NF<LsN(cr>JYyfYV_3K
z^^6Ib0>aMWpyU<?13&w~zFfq4Sq;X$Z*w5>lZf`0DUr2jJ<<J17?iYb#@6LV;C3Y(
zFIEkrpI63Uj$s3?-2Ms$FV8p$?u0w3wdi8Pk`ydSy3d==34wR_-s9Hzv$*(oCrBUh
zki70HpRV47O)oMz(eAN)o1QWW9#amR?OfSTcM#6AjKoR#T2!O$|9#jAc(JnqL>Fg)
zf!7dlJx~FivZ~aw-4(;d+1#R76QWXI2@;pcd_8~09(2;C5vq)7l43$PtNMexeIcJ)
z%NUH09ARr*5-iIaLM#Hp;ZfZMJk80`$`yw|lHG}>JqEOPUpV-=D^vA7LL$hqpqls0
zNWk$IoJXG;wip$H|Lz#zQZ;F3c`y3kVs|X3LvZs9%L$FOq1Bgt@H_k7dym-#wyQ62
zE34*%y<$3cZ@-1z^Hgbdg#pTTH$uJgGhAx&5$pYh^sY(@Y+cISFE7~r^7}lPbcxmK
z)}=sQVG#d3--5JiH}PIk(cH`9+GJgWHZhqYAjuczVdM-E7Q`h;k4pwqXQvD>oMlO3
zOJ!(iUn6FJ4+M|NTFgO~#jPZ%Ai2c4B*mUw&(v5xq?2`5*qkm=yA<X)8W7h5&*0xO
zD`KJk0(bEXA@^`180+|;1jpikhm~kl=}^!=$~dB93`x6n9^BZ^ddw{&;N3q6^|93u
z@On6O9B0zp(aJ<=YclfP*7OwXB#cxurZ*mhLtK>;Mujefr-k34*<%{czE^@9Uxr}N
zRTUz`*pS`FEUDyuqBuZQ$g4`CacxaLY=5jm+)UH>o>w)DTRfHXh%}_bE0e(yuE2+W
zGrHk#5UiVFO)}HR@cP#bNVCjnJn=@IzG*a}K9P#F<NRodnQ22pBlEc5`El5#ItHJf
zK7roRD<RWsC-~(b=LE09;ayG(^Pk+sF|$octo;J8I;B7}D>iVhTF;?G=L5g$<7Ld=
zq>1*g20`AEubgI(99i`;oAp29A?n3d&dtw~{{E^<J}3Oe9dABC;?D1oR+NiPIpNUp
zVLGRqX-Ev}@1Wn$7=B^25j8WnBK@ki<W2u${5^$rtADM*)ZI$7=czWd{loSP`qQwh
zAQn<TJi>oA%hHKUpTVU`B66qDfNm1A{S)KB*k3b)nd%~%x5$jvtO!Nnfy-cV#hN<0
z%fRFD^3>G7oH0bJS@)E6e@FeorIqoVP%aGJ1{;B0m<|0?V8Y(VJvcGL6NikxhIyT4
zbXlX2xEgeWbAq`TH;x6trve^)_P_z77r4MFh53Yir8;q&;obUNthuj26Synj)W>#o
zdiLBz^KR_FXhwM()&u<N$PZuB3qq^SFp*0Gk>z=@;~!O6{G9b6hPiTH<L^p^F|YYd
zBM;8vZv~X`GW7C?vv^ds1Ur(O`Jfe1*z^4g^ZX2=c+G<CYnf+6K1LcFWd|LW+R#*%
z%Jpo_;VY`OsU7>gJ7&K@KlVIGm^%{AZn7Z3lbb>7%pbhHTaF%ntA$O|pGh^Zgy282
zn)Ji0!`M^i1AF&b)3;X#@XuEZx_fy(cKmeZ_H^peIX49~FzP<^4$1(J&HiYUFn}w=
zt<dk}S_lh~rAZHtVy)+JY}jr^e*7??f;(r$@#b&Dqjs|QboUoDT6_c)8;db2@*i+m
zaE5h_^Z0R=r&<0=kvdLk<m3PQDz^QS&O4VDO9RFn;npty2|>F>bdpajYI|A}(b%J)
z_Iet;S+7ISv3>P!!zc_{r$XmZ1+x9bP};$AHorHq`>4VvC^#A+*1TpxQs>_Tmo4S+
zBQh60k95O@^I4zn&q1^^JOraN1Y}@?4z-Or%eA^4$IM0JI6+|q7<VgEzc9kpK6?gh
zBPw96X%YwzXQPYmM-U|@@&#+AiI>f{CMwY_kkzC_^YrtXuWvgT9+x8t%UNgIb|{T-
zR3X9>z`eS75W+RD^5=*#d6I2I;#ZV#aBdNVZQKlYWfMVb@fj#v<OwG)#iEs=6%Fy!
zqpv=kgw2)Ov?xKHW#>5Q-O%H(GU`06jkyK?8uekWwGC~|c?%B@s*~ttUAVtg2~u}g
z!&D(-Zog0im!2SyzF}FOFL5yUL^8V9=y86klYvg`!A<sz2|DQ-bPD!E^*jkAGzKvL
zhBIWET<6mjP9e3*LSv~j>@>{7mbA05^w@namgQ6*cV-~;n`1%GA$VRRqEFAu(x|k3
zVEgY$?t!5-kwaA~`8$ZVcJ*?{(-~V_<E3NK&K*$De3o-|RfUbBA!MEFAR;H0g^#CX
z$@a-sq`or~zP2rfiSyG@kwjp!_C0R#_jF*vf0FrWIx5{#0dtEOEZP&oyelT;X>lPu
ze|8&1R#RbWpdz)&tid6v*2LNWmsqkV)5*G+b?xJVxq+T`1Wz4WbjuUZtUCvZ;(VBT
z-4s+tupHw&5qP~l!`WB+Vp7#lTwZz>w6aS<$Co{yp8moYZ!@u?%^N=lu|3k+=gb*%
z6k}K)cUP4xT`+YimK=}aMtWFL`2ZEND#;pKXDQLS^I9NhOeh9^aK}2wMlLSk3HUG;
zu2Jp)O82%v-%Cr{RNW`8Ecpvxmz&@?K7*ZajrrC3DrBUU6<yr_8Q0()R2!*FGk-q9
zsBB|U(&^)S^pg2e3O^zDKQ=${c!;t|^|&uxn>4}~*pU7hbPgz?$lIT5&SCjmi5zX8
zE~IPg24L>OR=DBEX0w{9c*ok5_$D#_gzy;O63lYJR|C=1?<;EEjs_LCM(kXphy{TS
z+z~cIv|`K{e>*c;IjIpOP9f4kj{2m5`Gl(PrNQe7$FR`hC)#ZIg3|+v@a97k5;iFw
z^IU)MA^V@hkL7ouvSA(S%o_uv*IE*VuzR?m{sVhH1v*7dk;Qpt%H-*~c<8Y2!`BxW
z@5$c<w#iwMjZ=(C=FNUC-m035e>a6|%rl_>G0*C(99=51e1`V>g}kMX4wig)%}rSO
z8SG@fVtB&}F10QL0uOzM(4Rl~nM*@JX_O@t*B-^HVjH4=(vT2C5lKDp2wsM%l79{>
z5@y>4;rLj{-EtOdZ`PvAZwVyE`~gwgJ8|@^AylY1i%%74(}J}!umci75;PL$=D&du
zkfTxa9&#@K<ihPaX0-FZHMENsW9KR{oT;p0uFkzUsp$mn9U2LX8(x9MEf+LuE`e02
zg_$llP@CPI6ZbWt?NL?mYAhG8Bqr3$K7r2)4nn!nn?R>@EXaQ7h35DWIG^wk1c9yM
zSItLYpnMn>MCsF&5vmxT)6OM3vRw45o1BCGd^E6_#+(Je#Q_eRVCH-QU2BvG2WBYK
z4Rfx7`}_fJ?{W%$s*8XM<%!T~9kiXih8wJ(!n;%psyD6_@>C~)bi*wCn5jj&93z<X
z;}{lC`3V0GXp`iHCprJNRk*8uDPE*W7%$F{+HOyWuKpH4E7o~UYQ!`<OH#Lh&HuL^
z6&sYW=Yx+GED}cGVjN2AYpWrAa}@XW0OREOR`RKV=P}gs3GW4Wcy9+?2(Htjn%(AP
zoz@jBF`0_i*YZH;w-t3)kl~a=O^I<?41PbYNQd2)qW5tR+>aJi=hOiRzj%e8>>#A^
zdVZXwP>)_Mw4_O`mP8!T56g?vAvoYG>`~CA@}@!PoY#RR>!!e&rHv3dN1ySbKENOG
zGALpB&XOK4P@ATPxx-gt>z_96#z_Hi_f<*ngjmK<ZGnL=$v8O6f;@~b#p)Q2{afAA
z1x8;WAYPgFjQ_)l`HlFqcrcBedIV~>{e<M=RBlU&B|T`PM*JHhL3yqMN%lC(|FFu0
zk#`N~&~J?EH-a$*hRj4?dJ%^j2x(KoJ?Ze-ClF%v>CU&BbmrDN9Ln}Q?l(7bKUlum
zSy0LCI97*}R88cc{K5rV-B5N&8H8zzVCo1FE%Gs88QQIUonIH9eM|!#M`mDYzXJ98
zJ0C75TM&`S0oZWqB;J=XrEf;v$LmsEdic&Rurv%n(*)*ujBVpPR3pIqPdYdYd&C8$
zveI!^jM08l8?RgV9>2$ni1h6%uuYkOOGj>nC0SZjMxz&3EH|On%QvA}l`*OF{}0Zy
z&n-5|fUYerfOj^jDAK$DURxe;d`KLc8Y$3Mt$nyFJ&@&#E%;k+7)zre78~EU;<B1n
zw3(<z+EjFjmzS9Pv3fot<0w3C9ZCm(zrt9U7ohcu1RK1CY;U`h6D-z{&XdTH@UtfH
zi)xS)9!4}f{~8qiF~yD*m2B4k1LW1ssqV>Sd^Kq(j0;ji+*OS?sz-v+-#TbI@Pyl1
zQVzb%2|vss26uV*!5;Q|>2s>ZwVLNKm-Vznn+Wg4@m$5wV2n0nJ>A{&(IUMRHtuA6
zzWzim=bafjrJN1!zJ9z!`2ah2hrsuq!NlO(9I&i0hmKLVIHg;Ux%5`{|J-avt6X%*
z-3M9F^5-X<Ia&_mP1H&IHB0m#QU^!O6zM?XbLif#ODffNqW#jd5VuGI@vnBmyTre+
zUz!Bx%ZJdGu9bKs$cCIc@fxxWEy>a`m3)(LKBut16z*QV2}U{ZpjU~9oCrNqsB24i
zXe`8S%wrrMUBLe%Z$dpL-vlqOVQ8E-6JiY~;AYhrTz1%qE~aVV{dYg}zx3nqY-<#*
z8qVDbHze*G4f#CrcYf2yB$iqJA{ML@!B@RP^z~rRL;W52dWQiKCadvrKTp6P1%0~u
zu>~m>@(}aRoQAiBbCTd#XbV>$_GOnqW0f(rf3yipTDy6J+=JjOQh>%P9lG?LHjT49
zh6fqnu&karG|#?(PiN%Gj!hj<$k-DL><nqc#-r$o%t7j)N~7N!(XI7)(53bWs)lQl
zfMch)<uc*myXPMG)FkrF5oIiUx)d&ou0s4yLoTzrfZLn!8=P})fahODQY*cNu@+Wz
zLx4GESY*R=fi>}sd<s()^s%J4gNtRurlc3zq~m@ncVliLo|>k_+}mxKyT1{phboZ3
zwJ+gLPZN%+y#U{>*^KvlqtwHTu?mP4d0Kv$vGw0zs6-b|4vIlbu`8%EzC7NMVeYsO
zppnMT4;u5pYmzc2a2%QUn6YtYeYk<#uW~&5$CwIjR&noiKEdRjzcAp+cVu&Fyn6cx
zxcyVl?O$mJhZ8qK_Y6y7TlRvN$T@N5eP$#t>INs-dBjOWy@qi)?O^bmp+wSnSGx69
z9P8or@yX>Ex#Rc|o|o$q!6z-QX~ztx(_$T(qyyldZ^?TJ=5g-#R&(AlTfnxvgxd^3
zV8F81TaGFbuXpL<Ejx>FV$)q1XT&&*0c>vhZ!7FKF`(Y9zwlCMBGx$^z}BtjVY`DF
z$xLkI>~>V)WHm#olsLfU4NKsn&L(hm(-%Uwk0Keg(~>kjsNx=X8qhBr4x#){Su)YN
z0p1;$fg1LI@#j-z`s%U?t-R)mpSuRp)F1)1y)XjwwkXpPY;R@SPz=6(E%4wFkFkBb
zVB|?t^11pO+D}X7-#Rl7Nrx}QvRrlKmMduTHw}yLJ%OM{hUCl}w$It)$-CF=;P<;$
z;DTeHA?ub2Nj%B;7XcBV{vTuPPA|i)^O`~6`i!6OE{XYhGC2p<wa+m61@@lC{N~k)
z=+;+^e@yz(J*<*9Uvd`%y;+89lmn>V)hA_^8_>@#nYUkF!+kugO-d!jFuc10D~gLa
z(L7g#&8H#kS0sKiOvIU6Sxz}G3j=~5bID)cq9oFTTUnBZ;-=564|E*%waODw+$5Gm
zZRetX9_6zu3NUb03}k*_8TBL`zSQR{EO%>%{&UvEZ^>m?mj4(_b`OH*6D{Z~kt_;r
z=Wti6-?PtIh5b!O_`b(!n4D%#S9qR9_qS0Hf6)Shx0OOfn;u;~MwU!VQJ`w$+cA8G
z5}h#iEB<)-8ula(A@w8l>4=IFoU5GyXeoq5^I*8M&49RFS^^<DI<#qsJv3ZZCV6UI
z=&n7E-ykf)ckkJ6|3?j9{?3H#P<sn~o@{13Vh{MRoo;;k9M1CRIH)u|1724`#Ce}O
zxka;vkgvOjGLMyz_fD_H^}Uy{uHTscS)qefukYY`x531yry6$59EQS)A23``!rwR)
z1IyX&bi+92OHWJ|t0oI+TvZ3$|C)*IIfY>VH4r5`_lbj*oKQ3XkP#wb_v|%XS=&_*
z&~@B{LUnR#^f%n{NRcQs520PN+TbzE%6@K<rwWq~ff85EdA+_TP8Jz3XXkEy*WmeB
zP<~AenXfRupit~}dI=2Ic>y6o$3Uc*1~Y84&>-SCzb5Do9(*~NN-mt>u9@dz;LEp=
zxBDTse0vFvTR%eGwIBTPNrUMbxo0r<XecHpjDsVMS?K;f0Z%kCF6cij^KHp^Wl!3`
zKja>~VqJ*n2ZwQBj~IV!?qh6dE840xAKD8k+oj#d<~4g^Qhfp9)S>8kK}fiG8Iq7c
z9%dGHVTeI8gqmM~s*n;44*d)M-|vC5*L3D#{D59J--};H72<P~J7_n=6r5{;FPNIh
z`zXpo{Lv=PVJ!1WH`JmacW%D(iuIh5&I!yLYJhrKCvcozK1xX$l&hsOpII>YY&Z*(
z-<i<qu`0CQHWi$8*74JIlQAvNh<TWYV0F__de^B0Z;dQwPS+c}<=7tVsbX$S=9RF$
zs{|79TwXHjI<I$%`6D+y;@-K&Lj?0p_xw1Ix4d3pg-I+AEQ4~1d6(kj1T4eHzVnmU
za_MW#NStOY4q0tY+b)Rc<fR5=u4N#8D>Nf$)PYNnJ>di@rTMRzcU_^N6{O0oP`IQA
zOCImTpvwyMpI-{(j<g1koKqvqPa!xwufhJQY%jNl<>CY6$gJyZULmiVpK|>%wuZ&x
z+=fInw>X9q3N@%d^JGjlz5*WX`x?X;T=ML_GWxJOX?{K#LQ5jJsNA!>hVuYEy}-^t
z7I($YDjbBb-^w`z>rh9H6MTljGYIck4)FoIV1zT{J&UZst^OX@pp$`Z?2dS5!c25u
z?8TQUEd>{o1W<i+8NE&!a?Ya<h#zY*{#WiH#z$2H(>3+bGu#*po|t2K9lMi$yaK1c
z7}MvcnHQDy&t$d#;@`WhO*N$oXJ1#Pvt8GKsD?^=XdM^FI>>2DbZGRklQ=a$23*GF
z0)9}T?GFBEU+2a>6+Q!=<vH8)2?o3z2DXgVnyV&-#4a|^gzG3Nxg;*Hvmp<Ksx-V&
zmbV9Wn7E1gr_=J8OJyDxzJ%>E1AalG!b)_HG=S_d#zKxMgUUS*p{HmHh6nUBcXA8P
zoqq~UPU?_=gJ~FFy_O&UN0&6IW=sFew4n<tHR!<77Thh+BPoXFME<%wnVC3+^&G+>
zZSxPD1jn!<ZYktlzQ)Bq=FphsD2>mDfxv9DRAF#6bE=KR+B2WQDPDsJVsxqO({tcD
zW*$c5SP{pG93bly`K7MIx5_HSs^5}|+D5??2W>Li)|MC?MegZ{hs<H0$=pPvac12^
zh?{U1d>!lHk+vKun>ZGRr?rA{jxiJl41n;$Xbc~dfm46$L-?E<T#Sr9S?4gA@G`8Q
z)4{mcQwzjPx4EFN??cR%rlI}h0)EC?A+65Qg!r07HiysvC6!9f;<*;Jtoeq$`wB32
ztOf>F{04=dc2Ha=AeHa-V^DKFy1qRDL(Z|;=c~WGBzeDdZTmBbd$JbW^R(&Ls*w=;
zT$8Tpet^!Yt<oKjFF|io9w^5Nh@W0Bcf)ZDR1_To@$VL#<;(o^KUZ^co1eh0j)%-0
zpankvZiY+t8l=4<2~J6+%%!{^O3mwGczOmnjZ`CJ&xpv@+rP0u^@CVd!;<tgWOIR;
zS+JR9y7&6AIpJA0Q~w!=fzzj;VYoRBMYf;S?{k{k&phPip*W~TgF5WDqGuzXqu}BS
zC$)=FVB@7h%u9~p(%qqumSRqQH#I{-hA%qjH*oteIOEcCWx61#7^3KNbiHgyHNzB1
z@)R~(NjU;#m*=45K@uE2El;vng+m>u49;CCyp+8k3pX0jsqXDqmv)W&!o0es2fN{7
zzZC611wr-u*^s=pnrqGL<!f1X-fO=<|8$rfJ-t+iL`=Af^?L`=IhPb^>&{TH&!56M
zU#gahT+7hzw>Pv`G@!=BJkV^vj6R-kc^$nj^fxF+w&+Dy@kf}q*n~LD{|=&OAEcr=
zA}nPYqm@Vh0m18~;tw}vh_6!?r0&gz-P$@-vTzk&H&%gqoH-7gbB;sPdSAYyR1Qzt
zT9QvP_i!J3Z@MT}LsPv!H}`Z7l*DB6>wV2=+9MrW82$@hJv@pX;a&Jz6og+R@1Vx_
zH&8Ko0)H|7JWhOE16v!WVZuxmbaV(~?ET&1MZcKyDtZ+hW|Iuxf~6?zX=I+b?KpU-
z23@$;lnmb<gOb%{`EpHj;O^8YNMkOxh9fFuZ{bb&@7Oua%<$(84_Fa#|0Bk-wWP0q
zSP^^2SeSj=0Goa!i=8w2Flw<mil&Z(nBOAe?|KQoMl){iiV5Q2ldR9(mdmv)v!pVc
zhmcn8Jb2W5!<gsBB*s`s>Sx;F#i*yScwGf!tw({_?>#gvRHjLjuVJ0#Xb|=p^VNF#
zn9ugKb!t!f_`pTHd_Ll^nF8Y9_7pUi-^V+Q4=~C$5}*Fphu*z|(Q(;Q=I&IdV_wyu
zhnWmqNn`Ub`h;;qb3v%Wd^xhFL^S1rbn3$tbX?em-Jz`SY&jC1D!c)i1bH&s22e68
z2tN6CVpFAvPY7580b}J+7{&58du}+jsAYlhWiGdFk})YrJ;|%>y8xlf{&0em<2c8n
zvrus5iTFO-eda7Pqzh#VF>KmOoce*ibF~J8=uEG4x`B{*HB8_tJpRROGc&AE^y6fM
z(lKm2fyDS<pf{=jO7SFKS5%_jCzWW?`)N=uw52whL&(kLdi1C3V6rrZoefh>G2*BS
z?K*uO8mBbC@#42&G4?miz7-GqZO21}QzFK{c4ZtzZ?Rvo9%s2i69&c~1r4`1I6TV&
zKg;!@>yw)huv~}+Tcg1{_8>T1S&cKNDN+M32|ttTgr?6m=$k$o&C@igBj3;W6`a5?
zFB9;pdo}37MNB?mg_(_axRP;i&~CLUHIUE2cxM6se9{v%me|6A_cmC1q6?g#9l~45
zZ=m1+5ho9Q&Pg(~aQ^NTeAvV|q%uA*_>>LFQ?KM#GRH{use2e;D+Un?sfS!3uG0Ms
zp>i{^Xnh~5ZBvHiHLrM8Q(e+hZ%F@W%!MfFC>T3SlWg>Pjm{NE;G*vzkS%BY-Q?e#
z%b?rfWB!N}U3!bptPbJ+tMkx(lrdLzi**I^g&05T7~h*0feWWvk%{}Iz-yV3#~LDX
zG3zb_PHKkF!)oBbG&aB6>xxhRe@B?7&Mkj(58Q*V^U_-};5SQ%)7ib=wdXSYJKcno
z?J#BBty^N}^)vXqq!wP}wv=nS*(J4nr$#H5G3I(Qo2TE@#ahWI92gOem5dqi)%`qN
zYKvfd(~+2bMvE@J*~-Q5J1#!8?JSf;9_1FV=)l?c_rbHAp(K0WXUygF>BjBFjAgP6
z{H9va#d%GTJM$}wBsbu}TN`3$XMwqHFQ7t+K=^fkye4l<E7r`%jQ6)-Zq8q5H)A`m
z7#p}Z`53ld?BG|{%1|Z6Q{b}f1(xhh;YA(BT*u+<kdgWVL=Rqb+B<DXMcQPn@B0SL
zvrmH8EfWk6?B#EEvfS5CNAzmh!+pDT3Fn^ffH2_|2yYAILsOM8G2;dvZ>___x1vGq
zxgWesDuP~aDdTlDOTGT7;GY@FlFtWQz<43^f-hhUz;Gj4{n-(W!~GyW=c~9XD;oR0
zusiM8L)_PlW^|j;!t0JPCPg1zP&i=_=VSYpTRkBcJ1W#r;F6W^pv%}sBg6O%NebLO
zco+I>^oi(HzO;(@U)IPeQO6l`K-{$sUj-{;c?@${$34#<%)BO}=GB1c(_t<`hwZ~8
zm%&W09J9`8)A3i>Jzn_=xXpUO{V4v3*M|o)ABur=<Ovzlvb!5{*Hl5PNhU6gX3r}>
zQ@W$P7-w|K5!0mKY%i^W7Tt!_?F8#ujJF|6^=zp7Rv8qSe$MOg{=wOo%mQ2T0i$Br
zJ2CjBqgQ+w=Zq_OHA`bGIphc4hv!02^k7=h(l74xS_bai7w(*$fW&RM1r|rLK;2e@
zsC@VfqMb#2N2DTtR(b^AcUzE*ou9zVljj}MlA&_P5BxLI59&4tGX7I7-d{18NZLHP
zEEf$b+`XJn{APn&?K466h%uhxPD!uctbxQ+x50k-3f}FLgxfypBpy#r0sSp%RMz_u
zB%2M#t51HyuZheL*{MYvm`7pZcVlXB<`iEtB!rI*WO=+1y43o0IaIE_jTTdCAU4$#
zRHpWV#w3>4f1|-)ylF^%r>W5G24(2ks!bl}3P}KCzmBxgC;Ok4qd{yh=VxLM-4BM)
z8FEU*Ci5=djMJqv`AhKPlRSC8RY1ExR^gtmTnu0GA72{X0hbTykry$Hlhim8U3{)H
zP6m$|=N>>PSBqa+hx2Ny2@M^iNiPhsp!H94(0T1Y{OK5Tg6Z+_G~59NbsM3qN=O%e
zvm!lLqOr{H9q!+z2|57_K{7lJ+|Dk<;4630h2>#OJVbEas|G`3Z*rUKli?wA8(Jsd
z#?SZ5A^usv*zw=fd`Nv1<4!d4IqWVoOv9X3e^G}Cv<rP7&B7j*Z!ntC0D8ftFfeg3
zuG%wzwh06Li{GZ?!TL08E?~Qc4_bIcg*lSt+_=6R8!Fo_pg#Xb^GdU9AmBneI?Afi
zw_UPi_BI7t-}V7koG>BIS!cM}$;0sY$m#6t9E1zfpFz~9Te!t50pYqfyuMU~nY|xT
zPc0bqqElgS##FpLO-SxUv(B4lFg$fjV0n<&-0Dy485cB9+By9(b9S3RdzuTD{p!Y5
zWA5U!6A$p_%UT>YvJG4l<DmD$aTvuoo`Op|#oDZABe(MmRN5W^FWqQq^N1A?;1ddu
zidpxs@-WO&H>Q=jZRqW81Zu1U9G~-4ta-qcrWRzwR?}xtvEv>0Z=n(Oy0wB^T`x<v
zdcKE_Gs@iT-Z?n){(KbC4ea-Fobx;8#>tElkz0B>cuq@)W`_Gf+cPubI(;U)8!-2#
zrF#C(iFvq;oWZ#V`XTn)2lN$4q1js&w2q!YVb(0Z>-`=S=!>O)W@_Su+A82VO9H1|
zVX(O^S@OVyh&MdPX<2M<pJm9M|6xjR4PtKcv5Ml^fe*2<PLl?_Xyz&m4f)|@9PZ$F
zaKG7yy0Hl;D%}W=Sr5d>@ja>pWux_L)<ueVnZI~yJiJ|g1ye=EP%z()(|2RP5%+rV
zcu>c7P^&OG@HVa(rAV40oS}1WJto%Ng15^0MDEKG7}BjkUadZYj$2}}U^9nHpBcg;
zg&}ldv49L4uq4-A)v3KehyEJSBywBTaJZ2(ir*E3d`cExpJ+n%+Z%z|Azf0+zWY7J
zbJ6?#F|cp{#h+|oxvp_{!9Z4#rVss#BJor4(}}l1Q1(SU%3G1fpYM>a31hjMg&Gh&
zDF{!GR;SX@N>p+*H~-ja=Jgo2h?^PK2g&6IuvEqv61EKo<NPT&$5)-$cQ|s(wkwjU
zv-g0-=Mo5EyZR>`Y*v`k&4-^i=b}C>hPB_1LFO{neY`jk6BeBOA4O;4R^!)&;nSSP
z)1cBkmnk91dDkX$=7c06#4mFQ$rPF-2~83mNm7PJ>bz?wi3UlMBuNM%Ns>zV_V*9C
zE{?O$-fKP2eS?Cm0S(P70O0~PzO{_~9~$lpqaXdjiEM{9WV=35JD&%|)Avc*#`JLq
zt-GM5^)tVd@$}x@9705;$I*J;9oRg131lsw%`L960oMiYT-H)kyrusQ`kU@SQ0*ta
zeqtR+cI(pu^XF)N|0LkNRB+uimltTU1XbI2R1)-H#9nDy9{+)tz8MbbGnZnS?mw&y
z7Gq7;EL04;!8yNErbpHlfM3Eh4Db);(_<z<Tx|eq?^M8JuTrsDrWxXyD`2jpJ8VzY
zrrUN2NzJa0cus}QkuulvZF@L;VW3EyxfW45d#AKa7|XYKp8{*|%P{=`b5>86rKVT(
z`5WurV*l39nAAIx@e8uJ1P@bsY?v0Q?~<Y|jZ)CEVHmGA;sZFVD$v+5pV*thg1DLV
zLGLrR3%%cs9v0g%ujmKHoFmwO={4F<iRM997ET40<Ih)RC<vBeuC@ZkRq^1xQoo@K
zCW63k*=^gir})q84DHXfr18Da(SAG2(dZt9mT4Z`ZuWL|u67W$v>M>M&}0mGp+dY4
zK0>!_DH;{@1r(*LxSJu0bUVpq9*K03!v1TxIp;Q>|CoWo{qcO@$x1X!)gki*E6{Is
zBcE1%71}Z$asP!|(h*6_kNWjEhCXEZuoF74MD7FI#ji%0h-d7*?+mryJE6F?2LG~N
z&O*jCPfJ&(Lk6A3hTm5p)T9nNZ@%GDm+wa<bxV3aTS%XO)}YIC<>)y(OX3^JX39@%
zaoEx$tox!)WWV*{w1F`2(J`aLvSuTL$%9LGF!T;d;r0-7(mBf#7T-_D6Rpgz_GA;P
zX@$V%9ompIW)6<7K7yJS2XTkecTjwF9mj_0(vWc)^vL`IJgv|U&jW{$M%{3TZxfOg
zdHQ6y;Te2xs!3|ZSNJEPO2pIn8R~34%H|f)5R`L*`=Mz?YYh&wS=$;gRW8DL%P9tK
zzX=}W;<<Z_(cvt0gfCin52ihu0KNvJ;bFWnQQ7MUE@nDVBkJWpPq`0s&uh>icg7TK
zc!wIdS#DzH4s88=26rnpvP^v-M9m36C*f;uWlaG%=Z14hIo7mUuL^R49r3Mi8b0}@
zOAL;fld6MGs4gua=Ea}UaF0Gtk88q8%}{WXlW;rsOhloJGFYzYM@7DZt5DY=UaK{!
zbXO;A{7{GX5z?qO`zll#{)E%NtmqczX4!D-2^M#(=Ia{Pqq1Wz%J!+kl4eUXepLo$
zek@1r=;7dd(iWXpCv(y7Owgn7D}PJK++v-FxF1^u#AdXRoQ+kcjlmr1JA8nG2@}v8
z<hdDjPhsam9n!?!B!R*l^xnugDxZ`1t&h*aCf8W3pR*a~$3BK(Eeg!#UVw8)c%#dh
zJpSos#@nmy!KDd?#Cz0PKG41hwy|v5E0#;~o${R<;burJ=UU>6KsI+ASH}O%&jGux
z6!06d5^IBf(RH;Q7|n>pm7F4N+xmo$EL2BLEq2x}u%J$pSr<>-Kj9whAlk*fgJ%~E
z$>ysy_^VZmIF?Mna*qmr3F9qpY`O~Gp`n~x#zk2Fu?M=HtT87z3odVFeYPKC@yB$=
zJ;?q8hhEB(3z`Ms{z3w8hkb^Up;y3Z%u`-*-~?xB8-<b8_8=HqS(tgzj3%7P=8pLV
zVUD~Oy_L`gF5ANRFW;4E)cPzKHAswlq?hwtbsNN*7x{vh0+g8iK^rR>vUm-<+kCKw
z#|w-|-bYiE=nARwBpKR!O%ci=owFUj6ins)`GSex_!FB9NVta;O$_DGKIZ^L3gqZ=
zH)DFNQH!WN@B{xD7hw8vT{2?HN_cvr0#B_hK>hsp;P{{mrgt2}zse85YsU(Vz9Y}O
z7YnHO9(!=w(ZQ8miHBj59GI|Mi8$V!hiX$Rp?p9KQr=jgV8~%nq{Vn#8S960=S&05
zPhseMR3K?sv;{n}qWGMB&mb~*7K+u9x!TDgu=~m{C=FyzCfdNKTfM|C%W8Bw_?(MY
z*~Xj~%O&zntuXRZ2l}lU$Nl&uMSqsXqu#MD{(=X)r=Bxs_m!cbcA^^mrq1Pxj;_Z4
zByyxu!-~c@n$T_9576z~S2&_|7YO-@SthCc#N8Z5HN3)wmxavfA;V@YjeOhrhaC7B
z(i^d_V3^~5I9I4jyYe@o?b3}9D>5g)n3H*j`z`PpVoAe?QxrC|Ghap;=kQL78f<01
z3-g*pGg|8)<IrUo?_f^e%w>Li&K(ct7lYX0B+TFc4r*Szf@@3~civB)$hc{dDODdJ
zwbhO7j)!vA8a=o;@Cbx_H=)*TAE8;$2wvO-DBiP=bIZJgQ`5iW2%#pu!$e=va~49V
zX$Rbt(j<b!BrZ5}2B!X7hSOD5$%mH55O2X_cTqIE|5E<%$Slyg-vJlBn6Fk<n?{EY
zLL1(IwvOmPz24LO)+%LczP=RlOv}++LyDGI-iAn)liAZ}N|!wpk_~5vkbUA<tjkaZ
zm;R5O_n2T&r+o-_D3vjndIoU)?k<d6c^{<;^hoI47tBZY4TP(1^K0gWq9x;Y%IrCf
zY15M-VsjMuTf0HCKbtE|I1hd=b!orlKh~pv%J>nzfYFX%*pP>7*zV<I2Fu^=?&j_#
zvbWydio&`zVyKCE%J`R)FjChKWL@pR+oM4eGhq|X*RMxMnSL;5tk{)<>p>hrc<T$N
zFg(HsqS;O@%r6zi7l3ovH4MLRF(b(|8105KeqpsPJbx03`HwA0jMPmGII|oIciv{~
ztvXz}>K&K_-$p^$mcp#pOW{Y#bC$#03BBK#`+Vm+OrL9vAI97Ty(wp~A^sIwzEyz0
z5jvFiSkTlH!@!QsSk50)p;>03P_tZ~=ch?gSaKZjbtBr<usr|8ouKz3lDTM9X@FuZ
zKj@|^E%7}I_S74Ktln}tRvFCycmgd)XMo-=J^XyK1Fm1vr4h~F!H#i_rrk_{@NJst
zsuTj+Wq>gUbZGzS<Df_1b1jEHOH2jcAc*)<P?W7jL-q}#vvwPk7o&8D_?RL6%=}~S
zJH4^ANF6h?k70kvFU({2x?x*?gZr`7ShH>h7j%6hCz!iY<hh=Gr=K{WbMa6<et{<S
zo-~?sK6#5XE@t27G2b}FNojC8fic0Ei*VSwZ{Yl64D;T6Mg0x3G$Wb454Ortao!R>
znB7O*Ev~}b!F52Yj7WL>IJT!H%<-3uE#XH+;(}gY)B7o`QGdy@%i7d?y9t+(-OYR<
z)A-FkjA1c%>IA{OPX3#RG_9%F!<EoF%=l0NQ*BMih>gZDvHK*(H2*{C1$V)_w}^eV
z8C;2M2Bc=Q?8BJRlC&w^Ago3dv=!Z6nQKD5S4VR_-`PB5#8~c6fIc-;)d0!ZNH}OE
zM?K$kfL`B6{%`+TNU0T4hofG2&CLkUB&$=mB8qB;H^6-K5>(%H5v4D@fiDSGWd1Zm
zY8><z#?7}SZ~jEG8JIQ-FRTU8Mlp0Bp8}fiE`U*pF@3-~PR>_dCGt{;sfON=5v)pG
z=S}B&-)D1IJJ{V^y@)R^A0+u&Y(%=W-r{HB5PHF<094kkgoVtjZgf~kPPj0Kx%eE&
z&P>3JF^}Nj&^H)mW=_{UE(d3W1^l-t#Kh5wc((Bw9^*Aga`83H`V^1tM(*%1^e8%u
zZ*xJZpZVX8nk2a9HSQdxN7^>72EokI2}|zk5w-Q#p*U3ohCS~G^^O*F?Vbhu<rbjy
zq}z;rp@|JLbu6cQ5)Zn(h0*MH<Kt{2QgS>5ri`Sptlo-xU9qB<b7FAaFF8{2L!Y+V
zM8oR)gNa+~ZE!1lf#$i4z2tp@OSadc>x~Yg?c_DsoynNk3C}>gL5lhxv;~2)UxDM>
z%NVn09ts}Ra-K_%LFDPR5Z1sL9IcPI)?=%{{g)-($nG2xi9gOAE1+slX&`%i0;KDT
zq5r8eT~wk&_W!4WL9b#8Kg53ptLe6+vmuTXzgo(@H8_d;66EQJ<)>M<mw_T$^{BP?
zOXzs6Oy$%qNKL3V&5ltc2akn<+QNLCe>EPRFJBfVDBt4+u?u*CS0&%@*@k_VMqGe)
zC+CtH#xIoBpfW7~dCo<S+#=14QzYaX{>flgY8^WC=i_JHTqypshL?WFgZESsmk@i7
z+xRFM@5>vIs?@oRzY&Ilf+bws+21fWMw2v8?}rAj*Vx;2mUpl_hYE*2px8H#fAiui
zo|smG%SRd#V}-*|ykI`B6E}z|rG10m&|iFik&qfl8xmNu7xYiQ0o(Q8Ig@G$^P<FH
zNlyZ7y8Q@yg!dt?w;x*6jd-JDL&(%TOWM0elb-8)go1!8<JT-HV_DqCg$?Z=c+CwO
zwB$!A92>~OqT&0oql<9@k4Ry;<_FHhGzF*c*P!;!`?)i$$G0~^NKV(ihqFUe$cJrz
z;Kz3*x;1SG^&NK{Y!B!2&QkIchv$nRvNQo?KM7br%D1q6;UgRx!a5;0H-dBCJJkK7
zPHGY_^M%Grw9}Ni!hVm2t^Ox5JopMP2o9Ja&(09P!>ggB!wmx*3OVDQCqPiRr!Y%s
z%xSkyz#H|7RCeSrX!g5__dSQu;wX6(9g?Gh@{5IC54su4=`rVg_zIWsQv|OTy+-{x
z<zTx<8kf6j(ao>pK{!y00@qQ60m5FEiHnmoO@4<RVXwh>d>3=HwK8wpeo2d^Ef&1B
zruzFUAZRCY8=qf;$V2BKvu6lt{<;s<9th}|_&!`yJ(``(toOE~owKY{M8&mtc_q0-
zuv_{UEPm;b1cz_j7*Rgjrr*ZuJ6)LdejLB^p(10E^uROTn(VjT2O{&kkV%Y*T!jj?
zDpVyqifYkx$2Z>B@ChH3(=|cNI`<XsI%J(v4nERSr9Gz)!!FezEH^uW?I)sOQ!2|$
z1$=|B)q3>8fhrgXQvvtL=jgo19J*S(vGc+~&Oq6k%*oavePyy_y+Q};42<C5Mi`8}
zaRcif9)<1GeqmK<JN8@l!fBRO@fn$f;xTL=ZzE0qiXD+oS0$d0+ko2<g+Xyvs4&Tf
zSUw#J-mP1>XKWT&bLA+faiauH->5;c;|>^Ma2{`EUxwAcwaG($WxDl;0&PjTgPm-5
zxZ5rh&V(tDee>>P@9Pw<!99#CF4)gkFPn*(!A2xvVJ);j)S=F^g(9B^kr1WL-j}x1
zL?c+%P0-aUSv8-zblxq+D(yWuBu#@v|1gHB73mm$u8j9(a{-&MIP~)p@oEe9psSoG
zUeHJeA3-X_N*U1vnTuR%q6lWMMG(*1$qRy<oFeim{6CvsYGX!i*!dG&t%$=Sdm&hA
zEv(XOMrYU6qR#m{;fTjC3<7nD6VLjaRgV0auce?BW6kE5uJB;qD=0cx0bcK&!ONx@
zSAAi3|C4|D^mKplV)KgP89mIA$+*K38(wd#3$A89iAj5{sAT>xcs9n6Zr5P^!KmLj
z{;CZ*yeF73#fM`{;5||2I}1p4m<#`!)kw0cF0FZQ2X9^sA!F>yP^yjjTn0boPtP!=
ze!FJy>6nXx8y}r+Pthadtv$S8czL1ygJ+y+ff(k>F>Yx&<r)L(;mO`g)IP$xr%9_2
z9uf$UGNpneIR&4a+Q8#p4qu!xo?Ey~gNRRMbGp5<w0w;eww-y$DL(thM<@Jcyr_jB
zms^W3Oyo%Fg2CX~!_Meay<odkmv*J(gMXa_y~X=@%j4EGi~TM}9<0Z~i&co>OcAah
zU<~af)<w@ci(41X!J5;aoVZPyFIg1^+aGBYz4h``{9g?xw(JsFZq%UFAp>B%^8h-P
z+d_Jl4BOi~z?$g-y5gl8X-@tJ4wV|vQ~nU<N$F6{1)ZQ4!g4C(A|PtgF*fHlK>1_G
zas5NqmrSyx4}GlYA{i?xf8{jW?TdNA$^S%^UU5)fa+o_}tW1p^wxO@X0mdur<vURy
z<I-GN4*5Met4-irrk>|a&DQeivlt6*k^@d$bq{7v6%fNAOL3yO5+0OB!@jKN=#u^x
z7hG2$%L<i=^!Ww|x}7v(M?^nL7iU7)^ei+S<AxpwV>z8iV%Vr#3B{#bM5n{DA(rK4
zA6Xtp=e_$tm}1BsuP`RhTy;pXV~C{B#FngDCm=qkhzfU-ac<o;j1pvn&x>f-bk2}$
zW&7-&$m8IrG>eaUeF+V3Ix^?NJ&}8M13G=`!q=l^iANH9?|WF#8;80e>g78;(x63G
zu<SrkU43DJR}otErQ(k(%wHj=g&t)(;8|yl^*?TbUnAR@Mm`XwO5Wh4xoH^Pvye}n
zJ_>(+h{GwrPoh>*DW+Xthf_Q$w7l-%-QKOhprex{p<fo@^-=GTMIgE6Nr-pe>XG*A
z!SH5U5XPOk1a9neeWfBKn(OjWXLur9U8zRA$5o4({0>0R(-#oy$mSR0B`~v`y%P$5
za|Wil=rm&=>hJ!D&dH_B#Td#r+#}$SYKY#dt)haJVcfxBAH1dzj5q(b;f<<%uwA|n
zZJx^!rK@a4?7v@fs`3^VeaVB(t)rm&Vg{zX)1eV%hBPE>Fc}cYp|zhox|9#13023y
zW9kSv#CEBZIaQ+g_9R&SOhj+{Hj(R;NN$=Zo5^*Ba)N*-_;<rREcyKhDi`&FTWBV(
zTYn6HdCJhL^O4Yfss@wN<lw|DX}VnU5j|2jz?N_Q=)9tV-|RHNdfD#0eRLwIwhQQ!
zOS(ku+czlLoPceopYW+V23UP-Bu01d;Jk|qM0>)%ps_ORW}Qev=X={Ff;)#K9iRNr
zBmO#f;bIRuG?~yTDoOD7X*(8oui&e)hG7<c!CX-f;J`ao`aM>YhM8H?9VuJTTrLUR
zRg&SqBp!n_b0mTKY!4Iv0X#?)Wb9#i1!XHN8Bn9yYMP|eSp_qr^r)hPBAs*K7bf!>
zw8-ur^I^9^=clb)J<H>^`p$rFiADIkE0g{I9VODx(4v;p<>^k=$G7Ym4T`^`xS|V>
zAR{XR<H!}v90<mFOEk$*HiyyHm;upSGx@4gBb4vD3Y{9ecsm<ITDLy|PJSIsO!gVj
zq<QY(JlvQo-fzHsuowBR-Gy*uvo@_?a})He`mo23u{6$H;CAcE(k0{0;qLt%sN?L1
zJA@P*$FZ5gMolW*vWT-ZI|)D8@91ljJE*zx6ckWZ#v}{JhVW$0;hY-|u9qU_n<xfd
zA0f$8J&C4EG-*S{XHL9UAIp#b<IQK=<F3#<Xl}!};tSeAr|>Wc?#$<-$2M~Pv)?dp
zjV9cFp-#sBRUp+7wQx#eLTZQ<=V$$sTXW_!q@Hkx8WjoOtdj>$HLcwC`Nl-)u8^9(
zvZ2ARb+IS+1QdI=@bA~l5W%C%&{@jy^33~DllYp8Yw7|e%Cdsq5u%O$Phfw(0-nfK
zqo3@x=xNsd6KtGO_*F@pB!qtFYp|Pd($oN#EeX(YsEUir)1-<U-t$NPoPd~X1L%Dx
z5(SzU3w~S4(nV%^gs5o{m(wTtVMj`#{n0GeDUQUAU++TO27t5U29cl+GnBu@{B{d|
zVz;>!Od0i>IfqB^Zx&e+P1=X+&2#a?wYTuHy$&04*5PCy=1u&gOI=e2c#n;$kUrTA
z0^9EW-(MzrE4~g%GG?@xo)mp%{k*fbX0&^R4J0fzg-(wRpz8G)izl<0QFt!2<euZQ
zq8_4o+g)f+JA%(xSIEJ2C%DdIzrT>qd$Jkl0M&(ptI@oD{RGGue;>Yhu)N`+LEsm@
zpE*yzLgRJjrli-PuFDjwXZ^$d7x%+_i}N5(t&|juOXEMgmB6gyRe0g$GRD3u!|D%D
zaP&<p;@{7h+deO0P@EEZ!ZPZD!OfC{Y;~Hb!+elGz0l*XJoPbN4iDWsP-^u}9IeAT
z%^?d>e)=-3^e+V4!4iH{v@|I`YQT>g8;vCk9>dHP%A|0JDQWm<2f;RM&VS34?<~Cz
z+FNz;yOe<1ouFX!TA6t66l0()>lzWO2|+hb7e0UR9oH6D;xR=_GQ`n{1QoA@26<U{
zkWmhaGv7nLGwYVj*$d$T&3r&sA@8D<igOPJ!ii;!rIxczQmt18SC7fkmQRQA+Gi=6
z7;qQju5{u=^NXn0{S!N5U-2oaTGV-mC4^b((@)XHq{(Y2<1Oh>y)~ENU}Z65Op4L*
z63g<=u!aHmSn&KUfuZ*e>5Mbhv|Or?uXX7~zlte*N24X({lx}H)(dD>mpZg)uEDyK
zVknN!<4aVN!TInhiJ(0NHcrWfo5vKXx)t+Jz8=qee@z6HHJd^4KrFPylyX7`J@|Z|
zF-^B;a_ydDFnU!g#IT;YVDo9t(YKm)1xv8FEfiYjr%39LMuY8w%eeJa1k9gY0+lbB
z$7*LQD6YHD3!fTtC)Nvy^Yd)(!ihXsqM%J$zFg)!wclV(cR#n%^*4w|$Re(lp@AxI
z;n$uEX#ODq{i5@^QFjxudQvj$4C>Hfqx+%HgmI`d?71(=%rE=lLxG5Eg*84uVZ#G8
zT3kAwk1!QsonSLet5E=Tdkf+?`U!k^zaQtY&cvY{E3(N)n|K+H1M7rzXtB8^!e0j<
z@4`In|NRusF_(h4x`A_54S<c}UaZ;wjo+ZvjRAY4prh&w<aiZ9!uK<L@mX6gXZ#=V
zmfr_8uWI?HzQ57EaXhY1c+Ffz`qX~wSCpw|dq=exY#H-dqWpnzKIQar*4xK8yf`1X
zUL8XJhJ=7lY8uwOAIlYpuc19-z{|Hh1o6uAlDf10(7CQ1A4K+pib)jsOxh2T<sVRI
zUMYwb&q_{sDwFCff57>r4zxsOb5<#7=)GeSWG|8@ipD#zF}@80cK2`tsw%W|b2PZI
zU2WIXT5u@yh0eLtVKZYwM{nNA3su*H>=p~)WR7B1&3>?Zd=}crunt|qgYo;9mte0~
z65nDYgP%90fli|r7MqGB2Tk|lt1JUr)$NXPF7nhVkmo#0$6~p%3OuuyAvccKLTX4N
zHrMsyL9_pG<j*9WyO3oI80Y!F<0@3$$(APUwShD3W~AiY7ZCJM!-SOO;5{=&q}?+E
z#LCOy;2k%#?@r`Ht!-(O%MVmot53q`<?x=Oeq8s&lK5S2=cRA`VUGD>yx^L|$yl!#
z>J$~JqJcIhb&bV9y>2YO|BG|DIRpfgt3`rC2_nC>w{W9T5bk|?2Gw5g#@wTpWP`Z^
zU6rm$TMx(JL6(26`4!J^wR?%HhFcKBw~EwYNgYn0YP96!1Aulvq<KTApVKMMR%bSV
z&3wR6d32o9hhBYlxM6BE+L)=5fXQL_C7Ja>%QSF(#T}HnY)os8@8uKrMDXPY^7wVL
zRaoY-37i72^Wq=7x&0|d&~wricbX`W#q&plu;vqQdFL7S2mgh*tlN;cn%$RD@?g%%
zLR?T6jT0{n!00O%Q6Q8W?{Pr~t1C(%HN^wupAVudg%9E9Mjc{3G!I2xm(g^u66+gR
za8ZNVztvqE?5-LD>vuL`bcreVj@<!YzsbfyeTrn$u4FW<j>IEtTOsjK8v17`(}d|l
zDkaAl6ju~!3F}xsIuXh`Y6;wdat(6J`3|Oy$%jY58f4t`lNc3~2Pt78*tweh>eX9;
z#kF6k&gqd`{(T_cti;EZD#DGnRyLo%E2;bP6<bo@NN!!Rpfi5up~Isc@K)_OtYA4)
z^$~q2{IQAit0=??Y=;@RiFF&6R`HSA1dF0$;kHPHsP9#!O|?Uz=7uUweET1o*L{Qf
z=Ztl%Gqms@o828ueSsY{?7QoD48j{P@s=~3(79;^?|gC%zve+DbdK-GYm!b}a);$k
zo@vwKsmuA@#ZRGR_yFwqtwM(@OVM0ATVg%@EBs=2W5b^^&|~x%1QQoSonSF`%BXNn
zI_ynCTfkK18dwcjQ8g6}YWPqLer~7vsD1}*{MrK3ew1U&nIJG<cb$1@!y)Q157)O2
zCiA1t;>5XSa4YUEDz^!U*fE?}s?(;wr7Wq)G!$jM6wv-rE*4KTqG#CKLlE)<x4wP?
zxt|%!sy!BdXB(0WFTO(D0oK)32!c@F9ZxP%Ad4;=(Kqv1=Wyk1e&ye%tP{EnQ<dk#
zyGw(~;@j+wv_AxVq%Y#mPBXHtU4^9YU5(Cp+9D^NPn=Fj73;2B(#6VG!M4x@T2z<v
zxoa(me4`v4qjLyP%N&NbsC`f{kb#F+GZ*>UH=K!<fCjztU|G3V*eYoTaE`)Y@73_u
z>I=496+;K}6yLHaW8B<ybe?lS6h76S`Fl5EU~wln>=0m|kAUs5t!R3c8r-s4fqPlT
zxyvR8i<>{;L(gD*uBJrPu6~9+QtY=vSSJb__6O%PhN<5<j&q5uLo1mfq<45cFMV(W
z&W*T&yF0RBOid~L*wc>EvQkv?_A|~Auw4mOVt&#I)V*j#LM3m&?(Sc>`cj`buPhOD
z7X0CbO8>YKY3IO4TY)yHvEKVq<|B#ife)Ab@VCD)T~ihW*`fOM)QlW-p6kZxeAT7;
zgUfM{XEp{tlc!7C6^LnhHrMh)8t&ZHqLCJ|G)K#pO7yc}@OUFK>qs^Rg^rN8Xdi~Z
z%mF#A@CKap4#E*(mmrTZs#ounr|AU|+<Ld)P!Qn)kz-967n!|(SvKReVFF0!et>q9
zZk#^Uh8|SXqdy)S5P`fie<m&-3%0%C6lGMwe#=&Hy4!|o2UsTkQ73RL!*SuEE$UUP
zK<6||&bg$8JJzK_CmOxSLw4%qrGT*svcs`{EX%t0)?s+Bgtt8wjMvnE<JilHTchew
zX}^#<pS{PKkL`!vPv1D(0>)<4jevpUUCb+;4&$D(b6A;qE5_Eqfyz$&ta%6WBTb37
z-a1ZT@mezbX%0pWTZ9RZ_u}tQQlz1NI*2da63uQ*hXdDh(LT?ae|LTmDNfxd3i$Vf
zJCIj|b>8ccEE!BrXG&4wj#lnwjs`g{Do58_!Thy*w^8#{2<Xp#js?4}p?GDg$Ti!5
zo7cu>j*|{c^yVqx_-;j#{`W9%e)>P0oW-+#k{R#a_K*wM6v1ylYDG?mHGtnP1@6|<
zzW_s4Fo%UM@i=3It|^bO<@-6wghX9xcefvk>w85mKP~WfE#tIUtI(XoGw@NbIT`y%
zjij%A$z6?-Cl<4n$lE<<@zF&sdh%=^p4)0h#fi<5+5?r4{nLPszphN<{)>f{O*#;)
zJ_!0$Vlm|DC(PS@6}#^rgsDfr;+}#=9GovhPx(rdBaeGQGSZZ)#U{eyuaVFY_XP)y
zRHx}noggegg{)=_!_HMZpnGf*%oya3#p9n#iu7^-!Y_fn=n3zfB~P#amLc}f=R)Um
ze}4TwDf-eS4_&ARhNYQNpKllO=YnipvyNpJ#-B$SD8#7c6xQWS(H#~x^zEEh*wk<z
zCvHf>br+4PbWlBLjxJ_C$#fWh(1->#MMzeZ8Isj2exqQiCtrLl6J$ND@z#THP#mfW
z5C5>a^Z^SpjeCQUzI_<`Q=bGm?-wNm@Vs(DC~8Jr!`?^l@y{+LGUsSNdQUsYo!Fqs
zyvcSFC&_Pao4X!~zP^RqD;<QE+x9|Xe<(WZZNZ4}9(Xs#j07cybI&Cf#P=3Ehu-Qy
zz{IyWXs#h$e4!D&7XQJaj8QGMx&>Q1Mxyf+Bd)W|7}X@~-0RQamuZ-gpe#pjdWJPs
z=rbcK{e$5}GxI!_v_N{S7wEbE<exFVx~M%Ae%Gmy7MbbbofZ#vN9STinHI@d_8mI@
z%F$Jml}YE3A?Q1PHWVz&;}UL$@YA$au<Yd(T)@6hbNBwjR|ZmK#uPo;*^tf;UtmZE
z{bM=AUp@Heyc|v07Xijg3gGXtY^Z0M9Kp~va3W5TNIRO-(GCJK@j7z^?w6&`ZqC>`
zg87*>qhMR66cO02<<ks5VFLG%k4_!Sd9@m0+a~sXw)u`in|ml*x0<o9lO?9tm%=-z
z3s_~XM5m%Qnbl}T!h+Ztq$4EGTFR1~_pO*X<{=(t8OEpYWJ$emDO`kL>>n;e{f;{G
z#fO^N-@yV*FFV1mOKfhg^$8=JWYPJM0d+O6<cdDl!tS+~VFh!Buen_X2?BX6em@Po
z>T*%c9IV3!tVwMMbLcVehU|mgkiE>1EE21bxGjtaJVFm$SVlEpE*|@48<N9}x72Kj
zAn8m;#k(h=Y33z}^7g<FQ|p-PlDWbeAGIy>8S5YX<0n@zUn{C%^uIy8i^op%SaF`^
z2EU4YTf<<nAqVO2=YyZDBR_Vx75N<c6!tX9Q&{AP2jBe#AI2qhIX@lBGgrVGnHre-
z(42ggF(IPmAHdV*6-<2c4om|F@z1{uBF0XPRZ+Hyvwu;CoojFL8|soU;>}?C+tr%n
z9@i$eH;?m)%l<;~+pQwSKNhfPkTQ|=8I0>acj9BocN~AI1jTz7^T8I^us@Tzy^S`5
z+Jslo?7IsESGS0|nqOeCnWw~m%~1B1&w+c%iq!3(Jk@D93*qOp_zb)Re*K?#pJHPw
znAIh!_c(wN{}{8`D3D9IbB@n?HI7eHQ=v`rX3TBAO46FGOm`^xqO|ZH_F7i)Ek`pY
z#R-0*H-Sb}_}m`b<Wo3Ne=msJC-bqEgGlA*h1kU$@*y*rm)`g_1eq00_-k1RqjH$1
zWb<Th4m&f041y$f|5`vO)x*7GyV5oKJa~)>WI5l8LZ?hIlpnmpo5k9Y)gPi!VWch1
zXk+j4WhN6$&9nF!2kOAxtQ=%D<DqUw3bcJUqHQC>a3Ski&6OR5M?M-+yHRmiRknjM
zMdZ<W{9}G`;UG4TSE9A$m(W!@5J!91((e<M$=of<kdyfy3fA^=3G@B~p<X?Y`j62&
z=)2^DEaMLi(uNtLo6xo(o}c`Hoy#w-aK-~qP?Gr>);~3)YcgI#NQMe&yR#Wq8pc9_
zSs_+c4&X>OKlOk37lmsXGw|+5*y~`)JSvLx=n<B0Y_!FtV`Fgi<Ve(&{f6N$?(jus
z-;wu`p_{F=!Ev+&ddX+Py|=;4_c{$@mQDmyW5U(R$TP0vQcjStM09pmD|+e3upPZ6
z=UrLM@0Mb|<adEk+U0`YX@}4$$_oSy3B0XC4>$kRPP86(28U#-5~sC7D897<wHkGZ
z<=AoX)$IXhEO`tKj_hooG@g%EkcFngLYz7|3nM2n{^Y^IwAAGhUR7sZ&oz(HuUeHe
zd=~@DD$Hp`y#|@@WJMJlexS#XuV_C;pO5~tfVVW7hbeolFv{Q;j4e|pcdD7=ie*;a
zUo?WUk}B<EeV#~%A87ef85`ZdVsyk%xS(1NSu&1j6JbmsA{54dS7-PCU6?dWk9h`P
zfxBWg>u}A77UfvjcSM$0y_F$#%=u(LMVV%~^>cYoR>1XK6*6ASl5YKj5MsshF&!$r
zO6LJAO<-;m`;EL5a}0ZKISdv52Ge50<-D8qM~K*RA7}J+f|va+2zYD-zLyg?f!he~
zPhTgF`4`6SM2;}!!y^2ppi3R>?xD-L8KCXKSgy^pIpIF!j5OK&E=bAIzB_?GBYhgZ
zyX^Sp^zZO}l05xmDo;1N*rVIBjo7%(1@~^hirLbiaOlZWEZ-T$`YAf1X$2NEXhXKd
z^xk1faD)QJlq~{@*#$^EqDZqASn>y8028Ks<Zf?yk0*6(NKkv#1dpNJ{Qd*6==^yt
zX4$0h!lKF0WnBR!tS>~IWvFtA7HOF451-=!z2<C3s~t9s>uwE+o9@BqHRrL!bP)Y@
z-k6FvFXJm8eFw*w^Kg@~f7*i#A$^52G_;z7{k{wQ^wLTcewU}8#4<#^JQ`zsHn1J5
zEz#dr4NjTKTyUHkmU>==gL4X@ds`INsF+j3p#~6S{e8kf+<0(vW_Ke^AG|Y7nJ8{L
ziPxW`Vr1M$eB1K{g~wdrb(VmvW*(37&Cj@|bBkf|*<B#0*Pakue;8K<GuPfsD-x8@
zU)X!v9ctZ6P$hRGXuQ9Kw&D6vdH)KECfZWL(u%^8yl&iFGMH}G-wma*KhWIh58QsJ
zM}CZ8^J~9uh|Yh(UtOz9ymeNi>)lc=_(VTWL4-B=^|))L3Mtxk6Po@T0uzP~qNb(n
zJk+j$8oO1TccdcU@6TqvF2-<pyB^U!YC}y`KS<VTu$}IYa%jxWLtE7)&{E_AQL95R
zh%Ky3w!H<|&-UKuWNC5Za>=+fBf7Ny2r9*o!{+)$n5f)`-Q}~uceNhK`0Eo}t8aYf
zodWzZCl}jR+~##A$K&S{Pw>TjeQI_p3|(#NF=E9VDBdkiTMF8F-v@uVrbrEFG(w0y
z$Q<@fn)KHW*3Gr+!TMK!V7GHCWJT}hvRJ0&@57g@BlLxjwh86DJHClbHRJe2O%<p<
zEfH-4exrB8QOV{=c}QQk8^*XkgxO`?5aC-4_U|+J;U>Yjxcnk`E_(}Ix>0!kvp$hO
zP=dnfJUsa9Gz3l6mpH#&C$fsyCI-7?N#~oDT*ko|_;e=<b#|uX#_1z5=w&W<wU7DP
z-W0-!->w*=E5sk`h2-TMbux11V7eydEd*#=!Sf;$5>qQjmz_#y`@9xBcbM^Sl?J%{
zDMqvoCc>KPZt#lQ&2|`?w91$D9Nuj$^fy`!#l5Q}dE(WO7>Cdq&N5Cu>kECkFUWNX
ziEDTc@AY&q-evQeXqOq_oNyT%Q{o_`%!=x5PUf_8jzHX;CfNN&j;?Hpg<>~0U*2~H
zRWcd-yR(D4cH<+?J>>=s-Wt$0ECF0Ze>p>?nGjhah4LMx;JsBBvbW1q+k_~bHkJe7
zD@Anu_7D~9lCWJ*hx$#d;<A=xadqQamU`Z8Oc*as?cT}J<Kv6b@7-v=h`Am#k=>0?
z-vdGTHdNda0MqRi$;yfW=EAN9x?Ydm^j4+)jjPaQ`6LVsm8UYZ6=*^7b8f>IMf$6`
z45x&Kz(_+~666;<q4>*C5ml6;&abp31BU<a&2MHdV>^+<e_PPerUBHHeDHpZ9vQ~f
zV(INaxF#bVdY7c(G_z~W_q`6Hk4!^hy*fSX%KrWvj$(J60Vc1MBaV9|jCobW8!c5N
z<==JDM~&s4QV(P3VjX(myAAQXZHF&7QwrY7wCRu*${ySZzNc>S_T`2Ag<TKfv%d}1
zzuN)zOaDNXKFi`&C4ska2^X-joGU%|jLp!dqtd4j(9{<J-j_CW#)AexIBF(boREsf
zzgxk(U>i6F)j{bPbL=)EAP`y?jNyFnk<|d&d01f2f;uowcgL>(zQ7v|X_9_)0`%7+
zOw3-0Y1z*qZ%zPo>YU}^a17gFHS!CTrHN`8d-Lm=(3206aA%ASd0^21D{3U@QPRil
z+^9yGT9z-?mBIdD%$=}(7>?bpNWzy_aUSNEdHGT4DBhQjPI>7t#ZdxAcX~1CP#fR6
zp$AtDW0?iRT+#0^Jrelz0y=kxV&3{aa9N~C{SR%y{EbHBV*q2L-!p`hms_x>stnU#
zN?~Z{N3_&e1vUM8XbsoHhI8Ryw_b_XOrOAVHJ{PevXB!hzQXM3SFo+@3rfyQ)2x4+
zxw=5cC^Dn0f5UQ&uO-|*10_24?`aIIa6#YB3%vcC4ZP{!mt2dUB3^YfA-_?bvDf~w
z?#pcc`wMfbsqzrs4b~(#{0ykQwi<B9a`DvFd$@925)2!KkTB8+>m`iwo6sO~o|q&O
zq<a)JSUuqdr%v(_1AQpCoXF|3e!DoOS<>5e4d<&?VPc{b4O*xr5sauVoXU7GZHKD(
zVa4~MNih;Swy9FRvIN|JI~m*p{=gpQyR&3>N^j;R4C=bVJ0EYAIA8VTg8Uo#{X4IM
z%hnOl`!)kQ7bJ69@rpF3n4s8l4xG+nd!@1goO?ALJ<DqFhlLW6cN&0Y8|27Tc@5&U
z)DQ$w)*wFri+7#>h`;8ghT{LC;oL(pIxQUFit!mg@mC+j_FqNW$tlpA5X=QeR72eJ
zPbm8~7q2AQ&^IG=NSi`4Cvo|S)w;K#XtgbN?#SYQGv8tO7!BC;!kC<_2*U&h#2KUU
z4&qMbLY~z`u#f+V)89(bGgH{_>-vk39)B8y%a`)fu3eC_I}J3~<v`KyTo4y8<Y&np
z!MWnI$WMKar#(wy>!A{8@@T`X(F<YKTXWLl<$|XJ7$;uHW(q7HwPRcs21G01h@AqQ
zw~b|ff9is(S~(w|$adOvFowEV&>^$TNzpsz`>R!?l^ta``|lqpT&qf2uHBQ=NY-<1
z_rK$XtFOVy_c8id-bU9^x%{F5OH!$epi-@hv+IvyPwgk@l{*H*Sf4_@&726LU7Tzk
zk8!he5}}MYqg6Ba!_o|0s`1>M1hmft$B+s<YE_HURehK=QlGsQmt(~yWzrWPhK3i_
zz$0@xLf{^h**%y96`n^ge@oo{%$kHx58~x7U%=`A6|i@;1?%PyK;w!pIOcLM<Q-vm
ziy(cB{HFzOYkuLdH;fCv^%lPJV=li;4HESFF}GZ7PTUH<V#oqH(thO~+HJfJ4RbGY
z`{%G;>dxD|enT8o)yRX>W|q}gVs0$2=loDN_6B|D%7@R$fc~2|*=+M0zxZV`>|d}5
z1rx=b;M4$TckT?N+St%U)h?E0o{7WrQc>w~Jj`TsfOPRk6qiJD-K!SDl)4!3w0gp_
zFok^jiwbVc@{3?rr%T4nxPo?tUF^>mfm_~XNT17c)=!#5C!VsN-$!XOWl1@lSrUgC
zMF^AqS=M8W7OhGs!-)QS;M}rZwD^2Eh8xU=aHlxvl-q}CtIV;ccLv{MehjM5Q|1q!
zh%NqCK-1(rOtfY^Yw>gR`E800M^qr~2HV97Wx3rKo}vGCmX|Af&-?AI<aL=xwtT!R
zTz6-FsNmP!ZtoVx1l|MJhiMXy<uzL8F{hX6Y>+O`h4Sb2kU!6o+Pz?$1RXirnQ)H(
z_al~d_0;*J{fvwKgE=cUn2@(4k7M2DEvP6}!3VAA<%&<Rp4{BqSeL}k>A<i2^`^fl
zzaRlNUOW$}Ge<#I(M%4fjKJ&9q=?S<HV{h$T#%+vGUjw1$fvO^+CO$CvA$5?^usWP
z?H`OM{(>*chUCQ+8(Pq!jkY5Wf$%>Kt}SIY1|CQUAsV6HDgiZVzKUB1qT!V233~62
z6gj^-2huwWz)73AnPwT&bcY?dY=}PDb4r88=*$3z>r<dJb0zQcAP+-yZe!=J@4Wee
zX1KNVI?U-dC-TWBVWQ7XD0UqLDdDQ@-u01RuYC{1dY94Z+CL=Bw<<m!!n^b@fsDuO
zb3U*MHLP!9=eTCRmGMsJR2mc4`aKX^WJZrnPKJsq#<Ch{fr)-Cuq<p4i5C9CT8rO|
zF}xq^|N8{%u9%SAe-@-1%ke2q#kxhVP<(eRALS4W8S_)Ye`N>=#yu?bJG_#sS9eEe
zlPXDhRTqDGhdzmA^DDi92b}E1K{R;cXvit-1@mYL>(9laZKN+;OPK}z(l=py{&h^e
z`5WvXKH)!beV`-r42J(yA%d(25)GCax_(K5kLR1v<MJ%eEiaEAiFv$zoGo}?S<Cl&
zDr1D%KGcb`#wGJu*X-j-ctm7Lhe!+;m*|jy<1bM~X&puu9fUTK95{4s!6UgBFfIQn
zRO`({k5eW5pEM(Kv|ECjRa?<`e+0X0goC*8t7zcTWN22DK%Cum_7;1EB|96j^L7L#
z+<n9AvA6#E2$q9Z5QrAeOTfG{0_rS#7;FC2@Jn9nk-X*<2>i}6*tJI_SJw&Y`mit1
zo~@1hirKv46YHyO{Ka`18Bv$TjeG*j83Zo-3dt|BQ1~yL6BXZudyxrP)cgbLln%qN
zEuSF$cM`uN^Z|CPIfUJ^THt%<6Q`7$jk;{6dVsNRlODQ)@KFaJYT%8Z>K~)|#}brE
z(IUmylHuSFRa&k46kWtwT<YUdjN2?lSL^5!!G;%-f+du9x}b-hH@f*LEm~9%V~o27
zYmtAeg=9xJbA5R=F%R$uoGEKS0=I6#wvcmtfa6kd%{#%ZocRs{PCVhtSf<T(l@zE1
zzQ?n?9Pxa42Le95<ueZbLU(5qn*PcV6>e3b=}ZN##quEM<TMpj+Qz{}u>#4zp+q|S
zJ2>yMI{aI48T}s%@tL9t(K{E%ag$Ht<wHvJ){IKnaJ&r%E^Nk4`D~6SH%ubbSk3GG
zcY`-J{sC`Q|Dmjv4tCWk(lEzzjEo)(YC$ZcsHH_Kn@{1bX?5Tv<%7a&7x^rc6<p_M
zf2f^P1kO7?OGdOT1Vvd*OtsU5PO%3ZlIT&prxo}iy#;wrNG5)K06`(ovE=e9Y>0k`
z^6K@ld+!TuSbv`T7ycQ)<|kr*-7T0hmcqEhs^rXSL()<=jc<?lg}F1-A<tVKW~_|^
z1@>NA@r%8aTGVMnavpc_ur<l<W%oMAEPgj*jn}Oj4jqhrr{cW`onC(96OD4g+VLZ_
z4`;jRm}JiML<es%Ntp;Fe*E+^inRJ+5x@#v>YvK8V?lq>HE}ktGn3`yQhuY0>Luom
z9xiIQ&Rk*Cl2*pH;0U`yY~!ZGGmhPXy61>SxgE!b`2}3w)<bZ-p#>u#45q2*(9Zk-
zycnTFD#OI!T-L~yI&Fov4t1;vy#WFBCpaJBeke>mgNnIBSk9lhJdSMOB4;gt9opOR
zP-j0zW(9%xg9|)5t3_>P6FB)r@>Eap7T<L@f#vjUuw0%%oPHxj<_KY}<RLa-KIi*n
zH*CKtLngM?LqXU%e&YD2keHeUu`6T=?pq2AFIkfyv|udXBDnF%7SAWMT*!X|Xde6>
zLeiv(^9VPT%<2cHD`|XxW&}8Ww`b0X8b0A{As@R#gSLiW$8XysP%ZQYINxI)dy~ab
zxkrTg?q)PFON#~$dJ7+#;y`-jVNB`NrQSmpf~k*?ACWp31pam0hFC@7JGhE_>Tgb7
z=C@$Uq&_^Q9fCd!!y(bX5k5T}MCYs-MC_Y~a*7&1xzFrwzHhiPsaajm3vEL1V_gno
zo1NuLoJ%3xL5nu#nNUra_xLw-5t?o}ERlPjkCFM`@#{C%=imBo!muO75ENY_ndz@g
zp9ISjyNavecDWWzLRq(V?nPc8{{-m;#sqNg!lo^HIC-%e30oY6DnU9}JR)4A%rYLc
z54WONKaP7+e1rKMgP|s47zjsuLb{chbMw9f-@<NUL2?~`VY(`v79RtNDJ(a6SexbJ
zjj82Q4M?e(h!>ug!cH40l2CUYss}}|eoIc_ZS!+1<Hm`imUlx<v<)T=cthk7U+8`;
z3m$Xi>9EYB5Wu?g^O%!NJZlFRG&>Zmx84Q&tOWj#n;~f&c@f(a4X9uK9^`)-6VvKi
zY~K6>s@!ex&sY^weD@Qd-c!nDeq;Sh?EtW!mdWi(P$&Q0@4@!{=b_>343L-$Y4*Ef
zR5Cjcwu#U9lZs`io#6_epAZ6mo#k6!tiWrG0ik0w25Y}DK5gQ4-q&Xpwz?PK43od`
zN7I&C<}xlT;}zxUrofJw$3dL;8NDZK@TNXVyhDIHCMz*+%=sahHM1Rc<|Ec6>e0;x
z`Iy%I0YB)~!;#=J(Ed3Oyqa54WyLvYoErh+OXv75RHVf<cO~(xul%i2NZN0AV`u=&
z-TDT>!0b7gv`-KJ3jV=HgKDtrS&xLdQk|1xdF(p@!WQQ1IBmf=vKuEPoH>l;UlbvB
znKWrgQo_XR7W5+H>17H`$o$J}-ng}wQ%sEG1j{reHOV*NEn{a!_vdgo9Qr}=Q4|EV
zec{$lH7A+_Ce+`{1k)o}zKQV-e7k+Hw`2)SKYb00g-H^*FGlpoVJR|*=+U|o0sWaQ
zPuwq7;j|`qbac*x*_yTJ`azzWUcM&kUGa$@_P7;`RBGYi6H83>mcp1@y6j#pfLfz%
z7)0_Vo|Z%L)$=GEWS~kW-a7+7*;{VnzF6EDr%J}&wIr?A^uhmV6xds>fqu(+xY#5h
zf0kQO+40Mv=93v#PI>@(ar@ylI}gUbw;&CMe|W3i*5tHr4TR6>=ekX0F}-pS<mD+r
zo=+tFYgHqzehc`lmR>HMb+p4LB=QB)>AZ7UmZ)@)K28iYr!CH}aPOsb=5Um7nT2K~
z!ABN6k6nlH%-3-AV+8i>GN!+o<E(haPRY-}Y!n1d8sE5U9Ex)!68Bl(poM*^-s2;<
zKaUlOq25erII<72w$0$v+Hc}z=E#h_CQY5Ki$odA;_!7Pb4Z!W5s!PBTvO^1kX5vx
z^{YBjqEQLU8829TSBW2c*@pPn3h45*APn{Y3SNizfM|6&c(+SL=7}s*p*6tGjb4oj
zzU_SR(dV3nk3MzkoQPRvom}1iBnWJ}0}Tt#^5I+6A@zwGcrQAPn~lt9Z+95P%CdP^
z&^4A3W#7LLfTHEUpeA<(>`0A4|F7j}y7VSDlevw|kD1T|y6oTgS2-`ZI|}-v@?rUS
zP4Z*q6WnL6PINk+LfVR4Ft5DH{9yIG_}Fk>MVDpu8Wy1M+Ir6M?+p+bNzulBZJO{r
z3@yZ}WZ$$*WWyOuy|WVRht=|@kHte!Kmex`Q3&f!35en`O<XlXjY$19rJa{sIUfPL
zp9j3)$_IyX=Pw$NjIR$M;_?j`^Xw<g^0Oi#(bnYC_@gLVp9{9>W;7yyA6`&81N!o*
z5YF(p!rfA|V1W=meP(aa(Mb?<dOR#!FqoYofg<sP7?E|&UEl**2K0~?NjQ&OMo|G&
zT+$|guC`-lpAu;~8^;$_b-*6hb9U}i;znL;#c=meynUMj7>}xkDdBQ-eeDzUY94`p
zE6TAnQ~~FD>R{2_B4{5<SVorR@D~Shvl)N3bBqihwQmRNoMd;$TddOZ-h#0{yt(8(
zX2jI(7dP;gIYvK9;KVZl379<;*9Oay#JL~9?V1L?oghmD(z@KRF}n0Z@(XD4nFdGh
z8_+k?G)cuA8)~d;g^?SFLb~}dSR|`SEE1#{Q<yR33fjO^^)KM!jj;LFXqYjKx!j(H
zV_qN21&`{3E!zx8&HLkE9ajkHA38ZcUz$9ebOd+AXTj+c??GXH5pFShgVjkXAS>Ap
zk>Pvb(kcTQE_H<~^2tH3cyG+BR-~5)X%MB6IdJKm0?{jyh6j)Qu=sv7C+KQ$ItqqF
z^JEtUj<1G8iw%j*Bz2Ouwg6V$QKN#OPh98MA6(>}9-L@;2LpRFs9<cR=ptiJd$8H>
z^^3o;=5_^NYHmZ@sv0?ud5)l|mj)Fk#w3(E&~~^<aM2bCs(!tOI@Lwck~9S74vxTx
z!3iMf-Bge;A)HfKi`dztgho4+NzjQiBE24WP}+7E^&S>-ZO@N#1tX*Rhkd!&nJ~cJ
zI8Y0QDeJNS9COPwS<#i2MUYy<dd}X}SbR8%6ZF4!(!bP#?P>F{(&#VbY-Jv#$2a+M
zB|C5*d;}uys#Eiw_nALR5yFSRM~h>bIKnU-wDR>y!Su6S#v?WhA1skrCw_yU4oY<M
z$a9!lwg-zs@4<RqAM6`Eh<4~SpzHrQI`eocx3>>(^UO}>c?xNgQ-=NAtD{-vNSZWj
z)=Z<LX%kW*B&j2WOi7Z&e(r@NNtC2AG)R&p(WsK%^?U#JM}4-(e%8A0@AtZn4Du@)
znPX=~6E6Osz&dZ`knf_$72FPo=<{|kp1GK(jGu#|d08;8F$J6M)N%$|_rPzA3DunO
zocVm&+3xRGyzbT-?E88P{eGW<riRIIRl-7IPmFL{;8yT<)q<96<|>(~LS3bT7#l{J
zCdLM#AT72;DO8q@KhI{(qX~)~&Pjw~_MR;~&DZCB=7vvW8SX1AAFnWsw&mz@Uj$jW
zc7_?L$iD!71!F*oJzJ&kW$1192rN}E$LRS7@Q+?4t}eTa1@S+iT&<21v}jR_#Pg69
zJ_2$_h~SNPI>r`$fKPin;V=8&X!HtEb<GC$GYhCND1?rg$8bUUJV?#Yhu%&9>-MMf
zjwffLV~YeP88zd)W$HxJcMx<Io`(UOpZqh+`DiU?OFKSIhGh?KVaQGwkoRUjX=_Ci
zedh)9z>c7K`ql7o!U!^x^~oj;^}*#&I%Mh1H?W1xPHZDm@b30s*wq||4^fG{+@VUI
zYv>cXavRz)G!n$WWw_Tb^vKZZCT!Hyr+e%yXuMlB*o|^W$($>o<jpcOljAY_k_wS=
zmnNRG&vNP?N<l2!%Li67Z{6}uBG-A!+<3nZ^voL0Wmy<vY4{bG6nqZqJlKhR%x35|
zdX7SaGEOXemis<jl_qT12?EJU=kn}mi0FKXZOi6y;(i%Pm#LWBKl2_|ZUNTmH-oJ|
zg^)5VOd^(=%7_2Ff+y~0;@}GAS_+lseOIPIqePb8y~@5%)}^}y%vFUv*P~D)aK%IC
z*mD?bL^E6};AE*G9Umzmw&@no(U6XU|2CBfY-39bPsrl8?}fmiGx%VEfc$r#b#Dhw
zGtOxqOtESKVs1qmSVrsRm|^7Tt|T0q*9d`w&+yX28kAiq#E&D`ZhDai2nKITJ~Qvf
zoFr=+J$eBQ-@;}HKC<*zi5WQ_O2Kutv1sf3@zB3XgS^^x9qs6S=u@80ix+WRja&~r
zS$P!J+$_V{ai8(PV_$G$yr8r{dURl#8?1Lyqw^!Ih;SU^aVK3wa?XZM%i0ggG2vJr
z+RNW`*QUE)t;X=rueedoDs0cH4ashrq@6LeLed8DuOb2Ung>|*Gz)5(i(<-r3EWXV
zi)Lm0620i#IA?_kO)~o5+`dQ@H(@IKzt%|XWNtBLVhR*_-hh5~KI)zLgK_KoaaR%B
zK|TuPOW9|x_|*mv#Oa{fWmi7uW-HX3dkY}x!anCDs1JR|ec>J9uU*Vx(%;T0Fg}gn
zMHTd(QNta|kf-I2zM}Xep%|8%gzwEVQLOM>)ahYD%_f%cIc@6H(`pf?{rW5>u8zc}
zm<wFe(>3V#Lkry|?1YaE3RKl;4FoYZ-pUERP~NC45lYu{89ma}I9`(&_*+o#gr(R;
z61n@Yl7KgC#WmbF5PC}S!Vgi<zPAw^mOEnH_7QNX?F|STV)@t?GjWphGxS_Am%sO|
z6uBqH#QN2DsGjnj%iL{9)UJe}Sg%{+F#kF>y7l5Grx)mWJ_ZV(T)@qLMqpdTG6<g^
z&y`=y<Jy{Z`QJABBz^T((415ZQ{E-QVteMUuNn#QZ?tIr!sXnAutR8M{S%|}nDXqq
zHc4RJrkP!gYh(FCVzA^DI5L)Beairrt7k&rS1WFY@IGXI*Pv4))JVm-hhQ_j2ernq
zol@j9{_{s3mp=Lkliym>n-;QkeYYC%)SJhrl>Ej2a@V4Qwh~cKpBNOP4M~daD?XZK
zMSX<JpiuZ9gzP?uy{VTWUqy!RT2szB`mwoR-woE)_CvSLYIJmK3KniOqB>$r-1~9}
zbH{JO)P}q8eYOc{|MLjP9L&XR#{!fp3c-+n;~?eRb&<A6o*WqRXESRph*&;>ITl}_
zRiGtva4q4q*p3*#K1N%I1b$t{zaU7PjGjZ@{Pa787=6WrinH=1b_Xk=($*9MN=vx5
z`)ppgZ!xquJ;N@S|M=-gim)oI6!gZoamv*#pzhm`wzL1>mf9S&c9x^#Shld@uQL#P
zjqO#GJSAFkpYV7^Am;z`ic7nwMaIl#f1kHT<I*4ZVS2C}S+}(jeX5?}ptc!3P!#~n
zCYjTni?i@cnHtKrO@))m=0sw02KOn);@-Rp*z@2zI+lgN$|c$G4w-vYD;@g|4)SJ-
z9g>?HQ?W`R8FusvND#h4pX7twGEI5v=@rkPvX`a1R~kU%8AY-=4B?z!Eu`9fg#gP`
zZW22Wmk%<2LQIECqn!;EPFw=c(QR0DDgkb$T9V?=QRwS;3K9$(_`vfUv52{41=H$F
zH2pSUd_pWV*_lw+B@?*toO)C~`x|zcMPQtQ6eN6khw*LK(W0jVwg&vmIH>(F@tF}l
z5N?EfpG1IP#$+fTOC>Usg>=XCT-3Vf1toJ>mXC5EUej3OGY>`Oeal3ro|dqEKI<0m
z`pj92Zz4Crmge8Q&0qXapH|#hjDlG!LHsSl#nx~N<fV54?0knWU+U29ua1Fud<^I^
zH~hEZ>`dp?4{jSKVPH)#<nP^p_nnGi^X)eH8O=N|+E*ZdQ5&~cx&;bdhvTH=YtZMG
z#@(yXq;m@+aqEpR5Sr&<xr2t}JLB{SqO)Pu92GLjU>BAxHXv_4XQTapQC!!RIBr97
zJ_MgD1i$Sz+>V6?#4JI~%^EF77E2ovnfEp<4>|~ayI-Q;rj5{KmB)8ptpq`3YVnWe
z2Pj-2$47L$!Hs?TbZ#=@lzk7No3Yiuson=exduGm&AJ!`v-yO&tth+fH`?ZG!-i+I
z&~cFY9NBl#w#zJEzTOU{ODbVXej#`$#bG?loXJg9pxaIhNPa{lcr0tj^L{o&uwX+m
zzgU?l)DI)y3j8o->Tl8WUzWsHy#*iVF;AS#T@*J8I7rfl|8g1k`;H7LZ01n;+92iy
zGKW(ALbMzC60Cf6agTB;jvhG$3UVA!cxMp|*6LCDsh8ld8EV8mK$j{pAG#>220GTq
zU}mlj(YrScxnC8S6?21&>dQspIT2Us_!O@*zuVwxH!Lh*o$Fp@TC(pxro1(vby^z4
zrzi=vQ(5mlU5NHO4)L1%XQCwh6}alQ;Po<9;<M{4XSPt9GoPVNk6&oUp_BoT?plw|
zmtLcll|I(Lb;W~HvLw6FhMn1-p=&D#W_#Efv6FRY-mSr&v`}o?9LfciDw20`@o0U8
zai~^m5NGKg{%6n@#yv9OTkbpnp^hQVeX$$sU;1+27{7FUhLHHX3?pR?m1ta{M3%S9
zL3_#^ES+9}7B9<S)maU)d{ZvSv#zvxRRne~9>h&7x9b@;l}`~L;Og(`bHeN0+?S;N
zuqcGhn`(na@<-%pwX1-doz&!fj<U>}^Hx}NYcKldJ;J2X15p0-COWYB+KIhVq;jn#
zNbe9sb;>QY$XJJh3l~dL7JTQw?qi;==lOWDNR!Av+JyeISyz9-El$z<KL|J!#QTgN
z5~*pknU7#Qr~Zujcb$~!&lu*Xchtk+b(dl0!%FO*kcf)f6};e}wPbLPBJlQx^x<g*
zwj(bGRTm-pB`ff2-)oZi_KWaq9osu9ngLWl$L@oVVCTVK06DMGd1MAJ9u!HATreRm
z)^apt?l~M6tw{VM?_-k}V)l6zYDaG1_6p_?@5qDT6YSg>^%RaDegmfG*=$gy3!DCy
zV*93*k{8QLA)}lJ=bEE@+x}+J-HF!ZeY`o@FzFRz0?1;D#b$o#jhE0CtHDKBEJHz5
zv8XHUFb4j$4V@!$am1aAXnWcoQ%3FN1U4T_oOYO#Yb6$BrG-0&NgBZ5(oJw}SLS`&
zbJ6sZDiLov#DzRIp#Oc+AUXdD>D^sA<V&v(Jo9CIkcYB(e&lZyR^@VT;|-`$?=$qe
zr%weJ*1{o^C>%TeIM}By;V&}2$MTh{VUC*$dFJm6i@#-I-n)7neJ&MdG2S}tSb-I`
z-(VK&p-UFs_}{Jsge*j|eXT40c&b3YS-pmvzg0;4jSA*PS3q%O78mfK2!9-tAzH>7
zD35JWu6_}^Ud+MhdKql+Y+<~pmpJ!?73I3LXzfQO5Gzs_)oLf)<JO29kF$PSNE+A-
zD3IQo$tZdB09=1v#fh7xs8YmmGQ+zNIv2Ep-10k^GUXrc$OcogN85(}{Qe6)ScdON
zQx^KKO2e8V#&NTW1<#r;E+gwR7<3Fka=(z2mtW&uZ{~6%><y^mwg-I5)jMp0zK^e*
zyBMAhsuFLOc^+LaOJ7V$2629*NSLn0%UXTFD6O}Q@nwWth%TA^`33|Fl;{;XGqTKH
zp8lDTihIKfA!$|sI_R+b($YH;ad>QrVC;C&Z}S^iU~v&vK|H2AS<u+$-!N`v7JfU?
z3yMzTAV6*x+NCw%xqvvZ9<riEO1EI(wks67AK_m8Ek)OvErrB=Qn<8qG46L{+}o=u
z@Xe|k{w~ocn^)(7-<8|k?4&0!vO<+Ot7m~`)niDhyTNNYnnCRSMyP&U#brt6qS=l#
z&b#9q_x4OJV`;p{m@SNJ_stU9;zit6b$y%=qJ>qAnX*^oG7Oz%d>wmP;+}0r+m23z
z%7I7tXP!Knuz3^oZBFINZ{J3r;#(YJuVY@CJ}nz<LK;m=v7zi2wpqG!sfVWEki9<+
zBxN!;r;yj`m8EWaclfcdpTWZV+nE3QKeU$*g}Lt)h)-`7pK{ralT{su@ef{s`p8eP
zwQ3PWbNVQrjU}i5Hl{_xmY}1{EPTb8)AUtZV8{Q2(=x0xFy5beN;HULzzf*=OP<!e
zJ_Q4%&v-?D8ydQ37~{R>f~&&>zU_J@EWL9UzyBIRz1Mu=(q<`;ijEvK`uYj?T2+D1
zm>Yc05fzNvoQ-c<<*2h-4sXUX^8ZZyUp^rSl3Jwb;$Oxj?%IB^JRwguH5-x5McTCP
zn<}*n`NU=m6};ErXYjk82!cY^)3~V5%PtMV3to3oE5Mp{g1(oC^HfFNMtU&W-<W*e
zC?FR_Irz9nm!|JxU1(7trW~*3Ul*&e^REl%*gFpQDB6%gW0tRd@*9@3td!^KD4q|?
z#>O88U=aKRz2-$jPQWMDO;llBk9PjhT66kg-)S_GV&~OgL6E-Z07x^&lRb!e$NbZ<
zHC!3$?{}l9h;<R3j|8(1aolmYA<T>!Mr_05AU|IN1i@2E%GbzA%KP6+mN<OD&Us#V
zVYDIX%$kpL>V?FFOT};gzX6V?!}2Za%#fo)PCQIPvw7j@>-7x(JTqop-RGi1p)&M(
zs0#5Bsq;BS)eybX0C(%l;T#P%PmEQi13RMmoeL82@8znbe#Aj;<#&17=wJ(OJ#s)Q
z^oYgVJcyZl3pan0fJ{L%`sF)uijBYc{HfNot%mTWecvD?aS$t=J)x$ilg&!zftUF=
zC>$RNZa-dPfJ_Fr{l#hwy(vRd0<xiE{97Q2m(h)Fyn=&6Ky_>q>P^4Li{5;PcD;X5
zG5sw+lHK=zEzV;M1QGuOa~VtKq{~4=TN3-2<<Kpvd8K#8L}gVk7PH-EtC1G*+3p3~
zQ_Sfu)=%-V=;Ug9^fCT#b=n#H63+d79X35uCH*eu1Yk4FHgHGH{lVbb;=#B4CB}&l
zJ8<;|Me4Kg0KbamoGjVQX0yRj9M<y|XFH^Tqm~pMWvW58CpUpi_9Jv%Hz+D)-LXhR
zY2ujagah{Hxa?tRXd;^q?}keekDqM!bZatn&UplF#gY7%%hHfCAcY+|Y~~VL&5vJT
zOWW+uLW-d^j(z5bKEisI$5r9BEWL%@DP{P?w-kzxg)u*t6Rc1Y5?|Me5S3KUI;)x>
z=PM-AD?j2V>6;kX*o3}QOL@;ZseD2}GvBrNIOkgEBT-QpM*7N@@bp6^`n}_M`Th@(
zHdTgnAIgOu)yphDyAGtApM&p=pImy|6tFJ+fX>>B@Yri<B0lWHby=#TR?==*mnjWx
zOAH|;kv&g4)ZyT!1iUmTOYXYJl1u&?#7T-hQ{p$gU`b6$7Rzfi#m@l6Io|Ajmj#K7
zbeX441?@jA<yGDX;pj~o=(j%zZRYEdNhuG(=gWNlkW?_t(7g#cKc(namhs&qYfR&g
z`r>ux_vo`O6*xCp(iOh|ZTqA^*g6MEU^MFG6mSD~lR3|@BEEO|8CcWjhT+k-`NNUw
zB<$TcaQ`MxwX9ZR{@?bHx8WXakNE`E#~$-j6s_n5kqP-JehSL&g%Gyq3*)nPb6(PQ
z;5Qh^2`+?|yqkXx<0gjUynZQC&yU6RpN;8LU1^eCAWNhQ&4_m!>$kW`!QtbIMDSUa
zf2QApu8k%V;h;JHGMRY;YH#3|VtF)J@(a%l3?q39FN4RCLYO$mfHXWkhT-4L;jo4-
zNm+PXw5ZA(eP|Nbtd@pFzB4d;NRif;I>R4#MLMBb8&W<@!)PuDW>!d32kBebIKzOp
zjcjLrux+?OwHo*8H=@A#izMY@2>)5lj3nKC0A~xNiHPmhlwMxI=H_PRIoKeXo+(H6
z&&)t|>-%uq!kAolA5Kt`fyKH>kmVABapP=hCHvebXsc1d(F5GOTa8#f!xyI{yn)^#
zbDV6VOTvB?z!7;Pl91|;)@ysgX%};K1oCifRSCM;e&)p^xA9r8GkK4{q^PNtEeS(u
zn$p-NDRfn)k{gq-q*aM*zkLeSWlM3<*cKS}NQTs=jKj^dJhA-00<I@D8tm2lpv~|x
z=jC6(c!U6ge~r12t`RuqkC1F(JxfKmcCMiy7CLfg;A%D}v)yD1CUJpSsnvudeTq>0
zHH<&+Yt5efVs7h}MR-7W9Pm?F7q?!4%>M9z^&+224!l*vj6~M&h+#Xp5JfC`ZbFa8
z9EEnT;}{U6N?qU0<Z>38(dEVm;B>4NSw2S}xISy5TXq~vekzkcqfKbdyK0ax|IYlF
zA+UR-9tef!xWV6t;HOF~G+O(iS*bUFewHnnF0rN~ozt-7^=tekeuUwFF9OpkDkN&^
zWzfvL0lAk5R`tEZfU!ATV9q@#&h&x!$)1>D_zN`Wl)=WW`lPOh@dH*)lWcsMgd-I!
zNd02QzBqA;d-`0L<fW+7Uw2;NteAUv>aHny*BXU*Ask%p=#uh@?P$TC`>mz!P$-iN
zN3wLN)Klg@x5!5E_-@fg6-Dx|wJeEZ=aoZRHy{i9c)_XDoN0#<9gxw&*r%beGK!$T
zuLR@5VsJ?AH)9%9Vez6o+`E%;j+oCZ^hqO5n{k18XmqITZeR4-azrvC%%9yS@_0WZ
z7rgpJiTGYNrR7t4xibbfjMZoYU*?8@)v*9Hlga1h<DSBxwkeI-oPgu@3&<4hW8fL}
zh+9;f1{*xuFep`v3N2+J;<r1rO=lj^V;i7<-V>}nI0^oCvZ1S(GkeTr0eSMe4#SIr
zIL}a4)LQQc`D<)(1!E51E*(xM9B9KQBE}%lFyOt<3<oPb4B<}qILkMT53wy>;@3A4
zs=^iN#y`V|&{GpePY#0|c9$DE8Vs9{IiaU5JF`{4fvLKTCl&HX<SfP9Mjx+YIZ>BX
z+l2AU$qJk%V@%!MkHU>L0+Mt4EZ)i1BWB&cd|87IsqdQsKJDI|ikCduQKd=TpT?s%
z)N>~pi|K{^eXwwThz4v|{M0iMM<)$~ltXtoqqFmv^ZXu)u3DqNg%nlGWc}*9o}9vk
z8|eDRkaw^Sf{d0-Sh@BM7C$UQ`w3e3Mbg3e$t&T(1Lk;3VUF03?Wmbt1i?nVP`}rQ
z6NgQZ_^P$y#}7yGL#u$8)tnVgxF5*4<B^iudQG&Fngky#S@xH`yH`pWKVr#v$Y7j3
zvHcy%OsYW+uVlRS90NLb&lHq*JA<Wd?bx23hTWIh+2hY?uzX`qFT^I}z>SOGS*b!d
zb+Wyd>q$_r4aZZ51*9>+l-|x&CpP*@r1NkJbkx4VD3)lMA-^2E&0k_#j}ko>(FI*^
zYq{mu9^nT!HG0VS8Cbs@!1hDCQQ)9KJA}I6b9NM;6jTWf3$KA8+oOJKeFNuTFmFVf
zfTkR_hRiAQL^srm97zyT&zZ|WWgVM|47EZk^Mo!eFd>&pjp<MKkyt(XKA+z|f|JP|
zMqZw>CK+2Y8OvcKx6MSE4DIX!_Z6y8sPzJGFoS6Q;2OqXv7nc(8WPdomk|Hy6}TPk
z;YX~|AXDU|XiD)>-f{I0oWcB5uKwFZ;fLiw{q;>qy?+|it~0N!NgB7|%SiMP)j%%0
z&psNuf%98Wp`Ymm__TWm7A<Q7N3IK22g#F^ju@<79s|2h$rC%Nn~-~Z3+$FY2aUZQ
zSh#d8I!%%zD*|-MkpDbv89a&+b<bGmfX%0m9FVAKU&i9G)-Wenfq2RbG2_-X5W0;*
z`z_D$&L|5qSwn@yS3iOJezs>bDufKXe%M)*0sbo4Xk#eL`V)nCuiuDN-F}bh8~1`>
zohmj>4FPdPii_)^8xlDtqDj#kMwg4y&|v;TG~z1JV*3+lE6)<`+G$Mc$A3Vp3v!@%
z>H;R(UjVQE&)^>`OGX)L(oLg!@VHGQ%JC;qkmFWTKDZpRhPUColZtffIxBkUuqh2}
zdw@=24HE7g2K#01p{m{z81ODdM>&=kEik6NPY&W^p%HadjtB97TR2~Rci3Fl#NKl^
z(N$I?I@oGStAs4i_c)Wc9y@|=ydX^`Dh{K&1INRG)BsSEJB<lbPs78o=ZsMsfEgQW
zz;DepezPzOQh#;9m?Nh2yTx-9^q6z5H&gg3){R+d%sj>^vQ%Zi5;4kALtp)J{?#5$
zGDd0yNe`X{dn=k?T6!)tslMRbPyWF}hK95_qyiO%jBm^{m|3zWY@K!h5Bkf}MxDP{
zhDwJ_|5SqEJ5q6!o;Imcz6W+vJ)k#v5T&~>K-5!5Y#Cn)%F{ohV8U4#-17+opFYHd
z-ZQ+Ei4G~0T>_^j#9)PQFf@e?qvD(M`5CVa>E_egbin=#rrf?L`SAJ*Iv34{&Kvh}
z(y-g$S9KNh&%5$Fll$>)Qxh)NV_vlCi=6vmZ8+3?7up+4sN-8P)=#&E`W+g)y-Nwt
z20c_DDRe0x@PfwBI?UP=fL5!{;(}f)8u!GMX3kY1wpIOn&H584)N%wpzsp>Doepgz
zVIbC8Aga9Uj4_Ncx+!=#RoQ(PM_-S_%i0>mZHyY-d8CKUAEM#oSX<h?eL0$qDdpUV
z6pgVoCh1}N@XCetww4cx^6Z85&bb2QS+*-<>O>5vGzA=g3~or2$+*`#L>w=K1t-k#
z-P$9lnN$i<jv<gf+5vxE(<2L47!k*PRj_DH1XiWr1uPy33wH^L_xek`pI|LF<lYbG
zZ{5Zz59+{nEYA-gZ9!z4d)WPW5#(7vf|v?(lJyn9Oy3CF<*vaOYiaQG?cjI5egw+3
z5zwOk6%HJ&!^k{y5^j*gEoFVWb>ut>fBxp$8nPwzD_uB)plAp@)`nN_u{-j@bvPn!
z1dX*jgv&4g#@%e*XEx!xBqm*s%+xod5u;-u{O`A{tCfx6AD;1hZ?8rl<+J=xt0U-}
zTft}DE#Xtv1@na|^O?I?hCGppgpwmHcO)f)f&Z{9(Y$9axv_bmqZ$qY%aeGcP2*79
z<He2L&b-&Y0@AcX0~@<?F~fEo>oK2zSC84;scO*KH2ygAI2L1%n-3(lCF1F6JpMbK
zvB-{&!C_UVv@g{Oy7J`V?t}lZclB?azTb#U*))~q%l`(u=N{<2?H%XD93id=D`1++
zRtPzw089OtYtmu`CN=5PuY;MWJntHWj5&=$msfoLiCtXY{Va&f+YM{|%*X~W8^)m>
zPRrlBa#I=4>c*S%IPSwme3khheqC@3mu{4y_t&>!@cPd%>mN(nlWB&6iF;kvxSxmO
zh_#S%t&y`W)uJJyzaZS?FIZkToL)E7r%T&Qq4HxB<~zHwt~L+Rqn2TQ&l-NV+W=&<
zpL+(j^DibphLxdDL98FgHEy%V|2cuGG_|Sb`&MY0@rzT;It_ST6Lx?2&fcjf;Ku|V
z(*1~W8bgI(r*It{Th(Yn<!in^=?QQDp#jsiWnjFiEUB^j0xy>tlJAuoWPG9-eZpq5
zgRS}K`m{zO_hbaoON-z;mTzHPq-&yzuLH<e7NSXX9fXG_Ls#TcIHO=nS2=5wA4~6I
z#A6}#AE`@n_o>5^&{H6I$}$^|bx^S+iW~BN2!531>#O8>@r4zV*Ua<cES1Ok7KcN1
zWf+dQU_tTMTpTxn`Iwr@IDN*%a{Zdkd5^fmc^A0hnk_Z3;8F(kF^*h*=tS6A)&Q}`
zJE4}jpo|X0L&C%qUR`${E<5=gQzS#2=Du3MGjpIVze{xOT^24nehiM9T96dG%OZ7=
zCN-L{5~3eG#!+mCTE1W`=s2_4uy3A7D$s@$b=G0a)F?3hTb?wXKgN&!xd@fwb*Spd
z5by|Rb7b{a@MQmv<NE;!@Qi^MYNgm--vbE;r9jJa78JgIgJw%RCFg#>f#{b)44NfF
zs{C(b*c@3pI@})W&ua07U$3E>g%z3c;RNJN`3T~<-?<{_g53+>pjIH`r0mou=k~vb
z-mS)TcjYsjH1aL_T{psi;*E%x(>LrF>k@-9meshV2wr=hfKObP=u6Nh5T}MQM$c_-
z{8AygH=i-0fW4<Hreo}|RaoATDDwS!0j{#_;fC`+p*bJ|^<COAYV=q1TgY?b*roiw
zlUJE%t&dlgKZ$$4nNlzI+@-(T0OBp5BsCoo7;9okv&KY%o<GZy{xP6qhECu>n;C-d
z0-UE9g*{8IF-|vurZws`y3&rj?l_$M{N9hdZ!#~<0>;z-SqTTnhT`^<8kBx#JG9n9
zh*>R9yDs(f{>@6{+Sob_daFt+E*8Mj51(<_IC+v#G>F@{<Cwc@2utfrU|yCqK}~ZS
z>1Ra}7-Md6u?@9fnab5HegWA{ilm?i;lQZ1pyn(jd0slysZNbKO+5H4&xhQx0U@zB
zvE)7f)#qHTr|@B{i}&S(4#v)O!qGDeu{zZY)~pT#i|wazhHNb!$n^xX@=QrwMF?zP
zl?W>wZOF;QS19;7-DQL62Rv<{M{l1sB=(o@@ZwvYlDF})MDgP>Jn)wT6fWI?lL~G^
z_r()n7IqdAz6x+#-~blbw?an20feM9a1$r<_6^#+eM&5sJ*Ef)F4)0u1$83)+{yJG
zW<4pLHQ2C`LiM>`F5y8iV-EF+#w6;Jd!znfg-ay7nCpc-ZQjse`4X7jm1X_fc;}IC
zd5`tYa7)UH7@jdCy(_Q64wgv^KUmKt^<2b%UdfWSCINRP+>~zZ%)s7=tN4fQN+N!2
zXH2q3n04<hzvw6HT<hmau1~Whe!_i#l^bw}YaPx%6pLm~YH+qxhIq;r^8;OtoVl(w
zv2`rP(vPf1AGw8f#tmuwrMHk`p9o&8`(2SU097YX;_MUcV5y}<On;W4XCd>Ex`r|q
z`f!@Jy8$Qg>tTYg6m{!41(nwrvq(#c&6zIpvl*ArS6vDm55LAkeZy&=aVFLm)$nh0
z^l9*l%lJB+d15bBfo5YJ*pHpat((<@1*c!4`>13n-u<0#t9~LX?{XLInyE+4Ec&?3
zn{{ZRx;NyzZ{rKfGU5L7CfNDp0|<uam#oXOrY_AR=$0M1m^*X|9QQb3irg>GVMPs?
zM{CfwW&1$4(wGW#zHn29CSsFY8az<Cg>7}4*?GDf0*uF?{%|8IVJ-*h!+&t4P8(cO
zx`SPU5MEDsoO@Q1fSMI!aK}0czFJX<b}p@8>ZVJ&9aFLKof6H`s^C856hWPXI!P^c
zL!%|Kbn~?`=xcW7B|PgF#B7kbYCe|iJ5_-Uass@r28m@I)wUBVY(D=I+wAsY)V_P{
zS=ooacNtH+;tcz<WX_h2Gx(vnBD5{&=e_Ez(ATaHub<K+irQMpQ(3ZWnhX*CUd6dx
z(W4Jpwx;6dXNbO1#+_ZEPNdae!i&A5(JZ!w+ryX^wwtG-zdv(7v|Qkf4$g+Qk<3>o
zw^bC@#@qw*d(ky+39q;;0w(Q!1k#@`!Seg&H0j(85Qim7>L<72t`R3NTwVraV_osN
zgq;Z<GuC#xF5i|UO9!Sihk0lby6+oK>!j3))mZi&p&&)ycPo;yKT9B{fSseR6o?WO
zM9i~!kxL1(qW?*cpy$2VyOw>2UoI-b>aU?F82Bm4l@Gx6jEQ>F$CB8~@8{><2}57w
zb{vAAXe%=wn#$ku33pHMPlDQU{c97N?>`kpiM|-IWd<DCDN7=m2P|kDJBu_jCtHIq
z?>B!NZ_jF@fx?rpCNL4NzDY#E>bWJ=5%b_gZ9i6|s?s(O3#gl=Mu)b)g*DF25R+j^
zELMfXh;}1#ajhZ=9r%FH5>#M^I0+}&#<Kpu3s*dGKHC4Y7R_cn5%vA{;^a?c!C==?
z)OU~~o{ygJu48gU1#d6nIMzwC@0-P?c<AEcbxO3Uc{<M2*Q9!1*d4S}A5?ROQ9G9D
zuYRus7j0F@Y{UI1xTGs8*Ef}fPg{>S+Vm+V_Tt4ur_kse^HFc&Fk!Pl2xhZwh*BN%
zPtQkj#Q}+*W*0XtZX@o}Da7u*t&raq#<y>*2I=vJRL?jOmgS#9;eBN^jY-G;A$>A!
zhcb3+ocMq4r^y4xKb0#$Z?_|ytY9kU=Lc{l7c5A^tQh9B{f#bOEK8gh#CfQkg1=M^
zNJ>wu=!IkmyzwNgE0qJCF)v`xv;?%>eTA=3xd+RWm?zrx6c@x?$+del@kj3~v=thl
z=1ayl`};H|cPNuPyG9T}oTJ2V(|xX@{R1cu?7+CMKDhM2BXA9Q#VxuPh8}+&gSVB}
z|K}v5Rj=~GC=OF+l*5_?Gn$x2K>9-obYv=_+s%8JyilFa2!DivFFwwS*BC4GtTb6@
zZB9I^j4;}@ii<w(#KlLH!u@S!5SL<xYhHeZ^87+kpOF?;-VVm0%W>$Q--uQ$!!_Jl
zmsC7$2A$t;LB&m;G$&ldFDo)2<KjcCP6~n^emBa0bjJBx1;qKuE%cwON?S@RKvtmx
z6BUwi%Ca-8cdQB;UCcR+6Hy%M$HiMB{QN5m_zAkCuUW_~UpN={4(-ILQ>}^B@dyx3
z@aKvvv-v9nD&)zq78FQsZ~+gSd6&_zS(k%y3q8WnYjq4lnK?`?GNId&^yv{-0f`>-
zFZdnI=XY-D1+#yWBs*7WQP;m(_|=W27*;n4Q=7urp7{Y@Sn?bFf**4WG!H`g=>WJ^
z$G$6n%HsCKLD=h253{^%*!({TkntSf23r#AhZ0!8?)SBG*`6};6*{l{#+iKm40=lD
z7}fO{K3y3>%jfH(>wjChP~Mn&ElfvgUDl(CIR}AjSgw0a56W3|pg{c&Uwv~G-cA;h
zrW4z6q^m7?+GI*zqzq!pv7fm8X$&4JxrQo3IyC2n9d`G%G6&9(OT#ku{T1DgExRir
zy~=^j1SxW77*~tUd2|8=^ib{?+-jjl0~v4F=KV)p>#j=$Ev}q!&MD}cahgB=yc_3B
zS&-rz!)cXU3QSnsg$GT8aNlz+GGh<RFr1d69?M(7IlO>#Z*oAlv>Yz$=OpknuI7fy
zCZdpQ;}T2t*&SC6T&E977C2kOiQ^RGv*O{Y#F*OEeubMM24vEgBd|B`55ACBq%&UL
z#WRx<@SFY%xWKaJ-ebctZ~PD(dMrb~L<w-gxYg*pW)%pp+49~!YeDz2A#n;cCgJkM
z{Oo%<P`}?E!e&2$&+p!2<7W>roB39fa&{dzm}~~c_ipj(HPzrUO`iB{tV8ih2T|&a
zBIM-7=r$ZViP}d<m|;%k7clYAJS9@~p&PUpn-P=cUEsC34(dN#<`&+$fPObsF=8>`
zI<Yyb*WAD)m)FqTtxR&8<5-qxA>YxTjNjP)Xo-a^sns}!2j7e!wvX<yZmkOWHn|Jl
zt+1qLI*dsCp0DUCy&2nwJpsRX55C&N57WF<$hi$g=-taWj$!wqJ(^;BxGc>z(ZW&F
z!|=aF%49qJ3ZBP1vBo?f90ph(y0I628tuYT|6<(DG7VqKb#ZXs4=mmE02(8+z)z5e
z-K>KaF=rb5np41X$`?6Rr>S7acxe263sSV~Fk_r`;)bai=xQCrubS|SP5ffvskT0O
zoOle~G=_Mw#?KPl(~+?BVi77|ihv&z(@<FCftz!fyE;OH&en^8S8P8mh82>yq)?33
zOXmA-RdP`#d010i2{Y%~(3V-JAp3(k^)plAv#QLn{N6H&&yGFZ#FgdPzQ&5W#~=n~
z^`nRDJGf!4L(ARw!%W8JFaI6L`v~l~s>9D9Q1A|7o+V<^+1Kd0e=GOnIOEv$PvmAt
zThT<tsX)!GsjZ#>DqU3>o7020S|Nr1HVmhFUuCH0A|rUxS&7ZU;bdoq8RhwNSm4|R
zo-rqYYOp@?*#NYSdd?pnZA1MYE(gihGg$Y}eYALY9t_9YP+##d>VE1dG)5~xyysiI
z#W)+YLKKMbW(imSNsgKfzDHS`l~{h&h2PG0TTADK!-i`-=JdY?#SUGlQC@~V+CF^v
z%zUnQ>vQnlEkh?+Ou%)M=Rtl~1?O`j3LfuMB6jPf>0ic1r27C5yJOseFbCAPS#O}j
zW(lOc(}St8`IvF$Efzi<j;*>0*s<FLf|(DEPrQzG6J*G=*+LZ8dSHt1f<&mN25r45
z{8(K%TEC3r=luyrw{Z)YJLL`+B&S4<w;si3hb&;UcpWsd*=)5M8xybACmvsZ!=>vd
zu>S5J&Lpq{gvRf<u@;QWa!eUr?%u`4Th5?snxZJb`wcdU7^mz^5xSPl678y&qt-Xx
zV&5n&ZlS$^_VRaNTWT!^NcZxct1m#?&sxd*c1;>Ge<~he4!!a_hedr2b79ehuMn-D
z%nQy;#}OYi$*?d>QWPvhi>l@69A_J<c_<X*i;Ss#$|BzH*)b?(Y}(Q1Rbje^At`se
zDoI~_3D3l6kfcr3kmSUARqVZf?R78)crL-I%J;Bw-8y{1rGUIlF~rMW0^#*wPHzR}
zzR4-mWiwBqr&<A*B>4g_Hl~B?q#AyNo+_E~HVb{rrs3H$=H&HcUGiC*dB@{<ShsZw
zUS^yPXNj0MYc-_Nqh90Z&|r2qn8fSbs8Yej^)3;6BXQCQ1v<$&5qnS0#jqt^@X0U}
z+PdCyZF$kWuOA0R_ProbcqqB?T0rX01AJ}KBRVR@aG{s+#~kYTMadsQ@UD!n{`inN
zcx1V!yOc@q(KYy@vK+nj;<(-GRWRXCIQEq%!{&(#(9NfnU&t6cQ>0sA)MG<(@j1(Y
zwfAxjW%qGvf);7LdJgA%X;E^<f;#0|kxt)cxWHJEO%ZB1v?~!3>%71>@f#=Y&Nz2=
z8L)`!!|Kp2n6Og|ORqZP*FDzMTUmsfWw}r}B^I^^|AN688q{UdRZRJ%4#o0$j3>1n
zRfgTe+_|~naXucN6&^&BQ<dP~X-%H3J_tWT*k_$)#{JGzBx@ZMiI&QKZ0byf_xx|n
zm^lrN10G_#lLYEb;vjL_Y#2ayNF6FiLD))|z3~IMUQ(9$FsHKXx-;N&fQrh+PN2tW
zP^qmNtji}&cYbD^2IDl0@Kd4(_zbAe|AN-rr{k@bO1yF{9UY1%u)X3z^qeM3y-gB1
zd#`3bw1#osrKiG_oG`GOBt`$7Y(y4y4x@)w^@3~n9O!Ys1cg<(5TKn4#TEjb@Ldmm
zvWxks<ScC4`U7)h?C@Z|IUTJnk6{PC;Ot@7(5Qgzz-DL=&zEwze&{+@${vUK6%~*Z
z%lMWP_2?8qEkw_L$>+~$;`jIHkaPJDAhGB?l>Q}0vH265L3Q!h-7Kj1<2CkOZ2=L=
z$8q>S%(Kdz_XE?1Q<)EnWO0uo6=cV9WgYQYIVlExw<Ys!&Ixdg^#Hv(uV6@UHsGNs
zNVNPLj3NyneqRa}J6-0z9d2P%h7*SE6+?bTDn_Tc^D3uxsOP#MZsqQA`1*q}4G5eA
z>2i~B+g5YZt(pV_sw1HBUKBiIXHXT#Xtw7m;qUi7L5=JS*t*Y#Hbf;u+v;XX#9G!r
z%FKcT&nr;vqaHEUHlW}3ABE6oW+Z@RP-HeK5Q~wYv6H<&*4Gze>|yr1Uy+IB&mT%^
zlp>(7OTe8}lclDv$!IytfcQCI!<#QO$dc#Q)N4vM2->c2-45)1xyFOvox}2CzieT`
zip`MVC!p@D7lPh{5U#YHhfVd$Bwo#s?#_{c-dF>==Y2U2%%bS*l*sQo`5V)QOi8!Z
zQ&bE-!5!!+N1v0ql8d3*<bH}6wreIbf8$vw-{8kQjLlH$^M(D*>%;I@3Z(X-CPwdM
ze2dEwC@gd0?6<Jpy&G}aP;QG}U%rF=-m#ouUzH>!s7~~0`va&5D`1||V<lr}RDheR
z4UOOX0M;{4#=u)C`tm|0?z6X`c>|qb)_GKvm(T}jXhAE#>Os?`4!&ybCvbME<)(4d
zVO*wwZq#Hw{K|)1mJ{O@q@D!XYFQYYC&AoNsu=xX51)6e1iu_LrhS9Uc;!vys3D_3
zwv8@D#j<e_svnB$Sx0lr{o|Mue*-2hcEG}+(O`Ce5-;#db@r1}rFpeaKpDT`nbUVr
zHPnc?is$ncA!hV*A^WWCJ7LPkH=y0foMc5Zl(1|`Z$>c)|1zhUC*si2G!mco*I-+b
z6twOuK$BZ9KxaY&$gh;6QS0BLn@JfbZxIUhi{7*RP$hpa*ovGkD`a!I^;~v{7S&CX
zrwuO{Z|NUv=7)O>=YAx>jNtPqyVjE09tneDuL7=j-bI+WjXhH<ZAjT1L(=^Z1vAqJ
z5+9#^eDQ=-PSCVf60q@K@J-(W0`FLtY0U!ASu%p|Jl6>RYqC)2w;Iw<jEAvnB4F9}
z5meBZUE+QC3Obmv{n7Sj*rA|A!jETj;vy%>1q%bxX6V5Ss-#OU@6#a%8_h|Qe-|j{
zsM2eS!>N=+o+O!%MDg((E?pnQyr;P<<FA+Uf+I6weUt)CoF+{NVk$U?J4Zn9@(|zk
zsGX17IvaK?v4f^Z5$;@Y1J~E+k!gNL5HP)(Tbj(y?0@Ef=fC#+GcEScQqUtUmDgcO
zHtQ6nEkPq`A;vLBgtiRh3Zfycr%oWvM?so>5Ux4LgULVdp*D0Qo=%b@`9>r77x{_c
z{Y(h9c_y?+Q-=BK(pX-xTBPh63*u?_Me5sG$G@+FyB;kd(}xoAr%4YkI&ckrX*qWO
zU>xU~=iK%gk?5*nDk^^cl7AcZ4x^V|<ymu{?tYpKwGaP=R>>vI9wkSjGu^p~($+-l
z^(X8uuE#B3?&JKcmZafkD5SPCXTYHf{uvyAFA6WvYuX_Ui~InAV@w%KzS8Bu#}bU5
z$DBSZ1oZ9h(->&b3j14CNS4n9IJU%qWbTxr^V$R?J5PuHn`BNqw@+hSwSRac&uF}R
zUyV#NGo!Q8jmiD`%Q)toHJPhvO5C2`;s4qF8WU$~;lv^}a_{wBv<Upd?p<A2B8#kl
zu?g}+@{lT-5sMx5m=JgXHBXnr-{;<;sIv*jOWIK}=>k`}_a{s}X+;K9${DYOb$GhB
zabdc}Fl`fKQ5FXBdj7$@{rvy92D2W#DOHNK=014RsvE0a9`P?qIsAUdic~Ca!zlwl
zpz`1f@YE^d1bdx9XseC;JS<6vYAA%xQ6{qA)<MgLjX3u@<C<<sK&?dyC?6OKn@UZ{
z=rQL(+~mOxu+Jf-Z5VYf|HyUSyTZMnXhvS{lPBw5$HJb7OK6{JNmJE+!M5k-v>Tft
zqL1awBagxDzYWPop(@$C%?JI?WnoLuAoRDI(PvwRqmEh*T+A^bf@9}OPCwHkM_O%2
z>Zik)P*H|6qTEoRx3FX&Ada8v&Rq1SpK+zfRoJuk5=MPrinhu>dA+!CAe~qXcIBmz
zB~0Kdv(CWg_M;%^Im5ZHN`_|!`uNyafozu$+^J@Te)sfvpD*l=xco8yh&emVZLCOr
zu{I}O_0^?ilM_m2N1|T;N#0ZPgS$E0ikx7(!_@7E&~@`TzTk*5UGUC~IyCNJvyciL
zQhWg^mZR9Ldjz~6Q-l7ZdTdN*JI_Z;FxN?mx><)qY2r25SY3nm?-pRaRSOrg(i0?R
zA0e1=)g~O94I5P6!MQcJz<Ycql#gr>)z4jmXGVr&_ZSDv`uT^mwQz%LXG8H_t^#Rb
zX954^r*U*e0(vFg1YyHAe$ifQ?6Gr&#aA-WxwnriKh2oL#m(4r(h=94Vl(vCix6i1
z1ow7K!`Y^F;JE1{bnj~eu|SVjA8F==qeJ;2gL<gUIs^{u&!OP0MACWV6`ngf0*3~_
z!-?g2DD|%{S$kHCh`gRM-i{*1i3V|B<t0q@+lP_Hn#6hF1uwm}48uP;!R&p>=-u~%
zGkQJ@!c!TiJM$DeRA+<AYgyv6${Z53>>y^k37MpD3T9o=BmX8AV9J4BiLK64E`RA1
zIQNT3*U%IE*m28HDae9ac^K1wo?20}O*%O7vNg@wa|*)^rla4e4BWHWn63g%(yFdb
zx-TAq#*q&&QU@_V&lLW4WZXUXTa1@2!Gf3Sw0hAKuDaG1?6ux;yJ9Y3*FAk4X(vxi
zWbfm;$bZ2u>^PWBwS}O9Q)sF95!LtBLXu@BWIR6w-P&n*Yq37*n_9rn**${PM#aI?
z^FcUPeFvTxFGb8eu5fKr6`;H=oAZ=4puro<F|#QT^+e^|k2CC?JS>6V^_}H>8Z(&h
zW~ju^*NB_<OPS=!4T1stJsNPIp~rYKD6$>?u4|U`@+>`)&CW>*%6HLotpa$?>EY8i
zjlmE$Ti@Vc0qULMkRACOv(7UfOimQKSEoRsiVS*(wQ|Zw*?s1NDOHXv0KuZaG5GO#
zEZ?(FvP5nK`DW4x`O{6Idpn?mT0hL{F(4Wj3`pfhIqDlN57EQ(xSGTu5bo0Ddna2`
zpWz8yaR4(SwM~ZJA9YaJ*om{_TR>6I43;&@lF`MCoz&UHy=>8=?Gx6ab5kIERXL0K
zZkn`9^o%#y{sV@p^C9c6aVTwA58w7r!g#m|W{rcKAG>#^{K%2SH}Z(u8f+I9j#9H(
z58>SzeEv<Dh}{N7KW3Yfme5=9b&G(!!Ov*F%99r?btpkYJ**jc1&w1<U|ApAkFfjl
z%EI4}y7&&%PaY4S+zYTVR1e#B^om@kIY}-tc8cHk)zH&D9}aIdCiABn(22MIu-{7`
zmr`ee-3?U`IYX8RrIo1X?P9K#c{?hPFt*Jc1yuEJ$H*p@S2^2=!q6|=3U$UVxBnno
zF<G9v>`@_s4?aUvu`y%P8PR33dZfwlGIzXlGv1YvrZcx3#{ri*E?P|iL<hy_di}Bl
z59?6rnFH9T^${&s2<ew|8Q6G3NPRj@`2^41kbmVdo+)F#@PK|~;y&86=OI7D4T1BR
zWN!D(hv=bD0pSr}c{_&&P^<x1`TG%81o4oibBg;|eG>~!Rzd96Kj4tj0D&W_AUWTd
z2%?s_2!ihv$G(XM+Xr&sVr@;=WVGXSYg@9QND&?MI&hW0JP9$H1|#ze@x__9AhEp%
zJ};k0Y=;s+eAJv@Csd`&A3n!wQpm5lbOkdCu0!JvYkKa?0Lay;kePGXKKU4XH+ZuS
z&ecuOWGm)8AII^9S6K&9tsm8fw8%EoIJ|MkhWT=bpd#%71Ri(~8_F!GUJlQzPO8SG
zA>ZJ@h$)b@pn_}5929lAPXdYaBW$Qi1G^+2EPgf{2FrG_^HU0BiPpg_U0c@ml5lP_
zi}(ud(-7)tPNK#<hMn#SuzaBpbAGh(Dd#dIJD>OCgjzXpt=`Z5$2=L=HmZ`ZDkF$7
zn$rTSQt<03;5Vy2fM-fk7?~?i9JgM__$VFfr9yG*GD~`Eof>%+IGhN_F62%6rK!wf
zYl6k<C|2s{a#Lo2eMJBo92<e58P-IS5r*|Po6&8?NU-mDz}MYjdxM=O{KQ`)$o`g4
ze8#c<I`bUQ3ldPDX_=yo#ZgddwF}?Otz-99UEKRHk9nN5sA#eY^-pFD=9OPcQj{FH
zfaD17MHh2ZZCKA)H|OEQy^Jk%r=7R9O#mZ>NO+=Pg*$c6V#8oMW1;z>-<c?M)wYAY
z(pykIzE`w-xC}&3UVuGPLb^~GiY;gJK=?ry>%1e-)Y60$Ej@^RoCg0#)0)T?2jO#1
zS>oJhO$WSxVTBX(d#98G*M9-iEhgf|6~-k0UIN~4TaK2mw1}(4JkDZRC5TtO=Wnv?
z>f}^*20F`nHk~0baLEF?QV&4#gG4m39KymGcF<Q702^~PNL+*%|I@yQ2Nl@PcIFU9
z=a_SH=IOZPMH&8L+-%P=D>!jPy2N{FHea5tA^O2sv0qQ=lPcYMm=@xPul@B2nV?4_
z&MV>1RS_`eq%{e>Wkq)>+LEGOshHK-%*|(?Q~5jt?tBr;B`I4|=Y6-iwtjWc8Z#R%
zYb%mF+!M^zjfPKB?a-w$6WbaGMT$2JvA5<iD9?Gwcxmb2aG3qBR?2W2R^EfYjY=Sr
z&xa+Ol}Y184R|5>1ur%~2G^_oyx&_9@ANO*kwr0fp2-bRVl&h7ZhgtYHCp6R54*4a
z-XhVvei_~{x3-(Q06ZI<_#C@mcp%RSx80E=I*-f1$K04pJpTdLvRr5$a}l;|Eyi*s
z9~5s2hWgH0t{eMd-W4IqpE;91yjYPoUAx9d8I?oLYD-$5&GWrs%n7?a8KSZ~aMJQP
zn3yI@yu==8VH*h@KVqQe^E+@Jy$d?Nm}A@Rb{y#c!_^%83KKsvN4ew!$ZJ~AS!`yI
z7&!)SWg3!bk5;_7$bi1}VouLJOGRB{EMeyE5)2rW!+>xn@bJ*5{w<6TbXu9zs2NaK
zcK=UYs>Rs)pIxR@yn^Q&*lzQ)4#w%8!Ol-Q^l;`8T*9&3h3RS7ebx;sUkYf`YQ}N$
zY=xyk_rU&KCYLyzb#w=YfrIfC7%8nrn;#mI|E8+Zt)gAX%)Q*Q-yt}%LyD;0FhX;d
zvmB<RN8Ar}q21C#s9Z1}v}$Cb{<{w!u;mFS`!93noWF@DZn5{x!eYM5MT`w5E3omn
z3-d9r7Ns_nLw%$TZg?O|Jr`K>UbCO0BxnF4q%uLgKZIX6?l>B65|Z7^*W;)}HR7rE
z51iQ4j-MSHG5Yy&EcZA9+sEqAd_Ehl7wMDo2NsfaLnkaMZ9s8zpTu@rD_`!p0p}hy
zAcgDKV%CBKxMB+H1MFK3-)IK3Ka^lfT0ZBbdIcvdY7@cjM||6+sh}SF8Qe>isUSZ~
zq`Fi{t%x_2$J#=~X9Mu8-2+0G^|<nAIu2jYI%b*7;dP9$?7n<vv!P3T#AYeB^M8ip
zmI#UQvRL%I(8O1G8`H>!SZr_Y#GN}D!F87qzBRx9KZ?#gtj4bk!>4)FJddY2nKCt;
zcWpxEDIv)eLP9c!a1^P85EA+&Q4$hT=Uux=l7x^Xl{5)Ol2np!e}5V-SC>xjyZ2hp
zbKg*P_Y)_dLXcnj6i1c+gdQ>!ADw32xoeXpf_Xz-%OiesrDLku=Q|DTox1U;k`@VS
zo{95kCt$_yp%~v*4~dhXa`!KP#z2-sAN;4Dom-!B`YOsqaOz`@N5OvnE9)soIKSjQ
zIxq8yi*JZ5Qf)|5dn@exwh4U&=Ovp?n2@ZqHDHk6%;7(EG|0He3GDy48f;$$r)-gR
zVW+{kKFau3;c##IPRw|A6;51ZJfzheKQ~p2j8@elGd^609@983;p}|~eD93fwVyFj
zFGn)1XFg+CDoT!TX714JFjSau6<-!LqfuuC$n;szi}S^>JH8A@X*Ywn%pERh#XfGv
zmuKj~-NKw-@<foW;u_bZ1;XnW_!pyiESP3a*N^MN9LC9YSTi5x1g6AEUYaIbY(tY0
zJIL=em&BF?V8!5el-1mZqazHd<DxzMX!bl!SXhhRU);fenHfaCSqp6@wsh#ja@;dG
z5q5IfAU})UDSD-;(D?)}W*q5r3XFp)wxb=JZ0N*?hSZ6(rKzfEe8-R$glqEHzD|J%
z3~YG6T1C7&&YX_g5(>9R$`N}<=9hcSI5Cz_B=e4FlI#d`ob!V1qu)?h@5(@&8Jdmb
zE>xm{Vjg#B@-kfAXH8uc!XT7&pM1Y;;7$zFCHfJFa#FU8{`!)AZLE*n?0`<2Zox-+
zE4ug>;|;GL0k*yJbnW6p_>KOB#M%Vt-qZo7njd2DFeU7{yB+G)Ci6y&Ij8M<5Iug_
zQUlh>Q-8&r!BbSh@?|QFI`$e}H>YC6Gg+*UQs9DFH)54Z20)%2D6SlV5!;V~y#Fs$
zD+<D}A|WX^@B`t0z1%hbLg<}mLq=uwz=s8FW*com+xmAyS2uf3-K@dM!_v`7TbEw*
z6k`FACX*f;(TYrYT0VxN&tyyLCU_@Vm&m$9ll9T_*HtiGq)#HEE#Tb461<cqOGdLk
zr1(dOB%aL>g0_#~b*4muT52am%bL-c8?u-_*&WR%<X})N^J=_0frodqdDo5!(9a)%
zAT<R}O+OAImZ{>LE%Bf!^#D0lX>wAf2nTd(AhD@f)H&%X=q-7Rf`x^Wg+XT2f8S{?
zbiEASangzoc>IGa1)3y4?<fWhXwmvHFS$Wu)JVkqNG{hl5;o;p5rJrmB>B)gjBpR<
z9rvH&9g4HTXmK*Q?PvFowiMXCrW7|hv8-LlBVI6TMRrBecg*m)$PYZU#*BVN)~TpN
z%azP+A=`;_vovY<$<fI77|}11M=;pbnhG4OIJqUZL^{?7J4Vd}fqW3ZR_QimGS|Ar
z&==@<^&G#maTu2W9tFXltnj>n9nooPLZ4mhFld!0cgX(`2%BGV;^X`H(lI4)uaCX&
zi=|1|zl~t>&jsWx`*7g$5WJw6hVKoHiTPe5vgf`kjdwl|n)X^G6O-`e8$BBQW*PVj
zP5IWQWNwc2Fx;RXj<c`}-Cob<>%HFab*KNJM?@NTUaU*x3T%jGycE%XS&Bz%lX2^r
z1{gI`0{X9>;_%>8P+yhAl}B#p&(=#Iu^^Sp@~Z;3jk#R$oF>NOyeZl~ZZ+CjX_Au7
zEZebH%n96GU32FgfwZk`RyBJXSLLjYes&QUbk~nJh*|;}e<MM4ofHfdE7BP;e{j^A
zJ8-C33L(}XcdE!!@q_C*&e?;g+nO#ECxwcbcLM80id>GrfNq<jKz6a5?UNTu^y%<!
z)R$q*oN=}2OCO_Sw+px4m7Ur5dWz<}KY*Rfe&Y>8#vf4G%?ko^x$@9fj1whe{nTzw
ziZR%e+MIFeUFO{OGpAlg_0YYGWeF|DLD@Jt*s<~r#y;8!88_Xy4iyJJcaIGQocjun
z(_iE2Z5lLWhcR4cJ+c_VP3{}(YK}Yk0ah2<65n)X+T`nu|5Y;nPuOTaDK{F-lH>8+
z3vHS`^$Yg&YtS^CY!uFl<z{tnhC;Vb5M6l`f+V+~YRP?0<H7`fz~eeDn2`!+!w$jd
zf@Z7^Yl4XG)v$=|&ty|pLWi3o|H`Zv@=A?}%LN_ueEbjmo}Gggu89~Cd=Wlx)uP#J
zu0!s<P^g<T7=69J^3R4h;S{TnAl32(BYbS3@GQ%bDrn)Gf2_x^EXzF^cL;<3V;P^n
z30S3h9=u$;U|sbCJpP~-^k)Bna~js<Ud<bpLuOv=`DSE>;Zk%gdC9My`5b5Umg6$3
zYgqJf3#`8L1B25Kg2#1zddD%Bc?)HMG#e5>He)D{jOK!mT482fE@MVNfqdEfl5EE(
zT!6<RDEW7dU*2v&#V_Ka$!{Y#PQ1lc&7Fal-zw0$;RbZijv(AVd<5*<Wku($QzC-=
z(=Lr6=W&K>9K>uL%zH70RNL?4&`PI+xFikg4@+@F?IJ-C90~b##gft~k6_1Zbs~_R
zB9aWPgDXM0#CGg&Fz{h~zJK!c_D&-rJ9I8~92RmFDT8RS^-DCK)rq$pcOaUa#Lp8n
zsqc;x5^IxeT-7f})kDllr=l_aPR^kwbLV_<X1zDrlMvY_AoF~!$<zs&#M?`kF^{W7
zmET*yfY-retdr|DPEO>I`yJ}kp2Oge#w7L15onU^fp6KC)M(6gsQ6+;<x>X0t+!cX
zud|EIctd%;8NG0OgBk5!%)G5rSD;?s3F!Z=gwZC_@NNG9#K?zn7j3WMaIJC}t8PXd
zrOYuQyq-CjzhhN8@&P+S!L>_`<bLkP560S5*kni>GHTI<WxfS2(_QY%UqGX-ub^@9
zF~)7Y1V=8!VXUk-s68Bj;Iw)?w0Jb?IlM;eGyY(?;UK~dmTTOb%kR|v57y7VgK-{z
zc#koR16_R!UU=nTN`MV%Y72un&opjHS`rR4$HEPP9vK&K4i?5*kUyzk*leu{Z0CHz
zn(60Z@GgCt>T`oL7^#Fm%q&UJ*u{Ld#Q@hmzY#wCkRul*j6=P37S6eS4UVtNg6vDI
z2N=k5xAPLw{2R+xt+qwgv2KvReUrpz=27f#FoNzv#uNP21rENBn0Y)Erx@12m^HIN
z?@$|D^soZ|cM~yX4$F}=tmOxC3t(+J%To=R1wk7J(S2ElwA5XWzGXe`GDRV3I&|SI
z`IqS4{Tef8oMrAViEHrVSS~5=I6O0EbD$j-)R=unB`Q@=Iq)B5y!wdJAsIl~?xJ&K
z9?U85LGLCr%<$KygLhccn3tO%QmYW17VW|g+iZL;_yw=@zeC4hIZk?S6~1I$@qkI6
zQG76+cP$o@5c^4RDVE2z#%{PK&X5Sap7Fx+GopxuLd=r-0vQ3bATBZnaut$r|Eyv-
zzdadkMHS#YPsqI638>iLj3Y{3V77`C6<j(ZdhWa&6<4q>+g=4KmVT3yzxp@-lDWHj
zra!^@(qwMv{Y7BM&eK)<nV0W{gqt$xK6{oqKww@tq<6c6k<L&YufhBcb{)KDx)~jN
zRE<s!(xywB8PiZF+O>0A59%(pB;0vBGU%Q<sWL6%v!ATMZ4>0k4#pJMsw;y4-xx4h
zrU10SoXl6ZCes8fp)5ce2L3dFK=lc83JUoBpIR_~QG}#5^8&=K+sS5{vLtb1GHjTk
zLCc<G;M<|{<mqo&qRIiv@4JuFKB?lFkV<T*Z3XYMBO&U|5nMj(IXqdSK`-XpQaMA`
zi<$a^7Z+xT_^Y;5Wq2QY*e0P*-D*@%G$hhtKj7b7*8M!%g94o~qV0Pmc;cmy4B28!
z1m>Eeoi?99s{xrJAs?2HECGqrA2?!TOiK2h!YL<{p<}c(e6jujM(fYRslk(RST4)B
z=9*LMi#{OkV?SHLSMcnfo6xH4ibulCNqdPD?f90*g_LXo&^D)&{Mxbgl{b`Zh~$4W
zes=uidQ9;$BO6tggYC`>_*~5q@93SvM6-0>ZQKY>GmT}qJU@trwae0^r8N-Y<PU;p
z!Ej3ZKS-W78+ZF@()P8M^hnWHycL%Tz7opyHL-o>jOrZwbZh)jCqW$)0-tI~F53yn
z_)<GEL0Xx7IdTid1r<5HxrRj8aGOtD%DTH!2J~G|E{g7@z&XF?=$+&U=M_|m*T|pD
z5BOJ7>90f+OVqghU45bv14>jUV>5_Tj!3j#7Q#qHDKfzN=#hsM$?L~vbWGc5(1_Z~
zwT=qI{QH-=J1i%y7~ap<-^%81rL@AJ#{x1+bRVx=u_KDNJy0~bp5<(7L^It_;`pdg
z^ot$^GjC~8vsn@_%XP;?DiS;ybOPPCu+LP^C2nS+2@$_Ri6He-PWQ_*AnSbq`xw(E
zLGdQ`ooNF%7hi7v4SlM1<TH4@euRkzWn8@Lc=S)Z1o4ArqpJFONVV$cjy){IsO)@@
zp6`vbOg3QDsvqqCw<Tv#YzN~^9nbcO?gIayO)!?tV9JX%Szbns$e&>Ei^1#JnQ<N_
z^nFHC#fR83;~8`YE<uegWBBZ>OkQjf$%oE9j!TxSlS8Hf7`<dRypq+W8ybx1!^N_s
zC-pPmwQUA&In0=3)|WX`vslKNUL(oBZY*+Eu%vBXo{$vt8MX%3GCs!uIJ5a+gnuFT
ztVD}ABqf4#Gjp;2$;JMq8Q9Tl$?}GJBrxq6%qqDGZvzG}q}&A(Z{Cs6Luy3vOe&um
z)y;Rin9@YW*}U5xX-Nv}!Ib@-g{l*R;lNe}Qn~yy1o^Dul3h~Rxw}#l^Ykl!JgNxc
z&R<M;egtyV7;lu0;;t>S#d@_klumAip3*W5P!mH#*jSVg9Yph2YtRV!<9t|w9MK%2
zO6m_aOT4G(gO*<lgbZ_ss~)A;pK=jeHl9GU6iY1CXO2j>k(^)HNQ~9l3Ok4~X)V6W
zIb<tQ|0ox*zskHw_?_Po&UoM92ITEa11jCmau4=DIe}MT&J3$caJbz9PdkiB$E+rP
z{%}jezcVG<PukJI8ciCwG7Or-4x+jHEBw@V9R)$hMC+dxVObFeZtFg9Q6X<|?W}%K
zUVIHQmtDqE-8WD&vX%F}JBgccMvfXx^nt9OjMeBM;AY*Cf|~UQ!J=7<2;82d#+(rt
zdQ6TcHVxy_l2+k=GDf6>*Mw<ly_|L64yXy)hu8QdII0qX9_0#r&;}RI(A<RXzyF+d
zbE3G;KW%VtuO*yKR;S%pE<)F!>yTh?jdJ%9^LxxBE_?F0OSwv9$}q;2zuLi%nMpvU
z`vnS(SM#%PQJl)Wx1otYz?<<D|44VDxBdzEc*K}A{;{CJw*9=#T{E(M;T8z^{0Y*$
z4r2O;SSWsb3AoKHSL%8n1P%AHZ;eOzIxz>=jZh))8<oh#fs?RyW-cV^%;Ib{rlVpi
z<$A_H#H34xbQS**ix1VpO<yhIuDS&CUry!DEjWfAx?8#6nBRO)Nj0zgLzzUlYJvVZ
zmW^^w=EasDB?9+NuBrPcLcq)=7`4O+1v554Lgz8iI<x{yZ)c*r)F(6+ny~rJYOuKQ
z6cfL{l1%#;g7*)Gv*&FK>=E_Ci=zglqV5Vze9?zJdsNx(A^?lcbK%4}J#ws@?N^-<
z9gnDU38(JBGfO#oZ@eMYCxuJ$ZEPh*O_$Mjj|4&)r#vQc36~#S!<~y$A(GBI7*pU0
za^nSLio_G$HoQdZJWEIn@nxUW%jlEY52+cdxJyTuG`Z>1_wNnK{uz-F>zRp;FI2g6
zcZEc!co{U8uxI$}w~!rZ3~?O;{O~Hq*`6Yxfk8F6=C~Z4{<Rfjgu{6!r&frsS%bqv
zrD;~=6<p$`Ot!Mwhua2IoVAC&Q;kpa<Lo1Fcyc|u%w37U{2!sJ!)P$lKMdlvcU@0>
zF2+aIDrD`FHki5K0uGgy!&C1s!e0$L;=Z*V|1qxAGpxlY--IN6!2&3Ia2h&a7J<Kk
zC4Jt{`Xe#v@O08c9GHI)uGc?ioL)Utp2ovWPMP}Ob%a9^vq1F8fIjd#gVW{};*W`{
zG@p5koVI)fqp$~XsX~u93_A>%YfaNfPKV~X3dB)$CMWoJ6)%P|581`T;QNlvic?cy
zYBS4~_uppy@(RhQ{04}sdkNjY|G}2sYUHW28WrCECdxSehCgGcM!bUlz;X3;c=4<>
z_1&Kehb%TR$JtbFL7_X|tJk9w1Iw`2JrbYHQl;62AGj$pa&+>=Qru^3M*LUA@c4QV
zHJknxGnhZN=k_apkh?uO{pAvF-4_Sf?oGg<cjc+#B6W(x0OqH#y~WFiIP2~Z6lfYt
z<bVHx`8r?F@q;u?V}G_mqQbh92BKEW^Zf4GyI8272e*WsV79Ug^KUGKw=M;^N>z@=
z=?1`?n;LW+CWE$(5qW&hfQ&tT52LLuLHJINHy?f<*Uq~K8g<CaJ%5B%$9lQ?*M8hU
z$|jtip+Vd>*@|N3t%L7X&DcL|J2*la4n4aaJ^owEXXl0SZr`gVyO<YEaNUrG-}wiv
zTfU;RmoZuNmNCq9Hlbzz3`}SU#E5GL!N6S!-c7jAI3lsot7}dcWn0kH<#XW)>%Mjz
z?dDn;dnn8H74u6-;Ff+QcCjvLuJ&J;bNV3j=jr01hpJ%rOqoo%!u(f8`j}Y%R3sko
z<i@|Xqs^0S$nP3+5|#HE9QT~%#2TJ#p7NR(Gggs7Za?QITmd?Da#Ud#^F_32Lh}3|
zh)KW1pBZSwne}FLhR+qKQ3(aT9VS%aE_+{l@0GMUnK2*kHIU>rfXe|T44#|CPy06>
zJY#uSB*V@+jE_DmLjx^pOv#MSYT$P;M~i3`);!z`$BX|$bqZrUh8x51%3g3g@mi9)
z<Pv1%vN=@uRnB*n0UvWrm$$q>6!ovGlHk4aG~TZgi(9*)){?P!R>ezBy$b_5GadS~
zPlkN*V7{3C54_vmP)T6u74Um(3gVTjk|L*}(0VY8`!1FvrNxFcb?OMX)wBr*)=RLk
z;x~jp_zuaP-b^Qc1UvK>Vwoz3)0(2W?%QX;@fqV?uhhdm{Z`b|D+G<^euvv1<tQn(
zB>(pz1$BBr$C?-XTPsa!zf>6=xqWEB`(V?GGmvpmg?A6w2ep}UB;v>?F1S_)@>S%R
zcjh6K{`Uq7do<{<xtB01@F}=dHgP`s?Vxrq8>{UXqQSyC#-}mBEKwu6TiMZp5ex9*
zykpRfLOOi&3s|+a9t@1)@%f_5(DU~iY#c6t<?k0^e)xZ&^JE8PuS$bA?bop>O$w&1
zAK=WN-o}AXhap8*nPi6kVtnRfI3eU2`u-iv#aP+$C$pIM(ES~HZf-=cYab!py#)nN
z$8f98d<B=WrZD)L44tHCOgjHOf+6#i=(fvM=y<jcT~Y=^hOH{T+s5v|#f+)(Xfwu(
zB_PPnmv|k2j>-udv~?fj$9_M_WsF<LSE<W04qrHCmqqh`D=lgEl1NajJdCo{n=x;y
zIq{Tz0D{*iU7tJsV|RhakaX!Yq>BGzcQ1Bc+BcSQvtDB0k$VuZ=qpr@8UXjFmehXL
zF-{ba$>wfR@Y&Cb+?G7Sjho+KM^Z8uCI24YObkQs>nixhpcp-y&!Tv5AOCQu4Oty0
zMLO&=xu7vq@hD1@<U&n)RuTvNd=q+jvn5R|yD3Wf{sFyYv*EK?iCSzJL?Zbk_*zbb
z=(McFa@TX9eNLI`ZlE}qWio~9JTOGV5U!A?2<(|OtuPk8G@OL(O}n61!;F-j%Ewl#
zF=%~j0o0#WfkAg!mp{#vZ}Z;8Itd3j#n!95hT1qzylbgw^_kafuAI(iFVdtQ2|U^u
z$x>CDOw?xG=fHy{5Fq`8^`|t^(Z_}x*VP7vgWuqR9h&6QL@OFi2gCcB_GFfVDlOM_
z<|k!{F)2QbJ<m)aH0Ly&J^ukb)#Rz$o10wB#~<AO<(I*JzX>$2mZrsV-xxQt9JJ0I
zhw9YN5MpT!cUg93sY(n?m@P%ho|-}OJ5xGpd=c!nVE25#A?O|bmum^U3m%;_FgtV|
zc(Xm-hLZ+l^F`+T{KMRCRsDEKI34Vhvam>Bizbd%gN<AlUUMtQ9^L}Aj_<@u_D<2u
zwWKo`12=!Gzoa1Bie%0&f@>KaEN^E+BZ_bFZIjsh>V_`jzKsC2`@rkSRcN!Hh7FYk
z5I6NU7i%5?_NVK)?R)*u>_2w58NoUfgH@pPRW&F(6fz&SE3VqYx(@y_bhNB0@o-%T
zb0qzk{Ph&-#dLrYZ%oBPPhPfAz<je~A%8-P$j|c(R+fH)XKv~Eb&LhsJ=cJ$?YxWv
z>1jDD-X>v5;1P)XoCx}#k}%e30L8mcz@Cm>c%p^vK!@0p4XjhSYTQp~;^Oht%}h*9
zR-i#aZ_wjw4PVlBoqzK@2a^VA)52ZPAiuPh&j<r7kk+CZ*Il6f)^Yr|L5AktJ%(AQ
zpQ1qlkMbkxz}fgP3J0y@)~!(^F8kl|`L~5oy5$dmbuv2js8gdyuW>~nbD|XJQDM9$
zpV8<8$&;o+%(Ew4##6!-jF%w|?eTE>wG<I2x8_90f5Hr7O**`n`FQ(2Va)ImT*Z^M
zFuzNcCdRe$UutVWvA+ageM!M8^<uti9b@zM%?HO3{d|i%V{NZK1zVmO5a~8`YFw;9
z+eYt%;^7}y=24lm5AudtN9|c=Ob+sn7UP;xwq(KCK**@mWxM+4@aph+&}cN`GR6$y
z;Zh`YA8v<nrwZY!k_0s>3OPa7B-h9WMdG^S0!p*YjK^CkE-Xipz#k8I&bqm47?0TS
z<Q1HeTm@79-o=TwEs(O)n9L-G<n7HT=q5B3^(tr+g{opKu#_PmSbj=tm^3|XcnSsM
z!z4?eorar3?b#V7o_Tzuq2*5}PP*ENWkEyWXzM>5ye$V`PO~8Qhyi}nFGIraO=av3
zFW9faVX_nSDlvD*t!@~Q?}16bEXd80|4>lV=aS95TFJSSFi34ZztPkfvwY5h=XQ3U
zla=5$=6u=IB12YpXpk?VFHxYdOO$VLOJZMYLIb&v=x%O;g}-LvA!kQuTBd|EKK=mf
zSK2U_ahpc1&xAdbR9V;0hP2*hPRYIO9isXP{l-;b-?<9ta!7~7zC1P$os7$fF)a(<
z&bSE?u+3DRX!J~kT*e|A5PXIGJ+jn|bwDgwPs-RphD1+x!kL%Yd~|g`c;`RIq2IPJ
zp7LTeyL$-VUmZj`bj5soJKIfGtmnNyyvMXPqtN#JBN%i4Bz`YZB3@OAj8!}sgFk0-
zzlb^+XYm>g?BlunS#rehhbwgHx`3NQIyZ%~H`t5s62X*k7iG~2Y(1h*3m7}XdPoo`
z?pFl2&A@xG&a?Q@P%dZ`o0GpBjRL(!en2)ALiN^S*Iy|V%C7?tpN-tJ40-bW_ZK)N
zHyzz9cAy!@@~sKooa3dXyk}4tEKQS!7jdj}RxQm50_M3YUu%SGPIth1a2GDju_b<Q
zOK|J5WT<1@;oR{uRJ=|{B4FLHpyfJTnoR@fgxFxyRV~&lz6LgY9{P?oWINqP^fqkf
z&boDA=W02c?_7d)BM-yE?b-xf)tNJ@iT9KgfO^FdjE|{-%u#zW<F_4_=l<f8kKF|K
za90T3+zbQn3&Bx)I7n8uVpHxUF!&hBOC%xq<;5$I7XD&<q5G~^6<Tp#h9McA(FM(n
z*X0*B9)_HFfn|A9K>A+>2$~+do-;9}8nK?dihmB)usrXW!ll@t_8tx$-Ho#BEPT((
zoF>8wZtB@=Y~kMlYks<#3DfcYg0mRDLz)hoY(zXv^Z7Xwet||<0%shdN1hn4yke3y
z?WnM#Ee}Va%hK;$Q-F{PR9wIyu#bE5kM$*WAAs4ccFg_!2=m1g`MzHdL0=*uQ8(GW
zT(&HysAwX*JEu?Ubw!-xcL~3HAIm0B5n=YNncz6;4}WQqDml7djfmF|<3_MdRIB`K
zhy}zvUoGnC+yG;OZ^FGZhS*fX*s}l9!Q*~B|7ov~?aGv><Bw)OBk&SeeWe5YR9=An
z+LL^=(mV)1ZG{1)2f<`hFj%%ehfz-$4@096gO7&7p{zid$aorG?*D`jCs-a~)l1jL
zVM3B&kj1%%sZq7HWsJ$x$lPxnr?~ANr|W7!d{rf)s^~b*e`ONy=i!PgFIo`iyRx)l
z@?l78YGgh1sgeb!d~uBK1hn_Gq0;hqKx3?!3*XfUYI$G4?BZ=G{5Jx9w^%}@b{0Gs
zk%PiN6JgOgU8;7e6do~l#oJ%%w7=sB<_(h~t)U^{G@}T1PCf-QTLokd>x{yG`yiR_
zg(le+)@8RP`P*+}?G{CvczzAnp#B*0uifA^7*E~t%mv=iBnqV+YFYpC2cK`1%m>bW
z1C9T*N!PY!mYryX743N_9D72vc9#S8Z3%|}*4=5?$^QOpe;)7mVc+;#SYND0zU{vQ
zL)g#Omm<cVQBvXlJEcO3zp?Mh93cvZWl1t}qp>C73%XAJje-*!ImLCZP|fDd<tsUG
zyr9iE(~9KZSXq*BY97Q}*i#LbsS92B6DGLnlld;m*q><%2X|dTI#!w7JHH&i&@&Kf
zb{Cu`zk|0mvNT`eqG<E&U+6e%4`W^3KsR4U5jy&T7Gtq)sxToTqaUz4_#*yy_c^xr
zih<7;a?yCFJYB<lY;N95MJcnDXq2T5{qAc`Q_ryZkGc|F*ZTmUjjTnD<u7=-VOmsR
z<|Xn`{tZ)O^@)d~A8x<&AEf2xz;LO{;CrlA(mN#_1qI2HO(Slh_J54Uop%nCr0g-I
z;yMf$*Fi^i8t3@Qju#HOB5~6!lSFA9K(BkNP->z!+20b2g@w$2Tb~M>G;=X~uOm#?
z)gk#CSqAxKHi|c#kT{5AAz|lk95ULN=!JiR88(24gP-s^8&*NLU<lq?uf+1o4SYV^
zRrOb`WZX*`+RygUE|=My^IrtGIh_}E2i1T-n6pm!7pNTa4z99n`U0PE_`pL*hs`l1
zh8l{j`=d@FTZ{%;b%-nHV$c^yToT7RAd?s`e}gWrKBi1|F3AFcr8%}Pe8G?0dI3ZB
z%VWO7MoFK12^#!7!KJ*gCX+5d#kBE^sZ-y_-)DFJwg<W>u=EjSRJ(8o!!^l))4Axg
zwI31}j)x5YWb6#DX0wWCyqlK+y?8_s4|^z6H@y?0CEs;v;^l5hT=Gqr@`|x9mI+Dn
zpTlTu{Rj6qe1(#}Za#k#fsXx8xL6f?+V=@iu;wsd!Om>|ZIvf6uZ`$yC)OzoX$Ozr
zyZODVZHUe)Z#dO-76dI7qM(}3oG=?DZDWnW-*-4;Wh$_FToZV74Z$j*A?>@_g}Yb(
zL&28~l2O@+sgDzRlL8Yopk|OakNL!embmfiEa(wU0>`mGxqD~yVcK{BL?~Y6J-nas
zf4|GqNE;10it#X{BQqgWe<iw^ZQ&bS1=M8oB^<Rp9$M!r(x7#IC^)bnr$Ux_LDt0Z
zEoZtwOST4Dq@O`go-Eqiu7h8^kPNfZBR^9tiM8}W*1=5!!M^*hpN`6qJ2z}t-s?Er
zQ)AA}EvF@SLp8{STMA^H*K0VsPKU(pyv_-Nbe#p_$)ZrD9PnX%lpgoFFsCgO{NJYV
zj^Rq6ERzQ-wN0s$TPuj?yv>PfQl$%hrRgjAPE2$x=SRHfQSy5+W??l<`EM!O9n>b}
zGky7ijcU|BG@tE|3ee+p02grLAm*n{k(8!XV!qWeQPbKJFfsEw*vkFF>bL2jdsvBB
z7TeN{G!0C$`U@#J@+2B~EKD_}MYBavacBsF>P~D~@d_+sOTqEb2wtoAIXrKC2L>7%
zST3c5H8xXl`ie9heeVWy4eHRUJ+7b^;)%x3HHpqQ=8M>*%Bd|##iHy5FlLtlmQB#5
z!l)Y5vMhq~@{RoL6dAJb@po*v_8Hd<R$_UsrEntLg2<<;Q$LoK?8><T9&RmM<^u_2
z3f@7IM=4Y+8Ha%r|KJ8L2gR$0^CGDn$YBh@JvJ|)T)_deuU$aNZ3@d+4?ep`g)T@u
z0gmeHabJ-v>41lvz4T=8JuMFjlhpt3afUlB%A__>ju`D=`){isxRG%J#q99&x!95_
zFb9QT^jM7cWX$ay$8z?~WBc{}1sHDd9$N-4LWkKMu#|aw-Hqa)e7G4d&wLBZGy0&%
z<sh2KJ0MBYqDyTzLhXPQ@s>~I#bG8~PkIJ_b)FpYT=yP=ewy$XH?PP1Um;){&f&J%
zxtJWKfc=ZDaCFZr{M`B!-|UuSXFvnq{D2NkRydEzE}`J`R|2_XKC>ClRLSUz)+9lP
zu@_IC=jX50r&n?q^LC+-Pn`WtvUOq(JZEmPR_QcOp(+N8zvqJPj_0_-Ef?KNlDMip
zU-%5Ahy3(G1~jLM?Fx!civ+LliI!*IhQtAV`o)rh4hewkAEn9ftsgLOeFdyt;fr%7
z=fdJDJG#7s_5U`06ZJ2a!NI*Old)Y3N4%-SlVK7Rq}x*VGf3~H4o9=AGW4Z#0k%yo
z!?CF`_+UVV)_?NmcY0)@k;yGk?9Jyg1WVa_e<YU~)C!K_tjlxvkZZS730!Erh=Kb~
zL-3+X<~o`H0{e5$Js+NPp}kY^XXI^m&Msq46Gb{Zstu<-Kgva2{e}$|m++vp1vzwc
z8???U<Dvp9z_Td_ezG1y{}q<|-du<E%8F1d$%OFFP0;VCi<7UK5T8zYYTrRQ$*L}x
z_B{+_U){rTqB`bROOiO1<-_B@$8cW6E$ohbj}Z}#ocHAm;IVfXzi5^XJ*FZ<<9cp!
z2JX}0Pkt2owmjrcDCrab!U)hB-vgiYq{y1{79@C!G-%yA45Z;Ks$64dHuGi}_u7Ug
zi(KGw+*|fDD@&C}Y(sN>De{Z;vHsnaqp8{+(3opN1T~GWuCuj>?YnvqtfXATv~OHV
z+E57C(23T~>KL>61!}f12hU_ZQt^vDGx8pReOD2`EU6LA^4Tm~3^3~WZRR;Y#yeIn
z;3j@$uNlKtAaBbueXFC{o+bwCRbF%XOE!yqnafGgB9}Ahh%PysZH<zlmAGfW1nOm4
z@zWXsl@c<q=Eli5g}DN<S)ZUtxD$`AlqQ$Q$kPMgWk`1BX&BD3u>)@_pmkdcSL#v^
zqnBxs%j&jtyP-S$^f}8qwn8Wxtb)?rm)Ly6jMlIC!(|LLLe;!v&~dVa{0;lqOe+J#
zXKncZUdhpzyfl8^Bx$-LPnJwHC<Z^bNC-W4AHwD;P+zAce&Un}*qp}h(CUomvvD%G
z$%yz){sa8omyc(={V;v16^#2~OyzHRV&VJmxV+&ZY;v}x*9Pvvz~d}vGd~2gS6dT_
zPA^m%Nm2P<Phiq^Rk}M^k$w)>qj<#_3~CPI)=7U+EuCd&Q`Sr7-cceWSx&rb!d;ve
zU(0=uQKC}I8!@2hF;2_VMYH~2sIbkHEFO}C!bm@8e;`egB5GmUvOm1v1r;1UF9r|G
z=+g}bGBom+7U^z(0)0Yf9P?~6bk6+(ug7YUrQKO5@bGZe7~jh`4i3kT3PXBoUO&c&
z0zeQqJv++eD!622@hvTt*sE_y!Vj_xiSIQS*pd%F7$fpWwh482J_s&z?qkgZY1lfg
z2I>pevdsNt6wE9ZWsE(+Ws*1$2tIMr&wD_AR44S7%22MrfP}AZ#^K|{IH5fp*K+CL
zo}h!fp0TXw=RAl{sl(Ry=Cr<dEMHSbVB{2KLfce{iLx5r*U+IMTISS%9*0*sMHqao
zhl|JtknlrMtbWJ!V6z(OA8tq!y$U#wNOMk_^{Ypjm4dGm@EW1<-0<H`a4+!!c<$^4
zheCwMfmT$C@$v27YSRx3UZ8le46NTPMch{eg4iU-)n%tMST6ntfz}Dw(*Fm{MjwTR
zD^Ftq^F6l)Kj6=`7?OD`E50;gDyU9e2lhGtIIV$FsID}k5m%bH3syqnf1?4rckwXx
zlnu>Tp3bZKtj0?F_po3XV+We$@*8rr$(RqO_$vJoteg~vb^HhHj7kE_#5~CQJ^*#U
zuA<>zJyKaz1*_T4FnRqYh;Um5|BM-fH-g7&Z!^Hl{to)i)#L=;jyW1Hwek0aAJ`wb
z8T`+rVByC?$XAV)m>nI0GL6P$c}o-gJEB99$^+3ip@F;dSefw!RZ(eX5?byk{eN$e
zhKefTRmelg{$Yfh2eSmWNju-r)(3lR`=QYGHaHqDfh5sAFdd;s{S4+qBTA91<e`|q
zGzZn?REcVhD&sjm!a~OWDl-^?vFmkd|6x5mIP4CdvUmnEht<gTv5Xls@jPGCI}*iS
zsuH&l4VW9xxIsN}B44&|w;a`v9(U(*ivOz7PhOLHpOgXrrID}>bx7`AU#z#*g36#s
z6!+ZD8Fti~IMigo_vL0pm1ZKZa}yV2-ojn1yLL<X6|AP&(eS<R(Zi|(BhEeNBPYE?
zwOa#FWcL@vR&PbGV%f8~r~_>Z45@eeAHMBY4@L*~VB4p`cw(X<$#M<CHHT$sM@R&(
zZB)$i#N%8C-Y`FE0Gnl-FXBe{$rC}+UQmmgi@yDf!Iy+^9(7Zp<6AW!*S-pTf5nN0
zCLh3>(b_bA*&V35c92(1WiyIPHli~(8H?WG8FMcT;1JnRwx@}~#LMe(a0l}x&9*1q
z8>2Dt%okDd&Td#f;}Hsq+p@Fe=0jwp7HO&tf#HX*!1RZ@v^5}x+cxhj4%(?n;>NSC
z$$MMgBtQp)G%xV!BP?k8Y9rRcJ_!c9ST?ds$T}FQ+^v*q*!YjJ={Hh7J;@Zs;S(Uu
z<}xVyXu~w0aPIe4E&BPd7Ol%Ega%)h*BJH+w3On(<<tgn^)11d={D4~_dJFqTf?OZ
zQskPF8x|fdLI2$GkpEJOOWYODSUK{%#4ZDbA`O~2c{V>|e<3KwH*f-7OMXec8X4VX
zP5cio0sT|vWE<;?)Q{i7O+5AkeaDT$wA#lY?wu@JtMv&ywqN1ICmk^Lj}iTsRlqtN
zfAGbsi#W>Y8q8`o17~fPgUL4}(c{x0A^j43n>-Tl)fqtjwx^Owj46|GvK0kJBlvZ8
zH_%{vEf;^_2_`F~p?bR<4c=Uc{-J~6Qg;~c)cT6)%rzct>I}!t@4((zb*ieQ3U0;+
zBnusliB#$!;``~Ir24BmE&9-l$@w?&UgQtF(HMinZ#@O)eM(g9beg-PYDwNuITG65
z3}T~+lElddTzxIaEmCH=^p|>gvHmZ1g}9@A+5hLKp3tft3mNm$c|q`+oP0Mw&T;WU
zE<JA~Xbq{s>Syf`<mkyu%HP7+JIo1sasW5J6F}^5TWX<KfM$!u5U#C8wVqm_SM^C)
zZzfIS=6>YwjJ6^dpIg9jVFHNn&6Av(_5^P>zrYo@rAcA$YZ!T5NRHjMBPYEq$&P)p
z^xe0+7-;<f?93nFLseT6-dn@?o;h6Q>i-~PeHTx1&tsbFN6@qWiDkNS@S=%g&D`xU
zbh0tpkH5w*u8zT&2g_l=lXc|Y89>O0XxzOYQT4wjT>iQjE!G*47_TRs*zf?vRLW8d
z*7MSgHKGNhSvHc^LQoVtZ!G-F^&dM0YtOxejLP+#>H!h@ni78Me&&d6mEz6jr$L3?
z7Kl+li#E@!Xzn8+ZQmDxeh#(hHa&rH$E|2xX8{D9V9%AdcarCMy%<wz%`KHXjnguY
z!-OU?lK+p0etxqiox6^~Rhj>A?br^SQpR>Z&6QA)YDNt9MT6oeGnm$VgsU1bp@nz%
zpsZ9aRy6E_#~Ybp+`<}nF3JY}R4l&8vn3_{T6F&^4wo%ZBI2-4SDlG6psVy9dxMNg
zu+A75=!*x9J!!mqdnky97w~fN*6iQ#f!;7}a(t*hJ-=O+sGC@lyZ%>D`0byh^Lzn}
zS27{xsdiL%h%xD_&IgmqMd;RJDw1~3f^WhGocAdi|3+yLiONe*d>f0thBDNRVU>EG
z$}(S&JVD1nQ1H@{NDteMRiVtg&F20Qo}V~3?Qf#3R;-6NSC(Y!*0BzyKYsHphDd!a
zlJ9qw^FDkY<02-*+~ayg@AUy3#}%LhiNb`4`*5aRhltZ3x@!EgrF(wNWv(=qD>=!y
zMH+$7e#D%lSE<m%fke)K-cN3Hq9R#)h(kgBqz6&?-5@Wi1baDa+B{c4gn@FP-p2Z*
zW2B%{`7X<2$l(~-J@`#}4x2N);R{@AG1>hjY#BU=_%Ju0ZT4#vykF{CKVKc3(qAzK
zT^XiGu?glvYdY;vKJWBB5G8iyuvWAT>)#v^%~_oVL83%nqA>!?uZ3~KiLZE_AZG~r
z9L43&^Wsc*8InhlHbl014SvefAdl^HFk35|_pSNI3sc!SK43h!T~UMmqwC<r3R618
zEf<7$^SKztGN~_j<Yt66!iGFG8n<W+jA~he(M?+5_i7RN{%I4{uk_^gvlVIZDIv@M
zmgB|9E5K>Ukj)blaZDzGvc)bCYViRgj%Rb7?4EftDiv2wDn#GiT@nq#oU=YL{F$5D
zH1XPOZckJb44rh0bq95+b(RjE_G9z$q2_ewh-aX)W+GfZYegzXFM(A7Q81KSjxQuK
z)V}N`zmGATbQTXn^Rhzp9&U{_ro$okAj=s_Zh>HV8no(J)16vxG2O!mGv>bKleeCL
z`eJ8JaB6){*9;?S>NJRm7#Ba*`Z^qozk~7-72qLmz}e;6<kp6V*uLJ3Y@PlUyC0~~
zdFRZj@WBm{v9AT$Uf6@QQjI?C*CW|?)No#LD@srK2w^WZD5@oaK=cJ8uS%0)N~$Dd
z`F=dUDHf{c0qbOWz`G;?QEtqFzStc2a=Hl|_8fr|lLisNClzQ-o(&6CEQw>zLQc?n
zC`a<K2quisr0$<WVbq-#h<TmNhg@BW`F|!#U}QX0CuM@x-{;WN{~cR122uY~9U5x#
zfX%Fr@<X>ILcv-+y0F=t=m=K8>?4dfHmwfVf6^s~+y}vJCr$c^-A}q~Zi25;5cgH2
zKnKs3C-Q?C%dIGl+p&#hF&4^`z{?fz$0Z#<?q=`)+%8w6B^mJQeK+%lq~!<>5Z7(9
z*^cV-Sl+Rvmm5AV6zdIBB@;%Nkp~wni1heS@Vp^Ky=GLQcX~8G+SY_P+6d_O$lVa~
zV-X}6-^Yh0s$^hn5h#1L!G&xg`C@;C!NEphfOY{ySYL%jr;p)DT{|LktsMnT^RsX6
zHKs!osxh&#OQb%-gk@JL%s42dU)60%f-o1V3$vjkuZ+uYp9KPw=dKRBkHNBkUD$K|
zEtak;hOPVS@%&#S8s~fp-~BbF8eY4gu|}F~z2t-QUKo+K3yQQuUK8bImFbbQ#&k~m
zLX?(Qqvysy!NrV+>g)E9_xWu~m!Xh!tTE>z7wD4_R@xNOBf#^07L-L=g0+Gf2>z@V
zY01<x_Nh84q{_7ATo$-hdr0Q*WKKxyR4^?+jcyC4^7T$PxKWHTTf1lwDaEUBuuqBD
zKGvZ-lbS(o!~mQa#k`rv#n3ZqDGZz|XI&O8Nw{woEK#n<I3*tKJGWthk~wt?RS_9Y
zeFSbkW_+HL0X?T8MaQjq239Z5V2Eley!j@i@Ol|;zuAh3v%4i`E00646muy4%*IhS
zelkC68}Decnin{J$(C$jF03Q^ARk!|f$Ci_re-kgH!TCFplPV))`*VLOThY+0AmN)
z;nIRV;ImDWmhh}omsi4iygH(iBm?kuTfzG<?uA8^keoaJlx2NZzzoF(oW#!R&Hg25
z`SBKvKCejaub6_;btM|S_%3h%Nd!UXExDnJ8elTU5WMgIWY4QKP8f2MyXU+HrYHQs
ziANH^;Q1eXpKnGtXep9r=0sh`SP<hbS<`v%Wk~%wEv|n0XTEi06K|p4jq<r-FnQ&V
zsp=VA>*zsH(a3t4=U+p{8xyq74Zup@QFz>;A69P2!~=WO30ceBnm)t1@mDN~n}@bY
z&RLKB)@$atk44MvQD9&A8Xdj#v3&Yuc)weV2;Z?ge}+Dcp>t8}ej4SuNG#p`2iA`3
z!Ir!FG(6}Bh^PI`nIZKTgm(3!<PW<+FXA)!zi;Ok1gS7T!b8?a4hF%jwb|J<QIMXb
zNkbY(;}u0K;*qh5kB&-&^1n+UTPp=JJD!1Tm@4c!a~N_N!^q$21KyXir{zD?=!isf
zB5(8!H3a(HxU@T9af`kGx6crn8)=Z1wo0r|{K<*)>NsCRFIaOyi*`P}16P^*$mz@@
zaF=F&V4V!eyjzc#uE<c~?KdJ{b;cO~oyt98POq5JlOa4f9*nFCK|E9$f==z=j6YbA
z41YDgzWFsTXf@<-rN-himqWPl{4Wg2J`Q#5f3PD)ffv7dlk=eT57v&<CSj@%aqY$;
zmYY|}5p&!5{fVEj^I0T1%ar0tS8dX=p%4V6K9D%%Jhwln9mb5(V@$9jKKx@2*4IdJ
znj5sJ@1n=ts<UTd?=u;4CdibI&<e%u6*suaJFRKLoPH2`IrCKwrCj}=@%U=88ZFNJ
zf&0|VY2@hRIP9qkoiY3)o=>(R+s<ncai;<wQt=7J>%76|WF8#<z}x_ps=V^ya*XPH
zfe9IM)adP16o)pjuG0pnYnGvV8o#6D_$;hq*{tSt#;1I_l`r=j!OgI3hg+N8!+~9v
zWZYj9S}xqnHBV&k2a!Gv-Xh`CtRrCer!3sl_yvL-<Usc28utG#LTkwcIErkKGl5EC
z?RBWbN4EdHb23M$tis(J<_zVG%|G&79-{d;%wqiV!^dMWIlK<<%dyY;lt+>$1tv7L
zft?rrJi`^tA0>8gbFK1k<E^YN;M&3+7`u#l`&J&}1eWcb)Pr-_I#CX{?>hul0qhy|
zo$xzNkAmGhS(2Z7lJoCc1qtP6P`9ZCM>0QLY~2a?;`9~{-Liy;Eg>M&-Gp+{>SVrx
zG)XT%199Ur(5L4l=$Po!1xMJgb4?+@XgPlC-hheODUvw}IT$@jfP#q!L;`j%oj*uG
z`t7G+Zp<$<`^$WqcWc3!b@rEbk3lUfE83%Y2dy4F#8<QKL&5kYoS7EJ_U`#GW{En?
zeW5{v^{(?Xd|2j%au_~09+GE|V>2tp#@zjvb89Y@6wWuLmIaJIB22_31+1q(-VlS8
zZ}W3Evl(BIB^d8CreD_@5qpvfSE_n&BWklQpgIlw(~cgVZ#jS4Ox~>xB@IzaF=-)V
zsWUEw{v;t;K{QFj<0o*!Djz+7JzMn0f%D}7tcvJFsb8h|t;QK`<!Z1g^afNfrl{yS
z1o;Vr=;G^Eq(0t&F%GWfER#1T{ZH0G!mLBM;-NGt8afTM)@{WCb7Ru2IUbL*`CDf5
zT`ZdF54p^d;*_FHt&b>CkEOFYtaXL``jt?jrj4b&4roCXN%g)yxHvZj)YP8Au~|mM
z|AaT5nKp<_J6_9m<TY^TXaT-f(Ieh>26)E{fqX>Xa-0!g2rerZ;h6W&QEa&ZO;_5G
zjGACB$f%ibg*?7|kREI;ivsb(b)wd}UvcDf1-d__1wN**EZ6$=a3l5^ibO+Et<n;e
z#qE%~#~6LLmq-rw8d5a@+e_$80J=hhhV@#JvvpQfh2@{zvJ@c1*AJ>LoaG|k-sH{>
zltW;tJk^|IO>%$vVeZccF#D-Z@9ncd`SNO<ap?xw%`hS#{R&W+Tp@AjQ=qq|&cumh
z3Q^<pS+2?40zY>$2VG$eeC(1XCv910-{$~7y!0t%p8f~%+A(lEs|-X-^MLysimlTw
zVMcoufBvr`O}%`B&p-BE;=P0N8j5+WZ<!47J40a{=?AxrC4BbuXT0*EtKc|ThWfp`
z0-}ot;ctKno!GS!gNF>qr4GBGbn-l0ILL~$rW*qntVrx$DUp7&2uy8X264}?a(}|@
zNOkR7ko_%$@}L=<T!w(gKiP<-{dUx=?LCZ%Isu7a5=Daj5t2zinDb&&CZ1n+AI+B4
z0=K*hm$$UR{q4G>ddfR+)wUy3pQ@9Nq$+NNu8_P)SE2S+lfd3f8tbF0xi0Zs_#~%B
z0v8{K?!UKj+S)=+BPN+Mdj18R+NG)ID)xQ)RSlmV^+@OA82o+lBCfT*2%|cF!4?NA
zDoKyWy+87CMWzIcs-~cFNfXZVktU_@pTYV<*0b`xB5^%=1{b`oKt;`|_`}+SXvzx7
zfZ{vMQ#y<pEI(GDT#V6oHbLj&L`baoF7o(x1tlj>L(7qD5Zlh<J4UHejXz<$)=(LG
zS1l3aSFmpPCjsp~(~Mr9e}YY@J~3FC!<|lii1D+H=@OSGj9<G7j`#co8Id%x*``mV
zFNI*?@KDJ8*8&bc-(kqYpLpoPScCv~mW#Ed!O>^9X>vb!gVHT9%wL;)JD<rI0ejJ=
z&W1i_d-?FFOuRGiDq=`9PN=jc7i~vkZVT&FJT@YF*=!cKYNu%Vl03LmoQwvhBXNAK
zHF+?zk#z$O@|hJ!K$PKukxOleMs^{m_)-NFze?k^|0p)OhhTaR^Np<Xg&}gSc;Kc!
z@!oVBRi|FZ#4sU^H5v-u?*h<fWfT@^3a~#afpsJHh+3O!xXhAr2x|Syx32k!9S4Sj
zN#S|u{!xije<HRdB6xp3%g=cli#^5>ys-ZO=Xz6yWSXR4$?kKo&f1cA+DXyKc$Qaj
zs>en-#&!rdp+C>F=b7Cdu71L2ZUX!Mhd5>6`+X)<eoYIO9o>K(<IR}on{j*JibNR+
zlb~wgJeD4~0qb7a(8x9Hp87=1)eY)I=|!iYEkp&Dvabn-ZbgT+3o!bM7L{>+j&`~U
z__Zb(6HV6O=|^|aF~$}r+E<~mjv86H#+LNT8ItU#cKjM+Oy3lK$5fS57`(F`m(7zR
zE#Dk)f%*zqma9v4T3mtv=dI`!-vRRy^r$B5GfEV+sP!K?s3v={SPvol^G*JM72>pg
z^}N79nHw{40J&MZG-LH(^xD*c(X;wcaI+;R>(o`uU9k_nf31KmItJv_qf*H3OyK*Y
ztik8k3C3G8A}w~=FvLYj_RRkc{VP(S_$=d+<UiswFL+^hX*SH88;TkuEzs<BDHcaI
z!jz`#c*oO-M6Whu``R9^-m{Vmx?02s9+N=6&w9~(TOo1NZIsl%TL#}X>`8lX9_sWR
zgK14mKvJ_88^|U2KG~k^*=0cwK{Q5Op99b5cfh+<YGj7gDg5t;1*uPciJh;r!T0_J
zE*}qY_B}zM6U(xx_C~nlxGL3EVVphnT+sO{!<cFDlI}Y_n9<tFYe;$V-uV{Lson_w
z^ZX%=u{KVNb;uJ#JwjIXW2RRmIIaxAeEO5?&Rl@C$~LsMhq0-9J27V81I{eSgl2^t
zhsM{M)NIlx?EP^E4Q!=wr$-n5cSDcNv3?G2$A0kID<7b7r8&8%YYCZM5#Z2Yh|jMx
zUvTAYZbyX*k=B0$!ejgShU)?vb*dE<i&f$ACg$(hvQ(7H?iqIM5Gfq`n&VYjM?-5n
z<bPbu$5qsESG#4X)%5>Sbms9?eO(y8nP)O5!!?9vP2!%lOPb`@ph+r8lT<3Hq=}nK
z(xgce5)FzZDcrNRZjvDqDpV3GAxV;?;@$86efn^nyU$+hd7kf6g8Pc9&?xl`L{E$2
zJpM?zR(Jv(tBq;oeF6A|1cLD33C@3&1&t_wi%uDTL_sNE_`U<I-zqXiQupK=Or%5T
z+mALRNJA6rM^>@^#*w_1&uS1W3b4Vk16^nLfwXxiCt!S0XTN*kbnq!Z{-O(9yTQ7$
zpZQ|U?Z5EL(wro!6hm(71l;v=2$7kZ1&2#xapHq^bYY$2$^1~rx{!izYS&?fpA2c7
zQisvj#?ad&Wm&afSU!9zS1VDclT=$UV9X(SEYzTC9S*1@Frf0zgXySiEqE?PA5xf)
zR(xkKuVQuy8tYk}z5OlcK)bme+BP8EaEW^p@(7yn9>keivHtR(U~%jUWE@_PM(Q;X
z-nEhS2S>pXm(zH4g&MWm%laN8Ps2&`V*I=8I#hMGvo7vAqL;t@!J{*hck@z$OEF9@
zo&1)Y@LZWF>#LBU_)1Q&qg3Kjdy+Q{f5-V{7*c&}HY?u=#t$cc;G&Qv@W`<ag_B2e
z0&m_v+=cai)SFX{3MmBr9tmxG;^CX0HZcg5r5~4uVZyAxT+Omxtlx2;j~peWH4A_6
z;x2t&@?<mC2b*%o+lPUQ=}jCv`X%I^ehxQs`?2~5^Fxg?Cy|+p_>5;I5VPYuezi0r
zMHzZ@*>NMPeMSuWBMMQlUyXmo*t+7yuS9BR!{DQvDqS|W6l4V5AUGMoU&*M&YyFx;
zqc|HrE?_yI2SK8_hC0-|!-yQXd<ZA8*}Z1nEw1I8DjDAY7o}syusrD?zIMd_zZ;rV
z#qT%#I4>Y+qmMw?a}VqvBgG=qI!-^H@%zHYN*rxduzr38_h*U{S^IlA;u2?^6KzJ$
ztwr>mox#niGbCdzvmn8@nos$_n8qLWbA2o$ogOfR^WP+cPOrytkGluZ!~-p$!n9CD
z>mT@Kgb_(;HzCH_ry=+WV+!p`fZ`2COzYhS`svwFK79tL{~JjCE_}oC?AM(2k)M!1
z;WnG4&O>9&F$kSr3(3>xLAZ@9&=|%zm}LOr>ZYI+|Cvv|`+)y7OrID^`*_cXM&Qph
zed5HtQ6G;o4yP(rYGV6=y;u1?0~TYXXFX>XQi3D!|NXumAXp5rO`#d_>lzRVq~Nsb
zI%wyr((KTGoWt0Y{Mh%ptY>&E=RH17<g|VR=VE@En_s6)Rt$(o|6V`tuVojO4*HBj
z>jpl~elY#`jRV1`cFAWW0b%4S+;WL^QoPaw+FQr^Bf6o!cmhP7W=zDZf6@D%kj7_}
z!-$+bbgSy*M>rT^sX_w0UZPAAMqcCE{%nV)!(|Y7=RDfl)nM0^F_12Z;3^WV>8WWZ
z<gKY0Ii!CAn)IGS((F^X%vXx|;s`j+){|6@ev3kzEI#^UC-1Y7<tAS0;^lEom_4wR
zyJBfYHe}qupM%ZG7lDA}&W(fZEsEr~p$$=C+GN#131oYI;TNiC6SZPB9QBE5e)}i!
z2Uyo&{+5r}xMBgGPEw<R*VO3rMkTtiZV<7#B%lk86o}ACn<o|;<j07Em=ISlnO0DP
zPRgF5fxVZ}eiwz<N=3#P8cgdIG`UsFndfP0BH##l=sR5j&MX%)dAc%@DqoY}Cp*|-
za1|yV{)2IqmoaKsJ*rJi2InnjP<Gq^@+PVpD#Knt`J<<-^Dzm^2bp1dNi)AfDG%))
z%Az2g=@u;iwBQfR+k0L_kC+%vX<j30t$D#4Pac3Pm&%i|TAyL<`!#@{6|q=BNb=1Y
zOLs*#JQ(~G6v}Ub#}swsgRDt_dNPb(9fMnEKSHB%KcLRF4hF6mOw{uC!rzyZQ0TS}
zy~mkg{e-Wg?Q4W&!Q%$FwLTN-yT5RSqX*C<{~bn!fI#eBkcjFlV;Bohn=V{sLZ5d0
zgJ?e^T=syS%{}T8-7dD@FS7#63Cs^RDIC2mhwyd<ExaXFBSi^^At9=bbBjx0Og4L5
z6s=BgyWT^kd71dkd@yNB$w${(=6wl~6OByG!=~94XsFiD?U8<maF0Q-Xu1|8oo+|T
zXvWLP$%0R6jN4>~od11u$o<j<1%Z`dJ^Uzi&WOT^Q&p)<>R@U%-j<qrW`S*6H~+Us
z8HQ(DkyP(BF!zN7Mit&;&&&`aj5@(R%!@<6Kp}W*baFnQLos7X4a+6n7CE=v!168~
zI?0oHL-IoTFTDa9EvEss#ZzH!x)}V_X25W^`+uNP2+eCs_;Wvxf!!Bv-ZbY0o6QgN
z;-`vOi(BB$CL!&bIsk1C-RBC1tJCI{*6@7Q0CK`L5ewYPz>XTz45QubOg5)42QxOV
zmhVLKCPgy*JY!Zo(gP_biNr%1a)OG^!}FSAyt|_mzu_H>EMT)_oH_Ab)y>tfzW_Mz
zD=toBcfuc^7^iL{H_udqNb`^KKbA3ecC{sOSn7|Nl0cN7Xh~K?o6+t0tmAl2ACzC#
zrU5T5V^Ci)cPLPkrWc#hRu9G=a95&Qdo;m|W!Qbc*KrX)2U05y9oioH3#L~YGNzIO
z=Q%-v4hq#HgMKTL_^x-b-`a%C+B1aIn_iQ6TZuVo)lBZhlk?ccJkG7J)?#pX5tby&
zQ#oM-NCgd&$z#o^i0NAkbLGj1Z{wjh`!cu<n8hoy-S)QbeC$6WB+=!ExNuvR-HvdD
zd&^D8q)EDT#FbUBcfSUie(DCg6?<^rInknotBeQGH%g-9t%ABkt5JVF^AE53&Q~4$
zgyn@Ayp)Cb9roVjwqGAWPu_n8QH(|Dl90_upEIC7mM!>jxd!$5o(y#({xZ*_HobMB
z1P+=pU%}vb?#~h##`sa@Tv!$c{v|-Jl;v}$JmGIGN`&;4W1;y$G4DF(2MVZ&(_a4q
zo{ZL@%I#*f@<}xIZ9RrwdkfGhZm{Us0Y%!{+7AZPwP~b>0=2JGr!MLbdFNFvAUM;*
z1^!W`CBC|(=d3-NamHk5;9b07Yd{2hhCtxdFOXpBiqcQ{5)?6i@#R7|cIyl7j@6;U
zjGH2hrA+Tzx)JS#zd_KtU9v~m2MG<K5K-|MYEOMZv%3n!sony`Cw2L^GTk_qufY1=
zC1}U^0axCtkfQGioYJ<#&~{)wEZq8kFZ~bja2Sd*uYTe`Hmj#QWN{r<%eYi|eSq#!
zVB%pwq%VU({C)wK@j;jRs98|I&J=9?UJJJm3druTXmnU|2HyU;fWk}}de18Y-ST$u
z{;6O2!1E75Y`C4Vb0hgxtP^(Vz9d{3p-6hVYxrj00d&~<iy&0F$z4u-gb69Oe6;r|
z&St18U3<hCCyY0x(tsF=Thbuf_q!N|e6ga&?>FF6!**C+B1W;HvPiwhiquT{!FfAh
z7wH`}A;-3AkczZCtVn)@2UcdF%A*#5f?kw%K9ESamT~6kX;|sgfWpA5+=8o21DgJr
z`|f2!BLwf5@9qxs=*qGzg8}iB{zHMf8EsKkqS~W>q1&QDRGulvxL|+58^-X%HTCH7
zcvT{pc0Z>f^adup8O5&{WI{Gq%TmQW6YA7nCDE>IMeaXcVy8bGKTnaNk%2C_Imm>F
z&2mLSS^d1mvlzzV*CdWQLK1i&6JN*2W5U<_e6s2-XmXr}o`<I3c&1zVCP(n~m;69@
zvQ)G`L4!OP&V122-+9;GXK=w)mmF6Qg$Z*Q3)CbYd}KaClIuWfox`4QCq;U8VKL4Q
z?!l<fw;^FjK1WaAMV}7VE7+AH(rzz=It7Hd79p|rc#l4YFTl<xg|WnTK*!qkIR48F
z5a)Do!gw1_^S^I6cVrl(7dm5h(N$0?kKq3;QX=7%Yfwwo10##~^DddDbnO#m)W6pQ
zb{9-R?UXV~t!===MjI<PHh_=&Yf!sp1Jl;7z-5^(`0l3xaSA-e1&mF`tJ$hhbU2-N
zb71~&1sAZJ;l)`R7*PM|A($4RM9+M(##!upGMq8-Chk(D<DNdnC)Vt_>{-v>w=tw@
zq4zL?-F;&r4)&+9Y)ytxWO4r>w(U@aNqJ*&47<CWi@k$IkKgc-I%j#&#TmGATpA`8
zMB~kOnfQL66*;_DkB-GS*f;MU<Cz@8Xq`X&cneFq2fu+?&3!Dos7{T?0*aSf-~*)|
zOi+7*$#1T4&bO-|z4JQ%A7cqWQXWhM`@AIHuaZUf&9~5WS{VdI9OeBTDHo(_1=n2f
zV9kggZuYxmFxpyy!o(0TJ}iTgxAZvc`>&xpn&r_4&t^W~CSEr^2+F!AGY`_w9FNPU
z_-VNTIhCzNv-i5dhZ#n+Z+tp@3eLsq$|tOUY6bX5ujia(9*h2*VZ83eQ(%_MKh%OF
z5c|3rEBDSo@xo-$^=&=4KiP!zm|Vo_gd9wrGYax{WrNGCc+QvU6XLRq+=7>^6Z(%0
zSZuEZPe)d#+hIV2B@Lo;+Zh*0?~>?}nk5-EwFi4UJ0aMp5}I|(@p{S!2pxI_^E3v~
z>&Nq9*XpmR-JS>8<;8sShOfN#nFg3U<q7T|Ek{K9<q+8a6HM*u;E<5%)Xj7Go>%cu
z7jA{ob9=bXdnzpNsEG;-pFqK*pXfZ2y`!=!A>o2DuQu8ZRMy;t^aO8of7OC|#}x_x
z#gI(6s6c)#R;0oow<TkZzQPw>GvZ&dkZWGbbc31iMf`9B>O4b=`Iaf@d%GIT%4<;N
zzx&`e;w=7Wz!?5(*gocO4wt{y4%@EDqSt2x$+=vNy#0rJs-s0yGv}jn`4_Z(u?4NR
zO@KN}*0uJl3JOxJ=`7ZH5@hFvPEFw=(ZY`?EcNF)Y%RG(GkjoJXCts%3C7x((w3oy
zBtzB`D{E|UQ{EAj^NI%pH7hzprysga5uBbo^UcJZ-ZFgvAxSE9Ut}BRw!X!^$rfbX
zamF5;DoexGHA2v-FPwkg7`*V?h&oT~z!$yhB)#2`U%a-Q<?~w5<+eWFRk0wVZ}m{W
zqfnHO$6+XY*LQ`eQD@-^@O~D~ceFb|@|`N|3+Mu+J!kNoj00rLFb|l9v4j{MMd8;8
zXg}!<mfffThYKHh$<8KNv*sZh%5bb>b2{H<n}H)fR%5MHgQ~BKME8Q%FrDRmoo?<D
zDI~JpOQx8w*^~k?0gccB?${%D0^IiR;b+)B2d}-IApP-$%Ne3eA~f#7o>*O4J8U(S
zwPu5p&_Uv)5+k{-H;8U<&BE3UmIrKYLko3w4*W;(8Pjtw?^CB)VW#v?pb>Ri><O2r
zu#R1WRtz|-Nu`o8qJ^TvxSeSTVcy-)Q!y2m<$htg_AhJ?ex7Om<{SgsqUM<)Btbln
z6Q`^eNp)6;ZjLdd@!J?na99X_pQT5FAHE0I80O!Y9*<Rne_&|f6g=}O2Fq3Sa0{<U
zlU^zj;gdd*&@57t(S9HHJ{>^3U%nNMIATQ?O$vd#<Jp|CQ;ql*wy;j7m;B@YbiA8r
zO5FBOMu!G=&djfdz($1c<BjOtqT{e=1MB>tz-1dx0P_-M;uNdQ|63zNLWfGh&|LwA
zLGGZv|1VS>e*kW=R_L%aj#D@8!?r)JVDXCw-)l$t(jjRmNVvwBkB7Kh^6J#pHv&%Y
z%0uzM-<-d@8*Gi`u`Z$<djI{#Ve}_9uH!NHLlq>l=P`el79?UB2)@f<&Ra3^N~Uy@
zKJ$AtDPY4G4eIeKoVT6vkyEdDg#Pvxe8<dst_srN<wjYQI~@*o8<+FZ!RmB1V<#uP
zydWt%^$3HOM&qmDy5voF8w{T@h)ieaw^*6w8Gqbh-97U}Bi`+XfRY?2ootP(JZ;F1
znC;-TlkNE$8o_gf4CZHQ(B%^Z<j_Y|Try0B=-vo`G5ThtBj6G@X~$g{`{WhIoR?r(
zGxJ#d&R{*eH4>w)8hBO6K0o7&pu7GujOnzXLK7oS;FKrw_g%sdDYquoM)K69bO0UU
zp$u*hF2fL6ITEe=fj7M21v|>eu?%}8c*>aJ$FCQ`Nwt~}Pj|=Yj#4g2Z3sK_&*Q&+
zgGlA;2Jmoh;3JcGE-dRQG^Hbqm_HeG?@dGTrJ0fy3s@F6^|dH^<Qd-I<|WtowjJt|
zf|!odhApRlVdB^>e0kfDwhwHF@aS?r>z597+qMKGBR@b;zyap7YeKKUe9$#ir}fdz
zT)=3iNjS^Fg0u_ppU)KxYHi}gm*nZ9sY7VwTvI+}gC5!0&axcG3mqp+FsEgrhamK1
zed9Vcn7rT*w|SQWy%xv(@tuZvNK=VEb1|pQXC|N^dyB-+w;BsNzd`#=mW7IvqxHKN
z!bNjivXill(v_sV#-4sOOk}+WZA?SkU_*rkuSEODs}o886_`}WgWZ+~yhFek_`7T@
z2sZBotXPjaSI(i^&zszW&?dOB#hM20?8Jgk-H>?oIpYS((IDBw(4l>ble=xj_PMs)
zzmiULdvt}53{wG>C2z56Qx4S6Si>*t|A9vPCt$>L1?m}BfU&(7V5x~Raj9Pji=5Qy
z;yp~SdpiYR+&3Y;a<wd{6yWIJU_)yz4THA1<I%h8G|r9b0e;(I^qVsXyz(0$A!!Nc
z!dLP2{h#>A9cK99=p#J7BLHIm_<~Z$WB$xw)<-<Uh^%e#hWhZa;P$qcn^0y<K41EY
zrL)|?M>hhN-4)OwAu{CQ8(Ff&<UTu-3-}2i2hgaUs;sB0%rSYrFXS_-ltkwqC~nmz
zo1c9|@7&Y8^S5+pE3v}Jn0uUqU^Mh-Z^Q2Nd!VMj68CMM2+_$iLFii|>LBAF%qt6;
zjehXw-bb=-Dj%HQI)r!x=3(}}H(Y%250H~Ri+$}s@%rRrxWdGcJkc~Hh6zG?+<q98
z@BGaj3Yq{huMWbYua+2mAPnQTWTKVWg!(bAmz%<AZZTCQxzED!aS8L$*+%AMh|{6>
z#T^j5y6Biub^v=;&c}0EE)b*~ff<#S^!@@_`udIuZ9EpoJj6v@8#-VM>$DTd`S4CB
zqWG!b^~lSUwm5a-U{ax#iRxa8<Qe<iCb0c=%Vje<FKHktYq$^NPXxpHcLHMf#FoFR
zpMXLu#$v2Lf(g&gb23@EuzJxjRET|v7w;+4>nmPE%|Z>@bCmgTWaYW>AD`pGY;F2?
zQ#498|Ay>$c^GLY#~bS9VQlbHJSRH_iZVhWJsmlX`;5!pc@4T_x*_-BOSt}KFpZa!
zLb=i-USJs|3E62v?c5LYY0$*-IcFfb2|2wx=H$*wMKXT)UN}9@njZQwA1p_(GuopU
zK4@yvomXWUH?U0PX0eEqtJVVHG-F<2Yd-7VtblzD9x$UL0|XBzOLR|c0KMz7q|L$&
z(?9M(`^jC9FFzcr>#az1V>aiucO!njr%LSKH^FNvAc?){Sm2V5spLHh<4<sYS52w<
zD<$gaCr>Qm>LJ<1nA%39LPW+_kof)ubz2+q+3+MzGfsfvN-SeDs~ulXU><>XOZvi~
z0DH^&Fk#X<E_UE7oViMtc_^++bmb2Hzgv>uWP29N&me#O<0RZuPziS#M>F9=pJe{*
z!)T^qPMsIDzyZQC1)pCdirD=<))=+Y81sDGGU$`a(hT<Omz(_IZwF_gUBX3vsFbno
zACEzWiaU_`K^^?(t>Wh{9z?&KWj)zyUohmnER|0*AY!Xt$0<L|Y2?|X*m(96#B4XF
zF?!$OoV6Zpa1F-|=ju?Q_$@fCw1U|9KycsR1nK+Fb9>~zK;^GnP^YUx$IqPr5mzta
zY8xL^j5DQ=oo=HvyHM0>F$=e*<f7M4Bf8`K5SY_wMdg+Y!0Ji{28?AohWS(Md6>it
ztR{<szNd3(Wf?F&=`K#6r%HSkb1}+IjwYPD%bgWn!BJUHQCK072s@e2c(M-G=6d4}
z&7n|Pl>tu{W}w5P6#mg!0kzIgf_R(h=sKqb56tYram$pc^~`@Tc#I;c7d_)*^L=3Y
zwnB6&Jt11Fv=6eUe8Jn#v>2-43N$Ck(V;gDh~3T!P(Eo6=M|I&Ud%hF$7Zvmjn+hK
z!88!8vEWBksnXBcio|%)C!Bwy7PAY_VZ!iVY!-TkN9`H2w`C67%`L<1oGiAFj^blg
zd|--&E-7dGo7LCeW7(xn(EQ2n^~Q2^LAeM!8k%tNTQ)N;(+6qU1WAmc9Bq2@6}Jqx
zr5#Z+ym;d=))CLx2hSsLsrn%Dl<Czo6P4){!xPx1F&S2{pDjUWH%49*@(Wv4Dc4qp
zy~i>^*I5_q-e-eqq79J*eg=!lePI7RAEv3yz|b?v=+83$ekBA1yV5y_lV#lC>VK#v
zS^^25BY5$X%OF0(-W}UkO6s5g7D>l<<L8xzM3(WuWA3v3)2ADp>r2LpcgcWCSH`*<
zB%qtll;S8`Lpu1yA(Z?17qz;3d3eA$)GkfDbg8bW+`$SqYQ9Br{8f?0sRXR=+QZkW
zUIc$P)~TK&OJ<Ks0po=E(B&b-zZPspc&#LgERms(o<efS>oTer-N(E|84&y7Jr=y=
zL3?g9G^Zk0|0R~o&dWz>jxpEfV@OqARza=iNmy2Y0VaD{kRtu1a8^!>>Yd0(A~7cu
zG~|dd{*Pq;LS3@y<0E|2^9hzeIf=&ad~vj|IuU0yica1?g|P!~fZ<_V*rIkGe8%@c
ze_1S^kFzE}-U(>aJS}?Y&^L^?N<`;^?~wU;AoW_>0zHS;f^>;Br#-$J4C|vgLBx4c
z$a*=dalC<blV_s#t4w%#*A0W;e1nQlO5}Re5V|AJ0VOqAu<xr2fV&2ry=F0(p1Fa&
zviIOqvL21pF{A}^+ED1y!QG1Sz_O2-Fm$yIwK8ED+GmX{XYcN~&Qpn`xbqnCM+~Vg
ziQu++6z6~D50>h-pkVn+eut_bl%I^_3vRW8?_4{`D3D@uQ4iO1ubTT?5eId%<>|s#
zVK_cS54@LlOLqN}C9%HavG=wq-EJTt9fcdY*5Y=|KJkM4^G1QlJ^p}|VXq-v^A2~h
z;4pd(O@p)S&h|7=oz`G5b_`KO;dBEKjPwx2#IU^etUx|QO@lPC-JE#bRsMRokUC|$
zO2YXdh+i27JGP9&ssSx<Zp$ZBZW5CI)%h4R`7DIVv)&^1Z-owXxwAqcwcB?E20B=f
zMgiNOSzm=IJ0o$?EF&7UBbkeh_67(I1h)_E{2#_*pEv9x7Iod>f-gP=)$__U`^RPO
zf+6cmWZY`&nr|>+s2LHTaTZ-1Zb;l>zwv@e!ID`K%B1$O3|OkMxykx0SCc&fddC;R
zO4coFSjO%rnTuJj&>!L*{(#n^r`*rD!DQL6=U}oR25lE!;eS<JLG_se8lc+_f~fi2
zIfE=TDte3?uf4=gZ{^9->RagM_k`Oa&(4jRYp_(-5$ji+ff9#-q;>H#7*l0T7Ho`$
zE;~(dIW-%k8y<?jYGmTjd3x0LNj5*%x*Db%4I)l^27^|MGuNto1dONNf|IL$K!=qZ
z-z{B?pZwW9aNQfoy=Y0(Sq|t)ry-HQK9Fuy>Vl6$2VmpMZn&j(0AHSBtWX=)y|yg_
zEBf-$*Ej(KhOfemUH>4w@f#<$Wju@ybsD4)%dZJ}iJylG=&t+L<e{Mw>1-H{9tCqS
zZQ&!>EywzRS#QTJsXU#X$6@^4UU+4yMQnA7`F9rT<m(d~vT8~-X3M?j1rx-gh<E+?
zVY(L4s^$2qLO-lsH5u2L3+TzX15m=ek9QtiL97@AnWGM%kK#M1Z}Z~at#xQ>_YBnf
z8-)r{pYVE=D%JXB2g=*cN!sM$I5gRs)~E~sdiEH0T1G>2!BO6JsV6vHOoIBX<7nig
zOm}Qyb8_`R(cyRtlKdlwb6Puq4<1$yo2N6sYQr!#kF?n*geP!42_5LUXftRi^#J;B
z!e;!9TAI#W`Tl8mYrq>2tL&F(!~~#@N({Dt(WCR$p2g<%0BN&qQ90a(#IL``^yms`
z*=a~Z*}1)LgaotqC-Mq;g^Ur$_EQmJ^k1Nihivs|;J`2lOgaZgYvoAnU2U2ye}i}O
z>Jp*Y0E(7G!X)*7Ac!vzb>B-z>B6C+<%`Xzz<4*8z`Jw9ykuzSgk6}v`Ws(*s|F9P
z7>eWHYvXe14GeO~<Z5$0aoLkhuu#s0{+B1&4&)uD&Nu>YSwdczunXH;4}&N`kyh0N
zV|Q0BX3h-*l}&Oi%l!{64PM}M4|O7xP2m$lKl7s_ElKp7o4ny+E12V8OHNEOBihGn
zVMay<Y#1Dc>GAozID8+h_N~N=Y3b;ZtpbfK+q&kM3DFO!g;K@e=(c(r>?v8z<{LZ5
zenS(YVe$ZU^{-(*&$QIi_u2jR@mM6Y7Q8=P<<~4^vx?Rp-eSf~-1k|N-Uu=v3jexb
z(03K8?ygF_)$2L=2o)k79s-`<x1mmJH@0Lc(%JD^0HY2;@&FU4>na4NF}pa|<8`3C
z{|{dN7lw}_jOh1XRT4jC9)_n>@(%7Qtm9}tSo=8Q_IWbI_?`_`O?wE7jt0Q${G0Hg
z&IM=0MZ@fXsqib}DAviS(q~Qs$wz@bbW6I?w_1i84t>EH%0I`NrAPSe52yL3xB-pQ
z8X<mHFBl~z!Djh^B;nXGe$R$lC=e$@dCDZ-`SvQtr4Z1ocYk7E{}5{2%{=!B`Vw1r
zA(d#G&<xeb;H`2Sq-n?bnoc7WvAbi(j=R`!RzSqZeI-#QKj1g(bJaSg$Q`<T0|K2o
zV082VnjqW8KeDnUiXMYW3CkLFJ+=o=?OV{1cplcsnvjQ#&*o-)4n01x{VdZmcez|g
zf!2+QrK`2Ur|cVy?vF;nZdbnKFw0*E3~>kRO6`y`f6cfT{Q3_pn{~sTd+}GBXgP0z
zA7{T{^&NJHTs@eQmFzydR7K)@DVoz~d2P*?D%4-Jlymm(!1UdxIDL7R*Dig59$7M&
zK42yE@V{}#7+qY{KM^(kit)nSTomTTanr=tK<4u$;EGrd;*~y0*4qU=UpK>bqfS_N
zgz*3#$kE76JQuy~9=Ch1G0Benzze%ypyFJn6>ph}!5IZG_VNQr?{DGMV#3g_K8zo~
z<s3$y+Qa|7`UOW^nv7cRL*TW^3ykWjgQ{^27(Tj<E1v!U6LhC>bxE5U6WxyYks>75
zj)df5IlynV7<lj+`};henAuZ+N$G6fvQ?nBs{TNH@K>CF(3HGcTMzylpYwi0V<7I4
z0h!PE-#NDik=ZsIV5F`wjgEJ~r(@H=JL(Kqy7L;w>`sG3Ek!ERR|>vvkNB>=XK>MO
z2F;Q;#hKkVG2+%su-{&VHoAjJx%MIWX(+|Dp6gim(?L=C!@(##K}G)iH^7hEhfzfo
zg`wx(!&<|~I5v0#zS(pUdY0YhNB1k!+C}#0aNs8ff2e@^-|IM0>SlELd5Zg;&St#E
z3<=Z;aJ3QJ6@@#2TcH9?yLb+qNSCC8Tf#3Ydx_F*vYc1PWyZNV4)y<DiGt+IIJ?x5
z5XioVi!yeD%9$X1S-u|18n$BEmJ-lC9*8^kh++BYCwNq7LfdtoL-su<unQ>RrmWPY
z+b<|nfs!x3o6Wc7N_Y7?*&3w$z&~yS$`HJ%MSV9L;RC%2)c#V1l9USQId_2D<}Oc`
zsI}l#x3!S&;K(Ny_rrq0e^BeU9zJ_Lm@Ge4jB~}+XnaAIw#7ZfUXdKN6S_k^y~U}n
z7)Zt>4xuxYZ0N7AWw_p6o?!3@Z1Xz^Yj<3L(9$B9GAJ8u_A8UlylxyPd4i?!T9~nd
z-8mE*c~{?NaFPq<^ESBR33(y4NJg+9`WyePGNrbM_Ts!V2E>7J00g7da_p=Ya5UM5
zbUX~^Z1eWw$>{SCH{O~CXB5Hh>;srJSe1m4^Wd1QLmeiMg*(1_6puv1=5lS~xBDu{
zgw&z&WL;c1|2CdjtVe#t51<8lUijjt9`*b(6Rk!nQDI~w_x-6FwVO#f|LJlVdF%o9
z3DcoL>j>6qJcr>{1|%?8hkjU_fwr-2xG%<vZrVPGF0%ZK&zzp&h3AS?KRF%3ZjQyE
zP95-R`3<ml2kgpKAv;PYVQ*kO%&}#8r&aP8_5Kk!__EHDUGluv*QcCqdlbK=QI=k9
z@&>J6n)F}(LmW0H0ZvG4$O$c7;^R$m%0O9iUGfL&ufLYec94OWgW4E(md%4B6Zmr1
z&Ajq5J>q+D4$EdM@|%Vf;SyI161is=Ix|*hU)*u@UeYK!I`;@(J?jguNlkE4UW;|d
zGlq)Je29yTz!yww+U;408T}6-Lf8h|XDE=r#y7z2(^oF6aUCY5rl4HA5JuM<677!L
zIN^d7?dbf%pX&dH!`6I7tvzGFP5TMw{m~e#oQKeC`E>ky<UXD}-HUg>SW-cPW>()j
z1?utU3K#VKA~gSc!EIQtO*GuY!0Fb0NPe8kRaRbtTK}oIi23!J>@uJ}Z8ARotxcZA
zDv+5Mda<p@9;?>o;^b{g#6{}~9~k%?MCL1y*bbs?N$gIyR}Cl8G;DqH8PD8`#o&Ng
zm{hkM-$%&N?EB*|*gX|KS}(%g8Uv|Q-%d{DQ30fF`viugHo)%I2K3fBA$jl-!MneU
z(|GnA%j<VR^(~72lI{F5F9RxF+ris!{|BPuFJbo*E#i0nAh`5<V!7;CjQ3JtJz1&{
z<@gz-b{4!?HH?=!Iq`NzHeAH>N{oK3i7S_wko9bbGi>Q!+z3-p_JtLBXKhY;-DRoI
zgFblfXi5sEz5%7wIL<}Y1n$bokvj`zsA7aB@l{jCnhb(&Z1!K5b`z!VdA?<bI^_xs
zh<88|$R8R)CgwcDMN=PPflM>3H!`LI!{?4YeXsFo#3LN_>Mi62odoT^P=HfwFysGh
zPLoq2!Papi;murb#&z~x8&H|ECHD-=He0~H8#^JX<Teg1(xhurPomkwB(x5{2g}C_
z$oP4CKzQ?p==W81l9~M<PFi9>;{(dEF8u<gpIge6XI<l?FAjkst92mhDFIu>9R8(B
zI2@K2kQ*v@u)b#!@0Ru%o$b28_u&~V)k#2w6^1lvYA$yAb;G|~Hl+Mv85gPb4%5v}
zaTWW@aPk#1;`UpcroCYui?WHhE>4CBrr2@6SspUIeLE+f)-4jhZ<R#cF9PX%A(y}W
z8LA|`hr|idXdnC${_NBv-Dj03zk%_)f@VN;${MWQW=v~BUvh=2Ce-EhXWsQ$3$*J0
zfr_IBq*$Ozf|oAEm-p2%dDJoxnvUgZM<MD=55-Aav(e4Bms`D<aU$QZ0MnTzklbyC
ztv8v5@NfgV2QI+!@46s2;W<VfD}w#;*YTujJk(qkb5%~yA;BXQUA1yy>L^>%vHKD~
zY27IpU%3vOw<uBZ*F48MC42naJRUaNnUIk41Ig9RQK%I%8vK8+;BH4TZp^Qf67-n?
zj?+8w%OKX<`Hf{2daChc{THY&I>%*xXLpf(OZY$IjLBC8mYKX_PGr9!QpNy^q~%=Z
z%*hbHRhO1+ID|1aYSd_84Vbfg>aJh~68-ZhzxbXZNzqawX;Titd)W-!^dH;LzrBIs
z(KGP=W_7Z9bTl}}o(8vL2O%feg4VtN0SUWJxqUCDLsDlXcE!tJbOAyAgnm@Zn*rIO
zlR>zrg}c93o(S%B@!_j~@ukn#W8CNtY?~!SUzcLO&;Ac)Hx}_?%Lk(F&|>Tvco&P#
z-sShq$bk_*F5zsE26d>AMXLc5A@}zxTz4!M)7_6jz<gW!+S8N>Tt8zqM^S(Cai(W7
zzt|-eI%UiO%y%xvH_G;S+s%{;X8Sqb^ku%^vmd!=*`ZMWB%O=U`Uq_-CzS7@h5=d$
z;Mp`2wG160e1JLKa79QL?_e5dhM2Pp@WkXi_V<3d1}07Gft|8CM9t0(o-5{K&&Ew~
z>#h^?l|SSbi7epULB^_oH<(I%4nxWSLlStY7<_eJ^RdTYW5J7`kTO??$juA^>AR1-
zg0(Fj{mh&k>@=best&-i5=A;(%yM8da(uFQAU6L|124WDjNZM6x*1u}yEqLJN;Y9j
zV<Gk^8&J;%##y*Lj$1pk8MTcwVOyLP(ON%-4@@hEp1gnPe&Pdof6nC+`frG2?!CjX
zo-!!6Fs1v>kA)teVovC^o^uFV1=G59=zRC<xKUA$o{u|*d#*l#_PAmQ|8)_?1GM?x
zyN6M_U5WW@8gqnIjiQwm0%D(A14AqIXh+p>&T!ymUhH}qXDSq<@QWM&iupmk*?mN7
zWh(mGnZw;}MOd_2kH!uQg7yy&!E>+_Gp}1y`!{c(=wTWpSGIC?CmZ>qW%3{_+{t^*
zVBVpo@EqH;XWS=4L)OjYz*Vnh-1)X4P?KXtBMa@gkZ(+f++fK2KiCQR|Gh$|sH0rW
zt!!9zQ;zmjU4zK<r(D}0SBRO@1mfW{`M@)Z)O~ps?AXP+gtv{t7A}qT{QUsm$J;Qd
zZXCG0&gCELUP9^XXI$j$6)=9E0i0zFYq$G`ps{xy=ItJWV+YAmnfW#}aMpcrY_cR9
z{0*tU>LM8VRX{yc)oJ#sTYTI;bK+vEfZlW0@=f3J&}zRd#Luh4zT2(fJ+y`Qh-SS&
zgBI|_klop)ndR6I)1!R}=^z&}3(rOjp!fD%#IQN6SIwXh%k*{VShf?Nx}0T<{nns(
zR~<JfYY>4qlX39|HmC9DB&ss(d6~rXwd>b_Z<;yc-Lo*|tP$y$x`AuR5RmJ8FN15D
z4}Q3ofDY4l!i~jkX#2B|Pui$Kik7cotm!&vu-B#=Q~ux<uSd|aeHH)CfaL`9t?3gN
zTk_OXmMZAAz{VjxQ1H<gXRE9L;qleruW$&&GXr^FmOYgjmjwyUAuvH0iM>X5!FzbN
zXvP87d$DW)RQ2{l!QW39JGdIRxvJ3Y&he03BE%V^en9^Q3-WxiF|l$cP-)QtE&&5*
z^GF+-VDms^Typ@Gj-P~c+9v4s`5U*}BLct8VtskW)%=MT#vpA6ey@cg5i4zhb8+%m
zA2Wx0^y(ZAVf{TDIT@OIc?j+_(Is-$GFagB1dL1XqsmT}&p0RMg1#8xm;H}XDbo^)
zvXgmf$tOwdGdVnA#2BlG(7~(lD+E?w0(Zq)h#VMz;@Ht5>D-x;+nIXwU10($y(;43
zx4eT3#FA`DTaKnfA~7(o4a;u?b0^XzxbgBvc3xbC1>;_zz}patc^$e~as{tHdIv^~
z=OA`F!>9P>qdeohuKiHSex~y|t)5>or;znIi^Czk+k=xgX8!z3_wnmNOX`{%1+Up9
zxm1(@O~yYVWW{mDxl|*M9)#h$krJG_jd5H%vLvgMH=uWPisXQnGMQ*^O&*)RL_6k}
z6OSJ(Du1$#8~rsAukx)JZ8H#etk<F8k~v`b+a6ysZ|i^d^7Pvl8}eYmR{S$vhSV$F
z6-AEj!5uMO*uB*W=T@9%^NAX5WFBCZyajlzfX$KVW?1m~14K4<VU=eXxGYtnO3Nep
zwB3w{IJFLCR!YDr`hdu3(O&L&)hg%=`3zQLcY%&-363+eB<*vb;2CKwrs`}0MV3(#
z-*Do3iq+^<hb@pAC&cJq9+0c~0_H~kfRmpY%jq}s>^Tjj{o@sg+fNm!-4+OwZd614
zkrlk4+8O3PtA*ChOna?(3imcW!dO)sNDF!ct;r|B<6knD-1>;Cui3-LD^|kd1I+t=
zQUR}y-U8F#HQ|-rDiq(T!l+fhp{(Z{%s#<7@eZ29@IYCzm}P}NFES@i%><O5ZHAAd
zWiWMs1r<j>#o9Ljvn`n)M=zRtyzLq4KiP-T7T)N6k#Kt6denHo4Wt}-irzh?+}wrT
zpmd>$`}vmT?CeY+=<s2%+wmN~{}Yfv<!YuuYx8N0S3iNVF+8}xyi$)oZH%phjAiwh
zX3>g$(S1-BeFxo^^zzBeBXHQD@6aA}0V4d_yukGOt4Ew6eQhdx=TgqiW(Oy54ifE9
zUV+IgTKSIr7yS912l4Gx4Z4MogjdGOL<QsFkj`p$ChmnHj9oCTTme%j`a#|HvG`Qf
z48|K~!tr2Td=ON~cFkI_zDt!vseJ}-4|OU@Dgl9b-NexSg?Q~yGzK)X-KhFA$n~?K
z`x0N{$~qyv&>=?>#-Eh99-N5dqC@fBxlibJWGRH-3xPWC;TSxn0gpc%1mjN*rpX(A
zbJk-!V8*R1%#}O_m>14`UW-BQz(BAc^$!Zi>XW$lie&c>Rr1tu0qVBs(7oXrM1C*p
zCBJ3I>#sP<vO1%%X6I-A4Id9j&a$4Lei`V@vZTwVbbyiPb-e2B4=GhD<mz*FCy5fE
zpcKF%UKbV&X#hc8b@n<FYvNQkn7_OHEne6!M;sTckgCDiVE0eJ>98!$Q1%SC?O2NS
z7vw;?s-CMYAB9_1AH(_D^0aO2H*^<8;MO!HvT~vpNl1~$wXS!dY;_f8a(kfp-X4@5
z@Rwwqm%yHxbzt0Q$-E&-+_K88c)jXAoMqp=7w1fg&^?rIQ=5;!F31oaQx)Q`d4)G0
zXGW{o4j^y?)1W>$Vr673s0nLu=IV<W)lV@^=`Te8zRV@>yuk0fV1j|i8^G3l5v)E}
z2v4rFIo&QBq8%!+)?*V!9vuzg8<&H#?orriw-W3B^>M<`rxMcx+B8nnn6@9vfs8k{
zn2}zJ@08mS8?0e>dOvzLThNf^i|B1`4l$fEwbji)?U*v)YuVm!k~Xz1Pk}`FQh2)S
z9O(QrBi^^zxi)ldj<hUR<Yj*kvNy}KzU8x!zuXY((vsmrp9YowC(o}@Q6O(07!dV)
zhV=ZJqiC4Y&k1D4<ZLk&!vr>e)HfJI`t&L;^XX#9-#-98KkveNo@G}T9>%$%9`H!(
z!}2ZJICsPf*40@JZqXF`X4SwIM|HY!lrFW)%;dBWgy6$d^{De+K=-bdBi_o2)M$DK
z)RtU>r(eUF2Xrkbj=JXfVE9)QFR95fHA#m}GtFsF%Ue8mK##V&UT2y?Czp1w4c@3;
zU~H>wZq8X{;{VH(tCv5{&7r3-Qfmq0A+*D#c14n`{2YIkm7>@u8Glr()3Eq#7-nA#
zLBYzDHfj*hl~=HmR^vM@OCp@`O(d8(5hp!j?=AZ!te03DgZsZj^WMi?y<#43;9Y=w
z{GVgT^b{_`svb6NWxT%8%nyC<Y>v~~NYR)$ee$$Yg}%#FCLIfZ;npMlSmd(<n@>p4
z$xs8PDyWjW&j&%Ux+JH}yAxJ+3F!jIdI-Ni5Hlv7f}jWPAT)U^+01gb&C^$+^og=0
zqvkT!DBa_ZB^lG$^{3cf<}Kf``6NimA(qp+hq`9|%<Hw6?|sw?nuF4@b%h13ktBn9
zs6Gi>nvO^P7?0I*3@R12axq_T!{GwvWo;cyRhrYmKg*Nbn|d70BQ%I_$PK<MB^q@W
z8PlLI(VUWI5GGaV6Ni86(fV>V8r(mG8fRHX$0`^{_*$Xv)|2S*bRWJQ!|wCh5m|En
zAs}9y>UfgPz<ZeQC#Cf>mOlTC14C@cvE#aA-{eUkbX|$1?2fnkM<Wi)tb+i>0;mu(
zRzYecxHSCWo-PbT{h_^JJMS#lEIh{amjiGkNS;JaO9Z9xVNe;=4g01s@1|co9}`u>
zwC#CJPnAROQ41m6FO(lwt4Fg}mY}mDf^WxC2>!>|p;~5;c5x%RFLlFVvs=Jv+Cb6P
zE;I6eYA6b%le6+i2ZHnL3aG0&25Dd4p+!X~j+v!M5;_VbW;>2yP-qn=82MM?w*NHr
z7|5Vd`zhDSdK6ER2CSR<9mjlngq<u4QgT#+RT@vA)=vcb`zxR~sSh8mVcvSB1)RK`
z0X-dRMbB;b#+husxN|~}Xgylb3GTVGjyfO4tM%b~&a30l;Wo7Ny#h5I?2Z$e-gM%n
zIa$4T6I$O$$I$f!AhmtKX}%gr61%tH9aHwXsOV$OBsp9xQX;F*=};x<0jNJ<$a+T{
zQT)+>+9+5P!N=W<C-eYpy`s1PmCI;0!JITN{?19W$8ytf09}z>%9NZo49$Ftx;bHB
ztd#^px|XZ&>*BS}+~R|-H1S@sl^`A|hRBiAxG1f8D4A$XD=ShlVRwZ@_h<+1>Aedv
zi=N`FcLt=fbP1N}{luAUu1$DxO44hc4gO^tIkCbz2nwp;>?}L@@QpvX1!2jIHK0YO
zO+5*M1;nxRp9Xr`@c7D9!ZLZ^`TE~)CCk)XU_>|LEPkrSm_eG<-LoDBh9uyO07F{7
zE0%W~6U_~0_fE|mDXw^EK_*8VlC?J{Lj1sY5T5sln;y%W*BD#yr`Kt=t4QZwKTgCW
zrUrCq-bHryE`*H9Kf%MJo|Cp}vFCjTgc<F@!M+W6&yLLxH*_Hh&ZE{seQw=xSyKKs
ziSHGy!>>iU#KcBG*k6Tw_O>SWyFQ_yy-?!r6AP)AHlpR>Xx7)J$h%M1q~7B;VW4XZ
z-mlRkWvi~jwKQ4cSJa96SCr{%b}nlrYrvMrMKHP|3h&f2miBE+s_Q|(wNr`av-4*R
zdwxEhJ%b)T9o)&pesEq=&hoVmus{78y67B$h^%+G<#GqadyK&R9C_IHYbls)OGW(^
z>EN3&6t=E1qnB4?qOfuuG=+VK^$}`Birt)ZU>JOwY(xxxU%*DjJJn8n53LnF*wZEp
zeI;+9a?D9|>G_8mqAnDeYB4W<Ea#i#4C3QQxgM+Y&~$|{=ETKtUqOi|P8>w!iIB_;
zu_POh8c@5q`6yoL%ei#0?snN6IJ0B`EtOjfUF+gN=RNDt(-DJP<|58^q&KE(xbyy}
zPVr@@LeV;sW#@E9VQ*{=<gj<pxrGDq!z8B9-RTo$myd*>j{3AS_c7#UY0;~?l=Wcr
z<@ha)fvClwz}5X5I7P9%<9{!B>FNmB{?3eeYSp5zqcVt13nj;NROpNt=6C*<2)@A^
zL239DD1V5Yd-F5+y;Fyb(=a9L*WbpY&jwM^qfk6EpK))R)Tv$dT+TDgm@c$HyfEE>
zs7(0*PL@d^cyNU~=Pv>$XBUy+^^P3d0CzC4H6=qwGJozuju%_chQMi+P+8Ui<89;7
zwqhsdU1D7EiSb~}-9X<9ZeVg$K--Vb!e51Alp3Gr|BaWYM?juj4QIcrr2(B}eIJef
zegmsFOW{Y>eOxR%koa>!T-nwlmT@i>3H5ejufkmr+jly;AAApws0F=l_X{#7org&*
zzq!al4pM7k*j@3w<J#N^2#y`ZnA`Jkhi)>AR*|RrPCqeTd<zWjur3Y$F1q!M;{B5u
zlRsksRWlt#-%eE_(T>yLMvX3MwW`A{nk{Htla7T|h9qC=iXBM+b2J$H)o&nCo?=9A
zMh>9jWwMf#%L8cY6%TA4(ZyHvv#f@fEWoVepzRY0iDj(cdaoC*p7#Jw&tdvm)?7}o
zpj+~>a2<9vt$;(fENShz<xr%(2J1b)qrj$E<g{!YNJov3xJyz%tMnLmGuMV{=9-h@
zhqC0WS}*Ij&cPM_k1;xzvBc&7Ng^H=gCL|or+*gfhz@OsL%EKO0o0Ag=h^?6dz%kp
z+0nFH51@X!5Z+xjB~7DF!M>8In6di~M1Bh4%-YO|<FRb4+!_W7;gwLh$&mOYYt#3q
z&FN5C6<YRR9VA3(@*{?8QnyLLP&cau9B$m?Lj!uC@U9Zo7@PrPrHtqEK|s$)+j0E1
zV;CR12v0R<<GfX8@Z$qL+PFlQ3ghqcZOe5qY<V<}F)}7f9XI*pvyVBauIZ8^cg1YR
zs0WR>GBDOZ%$R<!puEAFwtr_#<x_9r>NFKtSf)%)Iz7Sa@aLGhzz9dCzQqd5RNOrI
z4aRE}!QD5;WLAzEnepW=2<AT!g*(qcnZo-JaPkNWhB<Q6EF}24%Y@AB{DCz?{{y)x
zvv7P_2FAQ*GSyrGnKo31>c78-tIOIj!#friXBd$xwNenTo+k2ITmqL(6^Z!vC&x3_
zn(_Ee8`c#P#sz6V;**RuNNlGKjVa>rfRhGEci9G~)MHUP<Tf9ekb-|*O{wpUNY<UO
zg7+w|;0+22@k8+$RNt>jf`TtVWReehbrynjQ57%LWxKasx!m?1Yf|6T$&K$l3n3=w
zFkHEclPZkI*;!staQO(7wXH=}$j7L#T5y^?fNl|)(sIwo+?oZdbj^Hga`Qh8l4Yzx
zWkH{mRW(4>Tegoj>A|fFWy!!QGctFh6xQ9+qIu^VFeg77LuVjFDE7c#zhmH8V~C&F
z{_f&EHgj5AiNe!Mc!$5uxSmfy!C-k#5Hvz0E;dBTm|HNSBpD6ou>Pv(8XS5@kJLMv
zqxJjc2*;P;J*8x9_5BLHk~UBpuoPUgzQ72^9?+23P?yL9Tw-bolrA?1<FUc0_S1^W
zLl}m)>eH~FN^~3ZCk-zU(*8bGBJ8`(4T&<O8ke&nZOKb8t{6ZA%I?6kcZ_K?tXOp0
zTR{BFVj-mPHYV75aS8KoN&eqOuR^lnU9}?dIQWbc1-^xZzkQNFyAR_R`(o^c8t`9y
znUCA8NlM+-=#>#_w6)?ISpBxaziTYOr{M!i`|>2dr@!&XUTe|a29db^s4CfKaR@H7
zW#L$@5(xkMl+RW%z_}r8E}HfchCF19%k`G@<8BFtAKC;1Sr#RzEE@cheE`{<;B}*$
z=@`dRQA>fGYQKhd|E=W<a*l!bzf>-*tphwBzTxN3R-qTZtC6#+R)prIqW|Jmoc8UH
zIP4n_er&ceU#L!6OXgwxxtHvH^%QUJK}_g&;mbz8K&PsMBJus-5~;vZ6kT|ovsE<4
zh*OQ2J|mG+E4v6h^I#rR|AzI39hi8z3#*qK(V7%r5GMO!cxoD_-}fC88gB9<ob|vV
zQxPupo08z1fi(PEJKN)I-~{#~MH1T^Aj%K}ZV!jMwK60^z7><A!WoAl4#lsknD^o%
zI_;wPW40A_-E#?iFKMHDKq8LV#4_Xd^1RJ>0r8tHi{5GSyuj=t=kC*tQeW0x{oe+B
z=<*pg<kq6WH!~_Txd3DqG=rACJ2xY%9;UAwM7{ks!<&XGFgE*wl|x_RRMw>yeczB4
zZXQT(4H%90P7ESdu0gm`YECS)2QfyjGKpUH5v4b@B^N~vI4L9<yeG;+?X(Mco1A1j
zgKHq({1eYF8ALpWhjRNv@-cO=I=p*x7rjQjKs3AvR<c7erO<{LCESE-`;3Wy(PoGq
zo(xN0upE?^IjM?lz=(niaAn6e+<#V;wsyXPjl}}m^PVvSmzZ&0pCuSstI7Kwr=0V{
z>kx6kfc|8&fD3%*b8SCi{f4WI0d)|>V}^>f9-rmp0^?B7B?mX&o6u6JIj%`Cq<s?)
zg2%Nq&gp~}6wJ%R;n&I->tz?jj$44m%>R3LpA~^2>d>0N^yZnzL0)$V$+NCRjYD7l
z$I+R`)wsQVcsI}Us9CclDMRh&Ug?;RF@$7_lO$6VGH)rGP=q8TNl25V_H!>KNl2n3
z2^mTyl|+N&UBCA~|C|qJw|kzo?)&?_E{u?4EE01y`eKw0HK|{ZN~XhTz{vve{%;r+
zosK9hu5Rbo9>0Z=KPu6va1_*RUJdShH$$n@5$HLj42dkW`kZ-z&6V!L%mXaX^=Jr$
zhI(|DnI@4O*9{ZCrDFEre*VO&jhLe;Nz$55Km=aoh8qtj54)vkY|bTa5%aEy46hZ6
zf-?%Y#A}dMI}Av`hI8<*j0A~Zn}}|gZ$Yy|IkZ<Wj%-#bziFo;H9e_BrZ}921DO%%
zK4mpH4^t<b-~IxjxiPg=vZU5!BG!HSDST2YPp;)@lZlSn#K(z!=DU7#UPjS;!Q~p>
zapWg1MokyK+%O@h?M$g*jSab_r$f^j7folK5?QV-2lS&35#H(qxo!jEpeI3VS6R}K
z8kSF8n2s*b3?YP^<Bs1mBu~d$5ut1|R*oqFE9Q;t{@MpuER0F>q3_tBXM{1!bwK&a
zf8cw69e%1DPJA{Li@)_r5cvWfnsWCiNJMDUzU0|}2bSSuqd$<w&ais_YLNijr*Jk|
zk{--cCq}m67@%j54)Khc-ExR)?06w;ZW#h;t2s~;IRPR@IOD9eVI((r2pZrUV^B*o
zgi{*A%Rs2lxQp)&RD)dQVVs?>3wtK$kQB>7(2sq>=jFt*zQ|YJor@4wm5$=_&3=LV
z^l;&{z-!QU#tt3d{o>EusKEOlSx(lVN$i5TT(+(k?~&-wbCD9HEDvF?MFc!pVny?Y
ztDx$QUUb>3g2iF$#j797la`Tkc$|6UQnDGdTK+MFP9K7nO-WqKwiG_9XDMD-ABdC1
zlEnLS8(0MjXpNIMh~Sp%<wP~o@q0f`8(oWD`?O)weH*&5;1|oX>;XR==I$-96p!8)
zhL&q4V0)P??b6)MXS`8EkH@$1Z<rWUW$fTWR1fTa{~lZ<+Bo|NT@<7`2m?;`!br&$
z)MecuzxWa`HA%yjk9$!kHxcEx_3<|xUZQn)3T7_PhKmm^h-l~szuw%6v`%7YKB;&R
zb@vMc8_lrg!5?l#AbYp(tiV~mpP4^M6&80mqq^rOo;#^Uid#=WO{fdBJlu!VmMPNr
z*}7yw_7J*kn1Kbay#O}rQ_pJ)G0@qbQ_Wg~Ju{!-qjP2?q{{^Ym{W4U%R}f-U>S6?
zQ`llC0VnvsI3!sK#le|k`(S-YHID#qjT_)XT+uf2A?K%Z1yo0sVDwKl@^ynX>5Ki!
zrDxve2Hw{|td9q0;<O&sB@<!5;si)X%F?VbW8yPYfu1<>6_P3hG<o$)u}+%|3HZwP
zQ`Nt*H&B5Fb~N*Qt1}>fb|dOPxC!YkXLw!ZY5?h-|Jy&{iIZXwP8x#R<kzs#%9d)c
zjRIembt%2e_$3n<%fv08t8>r5^%G3#n%|0a%y&()!zvBxcYTEe{U#)AtvZM-twAH%
zhTb<gin_1AV^^ao*ZgP<3ZfJtvA7nSl}+h0kB9g^U6*)I*Qcd5s~~p0A*7bR#{v5i
z$S-DDW3F6WFYdyS5$(89ts9Hoi7>2SK4#ZiF{g76yAv!DZhO`OPLkGieX=_FH`j>%
z%2|OM<;Q_Z-&wrxY)C&8D3KHWj5XB5WA|$j?5J*ms9$Sw{3Zw7eQys|>YalA30AD5
zlT)~|@hGHdFU0&Yw_sug`(1igim~J&@Bh4l(;o^0#gRX7<>qGqzdaeV%Zh&8Dowm%
z?(?Vh&p@%J2M(<%WbE|;RDQ!eLRY3kR){$%PJY6B87_q6l}%irmLY_l@P_T<rKx}8
z91y=?XBX=n;q+A&WZf%uVsS@}_!*sqzljE9VYwU;U#*09R`PUdjy18Y*2O>)iggbs
zU@80maIg6wzWhjqNEU9#N6b~G;1q*StmiN(Whpwl3+MvdWmvVonNvtnq%Ftp@a~>5
z+|Yh~Y8K<i6?-1!%&bBopudTonS+Hb%d}|EJNBOJ7{fQ3pWy^0BZQ8se|b@44ktX?
zffHHYFwklVpYoy<?MAeN@2jgA8-EImmY>3u;~wZBX+nRge8$5a|6t0wn|OUs0t}wp
zivmH8Fls0k&MGR?nJ*cO@TxR*y7n1M7BgpL{1$MyQ3Q*<vf$nq0rA2#PR&({yc?ea
zTOJ-q_lC>ZQqspAnyE-Eop-~*87w>SbvxVhZ(tb+C2H2Qj<Ix~qK{)8f3SqzE4(w=
zStX2947-6Q!R)-SehIpbZGtINzTm7L#!U5g<gahJ0j+awh@;b4&a>5sF+(nM(m{Rr
zcf}v<OVp!{2RpdKYd)hJ%jmS-h{TRA3Y$lKhr1z_5a1+9m#+(l?!!-^Ewl<m`I4N>
z8OF`G@yEb_a`-lvC-BDRH99-6?EIn}uxGo5d&7-s=P(Ph<H}nwF=>R=$8}le_mkLy
zWrp@XNrk%NA+AyCE$38!moacnh2nH=+Fqvw3vPdeEa_T2ji0a~T#@!3QX<W^-SFY1
z9{JWKMFR4z>D(wgY)JTv&7Zd5xltx0_^~mq)n+r0UQ@odkM$hAfAAq!eL$+2;x-#E
z94oN~9J-G|-`+m1F1mxadp8J`MrAPdaXI468)*4V3O!Y1n5QHY+#j0p5sA~m<XRVg
z=L95!7}6NeBbdC$m|N;sjnitLfhx<8*H3wb@kPsE(`IdQ!Tlr*Ni~B}F`({s=88!#
z#Sxd(iP_T<?nb8w_ia(6G4B7MV@L(JxVMvKQ%{H^2UUoXu`<<QXR&P;r=qRrFD`_+
zO}8iwBkFhW;rNIjV3^0w9fcq9wYLV@FoAvFSLjgjsW;G<r9>TbWBJe0Nig&H6WHy@
zGEzb@25+1J9jtrqZ!!&xFcWg5bO}vj&rSC8beu2`+%4^dTYMO2<xUq!i6e2SPm`{>
ze+0ees?x(}K0;&C87{bY9qh7}A@1+Z`6S6jAi@}-*)b|iZeqWC4H7i*;5+bH`3nm?
z-tqFI+PI?PhnPRmiLXAS;Ni9JS(bV`=Dx{7$9@i5B_&Cng9C~N0>m+AMni1zW=J+V
z3M&<oFho`Y1lMo5d;nuo(BHr%M0Ih^DFe9I{U(TdzHr4`hJ;Vqz3ufC4U(T_k1y94
zQ`v@*c=z}*@VseFRULlAhR`bXVVR(~1?+d*iQ_IWWPI5OmK)tI;`f_Sc%fPd4WDA5
z_U%u+_1v0ns8b?Cm#(3I&}+2u5|HBYs=@^dpW&O%103z7Ly~8U#AZ*0T;6OK^bZ;0
zZ&;p1x6p6kb3K?Be9jT7%^gnVeVe)XKju*Waw3RF1Kt|XScczn!ROaee#ZA)J}4>@
zJtR+Y$(IWG6#=&~ZNU@PhnmPK?-oMxo+J2~v5%BH?NL1|jw?Td{QRObDEgWyT*?1t
z{+={p-(4&CJx7IXvu=ipBnhlu$~x#lz^jF*k~JCMLH+h>e&QDivi`d~x&2m|uKG2c
z{M?~Kp7^HW!;4zvxt|GLxx}4usH0%p$qgu7qmE~%>X2`GQsk<?I_Y0wNd%I^c`vI|
zysmB%bJZA<88i|?Zk3>vL^$I)s`8=}8~F>b&Om9$8AOZ!Fx^87$}>}V+ckEW@XZIt
zE*S<b-W|NaFGYBsxu+5iOJmC4GBEz8L*_JJ!Y<tr{MRUHVqvODcCwv|@`K~JZj==b
z>EpmXMxD;OphiD^5Rk+5lTq+#HfpiF)cLqVwu|cKO8fOtvvm+`Rjc@7?g%gQT#Ht$
z)&=+1x6otiJbWWzKzv$rIDM}$-rd?${Od&-*!kUnw7X_B_Be2ED#f6lr3{~+_`}|@
ztT*&>3imD7lsHSOk+$xUsOZ;<9+r>!sd)wH(31r&hc)S80plXx&j9tcCY<3#BXaCl
z5tffurl0plLrQ)PuKcD+BeyHlPnV8hwzECD57~-uoVkZVb<C&ubs~6`RCAkG-v599
zj!#Dd?*5OR$C8IrllAqGwCpD6wI!fe0_)H-=kky30%E<?8jI(Y3Dc_9pjrMcuFgM(
z(`-KtWCLU3TC?wL$r;?LrA36)7K62RG0#pP|NGc!6#NL`nkFjIMu{PIzS5)yX*u9=
zKa4q+CPQOX6c;&{-66cDL5r^{EoJwiV`~ecOEH{3k&z98WisLudHv9%ssTGKrODJ{
zbBaN4u`1~Xn7^$CvxEY%_fI#xZ+{p?f^c!_=ap>McAZm?8RBP5?c{!UDA0iwO=w<v
z8=~A8b3^qg4Bq;PdnYackqXOwR(!*sO3|2D-T?<k8qnA-Bf3>Wiqx<D1ujk#v3o`}
zZ2BCIK0dL0hJ+E#UhB{N&6V&*Qi009Q$Y`_&HRdq#h4w|!*>PA@CFHgP`-VL4^Gqv
z`6qI)`*$cb%ML(Dy95S)&BZ%^tVzm-95~VX5cE&X0k<WLM?CuwS9Vwoenqv+%OlMf
zkJsVH=_}KNi@&4IOV&5)y8$`hv}j<p8t&`Vqc3H3Nygt{;4!9vFKEobUsVNAGhPqK
zWQq-;t1!*#FHAe#0m-~N9+)$n8U;3^k>?Ujt~tuB9Tbq|c4;tbg9TZ_7|}^%zhJUY
zCptC9qMdOP)MahK0#!-)6l6%3hx|nElzNtpHK$Gw$KvLNC-GD5c`Uv-nzMX&35J^H
zqeJ~&c&u53$zf5LDRB+NYk$B9mWg}!g!!ZE&T$JS<zQR~oBw=P5HBpXA=2MJp+oQ>
z7JOhFfx9WZklpP=b6M|VbP|Z<KNW^B?w~+^3s=B5a4y3wX?da@IL7HSr}JLeTw4gU
zt4D$b)uS^DJ@G<_J}o+FM)pa{5Z}Hzuvu#^ZkDbD_0$lqFhq{{Y8cW!*93k5GN4Xi
z59c=wCj|)|T-v{v;gB?Q3yWOy8%Mn39IeiAS&V<IHAagHTuWRN3zVt*#<|=<FL~<z
zpp<t{{w|*K+7Bm1HG?|CDz<-eg>61f5Qsi-$g=~RGEB%}sUom5W9&HJKf=VgD4d~_
z&U?7Gp-+Dm9~UZ3KI&AWW?%(0)vFWD1vf#cc^^~{SK)QmJKM4JBgjo<nY!H?q}aKR
z$D3g=aQr+p-gIZE;6#?KScW}P&oS<jBuUl{<#LuYH_rDGa38oPKDgSD)UB1qFr_mX
ztjPG(7O$}|f-$43bZBGicD$t*&GJm+vApagckD?j{`#>I%@3V{zO&UlKiiPLb+#Z+
zvo)y45lJWs--r6?QE*)M5@wVOP;;(2ogT{A0`)g|wd-OW%eeIEXJ@eYm=T?HL<DcT
zBxs#&41emq0hxTS7aP7v;iQdU&`+xf2kS%O@R4c=OR2%xDkdnux|!>>SEFmRhah+L
zGq5i9L&w`!+1$yJx!=t}^ZHkaa><8DeJV6?-Zfs<WB`hdHVZpW@F@IR2@#oXxI4&z
z8Z^Cy+<-;6IBgobJ1sz$Z}K>#{u9+ri#V@LD{#*k&6#aD0%+oiqP(r*%x@3SeO0b7
z?Pn@Zdhr*0Zm2>1@@-iB^*!ff$!3DhYcb;QO^(ZX%>1=`d6$X=e#HDDOt2nKAJ3`B
zlvq=GY>o}S*8YR*N3hT0{c%(rqe%1hE<pO0Bm6N_W14rr6zyYPVxUbg%J;nHXO3by
z@z8$GeO3rRXTY4SQ3=C{g@z!tGaUmuGGPT{h>M69xGW9j7qNH992+GXG);~qX-eaS
zAbr}{o`4q9Wa-%5$04;@3rf@(vt*zW?O4CM%07~>i)NXlE9#tk%|d?m?pYvEIm;{A
zNE5-lu`FBtouAv^4qJ<~Nc_d!XnI<mIJz)D?x;HST)6{u`k&&H+)iARS`B?{UZi6w
zOGhfQ9@3=?;s=aB7X4O%T0B)G$L5T|pAHh#$vXnh{Bsz+u6^f+8h)b7%@l5hvkl3R
z)q-2S+9Yhb5~hre1IP1c`K4DIP|zNYS!srZfhIwXW8J%}j1Qsv1AHzTqockdc3Jmv
zMfC<`V|_fD*bYHo+;P4!t3Z6xJr|Ac+Ck+|FDy;2!mn5Li236-sIgUonSV^FliNEG
zrBCM9T3^E72W5zU*E_zgje<zyJ^#Pw`D}LQQnz9L)E9?X9=x5~CuvIb6;JZ&V|~GK
zqXQ?%o9;4K?IRp5Fd%|&X0APrTcJ2fhO6{@0>8DDNkUC8XH=(79ecJyby++noRX(A
z*1Y0<(w6b@V;4b``BNBD-HpqhgrV2g>zwGs*}_RnHzPfrhMSX<VWr<iEHXKWl~4cT
zLRV`FwLy@e!d$3U*WihT4cW5Gh<x&vrVDMELwNCAs0!N0pI=dq;zMDOyO{Y*=bGb*
zkQ1!)UxVAGsnJcFb;w=&>rm`#&s#h5jCJCR#W_1b-WIw2O$~4%hzI-jz2GjlM64?F
z99`J+vvJ)ePGq))_i|(1=lk*Eq>w+XU;3F3{}O{U+G}{R>1+JuNx^qtDm>q*LK~xZ
zaYL!s(c!<Ru%$qo43|?RGnd!0_rVeV9y?Qg)-|U=imj+`cpg-*eFsa~YTWPLh5@(h
z(02cG?rzx=@ZOOG9?n6ixiJ(RHJf<-npVc;7oZM*3Cpwg!LyfIBs(>L6NF|6t^X?1
z-Fa6qr^=EPPkw<R4L)H0{2@dQAefH|L!Aj}xTW|w<}sgmqwfpxli*v}$#~8B5*oDB
z<vIqxQGn(9)nUfILav|XLbN)K$bpMy^hmV@wFfg)%x*`u3=^v5uSgoR-f+sDX|UJS
znl3zIKtfp#?X$BH3Z`EY*LAX8>^L2MyowddYxzT-6UE#;hp{SZ1Aju|FzjowAQkPV
zctse4zvvz8I6M#M4l-uTQRd5XdnAkwdx)vJ%wzV`95bGCaFESY+>|>&K4mN8CqCqs
zEH)>@8IR)htlzNKPlI^nwc|gJZlFm0rtqPjDrr3NRA{>?0CpN^k(f_6K~Y&iPb-OG
zM!7y%?mvwNUe7==`#L<H{{;MQroffQdgSqxCn%V)P8cgE;QRKJ^C|0|W9-jIe6B}4
zT$Zz>ee;_6>|kjyVC)^&#jMYEVGS1g8`1^(A8<3h2hZ}lF+1IlPp01ZIm{C(|J{vU
zV?T3zgA~zUm%|zK$3db-HFyzQ*zXt*5ltB&NZQINuI|SwNmnqOs7HqWOT&2<8YJVv
zF1VyLoJ`x50F7>oxEG<%F+P0(GRr14^G+51&mjpfU*XE~J76}3?FFqzpgEh<1U)pP
z*V}t>80&mZN&SJ@%97M)RtG=(hYfA`VnN@_4x>lcS<tleuRyr+D||3jC)rLeP-<ZY
zt*KU2v~Q0%{Y4L+mN^Js>8GG(VFs$Pce<BUEwAgYL=Tn>poDJ`s>=L>Yd)+&(M4UM
z;LSbP<S)Ivqta^rSH){M^y(yXH<)8_xEVZkmn8EPw2AFHNt(<uK%aAVLrteSU434W
z=&X8*PrZf_qwSYK@NtiL<Kts+D9Dt=4#x8l7Z&5#-tpMHtQUu@GH~@rP2%XK#JijR
z=2IfuATKNcL~gpcV0jkITW(G^<S5d6pSrO4i9DZWbOTkEW}!LbVoloI0m~PqqhQJ;
zaozMTE}<cb&&X`Vr&ceqX?q{)yY%v2KkK>1wJG8<O$4*#FroUHZv3Z7hhF|+K-QGM
zfOOak9zRy{E-fnbLcvXph%u+qro(BvZWcrvSktlm71&?Vi%S;Y#Xi|Me$Dq<FzFwH
z#M(+M{wzy3hOwD*ojZ4pIoySpr()0|6>|7Y3A*1Y$7u?QXsV_}itYd99jonl#lPPm
zRG;;E?Y?8Bi#;wH!?Nju#M_Fy%3#LgdGPLeHyVAJ3;#`RM{Ydh+5El31r{!cVzIoq
z*7Z3I7U<LDf9tv2g=TcAEt}u1I>P*rHIT$l!lkQOm#nfFQ)?q3b7Cj(dfH^LjqM3m
z1@n#;RlJ!f7DJi>p!l>if1#S7%q=ZC@ip_5?ops;cF9n$uQ8l_D|0V}W^<4KNredS
zLKL0cA*|XS%y*r%;LN-fxl*rT_-^kjP@H!He`?=DUq9wy>Hmq=H@m<+Ycl_?p1Gkr
zGoiJMdDII{IhnUc^y>XgmXT}XA8xjxCN~e^-IpccwLTn0W6xsaCOv-Ht}JwmE5Jp~
zro@ZAg9c{3W^-Y}EtaW(!=qSVeUvjyXTG(!m1(%=ni-w*`W$BTSEHZgMG!m*08?r9
z{dNApC2M911MOG_T)~ET%okzqBF4Nck|WW+O2k<CK92AG2xe>h`Rkoez@@IA8|dm~
zUa^S6?29H`@nYbVGq*91$Zz<M^~Swgo^tMGp-}htG+&^X2o0wkQ2p#nPTF%b=KpSj
z8K0Jd-NQGq=cpoWTq(ze7+u93x@-p|)z2yWE`VsYM=07`#4Q`GMsk<FfRD`IA;ES&
zVFv1`y)7ENT4bqU#RSgVzaO0&tmq1Q4KJj&!md0`qR^^I{kGhMuA~9J<WfI4wz7Tk
z^8}0xHz1<DEHixV2{d$#hh)Drq1)s_$e1XCgnyZfr_dJSowcaC)nQI>d7(Hw^DPFt
zZ|8lgcwW#S&aJe31^%h+e8svHP&T~?Mc>X~7rDT_dL>O_q%T2IHn2JLEBrRklKd@s
zi|Ts=Aiq8jYkwBvww-N^0g!~m&WtK{lr!g_JWPA6Pe0x`#QKVX%(Ybw3#N-8B7Q!2
z9^cEHNq=y>dLMKxV0j=Z2ap!$V$cX#644fiGj08_>_!fVJ9^OYV;ttC%mnqtrquoF
zH2zkW7)23(g`1wR+_@^{P6&1B!cH3+{A?$f#XiE4w})}L>lSF+s!T6B4=0AsQpDq?
zBd_hQ16~1@*vZ~8A?!1r`JxBn?>Mp<Lyqt}n|q9|W!b2c#X{e0ve>rs7A6Y<aLUtA
zIB}*88e7jX%|{$37*rLy&6<tEh)U2k4@LK$My_CY8fU4)?p50}3QM;9g=~>0)U9JK
z`I!SSv??6aviISI3JM{Aj`NXTsW@hn9LYcT6!y>9jh42mG)PXG?9tLD+s3TG95*ZS
zaft%e9*l>C-;B?&ax`v#7lmFIb*WKDGxn+W@CEap@P7=fNOuJL-b>8mXMPxkjwh8k
zjnTR2_Mi(|D$no>9;PurGT?)>qqw?Jj!?!PS6X%u#~I7fEoZHXv}+1vFP)2(In&VI
zQ3eEmw_}T5H<u4r!S?qXE?G5-Q~$65?0WpMxI%|cQ8tE+U21s4)rJ&k`@*!JrEnoQ
zAJol4(a-KMHr@#0D>muU{&pEs@}m|?%|}A~VGGo6Im@+4Jb=1=>NL*qE&3i##)QU$
z=q@Y~AN^!X3uZn+9djf4&mY#Ucx^;7imrjW#CtBvwFB&rwV?asRPYEI3w_ItXjU`h
zW<9p$8skKK(Oela-l_sDqt}Ak2TMBA@e9Vk2nF}<hr&L_lzkI$9wjD9law#Ru_0<d
zeB8#~%{%mIp{^|T7hHw~Pg3CSEP2}XSr>&D-hw#eCFp&UC39u#;j_*K^ttm$+;D3X
z#Cq=KWpjfthkbru6?CYX;T~KaBuUI<XTtTQCdkzIjLALW!Z)qV32?z1G(Ia615Z6V
zd;2*Exw!*9=GF5v)G|29DLc^qGy(M;ro0==DG0Hiox%NZ`YHtyJF@_V?Z?3Q31X(<
zXfy~rgXJaccmKN||2Vt`OdaB|aqL&2kHasar2ktOLu84;rg|7T^%Ry4UxFUnQ~Aok
zesErHN#$K{;<AnTxLNKM_I)bHu#d)&pY;?~^~=Hfa5yy1k;Eyl2XXhfItURZaoMsV
zD8q6ZeFrB%Y#C$y>RQupE}Eot%z4;9wjTZ7od?lqHQ{&GjSb!Z8fMB?KrmxthUUMA
zL&N^JBU8sgX=RLW2!%ISWJr19H3*ox1bs?+gkH1Ha#5G2V<X!&e`74VTP)+`)MHG`
zjjwVZp7TIewiqfb{)0U|IwW<jh-FDtAw5WrIRcy@TwqP#tWY8|ZfY_(8p|78EQipq
z=W$xgSNt%~h-j<N#hTu!@Tb$9%rLybs~^kc8Yg}fiax$@U2(;V{3ui<gX6~FsS}oT
z`M%i@+Zo0EGbRGn6@2*OkW%5hIdx#R<TyWx?Yw@Moy7>jN$z)=HR-s}fSpr%ar4?&
zY~H5^>ORZabNni&)2%>CPJIGBQ`YN^o&yJ%mojwfPk48z4x6=jY?U=5jdu%ianvd3
zUeW}=J`E!0VNOIfe_eZCE2Cpj2-qgw=4#@`V&HC7Jhu1+^W?qYc7&y)<A@&K^8J2H
zf0)IIGw*`$$aHXG^HB2&Hd9MZf$)#4XQls;SBNnsHJg>Fa?(m{y`)ISD?Y=$%MOC0
zq$Iy!>P#G+V?c_RnebIFdvR8ZJk9x{LWi!shhkS1VOh~F$dgzB?hjUq$7`kH;@%}F
z-R=UIk`4_IP0{R^F6{Mn#o61|gN1cIj$Nt+2|pUSKjjw0VCQ}CqFp%l`AyJrR3@33
zldz;skq$(wvFyQWXjD8XzU`(*s+JaG*X<izQgAiiFR`KnJDIO1OPcPt=>;Rh7#Nr;
zOMQb)@nc>$l+R1&hWt9g`ulyD_R<$m${I7q@DPd)-6_lsWZw8y=Q;Zs-MFHeJ*%n}
zP>y?vD^Icf#_?!={!?vQ{<j_LZr5<JE3WY3%V9XW#e{}MsM28GnqF(XgQ|?5<$vle
zZ}yNt*lX6SRc-=DT?yK_MwM3gH{q_`>g3m{&zQ3C9qUh>gvJ;0;A7!|E{zX(`RhHL
z=vEpi^|O4-rBzs<84hC@4=>^DHJF;DON^2RSl;<6l$tmJj5&@KU*u4Il?;8fMS*62
zjpK5&kKlkyG59H5$2xN@+GpI(T|8z<lf5SJ<Hs`IO8iGS{z`)`2w8|Kg`N0VqaW<^
zlxWq)T24kwgE&r~$sIo<O%_*I!1i`4>a8*W@vn98>w;lqQsFC1`XB)w2Iu(qI}OO#
zxf?<7<0rp8#heHn&J?ckWjXdfie2keFrRe<HvY6=tYzl-Xm!QD!{zASXaFKfSAO`Q
z9^DvW0s`>lVr=Zt>;4rKJp9eSvNETi7aWI(v@>w}j2pW<{J~eVZAsorFVO$>h}S>R
z!wZ~uvFx5FA1F74_nhd+nD}EswEKcssQwimILHuH?J5vD4M4<S6KE6;hn7NVC}rMA
ztw<4m-4TZyLJpysjxE2uUqB-iuE2>=Wgt)xV~EHMT<7W#=NdzD{j?RmY?y$tBfIc!
z7TX&)52BqHg++|Nq^_m~5x-flZAK(Ik##uJ<pFRLZOIMS2#ncg%$#}Z#4qj-?r__P
zaT=P`cx(o$PgusagnL55HpU1WtVG?yFy^nafWV)W>(l>*f}d%OTN=sxc=T{T@-@j@
zb7^*iW6#s28t6Em<9<2U;_gk>RG;--x-DM7@LM8G;A;7dAO5hBn+T!~#<-pohe?c2
z6FJ(HoN40OUQx){U~9#$A7#kU`ZnO4%xTz1LyQe{LoU^Xcwg^_fxr*I$EM+-2pQ(7
zE8$I_$&&V>jj&SwER=cIv)zF$IB5*R8c{Ni=e)6v48gmOJD_vQnr>phUtnwvj>k$l
zPv<!}b>ArrxH<}b_Dti(IA{^CxM(N{{>m9E*b>?Fk9g#=EQ#-QN0ZY(P!QD1XR^6Z
zV3|3;<+VNuEPlzgJ!oM6-jc$6p1C<iqq*6eoI$fZ3hK-s^Sc?tu`YBTtV^oIi2aN4
zd5kpa?)nR3)xBU_BV%x+$H4P-DkSH)0$t+7co)QxKQ~L4q-cKuXEReWDfb-MN}lEW
z4wP^a<;;!lmH}P&Zga_5n|Rsg9u#f<!1>?!h$1s5UR~D%x-L!Q!!N0jjZY&{PD`2u
z&d}qJ9G4`5z9v_hl5^Oo>%*&7x#RmXIodplVq^Lku4LO>yyz%R*tvpa8a)Sr(|Omt
z;RbByo`&j<S?H<Z0fC)=_=_3}<lD9PsMyj6J3@}b`kTz>$$Wq_4}5@%piJlri0A8v
zRWQe;DY4rShGv%yc)>k^Yl7xOB#g&YuwNOA<rae3CnH*>)CH{_aj4eNi?%IiSYDoW
z9=mVD{m$WZ?z!J6^k{(+*&FB+Z-;lRlqgX>i_=ED#y-hZ-t0IP=Uv)_75+Y$@!JY4
zb{JCURw*+5m=YOWxdAKLoyeWb<>%QOQqCtE2MP@6oX}w;Jtc?te(?$H%ze?(sEAjN
z(nQ(sp_rMXO}jSC;X2RhQ8mV5<Wp{9my0G1{F8>cbv+PtGy$!B(xG)Yo6X(3R=Aoi
z7;Q5TfyZkz{`-L|_~IdRg-jaY=Ko_z+*VV_-YG@<V-!h#M+0o0BZkexBf%$aP{=Q{
zroR@RgHPe|^w(b2leCuPyB3%8(OR}N<X{tLd-phhII{za^_1!IC37K{&D8!|D<BPw
z?W*W-2dqEmK-c+892uucc4yrI#pY|^<6X_$W`%O^P1uaTrv|olIHHq7Eo{+OCX3%O
zwpO(SoyVL`eg@1vy8b%qFIfdchezS#gGXTV_8&MY;0uV4zr`uG=Cm&>k6Rzt%oucP
zoO@jw-xjS*O~05C)oY&Uv^4;E<w0y;{2xRrS<r6%PcY$f9?qexYy3jJaNki~>SWXj
zkBt@SFf<{31=nGiloYvjK%S(Zum?}QKp6c*o64>!U>)nPTxy#qG*n!{sNB~O`&^AK
z@{B;$=J(+EZWtZU$<x9TLsC5dEG%9S2jY7J;P}rJfbJt8FP#WF`CkzenY%-tad}PK
zI6<FM;n4bvkieJnYjVobq(GONb@+3F9XXt%Ybm;b7c|~~E6fni!ara1NO4w>SUmd(
z*j?wL?XoRK?zJG=*G>a(u0{Os&&MNE3`u@4;}oPcL&WO^X!fR+bKiLv>(<EA_Q7l5
zRKZ+qyZvCzp&H!V1t{2Zflp=Kv4CGWAg#U=Z2wShX4xRreR{@Mur5ze(gsiuy2B;d
zzr)N&n@}<$A56ED;>TBPKj@kc#msNwXqmuQ7^{P<G>`L3RmiWMD^Yv0KIG-khM904
zx}~;b^4MRzpf%YwebgD=<Dd-qdxt@fcLjcPPC<`32AtG28T#s#EPYg@L;N0GMv)E&
z)<YgR`K2yxnRl5VX_kiz7ueEZ{bv}TA&Y%(hj4J4HC4PNM?FF$K;x}Gbzz<SC7Fg~
zRmyFQAJ~t{76x>A*9`P({Egc-DN~;lnc{bSxgfvgEJtUvpHcW2aBI+@H@-`dI>#h#
z)>}<_=Y=v+oc{<aR3$Lf^f;&&mGBR^VKlhrH~x?@CDUh@VSaQiq&6y0)ldmK81f$L
z+m&c0%XjA8lces)MhTDIV`q!1Y#b(`MP~fY=0m)b!8*x?%AZVOJ?gvQ|M4%n&tvnQ
z`;Uc=62ZLt$)&>Bd1KHqY&V)G#bT25DsayoFZ^*#o|Fr^IFtF2F#BFOJe;q=7<Y3q
zz&8`q4;1qauU&BC%f-w?ql}3btee63Kb0HX;K~RK+BS9thDs_@*qRCj+3(S8m=U+(
zswEjW>Mow-O{v@WQ1E%s&)1Kc&X`;8cysx?tfwJEBM<11DYmIlWGziRvsq^Kik;9%
zUK+1fYLEbFF~m+gg~wL1&RJOoUp!(8H#m1LM87p8W^Zp{?J7Ss9iv04_WQH*_FD11
z7;ExbVh=`neg#{$V?1^-33($O#>Y#C{L&%VzPgw>Z5b<2a0wDRST9-26|=lg;fwV>
z%=`HXR!4up=SjL$-X<RQYFUH3_ITkVw)4^VeaHPeT@6EPjzQ!_Jt8Q&3?q|TQSH1r
zEmQu1@rA6H>|jWOYfLB;C}DJ+KDl>*%^O{7aDR0%+$~^S?nE0Z@x_4JDVkE34Z%3&
zl`2%F-^PdU-=gZxXOR2wGv>~4Kzj*OtVm<dhKW2^EKuY&ePB%Gt*Z1avh42^EBfM2
z4lHx9r5h)j(<wd4V6Ib#A3cj;;C2nXo!*5z_1XS?>2Y)lEC8b)uJDItGafiglVgj6
za0*(}VyTVX-mR?|?xIb7mS2O~dUg8oh#iKr@BOkj7Syry39egjNox-u2hoeuh1u5z
zxO(R>)Si9|$N%^M*(+*M_izomuM`PSmP*rm8cL+DUkVc5Zi4bJeeA5}0)Cb+;0b#c
zOImcGL6jL)(J&wrbqdj=RR`S0{ROQ_BJ?y+rR{MN5Vu~Aywiw*VV#4Fn=l+awAwg#
zOO9W_Sag$QEphIxcet%cn&M$GDyDTpkcNP02QhCPn_E24lOb_wJ@~*<iOgT9LTB4+
zqd?i(C0`{A{MKsGeDNjd5B!cZY%X%G-jA{U$zk|d(FzGiBpJiQ9iCrOAS<sJ(8`1|
z@P3^RFIXSfY#NX8y*^OsyBq@_sKFWDm`Iy3e!`M+u3%;@H^I@IY);goKjRqJj2z-u
zertp#b_e$Tph|NDrZlNp7p2eGptzEUg0oKOnHvO`UoqB>;V0~q&*Qs_)^h5i?T}$M
z3q&@1A?w*?yrm;cHtFe;FxJ6N9zPmHfyjCGcHn-4Lr@tVgI*i!IMKMdP<}L&&G*=R
zXri%Ld!r>fKQkgOHjgl>dJOtqUyGu*Kls}!jMuQkgnAo&1ofL!d9UunyhqGzPHa>N
z|2;a733(Bm#Zq?HJ$VPVS8I{<)c{FnjWG7)E^x2V;syD#LU-Rz?wiF4l(qW=rOMMG
z?-Ag@HFI2$DnlKkg3*8T2(Z*Qr$cWARPwVjy?&(v++Q9L_f3DrZQsv2t-e9r^z~}=
zZ?G|``Vh*$Rl1Jl(<efH?=KLX*<P4<VJ&`0C`TVXO}gbJ>yFN-axHi0<QI7!!^Jky
zaAx^%sy~x*U5BH&v}dgQ>c%*U((G>0_Y!S2)G%;N0oS<SiuTdV{9SPkq^sC~h$!*f
zx>~@|<qDTKTn(capTx(4@t8BM45yWML&5@ktXRJi?~62vDC{zzlOgQ|=6IVQixWl~
zkXdtd=}oEO^wFF{*lL=HjinBJgLDth)wM>+?VqrAc|Hgt$B1QPbm`-1n)KJOyZE@_
z9otzS=j7K|QDwVB;8|gYmv1W5%ybX*v|k6;Z>6K^n?eY(l_DJ<|HT|XGh(U8dJ2pm
z>aI0K{BYYXeEj4yShmSQw&gVx59C9|n47SAi6!aVqe%r5*Nab$y^fDBTT$(S2N?X*
z1KYN!(p9+@B>r(L8f<)lQg;<d@>K&!$$O6XD^+QvO%aNYY!!yI9)i8a_Hb$SQS52a
z#C^Za$UV<!j9kz1$)^Ii`Z^(s*&O&*hdEsoz_`EHlNk3xf(}gl03xrme3dAf^Iq8i
z)3c6Y%f1u*4xf6At!3Xm66o6D{TkIhUhxNgS$^Nu8y#Er!fu~S%%}9H(DRuVN>=ql
z-rNX0s&7r4Bo1JhQx>GH)S)h{11-L1PJcug69c7MkUruHvZsomi1|cH#z@n=iE6CJ
zu0Xp4s_3|3A1Z1bK+&!|;p9iBuy_yimT%2MfxMe|vE?67GL>PjsWPr<x(vBZ45-<Z
zyWEc1ZII38x1wG7g|>G}xb_X>LEW($i+>&EH1D^9&%;kRbiWXDZ?fG$>O;u$JBc5)
zis8py1G1wv114^kA?ZuTV$aocFw<0;ev;B5#{@sYbfh{-*up$U6BqKHt|xI_wLEp<
zZ-7AZ0vBr+&PywHqk47<*R@!Ji+{l08~4WvgIDdr+Se=5<)<;tI;TqX9HP;!rxe7D
zP2~Lb0y5GOIy`8FD9@=l;jJ_csJ{qPo7AaU=22eIu$sSo`xd%sPsgk*T_Sndj80u=
zPNe3u8UOal=q@V|Cj43oA#rK^`!XHsarq2i_i!gJ^0Xlb{TPEc>8WsWJM$T?TrWO$
zZivl?PeXBGKW8JWNG3`(VU+zREPLDoGrr6KRFXl@`U1S!txYfOHzXSB*D!KJId;vx
zfZfO5!>hy_D3E1*p(!V^!|oend|JWvU;}DC|2;;#G9JpfG!WhTmmgh&j0tH)XKKgc
z*y&c->2VV)I~3^;3oB}8&vr@yt!OfVvAidgbI;qKVvTnWx-5zT?=yX{T>1c1Co|?G
zFt=voFmzTa#uB*(j6X34C;m|(cE{%9-+{j<3i`vFi?yi8kDU`%7jx-bTX`4NbKHwj
zG4O|dGkrE3<i-d7h1jrhP!_=Yk|dfF^Vv|9r$sl$NyFwX!)VQ$(<t6D4&C1@;G<fS
z;4s@MoBmZIIV`^$=-rL$$Ec8$Q{8ac)|OmaAxpLuz69@a$~3P`67?Sif?$#a@AFzf
zH-#%u@4^8n2o!+y3P(&XdN1_bGYZGGUqt^Wg?yzE+ab5jMUUmSoNd%^{7@-Lx-N|4
zQj%8SUVk&>7aGt!=8HVDW*9kWr9*Ab%MsPmW03HZ^(2*!a1upkBym^_PFg#Jv);c#
z!P^o1uNAs9C^?aNaAr^NIcNs$(N_3rjv~=j%>&;zHuz7j7C9~b7~F#*g~|UWa=F*8
z!<1{Om@s;PzaaS&tPLVz{C*`WyfXybM*jiFhM7D(iNiZ0RqD~Tj?;bd8ny&0kh$XT
zP~BdN0)>58G+Bepn0=VLB56Z>CRK|^Os&9(TPHyM+;DEgPc^#3_5~I{bK)9|d@;n;
z1ZFxOhvb)L-0%y=q%-RbmYwc|?dSDbSF96jfB7Mw9*XOa8j&ks6{$C4D;3qs(IrV~
z*s{f#F_xsU-Od@B+!(|8@B$oi?!dc2S8?%w&td=EThOKTg$rDuN&8xMgYLXS=+mJ*
z&ltwVBRV<xlgMwdr+DgL8^S^*FtoB4SIMc69UeMVzCIG18{2SNLjz_QPr%5Z0{Xpq
z7<JEm%>SdRLzOc)m`Uz}^nVI;{+UGd3}*coI}2FV{|OCNvo6HN4(5L?!iE<|QT<*L
zSdNgzzooKdM$mdp)Ct0W)bC@)D{Ykh){jyh%EaToB;V4mLWiZukoywM_nyIgSnPb1
zJynYH9X=J?12pM^$u`(oVoVm8u0oNB@)@%FSl%=rv>sX!mGwgy(z=e#E6O>I05#HY
zZ%w>BnG<vLOuTjovC(5TcOmvN)}C!Z$!i^8GQS7TE=Wa(F%DQJaUAg+n}bT1fYXb9
z{H7*JbTtlwl_m4On*@nNWW)KzNBiK+x^w7%Kc3rfb{}evDBzk>8#;x#GK1OAXROh3
z+&gX~4D>ZKeq<PIoYI0-^fzxbbrDQ5h`?hRw{YyO?-(~EMS3#Tp*?FFR1Ay6PBv56
z!rVGGO18vN@en5M(xHp4X%V}9JuqT!Bifc81SMBx;&myF^Z6UXYqv2>|CHaD{wf6i
z*>6Njy^ZnPqATcTaS6h8&*8qa!)Vv>dUW5ji5ouLhE}zG=d61>m=|{$RBAMW*E|=H
z$}PdNgX3_*5c5mZ3@-kzDZEn1MDqL-UfVW|H0=6<8EW$(ZkiTxx%8dSd8AAleLo9V
zhscu}wQxwLCcM9F1LuDzj9X**1ZJe_LMc&1fpZK#Wc;7vMfvD&bso3N$&e#!*vw~y
z8~W{EkCP9n(VzbsQ{no*cxn7~^ilFh|8E!ILiImraOeR{dQ}EHPA6m6t|VBN$oMlk
z-?={7NM11cyD;qcbg)147pEm2g9~4egGL%-EFF-dGu-!}{-0h>;ICb{F^Y9MSJ-pw
zCO2Rz`*Zz+E}Y$MilYxH5H+V>bQ;XSR=Hdhm#`i6E)Vey*;<_Z#(>lWsewYe2J!h%
zo+GOyXnKG#wEd_+7rilPrn?uqo%_HfyBOvU9{|1Z|8<q#3h%Bi!Z|M#iR`2dQ2o*h
zArmfx+w4Cqm!<|A{r=#XG!-ImnF2NWIxP3EK?SSSAj)q(N;?h17h~U`Ws@PE`tK^r
z-}%bvN6mu32N$^wjG-l!BSm5-j)P)34{@EFh%bzkCg~@m@wc@C>#u#n@ScbG&a)Pj
z*1pEYt-s)xRwSMuFd!B3L5w5T0(}$HV8_-{@RC2t9c+{#GTaZ`Jo*6YXWZvky-mTr
ztlzyW_#AdE+`ye<-N*!=-?*|g4vH@y6dTNohk%qe+>obEL-~7f*MPZ7%Q)UQV-aAv
z0J`knaQY42F#28?p2TM;_-DH~#>SKVoewaR?Y9Jb>xJjNU!rKp1(K(93A<_>xNUKx
zF;z*2#%zcHf0e&n{rD%~)5>}`S*?)TT8|GyU*q@?#;9I%1$^2Ta4ox@acS`ju(<pP
z>wfIRnd(15bXtb9RhFh2k7Q_7<9rA`#?F7Qbs)P(jyWOxv1{IU47Om|nyl{-!d$?%
zhj!q&4T_|2su3x6o5!7w)28m8VT_AB26V5VhWyX>K|ORVZ(pK?oBN~je>u3uv}YLc
zCYcX2AH>Zb3iQi3MY3~q1CBb(=8UZ$gxasR!8@%F@HqM~Jl$tO#_bc3I;B)FuN;K7
zFCSq7<0-p@{6K}IE=)e9fda=4*N=aGf{XDHh#0L4*^j4l)9UVFz|IOdy`E*PONz0v
z!4`Y}`HuUa*Wh)ZQe1ROf^gPS#2_vk^5#UtBlaA#mw1bB6?N#rDpT??^8r*9f9K`R
z{Ndvl#?rZMhZ~uvN~WR>iz5QK*#{OwUQjd$FZ4j#+GfT{NXNk?9eAC26C1k6;k&pN
zaQXEOE5F}k%<N}i_CNxcJ1)k6XD4u_6@_mO5~SkfI=sH35Nww=aI;(fg~!QZ=-yo>
zjy$ML&a2d6Lb??d4qDQzm~KqbJ&V?5@1f;-D&IZ!FEr}^z@Jw7)W`J`#uyD@{i;7;
zJ#`_5e^93Z2V!y73Qdyv{0=leR^%pKiGcV0h9so=7oPpgc30bequK6MnDqGuh(dD<
z`&0+e()cs#Pfg~JjMJiNzQ=HGZyQGZaEARS?tz1r6CRhBC$Xzf@-EK_cuuQEhtbVg
zaoiE4S$6c`WL<J=GUF5XB|^;^NosaJ6WmVP&=<EVVGHxJPWtc`lbyG6nvdRMU5Ysk
zEK^1Gx{v(gw$qT-!{+UgYE)1&o(~+Chtfy;aktzR&`w!~3;Zg<>#q_OuS$l{LtjC2
z-W^E#>;toRSpek*abuwYH9i`L%@_`??<`65It$Vqcm@~y+<~ghrF_{pFYFtmi5I?>
zz!x1;YCrZII-a<R>(trqy88&fG1LGXhtK7%{E;L(emuu;3n`-R7Qy*#z5~TxSA<o?
zC-^zq*?2m!9A`2{)%aNj=&t3+%|<zBjgum>wNkWu<Qs@)d%wD!Y|qtU!alo2=rpMk
z+8s3@%9q_SC)~k={zF_{kuFY4AowT2jL2Iza2NR3kUXeKlfxT?+ubPU|Gkeh*?;FF
zw+sV!J>p~L%*NDa4-lWwr!5CAf$EYX#_VTYC#9QEaMcW#|2G}qGrr3j?hBk)_7eOb
zUBdV5&Nsy{4(%I_@NTFJ3iexb=?b+NTgh>Psx?Bxk1{l*^&P*`pE1T=mx0-`#W?r;
z7chxWgz-&Ru}|qOZxh~#PtvqW8ZCgK4_$CGyAfNQD^O04^*a~1;OX)K6t!mZ>pJ!6
zU(0*!ntBEF#xXAP&2p5PC_$!Ft3X#tEGMp5gCp%q(dW%)v3s+;kS(*JI3Qkl`cyp(
z+5HCpl5^a#sn)dolL}pa=_htMJEHowCcInyzwdw|1e)Y>8)`M_?7jgE^bY}<H*#ci
z#&0&K@5ba+x{Rg&h)cE~hABrr!Wv-xqqpC<yctX2eY*jHMHisxwLJCwvk}u2-eJq=
z(_F%PXYgz?h4tFzWJin!4ea!Tu-M-ySe@XSq}h&zTWv_sJ9TjUuZg#p^#=FVUxb44
zP?xeu51h8M9|g8^Tug^`pr2F^#v5IR%!BV?Zpa{<DP!kp;Va&I$wffc`yRpE1uem&
zvEZ=~A{QBx;Rcmhzrc`2xQ>LBmzB8mxh{2_a}dfl#lcN>wjBG^8bqG!c}t5`V7=!a
zq_H01n{#SJCDe)}pB=;5Ryx7VmyaOak}*|P4OnLB8LwM*2P0Nw;>E|qh;N%Rw2nQB
zu_|M@pAIGj23&D*{U0cjN@ToZMQl8#2ZO73zz?}7yu)U4pFdlJ_A*oImHQP_CiQ}t
zlmK~}ge67yU<q2V-BOjSgHIYh-YbT^ssBLuJXKO`kS{!-$DUhKPf<4U8Ei{vgWRK;
z(5M~3<<`w%1)qJ8{GTQtIwhXD!kVD*bO9Hh&2|>)rgUXf3!52tVzR?RUL$oFt=rPi
z_SI*>YsW1NkqCysbAwprHJ_KURHWxy*^bM4IUjq^m8%H<g6$S-LExF-s$cRCG-{0C
zE>5*39vRcQWjn0NT*o?iC8I{hCv{>$oF29&$6~DJERZPU(fq0k{WF|#C?-6D;)bo@
zSak!}9qh#1nqFw@sK#Mzr-n^8z$eC<cUi|674KO`SyP&}MX$u(2t8Ub`81!ro9*Y;
zCc&sS*1eNq43e0|F!i!JJz5fl*_<9Ugfbs1+q*CHOUKI4-w-kJ1A44q57jF*X?0T~
z>-O1mGpFAJNtW&L&rRSx<=WY<^d@Bd?#E1>LCBCWfOPY#AnsX<?n+JK#2q)V<y#G(
z6gnJ|$7GA^AKb=vqw9D*aR?TOvQhipIN0N?O?-7m!?`<hWbuV3uxykhi8YYqk}rni
z41YWDpYW8^_<jmww#ZO{_AG9R8e?<EXBOCnWuaHADeFLTxaRCET;A`G@&<W)oktD#
zneq39I&B!gbth)94kobA$44p<>K+dAnq#YQiD3mg4zA|lSS|`=ObWx#vM%;ODtu0T
zh$6S&!sg|*;MLv4d(XF^qr8;qGedPUCw~}G2x48+G5dJ;)5YT4={xaa`b+dYFA1^H
z6M1)GDCho#K=Dr(ajjDvbm?2sy5K4-JHH=2UiLC?Fq;{;R$;-!n=tm7CQUj&8@i6m
z@G~B@a}{H+LByhUP-|QUjrk3{<s(-Nyfm4!es~?MEd{hfbq~w9uH)VpokeN&qYx5v
ziTm%Q1nJ884^?BkLDX^yj{VGr>|5#lQ#WO5Z7qTg><%!J`2`0}?V)YhZCKVIOZrF2
zlRlgbRX<%g*>9h4bA3G=Yd8p-2WI2D1*#Nhv5wrfot&t*L>!xTggZ8UBp#Eh#>0aH
zusYI^tXQQ^1gEmZf%QVpCqR;~+kTQ;5Z{Aq$IBDTQ-0vnP|tW`75rY+Suk|t9F)n|
zqEn?LojYO+7RLpOpWaoZo1+kt^Vae;w3YP>pP^#LC&(RL3BK$dUq1f}pIWYuMr~1O
z_Op(|e}~hfQ!b#QV=@FKnvf3t&#=-XA6paUNr6QbH!I49o_(r8rCS`J_?;5$3q6K)
zb6;|63|he6Yb$K3Rw74t)S%@(34F0@Dtj)TcXhnDfR9ZW&ktkoEN$s&IQv`|^D0c{
zb0p4T{7WNRz~*wk0T)63PAT8WIy~#1F;3Z_3XiKMfpu^jo|y9vMvTtE>k2Ipo8N`n
z`6sdNnF09kXyd+b%VEq+5A-^|4Z9;X=rFA+teaTI<t1&zf=PPN*Biqx{y&n=G%Tm~
z>%(`W=6TknnJ38*x%avfq9i246G8~d6ne;<kV;5W(I81eB}3G`*VQCVN+FemRH#Hr
zQii<O|9C(7)}d*)*IK{dd1{rx#T>R%cRY_RopaIU5OZtl$MI6aD`=}6#o1;ahc2sb
zT=QLq9CKv6qh0r*bWSm*uGUAt8^54;(gM8j54)GIiq4I4d<%j|$p`*ZPjTK0Z1~=q
zWzc)_1s}Pro8KR%LG>PeLg|feSgRDsFFgJRZN#D=V2%dV>mJ9sW{ls}Cxs4kCFyTj
zbJ%w^7A~zdp~h#nKrg=yZ*0`1Eqn>Lw4xL5|N8}kPJR`6BzM7>j4BjA^&95-s*weO
zU1;?vm7BrtAlq;0kz>vZP#1k0?X0C~;JAE#ue%P3-2NPW?OS=X-aQaG&mWYk{cw2t
zODqpshGPq)NTHn)zvI3l3GJxi%59$VQl$bKuwDsueCIR9MikDSKZ;=UO?VSWVPJGE
zZa*hYs$9ZhjgcO)I$6Y@QL4uZ#@slon#E@1d3aHb%_oMI^mUyo@sd-->b7ufTa(II
ze`Wln@P$zC`UggjHKXF!v+;-dJ?xp1fx#7(;IL{FTD|D!e419_7EKd48xo1(DlI5*
z%n>TRYyjiXOMKn#tN7*D12C>(8Qa3mQ-?#IpskZQ^=>lcph^uIYTD8Jgfu4UE=K<@
zN8ZM%jj?6Ufquq%*f;$;q)BFgLgP_r>ysn;p04PhF`ExfYv2sK-omicZAjKs=Cd|@
zhn)^=zjwZbA6ckJhi+&RNmXeyK59U>&2+?{iC5ube>XHHmvUK)&w#+>w*APljKA%x
zPNNQ)p~MPJ8nDtD;yP;>Gxa8#Zj~ZO6HJNX1Lir_=|*=y6+F4jlxDuT#aK>FU@X3c
z^QoEz#))-Y?z(F{WZ2-0aS?Es-6`%LegPNOtCD}3M^fjY%Q(M#8OoJ0C(glmHdFoK
zH?EN*7j<4hYW#Y}3X$V1*?y^2<1RR9o71};D$JE~n`==X<RSxP`P&h!L!+LFeThXN
zH}4DBnEk-Wnqu6@+-L_mNfN3M&9}Ur50U#-anq0@x%jdlr;lX!(=D<>-wR^Yhxvd6
z!^?&GxDNjfxQQuqlxd}014f!DfZLmUSQ4cUGnxCKQKK05^(TR6VlcE+?FN^}<$V6i
zyRa)xKyL2QqI*ru$gCd<q@$%5yHfOMOS&W#a(UqC(}r2^vOz~K0%p5~GbTUJJDN@4
zSI;#j`cw=(gOj;*hpo7ImmaZ*5U0t<a<Rckk;H{g#DuER5Nfxb?FgOiM?Ev4vNxZh
zpH>b?EixnSU1gjw`yIBJx<Tk189L9l5P$F%Bzb)_?&>{@@0p8z>hYJDa_|?VS*GLP
z_2F1}aIYwc%McFsI)GsE2+X;zMnc09xM#cB*V;pt-OF-0i%V?An0_j+HfIb-PwU3C
z!&Ok>VN7bS$rFtVajZ?U;akH(!LrJLXzYE8HwIonsEjY%8T}Pp3=Prvzb=uz1#^WK
z=kt&MXc3h;=KP9Hy0lBW32(bGPF{^B$@^kSyH3o+dk+BIXXS907+>gdjtSB8Lh$Pj
zfe-DUAo*&Y&^Yy$a8B9-)UQ@Rm4=6W`d@R@^XH+*>j<W|%1~vN(=NR1!<p1af$_Ow
zVXUzVIZ>oaLWh2Fg<^8Jm9c3eyA`oKb~9Y&ui|Wn+i3PE7Vc&+-`fUn*pj;gyxy{%
z*lKaykn|kR$DTp!*Qa4jzC6u8v5GO~)ri<C_AwSE^IrtrFjv)(9-90Kd=6}fk}0lW
zzn}mE10p#E%jMwo!wk<C$&*+MT_UcS1LDsrG3MM&a230R!>gL{%A86(H)uj9eU^gk
zF{|OmqRZHK%#yBCJAv~Tvh2x$)0_(F;nG)W!L7q-7`Q!@Pm@Ulr<`#7wC)rN4-CW5
zF&#2!`xgx#v$<PdlGt9h<^wXSG4#q#s1l8bjC66vR7RMf*8&?R{(_VxZ^4v#7Dn9G
zq*G^@QSYjwLTgVM=<qb6`7e~I>*0KM-@n86IxghGB0|wyvxtv0iQ#(Qn3JwMCm6@=
z3`iTCM4y>oaPJN?QvQ(g<8lpXg7``}Gh-wTdQr*gq-SvuEd#B;%~A4=1ge>zgQd~u
zFnIAj_V;H5)%oHNcV1qFCkK1LvkSR7nvD6h!Ho7qijz@-bWBQNIhw-{L{?qbd9`oP
zm`^Nfs!PBHPA&c=`e^BZ(L)PTZ#t22cV9jbWqrqy@oIFJzZ~_ta1{KO2&k#G3H_s5
zip!>p(;&foNLGyIir$#hMO!P-ctR%c{farM(@MDs>%M~I=?zd)Zb~b$3O(Nz<Bcuj
zu{QM$-}Xe4{3uW$PXhZOee`+GTQO5qsC!*l9#h0w+<yUu-fQ`YcILEGJkLD{Frr;c
zUx3rvqcCGwlipX<qE<(4@W&KJLQrARRKcM{`<P_|u%Y@be0cZ~LuO`RWRgAKc;_-q
zp7Rby*Rx%N<U3fgJq5GtjJU|R(Y#lNBo+BRf)<m{TwG8P7r?v&SsL%bZ_PI_jCutA
z?EC%U#|db@xdeo_TH&1ID4Oled}n<jqGY2*@a5|Wx@N8}*)}H`J(9w~K71}lE||qd
zZ<+-qGUjwtx*9nv{secoTjO>=6Vl;-mUH}V2TGD3q4)4~?CmV%{_0v0vy)e`|Ca%s
zE1^%#UW|olKgG$o0Vz6=6%UyT24qW;0)9IagT5Ox_{G%%S~BS<?lL@z{vZSu{nb!h
z(g4!Wi}6Hj8qToZhp*n5QscS%`BW7hn)=8JWcC%~;_hHfqzW|7QxB}d4#Tt;tr%&Y
z&fL0jC^&sc^!2nQ9U}T9?an#K9eE5kPh-2xag1dtJ)6&25{jA317x-@lCiA)MLRD<
zgM$5Wc)62hNPHP*;=UaYzl(-{XETQHZ#fe4^)V=h$&g0&yjn5iC+5ZqFf?5Ze~i(f
z8hbyX)kIG;`_+d!XFB<}&rPV~ENkA5XS<v8&3tYOa=S83iF{-cMu~sJ34V{E&WGhI
z4(393TO#bAc>)iewL}k=flwlM!T*9WXz;cm)+|T6ylfB)r{ky#rgTg}2dsVB2o9-w
zRA5T-v`rX`xkQB?^smEm%SKKxwOds9@D(5Pk31a^4MInZ1qi~g+b6JHOsK&H&T)pJ
zaKhFe2sNqZ6N8&D`*aLnS#uI5u=f!iixw{Mau&CtIvS#We*oiY@uHyQ0MYk<l}UMt
zIk<29!v!Zfpquz@93xi)Ng7q~ppV^D*Vj+owLch*9V>*wJI}%Fh7bO>RL37<N6<pc
zquf9a>v((36wSMI9bX(Uq}Qykpkd!5usxwc7d$p3CGXg6h|S(X>(lt`n|>fLJp``C
zCn3^98AN~PqC&S475FOfj(Z~ElH@5gZch}I<z-^-n%8`UC>8FnxrlR}GVs`$nV_+E
z5iS%{CGVsp$oHTK6wFcN?y+2A%g<MQ@@f^n*31*0R>b1t1hxxGG{l5e-Dsw!2oh_V
z+ePCSm%UvKRa8CrP><>C_Gm;qx-Rnxn(uKG^B6Bbp3ZU*KHT*OX=psB7wbFnp;Yb`
zOy1cJdW?HG+ne1oeoK-=<p>pwZP%f^7siH-q7u{1=&oo3D%qw$L#GY!k|)#f6Z6sa
z9r%G+Wns{er$YSAbkTU71+TI=f$JT4n?Ewef*jhB4(`_e&@v(cY~R{*$tyQ-E&0)0
zo?ih9yvA}>LjWc|PvGzsEfO)N8l66=;QLT*qEl-ON)GI6{&y1p*s&b%#X5p`>{I-`
zTO8w!<jLS*IvjKTfzPII$697bSTWC#dKVl5t25`BBQ##*vhFLVQZ2?$_P&a}EN|2+
zV)?SCOE6-oA$oonqxK|}<Jdc~vCDTpck6fl@PB&bK(HhVGz<Bl`NO=-+c&)U#Yzx8
zDFaEpDd=A`mg^6&g<Z-yINNzS`g%2U%=wMwwNJR<<+Y%qyoFym;~wsO+JY$(M$~7g
z8JiLHXoiA5jZ$Sj(4$9SVE%8EIk^_r_$d(YKjyS~`7mB@QYTmENRWqG+N5-%DUFW&
ziUv)manQ{dZT}tytuh`W!bVa}c8l@!JBV80W#Hv`60??H2f<1zN_$%gzR~@B^5im3
zaADijg&vH#lO6~=*M7$HKDxAUYa%4wn2$dh4?(3%9_Dw8VByHi=&n}Gtvb~OH!NR(
z=tL@Pm{SafPd=ji#RcHEs05TESkHChT2$Gc0k~;C$}N$i^Y*ZQ!jgO3Ja*r-J-DAw
z$tVF8zL!7UJ_o%=UK80UN>h918(id~D5$xvNG?CqCVfj@LvQCzh}x(@f2=r)gYl}^
zJFuQNoZO6ivc!qg>`~PAvK`-i*9;TpdSPP)b1k%<Mt6mGyvy@UsCqgYs{M@V;d%ji
zLiV9+@ov=oAwgv(n$fx|A?Rgq3{giu!(hiD*l~Cyy=}~+<02#OR&@t%angYaL3JP=
zEJu|x1hg`oaV6clx%5LikSv!b5=83?L-X1Gtoy8J!O?W=eaQOMPb7$oU>4}#lB2c3
zZhTBm7#=hC#BzIM+$X*bi`dWY8CfF|Wz!9PgE_EytUB43FarzchKd}WE4bxFM?tdG
z8EUU-b4#w9QS8=0dre)uA+ANY{nn+IVt(Me?`llSDMNR=y#_O%4lIAs&UvtXRIbG-
zKIoqdeDdrBlplK@w@mNBT7$!!e4r^2o%jqoX#y-9<u7c{Ux%vaMp5BE1JG)i1ZI{=
z5Wt>);{Pr}r%i5*JE93L!&TUOLkFYJ20+&NM*ufWKrVP1#)LXzpu852VDIQtj-5iY
zX=|}nyBNfqzQeTAk;I_Jm~QFw1?jdj7|?tHkJgW(SFRh9^5UKN+olEU%i1A5q=N6Y
z6X&w$vt3)IBK#-fkw2+P4!qq9*6V{Y+jcEyXfq6Ra)VK&CIZ|-rD5<>G6!?puyx5P
z+?~#PPnpW(*|YDU&HBM#ww{8Pd$PRFeRdO^af@4WPlwFbuL6&cZ(+m_S+Y#<44ql$
z*I7)P9GiL+gnM2>i(jG0{ls=IS-n?SxZtp;@yQQffjvj}E;k@}Ukmh&S%xDq1J9J3
z(|wz=;G;wVI@(CXu?*&X>DHq&x9gIUP<IfF*J3w_y?B1M0y*e!Mm)$z%yCS>dgT}J
ztMCSf?Gd5cqDFLirHLIDNBDRPJ<>7%2sp+c6xkQ-z-MR8=m(46FnYZO>ASE2HQ2o0
zpVyC_htA<;eMxf6g>^!_lkBC7?%<?JGBmdB7b>V|W7?b(%-LJY8w*|tduL9Em-E>k
zu;DA%_sznVGFg7ryhNPN9GE4!vp|7kT&s!(?y}@H9KZiEp576KyG9w3nt4&|Zqp00
zCoHHS^1U#iYCm&zHlg_1uizhcpR2on2`9}T#+y#8Pv4=zMfRm}dZkj-?t2PGK5pkP
zKDdR0w>B}?$W6XPIt4-+e?hIbEFE!Lfef&YTtgva%IJ;~zUIw{*5rKH+3bRjA(usC
z65peS(?+~l@D5bM2k_9krC7<jj00XZ;M`k<#kd^Z)@FeFJP$6YtV_82;4OG{Pmw0-
zs*@{p6ggZhAXSq!(X4bm-gt8!q@7Pf>y$fS)U7~tG_~lS6$0XN^aj{0SElw)Cd0k)
z;c(IWE+ofH2kE`da9vfJj8Kv%cFcP-Yoii*WA~8lRt4nyT5~e!aEhynXvE982E^u}
zG~ICHE7q<VjpG)v9AUOG?VY4U({~%waN{iawN0PyCa=+*`@^|J{N-IekE0Ji2fOXx
zqp-067W`5t4I0|yu9-NwSg%8Ey;gH^gX5rUWf{z_V7!&^9T+`!B@TA5cdtDa!rd1i
zL(&mz+^4t<zeY3mJL{oDu0O|VKh!4y-Jvk?l_|+x{v0Qt)1#|$HE6*o1>!I(5d!_c
z@cz#gc<UPn@yp_~@a$AFPCxJlC#meil1_b8vX2Lsf)38H2ZWxR6zSe&Iz%fX1hmF4
zM=j;wI9mNSh7D?vqKCRfI_D({{9fAoZvBlGVwPAwtA>l&WJ+hWJ;&p%%A{{&F3=NK
z*v@z~KSAGs>J6l@j#Chf4@P|GYeY6Nw_;Ml1;(smyqUdeP?q%<y=N@p+f_R;gz;tX
zr9NeMZ~=31PUraCQKaz8BFyb*<8srk;@C^s=%95E1@RlEs?58@`%DOh_Id(eQ<>x4
z@giS%x)0S>ltKgZm=s<Iq4>!U_{LL=7C1<g1JaI|kgh~e?b0GGcClR0lH9zd4-kr{
zeE`8l4bGu52Zk75Bil}#<{paX7ih72@hTm9xWR<j*6-omD*oWLYD1DP_{IlMzm84Y
z`*2IsE=c>LPFp`_<3MRX9<i4s%B^xFd`vf3oW2Y7`f7Ajq!bA;%?10S$DD$n58H8U
z=UPX;W^><9KG3P0SAI`1rR5I}T=z$niLn@bn>jqrKSq;ewwG7D#BH%=b3xlkJeAL!
zreX@v@p&|OaWB!N@(+YP*P`y`>Uiyd7B#cs+3ckOT^eICpE1!x3?6~pl&d&j<2mR*
zm!<Z}Dll>3HLR5k<8y}wc(s{f&~&8>X5<`TEUeLdzp5C7ZvVg;&hJCN;vpD1piJDh
z>A-R&Ydlb^hkUCfkvb(!Pqylj%61dld%GCFsxw#d_ft?sHo<Y0F-<<{0zTH7XzX>H
zQyfUfdyLU7v#Nk&h9cs?oSTBR|ME?a-Hbs~#Mz&!=L#kY$cFHDFgW)B$joOQQ-wm*
zx5<Q<{%o*_{{m}Ohq2A40KEeOc(vlGSi0H^FSC7;P4QK@Ypz4=y4vx}UR&(G&fE-W
z$<6w83_DuB@rA87xn9RgF6Gu+=wGW&L&Vwh_Vp7Ov%3zgT9Uc&iOo1=P>tvZO4ID;
z=i$(veAxYSI%XZsf}%H?<WV2H+kB1%*M19XJS`SC)YrlN+qa?PPXjk6>JOH;xI($5
z9Yp%*aJEsg5F2PnWqv>BV+ML~#kxfFoppuRmyrM$r^}Eu;w?<Acz~|)MQ}fSDju0)
zOcFLLqSH7Hu-bJA<ZV@HORp+lIODfyg-C_8l=KUYJ+p+#;&FUluP(jH?xi3#4!w8g
z3rBWHlQ$b<!TOaPm<-&-kjPgss>zhPq&?wWlX76yGK9NvEOQy#0dYlI=z67*?QP76
zUet0_2@Hh&*G3ZOZK2rljm<@qg;RCPWB4U9Rrs^S0~;%cc%FSf<rrT#M&>lT=`}#W
z_XAKmu@sBa45@V9AUN`ec;z#ybd0SY4LNWd7JoLP#wmY=%iq_5!1wo*g3n@foP{ab
z%W^**d#1tk>o?Hewg;uYJ;vp8{BiQMd<glS1p}Kq(Pa1p<n2C!FP2@#osqh<XI=qr
zoAw+UAO7OT++?@Q0k%6(6GQREHz3P47GADrdDZz#aNFQJ2u;2LkM8DU!ueRV-DAuN
zN2ySUDJ@VpB^6|(y7=sMbGR<0d~no!g`po-qSs$J@QxZ3jd{NtlR1_zH=2ZM3CB@#
zO)r)oyMoP)Yp~EkMO61?J7m91;RC-s=Ev-tj^_$4;i%V)&9R{-Z~hX-&b`|P{sV0O
z8F!1%^@@hYz4F*G{GIo^?SUSHm5fg;&5gA&Cb?>@oZ$7%sm&VmVKDX#?^-$lKSQ*r
z`G*9So7@e8f=zs;xjyl>-^dMrd4LWPEFth+gWiC{_<X>W_*aMWB2VUZI9mYQ!cRc<
zty6eZjqyVLKXQ%}UkC--@m%AIgK+30V|AztMPPUo=8V>-(qr?WzMuxyXGoFNwF3IP
za}yL64~rsKyys>18bI>m5g7XT6yN?#1HP)~V{Mp_bD3<73X8;1!*wMdWSNZT@(<D8
zV;*=%I&mwmB;bFY0@`}zIxds4AWz>&lc<C{a7DQYBg=h3aV%rpq)g=9gF~Px;sqR2
zTnFE44XIwxYmm9t%S+RGXqa~m#Xd<=i>F<%+W!<bwJgA(DXTaE>!MET)Wk`{t@!vB
zdtT>X6n0yVfa)w+#!ijkE_w~Ykz;8Xl_g2jE4w)F)ziTJ;0lZo^8v$Wy3{s4kUu?E
zg&O*?^Ddc;o>{NBtr7Qesl#LJHE-dQt&>EB<92gCb4SpQ++*C%WgfWahdxpH<jkdi
z)WrxpU9i_0;%!Gq;fQ-;^xpHa_?3A`jd2SG9UPwelHG?}jKaAhFHNelG6Zb(qWShq
z2DGK934cx0Au7{*xaEI$;Kn2w62HQj*xGjRQ%9(hgKya0$Rz~?mIA7wScmnqPGO7a
zljz3Ur|?+Yite9u8}DDrLyLwo$l7xT?bb@sr`J`;<8M8<EouXL@8(5<&_BX4hlcRt
zoKrCA&_<Z77Y{8vuL`{v6>wV%bFgtwJbz%d4cH|xcB*kazcT7FUT1gw5UNG5da*s=
zxJbOW-wrf4>ypaGWQ;Uc=kGuK3f{d%eBmZHPS9Y&Z5T4A0f$Pk`_@)aDLc;99*u<0
z+u||!{0}I+^-T08YbmC?2thx~3oMt*kf0?aM2U<!*)sA1C*X3Y`nL(eyY?Vfsftmz
z3_ZHEzYX)9bD@W28yz}lqMmpIko`X~VB{|Rw90}sT4>Xnwvj|~*>dJO4HXSLyhUS6
zTi!o_=YOVMhuN}^ptzzC5?{r#4sABRZBZlM9_qa3B~x0wb|hUr)Qqz{#mU_>TEwZ;
z7LsNUA>2tsmHhQ6(N>D@26RcBat3(6ypD4nl*x@@#$fq=4oBF@lD#$^=($+|jhEI4
zmwpgIa<&yWi*;j<wQJzgrDZTrjk&)oe&wxU>^}W-)=>WGBfsXtRqQCL<MvK6A~FH`
zP?9P^MX7BdvfYCcvhs}m9g2n9c4J9^3=Jg-e6i>+j#00Idk4Zm|7t7lOsa(B+mE>*
zYY9*cFr?2WD$>e=ei+xUNINSPh;xkrefm;@#8zCu#x@bZz$^+=3bWxu^HLPJ_wrem
z$H7hdC#uY><XvYRg+Tjo7*_5EtG5Mwx~7P4d7#f{kGA2QyUK8xttRO&bA-N(IGCU;
zLye0mzuYq&W&ihiIdr49Sr2-H5f^n(g4U*%^5fbWPc7bvn*C%w=^f74x!jNhZ+-#|
z?BC?Vvm!^GHr}`W181x50YA&uqGQq$k>i{bqOV>8k}e+4iz?jEVTCkJF8qnD8-9RB
z_g-|*G^T+nHJIet04=@}*ma*}qh7}HiN?}&{d-w*aH<N?OaBUv$ztp#uSc__zu->i
zO7N~X<y9i;xj4q=@mQP%yOn+L`9E@0i@Ao}&)>q{Vc?TZ<fwS+G<2J=4SfG7(1gp$
zcqu`Z#?Gq8S{E^{r6*db;5Pwm<hx+s491IkAWIvbY7?yw?@;C90gjB2q{bayTw9DK
zQHx~ndy?LwWW9%+@tT#~Y#SB`o*e>?Vd}8%GLIcvPM}lw2z8b|;&)w;B_vUt<S}=(
zre-|ORh6SzF}GmsL^gA6{35FM?#J2}8@TI?3)5vW0CDs7gG;g`gdA|fd)7Ue<a8f`
z9*!4!uIu3iUJ>>S8Al|*R)IOofw!LKjC&iksK`r)*7T{68O*KjcvVd(_-DJYD0c)A
zyzv(4>zIJ!j&Pw*B;(b}{^Y7;WFR`4@fYqK$-Cq689W!h<ctq(VUFP(X!+K_|1;N|
zrfm2O`O$BoM)5uBjQ+rX*#8kyYh$p<s2r|elqUzegeZv15SAO&aB*YCLV0H=@ANkc
zekK(G={F}<XTS3o*go#R2wkGJu?R|A2H;brDp}dBOKxS!6HgCC9JAd8Wl|5b?6d?O
zv5~Q=*uKN3p%gpS%t_uVDH8U?oQ(PK0sObia6w|9^OiTQMFZwrG*Nzzp_~7Kj*Av>
z*2I#2VlE1GrAO%hCjuIbWQfi5HTbS70?l|;44QmR6eUok3(%0NJ^PN^7b-J$m=s+(
z(TLpm(}c5@>JzOKuc3Tv3!mz#fEnXP(uA;jlv~X9W4=9bIs5~26sE)G6h)f)&V>1#
zV!$OLlpnOaj1R)vFmm%dZpr~Ano9LCKxqsJ-OVvBsfphmdkYHNPQd+hgK*hWhor?z
z&{OIfRA9c2FEl(YO!?J@S7#cM(*?_MVWTv0TBihSf9*nldBW{~X+pEl&f(*}T<3qZ
zh|!=)ucj`Lx`Ly}v74E(9JPp(q1W_k@W?wo=ARCMo{NcS#ogmT)f`nrq)CWH6V%^-
z4Ikg#K}nDOIC<|XywG+5hpSkZert~i#-@VPu@ms8!<;m*yO`cQDcU!W<xCVT=#t%9
z^p)p*RCzmz>spqJy@@8=$TJbxeNG9w&yN7d|E>tvhgr~-3q}ydwMBSr;%I#2VNSZw
zN})LR!-R)@U?OuLNM$t&f^_)Sbv@Ada}4;`cBB4@&Fq&@f!gl4!i#@oU4w8}4B7bu
zyV~<XcZLBO&D;g_w-zm9nZv~yx-@+#3fC>Lq#7f3;@JTMBFH=@bf0ccz5Q6;dBhNO
zz6e9J;@MDEpha9hu^Yv=L_T>Q<EeaQzQ(LfHa}|6T=Ivv-Z%pdwZy3eyAL>ox4~;(
zpAKHn<VtRx0a29~KC65QzCAa&X}uMg$T%VWrarjqT^erKoe$5~u|3M77~a)forc*M
zk<fxyTp0;vSxE&NlTrf*wuZ3#W(kJN^g?bM&p%tiva7$<F~#{Pps5{1vA&b7RS>^z
zC=)a}6KWFcg*lAVZ=@nmH<~eb%R?hlz`Cy+l<tD);SgAT5J02$WH790hA+F)pyhH4
zCpeuV)VspN@u=H4;c+>*oovE$yG@AOZYkz}sE1HJO{{&yJSj^&FyN0G>m}Xd8cQE?
zrNg_>wkHes#*HAWT1~K!yCC{Bs6gK5OVQ@j%#&um0cdDD2IV<(NBE2AKgN;QX)0!X
zgeoZb?*iWW(+dip&Vlc|Q<zHF@3>zc=Ti3!5Br<aUYl6n=Ra2*bj{>;TU3A`<_aHm
zUzh6m?gJkqXSh=y4vvn}!Y!T?;lXcRqP&gqC@Lml%))9=3cn3i$9J$^(kKivi3P1^
z%p;&b6<1DR9mEZNqRIKcuyF4P*7vUg{wC|aRDb8mHMSGjq(Im1?}QJz0%~6~on`0W
zLF~P9lwthB{6#wSP|aubPqXKn>vi!_ViK-@Do(5Sk0dTBrMSjGK>Dw&fn^;=MC9`v
zm)muKzU4Y_H<-gWS)GJ7sW-URs2v2?$MOQfE_;u<7{*Y30!xc_V8ohc9DLlvd*&<$
z|JgeHzl@_Aw__#fjNcC-jv){@^%!q|w3Oe!Qj#v7UW<D*w1|7r4ZdaduqZ!ElLpP?
zgi13DLGNNG%#CAPr*T%e|Fs%PoT5OBZ+(Q?kMB6QKvkG{UYVvo7=zJ!uHvr}bMgjr
zLG+yEh1M1E9kiGa^x6bLoy&xdrLTmGtwuurh3^=Z-42t+X<+$fDTq0|8=uRaz}^cZ
zVE5TSxGU3$<hM1VD+$10g<`nuVoEX|J2NNw5%e?(=V#ZC#y@4H=>7hoNUqL;K08+j
zGgm7SKQYE4`fm;n&9NeeL$z=wTa8>8GAEI?E?{>-n*?=A@Dukd5%1i&oB_LyFKze-
z{gln=%8PnL59;CODrHjmLP7Lq^9K+Yy@viQ2`rqn9<?3SiB3@#th%%jFZ)Q4a+ed_
z)@7!|%v=EkZ$I&BzCR!=Q-u!1Cg9R>TGUaj83k_n%&m1F!^Ue;$3Y9sdOVWex^@F4
z*p4hI^*p3#H{o*2=hzc2M!bsDXwsK=F#NF;!u92-TRXs(dL0~YsD+MLV>~!siq6<L
z6SFngt!nTl7rtUXifO;Xb(?}P-~T?;KFvilaT8RF_=TZU-0+a_IvhkJav{`=s@#{P
z&aChG^VnSoX8y3+rW_R5nM^%YIukGcVtiV=tLVS<2RApGWh>rg;dz!>e!5<R`dnWP
zA6f6n_=Fd>yloU(P5i}cJRgbQJU(H%;|%bPJIQSe)1zN1zQW6^isYTAGBqCT5S~6k
znfJC`lzrzbcWku`wK~PP3TLJvvgcxl^Ej|xVgPnM$8qng&!{Hdi<TbJ)M>mwEZ#f{
zqPm9RK;8$GP7h+a&?my>dnaLfpEsm@`T&kco(sdY6iIp8ORO}!hSif5iCM;R*#DLF
zbFV!^mG#>EF{v5QxIu!}eOiPXpC+Lm%c2Z#{t8N#zhFd{ImvdQoXMDbc>V2t^rizs
zErU)}nQ<OGH6QYu^K|JyR-?$$!^v=nF_@AC{g|>*nyxxq!*Z$h&|CkR@6gf08K;iI
z+{aSnIwWB7P$;NuZsX_wdjteU(?l;1pTu3t*{FEbkW9L*kM(geP~fIP+@$3o;<o|x
z)Qlo^+wbG{kZTx|^aJ88Ey=VbW14Q?fMGcoad%HD*xx(^?fX<<_lk4SSMn7e{p`RQ
z>9^SXjRqeh;f<YkhD63E878gPr$JF~h4Z5-amo<OS*S!}QdBlrU1oZlP$P2c@FV88
zP^Yzg01U3xrN)nb3d7H}p_5i0`gR}ZF0cNMX@AN<L2@NZth69De;VN~XG*=le-S<L
z{ReyPWT7IH`3`1`fmLd?;PU+qcTBV&^g>Eu_KBZxl6iaQF*oNE^KSU=YfgM4Pe6zE
zaV{<IJ?kJ(fG<%4DC5!34X>7_^0RaBrkgQoJg$JH9n8ai?GPNvVf^wh<KbEHMI7GF
z_=+c%;f$=W5czvEc;xeNxK)F?uq(d+V?=T1XM>^J4=6tI18n6l!M`&zF(@f^YV?pL
zebHw^K1_=QB?Xq{+GYv+7lg6j*(i~vp9TGJ{tS$)PC&P<+RQs3kG5<7!F}gXq0)%6
z=oi9cTWbskWt|bJ#A<Nk6D{bp9cOUhya@gsa~T!BsiTIE4h=3X#q0}*!9sp9ejdnU
zzRQPTwNOCU_B}xjH4Y5@84qV2>%}y^5H_l&bB-k!xrcR9#OaMC-Ija`?sROxC&#N`
zWhvWjIrIF!zD$UVddJ(8H-lXf%b?ExBW!W|%@xja0*n3{u%QLe@>d3DzpsZw@$aBz
zj18AQkiz*(?cuwJ$6`o5bGcPN0zpT|)R`^jM1P4T*fIxu2V>qgM=L`8xNNx5{uF{O
zE@I$;Bs?&laRg`1MgO_8d8LBmAkc}(b6D{bZRUQ0-|EZReySd~r0s@O8*gx*^njmQ
z!=9Jywr=q>4C-gMK&hcG%KOTbCx<UG-)0DIxFk)3DiiW1%+sZjJ>r~|YAToRozEY)
z)u0o$#o)91yRp={7{UT&sf$YipWL~ezcf*SwruU>oBf?3vVRlKS1tgf@dng%j4@Gl
zQzRow)ycKLDs)qVIt>=Kz~U9_AgDo{lI{cykNpgb7ivPTnjM-&8==ZnJ<iO2A^hCU
z`iA?X!R^m<)|F79YjU(njgcB{oX)z2-8cEbu-{l*%R_CWGqgLdfZR<+ux3mudWQ^g
z1D9Pfy+#=WmMjH}f<E}Y&<~?L<*1D334UdrCeg_b;p&+)QXt8gg?C$E;wJWYVcTT*
zI%Gji-E>LegpXX**HFm5HW}>045?1$Ilg746Q49O4sKlh4Ldz6!20+*Z1w7dyE^UI
zEWZKVv*vL2>qB|385ZEH`kM3Fc>%sfNfDR9PuSack5?9iV%oi<*qq~sEg5~nGduK&
zlbagm|40Tk`vwT(C5d$LeC89j&U^T{4_&7mK<@}|F0|$Y|AILb{emhmpk*mEiTv;z
z^NYuSyM%E+WO45iQ(Cj35JgK<z|qnM+;J*jz95IcD2jp7=XY_Zm<5V*UqM*sNqn}*
zoZ4=X;Dg_{gT2LS*mSD~>#Lmcz)Wcj8Y-IlQbvwCR{C?+{5;sgcoNTS>cHFLipcjS
z<4aVG=Q<Yla{I4Kk#kFw89RL+UnuP&tnSk!Yfl$|^XvOqldVl052lDhCHBLT)DsxA
zjB&eC98h2|6M8SDV9W3~VUW>i;i{E&n7(KY1dbks%T4~E*T=QE;IR=ot67Wo--`L<
zvVZw4A@N`~q=+i=(GZb38Pq<T(Uud#=xTQtzBE6@Y6T+_q3#d9Tr~!-55%CJ7H(WP
z^IrWolRNlBo=$XDCXdUtX!-H=pwg?)9lFZ8U5C~{$v^C#B3XrniFKlR>{&Z|c_Nhb
z{tFeXx72m@9=NEk#^{n|=<n>nmwUExCZ++%dtAd~bLYVx%OZ5Sc8ojVxe}a4f5PnP
z+RVwg9UMzSxV3f4^lQopDyeTotu~$ITvb0qOyhfy_V@`Go{k_#uHQ$SA$2NxPzb3{
zWZ6#oH>j=Lh;>{R>&LTx&!1bkf@K|alI5}afeyW>CPjT8_H!zR?U*CM=F;zT*ylPL
z3p2Bz%3}!sI!5E{Wab63)+6IqcA(zJFEF&nfO>y9%lI!^MCr<XSbZ)F`*s>qvA1bB
zTyPtVCn|By%K133YA%`^iP6+K8aVob5#6^rpK&1*P*P?9U85*i9nR&?uZzXLJ26n1
za0?Dwo6{*11=MquG6asj$y?luf!VFWK(bDw@uN~<kf}Ovult^JIci2n&oCj+mc9o2
z^=8!NnlEg7%R~1VKY%x<*>kD~232O^*=j|4_8Q}?=5N8ef$3mhsLmYiaqva_GukF8
z@?Y;85=q8+DE`451S!qv?RAw)Ft~xA;w|ZEJ{$frw?vAS5AGA`QdtEhqHzBVWNfR)
z{MEbAq&yn*o8qA%L5zs&+`zw`MwBEfkkqAXVAW%J>i=IAzdSew&d3*Gd-ZSJ**gS=
z&n@Y+t7mbap)yUo{s-*$vK{Wtm3**_7R`S*igsldgJ7!URIR~M%rE!=`l+%w`6QzE
zJuyCb#yOUezQp*<E3nik44ReGP*Q#cUR+(u95W+`!`#!*v~(2pv=4>IHG!Ojjy|b$
z9b!2V6K*d_z#E@6=syNU`0w9J9HZUMm@jgiO-vzlo=_*bqyGg%WrUq-_o48aJ2!4h
zE*cMB61to@#)&S9V6kO9R4r1c^E>*W&%Oskw|?L@4zS$9>3iJeUR^TjloUO9O^e7y
zoCFo$MZ9C?3*ovj6Po;)@dI=Oq?^jog3DL%^x;M9#%?K;tltgBV|T#(bH`wug(WFo
za2N6<q{+f5*(k`AL*qE+^X*7P1z%J6%ep^uGnV4URjd<!)Bt=onNu6~PHp?jlB;y7
zhCK~mF{Mn8{_&BfhU|{_?e738xW=Jl`bS(jEKZcX0#QfvJ(gdp;H(ojfMLT+@VI#%
z|NCi4XHC|js^3d--{NNYF_3`nBh_eaOed!^Hi$R2wu9d+Q}*6}6lq(lKsP=dK?)q?
zh%%eQk__{JD^sP_57}(<>f_W6n>)e(=sE6noIcU-^MnUmM^FdNESOnpL^6c3&~~3W
z#HP3Nmb;jLY!u}dj*7-N_f=>CMxcI$4n~yv!Q8STv|~T3326?XH|`F^IQ)iH&6BY3
zcnR-s{|?vM6hT~>Hnzk@3I+PEqDvM!#BZw!n&>3pscci4U-TJ{5e0m3<P39SGY0+N
zc=Y)9HH6##LH!Ipuy9Dmwxl4;VcnT>-P4@N!WScDufbhgc{01zoMpr;$cuiqe^y24
z?>Pge!Kd(9{X?)hnT5qp!658DhB}MKfk|^Myj-4vp&O1tK)fZmPI`lPVoNYp@e265
zJmI{zoE9ECY{h)$qdCWfGG0b16F%&Fj6SDq&_2Bl^B6xwn`OKOk;zkr`)<M(XD__B
z;1@>xvl#^Eo(fYqHk)XpDBVzpdL{@cnQBOE|9s<@y)hwz{4XLoqCpdNtjJdoCqaw_
zqQ7(&ln<=KDAvjB){zC*-PvqDVLZZScGH^>gS+G{h*d`bY}0Ijv*H3$JN^&)E=c61
zLtfy2MrI^?;yCcV`V{B?PJz!#aTr(l5e=^}A9fjIDqguEyvbZRPA)Q#a8efLJdmd*
z)n6FPA^`rfOk2OvdQ5)003}r=X`McYgPE2v?vyN<BdJVu`s=vYALQwjJ~47?LkQZp
zf8>tm$dY-t&B>ygG?wGnCMp$MAiL=tH+Yrewg%Rbv3-wX8y}$i;{Q<9-GEfaGRA}T
zb&%^SfaF1a-se^vc=Hc=y{-(XTj&hgM)6$lY-jd-%){lAlksJ)IT`U+mza*#CjMh0
zpkJ|yIW-LECJQUN#?68(TB}3Eij|1Xr9M!LVw}OGY)qcHA5=c*Q<Ja)P-Ht=i=cLB
zDGn2L`|O3nWDmaZ?q*Scw;~kZI1gJlvK;jvBP?If@^PnMptGb7smazPhgc@IzgrSR
z9`-|Smn6Pr_vq|@)cDM10kw^|!RIDV1LKIDymX-kT}~`%>K{E+7&R4?^dEsoUN*>N
zGj7bg6PzR2A{yJQNFuMi<c*00EbzFGA9rYwpqP821l3?juG8m(CXeS#E2ODzL@sWg
zav4+ZF|JHOF>H9B1LaTqF#W_MF4=!HjD%PWg&W-4%K~EM+RG&~?c~1atTW?08BM0k
zQtz=2Tu;tO8t6P0RE8gM>7H?XmrptKe60srBPFuWCJ;;64JxYi6&O1wisZIkfG5Uj
zs3)6@GA<S{daxH2B6?7NnI9ybWQ?2q>0Bh+?<k}@;g(mdZ++(^=amu-F0TzCU3(fR
zc(-EH{goJ1@CW=Byu~ZAooFpVamHb9^bc8q|LX}A`Wc8mGoQ8JFA9fV{6Vuk84M~K
z5@kPofsxKOkTq1xb{pTY(^iHqbg`ntk9xr8sttTzTY;onnpoRt(HR{)s4SldPi&t;
z%LNp*$4&<Mb`9cE`4jv1E5o4$(cqZ<m2>Ra0-i?#;d;nT^jh=>=lO)8f3_+%-u8o^
zneT9b`4@dZ7IHT_`#~Byai{)cu+jI!xR9G%<N_ZUlfXkTc0>AUQ#i%G78=H~^uGnB
z^a#u8*sl=4<A@Y|#BS2s`w?e-Wz5y9gM9QsdDIC?#GP8tVf<-n;<)!JmoalH6bCXs
z>9~8~<bE9H-8G}RFb&!&qcMct?RR!x!;;2JXqEXN(h6}Zs%(U9j%m32pg6s!kPh6z
z59s9l2xZ=W=T?V>qf$J@4ub+d<!(5%WL1e?WXjT@FN=l7j$zz3_e<DvHAv`r%78w8
zqfc#z^>9OgI=v$-W*Ml9;KOM_?%S!LliAFLuq;_&jyS%FVH^S}4H9CM3lnBP!X>i}
zh}5EF?9kr>qD^n%*CknUr_z+VUp0gIOTwY>`AlI-;t>p7vIql*f_WFUK=!dCU?8y`
zvbqQ0*)wSxYP=pi%H?UE6=Q`|AA<CQ)%>w=0i<>))18@lU^O9{@z0{|^Y8uwjm~wL
zJgr+aSp68o_ldw+Lz7@dBJ5Nc#7E3Agr8h7yK@ug!B}NMm1l)wuVskZ-yZNAz6a@<
ze>s1LS9s)(1-am3N&Co6a57H>6S-wr#Ocuso`pDlZVd|Oy=H#jr?@Iml{%hPMZX3E
zx|6x^6zmnKj7ttz!+0uoKbdc6TRLMj`N8Z$A-YHG<tE?$h7fNH;Vd(ewfZLps!M?T
z7-=kjRm|Hws(?L0);Gv&gPEJsu*LQvcI8CFKG!oaUPqD|ru5<7$$B(sp(c$ywFU~S
z--`Opra|(L@4`x>Wf)XpC>-}X6$c|$VzQ1Ed}fZA$iI4gi@q7}nz<D3DAj_4Y!ujD
zC`5Hj8PY$n7{!~OLFC8)E^nDJsjgyvwAFE_dp86p4*kWHd~up|aWq=r(xIAZX7pMA
z16Y=9N;Muu!P!P_x-DBkZ;w?aKhz4a#f=x*=(R#}By*d!J%)8ZEXj<$1~h!(LTr6~
z3ip(qMm@7^Or32=r%ccy!t6?zSEj_~3f9AI595=zHNpb+j-Ksx4tDplEZb@oPKT@H
z1N{>?32_;cTIYea!X(ZrA`?H>XpmcP60ui*7g}Fk4>3zaA$K4Oo<v@OQNiWtplgQP
zmajs`%S+IushrQ*Jc<e|*gnOwnVXx=`m@RM&~fWAFSarr$C&;AAKysOz9UAHOqFQ(
ztWro<>BZWUuQ5&WGK@AeCwnhxP(>MC5_%<_6Lqyh<h8wstWPtkR+)}*zlT;vM<Bnh
z6&;!$fc@rb{?Kp_p7qwDj!!*=bGMt&!<H}cuY@IeB&S94KNmv#RdKXjr$D2+8=%}J
znz!2cgsWL?z`WWnT(Z0+w>Y#H@6~D2i5hz3gI+51y=X$Y&&;p%K#w*|Xv5_BTe+m#
zMR;NiyZ<V7qKp3mHp?*wLaQWgEO^Z|T14`_Ka{!PtMf2!!5@BddpNF!YnZKj5T3mq
zfWkx3LNCJ-EZudQ@jzv{wR_KFzo8+Go9l^<o3pq@%r7+gG+<ZU9`yd(D(aHcqZ%$t
zLD02Gbgfu}2n0v&3l61C&9=2bW4~uY)p!$fVDUk`6lF-WV+MIE+cy{_1a7?n+dDlq
z1pAz)T)_OJ7}?>@+u9-L|8^9JHfOW>$bu*__t3!EP|$OXK)YqtI5>~zby5vLAT5a{
z*27p{Ig*z8y@uHn*xuFKlsKCH;$mn#EREZahHgC=Y%E5<v%51dKZ>@v=;Ix)IXG5V
znMgQm6Mhcsg{F+czCcadHS`zOIcpJPwK}e_;5TPm<HXHyy$w!2svwv98J4L<V+or>
z8vhpHU~4$rm4xzwEK4ZNKFy7H)*#~lJ;KS6rSOU6<y;4@LGr?LLferNXmCW2c;`8A
z{_-=RlyLw?{5X&43!8D+g5|}@0<bBlgg1<Zm0r`ux3&*pFHHdBByDJ%=>=9#e{xxI
z<uJh4;~P^4T>H2glN<awfuA!k=(m7M$1-Rcn8EG)DNCjNjEK|2xv(aiWwIaoqr|^D
z<Vb)fSwCBhIMgwQ$9iRQzDJS>+s!dy#42oTIRP2>)o|b8A;`Xw%N15w3CG-ufq+pj
z(BHxXtycALUXM3|=W}@!z{*^mou@g0k+1NkSuh^z{|<AyBuLuyGN_!x{HU3$P>5wv
zyLdHk-Q0~&whd$L{F|_DQyIJM{}L9e%!ct{qeyMVQaq#DfeTA}(f0gvPNn5E7uj_U
zg)E<=HR&(9v>1Y;`4!P?RHnfn+F?YUI%#5m$No8EM8Bk7z%Y$q)L9eCjknY#gYPq8
z{vZWg=Wn=0`a2pLzJbt<O(;AnPW8{n!kDo=V6_NvOUh`*R^Ka(d96n+s%K&L3D)zN
z!#ZM@6zCnfWT<tkM)yk>xcB5KuAD1Hzo#*NM6?7oKWss@ioW8{`Gp{yR|aDL2#9^g
zL<mq)!P$*wG?LnJbtOBn<g7Q82To%C^YK`Iy`0zaN#$qD51<RXfA{VThNqs2^gGMr
znhX`8751X9-bXz4yBG)k1gxWI$S=^;A<<7Qu-vqWs~dR)Td!2Wb!BN{XdpoYPoKd0
zgibJgSc)Z+N70KckKxYk<lKI#0zaTeo@}eacSRbsw5|m9vaX|z@FxUYe*_iB2EH&Q
zK&Uk7IM~~7hd;yGG)^J`^tX>hg{Iwj@QXa1#@v9zUCrQknmzk=JQ3F3y9)j%CUUaV
znUnXN5zShZ3El#B&s}&H9mFXlKgksqiaUsyYgj0-b(|u=Sm^p&f|u0I$tkBi{9$iG
z$Bg|2OTXlx=#nDM-Zlxt^>wJrjYfW4tQzqT8;LTpf4RXYe;DshoA2yrH>DZH`0KGY
zIlESl$W_;Z*CO`+EnUI7p3^}iOhEN)`eA8jKia&ifz+GT_+}xyhqz9{C$_9xl*Swm
zD;7ZH@=D$*ITP)*LLfco7H?(l2A>YAkn+)oV0ro{oKMH1#-4c)x#J|`R?C7gFN?h|
zoCUoJ%doc52{hiu!b9fD9usv7r2k%InTSM)IJ*RzMzio%Vlp_Kufp7!7r4Kc=Cmc^
zy~xogkDLD}8;bYq(le$JxZ&(8h>Ns?ZR{3TyY&O_5^<h^`@LcJi%v+|!-L&K=1h<Y
z<dQw2MYaF9@PfZdQ}uuD!0>IK@NcCS@zys5LCd1N?Ah$ys^FVY|3)9)P`n1wW)s0-
z!4Wt$Bu;(?6tXPJE-X2ukM6gIdDo8*p|*THkH-=7wAG2smN{sqJO!-fccaH`)*(N0
z0ET`uH`RrD*dP&ug_0q#VbOO;XpMuGkX6`sunN2m7*O%jYcNpv4D4mfFnjANaQ)Z>
zT~8YzYH2ulR|NCEZT;Mry3b(T2q1Vqf2#MCaNz}M4aW3shKS1lV8r$_SSXz%k~2v~
zt?}QX{BSL&&N$i`dyVkJhS})(^)p|4c!+bXmlckGbp@kJYf$oI0$4n5hP#`uqPNmI
z&V7P6FCM=U+m<Mij`*4AwHKgtT`|<UdvpHV7VwQ)qd?()24;6X=i}x&f_fD5TQ#2m
zSMfs_v?QGK9TIW<SAU^fr~%#b`W(s@S(1n~rg(de1UazZoK{Uq0+sviSa|)N$ba)*
zSaVI96n@;ym5XKalSkZzH(GkMQ%a85zFdGBH>{xe<!yKq$ZluSy-+RA{A`bQ!Z2YD
z<Rf}$Q^qnU3wpt1ksq2>)Z^~&tx$Vp96EZpa(ftObj=4<s-U+DvuG@Ynj8K9K25$G
zc8%1c8RFJ35dInPrV>zl#uPA%;<D{V@<$3KiFo-*2*1_{i&Y%p$YNvqyC4=9vkthJ
z>1`}^%!TDIK0vDFWC&tk^U|UQSkF2M{}2meXqN}Vfm$p=1G0RN7=5t9j9Lze(cZ6P
zdEcWR=rFAZohH~}ez^s$7b#G=xi7)E$rJ2`nahl^95lRVgB$Z7G<*FEantg+u797v
z*ocuNW7TgQkO+dcO>3}xOFTd4+869tIGHiCt%Zd<ZV6>;wTaKg_ZV9KiBHH8G6oz9
z?N+GM4EKZZ&iWdv$vwp!d3Dn4!q~!Y#~^5pBVTz!if(AD1g-Udk=)@~K5j9zuVFiI
zFGQ8dNbX>85h^^2!Va-c-b_Q8ab*lB)CEHFf5*V*=QPF{m;{ce4hu=F0r`=xL~OOw
zArAUb@}LbwS;r$dDZ$2X&vDk+N^H3?0TQ`)_}@bnIz}%F#(_Rr)U8blYfZS%4HneZ
z{UFvi8PRa3?=VYDhSsQ?lDLXm?vKAXxgI7@c7B$pRih-RzO^*~<5=Eh8Fj0Oa5Ri#
z{FE7T*fQam(EH|DuD8Mt%B#!ylFoY6*FOh^{|Px2=ZCyUWfC~LQBI*|1oal$@gE$O
zY5hi3D$;ua-Xk-(7lB#$r<2Fz_V2vj+83C+g>l$k<xNG46|jco?=l}UzV(HLsOOvy
z$tOEGc?&h7I=UQ#f=_XA^NaWg^OZ?yO$_8S&O)qqBTAQPQU41)T-Bjq_TH{V3L}nk
z{+4Sv*MDw<<2fCc<)4BpM>0-ogDy;)$97`%mgMze#+n<NC)~z%5Mj>+L@w<L4q7Ke
z(59K9(T`<`Q<xEaQ<tHu)&_x9&IPcQT*z6wh9ONnhcV0L=+4{XboTtaDD!_DorzzK
z-Peat^E}V<DX9>8=t-S>Z4#0>nKEbU2_Z9?j--+#6_O;0q9jSfx!10cgd|Cl%0ZGO
znNp<Q{d@m``ZS#T-g~X@_qxoUV4v+F_{w}M6D@pT-k>;48<&kP#~Yv{J{xTF27z%X
zTN;l0hVE1LbHORGpt|ZkDyx6P(9h>#zwtrXpizjfQ}sY8;RV)6dLiJ&4}Q?j&**Ks
zhxgM+;`;w#|HD}yu=rpLd|DfY-YTp32P^L2<pr0){k~Kb@-qoWRx1#r_PdxFY(}=o
zmg9>u=1Q3|5&PmC(ZT-#FKlVY@WpJttKtL2!@px`kR5M4VLjGFBti2^mP>nm0Yk<K
zD9_F*63hm#acAI7t1bP@`qH~3lkwLI9dgdZhJ2qaAT1&pdd|~?JXk3p8ue`~r_+h=
z=E~B9k`J7n_fNpRd+>EdBG!FwgPg;_`%abeg<UHkzVQeu@?EgSu^!@6CSkzOHg3K_
zF{lo|27$MiLGgq-P#$GUgIb1hQ+;ER|5uB|eAoiROH^p{jUzC4xD1W?q{g``<#OCz
zE1LI`c}wQ4z#5mmXb{JG$_rHB2D_sqU5kdZN3@9#&lsCW&cUoVCUn{D^Q_m~i~n>m
zhnDdvRAl+r;1UEyX%R-3WJ8RrAtZIogRdX-XoSl+Sd*qtg%7uKrf+Uz{0x2SYTL^n
z++2<^fl<)tkqIj+Sw6>bu4p)Uh(90RVt#vD^wG(N`ci!qd}-u!HMD5U&?MMB@jKqn
z&?1vJ7!YksMXufd8>B6ep#kdd5dW?kJ)%yb_T_oJxax0_=kD_m^nMl3LL#txs2$by
zISN5EoNK!p!MQfA2iFG15{jIQkDM*Zmd!bgwQNV*2A$-O<uSgOGywX_^uhCWB{<av
zaf?0`fbsAw_{covMi1KYrIHEh44RH5TFgN^%Lab3EPThwCY%!=iBn&F1nu$<yg6E6
zLz4oPT6FM%(~slTv!T#6`Zzb@s2E(_`+$2OM+NEI-4eV%abLbG(;w+_C_L}N3#K=@
z{rN|UR2Mztd`?Dy(DWweYu7`8n*o*4$VO4=54clf&p5)l{8~>QtH&pEva?la%W*9*
zUp^YT&Wyw(@q=jif^=MR;{-H(=*LoN1@FDUic7ut6;Av9z|zm|BCqf@@b04(iLSM#
zL4F#r>@w>EiFrPjd0jrao6%eBE?OSMm}*~kgFzVMcWW-j=qbez^Ra-}US`W58+H_L
zHd_#7*8PzsZ7}LyGThScM(w13IqAxod_wv%@H5fl!ZxyVb)*USEx#)9&lJ%7U{mTY
zTg+9yJ_c?61)SZV53tzTfMhu-(z&nJGgn(LKf>xJx^G%2IX=Oj#IkOH;3wfPe;fco
z!xc%+rfyuq&eWR<l<BF0TOcmbz`a@uptnkioStq=Dt9Y@Z{9h^HCV+R@~g$>!CygY
zw~sG7eHMalZ{pQgFjnUhBgumnd7`<&lrHr#BdH(FsFxra?yt?m)~iy;oW%C^o|<kk
zr~3GubMry|y&+1r)<WgH4;b98OONeM!<dMn=r4a8SU%QGIQ+IqQn>}Ix_Q{<vjHXq
zvW$|EGQDm!h>A^RX<yD2NGa8Y@dv(PLU%sj_hckC@9%(!gT0*iP#NFieFk#2OE`V6
zQ}82HgE0#p^Mgzup!d9O+{9nz^h>w}O&psBq}q@c-ipD>C)U{TstdRFXpsr23N&TL
zQ;9#zqZl@Sgxdw>csDGLW&DNYzp1RZRND`u<Lt<C4`o`TQ32wg##lPz9B({KliDV?
zpoYB#{XNGMBhPd}&6g7J!#pnPHRB{OZ(qi(!yt`fozm!?oZrPhN&SD%_(~mP@P6@!
z_ha3K+70sPoDu@wC+xVk!bYrM2|lmlb6lgAJtn5qfjrACMU658KaFB8hcUi{R@ox)
zd=K8&iDdwL#`2CTDQuP!0U3Q+@Of4oN}pd42`+~7*Q<5N9pN0j`sXbKu}*Mf_#lv+
zDu({KO^CBu4|VrDSUrCNb|to==IgUCVXqA_vo|BorZ#jV+e_4MIn8&axo{PRJ1}a$
zJqY?9bL!HOklmk(qwd9lOL8jYe0qx`6>BlLs2XmuOi+SqI`=^BFAh^NC;uARQ^CCz
zMOptO!K_$4GG7g0E5k1MI5EbTO*CKfM2>*-RXC|`Ph?ofIydwj`d>o0XnY?7<)&k8
zKp>3K(WI9j1Tug78|H8g;6I5?NQ?Gs#=!SxZl1~POlAx7<~3q<SuwZn1!FdOClnn}
zV!6+-F{~rqhv%FX$oa}}cE?))GZchuSLlLY#v2mB3@?7DN)N==F2o9Q8<QtJhmPKj
za8qhPIut41@4Sf8lyRIm#z-{QvjrkHhr!64dnk-M2&H4)nIqnql)tuPcX|_2c=`<Q
zemjrr)^3A`FVfMg4sa4tC)KMKa5*`%A<&EQX;t3vo?Gt2GkfMc8f{P41{;t{{Y~I>
zxrNIOKEil5;pics2j0`t!Cg8+aww}13wJ!_Qq}H*vPC>*+MdO?*B`>eb2so*QX)jY
zV9!sxr<ii-8Q=dQ3^gD1qe&6l<9>8Qsay<SFqECG4(g#W{TLUMstdIT0~r5-vd_MQ
zq-ygWT*>A><?0(z8Wk$q8|;H;g5$9v;TujoY7XXIXEFb`3C(xP0UeDb{C?Y<#=IR0
zzMT%x`6Ul#8a{+-?K3bTl-(gejpm}yKY_kEtl!p@%~x-WVV~VAFpuSkN_8jj1ydMD
zrcRD%{js6rt@X)N`WlvJ-NNR3x^!-CHQM)S(uibv6o+MS#{b>o<O<pEbIV4I$ke9c
zk9ElU7Au-^$cn44Ea772oABR8e}EtRWvPjaF6kfi4U!~c+&W&7*vZ`o?-G3woFH!b
zH*R9m7*lvJr$BoDW$%D}6ptTZOunZ1;1@6D+uE7mX2nt{d8$S(YnMWo#=pGwhkv;f
z$5<yt{tOf<vz(%gD}TUJlk#1<Wa{ES;CrZ*ODi0SF)SavmGyP+wm-mO?>=I{40D>>
z*#)y})oA?WtvILgE=o&2il&m^V0&Uax;!s}xs!q*X5L$FtkrjL-+5AGm%SSMC;x(N
zpOkRq@j>)(J$t@=cwMA7+lX|TxPj;725^`3a%vi8<kM~wlD**(d_1R2yw`r^s=Kl{
z-M9!0{izJ0#%pnjyg9XJyuo+!dNiZ!2<P3B$NA~Y^R>4PKyl<fFjq1MTqVJ`ZMEQA
zCWBKq<wCs=&kbkYCGDdPT*82a`#o$oigo6RBDR{K%ds7pUUCBa$|itASpe2<&H4YF
zaBX55R)4JK9o5D_D%*oEskg(GpR1Wq;{?h~u%fYR0`YCb6_EY*0uok!;fksg(c#ZO
z82>{LS{37A25Wcq?CatMJ5R%qcmtBZ?=aNO$OGvddG2gzIi~ub!O|P=Mf(@7!D2^G
zd~rjKhQyRW>mVKitXG2cs|M&A4#%`L3UGtXxaE8IVxyh{gdTc_BR{kG{lP1|(|b9P
zp37qSkHw-ek80>DG{d^;XOP%e1;S%J_<XJ$8M>?mQjSN71RLVrP-p}x(Hr>%Y|pwv
z)0}vPZbpZ33t_nKJ&c-l0hS#qV2&*ps1Os_a7T_k&$P(idC4FR@yGe*=P@PvATJ)d
zUoyQ-1Ln#a;<mL4*#4&lsz%*|y;H|R%C~0{<L$OI;$15jz4QtEe)Ab8r7?!s?OTu-
z9|xW+OR8jSM&`~b#viiI@GZyz(~{Vnd43TFzyA$&lLk}q)=q5L%l7|`m$2T0-KY0O
zLFIrEbx%~{q}&W}S>FjMcm7I_E!QA^{V%azYX#Q#KjlkwPoUUR7F||`L&_E8;gTw(
z?0Y21c&`Z0>T*z4;Vz`8bxC@Mv;3N!4h9E(0j0~DH0!_Z;Kg`#0k2j-%fYL-U$z%?
z8@_|nA#3Ctk7C&Ed#wL|nPs8g;XCGT*x|-JyY(4ZE9(Wnt=;j|$uhY7>=~>LJ%qwb
zbFnS_2Cw{g5?bxDC$;gmv}C*k>Dh6PmosA=;^Xh3RwBSnk+x(=*&uSK^epIpcmxwa
z=+OLyY<4+z12^JPEr=NZv@@s}1dAuRX%tCN;Py%6ba4`d7swKCK{z+B{|YM3U&DST
z8QP=J%W|B|<+6Xb+xThho&VF0aO`h4@$Fn3?<`Ae&31rgRs*!ke8s0rWAK%faX%lt
zaVtodBaUah;2>kwI+Y~zma5CJ(~~j5cg3Ns)djTOv=U;5KINook0p-nmNYFV2*mH&
z_%Ij7)t>(pclF7jL;5f72Kj~Y)i;=Hcm}tU-9I&K?a9T9azxqs9CtsV8Uyu?!?tgh
zP+4tC9TxZS)4!?VsD(H1Xtgdm*ie81kCCu?@<`OaAcFFQgP8PM2={Ji5o<{~wlqp{
z%h+_Vsrw90Z7aF*l~b|x>`~BPVF=rv|HP0pJTzPxg(FTD;>>zBy9((L-M=1!8>-me
zX#X14?Oce0@DY-Y6$Q*W<Ae9vb9w!Fb>g8Ef~MQYqP>wF4ONz4%JZ8jc%VwLa1d0_
zHpW9*M{w6AP3nIBrlk6v9_*F9iX(3+(<`e~iI-?6>I}E0&fRg?Sh)%7a&AD^RYgv4
z_OmGF;(RV<#xbt+N)bPKi6PNfOh#APS>P~H9%aWn&^K0!ROPu2^$wE-+fmlo{e@*N
zRAtDRzPsqOsEZdrPcNz#unb;U0(Uxr;@2{B>e2WF8ZJMDuFHf26D?TnW{nQ{Z<)U|
z8hRfZlT#^gaR11!xI*PT&X;$fI}9su=Z5oGyoNE38S^4#B^6EV>_)kG#+S>SkFhL2
z=J#nU=)7kCSD|v8@|0Zc%nyV4$5_w1_XPCLv8I*xl_6D^WtRV{alc0_#N3?++523P
zx7FJV-W|sLk!e~)v*{itd)Uy^bTC~WV@~%O3CIRR0loP^i>9PF;IV<hv}*WTbUn^8
zGmCz+JJDK<x#h&ojem_fDGj`!W1-vZvzA2odkDu~p(ylW{h!Wcv@5FwvG-#>%8fBy
zo;ip-*$yWD?0gud*~hZdEHis06XLY8aNo~<H22j6uYX!N=>s{i^XrFGA&1~_u`PW$
z-JU$3B~PQqHK6y1)%=#0N{G=)<|~{JbBQO;Vn#~@pJ2KkPCDyRV-0J}w)g<+7%ye)
z&^NGTq6vmA{uiU!?l8FZIZ8LD;mK@Odhf3SHE9?`ikG}XL3}EAx#<QbU$_YcnX<%h
zOBS~#mi7C0JeG71_dvOm(fCwoNBWP}!~D05)sWH0TQ8C&38Mp%Z&jfEtmj(%unMmp
znT8Ahn9((A2BdTM0EkCC<O5+96uQ6U&Xvkj{ayl2IS$~`oDPF!?a17-GLS^ZL)T<u
zd}+phzfQ}s_`fgEwph$BYAFTqdxPZ{PQsGwTGa36U;fnhQ}EWf0c*)6sC68QYp3dv
z*<qIC%4j8W>#`~hT4uskE6oPeB{!j~zmz-YU`Z>?KXQT#o^BoWqe0AxMV|KhG{Ax7
zn}@`qxH_5h*{neSJcvY(H&!%t<3|w8nFZ2OyCEfb6v(K{k##YQ@i%%0*6tKyzU3#(
zzf=V!ZH!B8n<vS#;vnWvIVW{_%0*5(4l*YOla(!>F>x=OnKGtfqUl!*2>Q;rSR*(^
z!w8(PtP(qan9<7mx$vu*Wlcrq)Z1?bOl12s=YW4;PLCP&*<AtK6I6(|@dz$=%X}0n
zF68ZcSm)uhkXtmG&3Lx-@)6T*>CLWB=ziJ+%I`dY=z}ses7L|Qj?Y8eeG6fTz6?nz
z*GA1wF^bm{c)re>oSN_l_l|Uje6?cycJ&F^aw{OG*cR9JSd&$CY>xT72M({-A;QWq
ze%Cg}GT(Za^OJcdsr+*q{2Y2E0P6VCM2)luN}*}I6+PWnj<Pdf;isO*=%ky-byXH|
z<_qN+`|T(H-QAwXzbi%W*rU+J%fROZOQK)+6=zqzLx)u6J?}5UL_rk9kB^16wHCD5
z=RT}9-v)6NtZ!W53Mtp?C3APU!M4mq#=Vz{<ldOmVAi>EwbljyT6PwgktH$zx)pVs
zjObCJIkmWQ5etpq;5m04>Rh!K{3lF7uRV;zt-T9fCUk@CX=~8@_%Cj2)keu@<~a)g
z&P^7ud2En1s^<^E5SBqv&3TQXh3fcr81vVZC4e~P9lv@i^94Q8Ce5Etz==77=zr<@
zWX_PmL{Y66a^f=hq|?mL7hQwmVeS&a1!eSqaSLQ(W6@{)L%6%!n3xQUMdLlcx%QiB
zP<nYPrU;&MSxsG-Gpw8IW0@<RErUt2(l9K2;Kae-k(hBq2&(<p;qW|5DjS%IT}C@N
zvFadn*t`tGuUirEh|iMhr8D@@lk?&DOm!;jKL%w??MToa#<yxe$7$P2@Mi#z*B;wY
zMd$Ze(rZs{Kcv`PTn*!b4aum}{b0PWn0Hy01I5ZkXvp|%y^n=dIzN}s7*ff{{@VrX
z&h5eZb<E2hGnx;Qs^X{(Um<_hGw4iKp-bdspmxCo$S9Z%yGDQF`!ydxd{#D$WGu_b
zvG>63kv4s5BTuqdHh{ybc5eUtYv2;s4Bk8Q`3IYs^Q(U-ym=u@JpTEDH_`_a>G1a=
z_ZNQ|`$C&H?(O7-$>zKg>n-({ea7|qT4Y&iAKr+fm{O?CO?@O!qpRAPV`aZ+@emoB
z^Trfv8X}=*)o>`3&BBgt%b{<91I9l83O8B@k%sT>V7pcm0s|hQULxyVpKriE=3A&1
zZRf{WB3j;y1ba<&;vU+C2N+ZKe4sw*eQrgjc`1?d#Wz3@WK{6O&Krs&eu8f9AxJE!
zLQ}o{=rn5~SN_WxZ8p3B^AJOfTbP99HyFo8e?Ok&^hi?SFi`co4J$5Q#(b}I=$YjM
ztG{QWw#6&nL2fEzF;}|nS=4~XzsZsPHz^KDx1}L-9dV1%6kNzSHkQrZAh@-}?OV|c
zmSw7k+P4=Wc=RoB-LV_K?7z)=p6~d=QKnFspbu^RNBOUFFQKkp8`Rhzhwi&$Fzt{8
zZ!T9QJ+n+ekY0x;zo?T1V{C|Hpc0gp9ul<$KI9WqY*9MPjW5(Ppff|PY5intlslh^
z2UBWstFILqEiotYMiJ0uH-i5=#*{cFroob|t#Bol^+z6ri<Vq90`vVQ^s&&EbQCe?
zO6z85NwCL=JJsB*0(%l)um$F??gzJ1EH@LpT@qhhgSlIl;PXND%rz&2881)q$Lmdr
z|AH3~<Gh!vk161EceF7+z+CS3xEH9rPk;e-RebhNGdi^C9Q+W}<6fh~uq@#dbdB2q
zM|YZ#FRvAdU(Fc)_w-zFmrs%mVs7><xm_@#V<DdO=)tvBNoeYs1Z&uR&hzgYyyqn#
z-J;8IhTX^fp8u5y%iK`4#GD$o9YrtU18%vlHVJXchE;cxalXY@Y<(MmdDAvwefeYV
zRNh%AO&Tf^&%4GgYLa5l{Ym&i#+qDua2+dNFc$T9724fakDo5v(OnCiP`39em`7cO
z&{H2#Jf~jdyE&CJequ=jn)RsA)t|3kkjQ(x&gPjmU8FwJ3npzeB`@6MiT_I}4g}WW
zYY%IZvZoc_y0g6ICUv^>bqxL<oy;;u6`WJ5DJT7_&XvyoSA_AyP=2r~8gDlQ=bMWd
z4$}m_O{;*&&vvvkHy>8)i$>|pM2sWhsGm3;7P=^rggJ~^`8S74X7_-Rg^8#(=``{m
zZ0J8KT2#VjkX2*+apmq-P)<@{{?X~ot!c~mBoBfhQ#I&3Btx5@m{5m`Us&pyCvsoE
zP$Dir#T}iHfME;ms3+})vh)=Yb;=C?1{;xWixVIrE}m0=qd_Mg3rE3PwIWxGRy=uB
zho;={0L`{$*lV-^jbkogPzal!wVnZ$%pz?2(85h{dCYiV;h<Ymj$%EbNX1i&NJo$4
z-Gd7FruXLLWTO^wp1^jcdefkA$^$;>+YlH8%-3+^2MSy+6xE9xP;k#zGRo{73TM_y
z{MM%PBHM$|>c$x2!2Vv+J_r~;1DmhdP`wq(M7_!g-P__sIa$M5KIDJQh#vmR3+A7+
zw7?wicl;C?#?ZO;h>O3XPOoHZ(0bL?plbXPbOKM{wF6aX`^5y`o^eFwV<srh=yA(W
zdka5A>p>IhFxxW|vHAq$I3s>hW<0xp(lAXqjlDyZd0|l-7o~p@R&BGTRa3X(>c{GI
z=Xw<yd5Fi4@7NvRdK_;RWJe{8-(EV;3KIW)10CT)NQxeb<BFB(-d-6Tk*`Nje96Ql
z3B`ex`b2yFaV~ptG#rzcVEx8azVvdJ$XC6MpKAXcSI_y4#<$+{`r}7K!toleUm!!b
zt*>K@ufw98w|?+X8{!hVP8_q;lz1KA0()kg(W)cLbnKe{@hz5NLtYn5zn6uj_483}
z8uOMc{mDCKtpOY%0u{nC#huz@)EM>|2}3^Kkahnv&oJl5XDs!f0y(*W{?{gBTrKO0
zn=8^%l@2cAvLTI(Xa$7>_QWPulS;0YgK}3N50Q+~Bi)Wuv`ne6`2cJj*9D)P)d}at
zn9P>QEqED^ol72pz$ti4?!W+8zRv*vp;usUjUNPmy9Cqcvvb~sdNixEqeC}TF=ixl
zR$njT1|ABirnwm2nmmEZy+a^X+<|F2jA=a66x3HmV9J#QK7Zs3a1fVq2^nJ`LU@~t
z$qmJW(+tVFF@D%+IuD*C$`HjZC1Cs@hAW?R58a+$LJ}`Sasw|wS1aKjwiiQJwiiF@
zpDG+PR*gs#wnJ&}do&FwXMOi7b|($P=-Yh|IcFbUUBhy#7z^jE)#&G^_B8m+d$7>A
zBXJA0sF{}q+kec1A0rt<-FYa43_1_a#jI!9*#|TIZ-Q~%3NUd<#gtLjRJc{2cU5`M
z&1Sj5jz4uc^t3Xa8Py2o*~9TSyCeVU&qD#f(`}DA<D9vP*i3Oh%#~-p7nReP<}nCY
zu1bKyn~~h0Yz6Y8avQs=Pbdl+vYXeo$l)49H{nQ>JZ(sPfSRjsLb=5Oj8jsh*0db=
z`-Gynm7PW1WI0>i4dCjlj(2|<&|TNx^CL&xX6!~0%Opff#ywZ2gJ!*F+|!A?_N0xR
z_J%k7i&7clH#|+$TKE!|>apG1J`O9wukmx_vvK4GO%gQWCqMC+H)aI2;5NhAaPid_
z^jFfPswaEFW$_c3|Fi@ADxdN5+s;7nGsFq({h8so2zqW$f>A6FyXEgA$e=&aH`oK0
zag4#-Hi7ZmR$*{)G3-yd1Yej#+i6z`?|b7ib`L3s=esmW*A->lyr~g$ZeN7z@~_;#
z>Yp)2hCS~_o|jxTJ_?_jOsMnkAV^^D-m`tzFzAdLr?qqt326?4iqD_$<!3FD^Ur;L
zV6Yu=+U~*kTNR+eXj`&`vEHJNgkT`=4yrYb-S#w`133$NdxSL!z9WI+$`&x5unJ$F
z98CVQck4`D4N62kID2R~dT}*;Y3O-=v||xIu-7I-2baLgnW5mFX36J7>7cH+6#i4U
zrfc@8l09=DpzEwNT(3KGu$cYj-Gb!EDJ#YxOn!y6`QbQWtsHGty$&~y9K>-;ve3NO
z4aDo`z_tfJu=k=lnX=BF3T-n*|Gd*7?(21F+Ds+dc_0RL$Ca_Ukug27F%=(<i-lX4
z6o|j&Y5c=7-uvfw<K#UCWNhzG5G%WK-hG99&av^lyw4cs0DA)1YXBAB`!Qt;6%}6T
z;CxFCVoK>ru6m-B&y-`Civ>rp`p;xcYZ?m`d;(njUX9VFPvP-AA<3BY0y7+kfTiqJ
zu>Mbj1e`t0i67nOUyjoxj~Rn*AS(nfjkG2AKSW@}ZyyxwHj^}@Mxx*3MA3-o!{~6`
z5qmt|pupEyl%%}}b}rK(Z5!8Ph@c9@n~J&U-6aqc=)(2J8d0a+MqDH))3<|~Aza+d
zW(_ZR?TB<PH>VA(CRtPW+X_e|F(`ZAigxY0f&LD^VVBn@es-`5=^6Qw|M9vH^IN~b
z#ROB*ey#>;>yJX?xyksY#*j>zuTO$AKLU)n0-dWB=!mVoj3s*%ZmeKusw=LNKi>*a
zJKq_D7BZLiEXFmPYe<!Z$|N$Nf$`F<LA&e>r?5zoYEN;XWh%N9=BHwx+$D@sSB8q}
zF&KBzfGpoTn0#2Di516+(bORu11>~BlT$9fTv~)Fe)ZfPqsPomorY)oE!e%;jHY}!
zFWR#8f9%TREN^d2W3Mzr;n)(czHlo9Oc@I1IZE(uACFt?e}UhM7|H5^Vhp%*llufF
zBym~+21k`cHFFCzh8ck1c>^bCoyP7#PNKqoS16rQ$R)bQV)b|ArF(aQIlmpdQds`r
z%Mxy3f+;<=@F;2|o6|YOmM+R=JY?(P>}(w8=G;9C@33d1r_`EyvFFKILm`dHF5m>>
z1)>z&yF4&9+~8Cz)(y#nd+hJ;UNKxE9lV>r{;LSZ^roBNhFfS|p-c2;UO@NO0?s}8
z2paa~!k52{@tN}kXUUk6xA&4DCGt35bRiraWP4G(XM{w0Vk^Hb*pA+@eGK!aE70=?
zwMo}YJt~%blYAL!NhXYE*}4boK~7GaDBOF9t{TU<A+IYKGe1PK<k)0LH?<`(>qGJ4
z;!xJTHsJDJO7P^SOK5oW40PXn1@7ZUgGBroV%#%1+atqa%Rn=%I4!}5Ig6oOv>mH6
zS8^It)yUeH=5&Ua98t<srH^jdk)dxs;LNz&P*_vWpZjh~LgxR3JxZ$dB;zmJZ2JYC
zC+i`h{x7Z=W>5WUm%(G>LG(tEEb(#PiHqgz=-*x=;`}QBgd-Y7cdG@YK6@Sb)%@na
zEwZFx6F<XvO~#L}bmTtH8$_qw3&)O=LTHprhR7j_@OHx;NOWWK_ULK6K=rZPwoWBj
z-Di)x-Z$}+e(4eZ-6^#3xd?s}nna^7>5#c+1S~@y&;55mfoMyXLVRBeSiDvvmI~T5
z`4a{0vwGmSua9?lGy%rmJOqQ*UBMhv3HOR+g~pbAU^%c4u+vzLWZCQm+pjZGW3nz0
z9NAGcY8!JlMiy}m_RZ}7@;@vzzrbgis^grk0^)8`DEhs64E}1fAXCL};Y0mn^y@Fg
zhkD;|>@hvsz4jS2kE?~DRXcG0<_=H|I}9oP1rme6_c(1FVoGW#a}R`bbJR`gfRPaq
zxa;$S->`hV)z+ff_wL|vmbHJnNRQ;*91E8>cjBH2nzZfwV4CI}fVQ{_LJpjSQg+Hq
zes~8~`3jk1`<=w{M-wEedO@k%SMH&P3Jn}8!lB>4La*cwW*>P7g#{&C+9_GafN91J
zLI-mBxGg^Q%fl(ocBEl?G@MzcP6YSYxJd`vC98dRLhtEocy+oe^}YCq7mtrAa@}su
z_M*9%EEvFi)wA%A-5>1v*3CJ_Z2)2TV2H>}<-9E0P+IhrE7)-pFW-0ul1Xz=EL_fy
zYEYq;k+&f9qBV6HRD&KNM<70`1XreX!>pEjh+HXZca@-ax(ldF9-uh%EFW5@fu(&l
zqW0!RSYhskt5OFOucJqB-h?k`e|Zqi@eJfU2fu(1Rj07lUI=A#TToMILH{W)L(_%?
z%$~vA0+Tm$hq~3se)*dqsQM*wEISOPqn%)h-vq3B>djp3*Kor`3+7B7!u!f;Q|-^o
zc(FsLoAz8;{?SBTvIAJZAmSYOP0r`N-%aJR$RPTy;5-<!Gh|Mh7QMDwjR<DjyBROn
z$3I5Z>^&EPM^bdDJ=cUz6JF!JMMk7SwF=rk`eFPiS2*<3l00}2hZW98c@JqUIv*B*
zHq1k%5y>dt5)D%EQxuQ7U)28U53V#!fNe^V*gXC(2vLbYH7*hi!({2S@oIE+g)XiN
zwI|afSSNV18PR(fft&6>U`#T763OyKA5~=Nnnoe*pnq{oS{(Dl7}3`CH4wNa5I$Q<
zaE9s`+>-Nv`7?`A`Bypb!1}mCvpimBvKYs1=z!8&^+<0pAMQ*EUjDTeBl){fP+y2n
z<&LQTWGmizG7YE1$&!Ll6S8c8-H(S~0Z9bwD8(g$P*lw|<~Tv=$xxu><|Nyz4HUXn
ziT^~l0~&c5b`^?1tn)!KZ>E6Ut{zP84!@2WGBO}+9FAGLhePQ@H*mhDh{wxh$u3bm
zoU~$jgpbdms$Z4%EYIR-gB^)c4#t2nCET`qrs(dRD{AtzB9@aI!S`ku7>Aq&-yRdT
z133jlqQ2uy*#oSX9^w|gq8nB+W~uo_7mV7n5ZcCXhbo$mNtrLOe(eu#L5nec6st$?
zw1>d_!M9N3t~v=G#b$~-wgFdu2S?TC!^CB#xYJyL2utO7@zx0b#5-*&KGslFeWZw6
zw_S~<G_VYj<$4Gjy&QXPO~OyiKO#O_?-sE32KR&c6QnVV_?a>q)TqLc+?MS_C+p?d
zx~~e>gv*oj|LT!a#+XS9Fauqd*}OCR5^UM<2)AxgAuiXR;<TtQ=-r>s7oO3Ct?Vo}
z^K2WQxWVSVH)G(HsROC>nF4?R*wWt{et}QaD~OgafShx(I7m;2h?m}#+*))WWlisc
zpu5ZM1><o1QG1AY*=%una5X00%7K||554`@F??NbPaIXBAXk14o5vl7{p2338(oj$
zwRelIwpGH69kxXHMT7Oq*TU&o6Vf&D9xpR346AFeV17p*D4+@nR(^@fP09S{MhoIF
z{1q3S_5^yAo^XPq5&V&KTViwI7et(r$GtW}tetiZe1}fN5xX)VCNh+By;sahXR;1`
z_)n4V2wj-z`3s!4%VJ}P3RG@e1XVx&K$@x?bne`PX-g)9@-K4~@ArYqzk}&+9Rn&X
z^5V5!E^ra@LD*&>M88K*MIEXe(f8_O){VHrb$txwz16qDnsElC%Oe_eS9d@_MH~eE
z7tPoFO@Wpe8I=C>hUX{i(KUxtFvGr`6CaVsUDA9mn12L;f5yRymQFOZm7#-GtjXpN
z%@}q}3g%0XLFC?xINgTbmF_XWL~k?ibkGW7YFoLh3KanVWJAyse{Sf{+wjrVnoeJT
z3U*b+VMJUQ_iU~e@wIK?uD`j6rAxlzbe6|@d#H}}9lpEyxes8{P)+)1v<21991nHr
zR#fWy9tBTFi>}IDg@(!du{v)#Ect20IQ&gm+TF^1|7}GQ&YOX<ypTGtXZb(=3vN5C
z1nMg<LMdZ3G%vXe(Rcd5X7f7`4IO|^-5=~ZbcYK$`3Q23M}gxT)^Bz6kmyb<2f?30
zQ6$UAwXyE^&m;lS%GRdAaoza8oL`Q%A&odw#VrcpaGa$AN$@<&N#FZH9^0D>PIMOC
z+0Wj;(_b*=!*{OZh8Hv!@4@YQ1vtJ@iTXcf=jHLIS@-8F<~kjNxiSh=mocx(o?6pO
z4uff4-4Q6&+K4{yZ$pUb0vsz2#^=+O$XSC5oM<)=H(xU#bCeWGk5Lm|^1F)xKVEW=
z^=--A;gLX&niKgjFSOfm7DVTsvUz7AIt+Wq^%#tUj{kIENs=3CEKs1A%XZ+(6DHKV
zRfVg4u@q`Xz5y@Zi|vrAc;8{k5S(`g5=LC-jYA*to+ET<jLQb_mLCGkn)jji5&@=`
z-v`YT7r{(F4l8}yu<kayC%RXHbC@xRhuBNLevqTFhrWT6f-ROV`oYa@lc&!(Te6JJ
z!gtlH(=1(8T)E~6^Q!K2?S51Z^V9^yX^#qj>68qyyRsfjpSy^(J{4o>0K3b3ZW8Uf
z{*?c5xd^-O=7Q4{53bGWHM+<C5LLS?QahF-j@j@7yQW`&rAcNar|%%w-hT{YPDJ6(
z{!m;Ouoey5u0#A}1IDK6=28|}@cK;}G=I)aoVdLSgICz%z$IHUbLtIDtiA(LYO>4;
zU`>5~CSlS!6L5-t$oK4K4&{lq%sSG5pfd~<tDG>W#~gqC)g)t=wP9QKB9zXx<nP9-
zkQ3{fS0$hY7aTI6^=8F<zjh~dSpyn}-QzypHzm@Y=2X1eNL1B!8lI<~MsbP-Cyrkt
z`Tivz#UnRLhB`@LSJ^ahpRkk@J5;&(TE9Wvt(9QQoM?AFSwHOPMSkjn*DzS5N$die
zFfWR^w=Nn{!5hNu0~xCJ&XPEM?d2Eg6@$`gGoo@A(e~fbP<F}?0`e!ojwz48Dc^|~
z=bRDg9xQ;AVh^nE+s^XdLE!X@<5caVV79j+Nx0^YM`q{|ze}%0^VdFrq1l(f<Muhc
z#ds2Lt{akN>1Uy!Uz>*RTnJY`>eFiDz1+wCSNNafDO?eJm^ojMLxbxt5G}iaXSy|r
zXBu<kT`WR}(~F_lUkas}6VP30hDcbZ%1yO-3*~i>F#8yD_L?_<V27YcoRt8jm#&NK
z9F=I#MiWe2-wh2V*7V>PQ}TBIRa{BuVCkkM{5Wm)y_3IlrgyBUD(n3J`OSDg3v}t~
z^X~9cSb}ZOmSd2;J71^z2ilddz_cy8R2n>o3tYbu=Uz91p1eG+!A6mu{vk`}@it`7
z6-8pYU>*9Z>p<}=cMMp_@@T<hc=7SUG)hO8W-$)Qqf3Jc{M4cz8VKeVnNX4OgAaT#
z3Q`nz;V*wPlKv_lV>%vlF6qq6afip6frsEX-XH77rgMj`+mJGuHR$fN0#3I|v3+|y
zj8M_0m8^@RpvN+RPT6jPnVy^<FBQ4^J>f=GW#P=pwQx>Rj6uh?a#x4AWA3VLm}uXF
z+aw;4xh)B$mi3&&_I7M;>;`9%7nn97Bv{x}%ZN7Er>sbd-rmBEfvHHljLB6$6DsM{
zqTl+IX>40AoExS|#J?NR;4=#ow(kPpI3p@M_$`!P3FX97Z@D$GJ<W8+(2!mo4+{5B
zpr-EyIDg_iMsnBDJ<o*y8f#0!Qpe!DXA&%Q4d+vKOC&Bi7qF}53d9fYN9$kmq(0{h
zCriwzbXF(&Jgfk@P1XdjGw1oRNQ|oZ#`qKR)X#r2^ju-N|BOgJ?T9^HS@{ye5?#>m
z^HWi&XBuZ#cL)dAGw|+FEs`{0HYCpa1LnTkSZl2T!V^3<y~Y9pRobxqT^p1yx(j#A
zFSA@kENo;>Edv=X(j{8NJ38FMH`a`?{`WIR_8C!!f9HT`{2bimZ%5DO9>Va=PjFJH
zHW9vR6$u=2T$9{PX;S!12za9hLmO;p^T|}mKf~_IO>6*GIRoyLv)PhYF?ZADENWjh
z<m-M|Ql~u=8S`~F6h@xmw_Lam6EuILgFMfb@fx_W(VPmijiGx%KQvptgrrH^AwWA3
zrk<*UjJQ9Xv5GF{6z=9@p0se{ymraEJ&gZi!eRTF$#|;xHF8=CBy`RQsPka=#Wfw!
zGRGAX*ABo`k2`Q>QX(ol#&CysMPowCeeT3^dvem4&6SR>6!n!U(E!8EAhb)7Bvp=u
z8tBLN)=XHRT!@!Hl!BaFD89Xr3-Ki$kaP4Ro7Ma*nm<IJs$M*a6BjUU@QP?qO|+r|
z@APS#-)m0${$2c%t4Z&?)}dW_b~MItBLDNL9VutLA-GI2XG|0>bY{D~7&EHq-wSm+
zOYrRo1zJ<^3RkDHJCM^DblC0=`ZJfoZ`H#Pvh^#Rl|6|nU(J|1Z!kREWI}~EW4P$d
z5@>$+6*HX}=X3+*g0wesRoe12v8Mq}$-Dwr)~O8H&<L?L?T~a`k4CaAz^qeAXwi2J
zhiF{IuY;9HzI7S8{7q$90Bu^8${6PH=_s6S$gA#*1EKCM$!aAp*pYD^EE#9#Y*ilm
zGgf@akT;;NH;HAl0BfH|f$67eyt*I)yjD`KatG_!k7P{F5&7sakOF0X!yuwQn|I1j
z<*Ww@NZ0YFe6#&htZy=-`<JZ6*{bGLQ;Ts`Ci`>se{|7lu@U!mBb%k%TFRN<%z|Hv
z%uyaBfDN-`$S^kV7~$x~I%zJPso)RF8w`eSw^5i-qzw(-Dd1DdKGW0Rz)o*D^2MK>
zu}3ty^-nwmQ8(8@dHpWzy{$yn{`!oH-#Wl%fHCKWS(DcnROl4OPf%?w1iM?E5Fy`y
zmvxWBdrvcx;eH*1TMg-2vuf<SJsf|GV!6SZN2qJo4bAtOpr768ZLX|CL0P#-$v~HG
z=w#h4#c}X<vH|^3qXe;j^$;FoPF|n8iFS8lq48<}j_tez3Ccd8s`LU1i;nQ_23mBc
z>><#8HUxXeSdxIxp*S$~5Nc0K<hCACCJk&>k+Sj=Up;R&SKWUYR>w5r%q3@WYU^1@
z-q8q|N94#j#v>Y{WkH@SF`}cq5+HMbDV}0kpVFtvU^PpNRJB)vwDue?y*rOzvBRAD
z2EF9EGW6N|*AyL#2#B6N0q@S0oUHl<*b(^+?sW=?rrk4CJlzg0_BQl)pA>W6&*KH1
z8LrY`cgP#%hbJTT$ygENJI5zNv(F=Nh|#5s9M7YLQxY!mvWA|@6S(Nr2F$N~1oaU&
z`ESf)|LS-uj%Bl^lZiqi-y98pI&8`F71pF;rjS;z5pnI`?eOE$^Ehd*0@+o21}4YJ
z6Ze_X;IQ#F8gBap$xG`X{`^}M>K&5gN8f?8ul8s^!<O7yVn#gb{ZaAL9T=<g8ve^>
zv#QJ}fV*2TXL&FdKFjAbp679GhC4yI;ukmY*OKg;d<HwRRj9d0lMc-+gy1Q;FtKM1
z>JxWZ#Aals+P_eH<R*Se`ery^@BpQ`rzOW8nGiunmg`J*X4v&L0R41X?*5%ENJsw_
zk=NE#D*Tt<C_fcz-zR`!^+RlXXNgr*g}N^}$NjA`p^ILXz>C?6<mfXkB9ID;<mYY2
zG5LeZ_T_5Cs%tQ<{(g&3^^bz5%XH|~xJp=_nt}Z1uc*6eEqY8Cz@SqC&a&+}%4RLY
zAfK~b%+zIQ7<e8A&L^PDf<!!dLX#Ze)rhh0L0F!wM1qe#Ww`?8Ok=F{p8l(R*p4P}
zU-?6lp*#(21E1ri5fWVfFWZr2o`BG$%qv~~oNFER2wWbq=UMSfh)J^q<6(_l^5{QM
zUtxhx?vJo^_88IC8Cg&*Y~r@HCPBp6UQXa>0aw3tgZm;+(Uxx$P;mE$<l{eP^v53?
z5M*sG%sILaU%6S(Z{J=)Wt<w;*6skYS|2{%qDulI^l9zVB*qPH$1{wxGky3dP)ar<
zF5hlJ_|Q7MQuYL+NftT=sABeMBdTA!7V4%{{jbkKgASkL1PiI;(0FyCY0(M0K97JI
zuUQVjq#bh%HiJ(y!jcS@x%~DOdcs-H%S;1ze;!N@>M%aeh`(ID?`F`>xWJp%jKS*t
zD!kEA6%sr6I-DD$L27c?UPpuF-kv;$U2~b|ea$2O;Oj%^$M_~K&t++dMi>MYtmfsj
zlyKwiE*QN{NEVD$Caz(Ly!(XF5^Ikp<VM)jz9~z=@!K+7Gh9GZl&m4zqaAFvjKh`#
z2ia^Rle2000FvGxU=W;$*Y$0fw^g6`30$CP!$q9G_%|f<MewT<RY>+Cd%8&H0;E>5
z4&S~ix2x`-LFr#p;!?xIslfwmZoEp;YV#5_Lz^LJ;yNzS=o$#rM)R$6o<Yhejw=iy
z5S6|ME`5E08s`*9{N9o9YjY~D4b&snW{k@l)sC$t{UF!D81eEkFy|%PM`byK)2gSu
zVBdMk%%o3PzODx6_!yATm`6DM!c!D>DZ_LtB^so|ysx8+@xO!3*zj@yx0F|cY3c{?
za-IZXb7iRVG9P%i%9xyIXU}8zjA@PDCtP{08fK>{6U$LAL9D)+YuaK*Ue7Qip>>0(
zC()t@87nR6j{&MLu%e#l*p9(Kgo=Sxu=4kL$cfT~+Uf&P`|cBt9H&V70zFvAdgvJA
zTwPo?`zsip9Dr|4XJPO9>6r5}j0@^-z`VeHQ14RA`MvH!{Nw{gp*A#R2V;;G>(E&D
zNH8u7fiR;k5W++*$Dxgvz8M65<UThw{0p3Jwj)QjG5&GlKk&R;hJ53@!N>UnwzKo@
zxH=OO6zR%~mmHNy{`JMF+0Vg8Q$Ss2K4P3(sYJS=R}x^{%$>7l{>)+8qNW@jYOi~h
zorAkk_1tL?OwQm+D^7_vzBQ!gn}dLAm7t8dF)3{-z>>2p|2X*&*RL`P8xNFV%2k$k
zRX2egG7jrabGV1g**PFU5n{8`VM($f+YeZB4hM4iuOV@mn^+5mtlyKeL=)chG3Qy-
z1IYi|1`|Jz#6V#*-p!MtPS0#O;R)9L<C3}A{fw3T>L}z5e8l|;pV%y#ak!$5$x=_|
zT$;jWcxLRLtD;U9{rdxUx%Y6gYujPYQhnn6XFV6a<|)hDpB1HSQ{(?VX+kb|B;(LG
zOA+df;O;$R;^3Id$N0x^Rs&kJ%T=G=+42jjA6?~h#`*DW=O%;CscyWp)DI2GPdpj>
z72TPi)~O|(-#6|J9(bFM0@wfCV)@SuaMp^eY$~v2XE0or>%gd;mKgc_4+s+Mi&`S~
z<B+cw^!43NG`7m-_pvO5vTQs~5Hc>|tZy*O%!;TlQ^TRPid1WvH9e~;Bols^k>DLk
zIOBW`Hd-b_8GBz=o*N9K{_BRlZ3{t=F<f-WP?IKW%hHaQMriXl4f39(Li>bA*f2T)
z2KC>@DxKe$OAP3*V{+tvEz3IIGvfRX{}uTy`HzoiG(zLW|KLH^Pi_oZia`xpFsMnJ
z6n8!V;SqT{rLqlOMJ>FsuNpZ0HbnEYnGo%6K>ZzFV30>5XBvJLc1YjD>irvGubm7w
zzh*qj+;46%rn5lVIT1Yebb-k59<=wohKez9oYO}i{I=}{*q@Xk?PapGeufvcwF1@{
zyo2x+GQ`VU1V<g^sizLh0bewbR2PJC?OCkLoaHEy|K~sG(0hfd*OK7FPc7on?vLus
zdGl~l9y~s1L!4)RM2qLLL~!$qTgZ+M$ewOSe@}h`Dc?_U>Ke)LC{LZFJlzH3G>mE3
zxmO^VxPjL>{|omjSVQs~#uoOU!rKcih^)yBd~{2Oblv{QclA$3@z_(6=}VV``|)E~
zAKuIQW2-SBzzQV?Dp6%aDK5I-g4J(la9e~&z%uwVK2KMp$|_?~BiNMaUrT{sFHYhK
zPv%5;T?Y?i3GSS#Mc~L$P(3^wt(R&MVb=hv_DUh<$qsO3Ss7Eji-L(eM4cnMp%@T)
zk{)5M+e`G^XHD}i*+Jgb<)Ha174)5raor&Hvy8e0!iBT=i(~(w;Qj_KZ~jrZzTTdc
zHfu_9zN(>EDMR#okQhs%Ov$1PVwn8Ul!kV)Oot_70@R<vh+P9*<f<}o9teVE$FGC?
zk2`$G+9&X3?0c49sDZZGrM!p7ZPxv-5e;Haz0x5M*nUw`AbE8dlTBWNd;MB&<;N8m
z=&VZ}?K9c#Rh4r;vyBrCkH@j$H^JxCTj)>t0)qI3Zmq{UQ0RXcI#*>uRcr;eEllF$
zrC%}rPawk*sFLW%cVOIjInwl5mlR7yXabKg`S>GPlzIyU#bVbCQwcAoUXrgZ>NMy`
zIKNlUl4c!<K=+GL5}~XsAN`~n+$VF~LdM*G+xi3QT&2)*XahbrV;TFX0EnIV7w#^#
zCvUqy!Nj-0FnNtSnUNw#Ohx*%Gqw&rb!(vBz8*sd)&pgES@&I&L<!6b!<EUAjB}A(
z4ST1lj@*Dh4HRg#=PoYviW=>_X+nn<G(j8t`sK@#@Y-1+@#=ocyIU*pPyVLk)PseL
zcfA2;AH0QEhRYBK=aCSt-UJ4{Dx`i*JZEC8M-n{jsMGs&NP3|LrG^_sz7gzeP3eGW
z+XjpY_TZk?nh|&VSl&JN6F<7(61u7{h4SGYU^$`>Qg;=C*)co9-_j%!mFKutM~yh#
zAA#;`fAiicO`K^g;~Y0Ca1MRPxt^pJ=4RQz-3+%Or2~hs-Btp%cpii~VsKi%2Ru!s
z;J92BH8&d2?gO8|(;)+T6yNh{ZMJljjv;j}IR$S%TF`VGZCbx|1FYI)Pjiirqg17d
zbH*mD-lE5~`kB#!nf7E4n|1jQcgE1GGLRrG;C`=51y#i{Xxj*&>upUnn~#Ch_d0%s
zeHz+mCV|2x0oCPxp=jU|?s`>&e=W_(dS0EBEnSF1-qzyK+;9l3lff6N=45}Xkd`bG
z66@(IwAecrI%zZNE6AbapS|#EM;1P8cn_k3&v98D<K3lSgYkb8@Kvt~?ce+!ww-4E
z8jG9wBhrA)Ccp81CR?Fa$qGvgRQZK6GBkdIK0R1lg#Ib2boJt;(9^<v^sA2XrC-)@
zhx1v-K9jwl>n}iQ^<I9F9|duGH!paUijD<Mn7J$t?F{X)a7r{deVqxrmS*y8uUmMh
z#b+T@tqpe@C=%nH$&jfoPjlMFf-<K>wa2aJbZc7haFP~@J3oj>J6$-TuPmMkE5ZC2
zmgmjrmH5r?;_Rx#;Qb^WGPL9{QZF71TS_2RdK;w0H$*mLJMiK9H*oh`4F3JkfI^r$
z2Hkjr@dstmPijKTd{*MSTWVCk;3NoEB};^{@41-hE4;VmYHmH7FImf9MVXlov6a>5
zEG(GEGn{cv)EB|$|Bm449vzb9^9f}Yb!hEkV;Jyc_Yxglh!nEi-$#ka(F76P{2{qS
zk1kls7>g`xBtI$!j*fkZUA2yoB0mE!i)P{Rp%&D(u@>E<7NX8!_Ric~<mUY_oA=t0
zij9rw(53U6(`Vic;i^Bp^D8|Vo3|AG{JKSB9Cb+kt-TnPYl2{+O(RV@z~3MY&VLjV
zH$%o7{2j{O5&wp+D1AP_NW$mnO@T&FfR~+vNsHDJ$T`1`7e2A(1GHc9JBBpCTaWwD
zyfy-lGG_Ftr{6I5V>J|Rjpq7mY|yvhEq~Zwft;yi9PexWyyQ<Fs26r%*z<KLlc_|!
zynMiQm=Om4T7Utqe>nfoADG)#o8-)WhdoZR)bQ9tP=04dtJCv2^N&;U=$q#_X^jkN
zS;=-pdUv?;FPCtx-5Ru&)G=?A7nc#}k16YOB@c(EK)G85YNInaGk)!K)4kYuY9VYZ
zW-Qmt1nk_*JiV^-v3lSW3WvVHgy8R7V0Z`q>QW~8HzwiGun<Td+=x<_cpO(|M^q2H
zp+L!%3llv=&FBwMwqO<J^XGAh_chjS&x5X2U7W{rZ$!lqthsrY<(;13^j)@ez|o3a
zUNH&R9eD+hpUcqw?#bx;wvxYe@h0xLE=Na8gfytM0ks#o!mAQxk{VVF87IVGtoDSj
zXE{{Cvs{tjK_NHqs5)_7IKTx*K85D$CS2|tj$^sc*wv!Q8>?7B<dMTLdotU7d{AL$
zqxyoG`hB>g^)`e$nK9qyOUZ=<Lvr!lAX4fXi+=ly`TSw;7`Jq!C}!VU$e6PbHfuAk
zNzVX(N+Tb7-4tnvX(H6eW<s%CCzM`zC8?H4;GF%9QNKM2E9>5&vgsKd+tJ4O$nT5r
zod(2xFd%m$tcf&QfyOXiW~JLiG+loft~Ke92ulqR<nM==@|mE;c+9)@S>pA{gQ>ga
zVab^;B_e+9hMC7qY3k!!FhceToL_YU2ac%{!2m_^>Ptm;{*R*baHR74<M_2l_TFT-
zq|JRkr?e!MueP?dv{fqYODZiZN-7CSl0=F7e2#8XDUu{4aTAgjk|gPOet$u7uje`E
z^Lf8tuW27ZZ~QE1KRW;)q%l^^iL*HT<X;Th9?Rd<VBfip5(rLKBq{njXkn^CLu2Y#
zCn80hYfT~gzn`%9=|jlo;_yPQIf+c4#94OBQQN!oVH<OD`Y)f4_s<U`Qyq<{b?8qh
zkbZ*V-Hm+0`T(qqjf4L?fc=eav2n~-%=&zSgU`F6>EvR3FMk`ucC1H_k%{~%hnvhl
zJOa8lG~<vb)<k5!h7;7>xmT?Hm6M)X1<H!QxgOIdoIFU5aOLSZZ`csrB+!H7WqQQw
zha#v2tp}G6iO}KBeD-o}ApdPSX7x{lDK5&a2XM#5`|nL2JG9_kHp^viyM#V{Nu0yZ
zVvKw-fG;+9&o|{=2FFqR(W)Se+x6r;yZ{^WaLWypq+2n5JLRp$&4a$EOX#6?hgbK~
zCMVush5LqVzFgSMFU`A!69zbvpo0ojB%H?2j2wYmQg>jx+iaZcr9p=NItgcsvr+g%
z!hWZr!m*6$v#(8yxa8mB(pG-q_GP`tk%G_gT^fw0=5i$baygukXFRU#pYZKn4hVzH
z>9`^4B==zzyJxiF(B)d#AdsN#91cb|=+eqpCLlRc&npbfz^4g@)Thu9Bz=qr2#2|#
zm(Cddu^+UO?0Ks*XSjQ93gqJFqcC{%0&Lv89{*0$qV1dS@j8qFdvQ)aPP*6y>w5&G
zHOm%sf^I^yp#tf4QlVK3*gSozK0Q)vPhA8)jGv=`r>`Nl*;FtVy*=m1@_KD2e`2v;
zBfo4=KHhz#Og?S+jk-~~<eO(1D3n~rp!5ioT^WR1SU}l5=sJdE_5g<J(LsU?v~W3v
zT91zL!<uC2;PgIJ+x{Oae6XMeE>B_e-ZqRexq*YJ1I@d(1%p;M@sYKDFzKBQ?yCBY
zw<ensiS{aCnCUl8{cSb&{n4U%a<N=<$yc2Grjg|aWAdGwhVj3~s8IjJCtO6EIXV6K
z0Z!R74%0>}Lt14pep~VvpRa1ik!P%FZB#TQjMSr9GkbZ-?^Modd^HqpSqP(owaHVF
z4w-py5vr7^kr*|Wm8uyFy+aIX<$e=LsQnG|^46hN)OlWdB7ql24?@+o=OC)tfjBeP
zq~{+!ny9svtC@2JD&Ez=_2&=R^Y#m;WjTxAPPXCP%@0s%k^*%<av!a_CvZ0<HUyNf
z01noqR|D0^h^S(8J|p77>?dG(lQr~ie1aygzQgY3vzRhQ1I6-n(0zF)PWE|*wMT!#
zTDx25&K<;8I|WoNZ8AjfNX7EfALv%Okmn-p=+XKDj2Kml+fS6hw?VepUhc?AIzw4^
zbvfo)r-GGrmC(RC1O*|l#exO_U1&Ohh-7Sp-FJK7i{K+hx{n8^BcGwZuot`)j&p9A
z#@rIKY#hJ+GyLQ<iQkS&h<1ICFDo_ZmGd@4Rb~P9ER5urHyuI)`%X|}&j4lTel9>)
zmY4sy8tfPMFea`)jM$-09t?9Nfjd3%lanG%X1wqlUPk1OM+P3PsmA2v#$>p3ASr5G
zfy*<PN4D-3o<C$krgx~4wEJ<KpJN9^oBxEw;JIk_Sed@|ti#g%9J-Ae4KCO9m{3QT
zo-4Pf+2aa9u_}$Zfmo-q%#xG;Gz-=0Ljij<VZ#d%PWzCEOz?t@ZK)s$_`-up5ER-}
z!-n!c2+2%Ali~Lu;rCUzd|8e-ZW6H0p&@v@G{S?TSQvXO9@@_Z@rhkF7(V9;3O-l!
z3w&B3FvkKGT*^f^oeScE%Lmcz#Sqx?-%BvJQzkLr_3+!f-*`5?7_y%{gw01yNz;&Q
z%!s!G!P`FW)V@yqEc+BsUojz;E3Tp0j9R=p%z@r!%(L<tHk_38nZ0Gg`Q5XWiHE~-
z=FQ#4x$R05>V&fQ$y#%|=&mO5k?sJy;bx>VZx^_A7Kufdt_ughI*eh~`TT!BQn9D~
z1~+4$BJs6o5k|&8;ad(}g<pZ}c{xy<@w5`5awO|1sz2e&E|kOmnI_bwnE6{ybYjEl
zQ)s->glw925bm+O{DS>A;q(l3B6^gZuc~Q>qn($+<D37WV1ysa4b>-G{O>|uy(`8U
z)$&&9x?E5vo0%;v<gK&~dE32)H1A9VzHS;w{?3)9E9>>i73%>cvg-=?u^E@1Rx)Y@
zuj9s(9#}Tgn9jJXNu^U}bBhn&gTdL%b^q`s$XwB<by{*TC!J+9vvg_umGg|7U%>}l
zmgBva=R;chXxzdW+-f7#sq(<9y!81xUSek~9N8dGqgOnI4OhS8@2&z|yEGHyBN+F8
zQWebHk_ZcooUwn2B~>oF3T{bB;=F>h-1^OiBwDu~+C=U+c3uq5^R~k^yK+%uc_@56
z*Nu-ql%ju%Hgn9oLgxz`+Ek-N_y16VYyJjQtwabLOYDi;x)kxU2h}L;YZ9O7lBZ=&
zs#H7S5x$BXNJOj<9#-^&>*bo@Kek8wXXr>KY%RmG+qF>Rbd2*wHxRjv=0rpGiCbOI
z@#SB)z>UWa)H}uqzbv+ZzTrDjLIb$H3v%Hb<G`exkcSh(_YkoCIq$qJlPh}j18?4x
zqHo9v;kQZ=d>XGodihOw%|eSTf5Q5NL*GH1ry*ohgyZkBv9{$2#FY7hr%yaaPSoHM
zy>l^n+fj7%)}nn*$H8~hTNGVFK1t;rJX?7b=KR&AY5!jGS#j4mO?Pb)a7P19e>El5
z<6mQOoX-Dw64XQfCOlro*et60bjsUvQ2%KtmN`*)ttUr|Vn1TQ3TJ+SA#;)5vxXTu
zj%4!XPZ&S=C8Rmo!!oG~=^k8z%92ptYUU1Zy&~d9_IL9oTpLd*4n&Mzj=O#p<GB)B
zBD&Fu8jHH1?Mwr>-BJSOE$zI?13O&#BOV2|ZTX)+)u2^xGk5f3E{Ii_pX{?5z1UKL
z?XNQMaHa$4ZpZ~^#-i}P{DWInqfW?TOES_w9mYB}z`>fIc<i(t9cg$4R8OXY;Ku>h
z^&gfmsZ1AVxG*2&vD2{G>NXZF-+-;kW!$>C8q_4#mX2N}OC@uq;tdBs0#1y=^xAyT
zx;mSiXU+Da;*oG|6LUEX-2_Qj*}XV&8ch4ba_qy*usp1vAH1Rt915@Dx87H9L$ek|
zqrVFirMfsNRf@)MRY_^h2Iz>iAzw@^>Bzt%knrLUn6Mph+uv(Yc(03jx*y;qSqIu}
z+zu^OJX{pb!^pqOFhk1^eMe-AB`G%I$nlJ`uy>o#VptHmi895l?w`2wMO)zF2xnNe
zK!L<M3?zah7kOPdE3)g?TTorXG8HRsWBZcfoZ)u`a;b=M3Ge>q4jY-%Sq6q=UUxdi
zE%y5V?h70bMDb6(Y^nHU9FCczL0&TNm-3No{NPQUV3pa-_n2)4VL>>Q@8x-a#$$LI
zpiPvOV);*p<VkOECHi!}<d1f;d~k0!ILi#<yaxnw_ctYA%56X3mM|8P_IL<zkaG6A
z*Knq(JT+wZugaBgn8R}|#EhE-G3zbyK|mr7x%vtpJN`wV7aKu&cM4xz)WmtU6~W`A
zc8Kq)XP#JjD)@axD0wzl>{+T$1#83d4*$4~R&As3;<o+FMgEV^%CP~9+FTgXag#ZY
zH*>+4?Mc<xU6|piO>3gExQJ(Yn3m0>?aVEpa;E}+sj;*49bLW_wxW;|!=TBmlO8%n
z>?HpS{fz^l{h}^7ZdXNT=Mm^)UWQHdBT7a&aW2UAHzt?zzjSnA)=C$!FZ;vx9S)d(
z`#hfTe+7@SlxUDsGR_j1kYS7k;5_CR_xFS$IT9>SwsSnZTB$&NEKk7ZXSSsB!WH=N
zRe@$)TLz*b@5C+D58$Dy2I)x9CP{I>AZ_L)5Kk_IVux_9oO{7(Jo*5wTbQH#qe%F7
zl_KfWKLU$yHDE<>2Q*)^qCHnGfDU&Zf)wO2a?(~V+ftX-vAz9#mSgulJcg5;PUPmE
z)*;F|_872f4!^c}F6&WUM2(y}klAQSt(vq^a|h#vzK?~AlMFyhe;ha@AI5|C&w+EC
zDj)i?5Dk;f$-aq<HT?cNx2#W@$j?6nClZ}eD%i#)zxcy4FjIuS+lx8D^g6s+W=JM|
z)}zYIjhEn%gO8iv!Tf^fcvNQ|F0*8w=-r{be54ud-|mX90!z>${W?}GU{0=&jl6X9
zFLY43i>=F(a7$ATEM2II$I4@{|3?JupS2V=MJ@x+;6IR1c^mw4kAUsd;h6PqHE*vP
ziWY(0_^i1B5B$?0EkP_VKC7A&NK41HpIwKZzst~N%rD;K-%AV~I2>Xb>mz<k8w5F(
za9<VmY3+P7T4|>XUyo{%wA>h8y}^*Ud=-F2RT#|mvLQh+`#^8)MTl#O<y!VPL+m(1
zvZ*)}_-zKHB+Y^(zCIzA=#3M%rj&8zI&2Pd%L=@$hhX^8$vAT{hl#D{gsZhjL&h)}
zob^|SWQYyvf_iJZ^Pvi9Hqs)^PX~}GUIS^S!zJ)kFNNl2W8zkHMr>ty3hD#?LgatD
zP+wg@tb1d?gt(zq_8BfM2f48~`f$+Ub{x9z0%krdN9P|EczLb@v7XcdnFn)Wal>U;
z`*Z<{A_wrcV|L;qHJ0%m|5lu?#4@RSCV<AxS~wSMNo)tQu8F2R6rGv{0W*4d<?=I}
zd~7Nxc-oRTbCk(|Ka4{W(aSiObs%U~<Ok0#2Z!-WRPbr7I5CBJsk3G9c@bkt6v%UV
zzgNRp4Nnx+UCj^M{1NZ9X;9Jl2%#Xr<X$afQg}S==X{;div#7YVN8($xjNaLTxiiH
zUgG=MHnj@u&$7<s<T~Ej*_#u6?c;y6o07R~XIVT>jM3l6;i2=&RJ4CKKYf!ib<ytR
z2Y?NgIM3(DTHgiP-@%yMTEHAJ&!HxVWj1defD{EG#wD|}*YsV`|I?TrVy+*Z;1)1q
z?~|6McTm$ml;soM;D?d~-NtwD8F6ydzmM%3=61l-LAP=7j-R0VycOEx=5Y&-R)X)f
za`E23Y~EI{M%_}z!^UQIE}!?6ci3P`Cyn?BqN!_O^mSvb@vg!!uOQs{<T+N%{|NBt
zB&@leg7vJU7PI;*j;V{q8`&>#{J37|D+|WLt1Lh9AWrCe-vjI-Pow+4F7PY2pz60u
zaJ9!Mh%s}6_A(W&)r#et(qu{Gw^F_)qZcRNv><|(Q+)2VVhDPB7Tul-xbg=YT+lKp
zR}Kp}?F>b7WwI5yQ+F8VUtsRWm?unA{2w=oF;CM=&$CRE7f3$;5*{1&1D}nL1Hbe3
zG|PW6*c8hW%DVnS{Z|-1f%)oAwIhu2z-?ZZR93GRCV!5_Ht#l^m~Bd2ZlojI@M6)*
zG!U)pLc!vnD0}rc2v$yXZJm>fJ9U`D%4P!Yo%tF{zgR$aKf>+XWr%ggbn-Jz+WOuR
zrrcY`Tm$#9eV04D-4TZ~eyS1CAZ?n|)eGX|IS^2%%cYiVMnQCrD~iwI!L{YA=Ufe2
zpRmrJktRKJ&6sY~*P|Kz%)79!9P@-87$-2HO)T@;{_`5<A2uNlxw52lxDJhSF{YyT
z@>HU^m~-<VFHCIHrz&4s&~b&7m!D}4(?%<k!n@`0BaZn72eCc(-a0Nz_=NMAi=5d?
zWl;PchxMxO!87kM1d{PEJ=B6!jgP>pw}qe<wHSW&4xo$8iXrpSNK|8tn*Oupkl9qt
zTmezyP_G$Kd-yICeUZU`c`>*xhV5&fU2t{D4#LdWte-WC&4jP}fYti-Fn-Eod^260
zwx&*pilCq1KQxAGSje2^VU~3CI$he(q)qGm4Kete7Cl&f2PBKC#j8zKFgErNx-#C0
z)ssN(V37^hoVmlLL|=t|!?)o0-yo<v--pYWsZo=RyWriwhdVpi6Fc*g(Nt<rBz;pL
zEB-#JdD*az<3ql@eiDrSTY<5SdHCOORq}xOsFhd8a}QL{VWibtF3C}i-dy|)JuDLV
zqF04jdpZ#Xe^%#TW&C25J$l41=`*e#ZVPe3TrRQf8f?h^fWHrzksIB2F=g072rv2$
zcfw_1*On9DOQ-Ogq7|`!f(1>q%HS)TZlD5ZO=K8vP&IG_O7H*|Hd!7^LoG3cH=&Lr
zogvo6icVqv!hz2}Ve!T*ob$&RPMrS_MsD7Y8m^YK;BgHUsYbH<p%hA3=g__QB%Zpt
z3!c`qOxfju)H^|r`_f_nCj_I>ZNOh%vnC7o-`E1>7V<Rk$W{zYeTXvWY>BX*eFqEW
zIe+G#PAsezwtraxy6)CQS)xf}wy<5S8NsaRGOny@2>xR8d%@AUeAv^um{(H5OOiK>
zrM}m=V{A|N@vtiK*6rfkE}epaICswPvJ@AGH$%a(3ovSPI^I%Zd4~Oap>xR>pdHH8
zc(^jj`gjxfMd;BDYTXc7OdzXN6-|7yKxF$s9G7;6BL#XyuO<b*Mj8=;?@^)hh?AW6
zvTt0pXcO*nlwx_#TP~~OBIjSlcn7w}g<6N3_?49!<On<Oj{E!%({`DnUJmOXRjZK5
zmU_<QvIJ_(<8a09EPSY~OQkOkL%%!AHN`xD8At0dF{PZ3ydTc5du>Zb>Ws$3t*Yp8
zTf{w|X+w6!i%~Rvnoy(@2K*jN;#sUs^Y&cl!q&uq&!A3j<r-7^aPoCrUR?k~P1B*|
z7Ms70ONZqH-@~#q)?~o2bR5ggL<1(^NL*si*W}HH(Z{XW`MC#zrp#bjUu_!XcpTFr
z5;!$q8!B(8gRkcb$n!Q+;>Fx|FE<EC#*S*7f4&jbHy99Kku&4LY!+4xxdT~U%UI4T
z7WATwsK9EW_|UC#JTbKmZW&sWj&KEXr@tM`Bt}#kUdKP`t;f93humP23JFnrFx6I%
zN@J^pk64$$Vq^r6Q6@yk<Ts>^e}L|@pEFkOBrHGH%*Ea5=T^tm;K7U{=!s3noPmmD
zw1Fl~`>T(qp5B1VzS-Cxdl|-wv`DRf87fpWMqvLXh=>Wsyr)yKXOABB5#;mVWh{vA
z;77vnB5gXV(2_)Ioq!ml3XEs}<}kxM7+<SEdqy?jYc|KwO*1A%tC-iZz*rc$w}S6U
zR>N$^AnZPu0~bR?SPqx4$YTjMt^bN?L)y9Q1z$jPNdv9#*;22LGN|rNL<!6mms_WB
z25!qy@#+Hn`&O4$u#A7cnIU;+YD5wvI+@>1lX!k$^Q`K(!mNK?yt36i@Y@ps)**lJ
zQZ8fJ#dfp4*#Xqhx&antQplZf5@i?lKzw!w>tnnYhx=(z$v+QH*(`&*!6}l^=tW>~
zy#Ty4&p@P%3}3(hHa>c7K>UxM;(q1V;g<a@bJ+O`{hy4*K}v?CT%(EaKdDCxOrB$L
z_eIdUE<+u|(zw9HabQs}8h35(LC+C6P`Kn0=&fU{^oK4GeZ2$JCT@VC`{#h~vIE?g
zu-PyzPKLz9uEc9eAJOsB1V~H_1>dF|ZvB7ibkm~au-bkt=v0=$;aN5$uk$2W-M_*4
z^)WZ<D_wZR&fuGz`mwO>0q%cZi2D~Fz*%ip<b9$hNgBhWN!ty~)_R5k*%jRUVk4^T
zFczMk)Swp>RA~D58r<1P@$9ixIK=oo8dbgKls7%*PnIfBN#0iYnajS<^Os-?wWM3B
zeq!ybBKVqQKn2;?alvm@YDP=3)9xR%HxN!jo(rd)HK+D-5929MEu8s`gQT*zpmFF0
zbR~I$PsM&PXY+(x!Ro~C@JTdx%fjyc-@$%v4r~yVKy@LT#j`u9|IZ6hdgusfv%bUB
z6h+eR{D5COkmbT>L_m^f3CQobh<6_EfxyN3uw3RdK761`hQ0mAT$S;>3F|{!J+=nv
zcgjsxu%+2-uQz;$CJ|b(o_KGb*fD1^1ZXbde9yO|G+D}}NLUswHw@~p{Q%bnWtup5
z0=KQ1Wv%5?nfqlo*Y#mD^!!VP_!B&QmrKT9tgqNH&wwm-j)JqtYS=tT5$fV@!cql}
zc{~}9DEA9|X}tkowkJYl!EG+rF9GIlEyklK2jh-0cTsmI%YHQ0g2$5uxb&C`hFG%m
zg0~~JDb=Bp5EU3Y$`wR*=3<QlXF>9ZxkpEaf!Q=ox>L!D<ceAlhFrp1xu(?3eVwqt
z@*-q3dti%7FUZNOkQLGk7#Y*RtNEGY^6%`vR*;AOrOjOaLo*t?=s3>U>_GP?>cTiX
z9g-FHoUdG)ijHTb-196mBAjy=MJx6S7k58lvr0!Axo9+BQTz`otM`Lo>|t@$jkoZ^
zP?PGc+J*s_r*P+<=@PAbbGf*n4o)(0C_l%s9zvFg(SK?jxGuepMd~N<{2DXjx#}rM
zS}qH{&;8~C+>`kHDsA#<ohmuG%Z6AjX%#<zY)b>aA$kk0@Xnu(b1MpTsA#(mCz;d8
z_h(p8YnKw(JF5g*3Lio4Bx`oo&c~SvBOvv39-5t(LCLbI9OtJ==^bOTeR3{H_b=wO
z^DW7mb~Vzo`#LmD+Qt~F!{FW-)-h&xK<~E0&>h<j=>l^)v^W%nZ<^p-fC5qMd;v9n
z1dYsV_-ls*#Phr@opk6P>{X3M=a}`V*AWGNt}1l<qJQA)|BmZP)kNm9;$19Sxj()Z
z<VB@D*<W`K8b@43$y_~}QhXfJukOby!P+$6Ux{q^ErINU8qfpgntC}IR_@d$v76+n
zN{A9wc&|nxSMTILyws=ChsQx!^%7d6iny?d4{YZs!;4t1+B$t4Ha$EAo91o9-1pUR
ztdVtaD{n$=dj#h77(>8jKbA9F1Hwah0c3~c(s|#pJl>6S+^`h<`wTH7nz5a<2H`Ev
zlJs<a=XAVOsqcopkZAIf*OJ?ciCP-;D~k`@Va@@!q@#TFg+4G6vAx-;ePY4hRJ;@5
zjCH#Q!KtODQ2QzvYy)jD-uy8{u^fowo12{5v=@Bu9$%!a?=;mwmB{bbN8cWH?kT?q
zzU{K~fwm&K+o(kYYW%=j5C%PW8K>d50T*awhG!__oxV1Pb7|^Cl3FiJOgzYC_&R}+
zgE~DpB9Hk6)p*|pW1-#71U!eOfquLKam!Oh?}~f;#owQ?DWw!8rv2jdR0#+spTj_w
z0UfYik@(DOz)>ti>F+ZEGGz5pGA~)I&Ny1z+!){U^~3z8d;x9U{ht4GLYDlD&?X*c
z7vN^09pwaC<oj3)GHi7)x{O<i&w81XBjqe?EIfxnRS!6Ug1`8sBbzrz4dK4*vBbu{
zNKD$(jfpXT#6FI-oOe$Y_x3!y-v=wAmi0T<X=9v6ti`kHKe4o00~UDvg{NoiNY$hm
z*ryqR+qXBsmOHnhuh;?)7cfr!`9=_p)DQ<$xpB77HE3gZ3AW|dLE?hTLZ9Dr(4{36
zZ%{1~?cEC_eRrev@Cv9K!*=665iq)KC5#-dLVpZTMrU<hdXYKxu7ulD^ZAZ6CVUOl
z+No1X&nmHe<4h1-d7FR#eF|=vavQ|^8X#cgKfY#wGl-6K3$wMRV8coWl9~_=?zJo@
zxcLg77BrBG-ws8Q&k}KgD|16#7eUtCq2N9F5})+qIqF5afZPm2A}QDpUdOv&Q&9pg
z@hw8P2q$5IS|zw=)Ir;#Nhl9>Slnz!*E80@_fjiTUOA9^r)Nd7I!^P-+dFW|8uojh
zcpT&IM#AmoTI89b58DebfP%x*@z*F*5}{*G`c4bLIr9}C`9PjK&i1PXE53sD*f59=
zv8VO%64>}cmH4Esf?cbsQ8cJ9UwZi#;&VHwnah07@@}|m=OY{^VoZhpt#G#UC|c(w
zF!0O?@EXf<Sl%*ZjxNIDT}2>0kO9TvI*?<cO$2gkeAvX}uyMTu32L~^#V<V%I~$p&
z`{N%Fz4wL*xmu*B%?^H=e#f$PIbfyqfp-_GQAznL&f07~djHph;Sa5;@0?`LncXc^
zAH9KqXR|@sS(9$5&0$XRMqXdhk+|9RasDi$-Q{}&I{S;EBvY64tXqyR2AZ=u(i5-@
zk|FX9Yz}z(gSg1y6YghO#j&UTFzvY^gpNFd{+-j%WuQ5FSgwL^UQf{RSQCC4qeJvu
z4#RGF8M>hQI3&Kd#dl+^h}Ntr{NC;Y5H6X61)??BGRTCs?@Qr*uiA4GB{}f`IdihD
zW&jys*oRYIY{bi6PuY3x3SV_*AP#lf0#6GRNrqq)9zU-{BhL$Yt6M$1v&nTBTI>tF
zt^xH4sX)K^Vo+-wNQZ7%3)zgzQQJ}i&)lDYg)_VVd2Gg*#trbQOoprrlp`}{%ac&!
zB#3VBghjKJ=#L)uGy1;a<235|@s)WH*QpMtjNTz>NW_D^_h5@<3KYfdfQsM!AglWh
z9ge6`!HkKZy)YU_ZBirG`V2_1j0tLaE8?sMEpml%AoTBRkhhy07&ql6f5>(Kb=zHu
zPX~X+Va1xH;94#4N=NYf6vjVKe#yU;vb|txgLw5@=9t<&0?TckF@C`_2<uaTX^I9U
zQ0XkhT}<O5=Q?3=@dIw;>^BgS%`z2Aiqv^yGVeQa3cq&wXQ-=EfW&9k)I%=@_OD{@
zLG~_p`{xQp)8es$?fLv<9O<DDJM!DI00qnK_yl>@ak(UiTJs%vKZS2-vhWYI?s>-v
zB*v~e+Q-;V5;3z)n+|X<z`T8t{FiMrnDg!^MjB@Gl8-IofRVMBXskpZ?{Y=i^hYqC
zdErDBP5GsNbg<&b6WBZLA)NilgX5;1*#F9iuFXw@(lRs1c418KMK)|V5d$r|6zQ@Q
zHd8aX<9fPMkIZxn!92}9*y7a$Ng<VZuxup8R^+1Or6Giwe&$*uPVl}iapL5Vfz%}E
z15E#INCz8Np~c@07_>r;IA2KR{g*f32*%0dO{{7A3w8W<aR8kYyqSHM4f*vyg3-uu
zJ+^Pz#*5}kg>DL+{I-v(By!Y17|&+XF^#Us@3x_liBE7wZVGngFqd{*GCtbGI$V<N
z_eRY%CzX!p@zBZ_IQ*<3Eq-*7I~Xp;(E+3Io7GShTw5<T7&aY)6z*_F7CHDObQ*L8
zCxWt>F7+iB`7U=u^cs=My6>y`F%}INUO5(r3{)l}xxai&P%AoS3$Zdl5rO$6V}_{U
zeHk4x^ot4xcZOrgMqljMo{fTq%Y=f!4zcg}W*(bcQQ5DO+xaIC1J91dA4$5zC70&~
zZZ57t5wAI?nsN{<TO<^$N#q1>J2{Kp`C!&HA8#44u5V~5Hg<JD%~vJJ9eM?P7GB~;
z&o>5>aK?vZ@42G<00{AZ2Ugr}kR};|+Pbgk7AxR?^V(#3MiOfMP6TPuHctC$4^~=D
z!n%`rxI!dD{3}Cw9mgbC`rZ@Du6MyZ<`_3zaUE+;YhdM=bgU~eV83f?dN9@xzf7{G
z@-Jqg)=m@7Doe;Iug>A)6W)Tyn@oJ&AV<_%*$%{Sl(5&N3#RQfC!b#pAdfcdk|}4*
zpuENkLdDV0zR!Sj`=rAiT*tEOaSO1n{3Q;a+XcS!pNjL^YWaQ-0j=%*2nFrE7>e(4
zQ?n|5T-$<|dKtGa^&;PH$TPk@`*}q5u)zHx_{Q<VTSpQwY;`v;d9FbBuOx7=@FfWL
z8t^eDtjmy;%x#aT1mzI~>SH=V^v9KZ=2imBy&R~(LK7n07jn)O4%BOVDyoEP6L)zD
z9+|?p|LQ||$Mjb2;GH$78D4_nTjRjlVh3O5dj-xX#p3WccH~>cC#+;V?))kpl2-SR
z3v)YxDQA*k{~=rY)l!c*PJP3b*EDlx7gysPwV`Mf_?CBDp}{S1m7}ej|L~8l^<vxg
zO2+)wqKV1YIPFX-j^B9!?O&fm4|flUxcwBPgT$~WLxQdQG%)+oRUE234ti3uc(;Te
zarlP@kggiW1%JAX=>d%6mLCtsj2HEGgaPf`VoI|<sdFA<syOH1ao~LYAm3e+jQhRP
zKsog>=k{kLcQuU1nqLAiS~dVXnjhkoPZ5~CM1zi2Z^nB@vLs-`1qfmN^!l@pV6>q-
zguHqVF3%ote_q^1=e}pWgB{B(_{(D1(j16n4ymFG10Y~tDEB@15lWuS6+U}a1=FW0
zko_tLAx`l-&cAX42aRDo7lRP-mhxz*X)flSAMXW;D&r;}K7t$W6(Mfbq9djH)M4jE
z-064;t1Pr>=>nEzY<SAA3fCo$-d5nbstSH72V>K@YTPzI9nS{7!|11}*!nXGmpx&*
z@5mF}9K+q{{3wz;$Y&wFZ$<nzbV0{nP5NJu6=|eO^yF(R;<j&|Fvemfhy<^N^HG+#
zPh}a4vUd<P_ZIeFZGuceKJ)#J!OgqXX_|T!iVp4LM-TJ|KXZHLF-eEA%KP9mRRzj7
zsqrphk?=W?WtA6f5NEBCqbZAaKyF?hRDK%<oiXhwNO+l_(8Rb?<vYYfWkXQt_Y~f5
zc!!NzJS-4(<AI0I(TTmQ-CFWcvhbYPXhk(Xi@yN(^K-G6?E$UKLb#XNEQ2v~GoLMg
z4FYvOVY4s?qfO33*_9`tb$ALt)GdHzY-Gtyi*OXBDr2196<+P(QV?mi^I5M|xpb9c
zU~g!sjB-L7<4`nx*@2@L=Hl5KVYt%Pn)F#LXa2q{n0fjwnzDU|^CUw=g9wPoF(Zqd
zjfqQ0G`xCHfvdkR!mLA|Iqgmrx}~lFqEuvw=)fCS`(M#;_SYAvlCXT*p%s`xr0Bwb
z;lnJlc!^s&@0HmK1slV_<w`IA{?Aujo_iUd-AaI+tTV9oNEb-g7xFq4eHcExk@b>T
zH`b&Lq$(G=p{c<*_<<(Py2*H!7iZ+9zZ!;r?FF=A>QjhUf5AHHZ;|uVBepN)*|S>?
zw2L)}_puZFWz`%Uw_BFDP50tl7A=Qb(>PGxWluvqEa>E*!<akj3QEt(!h=#}a{Y)o
zvC8x1ye~cBB_pS^XK;`B^(OXRJy_&wyL>44t{=dsPr3qbhI0IJ*@w`!?KrF(_W?he
zJwnIzvQ$fUBA?rs5Bs*4q2qHuSWfDpq+N&FO3h$xR1<F4{1&9rOwPFt`HL4v;8M%~
z(4_i3bRF}D&k0dD&d!<$nR~Zm{b>|OW`OV1f!yT|0SR#038oVri1)x9xJdsR-dkrx
z+|4gTrQ<R@ug|=!Bmcm+<l`_+-kvW0DTPb%%p3efhpOz!#KZw@oWd4c;<i#jIJq$k
zmlY4Bt=&@o^K}zC`Q!jHak~m_yl@Mm#{I>a|G$IXx(us)EvROg4H>W}1Sh7L6Vx{b
zP0G$b{9mrCQJ(Q<?xM9K>%%eL)c$fG=#6>~j_KRsDwxwaO%dmFS;Bo~8UN3P0^*x4
z5pNQ@!g)^%x~e7-m6-QbdjAl2PE(nj@|S}Y+qr1!5snpGP3WCDHsCTlpU+I%g*Vrz
zlCT@GoOHW3v=muTmjhnVW7N$xUC^fAS+3yj#giC5zZq<ojzTrL8EhA~3WE9oO1yN4
zw`Dp%VVepu6&R2SNuij2qa40uy~C^@>%qPMDH;U@^XEQkkf3P=7|wcx>h}6XpK<&I
zk0y)L{w)W7s~Mrz?Em|yn!mnTK#E4#!{wrXXdTfCjl)Vnab6$vMijx(%3E;0E*g)r
zneVScGwQfPhR&F!M6RAwCAH_$z+Y37hMh8iwpn6~bCrj3o5g(7H^wK5`YXJ%)eX+u
z-b1}34%oh}ojZ~H0EC0?f#1@9&^`1LNWIQM?>GrWT<%5#d<zNA+pw&_27T{~g%K44
z$*61D=-X!jBL?WwMxsHJk7-b!4q5mz(u}s0DbZ~{8ua#N=5R>J!h0VSh@)K-*JRv>
zz4@P^Aw+=+G&i`Gt{DzSw+^Dwpa`5^`T+g@rNM;=0g+0Qg<5C*P|^|tJDGPXC*6i_
zoTo&FACjRiPBIXGya`fjd+~yg0qtfy(oW_Glie(VuCYhJ>S-Y7tNWgt=RY47XMcb*
z+m*<mr+4sQU<7JeAopOgDrsC-3-f&s<Ku0cu<`422nb+ZoO3tPe_s^JTF0<=%w=%i
zna*pBxrU<8s^Ud!&56B}4Neu?6Z^NPp>Xa6tdIVVzPg&2Ci#V_8yK@*Y(&HLyV1V0
z7__fiki|bsu-)q-_V0gyOBS;J_T6icct48w7;^{r?g)l{wrfc?5|H_0jEOWh620~0
zxW2tCzq;-wDDUY)-=^bY=f$u2N@p`vTXr8SPa46}-z*b&$`o%j8q@Uq^{mU_$me=}
zfuFXF^Y(^<mtP)OXSiVSGb_3w_B3c*D}hTZEy$#X1lZWi`onws!72ACc$9nNUr#o#
zIQoS*vK@-?Bd@|DmRS#aD&Y#|{zHvJDbV$u-3ebj6<@vi21OHVgc|~-IC|bth$|K{
zhDafbiWG2yu^f$b-vIi<<%s*fe<1H04+7t;u`|~ygJ^dqcQAzY%FNpF7jq6P%bL)~
zJB~nMurdw(l8M{rwZZpKDkM0}nuNKea4}sXoI9xn^J`hRk?kED9sfeAaTF~0*AIdR
z(tM3!T@Zfb61b)R;2X!q;p2iIAaghcxBJ{iWg}G_T{Q(y8>gasXD^&Jy$<s>dqIHx
zAO6-tWnvU~3yZoL+rrTrx39j4<IQeE%}ch^xSbBAZ|p$P><3Um4Hl=I=T6;aS<vz0
z0HWj|+yz1SZah1CGrrOT#tv=QA|{PB&_2n4)|mJ6lIe_56l#E0+b%)ap<Im1s^!f;
z+L4t~RWj0bCbmA$;?+~x*|ItqiqfCq+hMP8>7Gf@C+`HQX3T%=ZwjATCnx&r1F&Sg
z5Ra&2{uebTE~}2gDzgHt?dZddVSB;th>f^qOcvBk(Wg$Wl_<Ve3b%rE$p{N}|6l6_
zzT!#z+NV<HXB<F}nxBAT*&N>BLk^g|wxO+Zk({-1FG%@y{4x96=rFnjk4(~`u3-ad
z#Q+|B5?}DSmW9k462;sZ`B3k{*b*Q1p!bRU+}`aH*xPa!{2nlVR`5c6w!edU=Gd(F
zxCZ?;?-OjjElVutsuRH{Uo?Gi2M5AW+!CCE(J2+Uw&NLSq;^AHaTG|B>bV!qc7*R1
zWA@$WpnUc{=V$#6&MvJ1=8fZXSqEqC1~Vc%stwfU9f2(;Oz7fkAE9O*%h&&lg4gQi
zWJSp<Jp1P@RQ8O=rKwg_Q#yeB`e#iIuQcF-59eWg&<jw^$%gifWbS7CX&md>2_cIf
zfI;pD=)V^NZll(t_+t*tEBTC_<)v`{%5^+g)eD?4o9C6WUe<@V@TlxLmadrtvx6jf
z@!WFOy^IBkj<WbU%N!kFei+Z4O2IiYX0+&_E7)d5g8XY;XrDI^T3wEFl?Ug;4c4=)
z_58>9evO>PSRrbib^*l_mj4>$#Q9~ufJblbNPF1_-hZ_weED<$0u@;2^QsgA&RNpK
zk-C)IBp_u$JQz%_XAIk9as0VF2$`1&fqHWw`bq{?{?Y`&z<~UP(sbOMs6-;qokr2i
z-$H7lN7Niy*W=?QZl?Pn*s`qv1cDM?6dNS`{Br;iZndI`GOvYhbe6DxdM8ADFF@~=
zw|URR+aP#sMCXib28)efAmX1M{Ss(~K8r)K%A<}kQ#W%nZ}BK?o5(BmDAFh+8<Ms)
z5?rL?(CEoFSh{Tmo1=2z+iJs=cp8$c7oMPetumFEDN&#Cp?taeZv5Dd*tN<A6AzYf
z3tAaddvu6!PVXNW8%kk=QWI28Xk|OM^(c6E!d2>+z<E#p#4W2+CR)~uv28&U+l%<}
zBL=Gw<&deM87095acqV)u!$R$q)N8fH$(cEOL*XrGMO8%N{-*MC%*hQK7i%BoJS9X
zgQp|#VNnR4{>x_AjE%4Us1*0^ErF{_)+G9F7AT(Tg#GW`VEnTNC||T44q7rk+NL7j
z$@e9==}5#0ZFVT0o(_E%^r_2Z#tSKn!71xH(Bk!bc<ioDMcF;B0*e=7t=f|?oUt8!
zYu<2^mgRx8;S`5BfjJZe^yh>q?CfBUh1x+V_$8Zfdy!=zby#+IwGq*ooC$;NUgEBb
zG>~k!#GPYo$-?DE#174f*Z03rKm85v4oJbyjV)-oLxY}<iNUeTJnU6_1mj<XLTC${
zm0wZ9w(m1=R`NgGf6E2lHz%N0*k9h)YAv_xT|I0ya3E<5wqZ@t4IJC{8J14;0GH1a
z-b3a+FOZXnKls{{VQ&<u+L`@0Quhz~tW-zImcP7e`zJVcnC118dLYj1K6V=n#oHb-
zM64SDxwAq*tG<=rF7p-WAOo7c#|Gs$FF_T?E*X5+j-G9>M(+)l{Mo2}_z@vN$wO_J
zdft`<Oxg+Blhnw*0gOkODN74P2{3ubIgB_cPku~IVZO~QNE`f%pFCBMn6`Yt1-t6O
zXIC9R{^bpjm8(Vn$awz5jlJ0SP!S4G4oB(Y55i%wN7#PrFNEpGaSsEP2s>BsJ|Cu`
zQPO!#5uXJ66}MrP=T$V!k7k{$B)H7(@R{bRVAHNngN}a1bIRr<nDwCj!){`B*E=}e
zU_zx6ZV8{D9KGeIP14k=c{_i3(hj~LSm5G1)O$5(I*Qn5sfweLjYv?%4!oUXO{Ra)
zBP+-J#h-gm;`>z((4Kw2Zrqe5Jw>0mAiKZZ)pR*x)meb(Xh5}g9_NEr$f38aAGhY+
zPqckLf%RMSaPOCoDEj)F_u!Qg7TSYckrsWL_6ZGCY9RYn2Yg*oig&G_BfOGfUG0b9
z?wt;SuBV_XKNA*J24m&-0Z<&Hgua7JaE|>eD4X~Tteg=SuRei60b(xefHL~c`GyTn
zndm#)o!hfmiOzd=9WtCOFnVDoYBk02j{}mh`kMy%^qQhqcm}MyV@#*rS0R_;FJfU}
z1-#tP97E<sDDh%U%dPi7!ODt!oM=I${5)(N;E8U>yT$8@jj6GgF3FxHg|n~g(0PR-
z7H)YD``>y&#AI_C^dp+(Qm+Un9I>E32Gro;&`{J-&w;aMC6L+u0E@$Iq3^Q}`gw|>
zP45`GH(Y`dGtKGt@6RBn){KgNISFO&zrcNi+59ks=cM-|#f3q+C>z)VOaE*Ew^^s4
z+3gw1`ZU4XQI^!HJP=Pr$Wv`KN1~E)8KrtVVV0;EC9CIhJ+%%Htj2n#6Px)VH&n^@
z5_PhQok`pRs)c8_FF_B@Bk-hIh4?4R()@G<lATByi=qq~@7{t|EnPaVc>&H`CV=g~
zD{#IsV|y+)27Q4!k)HX6zSale!$ui$c>rTzbo@n)!)IVr1IrTsR-~d0Uxi<W-hegA
z4rE-s5sAo@A#O5jxvwVa7^*!M3}(NByfhCW=GIg)Dg>UjgyNN6bJA|JfVW;;jd=%I
zuUKqPCo@)~;J9V}@SnD1bf7g<-?1ed9pz}@&0dVG`Hde9%*m@&%pJ9PGQO8lCEMg2
z$XK6#JaPX8d}i~ZuuMC8up$%ks*mt0Jj==ck)_+e-2}-@8?OIK2}X~Wr*2-ILPKMH
zBHF-q+t&`mOB#k+SF8C0F1kc&8^>EuOl0{>#=vt;5?ht}a%)<($o(=^;<;cmE~~Vo
z4bzX~(5V9v9vFaL>pgV*br>W^KJbl<h16=`2(6-gF0VM9TQbvth-wnWy65GI_bv^p
z=ePq*`z%TP?|mp*8_)l}ZbquotKfpS4e1?q7-dvXqO+4bI$gJ*?~kYv!vq0|4Y4KW
zk2HzXpAKx_lgkU2_CpJM*0|`~!lF;|#C`G`Ff6qv*}`;~euaHrS!MjR;uMs8zX>7V
zPNCXLKr6c%PWf>#S7G`TWS=y{`Ps3UXp}3Sc8qoHUFFF3s5WpM_=cOvxHigZExdu-
ze(Zkm76*N2Uizzt;Xpg<)7xp$$I~9bkx_EguPO)MGIpX?kUPTt0kH9`DVe%di8@Bd
z@?U<dQ^y!Dl&@R{ew`Qa4P%qcdTdXEUWUQy?E}H7`x8iI3;3kh7F2mM&!w(^0%KR-
zfF(BOB;4y6WG#;4M-(zvNc3#sIyMjNgB5U5_AmZh!q_*gd(d$$6ZhO^UXUHiU^T;^
z>;Ik&EpI!(Wb{4oF<{+~sV*RGJjO*BsS{h)p{+W&4xQZo;fk4-v`gp@HNrBS5N=L{
z$GnjA=+l7cD6sPQ#{Z~@M@efrXJJ|k*|KM#=~Ev{rvK(@O!TPxr`ur1JStUPnslob
z%ND&n4UO+Epx)<Lh|8Fd?FZKIg4fxcVAB*ne3b%yjNc)Fy{o<^%aVx`22icZ{@j~G
z11K+RM2&8bfZLDN=-MO7G%2|ew0vi9A+PhGd+uF0abzBT+p15)1{ZQ=fklug9LY^(
zj;!X)5L8{>2w~y6AU!`Fii9)3`T1AA>Cs&b)4BjI3%+uZ6E~pMVIdbxbxHmzQ<7fU
z1sLxPddYIs(Rdyt`Y+@sz9`3U=UzkS*oFA`bv>9Q%F|Jk|DkZ~FW9@Y3dXBdgZSEf
z9R2GbCi(<G;k7B49A!z|L+(O&8lZhc65cvsOs8BLfQgxmw{=++1y1{2chgARS87V!
zUzWkcYHiXKbBgf}Eol3E)+d>*gmDwx!QQF~=XVLH?<VF@Ij=;Q-Mfi%X54~0n`JO_
z{X*z6{Dtq!SieDh%vF4V2cs`3APTx8zNw~8oF_~L3#H$1u&oiZjcx;q(xGVnZ`7%2
z$JlRdm+;(GT-zxH+x@y|lw5=@AFHA6zdbm5VHut_v7o+RN`-l{_i^EB1EM&18Fp)S
z!qINVgy@k)D;+(q-Cmco`e;PUGM+-`jf*g3Ljo#>jzv+49lV%bz`Voee4qCobe4SK
z>&%+5zo-v&4l%~z$0BGP^$EtWsfV9p_P4j%3kLtnhD|T4u<p+q7_wcNOsO-*=yByx
zY*ELF#>5D}n2&(;H54YXb46NtFYh}_oAQ&@N#Rph>{$E=Jp&NJPJY9xRegZ!$DrRJ
z4|581h(e7DamX1+hTqS_6J_7ACT~5s{dpuj=3z*^_gZs7k_%80XGfkcF(H~Z{W#(*
zn`w>y0Rn}5@!|<la8<#M+8!PSiGN3-)yZ<Px_%UH{-r?Y3yg??cO5LFmvP^SYdA4c
zmV|t|1YP-?v2=qm4%%Q$lI<9aS?MA-C+QT<o4XS2XPiTYId&v$N-p;2pGK>=Z+!9D
zv5>Up55V~#_;Xs8B(|OBMle>c8$0tYI%iCJq;JvccQ$?=$<BOMZKz{e4Bq=XcyIfg
zTt$}{1!gN;n>pr$a!lfPDICSIO{Vm6X)M;{zr~<Y26S)EDX@CIo>OSCBlE(T45~AU
zxrQBRBT&$p{~DZakMmkz%ed}-B|7aaW6-}d!Qf*>c>VN1l6};UnpOp)EcXrG?9(HW
z%WbIE)-~LgS6gt%AOo_+su!jV$w2CG0f(PbCk88}pdR%d!_V_jdT$So>A8zXYIUjF
z-`$XJD?@MfsS@KE0&@JeA{k%S5BXs#)M(cls9|pLrUNR}me&A@Rf0J3LM6AC@dZRX
zcM30lXLqfLbR6>Af*5T*#@)Ph6&HN`2UAx6MgL#jymZDDvB>E{zAW=FZc=u|4-SaM
z6_tF$Rb#TIM2A`xRtY2Z|L}s%+WBsoQT&{<HMm8eF(W_z;d2AcY5vqg+*F`}W4FA8
zFY!5;wdWiE#rg*xxLt}0XCqLwt6AtiuL62rDbm8qN6@Xk3K!H5MlIDb(BgRtACG+j
zBF38cJ(bQ6{h&y#!XkOMC9c8^nm=$uv6%6AKl7^7*n74>hH70}&%5<=T+Uui@|xXk
zo*T>3(xJ>7vUi8rZAdA%H}5|zsEULavlK{6bTy>8SM%F_?&0udYQ&^{6xQZBBB2*B
z*4~OpF9WB{JHoSTY{xHR96dJotx-A6_mV_xR4Rhm*ObUN*9SOaCd(ETMe<`J5^%0q
zfvmnX3T|F8Co@m!0&E(BnfKb@<qb{ZzhWSy4!#a!^jIEj3+vV|N){?ywIt>7Be=+?
z@A&cwcRBex(_qr=|Iq7h5L)q<#ebycWbczU)_;1A$)j1<=wT`xt$PEXhB7~FX(}(>
zVnNHbiuopcM?7;wh8)#&q@Jh#!O&rh_0@5nKT%l=q7_=g&!MJt?d)z0IIx&&zdN6M
zXL6FcRV?Y^>d&Z=R*LWb-M}%I9OxT=9b)y9vB@SdkH_JYob;ascLtP+$?waswB8(A
zeQt2aYc<Kq!&;<zh8zi%h_H3rDc&)Nb!DnPa{i&mI2UIr4t1Ub%59Z6=?`NO95JNn
zrjOu;nI`f6=Lp00Mxg6R2`*>Pe8(ShSohP3W%OU<cY2P)D(i31Kjah+pXEsYPRPPy
zn|)CJN{`~abV$5(SM1XLn43-R;P`-E5cxk9_Osnz*z^+Ke@PRc|JH$e9;`%rt#VLK
zkcalpOt7jS#4=9oGiJTT$xe<WW1$n+3Xg-%nO`vdVIPJ_C*bJxFc_6&NIbG)`Mj%>
zVR)_uaTdNq)Ujjv#zdIEI~V2oxiIX9A_+9p2iu%$AW$`8%m#nqvzJA%a#b7lvRuXB
zyRY!CRGn}jOBTOj{N)>-_;9%$IdbJJ%GO-MZD3A%ekAfaQA$KKWFm5NL-4=BrbNm<
zry9mfZ|yQf$&o+2?}vd9B>Nwnt|-Kw_+0Mu>=1mqUyelmkR!=HW~6gPDt6w>fOAuA
zN&D+SFmOBuKGm6g;WKt#KKul{j!{T^r3T)Nv9P7)BWMU3z`rzw^Zf5QScm!GoC)L6
z^K&cIbr{oMPmD>a$v8}%$Yz^v?{WS}b{;p&fT3q5fU<2WN`3e88?O3bXrD4>{pZZ*
zwAZ3w_c-Vqp8{i_jl!sX{j85>$Y-ov526fb^!nM#GGkWxM&c9bG2H~E>PLB5*4<m~
z^8<W$-Nf@bHq<O+F~$U($9j`K^h{<h@e_ftt0D{D>FAPG8O&=`d>R(#2xwhG0%Po+
zL9c|*u>RCQ8q{Y3V;=>>oTE$8|3f`rqpSgiaUD=wpAMGnUC}f8J8yLLB*Zuw<6_fx
z{P{tf?yep{13#_<E!h-Kf2t*U=zbU7>rcX*p9@h(?gY41<qNHi?xQcw;Wmv7KtUXJ
z`8iCUV5J{4G6sZhuNEov*Qa?3fpF^q%W?ajWan2W-hRb9mdiX1hFTKTuByddL!QIx
z@HIH+xHWeEevezPL^9^pGj3PbAIQ|VpjMxL3M(9XVX6hY^Bz0NuS;gL(lPc_e#mf0
zR}O=`Xl;y~vX8HcI>9Mt9)X&vly_S?l$UdrAztBHR4{o|K92SS$;NHGi~0vn{kIa!
zaGLSHcU@ppy9OrK)roz^#Dn=Q1>);r$FFiUA!<h$*C@6b=Q*8)uLTYy(Xx`mAU)jd
zn~2K`N}-|6kUSbMM>4MXLV!gLuX;BY{@B`3G0P9k9CsAsgb8T<=siZuegZS8CAR!y
ze9o?G;OzH5iq6EZ#;yy)r+L;q4^DFk@uE7<+NsQBo+3l$5G7=ILn<i^LK4NBB&j4x
z>O5<cBuOesk|IfxN|GeWx4(a&A9c?2?7h}~Uso*`u%`f5uKfhTx0I>dmIL72!e(5z
z8eoO7Dk&^V<Li&*K+X*#YQWzItw(B1M`A@^d6<yGG(WNM)H?o^juC0UW{>_~vcS8=
z2+OBy!83UkI;s6Rcp9(dO~%LIxXuXNxv&Q6qFeA+Un~xuzXV%cElHeV7L*5Vfv&n>
znD=iO7Fs29Lka(|?(UfUJ-Opi@b@}b6Ty6a_o}d|a!|Z8G7kdRpFyRoPteODj(`2E
z8HMWwxLM{goc<XO%8z29WVQw^*r7}chb-x5S7|Do;K~ggwZyH=E3ijS7qgz%WBaBy
zEVVnwXKy^m52-SqM&@>?S#1pp`*lfo>v+CO=^JmPNKoEN3XAzUAZ(D~-G4D3rg9o2
z*<151<_h3>PKFnpI?dO4PsehWPkb3~MjPVou)5_dbXWU8d+HO|ZeE4A!Hk@U6GQj?
zHIU2hiZ%*{)OK+qbfW|}qPH0BdIGTf>tjwyPM_s-vI?%7o6;J!anMs!gz4FrFi>`!
zNGvrS+wZ5qDE9n3D<@BeZ#1WNcBlD+=I3DJ&T+6bT1Z}PPQ=?jjNJNNgL*zm5_LZt
z&q?li57`~7_|P-0Fri!?3iqzyb{Rxs(%Krar%t5k+&dM<DxVD7LSyl!NS}=Fx(kDk
zSr5|XDM-FgL!DQ5p;ToS47Mm?@n;R%Z6w33YfxfNH+dM5)r+Y{Z=vl+I*y601M4RO
zlDD!Fa`!HV>~lGMYSu@1d!-PXl;yY<cE+0^I?Xx*UqCuXhsyqZ1a&=0sH`v^Hu}86
zq*b#xn=@J1eY6dI3x9L{!j-VT?-RToxeMJ3FW}5I(U5elj9*iwLF2c-<JTsrld1nO
zmpa|XOZV$R+wtGr!R!VIeWF4m(}TcU>MD3Y7Q(q>LAc+a<&YKwq^Ze+Ry<=LD(pq4
z?S0IfdRg>Jb~x$tPQ;(dD&*InK(yRsL6Sxp(n&*Q=={+ede>@Ddpli{baXzZKih;h
zt8JN=VLvR;wLsS$&EQ<LlAG(mK5KqbFx#>lKTI(rb^kT;wqKYVwLKDRnG;}Y8OwpQ
z`MdpBNm@141q6xP9Gfe1Kz_9<l}gS=@9Q$)GTMy5mQOgCH4H*tvNLGtc1V@{iIPv}
zpgQxP<tkhQ&td!c35tW5dcOt2zHfp9i_J(ad<G%QFt{&?;><R4IK6NxhWI<e!+1>+
zeEt}w8OCxCJ}6N4)x+>`@fRoy`3wQ_KA7J03dVGWV8tdLl8tXe{LKb_|Dt@(Z}|YM
zf3HU0f7c+MOQZSd?ab}0yd8=iEwN58#8<^>as%%^fbHyXEH*R7OY^S4yH%Q0@a9Xw
zuW{<s9i~HQOF3NmTZSED<Jfzh@nLnPv1v`P$m{ECJb1Mg9S`eLj}@#p7<>i;*D+ON
zO(@Qkod<EJe&NW#eAu@B4&1zAL2k^_rhAT$$MTXt=y{xgT&Fo%%-8_Urfcz^5yw$x
zYBT0O{14PxVtBiV+5C<~HL~!I5&1hqif%qU7sG`o(bkMZMmr{DhwJfEoE%MT*oMoF
zS3}l}0_f-t#L%VRz-TYKyDgHUeyn>~siI7->atFanJFE;J`2-JU&CRkQgE(5$~FBe
z#h!o8L#Ky6QQoo!Y9H5w=c&2SFOUMUq6bFnKZnj9);oB2Ow2zsCVst%5ID{eD*iTN
z-LRc-De@>NKAeH$70MyNWjvf!IEiUG5!^H@A>CZ5Mz6iTf%4-6z-IPSP9PocI7zz+
z>aT{uf`lAiIAF@}7au`o$+7UejrDg&e!{1(O^E{4CNa%pxy|zTp)h)dSpMuYPI9>%
zy|K!IHss0C^Xsy(TyhKa9zF`w&t>81olWqihvh0FzoWZtB>2s$LE)KXk(cv*E^usY
zL2bAJZLR6Tq&2GimCL$x$mS<hXxxIFM;FjtApj1>320S^FBpu<hX1w<BQGlT$mM(~
z;`vWBXXB=TmM;2a+y1xkFj0zR)n3F=HwUqOR}G|n^?(&i6^QtpC0+c$loV>Qee>!j
zv<dmbJd!qa_g5(rb%XW9pU;JEVI!8$bA}raq{zJgzA@g|Pp*6GGcHW!HGla$V_>Od
zaxZq7lI|3BE_LZ3d}Mx+Q+F?+t2N70q&)%$mQ6}AR>bS<-&Tn7bpLoqtXF=8yOr4<
zM$Hi6pe?L9co{~`)gUEOETgnl5mTfuVt<?y{G%%*5?`)jQ{Dm2sb>TFLLc{{L_n<O
zuzt<fV>nh%NEHjd;9PeTlKJ*1h%|e^`I)aMa!xEBjZ-4iQ-8vlrSjC7yD#nvW7%}u
z>!OKM<%sXwDBhmPkbRR?Xnb@H=e}$wI=`{zB_8K92T!K>4`<CXs_J-e&Nqm^bBB8v
zcm@-7ce8WyIDV^>KB?IwhlBqpfKJ&d_;FQ>=x}K;z4aMP{-Qxz_PIgn`edjd*8pnc
z#$e-l=FSyeg%F!r;QLUPZ$JG2)D#c$O-s|c><5c@r>FNJE!TxHaF&TZRBoW>^#;zY
z={J;nc3@6kFXUM=KJ8z7e!6-W>{u9y)gR6Rv<G3`i(XCuWupCdUpbX>DSGcp9eM>&
z^xWmj6+R4t_+QOjtNI`mJ=CG4nol^ha&|WL?t#8PZs?iaBN{bLi5$7BOcpUG@hd8z
zC$iM3;_G5?+x!|mA4c<n@9)tr=qzXD@QL|iy;!#$AZTALwq&w>nS%}NnH~uh`Q^~k
zu0wl`*ys7e9~f{#$o;a1c}^;EJL8e+rc|L<R1a^Io{9WvRq{kPA9v2yq8F7<V9a10
zcUb*Cdk^MdRfII(cJMsRS)PkoA>(m-5z9zrhd?#kZAIpIqNigr_)B#`C}T&RjF+Q(
zuIXXex^RAGOdLO^vkG<!i=nmVGX!O`XG7?3j7l&hE`8d>TSX6d@}+Pg>Kg<atP*?3
z>yi@YC%9^EN*aboqTX9}SG-)sTUEK>wuOn9`I%*sC#>aiT_a&nVj46VT@?AvsDTyo
z$1t<69}7<}5%c*e=qsH8H5-)h<TxFY5W{?TC(Ysf6GJlDE&+w(-iWR?3(1Ngb0XR`
z3Xgjk(CzEwssHyH+#357+rox8Wx3^8KxN3XI#1?Jl^3g>9^!4uR1k!#JF0c3bN54z
z;@h-rFyAB1T;Xij@;3+b|CXXwv?@4noi5sc(uf`^$;7EUrhqz8qZ4!lG*2`Hqg>Bo
zn@$;5X~;ew?<@GwDaT>N9cz+(x}UN8k7K$*H!M-Kq-y5Mbiw>qw)_7o&OfYy0WtqV
zOxkM5-DCz1w<cn$SU}gNUB~7K1#0$^&DE=ZaBC+2z!{BNbk*27pmF&PHobi;j%F+j
z{lAvv(y+7WADe}<-E+ZZvKAS~vde1ji+J0Ar$I*gU)-RUje!T0#my{tS=iOa7d|Qy
z@AzR#-hLc~t+)1KML;$P^R@XT*&ST9XE_K}jqtPMF#7Sa5fyLyj&pY3!i=pfFFm^=
zzjRz9r<ZsW7rktQwhx=(!7nR%uqP9a#YqzHI3L)!)tp)#4`J+Dd;XZ55gB*DkQ8t0
z#hODVSia#5cHS&wzNc={G&xCfR#Aa?hbhwR3@08JZbq+la`bI!4_rE$2{Y$@;^Phe
zU}5Seer54pOxk~hvt#>$)oKRBK{uE$I#I&dC&}FFW2$6+tbneYZ%BS#QXxIhnJ+9>
z6<X&pUU}{?(&DfU7ew#o_P;Qpm#?W4y@4;NGndU%Zoa~p)m6N6LNwQuaE23S3}97#
zGyh=h1w2;5@`>~Vzo1E$^O)Cz4pWVA)>lJfw{{Mflz3F!biIHJ);6ZS!Rp|ZFb`|r
zw?pwd8T2ym;4&R^F{mvLUNkjhY^W5?-ZqQ3I{6XhyMsAFw3%aC<{4gR?j6{fegi(6
zm{7q>8P0jtEOAwdBPYDH8|=;`@D=}wu}CT$LK-9>nvaD-(_JuTRtrv>Ux|9>ji{jI
zrKoQ%g6HmDKCNLs)NH>9xx-Il)TacD++awfoQ=r>xi)kjJx**QV$L1?1{m422%8?h
z;0othiS^P~<L$+taJ*_O*gcTsQ>Q+MTYW~<MHGvNKQs66yiX8Dw!(#nvFOmILYF$Q
ztonH+K3?@bH$k19U9}V;IO#lwrFHRY)d%^Q{@*zAsX84r8&1>5vVK;E4%cLsDb{3;
zZqXiHn%nOU&V7ufY`7QKt;j?L_IJ5)(TvvB{6OK1C{Cbt*fG9L4xV*Cz(7}qKwP5B
zEn>YS-$@hr@!==Ya8EjR2Uqf^t(Rfb_lewn-+DaMB1?{c*QZJEqhW!0F3$%^kh_@@
zM7YtJD_T~IPb^q&&oh~qoVX2bBJS~tk!G;}fii4mvrNy4Zv0y1AskzjiQSq`T+D8D
zepu#ru=8%=TSUuX=Il}~XI(Yu+G~@M&pTj#I6DtHzJZV)KX8x==Ov_v(Q^CYFysA0
zbPZ=Syut}s=Jfz9)=mO5^T#0GE=x_OcVOPtNXYW(huX8HApOJ?dT+_VTej<-^zABA
zkRq<L9>ZimEuwcmhu34-3(xWrY%chYPmLAnonynvR2Nm6b#pT6)hOVD)0X7)vX@YN
z`VhFTP@`e5t=KtpJmb{1an8ZDST}<8YWI}zSJs+POJ~IBRjjL0Afzi7*Fv%82i)^u
zE&f(3z+Nv0i0{13*#sVjk{##Zj%5=1|MwKGOxL9jQ{s5{u?wN@&Q>h`G#2_Lov`&{
z0XTJ;)3m`+EOWjGkEt-npvhf0F~5Ov7!QKyb3^D^(*l7ZHy}Cj8w@NH!-gYL#QEKJ
zzVNC#C;$F0I%;NNV#RB=OCJV{rQf4-@mhZW-lKf+Ee+c9AqEG_w6J(?0Lbgz!Tv|?
z(D+k_emG%4v);XdRa<+|ZoCBFXOjslWYx&Qm7$n?<QeK6CFnJQ=LJQxamg$XSjJa_
zZ>|7)r%GYnk4>Pq@ht0SyoM?E^5i08FA1;w=2n?l(KQ#EAv%}&&lWj@f|??2tBt~t
zRXfmG!V&h4*Q070nSA!rMUWIfhp#et$t{yA#LdejX}3*1|C|`prG-K=&C;4KKQo+^
ze>#uvetu-n)KSoD6a_sxt*}qigq&g?d!gcMZu@S1;yBHm)QzjdKhsO__qpL@uB;((
zU+)2S4T^m3=an$Yr2*p8f<S5H3v6ofM%b|orFWZx=dZo6cD6EIGV~MsRz8C@J>T*0
zCmkvrn<H}mk|%asI}a*Y_9-WhW&FpC=kuek@N13D$tB%i5a_WA64hO>X?>Y^TD%1H
z(~Q7w>tp<al8d}Mdp0Y+m8Ct^ZFuc{6x)5JLT^MVjvCUTNhdZyx$AJS>3+kNYHFd$
zO6I}LybB}#6_OT(3y`0AA5UjkqldQ^^`3AI<Tk65ri1e^*`N{g=dv8*SYt^2cK|Lf
zmL*NzRpN$l4*X4IX~ptVsBTkc?`g);zbb(V3KFE`k}6}kW$`UPgIFhgKR^3RJd9y`
zKtXH|_w!~7dS-p*G*mmlX76#%e#nYCXG%hI`WINR@BwFaAsXJjGo#xptjL7f<FJ&m
zDg~Y8+%{j<E3mo_y|Z-j*uy+b)HwhVZHyzk$cFh+ZiALB>(#t0g*wNV{CbJ=_~KPJ
z1{w-Pm8&vv`-XC~w@@Nj_8A3&$rJ6=Od;N&n+M6!sGqJw{#)9Lt#O^`?^A|J^7q8r
zJ?2DpmJK-(`vCSoe#U(oXH303%t6=a4o3fc0AJLtXt|dL`dA~D84O_aq70VRN`VRM
zP4Rp_MX}05jOoq*_b0))piiG~mZ$=pWd#*o6`(gagfHX=MJqybu;9N37}H+KZ);=y
ztqn4y?u!9+w!0{@NY<bNwU46EhGsZlX+dUz5*eBzhRnlZpm^y4^y#KSUST~*Zg~RU
zs%OE|Cs$mXL&5(PI|Ci+#XoWNXpq$kW1>pIL(_upY(I_*#HYCCYu=dVG{CQPc?iel
zi&1>H2~5szMzvHAK9J55`_BA@vsvzX<2+RyeJc((MrGm1QYYMTNt^lq)}ig@`EXI%
zlxQTs2JZkJSnvH9mX+yI<%9-|>XD%N)Ei2J19=nSHxT^ta~w}|p~$%jiq${hrDaVp
z@^(KqtaXK_OI4_(=K)l^<-yH3U_}HKD@C?)O0=gk8-883AX;aK!=I}usCU&I^zQWX
zu`BeUE|#*M-WyIcoOLf;EGeC@N(DHF>)tYwOB#~m1><(`O{IId{T;P9fymO7qpS}h
z?BU+N<{@>}H^#0vrf+3mLVdgiZH!@#*9>VIxc|MQ?ertiJ8m4h%^3xk7B$1)8-{e_
zquDrn=}zo_c>&fucmYX|zl(jR&gJ8m9p`^CPH*@{)^mE&i9P0lAT;;mSgM#<PEWxF
zt_!$BBUPwi-C}S#Bq099k03?c3FOBx59o0{u&TQN?iq$yK4T(#PUeWV*;vuw<Y6Si
zM3;tjH*p42t?^Kx5*=HuL-R#KboQ5_LmL^xQL6>dnF^`Nksdf*(*-ZDNt4NPHblo<
zj-HD=hjueP;dse0blIXzyNiEfq2Fxrqe~X_g2G=AWSz>lDPlZ_r4jIVw>1sdQfK=X
zKP(mtASoP0#i<F9WY!{LEE3VyVHz}6DF+vrufqBzzgfog7M|aC8U^9w#LB+R?Ov0{
zm2YgvG2g2owVmaNpQ{kz?mjLnp%RYhoWf;czrp?97pw^G#r|21`15BA=I^t{H(`t!
zY;Ox45{uBX^DDaVxyj|<Zs&ZvZJ}`MOL4c`ZR{D=0)N6~h~DCnkXdetTJewAI6Ibc
zcEY%Z1ZP}%UzHxqi^epsd2sgh6AZigj&n|l6WOg@!1tc>L~(T)-kPjKl5)$q`OPxK
zE=QL0dvhM_RkX=*4|Z=adLdfx`WO?k%t+s>2O#+DT3|N!6L>x?5qo>2f$6O#^qfAx
z=5sH(Nk<;Sxwb;=S)U3Q@?L|J**9?6d>#GlZ^5{B#!_&UBifC*IMXbbo4!_&z8}VV
zNS3C!u*{s~dH;n{8wJc??1I$Sh<1S-b!5+yL4h`YVEZSFZH&h<n~Gk?7h==All<N{
z%me!PFxIkpV$VM#P-9atroDWP-_51ThHdTG^=~ldOKZZG{idYnQ9s-(e*v4RCn~qi
z!iw+&{HNN2D6(CfK6f48^h%TBrEJcja7FCzSPq5z<as^ML42ot9v81=Jmfhk7{NT_
zP4$hUJdz0~+)Su*w5&Lmc{Ju3n^TcZ1$x%*;<F!p;ZJKmhHc*sX<PCSPKVc}d2@b%
zmDDHDyBET{N1D@gKTBFPONyR+q)VmG_+Z`(#+)`jBAV(Pji0v*slk#Zc<!h+jht@A
zvO<f+IV2eSx7ncPL1WT)zXSX{GVuE-W71&d1M)XSkm8^~buvFfsgFNa?Nx*0WsmWp
zUK_?Sht={cFH!TTBI(`MjcTDMd7Y{&;P>Pu+r0;Hf?5w4#qz+@BFxFWT|6ip(;>x2
zL!deSCJZI1)0Zw9H1WVhOgZ8M|MZv=-*;cp>a7CRx)cJ!dN&wjq)WRehU2Kp2n_Rb
z1Bn9>c;ewIOtjLZkr}BF`*}PB>WmVPizvrETdv}%C==o|_ZohEPO&Mun&n)r=rT5c
z?{TdIpAHqe(x#s6)qT11eU+HMVjtES4dB2MGpxU!2#bT|$aw7%^sV-18Byl&_<4p4
zn;gmqiu1)A!`)#?#Vd^OJ;QgXT?Qd<z}Ibm$Q_T+A&;flZug89=`Llws)_1QG4Tor
zg)hZkJNILr>mO)4YJ@jYpA285Mm)E=ar+ZbK;Jk!yh4qs_#AsivE54drdyoY_$8j{
zOT#-~6iLvDxhM!IDlj_2xG0H-^Odg~V&V(NCVbt5>!<#|N0Gs06@A65$85G!V?|Z{
z?%`bz3z`x37wevBg0e^-cOH9-wqgmI<d@GYKK}+`JIeXgIOYNnDv~$_wus)RM~m#(
z`(dmZO)7|o1t(bdWAtUn?9{@xY+ckl7|+<OF;E($fUkzkY5(YGF5e@V3-dk&^~^gM
zki+(;hf;ZG{ZG6g=@P#*K!pZ<%mwQQI^^!wI83gy#~&+xvG_`~D3LMMb+k(Gc)U6(
zee{IWzk3A@mZrnSZ%^^Z*)wRNy#S_a-$#!mC3+3bN#L0}(QuY~^P9kUA#8?ycDV#`
z;G#K`0t9)>PniEu9_3rZ`19wC2~n3}d5vS-*>CsJX5lM7to#@M@~{;Nwb!S|OA_&2
zgCPM=eSFX?pd(dlAo)Tye0P?i*}|jnSV58gn|&AOJXa^VnhSB!i1%>w6=SkpeC>GB
zOq(q6v4kr36yDW^?eT``i3Q@3j#Hy8@%InL8dl$n26>&xDQeN{_dZ~vw+*CbR-l^x
zDt_gzpXkfDW|FF}kdfUn_f<36C0s|<0VAsRcruiBeBeipt%H)ptKhuPmJ^L(Y|9N1
zjHRTB8RryfeFggtyRq3*t1Ik&lZ3IuheOL#GrFy@1JGI@hsNH5frW?I_ri+YT+@lc
z5sl~&)y9Q77eEmI3KsmJ=;gE>Vu$8|{CySLvbTVFoIJ&bN=^8fu_O1)9Yl+j%-O{5
zKJgdyvGlhSrmRs!iRI>`N%xz`ZniPp`_={73lq48aW>R(17nnFm=m9Knsg05gW9|2
z@Ya_|l-Y9)+rS)eCuxy;Y?fWP<p)3h%`=z^QZ#UU1m`Pt5l4J%z?e15F!rznEzkXd
zCY9CTK7B9NT6wUp_7IM!Vp+?=T2cStV=Q;N3R&j&p!;AspJd<4t-s9Jyh*<~*NQVx
z96ky)t_R_3zvDRO-X~D|q679fG>EI?0QAdz;C*@`F#Fnc_<znK<aZ8!a2sNdCdSPx
zWXy_ZmoVSHkyBy(Z@YWa(B-%gW88M}`LB-Rj=K_MyFigt-c}@q+DTme%%A+Ylh#DA
z$DUsmm5;)`!Ti|~U+~&t3$k!HkM5V^aO7?QeZI+*$TR-G;D8iABAI<BH=g3#R|)B#
zpIW5pY9MC87m!sI&=oT*N!owMIZq!Kl>hr4wT!ldmX<a;N#28@rb@Ugr9n3=%S4;}
zaDI#~+dFL<&6f`Cgm~Qqe(LjZtiRIA_;1Xmpx6mjKSI%=yq0@=mYscf>%ju%6?pXO
z8uL3l@~MLzaNxZO8Pmjg-KiTe==NtA)H)2KShgt2kG*4_$`dcc49?e824x<b6G^r|
zA2a?N3fq`Z*?&HmGoEyXZz@`{ckp4o+t@t2mSw+{(V)|k-g#qAE1Iss);1a9K1U6e
zmnCBNYy;}&R06`Ur{JG2!Kk*5WeV+XpddEG!AZ@4R()K^IXkQZiJ?cR74QqMjWZ&*
z(3n=se1~&I>ZB@Y8wBQth@3aQ6jeRj0pIqYM2(7OY?@xdJ&}$>yO*-urq++>o4Shc
z>HNc7i6xwJni4AR55y(o-Jo424+XB93d}t++3YU}Y9Fk`c%LU+vQ8VEUR?!pR)(}N
zR|V(I7eg4bj?zRo*rjhwi&Ztyy}^J6|0%$M)puE^;jBm^N0(F#b;01O0BEgv3G2%$
zP>{7n94jpcvNfylc!D+c)Q=GT9x@<5Of6`~jaAHHU_-iXm2m4$1NwcnK5@&Hr&Xeb
zyx>;`-VIVDn`06|Aa6!xIwNt1RvX^-*CkrZ`*7j$i>Nv)2q$nSAaGV7_ek~)#(%a1
z$y0CeUIv>r1?e)rox$B)Blf*rvl!g})4(%ZSO+>t7B8GQ2VSLD;nSNObne?A^7xpC
z{_09pGCvjqZ%;v$(MAN?>o8EUuAn&LAe>}(zlKv+Fv?n*3>&c;iz_r?BJ;8PDlXz9
zQ&_I-fE*3PM+I%mFY$o~rt_0DBcbS23OFli;~vI#nYFSE^QT|u&dF(#+1|I{$2DtG
zeW4DYuhJ*KWrk5WqE5B8vbIF72^o=Ff>oKf`6cB9zO0PHLh)Adtc6xoaX}L3Io;*6
zjz<B8ZH6lztiN$17PIx=aW_IWNd3Nk*rO|t;#y;x<G&B3m(75?SDAlDRg(tKVw}bP
zzhbqfD8AovC&t@-=l0k4@+lJt9A#|BP6tV{v}ZU?ESU{P{y`vl{U<og{f<WxZ)0Jd
zy0}nUS6nau5d!}m%{x6(p*?Z25c;(NZnf!=n2Q-W=q8VgHoSnU$G>>($Yc2YK|89+
z&VdMPwpYBzVcEnE5IB{I9*C95<|{v8($5X}eWDo=oIT|@sbLpBG_|HxPKU9$atE9`
zq(e(oLJ$}iNnoDJm5mGrm#OSI(tcZHF)kL{{zk*fw^DT5^gCd+^(|C=v;jeZRl%g|
z%*}9k0K$fi1o`<Z&~%F&9o8aC<p!RhbJHF1%ve9Ddwq+K-;&NnnvZ~rj2J8_(x;&n
z_rN$qoy@-bjX7>lz=f~9AlP^q{9h>0n2JJ9tsxeyo`*wVi?Z15hd!)o4ueTo<ml2k
zmai+k&NUC8fQ1_2oTqguH)24R4ohQaj;0(G-nsyBa$E52?NZoN!a)Js&(=6x0}V}8
zTF19x%M>9d(I33Zm}1<#trF~J>(FN_hEwGR7qA*ykNF3#@`5P_km9CBo9Yy~_G%Ma
zk^Piq$1d~HEEi*SpZ%SVsnPwjs`<VeAzilm17zA>g};<>);f#%tD6h3AJX`=GlQJ1
z_6@KL8BS09RiG0(wqs#KFmwzkgR^oNbH9Y5$DbrT>Y+zI>@^`*{H)080Sz+As1!mT
zsnMC%S3&)g6-|o#DfYkd78Byl$RRs<5^!V-R5*3QP9I;E>og*2j6s#7@ES6oEXJ@o
zS|FWr0j;!|pZW4JSjSwSIayLP{?<GGQeZXsn?3*|(-Y8Zri6llJW;04If(dcLjJ4$
z4{N@M!v2pt@w8MY2)xEPR?U~?CQR`M8_)w!_hjhuzX*YL7S!@L%XP6#e7WK?%ntFu
z<kOWnFzYkxCiSp<Zj3l<?|W3Mx(urMmSi{8BWG9M#DI`u96$dS?s&_dk-jV&d`gZU
z2^~gcx5(1*CH{<E@DdYOo?)HIEnL#I>*Az2>HM-T_AJq5ZjF!YAmaHsRD7F?0_hzE
z0u!|Y&zW6fn;Q@Lw%J#?t`EyFdF&>vTOR~b6{;jY_Zp|+_6@$BY(QCi8Cq{gfhA?I
zP*^J}RF%W`jLk0CwcWAm$VYDUYfU1y;34p2v)JP3a5{|T^^Nyr;(@XtT$PWwK<WpV
zY_tbGN8aRYZ9>@I*bMyFe+Gjq8dS35CM1O&=i=qVxqw~mIQotSRr__Hb5s&i_YKoP
zC|2g*rzBv=RW}TZ)}zV#H=wHAg)5cM#WBrAD5&dkEY$uXDr1?e>^KdsXHp~{UUdrX
zST7^l|2a0z=;Y_lQ(_J?L%4CE150}nxFlYOm%VR6@8%d#vV}Rbe#)?ZO9<!o$Pk;?
z^+I6R3h<hl#W$@w#ye=d;j6n}LzC8dQGHMn^gDS#+KD8*f1?Pi<&3Dj$4PFyOg-M;
z#eT<|(?#(<!$4<s7G`Z`d93ADT;JihK;xu{{P7%qg0v8)`La%d-wW8y_}d!tZQ$(V
zhxa!0!3ql@NsE-l&*6P&c*mHW$WFk`|J;Tl?N9LTt2}uUU_~=q)1hchB|w-g3b)Df
zlF|A2c(?=!S$7OiG*&|Lv13r^)6U0;j={b)3e+K^3`v0^%SUUHG^-vAv}{DL1Ew(d
zvl7{INP-ND_z8vgL*a>{Chb?)&U&F|MCmSUCbeIgW`(?i=o)h>-1J1`kQUE9yZ#sj
zmg$hbAqyAZ`h%Yh3`mFU49N9(fMv~9=o>f(MLq@4@Guc3pHZQ176EWzqc%Ca(}YOq
zX^^W9rleMI5T)zb-C88a#|-#!ZZ3>Xdeezhn0XeRI|@acRZOT1;}!n6FHH*zb!b=Z
zLkyg>5aK5-hF4Xpl&e)FzjC#x@J5X&a7GO1?o%We%-%!(p-z792}P2_vWMgA)aZhF
z|A5AhRp@y-pHKT?hR?T1k|%;;)UUc6R+{X>S*LjPoS4BUeS9JQwV(hCdst`O?Jopx
zRHGwfl5xq%SU7l><z;rg0K1Kdc3RuGI6eh)t-MjX_a4g++w!7{7!XXX7k7>bMlBU1
z=q|s<`*roh?>lDnO}jO1TwqCFe`9kW4aU<_Qi4OwlU#Qu3LNB$_-gxpcy*h7UX0&y
z`tvo(+BB9QDo=(7*I7>cO&#BDElZahOVReV>F6-Of-jUvg!MMv;49q6xyMP;6^t!l
z)U*wYB~9_{Vm<17dbMateJBdw4(Db2+Sp9tKaov+KE7f(Sivm4f+dTBAv?(mMl)~h
zGH*7oaeByguQ|upO%CVb+brDaQ4QbTpU3fkN8;4XK%71L9IWi9g4?FL6yKf27{3Ax
z&K^z{41MB5yFb7Z>r|-g$i?j5<tS`k%sDHa=Y4$?`K{{&<g=d@6&JDHxb|>*s!5aX
z&pO5z8BWA8&!4~*Ro3ZI4#wTS>eP#m;H&~3qIOLJ+DL_gZFV(2KVnR>?X&sKA#Cof
z05IJo39??rL$<~S&cE;*8Y~L|=N$*c6VxnFe!mj-<$Z>i9vURodJyam&cMP+diaUu
zNizIZNz&jHUX)wR?jlxjWJ(zhZLfhMKQXjU_ymG5=YrX>V(4$^<Q&&akjf8?52&*h
zmK<M(ftrsCdeq**<j>mFEQ0y`tWx<070g9bodJE4D%50iCqR}dmHxB`wtf2ueedtU
zeOFZ)c+g6`;ie>M_<kIPnJyR$QDBmH2eze^Leb4HaDSW;&5pauwN>8Xd>ZvgsNQdA
z`do^6p;~lfS~O-=q{HD~*HCozCltSy#$~2Y0pDt3h4mCvEIfiGjEUUDVwTS2viS70
z=_qfT0)7vYK%sGnal+*=I{!Je2tJ|{$pdXaC9-mNB|MxiLFzh=!K%%w;OR&>EPRP+
zhm!d{_GLKr$_}u38pj9brxXZQ4B?fNLel!}7AA(C0e>@Pdd^v${ABjuDsN9dE;$8)
zzCVQ~i(Jv~@yF5s!+oe&pATz-KY;TiO%A;z-~sbG+VS?B^VddEah(p-1)IZ2%~#;(
z^AaY7Wnm55y+yt;LV=k83O^nNyF1~W#-$>-yHk@)xE2B?Bidow1$7d1X8~@kKZD~&
z%aHY(zN4DgUamBx9xo>Jpq$EZ;&Ny>(Jw#G=C~)I<ozBjO{n7n{~5{ss<o!>`r6b+
zG6!$6UW%Pm04JHz04x7lf$rD6Kt1d%{#eC);6{wC7I+O-U9f`Im%%Xea0LI~jWCpM
zY(U95n$*Tf2~2MYX!^85XyL7CS>GyrMAV7d$U)FpTn_TO$6)>L5)e#VD{9{*M(-_g
z(0{a)Pw@MUdK-=DgvG5W{r)oiaMUH|vzR+2stU5hckm%05-`?<?N<lhajw7rz*dW^
zjJZ>f%8WttBv63}?V>p+b18a$4P%aPxx%H{pXX~&vuFL|;k0njly4n(1KQfoVr-2Y
z#Bc7#u#y;VGRv;&tA?Y`1|#Arxt)tT`4MO6n39!ei=ZIw3XUnR!O3OX<WbRWj5QgK
z-mK&7&-R-u*!*qjQgc#}a2rotKL>jsvAM@iJ*XOzgZROp{Pi+Dk`y%=?ngYr1M|;e
zS5iHcoe6@swhqi6aFKWZAkC*re}s3{LNaifhru$I3p2UQd3|Fu@;MK<wA6oL|J82v
zeNx5upZ>yMTJ#p2!)|g_jtYG7;w+Fn(F(Gf>a_n}EI&}3i$j_}A#h}kXtfUGNcg<N
z%277-*Gmi5;SIwh)&|7<xGt^N+=Y>O(lpmjl{wAzXol`djLbEsS*E)%)`RWnx4yx`
znPDQEgBhH`oltc1*QVJQ_j0y}OrZZ(Anegrq?ev`F$PN|s+h?TovR-qzRew*dJpiP
zS)E*m=0;Fr-O<JXT>_%L@O@VoDso>?Z@V=OiH?R(K9$%x(}LXYmL>AJ72Nt^22}0i
z32x+dYdToL-uVU%5V5rzXHL*Yo8@{C&}&BLB{X9m%bc9Ks7Iw|b)y^WR=sT-jYYLe
zG>T?m`%p0Wc6IQtli571<rGLQ)~2>9+F&eDq=6f3#2tbt%#Ik2cYEcC!!#q9l=KHe
zC$2)FeI*x|oWQ%--$L*{1oF#%@~Mv*FRyhCFBnsa=TodnYaMeET+tDaPzb}yi~2-<
z`T+OfUIos+wg&InJ%E&h-FTvnWzjr8p?QXo%KG*|{2%rmC?14By)TZ+3p?@I8VNGz
z`Ea^*vlgk|ph~ZNVjjg2!Thr#b}yEYauhsIFSuJ2gK6K~VZjUoT9j!?3%9F5%(W>H
z<n<5)m*#U_jfXMIr5T!57W1XIe{kDv&B-n~4I-El&Kdk^gU{16sCx<PKiB0zX+R5S
zq_iD0Lhr##b|z{5o(v_&Z^5h)`ZQPTCaC>o?z(GhA@{C6?HN`K*(yTTm-y`Pu2YdD
z7Z!s2QZG35h;`J`(qMDv4cz-a2K~mf&ehxo&e!iPx3@-#xLt{cWm3x2ds8JIAEipv
z?Avg`-6VGIjo=;ZIXJxjFEr^_i)?<mL5bcUXq6wpI{%YgY;GuynDYren<II(uUU9w
z|8Wdi=M8!}4=^kG8DwoZ4NoI<N%oJ$U|@Fv_jD9vUBDQan=7PtwnzD?X7?~X;3KMi
z4aL)q+i{?@57xNc#7nDxLZ(w7WaaF@(lsHR+By~9bKznbxuO&<TbH5m;#y3~KgaEi
z`T=517tGdp${TpG?97D{zG|91Uqla~r>+JTe@Ou0nWNlycK>!7{Rl!WI$+OcWB6zu
zjpOkHg!R<%In{q)ks)(+SAOH&WE6151S>Lo@l7Z={}z7@S&_P#5;S?rNhthVg|j!5
zK+b4yd}kkydG2+PKJW_^haH2O-o>z3kd12cHTWZ3kx1Q&MZwLxq9wYc!2B8Gg$G~6
z*I7^Sxr7`!tz5=(4GJvF69>-!#fu%}Q#ec4GgvtK3{)-8;|A6jga3+iQ2ppK#Qhr$
z4l>1@&7r;U=a4kHI6fDXHmu+rE+lb*Zbt#5<Y?jTVzFyN7L04uB7Q2vXxfb)9P=Op
z`i*zNor$fuWZPc2_xe5apDch!Vk1UjF(k`6rK#YAqR5$XT-1#AatkV@xXbs4(~aF*
z;hTd7S@lmGbE%b~gLXGR#yc8nx3t1kk4ezy+yx7155}7PLFeK6BDESXbanDV;anwH
z;QEX=e2|GZW~sBYSDn~*wK;#cnw`I{@8M%cYw<UCD3J$W6zE2|SUApha<W?*u+S@u
z56C(V?#?G*UGEUO*PdY=6Kfh&YlUUg`jCp`X_MMTvBe3%z;>aiPj(m$i9d~94vi4j
znS%$uNsx&uMkI#yyz<4ATXO0u#Oi25m)j>ut8Cy71_WYQ#y5WOs1F3To8it8Rbcc^
z7uTk+e9Nb|T+*_qs54TZW-xDj-m=5ECg~S6ExgZnYBNXATXpmmMDf?tnX8-q%n4P|
zV3uQtW*v9o)mn2hKkgPgH~e63fpUJ%jYN$1eZogiHKaiqjnF?<0}P^uQ`;AYD5t4L
z{FQFQ%7i}H*RM>LYUvWUnmQcTp98UnCZk~b1M%$NtsrPq#-JJX@K*9ZoMcYxMTK=R
zt}+Y<PVL0#g*Rbi?I^s}H;noxcf!<|^O*g99zVJD3g$j>LN9dzN?wwqUmibYGhN0&
zF^a&tRYAPp$t2un)e14T6)2SUgk*yTOrG@%1-_M>pg#}`=T!5}Gt$5%SenEwXM48|
zt>WdMx^Tg}Nu1}1ZP4U!L)<>83)XzU3Pl+#=kVhLXPfK=Mgg-?P%Ovx>q%I3(S|qK
zPzS>A%~<TN1{a)iP)%YQB=@I6X<#Tv59yNXf^Nv2ya58t^k8Z-n*n8ZJL;EN)1t*Y
z&{x5ko2svf?SG1)_sC(!I64o72~NDgZX$oKRY>OguzvmzZ?XI4bo}|U0;f1jk<u(J
zJmk;rD-w006UB!xZM6v)-BQI!V+~q!b|N^tUFTOG{shy_@5B9@Mx<bx1?_gY2rs!|
zv}zlN>zy8eVg469q;5khr%92fWrmn!WebNjzQY{vY;0cp3!W{JBl~X}V8jv`s<um=
zd4S6?+M*4|zZit=)$H!(%$^w!_n`gANX!{?3Px4Q6W^DeTzT43*tTUD#WZ^iJCe((
zFrGo+apq#Y5DCur$3x^gC0sF<b#s!vdCwi2__K^J-~D1G=4^if0rnp7?71#+64!xf
zTO(X<lqOBfPjc;Ew{i27pHP0_Fqq3okVN@sm=aoz_MYtgY&?m3^gy57($*zoKe3FH
z{XFsJYkBb8{TC{&3rFYc(>dq1BVyt5rQC!$HgxR_A@Oe9geAW|LfANen5z<pvrj&O
z(0w;Cp~H}zIF<*!Q7PCm(i}7&UtsrJDOmqy0J18gU~tnc7}NF$zOB(B_55oHdwYS~
z@qyjZ;SblpWD8W;#zWW9`(VVnX8i~C!NrI%c#msRN04N^%$eMt87?r^PLZ@%*rQsk
z5*}eJvAkKk(XQ+pH|@3*c^PU=PIP?3q>X;u{>%n$p56cyMmUO3`>}rNPR8TWDHqvn
zxCFW?LgJMx0srljC*|!sV8Zr9P?C@(3t}C?S<+Cfma&7g3^pdmrgdTwo8^t#Sc6kb
zR^#~K0=RNVkEnf&hWzwD+~@@wwB^7PoZj(_&7(ILY<jH6X7BGg=_k9f-YyvQ8ej9@
ziZn@fuLPXQkRrvWCScvn17Ih!20yN7L++XeiSzvdf+hPLpG>eI(PRE#VYM#a>nX=D
z4B-L>7ULGUhx=<ZK`^XC+{svcft&7#_O}Odzk=?eZgwil#+71JTMh=yDra+mAb{)2
z_^8^9IP}WX=_&ajDLRAc`j27i{G||Cr-kdSW5Bb1D*WC13Dw?T;GWvbkrIvJ)TSkn
z%b4*MzZ`1D#n)Ac*_af3e9xTjS#6A3r3*pesK<p_?}Uc&9e8LTb27&$a@(UqQ4s9p
z*f8T28Vt38+B*w4Ss_dMm0t6D=U;L8IkI$H;aj*+yAK~t(kGq?Z&7`zC0Y28Wq`*H
zLR(@bKS$4$mc3)o6>l@vySc{4ulItDGpE9FC2LY>5G)pGtSwk12#1rp!$^$05iC&F
zgGJ|FV1|z|*|x_N`=3T|*LNxq{i=5;$Y|##vYhs+^IKq}Yy}?fXVXse*$}TEfQrXk
z;N&+$y4zEkq;JT^ON*3gpq-0IqrD8P9<Ju!zHEZ9)XRvB`8ID!9{!B2#MGyyaIYsH
z1pBoMs<*d-#9vt&z4RhPMX8boZ!b8SW=78VYSMkpf#{oXjq~(mI~QI_?0sVy)Z99V
zYt>ANU3vu9aC;{65(GoH;XOVkdOIH({R8VH#=!RWlX&W-9&x=dNrjc4IjzwOkbb@e
z6n@H*uj7BCK*^V{`ZN=>_nFdlp;pv4hv&=(|Dwt7YzzyQ0b9RwSpIJU^L7-0&hRk!
zxZ*2L-_4kPvm4Q{igA)Xm+)S0Rb1h{ciiW)AM7rlz|GAyB(ai<Kvs*nuPaW7Cwo|u
z9;qg<u~`H<gWs^^M;$b12g1`q3nCCm6-*G^fW%@pH|<&uV|!Q^MOl{$GOokSzrnnL
z-e=~3%)pPynpDp9C+27W1NW2V=<CS}L?%?047kbBki(0>=!F8fKXb)YjS}op(4b9?
zE})jN1%f8=Fk8erBJZ_vVuT8*+y8^xK@>^gMh|h&?kvdJaUY!XI(W4|6Zoooz?FLC
zVxlAae|;|I%ZWQQ_uPikQ4)B;<O@JZ*8el~_;0-?Np%UpV{gQ`{7MZTHCHD^dt-1;
z@Hf=c>*U>za*X@j=$Hv3;aLT{Bg_v%n@bWjX8#P{eZ2~J9!%uY^WVUv;#b%nlLP@r
zHJB?jo9%LM@R!}x$n3bs;8k>nGniKbmk!^7>#WD!<r2>BNdgeQsu10p^#{96-*B?s
zztF}fob&2pJFNdc;m;0rGC^M#-^?6F2U!lZNIe@4FJf-ExTEl}b~t%)L!FFWq)J@B
z|AwTLI&q%qE4cDak_IeTkNgr#l9HbQT4PynSMMYzOi<x9U;V^)OM00r^%W{s9mLRr
zvADA5H~7C{%>PB1B47KJ{QUV6M4))wu}F&DdDrZNHNWEEPR%VGQ(psxEPL{A^>fs|
zYDhx*cQWSQd@%Yv8-?fQLEq8ap!dcC<je{%xiJik-bUfJgN?A{iaEI6MbHZdXlmaK
z8<?NL^NBp2`cnxc#r_z7>N7vDZ4U;7u19&cgYlAPd<oChuzOZGrfmO!!TG~U!cO)x
z*-l_*>m%SV$djEu7Z5LX<EoC$kehoF#D)oMZh1&VToF5dOHtEh8pQ6pn16WvDTY``
zpicP@kTzzyVm1${aSDR3f3Krf&{`BGujdYY4#yr>=I)!ajP;xT<(#Byp|&aj1s!`p
z@VbQabadyc{GOwgq&I%uqCk?IF7V?un17QwJp?IwT+xAeFnAq+fxZ(kWbzFR9KBdP
z!N&&7h84oPOl`W##E3RTPr(;=HA(f;WW4m^1*~aof}!4{aIQBJX9i?&8hI_?chwrb
zx7$LQzb&2|8-t!#uW%c^tzf2IDhL~IiXunaflZGdy**Ni$|}~t`#N1ZPs)a_dOZUi
zudC93JJ~GqeFE-X$C#;4BxqgJY;62hff3#&Wce0j>Y8{Fj$AUMrPeoL!L-->?nO4_
z*bhshK3$8R*{el1`9DC5M3yyI*T<6v_wjGDG`Rx9h}-=x^qMKlW|1KfCp!k)f@=7q
z=6XaZYUGx@k%HKDy||C@g0`_t){9@7B<7<$-=wC;3wFOP@U0Bs)dacR`gk#{3f7<}
zQa^&s4VE4IBFo!#`0%=qvheKTQ&{*aS?s)4Mw~6a!0&KoP8jx?%u^UbS=)F#{!xx3
z1zzNaqY>3hO5)qcMS!i|S$MfnhImac;YCGFIQ_pRxch@O+b5dgv<gcaJ>ep#hrUK%
zgL%AL5o3+!p2d~HXCO)A9hY=^1e{Tg#RF>kbkC9h_wS<ck&6-ek|RTZY|x{g*Hrjh
zPI{#5WiQ5r<nbRCwP4$xN1Vg<7u@KPS-6Gqt4{O}pzmguS<=7GM@2;9pGFH(n)-q-
z)ff#ExGWF^H*o*%l_gyl-$U5xQ~ckGaEy%K2O&e@Fm;~_&a6(v>5O}tIGjMpuT#vi
zr;fdLhFD$r6||hjL9qNO9Mt#+Ll=}|`{H9TTx?16S;s^4uoa94ElImp8RG%4eBliN
zY;>QAl2#osa`y{pD-Pssi-fc{Wi|Mp{|*ahmtme~8=TiYioW{{@$KR^SkU#HcmAu!
z73Znb#2s(3%4!WPJZDNYMTRu|Qv;q@{S9L3+PIyu<52n71w3-rjI=#;h8OGQ=shic
zy1>Ss6FD}*GFN*vd@o0G?mmU?fpD&Jh6UL;KNk&|Yb9Yp2a3Mt!^W>>*b%o4@<(37
zX>W{4oRbO7Nm8Jzlomnv^7&Ag=>ywP3~8HM`TXyhtb>~d!(SN?(_sqa@<?Nnq<2w#
z-Clu?AF&eyP1?jM%3IK_?G1MA2!(5gLb_+RJ_=Xu=380D>u{nC?s_Oq>i2ZPNRO%b
z=x+obR!E1~r%s@6Cr3XWkfKk1n=;PeB>w0ODbjr`ofpWM6gVVmu&lH+4f$pWo|9K{
z*(Xa_PVO93-T2M9^=smRU1r2?2FDEw=AwPO6<Idv8Jw3%#+l8Mc**n~eq$ay>pL<;
ze${No(N(3XWm%~8uNfYTQ6Y1Gn9+frr!dHPwE|@`M?v6qZVvOpc)l^>LX)g$&#_|A
zjVQvuyb%cr9u1Sy2Ox93EOz%8V^6msEj9bgSMwb(GqVei-IXPQkB3CI{%V*wXbOHO
zUqGwv9UQQGjTI;BvEiTz<i;bWYn(!Xf0jr^jm^*Y*<ygie6X!M4rwxr;J<IC<iLq?
zY!eOxnA?Ui^Se0D)HN`E%mB*o`^wjNkHx@KZ3XMko`=9SX8hLZ*ZAcmMaTd5`g<M4
zpXMtQU%x;|%Y22WN;F9(|B=m<O5wY^CFv{ggyg(9`0t7uIkVZCgov$~_h~2J|2P?w
za{h4jYXo%46j^e{sS`D~DiQx51z@78NW)sv_`>eP;(0E|A*AwO^zXU~d&V+uL*zqI
zVJhPboh(Cl2{&wdlFiLBvmj5u7?JQ%EZ>uIkMq6i0(XDOknIL?^zh$Vcp@bVI$n;#
z@f|BrL#rDC7LEtI8?KPD?JibKzJksd^<fB1si(6Vb^T+H6V4xi+J;ux+G|c?EZ3rY
z;S^LJCqtdKe}LJnV<Pag<>Od}w6Qf1MH>TQU9l4Fp5n>{KHJL2e_9U16sq74VV?G+
z9BlONK##}P#JFY{`5Uc37o0cd{d`OC3Cq1W*N^7w3)0b7<{)>?@jZ&Kr(@ng4*V{V
zq<!8xwCvFzXxb=)<=%my7j~7q8^`=zi_<YpE1b8}*~@>+<56)`25hTXgPu2zgQ|j%
zWTnNS^RE)VKQR&J*BFxhMu%Z}qYTNM$ufz-&(ZgeD(`AE#2B#hIP~WlnEmdBmBM$h
zyU&z3swSY|@FQNYaR=;Xy{IPkY>w|^?`fU0;Nz-Kq%;0tS*;jC4E3nr#y9NlY!52}
zbm))po4DeE0kMhb<I<!oVR~>D^c`V+lQ~;q|0E^IZZ>C~qpz@Ng+4uz(20LGu(_qP
z3~@SG4(_RjbZF@!TxmWCV;_mJ&BOx_EHR|3lM$EB3`M<TPxz%ShGhNGKOoQ_!>>`e
zi^9RRaOi*x+214wml;E9jLZPszi<QBPk({~nWvyvV<F7r`%&t=IuXiw!>=?o`ngS$
z=y`nP1^GU3yHk~1B+OZO?ku0%IR-?em+_*MXv?2jP$K&Uowb&Ulbrl<(%Glr&xb-_
zl`5|$e$M}tS0?U5@mze#Uu^4&=NblTu-j0cOIqXyW&v&JJuML&b~%B-a2q%8ehu1X
zujSKAJ7I@Ao84^`qMi9Id=jNftKKsfc<(Ser09ayHAgY0@d@0jH6by2|8Y;QvRyP|
zk(pe14$k(=8T-}-%WoV7`L=eh%_o`v+dZ6uhYU`)EriUWtvEaU8nll5fS&KuVdLE(
zxZr`HeOpKZtA=xBn?69~O9NOPW<h*(Q!v{q6ymD&=>7+<xa`Vn&_2HuEN?SE{J8_z
zS00GtGwWf$&pAw*o`u9zl}Js#gL=P=FzxLnUQoXh@7kM_3zx5gUV##IzgxlOeV>Wb
z`R5RtQVANKe*qikK%QqEq^`&SLH63a?zQ3YBv(kf1*~7NYz9|T^AE_qW9+m>!nuAh
zp@lyG<LJEOYJT58e%d>yU0SD|tR&QVUpGn8$4FKZl8}gGC4{6hGAbcak|ZQaQs;f$
z$x28vB1t7}mF$%9yT8BxJswVHyzg<nUeBi*Rhp(lpFGRJ87<OeapEEDAMzX`m9C3#
zORLkw&D{_>(T1MeZ9pC+vAceU02p?VaVb({`1r}e{4Z%Cu`fN%xs5vmF0%}wlX>ob
zvps#QlNvQ>xQm<q?8K(maghJ>8%B=X&(FOz0*&v>(oaJxus6YsiwHdcTd(}Yg0T<r
z(+~j>WXC|_n>SEl{{@4(PH{I`KT)2RgDPWLm4?5OI9FNI6UGm4+`=U6`=v<dZc72h
z>q}sBz#wwK=?ZT7kNKmbPC-%a3*K-15Zq*6fNP-`BLveR?UFp@W%uHluwrc2E<`{1
z4ybJS2f6c0;o(k0k~d;7t$5@H2SAISlhPt{>a@t<OWkPV+6yjopK^u=RB1%#6<Bx7
zh(2AdL&vVj2PG{%8fNnYnjd_{JcZ?)N$L;yn{fkf@G7Kz+82J?o27W?507fLRd^~V
z1%qtQqW|zmd?8l|q4SKnEJs~x)2>PU3LoO!1J5z|hCGR%u^QbYLNHsh2tJ%rA(ei1
zcwBt}25$ZXIiEwoeY^^FD?SR1FHE@lT-K{#tkV}m<;k-kB`Wc^K&VoMf}BqnsMU(D
z=iJft%U9^4!y);nE6D47!b{zUV63<ojMgd8Wy=*wv#bVM@6Ta#3lXhK*W=0uS8{pm
z9^N_7gvQrifzTJJeBSx}yyDdy$Q7RgWi~JH{gwyEtwXU&sT<`Mo080pbMZ&aDcH*P
zs}HgSWINF(Pveb9<IEy{-_uMS7-~U!=X7Hpzl%FFOF;V0RPgmNfI_hXjLFYN<<ZB%
z>5~-|Ek6JYzOqcwtiSMbwLYj@pN7AQC2+eb0d3D5#_nWe^7$Fdhucc?@z!?SEo5H1
znd`a!@%d0%bPl{jzQK9H32gUFKq+5kQe<Dptvakg9aB$&oAGB1?8t*X-AbhBfi4Y9
z{|eq2KbfP<2*q<tA#<rM>PI<(>xBVG-2M#q?|hF{fpWC#s1KN#8PVg%Zh^A(FBrvK
z%Xv!*FyfCAtjWKH@y8<}GTv13>Qgo-oBV@AB?{z9un`GlGtAp!5s{8*f}ZNH&}^<x
zi+)FOlaHk0mIPVmgnB3OxShp2S$5;AmQ=Xn@dx7{baLMU4uk%RXqIJFVf^MtpugoX
zXIW>7`(_TqU$Xji#Pz?>p5c#;+W(>9SOeNz)`L%-b%@6bHC(@4hLm)q!M>lSwDJzi
z$!HHE$4=~Hy*D`=HvS6wc^lAn*9Lw%%Lw%qW^wuY?5<*c8oPgPg#)ktfv?~Zq=uT%
z#*RSFU&!G^FJ;>MWd<J=b{N0ERiIy%e+8>T8?sBykgOYd2=jO5pnba+xJDg<_RD*r
z_mVHnDOMwyuWfNgupUWr(gpu<-}$C1-!ZSz71~dA@YTeOW}RM(BDdcNSts!d{e)iY
z%ekZ|Azf(6y2I87xbW-6{EC@Uv~OY(-{{aUQ8cUNdUsi|PQqXD(LaO9A%{vV9CinW
z+-yS(F+xqojsC`Z_SQ>EA?(-|JmCEgl*=z+Y~)L*s!V5mmIQI{-19KL?kY68Y~zhC
z=+S`KG%Pz-i2HV%;BAy5eX`~J%lq>|QB#2NOU$?swaegPCxDFjM#eHxBDPb_=!ScJ
zsJQPDmwUPwhEx~fpr^O6F!B)Wp3Ru`S#O~zelg^K4n;?kB~boU4aU!Y4Iy8QsK=2C
z%(4qd(+YF4JHUi?{9_&QAu`D8OOfnRm-!n@*`C_FNt|EEvK}5rxV-fub3E+k{gd`G
zKfo`nsac64Ly7p!@&~BIT$-gd4(PLD8k9R9=IhTr0yn*_IMdLK-ekY6nt89eezgHC
z60Bn1Pacgv2<h?r9Vpj$6(1>A<BT>vVi>`CIS#CU+`9*y#u&0^^FK*S@D~U*(4yDx
zs}W$`-I}9pz3Q0>jj6xH5t6CkZmK~y)IEgUrL5zyY6Grr*bl=4Eol0oPm+@krbLsz
z0sm@s=wW@_vFxrbAhH32Lb@>g?mF1-cLNh5waB)0KcLj#lXH=F6#M+prM*hC@r=G9
zaeJo<>CP2=sL^;{<*+8{abxq>Ia9!?=pe|hIf@_5WytMW`t<aVcPLp<51Kk&ID?m>
z;ptnzYuH@)HQJ0O%#<eAe_X(b9(Ta&ham806)w^Z#;>9G5b}g@$u%49PS7EOJ1@CW
zQ*JU&Q;c(HYZT=5m~t-WDtu%`KJ$m&DC+ZehM47FQN5`e-Ns4L$mElJ`=c<3*EgWA
zjxfHC?*<+wr9t7RE2#D?6~Ae*{LO3^-uFT-xHap6OW9V=WA8lpUZPCY8SiS%${`Sb
zua{R}_8oBQZZOC!Lhp=CSaW(T;~PhS-8sZ#4J8<Q)t1X0^9nM1o#EA%7ZBE#h=;Bm
z#@g~eh(B|i&lA1j6^@${bxR2(w&cJ`Q#IzBsKDvzhE%-e3iy^RL|OaS_<SdVQ_fJ(
z{%%R5a|WYVur{B@m<bMd-UIbzPBP_{_*eY~xDM-qx06ii*$O=pdxv=@ED~{_M?PP(
zX(D_vRHq)S3mJK(4CfYk!x5FiB-DHbH?&fT>|ovY<fXCj^pX}ayQWTe&pe1xj`tyS
z-&H<+`xVJMFKL?Q)s3_FzQb>A`Y7@gQ3K`zyc5t4^$9W96}26wJxsyI=NtIm<wJO3
z?sERFvOZ~I%<mZHa~+l%h}Sd(WY9+;S+w&FM!FmFT6%jiBc%c?(pavO&2bLgc?rf=
z2E<Ee0_VxrU`HPfI_tAK)f>b5vcq5T`&Lgx1r=Ljdz&$JC0Aix;yKofljgsK-+|Dl
zH#xDe6JkUE;UeU4wFT>at4;y;BV`bI;a71$++Dc;)RJEM&<t)TtZ7~F67Z}%fn70^
zA+5g(hZ$^v9cQfR&RM$j((K!C&r=S38JE=o+p(~w8x{8H(v&X^;HJs?%4?Fi@zu{!
zuvAO@H?aq-9CgT3(|8P(VvhLW^H~3<4StB31H`)+`j#DKtlubfdiMp*3}whAZUB$H
zl_wKw)3EpI6wW%X3IZ3ULTOI`aGjdOrA>#*jxvE4uXRaiWFx;mn0Z%a-=Xy+Uku%}
z45f$f1^*nD<(`y*ia~ez)Dy*Mseb@oyC_l5O?SW#Y*^>39!HQTaQH<r#JQhB-%Bq+
znA*(ct+Is~3TN2tC0gS0`n*^Y)C(hRWl8aC4cd~ZL-en#g^=$z;BQ_VxOokQ=bC%a
ze$59ya>o!!@BDV&As`6T7&|9h=?f?RtU+G|e1Yc%sZil@A9balLaU1zy*GLSH2(DF
zMHX{F_whfxYNAH+7i2)R(hnRw<Sm5WT*lcXsE{GD@}!_tkCqMF3zI&n(AAwyAfEOD
zqT{y1)o12pZ0=4}o+v|)nMpGXwjrrq@d=uKe+TDf)<j|MU?Mo@CUH};rR~@DbM-&j
zdFt9Yh)FZ05!J(>EZ`3w_KgL7$zjOo$wSYh%*A*u1EPEup#OkA&K(^97mvTj{P6^v
zGxTw+`T#zNDaYCXed_0P46fzt(<>`>V`FU+*B6)#7NX-A^L#f7($7gAA5^9O>$Ly@
zanBn~x^u_?IMvC}-c9GYKIYDNt7bv`LUUm(o&ne2Cs1^$m#g$hh5euP$&$AU<nYM#
z$h6!jSma;i;WL1m%Dd2S;x6n@E{8^9$-9bspgA%CdIzWSKNt%qaz%%@b$lZ%*`!Uz
zt0iM^z#m?FjU{F#4MulU6&f8I0-EFF8SBmtdV5Df61zv;^Yac`gxS!8UrlMy&84tP
zgRzMlO!2(w37mZL9Qut`p?T2H{qfw3*TOEdtOLspN6X=A#+OLkW{38s9#E{WO^%G;
zi+$ZAVVm7>T>g0w>mN~gRgj4H&dSl>ImOt>_O_3*dLa6kGe})woXPl)(8}^M56+sB
z!4rxgG;T3yj`|OuIq1;%cQFvSHwtR5f91vT`S7@IKf>W|ym!+Dv{sa%-{)M257gib
zw#I>X`2g4q8cd&uRY2rHmOW93l9*)vgOtatGt#tMeCO?ERA={5c^@tK4CW+o9rg~o
z;!-iS`XG8vJq0h<OF@lUHh-+ylq#@0`$(IkyuZV77(AP097gQ~f9<i%)mQ?_eXAfM
zT7k{z@1p;{Xg*w=4cQTlb<Z5Lq8I1If9H(A>D!n)q0HU+ZD>4toZP{(MK0pVGcP3b
zmdMg>kS9Zr$`Swb!?>-7HAs=g2xw2z!9M2km^9pq3XU{zQ}0EiNSG!5olu8zU-qKQ
z-`QfB%_5>>l7(LXmN73?Giq%;fZ4M;*!PgJZS4MX5$Tq=W<)dE4yoZ957di8{U^ik
zl_s>m;U2h168N8ntf!flhdQkL<XiX|$DU>L$&cH_F1Oa8htvrWG;|cV?#YKd|1M73
zVIhW`6{Dl!JeWP`F+3ZhMl>Wg1ekZte{m+?_#qYiI)YJXxs5;Hd=~#(WJdBHM(~Zj
zF%nC60tMblFgIui4nJLtzwL~{W%4>c6m<B|i$)l}*cY8TSL5&{%wu=-8@T+f#KX;u
zMUNHGs=W>cMbnF2s0@97Sd$t&sl_IBZEDz}PM=LTBcdgt-0!=(IKZ+Do=@2ROE?z}
ztY_Z)Kq<QCy$<svJe9a<nc*I-cC0G-%3Zr9ASI{%F|XSVN&B}TsBgK19>eNTF#L{W
z-O7E~H*P07KF~nRBNpr)?;gK`F|JlQS&+TzW~7qwuUhA~!9+C$lBMz!hFK`nhCmr&
zGtZKETsg^H9H%8!^AuR`GnqSh{0fu@p5*=<l%i`!jKLJu7<`v)P3=y-1L197Ue}`@
z2US>5$IopjF-*if*Zq8Exh37k9H8oDk71DNB{b`MiKCQdNzYgl8o)J!#|;_SJ?bX<
ze~smOHeZB*<7v>CtHcW|<!R%173Ajqz(3;Oc>E;Gwwx2;*fpszT42Sz9y)ZxPk9<*
za0i9==krI;{=q8S1zhBXo6f#!tQ$OCg~#{G)V!nt?zt`o`Ev-SmsN<dsVyxRjmO`~
zgQ&a9Rgjd`;OX<`bgUbXy{#j`A63xcq76xo@8UN|o}r-ZyYt~A2jP9N2CdpA!!;Kg
z(B9Z|jKAy4ho0I2USpqQZS@Hl`#v3ml!D-EiyVzv#pc>!ariLyF*<Khqq@^0P$5m7
zXlXK5-2e731KVLwumY`p*#NVrzJocXh><hCi8mgwCP5dHxTvLtaOdP*_*|z)JV)2T
zKC3U-_frFg+pCa%_eQK|Ii1W>GdkcYO9B|b`tQBR%z3;R;TYo&>n!88xn2gfowrac
zQ;uv~ewcYYh3IC#1iXhHW}O@fUlY>Er}1aN>4-EvD3uF_^0LG?h(qBH#&<Biip%|F
zL3n2o9-7Mh-DW#+t|rUXF3)5P)JEu?UBN2`9ph$IGS2KNL*9pFT#~ELfWJ{D<HYw#
zX4iGYwN7Q~XU?({ua1gu#5}<9dcEMm&TccqpJDrvWpG%<164ONztaS^w{*KCaoeSi
zb{)mw8(D_Jdy#Nu1It$qvBSE5yWxQ0Eyf?QkVM=F14Z>gP%>jX294nPTWV&sT&|X1
z?N$k83f3sv*duQDJH+>9#$tFb%VpQ?hw$Kjj!Tgum!}M1-ZouoKKnV$Utmmz|2Cns
z?=j|z_#{5oW1SN79)5X|4$i8+jL+>pz$%r4Sn$*uRUOJu@ZlO)Xm*)->Rq_>?2mkP
z)+H=z=*K>@(|mc!WYozpr4E7lFuSq?J98wkeXk0c^qxJ-dM)`Yu2MuO1a9L{IZ_jI
z47Qbrp#B&$^q$&|c3BBv-K;==I$6_KD(~RGAIy)&avrX(Z02YD4;~Ck#j2G&C)yeY
zKh7W&nLoyxpIM*iiUkpMJmD*gyP;rv40NW{qQKYCS@8NO=vU|PLK`c(ag#CQ8XUx>
zvlDTR+-CHf{gmyMnNM{3Z_a0?7HtU?5&a1=bd;F_Q5`fLCWq9c!97Fr)lHH3vR+&I
zxN=_P%zEUEU)j4zkqQ~#)9=nh==}K%97h`C-^>%R#z>aFVqUTl3oWp<ybFG(&x32s
z1?2jv8kJlx;`8$R;QOQirc^1@%Gk4TeV2&tv9+ZKw%>ps%hsS_Idft!@PGqX&VxeX
zUQGV77+mBxqkZ5d{?W2h^!&3MbY`(EhR!+&nv}$U@~=esX?IXG-~i3Wsx)c79O}Er
zaPt-x;L|X3vV2G|Wc>Y&S4Ig)X=ejxyJtCGUs8(t6Q1yv`BR{>vKBVJQl>pGu0iFT
zND#erfxFJfFnN*`4YGAW@xNmD)czH<AF}*aM=Y08{SwvTE-s3bg3^orFpPO1Cclv)
z>DG(5<j-nQsQ3r{uRHK<0$Wll*~wSsb#mX8tf;?<0c<^|MQkp$p!d-GAnKN;q3inj
zY-KmR>7+>u=Oz9x!w5wu+xXJVnOG%f$bKg^{4YaUYTnTgQnzp6(($U)d>rdoe-GkE
zTbUBfOTbOyWT@XMq>Av0t6nT1<-?9~LhlqVNwOLa`Cdki2=;GM2T{+Y8|?p_#7}P!
zf*|gwB#dPWW!Ox@Ubde%G*P1Um0{R<;W-S86QeG>gDo1X0L_?=zOo}x(?kOO-###x
zhZ?=enD#-ga?ImoM5^p<`N&s`xUi!~AxCKvoOTESH`X`RNY){>LJ>{08i3Hp8fbE%
zALjb&VP5Ee+&&{6+Pk!$8(6?tBUauZdcTer2-k{twq1mMi(YXx+b=_&?r*LtE{w}o
znE^Go#=-c|DoAxOBzIcaoJw;qj(13fZCg{ZWYi02h_oTGkIh&g-5OtAIswzK{Rb14
z_2aC$D%4%i3De3fsKDW@b8VXj71(6*n<AXy<-#e@cY<Z?@|SUud0$vZM+wvV*&Uvq
zDrcb}OT7P%$8|;LQRnD2EMI#NM1QaI!jtS4pBC}c;#l8iMHOxt{TSmPmUE>KXQP%H
z%X+JR!G9&XbQjA?m2arVUYWnV&Io1J_niaUm78FRohp&5J&OKudn6i+g|z5S9WI$I
zVJxft;;e81EuG=PdD+c?g)5Co)7NHDeItTL+y<5bj>1CgGx#Gp51d97@|P6)@#K&c
zb{1tls6Ug$?egA?zpx1ImspZlq5Hr)g55V5Hp8P4jLCd=Y_V{NK6n*MxFZ`2(A*gz
z;-NZvy;{M0?G0kQzK>Xv^cNEM#;`q7D?g37AEKQV(9h>1MDKow@hkpuZ?5Z74^RZv
zwJgVdbU3GORK&9XdwG}Yb1=Kjj5cZO)4qqAc*<Ft82aU6jJ*aCA36{D_IF`u+aNlu
z<ScAzUWl6tW&lj}gvKpm&{TQ_H(9@@K>IP~+*U-*!e{VgrY$XPRzg9GBHpGVTK9~=
z(REGeZ)3)9@-T!ScVuWt+653#K7e;#|AT!)Tey!!e{g7v8r`?R8IIXXP*DI_Iinv7
zB4oinS)VsL=ZPtga-rTqn{Jq>M$uv;Bqz$z8^Nmdu7Nf=ussn*489F2d%H1OdM>o?
zNr1AjSJ1b;ox9C=vda?6aAZO)zIbs87n&LnSHq83x_v1>Bf^MYx?6`uk)_-fJcOzz
zjHroc0Q%1V1ooi?sM~!X1XdQ30{vas8TyRzl^OHF-h!CvmSgKDO*%r{39dwydi*-i
z-5+d3wo8dfovSO}8fHzFuhmCQEJp9qcc5U<6i7XzK<o;Sg8$NqeAIs>EE}~79#u|2
zf5m^2QEui$uoayLw>N{%!9k>C;TL=wT8T%y6^Y$+9^;iwaMGR%tdc&)`U5PR<NF70
zH8Ee5ixCNGZ{dBja^Pw2MHJ@T#?sRdc$4}zD2VfiSA}07GHea>wky*O>6$cvW$s0F
zxt!aw!5BVZ3kAdUnb%s86n%cp+lKAI#DE`|WzFskwiWQV%H+uN9cfUuv<?S7K8CRu
z&tMp1$4z*nPLy9L($>{i@z8BU`euq131NAbzH{lE*SmM9+0S^lYcBKgjr&15{sC+f
z|1Y~Fp!0IoiSz;~`lel-Tzb)jn=VUJZMFt9CHf)$y^G`SJVfto6PkV15F7Y7T+SK5
zrK!bmZM-TGR_Jhv7sjG)$VfD6W(@Fx@4S1g0^O*37E^o8$iAOPc&F<XT-kzaFhZ{Z
z{e8|!raJ6{>3>(ki6KVx)!#VS{ZT}03@wOy#%-4E3gQ;5Gox3JUBlu(0&-wdHB>%&
zhU<R|$qY-zQneD0a2fV}j7kF6BaAn*Y#F>tuZKvP!{Vk%6vF?`hf)`bxM}udIDKj>
zcrnL8gSZ59j@m*?vJCktMl2Qh@bPU?aKw?#N&Ti_g>f);jdDX*Zy751`bq3v_8$J-
zH6ZpEbUE*`-7v?^l5E$vVcZctnleh3MvBLB;Rga?*Wly0lP`s=;6mK-HVR92H=_gJ
z0RF;V{Hu=vc*0gdR-bgmb;dTdaOP)R%v@Qq?+UO$;SCaAigYSh!@(^s%#X7dpGs6{
z-GCwLBlEQG`ron>!QX$!az6Fe^a*43-D@3zmn-CH+{b^|ds@s-ASZEWY7MqVrhx6%
zW?qN4pn-dnL3i{M46JGanKLC=b<77;MIB73VaM)TKk#zXgmh=k1Bm=?%<hErv2>j*
z7b)+D3HkT2-Tn@SCTjAr%U)pKN*}(toUt=cv)-riUask0IXu)cqB*-VpmdRtzY-!%
z<NaE>XZMwf_H1`}-J(TOy4XF6iX+mkvZOCd21>@>!Nc`$AaC?6UbF2y>}#mT@Wwj+
zz@&c=^lvGfG3(N*TQ%sKC{NQ1k@J)%cup=24^RAwkMos@?$~3X=r$Kel}XU^b~)rZ
z@?1olFYJ4xOTB(K@WNLkvFUL%sGMcqhRVBe@pUC`t3nvFin-YqFz?%hGZ>x0=4Eof
z*}AEQk|8Y^3(**_uFUCPHKCeQyfF0fJ$~Q#w|HyZ0W@AFplvzl@sBg})L75rzva(^
zIjIR4uI9+z2W{#jx0}~~z!=wu^5Mtu*P!m#0R3NjK+t4_ID0b=x-2BIL4)XtrB<Zf
zHj*3Au^~NU>M)LZ)gm>l`H+_gs-wSP-a{?kHtjN>cB>Zpl52SXU@MN?QKsH82+NPG
zhIZ4jj3)xT{7E0opV12XQ>KEp!XNx?a||PYtMjg(8INX8E#E8M&wJeKNAorB@YV45
zATDIgT0a%8uXI1Q9b>t2kH@^wPaTtd0M0dNlG06X2vNu3{+B51T^YnnPS%6}&CmS8
z)JPOL|K^S;T9UF@#x_uQmqf17;2w4Ng7~sB4Ol%BLkCUakGS5)OQ&1VaqUOuyFG<I
zk4J-`<v%{~kOlQ^O~VaIjHx$n3bcQDiM3mrp=w447eC}CXD$5<viH_u@}XJq;F}T&
zZ})-HU7>u<UvsLJ%vi-mwVdX&JbXLv6(+4!hn$k>jCIz^`>pa}^Ex%&q5K{mNQeQe
zRuv*y!|t^fjlw&5*YWT<<}A!Kr`f7ye8|aeu)A^}BHixe!~R5c-`)<R?y&vkG<mxB
zwGJ8P{0ncbx`CpI0N&sAE9a5>52L>cXx|W3niBdJZ!VIdjc)(APRkca^RHlv?`4SC
zwFXyt3?{WIH^3xOM3wX!nS<yS2s5{VlYt48san%bTI<27vWoXgR07#aW5Cw?BA2^z
zF7EbFCFjD7X-?&KXzMT_N^BnCrRxXk#zSyXg*wbsu%eGvWI>f@FfSYOALx%#$A0Tx
z2vPGv{f3L&rBer?o_V+fw=l<P$$LEF8G}JP<#FReHKMPhLKDw*f_<$vXE$OKHukq-
z?C4zZVKa&Hv&Ue2v<9tlN(Zmu5nOMXDL=Mbi3VEQu}tB2v=f`r`Ja_Z(1=~IE7g=S
zB#pS0j><%s{D+^TX+`p^n5(CA0X|_vg3^?Hv1ex)gjE&8!9w<2)RRY(myDBUJ_8c7
zv!KG!8GLU4#4zDUl=nS`Ztpc<TYx2<`Jn{$-?KZIhxfRX*IE9uxs6}DgE^MQbxBI+
zTXFR*@i=$vAQ0ZC-0#?MXubOi9u>3xsr^Un4SdciUfd69SwiY^%M$G8?cx2?8u>`2
z54>RL$zpZ;R%oBT2^37UNz^4r{Po0~D83JaJ`vBCeH?&{xZxOaWitvKc8kk#fcF;3
zQop7P5Sr->=Zq|=^#^_WU(jGuI=dRx<L6*h(^*t}GaOv%J9)u)aq-SFwr9Bd367ii
zv1gz)?Q8tVdvrU3;O-G|i<tyvj93oepTlG8G7*YbLgE8^jIRhsyIb9Gd_yOklhP&$
z8kaERY!)Pc_Q#whJF(n(0!Yi|K&hdx_;k@+=5mUcjOn<8k!zXPbJ!DZ)zmwf%+{=Q
z#dNMeRGO-{?ZhC5g^+yfHrM+|h7V6W3v)**g7C#c?tN7U2Ky)wt6NrNMA>zmtt^3y
zn{~+RNe8g_y#@KwJOBw_%h5nXidu3*Kzo}MemkOyZSuM3UfBvCQq8EB`5M0TOQ9rF
z)PUzbt*MF9OSnO;i3Q^W&WkvUA7jl49y6w`(;IPZHREZ62F?H8in$(TkfS0C$*+I#
zE<eZeCP6o0(REcUCzp9!`ybqn#0z-&iyXD@8G>W=hN0o;r;KfQ1f+AiLI3hJh`cpM
z?9Xx`ax<=?qvJ7nb-WW*3e8A{YCeQ#t5C=ELGXEn0WqjorY~nV;0T$6(EqXtC&?=l
zj~I6_yemUbXC6SGFHcZ$bt=4adk_9SS)AyF1&sA&Ik68cgJ#^ya;R*r_p8T_Srlb*
zPvDBKc04b!CN7UY@$nU>U@hbK^*XM=wBK()xcfX8_HO{%{Sv_Mq$Oj(Hse9|zS$%g
zk+7{n80nBx+`+mG<~7wgIfwNtw(ViPqI+Daq8YdNg$bGNmkgq}2H@qwpfvUSaN25Z
z^3C8T-qzD1p>oT3%|t!w^ZFitpD<6$Bn@h=)eoXSQp{Cf2C0TDf3hIoxoYHS?o691
zxi{d6^{zFjU19{5%6)jq#Dt_iV6XM#8D6M6h5Pqjj#RkYfequgM+Rp}8mHDtdb@0>
z%k~4@o(4sFTy7cuP-JI?@OP*kmks)7mq7TFJZ^*eW0tehB!%PK;dn?7^!F&ygPLrA
zsaA{c->T7Xk94s0S2J{49|7fp*AVsPK4!+dqk~x&xLlhdF<Y!bG)*cY>{SeGTgLj_
zUOSOw?8d5#KlsBhb1>bi3oHJ3K;1zb5RlvW#gRSNzV`7F*46BisL=?bL+w7gq02~H
zEPGr5!oVhWueTkIG^awX9Q#f^P^CduseIbWI8fhnj`<ejc(0w2T-0+5Y)qZVXZ!KI
zay><_i95N-pcBrbIG(33%&AqOIw|5!q1WXxYMPJ4NrkHH<k-#UFIS<b{~6(n2}wAV
zDv@o4tm~@a48txD0Ylkqcq%v!6)!&GU(VA(*-UR3*8d(iO&tz-Gt0Q(pJsGYLJ}U@
zWI_k2o<esy1KMa_$U7Cu(9|%NK?pM=n|3&3*927<&+c?;?qB1=>08d8J<CO5NfLd%
z1k_{h*|U~%#A{n5Z};^K{1rcjjFPXolIW6`U%sMT_d`71^b<BjoW#gz4Y5}q+vjY)
zkFqxJP{{J~N?kH^;)y*t<eDtq*1j87u?~HB*iZ<1U&{Bs-Of8kIOB4)&$x1)kj$L(
z6TQtZqt{Eu&6@QOMeok>btA+WxV9L2H`ehf!)xKM!!PzZ7@^s30a0u*!JyS&IfI|f
zXB~BwZ|plIUhTaP{8v_rEA9nhVxbSp+Nx8xjw3kckciw!kS80b9K`Ftb8)194=%i0
z!Q5bxeCvgO;JiVWZm$=TxGrU~Vb51sIoz1E&KFTN*5Qaf+yVKUPou!clD3~;3FSxk
zf|TELRLptEeSRk(&u;gi{D2MZXa1TZ{LDrAt`=vhw?IJod<<As2cdKBau=Pivrg}R
z4AW^ur%ZN6T3rK0Bgewpdu*l^IhT9hmW^#E9^<Ss=B)qIhR^3LMALi$*|6?81k5`M
zw-)OYfBnO(V|Em0`}IMJC?BSFZbzSwvh<P!@yQZxviOfNS)Zdq|Fke4@5~E){Jpb~
z#P{In{|3{ihqTGFifFW#nucXgx1fAvH{UzEhg*~qiUyw#Vbs3e7(E~h-%9jQAa_EX
z_*#xSKAnk^7%SApNS3bRb*c8WLX7_+Pfx0|EMnLq$g5n#DeFC9{xoAQkuhctEC0l<
z<vqAVSBkz@W^Uy<@32^&u`jA)xtFoqVR4Z(*&;lFZt<JIrQs9*E9N*Bj6Z;9IV&3a
zU4siN-i4ZspK)ZE61lff4XQ3@<B^Gxcw&Mrd9uWUPLq$uE8VIjMyNv+bCJ9C_Y(?6
zf0n%Qj>U<~gv6(%hD!;og6Y3Z&}y)V-Wgc|Hg;09?u-g{GS0%KiJ6Rd8Og<TtCN)r
z&1hugEX<pv0((ugSf){#^Zt4TOp*a3Zsvmjo%`aTK$bDkI1jxctV%YA<r<e%bMc4e
zvCB)Hmd|FLuCGdXZn+UDW0}IN&`(g6et}zGX-4NVzJvbFmwds5WC-$SAI=1K?Ay8=
z6dAK;yfF`(hUGxjWH08Ieu6>KT5x<q83;z4DE4RP)Nqz3^D+*B(664{%b`KgcUd0N
z$24&@j)Sq2{XGKv#-afgee$eJjR?~sApY8S9QH+;esz_gXu~?LXHGp-?mrEgLT^Y6
zm7^}kM<k_Z^|?k-|NnFDyw;6F*<Eu`Fjk%Kxu8lnrxoL)_Pg-r0b?mI@|I{BDG;}-
zD)>+C1Zo&a@V`;f7_;^{cJ9uGx&>>%-}n!@-Id4E;5^CnS{+of(In?e8Ow6uB5b@b
zV&?@3cF3H;ceafE4+G+goAY7I&CBR}ZXt#r&R`wNK#8rgG+oZ_m;{f7P<Z$?2<Lt0
zjkbQovXyMleQ+J;zv&)7en&V~$(vIfBPCjDC{N>~dN`Mw6XLR-e287)fTFBrXlXnR
zg~lS(1jg)b59d8!o(AH<&YO+#m~&bMqZ8QkZfz}>yz&dzWgds6^Ug^2pU%e5sVNw8
zvIu6@7!u#fNm#Zz55p%VaI!*o80`>+hZQS9eh15*=h}kf48~vY@WG_9YM3Q_1k)-m
zqW(TZ>NrG+Y7EsUwnm@$#V-}9$1D%fH*`h6wHCB}k1>px!uSuzQ+Z*E7M|@pj{-Uu
zrdm+Q8f8drePg+uAsmKn{*6XuYhdXe8`_W{qQW*4P+e#V1t(P@|I0t9DoNr0Mp)yr
zGWJ<J1>@o~uUY1Spr2PgWPR46?eC4L%4u2l`y4HPF0~v(&n@G`dW_M#pdSS5h`3e1
z6r$t3AVIDKl{5PwK|z`v(CUVR8If?~o(&y5?mD*h>Jz82vQ*4%XLo~5lI+EMc-I9k
z=(D+wk14y2=QL#r{Ph9P!h6s-{S80jyogrhieR3eDOuy3iOUUYG5(+$p8P0Bvb7X2
zrtTq|SE^$C^(5Y=*qk&joy*TIH)nSY(a^W;EQDm$z;Me4sM(@OkIYZPH{<u?hUgw_
z-G3epS*LLN0$rNZatS8>8bn-VV|bI|JCHXl9FMWH)s`Cybbzw^&ru3u?M2R*ed87{
z`n-nq^WUJ`Mb?9|*MUIB_im4qh9~TG+rMAQE2zm3_p(cv>+>9TZL=g>&M1=Bb&o*t
z>3r~sXLIIKCr&X~pY>MWK#@`sr`yglO^fBJze1Jxu+t}$mny>aS_R(r<UQ0nb^xP7
zWU1*wWfHJ%3-;D-;v@In=6sr+z>mGQggr(ijX50!8IvW8*uMX^{Qzd$k3xaxYVlFR
zp2_|z#Ihq(!9DvP2zoa;$Nn6H-f?WE_{fmnozCtrXN<(zZ!5r@`7P}t-hsu|C=}pa
z@w|~a*jL`l4}GjgjY36q-qIq*;|&wrpBcsrtv#TV^`@MLE28?zHaJ*25|0m^frt8I
zuy6ho?${sZuM3@rA~OSU(bndI9$tpXy)3sMSdG);0<fZYEJVAFWu5vH;6<gWVCYN<
zH93p13;V%6`4c>0XDluL5}rs=Cw_~W)2+FP6IKO5<WVEuW$QWd$|{Oog}NweXykX?
zEkn=aSD`hXLgb>K#qrO7^0QYpK?%F(W70XuS+x+PN-rVA90AWgTGVx020mNN98+I!
zVq*9exUo-`4ByBYlDY3;n9(z=v)_Of6Fea3>Mn5i?t>v`voZU@Rj%*021@ciV*CD0
zkS_0r0im5R^{*4Ud=CSk@4LbOQ8n({po!^9zUZR=ALz17tRwpxXS6<q=I+y2d`zBR
zo~1~X|BHgiS;1oA3c`6h*1?E--YD{z!M!|dN;7p9V_9@32HZOZ{i##2_DUU;slUY2
z3hH=?^@8#GG&oW(BI_&jakEDgPB)za(@yG<G?t~Px}b$s`*(5eciK7Cr|!ra5$Kt2
zN>zG{$X(VG%v&{rSIRXY$}ZiY`DYq>75j3He-`onY;j1o&F54dJaIy!9KjHI=+dgh
zsF5!)yhs7ZzX-y?pK{@}|2zn)G(ZRKC|I0bih{xc+-}OUvcr#n%R@~`@`y-Onj%Zg
z7evDlM{}YqcnJ%~sn8JHCa7(YK(cxS{JN(|qWrI6sd$(;%A^aHWoVO9rK{pa(_$c2
z!HjYSy0o<EFu;j0wDsJ}@tZ`n>$neymR#h_Vxlp0{6EfT@oFes)(=(swKyW>F(@j<
z!?Q<^F)>MrmPjSy?kTqPzxUVC<5~r8Y<2}(lfJ@_H0FM2RRn!$Gg|B;BB6>h+>fXX
zjI?QSeqX}6G+H^BJy{MtS?1WKdlDCOPnooj(gvsYXr8?8z+^j3TATR+1Yghc(_gae
zPc_RVooe9>n`Oy9mP7Lx{R2xQEVuz}MdG;94(?ox!^j=aiybE~081YZoDv08e%Lj5
zV4_1FI+vhnp&VJ{FN;-YQgHVCNN{wr0xyLgehABHent(F82baB$2~#e;Imxn2gcZK
z9|Q~Lo07hNI<PQRj?8^(0baT*xyuXH$+AJr&HVW}>P9rdh5m#1#pVFG4u6S_8w+`#
zjcHuYZ7mvKcb3aHQKt6q^Leulmj9G*;RnaHpu$kbSsS9pm)p9)#7!ddfH9EYzBZ(N
z9w9I&tOf(R^1yb9HT5484mr9)i0H@#gQ-G#%G8Whu=PIs!CP*09GiVIU+6APQ{p$z
zkVckCQH%3JQseWEZ;sTUdC%&ZYw04E?SG9Oq58zfQ-g-N3uw97O-`{*76%Gm;@n9)
zpjY3GzabD1mnqA+fn+06q(2gzi{<F-UNdU<l%S?TJamx}aN?l~xy!OSSUwNDPTuBg
zCLiX~gkRAq?h1(BycLhUeH0sS$a4n@uS0IPh^FOsu<wsE1}uDprLwi$7!9^ps!U_-
z(CK2Ym8_TQ>gAm6kja_c;vwA;QAP!E(~`ju{pJ+RzIhK0#%t4v^QR!VU51Q|YGXY^
zTQFlx(S1X7K<|(;y}46JviBBpq6sDZa@}z-=*~^l7$>6DX*NVUWfArl6`@^07h6w^
zX=6CcC2W)8d=H-j+uIkhZ|w&7ILd@%&wj(5ejkReiiXtl#dS8@eb4u4vKgB5Oq?rV
z*{18IxSV|lePYjZRZn9;>YNTamuyIycZI-MzcjELbOG(}f8`QG^=X=ZCf=WZoUw+V
zigm)+cjng;&{<<a{P!G#Ui%u<AGs5aChAbLPbQ?Lvlsgq7t#A^1-i}ph*i^1f*<qN
zZ&<z#eLVMbk`56~m&p_l0wYqYDg`0ZS0TXeJ;a<-AS+MHldVB)Ho8)Y{!~;XRdJvB
zBtsqA@M}Ngaq7?;6NMx_S(R5%y@KFQ@ZlU~lJKkrcZ{+m-i&M8n~~3rWjx4pGo;Ah
z`*Dz5WC+>eJU_Li5Q?l$aW+$wh%|o`(iZ4bwIN0%S>J*lAKt?DqQ8r_c$g6X(*1mH
z*Isn6+ryj|`yeGh0oF`Efa{g?$YO0d@;BKT`$s%L!QJg#rimP#&zsXc_C79)NkGw>
zQ<B`ec1ZuA&L_Rtg&!kKh|Bp_J~dK}=E89d4GBhH<0fbm8j;m+*Rh_{b;&^21AMDe
ziPw^}$cd56XW4myH=L_aWnQ!V>x@*m>?$Oiwtq!`b2Y9#=0A9rV@!7UW})nhR1i$4
z6#GYg<4uw?@XkvKJZE0Dbkj8c<?Cq3T&zj|)t<)Gw(I!FhvgE&g{bpnH<~wJhhM6;
zq@HESe2z&YPLjixU>?;AK0@{gci3kg&Fjuh1&a_(VyoJOhliCz+IKx#FZT$>)M=8#
z?#ypx)Wn%rvRu`*N0MbnZK$FH%eQXV!ff$HRBawahfX+$S_j$d^k-|;l}u<VNx;sT
zV=y)!F!}uj9GxXiJR*61`M*O@UsV8`oAgNcdrP7?!0sAvF5v}F-xNO<sL|m|b;+aw
z9)o6D!DW-X2*Z`=@D=R2_3Q#_1esFV7l<oniO8{cGDJH<j}{#~#d&#(xuVV9@K3)C
zMTtFp#2LijN(<3<Yz4?4+JJ$@v+&k71Cq6T2!`Kk<9(i_^6KfmjC1&_cp78Kc(5ML
zNTNpau9;vQ>kl8e^AC)x707_mAR1eq3jU+UK-9{cjCY-k6;`a{|Au|;ZbFPK-T_aU
zLn$So9)kb2V*~q}Jl9n~N1G;fNUnx6r3xgwI-3_1c3@(DF9vN~iR})GbX1lBnd7QK
zH=ccgYRtPZy7d9p_gLXCsUB2$WKK7|^gvPfZ7%R^E!y2~2kX;p#_{kml&<|OdH<#n
zE7<vc-(@R!wB;$duGXZUKJ|btmh}1bLvUYgOI>_+^DSbw*9bA7xwm`Zjjb_RvL_8i
z<9~{i3f98(9s!C%AMlm2X4Le$h$f#|i|t<pu+8QgL~NdcwjcFTyz2)@M3s#9^#l4k
z4#D0CBXY<@m2@3g0YQP!`HAXMq<7p_#+%fHvI$k_ZBq-Id|iQ2mLvl82rl4LFm{KS
z(DwmW<mUZD_|0k>hCZ-{oeu|K%YYeCjy{Eb@(=mV({<?H^J*kL`~>qE4Teh=wJ6EC
z1x*L?VD9Y;I8dWYbWZEgM&Cw>6j7$7(ybDgZEA2jVlWMTG701Bs(JsLiQK}mtoxAm
znTx)nM|B?Rko3y?=xk(0R*x!#l?Ron=8pt$v0BUd@B1Vv&3Pq0KIsi(U2epjiKn6C
zkbsPjI}M?h)m-0y`TX~jCbav;NnG`oJ$F|9k_fFd=(^1sBtrcX-0IdNIthjZ588oy
z-dS7~@BlU5SP|vRmNeml9MQI3!o0o>n7tA}`wHXnnY_p3IhmMJ-2u{DZ83809Er)5
zYRGC(pn~{4aEmz%<)79<&)f01Xzo<@{sl=&qeqHMRrDm=Q<Ul6V8)>-p9#usb1<|i
z7zGFNiv{<Ic=QQNl0HEdV;Bo$`XG5K>^q8q@%2!?2>E>nKd}Ca9u<Whz=jLY(4J-J
z*;0yA`(J{++8Ffu?E!-R8gX@{8ToDMi6h3e!ITp(G4i@MpWb|g3u5P=qH`>7X_W_e
zRwST&K_<Lw?nU2dC)}l+fXNFJxwvsT_+Z~4QpMaPLB_pY`MCT1^AjCV+IWLoa#EKv
z<_A{T*;1PvE$VS*6)uvqqyew~g2&$>Sn|FKs#Xa&mEZMfeys$yT1$~RnQ8dDBL;f2
zMCjHh!b$H8$ce2I+?IU^M6<0iXvr=}bjm<+z8U%?onp*@canp%HbdyETrQ_yfC9%i
zV$n_$OfWYhT``j|b7u(Sq6YAeZdI6jtPk2l4QZ!L6aIMp4g{fzPG9SkNNH0s7rJI7
z?=?JwU%t~7oV1jvTkt%b6d)oMnT{;)BSlQ33vvA}8)DEdO9ChSMNzK{cX&l6J0lr#
zr8GeDCM^}6!_*16U`Fe<DnMk>ap&I9p^!f9xkM0b%_%w`fwDiJ;l(~vvd?@E)Lc?V
z`(z#5&1n1f!A4Z-oegp4Uce!XHdM$F5ub%`vG67Z)qD-AGs%Ez1S(U-{2nfG(M!h7
zj^-!H=@Y@XT5-1LBK#(w%a~!cMM*nuVE0buvt51z%In<VRIe7<HIikPbk>6ZB3pj2
z(hqPGx?+S~33hg8g7D%*2)G#o{i;Q<&`FyL(_MHoks9%NB9ALIV$fF|(008veb}u@
zM%dm4{k_pFZ<qonZW@rtX*+n!#MRKc`x7{YC}OS4G+eaC7#beuVfRr3BG`e>f*|I+
z$Q6-g(#E7<(jxf&P=k~`NCUx-7S3+78m*GnLBXKN;wk#l#LH8OOSF9mJ=h2PCcZ`M
zmMZk$xRRfn=mS>F>!Rz`1&wUj-0q3Itzi<hCotBf>|(|&RwE&2UPJJyQuKJ8&igYy
zqz`uhgEBNBbng{@TZ=r6v?}AR?b<-~pDw)7dW0J;usN1zCLgI)Q@l@5j<faUY2l8W
zXr!VE$6G$a9v>qT`dSZE`!-?gG7l75*y8f@Zn)8uu_Y$mfW^{}QP7!FoTsEfbNoFa
za<~H@aw83f4UGW%V0~`BuM*MFFsCo|Y^kz;A1*FZqWVQiT<KhC&{?NToEdL#;~OKo
z&vh>3s<>gZvI%7NFNCQBKcF#4g4go3$Sci8=+HTW_geI5{EIu>t3})JEL-n3pOqqO
z?7d-X@B<KMWwGakHqHAoi!)uxI<MVdnWMpkI;J$Dz}OD6KC%w&szi)B<%zay_VVuQ
z_d&?wD%jB@O)lAeguhQ8VWZ*}m>z0?@@m7ez_SWPcW?2bGZusCEOWZk^A|>rmIhJF
z5AKb#If*Q1J@TDj`7e~QFBeBcqn{ODb|x4H*RaJsWeI%vWJ<c?G*|{c0T)j(CwEp|
z#K<ulCA}y2fL@g@<6>yi5^@I==SOiNGmql&;7I6=n-2lTmEf{_J!jixO}*H&(WUXR
zWSfHt?U_vBQZj=0od(Tt?Le$lqW(=hy8NdrNhwW3ZN=>v5_b_=9n2|i2#1yrjPH8#
zCMUnQ5d_th&MsP~B>I|$;8B{%`xu6UyG}cd(0zyg#qYWNL4RSwPWE}ae}U41NHm;o
zLYz5ic4uCIYjPvN=;0MuHR=^gD_(&&7DD3ZYmQPQ<VdV;7bcHsN5}qec*fG23}sxF
z#;r!2&}0{9Eq()v${+X_o~gK@^f4qjs*}FAfsox-%s;3wAu=bW$nbnAvg-OL{C)8y
zR-Kv;w&f$CVq_;)S-rrhYo#zU)PR0(G$WRVN>~wW4`P?q_`IqZnpNE~uRah3D@{0)
zE!H&O^E||dF@|=&8aX9k|IZXx_IbX<z|tDX6HMgv^&%i++Fi`n$>%q0sD)DZOs<ff
z1qvI=VQ0z~9CDh?5@XJ=8O$K+kz3D~DAi%)yD-VVq#t}g<BYxF4M=tbn`yUQhD94T
z!*~nkP%AuwHfciI(?5}UJ~c>k>~XN(QV+g8=b_X}0ONCY=%WYq(4Ko6w@JQ$0z12>
zRCHst{z>%6XhPwi<NWUfO0+6DlLPsKm{(#6n0OjX!cL=o=QpnD3tNNxb;wmo0lwBz
zpzm&!qvg7Al-K+Mrgd^Ou`m;CHk;D&)1Dwy=;yYKW}J{mNs#M47DKl_=NiKg@Y@YE
zNM?;Y2)k1Fx!a_vNx?I8sTWJ)JMy^R7l#3Fo&)jTcVHZ<MS=#`@rN_sfw^=vj4V;1
z$JIZ9V3b?&!Ekf>%Ap!;wc;W4&Qkt&g$Zn5vKt#56p8=XZ+zdJ58U!5OB!Tugyxw-
zy86vL%-R1O6V>8b27NCY89PB|-Y>j;xfnerS95x2DH^*le%sa~IMm39tSRirq><+^
zW5`{+=jZ|AiY}bT_JcX!lVJV0)3`i&64-al<N_Z(2I0aczG-qEShoHLS*_1e)yW!G
zeCohy%9ixWBL&iVw+0#wLpb}4f2d|6AO+iI!0Z(WNkfE~G_D0(%uJ}$C;_!F(I=6!
zRx-)sL;M|~#rmGVxWLNm*r#I7`o^>2$s<i-#~gfJo?~I-Zfl~}Xh={0)}hw|gw$U(
zOL9A({hp^V&&}c>v{BrTZ<!Bm&550GddXla{_zl{$GwL5HBWduHly>37xPDBjOjW%
z4H7bLDlSY@B&%)vpzH^8)n&Pg>sa@v{~E%@b~$Pr;0R@_e}HZM1ep4rWrNSohxvs9
zYAkC`+q+f3|Nb$)k38b{N7tZ7{#^V|Ri2EHxdp)zdFs{mnIHOGodo{wWx0wj-Y97>
z>Q@bLv1zTSywQT{`u>FM+%@1e`yF52dWsL6n**N)#bVRl1|X(pbmd-kvNy_*n919c
zK=CEi-mgu!HRWMR6Jvq@a=@c(Hk_RJf-`C2!CFm3y)_i6ldC^)I@&bIXC$sYWJZ@A
z)S=q0IpDkgDpZBFaSE4S;@NGQM0famoP6*GmX0XnY8^VjZ>|<Kb&??+LTR$%s3ld6
zm1928S6J0n!1>Jk$+|tvPj$Nz^A_CYt4>aYBJ1OjiHr*)o5k7ur%VcT;$US!KaPo&
zr@`(fWOkMxdORxRFT^p{S*jdH&sd5R#z~VP&l6njkWVl-BM^q$WMbHeCm63an0LRh
z9evkkqErK8V9kr-(-mx}MY4cSo_8NTo>p;Q{h3^^$qznLr3dGJ-;LjGY>3nN^UzeY
z9v|-M#bLTrVC?>P7|b%S{$|^GadsAp1=ZN!a0RS88DE0=>o4(LU_WyTKiimMkHT@r
z2|43@?&M)S-4KFV*(32L9YpeUV)^4+J7D}S#;zZyhB1?wlm2))=3LUHOShSkrYFVV
zSYrTz+gIYLhe~w(FEe_ugJqgh1;x*Ozr)hoGIaPf8CsPX2C-@H;iKy@RJ_{8mAa<#
zQ$vbDRVE%v9z4NR=6Lg7SA~}gR7iToO^Il7Bme1*H7PyQ&(}1W;4uFt9Py(T0?n78
zgPt+<9vqH_GK0zL)Nd$nc?7%;)$o316f*w#W7U#95cRPK!er~1hvpRbuf>G^xP23x
z8kBKR%25p7FC>xC`$1Ud4nIy*LsV=z`p?pU^NdGf?%EBVOTMAw`(#+fAI1{XT8NL@
zgr^S%VMy6LEbSg4&e;0|mVL1y-sTk;!t%K`5-qxM;}f)hy%Wbpr=#wvOvrQ>W0hS7
zSU4#V55|48%uz$Psnf9h=mq?u`yArGu7>iA6kOTBdWhwQ@OwiOMsD0Ho;aXIuxJhZ
zWcixpvAVRY?JcI1relxQS?F7l3-<yGFdNST*eX!b*aES~+J$J}9LnVhlljht9PUms
zCgtH-+`t(<n$$NJv!5=28YOF58Sx4{+{a;`Uj`h?%*RLZX^`&Qfo2g3q&*{r6DZFr
zUitU|#;UeL7-RB_Jvhvn5&`XNPr<Iqdr&bw2V&<vf$9^TDALI16ErnxpIjy88m>pv
z-8w`%d=Q;;R+}cX{6hTVwS4OkidDi5{AkAMefED8oq1f1TN{RVX`VHg)NT$*=GyC7
zBvS~PGaNFUq+`gODO*TFl8`BBk`zg({XQ$DP$3m1MI}X~WU3_j*7yH8zu$4Z`+e7X
zp8LM8w!0Yc%@L%%hg}au=z+?^AINqd#3kIHRPC3D*XLJqh9u8jneHQ=Q?^57>3f{$
z#Mw1zEikb56j-HJgYm#bvZTEgyWVDz^s9RyCbA6f!+QL(Q=eXY$a8-Sf5LjsLVLBI
z^HjC1&}L2~t{Sux_pIS_uTmS3dKhBFt6>mvJe^EUdJeLtKM3Rf;rtm@x+y&ZLQdB~
z&1oY#VPP5Ce#*o7JpUEYdK_Ih>QJxUQV6(YL0)oxOH}J}lrFAxUC@tn^J2R2^F~WN
zdFU2Kgj^!I$$i1?TCOB4MVmQ=bppAkMeCLR6G}sNk=$WolF>W?efL<heI@)(e=iVr
zZoUM!2U}6E{|=GG#d@qm^ETYDm17(G8_~paCF(LCK>93%7>;`b0Uj~J44Ezd(Y0jT
zem#ST(OyK<Sd=|iX)AOe&4mcFT0WycghQ1ySmUj+sCDQklyGK~WbFi8y5%;Qm7GG;
za_)V&ZBM-Z6cdx~TbQ)VoT=^Y!wM|o(c$K8Qfu}S%C9cOLURSSEwlw<b=1h**dOQ<
za!C^6RDdplZ7ALwMD8dT@_Xoc;c;s_p8NF#b|$BT=yFN6N~t?^tj`DYrJT2Y?<_tU
zy#rTWEW{_T&%t}wli=U=1=7_Oag64C%*|5~)KU-~E?1GV8Vwd3_e-KyUj=KM_Cft#
zZK$2g&yLf>g{L+~%ydgDX!UKyyi7Toe_fkp^1SuQ_J<fyl}JiBhr=*4mwP+s@UEUG
zxc;$XY9DSvgr5Vb>8Hc`_A5Nk@tODZ&qCYYG?>4VyNmlJLZ_M%)BIPF4*#f6)B9c(
zB4?PfovL4;+wm>*G+x4T+5s2lnn7$ynrvy#K9qEY;Y!M9gf<6cVVMOGar%_t_Mb@B
z_ofPsX}$m+DF+0X|1N>F@}Wc&?w>6Vc7<D=ifr6J#c1qn%eML)z%DC4d{Dt1)}DRH
zh3f4PUZljT!Usa2$~&R2bsuVJt4FK(TqF3M9TRQ-A}ds^6Wnnj+Qe*wSrcwz_tM7@
zcx?}g^oPo-UPcP_3#_62+ZwQ}Qm3_?&s;g78qP0KrLxyU@pR8Nl;%v3)paV+fIKf!
zHSsE>w8Y@RsIPczt_p3+3C2|mjWO>3XK(pN7x(*Oro2dz=HFJMGYdsDk>|M9`IxX=
znoBmCN8`pDSFojNKNe4Zh^ySC7(LO9J?VUd-Um;U@pZgYRC`?(*WC!yuD?a2>qfM6
z-w+|{+X`HlY)|J+5YxgMV^(kBB7~-|0$*qDL~bjE3G+^1+7Sz?CjN`pQst=JP%Rp`
zxPo{Tb2qiae&V>O0;~5h_<mcSrvBrDU%whsk;z+$)$xxodYv`hy49AZ@@HvXxIE3>
zIUi#EZb+Q{mB?HefQ_ohV7qbzx*f7$Kh%eyXsuz^qm_MVfS)a-mYU;=9Z?vSw;ZnS
z=bqTm$>8~)GwOKu6(Uyj6-1q9W%C_=!pkwGcpyoYdD?mL{q7GCg)MdsU-29!-TDig
zSDUf}`Y9L&704d;X9dOE;q@g)5S2xc0(Kr>s-D9E)Rf9^evPWDxy$dLVMN?x1T)Tr
zfLo!JWaqr|;IfFbGYrndqRIC;e`2C+!T~3I(PYPZn~Jb))fbF)yd`LyJBN*Lf^a~z
z99{X%h)KW33IS@vNB~U*%?K@;b6bHm+oTKPmAWkaMj85Oo)Yq^ROlVk3vj1NpY=F=
zMUj&=#2q(g(jz^jS?33dPUl^*g^^Imv%<++44{+y{7Uo`SU~w#p)1e>RoXw2&dCj!
z*+&K5S99m;tq!7;cmhX{m7@WtZJ{^m87M4O1LGJeywO)@d-=}XPKPreym((rwM|HF
zzbfdY8Q^G<CXMVX&s091N2$*#iN|{_%pEsHR@J5o>UpZPBRCy=*D12Xed(B7R7*~m
z+(mj#n@U?!1=X(pbYYMdJ32~^RsJ$z>l>@V@}ViKt-THo3q1ktjM$H9R;(N!!2M6v
zaKb>Jil!|WCh_;a{dk^d{-;bh*P_gn2G`-GerhyCUyTLv-iDc5Ds)KASgE@i8&_k-
z28UZR9dCDGnkWWK3pmHE-i^HEUc=N6abQ}k%~Uqr6zU_!!P3TOFf&AjX2j%RVhKN!
z%KEYBFNzo|+b_$l^2Uen?_&K<hUZrpu$<^W(6%EOd@XVyz$}$C_csO?Un{6RZ^F_i
zUn9l0Z-dlrgV2&;N0svJc#pFL{Z@<x_n^i2=%PJseqxAD$qA5Qc@T4FoRZ9*?t^+Y
z26SkLJ>8@*6~ErOjcO~O!Gr^cK<D36;J)w}s7=WSvp97&!$!myXQGR)rO3wor^zDk
zorPGFTFmXWMN#l4p?&{haysoDoKZ5PD!co#RL>^VaxRB}t2V;=$Gwm>Jy@t5TmY?C
zG-%CHEtZ<D!b(Qep>Er2bQ`fycxz(JhI0<1ct5{?#Z>dG33n=%RKhhhpgY|(*{1Gl
z5M5p58vF2$;C5vm-!~0Ja_SkrcGLlf7w4d61@{*^+cTvj{GY3?aap@;K2}dZ26CPK
zS@e<TsMMJc0p$b9j_v31^H%;{T5E`-t@W5;48JF@nIc53^2GG5j+}>j#8qnXhIqD^
zfX4QT;C8@T=B&6(aC=-Qr0=mKOHC|TtU<8EU1b`=@1YPg=MAJSux1H2Phh~n*^rvO
z2mP;~!{*8e?8!982t$Evl)ONjyR)#1yd!7%+~?{jRhq2LzrUHMVd}&S7#qG=aClpf
z&hJGK&3Dd|Ha>%Ye{#o6aWw`EP$Hv_9|MnPR?JPom8@7Kg@P`1xHU<G>S}(5@X2?;
z5&OWZ>KT{|O4uY;V~cVHSo`05ES_P_>YX1DQIi*#PElorjWrn1{D%})c!1mKU$`O8
zlr=}RpvM4Brkm3QSNI;y^-l)+Kds|?^k9kA*^4keBnumswgRk>WApqk;9!F`y!t~-
zD?HQisJ|t8?mGlQ0i7_o+m3Enq(i$Wb9U`(YfOH71&pW$RVzCL($#&yxFHQbyt1Sh
zZjJ@lc5AjesT1!E;SRhp9x}7}F)(3R3;L~7gRaNp`7_-gjl-4rxpW_C9%|1V=D!gh
z4YFoA+$kZ-b8+4C)_`78yMwLc`_Wj3P8?I-2`eXaUrl)}ei^JrmxKpE<(gr*JjIyZ
zGcjTQL$1T$h5gy>0~$19St_xLECSiUTQEvH56=1A!>ZpBs6QMd+di-w-T1lQeApY%
z=uAOr%MFq#X~VcRvCvW6h|VjGpzHm8jI=L;$kX3p&^9&NTV~3pfChcks6r#wn+hsC
zhiF-9OpSw;nd}&YFWY%OVze?T46tI6k1s*6SsX^UMw3m;OW+F_^EvlfGz_YR(9Jx*
zf4LK!7b&xd#;Zcs)*x`7bpuSNJ%&f^_O!sh3k^!1;=VX}dVeHhdap8>sP4e3=FLZ!
zkM%;}VPCv_*ODFBp~h~kyu!IuYN*RHLFl%iy*oJjE2jdhc3&b!Qx9Ww&>B#RehALz
z{*Vam{n#Pd4_QxtlLV^`@Lz2_N)4Wo`kyBuIb0y4-aUqH*I3;0X)pfn?n8g9nh0hi
zmtc{hBD*6D!;b#V;CV@(cOf`Wm!y(IdK&b1UIS|1i$=HoV~N|3PKnElQ^JT_b0Ofi
zuF!tbfIJ$-`JD3O@TOe?o~%))l2=tw*c2`7t>#_K;~`L!XG_hm#DLE$FJV<rJa^?i
zA>j?0%*xpgr6CorBG-4W^#hcJzuaAUTPjbXzyXF8@vh}y5v?_R4%Jf@Vv6rZOmO{x
z%|S+ZKhgu!p7Hnp2+ZC+Nro4>w|2|nXqeKYP1}tgkd&V_VCB~Yv$h9g)!rLIGv_9y
zw(3FA0(*LReKsmZXTslSiu8*i-_54+e$Z|mma(`SBdhnI!B;g_DjY}ub)27ZtVK4V
zSj+-y6ktT-M;!g{DO|!k7jCa!$XxuiP&DD+>|Dtp+;UC{N5#AVX+Usxz=<?rSXL>d
zj{b`NhxxmH1j?*z{=kogoTYHlmh}8{5_;{vpvu7^FnWe2joE6%8HgF6<oO5o=y0!q
zV<ud7O~iBZKX9nbf==0_O_wAL2Ggium`YnQbbK8tTv#joSbq&>6xCwJK6iZeCk~4a
z>9V)gedx$CJ$nCF4W!hXF!L8*aJ+<f#4}ulRr4g^G_wZ1<>i69jB&=fN{AZlip#zw
zVD4K@a9O(nS_K(uH7&xkc?Fo^HV}(^u4BY-TX;G>7uOaJg5@WVpgQ+TOM@R|R}~4Q
z{?UD0;^ByO8mZXyowL2=<k4GK3*7#xms#Bo#oUeziT<!;So4Ty3l?mE*Gu<;^p_!Y
zgt+67h+uR(5XRkReb@@0{a6@o!5$y8<xbwIr1*R~ygyS6M?*&83f~0CeQV5?eKDm2
zEVzGZyE$>3a2KPrW}r)^2}Jbdlg~BBVaZ^93`t9Z3tvRcZQ^6uROe6dn)B<&nxDh$
z+266FbP<03Vhxuj8`IZwN1#`0fA%@gnuRVofZh|XLg~AC;5+jz#7Vt5bHN#nmndOG
zbSdWK*FwE}moO>v48&xla{gin+;rTBv4J5F_JYB~&%tmaMNA`)wPBcD6)wJPO4m;+
z0+F{|)<j)B_TMgL8gXH%5F76f3W?*<Yr`oh4V@=+O<jgPQ7iF?vLO=>GGWe(#|c4E
zh{}6ZXoJL_x$9UkZ9_42yJaX;a#!t$bF)C}PY7V(JQy%XnGNKu0pINfP+WB$&JaWP
zaaIm`%=H29V<|$G=MoH2Qf5!$4cQ3?4SL$E5$@08dlG+L@DCY_x_oyivJ(qk?*DN9
zyeCXj<_^-0`NSZ{jwY^*#g~H(XhH2o^h{J|4oW|<w5yXm9QqSNw^a#KhqS=gG$UGn
z`vnFj$nod800Yt|k_fp@;aZUy4Xe0<g+~g>qYnl&K_e2>)vsd5A2sIHG)llBHmq*0
z11lj{V8X|loWFWmBC<Rt8`MXSTHKFAw*}Y9w~SYyJc?&V%f3s@{Tsk|dbXha(2}kc
z-9=OT9$3cj8xM^8vu&SJu|E60%zMF6XsB3?eU_d;4O=B<GjlZfs;$BDwi-|@OUBaT
zNb>TWJlj3ofMvcghkr6{sPx7S*NBC@*CEkj)&rm5;Q1Epn1elieD(qw9a5$vW*M<b
zTNk5gHQ<D~^I_j5B^uk>Pbdt!Ox7*nK9aN|GT*EP`;D`plQ~y5pmrU3*`-cX_&mWy
zZ7xhI{lQsr^0=&De`@^mzTn>T0HX`ypjV|Bbj&9Rrf>ej3C<YxN}PeZd%l3!br968
z%)^1w9Pqo93M+3L(;wDieBWWt?x-EXshs_?HrfWA4~aqLyczfmlVcCPT0s8MW^}4b
z1H*WJM%p(>7P>)<TU6G;sd=WX;aV1aylg{vuN#12>rO$l#xr63k2oB&^by|~8nIIT
zt?=9WU9gfXhU7$N@H#pey}SazeRDpv`zZ@%vra?Xh%orf&qV!`nz8-v9$0Z}9o9{L
zgda~t;I`{$pzzXSklqjr!+-K~&B1F}x_P3YUy+PE=Ctx`y$qEr^6_EJVet3Kgu@v*
zDBdRK4x$gZ$Zb7F)C>f-W;3C1T^*V5Iga;dhQp@hixBY8UyzJhhi$EGFlo>qaDNpG
zmll7)+?q65@XUM&EZPn#&VPk}d49~L_o{HtFdAPs%QLCb&Fs9@e1=pkN0)HNP`^3y
z^!Ri`y4u5vb{y1TZqYHIGKahIu3dn%6nVCE-(`?)$&i(d_eO^^UZ}o;JJ~LjNs_<i
zkyf6;ll~e6#rrrz;Mozf=ade0+jAXup6P}iF>3UhfiYeGqyp+kP88<ZS<rxiJ}@jp
z3g+v1r()rIjQx9GkX*`u<o>+xG@bX&w2V>eHv*-6c77<tmX1hc7}`1*YMKmbRA4Wr
zzxIHF@C(qhpc5VP8wKxK`-I_Z)acqX3b^fJF0MBIg9?pjFu-*ltaawykS#WJr>`|e
zMt{bd*?ecdFcr3r)u1A`tZb(<oV9akAGmCq2sKmsFz2)sGVDVhj;Zd2w*%y8<~=bu
z<>tf0x7=ql_ceUld5ANNjiF>{FZ8O)AR}WxBrHnh?4x2KaiS`%+OZO9Rd`+{wvdF4
z{s^+8jZnCL0e(ELM5jIBdp%1{;qXY#C!f|Vc%SGJN^6wJzSJtrsH=lfcm)cE<m1>n
zWg2kJ6Te(Dpz{Zv#aqn=EH~$xZ0<&NoY+%`Jxx2oy_oN|!=|E+!FL>+Xh^l&jA*vM
z7MsL#4ejD4!B2LF4A$aKlLhTUs=69`t@0alPbOm0YE2g8FM~K+Cmf<}O+zexLYK;4
zvSVcr>W|cBxx;y%V}T}XPdF?{{@jIGS4z>r*^+5)m7|I845|6e8yLa)aifO4gfFWs
zsM}(DVdH*PHe{?K6@5-40sG}($)G79vY8=UG}{vc?hF%}FE-$WqrcGV_5bgcFX5RA
z-;3Y1mwofgz~RPxPC7mx|G&HFEs<lUK}&E%>mjJ^m1m1A)^eV?45GF)WBmaqVMwta
z&AQ+Ko<U~p{ra6~{rwk;J8X%ZN)0+!cVce$65-N#U3&l2Yw&q&PFzgu1f`jeA@Q3!
z4Y@V|od&t!{KWoD9C1hHdB+ue8q$eQq8I7xHe>(D-NrC}hW7pUBIdrUl?^z{=RNm$
zo>;CdTfCx9RzGaLFp&TEm(<mQ4p}B7{e6ecs@3FVqc!afd<FA&wSwD;gTgo4S9s1a
zA04{IQ02V>_OMd!jR=-;@B@{uUo8v%CSYjcWyqYg336Ln$nGnB(dQ<R^&=wioU;jw
zbx)G5@DOm|(GT!QDrV~kp9NRVVhrtF$hlIFNPy2b(o?>d&rGynSGN%rx3@~>9v=c)
zzZ0S7Qw(IAY0;=r_GtKD2aa9C-4nuk3|Ov%NpEXlXtX^263=iVcML?R@qEuN0~nth
zg$hs9*ll7)?|f~A9c898V%>6KPP{H%pnny$*5`sxjEQi>ng2Wg9e`$k-t#^;4CR9Q
zvV!nyJolc6cfJ?lh(S8=iQmWm)r-fq$2OwhhHGf?)PU|bHv;iqZ%DSw6?SRI;IQx&
zyf4JhzXy6T{96==FI^OlboHe!e`|>({SIvAyq*QMttd+EFPZU-dt|FKxi86#y3M{U
zeC7;iQR_XK&gyuuQ$EOlv)e%C7Rwp&&a%1dwxa#62l%;Cju{-$r!hl0yYcaJ^se)V
zh7biN+ONWrt~%q4Nj<RB>KVu${D(fnJ`j9*1iZH1CjokK7=J$tm&AABCq7H+9o-0_
zKhFpznZ|TvgFUSu$vbpkx`<c&USV-$8Ri*i(wWHfbvZZ4!1p?==b162AkP5$mXdV6
zC48r-&Mx!0hTG>mvi9xGc#8M_4;Lq+=v6v*1FjW9&aXl7BLm!SY(tZRzCg!EOWx&M
zE%;3u4uzE`@NB?w%$@r~Cg0~bxR-W8+p%^`ob?np@Hyg(JTcoiP@OjZXNBD%p&&lg
zBMZ7;$9ZpcBzMhQe4UbnBh`*#!?gl{S^9WOnt>Uk^FjLfh2ZjPAKxQ%6XB3C)ff_u
zx|=ws{VzXT&~qerl@vO@zQuN@p~A2=T_CFLmAwD28X`}<=WhCgpfbJyV}|^NH%p^2
z;B~t2B=|WtX1~HR-W?gZ$rn#A{=@f_z9eON6L=j65?Hw+jXGrnhr_s=LNAx>N~^?8
z<Icitle_S_pDH}aevEO~_4rPt4Z6BFfKjUz^Stu{mCk<$m5l;Q-<J{5a!c33frv{t
zeFdjJ{aBZZ7Cxzag!i{r;`mxAs?~pj4$jvwk39q#?@mGHn&J5Q)_N$tD3Gt0xR-v%
zF3`Ps3Y-UTz@7#`=jYSOZR0*vbZi7E0)A&*RgZ7Om6)@%l58Dz6K9u*nM1%Wk}GN^
zqnzZJyiXRojXD4d37jSSZz!5y`Uw6!qsQlsxN(6BYrCPrl$OmzlQ<m~TQi>c5AMN3
z-|cDVG~Tf^3q{vjExKTXJ$+~^gM!9HNM5lO_I&QoJoaP~6^p*Gh&%(saUs|*)rPiD
zUym0)DC1bpnyj>O!(jjC+^wDlJAc0bx9lk5=*s)BlV70D5P6}j?hY=huEyTSH@Kgz
zmyB8CjN!WNFv4^)wAU(v*Qv=c$lR8hT9tCHp}%ZWK6i-O9U(4B?=fI2e~v3N1V<N7
zaNGM)mVlw~ihth}dwKVKn1L+OoqyX$Y6<VBKZWD+CFpBE0duiTHoI{*&b!QA$%Ryy
z)ZYVRe0la>UzN?9*MyNyr_f_wq~O#0FWJOBV{TW9B?0a3B>HKxQ2N>j9$DPO%8EXW
zt8rxFvQa|Hg!icBr@^|U%lOQZ_sIh`@bh>)TK+L)BmQx~+4)akUz9v`9$*JcUAZIj
zNee`tn~0UWo1t`bl8_ZJ22So!VhRsUm_m&$TT>;XIO93)tlWXWn)&XcX*rr7eu^=D
z%TcPD?wUOPAc=L~LuPU(l-s!#l7?m1G5xp{*6dWJqEQi0xlWE<ACL&5N3&d4T#1LI
zO?eROctGMO)rZKR{5Nnj9D<(9G4qFiVbs-)sP~$Gi^{C<z1>$RKhuQGB^n@&-%65S
z_X-g(UU>4)Ubr}+AN74vz}cS%F}PHjLAeM;{;9-!;Q_(<kT!N&no-RW8R(p5gb`6K
zLRn{Dx|3()EF&|~Q#T6TbR@#y*%s8HZXx)z91^CgUqqdY1ncvPB`Lt~%a8uRYr97{
z*4u>2&9`Odz0Dx)h>?i)ERg)V_7~mG#KTganOJYP2K^G=5Zx7tFu(XPl${U3!#AyH
z<N^UA|J{VfCDDR&(Gn6Hj>1{%WW4Fcd-o#`fc*>u_F|z3#JkH#xMMFG_0go}SvE|Y
z&n&`U+(XR#il=>l;<;&IV17CsrW=@1>BkwwB#nDV@2!QY{T`st)?AW%`lZB@vxD6(
zzLRC0ctr{=u92$GZv^MzZ9=J^uHZ18?`v2QF<xIzvZm+ZdgbqEc6TZkYuK=4fjbZW
z?ZkkUzK}HQBLo(!vnnsCP&F=r6pvP8{_(G2<o%x*&^rb){2zc@KVRYcu7PM7+n4v~
z^dut51>CY~FTnO7kT^JDz_B3^XnB*=o_Ptm@7;yyjd8-Q8P>G6C>2jXs{|#DzN~qj
z6@H6)2?KBcgHO*YGWPu}M*evLHs@wS);>N{`D6#1<oYnrA_?Aqz_STG7fFiV8_4J1
z?_k55IO9qULQM?k#$DlAt9{tIP@4{Z)SoqqT_NyW6FI17%Y;YrG&U_?b}+_@n)}9}
z&sZgriu*BQy)KmAdPh|H-Q<2T9~>}Bn+DWPgR9-9%=G(HJmz9ZeP`LSmuIe^<4Ox4
zJ5=e0FXy=*DG1I9`QROvNUqqPM>oG$lHAz+k{L%5F@A<VEv?ZaR{5Er|J#Zsy*>yR
zo8+jYn-**54)Ny8#e$Aiu^=5blhkj#hTcbxk?HfbY0u>Uu<kf#N)0{@qxN>fr@HUZ
ze6Lfe8>K{(LyTE+*;yhvwGW0)e8O|Mw%Mshs`z63Wf)FX*?D^d_Nnd@GzisD&-V@6
zh!x9n(Pmz=rxMY(Sn{CCh^D*P2q9~SV)Co6Wa-j-Fks<vG<HoUjemxtNgm(VC~%*M
zxjh?}8V0sO0@lv^0;_jf)1ZVRNCrK$dlmwNyA-Hj?NuV>J$UJ~dfriciOpjNL+;_>
zvZyIbIYa9!taaCigqfFlpVJLp{B8(S@=LHZ{uG)0iaRCEw9zq<=a6(pfOeq<Ex*tR
z!MWYwlRb!hJC=p>mH1rw<u-y!)}Uj<=dfFJXqI?Cd_KV4PP`+jKGd3j^Usk_y9B5@
zGX{pbM4|H=C(;n3!IrFxf*!T;u%u@nD0nJByNNvMtKW#}%l4Dw6M1%=?125lwAr`K
zD^WUUGAVJ&g51e5p!Dx|Tz|R=wms^Fs~IZvYmX8Y?NpXX-(|@>&+mmnyrUdj=?bf+
zu7=l5xma?&2ae3;&q3S~Y+si`4p%6!ioIHNagh!kW?TTOHy<FH>!8@oidA!F(<Y_;
zFnmBPiiaoi8~s=AFf?REo?=>g=OLI4Xy>Pzd?+=XOMv$VXX*B#n}cs))~jfEqIMDM
zGwuq(eX5`#umKx{Avp402mZHAOmjAtLvGw_N#XJSSU%-ETwZF!=*8dY=3y#pEM5(!
zE(_4-U=D_}x8Pi+%=`upf%dwAWNoQC`W!eZJG$U7=5JPJNwQ8jJ&W%!pX>#TP(#{X
zPw~}eO{yHgGw*p%am<Kvh)OEQ8)jw<d<KJT2H!V83B;aRB%Ha#dpn&mpwX(#E^O6+
z&R<Gwu_0&M3{k<&d3w}+@koq)Do4c2rLvk5{yd&e#1;B{p2lal1BCwc$+VaFdz(HB
z@UKLd584p&p3j-?*~4P15X?R6MJm<r;dBEL3oJV?gxXyc-rwv7?}zJf)t!1wY%}2Z
zPj&oxS&NS6ECrh`Gko+#hb@_H1k$?xEL}Dl(ql_;#j=4Y3K=e##e79eIc-{56bYw`
zmf{lrjxq?cXQG8=u1#|4bfMup6d%*z{L#xeQBFjyuNl$Z5BuZdYur~uYK5QwW#Zuk
zQ<|WB7;W{<>CUZx!EkQ}#8%!QDbx4i42=qqe{zz~)uLS{Reyr~iwZQcF$7nv+XptA
zCh&Z?h9t;Nlf4YHVA02Gg%O#Gcwvzu^K4UQ)Ajl=ag-6<eA9+0oKsZFXSjE@BQST2
z2hR}H%UXBxoVDvyS?RQ8!m*>@@ZB&qwoUXHuU=569_RAOq`RYV@B4cEuBu2M4{@M{
zi`DR!iX6*Y77AxLkHJg4i<GRfXR~*=L-Ju=(9PV5UJ_j#B;uZh*b*{Zw*+(^4aOMd
zd^q=e7+x~!OMl1$xrcb3@F+>lLLbe>Ndq&X;;k($AUs<;QH$(M`wD~hDX^#W?_k|a
z8x|zJ3m2a>ps@+}ca#ST#b*l9ZdEF_RlNaQEh}ndVn-G8?ODOJ%iuC=KCaJt0B~Xz
z7?<0#_TL4VZtnxZIfZb<Bp+vl@4~r{c((raYN6tU9CO^g9xT46qVbm-xWJ$cJr36h
zbqdzBI{q1~Q&*xfmD=puARD@9d4D$WJ9nD~8j!@Jc5JCuI(Y59LQ0xqq50Ei!P{jU
zI6u8kYOxU3mQ`Ypp%y!KPlG88+YMgt`m)Hii?A*H8;CD(#^KsNH1u%2kj;7Z((y&I
zvlBV{K~EEQDo3M8@2SMQw*pnV$MQW{B6xfW7oy{*!3afnP-)yJv@O_(xA$n$_S9gq
zxlb+@J`X3&S9XKWan81AIhwtFq!OEGC&xCOIsti?IFH-52eoEBfu8eIpznE8>X&bV
z?RI=8z3D5qM~xHw3~g}J=1^R!X3AnmWJ)Ufok4}mPB1F04CX(7iL>7~z=K5=Ooeun
zT}futyYnCsziq`aujQGmn=(7>XHD0cXtGbg{({&1TY~YV(V)7$FRR;OK&M3ar4eb?
z?5~#`(`iuymxtrQ@r(uxd-(~*>F~_#q3O6R-->-bYd~#Q9EUY}V%mH1AY7enzykIc
zkTGQiu>89jHA`5`J(`oDXM{KG^ERg?RX>0S)#*&WmngF14$0b~V71#9RXEdQ%Xvf2
zNHb+N^8nnnj-iRH5VwZ$S+eJF*?Zr=P@<R*GjzYe&U1Xv^Wr?LA7IF?K2xF2v8}{o
z&v9^WeJzX>I79K~Q1BaEA)E{P3BS&2Q)4|3kj~`qwlXvH%iNAMi1Px^-h<&2&6!7h
zvf#D#fsiy}CC~RyLeEsdO|mSUbei+Wf2Ba|vXLa`Z#Xo*;T(dODR}oE9r`N2FV%6q
zNvv;w#AlmD?8JR_=CdYNQuRI(9&l$;?j`;%d2$n)ljh(Yt2lHl-^B0b^2{Pg#M(`J
ziBHQkh|qp3%+1!}ZilJ77de%)mpL!}`*XZnX~F{J#|Yyd=(5=Oda|t7fVzF0BvU%2
z#!d^DFlJOdM(oxE=gc2u{vtKDE69SSKQe>R1s0ea7mlTeR>Oj1J({s!#DXg-A#Bet
z^x3jmFq0|3mJ!9+-{vWXx|*^LcP)6o&=kDK@4=wkaWJqt5i3f=F<&_Z-D_mvoc>sl
z{tl8|n8bObNBls^)sne92?LRAiR^c{C4C=z6-G5Bq2ZJ?Sk}YmA0Bep7IhHbeT+xL
zBTu2uS&KFVTd=!lIP*=b99GY?VK?hFX)vEBANs<Xsdd94?lkvUbZg<tL<N>%V90kG
z&E#E=3H3B=LDM<U&@f5>=L7xNs-XYRL05y<XiD0e=3%kIEvR3fNVGrbuy4XW6c2hP
z8|Pv|JxncFRs2Q4_l6b|Ju}W;nqPtAW_`tb+|v@eT><8v5Kxqym?a&YBc#R~@MmH$
z*>a*k8{eWp4@_GMPV1k8<z{u-<=aB;Dyh?H^ZL@|lfp3mr4p@sTa0#X%P=}$1n(m%
z;NwMQ_IyeM&U?uD{OJcs#PT^HHQy>}Q+^32@AG{#KficYJL5WiU0Qg0DJTxppgG)M
zxO$ukTNiT{tDLWauaF1_&a1KNG#!>K+X2nvTS$Cu72hdM!OYttyx`C0Oa6o4z1B;p
zU$z7k4|3o8uKhwm*dL5}`3`sHeuaTi(>TX54{{exkPW$g4znCBVFy2}iwfS!!h&+a
zD|0%D=le;AI&WC)%;zt@lO)AE?bw5Uk1%^E&&YN?gzV*&D5}~H<~~25YySn_1zRn9
zaWxP=uNUL_1ZBE=_F_mKHwEkD#Z;!<2!4@)a8{y6SKV5Ld;I!S7#0T(vy+L#l4!E~
z$5#yBJslVAC<2d#A4!i%IFtok#DZrlFy(tC1Pq>z6(6nX*Hy;slF*M%%A1Mw=2`3>
z5(zHj<UqymGR|`_p!F)rf|C}{$i;QZ&he}gFM;A?M{QQ_G8(JSBnxx=`_TJa_QA>t
zYV@VDDGkp11K!*z-8DG`qTXpidjA7Z_*<X(B)%1P&g8x4^&*rGS|G_ia!QtJx(oUh
z7|;Pn3}}hdSLm7@Pf~{rz%$z(@_uRvw6FDpomqo$E@wVDuk9mbrIrZ&jw`Tn=5iG5
z^dK-i0k%!yoc6IBiE*GV$cNsAB-Ksm{q85RTlp4}KQ{=s{EuNlsu%ou^BP6r3NUP}
z5xcbNGm5hsBwokQ!lt9m;5S*7HK#;@&ekF_;eaQaH=oBVOXXQrfC|QjY6(mFXtC}^
zEwCiHKdb8Xfo4Y)mNl{u#!Wr}z5m4EzT^8)`ngJ&|Kb{);rX^n8_vTr{=Ss<{fKUc
z%5b{t27l%^;%oPA&idw#j2LCOzV9Jut>kAC!&LCw=>%F=PGa3({#<PGlo;D>f%?=H
z7<=p_>O4FF%YDtM;%|Evz4HulH03jny~iLnz(^2poq$&^_oqvf2)ccAl1cbXC3kU~
zOjg8sy5tY{TA2%@epf+Y%Sl+WVgyEBd4O-Po3IJK&+*&%srY$U9$sBwOfRn0=iX)B
zrw?tB7<%8p%!Y{=`rsf;_@T+tleC0Iy4?u1#%#8J1iZEC&wgyUggMK)&?6xR>!0-z
z?6P?W_4H?W_E(dpbsEr^koWMuTLxYO){}H+MKaYh333Ns26exiXtl2l(zke$e-iDe
zc+^Ww-|IzkS5ojAGLmdN`xX3_{zOsjzmiGYZeiQ_V*WdLM#OVk$tD8@R-iwLyPxNS
z^DN%;)a;A?;tx<otFbmyp3TrHhuFJsWU-q*kbpPy1m7(-m^)lSP#N4S?ByMW+CB~7
zr>}^)ei_2-`v&YvjR6aaXYg98&3X$t8)U*1?k#MP7`=$a-F2&BROub~S*=A!NGCw<
zzs8(LBZDpXrb0xQ4tT7ah>x39nVe%GiaJ{b`*1xvVT~Tj2lK9j**P-q&mojX9fNb%
znql@kDX6_U4)sflgk<Fl!T>pCsypI5I;oDufHw=t#}NMizN!abh#cX1<_92`58>&6
z6j=MgowJ|1TrUpi+={KsCBJ;KG3!UCkkR0b{<h`dHk0?Pxi>%|d_JV@vSmT9tXapK
z46uwkf#a5IGKXP`*s$drL@RglY{4vSezFIY75EI_vKF!)DuY8<3VHnWA?9Wh;i<bB
zW!&|4`n>?5X9mFvyB>(JxDNMt-~7cDWu|p|Db9M3jSGi9!rhy6p@w&HmYXZFq-XEo
z><)cuq06}hX6|^H&omP5I=~%W33dn8;)N*_(Pt=61#}6}6?cSeN$<<rEu_RH-WHO_
z{N;VpG=eQ!Al)6}y6eg<T%|M~b!K!CasS^kr6Ay$PM#Rr5s07Wy@d-yj`96<6#Vsi
zgGK8Ebksfu1sYoHPTyi!UyumpWgkKDnl*dB{UsRgtA>DsLD+8p6$glT*TgUp@VpCR
z#TaORdY9Cyv|>kD1uQwJ&UeVah<2%%*_YVTCqI^8s=FKiCR_qHbN+Y2{aP+zv4ZIJ
z3m2SKhwrM5c&;;uh&7fFgG3Q4yI{^FoZ&aY+KRo>;Jg(JZ#?a&$3$m3WmU`LFyr4y
zkmeudv(}qfeE1`5QMJYSB_q)-=0Dk@-KDTjk2|z?4d=hPF?jVb?>*Uu3x=O>!>HBe
zsM(-SeTsies!pCFxkrb}e#|#wsr3r%c7-*~*j)jB{Y#0;?Nykm@&`+`ZeqeKWyqzC
zg3qDdP#ba|FL<i3OBXe%&&MCaE>B}xURn#$5lOHX9bwj96V|k$5517N4}FU}fVAB}
zvD_$lXjlvZ`^|-6wT0+%`xIR7ybtv|#&b^U1tC0}=R`d$p{LXX27l1vya5|pTDu8+
z_Gc28|KdpLj+MgoN8ccg_YyYUJc)y^s89u|JxlcF88Sz0T7DxB?j0>f4a*)Zv9v`8
zWpA)C(Phqi-eB~M3i2>B2fK2j$oichp;)y7Mk&65Q$aQ?UGuKcv8N9UbLV-uhRL|@
zCf}bQ;au~HYAh_U6ndWSfY_-q#MtMEVEaj#O0V6_4pFya?b^mr9h?QRy$?uwTohEk
zx`pH4sneXLHQ+F673K`$^Mv+ZKKn-@PA3xcRrP7+Dt_PDdXMOg@I=upcQSYupIJwr
zB+GavJ5F^URwb1ZqxMkrOS=jk<S>RV9}6Gq6sh?9C78(>pM_p=1nLxEQ?NQ4Ik*Tv
zEK_2U`vb7H+z};}k&ym4jQA|M0IuIvsB}?<EbvvR5FYsfcC5(51!zmhIhxY1V8={P
z_hqP;hb7k(*)tyt_9)VrKJvF=O9xee*QHTHSHUsC|Icms`r$PGH0VcDAMrV@Q>#lg
z+=iPod9T%zyCnA<<qowreg{e-8r$VDB5yL$KXC#k8D4<uQAuD^^bFhb>T$SQe>$P#
z3=ChcM2pJQS$*qUESG4qh>jP6&c9o6qML|CFP#PHmZyoV?@5r#$CD8+HbZ*Ie-L!w
z1J*wH0uhz=>@m+ydprNd`d0^qSl|7;$8#LZ7iU34On<WWuRiQeu%Z2rOR--3jFf!5
zjkBGPqNwW!S+85c=kR4j-tiS?*r_w6?<FApm_(LdJ&OTC7fHA%&olhjgu1^5ta~@l
zPGv?(iY>o@L(6ao9deAkUaka{AL{s@-;DgXtq;3jlgXK%o}5eY6%42J!lGj~=&LXj
zU-4ev&qWq=Mn@z@d~^h-CuLZ&wwrfAG+6G$Su(F;E&R}HPwjH}?CI|kS@?-KsC0h>
zc1AbB(pZ~Tr-ne+Z6(YbF2}Ua<UoRRGORjch;4CM;HRL1NfB2dnfwd?PEuoQuh~Os
zmlwEI-Y0W(F5p9@*KlXWSIp*)!v%cz)_I<D!ME$O+|YiqDgQ7$7jzZI@!#BDK5y%_
zD8SvF04`}UVCf@Im;B*-&5Na&Kb-&BFK&}rnl|8#;VR5&Uj{t*WkHkYn?S~n2Pj?t
ziKHBsV+YS&!oeE-Xu#!_f?AFuOX030pI@VdxyTtmMry*5lcsdmg7^HrpCXf54wqT@
zi>U9)>mVgxh4<SPne_8jq4CdFSZaO|rJXO)F<pyA1oeffmdW68YzKxm^7#SJ-4*P*
z0`2}=$c%nh;nrAP>Zeu2oxiDMs;COP%W7cd6jPew)(9@u&AbP%CkvbZjeo22xYzh1
zarO-*BNPqsS4uzHFlQs0kKp%(dR-VIGo_w%0DjxZ&mzvJ1hLTyyuZc+^||-@ba@uo
zRCJ^7g<hEYz?#*(<9!-=#=pf$!kj_-@sraJlrLR_ronBTS9(>VA87_9CO1Ie*&Lfz
zn9u{KE}*KJBE8M?u-oQkLifpY(4%UErkjRg?597(?Wl=NY&=^i_0h+!nX6FT*dj^&
zX2-OjRpW_I7F0(|gQ?c)QR&p??2~_usmId~LhKDWGGNv_9KV})snt8NdB`uaXJs5d
z{KNMmlhVOsK^)Yq=kD{hec6nau^<X65<WR6;On8+(4{egY}$JSrYb#!+|kzT;wc+?
zX1F=6vi3yr0R{9v^j)|Y$lZ43^MKxN<(#%uh%Npu$=yPTo62RPqq>=t-7#Vr^Q#~<
z@+0}-tbnmozsa(CN`+FD<GB2#6^oBkpz*?e)EaHdZY?RnxoVuLmoE>ySNDZn`A`yY
z<%JL<%)xVu?}3@eZ}ePi0q<njQD&Hl-)i1r^1<=YK4GPhd%27p*mfR8BmE?z!YwlC
zFCuL1jlp!Ei5T}g0h|ZjB~@|esH3t=coNYEBC?+e?J)z%{8<yxWk$2Gp~#vpP8Tyb
zm3xw%6Wg$Qrwr<CmkG_<4di31Ep`638D3n{0j~=SiOTX|;l}Zg{QETv{1()py~u$D
ztt)`6{^^20>4DMn`_WlftY||@5`4JA88}mZkS{9~sZ>o~=5{=aRCsV_T-XYneSIC;
zCA@-$t=_2hrwIP>>%`w?Dfnl!F-`J#21*~l;qxx;_Tl;6ozHX7?TeD6t~&u;s!d?7
zT?gL%p-jzU4VZu8W0-K3&n+&r381I{|Gg9Vu#-eQZMvZ2nog?AS|H$Bkl^+yM`&Ii
zDg-P#1xCLN=te(lrrgQruWxyWx#I;spJhx_5^r;tS`!H!>i`ex`qP}wUfi==pN?KA
zqVl%<y_3IElDxfDsJa7a7Oc<459j;Zf-2#lt^*xmWW^5gOtBS&au=$)@W($H?LYNp
zJ;ymg@7gNP<8CHS(ZArix+RNj(P2qX9znS)pDpw3VNcRgG(Ycw?ZYYs@3|solP-bR
zD!OPqw2XvYu*E{(JR$d^tIXoE8e1hd561G2P{Yd>m}GkmW4juK5Bn@>{sKiB-f|zL
z1*ctoE)Nv0&AWusDl2Ta)FcY)W<s7Z?=Z9&u&Bd_AfT>-e0Aqtitj(+{2?)$9e5pX
zG>7BC<%+bvEl`jizbO;zdkL58OL1(;ah%*_L#yX=mT+bn={0DCw($Za*WD*UBevjO
zsTF<w+!|JFxelc^KZWF(h2(Xm7fLlk!L941EQ{|pw-2<T`Spn?O(+w#@EOXJAqK49
z5$=DoR;TKDniOh{*o8AzY@6f=Y}$SeP7ocs+kta=H#tIAuNj!7SisIpFJZ5KEbjkc
zN|oRPIF*;dqFjW+tQ=A)_ZNogj>5C+OsQy9tt{uOIumc5L5gbB=u)L_2rPU`)M}z2
zL3I~8r*0EwZ_S5X*>8!D(Ktcmtl_G#Rm2*dR6!@IQF#9+4lY?JF}1aK&}l8ts~Hk(
zef%9w{jwmz@eOu+$6<Pzo8XZ4T9De<ay}X2lqe%A@A3}Lc{W1h*J&tTbB$!1^{0Qi
zqn(*@F4kuUEa3a3p%<-~OKUfYFjxSkql3u(7#lWFgP*HIr+`22Fdx>sg;IYTjNQV!
ziy8qWF#RL?l*N%Z@wYMN?mmng+yoOVM0CZZcKlOi%65u18JtyPId1cDM)?;gEzl!t
zml&YaiEzxVtQA&XvZgzp=(3v&v{~CnRkp#^f|bqRhwk>VaBsUkb@Qzg7C*a*A_eZi
zmYl?>+#u)(+l%&ZZ{eiy?~wh%hKe4?lgA~-G(+PsYz;PM?Y9Gjh8dqgwDWMb^&lfA
zalVU_Z0ljv0W0pYx(opt$5AEyx-gjUJUm+Fz@qZ|a5vM4{<biq9(W!kW>qNOY7P^J
zoWQ8_`@qnH=XG-aMV}Y1gkwdDG_E=vs?vT5Nf~BruIYKW_f?0c)CPkrKM$51S&Cx<
zUxG%;E7TZLfqCZ@+1^>&O#IfGiSxT9xxbZRlFthadT|i9rSSLp)H5z07JWr&#%J;>
zz>L1#$a^Uk#o#vCM`HNT4M^q=DX;jx!Uxc%apV8Mgb()Mo0|mEkTMc?_%^h?e~VtF
zenMhxJT`|pV|s=hnYiDcdXGE`Hp8V*V)GJxq%J~Nv@JYxx`mM)`fT`bIVyemTWIO0
zMTai6WQWG;)AxHXqDoB=eorxC-B1O|%fiSeb_EA&s<M7DQj~?4VD$D9p?T;M{1(Rd
zC~sFo&go&?fm2HS9`g5n$qBw^dxg%c?8)&WE7s)QpQg+)Wa;s`*gW$p2FOCm^&2no
zX#cA~u3SfF>M7{2RDz9>{kcEPUXswE$3mYdu<_FnFF0St^rL4${_9FS?R^>V1b#x}
zplTtvCkotJmJpGlGVwq82gk}s<D!Uz&|&@q=1kG0)$3}(VRayMjDG{iibb>~^aM`L
ze+;qvgUM0uS`nYkmKCf{0G}%s=ybLMy?^hAuK|@<e@T@&=S2&_S#1!o`8*k5s7P}b
z$>4O%V~{$=XU{O^EcAs*=(cJ$ie8&x*Yk%$uD_V?Hqu~&Ute07cwexydIYL}^r>gz
zad`4e6<$S}(nVPcY`sbr)Y2lb^LT{PG$z|QlIM9Qd=Ls+T+ydVUP$h^h|9O?azD{r
zXdZk5l*<dS-D4d@584jh19|R$<2+%OlxJ${jBvwNK8Fp8#6jx)sKwd}JhIuAZsdIS
zrf3`bfbXnp^zGRGtLiLfS1G2gRHjwq5!dl~>75fYm{yvPE=rk#*JLexvC$U>LIZfT
z-XK0F)mXB(6g_Wj0+pSM!TDM<|E&dNDP>&e*=0HI5XnI+Jv#<B?U-Iqe;T~52lV85
zo?3_JmW<LcS8bdmc6y3T(p3oHbq{A>9ES4S#H@JBBdCokhdU=Lz(_En-L5}i`ABQJ
z{faJ~o^MR=aQ<nj(l}wWmOkx~2EyDfZ%FWogX4OJtl6mz&lSD`_mvuKh1yw2_U*(e
zcg$#;jtuTAaDS6o7-p(Z#ec+)@$;IS=-+QRdVI<gvZnWvdIN79<8T&fzAF1$+leY0
zG%;)LU9woQAG>f{4Web8!u$)rAhlv72EO%$sOfXCtfn99jw}Y7f)0${)gxG4%YfIu
z#@Mm?1!(rMqRl(UWAE`g_+6TU%j)~mCGRLsQu~IE4o1vr{|pQqEQbS+r9jE5>u~0q
z1wAA)q*|+bP~oXO^V{Rjy`3#kSQAM~pF5#1f3Ci1ALY)wM$#VknZqB{S+IBqraYa3
zv1{^4b><?RGv*@R($iuc(fc8CY7yux87Q=e<_KNG7i07mSIkwmW;P=hfD`Xci>}9H
z1#aZ|ZjGzplb1|_Mr*Jt_hd3q`U^zgtYo5%KV03ej3A-C;bgj9B$n=V0_g$n@(Jj`
zZwi5Uc#Sg6ZQLP5e#!-r<7A<J=p{+gw5Kq4;UC<+YbcJ-4?*o+_Ef6>B0JaAO89R9
z?})aYK{bPexO_YJ+(bPfVrO-}rzs_FKW$~#4@7`j_b%*e{wjo(p9VNJ4XXyX;OyCP
z5P16>xLG@preim_e^iHUIh+h;&(6TI=jQArzn@HaYYoMk#ZYzk3K=p+k%evUg2MZ%
zSTM2`AGYj+(u4QO!lm3@!nwF(wiScsoDzI}R~2AQ6f9h-KtIkokC_Prpmbb|P=3#d
z9gLLYtf&NdKbOBFzCM7)>Qr!;b^ydyPi4J_Cu6CGIrc12!P1H4La5}TFk0Jz{!?g3
z4WE`{tl|+#*ZpGhP<0E6%s#n(Zs5BxH4%9x$In~YO3X{;1vV~~!u%1PurnnAMZS?)
zvonGr>yHE->IWm)b_tyq7YlRcI4>r2Hc{g9)HeS2^_SSQcnc+3JNyx}4boy8EBZ0V
z$6_`b9B93UArYC0vX?ezK|{;~I9k6Mec#JqUbj4r-V`dtbM8XKT^-!xcp0N!>OuQ(
za~5;?J;bSP!6g?zW2j*#aeFFHt~{`&M9-9l?n;4l?Lf{&y@B+i4hwiyOvWD2WxMl7
z;_L8qTz!ytpa1j1Np78RXPg=9SkL=MKfJL<;TD!Y_Y^Fr>eG(?1rYNh0)n-E;ofLN
zT0cM&{U$aEJ}PU7?yzQ@_$>!j3#{qsH>EiDvN2d5<o(rQ!7#9w^Jkk6$Q(JJ9b>0~
zYIGO|{mO&p)jBLj?-$&vS7Q48{9$twpK%{7BB^o{VCnbcVD?QJGLQ4T<yB3l;jE8y
zQ+XeHkD^4aQJp1J?E<M0_cZK&iqm+v<LH|HIKQ$9-1;mhZu5T0%)9!qr0g7crKHWw
zeh!8Dzj2aJ2cGiG>MA~?n=TX0C0Wk>-r=K#8ce!s2YGl%o{iFJLtp%avEJHX)$j%S
z#@W!?QLPZD;SOW&aIW#A?;!r?mE?n7U*^2^CUI$O#Vcj@G=IfU%<<2MHNHIG{oRVZ
zGCPT5M~T=7?L+7@UkYxc63BJ`b5J<Fp4^|?gw@+sS?H}YvbTtP8m&gc+;nYbpQul(
z!}wXu$(B6e@AlGvmD%h2B8dC=0?y{C(Ay8r<9$0-&Z-)MQ%&wd$*FEIv)B)Af7E1)
z59agV+%UXxU7ZHaX@$EhHK?a{JEqLy*`Mw)7^wW52*2d%jWiqPICKC;44F?#7pJ0`
zryr!6s$!vTDoI^oi1Egz498e-e!IEM{zVv`J{1c(W8{U)k$e{piDava91RTljGs+3
zF?>rsd>^Dnhka>+l$thJt!PL$4C=#tb>vw4t7w=z*O<)>xe7Y+^}_A|OT7EUm`-TZ
zV9P+6?||Hd=A`$+vA_%Hp0*b?*S6yP!NK5PQw#p35!gLw4nKRB<EW{Pu&VG2mhRa=
z28^?&6LgEw`;0bhwN{}U-7>LZ^ey<YxIf!<w=YvMe@{N#v8S0T*WnY-+y<v~a;EM)
z{2a3h3)?OUDqED;K`rh}aqW^>8T^9qh&0d|z~IOqOZulW4MnCC1o7b<;=ua_{SNb4
zz}-O@&U?L~$8V7aI07jrR$=+Vqv+dt7RqOF=1IP#APrLz<_;~#7rxx9^v#B?It6e}
zH4D<R)Tnq9U@6;)(pMf5<!$P8+_ioz_s$oIk3wH?@koGI%hYK`>?K^}`vqDGs<3S(
z=O7#OVNU9MuwmU%nBce^bWYa@p?Wqj$RrA%ZZM@9d#%7Y!GwL&T#vhdXyLXuT~KV$
z0Ascu#@vW>LCHM>4L7I%kE1gWi0OO(__S&_?R%y@Te9`Z+~*|wPLdFkJzI*9P$RTS
zq9n46Yzaw{n){qosE{P75XP1$36&)Io$v2&|LM-X=RD8*{d$33`zO}zEC7?TgWQu5
z?EfU~1Zvj`h$v(h|MeI<i_ABpPlFAKM6g=sF|Cnbd*~VLH+Cd~x!18va2NzW`@nLD
zHQDvJAKj$!43a;mV?A>w3}n4YSM}$xbveree{Eo&_(*R4#BM&|zY0F^^dE4VegxXX
z6jAxQIt=dn9dkxfoOj%u{JKVwzh+OWO>FVYmu5H<tVh=Rox#R+&!BMX6VMtz0`C5`
z24BBd*suK_M(YSjkkM+C#_!|9Cq9AsGv0E8j1nPDY(Q*AWMZ+OHQByUpPEf-!5drN
z<HS4B5Sq3M0_{ek<2PL*(_uS?WHTy{j^q*_*Kp;>v!Ey>TwI}?36lA-oczr=@XFo|
z-9t5Kgs}nX`g(*{yWR&}$FIT-4z?t_ZV3)J(*+^Q$G~)sIhpM3gWjbkAg|14&Vwer
z*7X|4zmErxG4A}?wq!hZO+bq@@)+0TH|PAl7%zL<l5X8AFnviJ&Kg&axwbrpn7_pt
z`%KBqv4b#Px*q+#L%6z+76c7?aOf)|B8=+KnQUMkSLYM?@~xXVorOwt#bXZ5>aKz6
zX4Z8L`M@3bv%{O&_T<DSYq~=0imJ+HG=7jK)PC6w^`3eV9#D_3y>lU@<p>C$1j2&h
zEWcE)4v~-A@%5Af2=&hAZ)!b43zl0FeSgTA#I<o(>_g$sM^o}<ni(|_9N@cRt?6l&
zlT>Rp!~-W4X_uuby~DG4zujmo`s*zAY@Y|IetS4M%X5S;cEz$Md(eMz6u&BS7V8I_
zQ_+lPTv$OhtQ#pJN&~K8=jZ?oKgRY;x*z%2{!V1D*CR|YoPw1#tefwX#O{@Z+ZmOE
zoexh#=DuGb8FiNX%s59s;@+Sx+h2D-e+Sd@nd>Joj~{zxAJnB-keF=bR|PbIuwNV}
z{dAe@`-b)ZG<s!CN=-a>ob^DWHp$jc5Mzye8pIn4U~5D>I)^@ic9z#E+HAxx_A(}^
z1qFQi+Ku?4sscQUd&T=X3(BJvEoV*!y}&<E$nq7J8V=&MIs34E&JAn`6Ea@DA8*Gx
zx6kE$sK4l>xHl(_%@B{{i;ylj%{qgQS1pOqQh|5zT?ckad*P{$8Tmbe&E_&j;e7oR
z@M?SyC+}#GQh^%H?bD1aPAvwTl`kQ~a4q-`(&7$OsF3ie-*Cv+j)eUEh|fl9(!`iN
z-j?NueoW|vb0PiUgxW`JqZ5I1yNO35tVtx9fsub3AZ~6GS}JCvM$t8>+jJQ3?O?Ot
zZ;g<0Vllc1Hgdf^CiI2MKB%$V04ilVBnDq`4?cf|^ZztSg%g`gb^O5Jj|;InuM0lA
z_oY_r_n_3$44zeL634y!(4qYcNFP7Lka8pNh>GLIo9ZyB{67BOVMB@vQ?X^|TRiMh
z1sNSi^vFpy^6X?Yo_HaGwR-ISch;O#uE;@=<r_}q8V#8zZbG|u71n6kq4DH@sPf+p
zjQT4DnMoS+yv_nou{m8-{R}1ZrF=kW62D?`Uz)OYFZ`@}fFb{yFkz1ob*g{K&1z)#
zg_?PA!K4JMOf{jgD-N^+;!sWYnG^KP6fb(P18hqjNy<m&y1$^yda=56*tIL*GF`-7
zlUS36km2yd=sUJJ_oJ0hA7jjE<}{(BVcH`#Vm>Ga4~#dV6PK}_^^&igr-vSut%_uv
zK6`vJQJbb1wn1p#J#LlIl!|s863=74Nw5FPp&?n57M;wKZHZMR3%aez5voaxkK596
zp(dSwy%V*EXp`VeC$Z<3F}g241<NNmkO%qfclL(O_mj-U-W7(lxat@xFrJ3o?yFe$
zpb614>&3mpooM1JeQxGf#;0OD>G7$`kbZw4>bRw$A727o^I3FT$FkgI>6o0n5U0M~
zh5|K(QKGef#qwX;^ytREr0?51_$b|o3Xf~@2`7`l;)@9J<XG4pV@gf_>%z86%yn^N
zHy+Ra3Nvd;A)tK`l)F2^n{Ot>!=;~Ce(k$Vcp?Y`^4YoX+HqK$%Q~-mz+uM%l)GH!
zJfCQAInO<CwX+iW{zyo)**WsJf(Gf@yB?d{CAcm|jqJ@fqkct3v~%|_(A;i8bQn*%
zGw%@uzW)tx`s&aOE9L^Kh~n~UYT>VvC6(TYfbjO6D0Y-%mHIF!exgp4gZ1d6j{>4`
zrU9+$>wp}T;<Ij+L+HyK(AC;>gQhictGK}XR3|~`pgVl_SeBi$sKIAPtcfJLyTBjJ
z5bq~L-r!y+>Rc?Fxj!4F&)#wGNI9fTD1h0i7WBf;2Uu?q%TEq#=PcH7FnxC%6T$n#
z^{oyha`a!cu!?|&8zbQJS7mzG^fzpWcy#{r6Wui)V`}jOKDTQp&g|cpnq@XYV$l=M
zrKJ)-#Lq$tua(ea7!BgiCs3wpK#q^A21%C}Iyu<Fu+$oOy32&xTt5ocKCLkD>LJwc
z)*#A*{UK$~M7*4%PEgEvh%o~|^f^&B-%B6<q>D(T_aU5DmV@Khcw={;bBq}_0m26f
zsS@igaF#-%f7qVR(0qqHyU%#<Z$ycE;wZ@oU(jaR$)+?{$c&5x$$RFfsM3Mlgb~=!
zzXMM`x1lSJf5f$xBD%9_8N55jGCLg?AYjo&Xv+D-iM&p8L%vwi<DU_->ql~vZbo82
zZwh}otREeID+!IBCF8m>LlShT5ATxe4o+W=ajxzA!PNIER=L*kiEfHqqiZ_2?0Jh9
z-sOPDNF$uBX-_7u{RKwSW{~^{l&Kw2qvsB4!`>OTF!^8&9uOOn2gUCp(RB*Pw8>n#
z+iw2b@;i8YtuF07+>FXSL!fF&Dkt}j0YU3YS<8r7aBg@oPQM%uar&9ic*&6VyK2q2
z!zaOWVjf?!QGq&FRKwob7kFDsgLZAN<R@P>L9f!G=sb>j^7c-`wX<*FuYvb5(%~((
zGRK#ENt8JEKnJ>%%>lowfVUMo&}X|cP593427LvPy(NTy9d-_+iw${?U|T*nCKw8|
z^r_Pl0yo!W;);ce^zMu4;NvNvW>e}w;?Fwxmh-sv@>vj>Tm=iSYmgCxRq5o3Pthzt
zow+lNsKh#!bI}>ZDHKJa!2mOmYSr))jh@4mGYL@is!3MV=E)UrVR?bXLD=y5KiFHI
z2~95t!>QEIP=3Y_O89-?nK=}EU)8|5`)gpq@)q39=16>%7IC|k$G!At%+6>v?%5}M
zQntgK8r^#a2BBwg^Xxt}XzMqw^oKSLT`HiH_qgDkMV92-Mv9X?uQDHnJyAQ>%72;6
zc8Pl@iL(c91b5YPh#d44npxIGGVv>ardW&kb3Zt*8|Kt3YY)tSd<msHR&oCS^~H)C
z8R(QZ8w5^1`7lfk1gQ?7k}M*NX0HdEo=Dg_@B(;vUF17873lO^y;#(;irY7+7z>ZQ
zhO)ahRJtHWtQ;AI7Tc3R+Blr^Y~0Mr7npI1FUz4f+lKz9Y)f0_)noCB+n8ID!We}1
zR5D43gZ?gsp*u1$bdMFNsg>|5XIKYhUnDP8Qeydz<Ct#U1aCT3>BC?<lCldyGSEjR
zy;LNNY*wSerq;xLOd=HN+!9OP8Kb~yl$!@REbIE#%5|t@<LlKKAfNh4tQ;q#zdY|_
z+6f_PyeNTC&;{50ClELGDR`)i;;a@A!qG3vFm(L|5ZtomddKHs`7#q4elQI-GOp+G
z%q*PW6pm@JcJ$$~3z&4lg3jKq$o39a5O#+#2kU?HF47;I=%Nb$W5{!;|7Z$DYJd2g
zpf0R++Y4WIO~=$r%fY$%J;>4-Z+!$~J+~BMOUGZFJ4cre<e9%Luh{LS*;zF4i{$>W
z&O`mJt9)X49%}GU;i;}Msc-Z|&(bWgkFubpW~p$Dv7+h*Y0+)7waK^#_C&I-RF?h9
znl5~N77N-<iS)iR_*G`W&LtvD++@c`d-f%Qg-x>D*UGs6sSV@D`*XvpN?}0f52#J;
zfI&Jd!KK*;R{SS~BlEtXce4QFb&rE!=L*i?gA%=8W5cq_RlLZghu`Cwhci5Nh|h%w
zAgMSBcLWx6xQh;T-S+|3dOFf4i#14=dKpaq6b_4j9mW!`Jea)F3<Swhe88DYXzy!I
zwqIcT&^?*V75Wwf7)S9x=Gfe#dmkmmD%=Inx43bo5lvMd3$2UUJZr&2bUU;OtnD00
z9_yX@{wo8!$Bvlcxe}B*c4N_rb}nI)6<xkSo5;6jiDw#@LHbgAl-NuHS=3UjS~r$A
zI`swyo)Qv`gn#f(pYgTtYV&&IPJ(j9G<0k=C7}UVc&n8^KyL>Ry&Jzld+Is}+*}Nb
z7rGg<G8U5`OaWiE`>j|z0S-LrOV-9@z}Av>Y^yQB2H$XWNet%wmuSGivFtAQSf3^Y
zy@o8i2=vWuVtv1tXqur)lzVl->4GgCq@s^{#R=fFuaS3JoWva-Z%<vlZ{T_6Qa|?l
z33DLS!zLCKuoDh|+R2YVIwXegUA7%h|3PdyqlG(9Z^sok45;V3NGw;0W%<Bb*$UYj
zEM93$8+A`X)kYKQIna=?-iKhQEr%v^zwlmj>rl?u$(-N+g2S6e;J3*pB<O)Ncubkk
z8Chq8|9cb8=4mQp<7m+xa+VuO9}M|R_HxIMJi<lhN_48uX<X613cOZafae>YVO>%*
zR%ebyvFR&F+@8gqUfYkz1<A6!mKN}4JFud$QE21rM0c=`fJEU1S~EtP%a$rGwCy6F
z{UHSMr%`U=opkh<>(lC^_u#&^1M$4|lizRp7LWJ;0w3ZB!;o3gXq6SqvP4%QW0Em!
zo@zu+++G5g`I|UQld+nEk|E{V11J}F;0Zhe(re53Kl--hO!5C1DUYB#s0&8zQK5zS
z76i(3xUHLF;F2ZFKxdriiyR|(hfY%}cyyPG4b~!ls~qX%cney6_84yZ*bC{Ox*=!N
zG2FeW8T<9C!18krw8&>M=YBFD{5JfBqHCdC`Ul2p&i2CK{mg^l)66Xr9R)qsgQ#6R
z0LT5&Ac=YjoK-u^)hzm25M;CsbtdPbBI6bcLa*cdxwo<TUoW=2Q$UOJli-pq>l5y;
zmSyFeQ1=-s^u^hB95x{W?Y?Wm%#)>Ha_l^ow-s>{PnCnq8yh;hQIiO&hl~oEhd82?
zW0{m9PAIMBYP6R?`V1wyKfs3g&CCYjyT^RY%x-=>yJufi(W7be9Z5i`E{;)9BmD+C
z5kZ7M*ZEw7?jK)(<$q$B3qBUS)q}COI2ePJ>$$7J>LhvWF6_9>-Z8BipyAt%n#^%o
zN*drH>uQxva3V@OMWiz70VJxYV@pUXXjEMTwV$W?+NL~69R<vF`vn7LBy({G5krr*
z@#9*Lqx-ZV47zCtMT?h<57#`0Q={4#xBeO$<-K72u_lZ$KLT2%mqD1D2u+a73uOIA
z3691VBp%S<^V!a<N|J^#9#VcS%R)R$Nk+3z_i$ejV>S%xgv?Vi*!g=u2p13IeV+8k
zWZgEL8lMD(1%Ghr8^$nk^8srikLus7Nue%7xC+e39!Ai5{=$}2z(9~L??u5?)(tSv
z!q?fkFm+rpW*kl9q@k@`|A*>i>g7cAk1Q2Wb-xb=5$OnX9l_@FHCVg98AK+-xRsYI
zX}~|$lUtPo!E^hOEpDe!HSag{^f$&GXBm5Wbs7p<R&xJbtVly&Hjf_n5EG|p(;Sl@
z@L_l`tZJ@-UuTtx(pv-KG{ymPL|xFL)*lvKOT-svv+<glIZf>H<~>H-luaGK0i`)d
zFq_S^kHn~xHY0aV;PS;SXl^DqcGW}Z|I31;gvCHtZY>r@#9?W20JeK5K*gD@I9&TQ
zepyh9;d8&ksx?>eYIil>EPaofHVR}I<Fec{wI`O*+GJj;D)GO~G9RC=!pI9EQoOMr
zaq%{$nqJyu(e>ddZ}SF|%u9Ut;#|;by@a10mf#b%x3zf3xD&PCWYa*AI!<7@pAHw<
zqybt)J5rH2{JH?agA^Hm;UnuZ{x0Z$`3L$gbfgC!pTnH5nsm`o#w?b;f@WbXuCqCb
zg*H#&-PQ=K6eL5}>c^bLo+-Hcg(11g93OcLU*b34f9Ur_mx^Z2=1ii}xL1Y7R36Kt
zlj{x`P<|cn9Am#d?dzZvZ%Z6HWEl9n1J)KVMe(t7;Qa3?r2b|s&%(<%qr#r-+NVq}
zH5ri1#2X-R7vbt5isYq>F&S}TB(eh;hE6-e%kI2IAIoAG^kF>ykiUka8@ZgrCn*GX
z-b49X2R^L#Bc6~nu({b~zJ2C8_-Cg<s~eqA6!KoY*@MlsqNc;w8e8JGtOR`0_rtnb
z)+F~xD0on1E<c#(G+37z4qU~y#uTvKqec!lT|@cw5<cu`Be+-$2kDL*{5N*D%gX<b
zBVL}x=|hzu%k48P6A#6)^P_Nbgoxf=%yye2^SICrqd>y$Vtp@bQ`fw5cz$soYHj|D
zvhind(fOU=!C2XwDtYKW_6puN_oM1RUZDTIvE01~E4q-Kv1C^m&t};JPBg<9Q%4|P
zS#uI{<*qn}&4#*ln6tC#3s!H~h}}sDF2g49MFY}Mx>JL@7-&ko7AVtZ*1h#vmIKa%
z65vpzG7;^q6*u|4;zBx1siZc6^E#+O>wlV{kL)r`cI-=M{Iw++v<0VbyN3qu&(M|K
z<=cwWP$VA&1Lxnt>2c#&=H)$9RX3nYKQ=G(WL&fGa}Z)7fyRq9ux!nDXrF3A%WezF
z3qJ??>XRLr^4XNs^l!w6EkdHom{&outoN?^6{ImodEq0*g(B}DzW6xAX#eH69%;pw
z!=mt?Oqb4DBqHuF&mnGBp$4YA(MzckDj%_4^v~y9&`1&Qad0WWvsoD=r%pqU>nu3W
za)udM|3UsXU2OhpL8?Yr!wDxRaQS^1%-`4%m!}?F%(p+hiOX>e{pka1YXEE4+hP5s
zZTLE)4%C_t@bZNvGOvycSn_E$7CFU<t@7I-Hc^wzSNy~`CAMQ_TpMIsJ%a4n(VXHo
zmW97?6XiV-{FX-oDxcoMn|TgFBlb)t9<=5i#x%hTbuk{lQ3vbm^3ZAAYfidtCs*$v
zpc)<Ru;)`H{!!{fl>M|JRgLFPsQY1Bk3ES#Uy8a)eQ376JLt2y>&pN6w+0=BV=@&I
z82cC1=f1;v;7BGL`9r4N4UoL@7ne92Qvc&`WaCDd5tlyWpj8}(6&GS5s&F@mD=leJ
zN(H}ycLaf4zrY?6P};{FUJQ5xH}f3n5H0r3d0U9nZk@xn$Zarhr6tL?%fUf|RpGw?
z#>(5#$|=Y8gAlI)P+pP6H)OV=)JLB`dF~Zv%m)mbd>>CedknKbsMG8jGa0+iidJ5D
zh5qL<pni!qtq!CRQ}mlNk?lv1zCHY#IXYCUQXS{b>_dK8nvx(72@Vq8!>5eJK2n~F
zPa2q8#q26n1`4QuzeXN6YM~^oFE^U~TanT_UT&qxxu2+m5ye~K+}(lTGhh$Y{`Z?X
ziBh=ISqI^vh`C2k*w9T+HRx?`#ufAL;^z-ukAmG3WS(0u;|sIv5Nh$B|8Jirk$hdm
zE!ESY^Nr4;Ec!J3bTOjNubzSTR3WT5^c1sLJ}_i&7<d*WKxx`>yi`_#p}MR$lkCKJ
z`}TbGkcB7{u`^uMP(EvS7x>%{#r@2EUDdFRyImq8T}S$Z>?y$&WrtuF<M!=I>O;)U
zZAd~OJ6Dt+;cX69LhimWXsYl6l~x5Rzs&ZuK0i=6N6bmSHi?%+*b&t~aqy1a3qthN
z;gU`n?ztevxs0<LuP&l)!Xq4HuxyyG1vl{9M;tWF41!h~@qxSFFqUBeh%a3Lr^XrB
z^2P^U$K1uO0p`^F%|#Ua%M&kLFCY`@bV*TFJhyY<30NhofUJhM5FKnvazDRdB1J>G
z)h`he9fxo+bqB#T*?~+?-GHUr*-Ui5F0WX}GA5C`(P?56nhvv|PSexiAM<c{^snXV
z8^%Qle=Ck}J`Vx8XZS>?c#wToq5pG#sI3hEGiP=_+HA`mKU4$LM{A?#M>fh|D)0vx
zv$XIG+Znp?_#xAbeqp?tsF4%V%KHlVAN|aGEN<p6D>zWs$yc#nX#<=)r%t10=u->t
zm6#WG8l|^QY0IG<Q2kQMcqVVz`OFAPA{oc=(H%5dd=QGR7;#Q$3_(wsE7xi;R-OBf
zOaJELQ|6=or{X{tIjd8NZ6lYx=MI<2x(|by?^O`%318|Cqo4jem|1WTHdR^D;mlF9
z{;vfJ{0@sZ5(Vlbdk4v$nkbmy!Z)(KOL2n*^)S+h?@c*4KevhxXieZUM&0GFF}G<H
z^JRGS4C3VLZTP8AfBwIByI0<XVaZ?NVT~a%`BcWWFA&ishxDn)%$*w@{SZBlOc8q|
z-;fO}zYVTyqERBeKWdopIt=*T3;*kBi_YeAMI9eG+i!NH$R>gR5t<Dv=joDDk6%JU
zX%A@Jo{0;*;?P4o3aW0NL~-dpFk7?$+rH1@y&E(jcfl#}omB?Yrz%pn<r;M8Ekm+`
zu^_rev%T;~J(w?x;?uV#K+23fs8X=xloqHG5389nyLqj6cx@*t9T(9LI}U+fS0PAW
z?&SOA3rP0b!Q2E>5gF#m@{JQ;$i`J9;Vu7%_>p-=WsjP$cg8=+-aM8+wXX@3<CHOT
zuom?nGlVx;YK`&bswld<21La_WUe(uU}(mEQyXpgd&Q3AXM7=6H2GpkHoHR{)2B-=
z^r0TV<tS+S$(>%ndf#f=+}@C0+*EJ}GTCl3=LFju++2gD_t{-!_Y7Hiq#Nu%q(*$5
z?g3>U!SfAjv?3uMx?PS!SBE1VG+zjMqu+pF)WL$gt7bAbX0`Yx7GucR4EVA)hn-R{
zVq(M%e#A@@T-)y^jH}fn+V4eVz3F0DWb6UMhMs~|J|A#SyA5gc(#0Iz9B4MRA<OI7
z`>vn?`_@|0>)X?D^L+#2HxBTf;0qL`+VP&x<6*w-c3i!Z@fs^qp=6yU6&#w&{~RnL
zn;G-kZ^#qy4oO07N7kkHiNhjOZJABO8mymW0#2Jtd5>&!S;_v(u*|d>QrmN|MROP!
z|9Xj<mGALY&0##`V@bmAFU0rr1VmiFo#mTMsNm@=xEo+fivmZ=?mJk}8Hcs#kykp*
z5r2`>SeFMqPxa^p;{(`Zp^lplJOW9viFit1#++Ikjo+@DlFbE%bU5o$-DpX}sEMcG
z#KA$J#@t@($DW3KzciTUb`MJ)6hckS0vLJz9*&rO5u)~FL(JJjkeaZKISOvb1b5Ro
z8+9jKHlQyxS!M<&{8+Bw^ct>G`jk0t?xDwvI$lsn#S70pL_DB@lgF0hj5J3YZn&Mf
zXS1-&zJ&Ks3k6BQZ%$sR%VlTn;H2U_zR6h#898Tg$Rtg&cFRM!l&nC$|GJ2+e-Wg!
zOL?Kc4}M?GIDsdc_~&f@Ilk{G<`lWZdw)6%fm8m0$7`1HuQp@8YUZ6B!g2NHNx-+5
z6aO<QVsvx`2^$Jj>4!jOT_^PKG$0vTlMue-vb?wsrmsB@$7KZ=VzUSuGFY~=`vJ)N
zy1*?X2Wm0ioHnukzsGW+SoGb3F{Cfz)Yt7O$o|7seRttg)4y;d=3Roe(M@>fodr?c
z$2yyqeQ1~VX6EnIr56}Oa{}uIkNl)T1{nPUsg)jP*R#$G>j^o1JC9+N%GAIAS((B&
zw(A~#Pv&*(H8$=$i)!Il_?fnKxc+b=l*gv?IC(wmIMjk(a0ct)K63N-wc_L%_tB1J
zf+mdIhv}OmL4i33uI%c>Cgx+iN*ze3y%?Lm{pQ1*q@YuD5ekdau=-&tEEr|WSbeJW
z%l2HTe>WBcf*x7W-fB*1Gt1gNdk&u0uJC!LjGa|h3{`I!k8@xb3cNdHf-yT}J5x?#
zt<8RD>imlj-l)>sXBg*shyf9;(V-ItOVNA%GFW{`g=E!;G4XCKFF12dEMeVv|5G=_
zulg#H(6INs+6r~Xo=wFELt5~AcNUhPyvCb@9to3{K-J*moSmOC=+dM3!l(iLyE6D`
zZbj%X4CJ?txdbueC7dMCmNS?lqAB<5;BP-868h>WREE|=Ymy9<ypqvqSPZUQ=|CN7
z&8gqMlW;|AB)E8pL4CFbiH}VIsn0xq)aEMOZmmvd7M#JcZ(Y#d*o^F3RfI#F9$}Bh
zJI2mG#`SL64V^C!!5Xz#^fx>q&gPGzUH=XEh&jcY8vkNBuMW!F38b1{=a(?f@|I*<
zGQuhu1`c;3oA$?{@RT<9MZi3-`)<IMlde#5xecfr%c3}pg)|b6A2Nclu=qEQ(oi94
zlPqwnr~%T9?1<a4=Q!q$CcSQ8P3uk>kY?s*$(;5SF6_7q<EPtUl-nvC>}*N;vF~Bh
z2G;ZX@_{c*rZDVEIPQ%IhZ7D1LGjBsxHr_4RPR0j-x-_iwO$W2O>w}9!9UPrR|?-$
zo50v=EMLkHRA)!3lCGR?UaokH+jZTFZ0&4d%$*zj`c@U%{)w?YW0YvyWG}ABffLu$
zL%f2%D&4?bsws(ILGzFu%`|Su6~lV*y_+iiq?*jS>6M`4&v<Az!ZGn$JU)4<L&W8m
zL3Hr|TqwN3`mc2$Tv*G;FdpK7{jcD{#UHS~Y8{xwPR49MCB7|Cfe&ij#NXDoA^N(e
z#B<*Qe%mD#sws6KUEi2Xx8oV+heh&{T^I0@t~Pm6qCu9qK89mC&1kU7h~<;-z(00g
z?N{8F)YL722G>Pkv$O;n{hz=GsXrDCp3P}2>5ogc=#wCKWq$s|(~QY|jTe-S;p0oA
zz|-B1D%LQ*ow75(_k$wcx+ax*vS#qNKA6#8>(1gN19S4<*|SI^)XBe%Ji3ip3VUqq
zh}Qo)%iKP3AbS?OH@*hlRZ65=y9qo(on^wU99&}AhT<^xol(ofl@Y4Mutl3HoHeK6
z!+XF_k@bUX_CdqkE#M*@%^j~ULAg_uxOR9O_Oz^puy<1IR7%GQVLToWYk{h#OZc2f
zE9O}C;uF_}g8SUBkUzdJNSqRR*YCLy$GUOqjD_cYAPkiTE0J7_b098d@3Hquvf>F>
z@gTc11~V4@$k}?NY>tQ+wEDtsJJxlGdcYU4T&JRb9W09Q22pOi_`$YF82-E;+iNM%
zAT>QYAn6CVZK>tT2gk9US~;4vcHsS{3~VV+#ng&xd?5Q<Y0T_~_L?vV7fe9SDMI3q
zeIKU0XWoce#~2gZk8hZ=39eL}#F@3_P{g<?2Ez3)_l^TuCB2Lv)7alGI|H*AS3{bW
z!)r$AkmhO!@_wfh@vvPcR=rRPdt;KJ=<y)2oXxYdOtT>mUqQ@)PrMS#5MQnokW*9d
zK>6iO5T<q=+UJa5U7}RJv;8fI7G?0FgRjNX>pD=m&;c_(2104cBXI3b#L&srm@R$D
z7rkcQd3gfHIG9lXH}ko~_Y*+y%Y~cfei3hF*^)IUqA}{RDh=%T0H(;ew(pF1L0C9v
zJB57{6La~&ov%^z^eHsV(IoAE{;@2$4USMV$7Q3+@V!|MPEL#fy^cBvS#lj8?=v8h
zdHbPl);Z{PJ`MHZ%xQ371k!93vcZ{UK9+Xy4^P{Z3x^S^-0L`D@glx@<rUCNW)ATq
zb~H<YeXo00fwW7N3cP;ix5cmKgJ!+tPNn<+!TssntWEb(?{*w`eRu#>&w%~zmV-io
z9g#jM=6o9O!;4u(Fn`rUzUIbSh}YeP7SZvTy5;~^bd|u;zl>wS$``U_?Ahs!z@TA^
za9ca`PkA1L;JSP0H{&$MC-_38qA&jFJPd=b9RmM|Q?d(+7IYrF+jbq;%MaUhl=UB7
z`7<>ivBFgg28tC)@Me~wv{k0Bv)<y^{R-4_(^m`&V+;!geX{EFX>c>?#J!^vVEVlZ
zJeRs1ikv$T_8bK@w!i(@a}<Zon2udHC4BZu8~&4qDScI=PF!!?#LW|JNLTPXF7XIs
zYV_*EhlKYiDgP~=dF2DR2Sh`!y%uc~*n{#(a~yPvF(%c&qU`fBOnjD$R`V!4aNdT-
z;=^c?Gy#^fp2XoN*VtVx8KhI@;<6ytv;TJ!Q^)FoeUb|4^!Nta7Hs}9B@%1T++|()
zZunk)2nAXZZj+enb4{-`DQa!!CBd4qcdWDNZG9Pj$OoYG?P-}gV_5afxrs{d2E^9h
znq)~z5HAgc1Ct-(oCA6!(Qgt67BR>DAT54X%Po)y)nxKZ+VF6+DlubRuQ4%27~{VP
z9vqcn&xhHV*Yp_L&Mad+n_&gs9Kla_ElAs{22NysLY8r{nahkRfxV#vVU~|AvG33z
z>DrBu7?R4HWpzN1>q;(flPSG>BOT*ztJ8$smk@U~fqiQTdTMOsuQ)Rg=YIrD-WcJ+
z97po>5959?-;!X5?Wn{lzA))uKbj@WhF^YKWU_4>H&ji9l-)~5@9k5tPq6{f>H8c?
zXIDTR+bw>sHKrc*Lty#r>$uZ73o6sgA$y_<4Erz$Pn~=WMW;geMV)$>dHXrUj5R>F
zwLGeJok!JEzoE7$5e3mNc}1%V5c-Vb1lI9fTi|%!+d>n=rw+v-F|YCLSP?0^BP7}K
zA^iCp6rYb&A>NnH;HHONFlUz@ZNIJ$^A=jsD+@)K9s8PFpT=DAFMJD*U1Yop3F9uj
z{=@qkyoZdxw@~rdNzjlp?vk+~N|&<S6rGCq)4yQg-WX6=Wx(EntuQPy7dBlhgY74b
ziL(#$OS>4*`5y<dU7$w(-oG;;an63e(KZ{lUdjXA3~TcBq9ys+YDdPZMd5npTv;^9
z1w@s9#G=3+?5YsJ&H+=gtyTg*IWwYnDi3}y(xig3#s#ij?KoIwMI5?9F?^&k?fQI+
z^BnaYNB-A_NzQg8?(7B}{kRinsw>gd#reG8k(XG#D-+8{?1wMSQ!x664p}=t2n{|B
z0mXT@!OXl31m_;+=Vdgq|KCga?9wS59Ljo~8OpNi8dJLR+#$49W&F2KDrD)`hiJ3m
zCw$b_B0Zrept8jkr{8eI*;|C9rF$osYo5os9S_jMeJAI6y$=d5%Ed)mT|D{s1Y?Sx
z;vG*_nq3siZB?rSpKtHr;Ajm}5x{)vC)U7p#y|Ibe1p41O^M`1sJOKw8q&tA((5(a
zB=d|iO|iR%_ctpOwKd(mXHgS|r<&4{O*X{R*OX$8AvIYs7!w4n!*(|bglem}>Eq5p
z)o+&TJeCL*Gp3=-;#h9v7$H&1xz76zDF-v&k}h<#C+Ij7YBnE-P+cikxAhawbC#oM
zbP2aepaT`>-0>SJLI2CvoG>txAAg1I(9V>@acw8M>pWv_+&m0}<!WS|0b`>yoq>tF
z-lEr{F39jY#Ia!(RiKHur$U(|&=x4Pj054w7@*S~iEr{RsC@eenv3m-`_wg<tv?%k
zkG=teF|#50*954~{>jM=8gQNd9aPXRX8ocScsEH8#(FE$wuDnK@GbKw-C=XQ(QM`+
z{>-JFGbf^qMp?K=DtMf|DHd&?26<g=;IH+X4|+C$(>kh7lZFUMgWF|vdQrh;DxJdx
z$pR8=oPd&BIUISWNgDqq!GE_6$=Ju`ki&RNZUIgB{_`FDLVG}~(S$M67`yG;HvG%F
znN3@wdAl9U;qp~WmTMRS<%3s4@9q+qaYc_PXlW9UF({tCHW^B*Q^9ZY1IBNSfz!1$
zsIU7F=Z(`LD~B*{XTV`Tw3W@o|AaySW4c7xF;-hk6}I`EgP}9DNaV|xATe!}S($X>
z#+h+!Ze_>U>i-1$M8+^YG#1*QW#OZ@FVRDF2j9!OhE1zpa<;?L(8s0`p0hc0y2)NR
zSCI*NUyW$E@@w!|_er)#JrmzeQKSc>t%>p!6Z93Hg4}C6;77MVE^~_p550Hd`RB8E
zMK%ws*mMZg)PvBn*n&#i+qun^7qD<e7xZ$9)MBG54c+yYAA9}*ToG#mFjtdw&1Vc4
z+`(71_n_qV;{s1rHD1yB03@E=%p0r3;F_(<B<{dn*jH&xxZ94@yLl(LEf-SLhyom3
z7>Q~D_tEu4503X+3P%T;k|AT5hqB}WFYkUUt{Sm|7o^0A3j^k2)pBLJ=+y;CzNSJw
zUZjdkUZ+Fh_ZrqeddUe+M7zn=_VdDQdkA=$0Ij>zptIsLb{Cdm;P+K%XRAQFbDVKW
zQ4ES^zK|VmU>&rnY{tA<%wLZ<hV2pGaRXzVnEiPRADi{5^w2lX{-6%s(juf)uAbOw
z`iAY3+Cg%!KzzSwKVG?Ffv>yrF>R>?PyS?cis%<uld%fS;vYczgp06u{4kIjK9WUd
zjlq*PjGfe$DKkhhM&qfr)TOHqmyKi`9-9+z@Tw}+xyp8#T{B?GVAl1j(F2ER3Fv&u
zmKtqh-c;L9U}MPc+9q<4l=wnkpf6f7S5o1dJm@;I9BO6-FfRT{#?*Pp?X)q%t?UdZ
z*t^>;^VSL!y!}=X>fO%!mwXgUl)T}Jl``m!HK8UkjGeeX7A`zuhpEYORF1Tv`;SZU
zzdnX^fbS=~@2E=`+U!B);YP5dRE{Y^5gq?s6K^)_k)iflBzCwutr7miw5j>%-kym{
z1&pV)|2@wdO0ewd6Wr})PBM;4c#j3!#TK38!Ff|MN+T=r!O;S|`(-iM+Fn7o2TyqW
z;9GcN$PrK-X@gm;=M(QC!U^9Hga7m;lPXlHj|Iy|vN^k8b@-_4`OeT<ZAOm<X^^qk
zPlKJ!TO8MX4RupZ=usUzVz6o(gvt`ZG}3{X1omP1Tyv0|E5V}F2rO?rz`fNvjHzWM
zT%+Gl6r4LJn<vpFaZBnT<3}&&aq%<%r|~r=s>X5-7mFa?VlFs5s6f}BH{jk%B|59c
zjMmz=L(IQOj&y1fmt*Z*;^OCAm{JXMBi`V4JJ^%_q;sfM(wA<VY)=X3khX-Q&~(}f
zODw*_BPDwx%^wH(KT|QZb`exv`Ob?jy%mcizJPGxY<|+jQ<#1u3eU>>()#JS*r0R(
zu;C)g*L~*}>l%{F&6Y&E_oqyHH&SdAq)MfaGi71@n4ffg4Hq<ragatu;?M=G!!`7M
z!GLYM89Q_sL{5vv0n(MY=+JN0uhpdB=K=Cd3;513K)p&8>hO0z2A#7;Pu2e1@Q%Cq
z^P7;QzLfDT1((1kT$}p7&V%zZdy@TrFYJ4$LSnAQa8(|5v?;rT%Zh&r#%XrM<g}c>
z_R*0}TFE-%s>Pr&Ckf=@9R9?TzPPB~2Rshvh>Hv}#BE>A_y@xaK;%>?yH!<z)nR#{
zxU&un^g=<<wLjl^!5{GSe!&<$jQ1bMUMasFIFpEJP+zVJ4(lj1r?7qZKUe<#G+mPL
zA{0%`n|VqsX>g#BC_4Q_--qdNUbO-fyJvxW-%L0@wH6xY38+WrDejqr4e^=Vg}UBs
zj%MdXKkmMX#%yk(-DN?B{VN2M>ry`YtR<08>A_s#9gIoWr+H?KY5bo9eJ3~tGvAm~
z71J*4=8wSahiu3HuTHl81mpNkpM#=3b*vM?98}R}obd7%2${AW>JtoT$?cnXHqerY
z1AhaawWoFodmw6i4|>l(1E~eAT)>fN#@R>cRI;XRPo}})#2!3-*?|;#wnDh&E^G?!
zM2+9?;H0h+c}coZ;_3;K2@0~PAvc&mh-Gk|Jr@t@$z+}96>zhu4V~WX<^wkT<TGB)
zN4?*1FzMPoyzxPgOjhu~cbfM=rk8;}bLAk^)IeX?o2+kfh*#Tv7pq?~KE~YL;*CG<
z;z6MWwVJ9xT@}AW`FSm9yXgfw)wz)Bx*J@|JbCAd522^kopth@xts4<*PU@>)9ZhO
zzrz<9ZB`=skL}4C6p-<in=nIVJ-mDI4BB!dc-_=YRE&DU_}?4Fp83-`8*-6#rT59|
zKYzfP#&Xz9wMZXZbK<1#4`Uv`#A^1gdH3-VL<V=kMC=8@og@79(N5Iwb0-F!w#58f
z*Exf{`6!2f;;K0@oJmG5IxTa8YS#nc8#@Q@O|hg#y@imx-jXi6(+oNVDX`+A1@q{1
zqU6hZUT)HlOKi8q>^3hPG=!bE#R58Lx;{)1zQ?04_oE;ut6<R~Lrn5Y!9+h}E`Q2t
zD41+b)jr06|LcqJI9`Vg>90fO!)Nd#OskQPXU-JWGO#_WN|czBH|S$IuWJ2?`7TsZ
zupywJ?=wRx@J@mex<esisslY-)CM~vcEMbqKE%^~3s=ZE2|<1akh!TI<EI5f*P$2O
z{ML`$FFV%JXnxDMF~{)JoFZ%=r2v_CE@OkvBsj~Q9754rF55kVcbk2iA0M8CwWY&x
zt9bzkHOe@p#!{TH+=966N@JX&5<YcY0=9}As3bNR(?8yW`O*)3OP{S!KE{wbZS0HH
zp&1~mYY@kq=^|`A2&2}RQ|n|Y>a*SPI>vCTN?OJTk}L2^y##mqv01{lX;@v`!rVRi
z7}`?KZ4D{|LG)Oe^huQL)X1BRVPe92q^=duIe834b3XFbx-Vh;q*rKrq8LRQe|QUV
z0`4S+P}jinDro~CZv{cQZkO2T=x2Bxc?Yu6-7#XocT~EnOq`~?=9XR+P<N+C;8wXB
z;^z&5HzCTz^Klcm!m}R)XNZVY(1~BX%D_CwoXl5KM!)b3#)1DLz8$DW3Xc4bf0qJ|
z-_%HzE%RaD_J;u<<T!rycsRYA^-XFdkmWiMKgJr6?QdDWf4vg*htvF>bL<}X^($Au
z%NqBd@Pvu%n^}3c1Z!4$fsJ<$u5x<_C5iR$&*~~Je7GC$zD~ywmlHty)uWdA2$;t1
z_g$a2;^FJ1keV?FJhu7BT$smQm1S|1rdUwLaUa3(Za?aAX#qUa)}wPn?xV)N+n5)(
z4L`p%rSo-adCp3mSoWxpVGW%yqIM{pSgD7d{)n=@Wl;X~GN{De#Rf-~57gTQ9y2`o
z{$niZ`4h~mal=#g&(VU`>lE>tS9D1To1;2)p5`WJu^iOI6h3j>6uA0cf~_y!!<A`|
zP|`XJCH;TO49r9{zFR~Kw<f~Avl>M1c#zLF41-_ejcIi_yVLuN3T7uCMSrtRFn&Y_
z&WFz^ndvAi)6%1AKd<2z0}4f69^(3QncTa;z03{Nggsw2LE=RPaIa-qx8KpM+n0ml
zhK~?@(2ywAJ;y4qbNI7YhaCR<5QYxdB7-*VgyJEl<d^?9-1gU!$SllgmF98YKR%r6
zWxRou;FlO4R15w_L!kb1A$PNwu|COEnPQAD+c$c1D(2?ogyDRo2Xx5JgJx6^VJx;%
zjl|0GcF-?UB?q3V5GS9nm?^#vhZq}g<O@}jxo97>8*c*dMkA=7(a1Ru>IRoXv-nUQ
z)~$H-3`;{lf@g~lL}{ASakeI8AIqYON`CVbCNnnHrCIz*OHEp_v=1$}XyBXT^caIO
zmKQ{~$*dd;V4`~hb{)IJU)gvLGIUe8VUIpSY=bdLX5GlN{pw_L@<*<N_4Dn9HDlet
zI5g4y!38QFgI6&kDzlNmh{cNF?(q^%UCD#8QFc^KF$OfuPh->320nGMF-ZLOjjBET
z86Dl!saeJ?usf<mrQd#X^;3t!{g%hrsr3aGd=e0Y$`gQ#f<WJ;59u9$mU%CPyv#fW
zLI$wEq4_~s<yuxB{luIy-}j+wCi6oDorlKKHMpeCn79NBc^AoTltioW&$7&jIh$=*
zKUX7ek}wb~{UIy8mJdSp6}<HQ5I%Dx>(f4a3BsZn&RaE)d84Pp$~*_sT4qk2wV%O!
z_IzCo)g|YzD3T`q3a)7W99d@YPi(92%lCL3#7^f}EQG^ob*={#)jmOVR2uf&TmvNz
z%q8;uJ8Yc5a-yOjTw7d(W6#H+AnCrWZIT9lE#@(@_XTupWiEQ-oh%=zPbbC;Xi4ic
za1T8TRZ}$Rx)GKnvBQG%D(-=*L$7#2z;f~LF<Ru-V+qb5yb8i-JjOSeu<U@4{(f1A
z(Ob2N@*}oq42gktx+>&~@iB<hMi?M>z`ZZ~z|r&XvF6$tcsc>`?IlIB?D;9??#|*8
z2Q&VW`Cnd?bW6PVj3LfU`UP{As}MiI0a*C77qgeW=arPB@%<)qmSb2c7EGHc4qvkZ
zQ`Nfot?CuH?_e{=<{!o&|K)rv)gU<fI*M9FoRR(k=wopam(MtW8<bRu*1VT^D!&Q^
zo&&`;mF*}Dp3O}>l!<o9eZbG54;>iz9n~8IWL|g`&Tm<XdIgJdOqU9^I>~bJY2)~y
ziG8WRrWhZw{rl#=?A|regsl4S9i)8~u=`32{@i6nnw%0i;Uf;t7RTXgi6PCn+{9}y
zQy|^9s-aVs0P=$m`IEzK$uXfKxp3qv>=7H$(7gLx#`&B4pnqyqFnwQv$m0z*>K%sI
zPg*ogu?s)?_aW8KKY$Zw2wj&IXr(gag#?;YmjV;0VccfFon<IraZxNSJcyOPhtY7$
zUEFT3#+(ov`2<S}EiVY<gg=Cj$hhf>shBtrxVZG|AYJ-LX6(p(-PSKhP2H|Sdy4ns
zq6OL@+GdE=5&BfmvkOFrcY`4Jk60@6f#DYRq`ub|rlztThrs1V;c3?G%qfwD>=5B)
zGi_S*_zjj{9RYi$JCLZ#W(e~!q}j9WIO$?59RJx5da4KG#kKmxy4sYqHjG4>+EH-R
zbp#*Or#82`LAqOotM)mEQm0t4fZtRw*}@6muMiOb`=8JR`{K-#JmdwZW5(a9P*-hC
z&h?uB)$JcK_@@!s{a1-f0t&=hN1ebUEt_Sa8?fnf8@J}911XA_h$Uf7P%R#STHzj0
z^es;2(K=KX7Rh)xBm#$zWoMVOtdrBoT-a=O|HXS6&Ti78RV&Y7`O8#pO0<Bu_y3Ag
zaW&t#t_wyVv>-Rn9!8rLMIcB?aMRqZNnJPIhY|6{cqgVGIrW}-J95)OXLuuuS}vl;
z;IDX<&2Ig~{pcE3E0W{ifs_C0)1V#W;LDXhG{f5p`xuqs?(51V>x=_kb>|7Bj<4po
zDz(Gsv0w3jd@|vQdcJhwDBQThjvBl^gUUB8slG!l;y@F47nTij5-ZN0*q@7Y&j(2i
zh^vqP#5G@yNZ6kg_>biug5EPe8QTYn)|}<4Li%$8(SLkqQZCDVpW(H;S@&#ay(~Ps
z89bj3#8bJi!C_7vd<jhjX|)hH550!Mi&l76VoT~bEQCjsKBB<9iSu)Q3zKH$A$?;)
zr_EL+zyH~gEGsoC4_e3%Kws8jP$7wn-ME^=y6CJ?gc5C2vGmDu+)<H@Dqjll!Re#y
z`&x<@!dO>j>oDH`i-`Xer9gh%+6MxmHEN$ML(?N!nE$ze^K8A#E52KeY9;NQo$qe&
zoT(48D=#7cS`4pJVMaBUv_tRAEr?a_AiX@8b5CgmCQHSJKABLVqXy3mD=~ZZV?K16
z8BVMZ!Rs-x`1;K)NHA}LaMq=0xfu_Yy-^r2tOJh~8j-O@ui$fk#@ue|K(pR?sCiA9
zY<E+jy<yB9%<g%3M;F%FDU!$kBp5K@JZE;X0NnOOfya?lapJ-0T!SDK%1sRszO85T
z+zf8T&|-WW@c{qbO++1spCI=-fYqH}KwI?zsvVQzq!xR!J}?8`>YI|+NsZty|H&tJ
zMMKr=GS0g=2?U}cu;_*f9N+y6671Un*GXWyuoLA=6L7+;%cv<eB~B-*uyD^gRNYqy
z{eD@Kf!2=Xg2pzKtIy+Iqgr5+^-=WYwqW(tG5CI;kUTi~6DF~_=+1c?z-6-#gdOLR
zJU@nu8yK(MdxAL4%#eiS%Q3U^D;yr(h>eVI6K}c(e*V*-fy1NW!migCe=-E3-`S9$
zJu18&{~V6IGa;2DQ{nGnwo}n4VT{Ul_O0v7kH0IV-_>I=^xb0)(lcQ1N+sB``YpQ2
z5AYAJu0g56D!y<idyco=<nbs6{*xZ^TkNB7B=Z<WPCJC~+ZFMz9ZdZphrSczv5|Ev
zkBD`N6Z0R1Y+nlf0*_<XV-p&<_aXRl&Cs4LK*`0wvf0DU$&#5`)Pn7foY%>rDquVB
zd+Y_cZn2=}lkCX(yDB8i`~m1QR@#C6k$BL9^-B!L!Ni1Cz&S3Eeba^$RcdjO?(bmW
zM0>KI+{1TV83=R!1IY^=E<Ggz%>|}(@~8y3tZPQg@124L8?A^St6et8BOV5+=i=d0
ztV<V(VxMT{9S!c!CzeL=BF)9DLoeiB<uYc#Ob7bQyAPdze+w3_jslf;Ce-EVWiEfz
z5jZvdFQiz0hg=PPtY-WhPuA%YoG%~M{8`SP)$RCWXFuA|pif;&YB`rhzqqv$#$pLZ
z*1;UitAiq`>SNBwOml*i(YIk|fd~U;{^BGPW^ihz*_=jqAR5h9qaG;>_}t`JNWbTR
zC$ds-=>uib^z{k$8K_JgW+sE<%A**w?K&5__6NU7;SOZ)AI+CPS%E8FWI#p4D;%>t
z6Ls3!nNuT8CK{I{yBgM)2w$qw`>|GJ=z9gC6)U8pOP*lLfJ-nWTZ075@`sX{U3g>%
z%bs<*gGFHkIvMZ7(2VCOxLe6ho1{ysSRU5;mIcY~w!-)*7gW2P$ji?P(KF=&y!1PO
zu4l_YsndqE9Oy%b*|eeTWE8A8pijNN3hCTi#$@q#HLC8HkAeYf(BQc?`{qw7sD8m*
zyE|8N^1`in_j(gf=OaPbxs$uFsSTc5^d%ip`c%hZ7Q-D|lRm3iZa}XKMH?JhRbGik
z+p?WomIZw+yNnWr(NOGVL5jXMix-Vc!sCAs948pl-s2Pmb?dRzVh?Vdd<sS1lKCPd
ziP#C3Kre4Vt?wtH#9o<Nj9Uv4AC$>+=GGl{mi5C8BOq}`0LTvkJR0Lb(^fH;+4);o
z`{NTX6<87rsScG4_Tn;_lOp5%W4^1dmfLzjNQ(yCLDd0m;L}qMp~Dtn>RWZ#821>f
z%?|+&H?XMSF5h)HAJ;Gz^!6wXQp@%zqjZhw)`cA`V|@me2L3MxSyOO+zcCR$SPz21
z_OR*BO>nHTB$E>~;Zd_asbd+x+5^Q9)U8Vctd4UT1z+*iW(5)ywS*tP+yw<8@7-LS
zqQKixk?Ke6#gx~d&@-LqGioP8Mql=hI#1v<V??%jh4X^&J8nIGCXkVNku!-s&y79O
z3J<=|!4XYsnV)_Ec05rgf>%5F5h1#?sJRc6`<&-1I4K;;HYRF1v$5sZS}6Y)1+obO
zTC-~g`kM~L56oleZ)?Ww^^1g@rxtXlH{;xuP6laI7yo9GC7I!H6a^o#z&|KUToU>l
zeje*f#|&{GAAMO5R4GyXZ<K(#rCjDaqwm7>PgXFF<w-MaKl2N}vh&3)6M8ct50@&K
z&_ta9{Fm{)V8WiS-u1aqr29l15HcI)Pk747-|gVejNFgEj8&MgT}Zw3-a>wTHy1Fp
zf%B3#!?k!vB5XAUo4<q6G5jVD8qIj=tmfTziSa*m{a|vIl=l^7pbpF23l{tzMQ0jU
zW7oyu(>!XP$J3llA*pk(P3E4E5He?YJeiVAnNlQC5>g^bDoK(gb?&w6$drUkNs=T=
zlOjnP-u-^?(eQIl_r3R8|NnJ;Me{ama_;kEu=MPJ?WLA*iH2j$)&{;QsgpZ0ll8ck
z=;DJ(4=~+CK-EsZgb3dx-hZ|pea2=kXQv`+a3?v3hgINv)QC&kBgPdeJV-AwrH+@R
z_^3@wVf#@Tj9nCpf{D$pM!hAd5Xv%L%+dJLR)aZ%6huqQlQ8J)7p`i04vtP^8Py}~
zJk56XKCeRIx`irnvsM>-dws<k+gQkP;yB5yXnwjvE*fmhfECF{VQb29{QQ?kPv+dw
zYiNO-E@g1rx?Qx<Effd#OCY88I^GJ9A=4_JqWel?ke_k|;4=V5Tmk-wCb3woL=#JU
zFlPB02$@mDuNZs}q4_=NQWOdEJlGwzXsxTW)_(Ngd!G4W62b7A9+B)c;;h)5zuN6R
z)(@J{7*Qzh`>BFHA*Qrxi#BEqoZ;N=UFIgdVSBGp7F>Yf7QfJq_3pce5X<06s3`N2
z8~faxjBIE5(bZpZO^P&aWPJIG;(z&JPfX}Y>tHm(YzT~5ifexfXsY7}+;kgIc5o?p
zJzzPh=@vBOfdY+<c7h#2M{(}Rt7vUwf~UTz(KFF<#BJ1Y(c<COEbF-qSBxD($Natz
z!#)h)pRvqcJ?9rjw^-2aCC)e+l}Y=ZM|eyj7T&E7!zUHSq*?DK82wWR2R^G3gY}y9
z)0!-__H%$ucea7X);;i0R+;L|R)C-jJ$%F?OS&i{4d<z4;?9ikkT?7yj;ZX2?TySM
zx8DZ3*Vf=emd*1rddo?k-R8c}D8z~L6zKb0Tjsay<9^*&BW78pFnMti49qa0Cw`x0
z{M;IrE3hnFzfp<g<OpFS>mF@n&#5Vyip0VF3+QLdknJKPxY3@A(=ScLjy<-VXXF{2
zG2f2(4?B;dHT$rXTYv$L3%DFNOa9@<8rV(j$(kN1Qj%muwOL=cX?g;LWUk~?m#9&h
z_gd6>mNgBFlcOOUrtnh__hC$@24_F>4fm*Cm8?6&7;g$)qD`5GRQTj4AEe#FUzHY;
z)y%2mF)N47Na9eRdAw_iq*3rewa~j&ot|pS!_-s-NNzmG$KGSZ<WnB-HRm{%D&_FC
z_K_gf8IIFuEkpOFIMAPKM7$!L!R6vOJa@JR8$>J<JSrdL3mT!RMTxpNq_Ld(DRKCN
zyQub~A45vmJ!Z8U1iX01*DQ&Eo$Z!1*x@T&`=ClsefWieTpqaO1VfT{0sMFv&HC0h
z<mprP>!KlvJz`9=k1J6(`P*XQ8eSa!p$JL(Ahu=bQQM0`ny^TlbouClv`z>lhAF|?
zorAb;`fT)ka}C!uod@ykpD<6zW{{hbS=Mq(;Sxz71T+Igg&NQs%b8n4r-)zjXDgna
znh1fHUO{EX9nAT#3{;|JNkpRprjA+-WRogAx4IRUjdFsFM^iw}^&Ui2=kb}tenE|7
zGjxdDd0qc!IH%j33ZJ`iF4i8fOrU@cdj~=Kqyq+wI>dz}juv?vBx6?mWFY^rTz}UK
z94I~v;d{rUSHovsnB~ZCX)+{kZrZ$PECqb-58(<1bmen)-rBCqFT1@4%xov2<IZ$E
zyW>4RHyy+)0XMK#iFM}!?t|c2vv`Ah4hC!L(a?%BXcQKPDgC#>k?kD8wjUd|o54Qa
z(@>v0fN3M;=;oph^!axsoIRZe6)MrVF^J_Kn8Rs*iWQMB9>l4U`$2z+6wMI4=Dd<s
zxsuu6P<i!5=rF#+#e|RHkG&W~jk|}SL#l}@yTRt8!I_-;dY0o1UkV-?JXkkwfQyR`
z;qZ|xH~i$S*h`&tw>3|K_i8ix^>`@U@jZ!C{Wig`DaNE+XE7#?UxzU%HC*!~mJOO4
z#bLoL=9s=97Fy}j<ihjpDr1grrn^PSx-I;?xX-XZXb5pmJqiXJPjZhLclo57KIxtq
z!~Cprh3<2&b3gKnVcl%D4;~>+BA=<z!0)fvZ1DzENK~=jKNB_D!Z^``kKh(Rf}fnM
zL+N5&>il{Dni)@9!yy3-&Y$Nlf5^iLR|2to{~*@K9E5KyFIUIj%L`^G6FVPkV)nEL
zPo+M``LFb;<m?^@I--nC(%GE*%23W=&1ul}5R$MLS+IA$Jux}G6jpg9;j0UVG=ly4
zlTT$r#kVw&)mQ;J_d>V>8oKm*#9Itm2%??E>@4xJ7<*>#!kE^JeDw8Fh<GT6`mFO;
zE~gGIk;%Mdx`RlWJAgBGF~6FOKCy0c0TFY)<#|s*wVWDQ=k*l==1hk2<sndEti&ax
z+0a>+WFYnZa6Gsr7eqIk!FSmmvEuP0KAYv0Yf?AC*d`%)BGDx#kuR_!;1QQ@+zidF
zZSbn%3PyjEqGcw`E9P+<m))1e7?=aQ<A0)>?sm*5(?z#U(cH%1Q*4$i#U0EK1Ghd!
zjQf9oG1gf8@OL;)c^`%OeSzqa6#}|P^hr!mA{Tm9K*kt;MK8N;T<P8AIP!G|yw-6?
zfplcyuoGoiuxJ>(VRxC5mTFAu|AS}FE0gW*0*IMb$H~TrLGR6D=$&u@PA@FSpwSn3
zq53X(ew6Wb80$<l^CvvaRHP60_hIYHO!R(xo^`p_L%=gPC|DBC)tQarf+r2dNhhQT
zYAD03x3jV7PaXejj3W6<R46~nnq1pzK|>0|A)>#L8<TSfmao(y^M2Jp@P<P08u-TF
z8ZaPTKlb3+SZOE?9tkmtPF%PY4}wciT(=HN@E>v%)z_-hP$MaFE!K?S<S!T)mjst0
zS&zIW7K$^h$j+QBnCytq&hB1YT?Wuel!N!iv)<%jHuyST<G-=~-?nPz%2c`l4)Ogs
z{8=3CelAOOTlA^*&W#|v=gplJ|KN6+$dkHX0g$ArMjdTqAfvE{n>F$(ct6plm6;DA
z@AE>GS!+sX&SYLRw_=zx5P}8E4|9B*2Kj9K1FNNe!lsv&wAjLkq+2P`y5uRaUiB*e
z_eqW#e6XP=RrBz|7c;Ux*or1AXNd%s+eDLQX%e9+`@AsL)yIitET=2t_#Q=K<9G_=
z4o|>k!9&pZZH#E%x^2i8f5n|MzhlU(jbiRh92!7Bx6qc|6P1>O`&$Jl(oaXDZFfP`
zk`EfwhH(wb6_`?W3EXV0*lbN28nz9`nlJ4LR}3+3@Ef%JlHe(KHsk+jMj~&dBYUii
zW0rp9|FZeG;?+2)?3sfv2il<bpU)_k?uO0J!%>je<PtMGnJZ&CtsBe<tMSc%k9u?$
z8@6r&sf0m{aXimQ4jI5Pt+!y$Vnh0}L5KeRYC#_KR-^rc6wY~3A!J=Pgbx2SSoBkd
zfQ1J(hlYZd)?Y|VQlV4kn2`nBt%#s^JL)Vv17{YS5yMzHIuQC99%sD7C&xcwkpk;_
z{TBuW>op;2^bWkgf$>k%6S@2O-q2%l0NsYo5H;5%LF>A!I8ToKo0_IUjFU1)cRa?N
zb7x`HRhHFNO%PR|D`Ndod(5|JL}`}ytWaTIYPn}%F`TgxHZZs4+bv>^1&%Q1ts<!p
zDu64`wde-@Yw)f2CYDZ4<zjM_d4b7OagqEhNc9PY;ac|K8!8cdACSh<WeecZ1S4|k
zm?53m-^(&w0;+iaJtw(u1{?gp;jOH46wVyS4Ypa+j%t1G(cxF<n<&Hbor|DDIg5Mi
ztWVo*E`#cQd#GA|1k_beU@vyS^AqfD8!!w@Ta=k^S(-Cwc*H4NJ%FD3Tk)p6kW?9O
zfmMM<q;mXS#zs-36Fz@tH}q^A_C<$;nli3V=u;S2b)PxQhJeGHY4~E6GBK9^iW@4n
zVrtell>AN?Yuu0Eq7!SOW!Gy|o!o<7M3Ke>>O;wL9=uVGs_rbqYJ;!fw$hKEU~>$g
z?>&MGe+@!MzBHdBKF3M3E~%(L7zg&2fOW<cn9Sx4{}^Urz^bd<G;e+4u4RF(a)}_L
zY(jL;S&##D0@BKOT!LO((F3879LP7Jk#iz2W@9pM^!o`eyJJD$+)yBaqDLTIlm*@`
zfnZQ#Of^c1c^fwyvYD~Pva&j{Y;_n~thFPXD$iin+ldgfYy<4eP@q$<&IBRoQAx2F
z<rEXKQbUB@Hb&GXSwK^+1mM@-p(wlgI8<!c<<3UM^I?+{ApVUhZN2#p^`=yVByo>e
z5Xkzyl?9v^zm~hJYe;%5KBB@bMY76IpRrU8=?z&MVwPQq5ZHq43I$*)#kg8*m*qW7
z5%!A=NvTUe)&(YtF7@cpyoF03?};sh9o~h)DO))YPsaMVTFozcwgk^-{>H@lLul#$
zJDj!sTv)mu-Tu-Gg5C}C(jM*Lb}a$>lCyB$^k#IJF&AfY>Li-IUnGJ+9R5p(Zj!4(
zF9qhEY}$c?%{5qego@5Jd<6TGj$mtUN6mLjQEelAIy&?moAETeI<{4F<x@hyjxo(-
z7BWuPkr^mEY$^(`oCn`Ej8n6v9nK{eFs4cjv<77Ze_EDs>rSHHr#`UvDZwF|O^Co;
z=vuu`ow{QK@BYaR9`3mVRU^J}6J&otaC|S+U9^M4g+{bVK9}>}J_-oSP{hT>z{+wn
za_zW0k?i@0y&tlncb){&d~Hd{Y<qs(vR-uCr6=mR{FoE0b}#%IbPGKC?*qTrfH>(6
z#l(GT)XlU}bZk@}$cNs8vjrM#eleTdUJwh88uOs6e=NR!Z-&WMqab88h)cq6fn^oj
zef;Vy%rBR~cqI!eIeAEIV|O0irs(4jcK1}WR3z$*(;v6=C(LELEs>8Qoz=paXtH_O
z;hfLg4*moO5rs}ycD}Q;!`K>v)-4p>Lfs)kX$EuC&*Xob*pi%$-*|6bDdwx2&s*Ed
zgJaBU`15KAnMIx9Z}bIxAa@<-4>u+zNd(TdU4i4f1w>LgL|mVB7W}rPu^jnLjEsJY
zm3^J)c6>FraG(lG<mD-Pn}J#Gdki1nh!t<Y^JO{j*uEhWU!M2Ea|M0q>tDlp%KwGR
zf{&1TV=FGQjlzKT0nRaD9k=kdBK7)~2|e?@z*CuZMs}azt1isPlaFM{@MZz{9M=U8
zD}>R@6zRqztP}O~F5lQ@#@KrLM9{I&C4EjlIQF&j(SwuFcf<<Lcf=CDcdRD;`P6_I
zwNUu^HxYH}glI4Dg^?dKAUtV5TCbl9USp9DI2i-B>sf9@oAEKd-JsI-8~FeC7-Zji
zfq~RPzBE*omO0hI+A=%(^+_%CNhiTOV{@`=7vmQfr?>{z`~dILGR&V;1a(K``KD>3
za5rO?_l%g0;a6|Lqq!9*95bD(Td4@G0UXqYdtgD47ql!_CysYpIpN3WqWG^iq+(|Z
z7khsZ&ic6)d>%<b)JP##J<sDS)L92V{2iMEWTV@ok=&!`Ts*oe3_BVZa|!2TaYV5N
z%^Il1jG|`FpL_#+a~bsDHCmVb3mdI$$R77!VAp6${N6|L24Al8y>^~hA@_$f7Hg9j
z)g(S6Fc;<RpMZ*t3Z1`3nmB$h;_}@KAlvjKNEkm{tXza@Gj4&+86zS#OoE<_3FxMs
zE$WM$1m%%Y=to0gx5$oEJlw+hzQ4%%Y|utQ!*W;WwdY}1xFY$=MMH8&DmbyHT=aTh
zT<01C@$aRnqVsWFTWW+MBf_|h^G%pF_zf@n7?H)yLm+xq0`8BG;BY4y2x&Xb1w}OB
z;ot1}C8Y}X!>>Tqtzu44bI$c;`W@)MEKB!X^ud6=X}qBG0$;uGF1RI1i9>%X6G7WT
z*ZsCEe?4gky)~Pi&pxF=h<bTp)szRE$BJakX))mzcW1Gkb9BMgU?r-(!j9@Zaf8pV
zY{=PyB(8a$CQXP`Ccz(0q2Rq$;j4Kpe|4yze-*;|&WCT~WX3A;Tv`vdQ(i&FuA^L^
zc`0+ADAFj$B_Q3(gMC^7|10Y;-W+a0xmK2YwTTjiOrDMVf^@MXMIVmkilHe~md$1d
zxM8#HNcr^5@auFTx~!T3qNEy#Q#Pj=gQ>jx4I}FC>;|kJRtx?IzkrH`I=Q;Sn2x=a
zfWET&u#9;OVpWnr=x~ZFPacm0XDR0VyMptpc7(aJ>`91azo_&sV>~^%0)9_(d3Uc*
zoE+<5yFBgSHh8@N;eldtME?`awL1$TvF9)(?uK~TaT#=1`@<cKYC!*CsTf(UL*ygv
zXz4j=bT(mb6JrY$F3RWpZ?r&?lQTBu>Y?>6Pk6zcZ5mXAQz}!S9lJ9j_}M2^V@!)?
zZPtTYqzi(^8gWMJJ5K0-lJ^RJ%ysuLw)sL0PW(iLE@^lVTlzj?!v=Rq^S33dC*;70
zyeO6rd&7H<E5hWpvHV)^ALx78oC`29<WK38W9KkKI^~fGjk;hA@z-u)@ppL={`nd_
z-z-n1UBAL*uXotJ>H_Ss=*O&wMR+&|8LwQH8hkcqIk|RG#qr;m%HBC0^9{JGyb19d
zXTzQP%iNCPX5gC~C0g;~93C3~7^S%k@V(qBt~+T16N);aLUlB^VMskFX20Rx15)wu
zR%Pn@s)<(|*~14nTmlJmku;RsvL2QilqN8bPskOhcz=~2*ies4N*h_PmKTfF--6bd
zuW<6VH4XK!B#D^^;Km1C^0@yDwjZd4h1Lx)bxjxkU1LSwRhyEH88S3*I7M!!5>a1f
zOCP4}LdZG<&+h$V-OMhOcu#{1=JG_L<r_9vr9xMa9JYtFf%Dbh;3?e(uG<(-dQ=0v
zh`Nb&AJZW(-xP$)zGLGxUHb4_0aghA0YTw%(WZfaVGo;!3QNy$Vdg$iA-4(*#;ym!
z&EeekiXpUM>LB0Iwif-(S|ChQhYtN%f+IKdp#FSIYPd|9wEQ!OB|=4dS>Bked7?~Q
zCZ6Hc<z>h_XKV83%x6?<{0<>QZgQoMPGiQJD)be;LW5H*e;=LC|6%*5UO#ty)L)FT
z&l+%_lOvm1Y0_-HYADdugXTV#j~jEKP=0zV_U@0v*TJUP{vrod15d-PgbO%Nd<*VG
zNpMg0FI-{AL-WbEFsx09Bz;K0x=G8yGg^r*9W*B~t-*ZG@>ssW#|izHYtWi=VGxru
zm8<k*JC3b)VV(=?)ogC$-RoFpcFlD@z@E*gcXYwQ{4$78EaGez)!@YgH*t5VCUKwq
zgZ~j;1NI{q!)om&&`PtVe>bvzoBSv7=BLU;ZQEV^uBl2ksXHU2?11t$VOUY7%{^xv
z_!UX((073x6<XfpDn@4UJq>c$ky*$8lXDk!jyhv9>ys8vyM-gq4}$Q<Urc&_0LP4A
zy|;ZsAbkCKFu3OhF4q!4<M??l%vGIcMR2(Lv^4qQ^a7v!uqJvF!Z7{96U;K!#PV<Y
z&@x~`vn)r0=x8q}j?d;6Ug&_W(~~jzoP_UNeibUbhVa?a>DcEu2Vx5q;I*GT-QXbs
z6RTIuDf&(<MWb=*c}*JF`2f-<H^Ald8g!+x7I9=;GQG>cA;)+V6y`m~)wQj7O-n$s
zLPD9(Lz(W#SD+G17bnXc<&&!(;ObR6H1zaYG~Z=J9MpWUcsbkqER-QF_AJBpM1`0f
zS3ytnd~Ch1fVMkBFuFJ&v)n)6vcm@OyGDT+Y(I+E_t+A@C1?0VmQ^XKT8u$1yCKD_
z0ODr&W6Ia(kX$l~ab!dJ5Vr!6_Txy#Gf3x@M}Gsi`}4&bhlXPP+J3m<Z9<X;yU})A
z5Xwj11V^80{`9|BajKmcOjY&+?^l-e`YK%#c3zuGhB}GD64Oy&chGgo8ic=nW@OB5
z2|RoLn(b?|#kA9w2voG7vX6DULYLvRk*cIObO3U4SMg^j+R!TDBi_rcmXqWf@sC-a
zDe6`bRGy3j$%}e?{yrbg7T*T9tQF#%vo<u<xfRb&kVE)588(gb1BvJ}4jraOrfpND
z$EGq*$4@rjlWyTR-c%$X56ICy#X@?@vI4$NbVj}1O>jxqma2{1i(kI!6PNeQQDm-9
ze?HNp@^cS@XP7xnp1p+|lFiulOOK0t8ouH5)C(|rh>+~5>&9i-lOgKpCLFitIEJb*
zmtt@c*VP{b<38(=V7Ct_F^v_q=Y7DyX&vD6<6n4CEg;o3A0W?q0vKzY$MnnxP~|s(
z*4J!l@$>tbIk*?KZXX1X!?{rEj(m!j6}A0Qi6y%q<E*c%pj24QZxdJ&r<Zq8T73mp
z=&A7^582T$X(d$Ei@}4zpFvT&04++7;HfD^Kn^pWo>w1O=&F;2<t!szeULlzuPWJI
zz818)qi}k1GIM3T#;W4y90O-yw^9xW+BS-7uA5=b5?h+Tz6zF&F@-wkyL`9aF&OW5
z6?MD*V#B2>4EImRnMrEIJ&p45Z3eX6Y#Yn4REY%pRSWIT+K|uPGQ?n+AG9a2j>3~|
ze46iTRC;Sb-83JdxHApvTrBwZ_dNLfjl&tt+YuEZppVV8$;X32G9czbfpvGBwSPgM
zzcx%TtbnNt4A5uebj*9ie6rS2qTi#mso?9^!aA7@(WF?$(SQB~w?4?g%4=PaGiEJ!
zLHi-DdS8ob-w|HQU4X)^I>c@NRW8fjm(7CYs8O9fJ=>hlAJMlY#U0v2HfS@9IHFDD
zZ?!<rS~rxMtxKN2X4!`wTd>S88pgjfB&R&TgJ6N5I3?H~52qNA=U_<3+<lF9+hfpa
z^cSr9bBIq~&aoW2Ca#Ws4SKoNm^W+)E|Z_Yc2uLWk1-M~Gj4+YoJZK#8;EOLBXD4}
z1ky)QaD;69RijLMUo$Sk@v~s-ngS0$H-gWmJihK`FQ>5h3hwE91m|=5@z;(nw3eho
z%I3eIkgG$k4zr=*R-eGX=_|C(vw(F|r(^e*N^pxR;=Ww5rjl1<;CQAwv43EK0SCj`
zv-%=GOWptvE;|M9c*b6LJ%M)$&8e;PHE4YQ3Ac{UK(Ee3_~COM<}ioP2w%pX`&lNg
zsDH_r-x=JrTV`bF*=AH;`3St$T*L7hswBK~06lZRLB+Z@-hJhBKD(t6B%{}heZz<H
zdPQ0^)>4W&%PxbLx(Oe2qMDy<_7Z&Fr$O+)uko~x5;3kaB5s3OP}T94^Y_rB!!PfF
zy;g&`<BtV(8l{BmFS7TUL<=MhO&Ggz8+>8?qt5+mwDo!uHn8wog}F9VNM~V=nG2^p
zsTUKs#ew5#HBKm##kot@bEd49C%c*P1V1E!eApdOu2rYDRWCtx%MrlTKpdIWf^IJQ
z+%E~6A-A0aOwa=hC2gV|qD?iP2C$BgGgM2|X>7r4m^Gf|3fMcUVs}4xm$}*FZZHnh
zEEU?zxU#c{nUiM)CE)UkdBvvXi=r*G>E(tWm?p1HHeNL)HSVo=)J2<2U-S~HvT8Y(
zxNgjLE5Lw;h4^}E1{SewpN&9;<}PP`*Wf--40y%q$aUhBF_9=gsucqKbiu8C9`C4L
z%Lo5fqCvwnY19-wyeEvt3YJ@~SRIHn2CeA>4Qo1c`5pXOt3@<QQ(@DG6o`4Gj9W6b
z$#rubBD*FY6?JvcuVXK4uiOk*x94KU{JWfq6XQig0-x`=2ivBkqF&`+m_1&K7_tn%
z^1|D=`ehA-EchnQs9%e#6)D6l4dVCd>Y{JyUT87RN6A~Z*IH#nmrS5=;HovtZTv=`
zJC*1n+5)ae^~t<@x$x5QFB}=h*eNLnyxo5aq*`zeR&3aYpgaMBhJ5FW{ftO;M<uF8
z%Fyo2`;fcf4petZ(JBXXsyfXTe1B}@&Y#pJp4VBvb+;qu)V+^+hSFfqw6R#frynYd
z699~kK-8V_ShV;x>xS9U*tB#AWM?Y<@w%kx@pfoej)J#(hGbQ9G5RD$Vf~lMc+jsJ
zy<!FQEbD%DiTt6-c|3@pr=aB0H$JLlGb$ZXp|Oc3Fd(W1qY%b;dTxjnmc4w^G!?4R
zm&$cS594+}4aakgZMtQMJQ3Y_0V}F&VOW<Od1!bK@|hR)&ABG@(r)HFud}?hc_Vn8
zvw=D5jY;t@3$jboif;ec6nu2gLqLrme=znsR2~k6^kQ`yt#uQ6JLReMv>A{v!=6;;
z<g$FF3_Y<$iu`_KM?A3x7P?DQH}5pD+f+6BiM?avukoydW=h*P<e_)x7;qeV9G&{>
zvGi>LSN5?37x+9uVHfiP2Zch=-o0S|P?pwB59XsH{Za6@yHMBBh=$wGM!}L(g>{-&
zz=_S-Tn>zbg=1ephjT7pRkIc@Oi`pAugWp`g@pSw*N{ZVZbd<6M8VN8GvfR3l~^DU
z@%lq8sc@Vh?;?90^S$FSkv+ptu#91d&A;$_vkkq(n2s~;Wl6L73k-Dp28k6~;Pt4K
z^Si+M+yN7KW#wAPd(7N6yTYMvi#f=3GR}kUbrAG*avcs9+!j-o2XQKKDM<Om2Qg+q
z!-ZL}Zyj@23OvNa^EF|+kAS)GvT@5*BU*dP0?Sr9;hQZipJ!%AYi%=7*6<Fx<k~T(
zWhNRvktPQ5dbD{|FUwkoa(<fxkk%nh6dO-){gd-pum2+S=gN=-AJ!Y5JQYn$tw6Z{
z8y8TfOy}9G$G{Em!9X*Nf2jEe#?4hA(R07EY|&82S%iGU_k9rKVM*KM9^;x!GjcGS
z!hJm{5PRG~zoP4W&KrLmH$;~>etpVCjamV7#+lQF<|Z`0Uz5yQoQ;oi3bB!$llLvN
zpqE|`V!+m7uGA`;UlH^Pk`K3w54YQqfcvWa#=ng1l<^Jh4qKCg?(?X!eh6_3`X*Yp
zMT}`}p(q^Z#HH%=;Ne1BvL~w%edm5eKOMG1mstpFCU#@Yo{eCe@DTzw563YHQFw;U
zdwq^5pjS^PZ{p>Lg0@t#s+1|5Sgc8doxY&f_4()+whz3@HgF3MRY1$dLX-f@i0D1$
zcI&>t`kibq>(J-gv^ARRh;racZj{4#Yef<=A=EW!7V?i3%xJgyYpm%~gp8Sf$Zw8E
zxBC+OFy4YV75X#wavxm!s6%SA&p^kMP;T!iBT{m-8TX!4AQAa@IQNr5EQ?$qihKVK
zL$cyrC1u)l{s9egD)SBVofa3aW!@Q~fgJZSONJIKjO2vU2gS)bkFn15KhE~yUkDv)
zN(6fi3YDc=u_sN3y6vzQ8Edh=hnBwhSQX<~K70ak`SVfo?hwCG-j)VBP&hd71U~;#
zr7_d3c?T;Y9lK11^i0|WN})C+er*nB)hW>)<1Wt2eJCU+B#J*38q%OwQn*OB9t}nc
z!0Wo0Yd2_w*s?;*o8ZN;eTT6^lfa_~+O*dFG(MK3;iX$FS7Kwta`v?#f9DMNMG3L{
z&l%WLSO+@(GZ_oxA@)tO!5P|?q>yE3eGa6;h;C`Jd;S*;|8fkRTrKJ6caPBi?S97M
zZO4q+Dp=`V3qjthbi^4u60X7KxrSX_>4K%OQBjtdsoT<s_^V(hWL%8YwNSaa11c0&
z@jbb0ud(2Z*k!PcGuCGwi=Xmz#fzipy}+8bcU%HXoigTKUj*c)Hr+ZyfvhmOh-#iJ
zU&Ur5aT?5jdR3RtxHAu;wTfYkP8Hajzv8PF4f4BA#Nz$rIM}t^j*K)ehlZcYP-@b|
z?-;F5-0o-dI;YtjM1K;uLUS=Loyy!39h<>?vkLLJ^aEbnodDs96@18k1ChJ4JXncr
z>2r<SD0@{%m7Ha0spoo_tdfn%o0?Iqycpe-eIaDtGXAQM2@SD3&q;n9<`RF8g8&UT
ze!JOxXp&Zky2X1igg*r44bL!q>}Sa99}XFzcX4{7G5wvQNHk|EvcCE_JaqXMiu6xH
zVA5x3Fz|)I`gn{wW=?~>?xEw&&D_Bf6I!oX3aV|gv?@^*D}&7GxA+jObovg;MbBWx
zxyMk!crC(p3S8b+GkP|sh)Zu}a|G>!qVCCvZXMge=bkI9ouUUpM)RS+mUUB8RZzv6
z?b!3IiR?LfdUD5a3`*|j8m!t-w}$an1b6X}n+|c<B!Pk)De$qm2sKB$v3!Om4SBUn
zJmy0?>~&Bl5qCLIi}(uV7lz>OxjNKea22wo4e5%jjc|%F{<eskkOpcJ^B{FHQ%i=t
zyjco9*X}TOdm5khQJe0yQzDKNW4JB)XVLG|X2=?M0f*bzp{&$Nh`x6VMw-jf;hPk&
zX2*HZsTAQ)B`vZhM2&2Bo&mxrS1@`J2OYt7G~~(=F`XWXyHeE1t)p_(=WsbU`DY~#
z2(uvHw;dKXGv>(Wbc~)!!S_=#s2+%gx``rACNUEg|2xLddMS?%*WW_XqIaO)#XJu;
zt!WO|$@R(W(3d0*GE~|Bqx1}3+;$3|Og@ICga5&Ji5=-Sya=l0*KnGN5gGQ^nxyyh
z@H2uriYA^!uaJXWYw$e$yO_Cv8^R$b(1NRR)B_dPSAwGlprc-dc^yYV=d~r8jxR(*
z)``g?i*V{RWvsjHfW8$a{JcG~G~A&RB$EUn{PmH`7}J0b26kA-_5|9DPon3+y32E4
zK~vWk-sJ5P$TE$A711eJdVB-2GZF1_wxoV5t}!1q;~ZBw!xGmj$oV0KftxRbc*IAr
z#-ms>ssV-O$Hcb6%kXc975T8zg6`Sh3oeW6@$O3#`e1=3(Oj)hW;1`gq+O5xlGP%0
z7sDZ=TpCvN+=bqW%o7&ofL_NZ@OhU1z&OzxthvXsZMSalk~5v+J1mne5y!(6<}_G#
zWeWz(O6GPXYSSYsrC6mu2g+N=VWoK<JURXh)h4|F$&UX-MfOclx9z>iXdJ<g;JF|!
ze23;T>ck)_h41Ruq@_!vp}crHRy?WU)V@~#zthLKqnZ%2m(2rR<3%xgqqvVA*_i5m
z0;2X;<JszHKIXR?EDUFJgS_8h+@?yS-sxc3fB6uS=7-kCuE^g`!jRERMDj3%`VQ^@
zIrS4L?-c@)n0l_US%LC#y3CQqcIPgE+;4W~Eh>HwdklxrR8u*6J?R%J|9T6Ku4DNX
zcUYI<bSZ-W1oS=~33(;j_+gkT@d8CU=a>v>SlWO)Y+~_~?GsEkn~vL-x8e`=7w|gW
z4JTCPqIB9@uwU&3&W9Cf!{tC|PJIJ)d>v<V><(_cXH1++Ls07n%jyq0a&>?I<zBo@
zLP=z~sHWv6>dk9_&@H;u->Cw;W)QA?`EC3%*_^1YD}}>}LL$7K3GKV)<55i>WBy$X
z%6bEgty9M*Pu$5%Su?jz{S|mQ><64R5#v*(Ce+@~x^MSc#-}h<?9ccgPgT>g-<{o^
zHr)~1t<xb@l2*?9gDuoMN5Ri>a}xBaj-P7v8QUsP;IR+7)O+N8tYBH^OKIikHoQ_4
zVmY62Xjqrl&4#Z!Fu-Zrrlb2zbE;T(9~;>Gpk}!U%|@1hQ3C6_PTj~il&ylGDGIc9
zg(oJu`eFF9PShG|O<yjRqwZ|K*EL>?*5Oa|TmFDcitdEE)^K!u&Hk<_pSZu->SRTa
z2K5*lhgVIXVuWBORG17wXXaV|&CX?#%uk{XR@G1%b_UFAjL7~arx{mxC7fcpsW-_2
zqITyPOsM<@BUk+fpC^7Gm}pTLb1j>5yw%H1z8QrdUF=AK_iuEt{spC5SE9+(2#g*v
z2~VFC5TU0Pio1{C4Z{TN5U%9L?9IhNi3};o?8m~?!zdX$No4z}8C;Zd`M%XtU>EZ^
zMF08?5j9=d^Vb>+7GFc}n33R?!FZ@FTfL)Qj%=8vO3l4u(XD5iXjxY?PN(y5U`iY7
z;Jn1x`2(1^(1_0Sea*b`ckmH&LM1yMhEm-{SYD)oCxnJ%jQBgeW;-yo4a-qjs{($9
z&7mMy3V$;177M(g<{nv+_*ol-Gpohz!PO8YYer*a8F%#MDA+G$NS?d0ZdZK{?;%pA
zUMn{74S#3A!s+AD@yt5j{(T|$ICBX3akm+cE|jB>cD_V|lxy5sIVFm}b->`jL$0sh
z21W>^$!(z$$-K22bw^0icXwsTh||d!cH9iF#ha5iWh(S%WCSJ;uM!P8W<?~OPenl|
zzjCj^6<)5si!pA7*u1n3zIqHoF#LpL+eG*_-GT(ad<V%6nWET9=OJ?nd)8cLGr_-S
zTn8SPg8Pwa7#U$f&Q-C!d}jjunXE!x=JlWlI|B_&7=t^X{(-fV<^$DALBk<8(JTBG
z#(W9n?LXY$l@2op`eiNX9rX?d&FzWvkvynA_85lwGl$=qBvfQ`_{7CSXw&RItXips
zI~^|LUf+*c^`8L*9*x5i{dU#`J_n9(2l3RQJMhy`je4@*zo&a9&O1H_f1J~!<|9l=
zj^lA|q1qQ1G1G)t2(*c@wHU*!1hkfU@Y479!Ojhjq0}aX6J$OU`GmzWuWKl8b%=Ss
zBVKU1P0TTSMII`T@5MNX!7`T1Gt4$4FTUv0up?<O@2(N;-v1q@I2)14A6ee~;(F+}
zG@*J6o<nMZ0eTrM$2jXE$Yjs9uwgEEwnZLmZbiYm?o#NI9RhPdnUb%4d+_Bf=JL9K
z9ELPoQk{#nxbFQBdiC~q3@F>q_sOk<>1-cT-0~82s4|9Re&E#KtB}R5EFbH44~A!*
z!O1Mwm{T;0mrT6F6{StZT7#otYjgriS53g5hRN9Mvw*qN6U71N%6JP;6=JW(qn_&n
z=JN5xW17kAbMyeoLpSc95yoVrZ9A@guT7VYH^nid<){any{s&?WnF^`{NkiVPRYGx
zd*%&XK@H1BwyuTIw^YfT%Xcw(yDGQTgUw{P%Wz?hI?1qKhsR8dVBhI%oKdYro@GsB
zPKYK{EANL>ix@M_l<^+^9)!EYjp$t!X<|}a$U2_6qLYSpRJh85-tGH^hpVk<Rapn;
z?AePUwI<*>>pQ4DXoP(+itw5BO)af!z$0V}9=Y}m9flnRNKt~)#-m*Oc4->0xe%LM
zpMq9;9(I|X#IzX&xH$C*`d%4}1^Xg-k2!yE_8ckFK|XS^Gl$SsEW7NqM+fs|6EJAw
z5V-YMm9E)MvE+R-Sl`yiO*_q~8_Pktr)A?GcHb@a{>v#$(j))RavBt7bIC7b#NmOF
z5b{rb;f~YlWE9)?Ikk6T*y9k;**yaOV_w!-p7PLhV>Enb{Qa;yCiG*iKIuHDNX$Y!
zahcn7xXQ#}Sy>|>aeo(@2WpXJ(ZQ&}vWnaJH;e(vb`6W0F*xoXm`uvW$%+Rs(C<BZ
z9BP59B?7wtw=@Ybf5e3weuIKP3N$NwD=t~<g9jYTFgeK{LYx=F!uOxxeO(8RTxLxh
z11#tS+fLkGmH?X+?CCx?RXi<s8D({(Xt&x@9P>6FJdd3~jeSXAc!2fCR=ndv@_utw
zQ<C9kvlekVqKA#^Y)P`;XMXxtc9x6$!SZ&sn8m(J!ub!y^Bk_g_U<p3R9g(4W36ad
zZ!ppmY9u)|9bD@bh+E83@$5@S@joYNQu4O|>e^g+1GA?b(PbSt%?*6_=0vb=R>EMr
zUr-W}h0h{BK<(N}oWr~&B_;*<hQEsLrsv?xC)UaAj^zesHNcPqYQ!NvAM*cAfB@Kr
z+egfWO*XaYUh#z=x!sTo4y%cx#>;`g?ZX{m`)I!5+gb=vIu5rOPtwt#j?c5(h1I(!
zqa@A-ymHraE58-79Gx!R{(e1l$f#n~+|4YLl>(BBJzUskYtV^ezQn9C;#n+f@Wb^l
zNY+S+7W`Ht&bqO1;t$K2*liOXlwFP1^Zx@IKOQ$MeU8nk-I%mf0*?AukUfP8JL=^5
ziUX^-W;PeDHywmt#Zy?ORDk6*LvSy9x0Slz;G<)@U@7x!oa@Mfls#(HL46K(cB_%H
z3C^e-7y=Hi@n}9>jn)q@1?x*ru=k`YIlb-^3hFM8Z(?r1Cg&7riW7p>S{2g$E(S(#
z5fTGCZ!EZc4V9O@2g^q+*UPwO?g=HFcZ3xtG2e5tYpyuY){GjhkfjwzxAS3lEve?I
z3+ywNryfyu=pR=BGmrM+omX<Cf=`8Ve`U~EaEME8mxCgmChU38jq*olV%knNUwV06
zta11{=kX;1^<7j+$Xi2EVs{fRbdi8d`~V)C_8eFIDu;11+2=5}ob%dH!o7$zr4I+R
zXvlX{YL+tyvVk+9FO<iNjOFL?a{*q~v>*iyM?mP9A}ZL@!RbB`kZl?rsPEm6w;r)<
zX}KA-0X6!@)qsrFc!vQwJGkHR24pDveh+VxrWr49^4B-3Q=#8pRIZCb51wV}vp;jz
z-gXc&J60Ur+rm1XXZZF#U$B0~IZ)_PraMJtC`o?9A7%X0o|J{?XV%F5Z2gQ&<72Vz
z(!YEosu8EeHDGWdkMsF`nfI+*47L7jH*jJtzSk^5Nq8;y{)#R6xZa#LxgTde&heb6
z;s|^_coPct#_%`&6^PyYY*Y>5z~rb4*p6j8=%yd&EJ%kNB^Km|&mhZ2YvH_*C74rj
zmlGJA5!J2wEN)>8_HVB`@Koe+toWnIyB*vpu8w*FA=VCJ%l0-5x!}O}9Qw{rw~l12
zcohgzjOUxT{KmmhBT_Z4i8pIZ1o4(?bTp5L?DRHxII9u6F8i^a&j;A~su#jicSGuN
zW4gk(l6fUFF#2`^)>LM~q$EA!n`j13A9e67s?e?%XRxC`op-!m&Y6`BqJi~JJojH0
zRP5S=ZyN95`kw}5|0-*u7ElhCbCihPMIG|Yxg2{Z{es&4`c$7~i9J-`LCw@%xH&?J
z`bAyjUo-yXv>FqlGuafTj`w7)NF(C2{~2d+NSodks1lJ?7fcaak*0W0us`&iKNGJ^
zo~EjicP<5ZFkOX4U5|vNg@(joVF>u#yn@+Oi<TWI0E?5%>1w@<>rlST_x62;2zM1+
z@QTfnbdHKGuG~iAcMp9M%&>a-H(al-LMCrGj1dh#ct3kb9CWyXBOC8S*qjIuoH2t5
zrHuQRo(2(|G*r)MKm)5AoQt@e17}m3eW3@Ity7`~+V41T*SYX<o(5U`BoV(Xk|+0Z
zJf=4|;?~jvyr-u_wk%@Il)$^Z|2wvCtoMUzpHZkj)r7>$KF4Q<U%*Go9#^<e#ebGO
z#NQsdn4BCZZa+E&_ZW6Sjt}zr@m0_vkfNB$TxVO^c_=IdU&L9Hi5Ikqk7*S`*i6Vd
zb&cQ2W(hM5Y>1293C=Aufjc(r5oDYi4h8<h(4)(W%1bLy$FR*@hkY0Sd<WZ^-5m$F
zH=2_pCoHH=mKxPivgB-53(4R7cy!qQ0*mE@B;d(<PCWYqg!OM>E<ZnB?|uP(xqAYW
z3~oSC%|mds?!^kz8T`(WUyOHF&+b9?bl#1t;I3E2Pnw^LP9<Kb_U~g@7Q6#8-d^KW
zEmnhSupJH+H)FVd3_7}>;+oetgM<HbSa&BEA_RAM<wy6SW9M0ZiK#8Rx4UDvwl_Yj
zwMKiRvCzQo(GJq5Fz@*(3}U`Ovu1=ZwX)P}ff7G4>jEaoGKSn7WqN|~mj$~va!s*A
zA?EgUc+sm)>Q9?cR|O5a<kJ>3pIw1|Gk3zNth?ZFYc);?ya3@pTfzNmJDT{u!h>cK
zSU>V9j+bHcyQYbt)!70Wn<S{Vs~_JVw<IC1N4W88?8v+iw;@?_P?VpV56!GE^<%CC
z9`DS@rjmZn?@I_AywiqpU;d)-@<J{>`xDG>vm$<X8c}MR9Er{!!05dzaC8p4CoE3l
z%Cvm&-C;SR5iuN2?Z3sc6ONqlSOV7~twM8RQjuf5kMx#yD47!pzTJNCS}PWP;6CSU
z&$8;eve<L|6<@dVEm{tqfGM5Q<Y%BVX<ok+8+?y|>nACiuwRUSB3|I9H3Dj?twKJA
z=n{L+i}>lOJe@fG848B$iWdi4lQqv1Sl_!H1LUIl($R;x%kiJkcUU5Ku<9OUJ$1r$
z!*jrX(<tc6ii7mN254(CBVP3?oYT%%C}~(N>e#QwyH3?6Zj;wQ=Fa<=Y<@=MZ1op+
zcC%;Wr3?IrOvYWi#`^6~G9gXdnrJRzj;b<6bW`vYznNk{|9oTbo;)^#zH<eBYUE;d
zSRrHg-iOt(x^%7e6sY`TLKTi2M|;r-oSUXe6o)PW`{!pNH<g`<Iy(7=Q*%)GZ392=
zzZCGaKZok)m|rTf1wyP=h)u$E=>3=>)F8<o|M9XT_Lf^<`^8vrVtK3M5&1a!sv4O-
z4#DZ)Z0K1nPpgXE;Mv&CIL_xNu0&ntuYQexmg-WFl7^t~Q=xD06gIUlfU*iPJY(mu
zh3AXWajY3PWRE!s+o%KgJFQ^ToK>jC&eg)_uKY{m#dwQIksvEaxH*q`qa9=6;Dx7P
z%RblZ;X~*VB|TbpWFpJc9D&jnQ=E121m-@~rm-4I?B`CAcGt9l@Ah=jt1=7fH|jqu
zm01PS+IrOcuLDlpAx$10v%s85Jm(iV6oiKq&}6SEt(*E4Z}@6bE#`%t8mSJW=PMFh
zi6VWcs!6Pv(<|q1JHOfG82<XLNE}U$@tzkSL%C-w%SD*Ml2`RGeo%%;_r^l7?sr(%
z_8k(=n~=ka=2YIB`N$@xV)`5%I$TwqrVO*Ehn6ali0%vMccKur26Sn*|5Z#K{EkzZ
zvsx*_h}8YJj$anSo<;YVt8L3CzBm2_Iz}wyO`H<2*g%?=`bg8VA3tHTSv?r7_rxbw
z+N7?fR-|mpdNhAWW73x@_+4hrs&KFHmY*UC;%c}Mr!!)WTn8@Xoi-;(WSP6FO3b{r
z9UUz`aB(^2v}k1$UbDzShsRp9K+hfwRHx(4wo5o%P67N5Rq(oFEQnF!H>fEzg9Clc
z4VV&&V;i)|)br*L#^w_eg*4IOz{@zmoXoEu%|SNX<VUdc+N$(qoUa~^_m-LwLDo*!
zwlXbJ(RGtM;;hCT!$y4S$D53Et%4%IBy@_L$$IzA;+Xz!&SEAqp2=6tsolW)zEGjH
z179(Aj4565{TuY}?Zgji_QXgh4!V@iqMA}L*6HoSHNH<VMVt)xFV<j=y&kVot-;@@
zzlG;>tf^{!BT90wLNedM_jIQ5iJIm#rtmbTDyLwr|4{TD+lQNt3`s;r59Tzf^Bd}-
zAvD*3#CEd2WL*w^`g9D}?w<?`&R@a_mVjRM3B2&dQE`l&Ij6B?1>_c$z=XHEFuFen
zRF1Uc(oQ9^zx*)f_pZU)X{_rye*^xA(x#RE+0g8^99<d>FhqvAZKw2Mb<H!_RU{;W
zH;P=rz5M`|bKv$beKKlnBz9e3-eDIbx?%BisGJ-IQ_h&v$=lChv%LXrxao(p?q<T6
z-x6@^9fnoT5BPhxl*q?SOR_XtldNLg!<2gku<7p_xSzNVH7d^XIxqi2l{+#dv#T3A
zQsg+HLL7gLo$+?L8Iw7F%A|mQffEnv6Pc-2EX!c)ED1X**3tipk^`Nh#+=XS+jfcb
zZ$K2jddtsJEyi@S3=9vPf{sfOPejTR-=V2+(^g1o+(y8*XhkyqwJrITV@Kea0*rB<
zjNZ)Uo^1SH)H`uF7VSO+QQdktXS4#Hez6CA#2hC7Jr1rz3`kIDJfG1P#~s|POx=u+
z^1@^JqWI4iMDzD?%-lKvZSk5k{7x@;sYrqEqdGp!N1ry$zra5{d<-=AsuREKm*HZh
zDwWI&6O|a81K&+O+^O}}xNchqYK%X_sVPKbl=W-8{X$6fGz%f)KONZT5(}RH<$<mL
zZ4l2zh&`hX`vY#{qR$V|Iqw;U<h~O7Kl{qO*2YvYDpaJFQwR|gwWwOH7@Yg9ki->Z
z_Pb^X(_0ILHxJ{OQPtq~VXCP0!6D4{5yQCGh$+K6Fk;<depRI<3AMX}g^c;Twy_0=
zj}OPZhh}v3gdgzmnF-x0e;dO&KRhswxwF@N1uYfE8?n&AJ1d05Lm>fHf7p*hKZWA2
z0Btg&lkKioXYx1C2ub(KB$yhe3|`iw`FGc<Fk*KuH?rg=gnXIHVT3fbVEgiWYgsS-
zstt*W-3gyt*{)_-0wyw6vaXIHQE6_&M@LSguk#!6n5nNYV#fon>RB?lty?4V)mEZA
z3yi4LLmi@?E~F;v+dw(jj=9StVf~JLESla3F~T;qeb51sEsXnc%Z}a?%F-oukFkjP
zQ{B?NMBZKraJRsS%rer3=kp~fo1_TYlS*JnOd&RGIRO)%M4;ESWc;dGkA9DJ(f|5y
zoUYF@y*cK1D@B%!eDMi_=RJic_AFJ9(xL{cX6W#jxm=e2gz08?!B6ilZzS~{S9rV!
zm}13zU<DAhcnJ(@<=`mhge@A$7`#T;x#1q2_`@z9)3jNpj``2C?!@6h=S}!;r4kkR
z#B-5}hftVUh+~N*ExJ()^{K$}D_770%&3=z1t<@B44+Qwkfp<Q$pnY@;A2<78!YJM
zlTTk0`Sz9YG5?Na&bm6Wj?_pPXdKQuUx&bB{5L2ruRu$#3eTp7^V>5U(L~aQg1ZrL
zrCpj9pdww$=InW0D%4zEjh>BAVL5|NY>?du(}v5DJeJ1ZApZ@zo-wY?Nf}}r@f*IR
z-$jFgR&E8)zCUM1L&<6_dT922^qu^Ot932}vu#qezI+mP3qL@sw;{f;O2B`q87*{F
zBzxv)QnxqB;+&2{Xf!$>eAaexf(6T5$FWT2|Fa+=M^1>6<Rq|K;S9$8(+=UmvoVPE
z>vk+xr^XL2V8A^e{1_umV=ScM;W!C+er7Y!Pu+a_4>2Skl!2(8EvU0c3l6&KP_uP6
zVTidMX?`66*6C5OX1g}o*XD-g8FM7K-Qs44*^r@2q{ws832dk;Mi(Db*j}KC!|XN5
zmGLYi+FFJo9;YzFcpNrei~`W_!|tC~K|J*}v?e$}h+Y%dB{LG5v|n)}&wN3HJ!yPN
zH=7X#m_c%2B5rt@3eMWO@W+m2d@Uk4?eBT$w)mJxWcUrwuB_%RNEy;(y_MXW#|2om
zv6rjTI?Olylgw+$T!m&`306;d0Pe{<V0ncesrhA$le7HrPiHcA4K)GZ;eOnG19SFJ
z{0HxQFji8U6N=66V$b(V3^F>wyIndWD$Vv{xy>p#>tl_JGS!G6?4x*3`wdVP?*lte
zd+L}N1P>ibF?h@)*85!qRdtlBTUE$~Bv-h0oLLWVYjkO~SqWIR3?V1pNYTLbS}gSY
zgU%QK!Zm|DJn>zd1U8t_qN7hB`j{H+@m0c2HCtiD`)oGXE#~Dt??SoLa5Qgcj4sD8
z{6CvNao%jKd~*XF(`&gU-`p`ddNSwqN(6Z?cH&F_Vi;?zKq~#TsKCU=)o<!?UOM_F
z8lBIBkh9}(&`Og8?0&@!%q)g!3TC9v|1!5pTN)b@7l8DP3dpIKLTPaT9_agk!F{9A
zdvF2jXECN$9rNj&y8soTqhX+Q5(cxbO+n2Iem2WFS0$t|7eXHTzVL^LKu_E@LZ9@s
z-{=0F_6EC@x3S*5sIaKtn8x*wLdkMv8ZjfEyFD0+N%Ps<&axLyO*jSdVYbwJz!o<B
zkD@aTtMTjN@JWN_S@W#flqq$dwaJ_*Ata#)Nl2!I{zpPck|{(<NGS;oXRn<Q5)l$Y
z4kbyFBq8<g_k(Y)a;5V;d$0BT-FN#yDBRN~w0IQ2-w|pg$nY>eeot}t24nW!jWfFw
zcVp8iW!84+F^+kah^^a~;)6oY9Gz#%B)2XK{)e8TsLv>I|M9)SyV4FUjcm!%^_7?z
z{Rp&kE`mo)KRV$`Iux&#a*x0p`d{TCl)arUqz*VC9k)l1sfT8w<h!4Eu>wC+AO1=W
z&SaobTOKXq-a(HBGqgLTNsKP&Fy+b`q4m^8oaY;ZbgUea84aQtpA<o|jQc^F8gXeJ
zcfndRbR4LSxdY9q(bpezapz?a-x!Dyj#F{KFz!CvxDaq?8YJC*fsfVmu~s)13SU2_
zqDtQp*E++I<@REZN0Wp<hu&g!-wW_)fjVn>`Wgn^QY4x3dpJX^6TDYFz*XA~SwH~A
zmE1FVtW1kM>OBu#9j%zBUIg?$>p&9p^;xe^<rs2G0t5Q}#1|?ytaDe2&^%lVa{}AJ
z*_Y=jzx+nsGcUlkRGm$D!hIp*w!w>LOLA*$5hU!qM~gS|d)clO@UWRHPKpl1h*eWS
zlRuLmUJ<b=4z@%VULk$ib_UB2d!yq*&Ye814H4%K(K1ad4DgzTMww3pN#0~>%r-sd
z_Ps?Yx!6Yto9=~@+YjAkuMbJ}Ppp9WeQR)Pz-S1NHelY%Hvaif5yaJdaA9&K6#M>$
zhF+yma`Oy`Hm#O64jcik@9)rc?+i)qiMMoe9`~49=m<AU5^=B@|34eDk>{43uy3jg
z>1>X`yGMRN(9wEy@+-m9(WcBpVHABd^bYzfbl~O1ro?2>EvS@Agx-#Ka7ch6+do!`
z1aV%nNo5N}c)u1Fp18r!Qt84@-QAcID}kSZ_UxF`1DteAn{2o1M#gjI;o?7Nr*i}=
zy)A^?<YplzAPNqSup!aB%l4DAsD@tDCPNng!RVmhIBw<&kSrZ3MA09RdM-yA;Ji;j
z{aIil$9;n~%22cH4!SicA?TT5lkPi|1nP>dS`($Bo&Cgj|BgbRd5Vyp_8K!ZV?ac~
z-J_^Z(Ec2RU+&tlrTa8V^=%2nIG;pGREBUkN*)gX3&$a|dA>5SMBJv&`*pL<gJjoI
zapgnKuw0)bJTFipHl|&0@w+*>dx3ic%C5r|z7vQIuSU<q5+Srj69@io!c}YJNy!)y
z%sg6vr@y^~wjX=Jz^DjX4c36lMFlcv{T-n1m5BX1H`JD+f=uQooaA@;=033)7xEH&
z8UBTgXlr)ORE=%@z8)Os*}$xTN(`~igNTWvVUCdpYX2r^uv3G{<fjSfnGMC$FJOf7
zO5EV|2rFAv*}dT!L{=UQ)&^y0;FJY=gDl8VwZVAk#~s|fau;6dd5u3~ZO9gLBU15@
zpU<=I2p;3Zr5`7E;<&wunEBY4c{y~!6)!cWx73ip%srU8(2M3Auwl|^Mr^VfpF`Zg
zi<14yOsZvrM_rFYX~1+)xEFxpq$iviU=5{mT__o&4WCzw2p0+9;qGE|&gZk75k1hE
zoha<oG-pp5?TGiw>vWmgODJm^jGZzi^gKEpdwng$DfeTsFwF|wY!3-m5pAfeF&H;Y
zwqjw&eo-Uyaqxv_!7eu{@V#>lBt6!}*j#n^<)Oop*G0n7XRTnnzyb%7S_l&9Lk!w5
zv0*!8Z?t7<4|zX!sXfmxUJ>>>z5vm^hqO`uv*1zsQ0Ps}$^E{)NmQXRJ5p=Ke1?6+
z82dy}{AxuS@2&tBx5YGNWFodJ&*u!10m6-I+N@{31Mz?H0#8r>3$FL=SgX4^Ee?nS
z-HS?0HX;MG>t$4bt_9d`xCGii##6a&E#fih7+w0R7>_Eyz!Zga>}a_Jc7X>leOfQb
z5k5j#-C{UBt^g8N{70MgG+C|jEhr4MK!p|2IHYD2ioVY%N<WYVZii*UoX?TiX%c{c
zhWp^UW(Sth>;n1y%$e?&N@xvuA_N2{i>-#M(aDv^K_RaRw?#P+v(g3>r?kVEmNJn3
zy#>xu_rV~t41GQC35}$RyGpKz^}d@B$u(zGNPmj0f(g~un+I>7NHH_Ofh|nW1J$b$
zcrjX)Jhh*PmPh*%aX=`(JZla$D<8qeU(GPmtP7HO*TOq|DE3<Y3J%Nv!m#g=w6Y{u
z*!NqDfYxEob|}EuT?d8WU2RxxRRQKF?%^9g&$wnj23CDIgZZx4ILCsDyKRhEQ4K#^
zJo{WU*4U91)hV&cv+sGgw@wJS^Gt}AJAwm?PU5CRi0rWnsXHl0dRgAZB}4Q{%32#D
zRgS`OMkUyqxD{Z*aCqO+mt;=t0IOXhcIBfP={}GKwQV(Em|2b2ekrq;e@|iAZC3~w
z8O#~J&jsr;YjTl0LS-?023&uMn(tL5EBv&H=G$-J>theC85<x-Du%t$!*E^MG0dyw
zx$vw7ctT+dPMu@Rx~3n7_Y>t<BxhcYy_Sxm&>Lcna!q!&Lyt+SJ_{#ga_s5o6}aW3
zCF>ik!}j-X#*8EF^jaTzX42W0DZkwcxn^O)?05DoxP-sUM%G~JQ4^u{zL@4_eHYZf
ztiXVf%i^w`=fE=f1il}sOFI9phIDYi?3!LoB;|XH+46#RAn!K?^L|0#A3;)5B%D6h
zgP8@qo9giZvg$S2IXf+u`|Bwcxg9DnSj+n%S=F3n^SiLb`Lb{f)X3T}Ba+Zph4z1_
ziIVkWg$!$+LH=&V!sm0|i{ljty|WS=%Ubcn;n(22@fVB>4CDHC4VIf_$Byfq#?F-%
zjIB2yTR4Mlrb{ZyY4#>A?k=>ZID`8=dWmI42ZXmnqtVJfK(KP!M*T|^z_e47<)`&U
zHr||cMu{Of;uEzCIgD4l^+~R)59G8h#?#!@6K1P~$|ByKH|>C$L)8%HeiWW;R3`?i
zX`pp08r%9mfKQ`!Nq0^;UbM7jzv8Pf*6$ZxJ5iOzJU0c8&wA1sI%*_h=NsBgR)EEo
zS(qE-O5xT4oZ9UJM?!2#yO#*sR3or|r3s4=$uJ>0hl={mkqQ%d|3FidDYx#YB~%bA
zL<ZxHuqJGLHVG$hd5<Pt&mrWHK2z4PVGD;I<+Jj?5YXZ*Xtw@?$g6#rZ0>9tT+tzv
z<R((t%Rz$fvKqX8gU|chGobR`3@X(t!Xf;5&|_jlguVP{CdBZYR)a9curG!z?*acE
zn&4uof*(dW;@>zUV&#4WH78wx;+uTf^u?T=KlcRlgE=3-zX2d|I#izarT%*tfYvx2
z*8dmJ16IXK|5RiF$A$^H;VV%Zxf8^jKY@P#G;pd@Ad;?g?(ua$!Fg>W#<tw0VMA6^
zt684~)$BsByvWZI>V?!}_<FH#bvABoSqF{63OF$_n)i|2G4qHXJCr0xMEmy&i9!5p
zmvG0&+!u7KC+8iQc2mt$4y@cS2O{>Jq7t>iLfOXMAX#}7d_VBNGsPLIPlsS-ygohJ
zkcIU;BNS&+i0M-lSlX5t6g4jZQN2o$Y~V#{y^#Rv2NK|2s6NqNzL`$=bOx@PI`ZeU
zIazw(4rYpyz$e*+=jAsDHQRWXe&tq(%hh4UL9gIP40j=oZJ?2fXCc-9lhE*a7JA-L
z$CXQYKRbDLVYkX-IRCr_Cg1+cyAUd@33o%Y(|6qTGasK${0yP5`mh$UIkWmMkB`@?
zlD0>jnTuaBb)6Yz<hnsr+b6JSOoGpPSMl!NR-Ej?dC$ufKyyzwL{1rnFOn6=;grRg
zHu*C+J~xNxMVG<bF;7T6`k4C8Rz#z)RyszW!TF{3Sao<dBv{T8obUaHgyLQpHt!hC
zk1WK~fd%mTttA<ECltlE?qayH7OUR42v5pCz`~O%oNe(I*EuSa{wr6)+Ce&`<K%t_
z4!we{+A^w=9fu*K<=9P0Gd{m3Pu>I>;>?#_cs{!VTAdDJ^|Wh{x}sJpn($G$I!K8m
zoUwtj!iTgyzY3M7U!{{m`T0Zjm=N|zl~v4DVATdc;rc=mSzQ(bVGnIEU|0+`x?iA-
z=ZNp_+JRTE7?F|b8?o1^1kCg9&DN`B^G<pp-E=n&-v@o?3_5M*MRH(u4)23`gk$=j
z`QU3f9{m(!Ve0}B+o}+cwVT$Voj&j3zl(?P#l9F|v;;?f?1FqXb5{Sj9UGVPPN~iX
zaG4QKEu8Bh(%Fm|F0dr2bB-Wh<z0qiGd5>d4M;n?AoN8%tPZ*bC30%w(;f3LteR&5
z0+vIg^Jm0OmYfm%m}-mH(At6bAw<#uuIso{aM@*uJl6^X4}S!aMG9P=WlJ)ePC|Y5
zI?OFHgs%77@W_$gB;eW?vHxjnc)5BxF8kFNT}qYc$rA2HS6K>k9+rT}G)o-Ykp$Aw
z6iW=|(`#ZyjJX52z+IiBj)|o!pL3T$dMx%@xC?qFXc3XgNkO#miB$A+r+e&}Lqg>!
zFIZr!%@#D6a{p~LHXWUTjg6WtuEGcNpUBX5lsqeWWsmpgx1o)H8d`l2g!}pXF+uc~
zdfogBy6<D5%_AB1hEB)e$1U^?XR=w3u_2cxaJHxJDhzw3K)dq~z(4zB?z%81Vd)95
zwN?(T9JzzH`zeIGr{Ph}a&UZCEvyyHn5+74Xq>FV-fumOo*i~@%jg%ZKlK8~%Ih)t
zJ-oO7_yk7o`Uu<a6ruL~Ni^dN70O)uvU|~Xe5dI^>>o>^Wzc)fvEZDX>7ya$bqp-t
zR*GE{_Tqp&Stwe!waB)j9a|PjxI^GO_*dw&Er}LnCC`b3SO0-qOXuRJyQ)lmhY5O}
zE`W9Scwgn>Lt#k4eHg)6@Urnc@I++~bPu`3&!z^%)6<*Zt7O<NN`PheU9qV~1{3Fe
z#9RB`!HIV6CfF^-5eK!%*LnrkymJvKZJmjhdmM<5d@0_jS0r6mdgJ44dlqfb1e5J+
zA!pY&cy4S<w#>VQVGHf(>d6C8*$LnV&p&I+Hq+p?EShvhA9V_NujQ;Ex-D&>vB`=M
z{v#ihC7h({m?aHO><hCD4cK$ejDG0u2Bz5};$7(|G|!UZ=9da=-cAdmBGDr%U#!XE
z9j$o$DCdl(SAp|ao;5vZ1!1EDgsD%maCPwma8a5p#1Ed#Gx!p)>La7k+NXJT<~glC
z*NbhRz|h|HA=od=1ixOZv4=n3SGe%K;GS$jx~l_ox39rP0~Lte0nX6yx<N(hdPUlV
zvojX+`B{n#)a%b-=+PbE5#J_kV-)w^DFi$p0HXiKN<S>UgdT$~NDH@fo_=R0)&8%5
z)?BLtt!4Z^`c_|Py!?<(mCl8|SAIb}=W_nXXQ?5xZ-dl?_u8T)G}M>je8Gf8zCH%`
zORR~+vzK&$pE0?WXvn}=g=O}0N6l4w%<5x>w9Ut#`-OH2$|3Sho#$AF?)ZYrkF8j*
z?kecqGZb<=Bhl~E6V74Z4nuUNA$hGqUd9`<(<?$SVv!j7rQ4G5{7SGKYD^XhrYxZB
zlpw1Nqa)4Uf~ZkcxLHG+*}dw;K998|*%_K_YC8Y@%h?MP^Z|>19s?u)--1Vc4{hu}
zf&035;1Gu^SlPCgrwc0u(ac<GU~&Sa8dK22E?WB5yAC154XSE%VOFIv(|&uFdOS-)
zzu<##tT6-E4&_|X^9`7B(uU=oc?WO8Poi#BB5b@d8`UbTNb>q++^=<y+P>pVsgPL^
zaH>XncatOP*KER3O$^Hue?iHoALw>mgc*T1IP2jwi1Kf{-&~>05|3O3X;KZ!tW98D
z%Oms%45o4J5xDV89(btrV%I*c0kiZ+xcK>3$XJ?7y-%-%s;%m9PFs&;blO9Tz6y!U
zI|mcFKLUIAf{)>q_)TWT=+t|7DcFHs{-wy`mnPte3>6lnxD8Z1xg)ICG<@=#b7=a1
z5h_j1*=X-x<nqP>R5tuAT-&6NBbTQ`gvWnyOV^HxaIm;`vpL&cZco~{$01`D&jJaF
zQl(pect`XfRH~Hmu2YpzcrXxlm2^O&-f6JNN`t9P4^Me^W89r~{26CWj%?Q;)f!6d
zuY*3zH+afD2zG8KMt#GH(egxb_A&0vG$rHITshPCEXId4fM2^j%N;rZL@BZEo{lf*
ztgr^0<6VPmIe&e&oe>FH{{bEM#S4x4BIYbUivD9AnTPsK>A0t(F(zvu<{TK0>;G}a
zl-eV@x~Bp@%;|xN%^D<u&qta^E(J%MY`ScQ4!dHbOC;mh!NMR-R#TXQ87-MYU63}>
zzkdR}L&QSJ8_rc(cn;ot83VGip~9O)Z@4|H11DlHBD!FJmCLm0%|?CJ*rAR)&l(e(
zGJ^HWy<zl4RWkXc13n%62Zl`hg^nub5Yh2dxK(1ynl!dx=aVmj=&h@es$VRfw?&TZ
zee(#T0_@nLgiP%FS(hApcM2!xb8q*Y5$IF77{qFa-~#U-#4F80H=jDXcq^Zatlva$
zSk>eF*%s{KB1QP9#P67gFXGPgN+dbV36<#!G*i}uC{KTs+HzmgoP6oHf4*4pgy$j7
z_J@~~oU!w^HdZ#*)3;W7gu0x-{uR0)+2<;yS(nkOTAgO7pAcAxIeTM03kt)BKt%RG
z0RsbZ^0RA@y`>xzCq01yVHV{4H!1px<(Q}MUN|3G1ND`?*}$SksPsGvKYs1T_rDF8
z#I0ONHB*5<%{$;mupFt>@}#_{fu^?Hi{Sgr&GX?U+!|TVyNHcAop<c^IiAPH_l*e5
z6j$+cNC3}%n?K<0(2fe>;3a#q*QpdD|LHRGL+VWS!vRvX_|NfPIe4TgQH$q%zmFjx
z(swC}^7?_&9V_rSpE}HZl?e?|m%(vvHZ9=qjFgumrm|IqU9XEpqk?=YuVPDHRBJNv
z3qL$?)tac(*|7LCzd_cMPe0vPCgxqG=>2OUjeFS#PP=}CH|IxTTw*W?CcJl&!5!4$
zw&VtPz2t667v}tz2Pv;OkHaDi^G}6gv$-+KGMzx@XgB&K?u3>3nq-u|4okl+!Y|hP
ztoF}-@GX;4t@Alpx!+eXJ19fHCEFq8<6VrLV#Y42Te2z7YS1cZ8V%Lq43orT5FS|v
zHR5MzpWTbCZoLL3T?f#U`&%j}M+(jE^4K+VAS8Zx36BTvKw`+xpYeOp+glk650=p5
zSIvp>3=_6MFAGb`B+_w8lTb7wp(rMc&-w0-#DNVrAynN9S{TE9S`I`t_$j24T(QxV
zM9%S^j$u>0>C5C`2+DQ<^F4MXEU_=vuDr}M&cTBI?L^q1p-k4Ud5691Jy7wyIr&i5
ziPBvK5JAK6+hsczTzs5XzBFTIzmCJ?dR-Q}HWo!;-wKk3i~^59r0aHS5F^j)d?s`k
zR$7Qis+T#Aa!tjA5o7tetr6!hcVN;%$#7I=%cholfcR`ThHcIl?EiZX`NQvl9N%L&
z{`)S(dYr<Kiq=F_@Lq}|kKq>$HI^hQ=T4g#$X48m-hRvItt<u}Po7KHR&xfIdo5k|
z?<$08?4zDp#dQ5sOYl4Q6SNa{QvEPBYzg5xsH)dk*_F-b5oQoGWF&~r+tR>DRc7ER
z<!;wrY?Hqoaa8>yB=Y`-)g(8t;qRoV1-Ve%^cLdo?#F7)({Nzpb?%bVV7{v?!O4<4
z8M7V>3yvv~)|oj_KJfvxJ1>WALpgW;(Gl#Pd<Cmk-9kx_0eBsH1Xa7%LTH8!e?EVc
zs_7V#fc^&&mRmDb1vfm*=L@rM%dyI+1R-zzCwO$~GUPpZ21|Y#kVc26!h{oxF*pAg
z#_U**Ka|z@`#_Ugz0zc-oy+0Qt6pS}lRD8JTP}=?pU8R151}bI6y3ftTHI>KB<8hL
zY2FVM&3Po<mG~FO*(tN~L4e`=x1b`w%S<-+#AJCt{yr43oo_6OV`Kx)EWHZpa_8aL
zrfw|v>;dl$y990dA;QS)52(Ffi4FGFXTw_6P|x0+iPRUsbW(xAe;=awM<AARFHMR6
zKJb=Hgyo;_V18~s1Yi0>3)b<>7H7Yo&ys<{0|Lr7tysJB5Y!$hLXR>Zlx-@M)=0V6
z_Kz}YT~37EuSz-J(UQ-Z6<G{<gr8saA+1ABP)Xntn$h17Wyg!8-%lElMOr<$P>J&f
z%lKKM;~WGH|AUw6t(mu~8e2;gh?1!ka?e-`())8TckpMFyJ<v<`CKF6@@Bf)`!FcC
z=L@-GtMQ>R&s@Y6h_e@{l2C>9pd6(ECKq@YvB-gEcwWFl&ZZF0C<7&ptKb@)gwJmk
z;T*kxFfzFWDoysF+wL#)*5Nde1b!EGRa-O7^?xyvvs8kdCV|Liy}RP@zQq5oCl)OK
zjmaNO*~Y3}_<31hR+DrS6s||XX9ou&ip!v0d7S?-(FBI_^W1}nD&**hm!S1Zl_mVq
z5+cLvAp7xmwDJp~w~q6MN>?mKCmw-uckQ84r%E_BNrSofvmw_>U)Hnu67n5~^u*B1
zaCWURS?G2Wy!A%YAkI|nRQxV%yRXXkv2ir&++T?Kc^n#v4}>v8w3^o&@wpv}3YUlv
zn&zRfT!}zq7w1y+qMmCPgU0|@afeq4c=t339cAaC*`gm7231kJg$ktgQa1ehybrTE
z_dDZm1NFG#1&_+gpgeC5p6A)2ie+}h=xz*MxZwwk(+!30cby<=s4Uzv&ysXjr3v+d
zAv?6C581kG9{k$Hy#=L<=(P)K%=A<r68yc5wx=$F#jG!T<F3cDdB4!j|2;K-%6AEY
zWzc#34gFTGNIWJxi>vM4K)>S^_%`@F8uR<g){8yZ@bEck1&c^@{XckQbrUMx*Mold
zT$nh?grx0Bf!0eSVZQMxym!cwh?0&MDDU3}Y7TnDa6f0iFFb;QDZR<Oa&7W^q5<=`
zqbBs<u@d5o&p^RK&c7J6gEMh9VAc8q5a75(2wt%e0`hkXE#2}gs_QJc%$>v?EoZ>m
zNreO(sH3)FvyeNd0bRBA*xtPN_;&Uw?6iDM=RfR2Msat#kAgLr2z7k6ZOi<eI7{lb
z6^uOf8z#s<$Ex?C(3H=2GUb;zQ<d-4Y*Wy?e+-pbyr*f$n?P3j1P_c*WpUd!Lq9)d
zCUoA%$}>T<BvGAi<Sr-oJUNo{u@#br#S3-QDZbjF%3g)pk%8N?puIvFTee9-^(dFP
z2zfAXm^KM$Jtl}g_|SyRT1*|tGguqOQ0;@~g|OQ_xX7IMoV9ZW4{doNLbXA-B-si*
zpR%y;UKJ8Q?-52!-vpKCo(sRm_ht1V+Pv3cNqmR%JXpkfsn4@uaFqC93E%MrEzxH#
z`$K3~T>zBz4S}h5E^+3DA((Ys1kKjhkd$&=FbTd4RvA@d6U!#fYN!(02JD8zSH_{2
z@CotNM=bI5rb!yUn31mmj+esFQmR5Q$`2CyN6^zA7qHE!8V_{qldzqqgw*FR#8Zcl
z#O3*lWaUUr(lG1+Jlww(Uh(eDU;f<jnK%Xf+gHFw`x~&y%#`@h0eEoTKF&^6ryuej
zL&izYQ#;izghr~vnM>bM>U<R(&iux*8$Nhv^+EKTeiJg*jKtOsZE6#A8RCr|q35u_
z*mtKf`Ltb`6stV~_F@-$PgTRh)&lyS=VBc9-J+GRmO~-2WWKR?g}WX~Y-P6{)4#@<
zs7(zx;dli+`eeg8PQJyWM&1X|KLrVQvSIxM8Two7g<7>NFm)6Wx6c`vw?GCLCfX5w
zKWF~$P-Z?iRzpdeqd09L?~ZSK?4I;M7d<EOSzY;4%>BKM=17y^mv1zd4UU6U-95st
zu{YrPNd?k1O_e2YdcnQ&Vfg2mJu4geo`yR92W6T$!dydJ;+wMxDx!_a@%2`$o=n5t
zwx&e1q(!QBREd1Q8ixzdHNmSnJ8|i9BPJTk8IQZD`x?F<4cmK=4x6dK#uRtK`LSuB
zoBIsk`R_%!r@BP+$wxRG$?u-Y=Y@PP2J5$E!1e)I_`Je|?AjNKsij_m%>0bFa7r01
zc@ig1n$n96m)nQKY<OpM?Nl1l=D-{tF)W;<kCnYrX**}+rN+#|;oL8iaC!-K+`Sh4
z7kR@T@7`q0up$`J-U)JP+5}>5qszdLG|Y4W80E-l!<gq7JG@!A+^#_!$NQtmV`ib-
zT4xwweG;$H>1h7Oo|S%|hyKS81FTWOOX*{9$9le_x0O+=tck*}MjbLF<1PHzs?E5(
z9Xbc)3V8`NP~m(APrLqw%B=I$>4qH{IpjQN{_4_o{9Gp<v>9J6ozJ;Ek>LGQAO1Mn
zGL^4NM0>|BY}s7{@7EP!Vbu%jQ)>!a`A$C|eg#!}%R3vNnuXMluC)8jQ+!#a#<Nw)
z^u4YI8MCMw6cl-Xb>lzC+mZ^B;uG-ZyBo|1=*=F63`R+IX;D?X3TXSLqv%6KkxP_5
z|9#gdLC2ioG0$MC9{7RLs>e{WV82u`h4cS=R?xI|&e(EE8Zj;+3gZk{fk)V4TE}yT
z?=Dd8s{R7!K67@!iJRQ1b^s)uVl?AfobDms=;oIQvy{zQe|hc^_q8VWF~yKJ=s)z)
z7sH=b190S?R)~19hYl&u!Do|2B*8aa$WNG$7KzPx)=7(4r}ZHN78nzE&fqWEQ6{}s
zyqWjEID`L64OMdMf%E>nOP=x<Q@#3N$+nL)cj9(hW%nE*%McIf>yUoDH}>e%6By%P
z3q2v)MDnvh(Ea-Z(zi!IP+xnP{d^~8++GeocWfcuZ6BI*rmCCiXR6)p!n>s3sJen2
zYD~8x=Q(q^?jh#^Z0f_Jy7S@tyi|0W@f=GtxhryE1&;JM4$jAu!TWm&z3FR7)P4T)
z-Ni{PU&k3hvnHcx0Tu6FY(eA~=JQPZZK<cqA-aE+3Mri1iS<tEOkvD^unjlC@|sEr
z%h1L#0Y9K*c$4tR_bIxJGhwdMcJvC@V#;x8aM9y2F8lNjR94xN#zQ`!(_}(YKN+#%
zyR=EHxjKYaPXo!C7O~{nA*p9oDmBzGC6!YS(w8|usLXl}UW}C^K5;|APvsrYZv7LM
z4C1*`$0}@{rV07)yRb2ipV9VA!{w>p@Xe|k49IYVpDWav$ooaXmFDB9^mrGZE3qdr
zeUw>~cNVOz;XUJi)%3B)0bC!s6&C2{<Hc}QW^-PJX{Sx538y4DaK$wU8W0A<LOB1y
z>J2(qe24B8=^*1Xu$Olx@a#;8bbOdLTOySsnX{vz(Ip*Tq^s~Oejj4HJ`zph_}N2u
zEls%^i@wfY_-D{B=r&D(CeAguG};OJy2+EzsrKaY5>uue!2L$i$I#Qc3ny_Wxc~Yw
zXmv6f#=Y{!l5z9JH@+y52=x(QJJ$_8XT*TvgjoDCBM(a-$w6#ltFY>W61j9S4CCBZ
zfs?%hsoppoWs{U~Sbs}qv^EQ`UH1Zy5blPGsONh(J|mmz2l+qlL7E!R!Y+vw*NBVY
zgQ+@u*l!M4t(wI%h;`uGoB=I|ZbGc;MVdSGg%G~^DsET#gHvSzup_e)TOx8{$3kne
z;eZiQ-PH#60W;8hZ!rAPIgf{@iooB(1P_nnna%^+%qL5U{pR<usa=JbJ4haT-kFk;
zyD5UC{kHU_P%BKjZ$-2}y`vJ{Bf`UxR!sc11uHqzLZ&+y;{SBu=h;RqH8YwXb5>+u
zdl|Ah1AoE?xgwam)QS{O<FovKW2oe9xx3B!gP2;eU)+D+BitIJ2+2nB%xbEmkQDV#
z@R_F0BscBA@vsNvjxwfw{Pjqn7vHyj+6t+LoWrDVBVIZ|j`bdqf*z+@>58wMV>j!p
zIAf$4Yjod))ra2W#+c90ko6Ke+l~pIrPV^*z=hCnR35rY(xC5DO%@S#N06m+v)K3`
z$Yqm+#>gBD=zU6DH9#I_-R#S3%qt<{*G70SPmUzO2qEm4FOAiXf;$&^cS4;%qw3`F
zKu`s~m}fx3Dt^;q&rDoqPz6VgZe!S2J3%BlOvi7qB3<Tdp+s{OtZ3(+JEa5EQF5HN
zt_~8y-%>Ct7!Kh}nn2mkf;CUt#xwAzgaD%lVvowHLI$fr_i?6d_7D?dq<xTnsA~j$
z=TW>vIRjD$3v}-?TV@<skB;4ZU!YP710C)_ZXY`;z8i?k=NPimksG1$j2tT}MNAwX
zgAp$-ad+57DtQz@-EQBd?YR!Hpymhuq&kGW<4h%<$&DI+2Yhb#hW5l!5V`F#JiJ+t
zHYa5$z5NMJ@8v)D?r-#TqYYafY{c$8eT0`(5u49)&*Don6!V_tj$=HJ)Ug3S%j`(H
z+I}dDJ57Cb`OI#Q1{1BAPP;dov7l*VaZX_h%yX;9qgSV3VEARswY!5ZX+hu;XCoM;
zOaT`kFZ$=nLkyi{%bxcf#rBRMX!V{Xw2pLy!{01GI>HH6GyVXK*2X!H4&c9pDjfLX
z45S5VF{$SZD9cHtok9Ab@uUnBW{3pw(lp4uCJ}5)DzH_)H=KXn1G!~N!Vo)0mRt6Y
zme`Geg`@(aQoh1!-tigp>O9YoUPC9jU)cM3DY{G_CxrSw!}B)E%<_pGYY6-bsTodm
zbD{*NjkP6feI3Z~Yl^J(4aNNrwMchk9n4vw%Z3#CW5m_n__#Hbf8I1%Cf~nEZXba;
zeDAzwwGmms`FCHVy0NcXDaOv~poVqPI8s~)Rnz7|>2m%I(B33ee)vh7uEj#s;0)NS
zRR9MsTN5`|H*idd5j6Zv$iY%=;@LA0Yo7XI+qTp2u+0{}78?-RIuk(!8nIPl5S8to
zC&YK&gQ_Lz_<^%Sv>&LmjT^`DbK)J$QC4KO#fA{e_qsJ1uV8|r4_2nv3oj$M@50KF
zbWNWGwd>ok`_BwKe4_~~-kXwXM<~jv=nxgYBcATh`|1rTFt%ESSV^K_%)#%-Obv<5
zvO}6+&_%<JZxVj(&Bl@tQ}HE5W!!h6FUbwLMy==a^Yhow_-N@H*f<~_^SC!*;&}rS
zopv8nCvU|IPbKK}q6~7q&rw-9-zx`X;q-SRVl-n0XWv9)MFV$)_xuxP@OR<+ceU8^
z{yk?zc0jf}cZuh`6npIU#;7KDG@5>&#_c%>Z=3>Q*k&W9)o4x*YJJC2MSt)}+>g&X
zjfnq$a!|I*9e=)eU{!UiLH6v5czQp++ZeBn)fe`_uc%}UJM>zR+7&?Lku|7l-49g@
z>Otf;tLWP7!JI{R11uWK5XbF;YGuxKHj03pyZb;tB>>zed%>(o4e}sOmrSWKCbsf=
z@I0UIiE8-neWahTV7fW+U#-NRmspa3XVoaI+leug+i+#1B1tW{DnwdIvE<Hlp+++e
zLYq17^|g~A2@a$2bLL|owHVa7_8KD|YvFB=o2a?53Bt94QD~F#**AAnZuZ9XLklrr
z>l%=Js~5Ho8;zxY3CNC`F#C{aki%zZm9HjITjvunNqi3Lg?!8@cnf~~{Tch~t+3jl
z5yEDF$J|pn!pO{UymZBi4cU7f^5)uO!^EdBlmD?9sm9WCGNAj(OBl+tII%wR7_h|)
zs>T~ay-GG#PrL}pCVBWejq?@HUWAcbwu9--2JGx9!QTT7Szh#8(3);dGRHjNVL;w_
z6XeL$PmwT?ckhSw(_{ISp=gwQoGyHsi(cxdag~M+>nyj!nwAc1QD_INsCXgnzAty#
zp5wcrLlD+$FIZSqg8wBM_6xa*g9G%KO8Ze<e$$wxU+o7HuV``i)Ma=r(*chL8M1XF
z6xru`ewNUW#mZ6Vso}I-+;LHpZCGzbCUCEJ%o-=y&fR#cH@AYt?@mbFVaBejY7#f|
zcnE4$V7YzOsJCo5y~NK7j=j=o;$AD(5pWdq{T{$$1@0D|?<1t-T66ZU1Cb>bOJ|2k
z@ifnij-SFgix+ya_JJqy&CSJlS>29IPV0um*>ddkgdT8i{sieoCn3wb3X?UCL37hC
zeAqM)hV(rGCT|MC^|>xfH#-S)oPR;*+IsxQ-5^u_v|;k}<9JojWJWD@v?Py+eLs{4
z->+J*Ytv1bjm}TdmovtaR(a+!%NTNw)j(BOUucw5U;_;LkiFlVp=v7cZHCtg(y_PT
z+v>fjZ<+~h*6%>}!x3Y$W5BDd77}XjW5To5bi%k6aIlgiKNgs?tG>H1u|k_oPtM~k
z8Am3st4JIrr|7QR+%>Os8CB<VCiVnVLEMvs<vlk+benVFY`3B0kG3?-Cr{`y@Z@Lg
zYH=swd6_<POnE+^BQ2`}|2~ztam`ZRA(Ze;dq2*-eT7kWk0I`!2rC0O3b8-W3ITd6
z@yBR8l8e2C(nj8)T~LYrKIxJ44QpWdc3a}%^qn^E8;#q84x;O*6}Y?6lr6DTX6efr
z&W!m8N$ZoL%XuG4hPl#T@6Cuz@1wXSsTgZIZ^LE13X}|5FU}1oRC};7)-1k?pE*ZT
zmOdK}uQ&vMa&E$ePe-6illKRvCPUa}d(2a5ftj2w??x-}gnSxSW^bYeOKpf=81Ec6
z_QNq!p0Qc{4QJ%-#e40#<iNkZ=%sNHT#YB9(>I=NZ&?AIy^c_Wk{s@H%fl7H`8Z&N
z8A<i2rOMm4V5z?$<d_umcW*onFWHSFLpU?Y>!I*6bSt*TD`S(>O>m=L(AX0Q0dLc!
zrm<S2PmMaW+TVdCn&ZV)Nq6wXgLLqmR4$CHcE<AQk6_@{=g{$yKhx^&xYuiZ!Sk^@
z!1NIRp8Lzi&Xp7lTrNRFbuvCpZinU{dtl+2R#>>M975_OFvR69B&6<!&anMdH`A0Y
zzKGB<m3wp|t;DLgEZE6gc0{&xJAJzHC!8Ll&B{8z(9g=MY{urlnEUk-wdyxWSoq@)
zI3|BZtNLFk4gUhCj+EiBaZ{il+LD0AuVT{i0lmM4;YjVtsL=TfFAOkZx;-yIs%M9q
zq2-YFM1iF)uNS;yQ)q69ALr|G56<TQ!0oOCCI`l%hvE;qUssd-=dD6o8^sv7|0?Rt
zvt;k1`5oTOlXi^ep0>sYp?mggRA2Z6Z|drj(*x2VE|uqHXU%|;$0g#t8Y$?m?*wJC
z8CG0MK?6B=T-n~2?33q?jIYY<e8fYH3T}i+x}3A-mqAx^-h3iwrG)T%__hzW%(46;
zJPK4{<E@UP;xi-SXdDQ!1+!pByAAO&<4)QmPsFKjlITnGp8{q$a3;cZq4RixFr!vP
z#^1b+RzkZpV@xf5!Cjm6x#u8w;weG8w*yQ=(s7eNcfEi3E*8zYPqh!sf_u>pq|rqM
zvh7uvM$u=Kh+et(GgT!|uJgIrMn`(~uncXQe9&`C4K+16jiQXl?%sh%sVX!;y)+X4
zytici`yEA{S`$)o^Mbf-o-&IqE)=x?jHUkf&2ap6Q<8K-4V`b?#n6EUO!)Z*t^ab*
z)s<?@cjRvHnm=%`!<KaO9KjnuWmw){ouprEM1!sV7;tblO&_5NlUx2kpq~LV>Ttt3
zPZwfzRujBzc}8WUYQ)`VdNJ(*oXcwOz+`{li5C^wv9i%O>Egei0a+~fQI*ottQs8h
z@g2;{;SOz$!-9vyG-0)89T+b%BW}l<X~1GDnmWUq`khRKyUW{PxsC$yIX4c<Ie%C-
zOhXu#8VwpR?8&92K49j!15y=o=!xZJcu(J$jT>zWBd)76|2J!(G&lrg`TF?5pcSWc
zCbODiGAa-45h_NRlV$~NSaw96P4277!ZOv^+v}F3A*>q7s@`Pg?mF0aN1Zq>d?aXT
z_8|c;qJ+-*Ep+iAg6laSJUXQUqgQ-|wS_8-7|IcoDd8yj)Kc`fT$yO{d}iMdW<+`J
zBxqjjfG5^pg+v~EoIT8rDP%_Dlq0wC*+U6-I3{7<*atA&MV&NUZiDj&KVY-z6wXAx
zO69*;5Ra)^sME;LRHbrs$p+rL9%|2o&w9jL`jS3fI|-e)#(?A24-|HJ!!BtN1aj|Q
ztcf~qte1c!wiF`=ccXGof}nYt^W>NsopmD#OMO1#`3G-c@BC$GMfwQ|XFt#qjRxWC
z*j!Ay;|o83UPr6*-D0EQOo;zogaNN_2$QB=LT%o22$SCA{*%M#?~nvtCZnM79`{?6
z?iA3(7>;c`ghs!nLaUNLBy8<N{}``A+nY*gWfUrSjbDbQ6Zx5WjflJ!sgO2l99Z1x
zM4KNspgHs$gbmT5sw$4G%f$j`&f#-XpIg-9sh{}TL=6_&rGNv={=wFMn(S`r3rH_X
zL9;+j(7LBaL=o=`PWH1PHX(i4rOvgOQLBcrn!LO4>>R{sny^Ht+c5Az6h;^x6twR0
zZ1bW&;u-b|1e(sFf}#vv+^4|MZbedIbQMEx^gyclZ}GC%_t9-8_ka=Jt=@TGcz){{
zI-N2ns>^OcE%$wBH&qMgTziw2QJ)dU^kuP))iizJ7`Ug?i<D)x(w8mug3otVNWBpu
zL`Uoe*^_r-7wz4`sQ=`Nj-nPR89xW|<F7ygcQk0G$H2_cmFW0yKlNy97kcxZ`QLq}
zBz1NXt?WERm#usYZH1h-rtc*Dx0pXmLlU4w_DcBI-G@vrxdf?;R|>wxm*}%DO(HpS
zO*(a^I}B|92;~#F`?>8IwV(g{|9LokvvCB@%>D~WqKUZSsuhDL*5EN|I6XR~0-Off
zk-_)v2|QR1D`fhtM)Mf#sC6JYlYXFDvJn~Zz=YUr;5~_Yo?F=*0J8HTbPL!r`w>z&
zIopKyOB+Ds;Od^a^a+Gt55{ZN*Kx!ZV`5mVK&JayGjnHkw&raPHr=}mw|sS2TFV=}
zbuk7{{C5F|*Z;tKC#{G_??Ync0~#zp9YK;Sq1T?RfJslKSR7)(CVa|)>%(-2p~hQG
zS{h5U9$B$0SrUqrR>1HKGvZiq89%P;P1a20&dIEYP;z1ujc^YJk2SNU4yTmJ6({bQ
z>uEsW(Ypl6Yai;fS&PMeHew$7VK}Y)0%p98#`Mkt96LG*cSs!Air^!d_M!k{BfEs$
zGc{;y6OOej2H?3reMnltT)aDEFGl`(0@8p3;OLP6A&=u>dFwl@e$U-q$1<Vckt4{8
zj7Y5ZMW{<Zh9T9bVbAe1SiANH*l%>>=eM<jLOAcf9o+{8<tITFm?*?`>offmeVPAa
zz{WEg>_HZyY^xoP*Rv#%p`4TL*9}9~7_(U+@9@%zVetH~4f(NHoz#C;hlQb8m^gbd
zCS9wdjfZpTg^7x+Wl<s2SiAwlWmfFz+GpVCX9gc<?!wNG4ML9VHgvwz0_s7-z{mS4
zj@$49Gj=}Y=aft6sGLfxXUSkI=eyaIro+YGX5{n6+bAsChue1>lTB*6WP{T!v_F}E
zutEVH?OxDdYj_^KR*o6DBDI-Vf|B{YQS<LUST?T?p1E_M`P^#p&aphB^Nlm0t<K<~
z%?2cQ{~Ma&XO4R~pJB6}11qUh5VD^>!$h8?Q1y8SY2RZ(`=301`>964m%c$;i@xwe
z?<?+-)WZ&?EBxzUz-iY_h~63AIsEt#Zq2f0Hl@xu!SX%W45rwf*8-j&zEKOEyU;rH
zF5NgS553*T3x};&f#Fu3_xj5-oo_F~#lW+eka`mxZ|<h9sk`vMl}hA{{tgty-6?9H
zsfcC&+~M^2H_$x937t*0<LoeN5@66lZQE0U$=j0Z0tv<%xIu=#GVEOb7Bd#9vq$Aq
z@I%W7%sG1*S|(OOs<d8uw9h+eFAfFE8@!v){zh2-!Gv7nGZQPGQ7O|tN1c}_vl$nj
z;&slcU0u2ZI^Uh56Cd;;-rEkqCC^YaQ&Ywjj=UG*ejDaSE0D^8uaGQOC0XsNM0q`*
zY2EH8wAZVnKRW<uFawTm{s!c<DoL6;12V09AZ_jhObrnUtrcShmy7@CZBJ9udF~<2
zb&iGr4Qm*maSY%1*`v!Opu!?k=6NDRNIj`6EZAa3UNAMbchpaCS;sRHKAYXWk44a9
zEh!klhElJGcfomvIg8NXow|V;)apVWZCw^BEE&b`D<T>H9aLn=yg!&~xk>ol#^0?c
z&cLIDDlmMhPPT@A#<uJ8@O~YCZeI98YhoUNB)dONm>MXINwmQeO{Gxp?9S(V-RN)g
z2G_5?f=34SVKnFnx;{-qPwfe4@M;ro5^>+jt6((uzlu#^H^94(9lIOb50z$i!QIkg
zi0Z!uL#~x$=j@AkeR3&A#~U!w>KL@zIa^qG>L?_)48fP9Y*}E122r5uEX_uhW#3g}
zW;HG_f4MbT*sBRDJI|u%sl<KT*_{Xn+Hj^x5vcIzV#nrcXyx}N%L%72M{*ASCMIL&
z??99^&5(w5%J6X6NT^M(0LjG(!l3O%xa56rcJz-f^LZiS`6w--e@G8%H>W}Jv|~_G
zHI3GMx&_l?-eB<2-H=*UPEU>3V>x%OL(j<X=x-eh{l96!)XO3`kfKXQSm>~5^>WDP
zSsI(mFCf3?F|<8@1xs{#Go@u8U{p|F(keHA*0%k^k|Dmru*gUl(yswbIcLEr#g65N
zsj#rJp&-mtCGk(rAc`izuox+d9E;ueb<30Zphh^n?=Y18z9UF}wn`TnsF820kD#P#
zJ6$PXg0At0(D~d{%ssXq%oXH_|IGt<;;9l_9X=Cpddibs?b#sdcZy#8r%j?1kK%+$
z{OgHbpz_C5*ffhX3#{bHsJcU#X`2Tw9-9R}&bv^a7$Y3HXiS>m3k=Rafr}>=LNq9`
zpb7$8PgUWh3-7VTXdSYxZTRNTNA!MefL3+8!8uh1nqTUmE+-Fn^*MvybN0iUeVj$}
zHxS-*D6lzB7r@T{Ej|k~VkKuU3eDYSI3~6XqR#}OI4uRfS#kzpmMTVl%LN^?beuR`
zm&{LMxWr0_RXL`^;3Z~+AHtcxz7FK^yxx~`TOt~GSP*$Gk{<oSv+Cm_rSDFtv1SZ`
z_}M3L!QM`kI29E&?UiG*A8U}@r4ho&otZq7xsw*|QDUy#B@z&(hz7A!v2UXV6GiQF
zf1jdA9yu0+>y~KTr8yBxj${dqm2$kZFqmfa^n;S;A<|y5U+{d2A?NqH!U?-{lynTE
z_W9r7ALqwMuxr@%t{L)H8L;-qp%4~WP1iJNlig1FoOK;U-yBwACGT%bKX=#@E9n?2
z3YzHV8od=Ynsu1by&p8*sRKeo4Vl$|RN7X?Kw;cG)SAK1pcOW3w5y05;#{FCzjer_
zOPrJ8GE?Yw_y{Ho{$kXiCs4~>0VP?9QjbL*!eQg}aDTB9DOhI0HhMjQ&l)8d*QJ8<
zo^Y1xr&(f|j|c~mIDG1T2|X9wqsq<&v@lB-bEAl0`?>~usl9{zv2}b#bPeQO^+>V4
zCR^XYU1Vzlgrc3?4`Pu+cctBeTgP`|MTioy_*)LGhlUE5x<BIKO?J%n$zO2nj)$bd
zJ%Xnbqun9{_TQ8%2*iqITtAL3&UbNZs5<OEBS(%~RATt6Q;_{xiwtN{AZt1r@q=j#
zjM-cV`?Ji+pYTk0UU-)8hE5AFy{qW@#t0Avo);HKF({d(A{^Vy=dXX<ApN)xCSBdd
zJ(%8{jr)%pMZTs#ABl*I__0v%{uFX}HY~}$g$e64grEYRX?&b47IiHYD#I-VS!6I3
zDHarYq>98|M}}bfDHUdx_=w{-eBsmBGZ?x46?p77l6pHQ(rZ2LIE<f9WefAguKOBc
zXR#4!*}os!8hRjHV-FM`D~6Z8$EmmIAoXt<jZ4~9$Q~=+Pir0lM*7{fd&nLLUU3OT
zbC-i-_Fdtp^9|fF+L%n8ZH{3-4uQXII6loUgVtV;XlM5=^oX{kTD&VJxpJ~7B4Q`>
z<)5jl+cuo>_XnL%lt4-Dcj@HvN(BG?sNG^p2ky86+6I0gcRvqv^R5b$f0cq&%Ti(E
zI|X)6>OdB)v?lrK4<YyURv_}4WX#lO{PU~M=bM+LnIF19^8K<@{P8`6b&e9;k{$^%
zi$~(hdS~bu-vvr*XW=f{IgGX$g5}e^u>Y1xc&1B=A1*gyLdg!n`Na`TxFx~&pEx7j
zHeM|It0w&E_y?KWn&DmV-YlVy4qZ`T!NOJq!ou(t@M0H1`_5(R@%*xMkm&`Kg<7I_
z`x*GL@d8G1cWLjra>VcBZZvvZDFmmCz<?1I;^dnDP?WV9V!ub>$r~o5WwR9va#ds{
zJ>F9L4ZERVFDtTv-+3$dS%N0ngeNW>fe3vs-nVU~rC+|_reu3kIVKe!mPg>9z*AsD
z3h*<il2y;%p}vz1lyokX`d)v52OE@0Wmy(Y)k_A^1$q3QtV*8o=isA<dC=vk1XA5P
zNUA+a{k|4Kz{UU}Z&Nu$^bN$j4pqEXEJjoNL#UlsA=rN1jRC*&q>>9)sLVqmG>xmm
zYtDMi+4(DU@?6@8znbK9trO;le?{G|?{Vg+C$J}3n;3>#5Zh)$Cel0zwv|JnI8TWg
zKfZ^}*OZuDvLXrCRwS0mrHS2`j4E_a0<F*+XmiV&DHH~v_qtcqcZ@yXIb8(=?ufW>
zjEX{43rCJGf$s66u<rsB;`C3Qn0ZYE$7h+m<Cp@kUS;4d(?i@n_5g#|+@R-Q`~>Am
zDKu<Lh~Qb*Aw=D-fWU1Q#Aw<l+HsuoST`RAqqJ$LxZZ{ot~evas$~ndpEA*<V;ikZ
zpClw5yeoKQ43surEubm<`RCmpM#DC7=KT0MIG>+QLgRAabk;MNRBBD)9MvG+0x|RJ
zK$NYllY0J(L-m7w*r%29EX<-t*ua@?9$ok8?@`?EF*j7|+y0U+nPbON7ao?@e=Nf>
zZATz>^h+A@x&u0LYOrd!CYq=`fy(&vg6~ZauzA-6Y3rWDuXC!zanvzT-)0Ki`Mqi|
zpEK)PpT(WH8^f3J{b}GlxXJUx5^X13Int0s32!h~s1RIw@1nzp_aZ;<tB~Z#ji{uC
z(4M7$$`!kXROfj3DYhq~jC4p2%E2>F_<QGmi%_&t#BxUEg3DVSob_9ew0`k|u$-@S
zbw>k8(i-TxvxV3zmG5&swZP*{47TqX2`+F5bLaIFEZ_!|kx-cYv<6BB)k??iuqPY+
zFGF{WfD5%Az<i!RQI0=Hqif%SN9Yc*$-3K+a41(ut?kVjB&BqmivVt0?U;|^e=zgE
zLQsVcNSakm&kwCdlsD$Rfg!HT@{Z$6`y!f|H4L=}JQo`4GN9;{DGBLq%p6M+!1&t_
zEIq)xGGz*^;GaI}<Q>jW3(T0y*G+J2C+|IM*)LY|UVxRcYlN_WqhWRHO7#3~igAUD
z;h!R6SS{!AD)toydVR#q1NZTv@(d6S>=w6UGMYUuLf^3m1=+0(Y19kARL?Ye91f$X
zX|VgwE^F4J?}ahT`-4m`OWc3vTBz)(pm85e*vR%~SYaR{g?$3ScZe29E_4(rrkRl8
z+-ckAvK8rXGJ(I7_-xyKD~1*rV@qBh44taa{KMA3rD@)<ZO$GHGIj;chgTtF&MeGN
zWRUhhj?ToN%I@pK<|*?$&r_2$iF2<lO_C-}QYk8xM(SxEPePJ}Bt%I_qEs@Rdu>UQ
zkR(Y+Dv452lvH~6@BIg!=R?lfd$0BVUY9H5I9}QghhMis;cH1)*OHEtD(+y1=`S1|
z#aJ4N0q|lT<9S^O1NZHfaA;oycGNHr&4~4!-1AIsz4LSC<PG5Eo=f7u_GWIzA}Ly;
zv_$Z5%6&Ei-i`7@*{Esr3)8%n=$wTI!To411l@WA1KEtP>2nqJ4;NtX3vJ@u#h8m9
zve;*22!riy;D$(v#GK4Q;fVvBVAEd+atwxsy+M#Xt^xyx889DV3YOTJ2}T;|QHdkp
zF`UN1gdOW4&h{6KExL~rk4ckzDq2Kyq#QM^mgQU@{DK(9IW3;`2GGzJ4#qp;Zf9}2
z>p1gZ$|qpSiEZ2@K@w){4@8wt5pMn%!gx;N)Nj;U+<)i^B*&`K#I`<IbFm)wNb3^c
zq$p9#ofL=)8_kcMt3h(TqBuECV;X$u5CpJ$>QD~r5B<mX-Ew_!jJby74?V)OkLob%
zToFvzcnKfme?^%a+SJk{0ZOKu3RK^m<}~|FP%O8eKfLB1)D<(2_A4#0gX@rKCQSqK
z;;?6M6P(vKCRzao<VcbtQT%lpzBvAdvYqB|P+phiv70!T%5|chU<F&9^+{mZSH4F{
zo~C4&^8PWhSbNV8#U6YT>{L38?u-9Ip;r(jb;Uv6m-ArhXG}GZUWcKyPpG^pAH`BH
zIBBZBz`f&6gPZ|#q1{YCi`i%KN2~#j&p!(G%4_&csRHi$Ugp^y{S(~QsY32KX)gPe
z5nY`vL`QeVI`L1z^#`JHrH=`59PxnDjdq177{+}wry%1S2k{zJ{P(ZS6Mj;jzc^W*
z+`Yi^{7XZ5?xr+}ST+(Cjng4yi8yh*vmb_MUBHhyd(h|FZG5ZI1SvVC+;}<W&?_3?
zlpjdY+@>rpbM09u%wl;fu_(THSShw0_v2F9t1;576luIUjX!*ekC)HD8QEfF>{2-r
z>Qaw~x~-}ArCH#Tm>_6se9n5=k=%*`iqQv535FTLhBZxC>z%_LAE89vZoCbtx&!c~
z;01orF(VJ8RjAXL8k|>f1m~7W5#O3<KJY~nulmjvjz5qj?qB}F`mXa3@wEew)tQo%
z!51Qd&s+4J^<I>DNRj%trSeHlA24mKA!uzkq@ygFad*aP^iN>@nMpDNuTV$4Vx&o?
zc15D{AAK7ApEBubHARET%waUOgO?kH+(~Bz5;fNuo*uBKBN=lvNs{$<w!BA2_TOG5
zD@L`4XXBR2OZY0%f{a@<3MY?IrdLsg$d??&9Xs9duZ$H9+x3PwZR*C1U#V<n)gby4
zy9X`Rd|}?0by(sr%k6m41$Fw?bo_r|C>rI473oVL=xr}^$Q8kc1{s<u+sn0IH)c#a
zX&6=AkE1I8;!x2=7~U^MqS^_UHd}*=s=q>f&O5GMFCVu~F((dRK0x%SW2o^~lg>$E
z-H69i(Ce=ymZ&`ywZ){1x|kOzci}>4W4)&nBh6{KauCF7SfIaF9A5{$uwuRhaX9b`
zrsv7i#!PEWS>(ofv}VJ$Dar6MfbBs2>oMp^Cwybw5LIbAPUdq1xJ=r?xtNXMBLYmJ
z#9T)t)_#OLIO`~^94XDRbBx2^+5`KftcgdS3>q9!Bfh0s{HUc9aah@LR6cwQey^4z
zF3<09Q?(z%j))8x+one(?-&v(wFPkXv@{(^f5UGpnaJ|$In1Zo!7B&cfNR@%Tqb2h
z9XyZ2ISVb48>PXuoN#4M`qQGcIA`!@{fn@pzj@gXeLD5pWte_oI=Z|cCtABziRw8p
z9(R8L-*L7Ojpip~?k)>n?10cIf9ZSZew_i0p`o04QU%oR=;X6*y@U0lZ>Z#&h}F_+
z=s7G3Y)@<g`{FU!RPY)c#F+2ustDxv*}-@X3p(Naa4>oQ5yxr1fu;YF@U(LjsxC31
zJz1;y+R-w29c8I-)O@b^{(11S=;ZJ1Hl!gu^9t{j;7t#2<vg;N!Om63p>^t2FfDm1
zs$D4L2Clk;V(AiSRP#es`)Qoa*#c<4ElJPz7!dcwUVO0Kn&fp2hn~^vA=@w#QXAvo
z)0d<8sO&TD36`VtN^ZlNnQd^WS(m!D$6;jH7udF@5Iu~|AoWZutYW#BHP_OZvw1%M
zuss;}n_AIjjS8enQId@CH>B@vHR;F5R+y)qiz}@xi0yM-tQVgKnVXhiU-~aRGn_dE
zd)fEoxk@hjtQw7pYK4R_#sV;JbNV>v6{Lz^2Gc2P_@F2?I^OXF{<IoKHibmtLh&AG
z{J9Cj`)a^q6r0Z+YXO%vL;SF6BjU107Pm|=MP-(y%2hvuePgv~R$n!GFLj62_VOe&
zu?o^=E`+Gk6s}NsoO85h^VX+UY#zdM-&%7)bqwo#eh*@~w)xO~d?tjwIEpD>*_p4X
z#0xhrfs?lRxNN2*xinmyG&MOfMwbV#e%^?-rX`@x9~+WdSp(j;&G0Vs>^THZz>4O#
zypLQiW-v#Lca$C-^ZXM!&Zy=cZyD3DhQEv>tBpf13(<AoduVs~f##QcvFFGcuFkI+
zww^R27lLikZO1)Mkgr6)dz#SjIXA&QtdFtG@*GvJSTZ++9Idmv1ySnJoaQCw@=_n<
zS-PDpU|t}xYYl=3*>lhn!p^JUD(pDXjOD`~VMBTyQcqb@^`8;_CbI-bOg@R3MHgV+
zu?nz16v&S<5aNiqw_ulRjWrW3XqzZnpr5Zs7hWxfz_kM2;e9F&J=3I%A~mRIf(g}r
zphs1k+px_okC$rB#L@rBkoVUNsOcC*#_F=*$66SZf`hrZLQ|Xgnz(Z%y2kvk0vop9
z9K<cFk3hSd32nT|`~|Oy@R7Y4`5$w{)I1En<TGabfg;|z&YG6&e8$JL#6jeaTxjWc
zLSMD@kjpr-PkbV=WP*vH{-Xj;`Oa9;@)aPb-^A;WzXCGl2@rL43B;|q4p;gj@fFL^
zejG}Nfz9G*&gSDDuVOgUZWDS&--Zly?L?Q|kGQ7(-Kf`WO!_W7Mcu(~d?VkDBu9cY
z4P$+jk=L*j#sFTK2ai{(kTuh`VxPbYloh)m<M<75e$@pvvYND3w21ebrHqGv9>zJJ
z<e)A)6H646L{r#yP3Ct$jGNR6-FMxgVafnZ$vKbhD-@wN;R3fQu^j?${ICE0)X~-T
zD;!*~9=O|zbl(Df;^=;zvv9Gb`|c}}#y10;zQk3GffTr!WJPmWw&+$*42Cln$%txs
zdb;lzej0NW<7O1YeSJm7iyIIr)-$JY&jLO_V-4=rTZ2zNv8=SXG3iO$&vkwn!cvDW
z&|4XewnsU%jhzTylQkeex)u7eZr~HPAL%f;h>Lw5<Axv!GNi^>$iG&=H?_NvwY8gZ
zCX`6r3-+1O@fN5K&*4HWm5J;zNoqIj0FG74z&7RCT*~$5oLYxC-5WO=PD-nh%n${7
zbRFv`Xw2bga~?Kr>VfiBF}m)fF;Qc#aH&n7uyL3!7WY1d&Z0OzWqzHgc>D)&epw6$
zuCjkG`%}E{#y&xg=m(k>c=C<+y79VO5p=CRip3h|L1niUY0^B*?!5V68=wnvbJjub
z)e!6%mCbL=lq22!KG^<E28-D!Chf2l^om}h!ov43=A$0<+O7$XW{>#5o%2Cktq`yO
z=AmoBebn`y1N9n{aJh~aRbsn_;8iShR-`2`O*Y~GrhLcZ+pN1e%nDPdyg--PyF^p}
zBtu!NHjVmg$!Er0g!r$6d{IL#im6yQMOY}&;suRxaIrFGCrp7U=2=*J{1Kdd@)<3&
zTwq7sJLax#h1(-|^qtr#(BAe7Gy8SWSNy){^<X_le7%jz&G%r`OjR5pp}g4nUM|z}
zBe$-`h^)M7MR!ixfnx?0Xk~E$i2X?7_J3l`4%X2%o1;Y*wzA!wBnLgSbZ}tJTDZF5
z9L9fK3bt;~_zbxgP#<B3aif_cDdi%TIeK7^#YFDLA#KKJH~{s>r-O&A8CSWr2qN$Z
zW~l5y#~1gwO_g6DX4hv}=r#&l?(BpU@tWk+dB$?u*DCUmDB^PqG-2_&JT?y&Lhkcu
z&TTsf8#ijv(&f8Rp|y&!|HEK^({FfxhUE&<jqyjw4Nwo+f^GrV_?JtWA0d4`X0BrW
z8~yuuP)q{yEqcIlOb}#shJoMSROqp=qSJlnfa%P~BGV75e2!iv+V_~#lGypY(3*GB
zIVnX77u@2`9abk6!prEIS`J(u%Z~@&gvhKq9J}9|wk=l2j)+PO_*Q`_I<=ybss7xK
z%J<;$WHUE<r8%{)V!L>q4cL7AH_lC}#UqZoM7TLs;8$}U+^n2nrIip5?hb|wpC(Xf
zR;LLqy2SL*c~RvAZR+X59BpNj1^T6yw8O%Jnm*B_!h^#Fo>NXj?cHbGx|3mek#*m^
zHvQmCH)wEst1fcVhDub9vx7-q5~RdOn>X2*#dhsW;VtV+!X^p&(KZM6Ub@cf{=3J;
zwCqL?RTDlz)rQOrzK(y-sgXyG;-q{-DBg1FLfhI;{NDy8GS4#x^V$8k&Eq#8wz`%#
z?#qLkgHh;kVgX*h9)YHVmi(xG37mW@1UtUfqoW{~`!Og<D@}UAUivIVeWq*?$-xRY
zDQc|1vNIJcu?C(((D~(9+x7{!2Yf=!8|o;?492KtmIXhw9RgXNGoR08d;0b44G_dn
z@Aig4V>f)G{TP3~I*#7!*TUED%$1uy2RaQu@k>OSWb`#nYIcxiQ=Kh|;cj^xV);8$
zmn`6(T*2+`iZt_J7QgQNO>{Ytg?DSDh)XkY5r5Pn&~qGUE3;hXPfKdd*l<xShoZn-
zpqk6}Gp{P)doJDN64h^kT<kk;{4EpW(yuIdxR|l(#FwJiCkIeiTL>=pi}^naru6h5
zL)!gZiT;%~rRlDUr1V!fs9I63#73L5yFCx51*nl@{ia0UG7Ej)>k;8ccaU>j3l%p$
z@eYg!xWc&rPa3F@;^bCrd$f{Un94l4XI^si826~;;&xG^SwDAwojehK7xB<`7N$=5
zi9!tJ9azWSp`Q8RepKP^!(EuI6ah7A`535c&U)jfWaYDH?D04X*|$`nr<r*Xb*-rG
zqio(~)C?|c;vRG<)r7E46?(8!7k8{pg@IxHxK}a=x=mi8=~_cxSMD<NpSbdTz9B7n
z>L&7<836k~eucI%rQA}+^*zOQt|gH-1oMiRFXF~O7#TE-MD>m2b7Pls-E*X{S6`lP
zwLOJ4+U!k07C?39UyRI%0Kx2U;Q22V?$5Fy4t6hK^q$AK<t@uLV>~}RSDDmJhz2oJ
zMaSh6*c;=VGqi0y4i1brQPS=xkhavO_UjkI@S!fOEHt6Y`XwkR?gq!bf3Phtn`I(b
zprAGs_J(Z5_IsLmsglhZls~{>J4>o-`4T;^dSH7_4*LJS2)4JULDgZF9XQ$zrfW`c
z@pJn5wk_Jcx8_#(ETKz|*_aSH=>&fL?Hic6)&olNW^<~u-}6C(58-#IE(x8U0AYU3
zy!<1^$~eD6uy?BoUgfPxqx3n5^m`4N*{7j8?=1@F^cQ*eyMu69qG+7MOBkB?3ql%L
zCh+k@h-y-%D!*h%WzPqYT%<=bt6rhA?I0XmXh3!{#?Xk%doV@voFG-{F-{%R2?K?y
zG~t36DgSmKR5=CsHm3)?9CJ{4LMK}8HKkiwh9cK=8)O73(8gi6aEiGWQD^<AlAUFu
zj0?3e_%sr>Tx1y+bA4)W?#6lv_5Ay3DQM)v_&B39`Nr|(+-SIs$($v57hpxZqf+6~
zhBQ1l{3;BXwQ*0ED3gWEvo37SbF#md#0~w|08{UaKw^>=c8vaw-5>O*2J7JZ-O!*V
zjQ4fRI0LvOZ6c-`Ul>wtMO}xQkv8XJUgBc-xbG9&RWcr^?~20ANsqX?%YPu)do8$_
zwPD4f`EcsDG5Pp43;w*ahQKF1xcU*xQ9OIVerJVzL-z`t^S2F41L81m%K`}dat)HQ
z<?xmu22Jn07396ELzhb&7b8&u{;Om73tRwxv-84WCrP@h)DHuPhrt`kNc6h&gxmkT
z86vEuDflu5<g7c;IqWx|9WoYt9nSD#o+1&}&4on$7EC*^2LD^HNHVW?@>=E}uwrTf
zFYT{Ps?#~hJu(Y~=TC6WmrThyC`8rc`%!n_c8DK+l{c>Wi76xZ;LI1MY>)Pn+s|f1
zoeyLof64>+HsL-5t&^n<tVa@cWgQl_%j5YtS+ZQ^1*96+L19D&FXp^Q(6%pAVBb*5
zsj>W*OMI4KRLvZCtG*foL*me*4sh_)Pv{M>A;yRHqgItAX&)a7nl}Q#%Zcr_T9vS?
zU6s7(VVSGOCeB!?85?D~`I3!QyhGt*;9K>m^~1NYDAb6qnPN^cdl<e*(<5kajH|Ss
zL7T?{FwIhhl+ZYVOZ-UA@%<ler(!(l9O=dRRnPHkGjq0EYLWQ~+QdbA1{b;ID{O51
ziaz`0$=YNKn)2%u=bf+_e;Y`%4CZZKJ5_@IIc5kG^p1kJa4AgqwH0zLpK*WlWNBNE
zA{XAv&X0I0Jk2`)UW=7!><wd>lCMUhiq*l_=epp3{QA<b_n{!(fOO8C2O}7LugZt@
zLjT1&sjxHC<EA^f{B7czOdeoK^G2?FxB=Zd!+`WW55)Vc*?i!f3ZLVrL#AI*gEpNw
zP`+OR&LI|5JC=3rI%Lt|nlycM{xajFl=E{<EQn5m76AoK8k=u~!G*IhqjUgGH&+R)
zcqO8}H37q0dhqZ(Rhlv_oh!C)$Kcrpbj$B^kQrzN_S1p;JJXu^f^u<7`8-JJd52;*
zH3iLc7>9Sg9IZ%t#i_o_=LZ7&xhZ+-#E3b6=GsdV7jJoi&((S?!PTOSS0eB;O@Z-;
zS;k}0NPO{In+TqMWgZd6YaXeAmlY&P-Q+HiU=FudFZPzGpNV(y1{#aMhkc)oN$re2
zuB7S*pCS@N??f;3h}MLGE9JcQ_d3Qm-;Z{!L6DhTfI(A=!CAi>1)mJ))Zza?ZJ!)D
zvCWcpUoe5elveDV*UqPGKMa<0(_lqQA@-@Q!k*wAoUL;ps+T+fLC{0!`6|r`&GvB3
znR?_1^JxexMq*E{HXmp&&2Jf@0I8x1H0(2?J}ft}Wd!T*7V5J1-v!aw1Wn>GcL{fB
zdIIkM^cQ}<ip4a2Iq?0|hQp%npd-oPx;92(ut=GTon6Iu1ebywW0?lKOQD`XhSXkc
z#76DYVDjHTOii<<7Q5xBZ(fGLL)6Y~ohwPkJ#B+zA1ksjrh;)_wYl=ui}23abo8rW
z+}{6|@w<-blM$nrqV3L6c=<d#+vaQ2x1B8ix1*kSA2|r`8hG~mJcwg2S&=Y<aA@yj
z-J*B${F!VWn!9%oXRY)Z0>wA8{LfX#CJ7(b4IJR_8+T*)H!Uhl)u`A6BavNL3e3>4
zq=3(GbH!C$;cr7Ec3uRbLJlV}Z2(HHxr<DFHu3wr9;2Rt7-Kk0baFXAAC@LpU?Nn&
zVU;}GG^!Hf<u`$Lyb#^2>bbbMCQMo$3%53W2UY)BaBQgsQP!V?*N8Elb;E|dy|oTK
zV)}8@hcED*x!7dgR7v-VUQ9}spu$8fs@(hmb_+j){?BMod2U5b7D$njz$ow?8^EW$
zzRhPUYoh&};hbg29b8j47Tc!92zH4WW4WQ156n@)xMkz8B>Ep7yjp|Cx4%Kj(I<ko
zbuT!h7wQD;Smu9pAviNm|G1qt^uTrrI_J#@NSitfhPK4P-fA^W-p%;CIj{K0(@&xD
zuoiU){|)~wP$QeAHA!822jd8jg7~|OAtmxQpa10;_+HjR;U_my+t-`C!E{CPHra~G
z#+cJl6CdLG%k$7>c(urMTQ`4?xu>s=mn8<V^5pTx*LX}yg*s12fwXf*$nR$?gjIiG
z<tB6HUga>z_bHs3QGxAmonWl(ZM?_2EmC5(G}E=7^JA=<A2WY4zgZCOKBZ4J)Apn5
z&k*!@)WWxs@pypsnoPp2N&ZpBoEvxw>q8}}@b4>;oJtdOJzNHv?m28`*ul+SsYNS(
zD^fQrKWJS!9w)QT_@PP*#^D_0)HZiJ_w&6DNxaYjS)-oe9C=Ap-EPlET)d2Z`fR%v
zqd*cZ{-Uo|hamJ7%gWmP!?p;<hZLC73gc#e(5D2&q|fm2pPymvgmrwFp*4kdVi2cO
z51Y?OlYZu$NNxN9zGtKOl!9G+OLRO;$X|$I+gSI-c`=G@kb!7fDN@~f5$F7%sK`F=
zS`sJFuyz@W%dnqKs}{^$Oi0+FM84-k4(~cM2brZ9P9@6G>8v-U!|r#c?2eE!{j<n5
z{4JDhive=05z|&MKI@+^;2Y$`kH0KO;&n%Z`P)7C-+o;>IPnZ<OBX`V^2=PMcQQ6_
zktOeM8It1Vp?J93fHv}4)I?E>Bo1SHCAS4|=w>J8*eKD)<@LO8l?@+R*ahAxb6_u2
z^Y#M^c`3sa@JKuhP0LpEk1y!ZU93wg*YuEg%&X_-Bq>u%n>mm(S(_dc$kKy*M&Kki
z`#LGbypvMX;b*ofO<W;K|L56A$kn9Lz}U8;VRYxB0K5<?N5>SKl2yj+Z`t`FRFBqx
z{R8(w%*l$Yk6sHQjNv=+%w5z@R;RvJrTn1qDEcm9yPIfn8f*~?QyD86zjBxoUnlS#
z`CQZzDTygTme_Xgxj-T+5@v1Er*dNN_(iOvP-vk^|GYa7y=>mn<~xNGE+`dvsxq#D
zp)PH!d&PUL)T5%BQ|O>_97<GbMEMaawCso#J>jHIqb5z~yz<}k>z#Q#_QQrmO4wsX
zB|*pUr#RJ13Gj^Zt5jn;xXw~9_*TO1L>6`YbUT3DlQsO6u1_eo(o1CG*oA_>?Kt&r
zA4Vu1hLXE=yw?*oYR!707h1dEKn&ybb%yej@7`nm&{w!UzX$E_Jmyr3zH#+pA?(g}
zkDnwZNyLu$I~o2bON%<ph{?$(m}ag_<Noc%w7q(eG))F0Mc<j5BMQy82|+<tg-)IO
z4NSGS;hKp3=&K>b1(PVo_}O4><X<j3<{6guB*K=8bCCR8jzl`u!_fPFJl?KE+^@xh
z%hX8dbdiS#W`;zpOvCB0L;=*SV_A*k>!9679~aW^VEdmAI4`ciprx<j$(3}>b)Ck!
zeewq4Y)D@3Hlbhdh>?S8r@-<3G0yCUBKhXH1WSI*=f>uz;=A=HP|R+QVDFSm;Q6u+
z?H08%MmY$C8yrQ&Csv~<EFB}vSHje{<FKKe@tN&ufxl-uU;OGWh~28<bqD_NzRv8s
z)a?mhvO1K%I-Ie-j<$nKPQFOO>?ic?xPmV4f(2=_<mmb}mEg(d>5sakY1{S5oESbY
zD*0zGTAvt%krUrSvF=qU?5g5zZBBu(@+{9=vJ8t;2UM!1L&0hdqNH{TE5y?vF}(rm
zB%i<+#UQx<=Q4A><-pM;Y6L2ef{TtgyC-VX(2q$FALEF(UCMCDb17nWSC0z)&Ip>w
zIb3pIlD0kb#OWW^a9#NYG|b-z=2Va7LjhyQ{t^gh*}&e%z4&3c0a0!Kg|RK+7`wz8
z<SH*Q25l*hz&6H=Y~qK)LXkW(qQb(1EE5)j2kUyVNwo!w|K5RD3-zeoo?_U&oQE9?
zo<d(V;QB>RLGiaPbdq=c$|ZI9LZC#NjPApMwYM>{CI|lpYm$z-UI=_;$IYA{gt@Y>
zv2KAmRpyf+*E*1O%cPlOy&2rc)q-EY3bh|$#FdqffW(%OsG8{p*;EZXdTzjTYguwk
zMu(X0ap83|%}K*QFDyQ3MlEOPqwfh>&gDIG<cI@QPn-$6!`{R2k;hQ|;6ZFld4{Ti
zXF)jDguYZw#;DDQp*!meD7yN9U86A_p%V+jb4Q&*&d3w5*^6O!89Q%|x1e$gg}985
zXf#}wl)QQ>*ttC(71n(Oq1hHLO?)zQmZpkSq!h`8zqt_Dw1=}yT7hZG{xG9Vi@1)U
z5O``nmw7<~mb9zV+Nf-t;r|k|lv|+0aR^m6oaAYMItknN3Kv~teZ{i9jC;j=1-G>6
zII~7{-hCTy+enfx<BkHJItrt8jfnQEYmo9{DK<Bpz^S+IK%8bWI!8W(#-nwZc|ZZ4
zK4v|zop<2u-e9zERp)fW`uN%vqw(I^YSdIkG?+h(RR1hTe<vdt<+TrlYAZxCX5G*p
z--yN!s~A&(bqTzM;HV`<h5m~Lg)#vccrAfzf9{A{v8FV6LKJF=TeBmg8{}Lba^nq*
z>5To)&`rLOKQL%X>dStk#0V+6(_tE@E<X-YIw_pgPfxJ)afGQg*)XxrnkcY7dQqx8
zY4#8!J6D*4Tw)|IVb%)0Atu!JWGDz<lolalAW9uS0&?PC`P!v__}TIbB>95~{cc);
z$E$DL9$tscF4Llx(#PS;lha_&dBW7otmAq+6WWFraupe`dG9^5AUB+K-m_eBgN7;@
zDmAC$8AnQei4_KVc|zygc#Qeb3nfGM1?s`EQ1ZBl4@_Uh*G<}wQjN)YIwA!zOd5rE
zt@&og{D?~N;9E0#!89pJAX^|#%ddQcgHG~vIsYCTJED1IT~E|Dz6%4!Wn9mjPJUtS
z8uZ>OgtUx#&^9Z9Q}qjCu4xreJ=nmPXwCv%nJv)Q;R9d#7&COu1yRt#RA~E_hn59n
zz;0$dL>BZjhP4&!+^z)ko~FVDbj6S3PvhW<dtkmzijMnx4H}KV;%eqET@|wsOD|eb
zsd!0RT2=ubfdO1>-)JacjJW~tk>C*4!5pTq_%;VBh;LtuiO!W^SeFED|01}v2bn88
zUI@+al!;4jHka~g1Lr)Z7=4^2h;7#a{K&KZ=w~_9eJ4$qZ@&QVv}D;FZ=NV_=rh!4
zm%)!TO?s(Em)MVqgz1Yvpi9aw&UV89cPi*9wir$aPY|auIfdvnMwU=HeVSUD52yUi
z$fIe7B-vevr}y1NO}z*V^m~RgFDa9X%OzY}&T}r8sPX<*YoQZf;+Az1bkmtid?Lxb
zcDE#G^~o9FuRMh2)9o0eUxQxy!uEpZSKwiX9%*S0LZ#i3q;TwezEWug&U`OU8Z>@^
z%?D)?U93mSR^@=73d^T!XwlC5Q5fH@g4;Q98Yc0J6Fxr8TfLJc4kO!P&Gih_YO*0q
zcDAvO<5SKkPmvr~(W6c``%$^z3sf49z(J?q5crt+z*0V-!3dU{O^AnA?5wwK)kHC&
zL80yTblziv8vS%hmG*66XZ^=Cu5SU$o(=BDQwODqv#t$wHcf!f|FlV{LqD`<Bw*m;
zDinOIgH?lhxaNI7X!VC<e0V0FX?%v4*sk-|vu9BDRED0JqEB{BW8WLGnfQwBViJuT
zphR*k*!f)mm4n}LYAfUaPSD~nJSu_6b$=oFG4nJ&(V>UF{=%!4teYv)qsGfm0S2nj
zr!6XE@v`42HuDY_|1XqNP1okUq%C2~zevETO4R?4JQSw$T;51a2;4RuiZ`9d5|Jx!
zzdw>MabG2n-Kt9C67xXJYM7IPsRCW!b`mhmist&y<QxX;z|y1uwLMwyf4wycn)eg(
z7O#g?(?uv^yo2^QBWPT4gTFq#0Y>cn3{9D50gPFtAvh1dJSYU)VXE|*K$(o+u0~aL
ze7O1S=Z*|V{Bg4auKZVs0q@LNR;-YVu+ITqwezStMwWLeDL|h!YK%4d7fVB$!P83w
z3pJTTfFFXO(|u4Dast{^3Pt+Sk0D^KGC6y|kPhy8iA!$=VU$%czh1Hy_20F#uCyWP
zT%<}<U%Y`e<GZ10+8{U_t%h%gy%@CM1AZuzCc{0XnE|d6V#lllv4nZ>%A*`h2EzEA
z31_i%Up=_&3F6e39D@oiSxEUDgK5{M!mgW)t91Acg1P}6{MHAz$_ha?LY!=G)+E2<
z6-emyr{MWri3YLvXNnQy$X@w?zM6GBqcFgHKO4rHjAV0xRMEyQ%)`t$vGbQHkq?tD
zV9DDy(W<w#n0oyKhD<+$33Xz`n$2RnJ0Bw6SETLCLnAh6G`nvKnY;ZL_RL<uxjc%2
zzdKlVspl#0H_wFDZ7YM}!<mC~mj=J^W;!+;xDPj74JnslMYgZW!k3F=h;4u>)`)(?
zs{Y?_@Ubq97|Sxz)f>>lL!T^~@({;6UBeyr=b`7C1l3pSz%dUh(CtYyD2~$v;kAaM
zhvmv-Z$Ubw8254OANGS4yT^!0x;aG^#iFW1246pA74q3{QE}`tI5|O!jGOirU&UEb
zubN7J!21ts>byj;xs5olPJ%8xRf7wv#Hi^@U#?Al3}oEy1iRlN+_pIvjEhw0Ji~Ty
z{}T;Ae!hiUucWB2$1&D9`R?TSQVhZ#-{HjKRk?|?w8(b0BTx24)VZxdT9<9d7Wd<z
zs$sz82EF_LeAqhuyLkJg2FV(@%Qyn1IH(hiO$&x_qVzrH>viP6h~0-+nGT!)JJF__
z<$L3FNaOJgSbky#&aoZOe10-?{m4tG8*vIB4i@8L^)75N+KfJ4`qX#daj@=uj_cP)
z!ickx&=gPsB_aDoA+1_uOZaiNk9)>VLNlVf^Cs%VvVOeXe7@!80$BK`7F{gTMUGvU
zvDDL+PTW0=jC^T8V_G7>sUjFh$lX9ieQ6lsSPwf|2B7nm2WEw<QlZ5(-gj7(C~6AN
zW_#+SWX)R9ke35~9jixl-{0msdyaC=<whiFf)xZ-i8xEmN#O9i1>4?ef&Qs;;6Iza
z<;6|$ynHJLo2~)h#7|s{hBTy1nh9Uq>(R9Qt{_EgFIUdENfmd~xKY{h;Ks7*4%`2t
zR;C#Jq4EzdviD)%r4bNe91FP?Y~ORDl=t18z&-F%Amtww=?!^t;(I-V?<_dMoIk90
zLI<GM=L-L8mNNOqnC`WU9bm{a5oEp>LFs|pSlC_8#lK-^Htfdk>w0L~^NY{Yy#p1@
z-^MZb%l?;Vq03r~9JU(3sP%r_LA6ZK2+*M`c_T8kN{`-^Fd(_|WBJGpSD;qal{+n!
zfo&Ftp!1FrIzT)A{M(O5`sJz2h#YW}rTi8%9oje188@FSz@cL@G&uVl%n6PGx;+Tv
z4{1Q;#INXA8xMqI9jK=T5I&DNdM4R}+;T^_x<a3<7@<X0Ub}*BDnY!<k8-ZAdVuZd
z?&3hmG$>`6nk8*Av~Ac`G}A7{VT@xns@#I{;Fk%8_gYbpormyP&JDE7Ny2d-3o)KC
zCtm6y?(MRqE04(2KNrJb<mg(Ask{Ti8Z-1y@8E83x`It{v8a%4M&FiZ;<>wwll064
zk~%VAwsQ(D9NUYTQ&J&jQ8T1DbmCBAH8`;OkH_4PoVijBw1vm>Vs6osu9}`j#ix@%
zhxHx5bw%Kx*}Aw>VhwCN(FhOYEl5z^LwLBB`2jj5c{873#NK}{Cl{H@6&i%_XWN;x
z?07qu(&xhGE53kGN%k(cl!Codsk|`!vuNWNHPV(6ED~<$akBMhOgs6H>>ZZI<@SW}
zE|34i<U$Ej@_9KQ9Hj#~=GMf&{}B$T&4F=pe;~WT8|HObP?2LZ)*Q$M1(qLkTg!GP
zYdv9bj~&iGVnGh6tB~iJl2rZ23oQ9~oePD_Q1T@J-GiI3dY&<Q$qw;ub`gB_btTB{
zXLI@@RjL|10=<0ha_iqx*rxCiM!G7~o;j+V+wF_oi>+p4bMz@Zz90cj&E5z$B*kK(
z#cXi9G!+9z8xy~qESuYwC6F_(<_x)ixc*Qt^yX?auZBHJe%7WJ%zv?*OOHTUF~F5>
z*p74TW}$yiEa&y|7gv~mm%Ev%PtNQyBrc@`y!UlGh;Dj-Do2HAzsQyg-d2E7n&Es?
zK0@9lBbvYb0bKg3Kn#P2fsnB_Th44p(T@UfGk(OG8p`oMqQ8Uo)W?uglqRqq`3Z6i
zrAWwm10q;&PS01F&@d+-aAx;}v#P21+uDNceB=yn?U#6;Pb_;YZb^%;-DRKaE`i@J
zDYP%?<^osF=T~X{1#{6Y@R}xp9*L@0$g_EA;vQIat_tG12Enc2Iv@2R1chnxq9i+Y
z7}Cs#+4gF*e}*~<obJH6oK&F&gK;=MMUVVhJq)#rQlL=nG|p2`#-p8DWQFeoTsoQM
zD<@3ES+?2CSI7Ro@Aabnw*nMX$a2g(ycIfYtmxpy8hABDj#OxdbJLBq&^^5ad~c}p
zty)JgSXmPi_KA}TGFLF(0a<@buV@rm1(|{_e%F_9oO<~qs1EgU#9xW++of5yCkj+s
zi+TNtU%=$&TioV*6(rtoMTtdU;M-^k8ujcV*SI!=kNS8L9G~9g&g>B<{@<2@i%FRv
za@}!MD1HGw`{TKiJ4ex*{SMU&enD@mJiV)CNPax(h0Yty5gBELDS?l;nHu6$?76Ne
zv-vje-4x6l?%a$`J8E$DL?O$HNK&`ZEYP|kM$!`8AV29CVDC6|@a}{CJ9^<onGxB+
z*g)Ts95CS2PYj*811r2|f$?z)X={aGRLcH8SAT<xbs8=VFTm3BW;niIoOl>*;SKhS
zFzB`b`|n-F3b&=8`r4Y?7LvlUk<#EI+r!oOd!wA<Le^LNj*mCUlSO%Qq|&_vZaM$L
zl6Y@{XL1wNvh1PI`X~C^YEawVpSdX48s0IhoAVMFVtb_$t+l$y?Y4=<<Yh5zt{;PK
z65*ok!Dx`Gw}-bjhIH{56|iP+?--R5$X?fgQFG>lo9H8NDHFr?ndvB2`>mj@ekDek
zkLBXWoa207IrCSFt?8zLD#(2^o2whz&OF@uQ0xB}4|M90tW`JQ>n(A*d*xHC`lUd+
zJr99Z`c1TCyNptE=I^ju!TIi)#z!?Lf$B6nPO5(yeoK!>Ek2%Q&!q^*T<l$*id5o2
z8g8j#9h|7qoY~wA6x!$Tt9;I5c%CucKFNxh3}^SCC>c6xzdvkQdKQK;S6-0ycr+@L
zpvNCGp8Avh*e0h!Q-)588a+Zdm+mVpm;HecALzr!XKbkS3^r%G(<{iGpoW=P#y9;c
zg}T38xaIRW?4J>fa+xjM&ga7cKg@yLz#+a-><?PxC1KRBSDaBBV-g%@7T|YU=r2$M
zsJV_VdXGepbVS%7&vYKC79^0(4QAbA4Aw+>!Nb+kG(laSn)`&~w45~Db~p-Rb<-i;
zjCq2S8lmG!5xBThQEF`uevGWe*9I(a@yZ<HH!cUwdjP?+S<fZIk{-#dMZ3D&*b~;v
z#hkB1TirMOBQPeWhO0QS##<bcr6}AZLEBh=eDaVCv17TvYRUf~LjNGBvl&s!;s}A|
zS4r@lr9x-SHK$8184)?BSlGavsgBP83Rjl!byNMY`;-lA{i{JbW0yepGr;;^ry!59
zk6eE~LgyP5u>UQ~uhlp5Ml0DaeWWh=zEqkqJc9YCYd836_4VK;^oFe)7;B}u0ON}u
zVL$@Q&n9#VI{$vdOhGow>9HBpy0K{bq6vMgeBr<xOCsaC4etetk^NUfU`jR1qYOWX
zdoF3P&-rSuCEO7U<_fXZ*M!=$J;doE15$T41ma)Z=Boodp?ldf(6&~fr&9Ik8qEO+
za=j0x^>+oHv&E>tUJ4hMat>U!?-Vt#y{Bl?O_=yK5^c4Sf2jQex21fBP`~T&i}Cgs
z?#aTnXAOvlcry0flLW<TS$f-EihOno#l3}T{6tGLIzl%Y#%JkLn_hDgdtQs4B=^xn
zDFZ@_UV-AM!>Dmbh8Q$ik^TDZ@NGjPRH-vYs_r9X;UBoJUIE70$3X7&bH<yz#?>Z&
z<@^Qr`G`j&!M)}Vgs?2>kv>aOwmTX0&3;3#D(eV5VLZM^W@JIR4GHr+hhukI(!gW0
zak*?X7%phTt+8xwtDI95ZuJ1N?bT`e-!u%GdIu{OvHSwNpX7-yz*6fl+`G^d!ksRF
zs;x3Di(kjM<!89PiFf!Yc|+d5<0+RC76ofE>d@ola6Y^)8tn%!fT6@`nDs$}Mz_fk
zr(qVvYwQ&+cV7UPvx>3e9#x4BpH!fut)$4oC{?_>Hx@sNi_t=@LVog7SrW=vK0VQ4
ze75`%81fb3w&k}$Z<j35`ga)z`cLwStUu7bn*A+a-NXDR@+i%?8WFDnQq}4hZ(5al
zJ-Er;nZ@q*Ro|en@jI_9)(m^T%aEf=;$+}R3`UW8AUh=m{dXOOmY-AcXz&xp6iLKS
zuZ8$YN|g+(ZpKpPoBPEa>OFNc!1RbVc>it1rt1$N*nx4e<P5oOEq^fd-xd6nDNjsx
zXcJvd7nGR8!n3~u4t<rOjUpj<7{zi?f~|a@rxAb6rV@uvurot;0xxm74o5Qo-VWO{
z5c=AJrr&>#8JBA@bYm_Y($*)wa{{=+8e15!Fr-GQW;DF}4mc?>{=)J^&P8tqmlzfY
z9p0=HT^=e5tn=sX^n#$hEdlbCEvU+DOX`*Oo9hlYrq*koLBci0n%0-2G3&2@B=ZJt
zJixr1OOm*GTa!U*a2|}Y*C0>4b%^qqVA$2FKy0QI;GDbfG0o{Y{?T`X_}i)wcHugt
z%@%^RwJK@-DS|eaxfnL|0hNB$p!=a1*ur+8TbvHVAsr>UY_BSrSoIV0a#B#X@gC-l
z|A2u;>-jhTJx32saq8{1iupk0>3?pv^we?-T8FL7`E9@*ZPH@9=!5)^a24wPrUSc`
z!@)D_E%STM23xNn?!iuLs%x?UqyiZ8&D#+wPqRkDTUC;8*Myo%?uZvEv0?H>P`Hx=
zosF9@Vtg?!t@t17@E<7DyP#{^JGfMN7wi8j&_1>OC=v4(BinD`HC1sUa?AsXxCapV
z!i4_X$@0l!6!MrCOi%SFI?id~1;<*UDTLkqCN0GxT?6uYJL@VbyW{;Kc@mft&Mg?N
zPrhbr)6y6L{{6(58JpJ#uJ%ikPET>{seRAOP5r@TI+$XKvjp7jtwAx@C;Z-d(R}7l
z70PW*!s+^fprgfhSlbQx#~ExF*`P*u>hEKoOkLWV@E(t}<f4Rp1HO%T3oU^L_@GyX
zJa(0$;SoonishC2Vy}VJT1CiJN#}frw+I%sRO3xgJ(^x2LH~gwG2hVumD$$ldMX**
zr&Z&Oo0@dPHDl^>w@WZa=^vWfy%c4wbi$y)By5r%fNq~4=xH5BeGHXJ9GitHJ<%rJ
z*Tk{G@G>-8%FqLSHE3Gz!~M~^fG%<Vg2<hXFgW`hJlhwFiCzV8^W|B*6?GHtEtaRb
zzazOZ0fqQ&bR%>xumPno*1Oww5#JvB0t#h~uzkKbF@F37pC41Av(4nm$>el&JD-6;
z{buZBb%GN6FzzM$8OVaOm{J-huv~NjE5EG9JjDg@b-xbLU%DJ~Jw`AmWIwEnQKZ49
z%oX*Q^~g4{%y-vyIA>%;DvzInnh+QC-s}!#ljJdaiy004EZ~<k8L*y^8TI88_^-D$
z$e`m8luYKi37bt|bY(i4cRdHc-E42xJp~27KEOW(eY$w|3DDeOiE59`Nr!_DwFPM!
zRc(Y4G9%Exw;VG|?{dD?BHq|83>KTlGjDTm(Z~~zal)Y#uy;5Ei>%JzeRkFiPiCy>
z<&qdbY&XbdMqpCoGHCC;hxwt`adG-t5X*eUtqCZH+N5%R&c&(F<(Ysk>&m&5!%Do;
zo?IO4%7@901|;kBbL_t)Mn)~lf<ZrX8g(?1zt?{S?S)DxReg>1HZuys5-8_8c`k_E
z;RWvgWtg3}3wk6!qyNg=TzUB!RC7CzhUXN~aMdP!H6%uI7cjq*>Ov5+TvC|Q_DPU2
z<vIAu{1oK>+=>ggT|=*>Dcq5L(U>^Ck?lu2!BBN9%g5$&hEC%k$tD4A*WbsQeIFpo
zYAm<3-I}hmkRrz=t;xR-F(OxXjE}!K#9uDdqzU2@#BUR09WLkqnX&|U@x+iElo^NN
z`uCy4=roRh&GvtP9*T<BCc$BgFOcbKio$b2MIEXQXnw5%;@Nw;Sh@moHd+xsFFCj{
zJ_Ace{1ogL{J@0|{c*-$Gg=)U3!jxONmxr4-}lUz9z!0>&#^AjX-~}ToXB$3W#}dp
zq0$6fQggTwMr`bW&+_I(jN66Zy@wIkGi9)<{Uthn&Bt7YZJhShD<JlDvlE^zLb0EB
zMLk`cL1(TBi7I5@E3cQ}Tx&_1w8{btpWI@+=4(#&zx^<EVGp?6zQt>=V0(t7hn%j)
zc|LI?<2#&*;}3WlkqXbRn7Q;5*X9~7DjD-yG&9ANRLcqg^;V(PstgR*UWWE(&VzqG
z`x_{97C43lz|C}R^3+6z-CIZEp(z&RoP90YtB&Hj2kp>aem2f$Gq+YQ3B#_aWBe64
z_+gd9SOJt9ecyssL{(wU<gF+i)$4RCR+j3z$kCK=Q?9(j22bamK}FD^cIgk{eT)Q6
zNz=fFn`U_AIqRwzx{D4HHCk4<08a^3h~o}JIxHa*Q|M!ETy`JEEdRzl1&)|yJP1SU
zdZ2xZF{nS@$L8n(lRUm`W4zZaKFv{#P8hx#JHH(Pul=9+Zngv3a$q<vuVi~!Ee*IZ
zPz|2f3Lt;+Bj_=A=RAATF>}#m+>0LAsLb++rKUK2h9ynecm!fEX5;d*=iq(tFh+8v
zkkWgRH~y?cm%U=&rC&bqVmCXO?^%_f^Fbe-Yj@*2?kvj4zJ%-zE1~I5BDg4S74$Pk
z`Tap-a=cfU*h<Yo|3hEV@%IO`o>7LqY&Hz1(gB(#f#xh5YFJ*3J4a8$KT+C{eD^#C
zEWCmhne20&TFs>_y~R!VH3_{34XK4Rk6~YJ!KJyLmzx_1o^$)a<+&QQRXoQ>ja$mO
zcB#`|XO<0%1>Q|&1;js7hH10!<G7hD4<c4BC{g+(5WW`)S|$%++p;SD*6wUr*f<aG
zzhUR}w=R&p%66|7g<#)W!#M>g(${aQun$%;hPeoHGk$T+zw<GIedmZ0SuTF&48CIH
zd7kTyK^$`trCJhUD7_HoC-*QO4|{`uTgKgGZ?c}}yZIf*(s4pYJan$S#_8s3P^W0-
zclzOlhUZT)7UMiou2=xKvvvu%ul)=Gr_9Mdbvfb~Gz$_#jzW~Y5u0^oiH?m>C;Kli
z!=M$K^z;H{@@s<yiK@KC*L`{b=E);)%fK=4J3SiTif=-pg9Z&*Ptmt#F3c=2A>;mJ
zpom<CVK;uF?zN3r?0*&lE{agFNQM^6mqXp>b6}qS1xA?}!i-!aA`~3pr(gMjV(Cdv
zcD*c<Q(1z`|Gj5f<QiU8`6>6gK!I4FJ_kP{{-D^XA5M)fSNUz@KY=BC_jwzHp{vyi
zIDA8%M(e7R4$n&Xx;+9rM>O!89@ayo@G%7b(#O9Q1L&L3AVQxbkb1iqo~~CXn^-?p
z7&9NlZnpD&>?~WID@}z8RU)-1sx)fuaQ;rO1brH#Lz>Gpi1c)Idj66gExeh|m9*Ru
zjC@;wndx!RYo<u#7B-^xhJ09g!HmA#twZ%w9|L`4L)`-gF;I3oxAL_VIdO-*Ctjp-
zbvs+(mPQ(+4ErSd*mVq#-8hc*kvCAxcUDmd>wVk%D%08KxwwAk8z_-T<1b3x!g1=X
ze{x0y!E;q<lJ*{$E^!G=?aTOf_cjcaImSn2Bys`lzCGRG4pxVcfcj_?dic0J?Wy^P
zQp4|Jmiu!w4q$!Ibe0vFQvu$G>(M3SvLMp>1YCa<2F}(SanID7c*jGPNczap0g*YK
zt2c~P&Z&o{4`;x&s2z@|>9U>VF5W@^7sk)N$u~Bg<l;6x0Z08-UNtm<`7rB5ZQg$T
zktIcV<YpO`tAB*%Rn=H+tcjsp+0Ls&3Ia{mq3it&_}Eefdl^R~G;{*y)(N=JYlac8
z>R7(#>Rx_Uy)|{|FBAl}b#N;$Ta%S<#L1X6D{|c<A9n9~3VT;(a%<vUaJu3Z%q!o5
zCGN=}82cRjf187LpC<LSc*J?DodB0#j-uQ!JI>F@8G^Me(8JlB^Eh@J8+Km-msM8<
zl5XNeZb<`r-phx$$*J(W$dX)8?1n<SM2I=}3w}(gh1jmsFk#IRxSidCpCrY}iVgi}
zJ28kWS?I(&&Sp7>$yA_P@dd3Xy@d2C9U5glpQ|zJM?7*K_xj)BC%ih&`la$@&1VYb
zc5EjnEGX(rO9S^0W3XMg2g6){bD>(Fz%}?8OksYd;P(e0+&mw`lug;*gzYnj+`zD2
zmMSU=(Z%bCz!<VHI`|_-)QO|#si!c<#}C#v7}5R;Guk7mfwt)iRQIJ2N-gT3xcnv@
zkI<$mDatf?1LG4bUV~5Ci8x&UH@dC905$DZAl=P+**<ybE;OMd<YOQz)D2A9LU3>W
zQ%-R3DNGo<2WIVIU7WPFpy)XbY@6p{1s?(~4>#bkHdB%>AwlmM8j>w*$H1RB6)N@Y
zAx^WDA>-IQUq@1nPVA||y+>A}t<)^$kCh@)3nig>lO^$8GJu9E%i%#s3jRmL$ZIVj
zTDP`>@5{w}%;IxczvepR#M=-r<7d3@hg8vegDCj3ZzD?G*@<G_FIo5HBv)g%97`Nz
zK>CdZNn1V*0`vCqlV@dP{lfuF@re|z%DV@V35?AeUp+~jGMAUjzDa*NmcZuIazt$9
zF43A(XCYALDen^M!=-eL;Z<Kv<e$i7<M03}>Rrpal%>*WpJT`GjcVaC_(;&++Q9Be
zA~3eR0Vx-G&cnu?Q?I!TQ(sTOmW4J{xZ^U68XQE&okqCn^9&T(ZeT3FUQwv~eQ15(
z3vw%Tu;CQ*cCG)7_Aia;!SK!O9B<(qbCNk@pL`JJd+@f?-*YLG<Kdl-GBKSpD5%?$
z4eOIyprKs|%eGxYyWsC&>a`u3)osXoM|I+>WXvbRAM7sjgD*BLC!C;-hb#VIiM=`c
z?rz6e)_3&`x`*xYOCg5NLPxDt&|_!@3d~W_?R*Dk5Br6hj}<X5NgY?W)?+qfmjyi1
zC0^st@&^xIhNQfs7||37ZcYzyD{slN;d43nnXl1fwmAoD*Wyw)OA=%z0vFQAB_Di+
zVM@_lkMJ31Tm6-fwNRl8FFwW8DlK|JX%KRqr0L3)Iz%i@y(n_~Z<ri5h>NRCQKwmv
zw9Sg(ZNrX1>}4C6@lc1XIo^gAY_1{<2;<$=5qNg~<Q4D1!;6Z+?$chDBRY(`d>DiC
zzdxY#T8ms5h{cD=85m!FoVy=YiT<VS{0;3IOx^Mm<UY6Yqjn5u?iLF$#59QdTF8BN
zZ9utgXSoc+*YMrG96Rci>5uE>pk8S~^S+DGEX6R02s*;p4vZa>`VUNNpNl$gDWKnQ
zJFq{W!aBUK!EXF_$n!RW`lYNHI(m@*cI7pyru#y0g*E8@VcrJc*D&tXU8LvD>FD3l
zXh(P9-bgLD#WE8PcW%JTAYD>Ztwaq{q{z>|A{;n469k3t@Y!!=`i>ciJxs*8$Bbif
zk;}yW|Hsjp$JO|KZTK_~n&)|*kR(%`XKg}ALZ)QSJo_d?nU922NJ2s-NfJV(&a-xs
zBq^FDl?q9cB$ZI_{=I+uAkOgYz1DqSSM*;<+rqvR-cvADZbzKLX5il>X^`bMOkDO0
z-@OP!*Q1Y+P8XwpX)<hiu1SmMv7HrTiufMYz|g`AaPaLT?3TTNDYng!Ij#`9T&qFy
z>Q^|fsYsQ})JQ<t2XyY~#QIuw>iB*XT4beQddM0mS7?C`?dcf*?wPnXX*9(Cw4$Qf
zOT<?uDS*>8#wuJ_iF*4!bIne5urH(pDrAkwf=XQ)yw!+xbbsSyu7Gt-JRo|gIjyOP
z<U>?q_*c7s!C!V>n-+jjGLrc%Rvu)2;03rw!<0OXl%V}N6b~{-^KEM-a!%EdmSk%Y
z=g0*}?nmRqeTKxb@C$cIs7KCgmgDmqrnIf{D=f)<2NPzpnd-!g;33X{O^h?1f9g1d
ztx+I0Uz_1^unoC4QI+mueCNST)*t;@4ec7&k$2G`j$s`99Mq(fXWoO&B?dHeFNXr#
zY+?MLFJi&Hr`)bDk?1^00)Oo=p~^~@WXdBAf*WqZ&HEOlIPw8zrRXwT#!)`xXFWGL
zvk8YBYGn*-8-7pk7W87B3acgF5T>C_9UbHGI-4iBp1&$AJ<`Mp4IDAykR!@=)PduK
z6I{p7O7Je4g^ksNVpn}4zOHFP3e|6dpNTP!`~8sd9vEA6>T9$%G$B{^-oW&odeE8U
z&dIFOAOgD&+&>pXG9j}Rb2J9gBO?|H{(OK(j%GBkh5dhDi~_T9G7$CC1RM9S;u53p
zLdbn}P-A%&5#taRHHBk)>}0U7VV$Fv{UEvUEPSw4BByuUMI|+JShjo}oV}~T=E;nS
zvZaQP-lmSN-jhL1e<KcP*Mj`xC}`{c3wCehuwTv&b7w_~efPc;{%>wl>~<Q`&VA%X
zvuE<I1(H}`eih`$T;ZFkHI2xM0I<`ijqgv0H_S0150^-ivu$74e9eqzdS>B1nR>WA
zgFRQIAF;IbDB&74JyPIa2JV8{kSZOC))xfik-Y`6^UTLMo%<Mb=qaqewF`IqDH7M^
zmmz)X4roYw4@!NXP{gn2a(i`g%+Je^pEDD37l&~gg+UPEHHAMk>@h@)o5(xsUcf!)
zLfAgNm3^KWyHK8a#me2Vu$hCK7nMj%>L$!94FkVz-+9M+If&+4_@5&S(1(34<q|bX
z(#Q2UwC*8Z`7{kLmG$FFO<D4-O^r02)g!fGg%~h884tXe1Fgq)f*|nbBT?HgPV#3v
zoM^SBKj&JIux>qi;YTUFPBbR*;U&V^d+$PS$R+N|Y9$cpZFFw=9tLhgPe_PTrDkuG
zFlU+^P3g)8&sWW?uV6_HeM(Uq^@u}pFdVY`10G-Epo-0-6I`ZY+zfN-^Dl$<jITjI
z$vs&5a2h8VzR#sie+vE(TNB|!3I`4T;hrr;;LrGsHS0!z&)hrc*zLicn4wSS{b+&N
zqtc)-;S<+szY#jePU91e&%*~xBhqd&1<dS&V7j&>@g94Wc@BI*QCEe2j9UmR+ZZFO
z*pqX-FofH@U6=Y^DiYR>aR9o$A8$r$k_i_h(6vh%51ck*b2%IGHOqjgjm_n$R}RWv
zn2ouwYdP_nC~(m4fe*#9#5K5B94OVpMS3^F*Ai3u-|edyB5{Iijg|v{#>UT6y99|c
zQdq`W5W{gYr0rW6IIijBBH4GDBPQ~-){GUWDoa*u8;QeL8&XSID>9{7kw|tl0Cz)~
zxK3*0E$ecy^8PDK_PWnkJd&e5GLm%G2j-%Bb`<SSUj)9JF~a;Qq}=!oX>qrpdem7c
zKGT7rTF<cb`C?xEm=zsn_5u_%n9DIvKyxD}3cF<!ApS~_c%bzsgiO!peOm^^AA>Y7
zfzO8}Sxw+!@*n=YWJGnGq^NUEE$>FZV$lh<6P4S+``tXw`}Mi<QJc>|pYb%9FzYeQ
zR%KZ^X%&1lHXn7Dn2<n&3XItA#4FlJ|Nm~nVD|^qW!XQ$=nDQ*;1e|Syum-vj>czs
zMx@i@JD#1yb|H>UVDwS~{A@Px#D%$gTepJR=OR?f3WIpVr(9`aJLh$BCtQ0f!uXp8
z;5+26&^PM7c(!aT)Ub?5(N4zHO>fQ4y|+lbU6_m`|5f2C8yR*s@#J$;HVT_qo~YF4
zDemcFewCT0(2j8yL_xPWqnzOob!$2PY=4FEIaB!lq2D0HG>fZ`P$30xs~~GM>qzB=
z;a%?^XwqNE&OPVFeWgd4vx{YjvJoy6J%f$sSbyRM<Ew=`@eRM3BPc<hPVhIQB9!5)
z8sv!l;43`0Op5GM_D8ekAkJJ;ffO2#fc7!hL8f1t#w;m;)mJIBbLZf~C=J>-s7|Yo
zThhdDy0Ap|BGgKUKxfTNaFV%-KKpKg)_iYp{i)3-dj@m=Cdm*rrMcYa{yR9(&YTT<
zW@1eATYRx%2x-!fMc<eRvC2FSr|C69PV_Ulu%n&ztv5kS?@4gnzn(i#e+FjGQKaQ{
zZMb!R92Axv<4-=fBt~0Dqrf@ed7!-z;(b1Gikd77xakicSfT^tJliqv^G~opY5`5L
zFEE8(f<0%9Xx%qI*gEwTrg_e0dDL9Ay`)B49MeI#mSr*6&RMYNQnt4ab4RbL#r#xv
zaNVOLZj-7<J9Y;vDK{bE@C9{tSHm>fmk@C_8r8}Lpujqj1p%$#xK5ouJS0cF_DN9J
zuOGN@&K`m!9Pn_Y4Yhx{5Ys-g87F7P`wqql`)+^XP4(a7!VizJM*TY1v{sL<^DrRM
z(`4wh1p?~g#`Z84S=^GTi||8$4(V(k4r}x?F#oU}I2Nd2k?K=W|7}Z5x3PPi!)+LC
zUxecahfvp~TrOnyM)cuEV^7^*sC^a=>KkQ9pu%Oi*(yyoXE6S3qJVz$w4`b+3RJYB
ziF+Sy$~fH3ko;vcY+bm8J)@P-&0{!@A8JkxNt}UY!`5Trp8p^|)ejRVX`s(I3H-KS
zkA9po0t90;K~U@itIK$RyivHqqXXTNXF$sHS~%_c10(zuIC-0FZq>b9Ov$_o=6`I7
zrLq*=cYx(Ag1_-C-yVSTq4V4-XO@9~_y%8T&BJdM=H%C{-<Tp*1@h;mu{2MSAN@2P
z)ujINs?!vx<kQ<w+NFz|*sf()c0c0)&*8_PQlU>G42V&gCT<*E%{M-I&lxQFfdP>h
z@XB*(NIAgrQ!gfR|KlRa<X=RMck{68TpP^m>IKW=5~Rhn6C$Sn;Ky1jk!8FYlvqDT
zf!jJBzwU%-E6N#r`Z8`zuICNorKrKSmk{TpMi;(fGtdz&oWBmBYe5k9eNN&(w#(q&
zH<rY6?oJf6`v{dM8<TqGa`t-c&$(o=^Sjk%KBkKG{H-Q{uS2ge$MP$j&u5+&B~3nO
z$_<Dac^vXo1{j0&B^E0_N0rn(klvew6I>D8`u4%v&m8(Kui~e@{tMp^T|oPL9<Xu2
zd6>%jN7LMF>Ads|&<)fg#dEhp#g=YV4bdUxpMOKWgBCGcmmyy4dKs6fy<|+z8f?FL
z3*)P$xL*c##9#Lo=5E=^E3<r{=;#f8vbq$F+I<X6?p1^PxFdXj-aCjeQp53A3|Usd
zlPet)&I<&F8Fm>t=o`8X3g-R8Hl-3UT5JLPofXN_ng{6SlM4ASUSr4W4^SD(_D_fV
zz)Pxw_iNY>%}$b3a(*G6OVXqJmYoE-UUjlTV+bwz-+gXjDj#pY78X@j;W^Kv=suq3
zA1>A=OR{P)YV8wrnzSD8uG@g6Mgdr<lYm+qEx~SU5KOa(g>H9w=qlKNHjBPP%#Z_k
z{K_B%Hg#|sk{yus$Or@{DqRHj!(ANzP2hU2{f2??rKqLl0&1@(bMe-S7<sQ4en|<)
zz#y9$E9GP9q5se=aVR*SY~uD7reW8oGvGO&%|u1X7_o0Jm&5#~O5rNl8g!0Jn)($A
zRKDQ3Q`d0i9V4>$);m18HUzV`m{U2{SN{D~j+pJ9%dfxt5Ar@7gAo&0j^K_FW67~h
zoLoJdnLmQVtCfkbkpaI-T8?bin}{~!rr?;R7s1Gbbxd#QlksyjNlb7alm<ETkYR!i
zGk-y`_$Wlr>Ep~C+!=?>iZ|Ju#asaQFz{9(@A`B+*I{uH2h#4rL@jBWcI!Xp_-<sm
z{08tp{sFyW^Z3CmDVnysiz{6Hh4*KB51oK*I7D;{Lp{DgV{9|u_v9A8fz69YoiZVM
zydo@TE;B#ldfq$eE)*9Z$0WrL82WAqxjW<uOo?R<VdHifF(d&MlBLK+wnsZYIuq1-
zhw>NZvEA>JpD>KE%eE_>K#e=M!TUxlK5Yp_xpsXL?Gee@@i+O`jZ(z3g7v?381mal
zf>yKphkb}FRkGL%e!G@%l8gUAd;NUqT^57Rnaa51Zz{V(PQ;ttS5V5vgq)t*0P}*5
zL)YiO=(yqx?*D5@)7k82e;C_8$JT<S(2$yo@8i2}9dgxOlCFLA8h!j;@Euf>ZkaAk
z3Y}E2j?E1Pqt>x`p#g16H%Cu1B^31MI7e%x^4qV3LurL7ukw)boK+s6@8wutT*thz
z_H1qy!)Lc-RKko2x#%c2loM>d<LqYLjOBsP(PgtVEt_pY4R#HoMe}8-;O=W~zI!$5
zT^|a9*;(A?yf2Ie8o-OvqB+s%Wt_J4D>VIML02w3it<5wFqfVA7JvPXqPa7LV73oS
zD@;%^_AlBe%7Eud8yYHUMT2K4V?$IbL_`wKwlEeS|B<DBpGNSKCyUrQK!z^58;*Of
zM??IM(?U<BcX)il7!;^y2+!{@AY1H|Xt(THoHI(E%K67&+sStDJyXt=zBb{q_J)A-
zi$k!gp$^NhL}8VM1xf1v4i6Y#xb;i|FV`$f3nh!u#!rSi4;S-hh8M+x&L^Cg+&#{%
z#~i|5p247u9w<;^ed3HPz9suD#DD4)&QY@<t_BA1MoN~nd-P!9@(?`xRhpy++rV<>
zYnXk5?aIGh%g*$<jP{n=sF+>~TW^P>aCi?a`P&M|x7I_oZ8wHpH>YmqGhlC`3E5hq
zNmKHkg1Aqe`n+`p-|zQ1Co&%|wP=utv3o#GaR!)Wh&a!Q9P3a^(N0kx7S8y}*Uz>i
zW0{|$uF3`KEZm^2YBqkX8xO5-dbx@d11O`%V>P>@n|yx&%j;WE>FP8%Zaoo|6r^bA
z-xhG%y#wXnaOiWdiE|9CMSoETc<I@K!-D_u558kk@F0vltVX6CtU^q(g@i2jn#hHM
z)lg%|8=3`kU+5BxUAJ-Nk$ha-q(p?wJvwYbH+JT`GWN<zklAHOySiS1)JZe4e~2~t
zA6K-DSdhj&%;$89G3|a^P~O6v?vZ;8{?cp@8z#fmZf}E-C{^6dGP%y*OzDd@#vxeT
zh_10AxS-gIN(IT2;`AC6{ONXyPG$MABx|8tp&JAk+fm`@Cty(AiWfGOLhHT|s6P7|
zELk?cp`{xm4rKE~yHoMi%{ItgA|+HS?&NAETfky~InNcC|4%`N`qnt2^5@qm(HeuF
z9WwDDH6SPDOK?qvE)`ak!>^*3I9pMc3cei5&YgUc3ynR8?JKP@GT<XPmO65a-m25v
z)6Hr9OKVzswFTEzSP*-BfnKZQIXMdx;=2AgV;dH52}u&vAodN!=H7xQ+p;j^Q4!}m
zT%X^!WfJ=Hk&yV>0E7N+N2^#(O8q2B=*&5ooAgt>hkbruuQMm^*GEA-XNV%7^-yr-
zBFNsp!(6$poa*2SoPXb%_0t!No&JX4)QOtZ_rrbhuC0OWjNLEpX0Ece-%bJl>>0X0
zJq)b}*204?ov`?&BrVx4Pd1Kp0hZ?$7X}Zajqgiv^j*X|UwO~1e%cFhqdQQl(U8WQ
z)Cdn4r{aVZ4?Hz8jhzu^alRJ2prEZDRL0K2+?mn*o=YN_g~s$pZY{1#79l<`!P<qV
zuwnTR_)INnmh?Y<-_u&~)RLp2Tjgm)*lce3gb-|)8U(+^>eOuG3AlUiB6v&ILTl3&
zJT~2gSYKyZyM}Qt?RCRp%u5f9f4BqmzB3lL$5KAN_=4EJL>-0qqOlZqLX3M1JQ7^T
zU-`k9dO#bMPhG*_&)sMlph<#7Y))s}19i>OP<-Bv<rT%8^~<}cbv^_ASubv%u?TcO
z-^327`w(w81-HIB2r+{LIC_=@*&fQn5|&M_IH^V&WgiNqH>uL{o3EjqxpRx&4M5T9
z1biE+LY@`q(A>Z#lyJ&MgB87Sr8JLyegZL`KgBKDAxXl+EWkT*fcbLIV9n3~n0i2h
z{40?luFp3KM+aHc(<T%?-i!lJ#&{d|vmRt0_drAJ1qiv3!}YB(hSf)wqKI^g`=-9&
z3hncF(c$&D$37q0cU^$t4pwA>jV$#rd5O~-K4H<_%}7~q+HA2TUAN&O<G)(azBzWN
zzfGGYNf)xa>3+O$N<d35T!S7d5gN9dk-9tUvFU9l#wV?U+I7rNxaqj?p?MaLO_3v6
zZM$KwizOMdM3z{@w`0VHAnxv}i+I&em28P(J&9|J;6|!CIrKgaI-g2X*S<Yaa$J{4
z^eGV61-`s~RUGC<-4I@^l%x-x4M<e*Z>$@27nFK0qtVCB5I^$|M}Di3puLR!#+X9>
zI&4PnHWCHzB(SkDlpD4ph|LNda7E~2IQN3}U=|vYwmHW1sF@^LE!m4~4?!Kr4DeSE
z$<eJJ*{uBfIS|;q61RODzy)~@aB8s*6&&p0Bi^dAJWK!<8NP*&EgCf6&x{t!>(Yd?
z8E7AM0|c6V+*%QVbe7%EJFiW>%q>BF$1h&{lO?(OQH+k3^Z9Ut5zzXF`KW4?A-d%b
zA8~XY1}^9MIak85YONyuGD4aL4g~SnjOD1HmU*fxZwhmlC1bV8F4mL12>%#YOvSMm
z&aTiUM}{lXD@WDoR-2{JImH@puT`VLn}$Nr&J1)-Zxv5oaS84p)+K|B85<LR!hq*v
zP(GzWoAr93V^E5QF`xCuchUITLWX2yvG?b;NXS2HOP^n6`@rHRe&g{^`1i{x=)Px8
zH&!aKpCJcwFBNh>R~k|3txP&Yo^pMck@vM>ozUzB=)J86G|YT4dSe&gXZ@J_J9i&y
z3Ww0HSp!%b{R+H8?!lqGuRxGf<1%i31stAiMf!U_<En#0NYn=>{F^ismIWzOzqd2F
zsL}GsvA^S0QzcZo#iFn*8<woE0ws@Y;1H@wOaB)0WsJ!X{o^t3EOVUObX}bk%|8zF
z9+rdALv0+jk$K<b--6)3uiU-~HE`+CO|<`7jtRaQjAx=mY(yil(^;CgS^pE_2jh4H
zch&_@3j!adD^T%3l4OglsdbqqInR6zY9Azci|&P3b0&ckoSQF>Y@C8VN|cY@dK8ve
zm%!`C66|Lt{QAx^Xnu7FM+WHAzWzEsAkCH>7!m`OWn-Z;Y&6Key@+}HMbJ3h1vP5&
zFmNq9A8kD8?Ds{)e>!>{XV+C>TjLwpXf+aCO9Qxr;YOtTO*VK<isxeNUPEQA61-0^
zC4x|C9MAHqM~q`I`JX!VWiv6w^VM*Fs|Atmegjg{EblvY9CmAILCuhdoV;`|=lQG-
zg{eItTF474>lR^fV>Apq-G|}3m|wQh3WmC!z}zYWOpd(E*=sMsO=T=AQ+WK5#_Q)$
zQ_#$tjlB*Y-;!Z=$V4==ZR9#1?ciK{q=i=|t%R$&=ESLcCjOpnN)~MDWL}LiXr!k_
zl^#md-WDZlGxiURx@Sq8KNj%u!8JlR2U|MKG#zdxe`M#f1LEgvw29wub6#!!XYNsn
z89A4%Lqvbg#QT|h2H%ds(%cMQen%+!pPP>>;_BgFh#~oF%QDbYQ=qWz8o%x)`}6ef
zFiuQV_QNJw`my{FimJuD#^G<^`kL_$_IrbY`xi*Oy#!YNX0!384zb>;R_^*T1u_lI
zvGw*5@G!_>bC{*v{_Q#>=H74g3ET<y9%W)jYCm+YW$umZ>D;(~UqJArgWH!-j)BwX
zFs`2fcE3^~9fs);Yt{~~^eq>4Hv!$wuui7SKTf{qI=esGif36!k(PlX@G<d%z&34k
zeX&j~(z(NBFM5Ebr8?AgLnq&*>47N^JHai@3|#a~$m&glVDpJFUaa!5M(G;wk=YB!
z+X_&=GY^kP{l$_1mft<B4e1A8plswO9QKJZL9Xm#ozI8D13A**I^fHX{$JN;z5rC&
zceOWTC(H{h!xZNQ*rw9Od=on8cyt0ceNd8~c5j2Utc#qIxgj`j{EE-0G?BT<yh#6z
z!Sz}%A=h*-ULR&d`$F#U4e#aYetnjaiq{z5y@;_&xdMLQut#A1$c!9oQKvKdQ&1x4
zCQ8m?dFu>w?v;lroyu5`GiQaO&rima3%vn3Mp2Nmm3d^01a#Q0L1;e0`V=XTd8bVW
zaie7#?=OE1MD0IZ1gncsG-(HKQBnXGI~9oHcqyu*aSC4ONs?)wc<?y$8XRaNXg~ac
z(^hR~{5fM9R(21MtkWdvh0?f?u~7YZ6Y727AFf|hjdNvRqEUb}?&x5SE4Hr>G3&wX
z_v&O%V;8tCo5(d3-iO>NiNe5}jkwEKo%Jn_3R@+4uCii2RBL}mH)9RT^N1&ejC*{)
z2PPhsrhR7dD78zBrK#pr6n#V-e`qFFsoum9%-1nHu?q97dSUsQ$M`7DoCe>xitf8?
z!FO~6pV%pcuI1lBsjCS)kL(7ir7tioD}#S{?<Mv%DWh*=w6IwAJmz;tg0t&h{20r;
zIqO))w|)!!IiW=4BEMtQxej!G+QTh-5Cr{JQ5fa26aESl(O;2uOSaq-PyAv)MCOOY
z@zbA+T_0~}jE-Hf^1TjO)(`>S2dm-jfg8xVDU<Y{YeA)N5Hi2YQ2VvaCv(q=l&<!}
zz6iEMPB6j9;m(-Rtjw4_TI_#2gzpqd)8tdXxET(5q$d3g_uIgZuAQccPi6n0#)%Jb
zSLqKV8yI5g=oZfI(|Yhb@r<uzaVw2+!Kn8(mlJs(fG?lTsNE+emO~21jU%+^gEhVI
zy0{9Rb~VDKXBNa`Y$?2pmnW6n15`YUAkJnP3EACHxHK7*P4!9Oc2fwsI}yMAu_U4e
z@>D?lvqh@B%N~t<Je2esj+Pp;Jg_M?YIuRc*w5JM_!e6}_p;omJQ*=rfyO4sV5jN<
zZZf;4%#vW-*$j1bj#>o^EfHVLVOjRL?GS1hfWvxULVf`20xSInFDorH)7p$VWh(Sc
zj67MNmIg!gRY}XtXCSa^!bWo)urm9Gf&+zF&(a34efV~?6Xo;&Z89ZZH`D>AMuPBE
zD*V_WPd+f`yZZwv92cJo?z8=1b3io2E~$ppzBq`#{EzchKF=2`oPdP15aw|2LXF~A
z=(j?i6PUYkQ;HM_`yvOmUrk)~RB8I*whCEx)eP1Qd`Fwk0Cd0o2``iugXt(8;(YHA
z#;<GPx3^}2r-3E)sb7TSVi)5l6^gB`2S9AV_Pc*v`F4$kP$yV~Mrsup8eay*kEg*(
z6K$druS)ea9&z!Ct-?%md*+F67vE#<mGaXqV5OZ51xp%WTyi^7)kwTs6pN*QRB5r9
zKJ}+nFuzfPoVdt1t+lx>*6FI$rrDOpG0$tMY&Q3|LW%~DlY+OOUSa=%jo6B-K{UTt
zIHCA5x(qWUKFe7?Y(*jXvVIn4qD~zY_hGx+W$5pIi^s;F!l=a2;AK4@mOZY+;fI-P
zXvkO)Y&Z&<&vj_hs0{eLrUP9DKB3x*rF>)LHu$SDAL{OFW2nk&EV6tHw!hSfZ^%mU
zimpQsnKO)Q`V-aWO#{bal-n*JjwRcyiCVn|JSt~1<dr*k*B2&0n~i8my%tgJzl|+7
z&tcBVT<A(Zj)G4Wd`S8U-sEgGEZk#B)Yh1?jO{M&N2&t({8oYlOcSGD%q4CFa|T$k
z-b%8|Q1F~{6Mj3VVs&*t2!)NfMDiyp?v<tT>J5DF<1%b5f6U!rd#_oS8LND=h~GD<
z1VqBqT+)oU7{1z=9;$i+g`(roJhdOLKW@iIsYYafqYRnC_S$2-lkh?RW<0tw3VqL)
zF#c(|&}+3Os`cq|wha{s<<XE4Xi3Z__V5=XYv9f&WnxyQf@YI;^X+zRxS6iS8W~?$
z-4PCIQ#V4-%TjQUcY%2m-$7viE*J=$hz*J6IQ@bR&9$|rSvxdoUuQXgs_+=Duxx_H
zJ0-$WTWb#QHshI_R~c_!69?Gqn){}WE0r|hYAWCI9Y4BY>yS0LqO}I($DL=4?mm8k
z#!)POV-KqirLz9m4^Dm(@}9}pAk*v-oZk5Y^!9{cXVfk%V42+-`7eBapEiZXHW0t;
z8di_`3s&iC@aJ8|3Vl4t`JYy#S6{0Ux$DNnGmY&GgdTj>&>Y^`MjH;cGX7qxHa5Pt
z0BIE$aJ=o!7}>ExRqLlX=7bMcZ+Qj^R|fI!)x(%q*MVtsP53KcPveH~dX(Q5jUG!|
zp`Yag0#sNxKJP5wzqc3EOxEF^E1zM_C25k7eFkplu)9vQ3@zAR&9a!z;ww_IaNx*)
z=%aXxOP>3OYdv@a9ZwEN4U=x@XfUHwLQZ4$M3!}nE)&{Zyb6bI0D4aLfIr<91dkfh
z)D4VFbmkiDE4c`V3|K#R(>oA&IJgwbyhQui^0+eJghn5|&1+pZM1!dNkTxuqGw)EO
zJ#pjFkNKB&%rC=$Q=RB_fO(lKEYaEEI4lW3ESx*QHI#dxyn`Jy)_md{oMSO_4a>ca
z_JW@+Rz%0U0zziofMZpb<RoJ;9IjU+If;_=;AaII!ZqOOFIO@C#8=MGHk}jYXu`la
zTPkpI5v%=>fL-fHL!XQnI1b;z@yAT)1r-I_T08<0mMYN6tBN7HegfN-RRL*NBa$W;
z;m#>T5|Vj}yEsIW`h5uI^cyXxtNmJ`Xy{W;ek<io4W-Ee4?}8}AcWxc@^Iz#S?u0&
z5_w}Y61>=&4jZXXmtQs`xfyA^MRFcYJ)=wGZw&Izv#-K1vszFMU|F;F4x#7cv3OTY
zggNy!ICrB0b+@OS-CSAlthB_x^f2)K)+A6hgUi|X2i}}!S(kV%a1U7v(X&eV%X*QR
z>nQ;-U;7~QUkkVz9KfLmF5vjrzp*)>10wwP@;+=%q_^`9pSmFz*=Yt!=g44P?hZ(s
z?9RuZJt~YV`hY(l$&g-Y#z}n8jSsdk$Bl^<)p_y-2IQh(9-IFUE4|71@3zpFd<%a`
z7}4$AZ<Mn(q_Iw$*ep2%BQTSzI;2aYWYnSN70-2=$w1#qWzcSwB4elx3?!*jrFm*7
zJXpi_5b5CS{+^o_6%VOC=IE4u43dr4gWB|Ve&5&+5U~0++KoPitJ(WDKUAK!Xh~5)
zuIHovqgE`dpM`$w^ZE0KRf(SIao#?33YMBJ=Vr}ILcxL;Sq7iK;yDvGhuJB{OzkW1
zO6fa%6peyOdD<jo_gl_O`WOhDj3CRmihsRHm$(YXa(R~N5VzHgxTeT(^0}v&uX+-+
zw4T5hi&F3?A}a1a4fEa3U@jXwmFk7V9LCcwU-lomgvDTBTNox@aD~Pz=G1>>Bg>i>
zaSofAXW`9vUNS5YqyHlBNWEZv=vjDi&62>x)o^mEJ`p7>fu<D_bReV{)(khKEp8=n
zo?{G1k1V)voPmp76v#YaJD2kX(7ITcK6sl1q8KaTfzTmr_MOcfy{~ZciZ>9n+8a-9
zwIo^}*5T^rbjZ4rh8JdjgpVBCrCr*`xkq2(UC;mE9Ygiesg&izGrNSj?J?L|wH$Mn
z55mUSGJdwF9d#IMMP2SlkoMiI+dPCq&#tGC_V@zhRes{HS16H6EqCm%t^u1c=G&Cf
zp?)DT=-P3c3w9D<nxPG*UO$AsHD`szzIv>?X~E?_tiZP}0`k*AlMJ@79*ogcZdOqN
zrcNCLYM8@IvHo1$*k?E_U?|#eHUS3PCDL|B&_bme?3#*M-&%)Cx3b;vCXvgZK7As8
zu9_3jZCuksmY1lz1UP>hI3()R+3)_rWbgIp8~#n)=fc>s6^p@S!&KH68pA22C{Xn~
zx~yBlnB;rgV6g_viWG5RmmUC{S(o!?nK^x*qCjddf5XWM*I>kNQ}RE~>$DIiK`K>%
z1Vzf2%L7nbdk)(YK0~H1d#)^%feFhg-l>rzp>vvW+ZquzUOvdx4He+WIl8!f?sKfY
zcM=4(X2NHug7K|_Jc*i8hmMC8>FVu&@J+!hEIoJ==duiXLfcSO>r@1@b616@^It)M
z$2lk(rc4W1x5#z7lra9REU#9^codfHINl+jWkpkk^V0u74za<f8<-QO_B)@&&a;J#
z3p9@1v7g*xJdc`GRL*2Py}bK~S3}Ubg6-gI_F;r;54UgbQtZ8y!txd;vSZ^A$+R}S
zRLhto;$Py<$D=sE2Lara$cLErP7`)s)}YH|4bhP89#eko#vR8b$o*dCaXq>fJcp>$
zq?RbOneYmZywW8_RZ`SdcfGhS(iOWloPcR}Y9V>T3^e(13UfC&2&4S<LFdy6=J1@y
zt2MvjtE}!|>S<k2le~s%GZLXj<{W%7W!_o78u60=jHf6Ob@_jod#8xAvl7ARU2?P;
z^KsYBO>k(b9qMG6P>U0(AUeBV{7%D+I)6P0B97<jH4EYfe(0XBjB0P0XOv2kca?u}
zy&w+jyv^y8tgi?w_3(Cp8JjIperod>TpgJWS#z~8IwcVNx)={3AzSDnjj-1s5|x%4
z(8hHvzqV0<gx8khKMzUr{!k2xW=w^fMsXOk><M@kjN+UgRH4o*Hj^`;&cAMnu_kVi
zGx=Ntu~F;}AC|(^Sj9ki`Ao?96pHqXx56uF=BlrI4}Y#PX3#io6j*=a-oG>_BUf2a
zy?tM}kVrGUIw2b^o;`-GJH5bZ{R3$K`~V&IzvCOX)S}F9J@TU0h?KqzgPuio7+tyq
z=1x#0W)no>vj&<Zs8WVjkNW{>)dSq^;2W40t;h*<cSHK15xQpo2U#VW=xx~s(%}N2
zp(&VK)`}DG3>eAIhx((s#BtIr-mKv#ZoIb&Qr5o($s>25#q$otTWM2Q|8&lGR-~{~
zwu`TIF9v(TT4;F|26h=?yhA`5JXvHxMb;xZdw*jTb;r5vJg7xNZWnUyfuH!??g75_
z$Uolf{u55^_#`kttVniFD#rse6mfNLIq0o40a@luF5E4L26w8!{oO6t6?hBg?P-G)
z^%^i*;RlVod&DztXb`7^%$>e;HWv2;!`75lAh(If+<ggRlTv$(aBs)21%`BubPM+7
zSMiP!L%{m9CClwA34^=V;;xxmbhS?~_{d%6)!3}VOSObg`_#tmW_$24RmSMdyg!|%
zZ27*FM2K^eA!Z_xxL{Q-zEZbA5+1^Kd5D6i1;Q(M49xZq@{S7j5TyX%wz~kiKc+-$
z(KYz%{RR*G8V$KlN?cl;8Ou*7GGC7`&d-%1AeKh=z93#hQH=)di@}BW7zb}!G0tTU
z$mDTDXqxmYJkVngGfWxh|BeyO`d!DDZ_0vso4<nW-It7^bD7_*Zb%j=1i>)b%i!_r
zAGmg(<J@PIp<T#v?!W~ZdSUcKHf#QXQ!H55SdnoO%OgN^EkV5dqzxH9A{*~c2xB{w
zdl*x74D3%E(Vi7UsFQ65$}84!tH)SSfh#BU{oF4M`WXtxEF|ebRsnc^+W~E-n6pfm
z<sLSbV0?Wow`-{qHol$91<0i0+MTf=8ayM8T~Y=PTjXiu6MH`MelcT(WJAScMbc{0
z%%%JYf&a$p6IZis5NczO^?KT*IiQr~6pg_wc`0{yk{vw|w;pO9^z)??!+7r*M=+Uv
zrnhPbK|^RAIz88;-!58^%wMnI;w>Yhvtj}ExYfggmO$t*I|A)ZmUt_IF$%dd=$o<@
z(;4?obu-(&jI$&sstfSKxoy}W(}z~R3E2H}GUiHZV^aKE7|ZTN0z2ibw5$Q{*2-Q?
zHvY#i2r-4HbFGP}R2nKz>r;X8g=~e1&#>>AIt_MN4%S92!;zy;%^w)kmnV!!_B314
zoo5b?$478xb1v~=2^ysKUlV59hQYfZCX9R6!@DJ2!znKcFi)UNm51t++=xTs#vdnn
z_YwPWf?GL?G*bC4VI1lyo6^Rmv0PD)9_{SAhm#W@!P6BcWV^5lN<EK&Rp=31bDyzu
z?aQH4FNqKJdIYZAe_T#$3)nu^p>9*8=$qjz*YvC$yb@#hEHDI{`;TzPei^cNxefIm
zUWKQd?!zIYczEq;NhAW^;IN;s5hbD^-d>767?7g>P1h$4cg|yRtv<}}XAEa!Y4O>f
zZ`f6-MSX@V;m6^(AS|;5ed5NVoy}B~sb}mC!9=*qb|#XCK0@Es<Gk~WPEK=%4!Lxs
zkj*uM&}`2)am^!pECT_t{aAp$j6ECV*oa=d8(+(Eo@&)oIJVkG=gD^=Z5#8r-Ydc2
zcPBvd;ZyXecn^uujEguf39<|;IfFMR&`*6cUpR$v0*vh7Mz0y|d`q#h#ftas|08rt
zdIm`o-@*cOHrETQK|{tLjJ_&IFSF&;>GpislU<Ll!w!m9$aFzRY8l*^+k}C#b8uY5
zcMP<t;5+0}VDsDQ*cXt;dmYH&f_%Q<#gVdP)TC4_7^s7KX)_WSd=$kyF2NSYgLu8=
z1R7m;f~-AFyg^hyF3J9Y@_$Z2bVw>^S8$1Y)W^Q>V<+N<9hcG0KM3MuBtibrXuQ;`
zM2hzG!0jnoq%!R@K0gwPO9VNPzT`5D)L=QKE)PDMIsD#v8j(;n_Ulc@FyzBke$8_M
zDO%fyKX*oB>iPYUnOuj1j6GuWUjZzr6GCS22rOM<17k*<fY5ibKn#DNSfvYljQ+qO
zt{JY4*B~p5>`}CxF$+J+(8RGb;J}+2v=`VweTEY4(-{s<EYocz?!@;|=G4G66fzit
zw|q7GcV?GiXba2WiqvSxVPnLsJs44u%LyDhoK1dLpwH5$Xkl3hX=irh#w|*y_A`jf
ziu=G9PLiV@!q?z_$N`RQ{f7-*lQDJjWhj-{&;4Dt1v+1S<aJ`Qpea>?oW9_Ju7!qh
z<Cz$}w>#pg=@&5ODVuTLaf1qh9En}Qc7hXR8FzOsX3j{#zVPwj>M9Qc)?qCV_Ce_Z
zWsvk@eA0a>LIbiMXU^9oepBYa=SQkEcZaR;L1--OwJOFXZ*svvB8=rqq*2=IF!Zl^
z3bHLFSlf68hVEy~ntU5N*CUg8Hq6Onu7WX2_HyBkmN2a>3;yR~U;p|Pgq(T9uUK#c
z+@p*C*LUWBL|M@0NNXBDk8<@oYNRghHuR6q#<JH+G~$vm!2MpVjyeK;b6dG0d6t74
zEa6<|ctFUBYQFfmB{a4L^9~x1L8NIX3}$`)_@(t+g`+jmi66xCE5q3?(Ti(bFqi*$
z|28=PlBKfFnK1v2Hu;vRKs8;PG0v<MXa7h=zwO&WyV8uTVZA3&{bRAlSsns6n8V{+
zL&(P;#VFW!5$vqTfmwE(@YC~H%xd*Uhy5ZnyXAsePQS5~y&pfB-^bB2Wa(-(dHQdw
z8WDG=F}`RO{Ffn3{7yyiK1Xb6qh_kOXCY&mIDFv!jJETyH!z-Fd=NO6oa7dNlOW%=
z7hudW9jda0olRrJ=$?0q56pDLYVEK1G{&07q&MLn`92uDp+<BTMWD)O#=a%hT#b)4
zRUzYX-D=huQU8alE}h4z$8?FBzXm<A>nCdb>Hy)YI@n&7#CpB`sA_0TEOOZ1&+|Lz
zxdiiORu9EphigFe?>}+5#0$8%R)*YV%*4i2RoJt37baY1-HCF2h?V;Zp})QW#z|wA
zZ8$e?SSn0jElFEV-g84&E0Bm9D~NsZ9zKj>+;ZKOaBKoQYm2^NZhR@1wj_yP5%(YF
zu-T*5-h*J~S<0)NScAfytcR1KL+>Q!u-B_5yGyzWr1d0V*_JFUS*%Kunls?ZRz;%Z
z`v}FmJD~R6J_MFWJn5)NrfOIc?^HJ|W#6;?$F`%+s`cpjOM>%Nv*CQN?Pss)ZLA%4
z1>&oe!EC%HuYOyB+|!mNdG{!^T(F>WZCccFPBSiO%EGiA#~_K_BNLvQ<95bMx$DMU
zgW6~L9$OFedtS-Kf4MJIJj>kQ6IY_1S0LE#HmCCCN4TN)6iL%3b0RvXBMz;(1jldo
zf&V^5+Inp`NdCMFKEb!StRq>d;Ms@!W{V)Fya2owtOX0>8j!v|5@T27L+)oq?%90}
zQvP%)ep{D^^_OB$rS3K6%FY+N-ZTcijBz+8*M!E$Rq>q$Qe0ZW5KhnFBcC!V792i5
z0+CT3FA-%)26ns$y{j%TX^$jzTXzv}4TPh^kO9n>XFH-Z%dp7wA%wH<;m)?V2oFv{
z?48H(Y<ND#Yld>tdKR=?#Jc^4&5$-*hU!nzq{q!nsN+RXFuC{{cFt5L;`sIWbgu@v
zHdTjMZBeEE4cB2}lQElPr-0h%jbLeW1`mI+B)Qit;lVP-x@!;OJEgaA&SRH@)H_)!
zGO@$O)-3CGS<Id*f3Pm^3a;HU3-+f7sJ;>Vd`^Ce(tR9wo!pIkXX%r^g|2K4Q7!H@
zd5A@0pMm;8U2;7^n)=zL@$ODrK=Jf8Y&D4E?T&2#`^hUo&0z!|vGX!-Rv*qiPmpCC
z`5XKUTT7BUTM{qKi38#4Vo=g7!s3B$oTFEXP9DA}WLbdMU!};7@ntx=s2)wu#G%i+
zwNRb(6_+RLk*RXGuqE>%HXJ?!sk-+d_+&ILki3s}>P4L1hO7M6BxxwkOyLeQr?Ov~
z4_~X%j`!Fz!^nzp)Ep%#G@WE#SvS}<#*ev&6uBR)-<(x-5c>zqV1<qaRv3Ol!G!tz
zo;Me;=X)h=<HV>oO@eFL{0)<<qq*U1rh=D?G57dQVVbKRpMRkj*RuC#&c}~ncK5wd
zuT72JEq=r4)2kS>`?qkk{}3YRTkQO3tQ8SmO%j_~4e}A<dOo2=nYwx#usPoX?pw7Q
zUBdEWrh~F1{_$blu{{Wlb9Bgntr{&@k_N4DFSz8B>=|*vp65rJkjC|MLF<Yt?%SM!
z@wF1T=V%{zoJ>IHi*k^wJ(_P1jDnvriX=Yihwww68d;!RkK?*anJXk0^<MPy8vZAt
z%Jl@U7RLgX4#UJu4{#o;52|I1Uu+n}GWBv;nKlB{B?s{4$RWhuWhHc990tPSKQPwf
z1?290!;OkLi-K5po~@y<LfDS6OC_k=BV`<x{Rx_dv1nARh5D+>r1~{ur7g+i5@LRX
zXy5N_wWGRVDbgjoms$}OA0F!c|6<3MDsYrF=6t1>ay8PYIh(Lr=pNpI-{ybCd@pa9
zofrkdntEu%*xIsZL&1~ntph{fa>9{C5Ij)?qUBNiit=^XF#0?QuCT=B$#Yoz_7YeX
z-e+Ctm0bIl`H+?=K{wWG;l4xl@LSrLz7jJ>ePlY@|0$r!SrKMtc7fuTW)M9H2TCqr
zU-esDral5ck2;4kH&jV$T^q0QIUdSW3~7XQ43;KZ)6jdh;AgY~d=Cxd>(U-$P*Dp!
zA7V_?QzdEbkeN8`m@l@)Y0;Q&=0ke=hL0XOj?EdZpnBX{D2OeCn)J!2b$=bKfBOqP
zbZ>(|<C}|D?GQd<hXMA*90Q+=^;{*}9ku1H!Cz~2>7qG>=sMAgx7+rall1z4MH^Z{
zBjG-DMV7&YFLSW8oH>G`TOf7VW*o;dAFjRk#qPzW{Qq*Xt_yXzkKh1Llpo={ibt6B
zf#;6SQD!~kE#RG800qlt<CCmzToRH48BdH!Zd@;4s;p1%t|@`dHYd^XnK^%D1@lF$
zy}&ifB}2S~EH|0^0ewUlL~`5t&VD!Uca#!QJ8MH1f;Q2)_80b#5D>MA({Wh-5eSwJ
zVGiN%V7lcrmL?SQou)nf$g2-=@{R#09PSN!6vQydGIYlGp5wr(Drj^+FC2_9CJM}d
zvhBP&xicyQ^}hW;uhji~AXBB}-VupwtCnFy^&(VVQ;&sK%UPG}QkK1gKdi{;fUA4;
z$*-=HC|VGJt6Aniul6kWc!dEOQ)~pvd!?AmcLqq-zJx!~(dZZcoOA6z3<gqcM|koQ
zgfxlpnPfCZ56p*J=E1S>EQPAPA~tV{<re5BL;E#V^j*OE@P7{QZF&g38jR8ZZ8=!1
znU0ytvb0j_Dhiy&@IP1G$HvMlVue9{>gXE)rw$sDsay4lGvm$%hCk)kKDK16218Oj
z!;ogU8<5<1Eg^($1^54W&iThK7;IoO>dSk<>f#0nO<}zKCt9fHVhZX16li7i1$cdo
zJv%inLG6qP$eSxk6>sN3@R^Yq{Y3y-N)6naj(ALeZA??9@vwE|2ACXt3t;<fJl&;0
z$2>KKInIoER$+_={*zI9odP{t&USsJX?$*?6=+*n(9V1NIp26)IGz0kJ8cU1m_~mr
zlT{*j4%?8JTlb-F2<18@r8u`;0q}(7=lldi_-hlD>Ar<*uXfTB&(BXrd70z9k}w=%
zQxoAh%RZ;cI`dbDYm(Ru7j!S$h>qtM^U1EkoUMv2k+xU>v-TU4JQK#5Pg@LIKFJZ+
zg+6G@?m*Y4#o(v8jI;7zj?mG2Gv}H=kGp#(4dl)KaQ2m3A;zQwHcx$zcGF@x6=!RF
z$6UpYb6R=9#-oq?CaF-5%g5luPL|I($1)7-@3CxhAH-Te1<|u>F2QfgvCW_DNt<P;
zMTRG;PLU>(JC*6Y^F@&MZ#*Y*>TwxrZAV=9Jr(zUlO&yB$>olkDGc8vL#1CXfmHwh
zAnb|^%USh9tLzvU?mUEyD{4lOi<r}se8rg!5zx-9L*SDXJ4cUF=9YeBzRI=g^iGL7
zSz%X!a*XdDf7FxLv=Pu1>yjby&2+F?TZr5HHlS}vCi^U3gn4=rRLWdHyxwo;?MJOc
z(Vcv}@NXM#+;)@8Tg7r$?{c9}l!Jl2sgR^)N*P2Nm)*0afwKm=F?Yv+$zn0wUYLm?
z`p>uq1v|RHc>r^@|Do+9SsM6o0po~W1*<obbeG!>Jl_<JnI*SSHu(!&%haJ8qh4}O
z;p{c>tl$r9l&8PW$dWB1WXSYZ19I=oM;te!65OUrL0Lr_M)*oV|6w_5xA_Wek9rFJ
zRf&-PXEydvegMlRNzx-Sj2q-zhcDSo_vg*?82i2y9*3#Zz^SV6gI6HcTNw`^s1-`m
zq-ktfAle;dd#=N;U>0*PUEXd<@^2)8Qr%b7nUDs)>)5^jpA1j+89!!1Be&9%u}TBS
zL5NQwZ#{U6bu4G_g00K5oW6d>m)fetVd`<vSJtI-cE3X5B13A(TpQchN5M^P_KeP$
z%P$$0jGfcN`MkCJF@kwc?s|X3pw%{XxTh_BsG>`HKj_mguX@JYHphsG&b&zSsmlz;
zP~SfKH@sM;N^6uCfbIgew|`jydTFV=VCrnheb&M$M%BUHvUu={`^$BlK<Epfj-BsW
zCeB$CoO%hCtdpm$9CP6*Xp_5A%JgH~0jL>w1=WUIa(i08!hyy*4C%VcEzf4}f6bf1
zFg6RG(XC8Q+>FK*vG<_$qBPBYKAXE1rcRtreTTg0I!Id)#Eq9v#$i42ux#Ra7#IB#
z9JmhX%rk}{mxp-ASeLFn?ho>o%;Uw*A7(jGtdCdDm5JC{X`U=Rde5@nFN3mOy^n}H
zi%xNY35-j-y$YwUG^bB9O=((lEa$aXlbc~>N_Jn+rPJO%$JRJUc)b5A>e!zI)w4>p
zIyeKSy-P$HNpoWT{1{5)nUJE1YtVGMB=NFT<-$+p;}|Oy>gNd{#ax?vP9-rG+a2tB
z^$o``zK$*Hc?Tz6gO<5YC_T3d=j$`~z#T)z4i6W1ZrH~8?D@+tei@3bLvQkGhSeDT
z_%_<N|G`xkWr+86D_U`1g~rOWo?-HBK0LXIF)r5eg=V45`Pzi1LtEfkjttRRUjdsc
z42Vaq8SVCw!(Gm+AY@4<9>3U!m)LHlc==CEo6*MS{@}o;xs0Fi{uPGKFM-^VmfS(s
zeaIT83=N7~@QIQnQJpJCMlpZhx+$tu%{3aFcYo%+YR2$d>{&6e`z54wyu&4PkHN+a
z4T!kDk-IheEk=HS!<@1d*PXMbYt|VP^Q)RP@X=+ysNw-6XMf=n)AoapT|YmTF}Ne$
zdvN4ZP2#0v%HK+~B$aEIVg8$WI3!$yI_d^7Pwh-D{izk~YafH@oHwkjEJZa#T{v(*
z4E=N1ch6CtoBZ=1>a+Rf${uO5Rec$_e(S)$wmu*|{}}L{Y@cN`A5@gPK>kf6XVB3H
zI!33NPgsMlz8#G<l2_oYmLie8#T?Qb{=xeAMljWUhjVwZ&**$Rdfe_eXj#WYTF@UZ
z*0BH_md<1|XET=tyeZ20Q_Kz>#9B5#Q7<V*dv?zl{Y!$~{n);2>_|>+xgz(V_cthn
z>eE6QA2_|?6+|8=f)oDoL|hXF_760u&hT6aIdBH*=yYsnnWw30nk0~p2D?dpXg4AU
z=bz2Q=n>)Ed&6s(6j=(53lhQWkQ^VARtjoEXF-X8@s_^5=RD?kVcG|Ee!Ig2T<~Ba
zypgmfyDOexn#C&qLP0g;tnY&9ofRw>p8!7NhJnCqlZ)TH$9zP)HVxELqTw5~F@L)O
zz3wARFRf7{>t(&M*`^lU1Il<`@gn~8+j^*+bscPCuR?<SH;kx^=L<*a(4_;mWaLFF
zGC8ISBX>4}-QNfZ*IocEhHEiNsuS2omYw0O;cj*ptjN2@9G$lzbNnEzxoJUi29B~B
zqAu8(IHJDN5i}}44>9{cupZHGmtH+t@}i^~XEKLx`Cu#f{?_Mlt_l0S($va`y$>aa
z!m@}4^qszp7fn4c{2ri8wtp)}oj4V`GoJahSB2w)^+}MoI0oX_dwAO(Ws+2M7tgMc
zAOj;i@XMs*IN`sqY|o-aB-UJEOerHsb8_Gw7E6)9>B@}tG{xm0%ldZf%*7^qBQj=^
z3eJ4`0&9+h^MU3?oL}-UbTFESH;xP;Ud1AQRF)C7e#cz)hwEW_gECP7Il{^O#MWCq
z{Qk~he9>Y}R+0~x9P5gQ&b<KX8xt|&SPVPgEzZ7^DMM-+j&ak1cw8R$24fN<@zXp>
z()#-d@AyF$4d<JaMN^n(D{2F;@;VK?P8e`S!n<guqRg*Q`;8vMnj!a=GkRHQaCR?C
zc~OkW<-y`g2y}1)H>(76)K;WwU-c-1u2RvMZ+v4)v@q9fGzgZ6vj6t`gRjA3-nX|-
z++|V>-DTRep!OL&qJ|`EMLcsP`Qh`)%*nR%4Mxtsh4qtd$iGYzlKg_r7(Y!D?opVA
z4Uy)UCp!fTB|G>ldA-=-<cbyw<?uOLf;1eEq(#5(L9=%k=>81Fkm+4qShX%0yFiPa
zi7_Q%6J<$vml5u3WVw~rZQL8?#Zdmi?%#>$(62?C4{5iBYWxM>M?S;lH<vN+?i%!7
z>w%&(uecQd|KsRP{A&EZHhh`~&6>y49A85c>O5;FnH7>L2_YmQb4ZSaBuS=Jl8_`x
zLY-&rBvC>Vnj}do6iJd$@BY1i03YR?=h=I$`@XJU@U)$s-}T3HyV)MtIdd>MS(ye5
z)&ahwmO$gaRb0xr3a(F$d0Bq+gZN7T_f&rj_-V)R!q==Hr@Nicb{WTY$$tXTHrChd
zo{F2=^~sLJLFBquIHr6&E^&osURalnUp)+IlhXtg6n(?S)K>1&;koFt^*#hIw4(Q}
zpTIv|@<g}lHwc2JIvKG(vwd?WuhBXgUIjWa?j;Wszuds!v>p7ydK=>LDhnhL6EVB{
zJHND{0o8h$r)laDtb5>q!`U<U4lhgdvMYJ}#7Mq*WC^+sn2WN0tinFlS=;dDFQi2E
zb4`m`M{i1^M8$m&ed^PIzHC>XVSEuU<><r5Q-ewNf@1cZbVhWwhfiyoF*c!<?~=2k
zcbWT6qe6}5US?dSz5)zdOz`q{9s2oeJq8cT<%PBXBx2RktmAA-J}$Q*W4nUTK71y0
z?(xA{x#n0s$CPZTWto=ZhwyK<3`ya5N!NW-`u+rSdY1ZP?<FxNnOR_k)I#Vo9fdaQ
zexTs~Y5s+V2HhTB0k;^Jp?q~Q%dT&Mh9}+V_M`-2zx?FCJvF4`Cq8F$wkUq+vKpv*
z{Q-s@_<^3y7F_?8;rQ%=4G~^_BFcW73b$u!kxTN{^yHz<V0Z2yS6*2KD<shn@?a*$
z1})+;W`BTpeGjqmS}>pWeh)@P9s>0vmSmT#94+&G0xM?SgGXB#OX-0WU)T8;jAqG`
z6&KFH=-`9c{5%dOFJxK69z9Z2-3njNJOgj*SQt%Dqxgy@N~W5y@6aAeL;Xom`FsMt
z_h}Q?fjY(zOyWQ8O2M1v<^<-;;OEO9K)N5WY}7|^Nw=au?;6snC(dARuNSVXQ6=m1
z6-au(I_9xE3AQLpF8Y|${FN3&|LJG`bK+XGm(J$WkI7=-mEWMrW|YR>ER!r-hD+qS
zFyPl=u0TzP#xDKL3ssZ2?YH{bbFmf59~x1`UuXHxe+$4z&Wx_e=?0zMwj^Aw4+qkX
z$%{M{8d?$pp>MB&>!{PBmc=nJ%y}KW?ocBF!O6S5529I5&Wg@itxU8sTcGPyCF{~y
zGS9m*Z49*n-))xgg>g(%1RY#`+9ufTszkm%KaCBt#n^dcIM?Mo5idG3_MGGmYTQuZ
z{wz$xSkt}y(@o6D!1!2kH(%mz)TSe4tcb?BWqeG%2@TP2<~`d^g7nDiu&qX#NG>eK
zh3qr7w6+{(on?M;-{;^rv{cd_F9(yCN8z@wHnh@{-Tm9vi-LcRfxHEEeCdx=zHN3U
zx8`snHaZ+&9wR-fKlC47x^)QK1b29e)L-;FBSyW6W>nA=hN<HYW3<ma2sM|%DXZRt
zL*IKiAU%lIn;nL6x`RmVYb%<&jCHyjPhi-g1*i%7WYV*#5bUOjC7deBEdGJ_ZmN^Y
zrB9*v(>>nX?f~z(ryU>UDU)z^e%;ma8*m)!*)BYeea+oyd`tw{uXpn2WEE+9rXJqc
z%S5kNEo_fF4Ye|MV!N>h%uqf9u9@>C5$mmRnqo7zgRyvr4ylH3Yn4dNAja)~SkE}K
zQ4)<ElhK}w<f_-mQ%SHHjSrpx=ltZ!$=DZI_$Q1PYE?*tmu7I&(!%+T?Q$f#a}0=8
zM{*O#wqx5SYg+i{y2Q*|f#k9bgrDtX&e1Ruwzt%?{=rF3_(h*@l<MaIcHnl#_b&`g
zW?sx{$s$iH(!KmE<9rrEd2=PWSDO&~oqo*U<OBAHf*^A2ZBCfO_)P0I!>}V!@b0S=
z*?-4^L@-z9cPrMpEVG4{*HW}jx(KpQx$}2|BXEoTAo4&-m7e=xNn$qo;yn>#uQe{=
z%k*m?;8Z-s`BpMFk0bm^&qMG3^7)$jIGn4YN1py1j4D^JgQD_Rev#)je8h4xvwG{<
zJs=&Y&R3x|cIGhHUqI8jH2BhKO5IuB%&F-(WH3)d{D?Z<>hlF$9&ALKY;S{nem10d
z4C0(#vAx<hS+Z&c>qE|N;*$Jtp(pF=D7SA!ec3mBaN=*2*=NF>Q|crqN0C-quEVjl
z2LP%^f_Rv#<Qq|>Yp5dC_@qgVSBJA6)2qDBgz;eg$B<leFsJp3s?hV_Sgdr)fOl2w
z`EhTxNae0J4)w60)tdxFeohuH*{Vr}qh>HCJUfqXJ`S#rC^u4QO;=a3&d$u?yoGEn
zu*V6ApWf!SzEOY?aRbZ+?8?28KMn@*EHl?~Mijd0Cn_~1VQB~JkYvly6}|T%gPlEM
z6YjC>$sUM|{Elv$cc5USI{5B%f_<m7iAMZtZYcX+IRAT$cdQ!l&>=Mv@?FeF|EEa*
zn_r85`y08U^^34(xG`<IUk>7`bV;7^JUEv64g;p@z_L~Cs9T#0iL7Jf^qz7hDHc@k
zwt((>&K!9zyZN3kdeo^~1t$HJg`jF}dhoz~C?32G#(t}0I};z2Za)i7QeU~nI~k`-
zxe2o;_i!&lQ*p5bah9zF`m=w56FW^Qn#)6`mVm~Qd+>amF1eF=AKkR3VAaS(X!$5Z
zW!rVZE}pq26w1JEl@83gXT-Q6?zrh<Dw;a1$B&-<Xuoqkm{(?CssZC~Jah!ZFF!%t
z7U$doryzOcX9!i^4K3__RCzrGDvV0FF^mUM{3HbXGZ+Jz%LIM3b)b2w5aphkk{KdT
zbn`BP)LlL37`Yy6g{EZr6lLnSR|A#a>C?7_QQUyTU^2R_0Y`iFVwYVHeo?H)@DEp^
zb*Bv}npg!zCpTiSM>ZFE@+A(7Vg16b;gYTytyn2t4J25V_66>PA;$#da*quyds6}b
z?pxBp_9W)gXy;~`%2Nekv(B*jXjQr#hF)P#S9f2WS)xE1%|GzG?P*k;P|HU~U*&~c
zcY^TdF(}-2h%3BX!4+#T2d%?|{J44>EF69jy{fN+rt)1BcrLyhX|Mr<T$%qX%ZvyY
zav+pT6D9cHf|T9%yl1mIB=)FNzAhaDo%7*!gc9+rVa%)k2fXo(K#<S9jh|QC#TRST
z$@3BVG^-;X=IywS#dDX!;zx{OvQH13ZkSMfa1guJ?}Jv~r`RQ12aRvaB^dY>ebc<4
zSRsk=APPa$l?B#=BGE!ifp(Zkla%+LdBNP<`Po$_n6Icyn7IJfu(I8!6RfAuk;K2v
zV0#BUFOa^`1;&j6cCR17?aD}iWB&P!A5g^JSDBC!{$A2;wigeM>BY$Nqru$4kO-=e
z<k!5}hDT4Uk$Isy)aa2m=^396e%EUxro;ciwciQY{lJLETX_K;Bv17Z7Vv(_M%;=i
zL-25HGKTB^gG(lRVTzhMO*Op@eyn%sZ+jEtHH|UrkPH>}DN~0v@1bvj5mo(YMpV~W
zljTpfXh8H&PJ6~+Xe^Esl}8R`_dHd)L4olhGYIT@nSoE)-DCMLX%cB22S;lRY4e{y
z*mtcRg2*NyQ?yAN8Q?|Vd$4b8DcCos@ZleiL4n>iv>%A%Q(jC!_cRNV<-vSi<Ct%m
zW$IU$jKg_bjHqB_rsPrK5PUr6Cf;EE&-iU0`9(SVG0`&%7cMd&@*~>8W=S%nCkh#h
zcod{O?&qUd45E+MYLm4OwTS2Lm2i4U2K=4bjd^F!!|DfYH!!*wal8q*?RyWV%YTB_
zZUx%8;6DGToG~AbcZ#%*bwOW45RRIg!utG^ctL-xq^hb28#kGAc0ui2g?~L4GVdKa
zwO!}xQnuqlm9Lnt^oIAV@#6)*RQbSaQ<|9^4$GhS<Eji@Dv6548s^a5dqSP21^k9l
z^Vu_|MxQy`Ho*zz-3a)j3)M-A<Sbl7m5-&E*`NS!inkzS$Pw=E?W<s4c#C(*@W7h*
zmz-O;Eo1qJVb{V>Q00?=jYdVNFDT}xEuD{LGa4}5^C=wBw4>EWF5uI3A<To52{W#L
zhyUUgiHi|CLv{#6fj`;reV;lWE|eo)6OKZ=Uoa>;p1{*1+1_Yv5p*WZ<NXpCcXkTn
zW#=TaaD5O2z<GZ4Ga(IaDMZu9*6baoPLISYQ$^uujElJjuP+&s*yLIqz21Pl{GvuY
zv7C>xxDRF9nJ?670NngPfn|dQSsRvxEoBIQuS>AcT9$6tIs;k7d8n_M#R<Js`D`{z
zzqrhpSVVq=_Eoddk;<ZL_b6_EWIKjBuESbW#)5rzl^;8;1E!C#q@5=eu&Oo>oo1<0
z33E&oo?RhQesmks1sW(0cN4{yE(X8)(|qq$eVlc)9E!tNqw1klbQGkqy?`VCKISx9
zU7ZYZ#hSFU^90|>_!nink3z|RvH0x+n+dbMeb<LvT(!`WMmD|S(+<{xy~ifLd(cP-
z&Y^tqr1`LUuK{)4G*nWU@|Ryhe&g<3dFnN|2ZFzvVflG!D$h9Oerd-f*|`<GElEP9
ztL8+$ycF^xPVojmUty{I7Mzin3}GkO9r>{ZwN=q1cF~j2`$`(OZO=cnu<}LIx<=@k
z^#pr1j=<(#TiO}Bhj(|iBh8Y2w7$l=j0;OJYJVBFEid8fgMXrDr2wxMC==SGO+1IW
zV(Bqen$ON?H#J{l&X-RpSh+oanX3`;xMKo^_Z9i?W*Mlg@(EHLUr9z3et_RcQrW#o
znl5)xrk(3XbDJ8?>21d9DE+Sr*SE6WellaEh1Y>?pbk+S`<=U05{+RN%r&n(SERIA
zj`Wltfxnxb(0UvD**!ZX7GwIchB@fw%`=9&L6^Y7;D3A7V-UCL9_(MDPdaxx^B-7N
z#5Hj~W?KcqCUO)ze1z1snB`8d%?5{`RnW%n_Q9{C@M48I@zv9V#;=(o)md7k+Uyw$
zy(i(oab2?7oU!OMWnjib_IEuvj*2O!VDd(WJdIUFai9$KDe1;?v!_t#tH7(&%F&oz
zCNw*-h&x%E3LYEZq2tSWFimA41gJ!zP5E8uVtKmAgR&TV)s`>1$l+|p$}Btd5sZyl
z<{>(q^ZcsDWn|}Lr@0x}Y%-<-`M;7~y?3yT9EPI4msomhGQ?KQgk1IMU|XX`g8Wv)
zxV1J!#z&WY9U`O^J5TTvc?A+@+7DiBQRp+$1s7!(V6qIGJ?7rzueGopFV1$}?t2h|
zX3Ar6ybcY_TZhtBJJ50a5{P?S1Kx5yoS|nc&eqZ(cBkgUX*uSeZCBuSjeUb~SP5UZ
z=Hu5Xt>DLaM&hJSoRwAwe~%|aZ^>k!^Hk}q`P=X@%jLIQ?!|xcT4d<3xoEL(0DegZ
zQxEeM=$g&4bHh~mX6r2QR3FT_WR}1PdzJ_4>EqA8uqG-x$02Pl^QTlI@8p#LexH@G
zPbLrc{gonOfrg~e!%Xz#g%J^if>ZX45!?pn53r%9j5%Z8pe<w93HB}*8I#E{w%m+P
zTCIzD#)mnN%bxIj_eH$2Q<vPUIl%bnF`}~ZH(~klGMq8}9HzxaL*c7AqDGC=(7S0m
z<QjNFtj1q1C^`;*dRUMVLu^rpaV{?%(V~%IinyTi1ExeR;#LO8k|ghmj7gS*U*qp$
z$Q})7DtwE=L0kAC*@MX5nMNeDT!*GO>u|RsuAw+0L!#oXgUeso(BQlt9C~CqiiPT|
zLuo<%QtY|dk?(lHJ#CO<=NOGMCpoh|E5-sgg@Or;w=JIo*VNg(XxUXr(&eDB#Frn(
zy5)_2OZa1_RcIm0$BMrU=hW9(l7q9V!TpRTEkAG#+*X}{+$~2TJfsJbCoreQ=O$eH
z(ujb<9&mjpMfK0z<(?F45!rdnImohMm)D)d?_f<M{`-LQY+2_np^r1(GziNgKEvyV
zI+pEy&hKM9*@4%C$=|=$(2%|rBe$e;0Y{wSS^$q8!|KsXMvrPZws7x0Sd;NT^yun&
z7olK(A*^KF%F<nz_`>O0qLwvJa7>{x)n)UB_m7on|8pxW2(E<U$`o)sycQyf3QoJH
z2sxvDFg(7V<ydZ_uirfA9jO9~)XOmX*IrDi{K<Fp)MAb341a3a6KpVNyeBp*Y`g!8
zckN%t2cMDS)h;CAYd>51$xnf7Iv9@6pQPf;cqNkaX9;%R7V;;LnP75E587Jlvgh*&
zjGg?CcMlv)EHZLHJl0y&@WKn}igUQ_njB5N^BI)WZsXF272w<b8~xdy`u!bQQaEE3
zqiDu}II*7_+$>8ivKL~F-vvIUvKi-@4#&1>Pk8&;aoklUmTT{28D7@m4hdv};?vSp
z@Mb<Y_Ki6e;BKeN(rA$NwxD)54sunkFPKNc2*no5L}PWoK=D0&Dn7Yg6vS9&vzq_G
z$=54kyRaH6F2=zNhYRTRs+d2$w*Xv*e}GHYA?TNU1^oWmbJ5=CP$Yd3;#zZHh5Bpe
z_xdhzl50e-m8w+3NE1Vo4>0DoG#wF|%N%@nF~GH(`}RbRHoqAG(bY}t{Qm^n)?MdU
z7=6R7?>r$mSc>M@Y{A>v)<kB9I?<fL{CAHxag8D6%!jE(ZBMga(O6AR{IDWlc;XQk
zoqGi?vop8+?-VGU$?|}|OCYjx9F*n{L#Ib4_|D)hyi@Obn9uf;8Uur|qnPE?UUu;R
z3C)PBjjTki=|1RRGNj)=nGu^QKBzR*hWO<z=5^=gqf6>~%r#P>p1l<)x66<o>x;vK
zOKwB6w?6e&$>U~RMf}3DR3eA35W`p-!S4e3u*V#JWBkQ#H5(|OJOM3!MnK_`1tP(!
zz0SQ}Ied!01s{+;1ScID27WbS(SS!d9=6ZH@gsN?Cft;~xhhAxMf%jQIvB<;WsJd=
zg@AS4*w|Od%ilZ*;g?Fl`(qc5UvEoVU9V!IT|Ry$MUW$K$C>Gx%v<S!p7VBNz(zy5
zX^|ySB$@b-amuUglAz|mU%ov*2ki<kLx}AsuFdx-_jr#XIr~S4%4)ns@w0H}l<X9~
zrFt;kIrASD1eU<k^?KA*i=8uk?Jy<$4?k+05^YR&WW9*G|33%1$vsC`y?>%uC12RF
zsspXh=U{}Y0sP{bUsQDv5kE}lW$RaCK|(dVV-Lq-sn;mVkfxq;E>ORNu`dl7FQC?n
zTeFJIRFmhy#diWyv0RNyX)xzprwx<DI*j23@n@Xk73?5(eF+a5vRLk+N1L^-V9>$+
zn4q9f%bqpEomN#ExJ`wgZ(tnUv&t~XQ<Iv0Nkf}i@#s{$0yL{s$&EQOB<F-F9dlfj
z{29<DqQOD<!%>sK!)++EcjE;IWO%2>G;ZUe!Q^1|GMsm2H<XLMVyCW{Z>&8fF*NSR
zD5>3;euy$weFw_K35js38*jHqneMW>0*5|G)1hxaf>ZJ}RPtiZ=<_4_W1rZrB~zZ}
z_A&4KGAVlgwLVp;QKp+yK0@2(c<${)Ez)@96hAG~ng+ym@ClX=pxN~dHeLz@mnkQ}
z_gxfp{b3xWjf$9k-5veN7k<$Yd2*mtp8Tv95Y?0H9ca`88gk4RQFejx<rwFC>lH|1
z{j0!vaS-u-Jvy>%*_$f|8NYgfu?6Kh3wI5gvF!x7I#l!arXp_7v{G~)cO1uji$HO~
z6v^kA8*rN2Rqo<C9V+G>CF=@giC*;yj67V=rR;C#{d5Ln3d<i0Ogcroek@_0Z3mbU
z+5={NI^_LK6RH@Yf_dY@`QZ9r=ytmZwzo3IdxRpruG)r0kq^;YSDoC?FrbCUJ9wA(
z&!PQZ9{T1z#$>e`)UG@Lu?yDoSp#>$!TKWx$1UPY;{w22{u}?d&k&Z{jz)6%7EaW-
zfO(8FzVXBjtlE1BHL}igcO8$R=$kf`J7Ys!C+qPmGQNRi@lkl*cMrFxvpvc5E=YcG
z5CVTCv5bKXd{@^e|4zw~Ef1L|tl~B9d#p@EV;;cK<P!EiSdJQLdsq&23U4SbfUfC{
zC?6P#UCd47%kD`Uo+aF&0rpNz@8|3tqxg9XIH*!kq5kcYp`iK$uHMCVqBVA4y08I`
z{7|3)m$j+9awTM?N8tCVy5#AvaHw~m2E&@%!RbIcWZaUYv@;%aw6ehF>_5g>nupo9
zYq@vCjyPH!#n9*oxY=Y)>ZC2PEoB*alnsR^V^}s~Qw#`<mgCb8hSc*>3NDy&2%AF8
zX{Nt5*r}bzo%)QuyKfi<&-dgW3{fKX7Ng)*)mwBs{|uVazT$)Xs)Vf2Ccdp!sQr2}
z3MC!<ji-!VcWxnXUmC={`p7{<=vbV1DG5JZmM7Is7DT98EfLO8M@2;)>OXTh1YbXl
zFSKol?9EXy=#>=B7^+9L9%jQd+f+WQ_!_2krgEc=Z5glq4>q1r#KQG{{OTZUJl)Gf
z)Qw1h7S_9R9nY&>RHGr)`{32t7JS0?VolSGs4L@OO*-oj+w&M#DCoP$X}Ss2CFMZH
z$}Q-9{1ZP(=MaijmvGlYS`lY!FsFqgx>R<<l!9AORQ3l#hW+3Y?ikZcZUi6A`o(&Y
zQ}OCOOBxqb3@>}{qKPTR^oRans`&sD&#-RPTWuO*HxIPKFG6Eu4X=Mj0OuC7nQoLO
z&Gaw;!)JeB_x9gt-0KYcRn4jK(<E>lDuQ_|LpxUY5pI2|PNxQ_68{~?FsW|~>h5bq
z|E1bs!Wb!mGk!qqq5U|5<+s#2zCe!JF$g}}h*8lJD2s@K?B^G`?+ur*bUfQnP58$7
zD6PeVz4?$mj_r@mj^p2y>yTfm3go0qAOx4VbN5^a6Y*)9pA<a}<!^<dt7xl8^6xwf
z)YgkUn}%R<-YCYWk*AjubFgr+80^@-bb1);HT;^5!QV`|w7LvPjE%xQm$ET%QURD8
zHYZK>v5e1PNGq-8<L$dv#31<`Mw?8>omVZ$_I*XD%kofeQetQ}x(-Y=f*J=Va$fOw
zV65aFsIN36N-1VUGL!Z14W@~D)*OIrk9f{CQbr^^G7|cCO3?M4H}BNh%KuoSO72R@
zkYu?pV6kc+l<IH9;bJA4I9ij8y%G)lJR8#4>&TBC_m%Y(C-P*Q6&dr+oal7CM~$P*
zkJUQ_6YtxS^h*kKya#h*YO9mTo7Oaa`x8E-CV}OEZ1C~2!PNWLc~qJ#O@!(nxM`c~
zIKS+vTtufOq(4Xg(o!~;`<4bh0k7ba@>$4<i9*jki+I^EOK=Ft0joczG^d-L85`tj
zTaqo!Km7+AS~5U-ax?4!ZIbi(7x*?w!Nkf~+}tLl+4oIhM0Ej-N)=LlZ3!M(q(jy0
zKVk90BAj{RJ2w9shp}U)f#Sk^e%Vp>thrvsW((|`@sfi1do@xpDGz)UdLTB-hHHMe
z2unFL$cPz>o_ET51$}cu^Q`IiaT095aUSy5Skj*kdZd$etSU|?qG0|@&PYRzgcaR_
z!qOm#pm<sS$k=GS-o$d}r+s+Oyw~Xc_$7alIS6ILrb3c|3KVvyOG18mqgUcH$kX`8
zd^2pO<a&}jqi982p5?*k;kjUDr$NNacX1ZIJ@7Qb6Kq#2lExm2)1T>)Py;y>4!tOP
zIZloiZn+}Td@D^;{tW_SyU~z$>?<eKz789wPy}NgsL9$5wE7ChZn(tm$ClV=V?&ew
zuEbs0Dl~DCEmdNy10ikaoOVWXy=hvYvSkD`gvEeFwE`A@(xAK2H()cl0J*B_bne=H
zIHK<lWYsidsk?+9z2`SRZ5zO?2^*Q;=p2mK8$=FFio;c2s??kHuLJIV=Y?10Ise7+
z7-YX6B-ana;mbF0!vQUtv<krV-$$@0_zUvhLfU3z1%h64XUieXwQ^KLv^8lv3|QHc
zZ01%IYnJ4z)!k-y`9yBYz6&s<{vke_u1eFponS%mdDep*&dag$$GgL<v-j#QNY>>+
z;k`v5bS@VaF8qa_8z%DMf-WfalcBEdU$}&W<*+NI3u7}n(V*6rROHz4e&e=)%_=jx
z;a3ZalWz0rAIxc?w}&V-;5_U6q>F?@_wYZ;Wr#c+N5(y5VS7DlTxU&>GIolqVui%>
z^9+9YcLUOXLlxg|6_Bsf_2{#WhGd%fI_LetnpOsCQ9P@U0=XUeV!0KP+uBOx($N(V
zlsFwEJ#Qg9aTT`p4{*Z9EBxQ)abUPWlfEu0LEjmwFqCy?lq+t-j@j}=*yX~94D|#1
z&xDI*T#)@S0<u8r7T7IW1~_X3`+2Tn*8K>m=o`nJooW1cZF6!$>o^K;`NPe9R#Zdb
z9k1_^!xgHzh-NKng4qj|$np;|)a_(8SkUj_kW>YIpDshmzf4R!R?65^cO_9vdLh^O
zER^^jL67jQcx}8ssmPtkd9FLkZS*lD_5;tkM#lm^<+UAenVO4}hiMV5!(U-Pn{h5_
zP$I+XzhQAW>m7J}2X7};s0vSI&Pa1i8F`i4=w(QYM_1#E0abeP5p(eFd<`kG2Sr2V
z*c>|`84~_GffDN+v>BO<o9!9jrA^GsuGWTP>lILV<%Z}<K{{gv*}~9ARp9wpov$A-
zg{P%QpdwL=FMHAn0{<6W;W1y4Sj&}9QC`N&46`6(jq`9+#$)^xtVnOQnUJk_!Z1YV
z06+7)2@$;0<_a_CN>+GH$6*7q7(UYyL#YUqOO0q?1ml9|vE)$JS!_MD52rA;Lxt&0
zF6oK}MUxn4$Q?wlA7f6px|Lk@rf@uU-JFE3P{f}>N+c}qC+=EPhJxGdY%{D=q;YLK
ztVcr<_U$EfT2JRwTF-LL-cw;rfgWk|kK|+Tin&V;yCA_}KANyDsw!jW4suc=OGSC$
zl%9$*p~fU)Ir|4)+yXNltY|jb4DFLc@r3?;tpCM$V>@giY;p<fb_}Mur`&P;H9hi=
z>Jya;W2({4JdD3SaZO8I(1`KnmhRn%jc<Q*UV-@_(9O#i$jgW}&Q8Qf(Qm=cu^prS
zr9<&hfQ?IUqw#qUSm7{;CfYr~sNSs@Qv45n6>ng<V;@RmDcbj*=bINU0r7he{%=+|
zw5VFrrRH}*TB8LD6N7luBj4GX`2ghRZ9(}xIiPUUh<N3w(b@@8H2csDE}%OXV`u*4
zD^Bg>hAy{9d-{qGSZRSyM&3}&{Bo{pgTUNCNGjr-xF|oy$gDbq$K5Vq<G*G}F*`E^
z{PIU(dzHlejTp!7PlY{!S@_>qIhqkN6Wh-8vUAlw2w0~Fd7ACKeP<`O)o1Yq62?7p
z^hDzahA48irXe4)xg(i84pJUOo=#wW>@8CHBORZhIPbEg@NbtyaIZw-*_Mk5DZOAS
z?Ton>c0q9TbqLnf!E^uQ$>iWOcxz%Mjux7efjv6pqo)B`<z_~oW>lg>-F0*u{~7eP
zq-e_c0?uRpEEw%LnC#gnLk=Fk1)}sQe6;>2jN7h8{tog3i=ZSpwm_e_tc=2~qXVIL
zb~q>eWWra_c3!TV@ex@zO;gpB%o~`F<0eTH@s9$2!MidHSKo}g=9i(9uQ66!zKfn_
zo?!7-mTFJ3#GpexSoEkLet$oQKV4*qi(wg@|IYGM_r756;fo*`F_d?m9V2=lBu_@p
zO~tKIOVR#A1WcO|2^E?g<cmy5&+AA~iQETef8*e{nJj5s6eDRm#<~!<oM3-ZEY4E(
zLgACgIAXROwf?C{I&U>{&iyvj)!m=*E49hh<qvRusyxjiU6^}u2TG!@LGc7JJlLwn
zm>E2zF$Q?*o5dKv;U2fvS&eK_lp&{o)S$<MS}Z*6AlY^FHf$Z(12xAhx$A8H9uxf?
z&P&^p8ac}O-ye*p^WQ*9|47)x{_HfhSWe^7T714viKI7;f=iWApw;{h?Qf{?DN|Ls
zgjc0(9$Jhm7&qtOkrbGgP|oSEIL0k!ySzND@$4DY3fcKaeEc<O+8gy2)$ApBe6tGe
z3wj8R-uor#{-3#a!}oZIF+&6|J-O38&!NX51Ig?Q7_)f=`c-YjY7druvpIlcGd|$D
z!9{Gg9^l;I)`|uXOh{thB^3EI!sm*I5Ps+f{(JEkgDo<lz43p1p-xd&=w){9T*W87
zGsH(?D|9N&=f_sC4$MKuOgNV$5l#GpWA-C<_P^oV@&uH+7?H+;!8Fq0DEGuincTT{
z1-$fbqE~qvR(Q-or|0Id!Qvb0`)<e7`#~r^z28~Qw;JzMcw<id1Jq&}5!0o;AW%38
zjYnQ`(Ft<c#%6I%lfT1thtJqJa=&EI&qo-3xE7u-mLm2u{)38CPnh)04&zmJqHLEg
ztyR$?^YZN>W_29~-hT{F=gZNeAx+RU;xg;bzu?Q==OPZ<2@`enN!2znC>6<(o0HT?
zrkVzIIPQh-U93sPkJY?iRWj@C0=O;#k>9!lBJr(#l6AeNB;NB7uejh6-#yI~dVQut
zk@7A0K3<J1IwV6@|N4tdH}pXIqy%2bo8S(XmHseVf{$V@LXATkZ+Cw$<R|NsX_Nc7
z&!q*JUVe|?+&dbb^xyKKGi<=`NH{#7VM#}9xrN(DRWUBaOT6V`NHbGEpvFz+$W7P=
z+fAOr{Vn%U8^!_-eSvAG7_0BACRH4C6o$@z2lGdn5@Qt$uulu&D#NeBzG&7VVDF>Q
zgZ{8Q+>Ab2^A!r_HG=EgIU<3r2e+c@9Nf8hmvO0%a1~a-HQxvZSFg>YX?K%2y=pD$
zIruPMWAiWJ?z<w#J$k6sWsa_aCpr5V8D5EbglM)RbsKdCym<wxwmuTFw`cK7XU9P3
zdG^`S$>802E&9?_g$jJ4V3@HoJ`3+hk83I52lBKrbtbxwoyQNma}W$46@XpDJ1h>H
z059Ed;;YamtTNe+l(}A4w~a%UwiG;KE>HTrYhlWqpP2HF;9AoIte=_$z5)RTdi}<7
zt+%MaxJnH+rMOx02?EL^p#C{yKSd*?$o-bsW>>OamL6;t51`=DcIT$rQ*dUt1*yIE
z6HTP#$n;~(MYn7w<jEJ|hcF$|>7;~T$7h3tokLttZRTC{A2BE5IA|TN!um@|*uUTb
z#A&o(tI9<T+ddb2A7A0a%VVLm@dfAmS(A083L(v{4ASZ@K+@oIIB%XZy5G8l{bT!Z
zY+DI_|IdW<Ml)Z+UorpnMFnhaEJWSF@8~~iBHVG`kFn}a{P#Wu@^gm;4H2Bg3e!|f
zZ~eqM>aB&8xQDzC<Nu^o?T4E-R&)z<?znz5;=B`Mxc1j+D9n(8tXXYv?VUO8n_G)3
z!ulY!AsThzHSDR9V%^^q&g=1J)L3u^oTjPJJDT;7-f|Wdt}Bqp7l$B0kO__Jo^#F5
zY(RmrYrKzM<<|QflKKPZz<4I>=IhHt!iaC+HpdPhc$t#aM;mdan>?)=`3+7--9UFW
zJ5nrez|Fc!v_{gxFNsnhtBpT{t(gwlZKO|^CfKmu^8j?r)*;=h1Neb;$qM&O;6$fN
zz$N@D#@zY>FHP9}Zc9H`kzfk?CpF>6mE*YhhCB&q@r3qM!&pZD8T^cAGpPPnX5U&1
zHLWUWKRp-cFO#G7VftV+gn8nm=E9mk%Cx!qEGU-5am^~87!zkpJr~{Nw<kS>U7A18
za;kv1u6@UU|ENnOml3``I0p8-3^srK55+r1z!axy6rYF@6*;tmYu6>%e7p$yhIT=*
zRvfr~nkRA{?Z~$!Zv#(DDc(1%4Qm22IpwFdpff!VU6mt&92e63WkM=w_?)M&n#diF
zxQPPi`J%hq2ayQIFwfI^geNjBXxXwuFmU=L%b?7Lkho_UdrFC$ERm&mJ|yCg;nLLp
z_jOQh*C7Y)$depVJgR)Xg^LAAn6<%*ZfH3O0T%4cD*OkM6EzUN{|`L=`5OIbJ4!#R
z1o<#Ex^&A6Y`%B}93{gbQZ*3z8&1NoUngPnk#g`n>CQXqZGvP!16mf852aF*;Z1ZJ
zYK&;dGRYLIFl*xuSPDo>@IQz&QGk||T1eSAfSL-|&~^R*_h!2iX<Y2i)zKTEGt8ER
z?rXyMiN_#B?h#k^UXeO%_=3&qlVP=T1(<*;O)-q(?0pls8A%o3!19TjhOAHGQIcOF
z+JiEshIGRl)^q;LIx(q?ry5If&R8W<Y`YCpcJ33&^yv{`GnoQ^9t8<<TrOkJh?iPG
z#j@pm(%hk7xHlJa6mEc^lQ^w@Af$fJlesqIPEOiMlVXS>>*YNXHDB|Bp1qBz{QN#v
zUT=W?8Oo$#dkQ3(CBlEKmst8U2KNM=z@md}p1=7ypLM<v3hPoum+~~B?Dst=V(yK<
zm)_x5`#$iFPJzD9Hf#o>4a%9nV8&=2+PTFC;-vtRXBV<=sVz5N>nz5mp5nxU2Uu#R
zK!ZCixY#lc47C0Rt^<oPX2dgaxcUr+On!(nMpT1vTct$Vr2=&QZi1(hBEOC0A^aYk
z;a$G1#HiQN%$4-O*^l+hethc0((nGTM~OLMZ--##^;F1PlF7ZAYeGd8veY$9%v*J6
zLgq0IxXfmY3+*kbzD^z9m{)*<-zbp6%y<d6T$;!YHKZ|79XQaUO-lzGqx}Kka<=`2
zHH;1FceF#aH#8B;RU@%5F%|?(HF>e{kt=<l0Q(&}aLI2gn!@td-XCN67R4e+T5o^?
z<8smFYD?;Kbv{0lx{WwSf|DamXn=DlDE}LPVAT@Nm~~dtSU$n?Gs{*Ohw<^>w88Jt
zEZ*)z4F6Kkj=Y&@N$QKD;h^L$*toxh@`ZWulySMIEzL#2S6!4|c^Uk5RlwC|9=Djy
zb$j05fYyX#m|@a@4d*Js<-<D=E9c|<iUdq`>A;vQ5sKUPIeTY*MPdFV>`sZm!Uc<X
zbdbZ4wIBE@v*U1cwmFF(!#FIv8ld92G_Sp02)-`C5TP0g!~F)6!Fjh)P#Mpi44wz>
z0V+g=bp^L+en)|?9Dk@6vFEWZrj(oW&68^J>Et0G2wlj<{$^c*B@u8fzX26@k3)xp
zZ^36-D;#F;?XJRJEV3}AVRwFlQnEZ1uG#^u2aTxE#}t$GT0o@G0MmpN<))v<T%En_
z-uDl^hW~)OA1rCj_eJ1vZ7RAB8;}g`uY&X~?OcOPFBGuzw6Z}G9IF0<>n5GV!Z<lm
z{V2Bo58aoyV`u?>w!93ikO@;i?Z=|=<uKr^PA-2OO#B@0NEFSqVP}~!@rm1qt%2rL
zKAidO$2sz@35*G$&-QrF({M!#+doV`fFoXY!=@Q(WJ5V)k(D)Km);ZDcBvRc-#TOd
z%_Npd8p8WEsPo>u0S*3{&gQF2aO{NvkPdtUX_XzAu+5sfF3FN4B<@Cmj=Mz2I83oG
zWqFP0Vf<=`?WiWpczIj?VA4u${Cw>h*mHOJ+Y)W6vsysX*nVj>n<W<&%G2`9CcIPj
z5QBUKXc^9OgTD2=>x}`4<m4x)AF!tK^P8}yq@NQgy+gNvJ}fkpr>k2&Lgnq7@JKL#
zcWOG&o@LSSRXFsAn$hODuh4vy4bfsO{H1Suz<Wyw?2oCyi|PW>`CtL;&WpiNw?NpV
zFGsu$j<8&Y2y|65Q8dmUEe@679k0nO2lx^H-U&qIzBZ^8Kga$?A8hQsC{jvMCq5HC
zgRnOlHqZP9x+5E*Q0|{Z?cYI&vs{LX5s&%)si!e=d<j=@`68BHv_@OIb9idC5~&z8
z6%5sHpyYlXWdB?O_G<6Bn!r=2Em;SPG=wB!#&@WFD^F+KDT57DGjVaVDXF^p5`q*H
z;DD()o!=%+ug5CV=1|tvi_bv+i7voa<tUmo0B57v{9vyVR9copd+#Q6AMzS!AC1Rp
z2io}7)iN~9s1#dV-a~52DY$gn5GGF?M9z%YB!0<^51gY-ee7*$>Ebk~S^J*TdnzPR
z=jtFYz8eD~v!Jj&n)jUi4c8F`Dmb@-KRn8eUY?qc8k_#`s%Le`lS>vPuj?oGY-1Zn
zbuy27fhHA<HkHV#$AY}!dr(ul53XimjLDVGA5=U8%I|NYS^!{(u>gZcT!55-NK88p
zC~>2ZcS?))CH)3iXijql;~;s=dl+$`1pFqw<~rXR!hvCzaqCDiI(ZqPpZPj|Td*Dp
zPD<y}9}WYtPaik*Q5UBCc!jg}Rl~FwN>E4_@-O5pi0f1@FjE*{ER$5ccg2dJXgUa5
z*Euh<QKyp1cs8F3g@m!Aas4YcH&mJ|N%8KMRJ^U`eN>n~@YHqq=W9s|Gbi&x$pPMV
zlc!`?d?su#{{VN8La3fIbFuL_t&Mf0f2ngp<1^q^3wxg??BjnZWMN_PC_dxJKdAci
z16}SlfoHBXM8B*>HL35g{cI})B+SP7vslkDzL_ii_kbJXra(h}>|r#jbZ2-i27%MK
z{NQ?BKIQLlE_1LJKwuc_$s^}!z6hjL<w<hWVaQb73k{m>*bcVT*;$hmKJ35>w|87d
zQ6r4I&ic_unvibNBZZURaAjU<v^?r1G$uY0y*Q;$WTQ?)jVa}Z?n}eoPl@n;r4@;}
z_=UMEkBXcK<HH73NwjVr22Yo<IMv^RC=bZd$adyYTBc6&_8;O^zL|s8zJH+6>BZ@5
zmvGX9tg&$*AN~F`qZI28X@|SRX^TB*AGMnkJXz|jG5t3_oU2E~k5@}-{C9zVaU?G^
zt`=GCTE+TEe<kD7bV+YyBG;C7k2gLPiLt5^z><H9Z;x7&A^C&pES7`zy4nq9>S`o=
zQvknaZV$%3odvP%45l@r7*%Fz;R#Nc2(Av`uGy8C9XpbHn8s!y?Aai`7nT3%&N)0$
znvQcKZ=sqO%R=>B=hNqCz=}!1*upZ?(XD3GZelGex5lBiiV}DqQ^Iw#29Z%BJreJl
z!A;q1LI+$^&}swYY)weRVVxehaI_88__Y#dd^r#O$@}3?ygKopSdI67E0fsmCV2m6
z76uJk1V5hL#>Tm<kG)Bb_WgVY8o?@5BmOY_B5aNlX@KIlL0lL)0q>?Vr{Iz%UjOAA
zzS+ed(pO#P^?C(#_H$dhj&;%3`3xe3dW^32LyC5>JY$TUA5I!^9!!_E!XKqU#Pi4+
z&gSAKRAo7ZR|dnuS%t9~e~x0jL=&#lB?k1b^>V$|8W>^{$E{({cOQAyIh#C8WUr^k
zoo1h}%{kTBDR0g_Y%a&{8!7Ph7GpEc@4(N|7hvAqy(n<B6$us_QpvIk*t_Q}o?gRv
zdhFS_G1Hs`fAQjB|2eR0YU2INjOegK>5QqX0vZ!mafWV<(78pK_k3$c4HqyD{^rlH
zZU*C)9QJ^6s!y9-*5c#hXBcKY12bJeA$9nH^8{m%og&cppd04;ZwB#|Y7CiejE@eQ
zQc<c5HMQ-8M$t)bt}4qXcbL-qYwqL7Nrl*ET7ki8At*ljSTuCD2K`M9NWA+uE-tMQ
z`gVll){r24VxUb{Ctn2R=Na&0@J-zF)|x(89*K6o4>%72k5cvLFnjP=F3u+rj9AX>
zt3d^9TxU*hM4A$@`#DkJDNF9s&1krFTZ{Vjz7Uy&=n==CBVfpSmScC-1gGA=yuJG<
zUZN~vbFWb7<xL@0eJymPT9MIPbqGISi3}gGBtI-v>CBNPWa+3B2+3HDxw6bx<7dga
zJ~Cz5nO?~o3kBjIuLX6Bm8oTk3=x)=iy}}R8k(%}K#3BOTcJ#D*OXylcNy2%s?A5p
z>_GnyzA&+u&5pf8^6b~^;^5^47*hL#bG+pUZa-_GER3BG-)m4YJ0DLO`w~utvuFA^
zO~wxkhj0sRx-P_;M#i1xS6>`VZOYp4RwMJSuV-h@!5O^2WI7n`L=Z|nkPNf8r4@Zf
z5P#<c=kn?g_^rDmdV5rk^p5buQD@T8en%4L<hluF>k3HS?suU3<QAmzjD=Km1wUyW
z#jJx>&|i@Qv%2b_Gx!nj>5_mKI}J&VCE&CR`dH{>2C?&_AR||ezS_MXdZ#Ax$G$R#
z{PR8h#6U~d2|5aoZr{VgQF374Z49pRN?5C5Ow$tt*m!CrH)}!%ya=@>RlBR8=Y2P1
z^ErIY&LsdgDkumy7p+|=Mb1f7XoS8BT7A^Sl}}B`AJ&^~eAmy%zgUW!%Rj+rT}{&M
zz69@_xDBV?Ggsi)IoO!#3!QV9fj^D{fl58PS|y8K9AeJ$-Ai#n4BJO-wQ&}y<gi@P
zFHjkz!0v9wSUUAM@Az#G9D1ohc1@ef_{V4AP^cQwk1ycLy5C|~@L@<X`zk5SbHb(R
z6_8S7iKA}o6Q`xQT=TwjXsI@cG^QoN^UJz4EH)FBUnF98`EW=(+y;WM2&d?{W8mV}
z2f<$y`6X`+NEgdINU!~jF5m0WY>G9>R%34O{(jW|m<kzB?eL~SIBp(x4m>Q@<I645
zv~!S<%ed%<j%%f{`)V}UONZfD=9C?z{sU9P@1gjEDpl0b!N&*8$tw2*^!&R6U9TBp
z;hPFkpRfs3AL-B;wJe`B%?Sg-4nr$%MLd_R=SsDb`KrS~SX#dYF3)CtlsmWin$(ZH
z=W`KPY&98DwQqrjcqKd<$N}SgCCsaT#_y7Pk9Nn!ypL@!pge%}9a*|zY%M%$se>&2
zJWOMITi5<^B4KY3?>Qxij~w}#GuGC?ca_ia7vqg?nA?eceHCa#<mi7VzF|<15Q@i-
z#MnK9U{8n*$(zX7{ewGr@yKwAJmWgl*UW{P;TEKE$3gDIXCe8ofw|k>>e6w2y{Mj%
zf=_3e(Zb#L@so!(?Od|~R`=WnS)FM1Jq2i6CX1?USMpOuiiXvsg0DL3$!nT%_9j|<
z=CcZ{u&u;{OF8!QghB3^eQ3xRzzCTvI2pJVhfN!fg*kR2-PQSUb)Gi8majz)@3@0Q
z*}c0l`zl|tc@1a!F9I8HD&W`oHdNI~nvChyrV@5m5iZ;h&(zEa^J{bEJ7s9F6bEGw
z|3kO*e#qB*jO}69;M#u`SXFWn3X6w|hPAqZ*iX!|w#L+FdN1qU&f%2%SjNYjam1#r
zgavHo>VEtz*0mW@-GU-G5GhS$ZH3sgl<};d>Cxb+r_gi$VXp4iO;8DafYmdiP~QCs
z+_ICS;*Ky7R&|K7t;=|wjVz10{x}x?Xco2Iz0aLaU^(`V&D@H=0{}h($T{>6KBTG<
zyJL}DQ-cKL**ivv?A*Ralc*2XB!*Vgar4GLs2bgh?%F2AYs4r#MZcoJs?~XhIfZXZ
z7cf5U6&L4+`1MI2WA`z>wU-$eP@T;S!#;BM@;#i_^<wBK(V{7f4EWwT*}Prwd46(z
zF4`X%&r>BHhv(Rk-6z@q^L$G(OOOKED>bP7fINQi(IQ-(4QX+C01Ej^v})}i=$u{5
z4V6z|j5TfEez`2~TIq_{9~hE?cZ>zwHx$gY@-Wxs1Z+P28r<9;LgAD~E+D#%4_sf$
zMAwYve6^hy&sT-5>BHdM1`X0ZX&($NI1Bzy!r*7;6C69*kOr$+@Lwi}WBj&R=ofB}
z^qv-(!x)KSb#36yK4U3UZ=&1Asc7Rg1#7pb;{-Elk~w%clo11}NzdT!=QTKEcrsq0
zTBO{v1%4$wLDy+}B*BLhp#4ZJeDGJI{!WX~HSr*)Wq%9YMm&MpL4VMos}Mu?-$3yc
zUr}D(LTL0?<yEx}$X1on=o`TDi7hd_XXRV2H}@w$uPFwK&zeKctm~Y(<}=nL8bbYf
zPtdf`BIsa*lRh26Hvd2jou!YHe;&l-h#Dw1h{m>3C-k<<=lp7v_#>I8@han7_{G1+
z0)=uQY~NXC{0CD)E@K(nJLdmk%=rg}(9@p|;!QW5#Wvj{^$>HS;4Vey8|lzRFEVjN
z2xD`7XN>+APAJa)<=p6Fz-N{TF!fR|5)Tzpv)6}t22(`Y0u{b<LI_tf>m%cnpT&mP
z&%r_QEGk}=V!7L#JnIk{qH{)_gj{an=4Z>2UC!T_bKMGc>#l&oI6JCevK}^?oWhs<
zJ&Y;n17U23Wcy)rx<BYObBs8#UCJxIaeggFCdx3z;}6jAV=RDe2E6Hm;}Bx9kC&BV
zJR6fnEHJ7<c`I33wLBd3Ws<nqQo@y*M{$iN%eash8Fbx067ANs@v#q8`RcuQF{n2N
ztW4DL_Kz$^re{4kw~wHqna8V!7?X{EHA$R?D)lT5=QN}up=VMfY}Oo(+LNx~mSwiI
z&`FuENvc72H|8f>Fh?@w9dkFm`ozy(VoAq1=#sFbrquhgHX3#n!9ICI>RPi0^dlqq
zMJmSBLs<)5v~r-(AePrPe2BM0bclxcZT|3zVodn20P3<RxVDeu>*V(19qAtE6wL-<
zg)i9a?c$I4o099F&!V8Uh)Z^5Ory{f5HHul@8F+c#>k~u`hxi>{;HGC{18rQCS%}z
z=J9-#IaQxyP87#DLgNq*zAull+4Q2o$GHOi7k1#oT@hH}Y>3xZ2}t35P0=sr&uVqN
zi4Efz^Vi;-ernSpp%&|~QSrTK{(N@Vs_@~=SF;`Ho0pg}%aXd?*@lHf)<`^V`eE6k
zDVY4a4&Jfzo8ukkH2P{vtV9=}ZFeBHc_3%X@-wCVkNLFa?kMcg#t}Cfz^-cx>V40~
z5MB{ogC|NN*X@AjX&0gWJlikzKH=-1*ux#g-<Z-}$(Js#;9U2N5Uo9INs`uXgE0bG
zvT%(QwVQkekD3l5ds+mPJFZ0KJ71vxQN;c4&!XQ;723J+Ab0GsDLH32fW3QF@U%2L
zBg=2)vvZ?BrAHGFDP&{B14mHSJ;UCeHUtvd(XsCo%&2$*(_R_Tx)H43?-nbWXCOt3
zKHo&K6?1i5mB*A!0UsbV!dJ>A%#rzzxm)t_q=z9~pEH=W`?LL8CfggfHDSRZdlW>a
zicb77Ce7`(u<fmoHa{E*duMlG`O^bnwO#|phDV~T(P0QOuEL_11u$W~F71r=;FV4)
z6Y-^r{B-|H?h3n`+}AQErtF#VLRp4Px}iyZzs$s_i2>Lz>VYd|Z&5I94A{?D#NRt2
zOY9v~`8`GKuBx6Ys(oietQFLW==eW8KCz75gWhtji>_nLxLWl3X+YEeH1HcnSHbd8
zIoK{|0InM~p)+D67dR{jUp&2sna<u&Dyn8(HrBzM)Xgar=~3_QL!4j#5pIX2H0hDP
z4W-vtK#s~|*rgbXHm8_F&^-ia{ON>OZ=$hOLj{-BND<kYQt0^i5PDWALR1|Oe_k`T
z{!@F-Z$<({{Ak3>Tlz8VW-A1bx&i)EH{!BDV>-&DAJ2BMZsYrhSfd^f7xWZ~czSL=
zBMj4=(km$3mML0c(hc&{Ct=ah?{LuQ6SThwgv^6a@QJTA9aYP+45eeRVK;N?F$U;s
z14}yBNrimqXMFBR8xUNS;Hu?PMBi%}E*^FT$G+Z=n=i9m)V#%Lll>LmJ!Ni~d*Pth
z%(!Iq9vF}8!?l})^yqR;n(ZJ*z12Q&h5xePtcNyP|4xJMk&~l$Sr2ZWpCc*{{(<>w
z1=u!D0fr1?nR@%zIDC~3x%J(W%Hl>y-4TGJwL9?oc4Ok*6~m9sV4R_CWn8~v6FQLn
zpu(Q*Au{{%Q=$d^n)C?rt8%d6<qjNHG8d-Du{@~NL_R+*5w{L}LSb7gmzVvTJNPgM
z6j!8U*AN3L*3D$zlZ%{nLJPW7+=7BEU-UGs;<o)Cd+#4qWBbMbZk2?PgjBL6<X1vc
z-D@QkLI^zxAqhnik|fy==}AHeNl21-LI}0*wb%$fBq4+(gq|cJgmA9Une)dv^PO|%
zd%pjEKh5kJ(=?6leedhOuC?Cl_4?Qmd(GYi1JX@-$KBr8W<VUpuBnBqKYQZRsckW(
zT_dQQv^h+FHcbe*1v3ZfqPDHzQV0H`>+6)P?_oxNFYkmenyTT#nRoCzsx{Z?=0UK{
zE-LC#Ms44w!D-iLF#1&&KImwGBTp1j&Czg*NH_+01<g2fxCMoOkRf}&FRd)oLBFt7
za(l=ZwVYM*0g=s6q!l^5piz`G(T2R;9+LmRCLDX`Ev4>tqK}o@+~o9?(rWy`gU-=C
zZBe88Q+NWNoRKz#YGdX2=W^+rgOqmSfc$z?0l0144*{>DDKPn|<k+eb?mad}<3smk
z&#ujA;sXP0-t!qv_ZPXf+zqn9>DCfub>O>(BA2F+AkO8N)GDu5Ja;;fj*&5R4it5d
zln3et-6xbYVTE+6e>X0@^@YB5OrURLR3r{qu=+|J)x0?h7mVJM&dcT;IM)Z<4VC05
zbz|T3iDW#YJ(nyW2D)2Hq3G>KxZ>IzEf;j=6q~m+wL%}a)%D~rTl$bm8&eKyW`u!g
z%+dDMutF0@<^zP+_OiBP!lvJ}BuvcEy!Dklo7c-jhIi*LDVf3vGKe~umr~Z{tyJ~Q
zh#E6T(0BV{cya#>EC_9lrXugA+VwKuGHxkFJ&y#xr&naNJzl^knqiuGM|s4~7OZEw
zSI#`rk~iHKf8(HelFD<wdfn|?P_iZjs=r>6Z%bFmYR(DBl1rrC`mxmB;5wO}XwRMI
zT!AgEn;^GfKX~q6Ogq1}#@Zu0fV5^)xNQm~{}ksAo#EipJ`FC5?0daQHl^}h7-Qan
zEo<gc8Gev*YFdHGf{ozEo1~uNWMLVv5}u@6km1k<Y)74hvV<;h`OOKa{<AH`#`j>)
z?WS^`b~Q<F#ko+-*7Xf4rsVT8X-951%=x$h+Ar>gx92GMyKur)PCg|M^EAW4_**gu
zRnwi6E_fmIG1NINq`#K7M(4R&lEU#~euSSc)wS2=k9(}CtyLo|nqw*ay1!whm?LuT
z_f>X@dI*ZA{qmRZ7xiwXB)#l>QS`0`%8GCQN{f25#J*80so?v1un#*cTMQ}|^T_Am
z$dc|ji9~K>#PR%jt+jbl=4GmQwUMqbYk?*iA}=zwE6p_N&Yr1Bl10rzQm#1+JJr7_
zHN}S-{}QZ;x7G6M&2hB0M=pIF^bj1qpHP*lHxvoR$fLD7{N4O-Fti7{DryyVGmSY|
z84CWfK*sV(Id5&FG(7qddH&1t_ul)#X!;47ZF!U;TU5b)y$l*BGEMDYia7;!lQcTG
z4Q31*2eq|7Xy|waDpvfFzy78kpSH{emqU9Y@~}G<bZf~ft*OF!W(UEQ%fRd57I5mD
z11$@RC^a_?oTsi+Z$H|RD=#I(tjc{*@#-3_cj$yqhgh)AV3CodZ&bEqA*ELc7mG^;
zj7~Dcv<VL!-&khS<JltTr8N*TuiX|*rE9dd^auI9c&`36=N()<X2!2q0-5-UTmif%
zKRD|6W`_dbt@uXu_rFShAI>=%Bn%Syj0>dfXvB$rJD}S4HJMeIvc=0Iph&E6nDyi!
zxIbC}{$HEm!3DwLxtC5`oC-m`u@JVih9b@0gS*|oQsnHbP<~AKdY>qu_HS)=7<igy
zuaBo5<?V3t%Prv1-W&7_ZNYlw9SVK7o|d+?K>eBKJbq&vp7o%T9xpizE*_&v^UrJP
zZ9;eKF}oMr4A=zDfm#&W@|j%MxfTl6=uk|zzo1p>4Z#Nj$KU^m+}QJRYOf}uUE<7E
zw)+t!O`io>{`u6_;wnV^GUaE<CTNgj&KHl|q7=7n(BFC<1k8$r*HJr2r`a-iWZe!`
zleRcqQ|_lnugrP+Cec3~dr}JDln;8(wt-Lo_FR~^T{4dIhT!k!Tv-$6`2MEIDembA
zc?}-a%*_Bj0|raODrzWw?|o`**MonWB$3n1J!IHChGH%}r5wi@w79?&^%HyYe|%qs
z2CX6c(SBNC)k3)VhRKc=f|cdfmQ1}mqkYJ8s+e5`Stsm4{bnDm>v$7N-rLBL*Ka}E
z;b5@V%Y|yUTv^e-P6~c!0ihAz&`;!dH66m$1-;D3U#ure)l{kH6-x}CWWn}6pDALw
zA>~^C1sdyvk_vA+{<dt!yB^rnK-9v@4`0;(?(c>p7igp5$~@@hyaAkg=x~p|=V;E$
zZaB}RHEP{{14`Q%b?he%4eurR9#wkm7jjiD|GO0#Z=DM__T8rohXk8<e74l5I*;x~
z4kYImkEEq{4LGZd7UhJC*>x*N`Ps=VDtx0SW*Ca(&Emd(Z`+L{cgvt0q#%V&4>{`W
zC}{8=MG<{E@SeoZSY;wyH-WB{HnKnptzJSZ^*!};r)n5{<rSP$yn#tq6&RM*iLc}t
zqWzR>l4@*(G(Gecq>mjytyuJUF1kR-U-yOMXTLnuWIHVz^b(vWmCF5JPlbqfzMxv-
zsvdmUl8tAI^ZF}8i1+>p^?JKt$KOU+YY|SW+20&8Qy)RxrzLPKL53^iHc-Te2eh$8
zdsK|q;Q^~!p=ax%P=mif&84J#wH0Q(x<ZW(17OosWA3ZI1Q`|%H0Ny_^a;z9C$>=F
z+}$T=Swb8reB9J}|Fq>{VrD|6HB4@h48>agUe;WGEmfN*N@?;Gh+4l8k_YK=#mj%`
z#(`__%v=wxMz^GOFO)p~h;U(UH|MS<COB?L9pxpGRF$Sffpr-awR|?H0@EE&ynGK2
z{X5{GejTy(^6jM1vdv$%(t!Q7MepdtGr95ABl(8B6&e&7RGZ$L(?krSvBw7R`z=Y(
zk0`#?KI$K8#2=^Df=Ono$kU~O=#i7>`Z!6MuoWub{37q^PvMb;it4Qo$Pw?-q3`(z
zpt&3FsM`14F>daDO6*iBMMi!Cjy^<}Ozu+d)SINY;jq*^<pn*xo<TX&qGjXhmR#s0
zcBf<8P?+2t^rE`KXD#9WO0p16?A?yO%0KCG#4suw-Y5+y>%j5tR1hMc1IvB^;HO?K
zQ(tq+?YUjBoxAhnh5l4BW}5u(LBUuZqso6F@)Snf+L6PyOEkE}HLw{XYQ=rU%v1bP
z2OaZvd}q8FZfC(<|Ff<1-KYtYR&<3&Hrf~&@`GZhRzR1C*8Jy*I8qLB1HV<%CC|dI
z(z*0M=vVo1_~p})i&kgCxzB>9l3NHB*MCvY*Eo4VKU35Uyd@cR(PHDBjq<6v%~6rK
zAaB8~EA%?J5j0CT!m79Ws62IDj<TIZ{?|+C?(SUL()<i1_S_;Fyo#VdAGE_J9W6+X
zSuc&P-A>;Vo&d)DAk*k(oTzwCs%z=_cTZ+P>e^(YW9#8q<U^?6m_kJ*hVZUj!2zP@
z7(T6#?v60v&+`<lC^_Vynf_Jw>{cq@*t8oSWR=mNQHFS9PZgw&GAH9RZ%8r1B)`@0
zo;)i*ou*vY$KrvR)LO4Q77uNJ;v2W%;1EOXB>ruF`94J7ze@A(r-`$hB`fc@gEqUi
zikb7x;JR%WU6@!8qm(S36~%Nyw^;DOov7m<#@zn)pLFW{A!096G{`QZ*9&DZuPugT
z|KHLQKk?pOT@S5VIa0wdZAf?NPg8oCVs)P<lKTE7NJ?!D^Zz;rCmWwp#JX>^NY|Vj
zl5{yS#96jqc~7cW9fZQ>=cL>l=LP%Vx}1406;vn3fwIX|u55IZ)zfwPk4dInyy~Io
z`-|C?8=myz`fWPi$_zgkSn#(|f6-^*a?rWkk|wOY3Tu_!`CQajxRh{+PK>F5+mDo-
zWqFvExeHd$x<N2d@K7yW&O+m$Pf`K2Bh|QNQpuuClJ_$c9(E&<a@MqC#V||9)Fb=I
zYU&xet&{Li*(ONi)^)(5Mj<>r+=5rNv%>7NQ8eI=@RxHQ=oM_051qV0Cth0e7QGmn
zptuITW?0j(4(%~|cO8^F%^^x_!K(cG`3@o%(WlY`>yxbI>JIT>KP5*xE}oet@9%@(
ztUKzQpNa5T_br(yPLcn<CqRO2ssAPh7F^Wk73wbBbwx*vzkC_atnbBd`dXpuf!`F^
za{(yV3yyGcOUJsFsZiS?m*ka4Y1<$r{+js-UOf3tarYieiX<C{(bes-CgeQ4{BeP%
zeeR6W<L`m5-X1tE@??!|Go)MbX6)}~#@;sVS?jg%l<d4Br){{RzVDGsW{JYZ+58@a
zuYE?wm&7_Ud7(5fbR#(rI3s`8J`bVv7dh`MQ@eN0k`%`UYu4$Ptf<iA*xTKC*32Sm
z_-x9Z+BK5)Y*Vyp$dI#-AEemm_B_(KBPP$iCC4xMDt><l-Zfu1Gy^6}orAiw?^59w
z9CDC)yJ%y>*c5XAXM>!0>7rctWfi5FS4s00o8#Lq3j8DF7lr=tkjmOvfbsF2a^2H~
zbYk9es&t)32DS_3#j~~0(>g|;{j4QEIn<LES!t+8!AW{t6h_MZmC|X$YcMK!16f@x
zmd=&Vqp#f!c*RX~EH`tc5yd8a;r3VPIJ!H2jVqy^-Z$yp!|Ni`(MVnu#_)DzAtl-_
zhmhuv!0PTgY3z*kw03VSU0T)!--L+F?4Rk5dwTT5H>RRjJwnehXT&Qh<NN{`DC$+;
z+q{PQT5H*}a=zr!q^7=dFV0=yM~|P}fWi>b`!-rC*<4DOl|I$-{y96S>91Dcxx6op
zU$4OIDp3zM7aWGjY{661<`bvCQ{lrya>kfhki2vdnO!pmd1DXG`|G_NszNz1!<lSa
zDka4*|9s8$eGq3n5ze)=6nqb7DS6fmGU@my9a>aEfkrvfLZ`=IHfkrF@Cc=s6PcDp
ziQG;@C%ER41II)@(fZ|Q;iq~_AAkB#*58li1*gQ>?o}YUX&nWFqqAi%XaR2H-;w{!
zY|!g-RWkl&N#8aKr}+U}*)(44FD#zRqv}ZPi6Y>(-~dG$9E1M-i)m$I7pxJShu1+j
zg*WJ;oV&pS;@?z*X8$2aRr5aZd4U$cb?b`xW*tzo@~zZXXDnTQ^Bi_Rctn0}eIVnG
z5sm%jM2p1?UwX}5SlzEXe!krUzYbTTrcuu^G2cc~1)7n;^5c?G2VbCm8^PeqGTB$*
z52^Q?^C^+-S-4FNnqh`&gJwBWihc(Uxo}m~MSqcn_CaXY$&|<aH0Pyx*J$$N0!V*R
z4B@9!X!3R^^1BufllPn@t>vd+c*hPHcioJeG|g$9@(6UB)eK_~y@u}>Ujb{k#8Nx4
zpPsr)b`FnF=bZaa0sU@~%L9VUUiZLzY9?5GkSNdHk}J<2k<zBlk#>yI;YA{g@ZeMu
z;9LdHjOmWAC)H6k>p{`&(^RwND(pF-Kvjo<lF6bc(0!6InoqE!ftI2^o-u_Cqz%xv
z#YHd|XB<U86aFa}@e9|iriiE>5a`;HGky+*UU|CgGayDj;n)C)AG*RcrC?P5E%Np0
zW2Fw;GiWt;z!KsA>gH5NIeC4g{`Z!{)*A)1vt<vy_>VEB{XqHQMZs2{xf4cfT4B<S
z-DH&R0E*&c4&^hgAwusHEwrx}?z2|X(d$K&{GQ|`>+~`CL8crYC+gzX^Tpi8Wl()O
zCDm`!rR<)w;QpO9oQ!H}YIl}WMb6W7x;a+YeV2Ki7N!lUmH#|wgtJzSr-hGVsFJs;
zZ>rmItky%YTAV4n&1}ssHBaF611+9gqsw3F_EW~Rm9(OzHv70(gJyezqme>nceN57
zzdsX9>G>U5J@T{Q+%6%7Zbz_hogzgZ*akCCC6Qv|X9s6DPjzKskW}2?f?e*d2Sv;e
zhZ~1>6K3U5N%ByrAG|<rzFST5znw5V@CW>{s2k>P%7nCzo%!vl6I7p`DtV4;q4#!S
zpT88&EfxQHd5+W8Q7H8E6#v6l8%1-);w1s|f(Hi737a!;kliSkW{T+#(x#Ku>I%|C
z8>%fV2GF!Kf^{2Z!J5jp>g>dEly$@bTr%cTVN<R&>7TAhUFJc;U}w1T@&JUyF)Uni
z4P1V2C(l_2WXmAIPhD}B()V41`aW^emCAOg{<8qW>ec~u@gS3c&oJHnFUS<LM5-+n
zVo&*z%olfMi@w|8S9uNfF6qH*2UOD8zQXfVo&b(JMLjcr8k{R^%gVCZ)b~v#)gDQP
zoCGJy?{#NLoV!Kp>+}okA2;I{ialUZs4KsHltnSGeIY$+33UwUiHf8&`GRpD(izZ|
zhQ02M6OK8Ob?-+sLcbke%X~s+IpfIvaW^QkP?E`*RG3U3p!`=S5NTKnPMHqQtHR}?
z?yafUuT<)B)QG*Nt%2`J7r-TQKUr+r3x)f(Naxl!Li5kPdGEXSm^4DnqrN%>KTh}L
z*nu(_j<`$_I}+ssCt72;>1%ipX+c)M=EABrO5D=*6uh`q0$`O6b4KOS<69|kuIn!-
zz3&c|Q^b2P%1Avz_cr;>HHD}hYbo$k7fxvU8%EjaaZ%7=sQGgQEiDjdDaSJD0^A|J
zmeG(ie7@ut_C@se#ANZgc%q)Rbgab~8YyxD>3_<Q{MRPAbn6^y|F0>!57PnXaqS`L
zNn5tf{+k}%ZGo<zWoqSE2_3#XBh9&BDP_bVvb^C(1wWMVWMdpjKB6!6*GbxW+7!o&
zHM+j61qAeL%RYAnN7XS7EWG2uL0wCcdt)J7xI@Qi3qR(tUbMJ>A{G4BCet%ru$!^q
zUXFYQaYqZ}6tl0euwy&^JbW+pO@9i`6Bg6Dw}SO@=_w6M6nW-329m+rVEMuP4Pa6;
zl?v-FNS3)ekhsP}$_e^Mnzuq5Z(rR`*MFF?Zf`BlNFD}9?W|b2B1p1&xm9{4oFysh
zy|8jY7fw4nQ%)ZJiz=W0lH*&SfqP;-%F_7*j!UN4sLG)8f}gtL3R9fXVoC|HV1rUm
z`NW7@ux^<e_U0O(B6A!i9&eQLN1AZSgJJTv+hT5~*9%#7bFgIGQlDBqJP1C0zRT+(
zeuDp_0#MYJOJ7$fQL+#z7sl*@ZhsyT45BS^ll@wXQ>V$0*n_4YI7CIRo#3w>3XT{2
zpnnBhOkF4DJGxJx^G$*;d!P|i=f*hRUu%H73`ItG-8uDZ^=TN}@Eu+({RfK03{Uul
z0x&($9QVCw%gJqHVA&Hr=4N8n`N=QXVcZj6s|V6^kqx(QQ$VLR`@o{&K73sG5JKFA
zgG{U+yDq0)SBO>PKd=A!^<&uoxqc`Vs^R~A{r4LD_a6A)^B(xm`ti^5s@(sq9sha#
z&#xc$|JVBQKWtR~`;Grzf&X5C|4pvIf7TD{lV{)m|6e~G{=0tsZ*q_P_s{(A75IO<
z0{_eF$6%ZPo?rfZcKh$y^?&nw;D7o2GI-GcWB>7=-xKuAV%~oWum5@df1QKy|6Ye3
zJb09ggJSys>X*u6p_~|7O>v`KA-QxezMf%+@hLNL9W3TtQKNj(GYU(!%`xP{Qugia
z&5f6p7}shi+33fC)(r78s|In^(k9TjE{EjLld$~5EwI{d19_Gi;5%eDd8~N{`@<LU
zwMX4Jt@$bnmxI{wZ(A(8-4&FduglvL-MH#+OK$L72ezIm<lA3l9JCf;nME@!pFbB1
z-_2!(TY|l2HA(Syf)g(KNM0}3u)_9GzQ@S(P(OJM`ocZ3@@`Hqwg<D@M^nDrQe=<>
zAJ9C}hgBm!%UceHbK#<;sOfdfvBa%FuJlhLm8<Zex%8A=$`-St>ruzRA3cQ6(_C;m
zUdW14-~6H-hPZ!#7oK}Rk!vp+pq_7k^w>0nn+6TSj|N>hrkR)_?%;>U1EV14OgO@g
zzo}qHKP+}z&Sed&Audg@=`JRbN8x-dD-OaN8^>To=m`isKM%|P9m!^)W3ViL9!wu&
zhr50b!0btaU)-rwHapmd8~zmT68Q<W74tH7&xd2tx_6W|Zx!nICMq-)Znq~s>>`|W
zyIQp2vYV~BzUG=_VPVa&PbYAw(GAJ>`vU&*Y9JPf8o5DP2q~gkV^(4dN@&c3vglD@
zaI+hRzYu(}O{P?G(hxEhI&oQjb1u|;rjj6A)Yx2;(;}^?-m4o%?CFk1gSs-9uH?eG
z1L#GiDON4Y2mOpwQ0^LnA1?-@YPc48A8CV%yeU%Hm3`n`vRcy2G9#}k?b)`fp6XzN
zaMfQT#nf1-?93Xd4m<(TJFT%+8X(*S$JN=*jIi{|E@%q<M1@y2gL^+Cc5m2B{o@<J
zNSeaw9mnE@O9S{zTrf8YPLrlwTekUYGROaN72MlLvS%Mp$ost${95(Kh+E$%^hU1a
zKVu#X=b>czb1Qh|z7|>jiQKqFcq1aL`J>Tv4%~7?)QTG@vbzIUUQd%$_j}{6+jF?`
zn~j|Mk6;m+{-%r&eVl&Yn^le79NRwDW)IJm7_p%peoqqha~p4ru5#sTZzf=^dNULp
zegH{7y*Tc4B}KeZqT9_?SmbNUNgMxx+HZjv`B2PFyB?R476oENqsY)~Tc58g`BUx{
z*pG{2M&Yhjwmdk`l)VbdMOJJi#8mykUpmZVo1{tDDcz1MGyEyHxC;EP_n@4AM{si4
zP&78zp%<RMSlYRe8lUU{%Pp>`F#42NE^>wjx8tRZ!b?<}{a$dD`tv$REw((i2AgK=
zgS=HHl-#8`KMq-iKDURW^1g5rNFA`jqzE!z)<9!=Jo)}BvNXFMQORR*UyK(wUS1Gt
z%vZ4A4e|HXSaRcq<s9MG3u_~0bL#5TlzMxwaLeYXt&|@ir;Qg?xlTm;*-8ox+99i2
z-xM?7|2SNZ5zfx1-W<QDEyfSm=4*ZCbJCcBm=>H)$A<P}`)kG&^=+GQqAtg@u43+Q
z>uSfC-ERdW-<yh>M`Nd+CK&v5391B>O!MfFls3Z|3Kzcu#X3`TvpqsJJ63YC?*ukH
zJB7<P`~$HW9_)O(MpnJ(3U&*ovB8EVn6Pgd*IXQdO^146mA@UPtZ~Gm(0vr}btW(K
zwm_RBuSt2bJp?-5q_lJ4;Q4qe`%Y%EFVe@b)(^<@D^t8#HyouC$PsH7kkKZQ+ZD5l
z39uRF4|T()iOW%W`X>ZdzY`AE-BR|u-H_8~Ay{wgjn3OM)hUTf1@mpa{HvlphK3G@
zz{QI(XQjwWxTjH}%_B-|xlHuaFG~7xg1uPs$)3JlqsV@Lh<P{@vNYWUfgfx|A3K2K
zt%NhWjc{NWyI_FNRciR%gK4mW4F<+anXLoauhy9AE23~$f*qFpij&L|9z)#vK!#B%
zWSM?jI5o4uruHY~c#7=Mqs`#gp^L1F-{^Q(uPbW~oe;Sof6hBKl%3nXpaj)O)UVA4
z<441xpwn+i%&V8&p9Z0`M<D3;8jAtz7ox9W7@j-t#`dKP$Ya$jxUp&pJ1?3b+jPwW
zW78bayl5uX4mn2k{YH~PoCR!uy9iD4rnA9rZ8pi6!iw-k;C5#i>WjG?&$TPjYL}Sl
zxa`H1VRz)R^=mm%XC+qscICoEpqg(!=;oCOAD`=DY>fk}4=-TvK7Qyt_8RE56SF4|
zj>{?b?Kxv(J~`Kiv+<V}?C5UAR@ptk^Mxg4saB)sixsT&AByHn=5Scl95&vy8+2Cx
zg!m&-xXgPt`&*h}N=Sg1I~d88Q*+6C^-7+cXN^_YhOtqoBO2sS;rf==7~rrSbiP_*
zsPzcpxNZ%q4I)#VvS037V#T%xr|_||5olv17`n4hL*d|0!Ua1*U1-{j`dT;V$x{}w
zDoR(n5I+L9v>wBK=XA!bvcI9QZU?wcyCImZ$&RU~GRb4059cZUq3Hftc3&#;yIVss
z`BbXt_wLS%|6$1CZLCBuauQCz+>>jPrn7QysZ{wiME2@-j<TN2qS!-GoH4!wf45nS
zWhs-;@|{2Se?O0nZ7s-Y{!)%Qx(<}FTOjX57@F5hxc#&%79D&HdOcf1NySmvCAdau
z&Aw8^Ru@o4v$VZy5SKm-VCwP<JX~6%)totK9N(RCGIxXf*?W*3oCr-Zk?3aFlB2CB
zVu4swj6Qs#n3M)6-#3EedWiMF;jvtp6afZW5|(Wge2LGCIZ<~6)*opCr}y(QPreFa
zBlXx*eMhQN^kv2IS9HQ^7&lCF;ou%Nc&vjdy2Op*lA0*?>=-3-t6{Rv><m)5jFn;^
zh~9s70BDX~lv3Ny!g$vOIQXwttSG9Jd*yj@R$?i|L^@%~A{`1Cw-ru%%*4j$?pSYQ
zgjpLqW1!Vj^1r+SZBGzv*Xqn4r_9B&Do2Qxy;$AN6*G24bLFQ5P{qw8?*XBlcz!u#
zS+)b`^<Sx>#vU6E?gBB2k5#1_(zN{{Yl1yd8GTIh4cQIx3&efdBAV|;%*UuLi?BY-
z0yVAQLX%B%Oxw0ZFfewI=H@!dZ~1sR^!j^Hc(=<h$(_W`?$c4x%UzwCnhxt0i5cJC
zVHiKs45KH_<O?sC;4B|kys_AWt6$j@JaorEF>e#&`&lru$Ktx{z1e5dC=PRRLkp2t
zX>8_!A6Fu(M=7{Isa7hx^qh>#PeN!-JvbFL!w~CPn7+3I>n)1|<ExV3J`BVPQzwq7
zkKoKd#&Oa}!J^gjpt{VCsJVBNeC{B+S!$tI?*_1Hw8qMuP2e})gneEKe#X0SjyUxd
z^so(^XPTj+@dE`uS7ODNE~wZ%UoKy>1(Y^{(#OTkF~x5+Hm!}oy!63T@*^1nR|=1c
z`2w~I{YKfYU&wuF1wDQh&2jC6SoKX$vT+OJT{{KeC0|_Mt<&VZ1!5+o`7d&g@s^S&
zO~b%<w@7EY42sp`QR#n1jx$!n$H^;r@`q)Z@ZC<#{fq?12f~lgZl@GmAHd40DbkG^
z!R4Ae5Zk`7Vauw%{5ZoAW9;0}hup|Ucq+;ax{&_o4d8h#maNX4p{jktC%YjOt-hF2
z=9Q6b93*l^%@$&A+&d~VjKElt_sg3SLa#SFWBub&$aR^-0V!_mvt=Rse!m9Me+=RH
ztu`FdybRLzWy&tGL%FVx$oYHc$Qg%Bg|}Pq-Y1qqWouVddTC+pXamgYV9LwJ=y39u
zU6APd4`sFOLr$07ar)7I7`I^*R$jgXbYuw^yw$>Z-ASlbGnQ2)E5WC@75V($24>6J
zv;U~2tY6g%ZJZ(~r`a*8`%4?^`_@QKU$wZY`D5~{>&U;BEynDM&S*6&0IWybbLKVS
zO0pI{!g<0ixGG3m25Y#%z7Z0m3!%(r8YedSiv8JVP(G|Cj|~=V`E(i9O_|1u+$6Q;
z+bN0#M1!khooY3SE7J@yKywgY$kTXx8#{E~cvuQ---$vegmOe&8(IkSS+h9`Y)-_;
z9^QJa)BFSIFI$0%g6ZtzV@(srbz)7r4T}LhwANT+d1(>Ay)ts%BK8#fM&R^Vf2@Dh
zk`w#3B=^Y+SZl~=R@9X^I1j(1)(iSVlRbj5qIeEl{aVPSeGJ*4dNISJJK*z26XoRF
zv+DGF%3b;|Bt8{;*5gyb^HV*QA9zbP{^y|S!D7^L>4Xj6)YN#q83in^ptw$(pyajK
z1DC2D`tM!`B}+)QN%}_~b#XH8at`L0DRtzsDT<xW&OxVwaU9z79-N*Z$eM!Pvi_g%
zDW}3)IB=c$uJJsae`+P0_aDk%F50lm3gLol^#{jI8cYiX6U4s71F+2uMazIYV3krN
zX5Pyk&9>bJMV}Vnx9*8tHmN6u&c8;*YiF{-;PX;tM`sQR>&TuPcSr#%9nsrtCa2|^
z!u}1m9Cq*!c=~&hr%3|!_g6#Hoi2E6Nq^L9cr191lfn1r1M;3Yg#G+G$(fJ3Va)6$
znD%~-<PsW&jj@)nOz_g{56lMdi_6&gZ(X@TcbLdH^i}KXjzvwhGsIb2P;&NT!Nl|e
z`(3X=^Q<{5+CG$<-khOS!&(^pP28(V*JXwD2$aqv(68MqSh&ax^9FW=@>nx0d(?%C
z0*YYS2YoJiua^CjhO^>AS4V@6$0;lB1ZDV%9Q(PCv}<<|XZ-HV7rqO=-!2y}nzD{6
z@AYTf$cb!kx`rCcQb1!b93%Smkk~erf?)*5)Gg*B?MZBY*qgmKXbJwoC@{WVN;UyI
zq#Q}6oXTdTA7RA)vOP{tS%?j{#q<04I`TZI3q}J@3+~e)wW`G`?*HL1<Q%Vs&;vs_
zOaC{h9-<s<_#TS-ICA2eDLhN?$kT33!n_G0Ytg1R8>$Agj^Q2pB3pCG#7fG!DcqKc
zt2l65H*(_c=yO)&lRBK2qVq@a_No?GHrJ2(+hl_0(?l>!o65V^c0l({PPC<4nE1Rp
z(CN|$yxeR)>N)I(EqZOye*I%8y&<xG94Fk`!cVbtIqDS*qRff?(Nf2Z<939h+3epC
zs56Z$w??ApSUcgmj*&7oZ7{3lLa-2i(}cM;JXYg|<!^v}9vp`Wu2vi?&ZX|nwZXGs
zlT=jF87&XZWY5FL<P;xQt~orJ{gzBZgP}cHIk$-d(?G1BbGUY1mB`ZTpsG=D7OeC*
z%G3|ttaLdiOzipfyJDL5Mq2lA2=0mvLW9`mRBl@a`^gbK6OmNiXUWNh4Ps^|RrZ-T
zo-}oTs*}&D!A$ELRGF=(?3fs;XdI2d^24z1=LA$bL<=_VS-EIuJGL2P2+D8TlB#>H
z+GgrzP+bx{#(t9^R4{Ne-fSd2!N5@V?jYxkHHG4LQ@A>BGwFZ2L=h+dz+?H%FtTzE
z8%;bz{hNu5wz<1JxpjBUu*{$d11Dlbm;E5z-}pLkE+@|s?2A`HSiMT7xOX<3B=WaO
zAJRb8#!y`ku4tehga(ReK2h2Uv+w+X2VO>~`@1z4y5@so^Tqu1VePSPM>ifj%o!7>
zZjhpf4dv>%WY9Y_hm3^#+keqSE^^yTs)lK@pLc|;`V;}q1DRB2XQ)Zpl6}t11D%!S
zq%Yh}aevxDn%Mt@UbV!sdlxDAy8)m0)&<owyK?d!@w0tA`PguA-d-KX**k66xN|2`
zIjyJYcWb!%q%(yaw`PTF8yws@kTV;nu)FR;)-;<byNMjy#{~+metQ+tI_pZdmshf~
z(nAg%TMqW)jUe)q3Fq__tm@wf#hm+G*~<6^7&o|r<<DS@Gk*e_pmFs0X*lNymO|n#
zBBRNUsGk*rs`k~6eOH>H@*h1Zx5$8n*#*;jq>8iiFw(JW0A<@K3|m!4cSl+93B@e*
zdZ2;EHUlWNUrRJA%A_>A_B6`5FE;Ew1t}jp3omt@qsu1)P8$SJyWES@&ggROh|U}w
z`iuh3_vgYpejF*XrVYLJh&gEqHFLi?RvxU7z6fWUqT-9XHg6gpYi@&1GlI~q=WGuA
z*o-Q@PJ(8+9W)g8!xE#HvflkjDBs@-%e<P?mfyNuI_M~bin$`=jz(BuyOBCY4&pNH
zFpSzAPdSGiFyQQBy!>_r8Vw`}3rwWiPp6@*HWeDQPt)Uq1?+dGo)WaIIA_9nIp=|J
zID7cOg#K%|?pYtyKkx@C&xDe)&;*i~+j7cKC;l#cIMssXV>n|Zdmj1$n&YF;y(SMX
zFS109$xh08x)U@lt7+H1(R@PX&Vm0%lV$aD@>n~Wr_b+z{f{+)&cJYPs14^{OFMJH
zc_X%-7>;?JZPDQ8N!i_KF&LckV6!IS;Hv5jf%_MO#^EWLoF2xB3r$$lqd#Ups1tLB
zmmIUl>Z9>x9dObK=M!zlvHf#H%y}NgiipSd{R29))l1=cxH%1XDW;<B{MV3lYXtUB
znuT`Zqq*>A5H{@)e1<pMq`2|pgoEswU?0{>T8|W>X0}T9$=FK;upD>Y5}7&O$8v^Z
zFjt@IiUrEwptB|j8}Hgf&c|{v$V``<9uCLEym1)lU?e7|&XSw9D~FqfvHpqGn0)3b
zSXzD~qmO+=b}^homU&?1ojNLbFdLl{mUFB1cKo<_Cf0wbr2wrDpc(!Is&}rXk}0!s
zVT|yg6gyzv$;sGsW)tKb%ciOu?b&^12-Ux=5d6+xj<y}lFekq&mz<gaTfA0d!l^5e
z6TDaQ^3Q^(mIuN4=y<uQeI_NRd=@^dKu5p)S%ULo?RcZ!g*`9#g@9a3^zjz%rYi^J
z;xWCsvUM}zXa4G7oYaOj8yaOJ+eti1E0XmSenQMdbMD`KHEVef#^P>6FnyOj7fxG7
zR=GpTBqN-yev9?{buaS26^aETL_PIjZ`4}UAIoYcLX6iEEYI3Wnn-`PitNNn#WG3{
zn8r8W=%e$@J(69s-mE%TN10w-(S4;ghxeX<IV0{#p^p3IEx1D5hbu{WUq{kB?#hGL
z4dcnNV>q;AGKB@b1kYRI`fWcfyRVNxgSk&7ou@LLZaI$KA2@M&vREV6q(eiE9xE3-
zkf+aUhsi-}#o6@_DB64w{BkCs;{6SE`sWU;JTK~`ZUYD&O~kbBc9M>$s5Bb8ld7VH
zy3@|p7?<1`yi&Sx>?c#flQCf1^?fj-VhrDK9nN7_U9mA_AbT`SVc6h-X|LW>)T{GQ
zIdp}bwY&wSI1I$oYrEmXim|9T^#SJVt>*Y^4mftA1E#$jCaccdgKw{FNOiwTDjQvF
z{L7v*7WL({I#*eL={$Cul?he5&A8y;cF?=}jckj?3m2l5<o>ENWgjj9<=sQF!I6uS
zw?%7AT(M1_-qIc|ofol{ZV<Y}E=Ak7D#%I>M$d0gsXn|)e$id<MK@G~SsyW%IeZpJ
zXj_q2&xh3b$8vTNzGKbld1&{~K=w__AZx7^IRE@&j(W8gZ^Y=c{e<P1H2V>RD4S#9
z%cG*c(O#}yPyjYg2DIH>2U80carQC6xLb8peq1+~V~;Db_@>~2riD<l-$G9AaT4<M
zCu77GCvrEKLJvfJLScGF3TwPXesji136uV&itnLpb#ElOpDUp4JqDtan29kSs0|@I
z=Ayf!0YtkmX3g7IY8&@dDdF==QoNWW&aO7XC3;PE-uAcbceDdJo0+5U@%C*0aVG2J
ztzheo5oqIM&%uIU8rWor3AP?+y`&G@K5c+?&%IdHx-BcJkI55`J8`I0if~n|U`3`;
zzLvc<=M{QTSdW{e`1cqjHVt9VyDLyHrHslpr;@p^Hy-Ocl2y}tsNICi*?rGoURbyQ
z9~>BjA%^~(R3z2`(*ay*w+M44jUq+*L3x+^I8Hnw*2NPC$zyL77(B{{%-d_YleVZW
z?)?tMgXUvuQymp$idw<PzFgk#6Qzt_f$1x{VvLx-P(`It(x%N29V6B@)d5O8^c)mR
zU#eS~bwu;=Z86u%hCQdugX)g{Shr{zmev#iwB7;VXU)f=&S%M>AVt!76wZO8w!oLm
z6Ik<gtE4#cGe1TWuKM<U)v@W~Gj}+W$JPsOS*R{)-Yn#T+#uH5Tui#I)7U3+G~W1k
z6~<ktfSSc)Ij`kVa?hQ}Ru+CV`J9qF-Lm17CDZWnO;P_e&zHjwD$%dUK2ojO?wA#H
zhHCV@ICsraPzo-P@sOn$x1}AJB=qBkoAYt7qap9VF_cx$Qf2o&Q=zov0iBx@F3#%T
zDRj+gIcbq#yO#`-67&0EUZGgahu;J*!~K-K*Mq;LcE>XRmDFhViGpYBBST|L^xJ1n
z<sMEL&`Xc=;>KY^Off}hI>L#T?)dt09}G2a0XawSQ{rJ;)_bymg0<>_cD|y*yK@nI
zhH>Sxc9No%U_C#bPunf^*k;OPQlz$but_;D<u1@rj_?6Ov^}2AU4zb>)6^C*;(YA-
zgX*^%V^(1&Dz&%ZzWdssGHErI>h<Tq%Yyyg<i`_&x?{n)GvwH}1-{<Dij`dra7%DE
zE-ZZn6^|VS=QYM5D&9lvf!;eBoE=O#4(lbIjbg9+QmkKhhf(gw-NJ9U1~MiDp-r(H
zdCqY{XT!PFxG#|uqwhLu?uzrJ=PWd`&VZQqAzc2^j(a)VVeMoegn#;=SGZuex3qIC
zvnzs#7gIRmhYN+ZTL(TVv4S05<)9cAPMZ5~rTUcqXk~Xu><9N#`Z+P<yQfypoDhoj
z2`fN-zZI8Q?UZZ8ImP+HBw5dX3Th%Q%bijeplV|<CR_@_V@bm?=W!=8+2@C`Vos;T
z?0^(AQ=e1EH$is4R}eLOI!2wkPQ|~CF>XnF4)B_U!IyL4?nM(++N|ZAa2?3KxQ7~U
zq*1SaN(@<VjG_87u<YG5!OV8WzIMyld-GuQy!r(yPKI)^+>`A*T4Cs{*HYQ2@mx5o
z6ml%>IA=y7g&G73Ms+wR&+7qYTBWpo`WpOh(h93mOxSSJI4<eZi*n4<<(#`wR30;m
zoqw)ZCn>t3XMzdjJj#(QYwl4|UM10IQ}kU*P?<lCbVZ#yx061`ed|D#=t7ldFXagv
z+|lpuUNQf)87z$Td2+Q0+CP&}?Y|5yr;2{n^Y60F)m7O4kro@&`?8|v6{)7v2+`XZ
z4{0{ua@f%alymYXq*WZJm}ub&i>QRhfi2NDOAiy5o+LllTj0CqHCbeiWak~)IH9Qr
z7d`z0vySA0rmhpl4ZK9okK@%Zo&=+9_6!cJnhNeY<G6Bw8~M&!Pf3g3kjHlwdG!kA
zrWs}klQYTwMFc73$8y8<2n?S$AMF!OL9=16T(+krg%$pyn1Co$Y(3==Hf1#@uFRmx
zwgFtSZ7Jp)dLY@~KMW-=r*ihR(_}-3B#%4d-eNCxeW&)|T)0JQxFOd2XNgch{e<M>
z(H&;J3+6EKY)P*hjb8N$WMbVLb!WR`==uYYe0CNnTZqrqt0K~ia3W{j?dk|iB|6VL
zug;vh2*ahZ=-3ZA?WLGwH56{e1)dD;_d!g9HXB5aC8MiKbUHVb6HW;v%Q(TC7&Q!(
zZN5-t=fkpU%NyzLsPUqP4(#0U1kODdjLZGK#Z1mFO4IU}Y*Omw65V4`NkwPyoO=ht
z{loD4Fi(yeG8MB6V<9hGFzbKVuueO{Z#?CUp)G}Tc%)!8*bU~gdr@d$(gcyIC~Er$
zAkAWkWHovUmff7gB`sc)+XD->N!1c=t94X(F%Ue~1+x3~N>bcB=J>sIEElbH6MNS_
zdFAnYz~kBmsMd7gk`)`|b%&N?Tk|oj?2;krj~T#K;X_ejF%>I^4#M&l)sW}rBWkhZ
z$hM6$7L7A!#qEw1+VDZvWU^Gb59NSPqV|^a5RRF2W1sg6Fm2{<DLdMUP3|aIW%vi!
zi}$KHv?td7Y=X4t7}<NbshC@<m0yUNLe+{(j&;EvtS1lR(0|0N%!@f}czrl3-_HSq
ztJ@%sXNcZwAIaXu3!FQaIfhvpphtgm^egX6x+5lXYUjVm%G{FDmyX8l-Xqy|Tp1~K
zT7bdJ&(eg#5R5s!3kuH1L7|NiDU{yov@HhUa(V{WuX`(IlZVSu^at<XI}v@Ki#gF*
z_Z@94Hp`ZqSK+L5Z+x+(EhnYB3D3}EHd|Xr_NRM7)-+MCez(`r{^1-54>UpNdKXz|
zo<_KoOQe{cgRm%gK0BXVr!Mb#m=vS#sa0dfaasOI@>^~L0c}R&mmyZH89xC67QZ58
zvkdaoiUfsk2QVmZ&!N?GxXCty6lHb}0jc3wcD_4h&$&baPkvEIGcC63y%rOG<U{hi
zm0<kcj<RoraYSz)RLTqG`W2$rQ>v1}1WUtcpfP7PKM&PSkyJK$H2Pg!01^4GNp<O*
zRB%hMi2CfI%2qqbdF2|&z288c{A_vp-EQcq7M_NR30OZA$t=8#lFdBXw{9+0hF_49
zeC|Qq^X6D#w3K6p9iz0tT`@vD=SzOr)8i^7yG`lLg;l4(ccSQX4QwX-%vDm7P8k&(
ziQ>{{C1B<9n|zfn-0PZz4R5Q#@9kVt%)9AG9-E=*U$Nh7H4T#+0@!P5IJ?Cv&@O5w
zD=c3*_U{ryN`pg?w{|pdf76%sb~$p?xIfr8<SQx4Khd$q#aya;5dv(c@Ps96&~B-y
zx#xG5LXDq7Mf09G;hyO8jLoLlDN(3bui&)a5+;8Lfdc;loGW+)(+3N#^t)B$bA2&g
z?$#UA66-~6cO_n1Je<=8K9NJ;Tm{3BCFp6c13KluDfDEyeBq`Cs$$nr;*Ni0L2SbQ
zMK6UrvMr|`>w~{OIdiF2K1D{`32%=R=`{X^WS<w1<lULGD*K?`w)vo}pCktkK2Ao$
zOX@CWSZRfL9$jsR>*h*qqu)-<hYH4O%`rG#)QjCM$8(-;YjnHS36~{#v+{@nFXs<H
zg~}n%sAeka-8c?5Nqxw0Q6yJfABoP*kI1SS+KxH<<K?WFAh0S80)=BZIDPQN`p7KF
zD@pWZu8W+2!586%?ID$%pTM3=pGjV>MwmEm5k>WNV3kf8I4^!ADVE1cH;Q~XBl|Lh
zEe%1p1728o(+w-%FP3eN8FS@T1I)9%N1A?NlJ4&nte?FMJwwF*?UgmBo_Pz(csmUL
z-iP(yE<w#pH&D&`SH1to1hz5RMltc_R1o094X%PixcRm8rK1ZQZ~!<TD3B889+3)r
zF=(j-AKG;$YF2fT>c^)_v9Ti0G37k@o^-+BW!*V+n35yfe}|ZxV=-;LHhSHDCD_YX
zU{vXR)a2wj1_;-lOX3jhHNXv1g+tmb`y&_(Ur$Zvm!sm$1_zDj6v5Y#$?$VWj$EOI
zWe2;U=Ne}S?A`|IO|M8^ix%N_H^C;@CpcT~ldx%DEB?}X8Jp#7gj(_5`1YQP3G!_?
zX19o|I`?JcKEu&ySZg+%FoYfVT4G|WU@XC{v@RkL^>-Ym@`d6IAF&gR;*79t!9uPd
zE$YrGd2*BJ_xa&|NIJcj7ACaj{ncIAGv}>Tbn`Q0O<%};$p!OXyaDx@`Dj%moIyV`
zA>!)}@)=kTo}IEN^<gdLtPJ72UL`d4_C!|ZImktO7Gcz*t{BiY162PetLuxx(Yf-c
zRJyGev~pVVe!&wqN<Ir2!ybxR*%NY1{VZ(s`;#))2eJO}AEeZp4hHQ2D(^I-%sNw6
z-7)~rF4w?)N+f#?4HbE%AV=HS&j9(+XcM{|UfWus_p6DhS~FDeBWI$|j>(Xi8;xZ{
zTT!c#4jAV2g`5K&<z>ZlFj_MbzuReJ>BSakG<82Xhl@4iH_CRZ1?)U05{nhn(X9Id
zwy@}ds*fuin|2wZa=}?C(msgOI+=sJVg}}&UW!W7la#W21{z(eqzNBvI4pb`dhQA#
zv!oMnW8+|qaPlMPqP~*#S`(}q|2O50+6Xx}{*voXwdBB+tH40Nm^Al_)M+=~$sae4
z5Z+n`*8IT~db>Ne8o!En4H=D!p^4zqEdpJPmT-i{32<&)M2ZsGF?Wv!Yz}RqRPA4I
zdH)2g?rzH_EmpJYFIRPw?;t#JWHkqzeoLAkwUjChNu`mG!TGc~WL?oAzx$JA_aUn=
zcVad)Tuy<MnD&?xJqR=(UrRO<Ltx?9rD&zw4!1{)<-{Lvq>p{p3YPa^Sg3I1lCj4i
zaPtT9JGLFlzI>wcU-qm>9O|IASr`2DtK=lB8SK|pu&@sl(}l}6IDh<Tw4G>%ipLI|
zRuYBlN{rEWbv-0IdQpMdI+{G8Bd3@KU{n51QYG1e%{x)+5IuBXvl>b|7Ky5Bz0s!m
zOIX%PxW-h$Jl}F98U-QyeVr!yyn+*cZw6Fb2-eTy{nYp1Y~H0aoo!02(Oeyg!6T#C
zZ?&7GV>bzF&U#@dVN_50I-T`fbmB&lok<fOiI|{Km^9V{VL(r8^=cL??Ak(DuVAj)
zH;)U?Ex^J(fe;pJjJY{uFs<Gjoqf{CU2qf>MSrPd(4Gz2B}n}nyQ0ktTdZI63>top
z<N}931^0RZSDj5J#mWm(VHlC+@IiPXZWtSwyp^>og0X4NKhSVkc(*pa5w74VY*qQ1
z^u8RX`c{?ljhV~Q^WqDr53vB}vxCU*bh+Hn(F&d0ewAKq7X9FxkENn!tvKS$1oGc)
zhryjIpkj)WCvRzu`xQ%3u}JLC-T`~gDu-iUfSy5f;mi2d=y-E7s~j(?C->B0!#9@L
zcee{G@qp}Qy9$$j$3v9;3kq0aig7o5D0D_1Rr?PhosNg$*yf&`y=(y&h_gmXb){sJ
zI2IC?E#$QAcWC}xGv1!C3}^lBkEOCH4$kpH#eq0=Rc4a#|BB3#`&P<BC;npA9ZTD8
z17|lssI#?0h2bOhiGhnbt79mh@L$A%(^^2{!RMrLT|zG&4d<~>`fz5>3N)DBo?^a_
zXPrcGj`AOlp|@6w{fsld_}PN1>ql^o*;?3MppW*grnAdQPxfii96kQy;x$<z#kLTA
z)lDZzd2lY2{wsR*|A?9A-?Q0ns2!Ua*<#3)7F>Dy6ZpQ`KuW<pSM(c>KIJVrEO91!
zw&}#S9~HdpMmyFE)D<4!N6_i@WQ;%Nz;64*c_?Hh&)?sYlRvt1O<NNT=(zwF=8CiL
zM@zQ(cLkP?6+D%oAS`p~MO9nGGb%fttcpFM;m1)(>e~_R`vy_AK?Gh)wBaun-dv$Q
z11EcqVf}d<DPaBsP^`F~Z)xU%%7BT~*n2Vu*1d<=(52XLMW4+~cT<z#H$|9CAS=t|
z{5^X(8}x6(1yAF^%1i7&PLEV8J-nn6UtKIY8xA_He?i&bPart0D+lEGuu*m!EdE1e
zkFEsJ!gnoL*)3ewL?hc9ghAajPi{Pw3@X)XvOZwTiuK>rsnNe-{;p}LSm=*2=TAUJ
zt_4mQF^2<hMss@iG3>K=83Y#;lWoizRLnX<$>ZNaS)d<lEgH>{DtmU`dd_j|Q9n!!
zwc^n8&!h(7vCy+`C)`;px$@6L;5=+9`%O6qUV}!m(ZZ3OwdW5`JsQQeb9-a7;c(nF
zX#wgDjzG=uzbQt&lhQo%NpZTbLxz718NAGwOX{1!=_!k_@yBF}YgS5ek4!MI{c!T_
z5QfWwJ+SO^GoqVYDK+aiJpMMHiv}Mg&8A_Twn3YcvP7n0z<ewnvy~dpBKWo2N}f}m
zOMYFu(Z`{kvHXk<X8n3f76ZGXW~v@4Iw~P<j>vqPctf`0Ur>GfB?(`#qs@aFunql-
z()+aGLSJ)^^KK4_AMe2TLC!qbIGVo~o1w8wh3L=E<*do=>AUA(^a<QdR!5g&<L^l#
zZ%``vU5<ywPWK?KLle0@J10K-YtXafJXT#0y%K8!h=^GRX}awo@xC+WW(Tu79*0qP
z^wF!J8a#R~;lPB`U>G?b^-f!%en>f0xeVguL-`Ok;4bv<cm{4{P313Bf&}l~4AkSr
zv$D%Fb;%`heGa9AqGhQ=gJ1$=814t-rD0HJ7D$0ge{6NfL*#~5Q*Ndq=b3iGYVe1;
zJFb{8?mEp^&%wH3uK01ZwdgBrp>8^0<=PR_e&6|A-Y1NmkGV;0n}=YNRxa#%;>5q)
zy!c~OUrdWX0GYadF*w73AB2iM@w@<x>U58)pZ23Dt)~#vc0Svm-%e$poH4nBuE^vp
zb;y}zz-9j+ru`ojoqt?R`S-^MNs^3&kPJeS43f+}hd~I*NRmV&Ns=TXNd|2~Yhy!e
zV~1^tZ3!XWb4Y%LmLzR{CrN9=Y~s_>w%_;n?|nS19=Z3Pb6&64^Y!ZzIo7@&cKkh=
zn?6(u?opWdjtMVQ%uVv<pCM;+I6C!7f!tO*q^6*K6j^kdvaB|s9UO$xO^@V?Pa!x$
z=tVi7w^P+{ogCg<_)jaAb6Qq!Zmk}IE)8NHd3~0yP0(T9-7ctb?62{zWwM+j{{P#W
z!2O|MGB~wBeWV@Q{AA6~Cx)?&=-{?!{fVM}3K93#@mToF6S>eoMH4svBWX9Qgx2T-
zDL%_6W6m<slO%dLo!fHsjsCb+iCq2TI=nWo6*n*HirHVgLBKD9cl840!fIP84EiFw
zb-hl-jaFDt9?O-QYT5@IvGJNe>qnj^$3NzvOIo$)&U<|EWXmG1J8a4N@xOxAkveh`
z9bb)odkPIQ5D)F0jAnZc<Yk^D{Hsxzb}dIvJD3moV|+PzN?XkSr3)qBS<RmJBRMjv
z1`4~ROV`G}Cab~=pxp9FDs@j}&m(g%Uv!Zt?fXp1aqFnAZ=T@veh-Qf&QjEZAPo3L
zM^?}b3ddw<T(h5^?p%VO?Pju~b#p<J=zTX6Tpm^Ae%kTl1kN593!%F2L94fsqo?|T
zt^6;f28d3Ylp;u5E4;~<g?D3FJbO0o5<OQ(q43HQP;`1xpcDK+#i{&@@fKFB?{S)p
z%kRon-#?cF296;=+jA6nVj`D*{YvGl=Wta;oHSzGEG*u*o=l_f$cA)ZP3e6XiX0@k
zTW6G5ImiZ6EUF+ac^kM`g`sb&MLhYZQ5@U06!Zc6AxCsqPWZPw*CoV5ldc2x6rD_^
zD<-q5IR@80RC99DP*k20x?1%^C|r<FcIQLk`G-W@<>!k!?KsM+75#Q!#m(eiH;Vfb
zOr;H7$g7`t21ct<)wPQZJGPVFDi+dCexTaXi_v+G@Cf{{O`|%al%JLz6+1vLaNnsD
zcX^~-j}>4zx)}BxbYz!_bH#VPUP`%FOeOc!SVHlbaeNfT{5Sy>J10Th`&g>X@M2Tl
zaakA9m+f!5VN%m>N^Wb7l`HnrnYAM^XN5nWH!0C0(jUCL_@VPZ-kdVM4`pt?P3qsA
zxM`=*m0cR3>f={%wEvDmF9)L9EttH1Yfn)tw0QfbEmt}(#GW6WIPLFbvWjSr1DkXh
z^l&le4qJ|meRq<}*zYN{Wd~Gwy71GyR$RDvHWoJ)fp+o*!A{!=HGlT!P!mCnm~B-P
zy319;5=ZuZO$GBE(CJqTwCXW{>+c;DoV=w_cf5%d&+Zm@-F6YoO$7h_@fcOHM05o`
zlRf610Pod#6i_^tH1Yt{1)F2Y{%NRt-T_qgLUT;M2Pp$nAkKCkrq6U|oA@qlekqW1
z{fFSS3<Jakg^6dpH5In%EW24QL9-iS?7h*CRXsOKSzX6*z(2L%bfFa#Kk`KN=t^k$
zYX~3BnZ^}web{#YT8<E0v(!6<;JM%iWTj+L)~h=5Yqy4<wq3x*9|09kS}^vQAQ=_k
zLE5nds=OV6W1CiDcyFO^{#jiR`|Eugt6hn9(kzOaG=SaqdSc-RXZRxSnid&-!B4(R
zsk5eX`qxfab7(cEI7{r3^Z=BHpUVg0XR%TD0`x0>ldjEP!1*0w=s@fOHr%@__dK<j
z?a#Mj!~6Ym`m_R4d{;6?yQxyXVc8aoHV7T#dr!8x9?o^gMR&xaLdofY&;bgELe;Zq
zH2Mvo+@Q6Parg~2o7%JUZvm|EM=YNI3+(%}40BEjzF6)H@?Y5+ty4!}u2VFW+?kE#
zE=2WpZP<L`3XG2Mr5KMGtoQNb&wZ7g*IoG9yM)R?JMCB%d02S-{sZNpNobh!GpPRS
zOs3n@(P`BL^3;xFvrivD!$NBn{N-{pFIK&MMNOTzl2r$>OTXBJY5UHT0sk(@5gfeI
zuff>wi(tw1T_t=zcJ%hxR89*y23d3CQR_ZbHfD6>q#b!w9O6LL32WHG<U@t0?`sU-
zIODbM;=S}ai>NLhKX=oz+cDtB>7GcY6w;lG2d!5yOx_vBC#=?^_jLx_{=pcI=7OcZ
z1GF8z<)l;3z$;)rgl-Z3*yX1wdr2G{6Rtt{>_rrKB!Tz&PQ}dRM1K6)k9Fd%5_<VD
zm4$8Mx|btCYkfdYE|`n81Lg_dYJYI~D;O1d7YkfMzR;e#uI$))4J(rz$V>F>MGF?u
z^UThu%~3*-M=v~K>%a!-N`b<){-VGDOF3Dg==MVo;a&WN0zUd+%8WMT5gdbcR!2dh
z{(g+b&8wh)-Imf8^oD}gHhlNECu{A$6I>`ITby&JvI}D|VPlN&<%(}tge5pwxMFzM
z4njvbBPHED4%Q@Q${{xj0!B6q?R<-eM`s7F2%U;*MJveK`-X~>H(<{{e8f#58!pYC
zk2`#9xM}4I?0I1-mq)*$Lx1<<=pT9t2I`NXyp;@fO_Lx}%sBfB#T;L-fjzr=aB7V?
z_sbrOaUB-X(S|@&*ewEM%_a)^`Csx|^cN|Mev!R}-^?RQ&B|q$De8xTXkOQkGo9Xm
z>6Ze@RPqJNN6rxc&QyHk+EiRSV<>v0dsEN@@eSKH5Ik*#XXy4_Ioo0nlrQ;A6(JrR
z?Qu{3+`28+ADP7&3x<)}Xih3OSuWML<q_-S(c|QF>bEKZtI{^fiVb5Wnqbdaxh`zB
zSZGV_EZMXzj-%q{a_G5FpuS%$?uq|@dvuj*bC+^c`YBRah81`Vj&?y=E5V=CVoJqW
zC>+>V(p^c1n(RpSclAU?>H~25Lt^)r3D|h)1?hr^k>BZ#xM#>544*p!V#W?<x8V;-
zU%pq0x~|}|2sczE-G}`0D8XMQDf~=VEUpxJft}w;`7=dFWa2msG4F%s%M_eGUCEh#
z?%b?R1^=`2u_)P=8`mbVc1|p%wGGEe?PXB*4W|HKXN>OK6~aq4vL9c7+yfgqt<OVQ
zUGyhdoq9>SxAv&&UP0yKLIuOQrxfsDARQRz&g!pv&@~+ftEB%R^N%m2ShS^}Y<MJk
z$Ujr)B5{k2azw+7l~C!_3zLnjQMsxDjGgw$Zda<QAz~A&_bbt^<7hSwPL`Wrb;nYz
zcwcCak<UgmZ0;p8ALYeVHGVVPPK)M@YtO|TB<=%pw(Qw8gB-oban3zo)-H(_I?;Wq
znzNI<4Dqc0&|dfu@5AHWOR!{899Fp<qVTO|<l~`bJ3lM3bts~o{3l|+u_!3r{+!ec
zyR&7V05La+?9C2e;X9O}FjROe4}SuuLE|As_)-;a{jh3rkaT0h3bf6CK!<XMvSP2F
zMo}=T;QhZ-Ic8B$jyIdhUc1(@{*KTXzj*|v@6S@TaV2^c--CR$y_jE4lcT*Q7WC<i
z`#gGJ)|TJMH13rYRT+xer|(m`@d#zySD^Oo-GYTr$Dm`!4Dk984mo4<$!z5q4F7Zm
z0>=CbG|h#zmyb}|AG;|wtuIGTAI9pLZ%Oo#vBS<_UN|BEi~d-^?(O{1H9uIq%j1C3
zr?WEgz8tV_Br9Gvlj`(gp#5J2FVde%zZr^6we3mW&xXr7EfnnI)f{b}0g#aj`k%T=
z5vks68h(PD>c#J4<uZzH6NLKJE*xLCm{0t?2$gC*hCVw$ihp8g;5Fd~cI!r!#$jB$
zVk4IRWyWcHL>HKiKRfx0_ltfioiK!P?$85ZG^T>i*MoApR6}0dKPm517Nx9sNX-dW
zSQ#;uY+HJuaz_&6M}$Car5T$QzavH9T3Kr`8MiDO$^G`4W6G**O3!bmk`Lq9Fkpoy
z<@|Kv%f2G}>|cON{}wWp<w+&RC1^0~poxBWTyinRV)~U9nlN)Vnr#ZjsIFr78lDO^
zB3rm8*`B=)YOv^q@HyPvS#T+Q2s&$5b3PPM=A$#D=-W}F+H+1;+_-x&`a}uUxom(E
z(H~MBJ)afYO?)U-Jo{pwGk#wvr<|WhR%J36JPlIX&I(C!S}ElvFXfnEJ(?DME7>3I
zfR?pxsD3t`lqC-&{j(D3#;x(JbkO0J4^z1~bt8{e&%uV2d6=0a{PaV+B6RE|zSW6j
z@;xQ(*lowq<{)JJQb2~ET4V0PWGU-@JUU(ekE*Zq!)&{oaKc}O`H%Kf#P6%w@W)8n
zwYodDt`-bl!DCH(B6RQWHz9RS5HGA(qi2ioy?5)4y4hMvaaurWzB9ybJ`xQ5mD0X#
zUD4^E0iw6(Aw}o@A?bb_2yZjRyUNFz>zc+>X5~wwnj$zfLGYE$`?9*}lT;`8=_OOe
z?4kT1$3=?Xi<m87t&ibUtR%NB4PYEQPk1?6<kF~mFm4Kg2}QtZ;$Wp!d@9I(IF(#;
zUD0qUTl6QdzL*>#=EHzqyvI#=x)+CH;>mT`(oKU^ON3{Vt+}$cCr%5n<cR4*G5_KO
zsC_#Y%lF12q#h*W>M@YJ>6Luw^)${f^MaC|%dqzOQ1s~(AaXDp*c7o-)(yNu2BE8!
z3<^a3Y_WSK)<}*mSHa<iKc@X$BL`d_1=_TO1#3^lqG5F=guT(AX}Uk}=`HdE+h4*S
z<uX(yYti6vN$z=U8GCde0=YAzu*Gfy9u@oJ2`dY}yQv#y?0*4ng=J9q`vZ;g$D>kY
zh0q#519*-T-*dkXn6hjl)(F2!;egHZwO7OV<T&wee7sc54jV9fVlkAo^<WqA%vSCB
znUv4OeQEtqWOulc6w{7gY@Yv6^w)NhqRqEb?zBiYToeAs0Xk0JycjRd8iiHs|CE(`
zW?{HzPpt2}3;K2Hiu=AU$H)#xDZoO^wU3HORkcxY2a~{Zp#@g<y$5bF&Y1f2GVI8Y
z!V_mT+;kz3inqp*;pj>^e9~OFF{>{ZE**e(?F9$_py*Haz9AK;LfP@aGA;|~k6T_Y
zLUsH&w0eJnbo(Y#<u5bHWV|YwITk~4=t6Lb3qiY@H>5WO$%i%zKZ)~BxwUdS`|U3i
zIxnE{I?4Jh(WUje8#fEjZLU`+mh2Hb$&n0F@4qLf>^Moe%QB>Zy{_Qd`W&fkeK@wN
z3&tLtiOP1fQ0uvgCipI4vyVSha?DsXd~;v)HvdOAY=^Sy)*>nI*<X-$<F%Af`3Cgm
z3bGS@=uSH=Xxb%b?AFl=vlB;SOQ_HfmfE1*u8o*`dZ+y5-fT=b?kB$Sqrvg@8_1gW
zBfPyXap~Y=WSr9;oLch8ZAgw_vd@<?R!$;y^ad=NuS3@#W^jPcjB9;k(bIM-q;H)e
zn5{Zc9r+fNW&g^y-`pZqf*K6H_DC6vhoa%`Mp-+_n)PL#Ob3g=?aoy)444i1F0Ndd
zkxA2*&*6j(Vy^go0!POz!>G+&@Z_%EoVjm4cRLWprf+6};_DEpr7oCDzwg1yc^!op
z_aJ00kO3?wv31{7TsrO%8PmT~&lo#SSob3Ym?mSKaRz-+b;4Z1{%$_~geKI+amiVG
zh9nz~uKi7lsF%3<u_vp(h}&AqO{y6Q92Yqp!YjLQ+RxXd(iN!`KE?$Pi5$06gSbW1
zf26u@y;)({>|W(D3~Cz(W8RWiP+GPDz7!ATrk7@XV!1o|SdQW9^iYf_8H`>xJM#pQ
zVNE6BjSikIvSw{q-=-t_97c575XKFOBB$0(SzvV2f$_l<jMyjUGmi^&a>WMLo9d-P
zlS!leWgR-l==u3eF<ZY7^GTG%o_oh}>iF)QbET3J487SZ`7PvIi`}d-L#lFq272`d
zP8s6}N%PNv*T-wndWMA8tXiSV;bmA+KA)|)nbdO+%L50fF!G1L!DXch9Ix21<K7)m
zczuRs2puMmUDz5A#d&gA@fuFQxsK^}me9jia?hw?*w}6>By_UG{A=U*{O}HVd%Pna
za2mz~mv3T)cNeN(b`-vJiNyM%X;@J^5zBYB#<%U4bKJa7Ky!QXh&$r`a4UrS3E#DT
zatwHdt%17EPM|Cerm8Os!CSWs)0Xa|h5k0Ib1Rmcf+L{hSR89tmdR((jGH^!<78*y
z`};1At4l?m{gCkI`}X6U_M=dDa51SyEaB*b8rEL5<iek<@#)u}A^jIijMr{LVVdId
z-u7H`q*8RHeJj0R+L2u@<pDUnqwH6~Xg2sNn5lZPw&IkO{hI}42DS*zN<2R|owzv4
zm94s($V2;uECNr_7jHdkOMlg<etQY(I}K7=WDio_Y){IrTBuwaNQ%Qe#`Nlp)FSrh
zx|Mf9LF)=mw(iD0;^v-{`#t%WFAzM??W8CSl}h_f1HH0B<fx`m%G@xX*0T)<jmm?R
zci|8>dju-ImOyNoFDB~xp-1~0l)F9_tLy~B`%_QOdpD3X#;Yj#r<H8_eut#_T`-@w
zc1G3z;@Kl~HP|_MV{~;aI(2YEW2U(j5<8r=Kc#9KGq-?e8__MgU-T_}+)Y*wy*ShK
z3ZCu{$8hRO12cm-BW@*S921O$A2zWn^R}$ml`KU{267acMWe0oc`W(4z_CLxrzU&>
z%|>?)Y4k#CSd5k~(=csHi~RX;3<s>;0rn7p+C!fT%6AIS{d<7o#VbH@=+PK;ua{!J
z7M(vw;?Q%o$i}4oC<nACKyfZn+9L&{YK9%8KJvkqw^pbaX-m3RKhlj>Q@K%e?I<4~
zmjh#5xo6TC)-QTV9vdZAYLuLD+#4E;kCI`3EP8ak3+jMwpei~HsW&y^mi9`@{i_^`
zP6V^dtP%WJ<W53)Hz@B#QO>3BplF<#@Fz6O%HX~f6B~fHw<@sY%_405)lT&8c|zRD
zGek)@=+L*r(9munxt#D8x}78CMty)PyH!|hFZkh86R7G_u&ijVxtOO~&nd&)u;5rQ
ztG=6yS;4m;_0=t~ohF0+gGmZ39D(8O-@um@lTmSEI@I;?AZ5T^$yV$Nc5iyI?%@;J
zr2QWlf1W{*PzVh{YgzewC@998Y1EG#L6J39bMi_A)<O@|r;L{KA9$eIx*6D#CVZ^)
z31a_oN1JwHryfy9mwI?}UfLrXxPLOL#dqo2`RQEfIGef+>44s!lfW?RE#-+kOYCq<
zE`F)T*2Zv-d2G#wyxwGL*hvwWJ<w&m=t=e$x!=&qyf1wMmLA+KE80CRaPS(1rWOCm
zim+j0Vr)C0;*SEbGuKh{<9Isqx5ylSZ6|u7+ercIm6SF#lQP!!NA+<xoS<?-r+dMW
zePIBmxjd4Z-Ri(Yod&vp(;@b=Ox5x*_M1^m5t*K%CwvI@dn4|U-EUIUo5c|BXGtV_
zfs#&naMaE27%91O?k~@zL$2$&A!!AxtVdDS{NeoeZXizUH42M+9;Be<{aLYWN`YdH
zyR5n+yb7UWpO`50+cFzeTMmQ9V*@xnelM6Jx<c8*McB0W5%{GxK-z?Bva0HqwC9Tj
z+w|1)m(mbc{Qf_1ynhV@M*st7PRE{^mYfqgA1%eZSu3=O)>p(gcS)GYK_~K_??m?P
zb}$ucn<e9pOLA@Cd@TIInGG9~rG$#7Q2!wmRX1-#Xq%1fF5YEHUv-@JtV)jU)0%4s
z0~=0H;S9?Il<%sh$(_A1Ofir1Z)QQr=Vg3gw;r4Rw+4)GT*|H9$fk2Ar96)yPWz)y
zvKRhS?OuCO&vYj@=Zn;6`Y7~2lJtRn<)ZiV*mKNrI5}|`SGxAW$_3k?TVg2Y?z=DL
z9}MHPGi9>HFKw}gv*_AJAFSAJk9B*BpejzsrVshDYl%Bfc9eME<3!ZWT7`Z+?m}*~
z(D`zvOWICT(G=Z}ic_bfYa2i-F~^yzn#jA`4v605#=6hW?0r()w^k0|Na2I{;vR(&
zUZYXf*;<O9xfIiP`eIG;bF%BQ7~?#>!Ej(PDAq0D*3V+F_-%U#`eO?PO&o-Zhg~JT
zaTjC>pL1#I4ymB^EYyb$mTFd|k)QrD<&W?K)ya{vGT{pOcCkW3OKd?ZtU@~<;H1LN
zTp1`josPK4%{S~Y>Qf)qiMgQhA2CO_`Av%6){%Wv5;1?RJ2~5Ua~e5e{tQPn)xL$C
zlXmD*G>@w-#dE7~D_d-LhNPn&*t+dX4mIjIdb~Y0+SZaW-Uhukc~g1$w^TS{k<cu{
zxw7SN@r(-($C~|`xHZ;fn!5nQiz6_4?PRtvG=OKb&|BBc=F$a!kgmfjP7_&Iqr)sP
zKC_0(R~^aV-b}fL8@OuQFjQuW8|`nGDScBgD>AyEx5IwO?spMN6}#mL8Vxtz?}%nL
z<{bOT4-Hq^K~>8oIZnLGN!3C=zxtrljKP#LKU-u`w@Tr*eev=7vFJ4jNYQ66X@6GB
zW`8fhYfryYX{;CdT^Yiu&xfGTzpm)CG9KK=cEj?;Vup9EgRr(Dhj5}n%3W=MM$f~r
zFk%q;Z`p`7QEpf}e3-~j7K)6SfpYIwi$18HaCCSpJPEV;M&dBeK9Y#4ylT01f~e~$
zoy~fqBPz$VqtGak-+fgnD|Xb#H+DL+rm-E`ogRR0eP(f3ti<nQS71%?0#sjFi2l!J
z@QLkO)~?@87G3?=p<3h~`-t29u_u&SRzr%!5f}CPZL<2GS90OOk5W>Y78Qlnl=|T*
z^@~`+Rb$qJU#!@HG9oZ5KbVcToF)5%KSAAAU)a-r0Z0DzQh0^*XfZ?N;3}r#Kxl=#
z+O=kFdo!w7o`~VkyKr7tk+~`UO3^6`(cz~Up6qDFxl6{1tmzgot?-4o+@9jUcSPkd
z7jC&}$96ZSVY2IV)YpVah35{DT}wZVZY{oNMblUt#h?*h64S;nklk$nhyVAAY^H?b
zk-fvv@`nU;nco+ig;sUKW+6H*8^Q?(UPAhpV9sdNLDHO5GJI}LH6n{~NMuS%H}%HU
zo!!x;(g@94Vpw4_RulQB(9WKU?!zs^v2I%g>$hv6;?NLmOw^)!%PUFs%|?pyTf?ay
zf03#CRXIA~zO;7LAWliOrpCpMwC|lArtQg>qed;^0OvNGGcFG*J^a8bq!a72-pFd3
zjqDj3%Au`4)8q~woY5)}J!Sxxk6OnO@1|k&*-AO!8*>agKARi1Pr~paB3t6_hG$;R
z;hLj@1#vl_p2~HUVc7#x4&4Oh>(P)uV+dwUZVh9*D!5>@$PjOLlhW0HQ}J}M=Xte&
zwX1?vFTX<7tpKT5-wM-(H#To!9;hALanc-1uI#;w!rPUAUKX?QCGqY!(;%BxbwrmJ
zQ#jq`H^@C~6#Xfk;7G3xsBgX{XP;h&3hjB1=S9xkIAAl-=r}ekb;tCanV9t39L&5j
z4OPLZpnVXB2T}&Ix3?qaf2je}!`a~WZWyNv?vKaQM2?eBgQ@>rs?Cc;r^?>o<?4an
zb{8q`)nR!0qzDvwqHFWJzoDweOv+egLyC<lvd@PNsM-((R-WBC{gOMTIPD;P@^Us@
z7y(*1uhDhdAvf(>%Xum7v3Yknna}8t3i-XH-!~KVUE6bIff<|MSjwi|i`aav4bu@z
z&TAWoX~%-7<n|{>-&801w62Msi5?WUY79ra=TdTk0@G+6#)_^XYgZL|^;-zVCr*)l
zlE~>lUWFc;bZpFQE2U*@<esNCV)of=dha%rbCwGaQRpPO@`>o&3kt_Qe~iV#Fb6P{
zO=qWvgSoP;iH>epvu9HmR*U{g<IeZ8-Q6*8D6hMCw>Fb5e6y^N&`{~-LHN1-5>za3
zDM)ai&W)Expm*IjaOi|TA8>U*<AxZz{Y?U@s?BAClZWQ>pfUJ4t20}SXDHd!nU%?3
zq)flrtZ@``FSX{_JEJ*c`58*{dMv(qPeC>4m7MEbDVv_nXRVXD==`046@ylyOHHkq
z`DbFrY<CDByBPK6Hn4A2BG>h@p{#e|?07zno(?}xexK)|`cS!~p5;cnp0*M^mq}=Q
z>IxVd#J+atid<Vg3BB?}W<RGrS_Do4_kK$;duB4(!aX`PU4hqDm~rH{7a`~LM^f(@
z4~2aIL)Ruln$><unK2d{_a7j==MdKXHU=vP{tj;6?}5_lpQy4V4z6t@%C&n>jooa~
zbFvIMgVQ1WqBXYcnJBmvb4W4hazXiuc-Fo8Qv#O=c3L<a%C$E@e^Srt9am&+>P?6|
zHHj5bH>H@R;cW084B1DApw;0h{t_R?=TEo6x_Qq*b6_T?Oq)zm<rC3r$9Ob$nIfg5
z=$;wp!?~B=%Z5;myk(~!rY^O>q@s8{*?A`CD-=}t#0!q*TC(HZzi33?STqh;Ek}^Z
zQW$5mP1r(AZ{L^A3Ra;Zzd`c)#~K~)ek*d8;cWH2BR>9C+`guWEI_E>P90v%p%LOH
zedVx}c`^*+a--->KQ(3#4nX~o-zDc)9=xm7L~Pu=4NN~glp_aUgn+C6fy<p+bR<vQ
z`j5r(2$5H?%yDI{&1i1X+H&0381lHck-U23K+d1NIq;r8<}Q6BSEPEPMSTz&g}+TD
z{M%_^Z>hA~bgXt=iz#n=bM%NDGEH8MwOt3JuJa>`b@JhT-}PqbFMKE~qR{%-YF0l_
zl>#C}kCM|s%<MpLMEEMJXWDc0PB*j{j4f^PZP8CyM5Up^zxUTF+2QxuoIkNQYhDNN
z7P%EVoo$Ur)hjsK<)&=f{aOyv4aN~~XK?hm64DK`frN8qREU=3wfYZAwy?m^4O_|K
zy13=WC@|^A@#6P5my}0ER{zg&=zmXm{>&N+s(kNAu}iN*w$lr!Y#ayCr%J(Ndp%{W
z&xOc>6O`e66;7OpL+6xv?6m6?D8e6s@#tl#a_1vBvfiET2MSKa_=%*5N{6_ELN}Z}
zmm}<aG3H<36t58waAhfHei?|WZK05HHl7dcTZE|{zLJgPic0>CR991oPQC++S(7NG
z-)vm?DVDQq$FXIbzPQk<BRUDcx56!h^6u{k^GUIoyX2mn6JJWkAV)bT_%E<3S&HGK
z`-v{Dy`bvc0ZzOZdP}#Sg2OX~O|LVhlx~;EA|(r||5?u2$Hsxz%9R-X$sSEKiWNKl
z*7(=9#hgATz;o$G!HTisVxNUvxb{c!oQ6vlMd=iI!IIrAA_UK?kQ7~~YpO;C2%UZ`
zXHMA(+5&L{KL%83S_L^Gdt|u$01EfJLiv{Y497(0_6DYtujgR%;Qdf#T_$;6JqlHG
z;$_3*$I{dDgSq-2C-xoSC%9?Vl<Vyd&ok`!n%g|i_@OUaCo55L{WIlVvF7llwwPW%
zj6>sOQYofV<i$Kv9QD>Hx`r2|bP}1x@&Ax9`5W*W;7^LJ4e<D#$eT>=PUQt7uwG<)
z6aGsf``o8g*yFyG`ptU5I2$ZwM~R;`?JAZ0Tq9<uf8a*bM1DVW99x@3vS#*bzBJaE
zn+uAlFytzn?7xOX-;ER<kQoKFmmOID=^SVy_dr@pCs-J{7Bja^#5A)Fm_7GB9eCD}
z<MeGIaWx3P$34mO?^??D>BwrFhN+#C$+%|)Ht5%gF6P;S{k9349!<t3t3{aB?8!+c
zdp7p@BzswQ!$OPYRJGRz%%&cv2~jKg#MyOd(cg=z(o!MU?Hw61qNEXPSF!iA0MvP1
zCgqS8h)WzQGN*&^{J4o+6|q82E181Lg2xc`Qp3q&#<c1K3|^1PV$FLf7TqTO1`4lT
zWEA@~h<P~R5Tu<-l?(3;fLJ&~TMn<{_s?xOq^mbOZ81XY|H9F6w%DZ}2(HVKRa~;5
zEq?K};quQyquNvh#`3drUg;CMR^=yh9y8d{V1ws(wd3L+^T_xn1B?s2W&7nrxKRH<
zDtKmv^$jby=FDQOD6+>R=e$s}zBB5_{77jo$K=reZji}3Th5GSh}~R6@waTTwrLQC
zdVd3Z&e(B^n;#i|o+g_e*ba8jEihqhjd->sC>*+4^8B|e2iB}WZRSqdf43*<3mg#a
zUV_P0WSYD$kp5+Zocq&{P%ftn4xob1uU&~Xnx9BHVLgS+nSlxtd$71CWc3<}^|ODa
zr>8{rCD57=JdWki7w+tPYOG+OY$mUVB0v542v}zczJ^1deD}yyyzyZUnmNtH>@#8F
zUDH+`u|JeEp7#<wigOo>KF4C?+8#V`-)cOvPKoNww&ZZblXY#+f!)Ix$Y`3x-jDs!
zBm62<9}2>>$Zw%0t3M9>F&K~7DA*!>6j-$@rY~ZiFl;}qQLI}f`Bf*=nRp3n+FqdC
z*$P&Mm4ki82~fOn0sF%F{M0=f8=G6RYSa}uH24M>7kwj7ZtTH^zVoGQoJWT=BF{cc
z%t|2_r0~8X-#g}ah~Cx$X#>mT!b!H0Vn!IlwRRlerWZDz_2K{+0l9sBsqlZ@z-*Ve
zP3c}piokCQf+jh#YPv~I3H*ziOLl;+cCS=<!42M5nX~HB05CSQWNLDzlzat;AHNH+
zhr8p#Aq!AZ^Qpkzb(7FgzLm5ir*nM#YED0~kOLe<rhlXZl`J1Da{o+Y>wq)X6HWV{
z7y3^ZvqYFlE_M4##;fA*-~CStv6nDTEb4L3oY477DCUWqYu3entgm*KQ*O7A;+Nkv
zJ?o;lrcm4wyR_$&;K5if{(V-=QSpJ9zU;nZ6c;YigV*aaNUBN&MF%g|9&(iJ6}xHR
z2|d=_-wcJzLn+Pz(Bn4~saBSQo#<X3@f&c^xGSJj2T)-2M6`d{pH&0qprfG`s<(Y7
zm+yZCR=@RQ*IOI7>G^6XI^2qVK5Mb+iSR+;HYg4MPqrT@dfaD~gWtzjV0~^SA6Y88
zqOXpHs^f3u=vxap@}wVo%^8N;>hl`KpJFC{CVbzI-;nWG8>kZAt)@M<=!U2Gh8`Cl
z)mLjJ&+n^*CadBz|LL)ClPjdHT#m744B!l|d}6k%xLp7@G`eBXzg^MPaVBMq@kPVP
zM^Lw@9mVeNjv24ADQui8H{)i|zPwJ^n}3HB+buXUFOLpAbHs?5A{Qhw$0-eAtefQt
zU#Js0?dU-&Lp%85<A_z<K`M{94B=hsNj3ASRJXk+=QL+hn%!4OJFpJ*IiH|@#8Qk5
zYyhWrS)@p}m(q_MrO@jysC>&quxW~9b;M%9uj<bMH#<}GcM+f%IZhLBej`>TR?D?%
zeQ;k<cMPjQ);)EB)L6lntnJ00?@va3!*W^w$8AYzFvkg(1)r;P7*3ei4^zJH$dCIv
zve(uDT-ogwnZCZ0)qO8XWs^h}`At}X>G&0DS`&^mcR3$A(;gQdp2(@Krm$h<a*f}e
z5=x!d7j?g12mO;DAivix;X(C6<^0)PR&LE9It81v8BbUp#z~sR*w5gK0bj0D<?>Y`
zN4C6RVXq)g7&S>`kH(PFXBbR7tH6Fwg4yYDC-hn|gDX>Kf%|lGEbyF+5d}KTIBCTJ
zeL^_kg)`N3REdn3hQ0sC;P;Er8avj}^I?9xN8G0a#EkjAm&hJei0@kuKaof3&rK<>
z$TYPWa)k%6IDQ}m96t<ceg1=#r9M!4z6`Ea2%UPxE-3BtnDm=w$)d-OpRWBxPVbAz
zVEcny_b8MiXEe~I|5>nh&0838+8pbC1WI`oMuB4!*?$G_`H8OVUN3Z1--BfFcoW*E
zU89yP6&9WK<_UK^P_h1cK~g^xScYss<9-)8s9KA@zENl}jnpW5#Y=`U`!&i1W{~_m
z5fjoj@R9x^2lus4fzuq}Cz=wB2@ZneHpz{l;~c@&sfE(FKT~7+2=skD59<z`hU~w`
zVDsF6scGR^@bYaBWo!DQF87YKqauQ{K1@W*p8jlTd929{Y=o+=QF7@^F%x#^#0IUC
ztPe?-o9=c%?Kj1m@LlV~93bYVn-3s0>@{g^mvjD!n^11n42C;rW%a|}WPBTkfzNGO
zq1be>YVB+(@*kmttSgk`5>HW8r*>$p5bx4rD>x){>+0kVShsKxxoq1Cch|(S-Rf}m
z2;L56bJB#DWr7r4w2^3}V7K)U{d$2R?2;OS>e(ScONL=X@<h(4n!zcbE>P9SIO&UZ
zd#)0k4>zwmsHD+Y+_wv-+ttA5+6m}XGf@2bZWw9(2qN^6?0!ni3a>RDo=Lw@s8tut
zE$zaMotsGU+;~w@2+*hw=YYk#=y4Zo&Rzc_bQ>wYxAx-M4YZ;#U(tE|%Nvd1ODK6<
zz64Rx9r4nqschEe6$LE*2u9I8m|ZFEZ#h@xxV_b+%CaGC<~qsB$_H&1Y^AKx3(#(3
ze=J($#8mkXvKs$|ZYP8n^v+=^CDaW=_KoMJvPQCfc8S!<|4OchrlNlPG)eR4Tnt>Z
zfn645kWsJ-n<E;?kkApYE$)cn|CG{u7vb-*Nv2ZR2&&eJr1JB`w133DHK`+?S=yKN
zS47sn+W`3dpOUktEk%Fft#KH)hO3GulW|{(Y(L{4D)+qvs_BtXm|!iZ%vnx#u5+kj
zcLK*g+YXaq3|IOd7IXYGX^(3sUMO~j+_O(*j|gkgH~tM}ckj-3?{yIRT);)8$w1x_
zw;-d<RWZADr_`a#(5}pfZxqGhE;CEam|_E&WoM}<vAwuAOrzQj8jkF64ayBh81cv%
zJ>~>+Ol^Did8)-TtNgJ1SvqKsjihMv#hi2167_2a$?pYwFso@3c8d>3)f6Qs1nq>_
zHNM=`Tt-z&XNgKJxa_+QoGw2Gm#fRUl)sg;-40XMQIX3u#K|hxjvO#~KV%f;gZZ3p
z3@f&fLrM_3IbR^1vl~`fmP*FY;_kye_=Ha^o-uXhpw9+UFa1sQwdYWs_bjG4kKt%Z
zKXkd70)G9^iC())QqYK>sBzv<beh`^v=b#U=(okH8S}sxf|T*uMeOE%nCOULPETad
zD}ocY@c^V8=pnelUXXcKjGA6Pl={??GnW1Vo-rfY5OY;y)*#`o95Xfy{#o(~U&3w$
zyQtAA5Vfst7gV|#DBGb{@b(HMwfSIfN-%?vNzSb7eFGlv?~M%`m!Q|m&!8xtjG5Q=
z3;vSW`_`=%UBQ2W>5dDPUw=aS7k#DT_<iv7%La^r1sKuRo$K37!xNjQ;^(U&oL0Y&
zM)VuT0o{VhE%_B?6$q|r+$C9Iee9y4^G}rV!yq!Qs+awP`e6Fl17I(h01n+^x&CY$
zOr!v=3)OL{+yHepc~HFUDHta^vA#olYWa6EYCjx@%#P=vzEv%J9xS@S2a3JqWRaZx
z<~Hc83_=5KkNcurusGr%nR>f})d8``xy+Eg4+ioy!9y9b#ugpNbmgjveTjM`fqKC|
zU{f`m8+5){`ukxB2^@(THNl|!?=@(z3m>h)9In;fg}`0`Sh=Scez`MD`1BW|wk%6J
z@=-8Me*a#)uVN&JKgCUA-2kcb%_DG|F_DXY_C%`~;bZw`5e=*ne%p<DYEGOfIF=El
zT#zP*hg6fPFdAZaN3tS2OmLIuU`)SAtQ+=)G#+Mbx~jz3&3{v)TM}g#Sz+3Ze@W}|
zR`zsp<M++;d5d6k)g3IO#@rQHzD3ORcP5~5bEurPWgmE+{Ekuu+w}dfD_HsLiF~a?
zJs2)slFN^j(!lp?MV_{=R8!_9o~z-w&!P{vej@HC1;r2-B5pT+))3mt1$9Sf!@gJZ
zxvp*&N4Dw6(UZ2)CBp{1do>bW-WCh?_-@U$(oJkTsypkK?;tyk@XEz(mV35cj5U1)
zC%KDYSOr*c+{-0UF=#js)OW}9zbvr6xPpqGyr$2&!VCTKlAOEZdni=>tMRN>p<;Ig
z<v3n|ex?vCT`W^+|5?=Ry%!X5-J!TxN5%f@C?IzemXCb}^s9shPm@$-;Y5Y7Q>y--
zkI1YDzez%i{Mf%6hu>-i<|9?;(l(u%!$x!DRs)oM7%Et<9k_7&uhi3fxnSD%0<RmJ
z$>XLc*657jr0#-WLc%a+6mr~|;h6WX4|`bqL$N`}2M)$@&X5#}T-*<vUx|#}Y#k|T
zz0i2M9XCHyu;HZwCe(Km^HI1|<)-D3n{Dy@6Av{0>&)umR&3a)ku$tUQo`o}XkojN
zN`AZxRZB(Rr|4i-yxvieaA!8x&l|>GH6>tlbeC*D8X$b>8>l)vp2`j&H@^9i8rNxA
zrI}4y=f?$yesIE`=N53>##2-l(vG*d%s{9e&oxz3v7}%<yDt$Qe(~pr{vr0!@$=Ad
z;*lJ1{UJcVKf&hQM7Hw(8=O*Rfo=RUc6t;9C4yCG(Y=`hjyZE_Mi;2N@*MQANzTX}
z4i&=t({%SL**-ZA8JluMXX<lJ<GWjA9Qzy^%^yPE#qFS-l`AQd4uD6?d@_gmsO|M<
zLBpOISlXeKvaC}evwbJ7i0^{?wyY30zev_TdjMPhn8-)pbY;I4yQp||6i-{doUKCV
zW5x#IohTOkxVcN<?Td8?yAOgvYm)On_Tj8L@w2?JmTYHFVQqW~6&88R%4tFP#m)iq
z|2KiG4tEl|_Dd=&7=^n&4He#7v3u9qbE+Z;UDoG=wtR+k=@hbe!Vb!my728D-Ed!v
z$UfZ|CmSsfO2yX$*?6g|r1&LB%1{iY?5#)O(4e(K57NpiYdgu;dN>B%P2kDVVc2w5
z=pOHmLr+U_qnUI{6LEJi9=&hDcAtvqy?QO{pLAh^yISM3d^oy`ZeLsfdFXfX2&vkZ
zkZRdF(sW&mRX0&ecYOxY(?3H(P%wJON@!XlvN#)UHFY|XZSL_8Brelpz0-1Z>wF3H
zU$;s}^!>OhaRF4RXG_r-%EjI<>F%HoT=icm<((H?c=x}-e#8SvoTcDm8}Z(L`7fwM
z?m@BfqbB|KpX4&#M1IG9hAnYzx%sOJwzPC*I5mcAqXuKlg-|S5JXQ1v{7i4%gs$+q
z3rG9@PPxH(5D>i`B3kuhi<VaCmDv>|r!T~sgsWth^9qbke@b=(rUEP;jd$ILpmwAJ
zTRTie#|UvVA6f-L7lfy>r~}2uyaBu4mT`X0T(tQtZb<)pyy!B`6DxbpC*|7l=$kl;
zA6q2~olYk8`)8ng`4{MoadPaoMsm3ofQno5z;jezR_WS+#ovJGnjKUJ-6?wLTk;h9
zk6m;JtU41*5!4rxRKR&zdQ7zJ%-IJzz`|-5tlSvKj=>8U1dqEavo#d1>MXln)v#T<
zU=uD4M7KK|`F6!n;a^dM?$2kEm!>!F5cg@b8_hKBjPT>UEheW)R-AS&6zg{^W`p{g
zlyfGQ2R>bap%Y!$FIT}{e`{&W!MWT`+}?Cy+vLPV!DhM_k5x%KB<(T8l#9PWgLvMf
zKiIN$>KIgnFU06(TkO}P4=aBae#@Ew%v%}FrupkIqiqIMH0rtP#B!LhbR&Ph*AW%x
zjRk?HreNXrk#tR41r1jRbH<ud(w-8&&|P;mDHFS)>bw`ncWH~NFM}z4)=em^)sZW%
z!mOZ?LZe>AO82!`yl6HwBndBE=2nW%u8^H(?*+T9`ysYyH~1B0i4KGhklXSR4C-;@
zcJwq{>(Cz4I%`OgaZ(O{t|evYH*)rS7gQ8?Vz2kp$vt0ar|K#)yk8DC{vFE7qnDv-
zZ5J&1>WM+;?vOS|WXnQ!gJW?xy4A+BVNJ4}d+v->3c{;VxCS)eh)m!`4~_Yp_1qNJ
z9@7ujfUl!qzbx1Z3fCmzEtx;2YP%bDd#^>SY0<dw<7ABS6El2zAN24%M~SyZPJ`Ej
z-Ixy)o*Kv{1KOc_#(uffHBqpK!Z{}@3QcvB(I?meRp13V^Ct1p!(*`E1+sO?U=Hw}
z$(843L)g(Jm^Qycf<QNH9h1P<Iwtbkt&t)_q7yq{0Zkh<1xEzgvWq&2ARvZwyDXr*
z^*1Ow_!sHW+d*g=B5q^fPL?y;B!YeC-dO5UEw>1cim9E*+?qyXY1mt7atAS&f-T3k
z7kf@+C_9W?hAtLn{JEqD`fG%iXEp@$_clUt&%2Oe83PuIIjCPdkBg7E!NUEmS?wrc
z^qYrLz4~|3K2WhTeTQ6pZ!SbO6_9CLuIxUkEq1#!0Zlo%l-<;x_9eTa$HW`dGg@$|
zlRj!DTwcu;eLHgU+ksd+M~P;}uN1grEr;%(!|Jk`6#8u>Itnc>_f~&6GIS>TOtWCy
ztb62fL@+oznn1N^926Qa$W7TZF=5&}NSIhh={d*ALE8fpf*WZ^oE<8fpHPcBkzM2m
zaA4sK&Q{E)*pF?|@3hFB{5cGLcSNu*{a4vz<vY?&tS`u2rs5O`1n(o=xp3oiGMng!
z;f6gB^uHD|c(pCC*mRO=R`(V<xj_@IEu~7c<y5-G8H<C<#6023q3s1fe$q|~=$C*F
zRbl-3x;a~HD1h{)@oZe$EIFS|;5ubrD6B6nuo@K0%`+cR(^owo5WdrB*Dm7ydy?X|
zyHHh)lQb=7ljscaf>QG$(#3D0>Uo2)w5$;<#>}OJryl4!a3vb<htktI<>awz8U{4|
z1EE=kr0vv2%AdHOg7#aY?sW`T1-_H?RXd>me^q4cdlkGqXTV#F?%bl#W7Iq^EPU6R
zPww|*qswQ>!#Iob>)uji@k3Be{*F9-`l9{ujht~hk<w&?WIarY=FMtWj~)(32Q0&g
z@A_f*seZ!4FbqA$uL52FW3pSf{SfX?f@|4|BYU`D&l(?&G>%6<SD9i*4@Fug__}WU
zA#_h~v?%(6_6+L8`C%eMd9W`vi=DU0OXSn%x05o4uA%3@`(UBa<o5{wpLtXSSBkmz
zl2vb{>_>u8aZPqg5PHNN2eJ={K-K&k6zb}Wo`ctMOqD&RRsJa@*e>C!l4LNtcc;)t
z7MwYDET$dYDVK@&dugvkw7xu*qn58k3(sYs>T`h<RsW4XpFSF`Ura^n6v3IoUmQKy
z9I6|7b5-L3*>h+PneFVyr6s>Y>H>Srwmw3acnUl2S8{gHKyoTrgF#oaAu&hXQakD}
z=RJy>eh4;o_#5u}cf;bF4wS22%%#7Mf(mgHF}*$ldfQf@sJi5FsP9~Mn|f4m8M;!_
z+Afg1u@&k=DgZ{7L;1NioHqKL{P^HlPHAeU%+3jTL$FWNmS@Y&YrW9!KP{xa>A{Mm
z)O_twJw?x{mF)9#$dJ3AZixQ%ykDMBadQDV{ijB~#|b*IWGP;=3ljdF>vHKYKS|mD
zoT1$MLa3ax7Q;pe?o-*f<k(qw;++4K@21DFDZmN+>Qxw$Fb{XdBN{Tw3Kll5$IJ<v
zxH?i~3+pCIyNZM#Eqwz!E`AHGU3+7m;5EH{(G>%mI&sO?TcjA@Am`6NNlA}?AYZGc
zd_eES)yso;vS(MO3qOn7)P{lsab2<GXFJaSQZG7)7GT|X-;vevNf_svO1dBCaKq_I
zBCjuQ?VDDMZuc#uJ=R**R=k7TwZl2zA%^N#tmkm&P*6n7z);^#Y#X@-zx=ff;nM%0
zX47kU+}IYM+UzHVag}7fdpRl_{*jEXw&+J8+;r+AdH>RpmG)Iq^_XE?5HJHz-V`j{
z8D40rPm%@-jn(s7Ed_P!i`waF071U!<n)|$cTa(9`8d=*%Fx(nD9}z=XG=4>QPwS?
z@g>eB&(ui1ZDEJ1z-4k_zaKTFSFO;2R*1RHjO~(p<K!t4Hfi32#f^nfZoL9iZ>TxD
zd@Q>hwZgpM5VpQP4ejj29#G-R#`xvv@pLilINzV`0(F#d`voW(P|n!Z1tW*Jagy!#
zkUM1zy1wqpzJ<bz+j^UHZQW7u%s33CJ;!6Qi=JY~?4hQgR^g5D?g$@j*tFV$svr1s
zOo2BWdQ^+&;J$qFf9p{FH492NOhnUP$&&dP;ItP3JZ*PRbS!8BXgi4OvV}Km+(4)c
zTo3ztD>$T9{Ji((^1y?_gEO@)dPut=J8l8$;)b*1O0i=FzXOMZF`P8y7SxVu$0dty
zK>5)M(p(yXW4}6b)A}FD&~C4$%48u$ID4^^oew4D0q1PI1cj&CNZRfnCCh0NNBbQk
z<Hb3od6R&dwGkXgo}hF)E2j<LE793)<hp1vYrQvXf?U>$S-_X;znGwE;TV`u;eyVi
z+^|?^dK!zVoH03_YONDE&T|U8Ox?gaad}Xma)lHQ>M^knyT~;EN2#v+Xf%FymegnZ
zfR#yj@4ZLz+J9$a&f@)K5<b${VU?slFBrTRLbxJ$6ef&5503m0ympJ+_ze}grn}&!
zlWt;`C<X_|Q9{=hx4Gjw;b(B>GslKwQ&T^|746LdOJ{&qTP0gAjAqT5c3gSb0;9Hf
z;atTU%pR}+O7~3UYf}^WsptQ9^(^7aqF#{M@E2v3egkD9&uM+CJHBn+fR%p>4o3MG
zP<;QZ9JPLg@aF$9Cc5=aFx9+<oDHHE$9Ep9zYSuGjA#h^R?J7ou2NImt6*^z`01+?
zWWFO_a630(TFo*pdG|9IFP*2HZC$wHp5UA|Oa!laW1zIfnM!G!q<auAIF99@s`Zx@
z{6G29vyGVURs@cAIxIc$Rr+#X^gE?>h5F1^>{TZ8ocSLq{I`ju+<F+A%*LWl7YGG@
z;{G+s0dqU*xH8|9j#kX!()XVv|AwAyv2{Dp3*kk4GMT(<7jx5UJt@tXv+<8m2vEES
zgUuguW1c%FTSao=loUvc_C)QQ3MuXU2y~7cfhkYiQT28|6+gEpMN4ONKW2{lO$sqb
ze<oANZ&Da6;Gk1qDQ?+7jyzRN>T%VQf3$?|H$5<JvlA%x8OgrpHnhwYx7Qv%aC=h(
zKmXfY_-U6@INSi654IRr_Eq%EJs`Vpgg$+7J9(_A0z=&Df}=y6+4BXUnNNGR|N5LN
z*Y!f}Mh$qJUr5G;01DWi0M&J4xbcjN-Ez8d;n((Zop(pfdLX>db9)M|=~ZaF9*u_L
z5Gnk@3s5TqsPM;Ll2YFRdpbvRNh=@rI_`>*cCX2?ER_l)80<}AXR}G7rj$Vxw6qKc
zN^>w|^eC)Z)C{>p3Z($c2^9ZXJdge7k<&h3e(t_Rd|xc-kc*gaH+Z7%))mU!CEo4h
zmr1GF$0>TG@bcNzNDkCh@VvTnz!f`~@Qo*j-t}PbrEAe3c&$d4KjaC{B2zE2J%#?}
zs1m<lE6oNjobbEgnD|h9XE#3L5RUuwc9^j=9NdI1YS7g|@wqneIMxpfA7;os|7e9}
zErY}?GeTDW>x>Tl#CvY4H|R9u`TePxtPdSRQG2xP_k%451k4usy?tagG=QTg^asUz
zAIZM0n)5>!kvg|Zy0Lb;@KE<A?UK8i0!0W`R!)Prjbj9FBoXa;xv<^!X9WKX!$gJP
z13dc`%(C@Z>i%4^Z{Gr5-8++FQiCRAIK!5Q?Ra5QJM>z0Ky+=j(7>POW8=m@A-D4j
zs(dz*>IaJ6s(?Jon{T3khILdtwheks$N=+?!TfY|2UN9AqZE;qNHP3FL9_3}d0ByD
zhx&;dXQdo#I6?~NZW_~tbny6g8KwNMGx;Aj!@@PgFzty2bo+J~_Y7Erncs(U>m+YB
zYY-h=t-U1Z*n@-S_Gf+Z0Z_i*4~BvHkUb*_u1(!e|3}f;$Hka`Z+s*q$smMeBpC@K
zA#<O@Y?35NLYrhHq>_;kl3_!e*5<jjEwgEpgqF6=+@HgswIQ^=NlR<<99rAbwt4=Y
z-yi<r^&&I({du^~b-k~KqIKYPS5L~DKnlm_XCfwk15e&fCx@jzq`tC>3O;LvvLy?J
z)u&O^ZIC0q`Y)Kh3@uX3{Zy#>Y&rPncNSjGX`=QF-+LBq6N;upZ`H1Y((Klil+|M-
zb7?|E%)s%~)c+efFxHVQHM<~e=SqliA4Xp9Zi6ZJqns7_M7C8<C(j>KG5$~HJOAN9
z1^)5Ek2Om(vl+)bv}aQqvy5&IB;AuWl$Yy67Rx8C-+#%JK6jwVI_m?vv=u@>X1RQJ
zR8Pv(&qmFu%~0O9H{Ofui#x7Zq3t0FU0rWNR-vABKLm-2uL6W-Km?76T#wc#Q}EG6
z6KGbm=i~%yF+XI0-sMNpIx!esBZ|RsRffnI5QjIttx>mrJf_5q;I(cVC=zqzJ0@pR
znB%3;bsNw)Wu~^pdL6I93!yHwjD3iEg(duJsd*tY)?Y=Uam<%;-Q8+veK$r}cIKFi
z7hIMLt9Uj|i|M`0oPhfy$nxs7G=9!(YUb=m&HaA3de3SaliQwk<!!>_C(g_@Kb6&O
zH-gPK6NJ8poovKFk!a<KepPW8_6N_AOQH}~-Gr9*+&4ISEi<P1{?Tu<Hd}QI)Qh;k
z?W%-DMxFyr+$=r%CI@1ot3j!}AzzunIzrgDa3Zb;wsPlVrPD>Qw6_w**?$SgsT#;|
z4WQZ=w?(7kg9!8WqLQX9Lf19~460H5PX7c64|zthVKl~AVzG9WS!`Xhh8|twvzu|E
zwyc*G8gI=v`-j+o?ra1^*q;*BKNSg^a!*<<52KutoxIMB6`4OSq+;(YJRkl~G$mDt
z{Ho=Yv|<>hH}KE#DnvGLpHQg(7GWuvFW>X;jvw#EQE}`*O4`^2k&SAKDX133Upy5B
z%D;ua%Oug<G6Q`VZp5mc>5_{x&k-9|X}$g|g7n>=M3vQPSr<A`j;Oo^)*JSVM;X71
z@{(7=r21Ptuj+;Ab2bQ{QGTSkYcJAU=TgPe<xqJif~w*xgmRJ)P3wHoi_f}Q^^-(>
zA;`g#){D1o?9l1Q8&LeK0h8|B0HenXv*wf!INIcb<D5*0_ZvgkPlZs4?~jo7J@;~Q
z?^EUJ0Tg}0maLWg!0u2PRCeH@+?+o_-}^Ice4Iq4>kDPo_gbvkHU|Gc$MJIHa{M^c
zhHP$+5V}$|Hgn#@@_L)ONfApabN35Xcq{n!8A|~lmZE2%18Jf^$mS5vdRkwW6n`g}
zgU2otirvLf-zJnjJ8l@9Hw!#g45Zk~MVN7-KQ#n8i7@4Ep>FF0#<&S0_2MG(ACx7s
z!i21dKB{&4yiVxKf0jMdyeP<8iD^q73cnf;Dw}jy_`i#S;K-elF)>}JdN;`#r>5Yp
z{5iO_cmlQF*#$`xc7kT`4cSVw7O$UMiDG_xG>zXTyIqUK3#QTJ824B#Hm;zvH<PfX
zVHz5uIag=9k;;~?rijOzg)n=NsVqbkn=9q_XZ?AfUk?ZGbfw~<^&)*z9+Y1lia9PD
zuqoFXlO|o{zQdlP?w2lT2!A2zE<}q9mpy1qsDYxZJhAzb3wga?h`P38rRZbqXGpNd
z_CtDdZs^&W_wP=K`~ezFUl#zgU-l#wI%9R-3@q&gsOxl48~uGxEbg*T3ihs)tOuVL
zwHy0ENRW=|KiCOlX(+UQF&hnY*FpN?t)h01Qn>%rliC~malW7ZxRw>X4tt@aFp2c-
zAGzBq3JuvtS$8o7F5J}NYQGSS>ffH;w(CO*&A*avojY0TelV*&l7#AbH{q4@4y=w0
z#e0AHv7g`>RQ%DC-s%jfZ(1ZPa=gcd%<Dk=-Yli9v%JY=nhs|l<&4g+_R?@`?hjZr
zl^(?-ZJwDxi9?6e!H79joXdW>mQ>7D&7i>aLFj$kS!5py!K|}F&S$QS<`nm#Zf{4b
z+ewm9ekpt_J}38ohLMMgy)OHQi(SJO(!o@3^6x$q_0j2)u5B>{|JDI|c3{86Hzs&h
zJ^&LBTao{p7Evs>5qW;yvG&af4EnhZwT@4OZaz~mYs9y*cR(2DnN48l-4E*?^}+Cm
zqfz<82VrXF-(RtmsxsD+vLnydW<CSUZ>vPiRx42u;0FrDpYnr^j;uTT3h(_tiuyX{
zKsHXHkfV{99nyh253)JCmWbv6&Scv1x-i!Ys)Oo5b%`~x*nD&0n68*Jqa$Vg{!+U1
z1J9?%c~JR-VRSYw0VBVdPYGom>B*UeeBQE0twSFSJ@-VE`mq19E}4ug%H;;lX<<Cu
zNy_Rq98FyZz}eaTDQD6cvR0pjn6U_Zl6&H*dOr;Og6EW#UXa&)I>kJl0WnY5dp_$w
zIs4)bk(rt+j1eYt<1b5a{3X`rzqEqIs&S-v)=4rg;;dZ11T2mp589dj<R^NOF|<Xf
zzvRCQU9f7kJy;x$3QdpS;2uPwIwen-e*Hr<CQc#muN+0|K)!QcER~DDRDk);42<q@
z0Se{~z?MtgIdr_cTo8E%3?Fw$#V1yPN_H1^umvtXO(f^S+2}MwLyCwp^I0bwR9-Ix
zZMP+O@8>{rZ1+2;EemK*I~^9CT}HaGR&-P4fWZwrgyBnn%8dVqIr<e^??q$C7{R^4
zPe+m>FGLQE5TF~m7~)s93V1$@b<`<Xztx*A9iE0|6`oM$J`Pf!#-Zl*Rmj|Z6$;{h
zhkZ{t2XZ+Q6)W4B-5ZtMe|M2Rgq+KHWCwMu-B$Xa0Kc^wY6_o35iav6^Z8v-*e(KV
z2abolxX~1%x93jk#Z>lTgHYZ6M^+Wj0FTm3qB2;Bh{cCR;<2fiojw&(%Un?XUq>wb
zhtH?q9hb)3b;GnauGDZalyZg^2;IXPbLv9YAG__>YWgl9%j?NlGV*giCr`qpu`|f^
z$$luSIwy*oTIFnC8T`NA3SQ#}lK)yQdukm;8T;Jets}m__`6vAgFSbAdXVPf5(;|}
zhA!hqbGG4M*6-Q~Me_!6;fWXJ{Id^QE4;+ZNkDIZ=z+{OH)l;u5`mg7%;&ubim%6-
zOA8aIVbMWoNOu>Cu&>7LiD-cL3rm^F@m_>lb)yUOx}c-&5GZv@qULe8MIlYWeXkP9
zbmOpWlk7qD(^tr`kt@;gYc>Qq$B@$(St9t@14ztTj>#X6!1#$)m_CTT;?rbd=`oSU
z#Hz@%brCwG_#iYoV@mgQk-{25cK<#i#@Pwd68;qT4)(`v<8AOhv<oWpp9$5gog(W&
zA$UbU5GD8Di06}1&~BEJ;<-mebG-vNoqr^9GD2|On{cu(ABEl<xW8r5Arao(311<1
zEA6;0<?J4c`H~BU`n1C_yJN{_(JKhySwalYZ|ys9hHU8oFa|25iu~1B`@SEXxH%B*
zhOefY@Dzd{PQe~0?hx}z#n!+3GsnkI_}2u`&0l$TV3#2mkDno)yLe-<9j_h9JZIZg
z3wFQEqEp+upziWhbJgfOB2>Btjb?ryuN*V${Jzn8x7B0C%`#AT>;V;#ZOFT$j>7J(
zp{nn-RI%%=u&c|59gn7xYes)`I@XQ6=X?*d{o7(v#aoEDide4y0=%mv=pzj#MeZWG
z*ribNdi9+!gbx;BtT_jk8pVULtVb<<EV^ZmrrIrPe82QJNV+{3tLqn%g)=JZO-|Ip
zJm+$yNhn6A%4ty*;-J?eY|2}N_LpYho_qBWswx0|2XC}UufZ@h7&OgUl4)s-l-+5)
zh&;f4ltCZi6?P_fW;w(M6+->%MXY1Jg_N}&DZI~8vcEWhs{X2E#!!mv&^Q;v-nc;I
zdY+kBpX9FH$MS(7{__SI?zQq~vv!_TcJClRyWHJm$Ft<DiKt)V!<{5cM1kg^NNKHr
z*tgEim)b9-OwVGUe6@FN*L7GivpZ(y`BB*PA>yU09_wFkky~b^VBs5kdcJ=m?Wx}h
zh8i=ZIouRF!w+EiW-)1wb_3mu!4%TMO!kszpg43-Sdzbxa;6EPo|_LYPiLW$pCMCd
z0PY&lmaKC1lx$gvs@r$Dziphk;0qrS`OXSU2S<`l<iPc#td}-Mh~W?Q7++Hlj{<UG
z__%0%zKVMcN6*5l(bdpoc?ZVlQ>m$9o2+~FRv1@|B)yU|cemz~#+KQ$=Kn-hRVe$<
zhG6>b&rl=>VAbir*$b8-{J-Tn@)a*qcbP^{a2f{gSF<PLN2p#tnM79|_g~IKZ<~#D
zrJF0}jG2bjJzO#Di_WC88zIfMnuVvFx?x>hqnJI4eRZzKID_*)$ud4z4xP`ug1>eO
z@2gM1|3(jJDdPQnpDaf`?}DnMZKMnRmyvH4-`nJFSpU97va~6M;5qF`A9NFRQD?Qu
zZ^uyl=~fZrWG5VxnS*tIsCkUmor3#blav2g#tg_#=s11_mWMtN5eH_`;>E)#Z+WFC
ze3C-`Nhr!5t;WFFW}!dbT}o=dT?FoUA<TYjG2sqp1YB>+Q777v>R^-{=Co3{ZVjNS
z_eJ1f+aBFZov>#<^NzZ8K%3I}WV5gXLSzrrOH1JFib>QIR3MVFmtmf6FH|0SC*F5&
z5{Z=tv@Gk$Y#82`;txYWXFqhkc?TAUvB%1~3taH;h~*yK{oSt}jENqG$|UBL2Ye^2
zq9)S&eTPKVe2Zkc{j+R3zCn&&_!u;XZzNZ^FPi0I(6aWM6+cg;3?UKoX(ed($AGo#
z4ROja5B2LtL!bQqs5t&wcD~3=+nzjIi8C`F*PW`He-!?`HVNgcLNJ|S9+vlCFn)9g
z)GgCW%020FuyH+AZDy{HPge}BG>YJoahO(AE|Ld2ipBS1NnEfZ!`_{8eWr(eC(MOf
zrg&2I2EM0I8F+UHAv^X~X`<5Qr1}`<b<V+N|LxFo!5B<=v{zJ>hLAFPkMt_R8w<mt
zsddXn2=2l<V(v&<H{K7GZvC-&68lF@^TeqQ9^?~ikKGdeXm+0^`2KPN^Ljstn6KGm
z`^lfOdq%?WV@Z^AqgEt+dmpyiPe7CZO{tbM!iLwKgzaknPG`)g`mTcYUoY{#IfC?_
zf5<`a641N657suYM!4LUn(pPw#&COSeBCmNoEL)?_tw)o&IMN){*Ze{FTvot?U43M
z6dla-$DpufXqOd)l?mHK;6V0N_u6I-i{A$IJG;sL-X1&?=brg%F>*cpEF~`|sCt?q
zOiQo8z8cmfx>XAAzl@@;AxBh9XcY(ko=i2&Sgx!u63KfzQR&ULxX(SB;+Yv(G5A|B
zzgvr$laGsnJO|dDNm$&1(P!0EvSU5u;EdH2G<7uhdT8YOK!4EH%>jKoX4Lk3t&R6u
zMY=W9K>u(#Z9W=6infmC;UE3c&|;Dl<HkxJ)r}&xsy$i4Jg{oRmy#~2FV;8TmELMO
zGdsFF7R>A_)b0NVA?rJk;jJ&Fmu=wu^tb=t#Ui(;dXdWcJFvbplJ0FBjRlvVffw-H
zNHT%`_$<k-ZW3ko+$*v!t`$nDJroX3K(`&il)0x4tfQ59%HI`>*=wvj_fF_KU(%|^
zjRV*30n>f8RQRkv+4U`f%;48hWqAs|XWh_hl^-^2-whT2TYx!7Ct`H`NQ&ZIRqNaY
zXv%Ejt^|KFo%0gKPm@H^MmGvw=}Ka0mbkOmhTa!Fho-@M<)p-aL~$o4Y#seh*k56;
z<K{f!{zn|G9yS)=e|8#NkNBd)rd$zttRs8Nhd_1g5VH7hg5pD3(wQ^Nt>vCrzxz0-
zQ{Hn=(<b2%PzB0OoD+BcPIkCvgo5ra!d}^xjEn4<T|S#~IV0Lo%--^ebJ5MG9a*pR
zr;p)|l&WHV_JS2O<uM!Fs~2QnFNK$5BB{7&Id+TcMfHBeWyAEf&|;j1!GGw%I$eX7
z?JI<?{*t!f9`mV&9)&QgUXU}W1&jj*QB9{fOn9!q#lME2>(TbKF2$NEPoz?MiY<B+
zzXcOXV5m4F8M-ZpF##K?{?;yG9d<w{U>}!D^4;cfTXBMC{>IY^@gPo#x+7;Kk3s36
z@Y^Q6>KDmgb>@X!l0|bKdzFJVX!r1nP_JGfY6?Bjam*a<7nw*3@0HrLmM_5+yj_^W
z7Ri?LYI;9-Af*;{VBbFbpmd$E^@nyMJCk`HLnp}@PAjQm@(i(K*nAY~TGsTYYm=@#
z6!k4tqH4`p3~o0Elb;O%{ef#RUh75g^--9yv<-73T+FJzY6#B1A=jT<f!0aGNgLUf
zE}YWg4%6r4SiO+))90b>xxp0h8)wZ6kBGDC$!KW*Mk*M20<zB)i;SST6m-*>5?K4u
zw>*a`yTek{7Ks$K9%mHncQs~yG<Qo_ONI((ihlc0BuA~}43-+AIscMA%9~19cUMi>
z&)j7PR4yIMdRre+WQ?S@ReiY=y$jl$(?FlKL&$LKldSaN&;HwR4BMp@{<)qMKbg75
zAc1W_1ntWkK+vWPTJQIw;rCOpy3aByx;&Q74s*drSNDp=uKaU%F$zmtD+rimpxN1N
zG4a+!RJS=U4t8sgPuNeFoPGvOJAMF7(+A1A|131#4I@kRZgaqVKG)~J6uPlNa?5PK
zLvL&`n`+e*mB>2yCC(+h)r_mjn2WR7&l|DO9_xC)ggpbE2#eJNAwttd#s7CSWv|1m
zHrxYr(iT$AjpEMmUgX$bP01bx5pmj&%3U}sez_klelUv~OPFzY`nFJ*4?_VRgeUd0
zF|d3khOHg}ny-$@d)};~815y@W?xjj908`ugJe&|NK`Kw&Wy9q$kN#wGX~8?V^A+K
zyE&DVpOK`xtODPi<H;zF$YpFr%ZiR9eZehgs6NNM&b^|sgjpOx$7R>c^GM_7#k|Zk
z+0gX@q+yOIPrVBbhyAHCd=nUdp3mJ>dqwc}>yUn{D~7jc|8>c)+W1bvRKAQEeg6%_
z>{H&*bey}xJIz6VuX~U->WF9>nJH`HV$nD@SX*z^g|m!0G<EAID^>!Oocc#x`6`x@
zdR-QqpLU_MyW5lT^H_6>(H;vHL_yQubCPxRP|6s;j+ty8)H))9KAzR^`Nl!m-Oh%v
zfgiz6+K9EKocT#yjxYIFMFH>Afq2IFDN$C|v+Ll~QR(_h)->+^4-}hbnQiAVL)Xn6
z6&*d~iXENt<Njp4er*6owH-$>eeFQK#Dm;@xc8y|CvdXfCu07n6bBcm&|WbCvwrI!
z$~LV-?}ircD)>t7X%0f=`ZRE#G>dr!-e7%f6~C)?I6QYgnU4P@a;)Om7bJm+`%8Q|
zA6^!G1tLlYlIeMO$@1ob2)g1;&qK%2O=mldIqwE3Q`%r_{&NVnRLFTB?!hDLLI|GC
zI_Qn3qKxlaHp!`^SZ)NTf+=+UQX*zeDS<wI{><Rp4w{uS#XWcS`F+t5wK8+wBdkQ)
zR?ez5zxPfbHh^lSa3T7)!JG&E@j|H&s?M&Is*CzyDR*L|&GIF^#|dy;_?fV*-!Bdx
zc0dOM&r04L(iRlyao>q9<b5F@xZOttHx8mKdq3R4yfNjLYoO`7S$Ho$BUB%zVkka_
zh=^?>#%Czlz!4Gn8|yau^HNdsVzkYd&@?m_FID)XD6bRNbsMm#?hNQltuXAjyCN_<
zoYX&Y*XlrTOi$|#0jy2zF!v$l_L1D%)s6IzFA4vO7E$q~zfksdAoag%!R}wPD6>B=
z&N_}HhaN&C{nid^hC9%e7h&XB#xvtbcg@D!PLfUK092nn1vNZF(NFS}MfdH{qU}hg
zM_Rr!xYCsfCDuoeC+n15ka}t%DOS&=g0dmxw2kjE{S&Eig$05(v}KlHSN5sQp-{Ws
z&~)BL_*$i6=AtsF$loNQf5;ZEhVUKY(C3)TSs%quJ<K_V?LsqeHYWS+gqZW)NLl@b
zY$);&_RDot<q`<WemlW*%m<?Qdu#gL5lr7r;ZETx<niN5y!SYW6q1wKHLn4b544!Q
z+6Sv2a-TBypl453Ld?BDXzVx$%a}9oVi$uCj0?%NoWIvm2DJHeHI<!Ti5J=hF>}8s
zs*@7HVR#1n1<wno!JnYG^1i4(nhpA{%S7zBaro#{I9dmK<5r6sInKJu`qWpVrDgy{
zv{Pbs+&O4UkHN(6$5X=f!5FqMj#)mdN&R>=InSR)2i~=%bcaZ=>-(?BTg2y)`R=6C
zHwoS8-g3?NvrxaLpVYW;IH`wRhYIFx>Q@BegY%QA`QX2z>JVojQ-9VfG9Jr*W%ktc
zoI8#7B~kGiJ190T6ZsDY<B2f9MpvFk_0pkoAJ67$-bxNz4XEhskF^s=iLyg0pse9<
zXl$qw@xSFm<Wt1FIX0ACu}~PjUP^gin84)|_YD5NTC^neCci0TG3JLRXsG#5B&j0l
zfWaH{PMw0J(pA)a-HD1v@H6=`l3C%ana>_dHkPSa^xgO1d^H^N2M@v>Aw4L*xf_~t
zv`{g8A;uh;i_X$qD*E%WfF-`DHCv;@!&4&hfS!!A?4&TWGbr36Wo1GMsIsgjtEj1T
zYUVJqTRMm=$A(Joo%r1N<Xd4gw>K%;c`@5*CXET2f#r*QXk9jIOrK&zU7O)_DNcdj
z(Kkfc<T6n;+J^R7`jdabY|{U9K~k#LP_?NG#<mV4%`b(ZIethseA+HGhBbpjO#x_z
zG=on8cZdvTy;C>e{75ZB){jdm^x%H5>^7j^2L*YT4i(uSmJ6*klT@vJ>E-DKSS9>L
z^6F~gps0n2GBsIRYvF=(8xp^~fb33_!L#;rdZ`ScV&yRrw1fE)oYM_1Xaa?E<G7-A
zZ@|%#D|Y#;!I*c0gu?EQx8|1+s4C={PQnJv6~1`$`B>aFW+@rdp3CK7Ja1Vx0J9Vi
zq(}M^VfiUc)(@NmQDrkw*>ySET6f2Yj1R*7t_`-IkU$0J6Y%VsS$H$U8pHVgiBN76
zjT>iRUf$OtG={aGpC3V4bvQV-U5Qa${75N9ko_EMQcoC3RsK10+E6!)WxkpBfmR4T
z{T`yXKY*$hFXnQ_qTj4JRC<3ZwYFP8+1@uqd0ua{-g+ILtJ>qf)V`$dcL0*Dy9#^u
zt2Vq!rJNjhbh~In>d|vh4J#mU(dQJZ-$1$7df<+QYR)?I^HMwpbAq=+X!Zs0@5KM-
zt!*eT;v@6<22pGHVdl~WQb<+^x$m%|%F->uzp4(-{!d303I&yAw1KTot1$F2&;Ayi
zk%WPHaT-55+fh$dS#j*U-zn;%7ch(IwWK?lFJj6}ptvC+bXmdo^;L9Yg&Iv?E@3YC
z9J$`cQ#Sf#m~D6Pdc2nNY2O*Z@lXJJEJur+MqBP^dni@*-V8=<G-Oy!A+N?6=+FCK
znOhCBTG_Af>r73)ZG|PZ5;BMHhL`^>LSOY(DEJ`2*O~boFQ(#^J2o`_>re{o?TyM&
zJEcneAkwreQPs4A8FgOf>)RwsS5Fnjq`i{XB@~^0(PPk$UC?SG&#{X73gwcwl5+jO
z;PLH22#E=y@^>#qea$%$R$eaBx@;Cj2|-l(^9j+|=aa~M*p0$Ag^~BjNbuz~cGvw>
zvh>hE=A1K7bB5<1P2DlqD*|J0sL|g>gVDwgsQ(!xMeH0o=-E0{JzNj<rA=V^>$04;
zW&jl@`--sAccS4w^Yy16h6ibJ)U?%J6rBB4oLIx%VWDkAmBUxE>A?l&Mtv}A+I%ZT
z{@sV(Csm5~mJ*>}^B=Tc;&V=?0P+509O@G`$-&<n;N{`ZP(379yl?$h?CCiW1J`~n
z>P;Qd9J3JP+dhN(qS?aTc`%Ce2ZZZS^U*sskojFsSU-H6P|fTu8@k;U4^C{vtc*EW
zp3s*ytz?WZ`G$LSCSdWTb0T>1M`&%^1g*b^L50&qdN5-G>HqvrF0S_iA@`(@F&ogU
zC5$})dMq90h{gTBkbO5Wk7AZ9Wc?Q*@=q`~cFuXQxm+rmk1oP)M`D;0$Iv8?N8t2L
zwJ^MBlC#~kpx@q0bX)C4hQ4>9NFw2sdk7R49*Am(2Jk;@hF$N+W9FL4XuXfU-~%qn
zhRL?fX*?sM%fE&_xjggPFpAXK66n*~$xizaoA&mmG%GJuW&RBg1+PJ|__bECd<F*F
z{4Hr7MPOtD`$yxLZT{VQx)ba`ySNXj(Y;2fzgR?y8U6?oXX$=mzs$_DLOG3jD0^2*
zHrEz{>ECx!U_3#w%K-8&iNtl!;?R9dDrLJdzoX)osK1aPIi1)ovb!{M7Ci)0QW_w-
znf*e#cuMce-XpmU%pTjD{5vIyM(b2ee}4^3*3+nIzAf56>y7niGDLQdzRW(41&dc#
zS@|FnjEOzuEVnyQu{fB#^E6QNr#1IY?*v`ORr!7Av!F^CA*-yEa`_fF?!j7$#-C#)
zRZcfK)Nuxt?fD2Line&Ltsfb0O%RR~>P4R;E~slvksF)OKyk%K$W}#D#*WVP<ascu
zkMi@=eI<Etf0<|aAbRrSOiGRMq_EO1BInIUoZT`C?PlniXVeR`v>Qb8fKO1nw^hV@
zx1rb&JMLP|Dl?WXriPKp=<;JPYF>W?OqX+|f=|q}IE7UIt%IZ<Y7goId<PuoMYY@Y
zqSAa7(#w{?qm*EpUG7LFQ_e#U3p2I15!CCMK~gZ#tezJm)bliu`*jd0W354RJ4;g5
zCZPJNPPkkeN=jEd^xkO@O_eJ!VYC{@SNPM-0Xi}+y(qiQi6PzJQL^LC5YfJs8DNv!
z05d4T@kxzH9>D&9E9tUw#0A;FzJf5@P2k6#2u=SOd@^YnYF=!EzzL^C^~OTT`|nFQ
z^>ijyG7m3ftQ&e)9v9Y%G!ZCwLVfJ#^2fj>C}1j8_-$j&$pK><IK%MX6&^f^##YDG
zw4*)=3!itV%B;I0Z8_hUzsWbpq}PCZgBKRuyAHawE9k&)%gFw;nzIkTn<MJ0gk=pg
zq5Y;%O3f+g6R`}N4zj;!;MQ@k3iRY~l(S-=5}<7OYoR#P4kC6P6-BEvKzAhw{6e`O
zcR-bhnP{MzM{AgiJ;I!Q%^)h*_r(HjsZe;#Ha{|QFUkH8tlium7dwZd>DOgc-uMMH
zJzI+k-&CPaKP&2o2Z8SD9LcJ4dyGHyTIl;85~@G=bLzW_8SSNVa8|moIWSsS7xY3G
z=2I*VOChhF`QUhkvwADPl}iV9#^m|iIE&U^&W`k@qSQ@7|M@4$_=me$zvY4ycBMCL
zp3L*mwG*jg6!Vd~ju2PQ^dh5+kBIVJ#dp-{lzm_T79H`R@!v0^;om!8P5Y7fYB^GM
zDfcKgdrL}l6{whfW%<_`G>t=WcEVyzna8XvzMJ-(H<%v%u@YSl_Qs-~?76a}faNrE
zA+LQ-rs~O<kr6=t_eMg+%eIs}JqWXVj1hfS24L;*Ng_McAFhmBL8^VbCEfN$VTJ`1
z^0Ggsa8H!&&uT1QxmDH|=&))18cM#_31ZkUp*%2?`n>9i4z1fon#K*In3Iw{t~b2e
zyBzZdABU7yTe4i>&u?fcn3e|%^T#Mss4C>(R!_(&NF`0@WstjEg~eYE!tkX7G0*9L
z;=pt@)!VNTMegrG(Pj&%n#W*uwgI;mOhEOKk)SxYP<yF??{KawpvEf>>)q@`Re234
z?5CoOZ-3ImC(-ca0MzVRhyEQ?P`P(48C~~kEp!r6rs^^IWP~tIeIcnHE5PHoRhW5Z
zF%`+%gz?cxv%cd(@Yumx)K4*__$*njAGjHIUG9a7pYEK=--%>>AyYi6+yk{+;=%Jf
z)|_|!BdXqh53W;o3zdF^q`WADjjcZwb*O{d(cM8YjC0=2dW^ZMLQ~&dp$m=^rsCsb
z$CQaw^v)d}dz};Bmp>P}_2-~<?HQr@(pM;KreWfPQIz+-7_Kx1vX-=%nihtO;NYp4
z>>3KL?~lUGo(ig2zmD>5+<@Bs?A8BwlpItTjXs6!WgXjI_D>!yGHTai-P>ZAy=E+#
zeqA6OGAEK!o&=4!5yS7VqLQgm6mz&E^>kW>H)EEOWneT}GVJ82>Nd<XoR6kAM<7OB
zBSO>K(e(nPM<IOP+%y&abzRBM`Y)mC<%zy0cYxuh20jLi!`5p<Mb=#QoSf@KRkb`<
zt=wmJy?PB|GP&or`&jh2$k{|F6%PuzGvoeoVd<Quo!xyX6<^y9Ilb4j#~@wkZqI}9
z8V@X7IFH_svL$8H&!VbqB%QbziH?^)fo0Y<0i(Z!ieAhUWL{<Zi7}x2+pJAL{aT#u
z8%%DY{sa@^DD#Jx+?&;SCjWb$72PfsF^_}6&Uym5{Tq&_KG@Tvvd$FxG?^Ti@jh=i
zoP3fvyI{U3QU|W2JI))?se;$6pe^7!H4Za|MPr%G25ek110V35#yi4|`>qoq>`XK>
zv7P9^*9qix=AtOv#b-|A_d=sQC0G4>OO!m@1ZO9)j=cAX@H)*bh~E#&PDkQVRlG^|
z3~$f91G`1tq=%py)g4;TWC|blm7BW!28wS>wOQBVV7Q|{=dxoU`}s>K8Ow9?nX%}`
zTx(PL3X%L(IGQ$el%Lr6(7`3#t;2i&*10Qre~n__KleNh%>`W;ckgYUN_8zoBD}2=
z@89ik@nc7dY!gA{9r|I)#oeM7`OK$WNM+Tm?Osmeo>doAY&@)W_4)%^UAmIu%OJ9?
zO2zjDe?$Iv_SDd|C)i|dfwFHWk%uGmS)NV@&$4KI8OigRT?k30gQD=SBz*gIM@oIW
zku*P^ly!9t=Hg^W2rBK0F14Rw+2la+UNw;dl9il2uwh=uN)dQUkKxKlOn1x>F7cD8
zUj08X<d%tb%Ld||zFjemnJa}C6VV3-p|W(eI9t*k9scA#qx*6AXh~;$@L5M1Z^P%h
z(;I|c-V0DHF_;%m@<RV#_KB(=O_HwvR8eM`4_Prws7zfeicjv5>T(Lj*7p4=t70KN
zQcS11eyyPSHV01iVI85PS=RmXhivS%Q8paADAMn*7FC7guyo*DGPW5Fs$*+;?VFA<
z$KzSU`dZR|RwOn5z;p1o&KN3f7LLVXBC6ja%AFZOnyW`du;#d^Z^K+C*JDt9CQXzL
zs^ISMEobyM{|7<(jnwq;rU+}Thtgh?(2G4oA)f20VL&Kq{@f~GC{D!dRh=;FU(U{W
zeTFeV^%CaOA(S4G0v?Wki3si{E=f8fTB`W`XRDS9&p1)dvtVrW?~Qj_09}XhvvOmu
zFkH9DY4KOYh1Cl%@^}dOJoH4v!oTH|8h-D$gaT~bB`&pd!TS0^5D|R=P6f=Nq9<+f
z(TW+|Z^pCN9_!fy?1JmAEhoL>9g$#TpsJ(4LdDr6?i%3?4s#Oo&aTA*r|Hb`*gQsO
ze+(MGw58Nh1`1~XLs50LF!FxWts#PP2D<T`JOPrt2V+d4EtKCKjKRu5!h5O}c=@cP
zN7-ws>M8TnX16Dg#k?PmJts6bH)7I|Fp9akN5q^QhS?)oW1hE!(ph6~jeI3yBsHWT
zwS#Uco>WoSLmZrMN1<1?3#EC7RJG-}Y`B6VxQja!{}4ouIc?FfKMEV-?I>YNI9e|L
z1?l&FD6}nqujhI~Q`>dWl-wKiU!@A0S<IxoS0sm}XM=juJxKoP8*%3k7v?bbMawu9
z1kbo25(X{8`o?aUxWkilS^Y?XC&n4p4<*(0awuL}C;P3QN5%8ZvVPe(?iyW-decUs
z|Ns8bLo1=+0%xL^#G>^sf_l3LBGfSp_GB}E=!6p)gI{nDXFqD5dI%~`e1f`?G?6m!
zjqrGM5uO~1BTw6YXnZmmHBYjH?p&)Jw)DJEMk&E@Wp5mlwSje?NVIY4jDeHf5Z3nR
zzKvhx)s=y06Fd!@zTYMKyl969nS+qs)ERtVuy^WhcTzm~OGNR%9UX8OqQms;MIAuZ
z=2qdi%ntXgHi_2Bt3+0>S8@rai>Aa6!kT+u6F-e5&*&JuX*ZavH+~eRcUn?~-<K7q
zuYh9P&NBxF^&`Lbkyte)Q5f%Y#y;v2G_QXFhVV-wSuPe4{u8MAhhxzGaBuS7<_DEu
zdei$Uol(`M0aWAufrhEB;Pur=inlEWUo-31pW9HuqThsWP_|U^=wIQ!Tg5EBhwLjg
zka$IcGZm8I7a@6d<-YQB%nR_j2RUv&oLgEbbSdMhh6a&wmz&mF@mf^u9S4?}3iH`d
z6DakVie{Jb`ak4_D6~l>Q@tLVdL)Ue>)%3I$8aq8UP)Eo>;uKK4(L|df!r#5X-855
z?Rw=;%5evP(|cIIbBBDrC-?E(KPB8uGqB3a1@c1A!Je=DFzj#%b6|VOy2mlnuG?YM
zem8Tp*u$f!W$#{RzQ3wg$(FryCCi_&Qkh{0>0Ex26+b@{K7xC5JJyP-neD)oc0gL3
z=S3#Y>1K89kB$>&h)45Jim>>$nEA8|#=QMQRM*<zseA1(@`v$w&?*^&`&CPh>#qv`
zsY*ETr9Vd9<(bf@LRoR+7kHAfj2s(eNObn2{M*)KlZFIAA*lQ@5$g0#R8>6*BPQ$<
zo96;KN~{O&JSYO=!pLbs9iP$Iukra13d&tVxiy@xw7n^Nt>OFN>VNn=<z#-@i)Y=h
z0;SZz5Ue<<0n?8OQdQgUg=z2^Szn$mHI5Www#y8Pe*7iuSQw9K**!5LWhObEOhBKu
z*62KT5Gm@jwRLTIU7q2}_r<wriZ~$qkLk{w_`|}xa}0)V|4m%kor)Esj)LKqg399p
zP_@FF($_u|o8Q`#<&UjeliC?VUfYvm-%V}A^9=49`b*C4HBFd%>#%NDf!OiK64X!h
zfxwZ!3(M0na*6X6c-b|Hvc(ywJ@GYw=t%}Ef7GATG5;hO9V&95;BOP$>96LlfjyG@
z{Z82U(+c`{y&t)+u;KIW@8STT(XUu0V0|SALtb!x<%+k|x_L4>ebWxt1<b^dg&irZ
zK3)WmyABT4OVEGm0J0>6i;}-5l1;|HLiL3Cxqo_~X~!+e^6G`w;PaK_I((0?deVa|
z>6_(`;azCm>~(mdcpCPM97FoWKgeNxcW+R0=RpTsO#Af-*xLu8{nD}Ins5)oKHL+A
zc_XpN>1R0Q9Eq7l+u+WI5L*1I3$wz%7Kxq1(BqrsbbZQXRCQk{t6#1VeKdoy_=iX;
zZ}>{&v{+Nln0;dQqUEH3KHz(0AZAWmi9HR&@WB!8KbJqFoP{=++~<gJuGqkL(ORu4
zs8A^Go{)_(dg183nMb^t;{Q9^wie9fJtEKBA?TE-FmX@LrW@eg>P9iwyeNJ5f1t$}
zYVLFc)bJKgj0?s)<cUtvn?>eV7liZkzNCDrhr}Jc_H_cY?{a3hTm^hJ_j3y4OtjtI
z56tu)i$-R=oLV%OhL`uEoI5As)rU2Bio4nB4~3)R2=6zMUfg$IAxB2KQ<<?pC0~9i
z_PrZQ7t%M7;r>_=I5d@pJH=tq1_!jf3Ke$T^PO}0Ddfz)0lvJ3gcUs(W$6R(L_<f4
z+QjqU3qJIcvqTrFeMuybB#$U9-TB0v<Nrd*_~M+_^gnAf4(TYz=RSwD5pRT7<t=ey
z=4>iBGal8|VURTQD5MYVA?#Xb^I7w>D7)1ccBnl_k@K^p2)!;sH%*``-;Ko3n`S7v
z<B5)QxAHxB04Q5;h<lSGDf@B?S+%jpQ~NpZ9r{9QSQgA0%pz3((uMjkf5g~hsc2|v
zPa!2ED7fU5WUN`CjZW!HPCX`}T@aA|bAQ>f=VwrF>`eNzzOvIU_8ck#si?gc%epZW
zarp)DV2>?@zUPemvOWNf7GY`rUE9>+BZ|}7h{eI9(UPr^RTmFSXPuCWKDlG#^Cv>_
zB8jq0>?8d)5f%U2r~Me`kM2X_$&__QWX&>&F?+|M{>dS!>isLp)h~jK7ygtr-u1G+
zWU%a>v>v_Qr$fj;fmnR;g4}0UI9W#C5U={A;KAVwsZTm**HU_uW!7=w*5XHofK*Ui
zJ7&If(Tlv_F^{Zq94bG>P<`Hb(s%4D@ACJ<rqmbkwm0W?e<SWZ{X%}UsTOqOs^RQ#
z)(YnQ0?pG_VQc0(D2i@^7~c|RZS<6N?1gl(TZ@)6wp6w)h-yts*u%rjs|EX|#jD!T
z_04gpx@ML&!5b+hAxFGF1Q_YD0C$}Ur`%(#drh@M&w>$XT;nELALU-O#01vX=irzN
z?1^`5k&?e*Pr@Q4GaVjlV-8*dwP`jvm7j(ZK7*&lGDGNB?yXPzUAV85(72*2Hoq(5
znc*{0<vxY^3H`Oj2g8{kG(f2RGN2-NnQ-~h7Tq6MqmQyL>D%1~x6KatHmD0epXY~L
zxvL{@drxw-1XFX81C{5#hPrM%yT4@z&CeFn;;KN*?voCVhr59Hha=E@c|2|&&g|I1
zNf>zTF)XfgLyLQ(P&hcANxT(+!q%Cxl&sPA+a+`Z&j`zuU7$`o#H<i!;qQJ+Se_q{
zi^Ht2fjy~)uhJ#|-@Qff<-u6Es4vc5l*$@JGUfGZi}l+orP8Y#&@x{keLOm!lGm(&
zkf9^VCX>AgPb$H9@u7Lo_CAz4XA)Ih;I5}~_A-wXa`W=r;NNjR^VFR<6Eg;^{Z0dP
zX-{rvJ;}6ttI*tOl=XA>Nh<FI@cxhWgJzPv<|y#((2i90TZ7Q#M50}Av2cBEg_ZXG
zQ1gceMowshuE$@4F8wWgg`-hf)(-ku^Bj0$Av7ryu>Ep3s^Cm|+OiKKW!w~0XeMJ~
z<75iW_#ed1XI9vc<&c!$3;h#j3)lU;Z*{vRr;N^kB<*8JS+|CHz~NAt#jKyM%qpFK
zT-e-hfXp>vR1%gW(t2Hm2CMHOEb5g=T@i_q9hk#(>Ie3ueJ%1&&qVb-TU49=$LEOM
zvL*IcZFA}_Xm&jZHJjE_`J#N`?4_lwLEM3RY6za1)Q|G(S=-*TNdz)e)v<dL=$<!#
z{db92TFDvK*tYE7yC}jo52p(E?}Yg_cLHzD1J@-gto&^zYo24FLXLv`<4ehAK^rK!
zTnmO3bFq9DpDVFi*jxywMBBckm>wjRwHrboBea~yW@c9pB-2C#s1x=J!^z)7#H5{&
zo^z9X9tX(EYb()X)_PQ>+!FSi<54G1#W5!rq8RT=?dK;@#Xf6DxtIgdxxa|FLp&+(
z?iFy}F_Cm3M`hO`++XaRgf?^ALt`_aBVwk@wQmYUcFle;<bNxk7cR$GBWndyMnU7#
z0=Qzgh8|Q)<hdaLbC=C#MqUqcz4r?^uW%%t=^NR*%nIHQuM}l_CQ^fkBZh8{!OXGS
z#rwEyFvNe7RR^~~(N``M9mh<{b7m3zW({Z_4oCRCJKgyy7WahS1I=9aio{J41$J$*
zxV$HK{XBuJJ>NlD*8p<Z&H8kxm#k@*AqQnU(}_zwKgenlCmynY_w!~sT{Rg}jt#(!
zb$zkoundLz>68-m3{3otIhI})1=fjBUAGe)qbD(6N{v^}u&?J<Z}B#L0w!MQ!JP;d
zQt+`8l85@FC_esB+B#?*<&4-50W&{i{#p}U?=uQ7&5R?xYb1oV%tfoKk)(>^*~;ES
zkm9Drg4LTq^{0*GTV;)DyE||{v76-HKA298m`sbKb?Dk(Pab2tp!eXuSnQN5*L>ka
zr#8;Q9TirzIjI|2M1ka{oP_0x7Ew8HD(!k5hS?2am>gs)uGp$EB&$CbB!3BYJO9T%
z)HaxXYdvMRmmo{NEv){ViI%G~z)pQhXg@_`QE?P0kJpNdR?b~NnL{z3O~J-h)6wF%
zNKz)3N)Aa2DPiMao=MN6>}z~yYa1!f&L4o5xBt@yJX%b9zO<s4OG`2Iet-IyJe$(L
zcE%Ds4>uo$Vgvh{&U(9H19x=!8rYL2I%4o3&VmIkqoz5{;?c!YQPw?QlocNob!%g}
zCnsO1wlU{@z8x0%ehVJihoI!>&+HvkVExf@*pudpHCN*C%BNV$`f)yO9XpDeysRO6
z*pFaoc`LimS7U4oze8=h<KjUfsOhi(s%HKJ<zbazx61~z(wE@61O){i&k-pO{-{ZL
zEV9#G$)vjqnu8sAZOsvpzXX!OKL+Z*?@HMZFNwg9YO3SDw94UkA<4Q8y=r7$aflO!
z+WyV&3+ug|A4AyCso;7)hAz<(w7WBjVvL8t&^8QGu1+V-mbMf)|2gDZ@!VwS9Z-eL
zVE$(?HvP<g`tLg9o_{ihY0VMP_uVeCOihyYo=mvD*@a4vb|%%NPz+=by#M=`LUF4I
z{4G5w*n>6v^-0jPZXjhVOCi@)gDG=bVC%Rz_5*lQjx&4JSQj!4D*`uj6#6||feNQ4
zt&RUa_;|sF8t-lv*}*)g(KKkGO$)f@M`8Y43E%$f%}fwxOD&lUx+kyA4f(t#9(XPK
ze7yqGr+fl~r-YV*OXlQWXGLkc6FKc4MIXOrK1Rp$qWJ6~c$Gd7)s^2v#N4Ogm*Rkd
zZ{M(w?vk*0<Y-Std*fDSKIES1fbr)abC<6>B{ugbn}2>6RVQ!B>}C}Od$XCD5r>Z9
z%>TnB;PmQ((9in{RxenBg<m;g^qxu);dWOf|Fa01fms|^qtWn6h_ug2z{_Y6k6t^W
zu{J;(OkIUlhxKG*vPUsF1jF3ovG~P#$mrMs#~W6n?M^l7zHWyHR@+kR5zg27UluL=
zth_yDNA)YWO4&bqaeq@7-fNBId;SJ2{*2ekAC|Cxr!xj;nWfBLBk<9$yG3yPVL2x(
zT{Q0fD4Oo{rusfJs9{|XO7D1%uPvM5fU7t6D>6H8ONlTBUIBJ|2>pMYcbULlgI8x^
zj?H9rEOH<_<v;LzdJl5`X9jB4%Tnnv*3YmrI&HM2=jMrMdfNtKV*@GTm;R)hJCrnA
z56EM#vS00(AJkiR$iX|UNf&!w8#U2}v-&<}<GVarxAZe1CWYbFBG&z8X+-FNzrncZ
zEORAqcn5EBpvouy<h0HKO_R+~T)#!!+s+Jvnla#WVk{=i^1+1Fy)plJUrf$@4Da8Z
z1Vax!iGVHK$<P1+gGXa+w+<rKBaTvb##3nb5VB%cglDfVSgo}%L-ccD`;QF;O!}N^
zPY)3qn-++_aSR;FUWlG{oP9XuPTA(clo>k`D+)Uco6N}~;`g6Gecl<$H7hB(%^^`e
zS&!>9L8SeC2!^d|FT7KykvYK^i!Hwh;|H~@s$9)^n5CF_(;fH3tf6(e3#oq8BT@h4
zH&MU*gD5{ag>p21i1fjqi>yv>!T#6PWYa5!HIaQ<|2G<Z6ww`%n}314J}*Ua-#v2W
z3kU9FI0-%*`TX3?1-%AshStH)gz;jWxiSBV2<FVarF5q@OF9qNUk{^K4a>>t`Xuc3
zc>rhAa<##W{)O0k2C^s=a$}|nn+p31o!3lAdjDGJ=6xg5zTOF<<SS9IbO2UtabX_#
z0CVNvPUvOl%K2QL;X7#Lu$m@#)M+VRnre+st{I}`7X`WX_n@>5hatl{6qA;BqRbIL
ziGo=VM4;<qq1)_;!HMNi(Q`dky<P%^k2X^Kut`+*<grjbh^EBX>_g_x_y>m;QHV4a
zw+cV1)y&5Xo^#c5M}YZd4^+1CgC~0zp#4lc+VxjIT+BVe-u{l1ho4iHgDe@g+?G6>
zT&Z!;c2T(4kJQZD$Xa<EOkU3NgHEfk-YJFqwBJI`$D@#SG(nX0NCACPAl41CKyfc-
zalao0C;nbU6&0>jwJ=xOHD(;0J<MxfDd$LIr)aC(bm-a5jcPx%3e~jvQf;&mEaz`Y
zX|I#Xd14%jl4KN*{V=U56%|Rh%3N>Er^OFPQI_>cX!vcDNVYHosh3U`$6KIEyGe3N
z{0L$F$8$%@Gw_RzV$RA(p?YVQi?i-X70+8i9r_kD`7fky#?_Q$_a{8y`QAF72Sejs
zacW~Q`cEr{rjuW?2G9iiMs&xPNN;LCn7QJbRb;<*9O>pN#l96jr1+v#c1{n$+^u|1
z7wd)YuRZ2&ua;oCW*}bgl0fmJnCZ7MoK$`t$q*L~dGAKzm52Q(`_&=Qu*!|ZfZZZ+
z^%e0bzdeTbV|^%TDQG-LlIQgPsM|RX^t05MzmnO*bM6WK_B=41lV$($!$k3=M%nk(
zD)M?9N{V}(rD$HuytlT3;!>Ge)f!91QNMs{Z!B}>y-2@%BGm2cg~6UdLX6)5N&jU-
z(fR%CgS`Tt-^Y?_!Yiq6`aO}eSC5sGxeM(J&d#}>k$gSA;;z#a%&S<2#YJ}{MM*F5
zXyYqBgIyK@&JmcktWolwbQXNMPrKw+C>qub#Iiwu!kAw;v$Aw892l__V**`-`)|x#
z<p1iO9er@eZuXRZERodbQ$*7Lz7v;f7gLN&F(h$zLmBAIdbom|xFf{XAq#X()=+Nn
z!}ov9qgSIjw>f7jemt>+Gk*hQ_2Nz#JJgx>D3j=(Lk!u5g<{$izJpz!NI7nsIFmJ1
zYn&9Pbxh0<?HfI*XwW@jIxra&E-LCXmc4{a#$n0Axd?OF*ZA_5@IF6*)U1Uy!X{y}
zeLEPBhD-hvCPTMXqnJrOj{QWFaaT(>vbAxgoVfyu(^q2sh!FycjtK9n5K(?+5L&wA
zh_L1WDEaX!WaKC@;cMow?+zz;@Do&An~9;j-iZ)RUy2xcS{!)O3CEcDU9RC=YWSNo
zm;4uD{`_g^AJG9@11f}f<N;{%>jq(s_l2V3cdf|~Bz61Sp6tU?DTvo4e+Z}ghm*wS
zVrMjsP(k>)u{2xl!+LcnSzfLMo4<J<T+oAFE{nv~-RG0t{v}vl`?u&bV=QNJ0SZ=f
zFYkp`Dbpo|F0h|)M?3Bw-Y}kYqiSW-f2U;So(`0L+@BQX5>>V72KuG*BvnTT%G$JD
zHUxIV)Vrx@8~!=@cX-OVf!P1=%ED%sH1PGEhdH}gdp-OLg4^fIjdwYBbKnLn_FIqf
z5yQ}U$YPEfFo$zR+hl{)Rq089A0>?~f!bTlFIq7f53+B*QF})eF6_%)-&$yGT}&Zy
z*0`B-56|NlVODX4T)ut*TKDH1%Gs+TjXOwN7gPw<H%mp*hYeJGav-_}kEDhgM^LF8
zp-S$9wk!QGY42n3KHZm8)BC`sHQbBfe+rt28Hu)MwBw6nsdWB0^m^k*t%-wCZxbS-
zo~*_TS67hR+Ei41_pi|B?G|=9Yl!>nC@W_wshn0qLX|5GUpShS7Zu{(4Q4WJ8w1)!
zp_nyeCRPoZAl^UU3Yw+Yg_3<;>0i7N(Z1CpJvEwpB4g2h{zA%|u@5{R#-OI<lL&jr
z48jqcMB0bvV%@+#>@)oWEKXO<hHowjr^PDTbufvnFBU`V;eI06cE23j{VynZau?hK
zdNC8$osu0!Q`L=avdJ|b>c%WW)9frc;+6w>xK)b8;q%$Q#ax<w55SW+>;2kO`zldI
zt9#C(VAgLl`)%P|`8V2TJI=R02@yLwPe9$JU`P(_hdbh?;2!SSQv7d|)|3~6HEZIr
zENUV&d8=vjGcD~2v!~YKU4_o&H*;j-I&9+aOkb#y5=xUXIctj8ny0|$-wZ@k;Z8}f
z*d;zLRd5z;9W`#OWR6jp?BeT)1ruh8?6^!3a>oS&lyU6m*dk(mK4b5FXE3T`&5;L~
zS(N4>_6<^V5B;4nQAM_-D`#E9*H_rzN}%CST}Uqtky_8El48J0InZ`6ZKcV~_q+|+
zbGVbdCv&$aS%hNzJjtv7GR!X7EIgqnMtTk=!;p5QY*#16mkdJT)rOoSmSE`Bv!dv?
zp0s)Cy8ol-{NrlAzc_v+2_Z?6<RS?nB<XWbuB0U)Ns@3Sgd|BqxQL~NW+B8Hjn*2m
zM*5sXNDHA6LZkT^3(Xoy_@3|odl>iD=ktD_^Ljm>Xj!nHc0BFN5i9>DgEB-(FYhP0
z{RRpuX~)SAyx7+IFIsrWk16dol>VQ|yQnWEw}{3F>4W7TKTOg0$H*R>lqcy~6k9X_
zpS6s_v~dJ>b+a-4&1#uF2|r>-N7U66sY&+!SUo`)e;19g<b@3<Y5EA)T=vn;e}J0C
zX_Rg?5p?Z`De(y-plrlLsJq&vX5P2ps0<hJChu3zuUm*kdk3I<Ml19&-w!2~&Dr1g
zPc>tnE!Ju4mD;xssM{Ne+ju$V{J0vaYbE3*-=JL{0r=8nG#Y<y&S^9LCGSH)yzsRv
z8t-d@phip9yjsV8H%^1$kn{z;s=#r<N3hr<+_o|6I6ZDZWLm$5e!|KwYU`s`{BFug
zX?vmO(-&d-Ngl~j<F<CwG&H^$j;2$$Lw@cF$Zp?)BKowz@&)D)TOsG3-pkSO_(%7J
zj}$i4YS>x7ilbfXm4Y?Wu?Re?<PL0rlH(J(VonemGIT1e`wwcICx9mQ`RSq_GV?v<
zE}ddKdB#hYZC5mfY+j8w9(Lxyi+jZjQ3aZ1k?!Toqaf?j1~T7P2C2>8fWB`a2MnLh
z#+yD;s(&&as2nF5^GWIuogdrwxeER(-_zdcRaiWFAB0|?iy&`?(#GaoD(|tEKlr0#
z{wjRZ_&c2(w}z|ly-}k6HbPf%gqE#}!g&u&`N_xss9JL5MZLZPzvQ0Mbrm;3p7398
zy(GiN9fc0<R%3*<I4idP0(B*(l)FI3)hQ96k22z@ZauL4&LU`>J($b(Hy0oJ6xDM2
zJ*qo3Soq%YaMG&>Tl~0)D{ma8Ga<qQeSDLu{*2_cyM<rB>Iu1dv|>~9Ry0q(Z_QWb
zBf_Vgf#|tcscL`+Yoe`G)3bB2!Ra<dkDta>e~jhKtd3M(u|qZ8^BzJz4iLXyKfXMD
z3GbLRpG!>~!ARQ$H)9A^dn{thf*>qCodxOUZ=inqKM=gc1Ebo8@yQ30eUZ*yqS+U@
zM~{Z=NpqpjFC3f?t(08UW@T5uVQeY8kDA?Mso}&(_V4ow{EVkyWMmh#_4*?H=2mp{
zkq?IS^W}uoX;7{A#|{0~a`4w*z~#(csI+i}%>H>w=x<l)@R1n4+@rPlF(avR_-rtA
zdqqj-J76tKf7@+3mkhK=n+uXhKdeD2+6ab~&8gD-H>jJ`pl&Ywifu+Yvt8Fne%m-2
zy^kR#_Sy}pQ|6;~$85}e|5E(EEx5SNNjdj*K>O>SobYlYmhSHcnY+JL9YgwK{EwSy
z!wE#o-NLfmdXI{}{s4K8wyX7B0{C9nIry%PAMX7m{I4Igl+bmK?33yP(OddJj7LWv
z*i2l-x5c+GxC_L4i(4qeh#RJcVQ}mSj9&a!iFFb8LE<Lxn7mZnxpuIjO%$8vX(fwW
zCGO~zP;++%Ma1U9jUBSj)6WCvn@zHJ6c5w1H;_Gc1cy()NVl6iaE2FP#H&|OJmm(s
zO`1nea_;lbd83x+G{N5fgRuBW6I8lxk<X<SH0?bFnoVygZpKjFRcV9Kho7iP?}y^$
zz3W)hv&#rm?`3HCR$pl2+JzlsBGFcryLrPGvNYNN;a}3odY=y)yS^e#=<L(MTUK%9
z1tzP#z#-;Nm=q^|iJ$$@EmDW&+oz%9ppIOfq^gl!;yKXD1T|J(r``O_LAym!v;MgP
zy6Nu<4Z~`MDKH!Zr>)>pkDno;<}yU=S&O?)x#BaIkr>`<Cmj4q7}0~TL7DGWDjOS&
zZud?Dd_NOIG)p*pPFKij=gy`lO-U13t%P1(%XKy@!DWCmPv|pF_#c%J8+`(bjKb9F
z{(<BnXP2VQQ?On*v>Hv!@XEohAy_jMBYS@h!NGQ1F-GPTp7IQR?n<d=4Qkb~Ada6t
zA2PLFQNN+H5)dXCo&*2SH|NtTH%H9f@HP16bQW*J2-g2@gH<ntPk3V*`t)nXdq2mp
zuVl#2{~Ez@O#|`9$5w2%>MM3gbV1F#T_ao{Nlty;AdIGe)oRs<3O0%-Hs}r<7~z5g
z7q7(g1AMq{w>9~9t_JgwyP?WP7=OOP|C`tbODv4h*zs>lZI}nO%VIeqHJo;obd=7(
zAozU3go7_P!>qAuSR+}9ycH3gIjz5Pz+@7;1g3(CUvJL7Ebp0wAn@5e1R^>_bIpnq
z6rbAxR|Svb;7{VCbRG^dX1zGOOJ6<^K1b3A5l~e(g>TpUapsV9N^0SID2gm}cl`Sb
zl|9cVzoaViS~G>4Dx)xCwLFu$tU<?CzG%?TQJi@+=Xd)^wLUxHUYkDbGS8CDV{a&?
zw}xT7a|~oCZMo4&Ve_Cca$eK~@63hq{%jap@3VtCvoqxR+8FB^U#XgAH-)cx1T0%!
zrs^$ekS*MudE4el$5V@rhCjikdkFeRxJ&;H&?rg#^>3u3GN3uTT`;E9LG^0>rfz)j
zjj80X(jmWnCobzEJF!+8dJ<SKIpgK13(AK0p^MotTBk$|bj3<-9=(&F?aG(YsQ=kc
z`s2>Rh54Qyb%^9JuK+Iibs(g>&xEWQWt7?L3AkDG1>FyQIMQ<=S0^`8_=Ya*Rr!A(
zjBs6l4&@u)uHX%x(%}jbN1PF`=bIkv_xTZ=dEtcSkyXm7Fds~^YQrb;eA)28glaoy
zQ<FF<g1$I$s>e0Oy6r-?HgAqallHpDPw5M^coBx4Z=!;P=fb@;xNC3y0%6N0V^;ge
zkoH?wP845<E=X9r0l&GMyHDl^qX)2mvmX?#8p*GZOEa#q3m5!6oV`Y+L%Q!03SaDq
z1LfWk*D(kaCyR4p-y(b~K8c1v6{@-(rFLspU^;wH(Iv~U=JPsw{d;@N^t-86-uC8<
z#HVmz;Y1GaTSkeM*6gNs#EOg>i0<x0<^NSF@l$jhu`Pw77Hhe<S1&XV1~}Nw4IS<O
zrnD3Na9gj*>=ql(RX_BX=W?o=cJw9aUC+V0h~}7isH@_<rBZoomB8`iE!pnpF2Z@*
zp|}rjg^3_8_TE7l_+Jm)p`XW%t)y%HhZTA*nay?GzEQl=V>njM%)v#SKx2AZ)x9r<
z{89fxZPam+@g>Lkx8T~CdU6>ukM);p=x8^8ZWr5J=3o=KUo*M;`@Aa5XkAPTmjp?6
z(hahs)4}4q0QOq{9b^w(#|?6ZkIw4PdtZ0ry6oj#ZkQ-J<EtZ*!zOWdaEzR}8kIPk
zad=J6?AHc$=ZLeHC~Lbf>d%Q=V$>tyu;nOD2XaZb!a+@)`dszg6C}^4tird$tZ`dm
zjBsTxfNz31r=4TjuL6diIteF}BRSi08X0UxL;d_DNC_D)u4>`O(ijX2oX?Imi!i-k
zIEKu0VCU!8A+BU9+U`3?y12cHNiWH#1`NRDV(AHNmVW!>m(*{)o}KwRkZ{#%y@$zn
zIhl1QbIB0>z<tOg*;NO1A;a%0I5j$z%nM{s@~tyx?e@aZCSjajumzXCKKP*XEX;PO
zfv}!BhC8h=;U~$ePEJ?+MqZ>OV;9!79LOdkT44Hw!(jEDALnh!Q?%zDz(sPlH#UZ0
z=BT#n=H7GAX4^bG-(wPHy0~HLPfuay<L0b2k~xvzP$i~KJFL5=la7g&4WHV%KbzkY
z&2MQr?W?J>m(HQ+O}k{Ty&Mudzo+1BF61=Ghz;M2Q!<bLrusPBvGc=XwcO~K8Ym8G
zBY8hx^OL*&Z@1N&={}gae=^%ClE<oxb*rxVN7bzQsHBw|!1T)`G;IAwsV^-g(|_ln
zW=2mX^tv^NLK9iM?S&^|eK7aUSd9335t@4ZOeHg;(EMg!usS;(YZtwQs-J#_c>C>C
zkh7ZZEndWtq5XyT^dBVjU4s#@j(U#Ja8tGHPM%I=8@-M*+t{IYov)HtP^JXl@<QV)
z2PnSZeRAu0p5p7@QLfuoa`f?G-x?DP+YpS_i$2i7o8tWNT*oCxn_=EHaT0u3BaY2h
z>V|Anwmat|S(h2)x6WEJ+;2vh51$MXBL_%!>m0-$yGhme+o|Q}WcJnj4Fq57f|iTV
zlbclwa^BMxQ@8gapOA%QtUFJBkN%`|?`~v1b{c7Zii3V~f5<y#1bNH<P`w|yq1`tg
z(sdq)Ih7Mo-{Tjs9(9#!e3wB{NGpEiJc-ROJyj1rUCzG8CbR9?uc<n~Rkil*$C-`o
zC}oI_qn~z$!^5SQ^*IZkq#cLgB7c~9HX5BgXRza1FFLWu2+h<dkY_Xsw8?R5@bE@(
zi`)bTeD0o>`5o=3Y{6A`-;-t9R=QE%6>XPGem%xi91q6wE~{0q4ff}(5*OKB_@Son
zm_qAP@jgdL7h(DaWo<(ombMs2mHifi^PVT-*d5N!8?7+mjuT$~tQTM3&tzG3NwR=?
zY&cN_Hn)s9wR652S(8p0=djb7fZx@~v5Pse<S7NVU%;Vn&q2-UX<Xv2piAp3l;U!q
zEO&f?RG**JC;lO9by&XR1ykAY(?V=Wss{IGerWSkEEes0q3Sf>f@9hmDv$YD+ye0w
zU>MJi_kR<1qyxkl>oLr475b0v4h<$Lkace;CT;Hwj=TSWAy>n2;mjaTJ^vA$o_`0H
zKLug&fW5GJoi}U2NuBp#Ef?(F4{oPz$UfjY6hF2==aR8h6ek&w{l~~6WDI9&g>}++
zNYN}lqdM82r^t#xP6?aAnbyCnY0JVewqg}u{>B8&%MGgeSsghp4Zt1y7IDadUT7=(
zc1?z#lG&;`XU2X~%<r$mgP)e5_xbTyG$_m6@q8p1`h=@zB)1i@GnxW~q2stn_9IEF
zNb9&si5T9F8lFFe%(Q6Xsm{YETaH2YgkLE6PjM1fj$pYTa^{vbYS6m~u545&{tti1
zZ?%fe|2U;QyFL{Smb(kxPI#mDpe|VV%|di*mJHWU`18x=Yq00C@n}EvD)@G7jn1dK
za#qGCGMVO#X<Mqmr=tP%ol6zlZ!gl@_ak}X#8H^P>Kd8GOYZkpcMLx%{Vu;5a)&HX
zJ!2#<(Rk2ZTf1I$+!PJD<&v9Sb^)qSorb{3Z%9)xLeaHf?;aJ~5d(f(gNEOF(SbA5
z@pgef-xw-=pI)!1WL9tSX}yK)f8!*pvz4?py)ja{Jvj$V*;Fg%W<PJ3=OR4Eb2Or~
z-+f4b&>3`3w=46`iSKe&DwsEp=c1w473bg5$Wb?gwQGx&I^T0@jn98zw%!3FPDhi=
z$3__F-U`Eiml<*5_z|0DjKcbqa&of2OL?D%((ScTXd3Pb@wb4TJRC5SQWe7?W7X!P
zImX`^#l`PGzy|4CxP1yG`}{lNFz$f?=fq19Gk}Y$U3k|Ne>5!fP-5a1;@Y(>@v`kW
zye(&gXz*d@+pRfzt`ix%k3!Rpmr3t(5E7^K!fNk8;dhr%b=$Ai_+8)8%e}#vy8J9P
zEX{x+a$YOgJ_Q|qS;g;6I%3wo(L8X^EUtL83nny%Vtv+9E|I%xl6fWsRLm820zsU~
z0Ia$)SGZQEA<?@Hy6yd$e6}m>)cpVyO&-E-mZQPnXCV7+iDvt-2n=nQ!x`@rWNs&n
zV$WA(zTugw8!--z|IlGY?*vZk_=56wJtyt-r{r_!6*=tb#6IqSQR&1taL_Od?~V52
zqia|3j;!uny!$cb{&Iyb$3}3&e=Wi7$^mema}W%9r`5oVzrqBwm0XaeW#@J|s`+9!
z`MJ2QI=`Gw4Z}Ur-(~|j-*w^GBc~vB@+vGc>!GHc`iav1ZiUuSFTwamPfYpi3q^bR
zQG>kW1N%&n=g$p#lpvkCd<PC%HI|E?T_Wut{VAgJBuWkS<)n+<$;qgi@<R`j<)1zr
z``{ilg{n}yXb(^aE$(>T0zD0WT%PrTLS9Z_&-=g!#7&a=Q2G%r$Dq=fA>DHp1@xMO
zHOF_8x7G>mG6H$;+7@iMpjGc3jK|77i079_aPbZ~_ogj^D(7U%pEwc^d)Z;-q3uu>
z<;9x^g>$s-wURnaq01|$az=yf4Zi<TDZbHPGCQvd;Xx`z+CG51$|K6NxqfWkzq7bP
zx?#ZiN!XCn7PMaimDICYYRc#jRPk>y4cQmOHOh4K&*_8(qh@3H=K!poF`aK%4#$J1
zI-+|&YkYZnKAQFq;?jRTp<u`^$b7d)d70?I+swpMbSsiAeXoMHS(TFdbPpLzK3O|u
zwrcuW7!}uNVXVm!DD7m64o63@@q1JBvyb7_!3M?Vk%zENFR6K_lhnwT!_mBTIi$~;
z1=j87!L#dLl573Ty|&jI%F2^I;b^&IRRkAmhE}N_v&7wY;794_byM=capROAM@-2%
zOYx_qSKRtE_*C4bc%Rjv{Ut%s1Z_WUXRGCBHZ#!natLZdrWX2K??rxNZ1I`nc)oEr
z5GxN3!N8v)@uYtoJ_~P-@r#Ixru?L=%3jThKKr5mq4dXko+9U(Ix=`YCI92r;wMZ6
z%U=)CfonQW_UwtNE3BY&zI044Eam7E(rG+;liUZ&JGjpYN?p@kcv(}_D2r)qZ*~-R
zoSG@`f(aPx{}o34V}g~F_CR)<iLfid7Eg@s&CwU@!28A`v=o+dSvTooJN!T)adv3C
zX*Jf2>cKuAW9a<%>(FOWsdPT(LssKzG<>lG_rd*8zxcAM8FZVX9L!L^|AumTZ!DKp
z90I=|Z5U2J2WM9;m{xp&y)w($Jj#WwE`(!ixsPko7P9G=Tqv~`Pm7%eMf+wcX-CrN
z%s|OEEc|<f^O!S=x3Pv@?n<$Dj^whm(@+!Bnl=AP&Q$Fx&q~=zycEyy>T94`Ap5af
zbGTyKZ{*!ia+lc=ki2{b8`;<h%hHnbOUy9)<xtd(b!6RWM=Guz&sFc`Jh46yTRZi~
z$Wvz^@|&+XTwLHqVe{z8aw~2aau1Bkgh#PYI^KgM3s9~?olOatt_h%+_5ImU?d$He
zP5LsO7Gv~WEo4sWt7zs;!n%hYNq1-+#qDf|hIIvMrI`i`Mqi-Ri(X*$hcDYVo}-8j
zgRy#eOPDEVy`*k>s`;ij*!1egrLUS}?ur^X?7IwY+jo~fYG<_Ff1fJ9`9@qp$0<6Z
zD_OmDXG3~1<$Q6&a2FeFbm+vo@>b~Q6O0RAwZVgzR%7<w2^?|Mm#Rm62F;H;bbNIh
ziV80ZpTqz<C6FEFt`s)2o+CDRKvCI1)b42qPDdrjH_Z`(qI<H%qCw(LnSm~j12NB`
zCt2P>%)T`o>NA9?+xVw?`B+z(12IK^zO1YoJ{*f@O6EnsUVOukp!!vm>bTO6e7u)I
z{NHw1{X85RZw^4~<k#T1xCkQd*kk0D>0A`@P|19GiRhLqd-ynFlBwjeXMRhj0g<FV
zcn0#GrmHnKMnie`kyPGt1$ZtPfPs6uqUM{Gtm%Bhz0NHT?aqmhu0sPkG+Co=+5yGq
z_7-}5sS8`am04fyJl@grD@<`R$FK#~=u{nt(GxC_S<OSRp1+8E{JYQzahM)V8_s%*
z*@}hiK$83yqhV$n)po%sOy0Nz^X!*_q2Ctwd&itO^57eYl~rMMx1nTj(+GZhr?U2P
z6E!^a0LvR=xM^fIxlQ^5Hux^(flEelaNtsImob6Q*AC!_AT5<DPa$ZcMm)^1xVC%>
z*Uc_dou7SDLz0Zx=nvVG6$XRpMRR)h&jj?)%elwv1r-J5s=Dbtlz{xjSR|bl!~6Gz
z(cMRZ?Zq!}c*SJaM~{WqCxzp-bRjk5N%zL4JLtEqqtZ{(`JZK^7PMQ(8@ieyt(SSy
z)V8$kMhr){&Y*kWdg1}6IqX!|jq}cUQ$uYTSg)Kwnm-OHPED~`_i`E~MI-A6ETMuQ
zC0p6dk5`Rfh@YP~XTyt~g{Gm`At`bJ<h75d%T6-0?{S@;=zaxH?N~I~ph2g%;<>N%
z<+uB+x#+{!YI<XDDoL2Zbp;RLS+ilNP1{S>TRUJ<lRp>bmKPds8y8luSi@9&5kiHT
ztnIaqYtJGYBpVf+8b#GBmr&+}5u~$rq|C>pxDM5DejgjY_FE@za&u<?pBBO1C!^41
zLmid&9ZgP!D{=FwmYBZ&G->j_8Ie@l2;O_0c*j4n2nEg9ae6JK4G-beX5y1E>x^2S
z$J(1U<fZ=)G-?q!ovEeUdSN#`?CM_n!Gxn%w}zxoAE02TJ(v&bhF;HBW2omtN?bjI
z^YwQj!leeXUe=LC{5q^&I3IocjAJ*0F|9f?nk)UUfM!+$=&ZIVtNxQ-*QJ>dc_fjl
zlO)#(uOZy-U%6|jBNkSSU~`BRhTCMYx!8=WqxY$K@f+oLVW;}`y*KB+&7ma8mR41D
z!Mxl3xaeXNIBb%RgjXL3EnLc(kEW~s?YDvXm24$=jyw2893<U8$?j`UhVZp*UGek3
z_Nd$Ti;~%~3wiD~<I-+lV8epGSRK*<y{=5diU}RCpzbudf3lX}?dQTc%chwj!5Ck3
z7Rs-AgPUElu<1If?K&*Lf>y1;kN3ln+777g8K4FmEr5jeKarvOD7pCDrwZe3pga1v
zTG{PqYW>%IJowU`9RgNiblxMybpE%HdUF#D6ldO}9YZ+%!z|FnOrUHy0hw(Pt2X)J
zvk5Duhp~Wt>$}LFyEo=^lNs}aZff<AF%aC;pJF>bp*VBtpH2CT0!O@nNE_)d&Kk~E
zS7xF9hy_Q+9i{N>eL(xqf>nvw48qJf#d;V!Jo3WV<73b!UG7E?oG{9xJqrUyHD6Z+
z6~R{smQLX(*`cWMIW@xZY-cK(TT<x%`$fse+*cEp%d@2HIz?WQyys5gty-=VUs4#@
zZje3i!EkIk(2Nt0my_9pSpK|hEZ+8?%MD2fsZ7pQFL4ou*p0yuw|G8U6^+mKS)<De
zz=}VYpy8#3db!hV&UF8gG)9u+vU^G4w&k)*P9vMQtMGQ273=$VXU7N6$gk!nsuZV&
z!OslrzZ=eu4Gxr9ngV{m>>}+~OQ7g4>4a-!|9|lwg<tqWx7)>I(&w+x@xG0`k2-Pa
zpPl&Vx*43<N6!`c-8t`k9a#61*=O+gs;0%k(>77!B?+5~XYz&J)A6k^H4ahov1a0u
z^=HGFaL)I<N7m^}pxnHfu*!U}L5;-7ecl+g#s+<44j5_{$Ch2Xa|kZriW}h=XgUq`
znm(#e>=md^a#!8@2!nI-LT(%zD_r@3Y;FAqRdk!c$(!U(jYeoPD-h!}GkM+#>F17e
zS9C8+sIZR{>$~+wczOXS#*qUb{Rjum=g3~@iduPbG8gsv$^CZXVt#A462o1s_ykJV
zXNILXj1oA1xE~&Eq2a>O&YWKUkk&4oj=HlOm5A7O{CQI^G(BKPCldQf4&ehu+iD2P
zkC5f1z2pLsXzmiq1M567`0wiw9WL`D6F+cVEAF`Sd%&aZVoXj14(GdMDA`vyaMm!^
z9hUiYWFQr!P9~@19hCROuMquk0FZcC4U)e}8!8UF`*+A(vg7?$&&NVu&S{;5L3Y6e
z?LRbvZsc?*nz;z06OvThE;6f0^kl34k*N9O>%y2};ljvm&7nVSgpBhe*rml;$apoK
z6AsQmU51Tvpp!67=bM0C=M`+({2iF@x5Ee*<aay0IQ`{1bf}zzMHPRm*_XOO(LT8c
zClBT1kiKYLKM)UAEs}fuMbgaJdpbz9!}PbOf$E~L>5c*V6<VXu&+E`|XHH@Eidu*~
zIUW-pY=-pX-$Kt1YcadQ5uAT~2F^ZRsIKslnkRFUl*^^^{;4LjAV=<*BY(do1l#%z
z!TgW^kV&&HxZy7kj`%c<Guj@4R2vOHN%|3zu3Cbz%hwcMw2Z4ySAxIcsNy|-5a%^3
zQ>?nJ;NoN3$?b*gb6#B|%}TB6vb9Qb>7($$m=&B@HJB6oKBmvbh#95dfa@K9u6FyP
zWW<cZ(j%Lpa`tnWdCm;2o`|RI`%xJF?R@EMeSzH0l21OCucog4KzR?FL(i`*vEkei
z^7W8?^5kTC``m$>9A>fpdL9KH8Y-;+e<TBE&#BvF20m;M)P8-F0!O-{)g?L4``x92
zyJPv?HD7UJ`U`L04$t^c<bti+DE@jgG&fHJIuwgxPJtLI_s0p^AzZ$_S`FVe3U3#$
zz=jDsC{ntK340XwcfA9_i>I>jp&oqljE3{h#ZcbY>7>6u84K5SL$j>4?Bui_!pC2M
z_>nVYhJHw?wBAVde{{n5sJT$~hh%|d?-+gZcSYOI4@|!sM}F6;U|Yvn`K-rq`dnXd
z*)|$qKMUjHmv;QRxDDp4nuMNrg)>(1fKsoHCesEhI`FMGrr#D%$WwdJZ+K7J&U@hJ
z&#S~2@zyov&EF92D4x;RGq7BEdtT?mG4pAPYCheBsN_4DP4uFY<i32OTsqU8D^<f(
zXVk^yDLyOxAT?wNIp^%7;43biHz5e`B@V&TZT47k{VE+LWAr-y4_WsW*Z5Uov9wvo
z<{sxL?ZY+Ff4^OM>1-t2*6|P;@CtU7SfN+uBf9Z}6%MRy$+4%#a(2I-u&vJ$t}@>V
zzJCtq)NWmgDrF}5X%^L|RRC;P<^63$rVk|N^V<^ASO+NuQFAHlp9!22UqP>r1aj@H
zlhnAsJwFPwVzW`j6piO8R#}SS+8-$S*G1@aat6GU&Ph}CeyGbyfuf7$O6{6i9Cm9B
zrq<>vu&+7h{29#W_bz5X%}pq|DfjAx9NK$*JO}<-02yoIxq8<b3JVk8+jilu_d7s)
zgL*JL`CWW0-;vf3P6bzd$-`j|1_j4p-sR~S=r36~?1E{^U*J+IbFvrf$ZP$dK;L~!
zxz{E$xaN~?-B31ij*&a3@E#^g&N6)}`v0_5HTiuWT8f*`I%*{}ya<A5i)ENJGF*5l
zcD(H?ORO=-y(mQiqodZC(eF>F?QF_kmnQIGe`9p=$OE4n4@vLXlP&Ld!)oc#J3YM%
zrRLQTXANk%EeCY9yOrR+L#g5$PmZ!n;KXU-D9m-_6wi}nF(Z}}f6s%c1EE~-z3>3W
zd?9`OXeHa@AZ28<mOkJ&?%C%ROdZgmraV3f+UE>Wwq|&{zYg=htXGPbr>d}1{4_&)
z<Bj=)*tq&2MJ05?4ClUB^vRqJC-qpBs7G?Iq>RZ|$j3aGVN4W8{5c7u&;O!o(h>_w
z_D#p=H9bk&Pe(!bZBb*<wQ%o}_8d?>f;DIVm9EPg%pgyG5H(W#byF10H3#vuh`$}8
z!7$PoBGUSEdN+G^tuy7&!aHQK+=g#_@W7%)r4Zd~DCyFAkl*`h7`j$+LnaX%Uh9SB
zKW8hD@`_rISc=tEHsr%yz$0JBhkgCoxcog8m3;;NttX)K9u4IE;>3BsY*p%F_N%GI
zW#oK8gQ?vwQ1n&F?8H1&*P5Gfk?VA<8`p}p%hpicqI@dNTMX9Mb)4GsxstczXI0<S
z0-*43$ZVZRMI|Q-U3~r_=ReH2g0_?Sl&N576{1v3$_MS)wp8gZJ`T%q;91y}UEU1A
zkf)yfs9gLZfxYB&noQ|^By$(jQnkEt2b_k*3sd0<nZGU;4r>H%XgwVpN6%v0P4(ix
z>w?bdeX-|OVLpuR!LOAd;W;bx`FtGb_|3*5<GjM?*p~Qu?kqGpEE%j1Wz^b2&n|Hb
zIQf<XmX6*@H(E=Vd`Txb+Hwe|y_en4Eq4mC43G@)W_Mk)6w-II!o!DMG33)~tTr#D
z+uJ4AlODp_!KqMYn+%z&y>Y{sK^QvjHk7-4qXtDt&t;_r6|DGy%5NX0hD>89uMNS9
z{>!+=v==1}(qp5OH^26fxpPAqg&$vs4Rt|mc==elU24mTk!?BswIAz}UQp{OJts65
z*Fmx$xD*+4z*h^n$wv4inT|N5EQ;SP>%!4XGgZ%#&2jV6c5EzuJ>$Q+aH*Bd_`g2_
zPu4`DiHikmf1RP0cAo=vi$gJ`XE?iMwt@2A;ugOw>_T5les(#AoiFWEk}M6BR`HkI
zb)8A)dEb5Q7~v7#5})8X*$?07N5LM>RAaJCGJua2pOzkM$lgYoJOXqdCP8qQ4tU<F
zHTs;kh4U?^aKTaOrwka!uj?ku4C_becVBj(Ib*qD`|lJzcpM%TzoOrVR;>B1(jA`t
z4OTnbvTJQ~ET7R9Oa}R(-hPsj86AQqzYO5Ubzh-*`ZLwptPEDQpNE+v*I~)OzH+wf
zfu@~X3PYhir_VkC1u~D(e=MR%i^W(Uk^<?Siph%RWB7XUtH<V3+NtiSpWBmC<mX3n
z7T|YoCHhBxlDUDFywlt`WOGm6^~P10!oFa8dyw2Q&Ve?e8GL?cjS*TG-f?3vI>j|Y
zxo0J5&eTF)iR?rpg<oOxFF2?Cs07}XocgbMba2l~beiGHr8EFi&gjrLwgvmHlY7_t
zX6(N_hJ7MdvtQaxnz`PI{hGd^+I^Nxh2oZ|YJoe}gmA>}PUP_G04%!kzObavOw=w~
zs74t73MVIMIDCRTM(2+fzVc@BH<c{ns*KYome1#+#$n)p!i>Dcds9AQAnI~63XAgx
z<JtrBIXrJN&bucp#@q8KrS&RI3AE(G%bu9y)1D3OEenSP+Vg>{o%mXU9lHK9o%3p(
z;aa>Fo#!~Pq3$I#TnNOV9mBZDw5~AkeTw4$?;fyuq2ufy&Oz1Q*JQl^02n6Jk@J<6
z0DqX`na^u@Ui}<wToa4BNp4`eax7$yd8EX2OhDf&=BOEIN8z{sBDb!6u_7j%!%J*1
z{l{r&m#A>-dOJLM#U5P_TVVcF$!46N#QKpY6x_xH(#zy`qpuy@ZWDne(Sx~RfP?f~
zo#0IIBGk>59O3*vp!@ZiQmA)g=RT?6uQMn)e*G~0c?}(SIs#wEOXv51xJS0j9c@6a
zs#Tk-?OL@#$Nv8ibUO-8ngdjQ&KPbB-|n^a$r`5o1c__I(7ZmBOoLRiX)y>B{iku9
zUfwSO@|kyE1DaDEDC>u-;OT3|Go#G7GA@B49YfjafWiS=yyU*STdiCsytZR6guQKv
z2^Lzk>^+3dI{ykqG0V|gx_O!}GcfSTOdM#`n$z|*fN7AgxZ3)$&Ny2&a~{kIy*7|u
zzX?jcA}RKGnxqnHDB?kPcqUmh&6Fu>(X(~RUPEWr{r0#}zq_+)v0csqbsN=t<L5{&
zO_-K@{-Kzqi@C&N97jz2n~X1ea!xN_>Fk+H-dcX|t}VpCA>~wGaf{-Nmtm#XDbSQ(
za!;7qi7T^1*=l(#m+wETCN{HV|M~{Su=gG%2tzV&-h0LJ*X{J~+-lU@$up*{<X_&a
z<Ujqg61r$L8%>GgXD+i)YiJ9_vCY^pKgZo`<t8{Vd4aHdw30#HL6?WE<bwkiNT+?W
zQhBz$<Y6T%^YbdT%*Y8Jjb6gJsn@7yvL_}D%7UaHE<r+!fy{3Wz`ErMMW)um1agqx
z&TGm)H5kpu4#CX-Mw0%(S}>1%LggQKDmIpKXDrA8oBLMyz{(a+j+Q%nyZ%sZY)v&A
zXW_1;9XadmHK;QTfus)KLB;(S6!B|cZk-audoRxCw|ly9O>=RgEghi5&zHW-iPsco
zXvYP^RPx#q!5bF4v94Kjir8`-eA-!{zVM||^4^a>Uk>H4z)1Fw{i5in{S2x5_k%|I
zcr~5>qqVlH&|R`w+q|53UZKp;<|M!we<%LjRXSL~&AC=*k4>TX$aAKL^gD(@e1@<f
zwas|PSYOtC|ASH~8Ckv8B23+OSjlLa0`XpY@IUgiLPrL1?SXG#bEjz-*!vu4e%|F?
zkeUFQJ$i%goj*EVl;@yytT*rO%at1*fnn_s_qy!IN?LbeD|N1+gE_X`5cwO0HMd87
z&@`;8NLSkR5`U1b3m5!54jLr4opoy|mVT{ZWvwLy`-V|H5DoD#$BH>u$$y#5+F$mj
zdapoXxb6mzEp5=^!Y~eVY=)s1ZcxtrU=BaLi>$i#=jd(Xxf=QjB04@KFS!dhS^q}?
z-ZQaiXsYBWhI3Ru6Rc^r94&@<qG`Xr(9lca4MUu8!>Z+&+PGbDns|r8zH?^Z_p{LF
z%WcxnAFF1De^is4Z$hl)CCPHmDKz!Ypo)H}<TdIJ%)BH!hfjfEa&)wKUF!?Wr+lOC
zN_J=ee_Qjx$L+Chk~0|o73XZ{epG!ZOZA!J3R?HR@Of}Au5P|S(I0t0fkPyt@p}0P
z&9uc3VAYn5+ey~#h|FTfPJsL;HBfmC@J5&|+74-p8jXj0a64Ph?9*4N-dhf)#|8tH
z*s%Vemr8t^85<s-Q6p2nCG8c-8Erob<{wu=N^X!mw{5BJt8!{wG8=<C{Xx0gw-QY^
z#lSc5SY$Fw&Lu#ZtHxlF*QP?}Ipf$=cFe)2JMzIR%h}<D2_NVeE!pCZkQzCX%ZBFA
z<<W9~KU+J(WM?F%CZ?%ze@&APU=R<yZ-fv23gz;zFDRB_d6aJH3}qI7Q(|dHyet`_
zA<dllUSvG_K3u^CNh{d;k3S%K$Ja{OC3)`lTM9L4(ids^pp-rHlo_}@3#(BzmX7w@
ziwm*z+wrI!eHs=PxN%OSBb&|o4+1}aPX(51+3>NkFv~TQO#J(z$=P_U&RYT6e{<BR
z-(2}}n)q42RD!nuW_t4Q7$sHAfWqEO&}HTYGQIQ#F2~yNJ-GuGEO8^9Q<h@2(U;A)
z?;=a+kG0y4Xgi`4M*O{iaziaS+eMhxU6HfTE|k5fuduydshYC9Lc`@7h5li4g*PJH
zU(FTwh)=0dck?&3$g!guF>?sS$UHIDNba=tdr2o?-g$}bFga{4e?B>Y3+f+H!nk{&
zowt(H0$cHu&08sWw1y4ZqlI~Meud4$9J#LEh30+NpS^77aLP>$I-KdvxzD}XNpkqf
z?km~*iX%SD?IgR#7F-^FL`hu~MTze>lFPzZ)HF<drw$Pibl3-<oSDPWZ5I{vlzd9D
z%;??@cQ=cAPFe3ROHSUFLWdkC!`2q=6_1_qjAUXn3uM-O+lXU4oN(=5AzYhVF8t@m
zq@R0Nh0T-kKt?R@`t2*O4O+wHRn0N*z6Vxn#f3R2T^aaAgRP%VMq6P)r@RZrI`@ZQ
zzI+We<o_ye_fB9gjJkPm1KDfAZt7Vz2%ENlq!McfjBgw*^QG@8*zO7#hHh2LcK<+G
z!q6|Wo`jW+Khl%f%Vd7+54EnDJa67~;e;Q~z{}ff_=br&7CapY##PPufGz;*^8LZZ
z?f@K>vy{QGQ%#mkK*3XYYQ3`;mTu9K?&~NvbD4&P*IQteb~U<v>PDsI+aUf_C$3%e
zANl`a&q+~}dBUu&82jQksNNW_G_>qaZcl9CcHvYuROY!493Z=u+aD>m{0kMul6!jh
zU`(13Abt}q+LxAr-X>TrPr9p?>iU4)?wQ!QCyvwSn_%h!BhnYFB<qrYK;xlPUEc&^
zTE$1miIS{9K|iove-WB44Z`Z3lUchaTiw-rDOc;SK*h_S$?MrQdbe#lXBxG^u(3V3
ztkqEVY(5^dQd)9K=a&5L;!-v&84P|aXLDrQIzAZD0pq2MmgF1)UdP2>l{TGiUpR7Z
zr+9S9?8@cdWvV83yjm(QI>Vi8_sX+#(Y$adG;D~Vyxudh`cK*GSoalfqHvPuHK@-d
zALFIYMcuw9?r!E5!bob4n&E#IM(t~cPQsrD&o8vWRb~N)U#h{m!h||C333biW9kow
zz$R%dJ0uxlg7BH>_hdLAdr3pVbhZ59N=Ump4{dK>poBrM#E)<o!rR^ho!=&<b`?{`
zvcE`s*g-k`a56d^myFW>0E}^*#5;tu<?I?m8Z`!9uhOEvas{M}TOf0RB+|^i1IC|s
zLz&-kaQ3y3_x2z#ymEBcKUl4pt{DurCTlp`>pB^!Zs;+84%ZmFkk6B8XzN=GXA<V)
zqouR(+0#i}qKm-zVH3G7C{Uimz0mFJX_!&g1!>e<a=T#!$-9=~Vevv)4eiVRANGLr
zK^u5iBfr0~)4A!+GW^_gDeCIKQv&;Tl^N9#zCFy0BY&x&;LWWOTx>XE-4!aBKNwOD
zxMR&JH+0!p3=0np#=U9n*|D}CnGTiiUbY9=n&v=y-CfWv{@1<k(k05<aR{Os7qjh8
zC*fL~{`_pa8|ogFf$55^l=m@+H19SksdsB3-E<sQ8JlC%>X97O+7zo6PQ{{bw%}20
zjx|$tP~J=a+q*{UuKy%Uv02{PD<@N)T^dESTS*SNL413l{ETGVkYTokTI%ozINPeK
z<<knv$L4&nq7N_JJ|Atu66BsX6NAGBp=RrQ_p)u_!cEvo+MHRcf2DM=`nXDuq(ZeT
zm2OJ&R-iL%b`M<tmJUy}$AWG3u&Y!uT5bE1$)-8{wqgkv4;+l;Uk?^8;uf{Kb%YZ8
zj|*$|%~G_B|5a_awn3+u{gkoQjtw1y*=0~G3_)*vbYmu4&l8W)`~WKY&>ZJ^1+n&t
zUe!SxxCX9#VnbhaOLszJ-|MjL@p#Fzo>uhfxlo&E%4NIn(}wrU(cV_LS93by+SMy?
zf`c$O7PR9bPQoHd{0<CDgVntGQ!w#$7c5IzjWDY}KC!(m-i9+OE%sw)t2O9j_ZD(?
z#BzhheA&HsM(3ezS>s`My7pc*WqR#X^b4d1J|&nn0X1NA-2qdj^Xu~_8XYBnX&A5q
z%g+};=0A5-!^lDCJWS3Q@ugH`!i6<;GAB+p2SasV#qL{oxp#KuGGQ_|oEShBQ~P1T
zmA>fy?kgTRbfxexP0?r9CJ0v|xvG_LmG)Re-tevvKY9r@){MlwgYBSk>I{rn{43Or
z+Ds=_09(FuLBGqHba=eX;5WXac6*n~-@PBkx3*?oA6Iopr7P<0I&t`ZaVEBU4XL(n
zkkuyz+^xLWgnMz;ra;b^vWrr;x=?!F4)U&c!h~x9oDx?_hMI7+52>Ps>C&ItJWlS0
zk)Zj<88wl=7P>9)m7Kl_==xqPEIK1|iUVt~WRD&D4a%im@@M3AKM$H7lhmY}N6D~6
z7`Q>bDe~*@C@*TJ?D(dl2@dA?&GyuIqXkCl4^z#5ry#p#Cflb9o5E`Zh6*#F;Lj}Z
z^1MtA!`8CKxGy$Lw&uEa57p3ttNHe(W$4@|lJn+@hrK))pLJ=4MRz1`X|R*bxy<#J
zFOog$$>HWLH>xR06@*XJae9s9Wxsx;lwO=eL6S?i{^Be7jVnM2o1xvXmi*aR<aT4E
zw_AS%7Jl0X6Hca6@WtVBCfq|$>{7^Lwm)0WyhT;x{((5fhvN@eqgFg*#y2Ohf5scd
z@XKHBw}-bxyG{d`ss?iKo{OMS)^S9y)^t*d=ZMGkP;B;)%C6aAYS(|%m#5ouO|tkS
znjwa-djSQrI%4I{Kn$MU3-an0VSauo7%T?38;lMXUKaLswQ@ks8YDaYr#-3YsD~Q$
ziyQk7vE=H6@su|55kzPnQ1ZuEJ~_sjb!+We^Vr~izA2QA<qYHXr#sgh#PxL91M>Ig
zlj+3XT$3m}wC!e;+cAVgj&<XL&_3jUV-m(a9K@L&!W6^q&ytl4k(`A%l{+U>c3d}(
zZ*vo@*UrTH*7=b1kMz-h@}TIoVUYOyDAcX;Apeg=in(zPmG7B`=Law2g>oM<{49Bq
z+;&(ra1?GhZGo}&sZid$2S*>@qxSTe#cMAMpC<Vh6b&m?z0$ggPt}JcGE0xf_u~n{
zUL1EJ9@D4HCbNeJAz+gm8qbWyq*ODEJLw~ik7UWeT!1*e4>~u$rue${#WS<S6}74}
z*Cih(*Glmge=;Z~6T0Jsg(EQ})P}W%EwN~T^u?l<i=+8lI#@XrZEBj~yyRgVKUDbW
zosW^r)DdjGFBP<sEApD=hiBHwU1(xYjJ{PtRZo_%)?_oC+!T*_SI?;To=c8sY9OaR
zcLt4dI9Anl!KUZykR-R{d>{<%U%miCMjkkw_D26JC&;wh4SF`jhbtC|U*Yt6wW#JW
z#0l3&dwMJto5i8--@fpm!kIT~Lpl4)O7>qp7iN}>#h`9Ixy~n((qGwt?U?oSAUcZs
z<+!od(I2#{3l!4^Te$IlIcGh~p_4WqX!yOYd+Ln#5cA3wop1TG+3&&w?%fYcsvOaN
z*$cAYGZy_5JK^mM@($fCPOG5<89wHc@1JdWt;G~Bef&LXs()2Ox(mP1a*$$kP$M(+
zm$dEp2(*<MeqQt~*le_f?_Fvq?<vVVSP9Q+cRYA@YR(Q%J7BcMzp%Ic6t);TlQRN)
z;<n3jpBr|gFk{$w;iG&7*2#8QT=O0Ze7i&1DNF3xSz5$e$@mz{e!6oMK52MNuy2@n
zU)Cz7YhO_Dp$Fi(eG<k9%d|Qzn!NKYIO}dAq>r)VnJ=ADxM^U2Qis($wh8;|f~qh4
zTQPflj22eTMr!i|nO8i9Qq5%2eIKtDO`C~LcE0RCOI&Kpm%@WZJ=ry}tK41OQGao-
zqF>VlJ+~~t$Uf6CYr!nGo8ZX~anW2|FbGo4_7?AGqdW`8u)n=I=05SpthGNv)$&w|
zIFv+LO|#KH&JBZX?YY#sH<ym@1}=vxU|!T9DX<j^zrsw7+S!c*CQCP^!zXYYphDJH
z`^l!ABPK-Nh7*RBoa{3O{o-e`Zu@gpw?=$*-@Q=sT)ff7(T*$I&k;BMQME9pi_AgV
zQuci5N0~oYoeC2n@nj9f1|(CV|9sAsT)6!f@wJ{!!2AzuxIpWJ;lJ*qlx62hb1a@i
zzc~gDmAyIQa0tfF)nL+vTVT4{7_+8DOZIv$24|*GWzZnjd^<%6zAsEY*Y0Zg^Chgw
zo9<pZxIad}*`y{NGN;@_Yp|VUeoTE@3X^^uS#=9zukU3~)m(?J(<FD~I))9K{voH1
zS>&{TE7gTer6&#JvEPpAs9E}pa3j}I*>?viV$3OuD4b0JhfL6A#uxCb?ShUtj~t$K
z!G?D#=$DLOzaQ<<$L|f??k_pGH@+PFuRHnCDJY&9jp=_2S8kT<Qvy~{{jigidBR!P
zy}h8M%n6^h7|b(=dva1y5;c7}P5ymfQc<2CnOZGlw|EOkPnFr;8{y}seS*wje<=Oa
z7UDMPgp)r54HG*k{_FSC+D&rLE+J*_Rmmr;4X5y?6bSvdBS!Zw2E!Up*0d=&?f7#`
z_IddX{1e>7`+AhzhG#&8Rcp$RNu}0Z7NXgB@!Lk3b874|vTQ1$XNnu<f1{G2i<MHn
zdNDYaJ4v3r1#9mQR*HLPieGgirkZb7qJ>eB=l@Ji`o0Fz?@k7>HL8|7{?Ed!SA9df
zamK%Qsp-Q7(C?`PP5gVsQ&=CFfnSsf7JgiN`dbJ@Kg``&LaQX(+xTz+rYEf6h@|;c
zpSqI*LS!#}XaHTF<Bd05#X;e5M@jcy3m!{7xT)QC(luRiFW6iU)p<81>ybm|%N|S5
zx;uM$HD_Ji9%?)=6R+Luin#;V375Vd7p2Z2<I_X2=ObIJ|2%<>+RtS}zI-ohd!4Sj
z@KC(5!!d1bC0OqNiVJW0<Jv!4bH$k`%=>Ui$?9=id_tc1pkfI+{GX+UFDc^FAmQ1K
z0}zji_f6sZ4w4=Dn5&A}Dg!v5XeG{scNFrqGe+C}1E!T7VB0msq$ww0$F3Pz{C5Fp
zQWit`=FO_EW+j`SI!(5l#EW)8$N87Orn33nQ0wz6#Gem_+Ly<vw9O`}p3_+kFIm6=
z9@DurE1aWVkC!>=Xee5dQkdUi5K@mNc<+N15Ao@MdCg5&@9eKwV;#5{U!j8~ft>mL
zFdRM}%hdUM+C116^UrUG;&^8+(oa+~Hv+$tf7^tG!te<51<llg!m`V8tp6~FF7Fnv
z!Ln{_-Y$>ok|Q``;{viQFv9XcAJ)9iRLqkSAv6CaWz_vl#o;$#+q|U+^T%On$WXN2
z5{En1C*Y8*X`FDhhSKs|p=0_Ss<^uZXFeUp5sUSty;Pu-&uK~KZ{9&=htZt9cn>+<
zlNsgNH;VQ1Ba|0Vp&sth5j7p2jc~jw`QpXBxFm6nWX`^&XU<F6>#IW$S~Uf8R!dfr
zCaXT*yaT7mf1&OTDPF$_lSX?FTE7qvLluIb&IeCaE<y99cSv)#3fy+pQBikq#b&Db
znEkGj<IKh6v+)YJURc2;{Rd;aJu@U{U`7FE1K1FKw9sbxWOVt>9Rr?7o-)yoD<7{$
zO|&HzkJ4aJpBY@faUtls>{m5M%oK~gmK>Qb^WDuim4ZJ9i*w?Uuqn5K!#!6%9B+Y|
zs?~+LvB$uC&w54ocbR*1>T{)CHy!Gh4Oc7suLZ3|06Df_Kv~K*3bq{0?T*Sjr(is1
zEtWIJZ1H<aUL-ETmQT(NL(Qe+)77B`ij!?G%)5~Z_8qfH)AwUxc%dH_jn6C8FG_-Y
zmU{MUyNznFntZ2E=9-pqWLo>2GOilou3$6t3hF4|=@+$bjxdTW!&UtWVI*&J#@DZw
zan^-*q%*(o{w#JpXKyv7rZ=~!vYVC+r{g%m+aAl0C(`C|W3izsN3xNBD%mr}fO*0t
zYTThm+nwK1%&T$i(R?6>h0Z~PU%q?9x~*h7XC~S8T7cDuhDqjjvr^o13mN?Py6;-o
zjgJm?k#oo{D6-k0X5aG0vYuNYV7mCMn|&ayU5VVMBe>|PDMTC;hGuvs9f-F<%cbsU
zq_<+{9?2BAw<8Bfm62c3bm@m2QJV^Z>)cPO4fgAx`tfAcwAin>_gc+)lUsB3)f`3l
z{XMlxzR!Xx>4Nm0Mk%lL!YJ-LJa^JFFp~VW?UR1eWmr#|WIr;b*(pvA4yZp7NJX~w
zg_Rq`@&0rpMYpqr^efA#F6$KNeydV-R!`JbM;y@lls7sroq<NN(zU7@U062cFDUTa
z1iq1O9ADlS>_<%H==c&PJT?>)TI~bV{(~StNuDvgLLjX+8G>KLf&V4x*1fR<e|s~~
zO;IH0TthXsdYN^_Vw$x|hnI<qsMmDVyVR?dJ?n*OFIk9B`>42ph)yr$qALR->B3xk
zlF(W_G}7&DejdCAhhmbCa1CV6X|dCheXq4<UHIoh^2vof7h!IMEkVny8LTT9%eCiT
z)3%(o9KHJ@*}IBI_Kx(wzM8;(@0PRO*6#BDu~uzeWuE*;1Vn}(rlR08r2VxA741q>
z63*{}+Gy#nlNHar&{J5X$>cFxG6IqE|2=DinJYd}y!c!yyDr1v&OcJq;8?5~>d!eP
z-FfrIAaov+Asn|<a{hZRZa6RpEgMy;TYnP<K9D&^_aDIV=2R^BbPiIcbwHnT`D_yP
zbW%Tq^KzP_?UZX!{pBR+ug}1c2mScj^x;^M`4hM~1aafX5PXuYL+3TGD4^pcuHO3$
zs$+fzm#*?|nYsmH<?fR`p$BUITcM=>pKm<Fm)x$6<AP6)T(M#&74$y_hON$M^S3?D
zYZ1j7*N#y4t$4M?L2o}OmQCj^f(QK<VCxt1Xa7D3ZttIghdqMX=xfPO?AD`O(Nv}~
z$$Hj!Q|{JOYPZh~Q=}`HF~A4UShvEM0V6PK`VvlGkWFT}H^fJnJR<4$_u|Xi4Cd1k
zNWW~X>X|;7t8aG!i~R|5{y7Mi+tx$%+gdfR#|NmLe4o-j=8<_#h7z#9Hy17XOL^Hw
zKL6R>6^;7CY3-HYscCr_)*pA{w<jm?QQb7IEB4`};`g8%5~mo(OO`d<nl-KK3lsOg
z2D=Z#(Zt}#Ph5Ml&SJHa;Mj&s_lpm;RL=jSCQ;Eoxxa<RL&m%fpk28|tsB~;+L{+q
z-h)l*|0p`+xERx~jgJaRk|ZO^ND?I>sd>)HNRlL#BuPdpNzx`H$smLf+BSrwB_ts>
z^W2A7n}n9xmV}m((9gCcA%ypS|LU*$R5SD3_qon>eJ`iO#^m?;XXY1W$~!CnV6xR7
zg4%L6R#=Xwy{lp<J9DR~)B1>h@i(CUK?hUBK2ZBk4kY`CI;fh?_w{c4e$=Uj&NIhY
zyWAQz6&K|Sqt9yHvS2bkol3HOOpu(~&dRReWvX=!=(RE!t$tsPowGQHHF7b9PF(~>
zpEyU@BT2a8=|qasU??!;vzRQ9v$!VE%LPf875JGMTU5e#*$B+p97B)JI^dg6oWYgo
zN2eNGC?M{FSe+OFrM%lZbd)3ad`zQQ)*m`oWUwnk%qjn`C#+zCG1L6nENqz-!}p1^
zgcef;)t+!A$%rjt<?47U;GC2yHkM?6k>K6ciA>)Zlh^P1Jmb*|B&)^?&L?M5#8o9{
zefYDcF~!gkxRNyox?<wQP>MJF4yJFn3FVwm)?w#NLG?<^{$~o=@J<-3@DR@1yvP(0
zQ(0AhEaj0G1XuJ&-R$Yif3+F5ZtsNcBw$?^1GJm59AOpTk4&~^iCY$+f;0FkBbq_c
zX3rG;+MxVp0rPoM0jkNz1kLvYV#}pHyc0PPEG&D|_QCxqG5;Az9-b3xzO}JXzZ6iE
zlyL^12f0m2g^Jq<&L<zhfu}RbYT0dOu!Cn6{`ga_?B$Eik9sm#J(#{YC`mD=Sgg=-
zpH06?QFgrt%Prw-vhlX)xXl!E=G|tBhqoXzEe4xgI3M)NG%A~<gXWrjpsPIuQ4tZ8
zv;1e4z38G4Ia?3CcIU8csVjy@xsuD+I=1C`A}QTxA^gt|?|tR>;A_u?vFoK2<Z6M&
zf8PiDtW)4EGbBsBV6OWOL1}m|NY9KUzmn(dPP{K_TZ8aQSp*)~If^_s8e@q28q!$^
zLZUmMmS+(X56P(PQZQuvwh^TBI5*-3V{ht%G1h+}GYVMA@3pU(*US*o+*`)JZ|5_^
z*Hn@|<{X0Nc5#)BCl*}3EjA?Z+2{Bs@HL!`#aE`I8UKDsx;11^cc9`!K`e8j3w20M
zdCxQVaoB$nt-P%$=bJ!fvJkqqXFMtk=Luz9+QjPgWYAV;3+4L=EC&RT^lKQr=B!lJ
zVLr2-odi*h1F2#0LzcCpg8lzYs+9a9D0?pvG)7M#_~&$dbZ!k<c%@TDX9lx3PiOhj
z|FPEN=b4-Q3~Opiq@-EKC`tLly)1+K!tbO}l=pm+4nqvRSi>Z3Awuk7k>?HqA@T7}
zan~j@+}qigB41fy!>O+1{Jv7qtoc(cO5X|P->RWH5ir!jk-6^HN1p(me=mpypDZWJ
z{`WO=?6aRK7p>vVxy~qkG@Mrbm_a4+#jySA5G>QZ6B$V{Qm-qjOzhPKyL`lm^?W|c
zJIM^^tOvi5M_Jwiz{*{7&@t;Y)BEa9vW0_$k&_%zAt!mQFVA82<bBuUUO`Dp63R9o
zU<C{2l6|BTOZ#gkC5CFCzDpj&F4@2$O=i$>NB)`krGyp47zizf|6>78({baKDHLwf
ziwqCgVzs(E(=M6JBte!~Jur+p{}+g+hC@K|ITvzEoGAa}YRqxuea=_yNw-EqhMZh2
z``t;@`Wf+#mnaNf-W$XFZ(zE`i^UvT!MkyIKa@W2MrCuDH0&FbzGh6K{Zokg<tWH*
z-2rE#TcY9P$&`5Vnc&H1lFG*Y?69bZ9bMwM*WV24XWn7=?&LvP$wH>_RI+TvP<~fd
z@VweUbpF|laylY0{=W>+P3grnzi*Of{IV6amc5`N`UohDc~9q~dI*hag4Qn!p>o12
zrnqW=K7R_7%)NX;Gggyg)Mk0qfsqv6SAkhtepeaw19Ia{(b+H>?1Dz4Thv4r75fsj
z)4Z^(nCDzWUWlHZcH9%QUwp~^8&X4&<=F0E;Zvnl=e`^_ZV1ObN}}W4)?nn#Xc`&Q
z8`U2clh%0zHJyIVvi{_~dRF$Fi)#tWO@@Nw+oh!C9ft=vqs4DcF%*S+P{WWzkodAh
z(AMji$?Qb3d)tGQlZ-Jat~Xj<;k)EeFR^+;K66}^%~bk2u+KIp#rP`LVLug{ubV;5
zTka*lIG?Qh8j;?pLDX^R4WvBT!R*G_W0S2vR``r1pVOf{llYgsC^ecAH!Q(Q?J_)H
z=|@|t)>2BwNmjXj59Ar$2dQv}>HepI^C83NN(|SMsw|iy@3vh2AqfjMPs7OhbFnDm
z6uai;ND*u)Nj*{^DaM&|%iS@1l`Y?m+yMJk{m_1`j5B{0aL&sl2oi^)&$W^8eODSP
zHrC1Q$IQUw$w3%ge4hCvet`fFKGQw6#fB+MNn&9j8W;>B)xv$?Sj2me_WaBI`@3Rn
z>l9|`7f10yH^Fw)a#D`x{Xe%kyY-%wRyn1Uoq--*yA?xzOGc0?tqC;U`$71Uqs(={
zYz*)Im06YX*VAh&#5p;T`OHR^<FuF@Cm|I)<z3RZwJck6j&+Xx%Fg?nQ~Kmd*f5D_
z11{c!yi38DWzShjjn^T+Zw{;FdT{o+JfZ#;=ZDvxf#UT;n4!%da4K^epECkj@_-Cz
z4{ZY@ea-=Wx`v$lbFQOmsNiLmKq-6Uu)<x-D%%c$;e{BoG#-d<D?-3;{Q%1SX(HD$
z_b|=G^|1Z>64c&U#+=vkbM~9aoUd04r{>K@?cV_u|J)F5yVWq=slP=D=j|Fh+-BAb
z24m(eUzRXkPA%Img6nTTk)Pfmz7xt2J->2(cGzEH@xC+6{L^F#bwi#zOlHA9BqUvw
zh8@}*W^-#eWd?Em{n>I#R-b{aKch*rVKz8_8i115yM>f@J6U$;dU$kWB58hqsCKq^
zz;nvgB3qR}22bblKJwupS$g}jBwr_H&V9&gep^BLe{z2JRv8xX98Sf*rmV~=jQ8_K
zqH;hP6b?7%`oc80)WrXO^Lb1bQVva)|3Un@My4z0-nOn2D8@a6B+h@id0f?}h}CxJ
zGk!GMo4$bd5WefYGgb&c6To%vuADXU|2+{iN%qEE$h*Ps{GB~9!1}hB*0cvoqB$4E
z%pUa)^UPbXUpcezSDxqo1}O`sW6Olo%wm)cb@YG1VrI=E1DiKc-@JlksTR1`oaeZo
zJYZFa$MU@MYbY5X!QWF|MCS*kVo}HpR6(^`^KrMRxt6as|9Tz9)+VF+-&vS5^bo83
z#Lpl5?W`(@XTpx^Vf{RR-skDcnUK}2eai;c79ENz{W!U%e-!WD_F?xrreKoOR5W?W
zUtd$M=lAPPVLRqg#JTz85}U`0{MM1%@kQ9$b3Qg~8Aev`0V~RM5VheIs|p^5CEEv}
zYHlG!Ho0QwbrZBG566<oKr)(SMqz<-NU=~K6}=BYV((fe3E9q!?f0@v+j>z^?`y1c
zf(Os_$-%Vkn_z!tCDk`1ljBiO3b+>yi6yC6{AMufoZkv%9h(L7eL5yT!CCbe-m(f<
z4gp<LDf8YZzB@T9cdM{R!;;0|Y%`4o&7H{m4UWQ_ah6m)b|F(v4rBrR{7sbYXR)eK
z2wyEn>5{H2Gl-wHPxGN>1K)|f{fRkW{Q{3>JCn&QH%#&S4-)se@H|gf>>QwlobGZo
z?(9yU=h8@ef;iVaR}Ax;LYkeug!39F^6$GEcPSE4GBiOQe}`vi?u!u6c$6LYPD3z$
z4X=~?bJoHoR`%|9A)e<kijSAVy-&x%BVht&Te?v@@0vGuuLIRHxw_)VS?0WAJX6I@
zfZ|8dP?kD{5?0D+3tCf|+7Bw$PlYSvr(vxr@1^V>0kV&`SnPs%O!w9l4+n8Zx$P$A
zeb$#`dpZT!J&TGq?Begk7tnFhntYBjkcwj|u09O4+qVc(hXhiNEd-6{F1dPB3YLeR
zXNkf8i6JG!c-Ngdh4Y!pbjxMY#c=_3o<7eK4sv#n>p>>X;yalGc1(SWYc%}1m-O15
z()w3H^<{p~9ax~Q4>$;JX|u`Y*-@6iv?m(WFlO<e2WDI3i>e2De6|`WS4ozr6%|{U
zojdni_&Tyg-q&mNE`kn+4W-xr^4-nmP`YQ&Ir&c)Q=W`_7?z9|wfa+d=7P_nzGV=+
zaRJp|;mr7^9YWi*S(N`H4o~s!m)ymk<mxdKcX8b`_ta#H>$(>6`RwsUKao_ojiADH
zF1W@?$+%KUdG+R?8fGeQUbY-U&W%FnQ^YwW{0_Kc0=B+b0H@xq!Lq;dpj2YXeVG~Z
zfVK;wR%gL#76y{lb3Tg<TZ@VqHLH3SKvlc+NT+AOy$+Ye8Y{j(PE}IIokV(hGzKki
zDk!3T0BWaLkaG7V_FX*y>z3$K=!3cFyf2J-{p*iu5gIV>bVo~wri8!csK_}lr2W|q
z@AU71s=X&z{UN^ZyK)Aqo-d&$cT=uw@=lztb<D$b45>Z_Q(E`qtnR^le3aCmlq#8E
zd~7Bb26E;^ULiB=7sT3Mz2<sFCL|Rtp*tlWICgkAg}AJtg5}2rjVQy6>|A(xI0;>N
z#!B+`p8T4%C&^|7iKb4yDJILA>}E~G*H$wzXnq_e?H-Pj0&6$BK|!dQa$oIn(}fCN
zo)V?)GblbbpIvjb!Y6#6Vq9|vE={thdcA+49_CVIT04}8{PUZV$1?Qyu{y4QDrc-G
z^PyLn!o7%f+CO6f9gmp#(&<=S_lZf}H-fWe3WOin#yXx<L)Z&}($B9Zx0zC`{Pi0c
zo-XFS7#FynxE!-{7vuN!<Eg<UfSmmv3&{~%`EDYen)mSR?)M8UYGNxhJ(DFwluSX{
z(a(bO_*Wn?ZgH1w^r0-X8D#Q??>Tvou&j@<pj@0ThSr8azG^4DaT>;V5NDtQ!&ySx
zT$I^cL;Up(0DF3%tl^Civ341y6uD7@8Rv=I=tc%>tnuSqf3o}LLZ?>EpcK2ite_X?
zG9GM!>cgB75g0-+gL~a0;_3U@i6ptNX5o4(vF6q>aMruTn$34XcFRY2^O|caR}#to
z6KD6OMq%;DUqJFINc2n9LEK-x=$5Y$zFg%(d27e;J7pIt{vJR{nFFZKBoZHSe}Jbn
z9`*7aXxDQO)J3!l+WB9E0AI#xYj~Dot%4Nx|HvbuKenG(K(U=)A?&YMN}kmW%D!5V
z_BF)Ju$Q2D>nxb|{#{HTzL2!)7_9!+kTl(*1!lYg#}3xV#E5oC8MBM&?7T&@<xUvt
ztOnUnZEP&}+7)ztCbX`y!bYq9^qQhMb7Q|)H1Z|$o@+vGKQDl)+(dL<KZ!}l4g^)e
zE;v4SE!i}4&-$iNlD-)TW&^kvVd;C;5_k@x92~G{a$iyiNuu{jzRw@n#G2bBke0rZ
z#%g)jvm^JMW+1j-8%#-ZPpt1Zfted-agW$kmiui5Dwgtol@Axd`c)Y3&WIz$=SO0u
zs|icNW;o9~nU2r*MpbT>T2}d9^cnFF%j7$v{Quo#QDNLO4(C|SDHF24^$iq-6VSff
zC~!+U04a4tS?HzVWE_2r?=RgcB8YRfw^cKjIuWv7c%Wo~K$5ke<<2t*;wJQ_P@6DT
zv&NVjo-IV(%{lBN@6L!b?Mkt$rKmS*2<I^W4@_M*iNP&LSZ%ErW{ZCG<fQ|;G{s^2
zmaD*jTY9A6eym&REU~o(G`_~H<G&3|JwA|}J-nH1QUL_?DP@u#WlVB;Aly-mps4y+
zVEiZ>92@d@w&tB6iT~(seE2YE$Lte5`Yj^=_dMeo7(-qyg<zB%NHt4;;`_d0vBc1V
z_8O;Sw`?E&cMQUex*wp3RtuV+ozeHFB)WHY1xfTp@Gy_1y^j2@I6IbH*CkMBlpk}p
zs}(!9R<VKzbD`lk-g8}YQ*N-9Go)?~!t({Dq`Ch=u-z8SGsM#<Kb&VR3Relu8>DEr
zs4JOtjw7EBd2Hkno;~=n2t#+bLbZbl_iiC7pPGoOtJj!nP%q9L_{5r@Ujo_E%Yy5_
zy|`bbQOsVz^R+8)gLcAV<~O_>l_TFJ7|tM{>#5*YpUHGmLsoENzu;H%h((_5jk<UL
z2xTQVL~TJ8+hQQWf&<Yk@ko)NR~A4cXF2np9ZRvTB><Hd9tk0i>q(Y<pZUKx#8qBa
zWUq;W?_ZF6sOrSFelyAG(NNCXx-6I;S;|?tZv^Shd+eiCARdU|JDbawc@ETyG<_yO
zMc>^_qO_!q<(!B2A)NAm*#U>ik8@Sl@Jvjz2v4oCbL%q**k2|_{2q;F<>N7PS1P|d
zoQ161E|iv{VFmBc3r(p@$uo8kX|}2aN#0JmpXMQCFRl}Oj#RPWF^3`ZD))NLGlX69
zd}-H<0Fq?&=lykid0$gkz6aYQ)c&a?pKZuGe_fA;$&OGvU=^AlJj6QRXR^A|W$1m*
z5!Hq<Bz0<F**~Nxc}HsHSf$|T(81?n&XxGn4H6GM6k<0Yhx`i8OPCWNx0=uh*%3=w
zM@kBqbPd9L3O&+HnF`8#u~4x8Z$URCg4(D42RU^%e3#Z!2uL4E$=^yrb8P|1zYjpi
zxC^Yg$4D&uh^QSHfWZ%klh4%i>}5A|Y(3G=?-q4TD@`J=o{KR(BM}WfH?f@HAIv7f
znQnEg!TcV)_iH|9ig%fUyMzdofE2Pe{Q-$ay{MkwQ973n$Bp^==(>3wnb$LBy&{gX
zUVmZ*uf~I4NF`Hkm<u)A+d%XBoN#KTf^r&%Q^n`g5cY8$Hk&KZ@Zfs*xYvMGKU`4r
zTe1Mo4)_RUJeQj8)@T)l%138~rZa$%pZa0|PQ}<V8<w}{2B;Fxi-{HX;3JJ<FIO6)
z%;AAh7!`~~=cZ7*-WjIhFMFiTU@B)Vpn7VCx<R67<#d3ldw7s{n=7e~3+ltFIJ8l%
zr8nGXt3EoK!lPs8_3~-tJoXFN|5yU`{*y>kl`Trgb?2}DXi_d#v4kT$x0lqP_tWm-
z`R>(%W@8j2&g8!6xA#~@@MxBwy8^ezOu^8`U{;_VO(W$zGvBC#_|%D5^PdXJU?*#y
z$^F601xo7DgED)LWZCn3vczpnDC;^1B}W4V&rkC>7rr-ERt+FY%XPK(<0NK$kMl4$
z-v=Gf4m6xS!L0T_W||L;!tpt=SkY@5m|DlN@LLg-JtGhbe0WA8=1)-M4TGwS>oC-&
zKa{DYJiD(IvO~VIw7PwqA;j4s(YK&AhTq@38%3Avcc6M1*I=JK6t(-NLo44eyXq|@
zz3on%@%Tz~c7DMUGdx*r%o%7Ia-9`ydMBC+0%hKhAjPh6!jphFY%Yi=mUNgUCW)XL
z-yt7+X$b#6RI}o^{@^=`_bBx7MS~_O)hwG$=Z7rf9S*&r<I^^#dEz2gHy&Uiy!)zE
z+mp!-nv0#~qp2iy2Xs1cEljhLvR}?-Nz12F{HT4*dXYaF-X6|m*<ZOg@>gheienXP
zLbwm!l=7zEfMC8$3V*p9Qr?_pE%77BMe>~Q85LsK^a-f>p9x*r#y!*>i6k4aPPFly
zPL&UHSl;5t(4roV9cwu!YnVB@oL6AC&z6{G&-W~YWTY75D87C+nW6^vM*G9Z*}chS
z%&PAbW-6VD%B5YwYuR!PwdlgKAJnmcFBzh5%Y5pzILwOc2cU%u_x<^%iiU8O)z><*
z6z@;anxTd!y(+jja|g2>ATo=YeQ1}xGd7Q3!SasjGu>JKzMa&-D$cG)uX%kDa-OoL
z)~z6W-U_i-r$S<vW#ndY0$$2_$7&9rsjI@}8F^gqG_iwbV+BjLTujAV_p?hE`e2qz
z6qZ+V4S4fdYFf1u3Y462e&?N_o05uQlZ|*!&}-)P(VhZ+&lavF*r2?63bjUZ&)uZ&
z?B&a@<TA+xOBN`pu6Y{T-QavVM@uZeY|2#r>zT)vAk<d$A(ioCd1#z1*NvycuI+2_
z{GDNFID+f-{5)zm?t-p8e6ab|2nel8=G{yySV7;r!lmUyNS9v9vv#XMbE{pH3>q#L
z?wW+o-~oBbeE0NqH8cEkCMhd<X5`~rXdcBgR?%kEy3v|ewRofbr7CcnKakxkmSa?Q
zHEfqmL${xQWvfKaS(?Wg?%$)ZWA=LpJ<yH!5@gHWga~S0zn(G+=TNBCW|p^6$+Nyk
zSp2!|ARBO!XBgX{dXXN6F4@X-o7_n`!2+CznW3iiqFgFfvJzL$jM{pM$+uYG`DP=G
zxXWLgv&$)NP!x`QVTqorgGjlsH+a2Sjn?1i<N1;S=r!I08zvZ0sC@|=*~a^rLv!VA
z2m6y^zeHTMZWL*b4HPvtu2?so=k@0;W`z|FRK0-vZuuV-9{7N>^S^`S6KA%3T0o)y
z`LXQ#FGQ9#gYUN9sbfz{m}yb77{DGf=Y?m5oZWlb6SE;`6TOZ~cwemI*ckEAZLW2k
zh~gQ`b3*8>kxVi?Q9NHDCnZ?1hNMW6_2pWf%82)7no;cZdFax}v!xz}EOBBK2FxAH
zWUD5CtU!+p@9ClAS_7(A&S6*frD36zd+U!Bfa>~0xV2|0NiOwqx9Zjn3np|Id`{T0
zOpAvszA%E^!o4Zxtw7DA9jNHI1C=cN0agzEP}jeOg&Qm)yLbHOc&AZQj1rw6y%J^5
zwPJzMAz^Pb_fMZ+L|*^B1oO0YDDlpf+uI!FbEXZ2$39{%#!1u}uVS5S5gorV83Ssi
zn0{s*IsdK$^HDhv{)7A6*6$Zvr$@5-y+@(Watvv@oRUkL)74&cuX3)38_QQOAl*be
z_C_{@iYhO$a>#}D-}nsc<$)R;Cl)l15Gq{yLPO~VFb+Eh4Mopcd2Avows98Tm!GI5
zvIU$aGkLehL9Ts2V<r~9m>oZj>E;X&;o?Hd;`d$YkaY|Kr%>#T{$P5wnRl4?CezMh
zkksb77f9ZTPkI<*v*jmfam<9Wy_sT)%|_NLIV`sS!nq{YgGu-4xS$$6P+-qfFf!AZ
znso*6TH=5seR`3?zYA3y9R}V5JV<lmCxU*LL3cisDZL$8_S^;Ru#qQ~gy%7*gyAIJ
zy%(y|R-y8k1GKG+M*EPi<kQ=mAZIK|hxB7vyo*4#Igm+QPRVUv{6tpmo~SD1OCwhS
zi+4YR%DX0{tVx3JbC>Z>T>~ui=eqttu01#Y05)?!ON_V#<$<LvB%=rBoaC8Nc*m62
zB-na)5gj-df=#D3F-vhFN}7!>n?7x22D`q2Q~4rH%X<Xbzw=zdvnFt?KF;r9eJEm0
z8b%J)LvyZ6xeXVf;dBya?wN=#6ZJ7~M>$At_EYP|aUWkSXa2sP4d&f<LWkjHrr625
z76Q!B@u!CnX%<a!e*AxQ%!V2Mo(aZ9QZ!xuv-oD&5WF%V8MR@<nE8i&tXucNRMF}U
zBYDSGMe8`Gvlt+lrr#CLFImJHIR0GA9fkpaCz0MGL#${~LZ*sy&>kg#cSAaTNgju~
z-@5QVp<m(kb7ML%Wg#i@$_4cdeN@eIW42@5Deuf27XS4!v;W1B^2CYE`E!A2$osae
zf0L2q+sVrXvzwq}65j_0TgX>cJCkwS9MbO91I7ErqF<{o&kzo!j9(^mpVutD?@0$k
z&l2We+n<bQet_&XpTx4cO`LK07VLWW#Z_R=KZoy_B6NqCmRHNZ)Z36tO*l&LaIb23
zZ&b|cAu2u(5EMO6ve>^}Sf{=toqF1xDt`UIG#iT4s)xp$14x4Fk!Z>~(x3MJGZW*d
zc0ubAK3so2Bai(gKwh&sMA*2JoBkkFxw#1kW}8w?mo~7Lt;0O7tHI=8ipv>D>c$|<
zT6vjuPMgO2^7#C4=!M|8_8|*T{TIG3;r`v7^XS$?{`^~J$-KuHP}Jiva*J_d<+hw#
zy}5<8{1=QBBfFD<lZxpkc!8O>H5wk#F|S=f`6~yLD#u@K*4vcA9F~!!%TRgkp;7!f
zFpR?Ac=FsR=S7#l0L`lav>t3u8cma6KhzbzS9+6TgCja7#_=6aH_G365YpC!qmMt&
zM=f!qgb{kw_NhN6?Hf)KcE}wZM`NB#A5`__9+|z1N%vPN%MVPVhWC8OwxAQNsuQWu
z(;TDpUowT+8bSK0FH_1VlFzyW?7N~b8gCd!Ryc~xbDuEFh2bbW?T(UvMuEi0k;Thw
zF#P9iu#K#P+L$h=eSLz(=Eq>ZB$JJm_Thb8Q6O<%DC$lKAPFp2mrOaqx#)4~ygl5*
z_k94Xo7)4`QDLMFdC0b}=|vl36e#<!0df@8P(RKI6StKLnkg=1e)<vjA+J!^{xqEO
zPV-KhKh0?SnHi)yutn`ya)Tw0;N9mpZ-7&Ylp5~5WX_ov#X6Wtp5ffD(ELq^a@C_`
z$r$SFavHo_rl3zc?;@!&rteqgP-{Q_a|#EGUVn}yL*hD0Tq&4k`O=d?GRz#%mwoK+
zKmjpbDCmG3!C(q?{BFu;ffA;7Iu)bt2a&}ue(3+$nyh8>(6G=GtKGhVSx_MPKU_$$
zVJ<v#E0t@uSg<;m#Z<53T^%OQWZUYF<~m<&k@hBMpB`Ymt_7r=i(qK@8!H<h3HNxu
zqq4w}-gK<TAg;eP{jn9U{pE~~(=USIRBx0y?GW6KIiW|oJte#Lr2Mw!_+&rdvqmif
z*w3|bZ8W^>8;Jn}l9{4vAQTI{zd750QojsCqryelve}&^Ci<ci&BFqta#rkF!y1GH
zipek{Svi8l)>)lhdXTmLagujyR*91FDS}cpS}3#eLY?AIrb_X}dn5RJ)nqayT?i-p
zE~_ByoFz%eO1bAtCT73y2hFb2vFcL<dK~MG@#Bxc>wey-vCUO$23(YTg*}74|3;I+
z&PMRMu?gB`Gs(V>A=Y|Mpype3%wEZPkGf$vmU~E*^DLR7yD{gv@t({_4PwyOHK^#P
zC)^q^m-{ysbB2!_v+v5Y%Zt7V2AT<2-1-wUmR)1UCj7Z-VJw*bzEAjalYh^DS?crS
zS7N}iN)ar6Ws>!q-3#UqgZh6Y&^XEu&HHX;d)>RE`tKg-AGnaR8$Yn3gFnD#f(4Zx
zFu|DJt4Xu|q-c9ghB}{G!Fhi=NtU0G*Z;AWOqZ@^yJAM7mns~ietQSD-%P0Zz$8q+
zZH8Hm`@zefSVvtV`4paHR>kAc+4i#-?vV{+_2d}2bREJYW6B<t021*xF{-&Yy?Md)
z>5S1lhu?#1OGv8qheXxaaq2BI^iYxTU2e7pdB?UZrHn|RqJEsM?`Ovu4Fg$i=pZy;
zhnUG;1xe$opnmOkW_Z;BTt5E{C0<sT(e*j2ROE3^%SiCi8%Kt9Cm>$4l&+kgfPULI
zLfO$vtZ~#*w6Kn#a{pT_m_GxW#~Wa~3Gdh8EKN<sL!thZC#0V8<Cz8t-8+Ajt#Y1#
z&J>7cYgR+ECIOjr9R^5>nezN<G(2X`oac5hm9dFh`|g7fa3g{p|02O+eS0=i5U``u
zn)f_!f$9n0*~at~Dw}W}<leFTx%WxbXg)DRb8CP_9js(aZz>+aJGXK<pNa3HO>HN_
z;nRJvKs$y>ZvQHHYNw*r&InSU^~V5<U`+Y#E9Xn?WR^DouXx6w*?rCqdS-&@+bqfR
z#AKe2*u#`1$Kd##Wt8o2!##?}FEjm0aQ<Nemx4H-&fcEsZkn?2T@I*1?y)M|&r)Qk
zp@sKTGzKN3%DhBSe4WiR!)xS@Nms$zG61Ddd490_eJGh4k8rYqh5o*rsRllkcOKZx
z%CB=RYtVR6=NTe|_Sp#9t6UeYO(mrwKyYf4@{9<7F3vFG8sa?8rSC`1rwy6u@+e3#
zc+0B)X=MSQ;-R&)QD_Z%E;j7B&+Nmug6jXfcx)Dt^YPz>y8I=W;&za8fMQ9u$wBn%
zx0Z9A=hBr?V^Oy+oU^BXVO7y?q$&DUt>!(6MlI=B`0pGHZyJfdZ+XY>Xcu;VYBFuq
z+K{yC6p|eJtd@M>y8A6VR{WAb2g<)gz=NKoku|IDJ@li>=Y7e?zkn6>Q$aZ`Ajzi>
z@{wztvG}?jbSOuI<HT8*xT6Rp4bz13@^jE))`K)7xsUAU_v(*4uX%6$QK&i_Pu1G)
zq<bV#{nFXYiw#Au=QltH<Dt6y0cg9?pLg9&g5vIP*!M5fDWmiwL|MLqH(Lxza^Wd!
zo;QL*@@JyTzg?~hH((xmGpJ?pBj%j9mW3V*Wu_^cS?(mRE1-&bkK$hPN2!=!!n3T>
zND6&tL<JMhGmoFmFr*?7-TF@@m-#@JC;3@h=tR0>zcNG9&yf9nC8cbL!lu<9m>1Vl
zYdiI+bJQJhOnJZ>xGyC5<Piu6vSX@KOVzT`Ynbhn999CJWZ&q^{CEbaH6;)u%x7Zm
zvxU?epieJ71xy*?L6*1m$R8%6rXgC$b{G$7C3CRm{T}ET9m3rD3}hWgT|hOWQIP4L
z3KhDZoPDgsGMk@4<>?5zOH0J`8{IHtXFeFch@qn90hDn+9qVB_?s{#?JFu;=m}l`b
zjbDK0;l7kRRe{OBo@Qac_MsfJLP+Z-U{jzw?mD!TPOUdV$t=xfQ{7`R?O+xZ4;n=!
zSB6khsVBASAAtN`yIE|z9ZB5zXXN#N>Wn4rOfO4<=6~-1)6k8g|MDP|)bZZ(iD#i?
z**(xKYzLn`i%>JkLXf?hOYPdz%q8P9$d0Rp=8bnam+Y*%CDMdyFX>@H_H!Zo?^vi*
zcw<z@W#(UJfg|twkt*ncy18vQ)2L_gK5i?Xc_Y#G-hJ51=P=W>udH%+GTs^FPUd%c
zpYZVoP<`SrW|gc@m!>74MIUc0R@?!fzpa_ig?vc98G|5l-%t19DAOM(v~6BO2TZ!*
z@h;xf`EV;sIhhF+`^Q52ISubaDrR|I<^nsL1F3oem~>?lZke$LbCXu#n<+{3nrn?^
zPxc4}E-v`UC<&Xq%rK+A5tJ)@;Ew4cs_w)0DYIhnRx#%@UJS*Ke>e+k%?Nho2k%;U
zG@-&23AGy5uz*!tK<48LSuV>s6JoJj!Dx5VO)p`Ir=zLrz)TGO{GN9dSU~9XR_6L@
z6v-Q=pyj&&EV_4$Wxc!0QttDNfc;qr@DC)#mUm2bU<tK9@uT2JPk@)#;iEUBsoH%B
zll)>OW+!hG{T8}n`L0gzbzDu?CdN?l#dfBQoPY)^lhAt>@Az9V3p2AgPq*m;g!k%;
zMmJN*D(L~3mgm7n{(jACv*r1CQEqg82G*Q#AXU^S^?{9PX!1ZwDMtHQjph~jBs>R=
zQWOf_YnkFO6KwD3VcY;cGMK!a^E)1kV}(en>oyy`brzVtb}XydX~cAQ=ZXsT4l(&w
zE-MOq&bB|9gIQ`*3@=_yjcdkX)0r(`Z^D>K;2iY$Z3N1eWr=yYJ6QIu)zGxO1F~nd
z32FJIOcywrtYRlo8sFVyoSKU<e(7jhRl;%}wXyP#UC`NYHSM(t#Fn3rG3AX&(dTsr
zbk2JQ#fvX+Z`J~;_IU_yZdTCkf86W(%M~&F4bNIsfT&*KLw+aDGuv^KF}E`Uy_(WV
zej}Z(?dCe3cPOQu+yQ0ML{!FRLaTWK#NOmxX&Gl&u9`Ee?EP44ugTyat4EEOCy*|B
zj8HVf7iEL5L;aY2@Fm3^tCsj;(al?&fy}j^MsM2kWGp7u*?>WNPn7+8MmT=3A4<yB
zUN)_%1=D?-Sz+^Rs+}+oB^L|a6C-26xQi2ci8>~Weg-8Lzp!gjj$DWF6;1D&GELG~
zwYHB&uvb<yRq`Uye9C@E{c{eLwg1KHAJ1m>j=^;4jD!;2nq%%bDe7cL#Q@h1!QAH{
z+-YBn2O`IiOV2u1y&|01DMK*x!y+>Dn84yaTA9!Ksg!CHNqJw~Fx`%`E`$8Z{(L0f
za^RWUi+?f6sdTY^>wl0c;dd3QbD{yy5avFeOo~tsRvN#M^O77fzO9bs@8)_$d?y(6
z<QV~7YK3fGFtd-J&Cc_Cc%JuM3YIydDpmn@mX2uW=81Bbei-IvO1WdE(VNEcsCeAS
zl0VPJy5D7_JH1qly?u;j@6~4pM;x%mZ!}ezSaWY4zfbJYL&=%p;Mte^1U8&!Rdb!N
ziZkCU|Jn%N+#^=n*9lwt+G4ixSN3|vL@FJ^wNo_4?6h<UEnR?lgFLY&hR-}TH!FNM
zEG5H}%UPQHN!HPI1v6E@V2+ooKv!hP%KA1it7SZg`k^nn%;fp1^tEK${~YKpM6v&$
zRk|JPj-4SzAZv*rSxcqpb*GixG7lp20M0OOy(#)Q{ownd-x!QJ4F&&IK<0(@7^Yl?
zxliYyg-nKup&Q{|d4KNVbf;$ST{OpiZ2R_UX#edll%GBU29w5M)$!g~ZnY267y6Ur
zXBoOh7~{wf-KnuYfiiQsF5GHI<<m--!l02=aW;_Y`c`4pG!yjPkW3}6GpLO7BMW+6
z6TOaZVtLQ|vhuXv<ds9reuWKvT*i5$mG4;Aey)uVo56yM{s4oiZs^PJSA|z&G1Fie
zKv%96anEnOgCmvATg10l|6}179x@m62(+FUhNZ#)o-^c{T-0TuY|WpbXE&I#_Rm9O
z>0<1x^2PeM22i1LV;b(OZ7WSc$DC$J{KW5Dy#M1`ND}Hgw~LDPe2F@96NHucV&czJ
zFeCaKcqtRf^~^|Y>k)>TpSOXsLm?C=1YqTsS5VgXFK|1Xh^-q`@P(FB?OWbU7h1{!
z&htEHWh8EVXg~!$hGE3Ufn--W7|TD+MoZfyEFYf*LA@lT8L&$%n0=KspEg9@>$eav
z_Fq9#d`vJMw2w7M4o20tlY-`g1xA|kncd?xf3|NE>(l3BqSTxcKiE*`6GP6lSFz@U
zt1<NXAn+NQL~(8|*b!gI%x8?C6rN4rwJnYJf?Z=JJnL3*_aCTjnTCoD3d~v36T^3N
zUa8)7rU>^1)%;^Z{)TF>%(Fu;-tFNtrYkvz9AdFSj?gfq9!g!g*D-wnXnW{$kHlY~
zs|Xj%E_@IqlfOdj`qOabn+G1wwL|TNo)q_S4B0GAq4+2K9(9X*smISp8E5b2%us>e
z5?3^z`U(nK{e`_|gK4j)HKtS{745nS0i*O;;=)zXEdGym{MZVbbB9>Ry)hX6IUnpN
zrlLu56jt&%Gi6i_+}pkrq=$LW^>Uuy`Sqea0ekY9aUFWb_aUV>pTTDBzie*%JIhEt
z&FU8$^4Z2oP!wEXnH#M5S!hY^KNe%bvJ%nerYS}}?S-~OkATrh1x5XPi#0DX#lli&
z%Ch?mQBj=xq!@^*vsSqAKNAdITmuHXQz)f$3fTAMtf4W3QF^62=z^nJ>7uS!)inhj
zhuY$$m)x_^HlIaY;yFGmA1d1@1-<Gh(#EY~KE`>>rTbZ^a<st2&-I`i7z4VEi$tr~
zW_Is(5L%vJLizUNspY{tNJw6Z)iakP`%Qwbi>8rPTnB5fH=~N1-I%}0IMhC_fZ&0b
zA;l*Ui*2Tp5;cP2k0Q`^<9XSAUj%Ig@BaKclbQOx63#E=y>dVC=gq}0V%bp@iw#{0
zyGD#9mm%ZH%K@luk0Vz9lf#sjUm<k97CP$O;LG~eq}tiR=M&B}R=J`^<AJJ)d!b^$
zZ>%_V1)o1GP`aD5f6pjL^M1F`^u&%LuLe?6@^bDia3{y=O^_Y<6GXLTflKrz*7Qde
ztNoAr_hK?xQY-I_HF_?Z7ssO7r7Pyf4Z^hk6UgnT2pMnEQE&GIwARcd-Q%I6>O#2?
zACX2H&K_`VvBZ|Ev$5(?FnW#Pp2_a>NZS7dQ(C!-iLV?m;KMet;ptV_n=%z$SEo_U
z;>WDDp8;6)e!^VLo<QEqc2=7*nVOEZF|Sb(sN=bTQ~y|z*Mq546!nsOOV(3Kz(dds
zb|jZS{(<6ze<3aBD;PVi#<&7IGR)i0G{=5{%(()}?!E=rn$g(oaT6Mn{$-47dFT6d
zquBeGxNk;Zo}0(>e)7w5zr1!<Ff4(>Bl970?Re~%7y-6veK2RahIM<&{aHF2A*G@k
z^5$|@{eOv=b4;L&aGpu?FXQvr8qmHLpc~IV7_Ld=xq|gj({D4&8n=mE86?4xXNFSz
z)pOvt%?X<%Z^6?_55odoNjs-ZFxDAi^`*5aJ^d^1VU1#TokOX;_Az|S0IZv|3>!}f
z*jC3o|N4(+vOgrEp~ZNt;2c2K_!MgAji#utkC~732vfYe0+tpF(0Dn|hko44WUosE
z<)pP3(=8ou@z0)HdIu|8ewXQf48qFVDxRnP2b?yCa&Dg~Rz7J5tIkJEb<0hx+sAj;
zQ=K8<VG4fVI+}KESdG$OHbB7j8Q6S~`zE?Lz#H3f)OLXHPPf{USIi-nxbwDX-1I*t
z@d<GEOPo*U-`>LayVI%Q>SK`oZpB(SlV01f66-F{#EwI?pbdQ?77t9NQmH3?+|~u#
zCvoQ3$Zk;f<b&9@)f7LTwjrxIKOl9`AhaHnf(pTymHuPSf3F2hQ+Zk~3HZpq|1iY1
zfhL%r(aDTn45E4*1}$~7DJ$pz^BppgYx_y^kPYS}^Gjrja}%ic53VtId=eT99Z0hy
zRXi+<!?c_v44tcwhduat66gvQul?wf=th}4IdiOIG|I|{p!1MYQR3Px|6auVAZPR;
z`;oh#X7O9d^7w~k{cj8wS9Hb3^FX$HOQC4aMW(&{r`Y*M3)<RsoLe%>-Tz!T8W?<F
zt*ci;RLSokJ*0uqPt#EK*H}SW%zc^NE(*<ix3VuiEvVh-42zk@dCAlS&{V>8W}c~9
zw;S^F+n{NZ2`c|xiA~(Q>2h@}l`lUC*XD44+r%r-9BD+|CQKvw-xINX4d>5hg`>`O
zvgmtQfmJu-u(G9tB?e|f)vu#CKeJFMIFp8XtS_rMF_ILc=b~hz2Ogigj9iNrpnp^<
zW#8qR8ce|INX}$(vmnW_AMz`X*61PdSyreJ?{(pt@`%6H`Qv0Lvw8yooBw9MBaL|<
znm3e;bEm|^QDod@57Vxm!S8s>QTKQ=v+FjQPRR#g`^GX>|6O3F{Ct%<`C%2!#l%G;
zLAyBvB3@ckY=8~Ajh_J7b6&7VL6cEq(<o?{#ZlCs-BG$s$1>mZj<F*@p_At#GQHd?
zs6I2Xvt|SDu__fLw+8c^aK8#O<0w*<z7-71A|W_%5jL4Nv*PB{5c=#0&#!NX*V4Xp
zJarJ3J(z+1e{$Vq)C;H?xfI8mbN#9Ix!Syv^GTL9vx<ZONZ7lMq@SI@dEQH=oqinr
zr-!2I)k0V`+nyjY5&d|ERo5^DTwXb1l<F=E?H<l5mz-iAef;Q}z6&;GB#_%^&c%DC
zz<|tSf?{?gMYx;Mmi~we<8V<IYA326uO!2Xvq2(Z?#@F#vetEbgiBq+c%R}9R+}*t
zopdv(;LLmGvBMct$MwK10sT<A*B!o`kz>w|aeUVLhsi2}ain)QRF3%u&EI!1t>Iao
z%Lqehej+rac7lEXpV>!o5(U+SVAS2Aq=`N%NCrI?Wpje4`d$e5fF-#_1KRIQhGHM?
zkuI4?tp%gNGkGCurt&OK=MTYn|3)T@zXG+};z{w;6UHWUZLsHj#^!usdDYfbx$g@L
zn=}<wiD$*8FdNd2%mB^3q4;v$d~7Z}#WQ?Avo}|N!rG-PQS~WGzADcXFU|AAN1m&(
z%~gp8ukJIqh%^@CoJzKQSLR(VP%76teLnfKfTwFAaf%6qd+cJtj(zF%b>7{6)QamY
zreKV-DXBq0xAyi#W1kk5ey%4;$6KS<&cmz>Tg8x$?&SPo5sZzoLCcA$q?(u^s1n^N
z_QDG2`0yEYA!~%n2T!0gd@T8`cEC$PLoxQZT$al{=M@=Q;P`Si=P5h@Z7<Gg{gR>1
z{x*?S8(5<D{RiRY62O<1aTI!CH&bn0t9Cj#19PqwL+SDqp6R^mF3YbH3mon-MbAB=
z>Pejtb9y?q9=azME?9}>hJk4RlPfyUJ`0TJSUfifq&V|T9e%KgU1{%6<#*12xi!yK
zL|1^Wj}5kbDq<hcugA=`-XvXlAL<{TWrz26Bd<Lvq}`VRp7nNAp!=P9C=JQ^zzcTm
z`*^CjGmhLiGo%CM;2GAB>UXRoyZdv<@ybh(99LXUzn_LKPq=rw$25%a<C*ju+XX!~
z5aV6=zI#~`g+yCZ`O1}~C{~jFqR%WnGK6d14~5P_Dprw}0Btqq1h1}vQ`lNESk=IE
zD?f=9FJ#n`F_&Vu-(cEan<3oD2`$3sQ`5O*Og@*-T6P>}Peu(VN1NXn@N8IhlZr+7
zBNdO4V#closI3tNtAm^oCOrbipB{pNugEeRav^StImUWDf`TX3P~Ph|&P)=(k#qku
zArh2N_OJl;EUt}AgxGJ_*x~!;l=A*pFdj7tE4jD7@Griz=X;%$X*<~K8A+s$<Lo{A
zRwha4#&zB_(Qw9m&MD=-l@y(*blodv*IZ$xi$|lX;I_Q5h%*X%UWGR^c#eIJ5)!3%
zSOwSFWFN!8tiL^0uT21H%5qd3=CjJJIheQ;z~q&H`RlhqsJ}k6#*PEc$^@PfYXA^D
zC^KXlTI5BOa+@6~?z;)WSAJ#L@2dsv^d=#^|354$otXAqJ9xR-VMWqZ*0fDP?<1Tc
zeR=_=S@65vq6l(v$Ys9voV(Niu&6!SDXO|Rsb!yfh_Mf%N%3csSUi3d=;kd0$@L8^
zqjwK1)?13*Oy}^KJ&cM64aDY43arU>qr_(J#Tm(8>uIaWn`ahePltl!N-2DwJdT`F
zoM@GylyXDU$hvDkOmx$Vif@<Ys#hMYLGzGF4VSZ)2px+ZHwem_7m(ZO6s+?$#{Bv(
zpz{b9#=01hQk1g9Rp*43!9Q5H=&tD9W=&4k{FyuXCn0vR57S%WgVOm9SX~&;DpL+a
zTGl$MY?+JgbKXJXpaIOVX#{Fpwy^3o%gNy4cc{A`MZrB+qVd*p2sq05ypAQ{ROyZd
zhWX6GiTiXk{CN?`wVhG6)Uq=M6OAr|Buy&MEBB(PtoLAOyM`&O>IAC+GKyVg1li5Y
zd9LD!*xJtrVol|c^G_DU@LhxD6c>!zYJ?sC>Z93)cx1{fsHz))iQ`jQc|U&&51EQh
z-I|$EP9M5<dm#oiI?~wi1oSQ&g{EI0h);@5D6OXpR$OXg@p{~U<d6YRQUa;r-<?o)
z&=OSY<??3lI11^unmX*c2O(UJ$)_@y;-U!^-PjFLMb}tUw;P<FxK51N{S#KyZejlS
zlX<TE%H@c2_N4fzmHTaAkhgLInMURaikElA!sL-;b)k`!r>;hO?sIg0F^W3uqQU9M
zRGwS-19I;8V=JHERb5>9|MvuxR<GpymT)oWNiMXuUlbB$!Kez%Rp<VX`xc%8X*QjP
zj=TEkdt9GN#(#$^r>9fa{4P|#(wTg=@=lt`HA3r_5Z0cS1MR*SA)wzd7UIdzp{Faz
z*`G5j^S`s$K01&D$Eq*sg;By&FWPG*!+@K;I9Fj7-TR{tTI<}=Etq?jyK%nd1uv5S
z4p??{0(4AKvuf2-7R%49g7qR~KfeNno4e3~#d@gxty$=d<DC1SQ_xM>9qn%EV~0sI
zb6aQ2niu9ndwe7Jtwr)&^G|rIE6=}-@I&3Sy`W5a2MU`=WN#K=*^GB$S#JZT_lpNL
zUgi0;s$3>9T7vfupJv*eQIIF&xx?eZ&{8sm@7SbRSihKLLwPUa^ow%y^_@%yqrmVk
z*X>UGLuX%C@;ky<^|ENlxbrJ3Ur2nPR3sF9w}R@*K9HxJLjh~HfYN`iaNLM@cIIlp
zuou@Ad)brZx2{zCTAzw*>OnjAgwWZ;8Dmy(R{E*&qGVbU@Y9XS?miWDuHINa_A_Ip
zPoQFI2B_zC$IutE$y)A##(%tnY_H|uRcVc@+BrL{hZ7xlUrqr{TLkYj!?9(RGkOm5
zMF{@PT<3Ey=?6D<o|3Ro?8`NGJFMwF6u+DGA{J>&vHjk|s=*SvGkh|JTTUmF=ouuv
zWy?K{6=Fy3Q)nz5Mn1t7EIg6(Af7p~&ek8yIs6`z%-eri)n6aWBof{c;EOuT?l5x2
zc<T1|8Z_&-n)LdPr}9_VS@zVcqH>W1pEWu`V{41jrsXWV=9;MM+yoIGYw%-2PgMQp
zE%<AelTZ9yD8F=+g>nXlE@~!Iz4@P9lRiz*&G|#L8vP8utes0)7x*r=v_JEiGyn=>
zeg)H#4)II%P)c;D5W?n+!&aZ3Y-}g*cDf*Al1)#+O=8Ku2h2m;Ev~q6dmoDVVnc4_
z`eYDqPazv7k<Z(|S-|HZ)co!XQ~xZZJ7;A0@_H1{ua9DJw})`1=3J(Weg&ZuTR8uT
zvrb#Dv)Tjw$bOYOQ*HZK?*Ggho7?(8Wv@)g`}b$izKf-jla1hCZ-n<Ui<v(^L*$C}
z6gqhpvcG3i;RRz%4NW5dMVz}oc{rX*m`=?r!^r>30%~a850%m;Xo{-gU%O&l@O0Gt
zSPqKaAf#z^V9U9crt*hE&;h=8crSy3+h4@|*JmNSDGfCJzKN2MNA4{FO0Gc!gQCki
z!SMe7z%BAHOL${Ib(NO%B#-C2`}D#zzhY3vnoypaa}g?>Q578~-@ac!=owEIZ+X6Z
z+gBm;zsaB;J{MIAM|l}s6UrhUK*cr#Oj~;l3LbG^@8tm0-tJ)1e;2XHV}WSP>7KsR
zXQOgvolxFd0KDgr71*8^f=yb$;$}Rl^kPNb(aES7J5dO4^T0ceLn!vYaj>el1(q%1
zjHypCSRwz)G%iQf=7pD7$@K$Fd8(M1F4TynZOK%2kngL9^IgW?JXY1V3?tSXkTSnN
zMDWf8^AC5K^vG3K=6{a)m=mkItd9+kw?oH;P}b4^7PK22g{(7%RJLm+YUUhg#s2o#
z`iD8*N^qjITe%Rsm!CtN*?;RzER|>d0i8bWEaj>%#eExrg*X(o)pFDhSxVVry<qOj
z^O8TgL-@};yBuD@nM>xR@QDz#2~ym}S=pC-qsYf$C{tu@7pjX>sdkYUF~h-_)%P)I
z+?RoFT`zEsz79^K<&;vxxpG;b-7Su+A~w?jJ5%`n-)bz|lE?MOkm)RQ*I2$gT!N){
zS5WN987$F^>#1hBAh{h%rVnbxP|ir%wX!!F4{$)Xbu0Jk?Pm>^m5}}L7|TgBL+yqF
zv0ncm7-aHJ&QI3h(<hQFb%Cgq9tZQb5A0U02boN?rY)sh6VEFXU3$t$HbNAbGw%fH
z<c$61HkP}kE0*;Th2$V>JTK$-mj&}sC(RY~(uPxZF89mFi(&&i!s?A2(PxeXbpbqs
zSh5rJcn7jf-6~3&v|`eUc#JihNT!m-lt089uWbq^x48#d%BOdr{Ocgt%$Y-1!g;RS
z<RSCX-^t34#$$HaUa|0iA?KjkP}B8lC_AwL^Z(fklFR^<C-Sp>%1CUmI|ZSWMxxXq
zmF<%CCe@nj>XA1W^L(ELs`^x`ADQ$g-Hb6TXX;%Rljnf88t&WK@fJRA7)nt~BT3Wz
zlbE>4pU(51(7d1Yv3mCmGWcjqr#}1O9S>X7?HPbBvyJf-?+}6U4!obW5LAk5qVYTf
zaufc8)~QdqAH^ITo7R!y!F+zFOrgVZJy8Rv)a^cXpqOz+R22*tDmU)}!{nz-VUq~g
zc<$_V)Ex49_W+9Q=aOnexR5z<0rNl3vzjg3=QaE{Fx;~S>I2fjs{Rp}{^ALpd|zR^
z>?>4Xn}nr~t1xKrn*XEd{NrLwzc)UTB*`EoVUVOvl2G%UlR=UsMUo`5X%mu>BuQpz
z2`#a)En(Quhc+!q^_)Z6me3|Pgw}?T{0gxleDCkS{_tXEp8I~zxvuMdp*nO9Z=92e
zDQhNiPHqt9ypCh1Q_dJHi#~^09$YYOD>Vc@C+$jaIsIK5gzj@^!>bKa*r^C~ZgmvC
zbtRd%_2iqM#_^tkk*t0GH;mB@<|88)a++;ADOT1}_49PvGFZo@C;pQ3!7gfj-hZHQ
z%$F_y8pV41`)b=XBTgTBN(!7VvEAW#F3qu&6)jglowStw1$)?e=+9DX(>$=xOF)(I
zXlNgbd`H<t@j1t$P4zIW{oF#P_Z4ENJqW7r?1a*nqBn1?=oX&$KRLRu2RDz}K}tF%
z2ZB4V-x$RfH#@V5{}MFcEzZV!y;1*K_<lZLP+RSEqQKWeLvEQ)>))B7#pcymbbdPJ
z3(u3XXawr&Y*=&A1K*sRgRzF5+^Cs}jb$A$wOn|SUZzvMW4WC9GMv8r6O5ikNtj^M
zlOjbP&iP6cl~)LUV4s~*@P`M~`27E~!^0_Udl)w-T%uCr%R)1B#r(&ua;A?Ye3x}{
z@Gtj3pSDU;tbb5k|1lB_iXh20=5NSd@Cy_?x(b<TMxsAm4{6>H!Qt6jJS+SeZRhR?
zU%WY{9L^y4-2?5@w}O7FTGIcq2;*Wef=9R=wr;oKnt#mEy3G<ZO$Nc6?!vQrXT7ZQ
z5iFq~rMpw2$cR1b%QqK}!~Km;-1d4VTP6HXwcY?)p|RK=|CO9u7eM|XZ$7zq6?cE-
zz~0ww*}r%V={p!G;e$AbH|?kFGpXcvz!jrMnKJlpgoKR~$k?rsg0JZy^j;@~{9nn<
z_&II#6`9H#Vd8lbGnujInI$V$MF?NmweghG!xp^KUC_HnFdBZ_Eg1`KcX5yds;aA`
z^evsxTQ?V?rp)JpVfJjgDVDr<Z6Y^)v|xgY-T~KPXnj?~kNfxKmZ*Bt)|+v*%}~tk
z6@%%wkaaB!$vNt|96d*grdLHqHL#!i#}V^Ij?9@$vzAH;wMzknhbQ&@7O3kiiQbzV
zQ0n*_<-RyWk#l>a@3om&X;VzDXKNtC*q+~P5xMOM(H&JBNjDEp!c5&6ktr0tF)Cwj
zUc6P(n2EbI?pq!cZo~Q^W98>h>dEa$JlhXkOqy#BkbiO#WPU#mtozMDrD)kr+HS?U
zD@S3@H`d&m5l`jLe?nOwC2KcqlG|3KLXy3KN;NGM7;1&PzgJ^<)kA2uXvet?o?MOj
zlJ-;+`5gKNLsoU?z=<6&`=kTAt-L_*(}gE{tsQC$>mhFSVWH3N6|=DTd(A}mpZO8-
zIiyRDg4Gq}HU(q<bwDWVhhyF^7I)-ZsF*FW_tV`}S+R_pJtjc;t6!nvej(JhOl0T(
zIfGr1;Kd|=FSRUq4jUb&pz8Y}<lzv&&#&wPKkJ^HuTO^}o&iCh!!Y~laZ)ta%FicP
zkpIR07;<<KD;AeaiedZZs&&1w)G}F$v{;GWrkkj){0;^7I}6Px{5ighHRs$E9Dv62
zCCw)$g2UA{SfiYT_IJLcoblp!c;-hbc=0|`m08O>SE$)y!5~g>mRK?9kXm2UU7Twk
z9QMYU6Z}-<9Ndof^w9FQZS62j=_T$3Pxi6whHX2ou;{1~qizRtz0g$jOL9tLSC3-f
zx1Cs@&<d&2K>FBc8QSz-$IicobJ{W+F8ap|(^s6OxI#6yE?Ui&(`IA7+eP{0#pN7Z
z6u>v##$)xWBI$^;=rj_)3!RY?noRA`?Zxk;=pa13_X1cUX411Cy>a93UAd^ZoKh@>
z&oagotvV+OU*>tKe#bVdFq(qZkMgCUV{WKucTvt>BVqFJ<ye+$hpHjApt;%sA`{1B
z?bv_eLVGi;q*svoUhML(xN@TC@9`gC2X|sEx$dz6bX%h6{mNndrooT(d$lEs{q10P
z_E0SAGmj?-U0WF-`aqkaA<lXiBpk6oFS8Y_o$Ue*zbf!#StoWf?v70jb6A^ym2xfC
zL-~eEI(BgtI~KkNMM(RSw7vZ}qop^tF7AM?rs7_=*-bN^s##w$QLw=eOX;ooQgFY`
zl)b$HzPLMLt8pmDJ9NRD?}bMD*%ISU#p3Rlv)H6siDkRHV^K{l)ckFP%Ku2p{(F+}
zY&b)--7|>(Byx=M63m+EjoRXyux(L1uJ;za_R=e)_^+RAn61VbzbJHD_!%-q7Fbay
zyu~SHly>C}6rB^E`1lX9|8T)M(I#<GVjs>ZpM|~y#m;N=KzKPO5}U=opgQA@{N{7C
z=+l}ar@i_IQX4y=X|Om0uBTDN{XI~$=3CNj-6zKdh>pUZtEpr{G>0bjXa9pDYu3(@
za{ubbs^hyPv)pNzUtKJ#4mL^Y<1(d;A)cZG&70iB*=5Lk4PS&ON$Dka6uckGf4l)(
zYI>oj{X0sU(@f@X&O&z44brR3OR9a`p|sxyxIQ%obpr>99oh*nG`fK96-$|4w?g4-
z6HXl%Me6WQxF={5)^E!Ix5YlFv5G;>?z2)|yAo*b8!p%%ptkxVn41shkv4B49O*1r
z*%Pv83yAzqgJ4X2`wv+r_UFoRC9p*)_Mu51C_h&X5ep7dsqYiDs_1934p@Wt-gH6L
z^9K+i@*K5y1ZUpY6}9o97`S0Q*hu5C?1K~585*I+`x1q^uHfdE{{xC&58?J=Zd-dw
z(%t_c>C!u4i^*$<jSza+qGh0pkLK1LN1;C2hh5_5qhoso=CuvQ(8yJs8z}a9^F~YC
z=0~%=a;@mn6RhIY@sxVN4njY^B_FeByt7vPhJKig75~iyEzO`(c?MUW^+ipXIfTuR
zL)C}r*nDEMm_@C{Z|z6&ttkeb-xaCw>^x3?suVpGduT%c9$e>DNX^@3P?^UxjD+^+
z&DNk;@mY?25YGD9zm$ZIX3+a~R~LDUocg~iI&^Ig#^@7p(3OR#8dxmr3k7Svms;#+
zM6U6V{w4JzTI8bXXUKQdZxj*Zjc*>!$AUDGp?|uNO8Y*REzb$I+O|cUyDA$>8(pLd
zdkge+IRQ3)B5!ockyMF;B~^k2hn*cJm@F(cG+%|AUliDQPdxi}#;g>qf*G3L+_riF
z2Ukdx`K>swYsL9}^fBlkCUQglAdDEchH1cgu#8VYr(xar&JU}(BszqhPp8Tzb!)lQ
zFOO6y;b5G$n)Q$3q{qWMqfOrgygbt%Yl}CLKYgXfHLJ;4Xl`TN!_iQq!NNk}#p`jo
z#Cu$4F8R+IqsC06#u{hP=ZC7lSh@>t{(ZIb{7Uk6zC;-l_k&{aU+&JXLfbnzpIVoC
zlkV6F$$8#hIe+*N(aR!cr>84Pb$O=r#Yus6Q=6!lyb2F}JC0SiM$2X+{IRk6Ath9a
z8S+`3?ABiJ_@)gY#p!2|`mq3VFXlnyx*oXm>0r@;6d?9*d%$dq#5T>V&`CcS^<A&3
zLyUTH{nl%cmH!bkcm71?8`HpkWh^wloyftdO;qjKEE~@o&J__qlHz11{<2+sC!TQ_
zeX$SvFKd?F%7<}N_&1z*#RPrtw&SF;I<z<5KsC3HfT^iFD*NdmY|COc8Z`y8t`|e=
z7O^|M{t4dH3$En&OH$5+Xb7Df%c{vCWLkECROeeIpKI;#!YaYpjfjI9n8}$xi~OzK
zJSd&^igXDxC?(dF?JNI=JUb&kB<`5BO{>`;4a4B_U%>ys|71;i7q%X;fRlFpK`ANI
z@o~BrdkyM`B||5pc1I+Bu@U{I?d;jnYB)yz7=fydBjvX9Zz*^{5xh*9hEYDJ1<&{`
zB!v5O{r#=rePI`siG2^5pQX#qap>vh$@fl-#_LyI+4JfcOcUPUBfSM1^v5#Lwl|WW
zFUbJSi()YJ91M5FJY-YQ6%zxz*idu^^f?#Qh7P0f@_o^NEzM`sswAw+abn}^R$TG*
zF6q`T<#KEj{K@M=<Lo7Qj2yr*GuGn0Q!38rHWs5yRP3C&2*<P^%6aD#u(ekouGRNr
zjeb)}bE@#MlndQm@IM-l3BT!vS-frjVysW;AT=A!ApgWX+3IUDx{R63wfrZ&Oz~vR
z+zSx(v)Jvc*9b33Kgb_y%JBj1FfUnyiiS*Bys8shx7oAy=}3;Al1<Is+Q4ejT=wi1
z#yKArb7ff?G<1Ld|2(+jhS~e+v4s<Vyz41Ejs+sy<cQsEgSb9OCD%oZ4AyV?L_5X#
z;=2jp$$wB=BD9f|UrAxIM1rji@bX+NdblpcgnxwA`(ihgUP-6!ciq|R``)}~&;W5h
z|19|jSCH<KU?;_OqE-(LXWKuZ7$Yx!P-Vg1GYM*J=X2nX3t2PB8y?u(Vo*;r)}@KQ
zP;YZ=>12h*eaayD^K6bNx(iPK+OU&{x#;aEf%@`Gl4`KKY#)7%O5Mg&lUX|)l(_=a
zZ#KzUr8>?Db`aioF^jmYElx|U2IF&G+1n$BR6mDN==ZbH?}IN^mxPdQ<6=(r6I}um
z9)RyQV-8)G#Fp9nNqK7;2bWmUg;mz*wxESfuBv&x_$~Q6*;D+$2{_1pCAuz6qlSuy
zP<x;U)GH^EiO)*WY4MPh|D=o0V+S;5{zolSUxH_Z@WW>PNXidOSrgD+_?#_h%XG2F
zoqY&Y?vbF&7n+ILGERz|jal0Uh(1$uPBkA5$~bGN_-G9#Vs2EprWTpb0(Sdr9@QBi
z2m6y=w6VuBZgKmG6y4vHl%`dPK2jTSoG4fa;qB<cs197Ue+C<O-vm()gRmn08`8ZP
z4B-#IL$8VEJYh(z@PJRoFI5uX%pb%4b6!Zg!_AP{BDnaC796+T3NODFGj8@{in<bt
z51d0-aoYyd{~LyJ(F%0h=geOwXgG7MHJIj@VD;^5VA=Da=-D)8m2H%m-&WC0)0u4Y
z-9oewI#IaG99H;?{M>KZq}cw3S~sSWQr#V#|1t+vk*$=f5PX+YYJ9v`&AA8jMd$V|
zIkk8xS}mC(`io1Ttdrme?h_Ct?*_=Za+S&}&w^pcWU;ILh7x*wOI}`ri?(12rK=ky
zo%#<-x9-lKPj*6e)(vUr&+W0sRCu`W8YD%6G3D+T9-<xHP`TwI+zasMsIwAPq(*W{
zq!}N18P3%cE!n3@<X8?QVD7AaaK|Y@cr35dm|bC*lvhhl14XAm&&Q+>>cZNGI%!Kl
zCx(&1nA^^n?d87Q@XalH^J*w(t(k{0!mmqXBhWCjCp;hgm69TyIC}jP@);oVl>;8q
z*?GcuRN~D$>jcl}>m_KY?8qi&1Nr8M!E7>P2I@Qt<@EFU@|FW@x!UeM>|PiwIBZTB
z@#r5Y9IzCdJ8hKh|CkAyXRY$Fa}zk_Q4{4%pTb-A3#R|}Rj72jMdrg(DbHMsic|k9
zR`y+r%1cPQ<Ad4U;yy(W4d9CZ1wr23j#yjZPuYtffp3*1SBiI8<%%ohbXA4H2ZKd#
z@<=QVxhiX4Nz`l83XF59hJ@supkL=x@_5?>KAYKto$WN7zh4IL_o8R&s3jPMb`ZTL
z{X|dnH5%k(i`~D9E^6DAnDQu<^f~?2<!Vdek$fuY*2GZ8E28(xPVD{UJ#B0gXZq)#
z$Zcr=*Ipb+dWRQa(jlC$_wd84ZWCB3?x3P))2TYipHk*nvD?DMw6nE8?iPD6B^FR_
z*;K4vVguUn51`!er^xOv!sZWOWW|PWq{=-X;PH<RqDOGQTswR$2M#(ymB;hRc-&|)
z)6XMSu_ulRw8R?gQRw}mFPK*qLPd*2C7;A+x>@kMZk~jsgSV;K@Sjw@|91*KDE5Bd
z5|#cKfC;rPXt5NC_bv$L{b=D8%cuawJE1r9orjlCcgE&-b+U~r9rW+6OTi+ebD^IN
zm;U=wemQ0`?kB+{I_{4X!lKdpSQsZ&`e83W!8?okD(P>TaBEY0SnNFjb;k};>ZB1A
z?B4+tlWs!P0iktA+KVo)E^>ly9_d#uk&@bX;3S`ypkCsQm7C+)F(Z^cKbxa%|IQdV
zJQU3ji|69kE0W^d_2ic+dP@K5O!=boEw980ZMMep1*LfIOT0KJY9ZSUwqVy~XCSiu
z685fk68+PTSRL1yDo#mcrMJMMJI5ezgBfa1#|l5;KeGS%-BPN!n~hFP!R*Q_R4-kS
z{kvR~5^_%pE$*<|O>Yd&aharF`Y*Y8c?yP4f6#thCcFK41HQZrX0I_(Y}sKZ+74H7
zW#8-6I(H~k4V=rGQMXD=$4#Z|-+u##?IW=2U>JM!`;PrL>=pWh4_BTa#)=nya%uN-
zvTd^P|E>`{$vq=UmE|q=#s5fY8<SY^^bj=Pe*(4#D#3rsBy=F*M>stQCrs{yx}_7q
z+bfA|zTHXn_nRcRV<O(!UdT~4=Ggsdf3XjX!^js)I40E=YZpdQLcww>oudNhD|g9i
z%|a}_b6L{Q=plK#8RN#YUTFD^1MU{tVeg7$)Mor9Dcoy|pZ_z2v(Hary~u`ptFBVY
zP0?Yt!VERP-6-ik(uDIj$g;<}UaU+$Am1F`g^fc$QNflLs?z&$w)PwD{=@(OA1}zp
zxx(LAaGN5<PCI9mIr{b<z}0D&C9TuHpgG;06fNV6)q#Ui`zD%9V}=Qx(?PP!6g_lz
zU(@d8otXCSA^!>Eq1OFR$epl~Q(tz)v|^DH8t9FBA6s=qd;w*Lmq1f=GROZnhu{1a
zh*>wn5hjbfc1I8zn$@z$2_y7hf0sgYBhWG6G}Y_dOPUTVz>v^Kx;M;`Z7SapJ@aSP
zplWe9_NUUB=hSM^(d`)LiK$;Z3s%f9s=m=G_~?m}eP#`W&Qx%qn+u;^I|~z9bLi%E
zYqY|63ilCQlCN!$wevc~e09Q@_JRW&nnEt!){36<oD%JuDA{s^=&QST211;^Md#_}
zbmzDhGdqh6dgfqM6{#gdr)aLfGMy9$i;6X!y0hKYHQY=1aJ2!8<?77#l&<S6I+a4f
z>X&n5HRvvs_b}%R{kySO*btFB^~P%dTTtNYj%n+J_E_>!(pU^)Rp=5qed`vguC9?B
z7MWs}@QYTC`T&LzBC9{;1}K!h-E@~;NZXv}vBl63E-4tx2Dce<&Li<o{6hjJn2Bzn
z;X_2O+=Pps*MmN{SV|u?hfF&sW1gxXHl8?0H+%bV)DT}b{be*dEm(u8DPe5>=pFgm
zAEwaH)@)iSe005Y=|P5Is7~)Ec)5|N{C6MtUq4UQgA>ql@MtuuUCQ<^&On@73@R4<
zB`Z<}%O8gZ@{s`HUD|h93Z55-&CiQvwcl9GuL(n|M0Z?YBKma0rwJd-SUGd8f-9E~
zXNzZEteE<m%1T1e^;RN!c4IhpZY0_~|C0tyoW&_w85G*{E1BLlfwU)E>1=@LhEVtC
zh8$=1>@|QDKP;E?Ce7wIy*%;klzv$AV-lB)5ZUz^U05f4tD!wlQsNdz&iic)8_*2e
zf;wX3H?xJmxDjlgbYcDcuaItigPc3xkp-w8J+reYJ#8@RE_B2ApSxg`_bt#=Pi51y
zbII3($X0k6GQRyzc;k*p!PZl$dHy<Xu6iM>0~BoDN$_|)e^$HARDr3B3<=XZ(u9)b
z9IR=A$3aus-`E2#UmwSc;cDo<{X4XMxfDIKthjY#doFDqC};L^B;T3mMSgq+Woty%
zrT7v!KIw}o4Z=StI)DtGi^#m|70`{Sl$1s<C^bC{YmJdICyCwJA5Em~t>T)NCTRKX
zB1HUD3+l#EnC37WyMq?nLSKRE&UVPzHv&EGtU{mWiRdHxmlc9b^8S4|YO6%Pe(XTu
zjcK9!13#11!Dp1-ZWv}i{6x)7e@Hb+^H7oLRjkMy3Gd^)xYtluc5Lg&)vKRMp%r(i
z*T;$Id;L!^O&(6l(|svf%v#Cbk*tmtyP@`7p#IKPIlqq=n4J<H<o$URZ4mk88~?AJ
zt;S<J`f=PJmmzV_cWC238ujhEh&;$(&TyK|xqZ{A*QEfg-nE8Jx=t3nQzuY8bCqfw
z#pmY#3aUl-V_OuW^TG<L&iyNCzE+X;pqd_Jd$VcxMVN24nS6cK7@5<KjehLQ`hTXX
ztB3pm=CdABL*af3u8-p2_v4^!xfNEwWa&=iFfkv;L*3Rb5H&cOuU{L4&TDqaO8ZcX
z3Ox)q?-%mMkVvi%vz21k%@>^zB8PT(pIq@|I93d~3wEQXu=kcC3hWt=&l|78oxJbZ
zvd0=$iSEB+A)SP-X)8KJCJINw0L<yT80ssXz^bMj=nL#hS}=-l-nHh?$%$CIe4)t7
zC!_!1>1Y`C4OO-ZE@NXWIcHphB<CLNrn)Hn_$tX{vl5TVCYboj7Okz#*?;ZNa{k70
z*(%8rQZJsRF)!3yk?#UwPl0Qmrch!3K+KQGf)u}>#J*)GsEZvrc{Jeupatx|YzXKM
zAf~tabAj_avOnIJ(>32=f;NIwuHmwaM^A>nsZd>*2rsvWqQ-PBR|JUX%%>N%_K3or
zuLZ*;_z@`gje~>*VW3N0!KsZaC~8g}m~0OcY%f!)>K@OoS#QYtsvoAV7{&Ih=YiW&
zcZgea9(@1)lbnl=LV87+9NT{gYa>5EZT@ShzB(0VB&@*ve<yN9uLYbL{|@RM%w+4E
zllj7+<yhG$7z5RZAu6{Qn9UOY!wdD4d}X@mUf3rYR*a&Q`C8FSxs!67EWqt2V+j3k
z7ujioSdnA`p1<s%9Q_*jlG7iH_HPlM5F<7m34!YMsgzQ93rh1wu}}9U=z6SxGB;I2
zTDTQ994I2VC^T$b4oRJTIQv&~%<DCsou~Lf>NrzM@DOv%{ux-k;54M|$)UjQ|AE`@
zY0!UI5}W(a<OuRc<G1H%OmSCkF1tyt!G4&N)PobwbqD3#WU9V%K+>$}i!QHbaDC)t
z%DQqAHm(Xmy?L2jaiA}MtewV=n>(<<M{xEH|4Pq2JXxnV<%DIH-1L_Z*N$|8-J<i`
zW|aZTONBo8;|Wp}+!1~3_K*~}i%PFM@f!!k!11S``E5VUOuS7e|DMA7;t#Uxq6G9>
z8G+`86rg7l*yQ6>)Y#3y=!Siy9N&$D^^@8Dmth=#+)DJmiM}V3la#-t1hQ`@2z}r)
zU7kCDy@L)>&TmJ^ZF3*8acvUctHf0+%{f%CC|mFy1@9B=r#0!|9KBgSG{%KJTN3fm
zD8b_Ryn__?-j?X84C?>bNI8>)XH3jQ#^dH=>-v-AUl>58n_?m9@Di-~lfkN8CrBGG
z4}C?CUis)eNbYEl8I!+dTjS4Ay1@X0eiAyut4UJD{H3VlUfk;P3}~JyZ&@k0RNG#G
zcdDLr>s;XaZ3mG%vBZ0+%dtLwq|{P*3^Y<|iO%dJz3k%4ITnl2=9Le}go%vRsKJ7r
zyNbuW^yI)LmYlxj47uD6V@1mV$@TU=v3N*epP}Q}R`l6k_|O}hxv%heh<CpqKg&fE
zhk?D(IMn}?DEV6$!%6p{X#D6lDCR7|^}*e+@=guJWeHEsHyX4L`2uyXg+@6!9IUUK
zWBC83;QBYltl9fcQYgQw{bxFpDrq*i{^7|*5o4$#S9J54og<ybguTyfgiMMQKH%k0
zcU`80Gn2_bdjgz2Ep(;S1MUsi1sk(qB-_(5Fge`=4eMV?QSV<1KGZ8Y_s%u&-_w&c
z;0W25#q<Bb2NgTANL67*fr>6{A?|UbtLsF^;#A0f<H2=H5>cVKb;eM4ACxQhlJU}3
zs(t4ucyRC3Eo-`R1RkZ#R>9v75Z%Y01#{z9GfZu~38q~?Q=_rSoP00xLQB24yKXsJ
z?mq_^Lld~+?`8_S-<=CiHADS;FDmf64Hj(*jJO<&tzElN{I#)MpYt<mM{fkfhF@jl
zqCyH=DY%TqKR~6$UZ}_K<?^;DZg#sM<<05A{%!r}&NI>VXJn4gk4@%GjTLGJ2Fr1$
zOCetPTDs3zf^{J|kUn4=d2f$oL$-<VnJz`|<r+{7F~i{8v1D)W2hD#TmzBdNQ^mJK
zFz?!O4sQ~=a+c`*nzWpoA30%;eGwQ|exllcXR`j}JXD^zDd!LUD%X!2fEUzLd5@#u
zKfamFCSND8v+qz)3@UNIFxr|kwP#?r^$J!--GN#li0NsH`wM3ZPJ5=T>3CY*IPMR)
zXFDD}YtF((3&A^{Kao#z6xS!&$<cA^p|)#Rs{HRd=^xv}z3(ho(=(gWhZjoeBC8(s
zp*L68`~cpbt&lZt2<P?~fOZ=Nmv&4Dn)Y0Rk-1$k&@qp+>wTbpC4t&B2AyvQK+U5%
zYK$C<p7~-YnN>_F2QAQevtX?q^dzm)R;rJDBPTbF7TtPxWsOmsRQ>5A*$4EdMCA;2
z8fPSO%kv~n=uUP1xG_*V<f&}B(2op9ME>!o75reG8|r?M$^O(Q%6(r;&xH@h$=;i_
zHSICUqc?i(^TvjEL1=r$gmo`Iz=dyJahq2l7tMSN>9xOrZef|!S{qNQu;r|{Cj3*s
zFXx0HTfwb5C}%nKN7JP(VvZNA`msWb_Ueey@;{L9N`qMguYz{;30X0}7df2s<J`Qi
zST}AISG`!p8+)3v#ZUb><Z@@s6`J6-(GF<XeHIGVr2!>-h#Xxf`FeK^_Loh#!jJ(u
z^OkY&_-?{~)(dBhm$2}uBi<aN#*adOt!WTkpF{2N=0$gm=;n!;{|<rj$oUL`gV3RH
z6lc`+W5x7Vb-mhGw)i6e6*bR_H242030|^~%;O9cd^|?HtKFxK3%X<N3@2zf<t92R
z(kZ?6FYp$es+iL*Twi-1ZY~q=`SbEAaGcPSzdbCwZJ$ZHMS4m3%Uij&Qz&GMysc_|
z4_P&6pzN<{q^P|%kU1_&@CqZb{C~Ttv}Q6D&Fx4DnmkIfOk~g4e5n016g<V=HCU&j
zhSfcBTiO6N6<L=z`>oJ1(wZyN|D)27&Qw<?v_Ov@SiM<8{&D|FS;IX<<};q(A6U$J
zR|45|`a7x|u$Mr5Z+8-hvuC2%A)nM?rP*nS{woElmmPq}DnD+`=>x7>6ApD~&mn@#
zr+qj=YVLgrjK0%x@cuJY^CSv&-yDVs^E|Mkrj`^Xwd#rsV<2^C2TE>d$%%S@w4ch5
z(z217j?LvUHloMypAafd-a(1p{diAUxOh?=MW4z6uua)bWjls*;2)Qu>bi@_lZMiR
zXW?StxG4I+da>pB4AM(RXtHgQ=-%+;=EB_&z5EO85uVu+X*6cMY^5)QfDJ7kQdaSD
zZh0yEb+cEovvV9YuZt4BXIeHB*{#anT{-JfJB})TMjn|)Tr*W@vfs6Xs7?K;YIL~Z
zKkS4s-BPYUbqRJWeK4ojHo@9VK;Ip|Lfk)JsceQ3SMK?T?rGYge!%JyO^ZFI+E$a!
zXOnz3J)U=NSc$2Jx?=S&!q>VO$@g+CIJc`Jj|#!4S@jnLrApk|xSaHV9;dc$H^^dx
z50|zKl?zV>alwZK!37@&{!33$+`|U4dZ9*7!O6*bBi?&Xo}uJ13T$aKNBYYewOwAr
zF)yJTJPcvD^o3gHp9RIv@6`E&uamZagcSF;y~u$ng@4RWa&32-n$4By8-1Ok%uVo+
zm&o>8W<YfHaEx6#AMFF5Qna(!OKl%S*TIADbY9GjcaB2&*NK7y-x>6e|58VoPZJqb
z;Zxmb3k9l=piMPr?f#K$@@_sxnB<aeLnL=^SkBw#xM6Hh(HCR=5Wa{%%daSqCxkem
z@7@g}ADJoZYk!i=JmXpae0)jRfp0i1_8Dk4SIF8mB9lDZ84q<9c|{i&b?Zk<e!SC|
z;}S*p_2K_XaT`xl_oaQ<b5{^g5WS_%*Z(Kjfs??~=~q&DD$woH0&>{45Raq?Kisn1
zl7zRy+kU7GQny@(R&^CrZ+#<aH#JK-w_}ubWVm4Nc(MNaD#>kfB-JetKAu8*4E*YY
z9+3)6f4mAZ175-N>nYS4Hw4lbbR)%AC{Z++qEWnfpN$<tsiLPkcl%ekxz>hVR(50O
zhii}u{W)>G0^3YKz|QApIHB7b(RXS^MJuLK$-k30?d5M^@E<GJ{Z;^eFBh<ol%W61
z4qOyjL14R|O4U0`l+%SbanfK|?<jb!3p#@1yj`T<TBk1CBxd&$Ck5wr3*C7qdY^`h
zJjR7#vhP?M%s$f(tJhBvXWl#5_|IZ?d^kYtI%`V&9)64Zpb9xER`h?3AH#|_)8w29
z*B~M*n}X9;h)$8MQnYeBCs<vk1o#2$?SBRPxCc<bOyvCSUy$Q~ThMxWE0rehlWSf2
zgVC&Zm>+ghQjy5OtvVppKUn~7Cwp<lj7ln6I)ybpXQ^(3FIU9OAmwerTuQM-%K<s0
zGZK4+yVJp5JqMJ5kLAMWu7Y#AhZ=VcLHiS8-}3ZN(R*lw{Z9{JgLNmgI`0M7t3~(w
z;W@b5eF7FRgGSj;vNtcJ)H!`&XYd5vCU_#vmc!wrR`^qgH<T1!h{D*s{=ENL49-x9
zUgowjf-@*~mrD#}Q@;S;T$sR_LG`p}V+S@KxrtnUnt;*GjzS|2gQ)oFm@#B7273>s
zsF(yY7VrDoKffW9Z>+I2XBmWQH$eT?9?;-2g4HMHa)Z-lXpOf;+pWv6CFJY>_le1&
z6Bss%_oMKS-B8uror1+XRbJp=Rt@YV>(?$&S0x(pwiS~&bj?R-teGmf)dp}_q~w;&
zZBQ+*1Mf9mxW4f?Jr_N*RZqguyk;%e2K+|rzn{z6A6Ap&{`U|%?=;;ho`X)ZndpVs
zO>WB?sa_chMnBDE%P*%uV=)$$QZks%c|%c)tHAKqhjn%$N9#J28*KITK-?E~gGJ^h
z+l(OX8R_gS(Y~V%R6p$szAMGKaHKbHixi&FJUwX-1xeM(zmW1v82OJX1c!?uoF#nK
z`PU{w>6AE*aazrV%iXx%DTabSEaHuSMzCt?Jldl5LeD2%1p~fPu5}YTff2_b_4aRI
zt+c`28l`y8T*CXWN<yRVjrJ4E#6CMg(w#UZnS?uGzKh^S?>{5?r~UsN^hh0ax;<tE
zN1@ZmwOD&ouugrWdGYW;*f{GYM2r^acK;+Pe&$@(-?x%<mo>tx-5>QetgddpE$6n?
zh~M+~pjoBmn6EZ$awLR{x>Qj1Lklbo6<m{Gv8UE8l|x+SvZ0^QSSsByG$Rb}Jda23
zPUi4DAe{n-eu12AqsiXOi48FW$>(hhwwl{RZkjRHt!|=pl>?iv`2hN(b@Z~cV0bR9
zCAWYX=r*SvRW8bhBN=@$V}3tOn>+*^OkLPAV>`IMJ4J2Rr*Y#^F;@-@rkGCQ==@ku
zm9ysXmaNXabMgw#F$$%Y^}h(laJ6K$d;#gxl4X6~ukzy`!%^Y9uSDT!Eyt;DQGJ(e
zsrAGda_)LWYW~?BOII!wJrR9{*4GoWpRD7JO2HMkJ|(FRMqsiuk4riM7WVIi>kp1&
z|9AJnadwf=kXB2|(W5ziPyrPD;fC5NOR2xT3py`YOKu5KaFaw9vvL{gvx}spD_uCh
z{HXA)4q@H)iR|CJUTQqI78D=55GBM3zW-#>=!4|ug)hj8x>LgEY*Lw3$*I@wP<Txu
zR;QV=sr@nfV(Y>=za4`Xbx+KEe466Y{aC{%)%mMWNVVOrKz)ggoEjF$%Ex2LW&Q%z
z8jk^!Km6D-ayo}NMzF8(6wI${l+Koh2oBR-3j4PwcK>BH+bH%@RINLxelaC`Bk|0*
zi?as}vhK&3wB>=4mD{_3_pq6e_}B&uL%Xr@BvWi{ZXuQ6v;@uS#@4g_IQ45h#P!%p
zJ|a7w-6fLs{=QT{$`2fCrm`}xSWY-1e3DO|K*_aEXea#Wl`YG$+M^j#>@w-tU*BRx
z^mC|w;R?HtiP<Uf27R;-<Pzr<;*8u){yxWL+j)nmvX6u4gX+eEuDjx%?}exL^f5_a
zmQZ4KXrOo>6}zQRjpS`IjI7K8AY_v{s}_Bw=HH@0|NVV3{*+3Y-#w+Q58KHm!h-8u
zPSb<!-|_pj8CaB<BKq`J6&JlUr^fBusmSMB)Li`_C2aLzM~~6C?MIPAd-H_~%PhIP
zjwyWwOUB&;IM%l>=G)J}2=~{tQ@e^ow2QFqt2--aKa>4`x-Bb41(%fm)r+^7_u$y(
z0o-VF4xFC~R<u&^R`c6|a;F1@4fbWjn8lP`xdJo)`i&A^jUmU>Z}7s`8R(tZPxNQ}
zS>nBFG)J8rMxF&`Sl{P3G*<sh*?qs`dlBDoN#!`yOnz9BF~=75Hkp+E={7|S=*{)J
zoMg?QPGHclk?%ZmMenr+(99eNiR*%R%lYnXJpHxch|ZK-3pRqq=7AWhKM$=RmSC;J
z5KO)^3EvNxg@i{a@aK!53phyS+b+Q4`XshLG#06^;K!6$vh6e#hOeE0`mnntWkyCE
z{No&sY<9qpx4z+z;dX3w_B<$xZ;6PU$WT<w!3zZ=Flw&23p$M8)BrVQtT<0ik7r@I
ze-pI&xpMWYK~UpZ0(K`>3GFhA8a^mEql3_jU8Zu<*h`>#+nIOI9mLL+YC3s)9h*c2
z2=7LsoIhD)38#yBXzDb`xbg)O(nZe7*-p%&4@=S=*OJ%oQ#t+fAJlg64^o7TaxYa)
zmU0qoNj=pQwI(C!Uf%!=Js2ZqvkshpeH10djuYG^BaB`Wi5b@hvuV2^P;CyU9FYaJ
zIBm}fIeXzkP&d3cQ+T@0c9Cj52C~zV9@uvGEhII(rP8+HlK*$RL3638q<+&}ssAjI
zoevcM<|jf!{re|zH@KmLYL4Jycfq#xtGF#Sk#|RhbN8T7R_JoZ!WBQ>HhVSRjGe=k
zX7w;b^!1vC&mq6LW7+YC1z7&mIf_^=v^g6w$1MpI^Q3r2Pa;`-o`$vmeMimB_n>z8
zYN`l|#QI);$Yt~V&`Q4tn&o1OYDW-dYs%h93eiP;OLDv|c)HVjU~?B=RCr$l-DKb|
z8wRqK=t>E-eh+CM{)GNEbGdXy4QPKaqf+S2ilz@G9@E8h6Mh3!TH%#xFbAt_H}o$q
z0}C_Z-^`E0JMkki{p@l~N?eCUS5qOUWr4^Q%#e#B`~{z59>k*w?pe}>>pB9O9!r7X
z!%pBoSq6JAHJqKS=H?GCsQ<ay{QT_>(ZhWLbjm%_^T7Rd`SKX_E%IdTqt9fse>x}U
zbiqUWoUqaF6kIkR!}o@d5V<8;?0iGnwAKJ0zwg8f^Af6bpT?#`XNkTV3B;_-txvmg
z>AS6vdCnLO?jERG7D}3RnG`=@n8>7CqDJJp8e-3r{;x&qlwpzVFrf=a=ypMfYBBHe
z>VU>u60ku$CysZgp<>)B2=-n|RloOPo3^*)@b^$woi3FeKL}6Lq-~J?ZUkuTkHecc
zF<9k2h)dIMl6Uq<PDmXjW;4OT*nXR~bu;HJmeK5c`w3*Xm!YVmJBkeumiw$?oudmR
zd=g0OUSgJ7;ew?-Za`|rLo#~jhmjV&vC(ckM2#NK88cR4V%9)>FfEBycTyzP%~Wz-
zHx{G!)<NAl(cKznE&730QGdZTtox-2EWeGxupymLUz`Cor^LOVWd(@?g|7aj2kUyr
zi}Ulm9I<*3dcS%AQ9<8Qt>v!}RAt7-?Lq~Mq=K4`3eNQ{k&Ae4BDG9wq_*Q-SvPhS
zXY9`c+beZ~^Ybq`O<9E6cC~WQ!%mp_axofeTgbyEn0-cga%tQ!+3m3oyloyq_Ui)B
zPd+Y1KiLe<mByS~6v{7mNAYvbH98`Ep<7-%;rd@zp?>s!YWVkiGL4*wCuc^o?rD+a
zza)wc9a==rGk}k69FOT==EMF?3f}LRh{vu5VEwc;Y(C$DooY<kx=7@I-Udix=7w@$
z+Zi%nUJbQj&*0eYE?9q3beQZC?D;S;xAgV^<6BA$9y@`zbz03HKaJx2WqYLRle;AS
zf(KNa7>gU1>bSq)X=>uCWv69pIr?Y}mtMPE(pnoynu)(bf+iLf50A>e!XJ{`^q3sF
zJ8*0I3bcPQTI~HtgWjY>QVl5=*%bx8IT3}br?2HB9VcN_FQM;LoTQ3N>!|YW2u%Cr
zk7b=^p~*qfqjPJ9ociA?R{38A=N}hfc=UL#9X_3cJd(v6b`tC#oq&S1f*af{dc!9V
z#tPqfaz5ol7u>t@d(m}V`%ui^ad&0^O_yY2mp7#E_;(3pI$(pxA(|1U;nWMWM7L9a
zR^Di)EWx?-ca$jqr7^Z`wqn!0VHi5Nh4jA<Kxg-HP-PUu@9p|=%Mmm5+#zAi?Ll0x
zy9kgNh+jI}bFS!L(f-w5F70x;#QgPc(C$*mQC>~3bKFGEd}%_8(HgljZywk0{gd46
z?XZ57ET<n7bLV-HF^M<}iBrXXE_)jVj|qi>^*uT2*EP`mY#zGBMB~Pfp?JNg==VA<
zblvT*!EIy}v~7I~hWsFQG#bGtm-WFWCpGp$(HVfPqBFUdy6JrncG)j{DH9Jt^(Pgj
znwmn(>kLpVua@pWFOL7UBPTe0%Ynh~NYiaT*^Rd1qHkSTvvR&%=__;_-7++Mzgw~>
zbZ7gXzf;|P!En;QhAJ`d`-{+Z-M1GZ-{qlvws|~;JR60{(=E_&;xM(e?W4x|VmIRw
z0a??vsM#1JH_cehIXQ<Q=5q+P4J`!6L-)acL?M;$Jp_&YjRZIR9@W>6U{}K{C><=a
z;4gPl%d=LI_c^1s|8W&Hjy+BJ`Sr5y^8#F4?ae74&k5~d2c!;*1Wkj6oIa_s;_ESr
zxE9L^y+(s7KLOjWWKogxY&07VtT;NmB(H}xrmkKCp2p5xzkes$Uvc2%X@mLBHXHW;
zK`+(&{30c*#k`mpfCa|$ST!X^Dz#6d%R|iADr*()teS@|Yqk8q&joEhm4SWrebNoR
zAiwX~h1*hIl3R~h@;`r1R@o<DOVn+0b5BI&tyf^#L%a*OKLq+jUv}FZMa}nxH(+gs
zRC@G#pxD1Dd};`4j(6hZw?nx9Cnaip>(pKzBIh;v16f^Y51K8Tq;frQn_v}K$GD>|
zuuNnuuFAb!gx^AXD-~_M0E*=0Q2l!`D9Yv+2P=Ki^k}e{{d}bfH_TD<_=CDRX|Gh;
zBKDL=r%Nq&d!U<F53(`Z1q!vl1h5~<ew~0xrAJ9&eiZzL4>W9Rd%o9WF&f`x(B=4p
zYd{?D{NMjCBEA>-i6^A(_l=bH=>z#M5_^kfGa-FLcbGA3FluJBs2?8?eV#T0vGzy~
zS$$VQdhZod(UzW=@W26^hpnWv=HsNl-Vvha?1l6r*CfNT`BMGp39{bOT{2`%hw9{O
zpugP-Uf%NN`s?#WAJ=s#-PK9FHxV`Z09F`PLI1N$P_ZJQB;33m8U_!Q93I+n)|3b2
z+xb4_KYdC1?lsUD9Y(5-Pe}i3PpUQ$8G4g+{Z%w?oDhv)`Uvg3&PQ-ZyUK^wEyk?k
z3sl%*iF8TikP8O!7jfR2`=^1g=*XsQM^^SY1kMHrw1MpuG4VZxZg>XyHX?WI^pNz)
z8FKZmQZUR}irp)KcVC@|seM;6*bl;lTm!_|xMGFBH#dwb5v-Ae?vCNDpqZ?eFZUKb
z1C?%a?q7X)%lzS3GtCv#M_T~A2;sK$H6ojFi|$3+;YnvLJDy&{^@*qD@}Xx*+rGEx
z@~IQuJiXPy-D^QVsaT!<8o|$^BQ{B0Fz7%p+<4CztETj1m(Q`Ny1fwUcetV9+BrGe
z=_cq$y%o8y<!FG#q9<V@m-d@4?$AjXVm^VxCfl=Zve+rwc~fe_Xz<^>O|mUC$0+%c
z@K-$`N?gY!Dkr{vdORnM$fCNSc^s-eKn12JpzZB@YV7EXflp@h&gy=6`Ryt`yJZqy
z7um%6--9K;g9F&)x$rnA45fxIzfp6fA5^dYD2JFup_T^9dZ*`dGlY}#@R^{y_n+h=
zTd}4=B{}_X6za+k%SrX|T-76nwF$>zOn-^f#CMPu_5)}qd2q&k(dDRe#rhvi#QRRH
zTD55a+j|Y6+>29Kbw3|06aS;KoGEO0)SF_>MziwOCpqbd8<ah7AL;j9kz$t%_KV9W
zh+Ekovn<s3#ZL4Kt}|iHkn@tEohc};?Jh}JYe$#QujHV)!ZST`KQ#r3u8t#7_$A64
z$!P)JeAEl;w$@T=fHV2OB01Hti9*x0`10s7EYA+%x?3GldqXK3jJ;%yS2k!LvQ(Y%
z0d(*8%c;F$G3cd=^|`K6)i#mA+_sjSYtNJBWvt}0USg{`-*E5%kr7<?ljzYlg~Tvx
z%q(~d<z{;FJ#>J)Yo?Huti{NsgV?)+I5Q}KGd|vcz~%v1->DYL#V#*ezDaJUw}a_`
z@38Qw@C}>h6_@S{Cfgs>s5Q)yTe=Ar{jHBsT`~*Pt8Y-ztq#<@E}L$yTgGKV2esTZ
z0wZh>(c*r>hdcBhm2MmdwPTiYvdLt;bIzMhjV3~Ks~hL6)sgEF18HJ)lAXbqZAO%m
zCgp(i@w6jbgiXSVRc4$!YAG5yitNdav!L`p2-+4qxw%pFB2TU&mxJGld1MNBFBbR5
zq3P_FuZC6^4OH)(#0A!45Uy8Ixo$X{Ub3RyQwQ?SzD`_G9FOtJ;ex3g09m(wr`(z~
ztVjw$$JJNBd#5ue4V=yKn}=Y|UKi2pE;zT5D$tJ*-Gf$$`2*g8<Nv0x@1CcWk+%{<
zf_(&2^G|5L{s0<v_rZ2m6uvwnSRw}|^YwE!ShTznoK6T{;bnKqm~ARpCf6zLQzZpP
z+#wsYEO0v9S<FU)K{)L{*e~>GtGu(2nesD)oSw)|y*i+-C6RJR+G5i>(Wx4$0zV@S
z=LEfn?2s;O;?oW%D20zVJb>(~JUD&LNY=g06#mp5#fJNvAfYfuWM=NFqxOEs5srN@
z|7Hic^OrMbSRI3nd$jmGT8oXztKmY2(X2f@6W?5$&3ffUa;=HsJr{JWbJ;2H-q)U$
z9Y=BU>uA)v_7ggF8#Tu+hPquJs48v&8}}9b!?=r7aq?G6^$O!JO9R>O{!k8{bdYN5
zRs2{vhy%s{L(_LMJ9k^cp^+o_Wug}2H+14$J8v|r>CM?=YsmzRu<6rMjw^~09R`af
zCqq0JrpNJ-SF^-UIZn#x&rpB4A8Ce7Wbd9iR5niJ?h9<Opsxu={1GJ%w9R1k$1}>a
z?~1CC7v;DYK3q6IiFKn@yx2P#qr$s!<}G(h@p(r@vE9L>Wi6(!JuWpZ^WwM>ju;gn
zKA+br%sj3{7gI;phwr0kkw>wd6omRgbA(<qhL!flqLX|IT6VlbY0D;~+wWW9P#xeF
zj6*{_Qr?ZpSa4BzNl!0>y2X~5J~)I5=CsH1GAs5jcrNDa??qqZMQW~Hg>i#MVfySn
zP`a@gO7E)WlWk*p|MKo=xpOuq-xv2+oE^5b7kam27*=eW3ZXS8!RE#b+PF-Mt)@1Z
zILifNdx-s1RVih(&cZQ0gg5SSo2+U4SI+-9ns@H(!@322ymP%B`)?o2e(@_fEX@<c
z+c{(0%%LJfx|*$)ex|UIgHiDzNv?Y#c&1-MF`+V3oRjlWWB(e$do97nSIKllF`9Qz
znvPDl-S}956&jpe$#kfeTNc{!%YpV-I&XW4_vP_|{nQ^9w;zv&MZ3sEr9t&S;n+Mj
z0OQ};V_Lgxiu!*}TC0k!UcZ6ZNovkGu>mxFN@eFB73BHiU(j#*3rzEv=$r;Em;69A
z<sYGKlIXEK<%o%5Uzn5o8`<WRQS^xRsF@eU)wheqj&c+P-gIZxRvXG)*$im|J^0P5
zcKCeoKcHJ{&AKjLoONOiCPnOlmM!iW{OnJPx}@g*DDwLE9OP;*6?k{;1)3h|QbDj_
zpS|8DH%W6?=cyy-u~P-hRqV<QwxmiOilx2EB@fSqnC17HqSk*;ZV%Uhw%2cxp=Y-A
zF}EikdoU60?@DNV;xn~ow1R)vy^vD58e=9~VoTc)G(6l4`X67BvULV1?FNv3-9gFr
z`(<3;6ixXZFUd3PyK>Ni7>qUY5p1N%Trym|rzk~UcV!n!jEiCa{^8uH_vC2vP;~mA
z9Xg%t!>~CM=%~ox{d|{{s{%O5W-OZsU!5<1p`_8VnDD$ed6cX|^O+AR{jL`3Cm2hW
zH|rtz`=#u1PMkZR=d;qRO)B3hx{lo5lHuG`_Ws_9>eYe^)<JZCEfP!|+bvT1))ugL
z=ub-3d@$tsp=zf;SbeD=3cX4DpRL6D%u3n6Ac_6U&ylk5hTL{8lw<Y^#_$>8IY0Ik
zJU)nh;hUHA-dfGkzlLMMFcrFX?8oIvmT0zb9cxZ31kLjnN_&0}T0{okaQZkY)GNvJ
zL>XMLj7P;-!AaDYLv>t^q%hBR&#gHn7#@Xm)?MhBT?;{f?V|L4r_enSu=L~!_;^HQ
zEAuMVFLzC5E8lS#o@dVbU;u^JrIL^86(aMeVC^*%w6sViXR*)KOMSUsA1xV1mx(R2
z21~UqYL(Fp@H#r3buUGJVefDzzvZZUxK(~^XN2`%`$|Wu!!gO>H7R^M$Ywh=*!sJf
z$Yi_$jb=4JFQ}yGpVA@jOcWd8a`xA+mz3e+t}A>_UTF((i?1=JbrgOu-8I>H<54o4
z%n<k9-?EY1kIfFO5uOgA-=r<zj7uXqZr*cf>)k?mT~?v@;^`1-@rh2JwL;gXdT8rq
zikEFXxmw)wkBf{k^@}@sS%_J_Yfsd=ou+cz60luqg$1+wv1PXsDzO^L>1{7$gT-=;
zHf|(CL<pp6yih;<lRCye8A~$;fN}W{ym`$9Ei^`$?_EXuGwap)pLWP0@jj?L7JwSc
zk?qHSgp<p~?{QERMWh6vtE=EwM;;|-=L*SX%2EvdGMSBY{v}t(yO5Q8p0xMQlPRvI
z^hX|?bce*<|0j6v+z2)o(`nD`W!QXtKgBLe#-z<ZfnU=Ukt6RbdOd`G-eWCi(HSVT
zGZQmT9@H;93wE9!_{CL=;ju%p{#lc3zt@S<jcve6LT)y@B<Gs#rItTk`HP1Q=We(T
zt`q*DmPuKljuM>AWg^ceH1A03I8<)-2IWR;_#(c?_oj2$G3<rlaxLP}d8a`4_fN9n
zR1dPgHVJbkt)o(*;kTW61U2Kk;7jU<K67Sb(Z3&{VN@(Dk6ojfTH))NS^-XtI!wJ`
zjEYt>w4Id>&0YFpMBPKMH9k+y^?%F8A`41`L?-C+d$GrO3c>5_sbRDZ>6;%ZfA>4`
z4m(dZhf<(+y*1`w6lP62PtiZuQvIZ%9AhN1H|M>`%xN_qIyFG#3b)AB2Le$!=L)&?
z23B^ehK7}bw`&y)1^wQLJlJa~{Xt*x{_-8>b`Qr)9E15Cvx%M#5jwt#+@>FeHX}bS
z+!4h}%|)r`r7hI&{}#+`v|MTJAo%!us6Nyfl~c}1W&8WH{(xTH%V;5%DoY{wfCE@%
z_9Oq--^-q-)1c_R$QK{pOXh+Hy>r`2-cme)#VjTjK6Da%U?bL)WKnB(ZwPiPgXXMO
z=}`N5s7dX|^&{3(!Y?M!m@M7_7YxOwF`j(vt;oz|Mxo+!JM}$#;RjhC41UU4m|}eg
zwEb=5($HRl5fDnA;;gNWKL9z#nP9v+7MqJEQR<(=K=I=*CFxZQnV$OamYKmgBeoCD
zm@V=Uy%V6NP~=;O%x7)4KO|ks7;f2j4~+kdL#v~L(`09bF7Z*UwK*($S2t4d@)~HX
zTF(E+(b+h}n0{}3BnfE;Ns^3&N*GCMp8I4ZNs^Hel97-kQ$msqVw2G3ttA<W&3i~j
z&wVo6#wINx#A1gHAvSH2yzK9Me}4hZJoDW5IoEZ4uA01#AW^w_HZQB=8;M<L_vaPv
zuLy>mk+i2X{gcUw$@TS$fj`anMx|{bGd#b`s!Z2HeXuPi)&`+H{hMv4J3yjt2X`H`
zg>~O%g`K}o2Z@~}s`IB}HSvlh@)*74ECoh|#A9lW19;D%^Oa8o)4?4u>@Llosq@8%
z`JrIGcp9GnNPA(YVbC&c8N`3?2A&1aSVHkSIJLwRYwp)`?LcEmczajS{{2nNFqsK;
zs(Wnru}wVfvj)Q2HZmVC4@kT?0Yex2vTVVM+2r}a*9KoK{)gsBkA2K&(jb&>9Rrbm
z|LbXvfsk{Z!D_@H3^{awHEv44L8k}8v4zvIIc+vXK8=RX$!?%u-5<V2|H8LyqqBD4
zXmkun!pM`vAnf})y-mHabwe}P-1}F|?bHjcx027mv96^13KMjH|A<RxyF<;Zn_PER
zDO7ztKpEyUEX{^|XBCTh)y&h(B$-$uW9Kk+Tr3{j90XsFn9=TBiN1f_=34UV1%7<T
zC7#DG&ptC8t>t#aYI`Mmmjfy--YMLDj&Rkb4xlRC%3a&fh6Jmb7&v}E52+eUx?~dE
z@m+_<0s$o^4+NcZ2{FuHvokZi(0uY=tZCbL@D2Zy%j$j+ikhszzt7J+X3%q<<uU-y
zFBn35-~*!LgR9(bT5nK0Hwcow<gHHd1pSry=vg$9+s%5-j9VwLujE&3y*i!v;+Bwk
zVFqOOzRvxe!_Z=h4@Uj7lQ$1u0N2*|pv<NRbq)2!@Kqfl_ktZbmc?T0_*L9w%y2Ad
zcaKLZNaN~sPDt%{ft^~qmHXW52IeNux#NxBc;&%0kdzz<#~KC`ul|eh=vHUSAHL$n
zW8yKEvj1nG6K0<s%gl%Ra^=k}icqh?n0)^+b8@x=RVN$FqMoMK>X*!=|3pxZC7n-m
ziYafrRhWxqy!uNoZZkLr<39vpW4oPVqRkWLrsxBbw}ukCK-%$tS;H(kj6i9^f1=-`
zZkTB-5Fb750b%W_k1RMsD3r!<UDZHQtKTM?Hw+{{3}rc%7BkP_cnl4j0ikiHd4_Ta
zS3HV>z{@gJon8*+!LIQB#v<x5c}U*+OZ*Wr@~Zneu#lHMpmkq6ZlTv;ap`1;pB#eO
zDQkG;Y<ny%SVR8@(zm~DV>Y@`Jf_V7)W=ltwUz#Fjs+~l`3tLmu3}|A4v=*&3hTQ?
zg88voJUQVDxA|!au)o{m?oH2`WR5vg+fmQR&MA<6J&6@q$uZ{mT8tg<fjOPCxWwa|
zXx!2$y6m<_Q`<SP_i+j&F0ti<Hrj#4XvP!uLs`R!znGoHJ{FQ_f`Lbe!~RZm-&{g-
zR`69WN#7&7rqi9tDNV?9j%9l4@2Od0gN-&bc>(#(GtZqAl+<&i`g0(!_;)n-Ym0@>
z6MNzJZSkNz<}Y0PkNCos*$UOOFU;LPk4N1a%)Eo*F(&OJv(VauWl;~P)g5AXioPhj
z_g;|i>C0lM>!5l|y3jcHE%hSrD&Yyq&~W4(&$nz~y2ni{^V&4Z6Fv~{Pl`Z)&wWgL
ztDms@I58eqr$9+j3hb_MgrK#PQMHovwSoOGbiRZiyG!~kTgoJ>r~~PJe<+?e8jNS$
zQL=`WgyK;q`DYdjJwSTuh|5fUMTUB_)m+oo9&auffQ$GNh&uk1{Be%p;TBEr10||&
z^v1-~wn(NFNPO{xhZ>W(|EK4y$zc`gz)>j4R<U7|*MWS9H#A!MF!{kX+}z{|S7&+%
z#^?3)ebE2iS-~2!PYd;qsVuwk7&E!H1k$OCruA4g*Us!O>h3J%e&voB*0qYC59<x~
z=i`tXC3*IBnuD5m2ol5gl8U;$JhE{(`hQ)Il1>#~s>r?KqX27=eam9$(Hb<i?<<5H
zwgJ8N4%`3U9UXtY#`wc(UR|nZCiPP=y|*vOR6(pfdN;G#x|3DSn1XKa#$nKzp=fbo
z2qx&7*&x5ss0<6@4a={wm|d=DaU&580gJ?q)sdjDui)M<B`Ck3<7OJl>un`ZL5J7e
z`o&xvRwZC!Krq_$ZAWKz8;=m3K&G23dQa<wZL8F%3!8y97F}V-J~;#g$)WrHp-@X*
zy7#3P=>94k>tiW@oA`odRjvYqixn2WkAlLP)%01G3m&5vVt%bNmJH|*7Os;aa$s*r
zecuJYS4mNlzLka7E(OQUH(0&)3zKG7i~0`R*s16QY@Nn<VV+Jb@OEa`(kH>!AAhIL
zFY>CciDNnM-|(AegHax=7mtMnLV=e(PwA?pGjTO22W~Gh8dvaQWh^!{T<5Z*rec+=
zjwNr(p)TmZXjUYD>UCdcXGuQ9qn0Rj`7X3vaYn1HJ9&fG640g1<tFbIf<ACA?9k7_
z87XE!rM0ZEsEBJiL}Kg8L2S|0j*yx=4nn6oao<u`%srBb+IU|UNj+y-&Xj4JypJh6
z-{3N<4uYrI3Z73r*yo3ZLqStIa|);3@QzGY`=J{%7>Rw?L>kJo7px_o^2R^h5;Cc4
zH1rQ*7e<^d3H%s=Syz0)amf;h@t=xK)VE^PdeOUN87mvz2g1p>s400Z?4N3f4Id6O
z?b~kR-q)TWwNqhr{|DT3>vozYqd~P{G5G(Uh^n+YAz?!p1|0~4_vBZ$@H@cdeJOwO
zCJY`;>4DOH?mV}cym$$>OJ;8ih5d2Vtswozs-mq?v;Vnh)pU+oj9CK_`9Yw4S6reD
z8HM?6eK4wJGUTTABEhtlh1Ldvd%|F_ecA`CeeE&6CI#%Z(=l{H3v<8yocV8;g4SFm
zIz{efW+@>kQ{NI)wOyca#d&6a&<-P$lJQI-c?#<`@Qhx1=BAtuxmC+hQRj{CKlH;C
zaW2-{Q@3PPFla``@yxeNnR4kng}m2eQO(z2;~~<OJ?fZCZacg)aUDuWhQoVzbJY4w
zP!!I;AbfwZ4$AD7Li1%inDOT#D3}t0s&PxW!`R`d>`Q)6+EKPt&jptYage$7AE9Um
z`Q3g!FX-xhK{<Jtpz;j_-(UCf=A^SMeC!z55g81U7UKK5EXDi-2U+a^d&;`~D@q-K
z=Y<ae`J^a}?${j~SO3ZFvS&h#_!F!9ewew<@r9^q7kI&%O4c%P1iDO?fZ^h+5{<1h
zYC4}}?kB6+ohoAcHgfTPKOeBoo&m0T)b*RwgS01S@H-HM+0lKu>ygo*-Zg`_KBu0D
zS0gZOP8&D>F^TvR_SpTN6F7C5O*3d7i(Gb>6|T)?^{K~b9+`^4!X&WnLd@T>{jqUh
z02aR1bCsJtE^=IiZr1A{IfrtTTORYKGx1<74a2Iwljymgz*OsRi#1tA#E-beEPosW
zZNW;AZ5qkG9zMmLtb*vAz0X5u=dfZ!B1%6egQnwfEL?VwI6{Y6vZ0xmFQ*(+fFqM{
zTf;4TEy02Xd)e2x?aX|l3tGncpsvgmB_*p%)R|f0#sTv%D|`x^SPU4Q(;FQ8T_N-e
z@L(0)2U;%jj8#LSJVpjJ1GBhv%LwlJ=NdfK{W*_nab>AXr()%oC1_l-TU6ivAf&rd
zHtE`Eyz{UhIGV4;r+-d@tW$v~H+>~$K1k=rn0W4)8N+lh$hVgf!L!D$$M2S_A-|;^
zCb&J~roWMY&VPi$CZRtV*RK*E%T4hs%@x+G0SoU+$xGiXG`NSO^iVJ>`sXy4h1xKA
zuNEQb;%ac+=tJC*d9Wp89;W~13ROq1vr{g6xc|97cyr5oaGmu#Z+;{vUReiGQrbnV
z4KHR<&!ntjs5QusJ2HJ+2=_kT5Bv}4m~_Nbp>s9yl`s-(Ze8P&uV+0C7cPie%6u1}
z?t%reK}?n3ff<T_6%0$Cvx;$}d6e5W?xQF6q)T^Tl7p;9>y7~@oWM#SL3-d6X8hGz
zlq6e}$bNb)WOr%hsduh3b+x%rIQ1@9zH3z|*DVxI1uq2C0LpNu_o4UDAk5AU1Bt~a
zulydLDLY?Tk~W=s67Hq)n6rPe&R@Mi**I4`Mdwsn>|&NNXbjZeoQ}KOt;O))i6`Oi
z45<&MV(UByE^#nrjnWqGYP}dN-fPgI&vNK|aRjK%XK-aF5R}h<DJkkY0X;)L@IehJ
zGz%@~rlUvW`)kBI?XsU&z8jC#-45|V)dH$}r*Ln_C<qy}n<W}ud1;$3_zt_prOr>q
zw9_iK>ZA`kE;_*jZE`3B-K3W+@<z!_M@4cx?M58As2dpunR-vK>Fonf6aONf*H%IK
z;1rA6oWe_5Jh1K6NGSZ{kf1#Iz9b{6jjMXhXN^nNv9!{9mi=rxhU9%@88eq-MNSFh
zL8p24%f762<lj8XuLldxQ-J-ru2?mq2WnGBDI`uKy|(Nd2Ky-kS8q2H<YTK@L(E}b
z@aLa=Mu`d=GfZi>wpo<7zr@U>iLhfmdEF1wZv7pdEywx@WnWD}Ipn$E4_A5lRzLh7
zztZO8GRSi14%)z;inQT@*xYFVs(UWy#xELCMO@8_8<S`reW5t@TF<QB`CwM+SY%%0
zeeW^=rE`Ad5r1?*lZQ(&^z&gByMefCW<m7q1u;XH9=!U>K!{E2h%s}zg4saIHY`u(
z@?}$)WbtH74Ix(Dwc+S_!VjgR<UB3<7bcrIjjQ(@V=|AcOcywxIi`K&HbbX?-}HH?
zI5+?$l`AecSUSMhyLOaiScgiJZ!BcyM=rDel{bBogMD!XoS_{~$c2mC)x{1hcQMHP
zDG-ys_ruIbZ@6n;Z_<mMq3$o*Aw*Xy3eOGYyC;%YCnQgCNA8Gr{o1&ky44(NI^xX$
zODLBU_xpoCTDw`mbG8_tlDA9MYdUo(onx}h1|j}hCwxrby=2%<uFD7&W8Vy<J4i9t
zPMln#J2z7(o#0P>-cD#G-NdT-4_IFJjd?6Ig{E)QQ97<arnQY=(&kCnTDO*Z*MOaG
z-v^==e<uILbEdaU=8kJtQ(k}xtClXM?&ou?X6o-;Q!xgVZ|AU>32W&0oPdFJO>NZI
zh^mR-#Z%2QKsERXakAt*a^F;9B$=YdX`>LiZX18>Pr1rn7x<&aK6vww5LEwuR?JfQ
zL4CV+TzBX@@wj_{bkT5T^<q61>3YGAKRe^ig|1K^TF#C=r5VL@B)d<ZD#<fTF}QLd
z`0XHX#H&FN8Z{A%=F=JabpYiK1j-^$WlF1TG2`Gc%509tw6CX_6=f?<b+d-b7$=Zw
zPV=;^+uU8a!m52!1^=M=XdbtQ>1~#RCP%?Tua75=<2?2za1~gkI6%#qE4=fzo?xtS
z<-GAvmffxw=np8E`E-$0U@EU2iL84#X^{8c2_D1&h#9?;o0gl<E+drbP7-tI-W@S7
zBm&JHl*EjsvqNLSiZ^zq9+7Gu72w949?xWz^H+ns$58V1Jz^H;Ct*sD5$LvQDcHJ?
zp#JsE#O~?KT*@eu_Gly5c9ycj#dGof5HqaSToSEX_VJkM<vgd@8w!8eB!rgk<eAsA
zm_hM3*FC!=K0bDV*KPQbrCASTdGn}eLHnM`=I<fzz+=|9?1NA=BbBG$`T-YxrcUpI
z^*lc_6dfnJW0<CdOT3E2Yaa73?Yk@YP2a$89<stMmR1n9FPr>6gD4xlidTO!@M{}K
zVQBxk%y#l**wTe~QbRJxL$V%>v1^3KW8=wJDxmWFc={cmRy1y}<l3BvCH50%V$`WX
zrZJ;_<V7dgSH7QV%~ya<aa&XtcIJ|Z`Vz^ip^E=?M_F}pglKJ7Xe}mg;j&3QrF9(C
zKBcq%#$mp@YY~eY?Z%>Q9k3wb1+lRlxxejwZmhKsd;>0%r#BJQKC@AJu7tPn@hJJb
zy(ljfh2-v$5b$s<_%H0lv;KFl+`NJ~h1D#nVhm~(+r{d=)M_(-HJ%x|9Jj=qf%}4D
z9`bVoZ+0Tq@->l%{pWxiD;J`s1LMKu@iyQ51J+FKg%$&@b7Mw=5S(rXnXdm(_uC?-
zvn~|X|0S?I%TXBY)CFaEf@otr%))!hp!~l;$h@Hym69xlQycB~f@v2g#zEax+I7$R
z!dfRtd3@zE&^h%KTD^Yexwn%sGOC6r7u{#AePgicurD+en1bK_m8iC`7rVddfVD?r
zdF-XmsDIc6wXvX3D+da?_h0zllL=63+swbFx3c7<txQ@P%PZav<NjHu+%#?%%W)iy
zm7P_@6I>u#hE1it=Qf3T@(0c%b>tg=#m)@u1I9f&`J<y&7&Cx!<+P8`RQ$_)&kcuD
zCm(XR^-ho@tS7(KV5}U|AHN2i;IgVpA)Po4-KF!1v01^@t_MX)z*Vnn-hDygMt<fk
zci2txa7Mm&0Es6<HuVJaejtMa33*=@{lRXob_CC1dwA{Mz1%9|GHZ0cCWN$Kg`VSE
znP&PBESg4MtxN5h`8V?6oUQT-a5jgE=?dC4CxiKFkym<8M`f2g3fUMNkr98>#zoJJ
z0Ry>3x*sI0iJ{Ix5BS=(h8au;@XiKeJ&aFK_zdnrtTa!|e-;TY^(uJ3Yd!9$9!LI9
zPtfkjXGNd4bK`^Q@SZqUO5Zx6;6ymL-!%y30cE0V6laCIT39^!b!(-IA<EI3db`gE
zwS9-eG2hW(_qvIF)%?dZJI%ozJrvXvc1Kix7|-pzw{ppmS(u%!V?O%vkdPDzk+Ks!
z_dV?$`%vC`$U&|ftzt?&aHsHSo;L0%ul;*JGroE#G)T8H!;>0j`cE=;4jBZ7E>~FY
z+!5%y&=)dK|H3qLqln{wRxDUr&$DdSfSed(VR(gm2CG=+0~1!zC7HKTFH{!oI3;Tr
z2-*Yt6{%jkng4_*EPSLB*arJT-YuYefEA>gO$S}`c-Z}DEy&h~Vm9SKGaOoZ!oH2f
z$x$iJm=WJ3=Q49m_Q$fRltG(9U1#ovTs6VSY*t+66|N(A^$(p<cJw(n$c_tA7|iSM
zkHOYI1SmP&2LoemL9^kAShOz|zrH%cLvFMqmi<kp+2V<1e?$;t(u!p~wFP~0I=9GZ
zWB2RGN0_<_9j^_*l(UH-ZNGx)AH3(8`#uU!W5dv}tBA`&8kvN36~j*kLF4@8%x=U-
zrrFrWLc`W`<7Q{UY?=mBj)p<X10N_Hx&{)h8d+N62=IQZ#Og_|aQ=)Fs9U#l|EST}
zve^<6Man<&bG(Lf!zwQ`o_*;MFZb~Qe;aqu?tjOXyY`nz7hd3jw~Be@hfS<?jTdXo
zo64JYcUaoT2KL-#3R)bdUNOHp;2xWT>gi*7dGv3*`_S%~O}jej1xFtJF%()S_2H3Q
zbWBG*?uuhhXz}#|`)-*G=`O=T`|~a_HY);}Xl~y4S&njLSCBtg&oby)*{CJnvP=z{
zm{L(5_D!_f9u0<UV$AN{t4LTVgLHb1Bd-eh^66qQeR`0YW>c@9T?>ySCbVlcamZ-i
zx%rxYe+zxNQ^jnq$yo)tD_p^1v<5PdcZ1AbzF?S>BPP2BgWJMhShIZ(zkf0sPI+wR
zq3xuYHo%$LKnu?aU4fn-dcv7sXJPG&XS`Z}UXT>!d1<xt6^0Bsj~Gm>3wtxrsh<jp
zMhWJ`Zf4JKT0!fWKFqkpizQb9)Qq%)#v=jDaZxBr{vD=}20j$^6Wy5ckN!~6?gVLt
zS-ixd7oIwLh;P}lh`5Iv6xxuXn6Yv-hWd{N?;R7caLX_jGVd#o`5!;(+-5H8K8ID^
ziU8%=#**(lRcJ9ofvNeKykbZIH=6jNaot1G(&bP!Xa`q4^2dU*E>Pst4UIQPa!JE+
z{i(P=c<qO)EX>^k%S`5hrg0#>gBex@+kv+A81t7=ZohgV`5p$KGU~LDwtXqO9SH@u
zwHBED*p_J|5=^09-SYA0*|8$>8qGZ>W^@+$?uVXGFnT|0-5A0et&?C!az``_d?QNE
zy)D^4k#<_$NW1u(vfri)#6=!Xcx>%LltdO@Hhl6H8{Q2>N0SKDoLvA3UEIKU)|F<&
zHBjR|4<*hA#hNkYtne`xZ7jTa^~r4NcHY1WS3MQ9$vMm>LxKrKxB2XRXK?67yY=m+
z!YcP@<at{5W?>MxR>on~w7<BaLl8&;W-8oi4`=vFzJsl^Fw}$o=ALHU@W)zKbo~u+
z-TOka{%0QHKzF-?gE4cSElQIrxMhMD)?V4kJpWa3{mtpTXmJTEFZr35*VFx8_mG#(
zb%5q2W6}ImZx&4b56Tyf)a~+yx<f9rw&=yE{ADLE8cYll(hllm380*1FQ^)(F}?FM
zZh#wtc4LhqG|7i07LiXPigb_{I#E_?fi-^;U#xW$)AY&bb#2F)pV|2Tb^hYhJSjSv
zUFPYdr=vyxc*y)rXYSj-xw`B;%@lJmaY7?Ek3Yk*-gkp)i+JjjiG=*&6QN+iH?Cc7
z3bRksx!Yc(&a!#j{kRN+!=1^ax1MEoEv23HQlTp26ML_gf*Ji?hGL*Q@r=ubmHn7O
z|BvW?O99c`2Lh9ianH*=FmSCCw50k|wl-SfPi9D;)eKDEl4eFb;D)4pE**b~1=^0K
z-aM&bnB7%S(OyZiWxrQd$SLl*t&ZmwOvlHEe&p)$PsG+-`VH)R!yAV@79)FKWBDe3
zGfm!YuD4skYKf(+>9U*V136EavWIof(}00e;<oE7iFeh98*&4o?#pSuC1WiXJ>Sbb
zCwehe+Hb5(w;bvhJ>+)|^uwyN5$LnX1pV)BW!WvX3m-NR)E1SZ|C-TU?L1n@p4^2?
zW_|NYp7NDDy<Z5S9mvDmKM*SHOflm3BrIRHgZW1s=l*F*sGUe&o6TFf{iy|D)k620
zbn*dscS9+8eI)YI(Mr>Y;?s#r)Np4WXtIe}lsE9s?Yf}T@IV&4CK!A-c!EzF^{fQY
zy~E`{ZrnpGj)DhV`y{hO?kdG<-*H@-pcmws_n5jN0b+-Dgz6Rd#YO-1hxB)S@bN3!
zFVMYLbLkVSIh@Du99@g|O}wG#8tM6L3vaGF%E|*2;CVv;ZL?N13@3Ku00n4V<3PT3
z5tp^CC9Z!n%T@}!=7%Fp`_rJ3Eh+PnCthcn<ee;>T`V*nn~ba6SL5S-)8TuY16J#j
zA?eL<Jkve~BWA9~+Qtdsejo%B$FJk+gZJ6fze7>~nKAQ8{YiVctdJcx!(n#AF>GKd
zk17Hd<*^#v{eI-8dnmWt-Cx-8#2y-(9JyAphKE=_;T55;nT6YBCVSPB80<S)D)q!^
z#F^Zley5cKR)bnLl(`#wLfW7Sy!?|CHrCIC_%(~b@b@J}eJgQ>_^+&H=N_)k9wg*+
zr|+Tt4=i)^d%@|iN}lzb4OG|6W0EZ!ylQO1VNl6JICgL>SZ$#k#~*Lm-qpR3^KzDP
z(ghn2rwZC-p+d>d07&as%yof}1X=VR;r{d0^m)!>`e7ZpWZasP^WTSH;j|JSO_~3i
zDH`hiC04v@4r>f^M0HnZ^iLhaGvCPJ>#H=bywn{R-E#%waq=xv2dCqYq>=q}il<F{
z$zsy{pmeVnSRGGiA=5fS!vPIioFrf7>G9~~F^yUNFdoXDhd_ZvFJ3l%K9B>8r)?+Z
z)`Jkirkgd^b=t?P^we=->J3gwgF!dZo|_*i;a1Mm;AYoJVA3~?_*_ecww^uEblU|c
zlicQ^4?MZm8uGiY+$PSx;t3_>y|#*zf@Js}9=SOlZGKolXHO=pe6;|L3k!s_fwQ<_
zgI-Z<o5R&|%ChaBNZh0n9{OPv<ZY7S{gV?w-N6l|-n+!zpU?6g+F4-f-4!HT#$J|x
zJI&qJg<{(D_SE^}0WymRVrHu=77XgeP47D4bEh7dJb-e<w;TBG-Zz=wW-rXT+7mRC
z^EQ8&#WHVyWwO%tAU(8)8PY#<^`VC>`C@l8x}N2AZn0oIIgdU5)Cp%dOvjWMZ~VG1
zf%KDl-a7phw|bU<hPhu9P4oP*^wB`#<RvhZ*;B!At(Wlp*?4@^A|;KMGz!;l{ORzS
zAi3F-caL9&x)Z;N>et0won#b(94HsNvqB7=2%t%RDmMNe%<eQL;;QC_wEO8G%$VRu
zSqz3D_AT6hUI|xC*~|Ps_lL}W7C30)B5Za^V;OPO)3A3U?wB?Ul7AQq%2(wj4L_da
z^2$#___+b-*{hz#YvO3<yAZouNKmV5;`f~Zx_6mNnsPid4LicTOQp~}O~+hAsJFXc
zx@c}u!&HuU1&M8EjOgYFA+hA^U%miRW$EnE^_5V$VjjwdM2U|_?V`VpI{6l6vp3^c
zL(HS6Olf^aq1Z~9jgKm(v0VeQ(2=aB{cCPiZWdD$T`-<IVq$h*9OOA0RrCx$?KTr)
zhHYYcy0iKZ`<KgOe-(|jVN6>1T+km*W}fO5te~4Mb6t@{GdI2eQzNN+CyxjIOnoc%
zCOq$hEjFHYz~>`Vup)}OU(Sa>_JL3^4)#IU(|w?V{DrNM!eu7+#M-6HS>yIyJcoJW
zssl93UF{0hk2}Nc-{R2W$U@>EF60v5L@enr4QEu(h6XD?EO>T_W%y-L9~ZrQ;vxj|
z-s5<N<xOJmKN5|K?}Gj>HR_k=umtNQH2)!i?H)oi?DeH+@YX=l+7Ha)KSt+77(Vyt
zj+z<0Q0q9Sq@v?c-0yEgb6yT}q20;uPhVMb_&KH;eM6v64ilPYVp`8I_C3=MJ#P*H
z!!Pk-e86DL&L^g2M0-{^!G%j`x0hpY;0Atzm#&%wkF&OM{k(^4ck6zpHZv8?pJ{l+
z%ZZq!kb<;Rm0<hxcx;^!1IdLOnbG%#kUNSvmV+LNNuAVSIWYo^HFnH;4fTup%w|yz
zdzif2ZPE0T12`T0#BE+@(Vd__7Q8&j^1Wwcs{bOa+IpWW+m479l<zGKSwlX!t)d<l
zVCfYb?C$M=vQ1j<NxMq&m_yx;>EJnc3Y%d}2Hl5PekZUCG!0#dj|Tgr$$2$Yo%)X(
z^krh|P14^w9V;<LUlT36PscJ%09x%`i;3s_DAO<JDa5?8e?>f@!G#Lbu1);$e?O2X
zTF-3y6N{mddPZ`axeLsKsJF<EO)vw$<ni#huaO&O%;AcoY6yHX7Xr@4qxXx6Xx@7a
zB>gxO?LMTj`_Jq!lOWaV6|Z^PWm|q`4|V>%nvdDDJy>Dm31RQQL%{mvKy(Qo3iT@%
zL*d=N=wkH6y3RlG(D}3@NSTAV#6nA5O&Le(*mcwoK*##+Ot*g)*E%zWYSv<&+5L;)
z>(?J+Buhb+(;zf`=?1ZtJuywz0c!gNL5N5_6?YSOQF~XE`1TaS9zJ98)x}Kq&6+h!
z4cr(DnCjjGD_47>({dHhHYc5Z0rfs@ZV_{8cd@jYYH&&?J>{-H&Bi6nvn8L^EN|s@
zPv${X%py<@s1nMi4S?)#S9pHMi@f#TINtaqh82_FP12Oim1DjrDl$oz{Du4>+XsOJ
zl1erftwY0yor=Krzq7BoKk>%VGa#kYGK>k?%?oFB$1r_A+&h;%U~Z&kw$reRJr9`X
z{S|hmcrt1$`v~g0hq$5ji)jC@JNX8ZFwt!zH~mP=grO(Iu!WUOeR3O@<?d&)$Xh~j
zrUW+r;{eLGTRbzGyiae=35|>P5)W@bR~&GJsHJOJjZX`IexoC*x6c$MIR-JYbu{X}
zd2*YRcUi4fd#<0|p4HcW<myE7)?F9^y10AX5Z+Ys#)ftd`+yf%x8paLsvvXIf4uGk
zb@o_!v&PfynMtSuv@0J8sznjRzIn=2eOB?Rt3#<Xrz^&1O`!eTFQTg23T^y}Y0|NT
z**Hf)fPDvyZ@(V==C348wTiQj9Z*&HNwhsEL%aSv`QsajP=7uJ3)~0t)Xl>o^IH`&
zom|30$RpiKop!#S3Gn@&(U_Cf7j1WsCLa1!NU4p$hDnXYiC8K;noM_y8TVQ9ApmQ~
zi6Hq{Di+e&XtOf}qRzSU=AFy%RM$%^dzped_vVUL&NkSlcf}eBWiQ|V0LpcaqVaRQ
z_^2TfH1B>ErT1R)<~^Ud?4>^|i;c#YO%YI@^^HZ1tmN*ohna7ewNN<nIS)OS&J$+6
zVP*@aBDXii9j$<Nh5nfR){FVNUE#)%!D9N1a5z6_IaohzkJ;@<VdItQV8|beo<DkV
z%dh=$x5p$bG*EWhb{$snGyIsj83g`Fd9Yv0#OmrNLe}gF7)AQ-sU0;u<?te0M4sgW
z+qo>9*y!r2exk{U6x7u_Lh%p5(6Wux_$O{++V^}G_>AstPcMj#GxqW7x0S-M_<5kr
z>nc_~@rH~cIZS8PhetmgjkQ6mNk{xBWT!cEr|il2e5@OKPYMI?jU%C9bq73UZ;k#l
zm$0yLhq&d`k)Z5XB8L7QO5B;VC6Z_5morVjily^CaR+&OUj7z}re+72a@}>&X?i$l
zkH#x%WhvaBv^@U}oDU1}M&AqM_g-egn#v}E|B!GrENv098$a{%kM_9nngUE8*@E@R
z01VuAnKkEf$_q-E`91P>2et5+6C=R>lnLzBjQ}P7BQ`dciy>1cKxA$)^)Qu+=5G?1
z_HvzQJ|qrm#|(z5?rY(tjP9oUT}A!3y-Y!#eb3!pxoq}S^0-q@?E6Aq@8-h{w%rBG
z&(sUC$c40A;;Dai`=3WvSoD?pV0U`5yvu>0o7ci}ZxI_}%|Mp?A`Wd7XSmk150ezc
zT$Vgt#ltHVps)Lz=h=9HPsR|;+&vjzo|}kl*w4%%A`;zt&H~d);*HFX1}l8b>ZR70
zk@-7ymtN%s=Sx`CjSdj<({>j6W+7{mIAQVKI5ctULphoMgp383xU_NwGY(oQ7*c;g
zRn0r@F~<`82NSpKa|n|Z3zriD`au!(N!K^r<NIe#LDl&F;J9@HoOnk)iXYwznnyqJ
zI_o&pJor<H8rI5c?WhYqH4wh*mZFoT7sRwap))iTGltoLjWU=Qgss4;LqV7`Z3$#(
zukii9Q7`!~e{l6=OP={i!whdMS#{=0@~d|h)nf(;^5?)J#yNuNIa6%j^O-s1jYsW_
zv*MW;Q;b=9gw@VC&vXOxx&N~}tfbWk>dnqF1KuqOJD36vx7%ao_Wr1AO((zZYOK00
z!-A<cyzqb$Z73UE73_q*heu)aM_;(VbPlEkTA(sm#w9r_7Vr=4!rtlGu@398a42xs
zy5G2JPa=L@yaZd@zvRW622j7qHm;rJUZOijy1P?X;Jq!e+?d14d;h`x|1DwBwUaUE
zaUATve3Yxo=`L4fb=mHI3JiMWiO<!p7*f*}rEZtRz<-m`?fPg?{{FTk({?g%yxJs2
zsVjJM?+q;cgbb^u|IBAR3&P9~w?s}Fck}E1VEE}*W;mFkXxrNhWr?BGWBou#I7EGo
zxmWn0Q|rLz;2cafcw=>B2G8`P%-|dc-uO)eo;~kyy-7!?Xj=eEzbT^b7k{yJs13+n
zCP4LpG^Xx%U2v<L3L4+ZShbMydA&NZ#P&VlR1a^6y!4qncANt-|21(*N(gt4JIs_#
z>3ri?88*&w;W3iC%&~ksQ_d2Eru#jiU{n}<RevDvUo&U#b6I}LT4JP)W7&QZ?$kh<
zS<!N_=u{@t>{*B#bB4oXVFAcG?Bo8<!_heJCigu)9k#fvL~R}2VP6hH7f0g$ImBSL
z>ll_%{tu7b`hsh(kK)IEGy}=`c&3{gE@<(PAb--za@;p@r_;pz+p+@h2fN|5v}vgN
zaSXSzUkFatfgsCp6msq3Ky_Xx%1z%1jaw&SX)hlP@ahav1-2kNtr9cC>cm0ENz=Mc
zdxps)pm5)>f^tNj7&5LSy44Ion-OY!uXlzpk1`f28hCxbWTp?w;tgg|pz0xD{ncH}
zs$nn|4;cgI6p(7}e4cAMkKyV^YXwj32kxER2i<3H=EYmWFnS0*^EdLuoB_|7Do%q%
z|Ee(k*myLI3K0AgOt><jm(XY{!{=Ydf^}jqG}umKX*APi*CjAbrvTI%hYR{4CyDKo
zr%;8=CazN`YO1dB=c!U+fPCPtn??}FEC@Px_C)>zX^P$GzB_XSw$1edi;o+b)gtOD
zzUB|7j>JHsRLdeFWN6Hxo!_&L+@@Rw`NuDE7D=q)YyS!bJ=THb{$W9@oFrbOOsG@E
zL^Pw0@|3%BEcB(0@%i4wa0z1a`g#^c{d*0sIzvVQb)_EcD7yE6|K}9N&h$lP?#YsB
zMtYwz0G=PQhy1JKDVtTqG{mlw>8|s=5DYbg)}h2Wgqv@4U`51_$(|X-gW6exA?^W7
zINA+8#aNb@M9)b25aK{J@UZM$=IOtk89qmGm;IwKJ8>%0*gauR{-tcy92G|7#bfhL
z8Jr*P1Z9SH(EMyKXY)r9%iWWkZMB5dkSpA<E{=GsbwXI#N^E;UbJmhy1!IICD!Y%N
zJ5PyMZP{vKu1p4fzdV+Fsf_DQS1{9S){wkt1<04|XPIA$c*TRUEPwJCtlk(8FE6Y`
z6Ja<=D!Y|Pq`!!R4lYF1$RXf*nb_M&GeA11Sgddy1N--lgu1_pslfcHPau&yOqW8C
zZwClkFanJ(vxP=~2j*KVprP(!iIh4zs;`s^sp)?)w;`P{byXiUSv3%OyUYA}mK~-!
zO#(@3(B*`+_t_n(C(2#Jd5Cy`W^5}lTCoPy&3#4Rs<~L5u$tK>5N|hm4pwYz$Ml)x
zQ{i?jGd~0)Yo}xDuHn=L@KEUdyBS1pG{N1EX<vUfUXYkB5ej~N#0#FSV6B~WToXSN
zTt8f=Jo$X^D47Gt_9tRwc`lb1kK<8OyI^d$0MLAMK&$#|tRQh97%j8K%pwcACkBZY
zWl<ol3E>v?W|;lQOzK7N3@yZERL`3vW}gU#*~M~jBwknMU;TJ&zbUBucLNW;Hxjht
zBEj%mCa>;U!7M6v@RWtbUK{XCh=}V31~?;1y;oqy=XxfYFA+3lb1->^HK;%2i47P3
zW0I*yxzm(Btl+=-#5o-fHJcys%q}|qvd$AckzrP4IHaA7;dLEzS;)p)EOzfgjOj9&
z_yc~B5$K0D&T*_&-3MCU6ZdSEDYKe54(~U*qO6VPu$hO1pufE_>d$m0&C3ye+aF+(
zGbebN`wD1`>4*1+ECsti{$hqtn|S&@CHZP78(3i~I<7?4I;oA<I<8=b1!;<#XGcQ)
zbu~-8Qpntwc|rs+rIin-@qj}LNEy2p<5kumX}mAEsZt=!`vGsvN@qHkJ)&c$UpV=_
zz%R}jQ+*z@uv>m$-eV$<%m||n(OW!y+bqmCr5(nYJfU&hF?K>{1A&{m!}I-ikg{SW
zR+UeOqGgtte~5azAQ}|8%gJA=^)lU1%7cCw0j8$IVA$p)<PTTinfaku`#g^6_ncum
z|8Vhr<5W!DyMaZ0BCYVG9AzT;-0J2tW8aI6C){Ou&t;fAY!7wC+dwd}_RK%fJ7vxj
zQDQlh>yE4wBL4P5HjgIo_^qsI=rla`y$kraTEq9m#gOpk1+Nb}#f_8QdE5TpSifGv
zta~ZZ^(26H0(n4&_JY*3I-226bKL`CU$jyl%_0~Im(0QRXX`QM{&tp1GrDBS4PJNL
z8qz&_z+Shm)Qdg}(~<>reS3ym(+<|3_6v@2sXX~i2Q*Mt$xyOI^j$d;WO)mO-SJ%^
zVD~IcbeanFEuUDzc$$BlWVBn_P*T3q1Tsz!fu`V4u={-hKChdIn)In$LwhvkY2d~r
z6hkh&<OMG*xn^J|CTsNLxnV=_rgRD@Wt(^{@#3miUJzpLt%9&k_K;?Jf^Rv|8Punp
zMK^T9lAYu~wW$^?PmIDv$LC;iM{_WJACJmO?qHHd{tx*OEWf#h4O5XnuZj4u6LN^Z
zJPq<ky=UKD2f>+^*_dv$LyfGAOaAD_!;Atr6pkU^wx^e}c|Oaho$vh-lTqH?7mc#}
zg1?KJ`EOni;cJ4CcYMa>8!zzm0}~;6V>UB7wsQa1UQjX5k9L<POdd!bcm0WtowAlP
zkY?=Itzr0b>pYa_6bQ;r-eUc3GxAK%X35Uv&CA?UQr$Iy$;R#xsxEI}1(VP4`{PL8
z>9q=uxkTXpwNV&pNayZr_i?%54L5Z8Q}m<Vm*n|0uj;~6qIQmtXcKjjn|yM^{YLU(
z)Bg3*XnXREz2+4j)GNPaB_>d2F5#0Lt=m?k_NQeff#2>ix%qq^u3ruB!<@n7>2w@6
zAsCwNXkYz^vDv{xz<+oFD=%Gwnvh}C+ooWl>zuLtx<4dSALRKqVsth<6+NSdqT^5h
z@MzHld20*1ri#M7V<RxTV=T8=<OP!1%egGyOf-zyU*cA(K&^fvN}`T<wp{W7tHdPQ
zAy4IuxD1~8pV;|^sTliy7~bsK6B}-=#@turdFocrWLHZCEwfNa=Lo!k*&!crh?lhM
z2d1sfye2J=`;U+3C8Oe?^V1oi;l;d)a)ZSK*Mj8M`^$}nUxdVvk>I&+B2RRk%2lBo
z1<O;@F|ctSlpSLbOlM+cbSTz@)Uv&wBH_eGB|e@=S*6?%>Zzq{l_7~oZ8LChC+g{o
z?2Z{zpKy;A3RF#748sa~V^S{*)Q8iqy#l;^$D5*Z@ESpq@~2pK#0+ECg+l7xORRR!
zNXn)}Q}4T-!h=}e9$9pMyHv&8$9!gCyMr;nb|Su1hl9h$7(CyPa$OlyLGF<)cs>|_
znps-bcsPNp_bw5%<q_cdqLyb{5od3vJNozjjo1FR9CJL(&}p0(Pwz9IW~*v0iN2*!
zd%O}gE>}gHi={kw%OYs$PrkT_(vry7;m~+%IZOTf5B~K7WtqBc;@Y<5C6ChmP&Vt8
z$eNCG)2cYsnwpiAhjzl6GGDL?xWtX8n}w2ROL6~3DZY6+864M+!{%4cU}jIxY2kYA
zd-xmATrvZT8&%lUD;jN9hO_b~MLeXnp7xi0y>wUmv8ukI7&ZDZv+?=FGjE<2Rr|;{
zK3t8}=WKcQe}nPF{BhKMI+&?v%9&xu3x!?HGp?{`hhNoCx$TMpSm8ASp5Bdv$U|4T
zN;Zwo^HQdtaG4pCUlGT%K*+tG1T1bfO13Uxh5y)bN!gOi@sGP<%Z*j2oc2Zx3vUnk
z)Y(z+doNIp+QHQ+(OhdB2=ce7g3?SYO16kzHamZ1?#f@7I^+gp-G5=<pOJ=lY*$HI
zN*J`6jfUE;_gO04-+9$q(68#l7U`B@)^~r5x!g>C_vb9^9%ZxE4Hngn%URv|9%%Rn
zc~sIA9y@s;8h*DI^)-FD-9ASwtm}am{!%Rd(;Bn=Tqs|BoRyDjhfOovf$~BkPrbVm
zQt~{>w>%azlu3|(mV6u8JxlJJS;E(GCwO&25I!vp#rnP(?7pubM!B~~r-*kfGn)4K
zmS<R<{t(;!jyy{}Tv<{6S+0tgW5_QrSolRx^sjTn+;@Q(Fna*lM-Rp~#l#GZJj7hn
zW6;1z2kJW*^bMZSnmB=L8zv}BAC$5Dyft7#e~)tcXXf~|3ut5KD~3gQ;>~?)QTP2X
zK_{Joo#%L9LHog!O;eP_bf7u5OvjTKhN3a=0yF6bU>f<7?e?enN0!5rd!6IMgfP(e
z)beu0ZPxe`aeaxUQDC!^kW<c3+LL}e;~H7zInpD7FR}fKFf8jg7_}YF3-$AV;UOb$
z@MLu<mo2juj4pO4$$z9Tuvox(2U`gJ-2+uEJH-08D(1AQ1Dc<xV$$h%n0jNDsClp+
z8&;BL-SvS`wfF|NX<xyLRw}^puL<ZgL%>J<=7H;$JYJjnBQxx+=X}8Ll&5fH(oz2C
zJ=O*!2lRq5`w<I$qG#G~#D!iJB}fK-7Io2kc=KxqknKG!R7)aom3#uKUmAr#%TK(q
zZL?Su<cGD@lX;X?9k;9rL-#=|Kti5><u-uU^&_Bu>`Jt0{fCw7C!<SXG<9@3W6|*p
zwz~&CPgNVlMopOzD1FHu8w_-w&&T{tL*eD!wGc1&M2R|bbn8jlpOFtPG;SMf-u!_-
zo;VQYXQo5=3kR&|Bggv(dJ{*)7tJnH7j{xOJpHs9P3OPkIR|<}^F2STzCM6uIs|gx
z<^<FpIj@i;=L=_AR$-lol6c3!<XtI06Fd$IY;G_$y-O;Oc7kg`9U-`34fS_C;D#Z@
z_!~@p5X+yjLYxPSyuu;o`a<Hc(f;l~2kv6O5KpOQW8i}d?plAFH4Luh+R$5yjD|;C
zHnK_lx|DSNzg7U@SupU$zs!8H2X{|9#Z_BZ!go_I%7Bwk{?G5kNUkG~ehlg_9$@FM
zj>U$s%gn6Y50tj`?9>xWeDjwZmaX;xeusL**DY2Q_!hCU=tPwLv{(qd7Yl)thC@|E
zH_~P%V$6ykS$6at=HEFS6}#p_SXd^L1SGM{wkGPl{m3GI83Xdt>6ks<5+$)JQML3B
z!Ah!Q?juxiV$@t{t__9IhhCiBKErH!QjY2HLT0yU8t(pllIcGT=QDUOsMxfWH#vEt
zJ)4iKdeEE`Y|bO)*=*3y7Ema*i6L!o`R>1hA+vul)6IK8zHfJ8A^jn08+u`V@f&{3
zcRfUoZQ=zxsGHhnA={toO}oUe%sX%aCfxhTnl6W9_27Sa;oxCRy6g>a{x=MpCM(cx
zMiujqx29aeQblWbN3I;4T9Tdl6F1-0ANK~QU|^Ct_<sAsJ1_Ic+Qp9i7%auov<Qr!
zMDJG`0q+O&1*7X4ma5*(XKTkool6+n(EUs2Ie=A!EaTdwGVau6CY`}S5c*RWShbt<
z!KuR`&@c+?=oqomje&-LHgNs*P#!hPl|8-`0J>cftbT1Wgl*O`^V-2!yUvuf?{2Vn
z6Xm&sHgWS07T^(1n!ovS^l&A1e^LXpoft+t?90NP-5tSk=XIW-KE&$Z`LKdd5nNe5
zPIS9MI(z+9-q?KvSMA6Zm8(L8sJMOHZ|GRCOZkr%O<M=b$1$M$;(%(K_gwAryQo<-
z0!vqfK&(X~zMp1_4(F&twQmQsT{|2z$DR|S#y?`x_Htf8UMh2Ix=#k_c~R0;7J9!g
z_nhv>L&LB0v@yV2Z=dDa_XEiHJD8c}&>hq`iGGV-dY(KI<L}v`>@O2g)UO5k@5|U@
z&A;4D(hem*Kl2I-GQljp9mJ5XvdCUVe9m*cvXK1DG{f1+01Yvb+|Y9&2Fezpao1Q_
z6gLhVe`TQG?#rcfr$b7_Sg@^@pzg^5UjFlCc51>U>TtKA9iBP(WR6E~Z^|9%jtIJ?
zW3lewGz<!+tn`}oXz&RWD=o*-EEfmrofhIS4>{UA41!F0Ck)alLEbHh<@EWRsntEj
z){AGD<ozpFO&r+PJC<nrz=X1@J+N_4n!xXF;b}j1Wrce`i}K(Vtm?(DylUAhki~+i
zT0kt&Dmg@Ue#<4HJ@k(2zOpyhgQ+is_8~gz?HsaOOzRfU8Y>?N)#2OOHRAlpC!Q4K
z(z)n6ppeDXHL~4d%kWr)E!=tN2ic9+dHjQ3*u8l+_|k8q<?Kq7UHyTTKOTzi?XGkA
z{G&{N?;5LhmeaGB2t^e)*^Ft^plq`h`7+yKcHmSNwQxG~eK`^<Cq>e4%7U3T&4h}n
z-C1e81Dtp7LVb;UnC%7^XpSr5ZLjUYX4e?B88Dt1Y(}%f{T(32d^9@sj$l@lUDQur
zfx3+4Jl~-s-uIjfD*DdV8NZ4e=c(xPbS}1)MnUENbx?D325JMll}KH)#WP<9VW{;e
z9=h`)Q+^u+?$!e_aO5CpnDd#t1>2#|PC2|UjRu>^E?j!>zEFR80$3DM&Vh7yzKSya
z5eJ!E=ta8SXi>i+hWWb<X7Tzls9dAxw(&~H>AnI~FOD!n-WRT1Lf)gkq%jBjL+DBa
zGjt6T^}{VO^S^YSGk*g+;Vy+UY$+t~y~s4yySemq89S8@VB_h>5_Jc-?o>atOMb;H
z)-OTd&u5tI=u<(R*pF!s{#4?&b|f^)_E5iMBr|qbLdI{2kUZxoYmHZ6^&@W<7P1yg
zqf}^@6AI1M_593FeK5r1Ix9J118jf>RL2)G)f+XJt}qI!`ELCCgb<AVFb@s(`-MvC
zIj_cK-g3AdIG)+aLT}K1BWVQf0JjJ>N0Ycy{y*G`82QyB*YIkC2k(442DGbgE7XcH
zc%Qn)R~3x{`C#&+AHN|apPo#4V^eT{Jp=+es-fb`H<mb+dK<I$ih1P%NVFba8{b=E
z$bvl1CZ)4N!$zU@aUXOYPFnx5KIpk~1xtxtNL?QZSlZVLTiY1y9^#4D*4pBX7-y^?
z7G2&|+A}X9edK!~yW_F~om{^0s_(bBXU`ZG_MUukNgkrAbUkU}`Il8L5~ig)v(k_&
zXal7z>fd-s__>%{cct9Jh=JG~@{Xnb;sz(qkPkO?4t#w&hCGbxP^}&=NXEwtS^B{k
zR6+W7+)>ul(+}<HZ*a-Ss>`jW^TE4m9NP7K!a~fvAg^o~b)IbyvZ;qqTJOzWgId`4
z?=w&y70fEvd!x*lf<|3`UikYR9^NMwEXF5-YAa<v60ETCQGXcr-3jGSe-vGlKTw8;
z-jzS^=jHmV%%YWc0AnNhFl!rBjnVJ~75M{VZ}R4?qcOj-iFbcB2raY2K=P`_OENJ)
zw77GT)ku~>A=8O5l#@zz-AjF;eOXjtDCU3Dao2wUPiefdqNqQgK^`78Wu$g2UPc+2
z5KN}e-?07?tB`nt*)h7CYLc*iw>8&ZaW2sx34n-a9Z=S1BKiz6#l3H)qHBIG3(uK@
zkvT=I;wMiQHSZIbSG;Ap2V9Z&SOd8t@t-t7qH8^Qasm(2_j`gjJU`01{}%@{t~ipO
z>WaqKI|c2pPkGGCWe^(Pm3)TVS%bd`N<Lm?g+AMP2z5a^{`Q)6&U6M@*&X50{uI>v
z=di|^MwYQGj}^7M&HN7a!t&5PTy-vz$<D-y>I0<Rtm-RT?&}Ng-`be8s~b;z+#g(>
z?s7Am*^u^G%cCyYqE<Oltex708+QeAL;rWo$?gJ+S9d|jBnGWN&*GBU+d|}oM=aHE
z5NPe+k(Of(K8Ky4vD*`tV73^mVm`9SqG6!j;)u;-zVMB8v(eyxN~m8w9QC2Z30!iE
zI!JHu>{%nZ(OykmFtM2TeJaKn{HRmac=^~MDd=1lgMH>0bRU_^GhR(YS#)3OjduXP
zo6f~=E5OnFPnP!}09O@H0Li4Ec}9=#%usM#4EYvBI#&)ecx+&~;tY6MrvTpcn0aop
zg|eu5sBf6fq^)~+)fOw1y;TXuL>us(<c2o$ZMpi%dolV_UrY<v;HjFP=)QD4sEIMA
z{<M}gnL9u+>jVBjIC7gRYhHWNog2@DVCtl&OgoHvfL5Mk?_)i2cP7ow`(=>apT39T
zYe8%3jp538)Nf0KtkPJBtoh91YdtBq{+gx!Th5Ialf;VaQ+Vqw^2_a*ds+MaH~w_q
zTBKASWVh1X-TRM{fE7U)w$Q+GGA{E_&247a^CI{583n6I1KN9HF&2(o1X_<aL6V&2
z)x2f{kC}CdWx0;P`tj}<vp*bVJ(q*F>M4&f8HE9niy)Bry)UUFqi$C&&zjo~<R_!R
zBI6J*)~KkvmAo;1f9Li=gF)3{8lUmsB-+^+c-euj5UEh1eStZKs;~3=^W@MPK$*8V
zdmg(<4&L-z3wE}H#{mGte)dD3#qOBXG7(NaeZ^0G48q6@In30zJLU}u24j1ApV-}I
zhR_vk_A?u_wYPyRi#cEr?g)miJ4@pKat8PDWvq4VFtnXyk7mi%*c9gqGgdhf2P_an
zZ`eb1Dt#6$Ke7K~?|q|cT)%(+g(OLqBuQA3q(UmGb<ar>l8_=v5<-$J2}!afNs^=z
zl7%D*Nl5oK?Y6%m_L4TSlO##IO-MrQbA8S@PtF;?@f+iNo}O=GtYP#dwbpfC*EQ#S
zzhCd*UOaa(=r`8-ko!5R%eXhu8b2Kf;#&VmG34w>R5lq38X>!&K>3K3Ej$T|!yZ)2
zbRc6uKa}2Y7A1o^QSKi}OiG*sZP6WFxQ^8-WjNWWq*!`RE~*SnsAFdrid{LBXZl<c
zCGVbGl_qv&z8#~nU<SV<w7eDCZ+!seYb$h{$~i}CqcL)j9F2zY`451Q5o<%`Z+3y=
z;Cm>V@(3DBH-UrC5GK)dD6uZE0k`PM7})tOSTDZM!nL`#T)UE6#ni%-LEIlbRmDo3
zc<=Q0a7vmkr{bJ*JhxPza-Z_&&S`Cl)i-ZUr`1@co6jVMX2tr$F2kI(#aO(Z`--DA
zsbGdTXguG^Tza=MiM}U##@b^@3FjPL`Nix1ex(2BT}TMw**f#0@cV<6C}~oOuIIv_
zU|By*zV{JYW?O;k_obrop;?@Zvll9V$w_bKAiVyYFKWmSF#Y|G*rwu|JuAUcI(;0g
znHq?NtwTr>u?ahB8O!ZHh&@!<U{p^5pY*q)=R<it^)rO5N81B<-2!VL&XLRnQvBP_
zG=_2QMn!vxIx0(4TnGc70Tan1@P#mEPboW;6iw>MeZbDgkldFHrNk$(OwIj5t^2Ie
zuIqeE&ERwQtOdxfFUO!~i#dZbhWVvuK~d+koVOMZ(&&1jVWJ)xpZm#NxAOCI2InYh
zv@w?$uEVVGgb0h{Z26bPn6Y{wDzr01ji5GGe~iEXvjT<O92@2_ushsdA;a&~6Ep9}
zu=v6`aBXuBo>lc2f~+rq`wzaCE)ap`A-P<cN|MCHs}T>@v5GP4NHaZ*f+h~2#6A4E
zPTqvB;|t-QAJ^1e34|pd9nfjo7@i+-i?eu_uzT~&sckOT?L>{Ik{w)kS1odO_Iqf*
zd5KACXTf{klT+lb;65IeT%E9nX9UNIgLaru$SoN*y7cD#rh2h#xSV8V0t@(dPptiG
zF{!s2!yK1tW}Y}3stS3+VU8V1=hq3Id!!UIdmUM)eSz%RH`t->%dqJWo~04t0~TM`
zlYOi))m~X5I{D5K8~aKrvFQrCJdK}2!v>P~n>kdu_iu36Gl*n~&aCdsa&*`<3(E&j
zLBG-`pjiJ!bWN9|q411l%-)0w;W6i&a<=h?BcQq57hT)3!0^^g-t#>sJnR&S1qY2N
z;S={){p9o8yxtIXCJLRF9V}59ucn}xDPX-mMM#{?XM4Lhk<<8>CArg=V9w}a6!m5>
zm1Z51C-QZu9>CyxPX#G|%>gCXx|D7m2!Z$c-TF*#)W3R)sncUv*a{C)INlRmzTX35
z&WW%aFoAO#!l7)~WURT|8KXx=qgvohY(6VD>>I$!Y^<1ybBzmL1Jhr@wU-x`K*KH0
z20C3OD(&OpdeH(t3%?*H<wucE&K_n{*Nf_;CR|4u2aQ=1Dc7}zwO{)TdKtXF{Ai0o
zEuqNl>zPuL%*NLV<g(Wi&((6>Z9+9u$6sL*-RW1$);we;c{~H9_y?H!&P8d(X(7sV
zJ_USpgP`_(P;hY_(~4bBb;CSR*3(jKd5dJ&V*@kH=WOXdbH&R(wm5!(KU!T`hU#KZ
zXwDAh`us5zp?{qPZtRB+ZV@E^G=zL`JJ0TT!_0rog{+GbR8&j?cY{8Bb|D3;lou?1
z5d))SP4ep#M*g|JRL;E*EgMbAt#t)0xiJ>wKmEo`PalAYaaX}(8_%RG&_LJOpTTr{
z20Z^mLZulCsP>Eln9byQ&`)$x&GQ^|W^t|PqIvwh`7D?Y+)TNXEXb+ZlUcjh2(?AL
zC;RA8iJSc_I`m^Hs;tt5fJt(!j{Oa^1p|`E2g;9YuSEMYZ+ghHNfbvdFtxRr1x@?R
z)ZJFHx)&QTdpVM49`9c&hEmLnC@eb_fj$$1Kytvaq;%&iL3+xWC8qzw+E<Ta<%XW5
zIOolo1hJq~G!GM{4_M2^JAxvnMvM@Esah@xs_S_Ss?9Jkj`wlS4kq=>%S=+fk!R(;
z=h;%<nBD3$R(<9xWVnu{(#zV+c!WQ)LzPgJS^}y7FKYJ)18G9LkXe46wMQ?9K!?F-
z{k2eVy*3k6Z@JE5OA8CAy3YL1m}Bif?*%Zw0V?x<g!ZsVXy^Kq)(gF;c18exPaaA&
zw*pZ0rdITLxr;?vw6ap?EhO>pRT4ITAhvZ4K(iP8d<fD+@7+<PFaHJgWxJV2VP7hE
zphq(oYoK12MU=6+A4|=hPE9AB$@gs(H7oA0il_6aRr3k=FX&_Zb_M3EMxslD6(u_u
zgWm$Khg@0?LGoG5$xjQFhs))ref`KiWDZ&t$1v&FF<j@P3CUv?@{Fjbf=ln^6kNjl
z{WyWSj^bI=M!O(<k&G^P)uG&LC&9_Sp~TOsg;|^OXY}YiFuXV!XC5AjmHZqJ_&SQL
zKK=l;G((V8E~6)tr_;=*eD<}|ifYCW<qV!l6q~jMbMHsfC(cWN4GU4-@~=4OV;lrM
z;QL1Mzk>CiCxZ2{OAywL>*C&WMtD<`=wTT|A=(SEu}B+2zLvq|vk~NLwty1%ZD$^>
zuLO0)eHJlP1<AK&GNY%QMgQ513XTf&aK&P(J`QNLV=JjY#0zojT*;98=OYpqa^KNk
z@UWcUBPKitr!fW?R5^h8bAL?K&N>$0KZ~hGDyU6+D?0tQpE+2{nEoL%l)ed~_&r;Q
zz451(ntui3)>@ddc^P(?ykxc4Rsn0<3)aEk1P#r9nQ~_s2L1k(owIbra>ZpxxNCvR
zpPB3)pCx65@z1BzZ=B8b5@eYntYz^VLCWi`f`6_F<{zJOwlU|r2;pLaatxaD%w`9_
zCE#RRP-5iCJypeTLE`PqN+q4ZdQgLCJ-r8{`t-*n3qLHp<HcEjovCKcWOSbuP8n;4
zqVK%HRC{0p>IF^1phHi=xV9e3#~lS@Uhk-H>}S73u8E!IMUvpHSEb9kvOzCqQ?kD;
zd2D<jFgMO0eAkVs`S0p(UI6N8&CI-b3d>BXVl~G@sdny9-Y0j5vR?*N$?tlS<r%_d
zK8r1VdtNBedCO#4zMOe7kz&4XA=RETCfj5M9*azv!^ux9X+kt<Md^{u+?+{*92`qM
zKML)aQaLx^I(+&uk^+wKb#B4^L$UKPe8Ws?KFN75Vmb2|Ga7={@IKKbUW+$8hN5GB
z=;}I~8J+7!&fgbM!SEX_W1>E*uj_)%w#zAFSXVIEVNEsr7f?~76mPrvq3*zq7{=MR
zrUOr~s8i=S^L7AJ&h<vk4ogZ|(-|*^8j{uNH=t$wjn#fxElRWsOSFz1Wwn2}icX>1
z<oOY|!76<$DLU4HDSz)acumEiNdYW&>`tb6&w!2(?nH)GCA@Crz4?Y-tfOEw{yIOB
z^kRmi*??fYnPW($y>E*ieBRg^8OK~r<Y4cjOLEn0p3`FlrEU9JSjuv8-Ou-{M|$9q
z&>#K3yO93C7UtV#MWs7>3odC7Az%Lm$i9h!{^135+k)3GL+sGbI~<GkCzI2ZG<odB
z3V@gUSybjera02TBw0>Oa_^?#(a{$bZ`{Cq;uo;sJ|XGBVf>j9SkgDHgB=rsT8XPc
zbz2AJQPGsVaS6qHa<71UDDTnygsO%=A;fVg)xNwV#%`XAwVZQX*ER)9qqlMI5$E~8
zn+K8sRiZ3S8x1~=M9H$Ng3P`}i0Sa6sLNg`f7**wMNwjGQyi3D;%AJJAQ-lcW={9#
z3F9Z4k%Xir@f-Nx{xk!;CBrdRy#dSo7J*jV99BHB9>yP-O-_9tm3VCH#7d=;1($Ej
zP}(&|bXQk_Q<;sBG^__UduyYG?FMS#Uhp|zxo>9=&wx9?eG})qlWBe#n|Y1bHNCf@
zu0cP{GBzOVS2sa_ZUkBFn};(W3pmI?M$NPwkH<x$?4k#{sCdrIg|(b}p+&U@o-8uO
zm0EmNAj{DML;K~>9Q-f1-~Gf2F3QQ^s4jlr=SkkpeNpG?PgXHV2g5rFWWC;=`OW$U
z(j^)!yT23_qxtWYU(NEf4}oWY2TFGSO^AQIlc_(S7PY#ZfQa{ZnO0r|X2`!ob!ZGI
z)n`Qm4KJProW*3ikz_Ps4f<DgMcH5van1}SXYDyry8(Zuw!MNzUhf4xm=5hfkHV)l
z(e%93g=gTr2B)CESOC|ORS$GR&kQH5;QK_W)ls2sr7l@Nn~!cDYcX3~Ms^=nj7^+|
zN-LE((}8E758VavmHSw6>;SBP#Pe%Ry;0uNnVR%Xu*JHW<(Ak}yU!pfkj4oTpC`rA
zA_;3b)65#EH$IsVj^$fh!0eG6B`U`f^YNVh)}=S+ek8K|=SJjHlLZ~;?XfPy0wZ|#
z`SJ*7I_}7MBkCrobyy$<9CQ?I0+*1-D?T?3N`OOtPTaHgMjl`hDo8KSru6(kOc@Eh
z=Wju^@5c(J)=Dt#9><~wd6DYx;ar=YFX~Rzq{`>RG49=e?)#fCwzv<%{U)aP^rxtr
z{|7jYtbqiZxfu3aBsK7Tq*TuvV>ow5?R7|muHBIB<Nn2Y_T;p@PN)@*f#QNSxejz@
zKI=A-L!Y7K=e`Y+FWOM_Zd1%I{+&fdXkm8VA?CVULiSHqQq;{}<g(`mI1D}v4c%>U
zR9+OC3tyO4*Xz92wL_07-6;LTKrG5V%pxr0%we+zDWh0Psb(k|2J~a5N)^*;G~*m?
zDLxO|gv#xX7(a3{Q}pp*@iU|F$%?+@k(>uby7MVF1faN;^U#(KrmX0R=ziu0Yd7mb
zrCPxK5a~?*n)CX)c4oITCs9a46-)ZtAD!w##6*kzps)e7v%JMd74nR?Qwy-*kSi;l
z&hHKUXVh-LDl`s}P{}c4YUyFg_1t-);+()@IK$U!D3HV0Cdi52Lf2Q?QPXwK)ZbJK
z1>Hj#%c%o%gH|Yb-_DZP6bVlE7m3eLIALwnQn4z9`<X(E1Zju?H0t<5<#&E=zJDl0
z+-zi(Q>#F`%@9Mf=26^M18jWy9^`3#G1Y1w-HsehiSuK?Fl->R88?mlIypb!$ug$=
zu&|_ErwF2+rm+~`{?uR?LiPqL$^Vo;m2*yulkQ$nJOvC`e?@d%xd^Omce3>AjZ_<L
z!ZOXckMrFWQUnjgaz0DUo%w_L*logsun26~bXHV4EW+BA$C&EDA~f1M9V5Swq;u!(
z$!Cy)3Z%Zwra#w(e>??-#U5CltWV08Xd!#<SFX``2d2t22+}pi(p_`K%p4Iq>I=c}
zTMft*Hw3lq5NN)AmbI^s<37Ce;Gy{e6xTFa*x4>TtAe1)*OPK*t%01w5me?94`z#W
zQMsxcC7+rAWq)bFxhD}+rN5P&>hwT%|C_jBjUL4w^Pt9^TT$f`4XN30Sck}UWjB(b
zVT}g{1at5Fol<bx^G8YL2(GK1@Iowe^MkPMzG%j?!D?q3GUaqZNcV}RhN?*za6U&I
zq>dt^BCd1epY@!;Mo7Ni8I-!&Oe^gWq*|-l!>1$2I{viyNpmeVDAtkt*LLPT+YLL0
zO~w3;E@ao<17mgmW&wFp(X@@v<R522=PO*R+rt8HYBxg6R^I<jPZu<|a3A8I>)5qd
z?%3gO!gICsnd|jw5a8@Y(%;@e$`h{dT42n3UMJX+o^Dj{evpmI+eiWLt5}88aP+rv
zrOdKdV18#D*e%wi7&GL($`zEN!@a+AyP=_l0adphVv^DCODgUT!e6VFVPFN<ShQ_p
zOEgWXF@82Q95upX%f4u2#OpW7Kq@#n77LeXk(u*wDt36uyu*iK<D5Kz<Tz%<S^nYw
zOrv&XZwlJ<2u9trL;d@`QE|x}j4pBir}`%t@O7v?Q=krQcgh%U0H!b7VEI^n<_vnx
znj>al!`acK<*36Hoa?A!O=7{TaOPStk!g=!Px^^G)6VaL_-pbcsx@;)xN{r~&m=O3
z0i_Vu>VR5L@_621Cy?A~7d5<cSo>5P(oR8gP?<n{AN~xREu^9$78qS=jTJK&<IOfb
zWPbrJSsg;<GyTc6DW35=n`mz6Or<^c!ORLhyvD_mw_WCQ?};_Ly=Wum8&4<Iy5(Z^
z;~h}oe_AmAo%8ml4WP!jMwYSrcUIPVgPG0c+?K*(?)UK`>HAU8VV1}>NC()QOV=QC
z%?wmUZ)Os=+gDps*ONT+KibL^W)w3Y^^PFP;@1c_OOJq72-nUJvJky(HdB3_3zhy<
z$=f}74f(z^h92=n({*(a(YF&tJf07l3W3IJ_955i-V}A=9K`A`CdpiFX1=OBlUh_V
z<4}K+^a^GAPbX65iT`L8K7#eX)=YEvc&<yG4`pM0KzolB&t<d|HM$0q>ysHwzF`_A
z{`C^<;s)Zc>)Z?QY!X_1|Huj#F2UH;KcQHVv6AUVR9bYTq;^RL*qd77hVZ%Qbh0yA
zwGBsESvo7erJ$R<kE=HMEOhSH4~wqlfqLL}VZ+yX6dT&Y>?WULR^c}xeR(L#Ty~2-
zf3HDV?@g@g#aNtUa0Wuv?v%<o&2t`djlrr@jsd@_1$F%+QPRgiu99c4M2&73@ZpOf
zc{f-t8<@_te(YmS7C~5cYc=}%Xi$X7br!K@8a!#|dgqW)SbE@*xS^42I#vuPttpfE
zy&*+_w);%q;t_aAIs|L`&ro$)&6M{B$u**Qj$4nObjZb>;@tw6#rM@{KR6sCc7%{p
zD-1jgCxdGrV=@>VO^v2TXdlvtk~<f(#9t!B*=;2ssS}v&3Z@pFFM>@muVJ|tIci)Q
zYuTRzF$;TRqFpcah?-B<UAwUml4C?F_ZtddMC-Q2G#<V2=8oy;bMiK4kDX#3Bc`LI
zvCg5*x(}5#T7h%aI5HaLNd6{+P$D%KDzXH0t?on)uYYF>`%bLvpRQ1L#gc-)T4Br2
zaxtlK6?yODe$ElknOzfKLpvOpPni!oXfL5T%es?%$6N}n>xLdb7US^~{ZRSNtt4RC
zPqBRLQHYpqPKpKnaL!RXti7QlNF-k!e4-yh{jwsa=DqSNxr$Zt8A)mSZI-v3>q%!7
zve2*QoU_LF*z$uV?t89+Z>tSzo3Ey_E%Q*hTwgBB<T*fn)L<oTWK{$HWOn+&l$^AR
zF~@~C$LBlLTJkxRg8^FDaP{fc2ByB`0;Q+2N>m%Fm~YQW>KONp`6R6b#mG;r>h=Vt
z?(u>tR`7qHc7w2WLy?6}LRTfSh{1B6O*Mc?HuK(2buNTf&L=bTxui1qmqq#nkmfI*
z1L<eS46e<<=24rm<&h?7J(`6Mr;M1E-WgVtA>$0`+Yq4lT1fn$M<Kjsvo62M%+E~5
zoLM1Mwt5ClIT%8}UTM?4wqShtnBUfeBxv&QW;FjbnY<_Jq2jC$E33%|%>ZXAvvHxw
zm)$9NlpE#PyHWY;EmT*ro+JyWmRKuxGO6Pj`m|yis-IP{s*M85x=dxsbM4X9U>T^#
z9%W7oLolP&1SNWPSF7W+DQt5mY&rc>d_Rsest*y|s}H4yq{W!LV-{K+kdtchIU!SV
zfcc~&*XVv=Ru!jM?SL<WYl#UOf6E8;XBV=z`&W>;{Ud1oUCHK{S2CyM10`8ximgjx
znMUYGmY~$ZvY<zh-==}LZ$**8*WTp1$^|{MCQ#$pwUpxPgJoBZ!6a=awv2i$bX0PW
z{IZ*1m-&tPMmdqhb^??;y@vF;R#ZMLmc<V^z={W70OxI?m@z69%7TJHezqq?a!sOo
z5$_%MFo&w7MHF&rI}1Y{a{Xff=l>gu(h5Ha=(QG-d!-3cRVG-w#gJ<YPJ+?y!T7%a
zILy!End5src%DxylL-%@V9-lu{B{Ldrv!n=1{?72EydcvcA@c`Epsn?$--w4#D3hy
zq%U6xzMM;N?xvio7VL%Fm-NY}iL<h@U0I{UFbw0~fW)t>`Db*uq;bS}FwS}kwFbsu
zwUDz^lSe>HLNl8=Hj3*{r5F|sXnt@y8g}#t>C`1qe*-XQ6JSzZFa}&R5uMuPLV!<K
z(Z5?is<!0}QVR`=n`BGQ$GuQP5{1c7!ZxhqS&B7N@%wc-pUJKwt#6$1wbO;%t^Z|O
zm)&rAwI}aotRRWdS`szLkQ53#rnhf6N^}bxTPk;nwQCfZ)3S`Jd+_?FawtlZCV}j)
zD0Jmoio$;VN%s3lluk;6cGpN2bG#=R-3{U%j|8E*{Wn(cdY+Zx111ssLsge%W?ixx
zB4kY<eNY9z7IMDhmZd^&OfQVr9mur%at~qN0_Gi}O*-R7lHuQ*!1>}ttUOY}>a{em
zv*89Z%n87l9bRZM&5azAPl3nl8=}F-Rj4l1grXm6P;F@zBHo2kN$(95`z(w$B=f$x
zUr!dm^Rf)9i<s+N6MV9p^U=GnMXk~}&g0z9jC)^(_)9#;(RwgAbv_0gu3D00@)5`K
zXkDr_v!u+wWn^ucAv)Oo#UdZfC-vTNs+nL-(uAQrH)#gffS(&@?G?d2W<S6s(21S|
zjV7JnOvu&1lWS0hP}Xk>o|jb4_1dQR$;1Yuf7oK`jh^W930Z;O0WouZ1g~E%z$d)`
zk~|qrz7N+?$<alm^|=paq$sex$2gGZ`I7#q9LPP8!0M7bFzo79a{tb=hjv|K?j7f$
za_k6{%$g`x|9J|kcz>k0pC9@BOaR5}8X@rIGiFi68Q=@H3R*wgp<U7+tov%Czn393
zZ5l|+|BfOUU&IvQvmyD8FH7fqsnm`>R5W`hNPg21le^Ti+;3-i?!gO3mmbbk@Mmut
zbf+&_H1@=nMiVGr=u4(qf3e1d<18rN7>hQ_xzRBN0+w{+*<2PB{4<!IPxr*q{S|V_
zaefx}3IYc$;5=Hcwa}G{4oWXlM`sDL=p4ae%XF}=cV!V1c&5#U=Wyr@*H&3t<K^N0
zq<WysXSH`AAg7-wSy(7Ity>Awry-<eS;Go?CJ6O5b*%A>9tO;{VtU0Iq+U81jQ-h*
z>i6db<;3CW!WpygT{hq+r4MHtsi44+#LTF^q$gX9xem7A*|{s~y9~qI%~oW-HIHeV
zI8pw~1lDAwNeNX`NuTq0N`D{1wMqS{`0yME{4tQv@Hk)Wd>nIo<%YV6GqGK1%*x8m
zA$7ejO$m{Zk(&+0t~tOnM3th-;*g-#*@hyv{K+JCRq`PH8z3DVK;B&=N%?USi}zZE
zuAN?S_A~cN7!SfSuK82V+9MSF9ssq=o(T?1ENFvH6go8PP^#lPYFV_2B03*oks4Ms
zXo?Z9aqo%OEN%F?yqZG4O~wL)1t7h5L9Bfo3hjT-C)a!%lw3RxLGkT8x1008mpPDY
z<OWi$E?}k0CXn&Ut(d&@7*i+@vEWNQ7i@|hp5ybi+Il_FZ)qQLN##s{Ym+EqDbH*X
zFGAzlNHo7b3XC-lF^#Y%tl;V&(0*VTbzT~d`i~S;e`hC4Jou2QtLwmO+<0_T%)zol
zb1}Y;9@S@X-{rm$EPf?t5;$qH4P2vKn_LYUOG7A8#{t{YoG>ol1zqP30A)3hxlSZV
z{^`Qvb__%9c~Vj|+!u2{PQlQ`S=i2*)2e)ba2?{rbFAc8x@S9>zCFWKhK_=fYAa^^
z>BoXhY(f8s9x1z(u!mMXP_|&67=ErB{%Q}V<5#(M{qrDd4wsQC{IU2xeIt4{*;8%v
z5-1B^4a$k8n7f#>W6tv4Z*nU*ZT(zw=vyQ<nrr~sCb^(=9SP>W5lYV%iH)aba0b_5
z7In!2@6B7y_qUdk>ZSX^$tbHta;zBgVJ!JMXR(ld+u@K#AC!2R3kDPT-R<uiP!RV!
zw4`#4yZj?m{t*i?!XV7|ddH4)@BZas{**s&7^brAjD<e}pEaC6+^G?yk9Uh@E>5iV
z>2px`9mh)V^WvBLq2gkDlJbwB5^c9p=)gJBaYgI652+72E&c#S@8(cJbczr?ji2jR
z3&A0Q-=}~3$e;7qkn!;{xYY9ut3`vjPIn!~FWZ94yLeFSm}l%E&)q1t<MW9H7v$Ap
z<I(!<0|-i4!MT4kNP0h%XY1;qBrF?TOD1CN#?3-`$qK41U&SOl#|q9X6x5vj3bOpo
zF`;2OO7?1j^g5sYRt1tQ@H8Z>(4yvV&Xlm!92IVsWNIox8`rwSU*ma34WA!04T6aD
zD$w}01f^%sixI!tSwP1+O19t2JSG8^y&leNUiZY*F1@L0qZUQ<ZD!6hS7Y+Hbdi1d
z3yNPyl3({srkvsm^<RHyD%ZYZS(7UP&Ot2;q*}dTfdvJjeenQNtmGB}?p8^RoJmt&
zjiW#}o{RqXeDJHwXU}K%q&YjTGUc8JC6a<U4vC)DsPG=js*asxmCx>i!-C7KCW4<Y
ze{6xECmS*TFz0ig8!EQ)yI}N<E);WXGipt)<eAR9Agn_`qd$hBQ|2TlvE`QUg)iB!
zP)nYH=nDbcEuf>8&)zHhQz_RR7Kct@CKtK3@$^u1==mM=q8Ct@^JvsoBbK-=CXZ_t
ztcf^_e!VU_B_0#~-w(sqZL>KyqdVpccbT~{gZ7Yqpz5`Z-`!@gDzYT4x)%P7$T(}w
z1DbSn$b>V=<7Z5Uz`^F63$;)P|6?xIWz59ee}!V}KQEcb^nU1;9gYnxn~C-K#mX8v
zD|gs2usXP&az7`7o$d?veoYYh%{akoBaVt5xxR3ymlfF?c0mnXk4fgyXwWi(mdMwl
zYnlZs-nSaG48~yfALFPlVF(3;6be4mPP0$f{VC}6M!d{fS{5UvR2Dpna%!9~Lh>A3
zdR}6g$+pyCW`T9r0w|*V0rSb^nKy^^;hZ+-&D<Iy1oYKFEuHzC4bFA+Q{AZAbrog3
zT!E@^7cpM5jm7$|!g}sGs<}50Q@rP5f!TYeC>)6~op^@j@mcum7SHF7{{+@HuK`jo
zL4cC`>BEjgYWfbyolwC7!)hQ|x>qoK)e7zUDNy8oi<v)J3X!K2q+v4_%fHvMz-3EF
ztqx$4x3}c7{9ar;S`JE=8oBoX2^B<Ei1F8_LPSX#xK`w{milg#xby+D`Lh%Hy}Hg4
z2XIEfqp{d{Wi}=5>Q7B2a>^+gMH#(~ncoFpOxAfVBx`VnY5IJW@r=LZul=}o@~J4<
zFN%$UP7wav14!q(=;ZLF;M1%?n?bWMb%ZB1j=T&((^oQcnGHDQk7UZtZID`D4=qKL
zS>r%Y)?)faXqo>Fg1Vh#`a{1#ic>GDAHI^>pbw7fI~JW4eNfk8F!v^3EGczA360+(
zSdsNhQ0AU5i9RL4`jPut)n5@Tx^^D6S`OeGlKyD?rw;~<vlk=nqR6UwJvFxd0T0bL
zlQeW5sC64zw9j<RHB&GT*FNapXC=k^7@}g|SUzVz3R><e2J54+{fvx~D_g`&1Kyi_
ztbw7!c>UOUAS-j6!~%SWfc1eIL0LMT=gRU~&j`-<S-Y8ZOE^Dd70)u=HHg^n_joR|
z1&(?!fvWzQge6<K|Lx`^bkFRGjo<uf5Z4?2`YurO@E=UR%YbycFTh4qFLVhw&Ejg2
z3hKUyiUDe|G+_YbzrM|!4#t;M-{*|NCCkt?bO~O2xCK>4DO~rRCl8C2U`CrYsI59!
z;)oMW^6HD&(R(pv3>Ztf>qm3uo3j|JU%+l_Ny#~S80KEL0^^RwWZ$%vGMxj-+t8gt
zy8i?1PqwhC>gB9-WUL@D?9OMMn;_uC56EjCi%n8((z`GWZ@%tE4uNB!W{V|ewjTrY
zsk>Qx{8Z3N_C)yy8?wGOjQQ>g$MI#IdDd+QYu?!ea}xM`Vn&b{G1naQyEMU|qAuuj
z_XJB?z_qpSgc9Q`1<WPLmWn34Vz<{X;5{A}%3sJe7nVj$c4ZyhwD9K4r3Nu+ixGBw
zf6UAi5<qh1k=S@p7jKXAAaz`tVAI>5XHaM{7p+*3c%EYId{z@O;yjb}tA>KnrJ^~{
zf3o891HHBW6t>e1OFO4Q+5OE>UG$S>yr=;kHK4b*4z_*w!^n6`id-I!4$GF4Y+ff+
zH~Z0(I6j9fPXyzGn<%?v8J25sUtC4Dm_3AR^!<vUeONP7{T(Wljeidk%_}8}N57!G
zpBH7QdoqX5AuKUQif(D+(CU{j1w`Lsii4N<I@Thahg^@G=ZACt{ml7Fe}P*4uVBRc
zD$-n~5S#gojsL~xcrV@x8fC-Ku6_^8d$^oboxcg@=iI4OCgBW`u5|CK2DPN_fYQcB
z7H)in-R|8VU8Un7`-%n{T{kC{t*_v);W}rG&k~BQ6|B~(RZx6P2D3xa=zViE-TW8}
zvB*8JX(O?{%!Fxf4?+7j2^rrHrUDbbH(Oi)^@L}l(r>aT8DoU5Cx0=!Ye?sgjG$jP
zy{MqyPr<bLIh5u+k<0D2qFE&8)u;10t?DYve#`X?(Ps2%8qX`}S_}bC?uq(Gb6J3M
zAHn1CK4>e_C&|t`qU2-@WaKQO>aQ!wG>tK{u&tPVri{g!zGXE5oKuvlMc;)GN;))!
zG=~_WUFs)jap1Xp&!p_!sjV0tFQJrj?vdZqm6Z;RrsdY1=}<=(GTJhYT)g%&_kv?=
z=7+9Wyx)#VG)jw0rmw-yH|LVZ%%z;qKKW|=xW(vkElDU{vP!7Fu$xugpG>8@&dX)m
zZ{YZ{*_dkggasNOfiP2h(&hK5fYk?>b9OLlogE3v9nV2kSu8~4>5{V97PEsDe6I&;
ztdU_DuN4|CSCCcYF9`5j!906!qJo^oAls@d7P~!Xhkl=e?v4YgX!J}fyEmR>8&(R=
zf+0UY_}pk|6}#Lq1haQP1?lc=kZe2Y*tq>RI5TZ@X`6%|rwZ76bG|+l(Ny5Ni}~Gn
z!&>^>7vc}b!oBs5lsgJob<KQ?zs_@<Y!{J*fe{w`EEK9XEdoV<?q}g1q|D31u;|E7
zRyr<{>VlASEV-Yqw;omO;`Q`>cg`B03x<7qlk>@~r2o7DGVF{|vhNrexrS3M&zs8p
z`w}xekq)WbV<2brLaO!N0wre_lGC<(qN;nU5Ot8(a|w@1o_K8KZ2bLVT|CdZ<*Wrs
z<Y#%hrx8{kI>oO0S!23WKZ;*#$+d1g8(#MXR0Is8(2JvJ&iyz@hTag~GzJsahw%)?
z0c5;b1;&%Bz#u|Shn9Mh&aLru@7F|3Ha-JM(VfsEk26OKWSr6PP>`+A<@M@Fbbq%V
zGyBfK<rAaG({3bA8R3WpzJ82NGoX$xB~bMnXL^{7VYfGIp_a&K*1qW`NItJ;;e{J8
zm}{~mnrSR#!3K2r>j|^ZT#LS+7GU)q6RL%U)E?FwWNEsnu%7|?&T<O-#}dE);yqE+
z#fPFBI(Ok(;p$~9yS;?>Y92GADV*tMDnr#)J*o-NMbAk~F}3(lR{r5A+#InET@O{T
z(%1ck=Y6?9DAw$1>8C-ERXUT@>(2138&AAe(G3$PUt`LQ7fiA)Uue-dF8+F>g_4;M
zAmrT@<~NozsGo6`t7JaUgPF<7cwWJ^IG)+cxi!j76DYZGEsH5~B4%Mm@jq&KU2F^L
z>tSTt)DIOee+upSYccd=I0pEYaE)aMx?fi^No-fg*wNi7<oq0zm>w2#_1)<cpSxD^
zyMXz^T=4!SC7I@8%D?j#QV%_btb93|7%f4q`!*2SuP1reMNswDI(S$*03pkbS{_%j
z>|GOiM(!@2Wxjz0&Wgs^SC?4J^tt%y)he|9mMaFe+hB;HDb)_G78;$n&Tgm;lT?4Z
zYE{z$P9_&X;ZVyWZF^(Lt|t&OG>=vN?Tu9u9Tqw<6w8C}Lq|eSta>nrG#WM0aK%a{
z^I8eX;uuK&_l6jx#qVZgbh(D-4an+-2-+4iP<pORC?0T+72LC>AkVc_rTP=3%T{3V
zl^;xc!X8YyCRaJjU#K0GBV<-{f30$<kUzF7wcF=H;x{dd_w51s7t`4IqqeB1%Y?=t
zZx)$oN(#G9%vH2y4k4UtJmoyNn%xJZoPpRPII<;<QCM_>`)K|w;=Y}!R5qd=oKoyX
z_ZfC*{hj+hZ_Y&7silHh_h?ET{RK*m-9-OIb8)=mKx~fTnb#px@wnhmX54pdVKEZp
zPfkOK?T6mowRkRy8EbN$jLOUsQGLh+Bu5(r$&rU8dQBUtWtIUZxAFY*$rC8#;yIqZ
zyAVsh4Wepy2_`11*peQjDeBHy)^h$FyB0nVuP@>=#;?5umC<tWxokx7UQz7aT|~*F
z$8x>(1Myn19hqxQgNO6_k^0~|%8KcM(r{~xPk+xU+<N1Nv)l`|mHP`bdHz$^gUrcY
zSyDJ?6=_tsqWZ&VDD71s&pn1vXcUgV8Z*c^ryiv0V&20GWSaNQ$<DqBK6Ua$kLlgC
z);Z_xkw`jA{`2yf-bF(q(bD4I{2yOEC0!+}*90yL86CJdeDP=-+bPbSB)-t#NP)}O
zQQ;*k&RTCNaTQuPXL%!r&N4;wvvOuvK9O?(+@XBZH`e-OHdd^h%yW3dFyp-sYY$lm
zMRQJq!o-10M;~D-I3zaa-Us(f;n*~566!?ngVI-Rg0l8UNrUbZk~z4us?puSu8q$~
zo@+64!yHz-Ap?}RP;7su2Iz5?1zjzI=Q1C1(OZiSmIchcPJ(TprsDGPJjdl`1>}BK
z@SHOdLhM@M`-xs8o!tvt#@e!iv_E0aJ1;8J;XR^T?O@Z@mWn-ZL%rJz@b2zOPi`#5
z6om;@`RY(mx-B_P{3_2=3Dg+gljjUw7m~lVLG8TbETU~VGwi{&_@A7GmeliHv)rAM
z-y1ROlPW<gJA^7X-ht|YTo01SYaQ|;#r7-+ER2DG1207DRXqF4Z#bEGdZODoADVN-
zl3MW^%NWb|i8t3-a-aK9nR}1TX-I*(kK?dBhx=kBz3nv&0#G?)K6HHJ+6@KI&n|w-
zsuOvh!SyZ7UGa`3^#;zUH-h?qnwjxoo*TE#U5wBqsPS~fp#ILxc+?HXUR$GYEl}&5
zeM~c_4;j>S<@|JutBKpU(407~o3W?_*K7&>$~EL(+!T2R=Sa&g-i80HpF0iqU;o$g
z`7f*=yZ^a<BogI>|9<_y`yTvf{j}b=YV%)TKV$#b`uRV$ssH}=|EYK2KkFxAeO}>z
zef`+~_xkyNYNh;t|FQq~>u215pI`rdc2D@fKffIQxBcfoKWF7jlV$(8c>m}1|920^
z|Lq>`IK|mf;`@L5b?wp!%=j^yb?kZrPFy#gy)%W^UKw<)A)lN+F;Sx;fC@)0qFX<A
z;luU;h`SOn|M+bP`g9AbD30e83h1E~$ll9`;^E(t$#?j9nrj_MqkJ5x&dU;=CwAqm
z(N^gCcoJz@EMXH)AH>dnXXx;y6q<CG&n;`USw8oh2bApw^~u{p^>5$6{nH6PgREiu
zpKs?3*k^*q%RR8$Gzm}rrAyO}=AzT=-%DygO%yxLzle>sOJLs0III+IGM5ckA?t@7
zCVS844AO4w<;`R)npQ~aOY?co#slt68$nw3i=k=xY-+R>AiaY34=>e#r1gzlTKHc4
z@cIxM_%FmeriEmnHw1@H*o%_KZ?As5kb@STyQ6oE2L`7kQ*q5V@E_O%RmVbDW3P2I
zY{*&6zJG@~Z#2dT>s{beKMQWeq)^-Yb=W2M0&Z(4ME}YD^h2h^&X0#<r}P4vpo~SA
zf3m>W>jP81E2L#z^C`&64P>W}!}wiJG<x4&&L6)Z2HS3-43ps?t^F+iD#@i;OSMSv
zb{ji8I02<oo(WZ?4Qp1XVt}C)%Rl&lmE37#K21x(Y1C0_Gd9M#+*e#RbphD_I6(n#
z2Eei^Lu%;ZO4C1R(yqL09Cc+jN<SFDxXEX!vj05ly6FOq+@?#NCZ5N3!z19(TFTna
z^~1%D=dpKT0tUQ`6q{l$(AS#dct-yMj=GaY<aG!yU;KxC&kv!QS29TZ-cro&q)Te6
z^FpcqT(L#BLHsLo2dewt7S>!gp)1$C=$rm2`mJX#TD+(bF*t{2{OdzcGMA!yr!~FE
zE+k1*(be1LNp#OlMSoq7L9fUi=n!<876p4zCGW4+dQA}Z-Xv3D#sxB-9Zhd-Pmyw^
ziO_1ANIteoXd5~Yhg9vyq0)o+p~oqVT~i0L%ld-qi3jMtNT9sB0;)|MAS8@g&2u#v
z>-%ycNi8pc-;({{ZMcqeUrtl@iBl}xVkb3<BHW&sLEmqVL7yoBO#QG*l;3ti!ymUH
ztF4mN?B9a^ozL-Xv0_m<W-?Pr{}A$a4dB^Cuh`{$PmDG=!?SICNyGOJG*oP-f?-Rk
zNcaaXE!u~*EjmK(?gyYWZ7YG~QaFG9I4)jufRvGYX>{-b8t8ol%L6A+wr?msJ!VCE
z!$xAJ>;z4=J&OT{&kO(Ba>iNkLUOgtVjDWnQ0@9k@GCjQ>?Y<>5a;wbd3*%JyHTKQ
z&oA*UZ)H{ibueeH1+~51g!_|D;`yz4xS`SqJ>D{=JpW$q%(=k7KQ*E6L-I-bD;bt<
z8b-^~JJa`%vv7p-S*#i!M|MyCf?%Z?mM+;UmPR8D)U>0Vl@VB+pT+$!JH!CJWWe)!
zWI86FeXk$M^DRQ4`|BdIUXx8Xz4wvF>jGinx8wLOKb=0w`_NtOb7XpK8w(KgAbeN_
z(;l)IYqo7fkMC(zIWU-Vx<_(;Y+p9`s{z-+KZp3^!`L4CY>b$s11=utAY|bkP^KnP
z?%abQ(dBw}I{`JTT(G)11_s^OL6N5y;VbSXZ@hT`@;BWAhcE%$zVJTIu4VGgFHhnA
z)Ki@Em`Bgcr{T5Bd+GIMzLsK#ijwWqMENd545-}=jlPLgF=YlCigVcv_oL_$a{|;f
z6ec`Agg%qGFJj&qa_YKG^c+7L3)g5<x1__A8`uVCEHiMac{*-2Q{kYKDKy1r45nF6
zqWoocB)febo+WbKh~riH)y=0c<UP-tjJv@+P3Pg>mu9qOJNNEEJ3KLe%R=_Aq)}&G
zF)P^-PjN=0%FPZcAKru3q+4vwbSWKQ5Q@j@@~9$4o0^s^!oa5osiE6yTsE}_Ww|?I
zuvHdqk|?o%9pBTQ@Ek+eXfm*xj&cJxJXwALBWiScmO>o-V|x)ZO5{wjxJFDKC$N7P
z=90;SN$7HYCtdO>!cI<(^wp;jLvjYuSF1!+CoE*{C2wH#>{yhp_!Ht6sF?h-l1g)y
zu%Es9^Bf#4QjNJUObuN@?ylPWc|VU<Cvxc8&*iv2jr%ta{($H+R#<#@GZjv^$7>2_
zOx7rdjLz<muUo_*P6NAWUc}1FBS@x@fz5CQUX~rF9>=GXgX1(7)Nq2d`;I`J6Y=cW
z!!sC=_m?nb4$m@Exj^y`O=!62iTm1t{5yqX_~*qG@o_K&*f>Jz`4sY4nZd%>#<1}r
z+$aBy_fv(Z5O8fETrJ)~?w_nl5@StO7UKa}ELE;grorP*U_&lo{hM--74%{yGjqwg
zAcWShilfdID>3jRlCr8nK7ZO7G&sm}1<u@ngX{BAw%HoqMl7Hi|C~da;XhCq^M*MM
zwq|*&%yC}7G*th7SB$uSo4xGNCUeU_*tUat*znd18?>VFQ|CJ75uV6ImtAx;i2F6Y
zE^=@99_FEy$qw(!LpT0@WL@;a-s-cc{a`tUtsjlSreB!D*Wcl|y?{ynR@k;a8s%G-
zlIr+5Va(W7q<XVf$m_d|`=c8nc+eh#WuKVoOrGUY@PWO3bqw`w_mV~Y2yA=46)$ej
z#)kEM@CnzwrCJ=I_*H52eScrF&K|<&=*wAD$#3v`c^1y!ah8PLdoU-{1%DNOVCR!g
z;I*V`CX3f#l9zgd$KTz-+9Q_I{UWH>t78<iP!}x=;<43rG1YDeg|}fb*jJcE1;Tk|
zD$nH^nd;(#9(t_c;9-7NWMhXz6*D|wz%zsTLRQB-bd60Qo!|ckb;u|2NJ%tVkMo9z
zSvj!GXD5cu55Si<R2crID;2s{F{g_8qRY87mZx`sKCO4g_(l7eyuBZGZs&QQ{r9qQ
zYaD68QcD`N#)8s%?!&zwCXu8xX544HcuJpfjB-{Ra`sCHXPt$U+47y_lN$iG1qoPQ
zk<9jZT)-Ody)?{v360umi&~Dq^NjT+G|crV_A}ax8;e}1@Mk84-k(DXdu^5#Ws8+F
zZn5H5qoB4tQMgl`O!XGIETJ-q%;wcI_03Z8a8N$Z9G*oTHmB)pa{}s`eqtMk%q5q<
zkNt;5D0t|f5pyREfZ<zq6TCPKPX}aTfWaH)y7Lj}*p;*LQJKtXs~0HmT@n}c453j*
zk=Sdr1!>Ex*!#XaD7o(<uoG`Ei9GabXtqC@8y;mjzjIc>qb=;O(gYm4SBX>JjmEQs
zPNA03WKhLS70#yQ(sRx6*zfZNDz*w{Y~3R`@?aZIy^xRAUy@*N=MkKheU+tt=}%LZ
z$I<c6J#p2T{b<~Dh8cwl^r1WseIlfgAAJUVxxU%V#hF8|M$qb0r>WT`mY-W&z++hs
zX$_BOnQJaFX-JAtq`jTG`R>EJ-_BuC-)E4~w=0bIT1nE=6`;v86-|E>vp)xQrGyLi
zct1c&ct06~g96E3HV!{>y-j>m6?pVX7vk3*gzUS+ND-gTO5M1IdujyKPVoatXf6Q6
zvX$S5Q_eolGuhc6DrQf?v<n9@WWf%0a&jUa9-dDtBGS-GdV$>@*At&9c9P%N)08wJ
z0CilCz;oD1-5+1T54zh?v2i60_<j^y?^Z)kpBYqHRSo7neBs{18MvpDHr4h?69!fl
z@qD?D%;b3(P0O23VR}o^JZdZS`Iv&2O|0;4Og`rPyO$Q4?!=ulPhj_g)95x|hF%3c
zQzg!k6&m>CCoc!A{><|htU{ri`7V-*yF@Mf^8ht*R5D={dVl52rOH57S2&I9zBoVT
zM=C`xSwl|a?1d>Oqp@m4Iw%$WsdkmKAUWnIR0-*<Z%-xd?1;l%fw8FYDuh~GA=ZWk
zuwjdKpm*~m+|VH4<jFhn;>bc&daaUs_KU>Fn~vb}m*esJU{92k9=`JHdnhJeP_p+4
z0zR=Fhp(m$r;qu0c-|tOE>1Xs_pThK@jlO3AA=L*^-l`VPqr46jSirFe;h4oGNY|A
zdob!<96w9XQGRkZIjIZftvk<<z1K(9i*su3^>M=6Df=nx=tP>g{uKUD#8ZD+9vT-Z
zsWGi5E8?CX$wjHSe3v=;^2`Kf#~XP}%uYJq(*@7`isLhum*5e`?|3iv5Fgr6daf6C
zxNacbMpLp*oWt)K`TWj4kOFFqgdc$iv0&jgvFoJ>y6HcGvW(}Udx#1;Y45|?d(u!A
z>m|s3WU>2)ccDY_O(=Ulg1vLwj|o4lF>%K#Dm&hZEec*w?LT9=Pk9K`=61ryv?QAT
z^9+`?zG4c8Z6G-{R`_{$KVIDvLnr<Fk=9`oIyq5?woExrZZ0ykAxjLlo`zq~+tBu2
zyGYXWIr#OA=XdvQSg>jyJH1wkYquw(Lx4H-*>n(dIq&Jqs6(7f?!o_`BCNJfW&Q^o
z&^UqElU6sJ&e`JPT0^q82*KEB1?g?%`4x}$3hJ_pAPF(Os{DPE_-GkFvujN7v-wf1
ze49eCt))=-nctP#i%NVJ*Kl^vPA1Dd1t&Y^(~qQ7w8*Ju-R7RfO!p0RN<WgKsyIvM
zkOl_0>cjH6To-cWa!JR<wba6A*c-+V!z7n9Dt$Gc4P^OP*p>T7*Xp8&XFvMldjKVu
z^(g;*GfO+CMcFntS)$=XuE$ghzPyf1PHGW--W!6}>{Imcl8Vniw_w%cXqFq*2D5IC
zrQ)ZPVCf%&DX?-FC6Bg+iX%33uq=n1f4FjwjE}>Niapp?(hWD{YS5+Fe0~O<B>gKs
zH2W>TGY15-r<Q5B-+Tg%N#wQKfNUxh&(Y$ZF=#%k2RH@utg(V@82@D@#$5Boo3XE;
zVD~c?oiYhc2Q+c-)=7F}*@N=CW-?j$SMXV(V(k}~Kuh>=X7I%xvwjXhpW)VUdJ5-|
zdQYb1k(Q|5RWFD=V=-eOX90KfLN+~{d1!osfBW?0`G0FbGa;4kPE~PFWe^+GFPh$)
z_QR;`WSHcihVl7>;D+utQm(cW8nus;<Zw4Q4l*iX-7sdF3r>N4TrYKzH4d|Y__*FU
zdvpw$)cN4q*XPOP{5h(rI!b{?XPB(Yk-0wdW@9J!rkN&_Fm$?#WPa`9yU%H8aKHnN
z&M(Hm!xbRgrHKKbL+In8jpS#upXy5wQBa?3ey@K8S3dQjgKYySwUaHiuUJFx&ln@r
z$CLEaJGSd^BK|vJJ6`oaii&~$<TUEIywpxxxO;aW1zqUGW~&o0-B5w*+6iDi;TZg^
z$-%~CH_)z~g}!grk*kpiHU4Aq)SP_MiO`@CaY?8<aUmUlCSZH_>1@hSGi->dXTq5@
z^ptJIzvrZ)kF+lpR4jsVGabqE=?sccY=)u7V)2<@Ea&K+5T*CFFw5v|7+q1%!nX6=
z$cGd84BwKqSafE7SvOh1XBSA~+O~}MeOb%OUBbc{g=iU)iwR!dlt1e?RuEJpgm35f
z4##LP^?bx8{@jNutrXEdz=F=#oxyM8m1J;x0bX8M&4zD|!CPIDF!nY<{X1<c54WP=
zbMvsad4rJoB!_F;w}~S%1yUTI3>8%u$#UUYY;hb3eB8;tO92hN8kpH_Ej;6VfKmr=
zpT0PfGR^`#jXQ+_maEwlwuclUmLy4i=cvq=(M-;IC|ty|+MPy{a3~h<U(UpncNbE*
z>u6e8oPrh;e9?C035tC)kY@Edjn1m2Xwo$hn+gT2zgEIhJjbKa{rTA3>pkb3DX86O
z78L4lBeR#mxM^7mW}dqa0g;O+y2=A%KkWqT?>B_Vp~lo%^8&@bKEdww)!;SmW4N_=
zKmPSP1}(mh$K$JJ{!hmAuTFbNy4QxONB%3E4^77hmizF<+S8OP<GxgxnW)|46YF&`
z1NV+9BKu7S_=ndHOlS+m@4IH;Egfev|GNl6Z>*r@>#cB2{z!^ASPE-TrIT6CINaAi
z9p7<wVMW|mCh_<rzyIL~s>Us4@*oSe{!jq29$hix)(5sCMTvVVl$bCfg}jDkVt7pk
zS!8C@;IA>bTylW^Jd#CI`|m|RX$_0tZVt(-dqZnc7G+=9#!7!B2xo_lC83uedH4Fj
z3|`E^sI&2ms(Vq)mjU>2QY1=(CtS6^`6nwHrHRKhi};?Fk0alup~UsMs18;MUxN#9
zr}8j<mv_Mley35fAf9{tD<LRWmpMHh4E8zm=z~Ta{$tdarjIJ1S;G@B_30_5vWR1S
zjfQb8EYI+={w^e)7=TF&2jE82oz(f{5?tE(3{viS?D208QW%wpH8n<P+bw{qo%ci7
z-1QXn;XD{sJZDK=S74cD7<*@%gSJBpkZm1BwNc~2Znr1d`K7bwvvJH%XBS&|<N(SY
z?Xmklu5-QfMo{Op!oEf6)N<3F&A6(f_~upgnKN%P%&sws*$(RR=W<GoPNJIPJ~Yp3
zKiPj9ic>W`DHTNKbmCZvxr-;9m_CLk?>U1rqgSIzqZ>|iIDi2cmWxe)nBtXlIe4fw
z9B2Ixiq15krmv0S7fI42NtDV|nJdz8_mfM5goG#*37L{2iO6l9Q)wP=^IW$<(%BE0
zL&%(j%pny0laP16UwzU&`<%V^de-`_^27LIeFp31`bV68SfbCjChF3{*1QJGxqqrA
zCi$i?bYB;OXU0NSdlViN#o@dHcleT=&hj5-;3j%P#0zE-)z2jmonMDrhed->E3VOA
zI}-%u`}s?sn85J<UGQD33JsEWD3o;M<nsza#$^-oHm@ck$5w9tlxWDaN(YxI>6jXQ
z5KZh>!SAL1aN}VrDBqk2+4*d*D?frazR$r8bp_a2+QLa2>|-8YDLky6f<H$IP%_#Q
z8$N%h6H8fJNyJ`gDhn|2xgjR8vzIYu<!!q0u{1tDH39^#+iCZnIK~M}!-kYrYQYua
zE87w<&8tNpwKB9kD*!kk1mUO<zVn#9v}#NsSnW<{-Gbg!^RqFx$$Tp`FWQL;-d1!s
zmgD^Iqfn5&k65h>rPnG}LP*yEthk>BEBG}act4KLdHEl8R^7`VRA0q9yTn9zV!&jm
z_W;&?NTJC;OEK4Q0c7l$hr-M?#BuQ_x+Eb9jHb-M<`u0p)-{amsV#u9W8|>mSUoY_
z7Kgb9A~BHVk3KL>_Ek>?JPR_#)3U6`G-oONvCN^rqzYl<#RZ7^b?CCV6l)YeP}Rdl
zn37V<JR+>?;@1$JE1L-(=|S|KsSDN&S;Hmgb+9*83rKY|?KTg@_sLdZq`8>sOV8-Y
zW7+uqK|BU9Ugk=6mlVx2p|2PEq2H`|kQS*1D!vI+ROiZ%8dpsf(vIWG_w_X2EsrR8
zXk+Y6=2hC9iJV#(KB^mwISN`>T9$@ul=8qM<}w#BF%@UEt%mmN$Kaa*yOU{UlQtP8
z=u*5yO8+sR?#v(LH>rV!`!!^kS_JrHU!W-?Sl&bJ5zP<L$Jal1LF<HGlFQf+A}*Ud
zU?-2aUsU3j1%YVw<|ujbULRf<YGWe1%a(RKf^%p%wE-v4K5+`}$7;bCFA==`a*Aq=
znh3lO#@YO7Oj_-7P~p)zzQUdizCwHn70h~XB9s?#m%lE-R;L7RO`8}54}*yFBNy7g
z)Ezv3*5Un=YoS!e9u#tu`DZ>RGQO`N=qM~=?34-ca99SFyOxA^ewf0)KSkI&ek|d|
zB{lv&SA-MJg@fsI2Y4Gg9CTwFxf{oWP~y=j)ZG4-u2TqxJ(pMEErTKuHt5m-`7|tE
zSx=i!rqY<JrMU9VJu*VF2#a6KlOE}0rsF^2RGv$Kc*Gp4sis6@&ZvXm<P7u~W<l~k
zPs9hor|>+>*j`_}1|!F0gW$F@UyHXAT;CJ8_oNs^@$a}7Q!Rj(T0r!3#h7K7i__nZ
zf>>GBBkq_?Qs$P^PN#BC+%AK%N+&R<Hw$fet1<Wf4B)v9Yuuf-7LBV*fp`0%vE8>A
zw33gaV@{Pp|HW(+y|Um-iT99<0TJQZXz=H4(tw7a@hDC*B|dJ4iAB*GOwLHdSI=_b
z(wd3T5adnta`Hi4LjelqPeRzRGP?Dq5gwb$15w;})>9wBiGqgn4+^R=Wt#@=5ZtGt
zkGfQOvJy(qUI!v)Q~oA1edzQGriz)bX_Uq_(&v~%3nh1f>oHrff)w=7kR&}U`yhLT
z=_2E1<BA2^sPOJR{qI^Pe7vEKiyTzJDJ&X%cI>167PVM>Q3=}voMFRXHHc@vVku1(
zBJPqT+pLoy^?C$ebQ+0EMGJ8HO4j+OdJF|iBTOWhX@bB_gVX%<g7_~<L<{3Ua5$Pq
z-#+n2(}3d`mUEuYd+!5bf-zA3tAglEDkHsNiBRq!B+Zj5acNI3zS}KE=VPD9AB`5$
z#xJKv2^vfbzerwJtcE-Nn_+?S9MttcOOLg`p|s5vOHLSJ_up?cY-cUztc+uwXrX-l
zU5oL*-Yn2z=Z2<5=Cq|nM6181GvE4mlbZwbz*|{i+@#Ai-v3PKPuBT4aYGykhFu}$
zFMcvzI~|XYs$(;%I>L)TXEHU*988+l!Q&4PDPNgs2Yrm`RcH^+U*^$*Y#FT5FM|9!
zQ!G1{2F}Co@zp=)V$BV9CLA^fKg6xW+28#^_rY*7y`zIT|9(PGRC}WAq&RSu-wp;k
zRlxgpxp8+J58f|-K@YDih5KV|K%ujX?{d)-tUoV=XIFxu{!}`;54@wA8*TXg_In{^
zY6(?10(53+8W>CGLt!xM$CHo6#~*c|CO8!<zd69&rfkSnSOJ-H<nf*$1;(981og)U
zL2rH`Xs-Xp4Q!5}0@Jzts@sY1oblq*+HK+Q#~RSvBZAx$>p>{f)M(pKz;)khA$J~o
zLN@F3%(Z0tQNt<5aIYac-s<Ea<D7epeM7@~!ik&Ic^X(y3X{&;L2TX=Zq?!(@c6rj
z4g`5qzod<LSt$Xmj;trniMMFPjy*8=l-*BG+Y|kn8({Cib}(GN0v+$(A$9dl<a^ct
zb+?p(FPkdh>ctS)=P8E;)<ICLvy^n|j3WsvPNAr_jdS)L%`%$%;n4~i?Ch`R-b*Zn
zj^tW2l5(e3SxcyAdNFual@Qf~DPS=e3!x)|@XE$=YL|EtBb6#)#nT{&H(n1~Ea&!V
zND6+*2SU#<InY^FPQ`~c$)~2hczx3;@Vhq|B>D4T3-gJ$N?zx54F{>mDjzaS)&<71
zGq2Q_eI!gIU<^RkFWED|_jxI%ej|<1{Nit_(s=~muB*oO4Xjt!<TAZ4=><Vs<#GDg
zm&EGqK@y{si-Y5$@v^TNOgzHTqD%-CAN0^8r<B|JHxNW&ljsw7O`Ks;0c)pCfjai?
z$5ts4$Gl#$d2JE6{<Owc-2!YlmxC!6HWUAg-gxY*7%$g|(Zk{*nKq#qV|H4Ax?(V<
zoP17N<Fx4O>pQ`61M|CH8KhP@6?B$1V^6vN;a^lM0j*d!#!%hDxAL1rAMsPM|C~4S
z^lqATDe92q#vGXTB?FuWgCSq4oqq0Gh~KqNVb{+v@?&ZYNNtZJ5lRU-N+|^6lr_-j
ziVtz$VGUBdF3}u~DxCC`b<il3Ve5z}()Plf%${q5wlCYrq}5wtaneY19yf>6@3#dT
zLjmf9bx^CCWz@{E7+02s(Wcb9<UiLu#tZ7^qBdP13ttA3?PV3%^zbU3(I7^}wj(%q
zC<pSl=HWu#2huljh~aA|LgATc99ZE<jQe8XX1OE0Sl>g%4-Sy2#wln%z5qX6-H(ee
z#$)lEM%r^T4WHEov;6aBYVp$oHAnv<xxv%nS#1JZY+Qp&&Z}X%TN=on?4^US67*>R
z@`PiJRrcRu2%0c7P)>$kHha|V(jodAx8jmnIjEj`1YTHsgR80u%nI<p9fNU1Zb=Vy
z@8QGDH~FABBanXekO0|(>|B*JMD7=ig&(8Kz-p>I(NT}08!}cvP4R2W8=BDQF*}Y-
zQrZ9xE1%K-l4G#!rwBHj@1Qc>s~O`q0PUvi!=n*4*f!WhR;@_K$!nIO{yXN4oim*u
zW&Me0s_RpO;c+m?fDMydw&R#@r{LP%iJ*CJ1U=5yRdj&}YP83on$%r7Pwas{T3*<a
zbBhk`SH*byqo5;Ija`4MQK&5e#e4N}j#M7bKURe*a@Az_Cw1JcvI#}Ae$f~?TbMCC
z3|chvXwM=iaws$(6^uV|N;_C4YU3GxLw7HA?~cX4Zl@qDnC(qMb(Bwz1Ud10+VY?j
z9k%t8!=Dr2(!dt@?2*d&2W{m1yXhFx!twzjX?$L%9$7kH9scCqCsAIqu)Q(_6(5RG
zQ{NQHdB%j6*+s%kB}w-}ML4;;1giY~aZt4wjn~Pcpt-Q|g{~L7JFkH-9Ujqq^^LC+
zo`$P~q%hX^G^uo!frUp>X`KyQQ;hN7)k}%bqgE>MqKWS6_6OU`HpJnD1oSH%K$VNH
zNYkJfRVZ>K%a?7(;)YS=kheKj=J|qn#vT$qZ#+JPU!=)!9Q53Dp+T1C$-(DEsF~`|
zso#}A<&`>6mXQyW=5$fR|7&Y5^3b_^Be&i-4s|ST=&RgRNLlli>@!-9&XeqjK0Spu
zg;HQt;6sWn|It$tr4X^@HPy8?$Jq;qp^2|0^q<s)7Tw$A!IlEhv=;D9nv8)LU|{UF
zdnf+;F97y=gn)bJI#_j%eNQKiB6Af}VNXsmKFA4&rHTjeQp7kgy{867(LwBYR3IUz
zqA~x%8A^Bkrq+zh_-%3wY-V@NvHO_!;DjG2Y_ma`lO-%4#j>e8YWWU{LFBdEGH9><
zMF*Shu+KD%SUved<h7&0>Z~q#@+K1g{M?Qam2r6D$U5|q5aD3q1~}doK#rzcVnSaX
zF&dmjlTSIKQ}i%g^wA4z$RRcd7So$v?AeU?NiLn-2rK8<q3Lo{=sWj=&g>|HA!Q9X
zV<&_bJ=SSqcb$mm%_piO_h44bT4=lELTY)LxMiINb{TFZ16~&y2jC)o;28w7-1mZl
z>{Y%XC6sP!_Xk`k!pP?}&^BW<F|Q3lfvAESoR3D$RhPNMXT|Vv-4J=-w;1v})?%Zn
zJ^bj_MbW`s{P8sfI25lAU#*_f+0PRoYe)}1_WQ%XD?3p<^Ai7hMI_jt%7X#LWcvO=
zBu<_fgPV_>#FH|KU^i<&*5!VtU3?jEbBQ3=mV4tmwK!;f9L)_Lu*4V0vK983WD%Pk
zs|YWX_+{O6y5DnhSZW2HY8OG+Sy#G+d0`H|(ZXGp1sG`^3VSw|z~sBi$eTFcgm^wD
z{D3Sl*n1itOE4~_-yQ0$eh3B}=Fx@?HPovtnCw`c$hfNSh}X;tmY1AL+s>{h3KnYA
zU2Z;n7^R9e-7cW?PJ}{*3htw95_sL!$7bu9bd33F<VWe^r9KUIXAK}ze{IDkvuX6_
zTxm2B&4>8|nK<mUDhk%f^L<k9QLE>p!1|{jPO~in_W&12-7+3yr;UPz$9@ox4U8$O
zd5$kRLJ@>qa^sV~HE=)497U1)Y0vh9WFW18#!55oZNq8OR6m;3ztMurmjhYPE9LWE
z<j_U)!!YftEGjWRL)dCLn8=<<{PLM7ahx%Q*B+tba31YC5sn)+yr9<h!|-KS016%?
z;%1{6*qQFl&wW&i^;!qm%;7W~`!E8$<WuP-ZU^Z6m`H7^PJl*MHukujz#4zX35=s8
zW6vYnmrzb*y2i6P|1f^6-$(u=wS8<(_K-Y0uLzErSBZiBIPCPfLmJFlsG{>+5QUWC
zle_+~)O-O7(>Ku2b+Y(IHWv>+jm3gx8F26CZan?I4ByNOhhozpda)uDx~~T_9W@v2
zKc&L^W7A=<V;IE93}<uTK9j88(<n%v&HtC3hFwy{nDWn?HpQrO`?cyoXf&nK{A&ko
zI-*b1VslAneI!}v)=Qqch2X_kA`nE@@hji{rGLic)B4mkuxXAjJpB^Oy5)w^)EDtE
zC}!Qfx&EjZ84sfMN!*<YJ0akJ7R>wL3LQzoIDK&~z1N%#ymwEHb0&lWcRT=IwE4ov
zQ&rH-It^OugGszoE(!#$lx2<^TesEG_ePUpuGU)iOg*_J?Q`(vX?Jjz%*DGM3$eH^
zn0kyk&zBQef!Dk#IAYBPJU(+6F1(dTZPaxj^G`5z-}^<xc4Lu@yg=PI0+<Cx!-<ij
z@a>ji_+dDk&Fyo5)Z{FvpEN{=)*GPalnlN!V+u*1-vvVdRPq)A&@5UAcMOuzV?rnn
z+`U2@#@;8hC2F9>GXpy&eIj>OGQEDTH$4t*Bn*~Q*?A(cstl%XBc9Qd+a1w-?HJT-
zFT$1&vvK*lQuyz26^NGqA%o?AskZxEd?G7^65TS0QvOL2gJMABs7%y~_^iLw5_+Gi
z;O_PZMDvO~eW#cOiPLrPWS|hIcgtg`?RxZT^T04=Rl2<-7tGAXI4Ge5JKFPUMxi2H
zV_D-+$x3i8G3KnUiLrHV57AaNMcY}K{P5;VAhn0EL4Fdsml6w!;z;b&Hls3?@9BY%
zOo$DgNk?Vz&`qnESY6_i$Q^8!D0LL4$}ELeo+M<g+5rmUrBrY<gzmniLEdV=r`MMq
zL7C1hI(4H7{HutA4Mw(@Hew#kTDl$1FPIEI{W+xD<v-$gOA{JhY(X%$i_Ipg_+e)|
zslQ_!)-2uyx}FY1WneRr^6j9)GB<u$?rHRJt)WZ3wc#;uAN1V`$3?H^!O{{hP-omw
z-iXO2t%tL@&h00#ds!f{SeF9|sZY72s7*L{;5(f)(;hrRv$%e5FSPl$3<L*vaz`!|
z!8+DYd%jHoG1r%Z@NOx~X&MmOmk}UA!>C0-5|n@PC#pYAVUAlBzRStO^S6Zfb@mPz
zESv@!k0Q`+VjO)h%evy#E9vM<`QYo42X`LI!lm~A=%)4BQ0S!&vQN{n&hs4^W?2Fz
zjeGEnFoF4XW<cZl6(}#xhfxE$*ms_B_#Kv;9Q>|`LrEd1G8B&q3qtUbnLZ|L3!|s!
z$6`X#4`TKt7ZU#7BE440(7t8{Tq>Rnql406oLvocOG<(3s|`>b5`*q0OX0})llXq)
zDqLtf0+eOuqdChGHr1PRo>dv(+ERdFM*~q?Aqx!^(qQ5K^VIo>BHTae3^Dev=@+dc
z#?^LYGyFQ*^xzd|yVREm>@4u*<~mTbutpnMNzi)d3wMCA(WMXZ^Cjwu?S?Ddj#-b0
zhF?DHH9pPy$)kxJ%LFgK=!2%liy_y{l`&)aBzAok5gz{C*mtW4=Oz?DKARm~Tp7+7
z>-$N7n>_N8%lOCXXL9v@9%B#Z(oXk4PUYhiy8lBJdR&-~);g0Ra;*q>(%(&XLK)l=
zZbS*+P7=0Y44Jkj5a<1|fL!rfp!HEiu=NyY`e_fg551rh$E?DM{3MtY*H4rjHnLuY
zpIpd;Sy24ciQKpt3!;gXDoZPY+m8w|Vpu73t1>3*(;<FL=xR9oHv;-}SL2X;FuD)l
z20FX*XvEklcxlya;3W)k6*g?vHS0RRx=4%`0~sLAI1fU@Wtc)7(0PqE*)&QKs69uW
zPiNwE!(x2bkp$M|5!kZ-75R2U4R<@-rGjgezcSnnt=*+@(&=DKx(x7EKMlJhUy?~5
ztFUv~7Ls*@v90&W;@NvKP&i>fzF4;ic1_xdG0e9&>U0Pk>)(rE#Y(WkHyeb%O{n+H
zr&QV45;SJ!!=-FXfC-YQHTgJnp83t0HR|HqiHtQ=qd|fN$?#}Q9he?@M7KK?z`&uy
z^p8RuZSq(_XYQ@W4DaXUv%L%EZW{sDBx@N@%>#4aY=v82W@Fo`L2^LS0X?3UkTp>e
z(EP5JNO?S<Jl@I%-;z}r{%tWT{Tw9Xlewg{+8?{Vim1T&9hqglj`?vjKr*%tBkOXY
z?420w<eSN?DZ0>OlEt+e)bnM&7ZB%jhsm`5SeV9UCu`UFVP<m_)9ce=$YvKf^Lsd_
z@fqOrCx%|QRtj+`C-8HE9Oj+L#gZopP{{zI|E;S7u~9TPeD4lC?2-YZ@?H25ba6z7
zEf&j_lcp>6T<q-)=%g73$(>o?B+3LQ9WzWyA}B{+P@TL7WW#eQu$nU*c=0ayW$Qd#
z=3D{FvrIrIXgNJ-eh^;hEdb*^Wq33u36E*995$QPIiD>>k;)?C{N@dD-W<+%(aXbx
z(oEVEbb`ND7>?E7({Xfi1unLV#w(6#n71zo<Wo}MQBg416bE2y@?u)8vXyz}CP4HC
zQxqi6#>kZ`u{=E(n@7K*I~Q88&Q5E1^3e^odM5x+eGC_&U4i9$lpyAwF?^JqhP)po
z=QeeZ$IHeg7@A{=VeXHq;6FQlO1>BmB~-$W9X|<qSV8W)hGW~y8)W9jGTa$c29G<o
z!2L&t5IHvjU`!}%`mhFg3sX&G`$Wid;}HP~F=@-_CFf=ZKx<qL*>X)1>uS!?9f}$_
zIj9sZo%LZu!U$*>kwhL9Fb^<n=l5-j!X^hwKM1Svu|OSk-&K;h*NMy%!rrG^#uuGA
znt$RMWAd6#hP!hL;ZL+Ih;{}rP2>=`L>jVQ;WtDywUK^q&p@7GJNIQ`I=oxB48sDe
zs7W{LoLi8OeLh*_=7@E$>5d6Jth_+3vS(RkPyjD@%){rR3fE35VT!#Oo%(kgOk7_F
zhO*_b!)p!d{=UjDQw@Y`g+cgeULNQ?97!9teIoA@)_`;SChpUEceuZB7i_t-47NR}
zgNJo7*t#(SulkmPNv|2|JU1a5?`*;(O%Diho&*i^z7mDnQgZF5A*_<ffX=yBxMk}-
zanAS@wA{ZDq_0kc=TnPe%MT+c>kGjB$^uM&Q3kPZZ__UZBhYBs3H<tZ9BQuq!$nD7
zpj#!w!A&Bcn2amLHIx0J`bY{G-LaxW)vMrFL^+r+KY^s=0`N1~0?PTvL9q4!al4mG
z60a6xPv}`PFVO@JcFcxcFHHzz+(EgNX{grtnzkK|LieUkQ2c8XY5v|x9Nof5m-AF&
zWVDK&k*{Xnpfp_ZC=Ql}?_q1SiPSRw4zD}1L7eD9KT4be1)M|$>t34p^-jaa9cu8=
zJs)=5u0Zo2-$+=*JtAAU51s3E`B@K5p=w+bH2$;%Q;U^2<zyu|d~T%5N;_erc^wGs
z&A41cc~H9-LyjppgI(8kvQB3WwhGLMLY4%SZHxz_FX>cZwu0|C{RTOcumx4-?q;5G
z5&X5Tg%JDI=$c~*tqv1NvE+E-TfP{*S%0p~Eyf-9+DC@-MuXb`l27WoP^im%u+JF7
z`{+gplC8rw#<@NoF$aSK>X43>hp};+Q7JnDy}rF5;_6`fVR{k_ZCnG6TkGkp*yFgR
zA0Vu=mhl=-5*NmLa~aF@^>3YY;6MiUq+a2&c9!8|jg8QNj8B^=1;ahpLFb}%{I$1c
zfa44!)D3OqM`RucWyWU_T?ogzf5~Kk?@3}Gh-v4n5c2q~HQe1*4Pn-+sJ?v?rYsF3
zgGWZfB87cW+?vJu&?G=1JqXX2tcE0`g*fx%WbB-Ni#DAp<=mIg!QFFP$WWRGJSh^g
zzcT`_J7nOU;BC-!whAS+RswHHh)LscC(OTCP2#WFKt{qaxaKDb330*1U$zo?mzWmS
z_nh-?RmaEk8TaF74=tYmmUP@4Ai~O3jc!)i)ONHw-nyHG1Fs^9I6(>=0`kf4+swCR
zFu?DyuV;Sklt#f1Yij#e7b6}lL(@NVL2fD!7cUQno0Ubl^>GA<5)R_vL0!NZnIOJf
zL8qsLlFoiJ`ZYiY?spKdG8sV{Bpm6Bt24pp##W+Hv=}S;vY1x~!23%Q9xn~XIbMHA
zO_2@+UMv6~%O_OYyq%UGP{6kh6{z`k0berl4Xt9{i;kf<tYGJfJKrktXrCmW?~B1_
zUvnVICl$3n8Df=;C9YV?yvrs6l5=nyHvg?AtNh|&d`$p0O+3g?Nh~G8q1Qwx?M!>0
z1>m66L5L`y0B1f~U~H!i3Pwb7_h!dJ>%cEgG!Cdv#tQOgXA!K;_XdTKHN<Ms5KX@m
z54|6EvU^e<SNcjHY{(cmDK3KduUOZ;&sfT99L1?VkA<ei4OCN6g{wPriEL%Fug<m4
zY2C?Hc+N5j?4xq<hRs%d8SRRXANzn#^Hk!mk^sCvJ2`WuQcP<#2ie>SSQMNK^Zbv)
z-(%^}`RWDNx~qr0W$c8fTG7liX9w?&rbE-HmnJ`$_fui|f1Iq7F}@8-rybHcRLOM|
zv>tBYn;Tb<hq`Bo*t{HVEQ7J_`bcmJOvU!g?93?^!JS)5FmhBf&Z&{a)}Sk#?Y3mJ
zkY0^8hQ6#{puwccAd#%q`#`U5t$`fFF(~1^3`Ifh{NQ^83omNG?M_!*l;a1x+egyD
z9}`j6JfHafD}|nYjr@hOV=-PO8*PFm;o22D5LwB<3h!)K62KTbl$}4$sgXmoGeIJ(
zkcvkoa(*lj^mWNQ%4@1?{1kH-RPIF6>R&N%nsK0Qe+F=x&hnh@%{poukizkQr{N8=
zAmA;UZ{pKuO|9LuKs`bN%xAjeir6IPb=(7O=P!}7pR4imB{7U%5ej-eksvl};g=51
z27$6M>8z_HH}}P$rJNduem21)qqE^ZuTs2O;>%cD&#3YoOUQk-1vFy}Io-Xd`Br_;
zi27p{Xs|tvJgeMBkI}BQu-6sbeJ4Pr+bOj1v4f_Cf5?AwqwuM>8dh>_R?(5o3FnS6
z>AGA>FWgRn+b`0=*60=gPEZVLI!)k~Tx3i$c|+nNqYC3PS7S(n0Q9^Mpu&mg+y-kG
zIDTvw@_+Be-P8A=X2fCY^7R+p*PVjSdY8GDZ#g9Gju@9NmVm<6jW}?ngiKwp2040T
z(DungqW<MSwjMulDnZkTPpAz_+1#TKoGW4cy#P?t6w#_0UsOIrU|ZjM{Lz$!^%erg
zZfv7rj^kN}cs{NjwE--u0?<?hkXWgTtw&aqVuxVt+<%Inup}IttTm`euY*RtUyqs{
zjDPTZA<>y#hlT$g!L-ZT(E0BvaW!PT$S>tA_@9c!pVi5NODtDnbclA}2q4QQgyZ7H
zV(@=72Y>w)LekEia62R&{HH16@QB%rk^hQcwLApII{=yy_Rje}=IhB-!p6xGsPCYG
z_OEmBpuut&;Hl8T>ZR!Y)dxb}=|R#U>r1Ql!1v3n;krf=2urt`+&U*jmtj1(5ONX~
zEJd8N$8c<)vl%`s`ooGB)nMXegQgwxA^B?sly5CW+tcfbEaS1Sx}3_qLTOML9gN~T
z$BBfs6ns%D#IR*=iN~hb{KgPFFbE5T9Rqc^ZB-dOdK?e>qFpfNa53=4oaOh=+m9jX
zu~^wX3z{-q`6{`2w9WVcm3W^-%T#k=#TRy$*HK5s`-gGtDCVQ>Sq>2o!ck{&2`#To
zAX~--VSN?rsmKK)w4BTrnDe-P%~Dj1SB2m!75FhK0b>>X=)7!Gp!!VPEH=ay-|H|o
z-jwwhO~Kb$$I+5`tA+f1G$0BvzGN)ahb)C@deNZqu>fiLO?s$jK8np=@_A;ur1-=%
za^?n`oh6<?Z@xeN{T+mh3Xa2Wu|EzynFR$$Qy3SGdB~J!^PRU(qyuSYWcGJ4++|&m
zzgJ{}!M@WF_otF&2<CG)<nzH%-W$d0ah&kWNN~RA%;}U22kCE&**;Ytq~u>v-FNmV
zkRL|IJo5*c$QGit{|LPLRt=-WilEp-23Y$fmHuxV1ocV6V;=?J4SXZJ>*v#wU)s#)
zv4wPYoZ&jRF+X_87aG?2hTW5f!H!>^$U9ZhsHc($Z{162vf?THulW#;TN;jAKA3`(
z^mS_C9fz$C?{aGII8xAD1Tzk20C5k;-6}C8USla}Dnz2!;#^WYyaaB1(Zjk)vM`qA
zlVa-)3C~lG^Jyq&UEd?QEat^&_W6%4di;l$jt)UClVvdOB-32h>(Uzcn@bzz%eu*H
zSvIVZ8>^>-(^b+*@~<NJ`OX@p<FYW;qJ`_as!G5Aa0i>i4%pNlgGS2_kvXEjq-hOf
zP_u9I>ha|e!V_WZs>8H7=s(gJHXH7ChQiV82w2o=0GWeoXuVMawp~`jg&h&JV{Z!G
zFnj?B_8Zbz&o#7Z>>+;6l+}1%I{~GhIn&@-t8jEd4EXI_3G-Zxagcdb53J0E;`vhG
z)E0&Ieubz^J<+Z0C*yLsFs}AEP+*z15VmfXhi8GcLImUKw$Ke{gt&Wz1PNN7L=9q7
z!EA{v_FBYXuU0VBH7r4ubVcH89s+vZIiSAW61(Py5cfJmXsY>2hA%WjrP$xZa+?Ky
zzqkla8HPdeTRX=6Z=velOL0CsTSPos13PB5kd^sqv~$WH&brkSCYWV`&!P>)xmJ&}
z+`0+MVJaq%^+$sp%9uD<h(bSR<Em4{6V3^Efa(4k?pfI6n@WVu8~NFx{%~IQG-J5!
z2j_qpRIYF_^!g;g`8stl+g1rDw2Dz-;XP{moUwGyULZx|!$2wJ4>>+R1?k@R#Ik-d
zbeSHZ1;%qw&!84V2A9E=h3+WGzey$?@`GQZI_3+J!I4ap8bH?1T(ca9drBiu=@%*8
zD1atyE84#N2rM@)fv^51Ft5QAiq#{??;@s~-$L$(zYz*=+##Kv9{hl5(U>t<icR8-
zMxMthlZdVEAZPlX8eR>6&Jzb{UClkxqw<uSaU&2MO%hP&N-qf-7fQZ<T7pshTSO3&
zY0@rn9GY$zka}?}lI8?*mia4MB#?wg@UTxiAC=Fp2cL>fbb&%5dhN|(-1u!!u{0Z|
zGQSXY38zPmiot+sWue;=G1jSwlbLs))J${()tX2!HDfcvmsZ$rw+tSmAA0#U5RF@v
zX#BtzmtHQ!E%KXj<)J>pTUvIm*47I9X2jv?DJgi)J_E;p4Z_I@$vA9gJoqH{lDC>^
z<jb`*xPLSN?~An|Y>^_>e?ASqnb(3|$ZT{t>4kSL>_T&uD*DkQ12u~XeZ6cGh(@Q8
zw=!SJ;SUwq%l=)hGoH@=PzrZ*Vqx6vJ^1%#4ZNS7h1#+UQCZ#?Iy}U*``BG#$Mzoe
z_0!?~Bu!9eJoF#o>v3J509zeS)9Ixw`#DmB(`^|BZnru}a-jz<+BpN8&U%wu_4!zR
znE5T6t&Pw0m%}~9BG#q2!$epY&k3gqxhUH^r2WT2u<}emugpAbn$XU@xwah#D$L1_
z-OI7KrJfZ00{l3iG5Wuag>l1cLB)(^QzJuJulGs5)q(l6DKLn%mBrD++J#Vj$Bt?q
zspTgrzM(xwElK~-N^q|@3VywTczqR{?__T!I2Zy6^4-MNB!_4j<l~`L1}Kz^ZA`Q-
z1(BLLp=Uo3)0YCc^k6%@{^Si-2h!<=r<0-nOd59D?q|KzSBTK+5Si5jnCX>>A^L~F
z$Ip$VUA4zGZH$?#z7iLe=|PB0G@kgE0|A??VL_+|x;~tPUW@Yan1daN<Bw2Xzo-1+
zKE9B1G#Ooj{_j1`p{6^u;cQzKG>h($agJ;)ccwQsl*i(EK`pA!kw+n$tA=d&LTCQS
zgL(X)G~ap@q$Ji*UB#WWX`q5Cbh?qKfCzG(oef0$o|8ZOzY~SeAzb(LJH$0%F_;WX
zWQ^!2>N}wnI}@7t6|yRra43O}(8^%lWbSmgn}~WWjpV=n%hrl`Ea_J)N6nb2WC6=&
zybX6|XJsBNHMGF`Tnp$d%_c=Yu^_OzXkyIeq2Ts1{$8Fmn6+iWfTkvy(~yomas_0Y
zLlms}4!HEAJA`>0BL%6;ac#jWkomb2XMHF|u~!Y=9IgRY(Ix1V7=>0YwPe#l0`=_w
zyVTkaJ^6YV_UjuNqml!6Z%4rUZ5}Y|sXa_Tor4WW#TfhR3McN$!E61MsL=V03e;9`
z(jFeLwY&g|KV@Lmgrn@+?GGktYha7cBJ{G~g$bNAoAZBYyuCRAw$|$5s1;d|SIBb3
z!!q#wjJ;qOAYgxQ1j^}KfL{9!tO$)mO!tR}rG><%Z6(;Nh~VaaeOP)k5M$*8<ZIm`
zsJ!nDn=T%O{X=OW*rWtDFP379r7M=cQiKayY3P2|4sFMVqM>6j%e;*Teq<RcXY9n7
zi_5_$x`dj2Rz<s!ePp-60I{;%LJo?o;Kagm5bo9DiZivTVviSC_r&7&4UF43D~R|c
zUL*ZiYH;kzcu2ky2a5N+K}9!)KDtzZqs`(V&Xb+#I}c!eP&f4olSAXETwML75?lZL
zCcG9Yip$GDv1J~bmjshF!XOkr3*u*X=Y#6;G?@2Q5uJx?@@s4uLo!nun)Kot-~Fz{
z_HhnOQ~b<#E}g<U9B&Z$1a?30Wb@}U3Hb0?BsF3C$E{HX@Jl5FFJ0KpdbK;K;+#j+
z#(f0_S!v(|`E0yYJQpNWS#NCX8uXGb#`SICaAI{eP6>;~;(XQvkeq;(ih8KgAA~k4
zVl41D4dO^E{8jISn$udjW7+HB<^F}Jk{LsFhW#O(lDd3}fG~23`P-zP)>2_2kI1zg
z06!U~txx+zjt$CTybogm--t#dYZG=(T}LcUOR?o>D9XtGrU{KXM6zcasEsQ{*@uib
z*4at6mjpq<*ZCOzDiEYidugDwKCYd(jA;n^r21kyHZGqAZ+lxv#+R$a@6|YPQD*vJ
zS{>(stX}EyXTIi6drtFbBNZiY;m5fy!H0LVvG$HCM(YNH?XiBsyBN*xQfgQ;!4di+
z2I-8oqp;CsIkdb8B=9c)hL%r;uGC|s`C}Yo@a^Z?z=p1Osbf6wx5Qle6?NTU0JhWi
z($OEY;N9qSn7_plO{Y15*?u9Eew_hcZvG_6e+)|9EyLWIOF&0@6;%ix<hT2dfRL;y
z;Lx2$Zdin%DsLT11Sp~AVnsSVy_onJ=HR4IWyUr3#^x#aNkgG875omS&by7cO<sF(
z+``j1wagq^=m)~vrOYo5A>{OqFzB2(iQIfS3S^zwS+3okFH~day&e}*d`OB87feKf
z!C<5IuAQj<&l}EP-;GkYtjLIy$$05<I=eGFlUrF1xOvA~yc?SXZ-O)7=A^ke(95{H
zF6-&=J#LJ#9!ok`9-}ktBA6bw7rzGkLPqj_eAzi2znb`f%f@x!I6RCLhXhi|)hj_M
zQx>;$ZG`~cDjXV<gA$BckbEi|iWR@nZU+(3DXl@vv~X-0DyFfC$(+%eD>Us0(@q3i
zx#GPWNRzieHvKn_TVgyF(`HUX!R#~>|9}GM`R_D;bopuEwMiQNo2G}i&c=gkVg_`L
z1)Ab?fruH0dlF9vEX^iEg>)iZYMB6K9hGo#ODPy92gA4P%<r1VI)Q&*C%idp&b?bu
zh2CtP&uMC;yT_|iDe0Z0Va_nBzX_mzNE4l1HHhHab+T~oV$>afo;15|BTd8oIGv~x
z6n_54ol{Rktz(^3!LW;nGWEGq!<G2f<^$c$=AGBR)u8SVZ?Zk56jX=C<M_0Ac=Uy3
z<L_lq*I<2E8?_Vc6gsK!N0P}r&rT|`_{+WKkA>LR$^5Mcqroeng1qi@gIOy#u&!qz
zzvti(dCtp*NtQyKP{EiX?^WS-nE~#XK85~Go~XMej9Pu4Ne-B9#obkR$t=)@+iGlo
z*f^St&40}O-0O*NH$NxK7v_VqI1?Lqd6?F>5R$IBK+TkF+`6iSWn1$pi3=x%3FcsX
zREnlFb&z@SyK&32V~{iY2wGn)Lx;@iq*>0F<aXrZG5aleuGj$Y*SWy4Ga7g+D;8n~
z#z5Z7TsX6rj{yq|k+;%^d#DwSwlSCKyPyy_FLMI6R=Z(SKnCOAHgj$Zn@L|pGRf$<
zLT~JJ#X9#AdeJ)xinr)eanWTu-!}xbwk-yY>k+X3Q8M0~X$hLAiufVl8H@LgDm{Mk
z6KT*kB_^(x%#&VD=R|AZ&57)MIDv6m<vRI1tA5hzs)TZ1=fb2{t04TrW(11_Y=1Qt
zc!w&DZyYRTzl9u;uown&q8+iz$c~CxpMB@Om#iltoIHyOMWIa!XaC0z`<vIJ$By50
zsLYjh(pd6GA7pEi&771*eITi?a#=>Uh}5$jicpzHmv!gj{pH1|HN^*2s;*N*l_Y#T
zIR>qpQ?P$s5IS4Q@ZA{Kb9jj^bVW&!>fi%t#(Y-WKC}GPwsrj3f73z6poOgEykPCN
zJl1_uNuOEOLUdCABt@IULzhov@%v3^FuVquWMlC)>zB7X?ThE0ma;Q?Ch?T00{0u6
zu<2;EiS$Eh5Z}Abf6-+IZOqeAT51EA!piaE?Fbz1>xiSf3PE_ykHpKbgP&<uXg<Lo
zJN17O=bRc&mudWJhhi{gN-=3FTS0B<65MO8#&l^XbXBwm{W1C|D2XPsPUfTj!UJ&f
zMmVN_m&g3<e5?z2NIWjra0dg}K4#|)rdK~uBL}3KcN}O6>lxbn#2ByNErDKk54!)x
z0X|I|2F=b3h<n>wP%4nXO=BGJK;Bv`t**r@=eMEunL<n)$cA6$si5*Wm3obgBn#fv
z;2Wn@kkYzK-$h7Zleav$uRO&1_xSXpVI(L*H%<6_j(+`Q1iII}$>NETc<7%38uS+8
zN>u^Ac&7prSWc})zmi-qPKDo(D&hKeF^WCh$TYPev<%(~b;}-58J{freEBJez4wRT
z^5+N9Ei2(Z+0MYaOjE3{ISTR%ov`7rHQCVY4N_GbiP4I=q_DyYhoU6GXhj_w&d-E>
z!$%<R{JzH76%%pf{S^3}8w=LE$3pAL`y5YI#Uw?kkIL$~qUHk;|JjECc1G$x*SzIE
zQP`VEWUi!B#n6>t93jH~!%m>t*Gta4j>9Bv6})D}2c8|vE8QLr>5|rX?W_U_zhq-)
z;W+O3=aJ|Ixj0Z_MUr;(Q60-Vs^zW$0RfZY!AN;*a?T+yEwh;K<v00y?J2blzeS|K
zm!l__2a{IJV>+}p?ktXn!rK#I6qk;9@7ew+Baah=budn%0zI~`r5%mUB>&WK7|7i~
z{TCXbs#7(poKT>5CZB+&>)OPl@*MZ?PAvXA7Y{Ab_o!QXED3Op$42pCSo(bjGCVG5
z@zQa_rU|IDQ%oFJTqHXR+1Z*WP4mX5Lt1zw_NvuF(6qx?!RC2CTq<DjvKnZmM&sT~
zyJ2H{9X`&U2;1cqQT^`<NH`-R39^wWbsp%~c3*g5;{v+pKl1Z##$#^jQK()W3>s42
znD-z7#xgHT?1;@&&$$e%nIC?}kUz*N7~+!Ku`u*89`$EMpeWFWO1S2bv*JKFsGN^#
zWp{~juM_SrFC?2UAI8j)WAWIV_q5=+1}fUDggGzI(pzgzqZ>Z~l^~s7Pvl|xXnzRn
zlf#!EB4FQ$EHu|FCW1GXoaC%->N>_9pRZvZ=)v{$ibo2*?ux<4vJ{{{{m|La35SFh
zAoH7jhXSt9&brB@b>wftXFATwtD!h{MI6|^eaac!DF&;r+1MePf?v|2px*QdRBkx|
z>FZhMYtnj?yQ7j|oKh%zcAN2-f(pF65d=YD3-G8$H7ug4;B~kRqc&BMR-JgR!SNcY
zd*4e`HJOL5OonBhLTK;Hjd)Hu3wn)0!J*Zjv^VYr^*hs`Zh{p4hzv)ZT8^DTQ%O&!
zC)NBmman^G1nhV@9qm)a*tmTLBtjg#-LVD(WmCYzN1f8>EE;9Q*4N%KFh}+P63M${
z^ap$VYS>O+*Db}U<Clr+@mG|FOX9JLli<dkr)lNKLAWQW(Uy6wZ!wlaQ;#9n-@Xpg
ztTIq>v=ip_jmJgzRX{#C8C*_=Fim+rop;9x{H!f-XtOf&eyN)1`c9+*mzN}Kku3}q
zn3HX9*I>#A3p(`P3v{bfsnqH^Vqs*Aw%&PEO*4Yre_Vxjfz`NU%QZ5vN}e{I`@q>;
zc}MSPyr8v8%a{(F3-&odSlgA0Q&Uyo*u!F!emDa*9~Z&*4h6JIFrzU~)}x*M4}!Y6
zkkz^v!d!OZJUiAQVYi#=No_&V;&<H9*AdLqK+vlHGA#(1kHXE{O_pygW$U&W=kCqL
z+p*Euv?G}7QD)tFS@VgCz?ASBE>PV<2dd3{R-(Zt#7gNrsk!L_G6^HmcCRWvQDN*F
z$9Ke|{1f*$Z#$@bI*7K`VPuiN0bbY_1#+j4z*mcLF!QhowuYHw(w{IaZ9Wc--K$_*
z#7z9|JAw6Eh;RX83IwokccbYF?2+P=6&jKl;_ZoQx*4?CZ9FZR2RL`(DolDm5$hGg
zScX&{CSBeH;@=<m!j1)GcmeCK?*RNz<<2sxYvG`^5WT~uqr$N$(rMn$@dhq%-OjJ6
zj|%Jld@)3pu=#dTOA-D$8H(N6x;Wr)fwq>~(_QSYYPYe2Y$*RgKMhwzfyG6#Vq^hC
zA6<u<iCtV%$tJ#s!$<zVajDp>oK3=(%Fy-~($M>W_3`Zg$f;HI5|g7-!Svx9nrRmW
zBBNp4gk=fvWvBq8SFD14$uXcc!XMQ}@4z~(Av$@1KJ(0PKg%<fY&7Vfid6%F_^rkc
z%gv&(eozN`me)~Ut|t{$hS8u&%xAt&pU$8G&~AMa&#$w<_nqo+W=%NummFoBqGG(V
zq>0|TS^zq#*Ju`de{QwafLh=vJh=TQJy;u#uG9kP_R-)RoXy=iZ;G{E#b7uu1e;|w
z$>CSyas7Hnyq#19Z`)!~ruhT)DX=73)DvzsmcX}@JPhJ$h{#|ef7=-)RKGtLMa7qB
z@qdn35f%v-Btqeqf;{^j0{MpX_3=dBCY<#n8AI;sz;0hc>qO1OU~?ew+T}U1Pbtah
zpNV~EL&>jcv7oTDfqy^q0Oa?*CZ0+DpgH;(k+Ny0a^ua>HnaqXYcf8UIGu~#et<sC
zmIXVJh{VjZ0-GQYsJK##q6lAV;ZchROJ_jS^hDIZF$KzQ$fC;PF)+DG1N+7&(jLiC
z;O6v~jNma2!RRCmR`EwgpSQF*DV1vV>p-1Q5w5a1?A7tpFhJK9>OG_x<0goHw9>&`
z7iIV^&t{&!F^o$w2VRT~KwaKLD*Tgy^`n+U0GlOiw@*S%z4zP~n^>6O83T&y%CLWW
z9r8A`HokbG1#fIeqn5;9+QcuS5?gMOwln*wvYZ)6<}h~FT07``dyoJ9nlB`<e4XMb
zZRk3kOVkHGQ6aW+-XB=scCal`=^su{s#c=v=UpiLzLy4WsRaqwTGk!<jx-tM@*ixA
zh7JceEZ^#hQu`P~t5=4kxLhNT)Wcx@X7;;oGoa~$JmyuBK;F1WzG?drSmdGsB_kK(
z#kg4T`_C8uO^gIjl)(mlwqGCh1RsSz<ls0@$hbKX)6UCa$;V2Vb#*p`sokdc&-#Gw
z=|@~_|6D5m^@VCWJECpZRZ?-F2JF9O!1_a_=;q%=U5g{|jix`W**A{mC3cy_{M5m(
zIqD!*`o=d^G=i?^9;%S|hA))RrgQ#`#h?vJkbBe$f}WR>K|v@E%Xi1K+p}S7Nj~sS
z|1vS@S_{V=?os#r5^NpqM|yaD{11BZAhT*YCS5uVqBKkX$nWutGZup*P6vR0el|EW
zO*ZoKWNfdV3SX9o!LdDaz{6h5JRNc9ch(c!^llLwljV?DUJP-DW#GKso$pXKf~=kF
zibH1|LDQ>@ziS!uDycf*F>W^ul-5yAV-fm4Nye^q4#aD>CF(y6MAeD{=yQ5PtYb{T
zx%4<+Xj#y>wtq2{uHOb7?x{G2d2<d*dt#Jl4ACh-65dt_?r*0-R*4zhTx0^3B^IDm
za*w>MaYWyf)!;Ha1+ybpqW8Nu^kB0uSd~7Yv@4njMGZ90A&Je9q`5BE&Dxn`!QXL3
z0uH6rfl65{o_M|&-`>AVzpgQcl-w``M+?+B(n55Mu2SAQXuR}nA-pXJCU@p3gMGIw
zZrxA}!jm8P`QxkT@lRu6QKB&_tZOGeTmF$>TcVin<T(7at^(m>@5Z*yPeizB75Cuu
zAzaM-(<gtOhDImm4GCXG|Gql~tsyBi^inD;VxH8N+7$ZH@Dz@oF&-zeoYl+Z6g2(3
z121Ruv05_$cGl&x4mo8M>^Em!7svQ*!16^k*^qA;MtDsV$eu^R_@uZB`wy&zv*K6~
z9uI3g?tYD&Yjeidq^m^OpKQ`K#O~!0D^TptdaVuZVE8pPi2Zz)hCFctG1aE^GktL9
zxma{|8OdGA38dkBMxo!mgD`L4FsQu<q+@4}LYH3FE3~?lHvD@?!yjhCya)bhJ4uVy
z_}jsbo6m^X#0RAFmJ~O;s}ejmbyNQ2br^f+HQ#4?Fzd&A!}TxHKyR;I@RD`q_6S8J
z|MN}SiGOL*MgvfeQHJ<XGd!u84I}4dgID`EqVTYcuQil~wia>ZLS`A%|2Ga2nb!AH
zF$F#x3xleT)38^p57UO1!~HmRFJ2u;T%!CSw_OFqlP1uJCywA`9|X32HGIBrA*^P6
z5}wL{$=5ft;N!t$IAL1_3iJH=drz{N&zEeF85EKSjxIQB@pepei@;@`fzWBY6CGwT
zPUYJV{N&FPXf<&k(vK|D<ScE{HNd!g8<SC8zZfrZMlk6j+YhhxaLzA6IZgXfq?K{>
z$CWZp;_YW7CAgVP`Z*KQR!oBY-?8X^!4rnR0!XSghU2qmqo6Xbu}zRoRk}UMpx$;c
z&P>H?YFVgTa)j!=J%Z;j2q!(ZggZwq;Z0~V$XzE;rZ0kqbFtK8Mk;^rNX9KaT}oV8
zey!K<5VlP&LKCyYXg)Xv4>sGt)9;n=?_?<BsjytJS}Z1JufnLi{&>Br45sfK1HKif
zLGj%xkn7SxvFiuE&l3+k&zldIl@lQ9`*mXRY9}V;eWP8$f5?_aZrIVBi1s`;G+Ho&
zYNxQd+~F7$?-6oT5Jv0VX5fXkDY*Yy5QsMXCZpMRiT69&c(2=32>WJ7B3l@r>-;n_
z@m?@GB}Jl;-Y^k<{>^teB?+2RHk7CR%vk5bVf^*~T+@~ZWMy?3xz4f_&ZhM&Gt|m&
zLTAor_E?g7WhI_?RtG$tPNNoDOpWYVM{cqKy*&l6bIEi5SJx-Bv|NDkmfqMlP)l`h
z#pB!))gaVZ$Sql0ils3JaN~*@=(ehWTzkas9`XUGpRfzJ?$X40hBB!8>;^rUyq@{`
z^uR;9j<4UyW;bW2ay>t6$gZ~`u=pQ)*USs(Z>|X2$U8b>SQH3mN^+hxBhk*6b&6{I
zU*lRrbh4VMV!a<`_XRRe)f;}~+9as1uK^z~J*ulZ7iIE)u}r~n5}|Ji0U7_%=gWp8
z?@(l8w_GloupVmOk97KD*C8D0)y7Y<!=TM6pJXjHfm@dT5NmUhe!Fdq`<~UnNbd-I
zIBghaNlXL@^{eEM^Dr1UDI1Tfmmt@+35A9_Cda?0koQjw@mTtCxH82Hub4Dbx-*h!
zmKt)H!}iLIfAsr&Qz+f54VpDlbnd-uyf1$QjwBVJ8JlSiuzsh;mZLZ$J|72)n@Ik>
zIJ!P43xyN!6JD<}?oaT>vs)|B_h}j&J8T99Pb$Dx@i+H(+;RNtmx5t^n{dRcV6-}+
zN2hd`!``Q+ps^(qysP5TXar*?zD_37*OU<T6DvVVPZOKMEHK@(8XsTcgV22fQD9ka
zk*^V7_s{|w7hi;vzAeDyV~RL;3Cl))ZKXX|-jPq!mw~2S1PZ_HCmv~0<bUjaXINB8
z6Yc;K6+{I^L`4M&DpBH`5=;aUL<A)%AfRLzl7OHiL4xEYN)QQ>gMb0%6k@`F0W)I4
zgop{j9B<>g!F6}<e*5=cpJya>_w-c1U8lOb>a7Uge0a0g8>S2dV({)fT$LY(t3>P2
zMeGLEw(JDc#E#(T^EUXrb|;#>_(FHyKSa698lmKQ;zOUj5jwo$NzAz;bH#dPw5X}X
z$U8dFT3CgFXX+_By_AwjA~iM^eWf;S(7{v9d*J%i6rAN%iJxFH29dsNMytZd$114O
zu!Y%VTtI8<JfjjbN@zV{V*j#Fb3{x=BhSVNEEk?bKUm79d-SrXtNsg+F;WmzyoIO-
z?`vrBSt{7}HoL_zD;}ggzfzqxjdX@e5;8JMn75|qz^&9gXf<|&vg|An>q@1&8bh(u
zNQu-tCbfDOH4;0X4JE!sRBewU%x)W?w4cqVi*%<$nK-$dpB0JyGP>|ALk>@pe3Btq
zl1pTLir!n*Nbr4!(?1vhCF^uh<lQ(@7i=%xayARgoEveQQy$cdhvEH>b=Wh_gJ#d9
zsW%mb<5$E4{8^;t)wUh9>{D{)TycWBJvb3$m{HjIa)@(Uo(JDJu7von0eHQ(0y+|i
zuYI&Gtr8#xt=YAxc*hRLOeXPQPUmRKlhhoG+DxzaNQLRnVbHHQ4s}}=KzV^FC3^7;
zJ$qgx_HZqwZEHi}<AzM=d-jMrY2k<?ipuGvFHtydRRzkfs0MNFskp+O@T56rL&H{8
z2-{JC^4Iz4`W``A<e>_T>ygI!*)gDUH5GeC^HIw2Tj250-6St&JG8h|A<K!x1kR|&
zooh<aB`^{d_VJ_Gm<*D8C`a8l(80E$FgiFyneYHC$2hTT)Tpzcr~<uJAU=K`31ecw
za={#I$bCh()){~~sVQeA6+`0CHneP#GJ~4Kxe#Z?M^}b6q0k{&P!W$N_4gW4aD4(f
zr<uaNyJ9G+R75cYrebMJ8C+ej4a$d#pt3{>nx$RPa=;%oA_;fT_9kY+_Vp0uxs>F1
z4AA>W&xM;!hB)Gb9ZJ7$rmoEyhe4VJn0fCrm1%L0s(iB^j=gB1&+~4B9M&GXfbhF5
zAa?e<4hu?8tpdJ0T#nCqm%z(gMcA}u8%009M@cVS$(gAnf%PYJs35U;*wpGne25+(
zyDSB6Ovpjg`N>dz@fiJTKAYm5Py}anGaxZ&Cw<a<F}kF#!=;bvX>i{IkGB*-wdP66
z{CO-VWNR*MI8O@au^-dfNgBY0Nz7VpEy(9i$Ndz!SM__(c_|@E>X#}~PmVjH<0>aQ
zvU(cooFci5rkCiZJDJ$_aVkeaQ5eHx8tB=TEi|<3KsI~6t{mZF%^2Jb<6g?5{wGz+
ze4i#J#}cm7_)a>uXD8NMuBCTUp};LC0&8FSf=Xv1t#Y}YDRzW#{oFC9yFZ12txhh!
zePE31h9vOa%rqQqD?~S!YgAinEgH?&q@VC};VgAFO4KM(iv-i?5uPbjm!B0?ziumY
zFeDtD2kTH^%v#j%>Ci1F^=b6G)G3jP9+;ay1r6fE!Q8=ydT*Txg{SADQoJFY{~U}@
zcd}7h#GYgL#U4e?^O!;WMR3I-120(=qV=Ky`hZkE9G_PSfyds`d^3{K$~O`+m*r9`
z69#D!a)&!EI11!mE1*j9HO|0%UAp$Y7R-|j1ruu}+|R8K2MQA4f{Y#(?rWo^Ebr1q
z`Jo^hOVdWjOsVeM;^17r7fTExX}$eP5V;<};J7}NRqX(&jgH7jdc)z-O@mf53HTuF
zgR(cn(2D#mkg=a)d<|i?3Xg+roBdJn3*nJXIYJf2#8ZoRTA|iZ9i)fJgS%Y`R@7xc
zk98a8F((7Y^C-XwQk=1Hnj7<Ug$2n!zRzsmrvXMAcOt3XjR$WuQt#UnP|qPBnniNq
zcw#<SwmqZro_5mSHG3dr<80c|K!q9+a+Vr!3!n$m8fXO;8@JB3f|6(a_&%TTS8GJ$
z*!?r%>X}I}zHK>9-t3A!#r9NBL@zVSk#J3@T9TT_;_yDR4#Id+@Ii1qDhW&iY4dmV
z<(&`cgc)N&I^+{GNj4kAIO33_97xTcvlhF2>*@5uT*6%!gtlWzzPtB+I&O11IuB@K
z&5?Le8=ZpP3}G;OqeJ@%M>E-B@k|Rdl1Dm+hk5y`Cmy_ej$#EgKw9%UoH9!QvK8&|
z=Ab;zv{?uB^A}Ru0kbInQ`K<1dpDe$<w!WXa<J&oba);b2G16k!c{dBFgWJ~YNnP@
z+_e=fU9Qs%4}G2P7$ZD!Hwz}}6Wym74F}p9KwYy5BzOD6-mp@L*jx++1**_F`v^V7
zdoqN{nSfW%XlR~Z4I$4Q=y@v65WappwRrkk9I(y8ndve3{IUoPoT)>`^_R_s3yBV&
zVGp9p<w$*v#l(Gmw8~?1YUNo)=$b%yKP>C9!@ClSJVc>->NebCwHZ!tRse}1cS@wi
z8ALzj;L~@-bYo8(29Y(?3Fag9?(erq=d32S&lH;BTtL0rT}HL-P^WE`6H#jIF3e_b
z03nZExTBZk)y1ab({^!~*j<A57hOm^l^!)zD1cx<c)NB*VC2oGwBs$LRJ<!W3~Ibi
z!5$UtYa2t&HnK(O-V`eLF!9OmC}I`_+@{azuZMvvx0$n#-=N1`X~3=LmVp|VCV5`U
z;fH4o&>fYCC!fD0cQ4VL@jjBUd3iplxVzBOQJ?7j#Fy57y%5iI1|$2#Dcx5gr)g0G
z!cTjF_-r@UQtas`TY|-c;01dd2<k3EaBxQ3>eY~8Tut~^NWH8g!htr$2kJB9XvPas
z;Cbzgk~#hKep0v7N-i4g)+C|Yz6BU2=8av}ozy{N3u^I$1UMjWiwzyYVB?()3+m1B
zN=*uy>usX$^p#`bkp>cXQAvCtTcC)Xam)4VsdL`;5WP+qq71jd7-tgGwmJ*V91L*E
z>ow?Wlm&{fxgh<cF-km(ro2oF>3OMoII)2ZZnJ6HUvVrP4mU=5FF`7l<O{V25-wup
z$GXc8IUsd8n)WM~r;L`KrVC%na}3vK!kfq%w3|)%Nd`aDvQ>3BVUst?$TLaon-%tX
zc~hG1PN02rHI_!C;?345TzP{VA|gnx+1{g6(o$U%-TR8;mb#xhdpnK9YcxRpxI&Ed
z+Kb9EInYxyilTYSsjYpJVZ4$jBqcTAr<!~!yq-<3E~_Cm#|oKG+r2SPse(?bUV+{2
zV==}!9G|At<L2NhtYYQC?13D5{n#WB&ESD)mfHYV6oT893T&<7#;Js}y0hGcPL&m=
zUo8y4!zyWTcC8_Htx%y|GR)C!RxNJj^MK}&X)u1;Lf}%!0fq&md7K`Zn{^>ae!4ZL
zwtV8W3=$?S(R>mEorG5us<F|o8b?gqN1Z1&y7?I`N|8Mh^zHX?-Ub-p8X<RhD)*YE
zx_fEA@GOqMaRlbdmq6n(H&Rz26n47v!fc6oSg?Nrh-6uV^xizq^e!{}_#qFTPT|mb
zK}h%X?V$Kl*MnF?GOfQcrRCmpH?UqOPt|u_WBOeu=Ld;IsD7PBO_(i*VFy)#8Rm?B
zvi0=5_Cn;lq63@fZh^VR6%@neuJ%B+A*~rV4chu$=!d7(F*4L26i@Tw#b`B@oH`BM
zX6?aq+hlRkoq9?;EQjV>afRM;r2?uV;=!CetL8b*!h)G3UN>P4C=h$};OeDxoTxi>
z@31mVvhqW*i(9eGt^rOfx`AW+SteV~nO<K~5BhMQ<EL?%Gr$$g^z$gBcltWPsz(X9
z*(m`_HFx0r+)T(Y$b}reJlgTt2IioO4W!r!;#j@4XhGtgSVmmP$jdxfUsF#zUz>w%
z<yo|&$`dN9I0e|FKeg<CxCRRKK2l+etzh$k?a+KI5|`?qr@OC(!A`atl*klNxpmTL
zgoJl7kKE;)cuNh^>Tq$~XzZ(D(EX|*;K0wsR|^i)AI2{Q`(s<El0zhJc+X&S;q?QY
z7Ecw>J-!xtrZ`eUamo-D6#$hg!O(vr3CH%iL93%1_=*(6oio|6F+U1+ZUzXcjK)#7
z<EaxXC*Xc}C77FZgw}Ib##q$^IK6Bq&bmQ-0}CRUF-fsdlU4^E_T&s1f12r&6oG{U
zp_I@BCJr2A(U;V!AY|w~6;F79Dk}=n+-DNS(A}fkR&Pq~e-AOu7PDZ>$K9C6c|q+u
zT?1=uYf!(j62rr@QEg=u+UAJEo^?z(BDo&!?ytfjQVT(-&ktYO)Zjy#R4}V+K&}Ta
zSlBv-o~Jn$2952Y>0Ldga;TLPd%hMDrVY>%H+ND&Jz{9?@{I1AM&gF&$Wpn9&Y0Y^
z5av8}Ms`FM=Zan=s2sY$9FQtT83!e_jd(^EXt_a87a!$#Y714~af}*qRu$`e%;{)@
zURr5UAzqu42iASDxFmlG*yvP)d=~M|ZZhHsOvxpAANHKM!qIfQq&D!E?E-ldnqutE
zByl@RG~2{SH@sw!lD`l@^LOMxh8-IQ3V`-+uSDtReBgIEiuoZl4XvBGV7j;hdJ!Mq
ztI;{MTVf7w4KpD7dIvK(X(DvAnqos`0UCed!y?fVk|!nFT<}5$LTb;`A1YqZl{Nmr
z7E@<>iIbdFz1^JI(FbVn$|g9fnF!HUBMFDhNQyU@<aLam#~d_LfO~#Zp+7_(qHA{3
zQ6n;8!D2%cF%AVYe{!#xvWGran@sV}o{yuFqA0)3*O)nEk4G2ApkM4|W@K^<p59l5
z?sGGs_Lx03uAGmePue+~mgnLwyJTFyWG{@lSc-=Uw@1EtA<S;crtT6SuQ|UfrSjwh
z)fv8oeq%iaj_Q<S`sE-rYVgEQm14Lk?-G51Zh*6nr6?}2n8bR?L)SrXs@IE+D!MnA
zN=e)(@6$?~?IZjOV%w-!<T;hypawTRhA4@U3G@V#i|f6<0J<_KQ?`8-sC3vK#;uA6
zF4a;XJkcN;v4r~6kV2J{^H{qz*{h%TK*kj|uDowUV#7nBzCeNSH}PR%=t^XTZa@WB
zVxO&g!t_y`3Om0BLhY_VQ1cLi%J?ui!ykYN!Gx=6Py;Mq6i|D}+3nGR1`xP^g@Sik
zP`5k=*QGf^zg{r9#2lgr?v`T05Dx|uejvu}OD$cR`FLcpF)S&N!hjx9M|i|uoKm+N
z8F#yBHe*an1HUI8l8r?pTM;VAX%DQsP4fI6H__G397@bCjt;fXMg#5}G@k?EBF<CA
zm3CDqN&HL-<~e8<VMzF)av-Od8%_&|qT_ZITIJXqW~8(@sRgr$Ig^!&+$2Wu#F{3I
z+L?zZCT%BoFxx2Z``z@p!>(91KzNKxh`psWNcY6%GrOH`(<eOzC^K9DU*1@O-pEwA
z@{k(~n>NuR@`}(LDT}d_v*DddB7TtK!LbfXP@Xt}68DaTc79#rcRfs>yKV#bpAlYc
z7b8>**@~Latcj1fkI69KNli@TLvvH0isFi}yP4#L>yW&~<ugE<TFPurk;iQ1Ww7J1
z4c^$Mgo9h>f!Db-ym^pA&skUm+}flLNlhvW9pFLRB43znm;s9Y0dQ}o5ws;N=ak-z
zg*grhIG{xGL_~9Fmx?q}Z~6<>y`0qLJsnLwyO58uU!p<m5-)squ7-k_^TAHp5=DZR
zK-8R_q}KBTCfnJbnV_T#GeoLE^T>2K|B-=K_K_fMp+eVmkh^em8|=NTiT&sXni^!!
z4u;UmB<@Q_v5s!@N~7!eOUQ<=qq!`jFiCkHN;-z1eNYoMYAoTG2v(w=o7m#!Em0uD
zyB=K#2fcYnE7Pn!2EDCD;x@f)uy*Vu$iA12y1ZqmBJh#Qd~%x7Owk1~r$baHv1|Ls
zkv*tZk5a9KAI6VxXp9-69zr9S^M|6JE*HIBGXsun%0u?+q89zVyQo!L@*s%htuEma
zhCRkcFmOVYIzjF{_ehmsj>%*y?yUsPc(xZci_GE9yq!Qt-lQ%%yr5O&^JvX9S6DTx
z9-OAu!}YH=sGXogEAZEYKv*fo_6^`HHkyDl7U#f--8ZO?E%_iy?Lxn12mF8z&`j=y
z>~(#Ry`@CA!AA|`Z&*?@CqJQQQF-`<vL*F-wJ|p<0aQj$rFSuEq0f<@t{txkg7>yV
z;-m|-F7pX(DbP%HRA<AwrcF?Ptcj^VA(Wb>I2NBDDMHI@Y4rWCdrADN1kJ}=f(K=I
zDYuM7O5y!Pyd&U`qP@`^>e(Z@t0oB91+^UJl~fe96@Vp21>tj`9EvDt!;&EfjCfWG
zfrP(e>Ag0Zz0sc@>{<_2Vf7>@?kcDKFsZ#R5k<=gMiDNf7&z!WidwCbh?gJkrVm0g
z<yb4moa3B8^x<6WwO;`S>oT!w5gQo=L-e`1j$qd@gY4TT?9n0hhHmsysxFP-BP0kK
zNi%Tq33X(ZmqKmvSo~_a3v<rhrQGbJsmS?++r_Syp0ywb^-^l!nIa!P4^)Rpjt@j_
zDu*$_#J|DQL0u3Shw<r>=vXaCUDD7;YJUhG>dA*0vAKZJF~CD~-Ge*(DEl?UZ?O9~
z-QyU}lwvj0(eE0m57!BYT>O3JOs!CO&btasb$M{@lNreFi{fBY7QW$E2koN;bd_@!
zC}%~3N^=n>gXCM~Y)_*exQs%vu{m^7SOwwDenCt3l~TPV4`M<4RLmI}Pv6XKBK0Y{
z=`Q{rN<Uwf`e;RRZq-#O|HLlZ#p@mAS2KlX53JRFdWWWZ`phVQp4YUKAo0n&Ore@K
zHQ_3)cx0a+q#er^Qxc6cC=>lsc%qR2C2{3wu78GZo-+;G-ks%?rR@aA%mL1Wjg_>v
zYzBSi`5tQY8XbJPF_qSCdPvQj<_~_N>eMsb4pUAT!WPL&cqF|9drqd{rybh~cfK2m
zw+5!mxsOzcW(c*=A{Q<@f2A6G=ixd(Yb?}A;z*p+q&3wN@!;OERA_oM=oi~Cd-OV)
z84ok?*_>*4_gatcG9~AilqNhLUWX&nTB(a;)WG{j4Af`$at4myr2Tt~aNveAT@rPU
z+7Ooj=Id+FKI%NxJ7oiICzzDc3`U0ydq8So6khvO3_TC|(MZFbt|E7*{<~eEec4rd
zh{Wv`8|+32A9ec3lt47^*iFxlZ>P2xDq!d1CZ_rLRMb&RL%}<Q>%6`c9Z!5@(sD`E
zddni{xk>In?<Nw?{*fReZwzjtA*kwA1}<%>v<&gXvrEQsSdAnW+`bf(-%KMu;Cx-t
zm_?MBrWh)QrxSgiPfZS&L%zT+S{o#&wXMM*{v;Y-Kj6h^x{<aE%ElhqeDWR>@zxYI
zWWVyIVEZndJXD2^JxL&aH;n1SZHL@e_8=VC03P?A(_inc$FJ)uK>K|zHCOX8CD6zX
zPpca!BNq>fk?2E9%o3%$!XoLDBTXpXa!=%~l*P_h+~C%bL#bKLgW#}4yuENK49q)C
z`9IahlBzDMFRq3PKX98GD=P~E`<yWNs12wXTOy-pDlOyJK_#45M2SU{>CQ!M^qt4r
z_+icpC_a^p`z9B|?8Gf}`z68?;c7r-70tx97hIs+o(WUDWWng+YFhZrc4CXUV?tLr
z#FS=}Jy`_@%tcYi#SxXI6Cl}u#9i&Ng{RjaQJP$H;7a>eoLL$Og+un#m2+ueP@aSC
zJbS^qw2s<BcwhbRuLiH<8PuNa6`;+VPTTK@q3FJ3+L{^#9yLxNp|TVWJZ<33{;`B7
zqzZ1d&L;P3m+68z^U$PVFBm%)g0%i=dhDlk)GWVCFVXXW3!e<J<zx`Hy=>-m@@H_~
zWf)Ps<y-OQCKAijITHI{xx!6ZS#q~g269vkOx%zMwM;u0JiQS17Aj$BQVv*h<zuI&
z1@$1Z5gW{_30JKNGx?Pd<ORjiB?4QhMDbKQL2L!Mz4oGnE5tDKN(%PJO$GfB4eEYw
z9-f{Xi)W5CVdZ)fdoLk}XZUxb<N+=C;^~by4~+r8<3pVBe1S0g#AWLBn;Z!3DIr#d
z53r3kGJW`hVI!{tszi~RbWf&o<_4Eh<AWSv<x*Ez)MkW5{Pu+Zrhx+UXfP;U4WA@$
zQ{0lvAo1%1nss9jc7;__^)DOgR-;(-J9CC!9K!~w%cC*ACJir~5ynV?WiaVhIyf&3
z$J3|<*IKqi$>4ixU?v~^Zq_CG9*H3t=aP<RC6{B*MZo_0SUfn2JV#?fFfv{k28vDS
zc2+TJ=_Wx=s5a$tqMjOkbt>u|8c*&_ztAdrNvJ<^1hapw8nlzR+`)x+Xf?f1sQOIy
zgoPuM%PJbUg}p#!WiO{^%>p_jNtxVz3p3}JkvlgD2Z)iYfc{>7qBm?%VA~kF-f<>|
z+KoW-hA1qQp2~SLR|Ji=uBHwuEJaDK7#ycC3(Mka@tn?ROh_{aQ|l;rUoVFX%8en+
zc@2D>tc9Z8B6QQNBh+@yO6c0DMrxb!!c*l-bmEvXnCUBtjagOTe54p9PD)Zio`N`D
zhWH=3PH`M}oaZ#D+^4n_)`CFQI(q5yt8`k<R*;&Pf~mq{#20gk-kLiP+M?8`c_c1c
z<<cYSyg?3$-B|)Pm3xuV5z*4Tx{PqKEeARMT$Bo{r`5<aTH=Nbz3niC5B1Anx+@p<
z@6mvZlcgZQISM`*-ywH#KGeCj8n6#iF#kvnmc7b=<Of<{zAu?-SY!tck$T{`Y7(g-
z(Lj$O{%Kbzhb0j_=y-WLZuuMwLK<t(v|tr(ik|}m<NE1t+6pI=+Eh}Px6wgf`S{Y9
z<lYF%p+T87h`M^?=3+KfZ+AntghP~mTnF_zD-pO3#e-NQxx)^##W(2zIOlmG9I<*p
zhwT4KoiEr1=k6`Vb>`Nv>4pWI)^P+m!zj>S->LfnM4-hm7hBbJprmyi_$#_#vZo|Y
znidK6C4rd8-a#*#Iu@R=ve6}Cka8~x#hY{mWk2r<<u!DXDmzq%N^;@YsbhjtZES2w
zS3w`bAJRqQzNp}2Y_A%m7mt?5n_KQs8~U@+G`bEH0urHU)gq8im`_Wk_fvhTjdZ#q
z6V-05$5EYq)PpZ1Ch6Hn&h&%h;apz=PBPtsS-ix5RL$TNX4BNc{l`gN6<O%*^M<a*
ze0ui8dir^k6DT;TqdHk{`F&3*CE?Am$9fN@&gQ2?UVDJd$gh-cye14}kD|*>Z7I?D
zP0Uf%HMpZp5pPu#!<L7;31`GIT&$T6q7w`;mfRmc*fkI9h^;#HTq0i090!iqJ!y;H
z9r&Jm7fO7Np_hcSU~Hj0<SE{xRT<)VMBNRFq-{Y$G85M&+ry6NNO;w!hATfQ0v)oK
zj#XyE+oV`hCt(({*IwXoOebKlBo7R3twp~%O7zI1J3%mE8*;a@z-*r^Y&e+%vE&Rc
z^F|L>p3KHLO<vl<ClPf_;xTYXJ|%OEg$36)z`X<MID7M6I{TywIK8NaV5KqOGt&>+
zwiIH;`e<^7pUw0y=%<gLuOPhVU+GxiVqiE$={)J(jmGH$*tNfy>bzBoL#=|?HpYor
zy4(tL=9HjYvjNR5!vGt@bWDE90};34&~f}R&a?J%I3}Kjbj>>aTv-9H8nS5p)vGv;
zt)Zl*6%V@JNx=G(2beuO2AO{EweirpESTz82XWeo<V-sQB(y~FlkQf^Z+<^5Jf#Ne
zJYwO!Q60G5xJNs_*g*Tu;HKB)@uJ(}S=4$e8XR9*QglZXwJ}Qtr^MW*Cv07ea(n9_
z(CHS{kU0hPo0n6{QiZT3m?E6USE=V;0zq*}2xyE^!VQ}y<D{+g&~CgEy6Gg)(+?A_
zm&aP5((;;4R69xYEq4Ot?ODj~zo%<9huDu(CsG58o>M(lXDBYU?eMBIhCZ|VEoEM^
z6WL<%OzR`vRQaP5loaOz<rO|iHKv*4>D8-YOY&C8Tb)HeA~6n<<whXtyp`ki`4pw*
z8;&&tbs*z-hnn|<2Y2&t1(RYC95AWkFwIDffX^<pi`-qR&|)ex=T$vbUl&66ZhS)P
z*>A!eZ3W_o2%^{_*-Uou%oaEAR$A655puY@sJ8ALPOS(Vqi>NMzvw~T8EvF~j&%Yx
zU@Q#XumfKcA246iPINbFM0aCyA5^vz3~$z>&@>I0pqGuwN2-x~do&&p-GS0ymg6kT
z7}zi*g@KF79(Gg5?v|Ah=I8?sSCr6rhYfzQ+=&CPxaf^TDIj^(9LENwV6TD*h+ewS
z^i#UcJo#0Dc8JL$=cP1^yI)JQ$C{#F)G@l&$qpy3Hp9ib1(<H*3k4r7p#H`#6jCyQ
zgm+!^Qq9|R>+t{_k$jDIZY~6W*;$~oECF)wnBsnQl3$q>2`0ps^ZtAQw$ftYvi33U
zf7lN!B*O80W+~bi9ip?FLy#dOMfG^>VwPuk;?~6l_@SW)XBx@D;9Clr)@AsyGa2Hl
z?ozLt$Drs_q*GUxVt#5m?9(A}q`~gGSz#IASFoDSyBJ05H*M8TwMwU?)ZS1_+50K$
z1+tWh)L!^_7%)yfoysPZU$MF6P~v`x=AJPDw8~Rqt|vd*x-J4nVml{wFq5M!9Em0u
z(oymY$;m%LViY<R=<?OcbZDytdR!L(HaAn(@k|a=>I;XO&RGBq4Xc)Ni@kJLiYQ%n
zwhVWyOhEfDr|32x8N91sh5aQ0=vQ}=(oZ?d>Er6Av+K=ZS;1nQmc@k&TPq=mD;L|D
zBeAc9L-DU7@m_044BfkCI;yA=W^W;J%6?(^ZiPSUd-X8;4Rz3Zz?H71tFa-B0qo1-
zOciHe>ZI;ws>gXdb0n)ACTWm-%`dk&4D%eO<9QFJPo+7WOqHOTE}W&UBPP){0m7JC
z^p!cYOAf-Xj>d)v!uRF1g>$^44Ek)hV|m^@T7c6>^5-_wd{Y|GPp<*vmfxXN;$-M0
zYkZ-nY6UHEJBgYSJ`K)I2Iz7V#cBJ<dHQA=j*kn1dc~W}k;KO6A@{~(N0>v;w)M1V
zLpyW-y3w%6VH0X+6wzYF<i6(2Xt*lA4aM$C;gNARFt8zyS#OrlyxL$21A6D_IMqfH
zPwxdmgfFA<{5Euq*hdHMi>51grK7}I!XaCs$vIfLnC_4vHL{njft+o-$i0^or(vWl
z6!WJ7)q0xJ$HOgkFXCaSiqzMuyF&9fBtY%_`M{%42c3r=Gu0dGV624<YVXaVCHFp{
zZ&uf%_H+;W&^j_^aUF)GEQXM<Y1E>tQIzPkX*k1I8rO*{WAbxjsMu_aGBsQzQgI*k
zdMppIA<t7H%L(tzV^3gr1amxwdT3@MIeWPUp`Tg+{lG;OY~C2*>bkwKCpQR_g()aE
z>!B11%aNy>a3NKUr+Pj8z({fnRWsiguc)O$|9v9}n9zirgGK0=ltR@?PK82Q6OKt;
z9_U>rc@t@694{$ZOpG3)M!oK!dE&|m|8g~~?N$Iqd6JJJvY6gxX@f5b&u1xL8MGDA
zy8CMDfx5Vhe!w|Sd-JDad`AI3tBu3vgP9QeP7B><#^YB`4!qhENB4YqOgXG3=g_k=
z=+~!~U>)HJtv8)aYE(@}uc%Y>N$2_0)-~Li-%tS3Bd;*~$1THElh$H=xGP;HlLiAp
zvXm_t!>wfv_-2|1<jp9cq{Rg};mUDzGXGlG#L<GheVJ6G{|%b4H$b<xg5+l<Izs1y
z`IPJj!VTR%79V)0P%n`1(mJNmVwaMrm-8Z!*V_`c7kDB^b~mmnBOE{Po9OeMIjCp4
z4FfF?(>g9;D3E7HoiC1oqHHx#$-Ks#IgRXT*H!RIF&3}gC0xMsHt5!8?x5do$)Zx1
zPo`ncUf|cMgopcz;VneLlMzWUR@Vex>sg_v%`VvOx&w_?1mf~zg}^(s2ae}&14hpg
z`nEU=&u@qV%q8(I?ptx55E~as*`ncUQk!#i4<&rN3KNv>(Wy02RDN<jaHo;{lEeba
z?%5RVJ8^~zyDAT#k1s%h2wy7obPm&%oMG(p$e!3L3a^7>Np4LoR1LbJ?0PGbw^)GI
z8s4;8$aEA~IfpVounxznX+WWzIWwn0pS~FSi7t4w40xJ}ZBOd>Y;TMP%3Tag9v`RY
z3Z0|dHVJ~@jcW96C;~rLHnUt}EuJ~rN)N|&o2S+=`${#ip5;XOj3rzuduy?6vjCGN
zT>zTFtD&>?G$&bhA=I`<qg<dJZam|NDg^?_nDdy~m{W)G@|*Eya}NEgf)6*$E5m%%
zN%(1X14@Uipc$iW>3hXl*mH9}GpV@|T!1{6P3}`o`Ze?bwTOCWvxG9@51<N+ZQ#o1
zbco!#8Ez_Z!>ckSZ0TmB9Cs=>G~@zX!IqMl<c(IA70@>E3}y6$L5ofkrF-tkP-*^E
zcy^^FR(^><OUqt*Wip9@n7jk(daB{biL12s#u&<@j0@5Zra`$Lhn^vn3gWl(AhTdJ
z@N6(eY3mKt=sBb=PA(s(?EV%~_wkmllwmmrOOc!k-e~GRa}65iMS@vu6X9T<gI7~k
zU@et_4TS4Gb)72q%aQvJjS<Wa4mXy=3@W}R36x~LK)^eVW-EI!CvZgYLe*0I;+>D?
zCj0^yxm~X@*bK(-%iM%#1cM<UKz{nuKVb$h!`s)z)l1pMDbPt-UBzGl7sFQm+63Bs
zNRt{2@dUnG8jyb{7bdMFwF_1=py==@ILVt&OWbOrZ1}iv->Eq4Y}t){!u&W#KLF)Y
z;(+r$7UYR9D7kn8Of-vy@iP{J<1t0%Me|v3!MqU?T;|{d&njx>q80Fvl>!B4lkibP
zEie+eX~)w^)RFOP!0c`{u9hx_wjF|$-_~`s!u>pKvA9MD2iU=b)Lg1#vMEOH-h!iQ
z-qF)?iji@+O!u<ZN!n>qCUjmNp!mzj5WCnE9c@+7`tV$Omzf~GU$++>%VKDs9#iN!
zrc76c*WkO1G+Mp27~=Y;k{XzHoZPxH7?vOV%&Zp-E+Q`S`uFlv{U$#mbiMzVU&Hb%
zKb^Gyf0v)yclrGZrvH!qI4nQwm~7$yU4H8SUw#BD|6P7_|Ns6ay6eC1UyUE|H$0B}
zo$0Q_51!%IKj~5U;W?yfut0}l`(OV(P3(u8F)pMwb2r@6EFyMPzD|gN49K6U#9p=Y
z$Vhf;u~fK4@^<!7;R44Xb)yhOD-hm>j`z6o={96|wt*r~9?o+)0NP4daZgqZbnUwV
zLdChHPGTqoKDiB&O~n83VmT#kED1SZ%IK4q-@@$3>DZ|~1KV%Jg8fYcFc@(O>&;7`
zORtC2GU$hr8p6Lc-+<~lbc6F?-yM{uZ%~$J*PxMWEN#7YBY3Ehv!?iNDtdAp9`w77
zj)f(JFE;~jnmq=~O}pTXs65<j?}2)*lT=>_;j2b*a1_|e6y3$7>P6$h<MLiCy!h_h
z^FKd)=Cc1?e`tMMzqY;>dU)-&3-Zn;h67{x_4jKh!qET2+9~;YdKoOB{`=yQMQ3w6
zJ2(XT`a8QhI4FHHPn2A|{Jn)1EwwN;9ky14SPX?N5%Si<pbGSNa`$ofS<m8%w;oS^
zc5n}HSQq5w<*?St*>i)Fzl(#LudgSI+ss;!{MFTat*eX6aC4rY-owi&)YU&gXz{O2
zmJa_c#2PXDR&HGJZro;Wf9&*af=WI<!!r!>at%-l@N@DHaQ(f%5R3QQEcwX$Zg6#9
z?-t17bN;25Lr|c*SAdX%zw6o{cP|%*Kv$mtUw?;SH6a$iyu39(=@CF$I<0qQ3H;JU
z$iU0jDR9LvzZ7B#S`XVMDl8#u9`e=MFNigA31cHSBXBuulnF!Lv|i3cG*I3|)PyTA
z&e_@5%gfa{(B0Q3Kxmo2i>tq@%OZE@Kp~c}f<>SOOXQai7qpf&n!H4<g~*6&eFFo1
zy&b&0{8(fDPEe*nUV-l4gk<jO@9gRq=<n+z#2Wj%xW3J8c!+U-7~&_fIs5tqk|iXf
zUg63La`g#xcOnZc_WLkyLjTm=i*$AJ7h3wu7nb<9etd33jBWyMzlcedHQsHci9V6d
z1R@&~X%lWD8t!4yxcwpl3F~2quXT3{U`_m?ry5I=^pqw&Cw=Ql`u*NZ>NmMfb{qds
zQ(Ws5=<Md>OAx$Z_&YQ5ovMovYsz0tRh>2U`x{FC{D#xqew^K0*7Wa<XZ+k)#(EU_
zO#VSW!<)mw+tta3CHuo?(_qc~-dpbX-tpfhB5ytN550q({8<V=bk<}ke(yZ%=gzaO
z$C1uHLEdB!lF#cJ;6OI1v!|;IYtAo&e24T>U!V2AAzJAhqP1AcWLz#Ha1}CcpnTx3
z6H@(6*lKQ)|0HZD0`y>~VXzBXxGuF4VyXX*Bm(cbZd3oMw~N28pKnm0(4y~OSsFj|
z)^wZxPrZM^qLRa}+RNL?XT6uJgUWB{)cT7h*Jf$EjWlI&lh3R(EK5;>w_%X}4m90A
zt-B73NrWUyI#Ay_{D4aMO;GdPX8zM;e?#R@^Hu#3r+R;yG&fmXRTY;0_c_e}c@7Ky
zFyU{lRapzaw_fyf>&0%r1M^#JHI~8m)`q{g{uKyDZoeb(TWfWe@%PqCer|2z_8oF=
zO4i?6&t)wot%m_-N?QK`U}nF`ZJC?mKgsQPfc+#mweOHK{{b}1-IV`nyzh|vX)yKQ
zL1^*Acq<5uEy>I3+jAn&*YBH}UipXj_~n`T%k%7~aaFa2{#tXd`u$zp{+-7Dd2Fp8
zl(_XDM*J5_{HNjO{<d*!zK!&|TK~3|8Z28k^4K%~_Smx{%W2BsGBqd4o&5KQ?PLFE
zG-|R|5AV2P5Uly7*AF|$;Ws2W{!6|7)5JA?+(Ax%HG3`A+Fxhk{1>xu`SYx`S+2jf
zU-uX7*Z+AJ>9E{>ZSVev_P_2Uk3a1rH5HcUukF45qP_Q@c9NPZ%jf$}@+Hmxu#^0L
zlOOBfs_LHvsQG;-`Tvd}g7|=crM!O{OzZcZ6!^n<LH|jeYyXDI;J;W`H8s|TVL7r1
zx<ZIxOxVLn8;0f|AQ}3n#Z^;hg%M#6_u2TZ&kukM|4pC~|5c^y{0PWRe>L&Btj*u&
z!2Wp-TmC#_4OZm$)=@vVj{XxE)ihZ#-&=3}z4fn9jQukdwODcATgU(0I^oYi)MjlX
zt%rAWB5C~xASV4Lx8(n<(p6QyBQfO%<fQ&Lovy0-I~3D?7%<&U`wt5ICnNc9EDKe&
z->{hRS4*nH${fbxumPJjEJ=L=F@nfHATj$->#DB8+D@dYPkQ8h>+u5;cl;*Noo>v3
zn(sdt7wSJkG50U#t**+-`#y>MpC?i9=P9ePc71PM_;c&se}baAI;-e=>*C*A{|d#D
zKS5D_F01r=>pefWF8dP{)iqe<r1dZqD@f}<K(X>S!Bx5G|C8YU!MaeL`yGnaKOm>Z
zZQ(x+_;==os>bh7to>oYIyZxV8u0I+sQDWd>;Gy=HCcPV3$kHYkX!^}!+8AN%5D7f
zvTCuKz6*2Tw;n$rasO`uJ>X{iPt*Nfph7F$*L(ZAlatrMA3kq0d9t*S7l*uPrD4a1
zoAB`w5uyyn@N3xdAx17we!e}K&QSmE_;4mp3FTjneh1}m=9luY%R<R7)L?<~|LP2h
zusG#$R7Diu&gylGa!%JjpdaSpD>$cAYaHM8DRMJy6K88ZyR$0&T8XK0`%v}{ttho~
z{gMS*FK*PFj^}@K!%t|H79&pl($dsvt7jaMOwTvGv`7A&bg_+sNpq}t$r6^FqUUVM
zbE>Y_Umgog<}Xdo-dwY^Lgs$_I9=uV%t2aXhGc0Y&)V7c**T5AOu0>uWhbq&TUMF4
zoSV0<Hg~_UR;d=9cCK8_)_8)I)z^>pCwMb9njH)|cvZ2CTd4F_@O)GH-7U38w#SaT
z&v?RhGG{klbZyt(RMQjXQWI!$%Ak{X%#o)RA6JK(?%FKu<8m{rV5fblv*EFk?JG8X
zVuVOO8b6wEcH+l}=cXHq?(dITkuuKUy-U&>>RF+rbN&}|mf|AzP<ho;_4J@pkHN0#
zk*@E`BIdo*GtuRp;5FjZnK0iMFM?NJd{}tkqV)WZHNiKpht{-YUXdxB%I;`=bid(A
z#i_Zr3p#IYw!eMjR`)k&XZP|nJ#XSr8FpCw{d&)4==_WIR&pUeo%Igi++Z62>&pM)
z@x=67K9^L)PBgmMUGoaxEF64v@^#18EoNQ|1>AY~GS;UWT=0*Yt;I6#4%cC6JxiYS
zU_FEDUcSD7;4$U^-@zKLWm*2bS<`J^zW;je#Yx?|r5kRmB<7cWbgPOV9h{r;va$Ee
z^9;j-&HM0e>cr$(9elZdqaSh=Oqi>zub*y~!{)EJw$1&>$ohp5P5jTx<{iILQh0Q8
zxW1jRR0eGlI(|%w(uqYD-u)-iHYG)*yGm#Kjrb&=;(2{<<Nk(_M`6sfDMrt4oC(}E
zCUhrDf9$=|_+^spnX$*~)#HwgKbahwIy0$TSAK!ncJ+Hfr%Y@a1-)w+=_S5T77wls
zd6V?|LwjZ5z3EF*2BtPXT<5IfTl@O#l=$gV?KUor#s`L!_vNSZoC$b;HR4XM!NPvN
zLm}ERXZP5@_$qkK*Z0%-Zm;guse4r1FXhcV8WyoS`-O;M^ZLewNr|j0U1xH%IJLgP
zL1zZ7R{Nyb-0pnXn0HpEbAWH;ij0vfj@tAEJyhEt=Jepc%H8GGe!2PnYqT?8EaZ`l
z%%37PxbWDh58~Qfrf!FgwRy{SUpQ}7IHPN#Tj87)ZGv0YZ09>)S*XyNd$C7HylF(3
zC!gdSl1fn*nZb>ETOw3=yicZ7o;u@}vG}l3c~7SPt}{I^CWR}NDk*x{7Aot$-j~@@
zQl6lHC$OJ0(argjIaC%y_6SaOq&oNK%Ns4L^=DbtG)!4#S+AHH!JWGz2NG_Hd9C5~
z-r{4uvs<CGR?%Z+p=^GO)sodKF6Zg%m#wVUpX6*ci?v_c;}F3!zd*~zY3`=v$P_>v
zi+_)A8OF4K!uR*%<giAY{Li>Xi;iLaMZcYuo#*1WSO|D6<V!)rcv^GJbeGV{+fJre
z84@4KDYHC@B__%cirqVIO*D{yE_G9jH@oyQ;cVEFJ$vayv2~3j8SS<{p;1$6oJaN6
zM?7y?Twb}n!%iv4g*{QMy?n0B<f=%E>YlY<+r3YDIKO!raV5-s?y4JebLvCvr%s(X
z)Sj?YwL7BXYSZI$tjot&NL{?!6*qft<ms+sW>S^?Qd6DE%a<fbO`dgThM2xz3e&l?
zCjrkpMs-ijJ^b+O?!2HCW%qkiSDI5drdpXfAJV%r>EeVXD+{c~uQq+Yvg37caI?({
z)w6>~9pmdPq<6$=E?qmM8_QoS)my)ptFhpXN1<>xjySeV#<1y8(ahN|O+UI<-00zF
zD4VEV%r<P<)LAroCC|jUF0b4iPNXdip4MVCJ94l={fS^r+f}>buON}+u{k+frQ0y9
zDKP1R%rn_lo8ymfQ2glE*V*HH!RXQC1!FqW4!`{B@@(QNwRa<j7uwt3wKSM0vcP{-
zWJ(OBf3?tmRAq|)yyQPz)nK}*?<Ez96B$9iBi?*j89(xbzjaM*T(Cf@rlU&eItv9`
zrIQEaO)l!1<sF=a(?$&Pvh=0i3x$suaqr21tlv(ReUS@Ayez~z-h6#@=f2@4b=U2}
zN42$mcBAGz9WTAZ=eui;mZh*Z`hM+8Q&HT1TQl1I&{dw0f=+F&DR*I4RP1hb0r~BM
z+J#EG+@lw4zZa3->6N_9tt^>m?AwY&307aQ{@g32K5gyW_nH(5jF4Q%PTM`+ve?P>
z(bcm|trn+ngSFSjt_U97J}*&5I!}Ar2II-*n|8Xa=jq*`KR0vD$tPF%jKYl%%((aJ
z)U2J1iS5b!+RGw%_DpC{)wt?$cJi}hf+NolG&j2#Zg;$G>bq_75s5cZaW-qa=V9U|
z&TNs3Lj7l7`ke2=uc40$l)19^t}Ls(wQe=XU`5`(wy}--16W7epY=}NE!+F_N%w;x
zr6h&(Z#;!DLTlE;R@vUKtoPf(AKyz5TgfYVuKZ19Ehfyk^Y}??Am6-zz2{|z#<|B=
zWla!QwCiMV9ed3n>PshFGTBflSmr2bmhwg2Z~VFWGfdAPZ5TVUQ{Q2Ukdl0$y>x5N
zi3#FiHqxz`C$@{N-J`|Fzej7G$VjPuc_+4uhcUSh&0^F&cqtxpds$m0gWh>!mG}o0
zuFbO<bq`-^#LP2o+cmQ7Noe@$-F3Y8CWVGC-0e$-*sRR(?R{w)vuSCY-pDq)&~TI8
z4tq9y;VzZ0>h~SSs?wQK_H?3@jLH4T-zz0$#{9p=>bE13vX9TOwKaVGqWsJC%QuS)
z{s*xBfZShxfYh@-N2NuhUxX%DneBZr$jH8Emtv)ii^Y$ui(jtd`7Cnq!$Y~3f;ql9
zldU7iY^n1$@Qk1xT2J+|j|OsghG}_eE<IVKVShJxQ_-Qi$J7yr`vJkv*;f5hGv6JT
zKepJ|D}46h#kuhduy-*>Dzf10V2I@YFxfD%FO71`E=1dDT%Eg3M2hv=eneF6>644-
zYTK&)l@eF33T;cbSZO|4d7=q}owCvFg)UEcc<cTZ#dR&K${$zml5||UW5%m#A>n2l
z5+O+YGtaP;mtPXybdor1sqb+te{IU@Fvk3^QvRcI5dKl#cxf5g|6+`RCc6VlpTKN^
zkyMA1hPUH0#x(CX$3bojg)>sA58I#LyZUlh*R8%JyH@^Pfo4UfS;4F2Y&JFQdV1$l
z<EOg#;3#SBFY^*cpLqAw<&*eVi4?Qeh{N}f7GF9)vrl@L$WorO5e&UK9dnH0^H_qJ
z!EB>+d1K=dIV9EOwA)N}&$z<vN>*z9BJ79B&)n0`oYHA*tDuaxWIDE_#@}Bu_QI78
zvqN__z8-uwGTT(5)V?5gPyDgT+jbO<ysK3`v)Q$@`^Jlnft7<M>6bdaq&YJilEtr2
z%AUD@!D@o>agR5qj$X6fe?fE`b!qzCIj`Q1|7a|`Ezx-WVb$W73h&D0j~X6p2unKO
zC39rrsYhqr=a1dLeaz;{O0S?vD~f_D)E!i8!V1HBwW>bfUNGV9;T37?Wv2+Qdg(ov
z(~*7tAWzrB0;P&5rw272@lPfQm`;*?!o&aZsHe(YX5WtO2lE>8PU+s6^zPNimdw$+
z1uwh^+O&SCZAYw%QKi?lRo%yT$_3nsITblFp*b+pZo*E(e7(uN1}g0NPE%GLPI(tP
zV~%<~SFRtA&ykfHzE(q;UPmWOWR@Lyx0$1|%u)2Khq&K?xH}pbb4`SXn)=po4Wr1z
zAbY|&f}lI(p6u`Usv2YLzeLe*cYfcttH_V=vCYb`TNb4vvGv7g@w2rCYLi5m>3uOT
z{XOp8^{{Cblb>YczFwjK`~k5iU2Ur4(x()>*jVVh<c!hs!U-SNSk$T=**bc<!o;8>
zv!WSq7NzGYDvo3XMy>R0oRsAr)>JBUDx_UYeo>lewc=?ZBa?RN=`WvWoE))#!PB{$
zyQ`M^9a(ARR5K%Q<l;*28hhcJhix}<Wv0Fy&&VAUkQ^}N%DZl2*`j-;6I({D*)?st
z;%)zF0pY98PI-OCq-E)*UGc{hKdQWbw?pYpi`4vGGt$;7FNt~aWN(LRtID`~S)IF+
z3=Tua#Rr3#X8YDS_*~ljAXxG3*QNf!?&)Qx3)8$8tjH9yxpm5EzKvwTxc10d5&J5R
z#p=0~$O^nvD^uGh7*!{V7Hb3^7e2YM%iYbxagW}Hg|7uO=f04f7Qt7x?viG%t)_cp
zyz70<Np|C#Y#evoS=Z{iCO0&@*}K<P^O~1HL6mplLch`53>M9J&{yVTo3n@2sNkmb
z=_q$^(0$*!M>~6M8aH*@FO@vI?fyDrD}~U92S4x)1K_;Kb;A?_fY;vv@Ymsl6odai
z0N{_Bgl8BEY;J1_qOuH@S(whZbXa9<Y2>iN(!$ux(7|&4f~AXD3~TaN28%0(#cjg)
zexvT;9;oEt=kDz3<tp^cow~bEpqG09x#i}OUoGz%Xko%IVZ^dVeEUMai(&D`emm^%
zeTm>f#v6Y9J+_tq?(p~1bLhK&WY|vrkLN$rWvV_$RYu($5}KYNd@zi?<rVLomcWrh
z^Y>R+$1R<h8U5;gmb})<MS~$59-O^>Okiqqr{CL&*-!UQUB<9|vGwd$K^+g_2?q<N
z_W5|~j`l0ai!c2m;CuS&JY#VWO5*M$1wJLChlkF^C(STC5NSH?oyYy6{nt0bj`v?w
z=su0;jn_9nJH@N^#Wwy*#Qc$(eh&l3;N*^x3Inu9O^M?4ea5Nf^aG1Ix6LmzGPPW^
zCe>~Js@H$_*=IMyyC)A#ASTHh?kRgk4w^RjH9bl=@Z_1xTmN<1B~kC*6ge&3QDMAn
z^Dv-<61h+I5Q#_sU!F^UbL9Sess9m8|55&!Vd6VZRV*gI$@IqE<`H|HCXP=sE}JfR
zZ})h4%jayd$bM5-9d(iW$L?>iZE)7wJn4L5d1RMnR`o_#hmV_1oEUtexrv*h{aMp2
zS?<9-ufdq`U5V#gH_gnrpE0zIFOVhAbLq%*nRiE3pE#O5%rhx(inY9Gc`C6z_0%l>
z^|NP<5DOQ2efrUocH|1Y*~nvhiTCp<saL1>=wyx^uV|N0FMZJE@|#zcH(pG|kk8?J
zo@(r~ap~^z7P$U^Plp<{^j>t!OsxlVs$a0FN*@yo#SwR7#;09x_gL8Fck@zDfcMbd
zj?G@T4<Gtcyvg!$gX2YJ$mxa$Qk%>t&XBvjcb%D1*W8)B`$qQeD^y_0FL-Ov^+D}a
z`PD3e@vkmgF)Tk7-_>o6s(a48OVG}8Af!p;&heoef#K&s<~CnojHK}Ww#u@S(72ON
z?*<069j?0T;OO$^WuyK*|MuA#rl=?IP%9~OLt6a6!##(Znjf8w7;!PoP`Uisn6sA_
z`>kzxFnxx7*x~$?Gf~f7x4&2A7Rc;%%0FUs`@mpjq-}Z9{?6dZ8~VE{?kjaK{3u{4
zH^aqbNMOqY@3KY9I*#od%$-*nYX9a7*~I(;1-r(tJ4B$>|9^R4{mqH@CwTqt&;A$Z
zUvG2pJ)$Cd>%F+#%+$oiTb0uKI(OUMh!M8R&ajV*ah|a`^N_=?anVr&YwvI#ao(<}
zDt$XJZ6Pz<PHW|@-j3kq_xG>i5@2PSB};UMPU~Tmy%xR_D&WcY#K3sh^E+oe*WS@H
zQj#n_W0NQ^y!^%MU4fQ6wG*~#7EL&O>P_S=%5A5JhSY_SCu0}boo<o(EVOLal6S`|
z8}g+W-x+(=y6RNIU{Q)~nZvV(Ggr&;c`8?Y4G=ypS+Q)9OwHssopy$KNrtzsDQtCI
z%@jC1bT;X(v&f~P>cb`M&@mb&D;Oor6Jtz8udMDGAN*pd@m%J`n_O-al?!=g9}OOx
zHao*VCm`eY#60y~cPmZ{yVj-+oLU+zeey`}?wqrSY^IzYuV}51d${cKl!;G6_c;{I
zExC7I;`tljQOQd8f*WF|>8cys{$jSyKI(d0kE~x4IUxMKQl=Oa{^j%LkL&kG4KeOd
zrpLDf0A`*awqH*)dOf+Cct>x<TDErfQID4K-P5C+xS~#tG%9uLWfT8fyxoc9aT|)Y
zWTFT8{T4}yuKHq>SF*GwxU$Kt;daL7SD!Z(6wKMY?}B$n!<*YdH}jR|-F(q{_ys5b
z^MN|nhV^fIvKkamX3p6cebBgP;ez`kMl2eY!%(}(7Px18H$%7Zo;+7Xf2@?>Rh|ph
z{1%t1OjA`%4@|!&d}{qp#vEs5#sc;iqmZ4(^FuTwSM8sa;yHTPyuLBdc&$%r2WBQc
zm^fqM<CKFsClw~j%(!NIgYV|{m#Xm*hI5Kt=3k!v=wxL4IHi=Af}y;NAL;vOsWljC
zpO~F^uz2MJ&2=-bMGHzkn((Py6?(^h@K5+)5muZkl+T8K4u4O~D}~##f^*)yOMTO?
zE%P$9KW<!#PfR<yd3})hD3j1Iv;6V)L}BUsTDF(oX#@sN3E4HlxB7O$*1F=uHfG^%
zbK7(^`$k>yxgtF4&b@onrK}8Y-(#PZtNE&LsN;4iHK*@VPD0Td&z*yfMOlxMyY;6|
zc%N22(sp&Ven&@#)g=#c!!wh1s=hKv{Fozn)~0B3P{m-yf*^j|&LvkKQac{rZiuSp
z-d-rY{jQMxS!ab5&rthV*FODOmwXQDovvu%p7YjMA!dWc_Oo@Zy32j{)YeBS)VXb{
z^Bnj3*5%Kd_g_bBs(Ui-HD3O#bN{vHrn*VvUVpy)8ScM6A!24wcjau;htb2zrJ+=<
zONsz9^&d~J>Wqc|5}dzZQG7Fp_5KQze{oFx+h1KV?=U;2GKzk&>Rs0o(Rfdpcif_5
zQ!WYF`1A)Bo8HgLXnS!fdP!T>Bsse`iJ-1zac<z)+q$DsXEX-gMl<3jSlBk#uUm95
z=Dyu7vnwARii^Z7j7D1LELzJGli1oGH?Dbz|8b&R+R*;Zbf$Fty}ewMHmn=HrpamR
znF}|b%^wpax9C!p@u}+7mYNTigp>szp1XI|q^az{idkimTI1JfyIvSm`{K>j;5h$^
zojp$q{Hu>@xl;$9f6<v;75)Csi25g=?f62qKB{kBHZ1J*g5#r35krCZA2k6mM*SCI
z|4|hP{3dq1{LO#G=xOrg%=y@3x8PozS-I58x~au;78p%d$)VYn3F|~o`j6Y*5|pRS
zVusz%-gRV$!|^S5*wOiMY2MfNS-5lrf10;{sQu2izTK`iC#HTd^JSkm<9ji1V*b|$
zpY5o=$@#5mE5#o>nT+o=TGg{+X|Y6Ne2xlNq&e60xJd#jbGgI`<6!xtfVS(R(IYiP
z7BybwQ(Y0LC-Xcryj0bE{)6N#qK<RN)vnzlO^;Wqe0|mF<KrQW8{9Ex+yCkAJHWC2
z-v1v7A$x_)j3Q+3P4<XFc9~h(gd~)`M`eeS6tXuVyO5E+vv*|ke?O?6hn_w@-*5ll
z^}Bw$p6isW`+Yy>ocq4t=e*AAzR$&yTTK`xgkMO+!fbAL;qn_B+u({#P%WDkJ|3Zd
zcQZD0HIUh=$-~Bl;z?xQa7mvzVK;S*-nds^qyg?_pN=-XTkcn{TAa^WuGJJCOi#^#
zcNkZGe%n%(e*(7zh0ixgQZ}1IAgPMxyA<0Cy0PjPTV;W943`IibhgKaSl2V2TxQeV
zR-tXqNZO!t43a2&AKlMa_7Et|GlKIHq!RKHtz(WYH-f|JJB3-HPv6&vb4IZb5*W-^
zBxPj%jm5+{v^mRH{LC&fnHN41VETq0(o0{&LDVm^QIg%`X+ko-q$Q^E&gh;@eeVt9
z!YNbsSVZ>oEDo=g&A9aXs$?&Ix^2+N+rf0>^=6B^3gyDfh<mO9A3>KZC^bE()#Mwc
z)zeeHBKCQ5<TSN2Oc-&8tR;Nienqutb!yL_=@fT4>doXiQ!D%Qnr^!|T<m)~wNq1E
z&OXX?yBNu19?9Pczy_(_L}^6?7~}3Az5fcpct1D?JaRaAY-#c1fEK*4IK6L!Hv)xC
zf%X3*VE@ZQ)AGzhqRO7AI6%vuUX;tHFN#Mg*!=t24A(;?oy&0p&YLxpO702~;?nbF
z26Q1#&LAPpn4)JacSJlLxc1oXE)EDv-P9VYAjrJpUEP6;y@`DR-J)cNH+GTEI&CYS
z8tE26bDGsh`LXwY>2ZwLR`N*e5=Ze`a|W+O7Yb9eng<(t@ptmMwaGm5VJ%Z%r<PCT
zFL9YoGLNi~sgJOy@6#xfSUf}f&ECoN#<n%{wLzkA>uUqP=nmkK5+=PxPz|^)%OBVH
zJRsbEJyMW<i}}-Ue+0zR@Q)Ta%$T*DcS7?t$_lSrx>Zu2CkhJ+Jx!4eUctNLx|;y*
zYfsYY^F@1shoR7_mvcC`V3EA5___3lR3+v5s1W|BjU6Mi4I@Hx-awo6J416AOZBab
z@Vk8H&);_m^4dbGT?ki}pqYJzeY@r%(<M12Ol;;`r|Q(;DZQD*g?ik4+i?BwU|Lml
zpQ*MXk#4oH6>ebb5AYGB9=wB+BfE`x2Cjy6*pvogIKWZ+Zr*qGW^Dm<Y{JH8_BRNZ
z=*Pubo?6*t3aL$M50clia9@xbdd(i5e&*(S$w$NHJRfDo=0)y8_YqU_HQvZsoKMVt
zx94Beh0<3>*~&dl@S)C+bBL_f6cyY}DvQ{?oG6kq6pT2)?05m*7ei7SxJUeX5=WpU
z%L|_I>XhP;RP8ew0yS_xdi3r@rTP}2$iXRw(J!r2c0=%SV)z4Pi`5)0V|26vw}^CO
z&*g}(I-6HD<QEj2Q8lN0U{^8kHz1ghrn%A*{PbL#r!VEeY}8_!NQ0@W`SXmlZIO1g
zw-%kxM<<3KhxAl1e@ihun?yJYXDDJ58CCvOWk(}+8qPK$EVZ{Vd&a>SKJ#>=C7OyZ
zdwy<T`h-2ag$Mx;hh5{Aa(z3=q+_`WVPAcT$<*g=2ZC+|mb=Bcj2%vv8|rKL-3qJ=
zt&h}sUo4i0zMVG;C<hy#f!|;=9N?x)2!C2Z3WG3DXnY6Tdf=4*Ff-u?w|D=QeHO|F
z<rEq%Jon$fYB55I*PO+BT!quLoiuK!xSdW!L$}7BTj0d_Zk5GP8e~L6U+TpqDs8<r
zH*S1`GX{j|P;ad){7%A?RWmA@rI9F#G^$lHep+WZ;Mxq*CD8~Z_-(r7K%CHx3>)1$
zbuFQpt=!QD{t_f_KU|al5<Nz!I+1>7FB@r8vPXu-aP{V$mO$2&)rM(`n`Z)3@UDf^
zc~`5rJKmCEeCW_XbLSn(pg39|EkCGUrVkfkyFg=hXzrV_m&hf!Pr-XWeFTej_bTk@
zP(_`a9CMZVbDEiUthx!h(_=DG0IP}H-G>J0vK|*r0ZtzNM<<s7k^cAN4C(bmP$T}s
z^AFn6u#e#Lr<#6?nY_h&Vk4CgMY-R+Y!uTtElCW#y+L&1-wd+)s;MnV9)MTx=o|S~
z*vxoVFT&oeMeOt}EG+457!eDe0>!wJ($?ZwTXAeimdV!`#R~-2FV$~2!xwRdKfH4b
z3wwL~soj+n>rd3zdqnB(yw<;}S2q^n!5WEo3Ps?N(^nhn(E$+8eD}kbQ|_)t=uQT0
z=cSQRBN)WhFg00BoSi!6)`H?nU09UANA+B0F`wn|G_&?NPl`xmC88BZyo}$OFGPAH
zpZXG>`=W?nota)Pg&EiUoyEeujl~Tej)ftzJ;lLP+nZOWNcaqHTB8rX_7B0CR%4|l
zFe3_c??L#c-V#oN``r10+5Knw*Jq?pqdHB|f#`NyElCF7tL8s!_GF-Q<Wu)r7}?l^
zxr6r`CIj!_M<I6bssZ7gKX&k=AGKiY;7fn*;9Y?od|3|c9X#mam>qZrKc-4wKBXMx
zWw5{r_TOaF=8E_KxPuo1p`FmKe(nH|xiIs-3nTwBDD!K@2lyMF*vr$3o{p2Z$w_!@
zO^fwWQAF~lkP;;lB))A$=p$WocMNgl3vrlGL75m5UAN!nP`Lr@-))6I1N(P!ZBIO3
zzMEcWkhtDvu%RXCu&Qx}SuSmVETLK1iPiV$3?o$xY4LGN+w+urfsPWL6LQ{(zK7MW
zG2tFhyCv0}e}H6<Qs>u6RY%Qhl}suTb7$nHZdMcN>1VNU&w7L}-Eb~L=^Zw1;{N`C
z(UnELPMMPCEy-#NaWogPL{^60*?}&5v9XOYb(@GPTRzhkX5B!}NNlHIPduhdBP&L?
zOi>^)O@%X6qpOtZ3y-pNF_PrWJ>a@wYbAvkPXnbuhdVEw5ucQ2R@g)khn0DHHh<5e
zm3_(K@WJff$7SR13GNYDgf1vk-tXg7w`!TtQ`cFq7Vi*yB%$TR&V^v8G>59}foHLf
zvZY%Bn8lhuuHNNAKhppHX|eR@S%eG_|3&^_X}A`6|Nl69KAUim3>%xX4>a}R$o~J-
zO^Xri{6G@g`Kup3R?E49M7EJJ3B32b!opMWR3kd2I=Qog2G+QF@?4Ze@-H{`IJa3j
z6s=p9Z=saW*?-q5ukMfS_aJDn4!)#ef@~F5QS4cRe8&6S6*&E5IIQZhN?JRi57$m@
zwqtM)y+KNRho1aOHJbcZl!bUTaVW4oP_NMjd4hIA7TLnw7fsmg^N2A_BWfL-#GK*q
zS<uaIh%EbuJYSLMJ3khFHZ;WatvURB<NDL;vbT}fvdB$BZ1mq(BR{EK4=-^~v?0$Y
zChWctqlbUBPbCeNI6K6&@^rvARM(cLq~C}(SIAZiSFTdv@xVE^J783>PAQkjV5`qA
zQOtE%Nsdr-V9}EHqXam&cC9dfm%=rWQ*2`*Bo?Z<oAYvMW^$5mQGjVct&|^aP{<=Y
zX5H<Mo1Iw;;p_g_#42SUK8Sa{_AmJW7agR&=a!e%YA$*^Q9+N+*Xiw@1_fW0xE>Rn
zQk^l0L`Gw~)`yW#o}|6(pbLD;$)-0MaBhtmCn{U4cRKXj`{zaQ48|{}t>c=k>0gki
zRZy$BH(Da3k6SSW&D`sAGcXnf0DvN$#(e;wRvl&G<^04(hbrx~bE>lv%FB|nrUg6m
z#IpFJLBo==<^{X^iS|ls6aHT^HcBmI+avZ7fE3)AK<%{XJF84!vqs)%l_v#E`p*b}
zIEd+l!RZ$Qa4b5p>_;d1U(pHrUH7|x-3a^$@Ndr6F5E{U!Rb2N+nUM$zDCz9g~c<p
zE;aQ6UiX0Z1FIe-r_f;6y&&dmpBC%y4)zzzvs*1VfV^p0)C)2V#2g>)G~1JyoN-9G
zjZ58dmK3q0J>e;Hd$?u7(z*o4u<&ZZuAn^iYsDVU%saH`nC(Pu2J-JS;Ka@O$cxN*
zu=34bWF@_;)-gdx&*&0ip{+6cGA;Nf5TtLTrG_GKZ~b<?2~a{uTcw<$=0#Km_j|66
zd75d5{g83Hv4*|~SWsa8aY3N~LjF5r3mG#I4o11OU3mZedMHPum;oM<lZx>?>gQ=Y
zh8*#I{vAQEbZZkF;QyFl|Cg7z+qbOD_3c~1E!x|@IWsJmmcw@RA22cb0?40Ut<@)i
z=8`b(GQRi+)jH$Z-xSCex5rjSfO>p&$s`>j&yj~rueO~P6TAEj#L@~Ne==CX6bZLu
z9|j{bBgHTXWwnOjR_&hCmu;)^T&G|5FcfNR4|2mKZ+evR4A*llOz!R#CuZ00(|4rr
z&A;Z5wjlH~2)4-<I$KB+$<Uglr}vnpl=9BdYld+4YL$0XNI0e={*Cl540k!dSK8g<
z$;wgyU_beSG#*Kg>ZrM>E5ge3WMl!;=JSd1ob3*uoAu~y5kDCZX}RUIB&>()=R}Rl
z6rCG+noR#1N8ZKhTy$Gn<#Zz#xzugHgqPY`%x<d^VLas?oeuAj_yQONVmg<!a!Ni1
zGCf07OL?5vZp*@^!u_Gc%d&%XIU2r}iFQv|wQ$lW<ms6^(&7<hT{@d~HyVgl=aTNq
z4Ny$SsjNL8ebQZ)niGUGo*_$s&wa3QxTDHHd4J&=zX!T4ik%|j)^>o|sXSd`i}&VT
zmufAz=fg)C`#czJXFKmND5NN2JRjYe$6vfROp=p06Na+-YI1L%7tyjkc4-K(u$zC}
z9*BUhoY2Y+ck##Emv!HLFaI0&RlgAgz5g#p$ZP8|pUu)B6p@;3Zx3-d%PtqJmAka`
zVPvYUtqt?y)pf<2R4L;!HRsKGNkEtcN>S!nL|DUUpUQ8{W!vyxx^(Y5pD<8|SB-+K
z^($s(%J8NH&xrV#`sSd_awYC3`hrh^oey!ep3(FS6=jk8pc9BGKd3SrC!&j5!v8!J
z`^+arcd5f{Dd+>1n{7k(SI=gWZMDEe2ENj}2C7($ZkrEJMW$NUns<y3@|_K|WMN9#
zp&y}rWZXNQ%wh1gba<Ey95Z#Tb45P`etrFqek}`P`wJru>C{*L)!2jM=`Z{s3XC6+
zTe<0BQqho6Ba_3159%G^1`@tosK1zo!VeS(`K`|z?afs5l8hvI3>$5mb8)RNBV#zM
zp$pQbJeKXcMqTX>Kd2m)Oip#X@^tV!L@`VqU$-1MyxL3W+Q>)Tva!4;c)}C=C*8yE
zs0RV~LAmiU{J=EN9>5O-(SG0uroZq5`F;F=;uwCQu#X=QL*fT^RH~lrQ$P&Bk+CS9
zy&P2N>#CFJ8oZAm=*A-sP&hKd`>yTd2Z_J%gVnn0Or8v6qi)sj@Pq@jSvWzB3oDUJ
z#FOhZi*O2*sCRW7Z$}$j$Ku3aLCcgs+mpCins@7~hNh-xT3&EDkM7!yg|m}%1pFLS
zG&O9`6TLZ$X}0r%mp|R;ovfuR$F!44>t&x@zgKW8nZvSaJC(4?xW>`#Sxd}|@w<h}
zTQ`%OKqQm;(-!lyn`Awp%kNn<J@5+RdQB`it&5TK6HrN_i{^IH^ruN}OUgUW0->1p
z1-;Rrj(&MviOMC-*BymUZ;||FoyJ$0+MSe9nt9u&Z*_7ZfX$T^UjYA~@dHi}(eKUm
z0Q)=O%pMC0%v}3X0Uw5Cng_oJzrP>vzrQEX@|vG6qE+kGEsmgw5wnoP;|?>g8F0<J
zc`J$k<}USvy}NQ|dY<>k$KvNWr|74|Oh1$N3p71TCF1~jr`;%4qzv%R^r(_huAS`@
zyp*~+^7Rb7KZPa9GM1KMO{@F_QyOcW2-lO{#wZueMlT+<LB<k%LS}?mdGD98SF0~G
zNU|vhUtdf{WK#BvxPUVt_>^<AiK@QWgeMH1Ag9MJq-*B`KGD13&2C3DC$@b1cMP&1
zCR2I%k}#l<Ss(vSr07iNI`1osm*H|s(l-%`tQS5~D<qjcAKczQr%jX5r<eeOg7aZe
z_&u*g7)1A<jxuB<pn);oN7qpQ8~G>VpbNhrf@65{Pj1z5v*X-1JF<Toh8N)qccZVI
zf4s1TtD+#>aJlJ0a9&l8UcS_fy)RuOTv&p8EMfNj9YSvp4Nb*V+)K>O3-|R%gTF=P
zFgPYW;AzkopYG?>wq>`=!T{;IP`I;QS8Ba|R^VaPsO4%`#Mf*YrC{G1v!_`s$4chC
zUgJN}(i*)`AHR!rZHRS3!FR8q&X-k*Z1erQz$-P>Q%q?MRk6-%ubh}8HOGwnhZoTZ
zJw9e;<9FgEi3z=tAgYvS{{UA)=JpontsSXPQ?tLHp$c&&6aQBFIXbic0d>xLgH?y8
z@evY>w55CIrmJ7nQQtF_gk?9Jb0zV4%icA~anq96!XY}nFkoO9zc8aAAiqJ}xZWbG
zm2nw=<z??G<%5q*)`J(g9B*6^iAM91c^r~|!z#y?XZ>#GqGBwO_9_Rex_hYKu5E8_
z4rBI<$1$v_=u#%#>St3%9WtEYUUj_)BU~51+4u-vz_o0*3GsDYTF;!%Yu@17>l6Hw
z3-1e+nng;9#qoVD+H)0E+icO5KZbJ4u=!0~?z|k8EtzfkN#vtDH!559WMTKax@UcA
z7TT<SPooLi?;3iAz^~3^R?X*}iyJ59nzo6>@Y<+|S^M~ili~Wtx!BLG!GRxz^ch81
zNy=nvN42$BL!$hu{Uw-2>(8w<ofa{q;uuGWh}FGc!6^|Mw}%7{do2D${Of=-Isy4k
z;Dh(yJEQ^#_3v0Cq<bR$u*?6A{wEBgJz<Yw0L5(kzIWkAACJZy$bbLhe^mnfuP7cu
zNu8ENnfwHDeT}R~b>YJGb!+t3<U@SMmK;l>O9HV3NC-d9sXoQHTQq#it-!Q1TI%ry
zO3mR;K7WnQ_LUZY`C82fk2py?cf(Mw-@8%~^!yyxg-3flw4H(H83>6*7sYGDbG+8L
zrl)pW_(q8;`4`I9#!lUNKwq6GKn(D|hI3jSdG!JQR}Ljzs;{IuHWeLHA+Ers=$9Xe
zT~ygE<5xlF=OmnpW`^cQ&5*i%*XYAlU$2~s!%`@a&(aM;r+IaCdQD|z>)T6$_w&p9
zR#tEB7(4)keaAnp1!X{&{`)b9v@q(Ej6SgbJBlB_JCEOH*MbiB8SyL)6M00X%d-Ra
z$Dr0!fX1$QjF_yr<`=2Xtp?Y*wYBwk+r<I#Ake3>z|b(}LO!wwG=nus-<aa-)`5**
z5}elghKiibGsvzEO*~JPxks_-<|rcAZm=ZXuN%wtP~Jz6AA-^2t))ZsIA&M>7kVtV
zj~?rlPJefjXx0&orYlb7@RKDjMx%)CBlq%pC>5h9V=X#Rxrts^+hHyx{fNAzg_5OL
z&HM~;pfiz6y19VMQqV`<^)C<FG3c~#Wu1_k)gm=3*68%c!g)HI>Zzkwq=e^U?lBlQ
zp!>FzV&#>|vb?sVc1I|+#RK8(85aaD49yf*7**o4Z-3=+A!xQ}TOwQwb|h}Wn|AEN
zc;h4;<>lQ(D$j{@zJzEO;i@5-AQ}~8N)t<WyR(O)Yn$Xff@f!R%r|ACod^0P$lp3t
z3!BTbDNMZgsdxADx12(75MaS5Qq>xlI76-&<z<I*VWYJn4aMX+M~_IaU0<qRN?@@u
z#@q$PSpQ&#j~7md=Eq`xFvv3d%6VV<TY>-H{<30xO5tM}U|WBte~F#=@CP>R$iDB^
z(C0rt@Z+sR{J8Gl;K!R_{P?9Syimy*dfa<i=ve5M>RNY*u9n}<G@%(D*@(U;H@U@I
zkNB3BKY{W&&Z^2Da<nuFF2|h<+$$<MM$?hP-)vtEOZE^Fhvd9?)7YA^Sdi$c^M<Y$
zE9!H$H+xMTQ2(h5>C$N0cNe_8s@A7AvwND``Z>WVVzTPI)6Y3MtU3erb!yL*yqoi`
z`&OSjPUkh{*qNYLasKXQ>J@V2wURUO9|}R0=)xi2)%ueybhpYhXJjwUk&?fiKv)r~
zm*Tg=j+Ejac=2qWP;<BPRPzlB*-dP0f!@ZrE6>yi@v^_NHzsv^pr|w$E?!E$fXJ&o
zM_SGWCW4teI3r3B8?*LaAbS%>U3x$SW8OoSlmuq-p8#aO6Gn<Z&m?59;5u2bfDGJ+
zfB%UegZX0%7|1@vjKzK(GquKEn1>N0@foUqp^r=-k3En9g~y4*xdC@(iB?FHe3wjK
z>7`0Bm_ha_<o-%obL+dWti=48muH5|Yx`$LMyuvcx6^yMUB00>tbGsCrb}~|?N*ZU
zjy^p=%O)=*RPBaLegVy!JPudj4v0=3yCuc74$g=lGr*2qJgMgta+@tZ<v_4YB617M
z_fzDjCxbjw031m*$?C;GU#hvlz4i+|sF}f5lNX8WWv)K{OlD;htjr>LH9dC*Odz{-
zgg~aS#0?P0RD}l24fTU818<(Ql6cE#)^s%;8Heo?lVdSAm_X+8a8u-fKn6cuEHP9F
z5Xfl31hS{!iT+3+>je|Ytnu~<WQD9~Iuag@Kk;Le{QfI%T>Sj}EUy#Z^MlVL&EJD3
z`I_MMxLPc}<~r`GdSiGz{#!nqcEruhx*##FwrM8>b#3kRD3vs>+XF5B&pB`R)yf)w
z!k&9Y91(=~nipV;ytWvxzwFS4o`s_6&Y}63-jY4&1z-ACE0W0P@NZ>{hh6DYrtx$7
z;$Pspx9_l&Z+;gmjV&^=zw&j}&NYJ<$Bx0SyxDQ*>EO(~n3h7n4y$(g#99&OHBP0q
zX8$ibyj*0ZDr;@ig5X8gJGix1BqxB6PYOQ$0Y2u~k5B&+e}cx3i)VGa;&k<{<S7IA
z(-?q1`I_C$DCaW2#4jaTzE}J7qS5+T{=&RgN>srbFQ^)akU_JHg|eh#*Qgks-LqJk
z*%#XY5yQj8M6v0di8NP><MxH<#RMnKJ+c(+8yDw@yK-=V43mH<mXx6{ZiyVEnEf2F
z-WqvI-K}aPOzcQrs%8|=a7MFDSIJZ)Kv{*1ml5LmT;A#Dl9K8$r43?^D#;DS=*E~h
z;H(xu0FSPB@w2Gn69C93CFuOa=t9Pf1OCo$@Z-i?t3`%62l?B7!jG%%gnhU51-l0u
z{YJ>NR=TWW{DN5b%mOo*CKi@bHfEQrc=-f*%bn6<;8^ZR*eU9tT90YFjKqv3L*pjP
zPVRnM$y5LAE42yo#9NhcMySCSwwFIRa=~%8tz4mg@9UDp-NJ%;oBXsl3YIazJ*l!~
zJ$H{b%!nz@Fq|NLI&mfNo!DapPSomP)7#xB{5%|nRN2I#kJ5WY?#Y;u&|9<R(tIi`
zd7r;*Z_3mcuE0-^<Z06s8eaU`OJC+qH`wd5mzP4mnAwos-;=n1!GrR-$i$HQ<6S&Q
z22tN;Dft=hvX_3$@{)OpJYT62ynMJz8Q&E8i-x|;J7Y3UO$@I#7|pRklCw+ZQkR`v
zsSvf!s6Jp&`$QK04Xdcev@H*Z3j2puFno;C15ewW^576Y_BOsM9!u7(vuU8!Ko7vj
zOy-C1G0yew=c%^d%J^>p_!wc70?a!xRpmeDLZS$UkEQJt5w^Ai%}?3u5`f|3v>I3T
z`RhO7<2}`k>i~SbH~XZ8e#b-J#aStpG@@$XffudVECztL(m$ccEGK*`!9tG@HtdZ5
z@=bJ<KlVgemp7olhH)Q2l9wa7RL8NkWfK#wq(v1cz+Xzyb#7SHAS5P=iO?n?j~7JF
zs4^$(!$w(29P=Qp$7q5yIk_`?0-0x0yb@)@o6*aylqCIuibRRGX5+52ly=wSZz$>c
z7q~4G>D$Cm(&;fmCoP59a)}LDzbLl)Ye;%<8s_mKRH|mz3gSzE(v;(Ta0DC{MbC?C
zI64j1rc=f}iILu&`!L_zhY<wEj%(>gQqV$rrW3-mmJMy!_xB6g!kM<80GAf~<9<OF
z#Pt`(8`7WI{^iIYEC5Q|_v<$R{Lz^rhS7M`;DX^Sa+S97T!`}{gaZP~9@YHSg@VTD
zMg0=cUAM6COWtWxGs$(4_iaBp%u4QF6UQK_<=*64Mz2|4aoi?-Kok@xSW-3WQ5BZo
zbBCPR-WGwG_hB<~01iMu(>o<;0K%$%LP<^{^x@{Imu%i#LuRMiKYJ?KM%@p-BD#jV
zG%-dj=N9sg>=Z?XErm-}GI>dAWPnQX7uV$xD@++`f|`yS)fruW`k6!adgA)Vca7^)
z{hOuE7dXLFn^O953mTdRli<*5#*LyDK5l2PtFoZk0wMtA2dz?{u_(1@i!;D`f9(4=
zi;bw6$*l*|#8XU^bh4)cjfZ00z5pjk!BjbFqslx}L*CUkCb+Z${eEVviSL#<(7lyz
z;<SI4iNvqlo=<4H`#gJQ2%c1edw_nl*VsOSQ$P<<7e|99>9I#(?lLu_t7n=+aJfKc
zx!^i#QIB}go9gOt;H|%;GWb{r|80r&D&uC>Ct9(=E1QP!Jg%e`Ju1;^%nvLwZe{86
zRmh(EguRv5XX0Y6$m))6t8||?K5N0@DS|G7$}r-a_a9x}OpK}sSX7ZriqOjRuex?9
z-ACdXXm&zzPF+i|o|-LAn+1w8s3Xhd#%Yyxmfo-zTvgv|pT1t{heLE<(ENZD^^G0Q
zaTl=Fe<GjZoX}ojU<wB?GR9FO1^8zs2mEjE7;H(j(uj)hsOnBr=9em^W~<*^K<YMp
zQ0}TvMk|%nYHXPG*?FBMnJRzKB2dMy3K?Ye<T587uCo|d)r-PQ*>9dtJtquUN!`U8
zIG5`h%mT&_GAI|gD4y-{AVoet%YaNAn|-s!vAtFO{8!sjsW>aU`Ka%L&Q}7HOZ9K7
z#77{`aoMP6yde#JG~}p`C7$XoX-4f$kT-_ym|_-+pSCF4UUC03d~iv%+_j3e;t%Kp
z0zvi{!?B0XZp7E!hYN~9(oD&^l2hxi%G)8jwf&jw$^DSI2IMr%XVYJ&_K%h5$t}D7
z2l!{;p@xhM|B8S1?z{?sADlhF58R|%f8q!IlW0Do2LM8GIt2a9ffz`BiE$yJme8|y
z<=K@j3zA6D5eQjoUFw$jML!$fr=QjQq@N)L*UKQK>?e?&TKzmBjqW5mSzzQG_0ncD
zjC(Rd&gLTZ<?DHvw}dV0U(HP!<PQou?rwHp@1{4&uFH9+fs<8_#xj5bP9UTFGKK~u
zkh!S!M<!oCQ;EtzneEdNl{s9H@_wRce1~AKyX4V>{sdRWsYGxBS!LGP*)mna3$@?`
zvQORmLMSE^O{T)id8(fC0ht{Jq1WEhV_lwn(Fl6Aj<iHq=E1Z8P9SSve3G_k&Q~l~
zYt$z?VlJfms>%^lZ8e+?1M3+{OgaXa3*H|5RYQWG0K!()PXOV~)=vOo#l45Ll`j^F
zmIjrPrSb(q-v&?~Tif;-8HJnB9n#KA{$m>S8Fa56^%%pOcUq+d#`0Jsk>vOZ*|CV^
zu?1mD4%kcMKB<paaqUlG3u+wD0k0v&RM(#TbN-kZ2;+nf413$jy}v+5h4kU0_`#$i
z{1iBWj9NnxGfPeolm8Td)L;e`uQztJU-hU@32(j<7ZO=z{%HQY*VCOkFHu7|R#8DC
zH`Sikh1q#$dW}wtv|ZW6^SHr^OwbJEkBJNpN^%(UPr>0K$G-ZqsnB(OIw4bvxc+&5
zBLB7gvr#FcXh@$wb$KT<7-$8$KCZ>1-qyd-9oP~cC;o={Dh+*Dm4GCnPHu`{CP`pT
z%<a92S=yvWd^EQ|_gup$edm*HPqoEUGZbJhAr_~Q&aTQWTbqqY0zd9(aOpXc)OwBI
z;>{RT@>xvWJ3$>crV;ZN8gqd>vU$?aPTE2a;W6#-w6~sxk7`9t#l8Q^ba_$pYfW-9
z#c+1%EOJ=tsvNV1b~ayBf2!^aGsJiip4*wD9rL?>qIA37t@djTL%Q(hhR$Oi9oflG
z_BR90@z@mqGxz~`A|WHhU%(G$5BX=B_5^8DJ+1a<lhMw}Y75bcvl4)_PYUd2xvV(p
zENCp2;W0-P+zN!Y1;Ghu1xMh6{%im~kn8QA85*tnM8BQaCg8Gz?eHCd4=$$p1MmSV
z7(So|!v|vVKjDK^I52#W1cnc^FavDK#ovAg0q_AU<$#+@@(=g`?a@Abka+E0TA5aV
z*S!|%pYQ=H7(TE*gbz6K&lb>*Dr0X2s*Bafyb}Kuf&gAdjMyw=`!YW4du{XsK454z
z{{bJY-@0$zL_gW~pnc*q@#E{0r_U=6@}Fu>R#*~{GJaQEo1;-h;e&KtBF52{m2F;J
zFRZqU_F6c*uw1#mK8Ub<Af`GaF|4s-tHe=nbxvxXXj*5idJEV}=y`lnd~5hIbD4pW
zvGN-A*hh>m$M$U`+i;J0Vr(3KBIRrCX=us=4C?o$U)im=#*z6HC3bYM<dl?XGQU-^
ztS)&-DC$sG5Cj}TQ}<l^(m>X5J^wkv+v{4*VK?U=lj!3WfXoZdJQ=bkRb4KKs%2Mm
zN6%rIE85Bpe@)zC{oqY<W<*iEk?P?2GzX=%CUhErel~As`o=Y;Xb_;HHP72QzZsD0
zk5|?%H(V=XRS}fxA5_+UFgxV3qTVk@|6reVU5<>e^3VLUtFY?)96K}o!xoqM%6@?Q
zH}!v@`DaZ|>}Cp?%;$Pj&pxNJHkp<b&&&U!1Ki6MN0mIdrzacE>asYay7p38u5XJ3
zH1f<JpV5pED{gqPRLhH~vOo2lxDShmmoPi~MZ>e~)aQ-)c0%PGuk*j%TeI<@3r9yY
z>+yj<om*r3OnK2%=o)d5w?(vkHG@X#tnuX~uPBcy`n=#KAFI7F^*yf+Y&WxXRGIgs
zUJ8+KI;%z-4OWS7YraO_mN=Kv(=WC&zn|X6oO?$Cj2~eAakHU#;#vp)G`NsKW551E
zY5ULmDa8z<%>@)z!)oYhUviU3M8d!ol;BcvKLQ0*Xwa=@x~xWQS85o@9<k~K;n*k2
zh1hG+i0_wB0RV(S^OZiUm~TwV3$~9l7!BhWzjUvRe3Rhg<uBe&h#>_4gj_}AQ|pnf
zZ17lEr~rV#M()0@bXxz+D>Vc1WZ@<_Bb(qic2plS1rZ6`Yy~NHL${L&TUb^vke@zd
zv!A@Oqu)>LA6SzyP$GM&S*zn}J-^<&v(^^)-W00US=3IJ#!ZSZzfhrR=041N+(j2T
zrh8j1#_C?c-LD^Ov<n!?g5K;Wui(tTMh``MXIM-l+tS|Pg_PtJT|D`PWVLkV+f$U-
zhc-lxPljU8UKF@M_;!5V_mdYF%~a^LCr<o)?fA;;%uN<_$<Qv!SE*0lpy}?kCV#s7
zyh9Y=o{VA(z6Q1rR`Db{VCo5)dFp6=i`rLQRkF$(ymfcc&zHybxusjTqF!Le`W)^{
zCzx5j()&6bM|Cc70n9%OQ3?9RKVy<382`mTo7$PY#{UT5pRMMc{k}oaecJ3jN1t1C
zrgC7vbst@=c>%oEWY^o`fkFA_-xEf6rtlY&7r(2w&PhFTQdv3RpGis>vi~#xO!UMI
zQdkHA^8vg4-|<I-m`#EG!=tq=JR+QeD4ALz-Uy2qGWkhoXhUqq&jawm&X!$FxV83W
z06yRkcwckoE?wGKyGva-eC`gggUB@@kWR+J(jMB>>Y(6VBd5-d<8N-MBFE;ald$DJ
zeSX$)?;X#u+?e)eyy<c!hbAXbZLjVjPXALHCCmFagT-oz6I07?amb+Iea!J<Td%zw
zIU&?#H(vLEF%qOQQG!W3>$Fa}&p#tI(7|dpmH+B?zxGz&Hb&g=?Kq~O4DSUFF_Sb8
zrG|#$)~WrRy`>qQDKLH@{zsRV196^kIrqoWhV*Av$aURsjtYMJt0VY9v{awvO;7bU
z!5+%fwg>f-q6&o~qYJ9vj>cE_Pm(y?!-?~!9YVGYWx=yzzY~kwofY(HBaozCuwk;o
zDkdrD!d<SKl-kj?^)^S_?O_s93=D+<la$A1U9HsBv!?~XCkK+%{>bDJa6i~*M#xV;
zgV|@uiZVDr_S%Y2<=MCNBhB@kQJ@QYf~Q5?rbN~s1F7et45lAd@>eIjtH>jz8U525
zy>;FZ)a$;VtMRRCnXd>{CX5f`vRuWN<`BcTacwdb<fV}>;HxFDMo2XcMv9gFjy&;H
z+_%_?xE}2tM?WTwaZLSN+Spao^u&d|)Vs;$-mk78b0t1>IHld_HX<6@ayvj_DmmZ+
zc5~{rnDrde?rCwonO-h(X@GX#%XCwn{2T9%ohwqT782PaSHsdgPnn;g>b+4`LlDQi
z7px{NcQxpBMn)tGoG><`;hhz>00gmk@?pf&r=OpKQ`fo|za>Txqz_2waBO%^5?>vB
zBV5~Dh1P2VPe0x~$1aQSkMtC|>gMgiNR8YKc9W|u59MFqolyS1gPslMp7puo6~qR0
zxb)T6Y84=9P3os^4P;2aUJ$C?x+yo@*v@-Vi)U+|ZtKQ0KjS0Q-03G7c6THAWlK8+
zb9r7Yj(A=jR3Lt^e|{;=g&_OS_<_iY>n*{+510?g<wxr;$tnEEfAORxwnRuydbonB
zZaamK>uDyFhxv@<BJ#%0^f22qNz$O&;(F<Y;kMw=KG#BS^$s*zP_<T8-*v@Vtl?8%
z#?za!9>4T5Wc!@5so|*Cz8N3>yuky`7582(a{cxf?o2j)W2vsmn2#(qNr9as7gKt*
zmi^Og@^khEJglhZ*m1Hq->@0iK5KI_SPi~>y0*>tqBNe^ZF=F7OzI~UNCsg!YL6xy
zuHmdnVG1o{bBe<?V+4_u+&@33c#--&!m?<{)+th9i~@OsTgE|z8LqE=zHht`WZa_c
zjlfZ%VCiUV+XoP2IG8)9f!Gl7$JihXI)B30@Takc3=QXCh6dRD0~9d-017_{fLA&s
zAWTus+&3uyzU92Fw%c731OlbR*Vfm2YR1S3gl2>-b*EtHTeI*yxGO~9#d3w=vWWZO
z`E2y@>z@ierJW_I<szDl$yn}v9r8DL<mRsMTzf;a-j&ZS(AhrWPEo?(GY{=<HZP@0
z5d#@4{L@eTd(<+DN(Wloo+ubLXe&pHzYOSq94u!MTvb&;CI8BFpMZc5lYsF32|z%o
z6>N8))4~S`2nshNW2*A$^z_9@OvlIE<+hR&?BAE*vl$?XPY#^X(4ishD8hd4u3d!U
z=KFw6v({m;!+ePKmLJis3rU&Z#9rGHHw*&8xm`Fg0Rf$g(dY*Op+oW>4nRP#00;=Y
z1AXE@2?zx5eWpTx5D?aio@kBNf(Zy)U;@Ht*aQUl)1hBtu>%@@X0L4%gYg5-TZ_;C
z89xvQp`XyGVB-hO2Mlu*2x~fmADj~>_&<ansFf~L-p5CRKf`-kyhqQjqv+w>mi1fK
z7c0eHIq?K70A#jWA*Dw1)#Zs{k&)#F8Y-V^A@h`t<oYaAbmf@IedVUeFPDQ19S$nd
z1P7j*b<8c^8wvImyq(iu%FNXz6x~g_p}Tr{>g#@ZXny%TLkb8DwSUZDQvmV&otcD;
z5F8M5`rCy9j9-?9Yxn{6t7?smGGbR<;ocLFrDX>6+ncM3b!mK|=0(ZPS`rrF3FEoL
zTCY#jpF89+Qsn{?-o~U9@Q!b8lGRdop)(_><fii4+jHPT-q25Sjx@;C&h3vy7vP>Q
zsmS1+OM2o}ke%)9lJJIw#XyFE+ydJ%45+>ehjpLE%^%oBI4Z3tBfjF26AId-K#H*w
zX8ZgQH&+^-O13S)+Yx>_jz*}l3)KxXffF~*Pzh7p=jzvwn<}frZAe;)r65#7ufVL3
z5?%d^I~1Hq+|(ZWV<t9jZq6(M7v3$r5)W_kQ6D>h^NLmFS3&_=rstE`b-k<pvsj;)
zQ7_ED`dS#!UB`aQy=DVJFkK<=9>V?gpdzNyTau6Dh5R<>nY+RSJ`__u|3DhwVu9vG
zMXE?*HPRZ;Qa}(ON2(VucCGU?`%Y*Goc^|x4R*P&JQMuFw~>I3e41g?R}VsP7Ff|T
z1JQUy%{fgUN8Git!V09Lxa(R`6E-fGkaXU55GY{N3NBz{{`8=Ljk?6$37~)ty}A~I
zC{Vx#ugwGjC}8uH+1Bf40h_nm>9Tgj-~u+QKmi*8Z~+^F1T=zSY|T%j(xj^23J_&l
z9;&?5awv~fvV&Kj4~oE@e=%R%-N9Kq4HTy_L&DM40e}K_6|&^{{7V@N+0Ol>X&R|p
zHVrdhX0;t^rmKMbwXZ-0n@<uM9sN1W`|DvN1hLBhDg5Ao`1qgW2WKC}p6$gkJ6mH<
zCwkE*2B?1#GB>wMV8rl(7I|uI?;Ot8vt-+w9i>y(an@Xs+#G3=9-(Sn^)%vgx%=h$
zV@n6RtJe~nhHfd!q4^Su${`~WDU|Eukls+3+1t@fM0p_!A8kJZ5`N39HIBPYq}X0_
zA+h%*j#Z-y9WRm0_Ve+fMPCLgo7;VYg`Io1a!->-&r~W|E)0iJr>!ooTi@JyeC~<K
zsXeTkRC~^A<y-r?MZF(5zL%T;KtMcU$oLb003Kb)m~je-8GpbJfCPAse~BMt1NebH
z7(bX;pD}Y<+Q5m6jnQ_!E|2jnCrSMx8=Om{anwU^Zv-468X`<#mrJKuPG5h+g87+z
z+%211vXdwDb{Cn4|DB?E%m-4gpU)aU+r+{=<ypAM4kWI+X3e8rl^9jMtNk*3BHbiu
zEbg5eA~%?TK!3X%o1cr#@M4>3C}Mh#&`nV@JQ?dN?`S?1l+@%e8|pD>J_(Xg0q-9^
zAUoVZs(-AOt%XBlZ1T0r<67xQt(BR;P2tUZvX4Gh5TcdHENUa-#)tZ=*tm#Kh@KC#
zm9ne!QCQQnak`;)pAd;k$_%T$U8WN;bJtjDBEMytV%;sp%7?6D$vmf^Igsh;LH^p3
zzVRDt-!zQj*Cb<{NP#*E7D7P&+9QbkHS-${y#HVDgOa28!Hb7puKy$W!2!YXU*ZQ)
z`D^x>GQZ^?_=5}B0006S03a;T`2hez!&CU&emIcTI}@XQ0AYApxkKO(Ku~BAX2Mq4
z2M}iGEVpubMsAF$Z0Z0BY#f>@ma!!D`w47HmRbOS;6=X=AlLu^Lct@p_2#C?6d^}f
zX6IeKcc(LXzCPm`F7&uNa<QkftykL~Z>ePLI(nD=Y|Y2};8Hd&PA=lxnG{MIB;iUb
z1ODjy#|ULnBF@bx01!A%=+ZC&1n_V}`ZLRaum*sQAAsxEjUXf|vDrL4U@!~%6u0uu
zFVpZ5DpcSMQ5n$nbR^pW)vE+}CV1_s_AV#E*IES%29#7W3y=g3jy8|BE{^i@3GngU
zON%ASx+U=hGaF|;6nOPhV6oD;T}Rvx0G|=B$587^dw9ctq7h<{yC`1V3Xja1lkP1@
zJ9y`mzyA(U{pKa=FfqVDLd$)_uF>BaMiAtoMpt+xo$-R}M{Oz(gnVLpO3`9?8F>R2
z`H_kajf>rwH=8z?qjzSXat$w^*Qvuxwh$t<W$C5`($+7{Pn%P5R`=M7K25Nq$&a(^
z%m9+tG?fM=A7EX(g>5Mdzi_%F67GWp@ZKki|13o;K^3Q*#I<yXKdZy9rZeA4D7+ym
zAx|eOM`+w(b7mWzrx;ma$SQoa-_C{LWBRumqb88qZ5gBXguy(Ovg!{(maC-8oJil}
z*7hEz+qYhzXJ&rEj3-{wojv?SoIXRdG^osC8M!szzf8*`Fy4zeuLDt~DZbbA>E&}6
zS8yI@SC=t3m<*b<`zOH1ULX9JGP{Li2&d+5P=G*h$xD{DQ4khXY>83SQm?dDIv8<%
z&?9gyEvoX?G)Z`R%>4%uI~1#Nk(IgXt7%`HG8-PJe%Z`WNx{{QX_@r8GrFIi?%?+V
zVFy^V{+WIt0{SoM2h0cL!r$-@pzs6zDP8g%cYLiP1O$rSPzJG=XQf44%bmF7XhLg8
zmn^^p1U7(xpmT(PP*9OuNdJp~fDaH5c4-Y>E$h0Gi4Rzbu<Y5PUq&NT$L8QM%JyG+
zm6XfXFlGOQZ*;nkVXk~l_q>$z$Lco%^k_X+<2l+l`KeIfFXd4BDj|dbHSw6t>Zuon
zyQvsdhBvg@tM6l<K42hxtI|WdOD4gKD=lUDa9eS#%uS*NK3)0dxgNcmw0M)m`>s<#
zjb(NAUp;Sl%dRf3?30sBx0g|7P5>YfpD;GS0uaEX4H+5;V1)+Q_yLf=z<FZ)z~~S^
zP}s*0Ho*9SHILK(Li_+oVDKN(h+wvs{y&2sDAJuNfW!}epN54668ULU7B;rGENpF<
zu3QxnfMpmk_KQb<4cPMj&ohF97KQepJ$u;V9JUt^TmQp0@URU&Y(o#*@WVFpuzh;i
zMjf^>hi%+pn{e1B9=6Gc?aRaV)nWVkuzhpbrXRLo0EV1Kx3&WK+TaiDJT@GV;L!m*
zxWJtEzZpW{{__13R>nVE^9?$3%{Scd?nFLyc*{5Z&s)Av%bWu)mqP&GhjwoNJ~SWL
zw|gV*-|mg{XuryNq62n8FfbP25A5+k1F;{Dhdpo=I&g>bRT1WYKN74c;2CI*Gr9p_
zCLr)fQ3jk!{mY0&A4sKwW`PUYfe-kaYUrPX+-dz=wzrtB9F?RW@oxHYuRXM+BJeB-
zN#>)HBGo(=Zh#{22U;?C4oZ^csANqf5&7>W73ZNOS&vG_ZoaDgy(H!WlqB29C0n2+
z*^f%X{<gu3P&zq|O5PcyGyq?72mU~hkm3@QBv7CWdOj(!zUl0X0iw{7PoX8bj!KSY
zhOi0&`oJG(Nz6?so!mzy>5@TfKP1rr>0dv=(2~r@jEF-oz)lGG|KJa_DCQQFQs!e;
z1Y0!ti0Cmhg8hXtx1qHjvm?lvf&Rk5M?{Yq(u~O^FOU%6YQP6twCjlIF-wZ9OsE9`
z8NlEVw5THgkEP?jmogtSrFxsOoZpKE3qy+@vn9ri+=btZwunQE9y2D`qL|XqqQ|TW
z_7_%^hZ1EuW=(&X93^PcW7cFN@;cxkLY_J@!d*v1k6BagILF$4Yy|@Ak;zfUgA9Z$
z$E<0ks*vUPqQN(yMUPpNquuMQ-;1`WLW>@=CfK5w>d>OctO@oPR@8(RJ!VaRm>g|r
z(PP&1`INzz{kMhN(Gl)CB6`f4+?wbYe!QiQOwJ&bR@P(I^r|5s@Asm?L(rnftV!)r
zB--yqTZW-Uk69CJQOr?j(PP#G`wJ_ML5m)<raw&1S7^~=))ebPqwxE_b{!EtW=&6!
z5iTD_wWFRh0j>3zH9=ksiw*?FUq9hcqTog3!TajheOr*1j8gs*1pXGV{zL7N@`s1Z
znS+aDk3J9bQpd|s&%3{QlIQ)}N*;YO<Sl|!P*2v{`Qwv+UJ-fpiI8`_F+x4@>fXto
z2>aVY-f+eQ_3ULh{GY3Ps0(_J@gINQ9e4DLLtcEv4fSv}_<#Ly*nNP!UWo-tO&r2W
z)%>jc0>A_Z-T1ZMhP>m559;~xNdL+6VfO^`ULWY$a5{5RO$PwT&u;v3A;?=@pl4$d
z`CmUAb{`-wAAz0?g|jDB^Yg&|H{F1|KLUC-Fwp;#=fmy^<mCyhQ1ku<^Q4-7oj(1h
zE0D$T`JogLV*f`9VD|;Gn*0?gRj+YRs_GDS|4m0A%Z2kmJ-r?8-#;C8FCfch3q$F-
zcJ8ElU;u<*qW01F1zAN|21;Ea;a^Y(yPuGSf5o5_W1T;#;-5Zrypq*#x(Zp7RUS&?
z7m~lD5q57OYnF;b=}sp<sqSBXcC5Q#n%>cwg)CsH0HqRz@-L}`-CxLBjv`RXEvZhb
z9M%ySn9p>yvyjF3WTEtpQ2z~muzLzwppFwtbI3&)nh#cOe=7a=r`I!dP*}V4A1i^~
zC&(IK(AK(12SeSl71Phv#xY}M1}ClmsfW<k>do+%RKo5rWVI>i;dNp>sd5-G7fio6
zYOIjOhoEEbJkwuL2fLq;wQ`_it~|?06~m6X;FIa2U4<-U0v&VDvHcy5uzL$x<^$S_
z=QvKP`wuY}eA;%jyO3ocpsiSj>o2K<-CxKW2+%P%o#&*=Va42EF8gb-4Vl9Z9dk+f
z{)Rr-J%vo^hW6J_0w>iB!(YK^+(%~>GS?T{U%7?;f;!m!gv?Ea_SYbhlPZSoui$*t
zqg{o}8-(^(HL<^=5q57O)9S85y-67*POAG4{<@#1c(lJzSyoUw{UraEPS`z$%sLW<
z(!MBtQthxjc0aG{Xm=sgM&zIrYRmo=g|PbynMDB|ozN9dsv3sR?q{hS?J6X}9y&TX
zDgF(8uzLzg2#0>}G%KA{Gi<Njr}H1}EF@hQ`n~h)`d?BByT6cZR%pMqP(7*gKl$zN
zmQYaaMkqsWQu|vvVfPr4><8_)S2Rwl9j4#@ZW#s1*Mj!jc+J0}5O!Z7$xG0FE2nc(
z)v)~bcS|TpVhgn27VG{EeMfux3j>8@gkVA06o%d}%|B^02R`^q9FjnR10}Age@y%r
z{{QO%kbDT}Ga+4r9}hTWT`0;RB3?e!K?=|CQ=9JL-+r`A6bHZf3_Jk*1IQtPTR5+S
Wf5Ywn4gnGTJ6+%p`2QdL-~R_=Ly$uN

diff --git a/python/fedml/computing/scheduler/model_scheduler/sample_model/fedml_model_config.yaml b/python/fedml/computing/scheduler/model_scheduler/sample_model/fedml_model_config.yaml
deleted file mode 100644
index 491ed507d9..0000000000
--- a/python/fedml/computing/scheduler/model_scheduler/sample_model/fedml_model_config.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-  "platform": "onnxruntime",
-  "max_batch_size": 1,
-  "input_size": [[1,24], [1,2]],
-  "input_types": ["int", "float"],
-  "input": [
-    {
-      "name": "input",
-      "data_type": "TYPE_FP32",
-      "dims": []
-    }
-  ],
-  "output": [
-    {
-      "name": "output",
-      "data_type": "TYPE_FP32",
-      "dims": []
-    }
-  ]
-}
\ No newline at end of file
diff --git a/python/fedml/computing/scheduler/scheduler_core/general_constants.py b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
index 68c1a8e09d..8c60b17bdf 100755
--- a/python/fedml/computing/scheduler/scheduler_core/general_constants.py
+++ b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
@@ -192,14 +192,14 @@ def get_public_ip():
     @staticmethod
     def get_ip_address(request_json, infer_host=None):
         # OPTION 1: Use local ip
-        ip = GeneralConstants.get_local_ip()
-
-        # OPTION 2: Auto detect public ip
-        if "parameters" in request_json and \
-                GeneralConstants.CONFIG_KEY_AUTO_DETECT_PUBLIC_IP in request_json["parameters"] and \
-                request_json["parameters"][GeneralConstants.CONFIG_KEY_AUTO_DETECT_PUBLIC_IP]:
-            ip = GeneralConstants.get_public_ip()
-            logging.info("Auto detect public ip for master: " + ip)
+        # ip = GeneralConstants.get_local_ip()
+        #
+        # # OPTION 2: Auto detect public ip
+        # if "parameters" in request_json and \
+        #         GeneralConstants.CONFIG_KEY_AUTO_DETECT_PUBLIC_IP in request_json["parameters"] and \
+        #         request_json["parameters"][GeneralConstants.CONFIG_KEY_AUTO_DETECT_PUBLIC_IP]:
+        ip = GeneralConstants.get_public_ip()
+        logging.info("Auto detect public ip for master: " + ip)
 
         # OPTION 3: Use user indicated ip
         if infer_host is not None and infer_host != "127.0.0.1" and infer_host != "localhost":

From 31d8e7c1a73fafcffd8d715f7e93d9d20561c82c Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 12 Jun 2024 02:16:44 +0800
Subject: [PATCH 136/282] [CoreEngine] Adjust the design of FedML Python Agent
 to a decentralized architecture that supports Launch Master, Launch Slave,
 Deploy Master, and Deploy Slave at the same time.

---
 .../scheduler/master/base_master_agent.py     | 26 +++++--
 .../master/base_master_job_runner_manager.py  | 22 ++++--
 .../master/base_master_protocol_manager.py    | 10 ++-
 .../scheduler/master/cloud_server_manager.py  |  4 ++
 .../master/master_protocol_manager.py         |  3 +-
 .../scheduler_core/account_manager.py         |  5 ++
 .../scheduler_core/message_center.py          | 13 +++-
 .../scheduler_base_protocol_manager.py        | 55 ++++++++++-----
 .../scheduler/scheduler_core/status_center.py |  3 +
 .../scheduler/slave/base_slave_agent.py       | 25 +++++--
 .../computing/scheduler/slave/client_login.py | 16 ++---
 .../scheduler/slave/slave_protocol_manager.py | 13 ++--
 .../scheduler/slave/united_agents.py          | 67 +++++++++++++++++++
 python/fedml/core/mlops/__init__.py           | 16 +++--
 14 files changed, 219 insertions(+), 59 deletions(-)
 create mode 100755 python/fedml/computing/scheduler/slave/united_agents.py

diff --git a/python/fedml/computing/scheduler/master/base_master_agent.py b/python/fedml/computing/scheduler/master/base_master_agent.py
index 3aff523c24..b27ed9547a 100755
--- a/python/fedml/computing/scheduler/master/base_master_agent.py
+++ b/python/fedml/computing/scheduler/master/base_master_agent.py
@@ -23,7 +23,8 @@ def __init__(self):
 
     def login(
             self, user_id, api_key=None, device_id=None,
-            os_name=None, role=None, runner_cmd=None
+            os_name=None, role=None, runner_cmd=None,
+            communication_manager=None, sender_message_queue=None, status_center_queue=None
     ):
         # Login account
         login_result = FedMLAccountManager.get_instance().login(
@@ -48,14 +49,18 @@ def login(
         # Initialize the protocol manager
         # noinspection PyBoardException
         try:
-            self._initialize_protocol_manager()
+            self._initialize_protocol_manager(
+                communication_manager=communication_manager,
+                sender_message_queue=sender_message_queue,
+                status_center_queue=status_center_queue)
         except Exception as e:
             FedMLAccountManager.write_login_failed_file(is_client=False)
             self.protocol_mgr.stop()
             raise e
 
         # Start the protocol manager to process the messages from MLOps and slave agents.
-        self.protocol_mgr.start()
+        if communication_manager is None:
+            self.protocol_mgr.start()
 
     @staticmethod
     def logout():
@@ -69,7 +74,11 @@ def _create_protocol_manager(self, role, login_result):
             login_result, agent_config=login_result.agent_config)
         self.protocol_mgr.run_as_edge_server_and_agent = True \
             if role == FedMLAccountManager.ROLE_EDGE_SERVER else False
-        self.protocol_mgr.run_as_cloud_agent = True if role == FedMLAccountManager.ROLE_CLOUD_AGENT else False
+        self.protocol_mgr.run_as_cloud_agent = True \
+            if role == FedMLAccountManager.ROLE_CLOUD_AGENT or role == FedMLAccountManager.ROLE_GPU_MASTER_SERVER \
+            else False
+        self.use_local_process_as_cloud_server = True \
+            if role == FedMLAccountManager.ROLE_GPU_MASTER_SERVER else self.use_local_process_as_cloud_server
         self.protocol_mgr.run_as_cloud_server = True if role == FedMLAccountManager.ROLE_CLOUD_SERVER else False
         self.protocol_mgr.args = login_result
         self.protocol_mgr.edge_id = login_result.edge_id
@@ -79,12 +88,17 @@ def _create_protocol_manager(self, role, login_result):
         self.protocol_mgr.enable_simulation_cloud_agent = self.enable_simulation_cloud_agent
         self.protocol_mgr.use_local_process_as_cloud_server = self.use_local_process_as_cloud_server
 
-    def _initialize_protocol_manager(self):
+    def _initialize_protocol_manager(
+            self, communication_manager=None, sender_message_queue=None, status_center_queue=None
+    ):
         # Init local database
         self._init_database()
 
         # Initialize the master protocol
-        self.protocol_mgr.initialize()
+        self.protocol_mgr.initialize(
+            communication_manager=communication_manager,
+            sender_message_queue=sender_message_queue,
+            status_center_queue=status_center_queue)
 
         # Report the IDLE status to MLOps
         self.mlops_metrics.report_server_training_status(
diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
index 6831c9d034..08ef1d640e 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
@@ -1,10 +1,12 @@
 import base64
 import json
 import logging
+import os
 import time
 from abc import ABC
 from multiprocessing import Process
 from .cloud_server_manager import FedMLCloudServerManager
+from ..comm_utils.run_process_utils import RunProcessUtils
 from ..scheduler_core.scheduler_base_job_runner_manager import FedMLSchedulerBaseJobRunnerManager
 
 
@@ -38,26 +40,38 @@ def start_job_runner(
 
     def stop_job_runner(
             self, run_id, args=None, server_id=None, request_json=None,
-            run_as_cloud_agent=False, run_as_cloud_server=False
+            run_as_cloud_agent=False, run_as_cloud_server=False,
+            use_local_process_as_cloud_server=False
     ):
         super().stop_job_runner(run_id)
 
         if run_as_cloud_agent or run_as_cloud_server:
             stopping_process = Process(
-                target=FedMLCloudServerManager.stop_cloud_server, args=(run_id, server_id, args.agent_config))
+                target=FedMLCloudServerManager.stop_cloud_server,
+                args=(run_id, server_id, args.agent_config))
             stopping_process.start()
 
+            if run_as_cloud_server:
+                time.sleep(1)
+                RunProcessUtils.kill_process(os.getpid())
+
     def complete_job_runner(
             self, run_id, args=None, server_id=None, request_json=None,
-            run_as_cloud_agent=False, run_as_cloud_server=False
+            run_as_cloud_agent=False, run_as_cloud_server=False,
+            use_local_process_as_cloud_server=False
     ):
         super().complete_job_runner(run_id)
 
         if run_as_cloud_agent or run_as_cloud_server:
             stopping_process = Process(
-                target=FedMLCloudServerManager.stop_cloud_server, args=(run_id, server_id, args.agent_config))
+                target=FedMLCloudServerManager.stop_cloud_server,
+                args=(run_id, server_id, args.agent_config))
             stopping_process.start()
 
+            if run_as_cloud_server:
+                time.sleep(1)
+                RunProcessUtils.kill_process(os.getpid())
+
     def _start_cloud_server(
             self, args, run_id, request_json, edge_id=None,
             use_local_process_as_cloud_server=False
diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
index 1c4cbba4f4..a6b47855c6 100755
--- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -197,7 +197,7 @@ def callback_start_train(self, topic=None, payload=None):
         self.run_edge_ids[run_id_str] = edge_id_list
 
         # report server running status to master agent
-        if not self.run_as_cloud_server:
+        if not self.run_as_cloud_server and not self.run_as_cloud_agent:
             self.mlops_metrics.report_server_id_status(
                 run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_STARTING, edge_id=self.edge_id,
                 server_id=self.edge_id, server_agent_id=self.edge_id, running_json=payload)
@@ -390,6 +390,9 @@ def callback_request_job_status(self, topic, payload):
     def callback_request_device_status_in_job(self, topic, payload):
         self.response_device_status_in_job(topic, payload)
 
+    def process_extra_queues(self, extra_queues):
+        self.rebuild_status_center(extra_queues[0])
+
     def generate_protocol_manager(self):
         message_status_runner = self._generate_protocol_manager_instance(
             self.args, agent_config=self.agent_config
@@ -476,6 +479,8 @@ def init_job_task(self, request_json):
         self.setup_listener_for_run_logs(run_id)
 
     def setup_listeners_for_edge_status(self, run_id, edge_ids, server_id):
+        if self.run_as_cloud_agent:
+            return
         edge_status_topic = "fl_client/flclient_agent_" + str(server_id) + "/status"
         payload = {"run_id": run_id, "init_all_edge_id_list": edge_ids, "init_server_id": server_id}
         self.callback_edge_status(edge_status_topic, json.dumps(payload))
@@ -486,6 +491,9 @@ def setup_listeners_for_edge_status(self, run_id, edge_ids, server_id):
             self.subscribe_msg(edge_status_topic)
 
     def remove_listeners_for_edge_status(self, edge_ids=None):
+        if self.run_as_cloud_agent:
+            return
+
         if edge_ids is None:
             edge_ids = self.request_json["edgeids"]
 
diff --git a/python/fedml/computing/scheduler/master/cloud_server_manager.py b/python/fedml/computing/scheduler/master/cloud_server_manager.py
index 040a0f38a3..aa9c07e84f 100755
--- a/python/fedml/computing/scheduler/master/cloud_server_manager.py
+++ b/python/fedml/computing/scheduler/master/cloud_server_manager.py
@@ -2,6 +2,7 @@
 import json
 import logging
 import os
+import platform
 import traceback
 
 import fedml
@@ -32,6 +33,9 @@ def __init__(self, args, run_id=None, edge_id=None, request_json=None, agent_con
 
     @staticmethod
     def start_local_cloud_server(user, version, cloud_device_id, runner_cmd_encoded):
+        if platform.system() != "Windows":
+            os.setsid()
+
         print(f"start cloud server, device id {cloud_device_id}, runner cmd {runner_cmd_encoded}")
         pip_source_dir = os.path.dirname(__file__)
         login_cmd = os.path.join(pip_source_dir, "server_login.py")
diff --git a/python/fedml/computing/scheduler/master/master_protocol_manager.py b/python/fedml/computing/scheduler/master/master_protocol_manager.py
index ca9621e41d..eb8cde239f 100755
--- a/python/fedml/computing/scheduler/master/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/master_protocol_manager.py
@@ -40,4 +40,5 @@ def _process_job_complete_status(self, run_id, server_id, complete_payload):
         # Complete the job runner
         self._get_job_runner_manager().complete_job_runner(
             run_id, args=self.args, server_id=server_id, request_json=complete_payload,
-            run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server)
+            run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server,
+            use_local_process_as_cloud_server=self.use_local_process_as_cloud_server)
diff --git a/python/fedml/computing/scheduler/scheduler_core/account_manager.py b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
index 3491e102f6..20c5fcd842 100755
--- a/python/fedml/computing/scheduler/scheduler_core/account_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
@@ -25,6 +25,7 @@ class FedMLAccountManager(Singleton):
     ROLE_CLOUD_SERVER = "cloud_server"
     ROLE_EDGE_DEVICE = "client"
     ROLE_GPU_PROVIDER = "gpu_supplier"
+    ROLE_GPU_MASTER_SERVER = "gpu_master_server"
     ROLE_DEPLOY_MASTER_ON_PREM = "md.on_premise_device.master"
     ROLE_DEPLOY_WORKER_ON_PREM = "md.on_premise_device"
 
@@ -33,6 +34,7 @@ class FedMLAccountManager(Singleton):
     DEVICE_ID_SUFFIX_CLOUD_SERVER = ".Public.Server"
     DEVICE_ID_SUFFIX_EDGE_DEVICE = ".Edge.Device"
     DEVICE_ID_SUFFIX_GPU_PROVIDER = ".Edge.GPU.Supplier"
+    DEVICE_ID_SUFFIX_GPU_MASTER_SERVER = ".Edge.GPU.MasterServer"
     DEVICE_ID_SUFFIX_DEPLOY = "MDA"
     DEVICE_ID_SUFFIX_DEPLOY_MASTER_ON_PREM = ".OnPremise.Master.Device"
     DEVICE_ID_SUFFIX_DEPLOY_WORKER_ON_PREM = ".OnPremise.Device"
@@ -144,6 +146,9 @@ def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None,
             device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_EDGE_DEVICE
         elif role == FedMLAccountManager.ROLE_GPU_PROVIDER:
             device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_GPU_PROVIDER
+        elif role == FedMLAccountManager.ROLE_GPU_MASTER_SERVER:
+            device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_GPU_MASTER_SERVER
+            is_master = True
         elif role == FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM:
             device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_DEPLOY_MASTER_ON_PREM
             is_master = True
diff --git a/python/fedml/computing/scheduler/scheduler_core/message_center.py b/python/fedml/computing/scheduler/scheduler_core/message_center.py
index 869ed6e510..b716a8a373 100755
--- a/python/fedml/computing/scheduler/scheduler_core/message_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/message_center.py
@@ -19,6 +19,7 @@
 class FedMLMessageCenter(object):
     FUNC_SETUP_MESSAGE_CENTER = "setup_message_center"
     FUNC_REBUILD_MESSAGE_CENTER = "rebuild_message_center"
+    FUNC_PROCESS_EXTRA_QUEUES = "process_extra_queues"
     ENABLE_SAVE_MESSAGE_TO_FILE = True
     PUBLISH_MESSAGE_RETRY_TIMEOUT = 60 * 1000.0
     PUBLISH_MESSAGE_RETRY_COUNT = 3
@@ -295,7 +296,10 @@ def get_listener_message_queue(self):
     def setup_listener_message_queue(self):
         self.listener_message_queue = Queue()
 
-    def start_listener(self, sender_message_queue=None, listener_message_queue=None, agent_config=None, message_center_name=None):
+    def start_listener(
+            self, sender_message_queue=None, listener_message_queue=None,
+            agent_config=None, message_center_name=None, extra_queues=None
+    ):
         if self.listener_message_center_process is not None:
             return
 
@@ -313,7 +317,7 @@ def start_listener(self, sender_message_queue=None, listener_message_queue=None,
             target=message_runner.run_listener_dispatcher, args=(
                 self.listener_message_event, self.listener_message_queue,
                 self.listener_handler_funcs, sender_message_queue,
-                message_center_name
+                message_center_name, extra_queues
             )
         )
         self.listener_message_center_process.start()
@@ -349,7 +353,7 @@ def unsubscribe_msg(self, topic):
 
     def run_listener_dispatcher(
             self, message_event, message_queue, listener_funcs, sender_message_queue,
-            message_center_name
+            message_center_name, extra_queues
     ):
         self.listener_message_event = message_event
         self.listener_message_queue = message_queue
@@ -363,6 +367,9 @@ def run_listener_dispatcher(
         else:
             methodcaller(FedMLMessageCenter.FUNC_REBUILD_MESSAGE_CENTER, sender_message_queue)(self)
 
+        if extra_queues is not None:
+            methodcaller(FedMLMessageCenter.FUNC_PROCESS_EXTRA_QUEUES, extra_queues)(self)
+
         while True:
             message_entity = None
             try:
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
index 9bb8b7a7ec..9cc3544647 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
@@ -60,38 +60,51 @@ def add_protocol_handler(self):
         # self.add_message_listener(self.topic_start_train, self.callback_start_train)
         pass
 
-    def initialize(self):
+    def initialize(
+            self, communication_manager=None, sender_message_queue=None, status_center_queue=None
+    ):
         # Generate the message topics
         self.generate_topics()
 
         # Setup MQTT connection
-        self.communication_mgr = MqttManager(
-            self.agent_config["mqtt_config"]["BROKER_HOST"],
-            self.agent_config["mqtt_config"]["BROKER_PORT"],
-            self.agent_config["mqtt_config"]["MQTT_USER"],
-            self.agent_config["mqtt_config"]["MQTT_PWD"],
-            self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"],
-            f"FedML_Agent_Daemon_@{self.user_name}@_@{self.current_device_id}@_@{str(uuid.uuid4())}@",
-            self.topic_last_will,
-            json.dumps({"ID": self.edge_id, "status": GeneralConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE})
-        )
+        if communication_manager is None:
+            self.communication_mgr = MqttManager(
+                self.agent_config["mqtt_config"]["BROKER_HOST"],
+                self.agent_config["mqtt_config"]["BROKER_PORT"],
+                self.agent_config["mqtt_config"]["MQTT_USER"],
+                self.agent_config["mqtt_config"]["MQTT_PWD"],
+                self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"],
+                f"FedML_Agent_Daemon_@{self.user_name}@_@{self.current_device_id}@_@{str(uuid.uuid4())}@",
+                self.topic_last_will,
+                json.dumps({"ID": self.edge_id, "status": GeneralConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE})
+            )
+        else:
+            self.communication_mgr = communication_manager
 
         # Add the message listeners for all topics
         self.add_protocol_handler()
 
         # Start the message center to process edge related messages.
-        self.setup_message_center()
+        if sender_message_queue is None:
+            self.setup_message_center()
+        else:
+            self.rebuild_message_center(sender_message_queue)
 
         # Setup the message listener queue
         self.setup_listener_message_queue()
 
         # Start the status center to process edge related status.
-        self.start_status_listener_center()
+        if status_center_queue is None:
+            self.start_status_listener_center()
+        else:
+            self.set_status_queue(status_center_queue)
+            self.rebuild_status_center(status_center_queue)
 
         # Start the message center for listener
         self.start_listener(sender_message_queue=self.message_center.get_sender_message_queue(),
                             agent_config=self.agent_config,
-                            message_center_name=self.message_center_name)
+                            message_center_name=self.message_center_name,
+                            extra_queues=[self.get_status_queue()])
 
         # Init extra items, e.g. database, recovery, etc.
         self._init_extra_items()
@@ -99,11 +112,11 @@ def initialize(self):
         # Setup MQTT connected listener
         self.communication_mgr.add_connected_listener(self.on_agent_communication_connected)
         self.communication_mgr.add_disconnected_listener(self.on_agent_communication_disconnected)
-        self.communication_mgr.connect()
 
     def start(self):
         # Start MQTT message loop
         try:
+            self.communication_mgr.connect()
             self.communication_mgr.loop_forever()
         except Exception as e:
             if str(e) == "Restarting after upgraded...":
@@ -233,6 +246,9 @@ def rebuild_status_center(self, status_center_queue):
         self.status_reporter.edge_id = self.edge_id
         self.status_reporter.server_agent_id = self.server_agent_id
 
+    def process_extra_queues(self, extra_queues):
+        pass
+
     def generate_status_report(self, run_id, edge_id, server_agent_id=None):
         status_reporter = MLOpsMetrics()
         status_reporter.set_messenger(self, send_message_func=self.send_status_message)
@@ -268,6 +284,15 @@ def get_status_runner(self):
 
         return None
 
+    def get_get_protocol_communication_manager(self):
+        return self.communication_mgr
+
+    def get_protocol_sender_message_queue(self):
+        return self.message_center.get_sender_message_queue()
+
+    def get_get_protocol_status_center_queue(self):
+        return self.get_status_queue()
+
     def send_agent_active_msg(self, edge_id):
         active_msg = {"ID": edge_id, "status": GeneralConstants.MSG_MLOPS_SERVER_STATUS_IDLE}
         self.message_center.send_message_json(self.topic_active, json.dumps(active_msg))
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py
index 97c2115e76..53dd73c1df 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py
@@ -142,6 +142,9 @@ def send_status_message(self, topic, payload):
     def get_status_queue(self):
         return self.status_queue
 
+    def set_status_queue(self, status_queue):
+        self.status_queue = status_queue
+
     def status_center_process_master_status(self, topic, payload):
         pass
 
diff --git a/python/fedml/computing/scheduler/slave/base_slave_agent.py b/python/fedml/computing/scheduler/slave/base_slave_agent.py
index 01c0a39195..a8ac9fa1cb 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_agent.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_agent.py
@@ -24,7 +24,8 @@ def __init__(self):
 
     def login(
             self, userid, api_key=None, device_id=None,
-            os_name=None, need_to_check_gpu=False, role=None
+            os_name=None, need_to_check_gpu=False, role=None,
+            communication_manager=None, sender_message_queue=None, status_center_queue=None
     ):
         # Preprocess the login args
         if need_to_check_gpu:
@@ -57,17 +58,21 @@ def login(
         # Initialize the protocol manager
         # noinspection PyBoardException
         try:
-            self._initialize_protocol_manager()
+            self._initialize_protocol_manager(
+                communication_manager=communication_manager,
+                sender_message_queue=sender_message_queue,
+                status_center_queue=status_center_queue)
         except Exception as e:
             FedMLAccountManager.write_login_failed_file(is_client=True)
             self.protocol_mgr.stop()
             raise e
 
+        return login_result
+
+    def start(self):
         # Start the protocol manager to process the messages from MLOps and slave agents.
         self.protocol_mgr.start()
 
-        return login_result
-
     @staticmethod
     def logout():
         GeneralConstants.cleanup_run_process(None)
@@ -84,12 +89,17 @@ def _create_protocol_manager(self, login_result):
         self.protocol_mgr.user_name = login_result.user_name
         self.protocol_mgr.agent_config = login_result.agent_config
 
-    def _initialize_protocol_manager(self):
+    def _initialize_protocol_manager(
+            self, communication_manager=None, sender_message_queue=None, status_center_queue=None
+    ):
         # Init local database
         self._init_database()
 
         # Initialize the master protocol
-        self.protocol_mgr.initialize()
+        self.protocol_mgr.initialize(
+            communication_manager=communication_manager,
+            sender_message_queue=sender_message_queue,
+            status_center_queue=status_center_queue)
 
         # Start the client API process
         self._start_slave_api()
@@ -122,6 +132,9 @@ def _start_slave_api(self):
                 should_capture_stderr=False
             )
 
+    def get_protocol_manager(self):
+        return self.protocol_mgr
+
     @abstractmethod
     def _get_log_file_dir(self):
         pass
diff --git a/python/fedml/computing/scheduler/slave/client_login.py b/python/fedml/computing/scheduler/slave/client_login.py
index 37a6dc8064..7a1c759410 100755
--- a/python/fedml/computing/scheduler/slave/client_login.py
+++ b/python/fedml/computing/scheduler/slave/client_login.py
@@ -1,11 +1,11 @@
 import argparse
 import os
 import fedml
-from fedml.computing.scheduler.slave.slave_agent import FedMLLaunchSlaveAgent
+from fedml.computing.scheduler.slave.united_agents import FedMLUnitedAgent
 
 
 def logout():
-    FedMLLaunchSlaveAgent.logout()
+    FedMLUnitedAgent.get_instance().logout()
 
 
 if __name__ == "__main__":
@@ -18,6 +18,7 @@ def logout():
     parser.add_argument("--version", "-v", type=str, default="release")
     parser.add_argument("--local_server", "-ls", type=str, default="127.0.0.1")
     parser.add_argument("--role", "-r", type=str, default="client")
+    parser.add_argument("--runner_cmd", "-rc", type=str, default="{}")
     parser.add_argument("--device_id", "-id", type=str, default="0")
     parser.add_argument("--os_name", "-os", type=str, default="")
     parser.add_argument("--api_key", "-k", type=str, default="")
@@ -30,17 +31,16 @@ def logout():
     if args.api_key == "":
         args.api_key = args.user
 
-    fedml.set_env_version("test")
-
     if args.local_on_premise_platform_host != "127.0.0.1":
         fedml.set_local_on_premise_platform_host(args.local_on_premise_platform_host)
     if args.local_on_premise_platform_port != 80:
         fedml.set_local_on_premise_platform_port(args.local_on_premise_platform_port)
 
     fedml.set_env_version(args.version)
-    slave_agent = FedMLLaunchSlaveAgent()
+    united_agents = FedMLUnitedAgent.get_instance()
     if args.type == 'login':
-        slave_agent.login(args.api_key, api_key=args.api_key, device_id=args.device_id,
-                          os_name=args.os_name, role=args.role)
+        united_agents.login(
+            args.api_key, api_key=args.api_key, device_id=args.device_id,
+            os_name=args.os_name, role=args.role, runner_cmd=args.runner_cmd)
     else:
-        FedMLLaunchSlaveAgent.logout()
+        united_agents.logout()
diff --git a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
index a1067a0d96..6e3cb2ebe1 100755
--- a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
@@ -1,7 +1,5 @@
 import copy
-import json
 import os
-import fedml
 from ..comm_utils.job_cleanup import JobCleanup
 from .base_slave_protocol_manager import FedMLBaseSlaveProtocolManager
 from .launch_job_runner_manager import FedMLLaunchJobRunnerManager
@@ -34,7 +32,8 @@ def _get_job_runner_manager(self):
     def _process_connection_ready(self):
         from fedml.core.mlops import sync_deploy_id
         sync_deploy_id(
-            self.edge_id, self.model_device_server.edge_id, self.model_device_client_edge_id_list)
+            self.edge_id, self.model_device_server_id, self.model_device_client_edge_id_list,
+            message_center=self.message_center)
 
     # Override
     def _process_connection_lost(self):
@@ -73,9 +72,8 @@ def _init_extra_items(self):
                     model_device_client.redis_port = infer_redis_port
                 if infer_redis_password is not None:
                     model_device_client.redis_password = infer_redis_password
-                model_device_client.start()
                 self.model_device_client_list.append(model_device_client)
-                self.model_device_client_edge_id_list.append(model_device_client.get_edge_id())
+                self.model_device_client_edge_id_list.append(model_device_client.bind_device())
 
         self.args = copy.deepcopy(in_args)
         if self.model_device_server is None:
@@ -91,8 +89,7 @@ def _init_extra_items(self):
             if infer_redis_password is not None:
                 self.model_device_server.redis_password = infer_redis_password
 
-            self.model_device_server.start()
-            self.model_device_server_id = self.model_device_server.get_edge_id()
+            self.model_device_server_id = self.model_device_server.bind_device()
 
         # Save the deployed master and worker id list to the environment variable.
         os.environ["FEDML_DEPLOY_MASTER_ID"] = str(self.model_device_server_id)
@@ -102,4 +99,4 @@ def _init_extra_items(self):
         self.args = copy.deepcopy(in_args)
         self.mlops_metrics.stop_device_realtime_perf()
         self.mlops_metrics.report_device_realtime_perf(self.args, self.args.agent_config["mqtt_config"])
-        pass
\ No newline at end of file
+        pass
diff --git a/python/fedml/computing/scheduler/slave/united_agents.py b/python/fedml/computing/scheduler/slave/united_agents.py
new file mode 100755
index 0000000000..e365de8860
--- /dev/null
+++ b/python/fedml/computing/scheduler/slave/united_agents.py
@@ -0,0 +1,67 @@
+
+from fedml.computing.scheduler.scheduler_core.account_manager import FedMLAccountManager
+from fedml.computing.scheduler.slave.slave_agent import FedMLLaunchSlaveAgent
+from fedml.computing.scheduler.master.master_agent import FedMLLaunchMasterAgent
+from fedml.computing.scheduler.model_scheduler.model_device_server import FedMLDeployMasterAgent
+from fedml.computing.scheduler.model_scheduler.model_device_client import FedMLDeployWorkerAgent
+from fedml.core.common.singleton import Singleton
+
+
+class FedMLUnitedAgent(Singleton):
+
+    @staticmethod
+    def get_instance():
+        return FedMLUnitedAgent()
+
+    def logout(self):
+        FedMLLaunchSlaveAgent.logout()
+
+    def login(self, userid, api_key=None, device_id=None,
+              os_name=None, need_to_check_gpu=False, role=None, runner_cmd=None):
+        # Create the launch master/slave and deploy master/slave agents.
+        launch_slave_agent = FedMLLaunchSlaveAgent()
+        launch_master_agent = FedMLLaunchMasterAgent()
+        deploy_slave_agent = FedMLDeployWorkerAgent()
+        deploy_master_agent = FedMLDeployMasterAgent()
+
+        # Login with the launch slave role
+        launch_slave_agent.login(
+            api_key, api_key=api_key, device_id=device_id,
+            os_name=os_name, role=role
+        )
+
+        # Get the communication manager, sender message queue and status center queue
+        shared_communication_mgr = launch_slave_agent.get_protocol_manager().get_get_protocol_communication_manager()
+        shared_sender_message_queue = launch_slave_agent.get_protocol_manager().get_protocol_sender_message_queue()
+        shared_status_center_queue = launch_slave_agent.get_protocol_manager().get_get_protocol_status_center_queue()
+
+        # Login with the launch master role based on the shared communication manager
+        launch_master_agent.login(
+            api_key, api_key=api_key, device_id=device_id,
+            os_name=os_name, runner_cmd=runner_cmd,
+            role=FedMLAccountManager.ROLE_GPU_MASTER_SERVER,
+            communication_manager=shared_communication_mgr,
+            sender_message_queue=shared_sender_message_queue,
+            status_center_queue=shared_status_center_queue
+        )
+
+        # Login with the deployment master role based on the shared communication manager
+        deploy_master_agent.login(
+            userid, api_key=api_key, device_id=device_id,
+            os_name=os_name, role=FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM,
+            communication_manager=shared_communication_mgr,
+            sender_message_queue=shared_sender_message_queue,
+            status_center_queue=shared_status_center_queue
+        )
+
+        # Login with the deployment slave role based on the shared communication manager
+        deploy_slave_agent.login(
+            userid, api_key=api_key, device_id=device_id,
+            os_name=os_name, role=FedMLAccountManager.ROLE_DEPLOY_WORKER_ON_PREM,
+            communication_manager=shared_communication_mgr,
+            sender_message_queue=shared_sender_message_queue,
+            status_center_queue=shared_status_center_queue
+        )
+
+        # Start the slave agent to connect to servers and loop forever.
+        launch_slave_agent.start()
diff --git a/python/fedml/core/mlops/__init__.py b/python/fedml/core/mlops/__init__.py
index 148427fe1f..121c8e26bb 100644
--- a/python/fedml/core/mlops/__init__.py
+++ b/python/fedml/core/mlops/__init__.py
@@ -1453,12 +1453,14 @@ def release_resources(run_id, device_id):
         MLOpsConstants.MSG_TOPIC_LAUNCH_RELEASE_GPU_IDS, json.dumps(payload))
 
 
-def sync_deploy_id(device_id, master_deploy_id, worker_deploy_id_list):
-    fedml_args = get_fedml_args()
-
-    setup_log_mqtt_mgr()
-
+def sync_deploy_id(device_id, master_deploy_id, worker_deploy_id_list, message_center=None):
     payload = {"device_id": device_id, "master_deploy_id": master_deploy_id, "worker_deploy_ids": worker_deploy_id_list}
-    MLOpsStore.mlops_log_mqtt_mgr.send_message_json(
-        MLOpsConstants.MSG_TOPIC_LAUNCH_SYNC_DEPLOY_IDS, json.dumps(payload))
+    if message_center is None:
+        fedml_args = get_fedml_args()
+        setup_log_mqtt_mgr()
+        MLOpsStore.mlops_log_mqtt_mgr.send_message_json(
+            MLOpsConstants.MSG_TOPIC_LAUNCH_SYNC_DEPLOY_IDS, json.dumps(payload))
+    else:
+        message_center.send_message( MLOpsConstants.MSG_TOPIC_LAUNCH_SYNC_DEPLOY_IDS, json.dumps(payload))
+
 

From edd148e42a8c2afdf194907cb7bd64804709c9ad Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 12 Jun 2024 02:45:48 +0800
Subject: [PATCH 137/282] [CoreEngine] Use the fork start method on macOS and
 Linux to avoid the crash issue.

---
 .../master/base_master_job_runner.py          |  2 +-
 .../scheduler_core/message_center.py          |  5 +++--
 .../scheduler_base_job_runner.py              |  4 +++-
 .../scheduler/scheduler_core/status_center.py |  4 +++-
 .../scheduler/slave/base_slave_job_runner.py  |  3 ++-
 python/fedml/core/mlops/mlops_device_perfs.py | 19 ++++++++++---------
 python/fedml/core/mlops/mlops_job_perfs.py    |  5 +++--
 .../core/mlops/mlops_runtime_log_daemon.py    |  4 ++--
 8 files changed, 27 insertions(+), 19 deletions(-)

diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py
index 9ebab258bb..1383c9058c 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py
@@ -425,7 +425,7 @@ def start_runner_process(
         server_runner.edge_id_status_queue = self.run_edge_id_status_queue
         server_runner.edge_device_info_queue = self.run_edge_device_info_queue
         self.run_extend_queue_list = self._generate_extend_queue_list()
-        self.run_process = Process(
+        self.run_process = fedml.get_multiprocessing_context().Process(
             target=server_runner.run if not is_server_job else server_runner.run_server_job, args=(
                 self.run_process_event, self.run_process_completed_event, self.run_edge_id_status_queue,
                 self.run_edge_device_info_queue, self.run_metrics_queue, self.run_events_queue,
diff --git a/python/fedml/computing/scheduler/scheduler_core/message_center.py b/python/fedml/computing/scheduler/scheduler_core/message_center.py
index f698d61816..74510c6939 100755
--- a/python/fedml/computing/scheduler/scheduler_core/message_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/message_center.py
@@ -10,6 +10,7 @@
 import queue
 from os.path import expanduser
 
+import fedml
 from fedml.core.distributed.communication.mqtt.mqtt_manager import MqttManager
 from ..slave.client_constants import ClientConstants
 from ....core.mlops.mlops_metrics import MLOpsMetrics
@@ -137,7 +138,7 @@ def start_sender(self, message_center_name=None):
         self.message_event.clear()
         message_center = FedMLMessageCenter(agent_config=self.sender_agent_config,
                                             sender_message_queue=self.sender_message_queue)
-        self.message_center_process = Process(
+        self.message_center_process = fedml.get_multiprocessing_context().Process(
             target=message_center.run_sender, args=(
                 self.message_event, self.sender_message_queue,
                 message_center_name
@@ -314,7 +315,7 @@ def start_listener(
         self.listener_agent_config = agent_config
         message_runner = self.get_message_runner()
         message_runner.listener_agent_config = agent_config
-        self.listener_message_center_process = Process(
+        self.listener_message_center_process = fedml.get_multiprocessing_context().Process(
             target=message_runner.run_listener_dispatcher, args=(
                 self.listener_message_event, self.listener_message_queue,
                 self.listener_handler_funcs, sender_message_queue,
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
index 6e0010f556..44562c65f0 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -9,6 +9,8 @@
 import traceback
 import zipfile
 import queue
+
+import fedml
 from ..comm_utils.constants import SchedulerConstants
 from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs
 from ..scheduler_entry.constants import Constants
@@ -209,7 +211,7 @@ def retrieve_and_unzip_package(self, package_name, package_url):
         from multiprocessing import Process
         completed_event = multiprocessing.Event()
         info_queue = multiprocessing.Queue()
-        download_process = Process(target=self.download_package_proc,
+        download_process = fedml.get_multiprocessing_context().Process(target=self.download_package_proc,
                                    args=(package_url, local_package_file, completed_event, info_queue))
         download_process.start()
         allowed_block_download_time = 60
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py
index 53dd73c1df..0897f94c19 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py
@@ -5,6 +5,8 @@
 import multiprocessing
 from multiprocessing import Process, Queue
 import queue
+
+import fedml
 from .message_common import FedMLMessageEntity, FedMLStatusEntity
 from .message_center import FedMLMessageCenter
 import traceback
@@ -114,7 +116,7 @@ def start_status_center(self, sender_message_center_queue=None,
         self.status_runner = self.get_status_runner()
         target_func = self.status_runner.run_status_dispatcher if not is_slave_agent else \
             self.status_runner.run_status_dispatcher_in_slave
-        self.status_center_process = Process(
+        self.status_center_process = fedml.get_multiprocessing_context().Process(
             target=target_func, args=(
                 self.status_event, self.status_queue, self.status_sender_message_center_queue,
                 self.status_listener_message_center_queue
diff --git a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
index 5e530dbba7..a495a17dd0 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
@@ -7,6 +7,7 @@
 import traceback
 from abc import ABC, abstractmethod
 
+import fedml
 from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
 from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
 from .client_data_interface import FedMLClientDataInterface
@@ -259,7 +260,7 @@ def start_runner_process(
         client_runner.server_id = request_json.get("server_id", "0")
         self.run_extend_queue_list = self._generate_extend_queue_list()
         logging.info("start the runner process.")
-        self.run_process = Process(target=client_runner.run, args=(
+        self.run_process = fedml.get_multiprocessing_context().Process(target=client_runner.run, args=(
             self.run_process_event, self.run_process_completed_event, self.run_extend_queue_list,
             sender_message_queue, listener_message_queue, status_center_queue
         ))
diff --git a/python/fedml/core/mlops/mlops_device_perfs.py b/python/fedml/core/mlops/mlops_device_perfs.py
index 29183a6e78..13fca2e354 100644
--- a/python/fedml/core/mlops/mlops_device_perfs.py
+++ b/python/fedml/core/mlops/mlops_device_perfs.py
@@ -9,6 +9,7 @@
 import multiprocessing
 import psutil
 
+import fedml
 from fedml.computing.scheduler.comm_utils import sys_utils
 from .device_info_report_protocol import FedMLDeviceInfoReportProtocol
 from .mlops_utils import MLOpsUtils
@@ -76,52 +77,52 @@ def setup_realtime_stats_process(self, sys_args):
         self.device_realtime_stats_event.clear()
         perf_stats.device_realtime_stats_event = self.device_realtime_stats_event
 
-        self.device_realtime_stats_process = multiprocessing.Process(
+        self.device_realtime_stats_process = fedml.get_multiprocessing_context().Process(
             target=perf_stats.report_device_realtime_stats_entry,
             args=(self.device_realtime_stats_event, ROLE_DEVICE_INFO_REPORTER, self.is_client))
         self.device_realtime_stats_process.start()
 
         if self.enable_job_total_monitor:
-            self.job_total_monitor_process = multiprocessing.Process(
+            self.job_total_monitor_process = fedml.get_multiprocessing_context().Process(
                 target=perf_stats.report_device_realtime_stats_entry,
                 args=(self.device_realtime_stats_event, ROLE_DEVICE_JOB_TOTAL_MONITOR, self.is_client))
             self.job_total_monitor_process.start()
         else:
             if self.is_client:
-                self.monitor_endpoint_master_process = multiprocessing.Process(
+                self.monitor_endpoint_master_process = fedml.get_multiprocessing_context().Process(
                     target=perf_stats.report_device_realtime_stats_entry,
                     args=(self.device_realtime_stats_event, ROLE_ENDPOINT_MASTER))
                 self.monitor_endpoint_master_process.start()
 
-                self.monitor_run_slave_process = multiprocessing.Process(
+                self.monitor_run_slave_process = fedml.get_multiprocessing_context().Process(
                     target=perf_stats.report_device_realtime_stats_entry,
                     args=(self.device_realtime_stats_event, ROLE_RUN_SLAVE))
                 self.monitor_run_slave_process.start()
 
-                self.monitor_endpoint_logs_process = multiprocessing.Process(
+                self.monitor_endpoint_logs_process = fedml.get_multiprocessing_context().Process(
                     target=perf_stats.report_device_realtime_stats_entry,
                     args=(self.device_realtime_stats_event, ROLE_ENDPOINT_LOGS))
                 self.monitor_endpoint_logs_process.start()
 
                 # Register auto-scaler process
-                self.monitor_auto_scaler_process = multiprocessing.Process(
+                self.monitor_auto_scaler_process = fedml.get_multiprocessing_context().Process(
                     target=perf_stats.report_device_realtime_stats_entry,
                     args=(self.device_realtime_stats_event, ROLE_AUTO_SCALER))
                 self.monitor_auto_scaler_process.start()
 
                 # Register replica number report channel
-                self.monitor_replica_num_process = multiprocessing.Process(
+                self.monitor_replica_num_process = fedml.get_multiprocessing_context().Process(
                     target=perf_stats.report_device_realtime_stats_entry,
                     args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_NUM))
                 self.monitor_replica_num_process.start()
 
                 # Register replica performance report channel
-                self.monitor_replica_perf_process = multiprocessing.Process(
+                self.monitor_replica_perf_process = fedml.get_multiprocessing_context().Process(
                     target=perf_stats.report_device_realtime_stats_entry,
                     args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_PERF))
                 self.monitor_replica_perf_process.start()
             else:
-                self.monitor_run_master_process = multiprocessing.Process(
+                self.monitor_run_master_process = fedml.get_multiprocessing_context().Process(
                     target=perf_stats.report_device_realtime_stats_entry,
                     args=(self.device_realtime_stats_event, ROLE_RUN_MASTER))
                 self.monitor_run_master_process.start()
diff --git a/python/fedml/core/mlops/mlops_job_perfs.py b/python/fedml/core/mlops/mlops_job_perfs.py
index fe3d921558..b1f674a5c9 100644
--- a/python/fedml/core/mlops/mlops_job_perfs.py
+++ b/python/fedml/core/mlops/mlops_job_perfs.py
@@ -8,6 +8,7 @@
 import multiprocess as multiprocessing
 import psutil
 
+import fedml
 from .mlops_utils import MLOpsUtils
 from .system_stats import SysStats
 from ...core.distributed.communication.mqtt.mqtt_manager import MqttManager
@@ -139,8 +140,8 @@ def setup_job_stats_process(self, sys_args):
         perf_stats.job_stats_event = self.job_stats_event
         perf_stats.job_process_id_map = self.job_process_id_map
 
-        self.job_stats_process = multiprocessing.Process(target=perf_stats.report_job_stats_entry,
-                                                         args=(self.job_stats_event,))
+        self.job_stats_process = fedml.get_multiprocessing_context().Process(
+            target=perf_stats.report_job_stats_entry, args=(self.job_stats_event,))
         self.job_stats_process.start()
 
     def report_job_stats(self, sys_args):
diff --git a/python/fedml/core/mlops/mlops_runtime_log_daemon.py b/python/fedml/core/mlops/mlops_runtime_log_daemon.py
index ff06dc91b3..88d49b7a22 100644
--- a/python/fedml/core/mlops/mlops_runtime_log_daemon.py
+++ b/python/fedml/core/mlops/mlops_runtime_log_daemon.py
@@ -431,8 +431,8 @@ def start_log_processor(self, log_run_id, log_device_id, log_source=None, log_fi
             self.log_process_event_map[event_map_id] = multiprocessing.Event()
         self.log_process_event_map[event_map_id].clear()
         log_processor.log_process_event = self.log_process_event_map[event_map_id]
-        log_child_process = multiprocessing.Process(target=log_processor.log_process,
-                                                    args=(self.log_process_event_map[event_map_id],))
+        log_child_process = fedml.get_multiprocessing_context().Process(
+            target=log_processor.log_process, args=(self.log_process_event_map[event_map_id],))
         # process = threading.Thread(target=log_processor.log_process)
         # process.start()
         if log_child_process is not None:

From fd5af7ec6ff09d2c7e1d4051a33758353f17e987 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 12 Jun 2024 02:47:53 +0800
Subject: [PATCH 138/282] [CoreEngine] Use the fork start method on macOS and
 Linux to avoid the crash issue.

---
 python/fedml/__init__.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index c96d65adc5..e5ba1b304b 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -452,14 +452,21 @@ def _init_multiprocessing():
     """
     if platform.system() == "Windows":
         if multiprocessing.get_start_method() != "spawn":
-            # force all platforms (Windows/Linux/macOS) to use the same way (spawn) for multiprocessing
+            # force all platforms (Windows) to use the same way (spawn) for multiprocessing
             multiprocessing.set_start_method("spawn", force=True)
     else:
         if multiprocessing.get_start_method() != "fork":
-            # force all platforms (Windows/Linux/macOS) to use the same way (fork) for multiprocessing
+            # force all platforms (Linux/macOS) to use the same way (fork) for multiprocessing
             multiprocessing.set_start_method("fork", force=True)
 
 
+def get_multiprocessing_context():
+    if platform.system() == "Windows":
+        return multiprocessing.get_context("spawn")
+    else:
+        return multiprocessing.get_context("fork")
+
+
 def set_env_version(version):
     set_env_kv("FEDML_ENV_VERSION", version)
     load_env()

From 4a9622c439f4368a4111490aef8722145825c659 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@fedml.ai>
Date: Tue, 11 Jun 2024 15:53:08 -0400
Subject: [PATCH 139/282] Adding default HTTP connectivity type constant.
 Fixing minor typos and reducing condition checks.

---
 .../scheduler/comm_utils/network_util.py      | 16 +++++++++-------
 .../device_client_constants.py                |  1 +
 .../model_scheduler/device_model_cache.py     | 10 +++++++---
 .../model_scheduler/device_model_inference.py | 19 ++++++++++---------
 .../model_scheduler/worker_job_runner.py      |  4 ++--
 5 files changed, 29 insertions(+), 21 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/network_util.py b/python/fedml/computing/scheduler/comm_utils/network_util.py
index 13674840c5..48e478f23f 100644
--- a/python/fedml/computing/scheduler/comm_utils/network_util.py
+++ b/python/fedml/computing/scheduler/comm_utils/network_util.py
@@ -6,11 +6,13 @@ def return_this_device_connectivity_type() -> str:
     """
     Return -> "http" | "http_proxy" |"mqtt"
     """
-    if os.environ.get(ClientConstants.ENV_CONNECTION_TYPE_KEY) == ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP:
-        return ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP
-    elif os.environ.get(ClientConstants.ENV_CONNECTION_TYPE_KEY) == ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP_PROXY:
-        return ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP_PROXY
-    elif os.environ.get(ClientConstants.ENV_CONNECTION_TYPE_KEY) == ClientConstants.WORKER_CONNECTIVITY_TYPE_MQTT:
-        return ClientConstants.WORKER_CONNECTIVITY_TYPE_MQTT
+    # Get the environmental variable's value and convert to lower case.
+    env_conn_type = os.getenv(ClientConstants.ENV_CONNECTION_TYPE_KEY, "").lower()
+    if env_conn_type in [
+        ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP,
+        ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP_PROXY,
+        ClientConstants.WORKER_CONNECTIVITY_TYPE_MQTT
+    ]:
+        return env_conn_type
     else:
-        return ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP
+        return ClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
index d66c2f966a..2c06189d2e 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
@@ -101,6 +101,7 @@ class ClientConstants(object):
     WORKER_CONNECTIVITY_TYPE_HTTP = "http"
     WORKER_CONNECTIVITY_TYPE_HTTP_PROXY = "http_proxy"
     WORKER_CONNECTIVITY_TYPE_MQTT = "mqtt"
+    WORKER_CONNECTIVITY_TYPE_DEFAULT = WORKER_CONNECTIVITY_TYPE_HTTP
 
     MSG_MODELOPS_DEPLOYMENT_STATUS_INITIALIZING = "INITIALIZING"
     MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYING = "DEPLOYING"
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
index 30e4f460e6..6c90944277 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
@@ -344,9 +344,13 @@ def get_result_item_info(self, result_item):
             result_payload = result_item_json["result"]
         return device_id, replica_no, result_payload
 
-    def get_idle_device(self, end_point_id, end_point_name,
-                        model_name, model_version,
-                        check_end_point_status=True, limit_specific_model_version=False):
+    def get_idle_device(self,
+                        end_point_id,
+                        end_point_name,
+                        model_name,
+                        model_version,
+                        check_end_point_status=True,
+                        limit_specific_model_version=False):
         # Deprecated the model status logic, query directly from the deployment result list
         idle_device_list = list()
 
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index a9205ceb9a..3aeec67932 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -313,16 +313,17 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_
     # Found idle device (TODO: optimize the algorithm to search best device for inference)
     payload, idle_device = FEDML_MODEL_CACHE. \
         get_idle_device(end_point_id, end_point_name, in_model_name, in_model_version)
-    if payload is not None:
-        deployment_result = payload
-        model_name = deployment_result["model_name"]
-        model_version = deployment_result["model_version"]
-        model_id = deployment_result["model_id"]
-        end_point_id = deployment_result["end_point_id"]
-        inference_output_url = deployment_result["model_url"]
+    if payload:
+        model_name = payload["model_name"]
+        model_version = payload["model_version"]
+        model_id = payload["model_id"]
+        end_point_id = payload["end_point_id"]
+        inference_output_url = payload["model_url"]
+        connectivity_type = \
+            payload.get("connectivity_type",
+                        ClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT)
         url_parsed = urlparse(inference_output_url)
         inference_host = url_parsed.hostname
-        connectivity_type = deployment_result.get("connectivity_type", ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP)
     else:
         logging.info("not found idle deployment result")
 
@@ -335,7 +336,7 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_
 
 async def send_inference_request(idle_device, end_point_id, inference_url, input_list, output_list,
                                  inference_type="default",
-                                 connectivity_type=ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP):
+                                 connectivity_type=ClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT):
     request_timeout_sec = FEDML_MODEL_CACHE.get_endpoint_settings(end_point_id) \
         .get("request_timeout_sec", ClientConstants.INFERENCE_REQUEST_TIMEOUT)
 
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
index 9e178228b2..ef65e37904 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
@@ -446,7 +446,7 @@ def construct_deployment_results(self, end_point_name, device_id, model_status,
                                      model_id, model_name, model_inference_url,
                                      model_version, inference_port, inference_engine,
                                      model_metadata, model_config, replica_no=1,
-                                     connectivity=ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP):
+                                     connectivity=ClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT):
         deployment_results_payload = {"end_point_id": self.run_id, "end_point_name": end_point_name,
                                       "model_id": model_id, "model_name": model_name,
                                       "model_url": model_inference_url, "model_version": model_version,
@@ -481,7 +481,7 @@ def send_deployment_results(self, end_point_name, device_id, model_status,
                                 model_id, model_name, model_inference_url,
                                 model_version, inference_port, inference_engine,
                                 model_metadata, model_config, replica_no=1,
-                                connectivity=ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP):
+                                connectivity=ClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT):
         deployment_results_topic = "model_device/model_device/return_deployment_result/{}/{}".format(
             self.run_id, device_id)
 

From 653fe660df5a79db4375b4f7311290fc32d9cdac Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 12 Jun 2024 05:05:31 +0800
Subject: [PATCH 140/282] [CoreEngine] Make multiprocessing work on Windows,
 Linux, and macOS.

---
 python/fedml/__init__.py                      |   7 ++
 .../master/base_master_job_runner.py          |  45 ++++----
 .../model_scheduler/master_job_runner.py      |   3 +-
 .../scheduler_core/message_center.py          |  47 +++++---
 .../scheduler_base_job_runner.py              |  12 ++-
 .../scheduler/scheduler_core/status_center.py |  22 ++--
 .../scheduler/slave/base_slave_job_runner.py  |  16 ++-
 python/fedml/core/mlops/mlops_device_perfs.py | 100 +++++++++++++-----
 python/fedml/core/mlops/mlops_job_perfs.py    |  10 +-
 .../core/mlops/mlops_runtime_log_daemon.py    |   9 +-
 10 files changed, 190 insertions(+), 81 deletions(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index e5ba1b304b..bf07838e56 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -467,6 +467,13 @@ def get_multiprocessing_context():
         return multiprocessing.get_context("fork")
 
 
+def get_process(target=None, args=None):
+    if platform.system() == "Windows":
+        return multiprocessing.Process(target=target, args=args)
+    else:
+        return multiprocessing.get_context("fork").Process(target=target, args=args)
+
+
 def set_env_version(version):
     set_env_kv("FEDML_ENV_VERSION", version)
     load_env()
diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py
index 1383c9058c..fe2d426af4 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py
@@ -19,7 +19,6 @@
 from fedml.utils.debugging import debug
 from ..scheduler_core.status_center import JobStatus
 from ..scheduler_core.compute_cache_manager import ComputeCacheManager
-from multiprocessing import Process, Queue
 from ..scheduler_core.general_constants import GeneralConstants
 from ..scheduler_core.scheduler_base_job_runner import FedMLSchedulerBaseJobRunner, RunnerError, RunnerCompletedError
 from abc import ABC, abstractmethod
@@ -43,13 +42,13 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id
             is_master_runner=True
         )
 
-        self.run_edge_id_status_queue = Queue()
-        self.run_metrics_queue = Queue()
-        self.run_events_queue = Queue()
-        self.run_artifacts_queue = Queue()
-        self.run_logs_queue = Queue()
-        self.run_edge_device_info_queue = Queue()
-        self.run_edge_device_info_global_queue = Queue()
+        self.run_edge_id_status_queue = multiprocessing.Manager().Queue(-1)
+        self.run_metrics_queue = multiprocessing.Manager().Queue(-1)
+        self.run_events_queue = multiprocessing.Manager().Queue(-1)
+        self.run_artifacts_queue = multiprocessing.Manager().Queue(-1)
+        self.run_logs_queue = multiprocessing.Manager().Queue(-1)
+        self.run_edge_device_info_queue = multiprocessing.Manager().Queue(-1)
+        self.run_edge_device_info_global_queue = multiprocessing.Manager().Queue(-1)
         self.run_extend_queue_list = None
         self.async_check_timeout = 0
         self.enable_async_cluster = False
@@ -425,14 +424,24 @@ def start_runner_process(
         server_runner.edge_id_status_queue = self.run_edge_id_status_queue
         server_runner.edge_device_info_queue = self.run_edge_device_info_queue
         self.run_extend_queue_list = self._generate_extend_queue_list()
-        self.run_process = fedml.get_multiprocessing_context().Process(
-            target=server_runner.run if not is_server_job else server_runner.run_server_job, args=(
-                self.run_process_event, self.run_process_completed_event, self.run_edge_id_status_queue,
-                self.run_edge_device_info_queue, self.run_metrics_queue, self.run_events_queue,
-                self.run_artifacts_queue, self.run_logs_queue, self.run_edge_device_info_global_queue,
-                self.run_extend_queue_list, sender_message_queue, listener_message_queue,  status_center_queue
+        if platform.system() == "Windows":
+            self.run_process = multiprocessing.Process(
+                target=server_runner.run if not is_server_job else server_runner.run_server_job, args=(
+                    self.run_process_event, self.run_process_completed_event, self.run_edge_id_status_queue,
+                    self.run_edge_device_info_queue, self.run_metrics_queue, self.run_events_queue,
+                    self.run_artifacts_queue, self.run_logs_queue, self.run_edge_device_info_global_queue,
+                    self.run_extend_queue_list, sender_message_queue, listener_message_queue,  status_center_queue
+                )
+            )
+        else:
+            self.run_process = fedml.get_process(
+                target=server_runner.run if not is_server_job else server_runner.run_server_job, args=(
+                    self.run_process_event, self.run_process_completed_event, self.run_edge_id_status_queue,
+                    self.run_edge_device_info_queue, self.run_metrics_queue, self.run_events_queue,
+                    self.run_artifacts_queue, self.run_logs_queue, self.run_edge_device_info_global_queue,
+                    self.run_extend_queue_list, sender_message_queue, listener_message_queue,  status_center_queue
+                )
             )
-        )
         self.run_process.start()
         ServerConstants.save_run_process(run_id, self.run_process.pid)
         return self.run_process
@@ -444,7 +453,7 @@ def put_run_edge_device_info_to_queue(self, run_id, edge_id, device_info):
         if int(edge_id) in edge_ids or str(edge_id) in edge_ids:
             run_id_str = str(run_id)
             if self.run_edge_device_info_queue is None:
-                self.run_edge_device_info_queue = Queue()
+                self.run_edge_device_info_queue = multiprocessing.Manager().Queue(-1)
             self.run_edge_device_info_queue.put(device_info)
 
     def should_continue_run_job(self, run_id):
@@ -572,7 +581,7 @@ def callback_run_logs(self, topic, payload):
         run_id = str(topic).split('/')[-1]
         run_id_str = str(run_id)
         if self.run_logs_queue is None:
-            self.run_logs_queue = Queue()
+            self.run_logs_queue = multiprocessing.Manager().Queue(-1)
         self.run_logs_queue.put(payload)
 
     def callback_run_metrics(self, topic, payload):
@@ -580,7 +589,7 @@ def callback_run_metrics(self, topic, payload):
         run_id = str(topic).split('/')[-1]
         run_id_str = str(run_id)
         if self.run_metrics_queue is None:
-            self.run_metrics_queue = Queue()
+            self.run_metrics_queue = multiprocessing.Manager().Queue(-1)
         self.run_metrics_queue.put(payload)
 
     # def send_training_request_to_edges(self, active_edge_info_dict):
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index a10bd2c559..e504ded561 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -6,7 +6,6 @@
 import queue
 import traceback
 from abc import ABC
-from multiprocessing import Queue
 
 import fedml
 from fedml.core.mlops import MLOpsRuntimeLog, MLOpsConfigs
@@ -50,7 +49,7 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id
         self.replica_controller = None
         self.deployed_replica_payload = None
         self.slave_deployment_results_map = dict()
-        self.deployment_result_queue = Queue()
+        self.deployment_result_queue = multiprocessing.Manager().Queue(-1)
         self.is_fresh_endpoint = True
 
     # Override
diff --git a/python/fedml/computing/scheduler/scheduler_core/message_center.py b/python/fedml/computing/scheduler/scheduler_core/message_center.py
index 74510c6939..2bfa3b514f 100755
--- a/python/fedml/computing/scheduler/scheduler_core/message_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/message_center.py
@@ -1,12 +1,12 @@
 import json
 import logging
 import os
+import platform
 import threading
 import time
 import traceback
 import uuid
 import multiprocessing
-from multiprocessing import Process, Queue
 import queue
 from os.path import expanduser
 
@@ -133,17 +133,25 @@ def get_sender_message_queue(self):
         return self.sender_message_queue
 
     def start_sender(self, message_center_name=None):
-        self.sender_message_queue = Queue()
+        self.sender_message_queue = multiprocessing.Manager().Queue(-1)
         self.message_event = multiprocessing.Event()
         self.message_event.clear()
         message_center = FedMLMessageCenter(agent_config=self.sender_agent_config,
                                             sender_message_queue=self.sender_message_queue)
-        self.message_center_process = fedml.get_multiprocessing_context().Process(
-            target=message_center.run_sender, args=(
-                self.message_event, self.sender_message_queue,
-                message_center_name
+        if platform.system() == "Windows":
+            self.message_center_process = multiprocessing.Process(
+                target=message_center.run_sender, args=(
+                    self.message_event, self.sender_message_queue,
+                    message_center_name
+                )
+            )
+        else:
+            self.message_center_process = fedml.get_process(
+                target=message_center.run_sender, args=(
+                    self.message_event, self.sender_message_queue,
+                    message_center_name
+                )
             )
-        )
         self.message_center_process.start()
 
     def stop(self):
@@ -296,7 +304,7 @@ def get_listener_message_queue(self):
         return self.listener_message_queue
 
     def setup_listener_message_queue(self):
-        self.listener_message_queue = Queue()
+        self.listener_message_queue = multiprocessing.Manager().Queue(-1)
 
     def start_listener(
             self, sender_message_queue=None, listener_message_queue=None,
@@ -307,7 +315,7 @@ def start_listener(
 
         if listener_message_queue is None:
             if self.listener_message_queue is None:
-                self.listener_message_queue = Queue()
+                self.listener_message_queue = multiprocessing.Manager().Queue(-1)
         else:
             self.listener_message_queue = listener_message_queue
         self.listener_message_event = multiprocessing.Event()
@@ -315,13 +323,22 @@ def start_listener(
         self.listener_agent_config = agent_config
         message_runner = self.get_message_runner()
         message_runner.listener_agent_config = agent_config
-        self.listener_message_center_process = fedml.get_multiprocessing_context().Process(
-            target=message_runner.run_listener_dispatcher, args=(
-                self.listener_message_event, self.listener_message_queue,
-                self.listener_handler_funcs, sender_message_queue,
-                message_center_name, extra_queues
+        if platform.system() == "Windows":
+            self.listener_message_center_process = multiprocessing.Process(
+                target=message_runner.run_listener_dispatcher, args=(
+                    self.listener_message_event, self.listener_message_queue,
+                    self.listener_handler_funcs, sender_message_queue,
+                    message_center_name, extra_queues
+                )
+            )
+        else:
+            self.listener_message_center_process = fedml.get_process(
+                target=message_runner.run_listener_dispatcher, args=(
+                    self.listener_message_event, self.listener_message_queue,
+                    self.listener_handler_funcs, sender_message_queue,
+                    message_center_name, extra_queues
+                )
             )
-        )
         self.listener_message_center_process.start()
 
     def check_listener_message_stop_event(self):
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
index 44562c65f0..80de5c5b18 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -210,9 +210,15 @@ def retrieve_and_unzip_package(self, package_name, package_url):
         # Open a process to download the package so that we can avoid the request is blocked and check the timeout.
         from multiprocessing import Process
         completed_event = multiprocessing.Event()
-        info_queue = multiprocessing.Queue()
-        download_process = fedml.get_multiprocessing_context().Process(target=self.download_package_proc,
-                                   args=(package_url, local_package_file, completed_event, info_queue))
+        info_queue = multiprocessing.Manager().Queue(-1)
+        if platform.system() == "Windows":
+            download_process = multiprocessing.Process(
+                target=self.download_package_proc,
+                args=(package_url, local_package_file, completed_event, info_queue))
+        else:
+            download_process = fedml.get_process(
+                target=self.download_package_proc,
+                args=(package_url, local_package_file, completed_event, info_queue))
         download_process.start()
         allowed_block_download_time = 60
         download_finished = False
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py
index 0897f94c19..7e0cf1f98f 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py
@@ -1,9 +1,9 @@
 import logging
+import platform
 import time
 
 from enum import Enum, unique
 import multiprocessing
-from multiprocessing import Process, Queue
 import queue
 
 import fedml
@@ -108,7 +108,7 @@ def get_status_runner(self):
 
     def start_status_center(self, sender_message_center_queue=None,
                             listener_message_center_queue=None, is_slave_agent=False):
-        self.status_queue = Queue()
+        self.status_queue = multiprocessing.Manager().Queue(-1)
         self.status_event = multiprocessing.Event()
         self.status_event.clear()
         self.status_sender_message_center_queue = sender_message_center_queue
@@ -116,12 +116,20 @@ def start_status_center(self, sender_message_center_queue=None,
         self.status_runner = self.get_status_runner()
         target_func = self.status_runner.run_status_dispatcher if not is_slave_agent else \
             self.status_runner.run_status_dispatcher_in_slave
-        self.status_center_process = fedml.get_multiprocessing_context().Process(
-            target=target_func, args=(
-                self.status_event, self.status_queue, self.status_sender_message_center_queue,
-                self.status_listener_message_center_queue
+        if platform.system() == "Windows":
+            self.status_center_process = multiprocessing.Process(
+                target=target_func, args=(
+                    self.status_event, self.status_queue, self.status_sender_message_center_queue,
+                    self.status_listener_message_center_queue
+                )
+            )
+        else:
+            self.status_center_process = fedml.get_process(
+                target=target_func, args=(
+                    self.status_event, self.status_queue, self.status_sender_message_center_queue,
+                    self.status_listener_message_center_queue
+                )
             )
-        )
 
         self.status_center_process.start()
 
diff --git a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
index a495a17dd0..8876fc7e39 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
@@ -260,9 +260,17 @@ def start_runner_process(
         client_runner.server_id = request_json.get("server_id", "0")
         self.run_extend_queue_list = self._generate_extend_queue_list()
         logging.info("start the runner process.")
-        self.run_process = fedml.get_multiprocessing_context().Process(target=client_runner.run, args=(
-            self.run_process_event, self.run_process_completed_event, self.run_extend_queue_list,
-            sender_message_queue, listener_message_queue, status_center_queue
-        ))
+
+        if platform.system() == "Windows":
+            self.run_process = multiprocessing.Process(
+                target=client_runner.run, args=(
+                    self.run_process_event, self.run_process_completed_event, self.run_extend_queue_list,
+                    sender_message_queue, listener_message_queue, status_center_queue
+                ))
+        else:
+            self.run_process = fedml.get_process(target=client_runner.run, args=(
+                self.run_process_event, self.run_process_completed_event, self.run_extend_queue_list,
+                sender_message_queue, listener_message_queue, status_center_queue
+            ))
         self.run_process.start()
         return self.run_process
diff --git a/python/fedml/core/mlops/mlops_device_perfs.py b/python/fedml/core/mlops/mlops_device_perfs.py
index 13fca2e354..d0f1f3898f 100644
--- a/python/fedml/core/mlops/mlops_device_perfs.py
+++ b/python/fedml/core/mlops/mlops_device_perfs.py
@@ -1,6 +1,7 @@
 import json
 import logging
 import os
+import platform
 import time
 import traceback
 import uuid
@@ -77,54 +78,99 @@ def setup_realtime_stats_process(self, sys_args):
         self.device_realtime_stats_event.clear()
         perf_stats.device_realtime_stats_event = self.device_realtime_stats_event
 
-        self.device_realtime_stats_process = fedml.get_multiprocessing_context().Process(
-            target=perf_stats.report_device_realtime_stats_entry,
-            args=(self.device_realtime_stats_event, ROLE_DEVICE_INFO_REPORTER, self.is_client))
+        if platform.system() == "Windows":
+            self.device_realtime_stats_process = multiprocessing.Process(
+                target=perf_stats.report_device_realtime_stats_entry,
+                args=(self.device_realtime_stats_event, ROLE_DEVICE_INFO_REPORTER, self.is_client))
+        else:
+            self.device_realtime_stats_process = fedml.get_process(
+                target=perf_stats.report_device_realtime_stats_entry,
+                args=(self.device_realtime_stats_event, ROLE_DEVICE_INFO_REPORTER, self.is_client))
         self.device_realtime_stats_process.start()
 
         if self.enable_job_total_monitor:
-            self.job_total_monitor_process = fedml.get_multiprocessing_context().Process(
-                target=perf_stats.report_device_realtime_stats_entry,
-                args=(self.device_realtime_stats_event, ROLE_DEVICE_JOB_TOTAL_MONITOR, self.is_client))
+            if platform.system() == "Windows":
+                self.job_total_monitor_process = multiprocessing.Process(
+                    target=perf_stats.report_device_realtime_stats_entry,
+                    args=(self.device_realtime_stats_event, ROLE_DEVICE_JOB_TOTAL_MONITOR, self.is_client))
+            else:
+                self.job_total_monitor_process = fedml.get_process(
+                    target=perf_stats.report_device_realtime_stats_entry,
+                    args=(self.device_realtime_stats_event, ROLE_DEVICE_JOB_TOTAL_MONITOR, self.is_client))
             self.job_total_monitor_process.start()
         else:
             if self.is_client:
-                self.monitor_endpoint_master_process = fedml.get_multiprocessing_context().Process(
-                    target=perf_stats.report_device_realtime_stats_entry,
-                    args=(self.device_realtime_stats_event, ROLE_ENDPOINT_MASTER))
+                if platform.system() == "Windows":
+                    self.monitor_endpoint_master_process = multiprocessing.Process(
+                        target=perf_stats.report_device_realtime_stats_entry,
+                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_MASTER))
+                else:
+                    self.monitor_endpoint_master_process = fedml.get_process(
+                        target=perf_stats.report_device_realtime_stats_entry,
+                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_MASTER))
                 self.monitor_endpoint_master_process.start()
 
-                self.monitor_run_slave_process = fedml.get_multiprocessing_context().Process(
-                    target=perf_stats.report_device_realtime_stats_entry,
-                    args=(self.device_realtime_stats_event, ROLE_RUN_SLAVE))
+                if platform.system() == "Windows":
+                    self.monitor_run_slave_process = multiprocessing.Process(
+                        target=perf_stats.report_device_realtime_stats_entry,
+                        args=(self.device_realtime_stats_event, ROLE_RUN_SLAVE))
+                else:
+                    self.monitor_run_slave_process = fedml.get_process(
+                        target=perf_stats.report_device_realtime_stats_entry,
+                        args=(self.device_realtime_stats_event, ROLE_RUN_SLAVE))
                 self.monitor_run_slave_process.start()
 
-                self.monitor_endpoint_logs_process = fedml.get_multiprocessing_context().Process(
-                    target=perf_stats.report_device_realtime_stats_entry,
-                    args=(self.device_realtime_stats_event, ROLE_ENDPOINT_LOGS))
+                if platform.system() == "Windows":
+                    self.monitor_endpoint_logs_process = multiprocessing.Process(
+                        target=perf_stats.report_device_realtime_stats_entry,
+                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_LOGS))
+                else:
+                    self.monitor_endpoint_logs_process = fedml.get_process(
+                        target=perf_stats.report_device_realtime_stats_entry,
+                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_LOGS))
                 self.monitor_endpoint_logs_process.start()
 
                 # Register auto-scaler process
-                self.monitor_auto_scaler_process = fedml.get_multiprocessing_context().Process(
-                    target=perf_stats.report_device_realtime_stats_entry,
-                    args=(self.device_realtime_stats_event, ROLE_AUTO_SCALER))
+                if platform.system() == "Windows":
+                    self.monitor_auto_scaler_process = multiprocessing.Process(
+                        target=perf_stats.report_device_realtime_stats_entry,
+                        args=(self.device_realtime_stats_event, ROLE_AUTO_SCALER))
+                else:
+                    self.monitor_auto_scaler_process = fedml.get_process(
+                        target=perf_stats.report_device_realtime_stats_entry,
+                        args=(self.device_realtime_stats_event, ROLE_AUTO_SCALER))
                 self.monitor_auto_scaler_process.start()
 
                 # Register replica number report channel
-                self.monitor_replica_num_process = fedml.get_multiprocessing_context().Process(
-                    target=perf_stats.report_device_realtime_stats_entry,
-                    args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_NUM))
+                if platform.system() == "Windows":
+                    self.monitor_replica_num_process = multiprocessing.Process(
+                        target=perf_stats.report_device_realtime_stats_entry,
+                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_NUM))
+                else:
+                    self.monitor_replica_num_process = fedml.get_process(
+                        target=perf_stats.report_device_realtime_stats_entry,
+                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_NUM))
                 self.monitor_replica_num_process.start()
 
                 # Register replica performance report channel
-                self.monitor_replica_perf_process = fedml.get_multiprocessing_context().Process(
-                    target=perf_stats.report_device_realtime_stats_entry,
-                    args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_PERF))
+                if platform.system() == "Windows":
+                    self.monitor_replica_perf_process = multiprocessing.Process(
+                        target=perf_stats.report_device_realtime_stats_entry,
+                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_PERF))
+                else:
+                    self.monitor_replica_perf_process = fedml.get_process(
+                        target=perf_stats.report_device_realtime_stats_entry,
+                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_PERF))
                 self.monitor_replica_perf_process.start()
             else:
-                self.monitor_run_master_process = fedml.get_multiprocessing_context().Process(
-                    target=perf_stats.report_device_realtime_stats_entry,
-                    args=(self.device_realtime_stats_event, ROLE_RUN_MASTER))
+                if platform.system() == "Windows":
+                    self.monitor_run_master_process = multiprocessing.Process(
+                        target=perf_stats.report_device_realtime_stats_entry,
+                        args=(self.device_realtime_stats_event, ROLE_RUN_MASTER))
+                else:
+                    self.monitor_run_master_process = fedml.get_process(
+                        target=perf_stats.report_device_realtime_stats_entry,
+                        args=(self.device_realtime_stats_event, ROLE_RUN_MASTER))
                 self.monitor_run_master_process.start()
 
     def report_device_realtime_stats_entry(self, sys_event, role, is_client=False):
diff --git a/python/fedml/core/mlops/mlops_job_perfs.py b/python/fedml/core/mlops/mlops_job_perfs.py
index b1f674a5c9..e834ed4a0c 100644
--- a/python/fedml/core/mlops/mlops_job_perfs.py
+++ b/python/fedml/core/mlops/mlops_job_perfs.py
@@ -1,6 +1,7 @@
 import json
 import logging
 import os
+import platform
 import time
 import traceback
 import uuid
@@ -139,9 +140,12 @@ def setup_job_stats_process(self, sys_args):
         self.job_stats_event.clear()
         perf_stats.job_stats_event = self.job_stats_event
         perf_stats.job_process_id_map = self.job_process_id_map
-
-        self.job_stats_process = fedml.get_multiprocessing_context().Process(
-            target=perf_stats.report_job_stats_entry, args=(self.job_stats_event,))
+        if platform.system() == "Windows":
+            self.job_stats_process = multiprocessing.Process(
+                target=perf_stats.report_job_stats_entry, args=(self.job_stats_event,))
+        else:
+            self.job_stats_process = fedml.get_process(
+                target=perf_stats.report_job_stats_entry, args=(self.job_stats_event,))
         self.job_stats_process.start()
 
     def report_job_stats(self, sys_args):
diff --git a/python/fedml/core/mlops/mlops_runtime_log_daemon.py b/python/fedml/core/mlops/mlops_runtime_log_daemon.py
index 88d49b7a22..f72d88cfea 100644
--- a/python/fedml/core/mlops/mlops_runtime_log_daemon.py
+++ b/python/fedml/core/mlops/mlops_runtime_log_daemon.py
@@ -1,6 +1,7 @@
 import argparse
 import logging
 import os
+import platform
 import shutil
 import threading
 import time
@@ -431,8 +432,12 @@ def start_log_processor(self, log_run_id, log_device_id, log_source=None, log_fi
             self.log_process_event_map[event_map_id] = multiprocessing.Event()
         self.log_process_event_map[event_map_id].clear()
         log_processor.log_process_event = self.log_process_event_map[event_map_id]
-        log_child_process = fedml.get_multiprocessing_context().Process(
-            target=log_processor.log_process, args=(self.log_process_event_map[event_map_id],))
+        if platform.system() == "Windows":
+            log_child_process = multiprocessing.Process(
+                target=log_processor.log_process, args=(self.log_process_event_map[event_map_id],))
+        else:
+            log_child_process = fedml.get_process(
+                target=log_processor.log_process, args=(self.log_process_event_map[event_map_id],))
         # process = threading.Thread(target=log_processor.log_process)
         # process.start()
         if log_child_process is not None:

From 23d88fc7dcfdbe9f9b319a08b72b39f0c58fdbb3 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Tue, 11 Jun 2024 11:48:20 -0700
Subject: [PATCH 141/282] [Deploy] Remove unnecessary logic.

---
 .../device_model_deployment.py                | 232 +-----------------
 .../model_scheduler/master_job_runner.py      |   1 -
 .../model_scheduler/worker_job_runner.py      |  16 +-
 3 files changed, 10 insertions(+), 239 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index 1876373d25..5d3ba9873d 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -1,12 +1,13 @@
+import fedml
+
 import logging
 import os
-import pickle
-import platform
 import shutil
 import time
 import traceback
 import yaml
 import datetime
+import docker
 
 import requests
 import torch
@@ -15,27 +16,18 @@
 
 import collections.abc
 
-import fedml
 from fedml.computing.scheduler.comm_utils import sys_utils, security_utils
-from fedml.computing.scheduler.comm_utils.container_utils import ContainerUtils
 from fedml.computing.scheduler.comm_utils.hardware_utils import HardwareUtil
 from fedml.computing.scheduler.comm_utils.job_utils import JobRunnerUtils
-
-for type_name in collections.abc.__all__:
-    setattr(collections, type_name, getattr(collections.abc, type_name))
-
 from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants
 from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants
-import io
-
-import docker
-from ..scheduler_core.compute_cache_manager import ComputeCacheManager
+from fedml.computing.scheduler.model_scheduler.device_model_cache import FedMLModelCache
 from ..scheduler_core.compute_utils import ComputeUtils
 from ..comm_utils.container_utils import ContainerUtils
-
 from .device_http_inference_protocol import FedMLHttpInference
 
-from fedml.computing.scheduler.model_scheduler.device_model_cache import FedMLModelCache
+for type_name in collections.abc.__all__:
+    setattr(collections, type_name, getattr(collections.abc, type_name))
 
 no_real_gpu_allocation = None
 
@@ -432,8 +424,6 @@ def should_exit_logs(end_point_id, model_id, cmd_type, model_name, inference_eng
     if cmd_type == ClientConstants.CMD_TYPE_RUN_DEFAULT_SERVER:
         # TODO: Exited Quickly if the container is Exited or Removed
         # If the container has exited, return True, means we should exit the logs
-        # container_name = "{}".format(ClientConstants.FEDML_DEFAULT_SERVER_CONTAINER_NAME_PREFIX) + "__" + \
-        #                             security_utils.get_content_hash(model_name)
         try:
             inference_output_url, model_version, model_metadata, model_config = \
                 get_model_info(model_name, inference_engine, inference_port, infer_host,
@@ -554,8 +544,6 @@ def log_deployment_result(end_point_id, model_id, cmd_container_name, cmd_type,
 
 def is_client_inference_container_ready(infer_url_host, inference_http_port, inference_model_name, local_infer_url,
                                         inference_type="default", model_version="", request_input_example=None):
-    # logging.info(f"Inference type: {inference_type}, infer_url_host {infer_url_host}, \
-    #               inference_http_port: {inference_http_port}, local_infer_url {local_infer_url}")
 
     if inference_type == "default":
         default_client_container_ready_url = "http://{}:{}/ready".format("0.0.0.0", inference_http_port)
@@ -631,211 +619,5 @@ def run_http_inference_with_curl_request(inference_url, inference_input_list, in
         inference_type=inference_type, engine_type=engine_type, timeout=timeout)
 
 
-def convert_model_to_onnx(
-        torch_model, output_path: str, dummy_input_list, input_size: int, input_is_tensor=True
-) -> None:
-    from collections import OrderedDict
-    import torch
-    from torch.onnx import TrainingMode
-
-    torch.onnx.export(torch_model,  # model being run
-                      dummy_input_list if input_is_tensor else tuple(dummy_input_list),
-                      # model input (or a tuple for multiple inputs)
-                      f=output_path,  # where to save the model (can be a file or file-like object)
-                      export_params=True,  # store the trained parameter weights inside the model file
-                      opset_version=11,  # the ONNX version to export the model to
-                      do_constant_folding=False,  # whether to execute constant folding for optimization
-                      input_names=["input1", "input2"],
-                      # the model's input names
-                      output_names=['output'],  # the model's output names
-                      training=TrainingMode.EVAL,
-                      verbose=True,
-                      dynamic_axes={"input1": {0: "batch_size"},
-                                    "input2": {0: "batch_size"},
-                                    "output": {0: "batch_size"}}
-                      )
-
-
-def test_start_triton_server(model_serving_dir):
-    sudo_prefix = "sudo "
-    sys_name = platform.system()
-    if sys_name == "Darwin":
-        sudo_prefix = ""
-        gpu_attach_cmd = ""
-
-    triton_server_container_name = "{}".format(ClientConstants.FEDML_TRITON_SERVER_CONTAINER_NAME_PREFIX)
-    triton_server_cmd = "{}docker stop {}; {}docker rm {}; {}docker run --name {} {} -p{}:8000 " \
-                        "-p{}:8001 -p{}:8002 " \
-                        "--shm-size {} " \
-                        "-v {}:/models {} " \
-                        "bash -c \"pip install transformers && tritonserver --strict-model-config=false " \
-                        "--model-control-mode=poll --repository-poll-secs={} " \
-                        "--model-repository=/models\" ".format(sudo_prefix, triton_server_container_name,
-                                                               sudo_prefix, triton_server_container_name,
-                                                               sudo_prefix, triton_server_container_name,
-                                                               gpu_attach_cmd,
-                                                               ClientConstants.INFERENCE_HTTP_PORT,
-                                                               ClientConstants.INFERENCE_GRPC_PORT,
-                                                               8002,
-                                                               "4096m",
-                                                               model_serving_dir,
-                                                               ClientConstants.INFERENCE_SERVER_IMAGE,
-                                                               ClientConstants.FEDML_MODEL_SERVING_REPO_SCAN_INTERVAL)
-    logging.info("Run triton inference server: {}".format(triton_server_cmd))
-    triton_server_process = ClientConstants.exec_console_with_script(triton_server_cmd,
-                                                                     should_capture_stdout=False,
-                                                                     should_capture_stderr=False,
-                                                                     no_sys_out_err=True)
-
-
-def test_convert_pytorch_model_to_onnx(model_net_file, model_bin_file, model_name, model_in_params):
-    torch_model = torch.jit.load(model_net_file)
-    with open(model_bin_file, 'rb') as model_pkl_file:
-        model_state_dict = pickle.load(model_pkl_file)
-        torch_model.load_state_dict(model_state_dict)
-        torch_model.eval()
-
-    input_size = model_in_params["input_size"]
-    input_types = model_in_params["input_types"]
-
-    dummy_input_list = []
-    for index, input_i in enumerate(input_size):
-        if input_types[index] == "int":
-            this_input = torch.tensor(torch.randint(0, 1, input_i))
-        else:
-            this_input = torch.tensor(torch.zeros(input_i))
-        dummy_input_list.append(this_input)
-
-    onnx_model_dir = os.path.join(ClientConstants.get_model_cache_dir(),
-                                  ClientConstants.FEDML_CONVERTED_MODEL_DIR_NAME,
-                                  model_name, ClientConstants.INFERENCE_MODEL_VERSION)
-    if not os.path.exists(onnx_model_dir):
-        os.makedirs(onnx_model_dir, exist_ok=True)
-    onnx_model_path = os.path.join(onnx_model_dir, "model.onnx")
-
-    convert_model_to_onnx(torch_model, onnx_model_path, dummy_input_list, input_size,
-                          input_is_tensor=True)
-
-    model_serving_dir = os.path.join(ClientConstants.get_model_cache_dir(),
-                                     ClientConstants.FEDML_CONVERTED_MODEL_DIR_NAME)
-    return model_serving_dir
-
-
-def start_gpu_model_load_process():
-    from multiprocessing import Process
-    import time
-    process = Process(target=load_gpu_model_to_cpu_device)
-    process.start()
-    while True:
-        time.sleep(1)
-
-
-def load_gpu_model_to_cpu_device():
-    import pickle
-    import io
-    import torch
-
-    class CPU_Unpickler(pickle.Unpickler):
-        def find_class(self, module, name):
-            if module == 'torch.storage' and name == '_load_from_bytes':
-                return lambda b: torch.load(io.BytesIO(b), map_location='cpu')
-            else:
-                return super().find_class(module, name)
-
-    model_file = "/home/fedml/.fedml/fedml-client/fedml/models/theta_rec_auc_81_single_label/theta_rec_auc_81_single_label"
-    with open(model_file, "rb") as model_pkl_file:
-        if not torch.cuda.is_available():
-            model = CPU_Unpickler(model_pkl_file).load()
-            if model is None:
-                print("Failed to load gpu model to cpu device")
-            else:
-                print("Succeeded to load gpu model to cpu device")
-
-
 if __name__ == "__main__":
-    start_gpu_model_load_process()
-
-    model_serving_dir = test_convert_pytorch_model_to_onnx("./sample-open-training-model-net",
-                                                           "./sample-open-training-model",
-                                                           "rec-model",
-                                                           {"input_size": [[1, 24], [1, 2]],
-                                                            "input_types": ["int", "float"]})
-
-    test_start_triton_server(model_serving_dir)
-
-    # input_data = {"model_version": "v0-Sun Feb 05 12:17:16 GMT 2023",
-    #               "model_name": "model_414_45_open-model-test_v0-Sun-Feb-05-12-17-16-GMT-2023",
-    #               # "data": "file:///Users/alexliang/fedml_data/mnist-image.png",
-    #               "data": "https://raw.githubusercontent.com/niyazed/triton-mnist-example/master/images/sample_image.png",
-    #               "end_point_id": 414, "model_id": 45, "token": "a09a18a14c4c4d89a8d5f9515704c073"}
-    #
-    # data_list = list()
-    # data_list.append(input_data["data"])
-    # run_http_inference_with_lib_http_api_with_image_data(input_data["model_name"],
-    #                                                      5001, 1, data_list, "")
-    #
-    #
-    # class LogisticRegression(torch.nn.Module):
-    #     def __init__(self, input_dim, output_dim):
-    #         super(LogisticRegression, self).__init__()
-    #         self.linear = torch.nn.Linear(input_dim, output_dim)
-    #
-    #     def forward(self, x):
-    #         outputs = torch.sigmoid(self.linear(x))
-    #         return outputs
-    #
-    #
-    # model = LogisticRegression(28 * 28, 10)
-    # checkpoint = {'model': model}
-    # model_net_file = "/Users/alexliang/fedml-client/fedml/models/open-model-test/model-net.pt"
-    # torch.save(checkpoint, model_net_file)
-    #
-    # with open("/Users/alexliang/fedml-client/fedml/models/open-model-test/open-model-test", 'rb') as model_pkl_file:
-    #     model_params = pickle.load(model_pkl_file)
-    #     # torch.save(model_params, "/Users/alexliang/fedml-client/fedml/models/open-model-test/a.pt")
-    #     # model = torch.load("/Users/alexliang/fedml-client/fedml/models/open-model-test/a.pt")
-    #     loaded_checkpoint = torch.load(model_net_file)
-    #     loaded_model = loaded_checkpoint["model"]
-    #     loaded_model.load_state_dict(model_params)
-    #     for parameter in loaded_model.parameters():
-    #         parameter.requires_grad = False
-    #     loaded_model.eval()
-    #     input_names = {"x": 0}
-    #     convert_model_to_onnx(loaded_model, "/Users/alexliang/fedml-client/fedml/models/open-model-test/a.onnx",
-    #                           input_names, 28 * 28)
-
-    # parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-    # parser.add_argument("--cf", "-c", help="config file")
-    # parser.add_argument("--role", "-r", type=str, default="client", help="role")
-    # parser.add_argument("--model_storage_local_path", "-url", type=str, default="/home/ubuntu",
-    #                     help="model storage local path")
-    # parser.add_argument("--inference_model_name", "-n", type=str, default="fedml-model",
-    #                     help="inference model name")
-    # parser.add_argument("--inference_engine", "-engine", type=str, default="ONNX", help="inference engine")
-    # parser.add_argument("--inference_http_port", "-http", type=int, default=8000, help="inference http port")
-    # parser.add_argument("--inference_grpc_port", "-gprc", type=int, default=8001, help="inference grpc port")
-    # parser.add_argument("--inference_metric_port", "-metric", type=int, default=8002, help="inference metric port")
-    # parser.add_argument("--inference_use_gpu", "-gpu", type=str, default="gpu", help="inference use gpu")
-    # parser.add_argument("--inference_memory_size", "-mem", type=str, default="256m", help="inference memory size")
-    # parser.add_argument("--inference_convertor_image", "-convertor", type=str,
-    #                     default=ClientConstants.INFERENCE_CONVERTOR_IMAGE, help="inference convertor image")
-    # parser.add_argument("--inference_server_image", "-server", type=str,
-    #                     default=ClientConstants.INFERENCE_SERVER_IMAGE, help="inference server image")
-    # args = parser.parse_args()
-    # args.user = args.user
-    #
-    # pip_source_dir = os.path.dirname(__file__)
-    # __running_model_name, __inference_output_url, __model_version, __model_metadata, __model_config = \
-    #     start_deployment(
-    #         args.model_storage_local_path,
-    #         args.inference_model_name,
-    #         args.inference_engine,
-    #         args.inference_http_port,
-    #         args.inference_grpc_port,
-    #         args.inference_metric_port,
-    #         args.inference_use_gpu,
-    #         args.inference_memory_size,
-    #         args.inference_convertor_image,
-    #         args.inference_server_image)
-    # print("Model deployment results, running model name: {}, url: {}, model metadata: {}, model config: {}".format(
-    #     __running_model_name, __inference_output_url, __model_metadata, __model_config))
+    pass
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index b9b9b4c356..ef2c01c49d 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -453,7 +453,6 @@ def process_deployment_result_message(self, topic=None, payload=None):
             time.sleep(3)
             self.trigger_completed_event()
 
-
     def cleanup_runner_process(self, run_id):
         ServerConstants.cleanup_run_process(run_id, not_kill_subprocess=True)
 
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
index ef65e37904..8100707386 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
@@ -294,9 +294,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                         json.dumps(result_payload), replica_no=rank + 1)
 
                     logging.info(f"Deploy replica {rank + 1} / {prev_rank + 1 + op_num} successfully.")
-                    time.sleep(5)
 
-            time.sleep(1)
             self.status_reporter.run_id = self.run_id
             self.status_reporter.report_client_id_status(
                 self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
@@ -348,7 +346,8 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
 
                 # TODO (Raphael) check if this will allow another job to seize the gpu during high concurrency:
                 try:
-                    JobRunnerUtils.get_instance().release_partial_job_gpu(run_id, self.edge_id, replica_occupied_gpu_ids)
+                    JobRunnerUtils.get_instance().release_partial_job_gpu(
+                        run_id, self.edge_id, replica_occupied_gpu_ids)
                 except Exception as e:
                     if op == "rollback":
                         pass
@@ -395,7 +394,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                         JobRunnerUtils.get_instance().release_partial_job_gpu(
                             run_id, self.edge_id, replica_occupied_gpu_ids)
 
-                    result_payload = self.send_deployment_results(
+                    self.send_deployment_results(
                         end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
                         model_id, model_name, inference_output_url, inference_model_version, inference_port,
                         inference_engine, model_metadata, model_config)
@@ -496,15 +495,6 @@ def send_deployment_results(self, end_point_name, device_id, model_status,
         self.message_center.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload))
         return deployment_results_payload
 
-    def send_deployment_status(self, end_point_name, device_id,
-                               model_id, model_name, model_version,
-                               model_inference_url, model_status,
-                               inference_port=ClientConstants.MODEL_INFERENCE_DEFAULT_PORT,
-                               replica_no=1,     # start from 1
-                               ):
-        # Deprecated
-        pass
-
     def reset_devices_status(self, edge_id, status):
         self.status_reporter.run_id = self.run_id
         self.status_reporter.edge_id = edge_id

From e0ad9b5bef5bcea1eaefe3458a3d6b49aa399d46 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Tue, 11 Jun 2024 12:15:22 -0700
Subject: [PATCH 142/282] [Deploy] Remove unnecessary logic; Rename readiness
 check function; Forbid user-level control of host port.

---
 .../device_model_deployment.py                | 150 +++++-------------
 1 file changed, 40 insertions(+), 110 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index 5d3ba9873d..edd2ebea9a 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -68,6 +68,7 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
     num_gpus = gpu_per_replica
     gpu_ids, gpu_attach_cmd = None, ""
 
+    # Concatenate the model name
     running_model_name = ClientConstants.get_running_model_name(
         end_point_name, inference_model_name, model_version, end_point_id, model_id, edge_id=edge_id)
 
@@ -77,6 +78,7 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
         config = yaml.safe_load(file)
 
         # Resource related
+        inference_type = "default"
         use_gpu = config.get('use_gpu', True)
         num_gpus_frm_yml = config.get('num_gpus', None)
         if not use_gpu:
@@ -85,9 +87,7 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
             if num_gpus_frm_yml is not None:
                 num_gpus = int(num_gpus_frm_yml)
         usr_indicated_wait_time = config.get('deploy_timeout', 900)
-        usr_indicated_worker_port = config.get('worker_port', "")
-        if usr_indicated_worker_port == "":
-            usr_indicated_worker_port = os.environ.get("FEDML_WORKER_PORT", "")
+        usr_indicated_retry_cnt = max(int(usr_indicated_wait_time) // 10, 1)
         shm_size = config.get('shm_size', None)
         storage_opt = config.get('storage_opt', None)
         tmpfs = config.get('tmpfs', None)
@@ -96,17 +96,6 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
             cpus = int(cpus)
         memory = config.get('memory', None)
 
-        if usr_indicated_worker_port == "":
-            usr_indicated_worker_port = None
-        else:
-            usr_indicated_worker_port = int(usr_indicated_worker_port)
-
-        worker_port_env = os.environ.get("FEDML_WORKER_PORT", "")
-        worker_port_from_config = config.get('worker_port', "")
-        logging.info(f"usr_indicated_worker_port {usr_indicated_worker_port}, worker port env {worker_port_env}, "
-              f"worker port from config {worker_port_from_config}")
-
-        usr_indicated_retry_cnt = max(int(usr_indicated_wait_time) // 10, 1)
         inference_image_name = config.get('inference_image_name',
                                           ClientConstants.INFERENCE_SERVER_CUSTOME_IMAGE)
         image_pull_policy = config.get('image_pull_policy', SchedulerConstants.IMAGE_PULL_POLICY_IF_NOT_PRESENT)
@@ -144,6 +133,7 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
 
         # If using customized image, then bootstrap + job will be the entry point
         enable_custom_image = config.get("enable_custom_image", False)
+        # inference_type = "custom"
         customized_image_entry_cmd = \
             "/bin/bash /home/fedml/models_serving/fedml-deploy-bootstrap-entry-auto-gen.sh"
 
@@ -151,18 +141,7 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
         docker_registry_user_password = config.get("docker_registry_user_password", "")
         docker_registry = config.get("docker_registry", "")
 
-        port_inside_container = int(config.get("port_inside_container", 2345))
-        use_triton = config.get("use_triton", False)
-        if use_triton:
-            inference_type = "triton"
-        else:
-            inference_type = "default"
-
-    # Config check
-    if src_code_dir == "":
-        raise Exception("Please indicate source_code_dir in the fedml_model_config.yaml")
-    if relative_entry == "":
-        logging.warning("You missed main_entry in the fedml_model_config.yaml")
+        port_inside_container = int(config.get("port", 2345))
 
     # Request the GPU ids for the deployment
     if num_gpus > 0:
@@ -175,22 +154,10 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
             end_point_id, end_point_name, inference_model_name, edge_id, replica_rank+1, gpu_ids)
     logging.info("GPU ids allocated: {}".format(gpu_ids))
 
+    # Create the model serving dir if not exists
     model_serving_dir = ClientConstants.get_model_serving_dir()
     if not os.path.exists(model_serving_dir):
         os.makedirs(model_serving_dir, exist_ok=True)
-    converted_model_path = os.path.join(model_storage_local_path, ClientConstants.FEDML_CONVERTED_MODEL_DIR_NAME)
-    if os.path.exists(converted_model_path):
-        model_file_list = os.listdir(converted_model_path)
-        for model_file in model_file_list:
-            src_model_file = os.path.join(converted_model_path, model_file)
-            dst_model_file = os.path.join(model_serving_dir, model_file)
-            if os.path.isdir(src_model_file):
-                if not os.path.exists(dst_model_file):
-                    shutil.copytree(src_model_file, dst_model_file, copy_function=shutil.copy,
-                                    ignore_dangling_symlinks=True)
-            else:
-                if not os.path.exists(dst_model_file):
-                    shutil.copyfile(src_model_file, dst_model_file)
 
     if inference_engine != ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT:
         raise Exception(f"inference engine {inference_engine} is not supported")
@@ -228,13 +195,12 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
     logging.info(f"Start pulling the inference image {inference_image_name}... with policy {image_pull_policy}")
     ContainerUtils.get_instance().pull_image_with_policy(image_pull_policy, inference_image_name)
 
-    volumns = []
+    volumes = []
     binds = {}
     environment = {}
 
     # data_cache_dir mounting
-    assert type(data_cache_dir_input) == dict or type(data_cache_dir_input) == str
-    if type(data_cache_dir_input) == str:
+    if isinstance(data_cache_dir_input, str):
         # In this case, we mount to the same folder, if it has ~, we replace it with /home/fedml
         src_data_cache_dir, dst_data_cache_dir = "", ""
         if data_cache_dir_input != "":
@@ -253,28 +219,30 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
             if type(src_data_cache_dir) == str and src_data_cache_dir != "":
                 logging.info("Start copying the data cache to the container...")
                 if os.path.exists(src_data_cache_dir):
-                    volumns.append(src_data_cache_dir)
+                    volumes.append(src_data_cache_dir)
                     binds[src_data_cache_dir] = {
                         "bind": dst_data_cache_dir,
                         "mode": "rw"
                     }
                     environment["DATA_CACHE_FOLDER"] = dst_data_cache_dir
-    else:
+    elif isinstance(data_cache_dir_input, dict):
         for k, v in data_cache_dir_input.items():
             if os.path.exists(k):
-                volumns.append(v)
+                volumes.append(v)
                 binds[k] = {
                     "bind": v,
                     "mode": "rw"
                 }
             else:
                 logging.warning(f"{k} does not exist, skip mounting it to the container")
-        logging.info(f"Data cache mount: {volumns}, {binds}")
+        logging.info(f"Data cache mount: {volumes}, {binds}")
+    else:
+        logging.warning("data_cache_dir_input is not a string or a dictionary, skip mounting it to the container")
 
     # Default mounting
     if not enable_custom_image or (enable_custom_image and relative_entry != ""):
         logging.info("Start copying the source code to the container...")
-        volumns.append(src_code_dir)
+        volumes.append(src_code_dir)
         binds[src_code_dir] = {
             "bind": dst_model_serving_dir,
             "mode": "rw"
@@ -284,7 +252,7 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
     host_config_dict = {
         "binds": binds,
         "port_bindings": {
-            port_inside_container: usr_indicated_worker_port
+            port_inside_container: None
         },
         "shm_size": shm_size,
         "storage_opt": storage_opt,
@@ -312,7 +280,6 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
     if not enable_custom_image:
         # For some image, the default user is root. Unified to fedml.
         environment["HOME"] = "/home/fedml"
-
     environment["BOOTSTRAP_DIR"] = dst_bootstrap_dir
     environment["FEDML_CURRENT_RUN_ID"] = end_point_id
     environment["FEDML_CURRENT_EDGE_ID"] = edge_id
@@ -326,12 +293,13 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
         for key in extra_envs:
             environment[key] = extra_envs[key]
 
+    # Create the container
     try:
         host_config = client.api.create_host_config(**host_config_dict)
         new_container = client.api.create_container(
             image=inference_image_name,
             name=default_server_container_name,
-            volumes=volumns,
+            volumes=volumes,
             ports=[port_inside_container],  # port open inside the container
             environment=environment,
             host_config=host_config,
@@ -349,22 +317,18 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
     while True:
         cnt += 1
         try:
-            if usr_indicated_worker_port is not None:
-                inference_http_port = usr_indicated_worker_port
-                break
-            else:
-                # Find the random port
-                port_info = client.api.port(new_container.get("Id"), port_inside_container)
-                inference_http_port = port_info[0]["HostPort"]
-                logging.info("inference_http_port: {}".format(inference_http_port))
-                break
+            # Find the random port
+            port_info = client.api.port(new_container.get("Id"), port_inside_container)
+            inference_http_port = port_info[0]["HostPort"]
+            logging.info("host port allocated: {}".format(inference_http_port))
+            break
         except:
             if cnt >= 5:
                 raise Exception("Failed to get the port allocation")
             time.sleep(3)
 
     # Logging the info from the container when starting
-    log_deployment_result(end_point_id, model_id, default_server_container_name,
+    log_deployment_output(end_point_id, model_id, default_server_container_name,
                           ClientConstants.CMD_TYPE_RUN_DEFAULT_SERVER,
                           inference_model_name, inference_engine, inference_http_port, inference_type,
                           retry_interval=10, deploy_attempt_threshold=usr_indicated_retry_cnt,
@@ -373,9 +337,8 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
 
     # Return the running model name and the inference output url
     inference_output_url, running_model_version, ret_model_metadata, ret_model_config = \
-        get_model_info(inference_model_name, inference_engine, inference_http_port,
-                       infer_host, False, inference_type, request_input_example=request_input_example,
-                       enable_custom_image=enable_custom_image)
+        check_container_readiness(inference_http_port=inference_http_port, infer_host=infer_host,
+                                  request_input_example=request_input_example)
 
     if inference_output_url == "":
         return running_model_name, "", None, None, None
@@ -426,9 +389,8 @@ def should_exit_logs(end_point_id, model_id, cmd_type, model_name, inference_eng
         # If the container has exited, return True, means we should exit the logs
         try:
             inference_output_url, model_version, model_metadata, model_config = \
-                get_model_info(model_name, inference_engine, inference_port, infer_host,
-                               inference_type=inference_type, request_input_example=request_input_example,
-                               enable_custom_image=enable_custom_image)
+                check_container_readiness(inference_http_port=inference_port, infer_host=infer_host,
+                                          request_input_example=request_input_example)
             if inference_output_url != "":
                 logging.info("Log test for deploying model successfully, inference url: {}, "
                              "model metadata: {}, model config: {}".
@@ -443,7 +405,7 @@ def should_exit_logs(end_point_id, model_id, cmd_type, model_name, inference_eng
         return False
 
 
-def log_deployment_result(end_point_id, model_id, cmd_container_name, cmd_type,
+def log_deployment_output(end_point_id, model_id, cmd_container_name, cmd_type,
                           inference_model_name, inference_engine,
                           inference_http_port, inference_type="default",
                           retry_interval=10, deploy_attempt_threshold=10,
@@ -542,10 +504,10 @@ def log_deployment_result(end_point_id, model_id, cmd_container_name, cmd_type,
         time.sleep(retry_interval)
 
 
-def is_client_inference_container_ready(infer_url_host, inference_http_port, inference_model_name, local_infer_url,
-                                        inference_type="default", model_version="", request_input_example=None):
+def is_client_inference_container_ready(infer_url_host, inference_http_port, readiness_check_type="default",
+                                        readiness_check_cmd=None, request_input_example=None):
 
-    if inference_type == "default":
+    if readiness_check_type == "default":
         default_client_container_ready_url = "http://{}:{}/ready".format("0.0.0.0", inference_http_port)
         response = None
         try:
@@ -555,7 +517,7 @@ def is_client_inference_container_ready(infer_url_host, inference_http_port, inf
         if not response or response.status_code != 200:
             return "", "", {}, {}
 
-        # Report the deployed model info
+        # Construct the model metadata (input and output)
         model_metadata = {}
         if request_input_example is not None and len(request_input_example) > 0:
             model_metadata["inputs"] = request_input_example
@@ -563,51 +525,19 @@ def is_client_inference_container_ready(infer_url_host, inference_http_port, inf
             model_metadata["inputs"] = {"text": "What is a good cure for hiccups?"}
         model_metadata["outputs"] = []
         model_metadata["type"] = "default"
+
         return "http://{}:{}/predict".format(infer_url_host, inference_http_port), None, model_metadata, None
     else:
-        triton_server_url = "{}:{}".format(infer_url_host, inference_http_port)
-        if model_version == "" or model_version is None:
-            model_version = ClientConstants.INFERENCE_MODEL_VERSION
-        logging.info(
-            f"triton_server_url: {triton_server_url} model_version: {model_version} model_name: {inference_model_name}")
-        triton_client = http_client.InferenceServerClient(url=triton_server_url, verbose=False)
-        if not triton_client.is_model_ready(
-            model_name=inference_model_name, model_version=model_version
-        ):
-            return "", model_version, {}, {}
-        logging.info(f"Model {inference_model_name} is ready, start to get model metadata...")
-        model_metadata = triton_client.get_model_metadata(model_name=inference_model_name, model_version=model_version)
-        model_config = triton_client.get_model_config(model_name=inference_model_name, model_version=model_version)
-        version_list = model_metadata.get("versions", None)
-        if version_list is not None and len(version_list) > 0:
-            model_version = version_list[0]
-        else:
-            model_version = ClientConstants.INFERENCE_MODEL_VERSION
-
-        inference_output_url = "http://{}:{}/{}/models/{}/versions/{}/infer".format(infer_url_host,
-                                                                                    inference_http_port,
-                                                                                    ClientConstants.INFERENCE_INFERENCE_SERVER_VERSION,
-                                                                                    inference_model_name,
-                                                                                    model_version)
-
-        return inference_output_url, model_version, model_metadata, model_config
-
-
-def get_model_info(model_name, inference_engine, inference_http_port, infer_host="127.0.0.1", is_hg_model=False,
-                   inference_type="default", request_input_example=None, enable_custom_image=False):
-    if model_name is None:
+        # TODO(Raphael): Support arbitrary readiness check command
+        logging.error(f"Unknown readiness check type: {readiness_check_type}")
         return "", "", {}, {}
 
-    local_infer_url = "{}:{}".format(infer_host, inference_http_port)
-
-    if is_hg_model:
-        inference_model_name = "{}_{}_inference".format(model_name, str(inference_engine))
-    else:
-        inference_model_name = model_name
 
+def check_container_readiness(inference_http_port, infer_host="127.0.0.1", request_input_example=None,
+                              readiness_check_type="default", readiness_check_cmd=None):
     response_from_client_container = is_client_inference_container_ready(
-        infer_host, inference_http_port, inference_model_name, local_infer_url,
-        inference_type, model_version="", request_input_example=request_input_example)
+        infer_host, inference_http_port, readiness_check_type, readiness_check_cmd,
+        request_input_example=request_input_example)
 
     return response_from_client_container
 

From 64e8c779c61edfecf7ca8e638b6b54ff31d7983b Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Tue, 11 Jun 2024 16:29:37 -0700
Subject: [PATCH 143/282] [Deploy] Nit

---
 .../computing/scheduler/model_scheduler/device_model_cards.py    | 1 -
 1 file changed, 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cards.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cards.py
index 8feb757a63..c2f11a2917 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_cards.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cards.py
@@ -14,7 +14,6 @@
 
 from fedml.core.common.singleton import Singleton
 from fedml.computing.scheduler.model_scheduler.modelops_configs import ModelOpsConfigs
-from fedml.computing.scheduler.model_scheduler.device_model_deployment import get_model_info
 from fedml.computing.scheduler.model_scheduler.device_server_constants import ServerConstants
 from fedml.computing.scheduler.model_scheduler.device_model_object import FedMLModelList, FedMLEndpointDetail
 from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants

From 9194f8424f77008b49a48908ee72f19fe59ba23d Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Tue, 11 Jun 2024 16:42:46 -0700
Subject: [PATCH 144/282] [Deploy] Hide unnecessary log.

---
 .../scheduler/model_scheduler/device_model_cache.py       | 8 ++++----
 .../scheduler/model_scheduler/device_model_inference.py   | 6 +++---
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
index 6c90944277..c941c42102 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
@@ -369,7 +369,7 @@ def get_idle_device(self,
                 if "model_status" in result_payload and result_payload["model_status"] == "DEPLOYED":
                     idle_device_list.append({"device_id": device_id, "end_point_id": end_point_id})
 
-        logging.info(f"{len(idle_device_list)} devices this model has on it: {idle_device_list}")
+        logging.debug(f"{len(idle_device_list)} devices this model has on it: {idle_device_list}")
 
         if len(idle_device_list) <= 0:
             return None, None
@@ -398,7 +398,7 @@ def get_idle_device(self,
             logging.info("Inference Device selection Failed:")
             logging.info(e)
 
-        logging.info(f"Using Round Robin, the device index is {selected_device_index}")
+        logging.debug(f"Using Round Robin, the device index is {selected_device_index}")
         idle_device_dict = idle_device_list[selected_device_index]
 
         # Note that within the same endpoint_id, there could be one device with multiple same models
@@ -411,7 +411,7 @@ def get_idle_device(self,
         # Find deployment result from the target idle device.
         try:
             for result_item in result_list:
-                logging.info("enter the for loop")
+                logging.debug("enter the for loop")
                 device_id, _, result_payload = self.get_result_item_info(result_item)
                 found_end_point_id = result_payload["end_point_id"]
                 found_end_point_name = result_payload["end_point_name"]
@@ -425,7 +425,7 @@ def get_idle_device(self,
                     if same_model_device_rank > 0:
                         same_model_device_rank -= 1
                         continue
-                    logging.info(f"The chosen device is {device_id}")
+                    logging.debug(f"The chosen device is {device_id}")
                     return result_payload, device_id
         except Exception as e:
             logging.info(str(e))
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index 3aeec67932..ba13006245 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -230,7 +230,7 @@ async def _predict(
             model_metrics.set_start_time(start_time)
 
             # Send inference request to idle device
-            logging.info("inference url {}.".format(inference_output_url))
+            logging.debug("inference url {}.".format(inference_output_url))
             if inference_output_url != "":
                 input_list = input_json.get("inputs", input_json)
                 stream_flag = input_json.get("stream", False)
@@ -329,7 +329,7 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_
 
     res = (idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url,
            connectivity_type)
-    logging.info(f"found idle device with metrics: {res}")
+    logging.debug(f"found idle device with metrics: {res}")
 
     return res
 
@@ -352,7 +352,7 @@ async def send_inference_request(idle_device, end_point_id, inference_url, input
                     output_list,
                     inference_type=inference_type,
                     timeout=request_timeout_sec)
-                logging.info(f"Use http inference. return {response_ok}")
+                logging.debug(f"Use http inference. return {response_ok}")
                 return inference_response
         elif connectivity_type == ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP_PROXY:
             logging.warning("Use http proxy inference.")

From 243be07831c7ffd078203f402efae339ed0b58a3 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Tue, 11 Jun 2024 17:50:30 -0700
Subject: [PATCH 145/282] [Deploy] Read port info from env.

---
 .../scheduler/model_scheduler/device_client_constants.py  | 1 +
 .../scheduler/model_scheduler/device_server_constants.py  | 1 +
 .../scheduler/model_scheduler/master_job_runner.py        | 6 +++---
 .../scheduler/model_scheduler/worker_protocol_manager.py  | 8 +++++++-
 4 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
index 2c06189d2e..f1e7dea91f 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
@@ -74,6 +74,7 @@ class ClientConstants(object):
     K8S_DEPLOYMENT_SLAVE_MOUNT_HOME_DIR = "/home/fedml/fedml-client"
 
     LOCAL_CLIENT_API_PORT = 22030
+    ENV_CLIENT_PROXY_PORT_KEY = "FEDML_WORKER_INFERENCE_PROXY_PORT"
 
     INFERENCE_HTTP_PORT = 8000
     INFERENCE_GRPC_PORT = 8001
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
index 243c197b2f..a868d03b41 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
@@ -103,6 +103,7 @@ class ServerConstants(object):
 
     AUTO_DETECT_PUBLIC_IP = "auto_detect_public_ip"
     MODEL_INFERENCE_DEFAULT_PORT = 2203
+    ENV_MASTER_INFERENCE_PORT_KEY = "FEDML_MASTER_INFERENCE_GATEWAY_PORT"
     MODEL_CACHE_KEY_EXPIRE_TIME = 1 * 10
 
     INFERENCE_REQUEST_TIMEOUT_KEY = "request_timeout_sec"
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index ef2c01c49d..d7565d7647 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -460,9 +460,9 @@ def cleanup_runner_process(self, run_id):
     def start_device_inference_gateway(inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT):
         # start unified inference server
         python_program = get_python_program()
-        master_port = os.getenv("FEDML_MASTER_PORT", None)
-        if master_port is not None:
-            inference_port = int(master_port)
+        master_port_frm_env = os.getenv(ServerConstants.MODEL_INFERENCE_DEFAULT_PORT, None)
+        if master_port_frm_env is not None:
+            inference_port = int(master_port_frm_env)
         if not ServerConstants.is_running_on_k8s():
             logging.info(f"start the model inference gateway...")
             inference_gw_cmd = "fedml.computing.scheduler.model_scheduler.device_model_inference:api"
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
index f9bc70452d..ee59f87441 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
@@ -79,6 +79,12 @@ def _init_extra_items(self):
 
         client_api_cmd = "fedml.computing.scheduler.model_scheduler.device_client_api:api"
         client_api_pids = RunProcessUtils.get_pid_from_cmd_line(client_api_cmd)
+
+        worker_proxy_port = ClientConstants.LOCAL_CLIENT_API_PORT
+        worker_proxy_port_frm_env = os.environ.get(ClientConstants.ENV_CLIENT_PROXY_PORT_KEY, None)
+        if worker_proxy_port_frm_env is not None:
+            worker_proxy_port = int(worker_proxy_port_frm_env)
+
         if client_api_pids is None or len(client_api_pids) <= 0:
             # Start local API services
             cur_dir = os.path.dirname(__file__)
@@ -88,7 +94,7 @@ def _init_extra_items(self):
                 "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} "
                 "--log-level critical".format(
                     python_program, client_api_cmd,
-                    ClientConstants.LOCAL_CLIENT_API_PORT, fedml_base_dir
+                    worker_proxy_port, fedml_base_dir
                 ),
                 should_capture_stdout=False,
                 should_capture_stderr=False

From 0b2349983b5fabe50666d0f845cc2e94433fdf91 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 12 Jun 2024 23:08:06 +0800
Subject: [PATCH 146/282] [CoreEngine] Make the status center work in the
 united agents.

---
 .../scheduler/comm_utils/sys_utils.py         | 21 +++++++++++++
 .../scheduler/master/base_master_agent.py     |  3 ++
 .../model_scheduler/master_job_runner.py      |  1 +
 .../scheduler_base_protocol_manager.py        |  2 +-
 .../scheduler/slave/united_agents.py          | 31 +++++++++++--------
 5 files changed, 44 insertions(+), 14 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/sys_utils.py b/python/fedml/computing/scheduler/comm_utils/sys_utils.py
index aaa37bc4db..065482c23b 100644
--- a/python/fedml/computing/scheduler/comm_utils/sys_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/sys_utils.py
@@ -114,6 +114,8 @@ def get_sys_runner_info():
     except:
         pass
 
+    enable_simulation_gpu, simulation_gpu_count = get_simulation_gpu_env()
+
     if enable_simulation_gpu:
         gpu_count = simulation_gpu_count
         gpu_total_mem = "80G"
@@ -128,9 +130,26 @@ def get_sys_runner_info():
         gpu_count, gpu_vendor, cpu_count, gpu_device_name
 
 
+def get_simulation_gpu_env():
+    _enable_simulation_gpu = enable_simulation_gpu
+    _simulation_gpu_count = simulation_gpu_count
+
+    env_enable_simulation_gpu = os.getenv("FEDML_ENABLE_SIMULATION_GPU", None)
+    if env_enable_simulation_gpu is not None:
+        _enable_simulation_gpu = True if env_enable_simulation_gpu == "1" or env_enable_simulation_gpu == 1 else False
+
+    env_simulation_gpu_count = os.getenv("FEDML_SIMULATION_GPU_COUNT", None)
+    if env_simulation_gpu_count is not None:
+        _simulation_gpu_count = int(env_simulation_gpu_count)
+
+    return _enable_simulation_gpu, _simulation_gpu_count
+
+
 # GPU list: [GPU(ID, uuid, load, memoryTotal, memoryUsed, memoryFree, driver,
 # gpu_name, serial, display_mode, display_active, temperature)]
 def get_gpu_list():
+    enable_simulation_gpu, simulation_gpu_count = get_simulation_gpu_env()
+
     if enable_simulation_gpu:
         ret_gpu_list = [
             {'ID': 0, 'uuid': 'GPU-dab987f0-be09-294a-96d6-f9afeef49877', 'load': 1.0,
@@ -184,6 +203,8 @@ def get_gpu_list():
 
 
 def get_available_gpu_id_list(limit=1) -> List[int]:
+    enable_simulation_gpu, simulation_gpu_count = get_simulation_gpu_env()
+
     if enable_simulation_gpu:
         available_gpu_ids = [0, 1, 2, 3, 4, 5, 6, 7]
         if simulation_gpu_count > 8:
diff --git a/python/fedml/computing/scheduler/master/base_master_agent.py b/python/fedml/computing/scheduler/master/base_master_agent.py
index b27ed9547a..d6c6420cf3 100755
--- a/python/fedml/computing/scheduler/master/base_master_agent.py
+++ b/python/fedml/computing/scheduler/master/base_master_agent.py
@@ -123,6 +123,9 @@ def _init_logs(self, agent_args, edge_id):
         in_args.server_agent_id = edge_id
         MLOpsRuntimeLog.get_instance(in_args).init_logs()
 
+    def get_protocol_manager(self):
+        return self.protocol_mgr
+
     @abstractmethod
     def _get_log_file_dir(self):
         pass
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index e504ded561..61cce1b39c 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -1,6 +1,7 @@
 import copy
 import json
 import logging
+import multiprocessing
 import os
 import time
 import queue
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
index bd843eecc2..f80508a509 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
@@ -288,7 +288,7 @@ def get_get_protocol_communication_manager(self):
     def get_protocol_sender_message_queue(self):
         return self.message_center.get_sender_message_queue()
 
-    def get_get_protocol_status_center_queue(self):
+    def get_protocol_status_center_queue(self):
         return self.get_status_queue()
 
     def send_agent_active_msg(self, edge_id):
diff --git a/python/fedml/computing/scheduler/slave/united_agents.py b/python/fedml/computing/scheduler/slave/united_agents.py
index e365de8860..7135925ec8 100755
--- a/python/fedml/computing/scheduler/slave/united_agents.py
+++ b/python/fedml/computing/scheduler/slave/united_agents.py
@@ -25,42 +25,47 @@ def login(self, userid, api_key=None, device_id=None,
         deploy_master_agent = FedMLDeployMasterAgent()
 
         # Login with the launch slave role
-        launch_slave_agent.login(
+        login_result = launch_slave_agent.login(
             api_key, api_key=api_key, device_id=device_id,
             os_name=os_name, role=role
         )
 
-        # Get the communication manager, sender message queue and status center queue
+        # Get the communication manager, sender message queue
         shared_communication_mgr = launch_slave_agent.get_protocol_manager().get_get_protocol_communication_manager()
         shared_sender_message_queue = launch_slave_agent.get_protocol_manager().get_protocol_sender_message_queue()
-        shared_status_center_queue = launch_slave_agent.get_protocol_manager().get_get_protocol_status_center_queue()
 
-        # Login with the launch master role based on the shared communication manager
+        # Login with the launch master role based on
+        # the shared communication manager, sender message center
         launch_master_agent.login(
-            api_key, api_key=api_key, device_id=device_id,
+            api_key, api_key=api_key, device_id=login_result.device_id,
             os_name=os_name, runner_cmd=runner_cmd,
             role=FedMLAccountManager.ROLE_GPU_MASTER_SERVER,
             communication_manager=shared_communication_mgr,
-            sender_message_queue=shared_sender_message_queue,
-            status_center_queue=shared_status_center_queue
+            sender_message_queue=shared_sender_message_queue
         )
 
-        # Login with the deployment master role based on the shared communication manager
+        # Get the status center queue
+        shared_slave_status_center_queue = launch_slave_agent.get_protocol_manager().get_protocol_status_center_queue()
+        shared_master_status_center_queue = launch_master_agent.get_protocol_manager().get_protocol_status_center_queue()
+
+        # Login with the deployment master role based on
+        # the shared communication manager, sender message center, status center
         deploy_master_agent.login(
-            userid, api_key=api_key, device_id=device_id,
+            userid, api_key=api_key, device_id=login_result.device_id,
             os_name=os_name, role=FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM,
             communication_manager=shared_communication_mgr,
             sender_message_queue=shared_sender_message_queue,
-            status_center_queue=shared_status_center_queue
+            status_center_queue=shared_master_status_center_queue
         )
 
-        # Login with the deployment slave role based on the shared communication manager
+        # Login with the deployment slave role based on
+        # the shared communication manager, sender message center, status center
         deploy_slave_agent.login(
-            userid, api_key=api_key, device_id=device_id,
+            userid, api_key=api_key, device_id=login_result.device_id,
             os_name=os_name, role=FedMLAccountManager.ROLE_DEPLOY_WORKER_ON_PREM,
             communication_manager=shared_communication_mgr,
             sender_message_queue=shared_sender_message_queue,
-            status_center_queue=shared_status_center_queue
+            status_center_queue=shared_slave_status_center_queue
         )
 
         # Start the slave agent to connect to servers and loop forever.

From 3a034717f7ebc43ff035e73cc49c13ea1c2e7d79 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Wed, 12 Jun 2024 12:04:36 -0700
Subject: [PATCH 147/282] [Deploy] Nit.

---
 .../computing/scheduler/model_scheduler/master_job_runner.py    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index d7565d7647..67a3e8bb82 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -460,7 +460,7 @@ def cleanup_runner_process(self, run_id):
     def start_device_inference_gateway(inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT):
         # start unified inference server
         python_program = get_python_program()
-        master_port_frm_env = os.getenv(ServerConstants.MODEL_INFERENCE_DEFAULT_PORT, None)
+        master_port_frm_env = os.getenv(ServerConstants.ENV_MASTER_INFERENCE_PORT_KEY, None)
         if master_port_frm_env is not None:
             inference_port = int(master_port_frm_env)
         if not ServerConstants.is_running_on_k8s():

From f0dd29e04fa600339c0efb74526694d4dee2842e Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Wed, 12 Jun 2024 12:12:25 -0700
Subject: [PATCH 148/282] [Deploy] Nit.

---
 .../scheduler/model_scheduler/master_job_runner.py        | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index 67a3e8bb82..5f82a6c046 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -369,12 +369,8 @@ def process_deployment_result_message(self, topic=None, payload=None):
             """
             When all the devices have finished the add / delete / update operation
             """
-            # Generate one unified inference api
-            # Note that here we use the gateway port instead of the inference port that is used by the slave device
-            model_config_parameters = request_json["parameters"]
-            inference_port = model_config_parameters.get("server_internal_port",
-                                                         ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
-            inference_port_external = model_config_parameters.get("server_external_port", inference_port)
+            inference_port_external = os.environ.get(ServerConstants.ENV_MASTER_INFERENCE_PORT_KEY,
+                                                     ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
             ip = GeneralConstants.get_ip_address(request_json)
 
             if ip.startswith("http://") or ip.startswith("https://"):

From 21a8a4c9d97e712f029f0e7abe39e0b5e56954a2 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Wed, 12 Jun 2024 12:18:58 -0700
Subject: [PATCH 149/282] [Deploy] Change a few more places related to the
 gateway port.

---
 .../scheduler/model_scheduler/master_job_runner.py  | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index 5f82a6c046..50d902b933 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -144,7 +144,8 @@ def run_impl(
                 # No device is added, updated or removed
                 logging.info("No device is added, updated or removed. No action needed for reconciliation.")
                 ip = GeneralConstants.get_ip_address(self.request_json)
-                master_port = os.getenv("FEDML_MASTER_PORT", None)
+                master_port = os.environ.get(ServerConstants.ENV_MASTER_INFERENCE_PORT_KEY,
+                                                     ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
                 if master_port is not None:
                     inference_port = int(master_port)
                 model_inference_port = inference_port
@@ -299,9 +300,8 @@ def process_deployment_result_message(self, topic=None, payload=None):
                 else:
                     # This is the last worker that failed, so we should continue to "ABORTED" status
                     model_config_parameters = self.request_json["parameters"]
-                    inference_port = model_config_parameters.get("server_internal_port",
-                                                                 ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
-                    inference_port_external = model_config_parameters.get("server_external_port", inference_port)
+                    inference_port_external = os.environ.get(ServerConstants.ENV_MASTER_INFERENCE_PORT_KEY,
+                                                             ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
                     ip = GeneralConstants.get_ip_address(self.request_json)
                     if ip.startswith("http://") or ip.startswith("https://"):
                         model_inference_url = "{}/inference/{}".format(ip, end_point_id)
@@ -753,9 +753,8 @@ def parse_model_run_params(running_json):
         model_version = model_config["model_version"]
         model_config_parameters = running_json.get("parameters", {})
 
-        inference_port = model_config_parameters.get("server_internal_port",  # Internal port is for the gateway
-                                                     ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
-        inference_port_external = model_config_parameters.get("server_external_port", inference_port)
+        inference_port = int(os.environ.get(ServerConstants.ENV_MASTER_INFERENCE_PORT_KEY,
+                                        ServerConstants.MODEL_INFERENCE_DEFAULT_PORT))
 
         return run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \
             model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \

From e7e974d24f510a47e2ee5e9df1a6161665fffa1e Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Wed, 12 Jun 2024 15:29:53 -0700
Subject: [PATCH 150/282] [Deploy] Write port info into env file.

---
 python/fedml/api/__init__.py                  | 12 ++++++----
 python/fedml/api/modules/device.py            | 13 ++++++++---
 python/fedml/cli/modules/login.py             | 22 +++++++++++++++++--
 .../device_client_constants.py                |  8 +++++++
 .../device_server_constants.py                |  9 ++++++++
 .../model_scheduler/master_job_runner.py      | 22 +++++++------------
 .../worker_protocol_manager.py                |  3 +--
 7 files changed, 64 insertions(+), 25 deletions(-)

diff --git a/python/fedml/api/__init__.py b/python/fedml/api/__init__.py
index 3e75b987d6..f753e4255b 100755
--- a/python/fedml/api/__init__.py
+++ b/python/fedml/api/__init__.py
@@ -24,6 +24,8 @@
 from fedml.computing.scheduler.scheduler_entry.cluster_manager import FedMLClusterModelList
 from fedml.computing.scheduler.scheduler_entry.run_manager import FedMLRunStartedModel, FedMLGpuDevices, \
     FedMLRunModelList, FeatureEntryPoint
+from fedml.computing.scheduler.model_scheduler.device_server_constants import ServerConstants
+from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants
 
 
 def fedml_login(api_key: str = None):
@@ -209,16 +211,18 @@ def fedml_build(platform, type, source_folder, entry_point, config_folder, dest_
     return build.build(platform, type, source_folder, entry_point, config_folder, dest_folder, ignore)
 
 
-def login(api_key, computing, server, supplier):
-    device_bind(api_key, computing, server, supplier)
+def login(api_key, computing, server, supplier,
+          master_inference_gateway_port: int = ServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
+          worker_inference_proxy_port: int = ClientConstants.LOCAL_CLIENT_API_PORT):
+    device_bind(api_key, computing, server, supplier, master_inference_gateway_port, worker_inference_proxy_port)
 
 
 def logout(computing, server):
     device_unbind(computing, server)
 
 
-def device_bind(api_key, computing, server, supplier):
-    device.bind(api_key, computing, server, supplier)
+def device_bind(api_key, computing, server, supplier, master_inference_gateway_port, worker_inference_proxy_port):
+    device.bind(api_key, computing, server, supplier, master_inference_gateway_port, worker_inference_proxy_port)
 
 
 def device_unbind(computing, server):
diff --git a/python/fedml/api/modules/device.py b/python/fedml/api/modules/device.py
index a853d538d0..14591147a6 100644
--- a/python/fedml/api/modules/device.py
+++ b/python/fedml/api/modules/device.py
@@ -10,14 +10,18 @@
 from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants
 from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils
 from fedml.computing.scheduler.master.server_constants import ServerConstants
+from fedml.computing.scheduler.model_scheduler.device_server_constants import ServerConstants as DeviceServerConstants
 from fedml.computing.scheduler.master.server_login import logout as server_logout
 from fedml.computing.scheduler.slave.client_constants import ClientConstants
+from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants as DeviceClientConstants
 from fedml.computing.scheduler.slave.client_login import logout as client_logout
 from fedml.computing.scheduler.scheduler_entry.resource_manager import FedMLResourceManager
 
 
 def bind(
-        api_key, computing, server, supplier
+        api_key, computing, server, supplier,
+        master_inference_gateway_port=DeviceServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
+        worker_inference_proxy_port=DeviceClientConstants.LOCAL_CLIENT_API_PORT
 ):
     userid = api_key
     runner_cmd = "{}"
@@ -43,13 +47,13 @@ def bind(
     _bind(
         userid, computing, server,
         api_key, role, runner_cmd, device_id, os_name,
-        docker)
+        docker, master_inference_gateway_port, worker_inference_proxy_port)
 
 
 def _bind(
         userid, computing, server,
         api_key, role, runner_cmd, device_id, os_name,
-        docker):
+        docker, master_inference_gateway_port, worker_inference_proxy_port):
     fedml.load_env()
     if os.getenv(ModuleConstants.ENV_FEDML_INFER_HOST) is None:
         fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_HOST, SchedulerConstants.REDIS_INFER_HOST)
@@ -60,6 +64,9 @@ def _bind(
     if os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD) is None:
         fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD, SchedulerConstants.REDIS_PASSWORD)
 
+    fedml.set_env_kv(DeviceServerConstants.ENV_MASTER_INFERENCE_PORT_KEY, str(master_inference_gateway_port))
+    fedml.set_env_kv(DeviceClientConstants.ENV_CLIENT_PROXY_PORT_KEY, str(worker_inference_proxy_port))
+
     url = fedml._get_backend_service()
     platform_name = platform.system()
     docker_config_text = None
diff --git a/python/fedml/cli/modules/login.py b/python/fedml/cli/modules/login.py
index f2e4d76322..f3c982f456 100644
--- a/python/fedml/cli/modules/login.py
+++ b/python/fedml/cli/modules/login.py
@@ -4,6 +4,8 @@
 
 import fedml.api
 from fedml.api.modules.utils import authenticate
+from fedml.computing.scheduler.model_scheduler.device_server_constants import ServerConstants
+from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants
 
 
 @click.command("login", help="Login the FedML® Nexus AI Platform")
@@ -51,9 +53,25 @@
     default=80,
     help="The port for local on-premise Nexus AI Platform.",
 )
+@click.option(
+    "--master_inference_gateway_port",
+    "-mgp",
+    type=int,
+    default=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
+    help="The port for master inference gateway.",
+)
+@click.option(
+    "--worker_inference_proxy_port",
+    "-wpp",
+    type=int,
+    default=ClientConstants.LOCAL_CLIENT_API_PORT,
+    help="The port for worker inference proxy.",
+)
 def fedml_login(
         api_key, version, compute_node, server, provider, deploy_worker_num,
-        local_on_premise_platform, local_on_premise_platform_port):
+        local_on_premise_platform, local_on_premise_platform_port,
+        master_inference_gateway_port, worker_inference_proxy_port
+):
     fedml.set_env_version(version)
     fedml.set_local_on_premise_platform_host(local_on_premise_platform)
     fedml.set_local_on_premise_platform_port(local_on_premise_platform_port)
@@ -66,4 +84,4 @@ def fedml_login(
         print(f"Maybe you are using account id to login, we will try to login with account {api_key}.")
         pass
     os.environ["FEDML_MODEL_WORKER_NUM"] = str(deploy_worker_num)
-    fedml.api.login(api_key, compute_node, server, provider)
+    fedml.api.login(api_key, compute_node, server, provider, master_inference_gateway_port, worker_inference_proxy_port)
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
index f1e7dea91f..fdcbdf0a34 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
@@ -458,6 +458,14 @@ def get_public_ip():
             logging.info("Failed to get public ip: {}".format(e))
         return ip
 
+    @staticmethod
+    def get_inference_worker_proxy_port() -> int:
+        # Use dotenv to load the environment variables
+        fedml.load_env()
+        worker_proxy_port = int(os.getenv(ClientConstants.ENV_CLIENT_PROXY_PORT_KEY,
+                                      default=ClientConstants.LOCAL_CLIENT_API_PORT))
+        return worker_proxy_port
+
     @staticmethod
     def check_process_is_running(process_id):
         for proc in psutil.process_iter():
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
index a868d03b41..a5048c26a6 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
@@ -349,6 +349,15 @@ def get_runner_infos():
             logging.error(f"Failed to parse runner info: {e}")
         return runner_info
 
+    @staticmethod
+    def get_inference_master_gateway_port():
+        # Use dotenv to load the environment variables
+        fedml.load_env()
+        master_inference_port = int(os.getenv(ServerConstants.ENV_MASTER_INFERENCE_PORT_KEY,
+                                            default=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT))
+        return master_inference_port
+
+
     @staticmethod
     def save_runner_infos(unique_device_id, edge_id, run_id=None):
         local_pkg_data_dir = ServerConstants.get_data_dir()
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index 50d902b933..eff26684b7 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -115,7 +115,7 @@ def run_impl(
             message_center=self.message_center)
 
         # start unified inference gateway process if not started
-        FedMLDeployMasterJobRunner.start_device_inference_gateway(inference_port=inference_port)
+        FedMLDeployMasterJobRunner.start_device_inference_gateway()
 
         # start inference monitor process
         FedMLDeployMasterJobRunner.stop_device_inference_monitor(
@@ -144,8 +144,7 @@ def run_impl(
                 # No device is added, updated or removed
                 logging.info("No device is added, updated or removed. No action needed for reconciliation.")
                 ip = GeneralConstants.get_ip_address(self.request_json)
-                master_port = os.environ.get(ServerConstants.ENV_MASTER_INFERENCE_PORT_KEY,
-                                                     ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
+                master_port = ServerConstants.get_inference_master_gateway_port()
                 if master_port is not None:
                     inference_port = int(master_port)
                 model_inference_port = inference_port
@@ -300,8 +299,7 @@ def process_deployment_result_message(self, topic=None, payload=None):
                 else:
                     # This is the last worker that failed, so we should continue to "ABORTED" status
                     model_config_parameters = self.request_json["parameters"]
-                    inference_port_external = os.environ.get(ServerConstants.ENV_MASTER_INFERENCE_PORT_KEY,
-                                                             ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
+                    inference_port_external = ServerConstants.get_inference_master_gateway_port()
                     ip = GeneralConstants.get_ip_address(self.request_json)
                     if ip.startswith("http://") or ip.startswith("https://"):
                         model_inference_url = "{}/inference/{}".format(ip, end_point_id)
@@ -369,8 +367,7 @@ def process_deployment_result_message(self, topic=None, payload=None):
             """
             When all the devices have finished the add / delete / update operation
             """
-            inference_port_external = os.environ.get(ServerConstants.ENV_MASTER_INFERENCE_PORT_KEY,
-                                                     ServerConstants.MODEL_INFERENCE_DEFAULT_PORT)
+            inference_port_external = ServerConstants.get_inference_master_gateway_port()
             ip = GeneralConstants.get_ip_address(request_json)
 
             if ip.startswith("http://") or ip.startswith("https://"):
@@ -453,12 +450,10 @@ def cleanup_runner_process(self, run_id):
         ServerConstants.cleanup_run_process(run_id, not_kill_subprocess=True)
 
     @staticmethod
-    def start_device_inference_gateway(inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT):
+    def start_device_inference_gateway():
         # start unified inference server
         python_program = get_python_program()
-        master_port_frm_env = os.getenv(ServerConstants.ENV_MASTER_INFERENCE_PORT_KEY, None)
-        if master_port_frm_env is not None:
-            inference_port = int(master_port_frm_env)
+        inference_port = ServerConstants.get_inference_master_gateway_port()
         if not ServerConstants.is_running_on_k8s():
             logging.info(f"start the model inference gateway...")
             inference_gw_cmd = "fedml.computing.scheduler.model_scheduler.device_model_inference:api"
@@ -539,7 +534,7 @@ def recover_inference_and_monitor():
                 if not is_activated:
                     continue
 
-                FedMLDeployMasterJobRunner.start_device_inference_gateway(inference_port=inference_port)
+                FedMLDeployMasterJobRunner.start_device_inference_gateway()
 
                 FedMLDeployMasterJobRunner.stop_device_inference_monitor(
                     run_id, end_point_name, model_id, model_name, model_version)
@@ -753,8 +748,7 @@ def parse_model_run_params(running_json):
         model_version = model_config["model_version"]
         model_config_parameters = running_json.get("parameters", {})
 
-        inference_port = int(os.environ.get(ServerConstants.ENV_MASTER_INFERENCE_PORT_KEY,
-                                        ServerConstants.MODEL_INFERENCE_DEFAULT_PORT))
+        inference_port = ServerConstants.get_inference_master_gateway_port()
 
         return run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \
             model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
index ee59f87441..cdfa43c33b 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
@@ -80,8 +80,7 @@ def _init_extra_items(self):
         client_api_cmd = "fedml.computing.scheduler.model_scheduler.device_client_api:api"
         client_api_pids = RunProcessUtils.get_pid_from_cmd_line(client_api_cmd)
 
-        worker_proxy_port = ClientConstants.LOCAL_CLIENT_API_PORT
-        worker_proxy_port_frm_env = os.environ.get(ClientConstants.ENV_CLIENT_PROXY_PORT_KEY, None)
+        worker_proxy_port = ClientConstants.get_inference_worker_proxy_port()
         if worker_proxy_port_frm_env is not None:
             worker_proxy_port = int(worker_proxy_port_frm_env)
 

From 9c8ce99c41e6bf8df8f38fe88a6f782141d3a19e Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Wed, 12 Jun 2024 15:33:47 -0700
Subject: [PATCH 151/282] [Deploy] Nit.

---
 .../scheduler/model_scheduler/worker_protocol_manager.py        | 2 --
 1 file changed, 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
index cdfa43c33b..b1d0bebc47 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
@@ -81,8 +81,6 @@ def _init_extra_items(self):
         client_api_pids = RunProcessUtils.get_pid_from_cmd_line(client_api_cmd)
 
         worker_proxy_port = ClientConstants.get_inference_worker_proxy_port()
-        if worker_proxy_port_frm_env is not None:
-            worker_proxy_port = int(worker_proxy_port_frm_env)
 
         if client_api_pids is None or len(client_api_pids) <= 0:
             # Start local API services

From 505103f9f05106712de4ea7078441526ee33b9f7 Mon Sep 17 00:00:00 2001
From: bhargav191098 <bhargav3514@gmail.com>
Date: Thu, 13 Jun 2024 17:08:51 -0700
Subject: [PATCH 152/282] removing zip from upload

---
 python/fedml/api/modules/storage.py | 45 ++++++++++++++++++++---------
 1 file changed, 31 insertions(+), 14 deletions(-)

diff --git a/python/fedml/api/modules/storage.py b/python/fedml/api/modules/storage.py
index e7d492c999..a928b325b2 100644
--- a/python/fedml/api/modules/storage.py
+++ b/python/fedml/api/modules/storage.py
@@ -38,27 +38,42 @@ def upload(data_path, api_key, name, description, tag_list, service, show_progre
 
     if user_id is None:
         return FedMLResponse(code=ResponseCode.FAILURE, message=message)
+
+    data_type = _get_data_type(data_path)
     
-    if(not _check_data_path(data_path)):
+    if(data_type == "invalid"):
         return FedMLResponse(code=ResponseCode.FAILURE,message="Invalid data path")
 
-    archive_path, message = _archive_data(data_path)
-    if not archive_path:
+    if(data_type == "dir"):
+        to_upload_path, message = _archive_data(data_path)
+        name = os.path.splitext(os.path.basename(to_upload_path))[0] if name is None else name
+        file_name = name + ".zip"
+    else:
+        to_upload_path = data_path
+        base_name = os.path.basename(to_upload_path)
+        given_extension = os.path.splitext(name)[1]
+        if given_extension is None or given_extension == "":
+            given_extension = os.path.splitext(base_name)[1]
+        name = base_name if name is None else name + given_extension
+        file_name = name
+
+    if not to_upload_path:
         return FedMLResponse(code=ResponseCode.FAILURE, message=message)
 
-    name = os.path.splitext(os.path.basename(archive_path))[0] if name is None else name
-    file_name = name + ".zip"
+
     dest_path = os.path.join(user_id, file_name)
-    file_size = os.path.getsize(archive_path)
+    file_size = os.path.getsize(to_upload_path)
 
-    file_uploaded_url, message = _upload_multipart(api_key, file_name, archive_path, show_progress,
+    file_uploaded_url, message = _upload_multipart(api_key, file_name, to_upload_path, show_progress,
                                                        out_progress_to_err,
                                                        progress_desc, metadata)
 
-
-    os.remove(archive_path)
+    if(data_type == "dir"):
+        os.remove(to_upload_path)
     if not file_uploaded_url:
-        return FedMLResponse(code=ResponseCode.FAILURE, message=f"Failed to upload file: {archive_path}")
+        return FedMLResponse(code=ResponseCode.FAILURE, message=f"Failed to upload file: {to_upload_path}")
+
+    print("url: ",file_uploaded_url)
 
     json_data = {
         "datasetName": name,
@@ -438,10 +453,12 @@ def _get_storage_service(service):
     else:
         raise NotImplementedError(f"Service {service} not implemented")
 
-def _check_data_path(data_path):
-    if os.path.isdir(data_path) or os.path.isfile(data_path):
-        return True
-    return False
+def _get_data_type(data_path):
+    if os.path.isdir(data_path):
+        return "dir"
+    elif os.path.isfile(data_path):
+        return "file"
+    return "invalid"
 
 
 def _archive_data(data_path: str) -> (str, str):

From 03c58a2a42d8b43b3adf6331b38e38de92cc69d2 Mon Sep 17 00:00:00 2001
From: bhargav191098 <bhargav3514@gmail.com>
Date: Thu, 13 Jun 2024 17:32:36 -0700
Subject: [PATCH 153/282] changes in the download to support files

---
 python/fedml/api/modules/storage.py | 23 ++++++++++++++++++-----
 python/fedml/cli/modules/storage.py |  2 +-
 2 files changed, 19 insertions(+), 6 deletions(-)

diff --git a/python/fedml/api/modules/storage.py b/python/fedml/api/modules/storage.py
index a928b325b2..94031c163e 100644
--- a/python/fedml/api/modules/storage.py
+++ b/python/fedml/api/modules/storage.py
@@ -110,13 +110,26 @@ def download(data_name, api_key, service, dest_path, show_progress=True) -> FedM
             logging.error(error_message)
             return FedMLResponse(code=ResponseCode.FAILURE, message=error_message)
         download_url = metadata.download_url
-        zip_file_name = data_name + ".zip"
-        path_local = os.path.abspath(zip_file_name)
+        given_extension = os.path.splitext(data_name)[1]
+        is_file = True
+        if(given_extension is None or given_extension ==""):
+            is_file = False
+
+        if not is_file:
+            download_file_name = data_name + ".zip"
+        else:
+            download_file_name = data_name
+        path_local = os.path.abspath(download_file_name)
         dest_path = os.path.abspath(dest_path) if dest_path else data_name
-        if _download_using_presigned_url(download_url, zip_file_name, show_progress=show_progress):
+        if _download_using_presigned_url(download_url, download_file_name, show_progress=show_progress):
             try:
-                shutil.unpack_archive(path_local, dest_path)
-                os.remove(path_local)
+                if not is_file:
+                    shutil.unpack_archive(path_local, dest_path)
+                    os.remove(path_local)
+                else:
+                    if not os.path.exists(dest_path):
+                        os.makedirs(dest_path)
+                    shutil.move(path_local,dest_path)
                 abs_dest_path = os.path.abspath(dest_path)
                 return FedMLResponse(code=ResponseCode.SUCCESS, message=f"Successfully downloaded and unzipped data at "
                                                                         f"{abs_dest_path}", data=abs_dest_path)
diff --git a/python/fedml/cli/modules/storage.py b/python/fedml/cli/modules/storage.py
index af75cda85f..7e060fc12e 100644
--- a/python/fedml/cli/modules/storage.py
+++ b/python/fedml/cli/modules/storage.py
@@ -47,7 +47,7 @@ def validate_argument(ctx, param, value):
 @click.help_option("--help", "-h")
 @click.argument("data_path", nargs=1, callback=validate_argument)
 @click.option("--name", "-n", type=str, help="Name your data to store. If not provided, the name will be the same as "
-                                             "the data file or directory name.")
+                                             "the data file or directory name. For files, extension need not be mentioned!")
 @click.option("--description", "-d", type=str, help="Add description to your data to store. If not provided, "
                                                     "the description will be empty.")
 @click.option("--user_metadata", "-um", type=str, help="User-defined metadata in the form of a dictionary, for instance, "

From cb7da7009f13fdf0191ba7710fdb0b100d90796f Mon Sep 17 00:00:00 2001
From: bhargav191098 <bhargav3514@gmail.com>
Date: Thu, 13 Jun 2024 17:38:05 -0700
Subject: [PATCH 154/282] print statement removal

---
 python/fedml/api/modules/storage.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/python/fedml/api/modules/storage.py b/python/fedml/api/modules/storage.py
index 94031c163e..2d10ff2588 100644
--- a/python/fedml/api/modules/storage.py
+++ b/python/fedml/api/modules/storage.py
@@ -73,8 +73,6 @@ def upload(data_path, api_key, name, description, tag_list, service, show_progre
     if not file_uploaded_url:
         return FedMLResponse(code=ResponseCode.FAILURE, message=f"Failed to upload file: {to_upload_path}")
 
-    print("url: ",file_uploaded_url)
-
     json_data = {
         "datasetName": name,
         "description": description,

From 394906ecf03fe2e221bfba4a7a46c87105d26a35 Mon Sep 17 00:00:00 2001
From: bhargav191098 <bhargav3514@gmail.com>
Date: Fri, 14 Jun 2024 12:33:23 -0700
Subject: [PATCH 155/282] name issue

---
 python/fedml/api/modules/storage.py | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/python/fedml/api/modules/storage.py b/python/fedml/api/modules/storage.py
index 2d10ff2588..3e4219775d 100644
--- a/python/fedml/api/modules/storage.py
+++ b/python/fedml/api/modules/storage.py
@@ -51,10 +51,15 @@ def upload(data_path, api_key, name, description, tag_list, service, show_progre
     else:
         to_upload_path = data_path
         base_name = os.path.basename(to_upload_path)
-        given_extension = os.path.splitext(name)[1]
-        if given_extension is None or given_extension == "":
-            given_extension = os.path.splitext(base_name)[1]
-        name = base_name if name is None else name + given_extension
+        file_extension = os.path.splitext(base_name)[1]
+        given_extension = None
+        if name is not None:
+            given_extension = os.path.splitext(name)[1]
+            if given_extension is None or given_extension == "":
+                name = name + file_extension
+        else:
+            name = base_name
+
         file_name = name
 
     if not to_upload_path:

From 2170797de1235e78f9a92722b495cb01af8d92c2 Mon Sep 17 00:00:00 2001
From: bhargav191098 <bhargav3514@gmail.com>
Date: Fri, 14 Jun 2024 18:13:08 -0700
Subject: [PATCH 156/282] Adding Enum for data type

---
 python/fedml/api/modules/storage.py | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/python/fedml/api/modules/storage.py b/python/fedml/api/modules/storage.py
index 3e4219775d..0729c09edc 100644
--- a/python/fedml/api/modules/storage.py
+++ b/python/fedml/api/modules/storage.py
@@ -4,6 +4,7 @@
 
 import requests
 import math
+from enum import Enum, unique
 
 import requests.exceptions
 import tqdm
@@ -26,6 +27,10 @@ def __init__(self, data: dict):
         self.tag_list = data.get("tags", None)
         self.download_url = data.get("fileUrl", None)
 
+class DataType(Enum):
+    FILE = "file"
+    DIRECTORY = "directory"
+    INVALID = "invalid"
 
 # Todo (alaydshah): Store service name in metadata
 # Todo (alaydshah): If data already exists, don't upload again. Instead suggest to use update command
@@ -41,10 +46,10 @@ def upload(data_path, api_key, name, description, tag_list, service, show_progre
 
     data_type = _get_data_type(data_path)
     
-    if(data_type == "invalid"):
+    if(data_type == DataType.INVALID):
         return FedMLResponse(code=ResponseCode.FAILURE,message="Invalid data path")
 
-    if(data_type == "dir"):
+    if(data_type == DataType.DIRECTORY):
         to_upload_path, message = _archive_data(data_path)
         name = os.path.splitext(os.path.basename(to_upload_path))[0] if name is None else name
         file_name = name + ".zip"
@@ -471,10 +476,10 @@ def _get_storage_service(service):
 
 def _get_data_type(data_path):
     if os.path.isdir(data_path):
-        return "dir"
+        return DataType.DIRECTORY
     elif os.path.isfile(data_path):
-        return "file"
-    return "invalid"
+        return DataType.FILE
+    return DataType.INVALID
 
 
 def _archive_data(data_path: str) -> (str, str):

From 5fb5ed43d42f54b0c47e9a0ae802bcab29197052 Mon Sep 17 00:00:00 2001
From: bhargav191098 <bhargav3514@gmail.com>
Date: Fri, 14 Jun 2024 18:32:05 -0700
Subject: [PATCH 157/282] adding user_id to bucket path

---
 python/fedml/api/modules/storage.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/fedml/api/modules/storage.py b/python/fedml/api/modules/storage.py
index 0729c09edc..33e781be08 100644
--- a/python/fedml/api/modules/storage.py
+++ b/python/fedml/api/modules/storage.py
@@ -70,11 +70,11 @@ def upload(data_path, api_key, name, description, tag_list, service, show_progre
     if not to_upload_path:
         return FedMLResponse(code=ResponseCode.FAILURE, message=message)
 
-
+    #TODO(bhargav191098) - Better done on the backend. Remove and pass file_name once completed on backend.
     dest_path = os.path.join(user_id, file_name)
     file_size = os.path.getsize(to_upload_path)
 
-    file_uploaded_url, message = _upload_multipart(api_key, file_name, to_upload_path, show_progress,
+    file_uploaded_url, message = _upload_multipart(api_key, dest_path, to_upload_path, show_progress,
                                                        out_progress_to_err,
                                                        progress_desc, metadata)
 

From a1af6151947309306a1425b86e54e9e2175ffed6 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Mon, 17 Jun 2024 14:47:10 +0800
Subject: [PATCH 158/282] [CoreEngine] refactor to support to pass the
 communication manager, status center and message center to agents.

---
 .../scheduler/comm_utils/job_monitor.py       | 12 ++-
 .../scheduler/master/base_master_agent.py     | 13 +++
 .../master/base_master_job_runner.py          | 20 ++---
 .../master/base_master_job_runner_manager.py  | 84 ++++++++++++++++---
 .../master/base_master_protocol_manager.py    | 29 +++++--
 .../scheduler/master/cloud_server_manager.py  | 18 +++-
 .../master/master_protocol_manager.py         |  4 +
 .../model_scheduler/master_job_runner.py      |  2 +-
 .../scheduler_core/message_center.py          | 27 +++++-
 .../scheduler_base_job_runner.py              |  2 +-
 .../scheduler_base_protocol_manager.py        |  9 +-
 .../scheduler/scheduler_core/status_center.py |  5 +-
 .../scheduler/slave/base_slave_agent.py       |  1 +
 .../scheduler/slave/client_data_interface.py  |  9 ++
 .../scheduler/slave/united_agents.py          | 11 +--
 15 files changed, 200 insertions(+), 46 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
index a7d5214a02..916883fd0f 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
@@ -166,7 +166,7 @@ def autoscaler_reconcile_after_interval(self):
                     # Get cached token for authorization of autoscale request
                     cached_token = fedml_model_cache.get_end_point_token(e_id, e_name, model_name)
                     if cached_token is None:
-                        logging.error(f"Failed to get the cached token for endpoint {e_id}.")
+                        # logging.error(f"Failed to get the cached token for endpoint {e_id}.")
                         return
 
                     req_header = {
@@ -228,7 +228,7 @@ def monitor_replicas_number():
 
             cached_token = FedMLModelCache.get_instance().get_end_point_token_with_eid(endpoint_id)
             if cached_token is None:
-                logging.error(f"Failed to get the cached token for endpoint {endpoint_id}.")
+                # logging.error(f"Failed to get the cached token for endpoint {endpoint_id}.")
                 return
 
             req_header = {
@@ -338,6 +338,10 @@ def monitor_replicas_perf(edge_id, mqtt_mgr=None):
     def monitor_slave_run_process_status(self):
         try:
             count = 0
+            try:
+                client_data_interface.FedMLClientDataInterface.get_instance().create_job_table()
+            except Exception as e:
+                pass
             job_list = client_data_interface.FedMLClientDataInterface.get_instance().get_jobs_from_db()
             for job in job_list.job_list:
                 count += 1
@@ -447,6 +451,10 @@ def monitor_master_run_process_status(self, server_id, device_info_reporter=None
         try:
             ComputeCacheManager.get_instance().set_redis_params()
             count = 0
+            try:
+                server_data_interface.FedMLServerDataInterface.get_instance().create_job_table()
+            except Exception as e:
+                pass
             job_list = server_data_interface.FedMLServerDataInterface.get_instance().get_jobs_from_db()
             for job in job_list.job_list:
                 count += 1
diff --git a/python/fedml/computing/scheduler/master/base_master_agent.py b/python/fedml/computing/scheduler/master/base_master_agent.py
index d6c6420cf3..39898b5d40 100755
--- a/python/fedml/computing/scheduler/master/base_master_agent.py
+++ b/python/fedml/computing/scheduler/master/base_master_agent.py
@@ -95,6 +95,7 @@ def _initialize_protocol_manager(
         self._init_database()
 
         # Initialize the master protocol
+        self.protocol_mgr.set_parent_agent(self)
         self.protocol_mgr.initialize(
             communication_manager=communication_manager,
             sender_message_queue=sender_message_queue,
@@ -141,3 +142,15 @@ def _init_database(self):
     @abstractmethod
     def _generate_protocol_manager_instance(self, args, agent_config=None):
         return None
+
+    def start_master_server_instance(self, payload):
+        self.protocol_mgr.start_master_server_instance(payload)
+
+    def generate_agent_instance(self):
+        return FedMLBaseMasterAgent()
+
+    def process_job_complete_status(self, run_id, topic, payload):
+        if topic in self.protocol_mgr.get_subscribed_topics():
+            message_handler = self.protocol_mgr.get_listener_handler(topic)
+            if message_handler is not None:
+                message_handler(topic, payload)
diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py
index fe2d426af4..1072e6b045 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py
@@ -42,13 +42,13 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id
             is_master_runner=True
         )
 
-        self.run_edge_id_status_queue = multiprocessing.Manager().Queue(-1)
-        self.run_metrics_queue = multiprocessing.Manager().Queue(-1)
-        self.run_events_queue = multiprocessing.Manager().Queue(-1)
-        self.run_artifacts_queue = multiprocessing.Manager().Queue(-1)
-        self.run_logs_queue = multiprocessing.Manager().Queue(-1)
-        self.run_edge_device_info_queue = multiprocessing.Manager().Queue(-1)
-        self.run_edge_device_info_global_queue = multiprocessing.Manager().Queue(-1)
+        self.run_edge_id_status_queue = multiprocessing.Queue()
+        self.run_metrics_queue = multiprocessing.Queue()
+        self.run_events_queue = multiprocessing.Queue()
+        self.run_artifacts_queue = multiprocessing.Queue()
+        self.run_logs_queue = multiprocessing.Queue()
+        self.run_edge_device_info_queue = multiprocessing.Queue()
+        self.run_edge_device_info_global_queue = multiprocessing.Queue()
         self.run_extend_queue_list = None
         self.async_check_timeout = 0
         self.enable_async_cluster = False
@@ -453,7 +453,7 @@ def put_run_edge_device_info_to_queue(self, run_id, edge_id, device_info):
         if int(edge_id) in edge_ids or str(edge_id) in edge_ids:
             run_id_str = str(run_id)
             if self.run_edge_device_info_queue is None:
-                self.run_edge_device_info_queue = multiprocessing.Manager().Queue(-1)
+                self.run_edge_device_info_queue = multiprocessing.Queue()
             self.run_edge_device_info_queue.put(device_info)
 
     def should_continue_run_job(self, run_id):
@@ -581,7 +581,7 @@ def callback_run_logs(self, topic, payload):
         run_id = str(topic).split('/')[-1]
         run_id_str = str(run_id)
         if self.run_logs_queue is None:
-            self.run_logs_queue = multiprocessing.Manager().Queue(-1)
+            self.run_logs_queue = multiprocessing.Queue()
         self.run_logs_queue.put(payload)
 
     def callback_run_metrics(self, topic, payload):
@@ -589,7 +589,7 @@ def callback_run_metrics(self, topic, payload):
         run_id = str(topic).split('/')[-1]
         run_id_str = str(run_id)
         if self.run_metrics_queue is None:
-            self.run_metrics_queue = multiprocessing.Manager().Queue(-1)
+            self.run_metrics_queue = multiprocessing.Queue()
         self.run_metrics_queue.put(payload)
 
     # def send_training_request_to_edges(self, active_edge_info_dict):
diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
index 08ef1d640e..dfaf29b5de 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
@@ -1,29 +1,40 @@
 import base64
 import json
 import logging
+import multiprocessing
 import os
+import platform
 import time
 from abc import ABC
 from multiprocessing import Process
+
+import fedml
 from .cloud_server_manager import FedMLCloudServerManager
 from ..comm_utils.run_process_utils import RunProcessUtils
 from ..scheduler_core.scheduler_base_job_runner_manager import FedMLSchedulerBaseJobRunnerManager
+from ..scheduler_core.account_manager import FedMLAccountManager
 
 
 class FedMLBaseMasterJobRunnerManager(FedMLSchedulerBaseJobRunnerManager, ABC):
     def __init__(self):
         FedMLSchedulerBaseJobRunnerManager.__init__(self)
+        if not hasattr(self, "master_agent_instance_map"):
+            self.master_agent_instance_map = dict()
 
     # Override
     def start_job_runner(
             self, run_id, request_json, args=None, edge_id=None, is_server_job=False,
             sender_message_queue=None, listener_message_queue=None, status_center_queue=None,
-            should_start_cloud_server=False, use_local_process_as_cloud_server=False,
-            cuda_visible_gpu_ids_str=None
+            communication_manager=None, master_agent_instance=None, should_start_cloud_server=False,
+            use_local_process_as_cloud_server=False, cuda_visible_gpu_ids_str=None
     ):
         if should_start_cloud_server:
-            self._start_cloud_server(args, run_id, request_json, edge_id=edge_id,
-                                     use_local_process_as_cloud_server=use_local_process_as_cloud_server)
+            self._start_cloud_server(
+                args, run_id, request_json, edge_id=edge_id,
+                use_local_process_as_cloud_server=use_local_process_as_cloud_server,
+                sender_message_queue=sender_message_queue, listener_message_queue=listener_message_queue,
+                status_center_queue=status_center_queue, communication_manager=communication_manager,
+                master_agent_instance=master_agent_instance)
             return
 
         run_id_str = str(run_id)
@@ -51,9 +62,14 @@ def stop_job_runner(
                 args=(run_id, server_id, args.agent_config))
             stopping_process.start()
 
+            run_id_str = str(run_id)
+            if self.master_agent_instance_map.get(run_id_str, None) is not None:
+                self.master_agent_instance_map.pop(run_id_str)
+
             if run_as_cloud_server:
                 time.sleep(1)
-                RunProcessUtils.kill_process(os.getpid())
+                RunProcessUtils.kill_process(self.cloud_run_process_map[run_id_str].pid)
+                #RunProcessUtils.kill_process(os.getpid())
 
     def complete_job_runner(
             self, run_id, args=None, server_id=None, request_json=None,
@@ -68,13 +84,20 @@ def complete_job_runner(
                 args=(run_id, server_id, args.agent_config))
             stopping_process.start()
 
+            run_id_str = str(run_id)
+            if self.master_agent_instance_map.get(run_id_str, None) is not None:
+                self.master_agent_instance_map.pop(run_id_str)
+
             if run_as_cloud_server:
                 time.sleep(1)
                 RunProcessUtils.kill_process(os.getpid())
 
     def _start_cloud_server(
             self, args, run_id, request_json, edge_id=None,
-            use_local_process_as_cloud_server=False
+            use_local_process_as_cloud_server=False,
+            sender_message_queue=None, listener_message_queue=None,
+            status_center_queue=None, communication_manager=None,
+            master_agent_instance=None
     ):
         run_id_str = str(run_id)
         cloud_server_mgr = FedMLCloudServerManager(
@@ -85,19 +108,47 @@ def _start_cloud_server(
             self.cloud_run_process_map[run_id_str] = Process(target=cloud_server_mgr.start_cloud_server_process_entry)
             self.cloud_run_process_map[run_id_str].start()
         else:
+            cloud_device_id = request_json.get("cloudServerDeviceId", "0")
             message_bytes = json.dumps(request_json).encode("ascii")
             base64_bytes = base64.b64encode(message_bytes)
-            runner_cmd_encoded = base64_bytes.decode("ascii")
-            cloud_device_id = request_json.get("cloudServerDeviceId", "0")
+            payload = base64_bytes.decode("ascii")
+
+            logging.info("start the master server: {}".format(payload))
 
-            logging.info("runner_cmd_encoded: {}".format(runner_cmd_encoded))
+            if platform.system() == "Windows":
+                self.cloud_run_process_map[run_id_str] = multiprocessing.Process(
+                    target=cloud_server_mgr.start_local_master_server,
+                    args=(args.account_id, args.api_key, args.os_name, args.version,
+                          cloud_device_id, run_id, payload,
+                          communication_manager, sender_message_queue,
+                          status_center_queue, master_agent_instance))
+            else:
+                self.cloud_run_process_map[run_id_str] = fedml.get_process(
+                    target=cloud_server_mgr.start_local_master_server,
+                    args=(args.account_id, args.api_key, args.os_name, args.version,
+                          cloud_device_id, run_id, payload,
+                          communication_manager, sender_message_queue,
+                          status_center_queue, master_agent_instance))
 
-            self.cloud_run_process_map[run_id_str] = Process(
-                target=cloud_server_mgr.start_local_cloud_server,
-                args=(args.account_id, args.version, cloud_device_id, runner_cmd_encoded))
             self.cloud_run_process_map[run_id_str].start()
             time.sleep(1)
 
+    def start_local_master_server(
+            self, user, api_key, os_name, version, cloud_device_id, run_id, payload,
+            communication_manager=None, sender_message_queue=None, status_center_queue=None,
+            master_agent_instance=None
+    ):
+        if master_agent_instance is None:
+            return
+        master_agent_instance.login(
+            user, api_key=api_key, device_id=cloud_device_id, os_name=os_name,
+            role=FedMLAccountManager.ROLE_CLOUD_SERVER,
+            communication_manager=None,
+            sender_message_queue=None,
+            status_center_queue=None)
+        self.master_agent_instance_map[str(run_id)] = master_agent_instance
+        master_agent_instance.start_master_server_instance(payload)
+
     def callback_run_logs(self, run_id, topic, payload):
         run_id_str = str(run_id)
         if self.job_runners.get(run_id_str, None) is not None:
@@ -107,3 +158,12 @@ def callback_run_metrics(self, run_id, topic, payload):
         run_id_str = str(run_id)
         if self.job_runners.get(run_id_str, None) is not None:
             self.job_runners[run_id_str].callback_run_metrics(topic, payload)
+
+    def callback_proxy_unknown_messages(self, run_id, topic, payload):
+        run_id_str = str(run_id)
+        master_agent = self.master_agent_instance_map.get(run_id_str, None)
+        if master_agent is None:
+            return
+        master_agent.process_job_complete_status(run_id, topic, payload)
+
+
diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
index a6b47855c6..2d00e442a0 100755
--- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -2,6 +2,8 @@
 import base64
 import json
 import logging
+import time
+
 import fedml
 from ..comm_utils.constants import SchedulerConstants
 from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
@@ -212,7 +214,8 @@ def callback_start_train(self, topic=None, payload=None):
                 run_id, request_json, args=self.args, edge_id=self.edge_id,
                 sender_message_queue=self.message_center.get_sender_message_queue(),
                 listener_message_queue=self.get_listener_message_queue(),
-                status_center_queue=self.get_status_queue()
+                status_center_queue=self.get_status_queue(),
+                communication_manager=self.get_listener_communication_manager()
             )
 
             process = self._get_job_runner_manager().get_runner_process(run_id)
@@ -227,16 +230,22 @@ def callback_start_train(self, topic=None, payload=None):
                 run_id, request_json, args=self.args, edge_id=self.edge_id,
                 sender_message_queue=self.message_center.get_sender_message_queue(),
                 listener_message_queue=self.get_listener_message_queue(),
-                status_center_queue=self.get_status_queue(), should_start_cloud_server=True,
+                status_center_queue=self.get_status_queue(),
+                communication_manager=self.get_listener_communication_manager(),
+                master_agent_instance=self.generate_agent_instance(),
+                should_start_cloud_server=True,
                 use_local_process_as_cloud_server=self.use_local_process_as_cloud_server
             )
 
             process = self._get_job_runner_manager().get_runner_process(run_id, is_cloud_server=True)
             if process is not None:
                 GeneralConstants.save_run_process(run_id, process.pid, is_master=True)
+
+            self.send_status_msg_to_edges(edge_id_list, run_id, request_json.get("server_id"))
         elif self.run_as_cloud_server:
             self.server_agent_id = request_json.get("cloud_agent_id", self.edge_id)
             self.start_request_json = json.dumps(request_json)
+            server_id = request_json.get("server_id", self.edge_id)
             run_id = request_json["runId"]
             run_id_str = str(run_id)
 
@@ -248,11 +257,10 @@ def callback_start_train(self, topic=None, payload=None):
                 run_id, request_json, args=self.args, edge_id=self.edge_id,
                 sender_message_queue=self.message_center.get_sender_message_queue(),
                 listener_message_queue=self.get_listener_message_queue(),
-                status_center_queue=self.get_status_queue()
+                status_center_queue=self.get_status_queue(),
+                communication_manager=self.get_listener_communication_manager()
             )
 
-            self.send_status_msg_to_edges(edge_id_list, run_id, self.edge_id)
-
     def callback_stop_train(self, topic, payload, use_payload=None):
         # Print the payload
         logging.info(
@@ -390,6 +398,9 @@ def callback_request_job_status(self, topic, payload):
     def callback_request_device_status_in_job(self, topic, payload):
         self.response_device_status_in_job(topic, payload)
 
+    def callback_proxy_unknown_messages(self, run_id, topic, payload):
+        self._get_job_runner_manager().callback_proxy_unknown_messages(run_id, topic, payload)
+
     def process_extra_queues(self, extra_queues):
         self.rebuild_status_center(extra_queues[0])
 
@@ -550,7 +561,7 @@ def send_status_check_msg(self, run_id, edge_id, server_id, context=None):
     def send_status_msg_to_edges(self, edge_id_list, run_id, server_id, context=None):
         # Send status message to all edges
         for edge_id in edge_id_list:
-            self.send_status_check_msg(run_id, edge_id, self.edge_id, context=context)
+            self.send_status_check_msg(run_id, edge_id, server_id, context=context)
 
     def report_exception_status(self, run_id):
         self.mlops_metrics.report_job_status(run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION)
@@ -562,3 +573,9 @@ def get_start_train_topic_with_edge_id(edge_id):
     @abstractmethod
     def _generate_protocol_manager_instance(self, args, agent_config=None):
         return None
+
+    def start_master_server_instance(self, payload):
+        super().on_agent_communication_connected(None)
+
+        self.receive_message_json(self.topic_start_train, payload)
+
diff --git a/python/fedml/computing/scheduler/master/cloud_server_manager.py b/python/fedml/computing/scheduler/master/cloud_server_manager.py
index aa9c07e84f..9c35f233fd 100755
--- a/python/fedml/computing/scheduler/master/cloud_server_manager.py
+++ b/python/fedml/computing/scheduler/master/cloud_server_manager.py
@@ -7,6 +7,7 @@
 
 import fedml
 from fedml.computing.scheduler.comm_utils.sys_utils import get_python_program
+from fedml.computing.scheduler.scheduler_core.account_manager import FedMLAccountManager
 
 
 class FedMLCloudServerManager:
@@ -32,7 +33,7 @@ def __init__(self, args, run_id=None, edge_id=None, request_json=None, agent_con
         self.cloud_server_name = None
 
     @staticmethod
-    def start_local_cloud_server(user, version, cloud_device_id, runner_cmd_encoded):
+    def start_local_cloud_server(user, api_key, os_name, version, cloud_device_id, runner_cmd_encoded):
         if platform.system() != "Windows":
             os.setsid()
 
@@ -40,9 +41,22 @@ def start_local_cloud_server(user, version, cloud_device_id, runner_cmd_encoded)
         pip_source_dir = os.path.dirname(__file__)
         login_cmd = os.path.join(pip_source_dir, "server_login.py")
         run_cmd = f"{get_python_program()} -W ignore {login_cmd} -t login -r cloud_server -u {str(user)} " \
-                  f"-v {version} -id {cloud_device_id} -rc {runner_cmd_encoded}"
+                  f"-k {api_key} -v {version} -id {cloud_device_id} -rc {runner_cmd_encoded}"
         os.system(run_cmd)
 
+    def start_local_master_server(
+            self, user, api_key, os_name, version, cloud_device_id, run_id, payload,
+            communication_manager=None, sender_message_queue=None, status_center_queue=None,
+            master_agent_instance=None
+    ):
+        if platform.system() != "Windows":
+            os.setsid()
+
+        master_agent_instance.login(
+            user, api_key=api_key, device_id=cloud_device_id, os_name=os_name,
+            role=FedMLAccountManager.ROLE_CLOUD_SERVER, runner_cmd=payload,
+            communication_manager=None, sender_message_queue=None, status_center_queue=None)
+
     def start_cloud_server_process_entry(self):
         try:
             self.start_cloud_server_process()
diff --git a/python/fedml/computing/scheduler/master/master_protocol_manager.py b/python/fedml/computing/scheduler/master/master_protocol_manager.py
index eb8cde239f..c941502b9c 100755
--- a/python/fedml/computing/scheduler/master/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/master_protocol_manager.py
@@ -42,3 +42,7 @@ def _process_job_complete_status(self, run_id, server_id, complete_payload):
             run_id, args=self.args, server_id=server_id, request_json=complete_payload,
             run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server,
             use_local_process_as_cloud_server=self.use_local_process_as_cloud_server)
+
+    def generate_agent_instance(self):
+        from .master_agent import FedMLLaunchMasterAgent
+        return FedMLLaunchMasterAgent()
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index 61cce1b39c..e32e7421f6 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -50,7 +50,7 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id
         self.replica_controller = None
         self.deployed_replica_payload = None
         self.slave_deployment_results_map = dict()
-        self.deployment_result_queue = multiprocessing.Manager().Queue(-1)
+        self.deployment_result_queue = multiprocessing.Queue()
         self.is_fresh_endpoint = True
 
     # Override
diff --git a/python/fedml/computing/scheduler/scheduler_core/message_center.py b/python/fedml/computing/scheduler/scheduler_core/message_center.py
index 2bfa3b514f..9229b2c0da 100755
--- a/python/fedml/computing/scheduler/scheduler_core/message_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/message_center.py
@@ -133,7 +133,7 @@ def get_sender_message_queue(self):
         return self.sender_message_queue
 
     def start_sender(self, message_center_name=None):
-        self.sender_message_queue = multiprocessing.Manager().Queue(-1)
+        self.sender_message_queue = multiprocessing.Queue()
         self.message_event = multiprocessing.Event()
         self.message_event.clear()
         message_center = FedMLMessageCenter(agent_config=self.sender_agent_config,
@@ -258,10 +258,16 @@ def run_sender(self, message_event, message_queue, message_center_name):
 
         self.release_sender_mqtt_mgr()
 
+    def get_protocol_communication_manager(self):
+        return None
+
     def setup_listener_mqtt_mgr(self):
         if self.listener_mqtt_mgr is not None:
             return
 
+        # self.listener_mqtt_mgr = self.get_protocol_communication_manager()
+        # return
+
         self.listener_mqtt_mgr = MqttManager(
             self.listener_agent_config["mqtt_config"]["BROKER_HOST"],
             self.listener_agent_config["mqtt_config"]["BROKER_PORT"],
@@ -274,7 +280,11 @@ def setup_listener_mqtt_mgr(self):
         self.listener_mqtt_mgr.connect()
         self.listener_mqtt_mgr.loop_start()
 
+    def get_listener_communication_manager(self):
+        return self.listener_mqtt_mgr
+
     def release_listener_mqtt_mgr(self):
+        #return
         try:
             if self.listener_mqtt_mgr is not None:
                 self.listener_mqtt_mgr.loop_stop()
@@ -297,6 +307,9 @@ def remove_message_listener(self, topic):
             self.listener_topics.remove(topic)
             self.listener_handler_funcs.pop(topic)
 
+    def get_listener_handler(self, topic):
+        return self.listener_handler_funcs.get(topic)
+
     def get_message_runner(self):
         return None
 
@@ -304,7 +317,7 @@ def get_listener_message_queue(self):
         return self.listener_message_queue
 
     def setup_listener_message_queue(self):
-        self.listener_message_queue = multiprocessing.Manager().Queue(-1)
+        self.listener_message_queue = multiprocessing.Queue()
 
     def start_listener(
             self, sender_message_queue=None, listener_message_queue=None,
@@ -315,13 +328,14 @@ def start_listener(
 
         if listener_message_queue is None:
             if self.listener_message_queue is None:
-                self.listener_message_queue = multiprocessing.Manager().Queue(-1)
+                self.listener_message_queue = multiprocessing.Queue()
         else:
             self.listener_message_queue = listener_message_queue
         self.listener_message_event = multiprocessing.Event()
         self.listener_message_event.clear()
         self.listener_agent_config = agent_config
-        message_runner = self.get_message_runner()
+        # message_runner = self.get_message_runner()
+        message_runner = self
         message_runner.listener_agent_config = agent_config
         if platform.system() == "Windows":
             self.listener_message_center_process = multiprocessing.Process(
@@ -427,6 +441,11 @@ def run_listener_dispatcher(
                 message_handler_func_name = self.listener_handler_funcs.get(message_entity.topic, None)
                 if message_handler_func_name is not None:
                     methodcaller(message_handler_func_name, message_entity.topic, message_entity.payload)(self)
+                else:
+                    if hasattr(self, "callback_proxy_unknown_messages") and \
+                            self.callback_proxy_unknown_messages is not None:
+                        self.callback_proxy_unknown_messages(
+                            message_entity.run_id, message_entity.topic, message_entity.payload)
             except Exception as e:
                 if message_entity is not None:
                     logging.info(
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
index 80de5c5b18..ffaee555af 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -210,7 +210,7 @@ def retrieve_and_unzip_package(self, package_name, package_url):
         # Open a process to download the package so that we can avoid the request is blocked and check the timeout.
         from multiprocessing import Process
         completed_event = multiprocessing.Event()
-        info_queue = multiprocessing.Manager().Queue(-1)
+        info_queue = multiprocessing.Queue()
         if platform.system() == "Windows":
             download_process = multiprocessing.Process(
                 target=self.download_package_proc,
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
index f80508a509..80d67f33cb 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
@@ -45,6 +45,7 @@ def __init__(self, args, agent_config=None, is_master=False):
         self.mlops_metrics = None
         self.status_reporter = None
         self.user_name = args.user_name
+        self.parent_agent = None
 
         fedml._init_multiprocessing()
 
@@ -282,7 +283,7 @@ def get_status_runner(self):
 
         return None
 
-    def get_get_protocol_communication_manager(self):
+    def get_protocol_communication_manager(self):
         return self.communication_mgr
 
     def get_protocol_sender_message_queue(self):
@@ -291,6 +292,12 @@ def get_protocol_sender_message_queue(self):
     def get_protocol_status_center_queue(self):
         return self.get_status_queue()
 
+    def get_subscribed_topics(self):
+        return self.subscribed_topics
+
     def send_agent_active_msg(self, edge_id):
         active_msg = {"ID": edge_id, "status": GeneralConstants.MSG_MLOPS_SERVER_STATUS_IDLE}
         self.message_center.send_message_json(self.topic_active, json.dumps(active_msg))
+
+    def set_parent_agent(self, parent_agent):
+        self.parent_agent = parent_agent
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py
index 7e0cf1f98f..2a7a76c2ca 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py
@@ -108,12 +108,13 @@ def get_status_runner(self):
 
     def start_status_center(self, sender_message_center_queue=None,
                             listener_message_center_queue=None, is_slave_agent=False):
-        self.status_queue = multiprocessing.Manager().Queue(-1)
+        self.status_queue = multiprocessing.Queue()
         self.status_event = multiprocessing.Event()
         self.status_event.clear()
         self.status_sender_message_center_queue = sender_message_center_queue
         self.status_listener_message_center_queue = listener_message_center_queue
-        self.status_runner = self.get_status_runner()
+        #self.status_runner = self.get_status_runner()
+        self.status_runner = self
         target_func = self.status_runner.run_status_dispatcher if not is_slave_agent else \
             self.status_runner.run_status_dispatcher_in_slave
         if platform.system() == "Windows":
diff --git a/python/fedml/computing/scheduler/slave/base_slave_agent.py b/python/fedml/computing/scheduler/slave/base_slave_agent.py
index a8ac9fa1cb..fed10fa039 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_agent.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_agent.py
@@ -96,6 +96,7 @@ def _initialize_protocol_manager(
         self._init_database()
 
         # Initialize the master protocol
+        self.protocol_mgr.set_parent_agent(self)
         self.protocol_mgr.initialize(
             communication_manager=communication_manager,
             sender_message_queue=sender_message_queue,
diff --git a/python/fedml/computing/scheduler/slave/client_data_interface.py b/python/fedml/computing/scheduler/slave/client_data_interface.py
index 0e9e84381a..74bf7a64a3 100755
--- a/python/fedml/computing/scheduler/slave/client_data_interface.py
+++ b/python/fedml/computing/scheduler/slave/client_data_interface.py
@@ -343,6 +343,15 @@ def handle_database_compatibility(self):
 
         self.close_job_db()
 
+    def check_if_table_exist(self, current_db_cursor):
+        results = current_db_cursor.execute("select * from sqlite_master where type='table' and name='jobs';")
+        if results is None:
+            return False
+        result_len = 0
+        for row in results:
+            result_len += 1
+        return False if result_len == 0 else True
+
     def get_agent_status(self, edge_id=0):
         self.open_job_db()
         enabled = 1
diff --git a/python/fedml/computing/scheduler/slave/united_agents.py b/python/fedml/computing/scheduler/slave/united_agents.py
index 7135925ec8..64ba3c5465 100755
--- a/python/fedml/computing/scheduler/slave/united_agents.py
+++ b/python/fedml/computing/scheduler/slave/united_agents.py
@@ -31,8 +31,8 @@ def login(self, userid, api_key=None, device_id=None,
         )
 
         # Get the communication manager, sender message queue
-        shared_communication_mgr = launch_slave_agent.get_protocol_manager().get_get_protocol_communication_manager()
-        shared_sender_message_queue = launch_slave_agent.get_protocol_manager().get_protocol_sender_message_queue()
+        shared_communication_mgr = launch_slave_agent.get_protocol_manager().get_protocol_communication_manager()
+        shared_slave_sender_message_queue = launch_slave_agent.get_protocol_manager().get_protocol_sender_message_queue()
 
         # Login with the launch master role based on
         # the shared communication manager, sender message center
@@ -41,12 +41,13 @@ def login(self, userid, api_key=None, device_id=None,
             os_name=os_name, runner_cmd=runner_cmd,
             role=FedMLAccountManager.ROLE_GPU_MASTER_SERVER,
             communication_manager=shared_communication_mgr,
-            sender_message_queue=shared_sender_message_queue
+            sender_message_queue=None
         )
 
         # Get the status center queue
         shared_slave_status_center_queue = launch_slave_agent.get_protocol_manager().get_protocol_status_center_queue()
         shared_master_status_center_queue = launch_master_agent.get_protocol_manager().get_protocol_status_center_queue()
+        shared_master_sender_message_queue = launch_master_agent.get_protocol_manager().get_protocol_sender_message_queue()
 
         # Login with the deployment master role based on
         # the shared communication manager, sender message center, status center
@@ -54,7 +55,7 @@ def login(self, userid, api_key=None, device_id=None,
             userid, api_key=api_key, device_id=login_result.device_id,
             os_name=os_name, role=FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM,
             communication_manager=shared_communication_mgr,
-            sender_message_queue=shared_sender_message_queue,
+            sender_message_queue=shared_master_sender_message_queue,
             status_center_queue=shared_master_status_center_queue
         )
 
@@ -64,7 +65,7 @@ def login(self, userid, api_key=None, device_id=None,
             userid, api_key=api_key, device_id=login_result.device_id,
             os_name=os_name, role=FedMLAccountManager.ROLE_DEPLOY_WORKER_ON_PREM,
             communication_manager=shared_communication_mgr,
-            sender_message_queue=shared_sender_message_queue,
+            sender_message_queue=shared_slave_sender_message_queue,
             status_center_queue=shared_slave_status_center_queue
         )
 

From 6e8788c3300f40002d5cd06f18f0f247a9d33422 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Mon, 17 Jun 2024 14:54:54 +0800
Subject: [PATCH 159/282] [CoreEngine] refactor to support to pass the
 communication manager, status center and message center to agents.

---
 .../scheduler/master/base_master_job_runner_manager.py         | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
index dfaf29b5de..b67f0e1e21 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
@@ -69,7 +69,6 @@ def stop_job_runner(
             if run_as_cloud_server:
                 time.sleep(1)
                 RunProcessUtils.kill_process(self.cloud_run_process_map[run_id_str].pid)
-                #RunProcessUtils.kill_process(os.getpid())
 
     def complete_job_runner(
             self, run_id, args=None, server_id=None, request_json=None,
@@ -90,7 +89,7 @@ def complete_job_runner(
 
             if run_as_cloud_server:
                 time.sleep(1)
-                RunProcessUtils.kill_process(os.getpid())
+                RunProcessUtils.kill_process(self.cloud_run_process_map[run_id_str].pid)
 
     def _start_cloud_server(
             self, args, run_id, request_json, edge_id=None,

From 78e310c2fcf94aa828573dc577da434c861b6651 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Mon, 17 Jun 2024 21:02:32 +0800
Subject: [PATCH 160/282] [CoreEngine] stop the status center, message center
 and other processes when the job is completed in the master server.

---
 python/fedml/api/api_test.py                  |  6 +--
 .../scheduler/comm_utils/run_process_utils.py |  4 +-
 .../scheduler/master/base_master_agent.py     | 17 ++++++--
 .../master/base_master_job_runner_manager.py  | 25 +++++------
 .../master/base_master_protocol_manager.py    |  5 +--
 .../scheduler/master/cloud_server_manager.py  |  6 ++-
 .../scheduler/master/server_login.py          |  1 +
 .../scheduler_core/message_center.py          | 37 +++++++++++-----
 .../scheduler_base_protocol_manager.py        | 43 ++++++++++++++++---
 .../scheduler/scheduler_core/status_center.py | 41 ++++++++++++------
 .../status_manager_protocols.py               |  3 ++
 .../scheduler/slave/base_slave_agent.py       | 12 ++++--
 .../slave/base_slave_protocol_manager.py      |  3 --
 .../scheduler/slave/united_agents.py          |  8 +++-
 14 files changed, 149 insertions(+), 62 deletions(-)

diff --git a/python/fedml/api/api_test.py b/python/fedml/api/api_test.py
index 5a01a76448..1aa5ac3767 100755
--- a/python/fedml/api/api_test.py
+++ b/python/fedml/api/api_test.py
@@ -4,9 +4,9 @@
 import fedml
 
 # Login
-fedml.set_env_version("test")
+fedml.set_env_version("local")
 fedml.set_local_on_premise_platform_port(18080)
-error_code, error_msg = fedml.api.fedml_login(api_key="")
+error_code, error_msg = fedml.api.fedml_login(api_key="")
 if error_code != 0:
     print("API Key is invalid!")
     exit(1)
@@ -19,7 +19,7 @@
 
 # Launch job
 launch_result_list = list()
-for i in range(0, 10):
+for i in range(0, 1):
     launch_result = fedml.api.launch_job(yaml_file)
     launch_result_list.append(launch_result)
     # launch_result = fedml.api.launch_job_on_cluster(yaml_file, "alex-cluster")
diff --git a/python/fedml/computing/scheduler/comm_utils/run_process_utils.py b/python/fedml/computing/scheduler/comm_utils/run_process_utils.py
index 05cc342e36..6dd575f307 100644
--- a/python/fedml/computing/scheduler/comm_utils/run_process_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/run_process_utils.py
@@ -135,13 +135,15 @@ def save_run_process(run_id, process_id, data_dir, info_dir,
             pass
 
     @staticmethod
-    def kill_process(process_id):
+    def kill_process(process_id, exclude_current_pid=False):
         try:
             process = psutil.Process(process_id)
             if process is None:
                 return
             child_processes = process.children(recursive=True)
             for sub_process in child_processes:
+                if exclude_current_pid and sub_process.pid == os.getpid():
+                    continue
                 if platform.system() == 'Windows':
                     os.system("taskkill /PID {} /T /F".format(sub_process.pid))
                 else:
diff --git a/python/fedml/computing/scheduler/master/base_master_agent.py b/python/fedml/computing/scheduler/master/base_master_agent.py
index 39898b5d40..4fb3a5e755 100755
--- a/python/fedml/computing/scheduler/master/base_master_agent.py
+++ b/python/fedml/computing/scheduler/master/base_master_agent.py
@@ -24,7 +24,8 @@ def __init__(self):
     def login(
             self, user_id, api_key=None, device_id=None,
             os_name=None, role=None, runner_cmd=None,
-            communication_manager=None, sender_message_queue=None, status_center_queue=None
+            communication_manager=None, sender_message_queue=None,
+            status_center_queue=None, sender_message_event=None
     ):
         # Login account
         login_result = FedMLAccountManager.get_instance().login(
@@ -52,7 +53,8 @@ def login(
             self._initialize_protocol_manager(
                 communication_manager=communication_manager,
                 sender_message_queue=sender_message_queue,
-                status_center_queue=status_center_queue)
+                status_center_queue=status_center_queue,
+                sender_message_event=sender_message_event)
         except Exception as e:
             FedMLAccountManager.write_login_failed_file(is_client=False)
             self.protocol_mgr.stop()
@@ -67,6 +69,9 @@ def logout():
         GeneralConstants.cleanup_run_process(None, is_master=True)
         sys_utils.cleanup_all_fedml_server_api_processes()
 
+    def stop(self, kill_process=False):
+        self.protocol_mgr.stop(kill_process=kill_process)
+
     def _create_protocol_manager(self, role, login_result):
         if self.protocol_mgr is not None:
             return
@@ -89,7 +94,8 @@ def _create_protocol_manager(self, role, login_result):
         self.protocol_mgr.use_local_process_as_cloud_server = self.use_local_process_as_cloud_server
 
     def _initialize_protocol_manager(
-            self, communication_manager=None, sender_message_queue=None, status_center_queue=None
+            self, communication_manager=None, sender_message_queue=None,
+            status_center_queue=None, sender_message_event=None
     ):
         # Init local database
         self._init_database()
@@ -99,7 +105,8 @@ def _initialize_protocol_manager(
         self.protocol_mgr.initialize(
             communication_manager=communication_manager,
             sender_message_queue=sender_message_queue,
-            status_center_queue=status_center_queue)
+            status_center_queue=status_center_queue,
+            sender_message_event=sender_message_event)
 
         # Report the IDLE status to MLOps
         self.mlops_metrics.report_server_training_status(
@@ -150,6 +157,8 @@ def generate_agent_instance(self):
         return FedMLBaseMasterAgent()
 
     def process_job_complete_status(self, run_id, topic, payload):
+        if self.protocol_mgr is None:
+            return
         if topic in self.protocol_mgr.get_subscribed_topics():
             message_handler = self.protocol_mgr.get_listener_handler(topic)
             if message_handler is not None:
diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
index b67f0e1e21..664fb4671e 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
@@ -57,13 +57,15 @@ def stop_job_runner(
         super().stop_job_runner(run_id)
 
         if run_as_cloud_agent or run_as_cloud_server:
-            stopping_process = Process(
-                target=FedMLCloudServerManager.stop_cloud_server,
-                args=(run_id, server_id, args.agent_config))
-            stopping_process.start()
+            if not use_local_process_as_cloud_server:
+                stopping_process = Process(
+                    target=FedMLCloudServerManager.stop_cloud_server,
+                    args=(run_id, server_id, args.agent_config))
+                stopping_process.start()
 
             run_id_str = str(run_id)
             if self.master_agent_instance_map.get(run_id_str, None) is not None:
+                self.master_agent_instance_map.get(run_id_str).stop()
                 self.master_agent_instance_map.pop(run_id_str)
 
             if run_as_cloud_server:
@@ -78,19 +80,17 @@ def complete_job_runner(
         super().complete_job_runner(run_id)
 
         if run_as_cloud_agent or run_as_cloud_server:
-            stopping_process = Process(
-                target=FedMLCloudServerManager.stop_cloud_server,
-                args=(run_id, server_id, args.agent_config))
-            stopping_process.start()
+            if not use_local_process_as_cloud_server:
+                stopping_process = Process(
+                    target=FedMLCloudServerManager.stop_cloud_server,
+                    args=(run_id, server_id, args.agent_config))
+                stopping_process.start()
 
             run_id_str = str(run_id)
             if self.master_agent_instance_map.get(run_id_str, None) is not None:
+                self.master_agent_instance_map.get(run_id_str).stop(kill_process=True)
                 self.master_agent_instance_map.pop(run_id_str)
 
-            if run_as_cloud_server:
-                time.sleep(1)
-                RunProcessUtils.kill_process(self.cloud_run_process_map[run_id_str].pid)
-
     def _start_cloud_server(
             self, args, run_id, request_json, edge_id=None,
             use_local_process_as_cloud_server=False,
@@ -111,6 +111,7 @@ def _start_cloud_server(
             message_bytes = json.dumps(request_json).encode("ascii")
             base64_bytes = base64.b64encode(message_bytes)
             payload = base64_bytes.decode("ascii")
+            self.master_agent_instance_map[str(run_id)] = master_agent_instance
 
             logging.info("start the master server: {}".format(payload))
 
diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
index 2d00e442a0..2f8a4c5838 100755
--- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -2,7 +2,6 @@
 import base64
 import json
 import logging
-import time
 
 import fedml
 from ..comm_utils.constants import SchedulerConstants
@@ -240,8 +239,6 @@ def callback_start_train(self, topic=None, payload=None):
             process = self._get_job_runner_manager().get_runner_process(run_id, is_cloud_server=True)
             if process is not None:
                 GeneralConstants.save_run_process(run_id, process.pid, is_master=True)
-
-            self.send_status_msg_to_edges(edge_id_list, run_id, request_json.get("server_id"))
         elif self.run_as_cloud_server:
             self.server_agent_id = request_json.get("cloud_agent_id", self.edge_id)
             self.start_request_json = json.dumps(request_json)
@@ -261,6 +258,8 @@ def callback_start_train(self, topic=None, payload=None):
                 communication_manager=self.get_listener_communication_manager()
             )
 
+            self.send_status_msg_to_edges(edge_id_list, run_id, server_id)
+
     def callback_stop_train(self, topic, payload, use_payload=None):
         # Print the payload
         logging.info(
diff --git a/python/fedml/computing/scheduler/master/cloud_server_manager.py b/python/fedml/computing/scheduler/master/cloud_server_manager.py
index 9c35f233fd..0aabaf5dbf 100755
--- a/python/fedml/computing/scheduler/master/cloud_server_manager.py
+++ b/python/fedml/computing/scheduler/master/cloud_server_manager.py
@@ -49,13 +49,17 @@ def start_local_master_server(
             communication_manager=None, sender_message_queue=None, status_center_queue=None,
             master_agent_instance=None
     ):
+        logging.info(f"Local master server pid: {os.getpid()}")
         if platform.system() != "Windows":
             os.setsid()
 
         master_agent_instance.login(
             user, api_key=api_key, device_id=cloud_device_id, os_name=os_name,
             role=FedMLAccountManager.ROLE_CLOUD_SERVER, runner_cmd=payload,
-            communication_manager=None, sender_message_queue=None, status_center_queue=None)
+            communication_manager=None, sender_message_queue=None,
+            status_center_queue=None)
+
+        master_agent_instance.stop()
 
     def start_cloud_server_process_entry(self):
         try:
diff --git a/python/fedml/computing/scheduler/master/server_login.py b/python/fedml/computing/scheduler/master/server_login.py
index 8dd0696bc8..be7b73103f 100755
--- a/python/fedml/computing/scheduler/master/server_login.py
+++ b/python/fedml/computing/scheduler/master/server_login.py
@@ -41,4 +41,5 @@ def logout():
         master_agent.login(args.api_key, api_key=args.api_key, device_id=args.device_id,
                            os_name=args.os_name, role=args.role, runner_cmd=args.runner_cmd)
     else:
+        master_agent.stop()
         master_agent.logout()
diff --git a/python/fedml/computing/scheduler/scheduler_core/message_center.py b/python/fedml/computing/scheduler/scheduler_core/message_center.py
index 9229b2c0da..3ceafe976b 100755
--- a/python/fedml/computing/scheduler/scheduler_core/message_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/message_center.py
@@ -29,11 +29,12 @@ class FedMLMessageCenter(object):
     MESSAGE_SENT_SUCCESS_RECORDS_FILE = "message-sent-success-records.log"
     MESSAGE_RECEIVED_RECORDS_FILE = "message-received-records.log"
 
-    def __init__(self, agent_config=None, sender_message_queue=None, listener_message_queue=None):
+    def __init__(self, agent_config=None, sender_message_queue=None,
+                 listener_message_queue=None, sender_message_event=None):
         self.sender_agent_config = agent_config
         self.listener_agent_config = agent_config
         self.sender_message_queue = sender_message_queue
-        self.message_event = None
+        self.message_event = sender_message_event
         self.message_center_process = None
         self.sender_mqtt_mgr = None
         self.sender_mlops_metrics = None
@@ -132,6 +133,9 @@ def release_sender_mqtt_mgr(self):
     def get_sender_message_queue(self):
         return self.sender_message_queue
 
+    def get_sender_message_event(self):
+        return self.message_event
+
     def start_sender(self, message_center_name=None):
         self.sender_message_queue = multiprocessing.Queue()
         self.message_event = multiprocessing.Event()
@@ -154,7 +158,7 @@ def start_sender(self, message_center_name=None):
             )
         self.message_center_process.start()
 
-    def stop(self):
+    def stop_message_center(self):
         if self.message_event is not None:
             self.message_event.set()
 
@@ -166,6 +170,10 @@ def check_message_stop_event(self):
             logging.info("Received message center stopping event.")
             raise MessageCenterStoppedException("Message center stopped (for sender)")
 
+        if self.listener_message_event is not None and self.listener_message_event.is_set():
+            logging.info("Received message center stopping event.")
+            raise MessageCenterStoppedException("Message center stopped (for listener)")
+
     def send_message(self, topic, payload, run_id=None):
         message_entity = FedMLMessageEntity(topic=topic, payload=payload, run_id=run_id)
         self.sender_message_queue.put(message_entity.get_message_body())
@@ -204,6 +212,9 @@ def retry_sending_undelivered_message(self):
                 self.save_message_record(message_entity.run_id, message_entity.device_id, sent_message_record)
 
     def run_sender(self, message_event, message_queue, message_center_name):
+        if platform.system() != "Windows":
+            os.setsid()
+
         self.message_event = message_event
         self.sender_message_queue = message_queue
         self.message_center_name = message_center_name
@@ -321,7 +332,7 @@ def setup_listener_message_queue(self):
 
     def start_listener(
             self, sender_message_queue=None, listener_message_queue=None,
-            agent_config=None, message_center_name=None, extra_queues=None
+            sender_message_event=None, agent_config=None, message_center_name=None, extra_queues=None
     ):
         if self.listener_message_center_process is not None:
             return
@@ -342,7 +353,7 @@ def start_listener(
                 target=message_runner.run_listener_dispatcher, args=(
                     self.listener_message_event, self.listener_message_queue,
                     self.listener_handler_funcs, sender_message_queue,
-                    message_center_name, extra_queues
+                    sender_message_event, message_center_name, extra_queues
                 )
             )
         else:
@@ -350,7 +361,7 @@ def start_listener(
                 target=message_runner.run_listener_dispatcher, args=(
                     self.listener_message_event, self.listener_message_queue,
                     self.listener_handler_funcs, sender_message_queue,
-                    message_center_name, extra_queues
+                    sender_message_event, message_center_name, extra_queues
                 )
             )
         self.listener_message_center_process.start()
@@ -385,13 +396,19 @@ def unsubscribe_msg(self, topic):
         self.listener_mqtt_mgr.unsubscribe_msg(topic)
 
     def run_listener_dispatcher(
-            self, message_event, message_queue, listener_funcs, sender_message_queue,
+            self, listener_message_event, listener_message_queue,
+            listener_funcs, sender_message_queue, sender_message_event,
             message_center_name, extra_queues
     ):
-        self.listener_message_event = message_event
-        self.listener_message_queue = message_queue
+        if platform.system() != "Windows":
+            os.setsid()
+
+        self.listener_message_event = listener_message_event
+        self.listener_message_queue = listener_message_queue
         self.listener_handler_funcs = listener_funcs
         self.message_center_name = message_center_name
+        self.sender_message_queue = sender_message_queue
+        self.message_event = sender_message_event
 
         self.setup_listener_mqtt_mgr()
 
@@ -417,7 +434,7 @@ def run_listener_dispatcher(
 
                 # Get the message from the queue
                 try:
-                    message_body = message_queue.get(block=False, timeout=0.1)
+                    message_body = listener_message_queue.get(block=False, timeout=0.1)
                 except queue.Empty as e:  # If queue is empty, then break loop
                     message_body = None
                 if message_body is None:
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
index 80d67f33cb..833fa1edc0 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
@@ -2,11 +2,13 @@
 import json
 import logging
 import multiprocessing
+import os
 import sys
 import time
 import traceback
 import uuid
 import fedml
+from ..comm_utils.run_process_utils import RunProcessUtils
 from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
 from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager
 from ....core.mlops.mlops_metrics import MLOpsMetrics
@@ -60,7 +62,8 @@ def add_protocol_handler(self):
         pass
 
     def initialize(
-            self, communication_manager=None, sender_message_queue=None, status_center_queue=None
+            self, communication_manager=None, sender_message_queue=None,
+            status_center_queue=None, sender_message_event=None
     ):
         # Generate the message topics
         self.generate_topics()
@@ -86,6 +89,7 @@ def initialize(
         # Start the message center to process edge related messages.
         if sender_message_queue is None:
             self.setup_message_center()
+            sender_message_event = self.message_center.get_sender_message_event()
         else:
             self.rebuild_message_center(sender_message_queue)
 
@@ -94,13 +98,14 @@ def initialize(
 
         # Start the status center to process edge related status.
         if status_center_queue is None:
-            self.start_status_listener_center()
+            self.start_status_listener_center(sender_message_event=sender_message_event)
         else:
             self.set_status_queue(status_center_queue)
             self.rebuild_status_center(status_center_queue)
 
         # Start the message center for listener
         self.start_listener(sender_message_queue=self.message_center.get_sender_message_queue(),
+                            sender_message_event=sender_message_event,
                             agent_config=self.agent_config,
                             message_center_name=self.message_center_name,
                             extra_queues=[self.get_status_queue()])
@@ -124,6 +129,8 @@ def start(self):
                 logging.info("Server tracing: {}".format(traceback.format_exc()))
 
         finally:
+            logging.info(f"Protocol manager is about to exit, pid: {os.getpid()}")
+
             FedMLAccountManager.write_login_failed_file(is_client=not self.is_master_agent)
 
             self.stop()
@@ -134,7 +141,7 @@ def start(self):
                 clean_process_group=False)
             sys.exit(1)
 
-    def stop(self):
+    def stop(self, kill_process=False):
         if self.communication_mgr is not None:
             # noinspection PyBroadException
             try:
@@ -146,7 +153,9 @@ def stop(self):
             self.communication_mgr.loop_stop()
             self.communication_mgr.disconnect()
 
-        self.release_message_center()
+        if kill_process:
+            self.release_message_center()
+            RunProcessUtils.kill_process(os.getppid(), exclude_current_pid=True)
 
     @abstractmethod
     def _init_extra_items(self):
@@ -210,20 +219,37 @@ def rebuild_message_center(self, message_center_queue):
 
     def release_message_center(self):
         try:
+            self.stop_message_center()
+
             if self.message_center is not None:
-                self.message_center.stop()
+                self.message_center.stop_message_center()
                 self.message_center = None
 
         except Exception as e:
             logging.error(
-                f"Failed to release slave communication manager with Exception {e}. "
+                f"Failed to release the message center with Exception {e}. "
                 f"Traceback: {traceback.format_exc()}")
             pass
 
-    def start_status_listener_center(self):
+    def release_status_center(self):
+        try:
+            self.stop_status_center()
+
+            if self.status_center is not None:
+                self.status_center.stop_status_center()
+                self.status_center = None
+
+        except Exception as e:
+            logging.error(
+                f"Failed to release the status center with Exception {e}. "
+                f"Traceback: {traceback.format_exc()}")
+            pass
+
+    def start_status_listener_center(self, sender_message_event=None):
         self.start_status_center(
             sender_message_center_queue=self.message_center.get_sender_message_queue(),
             listener_message_center_queue=self.get_listener_message_queue(),
+            sender_message_event=sender_message_event,
             is_slave_agent=not self.is_master_agent
         )
 
@@ -289,6 +315,9 @@ def get_protocol_communication_manager(self):
     def get_protocol_sender_message_queue(self):
         return self.message_center.get_sender_message_queue()
 
+    def get_protocol_sender_message_event(self):
+        return self.message_center.get_sender_message_event()
+
     def get_protocol_status_center_queue(self):
         return self.get_status_queue()
 
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py
index 2a7a76c2ca..b2949af291 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py
@@ -1,4 +1,5 @@
 import logging
+import os
 import platform
 import time
 
@@ -107,7 +108,9 @@ def get_status_runner(self):
         return None
 
     def start_status_center(self, sender_message_center_queue=None,
-                            listener_message_center_queue=None, is_slave_agent=False):
+                            listener_message_center_queue=None,
+                            sender_message_event=None,
+                            is_slave_agent=False):
         self.status_queue = multiprocessing.Queue()
         self.status_event = multiprocessing.Event()
         self.status_event.clear()
@@ -121,20 +124,24 @@ def start_status_center(self, sender_message_center_queue=None,
             self.status_center_process = multiprocessing.Process(
                 target=target_func, args=(
                     self.status_event, self.status_queue, self.status_sender_message_center_queue,
-                    self.status_listener_message_center_queue
+                    self.status_listener_message_center_queue, sender_message_event
                 )
             )
         else:
             self.status_center_process = fedml.get_process(
                 target=target_func, args=(
                     self.status_event, self.status_queue, self.status_sender_message_center_queue,
-                    self.status_listener_message_center_queue
+                    self.status_listener_message_center_queue, sender_message_event
                 )
             )
 
         self.status_center_process.start()
 
-    def check_message_stop_event(self):
+    def stop_status_center(self):
+        if self.status_event is not None:
+            self.status_event.set()
+
+    def check_status_stop_event(self):
         if self.status_event is not None and self.status_event.is_set():
             logging.info("Received status center stopping event.")
             raise StatusCenterStoppedException("Status center stopped (for sender)")
@@ -170,7 +177,11 @@ def rebuild_status_center(self, status_queue):
 
     def run_status_dispatcher(self, status_event, status_queue,
                               sender_message_center_queue,
-                              listener_message_center_queue):
+                              listener_message_center_queue,
+                              sender_message_event):
+        if platform.system() != "Windows":
+            os.setsid()
+
         # Save the parameters
         self.status_event = status_event
         self.status_queue = status_queue
@@ -183,10 +194,11 @@ def run_status_dispatcher(self, status_event, status_queue,
             self.rebuild_message_center(sender_message_center_queue)
             message_center = FedMLMessageCenter(
                 sender_message_queue=sender_message_center_queue,
-                listener_message_queue=listener_message_center_queue
+                listener_message_queue=listener_message_center_queue,
+                sender_message_event=sender_message_event
             )
 
-        if sender_message_center_queue is not None:
+        if status_queue is not None:
             self.rebuild_status_center(status_queue)
 
         # Init status manager instances
@@ -197,7 +209,7 @@ def run_status_dispatcher(self, status_event, status_queue,
 
             # Check if we should stop status dispatcher
             try:
-                self.check_message_stop_event()
+                self.check_status_stop_event()
             except StatusCenterStoppedException as e:
                 break
 
@@ -266,7 +278,11 @@ def run_status_dispatcher(self, status_event, status_queue,
 
     def run_status_dispatcher_in_slave(self, status_event, status_queue,
                                        sender_message_center_queue,
-                                       listener_message_center_queue):
+                                       listener_message_center_queue,
+                                       sender_message_event):
+        if platform.system() != "Windows":
+            os.setsid()
+
         # Save the parameters
         self.status_event = status_event
         self.status_queue = status_queue
@@ -279,10 +295,11 @@ def run_status_dispatcher_in_slave(self, status_event, status_queue,
             self.rebuild_message_center(sender_message_center_queue)
             message_center = FedMLMessageCenter(
                 sender_message_queue=sender_message_center_queue,
-                listener_message_queue=listener_message_center_queue
+                listener_message_queue=listener_message_center_queue,
+                sender_message_event=sender_message_event
             )
 
-        if sender_message_center_queue is not None:
+        if status_queue is not None:
             self.rebuild_status_center(status_queue)
 
         # Init status manager instances
@@ -294,7 +311,7 @@ def run_status_dispatcher_in_slave(self, status_event, status_queue,
 
             # Check if we should stop status dispatcher
             try:
-                self.check_message_stop_event()
+                self.check_status_stop_event()
             except StatusCenterStoppedException as e:
                 break
 
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
index e045458db5..fec19bed70 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
@@ -143,6 +143,9 @@ def process_job_completed_status(self, master_id, status):
         if self.status_center.is_deployment_status_center and status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED:
             self.report_deployment_status(self.run_id, GeneralConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
 
+        self.message_center.stop_message_center()
+        self.status_center.stop_status_center()
+
     def process_job_exception_status(self, master_id, status):
         # Report exception job status
         self.report_exception_status(status)
diff --git a/python/fedml/computing/scheduler/slave/base_slave_agent.py b/python/fedml/computing/scheduler/slave/base_slave_agent.py
index fed10fa039..58a79aae88 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_agent.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_agent.py
@@ -25,7 +25,8 @@ def __init__(self):
     def login(
             self, userid, api_key=None, device_id=None,
             os_name=None, need_to_check_gpu=False, role=None,
-            communication_manager=None, sender_message_queue=None, status_center_queue=None
+            communication_manager=None, sender_message_queue=None,
+            status_center_queue=None, sender_message_event=None
     ):
         # Preprocess the login args
         if need_to_check_gpu:
@@ -61,7 +62,8 @@ def login(
             self._initialize_protocol_manager(
                 communication_manager=communication_manager,
                 sender_message_queue=sender_message_queue,
-                status_center_queue=status_center_queue)
+                status_center_queue=status_center_queue,
+                sender_message_event=sender_message_event)
         except Exception as e:
             FedMLAccountManager.write_login_failed_file(is_client=True)
             self.protocol_mgr.stop()
@@ -90,7 +92,8 @@ def _create_protocol_manager(self, login_result):
         self.protocol_mgr.agent_config = login_result.agent_config
 
     def _initialize_protocol_manager(
-            self, communication_manager=None, sender_message_queue=None, status_center_queue=None
+            self, communication_manager=None, sender_message_queue=None,
+            status_center_queue=None, sender_message_event=None
     ):
         # Init local database
         self._init_database()
@@ -100,7 +103,8 @@ def _initialize_protocol_manager(
         self.protocol_mgr.initialize(
             communication_manager=communication_manager,
             sender_message_queue=sender_message_queue,
-            status_center_queue=status_center_queue)
+            status_center_queue=status_center_queue,
+            sender_message_event=sender_message_event)
 
         # Start the client API process
         self._start_slave_api()
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index 447bd05cd9..49aad618c1 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -148,12 +148,9 @@ def add_subscribe_topic(self, topic):
 
     def stop(self):
         if self.model_device_server is not None:
-            self.model_device_server.stop()
             self.model_device_server = None
 
         if self.model_device_client_list is not None:
-            for model_client in self.model_device_client_list:
-                model_client.stop()
             self.model_device_client_list.clear()
             self.model_device_client_list = None
 
diff --git a/python/fedml/computing/scheduler/slave/united_agents.py b/python/fedml/computing/scheduler/slave/united_agents.py
index 64ba3c5465..7aef66290d 100755
--- a/python/fedml/computing/scheduler/slave/united_agents.py
+++ b/python/fedml/computing/scheduler/slave/united_agents.py
@@ -33,6 +33,7 @@ def login(self, userid, api_key=None, device_id=None,
         # Get the communication manager, sender message queue
         shared_communication_mgr = launch_slave_agent.get_protocol_manager().get_protocol_communication_manager()
         shared_slave_sender_message_queue = launch_slave_agent.get_protocol_manager().get_protocol_sender_message_queue()
+        shared_slave_sender_message_event = launch_slave_agent.get_protocol_manager().get_protocol_sender_message_event()
 
         # Login with the launch master role based on
         # the shared communication manager, sender message center
@@ -48,6 +49,7 @@ def login(self, userid, api_key=None, device_id=None,
         shared_slave_status_center_queue = launch_slave_agent.get_protocol_manager().get_protocol_status_center_queue()
         shared_master_status_center_queue = launch_master_agent.get_protocol_manager().get_protocol_status_center_queue()
         shared_master_sender_message_queue = launch_master_agent.get_protocol_manager().get_protocol_sender_message_queue()
+        shared_master_sender_message_event = launch_master_agent.get_protocol_manager().get_protocol_sender_message_event()
 
         # Login with the deployment master role based on
         # the shared communication manager, sender message center, status center
@@ -56,7 +58,8 @@ def login(self, userid, api_key=None, device_id=None,
             os_name=os_name, role=FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM,
             communication_manager=shared_communication_mgr,
             sender_message_queue=shared_master_sender_message_queue,
-            status_center_queue=shared_master_status_center_queue
+            status_center_queue=shared_master_status_center_queue,
+            sender_message_event=shared_master_sender_message_event
         )
 
         # Login with the deployment slave role based on
@@ -66,7 +69,8 @@ def login(self, userid, api_key=None, device_id=None,
             os_name=os_name, role=FedMLAccountManager.ROLE_DEPLOY_WORKER_ON_PREM,
             communication_manager=shared_communication_mgr,
             sender_message_queue=shared_slave_sender_message_queue,
-            status_center_queue=shared_slave_status_center_queue
+            status_center_queue=shared_slave_status_center_queue,
+            sender_message_event=shared_slave_sender_message_event
         )
 
         # Start the slave agent to connect to servers and loop forever.

From aecafb80f9d6731b6b15e4cfca7b15035b82cf84 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Mon, 17 Jun 2024 14:39:16 -0700
Subject: [PATCH 161/282] Fix compatibility by capping the numpy version below 2.0.

---
 python/setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/setup.py b/python/setup.py
index 9651465d32..4757c10a17 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -40,7 +40,7 @@ def finalize_options(self):
     'multiprocess',
     'networkx<3.0',
     'ntplib',
-    'numpy>=1.21',
+    'numpy<2.0.0',
     'onnx',
     'paho-mqtt<2.0.0',
     'pandas',

From 1af78e732658c2f3af7f26670b9fda29c1cb0361 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Tue, 18 Jun 2024 11:58:30 +0800
Subject: [PATCH 162/282] [CoreEngine] replace the queue with the managed queue
 to avoid the multiprocessing lock problem.

---
 .../master/base_master_job_runner.py          | 20 +++++++++----------
 .../model_scheduler/master_job_runner.py      |  2 +-
 .../scheduler_core/message_center.py          |  6 +++---
 .../scheduler_base_job_runner.py              |  7 ++++---
 .../scheduler/scheduler_core/status_center.py |  2 +-
 5 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py
index 1072e6b045..9a77c2ba82 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py
@@ -42,13 +42,13 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id
             is_master_runner=True
         )
 
-        self.run_edge_id_status_queue = multiprocessing.Queue()
-        self.run_metrics_queue = multiprocessing.Queue()
-        self.run_events_queue = multiprocessing.Queue()
-        self.run_artifacts_queue = multiprocessing.Queue()
-        self.run_logs_queue = multiprocessing.Queue()
-        self.run_edge_device_info_queue = multiprocessing.Queue()
-        self.run_edge_device_info_global_queue = multiprocessing.Queue()
+        self.run_edge_id_status_queue = multiprocessing.Manager().Queue()
+        self.run_metrics_queue = multiprocessing.Manager().Queue()
+        self.run_events_queue = multiprocessing.Manager().Queue()
+        self.run_artifacts_queue = multiprocessing.Manager().Queue()
+        self.run_logs_queue = multiprocessing.Manager().Queue()
+        self.run_edge_device_info_queue = multiprocessing.Manager().Queue()
+        self.run_edge_device_info_global_queue = multiprocessing.Manager().Queue()
         self.run_extend_queue_list = None
         self.async_check_timeout = 0
         self.enable_async_cluster = False
@@ -453,7 +453,7 @@ def put_run_edge_device_info_to_queue(self, run_id, edge_id, device_info):
         if int(edge_id) in edge_ids or str(edge_id) in edge_ids:
             run_id_str = str(run_id)
             if self.run_edge_device_info_queue is None:
-                self.run_edge_device_info_queue = multiprocessing.Queue()
+                self.run_edge_device_info_queue = multiprocessing.Manager().Queue()
             self.run_edge_device_info_queue.put(device_info)
 
     def should_continue_run_job(self, run_id):
@@ -581,7 +581,7 @@ def callback_run_logs(self, topic, payload):
         run_id = str(topic).split('/')[-1]
         run_id_str = str(run_id)
         if self.run_logs_queue is None:
-            self.run_logs_queue = multiprocessing.Queue()
+            self.run_logs_queue = multiprocessing.Manager().Queue()
         self.run_logs_queue.put(payload)
 
     def callback_run_metrics(self, topic, payload):
@@ -589,7 +589,7 @@ def callback_run_metrics(self, topic, payload):
         run_id = str(topic).split('/')[-1]
         run_id_str = str(run_id)
         if self.run_metrics_queue is None:
-            self.run_metrics_queue = multiprocessing.Queue()
+            self.run_metrics_queue = multiprocessing.Manager().Queue()
         self.run_metrics_queue.put(payload)
 
     # def send_training_request_to_edges(self, active_edge_info_dict):
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index e32e7421f6..9854dad5f6 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -50,7 +50,7 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id
         self.replica_controller = None
         self.deployed_replica_payload = None
         self.slave_deployment_results_map = dict()
-        self.deployment_result_queue = multiprocessing.Queue()
+        self.deployment_result_queue = multiprocessing.Manager().Queue()
         self.is_fresh_endpoint = True
 
     # Override
diff --git a/python/fedml/computing/scheduler/scheduler_core/message_center.py b/python/fedml/computing/scheduler/scheduler_core/message_center.py
index 3ceafe976b..aeac1a3855 100755
--- a/python/fedml/computing/scheduler/scheduler_core/message_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/message_center.py
@@ -137,7 +137,7 @@ def get_sender_message_event(self):
         return self.message_event
 
     def start_sender(self, message_center_name=None):
-        self.sender_message_queue = multiprocessing.Queue()
+        self.sender_message_queue = multiprocessing.Manager().Queue()
         self.message_event = multiprocessing.Event()
         self.message_event.clear()
         message_center = FedMLMessageCenter(agent_config=self.sender_agent_config,
@@ -328,7 +328,7 @@ def get_listener_message_queue(self):
         return self.listener_message_queue
 
     def setup_listener_message_queue(self):
-        self.listener_message_queue = multiprocessing.Queue()
+        self.listener_message_queue = multiprocessing.Manager().Queue()
 
     def start_listener(
             self, sender_message_queue=None, listener_message_queue=None,
@@ -339,7 +339,7 @@ def start_listener(
 
         if listener_message_queue is None:
             if self.listener_message_queue is None:
-                self.listener_message_queue = multiprocessing.Queue()
+                self.listener_message_queue = multiprocessing.Manager().Queue()
         else:
             self.listener_message_queue = listener_message_queue
         self.listener_message_event = multiprocessing.Event()
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
index ffaee555af..30df7f1905 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -75,6 +75,7 @@ def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id
         self.user_name = None
         self.general_edge_id = None
         self.message_center = None
+        self.status_center = None
         self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {
             "${FEDSYS.RUN_ID}": "",
             "${FEDSYS.PRIVATE_LOCAL_DATA}": "",
@@ -210,7 +211,7 @@ def retrieve_and_unzip_package(self, package_name, package_url):
         # Open a process to download the package so that we can avoid the request is blocked and check the timeout.
         from multiprocessing import Process
         completed_event = multiprocessing.Event()
-        info_queue = multiprocessing.Queue()
+        info_queue = multiprocessing.Manager().Queue()
         if platform.system() == "Windows":
             download_process = multiprocessing.Process(
                 target=self.download_package_proc,
@@ -648,8 +649,8 @@ def rebuild_message_status_center(self, sender_message_queue, listener_message_q
         self.mlops_metrics.set_messenger(self.message_center)
         self.mlops_metrics.run_id = self.run_id
 
-        status_center = FedMLStatusCenter.rebuild_status_center_from_queue(status_queue)
+        self.status_center = FedMLStatusCenter.rebuild_status_center_from_queue(status_queue)
         if self.status_reporter is None:
             self.status_reporter = MLOpsMetrics()
-        self.status_reporter.set_messenger(status_center)
+        self.status_reporter.set_messenger(self.status_center)
         self.status_reporter.run_id = self.run_id
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py
index b2949af291..76ba9857c6 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py
@@ -111,7 +111,7 @@ def start_status_center(self, sender_message_center_queue=None,
                             listener_message_center_queue=None,
                             sender_message_event=None,
                             is_slave_agent=False):
-        self.status_queue = multiprocessing.Queue()
+        self.status_queue = multiprocessing.Manager().Queue()
         self.status_event = multiprocessing.Event()
         self.status_event.clear()
         self.status_sender_message_center_queue = sender_message_center_queue

From 89219fb3c20972ff94badc76f8e90d71592e5647 Mon Sep 17 00:00:00 2001
From: alaydshah <alay11shah@gmail.com>
Date: Tue, 18 Jun 2024 07:00:43 +0000
Subject: [PATCH 163/282] Workaround device mapping inconsistency

---
 .../comm_utils/gpu_utils/gpu_utils.py         |  1 +
 .../comm_utils/gpu_utils/qualcomm_utils.py    | 36 +++++++++++++------
 .../scheduler/comm_utils/hardware_utils.py    |  2 ++
 .../scheduler_core/account_manager.py         |  2 +-
 4 files changed, 29 insertions(+), 12 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
index bc7a3b8216..b48a3e85b7 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
@@ -27,6 +27,7 @@ class GPUCard:
     memoryUsed: float
     memoryUtil: float
     load: Optional[float] = 0.0
+    device_path: Optional[str] = ""
     uuid: Optional[str] = ""
     display_mode: Optional[str] = ""
     display_active: Optional[str] = ""
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
index 88114cf2ad..13131e362d 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
@@ -26,19 +26,22 @@ def detect_gpu_card_type(cls) -> Optional[GPUCardType]:
 
     @staticmethod
     def get_gpu_cards() -> List[GPUCard]:
-        from qaicrt import Util, QIDList, QDevInfo, QStatus
+        return list(QualcommNPUtil.__get_gpu_cards().values())
 
-        cards = []
+    @staticmethod
+    def __get_gpu_cards() -> Dict[int, GPUCard]:
+        from qaicrt import Util, QIDList, QDevInfo, QStatus
+        cards = dict()
         util = Util()
         status, card_list = util.getDeviceIds()
         if status.value == 0:
             for card in card_list:
                 status, card_info = util.getDeviceInfo(card)
                 if status.value == 0 and card_info.devStatus.value == 1:
-                    cards.append(QualcommNPUtil.__convert(card_info))
-
+                    gpu_card = QualcommNPUtil.__convert(card_info)
+                    cards[gpu_card.id] = gpu_card
         else:
-            logging.error("Qualcomm Card Status not Healthy")
+            logging.error("Qualcomm Cards Status not Healthy")
         return cards
 
     @staticmethod
@@ -58,11 +61,21 @@ def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memo
 
     @staticmethod
     def get_docker_gpu_device_mapping(gpu_ids: Optional[List[int]], num_gpus: int = 0) -> Optional[Dict]:
-        if gpu_ids is not None and len(gpu_ids):
-            return {
-                "devices": [f"{QualcommNPUtil.NPU_CARD_PATH}{gpu_id}:{QualcommNPUtil.NPU_CARD_PATH}{gpu_id}" for gpu_id
-                            in gpu_ids]}
-        return None
+        if gpu_ids is None or not len(gpu_ids):
+            return None
+
+        devices = []
+        gpu_cards = QualcommNPUtil.__get_gpu_cards()
+
+        for gpu_id in gpu_ids:
+            if not (gpu_id in gpu_cards and gpu_cards[gpu_id].device_path):
+                logging.error("Failed to get gpu device mapping for docker")
+                break
+            else:
+                device_path = gpu_cards[gpu_id].device_path
+                devices.append(f"{device_path}:{device_path}")
+
+        return {"devices": devices} if len(devices) == len(gpu_ids) else None
 
     @staticmethod
     def get_docker_gpu_ids_by_container_name(container_name: str, docker_client: DockerClient) -> List[int]:
@@ -87,7 +100,8 @@ def __convert(npu) -> GPUCard:
         load = (nsp_total - nsp_free) / nsp_total
 
         return GPUCard(
-            id=npu.qid,
+            id=npu.mhiId,
+            device_path=npu.name,
             name=npu.pciInfo.devicename,
             driver=npu.devData.fwQCImageVersionString,
             serial=npu.devData.serial,
diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
index e73809955e..c876948145 100644
--- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
@@ -60,5 +60,7 @@ def get_docker_gpu_ids_by_container_name(container_name: str, docker_client: Doc
 if __name__ == "__main__":
     gpus = HardwareUtil.get_gpus()
     get_available_gpu_cards = HardwareUtil.get_available_gpu_ids(limit=len(gpus))
+    device_mapping = HardwareUtil.get_docker_gpu_device_mapping(get_available_gpu_cards, len(get_available_gpu_cards))
     print(gpus)
     print(get_available_gpu_cards)
+    print(device_mapping)
diff --git a/python/fedml/computing/scheduler/scheduler_core/account_manager.py b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
index 3491e102f6..3b80511d12 100755
--- a/python/fedml/computing/scheduler/scheduler_core/account_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
@@ -266,7 +266,7 @@ def get_uuid():
                     if not use_machine_id:
                         device_id = hex(uuid.getnode())
                     else:
-                        device_id = device_id = FedMLAccountManager.get_gpu_machine_id()
+                        device_id = FedMLAccountManager.get_gpu_machine_id()
             else:
                 device_id = sys_utils.run_subprocess_open(
                     "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split()

From 1d5a05db71ba3943cb42eea0836fabc181af7ac6 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Tue, 18 Jun 2024 18:23:40 -0700
Subject: [PATCH 164/282] [Deploy][Autoscale] Bug fix: continue the for loop if
 no scale op.

---
 python/fedml/computing/scheduler/comm_utils/job_monitor.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
index a7d5214a02..97a4cb6ebc 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
@@ -148,7 +148,7 @@ def autoscaler_reconcile_after_interval(self):
                     if current_replicas == new_replicas:
                         # Basically the autoscaler decided that no scaling operation should take place.
                         logging.info(f"No scaling operation for endpoint {e_id}.")
-                        return
+                        continue
 
                     # Should scale in / out
                     curr_version = fedml.get_env_version()
@@ -159,7 +159,7 @@ def autoscaler_reconcile_after_interval(self):
                         mlops_prefix = "https://open-test.fedml.ai/"
                     else:
                         logging.error(f"Do not support the version {curr_version}.")
-                        return
+                        continue
                     autoscale_url_path = "fedmlModelServer/api/v1/endpoint/auto-scale"
                     url = f"{mlops_prefix}{autoscale_url_path}"
 
@@ -167,7 +167,7 @@ def autoscaler_reconcile_after_interval(self):
                     cached_token = fedml_model_cache.get_end_point_token(e_id, e_name, model_name)
                     if cached_token is None:
                         logging.error(f"Failed to get the cached token for endpoint {e_id}.")
-                        return
+                        continue
 
                     req_header = {
                         "Authorization": f"Bearer {cached_token}"

From 31c57e01d426a82127fd3cff2ae45ee36f6bbe14 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@fedml.ai>
Date: Tue, 18 Jun 2024 21:32:41 -0400
Subject: [PATCH 165/282] Polishing the autoscaler real test.

---
 .../test/scaling_algorithm_real_test.py       | 64 +++++--------------
 1 file changed, 15 insertions(+), 49 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/test/scaling_algorithm_real_test.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/test/scaling_algorithm_real_test.py
index 34721d9002..0fae77c3f3 100644
--- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/test/scaling_algorithm_real_test.py
+++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/test/scaling_algorithm_real_test.py
@@ -2,9 +2,10 @@
 import logging
 
 from collections import namedtuple
-from fedml.computing.scheduler.model_scheduler.autoscaler.autoscaler import Autoscaler, ReactivePolicy
+from fedml.computing.scheduler.model_scheduler.autoscaler.autoscaler import Autoscaler
 from fedml.core.mlops.mlops_runtime_log import MLOpsRuntimeLog
 from fedml.computing.scheduler.model_scheduler.device_model_cache import FedMLModelCache
+from fedml.computing.scheduler.model_scheduler.autoscaler.policies import ConcurrentQueryPolicy
 
 
 if __name__ == "__main__":
@@ -18,9 +19,6 @@
     parser.add_argument('--redis_addr', default="local")
     parser.add_argument('--redis_port', default=6379)
     parser.add_argument('--redis_password', default="fedml_default")
-    parser.add_argument('--metric',
-                        default="latency",
-                        help="Either latency or qps")
     args = parser.parse_args()
 
     fedml_model_cache = FedMLModelCache.get_instance()
@@ -32,50 +30,18 @@
     # Init the autoscaler
     autoscaler = Autoscaler(args.redis_addr, args.redis_port, args.redis_password)
 
-    latency_reactive_policy_default = {
-        "metric": "latency",
-        "ewm_mins": 15,
-        "ewm_alpha": 0.5,
-        "ub_threshold": 0.5,
-        "lb_threshold": 0.99,
-        "triggering_value": 1.6561916828471053
+    autoscaling_policy_config = {
+            "current_replicas": 1,
+            "min_replicas": 1,
+            "max_replicas": 3,
+            "queries_per_replica": 2,
+            "window_size_secs": 60,
+            "scaledown_delay_secs": 120,
     }
-    qps_reactive_policy_default = {
-        "metric": "qps",
-        "ewm_mins": 15,
-        "ewm_alpha": 0.5,
-        "ub_threshold": 2,
-        "lb_threshold": 0.5
-    }
-    policy_config = latency_reactive_policy_default \
-        if args.metric == "latency" else qps_reactive_policy_default
-    autoscaling_policy = ReactivePolicy(**policy_config)
-
-    for endpoint_settings in endpoints_settings_list:
-        endpoint_state = endpoint_settings["state"]
-        if endpoint_state == "DEPLOYED" and endpoint_settings["enable_auto_scaling"]:
-
-            e_id, e_name, model_name = \
-                endpoint_settings["endpoint_id"], \
-                endpoint_settings["endpoint_name"], \
-                endpoint_settings["model_name"]
-            logging.info(f"Querying the autoscaler for endpoint {e_id} with user settings {endpoint_settings}.")
-
-            # For every endpoint we just update the policy configuration.
-            autoscaling_policy.min_replicas = endpoint_settings["scale_min"]
-            autoscaling_policy.max_replicas = endpoint_settings["scale_max"]
-            # We retrieve a list of replicas for every endpoint. The number
-            # of running replicas is the length of that list.
-            current_replicas = len(fedml_model_cache.get_endpoint_replicas_results(e_id))
-            autoscaling_policy.current_replicas = current_replicas
-            logging.info(f"Endpoint {e_id} autoscaling policy: {autoscaling_policy}.")
-
-            scale_op = autoscaler.scale_operation_endpoint(
-                autoscaling_policy,
-                str(e_id))
-
-            new_replicas = current_replicas + scale_op.value
+    autoscaling_policy = ConcurrentQueryPolicy(**autoscaling_policy_config)
 
-            logging.info(f"Scaling operation {scale_op.value} for endpoint {e_id} .")
-            logging.info(f"New Replicas {new_replicas} for endpoint {e_id} .")
-            logging.info(f"Current Replicas {current_replicas} for endpoint {e_id} .")
+    e_id = 1821952311
+    scale_op = autoscaler.scale_operation_endpoint(
+        autoscaling_policy,
+        str(e_id))
+    logging.info(f"Scaling operation {scale_op.value} for endpoint {e_id} .")

From 4cb53fe55f5a4e748af0daaf27eb53773cdde2d6 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@fedml.ai>
Date: Tue, 18 Jun 2024 21:37:12 -0400
Subject: [PATCH 166/282] Replacing e_id.

---
 .../autoscaler/test/scaling_algorithm_real_test.py             | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/test/scaling_algorithm_real_test.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/test/scaling_algorithm_real_test.py
index 0fae77c3f3..78a1231abf 100644
--- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/test/scaling_algorithm_real_test.py
+++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/test/scaling_algorithm_real_test.py
@@ -40,7 +40,8 @@
     }
     autoscaling_policy = ConcurrentQueryPolicy(**autoscaling_policy_config)
 
-    e_id = 1821952311
+    # Please replace the `e_id` below with a proper e_id value.
+    e_id = 1111
     scale_op = autoscaler.scale_operation_endpoint(
         autoscaling_policy,
         str(e_id))

From 1422fa10d649d55e484ad5844ed9006fcd8bb38e Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 19 Jun 2024 15:24:03 +0800
Subject: [PATCH 167/282] [CoreEngine] check for a None value and update the
 numpy version.

---
 .../status_manager_protocols.py               | 21 ++++++++++---------
 python/setup.py                               |  6 +++---
 2 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
index fec19bed70..ac3d8c3cb3 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
@@ -190,16 +190,17 @@ def process_job_status_consensus(self, run_id, master_id, status):
         status = self.get_entire_job_status()
 
         # Set the device status based on the job status
-        for edge_id_item, edge_status_item in self.edge_status_dict.items():
-            if edge_id_item == "server":
-                continue
-
-            # Calc the device status based on the job status
-            consensus_device_status = FedMLStatusManager.get_device_consensus_status_in_job(
-                status, edge_status_item)
-            if consensus_device_status is not None:
-                self.message_reporter.report_client_training_status(
-                    edge_id_item, consensus_device_status, run_id=run_id, update_db=False)
+        if self.edge_status_dict is not None:
+            for edge_id_item, edge_status_item in self.edge_status_dict.items():
+                if edge_id_item == "server":
+                    continue
+
+                # Calc the device status based on the job status
+                consensus_device_status = FedMLStatusManager.get_device_consensus_status_in_job(
+                    status, edge_status_item)
+                if consensus_device_status is not None:
+                    self.message_reporter.report_client_training_status(
+                        edge_id_item, consensus_device_status, run_id=run_id, update_db=False)
 
         # Save the job status to local storage
         FedMLServerDataInterface.get_instance().save_job_status(run_id, master_id, status, status)
diff --git a/python/setup.py b/python/setup.py
index 9651465d32..f00c0b4335 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -20,7 +20,7 @@ def finalize_options(self):
 
 requirements = [
     'GPUtil',
-    'PyYAML',
+    'PyYAML==5.3.1',
     'aiohttp>=3.8.1',
     'attrdict',
     'attrs',
@@ -40,7 +40,7 @@ def finalize_options(self):
     'multiprocess',
     'networkx<3.0',
     'ntplib',
-    'numpy>=1.21',
+    'numpy>=1.21,<2.0',
     'onnx',
     'paho-mqtt<2.0.0',
     'pandas',
@@ -126,7 +126,7 @@ def finalize_options(self):
 
 setup(
     name="fedml",
-    version="0.9.0",
+    version="0.8.31b23",
     author="FedML Team",
     author_email="ch@fedml.ai",
     description="A research and production integrated edge-cloud library for "

From 158eb9c1d281e520f364cdec0602b4b9b417836b Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 19 Jun 2024 15:41:32 +0800
Subject: [PATCH 168/282] [CoreEngine] remove the deprecated action runners.

---
 .../build_wheels_and_releases.yml-backup      |    0
 .../{ => deprecated}/full_e2e_test.yml-bakcup |    0
 .github/workflows/{ => deprecated}/runner.md  |    0
 ...oke_test_cross_device_mnn_server_linux.yml |    0
 ...ke_test_cross_silo_fedavg_attack_linux.yml |    0
 ...smoke_test_cross_silo_fedavg_cdp_linux.yml |    0
 ...e_test_cross_silo_fedavg_defense_linux.yml |    0
 ...smoke_test_cross_silo_fedavg_ldp_linux.yml |    0
 .../smoke_test_cross_silo_ho_linux.yml        |    0
 .../smoke_test_cross_silo_ho_win.yml          |    0
 ...moke_test_cross_silo_lightsecagg_linux.yml |    0
 .../smoke_test_cross_silo_lightsecagg_win.yml |    0
 .../smoke_test_flow_linux.yml                 |    0
 .../smoke_test_ml_engines_linux_jax.yml       |    0
 .../smoke_test_ml_engines_linux_mxnet.yml     |    0
 .../smoke_test_ml_engines_linux_tf.yml        |    0
 .../smoke_test_ml_engines_win.yml             |    0
 .../smoke_test_pip_cli_sp_linux.yml           |    0
 .../smoke_test_pip_cli_sp_win.yml             |    0
 .../{ => deprecated}/smoke_test_security.yml  |    0
 .../smoke_test_simulation_mpi_linux.yml       |    0
 .../federate/quick_start/octopus/dump.rdb     |  Bin 0 -> 7248 bytes
 .../federate/quick_start/octopus/nohup.out    | 3032 +++++++++++++++++
 23 files changed, 3032 insertions(+)
 rename .github/workflows/{ => deprecated}/build_wheels_and_releases.yml-backup (100%)
 rename .github/workflows/{ => deprecated}/full_e2e_test.yml-bakcup (100%)
 rename .github/workflows/{ => deprecated}/runner.md (100%)
 rename .github/workflows/{ => deprecated}/smoke_test_cross_device_mnn_server_linux.yml (100%)
 rename .github/workflows/{ => deprecated}/smoke_test_cross_silo_fedavg_attack_linux.yml (100%)
 rename .github/workflows/{ => deprecated}/smoke_test_cross_silo_fedavg_cdp_linux.yml (100%)
 rename .github/workflows/{ => deprecated}/smoke_test_cross_silo_fedavg_defense_linux.yml (100%)
 rename .github/workflows/{ => deprecated}/smoke_test_cross_silo_fedavg_ldp_linux.yml (100%)
 rename .github/workflows/{ => deprecated}/smoke_test_cross_silo_ho_linux.yml (100%)
 rename .github/workflows/{ => deprecated}/smoke_test_cross_silo_ho_win.yml (100%)
 rename .github/workflows/{ => deprecated}/smoke_test_cross_silo_lightsecagg_linux.yml (100%)
 rename .github/workflows/{ => deprecated}/smoke_test_cross_silo_lightsecagg_win.yml (100%)
 rename .github/workflows/{ => deprecated}/smoke_test_flow_linux.yml (100%)
 rename .github/workflows/{ => deprecated}/smoke_test_ml_engines_linux_jax.yml (100%)
 rename .github/workflows/{ => deprecated}/smoke_test_ml_engines_linux_mxnet.yml (100%)
 rename .github/workflows/{ => deprecated}/smoke_test_ml_engines_linux_tf.yml (100%)
 rename .github/workflows/{ => deprecated}/smoke_test_ml_engines_win.yml (100%)
 rename .github/workflows/{ => deprecated}/smoke_test_pip_cli_sp_linux.yml (100%)
 rename .github/workflows/{ => deprecated}/smoke_test_pip_cli_sp_win.yml (100%)
 rename .github/workflows/{ => deprecated}/smoke_test_security.yml (100%)
 rename .github/workflows/{ => deprecated}/smoke_test_simulation_mpi_linux.yml (100%)
 create mode 100644 python/examples/federate/quick_start/octopus/dump.rdb
 create mode 100644 python/examples/federate/quick_start/octopus/nohup.out

diff --git a/.github/workflows/build_wheels_and_releases.yml-backup b/.github/workflows/deprecated/build_wheels_and_releases.yml-backup
similarity index 100%
rename from .github/workflows/build_wheels_and_releases.yml-backup
rename to .github/workflows/deprecated/build_wheels_and_releases.yml-backup
diff --git a/.github/workflows/full_e2e_test.yml-bakcup b/.github/workflows/deprecated/full_e2e_test.yml-bakcup
similarity index 100%
rename from .github/workflows/full_e2e_test.yml-bakcup
rename to .github/workflows/deprecated/full_e2e_test.yml-bakcup
diff --git a/.github/workflows/runner.md b/.github/workflows/deprecated/runner.md
similarity index 100%
rename from .github/workflows/runner.md
rename to .github/workflows/deprecated/runner.md
diff --git a/.github/workflows/smoke_test_cross_device_mnn_server_linux.yml b/.github/workflows/deprecated/smoke_test_cross_device_mnn_server_linux.yml
similarity index 100%
rename from .github/workflows/smoke_test_cross_device_mnn_server_linux.yml
rename to .github/workflows/deprecated/smoke_test_cross_device_mnn_server_linux.yml
diff --git a/.github/workflows/smoke_test_cross_silo_fedavg_attack_linux.yml b/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_attack_linux.yml
similarity index 100%
rename from .github/workflows/smoke_test_cross_silo_fedavg_attack_linux.yml
rename to .github/workflows/deprecated/smoke_test_cross_silo_fedavg_attack_linux.yml
diff --git a/.github/workflows/smoke_test_cross_silo_fedavg_cdp_linux.yml b/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_cdp_linux.yml
similarity index 100%
rename from .github/workflows/smoke_test_cross_silo_fedavg_cdp_linux.yml
rename to .github/workflows/deprecated/smoke_test_cross_silo_fedavg_cdp_linux.yml
diff --git a/.github/workflows/smoke_test_cross_silo_fedavg_defense_linux.yml b/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_defense_linux.yml
similarity index 100%
rename from .github/workflows/smoke_test_cross_silo_fedavg_defense_linux.yml
rename to .github/workflows/deprecated/smoke_test_cross_silo_fedavg_defense_linux.yml
diff --git a/.github/workflows/smoke_test_cross_silo_fedavg_ldp_linux.yml b/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_ldp_linux.yml
similarity index 100%
rename from .github/workflows/smoke_test_cross_silo_fedavg_ldp_linux.yml
rename to .github/workflows/deprecated/smoke_test_cross_silo_fedavg_ldp_linux.yml
diff --git a/.github/workflows/smoke_test_cross_silo_ho_linux.yml b/.github/workflows/deprecated/smoke_test_cross_silo_ho_linux.yml
similarity index 100%
rename from .github/workflows/smoke_test_cross_silo_ho_linux.yml
rename to .github/workflows/deprecated/smoke_test_cross_silo_ho_linux.yml
diff --git a/.github/workflows/smoke_test_cross_silo_ho_win.yml b/.github/workflows/deprecated/smoke_test_cross_silo_ho_win.yml
similarity index 100%
rename from .github/workflows/smoke_test_cross_silo_ho_win.yml
rename to .github/workflows/deprecated/smoke_test_cross_silo_ho_win.yml
diff --git a/.github/workflows/smoke_test_cross_silo_lightsecagg_linux.yml b/.github/workflows/deprecated/smoke_test_cross_silo_lightsecagg_linux.yml
similarity index 100%
rename from .github/workflows/smoke_test_cross_silo_lightsecagg_linux.yml
rename to .github/workflows/deprecated/smoke_test_cross_silo_lightsecagg_linux.yml
diff --git a/.github/workflows/smoke_test_cross_silo_lightsecagg_win.yml b/.github/workflows/deprecated/smoke_test_cross_silo_lightsecagg_win.yml
similarity index 100%
rename from .github/workflows/smoke_test_cross_silo_lightsecagg_win.yml
rename to .github/workflows/deprecated/smoke_test_cross_silo_lightsecagg_win.yml
diff --git a/.github/workflows/smoke_test_flow_linux.yml b/.github/workflows/deprecated/smoke_test_flow_linux.yml
similarity index 100%
rename from .github/workflows/smoke_test_flow_linux.yml
rename to .github/workflows/deprecated/smoke_test_flow_linux.yml
diff --git a/.github/workflows/smoke_test_ml_engines_linux_jax.yml b/.github/workflows/deprecated/smoke_test_ml_engines_linux_jax.yml
similarity index 100%
rename from .github/workflows/smoke_test_ml_engines_linux_jax.yml
rename to .github/workflows/deprecated/smoke_test_ml_engines_linux_jax.yml
diff --git a/.github/workflows/smoke_test_ml_engines_linux_mxnet.yml b/.github/workflows/deprecated/smoke_test_ml_engines_linux_mxnet.yml
similarity index 100%
rename from .github/workflows/smoke_test_ml_engines_linux_mxnet.yml
rename to .github/workflows/deprecated/smoke_test_ml_engines_linux_mxnet.yml
diff --git a/.github/workflows/smoke_test_ml_engines_linux_tf.yml b/.github/workflows/deprecated/smoke_test_ml_engines_linux_tf.yml
similarity index 100%
rename from .github/workflows/smoke_test_ml_engines_linux_tf.yml
rename to .github/workflows/deprecated/smoke_test_ml_engines_linux_tf.yml
diff --git a/.github/workflows/smoke_test_ml_engines_win.yml b/.github/workflows/deprecated/smoke_test_ml_engines_win.yml
similarity index 100%
rename from .github/workflows/smoke_test_ml_engines_win.yml
rename to .github/workflows/deprecated/smoke_test_ml_engines_win.yml
diff --git a/.github/workflows/smoke_test_pip_cli_sp_linux.yml b/.github/workflows/deprecated/smoke_test_pip_cli_sp_linux.yml
similarity index 100%
rename from .github/workflows/smoke_test_pip_cli_sp_linux.yml
rename to .github/workflows/deprecated/smoke_test_pip_cli_sp_linux.yml
diff --git a/.github/workflows/smoke_test_pip_cli_sp_win.yml b/.github/workflows/deprecated/smoke_test_pip_cli_sp_win.yml
similarity index 100%
rename from .github/workflows/smoke_test_pip_cli_sp_win.yml
rename to .github/workflows/deprecated/smoke_test_pip_cli_sp_win.yml
diff --git a/.github/workflows/smoke_test_security.yml b/.github/workflows/deprecated/smoke_test_security.yml
similarity index 100%
rename from .github/workflows/smoke_test_security.yml
rename to .github/workflows/deprecated/smoke_test_security.yml
diff --git a/.github/workflows/smoke_test_simulation_mpi_linux.yml b/.github/workflows/deprecated/smoke_test_simulation_mpi_linux.yml
similarity index 100%
rename from .github/workflows/smoke_test_simulation_mpi_linux.yml
rename to .github/workflows/deprecated/smoke_test_simulation_mpi_linux.yml
diff --git a/python/examples/federate/quick_start/octopus/dump.rdb b/python/examples/federate/quick_start/octopus/dump.rdb
new file mode 100644
index 0000000000000000000000000000000000000000..f31af5cb4fd49f4a377eb444135610e4915c5730
GIT binary patch
literal 7248
zcmdT}TWlj&89p)b=GL&g*`)1mc8NDaw4#o4&fF)clAYK|ChN;~<7B%lRnFL+B$N17
zdz@s`Qf*Pi7Ag=(T(mDJf(k)ffshbX@DjG-0fEFtT*Lz+T0yxMRDo!fj<%ff%-He7
z<H@8Ch=*L1jL-Qm-~WH#|Id|3SeS<p!Tp}Hl$9%E8&dfM8-M|X_Ivfeq-CYD8{GG1
z6ge;5`RPn<dB3|_k+NfXDSzkYCx^j)w^&>rON$k07u*By1Z7?wt`435Oa}nq?6Q>2
z=f?7tm6ViSkx~^!RHQKqv23Rhjz$1C9SO&xshMbEG89ck;^E5)A)ZWKS%{~Ea4HtM
zoJxjf#t;V)0|^e%EWsi~5G>ajFBT;AL!o=a?eBrLdsN~1KevC7LEZDo(3PYRpK)On
z%~K45kfj)!U`eKC6nI$oX)ZCD!bc_-=IcJtJk3Ln=6C}80^x~ELNpo)t9Qx2I~YfF
z;fTbsZR(G{I@m6<1jCWK$J@cqb`RndApODLo3}sI$W^&oFjA$3-eR_C3z%q1UO2KS
zOhr=3L=tB*z7R{zTsB?DSz{qZvLr>)_y?q!-EP3^pN@oQBDGv6!jWjoIOI_7Axl#%
zPw+U(h<d-4dw>0#8a0zkH9TmbEhQIH*q<7RuqTM8Sc>Fm62k}s@nGJAms&ZbVG+B6
zZ8Q>j*qf9NP>cYMV>zCq34#IYtK+@xGk}P$#PoDjh)2Ll{pMt3KAE}{xi;Ua{ignK
z-Z<f$#W4{V<3_EH2)&Nn1(T!+hJ+N0&4t!r{&ny_4q#-Cab;6zE}+99^lQk~z7g(#
zfheA1NSbFTis3E3xGH3h;&Ere*yL!a7qU9{rN2znx=ZkD(v7**^h`!h$aBKNwt(XD
z93Q}cEM1=xnAqB56(K*pc8%asy6~9xQ2zSWD@u9_;SViRg1jlGXZV76m0l39qMWc+
zlw--vRw5iD65$0ZzP&=k=LLMmB9+n3tNm}CjBUrLI0*yQ*uqu}UFD*)nd!&NnJ7st
zO0!$cYJM~M<dl-%ARyYjCNFPaPprQ-nVsAndF2h@t(3uNrkD@ZkP{H)K(?3{<w9Vp
zS}s2*6_ijWQ>+%0dtaV?eem|bp74PFDCiPz4vr{bAi5@Qh~qi2urfZcl;y(8N21__
zSduSEH=YEZXFx}=zjH%W#PUu*=<=<3N@8L6e1})^t%Kaoa9^LaRgy7zBr##^40QD@
z%jJp^7x%y^uSd=*;LM(<dO|2D(u!2}ZFstK;$G*6y*;_&id@+9M0@*6tGK}9)qGmo
z{j}Gs6xXG~POeL1opSO<Y3XUNXRD-uUU6__Ow37J3h*HkF=?L6a70$5U`FC0LZnDY
z16f2Y!!)84@aU?dlqwVB<4f!VfeIO@R>n4^iZX@*VqV-X7R1d;;G*Y&@lv_CEa#+D
zNzN$MvP1?-g%t&OmssD{searwIi4j+T;>d|fX?8@E`A-H4i!K^``UT?{(-wYFTVBc
zEzScv1mN8eb5&`p3{LHJJaqG)r#}A5=P&eqZtn#T09z%@jt}~B9Xa{pcXrm_hg%1|
zwC;O)s5R7lgJ)VnE%?DM6KOtb#0hELb*TBKJQ!-h#M1DH6U>HM;;f|pw(Cv3PuTV*
zhSCWJJ@;>ijza4l<>+F7=2@B~C{AnVh&9UfyeT>yX@G%E{ivo#x~GM>Fh7gw_!GOT
zgT1`&SpO|URM>|==U5Po+A)h0QMRI%-46AwUm7yN_G(jM1*i3^4Q4eIi4ad|`ot7h
z7KR^d(Vz{n;-))WvUL3JY{{3i%xY3)re0A$<sE2G$gB=T_4~J+9@IHd1tvk;>Rg?l
z)tr+SV9194bV24c&oeB6NRGgeMu*7!)r)^~DpJG1c%AEB{(s@Ric)?p^o7v(KJe72
zsFT^~#HcQmG*%e(kLr}=8yc~r2a_FwkLY~nd!(z*62gcbOJG{9vBXI$P55Sg=QJ`5
zR+mD))4d01(3iTei8_22P8h7XG`#cO9yvQYK~Vlt%!J^qCnt+bPjnh|DAcH7SLm$K
zpidlZnV$b0KiuDvA4WgwPJHnMq8Sz|FB%%7-H*+RvF~t3b9GfNc=l@CWbH^UPGTTM
zaS+!Y#}EX356Y8)@Sq%b>TQ-*SZ6K3HZhDgw6vPUYJ+qW`C&47_FI3_yM!ZgTIOAC
zeuDM(I%SSw?FPw%<5#`osZW^}wE1Z3o_^-tH=H`0F%ZAFdY|6??Ez&xdFR&aPG=fx
zg>%t&^e655Kf@hkSZZ2ySTTBZk;GtlQV6mLYfAz`9a-cdnAAt^#-73gvNJ8@fIA(~
zG+ezGk0?C2vo!XGV<B7fNbUQ@X$Mv}bm%{RXDp>gEYq~UltLs)@&w6I(45NEZ~pvk
zCm^`qW8qPi;aNOuB3$>h(d?dk*^qNSi)qH7->^+ro&~cx9-(5JuG1>Ep)}NMFF9?K
z40-Tv6X>?ZV<25UYpQVDQnsM0-on&htr!(*S52%@pg;LaTeB%h8!7KW=i6ZeX=S19
zA=TSv0&M)#7880UyQ#mX<%C{;k2ckxQG21qkPr<`Mk7v(rjy7|_(?zIr~Qnd^+UC2
z&Q>;d<8&WmxN2IG=~XuH17j$%zsQ)1UT_FyJ?L&)2&UVEm2YVw>W(>#+9Ai7MceOY
zTXK^dD=q6qi<PbvQIo9Kq2mU5P2xn);BU-bxorkny%=2BiN(vo#jI|p3l(M26UPr2
zemejug5X(-rWi_R2A7#TxS6*YqUMH-rKoM}nB#0kn{2k;`B{fLs&_dT`LfLUIxy0H
zW6NyYu`Qg{&~49{y3Mxk8uIXu=6i-Ca29<q85(CbUVwbjAuRPbAnwxN#FCa&xH-up
zx-2KDZ~n|s6t)4=1%>+f^TrCo7IAp(ReOcj?sZd>SM{`kBkhDNG~H?^RHyEDzww9L
L55F{c_t*agW;0!L

literal 0
HcmV?d00001

diff --git a/python/examples/federate/quick_start/octopus/nohup.out b/python/examples/federate/quick_start/octopus/nohup.out
new file mode 100644
index 0000000000..2350761958
--- /dev/null
+++ b/python/examples/federate/quick_start/octopus/nohup.out
@@ -0,0 +1,3032 @@
+58896:C 27 May 2024 16:58:15.551 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo
+58896:C 27 May 2024 16:58:15.551 # Redis version=7.0.11, bits=64, commit=00000000, modified=0, pid=58896, just started
+58896:C 27 May 2024 16:58:15.551 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf
+58896:M 27 May 2024 16:58:15.551 * monotonic clock: POSIX clock_gettime
+58896:M 27 May 2024 16:58:15.552 * Running mode=standalone, port=6379.
+58896:M 27 May 2024 16:58:15.552 # WARNING: The TCP backlog setting of 511 cannot be enforced because kern.ipc.somaxconn is set to the lower value of 128.
+58896:M 27 May 2024 16:58:15.552 # Server initialized
+58896:M 27 May 2024 16:58:15.552 * Ready to accept connections
+58896:M 27 May 2024 16:58:22.918 * DB saved on disk
+58896:M 27 May 2024 17:58:23.012 * 1 changes in 3600 seconds. Saving...
+58896:M 27 May 2024 17:58:23.013 * Background saving started by pid 65644
+65644:C 27 May 2024 17:58:23.020 * DB saved on disk
+65644:C 27 May 2024 17:58:23.021 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 27 May 2024 17:58:23.114 * Background saving terminated with success
+58896:M 27 May 2024 19:13:18.626 * 1 changes in 3600 seconds. Saving...
+58896:M 27 May 2024 19:13:18.736 * Background saving started by pid 72278
+72278:C 27 May 2024 19:13:18.746 * DB saved on disk
+72278:C 27 May 2024 19:13:18.746 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 27 May 2024 19:13:18.838 * Background saving terminated with success
+58896:M 28 May 2024 14:08:25.606 * 1 changes in 3600 seconds. Saving...
+58896:M 28 May 2024 14:08:25.608 * Background saving started by pid 78120
+78120:C 28 May 2024 14:08:25.615 * DB saved on disk
+78120:C 28 May 2024 14:08:25.616 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 28 May 2024 14:08:25.709 * Background saving terminated with success
+58896:M 28 May 2024 15:19:26.423 * 1 changes in 3600 seconds. Saving...
+58896:M 28 May 2024 15:19:26.539 * Background saving started by pid 84225
+84225:C 28 May 2024 15:19:26.545 * DB saved on disk
+84225:C 28 May 2024 15:19:26.546 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 28 May 2024 15:19:26.642 * Background saving terminated with success
+58896:M 29 May 2024 17:57:07.834 * 1 changes in 3600 seconds. Saving...
+58896:M 29 May 2024 17:57:07.835 * Background saving started by pid 4206
+4206:C 29 May 2024 17:57:07.849 * DB saved on disk
+4206:C 29 May 2024 17:57:07.850 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 29 May 2024 17:57:07.935 * Background saving terminated with success
+58896:M 29 May 2024 18:57:08.006 * 1 changes in 3600 seconds. Saving...
+58896:M 29 May 2024 18:57:08.008 * Background saving started by pid 11453
+11453:C 29 May 2024 18:57:08.021 * DB saved on disk
+11453:C 29 May 2024 18:57:08.022 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 29 May 2024 18:57:08.109 * Background saving terminated with success
+58896:M 29 May 2024 19:21:58.973 * 100 changes in 300 seconds. Saving...
+58896:M 29 May 2024 19:21:58.973 * Background saving started by pid 19535
+19535:C 29 May 2024 19:21:58.981 * DB saved on disk
+19535:C 29 May 2024 19:21:58.981 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 29 May 2024 19:21:59.074 * Background saving terminated with success
+58896:M 29 May 2024 19:27:40.433 * DB saved on disk
+58896:M 29 May 2024 20:39:51.202 * 1 changes in 3600 seconds. Saving...
+58896:M 29 May 2024 20:39:51.203 * Background saving started by pid 21314
+21314:C 29 May 2024 20:39:51.314 * DB saved on disk
+21314:C 29 May 2024 20:39:51.315 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 29 May 2024 20:39:51.407 * Background saving terminated with success
+58896:M 29 May 2024 22:49:24.515 * 1 changes in 3600 seconds. Saving...
+58896:M 29 May 2024 22:49:24.516 * Background saving started by pid 25814
+25814:C 29 May 2024 22:49:24.587 * DB saved on disk
+25814:C 29 May 2024 22:49:24.587 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 29 May 2024 22:49:24.617 * Background saving terminated with success
+58896:M 29 May 2024 23:08:52.971 * 100 changes in 300 seconds. Saving...
+58896:M 29 May 2024 23:08:52.972 * Background saving started by pid 28739
+28739:C 29 May 2024 23:08:52.978 * DB saved on disk
+28739:C 29 May 2024 23:08:52.978 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 29 May 2024 23:08:53.073 * Background saving terminated with success
+58896:M 30 May 2024 00:09:08.434 * 1 changes in 3600 seconds. Saving...
+58896:M 30 May 2024 00:09:08.435 * Background saving started by pid 30988
+30988:C 30 May 2024 00:09:08.569 * DB saved on disk
+30988:C 30 May 2024 00:09:08.569 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 30 May 2024 00:09:08.638 * Background saving terminated with success
+58896:M 30 May 2024 12:51:39.719 * 1 changes in 3600 seconds. Saving...
+58896:M 30 May 2024 12:51:39.720 * Background saving started by pid 41021
+41021:C 30 May 2024 12:51:39.729 * DB saved on disk
+41021:C 30 May 2024 12:51:39.729 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 30 May 2024 12:51:39.821 * Background saving terminated with success
+58896:M 30 May 2024 12:56:40.011 * 100 changes in 300 seconds. Saving...
+58896:M 30 May 2024 12:56:40.017 * Background saving started by pid 41850
+41850:C 30 May 2024 12:56:40.033 * DB saved on disk
+41850:C 30 May 2024 12:56:40.034 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 30 May 2024 12:56:40.120 * Background saving terminated with success
+58896:M 30 May 2024 13:16:33.022 * 100 changes in 300 seconds. Saving...
+58896:M 30 May 2024 13:16:33.138 * Background saving started by pid 42823
+42823:C 30 May 2024 13:16:33.145 * DB saved on disk
+42823:C 30 May 2024 13:16:33.145 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 30 May 2024 13:16:33.240 * Background saving terminated with success
+58896:M 30 May 2024 15:25:24.413 * 1 changes in 3600 seconds. Saving...
+58896:M 30 May 2024 15:25:24.413 * Background saving started by pid 48206
+48206:C 30 May 2024 15:25:24.425 * DB saved on disk
+48206:C 30 May 2024 15:25:24.426 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 30 May 2024 15:25:24.513 * Background saving terminated with success
+58896:M 30 May 2024 15:30:25.016 * 100 changes in 300 seconds. Saving...
+58896:M 30 May 2024 15:30:25.017 * Background saving started by pid 48800
+48800:C 30 May 2024 15:30:25.029 * DB saved on disk
+48800:C 30 May 2024 15:30:25.031 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 30 May 2024 15:30:25.119 * Background saving terminated with success
+58896:M 30 May 2024 15:35:26.012 * 100 changes in 300 seconds. Saving...
+58896:M 30 May 2024 15:35:26.014 * Background saving started by pid 49390
+49390:C 30 May 2024 15:35:26.025 * DB saved on disk
+49390:C 30 May 2024 15:35:26.025 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 30 May 2024 15:35:26.115 * Background saving terminated with success
+58896:M 30 May 2024 16:42:51.523 * 1 changes in 3600 seconds. Saving...
+58896:M 30 May 2024 16:42:51.646 * Background saving started by pid 65592
+65592:C 30 May 2024 16:42:51.655 * DB saved on disk
+65592:C 30 May 2024 16:42:51.655 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 30 May 2024 16:42:51.747 * Background saving terminated with success
+58896:M 30 May 2024 17:15:29.037 * 100 changes in 300 seconds. Saving...
+58896:M 30 May 2024 17:15:29.039 * Background saving started by pid 69523
+69523:C 30 May 2024 17:15:29.050 * DB saved on disk
+69523:C 30 May 2024 17:15:29.050 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 30 May 2024 17:15:29.142 * Background saving terminated with success
+58896:M 30 May 2024 18:15:30.060 * 1 changes in 3600 seconds. Saving...
+58896:M 30 May 2024 18:15:30.063 * Background saving started by pid 84706
+84706:C 30 May 2024 18:15:30.075 * DB saved on disk
+84706:C 30 May 2024 18:15:30.076 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 30 May 2024 18:15:30.167 * Background saving terminated with success
+58896:M 03 Jun 2024 18:06:53.699 * 1 changes in 3600 seconds. Saving...
+58896:M 03 Jun 2024 18:06:53.703 * Background saving started by pid 90870
+90870:C 03 Jun 2024 18:06:53.713 * DB saved on disk
+90870:C 03 Jun 2024 18:06:53.714 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 03 Jun 2024 18:06:53.803 * Background saving terminated with success
+58896:M 03 Jun 2024 18:11:54.075 * 100 changes in 300 seconds. Saving...
+58896:M 03 Jun 2024 18:11:54.078 * Background saving started by pid 91526
+91526:C 03 Jun 2024 18:11:54.087 * DB saved on disk
+91526:C 03 Jun 2024 18:11:54.087 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 03 Jun 2024 18:11:54.180 * Background saving terminated with success
+58896:M 03 Jun 2024 18:18:46.019 * 100 changes in 300 seconds. Saving...
+58896:M 03 Jun 2024 18:18:46.024 * Background saving started by pid 92286
+92286:C 03 Jun 2024 18:18:46.034 * DB saved on disk
+92286:C 03 Jun 2024 18:18:46.035 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 03 Jun 2024 18:18:46.126 * Background saving terminated with success
+58896:M 03 Jun 2024 18:24:50.037 * 100 changes in 300 seconds. Saving...
+58896:M 03 Jun 2024 18:24:50.038 * Background saving started by pid 93124
+93124:C 03 Jun 2024 18:24:50.050 * DB saved on disk
+93124:C 03 Jun 2024 18:24:50.050 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 03 Jun 2024 18:24:50.140 * Background saving terminated with success
+58896:M 03 Jun 2024 18:29:51.049 * 100 changes in 300 seconds. Saving...
+58896:M 03 Jun 2024 18:29:51.051 * Background saving started by pid 93844
+93844:C 03 Jun 2024 18:29:51.063 * DB saved on disk
+93844:C 03 Jun 2024 18:29:51.064 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 03 Jun 2024 18:29:51.153 * Background saving terminated with success
+58896:M 03 Jun 2024 18:34:52.071 * 100 changes in 300 seconds. Saving...
+58896:M 03 Jun 2024 18:34:52.073 * Background saving started by pid 94768
+94768:C 03 Jun 2024 18:34:52.088 * DB saved on disk
+94768:C 03 Jun 2024 18:34:52.089 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 03 Jun 2024 18:34:52.174 * Background saving terminated with success
+58896:M 03 Jun 2024 18:48:44.981 * 100 changes in 300 seconds. Saving...
+58896:M 03 Jun 2024 18:48:44.982 * Background saving started by pid 96629
+96629:C 03 Jun 2024 18:48:45.005 * DB saved on disk
+96629:C 03 Jun 2024 18:48:45.006 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 03 Jun 2024 18:48:45.083 * Background saving terminated with success
+58896:M 03 Jun 2024 23:49:36.949 * 1 changes in 3600 seconds. Saving...
+58896:M 03 Jun 2024 23:49:36.968 * Background saving started by pid 97783
+97783:C 03 Jun 2024 23:49:37.370 * DB saved on disk
+97783:C 03 Jun 2024 23:49:37.436 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 03 Jun 2024 23:49:37.493 * Background saving terminated with success
+58896:M 04 Jun 2024 14:48:17.992 * 1 changes in 3600 seconds. Saving...
+58896:M 04 Jun 2024 14:48:18.002 * Background saving started by pid 19353
+19353:C 04 Jun 2024 14:48:18.012 * DB saved on disk
+19353:C 04 Jun 2024 14:48:18.012 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 04 Jun 2024 14:48:18.109 * Background saving terminated with success
+58896:M 04 Jun 2024 14:53:19.081 * 100 changes in 300 seconds. Saving...
+58896:M 04 Jun 2024 14:53:19.082 * Background saving started by pid 19823
+19823:C 04 Jun 2024 14:53:19.087 * DB saved on disk
+19823:C 04 Jun 2024 14:53:19.088 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 04 Jun 2024 14:53:19.184 * Background saving terminated with success
+58896:M 04 Jun 2024 15:17:23.645 * 100 changes in 300 seconds. Saving...
+58896:M 04 Jun 2024 15:17:23.646 * Background saving started by pid 23721
+23721:C 04 Jun 2024 15:17:23.656 * DB saved on disk
+23721:C 04 Jun 2024 15:17:23.656 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 04 Jun 2024 15:17:23.748 * Background saving terminated with success
+58896:M 05 Jun 2024 15:06:53.989 * 1 changes in 3600 seconds. Saving...
+58896:M 05 Jun 2024 15:06:53.991 * Background saving started by pid 74889
+74889:C 05 Jun 2024 15:06:54.001 * DB saved on disk
+74889:C 05 Jun 2024 15:06:54.001 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 05 Jun 2024 15:06:54.091 * Background saving terminated with success
+58896:M 05 Jun 2024 15:11:55.053 * 100 changes in 300 seconds. Saving...
+58896:M 05 Jun 2024 15:11:55.057 * Background saving started by pid 75549
+75549:C 05 Jun 2024 15:11:55.084 * DB saved on disk
+75549:C 05 Jun 2024 15:11:55.084 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 05 Jun 2024 15:11:55.158 * Background saving terminated with success
+58896:M 05 Jun 2024 15:16:56.075 * 100 changes in 300 seconds. Saving...
+58896:M 05 Jun 2024 15:16:56.078 * Background saving started by pid 76399
+76399:C 05 Jun 2024 15:16:56.095 * DB saved on disk
+76399:C 05 Jun 2024 15:16:56.097 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 05 Jun 2024 15:16:56.180 * Background saving terminated with success
+58896:M 05 Jun 2024 15:51:48.859 * 100 changes in 300 seconds. Saving...
+58896:M 05 Jun 2024 15:51:48.861 * Background saving started by pid 79566
+79566:C 05 Jun 2024 15:51:48.871 * DB saved on disk
+79566:C 05 Jun 2024 15:51:48.871 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 05 Jun 2024 15:51:48.963 * Background saving terminated with success
+58896:M 05 Jun 2024 16:17:59.180 * 100 changes in 300 seconds. Saving...
+58896:M 05 Jun 2024 16:17:59.183 * Background saving started by pid 82108
+82108:C 05 Jun 2024 16:17:59.192 * DB saved on disk
+82108:C 05 Jun 2024 16:17:59.193 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 05 Jun 2024 16:17:59.284 * Background saving terminated with success
+58896:M 05 Jun 2024 16:35:17.999 * 100 changes in 300 seconds. Saving...
+58896:M 05 Jun 2024 16:35:18.002 * Background saving started by pid 83723
+83723:C 05 Jun 2024 16:35:18.010 * DB saved on disk
+83723:C 05 Jun 2024 16:35:18.011 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 05 Jun 2024 16:35:18.103 * Background saving terminated with success
+58896:M 05 Jun 2024 16:43:22.260 * 100 changes in 300 seconds. Saving...
+58896:M 05 Jun 2024 16:43:22.261 * Background saving started by pid 84583
+84583:C 05 Jun 2024 16:43:22.275 * DB saved on disk
+84583:C 05 Jun 2024 16:43:22.278 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 05 Jun 2024 16:43:22.362 * Background saving terminated with success
+58896:M 05 Jun 2024 16:48:23.046 * 100 changes in 300 seconds. Saving...
+58896:M 05 Jun 2024 16:48:23.047 * Background saving started by pid 85131
+85131:C 05 Jun 2024 16:48:23.065 * DB saved on disk
+85131:C 05 Jun 2024 16:48:23.066 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 05 Jun 2024 16:48:23.148 * Background saving terminated with success
+58896:M 05 Jun 2024 17:24:20.810 * 100 changes in 300 seconds. Saving...
+58896:M 05 Jun 2024 17:24:20.813 * Background saving started by pid 90105
+90105:C 05 Jun 2024 17:24:20.823 * DB saved on disk
+90105:C 05 Jun 2024 17:24:20.826 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 05 Jun 2024 17:24:20.914 * Background saving terminated with success
+58896:M 05 Jun 2024 17:38:07.895 * 100 changes in 300 seconds. Saving...
+58896:M 05 Jun 2024 17:38:07.898 * Background saving started by pid 91506
+91506:C 05 Jun 2024 17:38:07.907 * DB saved on disk
+91506:C 05 Jun 2024 17:38:07.907 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 05 Jun 2024 17:38:07.998 * Background saving terminated with success
+58896:M 05 Jun 2024 17:43:08.028 * 100 changes in 300 seconds. Saving...
+58896:M 05 Jun 2024 17:43:08.031 * Background saving started by pid 92110
+92110:C 05 Jun 2024 17:43:08.047 * DB saved on disk
+92110:C 05 Jun 2024 17:43:08.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 05 Jun 2024 17:43:08.133 * Background saving terminated with success
+58896:M 05 Jun 2024 17:48:09.040 * 100 changes in 300 seconds. Saving...
+58896:M 05 Jun 2024 17:48:09.042 * Background saving started by pid 92684
+92684:C 05 Jun 2024 17:48:09.056 * DB saved on disk
+92684:C 05 Jun 2024 17:48:09.057 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 05 Jun 2024 17:48:09.144 * Background saving terminated with success
+58896:M 05 Jun 2024 17:53:10.043 * 100 changes in 300 seconds. Saving...
+58896:M 05 Jun 2024 17:53:10.045 * Background saving started by pid 93293
+93293:C 05 Jun 2024 17:53:10.054 * DB saved on disk
+93293:C 05 Jun 2024 17:53:10.056 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 05 Jun 2024 17:53:10.146 * Background saving terminated with success
+58896:M 05 Jun 2024 17:58:11.081 * 100 changes in 300 seconds. Saving...
+58896:M 05 Jun 2024 17:58:11.083 * Background saving started by pid 93757
+93757:C 05 Jun 2024 17:58:11.096 * DB saved on disk
+93757:C 05 Jun 2024 17:58:11.097 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 05 Jun 2024 17:58:11.186 * Background saving terminated with success
+58896:M 07 Jun 2024 16:39:41.351 * 1 changes in 3600 seconds. Saving...
+58896:M 07 Jun 2024 16:39:41.353 * Background saving started by pid 27460
+27460:C 07 Jun 2024 16:39:41.369 * DB saved on disk
+27460:C 07 Jun 2024 16:39:41.370 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 07 Jun 2024 16:39:41.454 * Background saving terminated with success
+58896:M 07 Jun 2024 16:44:42.066 * 100 changes in 300 seconds. Saving...
+58896:M 07 Jun 2024 16:44:42.071 * Background saving started by pid 28358
+28358:C 07 Jun 2024 16:44:42.082 * DB saved on disk
+28358:C 07 Jun 2024 16:44:42.083 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 07 Jun 2024 16:44:42.173 * Background saving terminated with success
+58896:M 07 Jun 2024 17:16:01.732 * 100 changes in 300 seconds. Saving...
+58896:M 07 Jun 2024 17:16:01.734 * Background saving started by pid 33049
+33049:C 07 Jun 2024 17:16:01.746 * DB saved on disk
+33049:C 07 Jun 2024 17:16:01.746 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 07 Jun 2024 17:16:01.835 * Background saving terminated with success
+58896:M 07 Jun 2024 17:21:02.052 * 100 changes in 300 seconds. Saving...
+58896:M 07 Jun 2024 17:21:02.059 * Background saving started by pid 33638
+33638:C 07 Jun 2024 17:21:02.070 * DB saved on disk
+33638:C 07 Jun 2024 17:21:02.071 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 07 Jun 2024 17:21:02.161 * Background saving terminated with success
+58896:M 07 Jun 2024 17:33:38.484 * 100 changes in 300 seconds. Saving...
+58896:M 07 Jun 2024 17:33:38.486 * Background saving started by pid 35103
+35103:C 07 Jun 2024 17:33:38.495 * DB saved on disk
+35103:C 07 Jun 2024 17:33:38.495 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 07 Jun 2024 17:33:38.587 * Background saving terminated with success
+58896:M 07 Jun 2024 17:38:39.030 * 100 changes in 300 seconds. Saving...
+58896:M 07 Jun 2024 17:38:39.031 * Background saving started by pid 35753
+35753:C 07 Jun 2024 17:38:39.044 * DB saved on disk
+35753:C 07 Jun 2024 17:38:39.045 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 07 Jun 2024 17:38:39.132 * Background saving terminated with success
+58896:M 07 Jun 2024 17:43:40.049 * 100 changes in 300 seconds. Saving...
+58896:M 07 Jun 2024 17:43:40.051 * Background saving started by pid 36373
+36373:C 07 Jun 2024 17:43:40.062 * DB saved on disk
+36373:C 07 Jun 2024 17:43:40.062 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 07 Jun 2024 17:43:40.152 * Background saving terminated with success
+58896:M 07 Jun 2024 17:49:19.866 * 100 changes in 300 seconds. Saving...
+58896:M 07 Jun 2024 17:49:19.867 * Background saving started by pid 36987
+36987:C 07 Jun 2024 17:49:19.874 * DB saved on disk
+36987:C 07 Jun 2024 17:49:19.875 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 07 Jun 2024 17:49:19.967 * Background saving terminated with success
+58896:M 07 Jun 2024 17:54:20.070 * 100 changes in 300 seconds. Saving...
+58896:M 07 Jun 2024 17:54:20.074 * Background saving started by pid 37622
+37622:C 07 Jun 2024 17:54:20.087 * DB saved on disk
+37622:C 07 Jun 2024 17:54:20.088 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 07 Jun 2024 17:54:20.178 * Background saving terminated with success
+58896:M 07 Jun 2024 18:00:52.446 * 100 changes in 300 seconds. Saving...
+58896:M 07 Jun 2024 18:00:52.448 * Background saving started by pid 38338
+38338:C 07 Jun 2024 18:00:52.458 * DB saved on disk
+38338:C 07 Jun 2024 18:00:52.460 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 07 Jun 2024 18:00:52.548 * Background saving terminated with success
+58896:M 07 Jun 2024 18:05:53.016 * 100 changes in 300 seconds. Saving...
+58896:M 07 Jun 2024 18:05:53.018 * Background saving started by pid 39003
+39003:C 07 Jun 2024 18:05:53.032 * DB saved on disk
+39003:C 07 Jun 2024 18:05:53.033 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 07 Jun 2024 18:05:53.119 * Background saving terminated with success
+58896:M 07 Jun 2024 18:10:54.046 * 100 changes in 300 seconds. Saving...
+58896:M 07 Jun 2024 18:10:54.047 * Background saving started by pid 39675
+39675:C 07 Jun 2024 18:10:54.071 * DB saved on disk
+39675:C 07 Jun 2024 18:10:54.073 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 07 Jun 2024 18:10:54.149 * Background saving terminated with success
+58896:M 07 Jun 2024 18:22:32.800 * 100 changes in 300 seconds. Saving...
+58896:M 07 Jun 2024 18:22:32.801 * Background saving started by pid 40966
+40966:C 07 Jun 2024 18:22:32.812 * DB saved on disk
+40966:C 07 Jun 2024 18:22:32.813 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 07 Jun 2024 18:22:32.904 * Background saving terminated with success
+58896:M 07 Jun 2024 18:27:33.013 * 100 changes in 300 seconds. Saving...
+58896:M 07 Jun 2024 18:27:33.013 * Background saving started by pid 41713
+41713:C 07 Jun 2024 18:27:33.026 * DB saved on disk
+41713:C 07 Jun 2024 18:27:33.028 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 07 Jun 2024 18:27:33.115 * Background saving terminated with success
+58896:M 07 Jun 2024 18:32:34.020 * 100 changes in 300 seconds. Saving...
+58896:M 07 Jun 2024 18:32:34.022 * Background saving started by pid 42366
+42366:C 07 Jun 2024 18:32:34.039 * DB saved on disk
+42366:C 07 Jun 2024 18:32:34.041 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 07 Jun 2024 18:32:34.124 * Background saving terminated with success
+58896:M 09 Jun 2024 01:21:17.009 * 1 changes in 3600 seconds. Saving...
+58896:M 09 Jun 2024 01:21:17.010 * Background saving started by pid 51967
+51967:C 09 Jun 2024 01:21:17.025 * DB saved on disk
+51967:C 09 Jun 2024 01:21:17.025 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 09 Jun 2024 01:21:17.111 * Background saving terminated with success
+58896:M 09 Jun 2024 01:26:18.067 * 100 changes in 300 seconds. Saving...
+58896:M 09 Jun 2024 01:26:18.068 * Background saving started by pid 52613
+52613:C 09 Jun 2024 01:26:18.076 * DB saved on disk
+52613:C 09 Jun 2024 01:26:18.077 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 09 Jun 2024 01:26:18.169 * Background saving terminated with success
+58896:M 09 Jun 2024 01:31:19.072 * 100 changes in 300 seconds. Saving...
+58896:M 09 Jun 2024 01:31:19.074 * Background saving started by pid 53131
+53131:C 09 Jun 2024 01:31:19.092 * DB saved on disk
+53131:C 09 Jun 2024 01:31:19.094 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 09 Jun 2024 01:31:19.176 * Background saving terminated with success
+58896:M 09 Jun 2024 01:36:20.080 * 100 changes in 300 seconds. Saving...
+58896:M 09 Jun 2024 01:36:20.083 * Background saving started by pid 53704
+53704:C 09 Jun 2024 01:36:20.094 * DB saved on disk
+53704:C 09 Jun 2024 01:36:20.096 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 09 Jun 2024 01:36:20.185 * Background saving terminated with success
+58896:M 09 Jun 2024 01:41:21.074 * 100 changes in 300 seconds. Saving...
+58896:M 09 Jun 2024 01:41:21.076 * Background saving started by pid 54385
+54385:C 09 Jun 2024 01:41:21.090 * DB saved on disk
+54385:C 09 Jun 2024 01:41:21.091 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 09 Jun 2024 01:41:21.179 * Background saving terminated with success
+58896:M 09 Jun 2024 01:46:22.080 * 100 changes in 300 seconds. Saving...
+58896:M 09 Jun 2024 01:46:22.083 * Background saving started by pid 54916
+54916:C 09 Jun 2024 01:46:22.102 * DB saved on disk
+54916:C 09 Jun 2024 01:46:22.102 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 09 Jun 2024 01:46:22.184 * Background saving terminated with success
+58896:M 09 Jun 2024 01:51:23.049 * 100 changes in 300 seconds. Saving...
+58896:M 09 Jun 2024 01:51:23.052 * Background saving started by pid 55511
+55511:C 09 Jun 2024 01:51:23.064 * DB saved on disk
+55511:C 09 Jun 2024 01:51:23.064 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 09 Jun 2024 01:51:23.152 * Background saving terminated with success
+58896:M 09 Jun 2024 01:59:40.782 * 100 changes in 300 seconds. Saving...
+58896:M 09 Jun 2024 01:59:40.783 * Background saving started by pid 56315
+56315:C 09 Jun 2024 01:59:40.795 * DB saved on disk
+56315:C 09 Jun 2024 01:59:40.795 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 09 Jun 2024 01:59:40.883 * Background saving terminated with success
+58896:M 09 Jun 2024 02:04:41.092 * 100 changes in 300 seconds. Saving...
+58896:M 09 Jun 2024 02:04:41.096 * Background saving started by pid 56862
+56862:C 09 Jun 2024 02:04:41.111 * DB saved on disk
+56862:C 09 Jun 2024 02:04:41.112 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 09 Jun 2024 02:04:41.198 * Background saving terminated with success
+58896:M 11 Jun 2024 13:19:27.398 * 1 changes in 3600 seconds. Saving...
+58896:M 11 Jun 2024 13:19:27.402 * Background saving started by pid 99129
+99129:C 11 Jun 2024 13:19:27.435 * DB saved on disk
+99129:C 11 Jun 2024 13:19:27.437 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 13:19:27.503 * Background saving terminated with success
+58896:M 11 Jun 2024 13:24:28.020 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 13:24:28.021 * Background saving started by pid 222
+222:C 11 Jun 2024 13:24:28.037 * DB saved on disk
+222:C 11 Jun 2024 13:24:28.038 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 13:24:28.127 * Background saving terminated with success
+58896:M 11 Jun 2024 13:29:29.094 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 13:29:29.096 * Background saving started by pid 1314
+1314:C 11 Jun 2024 13:29:29.108 * DB saved on disk
+1314:C 11 Jun 2024 13:29:29.114 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 13:29:29.197 * Background saving terminated with success
+58896:M 11 Jun 2024 13:34:30.038 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 13:34:30.042 * Background saving started by pid 1927
+1927:C 11 Jun 2024 13:34:30.054 * DB saved on disk
+1927:C 11 Jun 2024 13:34:30.055 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 13:34:30.144 * Background saving terminated with success
+58896:M 11 Jun 2024 13:39:31.059 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 13:39:31.063 * Background saving started by pid 2516
+2516:C 11 Jun 2024 13:39:31.100 * DB saved on disk
+2516:C 11 Jun 2024 13:39:31.102 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 13:39:31.164 * Background saving terminated with success
+58896:M 11 Jun 2024 13:44:32.023 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 13:44:32.024 * Background saving started by pid 3067
+3067:C 11 Jun 2024 13:44:32.033 * DB saved on disk
+3067:C 11 Jun 2024 13:44:32.033 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 13:44:32.125 * Background saving terminated with success
+58896:M 11 Jun 2024 13:51:40.611 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 13:51:40.614 * Background saving started by pid 3795
+3795:C 11 Jun 2024 13:51:40.627 * DB saved on disk
+3795:C 11 Jun 2024 13:51:40.628 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 13:51:40.715 * Background saving terminated with success
+58896:M 11 Jun 2024 13:56:41.066 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 13:56:41.069 * Background saving started by pid 4441
+4441:C 11 Jun 2024 13:56:41.091 * DB saved on disk
+4441:C 11 Jun 2024 13:56:41.095 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 13:56:41.170 * Background saving terminated with success
+58896:M 11 Jun 2024 14:01:42.031 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 14:01:42.035 * Background saving started by pid 5198
+5198:C 11 Jun 2024 14:01:42.058 * DB saved on disk
+5198:C 11 Jun 2024 14:01:42.058 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 14:01:42.137 * Background saving terminated with success
+58896:M 11 Jun 2024 14:08:57.215 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 14:08:57.217 * Background saving started by pid 5893
+5893:C 11 Jun 2024 14:08:57.230 * DB saved on disk
+5893:C 11 Jun 2024 14:08:57.232 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 14:08:57.319 * Background saving terminated with success
+58896:M 11 Jun 2024 14:13:58.030 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 14:13:58.034 * Background saving started by pid 6544
+6544:C 11 Jun 2024 14:13:58.061 * DB saved on disk
+6544:C 11 Jun 2024 14:13:58.063 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 14:13:58.142 * Background saving terminated with success
+58896:M 11 Jun 2024 14:18:59.057 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 14:18:59.060 * Background saving started by pid 7250
+7250:C 11 Jun 2024 14:18:59.074 * DB saved on disk
+7250:C 11 Jun 2024 14:18:59.076 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 14:18:59.164 * Background saving terminated with success
+58896:M 11 Jun 2024 14:24:00.000 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 14:24:00.003 * Background saving started by pid 7777
+7777:C 11 Jun 2024 14:24:00.017 * DB saved on disk
+7777:C 11 Jun 2024 14:24:00.018 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 14:24:00.105 * Background saving terminated with success
+58896:M 11 Jun 2024 14:29:01.003 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 14:29:01.005 * Background saving started by pid 9446
+9446:C 11 Jun 2024 14:29:01.022 * DB saved on disk
+9446:C 11 Jun 2024 14:29:01.023 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 14:29:01.106 * Background saving terminated with success
+58896:M 11 Jun 2024 14:38:46.573 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 14:38:46.575 * Background saving started by pid 16553
+16553:C 11 Jun 2024 14:38:46.587 * DB saved on disk
+16553:C 11 Jun 2024 14:38:46.587 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 14:38:46.677 * Background saving terminated with success
+58896:M 11 Jun 2024 14:43:47.042 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 14:43:47.046 * Background saving started by pid 18767
+18767:C 11 Jun 2024 14:43:47.064 * DB saved on disk
+18767:C 11 Jun 2024 14:43:47.065 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 14:43:47.149 * Background saving terminated with success
+58896:M 11 Jun 2024 14:48:48.071 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 14:48:48.072 * Background saving started by pid 19508
+19508:C 11 Jun 2024 14:48:48.105 * DB saved on disk
+19508:C 11 Jun 2024 14:48:48.106 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 14:48:48.173 * Background saving terminated with success
+58896:M 11 Jun 2024 15:55:55.546 * 1 changes in 3600 seconds. Saving...
+58896:M 11 Jun 2024 15:55:55.665 * Background saving started by pid 20200
+20200:C 11 Jun 2024 15:55:55.673 * DB saved on disk
+20200:C 11 Jun 2024 15:55:55.674 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 15:55:55.766 * Background saving terminated with success
+58896:M 11 Jun 2024 16:55:56.028 * 1 changes in 3600 seconds. Saving...
+58896:M 11 Jun 2024 16:55:56.029 * Background saving started by pid 26736
+26736:C 11 Jun 2024 16:55:56.039 * DB saved on disk
+26736:C 11 Jun 2024 16:55:56.041 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 16:55:56.130 * Background saving terminated with success
+58896:M 11 Jun 2024 17:00:57.094 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 17:00:57.096 * Background saving started by pid 27696
+27696:C 11 Jun 2024 17:00:57.110 * DB saved on disk
+27696:C 11 Jun 2024 17:00:57.111 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 17:00:57.197 * Background saving terminated with success
+58896:M 11 Jun 2024 17:05:58.022 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 17:05:58.028 * Background saving started by pid 28649
+28649:C 11 Jun 2024 17:05:58.040 * DB saved on disk
+28649:C 11 Jun 2024 17:05:58.042 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 17:05:58.130 * Background saving terminated with success
+58896:M 11 Jun 2024 17:10:59.011 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 17:10:59.013 * Background saving started by pid 29517
+29517:C 11 Jun 2024 17:10:59.028 * DB saved on disk
+29517:C 11 Jun 2024 17:10:59.029 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 17:10:59.114 * Background saving terminated with success
+58896:M 11 Jun 2024 17:16:00.031 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 17:16:00.033 * Background saving started by pid 30399
+30399:C 11 Jun 2024 17:16:00.044 * DB saved on disk
+30399:C 11 Jun 2024 17:16:00.045 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 17:16:00.134 * Background saving terminated with success
+58896:M 11 Jun 2024 17:21:01.099 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 17:21:01.102 * Background saving started by pid 34058
+34058:C 11 Jun 2024 17:21:01.136 * DB saved on disk
+34058:C 11 Jun 2024 17:21:01.136 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 17:21:01.204 * Background saving terminated with success
+58896:M 11 Jun 2024 17:26:02.077 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 17:26:02.078 * Background saving started by pid 35339
+35339:C 11 Jun 2024 17:26:02.092 * DB saved on disk
+35339:C 11 Jun 2024 17:26:02.093 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 17:26:02.179 * Background saving terminated with success
+58896:M 11 Jun 2024 17:31:03.024 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 17:31:03.027 * Background saving started by pid 36164
+36164:C 11 Jun 2024 17:31:03.044 * DB saved on disk
+36164:C 11 Jun 2024 17:31:03.046 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 17:31:03.129 * Background saving terminated with success
+58896:M 11 Jun 2024 17:36:04.080 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 17:36:04.083 * Background saving started by pid 37017
+37017:C 11 Jun 2024 17:36:04.100 * DB saved on disk
+37017:C 11 Jun 2024 17:36:04.100 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 17:36:04.184 * Background saving terminated with success
+58896:M 11 Jun 2024 17:41:05.070 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 17:41:05.071 * Background saving started by pid 37887
+37887:C 11 Jun 2024 17:41:05.096 * DB saved on disk
+37887:C 11 Jun 2024 17:41:05.097 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 17:41:05.173 * Background saving terminated with success
+58896:M 11 Jun 2024 17:46:06.098 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 17:46:06.099 * Background saving started by pid 38777
+38777:C 11 Jun 2024 17:46:06.110 * DB saved on disk
+38777:C 11 Jun 2024 17:46:06.111 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 17:46:06.200 * Background saving terminated with success
+58896:M 11 Jun 2024 17:51:07.052 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 17:51:07.054 * Background saving started by pid 39630
+39630:C 11 Jun 2024 17:51:07.065 * DB saved on disk
+39630:C 11 Jun 2024 17:51:07.065 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 17:51:07.156 * Background saving terminated with success
+58896:M 11 Jun 2024 17:56:08.017 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 17:56:08.020 * Background saving started by pid 40590
+40590:C 11 Jun 2024 17:56:08.031 * DB saved on disk
+40590:C 11 Jun 2024 17:56:08.032 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 17:56:08.122 * Background saving terminated with success
+58896:M 11 Jun 2024 18:01:09.041 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 18:01:09.044 * Background saving started by pid 41552
+41552:C 11 Jun 2024 18:01:09.054 * DB saved on disk
+41552:C 11 Jun 2024 18:01:09.056 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 18:01:09.145 * Background saving terminated with success
+58896:M 11 Jun 2024 18:06:10.031 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 18:06:10.034 * Background saving started by pid 42635
+42635:C 11 Jun 2024 18:06:10.047 * DB saved on disk
+42635:C 11 Jun 2024 18:06:10.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 18:06:10.136 * Background saving terminated with success
+58896:M 11 Jun 2024 18:11:11.043 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 18:11:11.046 * Background saving started by pid 43579
+43579:C 11 Jun 2024 18:11:11.061 * DB saved on disk
+43579:C 11 Jun 2024 18:11:11.062 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 18:11:11.147 * Background saving terminated with success
+58896:M 11 Jun 2024 18:16:12.053 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 18:16:12.056 * Background saving started by pid 44500
+44500:C 11 Jun 2024 18:16:12.068 * DB saved on disk
+44500:C 11 Jun 2024 18:16:12.068 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 18:16:12.157 * Background saving terminated with success
+58896:M 11 Jun 2024 18:21:13.023 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 18:21:13.027 * Background saving started by pid 45459
+45459:C 11 Jun 2024 18:21:13.054 * DB saved on disk
+45459:C 11 Jun 2024 18:21:13.055 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 18:21:13.128 * Background saving terminated with success
+58896:M 11 Jun 2024 18:26:14.079 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 18:26:14.080 * Background saving started by pid 46446
+46446:C 11 Jun 2024 18:26:14.095 * DB saved on disk
+46446:C 11 Jun 2024 18:26:14.096 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 18:26:14.181 * Background saving terminated with success
+58896:M 11 Jun 2024 18:31:15.020 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 18:31:15.023 * Background saving started by pid 47369
+47369:C 11 Jun 2024 18:31:15.033 * DB saved on disk
+47369:C 11 Jun 2024 18:31:15.033 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 18:31:15.125 * Background saving terminated with success
+58896:M 11 Jun 2024 18:44:26.665 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 18:44:26.668 * Background saving started by pid 47820
+47820:C 11 Jun 2024 18:44:26.794 * DB saved on disk
+47820:C 11 Jun 2024 18:44:26.795 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 18:44:26.870 * Background saving terminated with success
+58896:M 11 Jun 2024 18:51:12.584 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 18:51:12.586 * Background saving started by pid 47950
+47950:C 11 Jun 2024 18:51:12.599 * DB saved on disk
+47950:C 11 Jun 2024 18:51:12.600 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 18:51:12.689 * Background saving terminated with success
+58896:M 11 Jun 2024 19:02:27.776 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 19:02:27.778 * Background saving started by pid 48131
+48131:C 11 Jun 2024 19:02:27.802 * DB saved on disk
+48131:C 11 Jun 2024 19:02:27.804 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 19:02:27.881 * Background saving terminated with success
+58896:M 11 Jun 2024 19:07:28.043 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 19:07:28.045 * Background saving started by pid 48889
+48889:C 11 Jun 2024 19:07:28.056 * DB saved on disk
+48889:C 11 Jun 2024 19:07:28.063 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 19:07:28.147 * Background saving terminated with success
+58896:M 11 Jun 2024 19:12:29.059 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 19:12:29.061 * Background saving started by pid 49675
+49675:C 11 Jun 2024 19:12:29.074 * DB saved on disk
+49675:C 11 Jun 2024 19:12:29.076 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 19:12:29.165 * Background saving terminated with success
+58896:M 11 Jun 2024 19:17:30.038 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 19:17:30.039 * Background saving started by pid 50454
+50454:C 11 Jun 2024 19:17:30.048 * DB saved on disk
+50454:C 11 Jun 2024 19:17:30.049 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 19:17:30.141 * Background saving terminated with success
+58896:M 11 Jun 2024 19:22:31.015 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 19:22:31.019 * Background saving started by pid 51066
+51066:C 11 Jun 2024 19:22:31.034 * DB saved on disk
+51066:C 11 Jun 2024 19:22:31.035 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 19:22:31.121 * Background saving terminated with success
+58896:M 11 Jun 2024 19:27:32.083 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 19:27:32.086 * Background saving started by pid 51963
+51963:C 11 Jun 2024 19:27:32.100 * DB saved on disk
+51963:C 11 Jun 2024 19:27:32.101 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 19:27:32.187 * Background saving terminated with success
+58896:M 11 Jun 2024 19:32:33.008 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 19:32:33.010 * Background saving started by pid 52753
+52753:C 11 Jun 2024 19:32:33.021 * DB saved on disk
+52753:C 11 Jun 2024 19:32:33.021 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 19:32:33.111 * Background saving terminated with success
+58896:M 11 Jun 2024 19:37:34.032 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 19:37:34.034 * Background saving started by pid 54020
+54020:C 11 Jun 2024 19:37:34.047 * DB saved on disk
+54020:C 11 Jun 2024 19:37:34.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 19:37:34.135 * Background saving terminated with success
+58896:M 11 Jun 2024 19:42:35.021 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 19:42:35.025 * Background saving started by pid 55025
+55025:C 11 Jun 2024 19:42:35.055 * DB saved on disk
+55025:C 11 Jun 2024 19:42:35.056 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 19:42:35.126 * Background saving terminated with success
+58896:M 11 Jun 2024 19:47:36.075 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 19:47:36.077 * Background saving started by pid 55926
+55926:C 11 Jun 2024 19:47:36.091 * DB saved on disk
+55926:C 11 Jun 2024 19:47:36.092 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 19:47:36.178 * Background saving terminated with success
+58896:M 11 Jun 2024 19:52:37.054 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 19:52:37.061 * Background saving started by pid 56735
+56735:C 11 Jun 2024 19:52:37.078 * DB saved on disk
+56735:C 11 Jun 2024 19:52:37.078 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 19:52:37.163 * Background saving terminated with success
+58896:M 11 Jun 2024 19:57:38.075 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 19:57:38.077 * Background saving started by pid 57603
+57603:C 11 Jun 2024 19:57:38.092 * DB saved on disk
+57603:C 11 Jun 2024 19:57:38.092 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 19:57:38.178 * Background saving terminated with success
+58896:M 11 Jun 2024 20:02:39.013 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 20:02:39.015 * Background saving started by pid 58545
+58545:C 11 Jun 2024 20:02:39.028 * DB saved on disk
+58545:C 11 Jun 2024 20:02:39.029 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 20:02:39.116 * Background saving terminated with success
+58896:M 11 Jun 2024 20:07:40.055 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 20:07:40.057 * Background saving started by pid 59368
+59368:C 11 Jun 2024 20:07:40.077 * DB saved on disk
+59368:C 11 Jun 2024 20:07:40.077 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 20:07:40.159 * Background saving terminated with success
+58896:M 11 Jun 2024 20:12:41.003 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 20:12:41.005 * Background saving started by pid 60133
+60133:C 11 Jun 2024 20:12:41.021 * DB saved on disk
+60133:C 11 Jun 2024 20:12:41.022 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 20:12:41.107 * Background saving terminated with success
+58896:M 11 Jun 2024 20:17:42.079 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 20:17:42.081 * Background saving started by pid 60949
+60949:C 11 Jun 2024 20:17:42.091 * DB saved on disk
+60949:C 11 Jun 2024 20:17:42.093 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 20:17:42.181 * Background saving terminated with success
+58896:M 11 Jun 2024 20:22:43.066 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 20:22:43.067 * Background saving started by pid 61718
+61718:C 11 Jun 2024 20:22:43.077 * DB saved on disk
+61718:C 11 Jun 2024 20:22:43.077 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 20:22:43.167 * Background saving terminated with success
+58896:M 11 Jun 2024 20:27:44.036 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 20:27:44.037 * Background saving started by pid 62501
+62501:C 11 Jun 2024 20:27:44.047 * DB saved on disk
+62501:C 11 Jun 2024 20:27:44.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 20:27:44.138 * Background saving terminated with success
+58896:M 11 Jun 2024 20:32:45.098 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 20:32:45.099 * Background saving started by pid 63324
+63324:C 11 Jun 2024 20:32:45.112 * DB saved on disk
+63324:C 11 Jun 2024 20:32:45.113 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 20:32:45.199 * Background saving terminated with success
+58896:M 11 Jun 2024 20:37:46.011 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 20:37:46.014 * Background saving started by pid 64283
+64283:C 11 Jun 2024 20:37:46.023 * DB saved on disk
+64283:C 11 Jun 2024 20:37:46.024 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 20:37:46.115 * Background saving terminated with success
+58896:M 11 Jun 2024 20:42:47.025 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 20:42:47.028 * Background saving started by pid 65146
+65146:C 11 Jun 2024 20:42:47.036 * DB saved on disk
+65146:C 11 Jun 2024 20:42:47.037 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 20:42:47.129 * Background saving terminated with success
+58896:M 11 Jun 2024 20:47:48.097 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 20:47:48.099 * Background saving started by pid 65963
+65963:C 11 Jun 2024 20:47:48.114 * DB saved on disk
+65963:C 11 Jun 2024 20:47:48.114 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 20:47:48.200 * Background saving terminated with success
+58896:M 11 Jun 2024 20:52:49.080 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 20:52:49.081 * Background saving started by pid 66721
+66721:C 11 Jun 2024 20:52:49.089 * DB saved on disk
+66721:C 11 Jun 2024 20:52:49.089 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 20:52:49.182 * Background saving terminated with success
+58896:M 11 Jun 2024 20:57:50.072 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 20:57:50.074 * Background saving started by pid 67533
+67533:C 11 Jun 2024 20:57:50.086 * DB saved on disk
+67533:C 11 Jun 2024 20:57:50.087 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 20:57:50.175 * Background saving terminated with success
+58896:M 11 Jun 2024 21:02:51.093 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 21:02:51.096 * Background saving started by pid 68492
+68492:C 11 Jun 2024 21:02:51.104 * DB saved on disk
+68492:C 11 Jun 2024 21:02:51.105 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 21:02:51.197 * Background saving terminated with success
+58896:M 11 Jun 2024 21:07:52.069 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 21:07:52.071 * Background saving started by pid 69301
+69301:C 11 Jun 2024 21:07:52.080 * DB saved on disk
+69301:C 11 Jun 2024 21:07:52.081 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 21:07:52.172 * Background saving terminated with success
+58896:M 11 Jun 2024 21:12:53.015 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 21:12:53.016 * Background saving started by pid 70023
+70023:C 11 Jun 2024 21:12:53.026 * DB saved on disk
+70023:C 11 Jun 2024 21:12:53.027 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 21:12:53.117 * Background saving terminated with success
+58896:M 11 Jun 2024 21:17:54.036 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 21:17:54.039 * Background saving started by pid 70808
+70808:C 11 Jun 2024 21:17:54.049 * DB saved on disk
+70808:C 11 Jun 2024 21:17:54.050 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 21:17:54.139 * Background saving terminated with success
+58896:M 11 Jun 2024 21:22:55.017 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 21:22:55.020 * Background saving started by pid 71619
+71619:C 11 Jun 2024 21:22:55.034 * DB saved on disk
+71619:C 11 Jun 2024 21:22:55.035 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 21:22:55.122 * Background saving terminated with success
+58896:M 11 Jun 2024 21:27:56.099 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 21:27:56.101 * Background saving started by pid 72347
+72347:C 11 Jun 2024 21:27:56.110 * DB saved on disk
+72347:C 11 Jun 2024 21:27:56.111 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 21:27:56.204 * Background saving terminated with success
+58896:M 11 Jun 2024 21:32:57.085 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 21:32:57.087 * Background saving started by pid 73084
+73084:C 11 Jun 2024 21:32:57.097 * DB saved on disk
+73084:C 11 Jun 2024 21:32:57.098 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 21:32:57.190 * Background saving terminated with success
+58896:M 11 Jun 2024 21:37:58.093 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 21:37:58.096 * Background saving started by pid 73831
+73831:C 11 Jun 2024 21:37:58.111 * DB saved on disk
+73831:C 11 Jun 2024 21:37:58.112 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 21:37:58.198 * Background saving terminated with success
+58896:M 11 Jun 2024 21:42:59.011 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 21:42:59.013 * Background saving started by pid 74560
+74560:C 11 Jun 2024 21:42:59.029 * DB saved on disk
+74560:C 11 Jun 2024 21:42:59.032 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 21:42:59.114 * Background saving terminated with success
+58896:M 11 Jun 2024 21:48:00.014 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 21:48:00.016 * Background saving started by pid 75300
+75300:C 11 Jun 2024 21:48:00.033 * DB saved on disk
+75300:C 11 Jun 2024 21:48:00.035 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 21:48:00.118 * Background saving terminated with success
+58896:M 11 Jun 2024 21:53:01.092 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 21:53:01.094 * Background saving started by pid 76026
+76026:C 11 Jun 2024 21:53:01.107 * DB saved on disk
+76026:C 11 Jun 2024 21:53:01.109 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 21:53:01.197 * Background saving terminated with success
+58896:M 11 Jun 2024 21:58:02.084 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 21:58:02.087 * Background saving started by pid 76774
+76774:C 11 Jun 2024 21:58:02.099 * DB saved on disk
+76774:C 11 Jun 2024 21:58:02.102 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 21:58:02.188 * Background saving terminated with success
+58896:M 11 Jun 2024 22:03:03.053 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 22:03:03.057 * Background saving started by pid 77506
+77506:C 11 Jun 2024 22:03:03.079 * DB saved on disk
+77506:C 11 Jun 2024 22:03:03.080 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 22:03:03.158 * Background saving terminated with success
+58896:M 11 Jun 2024 22:08:04.061 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 22:08:04.065 * Background saving started by pid 78241
+78241:C 11 Jun 2024 22:08:04.080 * DB saved on disk
+78241:C 11 Jun 2024 22:08:04.081 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 22:08:04.168 * Background saving terminated with success
+58896:M 11 Jun 2024 22:13:05.038 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 22:13:05.038 * Background saving started by pid 78971
+78971:C 11 Jun 2024 22:13:05.049 * DB saved on disk
+78971:C 11 Jun 2024 22:13:05.050 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 22:13:05.141 * Background saving terminated with success
+58896:M 11 Jun 2024 22:18:06.009 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 22:18:06.012 * Background saving started by pid 79712
+79712:C 11 Jun 2024 22:18:06.031 * DB saved on disk
+79712:C 11 Jun 2024 22:18:06.031 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 22:18:06.115 * Background saving terminated with success
+58896:M 11 Jun 2024 22:23:07.017 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 22:23:07.018 * Background saving started by pid 80463
+80463:C 11 Jun 2024 22:23:07.030 * DB saved on disk
+80463:C 11 Jun 2024 22:23:07.031 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 22:23:07.121 * Background saving terminated with success
+58896:M 11 Jun 2024 22:28:08.091 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 22:28:08.095 * Background saving started by pid 81195
+81195:C 11 Jun 2024 22:28:08.126 * DB saved on disk
+81195:C 11 Jun 2024 22:28:08.127 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 22:28:08.197 * Background saving terminated with success
+58896:M 11 Jun 2024 22:33:09.069 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 22:33:09.073 * Background saving started by pid 81924
+81924:C 11 Jun 2024 22:33:09.090 * DB saved on disk
+81924:C 11 Jun 2024 22:33:09.091 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 22:33:09.176 * Background saving terminated with success
+58896:M 11 Jun 2024 22:38:10.051 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 22:38:10.056 * Background saving started by pid 82688
+82688:C 11 Jun 2024 22:38:10.073 * DB saved on disk
+82688:C 11 Jun 2024 22:38:10.074 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 22:38:10.158 * Background saving terminated with success
+58896:M 11 Jun 2024 22:43:11.058 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 22:43:11.062 * Background saving started by pid 83419
+83419:C 11 Jun 2024 22:43:11.085 * DB saved on disk
+83419:C 11 Jun 2024 22:43:11.086 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 22:43:11.165 * Background saving terminated with success
+58896:M 11 Jun 2024 22:48:12.050 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 22:48:12.055 * Background saving started by pid 84242
+84242:C 11 Jun 2024 22:48:12.071 * DB saved on disk
+84242:C 11 Jun 2024 22:48:12.072 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 22:48:12.156 * Background saving terminated with success
+58896:M 11 Jun 2024 22:53:13.061 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 22:53:13.064 * Background saving started by pid 84972
+84972:C 11 Jun 2024 22:53:13.080 * DB saved on disk
+84972:C 11 Jun 2024 22:53:13.082 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 22:53:13.167 * Background saving terminated with success
+58896:M 11 Jun 2024 22:58:14.056 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 22:58:14.059 * Background saving started by pid 85710
+85710:C 11 Jun 2024 22:58:14.075 * DB saved on disk
+85710:C 11 Jun 2024 22:58:14.076 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 22:58:14.161 * Background saving terminated with success
+58896:M 11 Jun 2024 23:03:15.013 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 23:03:15.014 * Background saving started by pid 86445
+86445:C 11 Jun 2024 23:03:15.027 * DB saved on disk
+86445:C 11 Jun 2024 23:03:15.031 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 23:03:15.116 * Background saving terminated with success
+58896:M 11 Jun 2024 23:08:16.036 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 23:08:16.038 * Background saving started by pid 87172
+87172:C 11 Jun 2024 23:08:16.052 * DB saved on disk
+87172:C 11 Jun 2024 23:08:16.053 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 23:08:16.138 * Background saving terminated with success
+58896:M 11 Jun 2024 23:13:17.001 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 23:13:17.006 * Background saving started by pid 87901
+87901:C 11 Jun 2024 23:13:17.020 * DB saved on disk
+87901:C 11 Jun 2024 23:13:17.021 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 23:13:17.109 * Background saving terminated with success
+58896:M 11 Jun 2024 23:18:18.101 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 23:18:18.103 * Background saving started by pid 88646
+88646:C 11 Jun 2024 23:18:18.115 * DB saved on disk
+88646:C 11 Jun 2024 23:18:18.116 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 23:18:18.205 * Background saving terminated with success
+58896:M 11 Jun 2024 23:23:19.087 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 23:23:19.090 * Background saving started by pid 89375
+89375:C 11 Jun 2024 23:23:19.101 * DB saved on disk
+89375:C 11 Jun 2024 23:23:19.103 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 23:23:19.192 * Background saving terminated with success
+58896:M 11 Jun 2024 23:28:20.011 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 23:28:20.013 * Background saving started by pid 90105
+90105:C 11 Jun 2024 23:28:20.027 * DB saved on disk
+90105:C 11 Jun 2024 23:28:20.028 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 23:28:20.115 * Background saving terminated with success
+58896:M 11 Jun 2024 23:33:21.067 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 23:33:21.069 * Background saving started by pid 90839
+90839:C 11 Jun 2024 23:33:21.079 * DB saved on disk
+90839:C 11 Jun 2024 23:33:21.080 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 23:33:21.170 * Background saving terminated with success
+58896:M 11 Jun 2024 23:38:22.003 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 23:38:22.008 * Background saving started by pid 91662
+91662:C 11 Jun 2024 23:38:22.019 * DB saved on disk
+91662:C 11 Jun 2024 23:38:22.019 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 23:38:22.111 * Background saving terminated with success
+58896:M 11 Jun 2024 23:43:23.059 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 23:43:23.063 * Background saving started by pid 92434
+92434:C 11 Jun 2024 23:43:23.075 * DB saved on disk
+92434:C 11 Jun 2024 23:43:23.075 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 23:43:23.165 * Background saving terminated with success
+58896:M 11 Jun 2024 23:48:24.037 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 23:48:24.038 * Background saving started by pid 93164
+93164:C 11 Jun 2024 23:48:24.049 * DB saved on disk
+93164:C 11 Jun 2024 23:48:24.050 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 23:48:24.142 * Background saving terminated with success
+58896:M 11 Jun 2024 23:53:25.018 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 23:53:25.021 * Background saving started by pid 93965
+93965:C 11 Jun 2024 23:53:25.035 * DB saved on disk
+93965:C 11 Jun 2024 23:53:25.037 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 23:53:25.123 * Background saving terminated with success
+58896:M 11 Jun 2024 23:58:26.009 * 100 changes in 300 seconds. Saving...
+58896:M 11 Jun 2024 23:58:26.011 * Background saving started by pid 94758
+94758:C 11 Jun 2024 23:58:26.025 * DB saved on disk
+94758:C 11 Jun 2024 23:58:26.026 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 11 Jun 2024 23:58:26.112 * Background saving terminated with success
+58896:M 12 Jun 2024 00:03:27.039 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 00:03:27.041 * Background saving started by pid 95491
+95491:C 12 Jun 2024 00:03:27.055 * DB saved on disk
+95491:C 12 Jun 2024 00:03:27.057 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 00:03:27.142 * Background saving terminated with success
+58896:M 12 Jun 2024 00:08:28.034 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 00:08:28.035 * Background saving started by pid 96284
+96284:C 12 Jun 2024 00:08:28.051 * DB saved on disk
+96284:C 12 Jun 2024 00:08:28.051 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 00:08:28.136 * Background saving terminated with success
+58896:M 12 Jun 2024 00:13:29.015 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 00:13:29.019 * Background saving started by pid 97696
+97696:C 12 Jun 2024 00:13:29.029 * DB saved on disk
+97696:C 12 Jun 2024 00:13:29.030 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 00:13:29.120 * Background saving terminated with success
+58896:M 12 Jun 2024 00:18:30.052 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 00:18:30.055 * Background saving started by pid 98669
+98669:C 12 Jun 2024 00:18:30.072 * DB saved on disk
+98669:C 12 Jun 2024 00:18:30.073 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 00:18:30.157 * Background saving terminated with success
+58896:M 12 Jun 2024 00:23:31.067 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 00:23:31.069 * Background saving started by pid 99666
+99666:C 12 Jun 2024 00:23:31.085 * DB saved on disk
+99666:C 12 Jun 2024 00:23:31.087 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 00:23:31.169 * Background saving terminated with success
+58896:M 12 Jun 2024 00:28:32.087 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 00:28:32.093 * Background saving started by pid 831
+831:C 12 Jun 2024 00:28:32.110 * DB saved on disk
+831:C 12 Jun 2024 00:28:32.111 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 00:28:32.194 * Background saving terminated with success
+58896:M 12 Jun 2024 00:33:33.077 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 00:33:33.079 * Background saving started by pid 1642
+1642:C 12 Jun 2024 00:33:33.099 * DB saved on disk
+1642:C 12 Jun 2024 00:33:33.100 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 00:33:33.181 * Background saving terminated with success
+58896:M 12 Jun 2024 00:38:34.030 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 00:38:34.032 * Background saving started by pid 2590
+2590:C 12 Jun 2024 00:38:34.045 * DB saved on disk
+2590:C 12 Jun 2024 00:38:34.046 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 00:38:34.134 * Background saving terminated with success
+58896:M 12 Jun 2024 00:43:35.021 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 00:43:35.024 * Background saving started by pid 3742
+3742:C 12 Jun 2024 00:43:35.037 * DB saved on disk
+3742:C 12 Jun 2024 00:43:35.038 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 00:43:35.125 * Background saving terminated with success
+58896:M 12 Jun 2024 00:48:36.015 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 00:48:36.018 * Background saving started by pid 4666
+4666:C 12 Jun 2024 00:48:36.030 * DB saved on disk
+4666:C 12 Jun 2024 00:48:36.031 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 00:48:36.119 * Background saving terminated with success
+58896:M 12 Jun 2024 00:53:37.025 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 00:53:37.027 * Background saving started by pid 5691
+5691:C 12 Jun 2024 00:53:37.042 * DB saved on disk
+5691:C 12 Jun 2024 00:53:37.042 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 00:53:37.129 * Background saving terminated with success
+58896:M 12 Jun 2024 00:58:38.050 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 00:58:38.054 * Background saving started by pid 6620
+6620:C 12 Jun 2024 00:58:38.065 * DB saved on disk
+6620:C 12 Jun 2024 00:58:38.066 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 00:58:38.155 * Background saving terminated with success
+58896:M 12 Jun 2024 01:03:39.073 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 01:03:39.075 * Background saving started by pid 7624
+7624:C 12 Jun 2024 01:03:39.101 * DB saved on disk
+7624:C 12 Jun 2024 01:03:39.114 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 01:03:39.176 * Background saving terminated with success
+58896:M 12 Jun 2024 01:08:40.009 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 01:08:40.012 * Background saving started by pid 8631
+8631:C 12 Jun 2024 01:08:40.025 * DB saved on disk
+8631:C 12 Jun 2024 01:08:40.026 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 01:08:40.113 * Background saving terminated with success
+58896:M 12 Jun 2024 01:20:43.620 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 01:20:43.625 * Background saving started by pid 10740
+10740:C 12 Jun 2024 01:20:43.650 * DB saved on disk
+10740:C 12 Jun 2024 01:20:43.650 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 01:20:43.727 * Background saving terminated with success
+58896:M 12 Jun 2024 01:25:44.086 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 01:25:44.089 * Background saving started by pid 11660
+11660:C 12 Jun 2024 01:25:44.101 * DB saved on disk
+11660:C 12 Jun 2024 01:25:44.102 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 01:25:44.190 * Background saving terminated with success
+58896:M 12 Jun 2024 01:30:45.057 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 01:30:45.059 * Background saving started by pid 12738
+12738:C 12 Jun 2024 01:30:45.066 * DB saved on disk
+12738:C 12 Jun 2024 01:30:45.066 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 01:30:45.159 * Background saving terminated with success
+58896:M 12 Jun 2024 01:35:46.087 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 01:35:46.088 * Background saving started by pid 13711
+13711:C 12 Jun 2024 01:35:46.099 * DB saved on disk
+13711:C 12 Jun 2024 01:35:46.099 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 01:35:46.189 * Background saving terminated with success
+58896:M 12 Jun 2024 01:40:47.061 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 01:40:47.062 * Background saving started by pid 14674
+14674:C 12 Jun 2024 01:40:47.069 * DB saved on disk
+14674:C 12 Jun 2024 01:40:47.069 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 01:40:47.162 * Background saving terminated with success
+58896:M 12 Jun 2024 01:45:48.078 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 01:45:48.082 * Background saving started by pid 18780
+18780:C 12 Jun 2024 01:45:48.103 * DB saved on disk
+18780:C 12 Jun 2024 01:45:48.105 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 01:45:48.184 * Background saving terminated with success
+58896:M 12 Jun 2024 01:50:49.025 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 01:50:49.026 * Background saving started by pid 19686
+19686:C 12 Jun 2024 01:50:49.038 * DB saved on disk
+19686:C 12 Jun 2024 01:50:49.039 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 01:50:49.127 * Background saving terminated with success
+58896:M 12 Jun 2024 01:56:34.702 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 01:56:34.702 * Background saving started by pid 20738
+20738:C 12 Jun 2024 01:56:34.741 * DB saved on disk
+20738:C 12 Jun 2024 01:56:34.744 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 01:56:34.803 * Background saving terminated with success
+58896:M 12 Jun 2024 02:01:35.007 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 02:01:35.008 * Background saving started by pid 21912
+21912:C 12 Jun 2024 02:01:35.017 * DB saved on disk
+21912:C 12 Jun 2024 02:01:35.018 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 02:01:35.108 * Background saving terminated with success
+58896:M 12 Jun 2024 02:06:36.095 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 02:06:36.097 * Background saving started by pid 23306
+23306:C 12 Jun 2024 02:06:36.103 * DB saved on disk
+23306:C 12 Jun 2024 02:06:36.104 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 02:06:36.197 * Background saving terminated with success
+58896:M 12 Jun 2024 02:11:37.037 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 02:11:37.038 * Background saving started by pid 24623
+24623:C 12 Jun 2024 02:11:37.049 * DB saved on disk
+24623:C 12 Jun 2024 02:11:37.049 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 02:11:37.138 * Background saving terminated with success
+58896:M 12 Jun 2024 02:16:38.032 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 02:16:38.035 * Background saving started by pid 28935
+28935:C 12 Jun 2024 02:16:38.056 * DB saved on disk
+28935:C 12 Jun 2024 02:16:38.060 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 02:16:38.136 * Background saving terminated with success
+58896:M 12 Jun 2024 02:21:39.065 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 02:21:39.068 * Background saving started by pid 30316
+30316:C 12 Jun 2024 02:21:39.089 * DB saved on disk
+30316:C 12 Jun 2024 02:21:39.089 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 02:21:39.169 * Background saving terminated with success
+58896:M 12 Jun 2024 02:26:40.088 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 02:26:40.092 * Background saving started by pid 31108
+31108:C 12 Jun 2024 02:26:40.115 * DB saved on disk
+31108:C 12 Jun 2024 02:26:40.116 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 02:26:40.192 * Background saving terminated with success
+58896:M 12 Jun 2024 02:31:41.089 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 02:31:41.090 * Background saving started by pid 32709
+32709:C 12 Jun 2024 02:31:41.115 * DB saved on disk
+32709:C 12 Jun 2024 02:31:41.116 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 02:31:41.191 * Background saving terminated with success
+58896:M 12 Jun 2024 02:34:11.210 * DB saved on disk
+58896:M 12 Jun 2024 02:39:12.090 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 02:39:12.090 * Background saving started by pid 34729
+34729:C 12 Jun 2024 02:39:12.098 * DB saved on disk
+34729:C 12 Jun 2024 02:39:12.099 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 02:39:12.191 * Background saving terminated with success
+58896:M 12 Jun 2024 02:44:13.030 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 02:44:13.031 * Background saving started by pid 36552
+36552:C 12 Jun 2024 02:44:13.039 * DB saved on disk
+36552:C 12 Jun 2024 02:44:13.040 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 02:44:13.132 * Background saving terminated with success
+58896:M 12 Jun 2024 02:49:14.003 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 02:49:14.007 * Background saving started by pid 40406
+40406:C 12 Jun 2024 02:49:14.016 * DB saved on disk
+40406:C 12 Jun 2024 02:49:14.017 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 02:49:14.108 * Background saving terminated with success
+58896:M 12 Jun 2024 02:54:15.052 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 02:54:15.053 * Background saving started by pid 41373
+41373:C 12 Jun 2024 02:54:15.068 * DB saved on disk
+41373:C 12 Jun 2024 02:54:15.069 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 02:54:15.155 * Background saving terminated with success
+58896:M 12 Jun 2024 02:59:16.098 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 02:59:16.100 * Background saving started by pid 42324
+42324:C 12 Jun 2024 02:59:16.108 * DB saved on disk
+42324:C 12 Jun 2024 02:59:16.109 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 02:59:16.201 * Background saving terminated with success
+58896:M 12 Jun 2024 03:04:17.012 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 03:04:17.015 * Background saving started by pid 43225
+43225:C 12 Jun 2024 03:04:17.025 * DB saved on disk
+43225:C 12 Jun 2024 03:04:17.026 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 03:04:17.116 * Background saving terminated with success
+58896:M 12 Jun 2024 03:04:34.446 * DB saved on disk
+58896:M 12 Jun 2024 03:09:35.045 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 03:09:35.048 * Background saving started by pid 44410
+44410:C 12 Jun 2024 03:09:35.055 * DB saved on disk
+44410:C 12 Jun 2024 03:09:35.057 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 03:09:35.148 * Background saving terminated with success
+58896:M 12 Jun 2024 03:14:36.016 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 03:14:36.019 * Background saving started by pid 46540
+46540:C 12 Jun 2024 03:14:36.031 * DB saved on disk
+46540:C 12 Jun 2024 03:14:36.033 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 03:14:36.119 * Background saving terminated with success
+58896:M 12 Jun 2024 03:19:37.037 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 03:19:37.041 * Background saving started by pid 47386
+47386:C 12 Jun 2024 03:19:37.055 * DB saved on disk
+47386:C 12 Jun 2024 03:19:37.058 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 03:19:37.143 * Background saving terminated with success
+58896:M 12 Jun 2024 03:24:38.035 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 03:24:38.042 * Background saving started by pid 48201
+48201:C 12 Jun 2024 03:24:38.063 * DB saved on disk
+48201:C 12 Jun 2024 03:24:38.064 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 03:24:38.147 * Background saving terminated with success
+58896:M 12 Jun 2024 03:29:39.031 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 03:29:39.035 * Background saving started by pid 49082
+49082:C 12 Jun 2024 03:29:39.051 * DB saved on disk
+49082:C 12 Jun 2024 03:29:39.052 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 03:29:39.136 * Background saving terminated with success
+58896:M 12 Jun 2024 03:34:40.043 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 03:34:40.046 * Background saving started by pid 53122
+53122:C 12 Jun 2024 03:34:40.058 * DB saved on disk
+53122:C 12 Jun 2024 03:34:40.059 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 03:34:40.147 * Background saving terminated with success
+58896:M 12 Jun 2024 03:39:41.032 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 03:39:41.035 * Background saving started by pid 54267
+54267:C 12 Jun 2024 03:39:41.056 * DB saved on disk
+54267:C 12 Jun 2024 03:39:41.057 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 03:39:41.137 * Background saving terminated with success
+58896:M 12 Jun 2024 03:44:42.005 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 03:44:42.006 * Background saving started by pid 55054
+55054:C 12 Jun 2024 03:44:42.022 * DB saved on disk
+55054:C 12 Jun 2024 03:44:42.024 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 03:44:42.107 * Background saving terminated with success
+58896:M 12 Jun 2024 03:49:43.092 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 03:49:43.094 * Background saving started by pid 55938
+55938:C 12 Jun 2024 03:49:43.104 * DB saved on disk
+55938:C 12 Jun 2024 03:49:43.105 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 03:49:43.195 * Background saving terminated with success
+58896:M 12 Jun 2024 03:54:44.085 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 03:54:44.087 * Background saving started by pid 56777
+56777:C 12 Jun 2024 03:54:44.105 * DB saved on disk
+56777:C 12 Jun 2024 03:54:44.111 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 03:54:44.189 * Background saving terminated with success
+58896:M 12 Jun 2024 03:59:45.049 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 03:59:45.050 * Background saving started by pid 57643
+57643:C 12 Jun 2024 03:59:45.059 * DB saved on disk
+57643:C 12 Jun 2024 03:59:45.061 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 03:59:45.151 * Background saving terminated with success
+58896:M 12 Jun 2024 04:04:46.023 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 04:04:46.027 * Background saving started by pid 58638
+58638:C 12 Jun 2024 04:04:46.049 * DB saved on disk
+58638:C 12 Jun 2024 04:04:46.049 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 04:04:46.129 * Background saving terminated with success
+58896:M 12 Jun 2024 04:09:47.069 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 04:09:47.071 * Background saving started by pid 59417
+59417:C 12 Jun 2024 04:09:47.089 * DB saved on disk
+59417:C 12 Jun 2024 04:09:47.090 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 04:09:47.173 * Background saving terminated with success
+58896:M 12 Jun 2024 04:14:48.091 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 04:14:48.092 * Background saving started by pid 60190
+60190:C 12 Jun 2024 04:14:48.100 * DB saved on disk
+60190:C 12 Jun 2024 04:14:48.105 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 04:14:48.193 * Background saving terminated with success
+58896:M 12 Jun 2024 04:19:49.050 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 04:19:49.052 * Background saving started by pid 61120
+61120:C 12 Jun 2024 04:19:49.060 * DB saved on disk
+61120:C 12 Jun 2024 04:19:49.060 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 04:19:49.153 * Background saving terminated with success
+58896:M 12 Jun 2024 04:24:50.028 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 04:24:50.032 * Background saving started by pid 61993
+61993:C 12 Jun 2024 04:24:50.051 * DB saved on disk
+61993:C 12 Jun 2024 04:24:50.052 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 04:24:50.134 * Background saving terminated with success
+58896:M 12 Jun 2024 04:29:51.054 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 04:29:51.058 * Background saving started by pid 62790
+62790:C 12 Jun 2024 04:29:51.077 * DB saved on disk
+62790:C 12 Jun 2024 04:29:51.077 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 04:29:51.159 * Background saving terminated with success
+58896:M 12 Jun 2024 04:34:52.067 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 04:34:52.069 * Background saving started by pid 63597
+63597:C 12 Jun 2024 04:34:52.077 * DB saved on disk
+63597:C 12 Jun 2024 04:34:52.078 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 04:34:52.170 * Background saving terminated with success
+58896:M 12 Jun 2024 04:39:53.088 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 04:39:53.089 * Background saving started by pid 64519
+64519:C 12 Jun 2024 04:39:53.098 * DB saved on disk
+64519:C 12 Jun 2024 04:39:53.099 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 04:39:53.190 * Background saving terminated with success
+58896:M 12 Jun 2024 04:44:54.089 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 04:44:54.091 * Background saving started by pid 65472
+65472:C 12 Jun 2024 04:44:54.099 * DB saved on disk
+65472:C 12 Jun 2024 04:44:54.100 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 04:44:54.193 * Background saving terminated with success
+58896:M 12 Jun 2024 04:49:55.090 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 04:49:55.091 * Background saving started by pid 66430
+66430:C 12 Jun 2024 04:49:55.102 * DB saved on disk
+66430:C 12 Jun 2024 04:49:55.104 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 04:49:55.192 * Background saving terminated with success
+58896:M 12 Jun 2024 04:54:56.019 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 04:54:56.022 * Background saving started by pid 67514
+67514:C 12 Jun 2024 04:54:56.034 * DB saved on disk
+67514:C 12 Jun 2024 04:54:56.039 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 04:54:56.124 * Background saving terminated with success
+58896:M 12 Jun 2024 04:59:57.000 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 04:59:57.001 * Background saving started by pid 68698
+68698:C 12 Jun 2024 04:59:57.010 * DB saved on disk
+68698:C 12 Jun 2024 04:59:57.012 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 04:59:57.102 * Background saving terminated with success
+58896:M 12 Jun 2024 05:04:58.100 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 05:04:58.102 * Background saving started by pid 74173
+74173:C 12 Jun 2024 05:04:58.117 * DB saved on disk
+74173:C 12 Jun 2024 05:04:58.117 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 05:04:58.204 * Background saving terminated with success
+58896:M 12 Jun 2024 05:09:59.003 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 05:09:59.005 * Background saving started by pid 77013
+77013:C 12 Jun 2024 05:09:59.012 * DB saved on disk
+77013:C 12 Jun 2024 05:09:59.013 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 05:09:59.106 * Background saving terminated with success
+58896:M 12 Jun 2024 05:15:00.056 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 05:15:00.058 * Background saving started by pid 77983
+77983:C 12 Jun 2024 05:15:00.070 * DB saved on disk
+77983:C 12 Jun 2024 05:15:00.071 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 05:15:00.159 * Background saving terminated with success
+58896:M 12 Jun 2024 05:20:01.096 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 05:20:01.103 * Background saving started by pid 79011
+79011:C 12 Jun 2024 05:20:01.122 * DB saved on disk
+79011:C 12 Jun 2024 05:20:01.132 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 05:20:01.204 * Background saving terminated with success
+58896:M 12 Jun 2024 05:25:02.097 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 05:25:02.099 * Background saving started by pid 80216
+80216:C 12 Jun 2024 05:25:02.113 * DB saved on disk
+80216:C 12 Jun 2024 05:25:02.113 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 05:25:02.200 * Background saving terminated with success
+58896:M 12 Jun 2024 05:30:03.063 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 05:30:03.068 * Background saving started by pid 81376
+81376:C 12 Jun 2024 05:30:03.083 * DB saved on disk
+81376:C 12 Jun 2024 05:30:03.088 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 05:30:03.169 * Background saving terminated with success
+58896:M 12 Jun 2024 05:35:04.075 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 05:35:04.078 * Background saving started by pid 82164
+82164:C 12 Jun 2024 05:35:04.088 * DB saved on disk
+82164:C 12 Jun 2024 05:35:04.088 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 05:35:04.179 * Background saving terminated with success
+58896:M 12 Jun 2024 05:40:05.085 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 05:40:05.087 * Background saving started by pid 83362
+83362:C 12 Jun 2024 05:40:05.113 * DB saved on disk
+83362:C 12 Jun 2024 05:40:05.116 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 05:40:05.189 * Background saving terminated with success
+58896:M 12 Jun 2024 05:45:06.011 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 05:45:06.012 * Background saving started by pid 84525
+84525:C 12 Jun 2024 05:45:06.021 * DB saved on disk
+84525:C 12 Jun 2024 05:45:06.022 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 05:45:06.114 * Background saving terminated with success
+58896:M 12 Jun 2024 05:50:07.081 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 05:50:07.082 * Background saving started by pid 85679
+85679:C 12 Jun 2024 05:50:07.095 * DB saved on disk
+85679:C 12 Jun 2024 05:50:07.096 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 05:50:07.184 * Background saving terminated with success
+58896:M 12 Jun 2024 06:09:17.678 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 06:09:17.799 * Background saving started by pid 86522
+86522:C 12 Jun 2024 06:09:17.807 * DB saved on disk
+86522:C 12 Jun 2024 06:09:17.807 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 06:09:17.913 * Background saving terminated with success
+58896:M 12 Jun 2024 07:23:10.681 * 1 changes in 3600 seconds. Saving...
+58896:M 12 Jun 2024 07:23:10.685 * Background saving started by pid 86688
+86688:C 12 Jun 2024 07:23:10.799 * DB saved on disk
+86688:C 12 Jun 2024 07:23:10.800 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 07:23:10.887 * Background saving terminated with success
+58896:M 12 Jun 2024 08:28:27.701 * 1 changes in 3600 seconds. Saving...
+58896:M 12 Jun 2024 08:28:27.709 * Background saving started by pid 86783
+86783:C 12 Jun 2024 08:28:27.805 * DB saved on disk
+86783:C 12 Jun 2024 08:28:27.806 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 08:28:27.810 * Background saving terminated with success
+58896:M 12 Jun 2024 08:37:48.107 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 08:37:48.109 * Background saving started by pid 86998
+86998:C 12 Jun 2024 08:37:48.124 * DB saved on disk
+86998:C 12 Jun 2024 08:37:48.125 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 08:37:48.210 * Background saving terminated with success
+58896:M 12 Jun 2024 08:45:00.378 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 08:45:00.441 * Background saving started by pid 88055
+88055:C 12 Jun 2024 08:45:00.998 * DB saved on disk
+88055:C 12 Jun 2024 08:45:01.006 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 08:45:01.057 * Background saving terminated with success
+58896:M 12 Jun 2024 09:03:18.658 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 09:03:18.662 * Background saving started by pid 88847
+88847:C 12 Jun 2024 09:03:18.782 * DB saved on disk
+88847:C 12 Jun 2024 09:03:18.783 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 09:03:18.865 * Background saving terminated with success
+58896:M 12 Jun 2024 09:20:51.679 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 09:20:51.682 * Background saving started by pid 89079
+89079:C 12 Jun 2024 09:20:51.805 * DB saved on disk
+89079:C 12 Jun 2024 09:20:51.806 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 09:20:51.885 * Background saving terminated with success
+58896:M 12 Jun 2024 09:36:57.986 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 09:36:57.986 * Background saving started by pid 89273
+89273:C 12 Jun 2024 09:36:57.994 * DB saved on disk
+89273:C 12 Jun 2024 09:36:57.994 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 09:36:58.089 * Background saving terminated with success
+58896:M 12 Jun 2024 09:52:59.001 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 09:52:59.002 * Background saving started by pid 89459
+89459:C 12 Jun 2024 09:52:59.013 * DB saved on disk
+89459:C 12 Jun 2024 09:52:59.014 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 09:52:59.103 * Background saving terminated with success
+58896:M 12 Jun 2024 10:09:08.619 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 10:09:08.624 * Background saving started by pid 89697
+89697:C 12 Jun 2024 10:09:08.738 * DB saved on disk
+89697:C 12 Jun 2024 10:09:08.739 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 10:09:08.831 * Background saving terminated with success
+58896:M 12 Jun 2024 10:27:48.637 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 10:27:48.640 * Background saving started by pid 90312
+90312:C 12 Jun 2024 10:27:48.754 * DB saved on disk
+90312:C 12 Jun 2024 10:27:48.754 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 10:27:48.843 * Background saving terminated with success
+58896:M 12 Jun 2024 10:45:08.649 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 10:45:08.765 * Background saving started by pid 90689
+90689:C 12 Jun 2024 10:45:08.781 * DB saved on disk
+90689:C 12 Jun 2024 10:45:08.781 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 10:45:08.872 * Background saving terminated with success
+58896:M 12 Jun 2024 11:04:19.611 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 11:04:19.731 * Background saving started by pid 91384
+91384:C 12 Jun 2024 11:04:19.738 * DB saved on disk
+91384:C 12 Jun 2024 11:04:19.739 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 11:04:19.837 * Background saving terminated with success
+58896:M 12 Jun 2024 11:13:15.115 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 11:13:15.119 * Background saving started by pid 91575
+91575:C 12 Jun 2024 11:13:15.137 * DB saved on disk
+91575:C 12 Jun 2024 11:13:15.138 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 11:13:15.222 * Background saving terminated with success
+58896:M 12 Jun 2024 12:18:47.632 * 1 changes in 3600 seconds. Saving...
+58896:M 12 Jun 2024 12:18:47.635 * Background saving started by pid 91694
+91694:C 12 Jun 2024 12:18:47.756 * DB saved on disk
+91694:C 12 Jun 2024 12:18:47.757 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 12:18:47.839 * Background saving terminated with success
+58896:M 12 Jun 2024 12:39:26.809 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 12:39:26.811 * Background saving started by pid 92013
+92013:C 12 Jun 2024 12:39:26.824 * DB saved on disk
+92013:C 12 Jun 2024 12:39:26.825 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 12:39:26.912 * Background saving terminated with success
+58896:M 12 Jun 2024 12:44:27.044 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 12:44:27.045 * Background saving started by pid 93150
+93150:C 12 Jun 2024 12:44:27.052 * DB saved on disk
+93150:C 12 Jun 2024 12:44:27.053 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 12:44:27.146 * Background saving terminated with success
+58896:M 12 Jun 2024 12:49:28.034 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 12:49:28.036 * Background saving started by pid 94288
+94288:C 12 Jun 2024 12:49:28.048 * DB saved on disk
+94288:C 12 Jun 2024 12:49:28.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 12:49:28.137 * Background saving terminated with success
+58896:M 12 Jun 2024 12:54:29.012 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 12:54:29.015 * Background saving started by pid 95469
+95469:C 12 Jun 2024 12:54:29.028 * DB saved on disk
+95469:C 12 Jun 2024 12:54:29.028 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 12:54:29.116 * Background saving terminated with success
+58896:M 12 Jun 2024 12:59:30.022 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 12:59:30.023 * Background saving started by pid 96610
+96610:C 12 Jun 2024 12:59:30.041 * DB saved on disk
+96610:C 12 Jun 2024 12:59:30.041 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 12:59:30.124 * Background saving terminated with success
+58896:M 12 Jun 2024 13:04:31.035 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 13:04:31.036 * Background saving started by pid 97725
+97725:C 12 Jun 2024 13:04:31.047 * DB saved on disk
+97725:C 12 Jun 2024 13:04:31.049 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 13:04:31.138 * Background saving terminated with success
+58896:M 12 Jun 2024 13:09:32.020 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 13:09:32.022 * Background saving started by pid 98866
+98866:C 12 Jun 2024 13:09:32.038 * DB saved on disk
+98866:C 12 Jun 2024 13:09:32.039 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 13:09:32.123 * Background saving terminated with success
+58896:M 12 Jun 2024 13:14:33.004 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 13:14:33.005 * Background saving started by pid 103
+103:C 12 Jun 2024 13:14:33.013 * DB saved on disk
+103:C 12 Jun 2024 13:14:33.014 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 13:14:33.106 * Background saving terminated with success
+58896:M 12 Jun 2024 13:19:34.047 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 13:19:34.048 * Background saving started by pid 1450
+1450:C 12 Jun 2024 13:19:34.058 * DB saved on disk
+1450:C 12 Jun 2024 13:19:34.058 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 13:19:34.149 * Background saving terminated with success
+58896:M 12 Jun 2024 13:24:35.060 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 13:24:35.063 * Background saving started by pid 2598
+2598:C 12 Jun 2024 13:24:35.072 * DB saved on disk
+2598:C 12 Jun 2024 13:24:35.072 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 13:24:35.165 * Background saving terminated with success
+58896:M 12 Jun 2024 13:29:36.006 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 13:29:36.009 * Background saving started by pid 3725
+3725:C 12 Jun 2024 13:29:36.019 * DB saved on disk
+3725:C 12 Jun 2024 13:29:36.020 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 13:29:36.110 * Background saving terminated with success
+58896:M 12 Jun 2024 13:34:37.016 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 13:34:37.017 * Background saving started by pid 4859
+4859:C 12 Jun 2024 13:34:37.036 * DB saved on disk
+4859:C 12 Jun 2024 13:34:37.037 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 13:34:37.118 * Background saving terminated with success
+58896:M 12 Jun 2024 13:52:39.660 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 13:52:39.663 * Background saving started by pid 5136
+5136:C 12 Jun 2024 13:52:39.783 * DB saved on disk
+5136:C 12 Jun 2024 13:52:39.783 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 13:52:39.867 * Background saving terminated with success
+58896:M 12 Jun 2024 14:18:00.088 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 14:18:00.089 * Background saving started by pid 5336
+5336:C 12 Jun 2024 14:18:00.100 * DB saved on disk
+5336:C 12 Jun 2024 14:18:00.101 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 14:18:00.192 * Background saving terminated with success
+58896:M 12 Jun 2024 14:23:39.578 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 14:23:39.579 * Background saving started by pid 5595
+5595:C 12 Jun 2024 14:23:39.589 * DB saved on disk
+5595:C 12 Jun 2024 14:23:39.590 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 14:23:39.686 * Background saving terminated with success
+58896:M 12 Jun 2024 14:28:40.051 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 14:28:40.063 * Background saving started by pid 6818
+6818:C 12 Jun 2024 14:28:40.079 * DB saved on disk
+6818:C 12 Jun 2024 14:28:40.080 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 14:28:40.165 * Background saving terminated with success
+58896:M 12 Jun 2024 14:33:41.100 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 14:33:41.101 * Background saving started by pid 7974
+7974:C 12 Jun 2024 14:33:41.111 * DB saved on disk
+7974:C 12 Jun 2024 14:33:41.112 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 14:33:41.202 * Background saving terminated with success
+58896:M 12 Jun 2024 14:39:15.833 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 14:39:15.834 * Background saving started by pid 9245
+9245:C 12 Jun 2024 14:39:15.843 * DB saved on disk
+9245:C 12 Jun 2024 14:39:15.843 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 14:39:15.936 * Background saving terminated with success
+58896:M 12 Jun 2024 14:44:16.080 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 14:44:16.083 * Background saving started by pid 10377
+10377:C 12 Jun 2024 14:44:16.090 * DB saved on disk
+10377:C 12 Jun 2024 14:44:16.091 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 14:44:16.184 * Background saving terminated with success
+58896:M 12 Jun 2024 14:49:17.004 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 14:49:17.006 * Background saving started by pid 11532
+11532:C 12 Jun 2024 14:49:17.020 * DB saved on disk
+11532:C 12 Jun 2024 14:49:17.020 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 14:49:17.107 * Background saving terminated with success
+58896:M 12 Jun 2024 14:54:18.054 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 14:54:18.055 * Background saving started by pid 12728
+12728:C 12 Jun 2024 14:54:18.071 * DB saved on disk
+12728:C 12 Jun 2024 14:54:18.072 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 14:54:18.156 * Background saving terminated with success
+58896:M 12 Jun 2024 14:59:19.060 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 14:59:19.063 * Background saving started by pid 13955
+13955:C 12 Jun 2024 14:59:19.095 * DB saved on disk
+13955:C 12 Jun 2024 14:59:19.096 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 14:59:19.164 * Background saving terminated with success
+58896:M 12 Jun 2024 15:04:50.041 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 15:04:50.046 * Background saving started by pid 14209
+14209:C 12 Jun 2024 15:04:50.161 * DB saved on disk
+14209:C 12 Jun 2024 15:04:50.162 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 15:04:50.249 * Background saving terminated with success
+58896:M 12 Jun 2024 15:16:01.278 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 15:16:01.280 * Background saving started by pid 14412
+14412:C 12 Jun 2024 15:16:01.296 * DB saved on disk
+14412:C 12 Jun 2024 15:16:01.297 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 15:16:01.382 * Background saving terminated with success
+58896:M 12 Jun 2024 15:21:42.305 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 15:21:42.308 * Background saving started by pid 15693
+15693:C 12 Jun 2024 15:21:42.425 * DB saved on disk
+15693:C 12 Jun 2024 15:21:42.426 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 15:21:42.512 * Background saving terminated with success
+58896:M 12 Jun 2024 15:26:43.091 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 15:26:43.093 * Background saving started by pid 15892
+15892:C 12 Jun 2024 15:26:43.108 * DB saved on disk
+15892:C 12 Jun 2024 15:26:43.109 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 15:26:43.208 * Background saving terminated with success
+58896:M 12 Jun 2024 15:32:08.470 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 15:32:08.473 * Background saving started by pid 16107
+16107:C 12 Jun 2024 15:32:08.586 * DB saved on disk
+16107:C 12 Jun 2024 15:32:08.587 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 15:32:08.675 * Background saving terminated with success
+58896:M 12 Jun 2024 15:37:51.306 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 15:37:51.311 * Background saving started by pid 16417
+16417:C 12 Jun 2024 15:37:51.431 * DB saved on disk
+16417:C 12 Jun 2024 15:37:51.432 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 15:37:51.515 * Background saving terminated with success
+58896:M 12 Jun 2024 15:43:24.501 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 15:43:24.505 * Background saving started by pid 16735
+16735:C 12 Jun 2024 15:43:24.619 * DB saved on disk
+16735:C 12 Jun 2024 15:43:24.620 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 15:43:24.709 * Background saving terminated with success
+58896:M 12 Jun 2024 15:49:17.280 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 15:49:17.282 * Background saving started by pid 17067
+17067:C 12 Jun 2024 15:49:17.403 * DB saved on disk
+17067:C 12 Jun 2024 15:49:17.403 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 15:49:17.486 * Background saving terminated with success
+58896:M 12 Jun 2024 15:58:31.612 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 15:58:31.612 * Background saving started by pid 17349
+17349:C 12 Jun 2024 15:58:31.624 * DB saved on disk
+17349:C 12 Jun 2024 15:58:31.625 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 15:58:31.713 * Background saving terminated with success
+58896:M 12 Jun 2024 16:07:32.858 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 16:07:32.859 * Background saving started by pid 17568
+17568:C 12 Jun 2024 16:07:32.873 * DB saved on disk
+17568:C 12 Jun 2024 16:07:32.874 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 16:07:32.961 * Background saving terminated with success
+58896:M 12 Jun 2024 16:26:11.532 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 16:26:11.537 * Background saving started by pid 17835
+17835:C 12 Jun 2024 16:26:11.655 * DB saved on disk
+17835:C 12 Jun 2024 16:26:11.655 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 16:26:11.739 * Background saving terminated with success
+58896:M 12 Jun 2024 16:50:57.768 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 16:50:57.771 * Background saving started by pid 18014
+18014:C 12 Jun 2024 16:50:57.787 * DB saved on disk
+18014:C 12 Jun 2024 16:50:57.789 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 16:50:57.873 * Background saving terminated with success
+58896:M 12 Jun 2024 17:50:58.003 * 1 changes in 3600 seconds. Saving...
+58896:M 12 Jun 2024 17:50:58.005 * Background saving started by pid 18164
+18164:C 12 Jun 2024 17:50:58.024 * DB saved on disk
+18164:C 12 Jun 2024 17:50:58.025 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 17:50:58.107 * Background saving terminated with success
+58896:M 12 Jun 2024 18:04:59.651 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 18:04:59.655 * Background saving started by pid 18427
+18427:C 12 Jun 2024 18:04:59.667 * DB saved on disk
+18427:C 12 Jun 2024 18:04:59.667 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 18:04:59.756 * Background saving terminated with success
+58896:M 12 Jun 2024 18:10:00.077 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 18:10:00.079 * Background saving started by pid 19644
+19644:C 12 Jun 2024 18:10:00.091 * DB saved on disk
+19644:C 12 Jun 2024 18:10:00.092 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 18:10:00.180 * Background saving terminated with success
+58896:M 12 Jun 2024 18:15:01.093 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 18:15:01.095 * Background saving started by pid 20794
+20794:C 12 Jun 2024 18:15:01.115 * DB saved on disk
+20794:C 12 Jun 2024 18:15:01.116 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 18:15:01.197 * Background saving terminated with success
+58896:M 12 Jun 2024 18:20:02.078 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 18:20:02.079 * Background saving started by pid 21958
+21958:C 12 Jun 2024 18:20:02.088 * DB saved on disk
+21958:C 12 Jun 2024 18:20:02.089 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 18:20:02.181 * Background saving terminated with success
+58896:M 12 Jun 2024 18:25:03.029 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 18:25:03.030 * Background saving started by pid 23368
+23368:C 12 Jun 2024 18:25:03.037 * DB saved on disk
+23368:C 12 Jun 2024 18:25:03.039 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 18:25:03.131 * Background saving terminated with success
+58896:M 12 Jun 2024 18:30:04.044 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 18:30:04.046 * Background saving started by pid 24145
+24145:C 12 Jun 2024 18:30:04.054 * DB saved on disk
+24145:C 12 Jun 2024 18:30:04.054 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 18:30:04.147 * Background saving terminated with success
+58896:M 12 Jun 2024 18:35:05.081 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 18:35:05.085 * Background saving started by pid 24948
+24948:C 12 Jun 2024 18:35:05.096 * DB saved on disk
+24948:C 12 Jun 2024 18:35:05.097 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 18:35:05.188 * Background saving terminated with success
+58896:M 12 Jun 2024 18:40:06.059 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 18:40:06.060 * Background saving started by pid 26487
+26487:C 12 Jun 2024 18:40:06.072 * DB saved on disk
+26487:C 12 Jun 2024 18:40:06.072 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 18:40:06.160 * Background saving terminated with success
+58896:M 12 Jun 2024 18:45:07.016 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 18:45:07.019 * Background saving started by pid 27478
+27478:C 12 Jun 2024 18:45:07.030 * DB saved on disk
+27478:C 12 Jun 2024 18:45:07.033 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 18:45:07.120 * Background saving terminated with success
+58896:M 12 Jun 2024 18:50:08.096 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 18:50:08.100 * Background saving started by pid 28255
+28255:C 12 Jun 2024 18:50:08.118 * DB saved on disk
+28255:C 12 Jun 2024 18:50:08.120 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 18:50:08.202 * Background saving terminated with success
+58896:M 12 Jun 2024 18:55:09.066 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 18:55:09.067 * Background saving started by pid 29026
+29026:C 12 Jun 2024 18:55:09.077 * DB saved on disk
+29026:C 12 Jun 2024 18:55:09.078 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 18:55:09.169 * Background saving terminated with success
+58896:M 12 Jun 2024 19:00:10.054 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 19:00:10.060 * Background saving started by pid 29846
+29846:C 12 Jun 2024 19:00:10.077 * DB saved on disk
+29846:C 12 Jun 2024 19:00:10.078 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 19:00:10.162 * Background saving terminated with success
+58896:M 12 Jun 2024 19:05:11.091 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 19:05:11.093 * Background saving started by pid 30656
+30656:C 12 Jun 2024 19:05:11.106 * DB saved on disk
+30656:C 12 Jun 2024 19:05:11.107 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 19:05:11.195 * Background saving terminated with success
+58896:M 12 Jun 2024 19:10:12.069 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 19:10:12.070 * Background saving started by pid 31436
+31436:C 12 Jun 2024 19:10:12.082 * DB saved on disk
+31436:C 12 Jun 2024 19:10:12.083 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 19:10:12.172 * Background saving terminated with success
+58896:M 12 Jun 2024 19:15:13.031 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 19:15:13.032 * Background saving started by pid 32456
+32456:C 12 Jun 2024 19:15:13.045 * DB saved on disk
+32456:C 12 Jun 2024 19:15:13.046 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 19:15:13.133 * Background saving terminated with success
+58896:M 12 Jun 2024 19:20:14.035 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 19:20:14.036 * Background saving started by pid 33746
+33746:C 12 Jun 2024 19:20:14.045 * DB saved on disk
+33746:C 12 Jun 2024 19:20:14.046 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 19:20:14.137 * Background saving terminated with success
+58896:M 12 Jun 2024 19:25:15.038 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 19:25:15.041 * Background saving started by pid 34925
+34925:C 12 Jun 2024 19:25:15.057 * DB saved on disk
+34925:C 12 Jun 2024 19:25:15.057 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 19:25:15.142 * Background saving terminated with success
+58896:M 12 Jun 2024 19:30:16.003 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 19:30:16.008 * Background saving started by pid 36180
+36180:C 12 Jun 2024 19:30:16.026 * DB saved on disk
+36180:C 12 Jun 2024 19:30:16.026 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 19:30:16.110 * Background saving terminated with success
+58896:M 12 Jun 2024 19:30:50.487 * DB saved on disk
+58896:M 12 Jun 2024 19:35:51.066 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 19:35:51.070 * Background saving started by pid 37695
+37695:C 12 Jun 2024 19:35:51.097 * DB saved on disk
+37695:C 12 Jun 2024 19:35:51.098 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 19:35:51.171 * Background saving terminated with success
+58896:M 12 Jun 2024 19:40:52.077 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 19:40:52.078 * Background saving started by pid 38605
+38605:C 12 Jun 2024 19:40:52.091 * DB saved on disk
+38605:C 12 Jun 2024 19:40:52.092 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 19:40:52.180 * Background saving terminated with success
+58896:M 12 Jun 2024 19:45:53.092 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 19:45:53.093 * Background saving started by pid 39738
+39738:C 12 Jun 2024 19:45:53.101 * DB saved on disk
+39738:C 12 Jun 2024 19:45:53.101 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 19:45:53.194 * Background saving terminated with success
+58896:M 12 Jun 2024 19:50:54.028 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 19:50:54.031 * Background saving started by pid 40670
+40670:C 12 Jun 2024 19:50:54.042 * DB saved on disk
+40670:C 12 Jun 2024 19:50:54.043 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 19:50:54.133 * Background saving terminated with success
+58896:M 12 Jun 2024 19:55:55.096 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 19:55:55.099 * Background saving started by pid 43457
+43457:C 12 Jun 2024 19:55:55.107 * DB saved on disk
+43457:C 12 Jun 2024 19:55:55.107 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 19:55:55.199 * Background saving terminated with success
+58896:M 12 Jun 2024 20:00:56.029 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 20:00:56.031 * Background saving started by pid 44453
+44453:C 12 Jun 2024 20:00:56.037 * DB saved on disk
+44453:C 12 Jun 2024 20:00:56.037 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 20:00:56.133 * Background saving terminated with success
+58896:M 12 Jun 2024 20:05:57.013 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 20:05:57.016 * Background saving started by pid 45420
+45420:C 12 Jun 2024 20:05:57.025 * DB saved on disk
+45420:C 12 Jun 2024 20:05:57.026 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 20:05:57.117 * Background saving terminated with success
+58896:M 12 Jun 2024 20:10:58.035 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 20:10:58.037 * Background saving started by pid 46445
+46445:C 12 Jun 2024 20:10:58.045 * DB saved on disk
+46445:C 12 Jun 2024 20:10:58.045 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 20:10:58.138 * Background saving terminated with success
+58896:M 12 Jun 2024 20:15:59.030 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 20:15:59.032 * Background saving started by pid 47438
+47438:C 12 Jun 2024 20:15:59.041 * DB saved on disk
+47438:C 12 Jun 2024 20:15:59.042 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 20:15:59.133 * Background saving terminated with success
+58896:M 12 Jun 2024 20:21:00.034 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 20:21:00.036 * Background saving started by pid 48364
+48364:C 12 Jun 2024 20:21:00.043 * DB saved on disk
+48364:C 12 Jun 2024 20:21:00.046 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 20:21:00.137 * Background saving terminated with success
+58896:M 12 Jun 2024 20:26:01.085 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 20:26:01.086 * Background saving started by pid 49394
+49394:C 12 Jun 2024 20:26:01.094 * DB saved on disk
+49394:C 12 Jun 2024 20:26:01.095 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 20:26:01.188 * Background saving terminated with success
+58896:M 12 Jun 2024 20:31:02.083 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 20:31:02.088 * Background saving started by pid 50479
+50479:C 12 Jun 2024 20:31:02.109 * DB saved on disk
+50479:C 12 Jun 2024 20:31:02.110 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 20:31:02.190 * Background saving terminated with success
+58896:M 12 Jun 2024 20:36:03.042 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 20:36:03.043 * Background saving started by pid 51442
+51442:C 12 Jun 2024 20:36:03.065 * DB saved on disk
+51442:C 12 Jun 2024 20:36:03.066 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 20:36:03.144 * Background saving terminated with success
+58896:M 12 Jun 2024 20:41:04.076 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 20:41:04.078 * Background saving started by pid 52399
+52399:C 12 Jun 2024 20:41:04.090 * DB saved on disk
+52399:C 12 Jun 2024 20:41:04.093 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 20:41:04.179 * Background saving terminated with success
+58896:M 12 Jun 2024 20:46:05.056 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 20:46:05.058 * Background saving started by pid 53397
+53397:C 12 Jun 2024 20:46:05.067 * DB saved on disk
+53397:C 12 Jun 2024 20:46:05.067 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 20:46:05.159 * Background saving terminated with success
+58896:M 12 Jun 2024 20:51:06.070 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 20:51:06.071 * Background saving started by pid 54382
+54382:C 12 Jun 2024 20:51:06.078 * DB saved on disk
+54382:C 12 Jun 2024 20:51:06.081 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 20:51:06.172 * Background saving terminated with success
+58896:M 12 Jun 2024 20:56:07.099 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 20:56:07.102 * Background saving started by pid 57248
+57248:C 12 Jun 2024 20:56:07.111 * DB saved on disk
+57248:C 12 Jun 2024 20:56:07.111 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 20:56:07.204 * Background saving terminated with success
+58896:M 12 Jun 2024 21:01:08.063 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 21:01:08.066 * Background saving started by pid 58219
+58219:C 12 Jun 2024 21:01:08.075 * DB saved on disk
+58219:C 12 Jun 2024 21:01:08.076 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 21:01:08.168 * Background saving terminated with success
+58896:M 12 Jun 2024 21:06:09.082 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 21:06:09.082 * Background saving started by pid 58998
+58998:C 12 Jun 2024 21:06:09.092 * DB saved on disk
+58998:C 12 Jun 2024 21:06:09.093 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 21:06:09.184 * Background saving terminated with success
+58896:M 12 Jun 2024 22:22:09.136 * 1 changes in 3600 seconds. Saving...
+58896:M 12 Jun 2024 22:22:09.196 * Background saving started by pid 59686
+59686:C 12 Jun 2024 22:22:09.431 * DB saved on disk
+59686:C 12 Jun 2024 22:22:09.536 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 22:22:09.634 * Background saving terminated with success
+58896:M 12 Jun 2024 22:27:10.079 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 22:27:10.083 * Background saving started by pid 60609
+60609:C 12 Jun 2024 22:27:10.097 * DB saved on disk
+60609:C 12 Jun 2024 22:27:10.099 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 22:27:10.184 * Background saving terminated with success
+58896:M 12 Jun 2024 22:32:11.032 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 22:32:11.036 * Background saving started by pid 61470
+61470:C 12 Jun 2024 22:32:11.054 * DB saved on disk
+61470:C 12 Jun 2024 22:32:11.055 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 22:32:11.138 * Background saving terminated with success
+58896:M 12 Jun 2024 22:37:12.056 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 22:37:12.057 * Background saving started by pid 62229
+62229:C 12 Jun 2024 22:37:12.071 * DB saved on disk
+62229:C 12 Jun 2024 22:37:12.071 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 22:37:12.159 * Background saving terminated with success
+58896:M 12 Jun 2024 22:42:13.023 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 22:42:13.025 * Background saving started by pid 63085
+63085:C 12 Jun 2024 22:42:13.036 * DB saved on disk
+63085:C 12 Jun 2024 22:42:13.036 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 22:42:13.126 * Background saving terminated with success
+58896:M 12 Jun 2024 22:47:14.042 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 22:47:14.043 * Background saving started by pid 63867
+63867:C 12 Jun 2024 22:47:14.052 * DB saved on disk
+63867:C 12 Jun 2024 22:47:14.053 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 22:47:14.145 * Background saving terminated with success
+58896:M 12 Jun 2024 22:52:15.071 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 22:52:15.075 * Background saving started by pid 64674
+64674:C 12 Jun 2024 22:52:15.106 * DB saved on disk
+64674:C 12 Jun 2024 22:52:15.106 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 22:52:15.177 * Background saving terminated with success
+58896:M 12 Jun 2024 22:57:16.052 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 22:57:16.053 * Background saving started by pid 65510
+65510:C 12 Jun 2024 22:57:16.071 * DB saved on disk
+65510:C 12 Jun 2024 22:57:16.076 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 22:57:16.155 * Background saving terminated with success
+58896:M 12 Jun 2024 23:02:17.039 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 23:02:17.043 * Background saving started by pid 66374
+66374:C 12 Jun 2024 23:02:17.059 * DB saved on disk
+66374:C 12 Jun 2024 23:02:17.061 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 23:02:17.145 * Background saving terminated with success
+58896:M 12 Jun 2024 23:07:18.063 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 23:07:18.065 * Background saving started by pid 69899
+69899:C 12 Jun 2024 23:07:18.097 * DB saved on disk
+69899:C 12 Jun 2024 23:07:18.097 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 23:07:18.166 * Background saving terminated with success
+58896:M 12 Jun 2024 23:12:19.010 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 23:12:19.013 * Background saving started by pid 72751
+72751:C 12 Jun 2024 23:12:19.025 * DB saved on disk
+72751:C 12 Jun 2024 23:12:19.026 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 23:12:19.114 * Background saving terminated with success
+58896:M 12 Jun 2024 23:17:20.093 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 23:17:20.097 * Background saving started by pid 73511
+73511:C 12 Jun 2024 23:17:20.117 * DB saved on disk
+73511:C 12 Jun 2024 23:17:20.118 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 23:17:20.199 * Background saving terminated with success
+58896:M 12 Jun 2024 23:22:21.096 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 23:22:21.098 * Background saving started by pid 74274
+74274:C 12 Jun 2024 23:22:21.108 * DB saved on disk
+74274:C 12 Jun 2024 23:22:21.115 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 23:22:21.199 * Background saving terminated with success
+58896:M 12 Jun 2024 23:27:22.041 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 23:27:22.044 * Background saving started by pid 75086
+75086:C 12 Jun 2024 23:27:22.055 * DB saved on disk
+75086:C 12 Jun 2024 23:27:22.055 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 23:27:22.145 * Background saving terminated with success
+58896:M 12 Jun 2024 23:32:23.057 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 23:32:23.059 * Background saving started by pid 75910
+75910:C 12 Jun 2024 23:32:23.070 * DB saved on disk
+75910:C 12 Jun 2024 23:32:23.075 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 23:32:23.161 * Background saving terminated with success
+58896:M 12 Jun 2024 23:37:24.000 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 23:37:24.002 * Background saving started by pid 76753
+76753:C 12 Jun 2024 23:37:24.021 * DB saved on disk
+76753:C 12 Jun 2024 23:37:24.026 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 23:37:24.102 * Background saving terminated with success
+58896:M 12 Jun 2024 23:42:25.039 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 23:42:25.041 * Background saving started by pid 77566
+77566:C 12 Jun 2024 23:42:25.051 * DB saved on disk
+77566:C 12 Jun 2024 23:42:25.053 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 23:42:25.142 * Background saving terminated with success
+58896:M 12 Jun 2024 23:47:26.048 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 23:47:26.051 * Background saving started by pid 78392
+78392:C 12 Jun 2024 23:47:26.058 * DB saved on disk
+78392:C 12 Jun 2024 23:47:26.059 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 23:47:26.152 * Background saving terminated with success
+58896:M 12 Jun 2024 23:52:27.035 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 23:52:27.036 * Background saving started by pid 79236
+79236:C 12 Jun 2024 23:52:27.047 * DB saved on disk
+79236:C 12 Jun 2024 23:52:27.051 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 23:52:27.137 * Background saving terminated with success
+58896:M 12 Jun 2024 23:57:28.011 * 100 changes in 300 seconds. Saving...
+58896:M 12 Jun 2024 23:57:28.017 * Background saving started by pid 80013
+80013:C 12 Jun 2024 23:57:28.026 * DB saved on disk
+80013:C 12 Jun 2024 23:57:28.027 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 12 Jun 2024 23:57:28.118 * Background saving terminated with success
+58896:M 13 Jun 2024 00:02:29.041 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 00:02:29.043 * Background saving started by pid 80854
+80854:C 13 Jun 2024 00:02:29.061 * DB saved on disk
+80854:C 13 Jun 2024 00:02:29.061 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 00:02:29.145 * Background saving terminated with success
+58896:M 13 Jun 2024 00:07:30.083 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 00:07:30.088 * Background saving started by pid 81692
+81692:C 13 Jun 2024 00:07:30.100 * DB saved on disk
+81692:C 13 Jun 2024 00:07:30.100 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 00:07:30.190 * Background saving terminated with success
+58896:M 13 Jun 2024 00:12:31.037 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 00:12:31.038 * Background saving started by pid 82630
+82630:C 13 Jun 2024 00:12:31.048 * DB saved on disk
+82630:C 13 Jun 2024 00:12:31.049 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 00:12:31.139 * Background saving terminated with success
+58896:M 13 Jun 2024 00:18:24.490 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 00:18:24.492 * Background saving started by pid 83628
+83628:C 13 Jun 2024 00:18:24.500 * DB saved on disk
+83628:C 13 Jun 2024 00:18:24.501 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 00:18:24.594 * Background saving terminated with success
+58896:M 13 Jun 2024 00:23:25.099 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 00:23:25.101 * Background saving started by pid 84426
+84426:C 13 Jun 2024 00:23:25.116 * DB saved on disk
+84426:C 13 Jun 2024 00:23:25.119 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 00:23:25.203 * Background saving terminated with success
+58896:M 13 Jun 2024 00:28:26.000 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 00:28:26.003 * Background saving started by pid 85490
+85490:C 13 Jun 2024 00:28:26.011 * DB saved on disk
+85490:C 13 Jun 2024 00:28:26.012 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 00:28:26.104 * Background saving terminated with success
+58896:M 13 Jun 2024 00:39:55.401 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 00:39:55.402 * Background saving started by pid 87583
+87583:C 13 Jun 2024 00:39:55.410 * DB saved on disk
+87583:C 13 Jun 2024 00:39:55.410 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 00:39:55.504 * Background saving terminated with success
+58896:M 13 Jun 2024 00:44:56.013 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 00:44:56.017 * Background saving started by pid 88517
+88517:C 13 Jun 2024 00:44:56.027 * DB saved on disk
+88517:C 13 Jun 2024 00:44:56.028 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 00:44:56.118 * Background saving terminated with success
+58896:M 13 Jun 2024 00:49:57.055 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 00:49:57.058 * Background saving started by pid 89534
+89534:C 13 Jun 2024 00:49:57.066 * DB saved on disk
+89534:C 13 Jun 2024 00:49:57.067 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 00:49:57.159 * Background saving terminated with success
+58896:M 13 Jun 2024 00:54:58.054 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 00:54:58.056 * Background saving started by pid 92884
+92884:C 13 Jun 2024 00:54:58.076 * DB saved on disk
+92884:C 13 Jun 2024 00:54:58.079 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 00:54:58.156 * Background saving terminated with success
+58896:M 13 Jun 2024 00:59:59.094 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 00:59:59.096 * Background saving started by pid 97454
+97454:C 13 Jun 2024 00:59:59.105 * DB saved on disk
+97454:C 13 Jun 2024 00:59:59.106 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 00:59:59.197 * Background saving terminated with success
+58896:M 13 Jun 2024 01:05:00.065 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 01:05:00.067 * Background saving started by pid 208
+208:C 13 Jun 2024 01:05:00.075 * DB saved on disk
+208:C 13 Jun 2024 01:05:00.076 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 01:05:00.168 * Background saving terminated with success
+58896:M 13 Jun 2024 01:10:01.004 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 01:10:01.007 * Background saving started by pid 1501
+1501:C 13 Jun 2024 01:10:01.016 * DB saved on disk
+1501:C 13 Jun 2024 01:10:01.017 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 01:10:01.108 * Background saving terminated with success
+58896:M 13 Jun 2024 01:15:02.069 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 01:15:02.073 * Background saving started by pid 8174
+8174:C 13 Jun 2024 01:15:02.087 * DB saved on disk
+8174:C 13 Jun 2024 01:15:02.088 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 01:15:02.176 * Background saving terminated with success
+58896:M 13 Jun 2024 01:20:03.007 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 01:20:03.009 * Background saving started by pid 12151
+12151:C 13 Jun 2024 01:20:03.021 * DB saved on disk
+12151:C 13 Jun 2024 01:20:03.022 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 01:20:03.111 * Background saving terminated with success
+58896:M 13 Jun 2024 01:25:04.011 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 01:25:04.014 * Background saving started by pid 14019
+14019:C 13 Jun 2024 01:25:04.027 * DB saved on disk
+14019:C 13 Jun 2024 01:25:04.028 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 01:25:04.115 * Background saving terminated with success
+58896:M 13 Jun 2024 01:30:05.029 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 01:30:05.032 * Background saving started by pid 15236
+15236:C 13 Jun 2024 01:30:05.048 * DB saved on disk
+15236:C 13 Jun 2024 01:30:05.050 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 01:30:05.134 * Background saving terminated with success
+58896:M 13 Jun 2024 01:46:26.863 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 01:46:26.987 * Background saving started by pid 16271
+16271:C 13 Jun 2024 01:46:26.997 * DB saved on disk
+16271:C 13 Jun 2024 01:46:26.997 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 01:46:27.096 * Background saving terminated with success
+58896:M 13 Jun 2024 02:53:37.566 * 1 changes in 3600 seconds. Saving...
+58896:M 13 Jun 2024 02:53:37.687 * Background saving started by pid 16361
+16361:C 13 Jun 2024 02:53:37.704 * DB saved on disk
+16361:C 13 Jun 2024 02:53:37.704 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 02:53:37.795 * Background saving terminated with success
+58896:M 13 Jun 2024 04:00:22.582 * 1 changes in 3600 seconds. Saving...
+58896:M 13 Jun 2024 04:00:22.694 * Background saving started by pid 16480
+16480:C 13 Jun 2024 04:00:22.703 * DB saved on disk
+16480:C 13 Jun 2024 04:00:22.704 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 04:00:22.807 * Background saving terminated with success
+58896:M 13 Jun 2024 05:06:58.599 * 1 changes in 3600 seconds. Saving...
+58896:M 13 Jun 2024 05:06:58.605 * Background saving started by pid 16586
+16586:C 13 Jun 2024 05:06:58.718 * DB saved on disk
+16586:C 13 Jun 2024 05:06:58.718 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 05:06:58.809 * Background saving terminated with success
+58896:M 13 Jun 2024 06:11:18.604 * 1 changes in 3600 seconds. Saving...
+58896:M 13 Jun 2024 06:11:18.613 * Background saving started by pid 16651
+16651:C 13 Jun 2024 06:11:18.714 * DB saved on disk
+16651:C 13 Jun 2024 06:11:18.746 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 06:11:18.815 * Background saving terminated with success
+58896:M 13 Jun 2024 07:14:47.525 * 1 changes in 3600 seconds. Saving...
+58896:M 13 Jun 2024 07:14:47.528 * Background saving started by pid 17811
+17811:C 13 Jun 2024 07:14:47.679 * DB saved on disk
+17811:C 13 Jun 2024 07:14:47.680 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 07:14:47.730 * Background saving terminated with success
+58896:M 13 Jun 2024 08:23:04.569 * 1 changes in 3600 seconds. Saving...
+58896:M 13 Jun 2024 08:23:04.574 * Background saving started by pid 17919
+17919:C 13 Jun 2024 08:23:04.680 * DB saved on disk
+17919:C 13 Jun 2024 08:23:04.680 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 08:23:04.775 * Background saving terminated with success
+58896:M 13 Jun 2024 09:29:23.578 * 1 changes in 3600 seconds. Saving...
+58896:M 13 Jun 2024 09:29:23.582 * Background saving started by pid 18003
+18003:C 13 Jun 2024 09:29:23.701 * DB saved on disk
+18003:C 13 Jun 2024 09:29:23.701 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 09:29:23.787 * Background saving terminated with success
+58896:M 13 Jun 2024 09:50:20.002 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 09:50:20.002 * Background saving started by pid 18192
+18192:C 13 Jun 2024 09:50:20.013 * DB saved on disk
+18192:C 13 Jun 2024 09:50:20.016 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 09:50:20.103 * Background saving terminated with success
+58896:M 13 Jun 2024 10:26:10.581 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 10:26:10.583 * Background saving started by pid 18369
+18369:C 13 Jun 2024 10:26:10.602 * DB saved on disk
+18369:C 13 Jun 2024 10:26:10.602 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 10:26:10.684 * Background saving terminated with success
+58896:M 13 Jun 2024 10:55:26.571 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 10:55:26.574 * Background saving started by pid 18571
+18571:C 13 Jun 2024 10:55:26.591 * DB saved on disk
+18571:C 13 Jun 2024 10:55:26.591 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 10:55:26.676 * Background saving terminated with success
+58896:M 13 Jun 2024 11:29:14.641 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 11:29:14.644 * Background saving started by pid 18818
+18818:C 13 Jun 2024 11:29:14.664 * DB saved on disk
+18818:C 13 Jun 2024 11:29:14.665 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 11:29:14.746 * Background saving terminated with success
+58896:M 13 Jun 2024 11:34:15.063 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 11:34:15.065 * Background saving started by pid 20038
+20038:C 13 Jun 2024 11:34:15.082 * DB saved on disk
+20038:C 13 Jun 2024 11:34:15.084 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 11:34:15.167 * Background saving terminated with success
+58896:M 13 Jun 2024 11:39:16.050 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 11:39:16.052 * Background saving started by pid 21202
+21202:C 13 Jun 2024 11:39:16.063 * DB saved on disk
+21202:C 13 Jun 2024 11:39:16.064 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 11:39:16.153 * Background saving terminated with success
+58896:M 13 Jun 2024 11:44:17.031 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 11:44:17.033 * Background saving started by pid 22048
+22048:C 13 Jun 2024 11:44:17.047 * DB saved on disk
+22048:C 13 Jun 2024 11:44:17.049 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 11:44:17.135 * Background saving terminated with success
+58896:M 13 Jun 2024 11:49:18.051 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 11:49:18.056 * Background saving started by pid 22821
+22821:C 13 Jun 2024 11:49:18.073 * DB saved on disk
+22821:C 13 Jun 2024 11:49:18.077 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 11:49:18.159 * Background saving terminated with success
+58896:M 13 Jun 2024 11:54:19.040 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 11:54:19.043 * Background saving started by pid 23590
+23590:C 13 Jun 2024 11:54:19.061 * DB saved on disk
+23590:C 13 Jun 2024 11:54:19.062 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 11:54:19.145 * Background saving terminated with success
+58896:M 13 Jun 2024 11:59:20.034 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 11:59:20.038 * Background saving started by pid 24369
+24369:C 13 Jun 2024 11:59:20.055 * DB saved on disk
+24369:C 13 Jun 2024 11:59:20.056 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 11:59:20.141 * Background saving terminated with success
+58896:M 13 Jun 2024 12:04:21.013 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 12:04:21.015 * Background saving started by pid 25145
+25145:C 13 Jun 2024 12:04:21.033 * DB saved on disk
+25145:C 13 Jun 2024 12:04:21.034 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 12:04:21.117 * Background saving terminated with success
+58896:M 13 Jun 2024 12:09:22.032 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 12:09:22.035 * Background saving started by pid 25919
+25919:C 13 Jun 2024 12:09:22.049 * DB saved on disk
+25919:C 13 Jun 2024 12:09:22.052 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 12:09:22.138 * Background saving terminated with success
+58896:M 13 Jun 2024 12:14:23.054 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 12:14:23.058 * Background saving started by pid 26708
+26708:C 13 Jun 2024 12:14:23.070 * DB saved on disk
+26708:C 13 Jun 2024 12:14:23.089 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 12:14:23.160 * Background saving terminated with success
+58896:M 13 Jun 2024 12:22:08.623 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 12:22:08.625 * Background saving started by pid 26869
+26869:C 13 Jun 2024 12:22:08.638 * DB saved on disk
+26869:C 13 Jun 2024 12:22:08.640 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 12:22:08.727 * Background saving terminated with success
+58896:M 13 Jun 2024 13:22:56.523 * 1 changes in 3600 seconds. Saving...
+58896:M 13 Jun 2024 13:22:56.527 * Background saving started by pid 26934
+26934:C 13 Jun 2024 13:22:56.637 * DB saved on disk
+26934:C 13 Jun 2024 13:22:56.638 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 13:22:56.730 * Background saving terminated with success
+58896:M 13 Jun 2024 13:54:40.777 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 13:54:40.779 * Background saving started by pid 27164
+27164:C 13 Jun 2024 13:54:40.788 * DB saved on disk
+27164:C 13 Jun 2024 13:54:40.789 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 13:54:40.881 * Background saving terminated with success
+58896:M 13 Jun 2024 13:59:41.086 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 13:59:41.091 * Background saving started by pid 28401
+28401:C 13 Jun 2024 13:59:41.125 * DB saved on disk
+28401:C 13 Jun 2024 13:59:41.125 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 13:59:41.193 * Background saving terminated with success
+58896:M 13 Jun 2024 14:04:42.086 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 14:04:42.089 * Background saving started by pid 29364
+29364:C 13 Jun 2024 14:04:42.106 * DB saved on disk
+29364:C 13 Jun 2024 14:04:42.108 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 14:04:42.191 * Background saving terminated with success
+58896:M 13 Jun 2024 14:09:43.039 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 14:09:43.040 * Background saving started by pid 30343
+30343:C 13 Jun 2024 14:09:43.050 * DB saved on disk
+30343:C 13 Jun 2024 14:09:43.051 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 14:09:43.141 * Background saving terminated with success
+58896:M 13 Jun 2024 14:14:44.095 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 14:14:44.098 * Background saving started by pid 31113
+31113:C 13 Jun 2024 14:14:44.106 * DB saved on disk
+31113:C 13 Jun 2024 14:14:44.107 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 14:14:44.199 * Background saving terminated with success
+58896:M 13 Jun 2024 14:19:45.073 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 14:19:45.077 * Background saving started by pid 31876
+31876:C 13 Jun 2024 14:19:45.105 * DB saved on disk
+31876:C 13 Jun 2024 14:19:45.106 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 14:19:45.181 * Background saving terminated with success
+58896:M 13 Jun 2024 14:24:46.099 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 14:24:46.103 * Background saving started by pid 32589
+32589:C 13 Jun 2024 14:24:46.117 * DB saved on disk
+32589:C 13 Jun 2024 14:24:46.119 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 14:24:46.204 * Background saving terminated with success
+58896:M 13 Jun 2024 14:29:47.029 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 14:29:47.035 * Background saving started by pid 33317
+33317:C 13 Jun 2024 14:29:47.050 * DB saved on disk
+33317:C 13 Jun 2024 14:29:47.051 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 14:29:47.137 * Background saving terminated with success
+58896:M 13 Jun 2024 14:34:48.042 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 14:34:48.044 * Background saving started by pid 34025
+34025:C 13 Jun 2024 14:34:48.057 * DB saved on disk
+34025:C 13 Jun 2024 14:34:48.060 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 14:34:48.147 * Background saving terminated with success
+58896:M 13 Jun 2024 14:39:49.001 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 14:39:49.002 * Background saving started by pid 34740
+34740:C 13 Jun 2024 14:39:49.011 * DB saved on disk
+34740:C 13 Jun 2024 14:39:49.011 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 14:39:49.104 * Background saving terminated with success
+58896:M 13 Jun 2024 14:44:50.083 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 14:44:50.086 * Background saving started by pid 35603
+35603:C 13 Jun 2024 14:44:50.098 * DB saved on disk
+35603:C 13 Jun 2024 14:44:50.101 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 14:44:50.188 * Background saving terminated with success
+58896:M 13 Jun 2024 14:49:51.042 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 14:49:51.045 * Background saving started by pid 36239
+36239:C 13 Jun 2024 14:49:51.057 * DB saved on disk
+36239:C 13 Jun 2024 14:49:51.059 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 14:49:51.146 * Background saving terminated with success
+58896:M 13 Jun 2024 14:54:52.029 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 14:54:52.032 * Background saving started by pid 36857
+36857:C 13 Jun 2024 14:54:52.066 * DB saved on disk
+36857:C 13 Jun 2024 14:54:52.067 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 14:54:52.134 * Background saving terminated with success
+58896:M 13 Jun 2024 14:59:53.033 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 14:59:53.036 * Background saving started by pid 37538
+37538:C 13 Jun 2024 14:59:53.051 * DB saved on disk
+37538:C 13 Jun 2024 14:59:53.052 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 14:59:53.138 * Background saving terminated with success
+58896:M 13 Jun 2024 15:04:54.044 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 15:04:54.046 * Background saving started by pid 38152
+38152:C 13 Jun 2024 15:04:54.067 * DB saved on disk
+38152:C 13 Jun 2024 15:04:54.069 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 15:04:54.148 * Background saving terminated with success
+58896:M 13 Jun 2024 15:09:55.087 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 15:09:55.092 * Background saving started by pid 38789
+38789:C 13 Jun 2024 15:09:55.104 * DB saved on disk
+38789:C 13 Jun 2024 15:09:55.104 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 15:09:55.196 * Background saving terminated with success
+58896:M 13 Jun 2024 15:14:56.035 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 15:14:56.037 * Background saving started by pid 39411
+39411:C 13 Jun 2024 15:14:56.050 * DB saved on disk
+39411:C 13 Jun 2024 15:14:56.051 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 15:14:56.138 * Background saving terminated with success
+58896:M 13 Jun 2024 15:19:57.033 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 15:19:57.034 * Background saving started by pid 40025
+40025:C 13 Jun 2024 15:19:57.043 * DB saved on disk
+40025:C 13 Jun 2024 15:19:57.044 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 15:19:57.135 * Background saving terminated with success
+58896:M 13 Jun 2024 15:24:58.059 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 15:24:58.061 * Background saving started by pid 40708
+40708:C 13 Jun 2024 15:24:58.082 * DB saved on disk
+40708:C 13 Jun 2024 15:24:58.082 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 15:24:58.162 * Background saving terminated with success
+58896:M 13 Jun 2024 15:29:59.058 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 15:29:59.061 * Background saving started by pid 41970
+41970:C 13 Jun 2024 15:29:59.074 * DB saved on disk
+41970:C 13 Jun 2024 15:29:59.076 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 15:29:59.162 * Background saving terminated with success
+58896:M 13 Jun 2024 15:35:00.020 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 15:35:00.023 * Background saving started by pid 46158
+46158:C 13 Jun 2024 15:35:00.046 * DB saved on disk
+46158:C 13 Jun 2024 15:35:00.047 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 15:35:00.124 * Background saving terminated with success
+58896:M 13 Jun 2024 15:40:01.046 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 15:40:01.049 * Background saving started by pid 52688
+52688:C 13 Jun 2024 15:40:01.069 * DB saved on disk
+52688:C 13 Jun 2024 15:40:01.069 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 15:40:01.150 * Background saving terminated with success
+58896:M 13 Jun 2024 15:45:53.815 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 15:45:53.818 * Background saving started by pid 53374
+53374:C 13 Jun 2024 15:45:53.840 * DB saved on disk
+53374:C 13 Jun 2024 15:45:53.840 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 15:45:53.919 * Background saving terminated with success
+58896:M 13 Jun 2024 15:50:54.050 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 15:50:54.052 * Background saving started by pid 54742
+54742:C 13 Jun 2024 15:50:54.072 * DB saved on disk
+54742:C 13 Jun 2024 15:50:54.072 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 15:50:54.153 * Background saving terminated with success
+58896:M 13 Jun 2024 15:55:55.100 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 15:55:55.103 * Background saving started by pid 55431
+55431:C 13 Jun 2024 15:55:55.114 * DB saved on disk
+55431:C 13 Jun 2024 15:55:55.114 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 15:55:55.204 * Background saving terminated with success
+58896:M 13 Jun 2024 16:00:56.039 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 16:00:56.042 * Background saving started by pid 56109
+56109:C 13 Jun 2024 16:00:56.051 * DB saved on disk
+56109:C 13 Jun 2024 16:00:56.052 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 16:00:56.144 * Background saving terminated with success
+58896:M 13 Jun 2024 16:05:57.027 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 16:05:57.029 * Background saving started by pid 56794
+56794:C 13 Jun 2024 16:05:57.044 * DB saved on disk
+56794:C 13 Jun 2024 16:05:57.045 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 16:05:57.130 * Background saving terminated with success
+58896:M 13 Jun 2024 16:10:58.007 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 16:10:58.010 * Background saving started by pid 57424
+57424:C 13 Jun 2024 16:10:58.022 * DB saved on disk
+57424:C 13 Jun 2024 16:10:58.026 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 16:10:58.111 * Background saving terminated with success
+58896:M 13 Jun 2024 16:15:59.064 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 16:15:59.067 * Background saving started by pid 58114
+58114:C 13 Jun 2024 16:15:59.076 * DB saved on disk
+58114:C 13 Jun 2024 16:15:59.077 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 16:15:59.168 * Background saving terminated with success
+58896:M 13 Jun 2024 16:21:00.080 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 16:21:00.081 * Background saving started by pid 58865
+58865:C 13 Jun 2024 16:21:00.090 * DB saved on disk
+58865:C 13 Jun 2024 16:21:00.091 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 16:21:00.182 * Background saving terminated with success
+58896:M 13 Jun 2024 16:26:01.042 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 16:26:01.045 * Background saving started by pid 59640
+59640:C 13 Jun 2024 16:26:01.059 * DB saved on disk
+59640:C 13 Jun 2024 16:26:01.060 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 16:26:01.145 * Background saving terminated with success
+58896:M 13 Jun 2024 16:31:02.024 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 16:31:02.026 * Background saving started by pid 60306
+60306:C 13 Jun 2024 16:31:02.037 * DB saved on disk
+60306:C 13 Jun 2024 16:31:02.038 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 16:31:02.127 * Background saving terminated with success
+58896:M 13 Jun 2024 16:36:03.023 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 16:36:03.027 * Background saving started by pid 60889
+60889:C 13 Jun 2024 16:36:03.056 * DB saved on disk
+60889:C 13 Jun 2024 16:36:03.056 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 16:36:03.128 * Background saving terminated with success
+58896:M 13 Jun 2024 16:41:04.062 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 16:41:04.065 * Background saving started by pid 61499
+61499:C 13 Jun 2024 16:41:04.084 * DB saved on disk
+61499:C 13 Jun 2024 16:41:04.085 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 16:41:04.167 * Background saving terminated with success
+58896:M 13 Jun 2024 16:46:05.062 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 16:46:05.064 * Background saving started by pid 62153
+62153:C 13 Jun 2024 16:46:05.078 * DB saved on disk
+62153:C 13 Jun 2024 16:46:05.079 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 16:46:05.165 * Background saving terminated with success
+58896:M 13 Jun 2024 16:51:06.057 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 16:51:06.058 * Background saving started by pid 62839
+62839:C 13 Jun 2024 16:51:06.067 * DB saved on disk
+62839:C 13 Jun 2024 16:51:06.069 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 16:51:06.159 * Background saving terminated with success
+58896:M 13 Jun 2024 16:56:07.041 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 16:56:07.044 * Background saving started by pid 63527
+63527:C 13 Jun 2024 16:56:07.063 * DB saved on disk
+63527:C 13 Jun 2024 16:56:07.064 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 16:56:07.144 * Background saving terminated with success
+58896:M 13 Jun 2024 17:01:08.073 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 17:01:08.074 * Background saving started by pid 64189
+64189:C 13 Jun 2024 17:01:08.088 * DB saved on disk
+64189:C 13 Jun 2024 17:01:08.089 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 17:01:08.175 * Background saving terminated with success
+58896:M 13 Jun 2024 17:06:09.002 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 17:06:09.003 * Background saving started by pid 64802
+64802:C 13 Jun 2024 17:06:09.018 * DB saved on disk
+64802:C 13 Jun 2024 17:06:09.021 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 17:06:09.104 * Background saving terminated with success
+58896:M 13 Jun 2024 17:11:10.042 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 17:11:10.046 * Background saving started by pid 65468
+65468:C 13 Jun 2024 17:11:10.069 * DB saved on disk
+65468:C 13 Jun 2024 17:11:10.071 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 17:11:10.148 * Background saving terminated with success
+58896:M 13 Jun 2024 17:16:11.098 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 17:16:11.099 * Background saving started by pid 66110
+66110:C 13 Jun 2024 17:16:11.108 * DB saved on disk
+66110:C 13 Jun 2024 17:16:11.109 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 17:16:11.201 * Background saving terminated with success
+58896:M 13 Jun 2024 17:21:12.003 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 17:21:12.006 * Background saving started by pid 66847
+66847:C 13 Jun 2024 17:21:12.018 * DB saved on disk
+66847:C 13 Jun 2024 17:21:12.019 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 17:21:12.111 * Background saving terminated with success
+58896:M 13 Jun 2024 17:26:13.038 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 17:26:13.041 * Background saving started by pid 67587
+67587:C 13 Jun 2024 17:26:13.051 * DB saved on disk
+67587:C 13 Jun 2024 17:26:13.052 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 17:26:13.142 * Background saving terminated with success
+58896:M 13 Jun 2024 17:31:14.027 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 17:31:14.028 * Background saving started by pid 68487
+68487:C 13 Jun 2024 17:31:14.036 * DB saved on disk
+68487:C 13 Jun 2024 17:31:14.038 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 17:31:14.130 * Background saving terminated with success
+58896:M 13 Jun 2024 17:36:15.062 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 17:36:15.064 * Background saving started by pid 69205
+69205:C 13 Jun 2024 17:36:15.071 * DB saved on disk
+69205:C 13 Jun 2024 17:36:15.072 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 17:36:15.174 * Background saving terminated with success
+58896:M 13 Jun 2024 17:41:16.100 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 17:41:16.101 * Background saving started by pid 70157
+70157:C 13 Jun 2024 17:41:16.117 * DB saved on disk
+70157:C 13 Jun 2024 17:41:16.118 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 17:41:16.203 * Background saving terminated with success
+58896:M 13 Jun 2024 17:46:17.062 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 17:46:17.064 * Background saving started by pid 70947
+70947:C 13 Jun 2024 17:46:17.078 * DB saved on disk
+70947:C 13 Jun 2024 17:46:17.080 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 17:46:17.165 * Background saving terminated with success
+58896:M 13 Jun 2024 17:51:18.076 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 17:51:18.077 * Background saving started by pid 71675
+71675:C 13 Jun 2024 17:51:18.086 * DB saved on disk
+71675:C 13 Jun 2024 17:51:18.090 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 17:51:18.179 * Background saving terminated with success
+58896:M 13 Jun 2024 17:56:19.041 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 17:56:19.042 * Background saving started by pid 72514
+72514:C 13 Jun 2024 17:56:19.052 * DB saved on disk
+72514:C 13 Jun 2024 17:56:19.053 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 17:56:19.144 * Background saving terminated with success
+58896:M 13 Jun 2024 18:01:20.068 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 18:01:20.069 * Background saving started by pid 73367
+73367:C 13 Jun 2024 18:01:20.084 * DB saved on disk
+73367:C 13 Jun 2024 18:01:20.085 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 18:01:20.171 * Background saving terminated with success
+58896:M 13 Jun 2024 18:06:21.045 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 18:06:21.048 * Background saving started by pid 74221
+74221:C 13 Jun 2024 18:06:21.057 * DB saved on disk
+74221:C 13 Jun 2024 18:06:21.062 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 18:06:21.148 * Background saving terminated with success
+58896:M 13 Jun 2024 18:11:22.084 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 18:11:22.088 * Background saving started by pid 75044
+75044:C 13 Jun 2024 18:11:22.111 * DB saved on disk
+75044:C 13 Jun 2024 18:11:22.111 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 18:11:22.189 * Background saving terminated with success
+58896:M 13 Jun 2024 18:16:23.041 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 18:16:23.042 * Background saving started by pid 76027
+76027:C 13 Jun 2024 18:16:23.057 * DB saved on disk
+76027:C 13 Jun 2024 18:16:23.058 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 18:16:23.143 * Background saving terminated with success
+58896:M 13 Jun 2024 18:21:24.069 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 18:21:24.073 * Background saving started by pid 77094
+77094:C 13 Jun 2024 18:21:24.084 * DB saved on disk
+77094:C 13 Jun 2024 18:21:24.086 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 18:21:24.175 * Background saving terminated with success
+58896:M 13 Jun 2024 18:26:25.068 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 18:26:25.073 * Background saving started by pid 78140
+78140:C 13 Jun 2024 18:26:25.088 * DB saved on disk
+78140:C 13 Jun 2024 18:26:25.091 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 18:26:25.174 * Background saving terminated with success
+58896:M 13 Jun 2024 18:31:26.038 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 18:31:26.041 * Background saving started by pid 79121
+79121:C 13 Jun 2024 18:31:26.053 * DB saved on disk
+79121:C 13 Jun 2024 18:31:26.053 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 18:31:26.142 * Background saving terminated with success
+58896:M 13 Jun 2024 18:36:27.010 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 18:36:27.013 * Background saving started by pid 80053
+80053:C 13 Jun 2024 18:36:27.025 * DB saved on disk
+80053:C 13 Jun 2024 18:36:27.026 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 18:36:27.115 * Background saving terminated with success
+58896:M 13 Jun 2024 18:41:28.100 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 18:41:28.101 * Background saving started by pid 81033
+81033:C 13 Jun 2024 18:41:28.110 * DB saved on disk
+81033:C 13 Jun 2024 18:41:28.114 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 18:41:28.203 * Background saving terminated with success
+58896:M 13 Jun 2024 18:46:29.056 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 18:46:29.058 * Background saving started by pid 82024
+82024:C 13 Jun 2024 18:46:29.068 * DB saved on disk
+82024:C 13 Jun 2024 18:46:29.069 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 18:46:29.159 * Background saving terminated with success
+58896:M 13 Jun 2024 18:51:30.025 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 18:51:30.026 * Background saving started by pid 83031
+83031:C 13 Jun 2024 18:51:30.040 * DB saved on disk
+83031:C 13 Jun 2024 18:51:30.041 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 18:51:30.128 * Background saving terminated with success
+58896:M 13 Jun 2024 18:56:31.028 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 18:56:31.032 * Background saving started by pid 84257
+84257:C 13 Jun 2024 18:56:31.061 * DB saved on disk
+84257:C 13 Jun 2024 18:56:31.061 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 18:56:31.133 * Background saving terminated with success
+58896:M 13 Jun 2024 19:01:32.042 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 19:01:32.045 * Background saving started by pid 85971
+85971:C 13 Jun 2024 19:01:32.057 * DB saved on disk
+85971:C 13 Jun 2024 19:01:32.058 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 19:01:32.146 * Background saving terminated with success
+58896:M 13 Jun 2024 19:06:33.000 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 19:06:33.004 * Background saving started by pid 87445
+87445:C 13 Jun 2024 19:06:33.015 * DB saved on disk
+87445:C 13 Jun 2024 19:06:33.016 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 19:06:33.106 * Background saving terminated with success
+58896:M 13 Jun 2024 19:24:03.583 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 19:24:03.704 * Background saving started by pid 88482
+88482:C 13 Jun 2024 19:24:03.712 * DB saved on disk
+88482:C 13 Jun 2024 19:24:03.715 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 19:24:03.820 * Background saving terminated with success
+58896:M 13 Jun 2024 19:29:04.018 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 19:29:04.020 * Background saving started by pid 89605
+89605:C 13 Jun 2024 19:29:04.035 * DB saved on disk
+89605:C 13 Jun 2024 19:29:04.037 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 19:29:04.122 * Background saving terminated with success
+58896:M 13 Jun 2024 19:34:05.068 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 19:34:05.071 * Background saving started by pid 90623
+90623:C 13 Jun 2024 19:34:05.084 * DB saved on disk
+90623:C 13 Jun 2024 19:34:05.085 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 19:34:05.172 * Background saving terminated with success
+58896:M 13 Jun 2024 19:39:06.039 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 19:39:06.040 * Background saving started by pid 91649
+91649:C 13 Jun 2024 19:39:06.050 * DB saved on disk
+91649:C 13 Jun 2024 19:39:06.051 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 19:39:06.141 * Background saving terminated with success
+58896:M 13 Jun 2024 19:44:07.052 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 19:44:07.053 * Background saving started by pid 92499
+92499:C 13 Jun 2024 19:44:07.066 * DB saved on disk
+92499:C 13 Jun 2024 19:44:07.067 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 19:44:07.154 * Background saving terminated with success
+58896:M 13 Jun 2024 19:49:08.026 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 19:49:08.029 * Background saving started by pid 93255
+93255:C 13 Jun 2024 19:49:08.047 * DB saved on disk
+93255:C 13 Jun 2024 19:49:08.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 19:49:08.130 * Background saving terminated with success
+58896:M 13 Jun 2024 20:09:23.789 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 20:09:23.790 * Background saving started by pid 93455
+93455:C 13 Jun 2024 20:09:23.800 * DB saved on disk
+93455:C 13 Jun 2024 20:09:23.802 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 20:09:23.891 * Background saving terminated with success
+58896:M 13 Jun 2024 21:00:24.763 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 21:00:24.764 * Background saving started by pid 93545
+93545:C 13 Jun 2024 21:00:24.776 * DB saved on disk
+93545:C 13 Jun 2024 21:00:24.777 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 21:00:24.867 * Background saving terminated with success
+58896:M 13 Jun 2024 21:09:07.599 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 21:09:07.601 * Background saving started by pid 93647
+93647:C 13 Jun 2024 21:09:07.614 * DB saved on disk
+93647:C 13 Jun 2024 21:09:07.616 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 21:09:07.702 * Background saving terminated with success
+58896:M 13 Jun 2024 22:15:01.689 * 1 changes in 3600 seconds. Saving...
+58896:M 13 Jun 2024 22:15:01.693 * Background saving started by pid 93707
+93707:C 13 Jun 2024 22:15:01.819 * DB saved on disk
+93707:C 13 Jun 2024 22:15:01.820 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 22:15:01.896 * Background saving terminated with success
+58896:M 13 Jun 2024 22:28:49.316 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 22:28:49.319 * Background saving started by pid 93877
+93877:C 13 Jun 2024 22:28:49.333 * DB saved on disk
+93877:C 13 Jun 2024 22:28:49.334 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 22:28:49.421 * Background saving terminated with success
+58896:M 13 Jun 2024 23:38:03.790 * 1 changes in 3600 seconds. Saving...
+58896:M 13 Jun 2024 23:38:03.794 * Background saving started by pid 94043
+94043:C 13 Jun 2024 23:38:03.905 * DB saved on disk
+94043:C 13 Jun 2024 23:38:03.908 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 23:38:03.997 * Background saving terminated with success
+58896:M 13 Jun 2024 23:43:04.085 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 23:43:04.087 * Background saving started by pid 94614
+94614:C 13 Jun 2024 23:43:04.094 * DB saved on disk
+94614:C 13 Jun 2024 23:43:04.095 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 23:43:04.189 * Background saving terminated with success
+58896:M 13 Jun 2024 23:48:05.057 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 23:48:05.063 * Background saving started by pid 95362
+95362:C 13 Jun 2024 23:48:05.086 * DB saved on disk
+95362:C 13 Jun 2024 23:48:05.087 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 23:48:05.165 * Background saving terminated with success
+58896:M 13 Jun 2024 23:53:06.041 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 23:53:06.042 * Background saving started by pid 96187
+96187:C 13 Jun 2024 23:53:06.060 * DB saved on disk
+96187:C 13 Jun 2024 23:53:06.061 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 23:53:06.143 * Background saving terminated with success
+58896:M 13 Jun 2024 23:58:07.014 * 100 changes in 300 seconds. Saving...
+58896:M 13 Jun 2024 23:58:07.017 * Background saving started by pid 97027
+97027:C 13 Jun 2024 23:58:07.030 * DB saved on disk
+97027:C 13 Jun 2024 23:58:07.030 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 13 Jun 2024 23:58:07.118 * Background saving terminated with success
+58896:M 14 Jun 2024 00:03:08.062 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 00:03:08.067 * Background saving started by pid 98055
+98055:C 14 Jun 2024 00:03:08.076 * DB saved on disk
+98055:C 14 Jun 2024 00:03:08.076 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 00:03:08.169 * Background saving terminated with success
+58896:M 14 Jun 2024 00:08:09.046 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 00:08:09.048 * Background saving started by pid 906
+906:C 14 Jun 2024 00:08:09.066 * DB saved on disk
+906:C 14 Jun 2024 00:08:09.068 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 00:08:09.149 * Background saving terminated with success
+58896:M 14 Jun 2024 00:13:10.017 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 00:13:10.021 * Background saving started by pid 2698
+2698:C 14 Jun 2024 00:13:10.034 * DB saved on disk
+2698:C 14 Jun 2024 00:13:10.038 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 00:13:10.122 * Background saving terminated with success
+58896:M 14 Jun 2024 00:18:11.011 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 00:18:11.013 * Background saving started by pid 3689
+3689:C 14 Jun 2024 00:18:11.022 * DB saved on disk
+3689:C 14 Jun 2024 00:18:11.023 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 00:18:11.114 * Background saving terminated with success
+58896:M 14 Jun 2024 00:23:12.093 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 00:23:12.094 * Background saving started by pid 4450
+4450:C 14 Jun 2024 00:23:12.105 * DB saved on disk
+4450:C 14 Jun 2024 00:23:12.105 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 00:23:12.196 * Background saving terminated with success
+58896:M 14 Jun 2024 00:28:13.053 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 00:28:13.055 * Background saving started by pid 5238
+5238:C 14 Jun 2024 00:28:13.065 * DB saved on disk
+5238:C 14 Jun 2024 00:28:13.065 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 00:28:13.156 * Background saving terminated with success
+58896:M 14 Jun 2024 00:33:14.055 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 00:33:14.057 * Background saving started by pid 6001
+6001:C 14 Jun 2024 00:33:14.066 * DB saved on disk
+6001:C 14 Jun 2024 00:33:14.067 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 00:33:14.158 * Background saving terminated with success
+58896:M 14 Jun 2024 00:38:15.023 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 00:38:15.025 * Background saving started by pid 6720
+6720:C 14 Jun 2024 00:38:15.034 * DB saved on disk
+6720:C 14 Jun 2024 00:38:15.037 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 00:38:15.126 * Background saving terminated with success
+58896:M 14 Jun 2024 00:43:16.074 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 00:43:16.077 * Background saving started by pid 7544
+7544:C 14 Jun 2024 00:43:16.090 * DB saved on disk
+7544:C 14 Jun 2024 00:43:16.091 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 00:43:16.179 * Background saving terminated with success
+58896:M 14 Jun 2024 00:48:17.008 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 00:48:17.009 * Background saving started by pid 8362
+8362:C 14 Jun 2024 00:48:17.018 * DB saved on disk
+8362:C 14 Jun 2024 00:48:17.020 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 00:48:17.110 * Background saving terminated with success
+58896:M 14 Jun 2024 00:53:18.083 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 00:53:18.086 * Background saving started by pid 9627
+9627:C 14 Jun 2024 00:53:18.094 * DB saved on disk
+9627:C 14 Jun 2024 00:53:18.096 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 00:53:18.187 * Background saving terminated with success
+58896:M 14 Jun 2024 00:58:19.056 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 00:58:19.058 * Background saving started by pid 10541
+10541:C 14 Jun 2024 00:58:19.068 * DB saved on disk
+10541:C 14 Jun 2024 00:58:19.069 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 00:58:19.160 * Background saving terminated with success
+58896:M 14 Jun 2024 01:03:20.085 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 01:03:20.088 * Background saving started by pid 11472
+11472:C 14 Jun 2024 01:03:20.099 * DB saved on disk
+11472:C 14 Jun 2024 01:03:20.112 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 01:03:20.189 * Background saving terminated with success
+58896:M 14 Jun 2024 01:08:21.081 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 01:08:21.083 * Background saving started by pid 12462
+12462:C 14 Jun 2024 01:08:21.094 * DB saved on disk
+12462:C 14 Jun 2024 01:08:21.095 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 01:08:21.184 * Background saving terminated with success
+58896:M 14 Jun 2024 01:13:22.058 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 01:13:22.059 * Background saving started by pid 13399
+13399:C 14 Jun 2024 01:13:22.069 * DB saved on disk
+13399:C 14 Jun 2024 01:13:22.071 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 01:13:22.161 * Background saving terminated with success
+58896:M 14 Jun 2024 01:18:23.041 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 01:18:23.042 * Background saving started by pid 14393
+14393:C 14 Jun 2024 01:18:23.054 * DB saved on disk
+14393:C 14 Jun 2024 01:18:23.055 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 01:18:23.142 * Background saving terminated with success
+58896:M 14 Jun 2024 01:23:24.029 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 01:23:24.031 * Background saving started by pid 15307
+15307:C 14 Jun 2024 01:23:24.047 * DB saved on disk
+15307:C 14 Jun 2024 01:23:24.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 01:23:24.132 * Background saving terminated with success
+58896:M 14 Jun 2024 01:28:25.054 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 01:28:25.057 * Background saving started by pid 16245
+16245:C 14 Jun 2024 01:28:25.067 * DB saved on disk
+16245:C 14 Jun 2024 01:28:25.068 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 01:28:25.158 * Background saving terminated with success
+58896:M 14 Jun 2024 01:33:26.047 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 01:33:26.051 * Background saving started by pid 17193
+17193:C 14 Jun 2024 01:33:26.081 * DB saved on disk
+17193:C 14 Jun 2024 01:33:26.081 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 01:33:26.153 * Background saving terminated with success
+58896:M 14 Jun 2024 01:38:27.082 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 01:38:27.084 * Background saving started by pid 18123
+18123:C 14 Jun 2024 01:38:27.100 * DB saved on disk
+18123:C 14 Jun 2024 01:38:27.101 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 01:38:27.185 * Background saving terminated with success
+58896:M 14 Jun 2024 01:43:28.063 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 01:43:28.065 * Background saving started by pid 20679
+20679:C 14 Jun 2024 01:43:28.076 * DB saved on disk
+20679:C 14 Jun 2024 01:43:28.080 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 01:43:28.172 * Background saving terminated with success
+58896:M 14 Jun 2024 01:48:29.019 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 01:48:29.024 * Background saving started by pid 21789
+21789:C 14 Jun 2024 01:48:29.033 * DB saved on disk
+21789:C 14 Jun 2024 01:48:29.035 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 01:48:29.125 * Background saving terminated with success
+58896:M 14 Jun 2024 01:53:30.004 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 01:53:30.005 * Background saving started by pid 22821
+22821:C 14 Jun 2024 01:53:30.021 * DB saved on disk
+22821:C 14 Jun 2024 01:53:30.023 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 01:53:30.108 * Background saving terminated with success
+58896:M 14 Jun 2024 01:58:31.072 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 01:58:31.072 * Background saving started by pid 23761
+23761:C 14 Jun 2024 01:58:31.081 * DB saved on disk
+23761:C 14 Jun 2024 01:58:31.082 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 01:58:31.175 * Background saving terminated with success
+58896:M 14 Jun 2024 02:03:32.084 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 02:03:32.087 * Background saving started by pid 24691
+24691:C 14 Jun 2024 02:03:32.112 * DB saved on disk
+24691:C 14 Jun 2024 02:03:32.115 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 02:03:32.188 * Background saving terminated with success
+58896:M 14 Jun 2024 02:08:33.012 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 02:08:33.013 * Background saving started by pid 25657
+25657:C 14 Jun 2024 02:08:33.023 * DB saved on disk
+25657:C 14 Jun 2024 02:08:33.024 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 02:08:33.116 * Background saving terminated with success
+58896:M 14 Jun 2024 02:13:34.062 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 02:13:34.065 * Background saving started by pid 26619
+26619:C 14 Jun 2024 02:13:34.082 * DB saved on disk
+26619:C 14 Jun 2024 02:13:34.084 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 02:13:34.168 * Background saving terminated with success
+58896:M 14 Jun 2024 02:18:35.092 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 02:18:35.095 * Background saving started by pid 27572
+27572:C 14 Jun 2024 02:18:35.110 * DB saved on disk
+27572:C 14 Jun 2024 02:18:35.112 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 02:18:35.197 * Background saving terminated with success
+58896:M 14 Jun 2024 02:23:36.006 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 02:23:36.010 * Background saving started by pid 28517
+28517:C 14 Jun 2024 02:23:36.030 * DB saved on disk
+28517:C 14 Jun 2024 02:23:36.031 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 02:23:36.111 * Background saving terminated with success
+58896:M 14 Jun 2024 02:28:37.011 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 02:28:37.014 * Background saving started by pid 29471
+29471:C 14 Jun 2024 02:28:37.025 * DB saved on disk
+29471:C 14 Jun 2024 02:28:37.025 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 02:28:37.115 * Background saving terminated with success
+58896:M 14 Jun 2024 02:33:38.046 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 02:33:38.050 * Background saving started by pid 30459
+30459:C 14 Jun 2024 02:33:38.067 * DB saved on disk
+30459:C 14 Jun 2024 02:33:38.068 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 02:33:38.154 * Background saving terminated with success
+58896:M 14 Jun 2024 02:38:39.080 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 02:38:39.086 * Background saving started by pid 31408
+31408:C 14 Jun 2024 02:38:39.107 * DB saved on disk
+31408:C 14 Jun 2024 02:38:39.108 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 02:38:39.188 * Background saving terminated with success
+58896:M 14 Jun 2024 02:43:40.090 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 02:43:40.093 * Background saving started by pid 32516
+32516:C 14 Jun 2024 02:43:40.103 * DB saved on disk
+32516:C 14 Jun 2024 02:43:40.103 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 02:43:40.195 * Background saving terminated with success
+58896:M 14 Jun 2024 02:48:41.094 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 02:48:41.098 * Background saving started by pid 33530
+33530:C 14 Jun 2024 02:48:41.110 * DB saved on disk
+33530:C 14 Jun 2024 02:48:41.113 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 02:48:41.200 * Background saving terminated with success
+58896:M 14 Jun 2024 02:53:42.065 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 02:53:42.068 * Background saving started by pid 34481
+34481:C 14 Jun 2024 02:53:42.097 * DB saved on disk
+34481:C 14 Jun 2024 02:53:42.098 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 02:53:42.170 * Background saving terminated with success
+58896:M 14 Jun 2024 03:02:19.016 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 03:02:19.020 * Background saving started by pid 35216
+35216:C 14 Jun 2024 03:02:19.144 * DB saved on disk
+35216:C 14 Jun 2024 03:02:19.144 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 03:02:19.223 * Background saving terminated with success
+58896:M 14 Jun 2024 04:09:00.287 * 1 changes in 3600 seconds. Saving...
+58896:M 14 Jun 2024 04:09:00.391 * Background saving started by pid 35351
+35351:C 14 Jun 2024 04:09:00.406 * DB saved on disk
+35351:C 14 Jun 2024 04:09:00.406 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 04:09:00.491 * Background saving terminated with success
+58896:M 14 Jun 2024 05:13:19.442 * 1 changes in 3600 seconds. Saving...
+58896:M 14 Jun 2024 05:13:19.559 * Background saving started by pid 35460
+35460:C 14 Jun 2024 05:13:19.566 * DB saved on disk
+35460:C 14 Jun 2024 05:13:19.567 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 05:13:19.680 * Background saving terminated with success
+58896:M 14 Jun 2024 05:45:38.251 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 05:45:38.253 * Background saving started by pid 35614
+35614:C 14 Jun 2024 05:45:38.263 * DB saved on disk
+35614:C 14 Jun 2024 05:45:38.264 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 05:45:38.354 * Background saving terminated with success
+58896:M 14 Jun 2024 06:06:43.153 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 06:06:43.156 * Background saving started by pid 35774
+35774:C 14 Jun 2024 06:06:43.171 * DB saved on disk
+35774:C 14 Jun 2024 06:06:43.172 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 06:06:43.259 * Background saving terminated with success
+58896:M 14 Jun 2024 06:44:23.413 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 06:44:23.415 * Background saving started by pid 35935
+35935:C 14 Jun 2024 06:44:23.428 * DB saved on disk
+35935:C 14 Jun 2024 06:44:23.428 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 06:44:23.516 * Background saving terminated with success
+58896:M 14 Jun 2024 07:48:29.341 * 1 changes in 3600 seconds. Saving...
+58896:M 14 Jun 2024 07:48:29.456 * Background saving started by pid 35997
+35997:C 14 Jun 2024 07:48:29.466 * DB saved on disk
+35997:C 14 Jun 2024 07:48:29.467 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 07:48:29.558 * Background saving terminated with success
+58896:M 14 Jun 2024 08:56:33.414 * 1 changes in 3600 seconds. Saving...
+58896:M 14 Jun 2024 08:56:33.530 * Background saving started by pid 36114
+36114:C 14 Jun 2024 08:56:33.539 * DB saved on disk
+36114:C 14 Jun 2024 08:56:33.539 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 08:56:33.632 * Background saving terminated with success
+58896:M 14 Jun 2024 09:59:37.399 * 1 changes in 3600 seconds. Saving...
+58896:M 14 Jun 2024 09:59:37.511 * Background saving started by pid 36190
+36190:C 14 Jun 2024 09:59:37.519 * DB saved on disk
+36190:C 14 Jun 2024 09:59:37.520 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 09:59:37.613 * Background saving terminated with success
+58896:M 14 Jun 2024 10:07:14.898 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 10:07:14.901 * Background saving started by pid 36472
+36472:C 14 Jun 2024 10:07:14.910 * DB saved on disk
+36472:C 14 Jun 2024 10:07:14.912 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 10:07:15.001 * Background saving terminated with success
+58896:M 14 Jun 2024 10:12:16.014 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 10:12:16.016 * Background saving started by pid 37638
+37638:C 14 Jun 2024 10:12:16.026 * DB saved on disk
+37638:C 14 Jun 2024 10:12:16.027 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 10:12:16.118 * Background saving terminated with success
+58896:M 14 Jun 2024 10:17:17.028 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 10:17:17.033 * Background saving started by pid 38618
+38618:C 14 Jun 2024 10:17:17.051 * DB saved on disk
+38618:C 14 Jun 2024 10:17:17.051 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 10:17:17.134 * Background saving terminated with success
+58896:M 14 Jun 2024 10:22:18.071 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 10:22:18.073 * Background saving started by pid 39601
+39601:C 14 Jun 2024 10:22:18.085 * DB saved on disk
+39601:C 14 Jun 2024 10:22:18.087 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 10:22:18.175 * Background saving terminated with success
+58896:M 14 Jun 2024 10:28:04.538 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 10:28:04.541 * Background saving started by pid 40107
+40107:C 14 Jun 2024 10:28:04.659 * DB saved on disk
+40107:C 14 Jun 2024 10:28:04.659 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 10:28:04.743 * Background saving terminated with success
+58896:M 14 Jun 2024 11:30:27.751 * 1 changes in 3600 seconds. Saving...
+58896:M 14 Jun 2024 11:30:27.759 * Background saving started by pid 40350
+40350:C 14 Jun 2024 11:30:27.857 * DB saved on disk
+40350:C 14 Jun 2024 11:30:27.859 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 11:30:27.860 * Background saving terminated with success
+58896:M 14 Jun 2024 12:16:13.786 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 12:16:13.787 * Background saving started by pid 40834
+40834:C 14 Jun 2024 12:16:13.796 * DB saved on disk
+40834:C 14 Jun 2024 12:16:13.797 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 12:16:13.888 * Background saving terminated with success
+58896:M 14 Jun 2024 12:21:14.062 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 12:21:14.063 * Background saving started by pid 42185
+42185:C 14 Jun 2024 12:21:14.071 * DB saved on disk
+42185:C 14 Jun 2024 12:21:14.072 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 12:21:14.165 * Background saving terminated with success
+58896:M 14 Jun 2024 12:58:11.704 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 12:58:11.706 * Background saving started by pid 50751
+50751:C 14 Jun 2024 12:58:11.762 * DB saved on disk
+50751:C 14 Jun 2024 12:58:11.763 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 12:58:11.809 * Background saving terminated with success
+58896:M 14 Jun 2024 14:00:57.749 * 1 changes in 3600 seconds. Saving...
+58896:M 14 Jun 2024 14:00:57.755 * Background saving started by pid 52756
+52756:C 14 Jun 2024 14:00:57.876 * DB saved on disk
+52756:C 14 Jun 2024 14:00:57.879 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 14:00:57.967 * Background saving terminated with success
+58896:M 14 Jun 2024 14:15:30.354 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 14:15:30.357 * Background saving started by pid 55569
+55569:C 14 Jun 2024 14:15:30.369 * DB saved on disk
+55569:C 14 Jun 2024 14:15:30.369 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 14:15:30.459 * Background saving terminated with success
+58896:M 14 Jun 2024 14:20:31.034 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 14:20:31.036 * Background saving started by pid 56310
+56310:C 14 Jun 2024 14:20:31.046 * DB saved on disk
+56310:C 14 Jun 2024 14:20:31.047 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 14:20:31.137 * Background saving terminated with success
+58896:M 14 Jun 2024 14:25:32.056 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 14:25:32.057 * Background saving started by pid 57022
+57022:C 14 Jun 2024 14:25:32.069 * DB saved on disk
+57022:C 14 Jun 2024 14:25:32.072 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 14:25:32.159 * Background saving terminated with success
+58896:M 14 Jun 2024 14:30:33.093 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 14:30:33.095 * Background saving started by pid 57937
+57937:C 14 Jun 2024 14:30:33.106 * DB saved on disk
+57937:C 14 Jun 2024 14:30:33.109 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 14:30:33.196 * Background saving terminated with success
+58896:M 14 Jun 2024 14:35:34.000 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 14:35:34.002 * Background saving started by pid 58663
+58663:C 14 Jun 2024 14:35:34.013 * DB saved on disk
+58663:C 14 Jun 2024 14:35:34.013 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 14:35:34.104 * Background saving terminated with success
+58896:M 14 Jun 2024 14:40:35.079 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 14:40:35.079 * Background saving started by pid 59354
+59354:C 14 Jun 2024 14:40:35.092 * DB saved on disk
+59354:C 14 Jun 2024 14:40:35.094 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 14:40:35.181 * Background saving terminated with success
+58896:M 14 Jun 2024 14:45:36.026 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 14:45:36.030 * Background saving started by pid 60066
+60066:C 14 Jun 2024 14:45:36.047 * DB saved on disk
+60066:C 14 Jun 2024 14:45:36.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 14:45:36.133 * Background saving terminated with success
+58896:M 14 Jun 2024 14:50:37.021 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 14:50:37.025 * Background saving started by pid 60762
+60762:C 14 Jun 2024 14:50:37.046 * DB saved on disk
+60762:C 14 Jun 2024 14:50:37.047 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 14:50:37.127 * Background saving terminated with success
+58896:M 14 Jun 2024 14:55:38.054 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 14:55:38.055 * Background saving started by pid 61516
+61516:C 14 Jun 2024 14:55:38.066 * DB saved on disk
+61516:C 14 Jun 2024 14:55:38.068 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 14:55:38.157 * Background saving terminated with success
+58896:M 14 Jun 2024 15:00:39.013 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 15:00:39.016 * Background saving started by pid 63393
+63393:C 14 Jun 2024 15:00:39.026 * DB saved on disk
+63393:C 14 Jun 2024 15:00:39.027 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 15:00:39.117 * Background saving terminated with success
+58896:M 14 Jun 2024 15:05:40.086 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 15:05:40.089 * Background saving started by pid 64106
+64106:C 14 Jun 2024 15:05:40.109 * DB saved on disk
+64106:C 14 Jun 2024 15:05:40.111 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 15:05:40.189 * Background saving terminated with success
+58896:M 14 Jun 2024 15:10:41.056 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 15:10:41.058 * Background saving started by pid 64797
+64797:C 14 Jun 2024 15:10:41.073 * DB saved on disk
+64797:C 14 Jun 2024 15:10:41.073 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 15:10:41.161 * Background saving terminated with success
+58896:M 14 Jun 2024 15:15:42.057 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 15:15:42.059 * Background saving started by pid 65518
+65518:C 14 Jun 2024 15:15:42.074 * DB saved on disk
+65518:C 14 Jun 2024 15:15:42.075 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 15:15:42.162 * Background saving terminated with success
+58896:M 14 Jun 2024 15:20:43.021 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 15:20:43.023 * Background saving started by pid 66505
+66505:C 14 Jun 2024 15:20:43.037 * DB saved on disk
+66505:C 14 Jun 2024 15:20:43.039 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 15:20:43.124 * Background saving terminated with success
+58896:M 14 Jun 2024 15:25:44.025 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 15:25:44.029 * Background saving started by pid 67388
+67388:C 14 Jun 2024 15:25:44.040 * DB saved on disk
+67388:C 14 Jun 2024 15:25:44.041 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 15:25:44.131 * Background saving terminated with success
+58896:M 14 Jun 2024 15:30:45.027 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 15:30:45.029 * Background saving started by pid 68256
+68256:C 14 Jun 2024 15:30:45.053 * DB saved on disk
+68256:C 14 Jun 2024 15:30:45.054 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 15:30:45.130 * Background saving terminated with success
+58896:M 14 Jun 2024 15:35:46.077 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 15:35:46.078 * Background saving started by pid 69268
+69268:C 14 Jun 2024 15:35:46.088 * DB saved on disk
+69268:C 14 Jun 2024 15:35:46.089 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 15:35:46.179 * Background saving terminated with success
+58896:M 14 Jun 2024 15:40:47.084 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 15:40:47.088 * Background saving started by pid 70026
+70026:C 14 Jun 2024 15:40:47.108 * DB saved on disk
+70026:C 14 Jun 2024 15:40:47.110 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 15:40:47.190 * Background saving terminated with success
+58896:M 14 Jun 2024 15:45:48.012 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 15:45:48.019 * Background saving started by pid 71012
+71012:C 14 Jun 2024 15:45:48.028 * DB saved on disk
+71012:C 14 Jun 2024 15:45:48.029 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 15:45:48.120 * Background saving terminated with success
+58896:M 14 Jun 2024 15:50:49.082 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 15:50:49.085 * Background saving started by pid 71940
+71940:C 14 Jun 2024 15:50:49.097 * DB saved on disk
+71940:C 14 Jun 2024 15:50:49.098 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 15:50:49.186 * Background saving terminated with success
+58896:M 14 Jun 2024 15:55:50.015 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 15:55:50.017 * Background saving started by pid 72898
+72898:C 14 Jun 2024 15:55:50.030 * DB saved on disk
+72898:C 14 Jun 2024 15:55:50.035 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 15:55:50.118 * Background saving terminated with success
+58896:M 14 Jun 2024 16:00:51.081 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 16:00:51.085 * Background saving started by pid 73649
+73649:C 14 Jun 2024 16:00:51.098 * DB saved on disk
+73649:C 14 Jun 2024 16:00:51.101 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 16:00:51.186 * Background saving terminated with success
+58896:M 14 Jun 2024 16:05:52.071 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 16:05:52.072 * Background saving started by pid 74480
+74480:C 14 Jun 2024 16:05:52.082 * DB saved on disk
+74480:C 14 Jun 2024 16:05:52.083 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 16:05:52.174 * Background saving terminated with success
+58896:M 14 Jun 2024 16:10:53.035 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 16:10:53.038 * Background saving started by pid 75372
+75372:C 14 Jun 2024 16:10:53.054 * DB saved on disk
+75372:C 14 Jun 2024 16:10:53.058 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 16:10:53.139 * Background saving terminated with success
+58896:M 14 Jun 2024 16:15:54.086 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 16:15:54.089 * Background saving started by pid 76156
+76156:C 14 Jun 2024 16:15:54.114 * DB saved on disk
+76156:C 14 Jun 2024 16:15:54.115 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 16:15:54.190 * Background saving terminated with success
+58896:M 14 Jun 2024 16:20:55.033 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 16:20:55.035 * Background saving started by pid 76928
+76928:C 14 Jun 2024 16:20:55.053 * DB saved on disk
+76928:C 14 Jun 2024 16:20:55.053 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 16:20:55.137 * Background saving terminated with success
+58896:M 14 Jun 2024 16:25:56.061 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 16:25:56.062 * Background saving started by pid 77910
+77910:C 14 Jun 2024 16:25:56.073 * DB saved on disk
+77910:C 14 Jun 2024 16:25:56.073 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 16:25:56.164 * Background saving terminated with success
+58896:M 14 Jun 2024 16:30:57.004 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 16:30:57.006 * Background saving started by pid 78693
+78693:C 14 Jun 2024 16:30:57.017 * DB saved on disk
+78693:C 14 Jun 2024 16:30:57.019 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 16:30:57.108 * Background saving terminated with success
+58896:M 14 Jun 2024 16:35:58.062 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 16:35:58.064 * Background saving started by pid 79546
+79546:C 14 Jun 2024 16:35:58.076 * DB saved on disk
+79546:C 14 Jun 2024 16:35:58.077 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 16:35:58.165 * Background saving terminated with success
+58896:M 14 Jun 2024 16:40:59.088 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 16:40:59.089 * Background saving started by pid 80305
+80305:C 14 Jun 2024 16:40:59.101 * DB saved on disk
+80305:C 14 Jun 2024 16:40:59.104 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 16:40:59.190 * Background saving terminated with success
+58896:M 14 Jun 2024 16:46:00.058 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 16:46:00.059 * Background saving started by pid 81213
+81213:C 14 Jun 2024 16:46:00.070 * DB saved on disk
+81213:C 14 Jun 2024 16:46:00.072 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 16:46:00.160 * Background saving terminated with success
+58896:M 14 Jun 2024 16:51:01.096 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 16:51:01.097 * Background saving started by pid 82247
+82247:C 14 Jun 2024 16:51:01.106 * DB saved on disk
+82247:C 14 Jun 2024 16:51:01.107 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 16:51:01.199 * Background saving terminated with success
+58896:M 14 Jun 2024 16:56:02.068 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 16:56:02.072 * Background saving started by pid 83296
+83296:C 14 Jun 2024 16:56:02.085 * DB saved on disk
+83296:C 14 Jun 2024 16:56:02.086 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 16:56:02.173 * Background saving terminated with success
+58896:M 14 Jun 2024 17:01:03.003 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 17:01:03.004 * Background saving started by pid 84035
+84035:C 14 Jun 2024 17:01:03.014 * DB saved on disk
+84035:C 14 Jun 2024 17:01:03.015 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 17:01:03.105 * Background saving terminated with success
+58896:M 14 Jun 2024 17:06:04.035 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 17:06:04.037 * Background saving started by pid 84625
+84625:C 14 Jun 2024 17:06:04.049 * DB saved on disk
+84625:C 14 Jun 2024 17:06:04.050 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 17:06:04.138 * Background saving terminated with success
+58896:M 14 Jun 2024 17:11:05.017 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 17:11:05.020 * Background saving started by pid 85292
+85292:C 14 Jun 2024 17:11:05.034 * DB saved on disk
+85292:C 14 Jun 2024 17:11:05.035 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 17:11:05.121 * Background saving terminated with success
+58896:M 14 Jun 2024 17:16:06.081 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 17:16:06.083 * Background saving started by pid 85873
+85873:C 14 Jun 2024 17:16:06.091 * DB saved on disk
+85873:C 14 Jun 2024 17:16:06.092 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 17:16:06.184 * Background saving terminated with success
+58896:M 14 Jun 2024 17:21:07.000 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 17:21:07.001 * Background saving started by pid 86432
+86432:C 14 Jun 2024 17:21:07.014 * DB saved on disk
+86432:C 14 Jun 2024 17:21:07.014 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 17:21:07.102 * Background saving terminated with success
+58896:M 14 Jun 2024 17:26:08.044 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 17:26:08.047 * Background saving started by pid 87015
+87015:C 14 Jun 2024 17:26:08.076 * DB saved on disk
+87015:C 14 Jun 2024 17:26:08.077 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 17:26:08.149 * Background saving terminated with success
+58896:M 14 Jun 2024 17:31:09.060 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 17:31:09.061 * Background saving started by pid 87646
+87646:C 14 Jun 2024 17:31:09.069 * DB saved on disk
+87646:C 14 Jun 2024 17:31:09.071 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 17:31:09.163 * Background saving terminated with success
+58896:M 14 Jun 2024 17:36:10.011 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 17:36:10.013 * Background saving started by pid 88231
+88231:C 14 Jun 2024 17:36:10.024 * DB saved on disk
+88231:C 14 Jun 2024 17:36:10.024 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 17:36:10.114 * Background saving terminated with success
+58896:M 14 Jun 2024 17:41:11.048 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 17:41:11.055 * Background saving started by pid 88988
+88988:C 14 Jun 2024 17:41:11.063 * DB saved on disk
+88988:C 14 Jun 2024 17:41:11.064 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 17:41:11.156 * Background saving terminated with success
+58896:M 14 Jun 2024 17:46:12.064 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 17:46:12.065 * Background saving started by pid 89661
+89661:C 14 Jun 2024 17:46:12.078 * DB saved on disk
+89661:C 14 Jun 2024 17:46:12.079 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 17:46:12.166 * Background saving terminated with success
+58896:M 14 Jun 2024 17:51:13.041 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 17:51:13.043 * Background saving started by pid 90249
+90249:C 14 Jun 2024 17:51:13.055 * DB saved on disk
+90249:C 14 Jun 2024 17:51:13.057 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 17:51:13.144 * Background saving terminated with success
+58896:M 14 Jun 2024 17:56:14.019 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 17:56:14.022 * Background saving started by pid 91032
+91032:C 14 Jun 2024 17:56:14.035 * DB saved on disk
+91032:C 14 Jun 2024 17:56:14.035 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 17:56:14.123 * Background saving terminated with success
+58896:M 14 Jun 2024 18:01:15.037 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 18:01:15.039 * Background saving started by pid 91806
+91806:C 14 Jun 2024 18:01:15.055 * DB saved on disk
+91806:C 14 Jun 2024 18:01:15.055 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 18:01:15.141 * Background saving terminated with success
+58896:M 14 Jun 2024 18:06:16.057 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 18:06:16.060 * Background saving started by pid 92444
+92444:C 14 Jun 2024 18:06:16.073 * DB saved on disk
+92444:C 14 Jun 2024 18:06:16.074 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 18:06:16.160 * Background saving terminated with success
+58896:M 14 Jun 2024 18:11:17.059 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 18:11:17.063 * Background saving started by pid 93096
+93096:C 14 Jun 2024 18:11:17.080 * DB saved on disk
+93096:C 14 Jun 2024 18:11:17.081 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 18:11:17.164 * Background saving terminated with success
+58896:M 14 Jun 2024 18:16:18.080 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 18:16:18.083 * Background saving started by pid 93766
+93766:C 14 Jun 2024 18:16:18.094 * DB saved on disk
+93766:C 14 Jun 2024 18:16:18.096 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 18:16:18.184 * Background saving terminated with success
+58896:M 14 Jun 2024 18:21:19.098 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 18:21:19.102 * Background saving started by pid 94418
+94418:C 14 Jun 2024 18:21:19.118 * DB saved on disk
+94418:C 14 Jun 2024 18:21:19.121 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 18:21:19.204 * Background saving terminated with success
+58896:M 14 Jun 2024 18:26:20.090 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 18:26:20.092 * Background saving started by pid 95153
+95153:C 14 Jun 2024 18:26:20.103 * DB saved on disk
+95153:C 14 Jun 2024 18:26:20.104 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 18:26:20.193 * Background saving terminated with success
+58896:M 14 Jun 2024 18:31:21.035 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 18:31:21.038 * Background saving started by pid 95904
+95904:C 14 Jun 2024 18:31:21.049 * DB saved on disk
+95904:C 14 Jun 2024 18:31:21.050 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 18:31:21.139 * Background saving terminated with success
+58896:M 14 Jun 2024 18:36:22.029 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 18:36:22.030 * Background saving started by pid 96493
+96493:C 14 Jun 2024 18:36:22.044 * DB saved on disk
+96493:C 14 Jun 2024 18:36:22.051 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 18:36:22.132 * Background saving terminated with success
+58896:M 14 Jun 2024 18:53:06.805 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 18:53:06.810 * Background saving started by pid 96635
+96635:C 14 Jun 2024 18:53:06.933 * DB saved on disk
+96635:C 14 Jun 2024 18:53:06.938 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 18:53:07.012 * Background saving terminated with success
+58896:M 14 Jun 2024 18:58:08.026 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 18:58:08.027 * Background saving started by pid 97243
+97243:C 14 Jun 2024 18:58:08.038 * DB saved on disk
+97243:C 14 Jun 2024 18:58:08.039 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 18:58:08.128 * Background saving terminated with success
+58896:M 14 Jun 2024 19:03:09.096 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 19:03:09.097 * Background saving started by pid 97816
+97816:C 14 Jun 2024 19:03:09.115 * DB saved on disk
+97816:C 14 Jun 2024 19:03:09.116 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 19:03:09.198 * Background saving terminated with success
+58896:M 14 Jun 2024 19:08:10.026 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 19:08:10.027 * Background saving started by pid 98403
+98403:C 14 Jun 2024 19:08:10.036 * DB saved on disk
+98403:C 14 Jun 2024 19:08:10.037 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 19:08:10.128 * Background saving terminated with success
+58896:M 14 Jun 2024 19:13:11.015 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 19:13:11.017 * Background saving started by pid 99088
+99088:C 14 Jun 2024 19:13:11.026 * DB saved on disk
+99088:C 14 Jun 2024 19:13:11.027 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 19:13:11.117 * Background saving terminated with success
+58896:M 14 Jun 2024 19:18:12.060 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 19:18:12.062 * Background saving started by pid 99654
+99654:C 14 Jun 2024 19:18:12.070 * DB saved on disk
+99654:C 14 Jun 2024 19:18:12.071 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 19:18:12.163 * Background saving terminated with success
+58896:M 14 Jun 2024 19:23:13.095 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 19:23:13.099 * Background saving started by pid 651
+651:C 14 Jun 2024 19:23:13.126 * DB saved on disk
+651:C 14 Jun 2024 19:23:13.126 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 19:23:13.200 * Background saving terminated with success
+58896:M 14 Jun 2024 19:28:14.064 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 19:28:14.066 * Background saving started by pid 1362
+1362:C 14 Jun 2024 19:28:14.074 * DB saved on disk
+1362:C 14 Jun 2024 19:28:14.075 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 19:28:14.168 * Background saving terminated with success
+58896:M 14 Jun 2024 19:33:15.065 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 19:33:15.067 * Background saving started by pid 1965
+1965:C 14 Jun 2024 19:33:15.095 * DB saved on disk
+1965:C 14 Jun 2024 19:33:15.096 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 19:33:15.168 * Background saving terminated with success
+58896:M 14 Jun 2024 19:38:16.019 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 19:38:16.020 * Background saving started by pid 2640
+2640:C 14 Jun 2024 19:38:16.036 * DB saved on disk
+2640:C 14 Jun 2024 19:38:16.037 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 19:38:16.121 * Background saving terminated with success
+58896:M 14 Jun 2024 19:43:17.065 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 19:43:17.067 * Background saving started by pid 3209
+3209:C 14 Jun 2024 19:43:17.082 * DB saved on disk
+3209:C 14 Jun 2024 19:43:17.083 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 19:43:17.168 * Background saving terminated with success
+58896:M 14 Jun 2024 19:48:18.083 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 19:48:18.084 * Background saving started by pid 3807
+3807:C 14 Jun 2024 19:48:18.092 * DB saved on disk
+3807:C 14 Jun 2024 19:48:18.092 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 19:48:18.185 * Background saving terminated with success
+58896:M 14 Jun 2024 19:53:19.091 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 19:53:19.093 * Background saving started by pid 4495
+4495:C 14 Jun 2024 19:53:19.105 * DB saved on disk
+4495:C 14 Jun 2024 19:53:19.106 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 19:53:19.194 * Background saving terminated with success
+58896:M 14 Jun 2024 19:53:36.373 * DB saved on disk
+58896:M 14 Jun 2024 19:58:37.006 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 19:58:37.008 * Background saving started by pid 5355
+5355:C 14 Jun 2024 19:58:37.023 * DB saved on disk
+5355:C 14 Jun 2024 19:58:37.024 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 19:58:37.110 * Background saving terminated with success
+58896:M 14 Jun 2024 20:03:38.062 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 20:03:38.068 * Background saving started by pid 5941
+5941:C 14 Jun 2024 20:03:38.079 * DB saved on disk
+5941:C 14 Jun 2024 20:03:38.085 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 20:03:38.169 * Background saving terminated with success
+58896:M 14 Jun 2024 20:08:39.052 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 20:08:39.054 * Background saving started by pid 6579
+6579:C 14 Jun 2024 20:08:39.063 * DB saved on disk
+6579:C 14 Jun 2024 20:08:39.064 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 20:08:39.155 * Background saving terminated with success
+58896:M 14 Jun 2024 20:13:40.010 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 20:13:40.014 * Background saving started by pid 7271
+7271:C 14 Jun 2024 20:13:40.054 * DB saved on disk
+7271:C 14 Jun 2024 20:13:40.055 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 20:13:40.117 * Background saving terminated with success
+58896:M 14 Jun 2024 20:15:25.868 * DB saved on disk
+58896:M 14 Jun 2024 20:20:26.073 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 20:20:26.075 * Background saving started by pid 8477
+8477:C 14 Jun 2024 20:20:26.083 * DB saved on disk
+8477:C 14 Jun 2024 20:20:26.083 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 20:20:26.176 * Background saving terminated with success
+58896:M 14 Jun 2024 20:25:27.045 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 20:25:27.048 * Background saving started by pid 9337
+9337:C 14 Jun 2024 20:25:27.059 * DB saved on disk
+9337:C 14 Jun 2024 20:25:27.060 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 20:25:27.149 * Background saving terminated with success
+58896:M 14 Jun 2024 20:30:28.053 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 20:30:28.054 * Background saving started by pid 10207
+10207:C 14 Jun 2024 20:30:28.063 * DB saved on disk
+10207:C 14 Jun 2024 20:30:28.063 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 20:30:28.155 * Background saving terminated with success
+58896:M 14 Jun 2024 20:33:51.791 * DB saved on disk
+58896:M 14 Jun 2024 20:36:02.959 * DB saved on disk
+58896:M 14 Jun 2024 20:41:03.035 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 20:41:03.039 * Background saving started by pid 12285
+12285:C 14 Jun 2024 20:41:03.048 * DB saved on disk
+12285:C 14 Jun 2024 20:41:03.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 20:41:03.139 * Background saving terminated with success
+58896:M 14 Jun 2024 20:46:04.007 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 20:46:04.009 * Background saving started by pid 13266
+13266:C 14 Jun 2024 20:46:04.016 * DB saved on disk
+13266:C 14 Jun 2024 20:46:04.018 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 20:46:04.110 * Background saving terminated with success
+58896:M 14 Jun 2024 20:51:05.080 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 20:51:05.082 * Background saving started by pid 14114
+14114:C 14 Jun 2024 20:51:05.090 * DB saved on disk
+14114:C 14 Jun 2024 20:51:05.090 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 20:51:05.183 * Background saving terminated with success
+58896:M 14 Jun 2024 20:56:06.040 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 20:56:06.042 * Background saving started by pid 15071
+15071:C 14 Jun 2024 20:56:06.050 * DB saved on disk
+15071:C 14 Jun 2024 20:56:06.063 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 20:56:06.143 * Background saving terminated with success
+58896:M 14 Jun 2024 21:01:07.036 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 21:01:07.038 * Background saving started by pid 16066
+16066:C 14 Jun 2024 21:01:07.046 * DB saved on disk
+16066:C 14 Jun 2024 21:01:07.047 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 21:01:07.140 * Background saving terminated with success
+58896:M 14 Jun 2024 21:06:08.027 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 21:06:08.029 * Background saving started by pid 17073
+17073:C 14 Jun 2024 21:06:08.048 * DB saved on disk
+17073:C 14 Jun 2024 21:06:08.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 21:06:08.129 * Background saving terminated with success
+58896:M 14 Jun 2024 21:11:09.048 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 21:11:09.052 * Background saving started by pid 18014
+18014:C 14 Jun 2024 21:11:09.063 * DB saved on disk
+18014:C 14 Jun 2024 21:11:09.065 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 21:11:09.153 * Background saving terminated with success
+58896:M 14 Jun 2024 21:16:10.061 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 21:16:10.062 * Background saving started by pid 18941
+18941:C 14 Jun 2024 21:16:10.072 * DB saved on disk
+18941:C 14 Jun 2024 21:16:10.074 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 21:16:10.162 * Background saving terminated with success
+58896:M 14 Jun 2024 21:21:11.025 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 21:21:11.031 * Background saving started by pid 19797
+19797:C 14 Jun 2024 21:21:11.065 * DB saved on disk
+19797:C 14 Jun 2024 21:21:11.066 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 21:21:11.131 * Background saving terminated with success
+58896:M 14 Jun 2024 21:26:12.051 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 21:26:12.054 * Background saving started by pid 20708
+20708:C 14 Jun 2024 21:26:12.065 * DB saved on disk
+20708:C 14 Jun 2024 21:26:12.066 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 21:26:12.154 * Background saving terminated with success
+58896:M 14 Jun 2024 21:31:13.042 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 21:31:13.045 * Background saving started by pid 21604
+21604:C 14 Jun 2024 21:31:13.060 * DB saved on disk
+21604:C 14 Jun 2024 21:31:13.061 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 21:31:13.146 * Background saving terminated with success
+58896:M 14 Jun 2024 21:36:14.095 * 100 changes in 300 seconds. Saving...
+58896:M 14 Jun 2024 21:36:14.097 * Background saving started by pid 22441
+22441:C 14 Jun 2024 21:36:14.107 * DB saved on disk
+22441:C 14 Jun 2024 21:36:14.108 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
+58896:M 14 Jun 2024 21:36:14.198 * Background saving terminated with success
+58896:signal-handler (1718372303) Received SIGTERM scheduling shutdown...
+58896:M 14 Jun 2024 21:38:23.499 # User requested shutdown...
+58896:M 14 Jun 2024 21:38:23.499 * Saving the final RDB snapshot before exiting.
+58896:M 14 Jun 2024 21:38:23.509 * DB saved on disk
+58896:M 14 Jun 2024 21:38:23.510 # Redis is now ready to exit, bye bye...

From c485282b3a46efe0e5a59e74dcdbda7c5cca267e Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 19 Jun 2024 15:49:19 +0800
Subject: [PATCH 169/282] [CoreEngine] remove the unused files.

---
 .../federate/quick_start/octopus/dump.rdb     |  Bin 7248 -> 0 bytes
 .../federate/quick_start/octopus/nohup.out    | 3032 -----------------
 2 files changed, 3032 deletions(-)
 delete mode 100644 python/examples/federate/quick_start/octopus/dump.rdb
 delete mode 100644 python/examples/federate/quick_start/octopus/nohup.out

diff --git a/python/examples/federate/quick_start/octopus/dump.rdb b/python/examples/federate/quick_start/octopus/dump.rdb
deleted file mode 100644
index f31af5cb4fd49f4a377eb444135610e4915c5730..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 7248
zcmdT}TWlj&89p)b=GL&g*`)1mc8NDaw4#o4&fF)clAYK|ChN;~<7B%lRnFL+B$N17
zdz@s`Qf*Pi7Ag=(T(mDJf(k)ffshbX@DjG-0fEFtT*Lz+T0yxMRDo!fj<%ff%-He7
z<H@8Ch=*L1jL-Qm-~WH#|Id|3SeS<p!Tp}Hl$9%E8&dfM8-M|X_Ivfeq-CYD8{GG1
z6ge;5`RPn<dB3|_k+NfXDSzkYCx^j)w^&>rON$k07u*By1Z7?wt`435Oa}nq?6Q>2
z=f?7tm6ViSkx~^!RHQKqv23Rhjz$1C9SO&xshMbEG89ck;^E5)A)ZWKS%{~Ea4HtM
zoJxjf#t;V)0|^e%EWsi~5G>ajFBT;AL!o=a?eBrLdsN~1KevC7LEZDo(3PYRpK)On
z%~K45kfj)!U`eKC6nI$oX)ZCD!bc_-=IcJtJk3Ln=6C}80^x~ELNpo)t9Qx2I~YfF
z;fTbsZR(G{I@m6<1jCWK$J@cqb`RndApODLo3}sI$W^&oFjA$3-eR_C3z%q1UO2KS
zOhr=3L=tB*z7R{zTsB?DSz{qZvLr>)_y?q!-EP3^pN@oQBDGv6!jWjoIOI_7Axl#%
zPw+U(h<d-4dw>0#8a0zkH9TmbEhQIH*q<7RuqTM8Sc>Fm62k}s@nGJAms&ZbVG+B6
zZ8Q>j*qf9NP>cYMV>zCq34#IYtK+@xGk}P$#PoDjh)2Ll{pMt3KAE}{xi;Ua{ignK
z-Z<f$#W4{V<3_EH2)&Nn1(T!+hJ+N0&4t!r{&ny_4q#-Cab;6zE}+99^lQk~z7g(#
zfheA1NSbFTis3E3xGH3h;&Ere*yL!a7qU9{rN2znx=ZkD(v7**^h`!h$aBKNwt(XD
z93Q}cEM1=xnAqB56(K*pc8%asy6~9xQ2zSWD@u9_;SViRg1jlGXZV76m0l39qMWc+
zlw--vRw5iD65$0ZzP&=k=LLMmB9+n3tNm}CjBUrLI0*yQ*uqu}UFD*)nd!&NnJ7st
zO0!$cYJM~M<dl-%ARyYjCNFPaPprQ-nVsAndF2h@t(3uNrkD@ZkP{H)K(?3{<w9Vp
zS}s2*6_ijWQ>+%0dtaV?eem|bp74PFDCiPz4vr{bAi5@Qh~qi2urfZcl;y(8N21__
zSduSEH=YEZXFx}=zjH%W#PUu*=<=<3N@8L6e1})^t%Kaoa9^LaRgy7zBr##^40QD@
z%jJp^7x%y^uSd=*;LM(<dO|2D(u!2}ZFstK;$G*6y*;_&id@+9M0@*6tGK}9)qGmo
z{j}Gs6xXG~POeL1opSO<Y3XUNXRD-uUU6__Ow37J3h*HkF=?L6a70$5U`FC0LZnDY
z16f2Y!!)84@aU?dlqwVB<4f!VfeIO@R>n4^iZX@*VqV-X7R1d;;G*Y&@lv_CEa#+D
zNzN$MvP1?-g%t&OmssD{searwIi4j+T;>d|fX?8@E`A-H4i!K^``UT?{(-wYFTVBc
zEzScv1mN8eb5&`p3{LHJJaqG)r#}A5=P&eqZtn#T09z%@jt}~B9Xa{pcXrm_hg%1|
zwC;O)s5R7lgJ)VnE%?DM6KOtb#0hELb*TBKJQ!-h#M1DH6U>HM;;f|pw(Cv3PuTV*
zhSCWJJ@;>ijza4l<>+F7=2@B~C{AnVh&9UfyeT>yX@G%E{ivo#x~GM>Fh7gw_!GOT
zgT1`&SpO|URM>|==U5Po+A)h0QMRI%-46AwUm7yN_G(jM1*i3^4Q4eIi4ad|`ot7h
z7KR^d(Vz{n;-))WvUL3JY{{3i%xY3)re0A$<sE2G$gB=T_4~J+9@IHd1tvk;>Rg?l
z)tr+SV9194bV24c&oeB6NRGgeMu*7!)r)^~DpJG1c%AEB{(s@Ric)?p^o7v(KJe72
zsFT^~#HcQmG*%e(kLr}=8yc~r2a_FwkLY~nd!(z*62gcbOJG{9vBXI$P55Sg=QJ`5
zR+mD))4d01(3iTei8_22P8h7XG`#cO9yvQYK~Vlt%!J^qCnt+bPjnh|DAcH7SLm$K
zpidlZnV$b0KiuDvA4WgwPJHnMq8Sz|FB%%7-H*+RvF~t3b9GfNc=l@CWbH^UPGTTM
zaS+!Y#}EX356Y8)@Sq%b>TQ-*SZ6K3HZhDgw6vPUYJ+qW`C&47_FI3_yM!ZgTIOAC
zeuDM(I%SSw?FPw%<5#`osZW^}wE1Z3o_^-tH=H`0F%ZAFdY|6??Ez&xdFR&aPG=fx
zg>%t&^e655Kf@hkSZZ2ySTTBZk;GtlQV6mLYfAz`9a-cdnAAt^#-73gvNJ8@fIA(~
zG+ezGk0?C2vo!XGV<B7fNbUQ@X$Mv}bm%{RXDp>gEYq~UltLs)@&w6I(45NEZ~pvk
zCm^`qW8qPi;aNOuB3$>h(d?dk*^qNSi)qH7->^+ro&~cx9-(5JuG1>Ep)}NMFF9?K
z40-Tv6X>?ZV<25UYpQVDQnsM0-on&htr!(*S52%@pg;LaTeB%h8!7KW=i6ZeX=S19
zA=TSv0&M)#7880UyQ#mX<%C{;k2ckxQG21qkPr<`Mk7v(rjy7|_(?zIr~Qnd^+UC2
z&Q>;d<8&WmxN2IG=~XuH17j$%zsQ)1UT_FyJ?L&)2&UVEm2YVw>W(>#+9Ai7MceOY
zTXK^dD=q6qi<PbvQIo9Kq2mU5P2xn);BU-bxorkny%=2BiN(vo#jI|p3l(M26UPr2
zemejug5X(-rWi_R2A7#TxS6*YqUMH-rKoM}nB#0kn{2k;`B{fLs&_dT`LfLUIxy0H
zW6NyYu`Qg{&~49{y3Mxk8uIXu=6i-Ca29<q85(CbUVwbjAuRPbAnwxN#FCa&xH-up
zx-2KDZ~n|s6t)4=1%>+f^TrCo7IAp(ReOcj?sZd>SM{`kBkhDNG~H?^RHyEDzww9L
L55F{c_t*agW;0!L

diff --git a/python/examples/federate/quick_start/octopus/nohup.out b/python/examples/federate/quick_start/octopus/nohup.out
deleted file mode 100644
index 2350761958..0000000000
--- a/python/examples/federate/quick_start/octopus/nohup.out
+++ /dev/null
@@ -1,3032 +0,0 @@
-58896:C 27 May 2024 16:58:15.551 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo
-58896:C 27 May 2024 16:58:15.551 # Redis version=7.0.11, bits=64, commit=00000000, modified=0, pid=58896, just started
-58896:C 27 May 2024 16:58:15.551 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf
-58896:M 27 May 2024 16:58:15.551 * monotonic clock: POSIX clock_gettime
-58896:M 27 May 2024 16:58:15.552 * Running mode=standalone, port=6379.
-58896:M 27 May 2024 16:58:15.552 # WARNING: The TCP backlog setting of 511 cannot be enforced because kern.ipc.somaxconn is set to the lower value of 128.
-58896:M 27 May 2024 16:58:15.552 # Server initialized
-58896:M 27 May 2024 16:58:15.552 * Ready to accept connections
-58896:M 27 May 2024 16:58:22.918 * DB saved on disk
-58896:M 27 May 2024 17:58:23.012 * 1 changes in 3600 seconds. Saving...
-58896:M 27 May 2024 17:58:23.013 * Background saving started by pid 65644
-65644:C 27 May 2024 17:58:23.020 * DB saved on disk
-65644:C 27 May 2024 17:58:23.021 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 27 May 2024 17:58:23.114 * Background saving terminated with success
-58896:M 27 May 2024 19:13:18.626 * 1 changes in 3600 seconds. Saving...
-58896:M 27 May 2024 19:13:18.736 * Background saving started by pid 72278
-72278:C 27 May 2024 19:13:18.746 * DB saved on disk
-72278:C 27 May 2024 19:13:18.746 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 27 May 2024 19:13:18.838 * Background saving terminated with success
-58896:M 28 May 2024 14:08:25.606 * 1 changes in 3600 seconds. Saving...
-58896:M 28 May 2024 14:08:25.608 * Background saving started by pid 78120
-78120:C 28 May 2024 14:08:25.615 * DB saved on disk
-78120:C 28 May 2024 14:08:25.616 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 28 May 2024 14:08:25.709 * Background saving terminated with success
-58896:M 28 May 2024 15:19:26.423 * 1 changes in 3600 seconds. Saving...
-58896:M 28 May 2024 15:19:26.539 * Background saving started by pid 84225
-84225:C 28 May 2024 15:19:26.545 * DB saved on disk
-84225:C 28 May 2024 15:19:26.546 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 28 May 2024 15:19:26.642 * Background saving terminated with success
-58896:M 29 May 2024 17:57:07.834 * 1 changes in 3600 seconds. Saving...
-58896:M 29 May 2024 17:57:07.835 * Background saving started by pid 4206
-4206:C 29 May 2024 17:57:07.849 * DB saved on disk
-4206:C 29 May 2024 17:57:07.850 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 29 May 2024 17:57:07.935 * Background saving terminated with success
-58896:M 29 May 2024 18:57:08.006 * 1 changes in 3600 seconds. Saving...
-58896:M 29 May 2024 18:57:08.008 * Background saving started by pid 11453
-11453:C 29 May 2024 18:57:08.021 * DB saved on disk
-11453:C 29 May 2024 18:57:08.022 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 29 May 2024 18:57:08.109 * Background saving terminated with success
-58896:M 29 May 2024 19:21:58.973 * 100 changes in 300 seconds. Saving...
-58896:M 29 May 2024 19:21:58.973 * Background saving started by pid 19535
-19535:C 29 May 2024 19:21:58.981 * DB saved on disk
-19535:C 29 May 2024 19:21:58.981 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 29 May 2024 19:21:59.074 * Background saving terminated with success
-58896:M 29 May 2024 19:27:40.433 * DB saved on disk
-58896:M 29 May 2024 20:39:51.202 * 1 changes in 3600 seconds. Saving...
-58896:M 29 May 2024 20:39:51.203 * Background saving started by pid 21314
-21314:C 29 May 2024 20:39:51.314 * DB saved on disk
-21314:C 29 May 2024 20:39:51.315 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 29 May 2024 20:39:51.407 * Background saving terminated with success
-58896:M 29 May 2024 22:49:24.515 * 1 changes in 3600 seconds. Saving...
-58896:M 29 May 2024 22:49:24.516 * Background saving started by pid 25814
-25814:C 29 May 2024 22:49:24.587 * DB saved on disk
-25814:C 29 May 2024 22:49:24.587 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 29 May 2024 22:49:24.617 * Background saving terminated with success
-58896:M 29 May 2024 23:08:52.971 * 100 changes in 300 seconds. Saving...
-58896:M 29 May 2024 23:08:52.972 * Background saving started by pid 28739
-28739:C 29 May 2024 23:08:52.978 * DB saved on disk
-28739:C 29 May 2024 23:08:52.978 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 29 May 2024 23:08:53.073 * Background saving terminated with success
-58896:M 30 May 2024 00:09:08.434 * 1 changes in 3600 seconds. Saving...
-58896:M 30 May 2024 00:09:08.435 * Background saving started by pid 30988
-30988:C 30 May 2024 00:09:08.569 * DB saved on disk
-30988:C 30 May 2024 00:09:08.569 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 30 May 2024 00:09:08.638 * Background saving terminated with success
-58896:M 30 May 2024 12:51:39.719 * 1 changes in 3600 seconds. Saving...
-58896:M 30 May 2024 12:51:39.720 * Background saving started by pid 41021
-41021:C 30 May 2024 12:51:39.729 * DB saved on disk
-41021:C 30 May 2024 12:51:39.729 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 30 May 2024 12:51:39.821 * Background saving terminated with success
-58896:M 30 May 2024 12:56:40.011 * 100 changes in 300 seconds. Saving...
-58896:M 30 May 2024 12:56:40.017 * Background saving started by pid 41850
-41850:C 30 May 2024 12:56:40.033 * DB saved on disk
-41850:C 30 May 2024 12:56:40.034 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 30 May 2024 12:56:40.120 * Background saving terminated with success
-58896:M 30 May 2024 13:16:33.022 * 100 changes in 300 seconds. Saving...
-58896:M 30 May 2024 13:16:33.138 * Background saving started by pid 42823
-42823:C 30 May 2024 13:16:33.145 * DB saved on disk
-42823:C 30 May 2024 13:16:33.145 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 30 May 2024 13:16:33.240 * Background saving terminated with success
-58896:M 30 May 2024 15:25:24.413 * 1 changes in 3600 seconds. Saving...
-58896:M 30 May 2024 15:25:24.413 * Background saving started by pid 48206
-48206:C 30 May 2024 15:25:24.425 * DB saved on disk
-48206:C 30 May 2024 15:25:24.426 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 30 May 2024 15:25:24.513 * Background saving terminated with success
-58896:M 30 May 2024 15:30:25.016 * 100 changes in 300 seconds. Saving...
-58896:M 30 May 2024 15:30:25.017 * Background saving started by pid 48800
-48800:C 30 May 2024 15:30:25.029 * DB saved on disk
-48800:C 30 May 2024 15:30:25.031 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 30 May 2024 15:30:25.119 * Background saving terminated with success
-58896:M 30 May 2024 15:35:26.012 * 100 changes in 300 seconds. Saving...
-58896:M 30 May 2024 15:35:26.014 * Background saving started by pid 49390
-49390:C 30 May 2024 15:35:26.025 * DB saved on disk
-49390:C 30 May 2024 15:35:26.025 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 30 May 2024 15:35:26.115 * Background saving terminated with success
-58896:M 30 May 2024 16:42:51.523 * 1 changes in 3600 seconds. Saving...
-58896:M 30 May 2024 16:42:51.646 * Background saving started by pid 65592
-65592:C 30 May 2024 16:42:51.655 * DB saved on disk
-65592:C 30 May 2024 16:42:51.655 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 30 May 2024 16:42:51.747 * Background saving terminated with success
-58896:M 30 May 2024 17:15:29.037 * 100 changes in 300 seconds. Saving...
-58896:M 30 May 2024 17:15:29.039 * Background saving started by pid 69523
-69523:C 30 May 2024 17:15:29.050 * DB saved on disk
-69523:C 30 May 2024 17:15:29.050 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 30 May 2024 17:15:29.142 * Background saving terminated with success
-58896:M 30 May 2024 18:15:30.060 * 1 changes in 3600 seconds. Saving...
-58896:M 30 May 2024 18:15:30.063 * Background saving started by pid 84706
-84706:C 30 May 2024 18:15:30.075 * DB saved on disk
-84706:C 30 May 2024 18:15:30.076 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 30 May 2024 18:15:30.167 * Background saving terminated with success
-58896:M 03 Jun 2024 18:06:53.699 * 1 changes in 3600 seconds. Saving...
-58896:M 03 Jun 2024 18:06:53.703 * Background saving started by pid 90870
-90870:C 03 Jun 2024 18:06:53.713 * DB saved on disk
-90870:C 03 Jun 2024 18:06:53.714 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 03 Jun 2024 18:06:53.803 * Background saving terminated with success
-58896:M 03 Jun 2024 18:11:54.075 * 100 changes in 300 seconds. Saving...
-58896:M 03 Jun 2024 18:11:54.078 * Background saving started by pid 91526
-91526:C 03 Jun 2024 18:11:54.087 * DB saved on disk
-91526:C 03 Jun 2024 18:11:54.087 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 03 Jun 2024 18:11:54.180 * Background saving terminated with success
-58896:M 03 Jun 2024 18:18:46.019 * 100 changes in 300 seconds. Saving...
-58896:M 03 Jun 2024 18:18:46.024 * Background saving started by pid 92286
-92286:C 03 Jun 2024 18:18:46.034 * DB saved on disk
-92286:C 03 Jun 2024 18:18:46.035 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 03 Jun 2024 18:18:46.126 * Background saving terminated with success
-58896:M 03 Jun 2024 18:24:50.037 * 100 changes in 300 seconds. Saving...
-58896:M 03 Jun 2024 18:24:50.038 * Background saving started by pid 93124
-93124:C 03 Jun 2024 18:24:50.050 * DB saved on disk
-93124:C 03 Jun 2024 18:24:50.050 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 03 Jun 2024 18:24:50.140 * Background saving terminated with success
-58896:M 03 Jun 2024 18:29:51.049 * 100 changes in 300 seconds. Saving...
-58896:M 03 Jun 2024 18:29:51.051 * Background saving started by pid 93844
-93844:C 03 Jun 2024 18:29:51.063 * DB saved on disk
-93844:C 03 Jun 2024 18:29:51.064 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 03 Jun 2024 18:29:51.153 * Background saving terminated with success
-58896:M 03 Jun 2024 18:34:52.071 * 100 changes in 300 seconds. Saving...
-58896:M 03 Jun 2024 18:34:52.073 * Background saving started by pid 94768
-94768:C 03 Jun 2024 18:34:52.088 * DB saved on disk
-94768:C 03 Jun 2024 18:34:52.089 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 03 Jun 2024 18:34:52.174 * Background saving terminated with success
-58896:M 03 Jun 2024 18:48:44.981 * 100 changes in 300 seconds. Saving...
-58896:M 03 Jun 2024 18:48:44.982 * Background saving started by pid 96629
-96629:C 03 Jun 2024 18:48:45.005 * DB saved on disk
-96629:C 03 Jun 2024 18:48:45.006 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 03 Jun 2024 18:48:45.083 * Background saving terminated with success
-58896:M 03 Jun 2024 23:49:36.949 * 1 changes in 3600 seconds. Saving...
-58896:M 03 Jun 2024 23:49:36.968 * Background saving started by pid 97783
-97783:C 03 Jun 2024 23:49:37.370 * DB saved on disk
-97783:C 03 Jun 2024 23:49:37.436 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 03 Jun 2024 23:49:37.493 * Background saving terminated with success
-58896:M 04 Jun 2024 14:48:17.992 * 1 changes in 3600 seconds. Saving...
-58896:M 04 Jun 2024 14:48:18.002 * Background saving started by pid 19353
-19353:C 04 Jun 2024 14:48:18.012 * DB saved on disk
-19353:C 04 Jun 2024 14:48:18.012 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 04 Jun 2024 14:48:18.109 * Background saving terminated with success
-58896:M 04 Jun 2024 14:53:19.081 * 100 changes in 300 seconds. Saving...
-58896:M 04 Jun 2024 14:53:19.082 * Background saving started by pid 19823
-19823:C 04 Jun 2024 14:53:19.087 * DB saved on disk
-19823:C 04 Jun 2024 14:53:19.088 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 04 Jun 2024 14:53:19.184 * Background saving terminated with success
-58896:M 04 Jun 2024 15:17:23.645 * 100 changes in 300 seconds. Saving...
-58896:M 04 Jun 2024 15:17:23.646 * Background saving started by pid 23721
-23721:C 04 Jun 2024 15:17:23.656 * DB saved on disk
-23721:C 04 Jun 2024 15:17:23.656 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 04 Jun 2024 15:17:23.748 * Background saving terminated with success
-58896:M 05 Jun 2024 15:06:53.989 * 1 changes in 3600 seconds. Saving...
-58896:M 05 Jun 2024 15:06:53.991 * Background saving started by pid 74889
-74889:C 05 Jun 2024 15:06:54.001 * DB saved on disk
-74889:C 05 Jun 2024 15:06:54.001 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 05 Jun 2024 15:06:54.091 * Background saving terminated with success
-58896:M 05 Jun 2024 15:11:55.053 * 100 changes in 300 seconds. Saving...
-58896:M 05 Jun 2024 15:11:55.057 * Background saving started by pid 75549
-75549:C 05 Jun 2024 15:11:55.084 * DB saved on disk
-75549:C 05 Jun 2024 15:11:55.084 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 05 Jun 2024 15:11:55.158 * Background saving terminated with success
-58896:M 05 Jun 2024 15:16:56.075 * 100 changes in 300 seconds. Saving...
-58896:M 05 Jun 2024 15:16:56.078 * Background saving started by pid 76399
-76399:C 05 Jun 2024 15:16:56.095 * DB saved on disk
-76399:C 05 Jun 2024 15:16:56.097 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 05 Jun 2024 15:16:56.180 * Background saving terminated with success
-58896:M 05 Jun 2024 15:51:48.859 * 100 changes in 300 seconds. Saving...
-58896:M 05 Jun 2024 15:51:48.861 * Background saving started by pid 79566
-79566:C 05 Jun 2024 15:51:48.871 * DB saved on disk
-79566:C 05 Jun 2024 15:51:48.871 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 05 Jun 2024 15:51:48.963 * Background saving terminated with success
-58896:M 05 Jun 2024 16:17:59.180 * 100 changes in 300 seconds. Saving...
-58896:M 05 Jun 2024 16:17:59.183 * Background saving started by pid 82108
-82108:C 05 Jun 2024 16:17:59.192 * DB saved on disk
-82108:C 05 Jun 2024 16:17:59.193 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 05 Jun 2024 16:17:59.284 * Background saving terminated with success
-58896:M 05 Jun 2024 16:35:17.999 * 100 changes in 300 seconds. Saving...
-58896:M 05 Jun 2024 16:35:18.002 * Background saving started by pid 83723
-83723:C 05 Jun 2024 16:35:18.010 * DB saved on disk
-83723:C 05 Jun 2024 16:35:18.011 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 05 Jun 2024 16:35:18.103 * Background saving terminated with success
-58896:M 05 Jun 2024 16:43:22.260 * 100 changes in 300 seconds. Saving...
-58896:M 05 Jun 2024 16:43:22.261 * Background saving started by pid 84583
-84583:C 05 Jun 2024 16:43:22.275 * DB saved on disk
-84583:C 05 Jun 2024 16:43:22.278 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 05 Jun 2024 16:43:22.362 * Background saving terminated with success
-58896:M 05 Jun 2024 16:48:23.046 * 100 changes in 300 seconds. Saving...
-58896:M 05 Jun 2024 16:48:23.047 * Background saving started by pid 85131
-85131:C 05 Jun 2024 16:48:23.065 * DB saved on disk
-85131:C 05 Jun 2024 16:48:23.066 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 05 Jun 2024 16:48:23.148 * Background saving terminated with success
-58896:M 05 Jun 2024 17:24:20.810 * 100 changes in 300 seconds. Saving...
-58896:M 05 Jun 2024 17:24:20.813 * Background saving started by pid 90105
-90105:C 05 Jun 2024 17:24:20.823 * DB saved on disk
-90105:C 05 Jun 2024 17:24:20.826 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 05 Jun 2024 17:24:20.914 * Background saving terminated with success
-58896:M 05 Jun 2024 17:38:07.895 * 100 changes in 300 seconds. Saving...
-58896:M 05 Jun 2024 17:38:07.898 * Background saving started by pid 91506
-91506:C 05 Jun 2024 17:38:07.907 * DB saved on disk
-91506:C 05 Jun 2024 17:38:07.907 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 05 Jun 2024 17:38:07.998 * Background saving terminated with success
-58896:M 05 Jun 2024 17:43:08.028 * 100 changes in 300 seconds. Saving...
-58896:M 05 Jun 2024 17:43:08.031 * Background saving started by pid 92110
-92110:C 05 Jun 2024 17:43:08.047 * DB saved on disk
-92110:C 05 Jun 2024 17:43:08.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 05 Jun 2024 17:43:08.133 * Background saving terminated with success
-58896:M 05 Jun 2024 17:48:09.040 * 100 changes in 300 seconds. Saving...
-58896:M 05 Jun 2024 17:48:09.042 * Background saving started by pid 92684
-92684:C 05 Jun 2024 17:48:09.056 * DB saved on disk
-92684:C 05 Jun 2024 17:48:09.057 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 05 Jun 2024 17:48:09.144 * Background saving terminated with success
-58896:M 05 Jun 2024 17:53:10.043 * 100 changes in 300 seconds. Saving...
-58896:M 05 Jun 2024 17:53:10.045 * Background saving started by pid 93293
-93293:C 05 Jun 2024 17:53:10.054 * DB saved on disk
-93293:C 05 Jun 2024 17:53:10.056 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 05 Jun 2024 17:53:10.146 * Background saving terminated with success
-58896:M 05 Jun 2024 17:58:11.081 * 100 changes in 300 seconds. Saving...
-58896:M 05 Jun 2024 17:58:11.083 * Background saving started by pid 93757
-93757:C 05 Jun 2024 17:58:11.096 * DB saved on disk
-93757:C 05 Jun 2024 17:58:11.097 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 05 Jun 2024 17:58:11.186 * Background saving terminated with success
-58896:M 07 Jun 2024 16:39:41.351 * 1 changes in 3600 seconds. Saving...
-58896:M 07 Jun 2024 16:39:41.353 * Background saving started by pid 27460
-27460:C 07 Jun 2024 16:39:41.369 * DB saved on disk
-27460:C 07 Jun 2024 16:39:41.370 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 07 Jun 2024 16:39:41.454 * Background saving terminated with success
-58896:M 07 Jun 2024 16:44:42.066 * 100 changes in 300 seconds. Saving...
-58896:M 07 Jun 2024 16:44:42.071 * Background saving started by pid 28358
-28358:C 07 Jun 2024 16:44:42.082 * DB saved on disk
-28358:C 07 Jun 2024 16:44:42.083 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 07 Jun 2024 16:44:42.173 * Background saving terminated with success
-58896:M 07 Jun 2024 17:16:01.732 * 100 changes in 300 seconds. Saving...
-58896:M 07 Jun 2024 17:16:01.734 * Background saving started by pid 33049
-33049:C 07 Jun 2024 17:16:01.746 * DB saved on disk
-33049:C 07 Jun 2024 17:16:01.746 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 07 Jun 2024 17:16:01.835 * Background saving terminated with success
-58896:M 07 Jun 2024 17:21:02.052 * 100 changes in 300 seconds. Saving...
-58896:M 07 Jun 2024 17:21:02.059 * Background saving started by pid 33638
-33638:C 07 Jun 2024 17:21:02.070 * DB saved on disk
-33638:C 07 Jun 2024 17:21:02.071 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 07 Jun 2024 17:21:02.161 * Background saving terminated with success
-58896:M 07 Jun 2024 17:33:38.484 * 100 changes in 300 seconds. Saving...
-58896:M 07 Jun 2024 17:33:38.486 * Background saving started by pid 35103
-35103:C 07 Jun 2024 17:33:38.495 * DB saved on disk
-35103:C 07 Jun 2024 17:33:38.495 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 07 Jun 2024 17:33:38.587 * Background saving terminated with success
-58896:M 07 Jun 2024 17:38:39.030 * 100 changes in 300 seconds. Saving...
-58896:M 07 Jun 2024 17:38:39.031 * Background saving started by pid 35753
-35753:C 07 Jun 2024 17:38:39.044 * DB saved on disk
-35753:C 07 Jun 2024 17:38:39.045 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 07 Jun 2024 17:38:39.132 * Background saving terminated with success
-58896:M 07 Jun 2024 17:43:40.049 * 100 changes in 300 seconds. Saving...
-58896:M 07 Jun 2024 17:43:40.051 * Background saving started by pid 36373
-36373:C 07 Jun 2024 17:43:40.062 * DB saved on disk
-36373:C 07 Jun 2024 17:43:40.062 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 07 Jun 2024 17:43:40.152 * Background saving terminated with success
-58896:M 07 Jun 2024 17:49:19.866 * 100 changes in 300 seconds. Saving...
-58896:M 07 Jun 2024 17:49:19.867 * Background saving started by pid 36987
-36987:C 07 Jun 2024 17:49:19.874 * DB saved on disk
-36987:C 07 Jun 2024 17:49:19.875 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 07 Jun 2024 17:49:19.967 * Background saving terminated with success
-58896:M 07 Jun 2024 17:54:20.070 * 100 changes in 300 seconds. Saving...
-58896:M 07 Jun 2024 17:54:20.074 * Background saving started by pid 37622
-37622:C 07 Jun 2024 17:54:20.087 * DB saved on disk
-37622:C 07 Jun 2024 17:54:20.088 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 07 Jun 2024 17:54:20.178 * Background saving terminated with success
-58896:M 07 Jun 2024 18:00:52.446 * 100 changes in 300 seconds. Saving...
-58896:M 07 Jun 2024 18:00:52.448 * Background saving started by pid 38338
-38338:C 07 Jun 2024 18:00:52.458 * DB saved on disk
-38338:C 07 Jun 2024 18:00:52.460 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 07 Jun 2024 18:00:52.548 * Background saving terminated with success
-58896:M 07 Jun 2024 18:05:53.016 * 100 changes in 300 seconds. Saving...
-58896:M 07 Jun 2024 18:05:53.018 * Background saving started by pid 39003
-39003:C 07 Jun 2024 18:05:53.032 * DB saved on disk
-39003:C 07 Jun 2024 18:05:53.033 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 07 Jun 2024 18:05:53.119 * Background saving terminated with success
-58896:M 07 Jun 2024 18:10:54.046 * 100 changes in 300 seconds. Saving...
-58896:M 07 Jun 2024 18:10:54.047 * Background saving started by pid 39675
-39675:C 07 Jun 2024 18:10:54.071 * DB saved on disk
-39675:C 07 Jun 2024 18:10:54.073 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 07 Jun 2024 18:10:54.149 * Background saving terminated with success
-58896:M 07 Jun 2024 18:22:32.800 * 100 changes in 300 seconds. Saving...
-58896:M 07 Jun 2024 18:22:32.801 * Background saving started by pid 40966
-40966:C 07 Jun 2024 18:22:32.812 * DB saved on disk
-40966:C 07 Jun 2024 18:22:32.813 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 07 Jun 2024 18:22:32.904 * Background saving terminated with success
-58896:M 07 Jun 2024 18:27:33.013 * 100 changes in 300 seconds. Saving...
-58896:M 07 Jun 2024 18:27:33.013 * Background saving started by pid 41713
-41713:C 07 Jun 2024 18:27:33.026 * DB saved on disk
-41713:C 07 Jun 2024 18:27:33.028 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 07 Jun 2024 18:27:33.115 * Background saving terminated with success
-58896:M 07 Jun 2024 18:32:34.020 * 100 changes in 300 seconds. Saving...
-58896:M 07 Jun 2024 18:32:34.022 * Background saving started by pid 42366
-42366:C 07 Jun 2024 18:32:34.039 * DB saved on disk
-42366:C 07 Jun 2024 18:32:34.041 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 07 Jun 2024 18:32:34.124 * Background saving terminated with success
-58896:M 09 Jun 2024 01:21:17.009 * 1 changes in 3600 seconds. Saving...
-58896:M 09 Jun 2024 01:21:17.010 * Background saving started by pid 51967
-51967:C 09 Jun 2024 01:21:17.025 * DB saved on disk
-51967:C 09 Jun 2024 01:21:17.025 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 09 Jun 2024 01:21:17.111 * Background saving terminated with success
-58896:M 09 Jun 2024 01:26:18.067 * 100 changes in 300 seconds. Saving...
-58896:M 09 Jun 2024 01:26:18.068 * Background saving started by pid 52613
-52613:C 09 Jun 2024 01:26:18.076 * DB saved on disk
-52613:C 09 Jun 2024 01:26:18.077 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 09 Jun 2024 01:26:18.169 * Background saving terminated with success
-58896:M 09 Jun 2024 01:31:19.072 * 100 changes in 300 seconds. Saving...
-58896:M 09 Jun 2024 01:31:19.074 * Background saving started by pid 53131
-53131:C 09 Jun 2024 01:31:19.092 * DB saved on disk
-53131:C 09 Jun 2024 01:31:19.094 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 09 Jun 2024 01:31:19.176 * Background saving terminated with success
-58896:M 09 Jun 2024 01:36:20.080 * 100 changes in 300 seconds. Saving...
-58896:M 09 Jun 2024 01:36:20.083 * Background saving started by pid 53704
-53704:C 09 Jun 2024 01:36:20.094 * DB saved on disk
-53704:C 09 Jun 2024 01:36:20.096 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 09 Jun 2024 01:36:20.185 * Background saving terminated with success
-58896:M 09 Jun 2024 01:41:21.074 * 100 changes in 300 seconds. Saving...
-58896:M 09 Jun 2024 01:41:21.076 * Background saving started by pid 54385
-54385:C 09 Jun 2024 01:41:21.090 * DB saved on disk
-54385:C 09 Jun 2024 01:41:21.091 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 09 Jun 2024 01:41:21.179 * Background saving terminated with success
-58896:M 09 Jun 2024 01:46:22.080 * 100 changes in 300 seconds. Saving...
-58896:M 09 Jun 2024 01:46:22.083 * Background saving started by pid 54916
-54916:C 09 Jun 2024 01:46:22.102 * DB saved on disk
-54916:C 09 Jun 2024 01:46:22.102 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 09 Jun 2024 01:46:22.184 * Background saving terminated with success
-58896:M 09 Jun 2024 01:51:23.049 * 100 changes in 300 seconds. Saving...
-58896:M 09 Jun 2024 01:51:23.052 * Background saving started by pid 55511
-55511:C 09 Jun 2024 01:51:23.064 * DB saved on disk
-55511:C 09 Jun 2024 01:51:23.064 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 09 Jun 2024 01:51:23.152 * Background saving terminated with success
-58896:M 09 Jun 2024 01:59:40.782 * 100 changes in 300 seconds. Saving...
-58896:M 09 Jun 2024 01:59:40.783 * Background saving started by pid 56315
-56315:C 09 Jun 2024 01:59:40.795 * DB saved on disk
-56315:C 09 Jun 2024 01:59:40.795 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 09 Jun 2024 01:59:40.883 * Background saving terminated with success
-58896:M 09 Jun 2024 02:04:41.092 * 100 changes in 300 seconds. Saving...
-58896:M 09 Jun 2024 02:04:41.096 * Background saving started by pid 56862
-56862:C 09 Jun 2024 02:04:41.111 * DB saved on disk
-56862:C 09 Jun 2024 02:04:41.112 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 09 Jun 2024 02:04:41.198 * Background saving terminated with success
-58896:M 11 Jun 2024 13:19:27.398 * 1 changes in 3600 seconds. Saving...
-58896:M 11 Jun 2024 13:19:27.402 * Background saving started by pid 99129
-99129:C 11 Jun 2024 13:19:27.435 * DB saved on disk
-99129:C 11 Jun 2024 13:19:27.437 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 13:19:27.503 * Background saving terminated with success
-58896:M 11 Jun 2024 13:24:28.020 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 13:24:28.021 * Background saving started by pid 222
-222:C 11 Jun 2024 13:24:28.037 * DB saved on disk
-222:C 11 Jun 2024 13:24:28.038 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 13:24:28.127 * Background saving terminated with success
-58896:M 11 Jun 2024 13:29:29.094 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 13:29:29.096 * Background saving started by pid 1314
-1314:C 11 Jun 2024 13:29:29.108 * DB saved on disk
-1314:C 11 Jun 2024 13:29:29.114 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 13:29:29.197 * Background saving terminated with success
-58896:M 11 Jun 2024 13:34:30.038 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 13:34:30.042 * Background saving started by pid 1927
-1927:C 11 Jun 2024 13:34:30.054 * DB saved on disk
-1927:C 11 Jun 2024 13:34:30.055 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 13:34:30.144 * Background saving terminated with success
-58896:M 11 Jun 2024 13:39:31.059 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 13:39:31.063 * Background saving started by pid 2516
-2516:C 11 Jun 2024 13:39:31.100 * DB saved on disk
-2516:C 11 Jun 2024 13:39:31.102 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 13:39:31.164 * Background saving terminated with success
-58896:M 11 Jun 2024 13:44:32.023 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 13:44:32.024 * Background saving started by pid 3067
-3067:C 11 Jun 2024 13:44:32.033 * DB saved on disk
-3067:C 11 Jun 2024 13:44:32.033 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 13:44:32.125 * Background saving terminated with success
-58896:M 11 Jun 2024 13:51:40.611 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 13:51:40.614 * Background saving started by pid 3795
-3795:C 11 Jun 2024 13:51:40.627 * DB saved on disk
-3795:C 11 Jun 2024 13:51:40.628 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 13:51:40.715 * Background saving terminated with success
-58896:M 11 Jun 2024 13:56:41.066 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 13:56:41.069 * Background saving started by pid 4441
-4441:C 11 Jun 2024 13:56:41.091 * DB saved on disk
-4441:C 11 Jun 2024 13:56:41.095 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 13:56:41.170 * Background saving terminated with success
-58896:M 11 Jun 2024 14:01:42.031 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 14:01:42.035 * Background saving started by pid 5198
-5198:C 11 Jun 2024 14:01:42.058 * DB saved on disk
-5198:C 11 Jun 2024 14:01:42.058 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 14:01:42.137 * Background saving terminated with success
-58896:M 11 Jun 2024 14:08:57.215 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 14:08:57.217 * Background saving started by pid 5893
-5893:C 11 Jun 2024 14:08:57.230 * DB saved on disk
-5893:C 11 Jun 2024 14:08:57.232 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 14:08:57.319 * Background saving terminated with success
-58896:M 11 Jun 2024 14:13:58.030 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 14:13:58.034 * Background saving started by pid 6544
-6544:C 11 Jun 2024 14:13:58.061 * DB saved on disk
-6544:C 11 Jun 2024 14:13:58.063 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 14:13:58.142 * Background saving terminated with success
-58896:M 11 Jun 2024 14:18:59.057 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 14:18:59.060 * Background saving started by pid 7250
-7250:C 11 Jun 2024 14:18:59.074 * DB saved on disk
-7250:C 11 Jun 2024 14:18:59.076 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 14:18:59.164 * Background saving terminated with success
-58896:M 11 Jun 2024 14:24:00.000 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 14:24:00.003 * Background saving started by pid 7777
-7777:C 11 Jun 2024 14:24:00.017 * DB saved on disk
-7777:C 11 Jun 2024 14:24:00.018 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 14:24:00.105 * Background saving terminated with success
-58896:M 11 Jun 2024 14:29:01.003 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 14:29:01.005 * Background saving started by pid 9446
-9446:C 11 Jun 2024 14:29:01.022 * DB saved on disk
-9446:C 11 Jun 2024 14:29:01.023 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 14:29:01.106 * Background saving terminated with success
-58896:M 11 Jun 2024 14:38:46.573 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 14:38:46.575 * Background saving started by pid 16553
-16553:C 11 Jun 2024 14:38:46.587 * DB saved on disk
-16553:C 11 Jun 2024 14:38:46.587 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 14:38:46.677 * Background saving terminated with success
-58896:M 11 Jun 2024 14:43:47.042 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 14:43:47.046 * Background saving started by pid 18767
-18767:C 11 Jun 2024 14:43:47.064 * DB saved on disk
-18767:C 11 Jun 2024 14:43:47.065 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 14:43:47.149 * Background saving terminated with success
-58896:M 11 Jun 2024 14:48:48.071 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 14:48:48.072 * Background saving started by pid 19508
-19508:C 11 Jun 2024 14:48:48.105 * DB saved on disk
-19508:C 11 Jun 2024 14:48:48.106 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 14:48:48.173 * Background saving terminated with success
-58896:M 11 Jun 2024 15:55:55.546 * 1 changes in 3600 seconds. Saving...
-58896:M 11 Jun 2024 15:55:55.665 * Background saving started by pid 20200
-20200:C 11 Jun 2024 15:55:55.673 * DB saved on disk
-20200:C 11 Jun 2024 15:55:55.674 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 15:55:55.766 * Background saving terminated with success
-58896:M 11 Jun 2024 16:55:56.028 * 1 changes in 3600 seconds. Saving...
-58896:M 11 Jun 2024 16:55:56.029 * Background saving started by pid 26736
-26736:C 11 Jun 2024 16:55:56.039 * DB saved on disk
-26736:C 11 Jun 2024 16:55:56.041 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 16:55:56.130 * Background saving terminated with success
-58896:M 11 Jun 2024 17:00:57.094 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 17:00:57.096 * Background saving started by pid 27696
-27696:C 11 Jun 2024 17:00:57.110 * DB saved on disk
-27696:C 11 Jun 2024 17:00:57.111 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 17:00:57.197 * Background saving terminated with success
-58896:M 11 Jun 2024 17:05:58.022 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 17:05:58.028 * Background saving started by pid 28649
-28649:C 11 Jun 2024 17:05:58.040 * DB saved on disk
-28649:C 11 Jun 2024 17:05:58.042 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 17:05:58.130 * Background saving terminated with success
-58896:M 11 Jun 2024 17:10:59.011 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 17:10:59.013 * Background saving started by pid 29517
-29517:C 11 Jun 2024 17:10:59.028 * DB saved on disk
-29517:C 11 Jun 2024 17:10:59.029 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 17:10:59.114 * Background saving terminated with success
-58896:M 11 Jun 2024 17:16:00.031 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 17:16:00.033 * Background saving started by pid 30399
-30399:C 11 Jun 2024 17:16:00.044 * DB saved on disk
-30399:C 11 Jun 2024 17:16:00.045 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 17:16:00.134 * Background saving terminated with success
-58896:M 11 Jun 2024 17:21:01.099 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 17:21:01.102 * Background saving started by pid 34058
-34058:C 11 Jun 2024 17:21:01.136 * DB saved on disk
-34058:C 11 Jun 2024 17:21:01.136 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 17:21:01.204 * Background saving terminated with success
-58896:M 11 Jun 2024 17:26:02.077 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 17:26:02.078 * Background saving started by pid 35339
-35339:C 11 Jun 2024 17:26:02.092 * DB saved on disk
-35339:C 11 Jun 2024 17:26:02.093 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 17:26:02.179 * Background saving terminated with success
-58896:M 11 Jun 2024 17:31:03.024 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 17:31:03.027 * Background saving started by pid 36164
-36164:C 11 Jun 2024 17:31:03.044 * DB saved on disk
-36164:C 11 Jun 2024 17:31:03.046 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 17:31:03.129 * Background saving terminated with success
-58896:M 11 Jun 2024 17:36:04.080 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 17:36:04.083 * Background saving started by pid 37017
-37017:C 11 Jun 2024 17:36:04.100 * DB saved on disk
-37017:C 11 Jun 2024 17:36:04.100 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 17:36:04.184 * Background saving terminated with success
-58896:M 11 Jun 2024 17:41:05.070 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 17:41:05.071 * Background saving started by pid 37887
-37887:C 11 Jun 2024 17:41:05.096 * DB saved on disk
-37887:C 11 Jun 2024 17:41:05.097 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 17:41:05.173 * Background saving terminated with success
-58896:M 11 Jun 2024 17:46:06.098 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 17:46:06.099 * Background saving started by pid 38777
-38777:C 11 Jun 2024 17:46:06.110 * DB saved on disk
-38777:C 11 Jun 2024 17:46:06.111 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 17:46:06.200 * Background saving terminated with success
-58896:M 11 Jun 2024 17:51:07.052 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 17:51:07.054 * Background saving started by pid 39630
-39630:C 11 Jun 2024 17:51:07.065 * DB saved on disk
-39630:C 11 Jun 2024 17:51:07.065 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 17:51:07.156 * Background saving terminated with success
-58896:M 11 Jun 2024 17:56:08.017 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 17:56:08.020 * Background saving started by pid 40590
-40590:C 11 Jun 2024 17:56:08.031 * DB saved on disk
-40590:C 11 Jun 2024 17:56:08.032 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 17:56:08.122 * Background saving terminated with success
-58896:M 11 Jun 2024 18:01:09.041 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 18:01:09.044 * Background saving started by pid 41552
-41552:C 11 Jun 2024 18:01:09.054 * DB saved on disk
-41552:C 11 Jun 2024 18:01:09.056 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 18:01:09.145 * Background saving terminated with success
-58896:M 11 Jun 2024 18:06:10.031 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 18:06:10.034 * Background saving started by pid 42635
-42635:C 11 Jun 2024 18:06:10.047 * DB saved on disk
-42635:C 11 Jun 2024 18:06:10.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 18:06:10.136 * Background saving terminated with success
-58896:M 11 Jun 2024 18:11:11.043 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 18:11:11.046 * Background saving started by pid 43579
-43579:C 11 Jun 2024 18:11:11.061 * DB saved on disk
-43579:C 11 Jun 2024 18:11:11.062 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 18:11:11.147 * Background saving terminated with success
-58896:M 11 Jun 2024 18:16:12.053 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 18:16:12.056 * Background saving started by pid 44500
-44500:C 11 Jun 2024 18:16:12.068 * DB saved on disk
-44500:C 11 Jun 2024 18:16:12.068 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 18:16:12.157 * Background saving terminated with success
-58896:M 11 Jun 2024 18:21:13.023 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 18:21:13.027 * Background saving started by pid 45459
-45459:C 11 Jun 2024 18:21:13.054 * DB saved on disk
-45459:C 11 Jun 2024 18:21:13.055 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 18:21:13.128 * Background saving terminated with success
-58896:M 11 Jun 2024 18:26:14.079 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 18:26:14.080 * Background saving started by pid 46446
-46446:C 11 Jun 2024 18:26:14.095 * DB saved on disk
-46446:C 11 Jun 2024 18:26:14.096 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 18:26:14.181 * Background saving terminated with success
-58896:M 11 Jun 2024 18:31:15.020 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 18:31:15.023 * Background saving started by pid 47369
-47369:C 11 Jun 2024 18:31:15.033 * DB saved on disk
-47369:C 11 Jun 2024 18:31:15.033 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 18:31:15.125 * Background saving terminated with success
-58896:M 11 Jun 2024 18:44:26.665 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 18:44:26.668 * Background saving started by pid 47820
-47820:C 11 Jun 2024 18:44:26.794 * DB saved on disk
-47820:C 11 Jun 2024 18:44:26.795 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 18:44:26.870 * Background saving terminated with success
-58896:M 11 Jun 2024 18:51:12.584 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 18:51:12.586 * Background saving started by pid 47950
-47950:C 11 Jun 2024 18:51:12.599 * DB saved on disk
-47950:C 11 Jun 2024 18:51:12.600 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 18:51:12.689 * Background saving terminated with success
-58896:M 11 Jun 2024 19:02:27.776 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 19:02:27.778 * Background saving started by pid 48131
-48131:C 11 Jun 2024 19:02:27.802 * DB saved on disk
-48131:C 11 Jun 2024 19:02:27.804 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 19:02:27.881 * Background saving terminated with success
-58896:M 11 Jun 2024 19:07:28.043 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 19:07:28.045 * Background saving started by pid 48889
-48889:C 11 Jun 2024 19:07:28.056 * DB saved on disk
-48889:C 11 Jun 2024 19:07:28.063 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 19:07:28.147 * Background saving terminated with success
-58896:M 11 Jun 2024 19:12:29.059 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 19:12:29.061 * Background saving started by pid 49675
-49675:C 11 Jun 2024 19:12:29.074 * DB saved on disk
-49675:C 11 Jun 2024 19:12:29.076 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 19:12:29.165 * Background saving terminated with success
-58896:M 11 Jun 2024 19:17:30.038 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 19:17:30.039 * Background saving started by pid 50454
-50454:C 11 Jun 2024 19:17:30.048 * DB saved on disk
-50454:C 11 Jun 2024 19:17:30.049 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 19:17:30.141 * Background saving terminated with success
-58896:M 11 Jun 2024 19:22:31.015 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 19:22:31.019 * Background saving started by pid 51066
-51066:C 11 Jun 2024 19:22:31.034 * DB saved on disk
-51066:C 11 Jun 2024 19:22:31.035 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 19:22:31.121 * Background saving terminated with success
-58896:M 11 Jun 2024 19:27:32.083 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 19:27:32.086 * Background saving started by pid 51963
-51963:C 11 Jun 2024 19:27:32.100 * DB saved on disk
-51963:C 11 Jun 2024 19:27:32.101 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 19:27:32.187 * Background saving terminated with success
-58896:M 11 Jun 2024 19:32:33.008 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 19:32:33.010 * Background saving started by pid 52753
-52753:C 11 Jun 2024 19:32:33.021 * DB saved on disk
-52753:C 11 Jun 2024 19:32:33.021 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 19:32:33.111 * Background saving terminated with success
-58896:M 11 Jun 2024 19:37:34.032 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 19:37:34.034 * Background saving started by pid 54020
-54020:C 11 Jun 2024 19:37:34.047 * DB saved on disk
-54020:C 11 Jun 2024 19:37:34.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 19:37:34.135 * Background saving terminated with success
-58896:M 11 Jun 2024 19:42:35.021 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 19:42:35.025 * Background saving started by pid 55025
-55025:C 11 Jun 2024 19:42:35.055 * DB saved on disk
-55025:C 11 Jun 2024 19:42:35.056 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 19:42:35.126 * Background saving terminated with success
-58896:M 11 Jun 2024 19:47:36.075 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 19:47:36.077 * Background saving started by pid 55926
-55926:C 11 Jun 2024 19:47:36.091 * DB saved on disk
-55926:C 11 Jun 2024 19:47:36.092 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 19:47:36.178 * Background saving terminated with success
-58896:M 11 Jun 2024 19:52:37.054 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 19:52:37.061 * Background saving started by pid 56735
-56735:C 11 Jun 2024 19:52:37.078 * DB saved on disk
-56735:C 11 Jun 2024 19:52:37.078 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 19:52:37.163 * Background saving terminated with success
-58896:M 11 Jun 2024 19:57:38.075 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 19:57:38.077 * Background saving started by pid 57603
-57603:C 11 Jun 2024 19:57:38.092 * DB saved on disk
-57603:C 11 Jun 2024 19:57:38.092 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 19:57:38.178 * Background saving terminated with success
-58896:M 11 Jun 2024 20:02:39.013 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 20:02:39.015 * Background saving started by pid 58545
-58545:C 11 Jun 2024 20:02:39.028 * DB saved on disk
-58545:C 11 Jun 2024 20:02:39.029 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 20:02:39.116 * Background saving terminated with success
-58896:M 11 Jun 2024 20:07:40.055 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 20:07:40.057 * Background saving started by pid 59368
-59368:C 11 Jun 2024 20:07:40.077 * DB saved on disk
-59368:C 11 Jun 2024 20:07:40.077 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 20:07:40.159 * Background saving terminated with success
-58896:M 11 Jun 2024 20:12:41.003 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 20:12:41.005 * Background saving started by pid 60133
-60133:C 11 Jun 2024 20:12:41.021 * DB saved on disk
-60133:C 11 Jun 2024 20:12:41.022 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 20:12:41.107 * Background saving terminated with success
-58896:M 11 Jun 2024 20:17:42.079 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 20:17:42.081 * Background saving started by pid 60949
-60949:C 11 Jun 2024 20:17:42.091 * DB saved on disk
-60949:C 11 Jun 2024 20:17:42.093 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 20:17:42.181 * Background saving terminated with success
-58896:M 11 Jun 2024 20:22:43.066 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 20:22:43.067 * Background saving started by pid 61718
-61718:C 11 Jun 2024 20:22:43.077 * DB saved on disk
-61718:C 11 Jun 2024 20:22:43.077 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 20:22:43.167 * Background saving terminated with success
-58896:M 11 Jun 2024 20:27:44.036 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 20:27:44.037 * Background saving started by pid 62501
-62501:C 11 Jun 2024 20:27:44.047 * DB saved on disk
-62501:C 11 Jun 2024 20:27:44.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 20:27:44.138 * Background saving terminated with success
-58896:M 11 Jun 2024 20:32:45.098 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 20:32:45.099 * Background saving started by pid 63324
-63324:C 11 Jun 2024 20:32:45.112 * DB saved on disk
-63324:C 11 Jun 2024 20:32:45.113 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 20:32:45.199 * Background saving terminated with success
-58896:M 11 Jun 2024 20:37:46.011 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 20:37:46.014 * Background saving started by pid 64283
-64283:C 11 Jun 2024 20:37:46.023 * DB saved on disk
-64283:C 11 Jun 2024 20:37:46.024 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 20:37:46.115 * Background saving terminated with success
-58896:M 11 Jun 2024 20:42:47.025 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 20:42:47.028 * Background saving started by pid 65146
-65146:C 11 Jun 2024 20:42:47.036 * DB saved on disk
-65146:C 11 Jun 2024 20:42:47.037 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 20:42:47.129 * Background saving terminated with success
-58896:M 11 Jun 2024 20:47:48.097 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 20:47:48.099 * Background saving started by pid 65963
-65963:C 11 Jun 2024 20:47:48.114 * DB saved on disk
-65963:C 11 Jun 2024 20:47:48.114 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 20:47:48.200 * Background saving terminated with success
-58896:M 11 Jun 2024 20:52:49.080 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 20:52:49.081 * Background saving started by pid 66721
-66721:C 11 Jun 2024 20:52:49.089 * DB saved on disk
-66721:C 11 Jun 2024 20:52:49.089 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 20:52:49.182 * Background saving terminated with success
-58896:M 11 Jun 2024 20:57:50.072 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 20:57:50.074 * Background saving started by pid 67533
-67533:C 11 Jun 2024 20:57:50.086 * DB saved on disk
-67533:C 11 Jun 2024 20:57:50.087 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 20:57:50.175 * Background saving terminated with success
-58896:M 11 Jun 2024 21:02:51.093 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 21:02:51.096 * Background saving started by pid 68492
-68492:C 11 Jun 2024 21:02:51.104 * DB saved on disk
-68492:C 11 Jun 2024 21:02:51.105 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 21:02:51.197 * Background saving terminated with success
-58896:M 11 Jun 2024 21:07:52.069 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 21:07:52.071 * Background saving started by pid 69301
-69301:C 11 Jun 2024 21:07:52.080 * DB saved on disk
-69301:C 11 Jun 2024 21:07:52.081 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 21:07:52.172 * Background saving terminated with success
-58896:M 11 Jun 2024 21:12:53.015 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 21:12:53.016 * Background saving started by pid 70023
-70023:C 11 Jun 2024 21:12:53.026 * DB saved on disk
-70023:C 11 Jun 2024 21:12:53.027 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 21:12:53.117 * Background saving terminated with success
-58896:M 11 Jun 2024 21:17:54.036 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 21:17:54.039 * Background saving started by pid 70808
-70808:C 11 Jun 2024 21:17:54.049 * DB saved on disk
-70808:C 11 Jun 2024 21:17:54.050 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 21:17:54.139 * Background saving terminated with success
-58896:M 11 Jun 2024 21:22:55.017 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 21:22:55.020 * Background saving started by pid 71619
-71619:C 11 Jun 2024 21:22:55.034 * DB saved on disk
-71619:C 11 Jun 2024 21:22:55.035 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 21:22:55.122 * Background saving terminated with success
-58896:M 11 Jun 2024 21:27:56.099 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 21:27:56.101 * Background saving started by pid 72347
-72347:C 11 Jun 2024 21:27:56.110 * DB saved on disk
-72347:C 11 Jun 2024 21:27:56.111 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 21:27:56.204 * Background saving terminated with success
-58896:M 11 Jun 2024 21:32:57.085 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 21:32:57.087 * Background saving started by pid 73084
-73084:C 11 Jun 2024 21:32:57.097 * DB saved on disk
-73084:C 11 Jun 2024 21:32:57.098 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 21:32:57.190 * Background saving terminated with success
-58896:M 11 Jun 2024 21:37:58.093 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 21:37:58.096 * Background saving started by pid 73831
-73831:C 11 Jun 2024 21:37:58.111 * DB saved on disk
-73831:C 11 Jun 2024 21:37:58.112 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 21:37:58.198 * Background saving terminated with success
-58896:M 11 Jun 2024 21:42:59.011 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 21:42:59.013 * Background saving started by pid 74560
-74560:C 11 Jun 2024 21:42:59.029 * DB saved on disk
-74560:C 11 Jun 2024 21:42:59.032 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 21:42:59.114 * Background saving terminated with success
-58896:M 11 Jun 2024 21:48:00.014 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 21:48:00.016 * Background saving started by pid 75300
-75300:C 11 Jun 2024 21:48:00.033 * DB saved on disk
-75300:C 11 Jun 2024 21:48:00.035 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 21:48:00.118 * Background saving terminated with success
-58896:M 11 Jun 2024 21:53:01.092 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 21:53:01.094 * Background saving started by pid 76026
-76026:C 11 Jun 2024 21:53:01.107 * DB saved on disk
-76026:C 11 Jun 2024 21:53:01.109 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 21:53:01.197 * Background saving terminated with success
-58896:M 11 Jun 2024 21:58:02.084 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 21:58:02.087 * Background saving started by pid 76774
-76774:C 11 Jun 2024 21:58:02.099 * DB saved on disk
-76774:C 11 Jun 2024 21:58:02.102 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 21:58:02.188 * Background saving terminated with success
-58896:M 11 Jun 2024 22:03:03.053 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 22:03:03.057 * Background saving started by pid 77506
-77506:C 11 Jun 2024 22:03:03.079 * DB saved on disk
-77506:C 11 Jun 2024 22:03:03.080 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 22:03:03.158 * Background saving terminated with success
-58896:M 11 Jun 2024 22:08:04.061 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 22:08:04.065 * Background saving started by pid 78241
-78241:C 11 Jun 2024 22:08:04.080 * DB saved on disk
-78241:C 11 Jun 2024 22:08:04.081 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 22:08:04.168 * Background saving terminated with success
-58896:M 11 Jun 2024 22:13:05.038 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 22:13:05.038 * Background saving started by pid 78971
-78971:C 11 Jun 2024 22:13:05.049 * DB saved on disk
-78971:C 11 Jun 2024 22:13:05.050 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 22:13:05.141 * Background saving terminated with success
-58896:M 11 Jun 2024 22:18:06.009 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 22:18:06.012 * Background saving started by pid 79712
-79712:C 11 Jun 2024 22:18:06.031 * DB saved on disk
-79712:C 11 Jun 2024 22:18:06.031 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 22:18:06.115 * Background saving terminated with success
-58896:M 11 Jun 2024 22:23:07.017 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 22:23:07.018 * Background saving started by pid 80463
-80463:C 11 Jun 2024 22:23:07.030 * DB saved on disk
-80463:C 11 Jun 2024 22:23:07.031 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 22:23:07.121 * Background saving terminated with success
-58896:M 11 Jun 2024 22:28:08.091 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 22:28:08.095 * Background saving started by pid 81195
-81195:C 11 Jun 2024 22:28:08.126 * DB saved on disk
-81195:C 11 Jun 2024 22:28:08.127 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 22:28:08.197 * Background saving terminated with success
-58896:M 11 Jun 2024 22:33:09.069 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 22:33:09.073 * Background saving started by pid 81924
-81924:C 11 Jun 2024 22:33:09.090 * DB saved on disk
-81924:C 11 Jun 2024 22:33:09.091 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 22:33:09.176 * Background saving terminated with success
-58896:M 11 Jun 2024 22:38:10.051 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 22:38:10.056 * Background saving started by pid 82688
-82688:C 11 Jun 2024 22:38:10.073 * DB saved on disk
-82688:C 11 Jun 2024 22:38:10.074 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 22:38:10.158 * Background saving terminated with success
-58896:M 11 Jun 2024 22:43:11.058 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 22:43:11.062 * Background saving started by pid 83419
-83419:C 11 Jun 2024 22:43:11.085 * DB saved on disk
-83419:C 11 Jun 2024 22:43:11.086 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 22:43:11.165 * Background saving terminated with success
-58896:M 11 Jun 2024 22:48:12.050 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 22:48:12.055 * Background saving started by pid 84242
-84242:C 11 Jun 2024 22:48:12.071 * DB saved on disk
-84242:C 11 Jun 2024 22:48:12.072 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 22:48:12.156 * Background saving terminated with success
-58896:M 11 Jun 2024 22:53:13.061 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 22:53:13.064 * Background saving started by pid 84972
-84972:C 11 Jun 2024 22:53:13.080 * DB saved on disk
-84972:C 11 Jun 2024 22:53:13.082 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 22:53:13.167 * Background saving terminated with success
-58896:M 11 Jun 2024 22:58:14.056 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 22:58:14.059 * Background saving started by pid 85710
-85710:C 11 Jun 2024 22:58:14.075 * DB saved on disk
-85710:C 11 Jun 2024 22:58:14.076 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 22:58:14.161 * Background saving terminated with success
-58896:M 11 Jun 2024 23:03:15.013 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 23:03:15.014 * Background saving started by pid 86445
-86445:C 11 Jun 2024 23:03:15.027 * DB saved on disk
-86445:C 11 Jun 2024 23:03:15.031 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 23:03:15.116 * Background saving terminated with success
-58896:M 11 Jun 2024 23:08:16.036 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 23:08:16.038 * Background saving started by pid 87172
-87172:C 11 Jun 2024 23:08:16.052 * DB saved on disk
-87172:C 11 Jun 2024 23:08:16.053 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 23:08:16.138 * Background saving terminated with success
-58896:M 11 Jun 2024 23:13:17.001 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 23:13:17.006 * Background saving started by pid 87901
-87901:C 11 Jun 2024 23:13:17.020 * DB saved on disk
-87901:C 11 Jun 2024 23:13:17.021 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 23:13:17.109 * Background saving terminated with success
-58896:M 11 Jun 2024 23:18:18.101 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 23:18:18.103 * Background saving started by pid 88646
-88646:C 11 Jun 2024 23:18:18.115 * DB saved on disk
-88646:C 11 Jun 2024 23:18:18.116 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 23:18:18.205 * Background saving terminated with success
-58896:M 11 Jun 2024 23:23:19.087 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 23:23:19.090 * Background saving started by pid 89375
-89375:C 11 Jun 2024 23:23:19.101 * DB saved on disk
-89375:C 11 Jun 2024 23:23:19.103 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 23:23:19.192 * Background saving terminated with success
-58896:M 11 Jun 2024 23:28:20.011 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 23:28:20.013 * Background saving started by pid 90105
-90105:C 11 Jun 2024 23:28:20.027 * DB saved on disk
-90105:C 11 Jun 2024 23:28:20.028 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 23:28:20.115 * Background saving terminated with success
-58896:M 11 Jun 2024 23:33:21.067 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 23:33:21.069 * Background saving started by pid 90839
-90839:C 11 Jun 2024 23:33:21.079 * DB saved on disk
-90839:C 11 Jun 2024 23:33:21.080 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 23:33:21.170 * Background saving terminated with success
-58896:M 11 Jun 2024 23:38:22.003 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 23:38:22.008 * Background saving started by pid 91662
-91662:C 11 Jun 2024 23:38:22.019 * DB saved on disk
-91662:C 11 Jun 2024 23:38:22.019 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 23:38:22.111 * Background saving terminated with success
-58896:M 11 Jun 2024 23:43:23.059 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 23:43:23.063 * Background saving started by pid 92434
-92434:C 11 Jun 2024 23:43:23.075 * DB saved on disk
-92434:C 11 Jun 2024 23:43:23.075 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 23:43:23.165 * Background saving terminated with success
-58896:M 11 Jun 2024 23:48:24.037 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 23:48:24.038 * Background saving started by pid 93164
-93164:C 11 Jun 2024 23:48:24.049 * DB saved on disk
-93164:C 11 Jun 2024 23:48:24.050 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 23:48:24.142 * Background saving terminated with success
-58896:M 11 Jun 2024 23:53:25.018 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 23:53:25.021 * Background saving started by pid 93965
-93965:C 11 Jun 2024 23:53:25.035 * DB saved on disk
-93965:C 11 Jun 2024 23:53:25.037 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 23:53:25.123 * Background saving terminated with success
-58896:M 11 Jun 2024 23:58:26.009 * 100 changes in 300 seconds. Saving...
-58896:M 11 Jun 2024 23:58:26.011 * Background saving started by pid 94758
-94758:C 11 Jun 2024 23:58:26.025 * DB saved on disk
-94758:C 11 Jun 2024 23:58:26.026 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 11 Jun 2024 23:58:26.112 * Background saving terminated with success
-58896:M 12 Jun 2024 00:03:27.039 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 00:03:27.041 * Background saving started by pid 95491
-95491:C 12 Jun 2024 00:03:27.055 * DB saved on disk
-95491:C 12 Jun 2024 00:03:27.057 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 00:03:27.142 * Background saving terminated with success
-58896:M 12 Jun 2024 00:08:28.034 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 00:08:28.035 * Background saving started by pid 96284
-96284:C 12 Jun 2024 00:08:28.051 * DB saved on disk
-96284:C 12 Jun 2024 00:08:28.051 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 00:08:28.136 * Background saving terminated with success
-58896:M 12 Jun 2024 00:13:29.015 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 00:13:29.019 * Background saving started by pid 97696
-97696:C 12 Jun 2024 00:13:29.029 * DB saved on disk
-97696:C 12 Jun 2024 00:13:29.030 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 00:13:29.120 * Background saving terminated with success
-58896:M 12 Jun 2024 00:18:30.052 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 00:18:30.055 * Background saving started by pid 98669
-98669:C 12 Jun 2024 00:18:30.072 * DB saved on disk
-98669:C 12 Jun 2024 00:18:30.073 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 00:18:30.157 * Background saving terminated with success
-58896:M 12 Jun 2024 00:23:31.067 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 00:23:31.069 * Background saving started by pid 99666
-99666:C 12 Jun 2024 00:23:31.085 * DB saved on disk
-99666:C 12 Jun 2024 00:23:31.087 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 00:23:31.169 * Background saving terminated with success
-58896:M 12 Jun 2024 00:28:32.087 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 00:28:32.093 * Background saving started by pid 831
-831:C 12 Jun 2024 00:28:32.110 * DB saved on disk
-831:C 12 Jun 2024 00:28:32.111 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 00:28:32.194 * Background saving terminated with success
-58896:M 12 Jun 2024 00:33:33.077 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 00:33:33.079 * Background saving started by pid 1642
-1642:C 12 Jun 2024 00:33:33.099 * DB saved on disk
-1642:C 12 Jun 2024 00:33:33.100 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 00:33:33.181 * Background saving terminated with success
-58896:M 12 Jun 2024 00:38:34.030 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 00:38:34.032 * Background saving started by pid 2590
-2590:C 12 Jun 2024 00:38:34.045 * DB saved on disk
-2590:C 12 Jun 2024 00:38:34.046 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 00:38:34.134 * Background saving terminated with success
-58896:M 12 Jun 2024 00:43:35.021 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 00:43:35.024 * Background saving started by pid 3742
-3742:C 12 Jun 2024 00:43:35.037 * DB saved on disk
-3742:C 12 Jun 2024 00:43:35.038 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 00:43:35.125 * Background saving terminated with success
-58896:M 12 Jun 2024 00:48:36.015 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 00:48:36.018 * Background saving started by pid 4666
-4666:C 12 Jun 2024 00:48:36.030 * DB saved on disk
-4666:C 12 Jun 2024 00:48:36.031 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 00:48:36.119 * Background saving terminated with success
-58896:M 12 Jun 2024 00:53:37.025 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 00:53:37.027 * Background saving started by pid 5691
-5691:C 12 Jun 2024 00:53:37.042 * DB saved on disk
-5691:C 12 Jun 2024 00:53:37.042 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 00:53:37.129 * Background saving terminated with success
-58896:M 12 Jun 2024 00:58:38.050 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 00:58:38.054 * Background saving started by pid 6620
-6620:C 12 Jun 2024 00:58:38.065 * DB saved on disk
-6620:C 12 Jun 2024 00:58:38.066 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 00:58:38.155 * Background saving terminated with success
-58896:M 12 Jun 2024 01:03:39.073 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 01:03:39.075 * Background saving started by pid 7624
-7624:C 12 Jun 2024 01:03:39.101 * DB saved on disk
-7624:C 12 Jun 2024 01:03:39.114 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 01:03:39.176 * Background saving terminated with success
-58896:M 12 Jun 2024 01:08:40.009 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 01:08:40.012 * Background saving started by pid 8631
-8631:C 12 Jun 2024 01:08:40.025 * DB saved on disk
-8631:C 12 Jun 2024 01:08:40.026 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 01:08:40.113 * Background saving terminated with success
-58896:M 12 Jun 2024 01:20:43.620 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 01:20:43.625 * Background saving started by pid 10740
-10740:C 12 Jun 2024 01:20:43.650 * DB saved on disk
-10740:C 12 Jun 2024 01:20:43.650 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 01:20:43.727 * Background saving terminated with success
-58896:M 12 Jun 2024 01:25:44.086 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 01:25:44.089 * Background saving started by pid 11660
-11660:C 12 Jun 2024 01:25:44.101 * DB saved on disk
-11660:C 12 Jun 2024 01:25:44.102 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 01:25:44.190 * Background saving terminated with success
-58896:M 12 Jun 2024 01:30:45.057 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 01:30:45.059 * Background saving started by pid 12738
-12738:C 12 Jun 2024 01:30:45.066 * DB saved on disk
-12738:C 12 Jun 2024 01:30:45.066 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 01:30:45.159 * Background saving terminated with success
-58896:M 12 Jun 2024 01:35:46.087 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 01:35:46.088 * Background saving started by pid 13711
-13711:C 12 Jun 2024 01:35:46.099 * DB saved on disk
-13711:C 12 Jun 2024 01:35:46.099 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 01:35:46.189 * Background saving terminated with success
-58896:M 12 Jun 2024 01:40:47.061 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 01:40:47.062 * Background saving started by pid 14674
-14674:C 12 Jun 2024 01:40:47.069 * DB saved on disk
-14674:C 12 Jun 2024 01:40:47.069 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 01:40:47.162 * Background saving terminated with success
-58896:M 12 Jun 2024 01:45:48.078 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 01:45:48.082 * Background saving started by pid 18780
-18780:C 12 Jun 2024 01:45:48.103 * DB saved on disk
-18780:C 12 Jun 2024 01:45:48.105 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 01:45:48.184 * Background saving terminated with success
-58896:M 12 Jun 2024 01:50:49.025 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 01:50:49.026 * Background saving started by pid 19686
-19686:C 12 Jun 2024 01:50:49.038 * DB saved on disk
-19686:C 12 Jun 2024 01:50:49.039 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 01:50:49.127 * Background saving terminated with success
-58896:M 12 Jun 2024 01:56:34.702 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 01:56:34.702 * Background saving started by pid 20738
-20738:C 12 Jun 2024 01:56:34.741 * DB saved on disk
-20738:C 12 Jun 2024 01:56:34.744 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 01:56:34.803 * Background saving terminated with success
-58896:M 12 Jun 2024 02:01:35.007 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 02:01:35.008 * Background saving started by pid 21912
-21912:C 12 Jun 2024 02:01:35.017 * DB saved on disk
-21912:C 12 Jun 2024 02:01:35.018 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 02:01:35.108 * Background saving terminated with success
-58896:M 12 Jun 2024 02:06:36.095 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 02:06:36.097 * Background saving started by pid 23306
-23306:C 12 Jun 2024 02:06:36.103 * DB saved on disk
-23306:C 12 Jun 2024 02:06:36.104 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 02:06:36.197 * Background saving terminated with success
-58896:M 12 Jun 2024 02:11:37.037 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 02:11:37.038 * Background saving started by pid 24623
-24623:C 12 Jun 2024 02:11:37.049 * DB saved on disk
-24623:C 12 Jun 2024 02:11:37.049 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 02:11:37.138 * Background saving terminated with success
-58896:M 12 Jun 2024 02:16:38.032 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 02:16:38.035 * Background saving started by pid 28935
-28935:C 12 Jun 2024 02:16:38.056 * DB saved on disk
-28935:C 12 Jun 2024 02:16:38.060 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 02:16:38.136 * Background saving terminated with success
-58896:M 12 Jun 2024 02:21:39.065 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 02:21:39.068 * Background saving started by pid 30316
-30316:C 12 Jun 2024 02:21:39.089 * DB saved on disk
-30316:C 12 Jun 2024 02:21:39.089 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 02:21:39.169 * Background saving terminated with success
-58896:M 12 Jun 2024 02:26:40.088 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 02:26:40.092 * Background saving started by pid 31108
-31108:C 12 Jun 2024 02:26:40.115 * DB saved on disk
-31108:C 12 Jun 2024 02:26:40.116 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 02:26:40.192 * Background saving terminated with success
-58896:M 12 Jun 2024 02:31:41.089 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 02:31:41.090 * Background saving started by pid 32709
-32709:C 12 Jun 2024 02:31:41.115 * DB saved on disk
-32709:C 12 Jun 2024 02:31:41.116 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 02:31:41.191 * Background saving terminated with success
-58896:M 12 Jun 2024 02:34:11.210 * DB saved on disk
-58896:M 12 Jun 2024 02:39:12.090 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 02:39:12.090 * Background saving started by pid 34729
-34729:C 12 Jun 2024 02:39:12.098 * DB saved on disk
-34729:C 12 Jun 2024 02:39:12.099 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 02:39:12.191 * Background saving terminated with success
-58896:M 12 Jun 2024 02:44:13.030 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 02:44:13.031 * Background saving started by pid 36552
-36552:C 12 Jun 2024 02:44:13.039 * DB saved on disk
-36552:C 12 Jun 2024 02:44:13.040 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 02:44:13.132 * Background saving terminated with success
-58896:M 12 Jun 2024 02:49:14.003 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 02:49:14.007 * Background saving started by pid 40406
-40406:C 12 Jun 2024 02:49:14.016 * DB saved on disk
-40406:C 12 Jun 2024 02:49:14.017 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 02:49:14.108 * Background saving terminated with success
-58896:M 12 Jun 2024 02:54:15.052 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 02:54:15.053 * Background saving started by pid 41373
-41373:C 12 Jun 2024 02:54:15.068 * DB saved on disk
-41373:C 12 Jun 2024 02:54:15.069 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 02:54:15.155 * Background saving terminated with success
-58896:M 12 Jun 2024 02:59:16.098 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 02:59:16.100 * Background saving started by pid 42324
-42324:C 12 Jun 2024 02:59:16.108 * DB saved on disk
-42324:C 12 Jun 2024 02:59:16.109 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 02:59:16.201 * Background saving terminated with success
-58896:M 12 Jun 2024 03:04:17.012 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 03:04:17.015 * Background saving started by pid 43225
-43225:C 12 Jun 2024 03:04:17.025 * DB saved on disk
-43225:C 12 Jun 2024 03:04:17.026 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 03:04:17.116 * Background saving terminated with success
-58896:M 12 Jun 2024 03:04:34.446 * DB saved on disk
-58896:M 12 Jun 2024 03:09:35.045 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 03:09:35.048 * Background saving started by pid 44410
-44410:C 12 Jun 2024 03:09:35.055 * DB saved on disk
-44410:C 12 Jun 2024 03:09:35.057 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 03:09:35.148 * Background saving terminated with success
-58896:M 12 Jun 2024 03:14:36.016 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 03:14:36.019 * Background saving started by pid 46540
-46540:C 12 Jun 2024 03:14:36.031 * DB saved on disk
-46540:C 12 Jun 2024 03:14:36.033 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 03:14:36.119 * Background saving terminated with success
-58896:M 12 Jun 2024 03:19:37.037 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 03:19:37.041 * Background saving started by pid 47386
-47386:C 12 Jun 2024 03:19:37.055 * DB saved on disk
-47386:C 12 Jun 2024 03:19:37.058 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 03:19:37.143 * Background saving terminated with success
-58896:M 12 Jun 2024 03:24:38.035 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 03:24:38.042 * Background saving started by pid 48201
-48201:C 12 Jun 2024 03:24:38.063 * DB saved on disk
-48201:C 12 Jun 2024 03:24:38.064 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 03:24:38.147 * Background saving terminated with success
-58896:M 12 Jun 2024 03:29:39.031 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 03:29:39.035 * Background saving started by pid 49082
-49082:C 12 Jun 2024 03:29:39.051 * DB saved on disk
-49082:C 12 Jun 2024 03:29:39.052 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 03:29:39.136 * Background saving terminated with success
-58896:M 12 Jun 2024 03:34:40.043 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 03:34:40.046 * Background saving started by pid 53122
-53122:C 12 Jun 2024 03:34:40.058 * DB saved on disk
-53122:C 12 Jun 2024 03:34:40.059 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 03:34:40.147 * Background saving terminated with success
-58896:M 12 Jun 2024 03:39:41.032 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 03:39:41.035 * Background saving started by pid 54267
-54267:C 12 Jun 2024 03:39:41.056 * DB saved on disk
-54267:C 12 Jun 2024 03:39:41.057 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 03:39:41.137 * Background saving terminated with success
-58896:M 12 Jun 2024 03:44:42.005 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 03:44:42.006 * Background saving started by pid 55054
-55054:C 12 Jun 2024 03:44:42.022 * DB saved on disk
-55054:C 12 Jun 2024 03:44:42.024 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 03:44:42.107 * Background saving terminated with success
-58896:M 12 Jun 2024 03:49:43.092 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 03:49:43.094 * Background saving started by pid 55938
-55938:C 12 Jun 2024 03:49:43.104 * DB saved on disk
-55938:C 12 Jun 2024 03:49:43.105 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 03:49:43.195 * Background saving terminated with success
-58896:M 12 Jun 2024 03:54:44.085 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 03:54:44.087 * Background saving started by pid 56777
-56777:C 12 Jun 2024 03:54:44.105 * DB saved on disk
-56777:C 12 Jun 2024 03:54:44.111 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 03:54:44.189 * Background saving terminated with success
-58896:M 12 Jun 2024 03:59:45.049 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 03:59:45.050 * Background saving started by pid 57643
-57643:C 12 Jun 2024 03:59:45.059 * DB saved on disk
-57643:C 12 Jun 2024 03:59:45.061 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 03:59:45.151 * Background saving terminated with success
-58896:M 12 Jun 2024 04:04:46.023 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 04:04:46.027 * Background saving started by pid 58638
-58638:C 12 Jun 2024 04:04:46.049 * DB saved on disk
-58638:C 12 Jun 2024 04:04:46.049 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 04:04:46.129 * Background saving terminated with success
-58896:M 12 Jun 2024 04:09:47.069 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 04:09:47.071 * Background saving started by pid 59417
-59417:C 12 Jun 2024 04:09:47.089 * DB saved on disk
-59417:C 12 Jun 2024 04:09:47.090 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 04:09:47.173 * Background saving terminated with success
-58896:M 12 Jun 2024 04:14:48.091 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 04:14:48.092 * Background saving started by pid 60190
-60190:C 12 Jun 2024 04:14:48.100 * DB saved on disk
-60190:C 12 Jun 2024 04:14:48.105 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 04:14:48.193 * Background saving terminated with success
-58896:M 12 Jun 2024 04:19:49.050 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 04:19:49.052 * Background saving started by pid 61120
-61120:C 12 Jun 2024 04:19:49.060 * DB saved on disk
-61120:C 12 Jun 2024 04:19:49.060 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 04:19:49.153 * Background saving terminated with success
-58896:M 12 Jun 2024 04:24:50.028 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 04:24:50.032 * Background saving started by pid 61993
-61993:C 12 Jun 2024 04:24:50.051 * DB saved on disk
-61993:C 12 Jun 2024 04:24:50.052 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 04:24:50.134 * Background saving terminated with success
-58896:M 12 Jun 2024 04:29:51.054 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 04:29:51.058 * Background saving started by pid 62790
-62790:C 12 Jun 2024 04:29:51.077 * DB saved on disk
-62790:C 12 Jun 2024 04:29:51.077 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 04:29:51.159 * Background saving terminated with success
-58896:M 12 Jun 2024 04:34:52.067 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 04:34:52.069 * Background saving started by pid 63597
-63597:C 12 Jun 2024 04:34:52.077 * DB saved on disk
-63597:C 12 Jun 2024 04:34:52.078 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 04:34:52.170 * Background saving terminated with success
-58896:M 12 Jun 2024 04:39:53.088 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 04:39:53.089 * Background saving started by pid 64519
-64519:C 12 Jun 2024 04:39:53.098 * DB saved on disk
-64519:C 12 Jun 2024 04:39:53.099 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 04:39:53.190 * Background saving terminated with success
-58896:M 12 Jun 2024 04:44:54.089 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 04:44:54.091 * Background saving started by pid 65472
-65472:C 12 Jun 2024 04:44:54.099 * DB saved on disk
-65472:C 12 Jun 2024 04:44:54.100 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 04:44:54.193 * Background saving terminated with success
-58896:M 12 Jun 2024 04:49:55.090 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 04:49:55.091 * Background saving started by pid 66430
-66430:C 12 Jun 2024 04:49:55.102 * DB saved on disk
-66430:C 12 Jun 2024 04:49:55.104 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 04:49:55.192 * Background saving terminated with success
-58896:M 12 Jun 2024 04:54:56.019 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 04:54:56.022 * Background saving started by pid 67514
-67514:C 12 Jun 2024 04:54:56.034 * DB saved on disk
-67514:C 12 Jun 2024 04:54:56.039 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 04:54:56.124 * Background saving terminated with success
-58896:M 12 Jun 2024 04:59:57.000 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 04:59:57.001 * Background saving started by pid 68698
-68698:C 12 Jun 2024 04:59:57.010 * DB saved on disk
-68698:C 12 Jun 2024 04:59:57.012 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 04:59:57.102 * Background saving terminated with success
-58896:M 12 Jun 2024 05:04:58.100 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 05:04:58.102 * Background saving started by pid 74173
-74173:C 12 Jun 2024 05:04:58.117 * DB saved on disk
-74173:C 12 Jun 2024 05:04:58.117 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 05:04:58.204 * Background saving terminated with success
-58896:M 12 Jun 2024 05:09:59.003 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 05:09:59.005 * Background saving started by pid 77013
-77013:C 12 Jun 2024 05:09:59.012 * DB saved on disk
-77013:C 12 Jun 2024 05:09:59.013 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 05:09:59.106 * Background saving terminated with success
-58896:M 12 Jun 2024 05:15:00.056 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 05:15:00.058 * Background saving started by pid 77983
-77983:C 12 Jun 2024 05:15:00.070 * DB saved on disk
-77983:C 12 Jun 2024 05:15:00.071 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 05:15:00.159 * Background saving terminated with success
-58896:M 12 Jun 2024 05:20:01.096 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 05:20:01.103 * Background saving started by pid 79011
-79011:C 12 Jun 2024 05:20:01.122 * DB saved on disk
-79011:C 12 Jun 2024 05:20:01.132 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 05:20:01.204 * Background saving terminated with success
-58896:M 12 Jun 2024 05:25:02.097 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 05:25:02.099 * Background saving started by pid 80216
-80216:C 12 Jun 2024 05:25:02.113 * DB saved on disk
-80216:C 12 Jun 2024 05:25:02.113 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 05:25:02.200 * Background saving terminated with success
-58896:M 12 Jun 2024 05:30:03.063 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 05:30:03.068 * Background saving started by pid 81376
-81376:C 12 Jun 2024 05:30:03.083 * DB saved on disk
-81376:C 12 Jun 2024 05:30:03.088 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 05:30:03.169 * Background saving terminated with success
-58896:M 12 Jun 2024 05:35:04.075 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 05:35:04.078 * Background saving started by pid 82164
-82164:C 12 Jun 2024 05:35:04.088 * DB saved on disk
-82164:C 12 Jun 2024 05:35:04.088 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 05:35:04.179 * Background saving terminated with success
-58896:M 12 Jun 2024 05:40:05.085 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 05:40:05.087 * Background saving started by pid 83362
-83362:C 12 Jun 2024 05:40:05.113 * DB saved on disk
-83362:C 12 Jun 2024 05:40:05.116 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 05:40:05.189 * Background saving terminated with success
-58896:M 12 Jun 2024 05:45:06.011 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 05:45:06.012 * Background saving started by pid 84525
-84525:C 12 Jun 2024 05:45:06.021 * DB saved on disk
-84525:C 12 Jun 2024 05:45:06.022 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 05:45:06.114 * Background saving terminated with success
-58896:M 12 Jun 2024 05:50:07.081 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 05:50:07.082 * Background saving started by pid 85679
-85679:C 12 Jun 2024 05:50:07.095 * DB saved on disk
-85679:C 12 Jun 2024 05:50:07.096 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 05:50:07.184 * Background saving terminated with success
-58896:M 12 Jun 2024 06:09:17.678 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 06:09:17.799 * Background saving started by pid 86522
-86522:C 12 Jun 2024 06:09:17.807 * DB saved on disk
-86522:C 12 Jun 2024 06:09:17.807 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 06:09:17.913 * Background saving terminated with success
-58896:M 12 Jun 2024 07:23:10.681 * 1 changes in 3600 seconds. Saving...
-58896:M 12 Jun 2024 07:23:10.685 * Background saving started by pid 86688
-86688:C 12 Jun 2024 07:23:10.799 * DB saved on disk
-86688:C 12 Jun 2024 07:23:10.800 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 07:23:10.887 * Background saving terminated with success
-58896:M 12 Jun 2024 08:28:27.701 * 1 changes in 3600 seconds. Saving...
-58896:M 12 Jun 2024 08:28:27.709 * Background saving started by pid 86783
-86783:C 12 Jun 2024 08:28:27.805 * DB saved on disk
-86783:C 12 Jun 2024 08:28:27.806 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 08:28:27.810 * Background saving terminated with success
-58896:M 12 Jun 2024 08:37:48.107 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 08:37:48.109 * Background saving started by pid 86998
-86998:C 12 Jun 2024 08:37:48.124 * DB saved on disk
-86998:C 12 Jun 2024 08:37:48.125 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 08:37:48.210 * Background saving terminated with success
-58896:M 12 Jun 2024 08:45:00.378 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 08:45:00.441 * Background saving started by pid 88055
-88055:C 12 Jun 2024 08:45:00.998 * DB saved on disk
-88055:C 12 Jun 2024 08:45:01.006 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 08:45:01.057 * Background saving terminated with success
-58896:M 12 Jun 2024 09:03:18.658 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 09:03:18.662 * Background saving started by pid 88847
-88847:C 12 Jun 2024 09:03:18.782 * DB saved on disk
-88847:C 12 Jun 2024 09:03:18.783 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 09:03:18.865 * Background saving terminated with success
-58896:M 12 Jun 2024 09:20:51.679 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 09:20:51.682 * Background saving started by pid 89079
-89079:C 12 Jun 2024 09:20:51.805 * DB saved on disk
-89079:C 12 Jun 2024 09:20:51.806 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 09:20:51.885 * Background saving terminated with success
-58896:M 12 Jun 2024 09:36:57.986 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 09:36:57.986 * Background saving started by pid 89273
-89273:C 12 Jun 2024 09:36:57.994 * DB saved on disk
-89273:C 12 Jun 2024 09:36:57.994 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 09:36:58.089 * Background saving terminated with success
-58896:M 12 Jun 2024 09:52:59.001 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 09:52:59.002 * Background saving started by pid 89459
-89459:C 12 Jun 2024 09:52:59.013 * DB saved on disk
-89459:C 12 Jun 2024 09:52:59.014 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 09:52:59.103 * Background saving terminated with success
-58896:M 12 Jun 2024 10:09:08.619 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 10:09:08.624 * Background saving started by pid 89697
-89697:C 12 Jun 2024 10:09:08.738 * DB saved on disk
-89697:C 12 Jun 2024 10:09:08.739 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 10:09:08.831 * Background saving terminated with success
-58896:M 12 Jun 2024 10:27:48.637 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 10:27:48.640 * Background saving started by pid 90312
-90312:C 12 Jun 2024 10:27:48.754 * DB saved on disk
-90312:C 12 Jun 2024 10:27:48.754 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 10:27:48.843 * Background saving terminated with success
-58896:M 12 Jun 2024 10:45:08.649 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 10:45:08.765 * Background saving started by pid 90689
-90689:C 12 Jun 2024 10:45:08.781 * DB saved on disk
-90689:C 12 Jun 2024 10:45:08.781 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 10:45:08.872 * Background saving terminated with success
-58896:M 12 Jun 2024 11:04:19.611 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 11:04:19.731 * Background saving started by pid 91384
-91384:C 12 Jun 2024 11:04:19.738 * DB saved on disk
-91384:C 12 Jun 2024 11:04:19.739 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 11:04:19.837 * Background saving terminated with success
-58896:M 12 Jun 2024 11:13:15.115 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 11:13:15.119 * Background saving started by pid 91575
-91575:C 12 Jun 2024 11:13:15.137 * DB saved on disk
-91575:C 12 Jun 2024 11:13:15.138 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 11:13:15.222 * Background saving terminated with success
-58896:M 12 Jun 2024 12:18:47.632 * 1 changes in 3600 seconds. Saving...
-58896:M 12 Jun 2024 12:18:47.635 * Background saving started by pid 91694
-91694:C 12 Jun 2024 12:18:47.756 * DB saved on disk
-91694:C 12 Jun 2024 12:18:47.757 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 12:18:47.839 * Background saving terminated with success
-58896:M 12 Jun 2024 12:39:26.809 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 12:39:26.811 * Background saving started by pid 92013
-92013:C 12 Jun 2024 12:39:26.824 * DB saved on disk
-92013:C 12 Jun 2024 12:39:26.825 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 12:39:26.912 * Background saving terminated with success
-58896:M 12 Jun 2024 12:44:27.044 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 12:44:27.045 * Background saving started by pid 93150
-93150:C 12 Jun 2024 12:44:27.052 * DB saved on disk
-93150:C 12 Jun 2024 12:44:27.053 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 12:44:27.146 * Background saving terminated with success
-58896:M 12 Jun 2024 12:49:28.034 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 12:49:28.036 * Background saving started by pid 94288
-94288:C 12 Jun 2024 12:49:28.048 * DB saved on disk
-94288:C 12 Jun 2024 12:49:28.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 12:49:28.137 * Background saving terminated with success
-58896:M 12 Jun 2024 12:54:29.012 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 12:54:29.015 * Background saving started by pid 95469
-95469:C 12 Jun 2024 12:54:29.028 * DB saved on disk
-95469:C 12 Jun 2024 12:54:29.028 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 12:54:29.116 * Background saving terminated with success
-58896:M 12 Jun 2024 12:59:30.022 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 12:59:30.023 * Background saving started by pid 96610
-96610:C 12 Jun 2024 12:59:30.041 * DB saved on disk
-96610:C 12 Jun 2024 12:59:30.041 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 12:59:30.124 * Background saving terminated with success
-58896:M 12 Jun 2024 13:04:31.035 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 13:04:31.036 * Background saving started by pid 97725
-97725:C 12 Jun 2024 13:04:31.047 * DB saved on disk
-97725:C 12 Jun 2024 13:04:31.049 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 13:04:31.138 * Background saving terminated with success
-58896:M 12 Jun 2024 13:09:32.020 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 13:09:32.022 * Background saving started by pid 98866
-98866:C 12 Jun 2024 13:09:32.038 * DB saved on disk
-98866:C 12 Jun 2024 13:09:32.039 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 13:09:32.123 * Background saving terminated with success
-58896:M 12 Jun 2024 13:14:33.004 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 13:14:33.005 * Background saving started by pid 103
-103:C 12 Jun 2024 13:14:33.013 * DB saved on disk
-103:C 12 Jun 2024 13:14:33.014 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 13:14:33.106 * Background saving terminated with success
-58896:M 12 Jun 2024 13:19:34.047 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 13:19:34.048 * Background saving started by pid 1450
-1450:C 12 Jun 2024 13:19:34.058 * DB saved on disk
-1450:C 12 Jun 2024 13:19:34.058 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 13:19:34.149 * Background saving terminated with success
-58896:M 12 Jun 2024 13:24:35.060 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 13:24:35.063 * Background saving started by pid 2598
-2598:C 12 Jun 2024 13:24:35.072 * DB saved on disk
-2598:C 12 Jun 2024 13:24:35.072 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 13:24:35.165 * Background saving terminated with success
-58896:M 12 Jun 2024 13:29:36.006 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 13:29:36.009 * Background saving started by pid 3725
-3725:C 12 Jun 2024 13:29:36.019 * DB saved on disk
-3725:C 12 Jun 2024 13:29:36.020 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 13:29:36.110 * Background saving terminated with success
-58896:M 12 Jun 2024 13:34:37.016 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 13:34:37.017 * Background saving started by pid 4859
-4859:C 12 Jun 2024 13:34:37.036 * DB saved on disk
-4859:C 12 Jun 2024 13:34:37.037 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 13:34:37.118 * Background saving terminated with success
-58896:M 12 Jun 2024 13:52:39.660 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 13:52:39.663 * Background saving started by pid 5136
-5136:C 12 Jun 2024 13:52:39.783 * DB saved on disk
-5136:C 12 Jun 2024 13:52:39.783 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 13:52:39.867 * Background saving terminated with success
-58896:M 12 Jun 2024 14:18:00.088 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 14:18:00.089 * Background saving started by pid 5336
-5336:C 12 Jun 2024 14:18:00.100 * DB saved on disk
-5336:C 12 Jun 2024 14:18:00.101 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 14:18:00.192 * Background saving terminated with success
-58896:M 12 Jun 2024 14:23:39.578 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 14:23:39.579 * Background saving started by pid 5595
-5595:C 12 Jun 2024 14:23:39.589 * DB saved on disk
-5595:C 12 Jun 2024 14:23:39.590 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 14:23:39.686 * Background saving terminated with success
-58896:M 12 Jun 2024 14:28:40.051 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 14:28:40.063 * Background saving started by pid 6818
-6818:C 12 Jun 2024 14:28:40.079 * DB saved on disk
-6818:C 12 Jun 2024 14:28:40.080 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 14:28:40.165 * Background saving terminated with success
-58896:M 12 Jun 2024 14:33:41.100 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 14:33:41.101 * Background saving started by pid 7974
-7974:C 12 Jun 2024 14:33:41.111 * DB saved on disk
-7974:C 12 Jun 2024 14:33:41.112 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 14:33:41.202 * Background saving terminated with success
-58896:M 12 Jun 2024 14:39:15.833 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 14:39:15.834 * Background saving started by pid 9245
-9245:C 12 Jun 2024 14:39:15.843 * DB saved on disk
-9245:C 12 Jun 2024 14:39:15.843 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 14:39:15.936 * Background saving terminated with success
-58896:M 12 Jun 2024 14:44:16.080 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 14:44:16.083 * Background saving started by pid 10377
-10377:C 12 Jun 2024 14:44:16.090 * DB saved on disk
-10377:C 12 Jun 2024 14:44:16.091 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 14:44:16.184 * Background saving terminated with success
-58896:M 12 Jun 2024 14:49:17.004 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 14:49:17.006 * Background saving started by pid 11532
-11532:C 12 Jun 2024 14:49:17.020 * DB saved on disk
-11532:C 12 Jun 2024 14:49:17.020 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 14:49:17.107 * Background saving terminated with success
-58896:M 12 Jun 2024 14:54:18.054 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 14:54:18.055 * Background saving started by pid 12728
-12728:C 12 Jun 2024 14:54:18.071 * DB saved on disk
-12728:C 12 Jun 2024 14:54:18.072 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 14:54:18.156 * Background saving terminated with success
-58896:M 12 Jun 2024 14:59:19.060 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 14:59:19.063 * Background saving started by pid 13955
-13955:C 12 Jun 2024 14:59:19.095 * DB saved on disk
-13955:C 12 Jun 2024 14:59:19.096 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 14:59:19.164 * Background saving terminated with success
-58896:M 12 Jun 2024 15:04:50.041 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 15:04:50.046 * Background saving started by pid 14209
-14209:C 12 Jun 2024 15:04:50.161 * DB saved on disk
-14209:C 12 Jun 2024 15:04:50.162 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 15:04:50.249 * Background saving terminated with success
-58896:M 12 Jun 2024 15:16:01.278 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 15:16:01.280 * Background saving started by pid 14412
-14412:C 12 Jun 2024 15:16:01.296 * DB saved on disk
-14412:C 12 Jun 2024 15:16:01.297 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 15:16:01.382 * Background saving terminated with success
-58896:M 12 Jun 2024 15:21:42.305 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 15:21:42.308 * Background saving started by pid 15693
-15693:C 12 Jun 2024 15:21:42.425 * DB saved on disk
-15693:C 12 Jun 2024 15:21:42.426 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 15:21:42.512 * Background saving terminated with success
-58896:M 12 Jun 2024 15:26:43.091 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 15:26:43.093 * Background saving started by pid 15892
-15892:C 12 Jun 2024 15:26:43.108 * DB saved on disk
-15892:C 12 Jun 2024 15:26:43.109 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 15:26:43.208 * Background saving terminated with success
-58896:M 12 Jun 2024 15:32:08.470 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 15:32:08.473 * Background saving started by pid 16107
-16107:C 12 Jun 2024 15:32:08.586 * DB saved on disk
-16107:C 12 Jun 2024 15:32:08.587 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 15:32:08.675 * Background saving terminated with success
-58896:M 12 Jun 2024 15:37:51.306 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 15:37:51.311 * Background saving started by pid 16417
-16417:C 12 Jun 2024 15:37:51.431 * DB saved on disk
-16417:C 12 Jun 2024 15:37:51.432 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 15:37:51.515 * Background saving terminated with success
-58896:M 12 Jun 2024 15:43:24.501 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 15:43:24.505 * Background saving started by pid 16735
-16735:C 12 Jun 2024 15:43:24.619 * DB saved on disk
-16735:C 12 Jun 2024 15:43:24.620 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 15:43:24.709 * Background saving terminated with success
-58896:M 12 Jun 2024 15:49:17.280 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 15:49:17.282 * Background saving started by pid 17067
-17067:C 12 Jun 2024 15:49:17.403 * DB saved on disk
-17067:C 12 Jun 2024 15:49:17.403 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 15:49:17.486 * Background saving terminated with success
-58896:M 12 Jun 2024 15:58:31.612 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 15:58:31.612 * Background saving started by pid 17349
-17349:C 12 Jun 2024 15:58:31.624 * DB saved on disk
-17349:C 12 Jun 2024 15:58:31.625 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 15:58:31.713 * Background saving terminated with success
-58896:M 12 Jun 2024 16:07:32.858 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 16:07:32.859 * Background saving started by pid 17568
-17568:C 12 Jun 2024 16:07:32.873 * DB saved on disk
-17568:C 12 Jun 2024 16:07:32.874 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 16:07:32.961 * Background saving terminated with success
-58896:M 12 Jun 2024 16:26:11.532 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 16:26:11.537 * Background saving started by pid 17835
-17835:C 12 Jun 2024 16:26:11.655 * DB saved on disk
-17835:C 12 Jun 2024 16:26:11.655 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 16:26:11.739 * Background saving terminated with success
-58896:M 12 Jun 2024 16:50:57.768 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 16:50:57.771 * Background saving started by pid 18014
-18014:C 12 Jun 2024 16:50:57.787 * DB saved on disk
-18014:C 12 Jun 2024 16:50:57.789 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 16:50:57.873 * Background saving terminated with success
-58896:M 12 Jun 2024 17:50:58.003 * 1 changes in 3600 seconds. Saving...
-58896:M 12 Jun 2024 17:50:58.005 * Background saving started by pid 18164
-18164:C 12 Jun 2024 17:50:58.024 * DB saved on disk
-18164:C 12 Jun 2024 17:50:58.025 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 17:50:58.107 * Background saving terminated with success
-58896:M 12 Jun 2024 18:04:59.651 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 18:04:59.655 * Background saving started by pid 18427
-18427:C 12 Jun 2024 18:04:59.667 * DB saved on disk
-18427:C 12 Jun 2024 18:04:59.667 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 18:04:59.756 * Background saving terminated with success
-58896:M 12 Jun 2024 18:10:00.077 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 18:10:00.079 * Background saving started by pid 19644
-19644:C 12 Jun 2024 18:10:00.091 * DB saved on disk
-19644:C 12 Jun 2024 18:10:00.092 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 18:10:00.180 * Background saving terminated with success
-58896:M 12 Jun 2024 18:15:01.093 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 18:15:01.095 * Background saving started by pid 20794
-20794:C 12 Jun 2024 18:15:01.115 * DB saved on disk
-20794:C 12 Jun 2024 18:15:01.116 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 18:15:01.197 * Background saving terminated with success
-58896:M 12 Jun 2024 18:20:02.078 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 18:20:02.079 * Background saving started by pid 21958
-21958:C 12 Jun 2024 18:20:02.088 * DB saved on disk
-21958:C 12 Jun 2024 18:20:02.089 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 18:20:02.181 * Background saving terminated with success
-58896:M 12 Jun 2024 18:25:03.029 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 18:25:03.030 * Background saving started by pid 23368
-23368:C 12 Jun 2024 18:25:03.037 * DB saved on disk
-23368:C 12 Jun 2024 18:25:03.039 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 18:25:03.131 * Background saving terminated with success
-58896:M 12 Jun 2024 18:30:04.044 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 18:30:04.046 * Background saving started by pid 24145
-24145:C 12 Jun 2024 18:30:04.054 * DB saved on disk
-24145:C 12 Jun 2024 18:30:04.054 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 18:30:04.147 * Background saving terminated with success
-58896:M 12 Jun 2024 18:35:05.081 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 18:35:05.085 * Background saving started by pid 24948
-24948:C 12 Jun 2024 18:35:05.096 * DB saved on disk
-24948:C 12 Jun 2024 18:35:05.097 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 18:35:05.188 * Background saving terminated with success
-58896:M 12 Jun 2024 18:40:06.059 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 18:40:06.060 * Background saving started by pid 26487
-26487:C 12 Jun 2024 18:40:06.072 * DB saved on disk
-26487:C 12 Jun 2024 18:40:06.072 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 18:40:06.160 * Background saving terminated with success
-58896:M 12 Jun 2024 18:45:07.016 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 18:45:07.019 * Background saving started by pid 27478
-27478:C 12 Jun 2024 18:45:07.030 * DB saved on disk
-27478:C 12 Jun 2024 18:45:07.033 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 18:45:07.120 * Background saving terminated with success
-58896:M 12 Jun 2024 18:50:08.096 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 18:50:08.100 * Background saving started by pid 28255
-28255:C 12 Jun 2024 18:50:08.118 * DB saved on disk
-28255:C 12 Jun 2024 18:50:08.120 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 18:50:08.202 * Background saving terminated with success
-58896:M 12 Jun 2024 18:55:09.066 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 18:55:09.067 * Background saving started by pid 29026
-29026:C 12 Jun 2024 18:55:09.077 * DB saved on disk
-29026:C 12 Jun 2024 18:55:09.078 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 18:55:09.169 * Background saving terminated with success
-58896:M 12 Jun 2024 19:00:10.054 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 19:00:10.060 * Background saving started by pid 29846
-29846:C 12 Jun 2024 19:00:10.077 * DB saved on disk
-29846:C 12 Jun 2024 19:00:10.078 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 19:00:10.162 * Background saving terminated with success
-58896:M 12 Jun 2024 19:05:11.091 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 19:05:11.093 * Background saving started by pid 30656
-30656:C 12 Jun 2024 19:05:11.106 * DB saved on disk
-30656:C 12 Jun 2024 19:05:11.107 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 19:05:11.195 * Background saving terminated with success
-58896:M 12 Jun 2024 19:10:12.069 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 19:10:12.070 * Background saving started by pid 31436
-31436:C 12 Jun 2024 19:10:12.082 * DB saved on disk
-31436:C 12 Jun 2024 19:10:12.083 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 19:10:12.172 * Background saving terminated with success
-58896:M 12 Jun 2024 19:15:13.031 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 19:15:13.032 * Background saving started by pid 32456
-32456:C 12 Jun 2024 19:15:13.045 * DB saved on disk
-32456:C 12 Jun 2024 19:15:13.046 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 19:15:13.133 * Background saving terminated with success
-58896:M 12 Jun 2024 19:20:14.035 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 19:20:14.036 * Background saving started by pid 33746
-33746:C 12 Jun 2024 19:20:14.045 * DB saved on disk
-33746:C 12 Jun 2024 19:20:14.046 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 19:20:14.137 * Background saving terminated with success
-58896:M 12 Jun 2024 19:25:15.038 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 19:25:15.041 * Background saving started by pid 34925
-34925:C 12 Jun 2024 19:25:15.057 * DB saved on disk
-34925:C 12 Jun 2024 19:25:15.057 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 19:25:15.142 * Background saving terminated with success
-58896:M 12 Jun 2024 19:30:16.003 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 19:30:16.008 * Background saving started by pid 36180
-36180:C 12 Jun 2024 19:30:16.026 * DB saved on disk
-36180:C 12 Jun 2024 19:30:16.026 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 19:30:16.110 * Background saving terminated with success
-58896:M 12 Jun 2024 19:30:50.487 * DB saved on disk
-58896:M 12 Jun 2024 19:35:51.066 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 19:35:51.070 * Background saving started by pid 37695
-37695:C 12 Jun 2024 19:35:51.097 * DB saved on disk
-37695:C 12 Jun 2024 19:35:51.098 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 19:35:51.171 * Background saving terminated with success
-58896:M 12 Jun 2024 19:40:52.077 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 19:40:52.078 * Background saving started by pid 38605
-38605:C 12 Jun 2024 19:40:52.091 * DB saved on disk
-38605:C 12 Jun 2024 19:40:52.092 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 19:40:52.180 * Background saving terminated with success
-58896:M 12 Jun 2024 19:45:53.092 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 19:45:53.093 * Background saving started by pid 39738
-39738:C 12 Jun 2024 19:45:53.101 * DB saved on disk
-39738:C 12 Jun 2024 19:45:53.101 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 19:45:53.194 * Background saving terminated with success
-58896:M 12 Jun 2024 19:50:54.028 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 19:50:54.031 * Background saving started by pid 40670
-40670:C 12 Jun 2024 19:50:54.042 * DB saved on disk
-40670:C 12 Jun 2024 19:50:54.043 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 19:50:54.133 * Background saving terminated with success
-58896:M 12 Jun 2024 19:55:55.096 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 19:55:55.099 * Background saving started by pid 43457
-43457:C 12 Jun 2024 19:55:55.107 * DB saved on disk
-43457:C 12 Jun 2024 19:55:55.107 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 19:55:55.199 * Background saving terminated with success
-58896:M 12 Jun 2024 20:00:56.029 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 20:00:56.031 * Background saving started by pid 44453
-44453:C 12 Jun 2024 20:00:56.037 * DB saved on disk
-44453:C 12 Jun 2024 20:00:56.037 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 20:00:56.133 * Background saving terminated with success
-58896:M 12 Jun 2024 20:05:57.013 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 20:05:57.016 * Background saving started by pid 45420
-45420:C 12 Jun 2024 20:05:57.025 * DB saved on disk
-45420:C 12 Jun 2024 20:05:57.026 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 20:05:57.117 * Background saving terminated with success
-58896:M 12 Jun 2024 20:10:58.035 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 20:10:58.037 * Background saving started by pid 46445
-46445:C 12 Jun 2024 20:10:58.045 * DB saved on disk
-46445:C 12 Jun 2024 20:10:58.045 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 20:10:58.138 * Background saving terminated with success
-58896:M 12 Jun 2024 20:15:59.030 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 20:15:59.032 * Background saving started by pid 47438
-47438:C 12 Jun 2024 20:15:59.041 * DB saved on disk
-47438:C 12 Jun 2024 20:15:59.042 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 20:15:59.133 * Background saving terminated with success
-58896:M 12 Jun 2024 20:21:00.034 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 20:21:00.036 * Background saving started by pid 48364
-48364:C 12 Jun 2024 20:21:00.043 * DB saved on disk
-48364:C 12 Jun 2024 20:21:00.046 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 20:21:00.137 * Background saving terminated with success
-58896:M 12 Jun 2024 20:26:01.085 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 20:26:01.086 * Background saving started by pid 49394
-49394:C 12 Jun 2024 20:26:01.094 * DB saved on disk
-49394:C 12 Jun 2024 20:26:01.095 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 20:26:01.188 * Background saving terminated with success
-58896:M 12 Jun 2024 20:31:02.083 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 20:31:02.088 * Background saving started by pid 50479
-50479:C 12 Jun 2024 20:31:02.109 * DB saved on disk
-50479:C 12 Jun 2024 20:31:02.110 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 20:31:02.190 * Background saving terminated with success
-58896:M 12 Jun 2024 20:36:03.042 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 20:36:03.043 * Background saving started by pid 51442
-51442:C 12 Jun 2024 20:36:03.065 * DB saved on disk
-51442:C 12 Jun 2024 20:36:03.066 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 20:36:03.144 * Background saving terminated with success
-58896:M 12 Jun 2024 20:41:04.076 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 20:41:04.078 * Background saving started by pid 52399
-52399:C 12 Jun 2024 20:41:04.090 * DB saved on disk
-52399:C 12 Jun 2024 20:41:04.093 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 20:41:04.179 * Background saving terminated with success
-58896:M 12 Jun 2024 20:46:05.056 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 20:46:05.058 * Background saving started by pid 53397
-53397:C 12 Jun 2024 20:46:05.067 * DB saved on disk
-53397:C 12 Jun 2024 20:46:05.067 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 20:46:05.159 * Background saving terminated with success
-58896:M 12 Jun 2024 20:51:06.070 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 20:51:06.071 * Background saving started by pid 54382
-54382:C 12 Jun 2024 20:51:06.078 * DB saved on disk
-54382:C 12 Jun 2024 20:51:06.081 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 20:51:06.172 * Background saving terminated with success
-58896:M 12 Jun 2024 20:56:07.099 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 20:56:07.102 * Background saving started by pid 57248
-57248:C 12 Jun 2024 20:56:07.111 * DB saved on disk
-57248:C 12 Jun 2024 20:56:07.111 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 20:56:07.204 * Background saving terminated with success
-58896:M 12 Jun 2024 21:01:08.063 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 21:01:08.066 * Background saving started by pid 58219
-58219:C 12 Jun 2024 21:01:08.075 * DB saved on disk
-58219:C 12 Jun 2024 21:01:08.076 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 21:01:08.168 * Background saving terminated with success
-58896:M 12 Jun 2024 21:06:09.082 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 21:06:09.082 * Background saving started by pid 58998
-58998:C 12 Jun 2024 21:06:09.092 * DB saved on disk
-58998:C 12 Jun 2024 21:06:09.093 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 21:06:09.184 * Background saving terminated with success
-58896:M 12 Jun 2024 22:22:09.136 * 1 changes in 3600 seconds. Saving...
-58896:M 12 Jun 2024 22:22:09.196 * Background saving started by pid 59686
-59686:C 12 Jun 2024 22:22:09.431 * DB saved on disk
-59686:C 12 Jun 2024 22:22:09.536 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 22:22:09.634 * Background saving terminated with success
-58896:M 12 Jun 2024 22:27:10.079 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 22:27:10.083 * Background saving started by pid 60609
-60609:C 12 Jun 2024 22:27:10.097 * DB saved on disk
-60609:C 12 Jun 2024 22:27:10.099 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 22:27:10.184 * Background saving terminated with success
-58896:M 12 Jun 2024 22:32:11.032 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 22:32:11.036 * Background saving started by pid 61470
-61470:C 12 Jun 2024 22:32:11.054 * DB saved on disk
-61470:C 12 Jun 2024 22:32:11.055 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 22:32:11.138 * Background saving terminated with success
-58896:M 12 Jun 2024 22:37:12.056 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 22:37:12.057 * Background saving started by pid 62229
-62229:C 12 Jun 2024 22:37:12.071 * DB saved on disk
-62229:C 12 Jun 2024 22:37:12.071 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 22:37:12.159 * Background saving terminated with success
-58896:M 12 Jun 2024 22:42:13.023 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 22:42:13.025 * Background saving started by pid 63085
-63085:C 12 Jun 2024 22:42:13.036 * DB saved on disk
-63085:C 12 Jun 2024 22:42:13.036 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 22:42:13.126 * Background saving terminated with success
-58896:M 12 Jun 2024 22:47:14.042 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 22:47:14.043 * Background saving started by pid 63867
-63867:C 12 Jun 2024 22:47:14.052 * DB saved on disk
-63867:C 12 Jun 2024 22:47:14.053 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 22:47:14.145 * Background saving terminated with success
-58896:M 12 Jun 2024 22:52:15.071 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 22:52:15.075 * Background saving started by pid 64674
-64674:C 12 Jun 2024 22:52:15.106 * DB saved on disk
-64674:C 12 Jun 2024 22:52:15.106 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 22:52:15.177 * Background saving terminated with success
-58896:M 12 Jun 2024 22:57:16.052 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 22:57:16.053 * Background saving started by pid 65510
-65510:C 12 Jun 2024 22:57:16.071 * DB saved on disk
-65510:C 12 Jun 2024 22:57:16.076 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 22:57:16.155 * Background saving terminated with success
-58896:M 12 Jun 2024 23:02:17.039 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 23:02:17.043 * Background saving started by pid 66374
-66374:C 12 Jun 2024 23:02:17.059 * DB saved on disk
-66374:C 12 Jun 2024 23:02:17.061 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 23:02:17.145 * Background saving terminated with success
-58896:M 12 Jun 2024 23:07:18.063 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 23:07:18.065 * Background saving started by pid 69899
-69899:C 12 Jun 2024 23:07:18.097 * DB saved on disk
-69899:C 12 Jun 2024 23:07:18.097 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 23:07:18.166 * Background saving terminated with success
-58896:M 12 Jun 2024 23:12:19.010 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 23:12:19.013 * Background saving started by pid 72751
-72751:C 12 Jun 2024 23:12:19.025 * DB saved on disk
-72751:C 12 Jun 2024 23:12:19.026 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 23:12:19.114 * Background saving terminated with success
-58896:M 12 Jun 2024 23:17:20.093 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 23:17:20.097 * Background saving started by pid 73511
-73511:C 12 Jun 2024 23:17:20.117 * DB saved on disk
-73511:C 12 Jun 2024 23:17:20.118 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 23:17:20.199 * Background saving terminated with success
-58896:M 12 Jun 2024 23:22:21.096 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 23:22:21.098 * Background saving started by pid 74274
-74274:C 12 Jun 2024 23:22:21.108 * DB saved on disk
-74274:C 12 Jun 2024 23:22:21.115 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 23:22:21.199 * Background saving terminated with success
-58896:M 12 Jun 2024 23:27:22.041 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 23:27:22.044 * Background saving started by pid 75086
-75086:C 12 Jun 2024 23:27:22.055 * DB saved on disk
-75086:C 12 Jun 2024 23:27:22.055 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 23:27:22.145 * Background saving terminated with success
-58896:M 12 Jun 2024 23:32:23.057 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 23:32:23.059 * Background saving started by pid 75910
-75910:C 12 Jun 2024 23:32:23.070 * DB saved on disk
-75910:C 12 Jun 2024 23:32:23.075 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 23:32:23.161 * Background saving terminated with success
-58896:M 12 Jun 2024 23:37:24.000 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 23:37:24.002 * Background saving started by pid 76753
-76753:C 12 Jun 2024 23:37:24.021 * DB saved on disk
-76753:C 12 Jun 2024 23:37:24.026 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 23:37:24.102 * Background saving terminated with success
-58896:M 12 Jun 2024 23:42:25.039 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 23:42:25.041 * Background saving started by pid 77566
-77566:C 12 Jun 2024 23:42:25.051 * DB saved on disk
-77566:C 12 Jun 2024 23:42:25.053 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 23:42:25.142 * Background saving terminated with success
-58896:M 12 Jun 2024 23:47:26.048 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 23:47:26.051 * Background saving started by pid 78392
-78392:C 12 Jun 2024 23:47:26.058 * DB saved on disk
-78392:C 12 Jun 2024 23:47:26.059 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 23:47:26.152 * Background saving terminated with success
-58896:M 12 Jun 2024 23:52:27.035 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 23:52:27.036 * Background saving started by pid 79236
-79236:C 12 Jun 2024 23:52:27.047 * DB saved on disk
-79236:C 12 Jun 2024 23:52:27.051 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 23:52:27.137 * Background saving terminated with success
-58896:M 12 Jun 2024 23:57:28.011 * 100 changes in 300 seconds. Saving...
-58896:M 12 Jun 2024 23:57:28.017 * Background saving started by pid 80013
-80013:C 12 Jun 2024 23:57:28.026 * DB saved on disk
-80013:C 12 Jun 2024 23:57:28.027 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 12 Jun 2024 23:57:28.118 * Background saving terminated with success
-58896:M 13 Jun 2024 00:02:29.041 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 00:02:29.043 * Background saving started by pid 80854
-80854:C 13 Jun 2024 00:02:29.061 * DB saved on disk
-80854:C 13 Jun 2024 00:02:29.061 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 00:02:29.145 * Background saving terminated with success
-58896:M 13 Jun 2024 00:07:30.083 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 00:07:30.088 * Background saving started by pid 81692
-81692:C 13 Jun 2024 00:07:30.100 * DB saved on disk
-81692:C 13 Jun 2024 00:07:30.100 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 00:07:30.190 * Background saving terminated with success
-58896:M 13 Jun 2024 00:12:31.037 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 00:12:31.038 * Background saving started by pid 82630
-82630:C 13 Jun 2024 00:12:31.048 * DB saved on disk
-82630:C 13 Jun 2024 00:12:31.049 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 00:12:31.139 * Background saving terminated with success
-58896:M 13 Jun 2024 00:18:24.490 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 00:18:24.492 * Background saving started by pid 83628
-83628:C 13 Jun 2024 00:18:24.500 * DB saved on disk
-83628:C 13 Jun 2024 00:18:24.501 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 00:18:24.594 * Background saving terminated with success
-58896:M 13 Jun 2024 00:23:25.099 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 00:23:25.101 * Background saving started by pid 84426
-84426:C 13 Jun 2024 00:23:25.116 * DB saved on disk
-84426:C 13 Jun 2024 00:23:25.119 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 00:23:25.203 * Background saving terminated with success
-58896:M 13 Jun 2024 00:28:26.000 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 00:28:26.003 * Background saving started by pid 85490
-85490:C 13 Jun 2024 00:28:26.011 * DB saved on disk
-85490:C 13 Jun 2024 00:28:26.012 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 00:28:26.104 * Background saving terminated with success
-58896:M 13 Jun 2024 00:39:55.401 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 00:39:55.402 * Background saving started by pid 87583
-87583:C 13 Jun 2024 00:39:55.410 * DB saved on disk
-87583:C 13 Jun 2024 00:39:55.410 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 00:39:55.504 * Background saving terminated with success
-58896:M 13 Jun 2024 00:44:56.013 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 00:44:56.017 * Background saving started by pid 88517
-88517:C 13 Jun 2024 00:44:56.027 * DB saved on disk
-88517:C 13 Jun 2024 00:44:56.028 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 00:44:56.118 * Background saving terminated with success
-58896:M 13 Jun 2024 00:49:57.055 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 00:49:57.058 * Background saving started by pid 89534
-89534:C 13 Jun 2024 00:49:57.066 * DB saved on disk
-89534:C 13 Jun 2024 00:49:57.067 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 00:49:57.159 * Background saving terminated with success
-58896:M 13 Jun 2024 00:54:58.054 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 00:54:58.056 * Background saving started by pid 92884
-92884:C 13 Jun 2024 00:54:58.076 * DB saved on disk
-92884:C 13 Jun 2024 00:54:58.079 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 00:54:58.156 * Background saving terminated with success
-58896:M 13 Jun 2024 00:59:59.094 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 00:59:59.096 * Background saving started by pid 97454
-97454:C 13 Jun 2024 00:59:59.105 * DB saved on disk
-97454:C 13 Jun 2024 00:59:59.106 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 00:59:59.197 * Background saving terminated with success
-58896:M 13 Jun 2024 01:05:00.065 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 01:05:00.067 * Background saving started by pid 208
-208:C 13 Jun 2024 01:05:00.075 * DB saved on disk
-208:C 13 Jun 2024 01:05:00.076 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 01:05:00.168 * Background saving terminated with success
-58896:M 13 Jun 2024 01:10:01.004 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 01:10:01.007 * Background saving started by pid 1501
-1501:C 13 Jun 2024 01:10:01.016 * DB saved on disk
-1501:C 13 Jun 2024 01:10:01.017 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 01:10:01.108 * Background saving terminated with success
-58896:M 13 Jun 2024 01:15:02.069 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 01:15:02.073 * Background saving started by pid 8174
-8174:C 13 Jun 2024 01:15:02.087 * DB saved on disk
-8174:C 13 Jun 2024 01:15:02.088 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 01:15:02.176 * Background saving terminated with success
-58896:M 13 Jun 2024 01:20:03.007 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 01:20:03.009 * Background saving started by pid 12151
-12151:C 13 Jun 2024 01:20:03.021 * DB saved on disk
-12151:C 13 Jun 2024 01:20:03.022 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 01:20:03.111 * Background saving terminated with success
-58896:M 13 Jun 2024 01:25:04.011 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 01:25:04.014 * Background saving started by pid 14019
-14019:C 13 Jun 2024 01:25:04.027 * DB saved on disk
-14019:C 13 Jun 2024 01:25:04.028 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 01:25:04.115 * Background saving terminated with success
-58896:M 13 Jun 2024 01:30:05.029 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 01:30:05.032 * Background saving started by pid 15236
-15236:C 13 Jun 2024 01:30:05.048 * DB saved on disk
-15236:C 13 Jun 2024 01:30:05.050 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 01:30:05.134 * Background saving terminated with success
-58896:M 13 Jun 2024 01:46:26.863 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 01:46:26.987 * Background saving started by pid 16271
-16271:C 13 Jun 2024 01:46:26.997 * DB saved on disk
-16271:C 13 Jun 2024 01:46:26.997 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 01:46:27.096 * Background saving terminated with success
-58896:M 13 Jun 2024 02:53:37.566 * 1 changes in 3600 seconds. Saving...
-58896:M 13 Jun 2024 02:53:37.687 * Background saving started by pid 16361
-16361:C 13 Jun 2024 02:53:37.704 * DB saved on disk
-16361:C 13 Jun 2024 02:53:37.704 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 02:53:37.795 * Background saving terminated with success
-58896:M 13 Jun 2024 04:00:22.582 * 1 changes in 3600 seconds. Saving...
-58896:M 13 Jun 2024 04:00:22.694 * Background saving started by pid 16480
-16480:C 13 Jun 2024 04:00:22.703 * DB saved on disk
-16480:C 13 Jun 2024 04:00:22.704 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 04:00:22.807 * Background saving terminated with success
-58896:M 13 Jun 2024 05:06:58.599 * 1 changes in 3600 seconds. Saving...
-58896:M 13 Jun 2024 05:06:58.605 * Background saving started by pid 16586
-16586:C 13 Jun 2024 05:06:58.718 * DB saved on disk
-16586:C 13 Jun 2024 05:06:58.718 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 05:06:58.809 * Background saving terminated with success
-58896:M 13 Jun 2024 06:11:18.604 * 1 changes in 3600 seconds. Saving...
-58896:M 13 Jun 2024 06:11:18.613 * Background saving started by pid 16651
-16651:C 13 Jun 2024 06:11:18.714 * DB saved on disk
-16651:C 13 Jun 2024 06:11:18.746 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 06:11:18.815 * Background saving terminated with success
-58896:M 13 Jun 2024 07:14:47.525 * 1 changes in 3600 seconds. Saving...
-58896:M 13 Jun 2024 07:14:47.528 * Background saving started by pid 17811
-17811:C 13 Jun 2024 07:14:47.679 * DB saved on disk
-17811:C 13 Jun 2024 07:14:47.680 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 07:14:47.730 * Background saving terminated with success
-58896:M 13 Jun 2024 08:23:04.569 * 1 changes in 3600 seconds. Saving...
-58896:M 13 Jun 2024 08:23:04.574 * Background saving started by pid 17919
-17919:C 13 Jun 2024 08:23:04.680 * DB saved on disk
-17919:C 13 Jun 2024 08:23:04.680 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 08:23:04.775 * Background saving terminated with success
-58896:M 13 Jun 2024 09:29:23.578 * 1 changes in 3600 seconds. Saving...
-58896:M 13 Jun 2024 09:29:23.582 * Background saving started by pid 18003
-18003:C 13 Jun 2024 09:29:23.701 * DB saved on disk
-18003:C 13 Jun 2024 09:29:23.701 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 09:29:23.787 * Background saving terminated with success
-58896:M 13 Jun 2024 09:50:20.002 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 09:50:20.002 * Background saving started by pid 18192
-18192:C 13 Jun 2024 09:50:20.013 * DB saved on disk
-18192:C 13 Jun 2024 09:50:20.016 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 09:50:20.103 * Background saving terminated with success
-58896:M 13 Jun 2024 10:26:10.581 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 10:26:10.583 * Background saving started by pid 18369
-18369:C 13 Jun 2024 10:26:10.602 * DB saved on disk
-18369:C 13 Jun 2024 10:26:10.602 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 10:26:10.684 * Background saving terminated with success
-58896:M 13 Jun 2024 10:55:26.571 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 10:55:26.574 * Background saving started by pid 18571
-18571:C 13 Jun 2024 10:55:26.591 * DB saved on disk
-18571:C 13 Jun 2024 10:55:26.591 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 10:55:26.676 * Background saving terminated with success
-58896:M 13 Jun 2024 11:29:14.641 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 11:29:14.644 * Background saving started by pid 18818
-18818:C 13 Jun 2024 11:29:14.664 * DB saved on disk
-18818:C 13 Jun 2024 11:29:14.665 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 11:29:14.746 * Background saving terminated with success
-58896:M 13 Jun 2024 11:34:15.063 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 11:34:15.065 * Background saving started by pid 20038
-20038:C 13 Jun 2024 11:34:15.082 * DB saved on disk
-20038:C 13 Jun 2024 11:34:15.084 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 11:34:15.167 * Background saving terminated with success
-58896:M 13 Jun 2024 11:39:16.050 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 11:39:16.052 * Background saving started by pid 21202
-21202:C 13 Jun 2024 11:39:16.063 * DB saved on disk
-21202:C 13 Jun 2024 11:39:16.064 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 11:39:16.153 * Background saving terminated with success
-58896:M 13 Jun 2024 11:44:17.031 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 11:44:17.033 * Background saving started by pid 22048
-22048:C 13 Jun 2024 11:44:17.047 * DB saved on disk
-22048:C 13 Jun 2024 11:44:17.049 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 11:44:17.135 * Background saving terminated with success
-58896:M 13 Jun 2024 11:49:18.051 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 11:49:18.056 * Background saving started by pid 22821
-22821:C 13 Jun 2024 11:49:18.073 * DB saved on disk
-22821:C 13 Jun 2024 11:49:18.077 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 11:49:18.159 * Background saving terminated with success
-58896:M 13 Jun 2024 11:54:19.040 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 11:54:19.043 * Background saving started by pid 23590
-23590:C 13 Jun 2024 11:54:19.061 * DB saved on disk
-23590:C 13 Jun 2024 11:54:19.062 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 11:54:19.145 * Background saving terminated with success
-58896:M 13 Jun 2024 11:59:20.034 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 11:59:20.038 * Background saving started by pid 24369
-24369:C 13 Jun 2024 11:59:20.055 * DB saved on disk
-24369:C 13 Jun 2024 11:59:20.056 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 11:59:20.141 * Background saving terminated with success
-58896:M 13 Jun 2024 12:04:21.013 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 12:04:21.015 * Background saving started by pid 25145
-25145:C 13 Jun 2024 12:04:21.033 * DB saved on disk
-25145:C 13 Jun 2024 12:04:21.034 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 12:04:21.117 * Background saving terminated with success
-58896:M 13 Jun 2024 12:09:22.032 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 12:09:22.035 * Background saving started by pid 25919
-25919:C 13 Jun 2024 12:09:22.049 * DB saved on disk
-25919:C 13 Jun 2024 12:09:22.052 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 12:09:22.138 * Background saving terminated with success
-58896:M 13 Jun 2024 12:14:23.054 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 12:14:23.058 * Background saving started by pid 26708
-26708:C 13 Jun 2024 12:14:23.070 * DB saved on disk
-26708:C 13 Jun 2024 12:14:23.089 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 12:14:23.160 * Background saving terminated with success
-58896:M 13 Jun 2024 12:22:08.623 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 12:22:08.625 * Background saving started by pid 26869
-26869:C 13 Jun 2024 12:22:08.638 * DB saved on disk
-26869:C 13 Jun 2024 12:22:08.640 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 12:22:08.727 * Background saving terminated with success
-58896:M 13 Jun 2024 13:22:56.523 * 1 changes in 3600 seconds. Saving...
-58896:M 13 Jun 2024 13:22:56.527 * Background saving started by pid 26934
-26934:C 13 Jun 2024 13:22:56.637 * DB saved on disk
-26934:C 13 Jun 2024 13:22:56.638 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 13:22:56.730 * Background saving terminated with success
-58896:M 13 Jun 2024 13:54:40.777 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 13:54:40.779 * Background saving started by pid 27164
-27164:C 13 Jun 2024 13:54:40.788 * DB saved on disk
-27164:C 13 Jun 2024 13:54:40.789 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 13:54:40.881 * Background saving terminated with success
-58896:M 13 Jun 2024 13:59:41.086 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 13:59:41.091 * Background saving started by pid 28401
-28401:C 13 Jun 2024 13:59:41.125 * DB saved on disk
-28401:C 13 Jun 2024 13:59:41.125 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 13:59:41.193 * Background saving terminated with success
-58896:M 13 Jun 2024 14:04:42.086 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 14:04:42.089 * Background saving started by pid 29364
-29364:C 13 Jun 2024 14:04:42.106 * DB saved on disk
-29364:C 13 Jun 2024 14:04:42.108 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 14:04:42.191 * Background saving terminated with success
-58896:M 13 Jun 2024 14:09:43.039 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 14:09:43.040 * Background saving started by pid 30343
-30343:C 13 Jun 2024 14:09:43.050 * DB saved on disk
-30343:C 13 Jun 2024 14:09:43.051 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 14:09:43.141 * Background saving terminated with success
-58896:M 13 Jun 2024 14:14:44.095 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 14:14:44.098 * Background saving started by pid 31113
-31113:C 13 Jun 2024 14:14:44.106 * DB saved on disk
-31113:C 13 Jun 2024 14:14:44.107 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 14:14:44.199 * Background saving terminated with success
-58896:M 13 Jun 2024 14:19:45.073 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 14:19:45.077 * Background saving started by pid 31876
-31876:C 13 Jun 2024 14:19:45.105 * DB saved on disk
-31876:C 13 Jun 2024 14:19:45.106 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 14:19:45.181 * Background saving terminated with success
-58896:M 13 Jun 2024 14:24:46.099 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 14:24:46.103 * Background saving started by pid 32589
-32589:C 13 Jun 2024 14:24:46.117 * DB saved on disk
-32589:C 13 Jun 2024 14:24:46.119 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 14:24:46.204 * Background saving terminated with success
-58896:M 13 Jun 2024 14:29:47.029 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 14:29:47.035 * Background saving started by pid 33317
-33317:C 13 Jun 2024 14:29:47.050 * DB saved on disk
-33317:C 13 Jun 2024 14:29:47.051 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 14:29:47.137 * Background saving terminated with success
-58896:M 13 Jun 2024 14:34:48.042 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 14:34:48.044 * Background saving started by pid 34025
-34025:C 13 Jun 2024 14:34:48.057 * DB saved on disk
-34025:C 13 Jun 2024 14:34:48.060 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 14:34:48.147 * Background saving terminated with success
-58896:M 13 Jun 2024 14:39:49.001 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 14:39:49.002 * Background saving started by pid 34740
-34740:C 13 Jun 2024 14:39:49.011 * DB saved on disk
-34740:C 13 Jun 2024 14:39:49.011 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 14:39:49.104 * Background saving terminated with success
-58896:M 13 Jun 2024 14:44:50.083 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 14:44:50.086 * Background saving started by pid 35603
-35603:C 13 Jun 2024 14:44:50.098 * DB saved on disk
-35603:C 13 Jun 2024 14:44:50.101 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 14:44:50.188 * Background saving terminated with success
-58896:M 13 Jun 2024 14:49:51.042 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 14:49:51.045 * Background saving started by pid 36239
-36239:C 13 Jun 2024 14:49:51.057 * DB saved on disk
-36239:C 13 Jun 2024 14:49:51.059 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 14:49:51.146 * Background saving terminated with success
-58896:M 13 Jun 2024 14:54:52.029 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 14:54:52.032 * Background saving started by pid 36857
-36857:C 13 Jun 2024 14:54:52.066 * DB saved on disk
-36857:C 13 Jun 2024 14:54:52.067 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 14:54:52.134 * Background saving terminated with success
-58896:M 13 Jun 2024 14:59:53.033 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 14:59:53.036 * Background saving started by pid 37538
-37538:C 13 Jun 2024 14:59:53.051 * DB saved on disk
-37538:C 13 Jun 2024 14:59:53.052 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 14:59:53.138 * Background saving terminated with success
-58896:M 13 Jun 2024 15:04:54.044 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 15:04:54.046 * Background saving started by pid 38152
-38152:C 13 Jun 2024 15:04:54.067 * DB saved on disk
-38152:C 13 Jun 2024 15:04:54.069 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 15:04:54.148 * Background saving terminated with success
-58896:M 13 Jun 2024 15:09:55.087 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 15:09:55.092 * Background saving started by pid 38789
-38789:C 13 Jun 2024 15:09:55.104 * DB saved on disk
-38789:C 13 Jun 2024 15:09:55.104 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 15:09:55.196 * Background saving terminated with success
-58896:M 13 Jun 2024 15:14:56.035 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 15:14:56.037 * Background saving started by pid 39411
-39411:C 13 Jun 2024 15:14:56.050 * DB saved on disk
-39411:C 13 Jun 2024 15:14:56.051 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 15:14:56.138 * Background saving terminated with success
-58896:M 13 Jun 2024 15:19:57.033 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 15:19:57.034 * Background saving started by pid 40025
-40025:C 13 Jun 2024 15:19:57.043 * DB saved on disk
-40025:C 13 Jun 2024 15:19:57.044 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 15:19:57.135 * Background saving terminated with success
-58896:M 13 Jun 2024 15:24:58.059 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 15:24:58.061 * Background saving started by pid 40708
-40708:C 13 Jun 2024 15:24:58.082 * DB saved on disk
-40708:C 13 Jun 2024 15:24:58.082 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 15:24:58.162 * Background saving terminated with success
-58896:M 13 Jun 2024 15:29:59.058 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 15:29:59.061 * Background saving started by pid 41970
-41970:C 13 Jun 2024 15:29:59.074 * DB saved on disk
-41970:C 13 Jun 2024 15:29:59.076 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 15:29:59.162 * Background saving terminated with success
-58896:M 13 Jun 2024 15:35:00.020 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 15:35:00.023 * Background saving started by pid 46158
-46158:C 13 Jun 2024 15:35:00.046 * DB saved on disk
-46158:C 13 Jun 2024 15:35:00.047 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 15:35:00.124 * Background saving terminated with success
-58896:M 13 Jun 2024 15:40:01.046 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 15:40:01.049 * Background saving started by pid 52688
-52688:C 13 Jun 2024 15:40:01.069 * DB saved on disk
-52688:C 13 Jun 2024 15:40:01.069 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 15:40:01.150 * Background saving terminated with success
-58896:M 13 Jun 2024 15:45:53.815 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 15:45:53.818 * Background saving started by pid 53374
-53374:C 13 Jun 2024 15:45:53.840 * DB saved on disk
-53374:C 13 Jun 2024 15:45:53.840 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 15:45:53.919 * Background saving terminated with success
-58896:M 13 Jun 2024 15:50:54.050 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 15:50:54.052 * Background saving started by pid 54742
-54742:C 13 Jun 2024 15:50:54.072 * DB saved on disk
-54742:C 13 Jun 2024 15:50:54.072 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 15:50:54.153 * Background saving terminated with success
-58896:M 13 Jun 2024 15:55:55.100 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 15:55:55.103 * Background saving started by pid 55431
-55431:C 13 Jun 2024 15:55:55.114 * DB saved on disk
-55431:C 13 Jun 2024 15:55:55.114 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 15:55:55.204 * Background saving terminated with success
-58896:M 13 Jun 2024 16:00:56.039 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 16:00:56.042 * Background saving started by pid 56109
-56109:C 13 Jun 2024 16:00:56.051 * DB saved on disk
-56109:C 13 Jun 2024 16:00:56.052 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 16:00:56.144 * Background saving terminated with success
-58896:M 13 Jun 2024 16:05:57.027 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 16:05:57.029 * Background saving started by pid 56794
-56794:C 13 Jun 2024 16:05:57.044 * DB saved on disk
-56794:C 13 Jun 2024 16:05:57.045 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 16:05:57.130 * Background saving terminated with success
-58896:M 13 Jun 2024 16:10:58.007 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 16:10:58.010 * Background saving started by pid 57424
-57424:C 13 Jun 2024 16:10:58.022 * DB saved on disk
-57424:C 13 Jun 2024 16:10:58.026 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 16:10:58.111 * Background saving terminated with success
-58896:M 13 Jun 2024 16:15:59.064 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 16:15:59.067 * Background saving started by pid 58114
-58114:C 13 Jun 2024 16:15:59.076 * DB saved on disk
-58114:C 13 Jun 2024 16:15:59.077 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 16:15:59.168 * Background saving terminated with success
-58896:M 13 Jun 2024 16:21:00.080 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 16:21:00.081 * Background saving started by pid 58865
-58865:C 13 Jun 2024 16:21:00.090 * DB saved on disk
-58865:C 13 Jun 2024 16:21:00.091 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 16:21:00.182 * Background saving terminated with success
-58896:M 13 Jun 2024 16:26:01.042 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 16:26:01.045 * Background saving started by pid 59640
-59640:C 13 Jun 2024 16:26:01.059 * DB saved on disk
-59640:C 13 Jun 2024 16:26:01.060 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 16:26:01.145 * Background saving terminated with success
-58896:M 13 Jun 2024 16:31:02.024 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 16:31:02.026 * Background saving started by pid 60306
-60306:C 13 Jun 2024 16:31:02.037 * DB saved on disk
-60306:C 13 Jun 2024 16:31:02.038 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 16:31:02.127 * Background saving terminated with success
-58896:M 13 Jun 2024 16:36:03.023 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 16:36:03.027 * Background saving started by pid 60889
-60889:C 13 Jun 2024 16:36:03.056 * DB saved on disk
-60889:C 13 Jun 2024 16:36:03.056 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 16:36:03.128 * Background saving terminated with success
-58896:M 13 Jun 2024 16:41:04.062 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 16:41:04.065 * Background saving started by pid 61499
-61499:C 13 Jun 2024 16:41:04.084 * DB saved on disk
-61499:C 13 Jun 2024 16:41:04.085 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 16:41:04.167 * Background saving terminated with success
-58896:M 13 Jun 2024 16:46:05.062 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 16:46:05.064 * Background saving started by pid 62153
-62153:C 13 Jun 2024 16:46:05.078 * DB saved on disk
-62153:C 13 Jun 2024 16:46:05.079 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 16:46:05.165 * Background saving terminated with success
-58896:M 13 Jun 2024 16:51:06.057 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 16:51:06.058 * Background saving started by pid 62839
-62839:C 13 Jun 2024 16:51:06.067 * DB saved on disk
-62839:C 13 Jun 2024 16:51:06.069 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 16:51:06.159 * Background saving terminated with success
-58896:M 13 Jun 2024 16:56:07.041 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 16:56:07.044 * Background saving started by pid 63527
-63527:C 13 Jun 2024 16:56:07.063 * DB saved on disk
-63527:C 13 Jun 2024 16:56:07.064 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 16:56:07.144 * Background saving terminated with success
-58896:M 13 Jun 2024 17:01:08.073 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 17:01:08.074 * Background saving started by pid 64189
-64189:C 13 Jun 2024 17:01:08.088 * DB saved on disk
-64189:C 13 Jun 2024 17:01:08.089 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 17:01:08.175 * Background saving terminated with success
-58896:M 13 Jun 2024 17:06:09.002 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 17:06:09.003 * Background saving started by pid 64802
-64802:C 13 Jun 2024 17:06:09.018 * DB saved on disk
-64802:C 13 Jun 2024 17:06:09.021 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 17:06:09.104 * Background saving terminated with success
-58896:M 13 Jun 2024 17:11:10.042 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 17:11:10.046 * Background saving started by pid 65468
-65468:C 13 Jun 2024 17:11:10.069 * DB saved on disk
-65468:C 13 Jun 2024 17:11:10.071 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 17:11:10.148 * Background saving terminated with success
-58896:M 13 Jun 2024 17:16:11.098 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 17:16:11.099 * Background saving started by pid 66110
-66110:C 13 Jun 2024 17:16:11.108 * DB saved on disk
-66110:C 13 Jun 2024 17:16:11.109 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 17:16:11.201 * Background saving terminated with success
-58896:M 13 Jun 2024 17:21:12.003 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 17:21:12.006 * Background saving started by pid 66847
-66847:C 13 Jun 2024 17:21:12.018 * DB saved on disk
-66847:C 13 Jun 2024 17:21:12.019 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 17:21:12.111 * Background saving terminated with success
-58896:M 13 Jun 2024 17:26:13.038 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 17:26:13.041 * Background saving started by pid 67587
-67587:C 13 Jun 2024 17:26:13.051 * DB saved on disk
-67587:C 13 Jun 2024 17:26:13.052 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 17:26:13.142 * Background saving terminated with success
-58896:M 13 Jun 2024 17:31:14.027 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 17:31:14.028 * Background saving started by pid 68487
-68487:C 13 Jun 2024 17:31:14.036 * DB saved on disk
-68487:C 13 Jun 2024 17:31:14.038 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 17:31:14.130 * Background saving terminated with success
-58896:M 13 Jun 2024 17:36:15.062 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 17:36:15.064 * Background saving started by pid 69205
-69205:C 13 Jun 2024 17:36:15.071 * DB saved on disk
-69205:C 13 Jun 2024 17:36:15.072 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 17:36:15.174 * Background saving terminated with success
-58896:M 13 Jun 2024 17:41:16.100 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 17:41:16.101 * Background saving started by pid 70157
-70157:C 13 Jun 2024 17:41:16.117 * DB saved on disk
-70157:C 13 Jun 2024 17:41:16.118 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 17:41:16.203 * Background saving terminated with success
-58896:M 13 Jun 2024 17:46:17.062 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 17:46:17.064 * Background saving started by pid 70947
-70947:C 13 Jun 2024 17:46:17.078 * DB saved on disk
-70947:C 13 Jun 2024 17:46:17.080 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 17:46:17.165 * Background saving terminated with success
-58896:M 13 Jun 2024 17:51:18.076 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 17:51:18.077 * Background saving started by pid 71675
-71675:C 13 Jun 2024 17:51:18.086 * DB saved on disk
-71675:C 13 Jun 2024 17:51:18.090 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 17:51:18.179 * Background saving terminated with success
-58896:M 13 Jun 2024 17:56:19.041 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 17:56:19.042 * Background saving started by pid 72514
-72514:C 13 Jun 2024 17:56:19.052 * DB saved on disk
-72514:C 13 Jun 2024 17:56:19.053 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 17:56:19.144 * Background saving terminated with success
-58896:M 13 Jun 2024 18:01:20.068 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 18:01:20.069 * Background saving started by pid 73367
-73367:C 13 Jun 2024 18:01:20.084 * DB saved on disk
-73367:C 13 Jun 2024 18:01:20.085 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 18:01:20.171 * Background saving terminated with success
-58896:M 13 Jun 2024 18:06:21.045 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 18:06:21.048 * Background saving started by pid 74221
-74221:C 13 Jun 2024 18:06:21.057 * DB saved on disk
-74221:C 13 Jun 2024 18:06:21.062 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 18:06:21.148 * Background saving terminated with success
-58896:M 13 Jun 2024 18:11:22.084 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 18:11:22.088 * Background saving started by pid 75044
-75044:C 13 Jun 2024 18:11:22.111 * DB saved on disk
-75044:C 13 Jun 2024 18:11:22.111 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 18:11:22.189 * Background saving terminated with success
-58896:M 13 Jun 2024 18:16:23.041 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 18:16:23.042 * Background saving started by pid 76027
-76027:C 13 Jun 2024 18:16:23.057 * DB saved on disk
-76027:C 13 Jun 2024 18:16:23.058 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 18:16:23.143 * Background saving terminated with success
-58896:M 13 Jun 2024 18:21:24.069 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 18:21:24.073 * Background saving started by pid 77094
-77094:C 13 Jun 2024 18:21:24.084 * DB saved on disk
-77094:C 13 Jun 2024 18:21:24.086 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 18:21:24.175 * Background saving terminated with success
-58896:M 13 Jun 2024 18:26:25.068 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 18:26:25.073 * Background saving started by pid 78140
-78140:C 13 Jun 2024 18:26:25.088 * DB saved on disk
-78140:C 13 Jun 2024 18:26:25.091 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 18:26:25.174 * Background saving terminated with success
-58896:M 13 Jun 2024 18:31:26.038 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 18:31:26.041 * Background saving started by pid 79121
-79121:C 13 Jun 2024 18:31:26.053 * DB saved on disk
-79121:C 13 Jun 2024 18:31:26.053 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 18:31:26.142 * Background saving terminated with success
-58896:M 13 Jun 2024 18:36:27.010 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 18:36:27.013 * Background saving started by pid 80053
-80053:C 13 Jun 2024 18:36:27.025 * DB saved on disk
-80053:C 13 Jun 2024 18:36:27.026 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 18:36:27.115 * Background saving terminated with success
-58896:M 13 Jun 2024 18:41:28.100 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 18:41:28.101 * Background saving started by pid 81033
-81033:C 13 Jun 2024 18:41:28.110 * DB saved on disk
-81033:C 13 Jun 2024 18:41:28.114 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 18:41:28.203 * Background saving terminated with success
-58896:M 13 Jun 2024 18:46:29.056 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 18:46:29.058 * Background saving started by pid 82024
-82024:C 13 Jun 2024 18:46:29.068 * DB saved on disk
-82024:C 13 Jun 2024 18:46:29.069 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 18:46:29.159 * Background saving terminated with success
-58896:M 13 Jun 2024 18:51:30.025 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 18:51:30.026 * Background saving started by pid 83031
-83031:C 13 Jun 2024 18:51:30.040 * DB saved on disk
-83031:C 13 Jun 2024 18:51:30.041 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 18:51:30.128 * Background saving terminated with success
-58896:M 13 Jun 2024 18:56:31.028 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 18:56:31.032 * Background saving started by pid 84257
-84257:C 13 Jun 2024 18:56:31.061 * DB saved on disk
-84257:C 13 Jun 2024 18:56:31.061 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 18:56:31.133 * Background saving terminated with success
-58896:M 13 Jun 2024 19:01:32.042 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 19:01:32.045 * Background saving started by pid 85971
-85971:C 13 Jun 2024 19:01:32.057 * DB saved on disk
-85971:C 13 Jun 2024 19:01:32.058 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 19:01:32.146 * Background saving terminated with success
-58896:M 13 Jun 2024 19:06:33.000 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 19:06:33.004 * Background saving started by pid 87445
-87445:C 13 Jun 2024 19:06:33.015 * DB saved on disk
-87445:C 13 Jun 2024 19:06:33.016 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 19:06:33.106 * Background saving terminated with success
-58896:M 13 Jun 2024 19:24:03.583 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 19:24:03.704 * Background saving started by pid 88482
-88482:C 13 Jun 2024 19:24:03.712 * DB saved on disk
-88482:C 13 Jun 2024 19:24:03.715 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 19:24:03.820 * Background saving terminated with success
-58896:M 13 Jun 2024 19:29:04.018 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 19:29:04.020 * Background saving started by pid 89605
-89605:C 13 Jun 2024 19:29:04.035 * DB saved on disk
-89605:C 13 Jun 2024 19:29:04.037 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 19:29:04.122 * Background saving terminated with success
-58896:M 13 Jun 2024 19:34:05.068 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 19:34:05.071 * Background saving started by pid 90623
-90623:C 13 Jun 2024 19:34:05.084 * DB saved on disk
-90623:C 13 Jun 2024 19:34:05.085 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 19:34:05.172 * Background saving terminated with success
-58896:M 13 Jun 2024 19:39:06.039 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 19:39:06.040 * Background saving started by pid 91649
-91649:C 13 Jun 2024 19:39:06.050 * DB saved on disk
-91649:C 13 Jun 2024 19:39:06.051 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 19:39:06.141 * Background saving terminated with success
-58896:M 13 Jun 2024 19:44:07.052 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 19:44:07.053 * Background saving started by pid 92499
-92499:C 13 Jun 2024 19:44:07.066 * DB saved on disk
-92499:C 13 Jun 2024 19:44:07.067 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 19:44:07.154 * Background saving terminated with success
-58896:M 13 Jun 2024 19:49:08.026 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 19:49:08.029 * Background saving started by pid 93255
-93255:C 13 Jun 2024 19:49:08.047 * DB saved on disk
-93255:C 13 Jun 2024 19:49:08.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 19:49:08.130 * Background saving terminated with success
-58896:M 13 Jun 2024 20:09:23.789 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 20:09:23.790 * Background saving started by pid 93455
-93455:C 13 Jun 2024 20:09:23.800 * DB saved on disk
-93455:C 13 Jun 2024 20:09:23.802 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 20:09:23.891 * Background saving terminated with success
-58896:M 13 Jun 2024 21:00:24.763 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 21:00:24.764 * Background saving started by pid 93545
-93545:C 13 Jun 2024 21:00:24.776 * DB saved on disk
-93545:C 13 Jun 2024 21:00:24.777 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 21:00:24.867 * Background saving terminated with success
-58896:M 13 Jun 2024 21:09:07.599 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 21:09:07.601 * Background saving started by pid 93647
-93647:C 13 Jun 2024 21:09:07.614 * DB saved on disk
-93647:C 13 Jun 2024 21:09:07.616 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 21:09:07.702 * Background saving terminated with success
-58896:M 13 Jun 2024 22:15:01.689 * 1 changes in 3600 seconds. Saving...
-58896:M 13 Jun 2024 22:15:01.693 * Background saving started by pid 93707
-93707:C 13 Jun 2024 22:15:01.819 * DB saved on disk
-93707:C 13 Jun 2024 22:15:01.820 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 22:15:01.896 * Background saving terminated with success
-58896:M 13 Jun 2024 22:28:49.316 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 22:28:49.319 * Background saving started by pid 93877
-93877:C 13 Jun 2024 22:28:49.333 * DB saved on disk
-93877:C 13 Jun 2024 22:28:49.334 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 22:28:49.421 * Background saving terminated with success
-58896:M 13 Jun 2024 23:38:03.790 * 1 changes in 3600 seconds. Saving...
-58896:M 13 Jun 2024 23:38:03.794 * Background saving started by pid 94043
-94043:C 13 Jun 2024 23:38:03.905 * DB saved on disk
-94043:C 13 Jun 2024 23:38:03.908 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 23:38:03.997 * Background saving terminated with success
-58896:M 13 Jun 2024 23:43:04.085 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 23:43:04.087 * Background saving started by pid 94614
-94614:C 13 Jun 2024 23:43:04.094 * DB saved on disk
-94614:C 13 Jun 2024 23:43:04.095 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 23:43:04.189 * Background saving terminated with success
-58896:M 13 Jun 2024 23:48:05.057 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 23:48:05.063 * Background saving started by pid 95362
-95362:C 13 Jun 2024 23:48:05.086 * DB saved on disk
-95362:C 13 Jun 2024 23:48:05.087 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 23:48:05.165 * Background saving terminated with success
-58896:M 13 Jun 2024 23:53:06.041 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 23:53:06.042 * Background saving started by pid 96187
-96187:C 13 Jun 2024 23:53:06.060 * DB saved on disk
-96187:C 13 Jun 2024 23:53:06.061 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 23:53:06.143 * Background saving terminated with success
-58896:M 13 Jun 2024 23:58:07.014 * 100 changes in 300 seconds. Saving...
-58896:M 13 Jun 2024 23:58:07.017 * Background saving started by pid 97027
-97027:C 13 Jun 2024 23:58:07.030 * DB saved on disk
-97027:C 13 Jun 2024 23:58:07.030 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 13 Jun 2024 23:58:07.118 * Background saving terminated with success
-58896:M 14 Jun 2024 00:03:08.062 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 00:03:08.067 * Background saving started by pid 98055
-98055:C 14 Jun 2024 00:03:08.076 * DB saved on disk
-98055:C 14 Jun 2024 00:03:08.076 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 00:03:08.169 * Background saving terminated with success
-58896:M 14 Jun 2024 00:08:09.046 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 00:08:09.048 * Background saving started by pid 906
-906:C 14 Jun 2024 00:08:09.066 * DB saved on disk
-906:C 14 Jun 2024 00:08:09.068 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 00:08:09.149 * Background saving terminated with success
-58896:M 14 Jun 2024 00:13:10.017 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 00:13:10.021 * Background saving started by pid 2698
-2698:C 14 Jun 2024 00:13:10.034 * DB saved on disk
-2698:C 14 Jun 2024 00:13:10.038 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 00:13:10.122 * Background saving terminated with success
-58896:M 14 Jun 2024 00:18:11.011 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 00:18:11.013 * Background saving started by pid 3689
-3689:C 14 Jun 2024 00:18:11.022 * DB saved on disk
-3689:C 14 Jun 2024 00:18:11.023 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 00:18:11.114 * Background saving terminated with success
-58896:M 14 Jun 2024 00:23:12.093 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 00:23:12.094 * Background saving started by pid 4450
-4450:C 14 Jun 2024 00:23:12.105 * DB saved on disk
-4450:C 14 Jun 2024 00:23:12.105 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 00:23:12.196 * Background saving terminated with success
-58896:M 14 Jun 2024 00:28:13.053 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 00:28:13.055 * Background saving started by pid 5238
-5238:C 14 Jun 2024 00:28:13.065 * DB saved on disk
-5238:C 14 Jun 2024 00:28:13.065 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 00:28:13.156 * Background saving terminated with success
-58896:M 14 Jun 2024 00:33:14.055 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 00:33:14.057 * Background saving started by pid 6001
-6001:C 14 Jun 2024 00:33:14.066 * DB saved on disk
-6001:C 14 Jun 2024 00:33:14.067 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 00:33:14.158 * Background saving terminated with success
-58896:M 14 Jun 2024 00:38:15.023 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 00:38:15.025 * Background saving started by pid 6720
-6720:C 14 Jun 2024 00:38:15.034 * DB saved on disk
-6720:C 14 Jun 2024 00:38:15.037 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 00:38:15.126 * Background saving terminated with success
-58896:M 14 Jun 2024 00:43:16.074 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 00:43:16.077 * Background saving started by pid 7544
-7544:C 14 Jun 2024 00:43:16.090 * DB saved on disk
-7544:C 14 Jun 2024 00:43:16.091 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 00:43:16.179 * Background saving terminated with success
-58896:M 14 Jun 2024 00:48:17.008 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 00:48:17.009 * Background saving started by pid 8362
-8362:C 14 Jun 2024 00:48:17.018 * DB saved on disk
-8362:C 14 Jun 2024 00:48:17.020 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 00:48:17.110 * Background saving terminated with success
-58896:M 14 Jun 2024 00:53:18.083 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 00:53:18.086 * Background saving started by pid 9627
-9627:C 14 Jun 2024 00:53:18.094 * DB saved on disk
-9627:C 14 Jun 2024 00:53:18.096 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 00:53:18.187 * Background saving terminated with success
-58896:M 14 Jun 2024 00:58:19.056 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 00:58:19.058 * Background saving started by pid 10541
-10541:C 14 Jun 2024 00:58:19.068 * DB saved on disk
-10541:C 14 Jun 2024 00:58:19.069 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 00:58:19.160 * Background saving terminated with success
-58896:M 14 Jun 2024 01:03:20.085 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 01:03:20.088 * Background saving started by pid 11472
-11472:C 14 Jun 2024 01:03:20.099 * DB saved on disk
-11472:C 14 Jun 2024 01:03:20.112 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 01:03:20.189 * Background saving terminated with success
-58896:M 14 Jun 2024 01:08:21.081 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 01:08:21.083 * Background saving started by pid 12462
-12462:C 14 Jun 2024 01:08:21.094 * DB saved on disk
-12462:C 14 Jun 2024 01:08:21.095 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 01:08:21.184 * Background saving terminated with success
-58896:M 14 Jun 2024 01:13:22.058 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 01:13:22.059 * Background saving started by pid 13399
-13399:C 14 Jun 2024 01:13:22.069 * DB saved on disk
-13399:C 14 Jun 2024 01:13:22.071 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 01:13:22.161 * Background saving terminated with success
-58896:M 14 Jun 2024 01:18:23.041 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 01:18:23.042 * Background saving started by pid 14393
-14393:C 14 Jun 2024 01:18:23.054 * DB saved on disk
-14393:C 14 Jun 2024 01:18:23.055 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 01:18:23.142 * Background saving terminated with success
-58896:M 14 Jun 2024 01:23:24.029 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 01:23:24.031 * Background saving started by pid 15307
-15307:C 14 Jun 2024 01:23:24.047 * DB saved on disk
-15307:C 14 Jun 2024 01:23:24.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 01:23:24.132 * Background saving terminated with success
-58896:M 14 Jun 2024 01:28:25.054 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 01:28:25.057 * Background saving started by pid 16245
-16245:C 14 Jun 2024 01:28:25.067 * DB saved on disk
-16245:C 14 Jun 2024 01:28:25.068 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 01:28:25.158 * Background saving terminated with success
-58896:M 14 Jun 2024 01:33:26.047 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 01:33:26.051 * Background saving started by pid 17193
-17193:C 14 Jun 2024 01:33:26.081 * DB saved on disk
-17193:C 14 Jun 2024 01:33:26.081 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 01:33:26.153 * Background saving terminated with success
-58896:M 14 Jun 2024 01:38:27.082 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 01:38:27.084 * Background saving started by pid 18123
-18123:C 14 Jun 2024 01:38:27.100 * DB saved on disk
-18123:C 14 Jun 2024 01:38:27.101 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 01:38:27.185 * Background saving terminated with success
-58896:M 14 Jun 2024 01:43:28.063 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 01:43:28.065 * Background saving started by pid 20679
-20679:C 14 Jun 2024 01:43:28.076 * DB saved on disk
-20679:C 14 Jun 2024 01:43:28.080 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 01:43:28.172 * Background saving terminated with success
-58896:M 14 Jun 2024 01:48:29.019 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 01:48:29.024 * Background saving started by pid 21789
-21789:C 14 Jun 2024 01:48:29.033 * DB saved on disk
-21789:C 14 Jun 2024 01:48:29.035 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 01:48:29.125 * Background saving terminated with success
-58896:M 14 Jun 2024 01:53:30.004 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 01:53:30.005 * Background saving started by pid 22821
-22821:C 14 Jun 2024 01:53:30.021 * DB saved on disk
-22821:C 14 Jun 2024 01:53:30.023 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 01:53:30.108 * Background saving terminated with success
-58896:M 14 Jun 2024 01:58:31.072 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 01:58:31.072 * Background saving started by pid 23761
-23761:C 14 Jun 2024 01:58:31.081 * DB saved on disk
-23761:C 14 Jun 2024 01:58:31.082 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 01:58:31.175 * Background saving terminated with success
-58896:M 14 Jun 2024 02:03:32.084 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 02:03:32.087 * Background saving started by pid 24691
-24691:C 14 Jun 2024 02:03:32.112 * DB saved on disk
-24691:C 14 Jun 2024 02:03:32.115 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 02:03:32.188 * Background saving terminated with success
-58896:M 14 Jun 2024 02:08:33.012 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 02:08:33.013 * Background saving started by pid 25657
-25657:C 14 Jun 2024 02:08:33.023 * DB saved on disk
-25657:C 14 Jun 2024 02:08:33.024 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 02:08:33.116 * Background saving terminated with success
-58896:M 14 Jun 2024 02:13:34.062 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 02:13:34.065 * Background saving started by pid 26619
-26619:C 14 Jun 2024 02:13:34.082 * DB saved on disk
-26619:C 14 Jun 2024 02:13:34.084 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 02:13:34.168 * Background saving terminated with success
-58896:M 14 Jun 2024 02:18:35.092 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 02:18:35.095 * Background saving started by pid 27572
-27572:C 14 Jun 2024 02:18:35.110 * DB saved on disk
-27572:C 14 Jun 2024 02:18:35.112 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 02:18:35.197 * Background saving terminated with success
-58896:M 14 Jun 2024 02:23:36.006 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 02:23:36.010 * Background saving started by pid 28517
-28517:C 14 Jun 2024 02:23:36.030 * DB saved on disk
-28517:C 14 Jun 2024 02:23:36.031 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 02:23:36.111 * Background saving terminated with success
-58896:M 14 Jun 2024 02:28:37.011 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 02:28:37.014 * Background saving started by pid 29471
-29471:C 14 Jun 2024 02:28:37.025 * DB saved on disk
-29471:C 14 Jun 2024 02:28:37.025 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 02:28:37.115 * Background saving terminated with success
-58896:M 14 Jun 2024 02:33:38.046 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 02:33:38.050 * Background saving started by pid 30459
-30459:C 14 Jun 2024 02:33:38.067 * DB saved on disk
-30459:C 14 Jun 2024 02:33:38.068 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 02:33:38.154 * Background saving terminated with success
-58896:M 14 Jun 2024 02:38:39.080 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 02:38:39.086 * Background saving started by pid 31408
-31408:C 14 Jun 2024 02:38:39.107 * DB saved on disk
-31408:C 14 Jun 2024 02:38:39.108 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 02:38:39.188 * Background saving terminated with success
-58896:M 14 Jun 2024 02:43:40.090 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 02:43:40.093 * Background saving started by pid 32516
-32516:C 14 Jun 2024 02:43:40.103 * DB saved on disk
-32516:C 14 Jun 2024 02:43:40.103 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 02:43:40.195 * Background saving terminated with success
-58896:M 14 Jun 2024 02:48:41.094 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 02:48:41.098 * Background saving started by pid 33530
-33530:C 14 Jun 2024 02:48:41.110 * DB saved on disk
-33530:C 14 Jun 2024 02:48:41.113 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 02:48:41.200 * Background saving terminated with success
-58896:M 14 Jun 2024 02:53:42.065 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 02:53:42.068 * Background saving started by pid 34481
-34481:C 14 Jun 2024 02:53:42.097 * DB saved on disk
-34481:C 14 Jun 2024 02:53:42.098 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 02:53:42.170 * Background saving terminated with success
-58896:M 14 Jun 2024 03:02:19.016 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 03:02:19.020 * Background saving started by pid 35216
-35216:C 14 Jun 2024 03:02:19.144 * DB saved on disk
-35216:C 14 Jun 2024 03:02:19.144 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 03:02:19.223 * Background saving terminated with success
-58896:M 14 Jun 2024 04:09:00.287 * 1 changes in 3600 seconds. Saving...
-58896:M 14 Jun 2024 04:09:00.391 * Background saving started by pid 35351
-35351:C 14 Jun 2024 04:09:00.406 * DB saved on disk
-35351:C 14 Jun 2024 04:09:00.406 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 04:09:00.491 * Background saving terminated with success
-58896:M 14 Jun 2024 05:13:19.442 * 1 changes in 3600 seconds. Saving...
-58896:M 14 Jun 2024 05:13:19.559 * Background saving started by pid 35460
-35460:C 14 Jun 2024 05:13:19.566 * DB saved on disk
-35460:C 14 Jun 2024 05:13:19.567 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 05:13:19.680 * Background saving terminated with success
-58896:M 14 Jun 2024 05:45:38.251 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 05:45:38.253 * Background saving started by pid 35614
-35614:C 14 Jun 2024 05:45:38.263 * DB saved on disk
-35614:C 14 Jun 2024 05:45:38.264 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 05:45:38.354 * Background saving terminated with success
-58896:M 14 Jun 2024 06:06:43.153 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 06:06:43.156 * Background saving started by pid 35774
-35774:C 14 Jun 2024 06:06:43.171 * DB saved on disk
-35774:C 14 Jun 2024 06:06:43.172 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 06:06:43.259 * Background saving terminated with success
-58896:M 14 Jun 2024 06:44:23.413 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 06:44:23.415 * Background saving started by pid 35935
-35935:C 14 Jun 2024 06:44:23.428 * DB saved on disk
-35935:C 14 Jun 2024 06:44:23.428 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 06:44:23.516 * Background saving terminated with success
-58896:M 14 Jun 2024 07:48:29.341 * 1 changes in 3600 seconds. Saving...
-58896:M 14 Jun 2024 07:48:29.456 * Background saving started by pid 35997
-35997:C 14 Jun 2024 07:48:29.466 * DB saved on disk
-35997:C 14 Jun 2024 07:48:29.467 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 07:48:29.558 * Background saving terminated with success
-58896:M 14 Jun 2024 08:56:33.414 * 1 changes in 3600 seconds. Saving...
-58896:M 14 Jun 2024 08:56:33.530 * Background saving started by pid 36114
-36114:C 14 Jun 2024 08:56:33.539 * DB saved on disk
-36114:C 14 Jun 2024 08:56:33.539 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 08:56:33.632 * Background saving terminated with success
-58896:M 14 Jun 2024 09:59:37.399 * 1 changes in 3600 seconds. Saving...
-58896:M 14 Jun 2024 09:59:37.511 * Background saving started by pid 36190
-36190:C 14 Jun 2024 09:59:37.519 * DB saved on disk
-36190:C 14 Jun 2024 09:59:37.520 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 09:59:37.613 * Background saving terminated with success
-58896:M 14 Jun 2024 10:07:14.898 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 10:07:14.901 * Background saving started by pid 36472
-36472:C 14 Jun 2024 10:07:14.910 * DB saved on disk
-36472:C 14 Jun 2024 10:07:14.912 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 10:07:15.001 * Background saving terminated with success
-58896:M 14 Jun 2024 10:12:16.014 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 10:12:16.016 * Background saving started by pid 37638
-37638:C 14 Jun 2024 10:12:16.026 * DB saved on disk
-37638:C 14 Jun 2024 10:12:16.027 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 10:12:16.118 * Background saving terminated with success
-58896:M 14 Jun 2024 10:17:17.028 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 10:17:17.033 * Background saving started by pid 38618
-38618:C 14 Jun 2024 10:17:17.051 * DB saved on disk
-38618:C 14 Jun 2024 10:17:17.051 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 10:17:17.134 * Background saving terminated with success
-58896:M 14 Jun 2024 10:22:18.071 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 10:22:18.073 * Background saving started by pid 39601
-39601:C 14 Jun 2024 10:22:18.085 * DB saved on disk
-39601:C 14 Jun 2024 10:22:18.087 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 10:22:18.175 * Background saving terminated with success
-58896:M 14 Jun 2024 10:28:04.538 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 10:28:04.541 * Background saving started by pid 40107
-40107:C 14 Jun 2024 10:28:04.659 * DB saved on disk
-40107:C 14 Jun 2024 10:28:04.659 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 10:28:04.743 * Background saving terminated with success
-58896:M 14 Jun 2024 11:30:27.751 * 1 changes in 3600 seconds. Saving...
-58896:M 14 Jun 2024 11:30:27.759 * Background saving started by pid 40350
-40350:C 14 Jun 2024 11:30:27.857 * DB saved on disk
-40350:C 14 Jun 2024 11:30:27.859 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 11:30:27.860 * Background saving terminated with success
-58896:M 14 Jun 2024 12:16:13.786 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 12:16:13.787 * Background saving started by pid 40834
-40834:C 14 Jun 2024 12:16:13.796 * DB saved on disk
-40834:C 14 Jun 2024 12:16:13.797 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 12:16:13.888 * Background saving terminated with success
-58896:M 14 Jun 2024 12:21:14.062 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 12:21:14.063 * Background saving started by pid 42185
-42185:C 14 Jun 2024 12:21:14.071 * DB saved on disk
-42185:C 14 Jun 2024 12:21:14.072 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 12:21:14.165 * Background saving terminated with success
-58896:M 14 Jun 2024 12:58:11.704 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 12:58:11.706 * Background saving started by pid 50751
-50751:C 14 Jun 2024 12:58:11.762 * DB saved on disk
-50751:C 14 Jun 2024 12:58:11.763 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 12:58:11.809 * Background saving terminated with success
-58896:M 14 Jun 2024 14:00:57.749 * 1 changes in 3600 seconds. Saving...
-58896:M 14 Jun 2024 14:00:57.755 * Background saving started by pid 52756
-52756:C 14 Jun 2024 14:00:57.876 * DB saved on disk
-52756:C 14 Jun 2024 14:00:57.879 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 14:00:57.967 * Background saving terminated with success
-58896:M 14 Jun 2024 14:15:30.354 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 14:15:30.357 * Background saving started by pid 55569
-55569:C 14 Jun 2024 14:15:30.369 * DB saved on disk
-55569:C 14 Jun 2024 14:15:30.369 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 14:15:30.459 * Background saving terminated with success
-58896:M 14 Jun 2024 14:20:31.034 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 14:20:31.036 * Background saving started by pid 56310
-56310:C 14 Jun 2024 14:20:31.046 * DB saved on disk
-56310:C 14 Jun 2024 14:20:31.047 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 14:20:31.137 * Background saving terminated with success
-58896:M 14 Jun 2024 14:25:32.056 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 14:25:32.057 * Background saving started by pid 57022
-57022:C 14 Jun 2024 14:25:32.069 * DB saved on disk
-57022:C 14 Jun 2024 14:25:32.072 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 14:25:32.159 * Background saving terminated with success
-58896:M 14 Jun 2024 14:30:33.093 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 14:30:33.095 * Background saving started by pid 57937
-57937:C 14 Jun 2024 14:30:33.106 * DB saved on disk
-57937:C 14 Jun 2024 14:30:33.109 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 14:30:33.196 * Background saving terminated with success
-58896:M 14 Jun 2024 14:35:34.000 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 14:35:34.002 * Background saving started by pid 58663
-58663:C 14 Jun 2024 14:35:34.013 * DB saved on disk
-58663:C 14 Jun 2024 14:35:34.013 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 14:35:34.104 * Background saving terminated with success
-58896:M 14 Jun 2024 14:40:35.079 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 14:40:35.079 * Background saving started by pid 59354
-59354:C 14 Jun 2024 14:40:35.092 * DB saved on disk
-59354:C 14 Jun 2024 14:40:35.094 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 14:40:35.181 * Background saving terminated with success
-58896:M 14 Jun 2024 14:45:36.026 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 14:45:36.030 * Background saving started by pid 60066
-60066:C 14 Jun 2024 14:45:36.047 * DB saved on disk
-60066:C 14 Jun 2024 14:45:36.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 14:45:36.133 * Background saving terminated with success
-58896:M 14 Jun 2024 14:50:37.021 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 14:50:37.025 * Background saving started by pid 60762
-60762:C 14 Jun 2024 14:50:37.046 * DB saved on disk
-60762:C 14 Jun 2024 14:50:37.047 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 14:50:37.127 * Background saving terminated with success
-58896:M 14 Jun 2024 14:55:38.054 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 14:55:38.055 * Background saving started by pid 61516
-61516:C 14 Jun 2024 14:55:38.066 * DB saved on disk
-61516:C 14 Jun 2024 14:55:38.068 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 14:55:38.157 * Background saving terminated with success
-58896:M 14 Jun 2024 15:00:39.013 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 15:00:39.016 * Background saving started by pid 63393
-63393:C 14 Jun 2024 15:00:39.026 * DB saved on disk
-63393:C 14 Jun 2024 15:00:39.027 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 15:00:39.117 * Background saving terminated with success
-58896:M 14 Jun 2024 15:05:40.086 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 15:05:40.089 * Background saving started by pid 64106
-64106:C 14 Jun 2024 15:05:40.109 * DB saved on disk
-64106:C 14 Jun 2024 15:05:40.111 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 15:05:40.189 * Background saving terminated with success
-58896:M 14 Jun 2024 15:10:41.056 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 15:10:41.058 * Background saving started by pid 64797
-64797:C 14 Jun 2024 15:10:41.073 * DB saved on disk
-64797:C 14 Jun 2024 15:10:41.073 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 15:10:41.161 * Background saving terminated with success
-58896:M 14 Jun 2024 15:15:42.057 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 15:15:42.059 * Background saving started by pid 65518
-65518:C 14 Jun 2024 15:15:42.074 * DB saved on disk
-65518:C 14 Jun 2024 15:15:42.075 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 15:15:42.162 * Background saving terminated with success
-58896:M 14 Jun 2024 15:20:43.021 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 15:20:43.023 * Background saving started by pid 66505
-66505:C 14 Jun 2024 15:20:43.037 * DB saved on disk
-66505:C 14 Jun 2024 15:20:43.039 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 15:20:43.124 * Background saving terminated with success
-58896:M 14 Jun 2024 15:25:44.025 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 15:25:44.029 * Background saving started by pid 67388
-67388:C 14 Jun 2024 15:25:44.040 * DB saved on disk
-67388:C 14 Jun 2024 15:25:44.041 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 15:25:44.131 * Background saving terminated with success
-58896:M 14 Jun 2024 15:30:45.027 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 15:30:45.029 * Background saving started by pid 68256
-68256:C 14 Jun 2024 15:30:45.053 * DB saved on disk
-68256:C 14 Jun 2024 15:30:45.054 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 15:30:45.130 * Background saving terminated with success
-58896:M 14 Jun 2024 15:35:46.077 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 15:35:46.078 * Background saving started by pid 69268
-69268:C 14 Jun 2024 15:35:46.088 * DB saved on disk
-69268:C 14 Jun 2024 15:35:46.089 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 15:35:46.179 * Background saving terminated with success
-58896:M 14 Jun 2024 15:40:47.084 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 15:40:47.088 * Background saving started by pid 70026
-70026:C 14 Jun 2024 15:40:47.108 * DB saved on disk
-70026:C 14 Jun 2024 15:40:47.110 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 15:40:47.190 * Background saving terminated with success
-58896:M 14 Jun 2024 15:45:48.012 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 15:45:48.019 * Background saving started by pid 71012
-71012:C 14 Jun 2024 15:45:48.028 * DB saved on disk
-71012:C 14 Jun 2024 15:45:48.029 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 15:45:48.120 * Background saving terminated with success
-58896:M 14 Jun 2024 15:50:49.082 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 15:50:49.085 * Background saving started by pid 71940
-71940:C 14 Jun 2024 15:50:49.097 * DB saved on disk
-71940:C 14 Jun 2024 15:50:49.098 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 15:50:49.186 * Background saving terminated with success
-58896:M 14 Jun 2024 15:55:50.015 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 15:55:50.017 * Background saving started by pid 72898
-72898:C 14 Jun 2024 15:55:50.030 * DB saved on disk
-72898:C 14 Jun 2024 15:55:50.035 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 15:55:50.118 * Background saving terminated with success
-58896:M 14 Jun 2024 16:00:51.081 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 16:00:51.085 * Background saving started by pid 73649
-73649:C 14 Jun 2024 16:00:51.098 * DB saved on disk
-73649:C 14 Jun 2024 16:00:51.101 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 16:00:51.186 * Background saving terminated with success
-58896:M 14 Jun 2024 16:05:52.071 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 16:05:52.072 * Background saving started by pid 74480
-74480:C 14 Jun 2024 16:05:52.082 * DB saved on disk
-74480:C 14 Jun 2024 16:05:52.083 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 16:05:52.174 * Background saving terminated with success
-58896:M 14 Jun 2024 16:10:53.035 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 16:10:53.038 * Background saving started by pid 75372
-75372:C 14 Jun 2024 16:10:53.054 * DB saved on disk
-75372:C 14 Jun 2024 16:10:53.058 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 16:10:53.139 * Background saving terminated with success
-58896:M 14 Jun 2024 16:15:54.086 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 16:15:54.089 * Background saving started by pid 76156
-76156:C 14 Jun 2024 16:15:54.114 * DB saved on disk
-76156:C 14 Jun 2024 16:15:54.115 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 16:15:54.190 * Background saving terminated with success
-58896:M 14 Jun 2024 16:20:55.033 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 16:20:55.035 * Background saving started by pid 76928
-76928:C 14 Jun 2024 16:20:55.053 * DB saved on disk
-76928:C 14 Jun 2024 16:20:55.053 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 16:20:55.137 * Background saving terminated with success
-58896:M 14 Jun 2024 16:25:56.061 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 16:25:56.062 * Background saving started by pid 77910
-77910:C 14 Jun 2024 16:25:56.073 * DB saved on disk
-77910:C 14 Jun 2024 16:25:56.073 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 16:25:56.164 * Background saving terminated with success
-58896:M 14 Jun 2024 16:30:57.004 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 16:30:57.006 * Background saving started by pid 78693
-78693:C 14 Jun 2024 16:30:57.017 * DB saved on disk
-78693:C 14 Jun 2024 16:30:57.019 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 16:30:57.108 * Background saving terminated with success
-58896:M 14 Jun 2024 16:35:58.062 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 16:35:58.064 * Background saving started by pid 79546
-79546:C 14 Jun 2024 16:35:58.076 * DB saved on disk
-79546:C 14 Jun 2024 16:35:58.077 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 16:35:58.165 * Background saving terminated with success
-58896:M 14 Jun 2024 16:40:59.088 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 16:40:59.089 * Background saving started by pid 80305
-80305:C 14 Jun 2024 16:40:59.101 * DB saved on disk
-80305:C 14 Jun 2024 16:40:59.104 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 16:40:59.190 * Background saving terminated with success
-58896:M 14 Jun 2024 16:46:00.058 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 16:46:00.059 * Background saving started by pid 81213
-81213:C 14 Jun 2024 16:46:00.070 * DB saved on disk
-81213:C 14 Jun 2024 16:46:00.072 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 16:46:00.160 * Background saving terminated with success
-58896:M 14 Jun 2024 16:51:01.096 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 16:51:01.097 * Background saving started by pid 82247
-82247:C 14 Jun 2024 16:51:01.106 * DB saved on disk
-82247:C 14 Jun 2024 16:51:01.107 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 16:51:01.199 * Background saving terminated with success
-58896:M 14 Jun 2024 16:56:02.068 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 16:56:02.072 * Background saving started by pid 83296
-83296:C 14 Jun 2024 16:56:02.085 * DB saved on disk
-83296:C 14 Jun 2024 16:56:02.086 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 16:56:02.173 * Background saving terminated with success
-58896:M 14 Jun 2024 17:01:03.003 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 17:01:03.004 * Background saving started by pid 84035
-84035:C 14 Jun 2024 17:01:03.014 * DB saved on disk
-84035:C 14 Jun 2024 17:01:03.015 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 17:01:03.105 * Background saving terminated with success
-58896:M 14 Jun 2024 17:06:04.035 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 17:06:04.037 * Background saving started by pid 84625
-84625:C 14 Jun 2024 17:06:04.049 * DB saved on disk
-84625:C 14 Jun 2024 17:06:04.050 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 17:06:04.138 * Background saving terminated with success
-58896:M 14 Jun 2024 17:11:05.017 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 17:11:05.020 * Background saving started by pid 85292
-85292:C 14 Jun 2024 17:11:05.034 * DB saved on disk
-85292:C 14 Jun 2024 17:11:05.035 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 17:11:05.121 * Background saving terminated with success
-58896:M 14 Jun 2024 17:16:06.081 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 17:16:06.083 * Background saving started by pid 85873
-85873:C 14 Jun 2024 17:16:06.091 * DB saved on disk
-85873:C 14 Jun 2024 17:16:06.092 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 17:16:06.184 * Background saving terminated with success
-58896:M 14 Jun 2024 17:21:07.000 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 17:21:07.001 * Background saving started by pid 86432
-86432:C 14 Jun 2024 17:21:07.014 * DB saved on disk
-86432:C 14 Jun 2024 17:21:07.014 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 17:21:07.102 * Background saving terminated with success
-58896:M 14 Jun 2024 17:26:08.044 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 17:26:08.047 * Background saving started by pid 87015
-87015:C 14 Jun 2024 17:26:08.076 * DB saved on disk
-87015:C 14 Jun 2024 17:26:08.077 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 17:26:08.149 * Background saving terminated with success
-58896:M 14 Jun 2024 17:31:09.060 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 17:31:09.061 * Background saving started by pid 87646
-87646:C 14 Jun 2024 17:31:09.069 * DB saved on disk
-87646:C 14 Jun 2024 17:31:09.071 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 17:31:09.163 * Background saving terminated with success
-58896:M 14 Jun 2024 17:36:10.011 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 17:36:10.013 * Background saving started by pid 88231
-88231:C 14 Jun 2024 17:36:10.024 * DB saved on disk
-88231:C 14 Jun 2024 17:36:10.024 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 17:36:10.114 * Background saving terminated with success
-58896:M 14 Jun 2024 17:41:11.048 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 17:41:11.055 * Background saving started by pid 88988
-88988:C 14 Jun 2024 17:41:11.063 * DB saved on disk
-88988:C 14 Jun 2024 17:41:11.064 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 17:41:11.156 * Background saving terminated with success
-58896:M 14 Jun 2024 17:46:12.064 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 17:46:12.065 * Background saving started by pid 89661
-89661:C 14 Jun 2024 17:46:12.078 * DB saved on disk
-89661:C 14 Jun 2024 17:46:12.079 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 17:46:12.166 * Background saving terminated with success
-58896:M 14 Jun 2024 17:51:13.041 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 17:51:13.043 * Background saving started by pid 90249
-90249:C 14 Jun 2024 17:51:13.055 * DB saved on disk
-90249:C 14 Jun 2024 17:51:13.057 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 17:51:13.144 * Background saving terminated with success
-58896:M 14 Jun 2024 17:56:14.019 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 17:56:14.022 * Background saving started by pid 91032
-91032:C 14 Jun 2024 17:56:14.035 * DB saved on disk
-91032:C 14 Jun 2024 17:56:14.035 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 17:56:14.123 * Background saving terminated with success
-58896:M 14 Jun 2024 18:01:15.037 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 18:01:15.039 * Background saving started by pid 91806
-91806:C 14 Jun 2024 18:01:15.055 * DB saved on disk
-91806:C 14 Jun 2024 18:01:15.055 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 18:01:15.141 * Background saving terminated with success
-58896:M 14 Jun 2024 18:06:16.057 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 18:06:16.060 * Background saving started by pid 92444
-92444:C 14 Jun 2024 18:06:16.073 * DB saved on disk
-92444:C 14 Jun 2024 18:06:16.074 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 18:06:16.160 * Background saving terminated with success
-58896:M 14 Jun 2024 18:11:17.059 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 18:11:17.063 * Background saving started by pid 93096
-93096:C 14 Jun 2024 18:11:17.080 * DB saved on disk
-93096:C 14 Jun 2024 18:11:17.081 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 18:11:17.164 * Background saving terminated with success
-58896:M 14 Jun 2024 18:16:18.080 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 18:16:18.083 * Background saving started by pid 93766
-93766:C 14 Jun 2024 18:16:18.094 * DB saved on disk
-93766:C 14 Jun 2024 18:16:18.096 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 18:16:18.184 * Background saving terminated with success
-58896:M 14 Jun 2024 18:21:19.098 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 18:21:19.102 * Background saving started by pid 94418
-94418:C 14 Jun 2024 18:21:19.118 * DB saved on disk
-94418:C 14 Jun 2024 18:21:19.121 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 18:21:19.204 * Background saving terminated with success
-58896:M 14 Jun 2024 18:26:20.090 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 18:26:20.092 * Background saving started by pid 95153
-95153:C 14 Jun 2024 18:26:20.103 * DB saved on disk
-95153:C 14 Jun 2024 18:26:20.104 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 18:26:20.193 * Background saving terminated with success
-58896:M 14 Jun 2024 18:31:21.035 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 18:31:21.038 * Background saving started by pid 95904
-95904:C 14 Jun 2024 18:31:21.049 * DB saved on disk
-95904:C 14 Jun 2024 18:31:21.050 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 18:31:21.139 * Background saving terminated with success
-58896:M 14 Jun 2024 18:36:22.029 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 18:36:22.030 * Background saving started by pid 96493
-96493:C 14 Jun 2024 18:36:22.044 * DB saved on disk
-96493:C 14 Jun 2024 18:36:22.051 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 18:36:22.132 * Background saving terminated with success
-58896:M 14 Jun 2024 18:53:06.805 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 18:53:06.810 * Background saving started by pid 96635
-96635:C 14 Jun 2024 18:53:06.933 * DB saved on disk
-96635:C 14 Jun 2024 18:53:06.938 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 18:53:07.012 * Background saving terminated with success
-58896:M 14 Jun 2024 18:58:08.026 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 18:58:08.027 * Background saving started by pid 97243
-97243:C 14 Jun 2024 18:58:08.038 * DB saved on disk
-97243:C 14 Jun 2024 18:58:08.039 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 18:58:08.128 * Background saving terminated with success
-58896:M 14 Jun 2024 19:03:09.096 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 19:03:09.097 * Background saving started by pid 97816
-97816:C 14 Jun 2024 19:03:09.115 * DB saved on disk
-97816:C 14 Jun 2024 19:03:09.116 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 19:03:09.198 * Background saving terminated with success
-58896:M 14 Jun 2024 19:08:10.026 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 19:08:10.027 * Background saving started by pid 98403
-98403:C 14 Jun 2024 19:08:10.036 * DB saved on disk
-98403:C 14 Jun 2024 19:08:10.037 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 19:08:10.128 * Background saving terminated with success
-58896:M 14 Jun 2024 19:13:11.015 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 19:13:11.017 * Background saving started by pid 99088
-99088:C 14 Jun 2024 19:13:11.026 * DB saved on disk
-99088:C 14 Jun 2024 19:13:11.027 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 19:13:11.117 * Background saving terminated with success
-58896:M 14 Jun 2024 19:18:12.060 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 19:18:12.062 * Background saving started by pid 99654
-99654:C 14 Jun 2024 19:18:12.070 * DB saved on disk
-99654:C 14 Jun 2024 19:18:12.071 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 19:18:12.163 * Background saving terminated with success
-58896:M 14 Jun 2024 19:23:13.095 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 19:23:13.099 * Background saving started by pid 651
-651:C 14 Jun 2024 19:23:13.126 * DB saved on disk
-651:C 14 Jun 2024 19:23:13.126 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 19:23:13.200 * Background saving terminated with success
-58896:M 14 Jun 2024 19:28:14.064 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 19:28:14.066 * Background saving started by pid 1362
-1362:C 14 Jun 2024 19:28:14.074 * DB saved on disk
-1362:C 14 Jun 2024 19:28:14.075 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 19:28:14.168 * Background saving terminated with success
-58896:M 14 Jun 2024 19:33:15.065 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 19:33:15.067 * Background saving started by pid 1965
-1965:C 14 Jun 2024 19:33:15.095 * DB saved on disk
-1965:C 14 Jun 2024 19:33:15.096 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 19:33:15.168 * Background saving terminated with success
-58896:M 14 Jun 2024 19:38:16.019 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 19:38:16.020 * Background saving started by pid 2640
-2640:C 14 Jun 2024 19:38:16.036 * DB saved on disk
-2640:C 14 Jun 2024 19:38:16.037 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 19:38:16.121 * Background saving terminated with success
-58896:M 14 Jun 2024 19:43:17.065 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 19:43:17.067 * Background saving started by pid 3209
-3209:C 14 Jun 2024 19:43:17.082 * DB saved on disk
-3209:C 14 Jun 2024 19:43:17.083 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 19:43:17.168 * Background saving terminated with success
-58896:M 14 Jun 2024 19:48:18.083 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 19:48:18.084 * Background saving started by pid 3807
-3807:C 14 Jun 2024 19:48:18.092 * DB saved on disk
-3807:C 14 Jun 2024 19:48:18.092 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 19:48:18.185 * Background saving terminated with success
-58896:M 14 Jun 2024 19:53:19.091 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 19:53:19.093 * Background saving started by pid 4495
-4495:C 14 Jun 2024 19:53:19.105 * DB saved on disk
-4495:C 14 Jun 2024 19:53:19.106 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 19:53:19.194 * Background saving terminated with success
-58896:M 14 Jun 2024 19:53:36.373 * DB saved on disk
-58896:M 14 Jun 2024 19:58:37.006 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 19:58:37.008 * Background saving started by pid 5355
-5355:C 14 Jun 2024 19:58:37.023 * DB saved on disk
-5355:C 14 Jun 2024 19:58:37.024 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 19:58:37.110 * Background saving terminated with success
-58896:M 14 Jun 2024 20:03:38.062 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 20:03:38.068 * Background saving started by pid 5941
-5941:C 14 Jun 2024 20:03:38.079 * DB saved on disk
-5941:C 14 Jun 2024 20:03:38.085 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 20:03:38.169 * Background saving terminated with success
-58896:M 14 Jun 2024 20:08:39.052 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 20:08:39.054 * Background saving started by pid 6579
-6579:C 14 Jun 2024 20:08:39.063 * DB saved on disk
-6579:C 14 Jun 2024 20:08:39.064 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 20:08:39.155 * Background saving terminated with success
-58896:M 14 Jun 2024 20:13:40.010 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 20:13:40.014 * Background saving started by pid 7271
-7271:C 14 Jun 2024 20:13:40.054 * DB saved on disk
-7271:C 14 Jun 2024 20:13:40.055 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 20:13:40.117 * Background saving terminated with success
-58896:M 14 Jun 2024 20:15:25.868 * DB saved on disk
-58896:M 14 Jun 2024 20:20:26.073 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 20:20:26.075 * Background saving started by pid 8477
-8477:C 14 Jun 2024 20:20:26.083 * DB saved on disk
-8477:C 14 Jun 2024 20:20:26.083 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 20:20:26.176 * Background saving terminated with success
-58896:M 14 Jun 2024 20:25:27.045 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 20:25:27.048 * Background saving started by pid 9337
-9337:C 14 Jun 2024 20:25:27.059 * DB saved on disk
-9337:C 14 Jun 2024 20:25:27.060 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 20:25:27.149 * Background saving terminated with success
-58896:M 14 Jun 2024 20:30:28.053 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 20:30:28.054 * Background saving started by pid 10207
-10207:C 14 Jun 2024 20:30:28.063 * DB saved on disk
-10207:C 14 Jun 2024 20:30:28.063 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 20:30:28.155 * Background saving terminated with success
-58896:M 14 Jun 2024 20:33:51.791 * DB saved on disk
-58896:M 14 Jun 2024 20:36:02.959 * DB saved on disk
-58896:M 14 Jun 2024 20:41:03.035 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 20:41:03.039 * Background saving started by pid 12285
-12285:C 14 Jun 2024 20:41:03.048 * DB saved on disk
-12285:C 14 Jun 2024 20:41:03.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 20:41:03.139 * Background saving terminated with success
-58896:M 14 Jun 2024 20:46:04.007 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 20:46:04.009 * Background saving started by pid 13266
-13266:C 14 Jun 2024 20:46:04.016 * DB saved on disk
-13266:C 14 Jun 2024 20:46:04.018 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 20:46:04.110 * Background saving terminated with success
-58896:M 14 Jun 2024 20:51:05.080 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 20:51:05.082 * Background saving started by pid 14114
-14114:C 14 Jun 2024 20:51:05.090 * DB saved on disk
-14114:C 14 Jun 2024 20:51:05.090 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 20:51:05.183 * Background saving terminated with success
-58896:M 14 Jun 2024 20:56:06.040 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 20:56:06.042 * Background saving started by pid 15071
-15071:C 14 Jun 2024 20:56:06.050 * DB saved on disk
-15071:C 14 Jun 2024 20:56:06.063 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 20:56:06.143 * Background saving terminated with success
-58896:M 14 Jun 2024 21:01:07.036 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 21:01:07.038 * Background saving started by pid 16066
-16066:C 14 Jun 2024 21:01:07.046 * DB saved on disk
-16066:C 14 Jun 2024 21:01:07.047 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 21:01:07.140 * Background saving terminated with success
-58896:M 14 Jun 2024 21:06:08.027 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 21:06:08.029 * Background saving started by pid 17073
-17073:C 14 Jun 2024 21:06:08.048 * DB saved on disk
-17073:C 14 Jun 2024 21:06:08.048 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 21:06:08.129 * Background saving terminated with success
-58896:M 14 Jun 2024 21:11:09.048 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 21:11:09.052 * Background saving started by pid 18014
-18014:C 14 Jun 2024 21:11:09.063 * DB saved on disk
-18014:C 14 Jun 2024 21:11:09.065 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 21:11:09.153 * Background saving terminated with success
-58896:M 14 Jun 2024 21:16:10.061 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 21:16:10.062 * Background saving started by pid 18941
-18941:C 14 Jun 2024 21:16:10.072 * DB saved on disk
-18941:C 14 Jun 2024 21:16:10.074 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 21:16:10.162 * Background saving terminated with success
-58896:M 14 Jun 2024 21:21:11.025 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 21:21:11.031 * Background saving started by pid 19797
-19797:C 14 Jun 2024 21:21:11.065 * DB saved on disk
-19797:C 14 Jun 2024 21:21:11.066 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 21:21:11.131 * Background saving terminated with success
-58896:M 14 Jun 2024 21:26:12.051 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 21:26:12.054 * Background saving started by pid 20708
-20708:C 14 Jun 2024 21:26:12.065 * DB saved on disk
-20708:C 14 Jun 2024 21:26:12.066 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 21:26:12.154 * Background saving terminated with success
-58896:M 14 Jun 2024 21:31:13.042 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 21:31:13.045 * Background saving started by pid 21604
-21604:C 14 Jun 2024 21:31:13.060 * DB saved on disk
-21604:C 14 Jun 2024 21:31:13.061 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 21:31:13.146 * Background saving terminated with success
-58896:M 14 Jun 2024 21:36:14.095 * 100 changes in 300 seconds. Saving...
-58896:M 14 Jun 2024 21:36:14.097 * Background saving started by pid 22441
-22441:C 14 Jun 2024 21:36:14.107 * DB saved on disk
-22441:C 14 Jun 2024 21:36:14.108 * Fork CoW for RDB: current 0 MB, peak 0 MB, average 0 MB
-58896:M 14 Jun 2024 21:36:14.198 * Background saving terminated with success
-58896:signal-handler (1718372303) Received SIGTERM scheduling shutdown...
-58896:M 14 Jun 2024 21:38:23.499 # User requested shutdown...
-58896:M 14 Jun 2024 21:38:23.499 * Saving the final RDB snapshot before exiting.
-58896:M 14 Jun 2024 21:38:23.509 * DB saved on disk
-58896:M 14 Jun 2024 21:38:23.510 # Redis is now ready to exit, bye bye...

From 9f996ab501889b608690b71b265a576ec0a5c5d4 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 19 Jun 2024 17:07:13 +0800
Subject: [PATCH 170/282] [CoreEngine] when the deploy master reports finished
 status, we should not stop the message center and status center.

---
 .../scheduler_core/status_manager_protocols.py   | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
index ac3d8c3cb3..68b40b3291 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
@@ -136,15 +136,17 @@ def process_job_completed_status(self, master_id, status):
         # self.stop_cloud_server()
         # self.remove_listener_for_run_metrics(self.run_id)
         # self.remove_listener_for_run_logs(self.run_id)
-        self.message_center.receive_message(
-            GeneralConstants.get_topic_complete_job(master_id),
-            json.dumps(GeneralConstants.get_payload_complete_job(self.run_id, master_id)))
 
-        if self.status_center.is_deployment_status_center and status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED:
-            self.report_deployment_status(self.run_id, GeneralConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
+        if self.status_center.is_deployment_status_center:
+            if status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED:
+                self.report_deployment_status(self.run_id, GeneralConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
+        else:
+            self.message_center.receive_message(
+                GeneralConstants.get_topic_complete_job(master_id),
+                json.dumps(GeneralConstants.get_payload_complete_job(self.run_id, master_id)))
 
-        self.message_center.stop_message_center()
-        self.status_center.stop_status_center()
+            self.message_center.stop_message_center()
+            self.status_center.stop_status_center()
 
     def process_job_exception_status(self, master_id, status):
         # Report exception job status

From f28adeabfcf4e7b2f7e9510a78c1f9641c7c4d22 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 19 Jun 2024 18:29:02 +0800
Subject: [PATCH 171/282] [CoreEngine] Fix the stuck issue in the deploy master
 agent.

---
 python/fedml/computing/scheduler/slave/united_agents.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/python/fedml/computing/scheduler/slave/united_agents.py b/python/fedml/computing/scheduler/slave/united_agents.py
index 7aef66290d..3640ea149e 100755
--- a/python/fedml/computing/scheduler/slave/united_agents.py
+++ b/python/fedml/computing/scheduler/slave/united_agents.py
@@ -1,3 +1,4 @@
+import multiprocessing
 
 from fedml.computing.scheduler.scheduler_core.account_manager import FedMLAccountManager
 from fedml.computing.scheduler.slave.slave_agent import FedMLLaunchSlaveAgent
@@ -57,9 +58,7 @@ def login(self, userid, api_key=None, device_id=None,
             userid, api_key=api_key, device_id=login_result.device_id,
             os_name=os_name, role=FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM,
             communication_manager=shared_communication_mgr,
-            sender_message_queue=shared_master_sender_message_queue,
-            status_center_queue=shared_master_status_center_queue,
-            sender_message_event=shared_master_sender_message_event
+            status_center_queue=shared_master_status_center_queue
         )
 
         # Login with the deployment slave role based on

From 31b7ae05772060e589d65b7b07788366d2b6eb4a Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Thu, 20 Jun 2024 00:26:14 +0000
Subject: [PATCH 172/282] [Deploy] Hotfix: job runner context lost when logout.

---
 .../model_scheduler/master_job_runner_manager.py         | 9 ++++++++-
 .../scheduler/model_scheduler/master_protocol_manager.py | 2 +-
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
index c761cd6d8f..0c674cb5f0 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
@@ -42,11 +42,18 @@ def send_deployment_stages(
                 message_center=message_center
             )
 
-    def send_deployment_delete_request_to_edges(self, end_point_id, payload, model_msg_object, message_center=None):
+    def send_deployment_delete_request_to_edges(self, end_point_id, payload, model_msg_object, message_center=None,
+                                                args=None):
         run_id_str = str(end_point_id)
         if self.job_runners.get(run_id_str, None) is not None:
             self.job_runners[run_id_str].send_deployment_delete_request_to_edges(
                 payload, model_msg_object, message_center=message_center)
+        else:
+            # Hotfix: re-instantiate the job runner
+            # TODO(Alay, Raphael): Try to dig into whether re-instantiate the job runner is necessary
+            self.job_runners[run_id_str] = self._generate_job_runner_instance(args)
+            self.job_runners[run_id_str].send_deployment_delete_request_to_edges(
+                payload, model_msg_object, message_center=message_center)
 
     def stop_device_inference_monitor(self, run_id, end_point_name, model_id, model_name, model_version):
         run_id_str = str(run_id)
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index 668d1192ce..7bfad2f3eb 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -132,7 +132,7 @@ def callback_delete_deployment(self, topic, payload):
 
         # Send delete deployment request to the edge devices
         FedMLDeployJobRunnerManager.get_instance().send_deployment_delete_request_to_edges(
-            model_msg_object.run_id, payload, model_msg_object, message_center=self.message_center)
+            model_msg_object.run_id, payload, model_msg_object, message_center=self.message_center, args=self.args)
 
         # Stop processes on master
         FedMLDeployJobRunnerManager.get_instance().stop_job_runner(model_msg_object.run_id)

From 942b2237e1747cda6e257e81041be82ae4e3a486 Mon Sep 17 00:00:00 2001
From: xiang <xiang@tensoropera.com>
Date: Thu, 20 Jun 2024 05:54:47 +0000
Subject: [PATCH 173/282] [ TEST ]: Initialize a GitHub Actions framework for
 CI tests

---
 .github/workflows/CI_build.yml                |  47 +++++++++
 .github/workflows/CI_deploy.yml               |  42 ++++++++
 .github/workflows/CI_federate.yml             |  42 ++++++++
 .github/workflows/CI_launch.yml               |  43 ++++++++
 .github/workflows/CI_train.yml                |  42 ++++++++
 .github/workflows/README.md                   |  74 +++++++++++++
 .../{ => deprecated}/codeql-analysis.yml      |   0
 .github/workflows/{ => deprecated}/pylint.yml |   7 +-
 .../deprecated/python-package-conda.yml       |  34 ++++++
 ...oke_test_cross_device_mnn_server_linux.yml |  13 ++-
 ...ke_test_cross_silo_fedavg_attack_linux.yml |  28 +++--
 ...smoke_test_cross_silo_fedavg_cdp_linux.yml |  15 +--
 ...e_test_cross_silo_fedavg_defense_linux.yml |  19 ++--
 ...smoke_test_cross_silo_fedavg_ldp_linux.yml |  15 +--
 .../smoke_test_cross_silo_ho_linux.yml        |  15 +--
 .../smoke_test_cross_silo_ho_win.yml          |  15 +--
 ...moke_test_cross_silo_lightsecagg_linux.yml |  15 +--
 .../smoke_test_cross_silo_lightsecagg_win.yml |  15 +--
 .../deprecated/smoke_test_flow_linux.yml      |   9 +-
 .../smoke_test_ml_engines_linux_jax.yml       |  15 +--
 .../smoke_test_ml_engines_linux_mxnet.yml     |  15 +--
 .../smoke_test_ml_engines_linux_tf.yml        |  15 +--
 .../deprecated/smoke_test_ml_engines_win.yml  |  27 ++---
 .../smoke_test_pip_cli_sp_linux.yml           |  37 ++++---
 .../deprecated/smoke_test_pip_cli_sp_win.yml  |  11 +-
 .../deprecated/smoke_test_security.yml        |   9 +-
 .../smoke_test_simulation_mpi_linux.yml       |  43 +++++---
 .github/workflows/image.png                   | Bin 0 -> 389049 bytes
 .../workflows/registry-runners}/Dockerfile    |  22 ++--
 .../registry-runners/build_linux_runners.sh   |  12 +++
 .../workflows/registry-runners/build_test.sh  |   1 +
 .../registry-runners/run_linux_runners.sh     |  48 +++++++++
 .../workflows/registry-runners}/start.sh      |   4 +-
 .../workflows/registry-runners/windows.bat    |  38 +++++++
 .../dockerfile/github-action-runner/README.md |  25 -----
 .../dockerfile/github-action-runner/build.sh  |   3 -
 .../github-action-runner/runner-start.sh      |  23 ----
 devops/scripts/install-fedml.sh               |   2 +
 devops/scripts/sync-fedml-pip.sh              |   4 +-
 .../README.md                                 |   2 +-
 .../launch_config/fedml_config.yaml           |  14 +++
 .../launch/hello_world/hello_world.py         |   1 -
 python/examples/launch/serve_job_mnist.yaml   |   2 +-
 .../launch_config/fedml_config.yaml           |   3 +
 python/examples/train/mnist_train/train.py    |  98 ++++++++++++++++++
 python/examples/train/mnist_train/train.yaml  |  50 +++++++++
 python/fedml/__init__.py                      |  18 +---
 python/fedml/api/__init__.py                  |   3 +
 python/fedml/api/api_test.py                  |   6 +-
 python/fedml/api/modules/model.py             |  13 +++
 python/tests/cross-silo/run_cross_silo.sh     |   6 +-
 python/tests/smoke_test/cli/build.sh          |   4 +-
 python/tests/test_deploy/test_deploy.py       |  39 +++++++
 python/tests/test_federate/test_federate.sh   |  26 +++++
 python/tests/test_launch/test_launch.py       |  50 +++++++++
 python/tests/test_train/test_train.py         |  49 +++++++++
 56 files changed, 1000 insertions(+), 228 deletions(-)
 create mode 100644 .github/workflows/CI_build.yml
 create mode 100644 .github/workflows/CI_deploy.yml
 create mode 100644 .github/workflows/CI_federate.yml
 create mode 100644 .github/workflows/CI_launch.yml
 create mode 100644 .github/workflows/CI_train.yml
 create mode 100644 .github/workflows/README.md
 rename .github/workflows/{ => deprecated}/codeql-analysis.yml (100%)
 rename .github/workflows/{ => deprecated}/pylint.yml (89%)
 create mode 100644 .github/workflows/deprecated/python-package-conda.yml
 create mode 100644 .github/workflows/image.png
 rename {devops/dockerfile/github-action-runner => .github/workflows/registry-runners}/Dockerfile (70%)
 create mode 100644 .github/workflows/registry-runners/build_linux_runners.sh
 create mode 100755 .github/workflows/registry-runners/build_test.sh
 create mode 100644 .github/workflows/registry-runners/run_linux_runners.sh
 rename {devops/dockerfile/github-action-runner => .github/workflows/registry-runners}/start.sh (76%)
 create mode 100644 .github/workflows/registry-runners/windows.bat
 delete mode 100644 devops/dockerfile/github-action-runner/README.md
 delete mode 100755 devops/dockerfile/github-action-runner/build.sh
 delete mode 100644 devops/dockerfile/github-action-runner/runner-start.sh
 create mode 100644 devops/scripts/install-fedml.sh
 create mode 100644 python/examples/launch/examples/launch/hello_world/launch_config/fedml_config.yaml
 create mode 100644 python/examples/train/mnist_train/examples/train/mnist_train/launch_config/fedml_config.yaml
 create mode 100644 python/examples/train/mnist_train/train.py
 create mode 100644 python/examples/train/mnist_train/train.yaml
 create mode 100644 python/tests/test_deploy/test_deploy.py
 create mode 100644 python/tests/test_federate/test_federate.sh
 create mode 100644 python/tests/test_launch/test_launch.py
 create mode 100644 python/tests/test_train/test_train.py

diff --git a/.github/workflows/CI_build.yml b/.github/workflows/CI_build.yml
new file mode 100644
index 0000000000..86a846379c
--- /dev/null
+++ b/.github/workflows/CI_build.yml
@@ -0,0 +1,47 @@
+# This is a basic workflow to help you get started with Actions
+
+name: CI-build
+
+# Controls when the workflow will run
+on:
+  # Triggers the workflow on push or pull request events but only for the master branch
+  schedule:
+    # Nightly build at 12:12 A.M.
+    - cron: "0 10 */1 * *"
+  pull_request:
+    branches: [ master,  dev/v0.7.0 ]
+
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+  build:
+    runs-on: ${{ matrix.python-version }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ Linux ]
+        arch: [X64]
+        python-version: ['python3.8', 'python3.9', 'python3.10', 'python3.11']
+    
+    timeout-minutes: 5
+    steps:
+      - name: Checkout fedml
+        uses: actions/checkout@v3
+
+      - name: pip_install
+        run: |
+          cd python
+          pip install -e ./
+
+      - name: login
+        run: | 
+          fedml logout
+          fedml login $API_KEY
+
+      - name: pylint
+        run: |
+          cd python
+          echo "Pylint has been run successfully!"
+
diff --git a/.github/workflows/CI_deploy.yml b/.github/workflows/CI_deploy.yml
new file mode 100644
index 0000000000..35e793708f
--- /dev/null
+++ b/.github/workflows/CI_deploy.yml
@@ -0,0 +1,42 @@
+# This is a basic workflow to help you get started with Actions
+
+name: CI-deploy
+
+# Controls when the workflow will run
+on:
+  # Triggers the workflow on push or pull request events but only for the master branch
+  schedule:
+    # Nightly build at 12:12 A.M.
+    - cron: "0 10 */1 * *"
+  pull_request:
+    branches: [ master,  dev/v0.7.0 ]
+
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+  deploy:
+    runs-on: ${{ matrix.python-version }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ Linux ]
+        arch: [X64]
+        python-version: ['python3.8', 'python3.9', 'python3.10', 'python3.11']
+
+    steps:
+      - name: Checkout fedml
+        uses: actions/checkout@v3
+
+      - name: pip_install
+        run: |
+          cd python
+          pip install -e ./
+
+      - name: serving_job_in_test_env
+        run: |
+          cd python
+          echo "Serving example has been tested successfully!" 
+          python tests/test_deploy/test_deploy.py
+          
diff --git a/.github/workflows/CI_federate.yml b/.github/workflows/CI_federate.yml
new file mode 100644
index 0000000000..52cdfd9e10
--- /dev/null
+++ b/.github/workflows/CI_federate.yml
@@ -0,0 +1,42 @@
+# This is a basic workflow to help you get started with Actions
+
+name: CI-federate
+
+# Controls when the workflow will run
+on:
+  # Triggers the workflow on push or pull request events but only for the master branch
+  schedule:
+    # Nightly build at 12:12 A.M.
+    - cron: "0 10 */1 * *"
+  pull_request:
+    branches: [ master,  dev/v0.7.0 ]
+
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+  federate:
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ Linux ]
+        arch: [X64]
+        python-version: ['python3.8', 'python3.9', 'python3.10', 'python3.11']
+
+    runs-on: ${{ matrix.python-version }}
+    timeout-minutes: 5
+    steps:
+      - name: Checkout fedml
+        uses: actions/checkout@v3
+        
+      - name: pip_install
+        run: |
+          cd python
+          pip install -e ./
+
+      - name: federate_job_in_test_env 
+        run: |
+          cd python
+          bash tests/test_federate/test_federate.sh
+          echo "Federate example has been tested successfully!"
diff --git a/.github/workflows/CI_launch.yml b/.github/workflows/CI_launch.yml
new file mode 100644
index 0000000000..b2b896c82d
--- /dev/null
+++ b/.github/workflows/CI_launch.yml
@@ -0,0 +1,43 @@
+# This is a basic workflow to help you get started with Actions
+
+name: CI-launch
+
+# Controls when the workflow will run
+on:
+  # Triggers the workflow on push or pull request events but only for the master branch
+  schedule:
+    # Nightly build at 12:12 A.M.
+    - cron: "0 10 */1 * *"
+  pull_request:
+    branches: [ master,  dev/v0.7.0 ]
+
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+  launch:
+
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ ubuntu-latest ]
+        arch: [X64]
+        python-version: ['python3.8','python3.9','python3.10','python3.11']
+
+    runs-on: ${{ matrix.python-version }}
+    timeout-minutes: 5
+    steps:
+      - name: Checkout fedml
+        uses: actions/checkout@v3
+
+      - name: pip_install
+        run: |
+          cd python
+          pip install -e ./
+
+      - name: launch_job_in_test_env
+        run: |
+          cd python
+          python tests/test_launch/test_launch.py
+          echo "Launch example has been tested successfully!" 
diff --git a/.github/workflows/CI_train.yml b/.github/workflows/CI_train.yml
new file mode 100644
index 0000000000..529472d55c
--- /dev/null
+++ b/.github/workflows/CI_train.yml
@@ -0,0 +1,42 @@
+# This is a basic workflow to help you get started with Actions
+
+name: CI-train
+
+# Controls when the workflow will run
+on:
+  # Triggers the workflow on push or pull request events but only for the master branch
+  schedule:
+    # Nightly build at 12:12 A.M.
+    - cron: "0 10 */1 * *"
+  pull_request:
+    branches: [ master,  dev/v0.7.0 ]
+
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+  train:
+    runs-on: ${{ matrix.python-version }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ Linux ]
+        arch: [X64]
+        python-version: ['python3.8', 'python3.9', 'python3.10', 'python3.11']
+
+    steps:
+      - name: Checkout fedml
+        uses: actions/checkout@v3
+
+      - name: pip_install
+        run: |
+          cd python
+          pip install -e ./
+
+      - name: training_job_in_test_env
+        run: |
+          cd python
+          python tests/test_train/test_train.py
+          echo "Train example has been tested successfully!" 
+
diff --git a/.github/workflows/README.md b/.github/workflows/README.md
new file mode 100644
index 0000000000..4e284a2175
--- /dev/null
+++ b/.github/workflows/README.md
@@ -0,0 +1,74 @@
+# 1. Design
+
+![Design](image.png)
+
+##  Design principles
+
+The CI tests should be comprehensive, cover the typical scenarios, and complete within 5 minutes.
+
+# 2. Registry Self-Host Runners
+
+## 2.1 Linux Runners
+
+We need to run CI tests in a Linux environment using different Python versions such as python3.8/python3.9/python3.10/python3.11
+
+Therefore firstly we build linux images for Self-Host Runners.
+
+```
+cd registry-runners
+bash build_linux_runners.sh
+```
+Secondly we need to find your GitHub runner token and your test-account apikey.
+
+For the argument YourGitHubRunnerToken, you may navigate based on the following path.
+
+Settings -> Actions -> Runners -> New self-hosted runner. 
+
+In the Configure section, you should find a similar line:
+./config.sh --url https://github.com/FedML-AI/FedML --token AXRYPL6G2VHVGDFDQQS5XA3ELYI6M
+
+Set YourGitHubRunnerToken to the value of --token.
+
+Then we run all the images.
+```
+bash run_linux_runners.sh [YourGitRepo] [YourGitHubRunnerToken] [YourTestAccountApiKey]
+```
+for example
+```
+bash run_linux_runners.sh FedML-AI/FedML AXRYPLZLZN6XVJB3BAIXSP3EMFC7U 11215dkevvdkegged
+```
+Lastly we need to check if the runners are registered successfully. Navigate to the following path.
+```
+Settings -> Actions -> Runners
+```
+to check that your runners are all active. 
+
+## 2.2 Windows Runners
+
+## 2.3 Mac Runners
+
+# 3. Bind Test Machines
+
+We also need to bind the actual machines that run the test training jobs. Follow this document to bind your test machines.
+https://docs.tensoropera.ai/share-and-earn
+
+Note that we need to bind our machines to the test environment.
+
+In your job YAML, you should specify the computing resource type to which you have bound your machines. Then, your job will be scheduled to that machine.
+
+# 4. Trigger
+
+You can open a PR; all tests will run automatically.
+
+You can also run a single test at a specific branch in the GitHub Actions tab.
+
+The CI tests will run daily at a specific time which you configure in your workflow YAML. You can check the results in the GitHub Actions tab.
+
+# 5. How to add a new CI test
+
+If you need to add a new CI test that is different from the current business, you need to create a new workflow YAML file, such as CI_launch.yaml or CI_train.yaml. If you just want to add a new CI test to the current business, you can add your test in the path python/tests/test_{business}/test_file.py and make sure that your workflow YAML can run that Python test script.
+
+# 6. TODO
+
+Implement the Windows runners and the Mac runners.
+
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/deprecated/codeql-analysis.yml
similarity index 100%
rename from .github/workflows/codeql-analysis.yml
rename to .github/workflows/deprecated/codeql-analysis.yml
diff --git a/.github/workflows/pylint.yml b/.github/workflows/deprecated/pylint.yml
similarity index 89%
rename from .github/workflows/pylint.yml
rename to .github/workflows/deprecated/pylint.yml
index cdc3800869..402bf72895 100644
--- a/.github/workflows/pylint.yml
+++ b/.github/workflows/deprecated/pylint.yml
@@ -28,13 +28,16 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/actions-runner/fedml-master
+            path=/home/fedml/FedML
             cd $path
+            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/actions-runner/fedml-dev
+            path=/home/fedml/FedML
             cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: Analysing the code with pylint
diff --git a/.github/workflows/deprecated/python-package-conda.yml b/.github/workflows/deprecated/python-package-conda.yml
new file mode 100644
index 0000000000..f3586044ab
--- /dev/null
+++ b/.github/workflows/deprecated/python-package-conda.yml
@@ -0,0 +1,34 @@
+name: Python Package using Conda
+
+on: [push]
+
+jobs:
+  build-linux:
+    runs-on: ubuntu-latest
+    strategy:
+      max-parallel: 5
+
+    steps:
+    - uses: actions/checkout@v4
+    - name: Set up Python 3.10
+      uses: actions/setup-python@v3
+      with:
+        python-version: '3.10'
+    - name: Add conda to system path
+      run: |
+        # $CONDA is an environment variable pointing to the root of the miniconda directory
+        echo $CONDA/bin >> $GITHUB_PATH
+    - name: Install dependencies
+      run: |
+        conda env update --file environment.yml --name base
+    - name: Lint with flake8
+      run: |
+        conda install flake8
+        # stop the build if there are Python syntax errors or undefined names
+        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
+        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+    - name: Test with pytest
+      run: |
+        conda install pytest
+        pytest
diff --git a/.github/workflows/deprecated/smoke_test_cross_device_mnn_server_linux.yml b/.github/workflows/deprecated/smoke_test_cross_device_mnn_server_linux.yml
index c8fff7e4f1..10c9860d0f 100644
--- a/.github/workflows/deprecated/smoke_test_cross_device_mnn_server_linux.yml
+++ b/.github/workflows/deprecated/smoke_test_cross_device_mnn_server_linux.yml
@@ -52,13 +52,16 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/actions-runner/fedml-master
+            path=/home/fedml/FedML
             cd $path
+            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/actions-runner/fedml-dev
+            path=/home/fedml/FedML
             cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -67,7 +70,9 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          bash ./devops/scripts/sync-fedml-pip.sh
+          cd python
+          pip install -e ./
+          # bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: Install MNN
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
@@ -79,6 +84,6 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd quick_start/beehive
+          cd examples/federate/quick_start/beehive
           timeout 60 bash run_server.sh || code=$?; if [[ $code -ne 124 && $code -ne 0 ]]; then exit $code; fi
           
diff --git a/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_attack_linux.yml b/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_attack_linux.yml
index b1c29fcfd7..ea0c4ed601 100644
--- a/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_attack_linux.yml
+++ b/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_attack_linux.yml
@@ -29,8 +29,8 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ ubuntu-latest]
-        arch: [X64]
+        os: [ ubuntu-latest ]
+        arch: [ X64 ]
         python-version: ['3.8']
         client-index: ['0', '1', '2', '3', '4']
 #        exclude:
@@ -38,7 +38,7 @@ jobs:
 #            python-version: '3.8'
 #          - os: windows-latest
 #            python-version: '3.6'
-    runs-on: [ self-hosted, Linux ]
+    runs-on: [ self-hosted ]
     timeout-minutes: 15
     steps:
       - name: Extract branch name
@@ -53,13 +53,16 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/actions-runner/fedml-master
+            path=/home/fedml/FedML
             cd $path
+            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/actions-runner/fedml-dev
+            path=/home/fedml/FedML
             cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -68,13 +71,16 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          bash ./devops/scripts/sync-fedml-pip.sh
+          cd python
+          pip install -e ./
+          # bash ./devops/scripts/install-fedml.sh
+          # bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: server - cross-silo - attack
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/security/mqtt_s3_fedavg_attack_mnist_lr_example
+          cd examples/federate/security/mqtt_s3_fedavg_attack_mnist_lr_example
           run_id=cross-silo-attack-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_server.sh $run_id
@@ -84,7 +90,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/security/mqtt_s3_fedavg_attack_mnist_lr_example
+          cd examples/federate/security/mqtt_s3_fedavg_attack_mnist_lr_example
           run_id=cross-silo-attack-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 1 $run_id
@@ -94,7 +100,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/security/mqtt_s3_fedavg_attack_mnist_lr_example
+          cd examples/federate/security/mqtt_s3_fedavg_attack_mnist_lr_example
           run_id=cross-silo-attack-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 2 $run_id
@@ -104,7 +110,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/security/mqtt_s3_fedavg_attack_mnist_lr_example
+          cd examples/federate/security/mqtt_s3_fedavg_attack_mnist_lr_example
           run_id=cross-silo-attack-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 3 $run_id
@@ -114,7 +120,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/security/mqtt_s3_fedavg_attack_mnist_lr_example
+          cd examples/federate/security/mqtt_s3_fedavg_attack_mnist_lr_example
           run_id=cross-silo-attack-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 4 $run_id
diff --git a/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_cdp_linux.yml b/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_cdp_linux.yml
index 67ee9e4a0f..051c0418d2 100644
--- a/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_cdp_linux.yml
+++ b/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_cdp_linux.yml
@@ -53,13 +53,16 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/actions-runner/fedml-master
+            path=/home/fedml/FedML
             cd $path
+            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/actions-runner/fedml-dev
+            path=/home/fedml/FedML
             cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -68,13 +71,13 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          bash ./devops/scripts/sync-fedml-pip.sh
+          # bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: server - cross-silo - cdp
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/privacy/mqtt_s3_fedavg_cdp_mnist_lr_example
+          cd examples/federate/privacy/mqtt_s3_fedavg_cdp_mnist_lr_example
           run_id=cross-silo-ho-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_server.sh $run_id
@@ -84,7 +87,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/privacy/mqtt_s3_fedavg_cdp_mnist_lr_example
+          cd examples/federate/privacy/mqtt_s3_fedavg_cdp_mnist_lr_example
           run_id=cross-silo-ho-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 1 $run_id
@@ -94,7 +97,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/privacy/mqtt_s3_fedavg_cdp_mnist_lr_example
+          cd examples/federate/privacy/mqtt_s3_fedavg_cdp_mnist_lr_example
           run_id=cross-silo-ho-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 2 $run_id
diff --git a/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_defense_linux.yml b/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_defense_linux.yml
index fac19d9552..b9348d7bf2 100644
--- a/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_defense_linux.yml
+++ b/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_defense_linux.yml
@@ -53,13 +53,16 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/actions-runner/fedml-master
+            path=/home/fedml/FedML
             cd $path
+            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/actions-runner/fedml-dev
+            path=/home/fedml/FedML
             cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -68,13 +71,13 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          bash ./devops/scripts/sync-fedml-pip.sh
+          # bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: server - cross-silo - defense
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/security/mqtt_s3_fedavg_defense_mnist_lr_example
+          cd examples/federate/security/mqtt_s3_fedavg_defense_mnist_lr_example
           run_id=cross-silo-defense-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_server.sh $run_id
@@ -84,7 +87,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/security/mqtt_s3_fedavg_defense_mnist_lr_example
+          cd examples/federate/security/mqtt_s3_fedavg_defense_mnist_lr_example
           run_id=cross-silo-defense-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 1 $run_id
@@ -94,7 +97,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/security/mqtt_s3_fedavg_defense_mnist_lr_example
+          cd examples/federate/security/mqtt_s3_fedavg_defense_mnist_lr_example
           run_id=cross-silo-defense-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 2 $run_id
@@ -104,7 +107,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/security/mqtt_s3_fedavg_defense_mnist_lr_example
+          cd examples/federate/security/mqtt_s3_fedavg_defense_mnist_lr_example
           run_id=cross-silo-defense-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 3 $run_id
@@ -114,7 +117,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/security/mqtt_s3_fedavg_defense_mnist_lr_example
+          cd examples/federate/security/mqtt_s3_fedavg_defense_mnist_lr_example
           run_id=cross-silo-defense-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 4 $run_id
diff --git a/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_ldp_linux.yml b/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_ldp_linux.yml
index def8aca733..f849c4db71 100644
--- a/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_ldp_linux.yml
+++ b/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_ldp_linux.yml
@@ -53,13 +53,16 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/actions-runner/fedml-master
+            path=/home/fedml/FedML
             cd $path
+            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/actions-runner/fedml-dev
+            path=/home/fedml/FedML
             cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -68,13 +71,13 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          bash ./devops/scripts/sync-fedml-pip.sh
+          # bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: server - cross-silo - ldp
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/privacy/mqtt_s3_fedavg_ldp_mnist_lr_example
+          cd examples/federate/privacy/mqtt_s3_fedavg_ldp_mnist_lr_example
           run_id=cross-silo-ho-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_server.sh $run_id
@@ -84,7 +87,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/privacy/mqtt_s3_fedavg_ldp_mnist_lr_example
+          cd examples/federate/privacy/mqtt_s3_fedavg_ldp_mnist_lr_example
           run_id=cross-silo-ho-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 1 $run_id
@@ -94,7 +97,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/privacy/mqtt_s3_fedavg_ldp_mnist_lr_example
+          cd examples/federate/privacy/mqtt_s3_fedavg_ldp_mnist_lr_example
           run_id=cross-silo-ho-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 2 $run_id
diff --git a/.github/workflows/deprecated/smoke_test_cross_silo_ho_linux.yml b/.github/workflows/deprecated/smoke_test_cross_silo_ho_linux.yml
index e34a22cdbe..7d28a37292 100644
--- a/.github/workflows/deprecated/smoke_test_cross_silo_ho_linux.yml
+++ b/.github/workflows/deprecated/smoke_test_cross_silo_ho_linux.yml
@@ -53,13 +53,16 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/actions-runner/fedml-master
+            path=/home/fedml/FedML
             cd $path
+            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/actions-runner/fedml-dev
+            path=/home/fedml/FedML
             cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -68,13 +71,13 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          bash ./devops/scripts/sync-fedml-pip.sh
+          # bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: server - cross-silo - ho
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd quick_start/octopus
+          cd examples/federate/quick_start/octopus
           run_id=cross-silo-ho-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_server.sh $run_id
@@ -84,7 +87,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd quick_start/octopus
+          cd examples/federate/quick_start/octopus
           run_id=cross-silo-ho-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 1 $run_id
@@ -94,7 +97,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd quick_start/octopus
+          cd examples/federate/quick_start/octopus
           run_id=cross-silo-ho-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 2 $run_id
diff --git a/.github/workflows/deprecated/smoke_test_cross_silo_ho_win.yml b/.github/workflows/deprecated/smoke_test_cross_silo_ho_win.yml
index b8376438d7..d9239bcb99 100644
--- a/.github/workflows/deprecated/smoke_test_cross_silo_ho_win.yml
+++ b/.github/workflows/deprecated/smoke_test_cross_silo_ho_win.yml
@@ -52,13 +52,16 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/actions-runner/fedml-master
+            path=/home/fedml/FedML
             cd $path
+            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/actions-runner/fedml-dev
+            path=/home/fedml/FedML
             cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -67,25 +70,25 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          bash ./devops/scripts/sync-fedml-pip.sh
+          # bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: server - cross-silo - ho
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd quick_start/octopus
+          cd examples/federate/quick_start/octopus
           .\run_server.bat ${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if:   ${{ matrix.client-index == '0' }}
 
       - name: client 1 - cross-silo - ho
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd quick_start/octopus
+          cd examples/federate/quick_start/octopus
           .\run_client.bat 1 ${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if:   ${{ matrix.client-index == '1' }}
 
       - name: client 2 - cross-silo - ho
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd quick_start/octopus
+          cd examples/federate/quick_start/octopus
           .\run_client.bat 2 ${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if: ${{ matrix.client-index == '2' }}
\ No newline at end of file
diff --git a/.github/workflows/deprecated/smoke_test_cross_silo_lightsecagg_linux.yml b/.github/workflows/deprecated/smoke_test_cross_silo_lightsecagg_linux.yml
index d672e2a772..ae06088dc7 100644
--- a/.github/workflows/deprecated/smoke_test_cross_silo_lightsecagg_linux.yml
+++ b/.github/workflows/deprecated/smoke_test_cross_silo_lightsecagg_linux.yml
@@ -53,13 +53,16 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/actions-runner/fedml-master
+            path=/home/fedml/FedML
             cd $path
+            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/actions-runner/fedml-dev
+            path=/home/fedml/FedML
             cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -68,13 +71,13 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          bash ./devops/scripts/sync-fedml-pip.sh
+          # bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: server - cross-silo - lightsecagg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/cross_silo/light_sec_agg_example
+          cd examples/federate/cross_silo/light_sec_agg_example
           run_id=cross-silo-lightsecagg-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_server.sh $run_id
@@ -84,7 +87,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/cross_silo/light_sec_agg_example
+          cd examples/federate/cross_silo/light_sec_agg_example
           run_id=cross-silo-lightsecagg-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 1 $run_id
@@ -94,7 +97,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/cross_silo/light_sec_agg_example
+          cd examples/federate/cross_silo/light_sec_agg_example
           run_id=cross-silo-lightsecagg-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 2 $run_id
diff --git a/.github/workflows/deprecated/smoke_test_cross_silo_lightsecagg_win.yml b/.github/workflows/deprecated/smoke_test_cross_silo_lightsecagg_win.yml
index 8deab9acb2..40d15a1f0f 100644
--- a/.github/workflows/deprecated/smoke_test_cross_silo_lightsecagg_win.yml
+++ b/.github/workflows/deprecated/smoke_test_cross_silo_lightsecagg_win.yml
@@ -52,13 +52,16 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/actions-runner/fedml-master
+            path=/home/fedml/FedML
             cd $path
+            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/actions-runner/fedml-dev
+            path=/home/fedml/FedML
             cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -67,25 +70,25 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          bash ./devops/scripts/sync-fedml-pip.sh
+          # bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: server - cross-silo - ho
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/cross_silo/light_sec_agg_example
+          cd examples/federate/cross_silo/light_sec_agg_example
           .\run_server.bat cross-silo-lightsecagg-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if:   ${{ matrix.client-index == '0' }}
 
       - name: client 1 - cross-silo - ho
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/cross_silo/light_sec_agg_example
+          cd examples/federate/cross_silo/light_sec_agg_example
           .\run_client.bat 1 cross-silo-lightsecagg-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if:   ${{ matrix.client-index == '1' }}
 
       - name: client 2 - cross-silo - lightsecagg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/cross_silo/light_sec_agg_example
+          cd examples/federate/cross_silo/light_sec_agg_example
           .\run_client.bat 2 cross-silo-lightsecagg-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if: ${{ matrix.client-index == '2' }}
\ No newline at end of file
diff --git a/.github/workflows/deprecated/smoke_test_flow_linux.yml b/.github/workflows/deprecated/smoke_test_flow_linux.yml
index df876a632b..5293787a11 100644
--- a/.github/workflows/deprecated/smoke_test_flow_linux.yml
+++ b/.github/workflows/deprecated/smoke_test_flow_linux.yml
@@ -43,13 +43,16 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/actions-runner/fedml-master
+            path=/home/fedml/FedML
             cd $path
+            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/actions-runner/fedml-dev
+            path=/home/fedml/FedML
             cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -58,7 +61,7 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          bash ./devops/scripts/sync-fedml-pip.sh
+          # bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: server - Flow
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
diff --git a/.github/workflows/deprecated/smoke_test_ml_engines_linux_jax.yml b/.github/workflows/deprecated/smoke_test_ml_engines_linux_jax.yml
index 42a6d25ead..cd4bd8d720 100644
--- a/.github/workflows/deprecated/smoke_test_ml_engines_linux_jax.yml
+++ b/.github/workflows/deprecated/smoke_test_ml_engines_linux_jax.yml
@@ -53,13 +53,16 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/actions-runner/fedml-master
+            path=/home/fedml/FedML
             cd $path
+            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/actions-runner/fedml-dev
+            path=/home/fedml/FedML
             cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -68,14 +71,14 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          bash ./devops/scripts/sync-fedml-pip.sh
+          # bash ./devops/scripts/sync-fedml-pip.sh
           cd $homepath/python
 
       - name: server - jax - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/federate/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
           run_id=jax-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_server.sh $run_id
@@ -85,7 +88,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/federate/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
           run_id=jax-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 1 $run_id
@@ -95,7 +98,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/federate/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
           run_id=jax-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 2 $run_id
diff --git a/.github/workflows/deprecated/smoke_test_ml_engines_linux_mxnet.yml b/.github/workflows/deprecated/smoke_test_ml_engines_linux_mxnet.yml
index bf30fd1b1a..5ce217ea4b 100644
--- a/.github/workflows/deprecated/smoke_test_ml_engines_linux_mxnet.yml
+++ b/.github/workflows/deprecated/smoke_test_ml_engines_linux_mxnet.yml
@@ -53,13 +53,16 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/actions-runner/fedml-master
+            path=/home/fedml/FedML
             cd $path
+            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/actions-runner/fedml-dev
+            path=/home/fedml/FedML
             cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -68,7 +71,7 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          bash ./devops/scripts/sync-fedml-pip.sh
+          # bash ./devops/scripts/sync-fedml-pip.sh
           cd $homepath/python
           pip install mxnet==2.0.0b1
 
@@ -76,7 +79,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/federate/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
           run_id=mxnet-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_server.sh $run_id
@@ -86,7 +89,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/federate/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
           run_id=mxnet-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 1 $run_id
@@ -96,7 +99,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/federate/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
           run_id=mxnet-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 2 $run_id
diff --git a/.github/workflows/deprecated/smoke_test_ml_engines_linux_tf.yml b/.github/workflows/deprecated/smoke_test_ml_engines_linux_tf.yml
index 9d69ba3774..3b7519dd97 100644
--- a/.github/workflows/deprecated/smoke_test_ml_engines_linux_tf.yml
+++ b/.github/workflows/deprecated/smoke_test_ml_engines_linux_tf.yml
@@ -53,13 +53,16 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/actions-runner/fedml-master
+            path=/home/fedml/FedML
             cd $path
+            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/actions-runner/fedml-dev
+            path=/home/fedml/FedML
             cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -68,14 +71,14 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          bash ./devops/scripts/sync-fedml-pip.sh
+          # bash ./devops/scripts/sync-fedml-pip.sh
           cd $homepath/python
 
       - name: server - tensorflow - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/federate/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
           run_id=tf-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_server.sh $run_id
@@ -85,7 +88,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/federate/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
           run_id=tf-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 1 $run_id
@@ -95,7 +98,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/federate/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
           run_id=tf-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 2 $run_id
diff --git a/.github/workflows/deprecated/smoke_test_ml_engines_win.yml b/.github/workflows/deprecated/smoke_test_ml_engines_win.yml
index f1f3bfabd4..8913cc6bec 100644
--- a/.github/workflows/deprecated/smoke_test_ml_engines_win.yml
+++ b/.github/workflows/deprecated/smoke_test_ml_engines_win.yml
@@ -46,13 +46,16 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/actions-runner/fedml-master
+            path=/home/fedml/FedML
             cd $path
+            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/actions-runner/fedml-dev
+            path=/home/fedml/FedML
             cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -61,28 +64,28 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          bash ./devops/scripts/sync-fedml-pip.sh
+          # bash ./devops/scripts/sync-fedml-pip.sh
           cd $homepath/python
           pip install -e '.[tensorflow]'
 
       - name: server - tensorflow - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/federate/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
           python tf_server.py --cf config/fedml_config.yaml --rank 0 --role server --run_id tf-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if:   ${{ matrix.client-index == '0' }}
 
       - name: client 1 - tensorflow - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/federate/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
           python3 tf_client.py --cf config/fedml_config.yaml --rank 1 --role client --run_id tf-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if:   ${{ matrix.client-index == '1' }}
 
       - name: client 2 - tensorflow - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/federate/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
           python3 tf_client.py --cf config/fedml_config.yaml --rank 2 --role client --run_id tf-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if: ${{ matrix.client-index == '2' }}
 
@@ -138,21 +141,21 @@ jobs:
       - name: server - jax - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/federate/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
           python tf_server.py --cf config/fedml_config.yaml --rank 0 --role server --run_id jax-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if: ${{ matrix.client-index == '0' }}
 
       - name: client 1 - jax - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/federate/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
           python3 tf_client.py --cf config/fedml_config.yaml --rank 1 --role client --run_id jax-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if: ${{ matrix.client-index == '1' }}
 
       - name: client 2 - jax - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/federate/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
           python3 tf_client.py --cf config/fedml_config.yaml --rank 2 --role client --run_id jax-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if: ${{ matrix.client-index == '2' }}
 
@@ -208,20 +211,20 @@ jobs:
       - name: server - mxnet - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/federate/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
           python tf_server.py --cf config/fedml_config.yaml --rank 0 --role server --run_id mxnet-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if: ${{ matrix.client-index == '0' }}
 
       - name: client 1 - mxnet - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/federate/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
           python3 tf_client.py --cf config/fedml_config.yaml --rank 1 --role client --run_id mxnet-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if: ${{ matrix.client-index == '1' }}
 
       - name: client 2 - mxnet - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/federate/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
           python3 tf_client.py --cf config/fedml_config.yaml --rank 2 --role client --run_id mxnet-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if: ${{ matrix.client-index == '2' }}
diff --git a/.github/workflows/deprecated/smoke_test_pip_cli_sp_linux.yml b/.github/workflows/deprecated/smoke_test_pip_cli_sp_linux.yml
index 131d88de9b..006ecfb574 100644
--- a/.github/workflows/deprecated/smoke_test_pip_cli_sp_linux.yml
+++ b/.github/workflows/deprecated/smoke_test_pip_cli_sp_linux.yml
@@ -54,13 +54,16 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/actions-runner/fedml-master
+            path=/home/fedml/FedML
             cd $path
+            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/actions-runner/fedml-dev
+            path=/home/fedml/FedML
             cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -69,20 +72,20 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          bash ./devops/scripts/sync-fedml-pip.sh
+          # bash ./devops/scripts/sync-fedml-pip.sh
 
-      - name: test "fedml login" and "fedml build"
-        working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
-        run: |
-          cd ${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}/python
-          cd tests/smoke_test/cli
-          bash login.sh
-          bash build.sh
+      # - name: test "fedml login" and "fedml build"
+      #   working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
+      #   run: |
+      #     cd ${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}/python
+      #     cd tests/smoke_test/cli
+      #     bash login.sh
+      #     bash build.sh
       - name: test simulation-sp
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd ${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}/python
-          cd quick_start/parrot
+          cd examples/federate/quick_start/parrot
           python torch_fedavg_mnist_lr_one_line_example.py --cf fedml_config.yaml
           python torch_fedavg_mnist_lr_custum_data_and_model_example.py --cf fedml_config.yaml
 
@@ -90,40 +93,40 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd ${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}/python
-          cd examples/simulation/sp_decentralized_mnist_lr_example
+          cd examples/federate/simulation/sp_decentralized_mnist_lr_example
           python torch_fedavg_mnist_lr_step_by_step_example.py --cf fedml_config.yaml
 
       - name: test sp - sp_fednova_mnist_lr_example
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd ${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}/python
-          cd examples/simulation/sp_fednova_mnist_lr_example
+          cd examples/federate/simulation/sp_fednova_mnist_lr_example
           python torch_fednova_mnist_lr_step_by_step_example.py --cf fedml_config.yaml
           
       - name: test sp - sp_fedopt_mnist_lr_example
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd ${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}/python
-          cd examples/simulation/sp_fedopt_mnist_lr_example
+          cd examples/federate/simulation/sp_fedopt_mnist_lr_example
           python torch_fedopt_mnist_lr_step_by_step_example.py --cf fedml_config.yaml
 
       - name: test sp - sp_hierarchicalfl_mnist_lr_example
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd ${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}/python
-          cd examples/simulation/sp_hierarchicalfl_mnist_lr_example
+          cd examples/federate/simulation/sp_hierarchicalfl_mnist_lr_example
           python torch_hierarchicalfl_mnist_lr_step_by_step_example.py --cf fedml_config.yaml
 
       - name: test sp - sp_turboaggregate_mnist_lr_example
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd ${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}/python
-          cd examples/simulation/sp_turboaggregate_mnist_lr_example
+          cd examples/federate/simulation/sp_turboaggregate_mnist_lr_example
           python torch_turboaggregate_mnist_lr_step_by_step_example.py --cf fedml_config.yaml 
 
       - name: test sp - sp_vertical_mnist_lr_example
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd ${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}/python
-          cd examples/simulation/sp_vertical_mnist_lr_example
+          cd examples/federate/simulation/sp_vertical_mnist_lr_example
           python torch_vertical_mnist_lr_step_by_step_example.py --cf fedml_config.yaml 
diff --git a/.github/workflows/deprecated/smoke_test_pip_cli_sp_win.yml b/.github/workflows/deprecated/smoke_test_pip_cli_sp_win.yml
index 69dac083bb..3987f90f74 100644
--- a/.github/workflows/deprecated/smoke_test_pip_cli_sp_win.yml
+++ b/.github/workflows/deprecated/smoke_test_pip_cli_sp_win.yml
@@ -51,13 +51,16 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/actions-runner/fedml-master
+            path=/home/fedml/FedML
             cd $path
+            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/actions-runner/fedml-dev
+            path=/home/fedml/FedML
             cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -66,7 +69,7 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          bash ./devops/scripts/sync-fedml-pip.sh
+          # bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: test "fedml login" and "fedml build"
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
@@ -77,6 +80,6 @@ jobs:
       - name: test simulation-sp
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd quick_start/parrot
+          cd examples/federate/quick_start/parrot
           python torch_fedavg_mnist_lr_one_line_example.py --cf fedml_config.yaml
           python torch_fedavg_mnist_lr_custum_data_and_model_example.py --cf fedml_config.yaml
diff --git a/.github/workflows/deprecated/smoke_test_security.yml b/.github/workflows/deprecated/smoke_test_security.yml
index 6644a4b513..5d5c03ee38 100644
--- a/.github/workflows/deprecated/smoke_test_security.yml
+++ b/.github/workflows/deprecated/smoke_test_security.yml
@@ -54,13 +54,16 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/actions-runner/fedml-master
+            path=/home/fedml/FedML
             cd $path
+            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/actions-runner/fedml-dev
+            path=/home/fedml/FedML
             cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -69,7 +72,7 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          bash ./devops/scripts/sync-fedml-pip.sh
+          # bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: attack tests
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
diff --git a/.github/workflows/deprecated/smoke_test_simulation_mpi_linux.yml b/.github/workflows/deprecated/smoke_test_simulation_mpi_linux.yml
index c48cc43149..b2e9676ae9 100644
--- a/.github/workflows/deprecated/smoke_test_simulation_mpi_linux.yml
+++ b/.github/workflows/deprecated/smoke_test_simulation_mpi_linux.yml
@@ -40,8 +40,8 @@ jobs:
         - os: ubuntu-latest
           mpi: mpich
           install-mpi: |
-              sudo apt-get update
-              sudo apt install -y mpich libmpich-dev
+              apt-get update
+              apt install -y mpich libmpich-dev
 #        - os: ubuntu-latest
 #          mpi: openmpi
 #          install-mpi: sudo apt install -y openmpi-bin libopenmpi-dev
@@ -50,6 +50,12 @@ jobs:
       shell: bash
       run: echo "branch=$(echo ${GITHUB_REF#refs/heads/})" >>$GITHUB_OUTPUT
       id: extract_branch
+    - name: Install MPI
+      if: matrix.mpi == 'mpich'
+      run: |
+          apt-get update
+          apt-get install -y mpich libmpich-dev
+
     - id: fedml_source_code_home
       name: cd to master or dev branch and git pull
       shell: bash
@@ -57,15 +63,18 @@ jobs:
         ls
         echo ${{ steps.extract_branch.outputs.branch }}
         if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
-          echo "running on master"
-          path=/home/actions-runner/fedml-master
-          cd $path
-          echo "dir=$path" >> $GITHUB_OUTPUT
+            echo "running on master"
+            path=/home/fedml/FedML
+            cd $path
+            git pull
+            echo "dir=$path" >> $GITHUB_OUTPUT
         else
-          echo "running on dev"
-          path=/home/actions-runner/fedml-dev
-          cd $path
-          echo "dir=$path" >> $GITHUB_OUTPUT
+            echo "running on dev"
+            path=/home/fedml/FedML
+            cd $path
+            git pull
+            git checkout ${{ steps.extract_branch.outputs.branch }}
+            echo "dir=$path" >> $GITHUB_OUTPUT
         fi
     - name: sync git repo to local pip
       working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
@@ -73,47 +82,47 @@ jobs:
         homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
         echo $Homepath
         cd $homepath
-        bash ./devops/scripts/sync-fedml-pip.sh
+        # bash ./devops/scripts/sync-fedml-pip.sh
 
     - name: Test package - FedAvg
       working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
       run: |
         pwd
         cd python
-        cd examples/simulation/mpi_torch_fedavg_mnist_lr_example
+        cd examples/federate/simulation/mpi_torch_fedavg_mnist_lr_example
         sh run_custom_data_and_model_example.sh 4
 
     - name: Test package - Base
       working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
       run: |
         cd python
-        cd examples/simulation/mpi_base_framework_example
+        cd examples/federate/simulation/mpi_base_framework_example
         sh run.sh 4
 
     - name: Test package - Decentralized
       working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
       run: |
         cd python
-        cd examples/simulation/mpi_decentralized_fl_example
+        cd examples/federate/simulation/mpi_decentralized_fl_example
         sh run.sh 4
 
     - name: Test package - FedOPT
       working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
       run: |
         cd python
-        cd examples/simulation/mpi_fedopt_datasets_and_models_example
+        cd examples/federate/simulation/mpi_fedopt_datasets_and_models_example
         sh run_step_by_step_example.sh 4 config/mnist_lr/fedml_config.yaml
 
     - name: Test package - FedProx
       working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
       run: |
         cd python
-        cd examples/simulation/mpi_fedprox_datasets_and_models_example
+        cd examples/federate/simulation/mpi_fedprox_datasets_and_models_example
         sh run_step_by_step_example.sh 4 config/mnist_lr/fedml_config.yaml
 
     - name: Test package - FedGAN
       working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
       run: |
         cd python
-        cd examples/simulation/mpi_torch_fedgan_mnist_gan_example
+        cd examples/federate/simulation/mpi_torch_fedgan_mnist_gan_example
         sh run_step_by_step_example.sh 4
\ No newline at end of file
diff --git a/.github/workflows/image.png b/.github/workflows/image.png
new file mode 100644
index 0000000000000000000000000000000000000000..330e630c0a3f784f45b7349741a82e03855babc1
GIT binary patch
literal 389049
zcmeFZ1zc3?x-h;bVCe1^kVcS3X{1CzLQ<u>8>vAQP*Pe#K|)XjR9cY~5JBk>q(NE+
z1sr6U`7hk=eYW0n_WAzjJNMr2{;mtwtml2#ThFtedJ{K}n*}J&s%xnO5C{O=0{;Nq
z9N?!G<m><dIy!&=000C49Kr<Pfe@GkP=GN10jop!0NA&9C;-Gb1Mu%@biwD*1(x<L
z&u>p?KJ@n#groVe-(h^wqolY^K<2!Yr=O><lcyJl_z6)!Mny{pepDiOeS?pG11W}C
z<KHC#PvNT(gts@)D&e?!@_sE<Rm<}Rdg@x|)V?7?-eT?L<p!ky0Cx{R9|H{)4pTF8
z4#W!B8WI2jkO7j`w!U7<=g*%zD)aZ}xAOn^*&F@Vc3?>OsH{Jt|IYwQJ9}SS5c%_9
zavM8eTXzty1^{@jt(T7<0N@|Ra|QZ&9l_uY(D;B21mPP;u;XuV?h$PB8~pfN8Y2T$
zFwINwlaN?j`#1ssSw9%h5n$^GmO+LBVNnk|XAb}%=Kx^^cXv-a5VitgJ}`|J2p^R%
zWAmqS9RCDcTig6mrnR->AMkIufH}d6&p7*fxmjQN7WjAmxV!m-^!j%3fWIV8KAHyL
zlLn;QxT~l35ljNY8y+^gdLT>%!f0m@yWe0ak(0mX1rP?)Ls@KnRY6(;bRaBhXMIK+
zgjqmX-NQ}y=zAZ<+c-F@Yk@EY2nRX)X&Heq0|?)+_f<3eo+jVN)8Knu&{_u{RpsyT
zudIDQ=(oD2{ar5{#UGXb$;n>pd-*W3U?(FJFrEkiVB7)D#zzuC2Er$NT@8=g1GWJs
z7wn{Z6#p%akH5iD`?<k*1A7k*6%Yn-fdx4Ds2|~els?wo7lgjy4$F6P);h`y(gF6=
z&&fy=ghBeiUfNoJYd?q!tl!>4`8!Qu0rtM8M{NVsLu>8q&!~Ygh&yb>-s8d%zDMPt
zz5G;;>H_n^iM-st$p#n?=d^dzIEv>4VF}*=!|!?F#(q9VNAX}g;SOGY=Z|Ozwi6!g
zVtw{J2!ri`-vZ77s=yh51MmlL8^9BA0i54%)NO#F-`=PL)_@P-2-pL{-(!ByVe;*b
z2l#6U1OuCZ2bjk9k9;cM-r55JVEo}9vA^XN1{}V<4fvMB75D(w;0|a5ZeZ9SgzdpR
zzt?6AEP^r4e}sRp-5M;@7HqE@cptt0`|-cW{w<|4i0`@YdHsK{k%RpA{P=44XYe)g
z)$#e^OmGGGY4|ztR|&ku;4*O8-|_fcdb|&K19<azBY3lT?<}1oeSV^m4loO1^cxNB
zLHhoQ7H}T0Oi8#3TplbPEE!G*X99%b5@0>ba7D0YNibIRJBCM8`TdLh8K*xm0RHwh
zzNaG;C4>^v6Dkq%{hX6bfK2jte8189&y@VFtu}wq`}Y?7$NK;F#u=~!+o}D>*ZAH7
z*bP_}tP9o(>xMPL>HrQ{Gprug3wv<{f3IKt+t+pcqc($Y?QsRai}N38e#iIsFMh-y
zzM^HKtD>0SO7XJ~@&ox9Q1$e>;^XY-<j0{5S_FFzEf3ohLL6eEViEvwWM7Wb0KjMc
zZ*~Jh*Za4)Ycl{K-;BfIp8qZGY%>5nkp{)T;csz#G5|nH3;?ehZ2f%#ek%`t^uPnP
z1UV>ai~t+J1@M9RhyhZ-X+Q~31I_|^fDvE@Tn72o3FLDhAP@)zt^(0O9B6^lfovcT
zC<ID@a^NXY2Q&h$Kqt@x^aCTnB*;BWz&d~e_J9Kj1cHE&LZ~5(5O&CMh#*7^A_Gx?
zs6oy_3?OC@D~Kb+1L6+}g<ONgK~f;ukb95^kSCA^NGqfpG5~o8nS-oCK0&@hVNen%
z4U`qi0~LYFK$W4|P$TFis3X)H8VtP-O@!Wt7C;|CYoV>s9_R>k7P=1Ig#j>P7#-{w
zOc*8uQ-$fl%wY~NA6Pgn7M1}kfK|Yrf!}!uHVfN;?Zffm)Nl^CFv!80@C$HTxHmi;
z9uLokm%?k|FX2P*IXDWA#UsUI!4t$gg{OsQisy(Ih!>5Qfp;IT2CoB@r6s&Qe0+R5
zd|rHMP!`SbUGPKj<MHp{SK_ze58yB0?;;QgMuZ?j9-)V@M))D35m|^wh-SnfVhMpJ
zASPfZ5F=10FemUJh$P4$cu3GhFi5aWa6m{wc%1MQp&p?f;T6Ip!Xm<Fgnfidga<^F
zM7%`uL`Fm|L|2J!6IBp(5WOe*OiWD7Ni0jOPwY$_L42F|G4U(nIpTd1DiQ$_Rg#M&
z{v?ScB_u5*??^tAl9KX}Dv_F#`jIA*J|Jx;ohC(-QInk@(;~AYLz3l?)shX8eIzF&
zKTfVpewjR&JcGQ7ypMc?f{=oTLY2arBAg<JqJd(JVwaMd@+74mr8{Lj<wMGD$`vXE
z6%Umfl^xY}szR!Ess(BoH5auiwJr5E>LTh6>LnU{8eSSL8W);4nsS=AG$>jsS_xWX
z+928-+84C5bTB#|IxRXkx+J=%bfa`%=~?NO>Fw!n(pS(A(tlxKW>8|VXNYB}WEf%i
z%E-Z}#^}nJ%vi@b#RO*(VA5v_WV*}L!L-3l!+e_AmN}OBDf0viltqBWfF*>bkfoPp
zmz9lGgVl>Qi?y9~gN=?&iOq#Ajjf4om7SVhf!&!sjlG$Djf0j$nZu3aHpfd2)G?N0
zn#cT)-8=U7*a0Um=LOD4&PvWHE+Q@&E(fkOt~M?dH#@fucPMum_r!6+<1)vcj%Ob4
zI{t-+m&b%BhNq5ag_nU>i#M3JjQ2esDW4*rH(vqYFh4%O48JS?9sYg+sDPw^lR%C@
zpCD9FO3+#Gj^Kb0o{+4Nhfsmgm@tX3vT%TKnef~Rx)bM4M4qTS@lk|Z#9Sm<q)P-V
zDk<tFS|mDolIrBylaVK%o%|#wAZ9C;BQ`8fDy}Y$6t5TGmJpP%m$)l2E=et^BN-#v
zCV40&E#)IsA+;*aEo~*8D?KJdBXeFRUgni7o~(*2Qnpcc|CH1zzf(_7ZJicA?Rxs*
z=@mI%IeWPxxdnMnd29K5^0NvY3RVjF3bTsG6s;8t6z7$=l<bu5D=jPYE4wH^QvRqS
zs^X(kqw+=dlxmo2%Nf`ijWe-l-l|clT~Nzbn^xykcT_J^M`=iEglM#A;%RDYCTosq
zv1-|BJ<vj(l|CDG_N6w7wxM>m_WU`4b3W&u=|FYP>ZIzt*FCQ5p<Aa1=xOPt>P?;J
zJ@0+~xjw%BdHo#y4+bX<LJhhMsSPa+9~$l%sTd_0O<v%;;D4dbnB4fH@dM*8CTC1i
zOlD0*Ov6lj%$Ura&Fak&=Emmt&37%-EYdBOE=pdEzBqb`_fpWM?#oP<-7dedB)7D-
ztg^zhGO>DSg|XJNF0kIU(Xz?4LD{O>X4<aXDcPmjt=h}mr`Rt$$UCGstT@U$raG=V
zDLQ31Z8)nsXFG4ZXu9ONd~wxtEpf%UnYdND6S!NsH+WEaxO%+wWb+L29PkqKiuRiI
zKINV6jq=g<x$g_{z2sZ(M*}+Yz5aauQT}rQ3IRC*=s@GZsvycBkD%Tw0#|NcSq@eY
zE)Ic**oL%)9t*u1Iv1uKb}t+fZWG>y<U(FYE=6cWlwKve>U#A}q-bPH<jysdYtOE;
zUXQrG5Ty}S7EKoI6FnAlIwtQ1{D$+5w>QOaX5GZb+Qq((6OGG=!^GRhzfL%ra618)
z=#cm}Nh;~?E&N*^w?>l{lS@-5Q-V_#Qgu>m(>T&%({|D=)4MYyGV(HsGW|1WZ=bte
zpT(VZD+`nDls%H8l2e(>l6y1v%N@HrgLjqgR^+kf#pQj?cg`Qbr*W^gfTtj%5Wg^>
z@I#StQCG1{@q_zJ_v7v#mUxuRmKv14d?5Yc!9$jZNoCM7|FYFb7a#SPpDC}e5U#ja
zNmm*77<laec<qVRlhLQzPur?wsw%3FSLfDH*TmLBYOmCy>YVCk>&@y18qPMfJv;rZ
z=J|=|C5^`#b6(KCNNOT#y559q4sPCU@oL#<b!=T|vuvAeH*Ft&ssHkAhjz!SPW8_A
zF6FM~SMskKyH9mDyq0-g*CW+a+bh{y^G5Pb&0DFrwSCfk_5HH_&j#cMng$gITZhgJ
zbq;F{_l)R{42)hF9Ur?mHal)NzVgoP-S$Ml#KC05d%X9tQxsDf(=5{kGyF4^vr@A!
z=FZIZ&Ku58F4!)tFZwQGmacyw{gAQDzWiWCY^8BkeYJnhVr^;NYyDs&dXsW9_aooO
z>Mf<MUX&?nY1?NT_bL7}<L8nciJi9H^Sje~?t2GcV$qD~(tVlzuCFFvmk$Cl2+Zw6
z{=;WjZR~rTI}Yb%?PLAT7dYx`Lmj}L`KJ;9AT<L3>VD9Fu=uV2_bmnFw>~QfL%!cf
z(Lcez^%0L=fIb9J0s8Oy8=!YH1^|zX0DuQ9pZp>KoF@eUBUu0{F8;^;EeqJE{?@NN
z3gCl%i2U=1HtsgxU;Xg-t<MQY{od}MpCC5k;-K^N(-YSMP!iya5{|+koB)&(0;7cB
zUIOeO9(bUW0$#t@0)fKdc=!kcLLy=?K@9}}g}`7?I1CT(Xy5=62EGU2lz3FfPAcP5
z>suo@y=la*Cf_6AQhCx!YcPP~7Ps+<BqXAvXJBMH&cn;cFCZZ)B`qU+O7)DIx`w9K
zSwo`>#wMV9W@~5f;OOM+;_K%h5EyhN_}cZT=$IQfV^dPo(lat|XJr=@78TzwDShzp
zX;pPiZC!oCv$pn^9i3gTx?c|t4UdeDjlY|in_pO5`mnsRy0-o4^Um(x7xezuBfKC0
z>^HRj!0acyC_%iSa5xN(IKm484Fqo(B^>YANqj10eT20)HK*8B0veU%drw*kxx@`n
zv^G8iM0DH|bH}%jQ2U12{~2PD{}N_@Aoe%BCc&-+^xFl4LSc9?7z_^|54`XRK+g*w
zpMZ$q+eP&KBKdZa9bM#qTsW{02v`Oj4o86h6vTwY6#u_3+!Q#_bQd=PkiZ}yGr=eU
zMc{C5oNrv96H&~-mbQTx7MT$F$SPR>>Z)2q<zxNF7vjm!ipujnrrjgD6j{=hUx=T0
z{@A#4K=AN$9cF~DUK#<aaQaT9i&Ej^lBoN)NQey}vFGLaTY~$?iSYSb98$|QWlDj9
zSTEKU$gsW|K=bSV-$8>m*^)uZY`|VJCfJ__LdkgFw&tulf00ABl~RvBEmyL!=Kb`Y
zGH4y5v}pV^Q*P-;{^tk}fh$Te)<y3OHx;U_244ThjJCrdGis+19pEwJnYQc{!Up3Y
zmR5yUWgx$TjA|vgLE}bi3mXI(Rp_tSSBmJ#)m3#wF&(t2RlU_1Wa^fkf)<m%F~8}5
zQWXZ{{Kc|mrfg}02swQ#0UFjYjTcI<PIm3Txojo=s?^l`mE3@p_#J$K!u-C`>Rb{>
zS_387`8Y_1kw+g6xSUWpv_JfmuTO8Va9mzdRV@P7fB132X~IRP+P$ova#O-xl4n9{
zz>Dj9T*0hH^+0XWr251Oc!+aAY^qZsG|`GN>1@D+{|Un4UYJsc49Q^BZi!JugiS{5
zd2uCxv1H(vWd3i{OcIGZ6DiqhfYfbu{+;viFz4rC0vCy#@w2VeV$Ts}q(bvVDcuX*
zwW|%>Y4#(MoC}>FNDb7+U1b|mA2I-6&_m_#C*1$eXFAL}TD1w)7YX*VrLt4qnVtR-
zV^$C6I8Cax{F{p_Pb!v3bO{X^B3Le`hd$@u?RufqWBkqw`Kr_`Y^9#&Vn}QW$8xIq
z37K`H_x>N^u4cDheO1(RfXwSG@|T_cymNokb4%Y#5jSwS2a1vN@OfAOh=@N8Fa_dx
zM-7m6E_6Q?3wTUiD#(ZhMEMFb@gt~W17^(6pHw2CAHAbS1L*&{|5wsrkiX_Q4TP0u
zv--9Kkd~t+3^GkYLiHLS<oQ3~B<B)+CDMPyX8(6DDcpIojXo6Jzq<n=G;H0ag~1X9
zhHT}ANppRbXr^=(J0H9vbNRf}w~tvXy0Ze4i<b&>`jDZkqfs4<jgC;n4IX|44aX7R
zSGymz@{a%wh>ujPD8ExaG?wqz{fh>_@jxmRREIhR2}fGdW8m*{&JS$#?+8%1(+tWk
z&KKWjtA-GtIK2>iHRDko0uvim3e73)GCxxEv5ki(jTYCOpx2;eYK)$qpV5Q$vJ2(n
zANKOeaYKigf>=JVPx9;jMT7rT9)Josuj?M^V#bXyHKi$mfb};j@46%)7rr7B3{%T5
zhG*db#XE+nF8~91<=sL>b1hj(wt@ApOaN~9HzqCh@ZhSh%UH9$F{d7&@LJ5prF#qq
zEaE*tj#bZmh2)d7RSy+y7g<g=@uX-KHXP5dF|mu~`y4f=*+bba>58QwCq(weVAV2l
zKx{cVAr5#|a~}tc(PA%HbWF>jG9z}{%h1giu4&gSJEu2YAy|CmB=$-AfUoGlcw!^|
zhT<H6%EX|Ni)Kjl!yf|MQ-XU{=;II9yjKfNGKSvRlu33x?;;Daf3>f?YXxk@?&Z;<
zhuWKnQ8iuhyOZVFYENJGNx0bKIrt|KU57swT1XyWE|w}{qCmDm=0x^UVAG!BfNi56
zgBRZFZ5Ph%Hn&eAW2r5lp@Ua@z27XJy_9=)fO0A7y@H)bVclTk7M5v!*6|SE5eKx$
z;sA6k)h(wGMGOZU6%NQF$Ha-?09uV7gBLv`u=1he==Acsn8xF!-M*o>s+ZWNO=B{m
zluzGLT5FFVZN82@fu`@o@{GuF&GmHE4_70_H~IRznJUwH)5aanllO)gR9{o|{DH`Y
zKZG74DZ-DVRp+9!Jj>9Xo14NF_djv*Q@=i!qist-$ilxke3t{AFsDZnbV8AR-9&^e
zLCEXuQ-Z1S^0zZ0i{~xwwAazNFwy-$`5yvxvs?5lm*r4PC9xPu@r%xN(~fBy35*Ix
z3Cy97APn|g$rsl`O;P9aD7n$5nI55PR3Du}Gshv<^{`dU3Ui3c8m8#z<{wG(A1IZi
za6~EB^2(S-i_&iQP}HM2s%bNgjHQUv$(m~=@uMa&Nav);CZ6GAgOB9S+?sl?@M`b_
zW4<wO{%3AwhHe?)G521Hvj-Df5)L3hTyI`(#{rx@IG{uSr?82wPN3#MQF7$NNG)Cf
zGMs7{aGE9NubZcLwMh{pf=;NYjK&B)(DDu+XuJo0$X3(s=O650@n6TPh5{|(R1<N)
zby%C_=Q$jZY>xxxOMeQNy=`<q(8O?~=qgWKd*rgIaL;4XA$pWI_vKXkV`dmxRG<f<
zly)%4b3vm$i4N^(wC!-UtoThv97!F0TunxhO_X+(gb-}@;hmxr%y5!hYNxfm`d5(u
zSb&nyu9a8wb{O+;0I&=;NYGO0uq2#3D9_>vYA@lTj^tjUm@*Nw*)8#g7Q8Hg-N0$-
z)+M%kKT+&w3L3+k889&Nm&&N87Rzj$LS>8lVs1TcyzGsWRcRc%dlfU5+jQ6F+{JVp
zUuIuta*tOGcD+d_(Pt0ewH+r(?I$vij{a-Oxh#p;^hQaADUIK8a9Vha*FK@ksgT#&
z=s2a(B)AeLVl?Dmt}tUfc%tyd(`QQCI@E}S;V;H4Osu1G_uk@wzm)c#l|1GqmY|p}
zdPOq7DXpB#H5v2xSLc!;DZ|MEWly7Qo|~es0Ld14|3s_?Dq8RP{ve4z%4K<vyz9Bf
z0gi1d-6yM`C(Fk(43{3Zb${@*8J~cD@*R2+#M-s#jFOr(2$QerX0??OFMhN|rrg>}
zSiexAa!W%c=-#F1n8)k@+SHe{Ol6_4=&1){*gnDUF5!qIgCz3|kxNz0q&epsPw`J%
z#4Of*J|!^uwY(UN+C6lcX5Cn$Y#3c($<&FJ4^h|ncM<hgLH05nC?z=qC&f?t703++
z#T~Dry<w+a7ruh05=49j5&z{k>Wf*Ue@I}+gFcQytE{=YT6Mk(E*UWeS?;@c(?8PA
zhvVabTZ}lM;xi6daj4*arH59MJB|Ys*!Sryu>wgygc{gyu(K*KV@{4*_lH`SYPp7Z
zd(`ngfIq!P{XAb*jcbd`d$i$@crPD)ta7>{4{?)f(B$C$M}GH0MKik#YB=Dk=w7+7
z@T>lvtslz$v6galSI1ZS2#jptd^1(==vVio%n#2BH8pZLJDUX}b<HAX_5Z25JiF!9
z|KOj=7%xV61`4urCk}`*-0K20Nh&tzikKS?(BI43%?Uu3Emzi=7cU?aX2=v+gQADp
z*+ho3JHPJnEuU-W>f}nE5ba$nHu6w^s|PqFsH*;~i+&E5xmu3-&5$@{P9Gb!6CGde
zZ~QD{uw)x=ck3acTmbZOC}JDd&Q4XWzD<6;g6%_xgA;M1JLPl)l~g?f9V1f!;*|Oq
z(okVJ_oZ_yhMh)|xSY3jBK6W`bG7A0166LFrH@2rfu*nD5#B>wqZR0b`K_zrOqW(i
z6;3pUIPYG<0ggt|T7hl?WM}zd8LXNGZ}&(^oB!eqm0^^Q%w6;;iBn(VPh1{-JwNB3
zoFbi;b&0j54H~{CEid>=ajv2XAKCAK#K1t+c;kNB-PU^>H~<w9q>T+#{3O(jxxoJC
zfWuU%0m?nIeFS~a*Kc+1gXSXj6<tfar#EixThT4u4p?|`#U^}A5t=n(VUY#lppyz@
zU|Xh4-6uM75=8#ewisgs7et7I7S3VEQ5g^LA2>g^l@m@$7;}Bl&Sje`ih#3SHzD07
zU#M)?i>AL>j4m_cFV&1xc87aP+#^?aFLYV1{Qysl7&IA|IQ1jrmc1dl3q83W%>DGl
z-NZP5m$#S2nOUEw254I8?K2`^ppgQ3E4rE5gRQGA`)N;bT>a#^1d<LVc)AJ9i}m#~
z>-xj(8BA;-4$#B_J2nI58}0pj`O1tmhMYLSC{J1Mk&ep$$UZ7R^T)hmAn2lK_rhqZ
z`DpR2)wdh9wWUiG>Lnh_laZ#$SasmZPx5KlhzI%;cVY(h4jD#Xqf-|G+mi@`I>%bF
z_lia*UQ8;LMl+IFy_XuE;+cvSA5M)pHD5crr>^m{?ff}xTz6AWtq85H@?x9k`N_z3
zz4#vSD92+O1*P0r=o5^{0uCS!>T0v?oS+@jDWc7aFK507g{orWVcyky%Qf^?r~g9e
zDjC2J{0t3hP-U<efFeU;IiKSI7{(19vdtuHU`a6DbLB9J>W$a^Y0Ysid@d({=9?2i
zG!wU+{C{Bo9|BgPr_o)r<gq?87HIMFt`Gc{67Fkp2AK*)MQ~`+JbwL2LsxuQ-+Mc*
zMGC7JHy1f0hviI95AUN9lQA&tJ)L`^wMM~V`RZuKy?f0+5&380m?Y0&M`qyF>+1;$
z%ci$=T)AGNt8)`xoy}#vTksIsekqg$2jusH-Z(-L2T<x|DEkHPPDh6d!n0?hxI?sr
zAt{=~RUKj?LXQ~kdiUdiuU*xvnFDLV{uRX(6Pfzl56RE7s@m>WN{=ihF0_wTy$s~e
zcq(=`3y+Z1^$E%o6C12}7_17`L3Gp2NH-;x?@`Zxxsm)A4KwF_cJSv8h1<dXaQCh{
zTF6(U%~p_u@3{aE(H(}Vm2wxlR%1(e4V7qa-+D*m>FJ_@kn_JEIz0@*jr`Uxd~^qi
zRWi>!4AB7zz%=?}(1~0Im4}Hl_dN8rT(Z1-3Y#%gEwZT-Hm6AO{@F@6+vnL&b5=#F
z4gt@u^}XSK^I3X&vWr<=QMHj5Y{X~|4!CHtZ?dd5`luN|x=fi|-j>i~OQ`6Pn{kr0
z65d5nRmaj2izc3XaDj2(J9T)+aLtx$jhrD=?l8kT21{v4tu}W^H9zU<c5seP+J)e~
zw5VTNq++8cXU@Udiz{QuDvuY6F~xN`=T43j2h`X(k!W85^cnRsonSB)lWW-F36oY!
ziXfja^SV)$<}>KKo8`7q!dr!Dnm3H44qvB)8?6PkvV0!9zpa6}{Zesvp#%r4h8hjN
zi)&c5tjdh8Ca1-|y-YtCs%1@9Y<GFPS3EzcM|cB$To1uG-(+!EB1b*PP8w>C3Qw3j
z<eb!f@*)~0A*%kQDC(mbp;i4`Bb%n?!?fnbt3j67ndY6^;QhkAdK{24HEa2G!5s$_
zyX%rqTkR<<qqf>DfuOUPwYv<c-HgnjS2kjkEy+!t9O<tZTRBXYGh35xOp~BpxBI%j
zRwnq9Z>(E9@Vu9RdEhX`J1K2TQJCDq%Wc!FFZh{vRxh%Ve<3MxhQe=#ZuQn$M!!~w
z0C)Sbi<jNsiz=^Z79$Gl7tIgL!o@L?==9l5uCplPCx?apuE=H0JjePNw{XJ>UkN92
zZW37bo=9TzCud{D-K9dLjB78_wJ!<>kYmVaU(g$%GGaW7-}jYX(M3fPAG<l}+|5;r
z|6+39B)U@0<UsI5R>>KMXK`vn6@~iUiuR`Wy@0i|f`Dwd9Lao{wjgms-2p$c8;WW^
zY(}oS?amo1ehO!bIrOqTl<c~cxly*Q*OHM(;3g;iXg$OH(q8NrPScMSRQM-*#Cy5f
z9QsYjBaLpI>}Kq^(${u+VOwyocAGb;I!fHxJop@ZO}$g?rrVsAm-q*P4E-_K-RSg3
zi>!%`dzpFaHHY^x!v6ConVm%|(!NC6edd%}_xo^Z^;$!KBNu`zo~iFJza}pNd-Cz1
zoIJ`=yEgM>%5>+wopr@0oi_GX`c0Z6JttLUWLkSjDEyn9R{WkPO(eC6x<>L0*<rga
z?kT|4VSa6T;(^Rk%K<@M=BqfsnUs9b`zboaXPVZXXYyrowWnRSZK39VI-N3So94U6
zPw=)Q6)xsp)6UN`+{6LU)!@4^v*j?gQM=ET8$!b6%b|<a$!8wE%&pbc;FY`U3ifuL
zlNhA0g_Ew2iAtTQkzn$ZW2>{|%1ykk)_83V&-5_IbN7w*Y+kg8oT^}7!1g(KO6{_f
z@fA9%mtWOs$Ql8yH=zRuHkOv?uFh#Pw08cTFICdH`ZqOu>&|h;2<c8kn{;Im*Scxb
zY`18pV^S^#c`VEZV)$obZBDXJ%*}*Fng|L-8#deEzaJgiSoYqRx~0?}_U=F<=)>n$
zcEoO&ty&h>ix-p=7TviEo4l@Kw&?32vAJNUWC5|@8HWQ%6rB<1U=KOuL+59SQFFyw
zg^^PniJIw%G%1?88=DtAOacg43860(whmMmG&TaMCt%)XPL%g|c|)E%m)A6OYS%Qs
zSu<V8U0gjLB4%Q1vUdqhIeSxqbt0&*BU`PurqtEGSgkS2A(4SLi}C_Dp3k_D1#9Oi
z-<`Y|1#*lO>h@@+Qos9V;Wm_Ei>uWxg|qmavgxfSxw!=qWWj!2eMoTpsEZudgBtB=
z3+Eo$^b6BdGPx1goifD}<9kZZZAp2eSrG7%`X~c8>Ay>fZkQ!!4NAIzYL4<Z-6ftn
zMyEw$C*i|oMIY75siuC+EHgrLIoYf)d!yKPp?Q*gZsuwz`)mRo==V?1-c9$GDqL#J
z=zrwaz4+YyxCR#o)7QYclD5j|Q7i+h`Nk}2<a%(yJF|;%=~v+ewp3DwEsg;neJ=|h
z_~L+W`F_O>KE<6g>AQOEmHWKYpfB+FQYQAsSs%}IsCgctOWvn&aIo$Y$XPq1=^~pN
z$Ojk|^w1pg+T+W@O?RFR%dl)VJojOo9DZaGkoZmx(?$7X7RSB2;DE>6OqU)(sKvo)
zz597pnd6!cDQ{KmljI>zSpmz}`Vfdy!ejSDY**hH?zy5>k#mG{{O<V{@19(How(0s
zS<4k6_1b<e>uLTiU2-l4Zj-*eCmJ^;L$vc~Fyg~mmxWJaM<}LJS#mI0>0#$@71;tT
zcgrocT=J?F+epRR3OkUfl!=RK+#&OIu}WWbmjVfTiL)phfRE?t6;DE&91m||<jXF*
zC<vgFAE+3}E5$h#Eg5Su_Ev3Pz7BirD94g?QKeb1{_9HU%si{OWz2l$%cL7@%vlSK
zD31p3kQ$nl3tLl!++w%NlIsDVqGcG*3mmX_d%Gs@Wpe}@*>+PdkhpGObmtU91)N$a
zIXOk0(Soh)+WQO55nuf?Di6+JE)V+i8PC7ntuf`f*|gqud~eKfLz!djts=c&nB|<r
z1P*A;nROia=viJ<E~S@sAzX{|{X^5dEQZd-)X!&5uA6`CUJ2{jcGcJ>Yd8=zHw*IN
zt-s^<=7gC|l6UfKf{imYYCS@l0(qScO*@+hk<&g9U%N|DiwaDtc4a1YhCQkajyy4$
z(2=<GOy;DOlvGN6ODnfSQ(N4;NR$Eth6B}gBh<preKSYz@l9I#&=k#n-q*3RGC=AI
zq!Nbio@;&=*L9g{IMmy|!g(T~^s$zZp{X9z8@WksAs_Qa`T&_%0+nkm##%f4d6&>!
zZQ%?kljw2@ass7W&feF!EiA%ufaun=Wy*fvH1~F*tF|&|<q|;lv;v)x!l=-k18SG>
zW6fya2No_V-6!tYhAfV6!Cd&ye-dD^OXwvNQ{qkB-Oad$1CBE-V_U(e2GLuWH<@(7
zH8lFB*?2ViO8@0|P~O$%MSSFXBj`^uR=QPEqogqreTRWyPf}IVdpwJsFnoyrzSe;*
z8Y=H@?Jj$7FzQu3k+tX@6;Dx3_N8}qr|Z1qmt&tF=$m5uMw{@AQSrfY3S8pq!kJp1
z(i>mQG>Vls_tbfKUG$#|88z9|DlkK5?xmolV<f5ChAz|&3G?s;G-t|VwrdB$;UXEC
z3a)ujVI;uzLeZ&nNtCTt6I<6S%r0w(w4{2ZaNWG;R?(hj)m`u7bLxcaLwOQQ!zsB-
zCmu*|$kIFmdw|t7iFcK6rF4qasV=cR;l-CP3bAD|d5nE+`AMk}9M$%m`D!6=iC!<^
zO$>*)cF)CMRt>bp+pJ3+)U*!zXq4Y2duA4r+V6W==YTKN7^PF!rtL}Un*AmtHRPU-
zPkd$|Wy@uLJLi{WFQSbYiBiWa<!`=OXTSY5mmV|Iw-36O!ZzU}O$?cx$ZGOvIbl!H
zx*UT+j#z09_T2ZEXfM*~e-<U~W_7ikyS|45z`=IcL*K4!YRT!;;-ZSW)0!%H&))G6
zEG+lX^a<tXdU=;`w_|nJ=b8__hCs(^zTD#lUcX*U9@SQ;|A7>Gx%KPlTm{-h`&q17
za!zl`I~$vdX=~vZXP?$@My=Kp?dXkHVl_8r_GlV#KtH&f;jlPV_6n+`%O_N4fYVt5
z$z(R|e5y#PmLyX&%`F8PZ*xZf>NBo>lL~AH@{@)GsAG%!uvcmML#0(M^75L=JmfTH
z1y3Urm|zlRg)ZLj^bozJ-4bfsT!pYFC=+JyTsv?FkK<n5sGFR=KdQMS`1Jm>fHIoF
z7K~kVNI*MDuGmM&)fJyp*K`k)Fe3NSLoJpRjlyO5Av&59FK3q{jSb2_r7H>oL0rY5
za|`%m7?H4&P_}J>EFP3crEE>kEz%crH(y=oBUqv{>Qii#?K(Slg}VPBa}Q*h6ulxG
zFz!Kq$kR|k--4AmOmYvU&o@VB&*-?@uqup*Xs-rR$u^j#X*Z05_VBGHH(P_`RQP+#
zPg*;92c)0`7mZ13&*6X=q`esiZ?2(HO(X`tj3>)s%qC>Tz_Xg+e9s5|G>U`I^%Mk>
zY2j3NmqQM;#+^?&ZcJ91IvH<IIHS6(IAXaeJD(grg+fbVw2SOj*c=oOGO{+1ox**G
zu9-Ao=*9fxVP%ij#iZNVgDCbBvR%*@fqTir)t4;x+_&+*#CUQv58ZLgU@R{-E3jtW
z;R|?{fxy$0+l8~aF3b>P-#+t9P;{)?rdGbvceB8G@l&=D-KP#ualr}(((<K9<*9Cl
ztycD!%d=ljgVfkFwM9Q)h|?p?vTRF$mlG>|yl^6-Y9^PplP9&b&ocNk^Bs+L1;k-d
zs5Wx@xmu3N<BX511tw7!DY;uF-!t5fN)h1;>(@iv<8uC5xhJtL5+$h^<FgWMUqp9m
zG>`wl>l~s1ad>gj?g_J3Ozo_9{QNT=%XyKFvF+eb2W^*yo;|5g)w$i46dK<1A_hKr
zkt&7}9{PxIha=J=vvG&ES}L3=$Z;=x45Qvmr9j_q^JNd!C{xX*8Phx4P<!m0u2DMa
z6Q80)wcuvZHZK}Bf7Jl>EM2o?HL1uMwC{nTleuD>@Wr)f_AgN#{E60H1%eT3y+Yd+
zW0V!9{u4^?1NpgjO1hAr_-78eaKJ#E{&9!cb9+I}BfR~FtI5cRdif<|Cm;V$T!&;J
zJ*9ZoT>YSARf$b-R%D-MLm>sZ(pmYoZ$hNagiZFa$f9k}jq1bJxhXm==Hlp4v*aLS
zUSE!jkknif>g+>m6g?(3fL%**$JXR!lYE9I(j7@ErvWd2h$W?dfN8C`EpHY#J~W`f
z+p|r*XRz&__-Nt4x4ky{?PkFB<6?3*r2y!Xvg+EaE?Ddq(ou0=3I_IqB`puh%=ZuW
zZld1wwN^5@OpM+n^dt*a8TwdWaqN6)@+I~hA_}}ql}oRR;vb8NJgCh7F>8bHiR*O=
zdWX3fWfZa6K+Sb@J8e<s)A#D`;Y0S41C;Rn+S?0W&C<4!t~;macrgO38d?q8>}{t7
zHQg%g4&;U`%xfM5UUzgQq-5BfSxt1E#R2aEwa~{)SC}R-7s8I682oV3{jk`Vg?wpa
zzZT)o?N}kWgP7}`A#&ST?pL6<bWWM<?3}j<JGCaeCqAeBwD8HrP1xgNBhFB*g@y`6
z2gM0;EcIyyZcmb+d&lq0#>v=pec2Y+QWHv{Sj|hPZ2^)mryp<yvF%}nZ5J$K$NEMZ
zHRK8=G}uLNG>B_9tbR~1vMs1&VfxHY?VM>@qt~UQr}?BIs8M{*!uejJ9S6Vv>u7ix
z`HapC;$#U6NLt&z8|Jwpk3t!@T^oZlZhX>mW_go&CZX|pT(~YGZC0$bZ#uk3*OL5p
z#<K5$YN#;Uw=k4@TjPdAGwJxGi+5*=@N1=l^IKy0?&!Cgyu8!(TkkC9V%qy?Fg({x
zw%KFcYEF+eHW*626y!=b02r-;*wR5Cy1EN<tB(#jM$o(y>fagO#r{dny?K3a9S5k;
zN{2>S7_PE9Q_16FWrhXHtMdU-fWQl=Xl`x(#J_?Ac2~)<5rQdW$VI|Z<XB_#S4c(O
z53d;AoT-ZnVg<z_|A&mY5<Dm=Kb0NQ4|jY5t`PHkFj*-{?8wrOXR?Zw)YYx={_2Rq
z`s?cMKs0v|qw$B%+>65@8kH+fFx=?((WfaJ#uE~8>p|+P$0IN3ubF8=t4s6ASy6ri
zeRE|n#<#Ebq)Y8*ubEAsLi^74-8^OEtROx_eLk8_gqo_G`&@8ZGSSmz7zYar$+b)2
z_hwmUbV|{I2C14UOLb>s<Aj8|%HTEt?+YVvW<{{0c}Eht7;lNW;OhUPcO!J_et1_i
z-p=|?Q_AS49rd}F;_ox!uH7jTcp$h&D2rL2#OQ;I3EHqiD7$H3SQxMArr4RgY84y=
zw=|z}<|{`*Zh0Ye1q*&GnjIAtRELeO#3X|~-Prrf;rDtL?Sgn)pYgVXJzujRjrNDD
zCF57_o=@(n|7_=MAry>x>zceh*6IlhJ+n=gGmoTFJw3B35C>B`nc$W5;lvUP`|UBX
zk~Z4;nXgREpVUQ;=EPJQEkH-q1gqHrPJSDFSof++F1c(1iuJX-L@1FsNl!*}F#p1^
zWTo^b^O6YT=VGeb5w4`$-hF3x*niA?G#qOZhg@+1dy@>!=!evwiCK^@lcKQH=(3_o
zr|A>1X2P4i^pyKz47X*Bbv4&SJD&@4ovR&?$Ps4h34FJ~kvJ{iCH(X_L4*8Tgv88W
zmo73mXS<w$+|xv#taRxsBPQG#5~L4u`|v5BJ2zMTLL|A}1j0&i9D_iugPsSe1ad<W
zVJe*d{P7jjD!G)Vlg~74v=pWgO2GUxy}z!>BXEtSNh2jx^)N4-B~))L_2pfG$hj>8
zcwMh}wTnKn=qtwM50ZUO$fq%_!eqOiIN(X%dB1}Pfv0;I6pF=Yo8k%f-h?b;j$IbU
z0r$#q0JqKe$^Jhv5)0P4XVEg6p?qFD#Lu+grb+=H(Zh$L04xGn5$st+OV2)NI$qh4
zRd8o_r*qU<>$(kjRlK@7K6T!ghV{!}N9PC9E6x#ZdmfTn?sL(D>rBpKt*s}T((Nl&
zrJwN>v*$RLDj#p`&h<#Vz_+jsAEQJ~&N)V5X;#OA^{ywhnN6SN4Jx_)x{TveaVCe=
zV<Pg3flHNt-Fo^}uP1W~!iO16<U!5G?hec6QuP|r>w5P1L(|eyQ_Mt-92?FT9lG|1
zvxOc<(YLKT=c7Ds*FL?t@b=ET;=AYk@ZP7;AEvi0=h~(Iv$~`eGRZ8tlF-Sh<~Ag)
zUyeI-$AI?n7e({)iae^+2%_$8R&|Ck`y%Iz37%B@LNkv8`RkOEUG+<~XYSGSHne;I
zN0a}GScfO*<ff64cM<jIGzB5KK-f#jz?Yjz>1o%M6a(YKT9YazXM$4O+1R(Elcl2O
zLkMb^3Adl|XL5l)`iN-&N91Xyn!l_;L;AMq{<E3A)9B-*>zB0bugIxgrI4Sz(XxXV
z(4<hA7yMOEYtZiv0+ftuSG%ewRmv=)ol*syI{-3w_n(OcOXYJscCJWNNgF#NNV@aX
zSYoy2vW}spbM{JO@q(>flkcX1f@RKq^0P7XmaRw##x#U5)Y6qZ)wxYAR@3gJ!aGqj
z)1*eR{P>+^hVH(5f2l6?-;B=nEZST~)3;4jUY|v&*H~p|BzwZo3z@)>Jw#FJeQ;6O
z770bLO(T)s_~dNWlziLd47VFV5m)#mh$T`vd}@66tL^GfTIgp*;t~J~;F(~$s|7)}
z+A(UV&G=LPQT1)AP`L74t(G;d#nf)O6djGZ=AEP)r>>(g-R?HGUTG+8^2{pK(^{d~
zJ48O|%xCZgZ&W2Pv-L8^ZZs;+O`UEJP=DJZPH*$+?6O~q>{@R~c1Hi8v{ONB@8kcD
zHHhH2faS;P^)f~UvrUf@Et*}!=aQT0ZR_C#G3SSwO$R;g1}hf<&FxaZTVy%?vsCiF
z7wCAB__y_1oAH9WI=^bXWOd=6H0j6o?Kx<jRabZ;uE(|rLoD0o>34Xdzs!A!YC67x
zXb(S8BW)aR)0=a#uuA|6D-_{~co}SUOU=*n2Xh0iO{oNj75hiPZr@4d_Jz=CJBwGH
zD7|Zbc>p?irk#l;lib$D;x#?BMDNwafHG+{)`(e?xS-5E9klHHC5KBM+KfB<ykVcz
z9HrA?C)ck}wEC{(sD**EqJJTKI@-Omp^26);9LkRsz=kExpj}&nWntSj8H_PSNs}x
zOh%_JLV29%^)~MjdzW>c(s-$3Ec>>qKuFM|o8Po0QawG8hd@z0_rN6NFKg1yDQ}xr
zmrlS?&u!fZXKp}M#tOgbh#FA0dZa&6X!ZJ4dpbwrl)Cb4vXKH?=qc1pJ1HJUdd@yw
z=x$*B%fT!0tZffkIlMo`uKlQLHGbBUA3)968Y+QSo!z?LzeW+HBD;`e;51<^asRRn
z2|Xi>J?O(zxiA#<+}O(`iq`o&O7mVx!JxroLR7cwgxt)C0RkJsHFfWZAQ;=x8jwHR
z*Pm*Zs;l?eXUThsjJ`{cE-6r++oJ0d$_V&E>=(&M;{Jj26iM<V*?X{nc9u{Ya1o-#
z42IYfRauPu!B+f?z=H>q4xshRL$>9hOfZ7%Xrpk@1wthuecvc}VnV~p4jsg?RXd;&
zg$6OJp#A4w$qCNYBnt^oYpA7TI`P$oW~3l;sTR$D#a>1Yjcr|7XvbM(1cr4Xq>ml#
zBN!)q7V~DA^IT{QJ?V?Fo=)f%B!UHsFT?PNoF%+>hEVRxgE!?a=C1E7FS>s|{xtQ~
zw8l0~t4h~%_wL<F*S~V5O8P#47-eE)(Bz=ybAO+m87;AQD*onDPFWVo;B}eT6<%ez
zzBS6StuigFd5Q0lZIa$@vKdQ-iIS{lJ=d<HVLxG0&dT0W=3LrUww`@G`^9^f_7`Wo
z)sc-)D%7_^vGv7oMjq>*eu<}Vv<DcL{&%gN`WL5#|Cqj35hExrh>+86aYIDs*T`wR
zeCp!-VnWu1Kyfy_F`1o-tVz0IjHKSO$z9J-u*~S;r%d9bXwZ$hcDeXb5bcRwM#(TL
ztPamd<ZdyI7E8>*8)hqyFW81hTR-|<#*Nbsp(WDWu(_=$EDt&q?b=bh>FxNTXhlEv
zlx7wrJw`Rs=zvN<kq`q4Wh90rl(;~ste7?{pqu9lbT$PCNQsXm$_pMXSNfB8^`kFc
zm{Z)=^~Ms_#g3=AJo*KkDx1Uz)}uT(_EB3c0bfVzq6a@hD$V87+nJ+JaC#~8y{EOl
z%itq8_D@9Ra}#I{y)8+bXwawUbngV-N2^TDYp<TElB=*wj;$DBPC_Mmou&$NrTRQ;
z65-Kwj69a2!`rd)CY_}eVZc3qJ)tM@k<DVL_r9P#a~=LkzJJmz4}7o0h$P>c-y(9t
zN~4y(MEUPr5t4)x3KY_~3F0$Y58V<x-J$?NeQCEO=Z%&VP0_GPk;^~(L4E32PiW^e
zkss;)v&jX$Yn$eqys?u?!}eK`mC2l+*Ip~J-;}ACX}i_!IhVl0s6M7`stOxsy%xK)
zslcu!JZz$He8g^K6rb$YzAK4EjLPexj~&5k=kioiKqku1|L?G=|3aP66Z|EWa)zrh
z<$(J85byKs#k%j=KMBy~6II4vkUjOe<6`*jhd%xi5Urb}$?AK1MP9yL=8kVRmDq7(
zf7-;+h}C1hd|6Kb?WyH{cAAcFsIdp`_S#E@is3O^eFraw8+!um3%DzyWEIg<CW7>0
z^7yfzx6oP`Z~>dj1aj5~x&2IWj(0ookk}bqYS;}fzFJwb?oCoIXgt7DA?cC*wRvck
zJd8)uk9lNGeYyIbDiuKX&+C}k5iY?Zz!enCg+Js;G-i3V!z9?({8I%#XG)Lx)hvw1
zFKT_~$6vhlOA>$7D!&xOkqZ8QTtOh`B!`7A*=^IskJ81fWG!TQP>Jb6yl6GqFk|^;
zf_(O}2ypiRzXV~N8jS&izM3~vOgzse6?bu^Axop|;mtdZ9npM~2hYb@9)ASpvsK(S
zSN@58$rB{A3nryqo<XIepJSt{!7WgWIAFmK*`_zY^_6Z2TvM8g0}=*aw`Z|kQ@irz
zM{CIbQ!by!A2jj@E{EX&LdCe|6$?EKnI2YJ<Ht-ak7ScqBuv0=%8yn2l<8N){%|^e
zQS48g`HRPXN#&77`lT*@DcD~o={E!W4{YTV^u(vPPMI&WwVi7}i>lq>F<UUUjbMo=
zR^IL8JL;k;6~qdte>=IzgfMVHFiXUt3iUB#BEQxq<+Z)34cV!OPY~X$C;|2#d#)A6
zCw@5%-v<PK+2&ui`IpE1%avd7`{kJaa!mieV;Yz?G+IyoqYZTpD(q<IM!Re7HJ!sU
zDk+*D4cWbrPbw{CF1B;Je5`tGKD{tu^P})>Zq#e3Zf5`GCx}vcQa35@H`=NK&3fD5
z$aY!MkmB;^hdzP%L>i?E)sE|0ygY>y@<>&1guDOyTB{e{6Y})ulgUOcu4r;SA#gQ#
z>c6!QaZ%jel*UFl=gW!^aca!n;Xm(=Kf%>i#>b829zC=M_pYgx3u0ebgVV}*9;5NS
zd<C&e|5Hvlba2%#dM3Z)!)Nbw9M1kl#}%!Wg%K{S%1w2|*8)!*uz9+R+=owt=hzS`
z?ogXwrT~xhFjU6@l`@Jurp3asHwX09Xz}o_I9}aHeeJz+#j)N|{$1lJqhfmSohzQ;
z1_6s0?6ddCmF2R;m#@}K3u5h}{_b7{L$d*0hWY!x@fQ|1B<XfkOG`21IN*gw4*Qof
z_VPij|CfTpv;(oT0*X?KTlycm4%ZawwD-rRuf5Z&;w~usY>VAxb3;xSuKP9L30sl>
zmGl<{{&!M9%iOS>FjQE~9=}dmjmG%;-xoJ<yO34T11dd|wTBBF=Dk;Tjkv?l7#~`r
z^RQ|!Ot3G&xwU>$3540b=UN5MpJd;^@ucMbavaaaSobOp;1HI^z5o|c_2ao3yY=%k
zN9YK1g$?Z#Au@v~l~8x@w)m{{c8z4+<Q}_pG4CpJ(O?L<4(_|}Pw-DKd;dR5JN`W_
zuC}QoAI87ab|s(8^W5P~(iSwdKu3z#Gr-Ig8h$NyOK-4V_;k_@Y4$x5J<ol&kr~Nz
zacU-=0=ajt%vUI$1Ls<^lhgjgUh{$9_nL=*d(D&nw%7d6K_@c9HXQIt*x7P-{b-$Z
zz~iHRNt~nU;K{|0!INR2b*}0TV3RriSlsY`yEx)MIluT{*p^3AmRM!!=G`{EL-=Us
zb;U)af#&tM<cD;8869U<Kts}-UnvakIE(WFZIeaPA<ORCg{3BZ&^&pte_i(mcYEBe
zI8Dt}Q4q_Y^7lR?t?t5BQt`!yf$X7Sg0K1%r)D0W7PVx&9LrZ3D<APMOt1t0*?G!O
z_8Z$fCq_1<#)R`@l1_`hBUYm^8D_KkN4rpr*>8e-Uo1wD>x0NcV07tc>AsMUd_}AS
z+Hxo44#a#U^~Ye21kuqZ5pB&Gze9e=2R3kpTe3Eoy225h$(I~8;hnYIrvVSf`+(fx
zx@7P2MI7Y1TmR*o5dW?-@vm)$q%_ns_m}trinT7dIPjVma*dH4o8+=(QS4<Nh)ENS
z2UVDu;vvh(4Cq!K$@Gs)_;<PZ+48>*<dt*hW+yrA9+Cn`@QBxM$6r3W^Jg&zPmuU?
z@ZTrZg1ly9J0fIeQpNME>XX{lx>c9X-BMS0v`9YM6u2$2S$x2Y)|!yFov(}ODVesg
z7=B@;VN=`WjF>D3!sOLLiIF$}Pww!>#>GyBW0E~0mb_ZS<H#BHVSWs$EjwBubHkJj
z5%;n0u+6g>{f3@3Y2cLL=PE2)@c|vOf9eVjh|Jl-N@`%_wfBU;MIg^UgT+bDU|iy&
z{vS#?^Szzo=2CemBg^rWrO57uckWfwI$VZbSXsB>i$dX7^(~L1L-sx%-jdT@%l7PW
zr=vPlm=rf9e>wF?DO_^LF*?+HwwY-%rx_o8O>Zn8#-mMKUy+JvTFHunJ{kT;^7wC#
zhvb1v?qBhI!PbM0dVcmGDh3mqEq!>U6$cb_IIMw|f)L!LT|tFh;i+ys1{#X{3810y
zLasbHQ!Bu$2091!ct5!4l?Fd)RB#h7t3QqYxm$lvwSCi^0!xPV2Mep0)vsVb9_qh~
zM8DqAR}0nO_z!x378Uric~V({Hhd@`55G~rQ2j1#%EW|4`6ZG{;d{@SZLe@$5L~*_
zDKoyEu)i2VJ}p^uQEs+iv2KKt>E_glt7@U`2ggH0N03fiQ9hwcqkgyjLT=YE$5E3u
zCU8;`sn0!~rO?ILPBh(pP<Hlw>h(I+>uKrzVBaJQ^n%Vq)IwDckfG<dm)jJtW?%);
z{$<?-Yvqs3C$knb-`_5ZwduJ@n+Y%7oM@UZFIFp658FZ`+i#Uzk=Y{>?8M53x}s)U
zJn>Z*6sgTG&M5nbd6smLmKtnIcAxgSl!gZ}O}#q~o(6I@20tbBU0PA46*yFkPor;1
zfni7ATj-<0jw>=Wv8`t}W?p7Rjnu?BkJ@t?)rl3Uy%M;0J4J7rx01F-;Dn2gp9!a7
zm3G74F1V>X<L_CmZ*eAX6~Ti(8+A|zd;Z%c@r}&kUmdFMTeR$r@jZOQEZM86Nyeyq
z;l?6c%}98k!tpUVdz5%fxZt+rK+}l-CAPQoYXKJwg`S=vK1o2U%-c^iIj-oqdFt{@
z-wF@IcVOrB<>P`z?K%1`MKX--XsCA~I{fwgB$mScvOC+nSu9O`Hod=?>XV!=jt)k-
z)OXW2YyA1^o6J1;x+VR<_9?Z+&Nd0GJ&TS)HFq+7NIhBGG*lg?GCzHx^<)p}^Jk)O
z)m4EjmCT3np@H*#H<4AA*D&r2chq(-c^Y8*57c@da%nBjyjRIG9Wk<|G>JGBJo-9r
z&!a?pJP{d^_B9$Tov(WvUnFK?Bag_oHm9S^ZRTBf0OV@PtGTDI#N#)GCRME-b8)ny
zZe!FG$t1|p7TQGU=8g6PO^Y<AM$BF1czE|kD*7b^2bHLcto$^%ruO!J&6AZheKloe
zHQIepen|BWbc34bHIf?XQLc00S@S*YFqa|cs`s@ew1xI|$z1QN-msbOyFd3(;#g)C
zEQn^hAks7R{0VhwVZ&ENwa{&pIJk{lF}6bT238g$UxPYlyb5`oGI>7o%O*(&X2d?)
z?Cd6lfS0j*eR3uEZLu_?PjkC|r-=z@L#z1s3K~WBu4XO_vyl%i9p0IHT)$1Vc0R0F
zlCyx?$~Lw4pttd+Tvh8-U4U#^37IqBbX)4vhL33%ERF>Wm3I~l)Ql&{El>zDroz0H
zUfybW3*laPP<8?K552-6Ry+se+be>rGJ*4XuRd3i-m2DD3se5^AB6aKWBa2c;qjxN
zF{I>gj=)5Kp21!W4p5>kH#_xXuu7@O^I3RWb;1|D!CP$BKe!P-;Xk<<#+AOJpbPgu
z`LufoeT~HKPRS7AhX~mhr?F@8x;)Vt8lZ!k{d{Prf>?K9p0w-m8ucXBLNf4;A?Lt_
zZSXv*%R%6H6|JuB{UUoFZYee9$%*9FK33;E=^D_j_<pMPkAB3z*R7~LiGH%3IhWaj
z1Bl)%h{T3-)b>lfx=Ju4Wq+roFB2}9bsV{;51ue%n+@!&ed>!vGGnPisRz~TwoN($
z*qCd*&bj*YeZJqNPeT06OmioJVQeuFbVogKzyxxyVsuLm?|?E$ER0$0X0NmY_b{d0
zcq=01lf%TMfDiqbDz*pLI{Xz$kh7WLERCbcIfcUqa+i?XA|3SSxa(s!sgK_aU8d4q
z-W0z&{_%#@?7i}pxe~=Fx&n*sIt*>W*vsr5wBnu_+P*AQX=Hpeaekhk>&Dkp=94#h
z{cu2Nn?~?FbGs$Vn0<nt8lBT2Wl2zNk(9Hsmm-|DR&tPMM?G~7+go*yt*dfvD-r{l
zMaG;u6@+X#MVb^-J-&TI$bF$sMT7Y^2XoFy4)-pv<d{jZo=_}0%Y}E5E*S^F(^1$L
zLC^X+w_=}tiQA?Bpb;TNqP2KUc<9n|a>G`IksbDE959<@`D8g}L}%=}hvWl8thuF&
zw42_@u@t_(!_Sss`j@=h3@y$CC|D0Q+swSv&_16rLoC4y!-h-<w_-j_>v)pCQ{A(M
z!R8dBnpv6tKla`Otf?+t8xGQ>89{oB3W|V$pcH8lm9El>)QEtT2#88Yf*>GWx>BWy
zNRuvtM7n^0^iD`ZFOpC~Bq9FWnKS>H_nmpaf6jO2T<3iMH`m1mvUXqDd#$J3_x*hP
z3EEUXI1)8Hsr`Nc1jyYP9lk`ZB#~gi)r=v3!D$^KlhM>jXc!WI6R_Yr*U>9Q8~GF9
ze{kD;;JCsx06#OIEQ=fz!&gw5N&&sOSqQ!dTFrCFfn)}5D1873>5iiYwX5RqsM1pT
zw21&AFwX!3r<||=^riveZ!+?p%z-GC3y^z}2XsONDv){cj>4aP87Xq{8y!DE*K=CY
zq_foksLCtl30W|xXDTH50||y%V5Is^Xfjc-%!pMz@qlwgT1>@pWQ*cYkbTZ^fV^>~
zGvngp#2zY<Lp^dCQ24o2ksSzFMRbuQU1CBp<szXUg}JG5Y-Qm^n{R^R-KX4h05BZ+
zXbWEU*!*GZa6r2`R4{E`o5Hy>e^y$Zg93}ko_*!9f8R@-GMU9gy6*qtvBv7+OtFyU
zqw9d3n@6!E=@x5X1_0ff`B^~tVRk}{825_vMZwFp!IsB29v-B|Eei@BYuoFrmXGDp
z0$8i+@mLm?C%0M`y?Z^pIBv2=uY8#=0pFEOePh9hZ)9r6G&2W{qCrq+r4v*xWb~#y
z%XVYVt(O^Y8W-dy>t3FZ;SsE2Fwp9F!VXVF^8u$#1K|Bleiup6nEUk*HdS2#zxTaG
z+yOH3e&z~aKX--3_<?zaypwFfP6R-!hj17seR{ZA&4rC%k7;79EUy%dJ&*6oq$SQa
z->thm?H%Fy!~DwS3gtSE2eZ!Co|4-%1DDC_)KMfIpr)0@xrSuf8%;l;1A7(D(lF#Z
zGj(m<5nX)LN$Sm2W$1gtXWjmXv&CO#jsL`7z5^5`!~?nhEa!l!Oy(%^OL0DOJQH$o
z-rT(ch(~3h4=`fX5QKg!fM|C{?=3S@qdE2O<~=qw(!6H~OC<?VH9sKN%ets>{(BPJ
zXj<f#dhnqapfoo)027K#7zZr#?!P-d7J2S>4V9az#5b#_EI8$S-x?KqDHBrx_}v}C
zpMPi1)BMmN2w=*?2ob~$!ay68J-E*QCn$IbqN<gs?{@m8hv=j0qI8Vr8UxlGG;|A3
zc*wkg)}yNW7`1EjIqe`k2@sJoi4`+B_E7px*qmg*^Ouw%wuCSrsz^k?sO3(q4At19
z6giQKIuMj^B<njXLg|8}NHF3jd{)8GRLjXhiVG*rqTI^PyLUF6e4p#iG0_UV)XW5R
ze(EW^C7XB9A;10;#OUz>8y7n;0KW#XMMLanuDOt9;j=ReUvkG~X8W01OD}LdxV&n;
z-1Y<M44yXMHzOt35g3g~_V}@}g8&=uVO;|W;)Yeq5x;x%7AbMp!tWWjB)|kBhk>K{
zJDXEa9VP|BFj>kE-@E$PRXVYLwb0Qm%Zs&6nfg2*+6jIm2}H1fLE$NsnR?n6YK)g@
z7lK=Tqn`9NFlwy+G&n!HROJ<G<(nWi`bC286<;z<gRaPnzE5Uc#x(~;Q^e~U&9}94
z_qsNAa05})W29xQ$D|LA@i`JBm!>x)wHGU3l}KNkjU9ieeg2X3vNU;tM)#h_Y{R|#
zF@Aepk?Kl-IxTmTJw_%%Snea~5q|h{8fwx<v5q}?L|$N`t+d%1E%|0lh--vS;w#^C
z+7(C`yc*g90n}m315Ge$#``7&?<9x1adx!pJ!$~Ls<ls;v~Zpf$$U8`(+M0CJBk<E
zAmbsJy)9rci$pU}Wn8CF5fpvLGJU^-Hijbg?2lQoy-F6j03D)P#mGu&wl-Y<&a<A3
zf|)h#$7u$irVnO{#U(ZOw+?b{l2|&Ryy~E!!DayxBSEH95duiw;CdyycVo4-XS4%c
z{M;?Cu~sp7r_j07BpQ5OxuQuY_!ATZ<xrOkbUrL7JWQi9k)DxcZsGgV;iL%5uxm4$
z3HBZmCnnyeUpP`#`Ec@-;qnMYmMjuThfizoqqPXVJ+c)D%ViuXmGan2`<Qaq{C2^s
zJ1<gY!yU};D14V->X+;Y;&rXdVMfirhH|Mwag8<aJs`I&(JT8rXN*<e4tsnb(pGtD
zt@G5E)z0HOgT2JVq)e`<cm(3f=88f=V)WGV0J>hWWh**|If$uExR{{4-<5s5arFM^
z=3cv;EBAOL>r>J2m5V&Forf!=>FtDuX!)RN*b*a!OQwU$TTePuNRqbEV=tdWaPP}n
zgwM)`A1&n2F?Di{m-V~vq<Rl!u%5wlSugCeep6E;zj2gFqAk4m2f$FLm&JR;o(A;Z
zv7>~tZ{NJ0Ji0Y(|E*m@YO`otT9gMNeb{-$UKP}&2#43H3y=gl(R7z8PLT4k$PPB`
zu5<?lr0ePJ!P5$n_o{RyyA~T$&wqk755kp!x$M@Mb2FVU!t8eT?Mo<bmwiQg(3!hd
zh?)UWuX|>~3$V#K=x~|3I7Nt{5RGX}{uJ8bFSJ)y`b|0Ih3&Me^OyFpWgoti{@9vg
zvnDmAPZ=&NO_4E3zq5hjc-#%r9@+7>4aFe?t6qd}Yw<~wn_jUrkK8s8<$ff`5*^td
zdfr-DIrv%%mD3RHhKFGzIt;?$HS}E@)r4vrB==!+?`AXqSoU;xI+iz4S2PaOO*13v
z_GRbTqFeu?r(%>7)mU(ZJS12d-o__$s-B=7TJEv2RYP>te<tH~X^je}D~3(ocHlPe
z?ni`;%5O^IpcGE+7FZ7WduJJ-3pFVH6I3fx8r5WEttXwg&F?rLJJeZlWU@Dyfp=__
zYuP+G|9ipao3Uv)wEa2CTh72dC=<3I9E;hCwv|4tYgY1g!oZ$yQUu+HZ@KBioaskr
z6r$;_bhw-d%yuYklb$%oBk?&K376c_o)4<ecR|$y;)7BuMyUGQk0WKfq)oe2Q(bAH
zQ(>>}nxAZ-P2^D$96t*`n{QN(ekWgEj?F=STMRmd4)iz#G~O^C5K;v8Rz3lY$^P`V
zqj2pTg8ofotzUhQL+JVTlvvTn6Q+*W?|+cc9)i?%b#C!fR1D}TvZNcvP~kwGk<`}K
zC7o2?jn`V5>6nDjBc?qsXTlVI!~`vY!;vkTwd+X8q%VZ%j1PE|kArX`Qip46M0Ei5
z&CV6kSEfrBIt?)yw7t5#^Q<wb>EV_-eS-b`KwRLx@k-K>A*_w0SyNr=?de;JVBR>N
ztlM(#vyDNz>PG$8tJzbAvD$iO+~tRgbyG-~Y-XluviC5R3DE^u=ulViBpNeUL_q0~
zCW}cZn{T;1{;4U&SBF<&#~FoP+c*>JlSQstJ*2<K>{+b@$wjjT0wT9JNk|O(xw0~E
znd&ur`ZcZgi`Va6=@VdYah^!I@A(3;Af!!HKFlx(6C<20!)_v|?8Q=F6Xy)Dx<RdD
z-!E7!I&H_?KgJ}+D8IMghGZn!e-@@2RE5mf$>h4Eb_-?j1!BMDyt4XAJ^AQGKoqvq
z?AiRs9`e+bw_+hfPFmT(D<lFP7X&5wHnWqg@UG{kv5f61Z3sKnFJ)W>MkyROu15`~
z_P!MeJ!kV(<1^#S!Ji<<oGCG_Ps6H4;7zu&ne~Kjw=<icX3paDUx=Hf7*7@+<>^Jo
zC-dBwmGBcZ*o^gpw1KCt&BMDs1vUlo43`%Td3^V-?s6RtpW1l6AEygnq^~^jV8RTF
zL_m)>(PPl8VzFLZ?5k7Ahy~{Ew+3DumLHG}RmRi2_4qK+`un2|Jz3`h<Uef{e&4DT
z7uU0%1t%jBGO<(@{POd_Q)DPbV0)f0&~Cx%f>acLGwD!}2BOWvv9`RQHWLh!s%sht
zzAgro3orxDfk&F9NMPKp5}Xw5N_`^PaA@ztvs6WId{^`QH71|!=cebMN2r-u%l2xJ
z4dyCZJ*Ii)m%GrM&`HY4x_W-D(UAf_h2a}%8gP(G@w8{d%X9Q5^uhh=C5L%IGQK<*
zK+TK8Hv=|SIw;R3w=E<exH4H$E*hDrG;jF1$LGTAdwhd^FcZJV2d!k64tc5FPqV}m
zx(%1r;#jOn7*7HtQEE*6vag)q<Fc!`w)!+rU0&HwwR^997bg>EL<#SWb$f#S7@7k}
z7V}$E!^AX_+H2pgb~sl7o~>lHTlkf5JUgOfIWmpsW6ZjQ&=6phk9J;}@&l#8axlQ{
z@1X$CqvfLftYo{Pt_v~%b9nb~KJ9_?&_=K5$r4EN%P3iER*r`k6N3s4W1$P<L4wE$
zap<l>k<mNDj5n!?Ro=}Zv5RN=CP*bjFVa)88TH%L*)qKUR);R;l`2!QsxmstUNgs}
z;fVS1$rjr48prID0yn!2gUhcmsTe%9Jx!X%!->@&4%4Y(q@ndLf9{3?g~CExcpBpA
zVu;!P<%F)|eFC{Jiccz?sw#SY_#C=WMIA;GFV7%X%{GPAeggI9D1iS_DTh1+8<(wh
zE2oIREqRD`uWJAVxaFGb!Ba9=gRU$H@dL>Z-!?y1?cp3~l-IXp<;v{v-rT&wMLJzx
z;>L>(zQ-WS*LMXMl*(Z^nb!Czk9nC8k03=~1KhnzEP0Ey%gqy6xOU;GqnF2trw@9M
zrX+BR&-5@HKCL|ER+~$}wfi$Z1KfdJH2og>#G1mvtqg{UphW9go%gM=mtX`fzL_wA
ztWhlkW64>l+g6l1FCMZni91?RA+^v<I1}np$K82d&RYH_=zDZ$fx>mq)Z`b+Vsr{@
zPE2~zG4@>dQg+ei3P~nzn^bKUvR0r*J*k8YT1T@>*#$fvuZfmS5_o&H+#PdFAtI3N
zGj2)MUQ8Zf9g`CB15X`Bs-7W;!J~9HY9K5V{e#!l{j5791rZZG-e%qNPGV1OMQ{VO
z2;n1u@#tWHVY3}WJkcAtT9djoXF7de?xvs6*SBlRy!$US7VI+xM=Q8UGBCmx<%Fcc
zw@c@;16C+fg|-FGx0bCK!q+sJkIvojR2W^Bia9b-OnmUbkgN*0!J$im<Lf<Uc<ol?
z6oU91uL|d-7!V>%@Mo5z?5qwU+Jk0!l~o~T!nzq8;jbK{S$R$tyDh$|%Kt=S#N|Z&
z1PQA1`jY1Xzjkjro;6S#uTXfH;8|vQB~Q!r0e<Egy<t<xWv)65pTY+*FF%=$Zn9d?
z08&65?CaBhAQmZ4964E4S3fEx<<<B+ENj;@EcMjUhxYLgla8y|Pib=z;h!sl(ag=P
zHon<pOxbMTW`DyacGrh9Xo2OOjljOQUv(wcmlfvy7VgE^I7>V485np;BB(Z9dTf)2
z2RSd1mFQLoaFy&HL1J^h_So-5^Gj6(x=NeF)FJVky9*77NT$a$@p&;`74=Y;_~@UY
z<L=F8d`*aO(y4daxuo6>d~bl?yWZm|j}O%^r8M6NKVqSp#@A_T@6I<Gk%nwh<rlix
z4i{_^_mX2XF(BM{<1BaWT|~Ot#7RMSEg_XhQCO%8nucTsb<1H|&*3Wf61vzDBt6Ju
z5N{T;T6(v3<gTA5Q^T>RoNtX=s=yh;A+Wp>lMY>jypMz4aKFsQZ^}m20R0Z6Fw(wE
zz<@M?;@m?%Kj5m}EVVr|wCDTvIezvAw{B^qAAQ9c<4a%6?m;fnz9;)^`j9nzGce=N
zRK+kkp{P}LCDN<Msp*`pXVOIhJ22_)l`pr>#+oW8>~M=PYd)Gga~OlXH>;w_BP>g}
zRqIv&SAD>UB5)SFUHs}P=$Fze`zW@g+kRMn300?-645#kjjqmV&$@b9pjDW6#Og*v
z0+DwU{QbtU?-OUDMB@i|pCyFe9(ues61L~RTf6%a?)T6*2Ci9Tg6M!&k8hs9?_kEC
z2YT{!*%-h_@9$<b#$LNMdP<SsY^YSFC|ur2iaFW*=oebr5Cz8)K#62N8%WkTs{`Sc
z4;9ob>Gct)Gx)hW<6scTx2@eWuE9w-qj)84(9)VIWoYsH9cS!qy%S43K86a*6@`9C
z$_X^}CgW%5J`hzIE}VTdT5=hak+UP6y3!kCT<kbaiFY{6uidl<JTIE20IUJnrrWM~
zHC=wOqqbce(Q)1=Rm)`UXoV!n35VofhgYf!Lr3Lx%T+bS!elD=Np)ux+1oCjf5rRb
zsi6k3oJ=hH2}(kcUIE&*EI&bq9HUsktNaOCgm9yXXV#zx25L@vk`qN9HK@~X&?>vC
zY4hQcWF6`V5RIWgv1JHs*w)iap$PfH#?>pjZ`WYQZxHkYydJ$wKPkk=!Ha*O$g`Tg
zPn06%63!xd0`(5FsZy>(zR|Zy0f9l??{4@Qcp=VLiOVT0&D#mlUc6wzeB#-WMkGlL
zMM(z-Bfm@j1QqFj+>r@}ZwKW-euCD=ft>Z=gK--dapjSXGNGD{4M%!+T{v<Zs<pnI
zGrx)6gyN&yk-QH0ffhv0iJKLq3Zqf=S)byT!Y)w*+3j|Ay`lkD#pabS*6brGTnEu*
z>&ozmO9<dqT2B*qdb8@iJ!@ki#>9F2l%LS26b-ReYj|+8bf6+B1EbS~m@f+&Hym<^
zeB(#=l%GDvv3-T)9Oblhj6pg3sQj|s+6OJ$&Eb0FP?x68o3T2lL~1{}=6f0ei?hX#
z6=J5*45Y5s{^(-o{jPv^!55{8_ha6FX_<-A5D7Udw#HBSOyW^UB7mvPEChY5Q}EG@
zHcR(-;S3-{m0JI?8R~eIHvHV@mx3W5*cQV<iPqGd;V$Etv&m-VN%e?bE+&Oq=ou;t
z0#&Qsh2jm=!YgFph<M<wlE(%tWSFrRc9v8+IE1v0Bbhj!w>4%eD$zgny6g{*1LS9A
zp5+M1Lnak(5=?ECN94&?el>1B)~r@HYFW-F&3sf?{_x@~o@>#w`O;creudp(Fn~q4
zO9FDj_JwF_OloDI$xo2DCLVbhH3VQ+GJ5t3<u_eT7R`;CXN_+x=x%qyd>WR2g1BEF
z<|7?Egeg$M=2l0nepfRe;aG{=8`rUqiM(!YN;fg4HI>&zV})Lnk6$gI?noO9qVR?}
z&$AUtc6j0G_Bqg)`z1BQb@LYj(f6j7#7}2d35FaMrH%3C>O{8OL@vboN>aPjfSFv^
z<J_;%9w72o3xxG1f&feH`i%KwH)GQ?qUF;Dw>k!9O?39q)A^lLDUugiX?;!#uT`uN
zTQX%maUQ0ySv#Wt@Hs9shU@NSE*uX8fTAEOVw+hY?38N+@8!=2&H1db1CI!b3SZpN
zoGhR3e7e_%$VERNw#C^5t70$a6V1Ts<B;7&qzr0v4f;?ruDOQf38<>=0B<gl8Bkmw
zWU{VV`{?W2BW}A@Nap{rbF*mTM+8xhw2w82$*GPc)JGZLv}(5%O5biJ<X)+=?M3hp
z@LkuW4d^)UdNmF%YOe&53mUML>3~kHxADh!xIMzv)noeZ>i2$mJy;`<5`s2}^)5Xf
z;4s<5y5QiLT)d;0D!-D0zxhe4OPh28cW7mrC3U~F$>!aFYL%d<r?%6*{*3^7LtXhs
z0uK>JqQ~_qY~j!?@myfgx>1|_0zWa-zjC$joIrGc<>J-l2zK6yL>~rK;o=V_CB`iJ
zpM)wrWJva+nYla-#P~KFmQGs%ylqB);1vRxaz=TtN^WNDG+o_^tS%RJEs(V|&6;la
zfcz}%OHL(Y2TFkAOkynZZN$?09n>hoxV6^1w}VE78VsxN1eV^?t3GGcpwp=_Fz$uJ
zx_lT(cKqmMb6Je@S5i3A?oUbKUx{I0v9y0(+5;rPo`@ZznEVj-wyWX>P(W{0KmWb7
z_pi26=m{Y6UjGh29u)xM*ED54Z3HMK+P~t^-{4n`5a(khSAdFL+(3xU76{C*9H}i4
z`+b9gGv}`aSZYG^>VYxsS5;svJ)H{dU_C&6hA1sJ_zWmwx}f&H0=sK{>>2(3e-yCm
z@5NG2zsasybJ@BgIF|*em@@z-)d+y55gwhZ*PE}|`f&~h1hro~AP+8=G~dUizv7On
z7^b*pX1U`2R6#j2YfbBq3U~fj<-saCwx;rllcX?0bXOT!JgB{6PsnvMoekD*9{YS=
zQ{|qOOINR;=Q4+lQ$_Ew+a=s7yVrE~<EPfC%%?C0PY;sO%<Am70iih`!Yv=}ReSde
zwc6yZ&lZ%5Y>VolIcNrXBa?PVtaV^Q2J6%n&(%R_Bd!tX$Ex4XuuGvL>@!@i@%?ya
z@)G}E3%VTEtY)$AZh6p1ym9!vSvcrx9SfEvL{%3@p7`=<%*p!Y*PL~c8uRYhV><W9
z+g|oD5{I?6%>pv|bH)4oZ(rK-;ZYH)e7RG;h*nF-y7+>L*KsUXI;p(&Mw3P$TJ*>^
z!aGWG1js!{f&P(2LLfRIpwZ7w8f)?Tm4#^Rh@A3;7IVgsqw}F-Z}NhnQtD8^y;FJ|
zaQGn5>M&(bi{ysuWnvpKCsxUSdN%ZQNCcu#?jHzfv~hw`i>Uo@yb+w8WZzLq0Ug#q
zuERUUlzF!a*DIgru0QkP+GC!5&n+q+?kwY)H%-z(0$e@l8@d|Wik`A+mBCaz=l3N$
zoI~Fk!O(czcksYQl8d4K3O$ORFOKe1D$z^X<)7(X9)Hls)vl|0tV;8AxVbTNRl*Am
z!Q<N3jA8R>trft8CV-sJ6uwwVf{=9z=FX7xk*977*gUm`SGI$1KT4H-d$QNHy$R`m
zzML-SW418iSQvJexvhPU7ZqVT<fUVb9b21if6>PoDzVc0LW2vZC#7=nY;5|uyn;Ui
zQ2uSK`Rj4#Kk+|9&>!VJxH3xq7!|}Z@ivQ#)5s#3>vvGePkK%DpV8Y=fC8(HJ9PKa
zXxCi_uMs0nxvPIqXSaCf31PP+<xdDoKfYyh<DT|C7M$duCKo~bJ*SZ-=&u*Qb~uwd
zM1!F05tHCz6=>(_aA~=DXZdn1y+PW-*0%O6<igFly)jaB+ee=08p$nIQhKM9zTQZl
zv$R{0A)!|w=D5Zo*w9Z92qn5dbW<SmNT}j4KCK23XnM+}IWWnk5NW>(ez30vRu72j
zJ85<g{a_n<N?V|^i?+<Kq60Z8Ij`9H#7<QoXK;+Btn-4*#+%fBOy#$RuoMSJC5ir0
zEf6Cg*KRDAXZub4EM0Vibf#r(jVcTS-q^zX%-`_I#~>r@ht7^%mHN7Qe*9vO$O>yx
zV^z7|M1izlr)vce>7T5gukV6a>k}l~JY5x2Gc|oqbh|w+IUGKJK4z8Ln`$%yAYLBl
z#h02cEid_OJ-Ig7IaBxT2M!~)QHEEXPpuG(RHeskiCL$Mu%?HNw(t9#FMZg~mzBbT
zWRHbCm|yw`j{XU%UT&3%tMeX1od|?-7oW*FZe60vV<diAqWZcS@x-XEgg;Joyn66j
zyXiyVEZ>Fu6>4kLZw(^vf+y3i;~n>(+Tnvg4A<(ON+m2dek9-6a@UQ2yejeKUBU8Q
zuB(2M=?9%r;-+PnDQ=aWO!wnpoz$cFigqOPn)*>fbCz&fp6lTAa<{_sTq75}PQ56R
z1SB>bPo)Gqo(x8G2eDCm0Y89x1OWcj;&Hbcsy~k8TDh=}C(WGxT!y+U8fVJ(<Nk}=
z91pID!oDRN@edn4Fwp)cc*3Wys29zNvPVwtV2QP!*mzD~kBx)H@q_hEb-(*dX(DV(
zM<=vT*|lBFy{pvcU(`Afie#lSJDd1E!i0x$N7#9aXPap!x}13t<GA>0PWv<Fnk=&t
zFV|{~N$T1Bc#))fI|<KQtFBx-T|gHCNpNyk8uiSWsu_kQ;84G#f-Y_Up}uTp_x*6C
zY#upe{)-~U?g>aN=Dp;Kzl6(ZvJiITvZmVLPDQKCq`|yT3!D3TJC*j^4a?Dq1pb3H
zETUoTvcRL$d=??J-%@B^KKP254!ntp2m20ae8Sg9?UKA~#EfD(Jl!6I=oH*oei0Bt
zL-+6~-SI2(jhZLQF6*b?+Ti3c*CCa+QJJ>L=O$cFld0@{?ssF`6D$Yb&UJeTe5ZSV
zO~L9U{hh_+U>e4Bvhvi_T>qS;ic;dylbUf#IK#Kk^O@h}2ZQEmhTi18D?eR8y{x!)
zZaBtU8;Z?g_C<6aaE*rt+Ptw^J1|@0OrKhk6}=CxT+zOJL1W=KV$lE`^ch-J6rp+z
z^1LjNUwV#%UlK8065XhIN4GoNh0lRkX<B;kv!GKtSvpD_E1@CJbScCXyXU;nWH%3G
z^aUokadl<_ppW`)#mBk$OhGTX=@h*Jj$ZEtbJYveU=6Mvco^~y@Rmfh3ZDt>P)3Fc
zFD-SNruR$sPG@8Un8&2joKC!2uBfn~k4i`g%{&a%<RoyN%(Oq7#AFif<Gf6PVcZxy
zsr(}p2sRpCx60qqlv@A5!<X{KQNYHhvuFK<e+li1E11Dor40#gl6pguY2`-V8LWB!
zjQhP`qW)qh(@Bq%tm&lDm+7l3r9WmHJaWxp4a>reBwaii(_D>39`}{eqxO%e+WSw1
zm&oL(qFp{Z`N+AY%-Z+rEZ?_-Wxu2akV(LxGgZ{Ssfas6Oj#ne=~TI?kC6wm0vpZc
zk|2qKSJCS^OAb;4whU^*Q56Wuxr68gH~Sq3Za-XA4F@HilW7IB_*NK1m<@SBj=3gS
zTUnn>6hE;s6}cchAFLq=JQ~N@0=7b+cJRsQ&+6iUZds!iKE!(NI%YE;m5rDVxCe^q
z;@s%z$_#&@A`U7xz6jTb`+{52)1qs(Ag=sUWnIsAUuLtYX<*G8fKotftAY_JtfCLD
ztnrRp*_g==FUH)N(zQ6;=()r-jVO-BLXWL9OMjDxu=vIe%G8uu&gomdg`9E>cRaVI
zb227$<hZt>M!6V<2f)P9QNZ{FymlMuY~4`Mu>4S$>egjghCoPl;#K?w=!tI8gD|cy
z@AT7B4+I{T%={4Pgufr(7{uAQZ?(g+xs=_guXR5@QTUGS7CY9Gc}Z3&8XVf_dgP(*
zGF?DU8ypB>9;1St4KNkq9|e`Uv(ATDh&*q6C3>`S%O~O0Y~5?RY7YNwUHO${^Q88X
zWIxBV<HZu0CLgtXP#n;^sL5e0DrSMO91|$#IO?ufJ8We=;F@+ugD=I-t5-0#|AYP0
zq;Yi&4jvvvPm0Hg;T7XA`PR;IeUnM@wuRA~=6$K=-Z3|1YJZ2-Nc3@7YJ4ZI&u%oe
z?eDh0oN2Z@LmF;Hzn9r)0m}MK`^HVTak!qa;6gadd|&=p7nYoqz|sJtOVs<q_wXSD
zVhzF%Q~-jr2P*mjMy{aCgLP#!(h-Eq__^s@izj2a98Us2p<hmpn`v^%%AXx6JUexC
zILch-he%7#`>u_ad7FpG<IYsJxTPh-b$+z9K>#Py=}pJD%d=@pD;s>{q}#A>Ip82+
z5)=aq-B6{UmjrKoV$t6T>T_G_RUc4?r5nuM*gEw!t}9ikpAp2ov6A=ZPh-LkaGFUV
zK^#SqdQC#NS>}XNj1a_Gk=yl&W8c}fAg@^S5#^*wn}=YtJ5Si|0($&xfhS%Esu_g&
z-^yC>wK!ie9F~)H@$Q(Jqc3Ibk<6**v#19HjD<d?=B^L*#wB_m#gGE<`@plp+yt?k
zm!Y|hbX>87XAlt{3Km4a?WUoiZ708D4&JJjoez{i=C)7qNq~GWB>)SKoZ?O)dYter
z6uGIlHIPNwdj<;a@o`R~(Vo#9@Za?pC+oUyHB;?+8tDT=agYgp;vK@963;Rl(iN99
zm;>NCRJ%J>MH%v0F&xdjP9xbrLbtabqTZbMF2%29jxAS@*(yJT<b`j<?6(?##lo5e
zoz16?r58QYK9aTF3_@QjwbX@!*tRb!fcQRh%?)Z^FPLuhkCk+eSHd&?GBsGrz1C4j
zR^gM0r0l&tA<d>AOFQ2SL~CBYnmMZvNl~b%z2=2~gz4Do5P-(=C<6>hzT1`==VYjZ
zN|Wv_BSnd#+XftpXPA~NXoWL0VYMmK&861S2w_ixB1WujE5=_wErKet3ZFU%$_fz9
zrLzqrlBILCuO~eQ3kaT%QAjjhPIz%4(CF8Ih0_jC13H;ww_-E)lr+bR#`1tiWIpS=
z>CHLCd0CwI<%P!;+Ii1M6Kk`lX`?nCpxCMWBuOm#+4m+k0?RGx=%9?SSSq?dovn{0
z^7z)%!A@S<wnylw^~ba713JTHkF%7a5@S80A)MWSC6H_C7zxyePt@HjsWIJYe2CVV
zv|UaifdR7_4Jh)Cs`AWl9+9*GTYXF?5{LZGG)4I>UysxBe7ueC<qtvm#AT-AZJek~
zhh&L$%Y0W43}dy0<B6MYE>C>uOG_0!7e#Xo1G|o>z1)n+Jj~KWu$tYj8_7byI+ko3
z%L}+IT@g)uuP1%2TGIoS&6A3>PS@AfYX};>dGuMRTQt#66v$|&&*nvSG_Lh=0Dk9h
zDJTG4rdsmGW3Jsy6@tNn-(>g~%X4|nuXm38J+t;#+j|P&!JnQ3Y-rmg!1ZRRmjt)}
z;#{qv_Ew?4IKawfv;afP<1pYF;I1+Qpv&KWoP7Q7^;LAh7w|HaC}879&;$1Jss`v0
zz%(y>1B4AO0{jU6bxBymO8`U^{Es+X|5LY#o=W#WFj;-J^KXv*5nMe*Uj&F%_1UZ*
z!03xS2@E)DOXTKHkW_l(QgMRwZ_BP4%O!CLoQ~bx)R|w7s~O0c3WolB-POOQ$^3KR
ztrfk%EF8b-goV;rtpbCF!RL-7LsAZ|!^&GB;_<<gvhIo83qe+qKaSo~t@-+_;>!q|
z$QR^&kE!ae9AWBd#SIJc89nz3kJ-&8?Xu4c+JgGqGgp$Ny)}51J|sWv+857!0fha6
z08_YXD;VmybwnGJ!*X3lKDtADRNmq^)7Cv&!XyVr;D?shxLdcYg<(Yb!=xrfvUH$Q
zw1K$La!hTqu2<O=*0kh@Qds*hA?Gp3rpZSmG(@g|52|9Pmr|E1V?(0Om2U&TUjN}k
z{0Hm6%A^u}(PuP&AowjsjTHIjO;P@uWKXteltxz4)k9-bF^RU7hfIvyJQ$~TNF)+}
z10LfUC`79EQg6gVJAA-r)84t=<G%4#@kX$dy|~Dx;jxAtd%8^M=Aj>BmqADn^Wp0t
zfi)ys;3EdI%Hk}2E6uXS%xiVCcG2C(Vk@6h?iZi_K=3s)vT;9K{^XP$(*E6qm20|9
zyN|a|LczD2ERiCq;+%5Ia*2EvIwwPcyqY`eH~<%e#{xCEQkyT#QrjRkH&+$H@2H9w
zH>m6GOuWq1_V{7<XiRWNurW_HQ)Y9~m+jKVSIb}H!2eDU{jYkI_3#e}iIR#iAlP?v
z>+k_EVra-0V!%JRE_od60JSSLqs~+sDNFnU^-J~!vH>H@40LY{2UPD<^H2E=_vU|1
zkN*?N1}3Ra)WNV_DywDJmsJbyKHC(tX&crwIQf)%N{EY4mqxzhJWds<3-JICxM6zN
zJF7yCU4rg;O{U7;@9Gh~I)C{bpY9sXK?_Br;F@UVto4)R){fgVB3BAxa#9MIOD`0J
zFwq^+x+1^q2|jx7vT3g16COr+uXh0+)!vAA{^f6pj@<PM&ZQ1O?H}L({)Hu{KVH|#
zI?AEbIQl53sdtBFBu<;F8m=^K6@>vL7yru%k$FHU_8(pTMV|cMBSrqBsNVkq^5kC%
zApM3d`OlaJRKdhoK;eji$%^@<27*bu%Du#H)eIKn2Ocs@bQ+*8B~k8&>T8mUu9RsY
zBjZb9BtV|;Nm&sGNz*`pp5OPoZqZN-YP+=`7xBTAN_*kQDcWbjAe7|1-$0km)pg>*
z;fG!HQ~w(SGC<-c3@n;nlOW4JaOp;gw~q{c_LaKIeUQ$2i#%D=#RXx9V2h0r_i%HM
z7BtezWoKvLGSdbDRk7xoceg2OXq6QN-EE-W)#UmA%=}L>A#NY0nh>K%wq71Anv1^V
zR+_C#ez+fwndfHDyz&esf2HLc=sjujg2kx_xMe@;s4*1)TOCDJQtV_Q$5l^MwcXZz
zP;LE|HymF<$ET2z6nDmap23pLf#O6uli98jxenh`<jJNa=~*mZCf+H)fPLzUEF?pg
z&AU<CJQ-Z8rP3#`Yb)9F^#Nc_5dyy?LjB*q<F_p?TK9L%t}U=WGaE~how4fUn@Q}D
zh|o5fk$a)BTe7#n_m4h+|LAuAZBX)D-^jD}FBr_NkNY3@k3!wNb94bF3}s$7S-90X
zBC)Oik`$_S7#+x7;5!B35W4#vr&pJ0t@-9-Ijy9-sb6*Kl>j%oI2K{ugVe2-DYl)N
zzDF!I<L<(`ba8x9C3(wqM~>)pha8n`J0e;5k(Fk;<KcMhgNt=qkgJIN=Z{#&6_UkX
z-p!ohN*U{2MAVOdvQ2Z!J>Q}K*wLDz_)Wdb(e2I2ssLI@D*1ZT^_aKJ&8C~P37vAv
z_NPBFH1%sWOHrE*$ScDapZo-=Edbc%zQBbl{=_+l();FLb`K=K{`(3(AdQ*5jYQuo
zI<!gKN!eSR_=ENFd*1MVh^<Y$L0(gvB6?@efV?r7vGrq1?2zdE69jXlVE(l;(9{2K
zhyJU>{`HXj|KY`WS`*L|Jbd|}vi#lGFP;sN2kFq!Nu*M?nz#HfU!6{}OGN(eC)IkX
z$ATF?4ZV@=5~)>yMgPhE#UD+Azje|P4}b5)r_;Xs23?^4<$2dY4?tzE-M{_Vmrnmn
z!KUy}#MIy&@bCTfq~G}QwE?ffIE{4vSI)x!^mRu+6&F<$s=qY2MANs5-Zqq0LAF=@
z(zck(`K5LNIqnYK;LS#FPnI;h{z}FFb%_vp5pmg5Rp;jBM&ge7uHX3a=`HPFN`(BA
zGx0y5K=yqb>SQoE&`wpWO4cl!>AW;6GP1@KT<WeCm;#m+YIlH*aglt;zOx2o{h%p0
zGs+=WS?dXphIla|DvKs7Xz;@Q$KsB>+udD#3Mw%lI_e!R#rpv^4kGZV$Fm$z8O6Th
zs&*vxw4CdBq7Kx*t`t0hz3T^Uv=<*ZRd~mr`4If}bqLFCk-EBAyKt_f&x0l2z5NV2
z2GlR4*M}SErN@{WeE<+8uj#pV(l*_&@yAJ$L~lY)l&UyVI<3S7e}%YzA*)KObLosI
z<bjTQXY+j$0uLkR6C&Dxkaysfn!`MgO-F)olGEF=(kWFK6MNHBG4`~87@@+tt)ooq
zPczMpi=BE>+9wY*Gy^TjN`V0Ot|ZWafZF&$_*7aB<X}kiO;P&>HZNu;zg8xGeQ7QC
zV^HC0%#Sd=Vt6_v7|O2dhV__&;o?L0Hf<5J0&U|8`bE>aA*n5t*;aKqn$H7jZ_P~V
zUpB<oIakP#W=max*s=_6>$r0oo73CXY{;m4S<x+F@!XmOoY%fNC~5~hS<)#iM#$M{
zzzp#E<`|_Jj#XS*?@HhN!qM1u{!6Le(=W^nPd>Re?7jc~1fauvl9_#XIw9;-8cW}d
zF6Gjx{<jBV3pYJ4)Awk7mcTO}Y3?dwdL-JS62?VP+%O=V=tLi{fj)oaI}Xc$J(29-
zNlh(NV2YY$XYMSZ*^c`>K7Hvb3`X}IP*`|@u4W7e7>WZ^5DYF*6CQ7w7V(w;+1TC5
znw2e$3+FwWDzi^&pkoV<;)?rjtXUMKv{dgcMNwApKSAvVQv&mhod7ck;CF0dqh$nr
zov?yQQ!Y~>*SHEtYezJXgqxYtUF_xGy!X&2Ha2DA8+EK%f>aJvxdpdwg+alpZa(wV
zNba`tkNVvBzjC!laPW5DY^RBF=aR0f-H^85^^PJ*Qu%y{=>Xk3Z4`>XkwbX}zXITv
zs_n_D6#hm+*YgGHc+=2)_!Ou8Lha#B@N;Oi`!-AS#oBAao~A|l56y+h*{hmCKE6nd
z*rp#L>1G6$F-~2o55KvmGqluueQ=}xOs2bHdv#^%*?Tn2j~_1J`8top=7d3cnq`3d
zLL<nELEpxSQ|pM-q1hGT(^0ZCO(#>g&uF$vf~w95Ubs`T6UqM0%yz{Z;1<AIA$8F9
zcp+a}poWy2s!-`bp+-3=*b=SHE+B2UQ>N9;8FIAk0>z;02^z*XTRY*yEQy7N<{S@n
zDWi@fd7D^D)=$i?@N;q-*gJ{blk`pzaT1?#sNAyEARx?1qqu=-X&_L<KuQXje<HPe
zyCUWFQ+Fd9ks0(y8Hmg}Dqb(b!jibnBYWVZ8F;ZU2!KD;#Ksjn1q6__)?jtS9c|I~
z3NND0UcX~0hejx|GF%Le*#!Wp(*ih;cy&(Fqc&k4k{PE{r@HD$+Ku6*1a1vljwlPG
z7OjWdr_1rC4vEKP4W=v<&)h*h+VGd$*$C=G*S>lBrWeb`NPOBjLeWboDa+b?u6(~V
zSNHi=qT%Z*Tpznb9CK{@_%~r$ymn_!B$BxaT38Wd4<K5uC$f~?TGx46aYt11bhjIS
z_xbzB8%rD-@4S&~TYB?`e2l97Eg7}|X$R`B55tcINmG=6F|t|KO@Sg<>Zw?0-IJ%M
z$zlohIg$g&iz=w*mtFp36O2p?IM$Yy@GR&gVW2Y_E$HIHT3MH&o>}*$AIBp0Qd)FT
z8Rwy1EBkJ`_F3V=6Ci|a-a+pR-IT?{ZUG@UW&jx-yJGfnMCxnm2}R0#ROoVjlG>HI
zBhAXWc5|PwQLi-R?=ddJC>$BMdxbce(Db4f2ye9}78&NF)!rf7j=GbO(`^jC9CAI8
zgW>ACPOuUDojl0`+W^HRqUMVL^h=G{2Ef@F=B@kGGJU+^lUIm!c8(Ob?ZJjW6IetH
z^3iLD>m*K$=Ub<~As~gq1kB#S|E%S|Q#9-suEjK4s`dSMsf*c?Z%*<*#V`ab;p_+W
z4BnGrFruCIWJDotL(h|lsrWxPto~}Dd6Y~B*Fx-w<rN!kL-EyMy(7!wM2M?;+ola1
zSP!`;oJ`+uvY|c#?D!D{tXKc4c8(>pXyVCRU`H<`u>Q$Qe?y+jz-f$t9oMM9dh&(c
z*U}Gj?1=dr>Z7BFFa3>(cB@yc7lET{Pdq6p0oLaOrwjg1UQXocsB=>m7DmUp@7)D~
z9_os{=ECd)uVwwlXyI(HU2z}`pH=o%v@}7t{B}wHvE1ao7f)hoy#0Y|_;vAqP84~W
zjYc3A8qf+Msl5lN{UgXWV`?vI7xqh)D0CW3RlD+iz~s+8RB;|4Z2-ho<VuC_H2_dj
z2ch`KlU(7GKGdlFXHa~A9l-s2%Thf;{qtS+LNUciNA!W@91tmP*^KxJ63(acrx9%c
z;ek&WFhzI*Nmx6wu?PKg$88V%&9VOWe8G)s27mr8nyUZiSbuxI*V6yG`2W*-zE2A3
zQZboMT~bkziB{*x_R}$qmG5P^Xa5a!wp)Gl`-L-~zRJ!_EBCU#6B0V==xFyRu;9sW
zlGU&NXT8ap;k}H7Zqpj|TyAckOC+8;GjwP?y_FY5_ty}--~Z1Qfkpn{-h&_P0va4w
z<Qr}Wi2>q%9%g_PARwBtOM8=M)ETmU1d#2Vf*gX(YaLep`qRC>BYy%d4=K(<^{)6u
z{8r(!o5Vxa1O!4Gat@D<TUdVj+8TAP;7>5?{{-CezjJ>&)8VnKn`$ex8N;ZB*E5iV
zaBaYtn`#_ewdVeXWM;-ecUj|sln$WYllkbEe$N3i)+DLSItZZ1K(kDeKI!!Qq2TmC
zHMV^^+t=+*;?k046t8JxqoBmQm_*$b&^kOEjlcEf4QZNuo#J*_Du27TJl~(lZ};HV
zm-51$DArwBs}A?zy{UH3q@1COY5T4U{5a8wl!blku<gXJI%cS(B&m>R-lG8b@_Q<b
zZFU2^Cnpkn;0dyb@7-10_V*?OPrs6W*^xf+aFMN-r((w4kMO*0iai<M{5CIYzwehX
z3{C;Khd<<z03m0*3`I?8gR2G#99DR_iC_#uD5vr$f`sfwuY#3y$Jg1@24}}S>6?uB
z5{(-7Z^rht-wC-GGyd%-sBWK<jskjmeGL5*q=%`{Jp}rH0ZrvYPE_Xo1g&zK4T#R(
zgj}@+BnGMtT2WCx5UxNMvL3~G7z+sUU`eMds|K-NugcDgu)FODa<{Rw-g$fSJ8S<t
z9kT`GJqr;??fxb^(;cDm`?sb2PTdH8p~j#Ul!v;G*nwd06;%ZFKx@782{?=4#ZM#z
zf&1wBT9ph3myBB<B2Q~3Uf~g0y4m7$e6638$IQ|sDmLe3dL=PulTXw$XaIGB-~1>z
z+$*eGi4Sr43dC54cCIOu2+VLviujzvkS*Y~3*6h;?*inJwpw!pRl%NkZK6lWPf!Q-
zEFn`LA=zOkcg+bQLjOVTM^EXo;Sda~b3wxcQ>X7aw7vpVuACM&F%0=eJ%=;@=_!2)
z>5;_S^X+l@px}YTivy|)J2!Rpd~CwACU+DmCw!<kQ+uhcX#jPBe|)Z%v`Xd$WHF9V
z*#f~ecv!qwap|7Jr^c+jxPI&%4s-e--*4qQeDmWg9suZM$_WGRvM~l8camFi-&<>!
z7p410^exI-MHEJt%&ZfiT-?$J8><$>jWkLiO2`&9=mMNn1dx7W_o+{Xg2KmfJ{yH(
zLy91Y57Lf2M$s9)H@xLRF$(Zv?m}F`<f+*AucQR4B;yggj2P`rd;B#VJQ59@80-~U
zQLD3A@KLtgMU}QEHs$vB1g?iHaThh26BhLjp7B@ih6PDbJCQ=_A^`pL1d`z?KHr!S
z7wh)ccCNSBVpi1i_3eDQQ}MmtGfyveiLyV1*P8uU1aNIVz?bl_;V0<*<`y+7B+GOL
z=Y-y6Z1_SsT}O%~J5X-oVVeRt6&4w=FHeh{O_rDepyH-zVjC&Dq+6?DSemdkCnB|2
ze7i?Cz4NffiBMLtX@<AOVftzd(UHb{ea~PWVI_S_nM`_e3Nudm%rp3V=m{J#Hwn|R
z*lrGIX_oZc|7a!m{Ppmj{2kRKk;DhD&)@EsJuem`??9bYmjyi4wymkQ`HI*;vj=XM
zvV{4T8&ZrGB%)6Vo@TiF(DQStBVJ|8>!EE|lP|0tUf)DLH;5s`hf(-EUhDd~e~d}3
zvd(x`Eq5*}kL_{WSN89cDI!W=)Fk_m^LcCI2hi5CBUG7-nj@-apvL(+9h;~|Uy}yW
zq!QXLK@ROaWLq1`5qZETjl$db{L+moI?NB^u?QMPR__&LAN_EIid9iVw(ZNM2J<-Y
zzYYxUITNmBE$H^JG-%UBQcp^Ars&<bgE5p6%EKE0<qkYLc4;BDHUrAl?#5z&dB<%c
zhJ(g<Rrvd?q|;}3eMRtN8=62US?dfsL^Ys*A=I!YwRK4|TPOI4bKuJviN0>o{E=f!
zHl;H@&z1<#%}BB)$piagNcwR_7?e%)qRX2fmf~KLM5FUQ>|GO#+YXGPbehYLUI^($
z>8Gl^cT;1NnVz}~j#meL<xGZiN~CsyYQ~-&d2;KRdN@jF9Vas#fYFW)5~auv?NxxL
zVz0HUvy9#u-P}~*K@`NuHR<rMTAhJ|Vq0|9OygwqfRQjR@JM_s`<l9BlF=|l-{ZJd
zpV0j?<g2r1%}rw?x3A2hq<<vOmv1P{S7d2<WW2Q<FwXfL|3f`K9=f+)gk1T6=nRsw
z4ZK|m94e&S6K~sfb0kntRqeP=?cH@(JJ}$7l3+sJymH{^E_A~YBg4POU(pVqqQxG|
zP>6jSs5^2xwZ9W%AsS&&&uVv9fRVL|o=-@G?!o9hoKsz#BssrK;XDf$@TKmRK7JXf
zB6#+CxbDmD^S*}M^7bSgS8_AmTzYiwUVNL74{@Z4(4XN(K(sZVc#qHcfp*Kd5fJMA
zGVH7E?SM$0SKbUgImy`(>6=jJt;zlChp#Do3}HS<h1gP;i%%-_Movj(SiMl+Iq5i4
z#H$)EWEwi&4JaC6&{e2I)-*Ut8t?&dUMpx}fUDa!pIw~0wmOeMv6D_4;%<+WTI+h=
z?Fi}A{IHnT$bIV8m6@Z<?#@cJ0eR)6fi*e8C|A^E>h^&CVZN#&B!<F8vz^mErN{H(
zcJ~k4yNxCi7Zkb$Z_EaE-A_6Yn1>4a!m;pYkI{l8>0MWTg)+XxxjUt=m^%cy?(kXI
zSlng@2paf^cH}f}UNJg|e;p9zw6=_htldQ3Y^;gU<$qilJ#cKO`nv!#XajDCOc6ND
znp-FHQltnF00mCgz@wsHE2Mwjwzi#_PGkz6(XkK_MIMh4x0{R<dgr1)SLJgxdW29h
z<w3o*PV@q-DgaKl%moa7%W0d6KS5$%WI!}#eDo)1Ckl1Vb(LbgcgRq0_a`qN5XpkP
zgy7B4;oH7BMBge>^qUgtqIn@>mRt!3Wy`q>^=U4G@pCUBB5%Hu;|Fs9)VyWjUDB!f
z_{iMQn^?B9V_2L!$g1qAyQw|7TCxIT9p42xQq+zP4U#7X4gQF)aN((|JoX`5$Ew@z
zdzG8rcS&3hNfaCp#3-s3$Dc_%ob_Nt_;^?5LcOOp&P+k>IHtmTnr`%Pr$70+6hG<;
zh*RPKMaF*|dGY_TEiDxG$LN_pppO<#1OTi!Q9yk8ri<@2s^(1rY7>B~9lRTP-}F{g
z)Vb0g=HDuM*%sPb&3pXd#sK2C^^k+hYq&}|<+XEpuTK3FEl@_R_NE0OUy81SZr+97
zl?k`pXcoddMMzNxu>oInPd_OUOyc9xd&MDg{z1X@FqfBUO3W~H4V)x8uo3pg<%NZ-
zvUgd@O#5rKk%Q#QZV~%bNl=_@&Xv5TYebc0DzhC~yK>=qMdy=dv<JL~YfvlQb;H;j
zssy}I0F%11=#V8ng39KOA_1a$ziT)Y%&R4OZrq&w0az2qiU9SD73&cw9sxXv9Y9k5
zcP-;9i<|*UM3(BEc&d;AnIHMB;Vp80Z2c!FI{)_#6@xwJfFtubEkh{EImAl<i1Cpj
z;C7dd{{-=9|7{aIKiOylHz7UH4<&g)H(UVW@TFY9<+rM!O2_|A3rhQ3{$$sCo19D}
zehoP>ObzP;>?4MHIK>9?`-Y%6Bkk(=JB|Nnhr~Mxw|EBuWZJf3z!ot$;E@MY_-B!X
z$H?C`cpY5U6z*3G*`VM6Sw9nMXU?v)D}*Hf@h1oi0HS}_vcCH;N-Z;h{{M=umY0W;
zM3*-LkoNM0Z<0z}hII<0+W9m4M4iYzXLo=Ru;y6+>?^9ext5?pJbqaI7(FS_rM%lL
z0t8AsVBe(A4jiafr&)Q&a9npRyv8~xK6ShFHR}#J8Xo-<gf|q1x*JUDM$oONI}g+o
zKb1L}GfC#<4NOIy%WLTTy$Aar{roSF_dhW9(b<j+j}PVC5K9Lw?E=hP9w8Lg0z!>*
z{hw6Z0xE%=f1fSx1}2(Seaj1kK5x2a@4BYa!D=#-v<v145`|6X(5p%}Gm%v0J&sD;
zr*?eXUDtwaUc3Liot#RS<4?f?CRcmqXepK1S2XiYeYq`1b0d8Xlx#h|Zb);*jF<Lx
z1xzxo>fZZbY;f!Mf0@VL=n6j4`t~(<u*KY*k52gL$vuV~P0+p7DTVm_ALvPi3l>ju
z>w6cjSN@1UT$*AqXeg8T`%qml%VBO1{R$L96`t``G07l94a34Zv)+ojZcomNN%UrW
z*C~83E~vPA?C1}2n-3aq2t9hWS#b1{0W+zGMgBH+WbV6_-<#V#u|t&JhO9>e$rmO?
zvuVg<QT?XFJ|{FROjeYI`Y4uV{aJ2O8;p-1HJuyE{mcgw0k{_r0?f;}Ubp@mP?-6+
zi=|EN!pGR!Oj(;-3f*#AB_c7YZ5Q}NM6(S{(ove6ztLZYEjUq5oI+vf4hvB(GNGZ1
zA;TsYr;F@_QZ~N|B>T(Vge*wz^`lNBodBBrQ51!%2q*j;b>6=&S~Vkp=cUW|tAlNH
zmfx$)CnMe2U7IPKMz*rge<!B-n-l&A*ZnsO<NxJB`49f3CD;=IvoJZ0It1D4C;tT9
z;@%@e;W3cPaY_M-_yVwIe9#9E>MJG>)^qVu0RRNS^2L929Q?IC7YPNVJZS*07k8M9
z+E4*d5v$lSl(QrONoA%5%+oHiIC8@0CZHzMQRZlckrv3Q1@?Q$`bX%Cf3*GoDwM`A
z!mexlbpHInM#1}4b#cGF0WMq3Wvi7jfg9M<4@9%IPMwO$oUgNZ?_C1YAyq+15*3}2
z1w6{g4OIDnn^>P<2yrFC<Xgx`3qkUUy>b;QQ^))v1i*0a5Il$_qzw1;u?Clt^Oly@
zZxhZ>J=y$ZR;752S(8%?aym5bf%bPt1TzfAun`1MP1=T=6_JBw2sDWgaN&F2K{2an
z0o2M^?G}X<@X8j=g1#ETHdH(M|Iv-L-|rlIM985EXAv5yu?X9rpwGQ1s+kcGrnyC$
z{Rz^ltw7E@0#Q1N65`z`wTPFGa$<lves?Pty^FBir!oNPneBJ+B!8dItpoWoz`&=K
z{t5bF2CwEc+E8uFq&NYlR#Bik5NNd=N8#mw@K}l|^7prK_%bivZb@(%kfT&n!TB8W
zk9`7!<<gLpclp1+GT253X8`!ZG8ABn{r=`5zrQZjy>s~!ei4^R1AluzZ~ykf%*e8u
z!fjtK4|F2`=7xY`D8Ie3ipi*RL}9!C(w+v~Zu$oVo(SnXA4z3oC}NI#Q1n5(uXI7e
zxLT@8hNP+eit1>^DD)YGH%OS`g_lZeHwd?U=Q<_--Q)rNnQ%u?(?zWy=Y<<F#Ty!Y
z#!MTvWhb-<XOnzhlfpaM^lzeKb3fEI#5#uGo(?IGV}7sx42E6a@GOK(c2Sun2?(MS
zzG)l6u%-a_sIPT8|6X^|YHk__-RTi{bxN!RRFD+a|M-Vi*1+@uK#cfn1pg5Um-Sn3
zvUmTCNpc4tUK0Tvy-l0-$AC;eP?IOjGv|Oo`%lXGOZ}IBafuG0oyFZ_Kv}Jypq&2V
z?zsX-kr9aH#l3>0B%@7XRPJl=e@zV@C^vGx<ccl<NxTzA#UavuP~2;B;bmq&#DKnk
z3`qa_M4_B=PSmrnrq5Dws~`25pvhV&E6MR5VnV3`kP81~{Fll1NVDMxpoLsDK<s}{
zdf;`9C(HqV&9!Puu2Y0;iPnW=T9@Omo`VGN3Q4XVP_{|0=AmqX+J{Xw@oc`$Ozyiy
zB;-=x{j?JrLY;kjf!d0RUmTt%rN%~Toph&THTSX_YE9UmdsX`Xu=n2aaK7vMDAA$>
zk!TSnS`aOIgqVnE5fMp5H$g;;=q(vDLJ&O&qDS=Ty&Ihny_aaCGt3ZUn3D5mtzFjo
zuJ7Jwud~lSztjHmdEXD-^1RRU-1l`~_jPTaj$cJ;v;>fNCs1p~h6UUYW5z*!;RCjs
zW0`ez<C34N!^Uh=5Bl=UeM4xQ%E`T1q|%!Um;4;tEjV!$cyoe8EzUI0)Xv1P(^U6S
z&(kp!=SuKnYnFb>b>;<=ib}F<p&PJW-Df^r%a9NiNf?lna(f^^oN{{SHVqqY^=<BT
z8V|kke2yvZ%+wMa(d4=&qU44zkAK8J+gx1wv+Fx7e9`fyN|IGWdX67ePKs0K*uC;j
zZ@+mZW<}JH+>1f%kbZU4v0l1Tp>ObN!d1qITLvFLXpXkr!1deWrDky(x6#p&%5t7a
zrVCwps?;gVD3yh6qGfrFy>h+om+RFLXYTn{EZ1OSI2;Zk(^z8yFQ#%Ouf16G4gy^o
zOlbnE&f9E=7#WaHpAre-3^V@d+t<5BpAc=Q))(tIi=Sk4(i;v(*=mlW7&8kNRqY*$
z%lc&tFI%>WR#h^~e9(}YEbS3+*QmVHHqvTiqe#i4O6f_BxDI~4=ntlcUBl^h!31#e
z*%>Uljw`pE^{N66>@Q7@(!^M(QC{l{6^NLzcX__-Ro0rdblj;6U-sU$$2{3gxBfwp
zpR8_Xf8e%m<*$7{O7%nDOI;UJ8fA&smuU?es^~PgUM6Eh*s^7PJ-MDEt|-iW@4eb_
znW9puOq<LVB8`jdNtdW3mP7f`+jEa%!Hf{xi@nn~*V&&ZO7-&<y+eB4CbPbQ%yB``
zz@I|`ZXS0q(fL&NTmlmW7OqV?y*w&JIhf)LBuXSp&I?J4e|M+8#^K-iV3NX>GFDFn
zQ`>5y(3vsk-U?&I@ZX!g+rnC(u(zNfT<falF8W@OKkh=A`L@bIKwf~|$DHS=X)fEv
z)2GduJ0suZL-OYwM9$NpoTTqxOusEi+g5Bsz>N^7mKNV4YD$AVz`+e}o3HgEp;CUV
zYw<1nyt$Ph1}iN;Eth4w(U7^QypTK641k^6Y@xx0A|*rFZnot3zCmj9z6p0s>M*%|
zg;F?JPQ7jQpzA#i#pO8H6hG>u9R+qa?1$>E@j?deh{iGl&KsOUQPoBc4aNv3VT(J?
zPpGf^hOhg#ez<4rb(S(p=@M>=M8_)dfz%J<OJ)<^^3JWONM%-^f6lCVM-*raeTjH$
zL5<Tvo08Z{JEm>Nnw&m6B-Cm`6dubLxQdSlC}t<S&BUaAlD}cFdc~kGDeY-#Zo3=!
z!DIcg)t0#XQQJaIL26CB<9+XcaK<T;VFe!VOTPTMV=U=Oi>$7F_sZAi+54=Cr1xZg
zj)&pc6rB1J=&cF^da<iCWb*N>PwicdM4EC#cA~qkUw(L~t4m@nOJ!5P)LP(WFEU;m
z6(U|+X!peZ&9aVOq>0T?sHcqJ6jmbKmi5Q#@uQoc(<TSUR~}zt2>xJ6jY3G}p?GTP
zGwyFSlq43-hX3F>;M#G!7y2y5=6+O`TdcH5;_P5_&Hz<Qo*z|qt!YzsvO#3_G`Z8;
z_u^IOQ$6+}sfaU0sHSsxi){-KTJN1FD1^j{aSqNI7Z}<9eyw7D)?7@-`y%sz<OfD`
zHhKQFelm)D3rMF5Ihs8H>W!Qmn%!u1cRU4!RTriVfV+l!#Jif)BG!}(wUcMnTwjzs
zwKFS~{iuq2j7lqgwyv^;o53UuEiEK{h74Hq<en~6TV_}3x`}^3ztG5Auc$d+j@t5P
zIu3fnT8v8j`NB~IS$EqjAu`baHS$WjZ|OkB+=b=E_DH>UI2BYk)7~p_@)*XCUJrHj
zDjK7$65*?hRdU%KXt_!$uPs;^`@k`uSm-gw^cJWWRl$Q7seaB7<QxHKr<pBx08q-R
zZWo-56Rv%@l$6U{<lrBPbV17AES6vBHS1Y0fi-7tk7_f<=g(O*2^1E|2n5kBF}G>A
zadHk7o5f-WIWKYMMRM~?1B>Wg>wwh@?a>zTC*~_)2Ap*VyUKYCXm<^5-POX3F$|Ft
z1^)~lvkew)mTj49bgYr7PkOEvI|}_pCdtC6dgL4mVJcD)Ce1&EF=NhkW}u_t?G)$s
zvaIT9^Lv~M@A1{fe=I!PIT%|;b8ao@nc8K#!d(^X{YbF>*pZ{w(K(52(SBq13M2F9
zku|Y-4B3GJXO)jw-CAH1h!=o*<bnf)Y44{F5ZN+L6BHW~);k`IZOqt+#CHbSMyb2q
z_C(r0u1<PWviRBY$>O7cFyXKNw!4@1!pUGeBznP>eMyq3o$Ee9HU*4kJ;gv`K-4tY
zV*(rfiVXoGZJ&I8s5agv&lE%eYRcb9RNMD!RC0-h2nXj`>!qIPmrWvQ_|k(MB#O^e
zuHMgNmaDPXMAIslIYA%%aRjyT-D#kXIrH|5lQ$$DDm<V~GE(RWgw~UATo-Ig--0)O
zs0)PiVMhVzo<?yIAiY^=43s5Uoxn?ZkSg#yJ|>JI)@sUQ6Kyo8^26%uXT`z(6gEmr
z_{QgMj>BQt$cUB%R1vQXZ@YuA1<fiv4(#CJnUGLcT5(L6VT>>8PB=7^msW3jf1oC^
z6T~RMpBB)9EFCJH4JXke(c#F6LZ7&{4{AV3uLnEERVNoIf1WwrMVe~d_2Q32x&?<7
z1O6-N22K4WT0GtuH`MjIr5rB`8_uu?U2M8|e6QB~N+P1BHjc3Z;rnG?zC}vR6e1WX
zLvDR^FHAW1%0HQL{@dXE%isM!7K-oL1+Z-qNn$p5C+(P&D!zk-AvnO+r6<~O6faOb
z*hl<ac7{|QO{pzkJO+ZV>MJLC*j!>}E-^)!CL1@n*pY$$s8i^Gwij{4w%JtY1^cR<
z1dF_F3wiY;Yu}ciLq)uhG>u><&|13|6AXyy)x-<hRiPW{`+ChG22mVwoW69LET1p$
ziFJ_K=DGrDL)!zyY6MP*)T0a3d$c7*f4UiEB`X9!XVJWJt*?Xk?NR{KLwoWK2wjV~
z4-L}roh1g*k)|)FE3GM|NS6KK!Pk=E?~wtq!kz*=oy=NSGPFwjhh`cE7nhDvW0*Hf
z-XY{w=#t_*(;csxuA~WxGjobFb>;e`c<I!b4`EE|J$gu{Fvw-N-D0GLAWjso<P*h2
z;qzvh5bnGp{#uJ|ecd#PGwb@CMxTp5@&kjiO_<^#*=&&+(@3A_;bI${xkUj=i91>@
zeG7;z>Vcw_6_J~J8i7SAdX*NY^Cte#lxT6U8!v;vvJv!IY)^W>B6wiZq>)PsD5vGp
z05J8dpV(TAWH420;Nwu00VlUt5}k>;>X2Iz`nRQ+ts*q%1=hpgo(e*wU^8H5f+ac+
z`w9b(ZlcJC+w9Cju<)4@Xnx)S0gv~2PMq(>q;~Hjq*HDADJdW#P_q#10Pzz+B7<uW
zCIUFF=!~wuJsRNK(G4EuOJwA=CV$U%HlmkYPb|u;<VUZcA$AU95s5@h>vS&2p?8Cw
zB+V;__luK)C#B3B<lU~Wk=V$VL#$)8%TyY0rbXKp{5YFs5c7_VU)IA3**ElsNnT%_
zlizswaygJ?iD?7W|5m!aopn=4=&3?m#*C;MF0E9<wrUtEnKQ6j=p=IwJKV#-TjrG{
zXrnuQW6-q=y1ay-hRrT=HOV<)qTu0D1eG|G2#d{As+L67uIA>VPFcRSRkC`!laB0=
zb;_7N8W-?AyV6kbi|D8YMO?=hALKH7r*Rgr?K@LytE6~)+McPne$6W6VVGk4mlrH&
zx~^Nl6s4lQqTYySg*srOyC9T4(H-!MM`!n&-Yn_9*wLA|*Iu*N%iWY@B0MVi@ZLA(
zc3u_z`Nj%u7nQ*;EfOOt;!ssg4=S%1Dvh2j#=_6wnAD?mEyiau@egu5WG;mVKjuRj
zvj%gk9!xt~CX`a3GrFB;`&SZs^ui%jB+&M(UwX#p?SszbA4k<qA?g(xN&I2-&y5DY
zS5%bnSo7sSuCsajWeJu$0*RG|S`mxjw$4%5MY8IF2a*x&7Gh&hAxs0FmtQ~A2^A0)
ztn*;(zVVdS#HcpmnJQ!U??j^7c@~gBH8<x%!b4%gUC3K>NQ2tiiJ$MQdQqsQC<CaN
zNaH7L$#B~W{CcL-qRgXpilg+flMQSX?iX1$fdZ$g&>3C9ZL*Eka*{1njmI0TjaJ^x
ziwL4CLUp@jGisG4Ucs5-{Q!k0QOQpHet69=uG|{`2xl+byEfKIem{63$$I_;KkY&1
z_p`0>p^V(EM3FBo0%LG{bJG>gUBgo7YjlaM9*eQv)ZTV^D9fCF#nY$4l{AKTT5nR%
z%a=Xvr?5~%&mdlA%)VJx2|}7HtlTQ?XqMQ}J>NZS)aLE?bz!b%M#9#^s4Ls;UIeS9
zBAv=}r)l7wrB21oKwqGNTk8g3lDG0ZZcv@vEq>#qh-aqsFJf=_v%LBS?aSkGjW|pf
z1?Vg@6iBA7q4%?fU7&Xemi<v=5BS^>t?Y8)d;Ys0^)|XFs_Zh2dITHO=EyumKA5^I
zbgR&!6;LAiq#r6^bV3ToiVJ*s3&s;vs(jnSR8yx>!~E7H`3u9oh-i6m>bahN@Pcd!
z&7K@|K&TVJ?wC>i5==`_xDnm~NVObpzs5-Q2q$bGsB*{GxgNGJd+U5V83!eD2+a~W
zEdl9ioPur{W}zXORB8K)3Y)j8`68}Y+C=XfI94jEBTA+VT94=Ia$ng2zXw$@evtup
z{($)w(~Gc3#O1P8mPHPF$#*{OxT;E{6#ar`R#spAIvoK5!1(@vXfKA;2f=cW!GB^L
zfr9CuVw@Gl=)`k>ttbKRoPRG<_|G2yf2N{Dvw|R;&m!G*4$HuD*8%~R_`kefN}v}p
zhHE8{7y>W?f9e>bsJ!%X`ZDtX96bEtzZ!~^c7rGZNYWPS3+Rap!+(xgLMkT!en`g2
z{rzb0E)^3gSxod2o}B`l49K471EBZqf9>=>|K_nC<euO4bi2P&GtImJ+~qF<vdv&X
zQg%<}$~=5p>K0<H(hxc!EzSzz`0D1l5mrZ(da}Chpz+8`?YVw)bHU}yn8~EWHxFt4
zXKWS0ARx%BwBU(7#KO=aVb5s3j}Wvks)sy@+Ma~gz!xKF#>0ppv;_hbM&Qo$$z{28
z%c^}6qqV8tsdJ@=cmx4l4BE5^EDJY?ku6u|+CWL}SshM=cVDB}6wdcc{USr0QGEh_
zwtuNK3h}%&_%tu6#q`__dpCxI;@n0PVepekEi7NNa)rgA2C&2Zos@v+YH28l5+<_!
ziwxuhyGfA8TusR~w@<%p&LgUOXHqA>WEFpZOj@rz%Wj2Fofql1oS11Ad^!pAkKF1U
zIbiC5_`}cdsYrBSc96&KJy7nQv!1CnHw}X0UZRb*Vg}MsmrHAmLk(kSlC%9+_k>;z
z?kvOGx!qfTk#Uej30mXM;&~OApR-RAK6F9`-r1x3FPt=flrX9jtx7*B%@ocq$YxWy
zhskJj4r{RO;TnnpQCFw2(KkEm){Y)=ale{1=CcV3Jwzr<7p25MRo2jA3gB#=tC?y8
z5ClN80OQ>SVr}wUu~Sxmv*IMtJ!u=p>R;T2kYJIGWVO6~Ml-?tnS#>usR@rq*n7wH
zpj)JY#j~D6fhtUsP2kc6Q;T_rhBeaHVh6J?nqCjTvTSFU%?)vsI4|q%ZbB87m_p%9
zEmsDdz5=ucW=|sIkxh*_zR~>NjjShPn^M<a3S6fYX`|r-IIzjNRrG+up7qiO3cjSv
zGqs%ePDNl-t^KB#_^zkYfa1oalUR(GI!i}aRk}&KLM>Xj_tBF#62)k^**F52WakJD
z3fT4z^<fFw0w-);vi5pR>P^NcSGgK5%%xZV6ucNO8~E{LzYBcUa{!JFN9%RpkQDzi
zN|4N5oEe~C$KS0mFTD1V+{?+lL-8fv(0m!pM0h+#;=?tXU~EI5O+ZERiK%uCP#3XB
zMq-ZmAG_=l=ARDC>&P%^h1=r>!s6{cL-1-QVC0Y)Z3BkOh!AJ-A^@W(er{Y6{{Ew?
zbl=Q$(G0K4jVx>?POSv$j#K<fP(Y*Bs)P12<%+`PMqF9mUo|#WVoec{pkW1WI8AmR
zmTA3#n6+MJtjE+YcUsU5ONtl0Vt-W8WdACB_xzj+mcpuB>@{QHLIt3u&yNc!g5F0@
zb;I8!x1?FGfSIPvx5kXOG5017u~TA2<=+Hb1f#8Ax!iXCB-9_-){^ukME836fpY+f
z9%@vJLt0|8Ry0F*<qALjd@~@e<EZ5BNL7}+Y!j^X<l5lK*8+*L`+LS^Di3AUx<HHw
z2f$Ec>(WL?<01zRsBLtpT!}iEbb7FHKh4wV<-S5@q|=uer{~Wjv4v8Q#zSf`Bfmg(
z$>+t-$`A57C1(ochWtrvIGv>*8lvKwt)p9s$p&lVFRdL1WS=R2;p2p&hUzl=5(Fql
zOMDuadLm(p1W+AXuSW?bsfS^13dXkMJ3?Lf%aVT>_9w`A%+2a{8wjlu=TID$O+>Id
zV=UtnX!Wua1-1ineyb*31_OCF6#Vl9X}|Z<1D{pkaGkebAf|VR^4t}UADq0jz2_qH
z$^jh`!y<UceRw+xZw8uypx*<b&MmBS3JPQgm@Y$-j=g-}s6d)iu|0BoA0IzdwO%WY
zCdp%Zf}4ENh4;`*?~9!_COoH$kHpWf7{8L-z1kWJ^&jmz*C3XXO<(&a7uia)ZIVY!
z%lk!U2Y2*KkgWB!(43W*Oj&#NV{N}1)mT55YMWfID{cy8o!5f@b+=_bVZ~EK;{f>H
zIeJ!SMH$i*(5+J}C9wO*T<s-*YE;!2vz`{LES{h70k<blg?sXc>$3E%pIWEF_Gabd
zPfkTj2PAR)A;J6EIAhF(m6CiJ-<5slv&Oyasy6JO%-;AWK4Qw3TYC{ZDjd2C>jFA;
z|3y*+1~AInv{4c*)UZ(yCBk8!_I?8Uj*kcG-u;+$^_RU~EoLIxPPyqq-R<lF^O1;}
z3ET{T?dOE?qa|A_KR)L3iFT-=UMjX#yuwo_sG$jEx_94>826=ISx&4H=YB8U+g>1c
zl=_90jtobeK-@QhBe~l9s{aHF<Ij2?{@2Pr{|B;SXa2+UqDh$2cU5o@PQd~#^ge#^
z9!4-|GR3RBrbqrbMpin=^4Qcmm8U0}Y_4ovOwRO1HMhjNXutN9hx?r!fmbbG`?UDy
zFV#W<U}rIiE@zxzVwW9>mGC5X1HfL%p?YrnIot0eP|pTuY}GkucT(p~qY>ZB&&|dE
zBJ=b(2fvGX8fv`)IR{ZI*fL2Kadq^3wW9SRShb#;+l}sn+u7_-vtTX&y!!{30M~}+
zhWF_JUByhN8$jTlNB^3bQPKyZ{Yz{!9(Wi?q5}X20oyh(_NG1JRNEH<_sQ!@=R|7+
zd9T3Px4lkP;;n+#>+3&gUV1z9cs#vNQOKXRR0Xh{l57Q6F$IY$b79!HLaXLPr}vR;
zV;rENC`G<UMienMPmx;|G&z9p4#H+fT;21Q>7C}OwQ$=6IqrMgPjn;ep!u5|wqIM;
z_!TcF-}W`!4H8`JE}hj|L7*&cox}le8LcnFvhZ8RCM&{Jb-oub1aZ_vetNdNdgXGV
zZCeW?Z0OW53}H7EvB=m2M-H*#lsdDFQ=XLR3hA56?yxDy=nBo=Q<hw>2q?OjO18mC
z7K4}ivY9Ly{Ik%BzN3ci)FS11x*1zdxd8LsK-?o)@+WV|sC=BeAv13P!+6m-UH2~R
zg_#{J`J+X{F<VC++z2hN@<gxA>e9`c`LsGA+4|cDfZlK?h>36u2}pgn<5v_Sj!>o@
z-r!kzw%M9es`2OTPqd(wFF}^`g;%>RIL`R8uEQ_Li7Nr)5s(U%PJGl52KWyLwsGCx
z;qc8(e58eZ)M@kMy@CEW0rCMqNpvCh3C<p!(dmNgYd<!@FuiwLN#H0wDt_SjO&=&L
zo837V^TJF538YGAyA_9e5g;hVvSb(-{jikEhrZ{g$LrkiLL86XMi7qwWkYyYH}V`;
zq42eBRc@I8RSupLJRG;LfezUnS~2ItS`zbCGT9RI=H51FuO+D4&VCNKqrnk3z2v4v
z(*w5y(>uUuf!nrbqT6x#=9IrRD-?`u2sYafhSwU)mdoq7Mcq5<x%}*zVU-UTmI-%$
z32|X-L9+2uZJ6h|OX`Vwq*=t9{9>M4Im&et){mG=))+a?Xzw<{K4UiCHc4S5+b`w4
zRkpFmqPUF0ztxAl(J*%7VD42Dc}f<*M5+Hwg^rlpBw30pB+)MBbu9ms(aznx$(ZTN
zk}rm>iD?t)wEY}O9v^-DkSMx?09Lmi0BlCINuKpdX-|yN$-NrycmZ_{@gjDCRon8T
z2rmyGUD+VDG%bf*F}ieFD44`TP^`k4XU#7-oC<zOztLU5H~(#QaJQS~4&SGbQU>Nt
zk~Fs*H<$pUW)-+C(fVn_xU72-&Z7UJV&j;spK5UxJ|g%j6FvX3(PdT*{kQ&Py<$w)
ziSd$1ToT@b;DMW1*1X_z$I?Dq)s?xE9k5WK<K*Xk@A{s8kNfV|`Bz;gn2EKF-F`d}
zBehRVv>V~LR-%GDc`}tBvNLa++*f%cgHn8ixto2R!)?#lt@sKau{_lgO$`-B$0xoe
zP~>Xn*kQ@uuiTIQK4003O7OPsmtoOcgQUZ2<qMY<XIVB{Gb)`sQVK!LxPGHa>w3p<
z`&ieknXU`IhxbQn$-41`3b(#`I@A8XH{`pBGI;ROQ=D-j)Ec7<vp`_14Vy4jeHZ7}
z)WnL)G_oVRZ+-s8^WaSHbsEJ3z3F|@10?nn&~;J-KYi>8$s%!~N%6~)j9W0);V*cV
zlq{1r8%G}Z3VNST*AhLmYi})_m-ODk$Z_t-MRvf4tjUmho22kjufR8slh2oRrcIrH
z^(3v1<}&g6E=?~4$U0VuzsPL&PmjhCKg}kpfxSx(p!}F1fJ>HS0L<NLKZBLhfOHy6
zNaqke!DN!Z4v?D;Y1_JHr0j-zFV+?q5=|+ub{_S#<vJ9Ew(XIm)>)F|>5^@@U#afW
z?<^8@-0p_BK6Lp&xC}%GSYIkj`W2D}7pO4;_P!EH%gv^Gb>l6ah3g%*;*H&7g$h5O
zyX806-Vkr?MG+M795{-uS=Q)Kz?!>p<!VljgR2;J>bg&uLX;6uUQ;h!zZuxJ-m*Fl
z6<sDec1ZhxEy}|UMulqGDouaz^<N$#qvqC{eQld(0jNuW2^Z#;^y?Z!bjG2Y*_+<E
z8j0$a6RXj>SJh|XUVVca^e)f&*RQNTqa0>!GDkmd%OHnaIfnwc2V>rwLc6ie(=@{4
zMP1g8jH<clBWDIzp@ciZW|EH{JaB9vzC)~zsqnz3iib$?Ll&bjCLeN@i@N9sLwaxY
z&JN`D=hEA4#HWepJjxcUkkO8*`(PYe3=W1fHYuW0T4xPAhRzlhX4`$^GYwt*{y{8O
zHk-Y5==%oG_%2Ncykd%w4n_f#0f-6wFrh+)TVvLnz{(G3oX5P9)1x!3E;+b3$R;k#
z?N3}$F1T@d+2|)Lb}>+e&Ii;AXI3`FNOp|OU#&#iMjr7CG_H7sDADnPt}5_^H2dy%
zFV64-OR}H09>zKJOY^9*dY;ph@t@&u?Y%IWD~cRhh2+EXKelf?JrnO=jW_c-e29ai
z={t{SDYS`*B2Av?>>diLbRCp-SCiG{z{K0AD-@}`Z8S045LyDcB{2W<f>pTfyDmvG
zZDFyId=hP9<)05<%uV-S-Ej7j!}LTbbAK@-c;STba+}2}d6_<JN{j5<BHq{L>zLWX
z+BU4KXA)MPcCcvAD~LW%CZ5d&;+@B2zs(eI!c2q=&~DBzGRq$TdalP-jaU6EMHqCV
z0-n(!bu>7W(#0LGEz-Ko6Z3(T_JierGp-{4V+X8h{NJ_6$_{fs1xkNcfWFq|OrR5i
z%JJ!gKODat7U9~<$PxdoBa}_^$>(poV9hfAzDKrj{yPAY-2T@f`2XskFi{}%y#!v>
zzyV}<%Q-mU;r1_YSR?x6e9JZXwAUiweN_z`Hnaf+UAWaACS3eqmv8{!d_6+0cTWcP
zOvo=XT{c$yEl7k4unm43#2u1afqsFlFYV=}<t|`*VC>d7{)XI1&MJG9_gL@sUunk*
zG^ETJK!X`ua?}6_3B$kLsT(3v{AL6Vm>=3+9`CI(fMp<`tnL_sa^XW8PgeIINylt6
z6?2}BYJbbeMni^&X?~3p3BRGJ|8s?j6o)u))_R!Qcb-Gk4oLZ%N09?Tt=;ad<GL3x
zq@7Cy7hunS=#%(x*h4ZDQMnPy)jvUCDp78E?5PiUnyB-{3^>K@3pK-L43`t5FOWE`
zNpwkhK1qlXs3<0P#wxzDhS&A=Qz6Q91^Vf%_|VOYqxFXC@rn*t%{cM=fcO09rN70&
z{)&egwzY6$_E8l0yFQ0qaZoXgV4i;y&GM~#0GLznfjbh7yth7MV{D@;VsL(6D)mg>
zE0vdf)PhR1m0Qjsa8?xQ3a+*X##TFK(obUc)3~L$Zm&knS>WY$$<%0-@t1a=ii+Qf
zM}v3E3mnrgVQ6}OSg+^~WG4s5m(U(@?&>3JEU9wWeMavXtE$G*-caQ17Uk+_Q6lD$
zBz-LQk>GdA&e=W+It@{dkr&#Jy_n6vc`}IIwc1;4=)a&>d8M0)sg>ig{-p*7LH?f_
zVK`ILCAF^P^f<Hvk}(}^>ciAMRujJ>Y)sD95Y&6Ubw{2vR(LNfhF*%^KNFkScUs#8
zSiXr@<C@W9%k2JpumShdA@>4D83EV(XnQu1zW9&K0V_|>RxqkQqSkIJb*KeNZG+fU
zc&7;>INxHRPW&(s<+>#$D#k^qR37auS7UbL5Q)`%LK$(!7n!{8v|#VF61xJvYG@#l
zbgumUn%j*V%C^nWF-(n!vMmNs;&0Ef63ZUX!ARs(*Cf;o$6K78(VE>7oG;rmTcsM5
zisf9&2wjj2$0lI_d$dan>bH`{imDNeRJ8{j#!}aR@XLdJo?4M9MR}DY<JB_sA&<9C
zPk#F&`=eGA(6}j^gGU&T!>@j^NW2j0mGNEptC>~5HGbD;A^BG1{GG5XP<zaJDG0@$
zZ+BWcqb#_@&7;fHMOE_cL8TbDCCRq?0O(`8tpu~Nsv}Ry&R8yv>>*tE>BHN3n~7R*
z&+7%aTCfgrE|`X78nM@{dg4!WCUe|*nHmprof-nMfE~brfT}<A0f`wCbhP?9RbNvk
zrzOMFEvzCUU-IJ1dj3?3ZuM8N0n!x&Mz0l2=GgEV>{vhYvT5`oua{N;`|J&J>>fvJ
zTQ8ZbMRBR)Qy#A2BhYz=66=alRBl6Bybfy^rL-cQqJXe6V|C7)p_(#i83DA>dav|^
zkF_q)w=~sF^3X~&6%<K01$2NI)^gOzp8Ma@?(*Y;TqX52oLi{>0-!ckOkq|KoEAxF
z2z#`uWMVIJU}Pf7*w4&3y*+TqQ0xt_A|c?*(KGui)YUolVcXn%Yy6(A!Y?4AF1^Z;
zKm^QKEfu%6{wzIF>NBwjc6J_@>hnU}$|~BchPEI@yF2#y{$PM$i^%GQX1d@X)zNf4
zseZf=MxWZ{oR5&Qiwktjtz5cj5ATpct#<kpM@w)1=;f!<qRb~tX<gA*Tf%VXR)Dpa
z9m;}c5A}2mmDoSxW_pmkZtvP+8eFR<uhGM4`;B1tE^W6nX-8IH-QnTiERMe!@#N@4
z!o`r4JJ6fNIH=)8w-fJ{Yjz$=uBor^rfjJ|eJD#){7vGn5PG8JBIz6OI88F{OmT1A
z^Ca>-&UVmN%R5$-l16FIz*}7*sqk7Z9lR!K8gv%?JWV9oT&3G%+%prT^ay!evG>!i
zDAU#=@VIUOw$XL*;oT3n18yM64W3y3mZ!ZzMt4H+d*Be=JNPmdfaPWW(hykM!-NwT
zB7AVB0NfM%z(2!Q5Y{`G4iddKkSlWmpf6TBn#fm|-5oszq638ROS6C`$TS2g++?BX
zk0)|}lg<BA4U7N1P16=YwGU{U0oFVVeDiA6Z>br++(<yy-vz!|Zrp$%1USQY-esK9
z7E7Lm_ZDQqr&2+uV*27?p(6lDl%)FiJ%l^sg@4fRHuJiwX%2)e0>F*_2aaj^4h^|!
znOok;3*{~eUYDTKW@dW4voUHciQd*T2C4$54{1`M*0^PK;}YndEvB^V*6T&}vPjc!
zk<(l$_3WUAns?lXT5c)^G!*MEg~h<aPGG0Sxj<}AV;Fcwa@oPhyTeJQ({sQ;WrRyQ
z>$%?-OYX0zD+%hxM)Iv%gHuQtEsotB*ou-#!#AT*=P}`Wb(H{z>;%^J#coJ`v*c>D
z^(PLB;ed)CH~qQ8?s9xhLo_r<gl73TYgY{`D_J_{Y$Zc=&Q%u|X;T8}K3z-?2VGc}
z|5vh$!7yofJ0Q6b(Cq-<nu1@fa@4ljVZ-0RqL!d#TkBFEWDZEm>(RJJaI}%$spxA3
zA_-VO;wSLt>THbG7){c}3<BE)QrH48An?xs1KksZ&IalvWf?!8{&dC@X?~H37GfL1
z#8m#7U%uS45={^R<HUrBtj$~`K#U!zENz3Af$y))61-Wwxp;iaL^3o)d0sS9j0Si>
zf4IGIcsror9?%XSF&#nd{{#_AN@ur)%iKW#I`GH4%LfouAaJL82j~pdX2BOX!6)XB
z>T&&N>)?y<KVA{w#nb7L=xG5FBxenvVlxasXh1mTe`)&;69!V(KU_5HAKwl@T~ZJH
zL;(KJKU`GvAD@_5@+F)`3I|NDf4J(O4T)KFG(kHbr_j1^1@kKAiy1B!uU=N)6nMXq
zv%Wd@?5ujg6$+Mv^VPSXTIux_xnu(`WY8NGy$qj2w8Cj^YKeKUYj&6NLOgwAZ^%{*
z4|w`2-&Jvnd3nA=b0VAZEjd}47=<6eiJ^RpcJAIk2j!ZQgMgGL7icEeYwOSh@gpi$
zf$J%SpFSRPH?3s}GcmShKg_JvUR$jEMYdard4ui7)k>nY@s9S#Y&XTL8a*KNKRN^(
zfQIk;c9WyaUn(LF&IjWY`s;<Y6OY-1zXS*bP#Pg4?|jWCH@N*5sm9OQALL;YupY=k
zETCYaMyg+gZ?!KVY<Dn$f6{VeUZuh!^jp)h;NKuvGxhxrX9@=u<uY8xn=Dnyu&PFD
zTc>>P1rIOO`itwT-DB$rO89wf$-=*^1tt7la2DvfB|k&eVS;H<TQ0=s%c}8Kj7}X_
zVL)$yY%9O3{Y5g@sU1B`PbY+eAUjqp2j|(5Kjn@mj(c6#b(_=Qvw8YF0`w@qC;j?4
zGB4?_%Og!PS7H^(fvz(KCQ9blfflNOP74etg@&y-E&^ao#fU@+v&QlHLT$y{+H>oD
z=B)w;6wff}fjnLRd8zx)i{<}mLy!k-DxHy7tZexi=fyOm*Hy#A)>KoURIOKYEhPQw
zZQg@bA6tVbPWY*3V#ZpQRe|U~nV7(5TDSYi_P;)7q5uE<{>zH?PtNaua>@N~ucBsl
z2()o@NO{k#n(8c{y|YrHw*&7Byn|M~9PO|bb*4!xc|gq^2*|ID>sy);s&VwdGAc0w
z)kkKc-LY}#kBhWWrxq@|mPs47L<N@Ttgf<l*v2U3W)z=1e3Bw^vB2N}jPk@^A>1Ms
z66hu3O70}^h>m5(;kk?+-mR$weG&j*HRD&iWFA+RSiV)GCl0&`7ZR}%nDOM}FtGJd
zw4t74R11&th|!SL{FMCDSG=(xxiTBOP{UC4{SP&JcUW$NP3*peSpiJJe^)L1dxzio
zBJhR3vjEQZ->zahAqO0LQA{L%QIiUfzdfpa@m6cmgD7JE8zQ)(-0}D3zN-Y_75t6)
zZ?VNX0BHQL@Xx?7%?cr-;_x@2BI6`6$K+esdg~mWm$7rxSM2ZTNq0UMEc=?zB~VV5
zDxqq5n`cqo*Sl>f0!D^ATso()qSs-r?a9rrZ^rCWY5M)MZ^H5U)lX{?A2{73EI(FM
zlpkFpBNzf3tSr1Y67!hUQ3D7N2^L}l5bVI#9|=0(hd%kO!h;}%IuJjX^nvNF{-w#0
z<u9S0A^(ab2*=dUE@h0X>`Z<YuDrFb$&Ff-AKXfPWe`6e6B7|83j%(l>#<)(J4s;7
zGt~D)Y@fHz`QsU%$Z7Q10W!~V198nqV^wcxk8~}zMY%+uNb4%^m8)N3EZXb5_I5rl
z>~J4vj-ZBb`V9km3HAUWE=)0&U%~=#DH%$t3e&vix@PY-IK^eV{}3cLs?vWuX49rX
zJz8w1rNov@&aTfM%!>9aKJpP#k5%M<*1OpOa6JRgDpGoWrp+%BckEK(M6V{e;6e;5
zM_0!M4vG`DS8gyIxX5d82V+-V>XpOb1c7vJ(hmTLh2S4e;cfd4NgW$K*ixYzhsvq@
zGZ`!0!W0f+Yfw!jg3?Dtix=Yz&@NbXJ`Audns>Z>PTzJ-&}N=m!*#SC@n;Y6T8|C)
zh-*OML5fL*PP2`2KZnV>l;Etj*XG5V=NyWCfjx2s`P(L5(UvL&g+J7-Z5)EN^U@}T
zvs5S2rh^_T#k$^Rw<{uw8je=~P~nECH3#aK*K>WTtBrXPcAHVU<Xs^{zzw;3L%@TM
zd!`^5w@#bWCX4g@r{rDDG>(}mA~^Y;N(9Cn`)><Vc>QbD5b&J`rjke+W`pRT3G|$*
z2f>C6kxT*bHJKKG`%-ohd}>!#<hX0bH0uCJ0ciYf;IEF-z2ztJv)%VIqx0_uvA3>s
zB*EX9(R#^GqG-Ta9WKxn;1zXZe~~2spwSZmGJ;wae49$YhWy#}IIf_8Q#<rrj0P5)
z?c&~JVmpAk4f-E)pCa@DAASLpsUDYtSIT&A0g6N{^Wtn2Co+8w1o8A#rmfj-^4GFg
zSD&fvKys@ek-CSTl-G^PaXeZd-BSp*%O;Q2TBR~J;yL?Fi+|K#<qq+qio6MF7(`Bx
z#9Y)WF3^1??s*q<F!ZqIjA9#iVA<hCxa(v~L}{1aOwIZ+mWNnL(8r7V<l2U}seqFt
z2#T&!O3L4~@th3q2{CFWz3?!*scP87)X1`^dz(t`7Jx9B^Z(rnNSMUQAk!XW4JMl`
z(Qj6S-W~PNDgA)$=nq$OgYG#tnie{)d=W;t=+J6Ey2IS%t2_OK=4WnLXgf;wYV<o5
zGTh@%l3W~el}8ypuo1+Inf%h5aEu9YuCy@PrToZydDq2Ue{fQe{`rd*H^9`Fa0#ag
zbgl$1R)Jgl3z}@QiVCoJa|Q>m(}t6tK068Ka(lY#3|tNYHw$=acGg%^vdZf9KB@2#
z1V)s-@zRSpn$~on3i3`ryA1CYmsbU{Nz@y+hF5nsT3WB&6YBMf{ABYfVB2NE6)#pA
zLAr`N?gCc{b>6zPuil)ZJ4#T@or82fs}B>kOF-%i%72y`@WCUU3R}U#TM&2sr(1=&
zDv#H}?~X;0(NUm_Equ8G<s(<C(UF0x8(yghJ4CDHL3GtnS-g^IKPRWzlq6nn>(W$^
zvOd85#|ENv;@9)wOLO0foOg==%|6pKna2^Gt34cogrqYS1+6EcgG<#vnc5K{USN9V
z3${yhT=kxy%6@lOIe-tlQzF!qt`qv)JucQlZ9Fc5VJdTUwdrM-6H|=J1p-qOs3N|n
z!})@5jAOd|m~+XI#wU&!(GMr}b6iDuGoWJI@9NwS++(_+CqkWY&Tse|rMqPd^i%L<
zTW%rJdAQ(9^OuxB8QGj`_%8T`JmDmzOiPjLf!@~9#WZoTj1O)Z3O{__!e1<gK+ZMM
z+J1Jh5aoH1vW$&;@UHItiQ@&cKE1xML24j}7Wco+3dep;G}4=y>lwCVyRhwj++bGY
zIyXmcpMiR3S~H>k;-y$n*xO!87e>d;OV{RQ?Yw|FzpN1Q939fWc-H5s9aYh+MvSGS
zfZ7*rhO<$Mt7)yP>DsG@7D^Hoi`7Ey@b?Q8i5(54fw@-?)zu2y`NVJN4uAg|(e;5P
zqSZAp=*131UthN;?{@h~XbJQVrll5tYmU@6sd8>I3zv2^#d*14a%G?^pzBQb1&fsZ
zWVW}<Pi<W0(|^dE@-FEys4YcP;Jn+s_Se-v+fOFHRuFQVZ7-)7QVuF93Hx+swoD61
z|A+ss{9v{i0X5D9cB<r_NT{xhxJNPLsN-A#ql(ezJ1BpxZ>_D|gH#^zC%iz|BaF*h
z0da1toF_)WSV7f3Sv|F0L-}9iDZRSyn|AOspbTT~k3NT%cXJSmji$b4vR|=AEPb+I
zoFqvQ9^f+Yjy~&WBnX;sR8q1-RyCsf#k;@%B4f~h|17e6_~u42Y<wya%8Glu3=oHb
zWUxsFo!3Dj&vUs_2QFjxG~L3&ewvX&S5!7~ue({J#inKb+;3bt;P=XmxC~yZ$~aJM
zVgeWvK$1Et8b*x%M}#Nt64(q1Kw5k)(Ezjp5HL^09bcFI!<kRN0V3u<fLZ^H#KZp$
zBppiM!k6-RgqzSkyAWh$-n?9K_g4?1^39fRsd)>vjVmOyk6M=GvG%U${#ODp3;{sN
z^;`Nj7|S^Z{wbds2~vf1@X%D_{O|$<$>HMSg_%7MJ2{aAGfsxnbY}r^e{h%;netcv
zycmelR1677m=9D*06%5A8c5`~E*tE8qyU!Vw|_<Z|D6T<A030j|3o-~@QfIRSQ7+*
z(h#CM;L|e})C?P%GG}`CcwY3dgGA?4xZNPMyJ7E_Wie#wCbuKVGS@qDgPJQMvRAcj
zACh@=(gu9B0G<+O4?JBNF`5)wSIpB1a)aQ;Iu>YgaHa@?Q^D1=oEek3Z=T+YRt4)X
zZx7xUN_<9p{H;_4^NS3nj;2_KPA0S{Vu};eT>UP}be&}}yKh;?8Kx0M88gChIrtg1
z;-F|BK$Sm{E(IdPX;D4k^9z4njh7=(;qLslL4Q{&=S*+3W7u+iM3$&&*f{=Kr$6P}
zzId$d;fr|VzCx|Lj;U9+2DQmfQ`*x-(Hl`v#XMYN`^|SICDC6yuh!JRTN6+_VOH(6
zxguS5t(95bCoc-_2%qU+M!e5MCq@ulZLJGuzpd%MD!A2g2Ty_e(8l%d2DnobENStZ
zV~@MqX}G|H9_nHW9yMzo%vc7qn$4M<L^gl4?uT9$Zj+*JHQ)-rd`8!2zm4-}^fb}{
z19^*ZfK$J!z-Z)%sv&vv!gshfZBC$#1>`X?phTk@8}FK<4S-m`Ra1<aTG|U;O3@3w
z6cYY(dHKy&SJlmrk3&)ERvaf*TD4>y)@PXRoK?m^E|I!Hz%T?WTcZzkMpYznd$Vwm
z<E5&a;n|^s`zyX|N_H{&ca}rxnupv|lTT~4(TS}@+ohk_MJCxadVRD_pZTImPK5m7
z*7{PC!naHLb{*L%iBS(D1G!vORDmz}B>{})DZndjr3;TYDdhgxx$bZrtLZ0~Ds9f+
z!S`wA#d8TdX88|p*QNCGK`sO>+zdKpnZX6zS;}g($>c$=#@dNM#2YF;5AxRPlL~0l
z8l-l+eFH)Vw`U3fbCtY$*}~ydvS{2`sUEB2etB~2omGcR*!qn$|7&hfm~RX!NI(~n
z-Oc!G07<|r2^k%p%#%^zGxYeqsgXv5H#Q+h*;T=4`7CZGF4WY%Ti=K{c7jI^y+36u
z#XlttEuM#Rp{I;-?#^Ew!t-dWMbFCijDKl}rFZ*wk-<2gWi{B#ttcu}LxGx$Vykp6
znxvz!3sj%2a{zhT=@H;F2gF!nWtKAV+_H;Afu<}h5;z{fOTnkNoPUw!c*(;5(Bvg@
z9jv*t3II%a?Avh|1yre8g{fBO7a14s>z3>f#tEdo!G_nv16Imc(?w?@4tD(mWFSU{
z>u^+*CPB`2w^J@2Gy|Vg;8E>+{)6~sh%ERr8R-F|wlaz=%9KJA@Snc*i|l<>p&m0(
zRnJ=)u<eesTCo?C)4Mh~3U3OS`yQt8UEo6mU1_$%b<yob|BQ1aUZ^O#c>f*27wJoI
zXKe-^z-Io32HQH<xxxyORe`D!F7NN(eOf7n=~~v*X~|Ev0Fdrz@~GIh191^iBe^Hp
zGJq7ttsJiTLaPptCx)&{nq@19J3EJtB-$B;nI=)pYh=GKw3O39vO=nU?xmeeR{gD@
z*FS1e{b<<wx+r`%`a;)wL+**JXpZ{4Yl4ywMSUjudPa>V%G46w*u_JOwhe5u=1D9v
z@ri21MJ>o`^Kgrd*tlNzuwormx2%j+nwmi{0zMWpxLT(--sri`r!7~)A9&~_)L1P%
z)}p=L>aVc`NO!i-6A=XQ<rpAsnEf;=Mw%(|Y3~!=A`O93JTHBUdE`BpbWyU8f`ts3
z<7bSiL8{?s(d<1WX`FMGu^szl^2>_5FU64W{j#I4adPt+{3uVooEpR_6fsOy3W7}l
zOHOSEHxy^u-J)1tKJg(wb`~tZwfu}$RAFXRPoxfC;dJZ9s_iUPZYBC0u3w`T!~U*X
z$H`i!k?Tu+Rs4(4p2|#q{<>|YDWvo61jZKo3g?WD9n!(TJN)2X>AX`b3O6USz#*|G
zlF#eJ@ONYD5BUr}<O_vxTB%rDbm6Jy>o7gt2s$6N`xDSxOHPlrxgXyNJN;1%>z-0^
zxL3eA>qhf2%aOZ~C1B>t$4?*=pB`*4F{z1S<xMm-F0UuS>$EvFngQL#+BMoCW~S`+
z&G}+XRsof<aUeC7Rxe+};$<ilh!P}&ZC7l1!ge)6p*&pl(>B^G>6={wF<o2|YZ7;z
z4l?dAiXQ3#ymx{Eo@WL^;3F2c@T7?xrR)Txi^_)s?6!!<{>lZmbl(l>(*j-T7Mmg&
zSK#W^(X`Mj<1J+7Gd`l)Bl}Q(r`&}_du=BM8DmX;P1yiP7jkvKy$!bmdIiO+?^;j#
zvDb+u%A&~9U<)oZ5R-GySrF96oh6(io0hHWK31Wyp7fXaSRUq^d2UU%e8v^vwFpc&
zA@_69or~-yo5z)(W1Q~vyL+z1`LEM^#2VajqhKXlT0*-X3=uBmwWgnKAtn<<RvH(X
z?58fxD<lUB5)wW%kAB))J^R!ur%iv1CTe0GaMyNH*2Le0O^~FBnJRoZx{V$Xld`U@
z6T|DARQop)QM_6TNxaY#hf?+h!6yMZ5U<V67JpbMR36uZZbVXNNaUf%I%{4NuCr7W
zF@;(w_U!YzR9v|gE=-f(TdGq^jbmQ{`|~JOOptg94Cdm4^#Ym($=y^nb-q~)*6gk*
zO+)hO$elrL$_=@Fx!eB-AliRkvq=>57@Hg*`3{bL5*+|PUuBvwHWdA(YE14t-CBuY
zg5enawN$WSl-7mR_=Ao!bR}Si-{n0zV2>7x!SlnX`<(!;thq|uWxZ)Y+2zw;;>dr^
z^8e___5T`Q?Q6jWU$<ThMl|GB`<@F*Rf2xo6EOOs+j(uzn^(igU)ooI?nDWTAeIt1
z@ghFbT>ZW98UmzvFI;2fi{-lB)w37wlh4;|PcyW#z2JRfD+pI?;>2^3#^E*Fz;etA
zciabU1Go`3Q!Gw(ImIr5rOPp1>TL1+30qFE%R!e|mp;SW@K;Y{qbK~Vu@%HmvYcHd
z1sF&>=Kkw1+t^rXG}Ct$4bcy(oZWApY7*qVchi1Xp*kQq>A+uPVWp|ELpXu`U1v-7
zM9<9a<Txk&7UYvO(`$<0dMSolnA9ef1H7XUY&CRx(HV?(@&hn2yYbgN$3Ry~8eRyf
zl%c0c3_t~M1t^FG4-(@Lo5eUsK(+HB9AmJQf!-dPt|SF<4H38?aLg=tc_O3SmPn_N
zfs+TKPh^$Dr9b&kU(+-35K_-+b<kk^SHdH7@ml<<9s^nz?kw=Ai!a{8!^$B3<A**~
z%FCB9OgK;5U1LC3G;)J<t`vbo0ZO5NcS*T(Oh7Dt0TA&P+U3TygTpc^Akjagxd>7i
z&(>&TbbL#8T))$Cii7|6xhJpWk2%CSiQ|p&2w(g%+7_wTQ5xNHrW_ZI?lI8S({4Hu
zFnfC|r`5o=rapqjDv3g3?Uw4(N4<SLC!@G(JQ#WwH-H%&!@3ZI2z<^GKTmWV(}nBC
zCH7v5$I8zMC75VL_r6b+Z8Yk`X{jz}6G_A*fTGEC1~5Zm5Le;C@%yz&V$<%BEJI66
z%?&JGn0*KjqKhd#`<bEg)e9}o6Cl|8$EG`&<e#v)%*SCDZU3FP1(pEYA*gVsE9``e
zOiTNL1jpR2!p*5u>dcMVLE0$2Yg{LfuQ0<5)d4x+d}1cp66hn{P{*0C4DtAI0pb2T
z(azV@DW3Vpi?bH<U6<WWMLtn$eC*D|emc0w%oTM9`HooN0lAC|!D4XgSwJHhi;rHX
z_a&<18Bc}ks3Rw=CUo=#t*YiC$MaR!MweB(Lr@rr6}OX5c#Vea;_-R}gYi-V4H7EY
zyqD(am@~KdLqipRIcDx{YVSxlL)dhz*0cnfB?D9TuCm_>APA4vjRbb%>j^rMR`~YQ
zD(?$Ak%wG{=|5~<USsGj57}BQ*mi;23{4+moiIJS#3eYpPidR{X;kUgt({u~9rNbY
zu^L|Acwf+|_>)7lNh3TQ!^-S<e%xU@jIBIccT_fmVZn5Eu9LBMCF%?r=Qp8xf!GJy
z0<n%O`!hXQg}5F-v8;&;DFPG64QV<(?Xtmh2WPF$wklkwWnuQpCv(j}=h9543lQT}
zSb%9v1%;zKmP><|MiRTQ5}f)A504YJEe6%Fp=ZClJ6RqI54n!Jfzf(H@4ZzNi78cn
z{07vwOxxUbKmWM9))|Vu&!JWn=oTL|a-Ei}fPzC9p$kNIOoVGd8^jijnlv}AtVvAT
z09N1a*puTp-!bLIrZ@^E?_G#GnOAA33RQ<QS_eCTt42quai@nOO>aPrb0xaN1bC;-
zuDyCm-k`0rDN-Xr=Ol8U8me`mA`4qwpv1wzqq5QOk{x%BcktgOvQ(yOA4hx^)-C$_
zK$>1c?@Xj5>5+F4d^!PfdRfjJ%TQg1Y01Jsu+dEt{TLcZf)bOIjUgMG*ow?yr3BcA
znM%k$qT~&o351ZnZv;pfi^G53uqXcxo3vm+_jF3msVzANLT#QUr$-jdIEc8v-0iEl
zR#ZYRY!J+;x)c@v6Hd6ExC_jey9ZOh$j&%}aYUe5*ain8v1y|G(0Xm)Irat`Y)<^C
zU*m(gZ;gSrNms1^&$L%Eo4Ri{0uWY1I5lPSEL<gtZ6i_+z8jwZs-?%Tz_+II<UUt_
zynJNublXMpwZ&Shpu`S14{T_W-J@Thsy2wA#uhI@MvK)Z)EM<ko!slW*vO<BtEXyF
zihsRrC_}hVj&sV}#d~+A!L`x50&(@Q5tpkk9<qL05zkvgdT9>$1M3sjIdq^;CDs7*
zB*uDZIJ;^bc`o8SLtm`hYmmr%gqt4sfUjU}Xbx^4uR$7o`dxjZByi~vsk4L^w3Pcv
zPiZ<h%8E?o6}t7{_9F^rCIIRVfbV$g6SyW|Y}?1uW)2zW<WM@QpYAuB2|VWHzcCN!
z!3_3(&PcS^Zx8T6OxtW=YC93ph)cE~^cb60taE3ziaLES!9Vp<KRKgZkjXjdvzb*2
zwq2PdrUPv+`?GA{_9EC!_Jf8Zi}>rvtsGNsB)y<bePtV;jep-2f4#H9eGgIw0>}@`
zti%K;zu4x=u?7A?q1*m(_M3wF?v1$AcK+4W^d^rp6uV!AzvQ3!3#RnnLHc!X*^iGc
z_$xLmk?x%XP>6h>)Av?k^Q-tad%(Z4L<TUj=>^)5-3_5LJ#xRhee-Y=iZ>^EwlWS+
zCKQ#%BeGuy?r6ZOV%;2WWljB*CI9t5<}VHgGRLK?#R}9$&*D36Bojq@M-Pci5}qpy
z5<6yQkj3_OtS<(!s6|1$=%kvGGq7C5d@+Q5bl~LFY3io3&GHC$dM(*<^6j%>n@oFV
z*Ajj*a%<AboN__lA*!7_;B2sVNR?a$DhT~*Bo_?|lW2-&*}8r%MqRLj;u<scc+_%8
z(glww*V?^`g6(#zTd5^j2^O$zehK?>X=gMI!3?buhhb_>?9aDr_|9ka{IYa<^yP4Z
zv_@-T!RmFs#;>Or_rJ;kVAH4n2weKl&l@=Q_uslX=%@BDUCGqcXx2D4K0x!boq|lG
z{F$%*+5$_Ax@5l2J(2+C3FKi2Q(WfC@jz}Io0b1{V~G;TTJQpZ0bh)KJB;GAZcoP1
zlV}UkkS++bvhfOJE(3YZcYzNRG1l{izW{RW2f|yfonnNJ+?gi(-Ve2cQs(h&3=@Hs
z9-C5&uK6G#u0lyr$oSsSaA$;A9eSk>d^o#fuN7jHIgewwm$7(DNPEnK`&z%+NO5ex
z-dJYT?CjaZm#%7b?;_7@0`w6wuQN>yulUoSn({054g*y4_rICjf8l$H{v)-mbo%r!
z+}~07H)=GP7y$rxS|uV<Sw}tXhy&5hUt~uc0CezWvbK=s4<`x&{~2uW&tCukGGFuG
zMe={dJ4?rI{^Lmi1a<rqU^9Ld4p4mvAK<}2X++!(0FgCWU$2q118aurf05Wvv+~br
z0O6QObgD~>?6?i=o0<{mh0H~tz=!6q1|K`+&FOewkF-jaIdLhJAJ(EoQt(f<5WZAD
z2MNF?dAKm_?|+d!hg`%tcFdYj78K6g*GP5QI8w&VH)f;R3_{7>Zc~1sA-i-m<bUZ^
zRu&KuI%n;$RZK5V;~X^uRu(+$a)a%PRt%%=KI1euV(z<rC4$Fsa`Q9O5RgB$rz@d#
z_j^o<wjBzS?%O;f%}rxg&A~dnu%!5`>))h<gg3~vq_Y$YO5iWKO}NkOw1e466u5fm
z!#325QIu@kSNp_+wU1)#iZfz*RdMfJsP81X#+_2Bi;|=Bb_LOnJs<`cjpI$4PEq#Z
zjLz4^YrFp&d*2<`)V8e)0wPVMOAU$?DWV91v|yo$h=|f5D$=E@6bT7}6sZCV8xew3
zkrL@rLq}0Tx)gx`BArAb1X6sHeeOBGZP{n<{q8;Q-FM&lLrG?mwX$Z`9CM6seB&De
zrj*a-Rz^aecMn-~>BV`{C+uW_w}rw<BnbH|K5uC>$B?89HzGPt1r(XGxxh*%oJL%2
zhn4C(k{c|<$E#zHslACWV3!NKcvuZ%<&@coFe8%8@UW##k{dC6+BZg>uaH>Qgb?e=
zD0$wmSj$tNc<#O|^Q-$8ML!4yg-I`=<1zFi0n^$*Jv9y`pveF~SwXZZBHn05aN}(<
zpJFN!Nk-R(Om7(7@^#~^85wPV9>hOUJavu{Qmklv8LAHTuwiK>Y$Z2ML<vZQ1WvI*
zv8(r<^xu+FoP%!;ez+Fw)^wta{l3`&H~N@2bq=e`8(0_*{5&PCob5*Ah|+BSjqF8L
zpPE6AlifE57d=!<f9dmLP4|=wdf&7N%eZKK`FI=y@mY6=2v=_%EN{fdd}M(ze}h~3
zU24Ds{he9~M-%dd*2RQ^wfz{Ey+|6+WfolP+!jycbG&=FD$FU_Lor^Lr&D2dx819j
z3~537EVnD~Le`bI8_BahnCs}M29~}nI83|I9jf#T{B^-1(?H&rWA9zejvTpwS7|(t
zH9)Ms6(0Jyzu^&)X&MsKpw_44!9}*j6;*{Sq1_Ffw!9K_`Y%@+KaXm&XQ_^m&302!
zk=n@?K;bTAY(za*4uQiaml>ySY94yGT7F!fY!h(fdeva>D1XLHt<rY!OXJ~;tP1Q!
zfg9S~?;tx66BFz}G6BXwR0A$9`^{HMwcX)0W2$w7#};g7o$jNLNEakYb+XXpF&kVC
z-Cr-?g3x$}aa|3!Rlx}l-L2Ec3ac&}SuwdQ70|lY1hhVcEmb>kYGh@|KfV$<b2%9C
zJo8}?8AH^@cTYJkRbgeI!4RPaeqyqPh=5L2pAv?BI7VlX&|iIc-@dGiw^&Q0SglsV
zFCDidKwD5zn##n)$v95pGibO#RVZ5aK(!&nXy}-w^}<P&Q>S}%S_Q9uo>dT(-YKNl
zc)#JxHl#T)g&G-Oytu8O6GWAP)8OwuTFDW#FwD`-MLIbToyCT)6kkewYxLl8CKsya
z9Lw$k!Z9M>^eQk#r30T@CPa`Kt|I#aPh#b=FVD8Cy*HJb4tz}aP(dMB@6ZX+V8#zO
z!A&R)SVi~74-?rl&8cr-d#FNwj|^`Q*(UUk6WSZQa|0i(cwVas2r1ih(z;e<_c5O9
zL3*%ExDDx&Uk4T$b!(YxN*8!ET$)j?>Un1e)&y@2xUDQFiVL_E1~Xo+9_iBSe#5=P
zbqR1$A&(|XFfH9YAK;#VNT3A`SwRT<>9&=$?M%%aw<@ap+%V&R$WrV}tFqX!ogC2}
z?k7WFIf}+7{~Cbg_`fOoy}c#=00;ez1pdQt*u`H_qTrK%M~Jrn{*Oe7q;sDj9dpdB
z7am0yFyuToQUr|?LDBM?j`uYB1-GV<<;6L-vJhQJ9;GPW(HnMHLvO!Wy_U8sM-RRf
zT^Ei}q}@~^0j_cjVTw|K5Je0EY*H?#U;Q|iWljz*?|7rC(<~-sxoZD(s(G;9#b|y9
zwj|M^l^BE*vPpbRuN?h;^$aG~G`=DYW>V{^Mayr`JF5G%Rg|KS?%mS64&0f)5#4az
zBPQT()C3U?9@-*L@0UUrD{y_+vBPE8-q5JGI*%mZpIUk(+H<UTx8URG{0&tpx=*m-
zWP&~M7D<k*8q(On5Yk<Px>l2(Crx|f>DZk(g|m(>hx8&v&$Q>&@;YouM@rG*Vbon9
zx3XWk&4{#n6cBZi&?A47aQdDURx3r8N&y|dwVm{~3m@=tt?VHP4G{XdK|{aN@UOjA
znE|rBH&aOliy?|G_}A)u5kaTV5g2k$JY?iQK5rAZ?1&Sj@~t*U#W3n?>P9L$n_YOf
zavJ04gwDLcbM}e(GnE5-j(lM_&Txu7&nf4!=Gp!_d3<WeMCnfSBur@DxCPH;{La1n
z)n%)vGqC|dMcfxsHXgp|<e9kP*%q$Fgp<Ep1*tM?hJ{ffM2|eUWRLuEIm;C{q0fuw
z3trMn%JQD2>vo|W&WxhP!hpmmezg@g3dPl_3+53GmH_NS?Iy8(9;aj>Q$DSZye)GO
z>nNU{hbdD>k!3*2<(_KZ-G=>S)~c$+w%ZyFdD%Apy>0w~Y8Na0_QpXs94{wag0!+$
z^S7I=@kYurCct+RUD<&V2}$Q5dI)5`&UD7>dfSP|qYqG)`|>r?eSJjQ>m*txH{vEK
z)k^X)3Geq4b*5Vka>ct*95=jK4dY~de_`y@IWyJJ1HSU+I_W%Ij<{R^7)ERj3LumA
z0Ud$FvQ|`ET&=B(96XlbJzw_e7V<^w46CECKMz2rSdJ0^DwwI9ft>sdr5=af>d_9G
zgrC4JPDDUhD&(2{(g)RKFq}?(7Pg~>q0^BDuWtzT+~Z<+Xb@lRsmH$;9RG^^aK}_f
zv)rlHbZKI@D?@R<QY<RV*{em}WYhH6okwSXiIUnqZn5~dfb%#HXw<_?_a%fdPe}t6
z%BF*4<*A8aPx1Eb8uy$`my%arY&G?v?`<*^bC^D6zkIdidHlmhM=)H;4ao61iR>;r
z9&)m8`MQ{T;A`_F@j33Bww;X6=WR5&%|WcXU5t93_Q$eCjAH>lyb1a_1?&s&nIRNK
zb5kY4A;x7TZe!~#;KBccF1nwPo8ItX>x%k2MrBaBsP$qQtlGcDr?(LxWF3Q_0Oi)t
zuLdArI}~p|yJjif092_KyLr5}06(oCM60b~FS7yyE~p%zE;>3(06OMF|B6b+U%&IG
zIt#4PZokDA%j6K?<Fq}St7&C-^L17EAXK7vxHBN`JiuD_oS+#xrA>p>BqU?@ASaU`
zLiV_>hAK-S{cj{>!!PHqrJ#|OUU;OjI@DAA#2xj^sxhzcQc7?W_Y@&}osNk};p)k%
zxrxtCCi7OEKkS!tul>@)@UJw5BJ4`}coQt_PJ<$m&xCjb&v(|)?DT-i_4q1w!PpDt
zsAqcN53an_9OfZ<<+>mRz^S2}P%Jk-(#EfQQa+;iml50c>Hysv3FjTJn@Jx};5>Pj
z_U67Sx3$B5T}D1u0y?@Yw|Lr#bW?o6)PsH(I@*C>v+kr+#AR&rZcJ;KAL74PNI^B7
zfA3C{CYm98+Y#RZ0T5uF{S~tK?dJ4PA4}&@C}9unyz&>4j|>lw==8o->?&#Fc2ekC
zX=S)XcOWrHYR7AZ%w%t#i`NnxY0Rd&m&Nd9z{jebXyc*WN7}#IAi6;`4-B10m8l;&
z%;}*0l_puQ+ghkwqW^5Ma@n-*G6YD|P7?1xmoTJ#2ksLs8Vr3~V;ZEgE4zl7Jdg6A
zjla@pNJ6D!s@b<nNJkKFpu0i77Zo1wC)S7$%=|oky(T?h`p%7~)^}o7qnEW`akMk-
zV!zOqQg_`!Id$v~XMFE=Y_>z4ouooO+)L#n?&g$}y^}CCtEEmFt)hGT;+NRM8P?sA
zPcFir$&5VBtZVs3aP~)X(Er!vUou~gQklVaB@yrxa9AAAf~xTm%-O}CjD{%&qDfFd
z7Gfj<bl~b>9FVN^GAlOwS8;%BDEEcsFq3(Za{II<j&4Q1&-;~GUlB-lFLqC}=HYor
z+BP>eIURqkfZ6va<L_VX<=@}?vts>imN&+u(kgDWj9^+8w3*meIA2=2+ro}IRHYWN
z)~u}A4%8VR0(HhmfaJ^kTU+{vkAKBnfK%{l2Jv!&pfCzb(v1EN#e6sX<RKS96D;I!
z^a;P`@)vb}OZIo#U7+=q=00yKpt!vQKFf}Z0?F?OjyZA1Y#8+>{n{Cu?v47ljQ(Py
zZyEmsD-OW35&gp}`^}E>zx7_6(FmGq7%<I9xi>Bf=w3J1S8q+CDq!SC8|~~QuDTOn
zX>#Z11T-B0plphKmE@;O#VC1D4kcl<Wxk-POaQr-u)spL$OD+Q8cPcIlOHH(kVw$i
z<p<dS&|2$9&B$S0?CEY#fT5lI#^?WMX7xYC4gar4jNkpsw&3j_dKPXs+09HoiR*eY
zIU(rm>1ruaz!5gv`;o3=Kg2yFr1N>pL7IGeIvXW!TH+BrBf*1`1)WT9IEW!uk>tk4
zKrk6I*5Ffs-E1+EE0=lo#m?5o*qe7&^uUSMpbYVbjqPmX_3n^~@|C1`;?*^0P01J*
z>p9=p%oPoc|CHDczwnK|`$y`}KQ<fZ*?dSkhip=85Qbay&k*Br4jZ7ybgNW0>7@1h
zu*mi)PhXED70-8_M&!!7A&--E)rU+WeA2hwqoU&)oX~9)7D6W_4<Kb4SBJ2x%SDr;
zMXoxB_Eq4;BMbDUWAC1MQB~6%{ZQg~xP`))8OV?|%g0UZ(o~(NGXq)507EC~u%r)y
zuWLh8zS7vZlkCr(Q!t}nlc;~D^3=$|DP>8~So`L7iSX}K-u`C1Sf&HTL1w^VPA0Za
z>;WCM*q!4YH#QPw&$J?$cX%%aNC$U3N56(fKyk+1LU0Q_D~bhqm7RW^h_+0Xp`dSE
zhd+JrmFBhi@+d7A_0b1Kmuz@Xy!l}3NeCC0<6L+9#0U#egj~ml;f-1;E*hH3FRLny
z#>Z`5P<M$k-awgR`3e+bT|eL9BRm1CEYtO#M_!A33+&D@GnFI<SzDqQWCJmDxZ}g#
zV6lyRo-Bd1I4G4V<*8p>>mojmFoYLE0pQ!y)MyE<@v$t*L#m1ko_jux{>a_X+X(zQ
zne}O-z9q#}!N8+uDkHnPdU&NShw`dq#_Z(b;HG)TNJASW)7ENdZ=#*K-$WfXiEM<0
zLCP?}S8$TeAH;7i^6dMR5)n}Fsw70I^~FwIXz)|)$)`K&V<Tmv|ABP>eV+e~c>lw{
z|8IbJ&qyxEla7<!iMR0MEremp!|SnV^k{3z;)tx1eL%Tht9UQ{zC8V_%n9wNI@W^y
z_N8V~pyDdH!|Ap))0OGCyOx8=Ma?_LnuMF(EEpMgI8A#V9ZnX@HL|3jGoKxGy}S<P
z0r|Zz6Kr71OIgGaIXtM`$d*qHfRo&FbO?#tKS7ouWJaQ04`HuuOUiuvk`!o^)eTn$
z5usx-NQY@-h-S;$+yNH8EMfozfN0<*sQ1!q0WqByoJ5zb8Gyx5xHNo$11bls0melt
zYvlC{i(j{w&22Pp?d<vlc~=#nu|vQ8PCO)IZIMBn=9k;GW#63=xWw$~+g{b_ZW|D&
zL?Ll#&`*?5y27gqRE@toXB-i}(SqEPxV=iHw*r;ch<;<;ie%5yKQ|2;g|(saMD!0Q
z%=-RXVSt#mY(FL&*M|J=gb>_!R|>?9;{M0aF1I7NDPQ?rrSg^I@|ta2jR$fV+$IBl
z8L&O|j*HtH3e%RT#MncEpdtu0L=O@-f~wF-f@=<PTnF7+WZk&q*$SVRgue27a>r))
zG^)<u>WwJv7euGA$-$}@PQc63`&qW^cgGMaptT3kPWueK=0W0j!>ldl4J`%!Wb|F<
zJrV?XyxZv1KPnkaA@{&%tp<>by+{hp;KwUO6ji(4j9M=V+<!Jj#!DH<mn?>$A-EHA
zjn;A?@!O}Vip)Bsym^CPOGRmCMHqm-wgGr|F_@u~N`q#Sldw(h@`<fk<eHd`z01Z?
z;1k~e-7EQxQucr9J${nQU|~tr<3p&et9y|1HMcive()>T*dRxdO)}7#*Xm+G&g@YY
zHQ4}``EYXxFqR5IxqvJ#j)HKR*s_sBM4rDB`u8f6|M30T!BP;@B}Yu~K$q0OYR$C#
zTk-jiP~lP*F7wyO<jJ3oO#XYJI6ePT7xF*ig(L7&*lu>eDxDhKYGnCqH@h?ShKVD#
zCzJP_2`}pqwZF2?v+0bFqB22o9hL(|F-S)g2MkC?$;#S?%FQIWa!3~X-Fgv}?yt{W
zj4;QyAxe9+RuH_erZL2W8%w!{Rjm+aZG2pFB|Na^t>p?*&PcMW@=}fMarqnWWRW_r
z1t>dovJh4_5s_`9Hgznmz>nQnXZY}opz%?imSFYQo(?ZHe6?PX@gzE11H9m*auYJD
zq?BxcJC`_haIYKJhY_6(W}1#yn)LY{8|G%0)ZO&f51#!3qo?*@Xc1!D!mE1}4NWan
zwUXZzb!!}@EA52x@GMORD;(d6f!29}n=X!$P<QdOA(p6w+NHNC>O3@Sen&I^b~2+#
zQ+TCJYbuoq^u1@Yr*a_PSq_EwKnI%LW}`$<^%Zqdkh`~rgnHKRS$|p#<k{p01yyQ*
zz{A(mV8jkuvtHMp^FXh28V$kCAa8n+Ee8Pm&SWT*xRJF-!^8ax2&hK-Nar+u*^J+s
z*GA|dTg1T|E+XgZohFEazCe%!upog7#pE;Snr*oUnGI}#AKjqKDugR<;)YaZJ|Cs>
z%Tt*Fb3l7h2W(lR7^*1%%vqDcR{*<N=6V)#(haDy$v9E_;v9rtQf_2o#C~!_2<{aU
zcL}m3Z>db(`2o}tRmT0N&aoft(g~Q!XOQ1~YY46liMx<yG6R~&rUqQpFw9veURfJ<
zWB-yv$rE{V9L#{N$FzJjjHy953lmGW&vx3WnI8~H&l$KwEll$L4%aW%$R=6P!4HFq
z#FQtM<+o2D_9XjW3HNH4TBH3+gLr$}F&NBM9JGu#Xx?k;tq}}=vGHTC>(gboQ7WAW
z=NUlzu`zU%?JL*rGQvZs%nwPje*5f0T1G(JA0ux!shewAs$B~-NoMk4ghb@D?JiUW
zJDHn{koJ}46y<RP4~pJN*@C5TfvN5;v^`llil=%yp`=A@cgN8BDJ*8`2}zkO=JW|j
zBd+T9xj$)toma(Y78PfZic+}pl_qfL-NV-=!WV3=8>A2iCLu&|)4XXnZ+Q{wdlaV<
zyNIh?SOAOKJwz8H`HO2c25z@pdLVr7-s@hl0?6(LUa~5vaGfK|;QCrBZH0QLn?7Hk
z;#j>xzMwoFPqBDqH#QWY?X6%1to+ZbzP5KIXXs3bAB5Tt?u~3S9ufo=3&B|$ZHkUF
z0*c)O<UCKCgd*Yv(Tu7c2%*+313$`gaQ*Uk&ktV#8`(EtUMA4Y`wRd6J9LhMrJ!)8
zn9ZXfbg3<>j?}P$EywSN`j2ESBZt4zQ1zfKND^;3k|Gva?_C#1K!Va9cHoh=6@iLj
zy(zSH#<ncsuD=I9fAp7pp8^OhE=Dl`4Q~%rCLE<C-td@KTZ=V>nVzJxIFLWP>Aa71
z9}^Sj+e0igoh4)nd}|a~U%a5j9VK}|>bsGFt_n|O>pGt=UA5ik#UHH!i&J+paZcJ}
zm{YMH_(y5f9?Q^?d&P1^t68x+ALBJYxctIc)tM2vQ-%;(m1C&n!|4O%18cPB(FBEW
z8Q2*b6xep1fJdz3id=(LOptdVsfHlgCIhqQ^iSDtkFq-&q!Q;at{5UcfNN59$*M<z
z+nY(*cGtOp#c)HsGuADQxN#Kpg%;*70$x7?c>PSEPw%lf;!TGtV4_IdUN!J-W=1F(
znQ``eW*xVYpYM+Y!Ogw(phgjZsS7d!FdfkI)E4R7_ollIl+t~FAn13O;7SON)Oc_z
z1r-H=rf-i^j%>+N!feTptWs&Az(pBaX5bEx3P1YiCPIo}yxU=$JpJuKfU*5<VqoQ<
zDRHLIo9#|B-^^y#_tS!LlFAxBef)p;?D7J4G}zGZi{U+V-9geRSJRO~v0JtEg*{Kb
z`s9UI*1yvD?a;LrdH5dj3R&LKVH5@x`A7l3o@*+4S;NQt^_(i!<4AFXPCUn_eP??*
zBgZes(g$lF(KHW26%P<=N$|`O#5kn1A<IR*Ab&mZ7-PJjLxc;C?e5l9-DyhJqQ#}$
zgc|8XpO!Ci8=L>b_5Q6J{(Fzjro+kH@>C-+0@zwhl14#~^_oCL$I#D)S)Zax){M33
zw|&)r_|M<^^X}ie`+ZusEE&l`z(FU312!s8k%j8FPs_BjJ!%jS%C~{y`yyAq@#eh|
z-+B9{=o_GD`ru!>zTdm<e>QFwj2J+EcAeM@T{?$nq^9Pf1JLx8XJAbaH5^91Pn%+=
zC>v?@d{HKImQl}^HAB}o3y||SE4B{8(vY|_-J1&CyMT`0EU*tby+d<3yMC~oWUw}a
zFE08(S*Zu$_LRr!HllrG3+HE)%w6JY0w1rBm`i?cYM%<Ri*}S64ew+^k4BOSlp^RP
zDxHt=MBRw74Mj@~M$*8~lyQU($er4|0*{Y)^IA|W`)P*sTfv>oG?yo(Ad2!!DBvTR
zR2aKClTJ8$pW^F`kz2>!e>!JgnLdNEF4-FFUl_IsF@riuq1z!udn8dBm`Vx+F!|kq
zO|#M^D`3bxz)n%)04h<1Bo`GB=B&e))_I66zyU*pgKwKhps@(-<KJ8`Q1W61u5H^i
zB7xn&9a{w=cjW;$h5r4i|37A9EJ2smP4NxK9$p8#Aexy3BP|)cGU3W#VZ(9Y^<QX^
zg@az+QQgdQBUdU&F7Tc+_!if^j*{AY(VQ>CpJ*Srl%`AI26s>h*{ITE7ulYYQG~(r
z)^E!1fBvHGh4WRYzX<#Ox8fI|p);ckGHXMivM9AdkopF~o{3{`dF`NS$Am_IYO#S?
zPw%uar=g*{t4(|UCco^tThce!WJJIY&29RP%(V7@eNv?N|3pOlzm@&^S@w*)4Z^>G
zs}e)-l9T9=a&)-u-T`%!0M{v6#plkZCKCoiuxn|)HCnNqwGo05?RvtC>$f9!MjNqd
z%Zdo~W~2!m7c0MG0hqhZhj)p7-ld`K)}y`SKl}BLt|K?;$AKATg068{+U^0sj|teK
z&??Y1^TH1>>Ir4cdb1IQelYnibmn3YX0eN%!Yh^3c6#n<X5^a;v>Z;E90gRThpgf#
zLhB2cJ~r$Ghj52%EqVcam0qmIB~}bJlLP>V(fsTm&n8F>F}MPcQKKKZbTk(^=LXBY
z_2b|v0nH%_FtaluWM!~}y6OUD29<&#nU{di1Z*fsF|q;8MoVA|*J7pDJ88cMdj1)w
zg1s&7{UY@OjHHJgQd^~{SPd;v5Ql#v+E^Ukd~ll>=A!k__O;)8&1~C4<z}QB#Stz5
z2F^+<sG5qhq-6YDdH$it-52fv2-fC#33lT@9}{hpA*5jBbls&GASobMMNPZ8^*8;L
z???HKp>}hbk>Bd8f17{)A0amPqf=qKVar6i1mXcS{4%(i)LJ21?&){3W<jGLM{<he
zUF6~yQ*eLHyRKcpY**AgM18hT=K!nXE!c+*a?m|z_H6$qe@0dY@Y%rqEkVi96i0MH
zX>Xzg;L;ewqn7<}ZO1(R@m=ma8VMW9yVa|E-mQGzkr<?PAeb!>Gg;)a*qYAv4z3N4
zNLI}WE$T1WyN|A$vzCEAft#ycUuDJhb03ujn0kqNERrO29kce~so8v9GkQ~;$_{R>
zV_>Od&ueW7FMkJk{@yy+RiB@S&e8nENRjsKJSb~Aq=sxQg;InD#!)$*mcEcZ?$D*Z
z=@Xxa3mRNfcq7r#myWA`X|qQB0y-Nii5)z$uT*M$ygwFjxz29ZKc)NEbTs_W8oM7p
zFY{?xY<eZ7klL#{u{7_IqgQpCH|)`yp;_bS$q5X5A}T+rIhsz?3hT(s34nxe1A+*}
z0+|giT|BB)5EHE_)>nbv?IIADihX*0dRVC_#eerJFTmqF$~jqHXk(pBm899)`Ojzu
zf8jM&%I{cX8D-!K-ZxyOcqRhjVWk`_`zDW~$JUn+un9+sml&%`OJ$Zx$$Y0Gefy!m
z#~sX!RXDX~3_16yAhz}8A~5OKzKJ@18-XAkkWBz(nCZR+A+gS*wgk=Q(|#B`-yT=-
zbBp{L`UCtC^jE+iI=tOa6mwzGKhYXme1ePoRlo86&vu+Y8v8#ac<^t&`fsQbe=lrn
zg1TTfl~$u61W7+|`x^EQ)en6iC1yBn0u0^aJAM$l3-hD#zPnA-_)}W$2wh~8BXnk{
zP9AKUG0D`_3TBET5o{3$FioH|Y-b<vE7D}X$p5j;Jz=w(09N(=Ke1=;kak`?mz%!!
zH;e1ePB|q8;<;s;#hb+}=KwAk>V?|c2^^iQN42wE$MfTzo>8Ff`L70xp@7qe>dJpL
zo^3DI53xMDo}HLzyexuw0?4}mlC~p17|x$LxPNvmC*K0;>^x-Yi;_c+Wct<330~`2
zy-+)SUAVZq^RcIYLHT(`YzO%H9j!dLsN+Zu1m^;bL;c$RcW@{}uj|2rxaESFp82;Q
zt!)&~iio=J<`!*l{q*tEg8Zm+1!xRgIsv-ei5I&hdpijb9*3>0EXRDDaVdK_e?#$v
z@;z1PUa_aeCM`7^;ZzWTH=Wnqw;-Z8;HDUW<Ro4lQDZvfV4h$BFH+gHag6&oNilne
zKTF2}<~bn2b~`Dpfcn};H8$}U&vJHyXz8%x?oypmyB8*57fv+M3yEa$<XsdS(&VIe
zL-%~bcRj>!Of$w+6vT0N&)Pu4i;68ZINJ_o#UomuKTgu4>*&&%7n3EucBtzn9K;*l
z1CBx*rhGQFcwysFOm5<oePpL#>OG0r5oqOtYbm$Sex+uq=AgqFK0I4c*e}TEDDq>o
zWv~wHfHyhsQ()pLH7DS44|5$h0+|{LhkHz=xwfP;6YYFzw3FhqCIlhn=dPv8S6I8z
zV-71um?Jqu)h@O8ueKNkj|@H}U0;OlyoHVwk?${$+wELAexP?I&dN8wu6i3|sM!0)
z{qB+YvN(Bf2I%K^v_2o~@kYz{@Nt$zFT9KqQKr}#dgXaQzza7tw8~!FPU-cj8`AqT
zKKCm>U}0<49hnIy84+#~1vC8U%<+<~!1EOVREt;ms>r$r{Yr!8c5n9V?+`lD#!}Oc
zs^#B|^?6FAHN5i$-63bx0Rh3JYXM|9;2hre$s~$#ZtGzGx>?^RlH6=|ubcP%m-@T8
z6)zt&sp2}qN<-I<kRsF66Mabra7Ch-BOcPBK!i@w^Bv9{UCi;2U6CD1GL@ce3BDV4
z&r{j$M){q2Nco!)#|Y&0Zf2sZ0X`bSSV4ibDD!2t9Tok6aD12`Bb@Mxn^Rk$Fu~nt
zU+mgmd;SgdT?maVTJs?AwId;%Q^b;KL)EcS=U%H84`=(_(<gTCuc}JWb`-Ir`6P7g
zO!B@vo+_A;kSV^nI{AFKBIThbtLu<}QG~i7x3hoo*pxw(hQt6o_l5JhHid=`Uwi4{
zvxj;Ybnn5h5!I$J!#5fyaJ5t>^0mQRF-9#COcff+<`}C*H)Dr1PKVDSQgpKn55nA{
zKELqXyGR|Q?!@`TlXS?k#BRK=4t0#^=IahuE8JmkzBB><d_p~pvna@&^%wf2J<sXH
zZ!9XnX+c=(t)0Qn{n{1#pqJHEek9e>i?>~rF7Z}>0*$c#H2+kP!dDu1_9)9jNGT?`
zLC#NSD)4xPp~>m~hoUd8$uqpIOWw(tu)ntF4)cd!e9&i!1|)UzDO885S;s`Qh~{9i
zx(PJpCRL=e+G)*zyCznf#M$g^A<7OpE|?u%wkr1sk0Ks)2+8)dBIv&(AIY$a(va61
zD6du1<4)qR8XYwfa$@1syL4VbUo#hT&RIDFprRN4Qc(0C#`yo+WKlH)GWQ7dD2KC6
zv9tS`SRi*VT0RVYTalC_z$DN$Yd(DS@j=qK)&2LAk3CN`(Fot-A@-1@5JNBy1S4D<
zTJmW*OJ>+L>D==<glNtEIy2+r`?-^__*rd<BMeQSj>lT7s8EtK-2R&KBdD=5dkw9C
zE%ODCRETyhkJe+uMXDw03v_e26K>E%sJl}gPHIjan-&UmU2y3S`f*1k1`tAsPo^iB
zsD02KWG3tfTLWR1Hf80g{L%e7+Aky8o*z|6rnyEL6!y<xf8n(4WfXg~i^$`Kg5XE;
ziQVn0cW>K9Wi<}*nJLT^dZ9C)%Dy?bBQy9-t8{QH9nKU*vPSg4xJTS>Ofoj{-}2Zs
z`ubq_r2%wOH%Vul7ot>mVb^mQYe7XWZA2GyY3~b2mZDiVil#x?1+>~(R{EgUhG6Nh
zW1sS19@BNsw}9<)QXjrpQ&G{*<8U&|moD_0`=y@L$3#Yw_Bh#z7?1a8G_w@CeG$pJ
zekE5|^_;!(<uRL<S%n8MS>OcF1zD}yse4<q=Ys+W-)Z0W31&DwE-fz_e#JS!-#*}}
z)%?pOj*bJFuJ_ya-n(|shb{cT5skxahczPq5oO49Aa;@6aCsdVrVqr9CPH$hkEvy7
z`{Q|}kXNTeC9_RXq`uje@xummL!##yOSX}Wf1?&G|5qgPi*=_MLliL)R64w5%Uf@Y
zj6S$=?zpp=f|WwyJj0SaeGHdsO*r#RzWwFh3cIFifUtQRl?Kj&OJ17BwVWdwO-9u&
zE{mN_nlk^`e=X7~VMw98yZT8ax9HJ5heKkk7j1ifLm&G+%<4bJX*Y!dit!krbhbDQ
zCH5F>%e`M_tq*HP280quP-P-$1n`oZ=r<Um>?c6+2ONkcuTnt!_jv-#MKNP$l8I1i
z;|P+Pn8R>WZ3J{mIr)!!5)|M5&g1`TOk&{w1-w`tIfZP5gdq8|%U=*vu?8AGQ>|`c
z{&c#kl9G;TTn9oGT9@RiBvrbVTrUuXaj6zKw&dmyRxQV>(z*L=Y)6eA9BpCQcQr?0
zH0;9?qdMK44xWX6lNSu0>0Cs+Nyv^xTTvhqsb~-=e*tgPF;bzArPmn4&LqN9BqeVa
z`*p~=)@I&^#6nnsC=V<e!=)+pie#6YW{=;=z9&`9%EoqS9@mts6{e;q2ZIZ~+;6z#
z4{z;A;%Nqovrrnq1IR}3%t1@oUK#P^K22QM5RjXxx_w<0uApP9e=R{@Y@e}psZN`)
zcX44?5VeumJ^85_7E!f-qa&TIhkU~4shCyN;$F$L^FpB(HY|72n?gPOOd8V}al(a_
znmchTY1mQc2<QEMMf=Ki#Z+XUKMU3|f|;1Tk%6Q@Ih3Hm2vtg{x+4}c^0BB;F_6*V
zd3dvz%em`?_F-64tLl+?OSvw4tg_}i^)}K=*O#EXTo|fkFa<km10mQM1gu6W4^<{T
zIx8YD;7}KD@lnF`;RR<qo06>jk2VGeJ(4i?K-0j80j|{tEvdVe@tzpmD%>nca#}IA
z+B=r1?N&6`Lhqx5nw$v5uxSdfYVN@0KEw__+@ZG>_~00Nf3)W^r;PH6wAPvE-dUDC
zPPUw>rbL@;qWsk3V^q<a<TDlJgS<DnPTcZ*{;aE2(|6~6Uj9!Q%g+<+hz?X4B(&m}
zb&4Q%JSqA4tJ(HGv9U~*E`GbTH11&U!*zRhHPJ!N7@wI|L5q-&<DhNAEL4uT2vwzq
z+MIhAyga;X?;GshXMH(%7k%^=mklT?{wp+_-@f<%9WQFJ`!3=DP`wASqyu^fF{{lo
zrXfDtxq-J=qPn&w{z};1!A{w-+C4PXI<qDy2XeAdb628uT+>9E;~gbr<jClmm+gB$
zRt04wcAAEA9xRqP)TXsls-p{Y<a!R=aS}$fg5fT_^#BL~<IR9t@IM)pKxfS-)s!z}
zDVrr#_GBf{9(m;@8&%2RU>_#xAZ`82#6}{Pu?ZRm*#VdI0QboVL*+d?PMtD+ZkI!c
z#+63IYkr~mawecGA%K;YCanAQC~{Pg%4}RHIa(e!TqL&)6TGl8LO2ML)nnq1BvvU+
zv1um?Ah<yo&IG-sECVI2-vGGwEkF=HTulU6C7V4k+yKMQ@-74Izs#WIWFYwqTKO$f
zV>-L36l=h?u|ah7XjyNUvV{SY9h`>jsRmKyL;;FwkVO@E{cFd0osS^Pf+$9A9>Uno
zWgG(huNI4K72>(E!ml)|1KnR~#6jxi$CJTI*3R=mg`n6%Q1%B~b2mk9EPMjc#@tU%
z1`6k-SWtjP_T9zY{PA3X7|qSl4Kf|B^2CEf6v2lm%2qYNCTo5;(IP0i+wlp|EWW?!
zA5OD9XS6qfj>Hd_9QVU%C~=uVe_?uuj!wgP=cqI|_xfoJ$5CJtYFUcTEN9uQ$SqAd
z=SXL*9&1$*?gp3iiG_eA#*j?mW<<+Jam#Et$T?h2`aLhcVWoA8%$jolv`d*Wk3O~(
zwuD6&?53OKW`$}YCm$gBsp@bOqD4NKSFY<Io!3m||Lmsd<}>;HMYWh`!a$T`w>h^I
zr>EJzy(J`!yq`fcj1M{aY=X~himz3?8i#H~F^`p6Scvf`iyOV=Htbmuvg<}=Yb~8~
z-AHU6=f$lCtE=UKk6GSSJr|SRTuG>`T;P<-JQ4JaaaMokaf1GVUaesuA}5UOGypno
z>|37#L3)ejHU))lXa6C)=5wB?m;Py1nasmT)gNlXkOTnAuy(p)>jvzX3EV}<rX*}P
za3Nd()2oMb2wc>5cbcy>898VQXv>T*;$URL$DJiM75XWaPXw!WrDphr1w=ety`>_6
z-B)p(vs~&)ayR6l(GlU@%=Z=e5zNF-lhBa>f*+1;qQgyaIw@<awwpE8U;gHYlbupa
zhn_t0;{GM{+BIWRQ^y3epk0Am2bwxEA*c?Ugs>C&TA*y}t?v$LiY^+@-*CO>F3;QE
zUz%O2qoA9Tytg??8eLux>Vjh*K@x0<47l(~vq-qr)Upq$HT_t6m)=_asn$njV>WO7
z58Od5;Z{!js+A&iq_e!B{*jjhb@4#Lbfw6j#J7MHfYbVFh6m<5jkxDK9yrH3*uG-r
zBW2;uBIO-3`V<YLJg(C&BGyhS2dNuQ8B}2glr(rd15)oDX|S*rmt79psXvpx{+a#W
zT0xp4mFW&!&a>J&1l#i$`}R~1yX;TvV!Zs+0D|S0ykpx(k|OKlY^s*xCcC*8EZd6G
zQ?Va-r7D=x?MJV>9p&1r?@tJ4);XueKYeJ@8(QMg3KR1CVnJ+ko)&hPHjQebN>`UW
zw;1&uUXGt(5qRuKLpvl<cdBXp2qm9-n0%GkhYJDLCwf1Q5Qn7%-hK38grzo3waY>x
zMs}%alqO||8l$)med8^0!<#7FIkp={ctwyJ)6YJh&|!+V9`af0yjX6snXpgVpT1x>
z`<Wq140Prr#>Iog{vJ>Vmv!z7ZQO;M;9CGM;aS~5oZ}R<l_f4>oQD|Wp?=@mwo&Xw
zd5s`}UU23z-Tr;Ft=II-_di72Q=Vz6gEB_|JBj1yCf({Br{v>e7XwBgu)F1si0#>6
zElgmP=G1|T-97Cf)!~9tqvRuJzz^imy|vX3eS9f|r0v3sVMMy4?qUSVcHVCAeFe_G
zszO8F@E*%^yjY#tmmE)Ae|euY?Gzv8=}Eh&DL_dB`g*$-Ya}C<H8sW;OU2u&2d-CG
z@g%he+>-1vx8vlz8_m#UG7uFKiT4-{`_gU-DMvS|p~Gte8}g<u2A*{$Y<T;dzV#kI
z^Cabt$;T&Zk9V?mGUwBWz;Xq;BjHD}Z$U$(R@M6SWIL||#@b%Dyo5N^6NPjlSDFlt
zW3`UE52L#$Mv|#U5W;nk!Yn$q*>;NV?N=J7RTk<Hl!nYFzs$hco*IJ)l%D!fAtz-a
z-jFKfdo<gn<m9zmPJ_DUT^IE(u5^Bh>u0AYrjNFdv%XVzbc7`C#N>@EaO6&sE-Z*Y
zH1oth7vvWbsrZ6i9tF^R3Z9)>4{BJ7nb&}zYc17q0_#DEtdXSNZvle+AY0$0v(8^(
z1HpPU>!e!+LOV1iD}&eCwb(sa+Cv4aH)qZ0R62wG=^$CE?rB%D<_BwZl6P1Ro44LS
z!D`1OWGr=2TR8YB4f`3Q8kJdi62eUk!Ro}}#hRXO2qd={>MON}YDv3cZF*mJ+&;|V
zw%aGcC!}#fGnc50Pi_24Bc#rrLmX|oefMb5lGFxolyn`zWZ6WnZsE9D^m9hh9lPv1
zOA~I<4a$5GB=CT!CxY?}v*ZG!92#(4g)ze|h`!S(uElhA9HQ2(&;=nFJ7Vy$!b|$}
zA#%4Iysh&nVrhk0|7^_r17=OI=V<a{-DQwU_z#>`e}!@WE#eHAf^+13(>}PN@VX0d
zSMLT$!%#Uzm5oiVXJ1uX(F$bQ!T!pDSY{y^Pi0S#e|&2|U?Vy{TTr%4>xhP5D;+=c
zMJ3Ks>Q466R_Tc6JB{d`qX97@_)#MPMyn1{^DncSN67Xac##sK_lnc<!;^%QS+#qZ
zR$6ywGmA6^O5#Qv8<dHV96wYWlv`73K~tj_TShYGOS;EVW}uXyZwr+hE3sdwCq!<V
zVeynK%=Hh)zDyOT;dg(*n%y~+S6$h2b6J7GwaUt0%t+IjdP>y0REAc9XNdVoEz}pN
zge^^QYO)bqnj0V~5~-~<Jd~Wr_e4K`SXoZne;FGjWw4v1x7dV!-Tl5`QW^Xr(U@Is
z*nXw707JZQc;1WhLB(1(hk#QaQV&xO@IJY7I$rP92<?5n`it)xx4H>-#3H<<(Y<sH
zN}A@eiMC>5c;md=E$4zZdY`vQJ8!z32`XJ<PyKM00^;|LS9_#2uq{joV6L-s5LYd0
zi5&sBEhMW8+TIPj%qs=9n2b2hi+!3OkBT*{yA6-M^uP2zYsu&?vJBP~7m}_qKz75j
zog}Qb>gQ2{H&@q>)%m$O_<b@Ky`OY;Q~F}1=6SC+FT^oQGJ=VGyZ;Gs6j#-nEW(dB
z9Jdc|Mex@Nx(gk8=b$a2+<{*@QhU=^L#m9fN0ur859rt2%}z&5oVs6uM~+zeMkp>T
z>=U!d;t+C>yxidWlwsz4Hs?i0!O<@*RHj8->Qs1CgW##@db+8~9PC@^q1Fra?J+#1
zMjkicMTndds|%tM;YA(i{0|HbBcD&SBO{=E2oYBTl1s9xzXHSP%H_<SKl0!+I!WA=
zom6*FOlADpv&HghpK04Gpl;>Gi!kW$hWWdX9`eS$WNp7>(=O(>EQ*IOs~+3yNbjv0
ztQt(iur08_(I5%erxf=Q?0Vox$z}?3w}I9BmFrm3`hntWG1e_Y=<+8Q9i_RaH?E(X
zeb2Rk4xV789wDpruUkQc$!8Kf8if6(sa;m1N^$4I&XQ-X9^R<e+Dj9xf4p;=EgtVX
zi3SpWhPJkT9&Pd-zEen6x6tD9(VQsLJ^?Lpturg4S2m<OV^a=<et7E#X*AjkKTo_u
zI=?tkii(6A%g3oJyJqDU<iaLYHFF$-bV&vG7~c$!o3T|t7rXVo&@>3D;|gfy%yd9I
zzJqiS`QFzM;1q2|h<86nx*{i-JjX~a1T?XLqz9MptJ9EQTE%_P6dhZT_pYK|xmtNu
zUaI*_c1f*&YPQfdwz~L6kI0|8p)=Bd+7G4s3(n=gefPh`JO3iz{r9G~&;Ui0id#;>
zEnll8Vn~V;+IcO~n^ivdVWTI6wC+#8^(($A>v<+^@}`RDEXL(&whW4&`W9K`+bBO0
z{DsN^!m^1mXH5z7=sdjvW6_#hXIBq%`^H{=Vz4LHT>edG-Sl+#4@52k`^W~B=_3{r
zwdjR6m`tCbgJ10HH*)j3JaM(M;gJGtWBj~{&=T-O^sY}MCU;JBa*)pYMG@{0CrAgK
zARKbVm+`AD6FkH7mSf2rdms5y2g~*V5qa;ol?r7AH1IZuMpm8^Sz9sRQ}%emr;wYx
z>#(ggKrGTysy0VR0-B!(*wxVbz;mZFq#baAeBRNATpWQ=XiO`v&HeV7!`mIds*?cu
zIp`TJ|L{A=nG4{pDWEhp=GMbkL_VsvoHDf<McFlEVOjsCIZ`V78(EjX%e(wn{k3ig
znMN`Y^Na^>B3WIx5NKq^>lv401YcuoL;GW&V27|`4o;|>?#Es*O1C^!>NlHRZKcRl
z`8x=&aOn1Bb<pM$#!K+KVZBK%@Ke$`NbsN8<k$V>X||nn=h-(4#lz0DTTD|%+(f&k
zn$tC4b5jb?U^fVQ_R1IOC0ta)x9_52Lg`akJvzz6HW%{=3l|^Lt)x^h;SV`u&ay{O
zPw9P()#4Wx)@Lo;B^nZZoM&^DWH_QrI!zA1@c~uKvm+9rqIjdoPRc;lUGB=ttH;?6
zecsir&w=ym7(fX$9EU4Y(#V`}*z{<khg0?mSI6tmhRiO|wwmI3we*A%EH3F@P|0iz
zP~yRLgkapecP^w$^uU$ykS4@_qNn=#g&OUZD(8<DHjHX^Z>kws5)S)3pb54<NKe9a
z7>zJCLd!M)%^W45(bhItBxB_~$ZO1Byz0*5>+dBxUHr}>f~GWjCdhyRNUk(!5&cP4
zd69LFPyATgF|1^(Ne!dc@vHNnT^`*COz__yljB5g+kDvSWOFB)fhJj+MsjgW0H95O
zSbp$-36jpwD^h(F@HcI>6EC3atZ`pybSr#|dSA!^d2pU~<QzYQ8tbx}sxq6aoml|9
zUx7SH%NLZRRHSO2<n8zeapwb_x(5@^8S0kbriKMU|8-*EKPPPzyTb6O5VBOdW2Dk3
zoA8mx<2KsvM;24~o#=8h>p;{^D4_gO-f;v5x5PcZAb*iMS}LpgOkAQQZaI-8MHff3
zO5Y27*BlH;acM0D%4ov@H-_yYNou<zM>myC4pdeF7^W0s&%Pyt;Qz;4&9^KgH}j~1
zGKBM>mZ1uZA`4i3rNIMm#~#rMSu-Uc1vs4nl6okpWUYedixK&l+0qlhVDo5!5)}as
zFa=P6uujSYC}{Hmu&A9$02HvHwVQX5nMDz=qlZsXBPYU;IA@QqG;KXUexn^7z5Qly
zp}1jKX*h)k!wOiPy%V4fNs*w@$SA?G;W9WLA6)Xq8heq2juB-;DnmAqMcYeLgqVz*
zZhMmt8m-N%_CY`Jn!U45T>Jc?pz@CuqG1o9I5YML%pQawoCoJqfLm<c$_qui*J<rb
z3X~=U@L!8sO04Yg6Xs9$WOP<}$yk9vZx;&jCQ|j3?*RP69<rtFr5^(+P8UeBARo^$
zQY%l;ccwdfL_8(>a)SO$!tF<+g%D=Y4jP>WDA;KjYLw-8uEZZP-2Uiw#|3~2GS!3f
ze@^6_M0wplkO0e#0TNW6$hi`W8Votu1G#t?wZ&8<zYF;?KMOhZ7_ud6EEjpN2hf#M
zPS{#adTjDy!RM<WTgMms3%;WsTKh=o+Gul|902UBpUUX`6$P`~;bblp)*%H~M$p5Z
z_A!1~i??}!XIrd1k>IAyaewkoP~{@$?$$&8U9Kytco2*(NM!(!rgs;IT`ZJ!uJc(2
z!zEs08X6%Q#uK(pT7QbydnusSz=~v1=<&oL%Jr_($4Kqn31ZO-rH_NNh4%REFErNT
z7hn%xgD}q#(uHviscu>Grc574M=Y9?2)K+-*S&OjcmjiVRLo@tL3yEe=uLg(GMHrR
zRm>XJ16H<9Z2Ad~R$K$wl~+9j;n!p!A0uvL4EuRZxuYbq?DEE<UYM%gbL;Hn$m*I)
z(HAtwxqgBYeZUO_cG#0`piRyyl=v}lNZa`;JKGS{0W36xIMFZ<&N+0G)*mY;s!dSy
zP4_s)81AXh={a?NI)HU$=IFuBguFNE@+4-&+Xns~`I{DXY7#X=gKk#{^%tfL3VxAG
z(L3nU8d`PS!R_6%W9DC?X$frH=DW>WM3`|ycY)ZbADwm?%M)iG=PPV=*>5zv?mZVv
zQRZXtQ!9J4>Cpm6%Ir(E*lr$n9t)za{>bscSs9tA>e-UmYi=*cPiYFti^_)fv7Rt#
z*X+tn3;KejAxq#STVz%_U2IwV8k(R{N{s#MI}Z3*9J<zZ=84*QZ@=@5>I1MHWI-HX
zOB|0MrahgzEMdYF|MrdV?ZP<41g4yOIrh@gXEJs=Vzm~#PuS*Y)A+|lA=v%sIz&`J
z&}PJGuF+LJ*^#pL>h9<E62<pp797Et5A^OkD?qEPV>snDB&)Tb@{~l(&g!hO`)}z-
z@0eMK`h8%obEg$mRd`Mx<gYZ^qXpq1!l+D}q{BoztI9zPRyftnYX7o_WB!NNzV=*d
zJB;psvWp$P#+sK;KN<I=&I%XMD58Sz2yNK!m(a9wt9z$<ckWrqB&B<~?NcwKd8l(@
zo=Wrm#+Owv5h!-hJ;<o(5h@67@4jJqtT-b1$blFxg{;CL#*=&DovH#uxT>%<)J~$K
z9xm(-M~K0@rR60zF^5AJSeyo>`pY=YL(eOmGcvI%2O^`~eiKwCJn0Z|+A6h`i|Aq_
zva(>JIBi&yFwVa>jNbpa(=`2kFM&MK+m(nRAk8`pWu~%{6^P<b46&HdEmljO(AHf4
zOClE8Nhgn_h^LI}Heyd5VOH78V}=FEz;F~xo#8Y)AMsebfuTu<Z!pO;{!@an;*@^Q
z*t4aRY<(|1Gj@BuH!C#O4w8u=M&YKLP&{PufJr38Iewu=gSbNvC0ZaajjzGR_kS+T
z=GYwa(58V;;N%IWL?e>A?Sf@;%X<&Rz_ZiO^)`|XHD6xPd7u*&dPy+#UApQ_$#E1X
z09{abAl!Av^0Bu9_fPBNh1<K;&iA^1EP3P>3sKnBoz&$O{82fcY;j3uT^Q(=N5I7>
zX$TH7N3W6_q)}Bq;SqIW`M7&-!iv?0JB?qW-!w++eWj72McB3LcSkLva7LKnEHezI
zp`-_WQ@sE4A-A>Ho6AMnCL!f5FEX@rP6lyvHO|L@mEP!uWTy%fWw5Z&B6S{+l+p)%
zxC*QGPDYf$-*~@S`It7AUbtkwq4dR}jKoOoy4~G+3NJ$vm~A<q@KIi(vaH5W_G6m3
zzOSOeJ3g&5PcFTZ<Zpu5^_**jWAO3>H6jyG0sGXxwGt|UnvzgMo$p<-FG<&({&a(b
zx%*DcM}y1j^Rcy)9ZMf^$&=#spUJAdZ1uBGZ}<m#$&7s%yAnmy`|=#O<I2<XMhAFb
zX@1n^rTKYb3hBf)2!J~e+0=v%8&F$nJgDh%uF~6KGEfbQl)1mn>i*O2JFHuNIRsbS
z(Fnm^ln<fG^d`Gi#R4WItBd#33p+lv<+r&Vrk@dRR=htgRQ*zji^66$txM*X7^J6^
zW3CkpSR2Ym6iz$5`e-*$cqaPhWGw=D=Im2c>6)>&!Z#d)|8WA}GEGS5)vys@l<tTe
z!67OldX~g%g0H5Iutc5s)V4o5|K+u&7S?=ezXOolfc13SX0U3K;;UUkGdA(w#;SGi
zBwvz*_Nyg@+wXz^h{}X{GO;khd<z{zmBR!&wxRYY9kg88a(6gJfXuyXeLKNgs=Rlv
z{oYO24xl%{W;F#osH#sbFW`LG6Vq0!tVF*B0iQUpK(=VEbF03y)`iJ?wR)Cy8js5{
zqp<rM-ce!@+{Bhibfcjl_EYSWNMV@Ki+X>0&U;xh>Z{UwrqP`(n(N7=Gl=f=gT#A9
zM7ya^&1|+4EfZYodzP*ra(_5JzB1gxBquSVclz?`m+MzgNO=l+53F;1-dJYgTTUUJ
zgv<3e@VmljF8A>g^d!Po<hh)C)wxcHdN#2>6WYVP$4KYE<sjW^vL7X_K^$(2^KC})
z`#D~UI!#Q$4J~cpax-S9#;tN=8PoZ5;(VrO8&S|Vh01g-?(984Fh79!aCt4pq!Bd4
zi<Jyt7I;vTJhG>?lu7)^368YV9?%+z?C5iHZQUc57gb)Ai_RqGO$rYtKDBy(;ua4z
zv_R%`a*QY&cfM!#o;eejd@0Nk*;dE{x>t1!q^mBb$USRu!I*NhV0Wqr@wLUx;C8Hy
z`_0tNluD`K%iaxk4=>Wa5sV%5^g>lHezCepG$a`!dXYS21~l0lM@^k%XC%^{4>VOX
zkQu<#)K^EEk0|=abl$zPmo~`slEO82dzLK|{a_Ff+y}As@A7T7c{uDZSO1X0g8(_@
zKW58I$vhbc6eG?*@;0|$*IqW%2t2sf?4PmQ;~Yp2-|-ryipPUQ&usA;MJBlK1u?|y
zE6wFr!dIH4w>DoczAV242Wr(r>C)3{Z6=BN`TY9#*y42vglx|n9hQ!7m$e}OLe=Vj
z5Zw!59K^&H>YjJ2ARkwMW<s}Daqdl(vafgH;Zu7Ty2b84Q}e$4Zs)+Y#YfaoY|Np+
z8?7}lm5I3{#q-6*vwEU2fwub0&*HpYKLZ7EGSBWWZJGxXv5rfy$wgFz?SWM9LpgGF
zxjjej_nvaEy%uj4GTV7I=Jx&9Z~YtMNVJG|6XgzQk7?wn<CM)<LG9bPQ++x5>e1`x
zJ2hXq-wxgvdp2aFsmR&*0hRm0y|rI8FQh*$b>J49Z=x99`s3NnPL-IPiL=j3c`uk>
z8OB(4vNUe6!N+5yipp#STxG^dRP!&IkhvkDw!o*&tk1%;m?|LB2QS|4xPD{HY)-OS
ze%Nsm()j)=kgW5X9F<{t9|W?qDVlxoj%w1`(HVaAhZp$X(ME3Vs2A{e{8HM_-f@f)
z0jR{VC};!mBk7d2GtTHu((P+QW*nCZhjj)`Ckc~#)EsKqK;tD^+(}82Sv1^nO3wXb
ztW9Qs>qVaxbH$@jDin2_ay_$V`c8HYkKGqExL<-Ts4q>Wc|V=%uh^m1o!^xW=nIK^
zi$DZI|C$mV)Py_TL7f@_k@^9%@qW`NHlxk+OK_0a+5LtRjhWL;1>xkcuNwb2WHw%I
zvQ-cL2=g>2%20(<Nc_lmamrM-3J6sXsBnDq99{2}wE!aihk;FYlW3xk;}TdXC)Fp2
z$NG_*`{TYJXjspTE9j$M+F$@{A3KaHNv7=jf}9)$-(m(J$lnc+1|v1mMw@11yqZ7=
zCENkg4qFogX}l0OU9eze4w0x#z9f+k`Bq3am<89k@P(!LuQbSOUa%(@jn`d}oxvR+
zB_p7d_mO3}8HHQG(b3I5i<l#6SG1m@?jT^hDI3<_U|FkSoF~F0yNFDr(=NO&DDPhU
zs%*w|CMI64)m`)X^a#dIsxT)wdyBbax|3`Jrv5p6halM*tHvI=C5K^CD&!bDXN-Iy
z%gV>!f!B8*UQM*EzyS`)JkH9ZpD~Q8<agr|(Y3I>`FM0s#d5aC*zwZ_cQxBtY5Rxf
z?fsFh=wK96!{K=lr(wA24(1Uf57!#6TsBeH`byJ%aEP<re<7O9yZ<%c`>US11w{5K
z-5^b=xqDMa;kNsq<0XC}iUfu>TkkKS^1avB5gvH1>t#~}VjbY;#uxfAT7r~F$){Eg
z-Nvo8<JU^7tM8wjMkiGnBXIlXORqgRc5aH{)Z_$!b-^At+zqP$*wqap=ciXbpRst}
zG-jDIj3QU`x`*8veGV&L&!SqCdD-5wR>$JSl(`5CkzWA*z^f?)3h!Grj}Wg+vA{T-
zk9vKw(zHBAohf<Fd3w4=?^#Q4J#)!V1K)pZoc?#xIQ@%Vy`(ZLM1K#>_-ErXe<c;w
zS=DT<40Qp=o7t8+`O^X|{}!M5*<5);_ozGOkX3Mydz$+e;|(=av4O|FT&j*8zLe)D
z`^107{NXTn^~HUs6oS1H7!_%UjBVv{bklivF;1&t0={i=^p-I5d4b1O9J;(&LdMo%
zl@Ff;zGf{GWq1%Du70cI4B_YC*3WJSaxAc<UC38xKhS<yP@baf!n5U!E1>ElLja1y
zmIn&FRLy2#WZoM<Q@t5rv?{~M`P%d7KBG|BX9F^*W4r7I1tDc`aLsnb_lbRfA_Swq
z*f*GFs4+*E+J~9R7=vtufsG_<q;_!Lnl}yb`Ix7nYrmq~O1^I&-!R$Vb7>11W0F{8
zjR2?;G|eQjUawR4sb;z3qQ0a=)`$l36mYu!wL0Ze=+3V+qfi2&AO64Cd+)fWwsu{V
zBBFvsn)D(?kRl2wQY1E-h=>Xi5F$-LL`1rTL_k4mC;|dPqzFihbSV-#A}Ug(N^epE
z2_*(n))}s^t)=d>zkT=q_Br?7{Rh90nfx;69CM8Ew&#7G-BVIDaFsV*;N<g2`NcRk
z1F1lk_ZIeODx<pj;RhXZ<5Jf~58gF<puf#bDxtkbav|TPoE=`e66N3K(I(n;x*}%x
z;BoJcyJVXkS^;w`sbt<)?puw|kzm`0;=dISBlKzcUIiEy#7T3OHh?-dJ@S_D>g~}}
zLu;kb**6V-`jmoKLE53T1k?=|vC!>q@@L8!YXH3SAnIK~RlFT=B^CJZ9-mVXys-6+
zxvr=wmh$Ro9X%0N5YkEiZ2Y?as8-daS|c1F<(cb_r-PKyB*m4Zb5^<BFnL8pm$^&j
zrU86(aQb-za4f^+`wgV+uYon+@T=ML<1zUZqE9g5&@>*%^o<?sQS|yCLG<o)@#Ffy
z?ak`K93t}c9z;CYN|8sEbvmg9GpGqRyY;ECdR2N8QmUr+&eTCeEQitY%@m%ky1bOj
zm*=!LLZ)SE-dZx%r_lfvV`e=kdEjL$A$OVuO%)g85%;)5g2@pq#xB<JJovEQdK9|0
zs`Me9^VtlfPJU{97)O#OyHGAV$K6_!1&NR7B3m|xqEoMJudL{KW{G;*J%<KBAC)Ay
zDRC#|u3n*pxkJz9VOzR}aHJk`5=G6o2Qq{rO&YigGL<-s%A~!IEmt2B8<v4CJ-=;p
zM|({y(Wdbb1<u}m{an)imC{zZ5WUL+=GW}^>FwbBG!XN-{H75X7`lgl3`BUpV`Eu>
zpBT4y0!0zfqsLx{{e^7zPqc<SPrJ9LC*fPX8>sE|?_fk1%g@1oe&5&lAE6rg9%B9Y
z_dkvOziR9UIi`{UF~Swd@F~%H;AqmHDK3$MYQ7?*ephn1eC8J;`Qym`^d83t>ORk*
zYY0?n5-rqooTg;~sQZ4`F+YWx*@8NVL%(X~fZcYiA83Lk0Zou_pb4_`k2B}r`~NR9
ziw%Ir-qK=m%MO(QT~dsJmu&t%<9<ATR!#;$nSa=@e=M$BCj6=TuKG#DEbuo5aZoaU
zlL1QTEG}jje|UfU(;=Z1%nL=yfZmO4)~CLIEU;h4{JH#I@Z~$*{-3y-{=IwrfATr+
zPwF2_V@p5v`+rhJfSBed;%3-9{3jB)Gv6P}zt4ZPj(3@AZ-#RCPV18$vA$Usd#NyN
z%gtB42dkqFuSo95vwxh{q9VV8zGcQ1SA;%|?ZS=}kfcj!x#gO0Az$Tr3f~m!(IoTv
zH|y-q9y}L&r3(wyFX+GT)>n@jj9-g?z&V8?n0Eoz0`P1Hr$NM*gFk~iuPt(Gzo2mw
zg#A`c(R(v*v-`YHdb?B#-F0}p^FFRDpUKY;G7n>5jEBFU$E#DU$V*e$<xxAps_)aY
za$8%L3~Ak8dc8~4fA46~o3t5@Ui9Q^y~HCeh|&;_H%nh}JXAQj5$s^>eRb*-Ib@fF
zEB9nupiOS><BDXHZkaVn9cLL~F)0mBH`kcn6JwM!^o8pMPRLEFHW@tu4<8c<Zi91p
z)w=C-s)9&4d)=aZs=Hv?w{aj)WEU&{$z8`rU{O>SvcLp<sCP0wnNT~q0TE5JOO{T#
zIev2V{nPb32k$WYh~H%}WsrODoN@U{$FGac|Bxm7{`tQ^n(SXlYK`qejZjFSP8H%x
z(7^@Q?b-XSWn3WZY_?@vy~}VLem1o-lF7VEB<7&DpXR<uS>|`pJ8mPEEj5tGHnc?O
zP4R79E0ps@c};bB^YKJ!fwieSK{Gk#&tk+cR3CVC<rB&dJ&0=>FVlYDMZ4WzOx5X^
z{P-fj9{z4!+p((h=GH?$%4+lZX2rk?SSsxi&TbINM80DT(kGl(7vVg0W+4H(bCY!o
z3UMBD$4cKC9MpxXt~nm2+UEzMP8W<c-q;G*y5q_}2-fz-`G4l={P-O$Zc-S4SBfHg
z#uzb;qMDT>3jW!>o4cL_<Tp)3?v}9Y6K42|gb9nDiQW1@IgI=SbmpM8<Kai>Z^bH%
z8r|3~G-@5gzHMpTadE^JeynDwdP{f3bzF8_<_ql!HU)JHJ(Sf1uV8JK$FMBbDs1{2
ztuF{Be&}-Wiez52jc0T)p5m8e&)4Hn*_BgV#SPQ$$;64_CNQ}2`ADkDRKe<CE-CKG
z5P6^}d*o}98YJG&ZO_X7dYj)Eu9<0_tOdd;`o4=<KFfQ375M>=>L0d>pOv!ZN!mDJ
z=tMbY?Q|Uk6pds-H=}&`1t*$20ZR?vhSK~-Kr*tTXtD8!_cxk=89Mz>c-#M0L1ZTq
z3%2sq@$E?&dXS4@lh~S^)A;5*D?NfYwEz_M4}PP+1_-O?DLsTxYIuqo%?~X5%vP?!
zyfj`O6Z0p$KM=0HIbT=RFutY;=B?Eo=!VDVc3XR|z8{Mrf3%%A73f3}YR$0*O$+ht
zqU|l%`1s?YJUhMN-xxpx(T_^meqzhNF@Ut17?9A2uz=EITdk}Axf1$c<5xzqJLQry
zoD*uRG_n_W8^`K&ZwUxlP@VS-lY{v0X@$nx8BBXKEUyd7&WDZ6DzBUkjHEedp&Bm(
zy|N->qmXS%Taw1Po;*btuk6LWs*c@Jb=>ESDaFlWxHBtwMK}D-lKMx76<0X3;tug>
zXdG#l2im65yR-zia!qIvP<y=DnkA*{AzgGmM>@5$B$`)hQ&VzJ7J>I}UVlT6BO2BG
z@izuAr?zE5PH4wA@Bt5WgA3H8g@Or@B}V059H)j_zXSy<b&3S4Tjmprz=MhsBQ#@5
zWoTRhtiLfZ>VeR}nN>~;OBeN_fZ5fp3nj~Z#%*z+$$sO@AZ*hCfSL)68A$tSY#Nho
zx`w#vVcH<@dyqwS0^5%;Ef)GU{wtKc2ik^q6`$2h0L;Pv1y|LL&lb*G!~J{_(8+n!
ztKS%CJa!-|4FD*en5ECq$xx`h@wU`9gwBK}X3_aQNaw&rG=2^)9jq3?ljXq)qOhMv
zMvtnsN&?|&W+aMWT!2~=xt~itGQS)KbWt#;0fVy%T@}^LfqaYEFxU{IvWQLVRmR-Q
z0#V*im+%!NJ=5r5T5p)3>AN@G=x4E4e;k}2#*Wm(an%q0;}wVz)^UR<diYV0?bI#;
zOU!E#^XJC}9`K)@rs(kLcWC*a&Y$(;^L%sVr(yYF*hci#O=eL)UI4&A{q#5o@_!hY
zABWBVf2P#`p(*7%mkT^EJzBVe!p+)pV$1fEl0))tZrQiw6+*i&^b_^s>AiugaA7KI
z=oBlJhndvUgS+MGaW?f7*_<$#@6-X^>+XL0gVe4<ZF6bvsx-}mrc8yFVZ-ekA3I61
zw73mQ-l`&<+t;^)lfB*{@?MT!zP^C*r(pQ`g>7$4_O-?wXR~f|^AYiASsR^b+3x#t
z5)XJd8rZL%&+-p4-~$H#ABohzm#Y5kiT%me|85RJ9^(JoB8Kua=wU-rKbe<uWXOOu
zs=__b7my<D49!hQ*}i~&^0u0dNo@5UOj7eKXE@cp59bVUjIyD4Y|+>vs;oaDVA74v
zsdh%cJj`tPWOq=bOrPUXTBb*gR<>Mj@GOT#rk5nIpC&KAS|&8xLZ0A1!3r3iThn*;
zqnpM@)SADU2|fyr@bmI593ETSDS{lgsW8}7+7ldUd+?(RX{6CwgW%SJ=CB~wHduTp
zev@S#+3Mve*U5b!)bDMm=kGQUP8~DIeU(!3xm$;)<)4ke{ySnWd14hQkXC?l!bYP+
zy`V!^Nb+li(x2E`i+#gLQ->sO#6L<%=--v;F<X|eiamJFm*LJ^<3Tpk*ViT|SMjDr
zI$F<aTJsj?j@k7cqMUbMSVO<K9N~WeQ=tl!YN?v*q;ZgyEC%%dHozTPvl1h$c8#Fu
z(X}8Y&=>-Irw&irv)Y}Z_+z|e;Gl3&X~0B(T>`2D{I`I=0=k`l`U%mGR>5aUDdALe
zu%#oBz_Z)%0!04o$57uJo<+7OZoozl0C9pNp8>P%VYK7n#te#)4pIS_T^!UE_yQW=
zWe#=<;{MY0{PQ`>l5TvW<z(XaqbA2OAOS!V@do~A@Lu>|I-h?!=}_k@rvPWl4SEA^
zFUm#Tb9J9FVvLLE8Z6~nU}>q?e0})P-0mqQ{(G$3RijT9)|EQpXQ_;2X2KH5jqK$>
zn4WA+igjUKJzSjKSY2N&&ieZG-JSEd@18Kd#Cz%z1GVL!yHSV3jb2~=qt}h>H38+^
zpPvfq7h|<NJE#hE&cn81$UgIUFnoGe#lU}P27bf$*6p9iiga&TP^`S~pPdQ*>fU=H
z!?)y#j=y+}zc}T|zs;Pe0>JhQR2V}NFZ<#qyYLXy6wT6D^p<ngz(91BkosWk_{r9?
zl4YqqxwWtX%quJ&?L{&Cs?imUJmB1XhMYW+vu}|U$Znt!a&&7!2+>{o_Qk=-dAo_=
zkUgbS{Zrbs9}w+~ysDeISB&p7%x$c0$*Qr^GeEWWE2((12F3zZyU)DtqA^Q?X$Zg$
zj>3Ha?tsKh1C?%=SPrUd{_4!?W#!mKNRhyHk3mNs2@U`BiS@UC85_11Zd$DsyPL{R
zcED0E6SK$6k~1vJUobMxKcSYK7QNr0@W{m9eul6c`xZXvF=-rua>9jg58#K0!`zA=
zXnhfr1&4NimAJOfpCUdgQN`R5{JHrp@<yQxeF!Pj4}?L-hkKjhCAopCQs^e@eX00*
zsp^Nb2y(;vAnD-n@qNJYHk!S0p9u=rQlA{<ZqnG9HtDw4yghfp!l=%>vXaOyb*U+X
zljlAIE3W9iI#Mio{Y`8(XN3+Atn#}I+h2I6VV$^&-x%7h3`qfGHTP+eoxA4xE7)Fp
zjzH86TZPx<-`acauvaiqH+i;ym_t{`cO{B)i2mua^KrWPXQ-1B=ZJ6VwW&n?5c^)6
z81aMLuJim~<R7uLF+UhGel~sRYJbP|#I8^0<vZzosHXQlNU?@-VYiLyIvvM(q1qx%
zPwx=^!R6S<KnCfZ^O`2Q$giyk%?xtVGz|Z8D$UY@>@`KfhT!&;@1E<;sClJ)I4l3%
zo|1%%l1HObjTPAagC;xf6`Lx$9<#emQ_iecCVLR{NMU5tX}sLj$W-B&2Em~t@#MvU
z7AAAI183Nz5_?wfb;Z0~SI%R<*gwH>Wg2h`*12TR`*AE<B3Ujz-Wmr>9n>oCzUywj
zcP>=))=QyK=HT`@XN^Rd6HJ?&8BX>fT_6rbA}on#r{@x;TyHL2E&HTpda@Y9+fpQ9
z(Qx>1pWZ&G*C`!-;k$A#G*xS(=f8NuW^s?p$0LBE!|m}fC>s?@G`1j^MNQJ6#&3e;
z`FtV;6mNf%YoF8QQ91unf6>T4=@H6dHBwZJsEfwlLWv=C3WpY|62GZj`xq%v6E46z
zwj0_bG#kTI)HtzmY)^szfr9}>I>-R}P|a#|?^I}`7^5$=H6`gGo@$qOYd<XBxr{$8
z&_z1xvG7s!dt1iMlPZ@$zAFjuMZg)=5MfQLkv_;{DGkTENQbMON>fYtrCQ?oJgSo(
z<V$wi&Fz@sC@zwI<sp`F71lLghH6h!Aw3{45*5shvX?aM@pc8yYj;Tlv3Uv%P_@LN
zcZm#}SJn?<MQHI_UejXxsh7xGFMLzYYd$M%KzNN^a$DWy^%GyQMO1LMKt?Zd>aq&&
z!8(;!nS7x{hZEq#p)DHYkD!jL!sO6qpVEclwCHE6EG>IWuVL;*y=#uSdp~?ui*pLY
zIZAKeZ>>_(mz!#ZVl}w$W__kNHTGuDZo_n+JM-G*Up8M|-%bqQav{kRjUQ88C@Q{c
zEn|v(xt}*(y6|Eei*sicdi?G`YH_YkmBQ<oiZ<-oPJAZq`Sc(UY<gh10#szCyYSqE
z9yhTust-~0F>#T5h|PCsp8t`g>&W|sj`3cRf|agdD6_T%LYb)3|7L16LZcn6Fkj|x
zU(&}@X!5Q{cgj84e(Q;(>~vPT%L&taWtBk}=X+I6bZ%ezTcE!MLM;|{+S3ZL$Mt;W
z<aNSJ(atI3=FzHHBbS8NwdIv=YPJb8ADAJ^qVsd7<2unVQMSgz!50Caz@9;Jsduyz
z&jQPBuMvC3GuCoJ2X*i~OY6Bed=46%art_ls!zNYPv1R4xeMGjPSqza@Q0}V$(en&
zCX`gYxKobp4iwe*W$l3&>GWf(*1C3~RjYjO!z90b439o!n=X~k%B3gHUYINQ&Tj{2
z<BqEdvjTu(@Gs&kfa>JGR_ba_%k8C=(-`}}rvLsS54+q^V{WhJR&Hg?`%A2|j_ueH
z^t2lqc@&QPF#d`ZxW5|fv_f30S<Z_;eTVqU;GDJdK8Z8)cAdv3x)RQ5sod)%VCif^
za$)eP%3vX=^!XvCY|@rz6I3!BY^^~mWYh&QCaT+X=wt1w0^bx%&D5N&7jZ(ChR(KT
zb{)-M&X3-&KsNOgGE-`~6VPpLWDrynu3@+3HNeFH%{X9|uK-WW7WcLAt5^c&HwL*S
z(#>$3&Axf40Pe?+K#cy#7(C{15b&2hN1CCi1=yh!SrR9Q2Qih-3#IOtr-y$G+yai*
zS~2>kQ_+$$(f{+qzmNLAd?>goD63@ofrcV&_+!D9qHKg-cX_MI*aC@QadGVYJ$$K<
zzhNSS*P8iL4)+J{+lMF;^v_66T4B8asBt?#A4yY!_1arn!Hs;z`je!6n9zashggr+
z@4S2U!Mju2Avl1pvlWteV?j0Q;+!xHWOtVIM^bz~n41?R&AGOB8WnunLAem9cjK<S
zT=|x_pbzvmh+E(O|GgBtjv@8Xsy3m4g7hc?e4_t>mK_n+vIvA7T?~0sE}TEtqr(Z2
zD&2Qpch98mw*%fGTa~>H7=pPHmt{_c*EEg0xyzH87G&BR+yK#p#M@~QdueuJmv2Fa
z!h0YBsK%>E4RThCA*mBYO2cD1wrd_UJ5q)_%ma2+?L12F<b+ootI#kgcy*_JT8%1)
zA}hdXylKp9Ah4PRkr&8vPL#km%se6Qx&X@2i=v|Ayex6}f7bT@Y;*s~9iLx@3?gcl
zxNck5ivv@~LkkVu*4fTkK|KfQjPuS!nUm^X+tC%P@DeQk!xSW9>{uUQ2Omd|iHSnU
zMHykxdwGOr#H+&&Olk^t$*^ya&MIp-e0sz$mR$i@8{EuCe2oE13`Nzq>-1^o{H2tT
zR2x4zM2-`x{PLHT_RF@fJ*C!Q%+Gn#&Kt;Q<a&_ah07M60{hV~n-hO^{5y7Y4<BHZ
zh^0uP20KBHAlFJ8XSejL!y#9V0nynH44D7-cBd4M+L?D#HD8Rnq>I-FU+BFZ)9LQv
zCD**;C>9o{cfeHdo<1*mh++uDA*h;}qn#5<Lmo!IF+8lbN`A;z(8Uwd;oz3!T2#NI
zN-3RD^U8^a7(t^b$xX{CTpQ|l@Y7M52)Z0Oe4u&w*2*qNTZgnh`|N$soev^BDv#Mk
zyGOea)_)hyiJxA@D^59*p%ei!Xkok%y)LJeb^2h5>W#_l9o&1LT#u}_?QC4<K9LXI
z12(F3c)1(ic*>q!O|kdInDi(XznbdthJBuW{A|mGQ`G1of{?15E7@UPL2)C17@?^v
z&|1KFH_$}IR|eZOK541VeSP<6y&p#TJbk5|IWREL`mmb%BdE6^t^uDF>}KwSG^2k2
z&XNIMxqln_7+ztWKXz!~uoxba)u>rfl`?+RrL!>Xu$YWOo~g3BGfynMRAYZ|1{tnj
z4qWb#a!P=_;j7cUZ;A>HcI<op^`#&~{>cYLx8!GuCu$O`=DN<O$+nt{oundYN!o14
zp8K*r^|DTfJbd=>RP+_S4Zc*~MO}Nw<eu(*!l`2>1KSE0Kz97bP^q`PcS;i*hAX>+
z^O{2Kc{zHw(p|uQ4OffGNn4W30}T`<A#%#&HQ28;;Ul}B_GM-ISUoNXjr;}$`7MWW
zvRqdaJOX1+$ZfkCF$o%^U&sr7(TvX;RiB<o>o`91j9Mm=6?wj83I~%&JTzr6;T5OO
zqg)ze`Xn-F*F=zpAQevo>G6p)F=jw9g{>gRU#t>kBW5?IQmz<BAqv7E>UEXxva4m1
zF2{)~&oIcZ0Yx1m4i7BSj(49YyAd!a1n49GFv3+AcYWb-;a!%Bk6N7qkP}e}r`6pQ
z+hrCBelIkJZ&O6AZYyM>&iBFEaAj)vZh{Ocy)x98(3VRJn|f27bfz!aw)XAyk}CC~
zyTPnaKThnl;uePunJx%7{xj9uU+4q9S*hbDfgEJK^>^fjt<PiV#^_0Az4zn4kRn8S
z0iXFtP4Xw=HsI9ang0-TVkYX*cg^om8-})t@mf7kGVME7V4pr&9or{WdLtc&)Rp-%
zfIs#-JV_8bqe}(`*k%Mp<N8cFZS@4nbKx7v<Cp&uO#bUl%46~&r!R`mCP|br?(VY~
zSrk0D)qR4acVRo={lniF-l%;H*uoBLP;Cgt)Eu9742dZe<X-mkJ_?8k1Q}2s*pct$
z?!Td`{;TPEd;cZ$yj$N$FUSB{qlMT-gtux2Mfv57@RyS@*~5<-*NWmr+MhOJL^!XJ
zu+IyKlJq@>MYo`$C@0iIR7q0E3jb*zFVTWRKl`-wJDX;blzvi}*6Vfy7FBGfpdJcL
z*+%lAfJx{h3v@eFfb})ev32+GW^QV-&I50!FqyhFmG>=n_sabQENEJVG@0|g<%)8h
zXS?zJJ<S(GH2r<AJc{PODj`9Ud%)rZ>+R0T;N`gk5>`%s0}HSwA{@wPCyk@E8SBaZ
z(-3t9>>mBNk@^ms6qTnl*7@f#lQ*t@<0UM1Qtas+mBj-l?Zl+X{&er$#Op5m6AK<>
zM-?69@e;$Cn#7rSpt1GZ)HCD^LRYhy^&ztRMEA6&zh%sDMfttb(HBoAQk`P2)yCa3
z)?|t^36$1$`;3~%!tKoHb*$<xH{N%hjF}JLD{9Fo(vEqhy!+@O)64C9%gY|@-s9VK
z5+*cSt1NBTgE&iUZpH}W?9pNf1G0;xa`B@A9aWV)^P<C&<u?=6`J{a$j<^S%Wy*Fu
zQ2>4QrZUx)BJayhQDKn|!Ow`ZC#S5rshp6J(uQGAM{)!MFVm(g$Xb%%)_4**Fclq7
zOH+$pJ$yK)CyiayD4VzoQT32>mQSeu6mP=$f*X-au~c<(1h5u^mn~NxLitrs>cPl<
zq|Q_fWQlZ?mSIzwF#294Z#e%Vd*Oo+T}N>ZUJ0JzTlbcww#znkadN*gxb7$BBR|7g
zx|*Mqqqw$IA{YZ|m+Rf1Ja8C1>&h&H9(WPJJh3_=*gQ4gV#L-s&Qd4&(q-GUrRVs|
zLaVmhxABG3{LTk%e(5>keZ4@AyD2j9ogWr2<lyH)aY3-A4DXiK-%%tfk+@5(q;ua1
zVbS^o)9e=_ni(CVB6`%@F7#|;YHG1031BK7qUqddG>Y(aOAJ&f2ytk<Sm``KTijkR
zrUDUmA8mMY?H#{175IyUt#1_mA!|#-by`+Vb&!4>ivPOm?ZvAnD5jI|;?#ws)5}j-
zxc4c(oJz^1Dk+iO;1O^_27p%rYXL3Yz?Fj^-KvKT)wkQFrNcC<@Jl1dNTH46WEV8~
z{0s=`ff?A<PL;$t#JiF7FeFU*iV<c>+p5;bk$^>Gk%!5S6F&HPu)3^IRP=GA<~v)s
z7Ag-7pM0wvVaV4wGP38(b|kD<1A^}&Xtrp~r9C%4SRvAu>#?`c=x~y<E>`8?W&F@-
zt@NhMXPK$Bi!Eq%8WuM*jk;+};Q(BwUq`iZXI<7UNecrv2%&9|p>J(9Q57HBQlBr@
zJo-FT8J6Q2uC5^P*racQixuH+nSPj7<3JQm_Q;@jSu+s%!nKUuD>cn?dqq^qLnYNS
za&*U)#vMWVbu+JdZ)HZlKrNjf`;d$&x$0s=oF~Qhl;<7)N+5?JO7l4-o?ahk4daoW
z@MO4~q4zCN4Q%``C7}XP=WAz~mZo<<E%sw9#y*g`FDx9e*B`Z<Mv%Wu79*@oiS6$a
z`4(Ac=yUKzk(H!?TGs(h$@N_<L5Dg{s+GmYoW`agj0=~(3oEFA=^8fNAA!oJ9w8_5
za9fQHHc4wBGX(4?pY{atY!!M0T|Q-+)}8P|hIjvO3<VMP=setQbIAxmf)~>B>Xc{y
z_4vTVSO&4n`pje1#!Pxr@K-N_-o4mO5USkO*zCM~dVAk#(a-CS?P9~nm4<7ZI)SNl
z@7`3rWz0F+$ni4Y(4IInWZo`U?Q^r^(zQbhjd$zVYZ<hciI_)tstiSQsh+LJ$7pBN
zq-Bxz_^i;V;<AO*)>S(x*$K!Ci?}DLme+i0wa}y;z%rgE$k1A$8}nXpTiDF}@(YS3
zRi3OvfPjt#h?awEp<bag)OEdI<hZ$=Xu$G%j_?h!IGrvW?>B5CZkV|NE&>H9`pI2}
z8}t?mbXa@DiG(Ar&)puvt)lX8Pv?r3UwSEe6`MD;0X+h>?fO1Huatsx1RAUsJ38k$
z-Y{)^n2s7o_LeIC#;{1Ys`Z{GV*7AGVExqLCdJ->xlE*?X(gmAlH0PI+B?ee*0$dm
z)Y3-2dE&#V?`YvD=kZV!b3NpXuY7y|{W^{KTUBc3#d~Ly*)O+qz5Jp%tKVLM1mrn@
zM|3~cfv8E=Yhrt;hfM6TLgifRa23!WGJW7Z?em`FBx50=Cn~Wfo_PV-=-R!1vzbiw
z*J!2nOe6GeF-EP^6HgDA9~(JG@9zE*SnY;?^5m3XL$cwy_E-1r2g0v(Upa9|W<P5)
zoNxhxPJ;5|93LYNOsOYRjR|Atx>s6~*DXFBI)3?x*1T=Gj8<Y;*b+SZ7};_%33!+1
zK<@9k_LJP@bT;xh-`nSmTvqVvqYpIhSZ>I&3fZORN<wQ)*Vcn0pTW-xD4q$Iefhf^
z@vrDXbBA(6XP)_D>5XdR+Xt2^i)E%9sqtu#AR0VK0XU$O3`iSPqaX^1Zy5sU-J@^x
z)QxSL3%0|UENKk11j10uck2O&8i|1yd5suOp6k4u87}uv0ZxC1IZ$nBaUg$j2ve>^
z3&x^0Dw}YQF2o2DIN6gN<R=`71I&w_kv&cIZ(oa*mz(H#uy=h11xapyz{ny6@YP1n
zzGvw=Mnd$u2~d%<E%W-f6;m@UdATzI2ZD0YpHX-NFJXyfA-c|M)KXD9J^KFZ<qOd@
z;j%gUf6b2n)XD{$bENWO=@-740G^bOpy4K?0+~GU^Feke3Y-KSkvIrpoZea__8Wu#
z6!tn+@)uI6vi}@j`mczQ{~#~TYe4+-@^5a-f9UmBWD=yHUjX&{P3yTPt0`c_VB&vt
zL;t?C{@|Ma>DYH<X9jA`Ppnjp7FjvI?bQoN!GAvZ)!`hV^$wT)lRqsFX#u%v#YNoa
z0+jy1<=&5RdJ1#?C+G|dXc-tNhi)?z-Rk)B+Yc!$-!b?vewSZ=unu7Yc(WJZzpVlu
zso;`)JVA*XZsA7rfNpaZmsa>EgJu%_KVAJ#cO^eBHq1sUCQDLOzP6i?)38z+4@;62
zJqxZI=Z)IsCNW6fiaR=|fBA^7!d`{|5qKJ=f{)6RI^natcDPRM`TKeeVz#;QGG5!1
za<Uw=@y(N2!mdW#k8vVDICd*z+ISdC0)AIepl`_PWc%`J#;7l>@kL!hmgAv_eYkfQ
zvtGsoo;@V<Hsa3gJN%Q#fiKW9Rw`HTBwypY%M>O;SLv~+Tm(nS2T|2jm@s{16x?ep
zZJ|{2abM&BCgT0eZgTpY=<sXK7sxUf39NV!L9-m~7bilZu_I0Jy%#bqy%W_cdty}@
z%gVk;y@_G+ez$yT_CjaVz_QZv5QP)LMoU(ds3oK)(Xdml*@nfp4miBgtS5vrNbMfE
zeS=GT{pehMZS;w$DBG|z5)AqIM*y=E!&esVdYC}U)q<SPlEur-x}q6i7NvOpy~uOv
zvucsj_1<FDs9PUJ&NSiKJuJWImNv_&nR?zeee!LGEf&!FM4`&DR4$LEdWqMm?eMb0
z&$UFRc46>r*&igMv<*zFf?bb{Di+Kqh^hKDFdb2`<_49SJ-ie{WJkSh#3Td{>fp3H
zO`moczHExJkPT(hUs0JEZ>M+P15>QiE~<jY0!yWukk1kZrj0`m5`0=AkYRGLVUqOa
zNvI!NELvFr#&wQNS9Q4U{>&a$5&SV1;+<1KJh5aWsc05R@R+V@LZj>X;rkJXh`jaH
zH<`AGZ6kMFtwPRRKQ$P4aAHC^a#~J*?#(@1`f3C)iAG(~s4{FAu()@TZQ2Lx-%uEx
zb%HKPe13jmrd@zPd`%(X>)=kCwdE%%o9H1NeNV3oX_o9ou~>ji7TxKle>hm?c4uK<
z0!Kezzwf&EUA_0+AyzGOC;DK0lO>*u1&!$r=anPMpm&@K-5}(MQ^cE5&QnTki)P#P
z35Xy={{jOuU+J^map$D<w*`la<Np{L|50z^_y7M}M0^>^SP;`W0cJ~~sa#s9D?kPr
z!v5MH^j{Kyhc-gjlE+0+gL}kis@Gu4o5&+%8Tu8?=`IT6S<pEWv8S!RwE;kI!sVaX
z{2@>AFX`3%mncQ<i`i++0GmWDYqgUe(#o`tA^Qe~s(Y7j259Pgt21eatUY&tjzxbQ
zx=L!P-`tL;So+fFeDwFAc>v+co^JFZI?P9;Y+mS2anf@CJR$MiF=7>!Yr4U3Vtfc{
z&~K0KtR^Be{PPWB*Kb2e`lSJTEJK)Sg_{R!j~toHtBVMN>NsVFZ!=#4JNe%yQO>Su
zF4e|U-i(HJaePut0Xr{h=V1PV3sF8X9day$ch_U4{JaK+y9~_FdA7f&IOLCans;vs
z)qa2)*9R>wU34(a%S^D=7ipU{H#LyepV`SQnZnTXhm$Y~`TP6pn~n8Lhe&<o<dKVf
z_KdOJBIWN_k(+l*0~u?lotr<Z<KTGv#cYbtwW%mpGe=vdz(pg5>zOYXwxclxgN-j2
zU?$c>fW0~=uwOs>Y<EImOv83C>>qanPRhKNMBmdoA=HU`h~y*pKIh(9MxemL9yPIR
znSCqF7t=1d7vtK+EJLIjlcB_&T~oLw+-}7ASHlv-!%5Call!{sEk#nOtM{%_!iinO
zR~ih7&<Q>?m2Gyb>{iOL;gMn$dIHZ)OAoYj>|Vq;SQPO@3b~nNSV5R=shua>DGJDW
zfFy)mt}4OgZl9GUUH9ajoI6s!`sJe&O#{r{E|tIdUUz{d=|OeuBtU7AS-?3d{!eyV
zTsLm%+L95K2VRlUH~QoI_<}Gp@Q$RWTT!Ilz)S3goDzEqC+K28KNc78Fed}Rs-kJ*
zESA=;(0BEI*UO%QQ-qap8{@eHOIF@JWE15tAX!#==I?fPYkhhEFE9ytP<h{|KG)8F
z3b2MpU)dR-xe$}Q>SljAqf(Zi(J*gQYSJ|N)SNaSy>IU{PS~8PL4-GBxXD76eXg_B
z74*Imx6)2w0c9jjnVW&*y6l%mkLo8!?`r2~;)Ib0kv)9#q!EH+m^j&WYRj@&P&dn2
zG-lX(^!$d=eA}I(4EFb=c~;0Xo81jFkZvpP>HZ|OCP00q6pc_TB#T|>ZdrAAEEuX&
zTM|AYM18QkIsW1D{kWZX<C({tuz{PHk+$Zu6{)R!<GZMCtU(iAu>QGTt*2+o9<6(8
zvY+!<tG0=M)L<7#I+#XeZW<ncAPJ<VC^)<^<cstC<q#*Hki;*#4wF)Yri3FVqy)Ew
z2jUN|bM%Chb)h5U4forF5YQ<Iv+vm!ZN&kqj&QFYp!vhxs;vwW3M-;^<~*#?6<c0Y
zmi;oTd;U=;BpKy`jsEscOjMigtV}bOd!%zx#Ces;&@SY&H1y`p;0w*1_byFdi?n&n
zKV$Nqby<gZMr<ct-kNIp$<3=^m1SvcSKP;$kq9f~`=Kt`LF`;xc3Q`}YgeAc8|2f!
z+wkvz4dhW|2Wss}*DxQwaR9e%X#+2-Ah%Ma0g&Me)Co>u1s&Q8u8(Zs_SYf97NE5=
z<o{J%mjPG`6d+z%$!;JI(GSAjY<E~OES3)ZwYAf9Au!)7z#>}^HeGtc4Ax)ZpT_2I
zfR<mp{>OA1uY6xHzmjA8(-!sj&G!#`<bMyNT(lVB5^hTiJtRbLvvi|rENoK5;6EMQ
zP|gIpM?dgW{a-?J&1=1~(%;nYva}F4Hase5;1C-m<rJ*&zMVmqSt_03IU8>a^)xwl
z0{TdsznYlT65!q2Jbdi=*2<k3&X>3CB_MSxgIgwyH??FB;*<bg8-y$mX*Le&vGRpZ
zC~*3IYst5e3`nHEt%`p>ybw~Oj_2e|?-A8O1WYIm){<V+UTYb;5U-H(i6X0^TH5oT
zoYn_NdYNNd6$+1(47WAdA;Pl>6sg6#*sw<@){O>x(heC6iAE8}>3iadVpHvpAx-eX
z+UBIV>*JcIMGO1e0?tf`J*jw+!++)twZrF#_4zg(IGGVxm3$&3q5?~{Kvf=p<DP%(
zesDx`xo>g0OBj$E?4psTT8Ke0CDUSWgo}_xh`qGbdJg2r6jl;n6Ap4Vr!KF+0|BZQ
z^|g}Ey6%o^vd=u^*O8eu6^7NoI;T~tyGCDRQjIT0?^U$-5al4iAJ&Wc-tTR)X6?p%
zDhjrGH#<9WHFG(>9DeTbbnX4UYikBt1I9YMla*tIc(8h6ez|%O1r%L++c-1M`OV2L
zw5q!^Hq$lO@)a9L#{TX{>|_ahu`f@K2;+zSu=#lfxtA{?y1i|mTrqQ84=qLr(W+1r
zsTQN-QQANiVX7-Me9k3{-S}igOKx$-jcade?Fzfqn9(O3dAAKIhf$42^xcU2`>ppA
z3&SSF1dS?>y7<ddSia<<H!QO%?zQtrTcP84*`FT7={#oN^XjeEK9_YB5(ZdY8>#?#
z;tur+VMC@F&>DBv=*IPkdoEQ&Wbpe7$|a93N$FfPxM{vOKooM4a;$#6t0-uRc~c^s
z?uw8Ht->Qv!o`3)T@5I~4Zk|rgd*KEuR3AA(kZDjEpIZ?UiNL!C2#bWeewt7XXHmr
zC8S||*yjQ_LZiRsexJ!!lE$$W@jln~d$t{1;?v6KJ_d_(?QRVa(TI(!vkpy@RbB4N
z`?JOUM@fn47bQ0TQC}H6?7QVA2Gd(s_n{VQ?(ak=<0cX?YYyq0fPwylC8#03z;>Uf
zE`ggjLv<wj>d|q>+-QdraXPZ6nt*9P{Rd~7bqYu2%%Y!j3&)T|s({a+2ruU)<9_A&
zdz+9=RK9O_pJM*Fjs5Tb$F_;>0OrOK;Ri@rWKkR4zy^H=%!in*3#(}8G7E&Bq989w
z;bR6J2(rZ{kN&V7bNXK~hMx~OlJ4{uPL}K5AT?}{+Ey=k13f=cz5+BSP=iNL;%3WW
zh|kK3ym0>fdtUS5uW3iG&Me;2y=;5d-sH`jm%_rt{^WK4ORT^BO9U^Whe3I_m@e-I
zp?5$sv+&w71D;DeO{fO@62GnOy@QLEUqlbBH@%!d1wtWpQZGw3-mn^lobFYQ@LV_e
zrt}(9ganzjMtg_&D}?z*K!1|8@7M?Lg;$zSv%>eGAmy+nDHFXN2JGiKB^j@BJm6r!
z!sMe%Z@!WOZsmXicox5-GUVmWLB@q+2o#yg!6ucxil;~BtuDm0Kil6nFYd^Y{)X{n
zff-J#0?l>}`AMwOF<gt2s1B{r5+h9OmB+Q~C*8OF;Fs4bRME+@Wj|QF5O;dV!S1z^
zqw1D6#{;HweN&t7x5bQ!_Z)LE#49qFSv6|i8wnKkiI*$6aF}Docl8o3f+{0I3rRRw
zVP92KXlr?5@B1^BPv5@vz2d#V^WfrDPB;_*f=vnYlerCUkuWg`^`!IUdJ);fED^6T
zy=Lnj-sEuM98Q8^_av{L)bz1=u`)<AWQXtB322D6%A$p*#X#hhGttyrRVHDFGeA5N
zF61N-Z5u9s`)tLYP-YKA`!TH*5^vWlFRTz*sW~Mf9X%z2GVWR3<l5gqwzw#J@L7rY
zuGm49xYNu|C--{7`1X^$*G5O*y*kZQ)*<=o>N%6dn|2Kh_Y4-`shCpC@)iLzt<(tk
z-7)(;uOY0a(2qUfU3p`p<u9gp`$?VA^r*hTg23YT56tA*2I_!Y!!F%Jz7s3wYmTzO
zS7^G0Vh9Oe6Zz0GPwmUX`_0ZBZYjGhT^Zljlz+k^!E~j6A{!aDXstp(M!CB<7rIpL
zR~f%e(efD8;r=ie$K7dm!_s}nVU=g@Ry=WfTp<ZTOlM}Ev6nRbUe~~>%6zBgwMMph
zkO8dqbojTHZ`!O876I1t-@X9@7kZI$)yDCOiqtK4ew~K3pj?11-pl~pPT%lHDMu0N
zy;=dsW(&ElISW1sHO4HjtdBJ8+&!$9*b)a55avCQaP7ng5?k34AgG_fG#l^pRhmM#
zL3v${@E0M>i!8cqXJf){gb#8}UX5e$6VSHZE&s`9`{Idy*?wuMhYWjLN*Xq0q%a;I
zw9DOVFBAYVs?cdvJ9;m@dyJLrG6miHb=&wM!p<+R*+t8T%X+Mf1H-X?z-Cs;G9t_M
zXo<kc>xp>0m>5zC)s#lix?i8>vOjA|<ks5w`R>A;_)f#9GsH?=k($S!9L96bOpC|3
zw4j{eTN>!0DtdjL0q_D7H`VYi?k+CV=m))v_%cIdAq%(LhnOZ_?r&L{VW7r;;<z7p
zZC&*Rb~A(T%jZgRE)hNH*s{8ttTg>>TQoP*m*iZeaO|Yqp~B&=%N<TdQsK@H%6f9k
z`p;~to~dgc-JT+?4O}e4TzV68*3DJOP`*m%oKQ0Q74tEo9{#2K9wUO;@Q}gh8FFxD
zMKb$yrpU(9qUOEt(_TR?v$8HgqfEw(2yX2$3-3f1Hujp>nC%EYP!S`0z)$>{=X_7`
z;Q^}<bWA3hNHO%yovbXN==myLBy4Ci^+wIk)}s+sl9y|HJV_2I9(Rkm)UWGFN^JQu
zC_$(wcVC&-8a*`PK$h>osV7FcOWrY~O%3yG$~%Vx1UxDN8G?3+bB1rBCoYT$!v`bI
zrkoqf&V6(T*|%?}%<apxyui0ouRVgltZACfFY7C@E|&pZ4*tD`aj9|8Y1U54)v;@h
zQWFP1LC&Z%ursQT)YOjd7QI4&Q&B(@z|E5IxiTV4&p)>^V`om0L!K5)W$>EP_HDJ2
zrvf&C=~qSX%$-ud0pTT81#O;0SrF<m1?2RPy~cfRb}Cx?)oyN&sd^t2Puh8+u6Z^=
zp^<6jV-Jkob468zV`a@%cj;nf%HrI}^1b(jECcBh>AFN)<?h+A!HZww+&{pps;bJW
z!h3q8gLd&=KGA(9QzB9>KbnF00qgmmqt2B>X4(R@vicJUeX^ll)<QjBcO<Fi!-w)u
zql05=BOSdtd_J!#GTZAK`nzM+vY%ekM=-<95Qm8uanmT~g>*&0b?E%!LxCRYsYGd=
z8lj>Pcd{<Xjz?Wdz--dJtN7GXak;DL_V(V3q&BvQPN$rpL)l^0*QQ>N%<D#<mMgt&
zw8$ZJYyJceP5yk(hT`!Ny>5lc-erZXYtO5yqHIHCKiQaYGMHXE!N9;G&G&w^X&57h
z9ER_w%YbSj5<|e259TCU*_Tz-1mWAV_KvNd*fNV?-YVJR)SlxiV4r7pQtU&y`c{!b
zXbDuqg6m(+Qe5YwRM~p7hb-W!Q)W}|Y0r$*;+YC_Y|o1CkYzYJTCLZsreT)k=kNH^
z+PwMXSh`hFPTVnPaplA9N2>+Syz{Q?Q9hw{WrL<^+~X?QyHR_s5O4eOZH7+tu`h<g
zazIy6irClvc4g!~OT`Iza19G6Bx-Y#C7Xux`LUh>5Tmj*A%?<AQc2syL~P8v``nvY
z-OTXvLLeq^=)vDgJg^xzD%Ts_Br~}iD7kRC#VPW^^eWNMP0MO(Dq;e^UTsC&z>7_{
zxG72)vJP&d04{{k(7sj?aByzgtuWpocQklQlkPP;=Q0{`&|j?bs)>&8+=1_Y4Llk}
zusHDo%7EZLp?fqpEY|#$cZiF89OFslQzt|S0rDi}`n|w2ibDv0u2(_6vzB^A*kJQ2
z@U^~HL2){t=*JTNI?b-JE1BdcJ;-3&KEtdS^R$~38ax}0+U*qIuq1Ww+Q{!1ua@k;
zkZS3`_ahrHVyYN4rpza#f`6qy0M)#o@O8Mw`r4zO#U5$X_#0BZDJP2s?q(-495U0P
zTzm=!W6P4%L7Z<#hoE-2)wxnmI`t3F;a+@cJ~pGqsk)qEZ<9WypJ8@IGH4$!tAz40
z?g-DW?VQ(`FwELTE#Lv(NL{7v+g!&k`%`Ru`AB61+0UMtDewr@a-DuvZ$ur3@49W5
z(w+I0s!K?-tx%{A;sU6QxliFJJH}C8L!U$5EB&IRxOE74e$L-<T|^~XG~f8PTkq0l
zaji2#Hu*ak$Ncvp2E)wS0<C~9Q;0%s^{NDTJ1PiPSwfy~xQHkK*{H<f?ZFZc&e*#R
z&q{d}^54xvgxKRJs_=mxQz#Kj1TT4hk_{X38^gX831!R1dyqk|y)TlEmsUa5^#+x{
zj+`$n96z)HkBp^%7KDWt)?Fc-apku9WLvp!)E9N-tgAx&otrAH-jFfzeEiL;*B#jn
zBwMtO5gexn+CVX=87@?>y-))W-dJVS1<a*_W~oQ&(*16Z?S5UoH&W6S$*{xb43kwj
zvt;}DT0AafoFol01{sT{q=8nqFeE!!Z{p;Yb|my!bLIkZ>QYIvg&8VA;;Qie*vC3w
z&dfx{sACgw&bUAjN^BZO&{=3%^*i)(Z;2ivcye9q%!OlQua6kby=*Cb_nJ-X`Ab<{
z{JWNkD|8jADER~B68%F}J%mWY<dDy{#V&G=TUhF7ULWSXgOSjB`~LLZl^2yUxn%w3
zeH?miK!W)eG#bjbIoE^aa~9<y=vHN47r040Mvb@XvQs$h;8{^NBz0NU@!PH0yA8*e
z_npI(1bj(?z7pQ|n$h((;;i8G%bin;7X#Mj39ZhpHWWXEDWSlZwat4>l<ec?bDMBW
z@I!3`rQ=f`?3y^tH`6)rz_a*QcMerl4){=1sr!jt&5$8;Iw!f9UxLUQU!c4HP@;ZV
zou7q*z9wQ?UD(6m4r?lKUX>Bj&z&%3rnzC|OD3YZQ<_WFN;w^hhbogN!u-DGMKlQ8
zmn<4N(uYuM_r^=RXrVya!#mA-iy{naGpI(l2Q_NVUBazA8gCx8F1;0hVkBTb?{_Iw
z1ZgJltyq)PLS`d!PnDASrdXSVYMl@cgmIIAGi_{@hj<%2cp_8lSMUVAwR+wsuN2;N
zI<XEvfrU|d2kJFkh8;G>h3k&y`r49I_Ekn)tmZQmKTrp5pK|e36Nz<|JEnf<D&rqw
zAx3y4ycAA-p+W*!+*e+!^u$5fGXAGD+BMh^2v`z9z+#iU{cq^MnBBqt8%#DlhPIiW
zV7t0`>CaX0O}VvkX4Km%2+iv?2*bKyB4M!uV7vUcYx~p`u#L5m(-kF2Hr;hvrLwOF
z>hzq_t#zmOzIoH~v00o@EXle<?5X!#XhD^acjf*vIQ&8)ssC$_p+Y35<T-(xH%~C0
z5|ARWAh{Q7=88b^QJ7YRfVxaZ9IqiO5$-J;xqG)%yjX9SjeWLals%^9EU4FUL{eM#
z+Kx6f2sR$pNYx9HO((iIj3quCOvqg5kCF)D9cFWyR$!dp5gEQAD4QexzY7ol@!1Sg
z9Mxn4pjBdF&`E1QC?NrLa|~r4kSnT33I&J-hjov(F10($qz?>UeX!Z~WoL^2;y4$=
z3Pph;dlET)^`?rG-K99?bzL)X_DdXXQqoP)XM!tDx8i$nCSRF}cE6(4ixevvpl0Jp
zF+iQiXMFo^6?SNQDT5I14+2q(7#>&$D8v9f7=#g_NUVG@VdT=REz_su*(LKHbFnAt
zL&4Uk?9SQFxx>ry55C-Oi2gj62ql=Hf(4%?9a*RsAbNdCyv(0_tvv8RxXwh@qo;VA
z=+lia?rKvq>v|`FCcV%!>%;gq3y!qB1#h%!%q}lOROy$S3J!H|K0H#Nm`~ZLxdHZE
z1|C=!lndyShBiR=YwabBTqNv#IVCT~uP>PjtLPdk+#Pdhogpfav%SV#d7)yb?s6LL
z>|Zq}c!`k6-x%C5!P<gE2QXU@YB@{w8lOEoA631ELk5dxoQ4=n4#YCwh{GMeH!aYv
zdL2@ghh-)UQO;B4Xc-Etvjmonf`Av=BX6`xN@ZgW%BQ^13VXP;*Wwr_CSMmpSbz!}
z6s>)TTs6rNEHY0T4{A#NmMCqAsAkwT?srrDWFH6Qr1EvhZ9vnD@R<g67ZpO$w<xMd
zUQT?*LOpEwu$5ImR72;Ha!J9_FDnHNCj(@ei!(DSUx%y>%h!63^L#Ci3Q{gq20tTr
z>#r>cuW`E&=LnAI!(JoCyB3}EIwy<HxGI@kHQ-zRRyogg#xOtV>ZW|`>M4;Ie@iI*
zjn$A0rjincV*$7ATY(+kLxqDuc>uD=MEW~8Nu(FGL3A-UBO8Xy<8^pUt_S~KxA|vi
zL%x59dIS7e)^+0vT-11(TB{#ZK=A2-(K)}z(YuC+SDI$EI(AQ5nXdWokU2(y!g-M6
zm@*7iHFOzY<fL&Di<!c5%)`2mM|MbmsHvnlw9DX+@<qW8Aex2LzVIB)!rzx)2V6e`
z6xaevSexIHdW3i+K&2gk$y|%dmnbedR})ucueKy{X6%^_ePZS{@^oAX{jr)R{QSb-
z(gfdM{|{q0<k#9;fmVi0i_*{%sicp-pWah*tUi^{)p9R{FjnizCegf;_w0Mh44153
zC8~y40x8#hC(9=Gvd`4tu-1}t&8_g}cbO@3iNxdIWu^+`*@4Vd&<Tk`FynnOZL7nQ
z0GG5(oNS%~#C{UN22p*f{+AG)Bb8#!Zg;9<w;B|q3Z9k8&H)`VDLTMoRKW0<sW3V>
zvO`-KM;Hr(uFR$Pq4E14buby$emZA%G)6-*_!%~3i|vYfUP0wm+p+dVW4tB>N!<<F
z5?^<3%CC|Qs5bluJdKX)IA<ZpZzVmcf?pbnXO$P)Lw%fjnO1|E$h9ateGBrio-gcG
zgMgx^z3H9E!F8|QNzCYEQ-b@h+XI7uP)v!+722T98V9s$%TM<BPPg1XH$IeSNs$wP
z$b1+Lt+~B9Ua9g{_KP6Rv$V9V_x5D(##hOTib}wGlX)q&E&ND+{_8!@?=tZ7t`B}8
zDtRZ?I|c~znTv7I-@^&UU4ghVTML&G3se(q`ox(s7mbIvE?njy#W=)$xqn1a#_oVH
zY}C^Pe$1>qXDer=OZTXOIm>rWH6GYoi~!UX9*kqvx{6@bxMh8aXp1W9*|3C{8|}1P
zhkbB#`sCmz7rBxZ_uRu0w+rfmc?cEIGLQ+}c&SteoqF0_x*#}OHHekwWU}Whp4W3C
zjKp4hz2`&Kt;FJ_;DXMscgv3WEdT4<_kl^ZJM|Bv@P`sg{Y7#v<(d<H7cp(V8Or1f
zwIrjb@ESbnYrW^H)@@3;*fU3s*Zpoc?_h}PbKLRZK4geOdPacZnQGcnvj^e6s1|Dx
z*_!D1mn{dXgWoGKGh&kOWs5RidGJo{@O@FP%ur1Wf&3DjVpq0PX86475ri?IRUr4C
zsU@^WY}C3s8sN?flakyOKE2H?lMI4XWz4{)`S2j&x+C*-Z-pK?nIhrh;UxIsS;(TI
zpi5GyeZjj_Hhkmc4ZU}zB?8c;OAPX)mGFgTJGsj~!74A$D_dZKP%O}iTqr-C#WzZO
zA97e*pgwI`KdM5KH8#H?jd%VbK6+6lTH2_vSuxoFI|6`=DBgMzgdL$yR0Xh443k6k
zUr!j_-a3lA9GB7DGF_ulz%lh24K{oRZ7$>|ZDBWdswrW!zGTl%%E?AitI+~A;>eeS
zQjzmJ9(Y$CX$<8hClF;w5ZX(W9gfGC=t4S6l&06dIKAHC=7PqnKGJqOJlDHZ=xg9|
zUQ0S6BkJ?!39Y#_WAFPa?~8(6uA+3xVbkbku%20N5tO2CR)OIbaS#jAIs-|dz1)Xd
zuFhuVPqy?-EK28?A`%QW*cFkD&com9`j(3|Mt!hKm{V!97DuU$!X4$}=<I?+!a*E+
z{SBVEey45P@w@5!D`5riS`xl<%C}CEa)1i^d^6V8d_z7>Yri<taH2wjA8Vot!->WF
z(OHYb)Z?^T?W2q1745B75Ym1c#?i80@NrU$MqG)Dbli{+|L8RP1+!eGqwU+D&zZcr
zdqq$i)WL)7zv2eROZ1xYT{VWQc=?>hx1x74+{-yw2`pXK9v_dw{b8DukUdV;I|#+G
z6F7ED#q`(=i?a^Sdp7iMoO-PCW#Zob_>SW6r%(SD;QsaXKLMhd1`=pBP|$M%p&CsO
z6-6y0g?d)Kjt5%WJTY%r=WpKUoj4RZF|FR}ySn^_qW=|BJ|3uqaD3sMtCM=M-d|uj
zJIyh6Pg|VS!$-ym&BV27Ib8!*5jb8FR1a2(hV)h|H-JI7iTwx}T%jVQf$-0lx0R&(
zGHt6nM`YNfPAv)UavF%QIqnmj$6~0J++eHKH})JEMgIgB+`v8%V_(&lSWH2rl$|~H
zrsB;LWrfRMy6#T$?yK};R=8VuH#dBMEJ@TInKo|ff>4did&kd5Sce;!4b5<sTVKa=
zvNT+}cjY9j1zC^O)MrahA|{H31~zund3^OOi8*--l0iv9{rbzWTX&@&QLB?ArG=;K
zk&(2AxS3vUQPe<D7d@d%XOp>s$_J(&BRP;Lr0<GqKrwjH{EZAOnB5&c=Fdr=Vmmln
zDJg#B?Kf<;_HKkfIi6_P>Vp&`qMP=*OwtnGJV`P@<yAX5BuGW=V0&t}HDvY7qZ=ZM
zNrS$^;Qa|Q6K;)%>iCF47AehU@n$u(ob8V8C)Qp>OJAv1b?A`w{QxR)LgWR)$n;pS
z|CDiv#lw`SXv{s=gU*VxRr0br@j?!!Qm!^{v!99`N9fYZwe%*TQRz}-=?Uz^ZEUoc
z(q0``t+j;#;itu}lNi6qQ?qB1gY46dPZF<|kB_W25AUzdC8MTun^!pzhXah>>G-@H
zQ5l?<bX)b#;_Q0gDX^QWCm{JjOL`i&!<Uu5M~#?unK<y+bCtv5x>L$^6Qh?_&5lAH
zBqQ}o`S<VGZg4;Fcwl5e@j`f$b%>gipf<yzEwI>6*Ct}am2O|TSpMnDt(^x$?2~7m
z#Ye@qE1a4_4jazj`Rsjp0-5Y$(H}ecMq(X4uqx5JF{&+-J~idT*~()QSyl^Rg~BOh
zMIdKG`Hg}97T-$(>!i$Mq}11KM5_H~8(2wsEDvH%g7EhJL~P~3UCYXMk9Pep_TD?H
zsc+vG4I)K~2-3S0L8>5KAkv$NsPqy9ktR*LghW6<kdA<g5Rfh)(mSDp^j<<uXi7~0
zAwbCQE%zyJpS{n0_l!Hnxo_Mz-X9JautZr|nR9+WHH$be4-*}2y#(dMllZ_Mo8a;{
zQgU6ByBI&FjVmO(mtNu}N9TGrl8|PzF`bDQnB)1;lA$}*9~^72Cy~uyPuglv<><@T
zdg|lk`_`W@+mi^ae>l1Mfm;^Kf1EK&l-&j*i9D6%O9Jn9FLB2SwvN|W`RnK|Q@03`
zy4b9J-9rtMuhb7KpR7W5;iUvIe77gop<|iWkB7^RMalP746P~@oVIbVv;SJ3SCh1r
z!kXFSD~c|o$faA8sNRmHo8x{VYPk#cRV{V(A9~rgOy!@W@>|1If1l>yN-!g#t<>;#
z4Ve)xs77lrEgui17`bMo-s}&Vl*=T1rXC^QF|6D(4(GN6uf~?TnvCNYI8FK?K7QSC
zxoc9OilOf-ep{mZ{ik4b;cmsbGQk>Gn}I!SL+y5NG?WA_J4Nq!JT*PI5wtAHN5-l<
zdl1y4G&9Y}3!GU0MFsLtV!Dg0e`e#%XJ!GJ_oT7b%h<m^B*uJtHBnsH0`hqkKzPp)
z{uk(BAE5pVHhZ-Q)K4MTa;%+*p-F%jW#ijvRlMsnOA(|I4q)F(5+=Z#hhJX?6tn66
zrkDn@Nd?v>gLTS(I+OnQ6#D|Y?3I#|O@h_MhtlN>>&A8b)9s8*VYuZV#XJ}<`c#fv
z>Ebg~eL<&iJK#K_;Q7R7SkkA97v&I9l=k;OX~4e?{{4u5>W0~DgD|@-(~GIf5ZG}t
zwhXwI{P17t;@&SLUer)4e`L5dY8NIjf8JgnLEU5Qwq+^e$arM$d|F8tvCJw9?9TwC
z1-*m2+jHEwRJ^JpvH74ne!AUV=0eQy@@P0WwV}izZ-5JHQnd(HIz%2#7gi1w<Va$X
zuWP8^?(Hdq3)`O^YEtUah_JkjTqBKLIRb#wh|6KUou52+7I9T*_r01t0tg@HlpUyJ
znHBt@dQs!PL2kZ*_swIu7gt$@(aiVEK;$$q*WW7C$3COV*U!QDjFt3o4}C+73ixs4
z>ca+5^9kxbE%icw#p~PB*&^}u>Id3J<BU(f8sGbp0GwtbOp4&0@_>yb@!S{U0gU)Y
zfR&_X*Hd)poMM)+hz$1F&*1X5w%P2Ud#7ZybAtAa0Mj-*(M2}vWotNi-YobYR58Hx
z6`g7_nR-<{E0g4L;se;1F9<$r;4o===WwPQUe&w%bXAf)P^Y(Xq|1no%c@=4=0~B-
zE9;mTPh<X!!TX6TUqLE&&06s%E6BGnNhotI7TgtLJ43uqeaoS>c50^#N86g~#xL9a
z>+;9dr*nK+Ux!K9E{AYvJueoAcK1V5v!S&3On|dn;=%3rE?;+P<xOb6=^|sLBGHBF
z@+depocfX2#6tPi<l3%TJH`4bvAn*nuDLF4d`~dz%2cxMq6U91jdW!eC2-OMREtIJ
z;-UO2Ip?>aPez-mzd~t&|Br$U@Or;$nOy5uNf|1!bUD%S>weF|?Hrw(ShZFi+-m>w
zrB+B48gca(_ciR^M>K@4P26{p4<%~l9Hkk<M41a58G^IiUd9*~q_w4n#R!+M{=?DX
z-)#_PYv9i1BTu|oLaZi~I+Io&zES|ZA7z=&a1a2wxT14BLSW{>2rfXRT~MqewSY<!
z7-?6BZ2g_El8+oKej|6)fQ6n0?{F@PKb}Ev#zMu*w*qG5T%dK#iTBdVIAi-SzcdtK
zV`aK={B25sP3Q&;Q2R!WL&*VPl&1^%C+S82XoO%w&-QNumCULsEAkhVL_tC+5D5rW
zd4&kId7jY|4E`2nqxDBf6!u@aY=#VHfRT@a(L?Xz=(<Lj4c1+3IjhnRUR+}%R2*o3
z+&WODr|`EE{%%ctM9?{ZgomS}dYcgO;EHC{NI3t@&9v1gW3syL@mWIxSq|z+?V|ki
zsvGmy`Knn5A3ZBiKT}+R<Hcd&f@RG-3^2BZ`Gv3cZgDqHZ|Zna3?^C&I5K`F6g=k*
z@R&t_V3z_Xh*TOls>9>li$ZP;U6hx#fm`-ZDLnyiYc&q(YUBHn^5%SA%z6(gK0RE0
z`GODmf&ggR(dC`)N;7&`iiqlEI@DZtykKQ@3<GFHFjeCY*PunT;5!CAf#hTkr*+T&
z>1~_Igqq^ZRv=+nWRpaGsP^Q9!I*FL!rnr2JZ#Na$Sie)DmOU<Byg8%^PmM6e4K%I
zDulk8KsqPP(LI<Va&Pa74i(MW=y-@ZCiO`e-$?CESLpl1Z1aZDuU7sucx7bw3I#B^
z-rPbkrRZd6)Hh7X-nA+TB9RIySR%KuTpT&;GYFa9q7;ES%AVG(BwU7?0!QNlf@uP#
zuv>QzDg{&tvq*fhO^ffF*@=tPd>S`A+uhxE{<)ieu_5Ic-rYzZjRpvz&B$h1hpQj+
zT|RLwwv?4MQlcYScQcLYSpdNnh)VYb0e}v}fVU4&@RLsLrQW}g|MmN929k{U&y=dI
z=^e4_e+)IxBK{4_`Ja9KC&2dqLHGCt?mU9O<<tS6tHTuJU^BW^nPz^A8yH5STqMN5
z)ENY@NL?}OcX_VuN7Z(RBo3Ya^%HjFpC5SZT9M6F*%JbDz>3yXYIkKIe^+Qh+$e(>
zmiiKS6I134a8i)hDBHNF1w$)vh9;C6=ly=M@A|@zv^|u(A?5!g82RBbJD3;nBpf3^
z$e&s``E>xq`@snB+*KukkG{ir_74D@j_C2Ln6QK%9(+h)o8%ci)bP{-exT*O#0A{u
zJ}Avtb<vIx*6i)#*t-1_i&Z*)g>CPjZYQe$(Udi%V{dV)0t_2@cz_*K2S>w2!GB8q
zc=WIw#5ZsTB9LE|1gSWB{QVmc0@nFUl@zG{!$AII-y{qFXNv`+xy1jG7m9wE0k6~m
zkwEU_8tC%KS2gDe^%DEp)wBmdm|C2K)S-8qvd>J+MQtgqPe&FDPMN_HpRQ9imGe=#
z&Y&r9)AatP+ZCoz>%;JFW@<#gPA>#6Y}6x(vOSpSSW=yI#3pq9l3>|A^NeCFonh`J
znKt`ma<0~+pIg~oA1SQ=O?4hm0j$>i7x6%$(+G(M_7EM_iD0xcFFBoIxt?*$qg+Bi
z!evk3dXbY|_EQt%dmUfnMYnlSp2REZ*v(%5!d`IIdoQ4vI-*dazQ&C_*ihB~o>!JN
zD|7j0_cSKf-od!f0MtJ#dfbPL>Nb3Aevt)szN<6dDi)~1**DR{8=_9hDiE;>;#<&a
zFd)Nds@l^~+_<`#3Ix9X7hd%pQ3RkaJu`Z}e8i7W{}Yz<r@rwBK>}1u;w1m1sQq9%
z=g|Ug&HsPdf7=Z)k)>7d)cf8r<!AMwbf**(RG8&tj)?&(&zqXAef^r9W`7U#Y|O5K
z&`d-jYDRVj&W+|R@B<y(*KrgPs)9d<SmtEyYKu~OIWF-xNEsoeE`H`cw4v-3@dBG7
zS3V+ZHs2v#;8ADGKux>)47VVbeNIQNh++UvHv3-l;PvgQ^5phmZ6YhpUhfc?k$8*n
z09cCB2oLHn5J;!(kJXZ4XN-MIH*nPa!mU(ukDly`#Xh<CG|$_olOZt$StPbXIq`}s
zbg!2HqbmngcEaqzxXk(wAzIn4L5h#w-Zx2-Ehxz{SbLa&=9o}gL@M~7X&i#xMLcp{
z@Gnqx)P>I<$81CkY-Glf?oIR_04%xQGux!tQ3N83GmbL>^Z)qjUm*Sck|P*W0sM62
zRBX9={~|t}=gcfbFZB2RE-4NGDjs7Ux{<(Hg|?+9KYLDgkbY6LnFjs3^Q)2+=doSL
z_WDTVay7;7t#dVahd3tT4L9q!%ra^DteNHH?{Gf7fR}P`=kCHz&e8!+fkshX);m`@
zS7MG4^LL4cZWx`5FyN4-VL5$~!WS<&hd_Vo3T6B$CmzAd<{T;%zm{xNoP8CJ>3s+2
zyNZ=xEK%WkR?669x@zN{aNI3{8>7u#3Ae<{aJK{dD}**#+j%eL5SG!9U3y%U2L8AE
zxh!m4$ux!MZA}x)q_4Yynv3Q1hHzKu4?cZ^TV2<mzBQ^R(WsXSx5d}t%2yM5j98&8
zc+%2YywH6tNlk(oj%@0~q(|14lVT+5%9Yo`PZD`<dpy^OR2zcsV!%<vn|L??D$rHq
zq7wXgxLwn~L+(#NN{uP6|M*N6Lm5eLQTO>Uivl}t7m|)ctoFVmazUYZ9c-0mRRuy1
zcQj+XY~amV=FJ-ZeECN56D=X{;Wc<i!#=9&I2%W?vkh1Pl{^r6I;Fo<B@xE)MAntG
z5VfJA+G|?R7@k1n%n%-2*4JYlKA#ru8UVRd3FmU~nduw?rW7(?LVUn~Yy!N*#mJ*3
z{haUo_~LVQ*uq~RT4fnkX*W2XvSJ~gZ(R3g(}e1T*jau{S$mxNg29G%Q>W_CSckvV
zK##Fr`6z&?;(HD85i4=jy-Hw0klXY_+b&fPW?#C5kH+=G(odX5&5~i$$Jj;KAo4n2
zW`M{~vv_jd)7;p`EEQH#3q(Z%;nPef{G3dP7&$q;5>!WEl*B}qW9N^3kX6|2ok_w|
zKjtX5N6;~*e%-?llb7_ISDnutU~0o8XB+?wg`^@-;l1<l)Sb+U+S7?T$MZ!P^R<>H
z)a5#cMCrla0*wAQMlTSL7(mDyKn08407fB-AfTivDLC>*Hdy>gwez<h*BGp-t_95&
zI6iL!2MD-dPN-~UBdB9fx{WUVTvEk${Zfgn@JNy9=pA^e8aTYJ-7oiKUEBM!-Pq*S
z$eB(USH<*xvw=fCYJThNix$`7j+b`2{1s4_fmk+$*K4%K^=jX4m=|mid=LVzmcOU~
zXzX{8e9T$jQtc-Ret9?6_npl_VRC{Rsl)!sK($gIA5sSVoaa;(f#M(n6v)r+oX|5L
zNIGE0d>p5P8A5g#lyCuxDPSxO@;fGL`Se(E3brI+@TKFcCLeFno3zWHzq7WLzr-1J
z7|nA$!pW5v;T=}HYZG#9Y^@C(pDl4#d`8VH$BuS5`YzsxqqNx_a1ctdbpW<krkbfz
zq(fKL%szSUSGAzL-T>bszU#xydcs<54|k~DNl6tLv)Y0;+o+dOF&kP&amU;&HKXL2
zY!&V_iy67{KdKEk`+n68EN15JJA5{a968q|jv_gg{V;S_i9cF-#vmO;<}?G*<!gbV
zc?V9dZ&NC#?9v5U>E54uCboa*q1xQlP2RS`TSsRr$jhJIW-eX0bTCic$?ObGR!S%;
z@Bb4XJ<|(eATZx1$l~XCLir|YIO;upjquvl+o!u80S6NUGVkPmG16RNyhc8DP?QsT
zjNI8jS0~)Vjl?x;VIiyV2s><H7Z1(6R)~njVCCH6+~5<j@2~^yHi?|?2cQRRsR!Z1
zHvmTTT#Mj?k6x9$0vl?CL{-O8v;`!)oV+T3`)NbFrAd9Dk~RCGgH69rgZXZ!$K@TB
z!cL<(PmEBc>RmP5CI;MnW7Wo1T*B~Ht|8U3o^)I9OHVhKm<n~DwAcy~?H!TT41C7<
zjlc;beAHK{(?xo-2ayzKTPKMqp0(af(VKiOW|5bw{-e6;j?^M&xVNA>gCTq9P9t;E
z`IBX+;Pl3Hvn+b(4#qIQFWe}0(NH2OfHLsJ`nLM>Xni)hoR{nGJy2Ig4@-^}r@?N~
zNVL5TL1Kb%!^hDFGw{PNa&qXW_p9^!1J3+Oa9<V<<-xk*@+PDs7)8;IU`_97rN*b`
zTe+yp;{A%^<R4q{ZJ_LXH1%?qKa&SOW2I?(&1!d>34_2s;CO4Ljc;%+`{;m6U9DWf
zev4t;EEF_azsFsSn>dcZC*?tHMv+e7P^FuQqQDvJN_Ta)3wyoCMiaYg-VNh7TGiF1
z`aIw7a&XpCgkax2y!dK|bKHz-RrsaK**H2rS5P}$eQP7OdcMtEptgP>O+-)Z(wB%H
z>rZ_bIdI2C(N)v(T<AUQ9?}m$MO4`gKY7$R9o~**eo8a89?;t`pG$gp`)+CsLI5v`
zt6D7#ZB=wZRMG7U`(8kvOjpm9SG)T>FeTr)_e@ZNk&pMLg5Zlc>QqHH>LMu~g4F=d
zEBiks!~fL`Kt=*c!d5TdBX|B*2#ERDLcnIQ8_zu8c&L;}M=u)npt#FQ3!|UcN7C)g
z=01BnFLEcS(m=qqbiSv1?fcbNx61#slc4u($^U;8{Qub5*gQ)BcQXPY;_0?jMR6)e
zmL5rYRzLg#e5B?%Z+GNp&Ypo(Ti=^Yz=4pd8mHa6SQboqmndBV52!kU9;$-RaPi^l
zIx(-aa%<qHDv>()#`fjolb8Jwj%-&O1WSHNMgS#)e}aOzzRp5zv4&AGwR~S~T_)r{
zdg;g8K6A_xdam#n$h3lFmOFKs3BL}k_cl0UlY4>NL?HihFt+<liy`Cbrp*ZZ-ot*5
zlNqtnZ|(U9WmBVXoWRW&7I?FBPS_9+`HtyCyDDhH)Q75Fe0_)SY|n(kdr@}eL#GdP
zki`|7Elid3NJEwU9g*4qE7lbWzl4ty5Rn(Th<h)mND{lHJE9SaP<!+bv$E*11Q2&A
z5O&#oHBS9&5sAsYHqceTXkC13)(p(~50erw>Hmk33-Alah_U>yUdEJJ0vryWtCG1Y
z8SWui!l1h4J~T<U_(njDiB|wB@B|RVbmG8-cdyDXUKs&3YwQfvh~!tE1m=<z@w7}g
zN1@PW{UJ{VZCz$_K}OPqqXgF$;3clclL3H%WHeG#lBiog8%IjK2V{5zYXCohYP!e~
z5evqi8@+}c@!>NLD*udwz<0>1@Z@Sd*sy>5(gosgnII&1NAw7QbG8j}XjV`B0WFpc
z{7@EJS#$!pj2M6c|3^%6o992)cji7=v;NcI^AFb<|0AwZK6`fSe+xb#+C>Dz+x3vo
zBvBgZsr}A`w*a!xY@h-lr_D8uaWAxn;`%NNb~k+-p0QquH+i3G_aqSx+dfD?Ub|?J
zO`veQaxOz05<WZWb`P2!b}5}1R(hHd%4zqEqw;DHhW=1q0p#qA&m~?-J%15c*g?EI
z4jiZ$$~L2$Z<Q_7)P{RG3@v@`^&#`6!PkmVD@Az<CFd2m9SISs49{)xo5vPdp~xcu
z$(~M<e5>ptzUJ+ECp+yfYiSxkzt{`8>Tl0&G7hpu8OnO(fb==!tf$V!r|JOQa{Qv~
ze$1<bJIl*WK^=XDhbk>iwR-kO`;OZ;R-i4g68s{T1s$+UzcPcB@3!IcGXV9N3z$k-
zTaHv0rCl-G&;gy7TxGL(Df~Ta6Ajs^Iw#*o)~qEFN0+N1U3bc7UCUd_UU(*-7=KJp
zVw!4`NXyaT)(_h%^xS(}z66f~)2Q+g__5u{a2WefxmND6-PRrE;yNF@O9zSrKW_96
z?hW=f$l^6{{Ku>pC2)Jhb!({kMM|?y2*!|#F3Z{dp<H%i@kn}rJ*id@D^J&3Bh!Nk
zw(>#%aWTS+oOF(Gx739ro$opms4@57BCE{;j_)5h@idDWpyXbiM&z;z^fik^d)(i2
z=^ff`SNuY|PFTJenfWTxlD}S%<3{`Y%`YE!uXh43yV}RbB27}$dBYShd%1Q)=Y9e-
z$ls#$c@lXjpaE$39NlpWzBmU8uglPFz!3{(&Ya$UVoC1DOl_t;Y#>=Am1*nKP%4S5
zr2j5;p#{t#XzRd()%*BN^zzQ4e_+{K*nny6!8b0u58(pd!wtL)<m(jQ)b^h=>!BeL
z2##j)o9P%)ary2u&Tqed#BazXT`@gaSHJ)AR^LJ_n~^u<qh;Y6{);%ITX_^W3tqm=
zTIdbz=AxKoROEWSOY*|qcTeS>-HoBU1HEtlqfzM5V4=(JWr-VgUo%@D3}6ZITCZT<
zsslLrUO0>MaxKdnq|>~W0W)^51$`=OvB#}wxHTN-e<?Rw^C@_2CY(*VPDLu`90GlT
zGi##@Hv`ivbGSJM6>9{$z2Hac34}k}(O{_1yZ+1}Ap<LWc+wU8_62gK071Rh<atYi
zz^_by=MvNUaNa!0uh0G`q{U2t$d*j5rLykgqfm8yef+Jx-)>Bia$e;()Cb#1*55wV
z>s1?K{HZpT%i%nZT<S345nDV~Ex{vjDV+!kBK;E73<vO-+Eq0%YL~55x(8P%v)^)l
zZu-u)CbFKf{>W{ui~<v|dxE=uFC3E|M&LrLh~7iK+m&PI&Q}?5eLP=TrETdxrq!wz
z7u$2=@RkVqs`Utg(ZYk}I1nEaq}2RLycU_?`sMAFL9k;68MgG2)A|~HLwcAuq(JV`
zoS~zq+-KYm7XjzdYE4#@G|8igKoM=Jc&YKq6PNMF>xQ)_2ci~wC8*vvK!D`$LO?k$
z=!tW{v?4Atid0%ETT?xy2l!oCt{^Hk_pT{EVy)<<jGw&`bc!^@oe&sxtjGB95iTp`
zS1eqH*cWuNjbwWPO_X#x=^(?0G;M3UJ2xKPb3R}-1rm{>25To;>0KKOda#S-?|6x$
zdj5H0`g?{4Sr3C;ZmWFP1N)l&(yAG1S#LH=dzpuQ06g5<?MZ(g?x@T3ef(uCaZ1Jr
z5f8KF{6_N1l(=KnVEq2M+8h*wH*m+%b$?PsShmPbPoK%igPQ9Ozios%QNFA|M<;5(
zWC5ght!~q;lCULP(5Mr>ZWTd_cirspC;3)G%ibTPUE*T*Rv-vE!QW*mJA5F2PzwEC
z&;J2Hl-_k0^{z_3KFd=YgmG7e+t$bZW)pX4Wq#a$9LW*xE;%65MZZw2pz!m7=-hA5
zxs&5>GrlE@*G(^SyNBX)XwpvT<-@Dn1Y%5tx4eZF90k1GUdjB@elH{%JkzqjG;IpO
z>VD2!8*j~udWwS_mtqmG!A_!{C{pdZ5X)uD7_#x*zQC?JGu~YbYGFrBRQE8sdSY{u
zZBTY#A(4u@pdbSSxZjN0&lV$dWcXI~E9)DoC&l@aALgAVS9M$}@lfu+$0bdv8V9Tb
zIvu9VRVq<CW2+bHM%RD&+gmRSz#S05Ha`NccgYW}S6g+^eGGl5#$jFtU0(%XgN-hK
z-$-%$w!E!@Denlp88fYg>{1&3$dQ;7svw}>!erpV*0CFV&DFUlqxqJ=;$r!fZ{2gn
z22p*hAnx<?2gK1OalAoUAP;)ElZ$HE7FU??Ex)ePVES2A1Rqs_p1yR&>DuGhI(_BI
zg&rP+yR8?M)ksbklumctosL(_5&Ov+-o?&U*7GCw+uZB4Oe4~f-Zkn%A%p$oqHe36
zZ3z^xi7W=}M$uX7UTBGn#HWIYQgejFUcX){m$|Qkpo2YAYB*o2A7868cBTi6l01He
zA6nTD2Un|HAYuvft8PgS`GH}m#heC<4{vkgAHFRbeC9lTZ~g<tAx$hf&G;KfgS0C?
zqD$w57H~<td$Fw*+zRofZru434SQ6y=NmcW?ki~rzru13+@~^p6DCVt&0~+)%16L+
zBE$Qbx?MV)(Q)bc4zh}H1H)*;q3h7V@zBaNTA)#?f-JE1{+`<5UE$Nz!{3s-VC;Mk
zVD=zV;@_c>oxAGu22(Zdo-Bv}u~AK4i`C?3l9zPx$+E)Q<80=|E1!OYNdq6>$4Z9#
z8j=6R>bN`QT6DUxYl80a-OIz08*nCjC+i8NSWj*hJxXo8<?^!{9-a4PA^n-(6Jss!
zjDOi4%o3o_@u%4I6^h7_XrgFf%(7OU*OI1Kz@m-x>Mc)?kI5RzcdDQIq=_hgjpfYz
zQghwKb%gq&0B*M*3a=h~yoDNyBJj6LcptcZaus>7Lr#HmKufS>pJG>N#9~BoVo<}2
zTmZ-gGbePG$@eIE#wsh7ZyMy~&o)#iGrvd@Y+aQ@yc`%|{n380bv0xH1KHg^HzLju
zg%D`Tod|Ts1>OD6slgd9xE|@6uMXBXQv9d@WQUcf6bUk$Oh+?+G=Y~tVm-sZ?Gl2{
z^&o|D&Dz%1aL2A;-@bey?>kNQ2DOVnXqfNxIVN(>%sQI89?k4?XC{a;J)-@YFb3r1
zhG0)R0WrQOio<HWe9!l-Op}W7P8lD=(&d(euy51C<ekFOQoJIO4sGSb@PFjvdUQCw
zwJS7p0AHO6D{#67`CdZU{^<JOKu3u*1N{?-h_g4?*+2s{&_C{WdLrg^XTE0`e`=y>
zc!%@wvxH7g`P?gj)Fw#a=zxkD;e_29M%Z<{(Fz;vS|GK#S#8T?Ia``()Fs6MTWcMf
zIPM3a>N*Cem+5vqo<GZXqRzRY;EQAeBkDn<nHf&C;H#g_bKMRDug)Y!w=}7?sJ(0<
zp$&T@qjKRwJAgQ4r2Knj&!5i;fO{-V2HqEkdra)C1sePWOa3)uFplSdANmCFzU(52
z(XJQS-}=E+H~(gaVECiP3`G8uc<{9>57pKQ&v%xN{de39K@K?ij-oPU9mn<S(R{V3
z5tA>k>ZGrw`S)jCcR97FItyL(yz*;F1^XPi;%=|^^gE6j;a6y_OlImBxpy-HCEef8
zDaw#n%(g{|sZ`~FDvrWf2r6T(?9MNZg7SW*EkBDcr_vKkT0<-`YN|hY;PpEAku%*$
zIS927ICJcB@dfE(V@$O`Fqvr9@V2k~Qi^px^ULhce=mJacj0T4g1|M#wT;$)P!U2j
z0t?$vbTI_7GN?l4RYdOgC%@^Byj54OYmqG&Qh!i0)w27hzphJBq#`PbmhTj&SlR3x
z;y@Qh3N<v2A1N#;2sA$nn$_di52$uwPf1tkxGMEKhgX#mxXX4f>;U$>Gw&n&`@Ot0
z?W^A!dh89P{jXOAxE0IC-3&NHDF@}`aP!&^FcsPNjP9?Hj4+*|^MEn!4)NMgxgn>V
zz{8$1->Vzi@$*k=BB&Ed?2Dh1dx1u*No<#~kYfXEVLKg*!73MZ4@_jnDQ((7yLMWZ
zb+61{w{N|<I*h-Wk@ar<W{fUfD)cTEkT(H#mX$BdY)1)i*~!x1z9_4<kqFFb2^G?O
zZ|Sn#qCpy7^L8Ys^c+m<yj)?n8|toX>6nS^G<rADOP06nw1Cpi&i>Mqa@!>AW|}|x
z!DY!XDQ36*1Ue`MHYyUZs1&ZQN30)Z@#6eGGn;wo31|=T*Tu6=c7+KhN?nsM(Crjh
zxhmzhTG+OK88(_ll8)X?AnWoM_okgRV~&mX@l+N-V_X<C$&#|<`gxLOX*jQ$)1@L;
za&4JrJXDzh`*>O?^&+w}&VR<FA%ZoR<B|HE5>OXo%D@lO^+&^jmS+i_XT2TEC^(w7
z=IA>LpVC3(i(zi4c`mJ8rT*(y<URG<f%(RXCJ*i@w3lm+LS?T4NxtAK%S%Sz+TC7=
z+q&OVKUmOaaoT@l+{?kx%n6i7ZbR7dfbRl_^*@I+%rw)M;iJ%4q=1cOAdFyT`GX_k
zkjWu%+0H$Cg)9901Bjd=ji7=LSmBA}!5S+zOJTS5>c)JR@#^eB;$_jZLdol-3|ABc
z)p18E+Ym@aB@%c_GJcLOTlM;|<E1``z2zir^!|J;Xrna+&vSkga1D|bEd@rkAE%oU
z`6lUC>!)h&hWiBb>9#MvyrkXq!NVRf&YX9>25qRyz<L0&zdS+X9mb;x1EzPj95gka
zy*0MaTVr%n&YuUiWcEYC=Wr#~TfZ&D#Pf@Dt&ezjenc}vfq$cBOef$Kqd6*5BAfx^
zvmf*>&{~zg3Y3O&wgV}hsb|#xw^I%hs|vsc?}zb+#I}b(!qOKYL$(C>AJhCyWsfB&
zc4Yw%P42%7>;R7p-*G`!>-)2(%A*wWK(ph$`dz?b1!$R$e9T_KQrle?plbEN@?#$i
zA9()l^edn&Yz5*xwnvz}&+LnTOisq;bw&2&1_wV<P+Tp$vvHhF_t(sSM^HYP6L_Cg
zeG=Qp>a=A^*7l|7;CNr(-a@$6d<1SsYc@~3=+S)Vt3hsl>~T7V<3*5CekqGye1_A#
zRi4mtfGS@pw59fBB?9i)<jaQ=z7!Qx_N>EXE&XDJ^Vgaq>wv_n1U5;4;dcZ_zWSLm
z80Z`5dwTUHbVw#cZlvM%+Du`Z!BVnp=x;7YW7{83|9BK_EpT^NdBRi~dh>NKA3dH=
z`u+N}Y&cOWp5M~il62cz<Fd$LJL#p~*V5zx(&1ZvIGIRKdgAi;`K4jnx;M~kC(Mbr
z?KDr)?G+vG1q$Rw_;cEc{Qg0&DEmD_^)=oMm)j0!K~I@=Lfk4Rec6(iA5Hj)I#nSn
zleHGYoCQwb-==U`V<;u*Q~3P}58DzQA|+B$INr<}=YTK<-iX7H3ql913aI%dZi6Ue
z3cp!2tc4$eI|}f9+n77JAeq^|DG&0*M*j`)+}xnHsZiP`>AGp#z)Kf(_n+-|lCYk;
zhFB{MOIHHkr1i>lhWWH6!-F9j25LWgnnpHjVX<5`(6uW)0hcHW2s%L46Tk>@GXh~x
zar+VRnI0xApysOk1cBQ<NSS2CpV_ZcA}%MSL#+TR@b?Pj{CYX^2wMFY2*s{gvyXB(
z4!{O~WFFjNAO1?H>^XF)EtWCwB}=DG)X^s8l`7iwhka?-L0$#2RNzs|^v#b0R;<i_
zbCGr(dxvEIdbJqa=(m=(_St~b2V|qmwwicVmuW8*C%=33`6$>$5|B1^H@?77zsuBk
zJhn%#!IFjAW5u$Webo-&)@J%>el!%4`gOTC-zDHv-t3$cO^VzGN{T&A(P7kkrp8*}
zw^8<lasrSL&b)!6h=%cN-TcId?R~vr0LUrlvbMH3yj6DG=#9G@ZpeF_X7FCUV}tBO
z@&x)|62gv<+<1LG!EwL0C|&j^QVo>593C2dH{QpbR;@7QuOoLHoq~%=ju#<XQ+|32
zQa%Eo-dWqQJhwNmx<aBT5T9A%b9@5;`OUBt7q1rJzpSTjXSfSZUAZ%5kN>eUk6^?v
zx2GXo>!($DQ1gph4|PcI$OOo3Y)FwSB*l1F+wJk1y;~-tkkr1#W#YdLDY{KxF8A}+
z>Q!?Tp@MY#;%~RH*n=Zpx>2m05VFd5N&f=b{%KDeznpsv@Px@3=c<4vWtHy0GF|cI
zBHS`&uXl8r1}5nYVS-m9D`YYq42S~<(2-qtC3o=P;mPhn^NRfH)5V0Qq^vK^b&!ki
zM0yun!Dj5Y2<Gxt>LEEbrq!S9FHmoo_h1Lj6=>E%j#}n+h4c2;+elYQ#w{52MHV3l
zqr#LQf3|FpdLx<VcTzL{jV_(S9MW6mbzcdh3LbXELjq_$ZNt&$esvK?O;*MFsSe`Z
zlLlhrcgkrjW)CM=NneZ1r=C9MZsOX14Glp<qFb4&aENe8)x~^>C*?Y~kXiKLOa5M7
zGi{wmdAGS#Zo6IT#ONFsc3+sG{WI+9t~py8w&uWLRfnEFL89U<xf)faN!kbdhYFxe
z$lJ2N9$L+#a$-<|5p0?LUn0oX?DuB5=A!L`ll3-$+EYKwK#9>CBrDj(04(7&Q!wop
zB5m|L=V+06v8*(JG?FBuN!LO0we_lp?$t-GoiC2)xM5RpDge|9E+^A&ER6nr0ro`g
zREw+Hs2e<gnrdMo#a=-Yzj*%oaiYRVE>sHNvf}x!(H#KL!#xFps(-YOggE(7qn0d&
zV&7z4QEz0p6CPJcaXHa@1zE+ZN{QDvmvmmH_iLb`Ft0T9v6}p(=;86r!(;e3*(OXc
z-_BaNO{i7cFqoxdIRQ*byuG}If_J*xZJE7#7-Kc*CR(Dcc3YG~9`*E{_YJ9@2cQww
z;x|j_!O>EGfhby)pkVxF;Z|e9>&+~!F^|?CzUkMhC6ad+q&`PdBqbVY?4}bm@vAF5
z5kxU8JgnKoTiP{Cv5DEzRf9f}?S6%(atB>s{>H;ACr7fUW9Pg}Hc+`40tLqO5dOXO
zW^E1nypk(R;kSw^abu%<5Z>7;pLW}fUr|74a0uLutPVU`b&7&X8!}eRniGt%GvP-8
zE?4KotLHqpFQzE^9g=a{(?3SiXJdxUpvAvg<f0Z~cQ5jwJlK=2bRyfd3?2RAP~l}4
z4>4c2j1qN$Xvh8sF~(sfPrnE9hWC97Zb#OPbS7}y{$gI@7*jUzCa+UAJwA!jYG4lf
zq{ixxAY+zwI8^n@W_V@R2-O7MFmR0LIoT4xnMakAUA%>_O%VW!&bAJatjIbqG?aO7
z`)oCCLM<>>k`#e2{{nTPVp-(n&Y&=dZWn56YlS|L4g++!D+{zyQ$wo*1bqlh${G{_
zK_5M*&;5X)5B1opYvg($pUEi|p)OmV8N%|8_=6ea1G!Vg^N%;@n(hX?5MpcZd9^ZK
zn!A3XaXf@~$ul1*-fHE<Ii_Y-rl+|aQP=-1(bbXU-tU{OiCD9D)#coH13g^9n<|OS
zvU!~V_v5#N(lXL<ecc?1Y`RakDEY%7cVMF@IvAra2q|pnTG$KOQ*a$rBeNZ%Gx3Z@
z{GG&A38#m#YGn6-#o26Ttd~=&^mdWvJth-vAnpak1u6;;2oU6cydoK$f-mn;mBUKZ
z6wvaUV&ita^X&aT*tncXMT>h(qRp?6uME^tt`wYz^t3#0HN$Bh1_m5=U}b+kyn%y9
z{B{?Q-xz4kP?nF;kCzV9rZRH!QXv)<Qq+$ELR5#p9cTzK7g13JK|rct?1{S5n?yX|
zl9KfOexW0e)2vtBe(t>sDOK92a76F2Je>J0bg(NkALQ}ebFY_*#Rm^^J6I-C?&d;V
zm#*Wha0!p^TjQG`@#GGRWe&j~ZyUMKWCkbg+M2qE^S#aL-RTAn0cweV<l$!Pk_2Xs
zZl9l!%I?Te_nQ+KBYF$V!FmecYZ@wJYsEbRF{%+pt<rQ`Q^2(QAFEm!Ka8DuV)@%p
zPMt>K+*hzSlV~1V{X}VfzN}8Eo!)Pnrk3R+5b-(XhnU_*i~V<~=wBed)&$_8@&`sl
zakp0=ZkG$;2e4pt)Gi5{l|YSi(z5Z<irM3sd@~?MlhRo)TAJ9Xo0r?jRVrQD+48EJ
zk+N@8hW2_#qhP|nDS{ILea*iJf_o-4wc4maZI`~Q-h}^9z(Fp;JdsBlGw%NwY6~I&
z+u?s%E?k@55uJwCV#1a0F8ko<G%gT+JRAh+8Bbp>NXLxn%!uIoU1|eY$d6R6dm=xT
z+i#fmE)2gau@X2JiNWu$F88!()6J=<@&%Ib|CmwsbSyyqJh0k3B%7{u&^pjIHIial
zOb0cl4lfilj?A85<d7v$AsllwPToeT@GXpE`&Dh+*Cp%kcPi)fZ5%&DLF3*O*?Flu
zh&`z=Bi#WU+{?$7_-+gj9?(JZ5XYC9{6x3Q`2GStnesDKUD|joId?SwT$HP&e(^h<
zjI#bEAzm8yd268#L?jX@aMM`96J-OpF-vt=q9ncUiRFX1Y>%3{+Pa4hk$Q9=*fkT2
zyy8yMH^e`x0)DNWF$gKK|1S^|Oz0QSb%MjWIMi<vXt^O6pcQgk$NiqGmlwOlKVeB~
zyi=6Sr}+Gb-}9T&Mu~wbz2Iue7$Eil=sV-N1Y-+~1bi&bRpTAYgFV4UbaNMxD7TUi
z;vd7R5AVKRC8h9<sFR>B_EIgosD-*<5K+h~mRQ2ml`?%Bev9wK;RA_mUOk?SA}dY2
z0R16<CBngW4qFJ6d+kRoU7e8)iW@r4&2>4EXQ%~KePv^d@wcT1^;XP)1yQn-%M)$3
zBa0K&z){5cd2%h<*t*p5otUTQe-a4M@1uMy*2Y&^MFS7!0+Ht63VUsQIv`|3HYhV*
zsqhiLd=*0H;`M7xqoyZ=)Ur#UwV}`M?YsIM$DSt@{W)J&LU0&bTNP5Z$(moe(!B-q
zhA;aH$=(QCzc%uT{i6;{Eg%Ho&zUpM^#f-xt+_6RHm)hvsvMQk3~eZ@yIoJ;Kf6UU
z%XTo}Zqq$byLJrN-U#%#_41to$<X~)p=~GIiQR9KH7ZXX4+~qtj_(a!*&jI62}ZWv
zZWC5q&;#Fvw=2L{0^bvEW6QfQ<2FiVyl)m>5A^!3<f7_Z;YppS;nDT6{j_SkeUi@k
zB8ez~-|ppMgn^x5;?vJ3cc7A(hMl9ne3VvMeSMP}yB%q@m@7!&=xk<@>9qa)D*VUu
zi$eI0FECDxH*IwzdU)93ZVB<WiueHReos0N9bi3Dswa-L0=hRt{j-xh+U&R5`qTuB
zI{LDCO>cY*HSq#Ja4sJlGU|e}z#Vv^i45Oyg>kZU(GVKt;8&j@h?46!^Yd&OV&b0<
z$n^CaBvK4A5$~<YWS(^MaN+0Cb-PX2p5c9)X23+mfg#WIDdbXF2Rj;SgXV4cS#1VK
zJfvSP-|>kR4j|pP{8M~)j0KiLNevt<0gZTrTJK2#hgVx6n6|y=7n=FD`fTRsbx1$N
zqo=j+zP@~$%BMTnXg3tscnGWxm7Rgx=;5L>Sn;al&OiwBhCKBxrBI71jvU9U?rG|*
zaug32XwpglBq$giH{;`Qk}D&fMsJpBd`DOevJElOb`yR`=7s$<%>2#|P@2D=zuiGq
zbL^8WruWkEZgVgm*bst2nG}Pd!>8-wH!+^{E?#|*M-?-I?4MaimAt-J)jkz`eIpV>
zqQIXC7$UfVGL!$)vSbW_MK&AnvAL4a4SBO%iwG-q2eh+CA&<E38f#O1InXuvCZ1-S
zmma2wcDa=Hgp!oDh+;PtW(RM#5fj@7KU4fB@x?-*V#aqa+hJOfE`$E*ex)`fM+Vq*
zp60unHV&sdm73R{yZv;?;!k{;T;!;DIX<d_uY4wzK-HLavXam_KVPsc+Z*Nkk?T9T
z#V((~ikGyJ>u-JgzsC>$ho86M%p#8HIPh*yu<)4D6yvd)5|<bI;(2`*cA5H040*Tv
zQ~G%0v%`lx$L?{vV>8e!oybrkRTVxM+o}Cdb4+aRvt~r*TAC43tWwb_S-WjON(dy}
zQA9fK{N?rkQvC}=nZR^`*lDE=L}HBIx%+*T)l=nnlXgAO#J-IvPPWKDcpvG$64d3v
z>-!6$VSJC<1;`;BgtMr!0z5<Y%;x=sF6N%OZ>C^{F6v9V>H{e~N(sNrV9bHc;7IUQ
zWqIrYM`w`^dY)Cx6rD04{gzSK{&nrR_r%SnEn!j&4;QT0GIg`NX+IW8-YT#Kq5VA+
zKve%lLk;%~>bXKcz$cP2v$6Cw$mFrnDA+!eP6MC6G7=kLD?L>MNM>HT{ivbdH9>9J
z+_%<Q@4hxr>OW=`2^V)T!nF5MC)EDTw^I!Qg8wc7t@Qxb9|dRjsQD=N3?Cxcm4p(w
z(LILnTuS^t%i{dAP}X2IVCR=3E*FSz`U}Lp=$7pA2zqDgY9Z$p(0iK{mC_|EDZhi>
zv)gPWt;gQIV7;*iZC+qT0yR(9eiXL=K1dclv5I4gJr?&W-e7&R_2j#{nK#?7Boo**
zs8TIn4=C_M(w0d=jNf%0m$=qRgvq752vpqDwN9B3+O|#70-3Z{)=ZXuliB{+H9ce8
z{HSM<i<8MwwTshibGLVgzXX5R9NB!4Nfh6P(D=6KuPhAYYq7OfT2Q@EwUohTWm2ar
zfSf`TM*DA>)1?#1VFM_N_C@#;++H1qR;SliVcNl5Lbk<LLty(-#Kj<Q+AKnN)rS+5
zy$do~j)Yg`r*|gAL9VIZDQFP97)hY=sJcy=Jj?IV7P4cFCHHE<ADumj5hJm6z@?zU
zkuZ6J!n7(|U`>9)gK37_j;|v=vE4{=jMp6?XEVF!rA^tVeBCxv>$a-J1%gOQc!a;B
zG#iM(3~k0M-*s|27NwQmHi}=c%(gQI)?GOZo1`avYY|ks^C$jTqTEHiBoD^67zbG~
z>r&A`djP+{xN8M!{@wQ7`6z$?EQ26Ht)+kp-Zk>OlcFBt$G;g%DMo<DhdQtXJzQ9X
z&*%ly|AzCpeadh4uWUi}x<5-&-{lLEOWK=7=fOYp!Aw!0?!fis9RLsMOh3y3l!zlX
z*vOQU0f7T^h-G(~z@w4oe!HNxxYY#+Z^8%pLGxP5^RyT-ir27_<*J!Tu+uJ`Y2~=*
zH@`B6F8PFzudeA)Cnr94q#pIFnozMGZRt78B+xWo#6peH%h5|W@Zne^%y2%IIMF?0
z7*ygiHrpA(SfzI(Y4E1mlY}()H$Cg}*RC$7wH|zVM{)q~TAnNHf{Ee=5sa!_&<E4a
zWSESmz};^sx}!z8Kum%?JbM@o5EZX!W7A%E+W$si1CEV6NeKG&_%WPh&kTSD`3Tm{
z@wPU-wbXLE?n!f_PLJyeqkH{gHFOPRy}Cms{2d_h?qB*(3Y%vBwK?S;d=3A18APQe
zP{B^0V<7Y7n_9&G%-Ht#--j6hD=&=Sja{c52vy?1u9Y(ZQ(l>)UAha#%77~tcTF0w
zh<4R(u2V$tUaZh<8^Yw^?OM42D2CcR)*s&+&~uY~i#AR0rjGVgQG~E6GlMSaDAVcR
zcfPobXTxTw<2kS?z&n<Tpu%WC9_m2DFu?GFMczuSzW(XOuY7U-;94d77W<f6Z~vZO
z`hPcGsF}YV=)Y~-2SJSiZVgU_H(eU(u@z3U@btTMpu4_D=@P5EH`d_@t&Ws^w3gWw
zFS_{iUm9Fn*)9r^D+uHzu*>^~>HQrfni=;#J9YYK(j-}@homX$O#_#-)r$`xfIi)G
z6(&8+5B8idBwl&<b&m&U3YC&RmbUph%(;f^s=Y4v3T=`>1WM~ld8&8_jHD}0yTY}N
zOl6w{z9s4RAe6+Ht#Yx<?z{!Twh=9kXKeyK6tX9A>zM^PF0v7XD}Zt+j<Z5PXVv#<
zq^T*eYf&n*6W(0_;-r-n$W>LonweNtkahQp5krM^rpgHyANq_CN~%(Fed+dCtevLP
z%*WXN;^`Tug{|C}_r73D=km>fHlEs<U6*r)z^rAo&aMr5V4wb{w>M2)Jp0Ad%XHGi
ze}TlA7Tka!o?fufM{c=<dC5><_xEfE;*>f!A$T(kr62g#lLeIgUb#E9e-`=aOctqK
zuEV;$=(<!;h(4HMqG9V%IonQfgtYGJC;7-{bR1!&rP7qj*VMLXRb$X{sw7i@fJ_6u
z>v0Ge&Qx!1!>O_-pB;YlJy$<@6|B#CZGhwxCrJACAfAGh`XFn2<zy!`7f;&j091<_
zF>k>cngcl98QJ5DyqUhL`Wy#R>pFn%zE<urF}=YQB($S1T4{bZto#kKd9wGcNbT<I
zsl)NRi}dQ{_!GbbZkvL!S|-EycI@>UP3_d_j^#dkFLi~VD0Z#Ce^E_<la3V!*&*}9
z@psgv0rL`3aNAZA_ad6n-?9diQ1xr9^zFerH~O2S&#7zX@`N~ad#>(}Zrys^I~Arc
zJu9d#zL*<^^Xvp3H$a{B8#q~VSkjtlw${d=Fw&MP7>3e#s>J^OpWlF-Dw6Uz8Tc#`
zv;5|$8Vd=7JFCETu+Qjt!`ff02G`#RY184_8jQ_&68j?ViJXP8-s%xwOpJMBqM_EW
z1Net^eM!^6?)|m|Qa3n-9iAU^=!bEqTAFg#JbVAeZ54p&O45g^>eIkzN8?zM*k@(~
zuOK^SI4kv0B${Ll?#%USru~wloz2}HW0el8zBu-d^ye?Wh$PB=C0j`~5pXz?i1`}W
zpVVkHl1u1#Vt=9>+>Inv<pJV4*`Z80la`(w^v^Q8nz;4)fMyL>iWKPuDe;Y*hc91p
zq}n}91<QAy3qu3GPhhaMN$1+z)2(cgcP?3Fsxl*WrTxEbU2;sdFas%A$9jR<cml`t
zvn29NQV7?9f9cDKDhzkB1N0rPA|h?M@WFto{dq&7ZJ^=q&54>K4_wi^cM5}gGJ9X6
z?^IrIPQ9G`KuusK)Rm_qDgkz5(cA^eO_bh(DPX^)`Y?4VJ@LP0ekGP8CZxc`MIMov
z8oxN$!u4Pg0XKe-C6{ON1<;J){TYk8_n<R4_^}`SEsB`3>PZ<mZ;9oJKGKb7sp&!7
z)pI%yY5$}smigvB2dM&zgSuJaAw?#h=U8L9j41STnYmStpxcViC<}a{cahF`>^+OJ
zY;vFHYfIK`nhbj>l{RZ6eCN2EF*mj-w;!>7x_33!swE){?1Fqf0ULwUu3|593jjUq
zOcyQq#uyTx`uV6!atVWPlhSD@y);C!wAGHwsec;dA)QJ(kx#P>ijLmh4VL}%_@e;O
zfbeu70ps4F`nyT%+18J4wHA6;Dxm#3Pe53#7l<$~?PvZ@Fu=gC2FhzR;pE@>-F4^C
z`)(wS+1oP-qf9BVS%~1oN(U*MZ946Oft#U#m%_>{20FB9DZ?@N%192qdlS3XUa&FW
zrf^^r8Bqx5j4K_>ayy&>wgV8-eFQJO*Wk2@h;3|cy^bY;D@}c~cNS;9xyE(Zkf!XG
z$i3_RZNT3faN>7=HLLt3-^THaqMLm#7jx+u^>H*XXOzqC0Teea@>vKw<l44;!m)GB
z$Fi)4<8>hOIrqbx4g_(&P{ir;Rq}dx*fREhc^nUqvOa<p?9Ou?V#R@}to<}`dREgF
zBvvtWYmqC@8plBHc=_9`E;$lVHaLN2_g6f3x(mKSRmL<?3rVK|N>h7tHCGN#qCURn
z(^%xADvS*aDlZn@`d-iOYm%KHQ5Iz2GfZR!GLZfRz+eOA(eS8Us7hXY!lKnpc2iQ0
zjif4pO8Tj_=MD=qUyQZ<S^ukBH4lKJV6_uY>03qMv;NCK!DkI@)C>dAs{hovFsFmo
z9Pt8T{ngd~y?yELx*-0uuOV5X*Z%_D%EWD6yv_kEj#?405wlggF!=hNR%L)jWdxr&
ze=l|00qT8RfqEb9KN^mI`=e#3;xak0^jfjyE>WMh{Vz}v(5;|S(&7QIu*(Mzzuox@
zbnjn?S?FA^?4N#xLj07X(p2v+#Np|b;$47>eHFf~4XaLm>8g@Db0kCZ*XVx-6RS`j
zZA(*yPr&YX0V3|-m&od(eFui*)H`pO?lHd$KsWIr1%RC~c}c1nObvCzdfE(H)`iy8
zJ&#Bj;xv0*d>>w=eBJ3{f8;n4N%LZqIDo{Oh@0tr2ubhcVJ{mTO_t(8`H>%z94&Z$
zNxn^?CcOITSNr68(MK<*om56+h_>sWJH~`_>S+<`g44iM1D%djMt!*qx*GMyg|Nph
zPI>A=O)(1t$M1qu-_TA7CngE;J8LR#iS8<iIC132Kp$h_fOEf_4#nTaQypKE1HpT$
z)s0PbHgeS#bIdQ<Xx=<rZ*u0LWpu3Mr`RW7jEy<r0g~QoJh^|h3QyJ`$hTVAAy>Cp
z<h;rzC9EDsE_ka{N^9?MhF#iKULhCa4Hjq2rS`j=Flgw5`Bqa*oXq0D_fm9>a7%>e
z;$Lmv|7g{ju8#h8Gj-0Nw@);ZO)w%D@P+0CmUq=IQ>lXSbe(2r1;r~RN0WkMAK8~5
zDft&tQGaOuvhX0|q4G0^tLV>{ICiN-RMO()Bp##PRemE-I;$h0up3r*H!JLNIE<q`
z_@&@<8QD9CYd|rl3+HWJgUXJt1_rU@k4j=AJ?s29I6@v9HBLzM{FJ~1KI7!fi!q7f
zjgP7?d2gB&7nyWWxDy}^0lI8>-qdK$6$QGKS93jBh}4eU=*Q^h#Z?XayT3@-_gAK)
z#mlemGZXu{8NioXZxFyZ?#54CLj%sJgs9T1g`P#0Hbw#~9*<e|Ocui8Mh2xpJ+MV=
zZ|9I?hpiOO>78%%nidY4mM$;%!I;X{s^@94Z`)-V+}qCZq(+rRi;_`MS;M&i^$K7s
zYC#*ZFXB@$lD(d9*#)vVCsp}vQ8fh_zO1&Ezs{hBG?%;<avQTAgW|H2;&O~oo}H=!
zGekb<)t1rfj27)>J)E@}OXa+pzz;VN^~amf9`PF;XOJ$nwtp#^J8#adO;bR=U80<T
z3E-)_xJjlyce|C(89hG`rWs>P{Mwg|wcwy>SmibA1@s&;1bsX`+CVY_%m(8OOt(Q|
z$G>GhEeZ57Qc6wBce@l(dm}toVDPoX4BOq8-R7x0A!837HZ|If0V;i%2=pQHA_HoS
z3GwVGTR}3ojc4*}RHSYC^~XKZ-Psk&OuZLocaSz4Xp<`@?R}&ghuv2jYh^`2Z-x6f
zAAiz!KJD0dW~Sq<=o7I>PuAs4>601Zgep%!gNp(!_stqV-Gx)TR&3L^(^FL~s&Pis
zeOUCrk=QyGH^`z)!*ef3$40%kEsF&A7bqU-*m6ac4u9Up&5M%@MIWciBwTZ54u2RQ
zqN<nY=GPRhjJ)~GnxEuzX2TX&6VKenKu?wkz7r?83PYlSsfE&+DE!W#a-#J2vjaa0
zE6R_S7Ith0ogS=Y35uMnvd1P&B3Y;%X0RfR9eNEH1uVSJD3y66G9^M+ZTPL4rhOk5
z<<-8lbY(`xrdSys-tfEn=9~XPyqC5H?O&j`U~<25GN|MQf<TSlm9{b6?u6aN?jDy*
zATFoy`W>#dwuBVFj~e#3-(wW!<@6HWZ@eWU(y^{+#5KQQ8_3gTnwBJUI8tbn=lvxu
zz&|4FYtdSAAjq$r@gl*O%cqm4$|+1nlEGz0=|R4WyV8(b0Cl^v=0bm)@=G#Dfos}2
z>a-!^S}p@+y$*C;S?r_APruj%dUh*8l?u{dF*()Ns|h?Jvs;nQqNE`qh-P4QIuarg
zw0TV#ZQQIA1l1EXGrF{F{JL<0ASn|muZP0fOt$fZ{u8c*q$4nGV1mg-O|!vjnTBp-
zxYn35ze}#g($E#Fp!NC&7SA=^Clt5R%urp$0^F3nGmOUiQs0wR^{4f40lOLKCe|kd
zo-1_J%eI4CRxji&n}2w6y7w%O(7b4K61Zff2C<w$;N{U9=_|WF{}+4j9oE#gt&Inf
zUX>;_C?F^xB2A=3P@0GcC`gS82#A272nZzfDqTQ8h=K@8iS$nBP3cX7bW}Q_2T0;K
z-S?dP?5*y5_PyUZ_xF7FcmLo?)=aWkYp%8C7~>u9ct<lrFvvr~RhMJoz~0LA&6f?$
zhgA;JUSk@o=W@8b(JmOPg+8B2VgHHw?@2Ue;3fF^da1#KCJNmm(UytttB1uC)k*>N
z=C1f`8`%~!CpU@QJc_`AnX2+SLyWE_d;O{DcFj;8+@!p%Ui^)GnpkAh`CjxCoC3tQ
zt4_*i2_AU4eSY`H`li<~QF;U04?c@G9<hv`c^Ne3XeAI=;=4zhoX2`6#AS`zncNd~
zva>mSfnKZOm2>try(I|5$T#Xur~cTJFYliMHZJFHX8xx{8qlwR1MvzI-Ch21Do|Pe
z6gkh<d|Vn)jyEEyc|%FnfYh{UGPHH`hwanfc|80-b^Lxb*hIJtGP?ga()?^4_{9bj
zWRw>h+Fig90<?AIWY4Tn_-Kb<&wQk~qQ3Oxv^)>9NB-NeLfuBx$f9`-zQhSAXLl#p
z-btLA2yxBRH3=8o)wt*++iVtSWj%KPcAElq!^)>C3Q(*@s~ohby9tgJRx!5r8=Xc4
ziOGncQ8s~<Twcr^dwP4xP*o_wpaJ*dKD*MyYXlRX92Z^(V=h?@t38C<vm>2YlS2_-
zJ{B&I?%YmF@W8kUMjUpEzrY((9cI&n9@&`OX(oDJT$5`;GHT@ARpym1i20f(3GFJp
z)pz2Mka-8=2wSk;lf(2MnP}GB>ckEEw^=Ii98+yNT}5_NZ9bvecRcZz#y*5rvr`&3
z9@M9lqv(iK&sx%6v#+<1C8j6}F$85fuzd}bE?;t0xDzX#Fz^u_|K-w)h&RXhqva?4
zW8ElQKW?ojf?Vfq-I0`Oc<3xxKNNYP+o|;o_lFWW*tPC!cnFlmusD;1AyPK6S57Zc
zbxqK2aZ5yi?s~8zUb;A^MAxY(a-jaDm91LW{J`UAYe(ZT6;d%J=r2!oR^S|nXfU6F
zqr#0@BFhAvIoVXWD**<0g6Kd5doj+ak$7C3TfOj;rQfm64axUXRS9k`kBbT{KMh&L
z;i9Hi0@p;%@R&?HWsUG^4JWod;wnnPqo<)DT_U#ch!Gtvefk0C_SxBYP>@!-0GsWu
zMt+0bM-$JP7`4d4ir`JCKqOVQ(7rKVw~1q17@wHYnss+LA$$FZ<0gBU5UDc)($$t%
zH`Y2v&!$2H&qFek_}pRBW&T@C5IkUh5A?X|iZ08J-+SkgcJYlgvO2I&CG?Q#=UdNj
z&K?$Z)9iVeN~cnYYe{lE*0;HDyEN$=8ZCB+KY}??>;?tII#jqqTAC`t4#lb{wU)T;
zjsXz2R;;!XT(vT0nvr)oOZJ0x*;BOEvD+ZX)Huks#)Ll(eB@-CAlTxf0mxug*w4er
z2#k0*?tSCqR(}FR2Cu}$;aAC~RWg+ov4=PE+v)Aaa*xtiA7ZbsoAEWpFp#+1g{Pna
zpiv%fLx<GfvovlLHqn*rohOyVvzj{lAVF}wmACu_Qy_pPDlB1K-ynAQ3;xIOU5`Z5
z&f8bvX!wGupcl*ldDnGXNp4EG6$S|I<cc0A`dlM4EGmYiD_kif*#{grXB%6(E@*Z#
z8=p6CblhkeLYEbze4w<&65(<a+{NBJSobP@-^q%K$myGLG_tFAf{_U~mMtD_Q=z53
z%!nui6X799;o@ITmCJ;RHHz1?%=1?S$O#Je6l#?zE(yL-QUO@|DPoP&GUAlG|5Vgc
z|Kxm7#<aL@)9D4b7;Ne8JqK~i1darq+?NnzD=&6~{i3$%DhysEGW^Xb0oqe5W_u`M
zP?^g;_{vbm%_|pGbw!>y9^tdU*z8w8_84*_>SZkxq7zPmI4Fk3^*8F-?UaeUk(osc
zb$&`QqS`5vDx_R!=Nk}CEm=@Ljf)BfG^YsN`m|F;TtN}J=ZAI-m%98kQQ2dhMh0`&
zeW{{VvTlNIFrt}um|{0TaJ1D8>A$jpcWXYRF<B$?In-an#>v*QdcBQ**F{^zOVD3D
z_Wq@r*{Fat{y~!X-H9UP($<?Z+?XKj5ZV>3K4&Iidw8WtU-Jkbe5Ig#_MKRH9jpLK
zU(1FkpZpq_frB?X9F;cblDB%UTBt2~ro&~W@A9eui_e)4pnWlf%xoeMW7F*q^itw;
z2^Kg^u)w=D$68^IvK+-Fq5aF@Voj>+(jr}RoJohY<iCEomjeM=?=tv7FO6+j0FonL
zIwxW6jFwPoQnqH;#xAdgSaioCD*kCu*P}E>n1Yw}RI1yyFn$!=mPqnyxTnVy9J)wn
z?=eL;uQT09A9pcIyfE72v{wspvN1^QQ(yjP=qos@GOd>=zW24-E5a4rSZNhSg*b~=
zt$ey5vscBmRXkbu(HrFTN)#SD4HF~W+X%cuKC(e?5>4Au*#{}G7}pVB!kx^o<CmkK
z7AO;~I8b0voLX&Q%k*k4J?tjiFB-fex`t;smJ&gvco|3uEb#((C9{+*&^zWn%+5AF
z><cFf)CIR4-yxN2ftuaqK3p&%ELo1|0fNHx<~s|Zohl8e^UXhuoI74zeH5@lUPk~{
z$bIBo!Nt;(!fI-)hSn4wPQI8~(_m8NC*<5$v_4uKCr1beg}B3}QAD0C=Z{Iet^gR1
z1~~VxTcLjF#*4xUNh=xPaZClV=Y_jODfG{SM|y$C#W!$`1ff$HVz>#66w_=`VWIJh
zPOw+$0WSRq5$V5&zQ1}!h#^|x>#+bFzd)*(K+ZRe1EHcd|6eqXFn_3gNcVf3u7BWH
zG5J3l`|n5%0OpS>D70ROSvl=<I$$;zvxb+vzq>I%jP`Vd0x2%mtD3)8o-~94h-TJw
z5V_cgBtnKh-@>CvYBffr8sTq{9tLTu>o1UPbUL8Xq%Crj%w#a`kDDpdwCej=^V*sd
z4~ZwpzVK0i_VL=_+xa%2MqFmdNO%&y%UV3K_XW8rU~YYP=X+=DU-uRT$qpV^6D+8f
z3|}x69z{ve>~T=MkYVn!z2h8wwT_vWX*^<B*#0GbO$fRP3YtD?JB3bASF3!3(AjKD
ztO<R^-s60&Bc5N|{pS3+_>xKrcB@uO`AY>BQb*6CC+?m&o0Y-!2u2OFS;e31s+fwf
zB(ViAhrjq_$Cc?%Omn!+t>NcxW#){$ReYw*tooz^lW>mFvRnXo<Y7>EM1Qjd&ET?Y
zC*Q8y1vi^Boph0tXAYk^$Lv7`Fy7<fF|(TRgBA}(Z8L>d9<I$ie6M71S+@R-DOv0R
z-QpFMxcv(9*0|J)dT8kQX<{PAfe9~M7IKPojMTmNxiyTy`l{1)`^J;Ew=FWR_G{f#
z&DTg}e-xGCrQtg{<8t=>cxDEHaqsL<aSV~87oLDAq9U*fPf@iTwVy(U^BFa+vTFq}
zDWcX+R3i6mT)sgTZfvaU_?)E^C}>8!637kXrMA9F%|FZR8MYgS_vrU3GE_QpP2cE3
zc6wOTyI($|)r-@}=I=e5D7_>lo*;zre#4E}YsLq(6B{q#yA$c{lZ2upRn>Dpwzp6q
z1ifwp$Nnk7f%u|Y6LE;tVsQjhESH(Vu}Ga_n0k&Qf^u$Ifb)yGSlz8Mbr;bhI^Q?O
z`{7r<T(?)}M!f|k;LG7Sg>9?;X8=$)<~X<b!qc0*@!sB(`r5@K>jyPGc0XGi_^}q<
zgtkN=XWW*n0EWpwe5#ra=dYF6fMj~VE2oMz3ThW{%(yymDd+O&mCKSM^z!RBA_g~6
zOH-gUEg{TYO<4dj%+-`8Fm&qEC$03+Pi=F37$G$J!I#Us!Y)nUv9B8onrJMH(cQkW
zmZ?`<QD#c%%&2<bJ!TCMHPEUY)Pw~JoM+SMF&eyC81}H_anbv{@TbM5&{SyQDuIP*
zGOO;T{WOxV723pfq&b*nya`sc3@4>7bGsCsm=~-{;Xf17&9-N)#<s|l_@}&v?|nsn
z@dpFRzR4e}wb0GqAb0@{LdXOgY)QchtllOVThYR(hS0Prut*2|RnU^+|3*4G9Gm=j
z&tN`a<2=teP*xK<FjGd*763|6o#yj<Mig1jYm_sj`cM!ei6>Wmc@ty%7T;_ThgQ7a
zIv<uLY@mN8?lAYUh~;I?$*+;c-6>cf*tgz)1+GX-cmeLwLNDk5K@AtIieH_A^Z>8a
zVcje#9a>{58hpM>A&!2L)jxl5+?^9WxYx8M7cA{`#QniF94aI)Vdv6N-kriK-(BOI
z<rjEDypwK(QAVX<Y*xdQ;jOy<Y3%rn*KAZKMd}ryy3D{pRXwXBwqN(@*7F~)iNzej
z+-5);6G0;}U0y^JiaFnOiKEQPdulW^axiI|O7+zgvM<lCl*3o-9?CtLn2LVYF!a^$
zPCK}S=H#@Z;dtN4Y~`cw7K;sKCbgOA7am<L>56+e<aSel@?>GQpr|FiWLuje7k+j7
za1Ur>3~kY+H`bA9u8o>Wz0!5U5&9wRV%aEHag=XGn^}=%T-?>qc7mb{-fL=z8-zQ(
z-$8UX(lDP05_@W)gO~dv?n@Fyv71~UwrH$V^nC?IbwNk7<!#z2pM#PuhP6!q0DzCI
zPQiU-P%H#S(>u5i)7qmI#rq>AGBJ6ULVk$}mNnOIrbj=h=X?{PT}2}`_g1t`nFmaK
z>oOhgr6PQaW{W{WXYHVz;rpYb4#;~Q-Q<OZ(qXYjxnts=k^3*hPa`_h$ll={_Vv7M
za4o5C?s8R%Cj=^(-%X@kSvMAr)Z%HEgjBRora9{jV8=8btS(r&Xh_>ji}CVM8X82a
z%ra|SwV&z^PdM#mOMF9+zzM^X?YsnD*s|aguf5Z-ovKaB+7~<2bv-_b`Q7b|lWuJM
ze$J*-;<2F5_M4J!pJYfH(?NWkYB;>lL8#nrx~L*Z_n7<ShRc+hsH6_)Wb)iWQNj)#
zT@h8aNUpl3?mKNWFw=tuO#p|1l9ng=ZPDy_6V1Z&2eVzfKKZyvMW6pdI^|wF0~Vu(
zZVvqP2T^lS39!6GJNeY^MPQoHO$81J*RE*mI8$650P-nI(#@-57Y@%+?vac!L+`zj
z{FRj+G-7U?R*qkJsUWm2sX-$<&1_#LD3P;Kcn(sXi47^X6@G%$Jr2PoHZy_V2RMny
zTv+j@lJ4{)%yu|Z_J-T;8SDp?qFChVxvU`07A8*CPup~;$Gz~_aTy};glxtHw)621
zaKpMdbo^9}r`EuVlLKPx@%X)=xi7LCTn~;d^WV=UTVe2)UpqJ405ACNndPbBq%@;6
zYldW~Z}m?m*sTmKJob&8=mhdI9ygwbDNV`iUuCF%-tczI;hreUnp5Dgn2^sS3cs(h
z9FS-wN_9j!3eGVHPPw~&WV3yRbSxZ{Ub|Q6O$dFda`t|}Hk^7&`^dp7p<FqV*s3cU
z4CT&nBX@gECtAFvZm*N8cIvhHgm#{?;dQF==b##>NEssbIL_zf^u#r3lfpH|(Cmf1
z1pix}tb37Bv&6!K_u8qj9A2ia4hv_L)b19?WE`uaIK%svrry%;zS0#km<pW2-DcVt
ze{27g`_6WPr^oTywL347`J=fVqfw)dhg&Zj8to>?MBEg*aGF-jz)w`W4qDLaOSt~x
z2EKdS&lAIo|7uto>G{4YWj`QnT#It`RIfPp5Z4Rr3tv>CXfe;VlM!>#`QDvsUTb;o
z2l_#HH!%&&**rFtlxK4*-5zQar}z9*H;=@@Je~vUU*88(N080ErEr&6BG2v2z#l*c
zBnP|~Ox8(D%>P)HCMaQv#^&|EMRR+lpykaVr!~8@#<!Mbv?BLD63NmrClyajYt;%M
z#z?&Q6o!5?+Z~;01{N*i#~W#_>P6@KFODb-X}?)HjU3sj1AhNK)QJ70dN3|{8rn>h
zXjIk>f914HS+nmjbLY#-GJW@KaSJr%gHz(2-ZV&E_I6ii=DUihz7EmZ314X8a9vbz
z58h_lC_qtio9Qs`IpKYyQ756@{JgrSyDlHr2=!68wKPe%v0;Hjp1NPy8Z-cP$aR$m
zpE7+VCRgL&eRu69hRl&?dht)_9Ox^ei)6oiT5hY`Mw1cKovM|*0j6B1%!u#wJdVP(
z?CaFHH|omd1%4@o->pZUEOy?#P5-R-dC;Ac?@KC@t`(M>?MMDu+uo!p^kG{I$sXx%
z5LW!{Z6%FMgGWJ+sgTf7LB}&pXZg4JeN@``nbo{f_Z8=`NWa=M1TDZD{CYKZb!sKf
zAybDTr(5X5xjR`dLUwt1R*#&bTiCT&9^AX5x4zCeu{3O(MzscShWQyX=47}l+z49L
zE+g|TEgU|&ZyWfa^=WJp+v#&b?ZU~d<9-!eAm?Pl3Y3}_u#Wz(#uqD_8?ZO*dbA!m
zOFb97+K@RmI&$r>y*fr5$fA6MaLG7`)}Iqz8pdr|Y_z~xZNkR#CciknlTJVF^LR`r
zB=I?$F<EZT^{e4=<RQS^x!ERXEpzPa5cG(}1YPoQYDb~IVxeg0SkiSPT4arrRYA+g
z$=#-DjFV_R(86)@I|p9^C6En(1tdKk+xcpgvNn~HqM6Y8p&~KuexD>wnkt3G;40gO
zc<IUq?(eR=-}m?j!vmWvMy;z<Gl7)82!L-y!-!GnRwlf-Fd^Crlzx5efNh3N$8o{T
zeyTF~RowybNSTdT)}%;UtpL_vV(|v94PVvWX~gd<2YRT!a1ah<k#szpFxzM0$a#z|
zdZ?1{6r47=uz%DVH!rSby<7&4H}_8?(jUEGbc@7@1W7+>AH+}}&+UAJXx|gvegq7;
z6=wI6%`zQm$htT|6n~d1x)koaudT*vbov7kPEwJ>Y&GZ-4-O>;z-F)YVivoYi0q<r
zAWt2wvr4=l2ikYbWKJ>dvPnDbZTNqKsD%j?=$$($l3pYt`reBH1lylrTSGJ7AR0=K
z<Npw3=R`KOEdq!A&vnrecaZfaAeQTPh3?bOqxQIs=2L$fyV?>}0@ivU<j)UM@?dsN
zfcf8yJh|1e4g<xEBuy9)EoCo85(UC)Tzq~*uz#GIKiU4T^<4a2iuzY^`me^=@SuqL
zCxt@b`mP~KwBT+NZrwkD{OG0wGUUy9)}~XZxKSlwKQ7+^g1>bX{Kv1NYmFpY4zRPB
zeF5ah_YU76S?0<Z%f(*}2VK+sxCd0;RDTZ!yd&s>t2qv!77K9I=P1kmNR_jDaW3x<
z%4xZ&f7~2-p)WTEyJYe`yw+qojgugR0hzT2u)Rb1g%z*^0Kv%wE5b0qPF`{?iW*fs
z13|sdKRQFg95EX+nT#RF*c!M^J273XJ-GI2<UzI_4n2}@r5NXCOPdiFm&E)k;Cazm
zZu<?YOZcdL7i7J(t0$74pxU3(jA9`4@*FT0tmx;ZBD;r{T`hK@G~{Gu%<53G-@Be$
z<8-0dOZ(NBt@5#V{@3~?qMM!VT@QC2EJ%C1R(`T>?~t+KOWN>+yL08&oZU3qCW@Sv
z(KxB?=fz>e^;q<_O=y7U*Pg8d1s1P+H;x2d>hj<3jXhuQs_G0k8=f&*76-vxe9x9?
zdo4fW{kfO8*EQtCm(m`eU5o6ltx)#O-qE&c>&S7uJV|~y3^x(*y;=*3g>J*wF2bpR
zeZ$b-<$BJZVz6BFLdW8Wis+Y6nsuk?w(0XzDREyg2Wo9NV>*<##J!>}Man1%BF}Fc
zB@l%0gE$P}DX}z9&?%}opzXfe#&z7&aa=khkjCB=1B1s8unbr|u*kGHnBHHc5La8T
z#(M2<-F0dYfSQcSbQC$#Mz_TMan}rq1;*JJPwMD@1wTb2SyxrjD5!|LexdDb$IUM#
z8S1W3F}9I2$zfH>s{>m)QfmDxGTwJqxaqj1?QF`+V{QXQZ|kJ7w(D2V#oiZP+usHl
zpI?EyM#Dz;ByuFb-phKL3dw&rP9XK1{E_7Y<1$T&x@oKt1#y1#{{HKRsZ&g&86;Y#
zw~aaO313Nv0Q=b1J*iLIoc93Fo$-m|rt=zSW5Q3C8hv<j(`ZQpsD#sa(Gs8KO%4zg
zvXGAW5`)8w$zi#PM?-dNyPjuOrl~VKw$d-dwf)<sYZfYNTT@NBwnKM~ab+Z$jeQ-`
zu!V5&^v-hNwD|><{+0Xs*soA*--YM9_eYtJz6)7DZ}o{uS+}zibDzZEbr2A{$r~-d
zTOLJur<bFr;mX%2R`Qb`D>bbq2|8YpljQ(>EVd<9qaO7Wq5|D6P5_cS8Sty+;UA45
zM1kA}7i?A?{|11Mqe$Z6!F?6DFO)%<za89OABb<fK*`K>NnjsA9PjVEO_aFi#~y#{
zqMNN1?Tf_qAp?d6i7wilz?0{l227Eo=xvn>-%|qkob<PC<d6+%2mgh9pv}wAwU8n7
z{+8BVZ)-KLEU?tJ?;k_<8unmt!oX|RAjk<X0o+Z^6quJf6sg4iLukR4HQPGs#}A;m
zPpCp+BAWx<H^>3x-oOL?|NY+g3HxuEm!E)<eezZ$P7gl9Qb*#g95!ejO5;ysJub>C
zqBQtng^K!xh+6l~D{H+^!|f=XE|kJc1>X%6lbM!B);KOZ9P4im6^aZd|GE<+bus8-
z8%2iS5dmMTxg|LH=NZ~Q(_`sTBgufb)$;p0_+P-=>id`AD*&~EfhTJ+3s&^HYDSmR
z4p!{e&?|(As^ws-+HeToyIA>9;_KmBwC!$^`~GW{c77ua2|RGc>Y@AE1m3=C@7tEa
zMj>?O7M)jgN;Ypg9a%W65-I>2yLrx!YNSs+Hbd=Xu*ShNG0`du)SKu!RA?H{FwnF@
zmC(ajy@YJn2~7C7`~0;<K8~is?T-SD&N7jSi<crcO{cr*2Z#NeJmu|+P926h+2+C-
z-J?|35dK;Cj7GWMyCzq14wzv-HH>B?a8iD@Rt|t>&w1p%-DB;%G;CD*NHP>{6FEkM
z<x;1nS$q;JKV8d1j4Ux1_tIMHW3LWSYHd=oPB%FAxv}j)Jm)Om$3?iImnPnLk}15B
zhmRW{k<prGS2`@}#&bq7g#DpL1YGhAPhd=tRODl$M7;g>MdI^nD0-fB0yp88ms_H9
z%Bs9H)uzG{Xqo3g*5d`CSM5)ncyK(Rcg&8fwchD4KCbCpOVp`QZ}i;dZ4;m{Lb)=!
zw9wZ3NmCtHkaI4z)pmDyzoFKM7>$;`wPrapk5hCWCBVW>itX7W()anRZ@l8@B)_PL
z5f96tmt~Wsw~B+S6I6)FwFe0X1DH}Tx5-g)jjKZ1X4#$|vm!2v?fkZ5two0=TQ6<G
znDA8f7Oc2>96U%`TSxSQ?YL}lY3S|H1U2kuKi`Vy-e2VJENDr)$O%rU+&~5rEGJi4
z@rK>awXB2UPF^sVtfif+mEkVgUyhMYGUR7c&m6SO^Lm5pHwHR*?`oyYw7d_f$pUTZ
zNb9TUQYtFDE=DFZqn;iD2j1KXH$q|_#XqL5hMs)tt)!iMk4snjGteo(e7LgowZ`K*
zX31dx6xK}5)oD`nS^~S#h3)W+AcN@pcUn#K^&jzN9m*?d^@BMQnQqjUhlxwHj<clb
zgmitD^0;-saMzh**EsBH!_oF}Ewjl2ORqEqV%18%bW(a{KZy^oyX1B`R?5R9kXY~i
z40>u+G<(@UuKX4Pg%8DH0H)KNdPq@zjpL%fS^sprjpi46dYAn3Ru4}u+}BjlY!l?8
zVFfi)-yrnW<T$jN6O)%>&vLOSjo^lbf-8Uf@dK-mZX1t6=nI4oMBQ2aT%PNJTz1MB
zvqu#fHJOTPh^)=Cj2-5-8?g%y=G;8NdC-l=?Jbg1`aK$ic!j|s4#m<D#|NvWwo}{(
zca9z#v$-+&anqsddT1+u9Q?vUrFl?A=D2NT?y>3jSA(=YrtkR*>c7!2c*4Ub=xm_@
ztS;XvoN!|~0_t9ROe4_IrvKq+#<4c1huPHoth1sO>gyp9F{R5^3PyMO?&?)-ajR3+
zA;Xjp48UxVwC>f5rFwQ)<=|U<8KP$V+9zI=T}+UF7E=&Y4?WE}8A*+X5kqEEjPV{b
zOd-%a3FG~(2+5(XQC%5Tj<SlFLq|?&={;+!7o6qOtk0jgP6U*UJE=}hQur)=978~I
zV824gwN4(fb;}1w4h``<w^2AZyCsOqa9CbmTAnU04!!9|aV=8%YIaBl?b{3=$Rt_c
zQ%ljD&BK@RS<^5AN}~a~Pi7C_J9TLe>42ukqs;Mwu4t-}_UyZeLxcBZ2Fk+Sl8%jN
zx-HYv2we|<esfCeU^Lm@Nnn%tm<9-Tue~l}`VE~9+4g8V*gP`dxj?^r_++*NO7@DD
zzV~85#>l+jUSO02&Zz|%#HWC}2xZ=6I%J&6c-=s-|8g`jwXHq!{*vMwj^{a+16DVO
z<3NFN_-psFN&4Wa(k$K$f>tt=VV=V|qgdzE8_D9BjrI6zPcDt_JG6B}mh7R0(4}Ft
z{W>0HfMY}n5t^~8Y4`LLG+jRQv@9OF?eWP#IjE%Cd=ka%9z4U;m{r`LFj_xO=l2bw
z7kl&eHT#{=dtj0x%r4cM>0h2*S`TUX`Us+%Jap?#E21Ap`DM*$sVL7&1ji7rC7X21
zD%t!kbC;1$>~YP+2O>flPhPaIM$^>2HQ@+XW+DwuP;Bm}>NbfxCcJlY%NEMp`)FZo
z))L6wwf^e3%<WE}D}B_fa5JNBkll*M*Z|>&z-psuH2FvKg~CfV2@f~;r7lf)yuM-1
zuKyTyT2@y$*~P!6N7}1oAEsjqH~`C3mai8279T68<l_tbbt)6)#+?d+8jd>ZyL{li
zbjzRI|9ND|*W?uUqN%8Qlpl(Z1jWAhe%n0-!(ViCSdQin=87mt(T(S(3XwRZP5FuX
zR*6bGy5C%2s@qplX^N(r{DrMyvQu#W(O4R>ymaQKwGSzRpI8nSp~r8#@xA(*{1G=C
zw5dxq$At{6mb)ufTyu)Gtkl=|nFgyR?TZSr^L(!cJUQ&m%(5C1P)y6pY@{A&5xQ|-
z9y|Zgfv!nXOCi?8E}6n3Q@(w7_RhUWU82J<ht*vfBou^bc^FcBBmDebJ}O*(&UB}6
z>$#w`BKt~KgGTFz#ycv!J#?q9Q`wC}4o2%{>dY`v;hkE?xodI3;Y&vPy<0*w&W@ks
zt*+mkJRErA)>A&}`RhxYvF6CO999x906Ji}Zld23W>eMBs0kH-Vyu;ze^e>a6v3|K
z*wNuse1SDq`-s+~r(^qBs#k^t;>N2+MHS4?bulP+NHWwE+p}ucurWJa=c}W?o;)0H
z@<9yo&`X&3gv2m`Pn?#vIE#PG-Y^Vz-_%tcQ_a$4U%T0KF}y~x^BY7`SXgmbd%CS!
zV$E_pwFXbMcKw38Mq@HX9w{cHZJCEFVxYAnc7E2-$i^4m5q7LKwT~&uMR^@90m$8^
zQ#Q?QH={sLiDlPNaoB*_Y1{mloLvItA+Oq`y4dQvn68Qns|neLKizekqzcSM&>?!p
zMe)42Y0_7H+|fRPq0gnU7oXYgz3SgpzMy$co<-F+UOR#N%4Zm&hH2_*+*RTmWx0iS
zr%g@IddX!m`-j?SYYE@E%}A#%M6+;?S#rZO(I3Hs=bmy5b<QNn;l|RU=vLxi7DwEY
z>NsQcA&%3si(FnogWNZ4V|Apv-a-^t=DV`h9pW59u$&QBoiQEkkzG4DtY9d5f8<sv
z_2IL1RJJFb7p~p1=hxg>5J@i+nXVN8?He!+XmWe_Aw?V4Z6ywsQu~xp$a@D1wnOfp
zR+^4UtVxpBAWRT-Fve=V{mWjD)?mwHYj=hn(m!e2O{Ewq%tZ8hrLP5%=x!8nou10H
zLGUu%$M9C)ng%__+?F|B19m%g{%8NGvVawx?hu&}W!pQpS4NIgtrTWSngZze2u6fl
zV#UqRi*pJC`Y!e<(oN9`_HUZgVimM+Fy9o^J4LDcNXa-#tSPQtX3DZaas9KYfP5P`
z{;<jKnfiarMSd4p_v=HpQ{rI6Cm8_Q&j-5nzrg%JC80A8O$ghA5j{o$<WZ8hQ2)dB
z%?1FGs2J~+CfVoN8CA;Qj$=uj;-4XtPvbAiGUYSa>Ya!BLEoWTQU$h8fEFc~5ti-H
zayVVi!YcChLK(8MFjgbt=%;nLL(R_gdRnYcRO!zY7}lXj@FYpX(RlV7)y^-^oo&1I
z1{<xiLLFN^5d|&$TF->lzT)?Ox;0o}AphRe%)HGjLp!Alt%4egR<fTq3d)Xqj3(n_
zfbSZKEKHHS^%kG0B439osoUl(I$jcJ81MV4yO#+MILS1~xko^f8)psRpjH5M24ZlD
zq+kaCdr}%s5L_?{n5iy!VHS~saQz#^cN;|r)*#5g-@XQDHkdFB?tAO;=!Kf9C7x{J
zp`U&o_0xY4-KZk*eBM8a?14AK@lZQ}`8-56KF3(=?$P0M06lFPw#yjhxKDKwb`d${
z^j)3i<>&<v-Nu)FgYaaK*g>`lzQ<b(BXF7`*E$F*zzdN_bf7jl){ID0SmZh{6!%qP
z#DL^S`fl<7@9#p!Xl;E*R4??WWBD(h?cDb2JC}@2jUSH%_S4~_yH@cJ5|`cpgcQk!
zob(1U^HBgzGP)4sVRI7GXaZ9)z&6h*{qO9pb^K4By&}YM#1O2gwZVyg%srzyO`$JY
z<WRDLy+XO;>Zv32Aya<BHjgY@-1VLpiL^seFi<b{U2?&#IVv1gY7@G-k@*q7@WoU7
zq%7&|Y&lDJnUX&A)Di#9JB;VvpsjFjgv0P{HC%oHBN}Ow>z;J;=Ay$}xWA*Z2X_Ki
zgpPj8@OIYAgH}fKd~vVlO&F8x)8`)D2MqG3_N?qfYq?h&x#&*{+8%i8TL*!N$`804
z%(Vsdc8Co~pZzIq&bSgo@N+^g2B7w6@+7E{ZErG=!_T36VtV3Xp}pYeNh%hmlTN$r
zAe6`ll3wzQ135n=1I!*B*DzOOH4ywWgjaM!ibOm74RYKw8zd}j0KvQmi@Ch`i_suy
zZ}cztb4D?wZg37NByRBg60pqA^ITtiN)D`mFU9W*(f@<@(12Q{HIRIHha#|A8skf5
z!nHsthnHDt4D@QGZ1nm5BUY*JUL?4mx=j)7HGVn8M;Jn?o}1vSrNW2n_}}PN=C|cY
za68E>sB`6`AJbWZ4{ZYF{LJqBTyvoJusGgxExaL-j~buy($qcpV&&1~@z1&)i#rx_
z`h0SnXCGSMit}$rcXeuzsCV~|kB@6TaB!Q}QI?ZuR>@ZFeX(gEQydkQ!(yVE5$QA7
z-J|=@TH+Rn|GOmp&t8)L*Bt(@Gx675vGXOmkthmjZJH7t)7CwT%*@KfSS=Zat8b}5
z5<b`fi{UeExlbC$fYj7MdpLc1?ANK)dd#tHbDa#^K4WnkIXZrNvICxhk!PPyg$Zaq
zIY8Ar1jlKhN0f&a6hc?4bv(Fuo@Ea3Q_u7~Th?3KylKBFFr{1H9Rw#Qh;sOm*zkza
z#=UDc4+J5_aavD2;x2uD6~ET<_0HLuvyf|7G7@~^qb>tJ^XY$g@c-`U|3|OC1p%OV
zqJ0+8HnDwX6gfYv3%V-(YWQV53+P)V^5PG<_FNbXa9_*bf_V59&=(}j>-b~qDO<*z
z^T0c1PXYN-;9}1mD9I82aX{9VHDX7%FCnIOeovqLgCa<Y97_N{FzY4-*yrZ!)?wC9
z*kQIlfS;>Zb_}lE0YEzcUpmY6iEvO6!A^LHYs6oeMh821sbulCz~r4}s!Gq`E|<r0
zOnS9-Xim;LTV$$1kV;4n%;a-evG7v+w4Jg9q6Kk$Kt@xX4)*b-GarzO{bczO61I0r
zUgu?s?`PQY8qaOV@0mJVY{a^kGV#r3(Uj7oaor+W^uaUYZ{n{28Hy%O@#lwvtUo1n
z849uOJHU@2`Y;6&b!aT2!!(N1PT=y|=_o6%l{9wlS$?l{gNQO}$%xpXx;E8-m3%S7
z1F6=l0*`NmUa*`OXEE>f(s^ZKPvXIp4NAYqH<j)f7UuToa6hyHy?_iB814>l1m=64
zBm$+l$>hg_tzpW%3k=gBG~l}x`zT2Ml=ScuzOqu&aI8G9bwoiAP1GggVIm5AIU89t
z(HwDD4IhkzHlmnERQtLevfER%0_+p)?V}+sp6c~h!k^U;S)U7%)GnE{UVHqoKrdkr
zn(4X$!X-4;bFy`5>?QJ^m)>wjaYa1m^23jkvM=2tdq({jrCN4GkDUE9jX9yLh^#kI
zv|r;kgVG``bs5LTCKgUPOb@;)v0JsVY`K?PT3Y`}>$EVhrK*&XG8u?=PI_6dQJH_I
z$lm(~VK%pbFPthnJck=A-ifPFl0f@9WRP?7-@dOdq$)eC&LsF0<SI{%Y34oA)=blw
z(FN)_jl^n#wkxi2+otCfL1M~=IQAZ^sqZATE(c$@aov~m!G3l}0;kz~xC7$-1nv?%
z5K3FyK0Of;#nHsXc3hA>KWm>4d;USwDRQ%zvDe&8g!O}WFjo|rCM75c(zwAae*#%L
zc8*Ity2-HASxnFGaRK?Ng6o}oMSNPq$&2>$bGKgVxYXa+IC<U0&B|Gg^^hR*u~weD
zY}e1zzNLgf?n8JfwA9#s%kV>fPu>4dyeDgN2Vx~<K8Ta<kKwdl8aQHN$YzqJrIRP^
zhp?H9*HcfON1g`pMtFtF5m<z``O*xNNL<-A@<(00jX6+irgc<3wEG=l5WFynHx#H1
z_R4&NbaWMf^`IEpSO9t`?;XeN8V|vCWox(7!x_KU!*ENwA0LoH(Ii9HD3VUbk3TEf
zDFGehzv~$=HW7nibIIQzpKWe{2oDRJ`iOe>mB<1o<Ss%9DR56cHNSuQ{=OQ8A}3QP
zaJnE-rHgiNtS^1cp2XeCZ4So=8<hB37}9Vh3DsVwUsBEP(&A^dNB(sDQ&607l6BO!
z84jzylq2y(?0TIbCHx({lqtK7ithy%@iZQQ0D>4mtlFjof&XSI&=ZTn{PE;82nsCQ
zJb>P0{|yqKY6<kX?N{%f`1t?by!_ScW-x`u3mYw&PbnGl+FZilvJr2h=TzGo+kVNY
zA#i%!OozprHhEtei^X<eMHSA*(zze9j66GV@|Z}&+w4gQC4`5}p!dAr?+G*DJ$22S
zEOe&wGhJ!2&Uv6paY_N%R0t#T4348R@0++mnH^zEN75$NM{;WKCb5U1pInzex6y2d
zc=p!CRYu^1<k3Qr@$rF3M)$?byEWP5-}?VmnNHdbR5+Bjnq{r1jtkQW4>2Ua+Upup
zUBTfi=qRt1evdb{<)hWT`x^XD{O1O)QcX(KJB5<ufDL#^utaTJ;)zn92vS2?o)P5c
zC%sZ0#(#qhq0KISaCYW<K#w}Rdn1upURGXKT^6>|cRh_}&_e!@zd#0kjwmCwK(vO*
zG}0D5GCK8jSqk@Mb!xSV&g{*HQn_BT?G(Bfr;Aiu*R5fq`PcTM+Mxa@9-u^0uS`02
z0rV9dNH?7b-Y6i|uN@F=XJuxuv+Wieni2RAp2RQgbJfRNj9n`6Q(8+w%)YCd&Adrz
zBHAvYawyb|frRqp>ZE95ENm8YJd+>!D(3``AET&hP5O5@@4$cd4<qo~N{#hw0V<o;
z97{I)Vvd7e=4b`uMWgM!b?ilrRfhz<1_!g0qz55AtozNSu<aMbB7)@_T?-P|0V=SI
z_3cTLgXVb>B`LC$qAGpY$`V)n{FNQrj~?U1YOka0Kxd4C!~l#n6SbCL;qkJWPG{Cd
zu8M;zf-Cz<6;4Sv4QJzmVuE&B*bLfRW)Mx(Pb0gxHHe9d!uux(Crfe4VegT77V#BB
zuHq>c>#Ij*%7geXH3h$Dhe=7o^!3y{j;nlc(eU@bpR2<Wt-~|H1ok+RrkpR}*jYpO
z)SR^<p!v$cNX5W4KsA*DlE_|CbK}Q>%SXVX{!<5^KMVSQHQqf47Eu;Y;1z-L;~pJ5
z_X{L79d1(fB&f-_UF3?~JqT39hiPD&EG8Dr;Ajlg!O@(BZQe*XWn)PPe&HFv>ax0t
zb@9e;5OYV^=7ZrkMhSRsl3F=(F|U;r`M7-Pa%3e?JpDrz+3!t}(H9#68=7=^M-M;&
zT}c3xEI}j<|2#-<yAR(6b<43#xCv5o9!QmFPNU7x0^gmDS$_g}Tng;LRP~+B85W@g
zuH1pY<jMgjR#}sn3%eH;1TBOIEjtlR1P2K!Z-dS~in!ZR#u6}<8Iy8M<QmS*PiyLW
z$XWdH(d||6Wcs=%9~>eG?8|gWhZod->bA*lTjrc|`@&oo`GlE#z)-FM!)S*pW+Je`
zryXn7*x3e_Yq>_-iShQQC+J;1wo|-fD8{LB#NMR-1~I9x3?tU&+4WVH%UQ04lejfN
z|4q<$@FNUIe)St<2G~qn;J9dE=0>SVkh@UABaDr0o|50nslWW~jE|?LJ|HB2+EEnV
z0g68%N94W<5M71~K!{^_X_?iN;*)$-i^NZ#0f3bu1CS|?e}iP)gW}1uk!x6DYI@Yo
zs$fYxC@=cyL)cU?a`M&0rh`9_hLT|-DZq=0^_Dp9e}Nsr{P>XuK{krOnnH?rhn%E*
z30y$??MGqYBzanAIDY&HOh*tn7{54VZz&T2G62IbBf5Gu-kCQ(Bi#P^Gbj!V!|6Ni
z$pY)@!xj7u!iyfkifq%BKY%`g|MXeLPZywMUqOwp-i`79AJaWO`wdbCBe2R{2rY49
zCUu^vO}!`AJ$qx}<h0~_=K4%5WHvQ`tbMw6@_QYK11bn!l91wI|6;UrYL(sYP^b(8
z`?CLgF2~U?jUu$iLnLWU?-mx2#k*wIHlmw!v7RdZBua2QYa^%#8aR5(%N6(t4}0sJ
z3)D0`tF8~lgyZAy)5M+_D1BV%1e5Hv(T|UBN$vi0P9(kXm&L*9e^xXM808{xLYl@>
z1mKyAmyuE*?)@9j{KrvFJ(%TOdKk?g7K}f}o1cCRNMHLb6f^CTf%tKmiR2?{Quy%q
zTSa$#e=Bjd<6c2!@+&s}G1QzT{4o^Gv-kp-)B69Jvm?t7JtplUVtrHPpcV6De|QRv
z7de@LDspSlWzGtFcw@zjJX`ztesk|dEqfQ5_bg7wik&eT>a13=9j(bk5%h=ejcl7x
zAy8Vt?w+7?&zLF;StUq;c)P*elEut!=ZvXhjCau91=bYmjbk=y;@qE)WVfAt838r-
zI!H_;(IGmMIkx2*Iu9r)gkBTx%1}N&aH?%prk=CK<Ks(9s+DZ?3){aA1^-pZ`1jv~
z#!TRhyLV+)!DN1%vHk{my{wASQD#H7OW=@1Uy%MG3ST0;2dhLE67c)sq*iVTqTF4!
zJv&?uNec;9q$LKBHJU$n)#ySExQYH0EC0RO3`>68|0b&j^5C%>;VC!R$JI9At9u$f
z9uxKJ$IQMovmyD$06^hO{%%p^UC^Kkqq_#Y&77!y*w>A>E^co)HMgjSo)GN>X9qkr
z&~vur-v<6=Ugvj!W*zkeL!rz~@WQE0G=iq+d#ZPnt=q)8y+@W`*V8FgXFQuV!)Y9}
zzHuftYcTr-)7J%0U91g>#tB`i#fmvZB46Nui#`c1*x<&%3%efle4&X_&A-w;1ouNh
z5$uQ_1RI>)>-3O;p6~rr;;0?BN3qb!w8L&kHx+YS4R3XNpU9raBG9*-GZ%&mEJn;*
zdUX!Y6{?9jo^G3~1AZa1IyJg=6+ECg9T+oSqFHw>W1Dx4zLsX~B6n)BrF`U&L+h!8
z4f&H{W0TLMg<tEPXB!D>=}3#ZX2*$oD{V9z+%SPZaViJUTF?zqqZ$oH2=X<x`~hz*
z@mHpHWK&kj6ZCU=!<_-`+^%GX7y~a+0A0}Hzg{R?lkk`EG=FfZiZg7X%PNh1<}7`{
zlPBxvBByKKUZ0U!Dbh}5W7RYLYZ3HyCv<M{d-)QEOVbM4rmhlVh2@cFdp*QHG(o6b
zbPzU#hu}6$CP?7`Bu3z$AbYs0EP`+OObB6bHs9QOhJ(bOw)k8X=)>lic00K%&$iy;
zGE)zR*L}Yp@aSdSW}whTSuA^mm6UjAf53TNHH)R?UOkmNiVz)yqWi>l0H`RRe4#8-
z0o09z=Z6tT@ZPLJ3^q+h@;>c!vrt*J_8>WU)j_LHk^PT49p@&<5Dl1O-DT@(Q{AwM
zGKr9thj~XC_hED+<zd&ZQ0s?2%hL;Hh}fY&`$!!!yoB}zBA=pD!9ijKPTam)X^*rv
zMU!l4Oh^yQVAP{W3tdgWAror_XTK^5q)R=Gq97)~tYCqd!r*`jpkFF{b=YYTSzMCy
z^uWS$V(JU}ke5{Pp;xF3#YQZ*uCp4EeXm9M!#o@P<PF0Mw(wKH^8jFAEW;#npHqVr
zW2SGZu^1JIoHYWjz+%?~;78{<88ZDaVDO*v1^-|I{swSLZT-0yoICa>MLZ%2%@^mg
z)5AWz_(AMq;ri!VVE$qpp?izYz>iAHS6~syPY*j0`+FYm@87fS0;iY)g#2+z1IQ(*
z*T^{s_-ogn2QP3b5C6g$76mJm%`Xag$O1ub&hIZqV@%Ca9~Vjb@O@3>u)-?wtm*I)
z5pnt#q8RzX4JTH>(ceA)<3;@|+rMdFvQm+Bqqg-yG_V5D+N^LB;)~yE*F~5JDCklq
zVBwDeXJ41SB;F~Y+z~MVGNNs#|I7>c)j0ZpvS$E6x2@vEz7mpmz*@89TnNgV9=(L;
z%dEqeQ}@M?jphW2J`y*wK^JJ|p|%~1DD$?_1>P8BoRhQDXAgwek#1DgqnU{ER(WzR
z<#I56K#%Q@Pv<{zn`{?w(kPyTTN{Y)5F=&bdyWI5oBdNp1mIax0lUr(4zuqe`UB?Y
zQ+kb}#mpw+VWc`Nk`$Lo@lXN#V^P(g+}!_c+-UG$YQg`u3MrONx7Nb~VLa7}UJ`4j
zjk0OS-f>rBijN=neRi{&<)!hh+psWMgVD3-4mbz0UP}3t7spV(m-GH%f^K!s*9SGl
z-j{BN^Ya93#9Wp-g;+4V;Okqd>Vd{~oXB#x+7F|9NvuWQg^o-wC{b<K<6#fQXKq2a
zyPl`1YCd=pOsOsZaZgRmGK&v$lOO|(SpWi%@mbhK@)Yfvg3;D>vX$6WrKf2=?}hi|
zVJjWTctn6X)&xGPZS75KX4;i07^%h>nf1M|2lN7*%L|0Jeve6Q6RcSKJkvVaWEnye
zs>C)IYagU!`rap3OjqARw*5=hiJ1irZ*2qRz&_Vh0gHktgPzqF1NT1f$C+L#x;UYs
zszO-2gb^-@!V7FCPA$&Mz<n`CTvw?+ZRsjYO3gmxbA6R?53=;AAlH9i2w8u%nx@;A
zq%dNSQ)`G&U-5W-?_tL{)=c=TkZY^7`E8?fFbffMo9+_0+0sJyYm$XSWWPc99tMnk
zg8;kJrjK+0VT(0K6kbJIy%h=50Jy_m|NbS^9iBF#S2~6tgiXlFLe%C_ThtCH$^8B?
z?57bV_8Orv35tGdGz;BcC9oTi0>oKJz-0uU0ZQW+Bk{1)Uo;j!v+Yt5tH8tZ6aXZ2
z7r#L;doWOL*!#<|Ox+LiF?+je2$gRTkV>kzhRvz=!d6L90D#{_{ybu5u)%1BsMUi<
zBS$efW8FRGP4G*HO(aP-a~(5Tm|O<9V1*BLINa7&`=HQWcI0PmaNf@z2yCsqEH5uB
z4qS13b>mhDyW1n$w^aICleB~gXptL%ExgstJ!ghJe(EE9mCmEN`UP|)oQ{nXy4B--
zcd!g_Yg!OzlO&Xt3GPGHSGDMI7h^lXKfW`)KJC=FqiE>zVW#ishp0kj0TLD*z-q%_
zyrT9Rl}>q8Fh6#_5NZ3#k+dk39e3*+#4CGC21lv@6C}4b(aft4rvZsa8W|YdUEz}w
z5-&a2mFuzCo_E()E46ya23%V|LK#4g>KUfs8>I1d<QF)t_VoL<g3zte#40N2xUXW)
zo$l)6wevlS@>8p{cFwkG9DU0Vz0WaiJsP?%R82!#at_qP(|&&Q>{<f$@E3&0M??d%
zRAX7kjk0Y2mX{`y(N=6)<%L}*PyUOWf$J}tpI<)P6wpQRYE&GapQuBQT$t2dnz|`Y
z;=teGHdpDl!lribsb`#Bt(<E4OjUV&0L((|ll1cEDQ;G>z?>faedP1+@Bhi#lfSZ+
ze{qZ)X7uMZ%boub#u$3<+Lf>{k=zd2^$BuhTNxA46{H$_yFrEIA4pCMGX-bPEr_Hi
z9sN6w`r|7+357Gn>`IMkkQ&d~kb(yHY<?UXkjz*{{#3VI9rZJ=_~Ji>tP*YzBQc+a
zKzULy@j)#xxq&r^?%^4#C08J;CP91!TTI;{Jr`fxMpVvu9s5mh=heSo#s7dJ_0x9{
z65w(Ep-BiDOG|)*_0$JE-D{){_%?4pa&yhz*buKB^mSPcG+Fllm0SC#W4}t$|If+Y
z|7K2eYfMdhAJSP@54+NNpBww6UTD@4{nCYoma~nsPMG&T1LUrrnssyDT+u?2?bW;N
z>vs+31QQ0WFDTkRitaq3;Io?;E7`WiO7`Z*oA95!O*QLAV1EEq3mQOP@azb3zUnHv
zUpElGu1$afO#ugB1GM@uXTVZcgj%;B=Kvsh3xGUyuz`qVuah8Hf&(CbpB!+3_pUr;
z>sht^5$5`vck$!RaO`zj@q*pz-&C3?!A=Iwz=(W<<7Jm7i&u9=j5q;F8A!s*=hWVt
zN&cQ(>nGfOo_cp?vwN4Y@H}-vU0+FMD+6YtB`?F*yST>%`8M?5;U)ce9{=U--yVV=
zS?skG;4$+jy@xU*?U{nocyKn2c^9<%(GMfc;2nFpMU5;M#XpBr-}HMVUQu6vRWi*J
zgs7uwY=6w+7$=KAt#pjIm38I%tCF&?<D#*dF$bR8P;v4&n>=i4_!Fn~z+W7v|9?bQ
z0^oBtZ*HR|IU2PlU1Q}Hr83SGMqhtAa#7{reHC5?Pf!gD%BvUfsaamS#F%P2ndd3_
z)spw5>6*mLn`M)8xK7N*RFbp!IQuk0=#54<|04DDx@T3DDV;GI{aV}8{-K>vf;x3f
zI(?B8=*UGM>wdsTOz)a>jgj`S^rE-r#p>Xz@2XV<<=EH7-gQLu#`MHnD}|iR*#w${
z`@F~wZJ-LvwD%S0jbzb1CZf<SOh3iQu041)C2&deH2+I?;;+_qc=4|lEei8+i45J?
zFi;Zw9rF11Ru2E@JsTbvL<-sjf&DwAc4jJrWA2f?NiK2}4u*9HG|UcXQPnbKBHD*H
z8u#H|20gV$fgep`a`$Xp7hA(a&2e2Zt|!;IcJ_@Ok6(T8L|y1cTg!zUf30|yJ-#17
z4YuHE<SzRbd~Xv4+DFJ#+&~C}C&TGF$S7(76qQ_uY|s|EexM(@h=md%h9y>Wzu&sk
zdEhrDSKoJ?i}N1AzQ|d9@YN)cwv2F%ZwIngYT|OF3KWrg*x01z1DNzz|DJOFN2@YC
z1%opfgOUzI{rRYS6jzJB>-`Svly1nbj(jz-c)eV~+4A7YlfB`2HpF-LSr<0@#9e^Y
zY7T%1Es47s<x+hxOK4c_(V=2EGl3Q#bqv{Hk4v9cI%(LESk)hMz~oZ_6Tcm`YT;B0
z^r&UUVkdzdcm~S&I3Oy04Z6pdWCAT^44#(oCn@95K;Mu=E>_D!lZj976A7=`ju3l0
zazr(|H)675)>3jK0P8~HgwJ8W5?Gy@Ektm{o|^uW)!>9g+?C)HVp7^!yN=JvOQJ>T
z_eiXSChy?2@R{L8Cf{?4=gOLt%*Cy$UA0X`3dFa1Pi1?B-*+oltt%o6TyZ(Ctbv=^
z9>vS8Q8g#<QDVx56xs3@_9+6(LY8Xl9LFyNXg*hrNkk^z6r)SHuJgg!f?`TDuT~Ju
zhmNA{xH$B#Zm<Q^fB!Jn6<Y45t=fMw_CZi(e0%{al~IMx0suD@Y`DmXeA0)?;V>sb
z*<`eT{=E{VGxOY5(Vw_OmfILle{_B7Lk3lDnau{O#tZ4yjgyHWEpZEHUza92$TqZL
z+*3T-UWR$rqP3jy=yp@=XlR#Kd-qv+w)riA|I!2YC+6?>`+^Bz{(^Ch-%SMP_YaYb
z7x}@S;AZ^OWccr}6*7(z>8;JIewr`;)`f+l)d`;fg365Ks{YM<x+^o&46Q_emQj+a
zj9aI{vEJ)zKq1k>9i$rAu3UNj%KEsVtQz+yHm1It*A69D0zrNo{(r{;=PvTXwFYJ@
zf^};AR<uvh8|0o53p&B!7s;BME7^8dnk?fVJ<@;ib4mpI8-zb)VsA>A<Y%wl09`+6
zJF)eZiWJV4k^_SNKhZrt{4x7`dcgQ8+Ye}N&80umAqHD-Te$*=@{H=A*>8*#Y%&kI
zZc`aW40_m2WXRd<`f(`RQse8Yk8X1m{x~K?@x)qe!_a;+lAI)D$1azgS%cA5sH$>}
zKKiLyAnn8vk2A^#ds#RY>aB=N`0Hz)Q>`uC)L8t?RAhxyzMb49M!e}^)vvh^F5X|$
zRLJo0oH-)XMi9VF1QTxU%eZqikWTjJZFvjmgEn0ZotXpn(o*U}de-Jgm^HS(V3fbF
z|Hl<YZKkW>+=ver%7e<R3#a3r%_MlnhibMs<g?qZSPGYWSash%r_T+8i=dm0kyaqt
zsGcY~Y6~q^iun9eC?GL85dZY~Oop+puMZ>nBoFa2AnHuu3?=p?D&<J@PRL#3%VLOO
zJ$M-G$8in9nKFPYkiu6$(#W5Cf@FA=9cG`6ZieEmo$#U@M2<0TP+up9A>?L|I88~k
z%^)qX14t|N5v$)Iq9SzAk^!UXsGs(t2kPzfz!MDE1L^x2$jO!+K(TvD5rth4UDNp0
zjsVNR5O{+H$X@SW49*fTXgWF$v@W}{;WK`@D`cZ6vi=5Uu6Aw$FIfZVi`Hb8+m&Gi
z@Q;7q5o{_RGnotDgg<m5903HJ^+KP5eRDO02tX@-+zs@{DLQ|Hn4gYeMgFvpsGp|j
z1(;<c(HS(QE&PxdM?RjaX-yid5whecy<col@<C3r*r}ibTPJ(H(%<i#(agkOoHoDK
zcz5k)3<q*D9>!q=C}+nj){N><Y-U9x7U>DH%~73V+P$ZkExY|+y>Pl>zZ2Imegd}|
z0d#}h%Nk6coGXpUQ7vOHGhE_3aoah%Lt|ED7Bt5@XtaUcw**w=l3LJ=E5Qk?J>%$$
zo_qYXowxLfW4>cPI_rslitVfZXvz@Y&Z7^hPs>qSGwcAE$!JQhWi=Q{UE+Y8R{Aic
z%)X{j@A_hHhjZh${*m_{1wps9eGI^rKLS7PLt8}6br$-QBlpW>M`3Wo#T{inVve#R
zK^s^0M3H+RMxZY9C$UpSX)C%7Yt=HG>!o2(Zivi1uf-c`L>*JG{;_bFSI!4B`4Yz1
z2V<R^V15`C+LxQIUBkH2#S80tA}ecp<*<zY1$Kor{wj4j@BVpY32dnwO}LH04L4ze
z5J#0c@xhb&8+HceOb!Z8#_aOG^sJNy{sr;=XWAlPIp8MfNuMyxBsTZR#;Kj<&`B*X
zVP}PBiB0S(DBTnuDoOOSS8i@WM)k0T==<W)h{TWWt}^k}!#&~li4j~k;tBQK(R3nS
z4E4I4b=_vP0eAQG2Dmjj-lcpdi@-dqwWX&y_}cBJkLFAP`v0)^-ce0$d%G|Q3JMtM
zohYE70a1FBfJzeqLFpw3(gma_Adpa`cMuQ|QK~3KdJzdt5Rl$G1Z)&Z5Mm%@f75f$
z{@(50`?PWIJH~zQJ%40mWo6a5=9=?Yp5OB%z}*%TdA_$Jf+s+Ht|PV~Z=8e(a4Ryk
zu89hz|B6X*XuN*n$hqVtrDMk$>_%El!|*QT(6VNlvrmAK;*KktU!Hgk$4YoHqx9@K
zm8s(9!>&sIf+x1l=X)G%c(QGfXd@U|M{i`=B3^F|CZ9QR?wev);oR~Kj^nqUguOxf
zkR%8%F^E_+V9()mZ8_h_IkS+tJjmjlVPb8QR?bxT_?d809O<L1Znud4-nVbI&0BZ_
za$2Aqu5TF{rgA#)eoF&R%6z8V`KdE@@MK|8PO+($-DMbE`k+`s+{k&m<eGc^oHV;n
zVo37@E<u72(Jk;A;cUB-6)%RV##&^A+0Mt{oS0+6Ij^TIqmJBHFCN%{A0Hwtr2-z=
zY6?3^pcbMMiQN{#In>zQF>t4WRCY@WND9aklpp52c8Cqx(f!m8kE(~)MC9T^C7|=_
z%M7}*I&#<{2;TqXEm%XMRNsfFhYhNG>XvD%L9B?!JjJAfEnoyxilcWFUPh8jd>|Zz
z4*W-g)e`!UU*4y-$)@j-65@qprCsiVb%;7$#ZIpyuK~wU+d-}fI>FeG{REjH(KV(5
zhAG|c;I75@2<bz#q1TmOsS0~eO@t%mDO2dG+>VOr@5^cR-H0$qiIeqN>_VZdaK@rX
zI!3#{v+!zFW@cjMn~aC8$CG;1go8YQUto?aet^1lK@-CnCY*6~LdMlffvNwwEnfS1
zjVr7mIE<URS2*mWi9IZ$?O-dB@9%f1vLmENgbX4Fa)1L-<xsdnr1PCz9?G!PX9XOW
z(i!!-^pBOE`6l)?+SMP4r0!sB=-GF~vtj8%nx(Lus<`6tKv`vl=Y-w?h8$fEi(<3F
z;2u7&7xTL3)$g91bfylYiL;?Tr&5M<jPa46^>e(ALq&fb>ANz#x!Ty6IdGtxt7kuT
zSkq98CKd5m;0P{v^BK|X%XbweLQwe^kT4_B$#iHIaf?lCQ}A4KVK8o^Zd#j%=Szf~
zXb;(ahGVZY=}|iHCgb96tbyhUg~b+g{ImButM*z~2xxyE#(+<?K^fRZ%@Jid_Irn-
zKyb7`wq4Q8CHIo}7Q>6rw1mQv2Lg8Up9`>J72z}Ws<@(B5P3;3;W3G6R$yxORYXOV
zvETkGoSf}Ay)YqdBtmcXit6H=a!6=#t{>fsNN2n&{yQNVYjS;@v~|ddhnBO>80uJ$
zIJ3MmYL+`-+S#A#DZ#iJI4^G*G>Le$$bkBA8qS5<FscO+S4EjqbYJ@Y>NlBja08(E
z2slYuePTsx8Rn173r8I86VuzilFUTBGsU~Ddi=4SP??J>g1?2{&%>y3s97Dy(9vXF
zc#C3*uQ#tPVC`JVso?G9)~`I!0_LpHs6a)6<H|2o(~H*$o9;a?U9#BALO*}U*K6op
z_Xv1yk>3<G)h+6iw49)M@pK*C;sO<peK3|pw~XenzMP~F7?FIdGzIPn&9$dgm3!S`
zuyJVO*EAfhiB*u{H(!InOe<TQ=k4NskL8PXQm3eLU(kaAG5{02H9-nH2xt}FCenRj
znej_C9}-s?Rhl0>BR6{@#@Um%Q7wapi_eE0rx`4R3Psd<bPjToEXdE5y^CfD9(rRu
z-5n}CV=_~>g>;-E<ybDBiXT{h_0^bq5$P*xpx#O@j4}Nfe=58)`5g`H^gA1WTIMmH
zm#7lw8B3UF$-KHFt74acqv_b|$7e!28B>RRD|!=y@7IT@97ayy5eGpbYcGV2B#Xmm
zT-dlbv}UlD>^xIg)i0LZc{A<7t+(_iR}BXWZ=|1Y@KO<eMbO0z&I)0UCK7L3@dohT
zbZ>z~UTbWq9;S5N*;rT9=5u*V8_*%!m0Za5?K!n2WQtIj;56c7sm&KGS@k#qNU!j#
zRb;Es2YODloFQots+Z<9_3A5Y2|+Kri)MOQm9!P1(o%g5E5hALY*f_U@;v}mGktw&
zF+zo(kh{DXDLm#*;$E>$y%3l6c#`9lYU?>2LDM~Fkp)M_5fzP3C$33+lG|t`o(Mcl
z$ia!ML`INq=JnsTTgqkCE&Ae;k-^MWmX29F7x!+dk%47<T7@*)0IeJ8<2&CxDxV<V
z=HMWIgibi+KDE(p8F3|}I?gPsF_R=Y?GtyCkE$w5rYIgm@sQfdjg)S>x6lXHe1r<|
z4;if)QcM;b5i7j`4d`*rdpF&d>Ar>v@SRReO23|%hde|X0JKOEBppXwBtEbu1t#Qn
zGW%`8I;)kluUqiz@@qX3nPRGsVJhQxZ&KeMX>EYoOag!q>0gE*<opeSfH*;!L@=U3
zoHZH=9dd&^ctjaLIryfsAxl@)iZ0Gt=tZe%Lk#ykEkZ$5=tSQEttlz2#~-XIlM~wi
zmU(*gf1<tl|E^UJayLu4i}m`Qu?9tfX5@m8Rw`SEeckLYko#YsV*7SzUH(Id1a)YF
z+$|RM6Q<wMvl5ge&?Q6|&&f#u-3!ZlpdwCq_Ciq2ktvC2$-(4xo^QM9rKWKH@oskp
zDVD;q6TE0q6Z~zUf`<4(WD<=OnbVO=pa>G$2TjKFlfP$s-PPfUIenNa^P5mjx3Gs>
z{s}MC9kheT)TJf(#v4V~1(vF8Gn_$WZ;<14`}Aes1HwebGo^Q6htAwPi^+v4dFgip
zq~103o^s+eFQv=EgRJ;v$JB|O_&+Gp$BLXs)3=-;*%R!S0k@j$2&ztoN$xF&Vtl7{
zAO}(WW__b(>fO%dk`AqWr;`bOT!&hw^RjI0*YBTA!{~Be!+w2zJEO_H#6!GnjKwRZ
zO_6I=quaqy4KK4-OcX)BhjXdWQAV;h!AP*I7#XsIW}0)#I{RvS*1w+l!tR^7^F2qd
zj2=?|#_Ij_+A-1g4fg#I^}``pM0Z69a6oF{@lB4KaH3pU(KC#66^}#hv-#fRi8pHK
zLaSb%p=(u(xJA{?%b3)=;J7kKPZ>wmT{ee@1O_cNsId4Q=-uXxchY@c_TgiZ4braO
zG>P3{{8I3PWm5MH2l|?59;Q-9cl5S&km97)$dZl)j4OryMBGiGn3G4M@?RNIQGYRU
z%1Z))(|^B6=b!Ea``{4+t<!G<HAXzm3RnpY`|dz$DIsh6?6WuN$Fgo8mQG|742ZCY
zeeB^CwKrB5`W_dSMMYSoFem_)iZ{zBBHwNF&ir$eFU9Q@dnYLn01uM|VdR1@h)mQU
zaKJ5J;q1)}*z0(oA;7DoF*d3*?DGbq?|ARSs<(T#V6<X$xt|eV>SyW`>_huUV{oWo
zIDa5NQ8Q4wl2Edw6tHe3NHKe+wM%cz-2v=XcFkmZM*7~@M<L2Q_<umD{hLfK^L2gm
zyVBpE9`(=zS-7w}s5NdVCDCQe;zwl~=^-o*gsnkFjc<HW*0cX=th&h;ij1CKt>?ZY
ziE>v@Z&&EFz|;NV+%n<ES&2@&WF`>4`)?6Y{%xf5e|h<3P=IUEw55yONR(Tdoxgo@
zli+c&_d&tjFO3fN*Vb;<)cGb0<G<x=3VVs(^0^uDLwkdcR)GE~q%TJTzl9Y;)mB7p
z%Ali;iL}RhKs3@BU)7{<yA<g-Iv6Lgp0<fUvwZ~{umpK?mZCs9U5Py#k2|}CNB|n9
zp?+?q5{%bvF=o^0GB*x?w2ygguc?29v_^2oZY}RFlGp%zD*SBo`NIA@Ge3tJQ!#<w
zL$xuNI8t;9PRhbqw%$>Z`qx~C0B`<;tU2Ar>mt+7#<(vN6Xy6QbvTRT;U^BWcMbDA
zrB>aHvn?wS+Q=n1LBk?3KJY`&a2MWm$9?h_;<<fVYy{SEEKA)to<|x6eShhkJQI1M
z`ypje;28Na5G0ns)NZxq=vn+i#nNAL5GN+7Emf8?{AA_3mhfdR$<ld-9pcp6`}OVm
zA-NC=gy5(%EX^dj5Tx;zLI0%=Ey|<0>7i{vW>FVD#ZXV%ZhIN|`9RAl@&f=g!k}FS
zi98?#ty62R8kC7M!KV;-u~$Cw0tKnFHNzE-YJxN2=^xkoM5y0$^oYH9uzKhWbz9rg
zO5_OKtzz24di-|H1)=V`Nx7Lg+^hwt0=MZAd(X_~a7^;O>gr_g;sKNPiQGEhZf{)c
z41}e}ldXv@t3+L6ghks5`Tpz;=zM3AeNtL`io9heg~2HoayNIj8zYT6;u9~s-iqg8
ze?HDsRv=z{sZ2OID!x&5xU_*~wS4}4H4DROThp^aWSr&-v;!VG!)Gl>$gOC5W;&;3
z8(m<M{NYYvOgGz=Z3U`Zo7WSh#!MpL68$MXl3ghkok}+4G)xoqFYfrSa#7Pw#l`G(
zV~iHVYJj?7dMdoTM|`q_Q-DjT!=vO;S2p|Ctp!f!>|PTkN1A)Q0hg(IE}8-IwAL00
zG8)Nl9;i_DYT~Zx?XAVP;#8p@CpTIw9&pBTxmAmbhsoUwmXLpTym4RMe<hdx%K{_(
z*e;VX1A?h6Yccza#jT6uLyGToY?dF!G2U8bbd13b9-vIZs}nmb*hIpVxmU84HXY0a
zM9bzy>ZKI#nwtBqXlqmW<7OkStkNKuk+Tc1QAhGK71lsb+|lgkE>EaO#SguC{y9q~
z+e+I<&^fO@aRc{oo@ejiIbQmNZayl1{TW2p;$fs{pil$$QwJ(c;PjBPSz+w;XNzjf
z5>>UC^*Oh;Q%s}$@?Z@&<!K|1XtlAal5gTBJM|9*N{y7&qWN5$3(c&pI_ww|QnXt>
z2JU%=$D376$*-_?YZEV$xDqk9;B<5`Wreth+sZn{m#eN|w1%|aeT^E<r&Z`Ur!25G
zMUcSnPLk9skR1!OIL#<bO1IURyAN;QyXmWvmwaU&rd%n!Q^<Ww*p4dnd<Y_dBuwDO
z0{aI<JAK};s*Z|(8oPb1X20f@CZ5<=q9M|lb=$-Fr0{Aw{4D3!V9~m;3kflzCI3!I
z%&*)2KMF;HrQs%{WZ*~qB&={Mv3YZTiPuJMyPqxF=`*DLUe&?v6DsNbO-p0AO&MEy
z2V$g7g_K1O<W|+ch0b_jqR1fj?G_+QC5es<Jbfn|5Tz<%q%2yl-qFxA<lOrZ*dwdf
znOo&!aPiXZe+%m*Jm|gp3spS478#DE!yHOn24;~8_gHE3iPFml2w5`UHddOHqC@hc
zq;t=d>*Jo>>*{4N0RWcT!A&!w3dxKBD+?4O`1=J0U~-u?Iq`03X=(2hQiaz7=ldbO
zxV+cl(DO932ZP4<;{)vMOU!%hp3ipQbvrjJZ1~j2^K)-&s7h_OpZBtFhXkFqO8zpG
z{tM5^(brm|rc)NPT2>*(w!?xcTEeVP)C6ms!oDT0pf~1MGSN3HA}In@qbp;|7T7s6
z7jH9qo-ZE_cw@7T;xBoeu{XXM%5_=u{(3Vv4wTBg#UwJCyUtWZkhqs$Pah~7$bSAg
z-P6i^aDXt49Q=0hc>H?Wm!ZcdUv&NPCD_zu<HoW?mUU~!;k!(h@JABv((BIdzDrCC
zu3ZRuDo2G@HdDwUoF$rJ7c$k?{p1AULc#Rs1+K|Jp|V9arnD5(6OvO`xU;y%sZbfN
zeBKEKHh?bRro^7~U}Wy@x;Ieo^q!oMlbx7MADLdboZ6-+?WHLgp{c=TZ?Akzt&Ofs
zt=PHgUb3S=PQlDudsmKomkx+|mSaqpZ%|2Ui_nzxbI>g7&))As(9Dyh$wfY+AM~-a
zRuLD}lP#sQ^osfpw$~)TJjZ|RKml^MGUeWDxNVR^%W)Sp^Dk5`ZpKsOV$9iUWuB`e
zEAOpX^*sb>XXtvp1-e(io<H5%rN1CoCu^Qonywh%>}x#ARHc4~PE#PRlwN54>S)cp
zl2&!1-iVuRRc)-&i4YF?Nl&pmFQ~YMQ$xl+#!~OizCiWr?VyM(vp@kfD=`+noq`I5
z<Eh5sJJ|*mL*GZFOJt-cp7e5z2_^Pgr1E5@E2qBFj&JkQ?(*!(a}&DKFebhm5G!im
zQp5OaPTFOplkZp?UwkjUFrR?8dN3iCas*&bPGw?H`}?Eto$@o2<Y&8N{SL&duVp1)
z6s7NPba$j5yU;w$jnQ)F;3~=)I_61xdn8n<=DFQ=sOHpuLUyqW-phHWw$!AqY9&xA
zuZP#|L<Eq%r9B|27&qC4u1^TTahebg%=mV=Nm`i?FkS55&5?YkIA`^RIptOaROBqR
zrvNraKciuXmN^9H8`3O7JW2@bu!asPYvMjxjWtVou(9+nmt8kb8Pt>0W^spbN1VEB
z8TcYWw(cVQ8p<Y+w}hZ{13Qo_7wmDnVu#*VOI&!gC?UXr?YLlp2ebVNRT?TBB1e)0
zkcpwU7d|rs5DR@vw7*}3xXJ}73UkoRqkBrm!p3zyu)+5g!G285!2!@I)@73@5~m}M
z0mJk)QsMzg0Y_sP10*|#9W#9D?uk0w5<hlLXf9RoxrqM_-iQO~T95*Ophyf$3iKn0
zV<aLEUH9<y<<WOM^P>9Xcy+WbmC~8z-aWT;C`>@pe9vMDzFD}mb;!?la#>_kN@g&a
z6tt{`scf*iGMx^q(N9;%3-D{MHO#&rT&U&a%$5!<FnULL*(yMT8$0c>f@1p8av}!a
zlokJ2&ZR+N>1rIE^afpcpg8%ZGB>UQqq`VJ;dH(q@p^e)ajeH1b2(JYhp;6nJA=8U
zc`{Y_+?r~8no?vx{CCk%<ijsn8fkgvvHU!}ybLxv^kpliNvva=s)zLN-!eZHd+4QX
z<H7Le`)Td;ADT>)^aYt3jFXr;17EgXh<uvY>i@MCE3y<BEn|`6+lhb#D!p(qsraaW
z_in+AzH+(W+_tj%BVvkc2g=QZR%UR+e+5PF$9pA#{<hdgL1^y)GoPo0skNG?U^km<
z_>GuT$3<IdUR6|n&T9<}BEl%ci*yvhKt-%bC`k#!bqc4<BU4t}m073e$v7cf85`gB
zFfc~5Ph`$xSr`XS&N8HO2-)AFg7auioXKz7XEW%3egRCyfoi4B*XBF??T<68W;fcp
z<;fV`Y}h<HB6Q9s1Z}>fAAw*ZNxjsqAb7NA>7|y;<&4amo3_zz^Rf47^T1dmZuWfE
z`s%9Rxxj>tjK7_si%XBRt1v$y_G)of{Vta<yRh(!R+htb36BkIRGbOGSm;8YAViZh
zv6`CL_bnnLWreSa4vG04km(g$8=Lyd*5?B14%8gF{akkOnKFZZllT~N0_CJW20$LU
zzHV$5W6Aw*-}EO&xd}{%sm>~|JM(*HtaHbA7W&V?)=^A;a_s_)wX!OZp$#9HE&Ekt
zj$!j>hja38U&*KO>@LzTGsGb}(2*2n(~NEb?h(@ESD}^h8odm!f|Ilm3cOTF5dPMu
zkO4qe@}|?1u#Wi}i*2W@0kh>}&T|cEDIe1pmha38Oc_W-Sv#c`+ZKy+bKS~|8^2KD
zyju|fG_-k=c+pRD<<v`PxV!Qt47ZG%ibZX*;FASy_hMR=`4?9WyZTbUeQjS&KpEGR
z30)bgA^5}}?PXHznsSR-SdfVjZBuB^qk18!y<~C$qaTTakn94SOZ;AUC|WoA+8C7;
zwN!<E)H=f7Yv*qeb4**vnM--(m4FD275{`ly{v!Cc@csXSqzF9eQSQ(q;8TYJTAU)
zzRm0HWj7%q(c@GD!ba+}?lyX`rL6-NB$-jPJCqSJHL>?;NQT#J73af){51S5MDE~B
zryKWe_9ORT51Er2A0ivpkQ~^7+I)$~yYYep0P>_7Q|CR~E#AOvXTz-cuzksGCC>9k
z^XVmhTEC+c7El^OfF5?Urc*lEc_L{>DETpmsgatHNJUA}5WmsC17V){4}|%*3a_PZ
z`kmI$Vfd<4$;^~5Ai&5J1Yv}CPnoh{VQ@AIS6CQ^%m*dX0|yi5gK8fm+yoOPG7-cM
z8Bmn7u}8c8)$gFNwJKbwp+$gFSZZJVg(?;h{}y*Z_IY<uB(A^gxWS>+3kw5;!zI`b
zN(31S;1qxwm}CY0{hoPkcFh&ZlvBugRMleJAeS|P4&xYiYUu7dQ;~&pO^{KG3Ac&1
zP>s{720uUdL%hsZ?(Yv#kpqf9M9E1gXFus0LhbX<`{7-2#Io|I<F<8;m+dm5`q)C^
z^yl@dwj11?&B7DC32L~CWpsBg1C%!qHdZvg16SCb_^QL(C%T%V0_`PQp37v{?=_<l
zwYibg8nTx5A&V{^d4yncxe`#eH(z@CCCfPTh{epz?82-j<GC)`JV9768+GuH^2$H7
z*TIDC7~rrhy9AyCMK@@Wc5Mqs1Hnm6lDK{ZbVm-9>=4e?|3ZcNG9~8w(1Z?XOJ84(
zBs1;9DU9Oazb5z}c2y7k(`Qifl#qt~_@(n+%bAfP@1c`BjHu%~q1|rkmpr^Zd|3va
z&*-xH+_LFKKJdJL?RehbVLbSU>ywhGg_n+CP7-pkGYiHMC=TcI?;;t?Q0`pJQ=z15
zbe=!DMh%4vbf};DMs5f6H+|!8UFe@~_iNKRlg|IX>EIFY-!$E42L2E1ZJ_Z-33k8j
ztce736eH@h$1n&u-}C^Q8AG5~?&;ub$tf*R$^;xT+kq4|8f{i9hV|sHGe31MExOtJ
zYN?0z0kzoWU{zZ7yT$@5;DP8bC;s$6et$u#&_2kfIEX%gmjubuMr5T7H3jsS_z79o
zjP2y;X)|Pu+SWDIX^mWjkBH9>T&I03dC=*x{l)Ix;&zD$5U~XjBb9SQAb}b!?zxN(
zX`zZ?;MFmGR6Hv%BNIQuI{Ya~Vyk%da<Q#9q7+&KXkn)1@VUb~^CFn?+Coe10$qSF
zHD61|TD2clewTi&bI*{9<{2&DAufAkfyreL_Q0c9%}AfE!+tu}Lq0jY+Tm`SZf;v{
zDWZ95!7pei!cVk&gi~~3M?d*5docL<wh8c$HlJJ;SL7Gb7`bT?U}16hFq^^gRwJ5k
z!rOY<!YQuE2KevW7Buqj8p^hE!N3wq693)90)s*^w)?k(0<~Rp-oJ9d1Ap!J|C>9`
z`!51ASdXZ)x=+F?OkGT<9Q*2;VjNqyYPlYp=>DO&`G0ra=YNm#^xuV~KL41->z5S}
z%^^BupBZIDGxtRFz!liQu0$-Lny+y<{gN<wxzc`3WUMSth_at#43tn&j!SotB7T-o
zB9j>kw?=0-^|^u<*nKhNT-4`cKLjYUHHMS!ClUZd*mp{dZ#a0{?t=HQ`#FO^kGkI?
zN!nzC2mo|S?O-@ltOQ&{W2g>w<RE(IdNasog}sDQE~uK~ufV#%FqwyTT--nXqXwmI
z9LzZa`4k=)9)RQXa+Xmfep4_ftsUsDGO(5dL_zFC6#zNV!=foiOHqVfD0Uc8y+vmG
z!R4{E29ivP*ElYu#Ng0>xUiSz-76k#IR>H5Pc96do!U9JmY%{2%KzRuF$m<SU#}b!
zlaW6-k?qYsCq9h1o2-v1h-v&oM>z1ebOoeOBHD{kkM|(g?;>Unth8m-$0+l!1Ra7g
zWascqe_a?b4X=Vf$`7~ah?+WiNr<JOs(~M}IY<}RqJZpaI-9>Vc+j=m06pR6u9ti&
z=lr-hCf@y6=BAyAk5K6S=X2*ekLiE0XE&j$7^s-)ZWEBk)^{h)AWU&8A<vyTc)mv)
z2PtB<CoKFtZW7%K7VKZYdr?(1eeCN+4Q~C5upQ$V6B?XF_Y(;$m`yg-alYDwU2NYC
z!fOw=Q#r21`cGRYHw#R=62<P#vfSF{o{vstfk{P6HVB)HLl1#~@U(Zk?|sMiAZW+^
zjCOyI!o#liWzLRzX2O{{Ezay84APaQ6X*t>3Ox0KALU)C*vQ2|`TYV62sZ<8tQ|-p
z+`)lHk1|4F8@`CMFO{$ELF7qpRfXfHEExN$rThTt7wAc@TTv=UGP<qlP!Yh*V10h}
z`6kMfsjH;KQnQ{}lMf+%xXsXXDGV*vX5ggBPBdR{vd$nH6U^Z;)~v4y&z2&vo4qqm
zJf=E*FO??J)Zea5AK>~Z858$GZNT6guecjYl3a|w1r3#xoSq^{;ON?&whRJiH}%z=
zvP|2K(Dbuqa54smw9&dNK+nsJ6Dm3Yw^IuZW%P+O0K7`j4&tQRRTN%TNBG_TYNxBI
zmp&|{TEds<&-%fOMW&6h(dIfN43|jZk;1nPnJf#$V|yrk1jB99voidyotq4EVoo2@
z#LJFJl{RqqKK``o5XJwV`U|vHu0ub{1PQ}RY^q~<MkHALid#!W75#nAukjd-c^$sY
z#qBS9g{T^sG#w|kB%S5BSsPR?IwFk<6;R3Ue9C$<uF1TOQBD7pPNpy~+j3~Ss!sXb
z0kw+CX5QskiGzNZE@9oX-fl|s^19NS1smGcM%!Mz7)*Ualge(S=}LASi2{80L_c_!
z$^~*xnRMd8&pZ*0B`7zus}t!QBUW=_u58PX3aAIzUeU(IB@YUsRh{x;g8{Vsp9+5d
zmg?r$<^M3m1;aVub`^WtsLd0ACvOYAv+RMW-X*mCNexhW6O_KV0WeaO{8{bE!5fuT
z317B=f_h_;oAX7g4f&-yNllt<c9(x*(fv991yKzFhJNfd=$;CET8Gln<Ux5N?s{hN
z@SQm_pdNuG0amI1q8<Ea%Rgude@^Z15}|)wxg(i}>>gHUWBF}e(f=8hJvU@@O>~BK
zi17&X=1Bd@noGiyoGfAnm&0BvH6D3-KQ)FxhX--)&}a}EdUEyyRwNue&j;%m7&@s*
zH#L>Lbg-SF@rEp~Cs#O)*8O9dObC`Q=&J5b6w!4}mLf&2P`2<~8a!MbT}D#4)pFxS
z@ly*!b0zOli08-t>5MnddJl^>*$m?cU$jV}+5kA*5gut`=mdvwY+<Y$_%xEg*!5OP
z3LGGpt1E4LhB11DJ!!0XqTh`ooblL%5C<cW^fd&dr*n{$H9Tz$O1z1~O*1etc^Fe?
zt-v3T3?*@s(}C(9O-%wkVu7u;y2WT_KW-hLb5Y-NPP?)7!uXNmyqNtXZ_b!GeybXT
zbW4Hyxjk~J$|)q6w{thSQOA+iBfUC(v&j1o%p7Xt;q>SulcVsZL?~_Kma_T^iUsk0
z?se-#Ix*3k`0f<Eq+-$>RxZz@^UN3yqa(2};BV1DL9z;tC)$I`2x&C-GkiLg61D&X
z+<^eAa<O9k%$b%Fojpyv?RpA79Z?cB6z~>B;Qg`n4OGWdP}8oz5Amx2QZ%>Gdq>bW
zo)_<4LG*IKVW{OHRJ911uGs_#;nX2W;hH2x2>uqRG5rq0j9A$|P+b0U_ezWkIQqrz
z4<3K~_c>ssR-?Sn-4VbcD2Jv|+w8~^1I_W%C{a@qc;>(R5BevUF1lYR@qq!F_J`X7
zWW+z;i_w0eI`l>%>Dj-0pZRMK(GSItpilgv6`tl&=s}6DjMf7+44l1lK5<se6nT6a
zWllEB%lL8qrAXyDRGW~rIE!&J!4c<eh5az}LD-MQnAWGyqQ}KKmZ*Yi(?>Eg-^Et1
zNFx=h!@kbA_=-!H=MyfvI7_=g^h6Amm=TrIH*~1AcaC1)W&u*w21G3q)9XlWa)k<m
z<;={g#$79Mfq9F^_JLjL6AmiT%62no4G=S&`h}_*y?HFzB2WQ$3sS1}I$ImmE?gcn
z;a3g4>L2Xu6#JaAd%u4-KE2pcHn+s0+t>9@p>WJC7LB$0FJTAhRU*(zDvE%P1W6mH
z=O1_py9r%J$FMg~l}p#YO@7GVYdyq%qpgeal%(Rfk%ku^%3SHZEFt4rFl^XmauFbY
zMNP@wsKw@Psuk$sM3(#HV`dTc-ZJmCRE!SaoAs?+U!C7fMn|aTp>2^zN$vzY+<N;l
z!tAm`<I&n;?eMf~n@?s8C2bp`mN7~9&#ihDl+f;t#On2P*W3pp--iFHD@}v-#R#B*
zrEo-tc$tv9l+m%ptg{RcQ=!T%+f{lU)M;JyTD?Wx7@<&n3neHkC=<^Bt_r`7A_$pm
zyyasq9E?`B5PQn>RYL5Bl<x^-=ba0O^v5RI!5ivA4YEkq%Z=%}UbEiNW>41S)LoHp
z6*D}a3w34gomP_C2HfnkL|_39wP6-XM}ZN($^&2SM^`P983n-V)^#XFtzyC)hHw;U
z|LC|nNm8iCgFw!+u(xOeKQQLsNuxTlQTFJ4E@iMfWCBhKLzDs<&Xp;J%sm;vsO?5h
z2mB*8I1dUPu+q1gpxBR#(;k!v2nhQu-36xn@yT^@wg7qjQ3s^%AC3hytA2Mrnf*R>
zfq@HQnYpQmKO749ntwPUvT9hfI??7o7ypl1UV%_Sp<ltKFX{wrJ!nzHafD%9Eqffj
zgJj-xGDvP`hc|O<7sI>Cuk6kM5QzwhbrzotbS<H*gh9LsInP~S3j^usx)-x;43U{H
zkfgU~>G_39++5bLS7$<Njy0t)dsh(6rA$u?2XHEA=_T8iNM;*(Iz_!FddTA3GCT9b
zE|Hwx_$Pp?<EQ%K|4EX=|AlaRk+B~hk!G(_{KE3BqoYeaP~cqW^URB@R;r}Faa3F6
zK(P22C&;22k1SYUNm4riL^-$s9eIy~T`(12itBdjN+O9}uFYJf5%3AF;BALQ2rpN7
zGtPC0H+$7RKDiTjS$REA;a%JrfxJuBhdDRmiB6PBlk=`1wcXvxt$cd9$IM<IV#@Ad
zw3fdyl<dY#!{yK8ta;)g&qxlp?u=+5>(?LLw1!*LmzEg@i;JUqyA*49AI><@OHkeH
z;#sY-IO-?=@D*h<5X|n_Yp7Ydl_ERxqn1OpMcHs>m-|_LUJnG~svB!+H1G~d)aw&1
zy#zHP6XpHl!F1k4CW6Z1_3;yJJPG9o$rH<@EA|4Gx?3zKgIuU+Xo6wG#p&?6zH4v+
zizN{fI~0R=R}l_$2y2tAzB4i7)>EI!243QMNzvrsFYi~Mtd6LIaV#ye)39vutw&tb
zr|9y-bMIf#-hR>$kXf$1zk;opih;e5Kk~zn5Zl=z2<vZjJLGR>p`$CZ8&mM()cf?C
zoX{K(2tgQ7mOVYhcz^j9s)i_FwHWPNsk`o8LG>y@vk=%d$n+(OziTexJe1^~NVrS4
z@$45W<=R1jSE0xu3Tq60Y{>zaMlX*+t0GC;lW?ppoG=glc}KL46PS*Fp#t-<Gpge`
z+#a&`p&Yf;iNb++?N2)*(}#ft@e36&Ax3}+aShoE{nlp*!W%y>&F6RflEw29UtFhL
ztK2IDjq{ZJ5dP^2=o?|ozfg6ppCM8XLy6+^B^Chy`(?(u03&*7l1a6mi03!{cGXZ1
z_)=2K;ZaAqT!@P3ItAJU^)j=KxQEcQku9LuK8049kk~AiuYIF1BZnj^+C1Lhn<?w>
zKJIh);LJj+N_cR^R2EFZ=Cn#^C4q#l{Z3+wTmsO^*cI$OD9;xTQeQyPW#fxsFJ-!(
zxF3&$;+HH1TGDnz+Cgn5X!H);5z`tpcV7_pyODQg>W$d<HH)9m+AOdg-8^#{wl#x=
z$HJ?=x5DFxbwI2T+IEj8e)VZ$y+*D>i{X7{@59=95P1jK8k5&pA3Ql_Dd@uT7K@+n
z+or2L-@|1rtX!`)0-DWUzppk)_Y0EKjY&*yCd);@lAy&BX0e>c4Y=-&>$r<Vorb|j
z`zp`Q#a*R-cj@?bH~Af%C{-?QFV`{xfYu*W#1(f)Fn3_P@k?0o$p<p_?Mw~>^P~)e
z?riZ_;*A&V>-^bLJ8evIj2|rH1o`x**I?;b$9NJH#53xih+|JxJ6AYOzHH)ssbK7s
z9haOY!YCDSJ>q&B70ycc)}Iz-oA9n6qR@~QUQ@LfBz2rh#<DA{B=Ax3xQ9(;jP||s
zsYIo`YB$<VI(b@rdGDXdTyhN)IoT@pS@da2lixi{dl*_;aO;>RE%VS=TG%pTk9`NV
z|E(Ux!dxxJ{+Ec{f4J-aan=8~x$1xBzW=oD&s=YOD4G+POi$t4fvBnsa-`29S=oHT
zsD)*@kqE*sCddCRZ1s=M^bED_53Q0T$JH(F-yL25tB~r?D?i&`{sF-MnZFnY=>(RX
z_!@fm3!D<{>h+4G57dtqUIMwKRAB2J%A$EO1YbP}EbPe|_&!ycOV7_g`?C%T1&!_$
z7I-uv9|nJe8~|oy;g79>Q~8W%9hQofAjOz_kiFsi%;i<J0h!@!23o&{Re$|Hq%C+k
zUaXY)7|VM-Dw+~ue8pSTj0t{{kbxt1OHzPS_iC*#z*kDzygs^Y_Lk@6qtnQKxYjeh
zMDOp9NU!2H9mDWF<N}g6QJbU(t%PpWdfg&kPJ~47K6(^oEI%;4^zFKm+w#C!K5rom
z```)64*4<MVJi|{Ef;KqMTEL_)_aAg8jclQJL~b4>Z3OOz!<lneA`Uec&yaj<vZp1
z+h@-!qwqJ#;S_F4|3oDrg(MNv9OU)7yf(XFe)Zj4({#RLuVo6uH=%FoRGUyw1NDHh
zFI{JbHJ+)n8T|;T{lr@P4}Mc;lzqN07wr@KhNjc_oXdTCk^?N(_0fcreSFi){LoU!
zGj_rjE)}l^h-FR@%D3?G<fyw0kKC5tXB9^vUBg{%d=)jhh3DGlIv3yQzhPyK&#iuF
z%#Ou9FFPR}_=PAynMOcBG*+KZW+AO!3x`bO$+6JzeV>#X{%uL*kj_1K8oM)ECRAzS
z_zTF>lqpnIVr&a9p%#7{IxUBcUtv0=@n*bcU1raHWnCI~QN={8k$73Y^MwvI`UGW<
zoQQG(DF);OoH6z7ee<(97;&+}mf9lsp4!HnNw`nxp_Ux+Z(>p#&0CPQ+}AV2;tF}8
zX2a_2$tV6<&<Xrche4B=^C_1-Vsi2PKSo0FzcJj<IstYVUQ97L(hab~=$LFCyk|f#
zkeUM{-Ox{BVW9K>3jX}VcSRSJ7L?{Y2KocKEK$_X@J&SZIH6-Hckk*}B7EZ@gpwr&
z;~{d=;Im>x%MCy`R{fQ9jXB`DKH^HyxhGCE*?`;ospUP8<ogTf)f+fq!TPFld=52r
z{sQ_#wMpQ(q&Pc*3ow0uifJI9zp=XJqG4!z>6XEpHzj<0xQWbf0oUmM8}()Wk~6C4
z2UZ|{Zxngh8rA-oAmyZtz<C(KJeUgT-33PrvUwTbWlHLKG2S`q&q%O@1qU4_>LDj{
z=~4C`M~ezq)<^G_S$Fjpf1Y@7VRM?Bx-K;;Z|>Yk3!4|GcVO{$(PV3G?G}kGWgo&y
zG9MEr*kipk&D^A4-)SpPK58(+IWt<h>8T<__c5Bjcd`cg9`#CmXNEu^6RoY_aTGxp
z!`RMO%}JKCW(ps!k`0|rf}g{^InF(MRfsZbx&s^Qu>b!3MNn5|^fHPTyu@MX>aJ*V
zA{MP0({6T6yq|<_K{vGxZk2Pn3n@R~QitDF4=7|aDdQ?AjRjWy^IylDfB4-X5w*zB
zB0#dlZnUrxQlFaph3RBa%q)vA@npi|n=2?y4P*Xz-x@>b3tZ6HCuc#&cN&~p?=(Sh
zpluP%^Xok4JtV((a3attTEvRfPG6V&@K%d(=$&f&mz$uvRfKYc4m*fXwZ}R}2?{U}
zVm*kBH39YQbML}fc;$i$-yQ#W>VVOw2&y+h(TD@ciQHEnA-_;L$$0P*&YF_=ai@mU
zon%a@6O>AX+SKj|b-2?qRZDEKCbm$SZ{rkDGsWLoMRSXGOja^B>WTLV>#f!opjBI&
z)yzXA6NetR;`K>+)AF`GTZ<C9cMA@1U!UUJIl>+G*?8IJh}W6YAY%KoD9WFboc)On
z^Dj$v{|m4CPm}0xw*Q|(!~%#8ssS1%2^9et`NY8xwXey3|ARjifWUx$7l>|=W6j}|
z<hRz1sy|4n|5f(nze6e30b%hghfD=fju>GeP?CnWI0Z_wu1+c{IM$@<&ciM2#VPWg
z(||o$Ln}s=sCUC;qbnLGwLBOt-52EAK{-9L3u7pRu;IVI>f<fpK4@kdW_3d#o$b5Q
zeOL~NeWTk^JG>yG)cOcbcmZFQz~+(<ZJ@BHBq(R<j(>h$?n_|-13w5oxr3bS0b2ld
zdH(yosQ$Cr&&~LMMT5M=p(saa0KsrK@ic16R~Jy4zAZ!6uc3|sal)zol68Soqhey4
zV>PLR0kj5!357Xxq5`Xvn7Yc90EyXR$`J|dJ}TVhd&?n$%Fv?a+B2?8V(BJOXUWq-
zUv(tkGOBLRnsxV$@(k*5G7T=A0i1Ojzs)E8vt{<bhygGF-tc$iWb}r89k{;o++Rfk
ze!u?G8Ig@3HUqVvtdZZ7&sn7X#7-wY#BU77=Hq};?hS}C3>|be*8AP>+e4>$fxL_O
z4@0?t?vdYb3Fs_yf2OB<IrkozEvmo0FZMm~;vM8Azr3p-szO^yxQ^{I6DDNhAY_g<
z8^clanJWuA1HoP=lLyo2f@U9i3qU^0#ReKZBk;D*pyNooUUm2NBAsP6=lVzFZ7zNd
zBGAz^V$jTBqGEJK*P=GLS0rWwFmBbn47}ad*bk*>$%|{**PRq-&0CEw9c3IC>?kP{
z9q~?yIdkUIG+-2w+bAqVzDF?H)sG9ttaS727k1VA6(~T)-Rw|%AC79hBW^YBAETSz
zF+k|(9XYMe3+yE0zc-ry!}W;h+!|CUNtZ}N8QM45^uW4L4~8h~-o<+`f5xxe5*^JM
z=?<1H@LuXU{DrSCvB^k(GshFoOo0+yF%=;)gOSK{^+e;uirEWg?LIcVX+9TnxaclT
z2VZ!Z%c^!Gz9~9?n8dK+!9>!+-iu#Zq+1ZD#8#NU^>qb2scc^5sy9hu=(`WMOX-!v
ztgMhA{Lv;yLSZ~YaybJ%i1egu4fCD_mX-)Vr13#xdz$`CLAspOcH_14vn-!d5w%9&
z;w?06hk&*J;J1(S&z6xeaBLp<ss^D1lxWPN$SmW3%h_rq?lA+Yy{UTufqA(WIb>oF
z4QKreQRL4mM1L0y1Yn`0n^=8Pk#Bb{{y-$4eC1##UO>dDfYJKl0O1>@^%~H&$!o8y
zsJsJr2HD^bKM0L~^;f9brFV<3>))sPxC6*s<>)BSl7Ux<FW+r4IarM9x!5pBrV#?B
zc)!&*N4~LlHAcwXl^d(;CeR|(;HMj_TZWR!YmhST{Sob+=lDd#&=B2!r;JTSIG`~f
zoZ0Wjn-koV{Q`Q?gfL9d#Qw=--uI4Q-^^9UrSe@4eu0|My0|l^N|%D1|Ane<pc31T
zpT+x;bMxF>F^kc4s!UbU**ZEQ4^H?q-94@luTPI|M#@Iy;>F3)I~CL0ZIlzFi^+kP
zaQc=e>3E8<6gg@zJw;UbcKSWxy-{f9fU1)B>nG#~=v(m?%i(38x;^5vclBc^=Orx%
zPfmYx9K|#$%`?csoiY?Mo1T-WmSnd^c<`@GRS-yvYz<jpq`nah{ik)(f2_TRWT0F;
zru6}pllD^nGrz3!H!DnGb!jiVf(M;pC<dQzC%ofvsbOO`gKy$8Ho}Oe1mY51r!+YZ
z?b@STHlKPD9a%dYDbALBe%6CARaUO;)OnBdA0S_Ixv1e5>#Mml-|^S58JoJ;-5u99
z{>Ye`0)HR3s}=o>owqtp^VP5<(Initvt)nA@BAaZyZ!t<58Z=+YA=`(+LY0*5SF#?
z^|@7=z&udw!<dAUW&;%5dn6ZUW)ibR?7n{tKeV)`a$Ek6-h>1BRt0^})8Cpz|B-q5
z?V$00gv^Tsp%H7hoY9Rqth1CGlf}sMHSNSg>{(+|;V#;w-UrHh)LpAKo=IMZ>Av0n
zg^EHyNR*tn>hpBOAlRqho;Z2a;<d;PydmDca#xr*xgO7U^wTOQNg)jH0m4SF-{umS
zZ~hev@CfbzEN9+kGmwzvfRe%y6*UGnje7?{=-h^!4aOpOP+3Q73exx>B?g&BQzEuF
zP)q7yoI7E%z6qp&k!q-p-yZ~IB7b)#qkW<}2SaWWNJIX75Q9IS2-z|Wl!O0sx&ISe
zR~;BIZQLjazq58(>u&{MjnVX{r0&{2!%d}C(b2WgtL*Nd$2u<Qw^x7-+w<EN5AV?X
z=+NC1-Y6`1^6}dFkNQ&S{=EE>ciym3zrFu~?O;0?sMUVso&H06=Ert_1WNr6K~K@G
zW{}vkF)d{`v5W2K>WVP#LLB5RlZI$PgjA%flK(L=OD|<&M%2+xp>q0DEi-^tpjeY2
zgw5`NGSsLR@gM3xk)qD^?W@J7hDTV1bYn-7>$g)wxidh+6kY0)!QD8d!emx+F0A+P
zhGts*boxF<`;Y>8u@>1iS($D){C?s`)?=69zcRDGEqM9Ymf@g8WvNKu*hC8#;q`L;
zW-^u{Az_Y<cy{o~Q<D%emyZ_LPtnbZ^haFuPD?g<S}_ehK<L&0B#LDW-B0*a`$tHZ
z>^xAnr@6Pw?N~(RKex)#Z+O94L1bnz;v67~;a>5GsM0u@Z9eK?aZTgm0!Zx|10m6Y
zDAbX?Fv{VRxQiZu3srn6{>sHk3ia4mX>7&!w}svNSB1YX7o?nns6P5)&2dvF-Ph{s
z$HXxg(Yyx6AN}jVKlnfZZc0d#6t&0>-%tXBXP!CQ2?l&J`e6{pmwT&EJUR;o$#fw7
zu#u0=bNqoge0D;eeJ^(fm6%(r9|d<=<e?0rX@#&$VI8uvO`)<CY;AFp7SlDEDRK%P
zvg;ps>G~EEuG=6V86dJ1zi;vqtx2?gz$RBGsSMwhpjH!q^X6mOa$|0P-M8r*SBLwh
zs+s!h56M4l;q;r!e)A1qMEFz&xHxkpkmmzs0xYIyiHbT3XYKrtr!Y+CQ{xZCH^y&E
zYAD5=r5nFt@<}%OC8|wZW|O1Csv1FEz!TqWa=tY5v%GeaVgyxk5=36u@lB8gwr*?1
zEwZCugSZkkq8}q((2#PXU@u~5w&05007-VUtv<kxje7H;NiU)@x66Yo(4TPH1IN(K
zaJ#AC`=s#)H=*;FSy<Tbln!{$rF6BF6;Z1rZ;)gOpNQUpUT<)2-#fShp+{4f=N;B{
z-jYnt_dH54;kDY9W$%&y*pg3B$*l|U8<4`iA&Itdnw~r>omdl8Qq(0x^Zwwy)elWR
z>r*d7c5~P<(3(g<#cK$sZ8pQ5Vb^>!vUzV>DE{4KJ>x5jua+gZSemMX)uwowNAa43
zPb-PBGa|9d7jU-ivsZAFA-j&FySx_mPLsWN=V@Q_KMMYE>zbEQs5{*ghLGKH_HXr@
zN<=khM0H|^Ca!Qib*(99w|&a_o;JX0Y}sr29Mu$*q1QPkHLNKH`{;^bMzvkttSQE&
z4MsLY@9`Z*z48{cI^$+&z;*SQ+<Oh4<%HD>8Z?|VJ25fa(4!!MdG<BdT|a7OFuWYp
zlb_1*_Sx4fxYR>CB$QNst>l(%Bkcp*0+ttgbgherC}+W-s!CDtJJN@d3thopy{}73
z)#Q4e@bR=aZG>F!OL-cv$KHSi&m^zqHi0L%R(t%e9Qo;DN0|Pxr&U(01K$|mw%Cbe
zov#sHp)2_u5F~Q-FI^tZ703$6&vS5}$icw19#s!V4w|e$LQx;2T9iRfhz-8)^s3Z#
z!xAzB-j$yN1L~W)7lKajwEO>!SSQ4DmU8lk@A9#HhYTb*8wg>ZAkg25|Ai_YI4?JV
z>k?>N(lhzw%*xK&>!~Fr+}hDh$r^Zk!5!~zbI1NoRp>AOZSr06@E9nIGJvuzQ(ogF
zx+dYbh6CZZC*_VFGOG;N8uIn$dvW~h`-4acWH<U~;B}%6pyq?SLlW+V?DV<jLn5T{
zO3yTFZJ+lRc(WI)!@CuH;gXLxkn;=YadewEiC6se@d5-lzv0jO>g@=>tTmx+;TPG9
zo^-AZro1QgnUq5y@AJ@R%Dl@^2!XCWOFy(3=2WMs5$(oaSSl*7m1mW&QhFa~H-Z|X
zic8dyA2Xz;dDQsD*$k|gpZ>;K{MBjvSHG$EKyCR!2swLq-o3p8UqFx?`GRIv5!h(T
z+!#AtUb>m>mt+$-GV9f$Fwe27&@LYIhIkw~(j>B}jkUW<2ytMUGzWR0OB1)#LS!e0
zsnIkQ8KQz(Jrs^MrL@Y8_b=8ATJ3L!LXL+aqKyXu&?y&>3B~YD2l5bF?rAlb)uv{i
z-wLyr4v8#mF7R^get!p#91eW<P=E1^XkK&m|Bw0mAp<BbLQ3)gajvOe_)ZK4ZMTl=
zpfG@UO_&!9|9HtnvS33&SK{OkncR03`5^z9gQmoqFO>d%MG+&I#b|$P&i}j1j%y%c
zdl*0>%2`mKPt`P^00VtdJjmDHL4B??YeJJkJy7cpa`z9HNzkL-mE@x)A4B)W48&ui
zhr!^VsrIv4()!rTzCb2$h&lM~#0n52yI^_ml%|zB+x5RIa|`<>ML9AF_zwa=jCacM
zDhPVbps%j~$=V3%pb8=!s14B(z|>?rkqBtGJWa|?k|~4W9@HqjVE<asSYP;z)*t?h
zx*UH^y9w~(zTx8t27riM*>4b2^b#*B;thH$>?1&7<)1trdAyFA_fb-8a$|%BN-CSe
zz+Q-W_g#R4aK3yk%h^__LGDU8*0Z;p_66kmQP<cy?7eKF9YDthI_HxN^*1Hx14V0m
zYDWr{%09??FbXOh!y4*DTRTGE)*4hbn|zhY$tlWpWr}}r*v-xT8Q?{ukBT|RZlp>@
zJw$CjTG;;_$g@{d1c?41XaltZVe<~qEgqse7Cqpf!`9IRF4P_i+EMf+UcmMfoC}mh
zZPn-Qr~&pVMmQ;2lXL<9jS{8u8n`^bk4c8%y<#pzKJw0uLt($q4(=b{-=Z9XQZDEJ
z{sd`61$BXr_X>kua6-W5C-Si%Ux1*7fQJjb7)Vevunr*QzyboU)#?@U<GjS1ANfA5
zK!0~SQGj4-SwIcbO`@=%oOSyoa=<|I)wiSG0M+_W?<jxDKT?4Joc!OP4*t_EBB~!6
zY$$g;CfWV|WYC{(v0}Pc6fbad0C^q&qJP_XiNCu+lZ_8-0g!Y5H#X*5(3nw`l`wA%
zG&JjLPH~CAZIHlcdL^)Qq1i>h(sQ?B1Umf*|C9hH>Jc0e%p~z~bN}1;*m;#Nv~u{X
zdo!nE#m-GBS#c*V1$Tzhy&k0Hyr%8u&C}1=YcSku4+cnjWq>o{0p`!MrA-)K4HQ14
zkYG4SyHkcb78KNG+Klcpotg5#*S1xzUeH^am}a+t9e6>-Aei&?ELF_I(sJpq$aOHb
zB>kEY|ChdhXwqE-J#ia;%Y(@6hTd5(p4<rfleI&=(L^Y4oqmE{8AGGNz82%C^$`@A
zYU0C9LM}!9jS1zA2pEo-r0I;FqTVrp*i(fQYKPi<JoV?F+0sWeh{a!Y{-V83{ZRmf
z6Owhn*5!_Xt3+nPL}UrE$5ykr{}Ht#WM%L2T?%*||8;kcYcn}Tv4sn+ieVeIuOf7!
zEk@)YzJ|8an6odTYgK9}0^}@}GwaN<({9ZYrCfsr@s5JPerbsU^mS;0@Idxmj2j&D
zshi(Q&P70cGMz4I`%)LrR~raX_g=yNlF0@;(hFEru$@}~O?b=d)E+0D$+!%xNTtvo
zK=?&fD~4a}EAzQsIrSOLC13s)uJwa#!f>;?<R>be^M3g4_iJ*7@6(NCXG2Tty<t^!
zw^xtfaJYSI$>x6eY2os=>c!_3gHhtBrTh$@n>fF*Kq2<d7u#opaax5&RYrmR&06wQ
z(nQbr{?{^iIH>7l_8Wa`eRy=YB1(l<g>}+_v0l+X=d7$G%j>WrjmfSL+`<Q_6`sfI
z?65cji-7&<U)>hi>f&sw*@W^1qBvpFbV#Cj)MFE5MFU^QI-7H+BJ*#T)ec2)8>+Rf
zvMAmdFxHK(UOxjHRAIKXv<?)(8qLp@DS!K%<=P`u>FDd~)F*X*HN{_ew}OXUNfGz!
zSSoI_$mJvOXxWYnzNrl{_j}M&<#3|KP*d69bfJI)(--fx7}yf8ID~RUJjn6jhP4>>
zoKDn)IurYZs%P?J^Sd8gDhYt_*S5P2H%oGA8NCZ-i7u2lo5JsQRhY6tE$mshq)4jC
zSK6T+)Xj@I)AiSx+M1cD>L;=SNl-ptWJ=<|JwP5E-@Hp-{KWrSRo`v)w9yBF$qnZW
zUt0!qg}gJoXL2au4LkPJCb5E$-Hxg@>ZgR4S~>5>EKXhiZk+O_rksI)u2YRVs>c%3
zQcFMyr}{xUysy5U-<g$luuIVnKP>#DV622Vm)BDFL)EbK=C8p9MAG7`)1c1Ug6NAJ
zTBOY<_b22zjJxG2Q>A~uxpeKqWx2&D_Wf^&r27bxA%OmxiKau7{OT{eEqT7~8#zq5
z7d)Ar**&NKKJzwm4m>l=qwwEgTK?=6hQ@%y{2?KM0ZHyZz;dQFmoE?A0i|UZFQR$x
zLgE&V62N4CP>4{0S>y%$IP;%njYY-Evexuvb*#=9Jk-0m8C?~0M`>~+4QUrB4pI{Z
zBrr`?8Us7y^5=0$ng;|>`=Wn|XhBf?hQCLCtbF<fA<_UxTUSzou$ti#m!1-AG`n;~
z+cq*#l{4wgbNFe;R)I>QyPrWI9q|yV{b*HmMn~Pup~u44R6U)=j(w#{0b%+ck%dO|
z%u)X9cztsI&frYGYNE<vTt#v|p>9Tg;X2=gII9<D#|#an&v>UE_Z-YpzmFoZS$~}Q
zhQEws|M0j(nRU+DOH=qZH$Tr;!O~Ls<6%jf=2orCBwnH=$>kL`BGS%|C{M`SpIPXd
zkuPV@y!<X%)a(HH(b75YE<LG<malaa9wc>auf#EztXoauUyBRG$MCF=`<=xmhzp11
z5l>`K@`k+o4C_&*CSFDk;*qr2`J7Lki;QZ@JZ_J@G*8vuDo=Rz)ZSil^_T|z1=zS7
z$!%#+fFK3lI;M&*6v0;!uqc5Qy{4G2?3uQ6c8=khm%{B&Y7fv~pnstn2<!veUXUC$
ze+AsBaxg|cD?^4(e#juPcCU!ELk}YR@#dUy&7RMk6jNMGMtCjAdJ_|O5|&<aeMq*T
ztL&E9Ke-HzfipIvygeP4;GE6^!cT^2Q#FegXBU*Ux~X$+`kpzw#CmZG@(wz!zmnUD
z2=^puj|B2!Gt3BKZvL3pd#kTUO-t{<dt0vCo>sX{HLpo)vPRd6jz!86D(qj*`;9Jl
zm0-7Z6}3{hc4fufAB4n8`zNTdI?;uVnRK9Dq279gFHn!Poc;nT9TMV~`(7t<A^O-9
z)1P@b3U$|V-Y*y^C|BdA;ifhDoDo5h4&=wBmvMwFL-;={ek_r9NEqq9QQMf)e&D^a
ze1XFWRpF&O3erhXC~~~M2ar8{YO9zopT=ZOSm{t$f^3ybJl%?6kMi%{xmNJ>>XDqz
zWxY8nU2$6t0x(bx9vPL<&=<Kx82$g)d(W_@wzXY2h=6oa=_M#AAXTJ@B2kehBBDa1
zM(IeCj+6vJdJ_>46e3Mjn)D(Ndhflcbfl9|LVzTG!?pK*ueFwIpR><<-hF-V_nq^D
zD`5bWnK{QC<9Y7qe(pQ%wFb|%rduDf{MZ+IXzZL0-80)A1G!FoKpUBx2*tLnrQ=#=
zHku~+wBDnctj8mipuOYWyKM}mH+-;seD6^nXZ_UA+;LX#GDi+W3t9rmM|@<;2(wtE
zFZ=*iTNxLhrnkOnSS%&hPA}YEc_OyMdAZ?8Zc1l%CvzFbwg+IBaMD{cm%(BU>)?O}
zPBORzSei)NIG?f&(HZD??sp@I`FWj&o=&qg^HYKgeio-)|5dN07Ewy`bi;@DWBbY>
z+jytY?d&&?Eic`l@^BRF@PBfThxx6d{(-TNI9C3d5|psgn1*wegsPngMcF!@Zb}a8
zIO_AIlhytC>T%1-^Cv^EmK12td|#tF5M7?6D@jz;pCQp8`Zny=-SwN?Y~7rkap{mx
zS0>tCO4Xmc#`;N1j;7c%8_}nD7aMMjV-MPx8r<$uR;`Nvw(w0QUPR}uk96gWBfztI
zbOi+dS=f~+zF;6^;d5-H$~J4F%WLVwXvNVR=H>1dw@)H(Cuc1zi;kflGSWAC39Ty6
z=$8=Y!x(jTUUyx#Fvcv}TgL=l9WlG{LQ0U~yFkQoTRbJ959?8fDPj+HWo9RAT<zUa
zmvEzg(SLBSq%@iGt9rYEg1H{W)5GS@3MVM=S#^@3CSrulX-&*g&z9Qp;D&<<?=kT^
z4LYU+n#a_*Lr2cFY@WZwKl_o-_i|V{l-5U&U_84rRy_F=P{i?mtwP#;TB;1Jo(tu>
zwqYJP8rViDXeLOv@$t9LYf)Tq9c(jkk`FaX2PMhehp+A+r3gagyO(Br4k(m=K;DJZ
zHP<xNlHP$8_T_)<2w-P8d~aYlS?luR--w7`p!-2;zzA&o^YD@p8{qMrrkjOrGk`Qq
z4OoG_artQmwvURu8=Gr{O#A_XCEty$R&Q5(uT*Tcq9Jr9ef%;?8Q>oO9z#x?<hcB-
zL>!iKDnE=Y_UJWlv8J5dr;6J0>X#vFsmn}!dbdaCBn(Tu?U5iQsk^19@{wpoXv4<h
z&ta1r1e3n58l-$|br#UqPYry<yf;ht<z~A}opMy74dS{?R05ec0PCSU;6s~7?&rHK
z=`phr#(vjGym4{`*|bnpgjRM@TH=n<H9Lj5YYT0tq>h_5ro4%*351jkxO!XLtvy71
z=3!eDh@>g}_|%Yfd(cK$MTHyekOqGL8;A!4-ij!rsXc<RcJ$-=9+V^A_)Va1a5?Eb
zMS{G0yGGzGS`yJBy+x}tgVcvG>AQPDM2L;18I9FKjMHgHp^JiEXW1rDJ02&nB&ZC^
zziAWHk=z$toBaOlu&IU^uHv%oHXnu_$th2ih7wOq-ou`?9H>KaJ=_w%o1x+2z9z=4
zb~AHa^AJ;;@N5jCQg?GJAl%9Fm;v}$f9r7kzeooJgw+4>_1^*MfQCa0Mu-_0{Bw|=
zcuej;F3TU=CqDQq8);+aXLo;+o(KKX{y=n|_Ys{%Qxg_0?NxE-j;%N?c}QBh4!a1}
zbBACJ<b`i>m^)qJ)%~9Yf4>EU|0|Mg|L3|&?^)r$GNH^-;YtVuL2Uyj{)Qa4BkO6k
zb~z0e`m>%LX27FaY1qSHOUtnnyXp*j%vuVr^lie3$FHZ7!mAD)fA4GPF>-KgW#6=B
zBfo2da9Xc4DStIKYgE*Hal9tex}moyw;Lw%G{2u~-{?H)i!URf8L^iCc8`2k4s?8|
z0`TF4yG~PAePp^+#}`CNO4AK88g6uz6lXsiPHz!N6*%odL)YYvPRp(*U-Ws>t3<Js
zBv(C?ETB`<FU#FjT3J>2G&5K`^poOB5HIz@?d)B2a)ki(S;M$3JD~-ixJ=|QilYXS
zTNVVfMNt})l1`Ok39r>@o$TF;SC8FD^>=tIpPUmNz4w5b6%Pv`%Mh-0Oufs+Vd{6C
zY{z-ylU;o61;wA0TT=>0i~DDF8b=&qU{aMnXM&hTj065oIl=b>;t}#(|4#MA&0$x2
zB@xZ9Mv=k0CRg|#v3Czy>O!_+BBEcSEMU)+q;O@c=Vzm7j53}_b7-NI0;u{p=uU;V
z+ILb9zL*i{?xm3y-pi(vL!b3HL!z5xs5wG_<NdJ7m?q3Pe8yssEPxZ#cCoQ=%vQE#
zi)Cmc=AV(1IQnYr{+X}YFF5bV=X;T&YEF8lAWjf937j+eN6W$lu&hKi)j@~T$G;lc
zB%IL}(-79(QvCFOqD@GemB#d)k~itK%;acUBa~?slyp$ay^5D+H<(rNUez@P{DpCI
zrD8RYZIWc2aqm4Q+Dlh){>TLaO**;jHYP+CI=0L!$+ndnlqvVBv#&VjeH``7jn;CX
zoX`r|FO<hPXw!L<PG|>WEVYBa$q`7*k*kVUILiA)SUtdYx(dJfsqzJPcy=g*eQ2KM
zG4V9I%#%8it37f24$EW)g7#ffBF1i!Ol8gWm7G2`$!4wn-f+UC>-!Ull!tH%_p^n*
ziL(ssFZo$|qd+s_5<m|D1_V3cal~vNS;T>*D1Ca86gmJ(DPEDnO~TE_4z$*ZG;6GR
zgpOIqHOjZ-b|w0eEbO;PDl{Gw>L|{sH=U*j5965FIHYv*!K`$Ug~d%MKPd-g9!*2!
z1xkPz4x&5`bm1E|i^bj^5UP~d^YYh3L5!EDSUu+-@;}h#kML<6@x{qE_9A-_WtY)S
z(OfO3qXVkkQs2GUf6?5p%v)b<W|!kMA19^SRR%jrL$fg9f?p!NRbpBB0ijS-!Oqs3
zoGJw+tz0#Wi)tC=HBt1QlKjaF<w?i-8eiY0s3<$O5;(3&bvN#Vf`gE~gKr(yimVRc
zSJW!iJJgbx+a{WLBAl!hze}!<2P${)Av85=E*dxq*BV%nU$h`V9$u%8nHzINt%`uO
zHMPVMPH^n4u)E!lvB8WRjA#~>2IK7qv(!@~*;Iz_GO=D^IA`s7*(+%u)x6)FpN;GR
zB$*IdV<j#6d)3(Lp+UUXnLV*}!2&DKn|=}tp>4*s3^8x$=4GJc0eJUd<$=_M8M&aS
z_}*+$lg1+_A^iSdSPWBSaU%gWz*Pyi@OjGDa>QsLR&D5kJll($0@^!$9TM}ctqkXP
z(k{WgrmD4{fz}DX-O1wJT3+t^gu2;C2j4Tod9I{Ow+7ac-7#WtCc%{XjANvCRs!@@
z_Sd=HQ{)zqJtoYcc^Sb8a&f#FPOXvGV4x1lRKkYX*1ztRypt0x=#F2&=_iR=U1t`2
za6D*K4!>_prkw?~0-(jXzdwlD&bu)S=I!34{BcooO%Ci$NyLL(nn_})rdhWlbm}$e
zjnKw-U{MW@RJKWh-=C6CF6}Zq+4xE&$1ezfw{PHPe;mV7)%CgbbN*%WFku?|P)R~9
zi_xmZU?i~WQf4SnZ0=t-D~ei_;54uai>K!_dfu3)Kylw8|BccmP>9&}p$3?sF$juP
zx{R{c;4_zx-zR-gf)Y;?hCY@iVpFzmaYghMV=N0R26fi2(SMG99#W@1m^Z`j99#Cp
z!sh)4i3G;z<AfK0?T)dC<|SNeL=+*So92fW^2B|vW<(kusB)&7`R}q^t6jgwakS;J
zX*sXUjy&;1wC<3M<JkJ}u3A-6^OSA;qtpJXp;%M2fb;Xmw)2w{G<Qi!t{@#JI8N`@
zdTX8D;e|Dp!acUEKM5A7FehB3DzA=UwX}Lpr7Qq_86{_06AN(KB=C_c3;{c|1AHLu
z+uITS?5cLCKTbv8Q0xX-18BLSR@p5;3E)`Gb?}#w`d@(`16MH~m!PSotQ~*n;wjE&
z52H(Wb#`QTtm-4-n!{vHPs2NX`6n^Ov05d%NgxZ8JJ<Yv>f$k4y*pVcNBB2I(_oFr
zkdlf;0E{`@{zq60pHmAjMH9pv8?HjXah;f`!Hgv(Z0C=75d#34Ow`3-^=UH$c0;Eb
zS;|;ijrY}+_hi2*m3&{&-bG1U6dX8#{>LEG|5;V+KgXDz7E|K%q|wCTGjVF#ed1F%
zlLCj*GOw_J?_GyN{8Sa1AwlN`2r3?~w`t^OHfRWDE#G*nu+b6Qt+J8ax9Nb@D1D8{
zp$jq2mvx8k^S$*`J5@o_oR)0xHLGRu5gSs3*}F|&5C{_9UZSwp5g%2&QK3LV7va);
zZXj&?hA`y<j9>-@f*GMTaX^U<6vkLhs;49aG0aHzMR_H5EOmf=nvHyHb>nca$mU%9
z_4{M5&MBN`xpzRf@gC?ow*bFbv!4PVee(U7jbPAZk=>`nOQa{LVdL=|I$-A5UOu!y
zmJNF%9``in#5$d`He}1BK!uHRp<`q80B<pv?;y*)5$t6&B7aA*u(fs1!&(0o@vDYH
zZayu!69%cG&sCd|q#>MX(HNYGx*tFD<>Cd6S?dg=^~KgkD6Yx7>%@H~$yyXs6*CqC
zu={6L)8t6UR2xh9n~4dDLFrbYO5Y~vN9_yA&1WIFT*J8s`c6k5A8q<zWAH|cs;hXJ
zPr=N|{|GE~nOPudJk9Sy^`+^ar>+<(HSElUatVsQiN(*Rs98e%rdU&irGiJ$&6E>!
zOaXqPt>PH7$+55jeXbo$L*!|RVf9RC<(bo-PQ^Kbdsm#cAmxrT)HShb;S^|z*ms(l
zO3#K1A}>$NHGjP)hSV@lxv%5L?#RJQ<@5d5Yq-=dOxdc$N2DYuV9~neBT7Kot>;S3
zXTOOK@lO_#EEc|5Vpq9eLK{|0Fn?!8|H4AZA6x!~WgY1o{`5c#%x2-gq2Yfc^88n8
zOa6Pjx``5`sQctTSQ)(8aoloC{-RazoUgpPOMBp}*M?TwPF4;Rej5_nCMaFZBkd4I
zY#H8_5I);#zUkmv&9=4L^Tu@9x>N2uY-g;$1^~bQa8C-wlLu$VJj3pn8WCFVR1e-q
zSI=Lc=|8#MNELiAwqg8=!{wfdN*y%ARY%2_;{88XlfS=GXx|=mC7tQlQodIi);%od
z7;kld*0@o);~d2NIn8A)rEYwWPD#|RcFLvfCqZ-ZXP6=`>%M%h)?Z^xD#OeT5mBH(
z4w^sq;f$aIiza)Sj}yBQhJ+A;EMjkCRR-DPb1R-E+!{W`{<yHYajnj@r+AI}%0*&N
z><638SYJEem5;U;m4Y40yuNO8`7qV7X@=erl(>3QS%LDx;n~rl4)1X94$r)`)B01%
zy0bAH(if%L3qMWyD;#zMR*nSW?y~oUngeZu{=2rbHb~CC4aK`Th0-4nx!7chp6gM1
zVj(gZO2Ol&Hi;_25Wz0kAbG$hgCLy>3mL3>bM>;Fm;HKm>QY(!zI*!n34^aDM?Wcx
zwZ6EaF@8w&==~RurI;1<a*;B~-b&exmgta4?ixXB4;tuU*9m&@(=oa)jmN(D=Iwn&
zC|Fm0es)kUz<I$4I+Pz+qfkspz|J-xl?rDryVujFeWtUE>3tGf94}xykHqmjZhY_}
z*~GF}gx9#VDE-@{kxjfuul5rgawW&SUcBUP?=d)z)p=&-TlY7^&lP${`1PI$y3>V(
zNq2U7LYOozi{gY@c1=XF>}yYOnjqr-^kQC#=aGR-QtlgH2bpBmQNyohLkaWEs_aT2
zYJwxOQ0zp_bV3K-2sas41$$g(;GR9rYcNC0GE}D_cKX7Xh>n=vsW(SeZLXDU%+aY;
zPAp&a{TjCHUZ$gExomVZDwpNzP(VP0_d?>k<w}#HN0P4It&X{EXHWa9#7SR_Y*%m1
ze}OTF?VU%_0_p*vEdeB~$He~cD7bS4K*85G$YK6(9(~Q1s@VPZA!l))xAN<XN3RX`
z(qVmx)9TMF4hJ&KF-#qgHZ#!On+K>b5Jrl122<<PS^_yN1*81v<;Iy{-<$77ie-lG
zUh`EQa}%MSFFDVj97J!fmfZN@_`ut)M<dgi&>i|o783#nzus>yPd|;}v%%vHf#_-P
z>OF&FEU%<&8b7Tce#fM)@Sx-1N!BB_q*Ao)N1M$jrDZXKeM)reQ7Q~s3>2J)A77D`
zS>7_ziEDLTvr-#Y+*p0RD2eQzIK6pVU$Kn5VlPB9Ivj_zBLhhBHq4s&fU}1j{W-t?
z(MOjBIG4v&(4ijjd(h+Odm;!d{PPKXWN7d=TEfoso4Q=0((oxhF9w;%U!{v3)K})e
zG_ifz{^m4-9!3OyyEH%+_L0M48tp|7jWmv1yn7qQ<nH_K^z+o+Yd2y>!?4=pv9+Q&
z7~Y)_IE9w%0*6<t^mc1@akIl%mjP}>U#6!v@tF4vJz)T^GT^Ei>eCTTz3efr(Y9Ar
z*ztsYifz^!Z{(g&e=hJNqT)^8z9CpwDPH|t9sX}~!0%(Ff6^R~4<@SVr>ZJa<W5KU
z{Ur5OH!TU9QP-&R7edbACr{-MhH>=at$uAyHQ_vvS~+S9ingfwF4HgenrQmPPIzod
z;G3%nf(&a9RGqq|FXX$t{aJ+mmsLsrmyCf<rA(`w(957gT7ewvO3sGcB##dj*8E}#
zKH6p`nx!<+LjtW(hHm|w)Gu_lLEtgNjdeZ94oxQV6?}o6ctTuJL#UB?Y##XW^{uSU
zMA&qBe_qY@v?|(UIG{iYYm^~-Z+)4vj2&f~CG!q}NI47v;Bl-A4aj@p<K*L5ax!+K
zj>{?tQ=BJ}qj=eZ7c=1|68GKOet^$>bNWPby4Wb?DBa$fi5uXN3HN;`S!xSq@`+qo
zcw+luV+{S;;bF7WfFeBVXNKrw{^BceB3H=PbfXFKx%=VrLa|+%&ALTNfwh0r37ahi
zB{R8myK&5B>dZMZ(23)C3!HrDTWnA$j#9gPNFwFJ^tIDb3g)lYu4^Q-opD#RXS$`%
zu}~AA-zpi&r$mY+dSGFC1hZ+$;EV6#JRGgg_3^<NdT7QR{nsOjgIddiZaU|kjgH-y
z_atBPf#X13zwWXR)kAM5@vIOZC-J<eo|eI_v27upr}gg92{7>AgGuOfL{`0^f4b{T
z9)%T&65e}C`CJFAgyPO13FD?3Z0Bd>AbfSNorPR9aL|E%1Dkr?-y$D`6xE^+6^L#2
z=*-DF`7}!Uh`@W<9eIFQNIp$r$5LfAyeD#Ink@BBgjl|q>1Iz^P=+Q5Znndp#I?Y?
z)l~Q)G;L6<R<^HvLkcA!%f<m?HLwjFp3@}s@ydtK`AIvs&|PlZl>vpACdp_t4Vf;t
zj41X4a(u%2j*04K!wG*4%NPOv^MbdtTUFJ0Bo${<n|B-X!zP)K;!DSDF^t5kw)taM
zW)uv@Hv1H~{aYi0uc`4%2Oy%*g&WQJ0b7LdW*BdcoTs-QPCH6bBU|HiZu{bkM^gF*
zs!vTdt@s{9`Y9o3oYT%ymgVOY*+!~3h1MF(@`=n?a_00Tt!LS_%F>}+tNytGdp$hk
zF5Pk@hxRMgSIr#fknRO{jF|h4l|k9Ofl>4}PATpKA-;Wht1cdAY|El4tXAX?L=iVB
z$6orMOL^<Y&sB4nFoMmWK?F$>ZKm>BCJ>7T7IX3J?721HIM&TZ_33YYh&gB+Z#X*<
z)bE0L!!}9hsvVBP-jNI)R|_|n=c10ZN?$I!XqZ-Anl8r1=2XYqE~nfWDVg}mpTqA+
z6Ef8Ed+p@m;+~+3HKDV<$2A73=phFd+l&nE!?yF%Z41)PCmg<Hx6Zv`^16GIi5fa_
z32%&UWUJu}o{>n}9f>ZKW(b;eUwbRt-E-1eoBNHq^M!@#@D{{zA4+Ub1D258lJvDE
zGQ;S`b2YbfXU@D(vAV)}^`Jc?U?7axM(DvI@NR?vEW8PPP7y5n9*#G&Ccy=-Wu}2$
zeQe_{X_wb=$+-Pv>w7boxNAKnaq4C<+uEydOQE2Fm6;mNIa-&~VxMKwz-7{$a5nu`
zd3mYd(^Jps)VQ9_p1nXFS38X|V;%?Bn(rLpN#+uio#<f?i!O;YFtoO}+B@T<7|vU)
z{yz0_W1z|Rif%36rCAgoL4LX{_#F-rwc`!0$J2iK@(TO+e5&JNmtJTrQmlJXBG3q|
z7CM+rIRvxvJ>SHY5VSo}ZOLe|slM^9C`dEI)BG;CSZ83}HR^sQRcg^DvJ~+u!FL8T
zk);<ri;VNmC#X&5p;=3lGYxHeKB&lk?bPWIER%XSKUS%F(7;$IrFQssiXSqOc%GDo
zr1deaDF;!sXHACzrd0df+Vf&K-~Hv46YWv0v=oU~A<t<KhvCDBpyFF2OccYZg_HSw
zv<c8D5?mdJ4s^?gmnT-i#6R}DGcynQZrLJy=-FiCdqZ1E2zl`CO=drsjbk{-M18by
zX4gq^D68Zf;?Ty*lI(9QUJZ=jZx)<={P9|88|RZRkOv0*5t(HYfG4SBC-YU#9^L?D
ztFo|~eekRyyY2mkwv^Q4!fPM%`q(ML_A0)1&-ND}S+$%ev3Iq@`yLel?x$psxJvf>
zlT`1GrDD3JHKpgKh;@Ga4DcljQ6uI!q|CTy2UwE|Ho|ii;dv-q!t0yZrq+tVDeWT8
zytHQv=DIMEFVaU^6sNryQKS3&yLU*5NZ!du^2dygx-!!>#PW>4Y&ht@dZ9gB{8NZ#
zPYxZ5!9y@VrD9AT9$fU?P$SqJuzGSgj}zJ9Y+Md;_6uqjlwXaY7iZgyV}v?Ktq#w=
zZCe!Cp<Lq<T|fijh7!Or2;&nk!CF4eS}9G~4Ha!3+1G8#plMeYpcIXKyLRWwWv8f;
z?Gh=#5CfgI2(StG0Z;}<Zb#sG2T<EM+rNv{P*H$5ahCKFvwG<rV7$%@7v^FU7<<OU
z0CW8hGKs(6&Hw8CKW}MoJMjF{kw^Z9F2p>`Nj)=rYe=j_2zjGe>qC7f1oGzNc!5xs
zz}v$=`E>ICo%nS3Fa;%ps}8OcgU2SmHe=g^(*?tHgJ&2G)BIq}u&I{`WlD@Sgf1*S
z+_~)Av5~`{x&6Bt;^8zv=c)L?4@i|Lo)5#blL;U$5UBWn@=dcFCwR=Nya<HVI!M+U
zd9mAH8})uFiGdZwZCGqoUvih%z5)v8<-Zlq@9i*N(jHp18jv--pKrH*FXK~pxZs_W
z%Q+E89{3)~etDZFOzlA4hawn(INpHWRn}5DVi99^l8%Qbm?dztA)uS|xv1O4WJKk}
z%R8fDPpl#UR|852{=jI}OlIsa$-P2_DE>0uycKtiE?XmVHJwL9&`f|skZ+&?$%@4U
zxHk1Eo=hKG^l~n`P+CwHHGXts_m$eQk8`^h7RnaW;~dx&CYe2z>ya!`%TU_IQiHRI
zn<MP>7WS;4lH8{=o~ve8bSs#>@U=$Q^Bu3miDM$xT@6L5u3FKS1?YHNCv6Hdtv1KT
zZLu=p_ms^R6rF9;07%kJNUv97#nr?J1rseY3~ZC^GR_;C)W$aIBUkL7ALVd=*t%*P
z)r+L@;leg2`lc@`(e&A^?x=UdV=|v{RD@KZB<xb9`OTi^#_>QPhpKOdDd<g`mc+%S
zytblF&brE*o10^MLaH%UAWDW?mjzA%sL>E9nnQ&?8q)}RLdQ+}<4yaQQ!28~i@vs9
z@mzl>fp&{<&X!O!ixNeKxl^{FrU6?4M11>aOaMK)PCVfUgez}S1uOKFDF610r(r`4
z??5p?8alUD?d?zc4<i)ZHEK@Wj;vPY7Ko{vsjL(c&Z~V$@Muut#s<al1x{Z+GoUi?
zP^@o!w2rs<h*7+0LF31_1%hvwv}KtRr<rLNlWNju7`asnKMl$H=lXg$+Mj^4tDG<H
zIxB2*q^)_zDQOWva4u!!qu7+}yiw2edwq8KvdO~IW0P9;Cld7P>KINYzN~uS?@}DG
zP*~-H9jS#Lmm#=fLb#|>Ft!ezxYJ)mh2mE~Yn~K1tA6<^^ljd^Fs>_4=A-Ljfk_@g
zC$Yv^&t&3yA6U;B3Ruuj?8We8L_}oB_`UFWQ>vlRcBT^Ovw~?LH=r6lE6&zo@=2df
z#}Q@EI(SOuu;^->z6V5|hLckkaQvThRyIeb6T)wn_bFaAqt41Qk!V}XOVdw{W5!lF
z(0#(6Dk6$ZOEP+LUoFLAp2>{5MxN<xeOOXv9HVL(lj-~M0_1bi1E(HVaIDIoY=_!^
zq=%xLP%`Zla}#Vr86Qm8f^OKojn|UyzuRi<ut6JcvvYLpj8iYm3c?pTh(0k(IClUe
z%+4HrrR<Tl)Zb$6;_v8ya<Y&5eylP!sx}Gh_*2ERzKq~|a8{p?M24OPWl;DoSGEK6
zLaC9V;RnEd%~93G$71<ZtuCis{}#NI3_4D9p8p*Zg8Y+L3Ia%Wew76Kt#tioi3Q`A
z$P+hr!O~s)PrDud@!9|J`rnM3^KbF`hMhxA<?)H5n2@b-uZv2Pgd4KC^4brWzuoWn
z7PH;;DL)fs#Y`vklnfzW!oJmt&a&omzHt#roJhZk;-u#y(39y!oa8d6q0driLPN+C
zBMv+iq{XzTqbTNLWXXQRVHN}M6w)0NetAlGkyRB!r1DBMa*LIH`m?2|?zd(6A1qh!
zO4E)m-6bYrKwh>oJoy81V>&<CF*9IlBN}6cj&c-}^MZx>Tz)vs=%7!j!WEjEp3)D0
zdB|@Add}zJDV{Z|^!)*R7FI1xNq|U!L;3q~T9DFnA3eVoKTI}%ycdzcqQE_Gel_q8
z(h{0%;h8n1PRKlvTg1>P-gP+>jG}&>k#2OI%|>iPOz+f<HT#^as@Ies_^V%LbQuhA
zws$|JPS9=|7MaD2ZQuh*8TtHrD6Vg(qhc<^WYDPgZ=5WRXsJ4srXg@T*g1XvHkEJ7
z6q<LGc#agMD2}7KP1k3wL_6Rx#9K;h5v?8G9)3CEh5gm_`a+}>k$F&n6te?+il)z@
zBF)#lwz04};<-lL4f0G;6Bqp`ssN8EgMpST*##&EnVD#c3tp|pmQBeA7l$6RIoWGR
z)H?OWz_fk1{>Zb2-B{tccsDpgxLis9d(5kOC;N%48Ez;H*@0nmLkB~t*Ne4KQ@hcM
zaF1Ed*ST<=L3^1G=4I7f{(<nYhb4)8M{~>h^b=oB1(@`Kr|UZ|xfQD&hGbZAFT1Fh
zK9zezgTDa7%)6=wZ*0vLyOkZJp`0)d>4^CFf}R{~V_=aIo;kL-?`kS)c*M;A#O)Je
zPQoe>VF(1m0b#yD*x#D=XhhQ^$EAEsuqm%bY8Y`2QKekl1z#qbA5=VbZxW~z&2EK+
z9p#d`e~45AfAl(9^oxSMSqJ&(;=WY~%AR=~+OYq%@|(?gO0t!$3+3jE1{oH?kZ+wC
z0Y+?3-y8d|4=9}@H}eUhEzvI1BoAF|<d?wc#D`C8WTNdtT+qUyuU^z9Xm~}KEy12m
z()svKqsKqbOg84aXdcK15w)pa*o>I?NPaKY;BcBxPdrYgJ*nICsN&$E&ynF82~V~O
z6N1a3tVT)t9^URa>0&1h8S~YYq0E=hSSru?=DEk;&Z6tmjM@C^_&)b703OGOH8;kX
z>*8-e=bP28iqv$aTDPLS(8DtVm>_l183=+^vg)9Jk%4CrUPSllo#;73xqfCJDYCKc
z67!f@9iK442}@JYM~h2Nz`K`Z)=3+I{i18W=*x{M$IxUZ-KTr^$!?p2Fb0hE<XG?w
zJgfxNvQxN<a2rp39=eWfF4;TBm$`c|X=|LZ@1(Fh#Nd~&drkhXMkc4>+k^8~WID5h
z$+2C{f?HPAIMxWM{M`(xFGtV!K4umQ+sz+enL!ZGqQ^Fx$bx0B+XfwSZzY>5ug0#)
zPgcMqen7lVTfStzF1s~J1Pl7OB>p8KavCTBdiLg2(NCeNzlIaG@8`2^F6_@EJ->FI
zelA>}?r8o%K2ZBj%o3)!tR4deozyYd>;^e6GmR<U<M%E$e|9%75w`F%{U&)v|8RcH
zf`F-<(GBJc{7mHS7hf)+&k(d*<jGJ{JZ3t1olNa>t=v2E%)<E0w9FG7gZ&ks{IjL<
z_WF||7254@rDNjqvtbV~A<l#eymx=G*3H#nMNYA#(_bVIU6J+`0bUwg5~nO~I#*ny
zmeN7?=7%eRjvB&iYy&=>@cLuJyF?{!&sB*m)_$Wa*A24TH>v%~6E$UG{o9|(uWzC%
z2YJkErb-g$(Vw)5FPX<F$;~c+fVsZ1orDo0a}w0>tpt|Yo=~so03|j9&tNTFREg~N
zoiEG7w_~`vE~(b~X7F5WXX3od@x9mA37gO|S)vwF^^os1&UV^2Ay84Nx4p-gI?K^@
z<!toVtOTC!QL{<M6StESM77tC)C3QKguylb&soDsil=&<H2#c8H88zIcnI7x32U@i
zCFKGcMpOB+=w5>_#s)W>*4fUIobp@IWB0+XEDG?#yM+1a)J>J_B@eivuSL8q{ae&V
zB4R6FQpRLe|NJPa5X8_e=l+)Wn1jH(%Q$zkK|9EVIsDPrWio%fdSWQuVYvl?H<Zm*
zM!VZIZ;)+YzmAU@bou&98%`L4e63LGdgM`y#8*>P5cAktRsw8V>+W4}q+sM^l4Bzy
zmuNKM9)^0}-_|DMwm_1hjpAsO^u3b#cIuY9jvNe>40p`b*4NP%Y_A{MXI;@xF9|&b
zLU7#^!rfBmpZ2gigo%XwW7q!AzWz55N)88QxFcfZ%Ra|&!S$Eg;ztQ#O=<p?oazoU
z7D>Zu9D*Njg>=$@{u>(V>YvbkJh*`RKF%ev)1W%BT85*>Z3Auzf@x!8Emo`Q;dj}W
z&M%fYYrhN+aGH5;Hk}Zmu{?w<9-05XmU_VKiEcqk6p=DfmX|gNYD$vUQL%+BXD8mh
zd#LqZ8MEF$`(X12<Vi@Ta^W2sS#W$%Mw!ZXH{q^=GNJ64X&xyZ#+f9rb!0E*W?H|C
zaoN%^zo$*%6k)fe^ID8f1R@#*dYc#Xe>NUvKnt|y=$+UbS1i=J56548rd8K+7T?~0
z<0GAIAu1y9pw69^3Q_>SG7K^L*0kB0m*`A1!4tD;2)GW{dH}J<5w*wn>qvNDbjf0V
z7;G9KYz2-aR(cqb3UtfPdHp&f!KO5lR`avrF`V3l*}exLL@e7b=mlH+i=p!S2PR<z
z&~yn(a{y7eT?E=X4JXNJzmBB+f9_=Gd{8rqMCK{Sqn_Op8WkfJy*Uz&@%5e56_s+h
z;*@(=c048mDFss?LXe*(In3;&aMa>^eQ@)|H-10_KuGY*(BlHkfgo}e&4jT<&~)T@
zI>8Ej)ZRUK<Rv&jYgO{)aiPGP_C>ZeewFX@7I*9RXE2~9G+s2rzB)fO89IzE@c=sS
z7kh5vI^idGjE~XVzJH6^dPRFC(u6g77jV@zLPPYa&F=`(pjLxdy$>FGvg3=e!b7fU
zNr-TnVwGVTcb_UI&n?p)Nr&(2Ww()fd%+@7)&w*BMbKN6K<<a`bRYbH++y1~fFUCO
zf?>OwijDnEW=)-rxKAqbk)ILwnAu&OeUiH5VepI#5k8f&uIVY5C_c@ucfC!N|Kw4K
zhGlK7Qrteh3d5nAbEHBgde5yHiO6Zm5MZ3ngQ`bi2XrpRCmwGrkTCTP@|3#q!Rp-g
zE2gY*`plF0MekGJD$x~?DkSp7*S2-AhAouM&L^U@PP?u)Mwg34NW98>s2sB(qxr`9
zqqLgM!F?YNoZ1@n;BsA>O=++y&OReP3B(&k0y6Tr>+Qo`iq1#MC`7YfamK==dJg*r
zsEAVR4NYMtU^A8_(<_L>npi}W2a46Kh^6iL*y+63(_bE_Zgseb#q1rG@r!*~KgaLV
zpo=C5B|P)-o@S==(I_H>r)#D^+VU*K&8tQBv!f)=2^bccYp`LjRThcx1Oe)~beI^;
zyp^9MQUF=6B#G1ZcSBP>su3rs<uBh|_EJqxt;2UjDz-~KOgULbVRoJIh2IH@r)8{A
z3xJpWfVkmejZzrTPj7_!TzaeaXiF{IHa^sXwXyA#a2Ut#=TGLo%S6hO1W4HanXqG&
zTPxEZK|~d>JWw$bT4oHZC6*aaf!@slv$ASWK|`Bl@oj7ePllay=KZ6z`~i!$AnfuF
z^rW1DHX_Dw#pqx;cCPB7_-q^RCt2g!ie>JPb5he;Qkl;PT2DT*i0Y<R@$CS-nXT7`
zq)bJ9-0SJQeC+qBjk=9vqss>IaUKt4#+y^@Tq7?XeHkw)C*36&&e^DQirVzHJtx0c
z!m8|aGF+4NoXGo={qkiAo??>AgWqM5Io^C`&>^H<{q2XdbwO(U0sS>C0FBu@eQ?U>
z4o-fxD^26z0ufH|o`Daik6374>JrtGcb9H(Hs$X8X1Y$-+0UjT(B8t`%$(z6i7jh_
z`Ars^V}Q$`{h{y8CY-p_Z@5rl#okoXh^L3d&KWiEaePuy?>@?0Ue}(T$PR2G#b{cP
zw4^*CLgLJtWIdUhpfj^~GDTNQ_26z{v{uQ$y~1E=#TU|_n==#B?^YB%W4c8%`cmg6
za{!<*20)LmAw@jlcr<~+!VxtyNk7pA3eqoHP02%__}5e@>H5!Bg>Uh;>{=kL2YEPQ
z4*9H)+_h5)W>eac8MxQY?%+yv#uq4XQ+7x-`k#IkXlec2zD+D^KEHQg<UVt(l8^!M
zK8}6WV`fD%BuTf7D8ydFQ?2>>GEbWe_pLs;p-A_2303*C3Ae}SR?B4v$gSh5*{R4b
z6tiojbGz@w{Rz!Ti?Z^_)OczAC{7;gyZ+3lY9i9Ye*y)+K^Q-Eaemb*?S+Glf_Q?4
ztR0QN1Iqa8fPRq`ZqQy<ZzK<UB%CimD8tQ8GuK95?9e5Y&3HR9PCUAWo8WBte6w%u
z8_OxU`$V}FWyF>V*bD!>3XYs^`PcWYsFv|<coP(hLyeY^!vfLdL*GFA{pDW<*y*9R
zeTWmNh9;n?Fu8x04Mm{$1~v5UUq_6@eDhU&bbdXtzh1HN)vP$RU-sT#M^x(n&pFkd
zMnVypF7AMXU^@K7t_tBYBl?Qvm7z_g{n(QukY^ea0zt>RQqJ>3{5irvJV73E)4HVG
zzad*~IxV&;LohEdPf;#86?9r+y`}J@zs7ccBYG@b7d2r<rlX%hjNh=;c!z~H7@oJ7
z-b>`VFs@?8`}RSt+;@EkhcI-KoH@fqxX4GMDhcemQ}6aW2=ujF#x3>H{sR)SjMqF<
zEUH3{Z*k3<BdniCu$BbIDUenpJ{uuBeU1`jr#Adx46ca46zb4et-hIYi|3NbF6Wc`
zd(PcX&4`K8Xol2sKp+8T!yq^|{Vk0Dhlpqc4n_?3$|reg17kg|FFCN|V0x8(1ga((
z0FtG6=eJzUyhmY!*;!*I`-vJ9y8iyVBFm@0wbk4F)V$?aY^cR-ihy731cY(b%Jw-T
zJ*ce5gJ;aBhy2VoC)Z>F;5~OBzw20`+)O9aHDF!t;t+L_&{?Zzap^er`iY^cEmLSU
z+xbG;`+OM<XH|rkS(VWS`xwAHYj{WWBuGv>hH-iGC%}slrPqT=DyxXk72&*B^mR+W
zAI=+YK9Zj)+r@qZcP9M3r-yN==2v;$viRX;S_O><$qdc{-qJ6a3-aqX#_k#oVN9dP
zF-@?7p6EO@nFYaSEWQzxw{Sr(-Z6eG*zQYnjpn}gvG>bb9uGvFZ}nK(P$LKXR9H8-
zervz@SK4TP`5kx?@d4q(G|C^%STY$}BlSux%i4WSM61r^iFU^cmhXgl=l3<=g(8x)
zGV*GPK*+;C!ZQI*n8dkEC#>pDDTJG>cs*m^FA_S%`nZkuF)WBfaV3=fAeEc52|45m
zZ@38aC!Wjjgf;Zi+e<1J`8W5UBpx+XEl|byzh~WJ1c*)t*b1UUzDeN{)>?3;$Hqq@
z@+8EIb=~++SJ;;wS3^&6>x~~D6m+;Kl|L4?m$dU_Vb1IfNHqW24Ci;y>_3HLQFUA)
z2r44{X_O#vv~x~LKX+ly_bM?PAM{z6D4e(s4sQJi$%o$^^cU>+FA13sup-A%bykYV
zKHh1W3jr*x9R03Xmu*E~+uwW~fI->6*~j7E;`<FeW~Z_n)}U-KOHGi7z~w5C99|3G
zHKBteE5=9-km4I?6c7Rq<ZZhe-7kJ6&n_iyk3iG7NOt)M(Q(yJqu_Dh(fq^n{*UMW
zfAy6ZOz<Fwf*9;ME~%yVbML@oV8^8uL$*yKdFHpG0ztbzP~O2ET98bXAu+nF72W#u
z3^-zSnYdeFCGS`i;NfdR;IHWktOpW=naSzND7%tegnm+=q@dQR@o(m3QFpGJ>Xn!K
zIXuYl`}|4jmh}0p7NU|1-URemAb`pZ&>-Hd$J{N!&=52m3!7je+nT(3>i2T&J?u}c
zhTT%Xd+G(XHyzj~19gFe_%jkW_-}Zy+pBm689)<J*u9=HZ*m|Cl$fS|w#RnRbq->t
ziZGC{7WN<?mYHPLbyd0oZPvJQF?9$f@8Dc)Z1cBrUIGaKDStkqy>$vRHX$f89>ruG
zfAL<dus&D+xG?7+k6*Lw;Rw7;fHF=<uZUEInMyTSf1T?2`s!zbB-Ui@*~pC?td^VQ
zUVZ23b3LBj6r+dFO-yA(pq*ate4m2XPC}5~lT0$8xCv!k%M89zATHYRc*gMc>rfy@
zdC4y|!-ru__clZqeBtRt0)B$${~=uUyG8Z)3+(^wJtYhdY*e*2NRMNGK(5MWPBZVZ
z$~f+C1%SjmtZNA5(#Ju(VI&BYR<vw9mB}hv+~le$kQ1AR+TFVq27Le8={6@RQLSan
zB)4)f-eZ#aN7bq2*#haGY8Htne~0r^{Q13+V<h=!gOEuqDF7IAC3%DvPff&6-YE%N
zm-R98%K2D#pD990&CEFN1O=`p6RcwUq)0czcU6@H!*>~xZ$kKHzA6Y}pSOox#PeQQ
zq`D2VdRHV!^fgyOqJI`7`Y%M+yo5ik|BR5B7Jufn31eU?=z^S9@Gpko<J&-c7DPyw
z(tkrI5Fq_{3day*ejP#B{?v#C!Q6T@2<Ae4(PSzxXE#-T836Ga(47UrTqO*|a;Nbq
z0xP(-ZNCh#;;Vs}?f?Hybf*>Me)R){+1hXXSc}qJmsHKXQKQjZQX;*}BL@L%;%MmN
z(&w|PM-=^;j)<|tQlR7MwXmYBaHQ}!vK>_bYvAU++j8EBqr~`;$hwt(LkFxnNbZGI
z2zaLFXDq2q4?+{2%}CEl(J*ru6|xP*+#Q@zxQrAU8di`^zL&=}a}NkKP_|#?G-dI6
z6YrpX4A}-Pm~6-&6Rq1^B=exw@>!NWFQ$~qoDx|teUoGT8r>G?t;$=mm-uQ^sXXJk
z98wI~g4hHNLF3Bgib>Q`{Wpx&3NGL;fCl52O2>Xv;QR~EZzm)tA#^IA(-z5(-3XGk
zF5y+HFQd2p(7<6gack;%ggTveyG!w#qgU%V^7{S$wtDADHo;gFS*LR0SAc9nwo29{
zA#G*Vz+8f(dZ-0@ii&2?{0&6?@LU*V?VsVF|A~}HM@bS0hEmrk87#%4*sH|tXVkZ?
z;x*5o*Wj+Tq!T=XyLbUU`Z?jk>3cD89M~R$huTlX?s`oRFb2s-?VYL_NvjaM^si2N
z52c^*+@GG>zZm-MK{f*L9B<!m@SNmNcrJUAtp4jre#v55oeRJc|9ob@j(~Fwr|JP%
z;#Vku5|16gh;%=p#9s#hG$_XYf6%Gs?bf3U5>`J<WxwAjW)4g`^rde8)qF*fW=mum
ze|oynUZBxYCR|XD*Zw1LVMNCb3`Pwa%*^Nj?w{6jLgzie{U7Sydq4$wKvBzT`WNmu
z|8MGh{S42MNDxR}gPD(^Y?uT42my8K55e=9yqlyL^YC=#)u0zWsKe~zTmtC$+X@t~
zMiyC5Q0yfi96=82kbB9LvGpm^W;Qrz-D2r}NmaUef(Ykk)2OmPtz*1JOPkOGP8Hu3
z1n2=PlU&UN-I0epcVPC~WA`tk!(D^qFRs6~dtEce0j;i3Par)sZ><O?8%TC5J)WpR
zlVVY}+K0%4WLjgmy7_>QPS&YNLv^dp_;t@MfgVXpbUCSJ<ze=hI@7%ou`;=cJYO?F
zlz;ml&3XT$82XPt<5iG>v<XHDkL%w*#V^*`0-<OVO!0%y&@{o0*-m2b0E50;1X^r6
z{yvh$lkcHN&a#tLdxiCAdUsZ;n7pzzlS@JZl}lLx=_AYr!$t1@27uQfc~VT<;KC4R
z?#`M6HO$J;?Lvx7qBGmr`OpO@igeGr477oOUD}d)tf&qcR@JXa2G_7eXuFT1?+xl-
z>q2>|a=}-gLEUA#S+4LDSSra~V((g4Gd(RZQFVSgm!__UZ@@=r+FN-0tAJWr*l_<i
zOUSf8>yeOZEM%i?`AKfOaK2RQKU)ibKMop`pQibMv+=h@_9+Py2Bu(49Wy=tKOjdf
z<+<W=Yo^4EGi=<|Yvo^HWuoWQr<!pU*Oce-E5TA%XO6O1;*VUHVP<U*6&6FA7TV{2
zIV7pTx3fRqo|nmUC`an*+)-QqglTyF-n>l~78xlR<=i>$TVbm0e8UCKS&~NcVJ{ni
zHj$)w*di^ky`uoD+1xm68iy%tAyFuT@XSvRMSP6_={WX-(g_fVC8mMErE=m2BoaV7
zbg|RgB-$iNGJP(F@D*P2Tp#)xiX}{rnUVccKxwa*8%EYjCquap#8AD*fr2z8h-BJI
z_CdP;$3KPsdh|7OHnjy1gv;aa18_hn44eTqSlVN)!7|-U?tpF|#v#ZM5Jqj<BSH9B
zpFy|_a9PW6a4A<D$?|H%v)X~M?K9RGf<)<0cU*6Ms+gY!H~&Q+mT&l~InoZ*FpIK?
z-aq7-{{sTC`^f~2Cipn6f`4;Gn-yFrMl@(i9$A9@`D9SPobJDR?eNeR$)kWxQG^aS
zh1+OyWLn~Kh`l@~$0B2(2tTIbbitDD1J8AA$U185Zp0hP6)sc*ETA$su=GQgr<!s~
z|E6FPC;P3^aU8II4PM_rdi6wgaX=jO4f?<4whiD5=>@jT{(Q@ef4xu`kU>u0@eu(T
z)_D~$ncd2MK)BP%tU~qZ{o^tSBD*2*Kfavf-%1?)^^X5C$@2SAgnMW@)Z)!P6xP@t
zSR5~c)JX%IMSj-B4wA9qNnp@y%U=kS`(<*g{W`~Nx6yw(5!kP%I^HT4-Sro@8$1_(
zo?|4Ceg64m{(QPWFWhEB69#*KBcR_#+X|air*v|pO3UTd>RSFo|8eTXa;;|0?{O6Z
zbJsguzf{|T0Kwmj+zn%uft~Q|!RL_D#J}~lfQ6$YN6tAvK{oSEKW(No$VtVLRMm%0
zt?*b=I#0m2(^Y?x8x`E}8_b^GxBAx~!@Ej~gJ|lO<xd5KEia#z+XZRc*(<+aq{5QC
z20mm}LtMN(h%dQ8c6PfJ_+NU%y}km0Ys_4#krnxA41nDh^7e|N9+?}Q#c1Pz^JI4P
z0POqLM`w=@oa#F?!x1DK!MQ<iwfseE${+nC;?JZpC)t~P#UPsGt@RTC9RB0C)=YQn
z!Q(A%gV<U5iT}BOs*(hl-Z0qDOg@2scc{!()e@jL{QtlhVB8?7PEx5S@(_GyMIVOB
zpjci}GEd9P6{<6TcM#p7yfy?jgyY&H_6h+yWkCzD-7{Baw`GAI3M+eE?##yMNYlvC
ztVE~jiEzd{=c4+I1n-~;40kGsqQ#_0z>dE}ibryKKGRqz@3YUzu$wEc^ptZxzHV|H
z?X|tx=H0LQNhuL<6~@GVfARja??pF7@xoAS0_d^_y8qGK`5UMDQ>*(M+$aLKkLEG=
z_Z6%ma>nzrtkRY|lA6=Vy1CD!wa@>6xc^Ne6cLBPs#QyDne>s*`B)E{DWSsM2yeeV
z*_+ar$b^s_*<n{rOl-tYg{HKut^qcjLdYMt<^OPI`UL{}7k-Zd4R)2*GrQz+@Q}Yv
z>c<8X!;<7lPwGIay~%PJ+|+-jD}BB6tdI)m!?K`y;`kIZXBbSb{DNBio_{AFsKF7#
z0q!$m7)|2YaT-nJbvvPR^Ix`I{TCn#O5DF(wOvWYhIt19k*?>z=fJSG5;hUsb<>P@
z5JA{6BhN%run%ZmFE2WO?A-S!cKM$eVvE1`S1t=v;*{~$!!SEqvXGBs1;IXxAZKh*
zb-iuo@L(jiEDA3oahwf#?`c&oxM0J_Mq8?p&&Yk~V*8QW>htycY>}zVY@WN#38H41
zOSik`1Uj!}-{`K5Io7%ex$reb|B59a)d6D%Ct>73yPk)Wz1gL}Xp_=-(ocQ+!i9aV
zt&Q=rn>gLcwFzweWy+=)!kO>)`UHyxO5g)iLrOi~3t#7vJxV7Mug5gzXtx^*aGXhh
z%NH|)jszZ7&|2Z*$q*=?B5XR@toCl(u}A&ZXXYJTW1Y!H=848-A3wjf|G+Oa&p^xY
zt>Zuw*`F{;t0)8@8v>lm$mA4GyU})3%$38zr*$nci?&q%%4nmd`k-0`M?$yifQcg@
zH^Kpr>-Ds_T5ZimQfjPj@6Nq@6{RSerB)t+obW9*>B=}aJw3g{M~}#^nw%rWS8?Om
zr}CdEv3O40u1xt}7hlRUW-Dj4T2}CNbRyizIZ@hvGP3j8r)~wYk9Es5yi>l)_=R3Y
zLr|LzHE?|9qdC21>cW>Q5hvS~+Z~TSzn9Ve+Co>SL`NsQW=O&>k+}_8s+4`85<M<I
z%^FgOovBq+b`lEc;7;CbJ8lp_$_hO~ryt9wH{kCQV1o7r_ce+XQ}txpk-^7&_`%Pn
z=O+2SrP9i_zz0Ki^QgFEw=<qR^TkUpocD7&6;oRL17f#l7LJc2#T&qnWHriX$6w&I
z*`A`g+{Vv*_X^(udy^!HXlTiF1k4O-O(bg(?SLgb$<3|0Frsu1XRi{<o5v85FSu6p
zcu!Yd@Ud-Ol@Jc-&{EDyQjHTWd$$qP9qErsv6Ew-Pa^v#dn`^RXvMOzgqlab{9tn^
z<uiO;ev(fH>k(YTPteu{Ov|;Ju?$<i4SN^1g=>A~bqyj68E%Jp0=keco+Gu0V)&}@
zv~4tp4=c`*cxIB(M=4UaCunCIp3*&_;9gdt6?Ny_3)UM{%Vx1j9ME@Oc4b>j!$r&S
zOE%tu>H`ftDb6Y)w~g7fK2(XCQ;%2hOu1&4)B@I9z&oH@=rO$!GOl>cVBFKc`%-a`
zkz+{tUQEbT+yhlL8qU1^Uk!CJU}OK2wfV>zFfRbX#1+&WfO{+?515uP`<^8^**>Qm
zT<~BG$h!)j+W#<Aruxsk|Dyu5!ygmKfh%qXG(<lj>5LBs4?k<J{>49jTXf|&2lL{{
zUG4tod=LMi|LlyU`lCLyUcL*kY!&%s35L(Hujw36d14xB<YVz@QiQ`q15%;Tva+f#
zx4ngNi_gN=0`{4izbBD0TEfIBX!-{PVV$a!ev|l-6o;95o5Dn>b7F4L4*$e_h^SR!
zW-puEUs7H21xL&|DZO<fW2fx-I!&NtH%!%b!Ernte;iQk?~d1G<xQZO2~7gD)SpcQ
zdg`$sTsY5`9k~c;yuc&+z8j^W)+jkqgon8GBgL)Pa8}c9iL`9l^lhROS1us7lXJzm
z@A-DTH1Fx)o=w_$ycc}~{Uj~sz)Ho(8>@|9CB3duP;s<PC8UTm8=@%>B=3t_xK%}e
z=Cs^=r{phW)~wEoXZ3pi#yp?xKF}b0@cV`LpT37*nnAOB!t1yczwzVZ3#?L?o}+Rq
zYLdmLP0n|;-m{uekljni0W;g+DN){=p4@oHUP&b(Lg8A3+#17i@tA}@7Wf+bVVBM3
zu&np7tZ@c?!HxCbmtYY@$7yIVlIJtdBg)O&o>{HHc)-|LT_DmIA{1}dr|S%K36hA9
zL3QNXPwiX(r=EqVr;~-gfg{00NdCO7$*FQin%9)(P`Rs}=cKwv+0Wll!qQi$A*L{+
z+V~fw93M3++<E*APOCX4UA5j2!7y5x@&3)x%;T<XA3GXYYTS7g^dKXv!3`V1d{hLT
zX+!P9M1?-_Dp=}hz`ViuyP+@0tpWur_vP+6;HPn>%IB6s&)2Asu1;L^m^WUBV0Lhp
zAWCLIQ`S1{X!b^aSl)Wi4)1?I>wdoQwBVkTNd0%gA~v(_`F}LO{|lS`FSGRTe?zgy
z3j**rAaygrVn|VPh<%4a(d~h0I1yl(G=Qbdt^EVi@0#C<A&7k^ChZ0SHlb4?uwije
zMZ81PVdfGDFmmlUiX59s<$7scIL-gs|G=l{(BFUdUoC$!@b3s%6@j!4=IL6^8KM-<
zvEg%dn&h!1c%_qgpVpu_SJ{vzHBABQo(`xt#NkSsHtM2J5JiTz*gj0zKf2O;ZZFNB
z;6SHlDir23vRVxSQw`X30$340qzGf}YY!xYYNQRT=TimjP}bFF=8sTDNzA4kri^~`
z_>$X$W#_@0<>&FLVZ!-ovi}I5|94)Mu8^w8rz9J+$JAQ(3pMAzE|(%k@wGhXH9km{
z+Gh=!dZ6s9TnvE1C`jg*u@iVDta!<wiS9;-X@VhIfE!aRD1F5V1oq!<6Gg#tMG!Ih
z@ZJYO$i{l(uVWiq)=W+frVUEsdG<8VzrHW{aES+H9rKiYlt~nixz+DVqP$+vTl`9;
zI_fop-Y`z`dKl}Gu*?2_q6<ZD;F2=&Rqt;o_Up(=3%BM}iq4^ig^1{AB4yFRWe?fk
z#)N-B?xQ!6Ff8T&9r`z23FseEf`LDz1m??#P3q5ocbX~rqtgsRjdts*X2tjNaV=g}
zuwU0#mDNseiZ}d#7(2kWT}D2@;|bhkm2%WdZVNfet9<olRArd(yMMW&|1Yd;|HKNt
znR#@Dd<Su$jvBeNLAqc#vPyF6C8+$J&H1Mqw-P%LkF?4*@_`R4iTiL6l0W*$4l3SM
zUdH2l=|6jC@drPS6nm&&#zqye>fHI-<>}fF2s_MblGf)j!S|h~=nPd?XW70&(98h5
z7*%94@vZ)~QXe;CE&E)d-25E0>V3zC%$-kA`Z01lg3F=P01<()MzJhc9=4PkE-hWk
z`qFW%vN)#Ao2TnMEkrcs$l-{X=f2^ys9FUjNk)3zejl40!ududH#@!62b);?(-JXy
z7V$@QIpmsP5(CZWqeK@U34ekzE}{#%G1<f`y*}Q!k(u~R96#aK;<qI^>9MO1Z5aUA
zH5|v1aqK}_Sx1o=g`xxF3aZ#~b$+^MT&N+*GWo+J4-@FH56o54HCL~dOzuZYUukW<
zbPwBJ7pD#kl=@_J9w^=H!&qY2mSJ>?%2><{(1heQYz{V%>!MEQL5+>*srF`HHxm*2
z9O9~GKezrs(>z77%$O8jBTVSIsYH+4ZCOJZVU37JOKVn#jBoir6oyw>Igg$_S0mSa
zfg5Q+q5?$7z-bht^*2WBw}fd)ripMZNww0j{_)d`G1pR${y*fscUV(xn=eWckuFGY
zK|reXE+rz;L_k345EYQ#q(dMGN|TO&Ql*JViS$nBMU-Bo6F{VsAcO!(-o5<ho3rPA
z_mpq%Idjfj=lsFNl|Wcoizm;z@8A6^A%IlV=bOj4=07Q||K-2&{|VLgzgEgH{V51s
zuKBM?;rhhWPM{-_N!ac@2v1~MZ@={~A3gg<lG3q#Zakr|1ip7WDEiMi;6s49q$ZFl
z-kguWGHYSk{<S+!sQ7=GkpG^T>-*{&6{TntMX=H*tM6PXHM$MD1oZ%l8~3nb2zznU
zG)K?3)qRi9uby=b4cR%r-idvwZ?!)f7^Ypsoo#j~dVV*->|@DB_h!)_U9(i1zgj-~
z@QsKH-`mj4Vw%_2sGZluMZSN+xGqqC-#I@z8UsK|5xLNtz)TODjuKWeqXq?siqBa5
z@Vb>IkSq7q{IFMp3lC7V=^mk<MPKA|;YenVgVKPq``#NL!rqKG{qfNG?13E9IH$uS
zOx#Z}Eewg`rs7oS-go0(-=~M6Wnn$5zmwnYA^<eKGX{pu!uJ=#cTYl+=0VY56c%&|
z;1o#A0LU{hh#&w!eLy7KR^~1YOHT)QfIyn2j84wmd;b6TyCJht2Wt8XNclz|;Vp!7
zyghLB?Cj2gxGKibFt~S!25Jx1rS%7qvjltCIuf!sU18JeRWm5zX|gj+lXIKy!Z|58
zuo?L8w&L7@G?9c7sOY3e;IAC3g_^RHYpVP6DT@^@p{k;%m*YmvKP3f`D`AI_5G?<4
z(YpRq2`0icjM>kV(}gE>W+qj)&M`ILwN+XN80Yx$CJ}jQdrm@Lf@){GVBzymCJ5KC
z_l9rN7Tjn`P#JFS9AH=+v~<3#<u!6!l`KS#X=KO^z~}x`F=_w&feq6JC}7}?+20tY
zT|y=yyPgs+i+uzzJP-higSk8jK&H8(zkpeZ60l9SBViYuZiwThk7nDY-SD#;1Yk*h
zmIuZK#oOvOv>2d@{YRiJ5)tQJgAaMf#NW18jvjc>;WhYxCA)94>sv)-0MJfedFP*B
z)&_)OS*@ERN@XcdpYY)RvZ0wO@cloC@<+;smWv(ura+c5C&g)LM*G}|g7<X)MZxJf
zz?mqb7Aibl)xnK5#3Z3C>uh<kkTMPaG%Yp1+PsHSU%Y1ro6QH0X@BxDov&NZ#^d-3
z0x!BPM<Y~>Jj9O^f+s>mKPIX<d0Z`ek+Zsr3gKkGoWzLbLmO%X&NcZxAV%Lig)6<x
zMkDC5(KIhLU8G>%({vlUiFRWI2kfyY({6rk=70ouSPzV$>PqVJk?TH*v)ZGam$Yjt
zzh1xg%DhrU#FU0j2pck+K$iP->C6Gmx2X4j3NUzx=%}68^N>^3a%ilpOW4dZQ#Mvy
zCwuo6Lqwum?st|1v_?Q2Xz1$7s|GL$kooenAH-<SOm#`+OG}8N?rl<R6;}os`Jt$8
zqG@fV(H-&obmI63-(AFf2&NMYWXZXDDUH|Zg7n9rU8memxVM-HMW>2P23j@eUsG9l
zL=!;V!_JO^Yhaz(pt*rn?I<;tZu)vEov`t;uepFoKrJL{)T{EM4!rmXUQh*|Cdi?K
zvOVG#_838j^uPOq(NVAeQwaNCd!3xb8L16`;!=Q*uURf%1@I0NV5fj9f{)EBa`gze
zXJIGZhWJY}jW59)562O^BXoFX(PL{c%5Dq6*O}ix0?@w7ul~8_I}!Oa3(SlwHf0v5
z>xTX_9nD`~3#Jrnv-?080HaJ+*j+)4D;EmgAf|n;N<Ry&g}{XM9?4W~l}m|hg0DtV
z2WD;(au*4XLCzm7dtjv_newxg+)7U$Z*BElPtW9uxJ7TP3gim<0J(zqfLuYDwGZi-
z?>s(YB~xNQY%RX@kgaB4s=CebCL-ZQv%zIBkSlm)xA?mbhQNXyt=bOudTQ4w|LKRi
z(vPCN8-i0WnJRC!wUC;DpKZYbuA(o^3ZCV6A|KHx5X3PKFwR~8rFG$970?^90pr~k
zgy)9129Wjlw@|#>OTdN^07UQs#oLh$2(`t5I2Q+k|A_MmdPNIo<kx|$zrUs8y$5df
zTdhE*;APmEc+y{wvX6^Q29o{|tiVujOaXRm17r-6f^nJJxLcPG39mbVcM$FfY)D%e
zg8Cp3^alYl2LE&@0v9RpmNi#^wb2PoF@}6YU_2XON1=b{j8qZH&q1|s%z%u+zZ?$e
zHvfEv+$)1hbaP)sfsnz!9ud~_=X3Pj;Edk;!Sp+2@UNE;{O6NwJfi*o;03MuHA+^&
zV7)l_xrZ_{;5pNyhxWDCI*Fd^!h1a9xTPi#uD|<qiEacPR3P0CPKOi7pwW0R?Vj|`
z&9;BT=IbyIvS>NEvx0(#kE^(fVh@2FnPKU3Z__}R+f3bLO_Hap5jiCNEaA-?dGG))
zdK`%f#%B`f2w!d4GJEo=WIse$efx0wpv7AbzboMXuB1C&++a%cAsfFCf7-Qjzb7Sj
zTla(tYyKma>3{h1zse3O0deTR)Fn94mfr?=s?6B{JTdlr?pR+g_}3@oofv;3x*30~
zv`N3JQ2j}KP-XaMal-V$pXQc--D>{ddJN_R=BN2buFEWdqP2hc4<h~fS7BLD5#VzR
z0cJQCq|SgU!yx?(K$-qyJnwx~bO_Kk{3CrhF-yJd$#Ll<#b&rtyE5S(*?=cD<DaUQ
z$z!09Szn_Wz(-zNAXouuLcg<w&~c@Lwt>KWnaRN5L$QMZzq}^L!hA~CD3^1-LOl`e
zzb-);d;`2%nok5(_bb|h)x*0(F?n*>r|56~C?al~TP(j!?UoYCf5dD(4N~xWZ#Dy|
z1rNaK!hlWqimzv^<#inoM+l#BofiMxN-udX#i-&C^-KJQgX-=s;7{bb;OutDtGoYG
zD^X|T!QfU8bww!;z(!S}hlIDBWYhyVAOc^u@UT(uHT(tS8voP57rAZ_fBzSwhwfAi
zkrfSl+Ts+pU%C0_hHc&z^M|dCKlE)kG?+PF(09~nPCV)-u>Ne!6QJX)zE_Yk9!+MU
z!;@GDHyis;=t+!ja}Fe^JV72tFJGI+d9DrDe;z;9a~41N;L|<8u(29oH~#SX*V~*a
zkTg&=c|@DW1te^Oz=v-P6R>Ery!^szCXBWA4YXX=&dmz;Dq}`qSQ1$U3jCo{Au*pc
zKoG+>b*OA3s^Oi($pnV+D$}36^K}qej><&}^Uuh_20<6<HLD0B<fC#-53;cbL5aJ8
z;*LeFbTQ{Y9m9d$GG@lpnj7FZ4;DQFp4u#6-=;0Mse}pUS(n>8Dsi=IcoX30vQHdI
zC$NM-izJlbmMzD+Cc6+k_75UY2wPw&KtCX>!&7y$zZ>_2dcw`IL5D2<TRd$Rwp!Hr
z(h<D#ed_MTfJ>wr`zsb>7pk&-187<8CwzH_AQven0KJJo?e?Z<z?~W}2dUo|wSFyB
zx5iRj*A-2^my=`!x$mq{DgMgwBSeQd#OX)+0;Ti=8L2Iz!)-_wsNQie9mRo+-m71;
zG3#9BWH&8GVxOdPU^m|^eX(x>Y_NhXV$=Cem9)-?FJ5>_Ef6LrR{~dhVWeP3*MvLk
zx~P?23dc<I3<_veOT%Pkk@qwH#rLtVlZS_veymD$s_xQJbc@jG4V$*|f928@SU6HU
z9&gh{uFzK;1%^DVpJtxxeV%PG;mwgw&GhA3T{_PyMyb3ds-)DJ`MJxj`YBV>YCGs;
z%K-bqbuEh+QvPt6#WjQ+*qQES2RZ&-F-ou>_M*i%jA8mQnmhl)N|)=6u}GupmR|}Q
z{PfLcJn_eu;&Ht%O~$c@Js41(!hR9BTO#JusVxPHF1l#?ny&LQEBx@l{<YhYR^kWq
zbp|qUcSV6AZC8!CVynL1Qf-K9DNf0t*3!yk4-{j2v#uGd1Yku>SQ*o}pL_G-MXL2b
z-VBRWR9h(*6{n<17n59(8z6iQlbCMPzfi)8_UFk?B`lpRzZLhC5H}ZoTGFV?=S4*H
zJeWw7r58cg!Ghz(YN9RI0fR>57%id@G8~w(&l9^$rIevsEyKD_B()~@z$cXgJ_3qH
zFdUD@Et6ocU&!xd&US!MWJfm=8_Nb5%b4M11s1&HyiNKAq!cXQ+)}@ODIzo2cS05!
zXK&1@?pIa}<!ZBu<~JDG&gt7{#VgRa7pKIUI(XShr8Fr%=`vOt@lJFFYoZ0Cp?cJr
z++5RQe&z)}9PJg`w~8XvO{G$aFLBd^g%ak}1g#`EZ~|D`EP=|8SUzi($(<I7s6x@8
zkCxjgI^Hi#x;!LgUDAbvP4Og>k9@ucY7GLLIVg;47eP#*2`oZZ<xChpS$h&%Qkj?=
zRN6-T#QClQgK#Rj{>}!;$lTN3WS^+ZsGpv`D@!o7hCLJbp*dbOzc-JoKI&-g=fnCq
zF{K&h%%E{o@~0#f#W~mc<1hC=7d4*WTs~SB9ab_96x2*r;(H8iY9e2HBCc%E&mg2b
zKsa7NT)Z;>YrWT2&1~A2j?Q)~ld9;8;dc*DakqC2Ye-@e6B4am_Kt@^@!rXz$F2I4
z6~3XjW_2f4^{hmNQm^pFKjDj$ql_u~{D_Fy8k;5cDmB30@X$;=XR*FIAK+e7&wcMI
zTXw;CSuB1PE6f67S%n2}KDb+Dz&1?Msy?F5I#@|%qi4{bwYL0aM>N(QH2jXw&p&o>
zWR@tklq(_3iO>%tfpV<du-IT<qbcdII#s@h*LJF%l5LdR*o`%OCzKZkns|qA_O>c=
zbUf_3WL8EP&Oh=3*CpYj0Y41<H8D>s3f5mGqDOxaW!>#A_itIe@R&88gg<L)tZ9i`
zv(i4=)LF7Ad@fVp$zL{LXOW6+?EdWRwVSppZ?xS=YZv#Uitnm&!rY_ehKVrKRqlbY
z*)QPQ6<iK%AE@S^=mWTc4h{75f`u@ePo*Y;12v;Ty?7z)C;O=rJt`eP3QN5ZswXP(
zL(Z%sywWOgzRzc`kuS*1NV_$Kl~h?I)~YDW{dEHX7bxsTqSJ(vwTcI+sPG`q(Qhx^
zcW~Ev@T+6JKZqCtqYjj5WbQ}lz0Qs`n2Hm6Q(FtVO$JTBAh)(1xKOhU^f>VG0Y%BV
zTit6+Ke7)LJ6QaHOB-COBMxU2B8@wOERe3vM#_1{=5b(2*F!B2@1P#4%fT3}yBz(V
zOVG2bR=q_$i!<wuR~bqiEAU4)I=DQE@Te`WZ`_C$nTeL#M%2>ni><>G5m%w!McAa<
z4P$j;;tK{-vX34m+v-0lqu{3SU>zI;j6`(`j3hZYQLg)OxH=%GSRN-Z1@J8L<0i2F
z1jEmP{UADvt+*_|4Lv>!^D7&7qR1|8eds2B!<#H!9CyBpX~VvPLvP}9pziB1sznZc
z(~)^r_n!}_?w)2!S)RR4l^ohUF!>(t<sJD!g63kYTdIB^TXLRq58kv8P(SMH$r$J$
zmxi33-kW_yBvq$C|2m$l?aX2&DK`Xm^@OS5ql=|~ilabLxs9fSE3?8k<qGerHAaWG
z;nF&!sw1Lnh*$a5f&g<ix`V9=%@VyfgVWiz`?S1!c@udcF30pJL)X|H`O@G_`7&cX
zGg*hrYMbur5+5cRU)QF;N*CuWP_nubWvLpEvD6s4`M?^oQendJfu4y$q+HgGt(k|K
zX_?lQMt|sNXwI|rN15$}u1h0WqpscR-ppRnRGrm#&wT;nRM#M8^Y4<>lr;=jcp@Z&
zuexv~!0y!5w{)~jRTeZ<r+sNk-_+ExR}Rx<%@KtTcXe$9$6-m;Q6sM}lnL|8+<?@W
z6#>})p|v&J^DeYs4K>ttd}g@cm@?3Iral7}qA7T8E*?lR0$4IApf1)1_PNjpu$cMB
zv-`5D!Qbb<FG+>)XRPts+R+gg_uebWRy6LUd%H;}Wz5NGUl43)U1$n1tFQl#7pj@A
ztJJ%nWH||c(lWW3^eu5ym<bt(&fVorE%6>v?+YC0@=UU2!}gUz78i6KOP4Ko<`c7N
zK0&m7#CrXurKw&LKiE$(3naQqj;8+xUB8gVsi7(p+tC-8pyo-tW|Y>us)gapxqPxI
zH*cla430Z=gud6xVOHE9(R&is1G*H(sPM8X>^g|X5+EUK7YC+UL`+~WU(;Zek=2RQ
z@V#<xor_j@n8rXVcVS}Yto)RrOkg@`%*LvZn{CW7V!?WP?|8c^u0q$xn^WxivnvIp
z$>tf?wQi*)){j6RaZ7kcXYF9AENf5)9Wxu2F1KQ6TwQ(uONCV$)x6Mb5n#s?ABifB
zR}ljS`lG#J@}E%w-MNNq191|595Hes3;gMYjZ@+rrvRAly!?$(lGW?|71o$H@k9#i
zqitMk`aZo7)ct%GjWw!R@hV>{cWr%(8w+iH?$0khY1nXh`0C=@<RSaV$7Zek=Ov#G
zf55&PH0%!Lf3y@R@Gc2-R5xVXneo|aygI+|&4<nWacf6l|2j$aHeZyzdRkJ6lg64M
z?cz?VlmB^1MavtD6}Pk^gVx|p$pGqrHH4~~YL~a)g#?tep{_04AkW6LE0*h9^`ks;
zC&`*#3k!{cvp4B{Joen_uP+R6^jJF-hd$OFt&H@Dq)l7Xm*}@td2U-fEIaG<<px7Z
zPusiXFSo^i{!}@lx@JPnX+Pm<x>UEPU{RF~`q0?eVCi0wIiJw$-!PUidb5iDYH+PQ
z5y&$7KCpgVrXWtUpH~(b9_o4WetDT#^qfIj^p^sr3jI=z7ynU(0~Ot`#y+b8S*?`N
zOtWqt^EHpJamh>tVlidM&rlTC8>FH^QfH0A#wFzetGz5Upt}94a#SrcAQn(tU9+8d
z_S&X)N_Nmqc3Wb<5x9AC8sAH4m=W@48tPmaX6Puo=<q)Q%6m9CIRq9w16KE=W5$~4
zR%bq(_RAJk7IFyW&vT7gj}=H#UUz&($>pC@ro^hWZCdXRyEA5bc~(x;UCE=^yih@g
zpI4VN{A2duVsA1LXR0f34!(dvGvqZv6}$csY6Q^DUS{f#<3iz9+w<;|O7(Rat_i0y
zg`!0EZ*S_Qsy)b;SaBJ{R<DZv&SK|;`k*;5aacdWUPGe5L@HL<ecLbBf&?PnJ*jSv
ze{)LM?0@yWI`4Y2)&abpqe~+{EgOKlNj2doW43cgzs&J+i8`AJ0<U`dSEXxmBIp3z
zE7I1;L&Y~tcIgoPEL-BGR&uNFbq1KzK*(nNqD&;>%Un3cYDWLqZM%TgaqP1&*1(`J
zT75<Wb>iZ{hdSvY=^&lGiq=iB>mtbX$lR_M<SB7ynZV8IO~Nh+qdyDy>?F^sidcU0
zH}u1sPdxvAp|uVfs}%EqSJjI;xmVa9Row_mF=nG(N!1dhT#-h31#uTvY|m%pR+x8v
za#jGUFdLp!;Z?s>;j72?emBgyd)aXtSY4D!Cxod(1^Rp{jNby*t?{G?o5DP*-*RK`
zX`U0Gvj?@({6tM5&$AIm`tgJSNOf*WekAk>x)pgjJc%wED!k?`pG(!8wufHnwJ5l=
z7&F!@AECREs>fhZ@0Ako;P|Qb0)Wki5=`fT;s6+4SCM*6AvS0LJnWyeudaX6zSj8t
z+Yl;z9AL<HB^Bb5NMIbdM1UA~^*;+@cc;pSp)>$Eu?Rky0Te;ns@DDj+BTYuCjYQ8
z>A!~R{&##%l&<$Tdwd6RvJEDLyFUMnGX&0ixg0=o(f}0aau(^QG4KW%V3Ivs2R<jN
zbQ${d!*5w+e4&%aC4b}Vh5w=>0VvHSzkeHmCkxnjMbl6cxHwBsybt)CzN)4^<Q*_5
z(D`Tj(SMikbc2WgA*(9P0a!Et;pj?8v|xE$lab+{=UxBacTIg&=c&o%e?oI;fbrKy
zd)T>ZdAmn?moqS@ssgUy?cWjtA2aCOfSj}#6QmS7t#oU8U_1ZU*|d`KRD5x8<gQj&
zUFvfe)uP#BS>peqZ0O(9{UgIf2w5A`#ciDQj2cVe(aN{Udh<Vs0;FI3v|JRFvc99E
zY@g7~74s#h{A#ENV0_1d&ry>C=%pA=EI?DMq1%`9PNKBdvFZul-LTT;9PwxJ9}#nK
z+qm}j#v;cLe)zuS=f`XY>KpXSZKaq({96^`i-%C9>@IHUq`gMW!sbN1;+S0Dt0C6q
z=Kie}B3(8fMbhLh3z%5PCBh__0bvK>(zp-}`~C^Ng>p|)N3)6XQ@I#?Z@i~vU$+N8
zT?O-p3*lEvBl@it^ZKh(F4Q&E-F@6c+?jsll&Q9D)oAm(v2n3FSeSdMlk+5ADck)&
z6DWutwPM!rm2&9vz6-&y5fXhn!0<E2d|BWA`F!wwH7BD1DrEt^W*&8J^W^Ws;C<MI
zr^~#5OMWeEubu;&zvka4_`RgLn#5r;7wfdf_ro<=+9GxiKoSowNsK{0pYDN+!oIff
zFUz7U%HTL7Gzs%mJ+pw3%MIn_<)xKPB@c%;xCw;|9?Bzh({M<#fUznmMiQTb6T1+E
z%IMRs3?tAUHPxe4F6yw^`|tOoc!RELG`%F}h;`&)?z~|5n}=l7Iv%oa@=yT;Xb{-f
zzQSIBHJ}RpadK;jI=U`>4q!v<5d}sD!S#Dt*0s7!-*S)jI`s6KIDDVKTx^Yg<giF#
zs_5V*iTNbDx;^Ec`X)fTS?EK>=;$E_ln8)*q;T?6VXROyY;?gusmlFw@8a*nOfw}S
zvYr$O@k`FEkNb)m5@hPN)+k69)nsr)o^i{ZsNxpWI;hmTA*mtLW<o{J)h7AL!jCEJ
zf+95<!kWt!l3iNrY$nGEOk?P@7MCwenLc2<Yik_fxr_CHUN}dfNpF5#eq3Fsi8kud
znv3Zz(`e_LQid=}kv`Sl+N-^yBtMAedsD)ropw58!bS;Fvtc)!fy|LB*ofI4HI>Oq
zSgnU9j@vJ)=rIJAsd(GQMi)7I^Y)`J`^IW@g_*O0zOyBjrHkDC^3}f}(NI$Kp)JZW
z!50!w5Sx&wf|`67wsUqa{;p!0<@$}WjIUxsAy-o1L6etcweTewRi6w{%?vQqrY?8A
zP<CPQ>I!vyrjlzg!pD+U)+e0D(M#%jVBrO!W{di<^y?bLYFuyEWcx$oWDBUq#xPxt
z7fOUS?1PX6vhPCBoVOHuM~SUVP7<4+km`}oUv>8jeD?SoO{dPhz1Xn0w3)~Pg09YR
zzKFmuET*>xj|I$+BVDf*&#;T<`)t~%b<=-6mEM$Qi<BRG_>z**<EC0-VOsCdK>GZ0
zb})IZS@W+`Kq_aM6y6~Xu+g)9$}|I9(X=)K<~(;DQWq}ND0sa#QaQZSL=$_m&M(J_
zillSq{=i$^DPl}dLBeW(cH3yL&PKp-Vr46RABO|VcWx!8GTA@KtYu@m;_>|bYSxhi
zJ(Lo=fP^J*3u6mVbiKi}X^!hy_e_hr>57ufFU>Wt)}l67hWJ8K``$`v4l_TwzSc2b
zSZ`idrWCN4sc#`Z@HL-Ujd&Mwxy=eq4w=Rod9NX``r@(|@Os<TWwuJBea>*TsbrLv
znlVqe785vH&=uo?mHUXJ!1_qMs8_uwW@<VG_q;n+q}3{3y!`1=tP&|)?wE%0Veg1p
z>+(uePl9Zv9Z>o?j=pS3>VlI&uvR9B*G3tKW>bFG`9%f`B%Vo&TwVO3-X(YN#>nE~
z$De}29$(_!Dw6VWG$_wUsLXw0w8T11e*sNjQX+D~E^6-4%Z8u&4dxs{U{P|Ol*IIH
z1Ot@xJ<b><(1<$iMZc3>@}JxPF|T7U_{NV9o6_m9pDfeofBfiPUCtZ$r$*%GHw>m~
z>?lXzF0w$?#U7T80_GCff<o&+L!GQ?v3iQ*hERKF%#VljZ`>AE`za6H4?C~Vs`!Ih
zVZgwV-35RyfU-9cL8_wdiA>_QoKKKPd#*Vj<Mj-eL0@vs#7lH8Nyq01o#{{m3%=oe
z<E**vBBP@?gvk{ogcHY7tt-RgGGSZ<C09%bFFLvYmjAoU*;MKuoc#@>{9fGnTFEEn
z-TpQ=UVS9Ywj2rSsx(6m^djPH6+5P`w>6*%MXsTqj4od=YnOZ-(o`4%NUog13xq!A
zi9ByV-#(==(Lo`iRw(>HBsddn!TN4CwyF{u;#X?j#UOe!IW+Nm_s-es#!)eM?rpd-
z)(GXZPS-WcwhO>A^$85?$Pd+(i>~ww%bI=y<t~}wvNNcbmR?5PQ?}t8>d!Y4SE_`H
zcBN<V?pQbOZy>rr3BdT{6!or6eHHO4OlIu(@UrWC+j+6sHRr_hhOErz2N_pbLago?
zOBV3Iv*Lf;&qC~m^}#%vIi{Nn=tI<Q%@#DZp)buIX+Y5py;jY4V>d-!4HK!kzo$MX
zGFxCYP2w`bZ|_f27{wl)CaICQs!3uEQ7fHST{1AU9b||xiIVh`dC{ajU&^hwXPGcm
zDy?$d7T9S8_}ujaLz1mL&Ph3;x*^}(=RIZ;(lX^G@7%`|N_jqe(A9^MYT8%J=*wa_
zKGdr{!S16tdh=A#0x!^pNVC}JNCFSmmgBXn)zD)W`B~FhqNs-E*qT;p^QiBAx{Jh*
zOM+oh><arFo|VI8(5C>PdH|qvwO5KJk8RWfyYjv8zpSnAnRC*bJux<4_ZZI)D2$Ft
zOX9=TUkvO`A3#r0f~(8rsI*7ew9qn_t1AQRFh&SAf|#M^Enk~kOn!i=^>gjZwJMs(
zmHCbZ+ZmHCTM5+k%&Ripn;Guq+qJKk<g96`wJ{b9!qY&vkg!DM(M7%;EOg$Rlarf^
zi<~RB;MPQmBAMTzWRPJAsM-6iw<5L+&9b)oumAd)r<GxGN&T%>$=m(gIz}(jrhfS&
zF4uE-&IkWm|A~2{|0DqU<bCLCJBo}aftlPMxP9Szsl;Zrbl{WS-Ub(jm^FpS*|&1D
zpz@L8MX1Am1Vfui2#jgbR%v?DI7)Fk{r%mS+SNm=Iq@5&ux?-zg!d%H!Wi4ULqBQ+
zxv%53X3m#|rqlO(uWStq1o9tTqCbS(G16ZXI1Fom1mGl}ivjg+1r^P|#orJ#2Ii-7
zD}VR-sr-}A51xe%$9gU?h!RBC76QuCUZx)%mjPOc2k3m)8(=Qwwcq<Q<9B-CslDHn
zk*4&We?SKC-=z-!hu+6uJCw#hw=mA{Z&i{4a#>G&hz205qy*%$e||Xk-uO4i(^BiA
zbX$i`@=yIqGx)Fli&^vUSi|uDZmrwjcw!^4A$TB!Fu);!S8!oEcSe;HbYYQzFodQQ
z$TYfTwh6&8`w;X)fLj$taSpcK`T#y@_zH5QhXegV^w=E?`1Vdjv~U5!R`q|^`RVUY
zP?v0{CTAz|?+a!TZ5|(<39)b?)+|7TM3+Oui6$}Sq!<MdhT72c%x!u_?*3~{vwo4B
z+@LjpME6MPR#wWJx5;yio}Mn+<SKq2pvzOlfBe^E<^D4j8I?bO4Zw9AFYX6YdJ^Mw
zKmU5*k@^M0DYo=-<FTG65KI5z2Tyq-E4}T$CJ_vLZk7OZ24Mxjabh?swB_>p8hIB-
z+5O<KQjg;L!s<cucSBbPmBd{6-0ae>;Wxo>1eKaNR2e&oRQ6=Y$<3_J)U}37KXuci
zN=f5HbE+8A?yPdJN6>_5$EwHyQVrHEg=H4BZLVkeHv1bbj%>Y;Gv?0{TFQ$^t`OQz
z;QfcMK`2AR*I;553m<e5h8oRy?zZtxyiG=B&VtvZ^~<YG0p*xq6C8=!z=(tBpvTUE
zhWq1$G}u}QJ`9S;N11@AmUez3;rp6}`lx~ahMPQ18G#OZ<Zu0cG2C{j7fj?|%*Ayo
z!YA<(k0wsO-C8jlm$*K4Q+!NCf%)j>XCUQ?uRx^pJks`?$aq$pMal57k*3SP%5eUx
zFBlM*W@mhG=Tm$tkTSc8Pz5`IvBKnCbdeMK$oxyge-Kg8^^>)|-K%S)bLH$!f0VVY
zArRnYe4X!S1WTpnz3Pazx1gZFv|eB&81^b;4aVdm>3qu|mn~y+XGb%RH95fXDbX=^
z>5Fe+Do_)AB|$dqdxu0jJ`b>4RlOBhS89`$AG@*QqS@+Tk#R@HmDDGiZ#1n?B#({2
zf`9V|k*mZ)xlJ|tbl<gnF_5m4eo(L$^`qvDg^q2DVXiTVoHM9Vd2{cY^QEvMoCsDE
z@D<LC+=o)31~eiyCd9NCW1i_kIEQ2_Gcz=n7Sk3KiRd`4B|SfVQpjtf7JyF%+$p10
zYLcRQws%^7Ciq?HoT?v6V^H(DFTdln(RpQy_NPj-;sb<}k!H}Q`tM8kU=dc1b8-1w
zk7fP0ak^&|cKkPoFkE%45K#O7&#$whwfraoc;{ABSs6B9ttt(5+T)Mz6*J#L+l*L8
zP&8*9L4=7@(zT|PGg&E@d`k`A&9%a8%>;}aHMWD%67dkWeFD9U8tLbziF^|P)%@9O
zY75fReF=FM(myZ2Q|?K%6gXPx1+D@2gBgIZ4}qo*xwhIVs0NbD<JaBR*3Y*a8etCe
z#%sJ4gTuTH0pepJLac(3_&V6e6D%<x9pwYI*s*WE6FruVwfQFnZ+0$n#?`WB2?7gP
z*qY-j0=wb+okoeZw^hZjO(GW6vfVJ%bsv;tU(+P+B-9Z-_fLG8vR!|)lF%U>*rSOr
z595RiPag0Y(qdohPKzHtl@e7Rc8edUzs~VS*fu==SI#WeN9-?5I68ly3eB-P+q2|&
zf2=N-c2cV;$+)fl;vt4)hdJmj^ZMhapzO99vIMhmJ+F>W^M;xwfNcid{K6rguJI!r
z&}ch?V-QT_y`wQ;Vl^38d4k}q3TFmOLsolxg+I)-4v`dcGr5=PYH#1e$MzdP6)zp<
z*Ese`utF_S3bB4G?2Fny0i}VN4<{c>^;VNLGzk9?xJsSAsAVYFEXO37ny`dvT(36z
zl$%rr&?E>H01UED-tQR&V>VvsmD(l<jI~*nZ=K5>eqZ&Apkita<O_^@>DBj+Eqk-a
zQHi9<h6<C7p6zoNME>kXT(TJ`gqlow3O1m<oSx<$>(o7cI&~wERa(?qhtVpEEX=)m
zuu`o^x!&&s!P1uS^1{!4bX-I@rAV8dPbv8$QLWugZJ+SVhM!3g3V=D#0Hy(7P)A3<
zqsE2)@NTu$kZw0PH(nv>Lk~<9m#p(ak)8QU21@ItaN22V^-b^2ijT;}$@B@mW|OjK
z=}8loqQ4@M+KC9)m3a-MLupK#9NY<D{vG<7$mjdWHWWQ*n)c(6f0R6Yt)I8%<!a=Q
zv&vR~lB&g!(@t<aw;Xo+LUAGRLOZd0a)4oBd4bhqE%zqhw3yAhx^#I^q2P5kjFbrL
zi(fSLCDx4ZbLYqB&M2AE)Cvj_vT9N|b$lWC8LKZihHwkpSRB~V2j91wR*{{5^dYBz
zWMj|2#`jrE%DTu8^BnKyH=m?IzW5>lgF0151Nk0gP)@7BHuv?$3{6Cfku1AILgTla
zheXuopQ*2TEgUuSAH$5&r?Z?_Ao{0wFvC{)HnTA}V(h3VdZjZJyE%C;l$#&XT5WFo
zL<UsT(fuRhJx7uoXYdTD8%!QH!OeoEMxg2mJlo5%WA@caPWs}?qJxJ=RQpaOT2DB`
z?`1q`{9#Pt84a^Hq$Z5pN@69v?NG!0!2vx>%#-HRakd<nrVrsBmXGh%$-IiX@&H=!
zBIWx=I;p$0WVKh?rlpmQdnFEzrI-$|3;-KJ9oJ%@Oh|pRzD)MegL3Y4k5d}w(%T-M
z0#{^eSYL4DU@pB_OqO;+nZ+W}@W^9BR;zhyXY{VW>1nyUZ<KP(U39EU^TT7m5n9E;
z)s#pGXP`3X75X#|{1nh^;2t5IDxg6~Wiiuv^m?LTjLR+O$DrZig2h(0D3SxQx=RbD
zVK;Ez*mNXp4vYjADv7y=ipiP~PI6X2XWTCLgg^GDwYw;5ay1*(5oxj_6WBPjYX(E-
z)_Bx>D#moSfQIye>&-g1+vL!Yep}@UKQJ9+cgB>xF<MM^u&AN7uI){kqzl7-Q+kS5
zL9QK}a$NEiwMIZD`~*GndL2RqrNQdm$je_tF!y)|2~@)B_v^|zOukmqv!;|h9(=dT
z-J247{wCYHQh5cnQeZph>Kp2*2~}M+j6pD~aX|xs5Mfv>LAnO}`a|Ga_fDvUY1{~x
z+9uVvTQXu_88lqjc@wzVbRXYd=f@v5xw21Lv$wW?c{vpU)bD@tYW;6f%Zp?bl{wFA
z=42&S`Z1*!o3F?oRQI>W7-c*q8seeOD`h11k1KZwbttp!3oW(Q_H&n9;;dHm_1>u{
zj!=vGv@^f_`n{FdX6r|*?U{o$xzx>vuc(rB9*o=`Ay(WCcOo^8i<sY>$ISm`sDuQK
z+b8OLm%b(WrRMV>Ph_(8rH99Wra4F<qC+l_Wnftq4eNo?87jMmk*BhrFs3r!z3jdK
zZQNS_p|m|8uKFxaGzC4I1O)+Mv@BsV=;bozq{8oezp9>%OiN1wAyhZ}-i8xh$|BnR
zSb^s=os`214BL!*6tz#*v+^}JG{r^^$jS%>ZfJ2bIsBfKS^nLD#a4g)Fu|4#>$*-y
zhUMs!F=Q@VUwG#(Tg@rPEBpCv`pbB=)vtrZYNZqH&;IV$2+tzO5T+57YL~IE4Nw7b
z+!rS3%cZ2&eWlK(AC5NemU6r!6(95JwLhXy^0*s*ZXCDX0Sa8#!KSUY^9@*1E6AUR
z)e_~g9$uF}r7TW~PH5p)?Ba>Ps>74l|Mx=xxcT48(NB$yGr(lE@$Zw>2|dx5ceyLR
z-X`5~BAI5=GnlH|w;fT{)@_NyOjz$_kz4S&7nk)2^Q=bD(iz1iJ)AzyWtduVHT!IC
zMZ!4j&D$HE7wMBM9pZDFUG^dKto&@lI6#O`TL7{$pKEocj7L36k5a$H8KKBq`ko|C
zbPy+sZtNsTVYAorUj{2L0>io0<JiLqorYfK$0d}ysT|i|U=D9{H){gkEV~08bXZ5^
zYXlqi@ZoFiaX0^<Ss6!{cdWeOjLc8J-X<c^C-R#zrF(@ld>=U3(-QDnMJ$xNq}XNb
zMqg#`-Mv)d%j#@h<kgo*Xw``#M1#XAMc=feS6&=1l)3qV=$&U8C39;u#TJFOLfI_3
z$ah2`dNap1&#!qGx-g%=)F@y0k_tDzRlxU=4<$=n_J0+dL7*;pLLNEx5V@q$$4wFv
zT{9_1;X6c_WJ%riq4pkJrOpYjf%B~9%vL@4x64+hVeDvs7%Mgi`T7qcHtYyq_8ZH6
z&5-O&Q-=hL2$DOO?J^!Kl2;P>G5+tI;6Jw<qIuJ8Hb{MvA8sd0F2e>*z6>`56r{R;
zQ1D2aJ@!u43btpLMX&^#SifD^%9_q75ZcK&mAbb?u@!H48^(8=i{^jVXyd<u$;Lk*
z4F0z)5B_g>J`5H%z0z<3w7f-tJ$*1;9FE326m`*SdI49_psui_iWDu<U{sc`y%Sec
zlX+P)ZG)&_kr-&lP!^+Sa)(raufjSo*ZZ)42cfUV1RIh-!SF8tAQUfn4_5&wv6``^
zfO!Z$Nrh%@g~CO0XI{Q?tzB(xW@jm~@1|j?%Oz<bq9ur&CtRHgGXRv8PNUI~TEV?(
zWPS|jsyJek<_XiQmv=)y5Pt4`G-Z8A@c8*da|1)hG<Logch-n8z|O6W&R<*okb>6h
zSGxuyU)o8MNOv0<$z<Nq&h%qGF{9T@YxLTn>8wc?+!nF_M?_ZiCWe-NvZwE9Jq-Xx
zdZZU@hYdtXayhgr@v;Pr$wcFUid`Cv=bT=V_x|>~P2VQS85q=ipF83Reg+07e8SxC
z&&TsC`E#<8oKEmG08jkCB-Qe-^nDe+_2|8ElnEwoygwZc>5)IL_*|eDGJToj;&BlE
z+t<Jptso&?EpU*2n58TDGiV=jAq80sOv8v{)6^=9Go=yHCrW&=8~tI%HA&S5z25B^
zm)XujT~8_Foa+vv-eH5@0|VUq9Xx=z?kNhkABxI0Ms>WGj5_aTNpKh-o0;DTA6{%T
zX##h*lZDQrCNaZ6p7l+BU8DqPyF5T#${8kz37t7Sh^cz%wgIbd3g$GMUFJcpL_>jY
zxd2<PS3l!B2RKn#*>-XD(h-iIUpB$UVI|faaSXc{4Q%|;l11S2^{&~Pjdyj*8IAWb
zeQkxVex&1emiwh|A10n>d<cD?m|8f*L9R^{L90$)a5)!}k_x=%n;o(^1^;|Z5dOWN
zD#H!_lHAVbI`zFHO9P{>18UauX&)4vKt+tgov~}jA4Ax!2FCAa7&8x)FE3aPpSAl9
z$Rs^9lLp&v!3VFQH3*a^$d#oX2!*W81322fF&m7fC=&uE_C1=wu^!DvVJV7JRc{_t
z1Gy`FmN)y~o~2w6Ylhv1^=QGm9>KO=Y(uc*et<hWI~Z96Uj2kPzI_bGk+uK^&k5jt
zc6FZ<LL4p}a2Ev@TEr(nHbpvtK+*30L6iXqiT{^BQRhrHI(sg9L`q<nAyAeBDr35P
zfVN2)PA~`L05kx1lMG<X6q1*~R=EJ?%1tN2crgnY0-OpR)@c+g0>@uf6$M0G2Z%E}
zZJewlj>(J=!<`Q3r~)9k>Y@v+WNSRE5CPy_e|xj-9mLz<zrTI2-;yOVuUW8PDUNO2
zjmp<k`02p9PZ|B$*e~`cUz28cM31NgwFy)l>##)|mtet&r<qY_bv<<;OhMw2I*@p#
zRF+AgXuq(6-d7F-VhzmnficRinCxkL`-|Es5xpD5dif9`yr|MMudTPuDd+S6Pbdvb
zg-W*vU2O{}alY1<JM+z})&@PxJa+h&Kmj4bE&$g6`OOYE<(FWLE9x>FO$BNC(m6_<
za`#)#<-}#C)J>zwh-&WO*?OEjaDDHEeX*r*s=^svE?W<&>*6zRm0URBn&D8V7`PBW
zP<^flAM+%^$zp?01A6R&J#ZT5W%lZIDQ8Tzm1xQmuAXdRI-CDQpxNb_Oo)@-JL~|<
z-Vkt6$nn+pMlUq54d5tl%9gHB_A&Piol#V_B50EpU?k|})S!>J)6c~&RKj`?wJUpb
zoC?d-V_GF^{MPdIW&6z?f}{e{5BZ7C@0O=bWu0T<kZ-aB+@iYUgvze|^9|$g<K?V^
zroV|VneA{C=N!lmStCGAx=QE!F9BD8WoAL>oa_K&HwY6QL^C))%<ir?QdlcR0ABQ(
zPSj<=s=HuKcWQ87%5S6USABKs@t3%*p4p9#Y+U?~M%F+NsG2Ssr(dXv_So(1U~fVd
zxpK=O5RKKj53dw7>1>#fuYWb*qxt-H=z%&{wHeqwRKmhW0qK}nAv;`YVt_JCsLO>s
z)DyB@TN7nvi%+f4p^~&F@?K1w<J)f$HLOIq?z`jN3~Nr63}XpA&g^}E5bZajl6vK{
zLcg{+RJ|&%IOU~K`Dkgr@x4aLnoq&O?b@~#f<{*V*(|0V5Gz`Lx7PfyyxqZ-NL3m;
zLy@N(KwCYiH6x0<eJ%0iWQj3P_~>Y%1i_Is=L%CMFsq4U{ocC*iu;C@J_NnBbEA~A
zS+GC?qe22zWm^rub2EdnybLvG_;V>+Rg~R;<>B5q9IAs=s>IUuw(R#E#+<0S_S8CK
z=zPfVCEwK<-ES3ExKYwcYjH4E4^b9@KRkSde|~D5Fr9kwYA*!5ou3V<jgEoZp)^)A
z`z4sME8UGNsAM{hT{B5%(~MsRrvcYA-99!fnD~B{eG&dnYboyAe6_n83se`MS8qs3
znB4dDfn&3KYgE8j_bT6~?wDLImo+Z8u+lu$fBi9uDzzuSwFP0g^(pZ5>x%frxjzAM
zCp0P+@+4)jra$!~kK!c{G6TAgiaZzeDG+8t_qjlA0aO<4-qHNTyL1@*x;}PYFD8#U
z&|Zo$MI-#4<VA&|YWcpUXoj3Sg+@Dq)QqEUGzI*${hGyC$2FIR^QBk)`8NstGo=aQ
zynz<T)Qva)^JSZdukz@z!=+(eVG3h7s?rr_TbbsL=^M4NMXB@qtFFZ|ti;5pZ6xaS
zwtDN%BcdnLaa-rVHs4gX8XoX{-%39>$5~>hk)zRJBKYhR7#)-XD_03{UNsWb)*nra
z*F`7pP#Eg~E1|W`{J^6WnlS8<l>3d3j9M4qRA-|OlW=V4XyoC()M~<IPbSMiV~%zO
zi%GZ28P|2Y=T*?}>et^E*mbX#^*@ha>)0)<+@#&rms?ss9ZQd1v0Zub>l6U{=P3u)
zMml`+=>kQ2&qH6z@83MnD=!U=7!giyz-P-S2HhZ6e(BN`IMl@-1lwbI@4C#1y^2x0
zKI!4x`cAIh_)**r%Zqu1>>plL#3D;c8W5*(>!Ye>hb!fRp1_~qbb5b~?f3)HBS8`P
zN#c9!dPcN(LU0z)g$8EnS(!E_8{X1q2y$l(2~dO~MQB6Jo}jh62?Fi-_zoJZ|ENw|
z{-b&|YKuy~#*_m-No~3;ZXSiQB=Rur;m91btv;sKfesH>zh=s)41^wtH*C>TpmvRJ
z?w9do9S2pyz1<5v-L#>G*v+)3kUL)&9$W*G5Z7M}!6opOVNB%M_Epcs^Q3UP-h67O
z28G~fh?0ec%IP($+zBf~YT6wEsUg19rZX*Ado%dXR>hVbCxfm#tbfp_umrYihdd0Q
zB#dzLVZ$@QRBE)#SWn~e{ja4KO=BkdQ8UgdMIjKWFT+3G98D=-Nu?lC@|&CrtKnvO
zW_S<l>kf~Bor?k3<A9dypT0={oq^uazr$TbX!iU;bXo27kqQOQ4G6lAtNzdUe8K@z
zGoVU7lSRbjpKQDTK~$1#Hp_Gb-d_P67BbP-2<>8s;~Gj=FGrw&?NT|}MbHEW0Hna$
z%|^j@eybNTeyZ~NU+<o4)w8y|n4)itdiw42{?{DpAKAdIV6`L*Q?l-ErjJ!E))kt>
zNfo$oOU}G>EATHpzY*n=v!9bZoYcyj5*cx;-N4E)5^@#%44!1H!hB0R7N=^rihmv;
zeJ9-7Qd=x1^-FUS<!Y)uDHZF`d_mzaI`(;qUc<RzOcK8IaDUxd#V3e4zsQ=8*`&~!
zCE2pqj{5z_44qimp>$-OUAEPt_Lg8bf_)oA3$_Q5Vt+v_Ab{MTso2)~3Zu#gAz$pf
z24NA3&Q*J}&rmt+ivrw3lz0M~sz+BP*tVH`UTo&aqZ{ANKQ4ayN~}1#EO%sz_=8Aj
z=`~@fGM|vO4mZTVMr_Q4alio5*0Fq<lcOD~b^Y)~hd34ig!rL~17bholsnk{KX^Pf
zUw4kL?YYP>d%D}Qbap*(fD?UW_S@}RFzo^;Zf2MBaX%^`-wg={q8Ombs4jyYw?3+b
z19sqsf*7E5fwzOth7A^C(2#3{Ic|2Kl=?QIV#b}m_HaxVOuOhjd&}u_vaMdQ<J+@{
z*{?lXOmE5?uAH@(@55ukOauzR<=DOj+ltofVQE~x<QS)0w|{3)Y^Mc6rTD#bL_S}E
z`ij>*&SOP25Kx_cF|31KFaVlejjVPsfszYY5<%;Li1ih0dtxGbjIJyUG=v;#BS$v3
zak*o}h>sWKC*LnLeyNo#cp<;B?iQjY?VVUZYxV`x0Y)eF<j)NtI~rG$NS6c8&qF1&
z8$+msGbngtD-_-h-})?45%<!rJn7y%Rt}xH=GYq=e5!*U?G80T;_K_H^F&)0-sCCe
z+=9Q6=S6;Ah%)41&!$G6Ko*KtwGuvJ)9$oq8oN98WL3^2;PYwHtZkN;EMzDM!<_dw
z`d{d=qzzl!<)E!#R{#|Y_!XE%#SJJRpVZB$2(C@iXK_jNGDDs;H))#sA4Rmq-77p4
zF_+S4R;RoO^Tx-tf!{9?#z1xXF$Z-E&h_r#euZi)TNR<%tJ-o)Ki{T&(K<GpS0Lk&
zzZ?wAQC8nGqqY!N)kv^Edv+PQ#TwBn@)^H`g;g5%`R_k{D#^`*y`<Fn>k6mxW<>+j
zH^T)5f`Q#{gZAo(KZpP}vGX*>vF|V;pGJ)=&^{2p_Uv<(O-|L*7bLPkW;n5@xKJ{)
z-HsPiaIMg0hYY?Jwy_CzNV;H^0Jxz3xeyDeKXw2}mPbVGK{e1~VRQVMPO_D1SD1GQ
zJ11(Zb+2OZYX=u+51NTS-z2sPM~NC@g$%o1p)cnaVT;t!;FlG0R#6QN(<ix7?y_@i
zRvw2kBaRfQ6i*``|G>nr*Wv5Jq_+_i9h8AlUv@@~vrhYNnU<B#S)1P(<*Mw>=wr~`
zG*(Gwzx}8sl~yW#7N~U^@{<rw60xw_ftac`BUF5#U{?ziBa_{(VH)SIUVLGBC%z#$
z{^R0;h5cUFRI=#qN^Ijb+psS7%B3F93>dN2!FSS|GnBfW>y^sHo3LufeO2TrJysNX
z*tJk8)2qOJJ+*H=&A6$?r?j<;Gp&}R@wtx3EA@rp$04&{*je^fQQKfoIwq+76jXVQ
zMnm6_t|a|ci*v>KtNKo>hccnLIkMh$VNTRKipk!;%(e~p0Q&@iTGRreOBWz1*F#TN
zL1((Bz4;dmK#Mqb{N>hXwKi-L`T`l<4JQ7MyMs+A%e`heRixDrVLo@TnW}^9qSWBt
z6esS1QRcUG`q<JVJr@>M3H58Ds#JaK;20>)<x23}F<zsWJz(ypNAaLdp+mrx0B$|x
z!^gG4irh+QW&L_=5m0IKpnxp6C-fZvUF)Tasz8amomcC=eG5$PYb$HA!J$xE;(d|g
zt4imYm?6M_7U~QLZ|>BhIgMJJv-D@m+zu{pRqgRQBkk=XD!#T#)84C<xE>=iNC!l(
zp!s?bSK3Gck4*IA+`3!_*;H@02wj~ZW~q8TAwi$SwKggve0>MK=9&#lH#cX^4D?aC
zE1$j?bIP+ulHrR79^p2F6UE+=mso6itAaV@)tu0RadN-Ixw=F~TB}r2mvd?7=kiRc
z4;q3atoUkaOHw7DhuP(4i)EdsVR%ty(NLxfsX%5h-TZ6+nTn^=o*+`_DFCwq+6(F1
z^RM&nh`?QLUPdIqTp^JlTHKR01X<vyQKnu8Dk`70!1t)XP&i{%IF7>(;al5)eo3|C
z6Rz_^jf=1dCR5|V?s!J~RFxscfS}yl9oOq^RK*J)EprPvvxaZ~dY^n<gkr!2<w+gr
za|h@eS3h>hHP7rlZBNrgRo0)pOV<cuxz}*D;v*e#_q|o_xxt^8jDTh;K2W*m{AYq1
z>5gT|Cu>>44Si`}2jjGP8$FLv&Nh$7@>h!#IaU&#=ZJTL;W$ReRpzuc4jMIAo9FuE
z51LcQRqiyfPTWt?`_ZgxsToACJ@SoRiqecI^$s5SuQQG9uc{C7MTl;|7b1IZaTi_O
zz#bapO~^G)-h9$jfA^6Q>AW6?=8tf-2U3lsAYsw5tV3YrY86`rIKM?Pj4Zr^Dt~yd
zNctmjiC&vKj;Jhz`gHy*8UysrX%^TtfZ+kKK*nY|^5Ab_?w~Wd{8%uC5{9R7vWip#
zyr?3cyP9z`OseRkKxS{-vV4PKvA!8j(@?yORehZM5U-R4vx37V)N|~fu^NOxX$Ci3
zH?M<4+`2J1l3D7KCpq06CB}Hmy)O0{>qnlOHL`IUcFEC7d@?<5E0lcrbg)x9cr<R*
z2!)AWUAg+UQ?fpzz=6Y3Wig_haidk9*8ly;o${k|Tvl|i=ga(hmVHo&H-EA#FOb(3
z6whM=y61OiT;6;yPg}XuO{R!Jsk5Vz9y8lX$K;$xhEG)CT6cHSnGc9=<D)Hl&ii13
z)m=<wTV9xldA^h=C2i5H4&WrFAXi>@5RVi1u=fpG3>rCxh?><o-}5K1Ef%~bZKm^q
z2muao33IT$tYz4C)xCY#6FQ(siiVwR`v65!+Eks)G?X4a+y%L+#)ln7`iE!2<_Tnh
zCJ%z1J`Arro9A`^k<DT7ys+aoJ|A$5Apm>e`SB#|*jhGh7<3c_u7l%4xBnm-u>=DY
z%QG;1o{G>e3%JhEC|^O3#=shA6Ug^7k^0YP4FDqzz7F<@dJfSgHDzXo#gToLr-A#p
zuFbwC0j}zJd+JDtd*Hx2c^4uEQfttT7F`2H*Ha}C*D!WW`cWB*qhFd)uoKq5lIdi+
z7I|KJEXlLh#y7BXly(@kg1G?*Itq6D_92ZhTo$C)@cg&|aFQJq+9U74iM68g3Q&{X
zS()K`h}t%VkP-`P=Z1w#&p4C{bg$oe|M?Ohk(QzAsO7=#hUcX~pxvbVgUCMrRTi**
za2x^al^O*46}n#E3#C+wyPi+Dw*wR(-$^DWa?HO!s|S!UwfiJumIYhJRXkCzAEIdD
znLWKSPaBO!Icq9X-iM3uCV#COp$)!%QucW?iogWKq?)12F`f7VF*TvUFtK1U5zM!J
zQ;Qo(U!0+x8?3n$gGRHeLij4RD_Cue4r(OnLY?sKNN+VX;QmzQzK8qM<~%EXEsCD7
z1M_DoJ?_F^;)RWS6FMb(QB3fCdY_BOP#926g|(sfOS+fm0k{^{eMkJBG#9}v4P)<<
z9N@9^BK~PdOd7@*JBfxv#{59@3j^%_&F`@uel3x=$M&gBGZk3Rvetqa6dVQy&7J_7
z|9(A>)*ZfW^^3cKSD+3PYGnFMd))l9vg4oT*`C~ONL?%%dX<vH;>8DjX-ZV?bJU=m
z6bEK#1CZv<TLZmGY(xB(YO(CPO0f!q_fv1`#1w~HEKO_au&Ja#IKo!cIA@`%MOZ;h
zCdww>ENiwq!Bn*Mjl<c8Ne?d5hy3<$Xs?3ayfFiamGXNM3)igcv4;{}06CksRlyX@
zaKd5Zfv+s?&HpaHbwfIbq?OI4*w|3wYQKvy>G1D522{718;UZyi<*74=eG|sLO%>P
zt#=I`%QiP1LE5Jql_qM$#z3;J5B?O45Is1I1S+JLVTyn@GzG}_v7NA=d~C6iX7(8{
zX=7`6r6s<Q-61v6@dQ{L#hRSpTCN<)_lb%pyJU}y!ryBE#sR)02wkTTV(h>LbJzrk
zM(q-g1+7{7`dKq##t*`_^Ub+<+Y1jj%H^x6f4WQL&<%YRP)*g^K}vs$*gLtf#R=kz
z4i1kE$?=W%XOElWPj5Xdq3rXj7QN}t`*!)Rz1ic^&HusPdxtgEb^D?qO`7yxLetPu
zsuT$pKtx24-h%XAqy-6y^bP_7f&wZaN|la)^j-y|g^~mmq$h|mn816Ndq4Nw{k?mi
zd%x$ocRy#JbI)JES}R#IE0Zzj7~@x%=mnm+SWbX&h5#zT!vwB@|CN$8C|YettRM^;
zekz&twi0R1z1k9TUf9y-Lg|l}O#0#eu2?it+Tq1%K1E!IKGr>=M3_}8S_%=yKzpRy
z#_|FzB8a0=YXy0dJx*V1^TKIn_xw5{-`!RS@zknh)|kBXv9lvvgpEmR(dFE@`J)?K
zL6X|~xGe&fXt>eEy}d>|-#8J}-8SE7=J|n`+;3ft&+NS=5)#hY82jWEhI{lqm=6b~
zUGBq!&KNv5lHg8V9w49sb}z;IimZsc;oZsPl?#3nfpM7Oa&?MgDDZd_Va^6~fHM<_
z6_)!g4p;}R_iB%KKX4i{ZSF`W8k}E=>)VhV4fEYi<#N<cA<p58dw_Z+oKdY`L9lwv
zGr*YnqwVef`n<yC8m@d=%`7X+DI^=jA23h^v4m)zYUx%HbD1w~?d2_?t|b-?q8!Z>
zKaFSU3UJ9%F4|^CWjnBu+;>sM$w*AGFc)6FNifrkze-!Et<~D9vB>9h<wrM(ECY#e
zo&JZ8UoFvSMlu*X28~|h$ED>KbJb+1ruZ}3-q0$Q(xRFmJs0`tCfj`=YsisZAWL@>
zx4X^~hma<M4Tv$A%C6?`ZC~`PB&5wdEhN8>hFj`e=m|Y%v#&;2BYTwL236bWdPqvh
zHORee+xE{Yg(g*dqLKCXTA`{7^yidZWDLY}=y<8Ogo2L2F<l;qX`P~Yi|p6lXo{u@
zs(WALouVUecrTWv&q+AGwbV^yuYCEEZGf5S=Tv+#2+oW*9z-Y-k*|lsooiv-Uv0g(
zTc%~@4Z^NVTn+mnc2S$S!IV%U??<*Ku41NQvS3cL{kuUSksHjWLgGe^^|!LZYc64g
zYQCSNm9ARCwBaY<2^+S?M3dr{p*St%xoces2&L`AH(nQ@tZ6nYB2V^7ufHz&C0?MH
zlQK1U_+a-6P&YaNzU^qwf(I>yuizJ^Csyh@A~t+&AKbU}PNy`3*|~hxqN9q5?$s_M
zqDi;NQl~}7w?i_LG-{Vh+gAy;k~|X~Z73aoX%T(KRzElCk_8E>ZxDKj?_q2)S4SYI
z&(lXGgASIp&6L|*a9UyAH2PB6NvytA{(G48OZUR|R`6E{AF+I(r5JY~&E@sm|A~UO
z_!=29<MVpv`1_$;wdshTNW4Y+_!9L&tM4n<sDS?H3c8ylJ-f4+E+GIrPA{K_1DoLv
zTChTo7A)(#eEp_pl<3D~Bw$XwahlKKO<%p=soLYwJ_GjKV?7{7t%sN?8rZv)i-+m1
zZONlnfYk=iV!yA_%I$+98|l#t0WiARA}XxHzB7g;O<VO#ha4tS+&cTthvuSf6;g_B
z%Z$9~{H<gYrb+B#d}aUx)vqK9rs7;cspgQuQ&@wzN>YO<LD^OFs&`O@g(zLoq_wqh
z<>O4MS`m$`()56P=W78occQ{wJg9FUik+)buGTc0IZ!SoDV`r_RO<fBBE{ih!e9JM
zinp7J^T{8xW(<%tocHrrff9AeSJu&-9w^yb=J|%wZLuvQJ)Mbcg_l*7=CbtHb57b^
zGTew!8;8-I>ZdQ8j1$iknt+`};5*M=0xXrK6UNpjFNT!st4d~g`QCyuikG$%L=3}&
zHo$v)SPyM}xC@T6NQ@X!Za3<yW&FzbArpss5aGFM#`AWLd<WGnijgPY+ej2-aE*<a
zMq($94X#mjqywI@bHS=vGgk3)i}li+fM?Nk{WBL4E>~$CvBD?6L4ZCfrW%U1j8{24
zXM=PhT12X@*=y5tJv@>ne{i<AKhERj47u`4<!G%I$XDoz3Kz-(eKGqS{hg|_RYLQJ
z52k~g3r1;@u4krZb=<w-;u5#qvPqrR-jT9yoYaJ7K=#N7r`_ypF!T-y#ICYWY{lky
zVA^ueza62#OZZF|6(fPF-!inpJz5KbBVjC~;n%RkV#zFfm4-<UBCSR1>s(pZ`8k&1
zp%Mq;5>8Kr!unW}J1GIJ)2mv(xDBfm>lwGu#Ex*wWEYCY2YTIFk0c%Mw9<K6@L8C`
zE&Gd`-MbIzcRCe5H?{A!HYw2VaND>LD*arF4t7Xk&vFCb%c-iW?op;j)F47m3-_Ig
z;UqZRpJ<MC#KYW3#w`mDD@x%Ozh+YmtfVnw2WpRe8K=+H;xPK8E8qErUz7aLKDYh+
z8Oy_;o*m@X9LBC!E!W!0F5CHl?U{T9S7MKNdhE30wOLHdK47B05thNycTITK3dr}8
z_%3=>{d{4yzvj#ko+TFk;I3K)t#5VXzG;Nz;@rt=lA|X9h1~ZhT_I*oheFX_Ef%Jo
zl&jQAHaQOZr&<Ol_z6Vbj5FlYUld+wTA1@XXnz;y-BYCW!N^$m%W-(t=F(D*i@91l
zrKEX0-*pT?Dgu~3U5<ZIEbST+k+rZXEa62U#-EKCX8f_M{!8%g@++s@8P$A4@ez?h
zOX^oFF+Ib)_&<aq1N`7e(j&OYOsBwZCegpYg<rXM?0zow$J>G`OBdf{T3abbcf21A
zN=?RU%XxwtHQ+#pwOavIyTP=9{gol|9O<!bky`*)W7oUxt#1D?QHIAVe$$#W{N0s!
zxK&0|mpcT-e&oM6WkB4<KZ;0M3VvbRhYS=H84Jh_Gqo*w^OY`C_x_N$LhDP*xgtt@
zO(tOGXamkrrpZxWeLN#G7N`;XOjq&1eut4FAe>L-2i-mx<9`9Je45nZjDehAj?BJ2
ztt{^A&!_pqtL<BUSJG;do8O+np+hzaj2vko1Ch240Tf+tRT~}vOVH3pUUa7%EM;(w
z2Ul-jefT5<JktbfyjAzQ)j%Tkd1C0wi#I;(l!I4mFV7==AyGx3c0A%mCEC6RiCMK^
z=8w2ek+r#Vge_$A71eWp=B6frab&Th0&oW(7Rb3sRK(d3;Ljw}S))(5F|C|3wG1Jo
zF39h#zqmgea>>whmCAr&#cuK<Tiv&?*!zn+v>JfmcQR3&6aW_{VmI80{%=t`oq~Av
zA9qzX2I_kx*10?dMUxG5LedQD)gL?wY-L)gBAcDQ(iS4~;}v4zN*nD-vZeT!c?RCW
zYo<0jva!C(c}j+2=QD3rgavg&G6ss98cnv*%^7>E)jCgBRvoUD<O+s6tzXt~{2D%~
zcTZ`#2FQspuYSwttQ)6;8h8>ymR&y0h8VLMdxucJEd5{{UN$7As;QX8Qun6&ab98<
zrW8ccp<Zk2`Ab#f__xY<2K#QH=U)_fd8c&CV4ow|tk)Es-={lGTdA{n4h4Mg^!~V)
zV%btCJPOMVesT3Chdk!wTJw+MJ1512LEK<w1!A<AH8@Gm$<Q96ACUs<cXlyyZnkQc
z?6xlhu=SeG$tP-q4yt(77(oB6ZdRpGaor?BHSX>9+hC_^uBsLj)a6mk#>naCuOUK@
z*}ETa0V6CyQZ~LIwURZ<i7^9h4$n#cP4?h>=!ZTX@|#(nViNu$F7mT;1}9;W3An9w
z{pd4UnBLHkHpQ2Xx!?#K{Nn?WN(E-Y#6_17EyWMi2y+~`+c*)#r_Bi$$4y0(IY{bW
zdHk+TFHKWw_fyV092q>j{YyU9MY;5#N8(-Y@wxV?MgMLjhZa3}Kxt!&!Hx*Ihi6HI
zTFvaaw;8?`w2b&A;;|a#G3RU%6iwv&F6qGx^v9QfVC+CF$4Eg>&kC<!2+^!{O=k9)
z)r)hk+*KnIX$->_SEEeBl%jPm-gJ!m4rS}|2E{`d$#PgID=dB4?1c$FrvU2tTd1&N
zWoLrp#wA}|X^{<Y52}BpB|y`VMgsmCc!2K!B)&V&)&@Uwz02lHlXaC#A#S|6`MqG~
z0Eo{FEz?dB?wtMEd9KSdBO1-~l{kd~ly)%%%e!q`F}}=cgK?qTtZj{M&qsZ~1(;vV
z1j81y&I{bhx^8N1$6`S=B$*SH)@xIS`j)iR*A>pgUZJjGJ}fkj$O)|{HQyQSZo>*t
z$y!Q0(e{Zt{s~XRK?p54;{h}mD8WJea}&!sxPD`@Uf!*_*AGR_Da%xU?3;*DP>SZN
zI;v4imnAGH7x4Z^l%0R13H>jxbg)j2zzcLE8Q~Cx<TJ5df`j19Ls6rN<1q%FYiwbx
z)>E$ctq5F4T4>w@mc@8_8!$1S&*kdH;7=wC8G5!iSibO6km{2GQyF1T5|H#y8$!vP
zJ7{u(K3Qv)00J->xm`r5xvrB7QH!MBf9AR;b-&`caLB*EJcAfsz6SwBZRL=-2bIVp
zAxh^%1{us>6n{Ss!u>E_M4lzv0D-970w}m>oEuu>+5$93IEg$P(5Mhd0)(xxe@@xn
zM72$gMIhs&tnu8%o1-7Tg6`jjNF)0uAV6mNTgn^QQ5V_*RJ-p0hrQrW)IT-_ii9*8
zEq_sne&&YIL3|+5#X?9|A+An}-L_{abg#XkwNcQG4wrGLXFT6)siLP7-zL|BoxeTe
z*4<F~h1@GA$&+uL)@vEPKAV9yJ!*%E1Eu2SK4GD!T=6@o)f@f$Y}=kk@#-fvwwwN|
zjs^td)Gq%nA{g3l#7XoYT9cb4>z6jxjr*!F)0wtC8R78LyE5i7dRx2XOR$8cF%Oxu
zi@+Z(5zUgsHYDi9WgeVnCjE|=g3|p7#dj&IF!wb!T~0O$R<VCMsl!k~OF<DpAw**U
zr2}9+|IEjp<;v)gtOa5xcC+{c0Dt3b0?NGifWGx`@PF{!7(p<TfBLOIb8F+?bOJNz
z-#u%fP|2cvngFPbl+~Uwg!rG`d?g!+)rA9->u<+P{yqtHtp3cou09Cv{^8=-KwFiD
zYBpYbP$qv-T3|V1LR+wi)~!8SG7tX{>T0`UyJkBO<|#czy;Ccg_^<HT1ZuWn@#=Gz
z-uwgMM)I73m*?)b@mr3Gf3@*sOh@BoGk}(jN?7@}9e3eSzd#0w7TiK^_Bt$htNW&M
z1JG9Oz*~ae0$ewv+oN}H`y{KiG>OL)Y0PT-dr|x~{hwio^mpjo|Es_MJ7N#YCU)-q
z5D?K8>mL2nA4Hrv+_O<l<9VSKWvDjZ!^Wk5adpW=KUBoBdr+=je8msdVB6mb#{G&3
z)~w}#)!t9IGgEb=@t5TV@nn&4R&M8Rn&%fHy}i`M3zQ3ZSOlJojy^iC6@7k(8DVku
z1=Ki3);Z1Zc<B0$tb%us3toM^Ml%~CXWC;re_#HgE64Ly6xAh`Tq{n*<-aH{Pptq@
zpb1QE6q+d*z|+nf6seaqH8nOirW%Du1W7YLUGBeg7!(~YLZL3!4vsEJ4W4VR{>3Lf
z(z5uALFVfr1!7)9Er}fT7Osg07V7J`L#u@iMNjB*ZqZ$yQYoa=MbjHb-%_8!AJ*ri
z8`a$L<%Cq6T95kiDvwzsz>Zq?^-;3&D3EJ#@ZvdN#oF1eb8AJ1<~g_edBIA`?A|Y?
z_nWI~y#*Y$T6b*H(lg5Ky-=mw-mb13Q(E<c5`lVUg^L7+S6N<p32@!luom4$JSg7(
z&6mdZ;~#Pdj*j>{`Qnc*V^Y)4??ROw$aby7GMw@HGmYYyrY>5q$Cr|jToW}7esy$8
z)}y2IN8*a(&jmjWviM!s;^{%AoB>+@Bn~(P-<6`ZR6?TWZqCge72Y8nxd(E6YGYR$
zCW3{YDG5_nYrmI=YL@wDYR^H30X-#rmjKygTAQCpcNbrr);PB7ed^f5T$A9*zvk7j
zX2(3@&ScWO1g6rAZtlL`ml+K!PUvc!&tIPsUWO~+^J0EZQ4sS?lOxhJnBAzYLe5tY
zp3rnmYgXP)`q38H7^afdXEZP(#~30W0Wvayf?*kik2t@y4JZd$X(pK2wqN@S?sQ!B
z!?s4Sl-$I2?us2>_9mlHhhtop_4ytxsjGC``yBUQSWE-m%M|tC6Mx_J{QYtYBdXg`
zsgEeTgTdYr$pz2A)Gu>tu1;J_(~$WJVmVuM;KNt%X+VJ(PX~+X&r69^$$cd!R|0FS
z<C<<VP;j;870DC!5=%fzDzU|FEh0c<=g$Z+;_?uJZpNp^_W(`>6bHj(vFD5=zE3@G
zZV)Q*<nsY}y~ld#9T++}TPV0a-sk#;U?A3&z&3`8)NvOI-2-VBo*c;T*OcYC=P9k-
zaC^W#4`!gffKlIvssTIT7PWY?F?K{{Dxr&oY_h)H_(Os4QX`~bxDA%<p5jxVhdR;#
z7bLL;eTW{o5P*$+FUP)5+`CX^`QX4tX7Y@Kd>8j?P$A(lsK1NhB<1byhkR!P9d^C%
z`+{|y^a3TBd>82X1O+YJ2WyiHkUi*xg|jOkpP<pMa>`?zQ~AjEc7?gkBTw$RWb=s#
zRet;sm^@*cr*^Svu_naNFQIZ8S^%l5O&~jCjZr)s+%b4Mt4Qbm!Lgg2bI{O-$$wFp
z8pv-Zvr?GwhrhP;SblOP>Ip<oWj{h6*O`P|H)dTOdid7!=}2HupcE=ompwPs`8!2*
z;8mo%_N~K~bt8?JqlY(bl8LjuUCk20ibYdqpF6o>qeyx$wJWx^-p=<d;vdNPSjQTQ
zKJB)uk)TMwrz5TVIBXDvV!Tc?uGmJfHBPHQc!*tgX>aSN8(~bpm>DkX6<J)hdQFXK
z7amGIr-RGk{OJ2mx~}OUVxQ|rZBH%Awx^i4Y1OjFWV@g^t^@i;&%J#*<b|_u=u<lS
zEqO+nOV-y#zH$pXMu&g01a_LS!lHO&6j=pZi0nU;#pTSRI`iII<$rxHVF*SsRouPY
zYu!vGQ(4D*EA+%O(zIMDL;l^hD&L4=OB1LXT>Z3+td3W}4kf}PUJ?Cm)y}v&3i|E?
z`TmmG?iKqRX1sBAfj@-M{bSKW_ZTjNuAL1RHzL!>UO?<j>fl>{akwvsw3uwrLVC<L
z63K(GeTPu^Ix96)8gpfPzIMl4-37JifKoI3_*=%@-`}Tk*;=7VAaURKwLp=zotVf^
zHy;XZfKr(O@$x6$`YGqN(AUKbw_YCuF&oNi6rSc5f3v-XtFd<?-PD&L!7gTM)t73t
zOYybiIo6>)=p<-;X9Hf-wy_n@@<hd6s>Z!J_9RR`NyH*Wbk?QrZo4hZQT<Dfbk~RO
zC0WyHb2f{=AOHK9uHET9k5D125y3cj?5`B?hde`Edi80_TN^!IwsM;_9j)B_{G=;A
zfNmVO4^wKv`Z0Q0rmq3E)hEn}XNiIvW2==0Ti8$@ZJp(APi<C>rTQIfpW9Q%){IAQ
z1;{2V>U_xI_meR`qO*|Eo2r+rWhV~dw|b$p$hzQt)0tm(c)Fw-?XfbC-*(<AKUZP|
z^39jHobRR`j8jY@a#C<Hgv1$bNn)Nk?m{hg!m`KV^7Q`9kv-TdR$sZ<=SJjPI#Y|=
zu?Y_!q6LHcWwlPi_g|fs`g<4OoQ4V-kX(rlfacr+TnJN3w|gds*H8Fac`CMRG%Giw
zm!y0`{m6boMq`LmQK(mF;^S&4Mlf@UI7@ii^oR5b6YmjbT+oWBAO?+Tw9bgd4!B&3
zD^IiD%uyTNxtd0KEhrvU?N4W-sjT2Fyfu?!=CC9%KO5KYw(rdTJS^j~UjMsCpF*?F
z)A0_A)pdxj+amd3&PI5n>YUBO3*`)xxxa8KO<%6J$=0b!uA%d|FwHgzy>6$q3ddG(
z2#~@va%oA(f1O)%_x9hLY8ql~xt&_ZMeeQs=tL(Fe{fs9H|L=E`bVEgO<j0i6=I=?
zvk4DL?x4YD&=V5|9HengUz+Q;QT9&@U+Oaq%k$qqv%BA*mRmL)>Ug6>km0yCs#p+y
z4Ug{QPe@d@*C5?$n{E<Fles_JbTw8jH!qZASei)R)5j9<sGR0@MG<$Y^Z5Leih=e^
z5$2pGR^0q~<fs||LmgZEo4(rrJaYJb33A~j=(nuJy|TY3M7+CxW5H+9-nPA<BWVxx
z&n>#M6t3U$AmWu{XeP#Z!yWL%HcyoKn{?mfoPRLMu1xPA-UTpq|1p^YzDu$uatt6j
zknS0LM4ldQ2+C%&+W(xb?MYqVrlQ?|IP(uhF|1i}euA;hZ|P)K&}Cu?h97Qzny+je
zy(XW3Y;=QJ#vrXmoKZQ~vRR5v_JyDfmy7E#ob?_(*BIFa0NXF04Hjf{v&4l$QF(*{
z^PISsuCJTD^F@llxfY&$;+9V<hnE}~Ins?H({weXehejaazeeK^=~tPBp6?)afYO`
z@d&G^0hTA-XxmO<#qWdal-~@ch5{Eh6rw_*EepLFbhdD+Mx1b(Tza}|Q$1c~{!KM=
zhb6P@8~2n6Pd{$e0CBS<)rwe`GtuAgb$O^Tpv74Ng7^Zbxvns1IF-n5P+<!z`e9Tw
zWu1dzsW9Mc&)UJhO15@jcX=*U??_(Zhz8;cG9JmL%?&`*wDGqP0%u=v;Qm8t=zefp
zlC5#>%*t4^mC~n}@d<VeGNd6jnKY9uF9RwnZmjKNNe4hX%$O-u5XL>>6~JZBDA&$>
zzikvE)G$SrKa4DP)A`Yf3w0dhI~tX7uO$A$cBO+rNEa4PqQkH5B%q8Y<gJKG48Aod
z=U2zoTt?lKw&oObiFb^xy;&QN(#E_=Ngtxewdj56P{)n+Y75UlBx%1c`%d6)x#DNJ
zq|E(2OaAM{dw@gR?Q2O~tJAt75Z*J&U9>RyBEFW8j0g8jU9cjedO>_^Z_D3IJ(Bg8
zpgq71sg#XvUbfu4kY06r`2$D6Z>qj7dhKh7X()e(Cf=R!6n9!44VT048BHc&7i+}3
zayQrSYD@&lK2mg3Gn06BBb4Uo3%BzgfUHdD_U9pkh*9{A0mQ|Qx{r;-xOKh|Z|cS@
z%PdLZi2QSt63Lfqlk!9vQ)7MvPt7+OB@Y2BrhhLwsR+EF%>}n6n!L{7*o81R<+Vp%
zUb*<OH99nInf5MIe2dA`rTa^$jjNMDo16{qf!_z3NVRDohTvtx3{Lpf*4#5+_5;_4
zO2&k6e#zCuq-0)e-ssex7;#$j+RpD~fs5op;?k=#6tr=u=a3bqc^}7}jS*6WiDWjO
zgzz>AJ$O(=FJ2^x&2f<N_DD_2Q~VJ4ghE{X_xca4U+sGBQY7Mn*FEJ=Hl*H6k0T{E
zvE|jsq=zTT+)VF9PMWiyUSPQi)tT1qCgV@D50F4{3IK5+=*YK-)_z5TpM~~q`|s~G
z_n3Z`&EQ~koczH~vXp9!gJ{r>)GYLZ&UcD#BU_MZXG%~{ZOfP@B;!E24LTgBmLFX0
zbQf{^>3GLY@c1xKdPnMMgDwvvi1X|V@VNh?$bfsVXPk$J5c$G<o$=sq*LmuXCr)2{
z&=RQO6J9EKGNm-1>r-yKcwr+vCDOMTW!bj?qE}H!AVBFmAeD#>i@B{i(Jw9@Y)M&?
zPEj$Ll9C<FRa8_W%u&Cgx;$JNS)JG5tnI}F{rNK*GH7Pv%jfO+Tn*n6BYvesaMI+q
zn~Twad49^}k*T2Mp9)L6y=}juI%$CbvnJI<;;t?6AcVFA_#0?_e3L3uLtM)Ipl|MU
zjX6Sy6Uf5WSOJlJ7Q@qavOm6nWN_3WR7rG@)0ihsMRkmod9yno-HLZz=hJr6=?MAi
z3h_~hg|ePW?LyDjkm*VRLE`}|l4IFRBPASn^Q+AuJ~x}=b{vkA{Qe^V8Lg0P6=oyd
zgDLfQv7S-F&^-<;4QHR{n<B5hYKj?8S5Uk$s)LphQ=qE6%ry#A>n-Mmvyd)&AXss6
zpPCZ-eOdbE=W(`c%0W6UAN1PaxanWXT9mHta&)5C41$_iQsULR>&XVVkgb>p_MNz6
zjifYrdF9Pptr5O`lX3QQ$@C8-tpmnh#-v8uu?U`KXiLF$h<!LELwtFEyA^i6X!B_9
zZ4&p@gWMUngvs-%qZhup_&9Q#>M-brARD)0v<&eq7@jmT7ls7cXZE(7MKM}w4X^)@
zkS%3Csu#m)G*CJeW4eIu2o@a|jHV_k_9#zq+)ljd?UOn#u46!&v|h|IaC5Rw0{Yb|
zUrZu(PE3(Y1cf9}JtY3@9Qj5DN&XCU;XC0$D}4Y#ji8E){%%63OTQ(O6Iu0<PkKll
z&5EFO@PO1!){-FP%h5&-M`%4&Rd3nM{$EOs(fXm`*3LH$tUp*uPSR0WE<7^{&<-Se
z<I2sDOO|>>xeZu)UStr~f6wSTSC%u?gqQEPzFXgTYc@tZS)A88Wr+f>5o=INEjfj5
z2n&%;Z|-<PJEGw&EPD6;t;Tq%H3P;*Z_%7DqswzzjREvbt(tj1<CEp-wifoXaa8MD
zJ%_1iF7i#h^o~&|y1&FY3vHK!VScwFnPr&EsxRIlopEW-(p*4?alive^os|K;{h<|
zVe*;CBFvxU-i>Cj7D@Ltkbk2;d$F<kX1=HsU*DK;Hw8#0egtU9AHPY7%a}Su1!2L1
zw*DE~l&5dc=)O<GO3ZMdp8fWFT{;$=o2F73UKXlc^=^rW{cGem$5@$TA?QFgcq446
zc1dPXgSHlP9oxn49p&6OP5at60RH^oVJ4$pZo0?-R}#|$-a*Rs&iOz0xl9ztt~&RE
zXtk)Y5H{P>Z1ZV2q|9oy?~}w)ja;>Ove{Z(W^-F+j)d^5%!$X5oTgOk&?itJV*CNm
z5Yc$5{@=!n$1J>d)}1*)TRmVDxA>(W--g6*KVN~&OK&wrHtBLMQ{~}XVvZFOwOQxk
zEbB1s*)s3uZ0nU*-Tv~f(s!~hEZBXst@uFwNam>FjQ$V<Ve>|^g&gw0+IE)B>xss`
zkmyF+SZZ#m5ZRf@l#U8Ej@T7WKYhX63loH5V8uhDQiha<u;^%n)MvoJxt*j$TM`s5
zSnuU2-jG02T`WzN(KB<YZ&jBb4e<WbNlzBVO?B7C7eM+T3_Q70hWZLxuju{2IjLC_
zm!{{Qzq}Co&RkHkJd!#bum-_t_Cp)#x~uhjv{_-aAAN)T-&qd@Y+l{WLI-%Xq;nOi
zImX4WE;5<1@7Cft*6H>c*M}m(8}5C^#GA+l-*e5SDw?(fUF~}2z8|i~u!_rNeqzz-
zqX4~?EDJ2=K=l53mSldS>v}N@++YS-_mkOhfCrV<;Q;3m*mrAbzLg!)ktI2%*XJo3
zug)u0|MM@37%00x(!ZFQ?A<i|i>PFbBj+liPv5Ngw0azvfwNpVgk2nNxdbi5WVCy{
z!N7U&yBOr49Lt7ZKFRoOV%PH>EKN2(zuF`0Q6iubLW!xW8I8iRKFp`MGT0}f*zy9d
zgVW!4B{^P7!}uq5UT6eV^nR}$Fn+RzK1l!l%hbxsO){3^Sr1K)%lVJe%g^7@9TWa+
zLbwsjAX>ORU>zU$7X{Gh$hc*=yzKo6vNvxsK~sORsYZ{h9QK*ojzY~{NM@&t+H;`*
z5`D}JHpv)mub<s_%vTif$?UJ_lfHdbBsE&UEQHe}u$Ug%bFGsZZa@^mIosfh2joB`
z9`klQ&BXEj(>H5dAsxB7nKFkhS!R5&myR#-FCC*1`)`s3r~ve*8fGCGaRn}srY&?`
zut*KpS>jpwy=pwO3nc>AD6Lm~!=7s8f00o-nvUrMVyKIr9EKzH3SYv94OHIVgNNa%
z>^kN{Y+)!V?v;+zx*PSkj^cHc#8|FG(=GQfLmf<Xk3WFu;SoeWe2@XLul#Jbt0BV1
z4py-?t$gZyt1XbhoBMj+H;&uADgj)y0*)tC2`jH{Ujd&1vJ4tnlb&u|2EIyu|L9{Y
zzw>j4A=g4>rWCm-6j2x799q*$lqebqID5L1`u0$gtsoA>?7vKu8^muo#zAe?83ei%
z2DWN%o7?(<N!j~6b}?JrC-+vkq8LKlLhF$_-|wNp53tVN-pAElPyDK7J9OpRt5bgs
z&)Dj#^IBv+*c6FyFM5^EVghVae*oL1Yp1PbL(C_AtVJyjV$}MjX<4dq`)BFA?+e2#
zEB&8Gt~9|v2<A#o8VLHRQykYJ#?P+c)b?a55mRk^8Bo00?~u{`c=L2Vzk~80g<@Gq
zNoqT+jZx_an6I0(_Gfa)!Spenc(uK>H)Lw!3r~VfQirDR+ciCYb=3Th<bq4`@Pt4I
zhxBmi71L35vCcqZ*gM23l%hkyABTX%H-k82gRLfN(yDJ(_Bg$nmkpQF3i{xqS1cCu
z4R<cy_ao$OFfdsJ5Tn4pWFb|X-wPp2Y`|xjG|y{GzzBhI$GNYb`|N-DCif<kO8o5}
zMTwL2!Fj%hPD;2KF#%JIf^H!DSva;K&Y!g&2D<$=pDLy<_OMEQF_kBnCgvOfIJ8$5
z%@O7}?O2k!JZt~#1VrJFh%_6Z^PUj@87+<~`&F+WCpcY#hileIX0xG~Rz4eqtx{NB
z@XwYBcv)<x9PnSBy^dVA#Oe=xV$mlV$C6dbCum<s5K@AVBmzUCzG<b%Y>nSD;SJgg
z+%0a>?`{QlKj1Fhx0uts{sK4uW5cq=EM4Te+@xDojCxrg81~|B^QvF%`K<@6O{4s3
zQD>5Hhtq671Rva^PJ=G3=^B5e^p1gh{I<=eij+TW#@0aW!_x{wvFHoyJG7ULC(!3M
zPTLWJFySeTBr&qI??SB2=)wSxMy#AaS7k?>8%fs30rM)vMd1FvQGE?T0`ba3cfc8|
zu(*tY_7_L5$r3+-Gsw%s9r{%_LtHD2<=V&I$0_CeIb3xKV$1x>WYR4js@|iFm_X8Y
zN)V|Csl=^~DIO01LjAeJ`JKdX^C6VnH_~0F@Rh>M+ehHhoj|eoTf5NEY26{<E~p2X
zh5lmD*>o|LRybA&wEvqYrirfDyp^Qe0d-5AqKc`vX4gM4Ne3o;q~vK8;!l*w#}^Y2
zc=zaF<i4CtkFbdMzLS5WZ6CoF5SAM2a+whyIl3;||1e`L)#V8gC{m&}iW?Q;LJkGV
z%=m?v4moeaD%5)VP|jCmE%PMXkk8EiNb$|rqN?u41xLjRP)hIu#Q75#&@ruaJuMCk
zeDi{FxKCfxW`$0HvtqhIZ;B~O!tRQ|GxtxCL4W?UZelyDNdbr{gkTd<KL|HCyPlr+
zrhM@@(rMNzy6=?h@&}0#?agx+d!LL6bJDCV8-kh<%sWWlkPO;1!wp2*TOzVwY30s{
z>)U$^^vv&jlH5XNJ7Fg=Cw|{`n9h%w|MZgd0cND5|DPGjKxQuPL30Eb@A=yQOsRZL
zS4=bbFzc$hZDdCl_@Y4C(njV?yKzdV#AyYbZ^P>6VcZ($Oh+VUq0ubw4(U%Zp@yw~
zCTa7MDib~BjqIV-DY>6=z|@^YpCc;}_wh)=OWYRVm6FmMioBo<wDz`Vmli#z9fpud
z*aE%u`&^I9)VJjYpAOu9*fJ)j|M2%kT~1X%G{p4ZXs`XN4%`1A(?cWqCzz+wU=<;k
zJr!<(kgn3W@gaj&{~508tj_o(Z_TnH@8R)-KL<+woYDT5gTj;c3Db9faFBX4u0T2@
ze-A)5p|A+`P|*MN@{(Q7zbNt}v3uVZBRQx3T=g%PS3Nj${gv>)-}ArT^Nfmlbivcx
zTw-oo&I(|}JN>=W2cbX+63-!tQnmYr8?_*^*%$dPtU38wE9pYsTifwz+2P4u1}QqJ
z%_=?b1nA3LGYbaJ+U=iRdjceo?TuVKS!Q~4@Lcq^cr#t4W-fRo%NvU%itCdT(Oi%{
zO@E-vZ`0)=Q}N7FZUFrL$e(<<d=F?6->L+pBD&=Mq8Km%M9ZZ}A^<!<dK!5+hQyoy
z%;72*+cR4FCokrFB;OMX2l$8o_%>$9J_f`v<oHe_lyrn9n@76p3Z{IpF9xt~|9Ap~
z>n*NlIc2ZrIU@u8doHN8iN4q#kNZg1-%jZNyRlo&o0$x^)BOK_q|C&_Gx`ydC=uP4
zXCtUh-HiJ=KHMvCD)+VJW6RTYA%s&##=&rE`Ap{n0677Jq1OQ^rgStb@*yONzr>5M
z#-47te<5t@0Bez!8h9X471XB{R2AOuqRlb^t>+^4VTw=g9%exHXjsk)WM%lL4eYtN
z5wj+`T$6xv_<z&8{7*4ypZTA){jP$uBy--o1n{l6D<i#rbqdeGAC%gAKdsp_cpD?(
zmumTx-!d)Ykv`mVttq?KgCCq69MQ!B%jy*}zT1^L5a=7oKgRiP)jYHQd~HIX1tCOS
zd#K4&aN6U9im-+EkAo8K;C;5MmXsgwe`<Uj#F#j`20@8s`+i(ax;Nm~>;8@Un6_h?
zkmqgBxW2I0w~@h})yXp3#8+M87Os^N;SHenRZR^=rUW(=elWjw_8qSn_g-tvSga&K
zKd*mr{D2dxGcx7yS1Sw*39t}5duVlcR{aV0?l+%d41OS@D_M&Ub0`2aMN$!4Znw-G
z%RG-&@P5?k#AOmEQk-uez|bn1^Tbu;iFbXG^h<R!zxhtv$}l&@GAa8mg>GUt;Ya7p
z&uRs1SgbOof8@qB!+`?V$)^>|<;lwEO*@*T4m9{6!PUYc;gCkO81>jP8i{$M@fMJc
zf4&E35yW6xKFim9vwAI`-G8wuv4|;s_&{;A?<pm_6yy$E<uqTLhAg`97O&rjX1H0l
zGdt*{CZ{qm{$7BBLC;SDpczZ@-en4$P2TC0g!2<0;!AGF+$0uaNs*7|=a#TP42NW8
zH<+9YE*WqHP-$AZQ(dDyzrH;n?FE2FPD75m@&bZO(P|GgH!f9|*#nZZnWd<;K9!#d
z2|sB+wis}g#Tt1^r4Bzi(7_tmMZFXZt{th3HG$v9L7pI35VL?#fAy?2F$9BFGj5r#
z_C<LfuWG}-Chp1&KL8iKDx$X<^%Z$pz5`G>y4=w*Q09(nl|*O{g1dQkFNF9l<-TTV
z!TN!E46Q+pTUg-&(;~AdV|ELl$|3*`X3^p)BQn9S)Xg|$^!AP#Y3}j4C8F-c35x+{
zYdMMP>gv09;uSg9v0TAutT#kYefm+DdoQwCU^fu%Mx@_JF=oGsJ!GujEb$a>oX0sV
zTSL*ywM%80?jdpW<`3#dP8Tho3Pu$J?Xx(J4WqDiaFo_VJj?nhEE&pZZ}e`vxo%rE
zSMjc}g1lq>%}bl9mGWd6Cg-=_%dW(WxQZ=oBjmilmZpCgmeECuE3PLiIMq*HRV#Hy
z%5=z?mPSL0f<hgd0IfglM#gCz3rpf4an6l%)uO6|1vMTu%bDd{H_tV19$0rgz1gbm
zuoCYGlQ8+&dL&uyR?}>=b{b}zP!C@~zR*%4gx1$*6F!}mg@9YqYA`SQ>h!PZxZM8M
zlwYdr_u6hS?#b1QW<}AR_!=c0VmJoP2z$}*p!hlXOXED;if~wAmk{cm+hlRukk+ZP
z&iV<8&cVB(#?HuGJxG2eazy;j{cjf95~neZy8^~>+7~}HM~N4a1`3uZG4L5h<8SYs
z(G0H`rcMi9dKNbx?u%l_$TtzFqD**Z-VKMf^)-F{`}7htE^<R-AFqF1@9e}uqu^lN
z5B*50p0-Dxx$JgE!P6z02SwPF(Ql<dZ8QCz@oe%BXd=2np}Q-^0o;Vo>t|uOS>dgk
znV5?)3iO-e{hlq$uGb*1ctzx4V-Be_`8(|0oe=d}Ph^2hgz7Wj&$z=S@BYU;$!9>j
zga0tT767Yk|BMtBZ;fgh5RIQ+k4(MlD8x%EDCoj|*|QjTk64m{p&1jS@zlCrsl<BO
z>g1Kn@0S$XPb}xpc!^<{3>vS;&9&HT{Ah!Z3)&NeKEHPM>+Y=46ykJF=Qk*Ibm%rF
z3@}Iv7zEo4;KR-X>Vd<@g~}6hL)m<8!an<*dSkij&WA(S2c#F@jVc=Wj}c|4rIcq8
z$fP6mo`A2vJI>j>p&>~mnuTLG^A9SHjr(hyxk)zwi`#wxcO_brZo*175)y!HkGVq*
z`5I<AdTX{yWjW<5+YS#M`x@{5D_2G7hvF9}r?V)Wq4$Xj-JKl7@RH6b%&NNb7`^<Z
zHjnATU&a9g-yY2<U6CCEgSQ7lnm7i|2<BofhnuEl2k#n=&=`qgP{#giyf?{@h3Vq1
zLP8f-oTSxK&2F1=@d(n;GRRjdm39{TSwQoV%(n4z$(@Cej3vW$n%aZ~I3Cp#C>^_I
zRNXZGz>aOZS;7rWM<HAci=-+gYvKIjHsr$AFJ!+h>q^wRo4fsPHQ~_$qet}Bn^I0O
zP876r!Y4Jow>JvZ;$2t-qN}PD&ug(>-(j}D>lET_B3DhcdOrQj&US4p11^Q{>I>Iq
zCaOVkr>8}E*!RI>ygu``?+7}Te(iDvdLn#c=BUwt?)Ssg{3j)SVFuVkqNIB&5-Y2~
zNX+Y<5B$zCUX`h(x=)AleG_)oT`}g;;^0;FzQ{w%41(lo&IX<jZ%WpfP20{aJ6ar{
zkH~MyBq)hCFK73j*y^0<EDt<h=%0!~GizV*%taaZ7t#uOG4e4r@9p7@s6O;~yp7AU
z8tI;#=j#T)>f!%V0`^83lz$`c5~*SfEWg8J)@}DS0SyA&eE_%2LNlJ1C(MdldsQS_
zu%?^olnhlDX@1Glx~*J6T(RBayH5#zb(#nHia~mUNGw3e`+dpgw=*`O2O3vQt2`21
z9Pc35{P|U@Xqm?^1LaZ*x_srTqgGyw605rS`|2rXxE9Vo=4UM{ET!+UleL$FRI6?E
zJ;#InAjNZBFCs4UGU+f-5GvlMq@=`#)*BwSmKdxZegfS7HWij;Q~=#&fO0x!WniUO
zS2D^GD55p5TlnM}qj1-g?=(sT?$%hiBHki>zy2?ZJNVFcniVF7G3vJ)p(CuA_dSB3
zP3B1BS45s(1m8l_A;)){M>bwXlDSeprTR2g#-D=o_#DS|P#u8d!H*QkQY9w$1zzs#
zvS%lsA%xp7;Fqa=Pf!TEL7<4^(Y3ZaB4NIJ1|B1_hLHze?n$xFvOC{LZd(Puo$Ev<
zVUaA_B4o$Y5+AtjdYa~-Du|g3dbRTlW#;Yd$+cPGT)*f7XtVFHt)0`e)@d2mAJLL2
z&g<j@hTYeI5ba}H3uYawlDNgzGDf<S*(k|CIdb(rofgCCxz5vUKzTdOaB!iR^^nZv
zt=;m@j<GT?9IX0d#s7!ah_BXELwnvB`!v8E{F-fFSlgoz*9>9#3AD-|Q{N|Anuj~=
z3`jhasMb8mV#od>k1GbOUfmw>t7|MNwa7Deot>&3&u|8J<>SkTBuicp+)s;N=Tm0_
zCJ9p3RxXz>Ke<l#PG$KFjRf}@Q>}o<<hs%t)x`t*#A76x%HLfDXF|<)p-ftI>mU}7
zBF=f`#Dn143r3!S<=qat+((N{wu(89OQ`{h_aZiHhzs4qEX1VprELe5S@AhvSq0Iq
zXmv)4Jk_tyOHUZS))>>ptnd%=w!YUT_?~{#xHl07fgr9~!_sF7+qg@wd6J!RcOP%X
zxz8S_*+fYCi;{xXI1H|}RrkeO^Gk91pBSuVrsi&a2Qk4J@xEPy6CCSd8QNeL&Rt>Q
z*_!H-dV59GKw9;IWzcySut(-20e=a$moAoVa9yH3U==$b1%%C^{z!&phuPQ^Y5jom
z;76j^jm+DyWkL2-dQ-D+26P!jXOcRcm#AL33X8!m#LwQHQ*iRQ)E@ETMSDWrxR0}<
z-I$Ee`0db`C;CG7YzM(cas?Jkh=^BaP?g`0iy?@C-1(99TGR!+({-&bwvcygIwcVj
zkJu}0-!s@J3bhD^sFZIQ$MA0!ZYc1PodHX?aQ`>Nwv&xB^o*bGr-K}1pi6A2(iXe*
z=uP%X0r5cWO(n}YVYWXjD&>lI9)f-L!M(ixmLFaPQTm}I-Tj{Ntfzb3+}CbkA}pW5
z#t25I#aiL(;aU&ZA?I)WE*mfMon4-|_QpX?;>Ek_uj%R|U|AP{J;Y8={d>jUQgaC+
zads>`a#ZSfHd)NA(5Xrm{;VzGRRASiJ1EU6#mV@`j`_>nIJ&WNkhH`I22*SVIS1nG
z70yi6s*p=+&{K_@P%n=*`AqDD*p$v^+4Wgq>m?Drwnoi1z-f{yEtXC}^{NJ;kWj>&
zFf<D{NiovJ8x1^}StCUps@}M+kkb;{t@h3*Zt9aWKi>3(p<ZE1fUnH4_QTWXZ&KmT
zr?1`4Q>U#bayF}r`bJR{1J<zbt1_0{e<6i|CN3lF%qp*6)s$LLGF<doyYpMCbYsNN
zpXdZFMl9s~j?HE*c2!;W5N;SkSk=2fIT0AxC{I`$;x&1~LlMeF7vCuiQ|tM?kO+A1
ze7Yjrq`<i%H(!1^Ec9bOCBww0=S;LBG!zl$Q(Qb#n^~mlvm0h>MV6X(B;T|e#r&60
zvHCZe;=ilX{Z~KtKctGtb;JK>n&nO`dY8XLSmcZ;5~&x4(Y^#gq%4Pp-~FE|jQ<Hg
zDMQf*plaj$wx@kR-|rsQqcJx><LV&&4}m)eKKa`XDT%efx(!s?GX>VmPZN9rGKQh$
zJ`F+v;2-cqzUmR*1Mcn1$W!(?(slc12+q5|fD%~aKkdRQgHPyz9Oa{hk&6tVQ)Mg5
zy`PB7MDQ7qeNB-lZvx7{DGY<11Kp=@0qwSDzzF}FcJ%+(W;e6{vsNOMO?2Uu2mh!%
zY^J8BN(en#bGY;L`Z?t+196&Lx_oYKS`>5=Mb9WzxW+xSRdMdge)2x#E45Z$KbD-Q
z(kFLh5>h0hJ~_L|42!=Et|@w9<k55a%}3iVA(&Y2;KGAW{Dne)ceu6hX4U~`vQrgX
zte+LFXySwDfZIY|2W9ISOy9>mP=mGZP71@e&e!?_>BqPz^jp)u=H0(iWc4^1p7Eh3
z4JBD)Xn+TjUzL56#%q9ipkK**F+Toz)Dd6#`760sO1P;qCpDAr$nEiHprmtO{~O`_
zs=o>6|GHXrN(K0j<LxV<FsNn2yxh}$#ibSffG5AXQ$I`y)+`83prmsS%E~=E^$BTT
zEV{gM91|u&j1}EUb#K?zG##<};3gLoai~`fwNk`azaZ<Ul6bRWNk52V<y~6yM~@hP
zO52#s9?a)A13@Kk>U`h^*wqIn?K8>~rt=49eP`No*g}Z{(DT|uHUpQFXX!d$*}gpZ
zS&h7bTg@ZnB{s?U%6}H>4EZub9p2;P_lP!te)ls6D%~=IX5z7o_^pLw1rE>cN<hT0
zdCtd0wldID(U^nls-v)?*;AMn-nvKxzYwi;QS9b=E$cQ)Ig4Vnr6pjQn@)sP!gW~l
zo_4}aLCU8p9({9NcnrxN29$dp&VTOY*%79nE1gYm*r;aj{f3d&9mwguH*;%=nlrM(
zxJG8X9m-48BiUy6soH!?8>~AY8X<Y$6p_Q$ydcNwi;)R<bXALg#*9C?82LFzB{V%f
z`N%#Ll*kO&25@+sNquO*6)ZU*J-;{TrA36vLxPl9S{^4Z3!M434x<ccBG2vi7YWsq
z{Fg%vZIOFug?$H?@5Syvy5>$kLN1ZX?R7<S7X++ICo2A*Kfl>eBrj3FZ=w0OUssLQ
zc1xHmH?N<cQ@p|5*a~WWN2@w&rVx=R!<@CA;NV~r-P^tYM%nQk@cVxab;tkeYv2Ff
z`2P30_&?#gXf``vXmss6)r;A#udh}>zvJ}*`$1Iu{2zMUUlbP^HXz4L|H_z~cp3CS
zY}))cGVMWjwH+e=FA7NEgX~YYNA=%nH@fc{fG?Gxt|4#gfd2aOztG|c|3$h3C9^f*
zXqsw=A|AZ|-cpcOVdIIfI+}_=2wVm_&HV;49L3o=;_<w-uVuClQH+@RT71xL+&6Tb
zN+UoNNdf?#%exw`<I@5u&dQX}hLkm*xHzIC4y6PGtb}kA(n%vrY^LOzp+5f3j54Av
z;RR7{{V)N-aL~zVwdQ5{_}X~G#lqKfV!2G5-U6Pk-MSXqE>YGgQs16&o_f@#DQHuY
z-SD+(WO_W~dO~`YzdV_zQts<xw-=7S!C1Pd)5ATbp7O%$dZ<T7;a=_0uO_|s+P)8&
zo0>GV8%}lQ7xJREz8>zGlT^qO#5$7P($1Rp45UTnnzf3+c|V1VS;)Qb+&ecaol=*5
z>n?2@B+4kY`#D{Z0$jkSx`A=>LtHAM-0*v|8A&1YbzhWsZ6|Jo*YNkwj}!@bRCAS{
zqfQhN(QN@s6d6UfkQCqobBJ?rp_wy2Z;e=uvdKb~FY)WE)@%d)6Ot#;r+Rw`z6wB}
zy8F};Q{FuWjeR^>e&KbHpMOw@Un*<LM`4{%?F*KOD@B3xZMz%GA%d=ioKGr%K)fph
zzuxX^EQq?p6Gs9_#nDI){==c=RHE5-3pamMf#=v5bM;<xrdZ?MOY&znG-m3P6ziQp
z4ZVa2(Q70|Zv4Tw(fM}@varB1Zo|meuYIa{{$n8U5;g6YJHI%R)ZRO5c{fVwUA<_#
z8!1E3ZwJ+LW$evX!J@>!9B*Z<Gn=ARuUs7dBGDO_D%<y%xrfhc<Go;<7)a=B3f;&u
zD0HsRkXVSPx|0^{y{d79n!P=1vxeHSRiFD>aAZ(qZTS2b_3gWX38Kj_Ddcm-UV7cR
zV-uuJXemEQHv77sZO=9i_ikt=PLrTSH4I&v%QHYy>GMUr&|u-G^#qF!C1n$d*Vgc1
z?4ppqRuX3_yY~*DeMgC4g<qOi5kg{z;^i6{S8aHG4{R2P+16t;DN^fn)zzIU7>lg3
zCjuZsofJF8Y#rCJhe>E=GO(3CkL)*Q@J}Bz^+3!~H)mBQ$(6^Y&!lxcUA*`RrcNCU
zhylTs@IMT{`2X}zJyV(uX+3bV+CN^ExyL<Em=p*yBqUsN2VYeEfI1gX>R;fpX(@f?
z?vh(+fB1`kP&K>lq=*WDaxk`J9b#GH<V4#FMh!Y1mb_W9h2&{n?D#B^G_)GBQ@!}a
zdlcyfYyhDbIysl%VtAH5Mjl*E>)DmrW9tw@x}%@;)juV-eta$X9-nKSz9tb0-t_Bm
z-b13)F!Lq&u3imrg7U;2IYoQ0-G{lc@zkAdH``!m&tX}CnEkt_Z?u_ry7-Q3X%@GE
z$p|mv#_JWX(=+XiUrHZvWoF<1KC#Kf(LLZDHz4^=@ASaP#&R=s5B`aCoh(O;AZZg*
zFhl)-e4e0&{JT!Y57U07Nu@t(t5qb5r+Ad|6vwydwsKxJBb>TvYTb!M03rtOV(ZX!
z$BXx}1N*O`u7`!k6T<V8Mt6c-cpHWZ<GpkDhhJD2;3su7Z)UC{ra`nnp{c(uqxXUf
zZwP4{Gr~&l;i=<8+&J`|>f@6_kKKOI(qo)HLKC~FwIx~x6FW5lfe^X)oa%|vODRa_
zs%4`v=ODj4#>;`<?qmo=Ey1qtl&!uicThA5%6+e0`|aHwW`{8&7QlPam3I>twr_&h
z>~4d`zQxhTRe2EjXFL>FdIIFi^sV3nR*XYz&u))9TMPJ89Gj)*?WN&by8ohR%mXCW
zNIGP{S-B4PT4JR|M?57wO(K+=RUwC~!$j3ov3(^v7ULrt^IqjRtaoaOG`Pk<#8$%H
z@p-BG$+aP;v~n7IP4zY>VOJ(=XgW-oQ~c^)nKRkk{n~IvVu9@;5S1Phudn+)_FpO=
zwDC*A8gr~%+aJuRS{exxD%?xv+H4AT915Kiu)|PJ?pLN&I5~smt*slI7T)fVfD$GL
zzpu@%(LFF>8gL0hGQdTM>bQj-oWZN_xt&^{_o{OzEe)zSvSdCus6TIvzHjy6Y1VS5
zlfh;~mlsPc6p-{?0>0{E+)!V0n`+Bz;?UiwEnS6c`=6HbrVQVIm6M&^I<py`+;}O7
z$#{*BCez}l0Kay)25z%qeK^EEtfxqPHe`AAPK0hF50LtD(3(e|8{vRFzmgi2yo3Cw
zETG*Dxl4v$Hnw-3>PuU;8#ZO|67vA@>1|xlSvNoWEG4T0poA1NX$j!f2pHnN|3<t1
zyz+#i0@I583*}m%rUq~o@q1aDU)Z>M5^33z%yYu(mb1?P&K^M0HbGet(+;%6IsbPz
zxiaFEcS(HJ*0vMR*2F97FP{XI#a}P!Mgx<O9`PB(es+1qi*?0{cp!A?s&9^!p<nQ_
zYg9}2(T7L$yxXRASME>mj`<e@u~@Xt_fy+h!W6pUEW4B}eOhsL6|RDFD;CO#8%WQn
zx?a94+RxDhs4B^{^X3#juep3-&i<7n`5R8j90wT$(Rmefbjrc`@Sp(*JFNMLw6F1u
zyE!knIrl7(&3Z}e7k-4HJ6Sa+UCqA#oFx<bjW#!6_fCu;B;s2J->XQzZNNLcc<3th
zN{_vl;8ig`QCKAaC@{=-<c<*@6K8RG{rqi+NeAWG;4Zq|`ibD*^6j3$dKtO0OB(U{
znl^{CIwu~wc)@`A^RA$oz~h}SAq#I7%uhWGOOI!b?x6{gI{an^E?n!{#;%b5y<+_@
z{&GHIbK>cmuiwhhHln{Z#EYq-4BRxM1kx#M-^=9S%o9?A(M(p>#4WP~>jm~Jgvv^o
z_5{apxMSv5tl3v7Ob(T}I=G@%y5r>S*O2Sh5}YQwF8BX!X~heAMBDxsMGZUh`=zE1
z(Z49f-lqLU;ShlQUT3ZG-(%TXes4u$hNcnvQINDkE>05*ky<&N8aV|(l`a5!MS?$f
z%>U*(U8Bh%Uh^hdDhn6@1s4V+i1<I)d(WVz-gj*j1Ox=6_Y$NC(m|>qQIsYE0!rvX
zX#!HE1PKX(6zPJ1f)Ei9q(yos^eQ4vdP%@SPY`1uA)fX7?|1KapZ%T>XXeZ~XWp6p
zfl+2wj9Kf+eLv52Uqw&3CwYQ-%a+PB({cX;X>=bqIywP(e-)HAJ*{!1>bpNz{Z@>^
zyosifn#0TuVj8?Cxf2WZ)In#+Uz*!<cWDktKA`6S4CLRXLpefI&5Nl*2{`1bFggwS
zPtr@4?{1Q;AOS<sU<W+vPqd&S)F;3zFHQnx-!}+gYU<n&xG?)K%}68Bkcalaulw)&
z^3OT|cGZdCN9@8Rh{24WISvHmG!1^e;+adIt92PebeS)DZl?RjB`ca%?;OhHw3-ry
z_O^BS{(`IzTm9Z=6{DLU0U;|t%J@_?IT&!g?-IRdXEN~}uoweL0Cnxbe%W7|k^^V0
z?t^2Ae++VWU8XQcnOA7=>Hjy@GTjF9zpwh&xB9>D#J|@!y7h^3)Fm+~LNgIVk-*SW
zyvqSDx{sES`Jdd=*W~0K=dKX8HavW!#Y<4+cMbZ#lt`bl-TQxOH0G$IWk4k*u69ko
z{qKtT-yP!rVMm<1fnOrN?j#Hi2qT}ZN|S=nFn&n;)P5t(qUNikM^i(N0Muv6>2~+Q
z>!22zz<C`3^oN~|tKn2jaxa)@@jLzWiv1VHg_l-_Xa0l6+y^+HCqF@-KC%2u^Hym2
zzXF2qqqiH_k7>OzzxUXw(ee&;A^-Wede5gOYayqXyoL8srQ&*h-I&8mz@?u6Ln))S
z(M<{qu<OYI>a)`X^_g!~wafSWcLDye-7KQqoLXDigJWMGnmh7B(Ba(gTHSLYjw@vj
z>`OYnKKm{4<N3ZqgCA0XNV#LWSw$-s2m!eP?wfdu0O<42hH(0&rzqFMDo!~`Gxc98
zQc_kmW+G=SU1_DB-l2$)7S=&&tHLb=NZ(j;az2Y$dflkMVri2{{%D$C%ZqCPnfG+8
zjV0f3Iz$Q{{#vrw{8f3lX7jcDT~TXG{M4#rd;9{A?B^f!*t_m;&OfpB;I?w{#TVn|
zVh|cIQ@}_4WZP^-!nCB>-aT$zf#3=r5oe3G(Ba*>XA_&;kn_%5BLvus{U@+mepH`}
z;zNogwBMR(L?z@-K^?udQr&{xV@%FjB-KE;WQY6C8yQ;1dXFe_yvg2u(#d0G{cptF
z)(HVv{SCZKTq9JFl=s-L^kLJ~%8E((6i*VbLciRZ{`HVE6(sIKDhTk}7gZk8=Xt%a
zp87ypf6>?|wNTB3zW9E8(3h0iVBN=#*PUI491VHyrpTg|@dty-w4_QLkmJqsGd$B0
zr+m(sCojcFkW+%2!M#7_OzVZA6&cZ;8M#tMQ6Vnx-GBQ;8~PW3BXJ*?r9=1w>CYg9
z-M84ADgCjW(NtY!+aS|ThV)N;Z>s}XNa?18^!Si;{)b$zkH&*PubzB0vW{DCP4!4k
z`p95v%Si7YUa=YWy(y-x6Bro1EBT*uKi=2XJ6E6y>VQWa$4>4gulHMFhbzvOGR!<2
z7a~MEv#-l+nTxCcMCW$#Am%{~2n|wahajG~Po?4`N-z59!>S7|FubOGy~E)qz~%SA
z9!ST5-kF^(men=4&k`)FxNpeY53|8pM#Dot>^#wmZQQx~;7?x`OO%nk_svvac|KbA
zA%k1mCza>l&d4^^Bs^|@*D=<M6wm{DqcEuq`y>8f^@kJQw>R(2ML{>ayhnTI_KTqV
z8DUv2`4)GA!X9#40x48-M*nQbotw!CJ2o?Xe`x|weV&kan@Jo;dJ2{Vz#Nfx+4Y|*
z{__t4=so|-#1)zh5UY&Rk2$;Kkn>?{0Q0{Q@de@misQZtOhc-f&&i?-k=_&OdC1)z
zeA|z~!-Z6vd##1KifY}hHy(#;1~T8Vn7Q+LUhqO#3=7>g&<!i-8zIo{8W5Ri^d%t)
zkb#BI!ewys2Wy7!AAV%o-3|A+8O#JQV4p1=uKPX8++WiO0oNZ4qLLBZr12=SJazJq
z<_RispeipXuI47a+r7r+&v)M56P~p-V7k}+d}d!xFuR+4lC+MvMEYe(g5o#&Q5=m%
zsjgohd9|IfmCH0qf|rlbycxMTBS62Ia*lFxno3oKd(@M%yzvJcU6{ueBLJ?)x&+HN
z;J2c=FXkR&77{pc_~3Xr!;2Y}8+#!TeCOx8uE(Sid;pMO<o9IE77reXvg`&htX^h6
zh3ZV$_rTp%@7107K0j@d=u1zJJa|bf*sH?C41&xabOXH5NY&j5bcETMCfhihINqF4
zeW8H5pAdMCadG(buRq2jgcA^c4Lj5iiU+LK)e-8X^m5oqWO+){w9B=e3GZ)%$4qXX
zGC|^pup!Z{i;Q3HLN?ZRQCRfi4PqwgzE;2PLAt|goYa#PqQt7MU70|fq~Fq5nGEuF
z7`ltf1_uE)9~%ys8F&x@YE3w;o^45zcU-c2Ya(@Cc-+?bfd#EopbFyUMaD^W$hW^V
zDS%FPj9745Ou3I&#x|#ZBY9&%G&hsWUF<JhiTl<k;&I;f^;!FjM}g5whfa0ph^PcG
z!#_|6Rd*`2c>qPS6Fb|Qoon4dU{wm(!z~<+1Oy8~GOyibz5EW||5D~VLpRwa=@^WI
zc7sz~Gzd+lxPWsM|5?g~($xrki(FqXo{vk%fhB&8i-t0hA2J9%59D!}=vAT95`8t&
z8m{bFP1jZ5Cr-%op{?qP-^}bvwG4F1lE+=0!?-tN8@T7H`RRyUNxzVBF02G_H01(&
zLI)JLgckNORjAW1jS%XSuY6PS$e^2E<h=P4LzREECg8JLQr?FTp@?Sy`l&e|FZn7o
zMW6XE&CAcEU({}ZYPj{Tw-#L+2=)e?WWN1lH~Q~k#<cv6>c5Z5rkTG->vZZ;Z!ru7
zkaGSRk>s^cAJwo}Y~a|2PGh;3sE!)p<Nu6D2uWo!F0jh<X~0SFeTm0KDNRG+rNtLL
z6QbTItwtbyclLkt;Rx?0Mig-g*_%zW#DS7P3#cXVysEMCg5E*>ahLlDpWaHQ-yr3*
zZ(8j4R-4}sVtj!wvVM$w+f-@!hyQQ36v7+N>S=h#3d1M#gFPOrI{;@OT^N>5<v2L&
zyQ)M6134bG3QFjX0xC40_%P2oAHNjtHyHLNv}em{q6d|@hHuvAhXvpuL?qFYBE?tv
zm<ZLHI1Dyf`u-?A+@jrh3cb+ff{6JJsoiR@GQ0*<w*N@*JM*u<1Ks2=4G19ww<823
zQTW$WuOA#xgyn9)d)34hcDG)Y3fj{=oaX|wCJ~>sPQP?6sgfPJ3cxYEbpN)|sd?Nd
zri5Qh+4Y*~P)3Rs`}scBc!L%T%;npwvzzB&a=#yQ)q7zP3y=VkHuNQ6T>-Q(Nqa(S
zS860qP(45F^&|C7E@um)v-2}A-gqgdWW3E_?v{G6RL31Qv-`6O%C<f2OBJ2^y_C$7
zD(8IbD@$&D`<-0AaWzJjPj54%QVMD=_nd7i_gQ2GG-TKQt?Ikjp}1dDRmSkSRL;)3
zQctY6p-UH6;`8iAfUnyS%`h!v${!KVdjeXK%ni*~Er8_Kf35QWUvMP?dJhN!mi|Qq
z65VQZ@BO8j)|idTh6(|S+e0YWbAl`w3O&8-d`CSh8({E=|DQ(A9bP!(5GH<&vBPv7
zu)iV^Ps~`*B2FXTWy2E6?D@h537<29g!T*0Zgh`GMKhx<31?f&NN(3>I!r5D(}ql&
z^7gUWX1q!CYC7k1q~r=29cbp>b=0pP41yRakMT~3t9YLJv63;prtLXS0VrJR?6Ild
z<DRM!KoYGNyP*I+<KBXza=l)+&$gBLx)Gx0q~Ir)$z_{#Nkq~$+qy5!d!F6%UBw#I
z-a7{(lv5fcT3T8XpfG(4r;STfv0qt9JXb+0l@+a{L3p#)jOuyoA1W^@*}s?@|EMHS
zhjPQa*jzJRFmAz2I8H|{EQjGv3f&tq*^eM!D|?}{m||GvjgJK7gaFAI^-gQ1eZLwB
zAnc>`&hTgylg1AWTbC@lzdCYA(;=)-d<rgF)s(<{@WvjkR9%#M01kG8D&Ehrl;t?+
zs08H-lrUCJ{zsjIHOKwK(dsoP)DM9fF?EP>U;yO&{&gtv0E}EjHn@3f9_u?CBV3lf
zBK?neX!q11pQu*>U{@3-JSdb9{pe^sd{Bs*&ShD?ND9B5YwatrDg1I-?LN08qu}uH
zXZZyR-NqU##T;Lno(cyANq@~Mk5?H{;99Dc9T4GpZS~Za!BSvYZ~CKXDlhgVVUG<z
zW<gqePn`%}9g|53H4AGsaZ7Aat>{x6TyX!)NhwcekSu~a{5khO*1sVQ%y<uwF<!(H
zw5S6s!1=>;wJqT3+Myu=@SmOUEJbcfg?^&WKo9*v+{j($832+j*m+L^5)!2#SimAT
zc@aX*G`@BsR{THL+^+z3xGf&}oGJ;c$9m?fmqGCk_WDJ_{kAG?&s;(-YW-ZhAl&Af
zM{|@XUq}dleuSnc**Fji+dk*{KlONA?kq4DnW*UI?Jn2(c~)C=JfaCh_=8=Gr!o%!
zoF=Qn4JxN#wZZ}4&8K-txf2sy@*gLWgX|mUm-7JDxqLo8Jh_2gj07<ypyO)d_Fq&l
z8HW4H()%afV^u55zW+w;4X=0%Ej^@=q`WZ{O^L)0-6J((zqdxQ81WBod`fWd-r9C>
zEw^A6w$SPM{jNJwgLjakPgs3Q(CT?&oZg{uXj-xp$dc@t&FX(AQ{Uq%yQn;iA*mS~
zz_FvRh5w!`y0ZFVoiXjI(SrCzqllu*{U0AcXL);kP5eh}63x+x{CF2X+#W#em1!3!
zCits=7DLP?)C02NWzXLRXDXC07g@LdsKCbGjd3SA5^nTC1@vSKHStfJ3dAD5$FKUn
zx-GG8uq0&nROeyx8QzwWv(Z=ik`4>lac-no+@B<UfvpoyKRwB^tv~#EvA0GM3+1m}
zblsl~#j~(JFv>9Nac$R~tNag!AFUzGsFqMqmLVp5(LeK-#w`M~l&3gYds<9P?Sj0J
zEX&@E@Cpn25&lXQ)#d|A0&E(i^?7}psBEo`vv5n?Qv2iZ^9gV5%*wYFAE~hps`pBG
zm>jQ|Zqw3a0NGlTE?#&vZsHkLg%oDVNiuOU6m8h^4GoC$RxB83oN?ukxtneL`L%FU
z4xMD)cd8`f%NlDJAL&;whz))(Qf`N;*^;(o8N-y);{DzXwrI^XpDMYoy3^Px`%w)0
zpS0?9xQY2bsxSI9+maiIo$ib(gYqE;LBE`lyA?3hUmCxiy(?#XljO%6(MhZ901(=b
zIS-eay_lnj90>aZx$@-rW}n=`hlt!yKSl5GEp1&(H+|PpHn=9FM~5d5iJd1!;*#f@
zW+|Zxc<+kZT%%03*IX*({6)jNQm)M+u6gYnYe|4MCa8i0F(OEgHoK_J6i<cxeonXF
zn)L#cagSzuUOdW#{-aoyVMKF6mplv%;P#vy_V{|UJR3AgQ2}))6-d=~Hc-C8EqoM^
zk-DN{ejlz)`h#D>l_gW$*1^nBlasZlXt6Qt<~&JGc9A-RaZWvxr#M&pCqZxgopR_F
z%F$P6x@2IGTcqq_ifntP6?z9Krg3$NBZ)na42JDb=(4&GT#Pr;7pJjhPxUYOAX$|g
z_QLPZ9la_l#3?G9<xv!D-nHr9U%u8CZAw>o_0l|hSveX6#F1@a0|q@h)4Lsp+q$OD
zEA4tEnK>>Y0$Fo@RhU#nYA%btp5U2C0f}`9!r7)D9dsZoBDAi&sbm8JD20>B&7SAr
z`60*4mn0v%JscKX9%XjDnwE4N-VJS>=v@<mO-*1|_TJY6or!Zh=(tG}Au(Mt?Lts}
zvB@uO_u`My^+%#_=RS-PB71<$Hvao0$xwxlq{5wBS3WUKQolQ1m%n*ynEAL*9$rEh
zC-Yv-j@|NIN4hf!i$CZ?@~gAQ2z8SCa2Rd}(+CgtoI%Yb)l0`EAi11F_eFN=7Lkne
zsOSGYkX{!sKB4cbi?>nx1`>E9FJm4fLy6uC>VpQDKtDY$mB1&idA4*@57Dm>lGLv#
zR)i?22}!dwzvH4pkWTB1ypLVcG&gY62>HcZcE3*Jblyu&<l{Z5@(uk;JO(!utFQdf
zW@nYHB|B#nyQj2Q^ET44-RxDjA`ku+?_{_5q005)iv`g@Q5=KcBPap}lJY%J*4?9x
zau-sA5BG>{#yflatx0*)%W=lb4yKZ+JTK{Mj}Ol@fA~+14IPpj&_N8zW-+~Q4@?#&
zL#f;tu6a^q_@e#<5!%$W1{QNF^qkm-w^H%d2gz%~2r)Rvm+Hm|v9(uhlr29m?=yl2
z%F>?qmIBUD4tbYZ+};jGi1WK%^-RJmKZ7;|Jnd55X`6d?6a}{X(sBK*gjx9eohR@d
zep8+VmWyAPRV)Rhw4z$Rw*lVn|NPqw_;kue6z9QsLGtIENvVe(g%R#lfen$`b1Yi#
zVxoX=alN6Jw@_(KbIF3QtyI1*NwOvJW6veI1gEqi#?psVH0YGyYscUNCSV+^`xSh&
z<puSG3_dxap#D5V?iTETPD1M6J|wFFTh#0SPF%J_$N*I80AP!RHU%0MontmIJ3Hq4
z4LvymO2IYtFQT@9FhpEYi`rQPghMj;+FJaV-yeOs^bee-C>b?<RKoZQ#!oc_u5Lx0
zGR@gLp`Ic)t*4PY0F~=YYG`)l@+Y}}_~qzXX3SR&nBH}YzzH?wpc{TW38RMjA&dDV
zkK7YF)j)qjACD@Zj;@i7I#@AO7i3()EOp|;CZ@@2NI8BEAO)=TR?9p1R;~zRzgGsI
zbb#EvQh!`rnYmfhf?_zsaXI!)=*a0kk}p8!05tv`++{FwZ!b2a)8_M`;fx~vTFYy!
z-(<7=uM0!vJqJ*B#0>HfhN=%gjq(9~mOB-$R;^2~Ey>(k8_zP-kI+OoAbYN19<Ipu
zPQ!e$gU`!-o_N+4lVZ(hLUsId9!f;%^<0A8{LtvZDr#ojE)t@xeL}`g06UDoG#hgX
z9ge_GM-Ey>6s<Td-us#kKt&)^&GCHdpOg2WuLSKtwE-Z-2)yY`vI2TaX#y~B$6V_G
zPJj}`55NSr9zZA07C<cLZT^JLq4N%TB3i$FIMyKxA*L|Q2nNKM!%Z=*nN@S^Klf&C
z3;CsZZl4&^s|HA0+*8s2m2ERW_mjUHwP=Zj?(vqc`GC)(OT8P0?zPvqr;oHf(+|6R
z(f*eHbiA!(iik+ku`!i7c71}y8$;_(Xw1pyd9q)N3}u-8&YWlBH_x=OY`U2IX3yzN
zgpS?}h&dz~#D?s-r7!30AQY9>OYWU?8S~>GVQ%r9t|Om4YwGo=FnBMmr6$Eg&gR2C
zspISwYE)wBCE31u{dUqU*_a}6irU?iA!S#crv11x(%2BARk-b8O{bDd3#7f;kD>+W
zn;0Gg!R8oW_Gcgwjim`@lEe0n1%WL|P-<Gk?6G&4mF1_=lDekVDmp%zm}|Uv{pTA<
z&K1Ps8ef;V8Gg=|^gI%)qu3CoFKqPe{cA411*MBD@h^K_B#Ki%2b;1+S|&kIya1j9
z+<O9}2}!b=<aQSy-|45gUliE1?=dYE!acYrC>3{4H@NC9pX}YNHgNrVX7cJ>=bZ22
zQu@-!Hb(OIgD!V~)#gBvAl<-`kqL|axhHc;{v6s<N6A_pZFjoW_V3*Fc)8E=W{-a3
zJt7}Hj^u*fzyVXg1+pP&ju?vvvej_lSc)Hy!HMpf^R;D6M6a>br`~SDmZNV}mRjC-
zTb@7vQs!dml3}FtNxp5q*^OPxSfm(2m=ZRH)&M8`sW-d$>VeskW2LuWjiUQ&vv($S
zvh*43vY$EvXZtC3r!`$n6fpw53;Wz}dmw_xG$UmEB6n1{Fas;!-4vXb>-BrV0<mxW
zzE@FXPu~Htnr`^?=Wt(q-=`=ulZ4N+3SNyW&_0#tl$Qm@T&kSB)|qOQ(i`p^`oL0-
z@qr<(8e<a@yVQ0*hl2QE6LxJ{&A&u{nTqHJwstSFrtDr@-+V%&Y4>ob@Nu%KqeU|!
zOlDEbgCAqFV;O^K0QV`Uhe3Xk&K(9cZe3K3aEoAZbY40xmUYqmeD7o6sL8OT#_<O-
z4+RP6)VQ(Wl;5DlHuS>#*$(H&Y}o}s(;SPp6JNGwI_%*O9BbkS%StEOkfqtOvpWX{
zRysR^EW_@-e{n~C*vjBTu+9S>n#*5;pgCatZ6qVf!H<84s*SqUl-th%i;2`muS)wS
zR#QscM1nbP$@5FcY=jHfKTQohEZePU*Ntq0&Y>x1<SsoPat_s>AqkQ#0P0B*zjZ=4
z1fsevbRCdk)^<hrJm%tV(4nczmXVB?;*4Pkg$n3v!_`UL*nWame}S|5PnpM}9dfL>
zT1u+f@w~?<|Lbml?sB%IB#}^#lT{s;0n!>6|Lt==&%;$&d{r~V?poQWyKTu~5aCzJ
z?qix3+PkXXyVMEI&o_bFA4kf%tIs&obwQ@@RANz4*`gs+^sBW~Pr#*irXT+71>zG5
zf*G0TqN`mmTq*-4<mRo#p?79P!*beJhn7IBK=ip~3}C760B5)#6;T9BGeAt-uUfpC
z(tJxb{#ln*ve!3*-WxFnuG}sY1Z7-6Ocy82;Lnhl{!H<-di#YxuTl?(ugmjX`f3W0
zYSJv2e=>veKn;V<gyIbZ3hR8M4|#@S{Emd`IT>jB;1Q*eaXQTGnV{O@D%hkVnJuT9
z@`i|Nu+8-#7!wJktN7SK?M{5@`z+0-6Zj`qeyvMHz|YY`Ui{)$mF)#KvESVcbzQ}b
zQbb$7%ChF;gr;oN@0N)aWYdpsfK*7YFAjs4=UY!HYdD`LUe)=n%ik1px~g^V<1LmC
ziVf>JPDg?|bU)b_V*3}NF&H;66KTxJ%USC>{;2tnk;W>H#Y(D<*X4)LoAbY?5OZy3
zpI1;nAvyd)<(d5Nf1KR+_nX|X24uo&lcoYgqr=9d#uf$g18|1FG_qe?0ic1=4v5J$
zOG366@Vv`&8mm5G?0CO_0`dU%rm<K|QWx`7mpIJ8{^I2CSBqw0IhVOJ(k7cW^<o4~
zY9QSpkq!oe%;2gDDGK|+|9t&=<Ws-$A$4gEoe0UZS|hBN1&_A1chVJ3<B*%vR6yW2
zS3N<^9*2SvKx}W=T0`?`oEz}=`FTB9w<ndyVdPzCBjN|@SQtu!**w(;123onMqBT&
z=!!0N00&{#r|~Avp}0beysi=k8sErHuY9^3ANpL)Kv^fO_s6%-IND>#IzI(fHUzL{
ze*cqFtbqkQ+6ioHdjkaUkQSI-1N<CPc%E<3Hao!o>%Mw<f`t1o3yyZlI$puE#tJvM
z3tO?-w{mkvRioM@WoLrY3LN!+lXS@raADFC+KF_d57K<aD{=3-=hSaIwt^+O7`wp~
zli;`h9<~aVBQ%gAiW5MkT3Qc?Pe|&L_Sq_382&EHA#ZlI|E<?pk`%+MlQ#@5RoA{_
z^bo4c0EKUapoN8`9)V)oe#HGop4}nN*{nG=IC|82OI$XV?_L_E6&RLVyWpytDO^E{
zd%pnxhr*jevgs*Qv<d}u(tQWzJ@-QsnhW>MC%#KXzT#UZTz0>te{EQf_R;EDlv<>`
z=Ir#7h~w<NQr}P==puwTPP#-0Cq@FIn2<{cN8`{tbunfFjk=nRjx&z0q*mxMM;C5%
zoVmffd5(NQ3<TI85hNf;{y`HkV=qye%b>0<T4a|~M_0AlXB3LsP?qtlj^5I#&g&T~
z2BjOG^u>Ns?yl=ETsq0x$dL|;60pP^ZvxL)!1u~wY@ZB6K*Vp%&AZ&hw1c8XLICa{
zhRO`P-tl;2>s!+l7f#qSQy|Y1@&;>q^SE{=8j<~D4L=KJdQTbxb$80`@7NHuTQr+N
zo=stmjRjVl?s;L0Z_96sws*NTADm^uqJWgAV~wQ)^0_VG`3pn7g3s!Uk=(GzzN?Hq
zRPJV#g{39H5H<lm$30r~tq|~jl}a;H?G}B>K*O3X7xb>;)?U-xrp?PY$5s&RpFuP|
z+>8*ALgB{dw)oj}L#!6IebrqY6zUo(x*XjT=zA%rS055`f$L!GLsTx3FOIRr9EhZl
z@b1-a?jPsVN(e;U_7^pNLLw8jfIIo}6d*Y4f`KJp+$aS00UzsGF0@m-8rgsF`u^6A
z^s+2P|E0-XM)@4;0OUnS&;uBG!}zJb5UjEn!A|h|l+(O=Idfb0Ec2`D^>@7;yqV8v
zSV_DoymA8htcAf_SuTW4^uPECTFT8@(i^9p*{FRvFaMSy?T|j@?MLBEO;PF=D;z`I
z!S5&I*3v=ska)Q9!`u^>(rO<Lr=^jC%>KhI?2C`rdqg-_JITaF{MYB@)Df!AbXh~H
z-_e*rjiMFr-D||U!N~|$_Lr=F-9w|djxH96TotzYVcr_CR~;vp)lof7Nw4y&@jFn&
zb<}6e#Dv&OW@@sgrKnX^X^W>_7xl%WNwUTRigZ078V&T>wdLid?Nzm=%+>xLo|I?T
z70!m{+!c1SHD-<n>+iaz&i`IpI8WWM2JC8tmYqdE=P5QwemE8vvp#S7vjY5{k@pjI
z1$yWW3^Fl0UzdPhhOG+xa;JY#*-;lEJsbXhaW@!KD>G=BASOpL#7-n2xRVYS4|;{z
zl6{ne9{N>3`y4yr{xZ5o$a$~5<@mW?o}MYt$<Ce@gy<&!JgTg=?8ke*(`;>>QV_!}
zUXaVNp4LgF*${oQ8K(UQwDA4=>I5;6<P{Micb#60d$J_cR6tHb@tNW4S9ltc%qNh~
zCy+f|vEujWXBdx(Sclv+xK3%iH^Hgbb+SQ?^R=MjkJt91H1t^w=Zy|JE;rYwY>yvm
zqF0nV8;mbo-2(XCzt{SZ&s|sm^od1TghXd;Ze!fF%yrM!2y&xwa<bb!R5GJGOj0}s
z?zRq0?d!kIrRmE8faM#25Le%E-J{wx0B7%@b9W40_lfz2q}p-hR|CGQm2V1)HGauZ
zxZ^a;`K^Fae&o*GX}2#F3*-3*x#RI)!@P@f)IIJRof9`?EpLxWqv;l9-vu@b(z!&S
z8rOaXW{0=X-d}qw5MM8~%Oe#1IV#6T7gw=lCFBSWF$MnpyX-HGj_*(QBvd21*Yeq_
zs%65HqM1evPlM@Y@3yNcxy?+OZl1e-P9Jq+;uX_Ogmc3u0Nun7N}<f2<X;-71M*i+
zJ>~lPTJl;wc#nG{V3Dy8!JUOOj-><$MRn=&uT*U<ovV+qYVphzzl-ILDWaX1q~Ec9
zDP#WuDOcB7v)lGUuu)XE9@uh%KwW|qIM80936R5O%^bo&lwVMQbmu@x-5FaQ-fnAz
z&FE#N>w>t>!Vdm*%OT)Vi5>0PKuLa*rKgp>8op*gh)QIDe5Ha}pbkXW?Sj4N+&4L4
z@uvs(zCE}wrX{Sg`MJV>`%eon33=|Zgls`HUcDahhy80Ax@!#il>J8UKS&UuEOj2F
z{r3ax|M-&;hoNv5Q*Q<&p@^c*fW4d|w1Jm|`M;71hgp%VE-wRvjV~GKaqv<UVv>A~
z`iajW20*%MpxmvGQ+vwz9gsOG=Q3zzL=moz;(p_Z9LNx|3Du3^pm>tJygpF$kM%;=
z^XGp@@x<R#GZhqb7w@{mPjkiHbwuclL5}(>&;!VK<leK%1;kfqy)?1VnE0%%v7g&t
z0OaMp$Md*h6Z@_EH+BE9a~Lo&XW$@RFL}6jpET`N0plLbQQhmB+*c>(B6R}UGe@8C
zz%%0wI_L@qTGOB9DW_t5z>7+A#MgwJ*a$3&mL$hqMNuyE5g!N-3huAUwPYH;qc)Hz
z6<{}MPia!mZxI6FtpbGR%VPBe@h(5?%+Dz&KAG*f61ST<Bx1YhQCVy$)jq6C*AXyC
zJAYkoPyYtl7NCDzcnbqgf&&xJ^cAK;<tGw^>K4Y!w!*Ch9SvJkq=wIU5T$|yUb4ph
zvK+mxvG~y8Y%JLvv8u22RnMwq2^QOfId|0f<z3R@f|8;X##8^nn8<={NOsLNT}#fQ
z4HndUgaTp&$)M+qpYC53#$%$@*@W#e)BBRO9d%QWTwCII?yJ<9NE%?YX((fZM&Gt+
z^h=srn>w134nN;5WS(?SGnrSrqB2_daXsezbx|K5QakZ1sVs<~OAP!{G+VM(1v6YT
z=wP^|LKcA5?=xm1K2M(Y57+iTrrQ%L=>{STfK&M;z}y(<$JJ=+^U2C1WIyUqX&d!>
z2TV=YIPSqWQ?0WI3Df~c=qYzG<R>7r0?z2O;GM6?Q$FvT?1Sv{7JrBxO(bbG`1z0c
zsJR4->Ja<J=_g796S;hN{RxSS<LzODH$N_7Y`fT&;UTSG0-zhSO|iaJESZXsYg%XN
z{Awez_7wIoi2VeCA>LzYG8=UP#h_0I7ihtQ`{k2=Y8cJd_|G^lzRnm>2&^a=ee#V-
z`~<Sa--BF|S<fB9@SCltC;SPo+kW9N0<---iM_#dJ8Axb$!Dja-=!RS?HiW61q3FF
zMd#^Tu%{(|IumKbn)7|JR|q39^iZOrpvNtXb&030O|71?U^FI#VM9Jj=lLmiWtz%{
zm|X?X;b)8kLJ1>Y9|qUAw8jf68^=`4hoWz$wWkRbbyv3i*{cu2#}2h&VgZ{P44sb{
z&-6flddW6XE?~FjQFvTxsr#mBjMlT2n6PY{6v{ZE7^j%Nhb3@h*P^@VAO4vW@``UX
zp3wktEO&qXxMUEoh~t-fvv(n>E<yQQoWMx&7xo1q%KB*nc(WccjUirfLT)+#Mx@XE
zVBFOKYG^(Y5D?Z*{Zm5^K70fl|9qG9FPh5#Vj)O;f)RiNALmj5;m2t-0Ax)Y#<@Ws
ztpCeYaP<FmQ^BwQyAI?vey#0+2{p)Q6tglV;Jt$}vf5moPsq8WCq>O=xruvxao9jE
zKI}MGH!Pk$7HR+)1vgpt0*D*MoOE57bQaVIEAADl&P`Mwd-Q^soXpidlrb9Y_F^<T
z*p5L!MiQrzq*Qg6*t$C#kbSrhFCgC{u@fu4{H5_!79b_y0{X{w`&Cx^=(e9oY^mO0
zsJPm%Ydd-<k$Ksu$M}GwlJ&>;<NPg58cI;lV_j4i8{TIbuX+QJjivV|zpAQM@?VeG
zd$i|QoU-C{#puRm@fS=2OU;q0!hvw*jk3XXQR){^eRxs_2td<g5h_h{=nJXJZ#!zq
zdSQjP2_`trjH+CVFsxg=1ZgDn+Sw>yViKM`9h-#(10dvy!s4GqgutioKTP9tF6|%f
zkVBKUIA8Ipy}ecu&2<AqAI<&&BT<nVo|EV3;k)$d+y2G^!b`deHbhK4eMK={ueJdo
zpj5>!NtooolN2HSV%g~jwQW-eXxd9sP%pai=qt5jN@0|zH+X8+aDN6sNrkGUB`>lj
zoMm>8B46QPr&M3P^J~etJSubcjf7+0qwUGp(BRr%w>(T1je_A>ILpKiFS3{U`b0dH
z8xOd=2Cz8zG&x`r8_k1f8;+9XtuFb>&K~C(Ni(VP_3<b+-r!t>TTQ_O@Vdl&P(yq=
zcI+>W0Lf-R=-OmszMs&<o)(dvHWk})*q87^z<7+d@!qW_=9HI3!Zq%#yGK)VTkq%A
z@S-aPg$boT`Wz$-a93jh&t{-*uIeQWg$-SwF^?M}Iz0(p_ED5xtru{AxGAa<b{0@=
z2I~6*u886NUqST~J-|*4$*^BN)IY(yWi_1nDOth6C=V@zD!-k6SbL|$P>`(?6BoU>
zW`Z6lu-r)Rok-Grq7_XsE?Qdh)*`mS+ZtaqMVWtV%ntISy=bdDNVD|$8ul>xP^K?^
zLEaJXwL6CmNJ6r!FOAD@c*cp(T@L4#-o<j6NKNJGB<ycQdd`<aq@yQT9~O^|&N|A3
zF3#1juIN5HKtA>bnNz>4@d0DdratUiuQH==(==>n@RzXdT%+B3ny_!AlyOIA4i7Wa
z17W$R2B+(^;&cNfC=N299ETKybFM>$;CH6G<beR!I%kKd_rmgKX5&|=vXuG3B~z1!
zo)4}m2)hcyX!xqhMli{K5U)7t2KlBP>>QNCk#tX{Wlm4J+`cOBS<{neLCi&tW-UZh
zjauX0N36T|ignKUR+F@<DQ5`bsUdNAaJTT<Ip{mHU=80jrlqaG`4O%kUW?hV*L9lD
zq+C(?dV(pgAK+T6<N)+|$h=>dAK7kWoiMZ~Nq`cgNtJk;9v8WU-YyWnas1iZ(#2ku
z_Iz!B#h1z260S+(x(7&2ThD7{0r#QEhdFK8X{q-;s@9O++k8ZVaZva-1@Oby9L&;(
zS#tUFe2dyn>w)r{$mlM9f@V6fNBD9=MVGkPZRM>@B;@sOM@C4*>|Xc1GV1!`r9u%i
zZ4-Ax#`()ok2x{TTidF)zCtrK-xbtQ{?c6jRk;pH!gwE~6Q`kL8%!OSuxqS4xlPln
z?@@k_qpI>Fn<ZB6Ru~2Z@aQ{vUW+%5T9EP*o51tqAgLYRgoKTNxDNC3(|RCS4(;Vz
z+PB#UTEOO95}XTpSamF?6WkxB!M^K-#c;x)SWk2rEZ?kcN~x4^vGK}{2)d==FJr+D
zI%kG?UIvXesTfOiM4AH8+yBJSLvU4j=omdiToeoLvb|56^Y?_ZP6FWM&MMQC!B$=t
znNjlb9F5Yo2auYIlyBCYX68-%H6FsHd<zFiM*R!0z5#%jqE0e~V#u~xSou>qHZ_o(
z6j?7^Gv40l$S{1F@@<Usi)#>6lfni}K71_~wXlYBiGE#D^CTnl%T}1ihSmh*nC}O7
zGMUXTw0TJ|6&cv7)L!q5$qCcB&}l<-p4OK@_AZbXa0iL{ysh}ELTO-oS$#iW&1jNE
zlh!(rKDfi@p6B71oN4Rj0q4^=Jpkupf}hVdC3wDwc(*1RaQCI->e7gxa<0^Z@q%2X
z-~D0jp1oUT3Ovi!ZQpiJd^p=1|9L=dr&$1tG7BhcJ))oi7tK3e(lNg6Y_}C2NR!P=
z4g@bizR|z6?hX{l(B}P&m<F`Kw@~wIG*sS+Z!j`WjAR`ZH)l3AXuD4}z=ZpJbK7^6
zj<~~279<7qyYQ3jE4NlJPVF}&e}J1-TV$+WBOa=&E$5_Nt7DKB39e4z9ImOvvF|qG
zvvA?59zZ_UD-F=zf?sj{<wYNeEvz}^o7)*?4!@OOoXfxLMyApv%y-cN)0@sETP4l<
zu+v;UP%-?<X*oibas|I@pm@1Y2pp&2y1|NlbOW%d?MU1GWNhub3rYoZA5Fx9SoDku
zTDFPH*NYq@lDRTi>rHleR5xElAGUqC`gEAF6mANXQTHi5X|gli^y{}NjrJ&(Yw1Q=
z*E`!<yHuP_rQhXe_>0ztNoK`Vedb7Q5g3T_CY!@$TS!az%L6F3T@mAQAI&kvEX^<a
zbG{PG*W&Wy?8Q`tweD1zyIktMx4yYwEqpqh=QMhLtQ9ceqezl{NNr;Vq+DD;(iP9A
zN@dd3yM==325FA+k%GM@#KT+Plu~*E&mF(EWqtSHJk}S71`bmxUAiP9*b5YWGMoEs
zbMk2Zw{xUZdxPW1-W0@2MDwHBs}gBvGk+)1`SWoWKoD61s*y2$4}Q56M(0A(z%5Y4
zw_YrH|KXc-sMtE}u&U^sAf|l2kj6K7Jxs5z<~x0-H7N#o3kLEXKw%9y(=z%Xl<~%C
z>XBww?H*>FzB~MUG7@8~yA<Pa(~es>ellJ><8HvLfy?ACLzmA>j@0w{&|v@8kbsHD
zA4nbqI^0?xIxUd;86y}v-0`sOd%k9M(az-JkcX}mtF@WgtK!AD$va(}tV4kc76=Qo
zK7{gVF;y9EO+w>;^tx!(;S00YSAqpLW}xQrCnvfQ?-&$a<7UNWZ|UmQ3xdAW8ZiXG
zE?W}9hmecLH%U9?umBr8ItI>Gva;}Vq{?x~HxgM6y=)j1s5s(X96Ky|Kd`h#^59A;
zYGJ6CdhrWYn}VEe?3DOrS^G7#UGR~=lHz8Pe9M&J4ZZW*e1`PVw=&OqTjt1F*L|<s
z>m>VqoL$K^qs9d+8~}cl0u;w0%54&OBY@XXIStNG0gToA(4{f=_x#<smMn-vW2q(G
zh%0XX;TiUnVxkB>VBqzUJch@VpyY)=5|jR#b6^-HHy*e7&eL8xvLj-6$^_oaO~V<j
z`i6|@#;}o+O-N+If&FYld!-`SZl=Lo!C&U-M*NmUo$MZc%9&x_mIo#4_z1MxX%5`7
znj$~jVTP9+L~zdGW`qZ>R1d@#pU)1u_&Vr*s1m8h;pdsexMugP=yNgQ?u<H-jXYd)
z*}Lt&ry)GHwcmf~f|IC%r*Av<y!vy&V(RG$LhcZ5(^5w^xw+lmRSpMmlhf+3Gs^2^
zrFe`{fhR?RkY~AfX(L?_(YrV|NEXCRQM^hPoDCJe7>j(Y{#HVT<UJRM&70&y1>{&b
z)Mzw*tkOOsApy8Nc>~gO3dMm2d<%RI{lIsqpVAI{ea{9qJ(xP#>zeR*6fGt?^HwVO
zhldwQ7(3DL$UaYp1I2`bS$#CA((SFYpBw@fJbho^&OLW`K&<;vp5|6vjX<z=_0^Zw
zw`?be-Z_N+c6s+b+%L?l1@2B7Aei++>j78@DFVB4cnNNfb2jU{<IKGA{C+3(O9K4M
z2)T(<Y9e|<DE=DaHsmre@d2jQ>D|gaBzc@=YL~b@4Ar|TF{jYr2ffiWB~jO0=lEsa
z%v&|WU*zU>$;ka+{;<1Pb$u0>?!Y2ymyEw`I31$9(|N<>E9HXXUW4t_h(=Y|B(3ha
z(1(X#CHrz{l;IFZasUk44}_KkVRb<)-u!vv<ITw-k*^=rHD_WsVk9pf+q>Cb;Fo+X
zb~ZvITZUy&)7Eb-VCy_&_JprXpYni^ozRbFqnsuAR&RCj%>G_tNrhO?HL9CtnN4pX
zFUrI)i%RBrd$7JWxs?*4qwosDsxJfk(Tft<MKF`bhWMad>PSv+h}Ma1H7g(SPll7d
z+Hq5}#C)Q=nANJN@oQ9vO0bUdVl38!h$1xiVj5V%>^8XE1ngAVq{b<So#Qq?3s<|j
zFA!)rI`ECF72?4Vqcqu9E0&H|2Ap(pP{GDIKuj~|!k9BSvd{0-fgkTm7TXehp!wxC
zuSW!v*II)gtqBmD*y>vTKKp3S?=<``P4vAr{U4bWbpmqFkT5-n;ijk-!&cXN78##X
z?5n)LY$v~8SaWeQ%GztLGm*NN%C)Gj@T<iuYy6&|+$Sx#F<>*D+mBk59K<k$fSzYi
zE{TaGSo#Ukl{&7hZ11TX*q4oC7YTy^IU<dV1ua8sr{zVK`sXnS&mpwUaf%YZ%+hTM
z)cOd|Iorc)N`4-J*09@2UG9DqrB+}nf0<A}uu4zZ%xjJYS`IhCkXq#K53a3wq`ubN
z+52Aa!=v9Unn3~bKO&4nIiHfJDHroyW(N%Im7aSywH=&3vgr8Ge2j7<N%Rf00r}^9
z;30>SE32LK9{y;*J?a?Gz5upg{G$OxkY+<?kdf|3JNt7NjeO$FOc$3~`98h?69a*m
zDPI&*PNf7Mcll4KQ}+@Vp#gBOylsp}6gv)?fS`lf+#(HxYRo0M#T&Jd&5r8_xNm)D
zD$UCLobi&?R7&K7jG%tddlIyNgdDlur5Ytc2w~QvY4R=4k6_4mMn3GPUPA@sxX-gZ
zT4|@#9tbwzv?(tr$fCZe^q;I9fd%v<8=!-r#5KMSTI_+)63om1|Gr7F`nU8nwqu4%
z=Yl~~#$b=cUC`x@spCwKnD;s!M<BrM7}RgcC6kU2p+IJ^b9%=314loVZI1p7;dY}G
zb*>RE5&0&OH#DkG$K<&HCd^tNLSj<?66NtsXo1pp?;M06WwT%&L^mKhBe8IIghl(z
zO~%wwlL_7zWNqN1EP{qW=_gIiB`)5?E1#R)6jKKVI$jut3*C_Qdb(mT3^1v!uZ@S;
z1_iybc98b#sJU&^hQNFvI`de$m`?qFpGOP#B^x0};HUZEU5{Dh(Re7-1M<I$<k>(T
zfaHJ)06l5`{fDw~+DH+U=|`r381+Gp{As^jrs2N4Z)&sI*>?H({@B*lP*_XwMU25O
zX4Ep68aD!xw4fl2=U<xmF4pNqLjvQ#&W<_vy@c=uCBJD4&s-P3{NRdYKLvL`a^0n+
zVcs3EK!*lS(UMqkS_n6MzXb_@Vhezs%mGXL*cemAJ^ktTgBiz%<(ELO)++)egVx=@
z8Uywnxi)zAJzg9-(R%s1Ow7xb6<C0!R7Vc~;{4M_dr5&aQem+^;=||qWudP+oo<3Y
z{$A<C^q@G`ZqsuZ;!qsJJ>eXQ&9^Nsw<gkUBN`7Ge*ajap7q5vQv5ur(qg)?gM6Gh
z@$%2rBL#CUk``t-wuP1=9)5b(OfUMq{S<tY)Pqf*pBO|i;pd)(#uFeZdR7%%IbT@p
z-{lDVS^SJ|&){Xvp;{cQZaiFO7I&YU4g-Hg@Amm(lacJ^>-joA;>Q&oPM?x(+6Fm3
z+hulngZ0B+v^QzLXQU+u!E!*n6n4B=F1Fh0b1Xr(|Ag<g;&^2?zmVtKOv?<Iv=mc<
zprDAD8vAmd#nYmnS47eA4US9-4f(Qsi^6ur_u{qhg(c3?bl=e)Km@`-fHw^C`C1*r
zTK56B<LXZrHIiSIWxzWZAHFru_<eIFn~SKxxQ%S7CU>N)UgT4jKd*vU%e;UItm$+J
z0s9Y5!%Rpi)PAOJdA`71dUF3_;>(B!7^Qoz3TgtCu4?Nox=9b&KL9E^v<1l?Kh`g%
z>Ij>f?mclHm<g17na)?sXVPt`dv}kedp!&-3Gg%i(%|?39NjY&)Vs=xsjnrBujH*+
zhJPqtS$bdm!JOFsBS<*@Wu=*TVvghm7XLffHxYU(Fw_vh4}{{~`olQ9^CtTJJSQ6~
z%_24nWkXiPk>1df@LMk^L!)WpVJQdV?!l4J#RFr~iy8_IK_7)LU+!w_UFn)|w^yp!
zvklSGB$zzAa(SS|BiPP^=D3bV-<Fh10O8QP3mXCS+ih!u?a1fGcW3=VQoZ^fFK#ty
z>ED}r={qdZBw^BTQF*urzD)D!`Xe%A=-@d@pg}o(jirl+qA*icm5}zRu_T{tz$wLP
z>V580Ggwa09%~r<g7M~yk2MAbW4{>tSFQ{VqF5V5>4;GzhH4nZqMmRz`*DSn^TXP$
zX4NXlEg_9K`~4axwMuhwHlBy5%liV*@-8KqWB*C+UKU<>Kq%}*L?{sLx+>+kGhe+K
z>1cB`w#2|Y`<9V=K(-cV3tr9p>82a>^aDbGIvIvMBSm`Zw;>Ep6mlNl&;PJZ<TN<B
z{bkq%XDKlYc%n)earU~WNqm0uu2}xWI9kt{kd8&~>9BVnB)O>S>wKZyD_ZtxyL!~5
znWM$w?Dz@36Ydz5|I$dX*>At-8g1<d0(<YOOy_A6z}Br9GwDFxoNl=KK)#Z+w;5?C
zm7~R!`HBC*WiHKl@xmGS4Se(S1Ee4N8CAY+N?B%7eUIHPX`8AWA@-G7I<;4-o?$+v
zKeZ@PrjaIS3O2L>C^Ud^FO`#Iv!P6aAQ(uG!_*_&hpSvW+GiRS(>&*=E9F?cU)*_D
zmzkOw++&U2U31%rVj)(MmfQ%!kl6k!oq+fJH(#=&o4<dNo%>-)WXNdMr&quDR6jO_
z(J;|T8gvl=nHymU=bo4T6n-5zmeNp6W+6@=yS^w|6iJu<9`<ZYiVTs?h1YNjyvV4G
z4(BNN1DtUJ2y}L*rCkaXBLXs}5yUtTOSbqj5Ce^SEM>c+;0Djgd3O7*{K&cC?}|Nk
z-{ToSW9q^O4?y#9{u#Z1b<1aMW$BHnZg*!wExCNWA2Ynj+xC4PX6WzErnoW0o|~Pd
zag+7hagXiE+}+cf4&KLPZASo63Sp1ZY}l}z3VA(RJv&&h5HGO%n)B7cYH6ZP7Kc)Y
z>)dk~<Di(%Y1;1ZgV-miK1*)c^!0cu5!i{&b&1va{&a6_&lqj8k*jA3Po)fMZ@<1T
z7J)zm7~HPk`bzjZDj@{A$Zrs31~tENXU>H-@-gklwppq31o?$=4rwb%w)B_U4?2U5
zUG>s|kZKKR9JIlBcS$+LAK3u+!?k0Ccf>AqC}KWgTBeFVnXLU9<vpWrb(q3MvyyZQ
zL61#52iHP-^TCUB>#Mm~B!iDN$@mU1K1iT)F&B5udc`ZtjR=j6w`s9{U3x$4TvK@e
z*kbnq#!|5wTi=v4wGw7{Mowb$ja&+C*2FDqddq+5et~9%PhU)?n)l%XPLtbE)U3{s
zH9E{@5(Qwh)J6g`-6@lu*Qe!}&1L<IfCQuc10b{0oioSlQccjnV28}#fIgK<dwO)$
z=-(-?P-ZwFZ_WC4`VI(<pYB~d@xMiG!dw;}5|bfGl3n3mg!p3o&>%u?vIBDVsc7n2
z!jZl5TQ{%!Tw|{Viw)>RG4h%MD%uixbi3$9GphL<4t1K;_Jf)+4m}6A%P~Q&XI}~V
z(wb&+1Y%RGd&4V19gbo8X&5DsQ+|GuJ_KDL<5uF+@#KLnK}>pITS0Z_`_7Wa?D3;H
zTbl~$l3RxjLxJb60km(x?B+S#3NMqU!A`mumm9sSr(3>EdaVv-?+<CIz8Hy>(rQYV
z5Dv9b+PwFm&;RZjOPc9x6NV)D(;~7yC2$twd1?tqun-_i8W?)rM|MSvR7BDHo0Ykz
zBQJ;Z)0vpJmyZK>J9#pwePB-PYt~>RxI<Ny%H70GdX6WYPzUN1lvx9m)I}d}$vi>?
z)Z7mHSE1Khxf`3te$t9Ljz+g6xj#sITLT*rVAX>Vl>qbJ1EB6ZyY(Ttj?6IUeO#nw
zG>RYlMtDm%B<976e2iZ|J0C9EUdOcWy>?x!h|G^1siJ89rTJ)${ORWe*#%xLBTY6z
z&O-nIe~Qk<9$$$Y>Q7%tz@AVzNwN6$0e!id4q1=JDP@XIsm8bX4-f5E&;H6DPVGJ1
zHu~mLZYOdAut~8PQ2@U&?hgRWi?};<6`PYfg36tJ4}aQn=C{u)Vhx&dZj7vbAHC~w
z?ap<$3R^3{k~Jq`01j6VCZ$XAA8v#%r}v`F9-U(8r&NV}<i~(U`P`VJn-xVtk!N_~
z9=8Wql%IQx`fABW0&I1);u;QE9|lRMy(n5V^gH9ukj4^nu2p8zSnAXqF1ij~XzoYx
zbup9v^kew%XBm@{2|hs;{hkW-KcODFN0Fi0Tt<q4r89n6zUl$-XI~=$id}jl=D~0L
zL7IBOjlMN5jpO@x_2lwJbK!}INQp%aoV)z3Oh?_@9?5bd`YTJau923zqZ*bRKoA7u
zz!=ZXuK-iZA->s<SPH{&{McQ5n5jZ17nLVq=BMB&9jj>w?*nu}{on==Q0Uf2^d+}3
z4PyBfu(I2*@A^V5(~lGt(URG{jUAlwEV+lXV~kSma+@zDCHH~sC0)prKEsaw8Irl~
zmQWfBd)%<cx#8BuEZ(H*$Z$W0-tUe;aM^c}B+93(o9328=Q)HZMT10(6GkR-J?y=T
zw4P}g-cu6WmyKPqXwIr_>B{j`x*yNr75JAXOr@LeDGEZJ0*L^jrfz*e*sdYIXb;;-
zI7n1)*KBw*NSssMR^c;|DtH%WGFG>rDGgavQ_cGY9BO1wD==QjL=-zZlwE9*V)T*p
z`~&j7{_BXcfs%zqA1xor`jEm^oo^gc>8%NC?gr<HeuO=^6W%ygLz$L3`t?cJ({g7^
z>;V^mKU!#4dxY1<FY<7k23M5yzIc(~)onA!`xDc+7S~~kbB<hpv?>DdAMB3O6NNPP
zb{rf>v>O|rk7P!q_^djaQY_}JmcLw1n}J^k4C~I5cJPaRLl_PT0w%6YYGv%b^OAl+
zfE$rzHa5g)_MOh*q5;i$`=%P<#~+`pkctVCxQTx7A}`Vl6hpm`-pl#Z!RbkJqK0Qf
z()V?#!@<gcNA>a71}|wHFs-T5$;UdN8o~ekuRzbiv;hrl62EMkCMnLm<3~+RhqrS+
z(zND;8A_;wRv$2S2BJ6=;h|(Ob=F~Gs8g$_tt^GYyHOzYb?h#+zu7T)iDgVJv95vT
z*9b@ag`9PGKbNMgtFh!;fQBXW4CsjI48ZuC1MZ;ra0@F;tj9!Z@7Bb%=2)&+e8kCk
zxCTj7<S>306jVpBQSK1DNehG@j(BKaJ3w@Yl~K8TQ#I`e#+y(9zGdFm9G7~CmG1|)
z1f3F#KZhj*#-43=KC=Uy>C%^O0J9%nC90TjAs@a<Ot7o2xph{YS-#-gA17nM&<0O0
zc@gWD7cu*Ct!Em%X7udImna^D?*Q3@36cnO-hk)LLB4!bB*e@%{}n_z$51Fe{!E9H
z&JFI&x2!UsZtx0-jRSeO`Q59MBnS==vdA{H;`2#d5qlb^n!kLV?VTOtE~@ZQ@g{Z(
zw`o?FX_&;}svBq;(mQW`PQ(`w?^_kp4vujEe6FR@>leSt<n$h!SJ_Z+oLoypB27br
zg4P+!c8U-XAwrtK9we?Rofg4m9+%m{ZxL7>8k-YpJPSYjI!4N-*iJb`y~c*V?o$nU
z6gBHfU#N{-lqAZNY)X;6bwJXU-+J|}oxMV>Nxw!^{hCkns!OU&KJkOpN|4M8A#AvX
zCeOzC)=gWw-{+Hh{tx!vJF3ZU-4_Mv9qBztkuFLVM1mk)L_m6ph)4$!X%Z6z1nJTR
z6r_Wcfb<%AmENRC7Z3;}2q8d-=l#~bYu&ZKZ|!sT8RMKW&e?bWkr9J=dCAP2&z#Tm
z{0bTw{sKad^S_TSir`mhvP-#Sy>NFz_l8a$$Ac#+AHS_P-w<gm-+BFaId;Ix`cG6Z
zTs8LkZ=#G{K%vnDs8yYNL0rZZSkZl`SWgwDb%xj39Mpu}&L!#k%BFXmvUI6mNM#4Y
zM$`_IFV`gMkc7;gXSLa1d{H4O#cN%Bz}x2HTxZq}Y^$RH;W~Xat6h`=vvB7rG0Z_w
zT&CEjYJ`Q2-^BVYZUOsoqSouJsuB)~(buSZXA5PRY*Eg5GSEnxW;W|!fxOkNrRn@h
zoJxxrW+p<Ku)icaE!idk`vxInH%!zh3JB_)&yG%2iIH)&mt46fC_<)7Asm&|E+TRp
zvNwb#0K^HL$QsbWjI`qIjr<I?oc8^$Z$7!ddgv4)rLJA4V<na(PS$jVdbJIE-~Y0m
z>^T_@v}PKPT#LU9wHT8=ayy`RQ3i+A)VJ0rl4T4TFr6l<P4qD9GFQfw%s;!X$QKd@
z6q(478>et*<k=B`7RgL(1EUMm5jgH{2s>cFF&a@dbhQ?JvSuFXFpIhp%dwNi*^sql
z{a*hTUE#uD;**=Fx>6%I&;3_Hz%ni*%z^o!M#VqbQ=1g3JXUL~T2q(q);3+Y5YW4l
zo-7oua0+p`Wk^AubCCq)MmV3<_q~Neau&bMyDh2C?0cl1&8@{@2mLySI8xM7Imw?5
zrPwb{G;mst{K4tPj^T9IAoNSx#V#+4LTlYJ-QLD+q>9;cavJ84XQGD)hP&$`7tv=R
zLUde9+Nxm(<Sm#A?ruxc>FqKfmxtc%vQj9zh%6nf9%WHz3ZG>&v0;3y9pac^B$MA)
zRrlkIf2|=Ku4v@|m5Jf*J%P|cA#3IA-5F*HPA)#5To?kWQaSRLo6Mc_i1?`JpWPV|
zdp%8<%<zqP4IoFBgORLwu)9a?FvJcxGd?Hizlu8XGv5}x<jbGV#*<2iCuvh>twk@*
zEcC(a{d?k>b~`p2a6Sdj^Y;^2_9zng3P=pSC5_f`@2@X^KqZX_n`oi6dl2*7>k;8F
zNxTty9mm}cxYgNN$I|FFJ3pD0mZ!Q@YGd<u=xv^Qqv;~OsSfE4*X%WTIP5B(`Qj4-
z>^vI2#MyvBM&Kbu^FLvN<7JeDas6nAI+|_93Lz?{GDk(HR1u<ZHM-@61@(}1tlu==
z%jM?eF<{+f^KDnz03B;(?N)p(m&Aefsd4-I55diO3lxBBTpOuX{fMc?KE_RNa!|az
z*TlM`!)e-E?HcP@^uxAH_$fall}8H^og>~5LjlA@PDxd|0PS|d6}0y2ed#ON-5H~A
zJ*@Qu%cUG?v3~D9?(L)E-@hZdLh3PqczRLQ!n#Ihy^>N`v_dDoZa&*WQRc$6pYGrs
zEA{n@%L#RXeedQ{WpkZ+BR?Zck~+zHcFe4a9d3P18)f9hWYdSv?6u>XbT8C-o#0+m
zKIS^+m^&mm3|+w1-HE-g7OVeC@1E-Iixw%TI(Q8L3#M1$dUA~;IsU<nQ|Fs`wOc5y
z=eiUzw82wQ5!{W>yT~el+zVl|XpMhM;Gc6n3(sNgw!1s-s@$wE!}*}^vZa6&$Cb1n
zV)0F6&$BC?-Vm<n76o*zW%qcHpLZ<@&c?;()4bCAjw(`^XZy(7@X;Y)?ORg+$Tw>8
zJDl$1f+W-raVh8gP#aw6dNSJJAfCVn)tmxZHnfa0F1pS?!8rwNzJ5>fT3>a7)6_h1
z=tKg=5QgMX<$}7Rm+`{Ow8sJR`j*reIVKwoA!c_DZSFqnUm;Sp?+1wqlDFJ4zbJqS
z;K7)y%{fpV+^w}+L;8T!Q+hGZ?%S@yY~2X1o|ZT^H$8&3JeZqT@fvgz8&r(}b#Y6_
zF4xS)t0+pphRUybG))&p2%TPgWT3ia-jHEot@`jp61H!eUY$%bL@Y;94V9^b08B0r
zGeN{K@)3H9&5nMO=hCGz8Z1ito!$+%$UJo|&U@Atv_3sh$MH6$p9wpJtJ7fT+ajyO
zUq&We*pPFnjFvSw9tkWO6zfgMRqPjU=>E9$@Gap{-YnR0nQ{llhy$$xVk(bs2ie3K
zc$s!T6~v;fw3^c`scu=)W?7Pu{8%CGf!d?9I^hvn0vNMsjP}Y>GlqMuE~gA4_i``e
ziNDpW>9Q}^t0hVn8>2?%l6ivJACU$+23rvwUC075HCr*@M?t@d;!_nbyTESLO?pNf
zkm_IxrVA#mPeYVa#!jPh2H!noYyW9e)Z@iMm|h~mDHaeW(_qS#KKXq`%gZ?9NrNZ}
zD))*Ep@7(_SfK+2p`ixxR%pI4`97o_L`x8GUPc?lU&bp<wa69&Q$)lx^2O+vK`-9<
z)2O$rrjn~^o5r8Q?d}L>oH1h)fKtTq^0zc|bYWz^oh#R36JHvWh4sRK)TAy?>W?w?
zD4Q>DlsNi3M)e*AdUYT05T>$e2`pBG$+P(AJ%oqAIqUa>uul`~=d@g~93kB*&!Oma
zk>~&^u}7`XqCV|o!Sj%Hk=Pm<1QFoIDeL3qwk{pT{#?_s*d}*?E!)vOTB>I}N=@UY
zHaBZ>X!y!IGAG!CDjP<%`J8Eoz%niuJ*;7!rZKUmPZJa<+~gvdxY$H#q@VhrNRX35
z>8H&yigyL^_Dri8?T>wp3;bjg5O=bgJ?}7e6m3Mv_I50`?xSr}zwR;06=57zFoM=n
z*x*-@UEk_jj%g#sAll_HC*9Q;65U2zXwD;Bm&!Stg(b--8ZGn<IvOA?5~R&p2zReS
z+ySx?`gMbg7W?`t^zlioZ#Dx?Nnrd%=2eR@fB#wQihZGs$M4k1>Ia`Bgk+~EJ-_h=
z6V#V=`yxlh1nu<zo!yzu>cYo|o{g)a*&X`+xC7-x{GnNSz9^%2UbI7Iv0Bo3!TjSw
zL=FDwn2HCw3n03kphQ+i8C>n8Ca%>4LT|6#zNK%Pv46|^de;07w@=#p2qERGl+@Rs
zjiyo*La$zYg7oo;&#H3a)zDuFfHZ3aDwXg`VRKWWp|H|#4qF=ZzOyVr0$-+AA=8#G
z-2vi)*8-by1e^uzVr!0KTcSa?r34k=#3u|>u0eYl?RN5gFVJ6&ws0~G5=lqkP(T)p
zFplH{m<bU*1g0tIZ8QNwIk0km%hrzB{#B`FknDi(jaUeZvGa2JgDUlo>9NojdIxOy
zMFfG;d6@)i?Ta4jP(0xitgaf#B&*B$ywsS&>Gu|uv8nm)^|+tnWE5)!?RnQ~Jw6FR
z4HPKE0V>U1VZYjSFtI8-z@^ac@=(-3k9<^p?@PIgY>Qe8O5aeNSm;iYeO0K-%g|4(
zrd~opO(nlKEol$o+(jP3mo5X?ZvQg)&xw(?uYaNY>inNd?rozt1kwn`Z0rR*I#er(
zaG3?D6n`7}WOqFap3MG8eaT+ww)#3?J{vF&k<x08kZ&sGOI{5EF+pG9EY}Y>V3S~y
z7Vp(o;e!`o8XJge(Q~0lwmHa<xTWoMoZklz^0oy)#LVn@%;Pio$Ik23UX7S|R0rCv
z55nHY{!<nz8+)xqe^ttE*`lt-Pm^6h_{9_BALao|Jf4pzjazjpc2^C%k<lQQBM;aR
z@Ey@0pd><UdUc03OB%|yi+XXwn)kG}Sy1BLjC1@c;)<`tG$Au^zmnRz(aP5B*01T9
zzC982WFLqM=-Ho3Q$f`ymbIs62v{G_Lv2CI4ey#HX60XT>BCO|e^Pk7+`+<VADB)0
zMWg^at7tXAT?#SO_eDR&jmd$U8>SX{DiNQC&^vz_RpKDDrL?fdlwX$SumXxiQmbCa
z^W%2?&oyDA%EI+g9#BJCvn65lUQ&5&PxssV3z>l%WDg!)TcOvuw7_#?ZW~5~>pIti
zs$qH)90Sq*1V#i^_aU#<x%pJ0en^{jpo<%eUU`e^gk{2b_`~-jd~MCSnLWR>CZYiU
zWaK7wN>+@-I_NS$AT4d<!y)WykT`wzc$HgHUHbV;+Zi#>^jVu3*=XKAH4*G@{Lv|X
z=E*6zw<AGq%3@b#cy=i6?d9aK3Yzm@mF~v1NkQ)bU<`3*On~ZnUnHqn6D_mQuotO4
zJn;E=v56Sv2Xs%&n`w)cK>si()wvq1XGvuCh4dR$Y5>pE#S7a))uoO@PR-{X&@mBT
zjX5d>Z#j5$+_{#o05{yO$VLp*e@(hS_+4&cR0I5|tn64d2*<nb71yTHG;@rIRwa#S
zSJu#WZZb%+D?shu{6+UQNrsHr`bL=9UB`}o_k9G@#cKjHUL9w!63_W_neoU)&ibQC
zH?y6!P&{`!zq7-G-$cz%+l4NNhN6I3YT07mESjI_ho^p6YXF>YfbA36ml2QJTJefz
zv$~szGrWgxnPFF4U7zC}lE++5kwOaTeR$J1JX>yz+d@07^@X=7xFBmmd*@yrrEZnq
zfZEw^2Hn>-^_^6QmglKNIz;y_snM}`M05#W%xW6<4#=Ut)PH8qtvznUoP-e+2orX)
zn5#*-*&#AGvjNX5%B|Sa&v!8zo{m)4Yc6XQ23}&?37~KxeNN}|o2b^SLpoCVH&HZ{
z8>mZM7V}<@Cjt)@4GDCoVLkhXgKy9bNi<c;h>Ct?r`Lk5<|cdFR2zpGpC6z1{<O8M
z{$;Y}9}RamjEB&-+yQKZaoV8Q<z8|_4Yd-(S_JMAPhNXTDapqsd|du~R$mv!$c0k)
z<zKO)eO6T~!B5HsSNh^m%eu%`^l@*}E508IE9W-PwHAAObg56<z1%|~5615o-e@S+
z5|daq#0kVod43-qM7&l;qVfP7BO{dT3r>Tr)hEx!kndx;VM!m`2Vi093br(M=>%8Z
zD1P!P44O`C_!%~^ObU~PO5qj&Bq@XmzV<jvm@txc+mekV2iasVRIerK@Ysk+mpXW`
zg?I_6=3FkmNw24?dCh}xp2S6EZ5#huMS2BL|DuC?GH?X+f~jp?+vso!ZqC{B=mE~S
zPU|4wMd297w=NC~Ba7q|n;m&;$WFr;2n|8yJ8TkOjm`90b3n{5e8WX_`50gWr7y7>
zsxE1%THVxH-w1ga;eON4>U=*c9-*JEH{=GEGp{tO#g8Tucmb!&Je61L2FiUVI2i&h
z`b%K;yy96!3HN<f$UEx}9{cxO(|TENk~mfU&+p;6aIJ2bpiX$rY&(sPFPsVYlG(o4
zMOJ^~qG_OhgFI0)Cb*)@lf=obV)FsTGRwn0_90bg&1<Q%5L>OkeVYvH3VG+YKuD!j
z(OwyrSTzo~)ZHEbOrB0lqV)T{ZsR*{ZtQ7{(OX36Ho@)oI_~AK=FnjE=@u!pbvqyD
zENZy0C_ezMSKajHtpf`do6sk~lpkW69226QgLsge;%+W?J0nW(gwnWr(;YX7J-~Sb
ztz#sSXUi4zxn7dx^yZZN59@$A#^lKN&7KJ<Y9v#$vs}V0Ga2%43Oe^vxdRe<?{tY1
zX=(Gi-H#eE%?v3ThhW4smY##J)gmZiV}MQrN*ef;fPT9QDNGOm*fKJWlGW@wNiv5b
zDacJy9(On2)!4r|?~`6T`UX_X-VK_U02(2^UT>8RYRls?1D`vM?>&vOIO4XLkl_l-
z-Ni;$*nX&twS1`ciTMM=<e5bIQ<>!yyBTCknd7nVvcc$ExXbbzf(oXu4?G9Bn!#8n
zgr?*Wl)LQyTxHvs;@T)(_o!;}M_(x;3EQ`oP<8|7fcb1U-_46`yw|EUwWVra=}sLT
zS#1+5olwkfFb!kgjt=X=Lv!UG{YApT>Eh&EIdqir;(d$Ynlv$<7!3|nv3~mE!a;vB
zQ6kgti4XU_L(La9<7e9HZi3hbv{z&sKVhUz4zLz>2bs+H-U?uvpW`&mcWjL8CmSs8
zEKy0AXp74&Fo#XfAGh|dGzJ=G*1+GKVVlnde`b>(D=J^B_g^XXV0zrGI9Cz(p>7{h
z`EC&LZdJFxpW?&p5a<$s9Jj=a;(YU<m*ZFNv`xJrmddxvbRDyDDHPZny7lpk7k{0t
z(IGyii#P9REoz2iNQ6^U?)%xn;S*)6!jJJ`a0lSQ0%ONbt^)#C-8f_Yc;;OrNDrL)
zE$Sdkt06H^Am00l!QnG*9tR$?h`k&jer6HjaxhyqnDB(FnGS1676W*}-~l&Hrc*<+
zx4_<0e%6Hh+!R?U*khK5y@NI<5LP5nEKB(YlnOoB4rV5>!RD60Ggr}zO+_JutNG?P
zb{{lR?m5=I$v@nR_qI=yV!S<|#luE3xeI3lyTK#cL}pR)*<p1Al`-L_R9k_QJ5$C~
z0yR$E4{S7))A>w|6`OJ^eCnTVyeiRT>iKnRL~J|FF1W<fc{CL1Xcz`&s`*V61-T4%
z_KM69Ql)FcoJQ}>)5;2WEp$`*rLNt5pfb~$MJb%2zFkE00$!cg%Mh0(jjP5;ug3S<
zQV$ubG6%Uu>v|~c>qsw|)yEkpevNNqHB+blf_iiwc?ug!;L`cd`ql9@aT=#_P)vx-
z?GDy;IJqi~3y9*yu%TyZ(3&A*K-8+2e5`}fvlH`r@z)e;m+l5%W^uT}vM!S78jvEd
zC|9wUmV#WNGIq(2cLE$M!xo$W#-}TmBCMWG9iPm3p|5NQSY)pO92kj)_P=98{{D<N
z2Qd6{@cvkg384$n`8g9V#fBh(;?_zm=ui;)5NO|^kc3Fri%%7UVCt(uK$X#o=?}R;
z=;Zl5*jEDEMIKbj!!hXjnRMg}bJ?F<-9+i-SB01;$@H~)w6yM!k{eSZ_|eqx4t1ym
zJTJhy$0z{Yqby(hRm`_^SpKVd;+){xz0)9y596<E@!=gx&$e!!&qSF4hOT1&#u#+q
zah&rK=Q~}V;8F?NaEcJh+jnwB^TOrASuXc+P=C%NR$C#WP!l`)CoM0)x%00Qok*wB
zKN*0U$z!ikpyXMMZ1VmgLM@sbEPyS{YD+$|R~e&FEP@ZHHK2YIG4%rqIzHwvVJRIh
zi_kj_!00><f)72dIQ{&@O1$mH!;RfhR`0XNB?D7aG5Ou${dqw_#y6GL%VjsQpBR46
z{@=<HlKU5ipGInAEM`x$%eUuiW>e2hFIxASpoKKa;SUo&y{CP@L9MGzbTQLW^hj>|
z4}nRu`>lxWEVl7|yvCY7?!~Z7r>s=ZaCbC|k-+g&zxGRIKbvCXs*0?3=52oZItDQr
zl2(1lPteIpm^z`&XZ%A6^En{&sq_DcZ~n6wr+8{X@we?W-<k5y_fzZ4j8uLNUFp<j
zakcy&|L8N-)!=L6mKoP-^8q`q>e(Zj{#rW42azK64X?Ws^jSj1x4VGmt;<<fIiO=*
zcy8Gm-uaEO&^nS$i1dj2!|SB`BUqXYV7BxB^Thb$-*ox}wOROar{M+ZOnn%5<KZ}R
zcZA`BO+v)@H_<!!<%?%YzlrY1m#;FMvdVd#{R;g}q#1R=&H$((N%x~gFx9z0Hq*`e
zC+n0}y_Wj+<wGf;vu*1MKJuCFd;G+A%1Bd2{SVF2Hu;M@@J1VKI2$XogFJiMg!odr
z4w;uF=x^--dzQnsQoy^`?K64&m*U(g(m&Sdzj_4jA?f9!<D*i46R~-EdJ0XI)db49
z1wC(H&}8{l_Dio=>u!ZZto|ic(+&!im#n}V`V$ZLpI=ji!Tv{Th&H4DBT2;<3^_Y;
z|EvHHjPF0j``0oX^wwmH{n{u6uE@A41-?qx+|XKIXOOGpDN-Ng7|6{HG#3+9$0$kb
zztWT6rvCrnZ+{PQe}4WK?U31??5YU-EaY4~OXN4v!prmfuPbw-L>d1{#P{!FzJGf*
z&%ztyymIkYXxruB7HzqzpH^XYh!^knzK&auB-KdTT2oyeE;FYkzR5qE^T$H@cOFou
z3c!};H=sL77+9+MO;n_>d@j=_I}6|VP1GXzo9M{_AUHBv@R3CO|4@y5P3Y^Lzc-%#
z<)>uD6rtQO(S6y}zK}Nxs#Gm;BVbls>Vg4Mn%0NCF5#Vw^n3aVe?D#1W2~*4^0gZ=
z%aE}*atpR0QKDth|Ck2<JS9XC9Ju2(cytycE>N`*ZPH~%5XbN+GBQ-z+@QfeUT*V!
zfJu9H)0CFsR^@_UW)Z!MXpZij4fdtP55xJOHF&r58=%B8Qjd|<ys^D>t+H<7V{#wQ
zPtp@bzp=%uGcvvHRJ__>yBZui5HD~}<vV;=abBwc(q}}dZTdwfcXPNYHJ9azF3%et
zBFlJ^0-IuadRSVOxGTQMOVd01vPxhM^g3FrdL4i#1QzL!q2@PN)G~jFHgISPUD8!~
z^7_Dvj~;VBeLbFX8zw(~H@XNH(!21Wj;Yv^RMA_q>fnJ6U2V9U2t}(?KWf;6x~UZ~
z#QlGzcK<XV|C88i|L?F59sAF*)$$2rWY^gWRy-_K<W~`w+z4_rj`dML%nj;Ht95>)
ztX-h?zP`(g4ci-lGWwy0q@+VYpd*G@mvWc!pX>w<3@E%Hj@Lwc+QGt0D#P?Uw>zzF
zY@hfB?U{LYz$_qEf4~gz9npZC1cR@wVy7RLOY-F~w#e7h%d61R@o<J)!arc`R84rp
zUOXlyCboPHQhmF;wg?rE@e_qw5EPTQoAhm5C*(`zPRLBYg0>@h^aQ~IpExy5?|rQO
z*+<8&TUV=h5p=O!GK>Pr)GdG<@m9E=Ts$9ydNClRb8~I9hwFP*_%-_*k_yk4n3$|X
z1xqY?C-jND<DThI(5xX)R4WHzu$ySb*D9K1-9S5`31BQS5jGc2s?@*S@5_}-5)Y9L
z32fi2M7o1WM%S0;Mtd_}MZA}ny7qNvMw-fA#^z_3tw^$wWqoB#Zm^91BUX`TxIW7$
z$b7Rpt_tI?`5=u89QuM=itp|VZeI=~70u^w6+vQ&4x}PJ*9R-FsgJ+uGWqbZpl%dh
zzl9^SO1$5HL60%)DF<~fv#7A63$1A~hYfoZc9=CJRV||CrBvk;mW=oE!@s|w<Ul`H
zb{bVB-+@zk91tW8fgbDgZP}lyLap&Ok9&H4;6)Ka7!pA;ExJykD0-Mtq8q?9aPne7
z^u^DEx{*AOFBvGRqbnkEEp?*qN_m%PHGSOqBwUGZmh_}rlwnDffMC6Vp3K2Kn9yec
z0o01ub6>CsiXQ7iHh&ZK>lMty^8J1j)$?VqNt4dweyt+A3|Ru|?^)o#Yq_~;@F<8^
zbn4*eh^!;?G?#|xH*A6@Xg|CX^uRQ)ZY)zg#-Y5SSQt}chcB}n=1YHqA~&sr&_MGB
zb>rfqm=Y;YdXLz{EAX5rfNSJXD2r;+EKX<RLI3vr$8U~~Dva8se6<*rd05daK%gUQ
z%(1FY(GRPbXtUJWxQMOVtv=`=dPgj75#RJFsa?LNGPyvJwA@Ab&>zr5G_@4{r8;}N
zZykve3&I#;-7w#$5nWzPE_UqGWmm(yEE_|vhO5U1Pik=2jP>^DFtLh#@N}e*Kjurv
zICW%`A)ms@0=D`TnKbO_Bl9|Vo@qH|tp$2GT<)TEIJo8W`W+EzBGJ_b@O)+;jARDx
zAk7{S(bd#bcBW8LpzY-#=~EUhlqe(lti;T{Uzv44km5F7fO#LJ`X`tUKu{QR2V}zW
zSC&9iHB|CX*9U@I%dS#w?%efGeznMWM_fnpQ-g7=POnOx&FCTij5L33q4h}h?HJOn
zSqQ`TCI|lz&4xtJFLWRL_}LU*%i`iBG)(wR_0i(})o0w7`-wTWLNl=C;I+-jAWWJW
zPEhLOa9wTT&+EHJ!ve)2n<JJgsm^i!%sgQd-Z3Zr0&V&iNT|Uok_E~#uEM@*pv7y_
z1CyMQt3&UZ$(GeKh$biaGDb$9+G~hp1P6c3Qxd2}y4b4sty1*Ghk<I?Dvbd4_R#_D
zE;A+OeZ`|dRNW)lA5SbTu1IA!DI|V<ZsBZjH6^nx%gqb<xHZq<JIZ0Xh4owwr_l%I
zPywjc_i7uXP0`7ZwHuNK&*jIABNgA74kghhitDDFTCA+Ba6KSQz=+wFnSEbr-O8k6
zUv(V)c1-j00p(W1-S0wvJM^25kE?>!7QZ$>r<BKtgspgmSedsY08WnR<IJ8k<vzSn
znsB%-Mz1Z;b%N}$_c&mT=|Noa{ks8J!`FP|=%;z-0y+Ti&@pOcc5QgNx3<*4M`nT3
z(|73I3}SPiTFCK`nvuoH^Wk%`>CVac)=AA@&B!0P9Idjfh-#eR`c@R)WbL+u-b@>F
zKz3J?9deVKS8wfB;OfJB?`BI$D`qm29ib!n!BqVs-&L&FxV<QG=9U=84MP<!sg$0*
zgudAQ)bW~_(hs!n8aNS=a^J1j{rAQJ`3nHGd*Ra!EWgVwbn7(J<42P|l%aN$sT^N8
zGcJGG<akFSSy9lcN9(k65DP!lY5|(7kLXN*HT<N)b?R{uxBEE+nN?VsS>3jM`Vzi?
z?DD~>&aQ#O0T?}?ARgmQC4c+=#DpMS^r@Rr0)B$wdRH@3g9KMX?8e5m*(kWjD7g!l
zHwOMjMHqqWYM=3B14=Y6Jw9KAEQ`nZot7o){y6B)|NL2TsG;&v1Cmmzd=$?-NaMp4
zXdEDGvY4qgvrPKqi!yKbhqJAWezcr7fU_Z;ZnS)$98bJ693mrAS5L|p6hh2*{B3*O
z;JjIHA>C&bsh<rAs9uAGu6hw`VCa`dfO{&xTC=MCplne3{k!d}n&Mz>j(PIK-OwwV
zoSgg#c1o-9^Duqn+a`uC_*)p!44y1#tx9RaQ(n*$J1cc5Fv}>BlS8QI&c)mYZ(gZX
zz|u!d5d^1uE(E3L14VrYY)lJ7S9h&ZcdM!<=Xdy3$DgsLqzm4?sWvpJ)cG#pAfAs4
zbM-YQenl3lhD#EOR=ER?c8^hZwX*t&tz00E^c1XntJ0l*DfaN*dLl)Qai`Y4j4i75
z2v9g5tR6=Q?#9Q!YwTX5Ga(ZybPk_(+GH_tt;~y7+h>vzw>uZ|+<)!UTtCc_N?~Hu
zGHvLr;#tbWXq({D(b{W8*x8Wdcru7{#{B{}Ij$O=Tyr<}=QD@BUlTu#C`$8E?%F98
zxxJJQL9XPeiZ|dQo~44&NjT3z2722i5*AX;A2iVxj>&IiR9<INb!>n>thSZont^P)
zs8jka_NC)R>mrtmuY|Yz*<hlIG}bbFj#XFudH$pIoTIWI_pfj0-I`lIJC8E<+~4Yk
zPy(!SXH^hk#Ev>!4Hviu=5Ws997mNOzcM$hH*x|Hyp)r{BmOtToQ7YOa$Szy+FUrw
zMiB#m`p}nKd*}85FIOQ{F{*{Pcpx9LFxR`On1FZ^zc+I3sP-`MQ&XzA)*V`jbTo_i
z@Hx?(h73h4o>=BfWk3JV5la4J@-{b2%olxN0*#{e^?rP#BYWN_LQ~vJv{gw&ye&~;
z682e4)AE<K&$Xv{UV{7ZnCu$RYgJ+h&io!Gwsp`7m%OCmC7iG791Dmz2-z7!_a<c}
zjl{)H`$x!i)>U}VO;j5(PI@@~b?FEgKU(u7Qm{EqyJ1=&da#8}XJRnFd%t*>Yu}Of
z{5dgLCUxx(ExDT@KufOcEP@ajRSN*QzU6^AfUUPAibMdH>q{4>=Jr^JVr<1}TO#Wt
zwdV~3*U8z5-$WZOVuJ^^85azCxZ3l(&dYQy`u6B^wJ~{VZrrSKy<7j$t1FlDqsHmE
zg3@_IdN?=4lqA2|&~DblnD8Fs1kpt|K2Y-nLPcV2l)k^DcsnC4wf<RI6qi!c&{NAt
z??8!~H$~8+Jyuz1>(-!!>@^|3Hid}Q#WvybqsE%jg>vPkZOFoW7p-gu$>N^vkkaB{
zoW;r$Mx^xQGBttDmB4}P0-&NWaa>r|4Gi5<idTVc2XAa^L=xA)mil2IY3wglzwscI
zNrym+6-%Ch|Cd+r!5PZ?_aU-7T%x%xUHN~{l;x#QNdOtcSt{@mv(*wKARld_1;|I=
z1Rq(Hef;+-A4fJ|w8J)1usyqXBq~a@l+ipD$4$^|fbW*~5#nARiytnN!29bNE_{Kq
z+4!hAZ)CzOkk1VKEyn{ya$^8BzteLC__v|Y0LVXMr)>}s3wItpmCmkqx`=5)5Yj7(
zxl{-LdujQf(xZQqvIGA|yb81m@R<?a%jfLA;0uwLKieqa1FSeWpb&*k0Tde(W@XQf
z-~$a%1Au+t4@Bb$|BB2Ne|!QY0`N?fc7dGf@{(-7j0MtpAU*w)Bg}v7HA6Jmf#I|Q
zi8WO*{7qDMjQt3;Jm-K9`vNIx3E*j4kEq$^28QJIzs#opy*k3*fBv7Yp1>mw09HAH
z&9o8cy#@!pN&rX%tGtt*2l%jEk-1zKg{{i<q;F4VJ0R5VfAlZ2>D^V8TtJ-eDvCZ8
zfz=kye)G3mdox+QbufX(UpIB{NEZ3sjR&dgKSSnaOpnC6<lET<L|F&CsEH&@oZO4{
zyduc3nXYR4ez!Ts7toH2LVl^sLDQ(usww~~o;O?24_|85%zqq-mRnobZHg0G>VR&F
z(}-g>1B1~lMXWy`InvzR&R>cxMHot(WHk9BHFiNUaLxm`6CyYfdIuLa(-a@+A!T2G
zSk9z_@;r7iAr_>yoRM06pQ`jNAN@>X^R0@=ISiN933LDnY<B5+uKsM#cFdO8LUkBi
ztC~0s^5ccPZoU(v))MyED1PT*rfPKlAOH#QY}xmu&<AVyGmj|)kJ5QpuCEGiwTZV(
zo4+(PM*r~P;)DeZF|pdPjY#}7q$dn<u}9ZPfS3cE05O=%5*Mpv{oA!)`9v9bsv7$F
zqv2nDDME)|bbfF#X(<n?Icoc^60`6;C3WV&As=0jVl9RmUBp2(Fq2(Ta$bzz0^k<u
zDGzi~o3gUK#1^AP9bYU(3y?1=yem)SFdto}foh+7!6ve)j$+|m1d+)W5=Y)IVbfP@
z!`Du18f^(r-Z*Hm*3j@jBo<)ky0NILcVghMk`_iFfenD!p@J(HCPvdJuNwH}9cCL>
ze=476v$7T{&ws4P$@e;Jc#NRB6Lrg%znK#Zz^exrFBgI6VKYttV)eeMSPwWzzamvk
zOi6U$=qBV_*ScblA?FU9v5gV$-V$w$aq5abt_k{9j!*EVX(<L>aiEuFGioUw((3Q?
zVH1qUMD)Vr!F;O9yUV1=r^EL_RNF8LX|FW~#!)ZUNt#?smEyb<i%Wcpv1(mR6${Ct
zMTDVjKxV!s{^FI(Z=w;a{);GI1KBP(2*AKX!q28wRdm5`H>Yxc36briXnzqfYEpxN
zZ-++?mVyH*xTcCYMhY}pUnJ=l)-_jMPIA5BRjLBUrlMz2msc5h@f>4>YXMDNeae&=
z$LMda-g8oKaPJMcYD_dPA7b6R$hqHt_|Xb?F8yVK#ZT9CtiyvdK=TfSF)z0H#kYzK
zvICYm8dvOc{CO_D{IH&bm}uJQ9Ue@%It#y62$=H>yyPRpH1%O4r<b+I+sIw5?uU|%
zXP#?3M8+%{y0Tq8Y$^3Ki|tb4`H;fzXsO%Rx1@lXMQNUk;bz;<;=AKP*3f(C_!xQT
znuY#My{OK$8QrLTXWE;bB;INhWVgJlp6i`~P*ea10v-vN){_E5MTZAY1(Yhv-$Qq~
zQp<UCP7*aZKST#X6l2!DN3(2Y-JeRZ-heMStU$UDp#awF#W=3o3<Hjn2VI`y6rT0<
zZ)r^&9cqx(OZ}0e9+)dE6Zj-Xe1Aw`zlBbCU%jApKec(c{0tfcP))Uq(y_~^IZP3d
zVqXHFSx&t$@+rEoUjeCu6TjTA-+TJSEhybFjB0#Z|9#pf(e*x3pKM87Y$3oJQag?(
z*#WU00RkQ_s+6g{^R{;b%;qN~BCmwcJxLl2Y83b|{H*^o^~T*1@AFD=1fJE2U;se%
zuyXV8pLEBtn1Ec-P4ozYwja>X)qmq?|E|5aJ%r(b8|Bo)?a9O_kNPI0gEqU^fZ{g{
za?XF9tgjCe!pDP)E}=|%*rIFS7RPd<H_hy@#*<aQkvJp*bYnH*erOY}8KbC<Q$#O7
zaOrEBfs%3(aQVFWh*WLIIML)R;zX^7q-fcs9%%F*2QPpTXA9JqD=Tt-6XlfqfzS`Y
zG`mY;7b$pahKRA@s~H}?G;dSxIMvpsKY1T>borA^6=4Td;Aq0YP#$zUO&-aVr?}VH
z^zAU7${AUo-3y|&##>k65()x9=eA_~39IXceJTPI&#dH|Kd!p4q%6dzn$+x{f+XyI
zo-4q{xDI;b-?Rw=KHH9L3F{yVTic&j`PZLxdzR-dUX6ta8;xkqh|rcPKW;H!LG10W
z9z=nu@i*4vX`wH0=~yYfUh*5(xOW29gT-vtuR_Qv?Vp)=-oMcyNP5AFZpOYuBif~@
zaWm5NZpv+q;K-#{>StS)4RJeb`k~yOK0@AiEgFU2*RAP}xIrKOv@-H*dTD9(eKz}R
zl&h6$)*myX`lOp-K_8oqvo2b8Zj-D;`xa)M_|3Lm$r@bRJbBn4lWH0=E@<&Mjxwru
zG)%aW`|V>+bkX`2DrgPFjzeIbAbYPMeX8s&nwi~c{KDmJA2}}%Nm%y^%IfIGbYBYb
zuR|+rwgwTXvH^FgW{2NI_O=?oi8QQwaKIrzOc1y5NA@QRWPdd~+ohsGdSe@Aaixax
z1X#GYVJiKBiC`Dddjtb8`s^A+m?{g@6Z5e?zC{tkqL-Sc>&TisQ_ug1vH0yLPQ4t1
zeFcRd)XNgD5-=4D>%)YUf}cp@1G!f+hx;3S#Sqs4p~8s;7kjA|w-o>Wh1%)Yi^{Z3
z_>bpL-~)rOp=>Ou4LF_6`}#v_ih4#tcQB!=%X{nS6H@@Tuea3)b3e(z#EA)o&+wmy
zOc@+feGsJU_9QJFeLt&G@~g=wO1EZV(tvxWH_E1E+^b{_%&y{aZcga?6%e%MAA^Tx
zv#M~zU)E&|XS$f31zB?vJ6^95N_fX3_~XG%(<ir*8S;>xhEaC60HJs`z~htN7rYny
zRO0+58~iGFjJ5>h`jejm&u^jnQ4l)VBC>ip1VD7rgm!Z+#{%@<1GeuFtA&MO#Lk4R
zT>d3T=TAbSl&^zFP*L9`b;;X+g%A=Bv|By`g9)+HKCE~<8TLcV>^`cf08y`@>LM)v
zV{sqpxey8TWFE(}e_vEMeOf<mJ3=(1koQ!#eZBmZ7}|uOGaHQDBQ5|o8A;z?(x@Te
z3%#?3I6EE(AFVn;44^E)F;;#>`kcrGex}bGcntoYhRfcFUB??Hssi2gknn9VV=Y5)
z(<WkdnMJz*kUpq0DRqf;<T)rNZwOOo^E=I<?DJk2Xy4+b(7>vrrTd1ZJ7AYE($TQ1
zJ>>!7>s@{fPM-=BJV+M$q{_7B=;nzf0!7tRh0=NrZDzgBeKduTUn08YBWWsYURUOc
z(D+h`#I;{C{De@uNcZya0@56CIP+Y;a4b|lzgzE!fiB;>A8G2#e;zeCUmK=3<Uj5w
zk`M$*IQ)hy!JJ?}0(w&jD(EE)9kT1-5>EVQ%{hWm-+}jHCdZF_PB2`oO;htqx?YM$
zGe_kTiy?r*i2?Dz$e}OL2koQtvKF<Nr;+PnheseLZO>F=?wSXM^Uit1x-v3aH>J`A
zA9vrN8HIN!drGNXVef&2yj%!Wl|-LJ?t4}Uz85!?uZiSs8e|Q4!ayaIBd*dTNIX^R
zwGe7tiy7?&jAzJOZiuyDKzO0cJBmiu3QwzRV{=vAU*8c`sS;}J8%(Ia9OG2dsX90I
z!=`R&9u>5FrsWUKnmGGSWCbb$nxg?C&;FD${4eRKNCKM%&sJ2Q7N9iYR(IL8j%&Jl
z)U%ICAURbfVfLW6Yexc`(JYD=P2UIp2xpx?{SALkTmJFde4QZ#{zJA+1qkFU;4|k+
z#M}<v2Irsx$lT-E^w%^A?>cvuy6#flD!Rf&2R5vG2ReCr%7A2j^D6rFxw$5b`Ss3!
zN?RyfONtN$s_zZuh=+eJA3nHt3*P+&e8DrllwI)5${)n&3Etz#T0WdBZF5a$i$cVh
zDk=Z!v4Oq+{M=VwQ0&H)FQQ9JQ*rbGyZ268i%EWu|0ij==wn1y3_GQxXW7h=ms@VQ
zm3?>l#(4jH?c)O`XrAP~xN)Yw7x(lO?myfBTK{d#(3@z!!uh_?8X6A)RGG)o1a_tP
zT)5<_Xx)}4OO?>NBn%_Jootph4{fOjR+b(1N@HDpOMU9!s^;T)iApSh%5j&3*h<Jv
z{^JV4)zQx1M8tSOGzMMNRW5LwUC$+JrGvYFWr+X5a5`tEkSK4Ed61@FW-a2=UcJh}
zDgq#Nz$uP&Ta?hV8WHM~`&Zo}9m1N~Q<BA=26DdtAW9+<e6yUFVs$GNdY!<ZbZ3%j
zxW&|03R`Hjc#*s5NO{OpHE*d#;YsHA!o*i-!DC;T_1i`Z&59uvUJhLz4~U}`T}{b#
zY4e&xC48I#D{G~rCuDC7H7g1oAAJUdBLH3|3GON=oWL=@T7&+2d6em!!q1!=i_HTv
z*OMC^{Su<dg?JJsqfpy9Q%S8$>p!%Ix%~IM&#(V9=PDo0?kbo4I^&QkhVKbf&mXGA
zze)IJ{oxrcF#xBhV%=xPNq45fnD{QWPL%Jrs7M(P?a6*hCGp@2vlwi9ekqtdaQ(Jh
zQSN8oE$JwONOg%P0SsNpIj<Fp&X0jDMk~@}b3dC(=}MmsOLg^95}n!w4g6q^)AJC?
zHp#w=iv(b001QIq<`NU`O+jH*zFdh?eV|b6y@V~7t5>zwZ_7sQ7w4L8{`k6au*kw>
zrt=HQju*lCt}l~mqPKc7coYfT^^B5!cU!za)e+rg=^Y$oBqC?HuWZ(O5{&2k9H1pl
z>^d)14yfwo)W=OrdOo&XVeHHiwEf~aGEG8ysJtaVGj4(suI(F)r^mqo<yU%~b%k_f
z3*D-O2iP=DAxfe!u);B1;lUdLmn99Z6pqeE&zG0n&*{qxT+a4Wf^uEQqBD8bIi>am
z++Pcsh-sU|@W>?IET<OzXsNq&e@{=$Iic?-j0g>jUU`A@C;+7VaIUu$E3Nr0istf-
zWBGI?@6g^+8ollFzzUeh1`%CYoTXENHv1c^UIV#rrK(*nG8`Mxekh#dW2*1XAGYKd
z#<gqGybtq+eHlWRX=~vTK#7~`*o(p14tIZo1GQTDK`xTTn^wE(eVW;w6B~uqZ?o*H
zkU2Lj41JgDi7FdfGdA=m``C`zB;x&vgs*2=)h(M_HD5^SznS|z(7ib-e=ub#{!4H0
zo=w7DU|)B37?@7=y34ceWhVSBH^hRqQULT=K*wU;JL;i?fakoThkXMrPgD<Dv7I3_
zn+_^n5^$@7kF9p%sQj@Qf2-!w;i9HMpnKw|pxjsLjYmjMp+`w}U7EsHt}69@^qN86
zHfn%;0B5KNm<7=Tmb%eh36{J{?kBhSYo6SAUoXqv+ZErM{c>xcK!=xY!@(WV6}#$@
zc(I~DKF#n`QWvZfljbi9dcOv`A)c&8gh6g%iPSz%X;mLCqMeTc^R)3(KvK8W6aV?N
zH=Bv;2I)g29+ICg&#Awo;%8`KxEJRF(Eg;u@TO(XKn#ETVxtv_9`5??GI8tXxypVs
zXJ;vyh!@GTo2X&m8e@|`#(hLp2!93Vf(g}+(88syF4)56=Z3nrN|@C9xMPfSESder
zo89QzA8nX=g~Q2UY*60X##)@m>W~M^GUF`s)t{bD5|hIX=~*sBLhm^HNfVVsRC6@V
zYqO<=kM^!<1}!`QV1hD#uWph2chF1&Q^*ZEpkHSr^qXkRz%4+7plQHOXaNoY<ByS=
zz!{5NHTqD63h%xRz>%uR0=-@%H>@KFO^fjDErc;b3ayBbeg+g7t>Z6vj=jI7^ZK$F
z-FpX=_lOXoV0UTq?QBL>al$Cz<smS5=9#y4+u$jqfpAOM)ud<Z?o~2P_(A2%eY_F}
z%KxHv_&U#!z!AS9jX!q-;cibMaTg$TH=G#0wE*C0I`)CnTi@>POHrC^A$ZqiJRl8`
z?cwm7h@vSC;Irrb>B?7Ln#4XE@wms}VB{}U8GPjg7?ky+zg~za-~MzBngCeEYopl-
zuWIA*d^-?4?9W#oAGflQjV6Lu-x4nUCK{~+(z4L+Kpq@F{^u*(>T`n|54G_(K<$uI
z><kzkR07AG{q@4xnWG2nsz4hoTo6a|n@DxOkHFw7O#uJ-%Cn`YOIrhf8Hw$`TnMoq
zSF<@4{L?`E<--5J4&jzGitd~b$_$)vv)^)I)4fWima=npm^+`$c>Perw0~_**c9&q
zyhcLDV$3zr@mP5r2=HA)+f}{tDCN#XG2hQJ&Y1ZW-Dd4PS6O<KS|)(#kssm{(pCCY
zks%U^y|x2CyMH_hr3X?83G{CwLJvv;$PEQQ`KvhnN4^u*|C(L@wLt&pW89$+*8C#5
zjRDt80J_E4>9)e<Z^+9kcM7cot!j^M0-7T#l?0oiR)RyCd$s@7wz9v(4GZ|6QIvK6
zv3N-RUt#ILBKsfP>c6`=&c+5Pu2O`wF%Z5l@lC3*@-9Ae<s+KjOhfsnvB%i@O2~6t
ztu7Y#*}sP5rC_p}0j;TvRnWP}%Ra(K$!#vDKk&1p|3IqR<FW~wt>%PQ0PQ<Q=V=Ct
z1iHJ{)9~F1_ywS{<!eQO53X50!vbwTHP&)V<$wFKS}otJ36@+Fdtx~8j?aDta>@d{
zc@uslXzTbK2An1m^nnIqh0|{$b@>@HgwOTQ%<wW-<io4Ki7cj9FcfHq-7{GvF-AsQ
zG`CIdSpA=Ja(aheJ6d#ow>0fCPO&$`-Ks6RB)6catUdXi1chA~qiM2CC4VW=9a5m&
zxl5qhLVNCim$PiqUz<Ts-Ax{gtF*pn)R?XonS1ThjY0frKk3F6IAGj*qUT{@Ih94^
zL`=Q5u-6Kh+$!J(jQ39NJR;m|Y7XucsOJ$|Wm_l{!5_k)w2@_~GTWa-Xa`gCPeBNm
z<5>c#ySQj4P7j?ko>q*zY#I#qQEwj-Ex^38Z!3IRG5^kDWXNpH{v&l&+$bn-aIcL%
zK>8j|!yBz_5C`aaD{?)Yzhfk;GUVk=6|RoBx~F6&Qb@vsLIFo!T2&rg(5guAQGA3Q
zi>yLQZZD?YlSfyKK~Ts4IyKc|-zV0kaU=0BE`@l4gOPQX;tJ&T5*;oG!_W(1exZaA
zn#TJGJ$JnncsYlhsYj*Yq46hA!BqKfyxT>RDhvMk><hpndl#tU-@zzk)@YP&*VDMX
zs--uzoD(uzz)PAxKcb~zJ(_0q?w&H<GBi^5cAZhZF$sSJhzrlVTbcHymDssinU%Vf
z#y;NJHY_oaPb7^Kdm7MxJ&{Dt^)n4|3=z?}a+J91><5D4vltdX?crr!q~me~9OU&P
zHmIB3DlTG8id5#?PLn}<U2~-FB1^SINKTu5`y<`?p`<M8Usy&Qd#TIgg>Jgi-It9`
zA*kbW^^1yHOD@`v;en<hz58De#l`oZii|N-7nfg&DP_syymdwSh9`<o0<*j#{fD?p
zJmnltqHn@ZyJ!xgU99C^VDaQvUX(?eWdFM7oV6gY_izvyD&HUL&nC+x*btldxK-`h
zonD_&dDp0(sU$tcarQxk&!8$Tlp7t<9#5iyK5s`opMSz^@QiE!j%2~6$8f6YowzO{
z^Wwz?>m}<Ur$B(6u{}P(6UE8PVV{zWzQe=#a%#%>!(wJ~)~DRWFRFY~;78z4<bqXP
z$qLH9n?PAJUGU5E#dnH_=fW{zF|3CfVjNdaD|3Qvd(2%?Mn}kCRptQc)wGa&G<)bu
z#)83zVWDBE?t0mw+x-_uqe5RdnzP>nVk9q#ZX)z@>q??Z<X!YOw35wA_>9mFE6ZW3
zqyb!@h$RZQ)Jgsfx8T|5RF2ZJp6*{h)B15-`n(M$RpDG6je%Xo{U~Z3$AF?xkE&Z3
z#53(#n>4rrz2A`C=f9WO_u*(bh>n%MX2J7(-&|naPbFwgq=SJRdLNVh8t=bm-o%yG
zbzb5nH%|IFf%B=eWy=roo^bnnMCQnHd#KMvrYa|4%&f^7Q)aI4GBsW#-Ld57M0x8)
z<8H06_`2ubb=rJ#q8Prm3(cF4Mc6We+(qtRJF(7Y{tjzL5(63Zj<R_a$O6MxRH994
zQ0|(xdGIh&YJ*9<L-MULf1mh*1E;a}%M-(BBuNW5rgU`!PTOKHzvgmz!0W2V6P-;5
z_v`6TwK(>l<+dh9l|0_5%S@WI7dNsJ^5&BU3`dbXIG~`iii{LUT-Tf`BlO6Wg)7kp
z5HbFYx!$hyGb@kBOtEA6jg@(a*Ck7+>e%rsL(9pdwWYgFwVjKGJWky9W_M%F<VYxU
z)QoywzG2O-O_P>7l1Q=JWRJwle!`7*%}2aZF>>U~%ipPNZ5?j;w3jGGPPtCXw-Q2m
zlRtzQk)cWt>jTjd$Z=o5EI1Kc42>qm$Bmz9R>4UIF^g9RC|^HT*X2<1b4a16edVHg
z)BMBZb@n{w!pvsvui3zX<Owni$=+fBimN4tua2?`h$|p+Y`MATOxYg?ma(P`66IJX
zS5lWYJJXoMXkkMjYE=r?G&^DRlz9^(QablgWJcbtu(>S}ly(wSSyg;W?kA8GOvO*6
z8kdXMHB|~!`uJuqFX$V9f!UJ#8z$wSI0x4jF1n6Oc4+?QSN95Qji-Fc$0mY{0p7fS
z{m*X(bG3m9gUA}EcCYxCMJUsAwlb>(3~{2ZPgeyo(|`oU`!eBohO~(D&d<Bzx=DO@
zPJQx##y|*cp>CVM?IQVj;2Lv;^Paq;dRK`NR=fS^G=2);F|z6zxVPhNfLhWU*gX8m
zRT)SOh+UWQZ?y3$64*(=Cevv4HxZh~4A{!VFY<D;fjZ4L>>9kQ6X^jyljj9cE&xrO
zpq@wIV?77D)8oavzlm^UrSL;m*!Uk+oo8eJ6Tdy)<=Scvt`Pu^Q(pijl<g@1z=FRt
zjXb4}#|4<9ihdJm<skqTAsbNq>Nh>t*5sS8?b5*b{PpG_G#asBNQl*j{3dFbvIgi=
zrP#Qr+xgAm5o=rj;-i6AS0Q!T7hG{jAV^!)|0cR(j^bqr()Fif=o$U%y&wK%01)%a
zqS(ZJE>Lv#Uv6Fe*WrcKyf8uuFUo=0|9WSmfART|&f9tO`VG$in}<HW3^uonHUhuJ
z$^_`I*6-~W;|$G_<Z@G9WnP!=G{^3oR4ptB()St<-YP*<fb9*xt({=q#*PtOc&#aO
z8(ikP)2~{$S|4$>m|l}hI$biibb2<E^4f@2H59kWSg#rE^8*yjfAk*yfKRc?9T%Ax
zz9iZ_Iye<oq#)z~wT^9U#c`8489<O!Zy3EG5d`Mdu<b7=o~iyCQ&{#oK!`L46~afi
z9&Q{M%OvbDlyja<=nfQP;~`^X6u&3_?`;`8Jf>6c)2qrho+{F})_>?CoT^3rHcG}K
zX^a>`MyVvvQCb&NFetIczzLhnuua)Zg;4?po*Mg@1Z-*14Z-!MZt}gCY~rsz17Xem
zTq3%=7E>6B)!|uLoNIh1(tLL@VMl{`n-2D&g=<YQ;%s!<wmisAWd@+cAHb#XvA2`s
z8@J0YBupo3hn1JjC$2H=528pD^f?Ecvcf>W;=*ereu*dlpt?C=Sw8+AaNGQsWI&+(
z^dJ8D|3$+0q6omG2vfhB*2(>yZFEhwo-M5DH_<#{=ytE!CHLH41q>qJjUNUFAMUAN
zvj~Qsbiiwo9S|Pe%;f6Dn~VMQvK^Sq<ofiB^N&hisaF@8zE@l(@#IIDgx?f&76jzz
zCRxmhSG^)(ESUHxsL4tzS0K1fg;Rym)$g!(T*cr;xCIA!$CCu|4e!Is?fdjfhHQYf
zy~oP1Q@1jCiI2ZGeAQY)|3XhfLL$1iKI+UdgiKXW;fdd)4sz?KHnE>F%t+=xQ+j$-
zKu4<w6Ho6uyo%v=Lzv#&)_xX<hyXiy&F6w*+Q`w^$aR`%Ac*tW(P_Dr#ATr8PK|60
zeh_=}+*n|$40JhsZow0~P}%`^1kG!#LOKp2?5>Zin7s1#F9%2aE{yJdSD3Nh?XEG4
z_w7#TEdSBnpyV%p^X9O*Xa($xk&SsU;R+7C1_^m_@HUHXis@W!TEHczk-t{~z%Y~3
z6C1~sl~Ho=heG6g&Y`fcGP`1hh{z%~hUb#uWi#yh1s6at9TI%*Kuxc-sMU^CK($O%
zj(%}Q+{P^T7!u<Z&^?G4Jmj-*VRVqUn+M&X#3*;`I@#M$Mz42Ux7zlVh66U!&K;qo
zGc9iUIEi~Ro+2JE5B9iVxspX(w%<2bvhK3KA$_j8#xAb6@wz!dGx#hu3L_nrO@nv9
zNSjPb?m^+71HfI^wXd|+k1NdYu$ycI-i0utSk%}S-+oF8;ot*K>!jCOg3#;O3Jj#3
z6Yo}xJLojRy_9;cBp;2acjfY!N~7ZH5IU$GtZ}#Y<PK$~YjjT~IoJ_PY}V#(%1KCO
zV)afHtQ;T_S$ur}Nh2^d;{XoI3OpL@(hrlsg?18zCzkD6Y9^+047m06Nv^~V?vGgC
zGx#DprYq?6=y`kU5|0kUl(NJqj0sl<RHczLEyC*_X5)KazTd@1umL<CH-=Qi_iw!J
zUGw`u8^c4Nvi&gl?=amzp8r?NXa9}t%Q_ga)!W8{^I7FJ?^C_jsF0x=OJZX)Sk+Qs
z6tK!D%cM&}^nk)#6Pi*&SOit4roF5jLM#s1Zxz%ueX#Ofcc~n`Li{*!ckpuf&V4GH
z+EB(jtb6lYz2Jy89!z!^l;ZOa%&ufT`QuTDpkG}h8zz$5qo41-Udnv)e`D`Gpqgmc
zc40aqAOg~)gbq>#L6jC0X(AxKgGvVhl`4cpL3)!eD4_HnkX}QtBGP+Dk)BXOB*lMx
zcRBC-?){a0*8iRVod0}lk(FdJGfXDWJagaob=}u>*pp0rBw_vH_)PQ_3elZS1yOw4
zRYwqR@)tmvlzQg6Y;ov`YCrd8&3XMI<=&Nr&GW7O!w{wS&;=U~+c<*KXv;0-$|d@7
zahaJKtn;yUx?nJ}EbRMW)%=ew#Mk+640#fU#UHrAYS3|abavH`;?3P7#nvw9r*+?@
zh7q={j4Z?rMI97r$1)8Ny}Y+rj>LJDXym&LA-@S5FUZ7;jk&&l6~&rM%m1-n!7kQ3
z?gDX7kU7!|9fPq%jp>*Swbzfw>fI5Md@Sj{QPccN`F2&5igY2T(K`-`4nBbwr}Cnk
zdXb1)jgFB}Wg#x==@a%?Rhu}zr#=q(7DGjP4jv^Hj!l}RtcG@?xP=s+7jw7Z_mH3-
zIue`?QVr3qN48zH9l-R-lRJ~tL9r@L>tgKpEu5wb-+02^cE4lO{s8#i5ccL4lw%yH
z+w^r9UygHA5Xg+}>8?xN-H(<9rjF{;OA2>hDbPq4v+>Q<&X>ZYZO>;Mt<pvFjRD-$
z4}r8EfEw3V%fW5?#2Yh(2axjZ=7=kT)HKDw%vY>$`xeaHas@>SXe<i>AP*CU5)j&L
zF$`LZXEgtQnjsnK!!#j&AP(pJ1otGllVzxO82s=)ljaT9E<)1%FXT)>Jv+k3+G`E4
z9bNMbo{Nu@wWV-gVjg`g`FPhm@J(68iTQrCV!f@i@pDKiZ1F?%mBz}uH4Y)CuxfA=
z&a-$Cr->Ab^;yQ;oo`>_P1CKJoVlH(=gBYinJ1ImSnv8mqs9Xwq1arPoc17XwMzzo
zkc9Bx9;9m{GhGBXI6g&>@h8z<Yfz!>KZxa0bOPm=TYv9)XF3ck#JWOADzop8m(^Mh
zJDH3fNR(}k*X&S7mrl3|&MTIm=Mg=hrPw!D{&~!~5@b4RL+yaQ2e~|{d~q4~Y_ztk
z*k9wWNSfEY<*?Y)zJ;uPBDtQdx#ENqIn+pDWN0{`_B0xIvjB5@y{~>cn3H0v<K(7C
zx-FGs0$YYb!}3ek#n|R%J4O8m@^^y)RaZ~RJ%&6Rw}|mT@`b%o^?;<A3vNf_>xFx7
zb1^WpUwpE2g1JTWW|(mO$_ZI97lt8pImI`j*J2JU&ur9YRv#)mlR0_v=HZJ8{Qib(
zlpP0QZSr3VQ#rXYK)q_=^oy1~>YQNm%|wMzbrq?1m870ZS7s!j;rh0sm_YO}hI|bc
zaoMjcR?@tYFglfIR<rf3sv-FEk92KPPsNCmZOysz&Sv_`O73huK=`+BHM~9h0z48b
zxbt+&<S13!Ik={tMOt`3^P!4$fB!)5WWU7V1?NhxYBhh!PRTjTRm$Ey-Je8V1d2((
z>?jTN)~DdNlEbA3MG}wFjZ#c}OhjK(5uLC(0sR6l@DKvY6yz3Q>A3x|ugu?T_mP{h
zyJJIR=D~+VKr!b=LdGMz$RQ?nJ5p&Tj?o87=qOY+;BgN9<#F!AX}Q|_L11i)vAlKT
zT08ovLsVjg>n70+#TV+gMB2G%AzLVg6^*!KPYR%^$u_c0*}r1mY;*-NQJ!C&A+DfR
zAI1NiH%$B7WU8@#=p*}~2k!HUR1cI<IW4AWZ&Y7Yn*ihj(kWiijm+e1s=lz#>qZF4
z^#$DlBJ$uL5ln-dU&Ui+9y|}<dx{bQN+e^7bgM90H@#IbMo&@ACCA#WQy|#|eMBKy
z|H(ux_5StT>m)BmQsx_&Np~_t*Rp9XFgq)SdrYhGVfy~n=BU0`xS&GU`o|f=oy#^Z
zrWVeTpO&5*`P9ArL|>BG^_=+j%M7eV*spts%HN_sra#w7cXkIcBS7Ov5&;>HPih4q
zQQcH;T96|U{QuopNR$p><R6M}BTlx!gcoklDsjM&<6SBR<d~Cybbum*wB!qTgB&Qi
zP1k_Wh{|0D{`ht{gPbpD{5b!&66%X1IsgOh&;PYHK!5yjc}Fw}DT#}+@Wgw8&*&>Y
z)&{1B-hYd?Gd}wuch(E1i~xs#Y)3OObdd=_OnXRK^0)8uPoGVL0nhLpd^Ut~?WUr@
z+6bph6BJHUOjftY;~S>&T|l_3Y8?~(ngvs3^AZlxK~jc6n3ef<<WI_Mn6F$4G1NFJ
zr}3)Pe5cL)Rd{!h>178$&7r8Ny6bfG^rPp~UQId0F8`itiQBs|McB%4V2{N}u#CF}
z`><bECts0;y0%hfrk}>Qr!UN&QFu*%nGkOE<{X{Tw-H6L0Jjkk@~r`(2NSx^&I!^#
zoHBIX9n+=%%oL+9+CW*S!Izb?Wy6AU3dOnSbgb!7x=uAMrTAovY=5@TVitMQF3%vH
zcd@I8kCLLqC@(R|f=WxyEt8z-D+`QMnQq2Kp3oSywkJQ=*Eu~>;QCDK>Mgf`aF>@I
z9m}s(h&Wz9eaB&d^xPvyx~);<=6xqzMCsJx(XH;{Uth7?M`^$H=TBwhk9D{sR?07Z
zMY(7x8UJbzYQlk<+x)0+E{|@_#n|3KpoAhk)+@(l`t2JU8<O`lb0W1Bq>hP<t=W5^
z94N}L@)o;1S9&YwG~b^@u2Dw{JZ{hAp1*y|s?)N8iVHA-R7`VD_1#i~tQSJ3Vb!|j
z@Q&43@ETW&#D`~VH^Sd^3i-Ofywh{XT<HCB(tOkz0~$T8^RZ#VGw<TOMepI@-Y7k>
zqunmiu+^5Xyq4Krm)#*F)2Qw28qfAcmJWHpZg<n(44?XNpAhL;Ync{nc0Z{>o)|jm
z+i`i<CvSlDtxdZSQHTp|;w}tshzp9h+~BjFYd0-s4@YmZQS)BC@G4H_0qupDv1Z-&
z0=Qe&OSLC$N|;RK>8eK>(DKaKecc9BosMhB+INT<#5vd*9^GUcIDdgs=!MQh$TdJ2
zFlqfV(4x>;gHp?C7I;l7bH7?_wD<<is3x;YUJS*2d;rsxXGfO0tDk_K!__XCIQ)B^
zKrHURsXUP*7Qhx+ZCG&vnGzi}BTgN-9Q;`Sy$%0oKcf#c)U&-CbT!MF^zAre<GmH|
zBSzZ8&Tf{rg72=>>??CGz-FYB5MQfJA(PVLOt!25TW~X~0S{;+9Smy@-qU;>C0Ad|
zAM$M-cO_e{mI(H9{C`am06vh75eDHdU62f+w}LQW1fDd7cgNnkBPVzDrN|bwaAznf
z&;9~^)m2~nYczWX#JkHi@wiK{6_(xkby6hOEdD+U9P$4CwC~w%RbF%E4x@%db31At
zN(GjuqzCT(BcDOYGrbqf$9$Ny!)l-g7qx+~qr(X4#^<{*LJaXm-g!Xbx#f1bh>Ls#
z_O^LhU!b-(NZDcG)z^Bbp@Gj<nFxPa7Z+g&e0;a%CQuLj4#Q*|6sGCk-J2Oz!5k4l
zT1a-nD;NX81cpn{z$quK$l=cs1aJTh`-8U)2*tvb@TsuxDgIu7X#JO;L@1em*d1bb
z;1tAz+WgbT@t7c?cF!|<oc0fMU~X3n0$?wiK&%*0%Mn3t5~zWmfi>rUwtq*xhG!ry
zSl5;^DeO!9n(g2Sz*rClN@!yc3Sc$|Gz?Mz!aKj|3=G+a_&9=RfM0;EG=T_$@ff@1
zStmlIX*d{ZO$P|2XrS$};s1SP|NEUY(NO{uK$)@xJB~ywNmU_z;dZ4MrP?dZ!k)D<
z_1N?Mo9C;wFMS>*c|w{pBVR4d28aCPNRd4f+S35qP*yQoD7|(dUO4Y{dmP8OPAjHZ
z=jp;QKeuG><@u~gDy(lnP&#~4i^3^&ShVoT2H5;14e6DB#o#M$NOm+MzFAQl>DFC}
zy+W9by`G$5)TKA>Bd)TSq?7QZ*vWryY5}z|oOtmf)zuVWxI&5lvaRUIHD`<8H&m)V
zCBRX|CG7Dhu-K=L;P&hfZS3o6moaYWDOBx#T8?wp`Ks4_TqR>xix1YlpD@vXI3a&i
z#Z@A@Z-J3Se?_|E(Jw#vZ}(nL<P_MbYo5FIQ6acMPWgp=iqK?eK-T##S_z?!J|;Ko
zIjq;SHd|cA)KS&35mdly6+!MY#U+SRiVr*3k4>NCn7<`yY?-b;BZq|fI*3$gh~(AZ
z=uzkm&_RUyY(eBW_LP?1MKYnd$+f?L$9Sw56R5xOK2w!W<+vysi=2)+P!uf{Y%J(w
zU+Ul}B^{)G&Y&5}Y~R6}%InwL$74m&(O&4E)HB_7r7gs8A~)7!y{^74)o8hGeYc-i
zOJLy>&08kps8HJar|i+!U>|H}&ERk^*ieOa4F=veUE6Ys{k*R+ya9bh=w7<s+I0Eg
zr9_^z<$TVvC5hM9uXXn*s{}46aAcjFct$CDd9A=%w<<BQYtdboKQX8>^~auCF1OP(
z3qTS+nyIGTV87?2$T8gyuB;D%kIDi$Aj{J?dmc<;;##an*Xz@z*mm={DI0;l4poTx
zs6D2&ys%@2+H%Vm33GNm{UH5FNYsgjf8%-FkA&(NUOVu0Q4FXz>V{g+jiN9067QzA
zB#eiTfX<brCdCE2YDn{23s_9cts)Eqpz|7{AKfM8dY0L2=3e6#&dkE7o{IeRDtl>x
zbSQ>w>BbKo9~%)2d<8_l)Rws>ZrI-#$!u<LL#W|u?~l2|AHr+0JX%9551jJ-vl`c|
z?SSa_{Np`E)yJs>?k_miyct)i+wboP2BVpmna6I$z78XMa+8%^lRfJ?YVVWH^{Ez~
zHCl_EJLB(zXJ+TkT$UfVOd71RYVtWndH1DbTR)IO%Ctynz*o;ok|uouAuXAwcwId=
zhg_v_BTOhzT3ps_nW#0(sh>`+jmh=WxIxd>W_sWB+pUxV_DJGq-K3z9Ha>razSQY@
z8#ctzv@T5It9_jIa~{QnRzV{NGLl<-h3mFb{x<B55HJ=jeyGqQJ=~y4C0y*neQ)!<
zZL(w`>z+-aa=LpEN@GnRbi<Uv+!e}EsWrAx|5gGz&3^93Pa^Zm)u&$)cJvLY&K1-m
ze%xK2$_E5A5L46g`GA@EnvSa8*b1UCE6B{F`mknai?7i}dQ|iEB_FHUPi41Hd0TDT
z#4$Q+a611mJz@+0!k#%97AQ-76g#c*f|k3Fc=uH4<5J12`(gL3Aq~@yLz9stA?Osl
z*sk7p$p$bm#@?8%Q54W)6OH(V&i&oq^gsLh|DbMmVx-XC(}JxzL?3LnS$DGhQEN?F
zHw$((lUZ~A@%_rdO74{x>KghPPJ7#_^~~9nV#m{>YmJJ2`N}C@kA$fjK4FM5Bknmw
zkcG|8%W9)PBR@y4gJQC)`@$5}h#SXiIlc6woTs#S9<|*Q66L>_$VWnKqE3@Xa*j`S
zpNkrw46rf4YY#ECKE*p(!YDH0sezM4wG;A62HtKR#GB^R=gy3fn1h(Fk>AQRvT*ul
zgbRepHd0JOHEWG+<VCqxvM=`Pg!jL&$I3q$IShMNPH#Qv8&lHE%;blbiPB@;;*!Vb
zNPfW=t`)RC^>O}ueX!|zhQ*L>l-l>pvRXGeRY|S<_EOe8Ng*c4R>0S=eDx+{w=KOa
zcAnb1D)@?kyqWojEbaMp`tv;;QXExcR5YdqxS;Bl?DH;5WJurem2y6UVvVS1Agk`=
z?Ez2L7^(9l!7~Hx+b&8kerOP=7R8=H`<6(Jo(!{18o#(H0;}1Nqx;Z7`CZLx8L1b@
zE(~S&|8~aiKlW|2H(+o`<UJjCOkd?S;#BISm@Mh1Q?1pjeY=yZwxP<htth7uh}N2D
zkn)-3+wd>^6ERI9Bi^6J?$+&xc72Vc{B$hDe|{gz0O3KxJA!~Dw=0W2@tdW)wtnNJ
zpj$Dsh8&B-y*DC7B)D8sj{7raxK3nS@tP7T8idb12K4C{SS@d(e47g`Qm)WP^=#Cz
zoM$sFjx*Q0cl5$ECa5{*JL1AJTCi^H>tP}CIZ`8CbkDtjuJZXu?b-;3C(m$|4j!vv
z8fCa^7^#(!&;Y&28f#*w<{Fe^d0^Rc{tkb~zT_l>0}*e$@GGJ#lx<q@tPT7N0+r&r
zp0##&0!DRD6kv9rE7?8-8WVqXgfL@oQrt_LK1JNqf1<2{>dyEE#Drvn7*|CTnrT{C
z!5y-ah1vYwV%I#dG4^WAg>E~Lgy?oWMmVL#0=!%6SL`C3>gDfcFedT%&eMW^`>0hr
zog$;6^JOYTcJ=o{j{AB*!O(M<+Lb_XIFviilJi36+SnSa_hIU)au1<EmYvN+cE$bv
z4I&b93(OEQyc5BJBL(XAy@^(RdCLPdiR@a;SG>rA?{rBwkUon+sde@O1})d#L+&8I
zl_S?pCxmJ)=T{y<yPmnj?IN?kCa~##WltkoDef>#NU)`caF2Q;X1&nqA!sjTZ_(LQ
zD2iIA2}R9$TgKgB3eEhm@qpLpXf6Fw8nwv2K_lI12wo1QiuS_5P<q`Np0Ni76A<uP
ztO!_;)AmlY8};~wm9;DUDt97S?L|Xk-q{J3;P~<BZR|MER7Hz*fp)Qfc#mn7Y|GhF
zW1s-xJ+k>nEEjq%itTW?>Qa>uKkDUGC8ypjky<1y1fsUKbt?-@XSQfF#G(C7S^UE_
zrgP^6qF#pf3-L`Mu5shv*kO8*T&L@Un9hTHsPOn>3{JIV-lB#c1KrnL*BTpJ>yctt
zFK3A4(z>iFh<=@DPPq)CBar(Gt@a|;moGXWJog7=ir{e{iszT_Nr=CS0|ltk-)Ca~
z<la%waJ6x+BzGVeAEnG>+4nqW#tkZ5i44W|kNZEGwrmVh6TRk<Dn;gCZTHOb85K$9
zX3Q%Q!E4iCr{zcnoHa&r6%;YeOqD-eZZ10Dez{4FBcb}vniMw;pV+HH{hF9-G*W2!
z{x34L?43=V;bAQbPF&gfTTe9H-90bPC8#lks_B0xDi28guh$&?5sn7n*(7reP{euw
zuou2ewPnHIz}!JUWV0i|=<_f<txI{wn>2aPS3q9$u<qlu_@j_pdc;&hQ8LE&FJykZ
z#`}UY9r4ZJ=5nyI>yO0gP8%?a1;==>S|5j#r^&jB@g%R0I1W!{N$w~d66CSYW7hb%
zqIN_@QsO>99^|$POn1+b^}T=@F~>;xHk2Di*OTw!H|Fn!web70+30MSo|iafz2N~#
zEZR<Y<hk`oZ`77WSrW&Ip<JbQZsXQt&MsJ(A8@lPxaH~~=W@Ob=1^ZBdzT~J=$!9+
z%EU_ytnpcj*Vbae=mdOG3;Q}JsT>zQ=IQFgGL5U0p6I^Fi(*d(8Xm^@Mp?dmvq8qe
z5%nF)s#!^xUuGB2C+BY*ZMiA-ptvjV$;WG?<HQjltFy~*(ae}D;7+>WQJbBBC*wc|
z?vKxfe#DLmd*w6k)wL2|-$Xp1?)Yk+uE(9UjMK%xfv}?_`+S!f)~0c{fIblhRcA5B
z7Ucd1e73;k$5c^{&7q)~B^8Icl?NU_Oi6bsqvl<je1VyUTmX?um=aC<i1AIDuPLf{
z_{LTLeWG;xUQoqQ!-Lg`?>f%I{25NTOCIcQlvh>Da5v38>Gg&bUvC@mdvq0iH9Ajs
zUs|3qJ70`I=Wj8xPWF?Cz5DbT2B1P&lb<#Py_*voQa%vBPtW7crcf30qAk%1n77ME
zdbK^{JO%nuoE0(N0{Xo8H*;)r>w-$n^#@)@9=%tk)OsOFSaM63X&qF!<>VtkEWD4v
z7-U1Mm2d2_uF`KH>y>tRO2Mv8->Any@(k?sy{ru844x~zhs<F@&gm8QtCRF`<)Fvk
zR$VTbAR9tFrEUn-=~D_CUy}m{L-iU<3|yJp(ssw@!gQ@>%;nILaL><Kp6fSgBrb`T
z?%y|#AgUDd)jbE?FTy~ifQb$}@(w^qA#l3DW-T0+&<zjyv=tBggXL1S_{GfoNkN_R
zA}@PrBYyM|sP^7FdqMk@8De+2dq2uLw2EFXwTj=PE`kNLlWYzV6ASUEkiGy?`QYa}
zyMQ!}UWly~f=I(F$jM{8pSW+{!R&)UU()9Zn}*IfBBm3TPRY<LHjF6pNDJez@~t6=
zPyGlN;=QI7KPQy9dMaH)delGjLxYv;WK*7Uw;UO+icxYw%3CUA>UEj0NInOIh*_;~
zJsXoLmGNrlwfA-sV0)sWt490s@U(A)$YuFs+y|CDy_I3l`;)b)6YoE^RZitw>+Shk
zyebh2?kJ#^xfbK0lW6DczzruoZfjE=gV}+?U;C%2p;)2;|IBR8p4C~hIp54RC8KR-
z=CLAf&B_|L#;|8{*StyDRhV`GkAZ8eASQ@0a0Sv@SoTigILSX<hCChlIIBDrli2*8
zJw4shbE$*1Uq&jM&lNbyVlX#YY@1xR^^^oej<QobWwb^ilhG%&lyeuQA#?i7y2(SN
z?7Z6Lhf<A=gUx=Rk2D$ZmnYx|+v)<iY+DIHW`w)jhnv*aA3~RzOQ{+by;a*hnO-t7
z^StH8b!8lTUM37d$x%-69#FyWb5pG=*$l2yeN6c2a3AJMxh-ZA>SW>X8^ZIM@7Le$
zpV!F8Nn!K>smu(uZ%M1d6>PQIjgkiy2ObVBEiKK-<j)JU9G(1lRCeNT4dkp<ZOQ1X
zY5u~wNdEfx*HwuiEWp6G6pThCq8{CPD%l+rAop5$Y2dPDvcaOJ!LG`8Y7QUS(|o87
z;nQ;UK`24uJ3z$sQCJM@=3<+m>lP0f42-2T%IK&ln@|CqQ5s@AqBM{APIQ)~h+h-{
zbl_QlP^b|_kOQp5*k3|NS2;Zm>Z;yJkU1VYd*n`}*x3tPH7D>fDMVNueAala4R|`b
zf~r&EcOCqSQc(3c0T<+1V9`Qzrr=`FXj`1@!jeiA6*V6ynX%b<>+F|T1O<d2|3;Zb
zyTI866s~wBC%maC>p7QdE0RCUg`@hKRBs@0=>1DX%BC2h96(wud(V9BU@pAVWP~NG
zMqATcfx2gzBcL~*!=0%INy(nb%4+$cm-Ru+Mb`>GJ#^XlV;LCmTk9s|w*#W&%93@#
z=j>QD*COflFZrH-PoyLoh~BOGIDsPt;82~3^7ZDLX4G={S_a#FEsm-I{;2QY+rv!A
zg3onuTs~u5m+Qzrhx0<l*wCZA>r<m$o>P2q2rL{y2M&o2L4J632(pS6Io>xndl*T2
zgD<TKpQ$KW>JL&yf+KOa;QnDsl;-u*58Ec-gmK7e>Sr4%{&KEmVFr!Vq7q8dOBL6;
zz@7?j?tH6#T|uhak>WyMaGvj5qwjoB{%kmGeAwPjM0B;Gk2H?l$$wPb6F|jtxhN`8
z$&UirVU$)2{z05x4BL;^3>7L4Sw3InW2}@q&vyg#99oAdN5*%;*kp9%R8TJ)u!1SY
z7TZ-p8MheOqi^4P7-=%d+4qE1sER28m9`=W)Gy-#pl{vRg)7_-j&Bb40-{8xv@S3^
zQKqE_>0U3n@3UUK)qaDfWKfXro8Fwrlmgux(4-Oo7)En}7+g&+=eM3F4}<1pigEx~
zJr$8o*&|lc5xAOYD{@40%;P7K2Q22SbuyXJ*5ynu++=Qb-6WM*wwj8}Lw)1IC5k!q
zU7BOKEyfso-5<1S8|8@$<jODehxcTpG}NS-p~w&B57Zi-5e%+(8|j9efB(=ZV6*uP
zf!W^)IB*7>4$4$7Fn$ltY&mObQ`&I#aiu{5>$tPX6WxP1n_qyK+42`K%K%7=H?N0=
zrtJe?F^FW_e>ybLf5pg@Z$eZ~&p=N|_B$8^b_Ab?c%_1w7cueo$31u5FIvDenWkcT
zS&r5dH}#sd$_sR6mQ%Wf*#?NF%U!^fI8ZKNBq`Zb>XW78+2iXwC08mMUVnA2$cTz-
ziHgW$;3j%gnYtJX94GDmhitX67Rxm}b?3Bnx^34Sw4UEWooXbtQd)sOu+6<>#fdZJ
zb{c67KC$iU62u8GH&H-QTaWJ@yCo*g>v1;FThLW$emMyB<4j1o54dvOB>hU1C5nFA
z07${CXA?n(0^VqP!$C@4(WMG`j_M!B?2dX`i<2)3rK&V~v#-TmtGICP#FHKR8BC3H
zN8X1Jx!v@(J=eq3a#f>HlP`nVop&%MUSz0oNWwbrf>o-cK%}hSnozrBID!#!CCA^?
zHg10g<Mwc@mYO}ScH*%)-=UxCQ!ZAI_^&?PrBCke8}kq!Qidyo76=2d8lWYAn8mmO
zr58%LqINy{QoskffVLDgnOnrz-1i~?>^5;&q<{GG=;5p%Xe{FgwjEX-9|G8G?!Y4C
zv#KimM|!TC3_H0M`-j|j8*qQ06mDi`9CdD}f>gSjb)Em~yZSFO*RTGbBWc<jaDdA`
zeFfXi+yR|=SHr7C*OmMTde}}dTK6O#FK_-uCMX@C768o3zi<-Dz>EO=Sr6O}rt<*r
z4dU{_E)*CS9RSA%0ORHTW#<c}!0h;%`7MC10ww+LB(<eNe_n9i`O>dOl+=W_tuq7C
zPm0t49GePkU^PN`<yKaq*N!;6;(|E&1Z=L}G&s5M$aLHDB-V8M)$-2e7A|fAO%P1o
zZ$(*94bX?^=PLTQ#3)RML3MQ9<xZ<|yIbpYY#5L4v)d?D+S+%6%^5d6Hsfe_W~4={
zuPB|Pr2KGZ(~`TM)<ta_12l&#>LUlY^*wy%ug9^OIXr5NeN}gOwL`$bUj4yM?u+LX
zr)FH#jH0uqO?h)XB<8a-KqQJmSV0Wm%D3267!yu@;$+yB@cHJ++h#b;z;l7&jeZN;
z<t8b6RX%=u4)Tp>-C5b_J4i3wEesgdyrMe4WPwq38;UrltLqX-E_N@XiV^EsKcePS
zkC|`jXrK?XZJo!7;)@}sNR1A8+s=_WaJ!9AJ&Gl4&01MWy|;ULc3bLB%me4m*t(Xi
zyX249PNl1_k+w+AgQ%2wx3d{>*W@BkB&l4Xbg>l{3YPBH1`5S{F;53LJ7U>dDzSsf
z{3#CH87C;8LMEZ1=GFsfb8c#~VI2a9Wp)iwt~U^l)NfjXu2i>S?s^`#T4@sOUw0H-
zNzu|zWWFKA+QI$i^LQ_18gz)IDLw?@I1+`Zt!O6GTJvNdD))a0a=T&XuDD#?Rk^Z@
zRY@-DCue)6!p7x*)%2%ZgS{k3K-4gS$gZwaQ!WtxJ@44Pdf%ry0&SW89xcs{LWi$E
z{%B0uPze6m|BRhXXxloi3w*%>cO`!1=tI(NYY>*c{d^3%cX@aJ3B{LBc}cy}0Y;CU
z&J_xv=V0P<*q=n)1#f+Kp?9G}zs9W-5C-9tv%8mBa6Ba#z}!c?si}3cRy3Zjt>7b-
zkf2PIqq?nF34aU=>GPY|(s);V47|n-Jg2dinu#3Ip74f+XW(Ni3LRXY+oPpAe5m;i
z@}4#kQEb_=YyP+%wqQh0*#U}bQ^v=&D9N>t#1UwwPG}ib8<W3h@r;%YpJeuA4tLBY
zKE}rIKO3ZGBl3yqZN86~2azjZ*Foj`l&jBo1Z;-Z1Szdpik5rKSX|E$Fp-v#a)|36
zeA&#;+4X@Y*UDWzMLpNQt$sX63{iSM251#1IRq<#9X*4UF9IT;8W6v!Hl91Ot$bZv
zS0^&&H5zXTL@zGd=)Fz1dt4S1d7;O|;Og+ZO{N`0U44jq8~rRq9-rIBVNn2#eCiXk
zwT7Ci7aAUCj=2)E(?jvD5}S|0lP?HC+V*O{Kzp;;QOuzzKXh!%)NymUGOc0H_=t-@
zHywl3Z4dalM8PMdAIL5aCG^8<A%QD<l4~r%KFg$Tnu+Fab%wJ;t$v}5JojhFb&X!P
zhml=8$GtLT_Rcbn=lTJYXVYAYZps!=x$RIE!8hYfC8MAW{u-;2As%wBAXc9s`OeW#
z9+&3W_|o(2I#f8in)wu{Nu&~Q05_=X0`*{0g#7`{;y@n8spl-VE;ESZPPn{v$h<0t
zjZ*=~JG)f10aADhp|1%X2jzyif02=ls7q7_JMIZ$Rga`Us!0?oeVQ5w=Vpj@X0jvY
zetxUte!J;3sLrVa&IwTiBA!<SZU}#@AZoKK$XHhAI*Yw=L(E6E9QMHqZ4LI-Hxx$p
z8<oA$sj+4To^H)QTC+E_Th^}tQ=n6he;?Wy!?K15^8?ZFBE!-P%!6Y0$|}Tn*I2Gg
zGyRa@P2+l)V0+<{r8Gw*HN|7xB$gegKHU<CW%rN2i@MVrq}`FQ-NfVf<gn(dMdZVU
zXV2&b4wFymPj3psE)o{9u1w;jz~RbV5D+RDr`Zy^8Z~&J<4{-C=$Wi7^UA|MrV_%M
ze3jLURUtedLW!X(fso@<YqQCAt_3xBTjW<17HzBZeUqG*ct88K_jXuNQN*jvo;M5^
z#@;_#zhUsSyxDcRR9t4$-&gr5kXt>$(PC`jI5xdV8{seICwSj)+lRe3W%(@|_T23r
zDVF-@k@H@#p6zFU(@gT@sikdoT9_D=k+2M6fwKA=@K+(fqQgB)dR!>?S7TGk&r7CK
zP;~hS-{ZQ3T?v@RG1hlMd6DqlX}f$(wszB)B&-I$aVW_h_}uqe%=H11x(gRBh_bX3
z*in~2NPT#OjWDoQym|vN=As(p%Z=K^2XjxQ%DNv*hI%G*AcJV#Evi#2wpzu063N`*
zPJS?^Yz7dQU|~>M{6`3^0s<L5!0CMOXLO9s*}8uAI3uM*izB-w!+HD}O^e5qbvgvu
z#W&@29Doje*axf%<O-mYOx+m(@Gvxe8p?3Dsx=zNbDGCJxn%s(9%#<=emH7oTcF;j
z?rZX2*TY0&#i3(B$4hTE6-Lhx1qY&;h%fV*zMN7zJ$os~0x1^4OkpP*(rK(e7Uc@r
zTT9zPwTU<*z!h^9s3yyzw71eExhHCa)Q2C7i_zOTsy5EPs`z~A(n4&_zV*iAasiV8
zUHX>ifm?O;wvz+;i*n^ykoBk<(^293f66TSD{tPq9vi-6IKljr=n3960J~d{xp=H7
zriWq$$Vm$i{=F^#XFpTZQF56WUwUz~Re^Aqw&N#J5kS6BDsA-u9H{^er;2Mp$7=s?
zCRNZM#R<+OcyMp=0MLQ~MQqCI>Ynfk*Ds9#Js6{sf;n?aFjW=M&x!wf`UQNz6C(`9
zUE-QwNvWOgfCZmw1)`Ww6ib^Ms(76TxL1h#-RdYebniR}dze|GLiaDM4CzCY<3r#r
zOwZl1w{eg#Z8*D4Rr7caHpfPr`K_2oMYMhG{Ko6@Y|)#ZOFxN9<Iz<3U=Ik7mpk?r
z^oz+z;JWrS)-kit#!;i{IUu-DyLd3*Rwwj?Hi=&0?I~g#kc!)x?s!ms@M%s8X!L3|
z2>xB#{I~S^&(Cx(5$+>1@m<+9r{V0~Hk@YO*%2A{_|0t+>J<8?`7IVN-ZcD4B(|~9
ztLMWHiUaD2;Q8uq1VzhT#K-JCvvC)#{ChHZIfID{vof<R=TrUS7Trg<?#=QL6D3iZ
z#G}z|6!?1ZBk%?2_!5UFy}!@HF(|Lg)dYtC^_c@cs{}W1m9KiZ>ZBw#rHR*&T}QgN
z%V8UXa$udmLgE0NZi=SBHz=whbGr-C_wXeSZ?)cPr$3f?i`V!X$Y%Q0lD(1jCs8q2
za`r2KN${nrm=AhOB`BH<;1v8tnfTuT(Q^St9JIP{!9~8j$$ECNcT|U_gpFT=Riqtx
zizy%qBdn{Rm~5S)G=4PSmt6^c3oFuwAx~}P?HLlGeg3i1f`R;le(szhq9AlheK7dJ
zkfkN&K$+V=ey?uywN81Aqm-b6%;#s6MMjjPq0mTu+ls!vI1w}3lAxz^QwXu=<pfb1
zf*&FsG3DfDH9UG8B-wRlT@jgM(dY2cfi;Tt5xYKp01-HSTTdusEGG&Vm#?L(>*eQZ
zvb6UGphVp#Hn-SMy3m@l?3~7+sBa4p{!j{hMG1Ek%knO&2VS1c$?)}48C`pd6PvEp
zk-UqXj;S4TSG_YTSewc!?hO>m2Ci0j-4XQ5c-SROre2)V4-Wi_9uzJYF-blZysG^G
z05N(0Gb8BFBkKPnA)Y@AaF_0u{8P1^-IBkJi@z#SU(|AU7aO(GW>Bp3I9w<admF~?
zvMnvYUakl9k&*dMjp~SZ>+@<WFH3d47^!*a$5lvL5MXrtZ`fr0P2T#i&s_BXp-Q(G
z_&-qk{-PXSH2OPQs|a<pn_Iud(msEdQ16BhO(7*8H)^Ud=LOp({jSTZ=g+OjkQ?j2
zc>jcm^gR<t>eSdUQ+E~L5XFJ`U@%ZNugl8itB{}gll$P`S>*rdqDJ`-j$>_N_!#gv
zpigPm2n8n0PtSSNh5}#s@1x~?*xcOwC#<3tOU5^inGb#nK6Y_=r$QN7QAGAt8Zw^0
z<YiTiQ^Hh9JIJ`+kN)FfUpo6c5&mz4_<#ER52oX`j(^T<-#cTae@~M)g9-tv2cCdP
zC|nD;`H)(sgs;JVFrV0-9Rma~6`3!bCNF{PL0pc|3Y7D|kh_`a{^|YWFIU+2v-8D&
zaf9<H-}ti-)PS`cv+?Jq&8+~b#|?vV*iJgIZLCj9>B1+GU)d{sk_s^;>#ukVaMdHk
z9JE930lM?>*Ghnd)qnDvro)`+B5k>F+*55J3_EfZX9p{uZ<D<O=c%u*dY5=ni!nv|
zNq(Gttj5q0!xQC<oSJ-DHtW(P;YD)=kSt;FClP4@uw9SLaDZnWroNknu4HH0;Z>}}
zhbP27bTP>>w7FI8pOt^W^8vC#t<}b+_zd8I45a`6Lt^XCSNfmdQ`h|^xCma}2bJsD
z2BiXeWnAH*kpw<qE^dzb^XmU^ya$9K>1^-%@NB7Ye9rtyBtWuWkJIM+OK0cFmeqyK
zzHat-EQL@#uFmzC^1zUHR39~Llr(_?${3~aEu*YF2O)lrC=WEc@Wv)C%5i)T2s0-C
zYr_oq5D+!c;Vx11t&qP0st!;FJ+55THT@X=L~e$@Tn*tL*3+-2#V$OG2xj3~uygGv
z1_mbi1w4XrMJG9%f<R~0=Z9i9igzFW<p0`v^e)AC{cDdrN~sUnkPBVOv>UKWy<JAH
zRqD5%bJXbIy1?bjHi&tdJ^3SPrRaV?e8mA&+qz2O#p5@T3(Ej?!BldW2TQq(aU_7|
zgwT;;IvRbKW5UilJ2C0@9}^8*78lb7+<Jc>X4sVe%VEafe(QfN+(-pKl$@{c&Zccq
z&;A&`XLIBtH+K(U(#a~R>?KaAb>^q+)SHcvP#vm}k=$i+z`uw8OVNx1TgbQ5)kf*q
zk#P%AWC)rD-*R#gzQnCJYltq+u$Dg6dbic&SPb<Ya1(QSP}S-Se!|7`<{fe9BSSaI
zeY}5|&M#_maBhNJ#Zrm@Q>KOM)2FGQiUtb3^_}mYmh5+ks`UW2Lz}Qit5~Rz#<Z*7
z6)kAOiGgc!#)ke$Wc-?N!`-fih<Bs-=x~B<Ps+)_sSK#+q7bgq4ignmU>Jh^IE8&O
zRT5~^QA2yW+RijUmm`9nIRT0-R1OFn7V!;0y<UA>CIl@XF@3_nor|vy)CcyphY6n+
z^j=1$S^}bN&4fqs7;W*NM4O90iKMJXW?PG}a<!oU-q-(M&zFc9PLC95XQco|R2c3q
zO)B1h@N0nm8WchcyLMB#NqP6c^rm(#rUVAjH}%o^9+ri7VZV(BMSE<<cR#Fg=!>iq
z8c_=>4YUNPeoYS;xWcZ{^%^a<n*6|RO~>^Q0*ci>upeYVzVa6f7MqK|w_t(msHuyH
zehA&)_cr>4<NeVycDneFzAFv8_u`tOgbRLC88+<&WGQfIBeT_nUTf2zME9nR(Z&!u
zti79!5b~ghZuZn1=@kC9Ct$cf;^dlVfxv``&NEA`1{0F!2;a9xsY5=!@(C)Vr#*nJ
zCr(mtu9*;9u?Y|&TLDJ5O?Isxp51zcz!acEY`5Da3Z(77Uj{<9f?W^Jt~Y1nM2<5^
z`ewaNd$a$r47?fuom@V4_XA^$w`9oUKYoYD{&5+s^B7->_&&D}Lza#?Q-d5vZ7=_E
z*-xU-EvzYQv=->GST)W2!mK=$(<bw$r2x}|dK_H=NM^V-0<9Qut@mO<X=)IcKQGQD
zuoUiD15y^#Uceq~l2pvF5>c4(KP}bx<C%C-g|_~`_4J(R%|#P*y4F1L(M{Qeq*m``
zQp83S4h-1o1l|SgbToI?Jm)Ca<@RKYakMCOW(9O4?Xq&FSB`Dp(|{$xaUteW8Jf$6
zmtYFJ9D6o;w=9>bpt8^jX4~^EY)<)v&!?AT-(8l-<X}<lxomaYKXLAA;w#T|zarY2
z!)qbK#O`!o2LF55{r6S*>stMXtI`&I^(RpVAeZ1^*!`0T>uKvUz2UT5kMcxE0`tKt
z01<b}B1M8=d&?J3u%*LoyyN9<J<lZ*zRhOK;a#Fhr3?FK0oRn0Kh8*iGxLAXWB<2K
z^Z&$d%k~fVyyucH6&+C5dTr}H!}r?+A<9XsCzFU5vi9}$NrrV3!`)5>)q*PM1wlGi
z4?PQFP+raNebE1i1uy&=EG~GlT|@VS7{GS-=m;-FEk1hx{#~`>OU-3({1um9X*IWE
z*B@cv4cs4L;&PjB5C5W12rbefRF!tD(XI4NA3H%w*X)5H<3Dow{!@YGUu`b@U*E-e
z-}(J9V0+-QfKzGUBfL!bO~~j+Y4CUz5Cf2`gO6$Ae-dfP`t4=mx9u%N5PUe-+;0^(
z)|HnxGfIU55|S-LGVbAB#%Qu!m7)o^-qZb@`mf7x;Xs|?x7I~EU3|9pPJAfnyaX=-
zlLBL&b+FQAj=hu{$<Gni_`vIuMkuQvyKmVG;KwB=1^}iPGZ3Q6>{ZE5^Ysa+psUoY
z#5*Kr$If3tqD_aFzrLgpj^Md<VV&JXj}`w4`~U!WX_mXNcL<|b2Z}K3`mdMG%xoFO
zQVs6<y>0&ZL)^c6pZMb=Aus4D-bQYh2}`*h;9Ka9)q;+`9tvC=nuXQK49jC`k}QY$
zo(G?uLd`4E<2Ka4T|25jc*2KIf?JY@V7;!#+y;s?X7AzLn6Ie9PFo@ZXc3}<f~AW|
z^{1kzzN`;7HN9p?5L_d=Vd5y1Q_A%;Wak96yjO?>E+h6fc9+*U#YDa}Nt*2|#q*Y*
zx2vpnV<);5M;4#IN)fjIaYLk=f?rL2!ZL0x(E0h-!aX3keR~I$Inr+Yw(FYcU|prF
z@LW!Jp}(i3poZYp+eP&>d%;BH#%=Y;z8-22Gfp6U?ep7$MT>bWm$7Xsr+_xKc6!n4
zeb{z;IF0l<p0Hc+q&5!1vNCOwi7hcCXiTDw9n-lOW6HZNNEng0aH;H7Iv?${L^ILm
z@WbJtl=bH5z>zQlZ*xtGQ*e;r-g*M7w7d+BaY{HDeLr8hldkFH-HpO;04#cdF2WbJ
ziDFFVdzXkYoNgt~^|8EaPl2?Qyo^1AMR?tQ$uj0DU&>}Z({X(Vz+i<0plpg^lszM1
zNX+HIbH?Qb@y7KN_YPA#bSkBUnVt`DOt)tP<9W|`B|FrX*c(i-lf{AJ`rzX(yBp@N
za8Hi;lR!g4*sNTI=4U#9?~511=%Wm81EwY6DBJK|Sz7reg>-QXPThsM1<F|C$io58
z<Z#xT-wIw;Bz>FUJ(xJ63xm~gZgxuo2*yPyU0XC~EaGLSdoyD?*G9F-MnTWHn?h`!
zaQJx2k7_K?^EI#s2?9J1!Dmu##~C%bf>XKCjmXV-n9E3XDu~Nx<Yo4GD0{~`JD$0d
zR&DV_vy6TBkYC}=k5$93&r_aoB)wZFn)()kvO~AGOv~&j`K=h&WfzQi(fFB)u8FeI
zT)g&@|0Pr8qv+H*3f-%s-#qDY4oJ%O1B|dx9L^kd8Ldrw{h;f5ozwt_wC=q_YBpov
z?lP5mMvirk!08z<E%-6`1wc}SJak#HCE2;Qw8K&=+Q&Y-a6A8@DqrFuZ&7FQfmx@Y
z`%Rxi2B_2c$ttV{)+-thY(rJ67irRDu_L{8IIk|AWk^y#CukNa!8Af#J=Qwb29(Z%
z!(juZm?T15%>iCCuD%pN-$X1y7XEzQ4F2^U0hCb%uPl|T3+I+<?)se@?$+kHuPaTl
z>)2x3lg)XcD2u)8&$q(sj9Re7H0w3Bx*bWGzL@Xv5*%e7W0iYN;YZ^%*9OISVjtra
zkf&V;YJccyr;T{x`n|C-cL(R1XQ=8f6242G*HZYO1V$YO>czBiVMeh~oWN9zQh{rR
zmC0q@d=J!i9l}24&75u9SDjCHSJsu^$4w@emWnEOH-d6tY*1-{lcN_5p!V^CDPjgj
z^--eU3|3ZFnrx}UDd|R4#*RPIRnBYiF&+9KfUC}K2d(cv(gh;n1<EOp9LygDZyLt>
zuzg5W%q>Zc4Zrb7G5E>tn3Y;DL``c-S~p@Iyi(Y)=XdQp($dP!aGvGirD4`A;|p(u
zZ(-SN(nUVyz3{Tlm$evZef;|5@eeE-g3p2QU~gjru=-!h3&(tdn2qZvWtyuZcfSVB
zKT{!2iMndie$(K5F_|eJR)sLZj5v3)D7so7d`#8if}-oDPZv8bVjnV!r;T(>q712u
zP-ElKy-Op7Y}M1V4M!jil*pVNwJpIBXJq5cYK%-#mZi=fP$!jk2J({YceB$Yd)F*#
z5Y9j~7Gi?&L+z;H%aqwL{{B<rTH~<l6D1z&(79fq1-_l6alTHCa>&GiyGmXC5zj=L
zWX7ZBs%pwTajb?vr9qV^aP?4ZeEf1E+Td=2Rc(&@@!l<mOq*Y7;L0Ob>1q!ZE#g}O
zs@TGw15OAGS<&d^giKm}rz|(_SjV8}C0OoW_{M3ez3-zhdDxfohNquJlQEoFdmKAv
z6WYc;R#ET;q7wpe4_oz3*z&wV^6jhX0wP|A!TfX7mxwrmY%OkCyYRpbkDTt|;eetF
zr_-7T9O<Qlx#A`lBO_|u+FC_es<#zc>=uJ>UcFd%2ZgNA8y*jdhqqn8)UQiM?VV^T
zOJXial4?I5rIv};57Ecot@M<Nd>NthO1NVuOWw2}OoR9;hyXzHT{EDVJ!+u_O)?s7
zZuTs?160Mo3;*il!y)mFu!rT;=+rNHfJ6&l_&rQ~fog=%4b58N*H7J_1yGPi<b(b8
z>j#1nGSCiieO>d4lCR{f9l9{pUHa(WTdaciq%22X-Nz3zb#kBZmq4jkEccORzIzV%
z0{95sd>Z=l7le!Zc|A6a^bPNkh%@^e!CtST4I(L-r-j}>xDjP|3f+Oc5&gFQ#A*pA
zC#VlL_zv$_o)H4nB_Na)Jy<d5#Nlv(jY|^D$t9p5H#v36NP)TcM|MugVUK9g5>wn_
z8KT8_EnNmc_DL>x@fHuJ1myU<r@4|d>@Y;(mY=9NDHQf{;hV+0P`(^cL27fy7c0iQ
zQat!e$;{+5VNWArPlXlih|UE*Zrupf-w3=<LHC!^qUUq23E+=71SnZ55ARtnY1DFQ
z)Dr#gym9fCd&pQFF5dOoJoY}h@Pl$qyF&gOrK5dOWXb}i4;~mYx!xsn0s!_yDxhFq
z!2tU%SkuA-kQWPL0jkdv*!L>)CIl|n6Sngz`;4kw@*J$cI2Shk4s<4_FCG;+3Q#Q4
zZ`oL`c>Z8s1-7pTo!#7-C>?^(0=rlMpG>U-0i2SRub_st+9gB&UxQ3^-FEI8L3?v|
z_9u~@MCnhW0=E*ZNSiD^7rfC19nMBeZzIken*eZ2<oDp#zk}lazAx!-uPI+aCu|Qr
zK#Z_m>j?l1%iJlzaQ;$30FDwLJ!eWt)UX=aQv-bCR#9?<o<)G@skPMP`EvPC4GQ{6
zG}D?7+v4}8`>-wZ`&{66VzdD6^Wr~xDE}Eu^g%i8KKw}}J53lYj0A|&OKs!t9|I?a
z3>dEeX$i=`t^0qvaL{v5T-!~E9464)doR%NNOC&0uFg=Vlt556QvTF_+AG>VB>m3h
zddY`Z`B#Q&3_X!b_VB8mWYY+Y96AhRx$cHJE(aU9Da^*rthT69z0Mk3_#jldm^5Qr
zZ~U!M;u@*p%=(qP@0zt_Pi>#Lly?FF@74byDfW+G@ZW~czxqr*|C=E3EzeWBsX}<$
z5bPzLY9~<Kpa+0s>)wBLZ~vL~$!sRLvXX1EEdUYNUn({*uX%#Nuk7ht1r9IzV%#4S
z0z|A2y3s$0Qip9`EDsi_<x~^nMXJxTMApxYO?Rdbi`leZQt~EA?h#;@1B*vEVIq#x
zXdr=_{;x@(;6n(Eu-@LD16KZfBIGK@+J|<td)VYEXPAIB*OKv?3roti2JS9KZ&1tA
zdH@#GBb=uMvM17Ugwvw@`l?1}bOf9rVB1q?ZpzS;t0ugrMaffP#?eE+s?OQFSSmEK
zT<N*XUNp0P6*>y&yFwV2F(m{;Six5P#*i4pAY`O&=|{Twv+n&n9(G^_!V$?eG%Y^F
zhn-WMZVrr&4+h{QCfh;J<HhT^bcbMby>#^a@-=}9v&{b3PfnY#r3V^VQJXqya`ZQM
zR^Vr~sk-%RKC&@Hv8=;7M_$ckh2}x-pvW$B_C<X4FZ+aj<uug0nW%DJjaBS@OQ;d!
zG*QC#WS{@R>S-`R5EEc~9&@S@VmVge@VY~OL$q+w`++JS&1*Kz8~jdWV<~&@uMH8v
zE+8^RHzfRi+FJLV)f{VWmYd~#n7XsIp|X2!X>PhS<q%^qfJ$#L&<)vEiT6cUfPvn(
zi?B|E@!A&H+*W>icQ$IJuCzwow(OCojKp%X9qDVefXr+h;||*V<5ceCc<0gG1kNEB
zar{K;{L6l^avopO0=B4xF4Ey3XNc7M!Z6zo^mTnR=RMac7kAfaGJTbPLwn~JB1DBm
z#4pW?w1(0XoC331ei2-KGP%al*2ulq$>WQIX%=z%UyUEWEk4&Tq$IjeZEdRE_PWdA
zIc?X=hO%d(ujQSHvTnj-5UWezj|e=qpDjkJ8L*v5*#Vt{IUIy!lgbW_&_m?L3dRpS
z_hQS21t!MBBANzbKUNd@32*3Y=?E}~vuBGyKiAj!g6r!02H;L{i&UYinjORn9lcr{
zDBIk+v(NE<((`l6w`Ab&c<+|=kqQxU9K+KT-LUk~&%k+tm=<HEdn`DIm&(iTjV}z=
zPt%9_uD{YSCMD%KU%Ns#Q;iP9IIPpk_1RqfQdl=7lj>KWa8Ti3?4%x(7|&yPShX!&
z$kwM#qQj*Ccu#0)bn9Um-vb!Z@iv|oE<N6M5yP&HDK3FHR#zuLjLXa=GM_$id=~q-
zP5(Tv(1V+yH%S$JzO`PjcxTbtn{a3v3$ue0VRm`~vZGJ*F8Sw7?HH#QB#P=7b$Pr3
zDgsX<K2LInxqSk=!>MNcS<vDW5T5)6t3`_~1DDqt;tK_C;UxRu46SqQZ%%J8ol#k_
zbR)vD02i4REP@2HJqq^@oQm9t74SE!k>J10C0%xPP+=^Se_Y#Fh#|Wp8?c!I95X7J
z0d7ZL)pQ?>3H+58K>^++F9s}DuYa(GILnKNhaD>^bTt{U?$$IryFV~_)br>inKLz)
z)_F>y@+W7AP7tXZg7H`~=5orrME~rr)Uml)T|pnswR8s!imSJ!M6bnE-WcH&e#fpA
zK&sIL<e;ADw9e&dAaet|Bu5PAU_ADY%MAy~v@gj$tlS8F)<4?P9-B)WN-0efw495<
zc3cKTjd)SMv2b&1=a0RK(cKMt{9@s*`;Sx%EWdU=*k5N`YZ6!09X|<VJ03Qf5}j8_
zxgvHgXW&vqKq(&yE9kc9I`H2xLesm|Fd*=7&88mqh~?8ne_37>XinX}9w{fX`I<hN
zX65M%7O^A`rQ5@g{SP|Jbzne(F&}7#nOzN!+w91uJ)Ruhg<ogF-W%_`s%*1*M0;e#
z*&d%XG_p+6Hf|fgd=9bdxQye@?yjp_x-y;<r6peIDPWz?-cqyy;TtS6PQR3Tezv1)
z?*JmOvV7j3?oN0YlzL3uTL-vE>uVw(#y{;f3nlfsuIzZzQ87{H`A?!A&)L%zfOtDu
z?-^;s5pLX26VpT9|D3xqmTIv^o0>0$w%_3HM%K-nd{k;K4g4d~;FX*uR*V{D*Xr6#
zXl8tOgQVEFPU6JwgwvwLH-jqqs`Jg6)+u$B#81pF<0&z|Sv>JTWE5tHdh)QMBF$d1
z3-M-cS0&4LbN%U&oc-yJ4Jq;zEr@@oe0gVUEr_vm?MOdZPkQl$DzC-Tmr}$o&cF^(
zu&5Zc*6e@awtaS`<LKs9ZOiPp|B!VA?EoPm*;Y1v3#iy1kgsg2sh*f*`t~uczd<MN
zBPqul1zjVrxRlRZV2T#v9Ej-@RCWt(ps_X#9ToGGpc#`^JXcwyPwyV})Dy%_+Sdp-
zEYxMz0ZZjUY5@h|yGY3{Nw%$}8>1~+YnIqyxzDo{ABx{bJ00ct>tb-Vi!V0didO+^
z18p^?r%{$9z%*`5^RK2pO0W(v*rVL3(_#=h2(BeN+6wrb21Y{@id@aLcoQ!4jKkTG
zx5yq*vA(`_PH$mkHy`Q`_#{_Og<`dx4g5zs61oZ?8sj|OjPLj@k~MGCH7b!3cN=6k
zpIUBo*vhd5|3=-2JWEr9auR;avfl4h0bUBViq$R|gmgKyZ(51#O)bd8Z>diLH(1P|
zetFewe{L8XX>%p%mesfTHQ=K1k2ajF(IK_0nmFO4Fb>zRt8I*ODa(IyH^1AtC&!WK
zbp<guhv_$sy#eeqH?9j)jA$2phW^Px)Z9_|yK!^)JMS(zk{j*)a`qfY-F)9Xo={do
zJHb^sgrT-e6N+l<)aWT?UQCBwnpxu!O+qKJg!sDM(CbeYT6z-r8ze8PPOAlOKPokP
zbLm{pqf1#jwsXRB!kmL#I$VEwNu8{ip_qZF(XYABBK%zDxh%{%*ZIA~{X^$NSHh?|
z;C=UDV{~H@pA!rC@2L*t4wPcm*wu)ig(qnwnWCevN0EG`6O0$EYVm~m<zgglCu_C<
zDzmuqw!bZL-2lhE!6&nnWTe68PT-%wvv|PJ8mJ|oD<fQS0dcs?q)u>c-*~)<BxJ1z
z)x=(0mhNbGw9!<3Idfd_+6r$B9D^HBuFGa8DEl+6qm!^Vpkswv6Z;SiyQe{UZW)iX
z7_4dn-`!rxdm3gGrJ~-=z02l4XnC-czE@VB-_nxup#J6M|A)Qz4r^*n_l7}4RJt^g
z5|j=C(xnDent+H3NR85qfOM2VC<4+u2q>X<2py$GdK2j#DWOS|1QY@(`(5riGvDml
z`^=d+@0s_z-tU_~FvRQ0%32TWS@(0__pfO2Bn0PPi>{Sq2qalc`p787B76c|CUaVa
zea0@|>YWLZALql38dG>J^1W_p+=p=+x!5(-G^8FJ7z8h#b>K<-Rm%R;r+Xx1iFzat
zC{ysje0>61Wv}+Hnnvk54*0?q%tbA2Zr;3kwR);W?<flET7kDj%cK<eDxeI8J3C!V
zRekvKiUtN;EMu(ILq0{?-&1uJ4{&dex6vX^rpyBL@qihApC{h)`m?nS3G)+)E}xqp
z`;SQc4AqlRU>YYJvZ~(HT*KDKKf`yUNBr%sRcziNBl%z!@JEDJ<MfFmFOeVTm%`<J
z!o%K44@co|I~9KEh1Q72c06n_&O2C-a}pFHQ(oWvmT{UptUvArq+qkhZe}OGU)JT@
z7QO={tLOHV5kJqusTHPDH6Dh5m6)17uAUR+NxyW-BifZm)Lne4!gyQWB@c;H{w53;
zV{0ign$Jp~^ZiUs>ZX|S{Pm-0hI!+TX<7GjdOlf&Dg)M=tULh36}YX0ultrvy6N@&
zNlN%!08m=t%3L~K;U@+=X?K7QgzT4oY*G-Tm@<KA(8qU7d=KCzvoprf;rPf5S;C9<
zUr2JhFU=IcV`9`dPG(wf1N8I~TtKHg!0tLPtM%@0F&~q?meyVq`f3tDX+8VQ?-E)>
zP1zr0zUSg<ivKqMGwKG{gJPBWeeF>2b_g|8dfIA~4`Z8GGGkW%NyN+1U~knRQQIp~
zOgd-LIzqd6@!W2u>LL8~G3YYP2F&Ek(klz6uZMtqjk>SwHAGYb6v0~^v<-2o8`FJv
z#Gjq-;$qYK$iGqa)QrFmSYM%iV(|tmpt*4%BvUAw!~BZv+l0rT02-lLtF%mwi6#m=
zcf9(04qM5bXUZDi^_ensOfSk|H!-HGcjWn_y~L=o4?7cQeXVrh_w0}P3Y=>E9~fwj
zsTu1xd}MQxk>jR4)PD*$1Wqe}5im>j1ByR<IGnb6+lw()e96^?sd1&2JnMm2nSM4g
zA<5|I;0p)v+2&RCJWOx<=~4thO%f3`=Qf{^3XE@-Z!%7qzq&w59^x+V1_4^h?Q(6L
zJ2F81uZ0zJ4sykbVQX64F`+PUnD)r}x-{kM<I)og7p({Q{^K`UZ;es|1|XEm*YS7J
zEV0=mZG0BK3@TCGF9+tk^G&;43YuJaQc*9?5u!fGXfTLA?|L_HY#!?IV=5AViq*&I
zV)L{JiHdd@1=RIb3yemm-_S$NW*C1T&S0-RvZ3RVfl5xx*N~dnb6O_{!U=le$olT-
z4iFY{fVXM^%CG%(Yty;t(^*xT_^OjL%UcH;ZzWSm7007xRQvf%<Zf+ly<<8V#)KTD
zV!Uf{99Y@a&CbD~+?^{uc?BmCY%Z@gMNQZ;y{}iiiB70t5BC014vTfe2N2$Q`ctef
zHey_kEws%IJE&oI{RAFr$@u7A(GfvK$Y$SqZ*(>PYVCXFy-fi#92u&&7qI9RobW_a
zwZ)bD!n;nl6*B@7@*eklxhc9;8S_eT#A~ptrG2uCXF4l$z8v5`*2RS?i#pB41@n&9
zyPo&$)C$=%k{A78Kh<bC@m}ryy;97j0a>z%*c+<KwX4H)JDiN@)I1aind*Ct5+PBz
zw{Wg0|CrUw@ci(>`lHnLooFv<jM(l;xhh8XC<jvn<jQPYfN4&sFKSpVT(chCvS<k@
zOisEU1M}RtnFk+=?<uyDxbDcLeTi`-et5*#Cie28!&?pZC$*C7<Yb9jN+gJ=rK44>
z4?{OXYX!n67HIq4$r|G}Hjm>-H+4vs*;m*-Ie{(-r;@8ICw!0VM$$O8iDMtG>Ye|p
zTl-*I%3K}TSvhwd%Z#b9E@<|!hRa?4fe3`sW8fcwz^jW>z=m{Qsqx*^MSnk~-VcY>
zistco1)&@)Fv%ur41q3JY~S4~+++_F(4-yD1mwZz?Ge}4we~jE66Ls`^nL9V8tCK-
zrcq_5e>gpethUJUEBT39rvB!NAC{J?4}4K>+|vAlQvhxHu9@CDHt0D8iP40?^*<v^
zf4<V5Cln*s7L@sb+Enw;O}WCfE?R-8`ICc%W2r+Bcw#@m{|ng9SJ3SV``<ilAmD&W
z3;t<5D^AgOzgwBPElGE>?u*=d!-v;_n430i$!`yb&eLYwBR-ccog<E0aACPAdabd|
zeiYHDQ6tm(v1iWC*G4a&4;X0}yM+mh?`IVn=3drpudZjj(4vOB?K`luf@p`#Wpu-J
z&=u?3CcX_Faz1k!4#nbiEVLU!@gkLvn4S-)KCjpiyE<b8zP*z$j`vy7eb{4y?8H&-
z%R)vf_9_w1EhQ`iVTV46i&g?Ljn7<|A}+7d2yY@|0;Dj`)4h7Hz!Xs05;KQHN0!>1
zxrK)H^ThsfI`}o4_NN>Z`D)EU@Q35;h_iSGOh7%(7-~9_jYXi$W%hN(ziZhb8=M}!
zZ)VYNE_V+h)s2r!RTNcOm3nM9#|&Zp;RK&c1hf_?x6st`(;b5-@)oK^xA-r;&g)FL
z=I86R)7gleox|HbL=Ye~4s#_yRM7md$2-zk;54J6Kj>YLGT%7UWM{VZl@CNID1YSW
zB)_#LO33$tkxRQqz$k$ngqEH-oCU^I?8kbx8g*iPlQfzq3<Abo$~=z_ZkZ_4pdP;s
zd>`%n=rd&%=%qg7Z2${=6|(1qLa_UW-@--#@>}i7m!`y|MEI68gC*TwxY^USPJa5-
z3-xP=kLo<r$NIb*=qS&wqL5v!rbK2$dt8)J^)}EffLv2=suTB5sHYU}vre4zj*6)X
z%uJ6bpTn%W4K`JQT&6iz_c^ez6-205iybQLo#zfoGjaHAnnZ11JNKPfUfb*6Zi%VB
z57b0_b`7>*ClmlKc)&L`19v3dH{~0PSlB~9znSk}>vpWw-zQFe)9lKK{ybmcV;@P9
zWg_IvBQD``x_P9IEh@#^5VD}4RV1DErsLs!zq3EzmqP6*Ig2mrT+k0`PvZAJ4n83r
zp}x2&m=`c(aXG0+9`vf^#c~`YUJ1g`7w_QHcC{*YY6`V7`B>#slGggbe4xS8>99F<
z;hh0|KXw#z2GGu(?t+s+#jxEgF9Mjb8hRM=hBDXk9kM<8?ZyUPe3bAD?uX7Ub}?^K
zv&_3L1<5{NDx8p#T!*~|*l2ODTBuk3Z?Ei!<HZraho&PHiN{8OOM8SD;hoEx)sPs^
zi{vmW?)&%Tcr07vR*bs!D4;fIb9gMC+Jm<z)7wJEIM<#p*f)0eX{szqS<jNNmC(yF
ziFm!v#f#XemHk=x(Q9mTF4VYw!V~Apxt-u7uU^3s)4f+Qp?uGT1g(DH8ZrMwC7fcN
zB1Nnb$F@^Zj&~#ETGx*3D=HP5JI<-+F%{;J1aH~31^c{PS7EWzuD3auan;2pZ7yir
zYkW0l)PDeoB>aVs@!zpvI7vr~-NUZpZuMfQoZ!sTt~6M`PG!+4si>QE$w6WZw-3cK
zeXhITh!nqWDQ(6Ow%}2nl27q7rqwRHUdBJeOb08=;mME~^FgQv*2W#8HNBTdX9iKd
z=@%uPcz$9XJ|Ny-fBu5dGpYfORyHF-QJOv>0>FepzcMsAy^KvxjkjUSP;xQC(VwM`
zJoCjSgs$|_x%pt`DDvS_yg7!UeerC|2mmr=as)knk5SLzOe+#gx!&f#mR+OqW;EDZ
z@Z@c2>_*G+>Sp(l#`00zimvIsswjQpB1SYVS0}NSug~>MO8zZm&-=61^xMKKVsjcR
zh_0Jnh}mfkj9r(ArLK|wlvEj8TF^43Q*k@(KH`ZLy=_WCLZ9f}-HyGFm%k6s9d;}V
zqwPK-!?QT5F;qlB40&U$r<GH&Q-M}0N~_RUt-V3BKjLxeyFBmq_RoSdw4;&2hjyr4
z&=s(quH8ovi!?maq%m@360h|%Z)Z+h)s-g0+4VU6%I77rLGk3dbX$*vbP{T~7Iq6m
zfs&0@R`7K?GQmR1;TL4`3L20VP9s9PKF)TGl?m<7qPLmW5^q#?U1C;ya-QlUu%t&J
zXYJ?qyTxV)R{_%oX6#`1TwEOf0)}o;wa~)R8`DaB2cwzR<ZF&e{#2QNFpAKxB8ht~
zy00t)RU;HD3*h<D)3H2gWT@1j(#37#oM>H`>&Cpe_~RErxv%f7)s5Y-lo3!Hlswb%
z;rIs>f})*=bTmn~6(E4{PK&py@O`M3W$)@WuPdS!9dMiJ1>N9vcg4Gp_Wc6|_Opb+
zM@|_2Sg1Ir2o<-xzY1g6=9CEOC@I{QS1%|&advlh%{k<awS%NbDwLMU^--y));rxk
zLSS1lrtK!2c`G1ts9f9BGwCu(SFzW*v|e_F-R=nXLv1H#_C`_X=}qA}LH2Jqh0KFD
zC$nfU-a)HN7q^t9(e)#(W75s`m3uZMF{5{L!X6!zONKoR&Zkr1id-h%fV!Z4+MOl!
znlS9LbA<&J_nhm7__gYtzG&Qkr(&z^YebUr@I#bDF}t1HeLx~3iy4gwZlmy(?Bu*S
zbqzxkWmD9{HSvVps(DenrCq!4p<nH#%-At+Of>CVq$A4*><i4!3d<fjit@q_v9rF9
z9lft)n#{JYO<i)NOAh{8D%im)bJfyaUf}x22io_Qc2<n`=vTmzoP2<65noa%R<B21
zsp-oX`?<-g#wO-Z22kvPVnRRjhW-bLVsub~1r9Jo62$fco|CZp&5uZCym(C;Tt`bf
zM%397C0D-db-wcYtDei;DTZ2iuC|QwwX;d`v4&kFX5Sy_h!n{GOFII>x`RA`Ez$$*
zN}qDr*4XE^Prsw6sf-@k{54tbZ(S)Ge`{OekS$+9MpOZ8Cj4##dDq3M@^`;|JDdGL
z_6zu~i8`)mOOy7-Z#*eJfPshlZ#*lEKD;sbNkxPFYV?ct4Z({4Ah0e4V$;`tA-U4x
z<blSm#5SWcsr+jed1KG0dk-p<-<{d7YuE>bEbDxtx|FXr)=kxO+e@=H^uORbyrm_G
z($ae3?aI|R%WLF8(nSughOprod@!^tF31&g1(5UmQ0vCCtDYnB15sJ?1J1*(0lW?-
z-%5Z=9bI>E_xX^AV+EE`@*E>(z43^u)y)X>z-2mC^>yxR$Rc9Q<GW1mZ!J6Lci89l
z0xS&rN4JKCMtA&ab2`<x<~^!0_tL~drAs_7d^<ZmrE0imM55{u-G5G1A%o#SZuQbR
z{dh9jZNzTx>fY^M04`VYtv-V4cur^k>Gg_1O>P5EOTTxq*;#80kl3c-NNHMnGn0kw
ztDGB)bCi1GEKZuKeb>T$*_j`waQBg2Ree@%>pBCTb^3Uv2ngSy%nfsE>Pz=0H~nwf
zo9^G?eWW=WZdJ{gFq24otreVqk0wTxGv~)tD^h-a-%r^O=Dmb-{&5jq$0D<-S_FMG
zfp8Up3V41Uj!ZL3>^PdxG`L#VIDb8@RCB@7$tNan*qG<h=GkdiSHJ}0^R)4V^d8`k
zXfA(Jal7y<9+@K3$#>wRV_bE{f45k_ITG`Zztcjp9*}bBd9hn~B@E#w5=wj*)W&hi
z@bO-R{?ubmsdP-L(194bxFXMWwUta4b3KD8rb9UgWtXaTRdaD&YK!}|;?d~Po3-ag
zBsaL(VyCzSiQ*-<GQDHkh>g=k(sgzB?U&XtdMLeExDCeA5m3jlG5wT!sN+M+=fJ`J
zc>RH_`h5Pq%V)^WJg0GVrS?5;C*IKhpeS3a25k^#`()(4tCig1;zN3H<7^oX4Xq+O
zbha2P)_qzOu6(tZkW$>2u~Cs@Hm4CVs?;^o&YPV+qy8zIl!2p&ijHL9{Kp@9SL|6B
zeBs?!x+g0=2jLe6tq)nx3Vp88*<(#BeF{;iKG@`L)v218ir6^-)L><gsUM}e?mR3V
z1^ms(x=e-|!@rw9+%uX^c!ADszhQi}^G1TT_$z^14Q$>Q&eHfI?jSOVOiqFXL@pxw
z8zQqy*5pZ5jqFRa)C<>cl}W8w^Ntxg=d__$v<yfZ0+K7C2D}$cz|+eyf?D<7K)>1J
z3i<ygxb)AT|342hP1sSBYqHH$VPDJySd`~hiR}qLai$1W<&YxmG<<6YxZgvc>-~po
zrbY1VwjW@dY4kMV)WcxrN?m8%A%2^J!%_VK_gerX6eFe#@XatqH~R~p{X%k6iu9L}
ze?`ZXmCHLORc2<5?gDH3uG=CqY5xoYFX6LSbqsH~7bJu9Vp?dVF43{@I{e1-g{JmW
z0x>zAYNlt6C%*03Q{mGtL@Ls8e^z7Vc+VVtf)_z0Gs}N@{X#0nLW)``dKXNMZX-#(
zrt*Z8+}t3g$Iv0C$ZGPtv(;S;O_CkL-YCKGig(^cGgC7alxAU=0aa}D2deV(T#;jl
z&7~txLM~Ada7$*a1lvO992cK1V`Q8w)wfc+ZLMm)jbhTD708JO+R&gFHF;u?0pGu9
z-<$H>F^6b2MIQAW@nl?BapQT=%|#U(n-7TV(W$mGefNdlDz0pl(P;+6uT+4j^A|<L
zpPBD}{v*T|73HUCg4#9+<LNP<5sH3GmDFoOlJUC_38QgGtAzJIejx$v?Bhg<g9w>2
zztSPXyW(wmZ|l@=#x$Ka)(;c&G1v3<-Alc{+4i*hj>k#8nmyddGdiItI%(%9R|_MS
z^@1z_Y{;wdo>X^0EpgFtE&zhN>wZha;SxW?3x4j;;BUeDs)!{}_!2EqWD8u=kcl2P
z#awR0MuEaWx>w#?iOqU3H`uRa!i=e#QwD#yXEsj{A*&#}EZG5{uAB{!!PGt9Mlwi0
zc2iuvq*kcLby880nPYh~RsXnc8=o$AstS>NkoWd&dU8764@<cM>!l4{s;63_l3rp1
zU;z1;AA^I>nufj~_A+&)UB8&%bE%oJX}boV5=W)VW3)Yu7dOH)77=3su#3=3fO*S`
zXhuF0n>qf7tcM5PODTOdhBq$AYltfsvRU>RP7#&!^8vqj8FDE0N$y}D%ioiO)Bj4m
zjL`rr!XDmg4k6MhMEQw;9)CCsWPU2QzuifXJH7KfG;T0mb)c>1Y7$e1s>g;H))Ijg
z)FZINiI0AOiIN(>KZ&_*!5O)@ZqPq6QlQEM6bVLmL(mW4r*dF?$u<mc8}~P#gw#*y
zKiL2?!d4=Y%LU-??y!@G00;;>1p1o~BAtqJ+dl;e0K(u*u=cyKufR^>pa>|hU^2je
z``LhLeYKD`czvKtCImo9lK~7}|1Tu#dVq$=Dq!;Rx1T3e=0;J7WAlLCEkMX87X&KC
zNxzT`WEBBjJRnW}{V}9*=4#9|Ru~Q9<uc+$;HVn>zmTLT0e`FL(LXwf(@|68b@tvh
zaHrlZ3-RKG6~yaBCJZPBY)`ufr`<BCh*`|DBIFe#=XRIh@nlgW=nN0Jsoq<Xe!kEh
z&;p>Eo}t=!064Hb#)uG%Kz_eHy$avW!I|$s$bg%h0M=;%+ycyfZjJG_X6Sl<rXUW@
z@^&gP$N~CZtPL`zByk-`E9KuFpcjn3w<kaRm=No!#IGMWRM(nj7;N2dKHBdkR5cK<
zuFB07vh}EeXF1@auO7;7PXV*v32UH;t9zp;xWYku&$mUcs9?a{%)C7J5yRclpn8pr
z-P_L21EmZuT8H4+ECsYtFL<uj1gn67_viysT6%$d_w^i4Bj>11ANqEdacYQ56~$%R
zN4;gep~P}zH&KbSLB<gU2#NUnwMC(LE#K3|5LZ5Mli)R%#Q9J0!FzSrp2F5^<mPcB
zPO;dSLI7FhPN?w$o)bVH<&(*Dr&z>M{LP7P&y>HMODhq4^z^FNa|!*KQ1f|S^A9u2
zZMipT*_=SNOS`}w<FvBb1)t^bPPOrJww>3)Fozd8LY#s(I-t}WEIv<EsKc%sSPOh4
zn_fgZS-VC+>h;=(XAgU2B|6co7Lk6qdHVu;L*fCwTrBGq*-KX9>UnH#8;Rwn-(IGL
zwcQ|gm@Gv@C0F9e+gFA(ET+HR@usxNyXw2HCL!D$<+k=Vkj0Jqee8|Ph@^m}m*FAb
z0=S`^0GR&6Hz`o~fjPv&e8<~+y|QBeT9k8(zFD@<E9R?jh1gdfhtvvrv95vYk9Uzc
zFkTKDfp)+Pu4pdnF=Jyz#O{yfX6;$F)@MAP9N6_AZDJ*P$c9)12?rgVrr{a6+q14<
zU*Ke+GH9a^U=C`(n{#)cS~FL@NYq~M5pmUITetIG`(evhiRXaKq-kQKY*u%W(FfS$
zZZ~fK;GOP_nm*@7p=)&(p7(`@tC@Uc%-$}OMLWXUb-p<x0w}OF2cI8hEy7qG+%<Tf
zv2tAbj4l(V3O+A)_PH9zeGUygqjQT5s9i+HpH<8jUH-DW8gs|QWA}qUKb4uCLzhh;
zl{U%c%4H)r6C`=t*?=w>U1c3@1gLhmr{}mcsrUK4l#(y)UO{mSi``N&Hvq{nDn=x?
z+XIj9OFd$93@g(gBkCkE9Q)78y=E<#kddE_clXjsdgpkf=`tfa0x3D~$pmYxZ|*Sw
z1WF<gFmuzUgjl;eSY+DiUbQb?i7~ar%}%JUM*4F*qmWPNq0dSKi$r|s3>^m`kwCi~
z*GUPB23>?|(L^=sV?$9q_0zmK-FBR{z*Kzbob^0QUzaUcc$>$N|BnJZ304aYT8WDj
zQD(FU&$u~Vu^Id#HdpBa?Fn1FA1*5~U_?6qZB=fw9tu2^$geggsW)O(G0Lb5pu6a}
z$ft=%2J6>i)no6Q3D~tCoTnls!AvHDO1ZEr=)6^pPCa%#G_uS4^mOZ`!SLLxYijmT
zEFb;8G1Ft0gC%y4NO>+mj?Z#<7Ji42qfFro>jHDyz#-_x5Z*6o;ht|QHyeX5x13~c
zIN;ON9jr5SOK(&0Xv$d#&pX!*LS~Ui2X*1PFP!oGm_c1nnHBTgxW`rP)Jci69LJX`
z+~<P}rz#JL7XT74U)oCCDefm;v&Li_=?uU<Og3WzE)1by=hSH<i69GAl&+{6^``LF
zCbW#<D)syB4=L(C^2KD{Qx8sqHbC{0fLu6jEEvF!+0NgL>5gs;?>haeWg!!%S7UXB
zSx6={{-m@h32D#fk#x!nc?&RN+bWTu0AZ*Op=`fArnMfQ+Qu=Gn=;s1QSnxgcf>4E
z=F=YA8tI`cXD(7*Q2ApVBO$ki2_uZ+Zy%=fRal{jfUVGBymOB9svWbQRT1C=O_9cJ
zz6NhyLphZx6smlO0xbS8IOOLK|DzG_B(5uJX{MQYQH$yA!7xtKHHO=AkWf@|knIkh
zIA(HEBSgZM3ye&ZVus@O_}5Ie&$_b1=3c&;iOw!vz8y=!gj4uC{Y}2Qv%@`Ltfd%M
z5J6z?B^pTU{TiiN@Ql;MSSw(wkO9OjR>b_a&_ysEY~?j_xe)o?19b9e5{w;kR|XMc
zz{`0)8D>6LeD+w6>?sG%z#JTJlbeUIpVwRqb~>*4n>K0pg<8xnB>zN|^Je+)#8DqT
zLNpNlWzvqOHI9<*UrhmRv`z3sQi3)Fug-G)P(LxvS)QJQ*WFBp`Vcbi>>{`@d?g=}
znI0_7OdE4u(q#QA;kNCxyZuK}OJ={rCR1!8_flR|Y%Q@O?SMRk#-XI#(ff2N|LDl|
zW<3i{)tx&59>S%}Ukkz(R(Yrr(pCq7wO{YrrALl^u)p%TY*~y1D{0a=CGAA8eQcF+
z;HPGpIx??*z*C*Ja5g9Z!Fkst67U3)N%;b>(ffoAMNjv_=fSf`G(ws(YQL5M|BSQC
zdYvhI60z<)|8@_$nT_x%eIFMIbI$bF>)@nE5AU(8^sT}lW3@{8a@I6^!1a$Id$lc%
zmx>ocJ98BGu4-BbkiSgZtw#gck(Kav5UqX2A<rV$#3kK_M(^%z!0%679+qrg+xJ?a
zYk2M*{p5*}EZ~&si2-#Fzn%-I#W3_ffZ&zKT(2P;(D}?4Tp1QrUUTCNW>T_N&PH0#
zj0PRj-4Nc6G#a4|0$XV#UoTy35i2APEy`g1DvUwYU$(`&5pQzkA03VBDy_M*G8<l9
zX!}^qp~@wBZuuBg4GC&7Uj~I|36DW-3iR^LzhDuDEn#xg4t_5_*~+n08dN>^FWZ$D
zcxJZ=?gY&acC`s%=`pJ-VzIkCBZZivXs^L9@>(B-`cQM)!a0rJUt0i>Z2v{KkPzc3
zI1|$0YXH4AT6A-nSPejDzB6lOYgBaeIONnw-I{R(JKZX{8_Y9%WmWDZiA`fmm(h%N
z2p$ctg-5jr;+U}-%W)yk)-ariY{*uvSw)7OA0ekwbWN=zF<9$zr8-JHp;;OTkByK(
z{8J1+Q01fzRirE!EZ;t6VW#D%1Gs3tUTktC32y~>a7NSL^`BQC9(Mtbj$mdYr*G-l
zz6RC@9HU6j+jy$er1<r;i^Vjeul7w>janVa8M$YPTI^&iaN6&PD9B}`qcA8oiwB*u
z9|+}GQDR=z8Q-hdzqrkE_@!;bE|lYwCS91EqQt8QC+^6X`f49Ba8l?kY!E8$5*Bv{
z!yi})m2+6}Yd@FZBi+&Ra^gDZQrE4(khKRdoO4x*HVl~#jUtJ{0V8l0Mfzeq*SIq6
zb{h@G?)9}y?Y4;@f?P?rs=};oGJL<~iB5x?7w8`-x(FZ2k`Rv7SjZC46d_WGaEtRJ
zP>P&1CtBKBRVzwO_uY6dl~4YrtV%V$sHW||DmVfF+kXv+{|`+@ZeT~zOGM`5j^RsC
zDdH#*h@!8;CM(*e*G`D{9&;>OV2?ewTKpZd?K)ZD3`DsdV7K!*0&NFq!q>;`wZb|L
zUs{{Ar!ML;D>#+;R*dcJktBW6`8?WUkHN%iaBo9sh@Ug~;J6#6#G$5&)j`Ko;jKm%
zx6NxCPb{=nDO@QPSnjyoIMbl+v1f4}y@bEoMmeH*^(d|Z_H^s0K{;SpYzQZ%%}hvl
zZf@9Qi<P(jQAxHc8wj`?-x_=NU5vyRU>{CdT{p>3aKKnBn3)XZ+*Rc%Z@V3<^85l@
zBNY=#^Xrq_M=Z4%qu6itOEGOqFopZoH8p6%Odb`Hx>OS(vALqC%ZvOiqVo;KXX&!f
zJaWQj?E_IxZ?3Z&Mt@zAw9<FJQ(1ZBJ2usvM&KLgB2JHwH`XS;hx<WGs#xG<`uAZM
zj$AvJ&iMMQ#07go!ify8+Vj5}_B)YSQdX%BejqvXZPtg2s+ylndS=ug@Y2*q%L3Xh
zL&ea0&O%=3)}_FQda);I&usT`w*;YyG^`NI)H#)#jE)xPFrM&6+Y}6|RLOj&IF`Py
z2TA$H@DQ-?Y)O0>Hgnsvm;3GXjOT;*!=GViP4I?<gchS!80BL;cq#zsSa)aCf3DB{
zVvnFYm@_jMSaR}W4CUlqC`;F7?EO^w_&vw*E(TqMVl;v=D$7E3tNi)9k&KHnQJM~8
z9z`?rw?^ka9u%uN$h<ckh{<8OX*qc`39SDVK>3jzTePyd+qpbUft}V()10aUXr3VD
zspn@CPMShLmBe2FNyt|nNv}a_Im0oX%ZT03T!#$1U~nDOw7`)?!t85f<C%dM*VmhU
zVApP4kra#xQt1-hO#CPu@CsO)_%GPm3cNL;_`87eMXc3|Me}m8-9)+F#E-q_rl!45
zsSDbD68p!DBnB?>^}J40b&EmDodQr%PI+udJ`{q61xx8sXqWmLnKZT}J(?OtXF%g~
zZj?MY^KO@ft3mTw@rkCZRc=%ekXaGs@akyU7zF+%oI+6*rNlMAKklW=Y3h{R^j6ed
zl-n+Dx0o%|*4CE&)!u#LCpZ<<dHIV%cYqSMi2c6rJc_UMv?0g&Y?PGujnG*q>ly^@
zn#Muu+2}0R>h}aCH5NeOPLSlUG|fqW&x1ikfvspyDv?J9ia^TS@Z;zm*;C*V0I0P6
zscVTCh?_M<!D0eH4iMIRY3S+4nAlV(tfEEML~1Hg>u6RnS?17G_xgi;$Ft6x*~w4m
z-77yeZnyese1rtU(fOYHmmL5UFL)k;PQj+#$It*lGZT0yyFO0q>rLk)WI>irJm5{@
zgF3WQge0IhHs9$3p;H#e>(<I^L=c4>ql%SNU8n~greuguY8w-ENNm(nLY|PYJn4ux
z2yh%Da`*<*&Jp!yPgWY!RtQ;jOAM&jE}jwjujhN8ahtskY_pB$mg!(J*7vAK>fj+o
zzG;T&shmQr!sEEk)1fXuXT*9vONq-`^oK^fXic|x(kGI^?&oNe@yu8*puX>dzm0-l
z!X|F^WJzM4#cP#1u{`f~v1T4UaZ{tBhsJlLTNYpFdHMFD5B&zLem@xcw5$`ogwhMH
z^NXW5$ID}svdX*)=SZeynOWz-@vkaRj412fZ}MclDTl;vt%72~R8aDAtVjFHsT8H~
z7*A8&JMhC`0oR<`x|Hao7X3ki88WiK?$$>}3yi=@iw8T4kzzYhw4DgbEdac{K3Ta7
zHOPnPvfo@t9g)hies$@ldFRcI4~SSI1s1*>7ngMf=pZutzSO(iZI!2&<8&%gx8v+3
z4S!=B=>(0|+1^i$4%SPJ%;SIQ-w%X~!*T5qpw2iBs^bp{;Kh!AY$TpRQ*?5_a4YOA
zQ*<hJG<k(Pqs3d3`1J8n8C5Fl!}@tQ%3TmGlm+#U`=|<FMe1tkrc%>F#-%Rry7X~A
z+IOPNAIN72j#lAXK4v$X^aKnc<j{~HMYsHb88A}-SASm9*QU>*1-Fa7+?^Ns2EbYG
zkM7^!eQ(pr@hYf~B6@itU<_8r9G-1v59mXFbavnA!So7som#1R<mg^YGf3OBa817=
zet*$a(7y=51^*zBy9|{@FLf+2BW*ylk~*VOt~Bu1%Hmb<A%D@&5K)2o`%O#N)SoZ$
zC_f!e4f06)Xg*{y3FH=WT?let(@tdp^k%2_K9fwgm9s_1=MAxjt<e^RKFwhkno95Z
ze6o<fKJLY0;7hcBI{rjq5UYfuUNMmGz5VvA%V=e|*VC0&g-3Hu$&DGW_3D<i*=<s}
zo)A8F3=Q9ociJlz$O#0N<WDd9U(Fo<)+J$jHsx=f>Azl4{sgQ?x<9XELr??&AvK+Z
z?To<)Bvb28v2dbVs}Zpks9cRuOHmkP!}=(70Xa&0*mqKs$)w-D&6K+Mv$3cBt3U?n
z1}8ZHuG9ubSK0xt5Ykgd`~a<jv2N*f1NNQU+{W=(^5ReT`LZ8BjT0<r>0sxSk$DPe
z5W_VD12zh$1ZY|3czb~_ZIMb}Ba!v4e3BkWJb|PPH<xfdrxBhPK+p<)u6}cIkUqFf
z$iv)nE+E76K*_hiLjQ0-5NQhvS)%n4_))>H>GVaNtVwV`U$V~HQy`o^j3d)U!&3OG
zkU)hbkS}t)nLNUtj+KZY_!i%#99d}c@*M*-z?R@!C5|RusfZ$T2l=<cZgzeQ?NYkO
z7IKVbB}ZKGj(d#b4cTrcVnB0^#@=US`?<)_m&sBo?*of33l?miPs1KVPe8UBRD)qp
zJH;<E&5f$b4fQYTXz^A71k0Zqlm8{x@E;i?I#1~QQL;_>D__n3$P|D6B_}gKHX67E
zzTNmo4)D*8q5EF=C_EuDE@vZ9ZU~UdChy+@NYH&ie(zzFL%(O?UkDT+Ro?^5lV8ot
zM`CoM?ubN&h9fw(+rJl^<l9)DSJY1fXl&pZrR2>xbm6Y6KA>Vb`mboY{Jb`06F}I^
z2B_cH`*d!i8zb*O-toS=HYV*mB6TELg_rHXo_QQW8hDn;%Mj8-J39_Qt_yHy@DJV-
z(qRr|FTz8*W*a`R6`C@o#|}1Y^w`uSuQib-E4<d{N|Yfc2(N?ZR*&wZHTHng353$A
zGz0ef8JTvT<BF`dja<*8Rq~ZIQk%G&+*CKV^UdXBX)}1$4VB__C+ha>?$u9zk19qy
zbG6jU&SJTwcKrcK-`8zTn<uJg+3Ag(PId?Yb;lttVyP;vYX=Shir&;`iCySco>^A_
zlB~lwdrkGvY)u9AJ`Ln?ttV5^#1P1~no{4DZ%s9|C3rFI#q4TH0jv(qf=Crs4i2rK
z?VgZe;jhnUUQfh~mfFk-<TjoANjzmbnFi`h%n_A%5PDPhupn6EcDG@}*c-E+4MEF<
z$LB0w8htGYaD%mLvOUlv$KUkl;7%k!jU1h)%ubwdMCt~iRp(4u+u03o#swuQ#Ug;(
zcrX+%;Jf-BT@=xxyV81hAvJP){(-~AM%9FCNkPOx!Q%?}Occs4PSJTqj0LNWQZC?Y
zEcazIgWqL2C@oJ?y-cduk^k-a_KBqySEUwL(qUT9eiXcswqsD~+&D_KU<FR|by2wW
z%EKL#XOefNHr`l$O30VHRcv7(luz7iy%DfWc&B{E9`I)kWNUA-$PumfdEv$TgMjZ}
zZyov+S^Su9b0$Y$#e8TXQH!s(Fb&*Pv5?Xh{uznBr!YUb|0+Nt<LHGbyKkrd{V{QO
zidknG39Z$P9prNMGz&AlX8(1ih>g9Cc7*~>v_Y6l9>rOYvG;LUeu`7s4&ZsR<pFIp
zY!jbQ?AmFRIVzQ7=hScFT)MlBT+a2O>%@udjP}hq>`Sewh73|BL;arTA8NrU*1$EM
zNRCQT5wK;2J8H@>>=C-Vf#Y%+RWIF5?yQtmWEHYYjKTLMB_t3+_f2spy-oz&&B@v1
zzFzr#@1As)eWPYC-=gtF0S@o}pn3`aw4=!S#NH!uj7V}9OCx%XzS`nd?j0J**qeyA
zE#~Oq?f?NoiZV6c8S6b-6dS-|-(V6`E8!x`e8Z$9W63gZ@dkZ~nv#kaNGiQZp~T$t
zmS0u~PRGnl17|`Umcb14Gfo@LkBDEy>D+PQprO^7gkS|yll)O)7x1#^ws*xGLe4(j
z-c7JMRp=ej+(e~M3>!lEVO;KRz0zG!O>9Uu)D*jdyAHjF?!xpx1qfM9yw49z&92Q$
zjcCV`^$geHqQ7&&9}=2i8_@tj_85R7?oLY7J|s4iZNCiPO~+)dq7a>kT`-e^+UoGx
zsxhdVlPL7-w`{uz8;_CnS88u!&jw9ACx1cFu`}Nb+>)Vqo0=DEPQKrjq*JeT<-%(n
z@S&ec*N9%)tL68JgGdff*j$Kiq%FEAN^7*m)Y&ESmSnb*X`LFGNSk6ar@Kr@x}#nw
zx$VzGLCujtQ>IS)G+Q3+*KT+wa=Shu6MUe0HYCoXRq%sCaqBqYG9+gMfR<dy!y6Eg
zieNwlQu0e>OtJ5CZ6T8drDoSq7i&YR@XAMIz1P}aH4=h|<R)fnUaoGH`BgPdpgrSz
ztoKygFYiF)pzi3UXhl<CiIStgg)T!F8$-Mz@X~L7NY!gkMd-VvPu%9d_HO&)rlOtn
zkKRCKs(_KK%b2jtN-T0UnJ6^6$2!#W=9H<vbfJt};Gu-hr#F^tT%2zT&KL3fF5TBW
zpv4OE)bh6@FQ`$~+!9qZeadj+GMcJU-!ibSc~iQ&^d}WGfKkzH!S`X;A|HCOz(rfl
z%eh1<)HJ&<LF*#`t4PL8D*&e_hSR_c!p;+tT!B911+VZlq=GCV@z7=0Bl<`83wTc)
zu!3bfn$0hQK}%RW6@bq5vgujI;yc&tTZQRWZO?klLP?GD&4ld+!M94?4`q4%gLNSf
zmtdBFi-#h{B4HI7zC>Riht>3Q4WC<RdNRS5?s9cuo3c$4mL)qk%kM(UPXf;fAcYE!
zz?=sG^Bf3vvwZtXM8o9b=RwuTZ<<`=*=oj&mp>bmQrCOmI2&={`dSKZ@W>I%w6Y(>
zNkZ%&^HQJwLG1$BqrPq6Jv?uhqRydg+vSTxiu3bE@MK_M5wL0#ZPZDd0CAX82)+W3
zva?HF5$TkHd+&Yb<bApx=3?fYK#M;ZmcIA)gOwMItF}f-ZyC8e2k2Q#pTJfaFjMs}
zmX8|5d$S>RK6KsBH3_>OOjJ?P_}68ra@SSnA@_{hkpLydfi_Ye(mn4w+j*2U>Rb*r
z^?NqH)l?8dTeD4{+c%%C+L1=DJY}9;lPpjXO;|S_b8Boea~=Vr{>aZ1i&TvXFxhb<
zF1$Q`UB5|4gV}6mKEj+U0eEJ4#apw953h$mt*#dAkI&*w_Fpu(=OeLd(c;jXp`wQK
zB+diiqWW~Cjq7aM(3JF$C1m%@J>o}Jo)^3dS2{iw`pB)@#y`&!DpH6f#Nfz@fXL2n
zd$z_ZjIEcZ1S4q>&hS~B$HeRbZN}Ve6Ek;&4an~sqCau#*P*n)(B3}}ee|CU1qiFs
zhm&P@^FQQU2?8h+<4b?mNBFN^B_8Kx$c9;8(%=H%`B#IMgK6(O#-`;v$nzFgk3P$;
zYB*aJxhNe$s&sFH#QGRjXP-?KwNLw1{@q?fL5V}dw2${4DF)q(T0Cm^mi1>Y<<lnw
z%WYIH&dfR**34|nV7{2@$n-N7bFjaZsN#w%E_-yJLU3jOj4!A|LUEB1{q0?DKbO#+
zjRSS{ljoXz(q<ubc3+jq1N?}iS=_ByFWf_WBwd?~uQ#p>es1qOt4M`)@Wj&AoNb;$
zat7+)OnJ~Jn<O{33|TfxMo&<o=R+OZKE0SFCZx4%B=O8t0B683oMsmfTYYEyX?|;I
z)`G*yyE=7~Z13Dk1<E&CIQ`~w^88hU1*84_AjtXQ-~cjT;gz&5Hf>#t<6AW`c3M`k
zD8}~+Z(c1l>!r11KeG;cd*HEOcYlV?%+%E4*3_{Z4@Oy*yV&NY94kd^{SemgV-^KE
zI!-oJQK87bM)P8`vB~XckJ&F>h%w^YV?Cu+H8*Dr1ZJ+uyZWheTG?E;zl~>@T=qNW
z$N9G>%`Y=O@e3B_zKwQ$r6}nn9T)apftqh_%{f(A`*F4wg`z}H>G_jPM9B~7F?|)*
zIg5n+uc^7cYBj+fGnYDqeW!TN{!qA2nzR&&g&n1038<)J4DIeZ+N%9@qk477lD10Z
zcn1GYXiVXK@}Q*o-2sUNxw(1ob8C$xvWrr{-0@*q(V6O44s;o?N2ijKmF&z@xl=2<
zn1!;F7#2Ko;E0S{F%wg@sQs&yX?PMye;E7#mp7mfKBDXJ4~qxqnldru{;EI!+uq23
zLpIn6rw;3f5;DQNHj}XLLtX#|)8MZ$n7;$V{5?d?zlvw_&xxBrjd7P@LqHe4rUV5e
zG$QJDP{T8wL?;9xMUMd}v9<aasmuU5==ndVuK&#I{{?BT`j)opMm+0ybR@7HeP5Yq
zFl23}mT@lj*0zMlV|UZUmqu1fZGHyl>NkQbJ-{}~$^OZwM%qWWHD;+vJu+~D-}82h
z#+o2CSCsqgDc?@d>J@-&lvBnC@uNq#Rm?Wp2K)tpL6vE{VR1EA&TL8_J>1d&26-Z_
zrTpVZSmY2j@Vm+b{=x;gt9iR&fggr|<Ld*1%*AG@LDo{_pPhi?#{q-30{ZN6SF=Su
zApf5@i4CO|UR_V-V&6gyr;xGwisc@GbM~fW-OujaETz+k9FKl-UrXeFV{QHCNVfl2
zlW$&*Z_(Zckc#QUzj`}p0HC3a*7HXQrEyxk*-y8JKm8BgW-E{=>qtoI*Xz!`(a@jE
zw}E=VE#7Ux<lR^S@Uf6fH^6iqq4;I&UzI5Tnk;J)Dt{rd)*}s=Qa&Tk`JP1=BlD*6
zRJkf5-Py)P$DK5U=MtT4A#Bd<#yKP_(iS#nh4R_u#mH|S*%1mw?4ml{^cv*uZ6Rx+
z8wxHJrmU$04-X}Z`{Zm$JM&<7nGQ7ed~nx%DUYhr^dmn$Hn~RBb^DC|c%?JwB<yY#
zPsPd=AJi&I99s#L_T0XEX(K{ECC&vfP*5-)ukiA}wk<r*Hj`-2AKI0m%@`;lFQ3Tz
zm`;`Atic@dB>)fj=gvmeP<HC&yOg$qqI+qEX>R#dgA5e0nroyfli}pRQQ7Gsx5K)G
z5}KrS>VL&Bwy_f$@sz6|4gl&!y$9vUGk3bQMHH%SdJkx8XX%`;yrOk}<<TVxJ*Hz0
zCdoL7s*DfoK$BgW(br?uuCu~)r8kmN!=a=!IqQbv{EOTtn7ip527!G<-hr$O2o^$|
zG$`^2OjCq)!(?h9>!$G>ULw&()B1fQ+st=+`xAx3*sNziSy8;1WP8`s_C7Z$y!8|F
z)aFJo)@fC2Hxy&qZNY`64f#gA23X5}S!jxy-HKG6FEUq<OqnS@kT2$ah&6sr1FCRk
zo`*UR>J+tBU>C6AD;s&}<CU-&7-wE-e;JB$r~j$Zg-dGJ%J#0N`>I>`RIjKKZsyh=
zDT!r&&?6<3c#1dyUCi3eSWjA-nV}L#qf<XM^7<yJ#DVqci>29#PNqj50HKryPl09*
zt(?<-T0CQa^xVw!b0}}*qv&&n4};H3?=c#-v>o5rs~GNAh?{Ljv2@1Gjel&gpdS<U
z0);gI+9@CGqy)tq#>mn0ZuZ&=>XC(vPXj5>v}|(u%i*kiWxE5!0Dvz5r#s(};8g~*
z#`fN8ZN^FIiX}KR?;Z5lag=^o%fQ`=9J;S1)f5%KuIRDcN`?}9CGARv&E<pNoqFQr
z_e#Tl$;!vj0=Rqp<<7c6GU3&b4oEbdhNy%EApcuww!K=T>fu(UgOt3jYs|9r8qX{}
z+9B7(tGv#WD26~vCJgRoIBQoeyoA32b2zsN`EnnPjw(w52)4zFRdarwk<`)dXczi|
z6c@T60S#7&xYp&L7CvF`vGaiL{R#?(!!F@tATLKB?q;}rGE`<DG_|m<LL$KD@vL^s
zgme3kkWpQ8Ub{zAqjfc^7v~nj?MSSJBuf)j)*?@fP>r2>G*Aolxmp0%-c47Yno&-B
zMf1MGyw0Q7mE08h3F1^p9nK3(Up)<Q(%!h(-fnme(^5|i|B81WIt`^9Dic?Uy{rr`
z*p{u<=6$z&(c0A8@QgUmbLU5F?95S5FUv`HZ!K|x9hY7aeL@37@B&C|h)9%klsS6N
zRlBcQ!L*zHVpsfmN=~_xa}qGsuO&w={rpws0q%H7G^7U-2d*=W!l?ni@1NR8M(`X?
zB86#rW*<^FjI(6a6Z$nD3pzx76a3t*L!S!Hd!%o0-aSSb*y$Srj5hp3LNnN9$jWGn
zmynLMb+DbN|8m!Lm!SP%ygkuMDtu5zhEFjyC1Rc@>No%eofVFGi7DcAAPM*abQG)0
zb}s!iMwhB}H2GE_dlUd{PM7<|Gj6YEvN3RK-|gB<RKp?Nl%J*G$d1ksLcZ(GW_I4k
z9`>e=LZJ61mkIcEWA3q>cn>ERpfAC$D}d`2D0;$sh<qY8L#aW?l=2PW#%ly~XrpWk
zXzSv?qWhHXxm<4Rd_);*G+mwjkWY|9WL20L@D6daY65$hzW6^kyZ#4X$-scaM7-`k
z;W+?5Rh2GSp*>-eay>l^`GrI+a_H}EU?ZMAx(8S}l@DAzSR#Y<HPRBifJ*J?utmXd
z_5QY3t0m;O1#;7`{!AtzSBRtV0|?l57{}@bAxN4V%^IL(_fnRDT8|zUj#I`m^vZze
zOgPI>N?M}M_}Qn2%t}HiHMSLNuTBB5FwL7Sz}jMn31f#>=si;Wg(Qaz2xv`30CpdM
z!O~y|ap12i0Fbt_1SlPQk{-SL_sD<G(*MjIvIGBXyE?v?TWu7uekLsJ7ZS^(6rj$K
zFx>?L0XC7K*@qo4GQ1xOMvDdQHf5#cLcW-46ADVeX;Swkxt;wA4ckdpf+*fsEWKK)
z0`I0H@WN=9<}zQF;kF4+bNLOF-ky?{?4Wd=%U-=paC9|435f#LeGb}Dd12Hr)+cTk
zP<*;YK{W8vBWmXnIaYC;u#q4D3~mmDvBa-rd;cEx?+N*PcL)jLw;qOGC)5Dzr5jc&
z8w*=;fpt7wUTOefgkTIMWb8T&WmT-PHZ%7^SqIYm!F=8alwQ-TSdA$8DbH8ENjMKW
zF_;9Ohz8otc^Y-Y(fgp8L*gr1eCakEpQiD<hx7==zlZ%MYW@FeLcIPKGO$hiH_tPK
z5SsQQA{2H8<}yU4i8aWDS(-QokW3c6*Q;At*iVoamZr{3x8Wv>K0{GY5;qq4*Pf=o
z1sJ&e7Jo3V;$=dC4+IbcN8y0oB~yj5{BPgZs^q&i1tizS9P5q6b3XkIyudx+Z{P=p
z2oGR?DM->|7Fob!TyfFizCGDRZRk<ykkg(y*&S;2r<}>uk`%j(6aJZVjnITGjYK5@
z*3kb@hHlgRp4ZQrO#1Kle^Sc+&*mexVP?;WJA|VY{pw<hRri@^u{SY>Dj&0{9*q12
z`Sm}z83u4@|BnVn#(0=A*8<Qk6bA!vgj^q>$`TG#Swfa@5?})R<W~a;k@Idkia$^V
zJNjAFnmtq@+Nl5%f!K60X}p3BG;yEem+^mPedV97v48*=%bXudZ&wY&2b*;|DhPIR
zr8d>A8QVL)D>W+2k@OKu*n8FLyH2A?5`LE$E4&srw|{g8&9Y~;%$>6n<hjJ)5V@bX
zb3Q$F%5bfh>#~IERkD|Mgn~{I$E}HE-l0o*!kp`NFYJnE+lw6zd%c{zt_)I*-tb+E
z=~?%*>H8BHp|4CDOM$yV9ACNwvlNSvBo0FAuQQnXI?g$#rnxrh7e+kZ&7$btf5yAh
zE8wnjR^Dj;-MjUyJiBu_MZ+AE6%$U^CGWJaCd#s<jHX>p2|7#nT;p0>IPL2g%`aC9
zje)9p_Mf^||CNF>6Ltyb9q>iI)Uk&638+JuC2nVlO!8g{$SZzKcD-s)@L(t<^;7Bx
zOYdgHDY6OK(jO-@nRicdjEweZm(VO719_ZwWJ^dd$b2yEC5Wv3A)>8!PAhs0bbV&#
z&4Gy_|FcA~FSv+9YXp!*Rk7@n=Mjf86v6aF?sl32jO2sxIZaPrYmgM3?A5|2ZrAg^
z(Csdaf&MZO^y@&t9rcd~2E3jAJXIl-xRDT@4|KAue{~N0Gw|wv@Rbz)ceo+8e@__x
zPJ<A?ni`{_4fzY#Amo~9II(E~_H`5S03m{w#Ya5{d=pLM2<$)HX3{yl81;32&2AC@
zZ8PgXGswxA(wr*3HlC&&#aO{y-HIIojOO<8QaGJpN<Q5iJr}GO^Mv11cP1p5B>y$!
z^pAcgT><^|j-USzcKnk6rO7qf*OxJWSQHi(JvJTn92^u;crYQdC;Efme(!1Oqgd^5
z)eot>mXz^wS5>(@RIVid1JdvR{f<bfNoqln{Sm`<M(c#3jA5~L#)~hOky0GSUtSxK
zec&r>_|VYW5cw<3&eNDXQ3l|8OO%&Ooab7jZ)YS`0xs+?^ZrM_|AF@wPNGe68n*Te
zNeeen9o~QZ3(0uFJHVg~xXv>_A8Eh-{2w;eQe%AZ>!UyK4DkH_`2W(se-(TFIu89$
z|4x=Q4qnS%fX)0Vis^`twH)gJdE>8=@PC5d<pKf+YHEU)bMvhzD3o*I@QSM&N%r3f
z)FiN9CjQkS?booTOaN#$)*y<2DH!d&D5w=cJkJkLQlD(F$&QbGnnvbfI@t4rJtX}8
z-DRROaR6Qq>C&6&MDh0Sw&L#358JKG%i-VfXG+K<j<J^~R1IfZiTXf4%)i^h--YGN
z1C*^kLJXc3XAI=$Mr{^Jt0H`|^2${VA`G0+2+ZL8hi&<slkvA<O<uegJnj?^^o}(6
zq`m}PEyLi7K8B6@<yAvJ>-!5XM(SpN5-DJhtc83&-rd7J!RruSwdj=*0TJ=Kw(t^&
zy@}+_hOh+<k@YBex^79T=4!^+vv2wvB$6L!uNUpYuu#x0P`SD_9EC(v0?_ug(_@F#
zl792%D}@0S`jBKK8VjDyBZk6RV9U$^7^eR_tP|+r*v6eQL(P972{Zxf5B!q^_q0Qr
z&Fx916Kz0BDTI&#{%VZzA(A@$LV|~D;FW2QfKEe>UH1}}94M6?_=C&}@a-Ny93$}n
z7Am}*I0X8B9ozg12?@YaAnN52K@>PC*hnpEAAe^27m^d@xXK5_Cm&Ah)dC&>Wcc5G
zKX3uR`_gGa*bOs+NiI?lwt|M$Y7i)uMFCv22>2J0q_2PUr8~*UH=_UeHIM!nfP7*B
z8x|u5AitkP@bAC#bX{zN5?=PmXkF|NCggY1><>&S`S4_^@Il(|rlZY+kPluiP6ILF
z1t~hgvmt6z-z%KD-2D^%2H#t1FW1CJQNQx8y{A)?)EIDE><-%-2_4MDSZlWjuorMa
z&LfPXjM9GUDchty#CR78jQ`-!&^u@RB=uqKuA!-f$$G@xH{>>Pn|R0v&Iha-z%#U(
zgv`3v0X%rEFcCeJOA&p4U#iG8L5-oa)O`+b*mZ9l-%*g2N`WoIK~;wDa0>SKf0k42
zm^Qoi46mtJ^6GM4(74g1an759LG!}?6^`Z%y$K=oj$w+HhvvWoVA0k5>b~;Nw95aP
zvHv$fAzJ29McuG-_$%lR-_BkiF^7WIFRke*M$ffGO&wX*$W>n0@{_nbjX5m?Ze?B7
zoj8SFsn6W8N`6|lwl^=Ho#jayx#-|XVdRB#gR+nR&?cXh0)(C%i0qT1<n@S%O3#v~
zBeu5Q(GnJNq<Ormhi6jeJ|+du`yas~vzY3zWlm`C!#z6RV4SYM;Y^Fh?)ZE+e=15&
zm8-8P^>#4J##dD^n7B9>7L*szZ*j9>GEtm*g1MognodRb+y?jg``HJfK0)K_yTYG{
zvRJSY7KTn+UfLsnVZYgpJck;KEib*RC6noP*0AaAb%Ur&9~Sa!L)m&RE7;6vOlQes
zhnM%m7llTlOyw2pJ=KWWZKJ7p6H}>m)L6DG!&)pE(`8k>Zs<4R)!{&RJ<b=28ji)w
zq1Yhwkhw){@K~=}9F9|;Gt*V*b%%Mf>Je=5(r24ZlEml2qir;VG!$a*Hb!jKyu5H<
z9bHk64Doar`PP`8H}6&4bg9clYoLdf<UwmcKkHJBTFVrY36KnVUu0gsDB(lKg|AUJ
z*W)Zz&Heb|TPP-}9T3oWg3XEQ(TnU6bIQOWc~#?5&04PYls&rQDfl<n&aUXZW_?o{
zJkZ=I5%L4T9O}g>U%>9?JIbfMTAq-?Ra{x!Z3+!5ahys^O-gvrQ>@LWxw-D$3*{8<
zB%Yr)1@EQd?k-Acx?Izv=t*Q^o8%e)U+sN&TvJ=VZ;%d3q=S^8Ac8bSs+5386OpFU
zL7FrH1!)q9f^?(`C`ggsk=_X%=~a*#2*pB65MqQRzI|rq%&2ERZ)Wa&bKaSI{X>&%
zlC^hs*7~ire!p+wYz+BG%A~J;>AHPpna#2{wgg&(*iz2&akgd9v0MJI6c(@zt?kRR
z)wr@0IK0|)g!&BMJBkoWe-vUaXsI_;{zef_OS5)b-u&M3_=-`+r3t>4nhE(@f%SE>
zOq!B&#?l4y_B@Yvbn&L>W1l<b5K*&I1u5l^OM$Dsx-c8Wf9QAGE7j?R)c)?wmE%;m
zu0M2Mhv3d!Z{#5)?`d(I-mOVBUC{J=w)(o>^pm&f)#bpn^^@G<G9W>|!t!(?Mbp3`
zf54=HLysj+W$|;5`X%GIr;#ew?^YLv2VIr~NB~e1{ViIOJtE@J3wC_0v~=H(!{6I-
zl%2<~#s2DeUj2kHdE4oLF?X*rHJZxab>>^I)%NWyy)Zk$HQ-2#;}fOL0CGmF!;5u?
zd~n)CSEH<~B+Z*G-dMNK)r|R+2DWj9i<<dUIo34|sM4$+ip^N>!k3MF52Zwra)*%N
zPv@iDJ_YzMc*AB!BtJl^${KPki9<-*`x(qm_fAi>EYArUWhX7EC``@WxM}pNe{=cb
z(=VZm!@imJ!*$%p0MNPpSI}(gaz-$b*{Acyn>{#3H!Nj4wAgZ}FifOWS3@jT*74LQ
zKICC%@Fx=8D^)y%Q{_X@<1k5d`KBkvZyB(b4BYj3iAydw!Fd~wi?O?<qh@cMbbXnA
zLs~%OMPaOOE}g*|sT@Khct5ViMZ!$Yb?UresLv;|J6A#lUu^Lmr#m03%Fh>YH()*m
z(uFvty}T3FsMD2ZJ)t%!ua)%y`Ie>IrDTfr*t%;xSG-osWD~U(X_M=@!n%(!v&pF8
zmuTpcbfhBR`##Q;yi_th<C1zIY-B@r-IM-|W?pgMV87r~&42d2;j3!Tz+QY0A2xXp
zU&_2lms>{cSG+*%iy6+)K4kt9<Mish$wD_@PRI&1n>|lZY<>HXVw(@R3rvnf0ti*f
zOyYR$q6~(j2DNo|MY{<jj{FGrwkt6GD8|Hkj#k?0IDN8ekd=N>_WJS_Vy4mJ?8loo
z51vG2IXf4O8|7F;N0SPHt?8UmmU9<+ycT&0(UM&1%3&5dD{mztT|~)jV@LRN<-sR<
zC*^$oo_pE8u(jC%S2T;{)`l-X;Z?n*Vq`ow7u&^fRIkWgoeVR1PlfbBgg_zNUv{nk
zmR(n?xaC~&(r|W`O1Tg88iqBp^y($&glvaT3z5e)Ib1W=qNU$o+2Nzm_c_%%R2~9o
z`ljoDxCr0$SlU?k5s9NK6`9W|0vYHw4D`>t^WIpCp`pAbn5hu`37dht;E3+j#Jx^g
zsz-;E`jUsWgj2JO56?lz>0e<UyYi8Hi<#M^?1_bc1u@F=qTy`^$z5Wc3mn=IEh;-(
z^X;qA6<Jwy#?PODegSv(L+F4dSd?YenI15i4oN}>w+Ke6f$-3kZSB@6UN>>^LZu2b
z9#4U1pO}Xy#tR*|&jKq72M8EXiDN7$VJXbDFf+#u_juB~#B_+$8jzubK1D{RJ-*}e
zT>r|GT;{ii0J=H?UQ<nEA>=fJm;HpG^kA^5>BoVRx60WA!pR0yx^vAuCw=43@X3aU
z8KiZT28;p1cY<~p9jr4hwR{1P?Uuecv{(7jVam;oDaAdOKU$X^BFt*VF7i-ZV?+g`
zCU}z2Or$Xz3_jT~x{u59Vl(E8X7yG*6Jn>&_UJVkV|yuh_R=hnT8bv5W;KTMt4>Ki
zKhhXNvQlZN*1larD)^C#%w6!&JWP&IN@T~Y;=<j~tz2?uXlz5l97l_SWQ*)3t8TCj
zLzStU+%vLyq<Gn-3bQE2N!Tqyb}J{IWl{nG_6glOs0){D8?wrFWQp-BSO9T6Lzx|A
z(a#nZUu+NC6`OUH3|dLv{7|fP)iBN{9y?>Fb{55vdR3cS86x~h$!h+t&(J+2&ASjN
zPC&g!10eo7Gdh^)j-~S1U!`wzWjUuKn`{*T9xRQbD_T%GFkBkhqDRlIq<l<TJf5;X
zVmQXPyR%n%h19XY<i(OiYz0(spX{NJjH^(@G~y2Qv5ClH-OCyN!g5@U@}%4SnLTa(
zzH(1BKdP?y;aW_sYTe13_Gg1D#ieTH;d&ab(50N&tLgM~6SF>NRWTgA8G<&xW^=s3
zfP+_RbJEFha<Ahg>}5?@wc1)8PU5I^Bm?ytW^K=nN3SJbW;845<E3P4N+qL}!mKtw
z1YoK|WW)iA>bvi{kw-n*T@n{jT{T@GGo!_x?6nk%)82Wj{tY|;T)Fi$A*+>2x()f*
zq_^jZ*a~Z;3;y2A91m4^fm5kS>&yG&M@<!;tFWsd{lKJgB$KCh?dC`!ari{*lcgQ*
zfN{gdn#~6m!FfJq+Lydjw3E>6rAb=hsrP1c!v@lE+=1DIoK`Z}#j(8+`hdwsiu`*o
zaT}d1l^Z7lawa6#o~5%<X%PElQs_DHg6A`D4RT%E&Km?*!FWAOk$_Ze3q}O-cHb{j
z$WiRp%4b8d6VDBz6%G0Bb^EezaV?Fo;1J7i4t;cbPb3HOjk|2`oKDuFsxk{(f~na-
z<!$8G*f^K=LD9*}VqHycTrhsLPS>$}+2dvMH*D+#A{^<x&CS%Pqc6NTf4fgf(fVS6
zENKo7z)0|IbX#I+S^=a3Skv`FC)C>9^2RivBL(_+u(o;dWPhlpQ|q1wueR`G^Fi_9
z)hbSqtmLQsj&iaMMD4NUC&Y8O@*YKgbeDm%v<W=eWpC(`pTPYl=i?Qn<LVs3?nAsu
z#yY%fx7Z*}JMdSb_!P@d#K9h*eKX#-@erVk_&e2_h_$&`wxvA9%$nilrfZ&v@89kc
zzr_1IUrcH%A0l^blXW|B#j10gcM5I;4EBT1-CseE9w;JK0MkWu{D=5?N0UPTN9ntL
z*b8yoAkkFgkg*UxkQ(*DgH|@+41WCCerWE)9%*c!L;5)kOL>WDSJ+sQiu_9u%}m<@
zhfC+VxtM1!rswz4SCoIvoB+v-e_1q|a*!xVC?OulfqPRjI*(avUjgi`yl12wr%rfw
zZ*?Q*j$iaoyJfggqk(L4r3Tr?(jhu_W_17`qV8f5f0&$ek+Dxu2j;n?82BTLwsVQ;
z3x*<>x3PnBcco<6dCrqX0C1*|dunjEohT0`3RN^V>}AF4W1N>C`qNPa1Xhk~nIdt5
zS{ZcVya_t!Gsz{DqN{O>tMVFOK?`yxSo-9*bSc>-tspjlAKmk1NEA2aM8HX(6Ym$^
zA7pwkUfjHE<Mrgt=x#|#&T(hpnr02K!{b{B`(p+@2+GwrCHo3jcD12elfwWj)IQpj
zqu8#&+nb!;nD2gA{g(w}rT&yPz{m?opjPoAA+4;AM>H)Bqs8cPil8j>37<f_iPb;>
zvyf9l9~gs0pUFiOp0+f=R^SXF9e~o_3nbnB%ke0&W6C+<Y0+kgIcvu?9>(^>nOD>_
zV2HpFHOYHKHD$L82dZ~724i}COd`$ZN<^=YgOSt0{aL>zh9}RaZqYux7&kg476^5*
z3`!@#c`mgun$}I_R7_M8gswJE@l2df(5z`NIvJrFzY(l>{c-l=qwR?g$sU3$h;Khj
zO(QBpyR-&m_5FPJJowzz+T+Ha(^S5C4J<d*$qdu+Dq|8ra0>KY{7^B|&6N{CUYBNl
znJVylwPt6q+0idS0e*G-DR03=io7r#v<@&lEMb>0wfS7pkv41|qAKiK;CPwq@DksP
zr=k&8Q!s8v01`HJv&TxYSSX{Zvm)!Xq(fKZ9TRM-Xu2fXStH3NMdhQ}LQSrl4;$nS
z`vJ#@wNIr8n--7mwYiQU-3hvvqC27|3kIB>up(mIM%n{{E}h$ypod1&S-}24!)_uO
z$Y}(ZZcS>Tp2H0CxG2trs5^HfGvcnHQmKyxTh(XwT!}4VRZPXD9$tot;#A6%mq$2}
z1CDU-{GkF<bxyEgy-|+Q<MVgOs(0mrpFqPfFQ&j9F|(1&Bi%^$kCx!-RBclg3`N=<
zG1c@MYb>LgN%C8}V6*re*XFlr0&XeExU`C3EPDD6u{fM?-4teC_#$`JS?c35n!K7c
z7gssO)0H`c#v38waq7;8cVTSJbxH|Hhk#SHX_$fTqt2%UN9?@#Y$mZ;ZJvt?gp@rb
zH{ji&cpevuq3sH~@~m4b!^*yJAouMDYB!gP+#BJ6@~u|BuxV0IFi)^G4qCw)fwx0t
z1gCuTsj^7!_DX?-duj`*P1(aQYHMAo7I-z!elXN9_)x=_=hj}iDh`2GUj}bUXRBx9
zdVD42Tp0X{@7~{7kJ<dJ?$lpN^{g=IRQ77zwTU;*18`f*Fq5>NWDM;xBFs~!#{a_d
z%?kbm$1+-<JDKVId}nMUsf;cs%^BNzww=B4%wD)xNwERL|GdCi5i(hwcE%uCwWYZ(
zNx&`ha=fV{mxi&P?M*cSUy_hxFs?(cIl#Xf$w`Fbz}<`I-^kyVhk9g&iPp%bt3w<^
zJ{r!j-`40oTdzdH>;nQpmF|?6)Cd^?l!37A(mamrq%(BU3OAm{s*g^0W>kgZ)AF|z
zzJe?RfH*iVm!e?{XL6NS#WZ2FBC+>R5QyPR{*;~erc>Dyx23$Lr#<?u0!i;?w9ubw
z)U3HS#S=;v+>LyAVkDf%6)+kgPe_pjc2!7RhGrG8JyOZkDbnQooP>3+5ApL#>txrL
z<zl8)RH6i}ZWS-Dcr3@@1S}&;#YzK)oL6)xW*1DZb;aG2%8<BQ$uBO>de2UVM?j|Y
zh&{S_!UsX5Iks}lO-JW~4jLLLyeHS7*GB8>?<1CVi8*?+cir|Jo6rQ$`IiunF;!;L
zzh>2OBS6A1frySFB(wlC!?h$&tli(q&{M=z0jZt8dd@P3qTpXx_AL+gfb64^WP?$_
z8GwoJb_3w;0~pjjQf-nq4v^g=6Yz~$2my4n#`gWUuWk<=iEI6Epg;bLQn!+C=p*>5
zKJZq6jXe6qGqDfHN>mk*CN^XcsHTlgS_V75)fE2|`RpH+@&0&!r&P#;f5TcIa~`f9
z`63_<!5)nPR68Xfh+K_Sp30U#5g!p37d27oNRvKkMJOQs`sVjYt^d*)<eN~CcCnzZ
zAeP+fdR*Gl(3x&VBO#II#^4u*43|sX`=dV^3Cbr@l6{JrPK9@}1@s~JH06QL9QYQF
zOF3|gIPz`~f$o7+dJ`CQj=`5|Aw>QdoDFzJju?I>1c5dM16v(SZ9S!J*Z@G5vN}8i
zALBz$6KSmgpA8@n9deG4fOsG8;|vfzUjfpXl6!BM$VZ;Tm!qJzI|qE}CxDQA2a4B*
z{BVxe6u|23lNkPd=q8XwxSY^wi4zG0bg;6CY@OI^fEkzOaygy_NM&?QPh8h!RkZu<
zsQ@L`|N7T4`;z>dGXa_JAHJaThav&m3~?Y${==D|KYl-dNmUY$b;}>`oVi;d;`Yc4
zAhXuf1c{KRM1Q(8TH|vgNv=tY$|n8X%CSnNrexlrKyjH57IUgc>i-{Bd;eY)^p|7*
z8C77I5YVB(6$$!;46BNW`33fxt<zVpqz=SLY;!2dvJLweZx;#XQTC$)dLUJ+is#1h
zHljfabMKYmRVAWsWv&t%{Q5Z(Qk5F7GnpZ0&c9204hn-h0fK2@G&p1*9K!G+S@8@e
zH0pj+s{aL*z<$)4n`(bc>*@PC!m7=|ao#)aoVV6n%K^<KU_DqB+941?0b@mvblyl9
zZk%xWyr{ue*|OHj?n?6xM;3Jn;IC1_rqr4a&*8<VhyswvQN_aSjnCpH12BmNH9swX
z)y;HS9_hP1?Y30b78E2O+iIx}xFbX<se#r*78%gJPw<8quzHP8&BzHt=%K!WP~`w2
zl{ygVbpC9rUYMNHh(Us`1C+LzeHj74Wuv?jZex5xd#G=xJY6?tw>4#Zd@eXDE&MTv
zE7QKP{8CmdgdeDk3F4?%;7lJ|#TqBv?x*e?cO$+r7bW@$ctRxceToyDcbMzy#G+i~
zU(`u%Z=pxpHqh%E%aTb6%%Q=qdtdf?Dr>ke(V9MWSi2z@NK0vL#MK>c?e1Z|z_d+C
z{S`zr*I|h+fiLU&Cj$MOjo3@<IE1Dg65Fxx$}a(~-tYQXyu9sStk**sh(l2FW-Ih3
zWt=78HJG~{x*V1rsWmup`pvWOOU0`0(N%(9482D}+UBX)%DuA^+rA{UK`Wu{NRZEd
zz!}rdf{oTQ55-N8&KDmaJg$wa#Z*Eh!$3lb<U!}SI8H}CV1O~=0y5Z;E3c|zah2L~
zhvx?EY*-Uwy{Se>&{C^#I-frNmfI2Dq-#)iVvi+hGrZL4#7=}ujcM_dD_c8u%EgPl
zptSkc)nGgNJhoLAz34X^2rv<h<6TAsi{TkxD&O<Xaf%-h8N8S_IUDo*T~F-mDA$xr
zHnvJM{);-CDSHP3^$>9%X{;(v44~pWF7l6+?7TB)P*1%&vExY_*&=rhm?qasORI-p
zM>n`U86MYU<DuVM0Q8l<ExL2l|3eId2lOA{7)BLC3&y@-ObYF0V>Jm+6`5f=gd};^
zLR_!Tq{NmQ;3an1=~O><y>n}W`=uv?$h=-<Nqyw%(SauWehxS0gK|~5M{A8e4d^=T
z1Dvnx=PWQS&On{oh3w|k?nQo-#`TZ|ZnfL8Ho<%^D*F1$Q{i?xN74*Y(%q2Ro9`|+
zpz)T+-LgY9t6PJY8;Qd;suUlR^wxQrV|^cqvx1J(Z1v7R)w?Tize3q&84BlE8B6UM
zHOt)zwfB2+@t|j2ORmW<!69KF=@b>X!d>AyeIQA(E}jvsl+iZb_^9QLSOC|TcbW~&
z%_+Wb%4~Mi^1SX0dfk<N$GicwDx<@UmqI5$so6Q|lhEZ=?%}sgTh}@fmbhZH%_ZNm
zdHe~^;!&l7a+x$BfVuevJ+k8Ln4Es0DqY0%vgp)_wBXJe<ZM;YphuT-8}!`G{%z1o
znSU)XTqE!tgp5{J7**BeEu4Rzy!3;vJm<rW!t{0Qhh2?Nw_}q|y5G&FjW^|Z{%oH}
zivwg#LO-YTPRgHm!hYr!n#_GuL*Z+oBei|2XpofQxRo07U7;qRyYCJ`mk^!KBr8n*
zC*<BY`@6f=JS*#~kND6}2I@U2P{eeX32ZPTx%>{xiaYXIYRt^kY5F4b%LSHTsd8Wb
z_<Y}364!A^vadmvS7(KL{`vxz3mBtu_-i;0Kp1R0?xlm`g^6@6$g#zCxD8jyxCvEm
z9fvNMuZ+=A=us57Q5h7q6M?Au+=+~;tF2p<CXRVkJt`d&-sdP5=$9RE)ABtos~|qJ
z5@Uj$6*L2<6X}VA5b9RSkD)hkzHSA^(le{I#|H|{=S_v(8($yZN>Ynr7<jQY?6G0I
zC`(9CJPKr}x=iZAJx%2fX=CWa42&bMPP&$e>>DldJ#G$JysW7_!uN3D7dLVtpxeb|
zI-HOWzApzp=HoT20M!~_ik_WaJXv!p;xtV}_9%(To}R{$Z`jV@-w4Y621WCp16VCR
z`gN>9Qiaf=ok%8NzZ{vgC{)=TC?nxQXDV{7ABF1qIBNiU(#3jC{5Y2;T~6#<c22{$
z5UK!j{5c$Tj?>LW@(QW)!q={z92<oxe#QCv+okfmoUgREq959(=Z0gQ1ZyeRH<L5K
z&^sfu>TRuO(KmuVDl+1(Buh7j6ni=<6fIiJGqZTzE&0GvH?#hDfPcTOUGGU(-6ByI
zZ5XP^>!S{^Xd(ek_n4Blsn6$yW9V&NeeR!a3nY0h`DRSD?aPERA(_bGU<s}PCJ!->
z5~oJW@KYg+>mn)!^^LZH+zbNjbLaA8PM$wF`XD45J=bAU*XgUs=)_Vk7P2;>7C-7+
zR_4NTlFIVD25b}JCp#j!05tmvN)LnJd{M}d3OpZ1DvmpM`Pm&*Wi<8f7Ux|x-tcji
zPr?P6F0sekcIlk^TDb#yEy)x)asA*h78dEo;g(y0LbPcX2hN{t7`Cao8ee-AsJavw
zT@WC3K4O$n=#aJKJ5`D<Z=XeWrGZgBFI{+XF`Y>hv&L6W+g%P>74JWKYs<R5G4@{1
z*{O<OuK9C}yaGl@uf}^CFUCC2a)H|X25+lU0!0+plUNFBcCO+kV3rfbjbNcTu@y0T
zr}VR?{_35xNew)yy8f(s^%L1m$#N;0R>6<2Zojp6X?h-l(qhob_Ayv;>$D`pi#FjD
zmi@&Jp`G5?1QWAfrl^FgqE@^7iEH|hq5B_~g3{Gw%FVLU+AtzQ*Z`b&HjF)JN#U_~
zEbivKiz=WRBKlC;(iw$f<OsBrj|JrJ-<?sk&kpE<l1Omf_es#Monq8=yp>{1S69F&
zXPU7Pu9NosNHoaWXC>F)LzR`58;bE*a(fscG3V&KLRDtfI=Y%<E~h)OZClk^6@Pzs
zzEl44xua6?`DeQg@5(S|ql=e5KO`P~Z{VOvQR&07ZE(-q<DSsG`NE)@eBZm+WOqu^
zGc+<F!~C%;OW(?2F#ibC-q`H_&F1m`G01ITY4}%`{rBcg&h9_DJEf2ojY3z+^8nmm
zHS_>f>eToB%b^r<UjK=`+&_V^AASNK`N6*_;X8k)8o%ekOy7^_Z%6lk%5yruAF)@{
zX^t7Yx~|Yob_g-rY0u<2OW<FMV<cavey<(kUsDFZ0`mSpK3>>|e<520dJQ#nkI}^9
zIFNiW09GR9GSEGvk_AhmvNU49Lx(^%mHZgGdVLJBF$^ZK2qbl1eqWFkF_`%S+Vta%
zA8eY-<QRF&03?6>F9U!XQ@+IxPzgU_?*Q8G$v}={Lh_A&;%`Wcx)T10Kz4#B>u-4R
ze+8#U+5vSE`yvevM_@%i!Vj+PPr&E^2%Zp-uqJkI0lOGY-ZFbEkeN)li%SLQW!w1x
zy==!^x^3snDFm>&-n1A;?8RTC9Nj<h%>?!9MD@=;LRf%(&?j^FW|0q&*>Ty}x`?a7
zp8E<)*#LBy3;=5Op_`@Ss1$Ht{szqaSNg$kBF4{j6Uo0JrS;FGn^c*u|Dxgu`a1nH
zg8Z8!2q51WQ-rI!jLSY$bV{cfP41dqLxk;LaLHE~Hx}vS;$J#<;WbU(o70u*GQAHD
z7k^^}pXp$UGey>(A#m*H6Pc4N@lG*6yl(9Sq$s6;dPU2jHGB+V0$(-;Qe8a-zd2I8
znIb^C;`YPGe>V5O>E1yl5e^Ul@d3D{v>_ozKx`*<xnC;o&p;4$?G4{LBZaO$%B{@M
z#g2{k3Q8ESYh}P25=uoBnSI2kHF5OL{x)=x`FyOD-BS11vrHX2H1x9ix6Qcn<6an!
ze@BS^JM@TAE->Y9I_1x9`g2TvAK(79c(yBtHN_<!YCFQwb?{{t+y%E%LOF^XanHZ0
z%c~Y?+nVtDQ>hwmB;{0FS8?ziv`l#sblEN7ZN{G4R0i&*F&tA={DXkldkPpZJZSx}
zz@jHR0Ki{$R1N@Mj(UIRnBk0sE@clAU5B`%b0EE;gBKD;;NC!fc<rB{3i{dP&wB}U
zBExte)@~>>%;3-q7XZl2K0=`t=D9&{rWUDcWIWHy+v``I9_T1yq>SQjao&snJLD+P
z&w4B%Li%5?^ZrW<<bR7@>mO-5{r9frKWmHq2}yy!hAxlZ-(mJ~Q;IiiLA!kXaPTl*
zJff<S8gwZDJ^Nsk@}-^yIr=kds%8Zooxwt6u{56R%*QJd+Yk9LJD>82Q&oP?Ve=K1
zamlpRbQf2rTSx!g#hBM2%&meS5Hu1(<`$ndCu`<PuWZZF3W<omDLYf&_zWU)VYin}
zM%I_>`h$dS&;x(w-3*|T?j*2-_k9VI0<0|jZI-9%P}PZhZd<%$wbOZINIq)^^S~?F
z5Z;IG{feIipf~ky(Fm;~qcUocLBwOYj*0dM86on@h4b?Zrp7g!Ec^=Mg03BGPZtLD
zIAd2%Dd0LuL(B{(8<So-=X{tpDgAKI&5m(MFj_hhtCpL3=U#}n4CmRvE;Na}!7EmW
z@}yQe;+p_=+z^J>1m}|LV`)ArCgChr_gu1A&-QlEkWpehLyr<$?e$ljbV~BzLPv>j
zN7ui{RO#2efzZP#z^%hT8TV*;B*Y|YSF{OLASNnoV>A;N-+p6lekEAOm1HAmE1eP*
z(_QM^hF}h0_94}-s!N~ft!oUfnXKITbW%&s>f*E7SM1hzy^oxqjU?Hr{*@zpGoZZ4
z#NmPVc1?kAhTX(cJj(K{;F*MhaIM<cS%&dyZ8Dc{-j)~Odt}MvoKahf46c5IK}IMt
z;4-vu_w6+1ES{gdsqxNE%XhZ##nlRpLB-oVQ3WK=g7ablD9!ci1+@_)Ks?apqK?!!
zmM@*P-F)oz%csi^0pHR<lKpaF;Fc$=;)Ir~F%+fQPP2On$wn>`eu85BfVtpp|9Xwl
zyR>p7uX}uRC|~JW*rUBs0g0H@<+4zWv`Te)Pp^5j=Bbc(ZYhDX7GkMZc6|amWYckY
zWkRVuEBX^E?-4p#553)@m|gyH%0%9~qpLHoWIFHMW3$26r%8v{=_w9N--~Zjz5x<8
zJl~FTMd2UyUVdw@lX>S`$j>jSFeW`S%5F%w1uIT@N!tJ=9km)S!l6zt*?Bp2o;p{<
zz-iSs*%PR#i~D5K5hHe?F_!d_kjDs0m@x*TQ4@jvtW@du%qrt<`22B@e~y&wJ2HRg
zG8`Nwl);JvcLoSIhx61=?aj%3;Ej=^-?q{;Hk6eie|2G4;MFf|@ITXvnRj=(#AbQB
z6*(~1N|!A`cmd3rC#z_jN`d)YPg0VTIfEWob5*RX9eph-u5h)0Z2R?|TcG6$`7;<~
z+lLi6_37>+^?M?TYiTH|?UHhy-sfbmX+cZkb(Gt$<QX4sQkLl%wen57z{HjbT3z0+
z%*^IKB<;@kG3SWgySu@VB*df_IQ)){hLXP-sL*bSEL+B;0O0&;Fr7n6ZKQPIYE|@-
z6L!N{IO7f@lMo&DKHK9LG6sS9pjGl)iqTokyeo*_9Wd<3Xx6kRfzo7J->2+0ZH}?z
zcvsTPqfhP$3UN}AZqILu7p>Zs35~XL`m9{TB`=AA@yg>oNmBz|Qo5EZOox{X5;AX<
zS68muzhG2-xaIH{LzZ9iIsXA_wE|hO|7=3?kNQ4NEKrC4t2O+3xoY{v?>N*o;v&GM
z>jQc)0;K;w^O0|d^dIf{f5HNy{0YLf{De<uTLvksDO##rF}z5zDo;t&<!$lfq&wni
zISi$ECHlA?-||Z5B?M03-7I|hMekdzfA*pGm0kScCh{FuQ^oLwX-?D&ZwhxPcK|!g
z26rA^XN1GNgxO+5>sFx0?zGg`HYN$|(blKjXH;u6GT_vxsA3|S2|3@CD1`KhL0KSj
zVP?3K7}x2W%`w3AR$?<&R-N}kA+3dgKk`W@%^NH{aFYMjjkqvfT`6-NAxs~_Q3o)`
z0@5fX0DPt!FXmjT<&?4$#$ixXKe1F3?HM;abS2z=k&%Zf`m#Jz`@F*Em7R_>Au~d%
z<Vfhjh3;Dt$Ii;$PmN6e;&Nu3-mWGt*P6!VJZPM;BWa%9J@4^dqTm$ZJIRQB6}oH@
zgyI<D=;UTV4}LzVf91gxZ|MY}qOZRWek!jSzG7_1PF^d+Q$7r>^ll%R+lfT7l&#_-
zQImihBDCj9RAhl_-A$HvcZuVJWFyTyYBaic_&^sdHHh7{jmWT8h%<x>X6l-h^10jp
zin+BB!@0o4VX-}Dfg7*bM^)H7t$o4iISKtfyz01ov>f&kF4WC#iFSm^M+<eW%D!;!
zBUYN_z3`$Z?R`<}6s30S({InoFM$;AGSzd^JwhLLBj}-bGG-Yy$D8d7d{mU4$e*2!
z8=K*oEK94-s{Oz`k^PKUZ~Zw9XmxXTAkevq@T^%2HA3QZxT4dp6Q(FUDR1tA{%8)>
zt(cV53i9xpx)@w+R;ei*k@Kw0Y}>!Vnu(lUF$0~^79au?YPQZ}kUa-IFR-(a2ms2&
zN*rhq4t5Rb%x{+FxMB2acRpC|;mDnccAPi52>@T$i1k{s;gZ#bmMz(4-rt-uXnLtt
z;bwiZUbm>b?Ph;ko1cC?Hw2yB2DpJaLrE=Q_c7AlfJX~^vx<i&piaB}{G~Y|zQ3n`
z(SymwgN9sMbcn;Y>e#?=fK^>FnzCmEfO}&NaDCtiNp;Ba09I4rTurUP`qU=xuGJ=H
zHt8{n7h|Fdd-kkl+U1d*emb+>Sbg-+=QhAz111S&l<<>F4Tnh=w-<EUGj#e2X{}lE
zy%iX%@V3qo<P+dMxlxa1-P~E(IT8SX>zSLSQS1}ufT!5miLMigyQZ&p=kzDHxFzbI
zq&j7!vfDgrNlnW<aR<yl-AsZ(bRjC!5L@I0FdNLVjL17MY1VL&()%0b6^>di%qa_0
z{2>xz_k?;H;?}&5@hg$g+V7MHTS&KJrrTp^V3&?_G!dLtst`WRiL~<ru9fmuVVB8X
zU)k;`4*-;9@H_z`0HQ$v7#}tSfNFsTC^{W2#NI=eRU{Uulx0+Xk*m8ilDhlOYyNYu
zWYE$fbe3}2a+83r(?sh$E|FX?RGBChsaZI&i*XU7;Un>yHFK5PIIB6}`?yf=QK0+r
z5tfN&ZOj>z`j+~VECPV99*YT{QsiW1#Mx^)hUBu87tJg6@e{6)U~i#$kershs}D(S
zr$5*Q`1nL><JJy!eO8t?xliFd#X8|feMq}l9uNpq*?vrQ$%ayX{mQpr9sT0FoHI}A
zN_H!AWKtJ!S|P#hNC&<{Kc+@+I=XnUG>cvkS_op>y?P6XQ7ewR8l53+XVrM$eFfnJ
zAo%zZ3i#WNmtR4$&fBy2Q9`LJ!a*#OeildBfuxkAu13=NBu(f?-Z=3jec-Jy`xg!4
zxP-xlxO&WDYdsp-wG)0&*QwyvW3tn6xWLh6a=%jDF;kMzJb^r$W5;E$vyge)%Q7{u
zLx)TkOWFbB`U(;}hWQF2gx`c5=rQ}xStdT<M;z*Wu&j;=Yn4N^DX3|=c&OBkvvfJL
zoLo7l5$~IK?J-x5%Umy|Lh-)TtY&U)glSlFxH(g^U(g+ES@X`IpdP<5p38fBsu~@C
z!RLUf2nC4Mc}0FeeXFVPE9isVv^H9fs8J4Jdk<Nw%L#nZ4H>hMMD0$L8zHhWgP72A
zA4!T>jlcuq5*2<|0QA1bdx**;uvGdRjQxx6p(IQiS9+)hBSr6Y!MR|!F*lQay_p9u
z!XAjcd)o!KZAyVi?T2|c#VydOh?gfn?oqNj2(^sQs1)l$&Qi9+EA1a|HdCWhiHy#^
z=>&<?0sote3#3{F!zE(YsdvIF&EL7((YVZGUc_mrYv$usaQhfa>{U$gdQ9oC;%OKs
z=Gai~U`0q>vVM$;onyQY>S?lg+g5`PgGTD5?at|1ydSO!CB}lMM$03}w-=B3q1LvO
z?egYlhfei5a`hAPP^yW#%;$PJij!`fm1bzrubB_%M;_US23b;-!7i+%hV8rfb+b-C
z=&7Z?pyXQqhC$;IELk_KsJp76jyt5)l<+)&<D(a{+M*7<VW8)^EH<-;>|FjN9HPmh
z!|+<v<@0f4uF|c?1xEO#`qyUUH_0K9UqO`H`(ks3%jF@4DAtIrQ*ZXtZmtW3NwQ6?
zclJGe89lD{sFpA5{CX&z(du^JkT;-v0uPqg$-`?CQt*&vF-9}>5>tzh*wKxPMWQb~
zc4v5e*DV)vDxc1)DVe|Jvx(Rfo8{=RP#9B`#*5(&m!P!fxIXy{EQR?W8~t-$^~%&V
zhtdyFB_?<jv-YoUf4S~*#?t|I2|W@+JOu-wzRhrOi0J*-5<+SEJH~bCM(lgb9=5!y
zaoKT+eE}35y_2M|w2#(V4*>o%;K&w+GqC|?EFBJ<2TJ(!tXi>SxkYKeiu<c@m-U(_
z3^FK=-a%$@<}0f^E9yND^1BeT5bRS7q#aCy>(ob!J!<8c{V=g1eet|eIRNHbIulLS
zY{)Q||5W3P@|Wyb8CmVt2FwmHLtx-QE;+EVH&o(XOC@?FjK6i#?r20U5R&YLTIwp_
z3Ew@kn%>bof}#vml!WmRUhZcA>;NixAxO+#2FKgk8RrL=EKUbcG8&&%B_DK?6PJ1W
z(yQoT^=NV?RRH}4;4w@?9CJRDhACrQ-o?nvp%hMNTXr6#=T;-zPd7*L*Q8r}U-dcy
zfN#$|qoE~VZ{@;yqY%9@?0C!3nv94CJ0W<X<q_78QeIOBLcMMUkcI9F^1WgqRX2v1
zc!4<g!Og({Gj9N|cLHyO3&Icp8zDqgqcAs9bf>02Z|4Ee3p4X}vMTuy)Xr@iO%qv~
zL_?9JlYv{H&j9op{t$1vG{S(lo|M<gR|rOg%iH-EeAwK4f{oPVeR5NmIUE#wuZeOt
zv2Fb;$Og%Pn<;5Mi5J9Rt2|IzJE4kv4HF_Ya(zeZ3~w@!h%}^M(vrwz;fudZQ;wc~
zL7dJw-YU?VSOSy9oO!f{{@hV&yi)xJ=EzxCRMQf)<(q3RwP51V)Wa9cRpL$3Hj;%G
zM^ko}(*mV0$o>#R&)l^)@&+ZT8FV9sQZD?qwe&faC8u-gsW0L)<3>*`1)RZOA`}yO
z@X~JrnBKy`$yylJ@KXK>QBUd`Q|i5EBHO3vM=sW`i^XoU@UxRriniq{X8N};MS|%Z
zEoet2MkvLl9@*#5cILCqJ>ZZ*-FR5+&241HjVgoze3^JpRQVyfCH#G8IaN~U3b+jb
z`yYeRqUSD0_NL?|iza6~>1zu0yqX~sl<cRx>vdAMh}McJsLtUeunKShOu!7rBvOHC
zQ5I+CyOKDyw^5MW7<jNaEN7T#B5{+^eRZ{BZWR0=?!~*!lV3sn$><$oizs}r@&O9r
z;`RmLHQA3G3YxGIqa*^l6Yx%e!6{4*ZHa<((jw@LeIE3$rrkC@bhJ)Ut9N9O3_LYV
zOMP*h$3=1-SzWWUMUUa_Ne2gYBFOitfhtKTw|MQ;%jOel=|lQ+*AfGts+}G4?I%4p
z7XgqH1o!C=TvUB`Iw4HB-X2Bf7&A=oV>|S<T)xUP*!!RLEHrpkf@AxrXrw)YKT;}e
zj25Z=IqmRgGnn6iG5?Xt3m($|tO3$6P%HxX9GxT<X3`r1W6yUu(4I^>Fzx0V^F4J{
zq^2wG_0-GfGr2xqO;&2llMb*8Z(x4tDs193{I(7wuIeTR8iLo(O!H6%{K{J=3TGPY
zX4ZnXg!6qkxZ16D6Ah}9t>>TAHm&R&4VZ_rNg_#c7Eso(`wGmWoB9WGatlvf%i>ML
zllz}iXh)a4SKB|UbJqXD61>wbQc)Q983ms8Tj76}&hFeID4_#>AMjwr!#V#sY~3%L
zThG>7nw+jnBA(*<Y(p*Z<5eNLOW_?rMg^V+vwE{SEw74p{|dTZGr41TiyJ#TFZrgr
z!_YcsfYHUg+sK7v67sxNmXO&ji2>gPLJ#~cUIQ1rqV2w8H2E&_dU|)+k(mLCa&K2G
z_A|YNa+-@CeHN%dll?XMl}DTK8q0^0ej~G#m(e;wqq!Nq19C1xU5+j?nm4&<wy9TJ
z9Qt3xS&u7m@V@Gg3s<Zp6e}M82sr_@M>4^r(EVzSxr4_C^0Uu{AH}-o^l=A#45hMR
z|Dv`Te?Ha>%t^>pWYa<qV6WoJ?WLn&Dl06l6U`&iU95$^d?(q}-bUTK%zVw=hcA79
zMNt%IgYi%zB%j26$c33=$V0=bMQD1jznZ9?Q7sE4k7B_j);jZxt;bP)Sxr)(Uy-|e
zrLGC4^d>+I=1q9kdJ<=U9p_fyW79QZERxug+cmW*R>t8NmpLco^2GF|M<Dp<+dJOr
z7toMMPq(*=pO#xQ^5t9klG`gj4A?g?56#X9-xe8cztC*6W25lCp4;tE30R5m_XZLf
zd{mYJeQg_`;uYK43RM^{W9@T0K0PgO`FlxgDC4K^VvE>TrN(8Tktf^$F|vuvqpjy1
zk@U@4D+@GSmu5%0Qd|-D>*D&{?i)!CRXyc@oAZd-Mpuna+H+wMrjO&mNO!@h0**On
zfLH2*P?R0zo5k--++L_3>rHTqTQF{p{H#@Z_T7U*l1IEJlY=nWghLhLv=AV8(kg><
zxrZKTpK1)QeTkC$Qp4e#-)L%go5gFi9I44x$RJ)>{HP(|stlzw=FkFHS`YvzdVyOR
zVNlHDR_02KT5yqnk!s`!6|dZ^(TZ@9DD&|WeW|6#?;ML-87E+lXd&z?^mJ#qyf+HM
zjx$72gce+Vg$qPmYC0D<uJJkA#0H-&dH5tEBC}y>3GkZd1{#hoKD2?I$A#yAaMxZQ
zqE&n2Qtn2g8$0A4PxUlVg`szp^WKZwHN=xX;O<7mmJT|hch>90r}rl)GRl?x@3Tv_
zpPr<86{6ohyh}xPM8VOFZh9pnCP2i8vJ>qDp$_0ebaBiyFUvc_kHuEgWDmH<ANjOx
z^O?83P1dU0S7%cz&&PUhIjN=8@x<K5>~;%EMD4Ikf52f%e{MtnskGD++UBTMoqh`Q
zI@|igx1~DAVJ3hY1|#a2v-V^FfUoPY%wsp+izJZh2Q#C*^f+1XE+i(&9%rz9uB?I=
z0Ew6(+mNA1c12Df9n?s7IkTg#<_h4|X|~{evz04w%`R|Jg9lzQQ@!z6yvd5s(>jM-
zcKGgn(k&;P7WOWH>Z>N?BQ}7@c%bfKv6YAWw8B;1Hekt#i(LLZ3xlum>MS>LyiN9K
zX^UK~cYj^~_)E8@OLLwkJCq%N>J8LUhXQ{Omw=)ByyTiWmBCoH=T2Ya%yi<xTU9>M
zaYfTX%E5&;HqurH4R9^O5x^N*M!>OWH!==XB&`7|-!1@cJPO~md<94t2$^|r>7k&J
z7jP<FkfVxRunWzY$K1lBZY3G|z^pPWCtGfvG*1zI=RzWxY&$z8dl9r!tsG;21$`|T
zU5pazbRMBrUN%3mI3p>$<Fd`$lmd!NG<tbDHdgCsLR;>r{oDKBCZm5M0{>fu%x}vy
z|F`G=Qo-}z>H+mf%TxY(J^vB;N-!ZafMyQ%0L^JZ-t;CzEnJdTYyJ62#`A|lJ5)@S
zB=w*BdUO4PnLvgk5Dp^629m>KPNf;Mo}Gyk*BDpOQJYu}rpsc{9<DTB8Mz!UZsRZs
zR3w8&31C%_{>1&cV~}e#;}Z1@Q~f=BfmAkZ8mRC~Spu8PN}vZfX8-RtfJgqF8}*e8
zkmB(<7$r_K6E9Fbi8P8-IlMWSA9iWnKX2VM;px#O7xAsbUYU^^KQ1PcPP{65I=tC@
zS*w(^!{SUv`$6!SG9lZA%#cx?WqDoLS?Y%I^jS!ez56ctEj$FLzWfyw)XJ8sGR~i@
zhnOsTH|c49ZNqK0hU?OYx}kgLbt#Nq2$0fb(p}TsclxUu_&-8-|Lfu>;XZClHOZj?
zvOb6vcc*wA57Znj2TSF7at-qM;6=W9E!J6?wq6B4KNlul${{4cowOf79KrSM09Mz{
zjs?KJ?@4#;#HrTS5DtgZJD1ZPBdAWAo%CLtySMP@q3p=G#6OVc{k6vScLV_b$P&H(
zMa2C-`u%>7zyF}TTl3Du9{==ju6-<ds+n04XaBSdg8%!a<9`i>f27AhLRe-aj*E9w
z_QuRKI?k(~@JYBSJa`vG>}gz00IX9znd5+bIRyatP!@7K#AmFi<M~d231p>+on817
z5*v$%*FY&GNck%$GqH|9)WKvE8IDmB18*Wfe}VQqtSDEtg?A`t0^%1}=7@l9FAk6o
z__jGEK#!&py38{Qe}5W4B*GK$K)(6g_A-T?$BZH9SO@4HL+w`(qK_!?eS@PclJ83Z
zUuy}-0ktU{x%w4!;`{b!Pwd_N?yA3Ujr_?2A>MB<`unzy^tAlXzXdUnf0%x*x19Ae
z8?z&%DW2|TdFi@M<eF@|kx0!qhXCDfv<$qs9SUiOgL?V1T}xUa<Buq=lhHeJE3ZGS
z4Xx7q`TPBk@4{cezadghcUo4IRb{sWnBsxq)<Eb5RC0Kl_myn-jH;!mbmjf>D#Ul=
zp5o^~{GSnvHdYk8i8_ZJQ0%q+7{W#4=_$mw@mq42rO?ya?+}Jl^OKGD>Fpe{^Hyt5
zzB{=Y&d$ybidJ|}=3n?N*RTZ22ry&oiU3Ar&R5U@NooK0ZV3)_#Y%vsBMU$rv>5#g
zdVi#*_{@(7q#T%jaXCu*lQa|QpgiqY(5WQg17;({$Ic~XI&FwuK}W>?An<Xu#{M_k
zw}I5`cbKw2(yHN~y%yQ63&bnOdI>?iK;~n}BGSV1dz~gHTN9wE94p{vQ~o|a2mkDq
z|6cF=UDsc0NT7y=7s8D>+T`P}qZi7<UGDe#x@0vS9qZ+LD0EJ@&9$vZhsU0+Z=_&M
z3BnKWqz-ru*}HOI=WrK^zX3saSXNRJ7}6Mt_&NZRU}-$28tSn+g0m8vAti=i3;zlV
zFa_i<0dXiTEYSb646@+JBtU?wc;lkLE3+>bemq_Pzz7%!vC#KV-)DSYL*R-Ih9iVo
z6#;(s_lKd&j}f-ugVPt!5Wct(&$d=&MUsys{BShp$IB=$d7st(^YQ>H&d<B|v+sR3
zLVpg8KgXM&L*r*y{$cX^Zx4;)R@}WqaTqC11x0}|>=YyW#f~JMver5EvaMIhrIN9X
zXMs6IFSwVu-)vXwS(*I;D*yH0PeGS&<f4Afc>klve@)Qkca!}5y;hL_%z1v#Rpz&l
zlK=GQzY*fKIP;?r?|0I>|B?`|&o@H6rvHnCc>k)5@)vETJS|QSHFSSdT>DMRdU<*s
zKrOd+?i$8E#D+Lf^~&73=hS5e;l88c+&=2yhZ^sx6wda};cWo_wobq^#0-MJ@!$qC
zY;`x?89@30Xsc16%8p|NL=c$Oe|#I0N2uUB!VMEcpamcYkM6=T*ZmPa&^=)&ksQbp
zbsOgDsmr+kZ0P4b`q__u4w0W><bPa*T$`3kKR><fBC0KW-_N&aCf|45kUpcC-e!dF
zUcB?Ti-=W4bx8lbOT+?;1b0LbDBx0wp8ndsU+kVw2K%Nu5Kp1k7G95uFd9sh**fZr
zNm?aQcqD-k{rx(;FCzB(1@o;We9OO|b|Bx*H#a_9zu^)2^pj;Bu9p}b3J3(%Lvcll
z$Q8%|3*;;4$kH#_vhjlK7qU8%hwv!)dm}iW7N~tjWN#7A19qZT0Nq9uOg!6iHbS=Z
zXEQ(V&Ch=Gb1?i26hDLL|H7G}+#}zouB!r2PF*w-jj<CY+m+~(h~f6VOdU)8UQ<D6
zvLJVuGy7D8jJ1Zfc_AB3rWff$!96{F-Cx`_)-aCTr_oJ)HaW(k#d)mu!Iz7fUWLjE
zcrS?u`tYpheK)4cuVj_KNH-V?_->vq`0+i_ch}^nzn`aLzMJRwiUi)#{cPqhYnY$+
z=9h&X(6@f}o4>Aoe~zg?Z3un_if=bNKLf>oJ5a=sb_#=A=Gh3;e~vdYJ4YQ%g42c;
zYf2lXUOr&<Qjd@A%J3vnxOUCxVA@8NS!5nSMhpJQi0W4b>;GUJ`0s%=|9OsYmHDG0
z!~rJ9|EvGMZTZj7Nx%L@Hu7h${HGZ+|9g69Y^-2Tf)d?prqse}-6Pu)w&TB0b8Pi(
zb&rf`#;K`-M3|m4{a!B`m_Q8@t`bjmN$VbrZ!`7(O|k4}-~HKlzwHBg2_KXd3af!x
zSE4Q#Bq)=!@IgCgUf>m7UrwH@L~6^&2Br9@h_F}tIrFX6?vJEY*_r9q_Osm&Cf1JJ
zi4Z@_HL#%uynaeuDe$_$Tdb5Lc`90$|8@4Mm%#g7Elp&VQcp!igt{M$HTXOHp+EA=
kSJA5H-fK`wO6Jd6*wLyV>nCd}ShzYb0DP4a%C9s32Q%?IH~;_u

literal 0
HcmV?d00001

diff --git a/devops/dockerfile/github-action-runner/Dockerfile b/.github/workflows/registry-runners/Dockerfile
similarity index 70%
rename from devops/dockerfile/github-action-runner/Dockerfile
rename to .github/workflows/registry-runners/Dockerfile
index 4e6648260f..5d3168853a 100644
--- a/devops/dockerfile/github-action-runner/Dockerfile
+++ b/.github/workflows/registry-runners/Dockerfile
@@ -1,9 +1,10 @@
 # base
-FROM fedml/fedml:latest-torch1.13.1-cuda11.6-cudnn8-devel
+ARG BASE_IMAGE=python:3.11
 
-# set the github runner version
-ARG RUNNER_VERSION="2.304.0"
+FROM ${BASE_IMAGE}
 
+# set the github runner version
+ARG RUNNER_VERSION="2.317.0"
 # update the base packages and add a non-sudo user
 #RUN apt-get update -y && apt-get upgrade -y && useradd -m docker
 
@@ -24,18 +25,15 @@ COPY start.sh start.sh
 
 # make the script executable
 RUN chmod +x start.sh
-
-RUN cp -f /usr/bin/python /usr/bin/python-backup && ln -s /usr/bin/python3 python
-
-RUN pip install scikit-learn
-
-RUN pip install tensorflow && pip install tensorflow_datasets && pip install jax[cpu] && pip install dm-haiku && pip install optax && pip install jaxlib
-
 # since the config and run script for actions are not allowed to be run by root,
 # set the user to "docker" so all subsequent commands are run as the docker user
 #USER docker
 
-ENV REPO=FedML-AI/FedML ACCESS_TOKEN=1
+RUN git clone https://github.com/Qigemingziba/FedML.git
+RUN cd FedML && git pull && git checkout dev/v0.7.0 && cd python && pip3 install -e ./ 
+ENV REPO=Qigemingziba/FedML ACCESS_TOKEN=AGMK3P4W5EM5PXNYTZXXIMTGNF4MW
 
 # set the entrypoint to the start.sh script
-CMD ./start.sh ${REPO} ${ACCESS_TOKEN}
\ No newline at end of file
+CMD ./start.sh ${REPO} ${ACCESS_TOKEN} 
+
+
diff --git a/.github/workflows/registry-runners/build_linux_runners.sh b/.github/workflows/registry-runners/build_linux_runners.sh
new file mode 100644
index 0000000000..fb4b6e1abc
--- /dev/null
+++ b/.github/workflows/registry-runners/build_linux_runners.sh
@@ -0,0 +1,12 @@
+tag="0.1.0"
+
+platform="linux/amd64"
+
+echo "build python:3.11"
+docker build --no-cache --platform $platform --build-arg BASE_IMAGE=python:3.11 -t fedml/action_runner_3.11_linux64:$tag -f ./Dockerfile .
+echo "build python:3.10"
+docker build --no-cache  --platform $platform --build-arg BASE_IMAGE=python:3.10 -t fedml/action_runner_3.10_linux64:$tag -f ./Dockerfile .
+echo "build python:3.9"
+docker build --no-cache --platform $platform --build-arg BASE_IMAGE=python:3.9 -t fedml/action_runner_3.9_linux64:$tag -f ./Dockerfile .
+echo "build python:3.8"
+docker build --no-cache --platform $platform --build-arg BASE_IMAGE=python:3.8 -t fedml/action_runner_3.8_linux64:$tag -f ./Dockerfile .
diff --git a/.github/workflows/registry-runners/build_test.sh b/.github/workflows/registry-runners/build_test.sh
new file mode 100755
index 0000000000..1e17dc6847
--- /dev/null
+++ b/.github/workflows/registry-runners/build_test.sh
@@ -0,0 +1 @@
+docker build -t fedml/action_runner_3.11_linux64:0.1 -f ./Dockerfile .
diff --git a/.github/workflows/registry-runners/run_linux_runners.sh b/.github/workflows/registry-runners/run_linux_runners.sh
new file mode 100644
index 0000000000..fa70388de8
--- /dev/null
+++ b/.github/workflows/registry-runners/run_linux_runners.sh
@@ -0,0 +1,48 @@
+REPO=$1
+ACCESS_TOKEN=$2
+API_KEY=$3
+DOCKER_PULL=false
+ARCH=linux64
+TAG="0.1.0"
+
+if [ $# != 3 ]; then
+  echo "Please provide two arguments."
+  echo "./runner-start.sh [YourGitRepo][YourGitHubRunnerToken][API_KEY]"
+  exit -1
+fi
+
+# List of Docker container names
+# containers=("fedml/action_runner_3.8_$ARCH:0.1.0" "fedml/action_runner_3.9_$ARCH:0.1.0" "fedml/action_runner_3.10_$ARCH:0.1.0" "fedml/action_runner_3.11_$ARCH:0.1.0")
+containers=("action_runner_3.8_$ARCH" "action_runner_3.9_$ARCH" "action_runner_3.10_$ARCH" "action_runner_3.11_$ARCH")
+python_versions=("python3.8" "python3.9" "python3.10" "python3.11")
+
+
+# Iterate through each container
+for container_index in "${!containers[@]}"; do
+
+    container=${containers[$container_index]}
+    # Find the running container
+    if [ "$DOCKER_PULL" = "true" ]; then
+        echo "docker pull fedml/$container:$TAG"
+        docker pull fedml/$container:$TAG
+    fi
+    # docker stop `sudo docker ps |grep ${TAG}- |awk -F' ' '{print $1}'`
+
+    running_container=$(docker ps -a | grep $container | awk -F ' ' '{print $1}')
+
+    if [ -n "$running_container" ]; then
+        # Stop the running container
+        echo "Stopping running container: $container, $running_container"
+        docker stop "$running_container"
+    else
+        echo "No running container found for: $container"
+    fi
+    sleep 5
+    # docker pull $container
+    ACT_NAME=${containers[$container_index]}
+    echo "docker run --rm --name $ACT_NAME --env API_KEY=$API_KEY --env REPO=$REPO --env ACCESS_TOKEN=$ACCESS_TOKEN -d fedml/${containers[$container_index]}:$TAG bash ./start.sh ${REPO} ${ACCESS_TOKEN} ${python_versions[$container_index]}"
+    docker run --rm --name $ACT_NAME --env API_KEY=$API_KEY --env REPO=$REPO --env ACCESS_TOKEN=$ACCESS_TOKEN -d fedml/${containers[$container_index]}:$TAG bash ./start.sh ${REPO} ${ACCESS_TOKEN} ${python_versions[$container_index]}
+
+done
+echo "Script completed."
+
diff --git a/devops/dockerfile/github-action-runner/start.sh b/.github/workflows/registry-runners/start.sh
similarity index 76%
rename from devops/dockerfile/github-action-runner/start.sh
rename to .github/workflows/registry-runners/start.sh
index 917d1cfe16..b65b0f1272 100644
--- a/devops/dockerfile/github-action-runner/start.sh
+++ b/.github/workflows/registry-runners/start.sh
@@ -2,13 +2,15 @@
 
 ORGANIZATION=$1
 ACCESS_TOKEN=$2
+PYTHON_VERSION=$3
 
 echo $ORGANIZATION
 echo $ACCESS_TOKEN
+echo $PYTHON_VERSION
 
 cd /home/fedml/actions-runner
 
-RUNNER_ALLOW_RUNASROOT="1" ./config.sh --url https://github.com/${ORGANIZATION} --token ${ACCESS_TOKEN}
+RUNNER_ALLOW_RUNASROOT="1" ./config.sh --url https://github.com/${ORGANIZATION} --token ${ACCESS_TOKEN} --labels self-hosted,Linux,X64,$PYTHON_VERSION
 
 cleanup() {
     echo "Removing runner..."
diff --git a/.github/workflows/registry-runners/windows.bat b/.github/workflows/registry-runners/windows.bat
new file mode 100644
index 0000000000..dcdcf81b57
--- /dev/null
+++ b/.github/workflows/registry-runners/windows.bat
@@ -0,0 +1,38 @@
+set REPO=Qigemingziba/FedML
+set ACCESS_TOKEN=AGMK3P4W5EM5PXNYTZXXIMTGNF4MW
+set WORKPLACE=%pwd%
+mkdir actions-runner-python38; cd actions-runner-python38
+conda activate python38
+Invoke-WebRequest -Uri https://github.com/actions/runner/releases/download/v2.317.0/actions-runner-win-x64-2.317.0.zip -OutFile actions-runner-win-x64-2.317.0.zip
+Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory("$PWD/actions-runner-win-x64-2.317.0.zip", "$PWD")
+./config.cmd --url https://github.com/%REPO% --token %ACCESS_TOKEN% --labels self-hosted,Windows,X64,python3.8
+.\run.cmd install
+.\run.cmd start
+
+cd WORKPLACE
+mkdir actions-runner-python39; cd actions-runner-python39
+conda activate python39
+Invoke-WebRequest -Uri https://github.com/actions/runner/releases/download/v2.317.0/actions-runner-win-x64-2.317.0.zip -OutFile actions-runner-win-x64-2.317.0.zip
+Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory("$PWD/actions-runner-win-x64-2.317.0.zip", "$PWD")
+./config.cmd --url https://github.com/%REPO% --token %ACCESS_TOKEN% --labels self-hosted,Windows,X64,python3.9
+.\run.cmd install
+.\run.cmd start
+
+cd WORKPLACE
+mkdir actions-runner-python310; cd actions-runner-python310
+conda activate python310
+Invoke-WebRequest -Uri https://github.com/actions/runner/releases/download/v2.317.0/actions-runner-win-x64-2.317.0.zip -OutFile actions-runner-win-x64-2.317.0.zip
+Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory("$PWD/actions-runner-win-x64-2.317.0.zip", "$PWD")
+./config.cmd --url https://github.com/%REPO% --token %ACCESS_TOKEN% --labels self-hosted,Windows,X64,python3.10
+.\run.cmd install
+.\run.cmd start
+
+cd WORKPLACE
+mkdir actions-runner-python311; cd actions-runner-python311
+conda activate python311
+Invoke-WebRequest -Uri https://github.com/actions/runner/releases/download/v2.317.0/actions-runner-win-x64-2.317.0.zip -OutFile actions-runner-win-x64-2.317.0.zip
+Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory("$PWD/actions-runner-win-x64-2.317.0.zip", "$PWD")
+./config.cmd --url https://github.com/%REPO% --token %ACCESS_TOKEN% --labels self-hosted,Windows,X64,python3.11
+.\run.cmd install
+.\run.cmd start
+
diff --git a/devops/dockerfile/github-action-runner/README.md b/devops/dockerfile/github-action-runner/README.md
deleted file mode 100644
index d02e29665b..0000000000
--- a/devops/dockerfile/github-action-runner/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Run self-host runner in your machine
-
-## Usage
-
-./runner-start.sh [YourGitRepo] [YourRunnerPrefix] [YourRunnerNum] [YourGitHubRunnerToken] [LocalDevSourceDir] [LocalReleaseSourceDir] [LocalDataDir]
-
-For the argument YourGitHubRunnerToken, you may navigate based the following path.
-
-Settings -> Actions -> Runners -> New self-hosted runner. 
-
-In the Configure section, you should find the similar line:
-./config.sh --url https://github.com/FedML-AI/FedML --token AXRYPL6G2VHVGDFDQQS5XA3ELYI6M
-
-set YourGitHubRunnerToken to value of --token
-
-
-## Example
-
-Use the following commands to run 30 runners in the FedML-AI/FedML repo and run 6 runners in the FedML-AI/Front-End-Auto-Test repo:
-
-./runner-start.sh FedML-AI/FedML fedml-runner 30 AXRYPLZLZN6XVJB3BAIXSP3EMFC7U /home/fedml/FedML4GitHubAction-Dev /home/fedml/FedML4GitHubAction /home/fedml/fedml_data
-./runner-start.sh FedML-AI/Front-End-Auto-Test webtest-runner 6 AXRYPL57ZD35ZGDWZKRKFHLEMGLTK /home/fedml/FedML4GitHubAction-Dev /home/fedml/FedML4GitHubAction /home/fedml/fedml_data
-
-./runner-start.sh FedML-AI/FedML fedml-runner 30 AXRYPL6CCBH24ZVRSUEAYTTEMKD56 /home/chaoyanghe/sourcecode/FedML4GitHubAction-Dev /home/chaoyanghe/sourcecode/FedML4GitHubAction /home/chaoyanghe/fedml_data
-./runner-start.sh FedML-AI/Front-End-Auto-Test webtest-runner 6 AXRYPL57ZD35ZGDWZKRKFHLEMGLTK /home/chaoyanghe/sourcecode/FedML4GitHubAction-Dev /home/chaoyanghe/sourcecode/FedML4GitHubAction /home/chaoyanghe/fedml_data
diff --git a/devops/dockerfile/github-action-runner/build.sh b/devops/dockerfile/github-action-runner/build.sh
deleted file mode 100755
index 5f6dae9615..0000000000
--- a/devops/dockerfile/github-action-runner/build.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-docker build -t fedml/github-action-runner:latest -f ./Dockerfile .
-docker login
-docker push fedml/github-action-runner:latest
\ No newline at end of file
diff --git a/devops/dockerfile/github-action-runner/runner-start.sh b/devops/dockerfile/github-action-runner/runner-start.sh
deleted file mode 100644
index 18a0c4f958..0000000000
--- a/devops/dockerfile/github-action-runner/runner-start.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-REPO=$1
-TAG=$2
-NUM=$3
-ACCESS_TOKEN=$4
-LOCAL_DEV_SOURCE_DIR=$5
-LOCAL_RELEASE_SOURCE_DIR=$6
-LOCAL_DATA_DIR=$7
-
-if [ $# != 7 ]; then
-  echo "Please provide five arguments."
-  echo "./runner-start.sh [YourGitRepo] [YourRunnerPrefix] [YourRunnerNum] [YourGitHubRunnerToken] [LocalDevSourceDir] [LocalReleaseSourceDir] [LocalDataDir]"
-  exit -1
-fi
-
-sudo docker stop `sudo docker ps |grep ${TAG}- |awk -F' ' '{print $1}'`
-sudo docker pull fedml/github-action-runner:latest
-
-for((i=1;i<=$NUM;i++));
-do
-ACT_NAME=$TAG-$i
-sudo docker rm $ACT_NAME
-sudo docker run --name $ACT_NAME --env REPO=$REPO --env ACCESS_TOKEN=$ACCESS_TOKEN -v $LOCAL_DEV_SOURCE_DIR:/home/actions-runner/fedml-dev -v $LOCAL_RELEASE_SOURCE_DIR:/home/actions-runner/fedml-master -v $LOCAL_DATA_DIR:/home/fedml/fedml_data -v $LOCAL_DATA_DIR:/home/actions-runner/fedml_data -d fedml/github-action-runner:latest
-done
\ No newline at end of file
diff --git a/devops/scripts/install-fedml.sh b/devops/scripts/install-fedml.sh
new file mode 100644
index 0000000000..cafcfa3ac7
--- /dev/null
+++ b/devops/scripts/install-fedml.sh
@@ -0,0 +1,2 @@
+cd python
+pip install -e ./
\ No newline at end of file
diff --git a/devops/scripts/sync-fedml-pip.sh b/devops/scripts/sync-fedml-pip.sh
index 0d909fff76..6b24ac52e7 100755
--- a/devops/scripts/sync-fedml-pip.sh
+++ b/devops/scripts/sync-fedml-pip.sh
@@ -24,7 +24,7 @@ else
   fi
 fi
 
-mkdir -p /home/fedml/fedml_data
-cp -Rf /home/fedml/fedml_data_host/* /home/fedml/fedml_data
+mkdir -p ./fedml/fedml_data
+cp -Rf ./fedml/fedml_data_host/* ./fedml/fedml_data
 
 exit 0
diff --git a/python/examples/federate/cross_silo/cuda_rpc_fedavg_mnist_lr_example/README.md b/python/examples/federate/cross_silo/cuda_rpc_fedavg_mnist_lr_example/README.md
index c693d8d863..a1fa30b6f2 100644
--- a/python/examples/federate/cross_silo/cuda_rpc_fedavg_mnist_lr_example/README.md
+++ b/python/examples/federate/cross_silo/cuda_rpc_fedavg_mnist_lr_example/README.md
@@ -26,7 +26,7 @@ For info on `trpc_master_config_path` refer to `python/examples/cross_silo/cuda_
 
 Example is provided at:
 
-`python/examples/cross_silo/cuda_rpc_fedavg_mnist_lr_example/one_line`
+`python/examples/federate/cross_silo/cuda_rpc_fedavg_mnist_lr_example/one_line`
 ### Training Script
 
 At the client side, the client ID (a.k.a rank) starts from 1.
diff --git a/python/examples/launch/examples/launch/hello_world/launch_config/fedml_config.yaml b/python/examples/launch/examples/launch/hello_world/launch_config/fedml_config.yaml
new file mode 100644
index 0000000000..21e1f2e33e
--- /dev/null
+++ b/python/examples/launch/examples/launch/hello_world/launch_config/fedml_config.yaml
@@ -0,0 +1,14 @@
+containerize: false
+data_args:
+  dataset_name: mnist
+  dataset_path: ./dataset
+  dataset_type: csv
+environment_args:
+  bootstrap: fedml_bootstrap_generated.sh
+model_args:
+  input_dim: '784'
+  model_cache_path: /Users/alexliang/fedml_models
+  model_name: lr
+  output_dim: '10'
+training_params:
+  learning_rate: 0.004
diff --git a/python/examples/launch/hello_world/hello_world.py b/python/examples/launch/hello_world/hello_world.py
index 71ffaf7c16..2f68f99055 100644
--- a/python/examples/launch/hello_world/hello_world.py
+++ b/python/examples/launch/hello_world/hello_world.py
@@ -1,6 +1,5 @@
 import os
 import time
-
 import fedml
 
 if __name__ == "__main__":
diff --git a/python/examples/launch/serve_job_mnist.yaml b/python/examples/launch/serve_job_mnist.yaml
index 98c1570a4f..bd8b52ca6c 100755
--- a/python/examples/launch/serve_job_mnist.yaml
+++ b/python/examples/launch/serve_job_mnist.yaml
@@ -35,4 +35,4 @@ computing:
   maximum_cost_per_hour: $3000   # max cost per hour for your job per gpu card
   #allow_cross_cloud_resources: true # true, false
   #device_type: CPU              # options: GPU, CPU, hybrid
-  resource_type: A100-80G       # e.g., A100-80G, please check the resource type list by "fedml show-resource-type" or visiting URL: https://open.fedml.ai/accelerator_resource_type
\ No newline at end of file
+  resource_type: RTX-4090   # e.g., A100-80G, please check the resource type list by "fedml show-resource-type" or visiting URL: https://open.fedml.ai/accelerator_resource_type
\ No newline at end of file
diff --git a/python/examples/train/mnist_train/examples/train/mnist_train/launch_config/fedml_config.yaml b/python/examples/train/mnist_train/examples/train/mnist_train/launch_config/fedml_config.yaml
new file mode 100644
index 0000000000..188c19dde6
--- /dev/null
+++ b/python/examples/train/mnist_train/examples/train/mnist_train/launch_config/fedml_config.yaml
@@ -0,0 +1,3 @@
+containerize: false
+environment_args:
+  bootstrap: fedml_bootstrap_generated.sh
diff --git a/python/examples/train/mnist_train/train.py b/python/examples/train/mnist_train/train.py
new file mode 100644
index 0000000000..611a15c2b6
--- /dev/null
+++ b/python/examples/train/mnist_train/train.py
@@ -0,0 +1,98 @@
+import torch
+import torch.nn as nn
+import torch.optim as optim
+import torchvision
+import torchvision.transforms as transforms
+from torch.utils.data import DataLoader
+import fedml
+# Set random seed for reproducibility
+torch.manual_seed(42)
+
+# Define hyperparameters
+batch_size = 64
+learning_rate = 0.001
+num_epochs = 3
+
+# Prepare dataset and data loaders
+transform = transforms.Compose([
+    transforms.ToTensor(),  # Convert image to tensor, normalize to [0, 1]
+    transforms.Normalize((0.5,), (0.5,))  # Normalize with mean and std deviation of 0.5
+])
+
+train_dataset = torchvision.datasets.MNIST(root='./data', train=True, transform=transform, download=True)
+train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
+
+test_dataset = torchvision.datasets.MNIST(root='./data', train=False, transform=transform, download=True)
+test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
+
+# Define a simple convolutional neural network model
+class SimpleCNN(nn.Module):
+    def __init__(self):
+        super(SimpleCNN, self).__init__()
+        self.conv1 = nn.Conv2d(1, 16, kernel_size=5, padding=2)
+        self.conv2 = nn.Conv2d(16, 32, kernel_size=5, padding=2)
+        self.fc1 = nn.Linear(32 * 7 * 7, 128)
+        self.fc2 = nn.Linear(128, 10)
+
+    def forward(self, x):
+        x = torch.relu(self.conv1(x))
+        x = torch.max_pool2d(x, kernel_size=2, stride=2)
+        x = torch.relu(self.conv2(x))
+        x = torch.max_pool2d(x, kernel_size=2, stride=2)
+        x = x.view(-1, 32 * 7 * 7)
+        x = torch.relu(self.fc1(x))
+        x = self.fc2(x)
+        return x
+
+model = SimpleCNN()
+
+# Define loss function and optimizer
+criterion = nn.CrossEntropyLoss()
+optimizer = optim.Adam(model.parameters(), lr=learning_rate)
+
+# Train the model
+for epoch in range(num_epochs):
+
+    # Evaluate the model on the test set during training
+    model.eval()
+    with torch.no_grad():
+        correct = 0
+        total = 0
+        for images, labels in test_loader:
+            outputs = model(images)
+            _, predicted = torch.max(outputs.data, 1)
+            total += labels.size(0)
+            correct += (predicted == labels).sum().item()
+        acc = 100 * correct / total
+        fedml.mlops.log_metric({"epoch":epoch, "acc": acc})
+
+    model.train()
+    for images, labels in train_loader:
+        # Forward pass
+        outputs = model(images)
+        loss = criterion(outputs, labels)
+
+        # Backward and optimize
+        optimizer.zero_grad()
+        loss.backward()
+        optimizer.step()
+
+# Final evaluation on the test set
+model.eval()
+with torch.no_grad():
+    correct = 0
+    total = 0
+    for images, labels in test_loader:
+        outputs = model(images)
+        _, predicted = torch.max(outputs.data, 1)
+        total += labels.size(0)
+        correct += (predicted == labels).sum().item()
+
+    acc = 100 * correct / total
+    print('Final Test Accuracy: {:.2f} %'.format(acc))
+    fedml.mlops.log_metric({"epoch":num_epochs, "acc": acc})
+
+fedml.mlops.log_model(f"model-file@test", "./simple_cnn.pth")
+# # Save the model parameters
+# torch.save(model.state_dict(), 'simple_cnn.pth')
+# print('Model saved to simple_cnn.pth')
diff --git a/python/examples/train/mnist_train/train.yaml b/python/examples/train/mnist_train/train.yaml
new file mode 100644
index 0000000000..f9a5cc5ab5
--- /dev/null
+++ b/python/examples/train/mnist_train/train.yaml
@@ -0,0 +1,50 @@
+# Local directory where your source code resides.
+# It should be the relative path to this job yaml file or the absolute path.
+# If your job doesn't contain any source code, it can be empty.
+workspace: .
+
+# Running entry commands which will be executed as the job entry point.
+# If an error occurs, you should exit with a non-zero code, e.g. exit 1.
+# Otherwise, you should exit with a zero code, e.g. exit 0.
+# Support multiple lines, which can not be empty.
+job: |
+    echo "current job id: $FEDML_CURRENT_RUN_ID"
+    echo "current edge id: $FEDML_CURRENT_EDGE_ID"
+    echo "Hello, Here is the launch platform."
+    echo "Current directory is as follows."
+    pwd
+    python3 train.py
+    echo "training job finished."
+
+# If you want to use the job created by the MLOps platform,
+# just uncomment the following three, then set job_id and config_id to your desired job id and related config.
+#job_args:
+#  job_id: 2070
+#  config_id: 111
+
+# If you want to create the job with specific name, just uncomment the following line and set job_name to your desired job name
+#job_name: cv_job
+
+job_type: train              # options: train, deploy, federate
+
+# train subtype: general_training, single_machine_training, cluster_distributed_training, cross_cloud_training
+# federate subtype: cross_silo, simulation, web, smart_phone
+# deploy subtype: none
+job_subtype: generate_training
+
+# containerize
+containerize: false
+
+# Bootstrap shell commands which will be executed before running entry commands.
+# Support multiple lines, which can be empty.
+bootstrap: |
+  # pip install -r requirements.txt
+  echo "Bootstrap finished."
+
+computing:
+  minimum_num_gpus: 1           # minimum # of GPUs to provision
+  maximum_cost_per_hour: $3000   # max cost per hour for your job per gpu card
+  #allow_cross_cloud_resources: true # true, false
+  #device_type: CPU              # options: GPU, CPU, hybrid
+  resource_type: RTX-4090       # e.g., A100-80G, please check the resource type list by "fedml show-resource-type" or visiting URL: https://open.fedml.ai/accelerator_resource_type
+
diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index bf07838e56..c96d65adc5 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -452,28 +452,14 @@ def _init_multiprocessing():
     """
     if platform.system() == "Windows":
         if multiprocessing.get_start_method() != "spawn":
-            # force all platforms (Windows) to use the same way (spawn) for multiprocessing
+            # force all platforms (Windows/Linux/macOS) to use the same way (spawn) for multiprocessing
             multiprocessing.set_start_method("spawn", force=True)
     else:
         if multiprocessing.get_start_method() != "fork":
-            # force all platforms (Linux/macOS) to use the same way (fork) for multiprocessing
+            # force all platforms (Windows/Linux/macOS) to use the same way (fork) for multiprocessing
             multiprocessing.set_start_method("fork", force=True)
 
 
-def get_multiprocessing_context():
-    if platform.system() == "Windows":
-        return multiprocessing.get_context("spawn")
-    else:
-        return multiprocessing.get_context("fork")
-
-
-def get_process(target=None, args=None):
-    if platform.system() == "Windows":
-        return multiprocessing.Process(target=target, args=args)
-    else:
-        return multiprocessing.get_context("fork").Process(target=target, args=args)
-
-
 def set_env_version(version):
     set_env_kv("FEDML_ENV_VERSION", version)
     load_env()
diff --git a/python/fedml/api/__init__.py b/python/fedml/api/__init__.py
index 3e75b987d6..ac6e988dc6 100755
--- a/python/fedml/api/__init__.py
+++ b/python/fedml/api/__init__.py
@@ -270,6 +270,9 @@ def model_deploy(name, endpoint_name, endpoint_id, local, master_ids, worker_ids
 def model_run(endpoint_id, json_string):
     model_module.run(endpoint_id, json_string)
 
+def get_endpoint(endpoint_id):
+    return model_module.get_endpoint(endpoint_id)
+
 
 def endpoint_delete(endpoint_id):
     model_module.delete_endpoint(endpoint_id)
diff --git a/python/fedml/api/api_test.py b/python/fedml/api/api_test.py
index 1aa5ac3767..5a01a76448 100755
--- a/python/fedml/api/api_test.py
+++ b/python/fedml/api/api_test.py
@@ -4,9 +4,9 @@
 import fedml
 
 # Login
-fedml.set_env_version("local")
+fedml.set_env_version("test")
 fedml.set_local_on_premise_platform_port(18080)
-error_code, error_msg = fedml.api.fedml_login(api_key="1316b93c82da40ce90113a2ed12f0b14")
+error_code, error_msg = fedml.api.fedml_login(api_key="")
 if error_code != 0:
     print("API Key is invalid!")
     exit(1)
@@ -19,7 +19,7 @@
 
 # Launch job
 launch_result_list = list()
-for i in range(0, 1):
+for i in range(0, 10):
     launch_result = fedml.api.launch_job(yaml_file)
     launch_result_list.append(launch_result)
     # launch_result = fedml.api.launch_job_on_cluster(yaml_file, "alex-cluster")
diff --git a/python/fedml/api/modules/model.py b/python/fedml/api/modules/model.py
index a02e674f47..93892fc5d1 100644
--- a/python/fedml/api/modules/model.py
+++ b/python/fedml/api/modules/model.py
@@ -320,6 +320,19 @@ def run(endpoint_id: str, json_string: str) -> bool:
         click.echo("Failed to run model.")
         return False
 
+def get_endpoint(endpoint_id: str):
+    api_key = get_api_key()
+    if api_key == "":
+        click.echo('''
+                Please use one of the ways below to login first:
+                (1) CLI: `fedml login $api_key`
+                (2) API: fedml.api.fedml_login(api_key=$api_key)
+                ''')
+        return False
+    
+    endpoint_detail_result = FedMLModelCards.get_instance().query_endpoint_detail_api(user_api_key=api_key, 
+                                                                endpoint_id=endpoint_id)
+    return endpoint_detail_result
 
 def delete_endpoint(endpoint_id: str) -> bool:
     api_key = get_api_key()
diff --git a/python/tests/cross-silo/run_cross_silo.sh b/python/tests/cross-silo/run_cross_silo.sh
index 2ccdbff15b..0beaaffc52 100644
--- a/python/tests/cross-silo/run_cross_silo.sh
+++ b/python/tests/cross-silo/run_cross_silo.sh
@@ -1,10 +1,10 @@
 #!/bin/bash
 set -e
 WORKSPACE=$(pwd)
-PROJECT_HOME=$WORKSPACE/../../
-cd $PROJECT_HOME
+# PROJECT_HOME=$WORKSPACE/../../
+# cd $PROJECT_HOME
 
-cd examples/cross_silo/mqtt_s3_fedavg_mnist_lr_example/custom_data_and_model
+cd examples/federate/cross_silo/mqtt_s3_fedavg_mnist_lr_example/custom_data_and_model
 
 # run client(s)
 RUN_ID="$(python -c "import uuid; print(uuid.uuid4().hex)")"
diff --git a/python/tests/smoke_test/cli/build.sh b/python/tests/smoke_test/cli/build.sh
index 98fdb05244..de956692f1 100644
--- a/python/tests/smoke_test/cli/build.sh
+++ b/python/tests/smoke_test/cli/build.sh
@@ -16,7 +16,7 @@
 #  --help                     Show this message and exit.
 
 # build client package
-cd ../../../examples/cross_silo/mqtt_s3_fedavg_mnist_lr_example/one_line
+cd ../../../examples/federate/cross_silo/mqtt_s3_fedavg_mnist_lr_example/one_line
 echo "$PWD"
 
 SOURCE=client
@@ -30,4 +30,4 @@ SOURCE=server
 ENTRY=torch_server.py
 CONFIG=config
 DEST=./mlops
-fedml build -t server -sf $SOURCE -ep $ENTRY -cf $CONFIG -df $DEST
\ No newline at end of file
+fedml build -t server -sf $SOURCE -ep $ENTRY -cf $CONFIG -df $DEST
diff --git a/python/tests/test_deploy/test_deploy.py b/python/tests/test_deploy/test_deploy.py
new file mode 100644
index 0000000000..d7243c68de
--- /dev/null
+++ b/python/tests/test_deploy/test_deploy.py
@@ -0,0 +1,39 @@
+import os.path
+import time
+import fedml
+# Login
+API_KEY = os.getenv("API_KEY")
+fedml.set_env_version("test")
+fedml.set_local_on_premise_platform_port(18080)
+error_code, error_msg = fedml.api.fedml_login(api_key=API_KEY)
+if error_code != 0:
+    raise Exception("API Key is invalid!")
+
+# Yaml file
+cur_dir = os.path.dirname(__file__)
+fedml_dir = os.path.dirname(cur_dir)
+python_dir = os.path.dirname(fedml_dir)
+yaml_file = os.path.join(python_dir, "examples", "launch", "serve_job_mnist.yaml")
+
+# Launch job
+launch_result_dict = {}
+launch_result_status = {}
+
+launch_result = fedml.api.launch_job(yaml_file)
+print("Endpoint id is", launch_result.inner_id)
+
+cnt = 0
+while 1:
+    try:
+        r = fedml.api.get_endpoint(endpoint_id=launch_result.inner_id)
+    except Exception as e:
+        raise Exception(f"FAILED to get endpoint:{launch_result.inner_id}. {e}")
+    if r.status == "DEPLOYED":
+        print("Deployment has been successful!")
+        break 
+    elif r.status == "FAILED":
+        raise Exception("FAILED to deploy.")
+    time.sleep(1)
+    cnt += 1
+    if cnt %3 ==0:
+        print('Deployment status is', r.status)
\ No newline at end of file
diff --git a/python/tests/test_federate/test_federate.sh b/python/tests/test_federate/test_federate.sh
new file mode 100644
index 0000000000..ebfcb60330
--- /dev/null
+++ b/python/tests/test_federate/test_federate.sh
@@ -0,0 +1,26 @@
+
+WORKSPACE=`pwd`
+echo $WORKSPACE
+cd $WORKSPACE/examples/federate/quick_start/parrot
+python torch_fedavg_mnist_lr_one_line_example.py --cf fedml_config.yaml
+python torch_fedavg_mnist_lr_custum_data_and_model_example.py --cf fedml_config.yaml
+
+cd $WORKSPACE/examples/federate/simulation/sp_decentralized_mnist_lr_example
+python torch_fedavg_mnist_lr_step_by_step_example.py --cf fedml_config.yaml
+
+cd $WORKSPACE/examples/federate/simulation/sp_fednova_mnist_lr_example
+python torch_fednova_mnist_lr_step_by_step_example.py --cf fedml_config.yaml
+          
+cd $WORKSPACE/examples/federate/simulation/sp_fedopt_mnist_lr_example
+python torch_fedopt_mnist_lr_step_by_step_example.py --cf fedml_config.yaml
+
+cd $WORKSPACE/examples/federate/simulation/sp_hierarchicalfl_mnist_lr_example
+python torch_hierarchicalfl_mnist_lr_step_by_step_example.py --cf fedml_config.yaml
+
+
+cd $WORKSPACE/examples/federate/simulation/sp_turboaggregate_mnist_lr_example
+python torch_turboaggregate_mnist_lr_step_by_step_example.py --cf fedml_config.yaml 
+
+
+cd $WORKSPACE/examples/federate/simulation/sp_vertical_mnist_lr_example
+python torch_vertical_mnist_lr_step_by_step_example.py --cf fedml_config.yaml 
diff --git a/python/tests/test_launch/test_launch.py b/python/tests/test_launch/test_launch.py
new file mode 100644
index 0000000000..a6b6ffb9cf
--- /dev/null
+++ b/python/tests/test_launch/test_launch.py
@@ -0,0 +1,50 @@
+import os.path
+import time
+import fedml
+from fedml.api.constants import RunStatus
+
+API_KEY = os.getenv("API_KEY")
+# Login
+fedml.set_env_version("test")
+fedml.set_local_on_premise_platform_port(18080)
+error_code, error_msg = fedml.api.fedml_login(api_key=API_KEY)
+if error_code != 0:
+    raise Exception("API Key is invalid!")
+
+# Yaml file
+cur_dir = os.path.dirname(__file__)
+fedml_dir = os.path.dirname(cur_dir)
+python_dir = os.path.dirname(fedml_dir)
+yaml_file = os.path.join(python_dir, "examples", "launch", "hello_job.yaml")
+
+# Launch job
+
+launch_result = fedml.api.launch_job(yaml_file)
+
+# launch_result = fedml.api.launch_job_on_cluster(yaml_file, "alex-cluster")
+if launch_result.result_code != 0:
+    raise Exception(f"Failed to launch job. Reason: {launch_result.result_message}")
+        
+# check job status
+while 1:
+    time.sleep(1)
+    # if 
+    #     if launch_result_status[run_id] == RunStatus.FINISHED:
+    #         continue
+    log_result = fedml.api.run_logs(launch_result.run_id, 1, 5)
+    if log_result is None or log_result.run_status is None:
+        raise Exception(f"Failed to get job status.")
+
+    print(f"run_id: {launch_result.run_id} run_status: {log_result.run_status}")
+    
+    if log_result.run_status in [RunStatus.ERROR, RunStatus.FAILED]:
+        log_result = fedml.api.run_logs(launch_result.run_id, 1, 100)
+        if log_result is None or log_result.run_status is None:
+            raise Exception(f"run_id:{launch_result.run_id} run_status:{log_result.run_status} and failed to get run logs.")
+
+        raise Exception(f"run_id:{launch_result.run_id} run_status:{log_result.run_status} run logs: {log_result.log_line_list}")
+    if log_result.run_status == RunStatus.FINISHED:
+        print(f"Job finished successfully.")
+        break
+        
+
diff --git a/python/tests/test_train/test_train.py b/python/tests/test_train/test_train.py
new file mode 100644
index 0000000000..039d3b81d2
--- /dev/null
+++ b/python/tests/test_train/test_train.py
@@ -0,0 +1,49 @@
+import os.path
+import time
+import fedml
+from fedml.api.constants import RunStatus
+
+API_KEY = os.getenv("API_KEY")
+# Login
+fedml.set_env_version("test")
+fedml.set_local_on_premise_platform_port(18080)
+error_code, error_msg = fedml.api.fedml_login(api_key=API_KEY)
+if error_code != 0:
+    raise Exception("API Key is invalid!")
+
+# Yaml file
+cur_dir = os.path.dirname(__file__)
+fedml_dir = os.path.dirname(cur_dir)
+python_dir = os.path.dirname(fedml_dir)
+yaml_file = os.path.join(python_dir, "examples", "train", "mnist_train", "train.yaml")
+
+# Launch job
+
+launch_result = fedml.api.launch_job(yaml_file)
+
+# launch_result = fedml.api.launch_job_on_cluster(yaml_file, "alex-cluster")
+if launch_result.result_code != 0:
+    raise Exception(f"Failed to launch job. Reason: {launch_result.result_message}")
+        
+# check job status
+while 1:
+    time.sleep(1)
+    # if 
+    #     if launch_result_status[run_id] == RunStatus.FINISHED:
+    #         continue
+    log_result = fedml.api.run_logs(launch_result.run_id, 1, 5)
+    if log_result is None or log_result.run_status is None:
+        raise Exception(f"Failed to get job status.")
+
+    print(f"run_id: {launch_result.run_id} run_status: {log_result.run_status}")
+    
+    if log_result.run_status in [RunStatus.ERROR, RunStatus.FAILED]:
+        log_result = fedml.api.run_logs(launch_result.run_id, 1, 100)
+        if log_result is None or log_result.run_status is None:
+            raise Exception(f"run_id:{launch_result.run_id} run_status:{log_result.run_status} and failed to get run logs.")
+
+        raise Exception(f"run_id:{launch_result.run_id} run_status:{log_result.run_status} run logs: {log_result.log_line_list}")
+    if log_result.run_status == RunStatus.FINISHED:
+        print(f"Job finished successfully.")
+        break
+        

From afe4147da99c102dd2153c4727214adce3a5f1a6 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 20 Jun 2024 19:24:38 +0800
Subject: [PATCH 174/282] [CoreEngine] in order to debug easily for
 multiprocessing, add the process name to each python process.

---
 python/fedml/__init__.py                      | 11 ++-
 .../master/base_master_job_runner.py          | 28 +++++---
 .../master/base_master_job_runner_manager.py  | 15 ++--
 .../master/base_master_protocol_manager.py    | 16 +++--
 .../scheduler/master/cloud_server_manager.py  |  7 +-
 .../master/master_protocol_manager.py         | 11 +--
 .../master_protocol_manager.py                |  3 +-
 .../worker_protocol_manager.py                |  4 +-
 .../scheduler_core/general_constants.py       | 70 +++++++++++++++++++
 .../scheduler_core/message_center.py          | 27 ++++---
 .../scheduler_base_job_runner.py              |  2 +-
 .../scheduler_base_job_runner_manager.py      |  5 +-
 .../scheduler_base_protocol_manager.py        |  6 ++
 .../scheduler/scheduler_core/status_center.py | 31 ++++++--
 .../status_manager_protocols.py               | 11 ++-
 .../scheduler/slave/base_slave_job_runner.py  | 16 +++--
 .../slave/base_slave_protocol_manager.py      |  3 +
 .../scheduler/slave/slave_protocol_manager.py |  1 +
 .../scheduler/slave/united_agents.py          |  8 +--
 .../core/mlops/mlops_runtime_log_daemon.py    | 16 +++--
 python/setup.py                               |  2 +-
 21 files changed, 221 insertions(+), 72 deletions(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index bf07838e56..618a5f9297 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -1,6 +1,7 @@
 import logging
 import platform
 
+import multiprocess
 import multiprocess as multiprocessing
 import os
 import random
@@ -37,7 +38,7 @@
 _global_training_type = None
 _global_comm_backend = None
 
-__version__ = "0.9.0"
+__version__ = "0.8.51b1"
 
 
 # This is the deployment environment used for different roles (RD/PM/BD/Public Developers). Potential VALUE: local, dev, test, release
@@ -471,7 +472,13 @@ def get_process(target=None, args=None):
     if platform.system() == "Windows":
         return multiprocessing.Process(target=target, args=args)
     else:
-        return multiprocessing.get_context("fork").Process(target=target, args=args)
+        #return multiprocessing.Process(target=target, args=args)
+        #multiprocessing.set_start_method("spawn", force=True)
+        #return multiprocess.context.SpawnContext.Process(target=target, args=args)
+        #multiprocessing.Manager().current_process().authkey = str.encode("abc")
+        new_process = multiprocessing.get_context("fork").Process(target=target, args=args)
+        #new_process.authkey = str.encode("abc")
+        return new_process
 
 
 def set_env_version(version):
diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py
index 9a77c2ba82..32b285dc7b 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py
@@ -7,6 +7,9 @@
 import os
 import time
 import traceback
+
+import setproctitle
+
 from ..scheduler_entry.constants import Constants
 from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
 from ..master.server_constants import ServerConstants
@@ -67,9 +70,12 @@ def run(
             edge_device_info_queue=None, run_metrics_queue=None, run_event_queue=None,
             run_artifacts_queue=None, run_logs_queue=None, edge_device_info_global_queue=None,
             run_extend_queue_list=None, sender_message_center_queue=None, listener_message_queue=None,
-            status_center_queue=None
+            status_center_queue=None, process_name=None
     ):
-        print(f"Master job runner process id {os.getpid()}, run id {self.run_id}")
+        if process_name is not None:
+            setproctitle.setproctitle(process_name)
+
+        print(f"Master job runner process id {os.getpid()}, name {process_name}, run id {self.run_id}")
 
         if platform.system() != "Windows":
             os.setsid()
@@ -168,7 +174,8 @@ def run_impl(
             run_id, self.request_json, edge_id=self.edge_id, is_server_job=True,
             sender_message_queue=sender_message_queue,
             listener_message_queue=listener_message_queue,
-            status_center_queue=status_center_queue
+            status_center_queue=status_center_queue,
+            process_name=GeneralConstants.get_launch_master_user_process_name(run_id, self.edge_id)
         )
 
         # Check if the run status is normal
@@ -230,9 +237,12 @@ def run_server_job(
             edge_device_info_queue=None, run_metrics_queue=None, run_event_queue=None,
             run_artifacts_queue=None, run_logs_queue=None, edge_device_info_global_queue=None,
             run_extend_queue_list=None, sender_message_center_queue=None, listener_message_queue=None,
-            status_center_queue=None
+            status_center_queue=None, process_name=None
     ):
-        print(f"Server runner process id {os.getpid()}, run id {self.run_id}")
+        if process_name is not None:
+            setproctitle.setproctitle(process_name)
+
+        print(f"Server runner process id {os.getpid()}, name {process_name}. run id {self.run_id}")
 
         if platform.system() != "Windows":
             os.setsid()
@@ -406,7 +416,7 @@ def _generate_job_runner_instance(self, args, run_id=None, request_json=None, ag
     def start_runner_process(
         self, run_id, request_json, edge_id=None, is_server_job=False,
         sender_message_queue=None, listener_message_queue=None,
-        status_center_queue=None,
+        status_center_queue=None, process_name=None
     ):
         server_runner = self._generate_job_runner_instance(
             self.args, run_id=run_id, request_json=request_json,
@@ -430,7 +440,8 @@ def start_runner_process(
                     self.run_process_event, self.run_process_completed_event, self.run_edge_id_status_queue,
                     self.run_edge_device_info_queue, self.run_metrics_queue, self.run_events_queue,
                     self.run_artifacts_queue, self.run_logs_queue, self.run_edge_device_info_global_queue,
-                    self.run_extend_queue_list, sender_message_queue, listener_message_queue,  status_center_queue
+                    self.run_extend_queue_list, sender_message_queue, listener_message_queue,  status_center_queue,
+                    process_name,
                 )
             )
         else:
@@ -439,7 +450,8 @@ def start_runner_process(
                     self.run_process_event, self.run_process_completed_event, self.run_edge_id_status_queue,
                     self.run_edge_device_info_queue, self.run_metrics_queue, self.run_events_queue,
                     self.run_artifacts_queue, self.run_logs_queue, self.run_edge_device_info_global_queue,
-                    self.run_extend_queue_list, sender_message_queue, listener_message_queue,  status_center_queue
+                    self.run_extend_queue_list, sender_message_queue, listener_message_queue,  status_center_queue,
+                    process_name,
                 )
             )
         self.run_process.start()
diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
index 664fb4671e..f462596cbf 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
@@ -11,6 +11,7 @@
 import fedml
 from .cloud_server_manager import FedMLCloudServerManager
 from ..comm_utils.run_process_utils import RunProcessUtils
+from ..scheduler_core.general_constants import GeneralConstants
 from ..scheduler_core.scheduler_base_job_runner_manager import FedMLSchedulerBaseJobRunnerManager
 from ..scheduler_core.account_manager import FedMLAccountManager
 
@@ -26,7 +27,7 @@ def start_job_runner(
             self, run_id, request_json, args=None, edge_id=None, is_server_job=False,
             sender_message_queue=None, listener_message_queue=None, status_center_queue=None,
             communication_manager=None, master_agent_instance=None, should_start_cloud_server=False,
-            use_local_process_as_cloud_server=False, cuda_visible_gpu_ids_str=None
+            use_local_process_as_cloud_server=False, cuda_visible_gpu_ids_str=None, process_name=None
     ):
         if should_start_cloud_server:
             self._start_cloud_server(
@@ -34,7 +35,7 @@ def start_job_runner(
                 use_local_process_as_cloud_server=use_local_process_as_cloud_server,
                 sender_message_queue=sender_message_queue, listener_message_queue=listener_message_queue,
                 status_center_queue=status_center_queue, communication_manager=communication_manager,
-                master_agent_instance=master_agent_instance)
+                master_agent_instance=master_agent_instance, process_name=process_name)
             return
 
         run_id_str = str(run_id)
@@ -46,7 +47,8 @@ def start_job_runner(
             run_id, request_json, edge_id=edge_id, is_server_job=is_server_job,
             sender_message_queue=sender_message_queue,
             listener_message_queue=listener_message_queue,
-            status_center_queue=status_center_queue
+            status_center_queue=status_center_queue,
+            process_name=process_name
         )
 
     def stop_job_runner(
@@ -96,7 +98,7 @@ def _start_cloud_server(
             use_local_process_as_cloud_server=False,
             sender_message_queue=None, listener_message_queue=None,
             status_center_queue=None, communication_manager=None,
-            master_agent_instance=None
+            master_agent_instance=None, process_name=None
     ):
         run_id_str = str(run_id)
         cloud_server_mgr = FedMLCloudServerManager(
@@ -108,6 +110,7 @@ def _start_cloud_server(
             self.cloud_run_process_map[run_id_str].start()
         else:
             cloud_device_id = request_json.get("cloudServerDeviceId", "0")
+            server_id = request_json.get("server_id", 0)
             message_bytes = json.dumps(request_json).encode("ascii")
             base64_bytes = base64.b64encode(message_bytes)
             payload = base64_bytes.decode("ascii")
@@ -121,14 +124,14 @@ def _start_cloud_server(
                     args=(args.account_id, args.api_key, args.os_name, args.version,
                           cloud_device_id, run_id, payload,
                           communication_manager, sender_message_queue,
-                          status_center_queue, master_agent_instance))
+                          status_center_queue, master_agent_instance, process_name))
             else:
                 self.cloud_run_process_map[run_id_str] = fedml.get_process(
                     target=cloud_server_mgr.start_local_master_server,
                     args=(args.account_id, args.api_key, args.os_name, args.version,
                           cloud_device_id, run_id, payload,
                           communication_manager, sender_message_queue,
-                          status_center_queue, master_agent_instance))
+                          status_center_queue, master_agent_instance, process_name))
 
             self.cloud_run_process_map[run_id_str].start()
             time.sleep(1)
diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
index 2f8a4c5838..9d3b492758 100755
--- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -214,7 +214,8 @@ def callback_start_train(self, topic=None, payload=None):
                 sender_message_queue=self.message_center.get_sender_message_queue(),
                 listener_message_queue=self.get_listener_message_queue(),
                 status_center_queue=self.get_status_queue(),
-                communication_manager=self.get_listener_communication_manager()
+                communication_manager=self.get_listener_communication_manager(),
+                process_name=GeneralConstants.get_launch_master_job_process_name(run_id, self.edge_id)
             )
 
             process = self._get_job_runner_manager().get_runner_process(run_id)
@@ -225,6 +226,7 @@ def callback_start_train(self, topic=None, payload=None):
         elif self.run_as_cloud_agent:
             self.init_job_task(request_json)
 
+            server_id = request_json.get("server_id", self.edge_id)
             self._get_job_runner_manager().start_job_runner(
                 run_id, request_json, args=self.args, edge_id=self.edge_id,
                 sender_message_queue=self.message_center.get_sender_message_queue(),
@@ -233,7 +235,8 @@ def callback_start_train(self, topic=None, payload=None):
                 communication_manager=self.get_listener_communication_manager(),
                 master_agent_instance=self.generate_agent_instance(),
                 should_start_cloud_server=True,
-                use_local_process_as_cloud_server=self.use_local_process_as_cloud_server
+                use_local_process_as_cloud_server=self.use_local_process_as_cloud_server,
+                process_name=GeneralConstants.get_launch_master_job_process_name(run_id, server_id)
             )
 
             process = self._get_job_runner_manager().get_runner_process(run_id, is_cloud_server=True)
@@ -255,7 +258,8 @@ def callback_start_train(self, topic=None, payload=None):
                 sender_message_queue=self.message_center.get_sender_message_queue(),
                 listener_message_queue=self.get_listener_message_queue(),
                 status_center_queue=self.get_status_queue(),
-                communication_manager=self.get_listener_communication_manager()
+                communication_manager=self.get_listener_communication_manager(),
+                process_name=GeneralConstants.get_launch_master_job_process_name(run_id, server_id)
             )
 
             self.send_status_msg_to_edges(edge_id_list, run_id, server_id)
@@ -311,7 +315,11 @@ def callback_complete_job(self, topic, payload):
         self._process_job_complete_status(run_id, server_id, request_json)
 
     def _process_job_complete_status(self, run_id, server_id, complete_payload):
-        pass
+        # Complete the job runner
+        self._get_job_runner_manager().complete_job_runner(
+            run_id, args=self.args, server_id=server_id, request_json=complete_payload,
+            run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server,
+            use_local_process_as_cloud_server=self.use_local_process_as_cloud_server)
 
     def callback_run_logs(self, topic, payload):
         run_id = str(topic).split('/')[-1]
diff --git a/python/fedml/computing/scheduler/master/cloud_server_manager.py b/python/fedml/computing/scheduler/master/cloud_server_manager.py
index 0aabaf5dbf..3669cb32bc 100755
--- a/python/fedml/computing/scheduler/master/cloud_server_manager.py
+++ b/python/fedml/computing/scheduler/master/cloud_server_manager.py
@@ -5,6 +5,8 @@
 import platform
 import traceback
 
+import setproctitle
+
 import fedml
 from fedml.computing.scheduler.comm_utils.sys_utils import get_python_program
 from fedml.computing.scheduler.scheduler_core.account_manager import FedMLAccountManager
@@ -47,8 +49,11 @@ def start_local_cloud_server(user, api_key, os_name, version, cloud_device_id, r
     def start_local_master_server(
             self, user, api_key, os_name, version, cloud_device_id, run_id, payload,
             communication_manager=None, sender_message_queue=None, status_center_queue=None,
-            master_agent_instance=None
+            master_agent_instance=None, process_name=None
     ):
+        if process_name is not None:
+            setproctitle.setproctitle(process_name)
+
         logging.info(f"Local master server pid: {os.getpid()}")
         if platform.system() != "Windows":
             os.setsid()
diff --git a/python/fedml/computing/scheduler/master/master_protocol_manager.py b/python/fedml/computing/scheduler/master/master_protocol_manager.py
index c941502b9c..1adda439c6 100755
--- a/python/fedml/computing/scheduler/master/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/master_protocol_manager.py
@@ -7,8 +7,9 @@
 class FedMLLaunchMasterProtocolManager(FedMLBaseMasterProtocolManager, ABC):
     def __init__(self, args, agent_config=None):
         FedMLBaseMasterProtocolManager.__init__(self, args, agent_config=agent_config)
+        self.message_center_name = "launch_master_agent"
 
-    # Override
+        # Override
     def generate_topics(self):
         super().generate_topics()
 
@@ -35,14 +36,6 @@ def _init_extra_items(self):
     def print_connected_info(self):
         super().print_connected_info()
 
-    # Override
-    def _process_job_complete_status(self, run_id, server_id, complete_payload):
-        # Complete the job runner
-        self._get_job_runner_manager().complete_job_runner(
-            run_id, args=self.args, server_id=server_id, request_json=complete_payload,
-            run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server,
-            use_local_process_as_cloud_server=self.use_local_process_as_cloud_server)
-
     def generate_agent_instance(self):
         from .master_agent import FedMLLaunchMasterAgent
         return FedMLLaunchMasterAgent()
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index 668d1192ce..6b5ffe0aac 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -263,7 +263,8 @@ def callback_start_deployment(self, topic, payload):
             run_id, request_json, args=self.args, edge_id=self.edge_id,
             sender_message_queue=self.message_center.get_sender_message_queue(),
             listener_message_queue=self.get_listener_message_queue(),
-            status_center_queue=self.get_status_queue()
+            status_center_queue=self.get_status_queue(),
+            process_name=GeneralConstants.get_deploy_master_job_process_name(run_id, self.edge_id)
         )
         process = self._get_job_runner_manager().get_runner_process(run_id)
         if process is not None:
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
index f9bc70452d..51cf5c65ef 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
@@ -12,6 +12,7 @@
 from .device_model_msg_object import FedMLModelMsgObject
 from .device_client_constants import ClientConstants
 from .device_client_data_interface import FedMLClientDataInterface
+from ..scheduler_core.general_constants import GeneralConstants
 from ..slave.base_slave_protocol_manager import FedMLBaseSlaveProtocolManager
 from .worker_job_runner_manager import FedMLDeployJobRunnerManager
 from .device_mqtt_inference_protocol import FedMLMqttInference
@@ -160,7 +161,8 @@ def callback_start_deployment(self, topic, payload):
             run_id, request_json, args=self.args, edge_id=self.edge_id,
             sender_message_queue=self.message_center.get_sender_message_queue(),
             listener_message_queue=self.get_listener_message_queue(),
-            status_center_queue=self.get_status_queue()
+            status_center_queue=self.get_status_queue(),
+            process_name=GeneralConstants.get_deploy_slave_job_process_name(run_id, self.edge_id)
         )
         process = self._get_job_runner_manager().get_runner_process(run_id)
         if process is not None:
diff --git a/python/fedml/computing/scheduler/scheduler_core/general_constants.py b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
index 8c60b17bdf..3b40e1df80 100755
--- a/python/fedml/computing/scheduler/scheduler_core/general_constants.py
+++ b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
@@ -65,6 +65,19 @@ class GeneralConstants:
     FEDML_OTA_CMD_RESTART = "restart"
 
     FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT = "MODEL_END_POINT"
+    FEDML_PROCESS_NAME_PREFIX = "fedml-process-"
+    FEDML_LAUNCH_MASTER_JOB_RUNNER_TAG = "launch-master-job-runner"
+    FEDML_LAUNCH_SLAVE_JOB_RUNNER_TAG = "launch-slave-job-runner"
+    FEDML_LAUNCH_MASTER_USER_JOB_TAG = "launch-master-user-job"
+    FEDML_DEPLOY_MASTER_JOB_RUNNER_TAG = "deploy-master-job-runner"
+    FEDML_DEPLOY_SLAVE_JOB_RUNNER_TAG = "deploy-slave-job-runner"
+    FEDML_DEPLOY_MASTER_USER_JOB_TAG = "deploy-master-user-job"
+    FEDML_MESSAGE_CENTER_LISTENER_TAG = "message-center-listener"
+    FEDML_MESSAGE_CENTER_SENDER_TAG = "message-center-sender"
+    FEDML_STATUS_CENTER_TAG = "status-center"
+    FEDML_LOG_PROCESS_TAG = "log"
+
+    FEDML_TOPIC_STATUS_CENTER_STOP = "anywhere/status_center/stop"
 
     @staticmethod
     def get_package_unzip_dir(package_download_dir):
@@ -216,3 +229,60 @@ def get_topic_complete_job(server_id):
     def get_payload_complete_job(run_id, server_id):
         payload_complete_job = {"runId": run_id, "serverId": server_id}
         return payload_complete_job
+
+    @staticmethod
+    def get_process_name(process_tag, run_id=None, edge_id=None):
+        return f"{GeneralConstants.FEDML_PROCESS_NAME_PREFIX}{process_tag}-run-{run_id}-edge-{edge_id}"
+
+    @staticmethod
+    def get_process_name_with_prefix(process_prefix, run_id=None, edge_id=None):
+        return f"{process_prefix}-run-{run_id}-edge-{edge_id}"
+
+    @staticmethod
+    def get_launch_master_job_process_name(run_id, edge_id):
+        return GeneralConstants.get_process_name(
+            GeneralConstants.FEDML_LAUNCH_MASTER_JOB_RUNNER_TAG, run_id, edge_id)
+
+    @staticmethod
+    def get_launch_slave_job_process_name(run_id, edge_id):
+        return GeneralConstants.get_process_name(
+            GeneralConstants.FEDML_LAUNCH_SLAVE_JOB_RUNNER_TAG, run_id, edge_id)
+
+    @staticmethod
+    def get_launch_master_user_process_name(run_id, edge_id):
+        return GeneralConstants.get_process_name(
+            GeneralConstants.FEDML_LAUNCH_MASTER_USER_JOB_TAG, run_id, edge_id)
+
+    @staticmethod
+    def get_deploy_master_job_process_name(run_id, edge_id):
+        return GeneralConstants.get_process_name(
+            GeneralConstants.FEDML_DEPLOY_MASTER_JOB_RUNNER_TAG, run_id, edge_id)
+
+    @staticmethod
+    def get_deploy_slave_job_process_name(run_id, edge_id):
+        return GeneralConstants.get_process_name(
+            GeneralConstants.FEDML_DEPLOY_SLAVE_JOB_RUNNER_TAG, run_id, edge_id)
+
+    @staticmethod
+    def get_deploy_master_user_process_name(run_id, edge_id):
+        return GeneralConstants.get_process_name(
+            GeneralConstants.FEDML_DEPLOY_MASTER_USER_JOB_TAG, run_id, edge_id)
+
+    @staticmethod
+    def get_log_process_name(run_id, edge_id):
+        return GeneralConstants.get_process_name(
+            GeneralConstants.FEDML_LOG_PROCESS_TAG, run_id, edge_id)
+
+    @staticmethod
+    def get_message_center_listener_process_name(message_center_name):
+        return f"{GeneralConstants.FEDML_PROCESS_NAME_PREFIX}{GeneralConstants.FEDML_MESSAGE_CENTER_LISTENER_TAG}-{message_center_name}"
+
+    @staticmethod
+    def get_message_center_sender_process_name(message_center_name):
+        return f"{GeneralConstants.FEDML_PROCESS_NAME_PREFIX}{GeneralConstants.FEDML_MESSAGE_CENTER_SENDER_TAG}-{message_center_name}"
+
+    @staticmethod
+    def get_status_center_process_name(status_center_tag):
+        return f"{GeneralConstants.FEDML_PROCESS_NAME_PREFIX}{GeneralConstants.FEDML_STATUS_CENTER_TAG}-{status_center_tag}"
+
+
diff --git a/python/fedml/computing/scheduler/scheduler_core/message_center.py b/python/fedml/computing/scheduler/scheduler_core/message_center.py
index aeac1a3855..087e74edf4 100755
--- a/python/fedml/computing/scheduler/scheduler_core/message_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/message_center.py
@@ -10,8 +10,11 @@
 import queue
 from os.path import expanduser
 
+import setproctitle
+
 import fedml
 from fedml.core.distributed.communication.mqtt.mqtt_manager import MqttManager
+from .general_constants import GeneralConstants
 from ..slave.client_constants import ClientConstants
 from ....core.mlops.mlops_metrics import MLOpsMetrics
 from operator import methodcaller
@@ -140,20 +143,21 @@ def start_sender(self, message_center_name=None):
         self.sender_message_queue = multiprocessing.Manager().Queue()
         self.message_event = multiprocessing.Event()
         self.message_event.clear()
+        process_name = GeneralConstants.get_message_center_sender_process_name(message_center_name)
         message_center = FedMLMessageCenter(agent_config=self.sender_agent_config,
                                             sender_message_queue=self.sender_message_queue)
         if platform.system() == "Windows":
             self.message_center_process = multiprocessing.Process(
                 target=message_center.run_sender, args=(
                     self.message_event, self.sender_message_queue,
-                    message_center_name
+                    message_center_name, process_name
                 )
             )
         else:
             self.message_center_process = fedml.get_process(
                 target=message_center.run_sender, args=(
                     self.message_event, self.sender_message_queue,
-                    message_center_name
+                    message_center_name, process_name
                 )
             )
         self.message_center_process.start()
@@ -211,7 +215,10 @@ def retry_sending_undelivered_message(self):
                 # Save the message
                 self.save_message_record(message_entity.run_id, message_entity.device_id, sent_message_record)
 
-    def run_sender(self, message_event, message_queue, message_center_name):
+    def run_sender(self, message_event, message_queue, message_center_name, process_name=None):
+        if process_name is not None:
+            setproctitle.setproctitle(process_name)
+
         if platform.system() != "Windows":
             os.setsid()
 
@@ -345,15 +352,16 @@ def start_listener(
         self.listener_message_event = multiprocessing.Event()
         self.listener_message_event.clear()
         self.listener_agent_config = agent_config
-        # message_runner = self.get_message_runner()
-        message_runner = self
+        message_runner = self.get_message_runner()
+        # message_runner = self
         message_runner.listener_agent_config = agent_config
+        process_name = GeneralConstants.get_message_center_listener_process_name(message_center_name)
         if platform.system() == "Windows":
             self.listener_message_center_process = multiprocessing.Process(
                 target=message_runner.run_listener_dispatcher, args=(
                     self.listener_message_event, self.listener_message_queue,
                     self.listener_handler_funcs, sender_message_queue,
-                    sender_message_event, message_center_name, extra_queues
+                    sender_message_event, message_center_name, extra_queues, process_name
                 )
             )
         else:
@@ -361,7 +369,7 @@ def start_listener(
                 target=message_runner.run_listener_dispatcher, args=(
                     self.listener_message_event, self.listener_message_queue,
                     self.listener_handler_funcs, sender_message_queue,
-                    sender_message_event, message_center_name, extra_queues
+                    sender_message_event, message_center_name, extra_queues, process_name
                 )
             )
         self.listener_message_center_process.start()
@@ -398,8 +406,11 @@ def unsubscribe_msg(self, topic):
     def run_listener_dispatcher(
             self, listener_message_event, listener_message_queue,
             listener_funcs, sender_message_queue, sender_message_event,
-            message_center_name, extra_queues
+            message_center_name, extra_queues, process_name=None
     ):
+        if process_name is not None:
+            setproctitle.setproctitle(process_name)
+
         if platform.system() != "Windows":
             os.setsid()
 
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
index 30df7f1905..7b0d00f53d 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -615,7 +615,7 @@ def job_error_processor(self, error_list):
 
     def start_runner_process(
             self, run_id, edge_id, request_json,  cuda_visible_gpu_ids_str=None,
-            sender_message_queue=None, status_center_queue=None
+            sender_message_queue=None, status_center_queue=None, process_name=None
     ):
         return None
 
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
index dcc4045699..8edf57fcbb 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
@@ -20,7 +20,7 @@ def start_job_runner(
             self, run_id, request_json, args=None, edge_id=None, is_server_job=False,
             sender_message_queue=None, listener_message_queue=None, status_center_queue=None,
             should_start_cloud_server=False, use_local_process_as_cloud_server=False,
-            cuda_visible_gpu_ids_str=None
+            cuda_visible_gpu_ids_str=None, process_name=None
     ):
         run_id_str = str(run_id)
         self.job_runners[run_id_str] = self._generate_job_runner_instance(
@@ -31,7 +31,8 @@ def start_job_runner(
             run_id, request_json, edge_id=edge_id,
             sender_message_queue=sender_message_queue,
             listener_message_queue=listener_message_queue,
-            status_center_queue=status_center_queue
+            status_center_queue=status_center_queue,
+            process_name=process_name
         )
 
     def stop_job_runner(self, run_id):
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
index 833fa1edc0..9970b1d3f6 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
@@ -154,6 +154,7 @@ def stop(self, kill_process=False):
             self.communication_mgr.disconnect()
 
         if kill_process:
+            self.post_status_center_stopping_message()
             self.release_message_center()
             RunProcessUtils.kill_process(os.getppid(), exclude_current_pid=True)
 
@@ -328,5 +329,10 @@ def send_agent_active_msg(self, edge_id):
         active_msg = {"ID": edge_id, "status": GeneralConstants.MSG_MLOPS_SERVER_STATUS_IDLE}
         self.message_center.send_message_json(self.topic_active, json.dumps(active_msg))
 
+    def post_status_center_stopping_message(self, run_id=None):
+        topic_status_center_stopping = GeneralConstants.FEDML_TOPIC_STATUS_CENTER_STOP
+        payload = {"run_id": run_id}
+        self.status_reporter.send_message(topic_status_center_stopping, json.dumps(payload))
+
     def set_parent_agent(self, parent_agent):
         self.parent_agent = parent_agent
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py
index 76ba9857c6..47dfa9d1a7 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py
@@ -7,7 +7,10 @@
 import multiprocessing
 import queue
 
+import setproctitle
+
 import fedml
+from .general_constants import GeneralConstants
 from .message_common import FedMLMessageEntity, FedMLStatusEntity
 from .message_center import FedMLMessageCenter
 import traceback
@@ -84,6 +87,7 @@ class FedMLStatusCenter(object):
     TOPIC_SLAVE_JOB_LAUNCH_SUFFIX = "/start_train"
     TOPIC_SLAVE_JOB_STOP_PREFIX = "flserver_agent/"
     TOPIC_SLAVE_JOB_STOP_SUFFIX = "/stop_train"
+    TOPIC_STATUS_CENTER_STOP_PREFIX = GeneralConstants.FEDML_TOPIC_STATUS_CENTER_STOP
     ALLOWED_MAX_JOB_STATUS_CACHE_NUM = 1000
 
     def __init__(self, message_queue=None):
@@ -116,22 +120,25 @@ def start_status_center(self, sender_message_center_queue=None,
         self.status_event.clear()
         self.status_sender_message_center_queue = sender_message_center_queue
         self.status_listener_message_center_queue = listener_message_center_queue
-        #self.status_runner = self.get_status_runner()
-        self.status_runner = self
+        self.status_runner = self.get_status_runner()
+        #self.status_runner = self
+        process_name = GeneralConstants.get_status_center_process_name(
+            f'{"deploy" if self.is_deployment_status_center else "launch"}_'
+            f'{"slave" if is_slave_agent else "master"}_agent')
         target_func = self.status_runner.run_status_dispatcher if not is_slave_agent else \
             self.status_runner.run_status_dispatcher_in_slave
         if platform.system() == "Windows":
             self.status_center_process = multiprocessing.Process(
                 target=target_func, args=(
                     self.status_event, self.status_queue, self.status_sender_message_center_queue,
-                    self.status_listener_message_center_queue, sender_message_event
+                    self.status_listener_message_center_queue, sender_message_event, process_name
                 )
             )
         else:
             self.status_center_process = fedml.get_process(
                 target=target_func, args=(
                     self.status_event, self.status_queue, self.status_sender_message_center_queue,
-                    self.status_listener_message_center_queue, sender_message_event
+                    self.status_listener_message_center_queue, sender_message_event, process_name
                 )
             )
 
@@ -178,7 +185,10 @@ def rebuild_status_center(self, status_queue):
     def run_status_dispatcher(self, status_event, status_queue,
                               sender_message_center_queue,
                               listener_message_center_queue,
-                              sender_message_event):
+                              sender_message_event, process_name=None):
+        if process_name is not None:
+            setproctitle.setproctitle(process_name)
+
         if platform.system() != "Windows":
             os.setsid()
 
@@ -229,6 +239,12 @@ def run_status_dispatcher(self, status_event, status_queue,
                 message_entity = FedMLMessageEntity(message_body=message_body)
                 status_entity = FedMLStatusEntity(status_msg_body=message_body)
 
+                if message_entity.topic.startswith(FedMLStatusCenter.TOPIC_STATUS_CENTER_STOP_PREFIX):
+                    # Process the stop message for message center and status center
+                    message_center.stop_message_center()
+                    self.stop_status_center()
+                    continue
+
                 # Generate status manager instance
                 run_id_str = str(status_entity.run_id)
                 run_id_int = int(status_entity.run_id)
@@ -279,7 +295,10 @@ def run_status_dispatcher(self, status_event, status_queue,
     def run_status_dispatcher_in_slave(self, status_event, status_queue,
                                        sender_message_center_queue,
                                        listener_message_center_queue,
-                                       sender_message_event):
+                                       sender_message_event, process_name=None):
+        if process_name is not None:
+            setproctitle.setproctitle(process_name)
+
         if platform.system() != "Windows":
             os.setsid()
 
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
index 68b40b3291..ec98cc7906 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
@@ -137,16 +137,13 @@ def process_job_completed_status(self, master_id, status):
         # self.remove_listener_for_run_metrics(self.run_id)
         # self.remove_listener_for_run_logs(self.run_id)
 
+        self.message_center.receive_message(
+            GeneralConstants.get_topic_complete_job(master_id),
+            json.dumps(GeneralConstants.get_payload_complete_job(self.run_id, master_id)))
+
         if self.status_center.is_deployment_status_center:
             if status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED:
                 self.report_deployment_status(self.run_id, GeneralConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
-        else:
-            self.message_center.receive_message(
-                GeneralConstants.get_topic_complete_job(master_id),
-                json.dumps(GeneralConstants.get_payload_complete_job(self.run_id, master_id)))
-
-            self.message_center.stop_message_center()
-            self.status_center.stop_status_center()
 
     def process_job_exception_status(self, master_id, status):
         # Report exception job status
diff --git a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
index 8876fc7e39..0486b131a6 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
@@ -7,6 +7,8 @@
 import traceback
 from abc import ABC, abstractmethod
 
+import setproctitle
+
 import fedml
 from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
 from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
@@ -48,8 +50,12 @@ def __repr__(self):
         )
 
     def run(self, process_event, completed_event,  run_extend_queue_list,
-            sender_message_center, listener_message_queue, status_center_queue):
-        print(f"Client runner process id {os.getpid()}, run id {self.run_id}")
+            sender_message_center, listener_message_queue, status_center_queue,
+            process_name=None):
+        if process_name is not None:
+            setproctitle.setproctitle(process_name)
+
+        print(f"Client runner process id {os.getpid()}, name {process_name}, run id {self.run_id}")
 
         if platform.system() != "Windows":
             os.setsid()
@@ -245,7 +251,7 @@ def reset_devices_status(self, edge_id, status):
     def start_runner_process(
             self, run_id, request_json, edge_id=None,
             sender_message_queue=None, listener_message_queue=None,
-            status_center_queue=None, cuda_visible_gpu_ids_str=None
+            status_center_queue=None, cuda_visible_gpu_ids_str=None, process_name=None
     ):
         client_runner = self._generate_job_runner_instance(
             self.args, run_id=run_id, request_json=request_json,
@@ -265,12 +271,12 @@ def start_runner_process(
             self.run_process = multiprocessing.Process(
                 target=client_runner.run, args=(
                     self.run_process_event, self.run_process_completed_event, self.run_extend_queue_list,
-                    sender_message_queue, listener_message_queue, status_center_queue
+                    sender_message_queue, listener_message_queue, status_center_queue, process_name
                 ))
         else:
             self.run_process = fedml.get_process(target=client_runner.run, args=(
                 self.run_process_event, self.run_process_completed_event, self.run_extend_queue_list,
-                sender_message_queue, listener_message_queue, status_center_queue
+                sender_message_queue, listener_message_queue, status_center_queue, process_name
             ))
         self.run_process.start()
         return self.run_process
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index 49aad618c1..648a49bbd1 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -262,6 +262,8 @@ def callback_start_train(self, topic, payload):
             # Report the run status with finished status and return
             self.generate_status_report(run_id, edge_id, server_agent_id=server_agent_id).report_client_id_status(
                 edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, run_id=run_id)
+
+            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, edge_id)
             return
         logging.info(
             f"Run started, available gpu ids: {JobRunnerUtils.get_instance().get_available_gpu_id_list(edge_id)}")
@@ -279,6 +281,7 @@ def callback_start_train(self, topic, payload):
             listener_message_queue=self.get_listener_message_queue(),
             status_center_queue=self.get_status_queue(),
             cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str,
+            process_name=GeneralConstants.get_launch_slave_job_process_name(run_id, edge_id)
         )
         run_process = self._get_job_runner_manager().get_runner_process(run_id)
         if run_process is not None:
diff --git a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
index 6e3cb2ebe1..050cfb3f1d 100755
--- a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
@@ -11,6 +11,7 @@ class FedMLLaunchSlaveProtocolManager(FedMLBaseSlaveProtocolManager):
 
     def __init__(self, args, agent_config=None):
         FedMLBaseSlaveProtocolManager.__init__(self, args, agent_config=agent_config)
+        self.message_center_name = "launch_slave_agent"
 
     # Override
     def generate_topics(self):
diff --git a/python/fedml/computing/scheduler/slave/united_agents.py b/python/fedml/computing/scheduler/slave/united_agents.py
index 3640ea149e..17aee46f62 100755
--- a/python/fedml/computing/scheduler/slave/united_agents.py
+++ b/python/fedml/computing/scheduler/slave/united_agents.py
@@ -57,8 +57,7 @@ def login(self, userid, api_key=None, device_id=None,
         deploy_master_agent.login(
             userid, api_key=api_key, device_id=login_result.device_id,
             os_name=os_name, role=FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM,
-            communication_manager=shared_communication_mgr,
-            status_center_queue=shared_master_status_center_queue
+            communication_manager=shared_communication_mgr
         )
 
         # Login with the deployment slave role based on
@@ -66,10 +65,7 @@ def login(self, userid, api_key=None, device_id=None,
         deploy_slave_agent.login(
             userid, api_key=api_key, device_id=login_result.device_id,
             os_name=os_name, role=FedMLAccountManager.ROLE_DEPLOY_WORKER_ON_PREM,
-            communication_manager=shared_communication_mgr,
-            sender_message_queue=shared_slave_sender_message_queue,
-            status_center_queue=shared_slave_status_center_queue,
-            sender_message_event=shared_slave_sender_message_event
+            communication_manager=shared_communication_mgr
         )
 
         # Start the slave agent to connect to servers and loop forever.
diff --git a/python/fedml/core/mlops/mlops_runtime_log_daemon.py b/python/fedml/core/mlops/mlops_runtime_log_daemon.py
index f72d88cfea..bf136a36c9 100644
--- a/python/fedml/core/mlops/mlops_runtime_log_daemon.py
+++ b/python/fedml/core/mlops/mlops_runtime_log_daemon.py
@@ -8,10 +8,12 @@
 
 import multiprocess as multiprocessing
 import requests
+import setproctitle
 import yaml
 
 import fedml
 from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils
+from fedml.computing.scheduler.scheduler_core.general_constants import GeneralConstants
 from fedml.core.mlops.mlops_utils import MLOpsLoggingUtils
 from ...core.mlops.mlops_configs import MLOpsConfigs
 
@@ -256,8 +258,11 @@ def should_ignore_log_line(log_line):
 
         return False
 
-    def log_process(self, process_event):
-        logging.info(f"Log uploading process id {os.getpid()}, run id {self.run_id}, edge id {self.device_id}")
+    def log_process(self, process_event, process_name=None):
+        if process_name is not None:
+            setproctitle.setproctitle(process_name)
+
+        logging.info(f"Log uploading process id {os.getpid()}, run id {self.run_id}, name {process_name}, edge id {self.device_id}")
         self.log_process_event = process_event
 
         only_push_artifact = False
@@ -419,6 +424,8 @@ def set_log_source(self, source):
         self.log_source = source
 
     def start_log_processor(self, log_run_id, log_device_id, log_source=None, log_file_prefix=None):
+        if log_run_id == "-1" or int(log_run_id) <= 0:
+            return
         log_processor = MLOpsRuntimeLogProcessor(self.args.using_mlops, log_run_id,
                                                  log_device_id, self.log_file_dir,
                                                  self.log_server_url,
@@ -432,12 +439,13 @@ def start_log_processor(self, log_run_id, log_device_id, log_source=None, log_fi
             self.log_process_event_map[event_map_id] = multiprocessing.Event()
         self.log_process_event_map[event_map_id].clear()
         log_processor.log_process_event = self.log_process_event_map[event_map_id]
+        process_name = GeneralConstants.get_log_process_name(log_run_id, log_device_id)
         if platform.system() == "Windows":
             log_child_process = multiprocessing.Process(
-                target=log_processor.log_process, args=(self.log_process_event_map[event_map_id],))
+                target=log_processor.log_process, args=(self.log_process_event_map[event_map_id], process_name))
         else:
             log_child_process = fedml.get_process(
-                target=log_processor.log_process, args=(self.log_process_event_map[event_map_id],))
+                target=log_processor.log_process, args=(self.log_process_event_map[event_map_id], process_name))
         # process = threading.Thread(target=log_processor.log_process)
         # process.start()
         if log_child_process is not None:
diff --git a/python/setup.py b/python/setup.py
index f00c0b4335..d2d9441277 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -126,7 +126,7 @@ def finalize_options(self):
 
 setup(
     name="fedml",
-    version="0.8.31b23",
+    version="0.8.51b1",
     author="FedML Team",
     author_email="ch@fedml.ai",
     description="A research and production integrated edge-cloud library for "

From fd257b801430d789225b93afff46d514f99c6654 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Thu, 20 Jun 2024 19:28:24 +0800
Subject: [PATCH 175/282] [CoreEngine] update the dependent libs.

---
 python/setup.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/python/setup.py b/python/setup.py
index d2d9441277..f8c323d63f 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -69,7 +69,8 @@ def finalize_options(self):
     'python-dotenv',
     'protobuf>=3.20.2,<4.0dev',
     'typer<0.10.0,>=0.3.0',
-    'fastapi-cli==0.0.1'
+    'fastapi-cli==0.0.1',
+    'setproctitle'
 ]
 
 requirements_extra_mpi = [

From 7ccf195113d4c5dccfe3fabc3aae9fea71f33e0d Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Sat, 15 Jun 2024 00:17:04 +0000
Subject: [PATCH 176/282] [Deploy] Support arbitrary container image
 onboarding.

---
 .../custom_inference_image.yaml               |  19 +-
 .../custom_inference_image/serve_main.py      |  16 --
 .../scheduler/comm_utils/job_monitor.py       |  27 +-
 .../device_client_constants.py                |   4 +
 .../device_http_inference_protocol.py         |   8 +-
 .../device_model_deployment.py                | 265 +++++++++---------
 .../model_scheduler/device_model_inference.py |  56 ++--
 .../model_scheduler/worker_job_runner.py      |  16 --
 python/fedml/core/mlops/mlops_device_perfs.py |   2 +-
 9 files changed, 201 insertions(+), 212 deletions(-)
 delete mode 100644 python/examples/deploy/custom_inference_image/serve_main.py

diff --git a/python/examples/deploy/custom_inference_image/custom_inference_image.yaml b/python/examples/deploy/custom_inference_image/custom_inference_image.yaml
index 0c62767b40..467c7c48b0 100644
--- a/python/examples/deploy/custom_inference_image/custom_inference_image.yaml
+++ b/python/examples/deploy/custom_inference_image/custom_inference_image.yaml
@@ -1,13 +1,14 @@
 workspace: "./"
-job: |
-  echo "Start serving..."
-  python3 serve_main.py
 
-bootstrap: |
-  echo "Bootstrap start..."
-  echo "Bootstrap finished!"
+inference_image_name: "ghcr.io/predibase/lorax:main"
+container_run_command: "--model-id mistralai/Mistral-7B-Instruct-v0.1"
 
-enable_custom_image: true
-inference_image_name: "fedml/fedml-default-inference-backend"
-deploy_timeout: 1000
+environment_variables:
+  HUGGING_FACE_HUB_TOKEN: ""
 
+readiness_probe:
+  path: "health"
+
+port: 80
+
+deploy_timeout: 1600
diff --git a/python/examples/deploy/custom_inference_image/serve_main.py b/python/examples/deploy/custom_inference_image/serve_main.py
deleted file mode 100644
index a7a1dd84f3..0000000000
--- a/python/examples/deploy/custom_inference_image/serve_main.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from fedml.serving import FedMLPredictor
-from fedml.serving import FedMLInferenceRunner
-
-
-class DummyPredictor(FedMLPredictor):
-    def __init__(self):
-        super().__init__()
-        
-    def predict(self, request):
-        return {"Aloha": request}
-
-
-if __name__ == "__main__":
-    predictor = DummyPredictor()
-    fedml_inference_runner = FedMLInferenceRunner(predictor)
-    fedml_inference_runner.run()
\ No newline at end of file
diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
index 97a4cb6ebc..d216b46dad 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
@@ -40,6 +40,7 @@
 from fedml.core.mlops.mlops_runtime_log import MLOpsRuntimeLog
 from fedml.core.mlops.mlops_utils import MLOpsLoggingUtils
 from fedml.core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
+from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants
 from ..scheduler_core.endpoint_sync_protocol import FedMLEndpointSyncProtocol
 
 from ..model_scheduler.device_server_constants import ServerConstants
@@ -758,9 +759,8 @@ def monitor_slave_endpoint_status(self):
                 except Exception as e:
                     pass
 
-    def _lenient_check_replica_ready(
-            self, deployment_result
-    ):
+    @staticmethod
+    def _lenient_check_replica_ready(deployment_result):
         """
         Double-check the replica's liveness using /ready api:
             if 200 -> return True
@@ -769,8 +769,27 @@ def _lenient_check_replica_ready(
         """
         result_json = deployment_result
         inference_url = result_json.get("model_url", None)
+        liveliness_check = result_json.get("model_metadata", {}).get("liveliness_check", None)
+        readiness_check = result_json.get("model_metadata", {}).get("readiness_check", None)
+
+        if liveliness_check is not None:
+            if liveliness_check == ClientConstants.LIVENESS_PROBE_DEFAULT:
+                liveliness_check = readiness_check  # Follow the readiness check pattern
+            if not isinstance(liveliness_check, dict):
+                logging.warning(f"Healthiness check is not a dict. {liveliness_check}")
+                return True
+            if "path" not in liveliness_check:
+                logging.warning(f"Healthiness check does not have path. {liveliness_check}")
+                return True
+            response_ok = asyncio.run(FedMLHttpInference.is_inference_ready(
+                inference_url, timeout=SchedulerConstants.ENDPOINT_INFERENCE_READY_TIMEOUT,
+                path=liveliness_check["path"]))
+            if response_ok is None:
+                # This means the server return 202
+                return False
+            return True
 
-        # Make a curl get to inference_url with timeout 5s
+        # Make a curl get to inference_url/ready with timeout 5s
         # TODO(Raphael): Also support PROXY and MQTT to check the readiness
         response_ok = asyncio.run(FedMLHttpInference.is_inference_ready(inference_url, timeout=5))
         if response_ok is None:
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
index fdcbdf0a34..cd21de2e04 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
@@ -143,6 +143,10 @@ class ClientConstants(object):
     DEVICE_DIFF_DELETE_OPERATION = "op: delete"
     DEVICE_DIFF_REPLACE_OPERATION = "op: replace"
 
+    READINESS_PROBE_DEFAULT = "DEFAULT"
+    LIVENESS_PROBE_DEFAULT = "DEFAULT"
+
+
     LOGIN_MODE_ON_PREMISE_INDEX = 0
     LOGIN_MODE_FEDML_CLOUD_INDEX = 1
     LOGIN_MODE_PUBLIC_CLOUD_INDEX = 2
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
index 7e4c06ea5d..41c565d5d8 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
@@ -14,14 +14,14 @@ def __init__(self):
         pass
 
     @staticmethod    
-    async def is_inference_ready(inference_url, timeout=None):
-        '''
+    async def is_inference_ready(inference_url, timeout=None, path="ready"):
+        """
         True: inference is ready
         False: cannot be reached, will try other protocols
         None: can be reached, but not ready
-        '''
+        """
         url_parsed = urlparse(inference_url)
-        ready_url = f"http://{url_parsed.hostname}:{url_parsed.port}/ready"
+        ready_url = f"http://{url_parsed.hostname}:{url_parsed.port}/{path}"
         response_ok = False
         try:
             async with httpx.AsyncClient() as client:
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index edd2ebea9a..71f0c8032a 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -68,42 +68,26 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
     num_gpus = gpu_per_replica
     gpu_ids, gpu_attach_cmd = None, ""
 
-    # Concatenate the model name
+    # Concatenate the full model name
     running_model_name = ClientConstants.get_running_model_name(
         end_point_name, inference_model_name, model_version, end_point_id, model_id, edge_id=edge_id)
 
-    # Parse the model config file and get the necessary information for the deployment
+    # Parse the model config file
     model_config_path = os.path.join(model_storage_local_path, "fedml_model_config.yaml")
     with open(model_config_path, 'r') as file:
         config = yaml.safe_load(file)
+        inference_type = "default"
 
         # Resource related
-        inference_type = "default"
-        use_gpu = config.get('use_gpu', True)
-        num_gpus_frm_yml = config.get('num_gpus', None)
-        if not use_gpu:
-            num_gpus = 0
-        else:
-            if num_gpus_frm_yml is not None:
-                num_gpus = int(num_gpus_frm_yml)
-        usr_indicated_wait_time = config.get('deploy_timeout', 900)
-        usr_indicated_retry_cnt = max(int(usr_indicated_wait_time) // 10, 1)
-        shm_size = config.get('shm_size', None)
-        storage_opt = config.get('storage_opt', None)
-        tmpfs = config.get('tmpfs', None)
-        cpus = config.get('cpus', None)
-        if cpus is not None:
-            cpus = int(cpus)
-        memory = config.get('memory', None)
-
-        inference_image_name = config.get('inference_image_name',
-                                          ClientConstants.INFERENCE_SERVER_CUSTOME_IMAGE)
-        image_pull_policy = config.get('image_pull_policy', SchedulerConstants.IMAGE_PULL_POLICY_IF_NOT_PRESENT)
-
-        # Source code dir, bootstrap dir, data cache dir
-        src_code_dir = os.path.join(model_storage_local_path, config.get('source_code_dir', ""))
+        use_gpu, num_gpus, shm_size, storage_opt, tmpfs, cpus, memory, port_inside_container = \
+            parse_resource_related_config(config, gpu_per_replica)
 
-        # Get the bootstrap and job commands inside the yaml file
+        # Image related
+        inference_image_name, image_pull_policy, registry_name, registry_provider, \
+            registry_user_name, registry_user_password = parse_image_registry_related_config(config)
+
+        # Bootstrap, job and entrypoint related
+        dst_model_serving_dir = "/home/fedml/models_serving"
         bootstrap_cmds_str_frm_yaml = config.get('bootstrap', "")
         job_cmds_str_frm_yaml = config.get('job', "")
 
@@ -119,36 +103,37 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
         else:
             src_bootstrap_file_path = ""
 
-        data_cache_dir_input = config.get('data_cache_dir', "")
-        request_input_example = config.get('request_input_example', None)
-        extra_envs = config.get('environment_variables', None)
-
-        # Serving dir inside docker
-        dst_model_serving_dir = "/home/fedml/models_serving"
-        relative_entry = config.get('entry_point')
         if src_bootstrap_file_path != "":
             dst_bootstrap_dir = os.path.join(dst_model_serving_dir, auto_gen_bootstrap_file_name)
         else:
             dst_bootstrap_dir = ""
 
-        # If using customized image, then bootstrap + job will be the entry point
-        enable_custom_image = config.get("enable_custom_image", False)
-        # inference_type = "custom"
-        customized_image_entry_cmd = \
-            "/bin/bash /home/fedml/models_serving/fedml-deploy-bootstrap-entry-auto-gen.sh"
+        # If the entry point is in fedml format (e.g., "main.py")
+        relative_entry_fedml_format = config.get('entry_point', "")
+
+        # User indicate either fedml format python main entry filename or entry command
+        customized_image_entry_cmd = config.get('container_run_command', None)
+        customized_readiness_check = config.get('readiness_probe', ClientConstants.READINESS_PROBE_DEFAULT)
+        customized_liveliness_check = config.get('liveness_probe', ClientConstants.LIVENESS_PROBE_DEFAULT)
+
+        # Storage related
+        src_code_dir = os.path.join(model_storage_local_path, config.get('source_code_dir', ""))
+        data_cache_dir_input = config.get('data_cache_dir', "")
 
-        docker_registry_user_name = config.get("docker_registry_user_name", "")
-        docker_registry_user_password = config.get("docker_registry_user_password", "")
-        docker_registry = config.get("docker_registry", "")
+        # Others
+        extra_envs = config.get('environment_variables', None)
+        usr_indicated_wait_time = config.get('deploy_timeout', 900)
+        usr_indicated_retry_cnt = max(int(usr_indicated_wait_time) // 10, 1)
+        request_input_example = config.get('request_input_example', None)
 
-        port_inside_container = int(config.get("port", 2345))
+    # Parameter's check
+    if inference_engine != ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT:
+        raise Exception(f"inference engine {inference_engine} is not supported")
 
-    # Request the GPU ids for the deployment
+    # Request the GPU
     if num_gpus > 0:
         gpu_ids, gpu_attach_cmd = request_gpu_ids_on_deployment(
             edge_id, end_point_id, num_gpus=num_gpus, master_device_id=master_device_id)
-
-        # set replica and their gpu ids
         FedMLModelCache.get_instance().set_redis_params()
         FedMLModelCache.get_instance().set_replica_gpu_ids(
             end_point_id, end_point_name, inference_model_name, edge_id, replica_rank+1, gpu_ids)
@@ -159,50 +144,51 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
     if not os.path.exists(model_serving_dir):
         os.makedirs(model_serving_dir, exist_ok=True)
 
-    if inference_engine != ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT:
-        raise Exception(f"inference engine {inference_engine} is not supported")
-
-    # Get the master device id
-    logging.info(f"master ip: {master_ip}, worker ip: {infer_host}")
+    # Determine whether to report public ip or localhost
     if infer_host == master_ip:
         logging.info("infer_host is the same as master ip, will use 127.0.0.1 to avoid firewall issue")
         infer_host = "127.0.0.1"
+    else:
+        logging.info("Master and worker are located in different machines, will use the public ip for inference")
 
+    # Init container interface client
     try:
         client = docker.from_env()
-        if enable_custom_image and docker_registry_user_name != "" and docker_registry_user_password != "" \
-                and docker_registry != "":
-            client.login(username=docker_registry_user_name, password=docker_registry_user_password,
-                         registry=docker_registry)
+        if registry_provider == "Docker" and registry_user_name != "" and registry_user_password != "" \
+                and registry_name != "":
+            client.login(username=registry_user_name, password=registry_user_password,
+                         registry=registry_name)
     except Exception:
         logging.error("Failed to connect to the docker daemon, please ensure that you have "
                       "installed Docker Desktop or Docker Engine, and the docker is running")
         return "", "", None, None, None
 
+    # Pull the inference image
+    logging.info(f"Start pulling the inference image {inference_image_name}... with policy {image_pull_policy}")
+    ContainerUtils.get_instance().pull_image_with_policy(image_pull_policy, inference_image_name)
+
+    # Remove if the container exists
     container_prefix = ("{}".format(ClientConstants.FEDML_DEFAULT_SERVER_CONTAINER_NAME_PREFIX) + "__" +
                         security_utils.get_content_hash(running_model_name))
-
     default_server_container_name = container_prefix + "__" + str(replica_rank)
-
     try:
         exist_container_obj = client.containers.get(default_server_container_name)
     except docker.errors.NotFound:
         exist_container_obj = None
     except docker.errors.APIError:
         raise Exception("Failed to get the container object")
+    # Allocate the GPU
+    # TODO: Make sure no competition for each replica in a single deployment
+    if exist_container_obj is not None:
+        client.api.remove_container(exist_container_obj.id, v=True, force=True)
 
-    # Pull the inference image
-    logging.info(f"Start pulling the inference image {inference_image_name}... with policy {image_pull_policy}")
-    ContainerUtils.get_instance().pull_image_with_policy(image_pull_policy, inference_image_name)
-
+    # Build host config
     volumes = []
     binds = {}
     environment = {}
 
-    # data_cache_dir mounting
     if isinstance(data_cache_dir_input, str):
         # In this case, we mount to the same folder, if it has ~, we replace it with /home/fedml
-        src_data_cache_dir, dst_data_cache_dir = "", ""
         if data_cache_dir_input != "":
             if data_cache_dir_input[0] == "~":
                 src_data_cache_dir = os.path.expanduser(data_cache_dir_input)
@@ -239,16 +225,17 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
     else:
         logging.warning("data_cache_dir_input is not a string or a dictionary, skip mounting it to the container")
 
-    # Default mounting
-    if not enable_custom_image or (enable_custom_image and relative_entry != ""):
+    # FedML format main entry filename, e.g., main.py
+    if relative_entry_fedml_format != "":
         logging.info("Start copying the source code to the container...")
         volumes.append(src_code_dir)
         binds[src_code_dir] = {
             "bind": dst_model_serving_dir,
             "mode": "rw"
         }
-        environment["MAIN_ENTRY"] = relative_entry
+        environment["MAIN_ENTRY"] = relative_entry_fedml_format
 
+    # Host config
     host_config_dict = {
         "binds": binds,
         "port_bindings": {
@@ -261,10 +248,6 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
         "mem_limit": memory
     }
 
-    # Allocate the GPU
-    # TODO: Make sure no competition for each replica in a single deployment
-    if exist_container_obj is not None:
-        client.api.remove_container(exist_container_obj.id, v=True, force=True)
     device_mapping = {}
     if no_real_gpu_allocation is not None:
         use_gpu = not no_real_gpu_allocation
@@ -277,6 +260,7 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
         host_config_dict.update(device_mapping)
 
     # Environment variables
+    enable_custom_image = relative_entry_fedml_format == ""
     if not enable_custom_image:
         # For some image, the default user is root. Unified to fedml.
         environment["HOME"] = "/home/fedml"
@@ -288,7 +272,6 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
     environment["FEDML_ENV_VERSION"] = fedml.get_env_version()
     environment["FEDML_ENV_LOCAL_ON_PREMISE_PLATFORM_HOST"] = fedml.get_local_on_premise_platform_host()
     environment["FEDML_ENV_LOCAL_ON_PREMISE_PLATFORM_PORT"] = fedml.get_local_on_premise_platform_port()
-
     if extra_envs is not None:
         for key in extra_envs:
             environment[key] = extra_envs[key]
@@ -304,8 +287,7 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
             environment=environment,
             host_config=host_config,
             detach=True,
-            command=customized_image_entry_cmd if enable_custom_image else None,
-            entrypoint=customized_image_entry_cmd if enable_custom_image else None
+            command=customized_image_entry_cmd,
         )
         client.api.start(container=new_container.get("Id"))
     except Exception as e:
@@ -333,11 +315,12 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
                           inference_model_name, inference_engine, inference_http_port, inference_type,
                           retry_interval=10, deploy_attempt_threshold=usr_indicated_retry_cnt,
                           request_input_example=request_input_example, infer_host=infer_host,
-                          enable_custom_image=enable_custom_image)
+                          readiness_check=customized_readiness_check)
 
     # Return the running model name and the inference output url
     inference_output_url, running_model_version, ret_model_metadata, ret_model_config = \
         check_container_readiness(inference_http_port=inference_http_port, infer_host=infer_host,
+                                  readiness_check=customized_readiness_check,
                                   request_input_example=request_input_example)
 
     if inference_output_url == "":
@@ -345,51 +328,24 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
 
     # Successfully get the result from the container
     model_metadata = ret_model_metadata
+    model_metadata["liveliness_check"] = customized_liveliness_check
+    model_metadata["readiness_check"] = customized_readiness_check
     logging.info(f"[Worker][Replica{replica_rank}] Model deployment is successful with inference_output_url: "
                  f"{inference_output_url}, model_metadata: {model_metadata}, model_config: {ret_model_config}")
 
     return running_model_name, inference_output_url, model_version, model_metadata, ret_model_config
 
 
-def build_inference_req(end_point_name, model_name, token, in_model_metadata):
-    model_inputs = in_model_metadata["inputs"]
-    ret_inputs = list()
-
-    for input_item in model_inputs:
-        ret_item = input_item
-        shape = ret_item["shape"]
-        data_type = ret_item["datatype"]
-        if ClientConstants.MODEL_DATA_TYPE_MAPPING[data_type] == ClientConstants.MODEL_DATA_TYPE_INT:
-            for i in range(len(shape)):
-                if shape[i] == -1:  # if input shape is dynamic, we set a default value 1
-                    shape[i] = 1
-            ret_item["data"] = torch.randint(0, 1, shape).tolist()
-        else:
-            for i in range(len(shape)):
-                if shape[i] == -1:  # if input shape is dynamic, we set a default value 1
-                    shape[i] = 1
-            ret_item["data"] = torch.zeros(shape).tolist()
-        ret_inputs.append(ret_item)
-
-    input_json = {"end_point_name": end_point_name,
-                  "model_name": model_name,
-                  "token": str(token),
-                  "inputs": ret_inputs,
-                  "outputs": in_model_metadata["outputs"]}
-    output_json = in_model_metadata["outputs"]
-
-    return input_json, output_json
-
-
 def should_exit_logs(end_point_id, model_id, cmd_type, model_name, inference_engine, inference_port,
                      inference_type="default", request_input_example=None, infer_host="127.0.0.1",
-                     enable_custom_image=False):
+                     readiness_check=ClientConstants.READINESS_PROBE_DEFAULT):
     if cmd_type == ClientConstants.CMD_TYPE_RUN_DEFAULT_SERVER:
         # TODO: Exited Quickly if the container is Exited or Removed
         # If the container has exited, return True, means we should exit the logs
         try:
             inference_output_url, model_version, model_metadata, model_config = \
                 check_container_readiness(inference_http_port=inference_port, infer_host=infer_host,
+                                          readiness_check=readiness_check,
                                           request_input_example=request_input_example)
             if inference_output_url != "":
                 logging.info("Log test for deploying model successfully, inference url: {}, "
@@ -410,7 +366,7 @@ def log_deployment_output(end_point_id, model_id, cmd_container_name, cmd_type,
                           inference_http_port, inference_type="default",
                           retry_interval=10, deploy_attempt_threshold=10,
                           request_input_example=None, infer_host="127.0.0.1",
-                          enable_custom_image=False):
+                          readiness_check=ClientConstants.READINESS_PROBE_DEFAULT):
     deploy_attempt = 0
     last_log_time = datetime.datetime.now()
 
@@ -478,11 +434,10 @@ def log_deployment_output(end_point_id, model_id, cmd_container_name, cmd_type,
                     client.api.remove_container(container_obj.id, v=True, force=True)
                     break
 
-        # should_exit_logs will ping the inference container
-        # return True if ready
+        # should_exit_logs will ping the inference container, return True if ready
         if should_exit_logs(end_point_id, model_id, cmd_type, inference_model_name, inference_engine,
                             inference_http_port, inference_type, request_input_example,
-                            infer_host, enable_custom_image=enable_custom_image):
+                            infer_host, readiness_check=readiness_check):
             break
 
         # Not yet ready, retry
@@ -504,10 +459,58 @@ def log_deployment_output(end_point_id, model_id, cmd_container_name, cmd_type,
         time.sleep(retry_interval)
 
 
-def is_client_inference_container_ready(infer_url_host, inference_http_port, readiness_check_type="default",
-                                        readiness_check_cmd=None, request_input_example=None):
+def parse_resource_related_config(config, gpu_num_frm_platform=0):
+    use_gpu = config.get('use_gpu', True)
+    num_gpus_frm_yml = config.get('num_gpus', None)
+
+    num_gpus = gpu_num_frm_platform
+    # Priority: num_gpus from yaml > num_gpus from platform
+    if use_gpu:
+        if num_gpus_frm_yml is not None:
+            num_gpus = int(num_gpus_frm_yml)
+    else:
+        num_gpus = 0
+
+    shm_size = config.get('shm_size', None)
+    storage_opt = config.get('storage_opt', None)
+    tmpfs = config.get('tmpfs', None)
+    cpus = config.get('cpus', None)
+    if cpus is not None:
+        cpus = int(cpus)
+    memory = config.get('memory', None)
+    port_inside_container = int(config.get("port", 2345))
+
+    return use_gpu, num_gpus, shm_size, storage_opt, tmpfs, cpus, memory, port_inside_container
+
+
+def parse_image_registry_related_config(config):
+    inference_image_name = config.get('inference_image_name', ClientConstants.INFERENCE_SERVER_CUSTOME_IMAGE)
+    image_pull_policy = config.get('image_pull_policy', SchedulerConstants.IMAGE_PULL_POLICY_IF_NOT_PRESENT)
+
+    # Optional
+    registry_specs = config.get('registry_specs', {})
+    registry_name = registry_specs.get("registry_name", "")
+    registry_provider = registry_specs.get("registry_provider", "")
+    registry_user_name = config.get("registry_user_name", "")
+    registry_user_password = config.get("registry_user_password", "")
+
+    return (inference_image_name, image_pull_policy, registry_name, registry_provider,
+            registry_user_name, registry_user_password)
+
+
+def is_client_inference_container_ready(infer_url_host, inference_http_port,
+                                        readiness_check=ClientConstants.READINESS_PROBE_DEFAULT,
+                                        request_input_example=None, container_id=None):
+    # Construct the model metadata (input and output)
+    model_metadata = {}
+    if request_input_example is not None and len(request_input_example) > 0:
+        model_metadata["inputs"] = request_input_example
+    else:
+        model_metadata["inputs"] = {"text": "What is a good cure for hiccups?"}
+    model_metadata["outputs"] = []
+    model_metadata["type"] = "default"
 
-    if readiness_check_type == "default":
+    if readiness_check == ClientConstants.READINESS_PROBE_DEFAULT:
         default_client_container_ready_url = "http://{}:{}/ready".format("0.0.0.0", inference_http_port)
         response = None
         try:
@@ -517,26 +520,36 @@ def is_client_inference_container_ready(infer_url_host, inference_http_port, rea
         if not response or response.status_code != 200:
             return "", "", {}, {}
 
-        # Construct the model metadata (input and output)
-        model_metadata = {}
-        if request_input_example is not None and len(request_input_example) > 0:
-            model_metadata["inputs"] = request_input_example
-        else:
-            model_metadata["inputs"] = {"text": "What is a good cure for hiccups?"}
-        model_metadata["outputs"] = []
-        model_metadata["type"] = "default"
-
         return "http://{}:{}/predict".format(infer_url_host, inference_http_port), None, model_metadata, None
     else:
-        # TODO(Raphael): Support arbitrary readiness check command
-        logging.error(f"Unknown readiness check type: {readiness_check_type}")
-        return "", "", {}, {}
+        if not isinstance(readiness_check, dict):
+            logging.error(f"Unknown readiness check type: {readiness_check}")
+            return "", "", {}, {}
+
+        if "path" in readiness_check:
+            readiness_check_url = f"http://{infer_url_host}:{inference_http_port}/{readiness_check['path']}"
+            response = None
+            try:
+                response = requests.get(readiness_check_url)
+            except:
+                pass
+            if not response or response.status_code != 200:
+                return "", "", {}, {}
+
+            return "http://{}:{}/".format(infer_url_host, inference_http_port), None, model_metadata, None
+        elif "command" in readiness_check:
+            # TODO(raphael): Support arbitrary readiness check command by using
+            #  container id and docker exec
+            return "http://{}:{}/".format(infer_url_host, inference_http_port), None, model_metadata, None
+        else:
+            logging.error(f"Unknown readiness check type: {readiness_check}")
+            return "", "", {}, {}
 
 
 def check_container_readiness(inference_http_port, infer_host="127.0.0.1", request_input_example=None,
-                              readiness_check_type="default", readiness_check_cmd=None):
+                              readiness_check=ClientConstants.READINESS_PROBE_DEFAULT):
     response_from_client_container = is_client_inference_container_ready(
-        infer_host, inference_http_port, readiness_check_type, readiness_check_cmd,
+        infer_host, inference_http_port, readiness_check=readiness_check,
         request_input_example=request_input_example)
 
     return response_from_client_container
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index ba13006245..84141851b0 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -342,56 +342,40 @@ async def send_inference_request(idle_device, end_point_id, inference_url, input
 
     try:
         if connectivity_type == ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP:
-            response_ok = await FedMLHttpInference.is_inference_ready(
+            response_ok, inference_response = await FedMLHttpInference.run_http_inference_with_curl_request(
                 inference_url,
+                input_list,
+                output_list,
+                inference_type=inference_type,
                 timeout=request_timeout_sec)
-            if response_ok:
-                response_ok, inference_response = await FedMLHttpInference.run_http_inference_with_curl_request(
-                    inference_url,
-                    input_list,
-                    output_list,
-                    inference_type=inference_type,
-                    timeout=request_timeout_sec)
-                logging.debug(f"Use http inference. return {response_ok}")
-                return inference_response
+            logging.debug(f"Use http inference. return {response_ok}")
+            return inference_response
         elif connectivity_type == ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP_PROXY:
-            logging.warning("Use http proxy inference.")
-            response_ok = await FedMLHttpProxyInference.is_inference_ready(
+            logging.debug("Use http proxy inference.")
+            response_ok, inference_response = await FedMLHttpProxyInference.run_http_proxy_inference_with_request(
+                end_point_id,
                 inference_url,
+                input_list,
+                output_list,
+                inference_type=inference_type,
                 timeout=request_timeout_sec)
-            if response_ok:
-                response_ok, inference_response = await FedMLHttpProxyInference.run_http_proxy_inference_with_request(
-                    end_point_id,
-                    inference_url,
-                    input_list,
-                    output_list,
-                    inference_type=inference_type,
-                    timeout=request_timeout_sec)
-                logging.info(f"Use http proxy inference. return {response_ok}")
-                return inference_response
+            logging.debug(f"Use http proxy inference. return {response_ok}")
+            return inference_response
         elif connectivity_type == ClientConstants.WORKER_CONNECTIVITY_TYPE_MQTT:
-            logging.warning("Use mqtt inference.")
+            logging.debug("Use mqtt inference.")
             agent_config = {"mqtt_config": Settings.mqtt_config}
             mqtt_inference = FedMLMqttInference(
                 agent_config=agent_config,
                 run_id=end_point_id)
-            response_ok = mqtt_inference.run_mqtt_health_check_with_request(
+            response_ok, inference_response = mqtt_inference.run_mqtt_inference_with_request(
                 idle_device,
                 end_point_id,
                 inference_url,
+                input_list,
+                output_list,
+                inference_type=inference_type,
                 timeout=request_timeout_sec)
-            inference_response = {"error": True, "message": "Failed to use http, http-proxy and mqtt for inference."}
-            if response_ok:
-                response_ok, inference_response = mqtt_inference.run_mqtt_inference_with_request(
-                    idle_device,
-                    end_point_id,
-                    inference_url,
-                    input_list,
-                    output_list,
-                    inference_type=inference_type,
-                    timeout=request_timeout_sec)
-
-            logging.info(f"Use mqtt inference. return {response_ok}.")
+            logging.debug(f"Use mqtt inference. return {response_ok}.")
             return inference_response
         else:
             return {"error": True, "message": "Failed to use http, http-proxy for inference, no response from replica."}
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
index 8100707386..a892412d29 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
@@ -460,22 +460,6 @@ def construct_deployment_results(self, end_point_name, device_id, model_status,
                                       }
         return deployment_results_payload
 
-    def construct_deployment_status(self, end_point_name, device_id,
-                                    model_id, model_name, model_version,
-                                    model_inference_url, model_status,
-                                    inference_port=ClientConstants.MODEL_INFERENCE_DEFAULT_PORT,
-                                    replica_no=1,     # start from 1
-                                    ):
-        deployment_status_payload = {"end_point_id": self.run_id, "end_point_name": end_point_name,
-                                     "device_id": device_id,
-                                     "model_id": model_id, "model_name": model_name,
-                                     "model_version": model_version,
-                                     "model_url": model_inference_url, "model_status": model_status,
-                                     "inference_port": inference_port,
-                                     "replica_no": replica_no,
-                                     }
-        return deployment_status_payload
-
     def send_deployment_results(self, end_point_name, device_id, model_status,
                                 model_id, model_name, model_inference_url,
                                 model_version, inference_port, inference_engine,
diff --git a/python/fedml/core/mlops/mlops_device_perfs.py b/python/fedml/core/mlops/mlops_device_perfs.py
index 29183a6e78..4bb41df73f 100644
--- a/python/fedml/core/mlops/mlops_device_perfs.py
+++ b/python/fedml/core/mlops/mlops_device_perfs.py
@@ -42,7 +42,7 @@ def __init__(self):
         self.monitor_replica_num_process = None
         self.monitor_replica_perf_process = None
         self.job_total_monitor_process = None
-        self.enable_job_total_monitor = False
+        self.enable_job_total_monitor = False   # TODO(Raphael): Enable the health check via this job total monitor
         self.args = None
         self.device_id = None
         self.run_id = None

From 9ca6ecc1d23166223e7788adfbe688379a18f193 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Mon, 17 Jun 2024 18:25:59 -0700
Subject: [PATCH 177/282] [Deploy] Add LoraX and Triton examples; Add url match
 pattern.

---
 .../deploy/custom_inference_image/README.md   | 48 -------------------
 .../{ => lorax}/custom_inference_image.yaml   |  0
 .../template/custom_inference_image.yaml      | 16 +++++++
 .../model_repository/dummy/1/model.py         | 25 ++++++++++
 .../scheduler/comm_utils/network_util.py      | 11 +++++
 .../device_model_deployment.py                | 19 ++++----
 .../model_scheduler/device_model_inference.py | 28 +++++++++--
 7 files changed, 87 insertions(+), 60 deletions(-)
 delete mode 100644 python/examples/deploy/custom_inference_image/README.md
 rename python/examples/deploy/custom_inference_image/{ => lorax}/custom_inference_image.yaml (100%)
 create mode 100644 python/examples/deploy/custom_inference_image/triton_inference_server/template/custom_inference_image.yaml
 create mode 100644 python/examples/deploy/custom_inference_image/triton_inference_server/template/model_repository/dummy/1/model.py

diff --git a/python/examples/deploy/custom_inference_image/README.md b/python/examples/deploy/custom_inference_image/README.md
deleted file mode 100644
index 1269e4c064..0000000000
--- a/python/examples/deploy/custom_inference_image/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-## Create a model card at local
-First, create a model card at local
-```bash
-fedml model create -n custom_inference_image -cf custom_inference_image.yaml
-```
-
-## Low Code UI Deploy
-Push the model to nexus ai platform
-```bash
-fedml model push -n custom_inference_image
-```
-Do the following docs to deploy the model on nexus ai platform
-https://docs-dev.fedml.ai/deploy/low_code_ui
-
-## CLI Deploy
-### Deploy to current machine 
-Docs: https://docs-dev.fedml.ai/deploy/deploy_local
-```bash
-fedml model deploy -n custom_inference_image --local
-```
-
-### Deploy to On-premise  
-Docs: https://docs-dev.fedml.ai/deploy/deploy_on_premise
-```bash
-fedml device bind $api_key
-```
-```bash
-fedml model deploy -n my_model -m $master_ids -w $worker_ids
-```
-
-### Deploy to GPU Cloud  
-Docs: https://docs-dev.fedml.ai/deploy/deploy_cloud
-
-Change the `custom_inference_image.yaml` file, adding following lines
-```yaml
-computing:
-  minimum_num_gpus: 1           # minimum # of GPUs to provision
-  maximum_cost_per_hour: $3000   # max cost per hour for your job per gpu card
-  #allow_cross_cloud_resources: true # true, false
-  #device_type: CPU              # options: GPU, CPU, hybrid
-  resource_type: A100-80G       # e.g., A100-80G,
-  # please check the resource type list by "fedml show-resource-type"
-  # or visiting URL: https://fedml.ai/accelerator_resource_type
-```
-
-```bash
-fedml model deploy -n custom_inference_image
-```
\ No newline at end of file
diff --git a/python/examples/deploy/custom_inference_image/custom_inference_image.yaml b/python/examples/deploy/custom_inference_image/lorax/custom_inference_image.yaml
similarity index 100%
rename from python/examples/deploy/custom_inference_image/custom_inference_image.yaml
rename to python/examples/deploy/custom_inference_image/lorax/custom_inference_image.yaml
diff --git a/python/examples/deploy/custom_inference_image/triton_inference_server/template/custom_inference_image.yaml b/python/examples/deploy/custom_inference_image/triton_inference_server/template/custom_inference_image.yaml
new file mode 100644
index 0000000000..02dca147ce
--- /dev/null
+++ b/python/examples/deploy/custom_inference_image/triton_inference_server/template/custom_inference_image.yaml
@@ -0,0 +1,16 @@
+workspace: "./"
+
+inference_image_name: "nvcr.io/nvidia/tritonserver:24.05-py3"
+
+# If you put the model repository in $workspace/model_repository, it will be mounted to /home/fedml/models_serving/model_repository
+container_run_command: "tritonserver --model-repository=/home/fedml/models_serving/model_repository"
+
+# If your image has the repository inside it, say in /my_models_dir/model_repository, you can do:
+#container_run_command: "tritonserver --model-repository=/my_models_dir/model_repository"
+
+readiness_probe:
+  path: "v2/health/ready"
+
+port: 8000
+
+deploy_timeout: 1600
diff --git a/python/examples/deploy/custom_inference_image/triton_inference_server/template/model_repository/dummy/1/model.py b/python/examples/deploy/custom_inference_image/triton_inference_server/template/model_repository/dummy/1/model.py
new file mode 100644
index 0000000000..0404a127ff
--- /dev/null
+++ b/python/examples/deploy/custom_inference_image/triton_inference_server/template/model_repository/dummy/1/model.py
@@ -0,0 +1,25 @@
+import json
+import numpy as np
+import triton_python_backend_utils as pb_utils
+
+class TritonPythonModel:
+    def initialize(self, args):
+        self.model_name = args['model_name']
+
+    @staticmethod
+    def auto_complete_config(auto_complete_model_config):
+        auto_complete_model_config.add_input( {"name": "text_input",  "data_type": "TYPE_STRING", "dims": [-1]})
+        auto_complete_model_config.add_output({"name": "text_output", "data_type": "TYPE_STRING", "dims": [-1]})
+        auto_complete_model_config.set_max_batch_size(0)
+        return auto_complete_model_config
+
+    def execute(self, requests):
+        responses = []
+        for request in requests:
+            in_numpy = pb_utils.get_input_tensor_by_name(request, "text_input").as_numpy()
+            assert np.object_ == in_numpy.dtype, 'in this demo, triton passes in a numpy array of size 1 with object_ dtype, this dtype encapsulates a python bytes-array'
+            print('in this demo len(in_numpy) is 1:', len(in_numpy.tolist()))
+            out_numpy = np.array([ (self.model_name + ': ' + python_byte_array.decode('utf-8') + ' World').encode('utf-8') for python_byte_array in in_numpy.tolist()], dtype = np.object_)
+            out_pb = pb_utils.Tensor("text_output", out_numpy)
+            responses.append(pb_utils.InferenceResponse(output_tensors = [out_pb]))
+        return responses
diff --git a/python/fedml/computing/scheduler/comm_utils/network_util.py b/python/fedml/computing/scheduler/comm_utils/network_util.py
index 48e478f23f..b03b0428d0 100644
--- a/python/fedml/computing/scheduler/comm_utils/network_util.py
+++ b/python/fedml/computing/scheduler/comm_utils/network_util.py
@@ -1,4 +1,5 @@
 import os
+from urllib.parse import urlparse
 from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants
 
 
@@ -16,3 +17,13 @@ def return_this_device_connectivity_type() -> str:
         return env_conn_type
     else:
         return ClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT
+
+
+def replace_url_with_path(url: str, path: str) -> str:
+    """
+    Replace the path of the URL with the given path.
+    """
+    if path is None:
+        return url
+    url_parsed = urlparse(url)
+    return f"{url_parsed.scheme}://{url_parsed.netloc}/{path}"
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index 71f0c8032a..1aef8c09f1 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -118,6 +118,8 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
 
         # Storage related
         src_code_dir = os.path.join(model_storage_local_path, config.get('source_code_dir', ""))
+        # TODO(Raphael): In the future, the data_cache_dir should not be controlled by the user. It only
+        #  used for internal avoiding checkpoint re-download. e.g. ~/.cache/huggingface/hub/
         data_cache_dir_input = config.get('data_cache_dir', "")
 
         # Others
@@ -225,15 +227,14 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
     else:
         logging.warning("data_cache_dir_input is not a string or a dictionary, skip mounting it to the container")
 
-    # FedML format main entry filename, e.g., main.py
-    if relative_entry_fedml_format != "":
-        logging.info("Start copying the source code to the container...")
-        volumes.append(src_code_dir)
-        binds[src_code_dir] = {
-            "bind": dst_model_serving_dir,
-            "mode": "rw"
-        }
-        environment["MAIN_ENTRY"] = relative_entry_fedml_format
+    # Inject the source code
+    logging.info("Start copying the source code to the container...")
+    volumes.append(src_code_dir)
+    binds[src_code_dir] = {
+        "bind": dst_model_serving_dir,
+        "mode": "rw"
+    }
+    environment["MAIN_ENTRY"] = relative_entry_fedml_format
 
     # Host config
     host_config_dict = {
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index 84141851b0..f6fa99d6d4 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -21,6 +21,7 @@
 from fedml.computing.scheduler.model_scheduler.device_model_cache import FedMLModelCache
 from fedml.computing.scheduler.model_scheduler.device_mqtt_inference_protocol import FedMLMqttInference
 from fedml.computing.scheduler.model_scheduler.device_http_proxy_inference_protocol import FedMLHttpProxyInference
+from fedml.computing.scheduler.comm_utils.network_util import replace_url_with_path
 from fedml.core.mlops.mlops_configs import MLOpsConfigs
 from fedml.core.mlops import MLOpsRuntimeLog, MLOpsRuntimeLogDaemon
 
@@ -168,10 +169,27 @@ async def predict_with_end_point_id(end_point_id, request: Request, response: Re
     return inference_response
 
 
+@api.post('/custom_inference/{end_point_id}/{path:path}')
+async def custom_inference(end_point_id, path: str, request: Request):
+    # Get json data
+    input_json = await request.json()
+
+    # Get header
+    header = request.headers
+
+    try:
+        inference_response = await _predict(end_point_id, input_json, header, path)
+    except Exception as e:
+        inference_response = {"error": True, "message": f"{traceback.format_exc()}"}
+
+    return inference_response
+
+
 async def _predict(
         end_point_id,
         input_json,
-        header=None
+        header=None,
+        path=None,
 ) -> Union[MutableMapping[str, Any], Response, StreamingResponse]:
     # Always increase the pending requests counter on a new incoming request.
     FEDML_MODEL_CACHE.update_pending_requests_counter(end_point_id, increase=True)
@@ -245,7 +263,8 @@ async def _predict(
                     input_list,
                     output_list,
                     inference_type=in_return_type,
-                    connectivity_type=connectivity_type)
+                    connectivity_type=connectivity_type,
+                    path=path)
 
             # Calculate model metrics
             try:
@@ -336,10 +355,13 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_
 
 async def send_inference_request(idle_device, end_point_id, inference_url, input_list, output_list,
                                  inference_type="default",
-                                 connectivity_type=ClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT):
+                                 connectivity_type=ClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT,
+                                 path=None):
     request_timeout_sec = FEDML_MODEL_CACHE.get_endpoint_settings(end_point_id) \
         .get("request_timeout_sec", ClientConstants.INFERENCE_REQUEST_TIMEOUT)
 
+    inference_url = replace_url_with_path(inference_url, path)
+
     try:
         if connectivity_type == ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP:
             response_ok, inference_response = await FedMLHttpInference.run_http_inference_with_curl_request(

From 786718bc6b61508b239a4106738724c458ed8c38 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Thu, 20 Jun 2024 10:37:19 -0700
Subject: [PATCH 178/282] [Deploy] Support serverless container.

---
 .../lorax/custom_inference_image.yaml         |   6 +-
 .../tensorrt_llm/tensorrtllm.yaml             |  17 ++
 .../template/custom_inference_image.yaml      |  16 +-
 .../device_client_constants.py                |  25 +--
 .../device_http_inference_protocol.py         |  42 ++--
 .../device_http_proxy_inference_protocol.py   |   1 +
 .../device_model_deployment.py                | 191 ++++++++++++------
 .../model_scheduler/device_model_inference.py |  18 +-
 .../device_server_constants.py                |   4 +
 .../model_scheduler/master_job_runner.py      |  38 ++--
 10 files changed, 233 insertions(+), 125 deletions(-)
 create mode 100644 python/examples/deploy/custom_inference_image/tensorrt_llm/tensorrtllm.yaml

diff --git a/python/examples/deploy/custom_inference_image/lorax/custom_inference_image.yaml b/python/examples/deploy/custom_inference_image/lorax/custom_inference_image.yaml
index 467c7c48b0..41cbe501d2 100644
--- a/python/examples/deploy/custom_inference_image/lorax/custom_inference_image.yaml
+++ b/python/examples/deploy/custom_inference_image/lorax/custom_inference_image.yaml
@@ -1,5 +1,6 @@
 workspace: "./"
 
+enable_serverless_container: true
 inference_image_name: "ghcr.io/predibase/lorax:main"
 container_run_command: "--model-id mistralai/Mistral-7B-Instruct-v0.1"
 
@@ -7,8 +8,9 @@ environment_variables:
   HUGGING_FACE_HUB_TOKEN: ""
 
 readiness_probe:
-  path: "health"
+  httpGet:
+    path: "/health"
 
 port: 80
 
-deploy_timeout: 1600
+deploy_timeout_sec: 1600
diff --git a/python/examples/deploy/custom_inference_image/tensorrt_llm/tensorrtllm.yaml b/python/examples/deploy/custom_inference_image/tensorrt_llm/tensorrtllm.yaml
new file mode 100644
index 0000000000..d41dba7983
--- /dev/null
+++ b/python/examples/deploy/custom_inference_image/tensorrt_llm/tensorrtllm.yaml
@@ -0,0 +1,17 @@
+workspace: "./"
+
+enable_serverless_container: true
+inference_image_name: "fedml/llama3-8b-tensorrtllm"
+
+# If you put the model repository in $workspace/model_repository, it will be mounted to /home/fedml/models_serving/model_repository
+container_run_command: ["sh", "-c", "cd / && huggingface-cli login --token $your_hf_token && pip install sentencepiece protobuf && python3 tensorrtllm_backend/scripts/launch_triton_server.py --model_repo tensorrtllm_backend/all_models/inflight_batcher_llm --world_size 1 && tail -f /dev/null"]
+
+readiness_probe:
+  httpGet:
+    path: "/v2/health/ready"
+
+port: 8000
+
+deploy_timeout_sec: 1600
+
+
diff --git a/python/examples/deploy/custom_inference_image/triton_inference_server/template/custom_inference_image.yaml b/python/examples/deploy/custom_inference_image/triton_inference_server/template/custom_inference_image.yaml
index 02dca147ce..eb02e3904a 100644
--- a/python/examples/deploy/custom_inference_image/triton_inference_server/template/custom_inference_image.yaml
+++ b/python/examples/deploy/custom_inference_image/triton_inference_server/template/custom_inference_image.yaml
@@ -1,16 +1,20 @@
 workspace: "./"
 
+enable_serverless_container: true
 inference_image_name: "nvcr.io/nvidia/tritonserver:24.05-py3"
 
-# If you put the model repository in $workspace/model_repository, it will be mounted to /home/fedml/models_serving/model_repository
-container_run_command: "tritonserver --model-repository=/home/fedml/models_serving/model_repository"
+volumes:
+  - workspace_path: "./model_repository"
+    mount_path: "/repo_inside_container"
 
-# If your image has the repository inside it, say in /my_models_dir/model_repository, you can do:
-#container_run_command: "tritonserver --model-repository=/my_models_dir/model_repository"
+container_run_command: "tritonserver --model-repository=/repo_inside_container"
 
 readiness_probe:
-  path: "v2/health/ready"
+  httpGet:
+    path: "/v2/health/ready"
 
 port: 8000
 
-deploy_timeout: 1600
+deploy_timeout_sec: 1600
+
+request_input_example: {"text_input": "Hello"}
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
index cd21de2e04..e18c9f730b 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
@@ -146,7 +146,6 @@ class ClientConstants(object):
     READINESS_PROBE_DEFAULT = "DEFAULT"
     LIVENESS_PROBE_DEFAULT = "DEFAULT"
 
-
     LOGIN_MODE_ON_PREMISE_INDEX = 0
     LOGIN_MODE_FEDML_CLOUD_INDEX = 1
     LOGIN_MODE_PUBLIC_CLOUD_INDEX = 2
@@ -155,20 +154,16 @@ class ClientConstants(object):
     MODEL_DATA_TYPE_INT = "int"
     MODEL_DATA_TYPE_FLOAT = "float"
     MODEL_DATA_TYPE_STR = "str"
-    MODEL_DATA_TYPE_MAPPING = {"TYPE_BOOL": MODEL_DATA_TYPE_INT, "TYPE_UINT8": MODEL_DATA_TYPE_INT,
-                               "TYPE_UINT16": MODEL_DATA_TYPE_INT, "TYPE_UINT32": MODEL_DATA_TYPE_INT,
-                               "TYPE_UINT64": MODEL_DATA_TYPE_INT, "TYPE_INT8": MODEL_DATA_TYPE_INT,
-                               "TYPE_INT16": MODEL_DATA_TYPE_INT, "TYPE_INT32": MODEL_DATA_TYPE_INT,
-                               "TYPE_INT64": MODEL_DATA_TYPE_INT, "TYPE_FP16": MODEL_DATA_TYPE_FLOAT,
-                               "TYPE_FP32": MODEL_DATA_TYPE_FLOAT, "TYPE_FP64": MODEL_DATA_TYPE_FLOAT,
-                               "TYPE_STRING": MODEL_DATA_TYPE_STR, "TYPE_BF16": MODEL_DATA_TYPE_INT,
-                               "BOOL": MODEL_DATA_TYPE_INT, "UINT8": MODEL_DATA_TYPE_INT,
-                               "UINT16": MODEL_DATA_TYPE_INT, "UINT32": MODEL_DATA_TYPE_INT,
-                               "UINT64": MODEL_DATA_TYPE_INT, "INT8": MODEL_DATA_TYPE_INT,
-                               "INT16": MODEL_DATA_TYPE_INT, "INT32": MODEL_DATA_TYPE_INT,
-                               "INT64": MODEL_DATA_TYPE_INT, "FP16": MODEL_DATA_TYPE_FLOAT,
-                               "FP32": MODEL_DATA_TYPE_FLOAT, "FP64": MODEL_DATA_TYPE_FLOAT,
-                               "STRING": MODEL_DATA_TYPE_STR, "BF16": MODEL_DATA_TYPE_INT}
+
+    # Model config yaml related
+    DEPLOY_TIMEOUT_SEC_KEY = "deploy_timeout_sec"
+    DEPLOY_TIMEOUT_SEC_DEFAULT = 600
+
+    ENABLE_SERVERLESS_CONTAINER_KEY = "enable_serverless_container"
+
+    CUSTOMIZED_VOLUMES_MOUNT_KEY = "volumes"
+    CUSTOMIZED_VOLUMES_PATH_FROM_WORKSPACE_KEY = "workspace_path"
+    CUSTOMIZED_VOLUMES_PATH_FROM_CONTAINER_KEY = "mount_path"
 
     @staticmethod
     def get_fedml_home_dir():
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
index 41c565d5d8..5b2658f0b3 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
@@ -1,3 +1,5 @@
+import logging
+
 import httpx
 import traceback
 
@@ -46,9 +48,8 @@ async def is_inference_ready(inference_url, path="ready", timeout=None):
     @staticmethod
     async def run_http_inference_with_curl_request(
             inference_url, inference_input_list, inference_output_list,
-            inference_type="default", engine_type="default", timeout=None
+            inference_type="default", engine_type="default", timeout=None, method="POST"
     ):
-        model_inference_result = {}
         if inference_type == "default":
             model_api_headers = {'Content-Type': 'application/json', 'Connection': 'close',
                                  'Accept': 'application/json'}
@@ -63,11 +64,10 @@ async def run_http_inference_with_curl_request(
                 "outputs": inference_output_list
             }
 
-        response_ok = False
         try:
             if model_inference_json.get("stream", False):
                 model_inference_result = StreamingResponse(
-                    stream_generator(inference_url, input_json=model_inference_json),
+                    stream_generator(inference_url, input_json=model_inference_json, method=method),
                     media_type="text/event-stream",
                     headers={
                         "Content-Type": model_api_headers.get("Accept", "text/event-stream"),
@@ -76,8 +76,8 @@ async def run_http_inference_with_curl_request(
                 )
                 response_ok = True
             else:
-                response_ok, model_inference_result = await redirect_request_to_worker(
-                    inference_type, inference_url, model_api_headers, model_inference_json, timeout)
+                response_ok, model_inference_result = await redirect_non_stream_req_to_worker(
+                    inference_type, inference_url, model_api_headers, model_inference_json, timeout, method=method)
         except Exception as e:
             response_ok = False
             model_inference_result = {"response": f"{traceback.format_exc()}"}
@@ -85,21 +85,22 @@ async def run_http_inference_with_curl_request(
         return response_ok, model_inference_result
 
 
-async def stream_generator(inference_url, input_json):
+async def stream_generator(inference_url, input_json, method="POST"):
     async with httpx.AsyncClient() as client:
-        async with client.stream("POST", inference_url, json=input_json,
+        async with client.stream(method, inference_url, json=input_json,
                                  timeout=ClientConstants.WORKER_STREAM_API_TIMEOUT) as response:
             async for chunk in response.aiter_lines():
                 # we consumed a newline, need to put it back
                 yield f"{chunk}\n"
 
 
-async def redirect_request_to_worker(inference_type, inference_url, model_api_headers, model_inference_json, timeout=None):
+async def redirect_non_stream_req_to_worker(inference_type, inference_url, model_api_headers, model_inference_json,
+                                            timeout=None, method="POST"):
     response_ok = True
     try:
         async with httpx.AsyncClient() as client:
-            response = await client.post(
-                url=inference_url, headers=model_api_headers, json=model_inference_json, timeout=timeout
+            response = await client.request(
+                method=method, url=inference_url, headers=model_api_headers, json=model_inference_json, timeout=timeout
             )
     except Exception as e:
         response_ok = False
@@ -107,13 +108,18 @@ async def redirect_request_to_worker(inference_type, inference_url, model_api_he
         return response_ok, model_inference_result
     
     if response.status_code == 200:
-        if inference_type == "default":
-            model_inference_result = response.json()
-        elif inference_type == "image/png":
-            binary_content: bytes = response.content
-            model_inference_result = Response(content=binary_content, media_type="image/png")
-        else:
-            model_inference_result = response.json()
+        try:
+            if inference_type == "default":
+                model_inference_result = response.json()
+            elif inference_type == "image/png":
+                binary_content: bytes = response.content
+                model_inference_result = Response(content=binary_content, media_type="image/png")
+            else:
+                model_inference_result = response.json()
+        except Exception as e:
+            response_ok = True
+            logging.warning(f"Status code 200, but cannot trans response to json due to: {e}.")
+            model_inference_result = {"response": f"{response.content}"}
     else:
         model_inference_result = {"response": f"{response.content}"}
 
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_http_proxy_inference_protocol.py b/python/fedml/computing/scheduler/model_scheduler/device_http_proxy_inference_protocol.py
index 53f5a002eb..746d17bb7c 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_http_proxy_inference_protocol.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_http_proxy_inference_protocol.py
@@ -50,6 +50,7 @@ async def run_http_proxy_inference_with_request(
             endpoint_id, inference_url, inference_input_list,
             inference_output_list, inference_type="default",
             timeout=None
+            # TODO(Raphael): Add support for GET and other methods
     ):
         inference_response = {}
         http_proxy_url = f"http://{urlparse(inference_url).hostname}:{ClientConstants.LOCAL_CLIENT_API_PORT}/api/v1/predict"
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index 1aef8c09f1..e18081c324 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -112,19 +112,20 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
         relative_entry_fedml_format = config.get('entry_point', "")
 
         # User indicate either fedml format python main entry filename or entry command
-        customized_image_entry_cmd = config.get('container_run_command', None)
+        enable_serverless_container = config.get(ClientConstants.ENABLE_SERVERLESS_CONTAINER_KEY, False)
+        customized_image_entry_cmd = config.get('container_run_command', None)  # Could be str or list
         customized_readiness_check = config.get('readiness_probe', ClientConstants.READINESS_PROBE_DEFAULT)
         customized_liveliness_check = config.get('liveness_probe', ClientConstants.LIVENESS_PROBE_DEFAULT)
 
         # Storage related
         src_code_dir = os.path.join(model_storage_local_path, config.get('source_code_dir', ""))
-        # TODO(Raphael): In the future, the data_cache_dir should not be controlled by the user. It only
-        #  used for internal avoiding checkpoint re-download. e.g. ~/.cache/huggingface/hub/
         data_cache_dir_input = config.get('data_cache_dir', "")
+        usr_customized_mount_rule = config.get(ClientConstants.CUSTOMIZED_VOLUMES_MOUNT_KEY, None)
 
         # Others
         extra_envs = config.get('environment_variables', None)
-        usr_indicated_wait_time = config.get('deploy_timeout', 900)
+        usr_indicated_wait_time = config.get(ClientConstants.DEPLOY_TIMEOUT_SEC_KEY,
+                                             config.get("deploy_timeout", ClientConstants.DEPLOY_TIMEOUT_SEC_DEFAULT))
         usr_indicated_retry_cnt = max(int(usr_indicated_wait_time) // 10, 1)
         request_input_example = config.get('request_input_example', None)
 
@@ -189,52 +190,12 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
     binds = {}
     environment = {}
 
-    if isinstance(data_cache_dir_input, str):
-        # In this case, we mount to the same folder, if it has ~, we replace it with /home/fedml
-        if data_cache_dir_input != "":
-            if data_cache_dir_input[0] == "~":
-                src_data_cache_dir = os.path.expanduser(data_cache_dir_input)
-                dst_data_cache_dir = data_cache_dir_input.replace("~", "/home/fedml")
-            else:
-                # check if the data_cache_dir is a relative path
-                if data_cache_dir_input[0] != "/":
-                    raise "data_cache_dir_input has to be an absolute path or start with ~"
-                else:
-                    src_data_cache_dir = data_cache_dir_input
-                    dst_data_cache_dir = data_cache_dir_input
-            logging.info(f"src_data_cache_dir: {src_data_cache_dir}, dst_data_cache_dir: {dst_data_cache_dir}")
+    # Handle the union volume mount
+    _handle_union_volume_mount(binds, volumes, environment, data_cache_dir_input)
 
-            if type(src_data_cache_dir) == str and src_data_cache_dir != "":
-                logging.info("Start copying the data cache to the container...")
-                if os.path.exists(src_data_cache_dir):
-                    volumes.append(src_data_cache_dir)
-                    binds[src_data_cache_dir] = {
-                        "bind": dst_data_cache_dir,
-                        "mode": "rw"
-                    }
-                    environment["DATA_CACHE_FOLDER"] = dst_data_cache_dir
-    elif isinstance(data_cache_dir_input, dict):
-        for k, v in data_cache_dir_input.items():
-            if os.path.exists(k):
-                volumes.append(v)
-                binds[k] = {
-                    "bind": v,
-                    "mode": "rw"
-                }
-            else:
-                logging.warning(f"{k} does not exist, skip mounting it to the container")
-        logging.info(f"Data cache mount: {volumes}, {binds}")
-    else:
-        logging.warning("data_cache_dir_input is not a string or a dictionary, skip mounting it to the container")
-
-    # Inject the source code
-    logging.info("Start copying the source code to the container...")
-    volumes.append(src_code_dir)
-    binds[src_code_dir] = {
-        "bind": dst_model_serving_dir,
-        "mode": "rw"
-    }
-    environment["MAIN_ENTRY"] = relative_entry_fedml_format
+    # Handle the default volume mount
+    handle_volume_mount(volumes, binds, environment, relative_entry_fedml_format, src_code_dir,
+                        dst_model_serving_dir, usr_customized_mount_rule, host_workspace_root=model_storage_local_path)
 
     # Host config
     host_config_dict = {
@@ -331,6 +292,7 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
     model_metadata = ret_model_metadata
     model_metadata["liveliness_check"] = customized_liveliness_check
     model_metadata["readiness_check"] = customized_readiness_check
+    model_metadata[ClientConstants.ENABLE_SERVERLESS_CONTAINER_KEY] = enable_serverless_container
     logging.info(f"[Worker][Replica{replica_rank}] Model deployment is successful with inference_output_url: "
                  f"{inference_output_url}, model_metadata: {model_metadata}, model_config: {ret_model_config}")
 
@@ -527,24 +489,129 @@ def is_client_inference_container_ready(infer_url_host, inference_http_port,
             logging.error(f"Unknown readiness check type: {readiness_check}")
             return "", "", {}, {}
 
-        if "path" in readiness_check:
-            readiness_check_url = f"http://{infer_url_host}:{inference_http_port}/{readiness_check['path']}"
-            response = None
-            try:
-                response = requests.get(readiness_check_url)
-            except:
-                pass
-            if not response or response.status_code != 200:
-                return "", "", {}, {}
+        if "httpGet" in readiness_check:
+            if "path" in readiness_check["httpGet"]:
+                check_path = readiness_check["httpGet"]["path"]
+                if not isinstance(check_path, str):
+                    logging.error(f"Invalid path type: {check_path}, expected str")
+                    return "", "", {}, {}
+                else:
+                    if not check_path.startswith("/"):
+                        check_path = "/" + check_path
+                readiness_check_url = f"http://{infer_url_host}:{inference_http_port}{check_path}"
 
-            return "http://{}:{}/".format(infer_url_host, inference_http_port), None, model_metadata, None
-        elif "command" in readiness_check:
+                response = None
+                try:
+                    response = requests.get(readiness_check_url)
+                except:
+                    pass
+                if not response or response.status_code != 200:
+                    return "", "", {}, {}
+
+                return readiness_check_url, None, model_metadata, None
+            else:
+                logging.error("'path' is not specified in httpGet readiness check")
+                return "", "", {}, {}
+        elif "exec" in readiness_check:
             # TODO(raphael): Support arbitrary readiness check command by using
             #  container id and docker exec
             return "http://{}:{}/".format(infer_url_host, inference_http_port), None, model_metadata, None
         else:
-            logging.error(f"Unknown readiness check type: {readiness_check}")
-            return "", "", {}, {}
+            # Ref K8S, if no readiness check, we assume the container is ready immediately
+            return "http://{}:{}/".format(infer_url_host, inference_http_port), None, model_metadata, None
+
+
+def _handle_union_volume_mount(binds, volumes, environment, data_cache_dir_input=None):
+    """
+    Private: data_cache_dir is the union folder on the host machine, which will be shared across different containers;
+    the control of this folder should be handled by the platform.
+    """
+    if isinstance(data_cache_dir_input, str):
+        # In this case, we mount to the same folder, if it has ~, we replace it with /home/fedml
+        if data_cache_dir_input != "":
+            if data_cache_dir_input[0] == "~":
+                src_data_cache_dir = os.path.expanduser(data_cache_dir_input)
+                dst_data_cache_dir = data_cache_dir_input.replace("~", "/home/fedml")
+            else:
+                # check if the data_cache_dir is a relative path
+                if data_cache_dir_input[0] != "/":
+                    raise "data_cache_dir_input has to be an absolute path or start with ~"
+                else:
+                    src_data_cache_dir = data_cache_dir_input
+                    dst_data_cache_dir = data_cache_dir_input
+            logging.info(f"src_data_cache_dir: {src_data_cache_dir}, dst_data_cache_dir: {dst_data_cache_dir}")
+
+            if isinstance(src_data_cache_dir, str) and src_data_cache_dir != "":
+                logging.info("Start copying the data cache to the container...")
+                if os.path.exists(src_data_cache_dir):
+                    volumes.append(src_data_cache_dir)
+                    binds[src_data_cache_dir] = {
+                        "bind": dst_data_cache_dir,
+                        "mode": "rw"
+                    }
+                    environment["DATA_CACHE_FOLDER"] = dst_data_cache_dir
+    elif isinstance(data_cache_dir_input, dict):
+        for k, v in data_cache_dir_input.items():
+            if os.path.exists(k):
+                volumes.append(v)
+                binds[k] = {
+                    "bind": v,
+                    "mode": "rw"
+                }
+            else:
+                logging.warning(f"{k} does not exist, skip mounting it to the container")
+        logging.info(f"Data cache mount: {volumes}, {binds}")
+    else:
+        logging.info("data_cache_dir_input is not a string or a dictionary, skip mounting it to the container")
+
+
+def handle_volume_mount(volumes, binds, environment, relative_entry_fedml_format="", src_code_dir="",
+                        dst_model_serving_dir="", customized_volumes_mount_rule=None, host_workspace_root=""):
+    # If fedml format entry point is specified, inject the source code, e.g., main.py (FedMLPredictor inside)
+    if relative_entry_fedml_format != "":
+        logging.info("Using FedML format entry point, mounting the source code...")
+        volumes.append(src_code_dir)
+        binds[src_code_dir] = {
+            "bind": dst_model_serving_dir,
+            "mode": "rw"
+        }
+        environment["MAIN_ENTRY"] = relative_entry_fedml_format
+        return  # The reason we return here is that we don't need to mount the source code again
+
+    # If customized volume mount rule is specified, just follow the mount rule
+    """
+    e.g.,
+    volumes:
+      - workspace_path: "./model_repository"
+        mount_path: "/repo_inside_container"
+    """
+    mount_list = []
+    if not isinstance(customized_volumes_mount_rule, list):
+        if not isinstance(customized_volumes_mount_rule, dict):
+            logging.warning("customized_volumes_mount_rule is not a list or a dictionary, "
+                            "skip mounting it to the container")
+            return
+
+        # transform the dict to list
+        for k, v in customized_volumes_mount_rule.items():
+            mount_list.append({ClientConstants.CUSTOMIZED_VOLUMES_PATH_FROM_WORKSPACE_KEY: k,
+                               ClientConstants.CUSTOMIZED_VOLUMES_PATH_FROM_CONTAINER_KEY: v})
+    else:
+        mount_list = customized_volumes_mount_rule if customized_volumes_mount_rule is not None else []
+
+    for mount in mount_list:
+        workspace_relative_path = mount[ClientConstants.CUSTOMIZED_VOLUMES_PATH_FROM_WORKSPACE_KEY]
+        mount_path = mount[ClientConstants.CUSTOMIZED_VOLUMES_PATH_FROM_CONTAINER_KEY]
+
+        workspace_path = os.path.join(host_workspace_root, workspace_relative_path)
+        if os.path.exists(workspace_path):
+            volumes.append(workspace_path)
+            binds[workspace_path] = {
+                "bind": mount_path,
+                "mode": "rw"
+            }
+        else:
+            logging.warning(f"{workspace_path} does not exist, skip mounting it to the container")
 
 
 def check_container_readiness(inference_http_port, infer_host="127.0.0.1", request_input_example=None,
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index f6fa99d6d4..7ef9689c1c 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -8,7 +8,7 @@
 from typing import Any, Mapping, MutableMapping, Union
 from urllib.parse import urlparse
 
-from fastapi import FastAPI, Request, Response, status
+from fastapi import FastAPI, Request, Response, status, APIRouter
 from fastapi.responses import StreamingResponse, JSONResponse
 
 import fedml
@@ -38,6 +38,7 @@ class Settings:
 
 
 api = FastAPI()
+router = APIRouter()
 
 FEDML_MODEL_CACHE = FedMLModelCache.get_instance()
 FEDML_MODEL_CACHE.set_redis_params(redis_addr=Settings.redis_addr,
@@ -169,7 +170,8 @@ async def predict_with_end_point_id(end_point_id, request: Request, response: Re
     return inference_response
 
 
-@api.post('/custom_inference/{end_point_id}/{path:path}')
+# @api.post('/custom_inference/{end_point_id}/{path:path}')
+@router.api_route("/custom_inference/{end_point_id}/{path:path}", methods=["POST", "GET"])
 async def custom_inference(end_point_id, path: str, request: Request):
     # Get json data
     input_json = await request.json()
@@ -178,18 +180,21 @@ async def custom_inference(end_point_id, path: str, request: Request):
     header = request.headers
 
     try:
-        inference_response = await _predict(end_point_id, input_json, header, path)
+        inference_response = await _predict(end_point_id, input_json, header, path, request.method)
     except Exception as e:
         inference_response = {"error": True, "message": f"{traceback.format_exc()}"}
 
     return inference_response
 
+api.include_router(router)
+
 
 async def _predict(
         end_point_id,
         input_json,
         header=None,
         path=None,
+        request_method="POST"
 ) -> Union[MutableMapping[str, Any], Response, StreamingResponse]:
     # Always increase the pending requests counter on a new incoming request.
     FEDML_MODEL_CACHE.update_pending_requests_counter(end_point_id, increase=True)
@@ -264,7 +269,7 @@ async def _predict(
                     output_list,
                     inference_type=in_return_type,
                     connectivity_type=connectivity_type,
-                    path=path)
+                    path=path, request_method=request_method)
 
             # Calculate model metrics
             try:
@@ -356,7 +361,7 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_
 async def send_inference_request(idle_device, end_point_id, inference_url, input_list, output_list,
                                  inference_type="default",
                                  connectivity_type=ClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT,
-                                 path=None):
+                                 path=None, request_method="POST"):
     request_timeout_sec = FEDML_MODEL_CACHE.get_endpoint_settings(end_point_id) \
         .get("request_timeout_sec", ClientConstants.INFERENCE_REQUEST_TIMEOUT)
 
@@ -369,7 +374,8 @@ async def send_inference_request(idle_device, end_point_id, inference_url, input
                 input_list,
                 output_list,
                 inference_type=inference_type,
-                timeout=request_timeout_sec)
+                timeout=request_timeout_sec,
+                method=request_method)
             logging.debug(f"Use http inference. return {response_ok}")
             return inference_response
         elif connectivity_type == ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP_PROXY:
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
index a5048c26a6..b58b8fae72 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
@@ -147,6 +147,10 @@ class ServerConstants(object):
     DEVICE_DIFF_ADD_OPERATION = "op: add"
     DEVICE_DIFF_DELETE_OPERATION = "op: delete"
     DEVICE_DIFF_REPLACE_OPERATION = "op: replace"
+
+    # Worker config yaml related
+    ENABLE_SERVERLESS_CONTAINER_KEY = "enable_serverless_container"
+
     @staticmethod
     def get_fedml_home_dir():
         home_dir = expanduser("~")
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index eff26684b7..f95dd8e176 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -298,14 +298,7 @@ def process_deployment_result_message(self, topic=None, payload=None):
                     return
                 else:
                     # This is the last worker that failed, so we should continue to "ABORTED" status
-                    model_config_parameters = self.request_json["parameters"]
-                    inference_port_external = ServerConstants.get_inference_master_gateway_port()
-                    ip = GeneralConstants.get_ip_address(self.request_json)
-                    if ip.startswith("http://") or ip.startswith("https://"):
-                        model_inference_url = "{}/inference/{}".format(ip, end_point_id)
-                    else:
-                        model_inference_url = "http://{}:{}/inference/{}".format(ip, inference_port_external,
-                                                                                 end_point_id)
+                    model_inference_url = self.construct_final_gateway_url(end_point_id)
 
                     self.send_deployment_status(
                         end_point_id, end_point_name, payload_json["model_name"], model_inference_url,
@@ -367,13 +360,7 @@ def process_deployment_result_message(self, topic=None, payload=None):
             """
             When all the devices have finished the add / delete / update operation
             """
-            inference_port_external = ServerConstants.get_inference_master_gateway_port()
-            ip = GeneralConstants.get_ip_address(request_json)
-
-            if ip.startswith("http://") or ip.startswith("https://"):
-                model_inference_url = "{}/inference/{}".format(ip, end_point_id)
-            else:
-                model_inference_url = "http://{}:{}/inference/{}".format(ip, inference_port_external, end_point_id)
+            model_inference_url, inference_port_external = self.construct_final_gateway_url(end_point_id)
 
             # Send stage: MODEL_DEPLOYMENT_STAGE5 = "StartInferenceIngress"
             self.send_deployment_stages(end_point_id, model_name, model_id,
@@ -394,7 +381,7 @@ def process_deployment_result_message(self, topic=None, payload=None):
 
                 model_metadata = payload_json["model_metadata"]
                 model_inputs = model_metadata["inputs"]
-                ret_inputs = list()
+
                 if "type" in model_metadata and model_metadata["type"] == "default":
                     payload_json["input_json"] = {"end_point_name": end_point_name,
                                                   "model_name": model_name,
@@ -768,3 +755,22 @@ def build_dynamic_args(self, run_id, run_config, package_conf_object, base_dir):
     def build_dynamic_constrain_variables(self, run_id, run_config):
         pass
 
+    def construct_final_gateway_url(self, end_point_id):
+        inference_port_external = ServerConstants.get_inference_master_gateway_port()
+        ip = GeneralConstants.get_ip_address(self.request_json)
+
+        identifier = "inference"
+        if self.deployed_replica_payload is not None:
+            payload_json = self.deployed_replica_payload
+            enable_custom_path = payload_json["model_metadata"].get(
+                ServerConstants.ENABLE_SERVERLESS_CONTAINER_KEY, False)
+            if enable_custom_path:
+                identifier = "custom_inference"
+
+        if ip.startswith("http://") or ip.startswith("https://"):
+            model_inference_url = "{}/{}/{}".format(ip, identifier, end_point_id)
+        else:
+            model_inference_url = "http://{}:{}/{}/{}".format(ip, inference_port_external, identifier,
+                                                              end_point_id)
+        return model_inference_url, inference_port_external
+

From c0f691c7fd468549ee311c8ae260ba9c5599a43e Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Thu, 20 Jun 2024 14:27:35 -0700
Subject: [PATCH 179/282] [Deploy] Nit.

---
 .../custom_inference_image/template.yaml      | 22 +++++++++++++++++++
 .../scheduler/comm_utils/job_monitor.py       |  2 +-
 .../device_http_inference_protocol.py         |  9 ++++----
 .../device_model_deployment.py                |  2 --
 .../model_scheduler/device_model_inference.py |  1 -
 .../device_server_constants.py                |  2 +-
 .../model_scheduler/master_job_runner.py      | 12 ++++------
 7 files changed, 33 insertions(+), 17 deletions(-)
 create mode 100644 python/examples/deploy/custom_inference_image/template.yaml

diff --git a/python/examples/deploy/custom_inference_image/template.yaml b/python/examples/deploy/custom_inference_image/template.yaml
new file mode 100644
index 0000000000..10e6580bcf
--- /dev/null
+++ b/python/examples/deploy/custom_inference_image/template.yaml
@@ -0,0 +1,22 @@
+# Required
+workspace: "./"                     # We will package all the files in the workspace directory
+enable_serverless_container: true   # Identify whether to use serverless container
+inference_image_name: ""            # Container image name
+container_run_command: ""           # str or list, similar to CMD in the dockerfile
+port: 80                            # Service port, currently you can only indicate one arbitrary port
+
+# Optional, these are the default values
+readiness_probe:                    # Probe for checking whether a container is ready for inference
+  httpGet:
+    path: ""
+environment_variables: {}           # Environment variables inside the container
+volumes:                            # Volumes to mount to the container
+    - workspace_path: ""            # Path to the volume in the workspace
+      mount_path: ""                # Path to mount the volume inside the container
+deploy_timeout_sec: 900             # Maximum time waiting for deployment to finish (Does not include the time to pull the image)
+request_input_example: {}           # Example of input request, will be shown in the UI
+registry_specs:                     # Registry information for pulling the image
+  registry_name: ""
+  registry_provider: "DockerHub"
+  registry_user_name: ""
+  registry_user_password: ""
\ No newline at end of file
diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
index d216b46dad..667a54e565 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
@@ -772,7 +772,7 @@ def _lenient_check_replica_ready(deployment_result):
         liveliness_check = result_json.get("model_metadata", {}).get("liveliness_check", None)
         readiness_check = result_json.get("model_metadata", {}).get("readiness_check", None)
 
-        if liveliness_check is not None:
+        if liveliness_check:
             if liveliness_check == ClientConstants.LIVENESS_PROBE_DEFAULT:
                 liveliness_check = readiness_check  # Follow the readiness check pattern
             if not isinstance(liveliness_check, dict):
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
index 5b2658f0b3..28d50d5a50 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
@@ -25,6 +25,8 @@ async def is_inference_ready(inference_url, path="ready", timeout=None):
         url_parsed = urlparse(inference_url)
         ready_url = f"http://{url_parsed.hostname}:{url_parsed.port}/{path}"
         response_ok = False
+
+        # TODO (Raphael): Support more methods and return codes rules.
         try:
             async with httpx.AsyncClient() as client:
                 ready_response = await client.get(url=ready_url, timeout=timeout)
@@ -109,11 +111,10 @@ async def redirect_non_stream_req_to_worker(inference_type, inference_url, model
     
     if response.status_code == 200:
         try:
-            if inference_type == "default":
-                model_inference_result = response.json()
-            elif inference_type == "image/png":
+            if inference_type == "image/png":
+                # wrapped media type for image
                 binary_content: bytes = response.content
-                model_inference_result = Response(content=binary_content, media_type="image/png")
+                model_inference_result = Response(content=binary_content, media_type=inference_type)
             else:
                 model_inference_result = response.json()
         except Exception as e:
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index e18081c324..552d7ffaca 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -2,7 +2,6 @@
 
 import logging
 import os
-import shutil
 import time
 import traceback
 import yaml
@@ -12,7 +11,6 @@
 import requests
 import torch
 import torch.nn
-import tritonclient.http as http_client
 
 import collections.abc
 
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index 7ef9689c1c..9adc17538d 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -170,7 +170,6 @@ async def predict_with_end_point_id(end_point_id, request: Request, response: Re
     return inference_response
 
 
-# @api.post('/custom_inference/{end_point_id}/{path:path}')
 @router.api_route("/custom_inference/{end_point_id}/{path:path}", methods=["POST", "GET"])
 async def custom_inference(end_point_id, path: str, request: Request):
     # Get json data
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
index b58b8fae72..f86056229e 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
@@ -358,7 +358,7 @@ def get_inference_master_gateway_port():
         # Use dotenv to load the environment variables
         fedml.load_env()
         master_inference_port = int(os.getenv(ServerConstants.ENV_MASTER_INFERENCE_PORT_KEY,
-                                            default=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT))
+                                              default=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT))
         return master_inference_port
 
 
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index f95dd8e176..ab6bc4c895 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -360,7 +360,7 @@ def process_deployment_result_message(self, topic=None, payload=None):
             """
             When all the devices have finished the add / delete / update operation
             """
-            model_inference_url, inference_port_external = self.construct_final_gateway_url(end_point_id)
+            model_inference_url = self.construct_final_gateway_url(end_point_id)
 
             # Send stage: MODEL_DEPLOYMENT_STAGE5 = "StartInferenceIngress"
             self.send_deployment_stages(end_point_id, model_name, model_id,
@@ -375,7 +375,7 @@ def process_deployment_result_message(self, topic=None, payload=None):
                 payload_json = self.deployed_replica_payload
                 model_slave_url = payload_json["model_url"]
                 payload_json["model_url"] = model_inference_url
-                payload_json["port"] = inference_port_external
+                payload_json["port"] = ServerConstants.get_inference_master_gateway_port()
                 token = FedMLModelCache.get_instance(self.redis_addr, self.redis_port).get_end_point_token(
                     end_point_id, end_point_name, model_name)
 
@@ -767,10 +767,6 @@ def construct_final_gateway_url(self, end_point_id):
             if enable_custom_path:
                 identifier = "custom_inference"
 
-        if ip.startswith("http://") or ip.startswith("https://"):
-            model_inference_url = "{}/{}/{}".format(ip, identifier, end_point_id)
-        else:
-            model_inference_url = "http://{}:{}/{}/{}".format(ip, inference_port_external, identifier,
-                                                              end_point_id)
-        return model_inference_url, inference_port_external
+        model_inference_url = "http://{}:{}/{}/{}".format(ip, inference_port_external, identifier, end_point_id)
+        return model_inference_url
 

From 7a0963e97edb3ae59d3b3745893d03ed7f5385d5 Mon Sep 17 00:00:00 2001
From: xiang <xiang@tensoropera.com>
Date: Fri, 21 Jun 2024 03:04:03 +0000
Subject: [PATCH 180/282] [TEST]: add windows runners tests

---
 .github/workflows/CI_build.yml                |  4 +-
 .github/workflows/CI_deploy.yml               |  7 ++--
 .github/workflows/CI_federate.yml             |  4 +-
 .github/workflows/CI_launch.yml               |  4 +-
 .github/workflows/CI_train.yml                |  6 +--
 .github/workflows/README.md                   | 24 +++++++++++-
 .../workflows/registry-runners/windows.bat    | 38 -------------------
 .../workflows/registry-runners/windows.ps1    | 32 ++++++++++++++++
 8 files changed, 68 insertions(+), 51 deletions(-)
 delete mode 100644 .github/workflows/registry-runners/windows.bat
 create mode 100644 .github/workflows/registry-runners/windows.ps1

diff --git a/.github/workflows/CI_build.yml b/.github/workflows/CI_build.yml
index 86a846379c..b4c3642b09 100644
--- a/.github/workflows/CI_build.yml
+++ b/.github/workflows/CI_build.yml
@@ -17,11 +17,11 @@ on:
 # A workflow run is made up of one or more jobs that can run sequentially or in parallel
 jobs:
   build:
-    runs-on: ${{ matrix.python-version }}
+    runs-on: ["${{ matrix.python-version }}","${{ matrix.os }}"]
     strategy:
       fail-fast: false
       matrix:
-        os: [ Linux ]
+        os: [ Linux, Windows ]
         arch: [X64]
         python-version: ['python3.8', 'python3.9', 'python3.10', 'python3.11']
     
diff --git a/.github/workflows/CI_deploy.yml b/.github/workflows/CI_deploy.yml
index 35e793708f..982f65b3c5 100644
--- a/.github/workflows/CI_deploy.yml
+++ b/.github/workflows/CI_deploy.yml
@@ -17,14 +17,15 @@ on:
 # A workflow run is made up of one or more jobs that can run sequentially or in parallel
 jobs:
   deploy:
-    runs-on: ${{ matrix.python-version }}
+    runs-on: ["${{ matrix.python-version }}","${{ matrix.os }}"]
     strategy:
       fail-fast: false
       matrix:
-        os: [ Linux ]
+        os: [ Linux, Windows ]
         arch: [X64]
         python-version: ['python3.8', 'python3.9', 'python3.10', 'python3.11']
-
+    
+    timeout-minutes: 5
     steps:
       - name: Checkout fedml
         uses: actions/checkout@v3
diff --git a/.github/workflows/CI_federate.yml b/.github/workflows/CI_federate.yml
index 52cdfd9e10..1302771b1d 100644
--- a/.github/workflows/CI_federate.yml
+++ b/.github/workflows/CI_federate.yml
@@ -20,11 +20,11 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ Linux ]
+        os: [ Linux, Windows ]
         arch: [X64]
         python-version: ['python3.8', 'python3.9', 'python3.10', 'python3.11']
 
-    runs-on: ${{ matrix.python-version }}
+    runs-on: ["${{ matrix.python-version }}","${{ matrix.os }}"]
     timeout-minutes: 5
     steps:
       - name: Checkout fedml
diff --git a/.github/workflows/CI_launch.yml b/.github/workflows/CI_launch.yml
index b2b896c82d..13519c41f2 100644
--- a/.github/workflows/CI_launch.yml
+++ b/.github/workflows/CI_launch.yml
@@ -21,11 +21,11 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ ubuntu-latest ]
+        os: [ Linux, Windows ]
         arch: [X64]
         python-version: ['python3.8','python3.9','python3.10','python3.11']
 
-    runs-on: ${{ matrix.python-version }}
+    runs-on: ["${{ matrix.python-version }}","${{ matrix.os }}"]
     timeout-minutes: 5
     steps:
       - name: Checkout fedml
diff --git a/.github/workflows/CI_train.yml b/.github/workflows/CI_train.yml
index 529472d55c..2acbcc12a0 100644
--- a/.github/workflows/CI_train.yml
+++ b/.github/workflows/CI_train.yml
@@ -17,14 +17,14 @@ on:
 # A workflow run is made up of one or more jobs that can run sequentially or in parallel
 jobs:
   train:
-    runs-on: ${{ matrix.python-version }}
+    runs-on: ["${{ matrix.python-version }}","${{ matrix.os }}"]
     strategy:
       fail-fast: false
       matrix:
-        os: [ Linux ]
+        os: [ Linux, Windows ]
         arch: [X64]
         python-version: ['python3.8', 'python3.9', 'python3.10', 'python3.11']
-
+    timeout-minutes: 5
     steps:
       - name: Checkout fedml
         uses: actions/checkout@v3
diff --git a/.github/workflows/README.md b/.github/workflows/README.md
index 4e284a2175..f8c119f123 100644
--- a/.github/workflows/README.md
+++ b/.github/workflows/README.md
@@ -45,6 +45,28 @@ to check that your runners are all active.
 
 ## 2.2 Windows Runners
 
+step1: Install Anaconda packages
+Install Anaconda or Miniconda on a Windows machine. Anaconda and Miniconda can manage your Python environments.
+
+step2: Create python environments
+Create 4 python environments named python38, python39, python310 and python311. You also need to specify, e.g., `python==3.8`, to install the specific Python version.
+For example 
+```
+conda create -n python38 python==3.8
+```
+step3: Create directories
+Create 4 directories named actions-runner-python38, actions-runner-python39, actions-runner-python310 and actions-runner-python311, used for the different runners.
+
+step4: Install the latest runner package.
+Follow the instructions found by navigating the path `Settings -> Actions -> Runners -> New self-hosted runner` to add a new Windows runner. Note that you only need to do the download and extract steps in the directories we have created; you don't need to configure or run it yet. We can run a script to register all the runners.
+
+step5: Register all the runners.
+Run the script ./registry-runners/windows.ps1 to register all the runners to your GitHub. You need to replace the variables $REPO, $ACCESS_TOKEN and $WORKPLACE with their actual values. Note that you can get your $ACCESS_TOKEN from the path `Settings -> Actions -> Runners -> New self-hosted runner`.
+In the Configure section, you can find a similar line: `./config.sh --url https://github.com/FedML-AI/FedML --token AXRYPL6G2VHVGDFDQQS5XA3ELYI6M` to get your $ACCESS_TOKEN.
+
+step6: Verify success.
+Check that the runners are registered successfully by navigating the path `Settings -> Actions -> Runners` to confirm that your runners are all active.
+
 ## 2.3 Mac Runners
 
 # 3. bind Test Machines
@@ -70,5 +92,5 @@ If you need to add a new CI test that is different from the current business, yo
 
 # 6. TODO
 
-Implement the Windows runners and the Mac runners.
+Implement the Mac runners.
 
diff --git a/.github/workflows/registry-runners/windows.bat b/.github/workflows/registry-runners/windows.bat
deleted file mode 100644
index dcdcf81b57..0000000000
--- a/.github/workflows/registry-runners/windows.bat
+++ /dev/null
@@ -1,38 +0,0 @@
-set REPO=Qigemingziba/FedML
-set ACCESS_TOKEN=AGMK3P4W5EM5PXNYTZXXIMTGNF4MW
-set WORKPLACE=%pwd%
-mkdir actions-runner-python38; cd actions-runner-python38
-conda activate python38
-Invoke-WebRequest -Uri https://github.com/actions/runner/releases/download/v2.317.0/actions-runner-win-x64-2.317.0.zip -OutFile actions-runner-win-x64-2.317.0.zip
-Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory("$PWD/actions-runner-win-x64-2.317.0.zip", "$PWD")
-./config.cmd --url https://github.com/%REPO% --token %ACCESS_TOKEN% --labels self-hosted,Windows,X64,python3.8
-.\run.cmd install
-.\run.cmd start
-
-cd WORKPLACE
-mkdir actions-runner-python39; cd actions-runner-python39
-conda activate python39
-Invoke-WebRequest -Uri https://github.com/actions/runner/releases/download/v2.317.0/actions-runner-win-x64-2.317.0.zip -OutFile actions-runner-win-x64-2.317.0.zip
-Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory("$PWD/actions-runner-win-x64-2.317.0.zip", "$PWD")
-./config.cmd --url https://github.com/%REPO% --token %ACCESS_TOKEN% --labels self-hosted,Windows,X64,python3.9
-.\run.cmd install
-.\run.cmd start
-
-cd WORKPLACE
-mkdir actions-runner-python310; cd actions-runner-python310
-conda activate python310
-Invoke-WebRequest -Uri https://github.com/actions/runner/releases/download/v2.317.0/actions-runner-win-x64-2.317.0.zip -OutFile actions-runner-win-x64-2.317.0.zip
-Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory("$PWD/actions-runner-win-x64-2.317.0.zip", "$PWD")
-./config.cmd --url https://github.com/%REPO% --token %ACCESS_TOKEN% --labels self-hosted,Windows,X64,python3.10
-.\run.cmd install
-.\run.cmd start
-
-cd WORKPLACE
-mkdir actions-runner-python311; cd actions-runner-python311
-conda activate python311
-Invoke-WebRequest -Uri https://github.com/actions/runner/releases/download/v2.317.0/actions-runner-win-x64-2.317.0.zip -OutFile actions-runner-win-x64-2.317.0.zip
-Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory("$PWD/actions-runner-win-x64-2.317.0.zip", "$PWD")
-./config.cmd --url https://github.com/%REPO% --token %ACCESS_TOKEN% --labels self-hosted,Windows,X64,python3.11
-.\run.cmd install
-.\run.cmd start
-
diff --git a/.github/workflows/registry-runners/windows.ps1 b/.github/workflows/registry-runners/windows.ps1
new file mode 100644
index 0000000000..40f0f00b8f
--- /dev/null
+++ b/.github/workflows/registry-runners/windows.ps1
@@ -0,0 +1,32 @@
+
+$REPO = "Qigemingziba/FedML"
+$ACCESS_TOKEN  = "AGMK3PY3QDYUXXXEB5LWI4DGOQIFW"
+$WORKPLACE=$PWD
+
+Set-Location actions-runner-python38
+& conda activate python38
+./config.cmd --url https://github.com/$REPO --name windows-python38 --token $ACCESS_TOKEN --labels self-hosted,Windows,X64,python3.8
+Start-Process run.cmd start -WindowStyle Hidden
+
+Set-Location $WORKPLACE
+
+Set-Location actions-runner-python39
+& conda activate python39
+./config.cmd --url https://github.com/$REPO --name windows-python39 --token $ACCESS_TOKEN --labels self-hosted,Windows,X64,python3.9
+Start-Process run.cmd start -WindowStyle Hidden
+
+Set-Location $WORKPLACE
+
+Set-Location actions-runner-python310
+& conda activate python310
+./config.cmd --url https://github.com/$REPO --name windows-python310 --token $ACCESS_TOKEN --labels self-hosted,Windows,X64,python3.10
+Start-Process run.cmd start -WindowStyle Hidden
+
+Set-Location $WORKPLACE
+
+Set-Location actions-runner-python311
+& conda activate python311
+./config.cmd --url https://github.com/$REPO --name windows-python311 --token $ACCESS_TOKEN --labels self-hosted,Windows,X64,python3.11
+Start-Process run.cmd start -WindowStyle Hidden
+
+Set-Location $WORKPLACE
\ No newline at end of file

From 4355c35550d924255d7fe0c0d77d8b31af3e9461 Mon Sep 17 00:00:00 2001
From: xiang <xiang@tensoropera.com>
Date: Fri, 21 Jun 2024 04:35:07 +0000
Subject: [PATCH 181/282] [doc]: make sure the workflow documents are more
 readable.

---
 .github/workflows/README.md | 65 +++++++++++++++++++------------------
 1 file changed, 33 insertions(+), 32 deletions(-)

diff --git a/.github/workflows/README.md b/.github/workflows/README.md
index f8c119f123..2b261708f6 100644
--- a/.github/workflows/README.md
+++ b/.github/workflows/README.md
@@ -10,26 +10,24 @@ The CI tests need to be comprehensive, covering typical scenarios only, achievab
 
 ## 2.1 Linux Runners
 
-We need to run CI tests in linux enviroment using different python versions such as python3.8/python3.9/python3.10/python3.11
-
-Therefore firstly we build linux images for Self-Host Runners.
+Step1: Build linux images
 
+Build all the linux images for Self-Host Runners.
 ```
 cd registry-runners
 bash build_linux_runners.sh
 ```
-Secondly we need to find your GitHub runner token and your test-account apikey.
-
-For the argument YourGitHubRunnerToken, you may navigate based the following path.
 
-Settings -> Actions -> Runners -> New self-hosted runner. 
+Step2: Specify the token and key.
+Find your GitHub runner token and your test-account apikey.
 
-In the Configure section, you should find the similar line:
-./config.sh --url https://github.com/FedML-AI/FedML --token AXRYPL6G2VHVGDFDQQS5XA3ELYI6M
+For the argument YourGitHubRunnerToken, Navigate the path `Settings -> Actions -> Runners -> New self-hosted runner` to get.
 
-set YourGitHubRunnerToken to value of --token
+In the Configure section, you will find the similar line:
+./config.sh --url https://github.com/FedML-AI/FedML --token AXRYPL6G2VHVGDFDQQS5XA3ELYI6M to get YourGitHubRunnerToken to value of --token
 
-Then we run all ther images.
+Step3: Registry all the runners.
+Registry by run `run_linux_runners.sh` script
 ```
 bash run_linux_runners.sh [YourGitRepo] [YourGitHubRunnerToken] [YourTestAccountApiKey]
 ```
@@ -37,58 +35,61 @@ for example
 ```
 bash run_linux_runners.sh FedML-AI/FedML AXRYPLZLZN6XVJB3BAIXSP3EMFC7U 11215dkevvdkegged
 ```
-Lastly we need to check if the runners are registered successfully. Navigate the following path.
-```
-Settings -> Actions -> Runners
-```
-to check that your runners are all active. 
+Step4 Verify Success
+
+Check if all the runners are registered successfully. Navigate the following path. `Settings -> Actions -> Runners` to check that all your runners are active.
 
 ## 2.2 Windows Runners
 
 step1: Install Anaconda packages
-Install Anaconda or Miniconda in a windows machine. the Anaconda and Miniconda can manage your python environments.
+Install Anaconda or Miniconda on a Windows machine. Anaconda and Miniconda can manage your Python environments.
 
 step2: Create python enviroments
-Create 4 python environments named python38、python39、python310、python311, you also need to specific `python==3.8` to install specific python version.
+Create 4 python environments named `python38`、`python39`、`python310` and `python311` for different runners.
+Specify the python version to install.
 For example 
 ```
 conda create -n python38 python==3.8
 ```
 step3: Create directories 
-Create 4 directories named actions-runner-python38、actions-runner-python39、actions-runner-python310、actions-runner-python311 used for different runners.
+Create 4 directories named `actions-runner-python38`、`actions-runner-python39`、`actions-runner-python310` and `actions-runner-python311` for different runners.
 
 step4: Install the latest runner package. 
-Follow the insturction from navigating this path `Settings -> Actions -> Runners -> New self-hosted runner` to add a new windows runner. Note that You just do the download、extract steps in the directories which we have created, we don't need to configure it and run it. We can run a script to registry all the runners. 
+Follow the insturction from navigating this path `Settings -> Actions -> Runners -> New self-hosted runner` to add a new Windows runner. Note that you only need to download、extract the files into the directories created in Step 3. Configuration and running will be done through a script later.
 
 step5: Registry all the runners.
-Run the script from ./registry-runners/windows.ps1 to registry all the runners to your github. you need to replace the variables $REPO、$ACCESS_TOKEN、$WORKPLACE with the actual value. Note that you can get your $ACCESS_TOKEN from the following path `Settings -> Actions -> Runners -> New self-hosted runner.`.
-In the Configure section, you can find the similar line: `./config.sh --url https://github.com/FedML-AI/FedML --token AXRYPL6G2VHVGDFDQQS5XA3ELYI6M` to get your $ACCESS_TOKEN.
+Run the script from `./registry-runners/windows.ps1` to registry all the runners to your github. Replace the variables `$REPO`、`$ACCESS_TOKEN` and `$WORKPLACE` with actual values. Note that you can get your $ACCESS_TOKEN from the following path `Settings -> Actions -> Runners -> New self-hosted runner.`.
+In the Configure section, you will find the similar line: `./config.sh --url https://github.com/FedML-AI/FedML --token AXRYPL6G2VHVGDFDQQS5XA3ELYI6M` to get your `$ACCESS_TOKEN`.
 
-step6: Make sure the success.
-Check if the runners are registered successfully. Navigate the following path.`Settings -> Actions -> Runners` to check that your runners are all active. 
+step6: Verify Success
+Check if the runners are registered successfully by navigate to `Settings -> Actions -> Runners`. Make sure that all your runners are active. 
 
 ## 2.3 Mac Runners
 
-# 3. bind Test Machines
+# 3. Bind Test Machines
 
-We also need to bind the actual machine to run the test training job. Following this document to bind your test machines.
+Bind the actual machine to run the test training job. Follow this document to bind your test machines.
 https://docs.tensoropera.ai/share-and-earn
 
 Note that we need to bind our machines to the test environment.
 
-In your job YAML, you should specify the computing resource type to which you have bound your machines. Then, your job will be scheduled to that machine.
+Specify the computing resource type to which you have bound your machines. Your job will be scheduled to that machine.
 
 # 4. Trigger
 
-You can apply for a PR; All tests will run automatically.
+Applying for a PR can trigger all tests automatically.
+
+Run a single test on a specific branch from the GitHub Actions tab.
+
+Schedule daily runs at a specific time by configuring your workflow YAML. You can check the results in the GitHub Actions tab.
 
-You can also run a single test at a specific branch in the GitHub Actions tab.
+# 5. Add a new CI test
 
-The CI tests will run daily at a specific time which you configure in your workflow YAML. You can check the results in the GitHub Actions tab.
+Creating a new workflow YAML file, such as CI_launch.yaml or CI_train.yaml, allows you to add a CI test that is different from the current business.
 
-# 5. How to add a new CI test
+Adding a new CI test to the current business can be done by placing your test in the path python/tests/test_{business}/test_file.py and ensuring that your workflow YAML can run that Python test script.
 
-If you need to add a new CI test that is different from the current business, you need to create a new workflow YAML file, such as CI_launch.yaml or CI_train.yaml. If you just want to add a new CI test to the current business, you can add your test in the path python/tests/test_{business}/test_file.py and make sure that your workflow YAML can run that Python test script.
+Ensuring your workflow YAML is configured correctly will enable it to run the new test automatically.
 
 # 6. TODO
 

From be60443aea50e174b5057dbe7aa09d1922068d53 Mon Sep 17 00:00:00 2001
From: xiang <xiang@tensoropera.com>
Date: Fri, 21 Jun 2024 04:42:51 +0000
Subject: [PATCH 182/282] [doc]: make sure the workflow documents are more
 readable.

---
 .github/workflows/README.md | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/.github/workflows/README.md b/.github/workflows/README.md
index 2b261708f6..668cb9b302 100644
--- a/.github/workflows/README.md
+++ b/.github/workflows/README.md
@@ -10,7 +10,7 @@ The CI tests need to be comprehensive, covering typical scenarios only, achievab
 
 ## 2.1 Linux Runners
 
-Step1: Build linux images
+### Step1: Build linux images
 
 Build all the linux images for Self-Host Runners.
 ```
@@ -18,7 +18,7 @@ cd registry-runners
 bash build_linux_runners.sh
 ```
 
-Step2: Specify the token and key.
+### Step2: Specify the token and key.
 Find your GitHub runner token and your test-account apikey.
 
 For the argument YourGitHubRunnerToken, Navigate the path `Settings -> Actions -> Runners -> New self-hosted runner` to get.
@@ -26,7 +26,7 @@ For the argument YourGitHubRunnerToken, Navigate the path `Settings -> Actions -
 In the Configure section, you will find the similar line:
 ./config.sh --url https://github.com/FedML-AI/FedML --token AXRYPL6G2VHVGDFDQQS5XA3ELYI6M to get YourGitHubRunnerToken to value of --token
 
-Step3: Registry all the runners.
+### Step3: Registry all the runners.
 Registry by run `run_linux_runners.sh` script
 ```
 bash run_linux_runners.sh [YourGitRepo] [YourGitHubRunnerToken] [YourTestAccountApiKey]
@@ -35,33 +35,33 @@ for example
 ```
 bash run_linux_runners.sh FedML-AI/FedML AXRYPLZLZN6XVJB3BAIXSP3EMFC7U 11215dkevvdkegged
 ```
-Step4 Verify Success
+### Step4: Verify Success
 
 Check if all the runners are registered successfully. Navigate the following path. `Settings -> Actions -> Runners` to check that all your runners are active.
 
 ## 2.2 Windows Runners
 
-step1: Install Anaconda packages
+### Step1: Install Anaconda packages
 Install Anaconda or Miniconda on a Windows machine. Anaconda and Miniconda can manage your Python environments.
 
-step2: Create python enviroments
+### Step2: Create python environments
 Create 4 python environments named `python38`、`python39`、`python310` and `python311` for different runners.
 Specify the python version to install.
 For example 
 ```
 conda create -n python38 python==3.8
 ```
-step3: Create directories 
+### Step3: Create directories 
 Create 4 directories named `actions-runner-python38`、`actions-runner-python39`、`actions-runner-python310` and `actions-runner-python311` for different runners.
 
-step4: Install the latest runner package. 
+### Step4: Install the latest runner package. 
 Follow the insturction from navigating this path `Settings -> Actions -> Runners -> New self-hosted runner` to add a new Windows runner. Note that you only need to download、extract the files into the directories created in Step 3. Configuration and running will be done through a script later.
 
-step5: Registry all the runners.
+### Step5: Register all the runners.
 Run the script from `./registry-runners/windows.ps1` to registry all the runners to your github. Replace the variables `$REPO`、`$ACCESS_TOKEN` and `$WORKPLACE` with actual values. Note that you can get your $ACCESS_TOKEN from the following path `Settings -> Actions -> Runners -> New self-hosted runner.`.
 In the Configure section, you will find the similar line: `./config.sh --url https://github.com/FedML-AI/FedML --token AXRYPL6G2VHVGDFDQQS5XA3ELYI6M` to get your `$ACCESS_TOKEN`.
 
-step6: Verify Success
+### Step6: Verify Success
 Check if the runners are registered successfully by navigate to `Settings -> Actions -> Runners`. Make sure that all your runners are active. 
 
 ## 2.3 Mac Runners

From d7481bebb214c20475adc8da4b5de362e935f134 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Fri, 21 Jun 2024 17:31:32 +0800
Subject: [PATCH 183/282] [CoreEngine] Set the name of all monitor processes,
 remove the redundant binding code for deployment devices, and change the
 account manager.

---
 .../scheduler/master/base_master_agent.py     |   2 +
 .../model_scheduler/model_device_client.py    |  98 ----------------
 .../model_scheduler/model_device_server.py    |  97 ---------------
 .../scheduler_core/account_manager.py         |  71 +++++------
 .../scheduler_core/general_constants.py       |  10 +-
 .../scheduler_core/message_center.py          |   3 +-
 .../scheduler/scheduler_core/status_center.py |   3 +-
 .../scheduler/slave/base_slave_agent.py       |   7 +-
 .../slave/base_slave_protocol_manager.py      |  11 +-
 .../scheduler/slave/slave_protocol_manager.py |  66 ++---------
 .../scheduler/slave/united_agents.py          |  15 ++-
 python/fedml/core/mlops/mlops_device_perfs.py | 111 ++++++++++++++----
 python/fedml/core/mlops/mlops_job_perfs.py    |  19 ++-
 13 files changed, 185 insertions(+), 328 deletions(-)
 delete mode 100755 python/fedml/computing/scheduler/model_scheduler/model_device_client.py
 delete mode 100755 python/fedml/computing/scheduler/model_scheduler/model_device_server.py

diff --git a/python/fedml/computing/scheduler/master/base_master_agent.py b/python/fedml/computing/scheduler/master/base_master_agent.py
index 4fb3a5e755..e7d18f64f7 100755
--- a/python/fedml/computing/scheduler/master/base_master_agent.py
+++ b/python/fedml/computing/scheduler/master/base_master_agent.py
@@ -64,6 +64,8 @@ def login(
         if communication_manager is None:
             self.protocol_mgr.start()
 
+        return login_result
+
     @staticmethod
     def logout():
         GeneralConstants.cleanup_run_process(None, is_master=True)
diff --git a/python/fedml/computing/scheduler/model_scheduler/model_device_client.py b/python/fedml/computing/scheduler/model_scheduler/model_device_client.py
deleted file mode 100755
index 05f43afc5f..0000000000
--- a/python/fedml/computing/scheduler/model_scheduler/model_device_client.py
+++ /dev/null
@@ -1,98 +0,0 @@
-
-import copy
-import logging
-import multiprocessing
-import time
-import traceback
-from multiprocessing import Process
-from ..scheduler_core.account_manager import FedMLAccountManager
-from .worker_agent import FedMLDeployWorkerAgent
-
-
-class FedMLModelDeviceClientRunner:
-    def __init__(self, args, current_device_id, os_name, is_from_docker, service_config, infer_host="127.0.0.1"):
-        self.agent_process = None
-        self.agent_runner = None
-        self.agent_process_event = None
-        self.args = copy.deepcopy(args)
-        self.service_config = service_config
-        self.unique_device_id = None
-        self.current_device_id = current_device_id
-        self.os_name = os_name
-        self.is_from_docker = is_from_docker
-        self.edge_id = None
-        self.infer_host = infer_host
-        self.redis_addr = "local"
-        self.redis_port = "6379"
-        self.redis_password = "fedml_default"
-
-    def get_edge_id(self):
-        return self.edge_id
-
-    def start(self):
-        self.agent_runner = FedMLModelDeviceClientRunner(self.args, self.current_device_id, self.os_name,
-                                                         self.is_from_docker, self.service_config)
-        self.agent_runner.infer_host = self.infer_host
-        self.agent_runner.redis_addr = self.redis_addr
-        self.agent_runner.redis_port = self.redis_port
-        self.agent_runner.redis_password = self.redis_password
-        if self.agent_process_event is None:
-            self.agent_process_event = multiprocessing.Event()
-        self.agent_process = Process(target=self.agent_runner.run_entry, args=(self.agent_process_event, self.args,))
-        self.edge_id = self.bind_device()
-        self.agent_process.start()
-
-    def run_entry(self, process_event, in_args):
-        # print(f"Model worker process id {os.getpid()}")
-
-        self.agent_process_event = process_event
-
-        worker_agent = FedMLDeployWorkerAgent()
-
-        while not self.agent_process_event.is_set():
-            try:
-                try:
-                    worker_agent.logout()
-                except Exception as e:
-                    pass
-
-                worker_agent.login(
-                    in_args.account_id, api_key=in_args.api_key, device_id=in_args.device_id,
-                    os_name=in_args.os_name, role=FedMLAccountManager.ROLE_DEPLOY_WORKER_ON_PREM
-                )
-            except Exception as e:
-                logging.info("Restart model device client: {}".format(traceback.format_exc()))
-                pass
-            finally:
-                try:
-                    worker_agent.logout()
-                except Exception as e:
-                    pass
-                time.sleep(15)
-
-        try:
-            self.stop()
-        except Exception as e:
-            pass
-
-    def check_runner_stop_event(self):
-        if self.agent_process_event is not None and self.agent_process_event.is_set():
-            logging.info("Received stopping event.")
-            raise Exception("Runner stopped")
-
-    def stop(self):
-        FedMLDeployWorkerAgent.logout()
-
-        if self.agent_process_event is not None:
-            self.agent_process_event.set()
-
-    def bind_device(self):
-        # Login account
-        login_result = FedMLAccountManager.get_instance().login(
-            self.args.account_id, api_key=self.args.api_key, device_id=self.args.device_id,
-            os_name=self.args.os_name, role=FedMLAccountManager.ROLE_DEPLOY_WORKER_ON_PREM
-        )
-        if login_result is not None:
-            return login_result.edge_id
-        else:
-            return None
diff --git a/python/fedml/computing/scheduler/model_scheduler/model_device_server.py b/python/fedml/computing/scheduler/model_scheduler/model_device_server.py
deleted file mode 100755
index b2ecd144b1..0000000000
--- a/python/fedml/computing/scheduler/model_scheduler/model_device_server.py
+++ /dev/null
@@ -1,97 +0,0 @@
-
-import copy
-import logging
-import multiprocessing
-import time
-import traceback
-from multiprocessing import Process
-from ..scheduler_core.account_manager import FedMLAccountManager
-from .master_agent import FedMLDeployMasterAgent
-
-
-class FedMLModelDeviceServerRunner:
-    def __init__(self, args, current_device_id, os_name, is_from_docker, service_config, infer_host="127.0.0.1"):
-        self.agent_process = None
-        self.agent_runner = None
-        self.agent_process_event = None
-        self.args = copy.deepcopy(args)
-        self.service_config = service_config
-        self.unique_device_id = None
-        self.current_device_id = current_device_id
-        self.os_name = os_name
-        self.is_from_docker = is_from_docker
-        self.edge_id = None
-        self.infer_host = infer_host
-        self.redis_addr = "local"
-        self.redis_port = "6379"
-        self.redis_password = "fedml_default"
-
-    def get_edge_id(self):
-        return self.edge_id
-
-    def start(self):
-        self.agent_runner = FedMLModelDeviceServerRunner(self.args, self.current_device_id, self.os_name,
-                                                         self.is_from_docker, self.service_config)
-        self.agent_runner.infer_host = self.infer_host
-        self.agent_runner.redis_addr = self.redis_addr
-        self.agent_runner.redis_port = self.redis_port
-        self.agent_runner.redis_password = self.redis_password
-        if self.agent_process_event is None:
-            self.agent_process_event = multiprocessing.Event()
-        self.agent_process = Process(target=self.agent_runner.run_entry, args=(self.agent_process_event, self.args))
-        self.edge_id = self.bind_device()
-        self.agent_process.start()
-
-    def run_entry(self, process_event, in_args):
-        # print(f"Model master process id {os.getpid()}")
-
-        self.agent_process_event = process_event
-        master_agent = FedMLDeployMasterAgent()
-
-        while not self.agent_process_event.is_set():
-            try:
-                try:
-                    master_agent.logout()
-                except Exception as e:
-                    pass
-
-                master_agent.login(
-                    in_args.account_id, api_key=in_args.api_key, device_id=in_args.device_id,
-                    os_name=in_args.os_name, role=FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM
-                )
-            except Exception as e:
-                logging.info("Restart model device server: {}".format(traceback.format_exc()))
-                pass
-            finally:
-                try:
-                    master_agent.logout()
-                except Exception as e:
-                    pass
-                time.sleep(15)
-
-        try:
-            self.stop()
-        except Exception as e:
-            pass
-
-    def check_runner_stop_event(self):
-        if self.agent_process_event is not None and self.agent_process_event.is_set():
-            logging.info("Received stopping event.")
-            raise Exception("Runner stopped")
-
-    def stop(self):
-        FedMLDeployMasterAgent.logout()
-
-        if self.agent_process_event is not None:
-            self.agent_process_event.set()
-
-    def bind_device(self):
-        # Login account
-        login_result = FedMLAccountManager.get_instance().login(
-            self.args.account_id, api_key=self.args.api_key, device_id=self.args.device_id,
-            os_name=self.args.os_name, role=FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM
-        )
-        if login_result is not None:
-            return login_result.edge_id
-        else:
-            return None
diff --git a/python/fedml/computing/scheduler/scheduler_core/account_manager.py b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
index 20c5fcd842..6bd3dd4b19 100755
--- a/python/fedml/computing/scheduler/scheduler_core/account_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
@@ -43,8 +43,7 @@ class FedMLAccountManager(Singleton):
     DEVICE_ID_DOCKER_HUB_TAG = ".DockerHub"
 
     def __init__(self):
-        if not hasattr(self, "agent_args"):
-            self.agent_args = None
+        pass
 
     @staticmethod
     def get_instance():
@@ -52,7 +51,7 @@ def get_instance():
 
     def login(self, user_id, api_key="", device_id=None, os_name=None, role=None, runner_cmd=None):
         # Build the agent args
-        self.build_agent_args(
+        agent_args = self.build_agent_args(
             user_id, api_key=api_key, device_id=device_id, os_name=os_name, role=role, runner_cmd=runner_cmd
         )
 
@@ -95,8 +94,8 @@ def login(self, user_id, api_key="", device_id=None, os_name=None, role=None, ru
             # noinspection PyBroadException
             try:
                 edge_id, user_name, extra_url, general_edge_id = FedMLAccountManager.bind_account_and_device_id(
-                    service_config["ml_ops_config"]["EDGE_BINDING_URL"], self.agent_args.account_id,
-                    self.agent_args.unique_device_id, self.agent_args.os_name,
+                    service_config["ml_ops_config"]["EDGE_BINDING_URL"], agent_args.account_id,
+                    agent_args.unique_device_id, agent_args.os_name,
                     api_key=api_key, role=role
                 )
                 if edge_id > 0:
@@ -120,13 +119,13 @@ def login(self, user_id, api_key="", device_id=None, os_name=None, role=None, ru
             return None
 
         # Fill the bound result to agent args.
-        self.fill_argent_args(
-            log_server_url=log_server_url, server_id=edge_id,
+        agent_args = self.fill_argent_args(
+            agent_args, log_server_url=log_server_url, server_id=edge_id,
             edge_id=edge_id, general_edge_id=general_edge_id,
             user_name=user_name, extra_url=extra_url,
             agent_config=service_config)
 
-        return self.agent_args
+        return agent_args
 
     def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None, role=None, runner_cmd=None):
         # Generate the suffix for device based on the role
@@ -159,32 +158,31 @@ def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None,
 
         # Build the agent args
         version = fedml.get_env_version()
-        if self.agent_args is None:
-            self.agent_args = AgentArgs()
-        self.agent_args.role = role
-        self.agent_args.account_id = user_id
-        self.agent_args.api_key = api_key
-        self.agent_args.current_running_dir = GeneralConstants.get_deploy_fedml_home_dir(is_master=is_master) \
+        agent_args = AgentArgs()
+        agent_args.role = role
+        agent_args.account_id = user_id
+        agent_args.api_key = api_key
+        agent_args.current_running_dir = GeneralConstants.get_deploy_fedml_home_dir(is_master=is_master) \
             if is_deploy else GeneralConstants.get_launch_fedml_home_dir(is_master=is_master)
         sys_name = platform.system()
         if sys_name == "Darwin":
             sys_name = "MacOS"
-        self.agent_args.os_name = sys_name if os_name is None or os_name == "" else os_name
-        self.agent_args.version = version
-        self.agent_args.log_file_dir = GeneralConstants.get_deploy_log_file_dir(is_master=is_master) \
+        agent_args.os_name = sys_name if os_name is None or os_name == "" else os_name
+        agent_args.version = version
+        agent_args.log_file_dir = GeneralConstants.get_deploy_log_file_dir(is_master=is_master) \
             if is_deploy else GeneralConstants.get_launch_log_file_dir(is_master=is_master)
         is_from_docker = False
         if device_id is not None and device_id != "0":
-            self.agent_args.current_device_id = device_id
+            agent_args.current_device_id = device_id
         else:
             data_dir = GeneralConstants.get_deploy_data_dir(is_master=is_master) \
                 if is_deploy else GeneralConstants.get_launch_data_dir(is_master=is_master)
             is_gpu_provider = True if role == FedMLAccountManager.ROLE_GPU_PROVIDER else False
-            self.agent_args.current_device_id = FedMLAccountManager.get_device_id(
+            agent_args.current_device_id = FedMLAccountManager.get_device_id(
                 data_dir=data_dir, use_machine_id=is_gpu_provider)
-        self.agent_args.device_id = self.agent_args.current_device_id
-        self.agent_args.config_version = version
-        self.agent_args.cloud_region = ""
+        agent_args.device_id = agent_args.current_device_id
+        agent_args.config_version = version
+        agent_args.cloud_region = ""
 
         # Check if it is running in the fedml docker hub
         is_from_fedml_docker_hub = False
@@ -196,26 +194,29 @@ def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None,
         # Build unique device id
         docker_tag = FedMLAccountManager.DEVICE_ID_DOCKER_TAG if is_from_docker else ""
         docker_tag = FedMLAccountManager.DEVICE_ID_DOCKER_HUB_TAG if is_from_fedml_docker_hub else docker_tag
-        unique_device_id = f"{self.agent_args.current_device_id}@{self.agent_args.os_name}" \
+        unique_device_id = f"{agent_args.current_device_id}@{agent_args.os_name}" \
                            f"{docker_tag}{device_id_suffix}"
         if role == FedMLAccountManager.ROLE_CLOUD_SERVER:
-            unique_device_id = self.agent_args.current_device_id
+            unique_device_id = agent_args.current_device_id
 
         # Set the unique device id
-        self.agent_args.is_from_docker = is_from_docker or is_from_fedml_docker_hub
-        self.agent_args.unique_device_id = unique_device_id
-        self.agent_args.runner_cmd = runner_cmd
+        agent_args.is_from_docker = is_from_docker or is_from_fedml_docker_hub
+        agent_args.unique_device_id = unique_device_id
+        agent_args.runner_cmd = runner_cmd
+
+        return agent_args
 
     def fill_argent_args(
-            self, log_server_url=None, server_id=None, edge_id=None,
+            self, agent_args, log_server_url=None, server_id=None, edge_id=None,
             user_name=None, extra_url=None, general_edge_id=None, agent_config=None):
-        self.agent_args.log_server_url = log_server_url
-        self.agent_args.server_id = server_id
-        self.agent_args.edge_id = edge_id
-        self.agent_args.user_name = user_name
-        self.agent_args.extra_url = extra_url
-        self.agent_args.general_edge_id = general_edge_id
-        self.agent_args.agent_config = agent_config
+        agent_args.log_server_url = log_server_url
+        agent_args.server_id = server_id
+        agent_args.edge_id = edge_id
+        agent_args.user_name = user_name
+        agent_args.extra_url = extra_url
+        agent_args.general_edge_id = general_edge_id
+        agent_args.agent_config = agent_config
+        return agent_args
 
     @staticmethod
     def write_login_failed_file(is_client=True):
diff --git a/python/fedml/computing/scheduler/scheduler_core/general_constants.py b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
index 3b40e1df80..0ab6f79577 100755
--- a/python/fedml/computing/scheduler/scheduler_core/general_constants.py
+++ b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
@@ -76,6 +76,7 @@ class GeneralConstants:
     FEDML_MESSAGE_CENTER_SENDER_TAG = "message-center-sender"
     FEDML_STATUS_CENTER_TAG = "status-center"
     FEDML_LOG_PROCESS_TAG = "log"
+    FEDML_MONITOR_PROCESS_TAG = "monitor"
 
     FEDML_TOPIC_STATUS_CENTER_STOP = "anywhere/status_center/stop"
 
@@ -232,7 +233,9 @@ def get_payload_complete_job(run_id, server_id):
 
     @staticmethod
     def get_process_name(process_tag, run_id=None, edge_id=None):
-        return f"{GeneralConstants.FEDML_PROCESS_NAME_PREFIX}{process_tag}-run-{run_id}-edge-{edge_id}"
+        return f'{GeneralConstants.FEDML_PROCESS_NAME_PREFIX}{process_tag}'\
+               f'{"-run-" + str(run_id) if run_id is not None and int(run_id) != 0 else ""}'\
+               f'{"-edge-" + str(edge_id) if edge_id is not None else ""}'
 
     @staticmethod
     def get_process_name_with_prefix(process_prefix, run_id=None, edge_id=None):
@@ -285,4 +288,7 @@ def get_message_center_sender_process_name(message_center_name):
     def get_status_center_process_name(status_center_tag):
         return f"{GeneralConstants.FEDML_PROCESS_NAME_PREFIX}{GeneralConstants.FEDML_STATUS_CENTER_TAG}-{status_center_tag}"
 
-
+    @staticmethod
+    def get_monitor_process_name(monitor_tag, run_id, edge_id):
+        return GeneralConstants.get_process_name(
+            f"{GeneralConstants.FEDML_MONITOR_PROCESS_TAG}-{monitor_tag}", run_id, edge_id)
diff --git a/python/fedml/computing/scheduler/scheduler_core/message_center.py b/python/fedml/computing/scheduler/scheduler_core/message_center.py
index 087e74edf4..5f414d1873 100755
--- a/python/fedml/computing/scheduler/scheduler_core/message_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/message_center.py
@@ -352,8 +352,7 @@ def start_listener(
         self.listener_message_event = multiprocessing.Event()
         self.listener_message_event.clear()
         self.listener_agent_config = agent_config
-        message_runner = self.get_message_runner()
-        # message_runner = self
+        message_runner = self
         message_runner.listener_agent_config = agent_config
         process_name = GeneralConstants.get_message_center_listener_process_name(message_center_name)
         if platform.system() == "Windows":
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py
index 47dfa9d1a7..b1462d7ea9 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py
@@ -120,8 +120,7 @@ def start_status_center(self, sender_message_center_queue=None,
         self.status_event.clear()
         self.status_sender_message_center_queue = sender_message_center_queue
         self.status_listener_message_center_queue = listener_message_center_queue
-        self.status_runner = self.get_status_runner()
-        #self.status_runner = self
+        self.status_runner = self
         process_name = GeneralConstants.get_status_center_process_name(
             f'{"deploy" if self.is_deployment_status_center else "launch"}_'
             f'{"slave" if is_slave_agent else "master"}_agent')
diff --git a/python/fedml/computing/scheduler/slave/base_slave_agent.py b/python/fedml/computing/scheduler/slave/base_slave_agent.py
index 58a79aae88..9876ac9912 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_agent.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_agent.py
@@ -35,7 +35,7 @@ def login(
                 print("We can't find any gpu device on your machine. \n"
                       "With the gpu_supplier(-g) option, you need to check if your machine "
                       "has nvidia GPUs and installs CUDA related drivers.")
-                return
+                return None
 
         # Login account
         login_result = FedMLAccountManager.get_instance().login(
@@ -155,3 +155,8 @@ def _init_database(self):
     @abstractmethod
     def _generate_protocol_manager_instance(self, args, agent_config=None):
         return None
+
+    def save_deploy_ids(self, deploy_master_edge_id=None, deploy_slave_edge_id=None):
+        self.protocol_mgr.save_deploy_ids(
+            deploy_master_edge_id=deploy_master_edge_id, deploy_slave_edge_id=deploy_slave_edge_id)
+
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index 648a49bbd1..534ee2f7d0 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -62,8 +62,6 @@ def __init__(self, args, agent_config=None):
         self.server_id = args.server_id
         self.model_device_server_id = None
         self.model_device_client_edge_id_list = None
-        self.model_device_server = None
-        self.model_device_client_list = None
 
     @abstractmethod
     def generate_topics(self):
@@ -147,12 +145,9 @@ def add_subscribe_topic(self, topic):
         self.subscribed_topics.append(topic)
 
     def stop(self):
-        if self.model_device_server is not None:
-            self.model_device_server = None
-
-        if self.model_device_client_list is not None:
-            self.model_device_client_list.clear()
-            self.model_device_client_list = None
+        if self.model_device_client_edge_id_list is not None:
+            self.model_device_client_edge_id_list.clear()
+            self.model_device_client_edge_id_list = None
 
         super().stop()
 
diff --git a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
index 050cfb3f1d..449cd7c29c 100755
--- a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
@@ -1,10 +1,8 @@
-import copy
+
 import os
 from ..comm_utils.job_cleanup import JobCleanup
 from .base_slave_protocol_manager import FedMLBaseSlaveProtocolManager
 from .launch_job_runner_manager import FedMLLaunchJobRunnerManager
-from ..model_scheduler.model_device_server import FedMLModelDeviceServerRunner
-from ..model_scheduler.model_device_client import FedMLModelDeviceClientRunner
 
 
 class FedMLLaunchSlaveProtocolManager(FedMLBaseSlaveProtocolManager):
@@ -47,57 +45,19 @@ def _init_extra_items(self):
         # Sync the data when startup
         JobCleanup.get_instance().sync_data_on_startup(self.args.edge_id)
 
-        # Get the environment variables
-        infer_host = os.getenv("FEDML_INFER_HOST", None)
-        infer_redis_addr = os.getenv("FEDML_INFER_REDIS_ADDR", None)
-        infer_redis_port = os.getenv("FEDML_INFER_REDIS_PORT", None)
-        infer_redis_password = os.getenv("FEDML_INFER_REDIS_PASSWORD", None)
-        model_client_num = os.getenv("FEDML_MODEL_WORKER_NUM", None)
-
-        # Start deploy master agent and slave agent
-        in_args = copy.deepcopy(self.args)
-        if self.model_device_client_edge_id_list is None:
-            self.model_device_client_edge_id_list = list()
-        if self.model_device_client_list is None:
-            model_client_num = 1 if model_client_num is None else int(model_client_num)
-            self.model_device_client_list = list()
-            for client_index in range(model_client_num):
-                model_device_client = FedMLModelDeviceClientRunner(
-                    in_args, f"{in_args.current_device_id}_{client_index + 1}", in_args.os_name,
-                    in_args.is_from_docker, self.agent_config)
-                if infer_host is not None:
-                    model_device_client.infer_host = infer_host
-                if infer_redis_addr is not None:
-                    model_device_client.redis_addr = infer_redis_addr
-                if infer_redis_port is not None:
-                    model_device_client.redis_port = infer_redis_port
-                if infer_redis_password is not None:
-                    model_device_client.redis_password = infer_redis_password
-                self.model_device_client_list.append(model_device_client)
-                self.model_device_client_edge_id_list.append(model_device_client.bind_device())
-
-        self.args = copy.deepcopy(in_args)
-        if self.model_device_server is None:
-            self.model_device_server = FedMLModelDeviceServerRunner(in_args, in_args.current_device_id,
-                                                                    in_args.os_name, in_args.is_from_docker,
-                                                                    self.agent_config)
-            if infer_host is not None:
-                self.model_device_server.infer_host = infer_host
-            if infer_redis_addr is not None:
-                self.model_device_server.redis_addr = infer_redis_addr
-            if infer_redis_port is not None:
-                self.model_device_server.redis_port = infer_redis_port
-            if infer_redis_password is not None:
-                self.model_device_server.redis_password = infer_redis_password
-
-            self.model_device_server_id = self.model_device_server.bind_device()
+        # Start the monitor process
+        self.mlops_metrics.stop_device_realtime_perf()
+        self.mlops_metrics.report_device_realtime_perf(self.args, self.args.agent_config["mqtt_config"])
+
+    def save_deploy_ids(self, deploy_master_edge_id=None, deploy_slave_edge_id=None):
+        if deploy_master_edge_id is not None:
+            self.model_device_server_id = deploy_master_edge_id
+
+        if deploy_slave_edge_id is not None:
+            if self.model_device_client_edge_id_list is None:
+                self.model_device_client_edge_id_list = list()
+            self.model_device_client_edge_id_list.append(deploy_slave_edge_id)
 
         # Save the deployed master and worker id list to the environment variable.
         os.environ["FEDML_DEPLOY_MASTER_ID"] = str(self.model_device_server_id)
         os.environ["FEDML_DEPLOY_WORKER_IDS"] = str(self.model_device_client_edge_id_list)
-
-        # Start the monitor process
-        self.args = copy.deepcopy(in_args)
-        self.mlops_metrics.stop_device_realtime_perf()
-        self.mlops_metrics.report_device_realtime_perf(self.args, self.args.agent_config["mqtt_config"])
-        pass
diff --git a/python/fedml/computing/scheduler/slave/united_agents.py b/python/fedml/computing/scheduler/slave/united_agents.py
index 17aee46f62..3c8549c06a 100755
--- a/python/fedml/computing/scheduler/slave/united_agents.py
+++ b/python/fedml/computing/scheduler/slave/united_agents.py
@@ -1,10 +1,8 @@
-import multiprocessing
-
+from fedml.computing.scheduler.model_scheduler.master_agent import FedMLDeployMasterAgent
+from fedml.computing.scheduler.model_scheduler.worker_agent import FedMLDeployWorkerAgent
 from fedml.computing.scheduler.scheduler_core.account_manager import FedMLAccountManager
 from fedml.computing.scheduler.slave.slave_agent import FedMLLaunchSlaveAgent
 from fedml.computing.scheduler.master.master_agent import FedMLLaunchMasterAgent
-from fedml.computing.scheduler.model_scheduler.model_device_server import FedMLDeployMasterAgent
-from fedml.computing.scheduler.model_scheduler.model_device_client import FedMLDeployWorkerAgent
 from fedml.core.common.singleton import Singleton
 
 
@@ -54,7 +52,7 @@ def login(self, userid, api_key=None, device_id=None,
 
         # Login with the deployment master role based on
         # the shared communication manager, sender message center, status center
-        deploy_master_agent.login(
+        deploy_master_login_result = deploy_master_agent.login(
             userid, api_key=api_key, device_id=login_result.device_id,
             os_name=os_name, role=FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM,
             communication_manager=shared_communication_mgr
@@ -62,11 +60,16 @@ def login(self, userid, api_key=None, device_id=None,
 
         # Login with the deployment slave role based on
         # the shared communication manager, sender message center, status center
-        deploy_slave_agent.login(
+        deploy_slave_login_result = deploy_slave_agent.login(
             userid, api_key=api_key, device_id=login_result.device_id,
             os_name=os_name, role=FedMLAccountManager.ROLE_DEPLOY_WORKER_ON_PREM,
             communication_manager=shared_communication_mgr
         )
 
+        # Set the deployment ids to launch agent so that we can report the related device info to MLOps.
+        launch_slave_agent.save_deploy_ids(
+            deploy_master_edge_id=deploy_master_login_result.edge_id,
+            deploy_slave_edge_id=deploy_slave_login_result.edge_id)
+
         # Start the slave agent to connect to servers and loop forever.
         launch_slave_agent.start()
diff --git a/python/fedml/core/mlops/mlops_device_perfs.py b/python/fedml/core/mlops/mlops_device_perfs.py
index d0f1f3898f..6ba4d6e866 100644
--- a/python/fedml/core/mlops/mlops_device_perfs.py
+++ b/python/fedml/core/mlops/mlops_device_perfs.py
@@ -9,6 +9,7 @@
 
 import multiprocessing
 import psutil
+import setproctitle
 
 import fedml
 from fedml.computing.scheduler.comm_utils import sys_utils
@@ -16,6 +17,7 @@
 from .mlops_utils import MLOpsUtils
 from .system_stats import SysStats
 from ...computing.scheduler.comm_utils.job_monitor import JobMonitor
+from ...computing.scheduler.scheduler_core.general_constants import GeneralConstants
 from ...core.distributed.communication.mqtt.mqtt_manager import MqttManager
 
 
@@ -30,6 +32,17 @@
 ROLE_ENDPOINT_REPLICA_NUM = 8
 ROLE_ENDPOINT_REPLICA_PERF = 9
 
+ROLE_DEVICE_JOB_TOTAL_MONITOR_STR = "device_job_total"
+ROLE_DEVICE_INFO_REPORTER_STR = "device_info"
+ROLE_ENDPOINT_MASTER_STR = "endpoint_master"
+ROLE_ENDPOINT_SLAVE_STR = "endpoint_slave"
+ROLE_RUN_MASTER_STR = "run_master"
+ROLE_RUN_SLAVE_STR = "run_slave"
+ROLE_ENDPOINT_LOGS_STR = "endpoint_logs"
+ROLE_AUTO_SCALER_STR = "autoscaler"
+ROLE_ENDPOINT_REPLICA_NUM_STR = "endpoint_replica_num"
+ROLE_ENDPOINT_REPLICA_PERF_STR = "endpoint_replica_perf"
+
 
 class MLOpsDevicePerfStats(object):
     def __init__(self):
@@ -81,100 +94,158 @@ def setup_realtime_stats_process(self, sys_args):
         if platform.system() == "Windows":
             self.device_realtime_stats_process = multiprocessing.Process(
                 target=perf_stats.report_device_realtime_stats_entry,
-                args=(self.device_realtime_stats_event, ROLE_DEVICE_INFO_REPORTER, self.is_client))
+                args=(self.device_realtime_stats_event, ROLE_DEVICE_INFO_REPORTER, self.is_client,
+                      GeneralConstants.get_monitor_process_name(
+                          ROLE_DEVICE_INFO_REPORTER_STR, perf_stats.run_id, perf_stats.edge_id)))
         else:
             self.device_realtime_stats_process = fedml.get_process(
                 target=perf_stats.report_device_realtime_stats_entry,
-                args=(self.device_realtime_stats_event, ROLE_DEVICE_INFO_REPORTER, self.is_client))
+                args=(self.device_realtime_stats_event, ROLE_DEVICE_INFO_REPORTER, self.is_client,
+                      GeneralConstants.get_monitor_process_name(
+                          ROLE_DEVICE_INFO_REPORTER_STR, perf_stats.run_id, perf_stats.edge_id)))
         self.device_realtime_stats_process.start()
 
         if self.enable_job_total_monitor:
             if platform.system() == "Windows":
                 self.job_total_monitor_process = multiprocessing.Process(
                     target=perf_stats.report_device_realtime_stats_entry,
-                    args=(self.device_realtime_stats_event, ROLE_DEVICE_JOB_TOTAL_MONITOR, self.is_client))
+                    args=(self.device_realtime_stats_event, ROLE_DEVICE_JOB_TOTAL_MONITOR, self.is_client,
+                          GeneralConstants.get_monitor_process_name(
+                              ROLE_DEVICE_JOB_TOTAL_MONITOR_STR, perf_stats.run_id, perf_stats.edge_id)))
             else:
                 self.job_total_monitor_process = fedml.get_process(
                     target=perf_stats.report_device_realtime_stats_entry,
-                    args=(self.device_realtime_stats_event, ROLE_DEVICE_JOB_TOTAL_MONITOR, self.is_client))
+                    args=(self.device_realtime_stats_event, ROLE_DEVICE_JOB_TOTAL_MONITOR, self.is_client,
+                          GeneralConstants.get_monitor_process_name(
+                              ROLE_DEVICE_JOB_TOTAL_MONITOR_STR, perf_stats.run_id, perf_stats.edge_id)))
             self.job_total_monitor_process.start()
         else:
             if self.is_client:
+                # Register endpoint master process
                 if platform.system() == "Windows":
                     self.monitor_endpoint_master_process = multiprocessing.Process(
                         target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_MASTER))
+                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_MASTER, True,
+                              GeneralConstants.get_monitor_process_name(
+                                  ROLE_ENDPOINT_MASTER_STR, perf_stats.run_id, perf_stats.edge_id)))
                 else:
                     self.monitor_endpoint_master_process = fedml.get_process(
                         target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_MASTER))
+                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_MASTER, True,
+                              GeneralConstants.get_monitor_process_name(
+                                  ROLE_ENDPOINT_MASTER_STR, perf_stats.run_id, perf_stats.edge_id)))
                 self.monitor_endpoint_master_process.start()
 
+                # Register endpoint slave process
+                if platform.system() == "Windows":
+                    self.monitor_endpoint_slave_process = multiprocessing.Process(
+                        target=perf_stats.report_device_realtime_stats_entry,
+                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_SLAVE, True,
+                              GeneralConstants.get_monitor_process_name(
+                                  ROLE_ENDPOINT_SLAVE_STR, perf_stats.run_id, perf_stats.edge_id)))
+                else:
+                    self.monitor_endpoint_slave_process = fedml.get_process(
+                        target=perf_stats.report_device_realtime_stats_entry,
+                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_SLAVE, True,
+                              GeneralConstants.get_monitor_process_name(
+                                  ROLE_ENDPOINT_SLAVE_STR, perf_stats.run_id, perf_stats.edge_id)))
+                self.monitor_endpoint_slave_process.start()
+
+                # Register run slave process
                 if platform.system() == "Windows":
                     self.monitor_run_slave_process = multiprocessing.Process(
                         target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_RUN_SLAVE))
+                        args=(self.device_realtime_stats_event, ROLE_RUN_SLAVE, True,
+                              GeneralConstants.get_monitor_process_name(
+                                  ROLE_RUN_SLAVE_STR, perf_stats.run_id, perf_stats.edge_id)))
                 else:
                     self.monitor_run_slave_process = fedml.get_process(
                         target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_RUN_SLAVE))
+                        args=(self.device_realtime_stats_event, ROLE_RUN_SLAVE, True,
+                              GeneralConstants.get_monitor_process_name(
+                                  ROLE_RUN_SLAVE_STR, perf_stats.run_id, perf_stats.edge_id)))
                 self.monitor_run_slave_process.start()
 
+                # Register endpoint logs process
                 if platform.system() == "Windows":
                     self.monitor_endpoint_logs_process = multiprocessing.Process(
                         target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_LOGS))
+                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_LOGS, True,
+                              GeneralConstants.get_monitor_process_name(
+                                  ROLE_ENDPOINT_LOGS_STR, perf_stats.run_id, perf_stats.edge_id)))
                 else:
                     self.monitor_endpoint_logs_process = fedml.get_process(
                         target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_LOGS))
+                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_LOGS, True,
+                              GeneralConstants.get_monitor_process_name(
+                                  ROLE_ENDPOINT_LOGS_STR, perf_stats.run_id, perf_stats.edge_id)))
                 self.monitor_endpoint_logs_process.start()
 
                 # Register auto-scaler process
                 if platform.system() == "Windows":
                     self.monitor_auto_scaler_process = multiprocessing.Process(
                         target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_AUTO_SCALER))
+                        args=(self.device_realtime_stats_event, ROLE_AUTO_SCALER, True,
+                              GeneralConstants.get_monitor_process_name(
+                                  ROLE_AUTO_SCALER_STR, perf_stats.run_id, perf_stats.edge_id)))
                 else:
                     self.monitor_auto_scaler_process = fedml.get_process(
                         target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_AUTO_SCALER))
+                        args=(self.device_realtime_stats_event, ROLE_AUTO_SCALER, True,
+                              GeneralConstants.get_monitor_process_name(
+                                  ROLE_AUTO_SCALER_STR, perf_stats.run_id, perf_stats.edge_id)))
                 self.monitor_auto_scaler_process.start()
 
                 # Register replica number report channel
                 if platform.system() == "Windows":
                     self.monitor_replica_num_process = multiprocessing.Process(
                         target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_NUM))
+                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_NUM, True,
+                              GeneralConstants.get_monitor_process_name(
+                                  ROLE_ENDPOINT_REPLICA_NUM_STR, perf_stats.run_id, perf_stats.edge_id)))
                 else:
                     self.monitor_replica_num_process = fedml.get_process(
                         target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_NUM))
+                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_NUM, True,
+                              GeneralConstants.get_monitor_process_name(
+                                  ROLE_ENDPOINT_REPLICA_NUM_STR, perf_stats.run_id, perf_stats.edge_id)))
                 self.monitor_replica_num_process.start()
 
                 # Register replica performance report channel
                 if platform.system() == "Windows":
                     self.monitor_replica_perf_process = multiprocessing.Process(
                         target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_PERF))
+                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_PERF, True,
+                              GeneralConstants.get_monitor_process_name(
+                                  ROLE_ENDPOINT_REPLICA_PERF_STR, perf_stats.run_id, perf_stats.edge_id)))
+
                 else:
                     self.monitor_replica_perf_process = fedml.get_process(
                         target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_PERF))
+                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_PERF, True,
+                              GeneralConstants.get_monitor_process_name(
+                                  ROLE_ENDPOINT_REPLICA_PERF_STR, perf_stats.run_id, perf_stats.edge_id)))
                 self.monitor_replica_perf_process.start()
             else:
                 if platform.system() == "Windows":
                     self.monitor_run_master_process = multiprocessing.Process(
                         target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_RUN_MASTER))
+                        args=(self.device_realtime_stats_event, ROLE_RUN_MASTER, False,
+                              GeneralConstants.get_monitor_process_name(
+                                  ROLE_RUN_MASTER_STR, perf_stats.run_id, perf_stats.edge_id)))
                 else:
                     self.monitor_run_master_process = fedml.get_process(
                         target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_RUN_MASTER))
+                        args=(self.device_realtime_stats_event, ROLE_RUN_MASTER, False,
+                              GeneralConstants.get_monitor_process_name(
+                                  ROLE_RUN_MASTER_STR, perf_stats.run_id, perf_stats.edge_id)))
                 self.monitor_run_master_process.start()
 
-    def report_device_realtime_stats_entry(self, sys_event, role, is_client=False):
-        # print(f"Report device realtime stats, process id {os.getpid()}")
+    def report_device_realtime_stats_entry(self, sys_event, role, is_client=False, process_name=None):
+        if process_name is not None:
+            setproctitle.setproctitle(process_name)
+
+        # print(f"Report device realtime stats, process id {os.getpid()}, name {process_name}")
 
         self.device_realtime_stats_event = sys_event
         mqtt_mgr = MqttManager(
diff --git a/python/fedml/core/mlops/mlops_job_perfs.py b/python/fedml/core/mlops/mlops_job_perfs.py
index e834ed4a0c..429e32ff1d 100644
--- a/python/fedml/core/mlops/mlops_job_perfs.py
+++ b/python/fedml/core/mlops/mlops_job_perfs.py
@@ -8,14 +8,18 @@
 
 import multiprocess as multiprocessing
 import psutil
+import setproctitle
 
 import fedml
 from .mlops_utils import MLOpsUtils
 from .system_stats import SysStats
+from ...computing.scheduler.scheduler_core.general_constants import GeneralConstants
 from ...core.distributed.communication.mqtt.mqtt_manager import MqttManager
 
 
 class MLOpsJobPerfStats(object):
+    JOB_PERF_PROCESS_TAG = "job_perf"
+
     def __init__(self):
         self.job_stats_process = None
         self.job_stats_event = None
@@ -142,17 +146,24 @@ def setup_job_stats_process(self, sys_args):
         perf_stats.job_process_id_map = self.job_process_id_map
         if platform.system() == "Windows":
             self.job_stats_process = multiprocessing.Process(
-                target=perf_stats.report_job_stats_entry, args=(self.job_stats_event,))
+                target=perf_stats.report_job_stats_entry,
+                args=(self.job_stats_event, GeneralConstants.get_monitor_process_name(
+                    MLOpsJobPerfStats.JOB_PERF_PROCESS_TAG, perf_stats.run_id, perf_stats.edge_id)))
         else:
             self.job_stats_process = fedml.get_process(
-                target=perf_stats.report_job_stats_entry, args=(self.job_stats_event,))
+                target=perf_stats.report_job_stats_entry,
+                args=(self.job_stats_event, GeneralConstants.get_monitor_process_name(
+                    MLOpsJobPerfStats.JOB_PERF_PROCESS_TAG, perf_stats.run_id, perf_stats.edge_id)))
         self.job_stats_process.start()
 
     def report_job_stats(self, sys_args):
         self.setup_job_stats_process(sys_args)
 
-    def report_job_stats_entry(self, sys_event):
-        # print(f"Report job realtime stats, process id {os.getpid()}")
+    def report_job_stats_entry(self, sys_event, process_name):
+        if process_name is not None:
+            setproctitle.setproctitle(process_name)
+
+        # print(f"Report job realtime stats, process id {os.getpid()}, name {process_name}")
 
         self.job_stats_event = sys_event
         mqtt_mgr = MqttManager(

From aa813a074aadebdbfd1f2b1ebd542334d095b5a4 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Fri, 21 Jun 2024 17:32:26 +0800
Subject: [PATCH 184/282] [CoreEngine] remove the API key.

---
 python/fedml/api/api_test.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/fedml/api/api_test.py b/python/fedml/api/api_test.py
index 1aa5ac3767..5951fbd1e0 100755
--- a/python/fedml/api/api_test.py
+++ b/python/fedml/api/api_test.py
@@ -4,9 +4,9 @@
 import fedml
 
 # Login
-fedml.set_env_version("local")
+fedml.set_env_version("test")
 fedml.set_local_on_premise_platform_port(18080)
-error_code, error_msg = fedml.api.fedml_login(api_key="1316b93c82da40ce90113a2ed12f0b14")
+error_code, error_msg = fedml.api.fedml_login(api_key="")
 if error_code != 0:
     print("API Key is invalid!")
     exit(1)

From 33fb5b45fc674d18d74e7f435d41e69ebfde703d Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Fri, 21 Jun 2024 14:21:16 -0700
Subject: [PATCH 185/282] [Deploy] Pass down the API key to the container.

---
 .../device_client_constants.py                |  2 +
 .../model_scheduler/device_model_cache.py     | 15 +++++-
 .../device_model_deployment.py                | 47 ++++++++++++-------
 .../device_server_constants.py                |  2 +
 .../master_protocol_manager.py                | 25 +++++-----
 .../model_scheduler/worker_job_runner.py      |  4 +-
 6 files changed, 62 insertions(+), 33 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
index e18c9f730b..4aee592fca 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
@@ -165,6 +165,8 @@ class ClientConstants(object):
     CUSTOMIZED_VOLUMES_PATH_FROM_WORKSPACE_KEY = "workspace_path"
     CUSTOMIZED_VOLUMES_PATH_FROM_CONTAINER_KEY = "mount_path"
 
+    ENV_USER_ENCRYPTED_API_KEY = "FEDML_USER_ENCRYPTED_API_KEY"
+
     @staticmethod
     def get_fedml_home_dir():
         home_dir = expanduser("~")
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
index c941c42102..b0021aa7df 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
@@ -112,7 +112,8 @@ def set_user_setting_replica_num(self, end_point_id,
                                      replica_num: int, enable_auto_scaling: bool = False,
                                      scale_min: int = 0, scale_max: int = 0, state: str = "UNKNOWN",
                                      target_queries_per_replica: int = 60, aggregation_window_size_seconds: int = 60,
-                                     scale_down_delay_seconds: int = 120, timeout_s: int = 30
+                                     scale_down_delay_seconds: int = 120, timeout_s: int = 30,
+                                     user_encrypted_api_key: str = ""
                                      ) -> bool:
         """
         Key: FEDML_MODEL_ENDPOINT_REPLICA_USER_SETTING_TAG--<end_point_id>
@@ -139,7 +140,8 @@ def set_user_setting_replica_num(self, end_point_id,
             "target_queries_per_replica": target_queries_per_replica,
             "aggregation_window_size_seconds": aggregation_window_size_seconds,
             "scale_down_delay_seconds": scale_down_delay_seconds,
-            ServerConstants.INFERENCE_REQUEST_TIMEOUT_KEY: timeout_s
+            ServerConstants.INFERENCE_REQUEST_TIMEOUT_KEY: timeout_s,
+            ServerConstants.USER_ENCRYPTED_API_KEY: user_encrypted_api_key
         }
         try:
             self.redis_connection.set(self.get_user_setting_replica_num_key(end_point_id), json.dumps(replica_num_dict))
@@ -169,6 +171,15 @@ def update_user_setting_replica_num(self, end_point_id: str, state: str = "UNKNO
             return False
         return True
 
+    def get_user_encrypted_api_key(self, end_point_id: str) -> str:
+        try:
+            replica_num_dict = self.redis_connection.get(self.get_user_setting_replica_num_key(end_point_id))
+            replica_num_dict = json.loads(replica_num_dict)
+            return replica_num_dict.get(ServerConstants.USER_ENCRYPTED_API_KEY, "")
+        except Exception as e:
+            logging.error(e)
+            return ""
+
     def get_all_endpoints_user_setting(self) -> List[dict]:
         """
         Return a list of dict, each dict is the user setting of an endpoint.
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index 552d7ffaca..9416d243d2 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -19,6 +19,7 @@
 from fedml.computing.scheduler.comm_utils.job_utils import JobRunnerUtils
 from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants
 from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants
+from fedml.computing.scheduler.model_scheduler.device_server_constants import ServerConstants
 from fedml.computing.scheduler.model_scheduler.device_model_cache import FedMLModelCache
 from ..scheduler_core.compute_utils import ComputeUtils
 from ..comm_utils.container_utils import ContainerUtils
@@ -59,7 +60,9 @@ def request_gpu_ids_on_deployment(edge_id, end_point_id, num_gpus=None, master_d
 def start_deployment(end_point_id, end_point_name, model_id, model_version,
                      model_storage_local_path, inference_model_name, inference_engine,
                      infer_host, master_ip, edge_id, master_device_id=None, replica_rank=0,
-                     gpu_per_replica=1):
+                     gpu_per_replica=1, request_json=None):
+    if request_json is None:
+        request_json = dict()
     logging.info("[Worker] Model deployment is starting...")
 
     # Real gpu per replica (container-level)
@@ -219,22 +222,9 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
     if device_mapping:
         host_config_dict.update(device_mapping)
 
-    # Environment variables
-    enable_custom_image = False if relative_entry_fedml_format != "" else True
-    if not enable_custom_image:
-        # For some image, the default user is root. Unified to fedml.
-        environment["HOME"] = "/home/fedml"
-    environment["BOOTSTRAP_DIR"] = dst_bootstrap_dir
-    environment["FEDML_CURRENT_RUN_ID"] = end_point_id
-    environment["FEDML_CURRENT_EDGE_ID"] = edge_id
-    environment["FEDML_REPLICA_RANK"] = replica_rank
-    environment["FEDML_CURRENT_VERSION"] = fedml.get_env_version()
-    environment["FEDML_ENV_VERSION"] = fedml.get_env_version()
-    environment["FEDML_ENV_LOCAL_ON_PREMISE_PLATFORM_HOST"] = fedml.get_local_on_premise_platform_host()
-    environment["FEDML_ENV_LOCAL_ON_PREMISE_PLATFORM_PORT"] = fedml.get_local_on_premise_platform_port()
-    if extra_envs is not None:
-        for key in extra_envs:
-            environment[key] = extra_envs[key]
+    # Handle the environment variables
+    handle_env_vars(environment, relative_entry_fedml_format, extra_envs, dst_bootstrap_dir,
+                    end_point_id, edge_id, replica_rank, request_json)
 
     # Create the container
     try:
@@ -612,6 +602,29 @@ def handle_volume_mount(volumes, binds, environment, relative_entry_fedml_format
             logging.warning(f"{workspace_path} does not exist, skip mounting it to the container")
 
 
+def handle_env_vars(environment, relative_entry_fedml_format, extra_envs, dst_bootstrap_dir, end_point_id, edge_id,
+                    replica_rank, request_json):
+    enable_custom_image = False if relative_entry_fedml_format != "" else True
+    if not enable_custom_image:
+        # For some image, the default user is root. Unified to fedml.
+        environment["HOME"] = "/home/fedml"
+
+    if request_json and ServerConstants.USER_ENCRYPTED_API_KEY in request_json:
+        environment[ClientConstants.ENV_USER_ENCRYPTED_API_KEY] = request_json[ServerConstants.USER_ENCRYPTED_API_KEY]
+
+    environment["BOOTSTRAP_DIR"] = dst_bootstrap_dir
+    environment["FEDML_CURRENT_RUN_ID"] = end_point_id
+    environment["FEDML_CURRENT_EDGE_ID"] = edge_id
+    environment["FEDML_REPLICA_RANK"] = replica_rank
+    environment["FEDML_CURRENT_VERSION"] = fedml.get_env_version()
+    environment["FEDML_ENV_VERSION"] = fedml.get_env_version()
+    environment["FEDML_ENV_LOCAL_ON_PREMISE_PLATFORM_HOST"] = fedml.get_local_on_premise_platform_host()
+    environment["FEDML_ENV_LOCAL_ON_PREMISE_PLATFORM_PORT"] = fedml.get_local_on_premise_platform_port()
+    if extra_envs is not None:
+        for key in extra_envs:
+            environment[key] = extra_envs[key]
+
+
 def check_container_readiness(inference_http_port, infer_host="127.0.0.1", request_input_example=None,
                               readiness_check=ClientConstants.READINESS_PROBE_DEFAULT):
     response_from_client_container = is_client_inference_container_ready(
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
index f86056229e..c41b150bc2 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
@@ -108,6 +108,8 @@ class ServerConstants(object):
 
     INFERENCE_REQUEST_TIMEOUT_KEY = "request_timeout_sec"
     INFERENCE_REQUEST_TIMEOUT_DEFAULT = 30
+
+    USER_ENCRYPTED_API_KEY = "user_encrypted_api_key"
     # -----End-----
 
     MODEL_DEPLOYMENT_STAGE1 = {"index": 1, "text": "ReceivedRequest"}
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index 7bfad2f3eb..5e16d5a02a 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -158,25 +158,20 @@ def callback_start_deployment(self, topic, payload):
         run_id = request_json["end_point_id"]
         end_point_name = request_json["end_point_name"]
         token = request_json["token"]
-        user_id = request_json["user_id"]
-        user_name = request_json["user_name"]
-        device_ids = request_json["device_ids"]
         device_objs = request_json["device_objs"]
+        enable_auto_scaling = request_json.get("enable_auto_scaling", False)
+        desired_replica_num = request_json.get("desired_replica_num", 1)
+        target_queries_per_replica = request_json.get("target_queries_per_replica", 10)
+        aggregation_window_size_seconds = request_json.get("aggregation_window_size_seconds", 60)
+        scale_down_delay_seconds = request_json.get("scale_down_delay_seconds", 120)
+        user_encrypted_api_key = request_json.get("encrypted_api_key", "")
 
         model_config = request_json["model_config"]
         model_name = model_config["model_name"]
         model_version = model_config["model_version"]
         model_id = model_config["model_id"]
-        model_storage_url = model_config["model_storage_url"]
         scale_min = model_config.get("instance_scale_min", 0)
         scale_max = model_config.get("instance_scale_max", 0)
-        inference_engine = model_config.get("inference_engine", 0)
-        enable_auto_scaling = request_json.get("enable_auto_scaling", False)
-        desired_replica_num = request_json.get("desired_replica_num", 1)
-
-        target_queries_per_replica = request_json.get("target_queries_per_replica", 10)
-        aggregation_window_size_seconds = request_json.get("aggregation_window_size_seconds", 60)
-        scale_down_delay_seconds = request_json.get("scale_down_delay_seconds", 120)
 
         model_config_parameters = request_json.get("parameters", {})
         timeout_s = model_config_parameters.get("request_timeout_sec", 30)
@@ -193,6 +188,12 @@ def callback_start_deployment(self, topic, payload):
             request_json["end_point_id"])
         request_json["is_fresh_endpoint"] = True if endpoint_device_info is None else False
 
+        if user_encrypted_api_key == "":
+            user_encrypted_api_key = (FedMLModelCache.get_instance(self.redis_addr, self.redis_port).
+                                      get_user_encrypted_api_key(run_id))
+            if user_encrypted_api_key != "":    # Pass the cached key to the workers
+                request_json[ServerConstants.USER_ENCRYPTED_API_KEY] = user_encrypted_api_key
+
         # Save the user setting (about replica number) of this run to Redis, if existed, update it
         FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_user_setting_replica_num(
             end_point_id=run_id, end_point_name=end_point_name, model_name=model_name, model_version=model_version,
@@ -201,7 +202,7 @@ def callback_start_deployment(self, topic, payload):
             aggregation_window_size_seconds=aggregation_window_size_seconds,
             target_queries_per_replica=target_queries_per_replica,
             scale_down_delay_seconds=int(scale_down_delay_seconds),
-            timeout_s=timeout_s
+            timeout_s=timeout_s, user_encrypted_api_key=user_encrypted_api_key
         )
 
         # Start log processor for current run
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
index a892412d29..113a20e825 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
@@ -250,7 +250,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                             inference_model_name=model_name, inference_engine=inference_engine,
                             infer_host=worker_ip, master_ip=master_ip, edge_id=self.edge_id,
                             master_device_id=device_ids[0], replica_rank=rank,
-                            gpu_per_replica=int(self.replica_handler.gpu_per_replica)
+                            gpu_per_replica=int(self.replica_handler.gpu_per_replica), request_json=self.request_json
                         )
                 except Exception as e:
                     inference_output_url = ""
@@ -373,7 +373,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                             inference_model_name=model_name, inference_engine=inference_engine,
                             infer_host=worker_ip, master_ip=master_ip, edge_id=self.edge_id,
                             master_device_id=device_ids[0], replica_rank=rank,
-                            gpu_per_replica=int(self.replica_handler.gpu_per_replica)
+                            gpu_per_replica=int(self.replica_handler.gpu_per_replica), request_json=self.request_json
                         )
                 except Exception as e:
                     inference_output_url = ""

From f412a2637b6ae83f9fc1ecaa60b5205d4d43507d Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Fri, 21 Jun 2024 21:36:12 +0000
Subject: [PATCH 186/282] [Deploy] Nit.

---
 .../scheduler/model_scheduler/device_server_constants.py        | 2 +-
 .../scheduler/model_scheduler/master_protocol_manager.py        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
index c41b150bc2..00f0fe73bf 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
@@ -109,7 +109,7 @@ class ServerConstants(object):
     INFERENCE_REQUEST_TIMEOUT_KEY = "request_timeout_sec"
     INFERENCE_REQUEST_TIMEOUT_DEFAULT = 30
 
-    USER_ENCRYPTED_API_KEY = "user_encrypted_api_key"
+    USER_ENCRYPTED_API_KEY = "encrypted_api_key"
     # -----End-----
 
     MODEL_DEPLOYMENT_STAGE1 = {"index": 1, "text": "ReceivedRequest"}
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index 5e16d5a02a..9e0d51b588 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -164,7 +164,7 @@ def callback_start_deployment(self, topic, payload):
         target_queries_per_replica = request_json.get("target_queries_per_replica", 10)
         aggregation_window_size_seconds = request_json.get("aggregation_window_size_seconds", 60)
         scale_down_delay_seconds = request_json.get("scale_down_delay_seconds", 120)
-        user_encrypted_api_key = request_json.get("encrypted_api_key", "")
+        user_encrypted_api_key = request_json.get(ServerConstants.USER_ENCRYPTED_API_KEY, "")
 
         model_config = request_json["model_config"]
         model_name = model_config["model_name"]

From d6c9411774318e812e7f0b4dd73478f2a88e4cb3 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Fri, 21 Jun 2024 15:00:33 -0700
Subject: [PATCH 187/282] [Deploy] Remove example.

---
 .../lorax/custom_inference_image.yaml            | 16 ----------------
 1 file changed, 16 deletions(-)
 delete mode 100644 python/examples/deploy/custom_inference_image/lorax/custom_inference_image.yaml

diff --git a/python/examples/deploy/custom_inference_image/lorax/custom_inference_image.yaml b/python/examples/deploy/custom_inference_image/lorax/custom_inference_image.yaml
deleted file mode 100644
index 41cbe501d2..0000000000
--- a/python/examples/deploy/custom_inference_image/lorax/custom_inference_image.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-workspace: "./"
-
-enable_serverless_container: true
-inference_image_name: "ghcr.io/predibase/lorax:main"
-container_run_command: "--model-id mistralai/Mistral-7B-Instruct-v0.1"
-
-environment_variables:
-  HUGGING_FACE_HUB_TOKEN: ""
-
-readiness_probe:
-  httpGet:
-    path: "/health"
-
-port: 80
-
-deploy_timeout_sec: 1600

From 5ae6904960987baff3e57a9e55bf369d9918f7e0 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Tue, 25 Jun 2024 18:21:06 +0800
Subject: [PATCH 188/282] [CoreEngine] Make the job-stopping feature work.

---
 .../scheduler/master/base_master_agent.py          |  3 ++-
 .../scheduler/master/base_master_job_runner.py     | 14 +++++---------
 .../master/base_master_job_runner_manager.py       |  6 ++----
 .../master/base_master_protocol_manager.py         | 12 ++++++++++++
 .../scheduler_core/scheduler_base_job_runner.py    |  3 ++-
 .../scheduler_base_job_runner_manager.py           |  1 +
 6 files changed, 24 insertions(+), 15 deletions(-)

diff --git a/python/fedml/computing/scheduler/master/base_master_agent.py b/python/fedml/computing/scheduler/master/base_master_agent.py
index e7d18f64f7..30cf5da1c9 100755
--- a/python/fedml/computing/scheduler/master/base_master_agent.py
+++ b/python/fedml/computing/scheduler/master/base_master_agent.py
@@ -72,7 +72,8 @@ def logout():
         sys_utils.cleanup_all_fedml_server_api_processes()
 
     def stop(self, kill_process=False):
-        self.protocol_mgr.stop(kill_process=kill_process)
+        if self.protocol_mgr is not None:
+            self.protocol_mgr.stop(kill_process=kill_process)
 
     def _create_protocol_manager(self, role, login_result):
         if self.protocol_mgr is not None:
diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py
index 32b285dc7b..fdfff143aa 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py
@@ -1,4 +1,3 @@
-
 import json
 import logging
 import multiprocessing
@@ -414,9 +413,9 @@ def _generate_job_runner_instance(self, args, run_id=None, request_json=None, ag
         return None
 
     def start_runner_process(
-        self, run_id, request_json, edge_id=None, is_server_job=False,
-        sender_message_queue=None, listener_message_queue=None,
-        status_center_queue=None, process_name=None
+            self, run_id, request_json, edge_id=None, is_server_job=False,
+            sender_message_queue=None, listener_message_queue=None,
+            status_center_queue=None, process_name=None
     ):
         server_runner = self._generate_job_runner_instance(
             self.args, run_id=run_id, request_json=request_json,
@@ -440,7 +439,7 @@ def start_runner_process(
                     self.run_process_event, self.run_process_completed_event, self.run_edge_id_status_queue,
                     self.run_edge_device_info_queue, self.run_metrics_queue, self.run_events_queue,
                     self.run_artifacts_queue, self.run_logs_queue, self.run_edge_device_info_global_queue,
-                    self.run_extend_queue_list, sender_message_queue, listener_message_queue,  status_center_queue,
+                    self.run_extend_queue_list, sender_message_queue, listener_message_queue, status_center_queue,
                     process_name,
                 )
             )
@@ -450,7 +449,7 @@ def start_runner_process(
                     self.run_process_event, self.run_process_completed_event, self.run_edge_id_status_queue,
                     self.run_edge_device_info_queue, self.run_metrics_queue, self.run_events_queue,
                     self.run_artifacts_queue, self.run_logs_queue, self.run_edge_device_info_global_queue,
-                    self.run_extend_queue_list, sender_message_queue, listener_message_queue,  status_center_queue,
+                    self.run_extend_queue_list, sender_message_queue, listener_message_queue, status_center_queue,
                     process_name,
                 )
             )
@@ -731,6 +730,3 @@ def should_process_async_cluster(self):
 
     def get_client_id_list(self, server_edge_id_list):
         return server_edge_id_list
-
-
-
diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
index f462596cbf..39f7438696 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
@@ -2,7 +2,6 @@
 import json
 import logging
 import multiprocessing
-import os
 import platform
 import time
 from abc import ABC
@@ -11,7 +10,6 @@
 import fedml
 from .cloud_server_manager import FedMLCloudServerManager
 from ..comm_utils.run_process_utils import RunProcessUtils
-from ..scheduler_core.general_constants import GeneralConstants
 from ..scheduler_core.scheduler_base_job_runner_manager import FedMLSchedulerBaseJobRunnerManager
 from ..scheduler_core.account_manager import FedMLAccountManager
 
@@ -67,10 +65,10 @@ def stop_job_runner(
 
             run_id_str = str(run_id)
             if self.master_agent_instance_map.get(run_id_str, None) is not None:
-                self.master_agent_instance_map.get(run_id_str).stop()
+                self.master_agent_instance_map.get(run_id_str).stop(kill_process=True)
                 self.master_agent_instance_map.pop(run_id_str)
 
-            if run_as_cloud_server:
+            if use_local_process_as_cloud_server:
                 time.sleep(1)
                 RunProcessUtils.kill_process(self.cloud_run_process_map[run_id_str].pid)
 
diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
index 9d3b492758..05529f8c8e 100755
--- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -2,6 +2,7 @@
 import base64
 import json
 import logging
+import time
 
 import fedml
 from ..comm_utils.constants import SchedulerConstants
@@ -142,6 +143,7 @@ def on_agent_communication_connected(self, mqtt_client_object):
     def callback_start_train(self, topic=None, payload=None):
         # Fetch config from MLOps
         # noinspection PyBroadException
+
         try:
             MLOpsConfigs.fetch_all_configs()
         except Exception:
@@ -290,6 +292,16 @@ def callback_stop_train(self, topic, payload, use_payload=None):
             server_agent_id = self.edge_id
             topic_stop_train_to_cloud_server = f"mlops/flserver_agent_{server_id}/stop_train"
             self.message_center.send_message(topic_stop_train_to_cloud_server, payload)
+
+            time.sleep(2)
+            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, server_id)
+            self._get_job_runner_manager().stop_job_runner(
+                run_id, args=self.args, server_id=server_id, request_json=None,
+                run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server,
+                use_local_process_as_cloud_server=self.use_local_process_as_cloud_server)
+            self.generate_status_report(run_id, server_id, server_agent_id=server_agent_id). \
+                report_server_id_status(run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED,
+                                        edge_id=server_id, server_id=server_id)
             return
 
         # Reset all edge status and server status
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
index 7b0d00f53d..7175032375 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -615,7 +615,8 @@ def job_error_processor(self, error_list):
 
     def start_runner_process(
             self, run_id, edge_id, request_json,  cuda_visible_gpu_ids_str=None,
-            sender_message_queue=None, status_center_queue=None, process_name=None
+            sender_message_queue=None, listener_message_queue=None,
+            status_center_queue=None, process_name=None
     ):
         return None
 
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
index 8edf57fcbb..ad32f78631 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
@@ -29,6 +29,7 @@ def start_job_runner(
         )
         self.job_runners[run_id_str].start_runner_process(
             run_id, request_json, edge_id=edge_id,
+            cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str,
             sender_message_queue=sender_message_queue,
             listener_message_queue=listener_message_queue,
             status_center_queue=status_center_queue,

From fa44ccce0a553f7c7d7dcceb5312b830061e718f Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Tue, 25 Jun 2024 11:48:17 -0700
Subject: [PATCH 189/282] [Deploy] Return custom path other than /predict.

---
 .../device_client_constants.py                |   1 +
 .../device_model_deployment.py                | 108 +++++++++++-------
 2 files changed, 67 insertions(+), 42 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
index 4aee592fca..4006e50726 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
@@ -164,6 +164,7 @@ class ClientConstants(object):
     CUSTOMIZED_VOLUMES_MOUNT_KEY = "volumes"
     CUSTOMIZED_VOLUMES_PATH_FROM_WORKSPACE_KEY = "workspace_path"
     CUSTOMIZED_VOLUMES_PATH_FROM_CONTAINER_KEY = "mount_path"
+    CUSTOMIZED_SERVICE_KEY = "service"
 
     ENV_USER_ENCRYPTED_API_KEY = "FEDML_USER_ENCRYPTED_API_KEY"
 
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index 9416d243d2..25fc1e1d64 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -87,36 +87,10 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
         inference_image_name, image_pull_policy, registry_name, registry_provider, \
             registry_user_name, registry_user_password = parse_image_registry_related_config(config)
 
-        # Bootstrap, job and entrypoint related
-        dst_model_serving_dir = "/home/fedml/models_serving"
-        bootstrap_cmds_str_frm_yaml = config.get('bootstrap', "")
-        job_cmds_str_frm_yaml = config.get('job', "")
-
-        if bootstrap_cmds_str_frm_yaml != "" or job_cmds_str_frm_yaml != "":
-            auto_gen_bootstrap_file_name = "fedml-deploy-bootstrap-entry-auto-gen.sh"
-            src_bootstrap_file_path = os.path.join(model_storage_local_path, auto_gen_bootstrap_file_name)
-            with open(src_bootstrap_file_path, 'w') as f:
-                f.write("cd /home/fedml/models_serving/\n")
-                f.write(bootstrap_cmds_str_frm_yaml)
-                f.write("\n")
-                f.write("cd /home/fedml/models_serving/\n")
-                f.write(job_cmds_str_frm_yaml)
-        else:
-            src_bootstrap_file_path = ""
-
-        if src_bootstrap_file_path != "":
-            dst_bootstrap_dir = os.path.join(dst_model_serving_dir, auto_gen_bootstrap_file_name)
-        else:
-            dst_bootstrap_dir = ""
-
-        # If the entry point is in fedml format (e.g., "main.py")
-        relative_entry_fedml_format = config.get('entry_point', "")
-
-        # User indicate either fedml format python main entry filename or entry command
-        enable_serverless_container = config.get(ClientConstants.ENABLE_SERVERLESS_CONTAINER_KEY, False)
-        customized_image_entry_cmd = config.get('container_run_command', None)  # Could be str or list
-        customized_readiness_check = config.get('readiness_probe', ClientConstants.READINESS_PROBE_DEFAULT)
-        customized_liveliness_check = config.get('liveness_probe', ClientConstants.LIVENESS_PROBE_DEFAULT)
+        # Service app related
+        dst_bootstrap_dir, dst_model_serving_dir, relative_entry_fedml_format, enable_serverless_container, \
+            customized_image_entry_cmd, customized_readiness_check, customized_liveliness_check, customized_uri = \
+            handle_container_service_app(config, model_storage_local_path)
 
         # Storage related
         src_code_dir = os.path.join(model_storage_local_path, config.get('source_code_dir', ""))
@@ -451,7 +425,7 @@ def parse_image_registry_related_config(config):
 
 def is_client_inference_container_ready(infer_url_host, inference_http_port,
                                         readiness_check=ClientConstants.READINESS_PROBE_DEFAULT,
-                                        request_input_example=None, container_id=None):
+                                        request_input_example=None, container_id=None, customized_uri=None):
     # Construct the model metadata (input and output)
     model_metadata = {}
     if request_input_example is not None and len(request_input_example) > 0:
@@ -461,6 +435,7 @@ def is_client_inference_container_ready(infer_url_host, inference_http_port,
     model_metadata["outputs"] = []
     model_metadata["type"] = "default"
 
+    # Check the readiness of the container
     if readiness_check == ClientConstants.READINESS_PROBE_DEFAULT:
         default_client_container_ready_url = "http://{}:{}/ready".format("0.0.0.0", inference_http_port)
         response = None
@@ -486,27 +461,38 @@ def is_client_inference_container_ready(infer_url_host, inference_http_port,
                 else:
                     if not check_path.startswith("/"):
                         check_path = "/" + check_path
-                readiness_check_url = f"http://{infer_url_host}:{inference_http_port}{check_path}"
-
                 response = None
                 try:
-                    response = requests.get(readiness_check_url)
+                    response = requests.get(f"http://{infer_url_host}:{inference_http_port}{check_path}")
                 except:
                     pass
                 if not response or response.status_code != 200:
                     return "", "", {}, {}
-
-                return readiness_check_url, None, model_metadata, None
             else:
                 logging.error("'path' is not specified in httpGet readiness check")
                 return "", "", {}, {}
         elif "exec" in readiness_check:
-            # TODO(raphael): Support arbitrary readiness check command by using
-            #  container id and docker exec
-            return "http://{}:{}/".format(infer_url_host, inference_http_port), None, model_metadata, None
+            # TODO(raphael): Support arbitrary readiness check command by using container id and docker exec
+            pass
         else:
             # Ref K8S, if no readiness check, we assume the container is ready immediately
-            return "http://{}:{}/".format(infer_url_host, inference_http_port), None, model_metadata, None
+            pass
+
+        # Construct the customized URI
+        path = ""
+        if customized_uri is not None:
+            if "httpPost" in customized_uri and "path" in customized_uri["httpPost"]:
+                path = customized_uri["httpPost"]["path"]
+                if not isinstance(path, str):
+                    logging.error(f"Invalid path type: {path}, expected str")
+                    return "", "", {}, {}
+                else:
+                    if not path.startswith("/"):
+                        path = "/" + path
+            # TODO(raphael): Finalized more customized URI types
+        readiness_check_url = f"http://{infer_url_host}:{inference_http_port}{path}"
+
+        return readiness_check_url, None, model_metadata, None
 
 
 def _handle_union_volume_mount(binds, volumes, environment, data_cache_dir_input=None):
@@ -602,6 +588,43 @@ def handle_volume_mount(volumes, binds, environment, relative_entry_fedml_format
             logging.warning(f"{workspace_path} does not exist, skip mounting it to the container")
 
 
+def handle_container_service_app(config, model_storage_local_path):
+    # Bootstrap, job and entrypoint related
+    dst_model_serving_dir = "/home/fedml/models_serving"
+    bootstrap_cmds_str_frm_yaml = config.get('bootstrap', "")
+    job_cmds_str_frm_yaml = config.get('job', "")
+
+    auto_gen_bootstrap_file_name = "fedml-deploy-bootstrap-entry-auto-gen.sh"
+    if bootstrap_cmds_str_frm_yaml != "" or job_cmds_str_frm_yaml != "":
+        src_bootstrap_file_path = os.path.join(model_storage_local_path, auto_gen_bootstrap_file_name)
+        with open(src_bootstrap_file_path, 'w') as f:
+            f.write("cd /home/fedml/models_serving/\n")
+            f.write(bootstrap_cmds_str_frm_yaml)
+            f.write("\n")
+            f.write("cd /home/fedml/models_serving/\n")
+            f.write(job_cmds_str_frm_yaml)
+    else:
+        src_bootstrap_file_path = ""
+
+    if src_bootstrap_file_path != "":
+        dst_bootstrap_dir = os.path.join(dst_model_serving_dir, auto_gen_bootstrap_file_name)
+    else:
+        dst_bootstrap_dir = ""
+
+    # If the entry point is in fedml format (e.g., "main.py")
+    relative_entry_fedml_format = config.get('entry_point', "")
+
+    # User indicate either fedml format python main entry filename or entry command
+    enable_serverless_container = config.get(ClientConstants.ENABLE_SERVERLESS_CONTAINER_KEY, False)
+    customized_image_entry_cmd = config.get('container_run_command', None)  # Could be str or list
+    customized_readiness_check = config.get('readiness_probe', ClientConstants.READINESS_PROBE_DEFAULT)
+    customized_liveliness_check = config.get('liveness_probe', ClientConstants.LIVENESS_PROBE_DEFAULT)
+    customized_uri = config.get(ClientConstants.CUSTOMIZED_SERVICE_KEY, "")
+
+    return (dst_bootstrap_dir, dst_model_serving_dir, relative_entry_fedml_format, enable_serverless_container,
+            customized_image_entry_cmd, customized_readiness_check, customized_liveliness_check, customized_uri)
+
+
 def handle_env_vars(environment, relative_entry_fedml_format, extra_envs, dst_bootstrap_dir, end_point_id, edge_id,
                     replica_rank, request_json):
     enable_custom_image = False if relative_entry_fedml_format != "" else True
@@ -626,10 +649,11 @@ def handle_env_vars(environment, relative_entry_fedml_format, extra_envs, dst_bo
 
 
 def check_container_readiness(inference_http_port, infer_host="127.0.0.1", request_input_example=None,
-                              readiness_check=ClientConstants.READINESS_PROBE_DEFAULT):
+                              readiness_check=ClientConstants.READINESS_PROBE_DEFAULT,
+                              customized_uri=None):
     response_from_client_container = is_client_inference_container_ready(
         infer_host, inference_http_port, readiness_check=readiness_check,
-        request_input_example=request_input_example)
+        request_input_example=request_input_example, customized_uri=customized_uri)
 
     return response_from_client_container
 

From bd89be1a1f01f0ff1528cd1766c2a22a25af5975 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Tue, 25 Jun 2024 11:50:04 -0700
Subject: [PATCH 190/282] [Deploy] Add sqlite backup for
 get_all_deployment_result_list.

---
 .../scheduler/comm_utils/constants.py         |  1 -
 .../scheduler/comm_utils/job_monitor.py       |  2 +-
 .../model_scheduler/device_model_cache.py     | 30 +++++++++--
 .../model_scheduler/device_model_db.py        | 51 +++++++++++++++++--
 .../model_scheduler/worker_job_runner.py      |  2 +-
 5 files changed, 75 insertions(+), 11 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/constants.py b/python/fedml/computing/scheduler/comm_utils/constants.py
index 67b9d8b14b..6e46ce207b 100644
--- a/python/fedml/computing/scheduler/comm_utils/constants.py
+++ b/python/fedml/computing/scheduler/comm_utils/constants.py
@@ -114,7 +114,6 @@ class SchedulerConstants:
     REDIS_PORT = "6379"
     REDIS_PASSWORD = "fedml_default"
 
-
     @staticmethod
     def get_log_source(run_json):
         run_config = run_json.get("run_config", {})
diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
index 667a54e565..b8237d93ba 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
@@ -210,6 +210,7 @@ def monitor_replicas_number():
             endpoint_replicas_details = {}
             if isinstance(endpoint_detail, str):
                 endpoint_replicas_details = json.loads(endpoint_detail)
+                # TODO: Check out this nested json
                 if isinstance(endpoint_replicas_details, str):
                     endpoint_replicas_details = json.loads(endpoint_replicas_details)
 
@@ -222,7 +223,6 @@ def monitor_replicas_number():
                     endpoint_replica_details["end_point_id"], 0) + 1
 
         for endpoint_id, num_replica in res_to_mlops.items():
-            curr_version = fedml.get_env_version()
             num_replica_url_path = "fedmlModelServer/api/v1/endpoint/replica-info"
             mlops_prefix = fedml._get_backend_service()
             url = f"{mlops_prefix}/{num_replica_url_path}"
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
index b0021aa7df..1836971075 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
@@ -302,7 +302,27 @@ def get_all_deployment_result_list(self):
                 result_list.extend(self.redis_connection.lrange(key, 0, -1))
         except Exception as e:
             logging.error(e)
-        # TODO(Raphael): Use Sqlite for the replica backup
+
+        # Get cached results from the persist sqlite database
+        if len(result_list) <= 0:
+            db_result_list = list()
+            try:
+                db_result_list = self.model_deployment_db.get_all_deployment_results_list()
+            except Exception as e:
+                logging.error(f"Failed to get all deployment results from the database due to {e}")
+                pass
+
+            for result in db_result_list:
+                try:
+                    self.redis_connection.rpush(self.get_deployment_result_key(
+                        result["end_point_id"], result["end_point_name"], result["model_name"]),
+                        json.dumps(result["replica_info"]))
+                except Exception as e:
+                    logging.error(e)
+                    pass
+
+            for result in db_result_list:
+                result_list.append(result["replica_info"])
 
         return result_list
 
@@ -330,7 +350,8 @@ def get_deployment_status_list_size(self, end_point_id, end_point_name, model_na
         status_list = self.get_deployment_status_list(end_point_id, end_point_name, model_name)
         return len(status_list)
 
-    def get_status_item_info(self, status_item):
+    @staticmethod
+    def get_status_item_info(status_item):
         status_item_json = json.loads(status_item)
         if isinstance(status_item_json, str):
             status_item_json = json.loads(status_item_json)
@@ -341,7 +362,8 @@ def get_status_item_info(self, status_item):
             status_payload = status_item_json["status"]
         return device_id, status_payload
 
-    def get_result_item_info(self, result_item):
+    @staticmethod
+    def get_result_item_info(result_item):
         result_item_json = json.loads(result_item)
         if isinstance(result_item_json, str):
             result_item_json = json.loads(result_item_json)
@@ -386,7 +408,7 @@ def get_idle_device(self,
             return None, None
 
         # # Randomly shuffle
-        # shuffle the list of deployed devices and get the first one as the target idle device.
+        # Shuffle the list of deployed devices and get the first one as the target idle device.
         # if len(idle_device_list) <= 0:
         #     return None, None
         # shuffle(idle_device_list)
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_db.py b/python/fedml/computing/scheduler/model_scheduler/device_model_db.py
index 09573a1d1b..606d8c010b 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_db.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_db.py
@@ -10,6 +10,7 @@
 from sqlalchemy.ext.declarative import declarative_base
 from fedml.core.common.singleton import Singleton
 from sqlalchemy.sql import text
+from typing import List, Dict
 
 Base = declarative_base()
 
@@ -42,9 +43,11 @@ def set_deployment_status(self, end_point_id, end_point_name, model_name, model_
         self.set_deployment_results_info(end_point_id, end_point_name, model_name, model_version,
                                          device_id, deployment_status=deployment_status, replica_no=replica_no)
 
-    def get_deployment_result_list(self, end_point_id, end_point_name, model_name, model_version=None):
+    def get_deployment_result_list(self, end_point_id, end_point_name, model_name, model_version=None) -> List[str]:
         """
-        query from sqlite db using e_id
+        Query the ORM records via get_deployment_results_info,
+        but (1) nest each result with cache_device_id and cache_replica_no,
+        and (2) return a list of JSON strings, so that Redis can store them.
         """
         result_list = self.get_deployment_results_info(end_point_id, end_point_name, model_name, model_version)
         ret_result_list = list()
@@ -55,6 +58,39 @@ def get_deployment_result_list(self, end_point_id, end_point_name, model_name, m
             ret_result_list.append(json.dumps(result_dict))
         return ret_result_list
 
+    def get_all_deployment_results_list(self) -> List[Dict]:
+        """
+        Similar to _get_all_deployment_results_info,
+        but return a list of json string, so that redis can store it.
+
+        return a list of dict, for each item:
+        [
+            {
+                "end_point_id": "",
+                "end_point_name": "",
+                "model_name":"",
+                "replica_res": ""   # Json string
+            },
+        ]
+        value in the dict is a string that contains the deployment result.
+        """
+        flat_ep_list = self._get_all_deployment_results_info()
+        ret_result_list = list()
+        for result in flat_ep_list:
+            result_dict = {
+                "end_point_id": result.end_point_id,
+                "end_point_name": result.end_point_name,
+                "model_name": result.model_name,
+                "replica_info": json.dumps(
+                    {
+                        "cache_device_id": result.device_id,
+                        "cache_replica_no": int(result.replica_no),
+                        "result": result.deployment_result
+                    }
+                )
+            }
+            ret_result_list.append(result_dict)
+        return ret_result_list
 
     def get_deployment_status_list(self, end_point_id, end_point_name, model_name, model_version=None):
         result_list = self.get_deployment_results_info(end_point_id, end_point_name, model_name, model_version)
@@ -156,7 +192,8 @@ def delete_deployment_run_info(self, end_point_id):
             end_point_id=f'{end_point_id}').delete()
         self.db_connection.commit()
 
-    def get_result_item_info(self, result_item):
+    @staticmethod
+    def get_result_item_info(result_item):
         result_item_json = json.loads(result_item)
         if isinstance(result_item_json, dict):
             result_item_json = json.loads(result_item)
@@ -169,7 +206,8 @@ def get_result_item_info(self, result_item):
             result_payload = result_item_json["result"]
         return device_id, replica_no, result_payload
 
-    def get_status_item_info(self, status_item):
+    @staticmethod
+    def get_status_item_info(status_item):
         status_item_json = json.loads(status_item)
         if isinstance(status_item_json, dict):
             status_item_json = json.loads(status_item)
@@ -320,6 +358,11 @@ def get_deployment_results_info(self, end_point_id, end_point_name, model_name,
                             FedMLDeploymentResultInfoModel.model_version == f'{model_version}')).all()
         return result_info
 
+    def _get_all_deployment_results_info(self):
+        self.open_job_db()
+        result_info = self.db_connection.query(FedMLDeploymentResultInfoModel).all()
+        return result_info
+
     def set_deployment_results_info(self, end_point_id, end_point_name,
                                     model_name, model_version, device_id,
                                     deployment_result=None, deployment_status=None, replica_no=None):
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
index 113a20e825..c73630fb65 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
@@ -260,7 +260,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                     logging.error("[Worker] Failed to deploy the model.")
 
                     # Send failed result back to master
-                    result_payload = self.send_deployment_results(
+                    _ = self.send_deployment_results(
                         end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
                         model_id, model_name, inference_output_url, inference_model_version, inference_port,
                         inference_engine, model_metadata, model_config)

From 43f99cf0acf9df685272fc02a5890981ac3d0ee2 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Tue, 25 Jun 2024 11:55:14 -0700
Subject: [PATCH 191/282] [Deploy] Nit.

---
 .../scheduler/model_scheduler/device_model_deployment.py       | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index 25fc1e1d64..a47f9dbc20 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -245,7 +245,8 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
     inference_output_url, running_model_version, ret_model_metadata, ret_model_config = \
         check_container_readiness(inference_http_port=inference_http_port, infer_host=infer_host,
                                   readiness_check=customized_readiness_check,
-                                  request_input_example=request_input_example)
+                                  request_input_example=request_input_example,
+                                  customized_uri=customized_uri)
 
     if inference_output_url == "":
         return running_model_name, "", None, None, None

From 766c52aaf7a5b1dd567e2b91730780a05c594d36 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Tue, 25 Jun 2024 11:59:14 -0700
Subject: [PATCH 192/282] [Deploy] Nit.

---
 .../trt-llm-openai/config.yaml                | 22 +++++++++++++++++++
 .../device_model_deployment.py                |  2 +-
 2 files changed, 23 insertions(+), 1 deletion(-)
 create mode 100644 python/examples/deploy/custom_inference_image/trt-llm-openai/config.yaml

diff --git a/python/examples/deploy/custom_inference_image/trt-llm-openai/config.yaml b/python/examples/deploy/custom_inference_image/trt-llm-openai/config.yaml
new file mode 100644
index 0000000000..1bdcf32f75
--- /dev/null
+++ b/python/examples/deploy/custom_inference_image/trt-llm-openai/config.yaml
@@ -0,0 +1,22 @@
+workspace: "./"
+
+inference_image_name: "fedml/trt-llm-openai"
+
+# The image has its self-contained cmd, no need for rewriting the command
+container_run_command: null
+
+port: 3000
+
+readiness_probe:
+  httpGet:
+    path: "/health_check"
+
+# If you do not use serverless container mode, and you want to indicate another resource path,
+# e.g. localhost:3000/v1/chat/completions, you can set the following uri:
+service:
+  httpPost:
+    path: "/v1/chat/completions"
+
+deploy_timeout_sec: 1600
+
+endpoint_api_type: "text2text_llm_openai_chat_completions"
\ No newline at end of file
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index a47f9dbc20..665bb4082e 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -233,7 +233,7 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
                 raise Exception("Failed to get the port allocation")
             time.sleep(3)
 
-    # Logging the info from the container when starting
+    # Logging the info from the container when initializing
     log_deployment_output(end_point_id, model_id, default_server_container_name,
                           ClientConstants.CMD_TYPE_RUN_DEFAULT_SERVER,
                           inference_model_name, inference_engine, inference_http_port, inference_type,

From 0c29c4990d9f8f06940d3f3a658f9ffd1f0ddc86 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Tue, 25 Jun 2024 17:22:02 -0700
Subject: [PATCH 193/282] [Deploy] Hot fix hash exist.

---
 .../computing/scheduler/model_scheduler/device_model_cache.py   | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
index b0021aa7df..0d92466169 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
@@ -990,6 +990,8 @@ def delete_endpoint_scaling_down_decision_time(self, end_point_id) -> bool:
             end_point_id))
 
     def get_pending_requests_counter(self, end_point_id) -> int:
+        if not end_point_id:
+            return 0
         # If the endpoint does not exist inside the Hash collection, set its counter to 0.
         if self.redis_connection.hexists(self.FEDML_PENDING_REQUESTS_COUNTER, end_point_id):
             return int(self.redis_connection.hget(self.FEDML_PENDING_REQUESTS_COUNTER, end_point_id))

From 36378f876018163508f03592fca556afa3a9ec8f Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Tue, 25 Jun 2024 17:52:33 -0700
Subject: [PATCH 194/282] [Deploy] Indicate worker connection type through cli
 and api.

---
 python/fedml/api/__init__.py       | 12 ++++++++----
 python/fedml/api/modules/device.py |  8 +++++---
 python/fedml/cli/modules/login.py  | 12 ++++++++++--
 3 files changed, 23 insertions(+), 9 deletions(-)

diff --git a/python/fedml/api/__init__.py b/python/fedml/api/__init__.py
index f753e4255b..b03c72b675 100755
--- a/python/fedml/api/__init__.py
+++ b/python/fedml/api/__init__.py
@@ -213,16 +213,20 @@ def fedml_build(platform, type, source_folder, entry_point, config_folder, dest_
 
 def login(api_key, computing, server, supplier,
           master_inference_gateway_port: int = ServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
-          worker_inference_proxy_port: int = ClientConstants.LOCAL_CLIENT_API_PORT):
-    device_bind(api_key, computing, server, supplier, master_inference_gateway_port, worker_inference_proxy_port)
+          worker_inference_proxy_port: int = ClientConstants.LOCAL_CLIENT_API_PORT,
+          worker_connection_type: str = ClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT):
+    device_bind(api_key, computing, server, supplier, master_inference_gateway_port, worker_inference_proxy_port,
+                worker_connection_type)
 
 
 def logout(computing, server):
     device_unbind(computing, server)
 
 
-def device_bind(api_key, computing, server, supplier, master_inference_gateway_port, worker_inference_proxy_port):
-    device.bind(api_key, computing, server, supplier, master_inference_gateway_port, worker_inference_proxy_port)
+def device_bind(api_key, computing, server, supplier, master_inference_gateway_port, worker_inference_proxy_port,
+                worker_connection_type):
+    device.bind(api_key, computing, server, supplier, master_inference_gateway_port, worker_inference_proxy_port,
+                worker_connection_type)
 
 
 def device_unbind(computing, server):
diff --git a/python/fedml/api/modules/device.py b/python/fedml/api/modules/device.py
index 14591147a6..7c4e52c8b5 100644
--- a/python/fedml/api/modules/device.py
+++ b/python/fedml/api/modules/device.py
@@ -21,7 +21,8 @@
 def bind(
         api_key, computing, server, supplier,
         master_inference_gateway_port=DeviceServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
-        worker_inference_proxy_port=DeviceClientConstants.LOCAL_CLIENT_API_PORT
+        worker_inference_proxy_port=DeviceClientConstants.LOCAL_CLIENT_API_PORT,
+        worker_connection_type=DeviceClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT
 ):
     userid = api_key
     runner_cmd = "{}"
@@ -47,13 +48,13 @@ def bind(
     _bind(
         userid, computing, server,
         api_key, role, runner_cmd, device_id, os_name,
-        docker, master_inference_gateway_port, worker_inference_proxy_port)
+        docker, master_inference_gateway_port, worker_inference_proxy_port, worker_connection_type)
 
 
 def _bind(
         userid, computing, server,
         api_key, role, runner_cmd, device_id, os_name,
-        docker, master_inference_gateway_port, worker_inference_proxy_port):
+        docker, master_inference_gateway_port, worker_inference_proxy_port, worker_connection_type):
     fedml.load_env()
     if os.getenv(ModuleConstants.ENV_FEDML_INFER_HOST) is None:
         fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_HOST, SchedulerConstants.REDIS_INFER_HOST)
@@ -66,6 +67,7 @@ def _bind(
 
     fedml.set_env_kv(DeviceServerConstants.ENV_MASTER_INFERENCE_PORT_KEY, str(master_inference_gateway_port))
     fedml.set_env_kv(DeviceClientConstants.ENV_CLIENT_PROXY_PORT_KEY, str(worker_inference_proxy_port))
+    fedml.set_env_kv(DeviceClientConstants.ENV_CONNECTION_TYPE_KEY, worker_connection_type)
 
     url = fedml._get_backend_service()
     platform_name = platform.system()
diff --git a/python/fedml/cli/modules/login.py b/python/fedml/cli/modules/login.py
index f3c982f456..7ec4191a3e 100644
--- a/python/fedml/cli/modules/login.py
+++ b/python/fedml/cli/modules/login.py
@@ -67,10 +67,17 @@
     default=ClientConstants.LOCAL_CLIENT_API_PORT,
     help="The port for worker inference proxy.",
 )
+@click.option(
+    "--worker_connection_type",
+    "-wct",
+    type=str,
+    default=ClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT,
+    help="The connection type for worker inference proxy.",
+)
 def fedml_login(
         api_key, version, compute_node, server, provider, deploy_worker_num,
         local_on_premise_platform, local_on_premise_platform_port,
-        master_inference_gateway_port, worker_inference_proxy_port
+        master_inference_gateway_port, worker_inference_proxy_port, worker_connection_type
 ):
     fedml.set_env_version(version)
     fedml.set_local_on_premise_platform_host(local_on_premise_platform)
@@ -84,4 +91,5 @@ def fedml_login(
         print(f"Maybe you are using account id to login, we will try to login with account {api_key}.")
         pass
     os.environ["FEDML_MODEL_WORKER_NUM"] = str(deploy_worker_num)
-    fedml.api.login(api_key, compute_node, server, provider, master_inference_gateway_port, worker_inference_proxy_port)
+    fedml.api.login(api_key, compute_node, server, provider, master_inference_gateway_port,
+                    worker_inference_proxy_port, worker_connection_type)

From 5097ff29bf48b7f6d8c097721d96e44f421a4192 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Tue, 25 Jun 2024 18:01:29 -0700
Subject: [PATCH 195/282] [Deploy] Nit.

---
 .../scheduler/model_scheduler/device_model_cache.py         | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
index 0d92466169..7e79126fa6 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
@@ -989,11 +989,9 @@ def delete_endpoint_scaling_down_decision_time(self, end_point_id) -> bool:
             self.FEDML_MODEL_ENDPOINT_SCALING_DOWN_DECISION_TIME_TAG,
             end_point_id))
 
-    def get_pending_requests_counter(self, end_point_id) -> int:
-        if not end_point_id:
-            return 0
+    def get_pending_requests_counter(self, end_point_id=None) -> int:
         # If the endpoint does not exist inside the Hash collection, set its counter to 0.
-        if self.redis_connection.hexists(self.FEDML_PENDING_REQUESTS_COUNTER, end_point_id):
+        if end_point_id and self.redis_connection.hexists(self.FEDML_PENDING_REQUESTS_COUNTER, end_point_id):
             return int(self.redis_connection.hget(self.FEDML_PENDING_REQUESTS_COUNTER, end_point_id))
         return 0
 

From 084781f3982c8aae98fe8d3564badc90f7824451 Mon Sep 17 00:00:00 2001
From: alaydshah <alay11shah@gmail.com>
Date: Tue, 2 Jul 2024 19:19:51 +0000
Subject: [PATCH 196/282] Add logs in occupy_gpu_ids, and funcs in
 hardware_utils for debugging

---
 .../scheduler/comm_utils/hardware_utils.py    | 25 +++++++++++++++++++
 .../scheduler/comm_utils/job_utils.py         |  7 ++++++
 2 files changed, 32 insertions(+)

diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
index c876948145..56a75fe3e1 100644
--- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
@@ -11,6 +11,29 @@
 GPU_CARD_UTILS = [NvidiaGPUtil, QualcommNPUtil]
 
 
+# This function is just for debugging, can be removed at later point
+def get_gpu_list_and_realtime_gpu_available_ids() -> (List[dict], List[int]):
+    gpu_list = HardwareUtil.get_gpus()
+    gpu_count = len(gpu_list)
+    realtime_available_gpu_ids = HardwareUtil.get_available_gpu_ids(order='memory', limit=gpu_count, max_load=0.01,
+                                                            max_memory=0.01)
+    return gpu_list, realtime_available_gpu_ids
+
+# This function is just for debugging, can be removed at later point
+def trim_unavailable_gpu_ids(gpu_ids) -> List[int]:
+    # Trim the gpu ids based on the realtime available gpu id list.
+    available_gpu_ids = [int(gpu_id) for gpu_id in gpu_ids]
+    gpu_list, realtime_available_gpu_ids = get_gpu_list_and_realtime_gpu_available_ids()
+    unavailable_gpu_ids = list()
+
+    for gpu_id in available_gpu_ids:
+        if gpu_id not in realtime_available_gpu_ids:
+            unavailable_gpu_ids.append(gpu_id)
+
+    trimmed_gpu_ids = list(set(available_gpu_ids) - set(unavailable_gpu_ids))
+    return trimmed_gpu_ids.copy()
+
+
 class HardwareUtil(metaclass=Singleton):
     __gpu_util: Optional[GPUCardUtil] = None
 
@@ -60,6 +83,8 @@ def get_docker_gpu_ids_by_container_name(container_name: str, docker_client: Doc
 if __name__ == "__main__":
     gpus = HardwareUtil.get_gpus()
     get_available_gpu_cards = HardwareUtil.get_available_gpu_ids(limit=len(gpus))
+    trimmed_gpu_ids = trim_unavailable_gpu_ids(get_available_gpu_cards)
+    print(trimmed_gpu_ids)
     device_mapping = HardwareUtil.get_docker_gpu_device_mapping(get_available_gpu_cards, len(get_available_gpu_cards))
     print(gpus)
     print(get_available_gpu_cards)
diff --git a/python/fedml/computing/scheduler/comm_utils/job_utils.py b/python/fedml/computing/scheduler/comm_utils/job_utils.py
index 5b9a2c812a..8a917e539d 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_utils.py
@@ -86,6 +86,8 @@ def occupy_gpu_ids(self, run_id, request_gpu_num, device_id, inner_id=None,
                     # Get the available GPU list, FEDML_GLOBAL_DEVICE_AVAILABLE_GPU_IDS_TAG-${device_id}
                     available_gpu_ids = ComputeCacheManager.get_instance().get_gpu_cache().get_device_available_gpu_ids(
                         device_id)
+                    logging.info(
+                        f"Available GPU Ids fetched from cache: {available_gpu_ids}")
 
                     logging.info(f"Check worker({device_id})'s realtime gpu availability in DB"
                                  f" for run {run_id}: {available_gpu_ids}")
@@ -94,8 +96,11 @@ def occupy_gpu_ids(self, run_id, request_gpu_num, device_id, inner_id=None,
                     if available_gpu_ids is None:
                         # Get realtime GPU availability list from the system
                         available_gpu_ids = JobRunnerUtils.get_realtime_gpu_available_ids().copy()
+                        logging.info(f"Cache not set yet, fetching realtime available GPU Ids: {available_gpu_ids}")
                     else:
                         available_gpu_ids = JobRunnerUtils.trim_unavailable_gpu_ids(available_gpu_ids)
+                        logging.info(
+                            f"Trimmed available GPU Ids: {available_gpu_ids}")
 
                     # Get the matched gpu ids string by the request gpu num
                     cuda_visible_gpu_ids_str, matched_gpu_num = JobRunnerUtils.request_gpu_ids(request_gpu_num,
@@ -119,6 +124,8 @@ def occupy_gpu_ids(self, run_id, request_gpu_num, device_id, inner_id=None,
 
                     ComputeCacheManager.get_instance().get_gpu_cache().set_device_available_gpu_ids(
                         device_id, available_gpu_ids)
+                    
+                    logging.info(f"Updated cache with following available gpu ids: {available_gpu_ids}")
 
                     # For a single run, could be scale up. So if existed such a key, should extend, not replace
                     existed_gpu_nums = ComputeCacheManager.get_instance().get_gpu_cache().get_device_run_num_gpus(

From 37be694a17b6bfdd205ec6e5b526b21856b1af87 Mon Sep 17 00:00:00 2001
From: Alay Dilipbhai Shah <alay@fedml.ai>
Date: Tue, 2 Jul 2024 15:37:48 -0700
Subject: [PATCH 197/282] Revert "Adjust the design of FedML Python Agent to a
 decentralized architecture that supports Launch Master, Launch Slave, Deploy
 Master, and Deploy Slave at the same time."

---
 .github/workflows/CI_build.yml                |  47 -----
 .github/workflows/CI_deploy.yml               |  43 -----
 .github/workflows/CI_federate.yml             |  42 -----
 .github/workflows/CI_launch.yml               |  43 -----
 .github/workflows/CI_train.yml                |  42 -----
 .github/workflows/README.md                   |  97 ----------
 .../build_wheels_and_releases.yml-backup      |   0
 .../{deprecated => }/codeql-analysis.yml      |   0
 .../deprecated/python-package-conda.yml       |  34 ----
 .../{deprecated => }/full_e2e_test.yml-bakcup |   0
 .github/workflows/image.png                   | Bin 389049 -> 0 bytes
 .github/workflows/{deprecated => }/pylint.yml |   7 +-
 .../registry-runners/build_linux_runners.sh   |  12 --
 .../workflows/registry-runners/build_test.sh  |   1 -
 .../registry-runners/run_linux_runners.sh     |  48 -----
 .../workflows/registry-runners/windows.ps1    |  32 ----
 .github/workflows/{deprecated => }/runner.md  |   0
 ...oke_test_cross_device_mnn_server_linux.yml |  13 +-
 ...ke_test_cross_silo_fedavg_attack_linux.yml |  28 ++-
 ...smoke_test_cross_silo_fedavg_cdp_linux.yml |  15 +-
 ...e_test_cross_silo_fedavg_defense_linux.yml |  19 +-
 ...smoke_test_cross_silo_fedavg_ldp_linux.yml |  15 +-
 .../smoke_test_cross_silo_ho_linux.yml        |  15 +-
 .../smoke_test_cross_silo_ho_win.yml          |  15 +-
 ...moke_test_cross_silo_lightsecagg_linux.yml |  15 +-
 .../smoke_test_cross_silo_lightsecagg_win.yml |  15 +-
 .../smoke_test_flow_linux.yml                 |   9 +-
 .../smoke_test_ml_engines_linux_jax.yml       |  15 +-
 .../smoke_test_ml_engines_linux_mxnet.yml     |  15 +-
 .../smoke_test_ml_engines_linux_tf.yml        |  15 +-
 .../smoke_test_ml_engines_win.yml             |  27 ++-
 .../smoke_test_pip_cli_sp_linux.yml           |  37 ++--
 .../smoke_test_pip_cli_sp_win.yml             |  11 +-
 .../{deprecated => }/smoke_test_security.yml  |   9 +-
 .../smoke_test_simulation_mpi_linux.yml       |  43 ++---
 .../github-action-runner}/Dockerfile          |  22 ++-
 .../dockerfile/github-action-runner/README.md |  25 +++
 .../dockerfile/github-action-runner/build.sh  |   3 +
 .../github-action-runner/runner-start.sh      |  23 +++
 .../dockerfile/github-action-runner}/start.sh |   4 +-
 devops/scripts/install-fedml.sh               |   2 -
 devops/scripts/sync-fedml-pip.sh              |   4 +-
 .../README.md                                 |   2 +-
 .../launch_config/fedml_config.yaml           |  14 --
 .../launch/hello_world/hello_world.py         |   1 +
 python/examples/launch/serve_job_mnist.yaml   |   2 +-
 .../launch_config/fedml_config.yaml           |   3 -
 python/examples/train/mnist_train/train.py    |  98 ----------
 python/examples/train/mnist_train/train.yaml  |  50 -----
 python/fedml/__init__.py                      |  23 +--
 python/fedml/api/__init__.py                  |   3 -
 python/fedml/api/modules/model.py             |  13 --
 .../scheduler/comm_utils/job_monitor.py       |  12 +-
 .../scheduler/comm_utils/run_process_utils.py |   4 +-
 .../scheduler/comm_utils/sys_utils.py         |  21 ---
 .../scheduler/master/base_master_agent.py     |  54 +-----
 .../master/base_master_job_runner.py          |  77 +++-----
 .../master/base_master_job_runner_manager.py  | 115 ++----------
 .../master/base_master_protocol_manager.py    |  60 +-----
 .../scheduler/master/cloud_server_manager.py  |  31 +--
 .../master/master_protocol_manager.py         |  12 +-
 .../scheduler/master/server_login.py          |   1 -
 .../model_scheduler/master_job_runner.py      |   4 +-
 .../master_protocol_manager.py                |   3 +-
 .../model_scheduler/model_device_client.py    |  98 ++++++++++
 .../model_scheduler/model_device_server.py    |  97 ++++++++++
 .../worker_protocol_manager.py                |   4 +-
 .../scheduler_core/account_manager.py         |  76 ++++----
 .../scheduler_core/general_constants.py       |  76 --------
 .../scheduler_core/message_center.py          | 123 +++---------
 .../scheduler_base_job_runner.py              |  22 +--
 .../scheduler_base_job_runner_manager.py      |   6 +-
 .../scheduler_base_protocol_manager.py        | 107 ++---------
 .../scheduler/scheduler_core/status_center.py |  85 ++-------
 .../status_manager_protocols.py               |  27 ++-
 .../scheduler/slave/base_slave_agent.py       |  37 +---
 .../scheduler/slave/base_slave_job_runner.py  |  29 +--
 .../slave/base_slave_protocol_manager.py      |  17 +-
 .../scheduler/slave/client_data_interface.py  |   9 -
 .../computing/scheduler/slave/client_login.py |  14 +-
 .../scheduler/slave/slave_protocol_manager.py |  74 ++++++--
 .../scheduler/slave/united_agents.py          |  75 --------
 python/fedml/core/mlops/__init__.py           |  16 +-
 python/fedml/core/mlops/mlops_device_perfs.py | 176 +++---------------
 python/fedml/core/mlops/mlops_job_perfs.py    |  26 +--
 .../core/mlops/mlops_runtime_log_daemon.py    |  21 +--
 python/setup.py                               |   7 +-
 python/tests/cross-silo/run_cross_silo.sh     |   6 +-
 python/tests/smoke_test/cli/build.sh          |   4 +-
 python/tests/test_deploy/test_deploy.py       |  39 ----
 python/tests/test_federate/test_federate.sh   |  26 ---
 python/tests/test_launch/test_launch.py       |  50 -----
 python/tests/test_train/test_train.py         |  49 -----
 93 files changed, 741 insertions(+), 2120 deletions(-)
 delete mode 100644 .github/workflows/CI_build.yml
 delete mode 100644 .github/workflows/CI_deploy.yml
 delete mode 100644 .github/workflows/CI_federate.yml
 delete mode 100644 .github/workflows/CI_launch.yml
 delete mode 100644 .github/workflows/CI_train.yml
 delete mode 100644 .github/workflows/README.md
 rename .github/workflows/{deprecated => }/build_wheels_and_releases.yml-backup (100%)
 rename .github/workflows/{deprecated => }/codeql-analysis.yml (100%)
 delete mode 100644 .github/workflows/deprecated/python-package-conda.yml
 rename .github/workflows/{deprecated => }/full_e2e_test.yml-bakcup (100%)
 delete mode 100644 .github/workflows/image.png
 rename .github/workflows/{deprecated => }/pylint.yml (89%)
 delete mode 100644 .github/workflows/registry-runners/build_linux_runners.sh
 delete mode 100755 .github/workflows/registry-runners/build_test.sh
 delete mode 100644 .github/workflows/registry-runners/run_linux_runners.sh
 delete mode 100644 .github/workflows/registry-runners/windows.ps1
 rename .github/workflows/{deprecated => }/runner.md (100%)
 rename .github/workflows/{deprecated => }/smoke_test_cross_device_mnn_server_linux.yml (88%)
 rename .github/workflows/{deprecated => }/smoke_test_cross_silo_fedavg_attack_linux.yml (83%)
 rename .github/workflows/{deprecated => }/smoke_test_cross_silo_fedavg_cdp_linux.yml (87%)
 rename .github/workflows/{deprecated => }/smoke_test_cross_silo_fedavg_defense_linux.yml (86%)
 rename .github/workflows/{deprecated => }/smoke_test_cross_silo_fedavg_ldp_linux.yml (87%)
 rename .github/workflows/{deprecated => }/smoke_test_cross_silo_ho_linux.yml (89%)
 rename .github/workflows/{deprecated => }/smoke_test_cross_silo_ho_win.yml (88%)
 rename .github/workflows/{deprecated => }/smoke_test_cross_silo_lightsecagg_linux.yml (88%)
 rename .github/workflows/{deprecated => }/smoke_test_cross_silo_lightsecagg_win.yml (88%)
 rename .github/workflows/{deprecated => }/smoke_test_flow_linux.yml (92%)
 rename .github/workflows/{deprecated => }/smoke_test_ml_engines_linux_jax.yml (87%)
 rename .github/workflows/{deprecated => }/smoke_test_ml_engines_linux_mxnet.yml (87%)
 rename .github/workflows/{deprecated => }/smoke_test_ml_engines_linux_tf.yml (87%)
 rename .github/workflows/{deprecated => }/smoke_test_ml_engines_win.yml (90%)
 rename .github/workflows/{deprecated => }/smoke_test_pip_cli_sp_linux.yml (80%)
 rename .github/workflows/{deprecated => }/smoke_test_pip_cli_sp_win.yml (90%)
 rename .github/workflows/{deprecated => }/smoke_test_security.yml (91%)
 rename .github/workflows/{deprecated => }/smoke_test_simulation_mpi_linux.yml (73%)
 rename {.github/workflows/registry-runners => devops/dockerfile/github-action-runner}/Dockerfile (70%)
 create mode 100644 devops/dockerfile/github-action-runner/README.md
 create mode 100755 devops/dockerfile/github-action-runner/build.sh
 create mode 100644 devops/dockerfile/github-action-runner/runner-start.sh
 rename {.github/workflows/registry-runners => devops/dockerfile/github-action-runner}/start.sh (76%)
 delete mode 100644 devops/scripts/install-fedml.sh
 delete mode 100644 python/examples/launch/examples/launch/hello_world/launch_config/fedml_config.yaml
 delete mode 100644 python/examples/train/mnist_train/examples/train/mnist_train/launch_config/fedml_config.yaml
 delete mode 100644 python/examples/train/mnist_train/train.py
 delete mode 100644 python/examples/train/mnist_train/train.yaml
 create mode 100755 python/fedml/computing/scheduler/model_scheduler/model_device_client.py
 create mode 100755 python/fedml/computing/scheduler/model_scheduler/model_device_server.py
 delete mode 100755 python/fedml/computing/scheduler/slave/united_agents.py
 delete mode 100644 python/tests/test_deploy/test_deploy.py
 delete mode 100644 python/tests/test_federate/test_federate.sh
 delete mode 100644 python/tests/test_launch/test_launch.py
 delete mode 100644 python/tests/test_train/test_train.py

diff --git a/.github/workflows/CI_build.yml b/.github/workflows/CI_build.yml
deleted file mode 100644
index b4c3642b09..0000000000
--- a/.github/workflows/CI_build.yml
+++ /dev/null
@@ -1,47 +0,0 @@
-# This is a basic workflow to help you get started with Actions
-
-name: CI-build
-
-# Controls when the workflow will run
-on:
-  # Triggers the workflow on push or pull request events but only for the master branch
-  schedule:
-    # Nightly build at 12:12 A.M.
-    - cron: "0 10 */1 * *"
-  pull_request:
-    branches: [ master,  dev/v0.7.0 ]
-
-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
-jobs:
-  build:
-    runs-on: ["${{ matrix.python-version }}","${{ matrix.os }}"]
-    strategy:
-      fail-fast: false
-      matrix:
-        os: [ Linux, Windows ]
-        arch: [X64]
-        python-version: ['python3.8', 'python3.9', 'python3.10', 'python3.11']
-    
-    timeout-minutes: 5
-    steps:
-      - name: Checkout fedml
-        uses: actions/checkout@v3
-
-      - name: pip_install
-        run: |
-          cd python
-          pip install -e ./
-
-      - name: login
-        run: | 
-          fedml logout
-          fedml login $API_KEY
-
-      - name: pylint
-        run: |
-          cd python
-          echo "Pylint has been run successfully!"
-
diff --git a/.github/workflows/CI_deploy.yml b/.github/workflows/CI_deploy.yml
deleted file mode 100644
index 982f65b3c5..0000000000
--- a/.github/workflows/CI_deploy.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-# This is a basic workflow to help you get started with Actions
-
-name: CI-deploy
-
-# Controls when the workflow will run
-on:
-  # Triggers the workflow on push or pull request events but only for the master branch
-  schedule:
-    # Nightly build at 12:12 A.M.
-    - cron: "0 10 */1 * *"
-  pull_request:
-    branches: [ master,  dev/v0.7.0 ]
-
-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
-jobs:
-  deploy:
-    runs-on: ["${{ matrix.python-version }}","${{ matrix.os }}"]
-    strategy:
-      fail-fast: false
-      matrix:
-        os: [ Linux, Windows ]
-        arch: [X64]
-        python-version: ['python3.8', 'python3.9', 'python3.10', 'python3.11']
-    
-    timeout-minutes: 5
-    steps:
-      - name: Checkout fedml
-        uses: actions/checkout@v3
-
-      - name: pip_install
-        run: |
-          cd python
-          pip install -e ./
-
-      - name: serving_job_in_test_env
-        run: |
-          cd python
-          echo "Serving example has been tested successfully!" 
-          python tests/test_deploy/test_deploy.py
-          
diff --git a/.github/workflows/CI_federate.yml b/.github/workflows/CI_federate.yml
deleted file mode 100644
index 1302771b1d..0000000000
--- a/.github/workflows/CI_federate.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-# This is a basic workflow to help you get started with Actions
-
-name: CI-federate
-
-# Controls when the workflow will run
-on:
-  # Triggers the workflow on push or pull request events but only for the master branch
-  schedule:
-    # Nightly build at 12:12 A.M.
-    - cron: "0 10 */1 * *"
-  pull_request:
-    branches: [ master,  dev/v0.7.0 ]
-
-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
-jobs:
-  federate:
-    strategy:
-      fail-fast: false
-      matrix:
-        os: [ Linux, Windows ]
-        arch: [X64]
-        python-version: ['python3.8', 'python3.9', 'python3.10', 'python3.11']
-
-    runs-on: ["${{ matrix.python-version }}","${{ matrix.os }}"]
-    timeout-minutes: 5
-    steps:
-      - name: Checkout fedml
-        uses: actions/checkout@v3
-        
-      - name: pip_install
-        run: |
-          cd python
-          pip install -e ./
-
-      - name: federate_job_in_test_env 
-        run: |
-          cd python
-          bash tests/test_federate/test_federate.sh
-          echo "Federate example has been tested successfully!"
diff --git a/.github/workflows/CI_launch.yml b/.github/workflows/CI_launch.yml
deleted file mode 100644
index 13519c41f2..0000000000
--- a/.github/workflows/CI_launch.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-# This is a basic workflow to help you get started with Actions
-
-name: CI-launch
-
-# Controls when the workflow will run
-on:
-  # Triggers the workflow on push or pull request events but only for the master branch
-  schedule:
-    # Nightly build at 12:12 A.M.
-    - cron: "0 10 */1 * *"
-  pull_request:
-    branches: [ master,  dev/v0.7.0 ]
-
-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
-jobs:
-  launch:
-
-    strategy:
-      fail-fast: false
-      matrix:
-        os: [ Linux, Windows ]
-        arch: [X64]
-        python-version: ['python3.8','python3.9','python3.10','python3.11']
-
-    runs-on: ["${{ matrix.python-version }}","${{ matrix.os }}"]
-    timeout-minutes: 5
-    steps:
-      - name: Checkout fedml
-        uses: actions/checkout@v3
-
-      - name: pip_install
-        run: |
-          cd python
-          pip install -e ./
-
-      - name: launch_job_in_test_env
-        run: |
-          cd python
-          python tests/test_launch/test_launch.py
-          echo "Launch example has been tested successfully!" 
diff --git a/.github/workflows/CI_train.yml b/.github/workflows/CI_train.yml
deleted file mode 100644
index 2acbcc12a0..0000000000
--- a/.github/workflows/CI_train.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-# This is a basic workflow to help you get started with Actions
-
-name: CI-train
-
-# Controls when the workflow will run
-on:
-  # Triggers the workflow on push or pull request events but only for the master branch
-  schedule:
-    # Nightly build at 12:12 A.M.
-    - cron: "0 10 */1 * *"
-  pull_request:
-    branches: [ master,  dev/v0.7.0 ]
-
-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
-jobs:
-  train:
-    runs-on: ["${{ matrix.python-version }}","${{ matrix.os }}"]
-    strategy:
-      fail-fast: false
-      matrix:
-        os: [ Linux, Windows ]
-        arch: [X64]
-        python-version: ['python3.8', 'python3.9', 'python3.10', 'python3.11']
-    timeout-minutes: 5
-    steps:
-      - name: Checkout fedml
-        uses: actions/checkout@v3
-
-      - name: pip_install
-        run: |
-          cd python
-          pip install -e ./
-
-      - name: training_job_in_test_env
-        run: |
-          cd python
-          python tests/test_train/test_train.py
-          echo "Train example has been tested successfully!" 
-
diff --git a/.github/workflows/README.md b/.github/workflows/README.md
deleted file mode 100644
index 668cb9b302..0000000000
--- a/.github/workflows/README.md
+++ /dev/null
@@ -1,97 +0,0 @@
-# 1. Design
-
-![Design](image.png)
-
-##  Design principles
-
-The CI tests need to be comprehensive, covering typical scenarios only, achievable within 5 minutes.
-
-# 2. Registry Self-Host Runners
-
-## 2.1 Linux Runners
-
-### Step1: Build linux images
-
-Build all the linux images for Self-Host Runners.
-```
-cd registry-runners
-bash build_linux_runners.sh
-```
-
-### Step2: Specify the token and key.
-Find your GitHub runner token and your test-account apikey.
-
-For the argument YourGitHubRunnerToken, Navigate the path `Settings -> Actions -> Runners -> New self-hosted runner` to get.
-
-In the Configure section, you will find the similar line:
-./config.sh --url https://github.com/FedML-AI/FedML --token AXRYPL6G2VHVGDFDQQS5XA3ELYI6M to get YourGitHubRunnerToken to value of --token
-
-### Step3: Registry all the runners.
-Registry by run `run_linux_runners.sh` script
-```
-bash run_linux_runners.sh [YourGitRepo] [YourGitHubRunnerToken] [YourTestAccountApiKey]
-```
-for example
-```
-bash run_linux_runners.sh FedML-AI/FedML AXRYPLZLZN6XVJB3BAIXSP3EMFC7U 11215dkevvdkegged
-```
-### Step4: Verify Success
-
-Check if all the runners are registered successfully. Navigate the following path. `Settings -> Actions -> Runners` to check that all your runners are active.
-
-## 2.2 Windows Runners
-
-### Step1: Install Anaconda packages
-Install Anaconda or Miniconda on a Windows machine. Anaconda and Miniconda can manage your Python environments.
-
-### Step2: Create python enviroments
-Create 4 python environments named `python38`、`python39`、`python310` and `python311` for different runners.
-Specify the python version to install.
-For example 
-```
-conda create -n python38 python==3.8
-```
-### Step3: Create directories 
-Create 4 directories named `actions-runner-python38`、`actions-runner-python39`、`actions-runner-python310` and `actions-runner-python311` for different runners.
-
-### Step4: Install the latest runner package. 
-Follow the insturction from navigating this path `Settings -> Actions -> Runners -> New self-hosted runner` to add a new Windows runner. Note that you only need to download、extract the files into the directories created in Step 3. Configuration and running will be done through a script later.
-
-### Step5: Registry all the runners.
-Run the script from `./registry-runners/windows.ps1` to registry all the runners to your github. Replace the variables `$REPO`、`$ACCESS_TOKEN` and `$WORKPLACE` with actual values. Note that you can get your $ACCESS_TOKEN from the following path `Settings -> Actions -> Runners -> New self-hosted runner.`.
-In the Configure section, you will find the similar line: `./config.sh --url https://github.com/FedML-AI/FedML --token AXRYPL6G2VHVGDFDQQS5XA3ELYI6M` to get your `$ACCESS_TOKEN`.
-
-### Step6: Verify Success
-Check if the runners are registered successfully by navigate to `Settings -> Actions -> Runners`. Make sure that all your runners are active. 
-
-## 2.3 Mac Runners
-
-# 3. Bind Test Machines
-
-Bind the actual machine to run the test training job. Follow this document to bind your test machines.
-https://docs.tensoropera.ai/share-and-earn
-
-Note that we need to bind our machines to the test environment.
-
-Specify the computing resource type to which you have bound your machines. Your job will be scheduled to that machine.
-
-# 4. Trigger
-
-Applying for a PR can trigger all tests automatically.
-
-Run a single test on a specific branch from the GitHub Actions tab.
-
-Schedule daily runs at a specific time by configuring your workflow YAML. You can check the results in the GitHub Actions tab.
-
-# 5. Add a new CI test
-
-Creating a new workflow YAML file, such as CI_launch.yaml or CI_train.yaml, allows you to add a CI test that is different from the current business.
-
-Adding a new CI test to the current business can be done by placing your test in the path python/tests/test_{business}/test_file.py and ensuring that your workflow YAML can run that Python test script.
-
-Ensuring your workflow YAML is configured correctly will enable it to run the new test automatically.
-
-# 6. TODO
-
-Implement the Mac runners.
-
diff --git a/.github/workflows/deprecated/build_wheels_and_releases.yml-backup b/.github/workflows/build_wheels_and_releases.yml-backup
similarity index 100%
rename from .github/workflows/deprecated/build_wheels_and_releases.yml-backup
rename to .github/workflows/build_wheels_and_releases.yml-backup
diff --git a/.github/workflows/deprecated/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
similarity index 100%
rename from .github/workflows/deprecated/codeql-analysis.yml
rename to .github/workflows/codeql-analysis.yml
diff --git a/.github/workflows/deprecated/python-package-conda.yml b/.github/workflows/deprecated/python-package-conda.yml
deleted file mode 100644
index f3586044ab..0000000000
--- a/.github/workflows/deprecated/python-package-conda.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-name: Python Package using Conda
-
-on: [push]
-
-jobs:
-  build-linux:
-    runs-on: ubuntu-latest
-    strategy:
-      max-parallel: 5
-
-    steps:
-    - uses: actions/checkout@v4
-    - name: Set up Python 3.10
-      uses: actions/setup-python@v3
-      with:
-        python-version: '3.10'
-    - name: Add conda to system path
-      run: |
-        # $CONDA is an environment variable pointing to the root of the miniconda directory
-        echo $CONDA/bin >> $GITHUB_PATH
-    - name: Install dependencies
-      run: |
-        conda env update --file environment.yml --name base
-    - name: Lint with flake8
-      run: |
-        conda install flake8
-        # stop the build if there are Python syntax errors or undefined names
-        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
-        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
-        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
-    - name: Test with pytest
-      run: |
-        conda install pytest
-        pytest
diff --git a/.github/workflows/deprecated/full_e2e_test.yml-bakcup b/.github/workflows/full_e2e_test.yml-bakcup
similarity index 100%
rename from .github/workflows/deprecated/full_e2e_test.yml-bakcup
rename to .github/workflows/full_e2e_test.yml-bakcup
diff --git a/.github/workflows/image.png b/.github/workflows/image.png
deleted file mode 100644
index 330e630c0a3f784f45b7349741a82e03855babc1..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 389049
zcmeFZ1zc3?x-h;bVCe1^kVcS3X{1CzLQ<u>8>vAQP*Pe#K|)XjR9cY~5JBk>q(NE+
z1sr6U`7hk=eYW0n_WAzjJNMr2{;mtwtml2#ThFtedJ{K}n*}J&s%xnO5C{O=0{;Nq
z9N?!G<m><dIy!&=000C49Kr<Pfe@GkP=GN10jop!0NA&9C;-Gb1Mu%@biwD*1(x<L
z&u>p?KJ@n#groVe-(h^wqolY^K<2!Yr=O><lcyJl_z6)!Mny{pepDiOeS?pG11W}C
z<KHC#PvNT(gts@)D&e?!@_sE<Rm<}Rdg@x|)V?7?-eT?L<p!ky0Cx{R9|H{)4pTF8
z4#W!B8WI2jkO7j`w!U7<=g*%zD)aZ}xAOn^*&F@Vc3?>OsH{Jt|IYwQJ9}SS5c%_9
zavM8eTXzty1^{@jt(T7<0N@|Ra|QZ&9l_uY(D;B21mPP;u;XuV?h$PB8~pfN8Y2T$
zFwINwlaN?j`#1ssSw9%h5n$^GmO+LBVNnk|XAb}%=Kx^^cXv-a5VitgJ}`|J2p^R%
zWAmqS9RCDcTig6mrnR->AMkIufH}d6&p7*fxmjQN7WjAmxV!m-^!j%3fWIV8KAHyL
zlLn;QxT~l35ljNY8y+^gdLT>%!f0m@yWe0ak(0mX1rP?)Ls@KnRY6(;bRaBhXMIK+
zgjqmX-NQ}y=zAZ<+c-F@Yk@EY2nRX)X&Heq0|?)+_f<3eo+jVN)8Knu&{_u{RpsyT
zudIDQ=(oD2{ar5{#UGXb$;n>pd-*W3U?(FJFrEkiVB7)D#zzuC2Er$NT@8=g1GWJs
z7wn{Z6#p%akH5iD`?<k*1A7k*6%Yn-fdx4Ds2|~els?wo7lgjy4$F6P);h`y(gF6=
z&&fy=ghBeiUfNoJYd?q!tl!>4`8!Qu0rtM8M{NVsLu>8q&!~Ygh&yb>-s8d%zDMPt
zz5G;;>H_n^iM-st$p#n?=d^dzIEv>4VF}*=!|!?F#(q9VNAX}g;SOGY=Z|Ozwi6!g
zVtw{J2!ri`-vZ77s=yh51MmlL8^9BA0i54%)NO#F-`=PL)_@P-2-pL{-(!ByVe;*b
z2l#6U1OuCZ2bjk9k9;cM-r55JVEo}9vA^XN1{}V<4fvMB75D(w;0|a5ZeZ9SgzdpR
zzt?6AEP^r4e}sRp-5M;@7HqE@cptt0`|-cW{w<|4i0`@YdHsK{k%RpA{P=44XYe)g
z)$#e^OmGGGY4|ztR|&ku;4*O8-|_fcdb|&K19<azBY3lT?<}1oeSV^m4loO1^cxNB
zLHhoQ7H}T0Oi8#3TplbPEE!G*X99%b5@0>ba7D0YNibIRJBCM8`TdLh8K*xm0RHwh
zzNaG;C4>^v6Dkq%{hX6bfK2jte8189&y@VFtu}wq`}Y?7$NK;F#u=~!+o}D>*ZAH7
z*bP_}tP9o(>xMPL>HrQ{Gprug3wv<{f3IKt+t+pcqc($Y?QsRai}N38e#iIsFMh-y
zzM^HKtD>0SO7XJ~@&ox9Q1$e>;^XY-<j0{5S_FFzEf3ohLL6eEViEvwWM7Wb0KjMc
zZ*~Jh*Za4)Ycl{K-;BfIp8qZGY%>5nkp{)T;csz#G5|nH3;?ehZ2f%#ek%`t^uPnP
z1UV>ai~t+J1@M9RhyhZ-X+Q~31I_|^fDvE@Tn72o3FLDhAP@)zt^(0O9B6^lfovcT
zC<ID@a^NXY2Q&h$Kqt@x^aCTnB*;BWz&d~e_J9Kj1cHE&LZ~5(5O&CMh#*7^A_Gx?
zs6oy_3?OC@D~Kb+1L6+}g<ONgK~f;ukb95^kSCA^NGqfpG5~o8nS-oCK0&@hVNen%
z4U`qi0~LYFK$W4|P$TFis3X)H8VtP-O@!Wt7C;|CYoV>s9_R>k7P=1Ig#j>P7#-{w
zOc*8uQ-$fl%wY~NA6Pgn7M1}kfK|Yrf!}!uHVfN;?Zffm)Nl^CFv!80@C$HTxHmi;
z9uLokm%?k|FX2P*IXDWA#UsUI!4t$gg{OsQisy(Ih!>5Qfp;IT2CoB@r6s&Qe0+R5
zd|rHMP!`SbUGPKj<MHp{SK_ze58yB0?;;QgMuZ?j9-)V@M))D35m|^wh-SnfVhMpJ
zASPfZ5F=10FemUJh$P4$cu3GhFi5aWa6m{wc%1MQp&p?f;T6Ip!Xm<Fgnfidga<^F
zM7%`uL`Fm|L|2J!6IBp(5WOe*OiWD7Ni0jOPwY$_L42F|G4U(nIpTd1DiQ$_Rg#M&
z{v?ScB_u5*??^tAl9KX}Dv_F#`jIA*J|Jx;ohC(-QInk@(;~AYLz3l?)shX8eIzF&
zKTfVpewjR&JcGQ7ypMc?f{=oTLY2arBAg<JqJd(JVwaMd@+74mr8{Lj<wMGD$`vXE
z6%Umfl^xY}szR!Ess(BoH5auiwJr5E>LTh6>LnU{8eSSL8W);4nsS=AG$>jsS_xWX
z+928-+84C5bTB#|IxRXkx+J=%bfa`%=~?NO>Fw!n(pS(A(tlxKW>8|VXNYB}WEf%i
z%E-Z}#^}nJ%vi@b#RO*(VA5v_WV*}L!L-3l!+e_AmN}OBDf0viltqBWfF*>bkfoPp
zmz9lGgVl>Qi?y9~gN=?&iOq#Ajjf4om7SVhf!&!sjlG$Djf0j$nZu3aHpfd2)G?N0
zn#cT)-8=U7*a0Um=LOD4&PvWHE+Q@&E(fkOt~M?dH#@fucPMum_r!6+<1)vcj%Ob4
zI{t-+m&b%BhNq5ag_nU>i#M3JjQ2esDW4*rH(vqYFh4%O48JS?9sYg+sDPw^lR%C@
zpCD9FO3+#Gj^Kb0o{+4Nhfsmgm@tX3vT%TKnef~Rx)bM4M4qTS@lk|Z#9Sm<q)P-V
zDk<tFS|mDolIrBylaVK%o%|#wAZ9C;BQ`8fDy}Y$6t5TGmJpP%m$)l2E=et^BN-#v
zCV40&E#)IsA+;*aEo~*8D?KJdBXeFRUgni7o~(*2Qnpcc|CH1zzf(_7ZJicA?Rxs*
z=@mI%IeWPxxdnMnd29K5^0NvY3RVjF3bTsG6s;8t6z7$=l<bu5D=jPYE4wH^QvRqS
zs^X(kqw+=dlxmo2%Nf`ijWe-l-l|clT~Nzbn^xykcT_J^M`=iEglM#A;%RDYCTosq
zv1-|BJ<vj(l|CDG_N6w7wxM>m_WU`4b3W&u=|FYP>ZIzt*FCQ5p<Aa1=xOPt>P?;J
zJ@0+~xjw%BdHo#y4+bX<LJhhMsSPa+9~$l%sTd_0O<v%;;D4dbnB4fH@dM*8CTC1i
zOlD0*Ov6lj%$Ura&Fak&=Emmt&37%-EYdBOE=pdEzBqb`_fpWM?#oP<-7dedB)7D-
ztg^zhGO>DSg|XJNF0kIU(Xz?4LD{O>X4<aXDcPmjt=h}mr`Rt$$UCGstT@U$raG=V
zDLQ31Z8)nsXFG4ZXu9ONd~wxtEpf%UnYdND6S!NsH+WEaxO%+wWb+L29PkqKiuRiI
zKINV6jq=g<x$g_{z2sZ(M*}+Yz5aauQT}rQ3IRC*=s@GZsvycBkD%Tw0#|NcSq@eY
zE)Ic**oL%)9t*u1Iv1uKb}t+fZWG>y<U(FYE=6cWlwKve>U#A}q-bPH<jysdYtOE;
zUXQrG5Ty}S7EKoI6FnAlIwtQ1{D$+5w>QOaX5GZb+Qq((6OGG=!^GRhzfL%ra618)
z=#cm}Nh;~?E&N*^w?>l{lS@-5Q-V_#Qgu>m(>T&%({|D=)4MYyGV(HsGW|1WZ=bte
zpT(VZD+`nDls%H8l2e(>l6y1v%N@HrgLjqgR^+kf#pQj?cg`Qbr*W^gfTtj%5Wg^>
z@I#StQCG1{@q_zJ_v7v#mUxuRmKv14d?5Yc!9$jZNoCM7|FYFb7a#SPpDC}e5U#ja
zNmm*77<laec<qVRlhLQzPur?wsw%3FSLfDH*TmLBYOmCy>YVCk>&@y18qPMfJv;rZ
z=J|=|C5^`#b6(KCNNOT#y559q4sPCU@oL#<b!=T|vuvAeH*Ft&ssHkAhjz!SPW8_A
zF6FM~SMskKyH9mDyq0-g*CW+a+bh{y^G5Pb&0DFrwSCfk_5HH_&j#cMng$gITZhgJ
zbq;F{_l)R{42)hF9Ur?mHal)NzVgoP-S$Ml#KC05d%X9tQxsDf(=5{kGyF4^vr@A!
z=FZIZ&Ku58F4!)tFZwQGmacyw{gAQDzWiWCY^8BkeYJnhVr^;NYyDs&dXsW9_aooO
z>Mf<MUX&?nY1?NT_bL7}<L8nciJi9H^Sje~?t2GcV$qD~(tVlzuCFFvmk$Cl2+Zw6
z{=;WjZR~rTI}Yb%?PLAT7dYx`Lmj}L`KJ;9AT<L3>VD9Fu=uV2_bmnFw>~QfL%!cf
z(Lcez^%0L=fIb9J0s8Oy8=!YH1^|zX0DuQ9pZp>KoF@eUBUu0{F8;^;EeqJE{?@NN
z3gCl%i2U=1HtsgxU;Xg-t<MQY{od}MpCC5k;-K^N(-YSMP!iya5{|+koB)&(0;7cB
zUIOeO9(bUW0$#t@0)fKdc=!kcLLy=?K@9}}g}`7?I1CT(Xy5=62EGU2lz3FfPAcP5
z>suo@y=la*Cf_6AQhCx!YcPP~7Ps+<BqXAvXJBMH&cn;cFCZZ)B`qU+O7)DIx`w9K
zSwo`>#wMV9W@~5f;OOM+;_K%h5EyhN_}cZT=$IQfV^dPo(lat|XJr=@78TzwDShzp
zX;pPiZC!oCv$pn^9i3gTx?c|t4UdeDjlY|in_pO5`mnsRy0-o4^Um(x7xezuBfKC0
z>^HRj!0acyC_%iSa5xN(IKm484Fqo(B^>YANqj10eT20)HK*8B0veU%drw*kxx@`n
zv^G8iM0DH|bH}%jQ2U12{~2PD{}N_@Aoe%BCc&-+^xFl4LSc9?7z_^|54`XRK+g*w
zpMZ$q+eP&KBKdZa9bM#qTsW{02v`Oj4o86h6vTwY6#u_3+!Q#_bQd=PkiZ}yGr=eU
zMc{C5oNrv96H&~-mbQTx7MT$F$SPR>>Z)2q<zxNF7vjm!ipujnrrjgD6j{=hUx=T0
z{@A#4K=AN$9cF~DUK#<aaQaT9i&Ej^lBoN)NQey}vFGLaTY~$?iSYSb98$|QWlDj9
zSTEKU$gsW|K=bSV-$8>m*^)uZY`|VJCfJ__LdkgFw&tulf00ABl~RvBEmyL!=Kb`Y
zGH4y5v}pV^Q*P-;{^tk}fh$Te)<y3OHx;U_244ThjJCrdGis+19pEwJnYQc{!Up3Y
zmR5yUWgx$TjA|vgLE}bi3mXI(Rp_tSSBmJ#)m3#wF&(t2RlU_1Wa^fkf)<m%F~8}5
zQWXZ{{Kc|mrfg}02swQ#0UFjYjTcI<PIm3Txojo=s?^l`mE3@p_#J$K!u-C`>Rb{>
zS_387`8Y_1kw+g6xSUWpv_JfmuTO8Va9mzdRV@P7fB132X~IRP+P$ova#O-xl4n9{
zz>Dj9T*0hH^+0XWr251Oc!+aAY^qZsG|`GN>1@D+{|Un4UYJsc49Q^BZi!JugiS{5
zd2uCxv1H(vWd3i{OcIGZ6DiqhfYfbu{+;viFz4rC0vCy#@w2VeV$Ts}q(bvVDcuX*
zwW|%>Y4#(MoC}>FNDb7+U1b|mA2I-6&_m_#C*1$eXFAL}TD1w)7YX*VrLt4qnVtR-
zV^$C6I8Cax{F{p_Pb!v3bO{X^B3Le`hd$@u?RufqWBkqw`Kr_`Y^9#&Vn}QW$8xIq
z37K`H_x>N^u4cDheO1(RfXwSG@|T_cymNokb4%Y#5jSwS2a1vN@OfAOh=@N8Fa_dx
zM-7m6E_6Q?3wTUiD#(ZhMEMFb@gt~W17^(6pHw2CAHAbS1L*&{|5wsrkiX_Q4TP0u
zv--9Kkd~t+3^GkYLiHLS<oQ3~B<B)+CDMPyX8(6DDcpIojXo6Jzq<n=G;H0ag~1X9
zhHT}ANppRbXr^=(J0H9vbNRf}w~tvXy0Ze4i<b&>`jDZkqfs4<jgC;n4IX|44aX7R
zSGymz@{a%wh>ujPD8ExaG?wqz{fh>_@jxmRREIhR2}fGdW8m*{&JS$#?+8%1(+tWk
z&KKWjtA-GtIK2>iHRDko0uvim3e73)GCxxEv5ki(jTYCOpx2;eYK)$qpV5Q$vJ2(n
zANKOeaYKigf>=JVPx9;jMT7rT9)Josuj?M^V#bXyHKi$mfb};j@46%)7rr7B3{%T5
zhG*db#XE+nF8~91<=sL>b1hj(wt@ApOaN~9HzqCh@ZhSh%UH9$F{d7&@LJ5prF#qq
zEaE*tj#bZmh2)d7RSy+y7g<g=@uX-KHXP5dF|mu~`y4f=*+bba>58QwCq(weVAV2l
zKx{cVAr5#|a~}tc(PA%HbWF>jG9z}{%h1giu4&gSJEu2YAy|CmB=$-AfUoGlcw!^|
zhT<H6%EX|Ni)Kjl!yf|MQ-XU{=;II9yjKfNGKSvRlu33x?;;Daf3>f?YXxk@?&Z;<
zhuWKnQ8iuhyOZVFYENJGNx0bKIrt|KU57swT1XyWE|w}{qCmDm=0x^UVAG!BfNi56
zgBRZFZ5Ph%Hn&eAW2r5lp@Ua@z27XJy_9=)fO0A7y@H)bVclTk7M5v!*6|SE5eKx$
z;sA6k)h(wGMGOZU6%NQF$Ha-?09uV7gBLv`u=1he==Acsn8xF!-M*o>s+ZWNO=B{m
zluzGLT5FFVZN82@fu`@o@{GuF&GmHE4_70_H~IRznJUwH)5aanllO)gR9{o|{DH`Y
zKZG74DZ-DVRp+9!Jj>9Xo14NF_djv*Q@=i!qist-$ilxke3t{AFsDZnbV8AR-9&^e
zLCEXuQ-Z1S^0zZ0i{~xwwAazNFwy-$`5yvxvs?5lm*r4PC9xPu@r%xN(~fBy35*Ix
z3Cy97APn|g$rsl`O;P9aD7n$5nI55PR3Du}Gshv<^{`dU3Ui3c8m8#z<{wG(A1IZi
za6~EB^2(S-i_&iQP}HM2s%bNgjHQUv$(m~=@uMa&Nav);CZ6GAgOB9S+?sl?@M`b_
zW4<wO{%3AwhHe?)G521Hvj-Df5)L3hTyI`(#{rx@IG{uSr?82wPN3#MQF7$NNG)Cf
zGMs7{aGE9NubZcLwMh{pf=;NYjK&B)(DDu+XuJo0$X3(s=O650@n6TPh5{|(R1<N)
zby%C_=Q$jZY>xxxOMeQNy=`<q(8O?~=qgWKd*rgIaL;4XA$pWI_vKXkV`dmxRG<f<
zly)%4b3vm$i4N^(wC!-UtoThv97!F0TunxhO_X+(gb-}@;hmxr%y5!hYNxfm`d5(u
zSb&nyu9a8wb{O+;0I&=;NYGO0uq2#3D9_>vYA@lTj^tjUm@*Nw*)8#g7Q8Hg-N0$-
z)+M%kKT+&w3L3+k889&Nm&&N87Rzj$LS>8lVs1TcyzGsWRcRc%dlfU5+jQ6F+{JVp
zUuIuta*tOGcD+d_(Pt0ewH+r(?I$vij{a-Oxh#p;^hQaADUIK8a9Vha*FK@ksgT#&
z=s2a(B)AeLVl?Dmt}tUfc%tyd(`QQCI@E}S;V;H4Osu1G_uk@wzm)c#l|1GqmY|p}
zdPOq7DXpB#H5v2xSLc!;DZ|MEWly7Qo|~es0Ld14|3s_?Dq8RP{ve4z%4K<vyz9Bf
z0gi1d-6yM`C(Fk(43{3Zb${@*8J~cD@*R2+#M-s#jFOr(2$QerX0??OFMhN|rrg>}
zSiexAa!W%c=-#F1n8)k@+SHe{Ol6_4=&1){*gnDUF5!qIgCz3|kxNz0q&epsPw`J%
z#4Of*J|!^uwY(UN+C6lcX5Cn$Y#3c($<&FJ4^h|ncM<hgLH05nC?z=qC&f?t703++
z#T~Dry<w+a7ruh05=49j5&z{k>Wf*Ue@I}+gFcQytE{=YT6Mk(E*UWeS?;@c(?8PA
zhvVabTZ}lM;xi6daj4*arH59MJB|Ys*!Sryu>wgygc{gyu(K*KV@{4*_lH`SYPp7Z
zd(`ngfIq!P{XAb*jcbd`d$i$@crPD)ta7>{4{?)f(B$C$M}GH0MKik#YB=Dk=w7+7
z@T>lvtslz$v6galSI1ZS2#jptd^1(==vVio%n#2BH8pZLJDUX}b<HAX_5Z25JiF!9
z|KOj=7%xV61`4urCk}`*-0K20Nh&tzikKS?(BI43%?Uu3Emzi=7cU?aX2=v+gQADp
z*+ho3JHPJnEuU-W>f}nE5ba$nHu6w^s|PqFsH*;~i+&E5xmu3-&5$@{P9Gb!6CGde
zZ~QD{uw)x=ck3acTmbZOC}JDd&Q4XWzD<6;g6%_xgA;M1JLPl)l~g?f9V1f!;*|Oq
z(okVJ_oZ_yhMh)|xSY3jBK6W`bG7A0166LFrH@2rfu*nD5#B>wqZR0b`K_zrOqW(i
z6;3pUIPYG<0ggt|T7hl?WM}zd8LXNGZ}&(^oB!eqm0^^Q%w6;;iBn(VPh1{-JwNB3
zoFbi;b&0j54H~{CEid>=ajv2XAKCAK#K1t+c;kNB-PU^>H~<w9q>T+#{3O(jxxoJC
zfWuU%0m?nIeFS~a*Kc+1gXSXj6<tfar#EixThT4u4p?|`#U^}A5t=n(VUY#lppyz@
zU|Xh4-6uM75=8#ewisgs7et7I7S3VEQ5g^LA2>g^l@m@$7;}Bl&Sje`ih#3SHzD07
zU#M)?i>AL>j4m_cFV&1xc87aP+#^?aFLYV1{Qysl7&IA|IQ1jrmc1dl3q83W%>DGl
z-NZP5m$#S2nOUEw254I8?K2`^ppgQ3E4rE5gRQGA`)N;bT>a#^1d<LVc)AJ9i}m#~
z>-xj(8BA;-4$#B_J2nI58}0pj`O1tmhMYLSC{J1Mk&ep$$UZ7R^T)hmAn2lK_rhqZ
z`DpR2)wdh9wWUiG>Lnh_laZ#$SasmZPx5KlhzI%;cVY(h4jD#Xqf-|G+mi@`I>%bF
z_lia*UQ8;LMl+IFy_XuE;+cvSA5M)pHD5crr>^m{?ff}xTz6AWtq85H@?x9k`N_z3
zz4#vSD92+O1*P0r=o5^{0uCS!>T0v?oS+@jDWc7aFK507g{orWVcyky%Qf^?r~g9e
zDjC2J{0t3hP-U<efFeU;IiKSI7{(19vdtuHU`a6DbLB9J>W$a^Y0Ysid@d({=9?2i
zG!wU+{C{Bo9|BgPr_o)r<gq?87HIMFt`Gc{67Fkp2AK*)MQ~`+JbwL2LsxuQ-+Mc*
zMGC7JHy1f0hviI95AUN9lQA&tJ)L`^wMM~V`RZuKy?f0+5&380m?Y0&M`qyF>+1;$
z%ci$=T)AGNt8)`xoy}#vTksIsekqg$2jusH-Z(-L2T<x|DEkHPPDh6d!n0?hxI?sr
zAt{=~RUKj?LXQ~kdiUdiuU*xvnFDLV{uRX(6Pfzl56RE7s@m>WN{=ihF0_wTy$s~e
zcq(=`3y+Z1^$E%o6C12}7_17`L3Gp2NH-;x?@`Zxxsm)A4KwF_cJSv8h1<dXaQCh{
zTF6(U%~p_u@3{aE(H(}Vm2wxlR%1(e4V7qa-+D*m>FJ_@kn_JEIz0@*jr`Uxd~^qi
zRWi>!4AB7zz%=?}(1~0Im4}Hl_dN8rT(Z1-3Y#%gEwZT-Hm6AO{@F@6+vnL&b5=#F
z4gt@u^}XSK^I3X&vWr<=QMHj5Y{X~|4!CHtZ?dd5`luN|x=fi|-j>i~OQ`6Pn{kr0
z65d5nRmaj2izc3XaDj2(J9T)+aLtx$jhrD=?l8kT21{v4tu}W^H9zU<c5seP+J)e~
zw5VTNq++8cXU@Udiz{QuDvuY6F~xN`=T43j2h`X(k!W85^cnRsonSB)lWW-F36oY!
ziXfja^SV)$<}>KKo8`7q!dr!Dnm3H44qvB)8?6PkvV0!9zpa6}{Zesvp#%r4h8hjN
zi)&c5tjdh8Ca1-|y-YtCs%1@9Y<GFPS3EzcM|cB$To1uG-(+!EB1b*PP8w>C3Qw3j
z<eb!f@*)~0A*%kQDC(mbp;i4`Bb%n?!?fnbt3j67ndY6^;QhkAdK{24HEa2G!5s$_
zyX%rqTkR<<qqf>DfuOUPwYv<c-HgnjS2kjkEy+!t9O<tZTRBXYGh35xOp~BpxBI%j
zRwnq9Z>(E9@Vu9RdEhX`J1K2TQJCDq%Wc!FFZh{vRxh%Ve<3MxhQe=#ZuQn$M!!~w
z0C)Sbi<jNsiz=^Z79$Gl7tIgL!o@L?==9l5uCplPCx?apuE=H0JjePNw{XJ>UkN92
zZW37bo=9TzCud{D-K9dLjB78_wJ!<>kYmVaU(g$%GGaW7-}jYX(M3fPAG<l}+|5;r
z|6+39B)U@0<UsI5R>>KMXK`vn6@~iUiuR`Wy@0i|f`Dwd9Lao{wjgms-2p$c8;WW^
zY(}oS?amo1ehO!bIrOqTl<c~cxly*Q*OHM(;3g;iXg$OH(q8NrPScMSRQM-*#Cy5f
z9QsYjBaLpI>}Kq^(${u+VOwyocAGb;I!fHxJop@ZO}$g?rrVsAm-q*P4E-_K-RSg3
zi>!%`dzpFaHHY^x!v6ConVm%|(!NC6edd%}_xo^Z^;$!KBNu`zo~iFJza}pNd-Cz1
zoIJ`=yEgM>%5>+wopr@0oi_GX`c0Z6JttLUWLkSjDEyn9R{WkPO(eC6x<>L0*<rga
z?kT|4VSa6T;(^Rk%K<@M=BqfsnUs9b`zboaXPVZXXYyrowWnRSZK39VI-N3So94U6
zPw=)Q6)xsp)6UN`+{6LU)!@4^v*j?gQM=ET8$!b6%b|<a$!8wE%&pbc;FY`U3ifuL
zlNhA0g_Ew2iAtTQkzn$ZW2>{|%1ykk)_83V&-5_IbN7w*Y+kg8oT^}7!1g(KO6{_f
z@fA9%mtWOs$Ql8yH=zRuHkOv?uFh#Pw08cTFICdH`ZqOu>&|h;2<c8kn{;Im*Scxb
zY`18pV^S^#c`VEZV)$obZBDXJ%*}*Fng|L-8#deEzaJgiSoYqRx~0?}_U=F<=)>n$
zcEoO&ty&h>ix-p=7TviEo4l@Kw&?32vAJNUWC5|@8HWQ%6rB<1U=KOuL+59SQFFyw
zg^^PniJIw%G%1?88=DtAOacg43860(whmMmG&TaMCt%)XPL%g|c|)E%m)A6OYS%Qs
zSu<V8U0gjLB4%Q1vUdqhIeSxqbt0&*BU`PurqtEGSgkS2A(4SLi}C_Dp3k_D1#9Oi
z-<`Y|1#*lO>h@@+Qos9V;Wm_Ei>uWxg|qmavgxfSxw!=qWWj!2eMoTpsEZudgBtB=
z3+Eo$^b6BdGPx1goifD}<9kZZZAp2eSrG7%`X~c8>Ay>fZkQ!!4NAIzYL4<Z-6ftn
zMyEw$C*i|oMIY75siuC+EHgrLIoYf)d!yKPp?Q*gZsuwz`)mRo==V?1-c9$GDqL#J
z=zrwaz4+YyxCR#o)7QYclD5j|Q7i+h`Nk}2<a%(yJF|;%=~v+ewp3DwEsg;neJ=|h
z_~L+W`F_O>KE<6g>AQOEmHWKYpfB+FQYQAsSs%}IsCgctOWvn&aIo$Y$XPq1=^~pN
z$Ojk|^w1pg+T+W@O?RFR%dl)VJojOo9DZaGkoZmx(?$7X7RSB2;DE>6OqU)(sKvo)
zz597pnd6!cDQ{KmljI>zSpmz}`Vfdy!ejSDY**hH?zy5>k#mG{{O<V{@19(How(0s
zS<4k6_1b<e>uLTiU2-l4Zj-*eCmJ^;L$vc~Fyg~mmxWJaM<}LJS#mI0>0#$@71;tT
zcgrocT=J?F+epRR3OkUfl!=RK+#&OIu}WWbmjVfTiL)phfRE?t6;DE&91m||<jXF*
zC<vgFAE+3}E5$h#Eg5Su_Ev3Pz7BirD94g?QKeb1{_9HU%si{OWz2l$%cL7@%vlSK
zD31p3kQ$nl3tLl!++w%NlIsDVqGcG*3mmX_d%Gs@Wpe}@*>+PdkhpGObmtU91)N$a
zIXOk0(Soh)+WQO55nuf?Di6+JE)V+i8PC7ntuf`f*|gqud~eKfLz!djts=c&nB|<r
z1P*A;nROia=viJ<E~S@sAzX{|{X^5dEQZd-)X!&5uA6`CUJ2{jcGcJ>Yd8=zHw*IN
zt-s^<=7gC|l6UfKf{imYYCS@l0(qScO*@+hk<&g9U%N|DiwaDtc4a1YhCQkajyy4$
z(2=<GOy;DOlvGN6ODnfSQ(N4;NR$Eth6B}gBh<preKSYz@l9I#&=k#n-q*3RGC=AI
zq!Nbio@;&=*L9g{IMmy|!g(T~^s$zZp{X9z8@WksAs_Qa`T&_%0+nkm##%f4d6&>!
zZQ%?kljw2@ass7W&feF!EiA%ufaun=Wy*fvH1~F*tF|&|<q|;lv;v)x!l=-k18SG>
zW6fya2No_V-6!tYhAfV6!Cd&ye-dD^OXwvNQ{qkB-Oad$1CBE-V_U(e2GLuWH<@(7
zH8lFB*?2ViO8@0|P~O$%MSSFXBj`^uR=QPEqogqreTRWyPf}IVdpwJsFnoyrzSe;*
z8Y=H@?Jj$7FzQu3k+tX@6;Dx3_N8}qr|Z1qmt&tF=$m5uMw{@AQSrfY3S8pq!kJp1
z(i>mQG>Vls_tbfKUG$#|88z9|DlkK5?xmolV<f5ChAz|&3G?s;G-t|VwrdB$;UXEC
z3a)ujVI;uzLeZ&nNtCTt6I<6S%r0w(w4{2ZaNWG;R?(hj)m`u7bLxcaLwOQQ!zsB-
zCmu*|$kIFmdw|t7iFcK6rF4qasV=cR;l-CP3bAD|d5nE+`AMk}9M$%m`D!6=iC!<^
zO$>*)cF)CMRt>bp+pJ3+)U*!zXq4Y2duA4r+V6W==YTKN7^PF!rtL}Un*AmtHRPU-
zPkd$|Wy@uLJLi{WFQSbYiBiWa<!`=OXTSY5mmV|Iw-36O!ZzU}O$?cx$ZGOvIbl!H
zx*UT+j#z09_T2ZEXfM*~e-<U~W_7ikyS|45z`=IcL*K4!YRT!;;-ZSW)0!%H&))G6
zEG+lX^a<tXdU=;`w_|nJ=b8__hCs(^zTD#lUcX*U9@SQ;|A7>Gx%KPlTm{-h`&q17
za!zl`I~$vdX=~vZXP?$@My=Kp?dXkHVl_8r_GlV#KtH&f;jlPV_6n+`%O_N4fYVt5
z$z(R|e5y#PmLyX&%`F8PZ*xZf>NBo>lL~AH@{@)GsAG%!uvcmML#0(M^75L=JmfTH
z1y3Urm|zlRg)ZLj^bozJ-4bfsT!pYFC=+JyTsv?FkK<n5sGFR=KdQMS`1Jm>fHIoF
z7K~kVNI*MDuGmM&)fJyp*K`k)Fe3NSLoJpRjlyO5Av&59FK3q{jSb2_r7H>oL0rY5
za|`%m7?H4&P_}J>EFP3crEE>kEz%crH(y=oBUqv{>Qii#?K(Slg}VPBa}Q*h6ulxG
zFz!Kq$kR|k--4AmOmYvU&o@VB&*-?@uqup*Xs-rR$u^j#X*Z05_VBGHH(P_`RQP+#
zPg*;92c)0`7mZ13&*6X=q`esiZ?2(HO(X`tj3>)s%qC>Tz_Xg+e9s5|G>U`I^%Mk>
zY2j3NmqQM;#+^?&ZcJ91IvH<IIHS6(IAXaeJD(grg+fbVw2SOj*c=oOGO{+1ox**G
zu9-Ao=*9fxVP%ij#iZNVgDCbBvR%*@fqTir)t4;x+_&+*#CUQv58ZLgU@R{-E3jtW
z;R|?{fxy$0+l8~aF3b>P-#+t9P;{)?rdGbvceB8G@l&=D-KP#ualr}(((<K9<*9Cl
ztycD!%d=ljgVfkFwM9Q)h|?p?vTRF$mlG>|yl^6-Y9^PplP9&b&ocNk^Bs+L1;k-d
zs5Wx@xmu3N<BX511tw7!DY;uF-!t5fN)h1;>(@iv<8uC5xhJtL5+$h^<FgWMUqp9m
zG>`wl>l~s1ad>gj?g_J3Ozo_9{QNT=%XyKFvF+eb2W^*yo;|5g)w$i46dK<1A_hKr
zkt&7}9{PxIha=J=vvG&ES}L3=$Z;=x45Qvmr9j_q^JNd!C{xX*8Phx4P<!m0u2DMa
z6Q80)wcuvZHZK}Bf7Jl>EM2o?HL1uMwC{nTleuD>@Wr)f_AgN#{E60H1%eT3y+Yd+
zW0V!9{u4^?1NpgjO1hAr_-78eaKJ#E{&9!cb9+I}BfR~FtI5cRdif<|Cm;V$T!&;J
zJ*9ZoT>YSARf$b-R%D-MLm>sZ(pmYoZ$hNagiZFa$f9k}jq1bJxhXm==Hlp4v*aLS
zUSE!jkknif>g+>m6g?(3fL%**$JXR!lYE9I(j7@ErvWd2h$W?dfN8C`EpHY#J~W`f
z+p|r*XRz&__-Nt4x4ky{?PkFB<6?3*r2y!Xvg+EaE?Ddq(ou0=3I_IqB`puh%=ZuW
zZld1wwN^5@OpM+n^dt*a8TwdWaqN6)@+I~hA_}}ql}oRR;vb8NJgCh7F>8bHiR*O=
zdWX3fWfZa6K+Sb@J8e<s)A#D`;Y0S41C;Rn+S?0W&C<4!t~;macrgO38d?q8>}{t7
zHQg%g4&;U`%xfM5UUzgQq-5BfSxt1E#R2aEwa~{)SC}R-7s8I682oV3{jk`Vg?wpa
zzZT)o?N}kWgP7}`A#&ST?pL6<bWWM<?3}j<JGCaeCqAeBwD8HrP1xgNBhFB*g@y`6
z2gM0;EcIyyZcmb+d&lq0#>v=pec2Y+QWHv{Sj|hPZ2^)mryp<yvF%}nZ5J$K$NEMZ
zHRK8=G}uLNG>B_9tbR~1vMs1&VfxHY?VM>@qt~UQr}?BIs8M{*!uejJ9S6Vv>u7ix
z`HapC;$#U6NLt&z8|Jwpk3t!@T^oZlZhX>mW_go&CZX|pT(~YGZC0$bZ#uk3*OL5p
z#<K5$YN#;Uw=k4@TjPdAGwJxGi+5*=@N1=l^IKy0?&!Cgyu8!(TkkC9V%qy?Fg({x
zw%KFcYEF+eHW*626y!=b02r-;*wR5Cy1EN<tB(#jM$o(y>fagO#r{dny?K3a9S5k;
zN{2>S7_PE9Q_16FWrhXHtMdU-fWQl=Xl`x(#J_?Ac2~)<5rQdW$VI|Z<XB_#S4c(O
z53d;AoT-ZnVg<z_|A&mY5<Dm=Kb0NQ4|jY5t`PHkFj*-{?8wrOXR?Zw)YYx={_2Rq
z`s?cMKs0v|qw$B%+>65@8kH+fFx=?((WfaJ#uE~8>p|+P$0IN3ubF8=t4s6ASy6ri
zeRE|n#<#Ebq)Y8*ubEAsLi^74-8^OEtROx_eLk8_gqo_G`&@8ZGSSmz7zYar$+b)2
z_hwmUbV|{I2C14UOLb>s<Aj8|%HTEt?+YVvW<{{0c}Eht7;lNW;OhUPcO!J_et1_i
z-p=|?Q_AS49rd}F;_ox!uH7jTcp$h&D2rL2#OQ;I3EHqiD7$H3SQxMArr4RgY84y=
zw=|z}<|{`*Zh0Ye1q*&GnjIAtRELeO#3X|~-Prrf;rDtL?Sgn)pYgVXJzujRjrNDD
zCF57_o=@(n|7_=MAry>x>zceh*6IlhJ+n=gGmoTFJw3B35C>B`nc$W5;lvUP`|UBX
zk~Z4;nXgREpVUQ;=EPJQEkH-q1gqHrPJSDFSof++F1c(1iuJX-L@1FsNl!*}F#p1^
zWTo^b^O6YT=VGeb5w4`$-hF3x*niA?G#qOZhg@+1dy@>!=!evwiCK^@lcKQH=(3_o
zr|A>1X2P4i^pyKz47X*Bbv4&SJD&@4ovR&?$Ps4h34FJ~kvJ{iCH(X_L4*8Tgv88W
zmo73mXS<w$+|xv#taRxsBPQG#5~L4u`|v5BJ2zMTLL|A}1j0&i9D_iugPsSe1ad<W
zVJe*d{P7jjD!G)Vlg~74v=pWgO2GUxy}z!>BXEtSNh2jx^)N4-B~))L_2pfG$hj>8
zcwMh}wTnKn=qtwM50ZUO$fq%_!eqOiIN(X%dB1}Pfv0;I6pF=Yo8k%f-h?b;j$IbU
z0r$#q0JqKe$^Jhv5)0P4XVEg6p?qFD#Lu+grb+=H(Zh$L04xGn5$st+OV2)NI$qh4
zRd8o_r*qU<>$(kjRlK@7K6T!ghV{!}N9PC9E6x#ZdmfTn?sL(D>rBpKt*s}T((Nl&
zrJwN>v*$RLDj#p`&h<#Vz_+jsAEQJ~&N)V5X;#OA^{ywhnN6SN4Jx_)x{TveaVCe=
zV<Pg3flHNt-Fo^}uP1W~!iO16<U!5G?hec6QuP|r>w5P1L(|eyQ_Mt-92?FT9lG|1
zvxOc<(YLKT=c7Ds*FL?t@b=ET;=AYk@ZP7;AEvi0=h~(Iv$~`eGRZ8tlF-Sh<~Ag)
zUyeI-$AI?n7e({)iae^+2%_$8R&|Ck`y%Iz37%B@LNkv8`RkOEUG+<~XYSGSHne;I
zN0a}GScfO*<ff64cM<jIGzB5KK-f#jz?Yjz>1o%M6a(YKT9YazXM$4O+1R(Elcl2O
zLkMb^3Adl|XL5l)`iN-&N91Xyn!l_;L;AMq{<E3A)9B-*>zB0bugIxgrI4Sz(XxXV
z(4<hA7yMOEYtZiv0+ftuSG%ewRmv=)ol*syI{-3w_n(OcOXYJscCJWNNgF#NNV@aX
zSYoy2vW}spbM{JO@q(>flkcX1f@RKq^0P7XmaRw##x#U5)Y6qZ)wxYAR@3gJ!aGqj
z)1*eR{P>+^hVH(5f2l6?-;B=nEZST~)3;4jUY|v&*H~p|BzwZo3z@)>Jw#FJeQ;6O
z770bLO(T)s_~dNWlziLd47VFV5m)#mh$T`vd}@66tL^GfTIgp*;t~J~;F(~$s|7)}
z+A(UV&G=LPQT1)AP`L74t(G;d#nf)O6djGZ=AEP)r>>(g-R?HGUTG+8^2{pK(^{d~
zJ48O|%xCZgZ&W2Pv-L8^ZZs;+O`UEJP=DJZPH*$+?6O~q>{@R~c1Hi8v{ONB@8kcD
zHHhH2faS;P^)f~UvrUf@Et*}!=aQT0ZR_C#G3SSwO$R;g1}hf<&FxaZTVy%?vsCiF
z7wCAB__y_1oAH9WI=^bXWOd=6H0j6o?Kx<jRabZ;uE(|rLoD0o>34Xdzs!A!YC67x
zXb(S8BW)aR)0=a#uuA|6D-_{~co}SUOU=*n2Xh0iO{oNj75hiPZr@4d_Jz=CJBwGH
zD7|Zbc>p?irk#l;lib$D;x#?BMDNwafHG+{)`(e?xS-5E9klHHC5KBM+KfB<ykVcz
z9HrA?C)ck}wEC{(sD**EqJJTKI@-Omp^26);9LkRsz=kExpj}&nWntSj8H_PSNs}x
zOh%_JLV29%^)~MjdzW>c(s-$3Ec>>qKuFM|o8Po0QawG8hd@z0_rN6NFKg1yDQ}xr
zmrlS?&u!fZXKp}M#tOgbh#FA0dZa&6X!ZJ4dpbwrl)Cb4vXKH?=qc1pJ1HJUdd@yw
z=x$*B%fT!0tZffkIlMo`uKlQLHGbBUA3)968Y+QSo!z?LzeW+HBD;`e;51<^asRRn
z2|Xi>J?O(zxiA#<+}O(`iq`o&O7mVx!JxroLR7cwgxt)C0RkJsHFfWZAQ;=x8jwHR
z*Pm*Zs;l?eXUThsjJ`{cE-6r++oJ0d$_V&E>=(&M;{Jj26iM<V*?X{nc9u{Ya1o-#
z42IYfRauPu!B+f?z=H>q4xshRL$>9hOfZ7%Xrpk@1wthuecvc}VnV~p4jsg?RXd;&
zg$6OJp#A4w$qCNYBnt^oYpA7TI`P$oW~3l;sTR$D#a>1Yjcr|7XvbM(1cr4Xq>ml#
zBN!)q7V~DA^IT{QJ?V?Fo=)f%B!UHsFT?PNoF%+>hEVRxgE!?a=C1E7FS>s|{xtQ~
zw8l0~t4h~%_wL<F*S~V5O8P#47-eE)(Bz=ybAO+m87;AQD*onDPFWVo;B}eT6<%ez
zzBS6StuigFd5Q0lZIa$@vKdQ-iIS{lJ=d<HVLxG0&dT0W=3LrUww`@G`^9^f_7`Wo
z)sc-)D%7_^vGv7oMjq>*eu<}Vv<DcL{&%gN`WL5#|Cqj35hExrh>+86aYIDs*T`wR
zeCp!-VnWu1Kyfy_F`1o-tVz0IjHKSO$z9J-u*~S;r%d9bXwZ$hcDeXb5bcRwM#(TL
ztPamd<ZdyI7E8>*8)hqyFW81hTR-|<#*Nbsp(WDWu(_=$EDt&q?b=bh>FxNTXhlEv
zlx7wrJw`Rs=zvN<kq`q4Wh90rl(;~ste7?{pqu9lbT$PCNQsXm$_pMXSNfB8^`kFc
zm{Z)=^~Ms_#g3=AJo*KkDx1Uz)}uT(_EB3c0bfVzq6a@hD$V87+nJ+JaC#~8y{EOl
z%itq8_D@9Ra}#I{y)8+bXwawUbngV-N2^TDYp<TElB=*wj;$DBPC_Mmou&$NrTRQ;
z65-Kwj69a2!`rd)CY_}eVZc3qJ)tM@k<DVL_r9P#a~=LkzJJmz4}7o0h$P>c-y(9t
zN~4y(MEUPr5t4)x3KY_~3F0$Y58V<x-J$?NeQCEO=Z%&VP0_GPk;^~(L4E32PiW^e
zkss;)v&jX$Yn$eqys?u?!}eK`mC2l+*Ip~J-;}ACX}i_!IhVl0s6M7`stOxsy%xK)
zslcu!JZz$He8g^K6rb$YzAK4EjLPexj~&5k=kioiKqku1|L?G=|3aP66Z|EWa)zrh
z<$(J85byKs#k%j=KMBy~6II4vkUjOe<6`*jhd%xi5Urb}$?AK1MP9yL=8kVRmDq7(
zf7-;+h}C1hd|6Kb?WyH{cAAcFsIdp`_S#E@is3O^eFraw8+!um3%DzyWEIg<CW7>0
z^7yfzx6oP`Z~>dj1aj5~x&2IWj(0ookk}bqYS;}fzFJwb?oCoIXgt7DA?cC*wRvck
zJd8)uk9lNGeYyIbDiuKX&+C}k5iY?Zz!enCg+Js;G-i3V!z9?({8I%#XG)Lx)hvw1
zFKT_~$6vhlOA>$7D!&xOkqZ8QTtOh`B!`7A*=^IskJ81fWG!TQP>Jb6yl6GqFk|^;
zf_(O}2ypiRzXV~N8jS&izM3~vOgzse6?bu^Axop|;mtdZ9npM~2hYb@9)ASpvsK(S
zSN@58$rB{A3nryqo<XIepJSt{!7WgWIAFmK*`_zY^_6Z2TvM8g0}=*aw`Z|kQ@irz
zM{CIbQ!by!A2jj@E{EX&LdCe|6$?EKnI2YJ<Ht-ak7ScqBuv0=%8yn2l<8N){%|^e
zQS48g`HRPXN#&77`lT*@DcD~o={E!W4{YTV^u(vPPMI&WwVi7}i>lq>F<UUUjbMo=
zR^IL8JL;k;6~qdte>=IzgfMVHFiXUt3iUB#BEQxq<+Z)34cV!OPY~X$C;|2#d#)A6
zCw@5%-v<PK+2&ui`IpE1%avd7`{kJaa!mieV;Yz?G+IyoqYZTpD(q<IM!Re7HJ!sU
zDk+*D4cWbrPbw{CF1B;Je5`tGKD{tu^P})>Zq#e3Zf5`GCx}vcQa35@H`=NK&3fD5
z$aY!MkmB;^hdzP%L>i?E)sE|0ygY>y@<>&1guDOyTB{e{6Y})ulgUOcu4r;SA#gQ#
z>c6!QaZ%jel*UFl=gW!^aca!n;Xm(=Kf%>i#>b829zC=M_pYgx3u0ebgVV}*9;5NS
zd<C&e|5Hvlba2%#dM3Z)!)Nbw9M1kl#}%!Wg%K{S%1w2|*8)!*uz9+R+=owt=hzS`
z?ogXwrT~xhFjU6@l`@Jurp3asHwX09Xz}o_I9}aHeeJz+#j)N|{$1lJqhfmSohzQ;
z1_6s0?6ddCmF2R;m#@}K3u5h}{_b7{L$d*0hWY!x@fQ|1B<XfkOG`21IN*gw4*Qof
z_VPij|CfTpv;(oT0*X?KTlycm4%ZawwD-rRuf5Z&;w~usY>VAxb3;xSuKP9L30sl>
zmGl<{{&!M9%iOS>FjQE~9=}dmjmG%;-xoJ<yO34T11dd|wTBBF=Dk;Tjkv?l7#~`r
z^RQ|!Ot3G&xwU>$3540b=UN5MpJd;^@ucMbavaaaSobOp;1HI^z5o|c_2ao3yY=%k
zN9YK1g$?Z#Au@v~l~8x@w)m{{c8z4+<Q}_pG4CpJ(O?L<4(_|}Pw-DKd;dR5JN`W_
zuC}QoAI87ab|s(8^W5P~(iSwdKu3z#Gr-Ig8h$NyOK-4V_;k_@Y4$x5J<ol&kr~Nz
zacU-=0=ajt%vUI$1Ls<^lhgjgUh{$9_nL=*d(D&nw%7d6K_@c9HXQIt*x7P-{b-$Z
zz~iHRNt~nU;K{|0!INR2b*}0TV3RriSlsY`yEx)MIluT{*p^3AmRM!!=G`{EL-=Us
zb;U)af#&tM<cD;8869U<Kts}-UnvakIE(WFZIeaPA<ORCg{3BZ&^&pte_i(mcYEBe
zI8Dt}Q4q_Y^7lR?t?t5BQt`!yf$X7Sg0K1%r)D0W7PVx&9LrZ3D<APMOt1t0*?G!O
z_8Z$fCq_1<#)R`@l1_`hBUYm^8D_KkN4rpr*>8e-Uo1wD>x0NcV07tc>AsMUd_}AS
z+Hxo44#a#U^~Ye21kuqZ5pB&Gze9e=2R3kpTe3Eoy225h$(I~8;hnYIrvVSf`+(fx
zx@7P2MI7Y1TmR*o5dW?-@vm)$q%_ns_m}trinT7dIPjVma*dH4o8+=(QS4<Nh)ENS
z2UVDu;vvh(4Cq!K$@Gs)_;<PZ+48>*<dt*hW+yrA9+Cn`@QBxM$6r3W^Jg&zPmuU?
z@ZTrZg1ly9J0fIeQpNME>XX{lx>c9X-BMS0v`9YM6u2$2S$x2Y)|!yFov(}ODVesg
z7=B@;VN=`WjF>D3!sOLLiIF$}Pww!>#>GyBW0E~0mb_ZS<H#BHVSWs$EjwBubHkJj
z5%;n0u+6g>{f3@3Y2cLL=PE2)@c|vOf9eVjh|Jl-N@`%_wfBU;MIg^UgT+bDU|iy&
z{vS#?^Szzo=2CemBg^rWrO57uckWfwI$VZbSXsB>i$dX7^(~L1L-sx%-jdT@%l7PW
zr=vPlm=rf9e>wF?DO_^LF*?+HwwY-%rx_o8O>Zn8#-mMKUy+JvTFHunJ{kT;^7wC#
zhvb1v?qBhI!PbM0dVcmGDh3mqEq!>U6$cb_IIMw|f)L!LT|tFh;i+ys1{#X{3810y
zLasbHQ!Bu$2091!ct5!4l?Fd)RB#h7t3QqYxm$lvwSCi^0!xPV2Mep0)vsVb9_qh~
zM8DqAR}0nO_z!x378Uric~V({Hhd@`55G~rQ2j1#%EW|4`6ZG{;d{@SZLe@$5L~*_
zDKoyEu)i2VJ}p^uQEs+iv2KKt>E_glt7@U`2ggH0N03fiQ9hwcqkgyjLT=YE$5E3u
zCU8;`sn0!~rO?ILPBh(pP<Hlw>h(I+>uKrzVBaJQ^n%Vq)IwDckfG<dm)jJtW?%);
z{$<?-Yvqs3C$knb-`_5ZwduJ@n+Y%7oM@UZFIFp658FZ`+i#Uzk=Y{>?8M53x}s)U
zJn>Z*6sgTG&M5nbd6smLmKtnIcAxgSl!gZ}O}#q~o(6I@20tbBU0PA46*yFkPor;1
zfni7ATj-<0jw>=Wv8`t}W?p7Rjnu?BkJ@t?)rl3Uy%M;0J4J7rx01F-;Dn2gp9!a7
zm3G74F1V>X<L_CmZ*eAX6~Ti(8+A|zd;Z%c@r}&kUmdFMTeR$r@jZOQEZM86Nyeyq
z;l?6c%}98k!tpUVdz5%fxZt+rK+}l-CAPQoYXKJwg`S=vK1o2U%-c^iIj-oqdFt{@
z-wF@IcVOrB<>P`z?K%1`MKX--XsCA~I{fwgB$mScvOC+nSu9O`Hod=?>XV!=jt)k-
z)OXW2YyA1^o6J1;x+VR<_9?Z+&Nd0GJ&TS)HFq+7NIhBGG*lg?GCzHx^<)p}^Jk)O
z)m4EjmCT3np@H*#H<4AA*D&r2chq(-c^Y8*57c@da%nBjyjRIG9Wk<|G>JGBJo-9r
z&!a?pJP{d^_B9$Tov(WvUnFK?Bag_oHm9S^ZRTBf0OV@PtGTDI#N#)GCRME-b8)ny
zZe!FG$t1|p7TQGU=8g6PO^Y<AM$BF1czE|kD*7b^2bHLcto$^%ruO!J&6AZheKloe
zHQIepen|BWbc34bHIf?XQLc00S@S*YFqa|cs`s@ew1xI|$z1QN-msbOyFd3(;#g)C
zEQn^hAks7R{0VhwVZ&ENwa{&pIJk{lF}6bT238g$UxPYlyb5`oGI>7o%O*(&X2d?)
z?Cd6lfS0j*eR3uEZLu_?PjkC|r-=z@L#z1s3K~WBu4XO_vyl%i9p0IHT)$1Vc0R0F
zlCyx?$~Lw4pttd+Tvh8-U4U#^37IqBbX)4vhL33%ERF>Wm3I~l)Ql&{El>zDroz0H
zUfybW3*laPP<8?K552-6Ry+se+be>rGJ*4XuRd3i-m2DD3se5^AB6aKWBa2c;qjxN
zF{I>gj=)5Kp21!W4p5>kH#_xXuu7@O^I3RWb;1|D!CP$BKe!P-;Xk<<#+AOJpbPgu
z`LufoeT~HKPRS7AhX~mhr?F@8x;)Vt8lZ!k{d{Prf>?K9p0w-m8ucXBLNf4;A?Lt_
zZSXv*%R%6H6|JuB{UUoFZYee9$%*9FK33;E=^D_j_<pMPkAB3z*R7~LiGH%3IhWaj
z1Bl)%h{T3-)b>lfx=Ju4Wq+roFB2}9bsV{;51ue%n+@!&ed>!vGGnPisRz~TwoN($
z*qCd*&bj*YeZJqNPeT06OmioJVQeuFbVogKzyxxyVsuLm?|?E$ER0$0X0NmY_b{d0
zcq=01lf%TMfDiqbDz*pLI{Xz$kh7WLERCbcIfcUqa+i?XA|3SSxa(s!sgK_aU8d4q
z-W0z&{_%#@?7i}pxe~=Fx&n*sIt*>W*vsr5wBnu_+P*AQX=Hpeaekhk>&Dkp=94#h
z{cu2Nn?~?FbGs$Vn0<nt8lBT2Wl2zNk(9Hsmm-|DR&tPMM?G~7+go*yt*dfvD-r{l
zMaG;u6@+X#MVb^-J-&TI$bF$sMT7Y^2XoFy4)-pv<d{jZo=_}0%Y}E5E*S^F(^1$L
zLC^X+w_=}tiQA?Bpb;TNqP2KUc<9n|a>G`IksbDE959<@`D8g}L}%=}hvWl8thuF&
zw42_@u@t_(!_Sss`j@=h3@y$CC|D0Q+swSv&_16rLoC4y!-h-<w_-j_>v)pCQ{A(M
z!R8dBnpv6tKla`Otf?+t8xGQ>89{oB3W|V$pcH8lm9El>)QEtT2#88Yf*>GWx>BWy
zNRuvtM7n^0^iD`ZFOpC~Bq9FWnKS>H_nmpaf6jO2T<3iMH`m1mvUXqDd#$J3_x*hP
z3EEUXI1)8Hsr`Nc1jyYP9lk`ZB#~gi)r=v3!D$^KlhM>jXc!WI6R_Yr*U>9Q8~GF9
ze{kD;;JCsx06#OIEQ=fz!&gw5N&&sOSqQ!dTFrCFfn)}5D1873>5iiYwX5RqsM1pT
zw21&AFwX!3r<||=^riveZ!+?p%z-GC3y^z}2XsONDv){cj>4aP87Xq{8y!DE*K=CY
zq_foksLCtl30W|xXDTH50||y%V5Is^Xfjc-%!pMz@qlwgT1>@pWQ*cYkbTZ^fV^>~
zGvngp#2zY<Lp^dCQ24o2ksSzFMRbuQU1CBp<szXUg}JG5Y-Qm^n{R^R-KX4h05BZ+
zXbWEU*!*GZa6r2`R4{E`o5Hy>e^y$Zg93}ko_*!9f8R@-GMU9gy6*qtvBv7+OtFyU
zqw9d3n@6!E=@x5X1_0ff`B^~tVRk}{825_vMZwFp!IsB29v-B|Eei@BYuoFrmXGDp
z0$8i+@mLm?C%0M`y?Z^pIBv2=uY8#=0pFEOePh9hZ)9r6G&2W{qCrq+r4v*xWb~#y
z%XVYVt(O^Y8W-dy>t3FZ;SsE2Fwp9F!VXVF^8u$#1K|Bleiup6nEUk*HdS2#zxTaG
z+yOH3e&z~aKX--3_<?zaypwFfP6R-!hj17seR{ZA&4rC%k7;79EUy%dJ&*6oq$SQa
z->thm?H%Fy!~DwS3gtSE2eZ!Co|4-%1DDC_)KMfIpr)0@xrSuf8%;l;1A7(D(lF#Z
zGj(m<5nX)LN$Sm2W$1gtXWjmXv&CO#jsL`7z5^5`!~?nhEa!l!Oy(%^OL0DOJQH$o
z-rT(ch(~3h4=`fX5QKg!fM|C{?=3S@qdE2O<~=qw(!6H~OC<?VH9sKN%ets>{(BPJ
zXj<f#dhnqapfoo)027K#7zZr#?!P-d7J2S>4V9az#5b#_EI8$S-x?KqDHBrx_}v}C
zpMPi1)BMmN2w=*?2ob~$!ay68J-E*QCn$IbqN<gs?{@m8hv=j0qI8Vr8UxlGG;|A3
zc*wkg)}yNW7`1EjIqe`k2@sJoi4`+B_E7px*qmg*^Ouw%wuCSrsz^k?sO3(q4At19
z6giQKIuMj^B<njXLg|8}NHF3jd{)8GRLjXhiVG*rqTI^PyLUF6e4p#iG0_UV)XW5R
ze(EW^C7XB9A;10;#OUz>8y7n;0KW#XMMLanuDOt9;j=ReUvkG~X8W01OD}LdxV&n;
z-1Y<M44yXMHzOt35g3g~_V}@}g8&=uVO;|W;)Yeq5x;x%7AbMp!tWWjB)|kBhk>K{
zJDXEa9VP|BFj>kE-@E$PRXVYLwb0Qm%Zs&6nfg2*+6jIm2}H1fLE$NsnR?n6YK)g@
z7lK=Tqn`9NFlwy+G&n!HROJ<G<(nWi`bC286<;z<gRaPnzE5Uc#x(~;Q^e~U&9}94
z_qsNAa05})W29xQ$D|LA@i`JBm!>x)wHGU3l}KNkjU9ieeg2X3vNU;tM)#h_Y{R|#
zF@Aepk?Kl-IxTmTJw_%%Snea~5q|h{8fwx<v5q}?L|$N`t+d%1E%|0lh--vS;w#^C
z+7(C`yc*g90n}m315Ge$#``7&?<9x1adx!pJ!$~Ls<ls;v~Zpf$$U8`(+M0CJBk<E
zAmbsJy)9rci$pU}Wn8CF5fpvLGJU^-Hijbg?2lQoy-F6j03D)P#mGu&wl-Y<&a<A3
zf|)h#$7u$irVnO{#U(ZOw+?b{l2|&Ryy~E!!DayxBSEH95duiw;CdyycVo4-XS4%c
z{M;?Cu~sp7r_j07BpQ5OxuQuY_!ATZ<xrOkbUrL7JWQi9k)DxcZsGgV;iL%5uxm4$
z3HBZmCnnyeUpP`#`Ec@-;qnMYmMjuThfizoqqPXVJ+c)D%ViuXmGan2`<Qaq{C2^s
zJ1<gY!yU};D14V->X+;Y;&rXdVMfirhH|Mwag8<aJs`I&(JT8rXN*<e4tsnb(pGtD
zt@G5E)z0HOgT2JVq)e`<cm(3f=88f=V)WGV0J>hWWh**|If$uExR{{4-<5s5arFM^
z=3cv;EBAOL>r>J2m5V&Forf!=>FtDuX!)RN*b*a!OQwU$TTePuNRqbEV=tdWaPP}n
zgwM)`A1&n2F?Di{m-V~vq<Rl!u%5wlSugCeep6E;zj2gFqAk4m2f$FLm&JR;o(A;Z
zv7>~tZ{NJ0Ji0Y(|E*m@YO`otT9gMNeb{-$UKP}&2#43H3y=gl(R7z8PLT4k$PPB`
zu5<?lr0ePJ!P5$n_o{RyyA~T$&wqk755kp!x$M@Mb2FVU!t8eT?Mo<bmwiQg(3!hd
zh?)UWuX|>~3$V#K=x~|3I7Nt{5RGX}{uJ8bFSJ)y`b|0Ih3&Me^OyFpWgoti{@9vg
zvnDmAPZ=&NO_4E3zq5hjc-#%r9@+7>4aFe?t6qd}Yw<~wn_jUrkK8s8<$ff`5*^td
zdfr-DIrv%%mD3RHhKFGzIt;?$HS}E@)r4vrB==!+?`AXqSoU;xI+iz4S2PaOO*13v
z_GRbTqFeu?r(%>7)mU(ZJS12d-o__$s-B=7TJEv2RYP>te<tH~X^je}D~3(ocHlPe
z?ni`;%5O^IpcGE+7FZ7WduJJ-3pFVH6I3fx8r5WEttXwg&F?rLJJeZlWU@Dyfp=__
zYuP+G|9ipao3Uv)wEa2CTh72dC=<3I9E;hCwv|4tYgY1g!oZ$yQUu+HZ@KBioaskr
z6r$;_bhw-d%yuYklb$%oBk?&K376c_o)4<ecR|$y;)7BuMyUGQk0WKfq)oe2Q(bAH
zQ(>>}nxAZ-P2^D$96t*`n{QN(ekWgEj?F=STMRmd4)iz#G~O^C5K;v8Rz3lY$^P`V
zqj2pTg8ofotzUhQL+JVTlvvTn6Q+*W?|+cc9)i?%b#C!fR1D}TvZNcvP~kwGk<`}K
zC7o2?jn`V5>6nDjBc?qsXTlVI!~`vY!;vkTwd+X8q%VZ%j1PE|kArX`Qip46M0Ei5
z&CV6kSEfrBIt?)yw7t5#^Q<wb>EV_-eS-b`KwRLx@k-K>A*_w0SyNr=?de;JVBR>N
ztlM(#vyDNz>PG$8tJzbAvD$iO+~tRgbyG-~Y-XluviC5R3DE^u=ulViBpNeUL_q0~
zCW}cZn{T;1{;4U&SBF<&#~FoP+c*>JlSQstJ*2<K>{+b@$wjjT0wT9JNk|O(xw0~E
znd&ur`ZcZgi`Va6=@VdYah^!I@A(3;Af!!HKFlx(6C<20!)_v|?8Q=F6Xy)Dx<RdD
z-!E7!I&H_?KgJ}+D8IMghGZn!e-@@2RE5mf$>h4Eb_-?j1!BMDyt4XAJ^AQGKoqvq
z?AiRs9`e+bw_+hfPFmT(D<lFP7X&5wHnWqg@UG{kv5f61Z3sKnFJ)W>MkyROu15`~
z_P!MeJ!kV(<1^#S!Ji<<oGCG_Ps6H4;7zu&ne~Kjw=<icX3paDUx=Hf7*7@+<>^Jo
zC-dBwmGBcZ*o^gpw1KCt&BMDs1vUlo43`%Td3^V-?s6RtpW1l6AEygnq^~^jV8RTF
zL_m)>(PPl8VzFLZ?5k7Ahy~{Ew+3DumLHG}RmRi2_4qK+`un2|Jz3`h<Uef{e&4DT
z7uU0%1t%jBGO<(@{POd_Q)DPbV0)f0&~Cx%f>acLGwD!}2BOWvv9`RQHWLh!s%sht
zzAgro3orxDfk&F9NMPKp5}Xw5N_`^PaA@ztvs6WId{^`QH71|!=cebMN2r-u%l2xJ
z4dyCZJ*Ii)m%GrM&`HY4x_W-D(UAf_h2a}%8gP(G@w8{d%X9Q5^uhh=C5L%IGQK<*
zK+TK8Hv=|SIw;R3w=E<exH4H$E*hDrG;jF1$LGTAdwhd^FcZJV2d!k64tc5FPqV}m
zx(%1r;#jOn7*7HtQEE*6vag)q<Fc!`w)!+rU0&HwwR^997bg>EL<#SWb$f#S7@7k}
z7V}$E!^AX_+H2pgb~sl7o~>lHTlkf5JUgOfIWmpsW6ZjQ&=6phk9J;}@&l#8axlQ{
z@1X$CqvfLftYo{Pt_v~%b9nb~KJ9_?&_=K5$r4EN%P3iER*r`k6N3s4W1$P<L4wE$
zap<l>k<mNDj5n!?Ro=}Zv5RN=CP*bjFVa)88TH%L*)qKUR);R;l`2!QsxmstUNgs}
z;fVS1$rjr48prID0yn!2gUhcmsTe%9Jx!X%!->@&4%4Y(q@ndLf9{3?g~CExcpBpA
zVu;!P<%F)|eFC{Jiccz?sw#SY_#C=WMIA;GFV7%X%{GPAeggI9D1iS_DTh1+8<(wh
zE2oIREqRD`uWJAVxaFGb!Ba9=gRU$H@dL>Z-!?y1?cp3~l-IXp<;v{v-rT&wMLJzx
z;>L>(zQ-WS*LMXMl*(Z^nb!Czk9nC8k03=~1KhnzEP0Ey%gqy6xOU;GqnF2trw@9M
zrX+BR&-5@HKCL|ER+~$}wfi$Z1KfdJH2og>#G1mvtqg{UphW9go%gM=mtX`fzL_wA
ztWhlkW64>l+g6l1FCMZni91?RA+^v<I1}np$K82d&RYH_=zDZ$fx>mq)Z`b+Vsr{@
zPE2~zG4@>dQg+ei3P~nzn^bKUvR0r*J*k8YT1T@>*#$fvuZfmS5_o&H+#PdFAtI3N
zGj2)MUQ8Zf9g`CB15X`Bs-7W;!J~9HY9K5V{e#!l{j5791rZZG-e%qNPGV1OMQ{VO
z2;n1u@#tWHVY3}WJkcAtT9djoXF7de?xvs6*SBlRy!$US7VI+xM=Q8UGBCmx<%Fcc
zw@c@;16C+fg|-FGx0bCK!q+sJkIvojR2W^Bia9b-OnmUbkgN*0!J$im<Lf<Uc<ol?
z6oU91uL|d-7!V>%@Mo5z?5qwU+Jk0!l~o~T!nzq8;jbK{S$R$tyDh$|%Kt=S#N|Z&
z1PQA1`jY1Xzjkjro;6S#uTXfH;8|vQB~Q!r0e<Egy<t<xWv)65pTY+*FF%=$Zn9d?
z08&65?CaBhAQmZ4964E4S3fEx<<<B+ENj;@EcMjUhxYLgla8y|Pib=z;h!sl(ag=P
zHon<pOxbMTW`DyacGrh9Xo2OOjljOQUv(wcmlfvy7VgE^I7>V485np;BB(Z9dTf)2
z2RSd1mFQLoaFy&HL1J^h_So-5^Gj6(x=NeF)FJVky9*77NT$a$@p&;`74=Y;_~@UY
z<L=F8d`*aO(y4daxuo6>d~bl?yWZm|j}O%^r8M6NKVqSp#@A_T@6I<Gk%nwh<rlix
z4i{_^_mX2XF(BM{<1BaWT|~Ot#7RMSEg_XhQCO%8nucTsb<1H|&*3Wf61vzDBt6Ju
z5N{T;T6(v3<gTA5Q^T>RoNtX=s=yh;A+Wp>lMY>jypMz4aKFsQZ^}m20R0Z6Fw(wE
zz<@M?;@m?%Kj5m}EVVr|wCDTvIezvAw{B^qAAQ9c<4a%6?m;fnz9;)^`j9nzGce=N
zRK+kkp{P}LCDN<Msp*`pXVOIhJ22_)l`pr>#+oW8>~M=PYd)Gga~OlXH>;w_BP>g}
zRqIv&SAD>UB5)SFUHs}P=$Fze`zW@g+kRMn300?-645#kjjqmV&$@b9pjDW6#Og*v
z0+DwU{QbtU?-OUDMB@i|pCyFe9(ues61L~RTf6%a?)T6*2Ci9Tg6M!&k8hs9?_kEC
z2YT{!*%-h_@9$<b#$LNMdP<SsY^YSFC|ur2iaFW*=oebr5Cz8)K#62N8%WkTs{`Sc
z4;9ob>Gct)Gx)hW<6scTx2@eWuE9w-qj)84(9)VIWoYsH9cS!qy%S43K86a*6@`9C
z$_X^}CgW%5J`hzIE}VTdT5=hak+UP6y3!kCT<kbaiFY{6uidl<JTIE20IUJnrrWM~
zHC=wOqqbce(Q)1=Rm)`UXoV!n35VofhgYf!Lr3Lx%T+bS!elD=Np)ux+1oCjf5rRb
zsi6k3oJ=hH2}(kcUIE&*EI&bq9HUsktNaOCgm9yXXV#zx25L@vk`qN9HK@~X&?>vC
zY4hQcWF6`V5RIWgv1JHs*w)iap$PfH#?>pjZ`WYQZxHkYydJ$wKPkk=!Ha*O$g`Tg
zPn06%63!xd0`(5FsZy>(zR|Zy0f9l??{4@Qcp=VLiOVT0&D#mlUc6wzeB#-WMkGlL
zMM(z-Bfm@j1QqFj+>r@}ZwKW-euCD=ft>Z=gK--dapjSXGNGD{4M%!+T{v<Zs<pnI
zGrx)6gyN&yk-QH0ffhv0iJKLq3Zqf=S)byT!Y)w*+3j|Ay`lkD#pabS*6brGTnEu*
z>&ozmO9<dqT2B*qdb8@iJ!@ki#>9F2l%LS26b-ReYj|+8bf6+B1EbS~m@f+&Hym<^
zeB(#=l%GDvv3-T)9Oblhj6pg3sQj|s+6OJ$&Eb0FP?x68o3T2lL~1{}=6f0ei?hX#
z6=J5*45Y5s{^(-o{jPv^!55{8_ha6FX_<-A5D7Udw#HBSOyW^UB7mvPEChY5Q}EG@
zHcR(-;S3-{m0JI?8R~eIHvHV@mx3W5*cQV<iPqGd;V$Etv&m-VN%e?bE+&Oq=ou;t
z0#&Qsh2jm=!YgFph<M<wlE(%tWSFrRc9v8+IE1v0Bbhj!w>4%eD$zgny6g{*1LS9A
zp5+M1Lnak(5=?ECN94&?el>1B)~r@HYFW-F&3sf?{_x@~o@>#w`O;creudp(Fn~q4
zO9FDj_JwF_OloDI$xo2DCLVbhH3VQ+GJ5t3<u_eT7R`;CXN_+x=x%qyd>WR2g1BEF
z<|7?Egeg$M=2l0nepfRe;aG{=8`rUqiM(!YN;fg4HI>&zV})Lnk6$gI?noO9qVR?}
z&$AUtc6j0G_Bqg)`z1BQb@LYj(f6j7#7}2d35FaMrH%3C>O{8OL@vboN>aPjfSFv^
z<J_;%9w72o3xxG1f&feH`i%KwH)GQ?qUF;Dw>k!9O?39q)A^lLDUugiX?;!#uT`uN
zTQX%maUQ0ySv#Wt@Hs9shU@NSE*uX8fTAEOVw+hY?38N+@8!=2&H1db1CI!b3SZpN
zoGhR3e7e_%$VERNw#C^5t70$a6V1Ts<B;7&qzr0v4f;?ruDOQf38<>=0B<gl8Bkmw
zWU{VV`{?W2BW}A@Nap{rbF*mTM+8xhw2w82$*GPc)JGZLv}(5%O5biJ<X)+=?M3hp
z@LkuW4d^)UdNmF%YOe&53mUML>3~kHxADh!xIMzv)noeZ>i2$mJy;`<5`s2}^)5Xf
z;4s<5y5QiLT)d;0D!-D0zxhe4OPh28cW7mrC3U~F$>!aFYL%d<r?%6*{*3^7LtXhs
z0uK>JqQ~_qY~j!?@myfgx>1|_0zWa-zjC$joIrGc<>J-l2zK6yL>~rK;o=V_CB`iJ
zpM)wrWJva+nYla-#P~KFmQGs%ylqB);1vRxaz=TtN^WNDG+o_^tS%RJEs(V|&6;la
zfcz}%OHL(Y2TFkAOkynZZN$?09n>hoxV6^1w}VE78VsxN1eV^?t3GGcpwp=_Fz$uJ
zx_lT(cKqmMb6Je@S5i3A?oUbKUx{I0v9y0(+5;rPo`@ZznEVj-wyWX>P(W{0KmWb7
z_pi26=m{Y6UjGh29u)xM*ED54Z3HMK+P~t^-{4n`5a(khSAdFL+(3xU76{C*9H}i4
z`+b9gGv}`aSZYG^>VYxsS5;svJ)H{dU_C&6hA1sJ_zWmwx}f&H0=sK{>>2(3e-yCm
z@5NG2zsasybJ@BgIF|*em@@z-)d+y55gwhZ*PE}|`f&~h1hro~AP+8=G~dUizv7On
z7^b*pX1U`2R6#j2YfbBq3U~fj<-saCwx;rllcX?0bXOT!JgB{6PsnvMoekD*9{YS=
zQ{|qOOINR;=Q4+lQ$_Ew+a=s7yVrE~<EPfC%%?C0PY;sO%<Am70iih`!Yv=}ReSde
zwc6yZ&lZ%5Y>VolIcNrXBa?PVtaV^Q2J6%n&(%R_Bd!tX$Ex4XuuGvL>@!@i@%?ya
z@)G}E3%VTEtY)$AZh6p1ym9!vSvcrx9SfEvL{%3@p7`=<%*p!Y*PL~c8uRYhV><W9
z+g|oD5{I?6%>pv|bH)4oZ(rK-;ZYH)e7RG;h*nF-y7+>L*KsUXI;p(&Mw3P$TJ*>^
z!aGWG1js!{f&P(2LLfRIpwZ7w8f)?Tm4#^Rh@A3;7IVgsqw}F-Z}NhnQtD8^y;FJ|
zaQGn5>M&(bi{ysuWnvpKCsxUSdN%ZQNCcu#?jHzfv~hw`i>Uo@yb+w8WZzLq0Ug#q
zuERUUlzF!a*DIgru0QkP+GC!5&n+q+?kwY)H%-z(0$e@l8@d|Wik`A+mBCaz=l3N$
zoI~Fk!O(czcksYQl8d4K3O$ORFOKe1D$z^X<)7(X9)Hls)vl|0tV;8AxVbTNRl*Am
z!Q<N3jA8R>trft8CV-sJ6uwwVf{=9z=FX7xk*977*gUm`SGI$1KT4H-d$QNHy$R`m
zzML-SW418iSQvJexvhPU7ZqVT<fUVb9b21if6>PoDzVc0LW2vZC#7=nY;5|uyn;Ui
zQ2uSK`Rj4#Kk+|9&>!VJxH3xq7!|}Z@ivQ#)5s#3>vvGePkK%DpV8Y=fC8(HJ9PKa
zXxCi_uMs0nxvPIqXSaCf31PP+<xdDoKfYyh<DT|C7M$duCKo~bJ*SZ-=&u*Qb~uwd
zM1!F05tHCz6=>(_aA~=DXZdn1y+PW-*0%O6<igFly)jaB+ee=08p$nIQhKM9zTQZl
zv$R{0A)!|w=D5Zo*w9Z92qn5dbW<SmNT}j4KCK23XnM+}IWWnk5NW>(ez30vRu72j
zJ85<g{a_n<N?V|^i?+<Kq60Z8Ij`9H#7<QoXK;+Btn-4*#+%fBOy#$RuoMSJC5ir0
zEf6Cg*KRDAXZub4EM0Vibf#r(jVcTS-q^zX%-`_I#~>r@ht7^%mHN7Qe*9vO$O>yx
zV^z7|M1izlr)vce>7T5gukV6a>k}l~JY5x2Gc|oqbh|w+IUGKJK4z8Ln`$%yAYLBl
z#h02cEid_OJ-Ig7IaBxT2M!~)QHEEXPpuG(RHeskiCL$Mu%?HNw(t9#FMZg~mzBbT
zWRHbCm|yw`j{XU%UT&3%tMeX1od|?-7oW*FZe60vV<diAqWZcS@x-XEgg;Joyn66j
zyXiyVEZ>Fu6>4kLZw(^vf+y3i;~n>(+Tnvg4A<(ON+m2dek9-6a@UQ2yejeKUBU8Q
zuB(2M=?9%r;-+PnDQ=aWO!wnpoz$cFigqOPn)*>fbCz&fp6lTAa<{_sTq75}PQ56R
z1SB>bPo)Gqo(x8G2eDCm0Y89x1OWcj;&Hbcsy~k8TDh=}C(WGxT!y+U8fVJ(<Nk}=
z91pID!oDRN@edn4Fwp)cc*3Wys29zNvPVwtV2QP!*mzD~kBx)H@q_hEb-(*dX(DV(
zM<=vT*|lBFy{pvcU(`Afie#lSJDd1E!i0x$N7#9aXPap!x}13t<GA>0PWv<Fnk=&t
zFV|{~N$T1Bc#))fI|<KQtFBx-T|gHCNpNyk8uiSWsu_kQ;84G#f-Y_Up}uTp_x*6C
zY#upe{)-~U?g>aN=Dp;Kzl6(ZvJiITvZmVLPDQKCq`|yT3!D3TJC*j^4a?Dq1pb3H
zETUoTvcRL$d=??J-%@B^KKP254!ntp2m20ae8Sg9?UKA~#EfD(Jl!6I=oH*oei0Bt
zL-+6~-SI2(jhZLQF6*b?+Ti3c*CCa+QJJ>L=O$cFld0@{?ssF`6D$Yb&UJeTe5ZSV
zO~L9U{hh_+U>e4Bvhvi_T>qS;ic;dylbUf#IK#Kk^O@h}2ZQEmhTi18D?eR8y{x!)
zZaBtU8;Z?g_C<6aaE*rt+Ptw^J1|@0OrKhk6}=CxT+zOJL1W=KV$lE`^ch-J6rp+z
z^1LjNUwV#%UlK8065XhIN4GoNh0lRkX<B;kv!GKtSvpD_E1@CJbScCXyXU;nWH%3G
z^aUokadl<_ppW`)#mBk$OhGTX=@h*Jj$ZEtbJYveU=6Mvco^~y@Rmfh3ZDt>P)3Fc
zFD-SNruR$sPG@8Un8&2joKC!2uBfn~k4i`g%{&a%<RoyN%(Oq7#AFif<Gf6PVcZxy
zsr(}p2sRpCx60qqlv@A5!<X{KQNYHhvuFK<e+li1E11Dor40#gl6pguY2`-V8LWB!
zjQhP`qW)qh(@Bq%tm&lDm+7l3r9WmHJaWxp4a>reBwaii(_D>39`}{eqxO%e+WSw1
zm&oL(qFp{Z`N+AY%-Z+rEZ?_-Wxu2akV(LxGgZ{Ssfas6Oj#ne=~TI?kC6wm0vpZc
zk|2qKSJCS^OAb;4whU^*Q56Wuxr68gH~Sq3Za-XA4F@HilW7IB_*NK1m<@SBj=3gS
zTUnn>6hE;s6}cchAFLq=JQ~N@0=7b+cJRsQ&+6iUZds!iKE!(NI%YE;m5rDVxCe^q
z;@s%z$_#&@A`U7xz6jTb`+{52)1qs(Ag=sUWnIsAUuLtYX<*G8fKotftAY_JtfCLD
ztnrRp*_g==FUH)N(zQ6;=()r-jVO-BLXWL9OMjDxu=vIe%G8uu&gomdg`9E>cRaVI
zb227$<hZt>M!6V<2f)P9QNZ{FymlMuY~4`Mu>4S$>egjghCoPl;#K?w=!tI8gD|cy
z@AT7B4+I{T%={4Pgufr(7{uAQZ?(g+xs=_guXR5@QTUGS7CY9Gc}Z3&8XVf_dgP(*
zGF?DU8ypB>9;1St4KNkq9|e`Uv(ATDh&*q6C3>`S%O~O0Y~5?RY7YNwUHO${^Q88X
zWIxBV<HZu0CLgtXP#n;^sL5e0DrSMO91|$#IO?ufJ8We=;F@+ugD=I-t5-0#|AYP0
zq;Yi&4jvvvPm0Hg;T7XA`PR;IeUnM@wuRA~=6$K=-Z3|1YJZ2-Nc3@7YJ4ZI&u%oe
z?eDh0oN2Z@LmF;Hzn9r)0m}MK`^HVTak!qa;6gadd|&=p7nYoqz|sJtOVs<q_wXSD
zVhzF%Q~-jr2P*mjMy{aCgLP#!(h-Eq__^s@izj2a98Us2p<hmpn`v^%%AXx6JUexC
zILch-he%7#`>u_ad7FpG<IYsJxTPh-b$+z9K>#Py=}pJD%d=@pD;s>{q}#A>Ip82+
z5)=aq-B6{UmjrKoV$t6T>T_G_RUc4?r5nuM*gEw!t}9ikpAp2ov6A=ZPh-LkaGFUV
zK^#SqdQC#NS>}XNj1a_Gk=yl&W8c}fAg@^S5#^*wn}=YtJ5Si|0($&xfhS%Esu_g&
z-^yC>wK!ie9F~)H@$Q(Jqc3Ibk<6**v#19HjD<d?=B^L*#wB_m#gGE<`@plp+yt?k
zm!Y|hbX>87XAlt{3Km4a?WUoiZ708D4&JJjoez{i=C)7qNq~GWB>)SKoZ?O)dYter
z6uGIlHIPNwdj<;a@o`R~(Vo#9@Za?pC+oUyHB;?+8tDT=agYgp;vK@963;Rl(iN99
zm;>NCRJ%J>MH%v0F&xdjP9xbrLbtabqTZbMF2%29jxAS@*(yJT<b`j<?6(?##lo5e
zoz16?r58QYK9aTF3_@QjwbX@!*tRb!fcQRh%?)Z^FPLuhkCk+eSHd&?GBsGrz1C4j
zR^gM0r0l&tA<d>AOFQ2SL~CBYnmMZvNl~b%z2=2~gz4Do5P-(=C<6>hzT1`==VYjZ
zN|Wv_BSnd#+XftpXPA~NXoWL0VYMmK&861S2w_ixB1WujE5=_wErKet3ZFU%$_fz9
zrLzqrlBILCuO~eQ3kaT%QAjjhPIz%4(CF8Ih0_jC13H;ww_-E)lr+bR#`1tiWIpS=
z>CHLCd0CwI<%P!;+Ii1M6Kk`lX`?nCpxCMWBuOm#+4m+k0?RGx=%9?SSSq?dovn{0
z^7z)%!A@S<wnylw^~ba713JTHkF%7a5@S80A)MWSC6H_C7zxyePt@HjsWIJYe2CVV
zv|UaifdR7_4Jh)Cs`AWl9+9*GTYXF?5{LZGG)4I>UysxBe7ueC<qtvm#AT-AZJek~
zhh&L$%Y0W43}dy0<B6MYE>C>uOG_0!7e#Xo1G|o>z1)n+Jj~KWu$tYj8_7byI+ko3
z%L}+IT@g)uuP1%2TGIoS&6A3>PS@AfYX};>dGuMRTQt#66v$|&&*nvSG_Lh=0Dk9h
zDJTG4rdsmGW3Jsy6@tNn-(>g~%X4|nuXm38J+t;#+j|P&!JnQ3Y-rmg!1ZRRmjt)}
z;#{qv_Ew?4IKawfv;afP<1pYF;I1+Qpv&KWoP7Q7^;LAh7w|HaC}879&;$1Jss`v0
zz%(y>1B4AO0{jU6bxBymO8`U^{Es+X|5LY#o=W#WFj;-J^KXv*5nMe*Uj&F%_1UZ*
z!03xS2@E)DOXTKHkW_l(QgMRwZ_BP4%O!CLoQ~bx)R|w7s~O0c3WolB-POOQ$^3KR
ztrfk%EF8b-goV;rtpbCF!RL-7LsAZ|!^&GB;_<<gvhIo83qe+qKaSo~t@-+_;>!q|
z$QR^&kE!ae9AWBd#SIJc89nz3kJ-&8?Xu4c+JgGqGgp$Ny)}51J|sWv+857!0fha6
z08_YXD;VmybwnGJ!*X3lKDtADRNmq^)7Cv&!XyVr;D?shxLdcYg<(Yb!=xrfvUH$Q
zw1K$La!hTqu2<O=*0kh@Qds*hA?Gp3rpZSmG(@g|52|9Pmr|E1V?(0Om2U&TUjN}k
z{0Hm6%A^u}(PuP&AowjsjTHIjO;P@uWKXteltxz4)k9-bF^RU7hfIvyJQ$~TNF)+}
z10LfUC`79EQg6gVJAA-r)84t=<G%4#@kX$dy|~Dx;jxAtd%8^M=Aj>BmqADn^Wp0t
zfi)ys;3EdI%Hk}2E6uXS%xiVCcG2C(Vk@6h?iZi_K=3s)vT;9K{^XP$(*E6qm20|9
zyN|a|LczD2ERiCq;+%5Ia*2EvIwwPcyqY`eH~<%e#{xCEQkyT#QrjRkH&+$H@2H9w
zH>m6GOuWq1_V{7<XiRWNurW_HQ)Y9~m+jKVSIb}H!2eDU{jYkI_3#e}iIR#iAlP?v
z>+k_EVra-0V!%JRE_od60JSSLqs~+sDNFnU^-J~!vH>H@40LY{2UPD<^H2E=_vU|1
zkN*?N1}3Ra)WNV_DywDJmsJbyKHC(tX&crwIQf)%N{EY4mqxzhJWds<3-JICxM6zN
zJF7yCU4rg;O{U7;@9Gh~I)C{bpY9sXK?_Br;F@UVto4)R){fgVB3BAxa#9MIOD`0J
zFwq^+x+1^q2|jx7vT3g16COr+uXh0+)!vAA{^f6pj@<PM&ZQ1O?H}L({)Hu{KVH|#
zI?AEbIQl53sdtBFBu<;F8m=^K6@>vL7yru%k$FHU_8(pTMV|cMBSrqBsNVkq^5kC%
zApM3d`OlaJRKdhoK;eji$%^@<27*bu%Du#H)eIKn2Ocs@bQ+*8B~k8&>T8mUu9RsY
zBjZb9BtV|;Nm&sGNz*`pp5OPoZqZN-YP+=`7xBTAN_*kQDcWbjAe7|1-$0km)pg>*
z;fG!HQ~w(SGC<-c3@n;nlOW4JaOp;gw~q{c_LaKIeUQ$2i#%D=#RXx9V2h0r_i%HM
z7BtezWoKvLGSdbDRk7xoceg2OXq6QN-EE-W)#UmA%=}L>A#NY0nh>K%wq71Anv1^V
zR+_C#ez+fwndfHDyz&esf2HLc=sjujg2kx_xMe@;s4*1)TOCDJQtV_Q$5l^MwcXZz
zP;LE|HymF<$ET2z6nDmap23pLf#O6uli98jxenh`<jJNa=~*mZCf+H)fPLzUEF?pg
z&AU<CJQ-Z8rP3#`Yb)9F^#Nc_5dyy?LjB*q<F_p?TK9L%t}U=WGaE~how4fUn@Q}D
zh|o5fk$a)BTe7#n_m4h+|LAuAZBX)D-^jD}FBr_NkNY3@k3!wNb94bF3}s$7S-90X
zBC)Oik`$_S7#+x7;5!B35W4#vr&pJ0t@-9-Ijy9-sb6*Kl>j%oI2K{ugVe2-DYl)N
zzDF!I<L<(`ba8x9C3(wqM~>)pha8n`J0e;5k(Fk;<KcMhgNt=qkgJIN=Z{#&6_UkX
z-p!ohN*U{2MAVOdvQ2Z!J>Q}K*wLDz_)Wdb(e2I2ssLI@D*1ZT^_aKJ&8C~P37vAv
z_NPBFH1%sWOHrE*$ScDapZo-=Edbc%zQBbl{=_+l();FLb`K=K{`(3(AdQ*5jYQuo
zI<!gKN!eSR_=ENFd*1MVh^<Y$L0(gvB6?@efV?r7vGrq1?2zdE69jXlVE(l;(9{2K
zhyJU>{`HXj|KY`WS`*L|Jbd|}vi#lGFP;sN2kFq!Nu*M?nz#HfU!6{}OGN(eC)IkX
z$ATF?4ZV@=5~)>yMgPhE#UD+Azje|P4}b5)r_;Xs23?^4<$2dY4?tzE-M{_Vmrnmn
z!KUy}#MIy&@bCTfq~G}QwE?ffIE{4vSI)x!^mRu+6&F<$s=qY2MANs5-Zqq0LAF=@
z(zck(`K5LNIqnYK;LS#FPnI;h{z}FFb%_vp5pmg5Rp;jBM&ge7uHX3a=`HPFN`(BA
zGx0y5K=yqb>SQoE&`wpWO4cl!>AW;6GP1@KT<WeCm;#m+YIlH*aglt;zOx2o{h%p0
zGs+=WS?dXphIla|DvKs7Xz;@Q$KsB>+udD#3Mw%lI_e!R#rpv^4kGZV$Fm$z8O6Th
zs&*vxw4CdBq7Kx*t`t0hz3T^Uv=<*ZRd~mr`4If}bqLFCk-EBAyKt_f&x0l2z5NV2
z2GlR4*M}SErN@{WeE<+8uj#pV(l*_&@yAJ$L~lY)l&UyVI<3S7e}%YzA*)KObLosI
z<bjTQXY+j$0uLkR6C&Dxkaysfn!`MgO-F)olGEF=(kWFK6MNHBG4`~87@@+tt)ooq
zPczMpi=BE>+9wY*Gy^TjN`V0Ot|ZWafZF&$_*7aB<X}kiO;P&>HZNu;zg8xGeQ7QC
zV^HC0%#Sd=Vt6_v7|O2dhV__&;o?L0Hf<5J0&U|8`bE>aA*n5t*;aKqn$H7jZ_P~V
zUpB<oIakP#W=max*s=_6>$r0oo73CXY{;m4S<x+F@!XmOoY%fNC~5~hS<)#iM#$M{
zzzp#E<`|_Jj#XS*?@HhN!qM1u{!6Le(=W^nPd>Re?7jc~1fauvl9_#XIw9;-8cW}d
zF6Gjx{<jBV3pYJ4)Awk7mcTO}Y3?dwdL-JS62?VP+%O=V=tLi{fj)oaI}Xc$J(29-
zNlh(NV2YY$XYMSZ*^c`>K7Hvb3`X}IP*`|@u4W7e7>WZ^5DYF*6CQ7w7V(w;+1TC5
znw2e$3+FwWDzi^&pkoV<;)?rjtXUMKv{dgcMNwApKSAvVQv&mhod7ck;CF0dqh$nr
zov?yQQ!Y~>*SHEtYezJXgqxYtUF_xGy!X&2Ha2DA8+EK%f>aJvxdpdwg+alpZa(wV
zNba`tkNVvBzjC!laPW5DY^RBF=aR0f-H^85^^PJ*Qu%y{=>Xk3Z4`>XkwbX}zXITv
zs_n_D6#hm+*YgGHc+=2)_!Ou8Lha#B@N;Oi`!-AS#oBAao~A|l56y+h*{hmCKE6nd
z*rp#L>1G6$F-~2o55KvmGqluueQ=}xOs2bHdv#^%*?Tn2j~_1J`8top=7d3cnq`3d
zLL<nELEpxSQ|pM-q1hGT(^0ZCO(#>g&uF$vf~w95Ubs`T6UqM0%yz{Z;1<AIA$8F9
zcp+a}poWy2s!-`bp+-3=*b=SHE+B2UQ>N9;8FIAk0>z;02^z*XTRY*yEQy7N<{S@n
zDWi@fd7D^D)=$i?@N;q-*gJ{blk`pzaT1?#sNAyEARx?1qqu=-X&_L<KuQXje<HPe
zyCUWFQ+Fd9ks0(y8Hmg}Dqb(b!jibnBYWVZ8F;ZU2!KD;#Ksjn1q6__)?jtS9c|I~
z3NND0UcX~0hejx|GF%Le*#!Wp(*ih;cy&(Fqc&k4k{PE{r@HD$+Ku6*1a1vljwlPG
z7OjWdr_1rC4vEKP4W=v<&)h*h+VGd$*$C=G*S>lBrWeb`NPOBjLeWboDa+b?u6(~V
zSNHi=qT%Z*Tpznb9CK{@_%~r$ymn_!B$BxaT38Wd4<K5uC$f~?TGx46aYt11bhjIS
z_xbzB8%rD-@4S&~TYB?`e2l97Eg7}|X$R`B55tcINmG=6F|t|KO@Sg<>Zw?0-IJ%M
z$zlohIg$g&iz=w*mtFp36O2p?IM$Yy@GR&gVW2Y_E$HIHT3MH&o>}*$AIBp0Qd)FT
z8Rwy1EBkJ`_F3V=6Ci|a-a+pR-IT?{ZUG@UW&jx-yJGfnMCxnm2}R0#ROoVjlG>HI
zBhAXWc5|PwQLi-R?=ddJC>$BMdxbce(Db4f2ye9}78&NF)!rf7j=GbO(`^jC9CAI8
zgW>ACPOuUDojl0`+W^HRqUMVL^h=G{2Ef@F=B@kGGJU+^lUIm!c8(Ob?ZJjW6IetH
z^3iLD>m*K$=Ub<~As~gq1kB#S|E%S|Q#9-suEjK4s`dSMsf*c?Z%*<*#V`ab;p_+W
z4BnGrFruCIWJDotL(h|lsrWxPto~}Dd6Y~B*Fx-w<rN!kL-EyMy(7!wM2M?;+ola1
zSP!`;oJ`+uvY|c#?D!D{tXKc4c8(>pXyVCRU`H<`u>Q$Qe?y+jz-f$t9oMM9dh&(c
z*U}Gj?1=dr>Z7BFFa3>(cB@yc7lET{Pdq6p0oLaOrwjg1UQXocsB=>m7DmUp@7)D~
z9_os{=ECd)uVwwlXyI(HU2z}`pH=o%v@}7t{B}wHvE1ao7f)hoy#0Y|_;vAqP84~W
zjYc3A8qf+Msl5lN{UgXWV`?vI7xqh)D0CW3RlD+iz~s+8RB;|4Z2-ho<VuC_H2_dj
z2ch`KlU(7GKGdlFXHa~A9l-s2%Thf;{qtS+LNUciNA!W@91tmP*^KxJ63(acrx9%c
z;ek&WFhzI*Nmx6wu?PKg$88V%&9VOWe8G)s27mr8nyUZiSbuxI*V6yG`2W*-zE2A3
zQZboMT~bkziB{*x_R}$qmG5P^Xa5a!wp)Gl`-L-~zRJ!_EBCU#6B0V==xFyRu;9sW
zlGU&NXT8ap;k}H7Zqpj|TyAckOC+8;GjwP?y_FY5_ty}--~Z1Qfkpn{-h&_P0va4w
z<Qr}Wi2>q%9%g_PARwBtOM8=M)ETmU1d#2Vf*gX(YaLep`qRC>BYy%d4=K(<^{)6u
z{8r(!o5Vxa1O!4Gat@D<TUdVj+8TAP;7>5?{{-CezjJ>&)8VnKn`$ex8N;ZB*E5iV
zaBaYtn`#_ewdVeXWM;-ecUj|sln$WYllkbEe$N3i)+DLSItZZ1K(kDeKI!!Qq2TmC
zHMV^^+t=+*;?k046t8JxqoBmQm_*$b&^kOEjlcEf4QZNuo#J*_Du27TJl~(lZ};HV
zm-51$DArwBs}A?zy{UH3q@1COY5T4U{5a8wl!blku<gXJI%cS(B&m>R-lG8b@_Q<b
zZFU2^Cnpkn;0dyb@7-10_V*?OPrs6W*^xf+aFMN-r((w4kMO*0iai<M{5CIYzwehX
z3{C;Khd<<z03m0*3`I?8gR2G#99DR_iC_#uD5vr$f`sfwuY#3y$Jg1@24}}S>6?uB
z5{(-7Z^rht-wC-GGyd%-sBWK<jskjmeGL5*q=%`{Jp}rH0ZrvYPE_Xo1g&zK4T#R(
zgj}@+BnGMtT2WCx5UxNMvL3~G7z+sUU`eMds|K-NugcDgu)FODa<{Rw-g$fSJ8S<t
z9kT`GJqr;??fxb^(;cDm`?sb2PTdH8p~j#Ul!v;G*nwd06;%ZFKx@782{?=4#ZM#z
zf&1wBT9ph3myBB<B2Q~3Uf~g0y4m7$e6638$IQ|sDmLe3dL=PulTXw$XaIGB-~1>z
z+$*eGi4Sr43dC54cCIOu2+VLviujzvkS*Y~3*6h;?*inJwpw!pRl%NkZK6lWPf!Q-
zEFn`LA=zOkcg+bQLjOVTM^EXo;Sda~b3wxcQ>X7aw7vpVuACM&F%0=eJ%=;@=_!2)
z>5;_S^X+l@px}YTivy|)J2!Rpd~CwACU+DmCw!<kQ+uhcX#jPBe|)Z%v`Xd$WHF9V
z*#f~ecv!qwap|7Jr^c+jxPI&%4s-e--*4qQeDmWg9suZM$_WGRvM~l8camFi-&<>!
z7p410^exI-MHEJt%&ZfiT-?$J8><$>jWkLiO2`&9=mMNn1dx7W_o+{Xg2KmfJ{yH(
zLy91Y57Lf2M$s9)H@xLRF$(Zv?m}F`<f+*AucQR4B;yggj2P`rd;B#VJQ59@80-~U
zQLD3A@KLtgMU}QEHs$vB1g?iHaThh26BhLjp7B@ih6PDbJCQ=_A^`pL1d`z?KHr!S
z7wh)ccCNSBVpi1i_3eDQQ}MmtGfyveiLyV1*P8uU1aNIVz?bl_;V0<*<`y+7B+GOL
z=Y-y6Z1_SsT}O%~J5X-oVVeRt6&4w=FHeh{O_rDepyH-zVjC&Dq+6?DSemdkCnB|2
ze7i?Cz4NffiBMLtX@<AOVftzd(UHb{ea~PWVI_S_nM`_e3Nudm%rp3V=m{J#Hwn|R
z*lrGIX_oZc|7a!m{Ppmj{2kRKk;DhD&)@EsJuem`??9bYmjyi4wymkQ`HI*;vj=XM
zvV{4T8&ZrGB%)6Vo@TiF(DQStBVJ|8>!EE|lP|0tUf)DLH;5s`hf(-EUhDd~e~d}3
zvd(x`Eq5*}kL_{WSN89cDI!W=)Fk_m^LcCI2hi5CBUG7-nj@-apvL(+9h;~|Uy}yW
zq!QXLK@ROaWLq1`5qZETjl$db{L+moI?NB^u?QMPR__&LAN_EIid9iVw(ZNM2J<-Y
zzYYxUITNmBE$H^JG-%UBQcp^Ars&<bgE5p6%EKE0<qkYLc4;BDHUrAl?#5z&dB<%c
zhJ(g<Rrvd?q|;}3eMRtN8=62US?dfsL^Ys*A=I!YwRK4|TPOI4bKuJviN0>o{E=f!
zHl;H@&z1<#%}BB)$piagNcwR_7?e%)qRX2fmf~KLM5FUQ>|GO#+YXGPbehYLUI^($
z>8Gl^cT;1NnVz}~j#meL<xGZiN~CsyYQ~-&d2;KRdN@jF9Vas#fYFW)5~auv?NxxL
zVz0HUvy9#u-P}~*K@`NuHR<rMTAhJ|Vq0|9OygwqfRQjR@JM_s`<l9BlF=|l-{ZJd
zpV0j?<g2r1%}rw?x3A2hq<<vOmv1P{S7d2<WW2Q<FwXfL|3f`K9=f+)gk1T6=nRsw
z4ZK|m94e&S6K~sfb0kntRqeP=?cH@(JJ}$7l3+sJymH{^E_A~YBg4POU(pVqqQxG|
zP>6jSs5^2xwZ9W%AsS&&&uVv9fRVL|o=-@G?!o9hoKsz#BssrK;XDf$@TKmRK7JXf
zB6#+CxbDmD^S*}M^7bSgS8_AmTzYiwUVNL74{@Z4(4XN(K(sZVc#qHcfp*Kd5fJMA
zGVH7E?SM$0SKbUgImy`(>6=jJt;zlChp#Do3}HS<h1gP;i%%-_Movj(SiMl+Iq5i4
z#H$)EWEwi&4JaC6&{e2I)-*Ut8t?&dUMpx}fUDa!pIw~0wmOeMv6D_4;%<+WTI+h=
z?Fi}A{IHnT$bIV8m6@Z<?#@cJ0eR)6fi*e8C|A^E>h^&CVZN#&B!<F8vz^mErN{H(
zcJ~k4yNxCi7Zkb$Z_EaE-A_6Yn1>4a!m;pYkI{l8>0MWTg)+XxxjUt=m^%cy?(kXI
zSlng@2paf^cH}f}UNJg|e;p9zw6=_htldQ3Y^;gU<$qilJ#cKO`nv!#XajDCOc6ND
znp-FHQltnF00mCgz@wsHE2Mwjwzi#_PGkz6(XkK_MIMh4x0{R<dgr1)SLJgxdW29h
z<w3o*PV@q-DgaKl%moa7%W0d6KS5$%WI!}#eDo)1Ckl1Vb(LbgcgRq0_a`qN5XpkP
zgy7B4;oH7BMBge>^qUgtqIn@>mRt!3Wy`q>^=U4G@pCUBB5%Hu;|Fs9)VyWjUDB!f
z_{iMQn^?B9V_2L!$g1qAyQw|7TCxIT9p42xQq+zP4U#7X4gQF)aN((|JoX`5$Ew@z
zdzG8rcS&3hNfaCp#3-s3$Dc_%ob_Nt_;^?5LcOOp&P+k>IHtmTnr`%Pr$70+6hG<;
zh*RPKMaF*|dGY_TEiDxG$LN_pppO<#1OTi!Q9yk8ri<@2s^(1rY7>B~9lRTP-}F{g
z)Vb0g=HDuM*%sPb&3pXd#sK2C^^k+hYq&}|<+XEpuTK3FEl@_R_NE0OUy81SZr+97
zl?k`pXcoddMMzNxu>oInPd_OUOyc9xd&MDg{z1X@FqfBUO3W~H4V)x8uo3pg<%NZ-
zvUgd@O#5rKk%Q#QZV~%bNl=_@&Xv5TYebc0DzhC~yK>=qMdy=dv<JL~YfvlQb;H;j
zssy}I0F%11=#V8ng39KOA_1a$ziT)Y%&R4OZrq&w0az2qiU9SD73&cw9sxXv9Y9k5
zcP-;9i<|*UM3(BEc&d;AnIHMB;Vp80Z2c!FI{)_#6@xwJfFtubEkh{EImAl<i1Cpj
z;C7dd{{-=9|7{aIKiOylHz7UH4<&g)H(UVW@TFY9<+rM!O2_|A3rhQ3{$$sCo19D}
zehoP>ObzP;>?4MHIK>9?`-Y%6Bkk(=JB|Nnhr~Mxw|EBuWZJf3z!ot$;E@MY_-B!X
z$H?C`cpY5U6z*3G*`VM6Sw9nMXU?v)D}*Hf@h1oi0HS}_vcCH;N-Z;h{{M=umY0W;
zM3*-LkoNM0Z<0z}hII<0+W9m4M4iYzXLo=Ru;y6+>?^9ext5?pJbqaI7(FS_rM%lL
z0t8AsVBe(A4jiafr&)Q&a9npRyv8~xK6ShFHR}#J8Xo-<gf|q1x*JUDM$oONI}g+o
zKb1L}GfC#<4NOIy%WLTTy$Aar{roSF_dhW9(b<j+j}PVC5K9Lw?E=hP9w8Lg0z!>*
z{hw6Z0xE%=f1fSx1}2(Seaj1kK5x2a@4BYa!D=#-v<v145`|6X(5p%}Gm%v0J&sD;
zr*?eXUDtwaUc3Liot#RS<4?f?CRcmqXepK1S2XiYeYq`1b0d8Xlx#h|Zb);*jF<Lx
z1xzxo>fZZbY;f!Mf0@VL=n6j4`t~(<u*KY*k52gL$vuV~P0+p7DTVm_ALvPi3l>ju
z>w6cjSN@1UT$*AqXeg8T`%qml%VBO1{R$L96`t``G07l94a34Zv)+ojZcomNN%UrW
z*C~83E~vPA?C1}2n-3aq2t9hWS#b1{0W+zGMgBH+WbV6_-<#V#u|t&JhO9>e$rmO?
zvuVg<QT?XFJ|{FROjeYI`Y4uV{aJ2O8;p-1HJuyE{mcgw0k{_r0?f;}Ubp@mP?-6+
zi=|EN!pGR!Oj(;-3f*#AB_c7YZ5Q}NM6(S{(ove6ztLZYEjUq5oI+vf4hvB(GNGZ1
zA;TsYr;F@_QZ~N|B>T(Vge*wz^`lNBodBBrQ51!%2q*j;b>6=&S~Vkp=cUW|tAlNH
zmfx$)CnMe2U7IPKMz*rge<!B-n-l&A*ZnsO<NxJB`49f3CD;=IvoJZ0It1D4C;tT9
z;@%@e;W3cPaY_M-_yVwIe9#9E>MJG>)^qVu0RRNS^2L929Q?IC7YPNVJZS*07k8M9
z+E4*d5v$lSl(QrONoA%5%+oHiIC8@0CZHzMQRZlckrv3Q1@?Q$`bX%Cf3*GoDwM`A
z!mexlbpHInM#1}4b#cGF0WMq3Wvi7jfg9M<4@9%IPMwO$oUgNZ?_C1YAyq+15*3}2
z1w6{g4OIDnn^>P<2yrFC<Xgx`3qkUUy>b;QQ^))v1i*0a5Il$_qzw1;u?Clt^Oly@
zZxhZ>J=y$ZR;752S(8%?aym5bf%bPt1TzfAun`1MP1=T=6_JBw2sDWgaN&F2K{2an
z0o2M^?G}X<@X8j=g1#ETHdH(M|Iv-L-|rlIM985EXAv5yu?X9rpwGQ1s+kcGrnyC$
z{Rz^ltw7E@0#Q1N65`z`wTPFGa$<lves?Pty^FBir!oNPneBJ+B!8dItpoWoz`&=K
z{t5bF2CwEc+E8uFq&NYlR#Bik5NNd=N8#mw@K}l|^7prK_%bivZb@(%kfT&n!TB8W
zk9`7!<<gLpclp1+GT253X8`!ZG8ABn{r=`5zrQZjy>s~!ei4^R1AluzZ~ykf%*e8u
z!fjtK4|F2`=7xY`D8Ie3ipi*RL}9!C(w+v~Zu$oVo(SnXA4z3oC}NI#Q1n5(uXI7e
zxLT@8hNP+eit1>^DD)YGH%OS`g_lZeHwd?U=Q<_--Q)rNnQ%u?(?zWy=Y<<F#Ty!Y
z#!MTvWhb-<XOnzhlfpaM^lzeKb3fEI#5#uGo(?IGV}7sx42E6a@GOK(c2Sun2?(MS
zzG)l6u%-a_sIPT8|6X^|YHk__-RTi{bxN!RRFD+a|M-Vi*1+@uK#cfn1pg5Um-Sn3
zvUmTCNpc4tUK0Tvy-l0-$AC;eP?IOjGv|Oo`%lXGOZ}IBafuG0oyFZ_Kv}Jypq&2V
z?zsX-kr9aH#l3>0B%@7XRPJl=e@zV@C^vGx<ccl<NxTzA#UavuP~2;B;bmq&#DKnk
z3`qa_M4_B=PSmrnrq5Dws~`25pvhV&E6MR5VnV3`kP81~{Fll1NVDMxpoLsDK<s}{
zdf;`9C(HqV&9!Puu2Y0;iPnW=T9@Omo`VGN3Q4XVP_{|0=AmqX+J{Xw@oc`$Ozyiy
zB;-=x{j?JrLY;kjf!d0RUmTt%rN%~Toph&THTSX_YE9UmdsX`Xu=n2aaK7vMDAA$>
zk!TSnS`aOIgqVnE5fMp5H$g;;=q(vDLJ&O&qDS=Ty&Ihny_aaCGt3ZUn3D5mtzFjo
zuJ7Jwud~lSztjHmdEXD-^1RRU-1l`~_jPTaj$cJ;v;>fNCs1p~h6UUYW5z*!;RCjs
zW0`ez<C34N!^Uh=5Bl=UeM4xQ%E`T1q|%!Um;4;tEjV!$cyoe8EzUI0)Xv1P(^U6S
z&(kp!=SuKnYnFb>b>;<=ib}F<p&PJW-Df^r%a9NiNf?lna(f^^oN{{SHVqqY^=<BT
z8V|kke2yvZ%+wMa(d4=&qU44zkAK8J+gx1wv+Fx7e9`fyN|IGWdX67ePKs0K*uC;j
zZ@+mZW<}JH+>1f%kbZU4v0l1Tp>ObN!d1qITLvFLXpXkr!1deWrDky(x6#p&%5t7a
zrVCwps?;gVD3yh6qGfrFy>h+om+RFLXYTn{EZ1OSI2;Zk(^z8yFQ#%Ouf16G4gy^o
zOlbnE&f9E=7#WaHpAre-3^V@d+t<5BpAc=Q))(tIi=Sk4(i;v(*=mlW7&8kNRqY*$
z%lc&tFI%>WR#h^~e9(}YEbS3+*QmVHHqvTiqe#i4O6f_BxDI~4=ntlcUBl^h!31#e
z*%>Uljw`pE^{N66>@Q7@(!^M(QC{l{6^NLzcX__-Ro0rdblj;6U-sU$$2{3gxBfwp
zpR8_Xf8e%m<*$7{O7%nDOI;UJ8fA&smuU?es^~PgUM6Eh*s^7PJ-MDEt|-iW@4eb_
znW9puOq<LVB8`jdNtdW3mP7f`+jEa%!Hf{xi@nn~*V&&ZO7-&<y+eB4CbPbQ%yB``
zz@I|`ZXS0q(fL&NTmlmW7OqV?y*w&JIhf)LBuXSp&I?J4e|M+8#^K-iV3NX>GFDFn
zQ`>5y(3vsk-U?&I@ZX!g+rnC(u(zNfT<falF8W@OKkh=A`L@bIKwf~|$DHS=X)fEv
z)2GduJ0suZL-OYwM9$NpoTTqxOusEi+g5Bsz>N^7mKNV4YD$AVz`+e}o3HgEp;CUV
zYw<1nyt$Ph1}iN;Eth4w(U7^QypTK641k^6Y@xx0A|*rFZnot3zCmj9z6p0s>M*%|
zg;F?JPQ7jQpzA#i#pO8H6hG>u9R+qa?1$>E@j?deh{iGl&KsOUQPoBc4aNv3VT(J?
zPpGf^hOhg#ez<4rb(S(p=@M>=M8_)dfz%J<OJ)<^^3JWONM%-^f6lCVM-*raeTjH$
zL5<Tvo08Z{JEm>Nnw&m6B-Cm`6dubLxQdSlC}t<S&BUaAlD}cFdc~kGDeY-#Zo3=!
z!DIcg)t0#XQQJaIL26CB<9+XcaK<T;VFe!VOTPTMV=U=Oi>$7F_sZAi+54=Cr1xZg
zj)&pc6rB1J=&cF^da<iCWb*N>PwicdM4EC#cA~qkUw(L~t4m@nOJ!5P)LP(WFEU;m
z6(U|+X!peZ&9aVOq>0T?sHcqJ6jmbKmi5Q#@uQoc(<TSUR~}zt2>xJ6jY3G}p?GTP
zGwyFSlq43-hX3F>;M#G!7y2y5=6+O`TdcH5;_P5_&Hz<Qo*z|qt!YzsvO#3_G`Z8;
z_u^IOQ$6+}sfaU0sHSsxi){-KTJN1FD1^j{aSqNI7Z}<9eyw7D)?7@-`y%sz<OfD`
zHhKQFelm)D3rMF5Ihs8H>W!Qmn%!u1cRU4!RTriVfV+l!#Jif)BG!}(wUcMnTwjzs
zwKFS~{iuq2j7lqgwyv^;o53UuEiEK{h74Hq<en~6TV_}3x`}^3ztG5Auc$d+j@t5P
zIu3fnT8v8j`NB~IS$EqjAu`baHS$WjZ|OkB+=b=E_DH>UI2BYk)7~p_@)*XCUJrHj
zDjK7$65*?hRdU%KXt_!$uPs;^`@k`uSm-gw^cJWWRl$Q7seaB7<QxHKr<pBx08q-R
zZWo-56Rv%@l$6U{<lrBPbV17AES6vBHS1Y0fi-7tk7_f<=g(O*2^1E|2n5kBF}G>A
zadHk7o5f-WIWKYMMRM~?1B>Wg>wwh@?a>zTC*~_)2Ap*VyUKYCXm<^5-POX3F$|Ft
z1^)~lvkew)mTj49bgYr7PkOEvI|}_pCdtC6dgL4mVJcD)Ce1&EF=NhkW}u_t?G)$s
zvaIT9^Lv~M@A1{fe=I!PIT%|;b8ao@nc8K#!d(^X{YbF>*pZ{w(K(52(SBq13M2F9
zku|Y-4B3GJXO)jw-CAH1h!=o*<bnf)Y44{F5ZN+L6BHW~);k`IZOqt+#CHbSMyb2q
z_C(r0u1<PWviRBY$>O7cFyXKNw!4@1!pUGeBznP>eMyq3o$Ee9HU*4kJ;gv`K-4tY
zV*(rfiVXoGZJ&I8s5agv&lE%eYRcb9RNMD!RC0-h2nXj`>!qIPmrWvQ_|k(MB#O^e
zuHMgNmaDPXMAIslIYA%%aRjyT-D#kXIrH|5lQ$$DDm<V~GE(RWgw~UATo-Ig--0)O
zs0)PiVMhVzo<?yIAiY^=43s5Uoxn?ZkSg#yJ|>JI)@sUQ6Kyo8^26%uXT`z(6gEmr
z_{QgMj>BQt$cUB%R1vQXZ@YuA1<fiv4(#CJnUGLcT5(L6VT>>8PB=7^msW3jf1oC^
z6T~RMpBB)9EFCJH4JXke(c#F6LZ7&{4{AV3uLnEERVNoIf1WwrMVe~d_2Q32x&?<7
z1O6-N22K4WT0GtuH`MjIr5rB`8_uu?U2M8|e6QB~N+P1BHjc3Z;rnG?zC}vR6e1WX
zLvDR^FHAW1%0HQL{@dXE%isM!7K-oL1+Z-qNn$p5C+(P&D!zk-AvnO+r6<~O6faOb
z*hl<ac7{|QO{pzkJO+ZV>MJLC*j!>}E-^)!CL1@n*pY$$s8i^Gwij{4w%JtY1^cR<
z1dF_F3wiY;Yu}ciLq)uhG>u><&|13|6AXyy)x-<hRiPW{`+ChG22mVwoW69LET1p$
ziFJ_K=DGrDL)!zyY6MP*)T0a3d$c7*f4UiEB`X9!XVJWJt*?Xk?NR{KLwoWK2wjV~
z4-L}roh1g*k)|)FE3GM|NS6KK!Pk=E?~wtq!kz*=oy=NSGPFwjhh`cE7nhDvW0*Hf
z-XY{w=#t_*(;csxuA~WxGjobFb>;e`c<I!b4`EE|J$gu{Fvw-N-D0GLAWjso<P*h2
z;qzvh5bnGp{#uJ|ecd#PGwb@CMxTp5@&kjiO_<^#*=&&+(@3A_;bI${xkUj=i91>@
zeG7;z>Vcw_6_J~J8i7SAdX*NY^Cte#lxT6U8!v;vvJv!IY)^W>B6wiZq>)PsD5vGp
z05J8dpV(TAWH420;Nwu00VlUt5}k>;>X2Iz`nRQ+ts*q%1=hpgo(e*wU^8H5f+ac+
z`w9b(ZlcJC+w9Cju<)4@Xnx)S0gv~2PMq(>q;~Hjq*HDADJdW#P_q#10Pzz+B7<uW
zCIUFF=!~wuJsRNK(G4EuOJwA=CV$U%HlmkYPb|u;<VUZcA$AU95s5@h>vS&2p?8Cw
zB+V;__luK)C#B3B<lU~Wk=V$VL#$)8%TyY0rbXKp{5YFs5c7_VU)IA3**ElsNnT%_
zlizswaygJ?iD?7W|5m!aopn=4=&3?m#*C;MF0E9<wrUtEnKQ6j=p=IwJKV#-TjrG{
zXrnuQW6-q=y1ay-hRrT=HOV<)qTu0D1eG|G2#d{As+L67uIA>VPFcRSRkC`!laB0=
zb;_7N8W-?AyV6kbi|D8YMO?=hALKH7r*Rgr?K@LytE6~)+McPne$6W6VVGk4mlrH&
zx~^Nl6s4lQqTYySg*srOyC9T4(H-!MM`!n&-Yn_9*wLA|*Iu*N%iWY@B0MVi@ZLA(
zc3u_z`Nj%u7nQ*;EfOOt;!ssg4=S%1Dvh2j#=_6wnAD?mEyiau@egu5WG;mVKjuRj
zvj%gk9!xt~CX`a3GrFB;`&SZs^ui%jB+&M(UwX#p?SszbA4k<qA?g(xN&I2-&y5DY
zS5%bnSo7sSuCsajWeJu$0*RG|S`mxjw$4%5MY8IF2a*x&7Gh&hAxs0FmtQ~A2^A0)
ztn*;(zVVdS#HcpmnJQ!U??j^7c@~gBH8<x%!b4%gUC3K>NQ2tiiJ$MQdQqsQC<CaN
zNaH7L$#B~W{CcL-qRgXpilg+flMQSX?iX1$fdZ$g&>3C9ZL*Eka*{1njmI0TjaJ^x
ziwL4CLUp@jGisG4Ucs5-{Q!k0QOQpHet69=uG|{`2xl+byEfKIem{63$$I_;KkY&1
z_p`0>p^V(EM3FBo0%LG{bJG>gUBgo7YjlaM9*eQv)ZTV^D9fCF#nY$4l{AKTT5nR%
z%a=Xvr?5~%&mdlA%)VJx2|}7HtlTQ?XqMQ}J>NZS)aLE?bz!b%M#9#^s4Ls;UIeS9
zBAv=}r)l7wrB21oKwqGNTk8g3lDG0ZZcv@vEq>#qh-aqsFJf=_v%LBS?aSkGjW|pf
z1?Vg@6iBA7q4%?fU7&Xemi<v=5BS^>t?Y8)d;Ys0^)|XFs_Zh2dITHO=EyumKA5^I
zbgR&!6;LAiq#r6^bV3ToiVJ*s3&s;vs(jnSR8yx>!~E7H`3u9oh-i6m>bahN@Pcd!
z&7K@|K&TVJ?wC>i5==`_xDnm~NVObpzs5-Q2q$bGsB*{GxgNGJd+U5V83!eD2+a~W
zEdl9ioPur{W}zXORB8K)3Y)j8`68}Y+C=XfI94jEBTA+VT94=Ia$ng2zXw$@evtup
z{($)w(~Gc3#O1P8mPHPF$#*{OxT;E{6#ar`R#spAIvoK5!1(@vXfKA;2f=cW!GB^L
zfr9CuVw@Gl=)`k>ttbKRoPRG<_|G2yf2N{Dvw|R;&m!G*4$HuD*8%~R_`kefN}v}p
zhHE8{7y>W?f9e>bsJ!%X`ZDtX96bEtzZ!~^c7rGZNYWPS3+Rap!+(xgLMkT!en`g2
z{rzb0E)^3gSxod2o}B`l49K471EBZqf9>=>|K_nC<euO4bi2P&GtImJ+~qF<vdv&X
zQg%<}$~=5p>K0<H(hxc!EzSzz`0D1l5mrZ(da}Chpz+8`?YVw)bHU}yn8~EWHxFt4
zXKWS0ARx%BwBU(7#KO=aVb5s3j}Wvks)sy@+Ma~gz!xKF#>0ppv;_hbM&Qo$$z{28
z%c^}6qqV8tsdJ@=cmx4l4BE5^EDJY?ku6u|+CWL}SshM=cVDB}6wdcc{USr0QGEh_
zwtuNK3h}%&_%tu6#q`__dpCxI;@n0PVepekEi7NNa)rgA2C&2Zos@v+YH28l5+<_!
ziwxuhyGfA8TusR~w@<%p&LgUOXHqA>WEFpZOj@rz%Wj2Fofql1oS11Ad^!pAkKF1U
zIbiC5_`}cdsYrBSc96&KJy7nQv!1CnHw}X0UZRb*Vg}MsmrHAmLk(kSlC%9+_k>;z
z?kvOGx!qfTk#Uej30mXM;&~OApR-RAK6F9`-r1x3FPt=flrX9jtx7*B%@ocq$YxWy
zhskJj4r{RO;TnnpQCFw2(KkEm){Y)=ale{1=CcV3Jwzr<7p25MRo2jA3gB#=tC?y8
z5ClN80OQ>SVr}wUu~Sxmv*IMtJ!u=p>R;T2kYJIGWVO6~Ml-?tnS#>usR@rq*n7wH
zpj)JY#j~D6fhtUsP2kc6Q;T_rhBeaHVh6J?nqCjTvTSFU%?)vsI4|q%ZbB87m_p%9
zEmsDdz5=ucW=|sIkxh*_zR~>NjjShPn^M<a3S6fYX`|r-IIzjNRrG+up7qiO3cjSv
zGqs%ePDNl-t^KB#_^zkYfa1oalUR(GI!i}aRk}&KLM>Xj_tBF#62)k^**F52WakJD
z3fT4z^<fFw0w-);vi5pR>P^NcSGgK5%%xZV6ucNO8~E{LzYBcUa{!JFN9%RpkQDzi
zN|4N5oEe~C$KS0mFTD1V+{?+lL-8fv(0m!pM0h+#;=?tXU~EI5O+ZERiK%uCP#3XB
zMq-ZmAG_=l=ARDC>&P%^h1=r>!s6{cL-1-QVC0Y)Z3BkOh!AJ-A^@W(er{Y6{{Ew?
zbl=Q$(G0K4jVx>?POSv$j#K<fP(Y*Bs)P12<%+`PMqF9mUo|#WVoec{pkW1WI8AmR
zmTA3#n6+MJtjE+YcUsU5ONtl0Vt-W8WdACB_xzj+mcpuB>@{QHLIt3u&yNc!g5F0@
zb;I8!x1?FGfSIPvx5kXOG5017u~TA2<=+Hb1f#8Ax!iXCB-9_-){^ukME836fpY+f
z9%@vJLt0|8Ry0F*<qALjd@~@e<EZ5BNL7}+Y!j^X<l5lK*8+*L`+LS^Di3AUx<HHw
z2f$Ec>(WL?<01zRsBLtpT!}iEbb7FHKh4wV<-S5@q|=uer{~Wjv4v8Q#zSf`Bfmg(
z$>+t-$`A57C1(ochWtrvIGv>*8lvKwt)p9s$p&lVFRdL1WS=R2;p2p&hUzl=5(Fql
zOMDuadLm(p1W+AXuSW?bsfS^13dXkMJ3?Lf%aVT>_9w`A%+2a{8wjlu=TID$O+>Id
zV=UtnX!Wua1-1ineyb*31_OCF6#Vl9X}|Z<1D{pkaGkebAf|VR^4t}UADq0jz2_qH
z$^jh`!y<UceRw+xZw8uypx*<b&MmBS3JPQgm@Y$-j=g-}s6d)iu|0BoA0IzdwO%WY
zCdp%Zf}4ENh4;`*?~9!_COoH$kHpWf7{8L-z1kWJ^&jmz*C3XXO<(&a7uia)ZIVY!
z%lk!U2Y2*KkgWB!(43W*Oj&#NV{N}1)mT55YMWfID{cy8o!5f@b+=_bVZ~EK;{f>H
zIeJ!SMH$i*(5+J}C9wO*T<s-*YE;!2vz`{LES{h70k<blg?sXc>$3E%pIWEF_Gabd
zPfkTj2PAR)A;J6EIAhF(m6CiJ-<5slv&Oyasy6JO%-;AWK4Qw3TYC{ZDjd2C>jFA;
z|3y*+1~AInv{4c*)UZ(yCBk8!_I?8Uj*kcG-u;+$^_RU~EoLIxPPyqq-R<lF^O1;}
z3ET{T?dOE?qa|A_KR)L3iFT-=UMjX#yuwo_sG$jEx_94>826=ISx&4H=YB8U+g>1c
zl=_90jtobeK-@QhBe~l9s{aHF<Ij2?{@2Pr{|B;SXa2+UqDh$2cU5o@PQd~#^ge#^
z9!4-|GR3RBrbqrbMpin=^4Qcmm8U0}Y_4ovOwRO1HMhjNXutN9hx?r!fmbbG`?UDy
zFV#W<U}rIiE@zxzVwW9>mGC5X1HfL%p?YrnIot0eP|pTuY}GkucT(p~qY>ZB&&|dE
zBJ=b(2fvGX8fv`)IR{ZI*fL2Kadq^3wW9SRShb#;+l}sn+u7_-vtTX&y!!{30M~}+
zhWF_JUByhN8$jTlNB^3bQPKyZ{Yz{!9(Wi?q5}X20oyh(_NG1JRNEH<_sQ!@=R|7+
zd9T3Px4lkP;;n+#>+3&gUV1z9cs#vNQOKXRR0Xh{l57Q6F$IY$b79!HLaXLPr}vR;
zV;rENC`G<UMienMPmx;|G&z9p4#H+fT;21Q>7C}OwQ$=6IqrMgPjn;ep!u5|wqIM;
z_!TcF-}W`!4H8`JE}hj|L7*&cox}le8LcnFvhZ8RCM&{Jb-oub1aZ_vetNdNdgXGV
zZCeW?Z0OW53}H7EvB=m2M-H*#lsdDFQ=XLR3hA56?yxDy=nBo=Q<hw>2q?OjO18mC
z7K4}ivY9Ly{Ik%BzN3ci)FS11x*1zdxd8LsK-?o)@+WV|sC=BeAv13P!+6m-UH2~R
zg_#{J`J+X{F<VC++z2hN@<gxA>e9`c`LsGA+4|cDfZlK?h>36u2}pgn<5v_Sj!>o@
z-r!kzw%M9es`2OTPqd(wFF}^`g;%>RIL`R8uEQ_Li7Nr)5s(U%PJGl52KWyLwsGCx
z;qc8(e58eZ)M@kMy@CEW0rCMqNpvCh3C<p!(dmNgYd<!@FuiwLN#H0wDt_SjO&=&L
zo837V^TJF538YGAyA_9e5g;hVvSb(-{jikEhrZ{g$LrkiLL86XMi7qwWkYyYH}V`;
zq42eBRc@I8RSupLJRG;LfezUnS~2ItS`zbCGT9RI=H51FuO+D4&VCNKqrnk3z2v4v
z(*w5y(>uUuf!nrbqT6x#=9IrRD-?`u2sYafhSwU)mdoq7Mcq5<x%}*zVU-UTmI-%$
z32|X-L9+2uZJ6h|OX`Vwq*=t9{9>M4Im&et){mG=))+a?Xzw<{K4UiCHc4S5+b`w4
zRkpFmqPUF0ztxAl(J*%7VD42Dc}f<*M5+Hwg^rlpBw30pB+)MBbu9ms(aznx$(ZTN
zk}rm>iD?t)wEY}O9v^-DkSMx?09Lmi0BlCINuKpdX-|yN$-NrycmZ_{@gjDCRon8T
z2rmyGUD+VDG%bf*F}ieFD44`TP^`k4XU#7-oC<zOztLU5H~(#QaJQS~4&SGbQU>Nt
zk~Fs*H<$pUW)-+C(fVn_xU72-&Z7UJV&j;spK5UxJ|g%j6FvX3(PdT*{kQ&Py<$w)
ziSd$1ToT@b;DMW1*1X_z$I?Dq)s?xE9k5WK<K*Xk@A{s8kNfV|`Bz;gn2EKF-F`d}
zBehRVv>V~LR-%GDc`}tBvNLa++*f%cgHn8ixto2R!)?#lt@sKau{_lgO$`-B$0xoe
zP~>Xn*kQ@uuiTIQK4003O7OPsmtoOcgQUZ2<qMY<XIVB{Gb)`sQVK!LxPGHa>w3p<
z`&ieknXU`IhxbQn$-41`3b(#`I@A8XH{`pBGI;ROQ=D-j)Ec7<vp`_14Vy4jeHZ7}
z)WnL)G_oVRZ+-s8^WaSHbsEJ3z3F|@10?nn&~;J-KYi>8$s%!~N%6~)j9W0);V*cV
zlq{1r8%G}Z3VNST*AhLmYi})_m-ODk$Z_t-MRvf4tjUmho22kjufR8slh2oRrcIrH
z^(3v1<}&g6E=?~4$U0VuzsPL&PmjhCKg}kpfxSx(p!}F1fJ>HS0L<NLKZBLhfOHy6
zNaqke!DN!Z4v?D;Y1_JHr0j-zFV+?q5=|+ub{_S#<vJ9Ew(XIm)>)F|>5^@@U#afW
z?<^8@-0p_BK6Lp&xC}%GSYIkj`W2D}7pO4;_P!EH%gv^Gb>l6ah3g%*;*H&7g$h5O
zyX806-Vkr?MG+M795{-uS=Q)Kz?!>p<!VljgR2;J>bg&uLX;6uUQ;h!zZuxJ-m*Fl
z6<sDec1ZhxEy}|UMulqGDouaz^<N$#qvqC{eQld(0jNuW2^Z#;^y?Z!bjG2Y*_+<E
z8j0$a6RXj>SJh|XUVVca^e)f&*RQNTqa0>!GDkmd%OHnaIfnwc2V>rwLc6ie(=@{4
zMP1g8jH<clBWDIzp@ciZW|EH{JaB9vzC)~zsqnz3iib$?Ll&bjCLeN@i@N9sLwaxY
z&JN`D=hEA4#HWepJjxcUkkO8*`(PYe3=W1fHYuW0T4xPAhRzlhX4`$^GYwt*{y{8O
zHk-Y5==%oG_%2Ncykd%w4n_f#0f-6wFrh+)TVvLnz{(G3oX5P9)1x!3E;+b3$R;k#
z?N3}$F1T@d+2|)Lb}>+e&Ii;AXI3`FNOp|OU#&#iMjr7CG_H7sDADnPt}5_^H2dy%
zFV64-OR}H09>zKJOY^9*dY;ph@t@&u?Y%IWD~cRhh2+EXKelf?JrnO=jW_c-e29ai
z={t{SDYS`*B2Av?>>diLbRCp-SCiG{z{K0AD-@}`Z8S045LyDcB{2W<f>pTfyDmvG
zZDFyId=hP9<)05<%uV-S-Ej7j!}LTbbAK@-c;STba+}2}d6_<JN{j5<BHq{L>zLWX
z+BU4KXA)MPcCcvAD~LW%CZ5d&;+@B2zs(eI!c2q=&~DBzGRq$TdalP-jaU6EMHqCV
z0-n(!bu>7W(#0LGEz-Ko6Z3(T_JierGp-{4V+X8h{NJ_6$_{fs1xkNcfWFq|OrR5i
z%JJ!gKODat7U9~<$PxdoBa}_^$>(poV9hfAzDKrj{yPAY-2T@f`2XskFi{}%y#!v>
zzyV}<%Q-mU;r1_YSR?x6e9JZXwAUiweN_z`Hnaf+UAWaACS3eqmv8{!d_6+0cTWcP
zOvo=XT{c$yEl7k4unm43#2u1afqsFlFYV=}<t|`*VC>d7{)XI1&MJG9_gL@sUunk*
zG^ETJK!X`ua?}6_3B$kLsT(3v{AL6Vm>=3+9`CI(fMp<`tnL_sa^XW8PgeIINylt6
z6?2}BYJbbeMni^&X?~3p3BRGJ|8s?j6o)u))_R!Qcb-Gk4oLZ%N09?Tt=;ad<GL3x
zq@7Cy7hunS=#%(x*h4ZDQMnPy)jvUCDp78E?5PiUnyB-{3^>K@3pK-L43`t5FOWE`
zNpwkhK1qlXs3<0P#wxzDhS&A=Qz6Q91^Vf%_|VOYqxFXC@rn*t%{cM=fcO09rN70&
z{)&egwzY6$_E8l0yFQ0qaZoXgV4i;y&GM~#0GLznfjbh7yth7MV{D@;VsL(6D)mg>
zE0vdf)PhR1m0Qjsa8?xQ3a+*X##TFK(obUc)3~L$Zm&knS>WY$$<%0-@t1a=ii+Qf
zM}v3E3mnrgVQ6}OSg+^~WG4s5m(U(@?&>3JEU9wWeMavXtE$G*-caQ17Uk+_Q6lD$
zBz-LQk>GdA&e=W+It@{dkr&#Jy_n6vc`}IIwc1;4=)a&>d8M0)sg>ig{-p*7LH?f_
zVK`ILCAF^P^f<Hvk}(}^>ciAMRujJ>Y)sD95Y&6Ubw{2vR(LNfhF*%^KNFkScUs#8
zSiXr@<C@W9%k2JpumShdA@>4D83EV(XnQu1zW9&K0V_|>RxqkQqSkIJb*KeNZG+fU
zc&7;>INxHRPW&(s<+>#$D#k^qR37auS7UbL5Q)`%LK$(!7n!{8v|#VF61xJvYG@#l
zbgumUn%j*V%C^nWF-(n!vMmNs;&0Ef63ZUX!ARs(*Cf;o$6K78(VE>7oG;rmTcsM5
zisf9&2wjj2$0lI_d$dan>bH`{imDNeRJ8{j#!}aR@XLdJo?4M9MR}DY<JB_sA&<9C
zPk#F&`=eGA(6}j^gGU&T!>@j^NW2j0mGNEptC>~5HGbD;A^BG1{GG5XP<zaJDG0@$
zZ+BWcqb#_@&7;fHMOE_cL8TbDCCRq?0O(`8tpu~Nsv}Ry&R8yv>>*tE>BHN3n~7R*
z&+7%aTCfgrE|`X78nM@{dg4!WCUe|*nHmprof-nMfE~brfT}<A0f`wCbhP?9RbNvk
zrzOMFEvzCUU-IJ1dj3?3ZuM8N0n!x&Mz0l2=GgEV>{vhYvT5`oua{N;`|J&J>>fvJ
zTQ8ZbMRBR)Qy#A2BhYz=66=alRBl6Bybfy^rL-cQqJXe6V|C7)p_(#i83DA>dav|^
zkF_q)w=~sF^3X~&6%<K01$2NI)^gOzp8Ma@?(*Y;TqX52oLi{>0-!ckOkq|KoEAxF
z2z#`uWMVIJU}Pf7*w4&3y*+TqQ0xt_A|c?*(KGui)YUolVcXn%Yy6(A!Y?4AF1^Z;
zKm^QKEfu%6{wzIF>NBwjc6J_@>hnU}$|~BchPEI@yF2#y{$PM$i^%GQX1d@X)zNf4
zseZf=MxWZ{oR5&Qiwktjtz5cj5ATpct#<kpM@w)1=;f!<qRb~tX<gA*Tf%VXR)Dpa
z9m;}c5A}2mmDoSxW_pmkZtvP+8eFR<uhGM4`;B1tE^W6nX-8IH-QnTiERMe!@#N@4
z!o`r4JJ6fNIH=)8w-fJ{Yjz$=uBor^rfjJ|eJD#){7vGn5PG8JBIz6OI88F{OmT1A
z^Ca>-&UVmN%R5$-l16FIz*}7*sqk7Z9lR!K8gv%?JWV9oT&3G%+%prT^ay!evG>!i
zDAU#=@VIUOw$XL*;oT3n18yM64W3y3mZ!ZzMt4H+d*Be=JNPmdfaPWW(hykM!-NwT
zB7AVB0NfM%z(2!Q5Y{`G4iddKkSlWmpf6TBn#fm|-5oszq638ROS6C`$TS2g++?BX
zk0)|}lg<BA4U7N1P16=YwGU{U0oFVVeDiA6Z>br++(<yy-vz!|Zrp$%1USQY-esK9
z7E7Lm_ZDQqr&2+uV*27?p(6lDl%)FiJ%l^sg@4fRHuJiwX%2)e0>F*_2aaj^4h^|!
znOok;3*{~eUYDTKW@dW4voUHciQd*T2C4$54{1`M*0^PK;}YndEvB^V*6T&}vPjc!
zk<(l$_3WUAns?lXT5c)^G!*MEg~h<aPGG0Sxj<}AV;Fcwa@oPhyTeJQ({sQ;WrRyQ
z>$%?-OYX0zD+%hxM)Iv%gHuQtEsotB*ou-#!#AT*=P}`Wb(H{z>;%^J#coJ`v*c>D
z^(PLB;ed)CH~qQ8?s9xhLo_r<gl73TYgY{`D_J_{Y$Zc=&Q%u|X;T8}K3z-?2VGc}
z|5vh$!7yofJ0Q6b(Cq-<nu1@fa@4ljVZ-0RqL!d#TkBFEWDZEm>(RJJaI}%$spxA3
zA_-VO;wSLt>THbG7){c}3<BE)QrH48An?xs1KksZ&IalvWf?!8{&dC@X?~H37GfL1
z#8m#7U%uS45={^R<HUrBtj$~`K#U!zENz3Af$y))61-Wwxp;iaL^3o)d0sS9j0Si>
zf4IGIcsror9?%XSF&#nd{{#_AN@ur)%iKW#I`GH4%LfouAaJL82j~pdX2BOX!6)XB
z>T&&N>)?y<KVA{w#nb7L=xG5FBxenvVlxasXh1mTe`)&;69!V(KU_5HAKwl@T~ZJH
zL;(KJKU`GvAD@_5@+F)`3I|NDf4J(O4T)KFG(kHbr_j1^1@kKAiy1B!uU=N)6nMXq
zv%Wd@?5ujg6$+Mv^VPSXTIux_xnu(`WY8NGy$qj2w8Cj^YKeKUYj&6NLOgwAZ^%{*
z4|w`2-&Jvnd3nA=b0VAZEjd}47=<6eiJ^RpcJAIk2j!ZQgMgGL7icEeYwOSh@gpi$
zf$J%SpFSRPH?3s}GcmShKg_JvUR$jEMYdard4ui7)k>nY@s9S#Y&XTL8a*KNKRN^(
zfQIk;c9WyaUn(LF&IjWY`s;<Y6OY-1zXS*bP#Pg4?|jWCH@N*5sm9OQALL;YupY=k
zETCYaMyg+gZ?!KVY<Dn$f6{VeUZuh!^jp)h;NKuvGxhxrX9@=u<uY8xn=Dnyu&PFD
zTc>>P1rIOO`itwT-DB$rO89wf$-=*^1tt7la2DvfB|k&eVS;H<TQ0=s%c}8Kj7}X_
zVL)$yY%9O3{Y5g@sU1B`PbY+eAUjqp2j|(5Kjn@mj(c6#b(_=Qvw8YF0`w@qC;j?4
zGB4?_%Og!PS7H^(fvz(KCQ9blfflNOP74etg@&y-E&^ao#fU@+v&QlHLT$y{+H>oD
z=B)w;6wff}fjnLRd8zx)i{<}mLy!k-DxHy7tZexi=fyOm*Hy#A)>KoURIOKYEhPQw
zZQg@bA6tVbPWY*3V#ZpQRe|U~nV7(5TDSYi_P;)7q5uE<{>zH?PtNaua>@N~ucBsl
z2()o@NO{k#n(8c{y|YrHw*&7Byn|M~9PO|bb*4!xc|gq^2*|ID>sy);s&VwdGAc0w
z)kkKc-LY}#kBhWWrxq@|mPs47L<N@Ttgf<l*v2U3W)z=1e3Bw^vB2N}jPk@^A>1Ms
z66hu3O70}^h>m5(;kk?+-mR$weG&j*HRD&iWFA+RSiV)GCl0&`7ZR}%nDOM}FtGJd
zw4t74R11&th|!SL{FMCDSG=(xxiTBOP{UC4{SP&JcUW$NP3*peSpiJJe^)L1dxzio
zBJhR3vjEQZ->zahAqO0LQA{L%QIiUfzdfpa@m6cmgD7JE8zQ)(-0}D3zN-Y_75t6)
zZ?VNX0BHQL@Xx?7%?cr-;_x@2BI6`6$K+esdg~mWm$7rxSM2ZTNq0UMEc=?zB~VV5
zDxqq5n`cqo*Sl>f0!D^ATso()qSs-r?a9rrZ^rCWY5M)MZ^H5U)lX{?A2{73EI(FM
zlpkFpBNzf3tSr1Y67!hUQ3D7N2^L}l5bVI#9|=0(hd%kO!h;}%IuJjX^nvNF{-w#0
z<u9S0A^(ab2*=dUE@h0X>`Z<YuDrFb$&Ff-AKXfPWe`6e6B7|83j%(l>#<)(J4s;7
zGt~D)Y@fHz`QsU%$Z7Q10W!~V198nqV^wcxk8~}zMY%+uNb4%^m8)N3EZXb5_I5rl
z>~J4vj-ZBb`V9km3HAUWE=)0&U%~=#DH%$t3e&vix@PY-IK^eV{}3cLs?vWuX49rX
zJz8w1rNov@&aTfM%!>9aKJpP#k5%M<*1OpOa6JRgDpGoWrp+%BckEK(M6V{e;6e;5
zM_0!M4vG`DS8gyIxX5d82V+-V>XpOb1c7vJ(hmTLh2S4e;cfd4NgW$K*ixYzhsvq@
zGZ`!0!W0f+Yfw!jg3?Dtix=Yz&@NbXJ`Audns>Z>PTzJ-&}N=m!*#SC@n;Y6T8|C)
zh-*OML5fL*PP2`2KZnV>l;Etj*XG5V=NyWCfjx2s`P(L5(UvL&g+J7-Z5)EN^U@}T
zvs5S2rh^_T#k$^Rw<{uw8je=~P~nECH3#aK*K>WTtBrXPcAHVU<Xs^{zzw;3L%@TM
zd!`^5w@#bWCX4g@r{rDDG>(}mA~^Y;N(9Cn`)><Vc>QbD5b&J`rjke+W`pRT3G|$*
z2f>C6kxT*bHJKKG`%-ohd}>!#<hX0bH0uCJ0ciYf;IEF-z2ztJv)%VIqx0_uvA3>s
zB*EX9(R#^GqG-Ta9WKxn;1zXZe~~2spwSZmGJ;wae49$YhWy#}IIf_8Q#<rrj0P5)
z?c&~JVmpAk4f-E)pCa@DAASLpsUDYtSIT&A0g6N{^Wtn2Co+8w1o8A#rmfj-^4GFg
zSD&fvKys@ek-CSTl-G^PaXeZd-BSp*%O;Q2TBR~J;yL?Fi+|K#<qq+qio6MF7(`Bx
z#9Y)WF3^1??s*q<F!ZqIjA9#iVA<hCxa(v~L}{1aOwIZ+mWNnL(8r7V<l2U}seqFt
z2#T&!O3L4~@th3q2{CFWz3?!*scP87)X1`^dz(t`7Jx9B^Z(rnNSMUQAk!XW4JMl`
z(Qj6S-W~PNDgA)$=nq$OgYG#tnie{)d=W;t=+J6Ey2IS%t2_OK=4WnLXgf;wYV<o5
zGTh@%l3W~el}8ypuo1+Inf%h5aEu9YuCy@PrToZydDq2Ue{fQe{`rd*H^9`Fa0#ag
zbgl$1R)Jgl3z}@QiVCoJa|Q>m(}t6tK068Ka(lY#3|tNYHw$=acGg%^vdZf9KB@2#
z1V)s-@zRSpn$~on3i3`ryA1CYmsbU{Nz@y+hF5nsT3WB&6YBMf{ABYfVB2NE6)#pA
zLAr`N?gCc{b>6zPuil)ZJ4#T@or82fs}B>kOF-%i%72y`@WCUU3R}U#TM&2sr(1=&
zDv#H}?~X;0(NUm_Equ8G<s(<C(UF0x8(yghJ4CDHL3GtnS-g^IKPRWzlq6nn>(W$^
zvOd85#|ENv;@9)wOLO0foOg==%|6pKna2^Gt34cogrqYS1+6EcgG<#vnc5K{USN9V
z3${yhT=kxy%6@lOIe-tlQzF!qt`qv)JucQlZ9Fc5VJdTUwdrM-6H|=J1p-qOs3N|n
z!})@5jAOd|m~+XI#wU&!(GMr}b6iDuGoWJI@9NwS++(_+CqkWY&Tse|rMqPd^i%L<
zTW%rJdAQ(9^OuxB8QGj`_%8T`JmDmzOiPjLf!@~9#WZoTj1O)Z3O{__!e1<gK+ZMM
z+J1Jh5aoH1vW$&;@UHItiQ@&cKE1xML24j}7Wco+3dep;G}4=y>lwCVyRhwj++bGY
zIyXmcpMiR3S~H>k;-y$n*xO!87e>d;OV{RQ?Yw|FzpN1Q939fWc-H5s9aYh+MvSGS
zfZ7*rhO<$Mt7)yP>DsG@7D^Hoi`7Ey@b?Q8i5(54fw@-?)zu2y`NVJN4uAg|(e;5P
zqSZAp=*131UthN;?{@h~XbJQVrll5tYmU@6sd8>I3zv2^#d*14a%G?^pzBQb1&fsZ
zWVW}<Pi<W0(|^dE@-FEys4YcP;Jn+s_Se-v+fOFHRuFQVZ7-)7QVuF93Hx+swoD61
z|A+ss{9v{i0X5D9cB<r_NT{xhxJNPLsN-A#ql(ezJ1BpxZ>_D|gH#^zC%iz|BaF*h
z0da1toF_)WSV7f3Sv|F0L-}9iDZRSyn|AOspbTT~k3NT%cXJSmji$b4vR|=AEPb+I
zoFqvQ9^f+Yjy~&WBnX;sR8q1-RyCsf#k;@%B4f~h|17e6_~u42Y<wya%8Glu3=oHb
zWUxsFo!3Dj&vUs_2QFjxG~L3&ewvX&S5!7~ue({J#inKb+;3bt;P=XmxC~yZ$~aJM
zVgeWvK$1Et8b*x%M}#Nt64(q1Kw5k)(Ezjp5HL^09bcFI!<kRN0V3u<fLZ^H#KZp$
zBppiM!k6-RgqzSkyAWh$-n?9K_g4?1^39fRsd)>vjVmOyk6M=GvG%U${#ODp3;{sN
z^;`Nj7|S^Z{wbds2~vf1@X%D_{O|$<$>HMSg_%7MJ2{aAGfsxnbY}r^e{h%;netcv
zycmelR1677m=9D*06%5A8c5`~E*tE8qyU!Vw|_<Z|D6T<A030j|3o-~@QfIRSQ7+*
z(h#CM;L|e})C?P%GG}`CcwY3dgGA?4xZNPMyJ7E_Wie#wCbuKVGS@qDgPJQMvRAcj
zACh@=(gu9B0G<+O4?JBNF`5)wSIpB1a)aQ;Iu>YgaHa@?Q^D1=oEek3Z=T+YRt4)X
zZx7xUN_<9p{H;_4^NS3nj;2_KPA0S{Vu};eT>UP}be&}}yKh;?8Kx0M88gChIrtg1
z;-F|BK$Sm{E(IdPX;D4k^9z4njh7=(;qLslL4Q{&=S*+3W7u+iM3$&&*f{=Kr$6P}
zzId$d;fr|VzCx|Lj;U9+2DQmfQ`*x-(Hl`v#XMYN`^|SICDC6yuh!JRTN6+_VOH(6
zxguS5t(95bCoc-_2%qU+M!e5MCq@ulZLJGuzpd%MD!A2g2Ty_e(8l%d2DnobENStZ
zV~@MqX}G|H9_nHW9yMzo%vc7qn$4M<L^gl4?uT9$Zj+*JHQ)-rd`8!2zm4-}^fb}{
z19^*ZfK$J!z-Z)%sv&vv!gshfZBC$#1>`X?phTk@8}FK<4S-m`Ra1<aTG|U;O3@3w
z6cYY(dHKy&SJlmrk3&)ERvaf*TD4>y)@PXRoK?m^E|I!Hz%T?WTcZzkMpYznd$Vwm
z<E5&a;n|^s`zyX|N_H{&ca}rxnupv|lTT~4(TS}@+ohk_MJCxadVRD_pZTImPK5m7
z*7{PC!naHLb{*L%iBS(D1G!vORDmz}B>{})DZndjr3;TYDdhgxx$bZrtLZ0~Ds9f+
z!S`wA#d8TdX88|p*QNCGK`sO>+zdKpnZX6zS;}g($>c$=#@dNM#2YF;5AxRPlL~0l
z8l-l+eFH)Vw`U3fbCtY$*}~ydvS{2`sUEB2etB~2omGcR*!qn$|7&hfm~RX!NI(~n
z-Oc!G07<|r2^k%p%#%^zGxYeqsgXv5H#Q+h*;T=4`7CZGF4WY%Ti=K{c7jI^y+36u
z#XlttEuM#Rp{I;-?#^Ew!t-dWMbFCijDKl}rFZ*wk-<2gWi{B#ttcu}LxGx$Vykp6
znxvz!3sj%2a{zhT=@H;F2gF!nWtKAV+_H;Afu<}h5;z{fOTnkNoPUw!c*(;5(Bvg@
z9jv*t3II%a?Avh|1yre8g{fBO7a14s>z3>f#tEdo!G_nv16Imc(?w?@4tD(mWFSU{
z>u^+*CPB`2w^J@2Gy|Vg;8E>+{)6~sh%ERr8R-F|wlaz=%9KJA@Snc*i|l<>p&m0(
zRnJ=)u<eesTCo?C)4Mh~3U3OS`yQt8UEo6mU1_$%b<yob|BQ1aUZ^O#c>f*27wJoI
zXKe-^z-Io32HQH<xxxyORe`D!F7NN(eOf7n=~~v*X~|Ev0Fdrz@~GIh191^iBe^Hp
zGJq7ttsJiTLaPptCx)&{nq@19J3EJtB-$B;nI=)pYh=GKw3O39vO=nU?xmeeR{gD@
z*FS1e{b<<wx+r`%`a;)wL+**JXpZ{4Yl4ywMSUjudPa>V%G46w*u_JOwhe5u=1D9v
z@ri21MJ>o`^Kgrd*tlNzuwormx2%j+nwmi{0zMWpxLT(--sri`r!7~)A9&~_)L1P%
z)}p=L>aVc`NO!i-6A=XQ<rpAsnEf;=Mw%(|Y3~!=A`O93JTHBUdE`BpbWyU8f`ts3
z<7bSiL8{?s(d<1WX`FMGu^szl^2>_5FU64W{j#I4adPt+{3uVooEpR_6fsOy3W7}l
zOHOSEHxy^u-J)1tKJg(wb`~tZwfu}$RAFXRPoxfC;dJZ9s_iUPZYBC0u3w`T!~U*X
z$H`i!k?Tu+Rs4(4p2|#q{<>|YDWvo61jZKo3g?WD9n!(TJN)2X>AX`b3O6USz#*|G
zlF#eJ@ONYD5BUr}<O_vxTB%rDbm6Jy>o7gt2s$6N`xDSxOHPlrxgXyNJN;1%>z-0^
zxL3eA>qhf2%aOZ~C1B>t$4?*=pB`*4F{z1S<xMm-F0UuS>$EvFngQL#+BMoCW~S`+
z&G}+XRsof<aUeC7Rxe+};$<ilh!P}&ZC7l1!ge)6p*&pl(>B^G>6={wF<o2|YZ7;z
z4l?dAiXQ3#ymx{Eo@WL^;3F2c@T7?xrR)Txi^_)s?6!!<{>lZmbl(l>(*j-T7Mmg&
zSK#W^(X`Mj<1J+7Gd`l)Bl}Q(r`&}_du=BM8DmX;P1yiP7jkvKy$!bmdIiO+?^;j#
zvDb+u%A&~9U<)oZ5R-GySrF96oh6(io0hHWK31Wyp7fXaSRUq^d2UU%e8v^vwFpc&
zA@_69or~-yo5z)(W1Q~vyL+z1`LEM^#2VajqhKXlT0*-X3=uBmwWgnKAtn<<RvH(X
z?58fxD<lUB5)wW%kAB))J^R!ur%iv1CTe0GaMyNH*2Le0O^~FBnJRoZx{V$Xld`U@
z6T|DARQop)QM_6TNxaY#hf?+h!6yMZ5U<V67JpbMR36uZZbVXNNaUf%I%{4NuCr7W
zF@;(w_U!YzR9v|gE=-f(TdGq^jbmQ{`|~JOOptg94Cdm4^#Ym($=y^nb-q~)*6gk*
zO+)hO$elrL$_=@Fx!eB-AliRkvq=>57@Hg*`3{bL5*+|PUuBvwHWdA(YE14t-CBuY
zg5enawN$WSl-7mR_=Ao!bR}Si-{n0zV2>7x!SlnX`<(!;thq|uWxZ)Y+2zw;;>dr^
z^8e___5T`Q?Q6jWU$<ThMl|GB`<@F*Rf2xo6EOOs+j(uzn^(igU)ooI?nDWTAeIt1
z@ghFbT>ZW98UmzvFI;2fi{-lB)w37wlh4;|PcyW#z2JRfD+pI?;>2^3#^E*Fz;etA
zciabU1Go`3Q!Gw(ImIr5rOPp1>TL1+30qFE%R!e|mp;SW@K;Y{qbK~Vu@%HmvYcHd
z1sF&>=Kkw1+t^rXG}Ct$4bcy(oZWApY7*qVchi1Xp*kQq>A+uPVWp|ELpXu`U1v-7
zM9<9a<Txk&7UYvO(`$<0dMSolnA9ef1H7XUY&CRx(HV?(@&hn2yYbgN$3Ry~8eRyf
zl%c0c3_t~M1t^FG4-(@Lo5eUsK(+HB9AmJQf!-dPt|SF<4H38?aLg=tc_O3SmPn_N
zfs+TKPh^$Dr9b&kU(+-35K_-+b<kk^SHdH7@ml<<9s^nz?kw=Ai!a{8!^$B3<A**~
z%FCB9OgK;5U1LC3G;)J<t`vbo0ZO5NcS*T(Oh7Dt0TA&P+U3TygTpc^Akjagxd>7i
z&(>&TbbL#8T))$Cii7|6xhJpWk2%CSiQ|p&2w(g%+7_wTQ5xNHrW_ZI?lI8S({4Hu
zFnfC|r`5o=rapqjDv3g3?Uw4(N4<SLC!@G(JQ#WwH-H%&!@3ZI2z<^GKTmWV(}nBC
zCH7v5$I8zMC75VL_r6b+Z8Yk`X{jz}6G_A*fTGEC1~5Zm5Le;C@%yz&V$<%BEJI66
z%?&JGn0*KjqKhd#`<bEg)e9}o6Cl|8$EG`&<e#v)%*SCDZU3FP1(pEYA*gVsE9``e
zOiTNL1jpR2!p*5u>dcMVLE0$2Yg{LfuQ0<5)d4x+d}1cp66hn{P{*0C4DtAI0pb2T
z(azV@DW3Vpi?bH<U6<WWMLtn$eC*D|emc0w%oTM9`HooN0lAC|!D4XgSwJHhi;rHX
z_a&<18Bc}ks3Rw=CUo=#t*YiC$MaR!MweB(Lr@rr6}OX5c#Vea;_-R}gYi-V4H7EY
zyqD(am@~KdLqipRIcDx{YVSxlL)dhz*0cnfB?D9TuCm_>APA4vjRbb%>j^rMR`~YQ
zD(?$Ak%wG{=|5~<USsGj57}BQ*mi;23{4+moiIJS#3eYpPidR{X;kUgt({u~9rNbY
zu^L|Acwf+|_>)7lNh3TQ!^-S<e%xU@jIBIccT_fmVZn5Eu9LBMCF%?r=Qp8xf!GJy
z0<n%O`!hXQg}5F-v8;&;DFPG64QV<(?Xtmh2WPF$wklkwWnuQpCv(j}=h9543lQT}
zSb%9v1%;zKmP><|MiRTQ5}f)A504YJEe6%Fp=ZClJ6RqI54n!Jfzf(H@4ZzNi78cn
z{07vwOxxUbKmWM9))|Vu&!JWn=oTL|a-Ei}fPzC9p$kNIOoVGd8^jijnlv}AtVvAT
z09N1a*puTp-!bLIrZ@^E?_G#GnOAA33RQ<QS_eCTt42quai@nOO>aPrb0xaN1bC;-
zuDyCm-k`0rDN-Xr=Ol8U8me`mA`4qwpv1wzqq5QOk{x%BcktgOvQ(yOA4hx^)-C$_
zK$>1c?@Xj5>5+F4d^!PfdRfjJ%TQg1Y01Jsu+dEt{TLcZf)bOIjUgMG*ow?yr3BcA
znM%k$qT~&o351ZnZv;pfi^G53uqXcxo3vm+_jF3msVzANLT#QUr$-jdIEc8v-0iEl
zR#ZYRY!J+;x)c@v6Hd6ExC_jey9ZOh$j&%}aYUe5*ain8v1y|G(0Xm)Irat`Y)<^C
zU*m(gZ;gSrNms1^&$L%Eo4Ri{0uWY1I5lPSEL<gtZ6i_+z8jwZs-?%Tz_+II<UUt_
zynJNublXMpwZ&Shpu`S14{T_W-J@Thsy2wA#uhI@MvK)Z)EM<ko!slW*vO<BtEXyF
zihsRrC_}hVj&sV}#d~+A!L`x50&(@Q5tpkk9<qL05zkvgdT9>$1M3sjIdq^;CDs7*
zB*uDZIJ;^bc`o8SLtm`hYmmr%gqt4sfUjU}Xbx^4uR$7o`dxjZByi~vsk4L^w3Pcv
zPiZ<h%8E?o6}t7{_9F^rCIIRVfbV$g6SyW|Y}?1uW)2zW<WM@QpYAuB2|VWHzcCN!
z!3_3(&PcS^Zx8T6OxtW=YC93ph)cE~^cb60taE3ziaLES!9Vp<KRKgZkjXjdvzb*2
zwq2PdrUPv+`?GA{_9EC!_Jf8Zi}>rvtsGNsB)y<bePtV;jep-2f4#H9eGgIw0>}@`
zti%K;zu4x=u?7A?q1*m(_M3wF?v1$AcK+4W^d^rp6uV!AzvQ3!3#RnnLHc!X*^iGc
z_$xLmk?x%XP>6h>)Av?k^Q-tad%(Z4L<TUj=>^)5-3_5LJ#xRhee-Y=iZ>^EwlWS+
zCKQ#%BeGuy?r6ZOV%;2WWljB*CI9t5<}VHgGRLK?#R}9$&*D36Bojq@M-Pci5}qpy
z5<6yQkj3_OtS<(!s6|1$=%kvGGq7C5d@+Q5bl~LFY3io3&GHC$dM(*<^6j%>n@oFV
z*Ajj*a%<AboN__lA*!7_;B2sVNR?a$DhT~*Bo_?|lW2-&*}8r%MqRLj;u<scc+_%8
z(glww*V?^`g6(#zTd5^j2^O$zehK?>X=gMI!3?buhhb_>?9aDr_|9ka{IYa<^yP4Z
zv_@-T!RmFs#;>Or_rJ;kVAH4n2weKl&l@=Q_uslX=%@BDUCGqcXx2D4K0x!boq|lG
z{F$%*+5$_Ax@5l2J(2+C3FKi2Q(WfC@jz}Io0b1{V~G;TTJQpZ0bh)KJB;GAZcoP1
zlV}UkkS++bvhfOJE(3YZcYzNRG1l{izW{RW2f|yfonnNJ+?gi(-Ve2cQs(h&3=@Hs
z9-C5&uK6G#u0lyr$oSsSaA$;A9eSk>d^o#fuN7jHIgewwm$7(DNPEnK`&z%+NO5ex
z-dJYT?CjaZm#%7b?;_7@0`w6wuQN>yulUoSn({054g*y4_rICjf8l$H{v)-mbo%r!
z+}~07H)=GP7y$rxS|uV<Sw}tXhy&5hUt~uc0CezWvbK=s4<`x&{~2uW&tCukGGFuG
zMe={dJ4?rI{^Lmi1a<rqU^9Ld4p4mvAK<}2X++!(0FgCWU$2q118aurf05Wvv+~br
z0O6QObgD~>?6?i=o0<{mh0H~tz=!6q1|K`+&FOewkF-jaIdLhJAJ(EoQt(f<5WZAD
z2MNF?dAKm_?|+d!hg`%tcFdYj78K6g*GP5QI8w&VH)f;R3_{7>Zc~1sA-i-m<bUZ^
zRu&KuI%n;$RZK5V;~X^uRu(+$a)a%PRt%%=KI1euV(z<rC4$Fsa`Q9O5RgB$rz@d#
z_j^o<wjBzS?%O;f%}rxg&A~dnu%!5`>))h<gg3~vq_Y$YO5iWKO}NkOw1e466u5fm
z!#325QIu@kSNp_+wU1)#iZfz*RdMfJsP81X#+_2Bi;|=Bb_LOnJs<`cjpI$4PEq#Z
zjLz4^YrFp&d*2<`)V8e)0wPVMOAU$?DWV91v|yo$h=|f5D$=E@6bT7}6sZCV8xew3
zkrL@rLq}0Tx)gx`BArAb1X6sHeeOBGZP{n<{q8;Q-FM&lLrG?mwX$Z`9CM6seB&De
zrj*a-Rz^aecMn-~>BV`{C+uW_w}rw<BnbH|K5uC>$B?89HzGPt1r(XGxxh*%oJL%2
zhn4C(k{c|<$E#zHslACWV3!NKcvuZ%<&@coFe8%8@UW##k{dC6+BZg>uaH>Qgb?e=
zD0$wmSj$tNc<#O|^Q-$8ML!4yg-I`=<1zFi0n^$*Jv9y`pveF~SwXZZBHn05aN}(<
zpJFN!Nk-R(Om7(7@^#~^85wPV9>hOUJavu{Qmklv8LAHTuwiK>Y$Z2ML<vZQ1WvI*
zv8(r<^xu+FoP%!;ez+Fw)^wta{l3`&H~N@2bq=e`8(0_*{5&PCob5*Ah|+BSjqF8L
zpPE6AlifE57d=!<f9dmLP4|=wdf&7N%eZKK`FI=y@mY6=2v=_%EN{fdd}M(ze}h~3
zU24Ds{he9~M-%dd*2RQ^wfz{Ey+|6+WfolP+!jycbG&=FD$FU_Lor^Lr&D2dx819j
z3~537EVnD~Le`bI8_BahnCs}M29~}nI83|I9jf#T{B^-1(?H&rWA9zejvTpwS7|(t
zH9)Ms6(0Jyzu^&)X&MsKpw_44!9}*j6;*{Sq1_Ffw!9K_`Y%@+KaXm&XQ_^m&302!
zk=n@?K;bTAY(za*4uQiaml>ySY94yGT7F!fY!h(fdeva>D1XLHt<rY!OXJ~;tP1Q!
zfg9S~?;tx66BFz}G6BXwR0A$9`^{HMwcX)0W2$w7#};g7o$jNLNEakYb+XXpF&kVC
z-Cr-?g3x$}aa|3!Rlx}l-L2Ec3ac&}SuwdQ70|lY1hhVcEmb>kYGh@|KfV$<b2%9C
zJo8}?8AH^@cTYJkRbgeI!4RPaeqyqPh=5L2pAv?BI7VlX&|iIc-@dGiw^&Q0SglsV
zFCDidKwD5zn##n)$v95pGibO#RVZ5aK(!&nXy}-w^}<P&Q>S}%S_Q9uo>dT(-YKNl
zc)#JxHl#T)g&G-Oytu8O6GWAP)8OwuTFDW#FwD`-MLIbToyCT)6kkewYxLl8CKsya
z9Lw$k!Z9M>^eQk#r30T@CPa`Kt|I#aPh#b=FVD8Cy*HJb4tz}aP(dMB@6ZX+V8#zO
z!A&R)SVi~74-?rl&8cr-d#FNwj|^`Q*(UUk6WSZQa|0i(cwVas2r1ih(z;e<_c5O9
zL3*%ExDDx&Uk4T$b!(YxN*8!ET$)j?>Un1e)&y@2xUDQFiVL_E1~Xo+9_iBSe#5=P
zbqR1$A&(|XFfH9YAK;#VNT3A`SwRT<>9&=$?M%%aw<@ap+%V&R$WrV}tFqX!ogC2}
z?k7WFIf}+7{~Cbg_`fOoy}c#=00;ez1pdQt*u`H_qTrK%M~Jrn{*Oe7q;sDj9dpdB
z7am0yFyuToQUr|?LDBM?j`uYB1-GV<<;6L-vJhQJ9;GPW(HnMHLvO!Wy_U8sM-RRf
zT^Ei}q}@~^0j_cjVTw|K5Je0EY*H?#U;Q|iWljz*?|7rC(<~-sxoZD(s(G;9#b|y9
zwj|M^l^BE*vPpbRuN?h;^$aG~G`=DYW>V{^Mayr`JF5G%Rg|KS?%mS64&0f)5#4az
zBPQT()C3U?9@-*L@0UUrD{y_+vBPE8-q5JGI*%mZpIUk(+H<UTx8URG{0&tpx=*m-
zWP&~M7D<k*8q(On5Yk<Px>l2(Crx|f>DZk(g|m(>hx8&v&$Q>&@;YouM@rG*Vbon9
zx3XWk&4{#n6cBZi&?A47aQdDURx3r8N&y|dwVm{~3m@=tt?VHP4G{XdK|{aN@UOjA
znE|rBH&aOliy?|G_}A)u5kaTV5g2k$JY?iQK5rAZ?1&Sj@~t*U#W3n?>P9L$n_YOf
zavJ04gwDLcbM}e(GnE5-j(lM_&Txu7&nf4!=Gp!_d3<WeMCnfSBur@DxCPH;{La1n
z)n%)vGqC|dMcfxsHXgp|<e9kP*%q$Fgp<Ep1*tM?hJ{ffM2|eUWRLuEIm;C{q0fuw
z3trMn%JQD2>vo|W&WxhP!hpmmezg@g3dPl_3+53GmH_NS?Iy8(9;aj>Q$DSZye)GO
z>nNU{hbdD>k!3*2<(_KZ-G=>S)~c$+w%ZyFdD%Apy>0w~Y8Na0_QpXs94{wag0!+$
z^S7I=@kYurCct+RUD<&V2}$Q5dI)5`&UD7>dfSP|qYqG)`|>r?eSJjQ>m*txH{vEK
z)k^X)3Geq4b*5Vka>ct*95=jK4dY~de_`y@IWyJJ1HSU+I_W%Ij<{R^7)ERj3LumA
z0Ud$FvQ|`ET&=B(96XlbJzw_e7V<^w46CECKMz2rSdJ0^DwwI9ft>sdr5=af>d_9G
zgrC4JPDDUhD&(2{(g)RKFq}?(7Pg~>q0^BDuWtzT+~Z<+Xb@lRsmH$;9RG^^aK}_f
zv)rlHbZKI@D?@R<QY<RV*{em}WYhH6okwSXiIUnqZn5~dfb%#HXw<_?_a%fdPe}t6
z%BF*4<*A8aPx1Eb8uy$`my%arY&G?v?`<*^bC^D6zkIdidHlmhM=)H;4ao61iR>;r
z9&)m8`MQ{T;A`_F@j33Bww;X6=WR5&%|WcXU5t93_Q$eCjAH>lyb1a_1?&s&nIRNK
zb5kY4A;x7TZe!~#;KBccF1nwPo8ItX>x%k2MrBaBsP$qQtlGcDr?(LxWF3Q_0Oi)t
zuLdArI}~p|yJjif092_KyLr5}06(oCM60b~FS7yyE~p%zE;>3(06OMF|B6b+U%&IG
zIt#4PZokDA%j6K?<Fq}St7&C-^L17EAXK7vxHBN`JiuD_oS+#xrA>p>BqU?@ASaU`
zLiV_>hAK-S{cj{>!!PHqrJ#|OUU;OjI@DAA#2xj^sxhzcQc7?W_Y@&}osNk};p)k%
zxrxtCCi7OEKkS!tul>@)@UJw5BJ4`}coQt_PJ<$m&xCjb&v(|)?DT-i_4q1w!PpDt
zsAqcN53an_9OfZ<<+>mRz^S2}P%Jk-(#EfQQa+;iml50c>Hysv3FjTJn@Jx};5>Pj
z_U67Sx3$B5T}D1u0y?@Yw|Lr#bW?o6)PsH(I@*C>v+kr+#AR&rZcJ;KAL74PNI^B7
zfA3C{CYm98+Y#RZ0T5uF{S~tK?dJ4PA4}&@C}9unyz&>4j|>lw==8o->?&#Fc2ekC
zX=S)XcOWrHYR7AZ%w%t#i`NnxY0Rd&m&Nd9z{jebXyc*WN7}#IAi6;`4-B10m8l;&
z%;}*0l_puQ+ghkwqW^5Ma@n-*G6YD|P7?1xmoTJ#2ksLs8Vr3~V;ZEgE4zl7Jdg6A
zjla@pNJ6D!s@b<nNJkKFpu0i77Zo1wC)S7$%=|oky(T?h`p%7~)^}o7qnEW`akMk-
zV!zOqQg_`!Id$v~XMFE=Y_>z4ouooO+)L#n?&g$}y^}CCtEEmFt)hGT;+NRM8P?sA
zPcFir$&5VBtZVs3aP~)X(Er!vUou~gQklVaB@yrxa9AAAf~xTm%-O}CjD{%&qDfFd
z7Gfj<bl~b>9FVN^GAlOwS8;%BDEEcsFq3(Za{II<j&4Q1&-;~GUlB-lFLqC}=HYor
z+BP>eIURqkfZ6va<L_VX<=@}?vts>imN&+u(kgDWj9^+8w3*meIA2=2+ro}IRHYWN
z)~u}A4%8VR0(HhmfaJ^kTU+{vkAKBnfK%{l2Jv!&pfCzb(v1EN#e6sX<RKS96D;I!
z^a;P`@)vb}OZIo#U7+=q=00yKpt!vQKFf}Z0?F?OjyZA1Y#8+>{n{Cu?v47ljQ(Py
zZyEmsD-OW35&gp}`^}E>zx7_6(FmGq7%<I9xi>Bf=w3J1S8q+CDq!SC8|~~QuDTOn
zX>#Z11T-B0plphKmE@;O#VC1D4kcl<Wxk-POaQr-u)spL$OD+Q8cPcIlOHH(kVw$i
z<p<dS&|2$9&B$S0?CEY#fT5lI#^?WMX7xYC4gar4jNkpsw&3j_dKPXs+09HoiR*eY
zIU(rm>1ruaz!5gv`;o3=Kg2yFr1N>pL7IGeIvXW!TH+BrBf*1`1)WT9IEW!uk>tk4
zKrk6I*5Ffs-E1+EE0=lo#m?5o*qe7&^uUSMpbYVbjqPmX_3n^~@|C1`;?*^0P01J*
z>p9=p%oPoc|CHDczwnK|`$y`}KQ<fZ*?dSkhip=85Qbay&k*Br4jZ7ybgNW0>7@1h
zu*mi)PhXED70-8_M&!!7A&--E)rU+WeA2hwqoU&)oX~9)7D6W_4<Kb4SBJ2x%SDr;
zMXoxB_Eq4;BMbDUWAC1MQB~6%{ZQg~xP`))8OV?|%g0UZ(o~(NGXq)507EC~u%r)y
zuWLh8zS7vZlkCr(Q!t}nlc;~D^3=$|DP>8~So`L7iSX}K-u`C1Sf&HTL1w^VPA0Za
z>;WCM*q!4YH#QPw&$J?$cX%%aNC$U3N56(fKyk+1LU0Q_D~bhqm7RW^h_+0Xp`dSE
zhd+JrmFBhi@+d7A_0b1Kmuz@Xy!l}3NeCC0<6L+9#0U#egj~ml;f-1;E*hH3FRLny
z#>Z`5P<M$k-awgR`3e+bT|eL9BRm1CEYtO#M_!A33+&D@GnFI<SzDqQWCJmDxZ}g#
zV6lyRo-Bd1I4G4V<*8p>>mojmFoYLE0pQ!y)MyE<@v$t*L#m1ko_jux{>a_X+X(zQ
zne}O-z9q#}!N8+uDkHnPdU&NShw`dq#_Z(b;HG)TNJASW)7ENdZ=#*K-$WfXiEM<0
zLCP?}S8$TeAH;7i^6dMR5)n}Fsw70I^~FwIXz)|)$)`K&V<Tmv|ABP>eV+e~c>lw{
z|8IbJ&qyxEla7<!iMR0MEremp!|SnV^k{3z;)tx1eL%Tht9UQ{zC8V_%n9wNI@W^y
z_N8V~pyDdH!|Ap))0OGCyOx8=Ma?_LnuMF(EEpMgI8A#V9ZnX@HL|3jGoKxGy}S<P
z0r|Zz6Kr71OIgGaIXtM`$d*qHfRo&FbO?#tKS7ouWJaQ04`HuuOUiuvk`!o^)eTn$
z5usx-NQY@-h-S;$+yNH8EMfozfN0<*sQ1!q0WqByoJ5zb8Gyx5xHNo$11bls0melt
zYvlC{i(j{w&22Pp?d<vlc~=#nu|vQ8PCO)IZIMBn=9k;GW#63=xWw$~+g{b_ZW|D&
zL?Ll#&`*?5y27gqRE@toXB-i}(SqEPxV=iHw*r;ch<;<;ie%5yKQ|2;g|(saMD!0Q
z%=-RXVSt#mY(FL&*M|J=gb>_!R|>?9;{M0aF1I7NDPQ?rrSg^I@|ta2jR$fV+$IBl
z8L&O|j*HtH3e%RT#MncEpdtu0L=O@-f~wF-f@=<PTnF7+WZk&q*$SVRgue27a>r))
zG^)<u>WwJv7euGA$-$}@PQc63`&qW^cgGMaptT3kPWueK=0W0j!>ldl4J`%!Wb|F<
zJrV?XyxZv1KPnkaA@{&%tp<>by+{hp;KwUO6ji(4j9M=V+<!Jj#!DH<mn?>$A-EHA
zjn;A?@!O}Vip)Bsym^CPOGRmCMHqm-wgGr|F_@u~N`q#Sldw(h@`<fk<eHd`z01Z?
z;1k~e-7EQxQucr9J${nQU|~tr<3p&et9y|1HMcive()>T*dRxdO)}7#*Xm+G&g@YY
zHQ4}``EYXxFqR5IxqvJ#j)HKR*s_sBM4rDB`u8f6|M30T!BP;@B}Yu~K$q0OYR$C#
zTk-jiP~lP*F7wyO<jJ3oO#XYJI6ePT7xF*ig(L7&*lu>eDxDhKYGnCqH@h?ShKVD#
zCzJP_2`}pqwZF2?v+0bFqB22o9hL(|F-S)g2MkC?$;#S?%FQIWa!3~X-Fgv}?yt{W
zj4;QyAxe9+RuH_erZL2W8%w!{Rjm+aZG2pFB|Na^t>p?*&PcMW@=}fMarqnWWRW_r
z1t>dovJh4_5s_`9Hgznmz>nQnXZY}opz%?imSFYQo(?ZHe6?PX@gzE11H9m*auYJD
zq?BxcJC`_haIYKJhY_6(W}1#yn)LY{8|G%0)ZO&f51#!3qo?*@Xc1!D!mE1}4NWan
zwUXZzb!!}@EA52x@GMORD;(d6f!29}n=X!$P<QdOA(p6w+NHNC>O3@Sen&I^b~2+#
zQ+TCJYbuoq^u1@Yr*a_PSq_EwKnI%LW}`$<^%Zqdkh`~rgnHKRS$|p#<k{p01yyQ*
zz{A(mV8jkuvtHMp^FXh28V$kCAa8n+Ee8Pm&SWT*xRJF-!^8ax2&hK-Nar+u*^J+s
z*GA|dTg1T|E+XgZohFEazCe%!upog7#pE;Snr*oUnGI}#AKjqKDugR<;)YaZJ|Cs>
z%Tt*Fb3l7h2W(lR7^*1%%vqDcR{*<N=6V)#(haDy$v9E_;v9rtQf_2o#C~!_2<{aU
zcL}m3Z>db(`2o}tRmT0N&aoft(g~Q!XOQ1~YY46liMx<yG6R~&rUqQpFw9veURfJ<
zWB-yv$rE{V9L#{N$FzJjjHy953lmGW&vx3WnI8~H&l$KwEll$L4%aW%$R=6P!4HFq
z#FQtM<+o2D_9XjW3HNH4TBH3+gLr$}F&NBM9JGu#Xx?k;tq}}=vGHTC>(gboQ7WAW
z=NUlzu`zU%?JL*rGQvZs%nwPje*5f0T1G(JA0ux!shewAs$B~-NoMk4ghb@D?JiUW
zJDHn{koJ}46y<RP4~pJN*@C5TfvN5;v^`llil=%yp`=A@cgN8BDJ*8`2}zkO=JW|j
zBd+T9xj$)toma(Y78PfZic+}pl_qfL-NV-=!WV3=8>A2iCLu&|)4XXnZ+Q{wdlaV<
zyNIh?SOAOKJwz8H`HO2c25z@pdLVr7-s@hl0?6(LUa~5vaGfK|;QCrBZH0QLn?7Hk
z;#j>xzMwoFPqBDqH#QWY?X6%1to+ZbzP5KIXXs3bAB5Tt?u~3S9ufo=3&B|$ZHkUF
z0*c)O<UCKCgd*Yv(Tu7c2%*+313$`gaQ*Uk&ktV#8`(EtUMA4Y`wRd6J9LhMrJ!)8
zn9ZXfbg3<>j?}P$EywSN`j2ESBZt4zQ1zfKND^;3k|Gva?_C#1K!Va9cHoh=6@iLj
zy(zSH#<ncsuD=I9fAp7pp8^OhE=Dl`4Q~%rCLE<C-td@KTZ=V>nVzJxIFLWP>Aa71
z9}^Sj+e0igoh4)nd}|a~U%a5j9VK}|>bsGFt_n|O>pGt=UA5ik#UHH!i&J+paZcJ}
zm{YMH_(y5f9?Q^?d&P1^t68x+ALBJYxctIc)tM2vQ-%;(m1C&n!|4O%18cPB(FBEW
z8Q2*b6xep1fJdz3id=(LOptdVsfHlgCIhqQ^iSDtkFq-&q!Q;at{5UcfNN59$*M<z
z+nY(*cGtOp#c)HsGuADQxN#Kpg%;*70$x7?c>PSEPw%lf;!TGtV4_IdUN!J-W=1F(
znQ``eW*xVYpYM+Y!Ogw(phgjZsS7d!FdfkI)E4R7_ollIl+t~FAn13O;7SON)Oc_z
z1r-H=rf-i^j%>+N!feTptWs&Az(pBaX5bEx3P1YiCPIo}yxU=$JpJuKfU*5<VqoQ<
zDRHLIo9#|B-^^y#_tS!LlFAxBef)p;?D7J4G}zGZi{U+V-9geRSJRO~v0JtEg*{Kb
z`s9UI*1yvD?a;LrdH5dj3R&LKVH5@x`A7l3o@*+4S;NQt^_(i!<4AFXPCUn_eP??*
zBgZes(g$lF(KHW26%P<=N$|`O#5kn1A<IR*Ab&mZ7-PJjLxc;C?e5l9-DyhJqQ#}$
zgc|8XpO!Ci8=L>b_5Q6J{(Fzjro+kH@>C-+0@zwhl14#~^_oCL$I#D)S)Zax){M33
zw|&)r_|M<^^X}ie`+ZusEE&l`z(FU312!s8k%j8FPs_BjJ!%jS%C~{y`yyAq@#eh|
z-+B9{=o_GD`ru!>zTdm<e>QFwj2J+EcAeM@T{?$nq^9Pf1JLx8XJAbaH5^91Pn%+=
zC>v?@d{HKImQl}^HAB}o3y||SE4B{8(vY|_-J1&CyMT`0EU*tby+d<3yMC~oWUw}a
zFE08(S*Zu$_LRr!HllrG3+HE)%w6JY0w1rBm`i?cYM%<Ri*}S64ew+^k4BOSlp^RP
zDxHt=MBRw74Mj@~M$*8~lyQU($er4|0*{Y)^IA|W`)P*sTfv>oG?yo(Ad2!!DBvTR
zR2aKClTJ8$pW^F`kz2>!e>!JgnLdNEF4-FFUl_IsF@riuq1z!udn8dBm`Vx+F!|kq
zO|#M^D`3bxz)n%)04h<1Bo`GB=B&e))_I66zyU*pgKwKhps@(-<KJ8`Q1W61u5H^i
zB7xn&9a{w=cjW;$h5r4i|37A9EJ2smP4NxK9$p8#Aexy3BP|)cGU3W#VZ(9Y^<QX^
zg@az+QQgdQBUdU&F7Tc+_!if^j*{AY(VQ>CpJ*Srl%`AI26s>h*{ITE7ulYYQG~(r
z)^E!1fBvHGh4WRYzX<#Ox8fI|p);ckGHXMivM9AdkopF~o{3{`dF`NS$Am_IYO#S?
zPw%uar=g*{t4(|UCco^tThce!WJJIY&29RP%(V7@eNv?N|3pOlzm@&^S@w*)4Z^>G
zs}e)-l9T9=a&)-u-T`%!0M{v6#plkZCKCoiuxn|)HCnNqwGo05?RvtC>$f9!MjNqd
z%Zdo~W~2!m7c0MG0hqhZhj)p7-ld`K)}y`SKl}BLt|K?;$AKATg068{+U^0sj|teK
z&??Y1^TH1>>Ir4cdb1IQelYnibmn3YX0eN%!Yh^3c6#n<X5^a;v>Z;E90gRThpgf#
zLhB2cJ~r$Ghj52%EqVcam0qmIB~}bJlLP>V(fsTm&n8F>F}MPcQKKKZbTk(^=LXBY
z_2b|v0nH%_FtaluWM!~}y6OUD29<&#nU{di1Z*fsF|q;8MoVA|*J7pDJ88cMdj1)w
zg1s&7{UY@OjHHJgQd^~{SPd;v5Ql#v+E^Ukd~ll>=A!k__O;)8&1~C4<z}QB#Stz5
z2F^+<sG5qhq-6YDdH$it-52fv2-fC#33lT@9}{hpA*5jBbls&GASobMMNPZ8^*8;L
z???HKp>}hbk>Bd8f17{)A0amPqf=qKVar6i1mXcS{4%(i)LJ21?&){3W<jGLM{<he
zUF6~yQ*eLHyRKcpY**AgM18hT=K!nXE!c+*a?m|z_H6$qe@0dY@Y%rqEkVi96i0MH
zX>Xzg;L;ewqn7<}ZO1(R@m=ma8VMW9yVa|E-mQGzkr<?PAeb!>Gg;)a*qYAv4z3N4
zNLI}WE$T1WyN|A$vzCEAft#ycUuDJhb03ujn0kqNERrO29kce~so8v9GkQ~;$_{R>
zV_>Od&ueW7FMkJk{@yy+RiB@S&e8nENRjsKJSb~Aq=sxQg;InD#!)$*mcEcZ?$D*Z
z=@Xxa3mRNfcq7r#myWA`X|qQB0y-Nii5)z$uT*M$ygwFjxz29ZKc)NEbTs_W8oM7p
zFY{?xY<eZ7klL#{u{7_IqgQpCH|)`yp;_bS$q5X5A}T+rIhsz?3hT(s34nxe1A+*}
z0+|giT|BB)5EHE_)>nbv?IIADihX*0dRVC_#eerJFTmqF$~jqHXk(pBm899)`Ojzu
zf8jM&%I{cX8D-!K-ZxyOcqRhjVWk`_`zDW~$JUn+un9+sml&%`OJ$Zx$$Y0Gefy!m
z#~sX!RXDX~3_16yAhz}8A~5OKzKJ@18-XAkkWBz(nCZR+A+gS*wgk=Q(|#B`-yT=-
zbBp{L`UCtC^jE+iI=tOa6mwzGKhYXme1ePoRlo86&vu+Y8v8#ac<^t&`fsQbe=lrn
zg1TTfl~$u61W7+|`x^EQ)en6iC1yBn0u0^aJAM$l3-hD#zPnA-_)}W$2wh~8BXnk{
zP9AKUG0D`_3TBET5o{3$FioH|Y-b<vE7D}X$p5j;Jz=w(09N(=Ke1=;kak`?mz%!!
zH;e1ePB|q8;<;s;#hb+}=KwAk>V?|c2^^iQN42wE$MfTzo>8Ff`L70xp@7qe>dJpL
zo^3DI53xMDo}HLzyexuw0?4}mlC~p17|x$LxPNvmC*K0;>^x-Yi;_c+Wct<330~`2
zy-+)SUAVZq^RcIYLHT(`YzO%H9j!dLsN+Zu1m^;bL;c$RcW@{}uj|2rxaESFp82;Q
zt!)&~iio=J<`!*l{q*tEg8Zm+1!xRgIsv-ei5I&hdpijb9*3>0EXRDDaVdK_e?#$v
z@;z1PUa_aeCM`7^;ZzWTH=Wnqw;-Z8;HDUW<Ro4lQDZvfV4h$BFH+gHag6&oNilne
zKTF2}<~bn2b~`Dpfcn};H8$}U&vJHyXz8%x?oypmyB8*57fv+M3yEa$<XsdS(&VIe
zL-%~bcRj>!Of$w+6vT0N&)Pu4i;68ZINJ_o#UomuKTgu4>*&&%7n3EucBtzn9K;*l
z1CBx*rhGQFcwysFOm5<oePpL#>OG0r5oqOtYbm$Sex+uq=AgqFK0I4c*e}TEDDq>o
zWv~wHfHyhsQ()pLH7DS44|5$h0+|{LhkHz=xwfP;6YYFzw3FhqCIlhn=dPv8S6I8z
zV-71um?Jqu)h@O8ueKNkj|@H}U0;OlyoHVwk?${$+wELAexP?I&dN8wu6i3|sM!0)
z{qB+YvN(Bf2I%K^v_2o~@kYz{@Nt$zFT9KqQKr}#dgXaQzza7tw8~!FPU-cj8`AqT
zKKCm>U}0<49hnIy84+#~1vC8U%<+<~!1EOVREt;ms>r$r{Yr!8c5n9V?+`lD#!}Oc
zs^#B|^?6FAHN5i$-63bx0Rh3JYXM|9;2hre$s~$#ZtGzGx>?^RlH6=|ubcP%m-@T8
z6)zt&sp2}qN<-I<kRsF66Mabra7Ch-BOcPBK!i@w^Bv9{UCi;2U6CD1GL@ce3BDV4
z&r{j$M){q2Nco!)#|Y&0Zf2sZ0X`bSSV4ibDD!2t9Tok6aD12`Bb@Mxn^Rk$Fu~nt
zU+mgmd;SgdT?maVTJs?AwId;%Q^b;KL)EcS=U%H84`=(_(<gTCuc}JWb`-Ir`6P7g
zO!B@vo+_A;kSV^nI{AFKBIThbtLu<}QG~i7x3hoo*pxw(hQt6o_l5JhHid=`Uwi4{
zvxj;Ybnn5h5!I$J!#5fyaJ5t>^0mQRF-9#COcff+<`}C*H)Dr1PKVDSQgpKn55nA{
zKELqXyGR|Q?!@`TlXS?k#BRK=4t0#^=IahuE8JmkzBB><d_p~pvna@&^%wf2J<sXH
zZ!9XnX+c=(t)0Qn{n{1#pqJHEek9e>i?>~rF7Z}>0*$c#H2+kP!dDu1_9)9jNGT?`
zLC#NSD)4xPp~>m~hoUd8$uqpIOWw(tu)ntF4)cd!e9&i!1|)UzDO885S;s`Qh~{9i
zx(PJpCRL=e+G)*zyCznf#M$g^A<7OpE|?u%wkr1sk0Ks)2+8)dBIv&(AIY$a(va61
zD6du1<4)qR8XYwfa$@1syL4VbUo#hT&RIDFprRN4Qc(0C#`yo+WKlH)GWQ7dD2KC6
zv9tS`SRi*VT0RVYTalC_z$DN$Yd(DS@j=qK)&2LAk3CN`(Fot-A@-1@5JNBy1S4D<
zTJmW*OJ>+L>D==<glNtEIy2+r`?-^__*rd<BMeQSj>lT7s8EtK-2R&KBdD=5dkw9C
zE%ODCRETyhkJe+uMXDw03v_e26K>E%sJl}gPHIjan-&UmU2y3S`f*1k1`tAsPo^iB
zsD02KWG3tfTLWR1Hf80g{L%e7+Aky8o*z|6rnyEL6!y<xf8n(4WfXg~i^$`Kg5XE;
ziQVn0cW>K9Wi<}*nJLT^dZ9C)%Dy?bBQy9-t8{QH9nKU*vPSg4xJTS>Ofoj{-}2Zs
z`ubq_r2%wOH%Vul7ot>mVb^mQYe7XWZA2GyY3~b2mZDiVil#x?1+>~(R{EgUhG6Nh
zW1sS19@BNsw}9<)QXjrpQ&G{*<8U&|moD_0`=y@L$3#Yw_Bh#z7?1a8G_w@CeG$pJ
zekE5|^_;!(<uRL<S%n8MS>OcF1zD}yse4<q=Ys+W-)Z0W31&DwE-fz_e#JS!-#*}}
z)%?pOj*bJFuJ_ya-n(|shb{cT5skxahczPq5oO49Aa;@6aCsdVrVqr9CPH$hkEvy7
z`{Q|}kXNTeC9_RXq`uje@xummL!##yOSX}Wf1?&G|5qgPi*=_MLliL)R64w5%Uf@Y
zj6S$=?zpp=f|WwyJj0SaeGHdsO*r#RzWwFh3cIFifUtQRl?Kj&OJ17BwVWdwO-9u&
zE{mN_nlk^`e=X7~VMw98yZT8ax9HJ5heKkk7j1ifLm&G+%<4bJX*Y!dit!krbhbDQ
zCH5F>%e`M_tq*HP280quP-P-$1n`oZ=r<Um>?c6+2ONkcuTnt!_jv-#MKNP$l8I1i
z;|P+Pn8R>WZ3J{mIr)!!5)|M5&g1`TOk&{w1-w`tIfZP5gdq8|%U=*vu?8AGQ>|`c
z{&c#kl9G;TTn9oGT9@RiBvrbVTrUuXaj6zKw&dmyRxQV>(z*L=Y)6eA9BpCQcQr?0
zH0;9?qdMK44xWX6lNSu0>0Cs+Nyv^xTTvhqsb~-=e*tgPF;bzArPmn4&LqN9BqeVa
z`*p~=)@I&^#6nnsC=V<e!=)+pie#6YW{=;=z9&`9%EoqS9@mts6{e;q2ZIZ~+;6z#
z4{z;A;%Nqovrrnq1IR}3%t1@oUK#P^K22QM5RjXxx_w<0uApP9e=R{@Y@e}psZN`)
zcX44?5VeumJ^85_7E!f-qa&TIhkU~4shCyN;$F$L^FpB(HY|72n?gPOOd8V}al(a_
znmchTY1mQc2<QEMMf=Ki#Z+XUKMU3|f|;1Tk%6Q@Ih3Hm2vtg{x+4}c^0BB;F_6*V
zd3dvz%em`?_F-64tLl+?OSvw4tg_}i^)}K=*O#EXTo|fkFa<km10mQM1gu6W4^<{T
zIx8YD;7}KD@lnF`;RR<qo06>jk2VGeJ(4i?K-0j80j|{tEvdVe@tzpmD%>nca#}IA
z+B=r1?N&6`Lhqx5nw$v5uxSdfYVN@0KEw__+@ZG>_~00Nf3)W^r;PH6wAPvE-dUDC
zPPUw>rbL@;qWsk3V^q<a<TDlJgS<DnPTcZ*{;aE2(|6~6Uj9!Q%g+<+hz?X4B(&m}
zb&4Q%JSqA4tJ(HGv9U~*E`GbTH11&U!*zRhHPJ!N7@wI|L5q-&<DhNAEL4uT2vwzq
z+MIhAyga;X?;GshXMH(%7k%^=mklT?{wp+_-@f<%9WQFJ`!3=DP`wASqyu^fF{{lo
zrXfDtxq-J=qPn&w{z};1!A{w-+C4PXI<qDy2XeAdb628uT+>9E;~gbr<jClmm+gB$
zRt04wcAAEA9xRqP)TXsls-p{Y<a!R=aS}$fg5fT_^#BL~<IR9t@IM)pKxfS-)s!z}
zDVrr#_GBf{9(m;@8&%2RU>_#xAZ`82#6}{Pu?ZRm*#VdI0QboVL*+d?PMtD+ZkI!c
z#+63IYkr~mawecGA%K;YCanAQC~{Pg%4}RHIa(e!TqL&)6TGl8LO2ML)nnq1BvvU+
zv1um?Ah<yo&IG-sECVI2-vGGwEkF=HTulU6C7V4k+yKMQ@-74Izs#WIWFYwqTKO$f
zV>-L36l=h?u|ah7XjyNUvV{SY9h`>jsRmKyL;;FwkVO@E{cFd0osS^Pf+$9A9>Uno
zWgG(huNI4K72>(E!ml)|1KnR~#6jxi$CJTI*3R=mg`n6%Q1%B~b2mk9EPMjc#@tU%
z1`6k-SWtjP_T9zY{PA3X7|qSl4Kf|B^2CEf6v2lm%2qYNCTo5;(IP0i+wlp|EWW?!
zA5OD9XS6qfj>Hd_9QVU%C~=uVe_?uuj!wgP=cqI|_xfoJ$5CJtYFUcTEN9uQ$SqAd
z=SXL*9&1$*?gp3iiG_eA#*j?mW<<+Jam#Et$T?h2`aLhcVWoA8%$jolv`d*Wk3O~(
zwuD6&?53OKW`$}YCm$gBsp@bOqD4NKSFY<Io!3m||Lmsd<}>;HMYWh`!a$T`w>h^I
zr>EJzy(J`!yq`fcj1M{aY=X~himz3?8i#H~F^`p6Scvf`iyOV=Htbmuvg<}=Yb~8~
z-AHU6=f$lCtE=UKk6GSSJr|SRTuG>`T;P<-JQ4JaaaMokaf1GVUaesuA}5UOGypno
z>|37#L3)ejHU))lXa6C)=5wB?m;Py1nasmT)gNlXkOTnAuy(p)>jvzX3EV}<rX*}P
za3Nd()2oMb2wc>5cbcy>898VQXv>T*;$URL$DJiM75XWaPXw!WrDphr1w=ety`>_6
z-B)p(vs~&)ayR6l(GlU@%=Z=e5zNF-lhBa>f*+1;qQgyaIw@<awwpE8U;gHYlbupa
zhn_t0;{GM{+BIWRQ^y3epk0Am2bwxEA*c?Ugs>C&TA*y}t?v$LiY^+@-*CO>F3;QE
zUz%O2qoA9Tytg??8eLux>Vjh*K@x0<47l(~vq-qr)Upq$HT_t6m)=_asn$njV>WO7
z58Od5;Z{!js+A&iq_e!B{*jjhb@4#Lbfw6j#J7MHfYbVFh6m<5jkxDK9yrH3*uG-r
zBW2;uBIO-3`V<YLJg(C&BGyhS2dNuQ8B}2glr(rd15)oDX|S*rmt79psXvpx{+a#W
zT0xp4mFW&!&a>J&1l#i$`}R~1yX;TvV!Zs+0D|S0ykpx(k|OKlY^s*xCcC*8EZd6G
zQ?Va-r7D=x?MJV>9p&1r?@tJ4);XueKYeJ@8(QMg3KR1CVnJ+ko)&hPHjQebN>`UW
zw;1&uUXGt(5qRuKLpvl<cdBXp2qm9-n0%GkhYJDLCwf1Q5Qn7%-hK38grzo3waY>x
zMs}%alqO||8l$)med8^0!<#7FIkp={ctwyJ)6YJh&|!+V9`af0yjX6snXpgVpT1x>
z`<Wq140Prr#>Iog{vJ>Vmv!z7ZQO;M;9CGM;aS~5oZ}R<l_f4>oQD|Wp?=@mwo&Xw
zd5s`}UU23z-Tr;Ft=II-_di72Q=Vz6gEB_|JBj1yCf({Br{v>e7XwBgu)F1si0#>6
zElgmP=G1|T-97Cf)!~9tqvRuJzz^imy|vX3eS9f|r0v3sVMMy4?qUSVcHVCAeFe_G
zszO8F@E*%^yjY#tmmE)Ae|euY?Gzv8=}Eh&DL_dB`g*$-Ya}C<H8sW;OU2u&2d-CG
z@g%he+>-1vx8vlz8_m#UG7uFKiT4-{`_gU-DMvS|p~Gte8}g<u2A*{$Y<T;dzV#kI
z^Cabt$;T&Zk9V?mGUwBWz;Xq;BjHD}Z$U$(R@M6SWIL||#@b%Dyo5N^6NPjlSDFlt
zW3`UE52L#$Mv|#U5W;nk!Yn$q*>;NV?N=J7RTk<Hl!nYFzs$hco*IJ)l%D!fAtz-a
z-jFKfdo<gn<m9zmPJ_DUT^IE(u5^Bh>u0AYrjNFdv%XVzbc7`C#N>@EaO6&sE-Z*Y
zH1oth7vvWbsrZ6i9tF^R3Z9)>4{BJ7nb&}zYc17q0_#DEtdXSNZvle+AY0$0v(8^(
z1HpPU>!e!+LOV1iD}&eCwb(sa+Cv4aH)qZ0R62wG=^$CE?rB%D<_BwZl6P1Ro44LS
z!D`1OWGr=2TR8YB4f`3Q8kJdi62eUk!Ro}}#hRXO2qd={>MON}YDv3cZF*mJ+&;|V
zw%aGcC!}#fGnc50Pi_24Bc#rrLmX|oefMb5lGFxolyn`zWZ6WnZsE9D^m9hh9lPv1
zOA~I<4a$5GB=CT!CxY?}v*ZG!92#(4g)ze|h`!S(uElhA9HQ2(&;=nFJ7Vy$!b|$}
zA#%4Iysh&nVrhk0|7^_r17=OI=V<a{-DQwU_z#>`e}!@WE#eHAf^+13(>}PN@VX0d
zSMLT$!%#Uzm5oiVXJ1uX(F$bQ!T!pDSY{y^Pi0S#e|&2|U?Vy{TTr%4>xhP5D;+=c
zMJ3Ks>Q466R_Tc6JB{d`qX97@_)#MPMyn1{^DncSN67Xac##sK_lnc<!;^%QS+#qZ
zR$6ywGmA6^O5#Qv8<dHV96wYWlv`73K~tj_TShYGOS;EVW}uXyZwr+hE3sdwCq!<V
zVeynK%=Hh)zDyOT;dg(*n%y~+S6$h2b6J7GwaUt0%t+IjdP>y0REAc9XNdVoEz}pN
zge^^QYO)bqnj0V~5~-~<Jd~Wr_e4K`SXoZne;FGjWw4v1x7dV!-Tl5`QW^Xr(U@Is
z*nXw707JZQc;1WhLB(1(hk#QaQV&xO@IJY7I$rP92<?5n`it)xx4H>-#3H<<(Y<sH
zN}A@eiMC>5c;md=E$4zZdY`vQJ8!z32`XJ<PyKM00^;|LS9_#2uq{joV6L-s5LYd0
zi5&sBEhMW8+TIPj%qs=9n2b2hi+!3OkBT*{yA6-M^uP2zYsu&?vJBP~7m}_qKz75j
zog}Qb>gQ2{H&@q>)%m$O_<b@Ky`OY;Q~F}1=6SC+FT^oQGJ=VGyZ;Gs6j#-nEW(dB
z9Jdc|Mex@Nx(gk8=b$a2+<{*@QhU=^L#m9fN0ur859rt2%}z&5oVs6uM~+zeMkp>T
z>=U!d;t+C>yxidWlwsz4Hs?i0!O<@*RHj8->Qs1CgW##@db+8~9PC@^q1Fra?J+#1
zMjkicMTndds|%tM;YA(i{0|HbBcD&SBO{=E2oYBTl1s9xzXHSP%H_<SKl0!+I!WA=
zom6*FOlADpv&HghpK04Gpl;>Gi!kW$hWWdX9`eS$WNp7>(=O(>EQ*IOs~+3yNbjv0
ztQt(iur08_(I5%erxf=Q?0Vox$z}?3w}I9BmFrm3`hntWG1e_Y=<+8Q9i_RaH?E(X
zeb2Rk4xV789wDpruUkQc$!8Kf8if6(sa;m1N^$4I&XQ-X9^R<e+Dj9xf4p;=EgtVX
zi3SpWhPJkT9&Pd-zEen6x6tD9(VQsLJ^?Lpturg4S2m<OV^a=<et7E#X*AjkKTo_u
zI=?tkii(6A%g3oJyJqDU<iaLYHFF$-bV&vG7~c$!o3T|t7rXVo&@>3D;|gfy%yd9I
zzJqiS`QFzM;1q2|h<86nx*{i-JjX~a1T?XLqz9MptJ9EQTE%_P6dhZT_pYK|xmtNu
zUaI*_c1f*&YPQfdwz~L6kI0|8p)=Bd+7G4s3(n=gefPh`JO3iz{r9G~&;Ui0id#;>
zEnll8Vn~V;+IcO~n^ivdVWTI6wC+#8^(($A>v<+^@}`RDEXL(&whW4&`W9K`+bBO0
z{DsN^!m^1mXH5z7=sdjvW6_#hXIBq%`^H{=Vz4LHT>edG-Sl+#4@52k`^W~B=_3{r
zwdjR6m`tCbgJ10HH*)j3JaM(M;gJGtWBj~{&=T-O^sY}MCU;JBa*)pYMG@{0CrAgK
zARKbVm+`AD6FkH7mSf2rdms5y2g~*V5qa;ol?r7AH1IZuMpm8^Sz9sRQ}%emr;wYx
z>#(ggKrGTysy0VR0-B!(*wxVbz;mZFq#baAeBRNATpWQ=XiO`v&HeV7!`mIds*?cu
zIp`TJ|L{A=nG4{pDWEhp=GMbkL_VsvoHDf<McFlEVOjsCIZ`V78(EjX%e(wn{k3ig
znMN`Y^Na^>B3WIx5NKq^>lv401YcuoL;GW&V27|`4o;|>?#Es*O1C^!>NlHRZKcRl
z`8x=&aOn1Bb<pM$#!K+KVZBK%@Ke$`NbsN8<k$V>X||nn=h-(4#lz0DTTD|%+(f&k
zn$tC4b5jb?U^fVQ_R1IOC0ta)x9_52Lg`akJvzz6HW%{=3l|^Lt)x^h;SV`u&ay{O
zPw9P()#4Wx)@Lo;B^nZZoM&^DWH_QrI!zA1@c~uKvm+9rqIjdoPRc;lUGB=ttH;?6
zecsir&w=ym7(fX$9EU4Y(#V`}*z{<khg0?mSI6tmhRiO|wwmI3we*A%EH3F@P|0iz
zP~yRLgkapecP^w$^uU$ykS4@_qNn=#g&OUZD(8<DHjHX^Z>kws5)S)3pb54<NKe9a
z7>zJCLd!M)%^W45(bhItBxB_~$ZO1Byz0*5>+dBxUHr}>f~GWjCdhyRNUk(!5&cP4
zd69LFPyATgF|1^(Ne!dc@vHNnT^`*COz__yljB5g+kDvSWOFB)fhJj+MsjgW0H95O
zSbp$-36jpwD^h(F@HcI>6EC3atZ`pybSr#|dSA!^d2pU~<QzYQ8tbx}sxq6aoml|9
zUx7SH%NLZRRHSO2<n8zeapwb_x(5@^8S0kbriKMU|8-*EKPPPzyTb6O5VBOdW2Dk3
zoA8mx<2KsvM;24~o#=8h>p;{^D4_gO-f;v5x5PcZAb*iMS}LpgOkAQQZaI-8MHff3
zO5Y27*BlH;acM0D%4ov@H-_yYNou<zM>myC4pdeF7^W0s&%Pyt;Qz;4&9^KgH}j~1
zGKBM>mZ1uZA`4i3rNIMm#~#rMSu-Uc1vs4nl6okpWUYedixK&l+0qlhVDo5!5)}as
zFa=P6uujSYC}{Hmu&A9$02HvHwVQX5nMDz=qlZsXBPYU;IA@QqG;KXUexn^7z5Qly
zp}1jKX*h)k!wOiPy%V4fNs*w@$SA?G;W9WLA6)Xq8heq2juB-;DnmAqMcYeLgqVz*
zZhMmt8m-N%_CY`Jn!U45T>Jc?pz@CuqG1o9I5YML%pQawoCoJqfLm<c$_qui*J<rb
z3X~=U@L!8sO04Yg6Xs9$WOP<}$yk9vZx;&jCQ|j3?*RP69<rtFr5^(+P8UeBARo^$
zQY%l;ccwdfL_8(>a)SO$!tF<+g%D=Y4jP>WDA;KjYLw-8uEZZP-2Uiw#|3~2GS!3f
ze@^6_M0wplkO0e#0TNW6$hi`W8Votu1G#t?wZ&8<zYF;?KMOhZ7_ud6EEjpN2hf#M
zPS{#adTjDy!RM<WTgMms3%;WsTKh=o+Gul|902UBpUUX`6$P`~;bblp)*%H~M$p5Z
z_A!1~i??}!XIrd1k>IAyaewkoP~{@$?$$&8U9Kytco2*(NM!(!rgs;IT`ZJ!uJc(2
z!zEs08X6%Q#uK(pT7QbydnusSz=~v1=<&oL%Jr_($4Kqn31ZO-rH_NNh4%REFErNT
z7hn%xgD}q#(uHviscu>Grc574M=Y9?2)K+-*S&OjcmjiVRLo@tL3yEe=uLg(GMHrR
zRm>XJ16H<9Z2Ad~R$K$wl~+9j;n!p!A0uvL4EuRZxuYbq?DEE<UYM%gbL;Hn$m*I)
z(HAtwxqgBYeZUO_cG#0`piRyyl=v}lNZa`;JKGS{0W36xIMFZ<&N+0G)*mY;s!dSy
zP4_s)81AXh={a?NI)HU$=IFuBguFNE@+4-&+Xns~`I{DXY7#X=gKk#{^%tfL3VxAG
z(L3nU8d`PS!R_6%W9DC?X$frH=DW>WM3`|ycY)ZbADwm?%M)iG=PPV=*>5zv?mZVv
zQRZXtQ!9J4>Cpm6%Ir(E*lr$n9t)za{>bscSs9tA>e-UmYi=*cPiYFti^_)fv7Rt#
z*X+tn3;KejAxq#STVz%_U2IwV8k(R{N{s#MI}Z3*9J<zZ=84*QZ@=@5>I1MHWI-HX
zOB|0MrahgzEMdYF|MrdV?ZP<41g4yOIrh@gXEJs=Vzm~#PuS*Y)A+|lA=v%sIz&`J
z&}PJGuF+LJ*^#pL>h9<E62<pp797Et5A^OkD?qEPV>snDB&)Tb@{~l(&g!hO`)}z-
z@0eMK`h8%obEg$mRd`Mx<gYZ^qXpq1!l+D}q{BoztI9zPRyftnYX7o_WB!NNzV=*d
zJB;psvWp$P#+sK;KN<I=&I%XMD58Sz2yNK!m(a9wt9z$<ckWrqB&B<~?NcwKd8l(@
zo=Wrm#+Owv5h!-hJ;<o(5h@67@4jJqtT-b1$blFxg{;CL#*=&DovH#uxT>%<)J~$K
z9xm(-M~K0@rR60zF^5AJSeyo>`pY=YL(eOmGcvI%2O^`~eiKwCJn0Z|+A6h`i|Aq_
zva(>JIBi&yFwVa>jNbpa(=`2kFM&MK+m(nRAk8`pWu~%{6^P<b46&HdEmljO(AHf4
zOClE8Nhgn_h^LI}Heyd5VOH78V}=FEz;F~xo#8Y)AMsebfuTu<Z!pO;{!@an;*@^Q
z*t4aRY<(|1Gj@BuH!C#O4w8u=M&YKLP&{PufJr38Iewu=gSbNvC0ZaajjzGR_kS+T
z=GYwa(58V;;N%IWL?e>A?Sf@;%X<&Rz_ZiO^)`|XHD6xPd7u*&dPy+#UApQ_$#E1X
z09{abAl!Av^0Bu9_fPBNh1<K;&iA^1EP3P>3sKnBoz&$O{82fcY;j3uT^Q(=N5I7>
zX$TH7N3W6_q)}Bq;SqIW`M7&-!iv?0JB?qW-!w++eWj72McB3LcSkLva7LKnEHezI
zp`-_WQ@sE4A-A>Ho6AMnCL!f5FEX@rP6lyvHO|L@mEP!uWTy%fWw5Z&B6S{+l+p)%
zxC*QGPDYf$-*~@S`It7AUbtkwq4dR}jKoOoy4~G+3NJ$vm~A<q@KIi(vaH5W_G6m3
zzOSOeJ3g&5PcFTZ<Zpu5^_**jWAO3>H6jyG0sGXxwGt|UnvzgMo$p<-FG<&({&a(b
zx%*DcM}y1j^Rcy)9ZMf^$&=#spUJAdZ1uBGZ}<m#$&7s%yAnmy`|=#O<I2<XMhAFb
zX@1n^rTKYb3hBf)2!J~e+0=v%8&F$nJgDh%uF~6KGEfbQl)1mn>i*O2JFHuNIRsbS
z(Fnm^ln<fG^d`Gi#R4WItBd#33p+lv<+r&Vrk@dRR=htgRQ*zji^66$txM*X7^J6^
zW3CkpSR2Ym6iz$5`e-*$cqaPhWGw=D=Im2c>6)>&!Z#d)|8WA}GEGS5)vys@l<tTe
z!67OldX~g%g0H5Iutc5s)V4o5|K+u&7S?=ezXOolfc13SX0U3K;;UUkGdA(w#;SGi
zBwvz*_Nyg@+wXz^h{}X{GO;khd<z{zmBR!&wxRYY9kg88a(6gJfXuyXeLKNgs=Rlv
z{oYO24xl%{W;F#osH#sbFW`LG6Vq0!tVF*B0iQUpK(=VEbF03y)`iJ?wR)Cy8js5{
zqp<rM-ce!@+{Bhibfcjl_EYSWNMV@Ki+X>0&U;xh>Z{UwrqP`(n(N7=Gl=f=gT#A9
zM7ya^&1|+4EfZYodzP*ra(_5JzB1gxBquSVclz?`m+MzgNO=l+53F;1-dJYgTTUUJ
zgv<3e@VmljF8A>g^d!Po<hh)C)wxcHdN#2>6WYVP$4KYE<sjW^vL7X_K^$(2^KC})
z`#D~UI!#Q$4J~cpax-S9#;tN=8PoZ5;(VrO8&S|Vh01g-?(984Fh79!aCt4pq!Bd4
zi<Jyt7I;vTJhG>?lu7)^368YV9?%+z?C5iHZQUc57gb)Ai_RqGO$rYtKDBy(;ua4z
zv_R%`a*QY&cfM!#o;eejd@0Nk*;dE{x>t1!q^mBb$USRu!I*NhV0Wqr@wLUx;C8Hy
z`_0tNluD`K%iaxk4=>Wa5sV%5^g>lHezCepG$a`!dXYS21~l0lM@^k%XC%^{4>VOX
zkQu<#)K^EEk0|=abl$zPmo~`slEO82dzLK|{a_Ff+y}As@A7T7c{uDZSO1X0g8(_@
zKW58I$vhbc6eG?*@;0|$*IqW%2t2sf?4PmQ;~Yp2-|-ryipPUQ&usA;MJBlK1u?|y
zE6wFr!dIH4w>DoczAV242Wr(r>C)3{Z6=BN`TY9#*y42vglx|n9hQ!7m$e}OLe=Vj
z5Zw!59K^&H>YjJ2ARkwMW<s}Daqdl(vafgH;Zu7Ty2b84Q}e$4Zs)+Y#YfaoY|Np+
z8?7}lm5I3{#q-6*vwEU2fwub0&*HpYKLZ7EGSBWWZJGxXv5rfy$wgFz?SWM9LpgGF
zxjjej_nvaEy%uj4GTV7I=Jx&9Z~YtMNVJG|6XgzQk7?wn<CM)<LG9bPQ++x5>e1`x
zJ2hXq-wxgvdp2aFsmR&*0hRm0y|rI8FQh*$b>J49Z=x99`s3NnPL-IPiL=j3c`uk>
z8OB(4vNUe6!N+5yipp#STxG^dRP!&IkhvkDw!o*&tk1%;m?|LB2QS|4xPD{HY)-OS
ze%Nsm()j)=kgW5X9F<{t9|W?qDVlxoj%w1`(HVaAhZp$X(ME3Vs2A{e{8HM_-f@f)
z0jR{VC};!mBk7d2GtTHu((P+QW*nCZhjj)`Ckc~#)EsKqK;tD^+(}82Sv1^nO3wXb
ztW9Qs>qVaxbH$@jDin2_ay_$V`c8HYkKGqExL<-Ts4q>Wc|V=%uh^m1o!^xW=nIK^
zi$DZI|C$mV)Py_TL7f@_k@^9%@qW`NHlxk+OK_0a+5LtRjhWL;1>xkcuNwb2WHw%I
zvQ-cL2=g>2%20(<Nc_lmamrM-3J6sXsBnDq99{2}wE!aihk;FYlW3xk;}TdXC)Fp2
z$NG_*`{TYJXjspTE9j$M+F$@{A3KaHNv7=jf}9)$-(m(J$lnc+1|v1mMw@11yqZ7=
zCENkg4qFogX}l0OU9eze4w0x#z9f+k`Bq3am<89k@P(!LuQbSOUa%(@jn`d}oxvR+
zB_p7d_mO3}8HHQG(b3I5i<l#6SG1m@?jT^hDI3<_U|FkSoF~F0yNFDr(=NO&DDPhU
zs%*w|CMI64)m`)X^a#dIsxT)wdyBbax|3`Jrv5p6halM*tHvI=C5K^CD&!bDXN-Iy
z%gV>!f!B8*UQM*EzyS`)JkH9ZpD~Q8<agr|(Y3I>`FM0s#d5aC*zwZ_cQxBtY5Rxf
z?fsFh=wK96!{K=lr(wA24(1Uf57!#6TsBeH`byJ%aEP<re<7O9yZ<%c`>US11w{5K
z-5^b=xqDMa;kNsq<0XC}iUfu>TkkKS^1avB5gvH1>t#~}VjbY;#uxfAT7r~F$){Eg
z-Nvo8<JU^7tM8wjMkiGnBXIlXORqgRc5aH{)Z_$!b-^At+zqP$*wqap=ciXbpRst}
zG-jDIj3QU`x`*8veGV&L&!SqCdD-5wR>$JSl(`5CkzWA*z^f?)3h!Grj}Wg+vA{T-
zk9vKw(zHBAohf<Fd3w4=?^#Q4J#)!V1K)pZoc?#xIQ@%Vy`(ZLM1K#>_-ErXe<c;w
zS=DT<40Qp=o7t8+`O^X|{}!M5*<5);_ozGOkX3Mydz$+e;|(=av4O|FT&j*8zLe)D
z`^107{NXTn^~HUs6oS1H7!_%UjBVv{bklivF;1&t0={i=^p-I5d4b1O9J;(&LdMo%
zl@Ff;zGf{GWq1%Du70cI4B_YC*3WJSaxAc<UC38xKhS<yP@baf!n5U!E1>ElLja1y
zmIn&FRLy2#WZoM<Q@t5rv?{~M`P%d7KBG|BX9F^*W4r7I1tDc`aLsnb_lbRfA_Swq
z*f*GFs4+*E+J~9R7=vtufsG_<q;_!Lnl}yb`Ix7nYrmq~O1^I&-!R$Vb7>11W0F{8
zjR2?;G|eQjUawR4sb;z3qQ0a=)`$l36mYu!wL0Ze=+3V+qfi2&AO64Cd+)fWwsu{V
zBBFvsn)D(?kRl2wQY1E-h=>Xi5F$-LL`1rTL_k4mC;|dPqzFihbSV-#A}Ug(N^epE
z2_*(n))}s^t)=d>zkT=q_Br?7{Rh90nfx;69CM8Ew&#7G-BVIDaFsV*;N<g2`NcRk
z1F1lk_ZIeODx<pj;RhXZ<5Jf~58gF<puf#bDxtkbav|TPoE=`e66N3K(I(n;x*}%x
z;BoJcyJVXkS^;w`sbt<)?puw|kzm`0;=dISBlKzcUIiEy#7T3OHh?-dJ@S_D>g~}}
zLu;kb**6V-`jmoKLE53T1k?=|vC!>q@@L8!YXH3SAnIK~RlFT=B^CJZ9-mVXys-6+
zxvr=wmh$Ro9X%0N5YkEiZ2Y?as8-daS|c1F<(cb_r-PKyB*m4Zb5^<BFnL8pm$^&j
zrU86(aQb-za4f^+`wgV+uYon+@T=ML<1zUZqE9g5&@>*%^o<?sQS|yCLG<o)@#Ffy
z?ak`K93t}c9z;CYN|8sEbvmg9GpGqRyY;ECdR2N8QmUr+&eTCeEQitY%@m%ky1bOj
zm*=!LLZ)SE-dZx%r_lfvV`e=kdEjL$A$OVuO%)g85%;)5g2@pq#xB<JJovEQdK9|0
zs`Me9^VtlfPJU{97)O#OyHGAV$K6_!1&NR7B3m|xqEoMJudL{KW{G;*J%<KBAC)Ay
zDRC#|u3n*pxkJz9VOzR}aHJk`5=G6o2Qq{rO&YigGL<-s%A~!IEmt2B8<v4CJ-=;p
zM|({y(Wdbb1<u}m{an)imC{zZ5WUL+=GW}^>FwbBG!XN-{H75X7`lgl3`BUpV`Eu>
zpBT4y0!0zfqsLx{{e^7zPqc<SPrJ9LC*fPX8>sE|?_fk1%g@1oe&5&lAE6rg9%B9Y
z_dkvOziR9UIi`{UF~Swd@F~%H;AqmHDK3$MYQ7?*ephn1eC8J;`Qym`^d83t>ORk*
zYY0?n5-rqooTg;~sQZ4`F+YWx*@8NVL%(X~fZcYiA83Lk0Zou_pb4_`k2B}r`~NR9
ziw%Ir-qK=m%MO(QT~dsJmu&t%<9<ATR!#;$nSa=@e=M$BCj6=TuKG#DEbuo5aZoaU
zlL1QTEG}jje|UfU(;=Z1%nL=yfZmO4)~CLIEU;h4{JH#I@Z~$*{-3y-{=IwrfATr+
zPwF2_V@p5v`+rhJfSBed;%3-9{3jB)Gv6P}zt4ZPj(3@AZ-#RCPV18$vA$Usd#NyN
z%gtB42dkqFuSo95vwxh{q9VV8zGcQ1SA;%|?ZS=}kfcj!x#gO0Az$Tr3f~m!(IoTv
zH|y-q9y}L&r3(wyFX+GT)>n@jj9-g?z&V8?n0Eoz0`P1Hr$NM*gFk~iuPt(Gzo2mw
zg#A`c(R(v*v-`YHdb?B#-F0}p^FFRDpUKY;G7n>5jEBFU$E#DU$V*e$<xxAps_)aY
za$8%L3~Ak8dc8~4fA46~o3t5@Ui9Q^y~HCeh|&;_H%nh}JXAQj5$s^>eRb*-Ib@fF
zEB9nupiOS><BDXHZkaVn9cLL~F)0mBH`kcn6JwM!^o8pMPRLEFHW@tu4<8c<Zi91p
z)w=C-s)9&4d)=aZs=Hv?w{aj)WEU&{$z8`rU{O>SvcLp<sCP0wnNT~q0TE5JOO{T#
zIev2V{nPb32k$WYh~H%}WsrODoN@U{$FGac|Bxm7{`tQ^n(SXlYK`qejZjFSP8H%x
z(7^@Q?b-XSWn3WZY_?@vy~}VLem1o-lF7VEB<7&DpXR<uS>|`pJ8mPEEj5tGHnc?O
zP4R79E0ps@c};bB^YKJ!fwieSK{Gk#&tk+cR3CVC<rB&dJ&0=>FVlYDMZ4WzOx5X^
z{P-fj9{z4!+p((h=GH?$%4+lZX2rk?SSsxi&TbINM80DT(kGl(7vVg0W+4H(bCY!o
z3UMBD$4cKC9MpxXt~nm2+UEzMP8W<c-q;G*y5q_}2-fz-`G4l={P-O$Zc-S4SBfHg
z#uzb;qMDT>3jW!>o4cL_<Tp)3?v}9Y6K42|gb9nDiQW1@IgI=SbmpM8<Kai>Z^bH%
z8r|3~G-@5gzHMpTadE^JeynDwdP{f3bzF8_<_ql!HU)JHJ(Sf1uV8JK$FMBbDs1{2
ztuF{Be&}-Wiez52jc0T)p5m8e&)4Hn*_BgV#SPQ$$;64_CNQ}2`ADkDRKe<CE-CKG
z5P6^}d*o}98YJG&ZO_X7dYj)Eu9<0_tOdd;`o4=<KFfQ375M>=>L0d>pOv!ZN!mDJ
z=tMbY?Q|Uk6pds-H=}&`1t*$20ZR?vhSK~-Kr*tTXtD8!_cxk=89Mz>c-#M0L1ZTq
z3%2sq@$E?&dXS4@lh~S^)A;5*D?NfYwEz_M4}PP+1_-O?DLsTxYIuqo%?~X5%vP?!
zyfj`O6Z0p$KM=0HIbT=RFutY;=B?Eo=!VDVc3XR|z8{Mrf3%%A73f3}YR$0*O$+ht
zqU|l%`1s?YJUhMN-xxpx(T_^meqzhNF@Ut17?9A2uz=EITdk}Axf1$c<5xzqJLQry
zoD*uRG_n_W8^`K&ZwUxlP@VS-lY{v0X@$nx8BBXKEUyd7&WDZ6DzBUkjHEedp&Bm(
zy|N->qmXS%Taw1Po;*btuk6LWs*c@Jb=>ESDaFlWxHBtwMK}D-lKMx76<0X3;tug>
zXdG#l2im65yR-zia!qIvP<y=DnkA*{AzgGmM>@5$B$`)hQ&VzJ7J>I}UVlT6BO2BG
z@izuAr?zE5PH4wA@Bt5WgA3H8g@Or@B}V059H)j_zXSy<b&3S4Tjmprz=MhsBQ#@5
zWoTRhtiLfZ>VeR}nN>~;OBeN_fZ5fp3nj~Z#%*z+$$sO@AZ*hCfSL)68A$tSY#Nho
zx`w#vVcH<@dyqwS0^5%;Ef)GU{wtKc2ik^q6`$2h0L;Pv1y|LL&lb*G!~J{_(8+n!
ztKS%CJa!-|4FD*en5ECq$xx`h@wU`9gwBK}X3_aQNaw&rG=2^)9jq3?ljXq)qOhMv
zMvtnsN&?|&W+aMWT!2~=xt~itGQS)KbWt#;0fVy%T@}^LfqaYEFxU{IvWQLVRmR-Q
z0#V*im+%!NJ=5r5T5p)3>AN@G=x4E4e;k}2#*Wm(an%q0;}wVz)^UR<diYV0?bI#;
zOU!E#^XJC}9`K)@rs(kLcWC*a&Y$(;^L%sVr(yYF*hci#O=eL)UI4&A{q#5o@_!hY
zABWBVf2P#`p(*7%mkT^EJzBVe!p+)pV$1fEl0))tZrQiw6+*i&^b_^s>AiugaA7KI
z=oBlJhndvUgS+MGaW?f7*_<$#@6-X^>+XL0gVe4<ZF6bvsx-}mrc8yFVZ-ekA3I61
zw73mQ-l`&<+t;^)lfB*{@?MT!zP^C*r(pQ`g>7$4_O-?wXR~f|^AYiASsR^b+3x#t
z5)XJd8rZL%&+-p4-~$H#ABohzm#Y5kiT%me|85RJ9^(JoB8Kua=wU-rKbe<uWXOOu
zs=__b7my<D49!hQ*}i~&^0u0dNo@5UOj7eKXE@cp59bVUjIyD4Y|+>vs;oaDVA74v
zsdh%cJj`tPWOq=bOrPUXTBb*gR<>Mj@GOT#rk5nIpC&KAS|&8xLZ0A1!3r3iThn*;
zqnpM@)SADU2|fyr@bmI593ETSDS{lgsW8}7+7ldUd+?(RX{6CwgW%SJ=CB~wHduTp
zev@S#+3Mve*U5b!)bDMm=kGQUP8~DIeU(!3xm$;)<)4ke{ySnWd14hQkXC?l!bYP+
zy`V!^Nb+li(x2E`i+#gLQ->sO#6L<%=--v;F<X|eiamJFm*LJ^<3Tpk*ViT|SMjDr
zI$F<aTJsj?j@k7cqMUbMSVO<K9N~WeQ=tl!YN?v*q;ZgyEC%%dHozTPvl1h$c8#Fu
z(X}8Y&=>-Irw&irv)Y}Z_+z|e;Gl3&X~0B(T>`2D{I`I=0=k`l`U%mGR>5aUDdALe
zu%#oBz_Z)%0!04o$57uJo<+7OZoozl0C9pNp8>P%VYK7n#te#)4pIS_T^!UE_yQW=
zWe#=<;{MY0{PQ`>l5TvW<z(XaqbA2OAOS!V@do~A@Lu>|I-h?!=}_k@rvPWl4SEA^
zFUm#Tb9J9FVvLLE8Z6~nU}>q?e0})P-0mqQ{(G$3RijT9)|EQpXQ_;2X2KH5jqK$>
zn4WA+igjUKJzSjKSY2N&&ieZG-JSEd@18Kd#Cz%z1GVL!yHSV3jb2~=qt}h>H38+^
zpPvfq7h|<NJE#hE&cn81$UgIUFnoGe#lU}P27bf$*6p9iiga&TP^`S~pPdQ*>fU=H
z!?)y#j=y+}zc}T|zs;Pe0>JhQR2V}NFZ<#qyYLXy6wT6D^p<ngz(91BkosWk_{r9?
zl4YqqxwWtX%quJ&?L{&Cs?imUJmB1XhMYW+vu}|U$Znt!a&&7!2+>{o_Qk=-dAo_=
zkUgbS{Zrbs9}w+~ysDeISB&p7%x$c0$*Qr^GeEWWE2((12F3zZyU)DtqA^Q?X$Zg$
zj>3Ha?tsKh1C?%=SPrUd{_4!?W#!mKNRhyHk3mNs2@U`BiS@UC85_11Zd$DsyPL{R
zcED0E6SK$6k~1vJUobMxKcSYK7QNr0@W{m9eul6c`xZXvF=-rua>9jg58#K0!`zA=
zXnhfr1&4NimAJOfpCUdgQN`R5{JHrp@<yQxeF!Pj4}?L-hkKjhCAopCQs^e@eX00*
zsp^Nb2y(;vAnD-n@qNJYHk!S0p9u=rQlA{<ZqnG9HtDw4yghfp!l=%>vXaOyb*U+X
zljlAIE3W9iI#Mio{Y`8(XN3+Atn#}I+h2I6VV$^&-x%7h3`qfGHTP+eoxA4xE7)Fp
zjzH86TZPx<-`acauvaiqH+i;ym_t{`cO{B)i2mua^KrWPXQ-1B=ZJ6VwW&n?5c^)6
z81aMLuJim~<R7uLF+UhGel~sRYJbP|#I8^0<vZzosHXQlNU?@-VYiLyIvvM(q1qx%
zPwx=^!R6S<KnCfZ^O`2Q$giyk%?xtVGz|Z8D$UY@>@`KfhT!&;@1E<;sClJ)I4l3%
zo|1%%l1HObjTPAagC;xf6`Lx$9<#emQ_iecCVLR{NMU5tX}sLj$W-B&2Em~t@#MvU
z7AAAI183Nz5_?wfb;Z0~SI%R<*gwH>Wg2h`*12TR`*AE<B3Ujz-Wmr>9n>oCzUywj
zcP>=))=QyK=HT`@XN^Rd6HJ?&8BX>fT_6rbA}on#r{@x;TyHL2E&HTpda@Y9+fpQ9
z(Qx>1pWZ&G*C`!-;k$A#G*xS(=f8NuW^s?p$0LBE!|m}fC>s?@G`1j^MNQJ6#&3e;
z`FtV;6mNf%YoF8QQ91unf6>T4=@H6dHBwZJsEfwlLWv=C3WpY|62GZj`xq%v6E46z
zwj0_bG#kTI)HtzmY)^szfr9}>I>-R}P|a#|?^I}`7^5$=H6`gGo@$qOYd<XBxr{$8
z&_z1xvG7s!dt1iMlPZ@$zAFjuMZg)=5MfQLkv_;{DGkTENQbMON>fYtrCQ?oJgSo(
z<V$wi&Fz@sC@zwI<sp`F71lLghH6h!Aw3{45*5shvX?aM@pc8yYj;Tlv3Uv%P_@LN
zcZm#}SJn?<MQHI_UejXxsh7xGFMLzYYd$M%KzNN^a$DWy^%GyQMO1LMKt?Zd>aq&&
z!8(;!nS7x{hZEq#p)DHYkD!jL!sO6qpVEclwCHE6EG>IWuVL;*y=#uSdp~?ui*pLY
zIZAKeZ>>_(mz!#ZVl}w$W__kNHTGuDZo_n+JM-G*Up8M|-%bqQav{kRjUQ88C@Q{c
zEn|v(xt}*(y6|Eei*sicdi?G`YH_YkmBQ<oiZ<-oPJAZq`Sc(UY<gh10#szCyYSqE
z9yhTust-~0F>#T5h|PCsp8t`g>&W|sj`3cRf|agdD6_T%LYb)3|7L16LZcn6Fkj|x
zU(&}@X!5Q{cgj84e(Q;(>~vPT%L&taWtBk}=X+I6bZ%ezTcE!MLM;|{+S3ZL$Mt;W
z<aNSJ(atI3=FzHHBbS8NwdIv=YPJb8ADAJ^qVsd7<2unVQMSgz!50Caz@9;Jsduyz
z&jQPBuMvC3GuCoJ2X*i~OY6Bed=46%art_ls!zNYPv1R4xeMGjPSqza@Q0}V$(en&
zCX`gYxKobp4iwe*W$l3&>GWf(*1C3~RjYjO!z90b439o!n=X~k%B3gHUYINQ&Tj{2
z<BqEdvjTu(@Gs&kfa>JGR_ba_%k8C=(-`}}rvLsS54+q^V{WhJR&Hg?`%A2|j_ueH
z^t2lqc@&QPF#d`ZxW5|fv_f30S<Z_;eTVqU;GDJdK8Z8)cAdv3x)RQ5sod)%VCif^
za$)eP%3vX=^!XvCY|@rz6I3!BY^^~mWYh&QCaT+X=wt1w0^bx%&D5N&7jZ(ChR(KT
zb{)-M&X3-&KsNOgGE-`~6VPpLWDrynu3@+3HNeFH%{X9|uK-WW7WcLAt5^c&HwL*S
z(#>$3&Axf40Pe?+K#cy#7(C{15b&2hN1CCi1=yh!SrR9Q2Qih-3#IOtr-y$G+yai*
zS~2>kQ_+$$(f{+qzmNLAd?>goD63@ofrcV&_+!D9qHKg-cX_MI*aC@QadGVYJ$$K<
zzhNSS*P8iL4)+J{+lMF;^v_66T4B8asBt?#A4yY!_1arn!Hs;z`je!6n9zashggr+
z@4S2U!Mju2Avl1pvlWteV?j0Q;+!xHWOtVIM^bz~n41?R&AGOB8WnunLAem9cjK<S
zT=|x_pbzvmh+E(O|GgBtjv@8Xsy3m4g7hc?e4_t>mK_n+vIvA7T?~0sE}TEtqr(Z2
zD&2Qpch98mw*%fGTa~>H7=pPHmt{_c*EEg0xyzH87G&BR+yK#p#M@~QdueuJmv2Fa
z!h0YBsK%>E4RThCA*mBYO2cD1wrd_UJ5q)_%ma2+?L12F<b+ootI#kgcy*_JT8%1)
zA}hdXylKp9Ah4PRkr&8vPL#km%se6Qx&X@2i=v|Ayex6}f7bT@Y;*s~9iLx@3?gcl
zxNck5ivv@~LkkVu*4fTkK|KfQjPuS!nUm^X+tC%P@DeQk!xSW9>{uUQ2Omd|iHSnU
zMHykxdwGOr#H+&&Olk^t$*^ya&MIp-e0sz$mR$i@8{EuCe2oE13`Nzq>-1^o{H2tT
zR2x4zM2-`x{PLHT_RF@fJ*C!Q%+Gn#&Kt;Q<a&_ah07M60{hV~n-hO^{5y7Y4<BHZ
zh^0uP20KBHAlFJ8XSejL!y#9V0nynH44D7-cBd4M+L?D#HD8Rnq>I-FU+BFZ)9LQv
zCD**;C>9o{cfeHdo<1*mh++uDA*h;}qn#5<Lmo!IF+8lbN`A;z(8Uwd;oz3!T2#NI
zN-3RD^U8^a7(t^b$xX{CTpQ|l@Y7M52)Z0Oe4u&w*2*qNTZgnh`|N$soev^BDv#Mk
zyGOea)_)hyiJxA@D^59*p%ei!Xkok%y)LJeb^2h5>W#_l9o&1LT#u}_?QC4<K9LXI
z12(F3c)1(ic*>q!O|kdInDi(XznbdthJBuW{A|mGQ`G1of{?15E7@UPL2)C17@?^v
z&|1KFH_$}IR|eZOK541VeSP<6y&p#TJbk5|IWREL`mmb%BdE6^t^uDF>}KwSG^2k2
z&XNIMxqln_7+ztWKXz!~uoxba)u>rfl`?+RrL!>Xu$YWOo~g3BGfynMRAYZ|1{tnj
z4qWb#a!P=_;j7cUZ;A>HcI<op^`#&~{>cYLx8!GuCu$O`=DN<O$+nt{oundYN!o14
zp8K*r^|DTfJbd=>RP+_S4Zc*~MO}Nw<eu(*!l`2>1KSE0Kz97bP^q`PcS;i*hAX>+
z^O{2Kc{zHw(p|uQ4OffGNn4W30}T`<A#%#&HQ28;;Ul}B_GM-ISUoNXjr;}$`7MWW
zvRqdaJOX1+$ZfkCF$o%^U&sr7(TvX;RiB<o>o`91j9Mm=6?wj83I~%&JTzr6;T5OO
zqg)ze`Xn-F*F=zpAQevo>G6p)F=jw9g{>gRU#t>kBW5?IQmz<BAqv7E>UEXxva4m1
zF2{)~&oIcZ0Yx1m4i7BSj(49YyAd!a1n49GFv3+AcYWb-;a!%Bk6N7qkP}e}r`6pQ
z+hrCBelIkJZ&O6AZYyM>&iBFEaAj)vZh{Ocy)x98(3VRJn|f27bfz!aw)XAyk}CC~
zyTPnaKThnl;uePunJx%7{xj9uU+4q9S*hbDfgEJK^>^fjt<PiV#^_0Az4zn4kRn8S
z0iXFtP4Xw=HsI9ang0-TVkYX*cg^om8-})t@mf7kGVME7V4pr&9or{WdLtc&)Rp-%
zfIs#-JV_8bqe}(`*k%Mp<N8cFZS@4nbKx7v<Cp&uO#bUl%46~&r!R`mCP|br?(VY~
zSrk0D)qR4acVRo={lniF-l%;H*uoBLP;Cgt)Eu9742dZe<X-mkJ_?8k1Q}2s*pct$
z?!Td`{;TPEd;cZ$yj$N$FUSB{qlMT-gtux2Mfv57@RyS@*~5<-*NWmr+MhOJL^!XJ
zu+IyKlJq@>MYo`$C@0iIR7q0E3jb*zFVTWRKl`-wJDX;blzvi}*6Vfy7FBGfpdJcL
z*+%lAfJx{h3v@eFfb})ev32+GW^QV-&I50!FqyhFmG>=n_sabQENEJVG@0|g<%)8h
zXS?zJJ<S(GH2r<AJc{PODj`9Ud%)rZ>+R0T;N`gk5>`%s0}HSwA{@wPCyk@E8SBaZ
z(-3t9>>mBNk@^ms6qTnl*7@f#lQ*t@<0UM1Qtas+mBj-l?Zl+X{&er$#Op5m6AK<>
zM-?69@e;$Cn#7rSpt1GZ)HCD^LRYhy^&ztRMEA6&zh%sDMfttb(HBoAQk`P2)yCa3
z)?|t^36$1$`;3~%!tKoHb*$<xH{N%hjF}JLD{9Fo(vEqhy!+@O)64C9%gY|@-s9VK
z5+*cSt1NBTgE&iUZpH}W?9pNf1G0;xa`B@A9aWV)^P<C&<u?=6`J{a$j<^S%Wy*Fu
zQ2>4QrZUx)BJayhQDKn|!Ow`ZC#S5rshp6J(uQGAM{)!MFVm(g$Xb%%)_4**Fclq7
zOH+$pJ$yK)CyiayD4VzoQT32>mQSeu6mP=$f*X-au~c<(1h5u^mn~NxLitrs>cPl<
zq|Q_fWQlZ?mSIzwF#294Z#e%Vd*Oo+T}N>ZUJ0JzTlbcww#znkadN*gxb7$BBR|7g
zx|*Mqqqw$IA{YZ|m+Rf1Ja8C1>&h&H9(WPJJh3_=*gQ4gV#L-s&Qd4&(q-GUrRVs|
zLaVmhxABG3{LTk%e(5>keZ4@AyD2j9ogWr2<lyH)aY3-A4DXiK-%%tfk+@5(q;ua1
zVbS^o)9e=_ni(CVB6`%@F7#|;YHG1031BK7qUqddG>Y(aOAJ&f2ytk<Sm``KTijkR
zrUDUmA8mMY?H#{175IyUt#1_mA!|#-by`+Vb&!4>ivPOm?ZvAnD5jI|;?#ws)5}j-
zxc4c(oJz^1Dk+iO;1O^_27p%rYXL3Yz?Fj^-KvKT)wkQFrNcC<@Jl1dNTH46WEV8~
z{0s=`ff?A<PL;$t#JiF7FeFU*iV<c>+p5;bk$^>Gk%!5S6F&HPu)3^IRP=GA<~v)s
z7Ag-7pM0wvVaV4wGP38(b|kD<1A^}&Xtrp~r9C%4SRvAu>#?`c=x~y<E>`8?W&F@-
zt@NhMXPK$Bi!Eq%8WuM*jk;+};Q(BwUq`iZXI<7UNecrv2%&9|p>J(9Q57HBQlBr@
zJo-FT8J6Q2uC5^P*racQixuH+nSPj7<3JQm_Q;@jSu+s%!nKUuD>cn?dqq^qLnYNS
za&*U)#vMWVbu+JdZ)HZlKrNjf`;d$&x$0s=oF~Qhl;<7)N+5?JO7l4-o?ahk4daoW
z@MO4~q4zCN4Q%``C7}XP=WAz~mZo<<E%sw9#y*g`FDx9e*B`Z<Mv%Wu79*@oiS6$a
z`4(Ac=yUKzk(H!?TGs(h$@N_<L5Dg{s+GmYoW`agj0=~(3oEFA=^8fNAA!oJ9w8_5
za9fQHHc4wBGX(4?pY{atY!!M0T|Q-+)}8P|hIjvO3<VMP=setQbIAxmf)~>B>Xc{y
z_4vTVSO&4n`pje1#!Pxr@K-N_-o4mO5USkO*zCM~dVAk#(a-CS?P9~nm4<7ZI)SNl
z@7`3rWz0F+$ni4Y(4IInWZo`U?Q^r^(zQbhjd$zVYZ<hciI_)tstiSQsh+LJ$7pBN
zq-Bxz_^i;V;<AO*)>S(x*$K!Ci?}DLme+i0wa}y;z%rgE$k1A$8}nXpTiDF}@(YS3
zRi3OvfPjt#h?awEp<bag)OEdI<hZ$=Xu$G%j_?h!IGrvW?>B5CZkV|NE&>H9`pI2}
z8}t?mbXa@DiG(Ar&)puvt)lX8Pv?r3UwSEe6`MD;0X+h>?fO1Huatsx1RAUsJ38k$
z-Y{)^n2s7o_LeIC#;{1Ys`Z{GV*7AGVExqLCdJ->xlE*?X(gmAlH0PI+B?ee*0$dm
z)Y3-2dE&#V?`YvD=kZV!b3NpXuY7y|{W^{KTUBc3#d~Ly*)O+qz5Jp%tKVLM1mrn@
zM|3~cfv8E=Yhrt;hfM6TLgifRa23!WGJW7Z?em`FBx50=Cn~Wfo_PV-=-R!1vzbiw
z*J!2nOe6GeF-EP^6HgDA9~(JG@9zE*SnY;?^5m3XL$cwy_E-1r2g0v(Upa9|W<P5)
zoNxhxPJ;5|93LYNOsOYRjR|Atx>s6~*DXFBI)3?x*1T=Gj8<Y;*b+SZ7};_%33!+1
zK<@9k_LJP@bT;xh-`nSmTvqVvqYpIhSZ>I&3fZORN<wQ)*Vcn0pTW-xD4q$Iefhf^
z@vrDXbBA(6XP)_D>5XdR+Xt2^i)E%9sqtu#AR0VK0XU$O3`iSPqaX^1Zy5sU-J@^x
z)QxSL3%0|UENKk11j10uck2O&8i|1yd5suOp6k4u87}uv0ZxC1IZ$nBaUg$j2ve>^
z3&x^0Dw}YQF2o2DIN6gN<R=`71I&w_kv&cIZ(oa*mz(H#uy=h11xapyz{ny6@YP1n
zzGvw=Mnd$u2~d%<E%W-f6;m@UdATzI2ZD0YpHX-NFJXyfA-c|M)KXD9J^KFZ<qOd@
z;j%gUf6b2n)XD{$bENWO=@-740G^bOpy4K?0+~GU^Feke3Y-KSkvIrpoZea__8Wu#
z6!tn+@)uI6vi}@j`mczQ{~#~TYe4+-@^5a-f9UmBWD=yHUjX&{P3yTPt0`c_VB&vt
zL;t?C{@|Ma>DYH<X9jA`Ppnjp7FjvI?bQoN!GAvZ)!`hV^$wT)lRqsFX#u%v#YNoa
z0+jy1<=&5RdJ1#?C+G|dXc-tNhi)?z-Rk)B+Yc!$-!b?vewSZ=unu7Yc(WJZzpVlu
zso;`)JVA*XZsA7rfNpaZmsa>EgJu%_KVAJ#cO^eBHq1sUCQDLOzP6i?)38z+4@;62
zJqxZI=Z)IsCNW6fiaR=|fBA^7!d`{|5qKJ=f{)6RI^natcDPRM`TKeeVz#;QGG5!1
za<Uw=@y(N2!mdW#k8vVDICd*z+ISdC0)AIepl`_PWc%`J#;7l>@kL!hmgAv_eYkfQ
zvtGsoo;@V<Hsa3gJN%Q#fiKW9Rw`HTBwypY%M>O;SLv~+Tm(nS2T|2jm@s{16x?ep
zZJ|{2abM&BCgT0eZgTpY=<sXK7sxUf39NV!L9-m~7bilZu_I0Jy%#bqy%W_cdty}@
z%gVk;y@_G+ez$yT_CjaVz_QZv5QP)LMoU(ds3oK)(Xdml*@nfp4miBgtS5vrNbMfE
zeS=GT{pehMZS;w$DBG|z5)AqIM*y=E!&esVdYC}U)q<SPlEur-x}q6i7NvOpy~uOv
zvucsj_1<FDs9PUJ&NSiKJuJWImNv_&nR?zeee!LGEf&!FM4`&DR4$LEdWqMm?eMb0
z&$UFRc46>r*&igMv<*zFf?bb{Di+Kqh^hKDFdb2`<_49SJ-ie{WJkSh#3Td{>fp3H
zO`moczHExJkPT(hUs0JEZ>M+P15>QiE~<jY0!yWukk1kZrj0`m5`0=AkYRGLVUqOa
zNvI!NELvFr#&wQNS9Q4U{>&a$5&SV1;+<1KJh5aWsc05R@R+V@LZj>X;rkJXh`jaH
zH<`AGZ6kMFtwPRRKQ$P4aAHC^a#~J*?#(@1`f3C)iAG(~s4{FAu()@TZQ2Lx-%uEx
zb%HKPe13jmrd@zPd`%(X>)=kCwdE%%o9H1NeNV3oX_o9ou~>ji7TxKle>hm?c4uK<
z0!Kezzwf&EUA_0+AyzGOC;DK0lO>*u1&!$r=anPMpm&@K-5}(MQ^cE5&QnTki)P#P
z35Xy={{jOuU+J^map$D<w*`la<Np{L|50z^_y7M}M0^>^SP;`W0cJ~~sa#s9D?kPr
z!v5MH^j{Kyhc-gjlE+0+gL}kis@Gu4o5&+%8Tu8?=`IT6S<pEWv8S!RwE;kI!sVaX
z{2@>AFX`3%mncQ<i`i++0GmWDYqgUe(#o`tA^Qe~s(Y7j259Pgt21eatUY&tjzxbQ
zx=L!P-`tL;So+fFeDwFAc>v+co^JFZI?P9;Y+mS2anf@CJR$MiF=7>!Yr4U3Vtfc{
z&~K0KtR^Be{PPWB*Kb2e`lSJTEJK)Sg_{R!j~toHtBVMN>NsVFZ!=#4JNe%yQO>Su
zF4e|U-i(HJaePut0Xr{h=V1PV3sF8X9day$ch_U4{JaK+y9~_FdA7f&IOLCans;vs
z)qa2)*9R>wU34(a%S^D=7ipU{H#LyepV`SQnZnTXhm$Y~`TP6pn~n8Lhe&<o<dKVf
z_KdOJBIWN_k(+l*0~u?lotr<Z<KTGv#cYbtwW%mpGe=vdz(pg5>zOYXwxclxgN-j2
zU?$c>fW0~=uwOs>Y<EImOv83C>>qanPRhKNMBmdoA=HU`h~y*pKIh(9MxemL9yPIR
znSCqF7t=1d7vtK+EJLIjlcB_&T~oLw+-}7ASHlv-!%5Call!{sEk#nOtM{%_!iinO
zR~ih7&<Q>?m2Gyb>{iOL;gMn$dIHZ)OAoYj>|Vq;SQPO@3b~nNSV5R=shua>DGJDW
zfFy)mt}4OgZl9GUUH9ajoI6s!`sJe&O#{r{E|tIdUUz{d=|OeuBtU7AS-?3d{!eyV
zTsLm%+L95K2VRlUH~QoI_<}Gp@Q$RWTT!Ilz)S3goDzEqC+K28KNc78Fed}Rs-kJ*
zESA=;(0BEI*UO%QQ-qap8{@eHOIF@JWE15tAX!#==I?fPYkhhEFE9ytP<h{|KG)8F
z3b2MpU)dR-xe$}Q>SljAqf(Zi(J*gQYSJ|N)SNaSy>IU{PS~8PL4-GBxXD76eXg_B
z74*Imx6)2w0c9jjnVW&*y6l%mkLo8!?`r2~;)Ib0kv)9#q!EH+m^j&WYRj@&P&dn2
zG-lX(^!$d=eA}I(4EFb=c~;0Xo81jFkZvpP>HZ|OCP00q6pc_TB#T|>ZdrAAEEuX&
zTM|AYM18QkIsW1D{kWZX<C({tuz{PHk+$Zu6{)R!<GZMCtU(iAu>QGTt*2+o9<6(8
zvY+!<tG0=M)L<7#I+#XeZW<ncAPJ<VC^)<^<cstC<q#*Hki;*#4wF)Yri3FVqy)Ew
z2jUN|bM%Chb)h5U4forF5YQ<Iv+vm!ZN&kqj&QFYp!vhxs;vwW3M-;^<~*#?6<c0Y
zmi;oTd;U=;BpKy`jsEscOjMigtV}bOd!%zx#Ces;&@SY&H1y`p;0w*1_byFdi?n&n
zKV$Nqby<gZMr<ct-kNIp$<3=^m1SvcSKP;$kq9f~`=Kt`LF`;xc3Q`}YgeAc8|2f!
z+wkvz4dhW|2Wss}*DxQwaR9e%X#+2-Ah%Ma0g&Me)Co>u1s&Q8u8(Zs_SYf97NE5=
z<o{J%mjPG`6d+z%$!;JI(GSAjY<E~OES3)ZwYAf9Au!)7z#>}^HeGtc4Ax)ZpT_2I
zfR<mp{>OA1uY6xHzmjA8(-!sj&G!#`<bMyNT(lVB5^hTiJtRbLvvi|rENoK5;6EMQ
zP|gIpM?dgW{a-?J&1=1~(%;nYva}F4Hase5;1C-m<rJ*&zMVmqSt_03IU8>a^)xwl
z0{TdsznYlT65!q2Jbdi=*2<k3&X>3CB_MSxgIgwyH??FB;*<bg8-y$mX*Le&vGRpZ
zC~*3IYst5e3`nHEt%`p>ybw~Oj_2e|?-A8O1WYIm){<V+UTYb;5U-H(i6X0^TH5oT
zoYn_NdYNNd6$+1(47WAdA;Pl>6sg6#*sw<@){O>x(heC6iAE8}>3iadVpHvpAx-eX
z+UBIV>*JcIMGO1e0?tf`J*jw+!++)twZrF#_4zg(IGGVxm3$&3q5?~{Kvf=p<DP%(
zesDx`xo>g0OBj$E?4psTT8Ke0CDUSWgo}_xh`qGbdJg2r6jl;n6Ap4Vr!KF+0|BZQ
z^|g}Ey6%o^vd=u^*O8eu6^7NoI;T~tyGCDRQjIT0?^U$-5al4iAJ&Wc-tTR)X6?p%
zDhjrGH#<9WHFG(>9DeTbbnX4UYikBt1I9YMla*tIc(8h6ez|%O1r%L++c-1M`OV2L
zw5q!^Hq$lO@)a9L#{TX{>|_ahu`f@K2;+zSu=#lfxtA{?y1i|mTrqQ84=qLr(W+1r
zsTQN-QQANiVX7-Me9k3{-S}igOKx$-jcade?Fzfqn9(O3dAAKIhf$42^xcU2`>ppA
z3&SSF1dS?>y7<ddSia<<H!QO%?zQtrTcP84*`FT7={#oN^XjeEK9_YB5(ZdY8>#?#
z;tur+VMC@F&>DBv=*IPkdoEQ&Wbpe7$|a93N$FfPxM{vOKooM4a;$#6t0-uRc~c^s
z?uw8Ht->Qv!o`3)T@5I~4Zk|rgd*KEuR3AA(kZDjEpIZ?UiNL!C2#bWeewt7XXHmr
zC8S||*yjQ_LZiRsexJ!!lE$$W@jln~d$t{1;?v6KJ_d_(?QRVa(TI(!vkpy@RbB4N
z`?JOUM@fn47bQ0TQC}H6?7QVA2Gd(s_n{VQ?(ak=<0cX?YYyq0fPwylC8#03z;>Uf
zE`ggjLv<wj>d|q>+-QdraXPZ6nt*9P{Rd~7bqYu2%%Y!j3&)T|s({a+2ruU)<9_A&
zdz+9=RK9O_pJM*Fjs5Tb$F_;>0OrOK;Ri@rWKkR4zy^H=%!in*3#(}8G7E&Bq989w
z;bR6J2(rZ{kN&V7bNXK~hMx~OlJ4{uPL}K5AT?}{+Ey=k13f=cz5+BSP=iNL;%3WW
zh|kK3ym0>fdtUS5uW3iG&Me;2y=;5d-sH`jm%_rt{^WK4ORT^BO9U^Whe3I_m@e-I
zp?5$sv+&w71D;DeO{fO@62GnOy@QLEUqlbBH@%!d1wtWpQZGw3-mn^lobFYQ@LV_e
zrt}(9ganzjMtg_&D}?z*K!1|8@7M?Lg;$zSv%>eGAmy+nDHFXN2JGiKB^j@BJm6r!
z!sMe%Z@!WOZsmXicox5-GUVmWLB@q+2o#yg!6ucxil;~BtuDm0Kil6nFYd^Y{)X{n
zff-J#0?l>}`AMwOF<gt2s1B{r5+h9OmB+Q~C*8OF;Fs4bRME+@Wj|QF5O;dV!S1z^
zqw1D6#{;HweN&t7x5bQ!_Z)LE#49qFSv6|i8wnKkiI*$6aF}Docl8o3f+{0I3rRRw
zVP92KXlr?5@B1^BPv5@vz2d#V^WfrDPB;_*f=vnYlerCUkuWg`^`!IUdJ);fED^6T
zy=Lnj-sEuM98Q8^_av{L)bz1=u`)<AWQXtB322D6%A$p*#X#hhGttyrRVHDFGeA5N
zF61N-Z5u9s`)tLYP-YKA`!TH*5^vWlFRTz*sW~Mf9X%z2GVWR3<l5gqwzw#J@L7rY
zuGm49xYNu|C--{7`1X^$*G5O*y*kZQ)*<=o>N%6dn|2Kh_Y4-`shCpC@)iLzt<(tk
z-7)(;uOY0a(2qUfU3p`p<u9gp`$?VA^r*hTg23YT56tA*2I_!Y!!F%Jz7s3wYmTzO
zS7^G0Vh9Oe6Zz0GPwmUX`_0ZBZYjGhT^Zljlz+k^!E~j6A{!aDXstp(M!CB<7rIpL
zR~f%e(efD8;r=ie$K7dm!_s}nVU=g@Ry=WfTp<ZTOlM}Ev6nRbUe~~>%6zBgwMMph
zkO8dqbojTHZ`!O876I1t-@X9@7kZI$)yDCOiqtK4ew~K3pj?11-pl~pPT%lHDMu0N
zy;=dsW(&ElISW1sHO4HjtdBJ8+&!$9*b)a55avCQaP7ng5?k34AgG_fG#l^pRhmM#
zL3v${@E0M>i!8cqXJf){gb#8}UX5e$6VSHZE&s`9`{Idy*?wuMhYWjLN*Xq0q%a;I
zw9DOVFBAYVs?cdvJ9;m@dyJLrG6miHb=&wM!p<+R*+t8T%X+Mf1H-X?z-Cs;G9t_M
zXo<kc>xp>0m>5zC)s#lix?i8>vOjA|<ks5w`R>A;_)f#9GsH?=k($S!9L96bOpC|3
zw4j{eTN>!0DtdjL0q_D7H`VYi?k+CV=m))v_%cIdAq%(LhnOZ_?r&L{VW7r;;<z7p
zZC&*Rb~A(T%jZgRE)hNH*s{8ttTg>>TQoP*m*iZeaO|Yqp~B&=%N<TdQsK@H%6f9k
z`p;~to~dgc-JT+?4O}e4TzV68*3DJOP`*m%oKQ0Q74tEo9{#2K9wUO;@Q}gh8FFxD
zMKb$yrpU(9qUOEt(_TR?v$8HgqfEw(2yX2$3-3f1Hujp>nC%EYP!S`0z)$>{=X_7`
z;Q^}<bWA3hNHO%yovbXN==myLBy4Ci^+wIk)}s+sl9y|HJV_2I9(Rkm)UWGFN^JQu
zC_$(wcVC&-8a*`PK$h>osV7FcOWrY~O%3yG$~%Vx1UxDN8G?3+bB1rBCoYT$!v`bI
zrkoqf&V6(T*|%?}%<apxyui0ouRVgltZACfFY7C@E|&pZ4*tD`aj9|8Y1U54)v;@h
zQWFP1LC&Z%ursQT)YOjd7QI4&Q&B(@z|E5IxiTV4&p)>^V`om0L!K5)W$>EP_HDJ2
zrvf&C=~qSX%$-ud0pTT81#O;0SrF<m1?2RPy~cfRb}Cx?)oyN&sd^t2Puh8+u6Z^=
zp^<6jV-Jkob468zV`a@%cj;nf%HrI}^1b(jECcBh>AFN)<?h+A!HZww+&{pps;bJW
z!h3q8gLd&=KGA(9QzB9>KbnF00qgmmqt2B>X4(R@vicJUeX^ll)<QjBcO<Fi!-w)u
zql05=BOSdtd_J!#GTZAK`nzM+vY%ekM=-<95Qm8uanmT~g>*&0b?E%!LxCRYsYGd=
z8lj>Pcd{<Xjz?Wdz--dJtN7GXak;DL_V(V3q&BvQPN$rpL)l^0*QQ>N%<D#<mMgt&
zw8$ZJYyJceP5yk(hT`!Ny>5lc-erZXYtO5yqHIHCKiQaYGMHXE!N9;G&G&w^X&57h
z9ER_w%YbSj5<|e259TCU*_Tz-1mWAV_KvNd*fNV?-YVJR)SlxiV4r7pQtU&y`c{!b
zXbDuqg6m(+Qe5YwRM~p7hb-W!Q)W}|Y0r$*;+YC_Y|o1CkYzYJTCLZsreT)k=kNH^
z+PwMXSh`hFPTVnPaplA9N2>+Syz{Q?Q9hw{WrL<^+~X?QyHR_s5O4eOZH7+tu`h<g
zazIy6irClvc4g!~OT`Iza19G6Bx-Y#C7Xux`LUh>5Tmj*A%?<AQc2syL~P8v``nvY
z-OTXvLLeq^=)vDgJg^xzD%Ts_Br~}iD7kRC#VPW^^eWNMP0MO(Dq;e^UTsC&z>7_{
zxG72)vJP&d04{{k(7sj?aByzgtuWpocQklQlkPP;=Q0{`&|j?bs)>&8+=1_Y4Llk}
zusHDo%7EZLp?fqpEY|#$cZiF89OFslQzt|S0rDi}`n|w2ibDv0u2(_6vzB^A*kJQ2
z@U^~HL2){t=*JTNI?b-JE1BdcJ;-3&KEtdS^R$~38ax}0+U*qIuq1Ww+Q{!1ua@k;
zkZS3`_ahrHVyYN4rpza#f`6qy0M)#o@O8Mw`r4zO#U5$X_#0BZDJP2s?q(-495U0P
zTzm=!W6P4%L7Z<#hoE-2)wxnmI`t3F;a+@cJ~pGqsk)qEZ<9WypJ8@IGH4$!tAz40
z?g-DW?VQ(`FwELTE#Lv(NL{7v+g!&k`%`Ru`AB61+0UMtDewr@a-DuvZ$ur3@49W5
z(w+I0s!K?-tx%{A;sU6QxliFJJH}C8L!U$5EB&IRxOE74e$L-<T|^~XG~f8PTkq0l
zaji2#Hu*ak$Ncvp2E)wS0<C~9Q;0%s^{NDTJ1PiPSwfy~xQHkK*{H<f?ZFZc&e*#R
z&q{d}^54xvgxKRJs_=mxQz#Kj1TT4hk_{X38^gX831!R1dyqk|y)TlEmsUa5^#+x{
zj+`$n96z)HkBp^%7KDWt)?Fc-apku9WLvp!)E9N-tgAx&otrAH-jFfzeEiL;*B#jn
zBwMtO5gexn+CVX=87@?>y-))W-dJVS1<a*_W~oQ&(*16Z?S5UoH&W6S$*{xb43kwj
zvt;}DT0AafoFol01{sT{q=8nqFeE!!Z{p;Yb|my!bLIkZ>QYIvg&8VA;;Qie*vC3w
z&dfx{sACgw&bUAjN^BZO&{=3%^*i)(Z;2ivcye9q%!OlQua6kby=*Cb_nJ-X`Ab<{
z{JWNkD|8jADER~B68%F}J%mWY<dDy{#V&G=TUhF7ULWSXgOSjB`~LLZl^2yUxn%w3
zeH?miK!W)eG#bjbIoE^aa~9<y=vHN47r040Mvb@XvQs$h;8{^NBz0NU@!PH0yA8*e
z_npI(1bj(?z7pQ|n$h((;;i8G%bin;7X#Mj39ZhpHWWXEDWSlZwat4>l<ec?bDMBW
z@I!3`rQ=f`?3y^tH`6)rz_a*QcMerl4){=1sr!jt&5$8;Iw!f9UxLUQU!c4HP@;ZV
zou7q*z9wQ?UD(6m4r?lKUX>Bj&z&%3rnzC|OD3YZQ<_WFN;w^hhbogN!u-DGMKlQ8
zmn<4N(uYuM_r^=RXrVya!#mA-iy{naGpI(l2Q_NVUBazA8gCx8F1;0hVkBTb?{_Iw
z1ZgJltyq)PLS`d!PnDASrdXSVYMl@cgmIIAGi_{@hj<%2cp_8lSMUVAwR+wsuN2;N
zI<XEvfrU|d2kJFkh8;G>h3k&y`r49I_Ekn)tmZQmKTrp5pK|e36Nz<|JEnf<D&rqw
zAx3y4ycAA-p+W*!+*e+!^u$5fGXAGD+BMh^2v`z9z+#iU{cq^MnBBqt8%#DlhPIiW
zV7t0`>CaX0O}VvkX4Km%2+iv?2*bKyB4M!uV7vUcYx~p`u#L5m(-kF2Hr;hvrLwOF
z>hzq_t#zmOzIoH~v00o@EXle<?5X!#XhD^acjf*vIQ&8)ssC$_p+Y35<T-(xH%~C0
z5|ARWAh{Q7=88b^QJ7YRfVxaZ9IqiO5$-J;xqG)%yjX9SjeWLals%^9EU4FUL{eM#
z+Kx6f2sR$pNYx9HO((iIj3quCOvqg5kCF)D9cFWyR$!dp5gEQAD4QexzY7ol@!1Sg
z9Mxn4pjBdF&`E1QC?NrLa|~r4kSnT33I&J-hjov(F10($qz?>UeX!Z~WoL^2;y4$=
z3Pph;dlET)^`?rG-K99?bzL)X_DdXXQqoP)XM!tDx8i$nCSRF}cE6(4ixevvpl0Jp
zF+iQiXMFo^6?SNQDT5I14+2q(7#>&$D8v9f7=#g_NUVG@VdT=REz_su*(LKHbFnAt
zL&4Uk?9SQFxx>ry55C-Oi2gj62ql=Hf(4%?9a*RsAbNdCyv(0_tvv8RxXwh@qo;VA
z=+lia?rKvq>v|`FCcV%!>%;gq3y!qB1#h%!%q}lOROy$S3J!H|K0H#Nm`~ZLxdHZE
z1|C=!lndyShBiR=YwabBTqNv#IVCT~uP>PjtLPdk+#Pdhogpfav%SV#d7)yb?s6LL
z>|Zq}c!`k6-x%C5!P<gE2QXU@YB@{w8lOEoA631ELk5dxoQ4=n4#YCwh{GMeH!aYv
zdL2@ghh-)UQO;B4Xc-Etvjmonf`Av=BX6`xN@ZgW%BQ^13VXP;*Wwr_CSMmpSbz!}
z6s>)TTs6rNEHY0T4{A#NmMCqAsAkwT?srrDWFH6Qr1EvhZ9vnD@R<g67ZpO$w<xMd
zUQT?*LOpEwu$5ImR72;Ha!J9_FDnHNCj(@ei!(DSUx%y>%h!63^L#Ci3Q{gq20tTr
z>#r>cuW`E&=LnAI!(JoCyB3}EIwy<HxGI@kHQ-zRRyogg#xOtV>ZW|`>M4;Ie@iI*
zjn$A0rjincV*$7ATY(+kLxqDuc>uD=MEW~8Nu(FGL3A-UBO8Xy<8^pUt_S~KxA|vi
zL%x59dIS7e)^+0vT-11(TB{#ZK=A2-(K)}z(YuC+SDI$EI(AQ5nXdWokU2(y!g-M6
zm@*7iHFOzY<fL&Di<!c5%)`2mM|MbmsHvnlw9DX+@<qW8Aex2LzVIB)!rzx)2V6e`
z6xaevSexIHdW3i+K&2gk$y|%dmnbedR})ucueKy{X6%^_ePZS{@^oAX{jr)R{QSb-
z(gfdM{|{q0<k#9;fmVi0i_*{%sicp-pWah*tUi^{)p9R{FjnizCegf;_w0Mh44153
zC8~y40x8#hC(9=Gvd`4tu-1}t&8_g}cbO@3iNxdIWu^+`*@4Vd&<Tk`FynnOZL7nQ
z0GG5(oNS%~#C{UN22p*f{+AG)Bb8#!Zg;9<w;B|q3Z9k8&H)`VDLTMoRKW0<sW3V>
zvO`-KM;Hr(uFR$Pq4E14buby$emZA%G)6-*_!%~3i|vYfUP0wm+p+dVW4tB>N!<<F
z5?^<3%CC|Qs5bluJdKX)IA<ZpZzVmcf?pbnXO$P)Lw%fjnO1|E$h9ateGBrio-gcG
zgMgx^z3H9E!F8|QNzCYEQ-b@h+XI7uP)v!+722T98V9s$%TM<BPPg1XH$IeSNs$wP
z$b1+Lt+~B9Ua9g{_KP6Rv$V9V_x5D(##hOTib}wGlX)q&E&ND+{_8!@?=tZ7t`B}8
zDtRZ?I|c~znTv7I-@^&UU4ghVTML&G3se(q`ox(s7mbIvE?njy#W=)$xqn1a#_oVH
zY}C^Pe$1>qXDer=OZTXOIm>rWH6GYoi~!UX9*kqvx{6@bxMh8aXp1W9*|3C{8|}1P
zhkbB#`sCmz7rBxZ_uRu0w+rfmc?cEIGLQ+}c&SteoqF0_x*#}OHHekwWU}Whp4W3C
zjKp4hz2`&Kt;FJ_;DXMscgv3WEdT4<_kl^ZJM|Bv@P`sg{Y7#v<(d<H7cp(V8Or1f
zwIrjb@ESbnYrW^H)@@3;*fU3s*Zpoc?_h}PbKLRZK4geOdPacZnQGcnvj^e6s1|Dx
z*_!D1mn{dXgWoGKGh&kOWs5RidGJo{@O@FP%ur1Wf&3DjVpq0PX86475ri?IRUr4C
zsU@^WY}C3s8sN?flakyOKE2H?lMI4XWz4{)`S2j&x+C*-Z-pK?nIhrh;UxIsS;(TI
zpi5GyeZjj_Hhkmc4ZU}zB?8c;OAPX)mGFgTJGsj~!74A$D_dZKP%O}iTqr-C#WzZO
zA97e*pgwI`KdM5KH8#H?jd%VbK6+6lTH2_vSuxoFI|6`=DBgMzgdL$yR0Xh443k6k
zUr!j_-a3lA9GB7DGF_ulz%lh24K{oRZ7$>|ZDBWdswrW!zGTl%%E?AitI+~A;>eeS
zQjzmJ9(Y$CX$<8hClF;w5ZX(W9gfGC=t4S6l&06dIKAHC=7PqnKGJqOJlDHZ=xg9|
zUQ0S6BkJ?!39Y#_WAFPa?~8(6uA+3xVbkbku%20N5tO2CR)OIbaS#jAIs-|dz1)Xd
zuFhuVPqy?-EK28?A`%QW*cFkD&com9`j(3|Mt!hKm{V!97DuU$!X4$}=<I?+!a*E+
z{SBVEey45P@w@5!D`5riS`xl<%C}CEa)1i^d^6V8d_z7>Yri<taH2wjA8Vot!->WF
z(OHYb)Z?^T?W2q1745B75Ym1c#?i80@NrU$MqG)Dbli{+|L8RP1+!eGqwU+D&zZcr
zdqq$i)WL)7zv2eROZ1xYT{VWQc=?>hx1x74+{-yw2`pXK9v_dw{b8DukUdV;I|#+G
z6F7ED#q`(=i?a^Sdp7iMoO-PCW#Zob_>SW6r%(SD;QsaXKLMhd1`=pBP|$M%p&CsO
z6-6y0g?d)Kjt5%WJTY%r=WpKUoj4RZF|FR}ySn^_qW=|BJ|3uqaD3sMtCM=M-d|uj
zJIyh6Pg|VS!$-ym&BV27Ib8!*5jb8FR1a2(hV)h|H-JI7iTwx}T%jVQf$-0lx0R&(
zGHt6nM`YNfPAv)UavF%QIqnmj$6~0J++eHKH})JEMgIgB+`v8%V_(&lSWH2rl$|~H
zrsB;LWrfRMy6#T$?yK};R=8VuH#dBMEJ@TInKo|ff>4did&kd5Sce;!4b5<sTVKa=
zvNT+}cjY9j1zC^O)MrahA|{H31~zund3^OOi8*--l0iv9{rbzWTX&@&QLB?ArG=;K
zk&(2AxS3vUQPe<D7d@d%XOp>s$_J(&BRP;Lr0<GqKrwjH{EZAOnB5&c=Fdr=Vmmln
zDJg#B?Kf<;_HKkfIi6_P>Vp&`qMP=*OwtnGJV`P@<yAX5BuGW=V0&t}HDvY7qZ=ZM
zNrS$^;Qa|Q6K;)%>iCF47AehU@n$u(ob8V8C)Qp>OJAv1b?A`w{QxR)LgWR)$n;pS
z|CDiv#lw`SXv{s=gU*VxRr0br@j?!!Qm!^{v!99`N9fYZwe%*TQRz}-=?Uz^ZEUoc
z(q0``t+j;#;itu}lNi6qQ?qB1gY46dPZF<|kB_W25AUzdC8MTun^!pzhXah>>G-@H
zQ5l?<bX)b#;_Q0gDX^QWCm{JjOL`i&!<Uu5M~#?unK<y+bCtv5x>L$^6Qh?_&5lAH
zBqQ}o`S<VGZg4;Fcwl5e@j`f$b%>gipf<yzEwI>6*Ct}am2O|TSpMnDt(^x$?2~7m
z#Ye@qE1a4_4jazj`Rsjp0-5Y$(H}ecMq(X4uqx5JF{&+-J~idT*~()QSyl^Rg~BOh
zMIdKG`Hg}97T-$(>!i$Mq}11KM5_H~8(2wsEDvH%g7EhJL~P~3UCYXMk9Pep_TD?H
zsc+vG4I)K~2-3S0L8>5KAkv$NsPqy9ktR*LghW6<kdA<g5Rfh)(mSDp^j<<uXi7~0
zAwbCQE%zyJpS{n0_l!Hnxo_Mz-X9JautZr|nR9+WHH$be4-*}2y#(dMllZ_Mo8a;{
zQgU6ByBI&FjVmO(mtNu}N9TGrl8|PzF`bDQnB)1;lA$}*9~^72Cy~uyPuglv<><@T
zdg|lk`_`W@+mi^ae>l1Mfm;^Kf1EK&l-&j*i9D6%O9Jn9FLB2SwvN|W`RnK|Q@03`
zy4b9J-9rtMuhb7KpR7W5;iUvIe77gop<|iWkB7^RMalP746P~@oVIbVv;SJ3SCh1r
z!kXFSD~c|o$faA8sNRmHo8x{VYPk#cRV{V(A9~rgOy!@W@>|1If1l>yN-!g#t<>;#
z4Ve)xs77lrEgui17`bMo-s}&Vl*=T1rXC^QF|6D(4(GN6uf~?TnvCNYI8FK?K7QSC
zxoc9OilOf-ep{mZ{ik4b;cmsbGQk>Gn}I!SL+y5NG?WA_J4Nq!JT*PI5wtAHN5-l<
zdl1y4G&9Y}3!GU0MFsLtV!Dg0e`e#%XJ!GJ_oT7b%h<m^B*uJtHBnsH0`hqkKzPp)
z{uk(BAE5pVHhZ-Q)K4MTa;%+*p-F%jW#ijvRlMsnOA(|I4q)F(5+=Z#hhJX?6tn66
zrkDn@Nd?v>gLTS(I+OnQ6#D|Y?3I#|O@h_MhtlN>>&A8b)9s8*VYuZV#XJ}<`c#fv
z>Ebg~eL<&iJK#K_;Q7R7SkkA97v&I9l=k;OX~4e?{{4u5>W0~DgD|@-(~GIf5ZG}t
zwhXwI{P17t;@&SLUer)4e`L5dY8NIjf8JgnLEU5Qwq+^e$arM$d|F8tvCJw9?9TwC
z1-*m2+jHEwRJ^JpvH74ne!AUV=0eQy@@P0WwV}izZ-5JHQnd(HIz%2#7gi1w<Va$X
zuWP8^?(Hdq3)`O^YEtUah_JkjTqBKLIRb#wh|6KUou52+7I9T*_r01t0tg@HlpUyJ
znHBt@dQs!PL2kZ*_swIu7gt$@(aiVEK;$$q*WW7C$3COV*U!QDjFt3o4}C+73ixs4
z>ca+5^9kxbE%icw#p~PB*&^}u>Id3J<BU(f8sGbp0GwtbOp4&0@_>yb@!S{U0gU)Y
zfR&_X*Hd)poMM)+hz$1F&*1X5w%P2Ud#7ZybAtAa0Mj-*(M2}vWotNi-YobYR58Hx
z6`g7_nR-<{E0g4L;se;1F9<$r;4o===WwPQUe&w%bXAf)P^Y(Xq|1no%c@=4=0~B-
zE9;mTPh<X!!TX6TUqLE&&06s%E6BGnNhotI7TgtLJ43uqeaoS>c50^#N86g~#xL9a
z>+;9dr*nK+Ux!K9E{AYvJueoAcK1V5v!S&3On|dn;=%3rE?;+P<xOb6=^|sLBGHBF
z@+depocfX2#6tPi<l3%TJH`4bvAn*nuDLF4d`~dz%2cxMq6U91jdW!eC2-OMREtIJ
z;-UO2Ip?>aPez-mzd~t&|Br$U@Or;$nOy5uNf|1!bUD%S>weF|?Hrw(ShZFi+-m>w
zrB+B48gca(_ciR^M>K@4P26{p4<%~l9Hkk<M41a58G^IiUd9*~q_w4n#R!+M{=?DX
z-)#_PYv9i1BTu|oLaZi~I+Io&zES|ZA7z=&a1a2wxT14BLSW{>2rfXRT~MqewSY<!
z7-?6BZ2g_El8+oKej|6)fQ6n0?{F@PKb}Ev#zMu*w*qG5T%dK#iTBdVIAi-SzcdtK
zV`aK={B25sP3Q&;Q2R!WL&*VPl&1^%C+S82XoO%w&-QNumCULsEAkhVL_tC+5D5rW
zd4&kId7jY|4E`2nqxDBf6!u@aY=#VHfRT@a(L?Xz=(<Lj4c1+3IjhnRUR+}%R2*o3
z+&WODr|`EE{%%ctM9?{ZgomS}dYcgO;EHC{NI3t@&9v1gW3syL@mWIxSq|z+?V|ki
zsvGmy`Knn5A3ZBiKT}+R<Hcd&f@RG-3^2BZ`Gv3cZgDqHZ|Zna3?^C&I5K`F6g=k*
z@R&t_V3z_Xh*TOls>9>li$ZP;U6hx#fm`-ZDLnyiYc&q(YUBHn^5%SA%z6(gK0RE0
z`GODmf&ggR(dC`)N;7&`iiqlEI@DZtykKQ@3<GFHFjeCY*PunT;5!CAf#hTkr*+T&
z>1~_Igqq^ZRv=+nWRpaGsP^Q9!I*FL!rnr2JZ#Na$Sie)DmOU<Byg8%^PmM6e4K%I
zDulk8KsqPP(LI<Va&Pa74i(MW=y-@ZCiO`e-$?CESLpl1Z1aZDuU7sucx7bw3I#B^
z-rPbkrRZd6)Hh7X-nA+TB9RIySR%KuTpT&;GYFa9q7;ES%AVG(BwU7?0!QNlf@uP#
zuv>QzDg{&tvq*fhO^ffF*@=tPd>S`A+uhxE{<)ieu_5Ic-rYzZjRpvz&B$h1hpQj+
zT|RLwwv?4MQlcYScQcLYSpdNnh)VYb0e}v}fVU4&@RLsLrQW}g|MmN929k{U&y=dI
z=^e4_e+)IxBK{4_`Ja9KC&2dqLHGCt?mU9O<<tS6tHTuJU^BW^nPz^A8yH5STqMN5
z)ENY@NL?}OcX_VuN7Z(RBo3Ya^%HjFpC5SZT9M6F*%JbDz>3yXYIkKIe^+Qh+$e(>
zmiiKS6I134a8i)hDBHNF1w$)vh9;C6=ly=M@A|@zv^|u(A?5!g82RBbJD3;nBpf3^
z$e&s``E>xq`@snB+*KukkG{ir_74D@j_C2Ln6QK%9(+h)o8%ci)bP{-exT*O#0A{u
zJ}Avtb<vIx*6i)#*t-1_i&Z*)g>CPjZYQe$(Udi%V{dV)0t_2@cz_*K2S>w2!GB8q
zc=WIw#5ZsTB9LE|1gSWB{QVmc0@nFUl@zG{!$AII-y{qFXNv`+xy1jG7m9wE0k6~m
zkwEU_8tC%KS2gDe^%DEp)wBmdm|C2K)S-8qvd>J+MQtgqPe&FDPMN_HpRQ9imGe=#
z&Y&r9)AatP+ZCoz>%;JFW@<#gPA>#6Y}6x(vOSpSSW=yI#3pq9l3>|A^NeCFonh`J
znKt`ma<0~+pIg~oA1SQ=O?4hm0j$>i7x6%$(+G(M_7EM_iD0xcFFBoIxt?*$qg+Bi
z!evk3dXbY|_EQt%dmUfnMYnlSp2REZ*v(%5!d`IIdoQ4vI-*dazQ&C_*ihB~o>!JN
zD|7j0_cSKf-od!f0MtJ#dfbPL>Nb3Aevt)szN<6dDi)~1**DR{8=_9hDiE;>;#<&a
zFd)Nds@l^~+_<`#3Ix9X7hd%pQ3RkaJu`Z}e8i7W{}Yz<r@rwBK>}1u;w1m1sQq9%
z=g|Ug&HsPdf7=Z)k)>7d)cf8r<!AMwbf**(RG8&tj)?&(&zqXAef^r9W`7U#Y|O5K
z&`d-jYDRVj&W+|R@B<y(*KrgPs)9d<SmtEyYKu~OIWF-xNEsoeE`H`cw4v-3@dBG7
zS3V+ZHs2v#;8ADGKux>)47VVbeNIQNh++UvHv3-l;PvgQ^5phmZ6YhpUhfc?k$8*n
z09cCB2oLHn5J;!(kJXZ4XN-MIH*nPa!mU(ukDly`#Xh<CG|$_olOZt$StPbXIq`}s
zbg!2HqbmngcEaqzxXk(wAzIn4L5h#w-Zx2-Ehxz{SbLa&=9o}gL@M~7X&i#xMLcp{
z@Gnqx)P>I<$81CkY-Glf?oIR_04%xQGux!tQ3N83GmbL>^Z)qjUm*Sck|P*W0sM62
zRBX9={~|t}=gcfbFZB2RE-4NGDjs7Ux{<(Hg|?+9KYLDgkbY6LnFjs3^Q)2+=doSL
z_WDTVay7;7t#dVahd3tT4L9q!%ra^DteNHH?{Gf7fR}P`=kCHz&e8!+fkshX);m`@
zS7MG4^LL4cZWx`5FyN4-VL5$~!WS<&hd_Vo3T6B$CmzAd<{T;%zm{xNoP8CJ>3s+2
zyNZ=xEK%WkR?669x@zN{aNI3{8>7u#3Ae<{aJK{dD}**#+j%eL5SG!9U3y%U2L8AE
zxh!m4$ux!MZA}x)q_4Yynv3Q1hHzKu4?cZ^TV2<mzBQ^R(WsXSx5d}t%2yM5j98&8
zc+%2YywH6tNlk(oj%@0~q(|14lVT+5%9Yo`PZD`<dpy^OR2zcsV!%<vn|L??D$rHq
zq7wXgxLwn~L+(#NN{uP6|M*N6Lm5eLQTO>Uivl}t7m|)ctoFVmazUYZ9c-0mRRuy1
zcQj+XY~amV=FJ-ZeECN56D=X{;Wc<i!#=9&I2%W?vkh1Pl{^r6I;Fo<B@xE)MAntG
z5VfJA+G|?R7@k1n%n%-2*4JYlKA#ru8UVRd3FmU~nduw?rW7(?LVUn~Yy!N*#mJ*3
z{haUo_~LVQ*uq~RT4fnkX*W2XvSJ~gZ(R3g(}e1T*jau{S$mxNg29G%Q>W_CSckvV
zK##Fr`6z&?;(HD85i4=jy-Hw0klXY_+b&fPW?#C5kH+=G(odX5&5~i$$Jj;KAo4n2
zW`M{~vv_jd)7;p`EEQH#3q(Z%;nPef{G3dP7&$q;5>!WEl*B}qW9N^3kX6|2ok_w|
zKjtX5N6;~*e%-?llb7_ISDnutU~0o8XB+?wg`^@-;l1<l)Sb+U+S7?T$MZ!P^R<>H
z)a5#cMCrla0*wAQMlTSL7(mDyKn08407fB-AfTivDLC>*Hdy>gwez<h*BGp-t_95&
zI6iL!2MD-dPN-~UBdB9fx{WUVTvEk${Zfgn@JNy9=pA^e8aTYJ-7oiKUEBM!-Pq*S
z$eB(USH<*xvw=fCYJThNix$`7j+b`2{1s4_fmk+$*K4%K^=jX4m=|mid=LVzmcOU~
zXzX{8e9T$jQtc-Ret9?6_npl_VRC{Rsl)!sK($gIA5sSVoaa;(f#M(n6v)r+oX|5L
zNIGE0d>p5P8A5g#lyCuxDPSxO@;fGL`Se(E3brI+@TKFcCLeFno3zWHzq7WLzr-1J
z7|nA$!pW5v;T=}HYZG#9Y^@C(pDl4#d`8VH$BuS5`YzsxqqNx_a1ctdbpW<krkbfz
zq(fKL%szSUSGAzL-T>bszU#xydcs<54|k~DNl6tLv)Y0;+o+dOF&kP&amU;&HKXL2
zY!&V_iy67{KdKEk`+n68EN15JJA5{a968q|jv_gg{V;S_i9cF-#vmO;<}?G*<!gbV
zc?V9dZ&NC#?9v5U>E54uCboa*q1xQlP2RS`TSsRr$jhJIW-eX0bTCic$?ObGR!S%;
z@Bb4XJ<|(eATZx1$l~XCLir|YIO;upjquvl+o!u80S6NUGVkPmG16RNyhc8DP?QsT
zjNI8jS0~)Vjl?x;VIiyV2s><H7Z1(6R)~njVCCH6+~5<j@2~^yHi?|?2cQRRsR!Z1
zHvmTTT#Mj?k6x9$0vl?CL{-O8v;`!)oV+T3`)NbFrAd9Dk~RCGgH69rgZXZ!$K@TB
z!cL<(PmEBc>RmP5CI;MnW7Wo1T*B~Ht|8U3o^)I9OHVhKm<n~DwAcy~?H!TT41C7<
zjlc;beAHK{(?xo-2ayzKTPKMqp0(af(VKiOW|5bw{-e6;j?^M&xVNA>gCTq9P9t;E
z`IBX+;Pl3Hvn+b(4#qIQFWe}0(NH2OfHLsJ`nLM>Xni)hoR{nGJy2Ig4@-^}r@?N~
zNVL5TL1Kb%!^hDFGw{PNa&qXW_p9^!1J3+Oa9<V<<-xk*@+PDs7)8;IU`_97rN*b`
zTe+yp;{A%^<R4q{ZJ_LXH1%?qKa&SOW2I?(&1!d>34_2s;CO4Ljc;%+`{;m6U9DWf
zev4t;EEF_azsFsSn>dcZC*?tHMv+e7P^FuQqQDvJN_Ta)3wyoCMiaYg-VNh7TGiF1
z`aIw7a&XpCgkax2y!dK|bKHz-RrsaK**H2rS5P}$eQP7OdcMtEptgP>O+-)Z(wB%H
z>rZ_bIdI2C(N)v(T<AUQ9?}m$MO4`gKY7$R9o~**eo8a89?;t`pG$gp`)+CsLI5v`
zt6D7#ZB=wZRMG7U`(8kvOjpm9SG)T>FeTr)_e@ZNk&pMLg5Zlc>QqHH>LMu~g4F=d
zEBiks!~fL`Kt=*c!d5TdBX|B*2#ERDLcnIQ8_zu8c&L;}M=u)npt#FQ3!|UcN7C)g
z=01BnFLEcS(m=qqbiSv1?fcbNx61#slc4u($^U;8{Qub5*gQ)BcQXPY;_0?jMR6)e
zmL5rYRzLg#e5B?%Z+GNp&Ypo(Ti=^Yz=4pd8mHa6SQboqmndBV52!kU9;$-RaPi^l
zIx(-aa%<qHDv>()#`fjolb8Jwj%-&O1WSHNMgS#)e}aOzzRp5zv4&AGwR~S~T_)r{
zdg;g8K6A_xdam#n$h3lFmOFKs3BL}k_cl0UlY4>NL?HihFt+<liy`Cbrp*ZZ-ot*5
zlNqtnZ|(U9WmBVXoWRW&7I?FBPS_9+`HtyCyDDhH)Q75Fe0_)SY|n(kdr@}eL#GdP
zki`|7Elid3NJEwU9g*4qE7lbWzl4ty5Rn(Th<h)mND{lHJE9SaP<!+bv$E*11Q2&A
z5O&#oHBS9&5sAsYHqceTXkC13)(p(~50erw>Hmk33-Alah_U>yUdEJJ0vryWtCG1Y
z8SWui!l1h4J~T<U_(njDiB|wB@B|RVbmG8-cdyDXUKs&3YwQfvh~!tE1m=<z@w7}g
zN1@PW{UJ{VZCz$_K}OPqqXgF$;3clclL3H%WHeG#lBiog8%IjK2V{5zYXCohYP!e~
z5evqi8@+}c@!>NLD*udwz<0>1@Z@Sd*sy>5(gosgnII&1NAw7QbG8j}XjV`B0WFpc
z{7@EJS#$!pj2M6c|3^%6o992)cji7=v;NcI^AFb<|0AwZK6`fSe+xb#+C>Dz+x3vo
zBvBgZsr}A`w*a!xY@h-lr_D8uaWAxn;`%NNb~k+-p0QquH+i3G_aqSx+dfD?Ub|?J
zO`veQaxOz05<WZWb`P2!b}5}1R(hHd%4zqEqw;DHhW=1q0p#qA&m~?-J%15c*g?EI
z4jiZ$$~L2$Z<Q_7)P{RG3@v@`^&#`6!PkmVD@Az<CFd2m9SISs49{)xo5vPdp~xcu
z$(~M<e5>ptzUJ+ECp+yfYiSxkzt{`8>Tl0&G7hpu8OnO(fb==!tf$V!r|JOQa{Qv~
ze$1<bJIl*WK^=XDhbk>iwR-kO`;OZ;R-i4g68s{T1s$+UzcPcB@3!IcGXV9N3z$k-
zTaHv0rCl-G&;gy7TxGL(Df~Ta6Ajs^Iw#*o)~qEFN0+N1U3bc7UCUd_UU(*-7=KJp
zVw!4`NXyaT)(_h%^xS(}z66f~)2Q+g__5u{a2WefxmND6-PRrE;yNF@O9zSrKW_96
z?hW=f$l^6{{Ku>pC2)Jhb!({kMM|?y2*!|#F3Z{dp<H%i@kn}rJ*id@D^J&3Bh!Nk
zw(>#%aWTS+oOF(Gx739ro$opms4@57BCE{;j_)5h@idDWpyXbiM&z;z^fik^d)(i2
z=^ff`SNuY|PFTJenfWTxlD}S%<3{`Y%`YE!uXh43yV}RbB27}$dBYShd%1Q)=Y9e-
z$ls#$c@lXjpaE$39NlpWzBmU8uglPFz!3{(&Ya$UVoC1DOl_t;Y#>=Am1*nKP%4S5
zr2j5;p#{t#XzRd()%*BN^zzQ4e_+{K*nny6!8b0u58(pd!wtL)<m(jQ)b^h=>!BeL
z2##j)o9P%)ary2u&Tqed#BazXT`@gaSHJ)AR^LJ_n~^u<qh;Y6{);%ITX_^W3tqm=
zTIdbz=AxKoROEWSOY*|qcTeS>-HoBU1HEtlqfzM5V4=(JWr-VgUo%@D3}6ZITCZT<
zsslLrUO0>MaxKdnq|>~W0W)^51$`=OvB#}wxHTN-e<?Rw^C@_2CY(*VPDLu`90GlT
zGi##@Hv`ivbGSJM6>9{$z2Hac34}k}(O{_1yZ+1}Ap<LWc+wU8_62gK071Rh<atYi
zz^_by=MvNUaNa!0uh0G`q{U2t$d*j5rLykgqfm8yef+Jx-)>Bia$e;()Cb#1*55wV
z>s1?K{HZpT%i%nZT<S345nDV~Ex{vjDV+!kBK;E73<vO-+Eq0%YL~55x(8P%v)^)l
zZu-u)CbFKf{>W{ui~<v|dxE=uFC3E|M&LrLh~7iK+m&PI&Q}?5eLP=TrETdxrq!wz
z7u$2=@RkVqs`Utg(ZYk}I1nEaq}2RLycU_?`sMAFL9k;68MgG2)A|~HLwcAuq(JV`
zoS~zq+-KYm7XjzdYE4#@G|8igKoM=Jc&YKq6PNMF>xQ)_2ci~wC8*vvK!D`$LO?k$
z=!tW{v?4Atid0%ETT?xy2l!oCt{^Hk_pT{EVy)<<jGw&`bc!^@oe&sxtjGB95iTp`
zS1eqH*cWuNjbwWPO_X#x=^(?0G;M3UJ2xKPb3R}-1rm{>25To;>0KKOda#S-?|6x$
zdj5H0`g?{4Sr3C;ZmWFP1N)l&(yAG1S#LH=dzpuQ06g5<?MZ(g?x@T3ef(uCaZ1Jr
z5f8KF{6_N1l(=KnVEq2M+8h*wH*m+%b$?PsShmPbPoK%igPQ9Ozios%QNFA|M<;5(
zWC5ght!~q;lCULP(5Mr>ZWTd_cirspC;3)G%ibTPUE*T*Rv-vE!QW*mJA5F2PzwEC
z&;J2Hl-_k0^{z_3KFd=YgmG7e+t$bZW)pX4Wq#a$9LW*xE;%65MZZw2pz!m7=-hA5
zxs&5>GrlE@*G(^SyNBX)XwpvT<-@Dn1Y%5tx4eZF90k1GUdjB@elH{%JkzqjG;IpO
z>VD2!8*j~udWwS_mtqmG!A_!{C{pdZ5X)uD7_#x*zQC?JGu~YbYGFrBRQE8sdSY{u
zZBTY#A(4u@pdbSSxZjN0&lV$dWcXI~E9)DoC&l@aALgAVS9M$}@lfu+$0bdv8V9Tb
zIvu9VRVq<CW2+bHM%RD&+gmRSz#S05Ha`NccgYW}S6g+^eGGl5#$jFtU0(%XgN-hK
z-$-%$w!E!@Denlp88fYg>{1&3$dQ;7svw}>!erpV*0CFV&DFUlqxqJ=;$r!fZ{2gn
z22p*hAnx<?2gK1OalAoUAP;)ElZ$HE7FU??Ex)ePVES2A1Rqs_p1yR&>DuGhI(_BI
zg&rP+yR8?M)ksbklumctosL(_5&Ov+-o?&U*7GCw+uZB4Oe4~f-Zkn%A%p$oqHe36
zZ3z^xi7W=}M$uX7UTBGn#HWIYQgejFUcX){m$|Qkpo2YAYB*o2A7868cBTi6l01He
zA6nTD2Un|HAYuvft8PgS`GH}m#heC<4{vkgAHFRbeC9lTZ~g<tAx$hf&G;KfgS0C?
zqD$w57H~<td$Fw*+zRofZru434SQ6y=NmcW?ki~rzru13+@~^p6DCVt&0~+)%16L+
zBE$Qbx?MV)(Q)bc4zh}H1H)*;q3h7V@zBaNTA)#?f-JE1{+`<5UE$Nz!{3s-VC;Mk
zVD=zV;@_c>oxAGu22(Zdo-Bv}u~AK4i`C?3l9zPx$+E)Q<80=|E1!OYNdq6>$4Z9#
z8j=6R>bN`QT6DUxYl80a-OIz08*nCjC+i8NSWj*hJxXo8<?^!{9-a4PA^n-(6Jss!
zjDOi4%o3o_@u%4I6^h7_XrgFf%(7OU*OI1Kz@m-x>Mc)?kI5RzcdDQIq=_hgjpfYz
zQghwKb%gq&0B*M*3a=h~yoDNyBJj6LcptcZaus>7Lr#HmKufS>pJG>N#9~BoVo<}2
zTmZ-gGbePG$@eIE#wsh7ZyMy~&o)#iGrvd@Y+aQ@yc`%|{n380bv0xH1KHg^HzLju
zg%D`Tod|Ts1>OD6slgd9xE|@6uMXBXQv9d@WQUcf6bUk$Oh+?+G=Y~tVm-sZ?Gl2{
z^&o|D&Dz%1aL2A;-@bey?>kNQ2DOVnXqfNxIVN(>%sQI89?k4?XC{a;J)-@YFb3r1
zhG0)R0WrQOio<HWe9!l-Op}W7P8lD=(&d(euy51C<ekFOQoJIO4sGSb@PFjvdUQCw
zwJS7p0AHO6D{#67`CdZU{^<JOKu3u*1N{?-h_g4?*+2s{&_C{WdLrg^XTE0`e`=y>
zc!%@wvxH7g`P?gj)Fw#a=zxkD;e_29M%Z<{(Fz;vS|GK#S#8T?Ia``()Fs6MTWcMf
zIPM3a>N*Cem+5vqo<GZXqRzRY;EQAeBkDn<nHf&C;H#g_bKMRDug)Y!w=}7?sJ(0<
zp$&T@qjKRwJAgQ4r2Knj&!5i;fO{-V2HqEkdra)C1sePWOa3)uFplSdANmCFzU(52
z(XJQS-}=E+H~(gaVECiP3`G8uc<{9>57pKQ&v%xN{de39K@K?ij-oPU9mn<S(R{V3
z5tA>k>ZGrw`S)jCcR97FItyL(yz*;F1^XPi;%=|^^gE6j;a6y_OlImBxpy-HCEef8
zDaw#n%(g{|sZ`~FDvrWf2r6T(?9MNZg7SW*EkBDcr_vKkT0<-`YN|hY;PpEAku%*$
zIS927ICJcB@dfE(V@$O`Fqvr9@V2k~Qi^px^ULhce=mJacj0T4g1|M#wT;$)P!U2j
z0t?$vbTI_7GN?l4RYdOgC%@^Byj54OYmqG&Qh!i0)w27hzphJBq#`PbmhTj&SlR3x
z;y@Qh3N<v2A1N#;2sA$nn$_di52$uwPf1tkxGMEKhgX#mxXX4f>;U$>Gw&n&`@Ot0
z?W^A!dh89P{jXOAxE0IC-3&NHDF@}`aP!&^FcsPNjP9?Hj4+*|^MEn!4)NMgxgn>V
zz{8$1->Vzi@$*k=BB&Ed?2Dh1dx1u*No<#~kYfXEVLKg*!73MZ4@_jnDQ((7yLMWZ
zb+61{w{N|<I*h-Wk@ar<W{fUfD)cTEkT(H#mX$BdY)1)i*~!x1z9_4<kqFFb2^G?O
zZ|Sn#qCpy7^L8Ys^c+m<yj)?n8|toX>6nS^G<rADOP06nw1Cpi&i>Mqa@!>AW|}|x
z!DY!XDQ36*1Ue`MHYyUZs1&ZQN30)Z@#6eGGn;wo31|=T*Tu6=c7+KhN?nsM(Crjh
zxhmzhTG+OK88(_ll8)X?AnWoM_okgRV~&mX@l+N-V_X<C$&#|<`gxLOX*jQ$)1@L;
za&4JrJXDzh`*>O?^&+w}&VR<FA%ZoR<B|HE5>OXo%D@lO^+&^jmS+i_XT2TEC^(w7
z=IA>LpVC3(i(zi4c`mJ8rT*(y<URG<f%(RXCJ*i@w3lm+LS?T4NxtAK%S%Sz+TC7=
z+q&OVKUmOaaoT@l+{?kx%n6i7ZbR7dfbRl_^*@I+%rw)M;iJ%4q=1cOAdFyT`GX_k
zkjWu%+0H$Cg)9901Bjd=ji7=LSmBA}!5S+zOJTS5>c)JR@#^eB;$_jZLdol-3|ABc
z)p18E+Ym@aB@%c_GJcLOTlM;|<E1``z2zir^!|J;Xrna+&vSkga1D|bEd@rkAE%oU
z`6lUC>!)h&hWiBb>9#MvyrkXq!NVRf&YX9>25qRyz<L0&zdS+X9mb;x1EzPj95gka
zy*0MaTVr%n&YuUiWcEYC=Wr#~TfZ&D#Pf@Dt&ezjenc}vfq$cBOef$Kqd6*5BAfx^
zvmf*>&{~zg3Y3O&wgV}hsb|#xw^I%hs|vsc?}zb+#I}b(!qOKYL$(C>AJhCyWsfB&
zc4Yw%P42%7>;R7p-*G`!>-)2(%A*wWK(ph$`dz?b1!$R$e9T_KQrle?plbEN@?#$i
zA9()l^edn&Yz5*xwnvz}&+LnTOisq;bw&2&1_wV<P+Tp$vvHhF_t(sSM^HYP6L_Cg
zeG=Qp>a=A^*7l|7;CNr(-a@$6d<1SsYc@~3=+S)Vt3hsl>~T7V<3*5CekqGye1_A#
zRi4mtfGS@pw59fBB?9i)<jaQ=z7!Qx_N>EXE&XDJ^Vgaq>wv_n1U5;4;dcZ_zWSLm
z80Z`5dwTUHbVw#cZlvM%+Du`Z!BVnp=x;7YW7{83|9BK_EpT^NdBRi~dh>NKA3dH=
z`u+N}Y&cOWp5M~il62cz<Fd$LJL#p~*V5zx(&1ZvIGIRKdgAi;`K4jnx;M~kC(Mbr
z?KDr)?G+vG1q$Rw_;cEc{Qg0&DEmD_^)=oMm)j0!K~I@=Lfk4Rec6(iA5Hj)I#nSn
zleHGYoCQwb-==U`V<;u*Q~3P}58DzQA|+B$INr<}=YTK<-iX7H3ql913aI%dZi6Ue
z3cp!2tc4$eI|}f9+n77JAeq^|DG&0*M*j`)+}xnHsZiP`>AGp#z)Kf(_n+-|lCYk;
zhFB{MOIHHkr1i>lhWWH6!-F9j25LWgnnpHjVX<5`(6uW)0hcHW2s%L46Tk>@GXh~x
zar+VRnI0xApysOk1cBQ<NSS2CpV_ZcA}%MSL#+TR@b?Pj{CYX^2wMFY2*s{gvyXB(
z4!{O~WFFjNAO1?H>^XF)EtWCwB}=DG)X^s8l`7iwhka?-L0$#2RNzs|^v#b0R;<i_
zbCGr(dxvEIdbJqa=(m=(_St~b2V|qmwwicVmuW8*C%=33`6$>$5|B1^H@?77zsuBk
zJhn%#!IFjAW5u$Webo-&)@J%>el!%4`gOTC-zDHv-t3$cO^VzGN{T&A(P7kkrp8*}
zw^8<lasrSL&b)!6h=%cN-TcId?R~vr0LUrlvbMH3yj6DG=#9G@ZpeF_X7FCUV}tBO
z@&x)|62gv<+<1LG!EwL0C|&j^QVo>593C2dH{QpbR;@7QuOoLHoq~%=ju#<XQ+|32
zQa%Eo-dWqQJhwNmx<aBT5T9A%b9@5;`OUBt7q1rJzpSTjXSfSZUAZ%5kN>eUk6^?v
zx2GXo>!($DQ1gph4|PcI$OOo3Y)FwSB*l1F+wJk1y;~-tkkr1#W#YdLDY{KxF8A}+
z>Q!?Tp@MY#;%~RH*n=Zpx>2m05VFd5N&f=b{%KDeznpsv@Px@3=c<4vWtHy0GF|cI
zBHS`&uXl8r1}5nYVS-m9D`YYq42S~<(2-qtC3o=P;mPhn^NRfH)5V0Qq^vK^b&!ki
zM0yun!Dj5Y2<Gxt>LEEbrq!S9FHmoo_h1Lj6=>E%j#}n+h4c2;+elYQ#w{52MHV3l
zqr#LQf3|FpdLx<VcTzL{jV_(S9MW6mbzcdh3LbXELjq_$ZNt&$esvK?O;*MFsSe`Z
zlLlhrcgkrjW)CM=NneZ1r=C9MZsOX14Glp<qFb4&aENe8)x~^>C*?Y~kXiKLOa5M7
zGi{wmdAGS#Zo6IT#ONFsc3+sG{WI+9t~py8w&uWLRfnEFL89U<xf)faN!kbdhYFxe
z$lJ2N9$L+#a$-<|5p0?LUn0oX?DuB5=A!L`ll3-$+EYKwK#9>CBrDj(04(7&Q!wop
zB5m|L=V+06v8*(JG?FBuN!LO0we_lp?$t-GoiC2)xM5RpDge|9E+^A&ER6nr0ro`g
zREw+Hs2e<gnrdMo#a=-Yzj*%oaiYRVE>sHNvf}x!(H#KL!#xFps(-YOggE(7qn0d&
zV&7z4QEz0p6CPJcaXHa@1zE+ZN{QDvmvmmH_iLb`Ft0T9v6}p(=;86r!(;e3*(OXc
z-_BaNO{i7cFqoxdIRQ*byuG}If_J*xZJE7#7-Kc*CR(Dcc3YG~9`*E{_YJ9@2cQww
z;x|j_!O>EGfhby)pkVxF;Z|e9>&+~!F^|?CzUkMhC6ad+q&`PdBqbVY?4}bm@vAF5
z5kxU8JgnKoTiP{Cv5DEzRf9f}?S6%(atB>s{>H;ACr7fUW9Pg}Hc+`40tLqO5dOXO
zW^E1nypk(R;kSw^abu%<5Z>7;pLW}fUr|74a0uLutPVU`b&7&X8!}eRniGt%GvP-8
zE?4KotLHqpFQzE^9g=a{(?3SiXJdxUpvAvg<f0Z~cQ5jwJlK=2bRyfd3?2RAP~l}4
z4>4c2j1qN$Xvh8sF~(sfPrnE9hWC97Zb#OPbS7}y{$gI@7*jUzCa+UAJwA!jYG4lf
zq{ixxAY+zwI8^n@W_V@R2-O7MFmR0LIoT4xnMakAUA%>_O%VW!&bAJatjIbqG?aO7
z`)oCCLM<>>k`#e2{{nTPVp-(n&Y&=dZWn56YlS|L4g++!D+{zyQ$wo*1bqlh${G{_
zK_5M*&;5X)5B1opYvg($pUEi|p)OmV8N%|8_=6ea1G!Vg^N%;@n(hX?5MpcZd9^ZK
zn!A3XaXf@~$ul1*-fHE<Ii_Y-rl+|aQP=-1(bbXU-tU{OiCD9D)#coH13g^9n<|OS
zvU!~V_v5#N(lXL<ecc?1Y`RakDEY%7cVMF@IvAra2q|pnTG$KOQ*a$rBeNZ%Gx3Z@
z{GG&A38#m#YGn6-#o26Ttd~=&^mdWvJth-vAnpak1u6;;2oU6cydoK$f-mn;mBUKZ
z6wvaUV&ita^X&aT*tncXMT>h(qRp?6uME^tt`wYz^t3#0HN$Bh1_m5=U}b+kyn%y9
z{B{?Q-xz4kP?nF;kCzV9rZRH!QXv)<Qq+$ELR5#p9cTzK7g13JK|rct?1{S5n?yX|
zl9KfOexW0e)2vtBe(t>sDOK92a76F2Je>J0bg(NkALQ}ebFY_*#Rm^^J6I-C?&d;V
zm#*Wha0!p^TjQG`@#GGRWe&j~ZyUMKWCkbg+M2qE^S#aL-RTAn0cweV<l$!Pk_2Xs
zZl9l!%I?Te_nQ+KBYF$V!FmecYZ@wJYsEbRF{%+pt<rQ`Q^2(QAFEm!Ka8DuV)@%p
zPMt>K+*hzSlV~1V{X}VfzN}8Eo!)Pnrk3R+5b-(XhnU_*i~V<~=wBed)&$_8@&`sl
zakp0=ZkG$;2e4pt)Gi5{l|YSi(z5Z<irM3sd@~?MlhRo)TAJ9Xo0r?jRVrQD+48EJ
zk+N@8hW2_#qhP|nDS{ILea*iJf_o-4wc4maZI`~Q-h}^9z(Fp;JdsBlGw%NwY6~I&
z+u?s%E?k@55uJwCV#1a0F8ko<G%gT+JRAh+8Bbp>NXLxn%!uIoU1|eY$d6R6dm=xT
z+i#fmE)2gau@X2JiNWu$F88!()6J=<@&%Ib|CmwsbSyyqJh0k3B%7{u&^pjIHIial
zOb0cl4lfilj?A85<d7v$AsllwPToeT@GXpE`&Dh+*Cp%kcPi)fZ5%&DLF3*O*?Flu
zh&`z=Bi#WU+{?$7_-+gj9?(JZ5XYC9{6x3Q`2GStnesDKUD|joId?SwT$HP&e(^h<
zjI#bEAzm8yd268#L?jX@aMM`96J-OpF-vt=q9ncUiRFX1Y>%3{+Pa4hk$Q9=*fkT2
zyy8yMH^e`x0)DNWF$gKK|1S^|Oz0QSb%MjWIMi<vXt^O6pcQgk$NiqGmlwOlKVeB~
zyi=6Sr}+Gb-}9T&Mu~wbz2Iue7$Eil=sV-N1Y-+~1bi&bRpTAYgFV4UbaNMxD7TUi
z;vd7R5AVKRC8h9<sFR>B_EIgosD-*<5K+h~mRQ2ml`?%Bev9wK;RA_mUOk?SA}dY2
z0R16<CBngW4qFJ6d+kRoU7e8)iW@r4&2>4EXQ%~KePv^d@wcT1^;XP)1yQn-%M)$3
zBa0K&z){5cd2%h<*t*p5otUTQe-a4M@1uMy*2Y&^MFS7!0+Ht63VUsQIv`|3HYhV*
zsqhiLd=*0H;`M7xqoyZ=)Ur#UwV}`M?YsIM$DSt@{W)J&LU0&bTNP5Z$(moe(!B-q
zhA;aH$=(QCzc%uT{i6;{Eg%Ho&zUpM^#f-xt+_6RHm)hvsvMQk3~eZ@yIoJ;Kf6UU
z%XTo}Zqq$byLJrN-U#%#_41to$<X~)p=~GIiQR9KH7ZXX4+~qtj_(a!*&jI62}ZWv
zZWC5q&;#Fvw=2L{0^bvEW6QfQ<2FiVyl)m>5A^!3<f7_Z;YppS;nDT6{j_SkeUi@k
zB8ez~-|ppMgn^x5;?vJ3cc7A(hMl9ne3VvMeSMP}yB%q@m@7!&=xk<@>9qa)D*VUu
zi$eI0FECDxH*IwzdU)93ZVB<WiueHReos0N9bi3Dswa-L0=hRt{j-xh+U&R5`qTuB
zI{LDCO>cY*HSq#Ja4sJlGU|e}z#Vv^i45Oyg>kZU(GVKt;8&j@h?46!^Yd&OV&b0<
z$n^CaBvK4A5$~<YWS(^MaN+0Cb-PX2p5c9)X23+mfg#WIDdbXF2Rj;SgXV4cS#1VK
zJfvSP-|>kR4j|pP{8M~)j0KiLNevt<0gZTrTJK2#hgVx6n6|y=7n=FD`fTRsbx1$N
zqo=j+zP@~$%BMTnXg3tscnGWxm7Rgx=;5L>Sn;al&OiwBhCKBxrBI71jvU9U?rG|*
zaug32XwpglBq$giH{;`Qk}D&fMsJpBd`DOevJElOb`yR`=7s$<%>2#|P@2D=zuiGq
zbL^8WruWkEZgVgm*bst2nG}Pd!>8-wH!+^{E?#|*M-?-I?4MaimAt-J)jkz`eIpV>
zqQIXC7$UfVGL!$)vSbW_MK&AnvAL4a4SBO%iwG-q2eh+CA&<E38f#O1InXuvCZ1-S
zmma2wcDa=Hgp!oDh+;PtW(RM#5fj@7KU4fB@x?-*V#aqa+hJOfE`$E*ex)`fM+Vq*
zp60unHV&sdm73R{yZv;?;!k{;T;!;DIX<d_uY4wzK-HLavXam_KVPsc+Z*Nkk?T9T
z#V((~ikGyJ>u-JgzsC>$ho86M%p#8HIPh*yu<)4D6yvd)5|<bI;(2`*cA5H040*Tv
zQ~G%0v%`lx$L?{vV>8e!oybrkRTVxM+o}Cdb4+aRvt~r*TAC43tWwb_S-WjON(dy}
zQA9fK{N?rkQvC}=nZR^`*lDE=L}HBIx%+*T)l=nnlXgAO#J-IvPPWKDcpvG$64d3v
z>-!6$VSJC<1;`;BgtMr!0z5<Y%;x=sF6N%OZ>C^{F6v9V>H{e~N(sNrV9bHc;7IUQ
zWqIrYM`w`^dY)Cx6rD04{gzSK{&nrR_r%SnEn!j&4;QT0GIg`NX+IW8-YT#Kq5VA+
zKve%lLk;%~>bXKcz$cP2v$6Cw$mFrnDA+!eP6MC6G7=kLD?L>MNM>HT{ivbdH9>9J
z+_%<Q@4hxr>OW=`2^V)T!nF5MC)EDTw^I!Qg8wc7t@Qxb9|dRjsQD=N3?Cxcm4p(w
z(LILnTuS^t%i{dAP}X2IVCR=3E*FSz`U}Lp=$7pA2zqDgY9Z$p(0iK{mC_|EDZhi>
zv)gPWt;gQIV7;*iZC+qT0yR(9eiXL=K1dclv5I4gJr?&W-e7&R_2j#{nK#?7Boo**
zs8TIn4=C_M(w0d=jNf%0m$=qRgvq752vpqDwN9B3+O|#70-3Z{)=ZXuliB{+H9ce8
z{HSM<i<8MwwTshibGLVgzXX5R9NB!4Nfh6P(D=6KuPhAYYq7OfT2Q@EwUohTWm2ar
zfSf`TM*DA>)1?#1VFM_N_C@#;++H1qR;SliVcNl5Lbk<LLty(-#Kj<Q+AKnN)rS+5
zy$do~j)Yg`r*|gAL9VIZDQFP97)hY=sJcy=Jj?IV7P4cFCHHE<ADumj5hJm6z@?zU
zkuZ6J!n7(|U`>9)gK37_j;|v=vE4{=jMp6?XEVF!rA^tVeBCxv>$a-J1%gOQc!a;B
zG#iM(3~k0M-*s|27NwQmHi}=c%(gQI)?GOZo1`avYY|ks^C$jTqTEHiBoD^67zbG~
z>r&A`djP+{xN8M!{@wQ7`6z$?EQ26Ht)+kp-Zk>OlcFBt$G;g%DMo<DhdQtXJzQ9X
z&*%ly|AzCpeadh4uWUi}x<5-&-{lLEOWK=7=fOYp!Aw!0?!fis9RLsMOh3y3l!zlX
z*vOQU0f7T^h-G(~z@w4oe!HNxxYY#+Z^8%pLGxP5^RyT-ir27_<*J!Tu+uJ`Y2~=*
zH@`B6F8PFzudeA)Cnr94q#pIFnozMGZRt78B+xWo#6peH%h5|W@Zne^%y2%IIMF?0
z7*ygiHrpA(SfzI(Y4E1mlY}()H$Cg}*RC$7wH|zVM{)q~TAnNHf{Ee=5sa!_&<E4a
zWSESmz};^sx}!z8Kum%?JbM@o5EZX!W7A%E+W$si1CEV6NeKG&_%WPh&kTSD`3Tm{
z@wPU-wbXLE?n!f_PLJyeqkH{gHFOPRy}Cms{2d_h?qB*(3Y%vBwK?S;d=3A18APQe
zP{B^0V<7Y7n_9&G%-Ht#--j6hD=&=Sja{c52vy?1u9Y(ZQ(l>)UAha#%77~tcTF0w
zh<4R(u2V$tUaZh<8^Yw^?OM42D2CcR)*s&+&~uY~i#AR0rjGVgQG~E6GlMSaDAVcR
zcfPobXTxTw<2kS?z&n<Tpu%WC9_m2DFu?GFMczuSzW(XOuY7U-;94d77W<f6Z~vZO
z`hPcGsF}YV=)Y~-2SJSiZVgU_H(eU(u@z3U@btTMpu4_D=@P5EH`d_@t&Ws^w3gWw
zFS_{iUm9Fn*)9r^D+uHzu*>^~>HQrfni=;#J9YYK(j-}@homX$O#_#-)r$`xfIi)G
z6(&8+5B8idBwl&<b&m&U3YC&RmbUph%(;f^s=Y4v3T=`>1WM~ld8&8_jHD}0yTY}N
zOl6w{z9s4RAe6+Ht#Yx<?z{!Twh=9kXKeyK6tX9A>zM^PF0v7XD}Zt+j<Z5PXVv#<
zq^T*eYf&n*6W(0_;-r-n$W>LonweNtkahQp5krM^rpgHyANq_CN~%(Fed+dCtevLP
z%*WXN;^`Tug{|C}_r73D=km>fHlEs<U6*r)z^rAo&aMr5V4wb{w>M2)Jp0Ad%XHGi
ze}TlA7Tka!o?fufM{c=<dC5><_xEfE;*>f!A$T(kr62g#lLeIgUb#E9e-`=aOctqK
zuEV;$=(<!;h(4HMqG9V%IonQfgtYGJC;7-{bR1!&rP7qj*VMLXRb$X{sw7i@fJ_6u
z>v0Ge&Qx!1!>O_-pB;YlJy$<@6|B#CZGhwxCrJACAfAGh`XFn2<zy!`7f;&j091<_
zF>k>cngcl98QJ5DyqUhL`Wy#R>pFn%zE<urF}=YQB($S1T4{bZto#kKd9wGcNbT<I
zsl)NRi}dQ{_!GbbZkvL!S|-EycI@>UP3_d_j^#dkFLi~VD0Z#Ce^E_<la3V!*&*}9
z@psgv0rL`3aNAZA_ad6n-?9diQ1xr9^zFerH~O2S&#7zX@`N~ad#>(}Zrys^I~Arc
zJu9d#zL*<^^Xvp3H$a{B8#q~VSkjtlw${d=Fw&MP7>3e#s>J^OpWlF-Dw6Uz8Tc#`
zv;5|$8Vd=7JFCETu+Qjt!`ff02G`#RY184_8jQ_&68j?ViJXP8-s%xwOpJMBqM_EW
z1Net^eM!^6?)|m|Qa3n-9iAU^=!bEqTAFg#JbVAeZ54p&O45g^>eIkzN8?zM*k@(~
zuOK^SI4kv0B${Ll?#%USru~wloz2}HW0el8zBu-d^ye?Wh$PB=C0j`~5pXz?i1`}W
zpVVkHl1u1#Vt=9>+>Inv<pJV4*`Z80la`(w^v^Q8nz;4)fMyL>iWKPuDe;Y*hc91p
zq}n}91<QAy3qu3GPhhaMN$1+z)2(cgcP?3Fsxl*WrTxEbU2;sdFas%A$9jR<cml`t
zvn29NQV7?9f9cDKDhzkB1N0rPA|h?M@WFto{dq&7ZJ^=q&54>K4_wi^cM5}gGJ9X6
z?^IrIPQ9G`KuusK)Rm_qDgkz5(cA^eO_bh(DPX^)`Y?4VJ@LP0ekGP8CZxc`MIMov
z8oxN$!u4Pg0XKe-C6{ON1<;J){TYk8_n<R4_^}`SEsB`3>PZ<mZ;9oJKGKb7sp&!7
z)pI%yY5$}smigvB2dM&zgSuJaAw?#h=U8L9j41STnYmStpxcViC<}a{cahF`>^+OJ
zY;vFHYfIK`nhbj>l{RZ6eCN2EF*mj-w;!>7x_33!swE){?1Fqf0ULwUu3|593jjUq
zOcyQq#uyTx`uV6!atVWPlhSD@y);C!wAGHwsec;dA)QJ(kx#P>ijLmh4VL}%_@e;O
zfbeu70ps4F`nyT%+18J4wHA6;Dxm#3Pe53#7l<$~?PvZ@Fu=gC2FhzR;pE@>-F4^C
z`)(wS+1oP-qf9BVS%~1oN(U*MZ946Oft#U#m%_>{20FB9DZ?@N%192qdlS3XUa&FW
zrf^^r8Bqx5j4K_>ayy&>wgV8-eFQJO*Wk2@h;3|cy^bY;D@}c~cNS;9xyE(Zkf!XG
z$i3_RZNT3faN>7=HLLt3-^THaqMLm#7jx+u^>H*XXOzqC0Teea@>vKw<l44;!m)GB
z$Fi)4<8>hOIrqbx4g_(&P{ir;Rq}dx*fREhc^nUqvOa<p?9Ou?V#R@}to<}`dREgF
zBvvtWYmqC@8plBHc=_9`E;$lVHaLN2_g6f3x(mKSRmL<?3rVK|N>h7tHCGN#qCURn
z(^%xADvS*aDlZn@`d-iOYm%KHQ5Iz2GfZR!GLZfRz+eOA(eS8Us7hXY!lKnpc2iQ0
zjif4pO8Tj_=MD=qUyQZ<S^ukBH4lKJV6_uY>03qMv;NCK!DkI@)C>dAs{hovFsFmo
z9Pt8T{ngd~y?yELx*-0uuOV5X*Z%_D%EWD6yv_kEj#?405wlggF!=hNR%L)jWdxr&
ze=l|00qT8RfqEb9KN^mI`=e#3;xak0^jfjyE>WMh{Vz}v(5;|S(&7QIu*(Mzzuox@
zbnjn?S?FA^?4N#xLj07X(p2v+#Np|b;$47>eHFf~4XaLm>8g@Db0kCZ*XVx-6RS`j
zZA(*yPr&YX0V3|-m&od(eFui*)H`pO?lHd$KsWIr1%RC~c}c1nObvCzdfE(H)`iy8
zJ&#Bj;xv0*d>>w=eBJ3{f8;n4N%LZqIDo{Oh@0tr2ubhcVJ{mTO_t(8`H>%z94&Z$
zNxn^?CcOITSNr68(MK<*om56+h_>sWJH~`_>S+<`g44iM1D%djMt!*qx*GMyg|Nph
zPI>A=O)(1t$M1qu-_TA7CngE;J8LR#iS8<iIC132Kp$h_fOEf_4#nTaQypKE1HpT$
z)s0PbHgeS#bIdQ<Xx=<rZ*u0LWpu3Mr`RW7jEy<r0g~QoJh^|h3QyJ`$hTVAAy>Cp
z<h;rzC9EDsE_ka{N^9?MhF#iKULhCa4Hjq2rS`j=Flgw5`Bqa*oXq0D_fm9>a7%>e
z;$Lmv|7g{ju8#h8Gj-0Nw@);ZO)w%D@P+0CmUq=IQ>lXSbe(2r1;r~RN0WkMAK8~5
zDft&tQGaOuvhX0|q4G0^tLV>{ICiN-RMO()Bp##PRemE-I;$h0up3r*H!JLNIE<q`
z_@&@<8QD9CYd|rl3+HWJgUXJt1_rU@k4j=AJ?s29I6@v9HBLzM{FJ~1KI7!fi!q7f
zjgP7?d2gB&7nyWWxDy}^0lI8>-qdK$6$QGKS93jBh}4eU=*Q^h#Z?XayT3@-_gAK)
z#mlemGZXu{8NioXZxFyZ?#54CLj%sJgs9T1g`P#0Hbw#~9*<e|Ocui8Mh2xpJ+MV=
zZ|9I?hpiOO>78%%nidY4mM$;%!I;X{s^@94Z`)-V+}qCZq(+rRi;_`MS;M&i^$K7s
zYC#*ZFXB@$lD(d9*#)vVCsp}vQ8fh_zO1&Ezs{hBG?%;<avQTAgW|H2;&O~oo}H=!
zGekb<)t1rfj27)>J)E@}OXa+pzz;VN^~amf9`PF;XOJ$nwtp#^J8#adO;bR=U80<T
z3E-)_xJjlyce|C(89hG`rWs>P{Mwg|wcwy>SmibA1@s&;1bsX`+CVY_%m(8OOt(Q|
z$G>GhEeZ57Qc6wBce@l(dm}toVDPoX4BOq8-R7x0A!837HZ|If0V;i%2=pQHA_HoS
z3GwVGTR}3ojc4*}RHSYC^~XKZ-Psk&OuZLocaSz4Xp<`@?R}&ghuv2jYh^`2Z-x6f
zAAiz!KJD0dW~Sq<=o7I>PuAs4>601Zgep%!gNp(!_stqV-Gx)TR&3L^(^FL~s&Pis
zeOUCrk=QyGH^`z)!*ef3$40%kEsF&A7bqU-*m6ac4u9Up&5M%@MIWciBwTZ54u2RQ
zqN<nY=GPRhjJ)~GnxEuzX2TX&6VKenKu?wkz7r?83PYlSsfE&+DE!W#a-#J2vjaa0
zE6R_S7Ith0ogS=Y35uMnvd1P&B3Y;%X0RfR9eNEH1uVSJD3y66G9^M+ZTPL4rhOk5
z<<-8lbY(`xrdSys-tfEn=9~XPyqC5H?O&j`U~<25GN|MQf<TSlm9{b6?u6aN?jDy*
zATFoy`W>#dwuBVFj~e#3-(wW!<@6HWZ@eWU(y^{+#5KQQ8_3gTnwBJUI8tbn=lvxu
zz&|4FYtdSAAjq$r@gl*O%cqm4$|+1nlEGz0=|R4WyV8(b0Cl^v=0bm)@=G#Dfos}2
z>a-!^S}p@+y$*C;S?r_APruj%dUh*8l?u{dF*()Ns|h?Jvs;nQqNE`qh-P4QIuarg
zw0TV#ZQQIA1l1EXGrF{F{JL<0ASn|muZP0fOt$fZ{u8c*q$4nGV1mg-O|!vjnTBp-
zxYn35ze}#g($E#Fp!NC&7SA=^Clt5R%urp$0^F3nGmOUiQs0wR^{4f40lOLKCe|kd
zo-1_J%eI4CRxji&n}2w6y7w%O(7b4K61Zff2C<w$;N{U9=_|WF{}+4j9oE#gt&Inf
zUX>;_C?F^xB2A=3P@0GcC`gS82#A272nZzfDqTQ8h=K@8iS$nBP3cX7bW}Q_2T0;K
z-S?dP?5*y5_PyUZ_xF7FcmLo?)=aWkYp%8C7~>u9ct<lrFvvr~RhMJoz~0LA&6f?$
zhgA;JUSk@o=W@8b(JmOPg+8B2VgHHw?@2Ue;3fF^da1#KCJNmm(UytttB1uC)k*>N
z=C1f`8`%~!CpU@QJc_`AnX2+SLyWE_d;O{DcFj;8+@!p%Ui^)GnpkAh`CjxCoC3tQ
zt4_*i2_AU4eSY`H`li<~QF;U04?c@G9<hv`c^Ne3XeAI=;=4zhoX2`6#AS`zncNd~
zva>mSfnKZOm2>try(I|5$T#Xur~cTJFYliMHZJFHX8xx{8qlwR1MvzI-Ch21Do|Pe
z6gkh<d|Vn)jyEEyc|%FnfYh{UGPHH`hwanfc|80-b^Lxb*hIJtGP?ga()?^4_{9bj
zWRw>h+Fig90<?AIWY4Tn_-Kb<&wQk~qQ3Oxv^)>9NB-NeLfuBx$f9`-zQhSAXLl#p
z-btLA2yxBRH3=8o)wt*++iVtSWj%KPcAElq!^)>C3Q(*@s~ohby9tgJRx!5r8=Xc4
ziOGncQ8s~<Twcr^dwP4xP*o_wpaJ*dKD*MyYXlRX92Z^(V=h?@t38C<vm>2YlS2_-
zJ{B&I?%YmF@W8kUMjUpEzrY((9cI&n9@&`OX(oDJT$5`;GHT@ARpym1i20f(3GFJp
z)pz2Mka-8=2wSk;lf(2MnP}GB>ckEEw^=Ii98+yNT}5_NZ9bvecRcZz#y*5rvr`&3
z9@M9lqv(iK&sx%6v#+<1C8j6}F$85fuzd}bE?;t0xDzX#Fz^u_|K-w)h&RXhqva?4
zW8ElQKW?ojf?Vfq-I0`Oc<3xxKNNYP+o|;o_lFWW*tPC!cnFlmusD;1AyPK6S57Zc
zbxqK2aZ5yi?s~8zUb;A^MAxY(a-jaDm91LW{J`UAYe(ZT6;d%J=r2!oR^S|nXfU6F
zqr#0@BFhAvIoVXWD**<0g6Kd5doj+ak$7C3TfOj;rQfm64axUXRS9k`kBbT{KMh&L
z;i9Hi0@p;%@R&?HWsUG^4JWod;wnnPqo<)DT_U#ch!Gtvefk0C_SxBYP>@!-0GsWu
zMt+0bM-$JP7`4d4ir`JCKqOVQ(7rKVw~1q17@wHYnss+LA$$FZ<0gBU5UDc)($$t%
zH`Y2v&!$2H&qFek_}pRBW&T@C5IkUh5A?X|iZ08J-+SkgcJYlgvO2I&CG?Q#=UdNj
z&K?$Z)9iVeN~cnYYe{lE*0;HDyEN$=8ZCB+KY}??>;?tII#jqqTAC`t4#lb{wU)T;
zjsXz2R;;!XT(vT0nvr)oOZJ0x*;BOEvD+ZX)Huks#)Ll(eB@-CAlTxf0mxug*w4er
z2#k0*?tSCqR(}FR2Cu}$;aAC~RWg+ov4=PE+v)Aaa*xtiA7ZbsoAEWpFp#+1g{Pna
zpiv%fLx<GfvovlLHqn*rohOyVvzj{lAVF}wmACu_Qy_pPDlB1K-ynAQ3;xIOU5`Z5
z&f8bvX!wGupcl*ldDnGXNp4EG6$S|I<cc0A`dlM4EGmYiD_kif*#{grXB%6(E@*Z#
z8=p6CblhkeLYEbze4w<&65(<a+{NBJSobP@-^q%K$myGLG_tFAf{_U~mMtD_Q=z53
z%!nui6X799;o@ITmCJ;RHHz1?%=1?S$O#Je6l#?zE(yL-QUO@|DPoP&GUAlG|5Vgc
z|Kxm7#<aL@)9D4b7;Ne8JqK~i1darq+?NnzD=&6~{i3$%DhysEGW^Xb0oqe5W_u`M
zP?^g;_{vbm%_|pGbw!>y9^tdU*z8w8_84*_>SZkxq7zPmI4Fk3^*8F-?UaeUk(osc
zb$&`QqS`5vDx_R!=Nk}CEm=@Ljf)BfG^YsN`m|F;TtN}J=ZAI-m%98kQQ2dhMh0`&
zeW{{VvTlNIFrt}um|{0TaJ1D8>A$jpcWXYRF<B$?In-an#>v*QdcBQ**F{^zOVD3D
z_Wq@r*{Fat{y~!X-H9UP($<?Z+?XKj5ZV>3K4&Iidw8WtU-Jkbe5Ig#_MKRH9jpLK
zU(1FkpZpq_frB?X9F;cblDB%UTBt2~ro&~W@A9eui_e)4pnWlf%xoeMW7F*q^itw;
z2^Kg^u)w=D$68^IvK+-Fq5aF@Voj>+(jr}RoJohY<iCEomjeM=?=tv7FO6+j0FonL
zIwxW6jFwPoQnqH;#xAdgSaioCD*kCu*P}E>n1Yw}RI1yyFn$!=mPqnyxTnVy9J)wn
z?=eL;uQT09A9pcIyfE72v{wspvN1^QQ(yjP=qos@GOd>=zW24-E5a4rSZNhSg*b~=
zt$ey5vscBmRXkbu(HrFTN)#SD4HF~W+X%cuKC(e?5>4Au*#{}G7}pVB!kx^o<CmkK
z7AO;~I8b0voLX&Q%k*k4J?tjiFB-fex`t;smJ&gvco|3uEb#((C9{+*&^zWn%+5AF
z><cFf)CIR4-yxN2ftuaqK3p&%ELo1|0fNHx<~s|Zohl8e^UXhuoI74zeH5@lUPk~{
z$bIBo!Nt;(!fI-)hSn4wPQI8~(_m8NC*<5$v_4uKCr1beg}B3}QAD0C=Z{Iet^gR1
z1~~VxTcLjF#*4xUNh=xPaZClV=Y_jODfG{SM|y$C#W!$`1ff$HVz>#66w_=`VWIJh
zPOw+$0WSRq5$V5&zQ1}!h#^|x>#+bFzd)*(K+ZRe1EHcd|6eqXFn_3gNcVf3u7BWH
zG5J3l`|n5%0OpS>D70ROSvl=<I$$;zvxb+vzq>I%jP`Vd0x2%mtD3)8o-~94h-TJw
z5V_cgBtnKh-@>CvYBffr8sTq{9tLTu>o1UPbUL8Xq%Crj%w#a`kDDpdwCej=^V*sd
z4~ZwpzVK0i_VL=_+xa%2MqFmdNO%&y%UV3K_XW8rU~YYP=X+=DU-uRT$qpV^6D+8f
z3|}x69z{ve>~T=MkYVn!z2h8wwT_vWX*^<B*#0GbO$fRP3YtD?JB3bASF3!3(AjKD
ztO<R^-s60&Bc5N|{pS3+_>xKrcB@uO`AY>BQb*6CC+?m&o0Y-!2u2OFS;e31s+fwf
zB(ViAhrjq_$Cc?%Omn!+t>NcxW#){$ReYw*tooz^lW>mFvRnXo<Y7>EM1Qjd&ET?Y
zC*Q8y1vi^Boph0tXAYk^$Lv7`Fy7<fF|(TRgBA}(Z8L>d9<I$ie6M71S+@R-DOv0R
z-QpFMxcv(9*0|J)dT8kQX<{PAfe9~M7IKPojMTmNxiyTy`l{1)`^J;Ew=FWR_G{f#
z&DTg}e-xGCrQtg{<8t=>cxDEHaqsL<aSV~87oLDAq9U*fPf@iTwVy(U^BFa+vTFq}
zDWcX+R3i6mT)sgTZfvaU_?)E^C}>8!637kXrMA9F%|FZR8MYgS_vrU3GE_QpP2cE3
zc6wOTyI($|)r-@}=I=e5D7_>lo*;zre#4E}YsLq(6B{q#yA$c{lZ2upRn>Dpwzp6q
z1ifwp$Nnk7f%u|Y6LE;tVsQjhESH(Vu}Ga_n0k&Qf^u$Ifb)yGSlz8Mbr;bhI^Q?O
z`{7r<T(?)}M!f|k;LG7Sg>9?;X8=$)<~X<b!qc0*@!sB(`r5@K>jyPGc0XGi_^}q<
zgtkN=XWW*n0EWpwe5#ra=dYF6fMj~VE2oMz3ThW{%(yymDd+O&mCKSM^z!RBA_g~6
zOH-gUEg{TYO<4dj%+-`8Fm&qEC$03+Pi=F37$G$J!I#Us!Y)nUv9B8onrJMH(cQkW
zmZ?`<QD#c%%&2<bJ!TCMHPEUY)Pw~JoM+SMF&eyC81}H_anbv{@TbM5&{SyQDuIP*
zGOO;T{WOxV723pfq&b*nya`sc3@4>7bGsCsm=~-{;Xf17&9-N)#<s|l_@}&v?|nsn
z@dpFRzR4e}wb0GqAb0@{LdXOgY)QchtllOVThYR(hS0Prut*2|RnU^+|3*4G9Gm=j
z&tN`a<2=teP*xK<FjGd*763|6o#yj<Mig1jYm_sj`cM!ei6>Wmc@ty%7T;_ThgQ7a
zIv<uLY@mN8?lAYUh~;I?$*+;c-6>cf*tgz)1+GX-cmeLwLNDk5K@AtIieH_A^Z>8a
zVcje#9a>{58hpM>A&!2L)jxl5+?^9WxYx8M7cA{`#QniF94aI)Vdv6N-kriK-(BOI
z<rjEDypwK(QAVX<Y*xdQ;jOy<Y3%rn*KAZKMd}ryy3D{pRXwXBwqN(@*7F~)iNzej
z+-5);6G0;}U0y^JiaFnOiKEQPdulW^axiI|O7+zgvM<lCl*3o-9?CtLn2LVYF!a^$
zPCK}S=H#@Z;dtN4Y~`cw7K;sKCbgOA7am<L>56+e<aSel@?>GQpr|FiWLuje7k+j7
za1Ur>3~kY+H`bA9u8o>Wz0!5U5&9wRV%aEHag=XGn^}=%T-?>qc7mb{-fL=z8-zQ(
z-$8UX(lDP05_@W)gO~dv?n@Fyv71~UwrH$V^nC?IbwNk7<!#z2pM#PuhP6!q0DzCI
zPQiU-P%H#S(>u5i)7qmI#rq>AGBJ6ULVk$}mNnOIrbj=h=X?{PT}2}`_g1t`nFmaK
z>oOhgr6PQaW{W{WXYHVz;rpYb4#;~Q-Q<OZ(qXYjxnts=k^3*hPa`_h$ll={_Vv7M
za4o5C?s8R%Cj=^(-%X@kSvMAr)Z%HEgjBRora9{jV8=8btS(r&Xh_>ji}CVM8X82a
z%ra|SwV&z^PdM#mOMF9+zzM^X?YsnD*s|aguf5Z-ovKaB+7~<2bv-_b`Q7b|lWuJM
ze$J*-;<2F5_M4J!pJYfH(?NWkYB;>lL8#nrx~L*Z_n7<ShRc+hsH6_)Wb)iWQNj)#
zT@h8aNUpl3?mKNWFw=tuO#p|1l9ng=ZPDy_6V1Z&2eVzfKKZyvMW6pdI^|wF0~Vu(
zZVvqP2T^lS39!6GJNeY^MPQoHO$81J*RE*mI8$650P-nI(#@-57Y@%+?vac!L+`zj
z{FRj+G-7U?R*qkJsUWm2sX-$<&1_#LD3P;Kcn(sXi47^X6@G%$Jr2PoHZy_V2RMny
zTv+j@lJ4{)%yu|Z_J-T;8SDp?qFChVxvU`07A8*CPup~;$Gz~_aTy};glxtHw)621
zaKpMdbo^9}r`EuVlLKPx@%X)=xi7LCTn~;d^WV=UTVe2)UpqJ405ACNndPbBq%@;6
zYldW~Z}m?m*sTmKJob&8=mhdI9ygwbDNV`iUuCF%-tczI;hreUnp5Dgn2^sS3cs(h
z9FS-wN_9j!3eGVHPPw~&WV3yRbSxZ{Ub|Q6O$dFda`t|}Hk^7&`^dp7p<FqV*s3cU
z4CT&nBX@gECtAFvZm*N8cIvhHgm#{?;dQF==b##>NEssbIL_zf^u#r3lfpH|(Cmf1
z1pix}tb37Bv&6!K_u8qj9A2ia4hv_L)b19?WE`uaIK%svrry%;zS0#km<pW2-DcVt
ze{27g`_6WPr^oTywL347`J=fVqfw)dhg&Zj8to>?MBEg*aGF-jz)w`W4qDLaOSt~x
z2EKdS&lAIo|7uto>G{4YWj`QnT#It`RIfPp5Z4Rr3tv>CXfe;VlM!>#`QDvsUTb;o
z2l_#HH!%&&**rFtlxK4*-5zQar}z9*H;=@@Je~vUU*88(N080ErEr&6BG2v2z#l*c
zBnP|~Ox8(D%>P)HCMaQv#^&|EMRR+lpykaVr!~8@#<!Mbv?BLD63NmrClyajYt;%M
z#z?&Q6o!5?+Z~;01{N*i#~W#_>P6@KFODb-X}?)HjU3sj1AhNK)QJ70dN3|{8rn>h
zXjIk>f914HS+nmjbLY#-GJW@KaSJr%gHz(2-ZV&E_I6ii=DUihz7EmZ314X8a9vbz
z58h_lC_qtio9Qs`IpKYyQ756@{JgrSyDlHr2=!68wKPe%v0;Hjp1NPy8Z-cP$aR$m
zpE7+VCRgL&eRu69hRl&?dht)_9Ox^ei)6oiT5hY`Mw1cKovM|*0j6B1%!u#wJdVP(
z?CaFHH|omd1%4@o->pZUEOy?#P5-R-dC;Ac?@KC@t`(M>?MMDu+uo!p^kG{I$sXx%
z5LW!{Z6%FMgGWJ+sgTf7LB}&pXZg4JeN@``nbo{f_Z8=`NWa=M1TDZD{CYKZb!sKf
zAybDTr(5X5xjR`dLUwt1R*#&bTiCT&9^AX5x4zCeu{3O(MzscShWQyX=47}l+z49L
zE+g|TEgU|&ZyWfa^=WJp+v#&b?ZU~d<9-!eAm?Pl3Y3}_u#Wz(#uqD_8?ZO*dbA!m
zOFb97+K@RmI&$r>y*fr5$fA6MaLG7`)}Iqz8pdr|Y_z~xZNkR#CciknlTJVF^LR`r
zB=I?$F<EZT^{e4=<RQS^x!ERXEpzPa5cG(}1YPoQYDb~IVxeg0SkiSPT4arrRYA+g
z$=#-DjFV_R(86)@I|p9^C6En(1tdKk+xcpgvNn~HqM6Y8p&~KuexD>wnkt3G;40gO
zc<IUq?(eR=-}m?j!vmWvMy;z<Gl7)82!L-y!-!GnRwlf-Fd^Crlzx5efNh3N$8o{T
zeyTF~RowybNSTdT)}%;UtpL_vV(|v94PVvWX~gd<2YRT!a1ah<k#szpFxzM0$a#z|
zdZ?1{6r47=uz%DVH!rSby<7&4H}_8?(jUEGbc@7@1W7+>AH+}}&+UAJXx|gvegq7;
z6=wI6%`zQm$htT|6n~d1x)koaudT*vbov7kPEwJ>Y&GZ-4-O>;z-F)YVivoYi0q<r
zAWt2wvr4=l2ikYbWKJ>dvPnDbZTNqKsD%j?=$$($l3pYt`reBH1lylrTSGJ7AR0=K
z<Npw3=R`KOEdq!A&vnrecaZfaAeQTPh3?bOqxQIs=2L$fyV?>}0@ivU<j)UM@?dsN
zfcf8yJh|1e4g<xEBuy9)EoCo85(UC)Tzq~*uz#GIKiU4T^<4a2iuzY^`me^=@SuqL
zCxt@b`mP~KwBT+NZrwkD{OG0wGUUy9)}~XZxKSlwKQ7+^g1>bX{Kv1NYmFpY4zRPB
zeF5ah_YU76S?0<Z%f(*}2VK+sxCd0;RDTZ!yd&s>t2qv!77K9I=P1kmNR_jDaW3x<
z%4xZ&f7~2-p)WTEyJYe`yw+qojgugR0hzT2u)Rb1g%z*^0Kv%wE5b0qPF`{?iW*fs
z13|sdKRQFg95EX+nT#RF*c!M^J273XJ-GI2<UzI_4n2}@r5NXCOPdiFm&E)k;Cazm
zZu<?YOZcdL7i7J(t0$74pxU3(jA9`4@*FT0tmx;ZBD;r{T`hK@G~{Gu%<53G-@Be$
z<8-0dOZ(NBt@5#V{@3~?qMM!VT@QC2EJ%C1R(`T>?~t+KOWN>+yL08&oZU3qCW@Sv
z(KxB?=fz>e^;q<_O=y7U*Pg8d1s1P+H;x2d>hj<3jXhuQs_G0k8=f&*76-vxe9x9?
zdo4fW{kfO8*EQtCm(m`eU5o6ltx)#O-qE&c>&S7uJV|~y3^x(*y;=*3g>J*wF2bpR
zeZ$b-<$BJZVz6BFLdW8Wis+Y6nsuk?w(0XzDREyg2Wo9NV>*<##J!>}Man1%BF}Fc
zB@l%0gE$P}DX}z9&?%}opzXfe#&z7&aa=khkjCB=1B1s8unbr|u*kGHnBHHc5La8T
z#(M2<-F0dYfSQcSbQC$#Mz_TMan}rq1;*JJPwMD@1wTb2SyxrjD5!|LexdDb$IUM#
z8S1W3F}9I2$zfH>s{>m)QfmDxGTwJqxaqj1?QF`+V{QXQZ|kJ7w(D2V#oiZP+usHl
zpI?EyM#Dz;ByuFb-phKL3dw&rP9XK1{E_7Y<1$T&x@oKt1#y1#{{HKRsZ&g&86;Y#
zw~aaO313Nv0Q=b1J*iLIoc93Fo$-m|rt=zSW5Q3C8hv<j(`ZQpsD#sa(Gs8KO%4zg
zvXGAW5`)8w$zi#PM?-dNyPjuOrl~VKw$d-dwf)<sYZfYNTT@NBwnKM~ab+Z$jeQ-`
zu!V5&^v-hNwD|><{+0Xs*soA*--YM9_eYtJz6)7DZ}o{uS+}zibDzZEbr2A{$r~-d
zTOLJur<bFr;mX%2R`Qb`D>bbq2|8YpljQ(>EVd<9qaO7Wq5|D6P5_cS8Sty+;UA45
zM1kA}7i?A?{|11Mqe$Z6!F?6DFO)%<za89OABb<fK*`K>NnjsA9PjVEO_aFi#~y#{
zqMNN1?Tf_qAp?d6i7wilz?0{l227Eo=xvn>-%|qkob<PC<d6+%2mgh9pv}wAwU8n7
z{+8BVZ)-KLEU?tJ?;k_<8unmt!oX|RAjk<X0o+Z^6quJf6sg4iLukR4HQPGs#}A;m
zPpCp+BAWx<H^>3x-oOL?|NY+g3HxuEm!E)<eezZ$P7gl9Qb*#g95!ejO5;ysJub>C
zqBQtng^K!xh+6l~D{H+^!|f=XE|kJc1>X%6lbM!B);KOZ9P4im6^aZd|GE<+bus8-
z8%2iS5dmMTxg|LH=NZ~Q(_`sTBgufb)$;p0_+P-=>id`AD*&~EfhTJ+3s&^HYDSmR
z4p!{e&?|(As^ws-+HeToyIA>9;_KmBwC!$^`~GW{c77ua2|RGc>Y@AE1m3=C@7tEa
zMj>?O7M)jgN;Ypg9a%W65-I>2yLrx!YNSs+Hbd=Xu*ShNG0`du)SKu!RA?H{FwnF@
zmC(ajy@YJn2~7C7`~0;<K8~is?T-SD&N7jSi<crcO{cr*2Z#NeJmu|+P926h+2+C-
z-J?|35dK;Cj7GWMyCzq14wzv-HH>B?a8iD@Rt|t>&w1p%-DB;%G;CD*NHP>{6FEkM
z<x;1nS$q;JKV8d1j4Ux1_tIMHW3LWSYHd=oPB%FAxv}j)Jm)Om$3?iImnPnLk}15B
zhmRW{k<prGS2`@}#&bq7g#DpL1YGhAPhd=tRODl$M7;g>MdI^nD0-fB0yp88ms_H9
z%Bs9H)uzG{Xqo3g*5d`CSM5)ncyK(Rcg&8fwchD4KCbCpOVp`QZ}i;dZ4;m{Lb)=!
zw9wZ3NmCtHkaI4z)pmDyzoFKM7>$;`wPrapk5hCWCBVW>itX7W()anRZ@l8@B)_PL
z5f96tmt~Wsw~B+S6I6)FwFe0X1DH}Tx5-g)jjKZ1X4#$|vm!2v?fkZ5two0=TQ6<G
znDA8f7Oc2>96U%`TSxSQ?YL}lY3S|H1U2kuKi`Vy-e2VJENDr)$O%rU+&~5rEGJi4
z@rK>awXB2UPF^sVtfif+mEkVgUyhMYGUR7c&m6SO^Lm5pHwHR*?`oyYw7d_f$pUTZ
zNb9TUQYtFDE=DFZqn;iD2j1KXH$q|_#XqL5hMs)tt)!iMk4snjGteo(e7LgowZ`K*
zX31dx6xK}5)oD`nS^~S#h3)W+AcN@pcUn#K^&jzN9m*?d^@BMQnQqjUhlxwHj<clb
zgmitD^0;-saMzh**EsBH!_oF}Ewjl2ORqEqV%18%bW(a{KZy^oyX1B`R?5R9kXY~i
z40>u+G<(@UuKX4Pg%8DH0H)KNdPq@zjpL%fS^sprjpi46dYAn3Ru4}u+}BjlY!l?8
zVFfi)-yrnW<T$jN6O)%>&vLOSjo^lbf-8Uf@dK-mZX1t6=nI4oMBQ2aT%PNJTz1MB
zvqu#fHJOTPh^)=Cj2-5-8?g%y=G;8NdC-l=?Jbg1`aK$ic!j|s4#m<D#|NvWwo}{(
zca9z#v$-+&anqsddT1+u9Q?vUrFl?A=D2NT?y>3jSA(=YrtkR*>c7!2c*4Ub=xm_@
ztS;XvoN!|~0_t9ROe4_IrvKq+#<4c1huPHoth1sO>gyp9F{R5^3PyMO?&?)-ajR3+
zA;Xjp48UxVwC>f5rFwQ)<=|U<8KP$V+9zI=T}+UF7E=&Y4?WE}8A*+X5kqEEjPV{b
zOd-%a3FG~(2+5(XQC%5Tj<SlFLq|?&={;+!7o6qOtk0jgP6U*UJE=}hQur)=978~I
zV824gwN4(fb;}1w4h``<w^2AZyCsOqa9CbmTAnU04!!9|aV=8%YIaBl?b{3=$Rt_c
zQ%ljD&BK@RS<^5AN}~a~Pi7C_J9TLe>42ukqs;Mwu4t-}_UyZeLxcBZ2Fk+Sl8%jN
zx-HYv2we|<esfCeU^Lm@Nnn%tm<9-Tue~l}`VE~9+4g8V*gP`dxj?^r_++*NO7@DD
zzV~85#>l+jUSO02&Zz|%#HWC}2xZ=6I%J&6c-=s-|8g`jwXHq!{*vMwj^{a+16DVO
z<3NFN_-psFN&4Wa(k$K$f>tt=VV=V|qgdzE8_D9BjrI6zPcDt_JG6B}mh7R0(4}Ft
z{W>0HfMY}n5t^~8Y4`LLG+jRQv@9OF?eWP#IjE%Cd=ka%9z4U;m{r`LFj_xO=l2bw
z7kl&eHT#{=dtj0x%r4cM>0h2*S`TUX`Us+%Jap?#E21Ap`DM*$sVL7&1ji7rC7X21
zD%t!kbC;1$>~YP+2O>flPhPaIM$^>2HQ@+XW+DwuP;Bm}>NbfxCcJlY%NEMp`)FZo
z))L6wwf^e3%<WE}D}B_fa5JNBkll*M*Z|>&z-psuH2FvKg~CfV2@f~;r7lf)yuM-1
zuKyTyT2@y$*~P!6N7}1oAEsjqH~`C3mai8279T68<l_tbbt)6)#+?d+8jd>ZyL{li
zbjzRI|9ND|*W?uUqN%8Qlpl(Z1jWAhe%n0-!(ViCSdQin=87mt(T(S(3XwRZP5FuX
zR*6bGy5C%2s@qplX^N(r{DrMyvQu#W(O4R>ymaQKwGSzRpI8nSp~r8#@xA(*{1G=C
zw5dxq$At{6mb)ufTyu)Gtkl=|nFgyR?TZSr^L(!cJUQ&m%(5C1P)y6pY@{A&5xQ|-
z9y|Zgfv!nXOCi?8E}6n3Q@(w7_RhUWU82J<ht*vfBou^bc^FcBBmDebJ}O*(&UB}6
z>$#w`BKt~KgGTFz#ycv!J#?q9Q`wC}4o2%{>dY`v;hkE?xodI3;Y&vPy<0*w&W@ks
zt*+mkJRErA)>A&}`RhxYvF6CO999x906Ji}Zld23W>eMBs0kH-Vyu;ze^e>a6v3|K
z*wNuse1SDq`-s+~r(^qBs#k^t;>N2+MHS4?bulP+NHWwE+p}ucurWJa=c}W?o;)0H
z@<9yo&`X&3gv2m`Pn?#vIE#PG-Y^Vz-_%tcQ_a$4U%T0KF}y~x^BY7`SXgmbd%CS!
zV$E_pwFXbMcKw38Mq@HX9w{cHZJCEFVxYAnc7E2-$i^4m5q7LKwT~&uMR^@90m$8^
zQ#Q?QH={sLiDlPNaoB*_Y1{mloLvItA+Oq`y4dQvn68Qns|neLKizekqzcSM&>?!p
zMe)42Y0_7H+|fRPq0gnU7oXYgz3SgpzMy$co<-F+UOR#N%4Zm&hH2_*+*RTmWx0iS
zr%g@IddX!m`-j?SYYE@E%}A#%M6+;?S#rZO(I3Hs=bmy5b<QNn;l|RU=vLxi7DwEY
z>NsQcA&%3si(FnogWNZ4V|Apv-a-^t=DV`h9pW59u$&QBoiQEkkzG4DtY9d5f8<sv
z_2IL1RJJFb7p~p1=hxg>5J@i+nXVN8?He!+XmWe_Aw?V4Z6ywsQu~xp$a@D1wnOfp
zR+^4UtVxpBAWRT-Fve=V{mWjD)?mwHYj=hn(m!e2O{Ewq%tZ8hrLP5%=x!8nou10H
zLGUu%$M9C)ng%__+?F|B19m%g{%8NGvVawx?hu&}W!pQpS4NIgtrTWSngZze2u6fl
zV#UqRi*pJC`Y!e<(oN9`_HUZgVimM+Fy9o^J4LDcNXa-#tSPQtX3DZaas9KYfP5P`
z{;<jKnfiarMSd4p_v=HpQ{rI6Cm8_Q&j-5nzrg%JC80A8O$ghA5j{o$<WZ8hQ2)dB
z%?1FGs2J~+CfVoN8CA;Qj$=uj;-4XtPvbAiGUYSa>Ya!BLEoWTQU$h8fEFc~5ti-H
zayVVi!YcChLK(8MFjgbt=%;nLL(R_gdRnYcRO!zY7}lXj@FYpX(RlV7)y^-^oo&1I
z1{<xiLLFN^5d|&$TF->lzT)?Ox;0o}AphRe%)HGjLp!Alt%4egR<fTq3d)Xqj3(n_
zfbSZKEKHHS^%kG0B439osoUl(I$jcJ81MV4yO#+MILS1~xko^f8)psRpjH5M24ZlD
zq+kaCdr}%s5L_?{n5iy!VHS~saQz#^cN;|r)*#5g-@XQDHkdFB?tAO;=!Kf9C7x{J
zp`U&o_0xY4-KZk*eBM8a?14AK@lZQ}`8-56KF3(=?$P0M06lFPw#yjhxKDKwb`d${
z^j)3i<>&<v-Nu)FgYaaK*g>`lzQ<b(BXF7`*E$F*zzdN_bf7jl){ID0SmZh{6!%qP
z#DL^S`fl<7@9#p!Xl;E*R4??WWBD(h?cDb2JC}@2jUSH%_S4~_yH@cJ5|`cpgcQk!
zob(1U^HBgzGP)4sVRI7GXaZ9)z&6h*{qO9pb^K4By&}YM#1O2gwZVyg%srzyO`$JY
z<WRDLy+XO;>Zv32Aya<BHjgY@-1VLpiL^seFi<b{U2?&#IVv1gY7@G-k@*q7@WoU7
zq%7&|Y&lDJnUX&A)Di#9JB;VvpsjFjgv0P{HC%oHBN}Ow>z;J;=Ay$}xWA*Z2X_Ki
zgpPj8@OIYAgH}fKd~vVlO&F8x)8`)D2MqG3_N?qfYq?h&x#&*{+8%i8TL*!N$`804
z%(Vsdc8Co~pZzIq&bSgo@N+^g2B7w6@+7E{ZErG=!_T36VtV3Xp}pYeNh%hmlTN$r
zAe6`ll3wzQ135n=1I!*B*DzOOH4ywWgjaM!ibOm74RYKw8zd}j0KvQmi@Ch`i_suy
zZ}cztb4D?wZg37NByRBg60pqA^ITtiN)D`mFU9W*(f@<@(12Q{HIRIHha#|A8skf5
z!nHsthnHDt4D@QGZ1nm5BUY*JUL?4mx=j)7HGVn8M;Jn?o}1vSrNW2n_}}PN=C|cY
za68E>sB`6`AJbWZ4{ZYF{LJqBTyvoJusGgxExaL-j~buy($qcpV&&1~@z1&)i#rx_
z`h0SnXCGSMit}$rcXeuzsCV~|kB@6TaB!Q}QI?ZuR>@ZFeX(gEQydkQ!(yVE5$QA7
z-J|=@TH+Rn|GOmp&t8)L*Bt(@Gx675vGXOmkthmjZJH7t)7CwT%*@KfSS=Zat8b}5
z5<b`fi{UeExlbC$fYj7MdpLc1?ANK)dd#tHbDa#^K4WnkIXZrNvICxhk!PPyg$Zaq
zIY8Ar1jlKhN0f&a6hc?4bv(Fuo@Ea3Q_u7~Th?3KylKBFFr{1H9Rw#Qh;sOm*zkza
z#=UDc4+J5_aavD2;x2uD6~ET<_0HLuvyf|7G7@~^qb>tJ^XY$g@c-`U|3|OC1p%OV
zqJ0+8HnDwX6gfYv3%V-(YWQV53+P)V^5PG<_FNbXa9_*bf_V59&=(}j>-b~qDO<*z
z^T0c1PXYN-;9}1mD9I82aX{9VHDX7%FCnIOeovqLgCa<Y97_N{FzY4-*yrZ!)?wC9
z*kQIlfS;>Zb_}lE0YEzcUpmY6iEvO6!A^LHYs6oeMh821sbulCz~r4}s!Gq`E|<r0
zOnS9-Xim;LTV$$1kV;4n%;a-evG7v+w4Jg9q6Kk$Kt@xX4)*b-GarzO{bczO61I0r
zUgu?s?`PQY8qaOV@0mJVY{a^kGV#r3(Uj7oaor+W^uaUYZ{n{28Hy%O@#lwvtUo1n
z849uOJHU@2`Y;6&b!aT2!!(N1PT=y|=_o6%l{9wlS$?l{gNQO}$%xpXx;E8-m3%S7
z1F6=l0*`NmUa*`OXEE>f(s^ZKPvXIp4NAYqH<j)f7UuToa6hyHy?_iB814>l1m=64
zBm$+l$>hg_tzpW%3k=gBG~l}x`zT2Ml=ScuzOqu&aI8G9bwoiAP1GggVIm5AIU89t
z(HwDD4IhkzHlmnERQtLevfER%0_+p)?V}+sp6c~h!k^U;S)U7%)GnE{UVHqoKrdkr
zn(4X$!X-4;bFy`5>?QJ^m)>wjaYa1m^23jkvM=2tdq({jrCN4GkDUE9jX9yLh^#kI
zv|r;kgVG``bs5LTCKgUPOb@;)v0JsVY`K?PT3Y`}>$EVhrK*&XG8u?=PI_6dQJH_I
z$lm(~VK%pbFPthnJck=A-ifPFl0f@9WRP?7-@dOdq$)eC&LsF0<SI{%Y34oA)=blw
z(FN)_jl^n#wkxi2+otCfL1M~=IQAZ^sqZATE(c$@aov~m!G3l}0;kz~xC7$-1nv?%
z5K3FyK0Of;#nHsXc3hA>KWm>4d;USwDRQ%zvDe&8g!O}WFjo|rCM75c(zwAae*#%L
zc8*Ity2-HASxnFGaRK?Ng6o}oMSNPq$&2>$bGKgVxYXa+IC<U0&B|Gg^^hR*u~weD
zY}e1zzNLgf?n8JfwA9#s%kV>fPu>4dyeDgN2Vx~<K8Ta<kKwdl8aQHN$YzqJrIRP^
zhp?H9*HcfON1g`pMtFtF5m<z``O*xNNL<-A@<(00jX6+irgc<3wEG=l5WFynHx#H1
z_R4&NbaWMf^`IEpSO9t`?;XeN8V|vCWox(7!x_KU!*ENwA0LoH(Ii9HD3VUbk3TEf
zDFGehzv~$=HW7nibIIQzpKWe{2oDRJ`iOe>mB<1o<Ss%9DR56cHNSuQ{=OQ8A}3QP
zaJnE-rHgiNtS^1cp2XeCZ4So=8<hB37}9Vh3DsVwUsBEP(&A^dNB(sDQ&607l6BO!
z84jzylq2y(?0TIbCHx({lqtK7ithy%@iZQQ0D>4mtlFjof&XSI&=ZTn{PE;82nsCQ
zJb>P0{|yqKY6<kX?N{%f`1t?by!_ScW-x`u3mYw&PbnGl+FZilvJr2h=TzGo+kVNY
zA#i%!OozprHhEtei^X<eMHSA*(zze9j66GV@|Z}&+w4gQC4`5}p!dAr?+G*DJ$22S
zEOe&wGhJ!2&Uv6paY_N%R0t#T4348R@0++mnH^zEN75$NM{;WKCb5U1pInzex6y2d
zc=p!CRYu^1<k3Qr@$rF3M)$?byEWP5-}?VmnNHdbR5+Bjnq{r1jtkQW4>2Ua+Upup
zUBTfi=qRt1evdb{<)hWT`x^XD{O1O)QcX(KJB5<ufDL#^utaTJ;)zn92vS2?o)P5c
zC%sZ0#(#qhq0KISaCYW<K#w}Rdn1upURGXKT^6>|cRh_}&_e!@zd#0kjwmCwK(vO*
zG}0D5GCK8jSqk@Mb!xSV&g{*HQn_BT?G(Bfr;Aiu*R5fq`PcTM+Mxa@9-u^0uS`02
z0rV9dNH?7b-Y6i|uN@F=XJuxuv+Wieni2RAp2RQgbJfRNj9n`6Q(8+w%)YCd&Adrz
zBHAvYawyb|frRqp>ZE95ENm8YJd+>!D(3``AET&hP5O5@@4$cd4<qo~N{#hw0V<o;
z97{I)Vvd7e=4b`uMWgM!b?ilrRfhz<1_!g0qz55AtozNSu<aMbB7)@_T?-P|0V=SI
z_3cTLgXVb>B`LC$qAGpY$`V)n{FNQrj~?U1YOka0Kxd4C!~l#n6SbCL;qkJWPG{Cd
zu8M;zf-Cz<6;4Sv4QJzmVuE&B*bLfRW)Mx(Pb0gxHHe9d!uux(Crfe4VegT77V#BB
zuHq>c>#Ij*%7geXH3h$Dhe=7o^!3y{j;nlc(eU@bpR2<Wt-~|H1ok+RrkpR}*jYpO
z)SR^<p!v$cNX5W4KsA*DlE_|CbK}Q>%SXVX{!<5^KMVSQHQqf47Eu;Y;1z-L;~pJ5
z_X{L79d1(fB&f-_UF3?~JqT39hiPD&EG8Dr;Ajlg!O@(BZQe*XWn)PPe&HFv>ax0t
zb@9e;5OYV^=7ZrkMhSRsl3F=(F|U;r`M7-Pa%3e?JpDrz+3!t}(H9#68=7=^M-M;&
zT}c3xEI}j<|2#-<yAR(6b<43#xCv5o9!QmFPNU7x0^gmDS$_g}Tng;LRP~+B85W@g
zuH1pY<jMgjR#}sn3%eH;1TBOIEjtlR1P2K!Z-dS~in!ZR#u6}<8Iy8M<QmS*PiyLW
z$XWdH(d||6Wcs=%9~>eG?8|gWhZod->bA*lTjrc|`@&oo`GlE#z)-FM!)S*pW+Je`
zryXn7*x3e_Yq>_-iShQQC+J;1wo|-fD8{LB#NMR-1~I9x3?tU&+4WVH%UQ04lejfN
z|4q<$@FNUIe)St<2G~qn;J9dE=0>SVkh@UABaDr0o|50nslWW~jE|?LJ|HB2+EEnV
z0g68%N94W<5M71~K!{^_X_?iN;*)$-i^NZ#0f3bu1CS|?e}iP)gW}1uk!x6DYI@Yo
zs$fYxC@=cyL)cU?a`M&0rh`9_hLT|-DZq=0^_Dp9e}Nsr{P>XuK{krOnnH?rhn%E*
z30y$??MGqYBzanAIDY&HOh*tn7{54VZz&T2G62IbBf5Gu-kCQ(Bi#P^Gbj!V!|6Ni
z$pY)@!xj7u!iyfkifq%BKY%`g|MXeLPZywMUqOwp-i`79AJaWO`wdbCBe2R{2rY49
zCUu^vO}!`AJ$qx}<h0~_=K4%5WHvQ`tbMw6@_QYK11bn!l91wI|6;UrYL(sYP^b(8
z`?CLgF2~U?jUu$iLnLWU?-mx2#k*wIHlmw!v7RdZBua2QYa^%#8aR5(%N6(t4}0sJ
z3)D0`tF8~lgyZAy)5M+_D1BV%1e5Hv(T|UBN$vi0P9(kXm&L*9e^xXM808{xLYl@>
z1mKyAmyuE*?)@9j{KrvFJ(%TOdKk?g7K}f}o1cCRNMHLb6f^CTf%tKmiR2?{Quy%q
zTSa$#e=Bjd<6c2!@+&s}G1QzT{4o^Gv-kp-)B69Jvm?t7JtplUVtrHPpcV6De|QRv
z7de@LDspSlWzGtFcw@zjJX`ztesk|dEqfQ5_bg7wik&eT>a13=9j(bk5%h=ejcl7x
zAy8Vt?w+7?&zLF;StUq;c)P*elEut!=ZvXhjCau91=bYmjbk=y;@qE)WVfAt838r-
zI!H_;(IGmMIkx2*Iu9r)gkBTx%1}N&aH?%prk=CK<Ks(9s+DZ?3){aA1^-pZ`1jv~
z#!TRhyLV+)!DN1%vHk{my{wASQD#H7OW=@1Uy%MG3ST0;2dhLE67c)sq*iVTqTF4!
zJv&?uNec;9q$LKBHJU$n)#ySExQYH0EC0RO3`>68|0b&j^5C%>;VC!R$JI9At9u$f
z9uxKJ$IQMovmyD$06^hO{%%p^UC^Kkqq_#Y&77!y*w>A>E^co)HMgjSo)GN>X9qkr
z&~vur-v<6=Ugvj!W*zkeL!rz~@WQE0G=iq+d#ZPnt=q)8y+@W`*V8FgXFQuV!)Y9}
zzHuftYcTr-)7J%0U91g>#tB`i#fmvZB46Nui#`c1*x<&%3%efle4&X_&A-w;1ouNh
z5$uQ_1RI>)>-3O;p6~rr;;0?BN3qb!w8L&kHx+YS4R3XNpU9raBG9*-GZ%&mEJn;*
zdUX!Y6{?9jo^G3~1AZa1IyJg=6+ECg9T+oSqFHw>W1Dx4zLsX~B6n)BrF`U&L+h!8
z4f&H{W0TLMg<tEPXB!D>=}3#ZX2*$oD{V9z+%SPZaViJUTF?zqqZ$oH2=X<x`~hz*
z@mHpHWK&kj6ZCU=!<_-`+^%GX7y~a+0A0}Hzg{R?lkk`EG=FfZiZg7X%PNh1<}7`{
zlPBxvBByKKUZ0U!Dbh}5W7RYLYZ3HyCv<M{d-)QEOVbM4rmhlVh2@cFdp*QHG(o6b
zbPzU#hu}6$CP?7`Bu3z$AbYs0EP`+OObB6bHs9QOhJ(bOw)k8X=)>lic00K%&$iy;
zGE)zR*L}Yp@aSdSW}whTSuA^mm6UjAf53TNHH)R?UOkmNiVz)yqWi>l0H`RRe4#8-
z0o09z=Z6tT@ZPLJ3^q+h@;>c!vrt*J_8>WU)j_LHk^PT49p@&<5Dl1O-DT@(Q{AwM
zGKr9thj~XC_hED+<zd&ZQ0s?2%hL;Hh}fY&`$!!!yoB}zBA=pD!9ijKPTam)X^*rv
zMU!l4Oh^yQVAP{W3tdgWAror_XTK^5q)R=Gq97)~tYCqd!r*`jpkFF{b=YYTSzMCy
z^uWS$V(JU}ke5{Pp;xF3#YQZ*uCp4EeXm9M!#o@P<PF0Mw(wKH^8jFAEW;#npHqVr
zW2SGZu^1JIoHYWjz+%?~;78{<88ZDaVDO*v1^-|I{swSLZT-0yoICa>MLZ%2%@^mg
z)5AWz_(AMq;ri!VVE$qpp?izYz>iAHS6~syPY*j0`+FYm@87fS0;iY)g#2+z1IQ(*
z*T^{s_-ogn2QP3b5C6g$76mJm%`Xag$O1ub&hIZqV@%Ca9~Vjb@O@3>u)-?wtm*I)
z5pnt#q8RzX4JTH>(ceA)<3;@|+rMdFvQm+Bqqg-yG_V5D+N^LB;)~yE*F~5JDCklq
zVBwDeXJ41SB;F~Y+z~MVGNNs#|I7>c)j0ZpvS$E6x2@vEz7mpmz*@89TnNgV9=(L;
z%dEqeQ}@M?jphW2J`y*wK^JJ|p|%~1DD$?_1>P8BoRhQDXAgwek#1DgqnU{ER(WzR
z<#I56K#%Q@Pv<{zn`{?w(kPyTTN{Y)5F=&bdyWI5oBdNp1mIax0lUr(4zuqe`UB?Y
zQ+kb}#mpw+VWc`Nk`$Lo@lXN#V^P(g+}!_c+-UG$YQg`u3MrONx7Nb~VLa7}UJ`4j
zjk0OS-f>rBijN=neRi{&<)!hh+psWMgVD3-4mbz0UP}3t7spV(m-GH%f^K!s*9SGl
z-j{BN^Ya93#9Wp-g;+4V;Okqd>Vd{~oXB#x+7F|9NvuWQg^o-wC{b<K<6#fQXKq2a
zyPl`1YCd=pOsOsZaZgRmGK&v$lOO|(SpWi%@mbhK@)Yfvg3;D>vX$6WrKf2=?}hi|
zVJjWTctn6X)&xGPZS75KX4;i07^%h>nf1M|2lN7*%L|0Jeve6Q6RcSKJkvVaWEnye
zs>C)IYagU!`rap3OjqARw*5=hiJ1irZ*2qRz&_Vh0gHktgPzqF1NT1f$C+L#x;UYs
zszO-2gb^-@!V7FCPA$&Mz<n`CTvw?+ZRsjYO3gmxbA6R?53=;AAlH9i2w8u%nx@;A
zq%dNSQ)`G&U-5W-?_tL{)=c=TkZY^7`E8?fFbffMo9+_0+0sJyYm$XSWWPc99tMnk
zg8;kJrjK+0VT(0K6kbJIy%h=50Jy_m|NbS^9iBF#S2~6tgiXlFLe%C_ThtCH$^8B?
z?57bV_8Orv35tGdGz;BcC9oTi0>oKJz-0uU0ZQW+Bk{1)Uo;j!v+Yt5tH8tZ6aXZ2
z7r#L;doWOL*!#<|Ox+LiF?+je2$gRTkV>kzhRvz=!d6L90D#{_{ybu5u)%1BsMUi<
zBS$efW8FRGP4G*HO(aP-a~(5Tm|O<9V1*BLINa7&`=HQWcI0PmaNf@z2yCsqEH5uB
z4qS13b>mhDyW1n$w^aICleB~gXptL%ExgstJ!ghJe(EE9mCmEN`UP|)oQ{nXy4B--
zcd!g_Yg!OzlO&Xt3GPGHSGDMI7h^lXKfW`)KJC=FqiE>zVW#ishp0kj0TLD*z-q%_
zyrT9Rl}>q8Fh6#_5NZ3#k+dk39e3*+#4CGC21lv@6C}4b(aft4rvZsa8W|YdUEz}w
z5-&a2mFuzCo_E()E46ya23%V|LK#4g>KUfs8>I1d<QF)t_VoL<g3zte#40N2xUXW)
zo$l)6wevlS@>8p{cFwkG9DU0Vz0WaiJsP?%R82!#at_qP(|&&Q>{<f$@E3&0M??d%
zRAX7kjk0Y2mX{`y(N=6)<%L}*PyUOWf$J}tpI<)P6wpQRYE&GapQuBQT$t2dnz|`Y
z;=teGHdpDl!lribsb`#Bt(<E4OjUV&0L((|ll1cEDQ;G>z?>faedP1+@Bhi#lfSZ+
ze{qZ)X7uMZ%boub#u$3<+Lf>{k=zd2^$BuhTNxA46{H$_yFrEIA4pCMGX-bPEr_Hi
z9sN6w`r|7+357Gn>`IMkkQ&d~kb(yHY<?UXkjz*{{#3VI9rZJ=_~Ji>tP*YzBQc+a
zKzULy@j)#xxq&r^?%^4#C08J;CP91!TTI;{Jr`fxMpVvu9s5mh=heSo#s7dJ_0x9{
z65w(Ep-BiDOG|)*_0$JE-D{){_%?4pa&yhz*buKB^mSPcG+Fllm0SC#W4}t$|If+Y
z|7K2eYfMdhAJSP@54+NNpBww6UTD@4{nCYoma~nsPMG&T1LUrrnssyDT+u?2?bW;N
z>vs+31QQ0WFDTkRitaq3;Io?;E7`WiO7`Z*oA95!O*QLAV1EEq3mQOP@azb3zUnHv
zUpElGu1$afO#ugB1GM@uXTVZcgj%;B=Kvsh3xGUyuz`qVuah8Hf&(CbpB!+3_pUr;
z>sht^5$5`vck$!RaO`zj@q*pz-&C3?!A=Iwz=(W<<7Jm7i&u9=j5q;F8A!s*=hWVt
zN&cQ(>nGfOo_cp?vwN4Y@H}-vU0+FMD+6YtB`?F*yST>%`8M?5;U)ce9{=U--yVV=
zS?skG;4$+jy@xU*?U{nocyKn2c^9<%(GMfc;2nFpMU5;M#XpBr-}HMVUQu6vRWi*J
zgs7uwY=6w+7$=KAt#pjIm38I%tCF&?<D#*dF$bR8P;v4&n>=i4_!Fn~z+W7v|9?bQ
z0^oBtZ*HR|IU2PlU1Q}Hr83SGMqhtAa#7{reHC5?Pf!gD%BvUfsaamS#F%P2ndd3_
z)spw5>6*mLn`M)8xK7N*RFbp!IQuk0=#54<|04DDx@T3DDV;GI{aV}8{-K>vf;x3f
zI(?B8=*UGM>wdsTOz)a>jgj`S^rE-r#p>Xz@2XV<<=EH7-gQLu#`MHnD}|iR*#w${
z`@F~wZJ-LvwD%S0jbzb1CZf<SOh3iQu041)C2&deH2+I?;;+_qc=4|lEei8+i45J?
zFi;Zw9rF11Ru2E@JsTbvL<-sjf&DwAc4jJrWA2f?NiK2}4u*9HG|UcXQPnbKBHD*H
z8u#H|20gV$fgep`a`$Xp7hA(a&2e2Zt|!;IcJ_@Ok6(T8L|y1cTg!zUf30|yJ-#17
z4YuHE<SzRbd~Xv4+DFJ#+&~C}C&TGF$S7(76qQ_uY|s|EexM(@h=md%h9y>Wzu&sk
zdEhrDSKoJ?i}N1AzQ|d9@YN)cwv2F%ZwIngYT|OF3KWrg*x01z1DNzz|DJOFN2@YC
z1%opfgOUzI{rRYS6jzJB>-`Svly1nbj(jz-c)eV~+4A7YlfB`2HpF-LSr<0@#9e^Y
zY7T%1Es47s<x+hxOK4c_(V=2EGl3Q#bqv{Hk4v9cI%(LESk)hMz~oZ_6Tcm`YT;B0
z^r&UUVkdzdcm~S&I3Oy04Z6pdWCAT^44#(oCn@95K;Mu=E>_D!lZj976A7=`ju3l0
zazr(|H)675)>3jK0P8~HgwJ8W5?Gy@Ektm{o|^uW)!>9g+?C)HVp7^!yN=JvOQJ>T
z_eiXSChy?2@R{L8Cf{?4=gOLt%*Cy$UA0X`3dFa1Pi1?B-*+oltt%o6TyZ(Ctbv=^
z9>vS8Q8g#<QDVx56xs3@_9+6(LY8Xl9LFyNXg*hrNkk^z6r)SHuJgg!f?`TDuT~Ju
zhmNA{xH$B#Zm<Q^fB!Jn6<Y45t=fMw_CZi(e0%{al~IMx0suD@Y`DmXeA0)?;V>sb
z*<`eT{=E{VGxOY5(Vw_OmfILle{_B7Lk3lDnau{O#tZ4yjgyHWEpZEHUza92$TqZL
z+*3T-UWR$rqP3jy=yp@=XlR#Kd-qv+w)riA|I!2YC+6?>`+^Bz{(^Ch-%SMP_YaYb
z7x}@S;AZ^OWccr}6*7(z>8;JIewr`;)`f+l)d`;fg365Ks{YM<x+^o&46Q_emQj+a
zj9aI{vEJ)zKq1k>9i$rAu3UNj%KEsVtQz+yHm1It*A69D0zrNo{(r{;=PvTXwFYJ@
zf^};AR<uvh8|0o53p&B!7s;BME7^8dnk?fVJ<@;ib4mpI8-zb)VsA>A<Y%wl09`+6
zJF)eZiWJV4k^_SNKhZrt{4x7`dcgQ8+Ye}N&80umAqHD-Te$*=@{H=A*>8*#Y%&kI
zZc`aW40_m2WXRd<`f(`RQse8Yk8X1m{x~K?@x)qe!_a;+lAI)D$1azgS%cA5sH$>}
zKKiLyAnn8vk2A^#ds#RY>aB=N`0Hz)Q>`uC)L8t?RAhxyzMb49M!e}^)vvh^F5X|$
zRLJo0oH-)XMi9VF1QTxU%eZqikWTjJZFvjmgEn0ZotXpn(o*U}de-Jgm^HS(V3fbF
z|Hl<YZKkW>+=ver%7e<R3#a3r%_MlnhibMs<g?qZSPGYWSash%r_T+8i=dm0kyaqt
zsGcY~Y6~q^iun9eC?GL85dZY~Oop+puMZ>nBoFa2AnHuu3?=p?D&<J@PRL#3%VLOO
zJ$M-G$8in9nKFPYkiu6$(#W5Cf@FA=9cG`6ZieEmo$#U@M2<0TP+up9A>?L|I88~k
z%^)qX14t|N5v$)Iq9SzAk^!UXsGs(t2kPzfz!MDE1L^x2$jO!+K(TvD5rth4UDNp0
zjsVNR5O{+H$X@SW49*fTXgWF$v@W}{;WK`@D`cZ6vi=5Uu6Aw$FIfZVi`Hb8+m&Gi
z@Q;7q5o{_RGnotDgg<m5903HJ^+KP5eRDO02tX@-+zs@{DLQ|Hn4gYeMgFvpsGp|j
z1(;<c(HS(QE&PxdM?RjaX-yid5whecy<col@<C3r*r}ibTPJ(H(%<i#(agkOoHoDK
zcz5k)3<q*D9>!q=C}+nj){N><Y-U9x7U>DH%~73V+P$ZkExY|+y>Pl>zZ2Imegd}|
z0d#}h%Nk6coGXpUQ7vOHGhE_3aoah%Lt|ED7Bt5@XtaUcw**w=l3LJ=E5Qk?J>%$$
zo_qYXowxLfW4>cPI_rslitVfZXvz@Y&Z7^hPs>qSGwcAE$!JQhWi=Q{UE+Y8R{Aic
z%)X{j@A_hHhjZh${*m_{1wps9eGI^rKLS7PLt8}6br$-QBlpW>M`3Wo#T{inVve#R
zK^s^0M3H+RMxZY9C$UpSX)C%7Yt=HG>!o2(Zivi1uf-c`L>*JG{;_bFSI!4B`4Yz1
z2V<R^V15`C+LxQIUBkH2#S80tA}ecp<*<zY1$Kor{wj4j@BVpY32dnwO}LH04L4ze
z5J#0c@xhb&8+HceOb!Z8#_aOG^sJNy{sr;=XWAlPIp8MfNuMyxBsTZR#;Kj<&`B*X
zVP}PBiB0S(DBTnuDoOOSS8i@WM)k0T==<W)h{TWWt}^k}!#&~li4j~k;tBQK(R3nS
z4E4I4b=_vP0eAQG2Dmjj-lcpdi@-dqwWX&y_}cBJkLFAP`v0)^-ce0$d%G|Q3JMtM
zohYE70a1FBfJzeqLFpw3(gma_Adpa`cMuQ|QK~3KdJzdt5Rl$G1Z)&Z5Mm%@f75f$
z{@(50`?PWIJH~zQJ%40mWo6a5=9=?Yp5OB%z}*%TdA_$Jf+s+Ht|PV~Z=8e(a4Ryk
zu89hz|B6X*XuN*n$hqVtrDMk$>_%El!|*QT(6VNlvrmAK;*KktU!Hgk$4YoHqx9@K
zm8s(9!>&sIf+x1l=X)G%c(QGfXd@U|M{i`=B3^F|CZ9QR?wev);oR~Kj^nqUguOxf
zkR%8%F^E_+V9()mZ8_h_IkS+tJjmjlVPb8QR?bxT_?d809O<L1Znud4-nVbI&0BZ_
za$2Aqu5TF{rgA#)eoF&R%6z8V`KdE@@MK|8PO+($-DMbE`k+`s+{k&m<eGc^oHV;n
zVo37@E<u72(Jk;A;cUB-6)%RV##&^A+0Mt{oS0+6Ij^TIqmJBHFCN%{A0Hwtr2-z=
zY6?3^pcbMMiQN{#In>zQF>t4WRCY@WND9aklpp52c8Cqx(f!m8kE(~)MC9T^C7|=_
z%M7}*I&#<{2;TqXEm%XMRNsfFhYhNG>XvD%L9B?!JjJAfEnoyxilcWFUPh8jd>|Zz
z4*W-g)e`!UU*4y-$)@j-65@qprCsiVb%;7$#ZIpyuK~wU+d-}fI>FeG{REjH(KV(5
zhAG|c;I75@2<bz#q1TmOsS0~eO@t%mDO2dG+>VOr@5^cR-H0$qiIeqN>_VZdaK@rX
zI!3#{v+!zFW@cjMn~aC8$CG;1go8YQUto?aet^1lK@-CnCY*6~LdMlffvNwwEnfS1
zjVr7mIE<URS2*mWi9IZ$?O-dB@9%f1vLmENgbX4Fa)1L-<xsdnr1PCz9?G!PX9XOW
z(i!!-^pBOE`6l)?+SMP4r0!sB=-GF~vtj8%nx(Lus<`6tKv`vl=Y-w?h8$fEi(<3F
z;2u7&7xTL3)$g91bfylYiL;?Tr&5M<jPa46^>e(ALq&fb>ANz#x!Ty6IdGtxt7kuT
zSkq98CKd5m;0P{v^BK|X%XbweLQwe^kT4_B$#iHIaf?lCQ}A4KVK8o^Zd#j%=Szf~
zXb;(ahGVZY=}|iHCgb96tbyhUg~b+g{ImButM*z~2xxyE#(+<?K^fRZ%@Jid_Irn-
zKyb7`wq4Q8CHIo}7Q>6rw1mQv2Lg8Up9`>J72z}Ws<@(B5P3;3;W3G6R$yxORYXOV
zvETkGoSf}Ay)YqdBtmcXit6H=a!6=#t{>fsNN2n&{yQNVYjS;@v~|ddhnBO>80uJ$
zIJ3MmYL+`-+S#A#DZ#iJI4^G*G>Le$$bkBA8qS5<FscO+S4EjqbYJ@Y>NlBja08(E
z2slYuePTsx8Rn173r8I86VuzilFUTBGsU~Ddi=4SP??J>g1?2{&%>y3s97Dy(9vXF
zc#C3*uQ#tPVC`JVso?G9)~`I!0_LpHs6a)6<H|2o(~H*$o9;a?U9#BALO*}U*K6op
z_Xv1yk>3<G)h+6iw49)M@pK*C;sO<peK3|pw~XenzMP~F7?FIdGzIPn&9$dgm3!S`
zuyJVO*EAfhiB*u{H(!InOe<TQ=k4NskL8PXQm3eLU(kaAG5{02H9-nH2xt}FCenRj
znej_C9}-s?Rhl0>BR6{@#@Um%Q7wapi_eE0rx`4R3Psd<bPjToEXdE5y^CfD9(rRu
z-5n}CV=_~>g>;-E<ybDBiXT{h_0^bq5$P*xpx#O@j4}Nfe=58)`5g`H^gA1WTIMmH
zm#7lw8B3UF$-KHFt74acqv_b|$7e!28B>RRD|!=y@7IT@97ayy5eGpbYcGV2B#Xmm
zT-dlbv}UlD>^xIg)i0LZc{A<7t+(_iR}BXWZ=|1Y@KO<eMbO0z&I)0UCK7L3@dohT
zbZ>z~UTbWq9;S5N*;rT9=5u*V8_*%!m0Za5?K!n2WQtIj;56c7sm&KGS@k#qNU!j#
zRb;Es2YODloFQots+Z<9_3A5Y2|+Kri)MOQm9!P1(o%g5E5hALY*f_U@;v}mGktw&
zF+zo(kh{DXDLm#*;$E>$y%3l6c#`9lYU?>2LDM~Fkp)M_5fzP3C$33+lG|t`o(Mcl
z$ia!ML`INq=JnsTTgqkCE&Ae;k-^MWmX29F7x!+dk%47<T7@*)0IeJ8<2&CxDxV<V
z=HMWIgibi+KDE(p8F3|}I?gPsF_R=Y?GtyCkE$w5rYIgm@sQfdjg)S>x6lXHe1r<|
z4;if)QcM;b5i7j`4d`*rdpF&d>Ar>v@SRReO23|%hde|X0JKOEBppXwBtEbu1t#Qn
zGW%`8I;)kluUqiz@@qX3nPRGsVJhQxZ&KeMX>EYoOag!q>0gE*<opeSfH*;!L@=U3
zoHZH=9dd&^ctjaLIryfsAxl@)iZ0Gt=tZe%Lk#ykEkZ$5=tSQEttlz2#~-XIlM~wi
zmU(*gf1<tl|E^UJayLu4i}m`Qu?9tfX5@m8Rw`SEeckLYko#YsV*7SzUH(Id1a)YF
z+$|RM6Q<wMvl5ge&?Q6|&&f#u-3!ZlpdwCq_Ciq2ktvC2$-(4xo^QM9rKWKH@oskp
zDVD;q6TE0q6Z~zUf`<4(WD<=OnbVO=pa>G$2TjKFlfP$s-PPfUIenNa^P5mjx3Gs>
z{s}MC9kheT)TJf(#v4V~1(vF8Gn_$WZ;<14`}Aes1HwebGo^Q6htAwPi^+v4dFgip
zq~103o^s+eFQv=EgRJ;v$JB|O_&+Gp$BLXs)3=-;*%R!S0k@j$2&ztoN$xF&Vtl7{
zAO}(WW__b(>fO%dk`AqWr;`bOT!&hw^RjI0*YBTA!{~Be!+w2zJEO_H#6!GnjKwRZ
zO_6I=quaqy4KK4-OcX)BhjXdWQAV;h!AP*I7#XsIW}0)#I{RvS*1w+l!tR^7^F2qd
zj2=?|#_Ij_+A-1g4fg#I^}``pM0Z69a6oF{@lB4KaH3pU(KC#66^}#hv-#fRi8pHK
zLaSb%p=(u(xJA{?%b3)=;J7kKPZ>wmT{ee@1O_cNsId4Q=-uXxchY@c_TgiZ4braO
zG>P3{{8I3PWm5MH2l|?59;Q-9cl5S&km97)$dZl)j4OryMBGiGn3G4M@?RNIQGYRU
z%1Z))(|^B6=b!Ea``{4+t<!G<HAXzm3RnpY`|dz$DIsh6?6WuN$Fgo8mQG|742ZCY
zeeB^CwKrB5`W_dSMMYSoFem_)iZ{zBBHwNF&ir$eFU9Q@dnYLn01uM|VdR1@h)mQU
zaKJ5J;q1)}*z0(oA;7DoF*d3*?DGbq?|ARSs<(T#V6<X$xt|eV>SyW`>_huUV{oWo
zIDa5NQ8Q4wl2Edw6tHe3NHKe+wM%cz-2v=XcFkmZM*7~@M<L2Q_<umD{hLfK^L2gm
zyVBpE9`(=zS-7w}s5NdVCDCQe;zwl~=^-o*gsnkFjc<HW*0cX=th&h;ij1CKt>?ZY
ziE>v@Z&&EFz|;NV+%n<ES&2@&WF`>4`)?6Y{%xf5e|h<3P=IUEw55yONR(Tdoxgo@
zli+c&_d&tjFO3fN*Vb;<)cGb0<G<x=3VVs(^0^uDLwkdcR)GE~q%TJTzl9Y;)mB7p
z%Ali;iL}RhKs3@BU)7{<yA<g-Iv6Lgp0<fUvwZ~{umpK?mZCs9U5Py#k2|}CNB|n9
zp?+?q5{%bvF=o^0GB*x?w2ygguc?29v_^2oZY}RFlGp%zD*SBo`NIA@Ge3tJQ!#<w
zL$xuNI8t;9PRhbqw%$>Z`qx~C0B`<;tU2Ar>mt+7#<(vN6Xy6QbvTRT;U^BWcMbDA
zrB>aHvn?wS+Q=n1LBk?3KJY`&a2MWm$9?h_;<<fVYy{SEEKA)to<|x6eShhkJQI1M
z`ypje;28Na5G0ns)NZxq=vn+i#nNAL5GN+7Emf8?{AA_3mhfdR$<ld-9pcp6`}OVm
zA-NC=gy5(%EX^dj5Tx;zLI0%=Ey|<0>7i{vW>FVD#ZXV%ZhIN|`9RAl@&f=g!k}FS
zi98?#ty62R8kC7M!KV;-u~$Cw0tKnFHNzE-YJxN2=^xkoM5y0$^oYH9uzKhWbz9rg
zO5_OKtzz24di-|H1)=V`Nx7Lg+^hwt0=MZAd(X_~a7^;O>gr_g;sKNPiQGEhZf{)c
z41}e}ldXv@t3+L6ghks5`Tpz;=zM3AeNtL`io9heg~2HoayNIj8zYT6;u9~s-iqg8
ze?HDsRv=z{sZ2OID!x&5xU_*~wS4}4H4DROThp^aWSr&-v;!VG!)Gl>$gOC5W;&;3
z8(m<M{NYYvOgGz=Z3U`Zo7WSh#!MpL68$MXl3ghkok}+4G)xoqFYfrSa#7Pw#l`G(
zV~iHVYJj?7dMdoTM|`q_Q-DjT!=vO;S2p|Ctp!f!>|PTkN1A)Q0hg(IE}8-IwAL00
zG8)Nl9;i_DYT~Zx?XAVP;#8p@CpTIw9&pBTxmAmbhsoUwmXLpTym4RMe<hdx%K{_(
z*e;VX1A?h6Yccza#jT6uLyGToY?dF!G2U8bbd13b9-vIZs}nmb*hIpVxmU84HXY0a
zM9bzy>ZKI#nwtBqXlqmW<7OkStkNKuk+Tc1QAhGK71lsb+|lgkE>EaO#SguC{y9q~
z+e+I<&^fO@aRc{oo@ejiIbQmNZayl1{TW2p;$fs{pil$$QwJ(c;PjBPSz+w;XNzjf
z5>>UC^*Oh;Q%s}$@?Z@&<!K|1XtlAal5gTBJM|9*N{y7&qWN5$3(c&pI_ww|QnXt>
z2JU%=$D376$*-_?YZEV$xDqk9;B<5`Wreth+sZn{m#eN|w1%|aeT^E<r&Z`Ur!25G
zMUcSnPLk9skR1!OIL#<bO1IURyAN;QyXmWvmwaU&rd%n!Q^<Ww*p4dnd<Y_dBuwDO
z0{aI<JAK};s*Z|(8oPb1X20f@CZ5<=q9M|lb=$-Fr0{Aw{4D3!V9~m;3kflzCI3!I
z%&*)2KMF;HrQs%{WZ*~qB&={Mv3YZTiPuJMyPqxF=`*DLUe&?v6DsNbO-p0AO&MEy
z2V$g7g_K1O<W|+ch0b_jqR1fj?G_+QC5es<Jbfn|5Tz<%q%2yl-qFxA<lOrZ*dwdf
znOo&!aPiXZe+%m*Jm|gp3spS478#DE!yHOn24;~8_gHE3iPFml2w5`UHddOHqC@hc
zq;t=d>*Jo>>*{4N0RWcT!A&!w3dxKBD+?4O`1=J0U~-u?Iq`03X=(2hQiaz7=ldbO
zxV+cl(DO932ZP4<;{)vMOU!%hp3ipQbvrjJZ1~j2^K)-&s7h_OpZBtFhXkFqO8zpG
z{tM5^(brm|rc)NPT2>*(w!?xcTEeVP)C6ms!oDT0pf~1MGSN3HA}In@qbp;|7T7s6
z7jH9qo-ZE_cw@7T;xBoeu{XXM%5_=u{(3Vv4wTBg#UwJCyUtWZkhqs$Pah~7$bSAg
z-P6i^aDXt49Q=0hc>H?Wm!ZcdUv&NPCD_zu<HoW?mUU~!;k!(h@JABv((BIdzDrCC
zu3ZRuDo2G@HdDwUoF$rJ7c$k?{p1AULc#Rs1+K|Jp|V9arnD5(6OvO`xU;y%sZbfN
zeBKEKHh?bRro^7~U}Wy@x;Ieo^q!oMlbx7MADLdboZ6-+?WHLgp{c=TZ?Akzt&Ofs
zt=PHgUb3S=PQlDudsmKomkx+|mSaqpZ%|2Ui_nzxbI>g7&))As(9Dyh$wfY+AM~-a
zRuLD}lP#sQ^osfpw$~)TJjZ|RKml^MGUeWDxNVR^%W)Sp^Dk5`ZpKsOV$9iUWuB`e
zEAOpX^*sb>XXtvp1-e(io<H5%rN1CoCu^Qonywh%>}x#ARHc4~PE#PRlwN54>S)cp
zl2&!1-iVuRRc)-&i4YF?Nl&pmFQ~YMQ$xl+#!~OizCiWr?VyM(vp@kfD=`+noq`I5
z<Eh5sJJ|*mL*GZFOJt-cp7e5z2_^Pgr1E5@E2qBFj&JkQ?(*!(a}&DKFebhm5G!im
zQp5OaPTFOplkZp?UwkjUFrR?8dN3iCas*&bPGw?H`}?Eto$@o2<Y&8N{SL&duVp1)
z6s7NPba$j5yU;w$jnQ)F;3~=)I_61xdn8n<=DFQ=sOHpuLUyqW-phHWw$!AqY9&xA
zuZP#|L<Eq%r9B|27&qC4u1^TTahebg%=mV=Nm`i?FkS55&5?YkIA`^RIptOaROBqR
zrvNraKciuXmN^9H8`3O7JW2@bu!asPYvMjxjWtVou(9+nmt8kb8Pt>0W^spbN1VEB
z8TcYWw(cVQ8p<Y+w}hZ{13Qo_7wmDnVu#*VOI&!gC?UXr?YLlp2ebVNRT?TBB1e)0
zkcpwU7d|rs5DR@vw7*}3xXJ}73UkoRqkBrm!p3zyu)+5g!G285!2!@I)@73@5~m}M
z0mJk)QsMzg0Y_sP10*|#9W#9D?uk0w5<hlLXf9RoxrqM_-iQO~T95*Ophyf$3iKn0
zV<aLEUH9<y<<WOM^P>9Xcy+WbmC~8z-aWT;C`>@pe9vMDzFD}mb;!?la#>_kN@g&a
z6tt{`scf*iGMx^q(N9;%3-D{MHO#&rT&U&a%$5!<FnULL*(yMT8$0c>f@1p8av}!a
zlokJ2&ZR+N>1rIE^afpcpg8%ZGB>UQqq`VJ;dH(q@p^e)ajeH1b2(JYhp;6nJA=8U
zc`{Y_+?r~8no?vx{CCk%<ijsn8fkgvvHU!}ybLxv^kpliNvva=s)zLN-!eZHd+4QX
z<H7Le`)Td;ADT>)^aYt3jFXr;17EgXh<uvY>i@MCE3y<BEn|`6+lhb#D!p(qsraaW
z_in+AzH+(W+_tj%BVvkc2g=QZR%UR+e+5PF$9pA#{<hdgL1^y)GoPo0skNG?U^km<
z_>GuT$3<IdUR6|n&T9<}BEl%ci*yvhKt-%bC`k#!bqc4<BU4t}m073e$v7cf85`gB
zFfc~5Ph`$xSr`XS&N8HO2-)AFg7auioXKz7XEW%3egRCyfoi4B*XBF??T<68W;fcp
z<;fV`Y}h<HB6Q9s1Z}>fAAw*ZNxjsqAb7NA>7|y;<&4amo3_zz^Rf47^T1dmZuWfE
z`s%9Rxxj>tjK7_si%XBRt1v$y_G)of{Vta<yRh(!R+htb36BkIRGbOGSm;8YAViZh
zv6`CL_bnnLWreSa4vG04km(g$8=Lyd*5?B14%8gF{akkOnKFZZllT~N0_CJW20$LU
zzHV$5W6Aw*-}EO&xd}{%sm>~|JM(*HtaHbA7W&V?)=^A;a_s_)wX!OZp$#9HE&Ekt
zj$!j>hja38U&*KO>@LzTGsGb}(2*2n(~NEb?h(@ESD}^h8odm!f|Ilm3cOTF5dPMu
zkO4qe@}|?1u#Wi}i*2W@0kh>}&T|cEDIe1pmha38Oc_W-Sv#c`+ZKy+bKS~|8^2KD
zyju|fG_-k=c+pRD<<v`PxV!Qt47ZG%ibZX*;FASy_hMR=`4?9WyZTbUeQjS&KpEGR
z30)bgA^5}}?PXHznsSR-SdfVjZBuB^qk18!y<~C$qaTTakn94SOZ;AUC|WoA+8C7;
zwN!<E)H=f7Yv*qeb4**vnM--(m4FD275{`ly{v!Cc@csXSqzF9eQSQ(q;8TYJTAU)
zzRm0HWj7%q(c@GD!ba+}?lyX`rL6-NB$-jPJCqSJHL>?;NQT#J73af){51S5MDE~B
zryKWe_9ORT51Er2A0ivpkQ~^7+I)$~yYYep0P>_7Q|CR~E#AOvXTz-cuzksGCC>9k
z^XVmhTEC+c7El^OfF5?Urc*lEc_L{>DETpmsgatHNJUA}5WmsC17V){4}|%*3a_PZ
z`kmI$Vfd<4$;^~5Ai&5J1Yv}CPnoh{VQ@AIS6CQ^%m*dX0|yi5gK8fm+yoOPG7-cM
z8Bmn7u}8c8)$gFNwJKbwp+$gFSZZJVg(?;h{}y*Z_IY<uB(A^gxWS>+3kw5;!zI`b
zN(31S;1qxwm}CY0{hoPkcFh&ZlvBugRMleJAeS|P4&xYiYUu7dQ;~&pO^{KG3Ac&1
zP>s{720uUdL%hsZ?(Yv#kpqf9M9E1gXFus0LhbX<`{7-2#Io|I<F<8;m+dm5`q)C^
z^yl@dwj11?&B7DC32L~CWpsBg1C%!qHdZvg16SCb_^QL(C%T%V0_`PQp37v{?=_<l
zwYibg8nTx5A&V{^d4yncxe`#eH(z@CCCfPTh{epz?82-j<GC)`JV9768+GuH^2$H7
z*TIDC7~rrhy9AyCMK@@Wc5Mqs1Hnm6lDK{ZbVm-9>=4e?|3ZcNG9~8w(1Z?XOJ84(
zBs1;9DU9Oazb5z}c2y7k(`Qifl#qt~_@(n+%bAfP@1c`BjHu%~q1|rkmpr^Zd|3va
z&*-xH+_LFKKJdJL?RehbVLbSU>ywhGg_n+CP7-pkGYiHMC=TcI?;;t?Q0`pJQ=z15
zbe=!DMh%4vbf};DMs5f6H+|!8UFe@~_iNKRlg|IX>EIFY-!$E42L2E1ZJ_Z-33k8j
ztce736eH@h$1n&u-}C^Q8AG5~?&;ub$tf*R$^;xT+kq4|8f{i9hV|sHGe31MExOtJ
zYN?0z0kzoWU{zZ7yT$@5;DP8bC;s$6et$u#&_2kfIEX%gmjubuMr5T7H3jsS_z79o
zjP2y;X)|Pu+SWDIX^mWjkBH9>T&I03dC=*x{l)Ix;&zD$5U~XjBb9SQAb}b!?zxN(
zX`zZ?;MFmGR6Hv%BNIQuI{Ya~Vyk%da<Q#9q7+&KXkn)1@VUb~^CFn?+Coe10$qSF
zHD61|TD2clewTi&bI*{9<{2&DAufAkfyreL_Q0c9%}AfE!+tu}Lq0jY+Tm`SZf;v{
zDWZ95!7pei!cVk&gi~~3M?d*5docL<wh8c$HlJJ;SL7Gb7`bT?U}16hFq^^gRwJ5k
z!rOY<!YQuE2KevW7Buqj8p^hE!N3wq693)90)s*^w)?k(0<~Rp-oJ9d1Ap!J|C>9`
z`!51ASdXZ)x=+F?OkGT<9Q*2;VjNqyYPlYp=>DO&`G0ra=YNm#^xuV~KL41->z5S}
z%^^BupBZIDGxtRFz!liQu0$-Lny+y<{gN<wxzc`3WUMSth_at#43tn&j!SotB7T-o
zB9j>kw?=0-^|^u<*nKhNT-4`cKLjYUHHMS!ClUZd*mp{dZ#a0{?t=HQ`#FO^kGkI?
zN!nzC2mo|S?O-@ltOQ&{W2g>w<RE(IdNasog}sDQE~uK~ufV#%FqwyTT--nXqXwmI
z9LzZa`4k=)9)RQXa+Xmfep4_ftsUsDGO(5dL_zFC6#zNV!=foiOHqVfD0Uc8y+vmG
z!R4{E29ivP*ElYu#Ng0>xUiSz-76k#IR>H5Pc96do!U9JmY%{2%KzRuF$m<SU#}b!
zlaW6-k?qYsCq9h1o2-v1h-v&oM>z1ebOoeOBHD{kkM|(g?;>Unth8m-$0+l!1Ra7g
zWascqe_a?b4X=Vf$`7~ah?+WiNr<JOs(~M}IY<}RqJZpaI-9>Vc+j=m06pR6u9ti&
z=lr-hCf@y6=BAyAk5K6S=X2*ekLiE0XE&j$7^s-)ZWEBk)^{h)AWU&8A<vyTc)mv)
z2PtB<CoKFtZW7%K7VKZYdr?(1eeCN+4Q~C5upQ$V6B?XF_Y(;$m`yg-alYDwU2NYC
z!fOw=Q#r21`cGRYHw#R=62<P#vfSF{o{vstfk{P6HVB)HLl1#~@U(Zk?|sMiAZW+^
zjCOyI!o#liWzLRzX2O{{Ezay84APaQ6X*t>3Ox0KALU)C*vQ2|`TYV62sZ<8tQ|-p
z+`)lHk1|4F8@`CMFO{$ELF7qpRfXfHEExN$rThTt7wAc@TTv=UGP<qlP!Yh*V10h}
z`6kMfsjH;KQnQ{}lMf+%xXsXXDGV*vX5ggBPBdR{vd$nH6U^Z;)~v4y&z2&vo4qqm
zJf=E*FO??J)Zea5AK>~Z858$GZNT6guecjYl3a|w1r3#xoSq^{;ON?&whRJiH}%z=
zvP|2K(Dbuqa54smw9&dNK+nsJ6Dm3Yw^IuZW%P+O0K7`j4&tQRRTN%TNBG_TYNxBI
zmp&|{TEds<&-%fOMW&6h(dIfN43|jZk;1nPnJf#$V|yrk1jB99voidyotq4EVoo2@
z#LJFJl{RqqKK``o5XJwV`U|vHu0ub{1PQ}RY^q~<MkHALid#!W75#nAukjd-c^$sY
z#qBS9g{T^sG#w|kB%S5BSsPR?IwFk<6;R3Ue9C$<uF1TOQBD7pPNpy~+j3~Ss!sXb
z0kw+CX5QskiGzNZE@9oX-fl|s^19NS1smGcM%!Mz7)*Ualge(S=}LASi2{80L_c_!
z$^~*xnRMd8&pZ*0B`7zus}t!QBUW=_u58PX3aAIzUeU(IB@YUsRh{x;g8{Vsp9+5d
zmg?r$<^M3m1;aVub`^WtsLd0ACvOYAv+RMW-X*mCNexhW6O_KV0WeaO{8{bE!5fuT
z317B=f_h_;oAX7g4f&-yNllt<c9(x*(fv991yKzFhJNfd=$;CET8Gln<Ux5N?s{hN
z@SQm_pdNuG0amI1q8<Ea%Rgude@^Z15}|)wxg(i}>>gHUWBF}e(f=8hJvU@@O>~BK
zi17&X=1Bd@noGiyoGfAnm&0BvH6D3-KQ)FxhX--)&}a}EdUEyyRwNue&j;%m7&@s*
zH#L>Lbg-SF@rEp~Cs#O)*8O9dObC`Q=&J5b6w!4}mLf&2P`2<~8a!MbT}D#4)pFxS
z@ly*!b0zOli08-t>5MnddJl^>*$m?cU$jV}+5kA*5gut`=mdvwY+<Y$_%xEg*!5OP
z3LGGpt1E4LhB11DJ!!0XqTh`ooblL%5C<cW^fd&dr*n{$H9Tz$O1z1~O*1etc^Fe?
zt-v3T3?*@s(}C(9O-%wkVu7u;y2WT_KW-hLb5Y-NPP?)7!uXNmyqNtXZ_b!GeybXT
zbW4Hyxjk~J$|)q6w{thSQOA+iBfUC(v&j1o%p7Xt;q>SulcVsZL?~_Kma_T^iUsk0
z?se-#Ix*3k`0f<Eq+-$>RxZz@^UN3yqa(2};BV1DL9z;tC)$I`2x&C-GkiLg61D&X
z+<^eAa<O9k%$b%Fojpyv?RpA79Z?cB6z~>B;Qg`n4OGWdP}8oz5Amx2QZ%>Gdq>bW
zo)_<4LG*IKVW{OHRJ911uGs_#;nX2W;hH2x2>uqRG5rq0j9A$|P+b0U_ezWkIQqrz
z4<3K~_c>ssR-?Sn-4VbcD2Jv|+w8~^1I_W%C{a@qc;>(R5BevUF1lYR@qq!F_J`X7
zWW+z;i_w0eI`l>%>Dj-0pZRMK(GSItpilgv6`tl&=s}6DjMf7+44l1lK5<se6nT6a
zWllEB%lL8qrAXyDRGW~rIE!&J!4c<eh5az}LD-MQnAWGyqQ}KKmZ*Yi(?>Eg-^Et1
zNFx=h!@kbA_=-!H=MyfvI7_=g^h6Amm=TrIH*~1AcaC1)W&u*w21G3q)9XlWa)k<m
z<;={g#$79Mfq9F^_JLjL6AmiT%62no4G=S&`h}_*y?HFzB2WQ$3sS1}I$ImmE?gcn
z;a3g4>L2Xu6#JaAd%u4-KE2pcHn+s0+t>9@p>WJC7LB$0FJTAhRU*(zDvE%P1W6mH
z=O1_py9r%J$FMg~l}p#YO@7GVYdyq%qpgeal%(Rfk%ku^%3SHZEFt4rFl^XmauFbY
zMNP@wsKw@Psuk$sM3(#HV`dTc-ZJmCRE!SaoAs?+U!C7fMn|aTp>2^zN$vzY+<N;l
z!tAm`<I&n;?eMf~n@?s8C2bp`mN7~9&#ihDl+f;t#On2P*W3pp--iFHD@}v-#R#B*
zrEo-tc$tv9l+m%ptg{RcQ=!T%+f{lU)M;JyTD?Wx7@<&n3neHkC=<^Bt_r`7A_$pm
zyyasq9E?`B5PQn>RYL5Bl<x^-=ba0O^v5RI!5ivA4YEkq%Z=%}UbEiNW>41S)LoHp
z6*D}a3w34gomP_C2HfnkL|_39wP6-XM}ZN($^&2SM^`P983n-V)^#XFtzyC)hHw;U
z|LC|nNm8iCgFw!+u(xOeKQQLsNuxTlQTFJ4E@iMfWCBhKLzDs<&Xp;J%sm;vsO?5h
z2mB*8I1dUPu+q1gpxBR#(;k!v2nhQu-36xn@yT^@wg7qjQ3s^%AC3hytA2Mrnf*R>
zfq@HQnYpQmKO749ntwPUvT9hfI??7o7ypl1UV%_Sp<ltKFX{wrJ!nzHafD%9Eqffj
zgJj-xGDvP`hc|O<7sI>Cuk6kM5QzwhbrzotbS<H*gh9LsInP~S3j^usx)-x;43U{H
zkfgU~>G_39++5bLS7$<Njy0t)dsh(6rA$u?2XHEA=_T8iNM;*(Iz_!FddTA3GCT9b
zE|Hwx_$Pp?<EQ%K|4EX=|AlaRk+B~hk!G(_{KE3BqoYeaP~cqW^URB@R;r}Faa3F6
zK(P22C&;22k1SYUNm4riL^-$s9eIy~T`(12itBdjN+O9}uFYJf5%3AF;BALQ2rpN7
zGtPC0H+$7RKDiTjS$REA;a%JrfxJuBhdDRmiB6PBlk=`1wcXvxt$cd9$IM<IV#@Ad
zw3fdyl<dY#!{yK8ta;)g&qxlp?u=+5>(?LLw1!*LmzEg@i;JUqyA*49AI><@OHkeH
z;#sY-IO-?=@D*h<5X|n_Yp7Ydl_ERxqn1OpMcHs>m-|_LUJnG~svB!+H1G~d)aw&1
zy#zHP6XpHl!F1k4CW6Z1_3;yJJPG9o$rH<@EA|4Gx?3zKgIuU+Xo6wG#p&?6zH4v+
zizN{fI~0R=R}l_$2y2tAzB4i7)>EI!243QMNzvrsFYi~Mtd6LIaV#ye)39vutw&tb
zr|9y-bMIf#-hR>$kXf$1zk;opih;e5Kk~zn5Zl=z2<vZjJLGR>p`$CZ8&mM()cf?C
zoX{K(2tgQ7mOVYhcz^j9s)i_FwHWPNsk`o8LG>y@vk=%d$n+(OziTexJe1^~NVrS4
z@$45W<=R1jSE0xu3Tq60Y{>zaMlX*+t0GC;lW?ppoG=glc}KL46PS*Fp#t-<Gpge`
z+#a&`p&Yf;iNb++?N2)*(}#ft@e36&Ax3}+aShoE{nlp*!W%y>&F6RflEw29UtFhL
ztK2IDjq{ZJ5dP^2=o?|ozfg6ppCM8XLy6+^B^Chy`(?(u03&*7l1a6mi03!{cGXZ1
z_)=2K;ZaAqT!@P3ItAJU^)j=KxQEcQku9LuK8049kk~AiuYIF1BZnj^+C1Lhn<?w>
zKJIh);LJj+N_cR^R2EFZ=Cn#^C4q#l{Z3+wTmsO^*cI$OD9;xTQeQyPW#fxsFJ-!(
zxF3&$;+HH1TGDnz+Cgn5X!H);5z`tpcV7_pyODQg>W$d<HH)9m+AOdg-8^#{wl#x=
z$HJ?=x5DFxbwI2T+IEj8e)VZ$y+*D>i{X7{@59=95P1jK8k5&pA3Ql_Dd@uT7K@+n
z+or2L-@|1rtX!`)0-DWUzppk)_Y0EKjY&*yCd);@lAy&BX0e>c4Y=-&>$r<Vorb|j
z`zp`Q#a*R-cj@?bH~Af%C{-?QFV`{xfYu*W#1(f)Fn3_P@k?0o$p<p_?Mw~>^P~)e
z?riZ_;*A&V>-^bLJ8evIj2|rH1o`x**I?;b$9NJH#53xih+|JxJ6AYOzHH)ssbK7s
z9haOY!YCDSJ>q&B70ycc)}Iz-oA9n6qR@~QUQ@LfBz2rh#<DA{B=Ax3xQ9(;jP||s
zsYIo`YB$<VI(b@rdGDXdTyhN)IoT@pS@da2lixi{dl*_;aO;>RE%VS=TG%pTk9`NV
z|E(Ux!dxxJ{+Ec{f4J-aan=8~x$1xBzW=oD&s=YOD4G+POi$t4fvBnsa-`29S=oHT
zsD)*@kqE*sCddCRZ1s=M^bED_53Q0T$JH(F-yL25tB~r?D?i&`{sF-MnZFnY=>(RX
z_!@fm3!D<{>h+4G57dtqUIMwKRAB2J%A$EO1YbP}EbPe|_&!ycOV7_g`?C%T1&!_$
z7I-uv9|nJe8~|oy;g79>Q~8W%9hQofAjOz_kiFsi%;i<J0h!@!23o&{Re$|Hq%C+k
zUaXY)7|VM-Dw+~ue8pSTj0t{{kbxt1OHzPS_iC*#z*kDzygs^Y_Lk@6qtnQKxYjeh
zMDOp9NU!2H9mDWF<N}g6QJbU(t%PpWdfg&kPJ~47K6(^oEI%;4^zFKm+w#C!K5rom
z```)64*4<MVJi|{Ef;KqMTEL_)_aAg8jclQJL~b4>Z3OOz!<lneA`Uec&yaj<vZp1
z+h@-!qwqJ#;S_F4|3oDrg(MNv9OU)7yf(XFe)Zj4({#RLuVo6uH=%FoRGUyw1NDHh
zFI{JbHJ+)n8T|;T{lr@P4}Mc;lzqN07wr@KhNjc_oXdTCk^?N(_0fcreSFi){LoU!
zGj_rjE)}l^h-FR@%D3?G<fyw0kKC5tXB9^vUBg{%d=)jhh3DGlIv3yQzhPyK&#iuF
z%#Ou9FFPR}_=PAynMOcBG*+KZW+AO!3x`bO$+6JzeV>#X{%uL*kj_1K8oM)ECRAzS
z_zTF>lqpnIVr&a9p%#7{IxUBcUtv0=@n*bcU1raHWnCI~QN={8k$73Y^MwvI`UGW<
zoQQG(DF);OoH6z7ee<(97;&+}mf9lsp4!HnNw`nxp_Ux+Z(>p#&0CPQ+}AV2;tF}8
zX2a_2$tV6<&<Xrche4B=^C_1-Vsi2PKSo0FzcJj<IstYVUQ97L(hab~=$LFCyk|f#
zkeUM{-Ox{BVW9K>3jX}VcSRSJ7L?{Y2KocKEK$_X@J&SZIH6-Hckk*}B7EZ@gpwr&
z;~{d=;Im>x%MCy`R{fQ9jXB`DKH^HyxhGCE*?`;ospUP8<ogTf)f+fq!TPFld=52r
z{sQ_#wMpQ(q&Pc*3ow0uifJI9zp=XJqG4!z>6XEpHzj<0xQWbf0oUmM8}()Wk~6C4
z2UZ|{Zxngh8rA-oAmyZtz<C(KJeUgT-33PrvUwTbWlHLKG2S`q&q%O@1qU4_>LDj{
z=~4C`M~ezq)<^G_S$Fjpf1Y@7VRM?Bx-K;;Z|>Yk3!4|GcVO{$(PV3G?G}kGWgo&y
zG9MEr*kipk&D^A4-)SpPK58(+IWt<h>8T<__c5Bjcd`cg9`#CmXNEu^6RoY_aTGxp
z!`RMO%}JKCW(ps!k`0|rf}g{^InF(MRfsZbx&s^Qu>b!3MNn5|^fHPTyu@MX>aJ*V
zA{MP0({6T6yq|<_K{vGxZk2Pn3n@R~QitDF4=7|aDdQ?AjRjWy^IylDfB4-X5w*zB
zB0#dlZnUrxQlFaph3RBa%q)vA@npi|n=2?y4P*Xz-x@>b3tZ6HCuc#&cN&~p?=(Sh
zpluP%^Xok4JtV((a3attTEvRfPG6V&@K%d(=$&f&mz$uvRfKYc4m*fXwZ}R}2?{U}
zVm*kBH39YQbML}fc;$i$-yQ#W>VVOw2&y+h(TD@ciQHEnA-_;L$$0P*&YF_=ai@mU
zon%a@6O>AX+SKj|b-2?qRZDEKCbm$SZ{rkDGsWLoMRSXGOja^B>WTLV>#f!opjBI&
z)yzXA6NetR;`K>+)AF`GTZ<C9cMA@1U!UUJIl>+G*?8IJh}W6YAY%KoD9WFboc)On
z^Dj$v{|m4CPm}0xw*Q|(!~%#8ssS1%2^9et`NY8xwXey3|ARjifWUx$7l>|=W6j}|
z<hRz1sy|4n|5f(nze6e30b%hghfD=fju>GeP?CnWI0Z_wu1+c{IM$@<&ciM2#VPWg
z(||o$Ln}s=sCUC;qbnLGwLBOt-52EAK{-9L3u7pRu;IVI>f<fpK4@kdW_3d#o$b5Q
zeOL~NeWTk^JG>yG)cOcbcmZFQz~+(<ZJ@BHBq(R<j(>h$?n_|-13w5oxr3bS0b2ld
zdH(yosQ$Cr&&~LMMT5M=p(saa0KsrK@ic16R~Jy4zAZ!6uc3|sal)zol68Soqhey4
zV>PLR0kj5!357Xxq5`Xvn7Yc90EyXR$`J|dJ}TVhd&?n$%Fv?a+B2?8V(BJOXUWq-
zUv(tkGOBLRnsxV$@(k*5G7T=A0i1Ojzs)E8vt{<bhygGF-tc$iWb}r89k{;o++Rfk
ze!u?G8Ig@3HUqVvtdZZ7&sn7X#7-wY#BU77=Hq};?hS}C3>|be*8AP>+e4>$fxL_O
z4@0?t?vdYb3Fs_yf2OB<IrkozEvmo0FZMm~;vM8Azr3p-szO^yxQ^{I6DDNhAY_g<
z8^clanJWuA1HoP=lLyo2f@U9i3qU^0#ReKZBk;D*pyNooUUm2NBAsP6=lVzFZ7zNd
zBGAz^V$jTBqGEJK*P=GLS0rWwFmBbn47}ad*bk*>$%|{**PRq-&0CEw9c3IC>?kP{
z9q~?yIdkUIG+-2w+bAqVzDF?H)sG9ttaS727k1VA6(~T)-Rw|%AC79hBW^YBAETSz
zF+k|(9XYMe3+yE0zc-ry!}W;h+!|CUNtZ}N8QM45^uW4L4~8h~-o<+`f5xxe5*^JM
z=?<1H@LuXU{DrSCvB^k(GshFoOo0+yF%=;)gOSK{^+e;uirEWg?LIcVX+9TnxaclT
z2VZ!Z%c^!Gz9~9?n8dK+!9>!+-iu#Zq+1ZD#8#NU^>qb2scc^5sy9hu=(`WMOX-!v
ztgMhA{Lv;yLSZ~YaybJ%i1egu4fCD_mX-)Vr13#xdz$`CLAspOcH_14vn-!d5w%9&
z;w?06hk&*J;J1(S&z6xeaBLp<ss^D1lxWPN$SmW3%h_rq?lA+Yy{UTufqA(WIb>oF
z4QKreQRL4mM1L0y1Yn`0n^=8Pk#Bb{{y-$4eC1##UO>dDfYJKl0O1>@^%~H&$!o8y
zsJsJr2HD^bKM0L~^;f9brFV<3>))sPxC6*s<>)BSl7Ux<FW+r4IarM9x!5pBrV#?B
zc)!&*N4~LlHAcwXl^d(;CeR|(;HMj_TZWR!YmhST{Sob+=lDd#&=B2!r;JTSIG`~f
zoZ0Wjn-koV{Q`Q?gfL9d#Qw=--uI4Q-^^9UrSe@4eu0|My0|l^N|%D1|Ane<pc31T
zpT+x;bMxF>F^kc4s!UbU**ZEQ4^H?q-94@luTPI|M#@Iy;>F3)I~CL0ZIlzFi^+kP
zaQc=e>3E8<6gg@zJw;UbcKSWxy-{f9fU1)B>nG#~=v(m?%i(38x;^5vclBc^=Orx%
zPfmYx9K|#$%`?csoiY?Mo1T-WmSnd^c<`@GRS-yvYz<jpq`nah{ik)(f2_TRWT0F;
zru6}pllD^nGrz3!H!DnGb!jiVf(M;pC<dQzC%ofvsbOO`gKy$8Ho}Oe1mY51r!+YZ
z?b@STHlKPD9a%dYDbALBe%6CARaUO;)OnBdA0S_Ixv1e5>#Mml-|^S58JoJ;-5u99
z{>Ye`0)HR3s}=o>owqtp^VP5<(Initvt)nA@BAaZyZ!t<58Z=+YA=`(+LY0*5SF#?
z^|@7=z&udw!<dAUW&;%5dn6ZUW)ibR?7n{tKeV)`a$Ek6-h>1BRt0^})8Cpz|B-q5
z?V$00gv^Tsp%H7hoY9Rqth1CGlf}sMHSNSg>{(+|;V#;w-UrHh)LpAKo=IMZ>Av0n
zg^EHyNR*tn>hpBOAlRqho;Z2a;<d;PydmDca#xr*xgO7U^wTOQNg)jH0m4SF-{umS
zZ~hev@CfbzEN9+kGmwzvfRe%y6*UGnje7?{=-h^!4aOpOP+3Q73exx>B?g&BQzEuF
zP)q7yoI7E%z6qp&k!q-p-yZ~IB7b)#qkW<}2SaWWNJIX75Q9IS2-z|Wl!O0sx&ISe
zR~;BIZQLjazq58(>u&{MjnVX{r0&{2!%d}C(b2WgtL*Nd$2u<Qw^x7-+w<EN5AV?X
z=+NC1-Y6`1^6}dFkNQ&S{=EE>ciym3zrFu~?O;0?sMUVso&H06=Ert_1WNr6K~K@G
zW{}vkF)d{`v5W2K>WVP#LLB5RlZI$PgjA%flK(L=OD|<&M%2+xp>q0DEi-^tpjeY2
zgw5`NGSsLR@gM3xk)qD^?W@J7hDTV1bYn-7>$g)wxidh+6kY0)!QD8d!emx+F0A+P
zhGts*boxF<`;Y>8u@>1iS($D){C?s`)?=69zcRDGEqM9Ymf@g8WvNKu*hC8#;q`L;
zW-^u{Az_Y<cy{o~Q<D%emyZ_LPtnbZ^haFuPD?g<S}_ehK<L&0B#LDW-B0*a`$tHZ
z>^xAnr@6Pw?N~(RKex)#Z+O94L1bnz;v67~;a>5GsM0u@Z9eK?aZTgm0!Zx|10m6Y
zDAbX?Fv{VRxQiZu3srn6{>sHk3ia4mX>7&!w}svNSB1YX7o?nns6P5)&2dvF-Ph{s
z$HXxg(Yyx6AN}jVKlnfZZc0d#6t&0>-%tXBXP!CQ2?l&J`e6{pmwT&EJUR;o$#fw7
zu#u0=bNqoge0D;eeJ^(fm6%(r9|d<=<e?0rX@#&$VI8uvO`)<CY;AFp7SlDEDRK%P
zvg;ps>G~EEuG=6V86dJ1zi;vqtx2?gz$RBGsSMwhpjH!q^X6mOa$|0P-M8r*SBLwh
zs+s!h56M4l;q;r!e)A1qMEFz&xHxkpkmmzs0xYIyiHbT3XYKrtr!Y+CQ{xZCH^y&E
zYAD5=r5nFt@<}%OC8|wZW|O1Csv1FEz!TqWa=tY5v%GeaVgyxk5=36u@lB8gwr*?1
zEwZCugSZkkq8}q((2#PXU@u~5w&05007-VUtv<kxje7H;NiU)@x66Yo(4TPH1IN(K
zaJ#AC`=s#)H=*;FSy<Tbln!{$rF6BF6;Z1rZ;)gOpNQUpUT<)2-#fShp+{4f=N;B{
z-jYnt_dH54;kDY9W$%&y*pg3B$*l|U8<4`iA&Itdnw~r>omdl8Qq(0x^Zwwy)elWR
z>r*d7c5~P<(3(g<#cK$sZ8pQ5Vb^>!vUzV>DE{4KJ>x5jua+gZSemMX)uwowNAa43
zPb-PBGa|9d7jU-ivsZAFA-j&FySx_mPLsWN=V@Q_KMMYE>zbEQs5{*ghLGKH_HXr@
zN<=khM0H|^Ca!Qib*(99w|&a_o;JX0Y}sr29Mu$*q1QPkHLNKH`{;^bMzvkttSQE&
z4MsLY@9`Z*z48{cI^$+&z;*SQ+<Oh4<%HD>8Z?|VJ25fa(4!!MdG<BdT|a7OFuWYp
zlb_1*_Sx4fxYR>CB$QNst>l(%Bkcp*0+ttgbgherC}+W-s!CDtJJN@d3thopy{}73
z)#Q4e@bR=aZG>F!OL-cv$KHSi&m^zqHi0L%R(t%e9Qo;DN0|Pxr&U(01K$|mw%Cbe
zov#sHp)2_u5F~Q-FI^tZ703$6&vS5}$icw19#s!V4w|e$LQx;2T9iRfhz-8)^s3Z#
z!xAzB-j$yN1L~W)7lKajwEO>!SSQ4DmU8lk@A9#HhYTb*8wg>ZAkg25|Ai_YI4?JV
z>k?>N(lhzw%*xK&>!~Fr+}hDh$r^Zk!5!~zbI1NoRp>AOZSr06@E9nIGJvuzQ(ogF
zx+dYbh6CZZC*_VFGOG;N8uIn$dvW~h`-4acWH<U~;B}%6pyq?SLlW+V?DV<jLn5T{
zO3yTFZJ+lRc(WI)!@CuH;gXLxkn;=YadewEiC6se@d5-lzv0jO>g@=>tTmx+;TPG9
zo^-AZro1QgnUq5y@AJ@R%Dl@^2!XCWOFy(3=2WMs5$(oaSSl*7m1mW&QhFa~H-Z|X
zic8dyA2Xz;dDQsD*$k|gpZ>;K{MBjvSHG$EKyCR!2swLq-o3p8UqFx?`GRIv5!h(T
z+!#AtUb>m>mt+$-GV9f$Fwe27&@LYIhIkw~(j>B}jkUW<2ytMUGzWR0OB1)#LS!e0
zsnIkQ8KQz(Jrs^MrL@Y8_b=8ATJ3L!LXL+aqKyXu&?y&>3B~YD2l5bF?rAlb)uv{i
z-wLyr4v8#mF7R^get!p#91eW<P=E1^XkK&m|Bw0mAp<BbLQ3)gajvOe_)ZK4ZMTl=
zpfG@UO_&!9|9HtnvS33&SK{OkncR03`5^z9gQmoqFO>d%MG+&I#b|$P&i}j1j%y%c
zdl*0>%2`mKPt`P^00VtdJjmDHL4B??YeJJkJy7cpa`z9HNzkL-mE@x)A4B)W48&ui
zhr!^VsrIv4()!rTzCb2$h&lM~#0n52yI^_ml%|zB+x5RIa|`<>ML9AF_zwa=jCacM
zDhPVbps%j~$=V3%pb8=!s14B(z|>?rkqBtGJWa|?k|~4W9@HqjVE<asSYP;z)*t?h
zx*UH^y9w~(zTx8t27riM*>4b2^b#*B;thH$>?1&7<)1trdAyFA_fb-8a$|%BN-CSe
zz+Q-W_g#R4aK3yk%h^__LGDU8*0Z;p_66kmQP<cy?7eKF9YDthI_HxN^*1Hx14V0m
zYDWr{%09??FbXOh!y4*DTRTGE)*4hbn|zhY$tlWpWr}}r*v-xT8Q?{ukBT|RZlp>@
zJw$CjTG;;_$g@{d1c?41XaltZVe<~qEgqse7Cqpf!`9IRF4P_i+EMf+UcmMfoC}mh
zZPn-Qr~&pVMmQ;2lXL<9jS{8u8n`^bk4c8%y<#pzKJw0uLt($q4(=b{-=Z9XQZDEJ
z{sd`61$BXr_X>kua6-W5C-Si%Ux1*7fQJjb7)Vevunr*QzyboU)#?@U<GjS1ANfA5
zK!0~SQGj4-SwIcbO`@=%oOSyoa=<|I)wiSG0M+_W?<jxDKT?4Joc!OP4*t_EBB~!6
zY$$g;CfWV|WYC{(v0}Pc6fbad0C^q&qJP_XiNCu+lZ_8-0g!Y5H#X*5(3nw`l`wA%
zG&JjLPH~CAZIHlcdL^)Qq1i>h(sQ?B1Umf*|C9hH>Jc0e%p~z~bN}1;*m;#Nv~u{X
zdo!nE#m-GBS#c*V1$Tzhy&k0Hyr%8u&C}1=YcSku4+cnjWq>o{0p`!MrA-)K4HQ14
zkYG4SyHkcb78KNG+Klcpotg5#*S1xzUeH^am}a+t9e6>-Aei&?ELF_I(sJpq$aOHb
zB>kEY|ChdhXwqE-J#ia;%Y(@6hTd5(p4<rfleI&=(L^Y4oqmE{8AGGNz82%C^$`@A
zYU0C9LM}!9jS1zA2pEo-r0I;FqTVrp*i(fQYKPi<JoV?F+0sWeh{a!Y{-V83{ZRmf
z6Owhn*5!_Xt3+nPL}UrE$5ykr{}Ht#WM%L2T?%*||8;kcYcn}Tv4sn+ieVeIuOf7!
zEk@)YzJ|8an6odTYgK9}0^}@}GwaN<({9ZYrCfsr@s5JPerbsU^mS;0@Idxmj2j&D
zshi(Q&P70cGMz4I`%)LrR~raX_g=yNlF0@;(hFEru$@}~O?b=d)E+0D$+!%xNTtvo
zK=?&fD~4a}EAzQsIrSOLC13s)uJwa#!f>;?<R>be^M3g4_iJ*7@6(NCXG2Tty<t^!
zw^xtfaJYSI$>x6eY2os=>c!_3gHhtBrTh$@n>fF*Kq2<d7u#opaax5&RYrmR&06wQ
z(nQbr{?{^iIH>7l_8Wa`eRy=YB1(l<g>}+_v0l+X=d7$G%j>WrjmfSL+`<Q_6`sfI
z?65cji-7&<U)>hi>f&sw*@W^1qBvpFbV#Cj)MFE5MFU^QI-7H+BJ*#T)ec2)8>+Rf
zvMAmdFxHK(UOxjHRAIKXv<?)(8qLp@DS!K%<=P`u>FDd~)F*X*HN{_ew}OXUNfGz!
zSSoI_$mJvOXxWYnzNrl{_j}M&<#3|KP*d69bfJI)(--fx7}yf8ID~RUJjn6jhP4>>
zoKDn)IurYZs%P?J^Sd8gDhYt_*S5P2H%oGA8NCZ-i7u2lo5JsQRhY6tE$mshq)4jC
zSK6T+)Xj@I)AiSx+M1cD>L;=SNl-ptWJ=<|JwP5E-@Hp-{KWrSRo`v)w9yBF$qnZW
zUt0!qg}gJoXL2au4LkPJCb5E$-Hxg@>ZgR4S~>5>EKXhiZk+O_rksI)u2YRVs>c%3
zQcFMyr}{xUysy5U-<g$luuIVnKP>#DV622Vm)BDFL)EbK=C8p9MAG7`)1c1Ug6NAJ
zTBOY<_b22zjJxG2Q>A~uxpeKqWx2&D_Wf^&r27bxA%OmxiKau7{OT{eEqT7~8#zq5
z7d)Ar**&NKKJzwm4m>l=qwwEgTK?=6hQ@%y{2?KM0ZHyZz;dQFmoE?A0i|UZFQR$x
zLgE&V62N4CP>4{0S>y%$IP;%njYY-Evexuvb*#=9Jk-0m8C?~0M`>~+4QUrB4pI{Z
zBrr`?8Us7y^5=0$ng;|>`=Wn|XhBf?hQCLCtbF<fA<_UxTUSzou$ti#m!1-AG`n;~
z+cq*#l{4wgbNFe;R)I>QyPrWI9q|yV{b*HmMn~Pup~u44R6U)=j(w#{0b%+ck%dO|
z%u)X9cztsI&frYGYNE<vTt#v|p>9Tg;X2=gII9<D#|#an&v>UE_Z-YpzmFoZS$~}Q
zhQEws|M0j(nRU+DOH=qZH$Tr;!O~Ls<6%jf=2orCBwnH=$>kL`BGS%|C{M`SpIPXd
zkuPV@y!<X%)a(HH(b75YE<LG<malaa9wc>auf#EztXoauUyBRG$MCF=`<=xmhzp11
z5l>`K@`k+o4C_&*CSFDk;*qr2`J7Lki;QZ@JZ_J@G*8vuDo=Rz)ZSil^_T|z1=zS7
z$!%#+fFK3lI;M&*6v0;!uqc5Qy{4G2?3uQ6c8=khm%{B&Y7fv~pnstn2<!veUXUC$
ze+AsBaxg|cD?^4(e#juPcCU!ELk}YR@#dUy&7RMk6jNMGMtCjAdJ_|O5|&<aeMq*T
ztL&E9Ke-HzfipIvygeP4;GE6^!cT^2Q#FegXBU*Ux~X$+`kpzw#CmZG@(wz!zmnUD
z2=^puj|B2!Gt3BKZvL3pd#kTUO-t{<dt0vCo>sX{HLpo)vPRd6jz!86D(qj*`;9Jl
zm0-7Z6}3{hc4fufAB4n8`zNTdI?;uVnRK9Dq279gFHn!Poc;nT9TMV~`(7t<A^O-9
z)1P@b3U$|V-Y*y^C|BdA;ifhDoDo5h4&=wBmvMwFL-;={ek_r9NEqq9QQMf)e&D^a
ze1XFWRpF&O3erhXC~~~M2ar8{YO9zopT=ZOSm{t$f^3ybJl%?6kMi%{xmNJ>>XDqz
zWxY8nU2$6t0x(bx9vPL<&=<Kx82$g)d(W_@wzXY2h=6oa=_M#AAXTJ@B2kehBBDa1
zM(IeCj+6vJdJ_>46e3Mjn)D(Ndhflcbfl9|LVzTG!?pK*ueFwIpR><<-hF-V_nq^D
zD`5bWnK{QC<9Y7qe(pQ%wFb|%rduDf{MZ+IXzZL0-80)A1G!FoKpUBx2*tLnrQ=#=
zHku~+wBDnctj8mipuOYWyKM}mH+-;seD6^nXZ_UA+;LX#GDi+W3t9rmM|@<;2(wtE
zFZ=*iTNxLhrnkOnSS%&hPA}YEc_OyMdAZ?8Zc1l%CvzFbwg+IBaMD{cm%(BU>)?O}
zPBORzSei)NIG?f&(HZD??sp@I`FWj&o=&qg^HYKgeio-)|5dN07Ewy`bi;@DWBbY>
z+jytY?d&&?Eic`l@^BRF@PBfThxx6d{(-TNI9C3d5|psgn1*wegsPngMcF!@Zb}a8
zIO_AIlhytC>T%1-^Cv^EmK12td|#tF5M7?6D@jz;pCQp8`Zny=-SwN?Y~7rkap{mx
zS0>tCO4Xmc#`;N1j;7c%8_}nD7aMMjV-MPx8r<$uR;`Nvw(w0QUPR}uk96gWBfztI
zbOi+dS=f~+zF;6^;d5-H$~J4F%WLVwXvNVR=H>1dw@)H(Cuc1zi;kflGSWAC39Ty6
z=$8=Y!x(jTUUyx#Fvcv}TgL=l9WlG{LQ0U~yFkQoTRbJ959?8fDPj+HWo9RAT<zUa
zmvEzg(SLBSq%@iGt9rYEg1H{W)5GS@3MVM=S#^@3CSrulX-&*g&z9Qp;D&<<?=kT^
z4LYU+n#a_*Lr2cFY@WZwKl_o-_i|V{l-5U&U_84rRy_F=P{i?mtwP#;TB;1Jo(tu>
zwqYJP8rViDXeLOv@$t9LYf)Tq9c(jkk`FaX2PMhehp+A+r3gagyO(Br4k(m=K;DJZ
zHP<xNlHP$8_T_)<2w-P8d~aYlS?luR--w7`p!-2;zzA&o^YD@p8{qMrrkjOrGk`Qq
z4OoG_artQmwvURu8=Gr{O#A_XCEty$R&Q5(uT*Tcq9Jr9ef%;?8Q>oO9z#x?<hcB-
zL>!iKDnE=Y_UJWlv8J5dr;6J0>X#vFsmn}!dbdaCBn(Tu?U5iQsk^19@{wpoXv4<h
z&ta1r1e3n58l-$|br#UqPYry<yf;ht<z~A}opMy74dS{?R05ec0PCSU;6s~7?&rHK
z=`phr#(vjGym4{`*|bnpgjRM@TH=n<H9Lj5YYT0tq>h_5ro4%*351jkxO!XLtvy71
z=3!eDh@>g}_|%Yfd(cK$MTHyekOqGL8;A!4-ij!rsXc<RcJ$-=9+V^A_)Va1a5?Eb
zMS{G0yGGzGS`yJBy+x}tgVcvG>AQPDM2L;18I9FKjMHgHp^JiEXW1rDJ02&nB&ZC^
zziAWHk=z$toBaOlu&IU^uHv%oHXnu_$th2ih7wOq-ou`?9H>KaJ=_w%o1x+2z9z=4
zb~AHa^AJ;;@N5jCQg?GJAl%9Fm;v}$f9r7kzeooJgw+4>_1^*MfQCa0Mu-_0{Bw|=
zcuej;F3TU=CqDQq8);+aXLo;+o(KKX{y=n|_Ys{%Qxg_0?NxE-j;%N?c}QBh4!a1}
zbBACJ<b`i>m^)qJ)%~9Yf4>EU|0|Mg|L3|&?^)r$GNH^-;YtVuL2Uyj{)Qa4BkO6k
zb~z0e`m>%LX27FaY1qSHOUtnnyXp*j%vuVr^lie3$FHZ7!mAD)fA4GPF>-KgW#6=B
zBfo2da9Xc4DStIKYgE*Hal9tex}moyw;Lw%G{2u~-{?H)i!URf8L^iCc8`2k4s?8|
z0`TF4yG~PAePp^+#}`CNO4AK88g6uz6lXsiPHz!N6*%odL)YYvPRp(*U-Ws>t3<Js
zBv(C?ETB`<FU#FjT3J>2G&5K`^poOB5HIz@?d)B2a)ki(S;M$3JD~-ixJ=|QilYXS
zTNVVfMNt})l1`Ok39r>@o$TF;SC8FD^>=tIpPUmNz4w5b6%Pv`%Mh-0Oufs+Vd{6C
zY{z-ylU;o61;wA0TT=>0i~DDF8b=&qU{aMnXM&hTj065oIl=b>;t}#(|4#MA&0$x2
zB@xZ9Mv=k0CRg|#v3Czy>O!_+BBEcSEMU)+q;O@c=Vzm7j53}_b7-NI0;u{p=uU;V
z+ILb9zL*i{?xm3y-pi(vL!b3HL!z5xs5wG_<NdJ7m?q3Pe8yssEPxZ#cCoQ=%vQE#
zi)Cmc=AV(1IQnYr{+X}YFF5bV=X;T&YEF8lAWjf937j+eN6W$lu&hKi)j@~T$G;lc
zB%IL}(-79(QvCFOqD@GemB#d)k~itK%;acUBa~?slyp$ay^5D+H<(rNUez@P{DpCI
zrD8RYZIWc2aqm4Q+Dlh){>TLaO**;jHYP+CI=0L!$+ndnlqvVBv#&VjeH``7jn;CX
zoX`r|FO<hPXw!L<PG|>WEVYBa$q`7*k*kVUILiA)SUtdYx(dJfsqzJPcy=g*eQ2KM
zG4V9I%#%8it37f24$EW)g7#ffBF1i!Ol8gWm7G2`$!4wn-f+UC>-!Ull!tH%_p^n*
ziL(ssFZo$|qd+s_5<m|D1_V3cal~vNS;T>*D1Ca86gmJ(DPEDnO~TE_4z$*ZG;6GR
zgpOIqHOjZ-b|w0eEbO;PDl{Gw>L|{sH=U*j5965FIHYv*!K`$Ug~d%MKPd-g9!*2!
z1xkPz4x&5`bm1E|i^bj^5UP~d^YYh3L5!EDSUu+-@;}h#kML<6@x{qE_9A-_WtY)S
z(OfO3qXVkkQs2GUf6?5p%v)b<W|!kMA19^SRR%jrL$fg9f?p!NRbpBB0ijS-!Oqs3
zoGJw+tz0#Wi)tC=HBt1QlKjaF<w?i-8eiY0s3<$O5;(3&bvN#Vf`gE~gKr(yimVRc
zSJW!iJJgbx+a{WLBAl!hze}!<2P${)Av85=E*dxq*BV%nU$h`V9$u%8nHzINt%`uO
zHMPVMPH^n4u)E!lvB8WRjA#~>2IK7qv(!@~*;Iz_GO=D^IA`s7*(+%u)x6)FpN;GR
zB$*IdV<j#6d)3(Lp+UUXnLV*}!2&DKn|=}tp>4*s3^8x$=4GJc0eJUd<$=_M8M&aS
z_}*+$lg1+_A^iSdSPWBSaU%gWz*Pyi@OjGDa>QsLR&D5kJll($0@^!$9TM}ctqkXP
z(k{WgrmD4{fz}DX-O1wJT3+t^gu2;C2j4Tod9I{Ow+7ac-7#WtCc%{XjANvCRs!@@
z_Sd=HQ{)zqJtoYcc^Sb8a&f#FPOXvGV4x1lRKkYX*1ztRypt0x=#F2&=_iR=U1t`2
za6D*K4!>_prkw?~0-(jXzdwlD&bu)S=I!34{BcooO%Ci$NyLL(nn_})rdhWlbm}$e
zjnKw-U{MW@RJKWh-=C6CF6}Zq+4xE&$1ezfw{PHPe;mV7)%CgbbN*%WFku?|P)R~9
zi_xmZU?i~WQf4SnZ0=t-D~ei_;54uai>K!_dfu3)Kylw8|BccmP>9&}p$3?sF$juP
zx{R{c;4_zx-zR-gf)Y;?hCY@iVpFzmaYghMV=N0R26fi2(SMG99#W@1m^Z`j99#Cp
z!sh)4i3G;z<AfK0?T)dC<|SNeL=+*So92fW^2B|vW<(kusB)&7`R}q^t6jgwakS;J
zX*sXUjy&;1wC<3M<JkJ}u3A-6^OSA;qtpJXp;%M2fb;Xmw)2w{G<Qi!t{@#JI8N`@
zdTX8D;e|Dp!acUEKM5A7FehB3DzA=UwX}Lpr7Qq_86{_06AN(KB=C_c3;{c|1AHLu
z+uITS?5cLCKTbv8Q0xX-18BLSR@p5;3E)`Gb?}#w`d@(`16MH~m!PSotQ~*n;wjE&
z52H(Wb#`QTtm-4-n!{vHPs2NX`6n^Ov05d%NgxZ8JJ<Yv>f$k4y*pVcNBB2I(_oFr
zkdlf;0E{`@{zq60pHmAjMH9pv8?HjXah;f`!Hgv(Z0C=75d#34Ow`3-^=UH$c0;Eb
zS;|;ijrY}+_hi2*m3&{&-bG1U6dX8#{>LEG|5;V+KgXDz7E|K%q|wCTGjVF#ed1F%
zlLCj*GOw_J?_GyN{8Sa1AwlN`2r3?~w`t^OHfRWDE#G*nu+b6Qt+J8ax9Nb@D1D8{
zp$jq2mvx8k^S$*`J5@o_oR)0xHLGRu5gSs3*}F|&5C{_9UZSwp5g%2&QK3LV7va);
zZXj&?hA`y<j9>-@f*GMTaX^U<6vkLhs;49aG0aHzMR_H5EOmf=nvHyHb>nca$mU%9
z_4{M5&MBN`xpzRf@gC?ow*bFbv!4PVee(U7jbPAZk=>`nOQa{LVdL=|I$-A5UOu!y
zmJNF%9``in#5$d`He}1BK!uHRp<`q80B<pv?;y*)5$t6&B7aA*u(fs1!&(0o@vDYH
zZayu!69%cG&sCd|q#>MX(HNYGx*tFD<>Cd6S?dg=^~KgkD6Yx7>%@H~$yyXs6*CqC
zu={6L)8t6UR2xh9n~4dDLFrbYO5Y~vN9_yA&1WIFT*J8s`c6k5A8q<zWAH|cs;hXJ
zPr=N|{|GE~nOPudJk9Sy^`+^ar>+<(HSElUatVsQiN(*Rs98e%rdU&irGiJ$&6E>!
zOaXqPt>PH7$+55jeXbo$L*!|RVf9RC<(bo-PQ^Kbdsm#cAmxrT)HShb;S^|z*ms(l
zO3#K1A}>$NHGjP)hSV@lxv%5L?#RJQ<@5d5Yq-=dOxdc$N2DYuV9~neBT7Kot>;S3
zXTOOK@lO_#EEc|5Vpq9eLK{|0Fn?!8|H4AZA6x!~WgY1o{`5c#%x2-gq2Yfc^88n8
zOa6Pjx``5`sQctTSQ)(8aoloC{-RazoUgpPOMBp}*M?TwPF4;Rej5_nCMaFZBkd4I
zY#H8_5I);#zUkmv&9=4L^Tu@9x>N2uY-g;$1^~bQa8C-wlLu$VJj3pn8WCFVR1e-q
zSI=Lc=|8#MNELiAwqg8=!{wfdN*y%ARY%2_;{88XlfS=GXx|=mC7tQlQodIi);%od
z7;kld*0@o);~d2NIn8A)rEYwWPD#|RcFLvfCqZ-ZXP6=`>%M%h)?Z^xD#OeT5mBH(
z4w^sq;f$aIiza)Sj}yBQhJ+A;EMjkCRR-DPb1R-E+!{W`{<yHYajnj@r+AI}%0*&N
z><638SYJEem5;U;m4Y40yuNO8`7qV7X@=erl(>3QS%LDx;n~rl4)1X94$r)`)B01%
zy0bAH(if%L3qMWyD;#zMR*nSW?y~oUngeZu{=2rbHb~CC4aK`Th0-4nx!7chp6gM1
zVj(gZO2Ol&Hi;_25Wz0kAbG$hgCLy>3mL3>bM>;Fm;HKm>QY(!zI*!n34^aDM?Wcx
zwZ6EaF@8w&==~RurI;1<a*;B~-b&exmgta4?ixXB4;tuU*9m&@(=oa)jmN(D=Iwn&
zC|Fm0es)kUz<I$4I+Pz+qfkspz|J-xl?rDryVujFeWtUE>3tGf94}xykHqmjZhY_}
z*~GF}gx9#VDE-@{kxjfuul5rgawW&SUcBUP?=d)z)p=&-TlY7^&lP${`1PI$y3>V(
zNq2U7LYOozi{gY@c1=XF>}yYOnjqr-^kQC#=aGR-QtlgH2bpBmQNyohLkaWEs_aT2
zYJwxOQ0zp_bV3K-2sas41$$g(;GR9rYcNC0GE}D_cKX7Xh>n=vsW(SeZLXDU%+aY;
zPAp&a{TjCHUZ$gExomVZDwpNzP(VP0_d?>k<w}#HN0P4It&X{EXHWa9#7SR_Y*%m1
ze}OTF?VU%_0_p*vEdeB~$He~cD7bS4K*85G$YK6(9(~Q1s@VPZA!l))xAN<XN3RX`
z(qVmx)9TMF4hJ&KF-#qgHZ#!On+K>b5Jrl122<<PS^_yN1*81v<;Iy{-<$77ie-lG
zUh`EQa}%MSFFDVj97J!fmfZN@_`ut)M<dgi&>i|o783#nzus>yPd|;}v%%vHf#_-P
z>OF&FEU%<&8b7Tce#fM)@Sx-1N!BB_q*Ao)N1M$jrDZXKeM)reQ7Q~s3>2J)A77D`
zS>7_ziEDLTvr-#Y+*p0RD2eQzIK6pVU$Kn5VlPB9Ivj_zBLhhBHq4s&fU}1j{W-t?
z(MOjBIG4v&(4ijjd(h+Odm;!d{PPKXWN7d=TEfoso4Q=0((oxhF9w;%U!{v3)K})e
zG_ifz{^m4-9!3OyyEH%+_L0M48tp|7jWmv1yn7qQ<nH_K^z+o+Yd2y>!?4=pv9+Q&
z7~Y)_IE9w%0*6<t^mc1@akIl%mjP}>U#6!v@tF4vJz)T^GT^Ei>eCTTz3efr(Y9Ar
z*ztsYifz^!Z{(g&e=hJNqT)^8z9CpwDPH|t9sX}~!0%(Ff6^R~4<@SVr>ZJa<W5KU
z{Ur5OH!TU9QP-&R7edbACr{-MhH>=at$uAyHQ_vvS~+S9ingfwF4HgenrQmPPIzod
z;G3%nf(&a9RGqq|FXX$t{aJ+mmsLsrmyCf<rA(`w(957gT7ewvO3sGcB##dj*8E}#
zKH6p`nx!<+LjtW(hHm|w)Gu_lLEtgNjdeZ94oxQV6?}o6ctTuJL#UB?Y##XW^{uSU
zMA&qBe_qY@v?|(UIG{iYYm^~-Z+)4vj2&f~CG!q}NI47v;Bl-A4aj@p<K*L5ax!+K
zj>{?tQ=BJ}qj=eZ7c=1|68GKOet^$>bNWPby4Wb?DBa$fi5uXN3HN;`S!xSq@`+qo
zcw+luV+{S;;bF7WfFeBVXNKrw{^BceB3H=PbfXFKx%=VrLa|+%&ALTNfwh0r37ahi
zB{R8myK&5B>dZMZ(23)C3!HrDTWnA$j#9gPNFwFJ^tIDb3g)lYu4^Q-opD#RXS$`%
zu}~AA-zpi&r$mY+dSGFC1hZ+$;EV6#JRGgg_3^<NdT7QR{nsOjgIddiZaU|kjgH-y
z_atBPf#X13zwWXR)kAM5@vIOZC-J<eo|eI_v27upr}gg92{7>AgGuOfL{`0^f4b{T
z9)%T&65e}C`CJFAgyPO13FD?3Z0Bd>AbfSNorPR9aL|E%1Dkr?-y$D`6xE^+6^L#2
z=*-DF`7}!Uh`@W<9eIFQNIp$r$5LfAyeD#Ink@BBgjl|q>1Iz^P=+Q5Znndp#I?Y?
z)l~Q)G;L6<R<^HvLkcA!%f<m?HLwjFp3@}s@ydtK`AIvs&|PlZl>vpACdp_t4Vf;t
zj41X4a(u%2j*04K!wG*4%NPOv^MbdtTUFJ0Bo${<n|B-X!zP)K;!DSDF^t5kw)taM
zW)uv@Hv1H~{aYi0uc`4%2Oy%*g&WQJ0b7LdW*BdcoTs-QPCH6bBU|HiZu{bkM^gF*
zs!vTdt@s{9`Y9o3oYT%ymgVOY*+!~3h1MF(@`=n?a_00Tt!LS_%F>}+tNytGdp$hk
zF5Pk@hxRMgSIr#fknRO{jF|h4l|k9Ofl>4}PATpKA-;Wht1cdAY|El4tXAX?L=iVB
z$6orMOL^<Y&sB4nFoMmWK?F$>ZKm>BCJ>7T7IX3J?721HIM&TZ_33YYh&gB+Z#X*<
z)bE0L!!}9hsvVBP-jNI)R|_|n=c10ZN?$I!XqZ-Anl8r1=2XYqE~nfWDVg}mpTqA+
z6Ef8Ed+p@m;+~+3HKDV<$2A73=phFd+l&nE!?yF%Z41)PCmg<Hx6Zv`^16GIi5fa_
z32%&UWUJu}o{>n}9f>ZKW(b;eUwbRt-E-1eoBNHq^M!@#@D{{zA4+Ub1D258lJvDE
zGQ;S`b2YbfXU@D(vAV)}^`Jc?U?7axM(DvI@NR?vEW8PPP7y5n9*#G&Ccy=-Wu}2$
zeQe_{X_wb=$+-Pv>w7boxNAKnaq4C<+uEydOQE2Fm6;mNIa-&~VxMKwz-7{$a5nu`
zd3mYd(^Jps)VQ9_p1nXFS38X|V;%?Bn(rLpN#+uio#<f?i!O;YFtoO}+B@T<7|vU)
z{yz0_W1z|Rif%36rCAgoL4LX{_#F-rwc`!0$J2iK@(TO+e5&JNmtJTrQmlJXBG3q|
z7CM+rIRvxvJ>SHY5VSo}ZOLe|slM^9C`dEI)BG;CSZ83}HR^sQRcg^DvJ~+u!FL8T
zk);<ri;VNmC#X&5p;=3lGYxHeKB&lk?bPWIER%XSKUS%F(7;$IrFQssiXSqOc%GDo
zr1deaDF;!sXHACzrd0df+Vf&K-~Hv46YWv0v=oU~A<t<KhvCDBpyFF2OccYZg_HSw
zv<c8D5?mdJ4s^?gmnT-i#6R}DGcynQZrLJy=-FiCdqZ1E2zl`CO=drsjbk{-M18by
zX4gq^D68Zf;?Ty*lI(9QUJZ=jZx)<={P9|88|RZRkOv0*5t(HYfG4SBC-YU#9^L?D
ztFo|~eekRyyY2mkwv^Q4!fPM%`q(ML_A0)1&-ND}S+$%ev3Iq@`yLel?x$psxJvf>
zlT`1GrDD3JHKpgKh;@Ga4DcljQ6uI!q|CTy2UwE|Ho|ii;dv-q!t0yZrq+tVDeWT8
zytHQv=DIMEFVaU^6sNryQKS3&yLU*5NZ!du^2dygx-!!>#PW>4Y&ht@dZ9gB{8NZ#
zPYxZ5!9y@VrD9AT9$fU?P$SqJuzGSgj}zJ9Y+Md;_6uqjlwXaY7iZgyV}v?Ktq#w=
zZCe!Cp<Lq<T|fijh7!Or2;&nk!CF4eS}9G~4Ha!3+1G8#plMeYpcIXKyLRWwWv8f;
z?Gh=#5CfgI2(StG0Z;}<Zb#sG2T<EM+rNv{P*H$5ahCKFvwG<rV7$%@7v^FU7<<OU
z0CW8hGKs(6&Hw8CKW}MoJMjF{kw^Z9F2p>`Nj)=rYe=j_2zjGe>qC7f1oGzNc!5xs
zz}v$=`E>ICo%nS3Fa;%ps}8OcgU2SmHe=g^(*?tHgJ&2G)BIq}u&I{`WlD@Sgf1*S
z+_~)Av5~`{x&6Bt;^8zv=c)L?4@i|Lo)5#blL;U$5UBWn@=dcFCwR=Nya<HVI!M+U
zd9mAH8})uFiGdZwZCGqoUvih%z5)v8<-Zlq@9i*N(jHp18jv--pKrH*FXK~pxZs_W
z%Q+E89{3)~etDZFOzlA4hawn(INpHWRn}5DVi99^l8%Qbm?dztA)uS|xv1O4WJKk}
z%R8fDPpl#UR|852{=jI}OlIsa$-P2_DE>0uycKtiE?XmVHJwL9&`f|skZ+&?$%@4U
zxHk1Eo=hKG^l~n`P+CwHHGXts_m$eQk8`^h7RnaW;~dx&CYe2z>ya!`%TU_IQiHRI
zn<MP>7WS;4lH8{=o~ve8bSs#>@U=$Q^Bu3miDM$xT@6L5u3FKS1?YHNCv6Hdtv1KT
zZLu=p_ms^R6rF9;07%kJNUv97#nr?J1rseY3~ZC^GR_;C)W$aIBUkL7ALVd=*t%*P
z)r+L@;leg2`lc@`(e&A^?x=UdV=|v{RD@KZB<xb9`OTi^#_>QPhpKOdDd<g`mc+%S
zytblF&brE*o10^MLaH%UAWDW?mjzA%sL>E9nnQ&?8q)}RLdQ+}<4yaQQ!28~i@vs9
z@mzl>fp&{<&X!O!ixNeKxl^{FrU6?4M11>aOaMK)PCVfUgez}S1uOKFDF610r(r`4
z??5p?8alUD?d?zc4<i)ZHEK@Wj;vPY7Ko{vsjL(c&Z~V$@Muut#s<al1x{Z+GoUi?
zP^@o!w2rs<h*7+0LF31_1%hvwv}KtRr<rLNlWNju7`asnKMl$H=lXg$+Mj^4tDG<H
zIxB2*q^)_zDQOWva4u!!qu7+}yiw2edwq8KvdO~IW0P9;Cld7P>KINYzN~uS?@}DG
zP*~-H9jS#Lmm#=fLb#|>Ft!ezxYJ)mh2mE~Yn~K1tA6<^^ljd^Fs>_4=A-Ljfk_@g
zC$Yv^&t&3yA6U;B3Ruuj?8We8L_}oB_`UFWQ>vlRcBT^Ovw~?LH=r6lE6&zo@=2df
z#}Q@EI(SOuu;^->z6V5|hLckkaQvThRyIeb6T)wn_bFaAqt41Qk!V}XOVdw{W5!lF
z(0#(6Dk6$ZOEP+LUoFLAp2>{5MxN<xeOOXv9HVL(lj-~M0_1bi1E(HVaIDIoY=_!^
zq=%xLP%`Zla}#Vr86Qm8f^OKojn|UyzuRi<ut6JcvvYLpj8iYm3c?pTh(0k(IClUe
z%+4HrrR<Tl)Zb$6;_v8ya<Y&5eylP!sx}Gh_*2ERzKq~|a8{p?M24OPWl;DoSGEK6
zLaC9V;RnEd%~93G$71<ZtuCis{}#NI3_4D9p8p*Zg8Y+L3Ia%Wew76Kt#tioi3Q`A
z$P+hr!O~s)PrDud@!9|J`rnM3^KbF`hMhxA<?)H5n2@b-uZv2Pgd4KC^4brWzuoWn
z7PH;;DL)fs#Y`vklnfzW!oJmt&a&omzHt#roJhZk;-u#y(39y!oa8d6q0driLPN+C
zBMv+iq{XzTqbTNLWXXQRVHN}M6w)0NetAlGkyRB!r1DBMa*LIH`m?2|?zd(6A1qh!
zO4E)m-6bYrKwh>oJoy81V>&<CF*9IlBN}6cj&c-}^MZx>Tz)vs=%7!j!WEjEp3)D0
zdB|@Add}zJDV{Z|^!)*R7FI1xNq|U!L;3q~T9DFnA3eVoKTI}%ycdzcqQE_Gel_q8
z(h{0%;h8n1PRKlvTg1>P-gP+>jG}&>k#2OI%|>iPOz+f<HT#^as@Ies_^V%LbQuhA
zws$|JPS9=|7MaD2ZQuh*8TtHrD6Vg(qhc<^WYDPgZ=5WRXsJ4srXg@T*g1XvHkEJ7
z6q<LGc#agMD2}7KP1k3wL_6Rx#9K;h5v?8G9)3CEh5gm_`a+}>k$F&n6te?+il)z@
zBF)#lwz04};<-lL4f0G;6Bqp`ssN8EgMpST*##&EnVD#c3tp|pmQBeA7l$6RIoWGR
z)H?OWz_fk1{>Zb2-B{tccsDpgxLis9d(5kOC;N%48Ez;H*@0nmLkB~t*Ne4KQ@hcM
zaF1Ed*ST<=L3^1G=4I7f{(<nYhb4)8M{~>h^b=oB1(@`Kr|UZ|xfQD&hGbZAFT1Fh
zK9zezgTDa7%)6=wZ*0vLyOkZJp`0)d>4^CFf}R{~V_=aIo;kL-?`kS)c*M;A#O)Je
zPQoe>VF(1m0b#yD*x#D=XhhQ^$EAEsuqm%bY8Y`2QKekl1z#qbA5=VbZxW~z&2EK+
z9p#d`e~45AfAl(9^oxSMSqJ&(;=WY~%AR=~+OYq%@|(?gO0t!$3+3jE1{oH?kZ+wC
z0Y+?3-y8d|4=9}@H}eUhEzvI1BoAF|<d?wc#D`C8WTNdtT+qUyuU^z9Xm~}KEy12m
z()svKqsKqbOg84aXdcK15w)pa*o>I?NPaKY;BcBxPdrYgJ*nICsN&$E&ynF82~V~O
z6N1a3tVT)t9^URa>0&1h8S~YYq0E=hSSru?=DEk;&Z6tmjM@C^_&)b703OGOH8;kX
z>*8-e=bP28iqv$aTDPLS(8DtVm>_l183=+^vg)9Jk%4CrUPSllo#;73xqfCJDYCKc
z67!f@9iK442}@JYM~h2Nz`K`Z)=3+I{i18W=*x{M$IxUZ-KTr^$!?p2Fb0hE<XG?w
zJgfxNvQxN<a2rp39=eWfF4;TBm$`c|X=|LZ@1(Fh#Nd~&drkhXMkc4>+k^8~WID5h
z$+2C{f?HPAIMxWM{M`(xFGtV!K4umQ+sz+enL!ZGqQ^Fx$bx0B+XfwSZzY>5ug0#)
zPgcMqen7lVTfStzF1s~J1Pl7OB>p8KavCTBdiLg2(NCeNzlIaG@8`2^F6_@EJ->FI
zelA>}?r8o%K2ZBj%o3)!tR4deozyYd>;^e6GmR<U<M%E$e|9%75w`F%{U&)v|8RcH
zf`F-<(GBJc{7mHS7hf)+&k(d*<jGJ{JZ3t1olNa>t=v2E%)<E0w9FG7gZ&ks{IjL<
z_WF||7254@rDNjqvtbV~A<l#eymx=G*3H#nMNYA#(_bVIU6J+`0bUwg5~nO~I#*ny
zmeN7?=7%eRjvB&iYy&=>@cLuJyF?{!&sB*m)_$Wa*A24TH>v%~6E$UG{o9|(uWzC%
z2YJkErb-g$(Vw)5FPX<F$;~c+fVsZ1orDo0a}w0>tpt|Yo=~so03|j9&tNTFREg~N
zoiEG7w_~`vE~(b~X7F5WXX3od@x9mA37gO|S)vwF^^os1&UV^2Ay84Nx4p-gI?K^@
z<!toVtOTC!QL{<M6StESM77tC)C3QKguylb&soDsil=&<H2#c8H88zIcnI7x32U@i
zCFKGcMpOB+=w5>_#s)W>*4fUIobp@IWB0+XEDG?#yM+1a)J>J_B@eivuSL8q{ae&V
zB4R6FQpRLe|NJPa5X8_e=l+)Wn1jH(%Q$zkK|9EVIsDPrWio%fdSWQuVYvl?H<Zm*
zM!VZIZ;)+YzmAU@bou&98%`L4e63LGdgM`y#8*>P5cAktRsw8V>+W4}q+sM^l4Bzy
zmuNKM9)^0}-_|DMwm_1hjpAsO^u3b#cIuY9jvNe>40p`b*4NP%Y_A{MXI;@xF9|&b
zLU7#^!rfBmpZ2gigo%XwW7q!AzWz55N)88QxFcfZ%Ra|&!S$Eg;ztQ#O=<p?oazoU
z7D>Zu9D*Njg>=$@{u>(V>YvbkJh*`RKF%ev)1W%BT85*>Z3Auzf@x!8Emo`Q;dj}W
z&M%fYYrhN+aGH5;Hk}Zmu{?w<9-05XmU_VKiEcqk6p=DfmX|gNYD$vUQL%+BXD8mh
zd#LqZ8MEF$`(X12<Vi@Ta^W2sS#W$%Mw!ZXH{q^=GNJ64X&xyZ#+f9rb!0E*W?H|C
zaoN%^zo$*%6k)fe^ID8f1R@#*dYc#Xe>NUvKnt|y=$+UbS1i=J56548rd8K+7T?~0
z<0GAIAu1y9pw69^3Q_>SG7K^L*0kB0m*`A1!4tD;2)GW{dH}J<5w*wn>qvNDbjf0V
z7;G9KYz2-aR(cqb3UtfPdHp&f!KO5lR`avrF`V3l*}exLL@e7b=mlH+i=p!S2PR<z
z&~yn(a{y7eT?E=X4JXNJzmBB+f9_=Gd{8rqMCK{Sqn_Op8WkfJy*Uz&@%5e56_s+h
z;*@(=c048mDFss?LXe*(In3;&aMa>^eQ@)|H-10_KuGY*(BlHkfgo}e&4jT<&~)T@
zI>8Ej)ZRUK<Rv&jYgO{)aiPGP_C>ZeewFX@7I*9RXE2~9G+s2rzB)fO89IzE@c=sS
z7kh5vI^idGjE~XVzJH6^dPRFC(u6g77jV@zLPPYa&F=`(pjLxdy$>FGvg3=e!b7fU
zNr-TnVwGVTcb_UI&n?p)Nr&(2Ww()fd%+@7)&w*BMbKN6K<<a`bRYbH++y1~fFUCO
zf?>OwijDnEW=)-rxKAqbk)ILwnAu&OeUiH5VepI#5k8f&uIVY5C_c@ucfC!N|Kw4K
zhGlK7Qrteh3d5nAbEHBgde5yHiO6Zm5MZ3ngQ`bi2XrpRCmwGrkTCTP@|3#q!Rp-g
zE2gY*`plF0MekGJD$x~?DkSp7*S2-AhAouM&L^U@PP?u)Mwg34NW98>s2sB(qxr`9
zqqLgM!F?YNoZ1@n;BsA>O=++y&OReP3B(&k0y6Tr>+Qo`iq1#MC`7YfamK==dJg*r
zsEAVR4NYMtU^A8_(<_L>npi}W2a46Kh^6iL*y+63(_bE_Zgseb#q1rG@r!*~KgaLV
zpo=C5B|P)-o@S==(I_H>r)#D^+VU*K&8tQBv!f)=2^bccYp`LjRThcx1Oe)~beI^;
zyp^9MQUF=6B#G1ZcSBP>su3rs<uBh|_EJqxt;2UjDz-~KOgULbVRoJIh2IH@r)8{A
z3xJpWfVkmejZzrTPj7_!TzaeaXiF{IHa^sXwXyA#a2Ut#=TGLo%S6hO1W4HanXqG&
zTPxEZK|~d>JWw$bT4oHZC6*aaf!@slv$ASWK|`Bl@oj7ePllay=KZ6z`~i!$AnfuF
z^rW1DHX_Dw#pqx;cCPB7_-q^RCt2g!ie>JPb5he;Qkl;PT2DT*i0Y<R@$CS-nXT7`
zq)bJ9-0SJQeC+qBjk=9vqss>IaUKt4#+y^@Tq7?XeHkw)C*36&&e^DQirVzHJtx0c
z!m8|aGF+4NoXGo={qkiAo??>AgWqM5Io^C`&>^H<{q2XdbwO(U0sS>C0FBu@eQ?U>
z4o-fxD^26z0ufH|o`Daik6374>JrtGcb9H(Hs$X8X1Y$-+0UjT(B8t`%$(z6i7jh_
z`Ars^V}Q$`{h{y8CY-p_Z@5rl#okoXh^L3d&KWiEaePuy?>@?0Ue}(T$PR2G#b{cP
zw4^*CLgLJtWIdUhpfj^~GDTNQ_26z{v{uQ$y~1E=#TU|_n==#B?^YB%W4c8%`cmg6
za{!<*20)LmAw@jlcr<~+!VxtyNk7pA3eqoHP02%__}5e@>H5!Bg>Uh;>{=kL2YEPQ
z4*9H)+_h5)W>eac8MxQY?%+yv#uq4XQ+7x-`k#IkXlec2zD+D^KEHQg<UVt(l8^!M
zK8}6WV`fD%BuTf7D8ydFQ?2>>GEbWe_pLs;p-A_2303*C3Ae}SR?B4v$gSh5*{R4b
z6tiojbGz@w{Rz!Ti?Z^_)OczAC{7;gyZ+3lY9i9Ye*y)+K^Q-Eaemb*?S+Glf_Q?4
ztR0QN1Iqa8fPRq`ZqQy<ZzK<UB%CimD8tQ8GuK95?9e5Y&3HR9PCUAWo8WBte6w%u
z8_OxU`$V}FWyF>V*bD!>3XYs^`PcWYsFv|<coP(hLyeY^!vfLdL*GFA{pDW<*y*9R
zeTWmNh9;n?Fu8x04Mm{$1~v5UUq_6@eDhU&bbdXtzh1HN)vP$RU-sT#M^x(n&pFkd
zMnVypF7AMXU^@K7t_tBYBl?Qvm7z_g{n(QukY^ea0zt>RQqJ>3{5irvJV73E)4HVG
zzad*~IxV&;LohEdPf;#86?9r+y`}J@zs7ccBYG@b7d2r<rlX%hjNh=;c!z~H7@oJ7
z-b>`VFs@?8`}RSt+;@EkhcI-KoH@fqxX4GMDhcemQ}6aW2=ujF#x3>H{sR)SjMqF<
zEUH3{Z*k3<BdniCu$BbIDUenpJ{uuBeU1`jr#Adx46ca46zb4et-hIYi|3NbF6Wc`
zd(PcX&4`K8Xol2sKp+8T!yq^|{Vk0Dhlpqc4n_?3$|reg17kg|FFCN|V0x8(1ga((
z0FtG6=eJzUyhmY!*;!*I`-vJ9y8iyVBFm@0wbk4F)V$?aY^cR-ihy731cY(b%Jw-T
zJ*ce5gJ;aBhy2VoC)Z>F;5~OBzw20`+)O9aHDF!t;t+L_&{?Zzap^er`iY^cEmLSU
z+xbG;`+OM<XH|rkS(VWS`xwAHYj{WWBuGv>hH-iGC%}slrPqT=DyxXk72&*B^mR+W
zAI=+YK9Zj)+r@qZcP9M3r-yN==2v;$viRX;S_O><$qdc{-qJ6a3-aqX#_k#oVN9dP
zF-@?7p6EO@nFYaSEWQzxw{Sr(-Z6eG*zQYnjpn}gvG>bb9uGvFZ}nK(P$LKXR9H8-
zervz@SK4TP`5kx?@d4q(G|C^%STY$}BlSux%i4WSM61r^iFU^cmhXgl=l3<=g(8x)
zGV*GPK*+;C!ZQI*n8dkEC#>pDDTJG>cs*m^FA_S%`nZkuF)WBfaV3=fAeEc52|45m
zZ@38aC!Wjjgf;Zi+e<1J`8W5UBpx+XEl|byzh~WJ1c*)t*b1UUzDeN{)>?3;$Hqq@
z@+8EIb=~++SJ;;wS3^&6>x~~D6m+;Kl|L4?m$dU_Vb1IfNHqW24Ci;y>_3HLQFUA)
z2r44{X_O#vv~x~LKX+ly_bM?PAM{z6D4e(s4sQJi$%o$^^cU>+FA13sup-A%bykYV
zKHh1W3jr*x9R03Xmu*E~+uwW~fI->6*~j7E;`<FeW~Z_n)}U-KOHGi7z~w5C99|3G
zHKBteE5=9-km4I?6c7Rq<ZZhe-7kJ6&n_iyk3iG7NOt)M(Q(yJqu_Dh(fq^n{*UMW
zfAy6ZOz<Fwf*9;ME~%yVbML@oV8^8uL$*yKdFHpG0ztbzP~O2ET98bXAu+nF72W#u
z3^-zSnYdeFCGS`i;NfdR;IHWktOpW=naSzND7%tegnm+=q@dQR@o(m3QFpGJ>Xn!K
zIXuYl`}|4jmh}0p7NU|1-URemAb`pZ&>-Hd$J{N!&=52m3!7je+nT(3>i2T&J?u}c
zhTT%Xd+G(XHyzj~19gFe_%jkW_-}Zy+pBm689)<J*u9=HZ*m|Cl$fS|w#RnRbq->t
ziZGC{7WN<?mYHPLbyd0oZPvJQF?9$f@8Dc)Z1cBrUIGaKDStkqy>$vRHX$f89>ruG
zfAL<dus&D+xG?7+k6*Lw;Rw7;fHF=<uZUEInMyTSf1T?2`s!zbB-Ui@*~pC?td^VQ
zUVZ23b3LBj6r+dFO-yA(pq*ate4m2XPC}5~lT0$8xCv!k%M89zATHYRc*gMc>rfy@
zdC4y|!-ru__clZqeBtRt0)B$${~=uUyG8Z)3+(^wJtYhdY*e*2NRMNGK(5MWPBZVZ
z$~f+C1%SjmtZNA5(#Ju(VI&BYR<vw9mB}hv+~le$kQ1AR+TFVq27Le8={6@RQLSan
zB)4)f-eZ#aN7bq2*#haGY8Htne~0r^{Q13+V<h=!gOEuqDF7IAC3%DvPff&6-YE%N
zm-R98%K2D#pD990&CEFN1O=`p6RcwUq)0czcU6@H!*>~xZ$kKHzA6Y}pSOox#PeQQ
zq`D2VdRHV!^fgyOqJI`7`Y%M+yo5ik|BR5B7Jufn31eU?=z^S9@Gpko<J&-c7DPyw
z(tkrI5Fq_{3day*ejP#B{?v#C!Q6T@2<Ae4(PSzxXE#-T836Ga(47UrTqO*|a;Nbq
z0xP(-ZNCh#;;Vs}?f?Hybf*>Me)R){+1hXXSc}qJmsHKXQKQjZQX;*}BL@L%;%MmN
z(&w|PM-=^;j)<|tQlR7MwXmYBaHQ}!vK>_bYvAU++j8EBqr~`;$hwt(LkFxnNbZGI
z2zaLFXDq2q4?+{2%}CEl(J*ru6|xP*+#Q@zxQrAU8di`^zL&=}a}NkKP_|#?G-dI6
z6YrpX4A}-Pm~6-&6Rq1^B=exw@>!NWFQ$~qoDx|teUoGT8r>G?t;$=mm-uQ^sXXJk
z98wI~g4hHNLF3Bgib>Q`{Wpx&3NGL;fCl52O2>Xv;QR~EZzm)tA#^IA(-z5(-3XGk
zF5y+HFQd2p(7<6gack;%ggTveyG!w#qgU%V^7{S$wtDADHo;gFS*LR0SAc9nwo29{
zA#G*Vz+8f(dZ-0@ii&2?{0&6?@LU*V?VsVF|A~}HM@bS0hEmrk87#%4*sH|tXVkZ?
z;x*5o*Wj+Tq!T=XyLbUU`Z?jk>3cD89M~R$huTlX?s`oRFb2s-?VYL_NvjaM^si2N
z52c^*+@GG>zZm-MK{f*L9B<!m@SNmNcrJUAtp4jre#v55oeRJc|9ob@j(~Fwr|JP%
z;#Vku5|16gh;%=p#9s#hG$_XYf6%Gs?bf3U5>`J<WxwAjW)4g`^rde8)qF*fW=mum
ze|oynUZBxYCR|XD*Zw1LVMNCb3`Pwa%*^Nj?w{6jLgzie{U7Sydq4$wKvBzT`WNmu
z|8MGh{S42MNDxR}gPD(^Y?uT42my8K55e=9yqlyL^YC=#)u0zWsKe~zTmtC$+X@t~
zMiyC5Q0yfi96=82kbB9LvGpm^W;Qrz-D2r}NmaUef(Ykk)2OmPtz*1JOPkOGP8Hu3
z1n2=PlU&UN-I0epcVPC~WA`tk!(D^qFRs6~dtEce0j;i3Par)sZ><O?8%TC5J)WpR
zlVVY}+K0%4WLjgmy7_>QPS&YNLv^dp_;t@MfgVXpbUCSJ<ze=hI@7%ou`;=cJYO?F
zlz;ml&3XT$82XPt<5iG>v<XHDkL%w*#V^*`0-<OVO!0%y&@{o0*-m2b0E50;1X^r6
z{yvh$lkcHN&a#tLdxiCAdUsZ;n7pzzlS@JZl}lLx=_AYr!$t1@27uQfc~VT<;KC4R
z?#`M6HO$J;?Lvx7qBGmr`OpO@igeGr477oOUD}d)tf&qcR@JXa2G_7eXuFT1?+xl-
z>q2>|a=}-gLEUA#S+4LDSSra~V((g4Gd(RZQFVSgm!__UZ@@=r+FN-0tAJWr*l_<i
zOUSf8>yeOZEM%i?`AKfOaK2RQKU)ibKMop`pQibMv+=h@_9+Py2Bu(49Wy=tKOjdf
z<+<W=Yo^4EGi=<|Yvo^HWuoWQr<!pU*Oce-E5TA%XO6O1;*VUHVP<U*6&6FA7TV{2
zIV7pTx3fRqo|nmUC`an*+)-QqglTyF-n>l~78xlR<=i>$TVbm0e8UCKS&~NcVJ{ni
zHj$)w*di^ky`uoD+1xm68iy%tAyFuT@XSvRMSP6_={WX-(g_fVC8mMErE=m2BoaV7
zbg|RgB-$iNGJP(F@D*P2Tp#)xiX}{rnUVccKxwa*8%EYjCquap#8AD*fr2z8h-BJI
z_CdP;$3KPsdh|7OHnjy1gv;aa18_hn44eTqSlVN)!7|-U?tpF|#v#ZM5Jqj<BSH9B
zpFy|_a9PW6a4A<D$?|H%v)X~M?K9RGf<)<0cU*6Ms+gY!H~&Q+mT&l~InoZ*FpIK?
z-aq7-{{sTC`^f~2Cipn6f`4;Gn-yFrMl@(i9$A9@`D9SPobJDR?eNeR$)kWxQG^aS
zh1+OyWLn~Kh`l@~$0B2(2tTIbbitDD1J8AA$U185Zp0hP6)sc*ETA$su=GQgr<!s~
z|E6FPC;P3^aU8II4PM_rdi6wgaX=jO4f?<4whiD5=>@jT{(Q@ef4xu`kU>u0@eu(T
z)_D~$ncd2MK)BP%tU~qZ{o^tSBD*2*Kfavf-%1?)^^X5C$@2SAgnMW@)Z)!P6xP@t
zSR5~c)JX%IMSj-B4wA9qNnp@y%U=kS`(<*g{W`~Nx6yw(5!kP%I^HT4-Sro@8$1_(
zo?|4Ceg64m{(QPWFWhEB69#*KBcR_#+X|air*v|pO3UTd>RSFo|8eTXa;;|0?{O6Z
zbJsguzf{|T0Kwmj+zn%uft~Q|!RL_D#J}~lfQ6$YN6tAvK{oSEKW(No$VtVLRMm%0
zt?*b=I#0m2(^Y?x8x`E}8_b^GxBAx~!@Ej~gJ|lO<xd5KEia#z+XZRc*(<+aq{5QC
z20mm}LtMN(h%dQ8c6PfJ_+NU%y}km0Ys_4#krnxA41nDh^7e|N9+?}Q#c1Pz^JI4P
z0POqLM`w=@oa#F?!x1DK!MQ<iwfseE${+nC;?JZpC)t~P#UPsGt@RTC9RB0C)=YQn
z!Q(A%gV<U5iT}BOs*(hl-Z0qDOg@2scc{!()e@jL{QtlhVB8?7PEx5S@(_GyMIVOB
zpjci}GEd9P6{<6TcM#p7yfy?jgyY&H_6h+yWkCzD-7{Baw`GAI3M+eE?##yMNYlvC
ztVE~jiEzd{=c4+I1n-~;40kGsqQ#_0z>dE}ibryKKGRqz@3YUzu$wEc^ptZxzHV|H
z?X|tx=H0LQNhuL<6~@GVfARja??pF7@xoAS0_d^_y8qGK`5UMDQ>*(M+$aLKkLEG=
z_Z6%ma>nzrtkRY|lA6=Vy1CD!wa@>6xc^Ne6cLBPs#QyDne>s*`B)E{DWSsM2yeeV
z*_+ar$b^s_*<n{rOl-tYg{HKut^qcjLdYMt<^OPI`UL{}7k-Zd4R)2*GrQz+@Q}Yv
z>c<8X!;<7lPwGIay~%PJ+|+-jD}BB6tdI)m!?K`y;`kIZXBbSb{DNBio_{AFsKF7#
z0q!$m7)|2YaT-nJbvvPR^Ix`I{TCn#O5DF(wOvWYhIt19k*?>z=fJSG5;hUsb<>P@
z5JA{6BhN%run%ZmFE2WO?A-S!cKM$eVvE1`S1t=v;*{~$!!SEqvXGBs1;IXxAZKh*
zb-iuo@L(jiEDA3oahwf#?`c&oxM0J_Mq8?p&&Yk~V*8QW>htycY>}zVY@WN#38H41
zOSik`1Uj!}-{`K5Io7%ex$reb|B59a)d6D%Ct>73yPk)Wz1gL}Xp_=-(ocQ+!i9aV
zt&Q=rn>gLcwFzweWy+=)!kO>)`UHyxO5g)iLrOi~3t#7vJxV7Mug5gzXtx^*aGXhh
z%NH|)jszZ7&|2Z*$q*=?B5XR@toCl(u}A&ZXXYJTW1Y!H=848-A3wjf|G+Oa&p^xY
zt>Zuw*`F{;t0)8@8v>lm$mA4GyU})3%$38zr*$nci?&q%%4nmd`k-0`M?$yifQcg@
zH^Kpr>-Ds_T5ZimQfjPj@6Nq@6{RSerB)t+obW9*>B=}aJw3g{M~}#^nw%rWS8?Om
zr}CdEv3O40u1xt}7hlRUW-Dj4T2}CNbRyizIZ@hvGP3j8r)~wYk9Es5yi>l)_=R3Y
zLr|LzHE?|9qdC21>cW>Q5hvS~+Z~TSzn9Ve+Co>SL`NsQW=O&>k+}_8s+4`85<M<I
z%^FgOovBq+b`lEc;7;CbJ8lp_$_hO~ryt9wH{kCQV1o7r_ce+XQ}txpk-^7&_`%Pn
z=O+2SrP9i_zz0Ki^QgFEw=<qR^TkUpocD7&6;oRL17f#l7LJc2#T&qnWHriX$6w&I
z*`A`g+{Vv*_X^(udy^!HXlTiF1k4O-O(bg(?SLgb$<3|0Frsu1XRi{<o5v85FSu6p
zcu!Yd@Ud-Ol@Jc-&{EDyQjHTWd$$qP9qErsv6Ew-Pa^v#dn`^RXvMOzgqlab{9tn^
z<uiO;ev(fH>k(YTPteu{Ov|;Ju?$<i4SN^1g=>A~bqyj68E%Jp0=keco+Gu0V)&}@
zv~4tp4=c`*cxIB(M=4UaCunCIp3*&_;9gdt6?Ny_3)UM{%Vx1j9ME@Oc4b>j!$r&S
zOE%tu>H`ftDb6Y)w~g7fK2(XCQ;%2hOu1&4)B@I9z&oH@=rO$!GOl>cVBFKc`%-a`
zkz+{tUQEbT+yhlL8qU1^Uk!CJU}OK2wfV>zFfRbX#1+&WfO{+?515uP`<^8^**>Qm
zT<~BG$h!)j+W#<Aruxsk|Dyu5!ygmKfh%qXG(<lj>5LBs4?k<J{>49jTXf|&2lL{{
zUG4tod=LMi|LlyU`lCLyUcL*kY!&%s35L(Hujw36d14xB<YVz@QiQ`q15%;Tva+f#
zx4ngNi_gN=0`{4izbBD0TEfIBX!-{PVV$a!ev|l-6o;95o5Dn>b7F4L4*$e_h^SR!
zW-puEUs7H21xL&|DZO<fW2fx-I!&NtH%!%b!Ernte;iQk?~d1G<xQZO2~7gD)SpcQ
zdg`$sTsY5`9k~c;yuc&+z8j^W)+jkqgon8GBgL)Pa8}c9iL`9l^lhROS1us7lXJzm
z@A-DTH1Fx)o=w_$ycc}~{Uj~sz)Ho(8>@|9CB3duP;s<PC8UTm8=@%>B=3t_xK%}e
z=Cs^=r{phW)~wEoXZ3pi#yp?xKF}b0@cV`LpT37*nnAOB!t1yczwzVZ3#?L?o}+Rq
zYLdmLP0n|;-m{uekljni0W;g+DN){=p4@oHUP&b(Lg8A3+#17i@tA}@7Wf+bVVBM3
zu&np7tZ@c?!HxCbmtYY@$7yIVlIJtdBg)O&o>{HHc)-|LT_DmIA{1}dr|S%K36hA9
zL3QNXPwiX(r=EqVr;~-gfg{00NdCO7$*FQin%9)(P`Rs}=cKwv+0Wll!qQi$A*L{+
z+V~fw93M3++<E*APOCX4UA5j2!7y5x@&3)x%;T<XA3GXYYTS7g^dKXv!3`V1d{hLT
zX+!P9M1?-_Dp=}hz`ViuyP+@0tpWur_vP+6;HPn>%IB6s&)2Asu1;L^m^WUBV0Lhp
zAWCLIQ`S1{X!b^aSl)Wi4)1?I>wdoQwBVkTNd0%gA~v(_`F}LO{|lS`FSGRTe?zgy
z3j**rAaygrVn|VPh<%4a(d~h0I1yl(G=Qbdt^EVi@0#C<A&7k^ChZ0SHlb4?uwije
zMZ81PVdfGDFmmlUiX59s<$7scIL-gs|G=l{(BFUdUoC$!@b3s%6@j!4=IL6^8KM-<
zvEg%dn&h!1c%_qgpVpu_SJ{vzHBABQo(`xt#NkSsHtM2J5JiTz*gj0zKf2O;ZZFNB
z;6SHlDir23vRVxSQw`X30$340qzGf}YY!xYYNQRT=TimjP}bFF=8sTDNzA4kri^~`
z_>$X$W#_@0<>&FLVZ!-ovi}I5|94)Mu8^w8rz9J+$JAQ(3pMAzE|(%k@wGhXH9km{
z+Gh=!dZ6s9TnvE1C`jg*u@iVDta!<wiS9;-X@VhIfE!aRD1F5V1oq!<6Gg#tMG!Ih
z@ZJYO$i{l(uVWiq)=W+frVUEsdG<8VzrHW{aES+H9rKiYlt~nixz+DVqP$+vTl`9;
zI_fop-Y`z`dKl}Gu*?2_q6<ZD;F2=&Rqt;o_Up(=3%BM}iq4^ig^1{AB4yFRWe?fk
z#)N-B?xQ!6Ff8T&9r`z23FseEf`LDz1m??#P3q5ocbX~rqtgsRjdts*X2tjNaV=g}
zuwU0#mDNseiZ}d#7(2kWT}D2@;|bhkm2%WdZVNfet9<olRArd(yMMW&|1Yd;|HKNt
znR#@Dd<Su$jvBeNLAqc#vPyF6C8+$J&H1Mqw-P%LkF?4*@_`R4iTiL6l0W*$4l3SM
zUdH2l=|6jC@drPS6nm&&#zqye>fHI-<>}fF2s_MblGf)j!S|h~=nPd?XW70&(98h5
z7*%94@vZ)~QXe;CE&E)d-25E0>V3zC%$-kA`Z01lg3F=P01<()MzJhc9=4PkE-hWk
z`qFW%vN)#Ao2TnMEkrcs$l-{X=f2^ys9FUjNk)3zejl40!ududH#@!62b);?(-JXy
z7V$@QIpmsP5(CZWqeK@U34ekzE}{#%G1<f`y*}Q!k(u~R96#aK;<qI^>9MO1Z5aUA
zH5|v1aqK}_Sx1o=g`xxF3aZ#~b$+^MT&N+*GWo+J4-@FH56o54HCL~dOzuZYUukW<
zbPwBJ7pD#kl=@_J9w^=H!&qY2mSJ>?%2><{(1heQYz{V%>!MEQL5+>*srF`HHxm*2
z9O9~GKezrs(>z77%$O8jBTVSIsYH+4ZCOJZVU37JOKVn#jBoir6oyw>Igg$_S0mSa
zfg5Q+q5?$7z-bht^*2WBw}fd)ripMZNww0j{_)d`G1pR${y*fscUV(xn=eWckuFGY
zK|reXE+rz;L_k345EYQ#q(dMGN|TO&Ql*JViS$nBMU-Bo6F{VsAcO!(-o5<ho3rPA
z_mpq%Idjfj=lsFNl|Wcoizm;z@8A6^A%IlV=bOj4=07Q||K-2&{|VLgzgEgH{V51s
zuKBM?;rhhWPM{-_N!ac@2v1~MZ@={~A3gg<lG3q#Zakr|1ip7WDEiMi;6s49q$ZFl
z-kguWGHYSk{<S+!sQ7=GkpG^T>-*{&6{TntMX=H*tM6PXHM$MD1oZ%l8~3nb2zznU
zG)K?3)qRi9uby=b4cR%r-idvwZ?!)f7^Ypsoo#j~dVV*->|@DB_h!)_U9(i1zgj-~
z@QsKH-`mj4Vw%_2sGZluMZSN+xGqqC-#I@z8UsK|5xLNtz)TODjuKWeqXq?siqBa5
z@Vb>IkSq7q{IFMp3lC7V=^mk<MPKA|;YenVgVKPq``#NL!rqKG{qfNG?13E9IH$uS
zOx#Z}Eewg`rs7oS-go0(-=~M6Wnn$5zmwnYA^<eKGX{pu!uJ=#cTYl+=0VY56c%&|
z;1o#A0LU{hh#&w!eLy7KR^~1YOHT)QfIyn2j84wmd;b6TyCJht2Wt8XNclz|;Vp!7
zyghLB?Cj2gxGKibFt~S!25Jx1rS%7qvjltCIuf!sU18JeRWm5zX|gj+lXIKy!Z|58
zuo?L8w&L7@G?9c7sOY3e;IAC3g_^RHYpVP6DT@^@p{k;%m*YmvKP3f`D`AI_5G?<4
z(YpRq2`0icjM>kV(}gE>W+qj)&M`ILwN+XN80Yx$CJ}jQdrm@Lf@){GVBzymCJ5KC
z_l9rN7Tjn`P#JFS9AH=+v~<3#<u!6!l`KS#X=KO^z~}x`F=_w&feq6JC}7}?+20tY
zT|y=yyPgs+i+uzzJP-higSk8jK&H8(zkpeZ60l9SBViYuZiwThk7nDY-SD#;1Yk*h
zmIuZK#oOvOv>2d@{YRiJ5)tQJgAaMf#NW18jvjc>;WhYxCA)94>sv)-0MJfedFP*B
z)&_)OS*@ERN@XcdpYY)RvZ0wO@cloC@<+;smWv(ura+c5C&g)LM*G}|g7<X)MZxJf
zz?mqb7Aibl)xnK5#3Z3C>uh<kkTMPaG%Yp1+PsHSU%Y1ro6QH0X@BxDov&NZ#^d-3
z0x!BPM<Y~>Jj9O^f+s>mKPIX<d0Z`ek+Zsr3gKkGoWzLbLmO%X&NcZxAV%Lig)6<x
zMkDC5(KIhLU8G>%({vlUiFRWI2kfyY({6rk=70ouSPzV$>PqVJk?TH*v)ZGam$Yjt
zzh1xg%DhrU#FU0j2pck+K$iP->C6Gmx2X4j3NUzx=%}68^N>^3a%ilpOW4dZQ#Mvy
zCwuo6Lqwum?st|1v_?Q2Xz1$7s|GL$kooenAH-<SOm#`+OG}8N?rl<R6;}os`Jt$8
zqG@fV(H-&obmI63-(AFf2&NMYWXZXDDUH|Zg7n9rU8memxVM-HMW>2P23j@eUsG9l
zL=!;V!_JO^Yhaz(pt*rn?I<;tZu)vEov`t;uepFoKrJL{)T{EM4!rmXUQh*|Cdi?K
zvOVG#_838j^uPOq(NVAeQwaNCd!3xb8L16`;!=Q*uURf%1@I0NV5fj9f{)EBa`gze
zXJIGZhWJY}jW59)562O^BXoFX(PL{c%5Dq6*O}ix0?@w7ul~8_I}!Oa3(SlwHf0v5
z>xTX_9nD`~3#Jrnv-?080HaJ+*j+)4D;EmgAf|n;N<Ry&g}{XM9?4W~l}m|hg0DtV
z2WD;(au*4XLCzm7dtjv_newxg+)7U$Z*BElPtW9uxJ7TP3gim<0J(zqfLuYDwGZi-
z?>s(YB~xNQY%RX@kgaB4s=CebCL-ZQv%zIBkSlm)xA?mbhQNXyt=bOudTQ4w|LKRi
z(vPCN8-i0WnJRC!wUC;DpKZYbuA(o^3ZCV6A|KHx5X3PKFwR~8rFG$970?^90pr~k
zgy)9129Wjlw@|#>OTdN^07UQs#oLh$2(`t5I2Q+k|A_MmdPNIo<kx|$zrUs8y$5df
zTdhE*;APmEc+y{wvX6^Q29o{|tiVujOaXRm17r-6f^nJJxLcPG39mbVcM$FfY)D%e
zg8Cp3^alYl2LE&@0v9RpmNi#^wb2PoF@}6YU_2XON1=b{j8qZH&q1|s%z%u+zZ?$e
zHvfEv+$)1hbaP)sfsnz!9ud~_=X3Pj;Edk;!Sp+2@UNE;{O6NwJfi*o;03MuHA+^&
zV7)l_xrZ_{;5pNyhxWDCI*Fd^!h1a9xTPi#uD|<qiEacPR3P0CPKOi7pwW0R?Vj|`
z&9;BT=IbyIvS>NEvx0(#kE^(fVh@2FnPKU3Z__}R+f3bLO_Hap5jiCNEaA-?dGG))
zdK`%f#%B`f2w!d4GJEo=WIse$efx0wpv7AbzboMXuB1C&++a%cAsfFCf7-Qjzb7Sj
zTla(tYyKma>3{h1zse3O0deTR)Fn94mfr?=s?6B{JTdlr?pR+g_}3@oofv;3x*30~
zv`N3JQ2j}KP-XaMal-V$pXQc--D>{ddJN_R=BN2buFEWdqP2hc4<h~fS7BLD5#VzR
z0cJQCq|SgU!yx?(K$-qyJnwx~bO_Kk{3CrhF-yJd$#Ll<#b&rtyE5S(*?=cD<DaUQ
z$z!09Szn_Wz(-zNAXouuLcg<w&~c@Lwt>KWnaRN5L$QMZzq}^L!hA~CD3^1-LOl`e
zzb-);d;`2%nok5(_bb|h)x*0(F?n*>r|56~C?al~TP(j!?UoYCf5dD(4N~xWZ#Dy|
z1rNaK!hlWqimzv^<#inoM+l#BofiMxN-udX#i-&C^-KJQgX-=s;7{bb;OutDtGoYG
zD^X|T!QfU8bww!;z(!S}hlIDBWYhyVAOc^u@UT(uHT(tS8voP57rAZ_fBzSwhwfAi
zkrfSl+Ts+pU%C0_hHc&z^M|dCKlE)kG?+PF(09~nPCV)-u>Ne!6QJX)zE_Yk9!+MU
z!;@GDHyis;=t+!ja}Fe^JV72tFJGI+d9DrDe;z;9a~41N;L|<8u(29oH~#SX*V~*a
zkTg&=c|@DW1te^Oz=v-P6R>Ery!^szCXBWA4YXX=&dmz;Dq}`qSQ1$U3jCo{Au*pc
zKoG+>b*OA3s^Oi($pnV+D$}36^K}qej><&}^Uuh_20<6<HLD0B<fC#-53;cbL5aJ8
z;*LeFbTQ{Y9m9d$GG@lpnj7FZ4;DQFp4u#6-=;0Mse}pUS(n>8Dsi=IcoX30vQHdI
zC$NM-izJlbmMzD+Cc6+k_75UY2wPw&KtCX>!&7y$zZ>_2dcw`IL5D2<TRd$Rwp!Hr
z(h<D#ed_MTfJ>wr`zsb>7pk&-187<8CwzH_AQven0KJJo?e?Z<z?~W}2dUo|wSFyB
zx5iRj*A-2^my=`!x$mq{DgMgwBSeQd#OX)+0;Ti=8L2Iz!)-_wsNQie9mRo+-m71;
zG3#9BWH&8GVxOdPU^m|^eX(x>Y_NhXV$=Cem9)-?FJ5>_Ef6LrR{~dhVWeP3*MvLk
zx~P?23dc<I3<_veOT%Pkk@qwH#rLtVlZS_veymD$s_xQJbc@jG4V$*|f928@SU6HU
z9&gh{uFzK;1%^DVpJtxxeV%PG;mwgw&GhA3T{_PyMyb3ds-)DJ`MJxj`YBV>YCGs;
z%K-bqbuEh+QvPt6#WjQ+*qQES2RZ&-F-ou>_M*i%jA8mQnmhl)N|)=6u}GupmR|}Q
z{PfLcJn_eu;&Ht%O~$c@Js41(!hR9BTO#JusVxPHF1l#?ny&LQEBx@l{<YhYR^kWq
zbp|qUcSV6AZC8!CVynL1Qf-K9DNf0t*3!yk4-{j2v#uGd1Yku>SQ*o}pL_G-MXL2b
z-VBRWR9h(*6{n<17n59(8z6iQlbCMPzfi)8_UFk?B`lpRzZLhC5H}ZoTGFV?=S4*H
zJeWw7r58cg!Ghz(YN9RI0fR>57%id@G8~w(&l9^$rIevsEyKD_B()~@z$cXgJ_3qH
zFdUD@Et6ocU&!xd&US!MWJfm=8_Nb5%b4M11s1&HyiNKAq!cXQ+)}@ODIzo2cS05!
zXK&1@?pIa}<!ZBu<~JDG&gt7{#VgRa7pKIUI(XShr8Fr%=`vOt@lJFFYoZ0Cp?cJr
z++5RQe&z)}9PJg`w~8XvO{G$aFLBd^g%ak}1g#`EZ~|D`EP=|8SUzi($(<I7s6x@8
zkCxjgI^Hi#x;!LgUDAbvP4Og>k9@ucY7GLLIVg;47eP#*2`oZZ<xChpS$h&%Qkj?=
zRN6-T#QClQgK#Rj{>}!;$lTN3WS^+ZsGpv`D@!o7hCLJbp*dbOzc-JoKI&-g=fnCq
zF{K&h%%E{o@~0#f#W~mc<1hC=7d4*WTs~SB9ab_96x2*r;(H8iY9e2HBCc%E&mg2b
zKsa7NT)Z;>YrWT2&1~A2j?Q)~ld9;8;dc*DakqC2Ye-@e6B4am_Kt@^@!rXz$F2I4
z6~3XjW_2f4^{hmNQm^pFKjDj$ql_u~{D_Fy8k;5cDmB30@X$;=XR*FIAK+e7&wcMI
zTXw;CSuB1PE6f67S%n2}KDb+Dz&1?Msy?F5I#@|%qi4{bwYL0aM>N(QH2jXw&p&o>
zWR@tklq(_3iO>%tfpV<du-IT<qbcdII#s@h*LJF%l5LdR*o`%OCzKZkns|qA_O>c=
zbUf_3WL8EP&Oh=3*CpYj0Y41<H8D>s3f5mGqDOxaW!>#A_itIe@R&88gg<L)tZ9i`
zv(i4=)LF7Ad@fVp$zL{LXOW6+?EdWRwVSppZ?xS=YZv#Uitnm&!rY_ehKVrKRqlbY
z*)QPQ6<iK%AE@S^=mWTc4h{75f`u@ePo*Y;12v;Ty?7z)C;O=rJt`eP3QN5ZswXP(
zL(Z%sywWOgzRzc`kuS*1NV_$Kl~h?I)~YDW{dEHX7bxsTqSJ(vwTcI+sPG`q(Qhx^
zcW~Ev@T+6JKZqCtqYjj5WbQ}lz0Qs`n2Hm6Q(FtVO$JTBAh)(1xKOhU^f>VG0Y%BV
zTit6+Ke7)LJ6QaHOB-COBMxU2B8@wOERe3vM#_1{=5b(2*F!B2@1P#4%fT3}yBz(V
zOVG2bR=q_$i!<wuR~bqiEAU4)I=DQE@Te`WZ`_C$nTeL#M%2>ni><>G5m%w!McAa<
z4P$j;;tK{-vX34m+v-0lqu{3SU>zI;j6`(`j3hZYQLg)OxH=%GSRN-Z1@J8L<0i2F
z1jEmP{UADvt+*_|4Lv>!^D7&7qR1|8eds2B!<#H!9CyBpX~VvPLvP}9pziB1sznZc
z(~)^r_n!}_?w)2!S)RR4l^ohUF!>(t<sJD!g63kYTdIB^TXLRq58kv8P(SMH$r$J$
zmxi33-kW_yBvq$C|2m$l?aX2&DK`Xm^@OS5ql=|~ilabLxs9fSE3?8k<qGerHAaWG
z;nF&!sw1Lnh*$a5f&g<ix`V9=%@VyfgVWiz`?S1!c@udcF30pJL)X|H`O@G_`7&cX
zGg*hrYMbur5+5cRU)QF;N*CuWP_nubWvLpEvD6s4`M?^oQendJfu4y$q+HgGt(k|K
zX_?lQMt|sNXwI|rN15$}u1h0WqpscR-ppRnRGrm#&wT;nRM#M8^Y4<>lr;=jcp@Z&
zuexv~!0y!5w{)~jRTeZ<r+sNk-_+ExR}Rx<%@KtTcXe$9$6-m;Q6sM}lnL|8+<?@W
z6#>})p|v&J^DeYs4K>ttd}g@cm@?3Iral7}qA7T8E*?lR0$4IApf1)1_PNjpu$cMB
zv-`5D!Qbb<FG+>)XRPts+R+gg_uebWRy6LUd%H;}Wz5NGUl43)U1$n1tFQl#7pj@A
ztJJ%nWH||c(lWW3^eu5ym<bt(&fVorE%6>v?+YC0@=UU2!}gUz78i6KOP4Ko<`c7N
zK0&m7#CrXurKw&LKiE$(3naQqj;8+xUB8gVsi7(p+tC-8pyo-tW|Y>us)gapxqPxI
zH*cla430Z=gud6xVOHE9(R&is1G*H(sPM8X>^g|X5+EUK7YC+UL`+~WU(;Zek=2RQ
z@V#<xor_j@n8rXVcVS}Yto)RrOkg@`%*LvZn{CW7V!?WP?|8c^u0q$xn^WxivnvIp
z$>tf?wQi*)){j6RaZ7kcXYF9AENf5)9Wxu2F1KQ6TwQ(uONCV$)x6Mb5n#s?ABifB
zR}ljS`lG#J@}E%w-MNNq191|595Hes3;gMYjZ@+rrvRAly!?$(lGW?|71o$H@k9#i
zqitMk`aZo7)ct%GjWw!R@hV>{cWr%(8w+iH?$0khY1nXh`0C=@<RSaV$7Zek=Ov#G
zf55&PH0%!Lf3y@R@Gc2-R5xVXneo|aygI+|&4<nWacf6l|2j$aHeZyzdRkJ6lg64M
z?cz?VlmB^1MavtD6}Pk^gVx|p$pGqrHH4~~YL~a)g#?tep{_04AkW6LE0*h9^`ks;
zC&`*#3k!{cvp4B{Joen_uP+R6^jJF-hd$OFt&H@Dq)l7Xm*}@td2U-fEIaG<<px7Z
zPusiXFSo^i{!}@lx@JPnX+Pm<x>UEPU{RF~`q0?eVCi0wIiJw$-!PUidb5iDYH+PQ
z5y&$7KCpgVrXWtUpH~(b9_o4WetDT#^qfIj^p^sr3jI=z7ynU(0~Ot`#y+b8S*?`N
zOtWqt^EHpJamh>tVlidM&rlTC8>FH^QfH0A#wFzetGz5Upt}94a#SrcAQn(tU9+8d
z_S&X)N_Nmqc3Wb<5x9AC8sAH4m=W@48tPmaX6Puo=<q)Q%6m9CIRq9w16KE=W5$~4
zR%bq(_RAJk7IFyW&vT7gj}=H#UUz&($>pC@ro^hWZCdXRyEA5bc~(x;UCE=^yih@g
zpI4VN{A2duVsA1LXR0f34!(dvGvqZv6}$csY6Q^DUS{f#<3iz9+w<;|O7(Rat_i0y
zg`!0EZ*S_Qsy)b;SaBJ{R<DZv&SK|;`k*;5aacdWUPGe5L@HL<ecLbBf&?PnJ*jSv
ze{)LM?0@yWI`4Y2)&abpqe~+{EgOKlNj2doW43cgzs&J+i8`AJ0<U`dSEXxmBIp3z
zE7I1;L&Y~tcIgoPEL-BGR&uNFbq1KzK*(nNqD&;>%Un3cYDWLqZM%TgaqP1&*1(`J
zT75<Wb>iZ{hdSvY=^&lGiq=iB>mtbX$lR_M<SB7ynZV8IO~Nh+qdyDy>?F^sidcU0
zH}u1sPdxvAp|uVfs}%EqSJjI;xmVa9Row_mF=nG(N!1dhT#-h31#uTvY|m%pR+x8v
za#jGUFdLp!;Z?s>;j72?emBgyd)aXtSY4D!Cxod(1^Rp{jNby*t?{G?o5DP*-*RK`
zX`U0Gvj?@({6tM5&$AIm`tgJSNOf*WekAk>x)pgjJc%wED!k?`pG(!8wufHnwJ5l=
z7&F!@AECREs>fhZ@0Ako;P|Qb0)Wki5=`fT;s6+4SCM*6AvS0LJnWyeudaX6zSj8t
z+Yl;z9AL<HB^Bb5NMIbdM1UA~^*;+@cc;pSp)>$Eu?Rky0Te;ns@DDj+BTYuCjYQ8
z>A!~R{&##%l&<$Tdwd6RvJEDLyFUMnGX&0ixg0=o(f}0aau(^QG4KW%V3Ivs2R<jN
zbQ${d!*5w+e4&%aC4b}Vh5w=>0VvHSzkeHmCkxnjMbl6cxHwBsybt)CzN)4^<Q*_5
z(D`Tj(SMikbc2WgA*(9P0a!Et;pj?8v|xE$lab+{=UxBacTIg&=c&o%e?oI;fbrKy
zd)T>ZdAmn?moqS@ssgUy?cWjtA2aCOfSj}#6QmS7t#oU8U_1ZU*|d`KRD5x8<gQj&
zUFvfe)uP#BS>peqZ0O(9{UgIf2w5A`#ciDQj2cVe(aN{Udh<Vs0;FI3v|JRFvc99E
zY@g7~74s#h{A#ENV0_1d&ry>C=%pA=EI?DMq1%`9PNKBdvFZul-LTT;9PwxJ9}#nK
z+qm}j#v;cLe)zuS=f`XY>KpXSZKaq({96^`i-%C9>@IHUq`gMW!sbN1;+S0Dt0C6q
z=Kie}B3(8fMbhLh3z%5PCBh__0bvK>(zp-}`~C^Ng>p|)N3)6XQ@I#?Z@i~vU$+N8
zT?O-p3*lEvBl@it^ZKh(F4Q&E-F@6c+?jsll&Q9D)oAm(v2n3FSeSdMlk+5ADck)&
z6DWutwPM!rm2&9vz6-&y5fXhn!0<E2d|BWA`F!wwH7BD1DrEt^W*&8J^W^Ws;C<MI
zr^~#5OMWeEubu;&zvka4_`RgLn#5r;7wfdf_ro<=+9GxiKoSowNsK{0pYDN+!oIff
zFUz7U%HTL7Gzs%mJ+pw3%MIn_<)xKPB@c%;xCw;|9?Bzh({M<#fUznmMiQTb6T1+E
z%IMRs3?tAUHPxe4F6yw^`|tOoc!RELG`%F}h;`&)?z~|5n}=l7Iv%oa@=yT;Xb{-f
zzQSIBHJ}RpadK;jI=U`>4q!v<5d}sD!S#Dt*0s7!-*S)jI`s6KIDDVKTx^Yg<giF#
zs_5V*iTNbDx;^Ec`X)fTS?EK>=;$E_ln8)*q;T?6VXROyY;?gusmlFw@8a*nOfw}S
zvYr$O@k`FEkNb)m5@hPN)+k69)nsr)o^i{ZsNxpWI;hmTA*mtLW<o{J)h7AL!jCEJ
zf+95<!kWt!l3iNrY$nGEOk?P@7MCwenLc2<Yik_fxr_CHUN}dfNpF5#eq3Fsi8kud
znv3Zz(`e_LQid=}kv`Sl+N-^yBtMAedsD)ropw58!bS;Fvtc)!fy|LB*ofI4HI>Oq
zSgnU9j@vJ)=rIJAsd(GQMi)7I^Y)`J`^IW@g_*O0zOyBjrHkDC^3}f}(NI$Kp)JZW
z!50!w5Sx&wf|`67wsUqa{;p!0<@$}WjIUxsAy-o1L6etcweTewRi6w{%?vQqrY?8A
zP<CPQ>I!vyrjlzg!pD+U)+e0D(M#%jVBrO!W{di<^y?bLYFuyEWcx$oWDBUq#xPxt
z7fOUS?1PX6vhPCBoVOHuM~SUVP7<4+km`}oUv>8jeD?SoO{dPhz1Xn0w3)~Pg09YR
zzKFmuET*>xj|I$+BVDf*&#;T<`)t~%b<=-6mEM$Qi<BRG_>z**<EC0-VOsCdK>GZ0
zb})IZS@W+`Kq_aM6y6~Xu+g)9$}|I9(X=)K<~(;DQWq}ND0sa#QaQZSL=$_m&M(J_
zillSq{=i$^DPl}dLBeW(cH3yL&PKp-Vr46RABO|VcWx!8GTA@KtYu@m;_>|bYSxhi
zJ(Lo=fP^J*3u6mVbiKi}X^!hy_e_hr>57ufFU>Wt)}l67hWJ8K``$`v4l_TwzSc2b
zSZ`idrWCN4sc#`Z@HL-Ujd&Mwxy=eq4w=Rod9NX``r@(|@Os<TWwuJBea>*TsbrLv
znlVqe785vH&=uo?mHUXJ!1_qMs8_uwW@<VG_q;n+q}3{3y!`1=tP&|)?wE%0Veg1p
z>+(uePl9Zv9Z>o?j=pS3>VlI&uvR9B*G3tKW>bFG`9%f`B%Vo&TwVO3-X(YN#>nE~
z$De}29$(_!Dw6VWG$_wUsLXw0w8T11e*sNjQX+D~E^6-4%Z8u&4dxs{U{P|Ol*IIH
z1Ot@xJ<b><(1<$iMZc3>@}JxPF|T7U_{NV9o6_m9pDfeofBfiPUCtZ$r$*%GHw>m~
z>?lXzF0w$?#U7T80_GCff<o&+L!GQ?v3iQ*hERKF%#VljZ`>AE`za6H4?C~Vs`!Ih
zVZgwV-35RyfU-9cL8_wdiA>_QoKKKPd#*Vj<Mj-eL0@vs#7lH8Nyq01o#{{m3%=oe
z<E**vBBP@?gvk{ogcHY7tt-RgGGSZ<C09%bFFLvYmjAoU*;MKuoc#@>{9fGnTFEEn
z-TpQ=UVS9Ywj2rSsx(6m^djPH6+5P`w>6*%MXsTqj4od=YnOZ-(o`4%NUog13xq!A
zi9ByV-#(==(Lo`iRw(>HBsddn!TN4CwyF{u;#X?j#UOe!IW+Nm_s-es#!)eM?rpd-
z)(GXZPS-WcwhO>A^$85?$Pd+(i>~ww%bI=y<t~}wvNNcbmR?5PQ?}t8>d!Y4SE_`H
zcBN<V?pQbOZy>rr3BdT{6!or6eHHO4OlIu(@UrWC+j+6sHRr_hhOErz2N_pbLago?
zOBV3Iv*Lf;&qC~m^}#%vIi{Nn=tI<Q%@#DZp)buIX+Y5py;jY4V>d-!4HK!kzo$MX
zGFxCYP2w`bZ|_f27{wl)CaICQs!3uEQ7fHST{1AU9b||xiIVh`dC{ajU&^hwXPGcm
zDy?$d7T9S8_}ujaLz1mL&Ph3;x*^}(=RIZ;(lX^G@7%`|N_jqe(A9^MYT8%J=*wa_
zKGdr{!S16tdh=A#0x!^pNVC}JNCFSmmgBXn)zD)W`B~FhqNs-E*qT;p^QiBAx{Jh*
zOM+oh><arFo|VI8(5C>PdH|qvwO5KJk8RWfyYjv8zpSnAnRC*bJux<4_ZZI)D2$Ft
zOX9=TUkvO`A3#r0f~(8rsI*7ew9qn_t1AQRFh&SAf|#M^Enk~kOn!i=^>gjZwJMs(
zmHCbZ+ZmHCTM5+k%&Ripn;Guq+qJKk<g96`wJ{b9!qY&vkg!DM(M7%;EOg$Rlarf^
zi<~RB;MPQmBAMTzWRPJAsM-6iw<5L+&9b)oumAd)r<GxGN&T%>$=m(gIz}(jrhfS&
zF4uE-&IkWm|A~2{|0DqU<bCLCJBo}aftlPMxP9Szsl;Zrbl{WS-Ub(jm^FpS*|&1D
zpz@L8MX1Am1Vfui2#jgbR%v?DI7)Fk{r%mS+SNm=Iq@5&ux?-zg!d%H!Wi4ULqBQ+
zxv%53X3m#|rqlO(uWStq1o9tTqCbS(G16ZXI1Fom1mGl}ivjg+1r^P|#orJ#2Ii-7
zD}VR-sr-}A51xe%$9gU?h!RBC76QuCUZx)%mjPOc2k3m)8(=Qwwcq<Q<9B-CslDHn
zk*4&We?SKC-=z-!hu+6uJCw#hw=mA{Z&i{4a#>G&hz205qy*%$e||Xk-uO4i(^BiA
zbX$i`@=yIqGx)Fli&^vUSi|uDZmrwjcw!^4A$TB!Fu);!S8!oEcSe;HbYYQzFodQQ
z$TYfTwh6&8`w;X)fLj$taSpcK`T#y@_zH5QhXegV^w=E?`1Vdjv~U5!R`q|^`RVUY
zP?v0{CTAz|?+a!TZ5|(<39)b?)+|7TM3+Oui6$}Sq!<MdhT72c%x!u_?*3~{vwo4B
z+@LjpME6MPR#wWJx5;yio}Mn+<SKq2pvzOlfBe^E<^D4j8I?bO4Zw9AFYX6YdJ^Mw
zKmU5*k@^M0DYo=-<FTG65KI5z2Tyq-E4}T$CJ_vLZk7OZ24Mxjabh?swB_>p8hIB-
z+5O<KQjg;L!s<cucSBbPmBd{6-0ae>;Wxo>1eKaNR2e&oRQ6=Y$<3_J)U}37KXuci
zN=f5HbE+8A?yPdJN6>_5$EwHyQVrHEg=H4BZLVkeHv1bbj%>Y;Gv?0{TFQ$^t`OQz
z;QfcMK`2AR*I;553m<e5h8oRy?zZtxyiG=B&VtvZ^~<YG0p*xq6C8=!z=(tBpvTUE
zhWq1$G}u}QJ`9S;N11@AmUez3;rp6}`lx~ahMPQ18G#OZ<Zu0cG2C{j7fj?|%*Ayo
z!YA<(k0wsO-C8jlm$*K4Q+!NCf%)j>XCUQ?uRx^pJks`?$aq$pMal57k*3SP%5eUx
zFBlM*W@mhG=Tm$tkTSc8Pz5`IvBKnCbdeMK$oxyge-Kg8^^>)|-K%S)bLH$!f0VVY
zArRnYe4X!S1WTpnz3Pazx1gZFv|eB&81^b;4aVdm>3qu|mn~y+XGb%RH95fXDbX=^
z>5Fe+Do_)AB|$dqdxu0jJ`b>4RlOBhS89`$AG@*QqS@+Tk#R@HmDDGiZ#1n?B#({2
zf`9V|k*mZ)xlJ|tbl<gnF_5m4eo(L$^`qvDg^q2DVXiTVoHM9Vd2{cY^QEvMoCsDE
z@D<LC+=o)31~eiyCd9NCW1i_kIEQ2_Gcz=n7Sk3KiRd`4B|SfVQpjtf7JyF%+$p10
zYLcRQws%^7Ciq?HoT?v6V^H(DFTdln(RpQy_NPj-;sb<}k!H}Q`tM8kU=dc1b8-1w
zk7fP0ak^&|cKkPoFkE%45K#O7&#$whwfraoc;{ABSs6B9ttt(5+T)Mz6*J#L+l*L8
zP&8*9L4=7@(zT|PGg&E@d`k`A&9%a8%>;}aHMWD%67dkWeFD9U8tLbziF^|P)%@9O
zY75fReF=FM(myZ2Q|?K%6gXPx1+D@2gBgIZ4}qo*xwhIVs0NbD<JaBR*3Y*a8etCe
z#%sJ4gTuTH0pepJLac(3_&V6e6D%<x9pwYI*s*WE6FruVwfQFnZ+0$n#?`WB2?7gP
z*qY-j0=wb+okoeZw^hZjO(GW6vfVJ%bsv;tU(+P+B-9Z-_fLG8vR!|)lF%U>*rSOr
z595RiPag0Y(qdohPKzHtl@e7Rc8edUzs~VS*fu==SI#WeN9-?5I68ly3eB-P+q2|&
zf2=N-c2cV;$+)fl;vt4)hdJmj^ZMhapzO99vIMhmJ+F>W^M;xwfNcid{K6rguJI!r
z&}ch?V-QT_y`wQ;Vl^38d4k}q3TFmOLsolxg+I)-4v`dcGr5=PYH#1e$MzdP6)zp<
z*Ese`utF_S3bB4G?2Fny0i}VN4<{c>^;VNLGzk9?xJsSAsAVYFEXO37ny`dvT(36z
zl$%rr&?E>H01UED-tQR&V>VvsmD(l<jI~*nZ=K5>eqZ&Apkita<O_^@>DBj+Eqk-a
zQHi9<h6<C7p6zoNME>kXT(TJ`gqlow3O1m<oSx<$>(o7cI&~wERa(?qhtVpEEX=)m
zuu`o^x!&&s!P1uS^1{!4bX-I@rAV8dPbv8$QLWugZJ+SVhM!3g3V=D#0Hy(7P)A3<
zqsE2)@NTu$kZw0PH(nv>Lk~<9m#p(ak)8QU21@ItaN22V^-b^2ijT;}$@B@mW|OjK
z=}8loqQ4@M+KC9)m3a-MLupK#9NY<D{vG<7$mjdWHWWQ*n)c(6f0R6Yt)I8%<!a=Q
zv&vR~lB&g!(@t<aw;Xo+LUAGRLOZd0a)4oBd4bhqE%zqhw3yAhx^#I^q2P5kjFbrL
zi(fSLCDx4ZbLYqB&M2AE)Cvj_vT9N|b$lWC8LKZihHwkpSRB~V2j91wR*{{5^dYBz
zWMj|2#`jrE%DTu8^BnKyH=m?IzW5>lgF0151Nk0gP)@7BHuv?$3{6Cfku1AILgTla
zheXuopQ*2TEgUuSAH$5&r?Z?_Ao{0wFvC{)HnTA}V(h3VdZjZJyE%C;l$#&XT5WFo
zL<UsT(fuRhJx7uoXYdTD8%!QH!OeoEMxg2mJlo5%WA@caPWs}?qJxJ=RQpaOT2DB`
z?`1q`{9#Pt84a^Hq$Z5pN@69v?NG!0!2vx>%#-HRakd<nrVrsBmXGh%$-IiX@&H=!
zBIWx=I;p$0WVKh?rlpmQdnFEzrI-$|3;-KJ9oJ%@Oh|pRzD)MegL3Y4k5d}w(%T-M
z0#{^eSYL4DU@pB_OqO;+nZ+W}@W^9BR;zhyXY{VW>1nyUZ<KP(U39EU^TT7m5n9E;
z)s#pGXP`3X75X#|{1nh^;2t5IDxg6~Wiiuv^m?LTjLR+O$DrZig2h(0D3SxQx=RbD
zVK;Ez*mNXp4vYjADv7y=ipiP~PI6X2XWTCLgg^GDwYw;5ay1*(5oxj_6WBPjYX(E-
z)_Bx>D#moSfQIye>&-g1+vL!Yep}@UKQJ9+cgB>xF<MM^u&AN7uI){kqzl7-Q+kS5
zL9QK}a$NEiwMIZD`~*GndL2RqrNQdm$je_tF!y)|2~@)B_v^|zOukmqv!;|h9(=dT
z-J247{wCYHQh5cnQeZph>Kp2*2~}M+j6pD~aX|xs5Mfv>LAnO}`a|Ga_fDvUY1{~x
z+9uVvTQXu_88lqjc@wzVbRXYd=f@v5xw21Lv$wW?c{vpU)bD@tYW;6f%Zp?bl{wFA
z=42&S`Z1*!o3F?oRQI>W7-c*q8seeOD`h11k1KZwbttp!3oW(Q_H&n9;;dHm_1>u{
zj!=vGv@^f_`n{FdX6r|*?U{o$xzx>vuc(rB9*o=`Ay(WCcOo^8i<sY>$ISm`sDuQK
z+b8OLm%b(WrRMV>Ph_(8rH99Wra4F<qC+l_Wnftq4eNo?87jMmk*BhrFs3r!z3jdK
zZQNS_p|m|8uKFxaGzC4I1O)+Mv@BsV=;bozq{8oezp9>%OiN1wAyhZ}-i8xh$|BnR
zSb^s=os`214BL!*6tz#*v+^}JG{r^^$jS%>ZfJ2bIsBfKS^nLD#a4g)Fu|4#>$*-y
zhUMs!F=Q@VUwG#(Tg@rPEBpCv`pbB=)vtrZYNZqH&;IV$2+tzO5T+57YL~IE4Nw7b
z+!rS3%cZ2&eWlK(AC5NemU6r!6(95JwLhXy^0*s*ZXCDX0Sa8#!KSUY^9@*1E6AUR
z)e_~g9$uF}r7TW~PH5p)?Ba>Ps>74l|Mx=xxcT48(NB$yGr(lE@$Zw>2|dx5ceyLR
z-X`5~BAI5=GnlH|w;fT{)@_NyOjz$_kz4S&7nk)2^Q=bD(iz1iJ)AzyWtduVHT!IC
zMZ!4j&D$HE7wMBM9pZDFUG^dKto&@lI6#O`TL7{$pKEocj7L36k5a$H8KKBq`ko|C
zbPy+sZtNsTVYAorUj{2L0>io0<JiLqorYfK$0d}ysT|i|U=D9{H){gkEV~08bXZ5^
zYXlqi@ZoFiaX0^<Ss6!{cdWeOjLc8J-X<c^C-R#zrF(@ld>=U3(-QDnMJ$xNq}XNb
zMqg#`-Mv)d%j#@h<kgo*Xw``#M1#XAMc=feS6&=1l)3qV=$&U8C39;u#TJFOLfI_3
z$ah2`dNap1&#!qGx-g%=)F@y0k_tDzRlxU=4<$=n_J0+dL7*;pLLNEx5V@q$$4wFv
zT{9_1;X6c_WJ%riq4pkJrOpYjf%B~9%vL@4x64+hVeDvs7%Mgi`T7qcHtYyq_8ZH6
z&5-O&Q-=hL2$DOO?J^!Kl2;P>G5+tI;6Jw<qIuJ8Hb{MvA8sd0F2e>*z6>`56r{R;
zQ1D2aJ@!u43btpLMX&^#SifD^%9_q75ZcK&mAbb?u@!H48^(8=i{^jVXyd<u$;Lk*
z4F0z)5B_g>J`5H%z0z<3w7f-tJ$*1;9FE326m`*SdI49_psui_iWDu<U{sc`y%Sec
zlX+P)ZG)&_kr-&lP!^+Sa)(raufjSo*ZZ)42cfUV1RIh-!SF8tAQUfn4_5&wv6``^
zfO!Z$Nrh%@g~CO0XI{Q?tzB(xW@jm~@1|j?%Oz<bq9ur&CtRHgGXRv8PNUI~TEV?(
zWPS|jsyJek<_XiQmv=)y5Pt4`G-Z8A@c8*da|1)hG<Logch-n8z|O6W&R<*okb>6h
zSGxuyU)o8MNOv0<$z<Nq&h%qGF{9T@YxLTn>8wc?+!nF_M?_ZiCWe-NvZwE9Jq-Xx
zdZZU@hYdtXayhgr@v;Pr$wcFUid`Cv=bT=V_x|>~P2VQS85q=ipF83Reg+07e8SxC
z&&TsC`E#<8oKEmG08jkCB-Qe-^nDe+_2|8ElnEwoygwZc>5)IL_*|eDGJToj;&BlE
z+t<Jptso&?EpU*2n58TDGiV=jAq80sOv8v{)6^=9Go=yHCrW&=8~tI%HA&S5z25B^
zm)XujT~8_Foa+vv-eH5@0|VUq9Xx=z?kNhkABxI0Ms>WGj5_aTNpKh-o0;DTA6{%T
zX##h*lZDQrCNaZ6p7l+BU8DqPyF5T#${8kz37t7Sh^cz%wgIbd3g$GMUFJcpL_>jY
zxd2<PS3l!B2RKn#*>-XD(h-iIUpB$UVI|faaSXc{4Q%|;l11S2^{&~Pjdyj*8IAWb
zeQkxVex&1emiwh|A10n>d<cD?m|8f*L9R^{L90$)a5)!}k_x=%n;o(^1^;|Z5dOWN
zD#H!_lHAVbI`zFHO9P{>18UauX&)4vKt+tgov~}jA4Ax!2FCAa7&8x)FE3aPpSAl9
z$Rs^9lLp&v!3VFQH3*a^$d#oX2!*W81322fF&m7fC=&uE_C1=wu^!DvVJV7JRc{_t
z1Gy`FmN)y~o~2w6Ylhv1^=QGm9>KO=Y(uc*et<hWI~Z96Uj2kPzI_bGk+uK^&k5jt
zc6FZ<LL4p}a2Ev@TEr(nHbpvtK+*30L6iXqiT{^BQRhrHI(sg9L`q<nAyAeBDr35P
zfVN2)PA~`L05kx1lMG<X6q1*~R=EJ?%1tN2crgnY0-OpR)@c+g0>@uf6$M0G2Z%E}
zZJewlj>(J=!<`Q3r~)9k>Y@v+WNSRE5CPy_e|xj-9mLz<zrTI2-;yOVuUW8PDUNO2
zjmp<k`02p9PZ|B$*e~`cUz28cM31NgwFy)l>##)|mtet&r<qY_bv<<;OhMw2I*@p#
zRF+AgXuq(6-d7F-VhzmnficRinCxkL`-|Es5xpD5dif9`yr|MMudTPuDd+S6Pbdvb
zg-W*vU2O{}alY1<JM+z})&@PxJa+h&Kmj4bE&$g6`OOYE<(FWLE9x>FO$BNC(m6_<
za`#)#<-}#C)J>zwh-&WO*?OEjaDDHEeX*r*s=^svE?W<&>*6zRm0URBn&D8V7`PBW
zP<^flAM+%^$zp?01A6R&J#ZT5W%lZIDQ8Tzm1xQmuAXdRI-CDQpxNb_Oo)@-JL~|<
z-Vkt6$nn+pMlUq54d5tl%9gHB_A&Piol#V_B50EpU?k|})S!>J)6c~&RKj`?wJUpb
zoC?d-V_GF^{MPdIW&6z?f}{e{5BZ7C@0O=bWu0T<kZ-aB+@iYUgvze|^9|$g<K?V^
zroV|VneA{C=N!lmStCGAx=QE!F9BD8WoAL>oa_K&HwY6QL^C))%<ir?QdlcR0ABQ(
zPSj<=s=HuKcWQ87%5S6USABKs@t3%*p4p9#Y+U?~M%F+NsG2Ssr(dXv_So(1U~fVd
zxpK=O5RKKj53dw7>1>#fuYWb*qxt-H=z%&{wHeqwRKmhW0qK}nAv;`YVt_JCsLO>s
z)DyB@TN7nvi%+f4p^~&F@?K1w<J)f$HLOIq?z`jN3~Nr63}XpA&g^}E5bZajl6vK{
zLcg{+RJ|&%IOU~K`Dkgr@x4aLnoq&O?b@~#f<{*V*(|0V5Gz`Lx7PfyyxqZ-NL3m;
zLy@N(KwCYiH6x0<eJ%0iWQj3P_~>Y%1i_Is=L%CMFsq4U{ocC*iu;C@J_NnBbEA~A
zS+GC?qe22zWm^rub2EdnybLvG_;V>+Rg~R;<>B5q9IAs=s>IUuw(R#E#+<0S_S8CK
z=zPfVCEwK<-ES3ExKYwcYjH4E4^b9@KRkSde|~D5Fr9kwYA*!5ou3V<jgEoZp)^)A
z`z4sME8UGNsAM{hT{B5%(~MsRrvcYA-99!fnD~B{eG&dnYboyAe6_n83se`MS8qs3
znB4dDfn&3KYgE8j_bT6~?wDLImo+Z8u+lu$fBi9uDzzuSwFP0g^(pZ5>x%frxjzAM
zCp0P+@+4)jra$!~kK!c{G6TAgiaZzeDG+8t_qjlA0aO<4-qHNTyL1@*x;}PYFD8#U
z&|Zo$MI-#4<VA&|YWcpUXoj3Sg+@Dq)QqEUGzI*${hGyC$2FIR^QBk)`8NstGo=aQ
zynz<T)Qva)^JSZdukz@z!=+(eVG3h7s?rr_TbbsL=^M4NMXB@qtFFZ|ti;5pZ6xaS
zwtDN%BcdnLaa-rVHs4gX8XoX{-%39>$5~>hk)zRJBKYhR7#)-XD_03{UNsWb)*nra
z*F`7pP#Eg~E1|W`{J^6WnlS8<l>3d3j9M4qRA-|OlW=V4XyoC()M~<IPbSMiV~%zO
zi%GZ28P|2Y=T*?}>et^E*mbX#^*@ha>)0)<+@#&rms?ss9ZQd1v0Zub>l6U{=P3u)
zMml`+=>kQ2&qH6z@83MnD=!U=7!giyz-P-S2HhZ6e(BN`IMl@-1lwbI@4C#1y^2x0
zKI!4x`cAIh_)**r%Zqu1>>plL#3D;c8W5*(>!Ye>hb!fRp1_~qbb5b~?f3)HBS8`P
zN#c9!dPcN(LU0z)g$8EnS(!E_8{X1q2y$l(2~dO~MQB6Jo}jh62?Fi-_zoJZ|ENw|
z{-b&|YKuy~#*_m-No~3;ZXSiQB=Rur;m91btv;sKfesH>zh=s)41^wtH*C>TpmvRJ
z?w9do9S2pyz1<5v-L#>G*v+)3kUL)&9$W*G5Z7M}!6opOVNB%M_Epcs^Q3UP-h67O
z28G~fh?0ec%IP($+zBf~YT6wEsUg19rZX*Ado%dXR>hVbCxfm#tbfp_umrYihdd0Q
zB#dzLVZ$@QRBE)#SWn~e{ja4KO=BkdQ8UgdMIjKWFT+3G98D=-Nu?lC@|&CrtKnvO
zW_S<l>kf~Bor?k3<A9dypT0={oq^uazr$TbX!iU;bXo27kqQOQ4G6lAtNzdUe8K@z
zGoVU7lSRbjpKQDTK~$1#Hp_Gb-d_P67BbP-2<>8s;~Gj=FGrw&?NT|}MbHEW0Hna$
z%|^j@eybNTeyZ~NU+<o4)w8y|n4)itdiw42{?{DpAKAdIV6`L*Q?l-ErjJ!E))kt>
zNfo$oOU}G>EATHpzY*n=v!9bZoYcyj5*cx;-N4E)5^@#%44!1H!hB0R7N=^rihmv;
zeJ9-7Qd=x1^-FUS<!Y)uDHZF`d_mzaI`(;qUc<RzOcK8IaDUxd#V3e4zsQ=8*`&~!
zCE2pqj{5z_44qimp>$-OUAEPt_Lg8bf_)oA3$_Q5Vt+v_Ab{MTso2)~3Zu#gAz$pf
z24NA3&Q*J}&rmt+ivrw3lz0M~sz+BP*tVH`UTo&aqZ{ANKQ4ayN~}1#EO%sz_=8Aj
z=`~@fGM|vO4mZTVMr_Q4alio5*0Fq<lcOD~b^Y)~hd34ig!rL~17bholsnk{KX^Pf
zUw4kL?YYP>d%D}Qbap*(fD?UW_S@}RFzo^;Zf2MBaX%^`-wg={q8Ombs4jyYw?3+b
z19sqsf*7E5fwzOth7A^C(2#3{Ic|2Kl=?QIV#b}m_HaxVOuOhjd&}u_vaMdQ<J+@{
z*{?lXOmE5?uAH@(@55ukOauzR<=DOj+ltofVQE~x<QS)0w|{3)Y^Mc6rTD#bL_S}E
z`ij>*&SOP25Kx_cF|31KFaVlejjVPsfszYY5<%;Li1ih0dtxGbjIJyUG=v;#BS$v3
zak*o}h>sWKC*LnLeyNo#cp<;B?iQjY?VVUZYxV`x0Y)eF<j)NtI~rG$NS6c8&qF1&
z8$+msGbngtD-_-h-})?45%<!rJn7y%Rt}xH=GYq=e5!*U?G80T;_K_H^F&)0-sCCe
z+=9Q6=S6;Ah%)41&!$G6Ko*KtwGuvJ)9$oq8oN98WL3^2;PYwHtZkN;EMzDM!<_dw
z`d{d=qzzl!<)E!#R{#|Y_!XE%#SJJRpVZB$2(C@iXK_jNGDDs;H))#sA4Rmq-77p4
zF_+S4R;RoO^Tx-tf!{9?#z1xXF$Z-E&h_r#euZi)TNR<%tJ-o)Ki{T&(K<GpS0Lk&
zzZ?wAQC8nGqqY!N)kv^Edv+PQ#TwBn@)^H`g;g5%`R_k{D#^`*y`<Fn>k6mxW<>+j
zH^T)5f`Q#{gZAo(KZpP}vGX*>vF|V;pGJ)=&^{2p_Uv<(O-|L*7bLPkW;n5@xKJ{)
z-HsPiaIMg0hYY?Jwy_CzNV;H^0Jxz3xeyDeKXw2}mPbVGK{e1~VRQVMPO_D1SD1GQ
zJ11(Zb+2OZYX=u+51NTS-z2sPM~NC@g$%o1p)cnaVT;t!;FlG0R#6QN(<ix7?y_@i
zRvw2kBaRfQ6i*``|G>nr*Wv5Jq_+_i9h8AlUv@@~vrhYNnU<B#S)1P(<*Mw>=wr~`
zG*(Gwzx}8sl~yW#7N~U^@{<rw60xw_ftac`BUF5#U{?ziBa_{(VH)SIUVLGBC%z#$
z{^R0;h5cUFRI=#qN^Ijb+psS7%B3F93>dN2!FSS|GnBfW>y^sHo3LufeO2TrJysNX
z*tJk8)2qOJJ+*H=&A6$?r?j<;Gp&}R@wtx3EA@rp$04&{*je^fQQKfoIwq+76jXVQ
zMnm6_t|a|ci*v>KtNKo>hccnLIkMh$VNTRKipk!;%(e~p0Q&@iTGRreOBWz1*F#TN
zL1((Bz4;dmK#Mqb{N>hXwKi-L`T`l<4JQ7MyMs+A%e`heRixDrVLo@TnW}^9qSWBt
z6esS1QRcUG`q<JVJr@>M3H58Ds#JaK;20>)<x23}F<zsWJz(ypNAaLdp+mrx0B$|x
z!^gG4irh+QW&L_=5m0IKpnxp6C-fZvUF)Tasz8amomcC=eG5$PYb$HA!J$xE;(d|g
zt4imYm?6M_7U~QLZ|>BhIgMJJv-D@m+zu{pRqgRQBkk=XD!#T#)84C<xE>=iNC!l(
zp!s?bSK3Gck4*IA+`3!_*;H@02wj~ZW~q8TAwi$SwKggve0>MK=9&#lH#cX^4D?aC
zE1$j?bIP+ulHrR79^p2F6UE+=mso6itAaV@)tu0RadN-Ixw=F~TB}r2mvd?7=kiRc
z4;q3atoUkaOHw7DhuP(4i)EdsVR%ty(NLxfsX%5h-TZ6+nTn^=o*+`_DFCwq+6(F1
z^RM&nh`?QLUPdIqTp^JlTHKR01X<vyQKnu8Dk`70!1t)XP&i{%IF7>(;al5)eo3|C
z6Rz_^jf=1dCR5|V?s!J~RFxscfS}yl9oOq^RK*J)EprPvvxaZ~dY^n<gkr!2<w+gr
za|h@eS3h>hHP7rlZBNrgRo0)pOV<cuxz}*D;v*e#_q|o_xxt^8jDTh;K2W*m{AYq1
z>5gT|Cu>>44Si`}2jjGP8$FLv&Nh$7@>h!#IaU&#=ZJTL;W$ReRpzuc4jMIAo9FuE
z51LcQRqiyfPTWt?`_ZgxsToACJ@SoRiqecI^$s5SuQQG9uc{C7MTl;|7b1IZaTi_O
zz#bapO~^G)-h9$jfA^6Q>AW6?=8tf-2U3lsAYsw5tV3YrY86`rIKM?Pj4Zr^Dt~yd
zNctmjiC&vKj;Jhz`gHy*8UysrX%^TtfZ+kKK*nY|^5Ab_?w~Wd{8%uC5{9R7vWip#
zyr?3cyP9z`OseRkKxS{-vV4PKvA!8j(@?yORehZM5U-R4vx37V)N|~fu^NOxX$Ci3
zH?M<4+`2J1l3D7KCpq06CB}Hmy)O0{>qnlOHL`IUcFEC7d@?<5E0lcrbg)x9cr<R*
z2!)AWUAg+UQ?fpzz=6Y3Wig_haidk9*8ly;o${k|Tvl|i=ga(hmVHo&H-EA#FOb(3
z6whM=y61OiT;6;yPg}XuO{R!Jsk5Vz9y8lX$K;$xhEG)CT6cHSnGc9=<D)Hl&ii13
z)m=<wTV9xldA^h=C2i5H4&WrFAXi>@5RVi1u=fpG3>rCxh?><o-}5K1Ef%~bZKm^q
z2muao33IT$tYz4C)xCY#6FQ(siiVwR`v65!+Eks)G?X4a+y%L+#)ln7`iE!2<_Tnh
zCJ%z1J`Arro9A`^k<DT7ys+aoJ|A$5Apm>e`SB#|*jhGh7<3c_u7l%4xBnm-u>=DY
z%QG;1o{G>e3%JhEC|^O3#=shA6Ug^7k^0YP4FDqzz7F<@dJfSgHDzXo#gToLr-A#p
zuFbwC0j}zJd+JDtd*Hx2c^4uEQfttT7F`2H*Ha}C*D!WW`cWB*qhFd)uoKq5lIdi+
z7I|KJEXlLh#y7BXly(@kg1G?*Itq6D_92ZhTo$C)@cg&|aFQJq+9U74iM68g3Q&{X
zS()K`h}t%VkP-`P=Z1w#&p4C{bg$oe|M?Ohk(QzAsO7=#hUcX~pxvbVgUCMrRTi**
za2x^al^O*46}n#E3#C+wyPi+Dw*wR(-$^DWa?HO!s|S!UwfiJumIYhJRXkCzAEIdD
znLWKSPaBO!Icq9X-iM3uCV#COp$)!%QucW?iogWKq?)12F`f7VF*TvUFtK1U5zM!J
zQ;Qo(U!0+x8?3n$gGRHeLij4RD_Cue4r(OnLY?sKNN+VX;QmzQzK8qM<~%EXEsCD7
z1M_DoJ?_F^;)RWS6FMb(QB3fCdY_BOP#926g|(sfOS+fm0k{^{eMkJBG#9}v4P)<<
z9N@9^BK~PdOd7@*JBfxv#{59@3j^%_&F`@uel3x=$M&gBGZk3Rvetqa6dVQy&7J_7
z|9(A>)*ZfW^^3cKSD+3PYGnFMd))l9vg4oT*`C~ONL?%%dX<vH;>8DjX-ZV?bJU=m
z6bEK#1CZv<TLZmGY(xB(YO(CPO0f!q_fv1`#1w~HEKO_au&Ja#IKo!cIA@`%MOZ;h
zCdww>ENiwq!Bn*Mjl<c8Ne?d5hy3<$Xs?3ayfFiamGXNM3)igcv4;{}06CksRlyX@
zaKd5Zfv+s?&HpaHbwfIbq?OI4*w|3wYQKvy>G1D522{718;UZyi<*74=eG|sLO%>P
zt#=I`%QiP1LE5Jql_qM$#z3;J5B?O45Is1I1S+JLVTyn@GzG}_v7NA=d~C6iX7(8{
zX=7`6r6s<Q-61v6@dQ{L#hRSpTCN<)_lb%pyJU}y!ryBE#sR)02wkTTV(h>LbJzrk
zM(q-g1+7{7`dKq##t*`_^Ub+<+Y1jj%H^x6f4WQL&<%YRP)*g^K}vs$*gLtf#R=kz
z4i1kE$?=W%XOElWPj5Xdq3rXj7QN}t`*!)Rz1ic^&HusPdxtgEb^D?qO`7yxLetPu
zsuT$pKtx24-h%XAqy-6y^bP_7f&wZaN|la)^j-y|g^~mmq$h|mn816Ndq4Nw{k?mi
zd%x$ocRy#JbI)JES}R#IE0Zzj7~@x%=mnm+SWbX&h5#zT!vwB@|CN$8C|YettRM^;
zekz&twi0R1z1k9TUf9y-Lg|l}O#0#eu2?it+Tq1%K1E!IKGr>=M3_}8S_%=yKzpRy
z#_|FzB8a0=YXy0dJx*V1^TKIn_xw5{-`!RS@zknh)|kBXv9lvvgpEmR(dFE@`J)?K
zL6X|~xGe&fXt>eEy}d>|-#8J}-8SE7=J|n`+;3ft&+NS=5)#hY82jWEhI{lqm=6b~
zUGBq!&KNv5lHg8V9w49sb}z;IimZsc;oZsPl?#3nfpM7Oa&?MgDDZd_Va^6~fHM<_
z6_)!g4p;}R_iB%KKX4i{ZSF`W8k}E=>)VhV4fEYi<#N<cA<p58dw_Z+oKdY`L9lwv
zGr*YnqwVef`n<yC8m@d=%`7X+DI^=jA23h^v4m)zYUx%HbD1w~?d2_?t|b-?q8!Z>
zKaFSU3UJ9%F4|^CWjnBu+;>sM$w*AGFc)6FNifrkze-!Et<~D9vB>9h<wrM(ECY#e
zo&JZ8UoFvSMlu*X28~|h$ED>KbJb+1ruZ}3-q0$Q(xRFmJs0`tCfj`=YsisZAWL@>
zx4X^~hma<M4Tv$A%C6?`ZC~`PB&5wdEhN8>hFj`e=m|Y%v#&;2BYTwL236bWdPqvh
zHORee+xE{Yg(g*dqLKCXTA`{7^yidZWDLY}=y<8Ogo2L2F<l;qX`P~Yi|p6lXo{u@
zs(WALouVUecrTWv&q+AGwbV^yuYCEEZGf5S=Tv+#2+oW*9z-Y-k*|lsooiv-Uv0g(
zTc%~@4Z^NVTn+mnc2S$S!IV%U??<*Ku41NQvS3cL{kuUSksHjWLgGe^^|!LZYc64g
zYQCSNm9ARCwBaY<2^+S?M3dr{p*St%xoces2&L`AH(nQ@tZ6nYB2V^7ufHz&C0?MH
zlQK1U_+a-6P&YaNzU^qwf(I>yuizJ^Csyh@A~t+&AKbU}PNy`3*|~hxqN9q5?$s_M
zqDi;NQl~}7w?i_LG-{Vh+gAy;k~|X~Z73aoX%T(KRzElCk_8E>ZxDKj?_q2)S4SYI
z&(lXGgASIp&6L|*a9UyAH2PB6NvytA{(G48OZUR|R`6E{AF+I(r5JY~&E@sm|A~UO
z_!=29<MVpv`1_$;wdshTNW4Y+_!9L&tM4n<sDS?H3c8ylJ-f4+E+GIrPA{K_1DoLv
zTChTo7A)(#eEp_pl<3D~Bw$XwahlKKO<%p=soLYwJ_GjKV?7{7t%sN?8rZv)i-+m1
zZONlnfYk=iV!yA_%I$+98|l#t0WiARA}XxHzB7g;O<VO#ha4tS+&cTthvuSf6;g_B
z%Z$9~{H<gYrb+B#d}aUx)vqK9rs7;cspgQuQ&@wzN>YO<LD^OFs&`O@g(zLoq_wqh
z<>O4MS`m$`()56P=W78occQ{wJg9FUik+)buGTc0IZ!SoDV`r_RO<fBBE{ih!e9JM
zinp7J^T{8xW(<%tocHrrff9AeSJu&-9w^yb=J|%wZLuvQJ)Mbcg_l*7=CbtHb57b^
zGTew!8;8-I>ZdQ8j1$iknt+`};5*M=0xXrK6UNpjFNT!st4d~g`QCyuikG$%L=3}&
zHo$v)SPyM}xC@T6NQ@X!Za3<yW&FzbArpss5aGFM#`AWLd<WGnijgPY+ej2-aE*<a
zMq($94X#mjqywI@bHS=vGgk3)i}li+fM?Nk{WBL4E>~$CvBD?6L4ZCfrW%U1j8{24
zXM=PhT12X@*=y5tJv@>ne{i<AKhERj47u`4<!G%I$XDoz3Kz-(eKGqS{hg|_RYLQJ
z52k~g3r1;@u4krZb=<w-;u5#qvPqrR-jT9yoYaJ7K=#N7r`_ypF!T-y#ICYWY{lky
zVA^ueza62#OZZF|6(fPF-!inpJz5KbBVjC~;n%RkV#zFfm4-<UBCSR1>s(pZ`8k&1
zp%Mq;5>8Kr!unW}J1GIJ)2mv(xDBfm>lwGu#Ex*wWEYCY2YTIFk0c%Mw9<K6@L8C`
zE&Gd`-MbIzcRCe5H?{A!HYw2VaND>LD*arF4t7Xk&vFCb%c-iW?op;j)F47m3-_Ig
z;UqZRpJ<MC#KYW3#w`mDD@x%Ozh+YmtfVnw2WpRe8K=+H;xPK8E8qErUz7aLKDYh+
z8Oy_;o*m@X9LBC!E!W!0F5CHl?U{T9S7MKNdhE30wOLHdK47B05thNycTITK3dr}8
z_%3=>{d{4yzvj#ko+TFk;I3K)t#5VXzG;Nz;@rt=lA|X9h1~ZhT_I*oheFX_Ef%Jo
zl&jQAHaQOZr&<Ol_z6Vbj5FlYUld+wTA1@XXnz;y-BYCW!N^$m%W-(t=F(D*i@91l
zrKEX0-*pT?Dgu~3U5<ZIEbST+k+rZXEa62U#-EKCX8f_M{!8%g@++s@8P$A4@ez?h
zOX^oFF+Ib)_&<aq1N`7e(j&OYOsBwZCegpYg<rXM?0zow$J>G`OBdf{T3abbcf21A
zN=?RU%XxwtHQ+#pwOavIyTP=9{gol|9O<!bky`*)W7oUxt#1D?QHIAVe$$#W{N0s!
zxK&0|mpcT-e&oM6WkB4<KZ;0M3VvbRhYS=H84Jh_Gqo*w^OY`C_x_N$LhDP*xgtt@
zO(tOGXamkrrpZxWeLN#G7N`;XOjq&1eut4FAe>L-2i-mx<9`9Je45nZjDehAj?BJ2
ztt{^A&!_pqtL<BUSJG;do8O+np+hzaj2vko1Ch240Tf+tRT~}vOVH3pUUa7%EM;(w
z2Ul-jefT5<JktbfyjAzQ)j%Tkd1C0wi#I;(l!I4mFV7==AyGx3c0A%mCEC6RiCMK^
z=8w2ek+r#Vge_$A71eWp=B6frab&Th0&oW(7Rb3sRK(d3;Ljw}S))(5F|C|3wG1Jo
zF39h#zqmgea>>whmCAr&#cuK<Tiv&?*!zn+v>JfmcQR3&6aW_{VmI80{%=t`oq~Av
zA9qzX2I_kx*10?dMUxG5LedQD)gL?wY-L)gBAcDQ(iS4~;}v4zN*nD-vZeT!c?RCW
zYo<0jva!C(c}j+2=QD3rgavg&G6ss98cnv*%^7>E)jCgBRvoUD<O+s6tzXt~{2D%~
zcTZ`#2FQspuYSwttQ)6;8h8>ymR&y0h8VLMdxucJEd5{{UN$7As;QX8Qun6&ab98<
zrW8ccp<Zk2`Ab#f__xY<2K#QH=U)_fd8c&CV4ow|tk)Es-={lGTdA{n4h4Mg^!~V)
zV%btCJPOMVesT3Chdk!wTJw+MJ1512LEK<w1!A<AH8@Gm$<Q96ACUs<cXlyyZnkQc
z?6xlhu=SeG$tP-q4yt(77(oB6ZdRpGaor?BHSX>9+hC_^uBsLj)a6mk#>naCuOUK@
z*}ETa0V6CyQZ~LIwURZ<i7^9h4$n#cP4?h>=!ZTX@|#(nViNu$F7mT;1}9;W3An9w
z{pd4UnBLHkHpQ2Xx!?#K{Nn?WN(E-Y#6_17EyWMi2y+~`+c*)#r_Bi$$4y0(IY{bW
zdHk+TFHKWw_fyV092q>j{YyU9MY;5#N8(-Y@wxV?MgMLjhZa3}Kxt!&!Hx*Ihi6HI
zTFvaaw;8?`w2b&A;;|a#G3RU%6iwv&F6qGx^v9QfVC+CF$4Eg>&kC<!2+^!{O=k9)
z)r)hk+*KnIX$->_SEEeBl%jPm-gJ!m4rS}|2E{`d$#PgID=dB4?1c$FrvU2tTd1&N
zWoLrp#wA}|X^{<Y52}BpB|y`VMgsmCc!2K!B)&V&)&@Uwz02lHlXaC#A#S|6`MqG~
z0Eo{FEz?dB?wtMEd9KSdBO1-~l{kd~ly)%%%e!q`F}}=cgK?qTtZj{M&qsZ~1(;vV
z1j81y&I{bhx^8N1$6`S=B$*SH)@xIS`j)iR*A>pgUZJjGJ}fkj$O)|{HQyQSZo>*t
z$y!Q0(e{Zt{s~XRK?p54;{h}mD8WJea}&!sxPD`@Uf!*_*AGR_Da%xU?3;*DP>SZN
zI;v4imnAGH7x4Z^l%0R13H>jxbg)j2zzcLE8Q~Cx<TJ5df`j19Ls6rN<1q%FYiwbx
z)>E$ctq5F4T4>w@mc@8_8!$1S&*kdH;7=wC8G5!iSibO6km{2GQyF1T5|H#y8$!vP
zJ7{u(K3Qv)00J->xm`r5xvrB7QH!MBf9AR;b-&`caLB*EJcAfsz6SwBZRL=-2bIVp
zAxh^%1{us>6n{Ss!u>E_M4lzv0D-970w}m>oEuu>+5$93IEg$P(5Mhd0)(xxe@@xn
zM72$gMIhs&tnu8%o1-7Tg6`jjNF)0uAV6mNTgn^QQ5V_*RJ-p0hrQrW)IT-_ii9*8
zEq_sne&&YIL3|+5#X?9|A+An}-L_{abg#XkwNcQG4wrGLXFT6)siLP7-zL|BoxeTe
z*4<F~h1@GA$&+uL)@vEPKAV9yJ!*%E1Eu2SK4GD!T=6@o)f@f$Y}=kk@#-fvwwwN|
zjs^td)Gq%nA{g3l#7XoYT9cb4>z6jxjr*!F)0wtC8R78LyE5i7dRx2XOR$8cF%Oxu
zi@+Z(5zUgsHYDi9WgeVnCjE|=g3|p7#dj&IF!wb!T~0O$R<VCMsl!k~OF<DpAw**U
zr2}9+|IEjp<;v)gtOa5xcC+{c0Dt3b0?NGifWGx`@PF{!7(p<TfBLOIb8F+?bOJNz
z-#u%fP|2cvngFPbl+~Uwg!rG`d?g!+)rA9->u<+P{yqtHtp3cou09Cv{^8=-KwFiD
zYBpYbP$qv-T3|V1LR+wi)~!8SG7tX{>T0`UyJkBO<|#czy;Ccg_^<HT1ZuWn@#=Gz
z-uwgMM)I73m*?)b@mr3Gf3@*sOh@BoGk}(jN?7@}9e3eSzd#0w7TiK^_Bt$htNW&M
z1JG9Oz*~ae0$ewv+oN}H`y{KiG>OL)Y0PT-dr|x~{hwio^mpjo|Es_MJ7N#YCU)-q
z5D?K8>mL2nA4Hrv+_O<l<9VSKWvDjZ!^Wk5adpW=KUBoBdr+=je8msdVB6mb#{G&3
z)~w}#)!t9IGgEb=@t5TV@nn&4R&M8Rn&%fHy}i`M3zQ3ZSOlJojy^iC6@7k(8DVku
z1=Ki3);Z1Zc<B0$tb%us3toM^Ml%~CXWC;re_#HgE64Ly6xAh`Tq{n*<-aH{Pptq@
zpb1QE6q+d*z|+nf6seaqH8nOirW%Du1W7YLUGBeg7!(~YLZL3!4vsEJ4W4VR{>3Lf
z(z5uALFVfr1!7)9Er}fT7Osg07V7J`L#u@iMNjB*ZqZ$yQYoa=MbjHb-%_8!AJ*ri
z8`a$L<%Cq6T95kiDvwzsz>Zq?^-;3&D3EJ#@ZvdN#oF1eb8AJ1<~g_edBIA`?A|Y?
z_nWI~y#*Y$T6b*H(lg5Ky-=mw-mb13Q(E<c5`lVUg^L7+S6N<p32@!luom4$JSg7(
z&6mdZ;~#Pdj*j>{`Qnc*V^Y)4??ROw$aby7GMw@HGmYYyrY>5q$Cr|jToW}7esy$8
z)}y2IN8*a(&jmjWviM!s;^{%AoB>+@Bn~(P-<6`ZR6?TWZqCge72Y8nxd(E6YGYR$
zCW3{YDG5_nYrmI=YL@wDYR^H30X-#rmjKygTAQCpcNbrr);PB7ed^f5T$A9*zvk7j
zX2(3@&ScWO1g6rAZtlL`ml+K!PUvc!&tIPsUWO~+^J0EZQ4sS?lOxhJnBAzYLe5tY
zp3rnmYgXP)`q38H7^afdXEZP(#~30W0Wvayf?*kik2t@y4JZd$X(pK2wqN@S?sQ!B
z!?s4Sl-$I2?us2>_9mlHhhtop_4ytxsjGC``yBUQSWE-m%M|tC6Mx_J{QYtYBdXg`
zsgEeTgTdYr$pz2A)Gu>tu1;J_(~$WJVmVuM;KNt%X+VJ(PX~+X&r69^$$cd!R|0FS
z<C<<VP;j;870DC!5=%fzDzU|FEh0c<=g$Z+;_?uJZpNp^_W(`>6bHj(vFD5=zE3@G
zZV)Q*<nsY}y~ld#9T++}TPV0a-sk#;U?A3&z&3`8)NvOI-2-VBo*c;T*OcYC=P9k-
zaC^W#4`!gffKlIvssTIT7PWY?F?K{{Dxr&oY_h)H_(Os4QX`~bxDA%<p5jxVhdR;#
z7bLL;eTW{o5P*$+FUP)5+`CX^`QX4tX7Y@Kd>8j?P$A(lsK1NhB<1byhkR!P9d^C%
z`+{|y^a3TBd>82X1O+YJ2WyiHkUi*xg|jOkpP<pMa>`?zQ~AjEc7?gkBTw$RWb=s#
zRet;sm^@*cr*^Svu_naNFQIZ8S^%l5O&~jCjZr)s+%b4Mt4Qbm!Lgg2bI{O-$$wFp
z8pv-Zvr?GwhrhP;SblOP>Ip<oWj{h6*O`P|H)dTOdid7!=}2HupcE=ompwPs`8!2*
z;8mo%_N~K~bt8?JqlY(bl8LjuUCk20ibYdqpF6o>qeyx$wJWx^-p=<d;vdNPSjQTQ
zKJB)uk)TMwrz5TVIBXDvV!Tc?uGmJfHBPHQc!*tgX>aSN8(~bpm>DkX6<J)hdQFXK
z7amGIr-RGk{OJ2mx~}OUVxQ|rZBH%Awx^i4Y1OjFWV@g^t^@i;&%J#*<b|_u=u<lS
zEqO+nOV-y#zH$pXMu&g01a_LS!lHO&6j=pZi0nU;#pTSRI`iII<$rxHVF*SsRouPY
zYu!vGQ(4D*EA+%O(zIMDL;l^hD&L4=OB1LXT>Z3+td3W}4kf}PUJ?Cm)y}v&3i|E?
z`TmmG?iKqRX1sBAfj@-M{bSKW_ZTjNuAL1RHzL!>UO?<j>fl>{akwvsw3uwrLVC<L
z63K(GeTPu^Ix96)8gpfPzIMl4-37JifKoI3_*=%@-`}Tk*;=7VAaURKwLp=zotVf^
zHy;XZfKr(O@$x6$`YGqN(AUKbw_YCuF&oNi6rSc5f3v-XtFd<?-PD&L!7gTM)t73t
zOYybiIo6>)=p<-;X9Hf-wy_n@@<hd6s>Z!J_9RR`NyH*Wbk?QrZo4hZQT<Dfbk~RO
zC0WyHb2f{=AOHK9uHET9k5D125y3cj?5`B?hde`Edi80_TN^!IwsM;_9j)B_{G=;A
zfNmVO4^wKv`Z0Q0rmq3E)hEn}XNiIvW2==0Ti8$@ZJp(APi<C>rTQIfpW9Q%){IAQ
z1;{2V>U_xI_meR`qO*|Eo2r+rWhV~dw|b$p$hzQt)0tm(c)Fw-?XfbC-*(<AKUZP|
z^39jHobRR`j8jY@a#C<Hgv1$bNn)Nk?m{hg!m`KV^7Q`9kv-TdR$sZ<=SJjPI#Y|=
zu?Y_!q6LHcWwlPi_g|fs`g<4OoQ4V-kX(rlfacr+TnJN3w|gds*H8Fac`CMRG%Giw
zm!y0`{m6boMq`LmQK(mF;^S&4Mlf@UI7@ii^oR5b6YmjbT+oWBAO?+Tw9bgd4!B&3
zD^IiD%uyTNxtd0KEhrvU?N4W-sjT2Fyfu?!=CC9%KO5KYw(rdTJS^j~UjMsCpF*?F
z)A0_A)pdxj+amd3&PI5n>YUBO3*`)xxxa8KO<%6J$=0b!uA%d|FwHgzy>6$q3ddG(
z2#~@va%oA(f1O)%_x9hLY8ql~xt&_ZMeeQs=tL(Fe{fs9H|L=E`bVEgO<j0i6=I=?
zvk4DL?x4YD&=V5|9HengUz+Q;QT9&@U+Oaq%k$qqv%BA*mRmL)>Ug6>km0yCs#p+y
z4Ug{QPe@d@*C5?$n{E<Fles_JbTw8jH!qZASei)R)5j9<sGR0@MG<$Y^Z5Leih=e^
z5$2pGR^0q~<fs||LmgZEo4(rrJaYJb33A~j=(nuJy|TY3M7+CxW5H+9-nPA<BWVxx
z&n>#M6t3U$AmWu{XeP#Z!yWL%HcyoKn{?mfoPRLMu1xPA-UTpq|1p^YzDu$uatt6j
zknS0LM4ldQ2+C%&+W(xb?MYqVrlQ?|IP(uhF|1i}euA;hZ|P)K&}Cu?h97Qzny+je
zy(XW3Y;=QJ#vrXmoKZQ~vRR5v_JyDfmy7E#ob?_(*BIFa0NXF04Hjf{v&4l$QF(*{
z^PISsuCJTD^F@llxfY&$;+9V<hnE}~Ins?H({weXehejaazeeK^=~tPBp6?)afYO`
z@d&G^0hTA-XxmO<#qWdal-~@ch5{Eh6rw_*EepLFbhdD+Mx1b(Tza}|Q$1c~{!KM=
zhb6P@8~2n6Pd{$e0CBS<)rwe`GtuAgb$O^Tpv74Ng7^Zbxvns1IF-n5P+<!z`e9Tw
zWu1dzsW9Mc&)UJhO15@jcX=*U??_(Zhz8;cG9JmL%?&`*wDGqP0%u=v;Qm8t=zefp
zlC5#>%*t4^mC~n}@d<VeGNd6jnKY9uF9RwnZmjKNNe4hX%$O-u5XL>>6~JZBDA&$>
zzikvE)G$SrKa4DP)A`Yf3w0dhI~tX7uO$A$cBO+rNEa4PqQkH5B%q8Y<gJKG48Aod
z=U2zoTt?lKw&oObiFb^xy;&QN(#E_=Ngtxewdj56P{)n+Y75UlBx%1c`%d6)x#DNJ
zq|E(2OaAM{dw@gR?Q2O~tJAt75Z*J&U9>RyBEFW8j0g8jU9cjedO>_^Z_D3IJ(Bg8
zpgq71sg#XvUbfu4kY06r`2$D6Z>qj7dhKh7X()e(Cf=R!6n9!44VT048BHc&7i+}3
zayQrSYD@&lK2mg3Gn06BBb4Uo3%BzgfUHdD_U9pkh*9{A0mQ|Qx{r;-xOKh|Z|cS@
z%PdLZi2QSt63Lfqlk!9vQ)7MvPt7+OB@Y2BrhhLwsR+EF%>}n6n!L{7*o81R<+Vp%
zUb*<OH99nInf5MIe2dA`rTa^$jjNMDo16{qf!_z3NVRDohTvtx3{Lpf*4#5+_5;_4
zO2&k6e#zCuq-0)e-ssex7;#$j+RpD~fs5op;?k=#6tr=u=a3bqc^}7}jS*6WiDWjO
zgzz>AJ$O(=FJ2^x&2f<N_DD_2Q~VJ4ghE{X_xca4U+sGBQY7Mn*FEJ=Hl*H6k0T{E
zvE|jsq=zTT+)VF9PMWiyUSPQi)tT1qCgV@D50F4{3IK5+=*YK-)_z5TpM~~q`|s~G
z_n3Z`&EQ~koczH~vXp9!gJ{r>)GYLZ&UcD#BU_MZXG%~{ZOfP@B;!E24LTgBmLFX0
zbQf{^>3GLY@c1xKdPnMMgDwvvi1X|V@VNh?$bfsVXPk$J5c$G<o$=sq*LmuXCr)2{
z&=RQO6J9EKGNm-1>r-yKcwr+vCDOMTW!bj?qE}H!AVBFmAeD#>i@B{i(Jw9@Y)M&?
zPEj$Ll9C<FRa8_W%u&Cgx;$JNS)JG5tnI}F{rNK*GH7Pv%jfO+Tn*n6BYvesaMI+q
zn~Twad49^}k*T2Mp9)L6y=}juI%$CbvnJI<;;t?6AcVFA_#0?_e3L3uLtM)Ipl|MU
zjX6Sy6Uf5WSOJlJ7Q@qavOm6nWN_3WR7rG@)0ihsMRkmod9yno-HLZz=hJr6=?MAi
z3h_~hg|ePW?LyDjkm*VRLE`}|l4IFRBPASn^Q+AuJ~x}=b{vkA{Qe^V8Lg0P6=oyd
zgDLfQv7S-F&^-<;4QHR{n<B5hYKj?8S5Uk$s)LphQ=qE6%ry#A>n-Mmvyd)&AXss6
zpPCZ-eOdbE=W(`c%0W6UAN1PaxanWXT9mHta&)5C41$_iQsULR>&XVVkgb>p_MNz6
zjifYrdF9Pptr5O`lX3QQ$@C8-tpmnh#-v8uu?U`KXiLF$h<!LELwtFEyA^i6X!B_9
zZ4&p@gWMUngvs-%qZhup_&9Q#>M-brARD)0v<&eq7@jmT7ls7cXZE(7MKM}w4X^)@
zkS%3Csu#m)G*CJeW4eIu2o@a|jHV_k_9#zq+)ljd?UOn#u46!&v|h|IaC5Rw0{Yb|
zUrZu(PE3(Y1cf9}JtY3@9Qj5DN&XCU;XC0$D}4Y#ji8E){%%63OTQ(O6Iu0<PkKll
z&5EFO@PO1!){-FP%h5&-M`%4&Rd3nM{$EOs(fXm`*3LH$tUp*uPSR0WE<7^{&<-Se
z<I2sDOO|>>xeZu)UStr~f6wSTSC%u?gqQEPzFXgTYc@tZS)A88Wr+f>5o=INEjfj5
z2n&%;Z|-<PJEGw&EPD6;t;Tq%H3P;*Z_%7DqswzzjREvbt(tj1<CEp-wifoXaa8MD
zJ%_1iF7i#h^o~&|y1&FY3vHK!VScwFnPr&EsxRIlopEW-(p*4?alive^os|K;{h<|
zVe*;CBFvxU-i>Cj7D@Ltkbk2;d$F<kX1=HsU*DK;Hw8#0egtU9AHPY7%a}Su1!2L1
zw*DE~l&5dc=)O<GO3ZMdp8fWFT{;$=o2F73UKXlc^=^rW{cGem$5@$TA?QFgcq446
zc1dPXgSHlP9oxn49p&6OP5at60RH^oVJ4$pZo0?-R}#|$-a*Rs&iOz0xl9ztt~&RE
zXtk)Y5H{P>Z1ZV2q|9oy?~}w)ja;>Ove{Z(W^-F+j)d^5%!$X5oTgOk&?itJV*CNm
z5Yc$5{@=!n$1J>d)}1*)TRmVDxA>(W--g6*KVN~&OK&wrHtBLMQ{~}XVvZFOwOQxk
zEbB1s*)s3uZ0nU*-Tv~f(s!~hEZBXst@uFwNam>FjQ$V<Ve>|^g&gw0+IE)B>xss`
zkmyF+SZZ#m5ZRf@l#U8Ej@T7WKYhX63loH5V8uhDQiha<u;^%n)MvoJxt*j$TM`s5
zSnuU2-jG02T`WzN(KB<YZ&jBb4e<WbNlzBVO?B7C7eM+T3_Q70hWZLxuju{2IjLC_
zm!{{Qzq}Co&RkHkJd!#bum-_t_Cp)#x~uhjv{_-aAAN)T-&qd@Y+l{WLI-%Xq;nOi
zImX4WE;5<1@7Cft*6H>c*M}m(8}5C^#GA+l-*e5SDw?(fUF~}2z8|i~u!_rNeqzz-
zqX4~?EDJ2=K=l53mSldS>v}N@++YS-_mkOhfCrV<;Q;3m*mrAbzLg!)ktI2%*XJo3
zug)u0|MM@37%00x(!ZFQ?A<i|i>PFbBj+liPv5Ngw0azvfwNpVgk2nNxdbi5WVCy{
z!N7U&yBOr49Lt7ZKFRoOV%PH>EKN2(zuF`0Q6iubLW!xW8I8iRKFp`MGT0}f*zy9d
zgVW!4B{^P7!}uq5UT6eV^nR}$Fn+RzK1l!l%hbxsO){3^Sr1K)%lVJe%g^7@9TWa+
zLbwsjAX>ORU>zU$7X{Gh$hc*=yzKo6vNvxsK~sORsYZ{h9QK*ojzY~{NM@&t+H;`*
z5`D}JHpv)mub<s_%vTif$?UJ_lfHdbBsE&UEQHe}u$Ug%bFGsZZa@^mIosfh2joB`
z9`klQ&BXEj(>H5dAsxB7nKFkhS!R5&myR#-FCC*1`)`s3r~ve*8fGCGaRn}srY&?`
zut*KpS>jpwy=pwO3nc>AD6Lm~!=7s8f00o-nvUrMVyKIr9EKzH3SYv94OHIVgNNa%
z>^kN{Y+)!V?v;+zx*PSkj^cHc#8|FG(=GQfLmf<Xk3WFu;SoeWe2@XLul#Jbt0BV1
z4py-?t$gZyt1XbhoBMj+H;&uADgj)y0*)tC2`jH{Ujd&1vJ4tnlb&u|2EIyu|L9{Y
zzw>j4A=g4>rWCm-6j2x799q*$lqebqID5L1`u0$gtsoA>?7vKu8^muo#zAe?83ei%
z2DWN%o7?(<N!j~6b}?JrC-+vkq8LKlLhF$_-|wNp53tVN-pAElPyDK7J9OpRt5bgs
z&)Dj#^IBv+*c6FyFM5^EVghVae*oL1Yp1PbL(C_AtVJyjV$}MjX<4dq`)BFA?+e2#
zEB&8Gt~9|v2<A#o8VLHRQykYJ#?P+c)b?a55mRk^8Bo00?~u{`c=L2Vzk~80g<@Gq
zNoqT+jZx_an6I0(_Gfa)!Spenc(uK>H)Lw!3r~VfQirDR+ciCYb=3Th<bq4`@Pt4I
zhxBmi71L35vCcqZ*gM23l%hkyABTX%H-k82gRLfN(yDJ(_Bg$nmkpQF3i{xqS1cCu
z4R<cy_ao$OFfdsJ5Tn4pWFb|X-wPp2Y`|xjG|y{GzzBhI$GNYb`|N-DCif<kO8o5}
zMTwL2!Fj%hPD;2KF#%JIf^H!DSva;K&Y!g&2D<$=pDLy<_OMEQF_kBnCgvOfIJ8$5
z%@O7}?O2k!JZt~#1VrJFh%_6Z^PUj@87+<~`&F+WCpcY#hileIX0xG~Rz4eqtx{NB
z@XwYBcv)<x9PnSBy^dVA#Oe=xV$mlV$C6dbCum<s5K@AVBmzUCzG<b%Y>nSD;SJgg
z+%0a>?`{QlKj1Fhx0uts{sK4uW5cq=EM4Te+@xDojCxrg81~|B^QvF%`K<@6O{4s3
zQD>5Hhtq671Rva^PJ=G3=^B5e^p1gh{I<=eij+TW#@0aW!_x{wvFHoyJG7ULC(!3M
zPTLWJFySeTBr&qI??SB2=)wSxMy#AaS7k?>8%fs30rM)vMd1FvQGE?T0`ba3cfc8|
zu(*tY_7_L5$r3+-Gsw%s9r{%_LtHD2<=V&I$0_CeIb3xKV$1x>WYR4js@|iFm_X8Y
zN)V|Csl=^~DIO01LjAeJ`JKdX^C6VnH_~0F@Rh>M+ehHhoj|eoTf5NEY26{<E~p2X
zh5lmD*>o|LRybA&wEvqYrirfDyp^Qe0d-5AqKc`vX4gM4Ne3o;q~vK8;!l*w#}^Y2
zc=zaF<i4CtkFbdMzLS5WZ6CoF5SAM2a+whyIl3;||1e`L)#V8gC{m&}iW?Q;LJkGV
z%=m?v4moeaD%5)VP|jCmE%PMXkk8EiNb$|rqN?u41xLjRP)hIu#Q75#&@ruaJuMCk
zeDi{FxKCfxW`$0HvtqhIZ;B~O!tRQ|GxtxCL4W?UZelyDNdbr{gkTd<KL|HCyPlr+
zrhM@@(rMNzy6=?h@&}0#?agx+d!LL6bJDCV8-kh<%sWWlkPO;1!wp2*TOzVwY30s{
z>)U$^^vv&jlH5XNJ7Fg=Cw|{`n9h%w|MZgd0cND5|DPGjKxQuPL30Eb@A=yQOsRZL
zS4=bbFzc$hZDdCl_@Y4C(njV?yKzdV#AyYbZ^P>6VcZ($Oh+VUq0ubw4(U%Zp@yw~
zCTa7MDib~BjqIV-DY>6=z|@^YpCc;}_wh)=OWYRVm6FmMioBo<wDz`Vmli#z9fpud
z*aE%u`&^I9)VJjYpAOu9*fJ)j|M2%kT~1X%G{p4ZXs`XN4%`1A(?cWqCzz+wU=<;k
zJr!<(kgn3W@gaj&{~508tj_o(Z_TnH@8R)-KL<+woYDT5gTj;c3Db9faFBX4u0T2@
ze-A)5p|A+`P|*MN@{(Q7zbNt}v3uVZBRQx3T=g%PS3Nj${gv>)-}ArT^Nfmlbivcx
zTw-oo&I(|}JN>=W2cbX+63-!tQnmYr8?_*^*%$dPtU38wE9pYsTifwz+2P4u1}QqJ
z%_=?b1nA3LGYbaJ+U=iRdjceo?TuVKS!Q~4@Lcq^cr#t4W-fRo%NvU%itCdT(Oi%{
zO@E-vZ`0)=Q}N7FZUFrL$e(<<d=F?6->L+pBD&=Mq8Km%M9ZZ}A^<!<dK!5+hQyoy
z%;72*+cR4FCokrFB;OMX2l$8o_%>$9J_f`v<oHe_lyrn9n@76p3Z{IpF9xt~|9Ap~
z>n*NlIc2ZrIU@u8doHN8iN4q#kNZg1-%jZNyRlo&o0$x^)BOK_q|C&_Gx`ydC=uP4
zXCtUh-HiJ=KHMvCD)+VJW6RTYA%s&##=&rE`Ap{n0677Jq1OQ^rgStb@*yONzr>5M
z#-47te<5t@0Bez!8h9X471XB{R2AOuqRlb^t>+^4VTw=g9%exHXjsk)WM%lL4eYtN
z5wj+`T$6xv_<z&8{7*4ypZTA){jP$uBy--o1n{l6D<i#rbqdeGAC%gAKdsp_cpD?(
zmumTx-!d)Ykv`mVttq?KgCCq69MQ!B%jy*}zT1^L5a=7oKgRiP)jYHQd~HIX1tCOS
zd#K4&aN6U9im-+EkAo8K;C;5MmXsgwe`<Uj#F#j`20@8s`+i(ax;Nm~>;8@Un6_h?
zkmqgBxW2I0w~@h})yXp3#8+M87Os^N;SHenRZR^=rUW(=elWjw_8qSn_g-tvSga&K
zKd*mr{D2dxGcx7yS1Sw*39t}5duVlcR{aV0?l+%d41OS@D_M&Ub0`2aMN$!4Znw-G
z%RG-&@P5?k#AOmEQk-uez|bn1^Tbu;iFbXG^h<R!zxhtv$}l&@GAa8mg>GUt;Ya7p
z&uRs1SgbOof8@qB!+`?V$)^>|<;lwEO*@*T4m9{6!PUYc;gCkO81>jP8i{$M@fMJc
zf4&E35yW6xKFim9vwAI`-G8wuv4|;s_&{;A?<pm_6yy$E<uqTLhAg`97O&rjX1H0l
zGdt*{CZ{qm{$7BBLC;SDpczZ@-en4$P2TC0g!2<0;!AGF+$0uaNs*7|=a#TP42NW8
zH<+9YE*WqHP-$AZQ(dDyzrH;n?FE2FPD75m@&bZO(P|GgH!f9|*#nZZnWd<;K9!#d
z2|sB+wis}g#Tt1^r4Bzi(7_tmMZFXZt{th3HG$v9L7pI35VL?#fAy?2F$9BFGj5r#
z_C<LfuWG}-Chp1&KL8iKDx$X<^%Z$pz5`G>y4=w*Q09(nl|*O{g1dQkFNF9l<-TTV
z!TN!E46Q+pTUg-&(;~AdV|ELl$|3*`X3^p)BQn9S)Xg|$^!AP#Y3}j4C8F-c35x+{
zYdMMP>gv09;uSg9v0TAutT#kYefm+DdoQwCU^fu%Mx@_JF=oGsJ!GujEb$a>oX0sV
zTSL*ywM%80?jdpW<`3#dP8Tho3Pu$J?Xx(J4WqDiaFo_VJj?nhEE&pZZ}e`vxo%rE
zSMjc}g1lq>%}bl9mGWd6Cg-=_%dW(WxQZ=oBjmilmZpCgmeECuE3PLiIMq*HRV#Hy
z%5=z?mPSL0f<hgd0IfglM#gCz3rpf4an6l%)uO6|1vMTu%bDd{H_tV19$0rgz1gbm
zuoCYGlQ8+&dL&uyR?}>=b{b}zP!C@~zR*%4gx1$*6F!}mg@9YqYA`SQ>h!PZxZM8M
zlwYdr_u6hS?#b1QW<}AR_!=c0VmJoP2z$}*p!hlXOXED;if~wAmk{cm+hlRukk+ZP
z&iV<8&cVB(#?HuGJxG2eazy;j{cjf95~neZy8^~>+7~}HM~N4a1`3uZG4L5h<8SYs
z(G0H`rcMi9dKNbx?u%l_$TtzFqD**Z-VKMf^)-F{`}7htE^<R-AFqF1@9e}uqu^lN
z5B*50p0-Dxx$JgE!P6z02SwPF(Ql<dZ8QCz@oe%BXd=2np}Q-^0o;Vo>t|uOS>dgk
znV5?)3iO-e{hlq$uGb*1ctzx4V-Be_`8(|0oe=d}Ph^2hgz7Wj&$z=S@BYU;$!9>j
zga0tT767Yk|BMtBZ;fgh5RIQ+k4(MlD8x%EDCoj|*|QjTk64m{p&1jS@zlCrsl<BO
z>g1Kn@0S$XPb}xpc!^<{3>vS;&9&HT{Ah!Z3)&NeKEHPM>+Y=46ykJF=Qk*Ibm%rF
z3@}Iv7zEo4;KR-X>Vd<@g~}6hL)m<8!an<*dSkij&WA(S2c#F@jVc=Wj}c|4rIcq8
z$fP6mo`A2vJI>j>p&>~mnuTLG^A9SHjr(hyxk)zwi`#wxcO_brZo*175)y!HkGVq*
z`5I<AdTX{yWjW<5+YS#M`x@{5D_2G7hvF9}r?V)Wq4$Xj-JKl7@RH6b%&NNb7`^<Z
zHjnATU&a9g-yY2<U6CCEgSQ7lnm7i|2<BofhnuEl2k#n=&=`qgP{#giyf?{@h3Vq1
zLP8f-oTSxK&2F1=@d(n;GRRjdm39{TSwQoV%(n4z$(@Cej3vW$n%aZ~I3Cp#C>^_I
zRNXZGz>aOZS;7rWM<HAci=-+gYvKIjHsr$AFJ!+h>q^wRo4fsPHQ~_$qet}Bn^I0O
zP876r!Y4Jow>JvZ;$2t-qN}PD&ug(>-(j}D>lET_B3DhcdOrQj&US4p11^Q{>I>Iq
zCaOVkr>8}E*!RI>ygu``?+7}Te(iDvdLn#c=BUwt?)Ssg{3j)SVFuVkqNIB&5-Y2~
zNX+Y<5B$zCUX`h(x=)AleG_)oT`}g;;^0;FzQ{w%41(lo&IX<jZ%WpfP20{aJ6ar{
zkH~MyBq)hCFK73j*y^0<EDt<h=%0!~GizV*%taaZ7t#uOG4e4r@9p7@s6O;~yp7AU
z8tI;#=j#T)>f!%V0`^83lz$`c5~*SfEWg8J)@}DS0SyA&eE_%2LNlJ1C(MdldsQS_
zu%?^olnhlDX@1Glx~*J6T(RBayH5#zb(#nHia~mUNGw3e`+dpgw=*`O2O3vQt2`21
z9Pc35{P|U@Xqm?^1LaZ*x_srTqgGyw605rS`|2rXxE9Vo=4UM{ET!+UleL$FRI6?E
zJ;#InAjNZBFCs4UGU+f-5GvlMq@=`#)*BwSmKdxZegfS7HWij;Q~=#&fO0x!WniUO
zS2D^GD55p5TlnM}qj1-g?=(sT?$%hiBHki>zy2?ZJNVFcniVF7G3vJ)p(CuA_dSB3
zP3B1BS45s(1m8l_A;)){M>bwXlDSeprTR2g#-D=o_#DS|P#u8d!H*QkQY9w$1zzs#
zvS%lsA%xp7;Fqa=Pf!TEL7<4^(Y3ZaB4NIJ1|B1_hLHze?n$xFvOC{LZd(Puo$Ev<
zVUaA_B4o$Y5+AtjdYa~-Du|g3dbRTlW#;Yd$+cPGT)*f7XtVFHt)0`e)@d2mAJLL2
z&g<j@hTYeI5ba}H3uYawlDNgzGDf<S*(k|CIdb(rofgCCxz5vUKzTdOaB!iR^^nZv
zt=;m@j<GT?9IX0d#s7!ah_BXELwnvB`!v8E{F-fFSlgoz*9>9#3AD-|Q{N|Anuj~=
z3`jhasMb8mV#od>k1GbOUfmw>t7|MNwa7Deot>&3&u|8J<>SkTBuicp+)s;N=Tm0_
zCJ9p3RxXz>Ke<l#PG$KFjRf}@Q>}o<<hs%t)x`t*#A76x%HLfDXF|<)p-ftI>mU}7
zBF=f`#Dn143r3!S<=qat+((N{wu(89OQ`{h_aZiHhzs4qEX1VprELe5S@AhvSq0Iq
zXmv)4Jk_tyOHUZS))>>ptnd%=w!YUT_?~{#xHl07fgr9~!_sF7+qg@wd6J!RcOP%X
zxz8S_*+fYCi;{xXI1H|}RrkeO^Gk91pBSuVrsi&a2Qk4J@xEPy6CCSd8QNeL&Rt>Q
z*_!H-dV59GKw9;IWzcySut(-20e=a$moAoVa9yH3U==$b1%%C^{z!&phuPQ^Y5jom
z;76j^jm+DyWkL2-dQ-D+26P!jXOcRcm#AL33X8!m#LwQHQ*iRQ)E@ETMSDWrxR0}<
z-I$Ee`0db`C;CG7YzM(cas?Jkh=^BaP?g`0iy?@C-1(99TGR!+({-&bwvcygIwcVj
zkJu}0-!s@J3bhD^sFZIQ$MA0!ZYc1PodHX?aQ`>Nwv&xB^o*bGr-K}1pi6A2(iXe*
z=uP%X0r5cWO(n}YVYWXjD&>lI9)f-L!M(ixmLFaPQTm}I-Tj{Ntfzb3+}CbkA}pW5
z#t25I#aiL(;aU&ZA?I)WE*mfMon4-|_QpX?;>Ek_uj%R|U|AP{J;Y8={d>jUQgaC+
zads>`a#ZSfHd)NA(5Xrm{;VzGRRASiJ1EU6#mV@`j`_>nIJ&WNkhH`I22*SVIS1nG
z70yi6s*p=+&{K_@P%n=*`AqDD*p$v^+4Wgq>m?Drwnoi1z-f{yEtXC}^{NJ;kWj>&
zFf<D{NiovJ8x1^}StCUps@}M+kkb;{t@h3*Zt9aWKi>3(p<ZE1fUnH4_QTWXZ&KmT
zr?1`4Q>U#bayF}r`bJR{1J<zbt1_0{e<6i|CN3lF%qp*6)s$LLGF<doyYpMCbYsNN
zpXdZFMl9s~j?HE*c2!;W5N;SkSk=2fIT0AxC{I`$;x&1~LlMeF7vCuiQ|tM?kO+A1
ze7Yjrq`<i%H(!1^Ec9bOCBww0=S;LBG!zl$Q(Qb#n^~mlvm0h>MV6X(B;T|e#r&60
zvHCZe;=ilX{Z~KtKctGtb;JK>n&nO`dY8XLSmcZ;5~&x4(Y^#gq%4Pp-~FE|jQ<Hg
zDMQf*plaj$wx@kR-|rsQqcJx><LV&&4}m)eKKa`XDT%efx(!s?GX>VmPZN9rGKQh$
zJ`F+v;2-cqzUmR*1Mcn1$W!(?(slc12+q5|fD%~aKkdRQgHPyz9Oa{hk&6tVQ)Mg5
zy`PB7MDQ7qeNB-lZvx7{DGY<11Kp=@0qwSDzzF}FcJ%+(W;e6{vsNOMO?2Uu2mh!%
zY^J8BN(en#bGY;L`Z?t+196&Lx_oYKS`>5=Mb9WzxW+xSRdMdge)2x#E45Z$KbD-Q
z(kFLh5>h0hJ~_L|42!=Et|@w9<k55a%}3iVA(&Y2;KGAW{Dne)ceu6hX4U~`vQrgX
zte+LFXySwDfZIY|2W9ISOy9>mP=mGZP71@e&e!?_>BqPz^jp)u=H0(iWc4^1p7Eh3
z4JBD)Xn+TjUzL56#%q9ipkK**F+Toz)Dd6#`760sO1P;qCpDAr$nEiHprmtO{~O`_
zs=o>6|GHXrN(K0j<LxV<FsNn2yxh}$#ibSffG5AXQ$I`y)+`83prmsS%E~=E^$BTT
zEV{gM91|u&j1}EUb#K?zG##<};3gLoai~`fwNk`azaZ<Ul6bRWNk52V<y~6yM~@hP
zO52#s9?a)A13@Kk>U`h^*wqIn?K8>~rt=49eP`No*g}Z{(DT|uHUpQFXX!d$*}gpZ
zS&h7bTg@ZnB{s?U%6}H>4EZub9p2;P_lP!te)ls6D%~=IX5z7o_^pLw1rE>cN<hT0
zdCtd0wldID(U^nls-v)?*;AMn-nvKxzYwi;QS9b=E$cQ)Ig4Vnr6pjQn@)sP!gW~l
zo_4}aLCU8p9({9NcnrxN29$dp&VTOY*%79nE1gYm*r;aj{f3d&9mwguH*;%=nlrM(
zxJG8X9m-48BiUy6soH!?8>~AY8X<Y$6p_Q$ydcNwi;)R<bXALg#*9C?82LFzB{V%f
z`N%#Ll*kO&25@+sNquO*6)ZU*J-;{TrA36vLxPl9S{^4Z3!M434x<ccBG2vi7YWsq
z{Fg%vZIOFug?$H?@5Syvy5>$kLN1ZX?R7<S7X++ICo2A*Kfl>eBrj3FZ=w0OUssLQ
zc1xHmH?N<cQ@p|5*a~WWN2@w&rVx=R!<@CA;NV~r-P^tYM%nQk@cVxab;tkeYv2Ff
z`2P30_&?#gXf``vXmss6)r;A#udh}>zvJ}*`$1Iu{2zMUUlbP^HXz4L|H_z~cp3CS
zY}))cGVMWjwH+e=FA7NEgX~YYNA=%nH@fc{fG?Gxt|4#gfd2aOztG|c|3$h3C9^f*
zXqsw=A|AZ|-cpcOVdIIfI+}_=2wVm_&HV;49L3o=;_<w-uVuClQH+@RT71xL+&6Tb
zN+UoNNdf?#%exw`<I@5u&dQX}hLkm*xHzIC4y6PGtb}kA(n%vrY^LOzp+5f3j54Av
z;RR7{{V)N-aL~zVwdQ5{_}X~G#lqKfV!2G5-U6Pk-MSXqE>YGgQs16&o_f@#DQHuY
z-SD+(WO_W~dO~`YzdV_zQts<xw-=7S!C1Pd)5ATbp7O%$dZ<T7;a=_0uO_|s+P)8&
zo0>GV8%}lQ7xJREz8>zGlT^qO#5$7P($1Rp45UTnnzf3+c|V1VS;)Qb+&ecaol=*5
z>n?2@B+4kY`#D{Z0$jkSx`A=>LtHAM-0*v|8A&1YbzhWsZ6|Jo*YNkwj}!@bRCAS{
zqfQhN(QN@s6d6UfkQCqobBJ?rp_wy2Z;e=uvdKb~FY)WE)@%d)6Ot#;r+Rw`z6wB}
zy8F};Q{FuWjeR^>e&KbHpMOw@Un*<LM`4{%?F*KOD@B3xZMz%GA%d=ioKGr%K)fph
zzuxX^EQq?p6Gs9_#nDI){==c=RHE5-3pamMf#=v5bM;<xrdZ?MOY&znG-m3P6ziQp
z4ZVa2(Q70|Zv4Tw(fM}@varB1Zo|meuYIa{{$n8U5;g6YJHI%R)ZRO5c{fVwUA<_#
z8!1E3ZwJ+LW$evX!J@>!9B*Z<Gn=ARuUs7dBGDO_D%<y%xrfhc<Go;<7)a=B3f;&u
zD0HsRkXVSPx|0^{y{d79n!P=1vxeHSRiFD>aAZ(qZTS2b_3gWX38Kj_Ddcm-UV7cR
zV-uuJXemEQHv77sZO=9i_ikt=PLrTSH4I&v%QHYy>GMUr&|u-G^#qF!C1n$d*Vgc1
z?4ppqRuX3_yY~*DeMgC4g<qOi5kg{z;^i6{S8aHG4{R2P+16t;DN^fn)zzIU7>lg3
zCjuZsofJF8Y#rCJhe>E=GO(3CkL)*Q@J}Bz^+3!~H)mBQ$(6^Y&!lxcUA*`RrcNCU
zhylTs@IMT{`2X}zJyV(uX+3bV+CN^ExyL<Em=p*yBqUsN2VYeEfI1gX>R;fpX(@f?
z?vh(+fB1`kP&K>lq=*WDaxk`J9b#GH<V4#FMh!Y1mb_W9h2&{n?D#B^G_)GBQ@!}a
zdlcyfYyhDbIysl%VtAH5Mjl*E>)DmrW9tw@x}%@;)juV-eta$X9-nKSz9tb0-t_Bm
z-b13)F!Lq&u3imrg7U;2IYoQ0-G{lc@zkAdH``!m&tX}CnEkt_Z?u_ry7-Q3X%@GE
z$p|mv#_JWX(=+XiUrHZvWoF<1KC#Kf(LLZDHz4^=@ASaP#&R=s5B`aCoh(O;AZZg*
zFhl)-e4e0&{JT!Y57U07Nu@t(t5qb5r+Ad|6vwydwsKxJBb>TvYTb!M03rtOV(ZX!
z$BXx}1N*O`u7`!k6T<V8Mt6c-cpHWZ<GpkDhhJD2;3su7Z)UC{ra`nnp{c(uqxXUf
zZwP4{Gr~&l;i=<8+&J`|>f@6_kKKOI(qo)HLKC~FwIx~x6FW5lfe^X)oa%|vODRa_
zs%4`v=ODj4#>;`<?qmo=Ey1qtl&!uicThA5%6+e0`|aHwW`{8&7QlPam3I>twr_&h
z>~4d`zQxhTRe2EjXFL>FdIIFi^sV3nR*XYz&u))9TMPJ89Gj)*?WN&by8ohR%mXCW
zNIGP{S-B4PT4JR|M?57wO(K+=RUwC~!$j3ov3(^v7ULrt^IqjRtaoaOG`Pk<#8$%H
z@p-BG$+aP;v~n7IP4zY>VOJ(=XgW-oQ~c^)nKRkk{n~IvVu9@;5S1Phudn+)_FpO=
zwDC*A8gr~%+aJuRS{exxD%?xv+H4AT915Kiu)|PJ?pLN&I5~smt*slI7T)fVfD$GL
zzpu@%(LFF>8gL0hGQdTM>bQj-oWZN_xt&^{_o{OzEe)zSvSdCus6TIvzHjy6Y1VS5
zlfh;~mlsPc6p-{?0>0{E+)!V0n`+Bz;?UiwEnS6c`=6HbrVQVIm6M&^I<py`+;}O7
z$#{*BCez}l0Kay)25z%qeK^EEtfxqPHe`AAPK0hF50LtD(3(e|8{vRFzmgi2yo3Cw
zETG*Dxl4v$Hnw-3>PuU;8#ZO|67vA@>1|xlSvNoWEG4T0poA1NX$j!f2pHnN|3<t1
zyz+#i0@I583*}m%rUq~o@q1aDU)Z>M5^33z%yYu(mb1?P&K^M0HbGet(+;%6IsbPz
zxiaFEcS(HJ*0vMR*2F97FP{XI#a}P!Mgx<O9`PB(es+1qi*?0{cp!A?s&9^!p<nQ_
zYg9}2(T7L$yxXRASME>mj`<e@u~@Xt_fy+h!W6pUEW4B}eOhsL6|RDFD;CO#8%WQn
zx?a94+RxDhs4B^{^X3#juep3-&i<7n`5R8j90wT$(Rmefbjrc`@Sp(*JFNMLw6F1u
zyE!knIrl7(&3Z}e7k-4HJ6Sa+UCqA#oFx<bjW#!6_fCu;B;s2J->XQzZNNLcc<3th
zN{_vl;8ig`QCKAaC@{=-<c<*@6K8RG{rqi+NeAWG;4Zq|`ibD*^6j3$dKtO0OB(U{
znl^{CIwu~wc)@`A^RA$oz~h}SAq#I7%uhWGOOI!b?x6{gI{an^E?n!{#;%b5y<+_@
z{&GHIbK>cmuiwhhHln{Z#EYq-4BRxM1kx#M-^=9S%o9?A(M(p>#4WP~>jm~Jgvv^o
z_5{apxMSv5tl3v7Ob(T}I=G@%y5r>S*O2Sh5}YQwF8BX!X~heAMBDxsMGZUh`=zE1
z(Z49f-lqLU;ShlQUT3ZG-(%TXes4u$hNcnvQINDkE>05*ky<&N8aV|(l`a5!MS?$f
z%>U*(U8Bh%Uh^hdDhn6@1s4V+i1<I)d(WVz-gj*j1Ox=6_Y$NC(m|>qQIsYE0!rvX
zX#!HE1PKX(6zPJ1f)Ei9q(yos^eQ4vdP%@SPY`1uA)fX7?|1KapZ%T>XXeZ~XWp6p
zfl+2wj9Kf+eLv52Uqw&3CwYQ-%a+PB({cX;X>=bqIywP(e-)HAJ*{!1>bpNz{Z@>^
zyosifn#0TuVj8?Cxf2WZ)In#+Uz*!<cWDktKA`6S4CLRXLpefI&5Nl*2{`1bFggwS
zPtr@4?{1Q;AOS<sU<W+vPqd&S)F;3zFHQnx-!}+gYU<n&xG?)K%}68Bkcalaulw)&
z^3OT|cGZdCN9@8Rh{24WISvHmG!1^e;+adIt92PebeS)DZl?RjB`ca%?;OhHw3-ry
z_O^BS{(`IzTm9Z=6{DLU0U;|t%J@_?IT&!g?-IRdXEN~}uoweL0Cnxbe%W7|k^^V0
z?t^2Ae++VWU8XQcnOA7=>Hjy@GTjF9zpwh&xB9>D#J|@!y7h^3)Fm+~LNgIVk-*SW
zyvqSDx{sES`Jdd=*W~0K=dKX8HavW!#Y<4+cMbZ#lt`bl-TQxOH0G$IWk4k*u69ko
z{qKtT-yP!rVMm<1fnOrN?j#Hi2qT}ZN|S=nFn&n;)P5t(qUNikM^i(N0Muv6>2~+Q
z>!22zz<C`3^oN~|tKn2jaxa)@@jLzWiv1VHg_l-_Xa0l6+y^+HCqF@-KC%2u^Hym2
zzXF2qqqiH_k7>OzzxUXw(ee&;A^-Wede5gOYayqXyoL8srQ&*h-I&8mz@?u6Ln))S
z(M<{qu<OYI>a)`X^_g!~wafSWcLDye-7KQqoLXDigJWMGnmh7B(Ba(gTHSLYjw@vj
z>`OYnKKm{4<N3ZqgCA0XNV#LWSw$-s2m!eP?wfdu0O<42hH(0&rzqFMDo!~`Gxc98
zQc_kmW+G=SU1_DB-l2$)7S=&&tHLb=NZ(j;az2Y$dflkMVri2{{%D$C%ZqCPnfG+8
zjV0f3Iz$Q{{#vrw{8f3lX7jcDT~TXG{M4#rd;9{A?B^f!*t_m;&OfpB;I?w{#TVn|
zVh|cIQ@}_4WZP^-!nCB>-aT$zf#3=r5oe3G(Ba*>XA_&;kn_%5BLvus{U@+mepH`}
z;zNogwBMR(L?z@-K^?udQr&{xV@%FjB-KE;WQY6C8yQ;1dXFe_yvg2u(#d0G{cptF
z)(HVv{SCZKTq9JFl=s-L^kLJ~%8E((6i*VbLciRZ{`HVE6(sIKDhTk}7gZk8=Xt%a
zp87ypf6>?|wNTB3zW9E8(3h0iVBN=#*PUI491VHyrpTg|@dty-w4_QLkmJqsGd$B0
zr+m(sCojcFkW+%2!M#7_OzVZA6&cZ;8M#tMQ6Vnx-GBQ;8~PW3BXJ*?r9=1w>CYg9
z-M84ADgCjW(NtY!+aS|ThV)N;Z>s}XNa?18^!Si;{)b$zkH&*PubzB0vW{DCP4!4k
z`p95v%Si7YUa=YWy(y-x6Bro1EBT*uKi=2XJ6E6y>VQWa$4>4gulHMFhbzvOGR!<2
z7a~MEv#-l+nTxCcMCW$#Am%{~2n|wahajG~Po?4`N-z59!>S7|FubOGy~E)qz~%SA
z9!ST5-kF^(men=4&k`)FxNpeY53|8pM#Dot>^#wmZQQx~;7?x`OO%nk_svvac|KbA
zA%k1mCza>l&d4^^Bs^|@*D=<M6wm{DqcEuq`y>8f^@kJQw>R(2ML{>ayhnTI_KTqV
z8DUv2`4)GA!X9#40x48-M*nQbotw!CJ2o?Xe`x|weV&kan@Jo;dJ2{Vz#Nfx+4Y|*
z{__t4=so|-#1)zh5UY&Rk2$;Kkn>?{0Q0{Q@de@misQZtOhc-f&&i?-k=_&OdC1)z
zeA|z~!-Z6vd##1KifY}hHy(#;1~T8Vn7Q+LUhqO#3=7>g&<!i-8zIo{8W5Ri^d%t)
zkb#BI!ewys2Wy7!AAV%o-3|A+8O#JQV4p1=uKPX8++WiO0oNZ4qLLBZr12=SJazJq
z<_RispeipXuI47a+r7r+&v)M56P~p-V7k}+d}d!xFuR+4lC+MvMEYe(g5o#&Q5=m%
zsjgohd9|IfmCH0qf|rlbycxMTBS62Ia*lFxno3oKd(@M%yzvJcU6{ueBLJ?)x&+HN
z;J2c=FXkR&77{pc_~3Xr!;2Y}8+#!TeCOx8uE(Sid;pMO<o9IE77reXvg`&htX^h6
zh3ZV$_rTp%@7107K0j@d=u1zJJa|bf*sH?C41&xabOXH5NY&j5bcETMCfhihINqF4
zeW8H5pAdMCadG(buRq2jgcA^c4Lj5iiU+LK)e-8X^m5oqWO+){w9B=e3GZ)%$4qXX
zGC|^pup!Z{i;Q3HLN?ZRQCRfi4PqwgzE;2PLAt|goYa#PqQt7MU70|fq~Fq5nGEuF
z7`ltf1_uE)9~%ys8F&x@YE3w;o^45zcU-c2Ya(@Cc-+?bfd#EopbFyUMaD^W$hW^V
zDS%FPj9745Ou3I&#x|#ZBY9&%G&hsWUF<JhiTl<k;&I;f^;!FjM}g5whfa0ph^PcG
z!#_|6Rd*`2c>qPS6Fb|Qoon4dU{wm(!z~<+1Oy8~GOyibz5EW||5D~VLpRwa=@^WI
zc7sz~Gzd+lxPWsM|5?g~($xrki(FqXo{vk%fhB&8i-t0hA2J9%59D!}=vAT95`8t&
z8m{bFP1jZ5Cr-%op{?qP-^}bvwG4F1lE+=0!?-tN8@T7H`RRyUNxzVBF02G_H01(&
zLI)JLgckNORjAW1jS%XSuY6PS$e^2E<h=P4LzREECg8JLQr?FTp@?Sy`l&e|FZn7o
zMW6XE&CAcEU({}ZYPj{Tw-#L+2=)e?WWN1lH~Q~k#<cv6>c5Z5rkTG->vZZ;Z!ru7
zkaGSRk>s^cAJwo}Y~a|2PGh;3sE!)p<Nu6D2uWo!F0jh<X~0SFeTm0KDNRG+rNtLL
z6QbTItwtbyclLkt;Rx?0Mig-g*_%zW#DS7P3#cXVysEMCg5E*>ahLlDpWaHQ-yr3*
zZ(8j4R-4}sVtj!wvVM$w+f-@!hyQQ36v7+N>S=h#3d1M#gFPOrI{;@OT^N>5<v2L&
zyQ)M6134bG3QFjX0xC40_%P2oAHNjtHyHLNv}em{q6d|@hHuvAhXvpuL?qFYBE?tv
zm<ZLHI1Dyf`u-?A+@jrh3cb+ff{6JJsoiR@GQ0*<w*N@*JM*u<1Ks2=4G19ww<823
zQTW$WuOA#xgyn9)d)34hcDG)Y3fj{=oaX|wCJ~>sPQP?6sgfPJ3cxYEbpN)|sd?Nd
zri5Qh+4Y*~P)3Rs`}scBc!L%T%;npwvzzB&a=#yQ)q7zP3y=VkHuNQ6T>-Q(Nqa(S
zS860qP(45F^&|C7E@um)v-2}A-gqgdWW3E_?v{G6RL31Qv-`6O%C<f2OBJ2^y_C$7
zD(8IbD@$&D`<-0AaWzJjPj54%QVMD=_nd7i_gQ2GG-TKQt?Ikjp}1dDRmSkSRL;)3
zQctY6p-UH6;`8iAfUnyS%`h!v${!KVdjeXK%ni*~Er8_Kf35QWUvMP?dJhN!mi|Qq
z65VQZ@BO8j)|idTh6(|S+e0YWbAl`w3O&8-d`CSh8({E=|DQ(A9bP!(5GH<&vBPv7
zu)iV^Ps~`*B2FXTWy2E6?D@h537<29g!T*0Zgh`GMKhx<31?f&NN(3>I!r5D(}ql&
z^7gUWX1q!CYC7k1q~r=29cbp>b=0pP41yRakMT~3t9YLJv63;prtLXS0VrJR?6Ild
z<DRM!KoYGNyP*I+<KBXza=l)+&$gBLx)Gx0q~Ir)$z_{#Nkq~$+qy5!d!F6%UBw#I
z-a7{(lv5fcT3T8XpfG(4r;STfv0qt9JXb+0l@+a{L3p#)jOuyoA1W^@*}s?@|EMHS
zhjPQa*jzJRFmAz2I8H|{EQjGv3f&tq*^eM!D|?}{m||GvjgJK7gaFAI^-gQ1eZLwB
zAnc>`&hTgylg1AWTbC@lzdCYA(;=)-d<rgF)s(<{@WvjkR9%#M01kG8D&Ehrl;t?+
zs08H-lrUCJ{zsjIHOKwK(dsoP)DM9fF?EP>U;yO&{&gtv0E}EjHn@3f9_u?CBV3lf
zBK?neX!q11pQu*>U{@3-JSdb9{pe^sd{Bs*&ShD?ND9B5YwatrDg1I-?LN08qu}uH
zXZZyR-NqU##T;Lno(cyANq@~Mk5?H{;99Dc9T4GpZS~Za!BSvYZ~CKXDlhgVVUG<z
zW<gqePn`%}9g|53H4AGsaZ7Aat>{x6TyX!)NhwcekSu~a{5khO*1sVQ%y<uwF<!(H
zw5S6s!1=>;wJqT3+Myu=@SmOUEJbcfg?^&WKo9*v+{j($832+j*m+L^5)!2#SimAT
zc@aX*G`@BsR{THL+^+z3xGf&}oGJ;c$9m?fmqGCk_WDJ_{kAG?&s;(-YW-ZhAl&Af
zM{|@XUq}dleuSnc**Fji+dk*{KlONA?kq4DnW*UI?Jn2(c~)C=JfaCh_=8=Gr!o%!
zoF=Qn4JxN#wZZ}4&8K-txf2sy@*gLWgX|mUm-7JDxqLo8Jh_2gj07<ypyO)d_Fq&l
z8HW4H()%afV^u55zW+w;4X=0%Ej^@=q`WZ{O^L)0-6J((zqdxQ81WBod`fWd-r9C>
zEw^A6w$SPM{jNJwgLjakPgs3Q(CT?&oZg{uXj-xp$dc@t&FX(AQ{Uq%yQn;iA*mS~
zz_FvRh5w!`y0ZFVoiXjI(SrCzqllu*{U0AcXL);kP5eh}63x+x{CF2X+#W#em1!3!
zCits=7DLP?)C02NWzXLRXDXC07g@LdsKCbGjd3SA5^nTC1@vSKHStfJ3dAD5$FKUn
zx-GG8uq0&nROeyx8QzwWv(Z=ik`4>lac-no+@B<UfvpoyKRwB^tv~#EvA0GM3+1m}
zblsl~#j~(JFv>9Nac$R~tNag!AFUzGsFqMqmLVp5(LeK-#w`M~l&3gYds<9P?Sj0J
zEX&@E@Cpn25&lXQ)#d|A0&E(i^?7}psBEo`vv5n?Qv2iZ^9gV5%*wYFAE~hps`pBG
zm>jQ|Zqw3a0NGlTE?#&vZsHkLg%oDVNiuOU6m8h^4GoC$RxB83oN?ukxtneL`L%FU
z4xMD)cd8`f%NlDJAL&;whz))(Qf`N;*^;(o8N-y);{DzXwrI^XpDMYoy3^Px`%w)0
zpS0?9xQY2bsxSI9+maiIo$ib(gYqE;LBE`lyA?3hUmCxiy(?#XljO%6(MhZ901(=b
zIS-eay_lnj90>aZx$@-rW}n=`hlt!yKSl5GEp1&(H+|PpHn=9FM~5d5iJd1!;*#f@
zW+|Zxc<+kZT%%03*IX*({6)jNQm)M+u6gYnYe|4MCa8i0F(OEgHoK_J6i<cxeonXF
zn)L#cagSzuUOdW#{-aoyVMKF6mplv%;P#vy_V{|UJR3AgQ2}))6-d=~Hc-C8EqoM^
zk-DN{ejlz)`h#D>l_gW$*1^nBlasZlXt6Qt<~&JGc9A-RaZWvxr#M&pCqZxgopR_F
z%F$P6x@2IGTcqq_ifntP6?z9Krg3$NBZ)na42JDb=(4&GT#Pr;7pJjhPxUYOAX$|g
z_QLPZ9la_l#3?G9<xv!D-nHr9U%u8CZAw>o_0l|hSveX6#F1@a0|q@h)4Lsp+q$OD
zEA4tEnK>>Y0$Fo@RhU#nYA%btp5U2C0f}`9!r7)D9dsZoBDAi&sbm8JD20>B&7SAr
z`60*4mn0v%JscKX9%XjDnwE4N-VJS>=v@<mO-*1|_TJY6or!Zh=(tG}Au(Mt?Lts}
zvB@uO_u`My^+%#_=RS-PB71<$Hvao0$xwxlq{5wBS3WUKQolQ1m%n*ynEAL*9$rEh
zC-Yv-j@|NIN4hf!i$CZ?@~gAQ2z8SCa2Rd}(+CgtoI%Yb)l0`EAi11F_eFN=7Lkne
zsOSGYkX{!sKB4cbi?>nx1`>E9FJm4fLy6uC>VpQDKtDY$mB1&idA4*@57Dm>lGLv#
zR)i?22}!dwzvH4pkWTB1ypLVcG&gY62>HcZcE3*Jblyu&<l{Z5@(uk;JO(!utFQdf
zW@nYHB|B#nyQj2Q^ET44-RxDjA`ku+?_{_5q005)iv`g@Q5=KcBPap}lJY%J*4?9x
zau-sA5BG>{#yflatx0*)%W=lb4yKZ+JTK{Mj}Ol@fA~+14IPpj&_N8zW-+~Q4@?#&
zL#f;tu6a^q_@e#<5!%$W1{QNF^qkm-w^H%d2gz%~2r)Rvm+Hm|v9(uhlr29m?=yl2
z%F>?qmIBUD4tbYZ+};jGi1WK%^-RJmKZ7;|Jnd55X`6d?6a}{X(sBK*gjx9eohR@d
zep8+VmWyAPRV)Rhw4z$Rw*lVn|NPqw_;kue6z9QsLGtIENvVe(g%R#lfen$`b1Yi#
zVxoX=alN6Jw@_(KbIF3QtyI1*NwOvJW6veI1gEqi#?psVH0YGyYscUNCSV+^`xSh&
z<puSG3_dxap#D5V?iTETPD1M6J|wFFTh#0SPF%J_$N*I80AP!RHU%0MontmIJ3Hq4
z4LvymO2IYtFQT@9FhpEYi`rQPghMj;+FJaV-yeOs^bee-C>b?<RKoZQ#!oc_u5Lx0
zGR@gLp`Ic)t*4PY0F~=YYG`)l@+Y}}_~qzXX3SR&nBH}YzzH?wpc{TW38RMjA&dDV
zkK7YF)j)qjACD@Zj;@i7I#@AO7i3()EOp|;CZ@@2NI8BEAO)=TR?9p1R;~zRzgGsI
zbb#EvQh!`rnYmfhf?_zsaXI!)=*a0kk}p8!05tv`++{FwZ!b2a)8_M`;fx~vTFYy!
z-(<7=uM0!vJqJ*B#0>HfhN=%gjq(9~mOB-$R;^2~Ey>(k8_zP-kI+OoAbYN19<Ipu
zPQ!e$gU`!-o_N+4lVZ(hLUsId9!f;%^<0A8{LtvZDr#ojE)t@xeL}`g06UDoG#hgX
z9ge_GM-Ey>6s<Td-us#kKt&)^&GCHdpOg2WuLSKtwE-Z-2)yY`vI2TaX#y~B$6V_G
zPJj}`55NSr9zZA07C<cLZT^JLq4N%TB3i$FIMyKxA*L|Q2nNKM!%Z=*nN@S^Klf&C
z3;CsZZl4&^s|HA0+*8s2m2ERW_mjUHwP=Zj?(vqc`GC)(OT8P0?zPvqr;oHf(+|6R
z(f*eHbiA!(iik+ku`!i7c71}y8$;_(Xw1pyd9q)N3}u-8&YWlBH_x=OY`U2IX3yzN
zgpS?}h&dz~#D?s-r7!30AQY9>OYWU?8S~>GVQ%r9t|Om4YwGo=FnBMmr6$Eg&gR2C
zspISwYE)wBCE31u{dUqU*_a}6irU?iA!S#crv11x(%2BARk-b8O{bDd3#7f;kD>+W
zn;0Gg!R8oW_Gcgwjim`@lEe0n1%WL|P-<Gk?6G&4mF1_=lDekVDmp%zm}|Uv{pTA<
z&K1Ps8ef;V8Gg=|^gI%)qu3CoFKqPe{cA411*MBD@h^K_B#Ki%2b;1+S|&kIya1j9
z+<O9}2}!b=<aQSy-|45gUliE1?=dYE!acYrC>3{4H@NC9pX}YNHgNrVX7cJ>=bZ22
zQu@-!Hb(OIgD!V~)#gBvAl<-`kqL|axhHc;{v6s<N6A_pZFjoW_V3*Fc)8E=W{-a3
zJt7}Hj^u*fzyVXg1+pP&ju?vvvej_lSc)Hy!HMpf^R;D6M6a>br`~SDmZNV}mRjC-
zTb@7vQs!dml3}FtNxp5q*^OPxSfm(2m=ZRH)&M8`sW-d$>VeskW2LuWjiUQ&vv($S
zvh*43vY$EvXZtC3r!`$n6fpw53;Wz}dmw_xG$UmEB6n1{Fas;!-4vXb>-BrV0<mxW
zzE@FXPu~Htnr`^?=Wt(q-=`=ulZ4N+3SNyW&_0#tl$Qm@T&kSB)|qOQ(i`p^`oL0-
z@qr<(8e<a@yVQ0*hl2QE6LxJ{&A&u{nTqHJwstSFrtDr@-+V%&Y4>ob@Nu%KqeU|!
zOlDEbgCAqFV;O^K0QV`Uhe3Xk&K(9cZe3K3aEoAZbY40xmUYqmeD7o6sL8OT#_<O-
z4+RP6)VQ(Wl;5DlHuS>#*$(H&Y}o}s(;SPp6JNGwI_%*O9BbkS%StEOkfqtOvpWX{
zRysR^EW_@-e{n~C*vjBTu+9S>n#*5;pgCatZ6qVf!H<84s*SqUl-th%i;2`muS)wS
zR#QscM1nbP$@5FcY=jHfKTQohEZePU*Ntq0&Y>x1<SsoPat_s>AqkQ#0P0B*zjZ=4
z1fsevbRCdk)^<hrJm%tV(4nczmXVB?;*4Pkg$n3v!_`UL*nWame}S|5PnpM}9dfL>
zT1u+f@w~?<|Lbml?sB%IB#}^#lT{s;0n!>6|Lt==&%;$&d{r~V?poQWyKTu~5aCzJ
z?qix3+PkXXyVMEI&o_bFA4kf%tIs&obwQ@@RANz4*`gs+^sBW~Pr#*irXT+71>zG5
zf*G0TqN`mmTq*-4<mRo#p?79P!*beJhn7IBK=ip~3}C760B5)#6;T9BGeAt-uUfpC
z(tJxb{#ln*ve!3*-WxFnuG}sY1Z7-6Ocy82;Lnhl{!H<-di#YxuTl?(ugmjX`f3W0
zYSJv2e=>veKn;V<gyIbZ3hR8M4|#@S{Emd`IT>jB;1Q*eaXQTGnV{O@D%hkVnJuT9
z@`i|Nu+8-#7!wJktN7SK?M{5@`z+0-6Zj`qeyvMHz|YY`Ui{)$mF)#KvESVcbzQ}b
zQbb$7%ChF;gr;oN@0N)aWYdpsfK*7YFAjs4=UY!HYdD`LUe)=n%ik1px~g^V<1LmC
ziVf>JPDg?|bU)b_V*3}NF&H;66KTxJ%USC>{;2tnk;W>H#Y(D<*X4)LoAbY?5OZy3
zpI1;nAvyd)<(d5Nf1KR+_nX|X24uo&lcoYgqr=9d#uf$g18|1FG_qe?0ic1=4v5J$
zOG366@Vv`&8mm5G?0CO_0`dU%rm<K|QWx`7mpIJ8{^I2CSBqw0IhVOJ(k7cW^<o4~
zY9QSpkq!oe%;2gDDGK|+|9t&=<Ws-$A$4gEoe0UZS|hBN1&_A1chVJ3<B*%vR6yW2
zS3N<^9*2SvKx}W=T0`?`oEz}=`FTB9w<ndyVdPzCBjN|@SQtu!**w(;123onMqBT&
z=!!0N00&{#r|~Avp}0beysi=k8sErHuY9^3ANpL)Kv^fO_s6%-IND>#IzI(fHUzL{
ze*cqFtbqkQ+6ioHdjkaUkQSI-1N<CPc%E<3Hao!o>%Mw<f`t1o3yyZlI$puE#tJvM
z3tO?-w{mkvRioM@WoLrY3LN!+lXS@raADFC+KF_d57K<aD{=3-=hSaIwt^+O7`wp~
zli;`h9<~aVBQ%gAiW5MkT3Qc?Pe|&L_Sq_382&EHA#ZlI|E<?pk`%+MlQ#@5RoA{_
z^bo4c0EKUapoN8`9)V)oe#HGop4}nN*{nG=IC|82OI$XV?_L_E6&RLVyWpytDO^E{
zd%pnxhr*jevgs*Qv<d}u(tQWzJ@-QsnhW>MC%#KXzT#UZTz0>te{EQf_R;EDlv<>`
z=Ir#7h~w<NQr}P==puwTPP#-0Cq@FIn2<{cN8`{tbunfFjk=nRjx&z0q*mxMM;C5%
zoVmffd5(NQ3<TI85hNf;{y`HkV=qye%b>0<T4a|~M_0AlXB3LsP?qtlj^5I#&g&T~
z2BjOG^u>Ns?yl=ETsq0x$dL|;60pP^ZvxL)!1u~wY@ZB6K*Vp%&AZ&hw1c8XLICa{
zhRO`P-tl;2>s!+l7f#qSQy|Y1@&;>q^SE{=8j<~D4L=KJdQTbxb$80`@7NHuTQr+N
zo=stmjRjVl?s;L0Z_96sws*NTADm^uqJWgAV~wQ)^0_VG`3pn7g3s!Uk=(GzzN?Hq
zRPJV#g{39H5H<lm$30r~tq|~jl}a;H?G}B>K*O3X7xb>;)?U-xrp?PY$5s&RpFuP|
z+>8*ALgB{dw)oj}L#!6IebrqY6zUo(x*XjT=zA%rS055`f$L!GLsTx3FOIRr9EhZl
z@b1-a?jPsVN(e;U_7^pNLLw8jfIIo}6d*Y4f`KJp+$aS00UzsGF0@m-8rgsF`u^6A
z^s+2P|E0-XM)@4;0OUnS&;uBG!}zJb5UjEn!A|h|l+(O=Idfb0Ec2`D^>@7;yqV8v
zSV_DoymA8htcAf_SuTW4^uPECTFT8@(i^9p*{FRvFaMSy?T|j@?MLBEO;PF=D;z`I
z!S5&I*3v=ska)Q9!`u^>(rO<Lr=^jC%>KhI?2C`rdqg-_JITaF{MYB@)Df!AbXh~H
z-_e*rjiMFr-D||U!N~|$_Lr=F-9w|djxH96TotzYVcr_CR~;vp)lof7Nw4y&@jFn&
zb<}6e#Dv&OW@@sgrKnX^X^W>_7xl%WNwUTRigZ078V&T>wdLid?Nzm=%+>xLo|I?T
z70!m{+!c1SHD-<n>+iaz&i`IpI8WWM2JC8tmYqdE=P5QwemE8vvp#S7vjY5{k@pjI
z1$yWW3^Fl0UzdPhhOG+xa;JY#*-;lEJsbXhaW@!KD>G=BASOpL#7-n2xRVYS4|;{z
zl6{ne9{N>3`y4yr{xZ5o$a$~5<@mW?o}MYt$<Ce@gy<&!JgTg=?8ke*(`;>>QV_!}
zUXaVNp4LgF*${oQ8K(UQwDA4=>I5;6<P{Micb#60d$J_cR6tHb@tNW4S9ltc%qNh~
zCy+f|vEujWXBdx(Sclv+xK3%iH^Hgbb+SQ?^R=MjkJt91H1t^w=Zy|JE;rYwY>yvm
zqF0nV8;mbo-2(XCzt{SZ&s|sm^od1TghXd;Ze!fF%yrM!2y&xwa<bb!R5GJGOj0}s
z?zRq0?d!kIrRmE8faM#25Le%E-J{wx0B7%@b9W40_lfz2q}p-hR|CGQm2V1)HGauZ
zxZ^a;`K^Fae&o*GX}2#F3*-3*x#RI)!@P@f)IIJRof9`?EpLxWqv;l9-vu@b(z!&S
z8rOaXW{0=X-d}qw5MM8~%Oe#1IV#6T7gw=lCFBSWF$MnpyX-HGj_*(QBvd21*Yeq_
zs%65HqM1evPlM@Y@3yNcxy?+OZl1e-P9Jq+;uX_Ogmc3u0Nun7N}<f2<X;-71M*i+
zJ>~lPTJl;wc#nG{V3Dy8!JUOOj-><$MRn=&uT*U<ovV+qYVphzzl-ILDWaX1q~Ec9
zDP#WuDOcB7v)lGUuu)XE9@uh%KwW|qIM80936R5O%^bo&lwVMQbmu@x-5FaQ-fnAz
z&FE#N>w>t>!Vdm*%OT)Vi5>0PKuLa*rKgp>8op*gh)QIDe5Ha}pbkXW?Sj4N+&4L4
z@uvs(zCE}wrX{Sg`MJV>`%eon33=|Zgls`HUcDahhy80Ax@!#il>J8UKS&UuEOj2F
z{r3ax|M-&;hoNv5Q*Q<&p@^c*fW4d|w1Jm|`M;71hgp%VE-wRvjV~GKaqv<UVv>A~
z`iajW20*%MpxmvGQ+vwz9gsOG=Q3zzL=moz;(p_Z9LNx|3Du3^pm>tJygpF$kM%;=
z^XGp@@x<R#GZhqb7w@{mPjkiHbwuclL5}(>&;!VK<leK%1;kfqy)?1VnE0%%v7g&t
z0OaMp$Md*h6Z@_EH+BE9a~Lo&XW$@RFL}6jpET`N0plLbQQhmB+*c>(B6R}UGe@8C
zz%%0wI_L@qTGOB9DW_t5z>7+A#MgwJ*a$3&mL$hqMNuyE5g!N-3huAUwPYH;qc)Hz
z6<{}MPia!mZxI6FtpbGR%VPBe@h(5?%+Dz&KAG*f61ST<Bx1YhQCVy$)jq6C*AXyC
zJAYkoPyYtl7NCDzcnbqgf&&xJ^cAK;<tGw^>K4Y!w!*Ch9SvJkq=wIU5T$|yUb4ph
zvK+mxvG~y8Y%JLvv8u22RnMwq2^QOfId|0f<z3R@f|8;X##8^nn8<={NOsLNT}#fQ
z4HndUgaTp&$)M+qpYC53#$%$@*@W#e)BBRO9d%QWTwCII?yJ<9NE%?YX((fZM&Gt+
z^h=srn>w134nN;5WS(?SGnrSrqB2_daXsezbx|K5QakZ1sVs<~OAP!{G+VM(1v6YT
z=wP^|LKcA5?=xm1K2M(Y57+iTrrQ%L=>{STfK&M;z}y(<$JJ=+^U2C1WIyUqX&d!>
z2TV=YIPSqWQ?0WI3Df~c=qYzG<R>7r0?z2O;GM6?Q$FvT?1Sv{7JrBxO(bbG`1z0c
zsJR4->Ja<J=_g796S;hN{RxSS<LzODH$N_7Y`fT&;UTSG0-zhSO|iaJESZXsYg%XN
z{Awez_7wIoi2VeCA>LzYG8=UP#h_0I7ihtQ`{k2=Y8cJd_|G^lzRnm>2&^a=ee#V-
z`~<Sa--BF|S<fB9@SCltC;SPo+kW9N0<---iM_#dJ8Axb$!Dja-=!RS?HiW61q3FF
zMd#^Tu%{(|IumKbn)7|JR|q39^iZOrpvNtXb&030O|71?U^FI#VM9Jj=lLmiWtz%{
zm|X?X;b)8kLJ1>Y9|qUAw8jf68^=`4hoWz$wWkRbbyv3i*{cu2#}2h&VgZ{P44sb{
z&-6flddW6XE?~FjQFvTxsr#mBjMlT2n6PY{6v{ZE7^j%Nhb3@h*P^@VAO4vW@``UX
zp3wktEO&qXxMUEoh~t-fvv(n>E<yQQoWMx&7xo1q%KB*nc(WccjUirfLT)+#Mx@XE
zVBFOKYG^(Y5D?Z*{Zm5^K70fl|9qG9FPh5#Vj)O;f)RiNALmj5;m2t-0Ax)Y#<@Ws
ztpCeYaP<FmQ^BwQyAI?vey#0+2{p)Q6tglV;Jt$}vf5moPsq8WCq>O=xruvxao9jE
zKI}MGH!Pk$7HR+)1vgpt0*D*MoOE57bQaVIEAADl&P`Mwd-Q^soXpidlrb9Y_F^<T
z*p5L!MiQrzq*Qg6*t$C#kbSrhFCgC{u@fu4{H5_!79b_y0{X{w`&Cx^=(e9oY^mO0
zsJPm%Ydd-<k$Ksu$M}GwlJ&>;<NPg58cI;lV_j4i8{TIbuX+QJjivV|zpAQM@?VeG
zd$i|QoU-C{#puRm@fS=2OU;q0!hvw*jk3XXQR){^eRxs_2td<g5h_h{=nJXJZ#!zq
zdSQjP2_`trjH+CVFsxg=1ZgDn+Sw>yViKM`9h-#(10dvy!s4GqgutioKTP9tF6|%f
zkVBKUIA8Ipy}ecu&2<AqAI<&&BT<nVo|EV3;k)$d+y2G^!b`deHbhK4eMK={ueJdo
zpj5>!NtooolN2HSV%g~jwQW-eXxd9sP%pai=qt5jN@0|zH+X8+aDN6sNrkGUB`>lj
zoMm>8B46QPr&M3P^J~etJSubcjf7+0qwUGp(BRr%w>(T1je_A>ILpKiFS3{U`b0dH
z8xOd=2Cz8zG&x`r8_k1f8;+9XtuFb>&K~C(Ni(VP_3<b+-r!t>TTQ_O@Vdl&P(yq=
zcI+>W0Lf-R=-OmszMs&<o)(dvHWk})*q87^z<7+d@!qW_=9HI3!Zq%#yGK)VTkq%A
z@S-aPg$boT`Wz$-a93jh&t{-*uIeQWg$-SwF^?M}Iz0(p_ED5xtru{AxGAa<b{0@=
z2I~6*u886NUqST~J-|*4$*^BN)IY(yWi_1nDOth6C=V@zD!-k6SbL|$P>`(?6BoU>
zW`Z6lu-r)Rok-Grq7_XsE?Qdh)*`mS+ZtaqMVWtV%ntISy=bdDNVD|$8ul>xP^K?^
zLEaJXwL6CmNJ6r!FOAD@c*cp(T@L4#-o<j6NKNJGB<ycQdd`<aq@yQT9~O^|&N|A3
zF3#1juIN5HKtA>bnNz>4@d0DdratUiuQH==(==>n@RzXdT%+B3ny_!AlyOIA4i7Wa
z17W$R2B+(^;&cNfC=N299ETKybFM>$;CH6G<beR!I%kKd_rmgKX5&|=vXuG3B~z1!
zo)4}m2)hcyX!xqhMli{K5U)7t2KlBP>>QNCk#tX{Wlm4J+`cOBS<{neLCi&tW-UZh
zjauX0N36T|ignKUR+F@<DQ5`bsUdNAaJTT<Ip{mHU=80jrlqaG`4O%kUW?hV*L9lD
zq+C(?dV(pgAK+T6<N)+|$h=>dAK7kWoiMZ~Nq`cgNtJk;9v8WU-YyWnas1iZ(#2ku
z_Iz!B#h1z260S+(x(7&2ThD7{0r#QEhdFK8X{q-;s@9O++k8ZVaZva-1@Oby9L&;(
zS#tUFe2dyn>w)r{$mlM9f@V6fNBD9=MVGkPZRM>@B;@sOM@C4*>|Xc1GV1!`r9u%i
zZ4-Ax#`()ok2x{TTidF)zCtrK-xbtQ{?c6jRk;pH!gwE~6Q`kL8%!OSuxqS4xlPln
z?@@k_qpI>Fn<ZB6Ru~2Z@aQ{vUW+%5T9EP*o51tqAgLYRgoKTNxDNC3(|RCS4(;Vz
z+PB#UTEOO95}XTpSamF?6WkxB!M^K-#c;x)SWk2rEZ?kcN~x4^vGK}{2)d==FJr+D
zI%kG?UIvXesTfOiM4AH8+yBJSLvU4j=omdiToeoLvb|56^Y?_ZP6FWM&MMQC!B$=t
znNjlb9F5Yo2auYIlyBCYX68-%H6FsHd<zFiM*R!0z5#%jqE0e~V#u~xSou>qHZ_o(
z6j?7^Gv40l$S{1F@@<Usi)#>6lfni}K71_~wXlYBiGE#D^CTnl%T}1ihSmh*nC}O7
zGMUXTw0TJ|6&cv7)L!q5$qCcB&}l<-p4OK@_AZbXa0iL{ysh}ELTO-oS$#iW&1jNE
zlh!(rKDfi@p6B71oN4Rj0q4^=Jpkupf}hVdC3wDwc(*1RaQCI->e7gxa<0^Z@q%2X
z-~D0jp1oUT3Ovi!ZQpiJd^p=1|9L=dr&$1tG7BhcJ))oi7tK3e(lNg6Y_}C2NR!P=
z4g@bizR|z6?hX{l(B}P&m<F`Kw@~wIG*sS+Z!j`WjAR`ZH)l3AXuD4}z=ZpJbK7^6
zj<~~279<7qyYQ3jE4NlJPVF}&e}J1-TV$+WBOa=&E$5_Nt7DKB39e4z9ImOvvF|qG
zvvA?59zZ_UD-F=zf?sj{<wYNeEvz}^o7)*?4!@OOoXfxLMyApv%y-cN)0@sETP4l<
zu+v;UP%-?<X*oibas|I@pm@1Y2pp&2y1|NlbOW%d?MU1GWNhub3rYoZA5Fx9SoDku
zTDFPH*NYq@lDRTi>rHleR5xElAGUqC`gEAF6mANXQTHi5X|gli^y{}NjrJ&(Yw1Q=
z*E`!<yHuP_rQhXe_>0ztNoK`Vedb7Q5g3T_CY!@$TS!az%L6F3T@mAQAI&kvEX^<a
zbG{PG*W&Wy?8Q`tweD1zyIktMx4yYwEqpqh=QMhLtQ9ceqezl{NNr;Vq+DD;(iP9A
zN@dd3yM==325FA+k%GM@#KT+Plu~*E&mF(EWqtSHJk}S71`bmxUAiP9*b5YWGMoEs
zbMk2Zw{xUZdxPW1-W0@2MDwHBs}gBvGk+)1`SWoWKoD61s*y2$4}Q56M(0A(z%5Y4
zw_YrH|KXc-sMtE}u&U^sAf|l2kj6K7Jxs5z<~x0-H7N#o3kLEXKw%9y(=z%Xl<~%C
z>XBww?H*>FzB~MUG7@8~yA<Pa(~es>ellJ><8HvLfy?ACLzmA>j@0w{&|v@8kbsHD
zA4nbqI^0?xIxUd;86y}v-0`sOd%k9M(az-JkcX}mtF@WgtK!AD$va(}tV4kc76=Qo
zK7{gVF;y9EO+w>;^tx!(;S00YSAqpLW}xQrCnvfQ?-&$a<7UNWZ|UmQ3xdAW8ZiXG
zE?W}9hmecLH%U9?umBr8ItI>Gva;}Vq{?x~HxgM6y=)j1s5s(X96Ky|Kd`h#^59A;
zYGJ6CdhrWYn}VEe?3DOrS^G7#UGR~=lHz8Pe9M&J4ZZW*e1`PVw=&OqTjt1F*L|<s
z>m>VqoL$K^qs9d+8~}cl0u;w0%54&OBY@XXIStNG0gToA(4{f=_x#<smMn-vW2q(G
zh%0XX;TiUnVxkB>VBqzUJch@VpyY)=5|jR#b6^-HHy*e7&eL8xvLj-6$^_oaO~V<j
z`i6|@#;}o+O-N+If&FYld!-`SZl=Lo!C&U-M*NmUo$MZc%9&x_mIo#4_z1MxX%5`7
znj$~jVTP9+L~zdGW`qZ>R1d@#pU)1u_&Vr*s1m8h;pdsexMugP=yNgQ?u<H-jXYd)
z*}Lt&ry)GHwcmf~f|IC%r*Av<y!vy&V(RG$LhcZ5(^5w^xw+lmRSpMmlhf+3Gs^2^
zrFe`{fhR?RkY~AfX(L?_(YrV|NEXCRQM^hPoDCJe7>j(Y{#HVT<UJRM&70&y1>{&b
z)Mzw*tkOOsApy8Nc>~gO3dMm2d<%RI{lIsqpVAI{ea{9qJ(xP#>zeR*6fGt?^HwVO
zhldwQ7(3DL$UaYp1I2`bS$#CA((SFYpBw@fJbho^&OLW`K&<;vp5|6vjX<z=_0^Zw
zw`?be-Z_N+c6s+b+%L?l1@2B7Aei++>j78@DFVB4cnNNfb2jU{<IKGA{C+3(O9K4M
z2)T(<Y9e|<DE=DaHsmre@d2jQ>D|gaBzc@=YL~b@4Ar|TF{jYr2ffiWB~jO0=lEsa
z%v&|WU*zU>$;ka+{;<1Pb$u0>?!Y2ymyEw`I31$9(|N<>E9HXXUW4t_h(=Y|B(3ha
z(1(X#CHrz{l;IFZasUk44}_KkVRb<)-u!vv<ITw-k*^=rHD_WsVk9pf+q>Cb;Fo+X
zb~ZvITZUy&)7Eb-VCy_&_JprXpYni^ozRbFqnsuAR&RCj%>G_tNrhO?HL9CtnN4pX
zFUrI)i%RBrd$7JWxs?*4qwosDsxJfk(Tft<MKF`bhWMad>PSv+h}Ma1H7g(SPll7d
z+Hq5}#C)Q=nANJN@oQ9vO0bUdVl38!h$1xiVj5V%>^8XE1ngAVq{b<So#Qq?3s<|j
zFA!)rI`ECF72?4Vqcqu9E0&H|2Ap(pP{GDIKuj~|!k9BSvd{0-fgkTm7TXehp!wxC
zuSW!v*II)gtqBmD*y>vTKKp3S?=<``P4vAr{U4bWbpmqFkT5-n;ijk-!&cXN78##X
z?5n)LY$v~8SaWeQ%GztLGm*NN%C)Gj@T<iuYy6&|+$Sx#F<>*D+mBk59K<k$fSzYi
zE{TaGSo#Ukl{&7hZ11TX*q4oC7YTy^IU<dV1ua8sr{zVK`sXnS&mpwUaf%YZ%+hTM
z)cOd|Iorc)N`4-J*09@2UG9DqrB+}nf0<A}uu4zZ%xjJYS`IhCkXq#K53a3wq`ubN
z+52Aa!=v9Unn3~bKO&4nIiHfJDHroyW(N%Im7aSywH=&3vgr8Ge2j7<N%Rf00r}^9
z;30>SE32LK9{y;*J?a?Gz5upg{G$OxkY+<?kdf|3JNt7NjeO$FOc$3~`98h?69a*m
zDPI&*PNf7Mcll4KQ}+@Vp#gBOylsp}6gv)?fS`lf+#(HxYRo0M#T&Jd&5r8_xNm)D
zD$UCLobi&?R7&K7jG%tddlIyNgdDlur5Ytc2w~QvY4R=4k6_4mMn3GPUPA@sxX-gZ
zT4|@#9tbwzv?(tr$fCZe^q;I9fd%v<8=!-r#5KMSTI_+)63om1|Gr7F`nU8nwqu4%
z=Yl~~#$b=cUC`x@spCwKnD;s!M<BrM7}RgcC6kU2p+IJ^b9%=314loVZI1p7;dY}G
zb*>RE5&0&OH#DkG$K<&HCd^tNLSj<?66NtsXo1pp?;M06WwT%&L^mKhBe8IIghl(z
zO~%wwlL_7zWNqN1EP{qW=_gIiB`)5?E1#R)6jKKVI$jut3*C_Qdb(mT3^1v!uZ@S;
z1_iybc98b#sJU&^hQNFvI`de$m`?qFpGOP#B^x0};HUZEU5{Dh(Re7-1M<I$<k>(T
zfaHJ)06l5`{fDw~+DH+U=|`r381+Gp{As^jrs2N4Z)&sI*>?H({@B*lP*_XwMU25O
zX4Ep68aD!xw4fl2=U<xmF4pNqLjvQ#&W<_vy@c=uCBJD4&s-P3{NRdYKLvL`a^0n+
zVcs3EK!*lS(UMqkS_n6MzXb_@Vhezs%mGXL*cemAJ^ktTgBiz%<(ELO)++)egVx=@
z8Uywnxi)zAJzg9-(R%s1Ow7xb6<C0!R7Vc~;{4M_dr5&aQem+^;=||qWudP+oo<3Y
z{$A<C^q@G`ZqsuZ;!qsJJ>eXQ&9^Nsw<gkUBN`7Ge*ajap7q5vQv5ur(qg)?gM6Gh
z@$%2rBL#CUk``t-wuP1=9)5b(OfUMq{S<tY)Pqf*pBO|i;pd)(#uFeZdR7%%IbT@p
z-{lDVS^SJ|&){Xvp;{cQZaiFO7I&YU4g-Hg@Amm(lacJ^>-joA;>Q&oPM?x(+6Fm3
z+hulngZ0B+v^QzLXQU+u!E!*n6n4B=F1Fh0b1Xr(|Ag<g;&^2?zmVtKOv?<Iv=mc<
zprDAD8vAmd#nYmnS47eA4US9-4f(Qsi^6ur_u{qhg(c3?bl=e)Km@`-fHw^C`C1*r
zTK56B<LXZrHIiSIWxzWZAHFru_<eIFn~SKxxQ%S7CU>N)UgT4jKd*vU%e;UItm$+J
z0s9Y5!%Rpi)PAOJdA`71dUF3_;>(B!7^Qoz3TgtCu4?Nox=9b&KL9E^v<1l?Kh`g%
z>Ij>f?mclHm<g17na)?sXVPt`dv}kedp!&-3Gg%i(%|?39NjY&)Vs=xsjnrBujH*+
zhJPqtS$bdm!JOFsBS<*@Wu=*TVvghm7XLffHxYU(Fw_vh4}{{~`olQ9^CtTJJSQ6~
z%_24nWkXiPk>1df@LMk^L!)WpVJQdV?!l4J#RFr~iy8_IK_7)LU+!w_UFn)|w^yp!
zvklSGB$zzAa(SS|BiPP^=D3bV-<Fh10O8QP3mXCS+ih!u?a1fGcW3=VQoZ^fFK#ty
z>ED}r={qdZBw^BTQF*urzD)D!`Xe%A=-@d@pg}o(jirl+qA*icm5}zRu_T{tz$wLP
z>V580Ggwa09%~r<g7M~yk2MAbW4{>tSFQ{VqF5V5>4;GzhH4nZqMmRz`*DSn^TXP$
zX4NXlEg_9K`~4axwMuhwHlBy5%liV*@-8KqWB*C+UKU<>Kq%}*L?{sLx+>+kGhe+K
z>1cB`w#2|Y`<9V=K(-cV3tr9p>82a>^aDbGIvIvMBSm`Zw;>Ep6mlNl&;PJZ<TN<B
z{bkq%XDKlYc%n)earU~WNqm0uu2}xWI9kt{kd8&~>9BVnB)O>S>wKZyD_ZtxyL!~5
znWM$w?Dz@36Ydz5|I$dX*>At-8g1<d0(<YOOy_A6z}Br9GwDFxoNl=KK)#Z+w;5?C
zm7~R!`HBC*WiHKl@xmGS4Se(S1Ee4N8CAY+N?B%7eUIHPX`8AWA@-G7I<;4-o?$+v
zKeZ@PrjaIS3O2L>C^Ud^FO`#Iv!P6aAQ(uG!_*_&hpSvW+GiRS(>&*=E9F?cU)*_D
zmzkOw++&U2U31%rVj)(MmfQ%!kl6k!oq+fJH(#=&o4<dNo%>-)WXNdMr&quDR6jO_
z(J;|T8gvl=nHymU=bo4T6n-5zmeNp6W+6@=yS^w|6iJu<9`<ZYiVTs?h1YNjyvV4G
z4(BNN1DtUJ2y}L*rCkaXBLXs}5yUtTOSbqj5Ce^SEM>c+;0Djgd3O7*{K&cC?}|Nk
z-{ToSW9q^O4?y#9{u#Z1b<1aMW$BHnZg*!wExCNWA2Ynj+xC4PX6WzErnoW0o|~Pd
zag+7hagXiE+}+cf4&KLPZASo63Sp1ZY}l}z3VA(RJv&&h5HGO%n)B7cYH6ZP7Kc)Y
z>)dk~<Di(%Y1;1ZgV-miK1*)c^!0cu5!i{&b&1va{&a6_&lqj8k*jA3Po)fMZ@<1T
z7J)zm7~HPk`bzjZDj@{A$Zrs31~tENXU>H-@-gklwppq31o?$=4rwb%w)B_U4?2U5
zUG>s|kZKKR9JIlBcS$+LAK3u+!?k0Ccf>AqC}KWgTBeFVnXLU9<vpWrb(q3MvyyZQ
zL61#52iHP-^TCUB>#Mm~B!iDN$@mU1K1iT)F&B5udc`ZtjR=j6w`s9{U3x$4TvK@e
z*kbnq#!|5wTi=v4wGw7{Mowb$ja&+C*2FDqddq+5et~9%PhU)?n)l%XPLtbE)U3{s
zH9E{@5(Qwh)J6g`-6@lu*Qe!}&1L<IfCQuc10b{0oioSlQccjnV28}#fIgK<dwO)$
z=-(-?P-ZwFZ_WC4`VI(<pYB~d@xMiG!dw;}5|bfGl3n3mg!p3o&>%u?vIBDVsc7n2
z!jZl5TQ{%!Tw|{Viw)>RG4h%MD%uixbi3$9GphL<4t1K;_Jf)+4m}6A%P~Q&XI}~V
z(wb&+1Y%RGd&4V19gbo8X&5DsQ+|GuJ_KDL<5uF+@#KLnK}>pITS0Z_`_7Wa?D3;H
zTbl~$l3RxjLxJb60km(x?B+S#3NMqU!A`mumm9sSr(3>EdaVv-?+<CIz8Hy>(rQYV
z5Dv9b+PwFm&;RZjOPc9x6NV)D(;~7yC2$twd1?tqun-_i8W?)rM|MSvR7BDHo0Ykz
zBQJ;Z)0vpJmyZK>J9#pwePB-PYt~>RxI<Ny%H70GdX6WYPzUN1lvx9m)I}d}$vi>?
z)Z7mHSE1Khxf`3te$t9Ljz+g6xj#sITLT*rVAX>Vl>qbJ1EB6ZyY(Ttj?6IUeO#nw
zG>RYlMtDm%B<976e2iZ|J0C9EUdOcWy>?x!h|G^1siJ89rTJ)${ORWe*#%xLBTY6z
z&O-nIe~Qk<9$$$Y>Q7%tz@AVzNwN6$0e!id4q1=JDP@XIsm8bX4-f5E&;H6DPVGJ1
zHu~mLZYOdAut~8PQ2@U&?hgRWi?};<6`PYfg36tJ4}aQn=C{u)Vhx&dZj7vbAHC~w
z?ap<$3R^3{k~Jq`01j6VCZ$XAA8v#%r}v`F9-U(8r&NV}<i~(U`P`VJn-xVtk!N_~
z9=8Wql%IQx`fABW0&I1);u;QE9|lRMy(n5V^gH9ukj4^nu2p8zSnAXqF1ij~XzoYx
zbup9v^kew%XBm@{2|hs;{hkW-KcODFN0Fi0Tt<q4r89n6zUl$-XI~=$id}jl=D~0L
zL7IBOjlMN5jpO@x_2lwJbK!}INQp%aoV)z3Oh?_@9?5bd`YTJau923zqZ*bRKoA7u
zz!=ZXuK-iZA->s<SPH{&{McQ5n5jZ17nLVq=BMB&9jj>w?*nu}{on==Q0Uf2^d+}3
z4PyBfu(I2*@A^V5(~lGt(URG{jUAlwEV+lXV~kSma+@zDCHH~sC0)prKEsaw8Irl~
zmQWfBd)%<cx#8BuEZ(H*$Z$W0-tUe;aM^c}B+93(o9328=Q)HZMT10(6GkR-J?y=T
zw4P}g-cu6WmyKPqXwIr_>B{j`x*yNr75JAXOr@LeDGEZJ0*L^jrfz*e*sdYIXb;;-
zI7n1)*KBw*NSssMR^c;|DtH%WGFG>rDGgavQ_cGY9BO1wD==QjL=-zZlwE9*V)T*p
z`~&j7{_BXcfs%zqA1xor`jEm^oo^gc>8%NC?gr<HeuO=^6W%ygLz$L3`t?cJ({g7^
z>;V^mKU!#4dxY1<FY<7k23M5yzIc(~)onA!`xDc+7S~~kbB<hpv?>DdAMB3O6NNPP
zb{rf>v>O|rk7P!q_^djaQY_}JmcLw1n}J^k4C~I5cJPaRLl_PT0w%6YYGv%b^OAl+
zfE$rzHa5g)_MOh*q5;i$`=%P<#~+`pkctVCxQTx7A}`Vl6hpm`-pl#Z!RbkJqK0Qf
z()V?#!@<gcNA>a71}|wHFs-T5$;UdN8o~ekuRzbiv;hrl62EMkCMnLm<3~+RhqrS+
z(zND;8A_;wRv$2S2BJ6=;h|(Ob=F~Gs8g$_tt^GYyHOzYb?h#+zu7T)iDgVJv95vT
z*9b@ag`9PGKbNMgtFh!;fQBXW4CsjI48ZuC1MZ;ra0@F;tj9!Z@7Bb%=2)&+e8kCk
zxCTj7<S>306jVpBQSK1DNehG@j(BKaJ3w@Yl~K8TQ#I`e#+y(9zGdFm9G7~CmG1|)
z1f3F#KZhj*#-43=KC=Uy>C%^O0J9%nC90TjAs@a<Ot7o2xph{YS-#-gA17nM&<0O0
zc@gWD7cu*Ct!Em%X7udImna^D?*Q3@36cnO-hk)LLB4!bB*e@%{}n_z$51Fe{!E9H
z&JFI&x2!UsZtx0-jRSeO`Q59MBnS==vdA{H;`2#d5qlb^n!kLV?VTOtE~@ZQ@g{Z(
zw`o?FX_&;}svBq;(mQW`PQ(`w?^_kp4vujEe6FR@>leSt<n$h!SJ_Z+oLoypB27br
zg4P+!c8U-XAwrtK9we?Rofg4m9+%m{ZxL7>8k-YpJPSYjI!4N-*iJb`y~c*V?o$nU
z6gBHfU#N{-lqAZNY)X;6bwJXU-+J|}oxMV>Nxw!^{hCkns!OU&KJkOpN|4M8A#AvX
zCeOzC)=gWw-{+Hh{tx!vJF3ZU-4_Mv9qBztkuFLVM1mk)L_m6ph)4$!X%Z6z1nJTR
z6r_Wcfb<%AmENRC7Z3;}2q8d-=l#~bYu&ZKZ|!sT8RMKW&e?bWkr9J=dCAP2&z#Tm
z{0bTw{sKad^S_TSir`mhvP-#Sy>NFz_l8a$$Ac#+AHS_P-w<gm-+BFaId;Ix`cG6Z
zTs8LkZ=#G{K%vnDs8yYNL0rZZSkZl`SWgwDb%xj39Mpu}&L!#k%BFXmvUI6mNM#4Y
zM$`_IFV`gMkc7;gXSLa1d{H4O#cN%Bz}x2HTxZq}Y^$RH;W~Xat6h`=vvB7rG0Z_w
zT&CEjYJ`Q2-^BVYZUOsoqSouJsuB)~(buSZXA5PRY*Eg5GSEnxW;W|!fxOkNrRn@h
zoJxxrW+p<Ku)icaE!idk`vxInH%!zh3JB_)&yG%2iIH)&mt46fC_<)7Asm&|E+TRp
zvNwb#0K^HL$QsbWjI`qIjr<I?oc8^$Z$7!ddgv4)rLJA4V<na(PS$jVdbJIE-~Y0m
z>^T_@v}PKPT#LU9wHT8=ayy`RQ3i+A)VJ0rl4T4TFr6l<P4qD9GFQfw%s;!X$QKd@
z6q(478>et*<k=B`7RgL(1EUMm5jgH{2s>cFF&a@dbhQ?JvSuFXFpIhp%dwNi*^sql
z{a*hTUE#uD;**=Fx>6%I&;3_Hz%ni*%z^o!M#VqbQ=1g3JXUL~T2q(q);3+Y5YW4l
zo-7oua0+p`Wk^AubCCq)MmV3<_q~Neau&bMyDh2C?0cl1&8@{@2mLySI8xM7Imw?5
zrPwb{G;mst{K4tPj^T9IAoNSx#V#+4LTlYJ-QLD+q>9;cavJ84XQGD)hP&$`7tv=R
zLUde9+Nxm(<Sm#A?ruxc>FqKfmxtc%vQj9zh%6nf9%WHz3ZG>&v0;3y9pac^B$MA)
zRrlkIf2|=Ku4v@|m5Jf*J%P|cA#3IA-5F*HPA)#5To?kWQaSRLo6Mc_i1?`JpWPV|
zdp%8<%<zqP4IoFBgORLwu)9a?FvJcxGd?Hizlu8XGv5}x<jbGV#*<2iCuvh>twk@*
zEcC(a{d?k>b~`p2a6Sdj^Y;^2_9zng3P=pSC5_f`@2@X^KqZX_n`oi6dl2*7>k;8F
zNxTty9mm}cxYgNN$I|FFJ3pD0mZ!Q@YGd<u=xv^Qqv;~OsSfE4*X%WTIP5B(`Qj4-
z>^vI2#MyvBM&Kbu^FLvN<7JeDas6nAI+|_93Lz?{GDk(HR1u<ZHM-@61@(}1tlu==
z%jM?eF<{+f^KDnz03B;(?N)p(m&Aefsd4-I55diO3lxBBTpOuX{fMc?KE_RNa!|az
z*TlM`!)e-E?HcP@^uxAH_$fall}8H^og>~5LjlA@PDxd|0PS|d6}0y2ed#ON-5H~A
zJ*@Qu%cUG?v3~D9?(L)E-@hZdLh3PqczRLQ!n#Ihy^>N`v_dDoZa&*WQRc$6pYGrs
zEA{n@%L#RXeedQ{WpkZ+BR?Zck~+zHcFe4a9d3P18)f9hWYdSv?6u>XbT8C-o#0+m
zKIS^+m^&mm3|+w1-HE-g7OVeC@1E-Iixw%TI(Q8L3#M1$dUA~;IsU<nQ|Fs`wOc5y
z=eiUzw82wQ5!{W>yT~el+zVl|XpMhM;Gc6n3(sNgw!1s-s@$wE!}*}^vZa6&$Cb1n
zV)0F6&$BC?-Vm<n76o*zW%qcHpLZ<@&c?;()4bCAjw(`^XZy(7@X;Y)?ORg+$Tw>8
zJDl$1f+W-raVh8gP#aw6dNSJJAfCVn)tmxZHnfa0F1pS?!8rwNzJ5>fT3>a7)6_h1
z=tKg=5QgMX<$}7Rm+`{Ow8sJR`j*reIVKwoA!c_DZSFqnUm;Sp?+1wqlDFJ4zbJqS
z;K7)y%{fpV+^w}+L;8T!Q+hGZ?%S@yY~2X1o|ZT^H$8&3JeZqT@fvgz8&r(}b#Y6_
zF4xS)t0+pphRUybG))&p2%TPgWT3ia-jHEot@`jp61H!eUY$%bL@Y;94V9^b08B0r
zGeN{K@)3H9&5nMO=hCGz8Z1ito!$+%$UJo|&U@Atv_3sh$MH6$p9wpJtJ7fT+ajyO
zUq&We*pPFnjFvSw9tkWO6zfgMRqPjU=>E9$@Gap{-YnR0nQ{llhy$$xVk(bs2ie3K
zc$s!T6~v;fw3^c`scu=)W?7Pu{8%CGf!d?9I^hvn0vNMsjP}Y>GlqMuE~gA4_i``e
ziNDpW>9Q}^t0hVn8>2?%l6ivJACU$+23rvwUC075HCr*@M?t@d;!_nbyTESLO?pNf
zkm_IxrVA#mPeYVa#!jPh2H!noYyW9e)Z@iMm|h~mDHaeW(_qS#KKXq`%gZ?9NrNZ}
zD))*Ep@7(_SfK+2p`ixxR%pI4`97o_L`x8GUPc?lU&bp<wa69&Q$)lx^2O+vK`-9<
z)2O$rrjn~^o5r8Q?d}L>oH1h)fKtTq^0zc|bYWz^oh#R36JHvWh4sRK)TAy?>W?w?
zD4Q>DlsNi3M)e*AdUYT05T>$e2`pBG$+P(AJ%oqAIqUa>uul`~=d@g~93kB*&!Oma
zk>~&^u}7`XqCV|o!Sj%Hk=Pm<1QFoIDeL3qwk{pT{#?_s*d}*?E!)vOTB>I}N=@UY
zHaBZ>X!y!IGAG!CDjP<%`J8Eoz%niuJ*;7!rZKUmPZJa<+~gvdxY$H#q@VhrNRX35
z>8H&yigyL^_Dri8?T>wp3;bjg5O=bgJ?}7e6m3Mv_I50`?xSr}zwR;06=57zFoM=n
z*x*-@UEk_jj%g#sAll_HC*9Q;65U2zXwD;Bm&!Stg(b--8ZGn<IvOA?5~R&p2zReS
z+ySx?`gMbg7W?`t^zlioZ#Dx?Nnrd%=2eR@fB#wQihZGs$M4k1>Ia`Bgk+~EJ-_h=
z6V#V=`yxlh1nu<zo!yzu>cYo|o{g)a*&X`+xC7-x{GnNSz9^%2UbI7Iv0Bo3!TjSw
zL=FDwn2HCw3n03kphQ+i8C>n8Ca%>4LT|6#zNK%Pv46|^de;07w@=#p2qERGl+@Rs
zjiyo*La$zYg7oo;&#H3a)zDuFfHZ3aDwXg`VRKWWp|H|#4qF=ZzOyVr0$-+AA=8#G
z-2vi)*8-by1e^uzVr!0KTcSa?r34k=#3u|>u0eYl?RN5gFVJ6&ws0~G5=lqkP(T)p
zFplH{m<bU*1g0tIZ8QNwIk0km%hrzB{#B`FknDi(jaUeZvGa2JgDUlo>9NojdIxOy
zMFfG;d6@)i?Ta4jP(0xitgaf#B&*B$ywsS&>Gu|uv8nm)^|+tnWE5)!?RnQ~Jw6FR
z4HPKE0V>U1VZYjSFtI8-z@^ac@=(-3k9<^p?@PIgY>Qe8O5aeNSm;iYeO0K-%g|4(
zrd~opO(nlKEol$o+(jP3mo5X?ZvQg)&xw(?uYaNY>inNd?rozt1kwn`Z0rR*I#er(
zaG3?D6n`7}WOqFap3MG8eaT+ww)#3?J{vF&k<x08kZ&sGOI{5EF+pG9EY}Y>V3S~y
z7Vp(o;e!`o8XJge(Q~0lwmHa<xTWoMoZklz^0oy)#LVn@%;Pio$Ik23UX7S|R0rCv
z55nHY{!<nz8+)xqe^ttE*`lt-Pm^6h_{9_BALao|Jf4pzjazjpc2^C%k<lQQBM;aR
z@Ey@0pd><UdUc03OB%|yi+XXwn)kG}Sy1BLjC1@c;)<`tG$Au^zmnRz(aP5B*01T9
zzC982WFLqM=-Ho3Q$f`ymbIs62v{G_Lv2CI4ey#HX60XT>BCO|e^Pk7+`+<VADB)0
zMWg^at7tXAT?#SO_eDR&jmd$U8>SX{DiNQC&^vz_RpKDDrL?fdlwX$SumXxiQmbCa
z^W%2?&oyDA%EI+g9#BJCvn65lUQ&5&PxssV3z>l%WDg!)TcOvuw7_#?ZW~5~>pIti
zs$qH)90Sq*1V#i^_aU#<x%pJ0en^{jpo<%eUU`e^gk{2b_`~-jd~MCSnLWR>CZYiU
zWaK7wN>+@-I_NS$AT4d<!y)WykT`wzc$HgHUHbV;+Zi#>^jVu3*=XKAH4*G@{Lv|X
z=E*6zw<AGq%3@b#cy=i6?d9aK3Yzm@mF~v1NkQ)bU<`3*On~ZnUnHqn6D_mQuotO4
zJn;E=v56Sv2Xs%&n`w)cK>si()wvq1XGvuCh4dR$Y5>pE#S7a))uoO@PR-{X&@mBT
zjX5d>Z#j5$+_{#o05{yO$VLp*e@(hS_+4&cR0I5|tn64d2*<nb71yTHG;@rIRwa#S
zSJu#WZZb%+D?shu{6+UQNrsHr`bL=9UB`}o_k9G@#cKjHUL9w!63_W_neoU)&ibQC
zH?y6!P&{`!zq7-G-$cz%+l4NNhN6I3YT07mESjI_ho^p6YXF>YfbA36ml2QJTJefz
zv$~szGrWgxnPFF4U7zC}lE++5kwOaTeR$J1JX>yz+d@07^@X=7xFBmmd*@yrrEZnq
zfZEw^2Hn>-^_^6QmglKNIz;y_snM}`M05#W%xW6<4#=Ut)PH8qtvznUoP-e+2orX)
zn5#*-*&#AGvjNX5%B|Sa&v!8zo{m)4Yc6XQ23}&?37~KxeNN}|o2b^SLpoCVH&HZ{
z8>mZM7V}<@Cjt)@4GDCoVLkhXgKy9bNi<c;h>Ct?r`Lk5<|cdFR2zpGpC6z1{<O8M
z{$;Y}9}RamjEB&-+yQKZaoV8Q<z8|_4Yd-(S_JMAPhNXTDapqsd|du~R$mv!$c0k)
z<zKO)eO6T~!B5HsSNh^m%eu%`^l@*}E508IE9W-PwHAAObg56<z1%|~5615o-e@S+
z5|daq#0kVod43-qM7&l;qVfP7BO{dT3r>Tr)hEx!kndx;VM!m`2Vi093br(M=>%8Z
zD1P!P44O`C_!%~^ObU~PO5qj&Bq@XmzV<jvm@txc+mekV2iasVRIerK@Ysk+mpXW`
zg?I_6=3FkmNw24?dCh}xp2S6EZ5#huMS2BL|DuC?GH?X+f~jp?+vso!ZqC{B=mE~S
zPU|4wMd297w=NC~Ba7q|n;m&;$WFr;2n|8yJ8TkOjm`90b3n{5e8WX_`50gWr7y7>
zsxE1%THVxH-w1ga;eON4>U=*c9-*JEH{=GEGp{tO#g8Tucmb!&Je61L2FiUVI2i&h
z`b%K;yy96!3HN<f$UEx}9{cxO(|TENk~mfU&+p;6aIJ2bpiX$rY&(sPFPsVYlG(o4
zMOJ^~qG_OhgFI0)Cb*)@lf=obV)FsTGRwn0_90bg&1<Q%5L>OkeVYvH3VG+YKuD!j
z(OwyrSTzo~)ZHEbOrB0lqV)T{ZsR*{ZtQ7{(OX36Ho@)oI_~AK=FnjE=@u!pbvqyD
zENZy0C_ezMSKajHtpf`do6sk~lpkW69226QgLsge;%+W?J0nW(gwnWr(;YX7J-~Sb
ztz#sSXUi4zxn7dx^yZZN59@$A#^lKN&7KJ<Y9v#$vs}V0Ga2%43Oe^vxdRe<?{tY1
zX=(Gi-H#eE%?v3ThhW4smY##J)gmZiV}MQrN*ef;fPT9QDNGOm*fKJWlGW@wNiv5b
zDacJy9(On2)!4r|?~`6T`UX_X-VK_U02(2^UT>8RYRls?1D`vM?>&vOIO4XLkl_l-
z-Ni;$*nX&twS1`ciTMM=<e5bIQ<>!yyBTCknd7nVvcc$ExXbbzf(oXu4?G9Bn!#8n
zgr?*Wl)LQyTxHvs;@T)(_o!;}M_(x;3EQ`oP<8|7fcb1U-_46`yw|EUwWVra=}sLT
zS#1+5olwkfFb!kgjt=X=Lv!UG{YApT>Eh&EIdqir;(d$Ynlv$<7!3|nv3~mE!a;vB
zQ6kgti4XU_L(La9<7e9HZi3hbv{z&sKVhUz4zLz>2bs+H-U?uvpW`&mcWjL8CmSs8
zEKy0AXp74&Fo#XfAGh|dGzJ=G*1+GKVVlnde`b>(D=J^B_g^XXV0zrGI9Cz(p>7{h
z`EC&LZdJFxpW?&p5a<$s9Jj=a;(YU<m*ZFNv`xJrmddxvbRDyDDHPZny7lpk7k{0t
z(IGyii#P9REoz2iNQ6^U?)%xn;S*)6!jJJ`a0lSQ0%ONbt^)#C-8f_Yc;;OrNDrL)
zE$Sdkt06H^Am00l!QnG*9tR$?h`k&jer6HjaxhyqnDB(FnGS1676W*}-~l&Hrc*<+
zx4_<0e%6Hh+!R?U*khK5y@NI<5LP5nEKB(YlnOoB4rV5>!RD60Ggr}zO+_JutNG?P
zb{{lR?m5=I$v@nR_qI=yV!S<|#luE3xeI3lyTK#cL}pR)*<p1Al`-L_R9k_QJ5$C~
z0yR$E4{S7))A>w|6`OJ^eCnTVyeiRT>iKnRL~J|FF1W<fc{CL1Xcz`&s`*V61-T4%
z_KM69Ql)FcoJQ}>)5;2WEp$`*rLNt5pfb~$MJb%2zFkE00$!cg%Mh0(jjP5;ug3S<
zQV$ubG6%Uu>v|~c>qsw|)yEkpevNNqHB+blf_iiwc?ug!;L`cd`ql9@aT=#_P)vx-
z?GDy;IJqi~3y9*yu%TyZ(3&A*K-8+2e5`}fvlH`r@z)e;m+l5%W^uT}vM!S78jvEd
zC|9wUmV#WNGIq(2cLE$M!xo$W#-}TmBCMWG9iPm3p|5NQSY)pO92kj)_P=98{{D<N
z2Qd6{@cvkg384$n`8g9V#fBh(;?_zm=ui;)5NO|^kc3Fri%%7UVCt(uK$X#o=?}R;
z=;Zl5*jEDEMIKbj!!hXjnRMg}bJ?F<-9+i-SB01;$@H~)w6yM!k{eSZ_|eqx4t1ym
zJTJhy$0z{Yqby(hRm`_^SpKVd;+){xz0)9y596<E@!=gx&$e!!&qSF4hOT1&#u#+q
zah&rK=Q~}V;8F?NaEcJh+jnwB^TOrASuXc+P=C%NR$C#WP!l`)CoM0)x%00Qok*wB
zKN*0U$z!ikpyXMMZ1VmgLM@sbEPyS{YD+$|R~e&FEP@ZHHK2YIG4%rqIzHwvVJRIh
zi_kj_!00><f)72dIQ{&@O1$mH!;RfhR`0XNB?D7aG5Ou${dqw_#y6GL%VjsQpBR46
z{@=<HlKU5ipGInAEM`x$%eUuiW>e2hFIxASpoKKa;SUo&y{CP@L9MGzbTQLW^hj>|
z4}nRu`>lxWEVl7|yvCY7?!~Z7r>s=ZaCbC|k-+g&zxGRIKbvCXs*0?3=52oZItDQr
zl2(1lPteIpm^z`&XZ%A6^En{&sq_DcZ~n6wr+8{X@we?W-<k5y_fzZ4j8uLNUFp<j
zakcy&|L8N-)!=L6mKoP-^8q`q>e(Zj{#rW42azK64X?Ws^jSj1x4VGmt;<<fIiO=*
zcy8Gm-uaEO&^nS$i1dj2!|SB`BUqXYV7BxB^Thb$-*ox}wOROar{M+ZOnn%5<KZ}R
zcZA`BO+v)@H_<!!<%?%YzlrY1m#;FMvdVd#{R;g}q#1R=&H$((N%x~gFx9z0Hq*`e
zC+n0}y_Wj+<wGf;vu*1MKJuCFd;G+A%1Bd2{SVF2Hu;M@@J1VKI2$XogFJiMg!odr
z4w;uF=x^--dzQnsQoy^`?K64&m*U(g(m&Sdzj_4jA?f9!<D*i46R~-EdJ0XI)db49
z1wC(H&}8{l_Dio=>u!ZZto|ic(+&!im#n}V`V$ZLpI=ji!Tv{Th&H4DBT2;<3^_Y;
z|EvHHjPF0j``0oX^wwmH{n{u6uE@A41-?qx+|XKIXOOGpDN-Ng7|6{HG#3+9$0$kb
zztWT6rvCrnZ+{PQe}4WK?U31??5YU-EaY4~OXN4v!prmfuPbw-L>d1{#P{!FzJGf*
z&%ztyymIkYXxruB7HzqzpH^XYh!^knzK&auB-KdTT2oyeE;FYkzR5qE^T$H@cOFou
z3c!};H=sL77+9+MO;n_>d@j=_I}6|VP1GXzo9M{_AUHBv@R3CO|4@y5P3Y^Lzc-%#
z<)>uD6rtQO(S6y}zK}Nxs#Gm;BVbls>Vg4Mn%0NCF5#Vw^n3aVe?D#1W2~*4^0gZ=
z%aE}*atpR0QKDth|Ck2<JS9XC9Ju2(cytycE>N`*ZPH~%5XbN+GBQ-z+@QfeUT*V!
zfJu9H)0CFsR^@_UW)Z!MXpZij4fdtP55xJOHF&r58=%B8Qjd|<ys^D>t+H<7V{#wQ
zPtp@bzp=%uGcvvHRJ__>yBZui5HD~}<vV;=abBwc(q}}dZTdwfcXPNYHJ9azF3%et
zBFlJ^0-IuadRSVOxGTQMOVd01vPxhM^g3FrdL4i#1QzL!q2@PN)G~jFHgISPUD8!~
z^7_Dvj~;VBeLbFX8zw(~H@XNH(!21Wj;Yv^RMA_q>fnJ6U2V9U2t}(?KWf;6x~UZ~
z#QlGzcK<XV|C88i|L?F59sAF*)$$2rWY^gWRy-_K<W~`w+z4_rj`dML%nj;Ht95>)
ztX-h?zP`(g4ci-lGWwy0q@+VYpd*G@mvWc!pX>w<3@E%Hj@Lwc+QGt0D#P?Uw>zzF
zY@hfB?U{LYz$_qEf4~gz9npZC1cR@wVy7RLOY-F~w#e7h%d61R@o<J)!arc`R84rp
zUOXlyCboPHQhmF;wg?rE@e_qw5EPTQoAhm5C*(`zPRLBYg0>@h^aQ~IpExy5?|rQO
z*+<8&TUV=h5p=O!GK>Pr)GdG<@m9E=Ts$9ydNClRb8~I9hwFP*_%-_*k_yk4n3$|X
z1xqY?C-jND<DThI(5xX)R4WHzu$ySb*D9K1-9S5`31BQS5jGc2s?@*S@5_}-5)Y9L
z32fi2M7o1WM%S0;Mtd_}MZA}ny7qNvMw-fA#^z_3tw^$wWqoB#Zm^91BUX`TxIW7$
z$b7Rpt_tI?`5=u89QuM=itp|VZeI=~70u^w6+vQ&4x}PJ*9R-FsgJ+uGWqbZpl%dh
zzl9^SO1$5HL60%)DF<~fv#7A63$1A~hYfoZc9=CJRV||CrBvk;mW=oE!@s|w<Ul`H
zb{bVB-+@zk91tW8fgbDgZP}lyLap&Ok9&H4;6)Ka7!pA;ExJykD0-Mtq8q?9aPne7
z^u^DEx{*AOFBvGRqbnkEEp?*qN_m%PHGSOqBwUGZmh_}rlwnDffMC6Vp3K2Kn9yec
z0o01ub6>CsiXQ7iHh&ZK>lMty^8J1j)$?VqNt4dweyt+A3|Ru|?^)o#Yq_~;@F<8^
zbn4*eh^!;?G?#|xH*A6@Xg|CX^uRQ)ZY)zg#-Y5SSQt}chcB}n=1YHqA~&sr&_MGB
zb>rfqm=Y;YdXLz{EAX5rfNSJXD2r;+EKX<RLI3vr$8U~~Dva8se6<*rd05daK%gUQ
z%(1FY(GRPbXtUJWxQMOVtv=`=dPgj75#RJFsa?LNGPyvJwA@Ab&>zr5G_@4{r8;}N
zZykve3&I#;-7w#$5nWzPE_UqGWmm(yEE_|vhO5U1Pik=2jP>^DFtLh#@N}e*Kjurv
zICW%`A)ms@0=D`TnKbO_Bl9|Vo@qH|tp$2GT<)TEIJo8W`W+EzBGJ_b@O)+;jARDx
zAk7{S(bd#bcBW8LpzY-#=~EUhlqe(lti;T{Uzv44km5F7fO#LJ`X`tUKu{QR2V}zW
zSC&9iHB|CX*9U@I%dS#w?%efGeznMWM_fnpQ-g7=POnOx&FCTij5L33q4h}h?HJOn
zSqQ`TCI|lz&4xtJFLWRL_}LU*%i`iBG)(wR_0i(})o0w7`-wTWLNl=C;I+-jAWWJW
zPEhLOa9wTT&+EHJ!ve)2n<JJgsm^i!%sgQd-Z3Zr0&V&iNT|Uok_E~#uEM@*pv7y_
z1CyMQt3&UZ$(GeKh$biaGDb$9+G~hp1P6c3Qxd2}y4b4sty1*Ghk<I?Dvbd4_R#_D
zE;A+OeZ`|dRNW)lA5SbTu1IA!DI|V<ZsBZjH6^nx%gqb<xHZq<JIZ0Xh4owwr_l%I
zPywjc_i7uXP0`7ZwHuNK&*jIABNgA74kghhitDDFTCA+Ba6KSQz=+wFnSEbr-O8k6
zUv(V)c1-j00p(W1-S0wvJM^25kE?>!7QZ$>r<BKtgspgmSedsY08WnR<IJ8k<vzSn
znsB%-Mz1Z;b%N}$_c&mT=|Noa{ks8J!`FP|=%;z-0y+Ti&@pOcc5QgNx3<*4M`nT3
z(|73I3}SPiTFCK`nvuoH^Wk%`>CVac)=AA@&B!0P9Idjfh-#eR`c@R)WbL+u-b@>F
zKz3J?9deVKS8wfB;OfJB?`BI$D`qm29ib!n!BqVs-&L&FxV<QG=9U=84MP<!sg$0*
zgudAQ)bW~_(hs!n8aNS=a^J1j{rAQJ`3nHGd*Ra!EWgVwbn7(J<42P|l%aN$sT^N8
zGcJGG<akFSSy9lcN9(k65DP!lY5|(7kLXN*HT<N)b?R{uxBEE+nN?VsS>3jM`Vzi?
z?DD~>&aQ#O0T?}?ARgmQC4c+=#DpMS^r@Rr0)B$wdRH@3g9KMX?8e5m*(kWjD7g!l
zHwOMjMHqqWYM=3B14=Y6Jw9KAEQ`nZot7o){y6B)|NL2TsG;&v1Cmmzd=$?-NaMp4
zXdEDGvY4qgvrPKqi!yKbhqJAWezcr7fU_Z;ZnS)$98bJ693mrAS5L|p6hh2*{B3*O
z;JjIHA>C&bsh<rAs9uAGu6hw`VCa`dfO{&xTC=MCplne3{k!d}n&Mz>j(PIK-OwwV
zoSgg#c1o-9^Duqn+a`uC_*)p!44y1#tx9RaQ(n*$J1cc5Fv}>BlS8QI&c)mYZ(gZX
zz|u!d5d^1uE(E3L14VrYY)lJ7S9h&ZcdM!<=Xdy3$DgsLqzm4?sWvpJ)cG#pAfAs4
zbM-YQenl3lhD#EOR=ER?c8^hZwX*t&tz00E^c1XntJ0l*DfaN*dLl)Qai`Y4j4i75
z2v9g5tR6=Q?#9Q!YwTX5Ga(ZybPk_(+GH_tt;~y7+h>vzw>uZ|+<)!UTtCc_N?~Hu
zGHvLr;#tbWXq({D(b{W8*x8Wdcru7{#{B{}Ij$O=Tyr<}=QD@BUlTu#C`$8E?%F98
zxxJJQL9XPeiZ|dQo~44&NjT3z2722i5*AX;A2iVxj>&IiR9<INb!>n>thSZont^P)
zs8jka_NC)R>mrtmuY|Yz*<hlIG}bbFj#XFudH$pIoTIWI_pfj0-I`lIJC8E<+~4Yk
zPy(!SXH^hk#Ev>!4Hviu=5Ws997mNOzcM$hH*x|Hyp)r{BmOtToQ7YOa$Szy+FUrw
zMiB#m`p}nKd*}85FIOQ{F{*{Pcpx9LFxR`On1FZ^zc+I3sP-`MQ&XzA)*V`jbTo_i
z@Hx?(h73h4o>=BfWk3JV5la4J@-{b2%olxN0*#{e^?rP#BYWN_LQ~vJv{gw&ye&~;
z682e4)AE<K&$Xv{UV{7ZnCu$RYgJ+h&io!Gwsp`7m%OCmC7iG791Dmz2-z7!_a<c}
zjl{)H`$x!i)>U}VO;j5(PI@@~b?FEgKU(u7Qm{EqyJ1=&da#8}XJRnFd%t*>Yu}Of
z{5dgLCUxx(ExDT@KufOcEP@ajRSN*QzU6^AfUUPAibMdH>q{4>=Jr^JVr<1}TO#Wt
zwdV~3*U8z5-$WZOVuJ^^85azCxZ3l(&dYQy`u6B^wJ~{VZrrSKy<7j$t1FlDqsHmE
zg3@_IdN?=4lqA2|&~DblnD8Fs1kpt|K2Y-nLPcV2l)k^DcsnC4wf<RI6qi!c&{NAt
z??8!~H$~8+Jyuz1>(-!!>@^|3Hid}Q#WvybqsE%jg>vPkZOFoW7p-gu$>N^vkkaB{
zoW;r$Mx^xQGBttDmB4}P0-&NWaa>r|4Gi5<idTVc2XAa^L=xA)mil2IY3wglzwscI
zNrym+6-%Ch|Cd+r!5PZ?_aU-7T%x%xUHN~{l;x#QNdOtcSt{@mv(*wKARld_1;|I=
z1Rq(Hef;+-A4fJ|w8J)1usyqXBq~a@l+ipD$4$^|fbW*~5#nARiytnN!29bNE_{Kq
z+4!hAZ)CzOkk1VKEyn{ya$^8BzteLC__v|Y0LVXMr)>}s3wItpmCmkqx`=5)5Yj7(
zxl{-LdujQf(xZQqvIGA|yb81m@R<?a%jfLA;0uwLKieqa1FSeWpb&*k0Tde(W@XQf
z-~$a%1Au+t4@Bb$|BB2Ne|!QY0`N?fc7dGf@{(-7j0MtpAU*w)Bg}v7HA6Jmf#I|Q
zi8WO*{7qDMjQt3;Jm-K9`vNIx3E*j4kEq$^28QJIzs#opy*k3*fBv7Yp1>mw09HAH
z&9o8cy#@!pN&rX%tGtt*2l%jEk-1zKg{{i<q;F4VJ0R5VfAlZ2>D^V8TtJ-eDvCZ8
zfz=kye)G3mdox+QbufX(UpIB{NEZ3sjR&dgKSSnaOpnC6<lET<L|F&CsEH&@oZO4{
zyduc3nXYR4ez!Ts7toH2LVl^sLDQ(usww~~o;O?24_|85%zqq-mRnobZHg0G>VR&F
z(}-g>1B1~lMXWy`InvzR&R>cxMHot(WHk9BHFiNUaLxm`6CyYfdIuLa(-a@+A!T2G
zSk9z_@;r7iAr_>yoRM06pQ`jNAN@>X^R0@=ISiN933LDnY<B5+uKsM#cFdO8LUkBi
ztC~0s^5ccPZoU(v))MyED1PT*rfPKlAOH#QY}xmu&<AVyGmj|)kJ5QpuCEGiwTZV(
zo4+(PM*r~P;)DeZF|pdPjY#}7q$dn<u}9ZPfS3cE05O=%5*Mpv{oA!)`9v9bsv7$F
zqv2nDDME)|bbfF#X(<n?Icoc^60`6;C3WV&As=0jVl9RmUBp2(Fq2(Ta$bzz0^k<u
zDGzi~o3gUK#1^AP9bYU(3y?1=yem)SFdto}foh+7!6ve)j$+|m1d+)W5=Y)IVbfP@
z!`Du18f^(r-Z*Hm*3j@jBo<)ky0NILcVghMk`_iFfenD!p@J(HCPvdJuNwH}9cCL>
ze=476v$7T{&ws4P$@e;Jc#NRB6Lrg%znK#Zz^exrFBgI6VKYttV)eeMSPwWzzamvk
zOi6U$=qBV_*ScblA?FU9v5gV$-V$w$aq5abt_k{9j!*EVX(<L>aiEuFGioUw((3Q?
zVH1qUMD)Vr!F;O9yUV1=r^EL_RNF8LX|FW~#!)ZUNt#?smEyb<i%Wcpv1(mR6${Ct
zMTDVjKxV!s{^FI(Z=w;a{);GI1KBP(2*AKX!q28wRdm5`H>Yxc36briXnzqfYEpxN
zZ-++?mVyH*xTcCYMhY}pUnJ=l)-_jMPIA5BRjLBUrlMz2msc5h@f>4>YXMDNeae&=
z$LMda-g8oKaPJMcYD_dPA7b6R$hqHt_|Xb?F8yVK#ZT9CtiyvdK=TfSF)z0H#kYzK
zvICYm8dvOc{CO_D{IH&bm}uJQ9Ue@%It#y62$=H>yyPRpH1%O4r<b+I+sIw5?uU|%
zXP#?3M8+%{y0Tq8Y$^3Ki|tb4`H;fzXsO%Rx1@lXMQNUk;bz;<;=AKP*3f(C_!xQT
znuY#My{OK$8QrLTXWE;bB;INhWVgJlp6i`~P*ea10v-vN){_E5MTZAY1(Yhv-$Qq~
zQp<UCP7*aZKST#X6l2!DN3(2Y-JeRZ-heMStU$UDp#awF#W=3o3<Hjn2VI`y6rT0<
zZ)r^&9cqx(OZ}0e9+)dE6Zj-Xe1Aw`zlBbCU%jApKec(c{0tfcP))Uq(y_~^IZP3d
zVqXHFSx&t$@+rEoUjeCu6TjTA-+TJSEhybFjB0#Z|9#pf(e*x3pKM87Y$3oJQag?(
z*#WU00RkQ_s+6g{^R{;b%;qN~BCmwcJxLl2Y83b|{H*^o^~T*1@AFD=1fJE2U;se%
zuyXV8pLEBtn1Ec-P4ozYwja>X)qmq?|E|5aJ%r(b8|Bo)?a9O_kNPI0gEqU^fZ{g{
za?XF9tgjCe!pDP)E}=|%*rIFS7RPd<H_hy@#*<aQkvJp*bYnH*erOY}8KbC<Q$#O7
zaOrEBfs%3(aQVFWh*WLIIML)R;zX^7q-fcs9%%F*2QPpTXA9JqD=Tt-6XlfqfzS`Y
zG`mY;7b$pahKRA@s~H}?G;dSxIMvpsKY1T>borA^6=4Td;Aq0YP#$zUO&-aVr?}VH
z^zAU7${AUo-3y|&##>k65()x9=eA_~39IXceJTPI&#dH|Kd!p4q%6dzn$+x{f+XyI
zo-4q{xDI;b-?Rw=KHH9L3F{yVTic&j`PZLxdzR-dUX6ta8;xkqh|rcPKW;H!LG10W
z9z=nu@i*4vX`wH0=~yYfUh*5(xOW29gT-vtuR_Qv?Vp)=-oMcyNP5AFZpOYuBif~@
zaWm5NZpv+q;K-#{>StS)4RJeb`k~yOK0@AiEgFU2*RAP}xIrKOv@-H*dTD9(eKz}R
zl&h6$)*myX`lOp-K_8oqvo2b8Zj-D;`xa)M_|3Lm$r@bRJbBn4lWH0=E@<&Mjxwru
zG)%aW`|V>+bkX`2DrgPFjzeIbAbYPMeX8s&nwi~c{KDmJA2}}%Nm%y^%IfIGbYBYb
zuR|+rwgwTXvH^FgW{2NI_O=?oi8QQwaKIrzOc1y5NA@QRWPdd~+ohsGdSe@Aaixax
z1X#GYVJiKBiC`Dddjtb8`s^A+m?{g@6Z5e?zC{tkqL-Sc>&TisQ_ug1vH0yLPQ4t1
zeFcRd)XNgD5-=4D>%)YUf}cp@1G!f+hx;3S#Sqs4p~8s;7kjA|w-o>Wh1%)Yi^{Z3
z_>bpL-~)rOp=>Ou4LF_6`}#v_ih4#tcQB!=%X{nS6H@@Tuea3)b3e(z#EA)o&+wmy
zOc@+feGsJU_9QJFeLt&G@~g=wO1EZV(tvxWH_E1E+^b{_%&y{aZcga?6%e%MAA^Tx
zv#M~zU)E&|XS$f31zB?vJ6^95N_fX3_~XG%(<ir*8S;>xhEaC60HJs`z~htN7rYny
zRO0+58~iGFjJ5>h`jejm&u^jnQ4l)VBC>ip1VD7rgm!Z+#{%@<1GeuFtA&MO#Lk4R
zT>d3T=TAbSl&^zFP*L9`b;;X+g%A=Bv|By`g9)+HKCE~<8TLcV>^`cf08y`@>LM)v
zV{sqpxey8TWFE(}e_vEMeOf<mJ3=(1koQ!#eZBmZ7}|uOGaHQDBQ5|o8A;z?(x@Te
z3%#?3I6EE(AFVn;44^E)F;;#>`kcrGex}bGcntoYhRfcFUB??Hssi2gknn9VV=Y5)
z(<WkdnMJz*kUpq0DRqf;<T)rNZwOOo^E=I<?DJk2Xy4+b(7>vrrTd1ZJ7AYE($TQ1
zJ>>!7>s@{fPM-=BJV+M$q{_7B=;nzf0!7tRh0=NrZDzgBeKduTUn08YBWWsYURUOc
z(D+h`#I;{C{De@uNcZya0@56CIP+Y;a4b|lzgzE!fiB;>A8G2#e;zeCUmK=3<Uj5w
zk`M$*IQ)hy!JJ?}0(w&jD(EE)9kT1-5>EVQ%{hWm-+}jHCdZF_PB2`oO;htqx?YM$
zGe_kTiy?r*i2?Dz$e}OL2koQtvKF<Nr;+PnheseLZO>F=?wSXM^Uit1x-v3aH>J`A
zA9vrN8HIN!drGNXVef&2yj%!Wl|-LJ?t4}Uz85!?uZiSs8e|Q4!ayaIBd*dTNIX^R
zwGe7tiy7?&jAzJOZiuyDKzO0cJBmiu3QwzRV{=vAU*8c`sS;}J8%(Ia9OG2dsX90I
z!=`R&9u>5FrsWUKnmGGSWCbb$nxg?C&;FD${4eRKNCKM%&sJ2Q7N9iYR(IL8j%&Jl
z)U%ICAURbfVfLW6Yexc`(JYD=P2UIp2xpx?{SALkTmJFde4QZ#{zJA+1qkFU;4|k+
z#M}<v2Irsx$lT-E^w%^A?>cvuy6#flD!Rf&2R5vG2ReCr%7A2j^D6rFxw$5b`Ss3!
zN?RyfONtN$s_zZuh=+eJA3nHt3*P+&e8DrllwI)5${)n&3Etz#T0WdBZF5a$i$cVh
zDk=Z!v4Oq+{M=VwQ0&H)FQQ9JQ*rbGyZ268i%EWu|0ij==wn1y3_GQxXW7h=ms@VQ
zm3?>l#(4jH?c)O`XrAP~xN)Yw7x(lO?myfBTK{d#(3@z!!uh_?8X6A)RGG)o1a_tP
zT)5<_Xx)}4OO?>NBn%_Jootph4{fOjR+b(1N@HDpOMU9!s^;T)iApSh%5j&3*h<Jv
z{^JV4)zQx1M8tSOGzMMNRW5LwUC$+JrGvYFWr+X5a5`tEkSK4Ed61@FW-a2=UcJh}
zDgq#Nz$uP&Ta?hV8WHM~`&Zo}9m1N~Q<BA=26DdtAW9+<e6yUFVs$GNdY!<ZbZ3%j
zxW&|03R`Hjc#*s5NO{OpHE*d#;YsHA!o*i-!DC;T_1i`Z&59uvUJhLz4~U}`T}{b#
zY4e&xC48I#D{G~rCuDC7H7g1oAAJUdBLH3|3GON=oWL=@T7&+2d6em!!q1!=i_HTv
z*OMC^{Su<dg?JJsqfpy9Q%S8$>p!%Ix%~IM&#(V9=PDo0?kbo4I^&QkhVKbf&mXGA
zze)IJ{oxrcF#xBhV%=xPNq45fnD{QWPL%Jrs7M(P?a6*hCGp@2vlwi9ekqtdaQ(Jh
zQSN8oE$JwONOg%P0SsNpIj<Fp&X0jDMk~@}b3dC(=}MmsOLg^95}n!w4g6q^)AJC?
zHp#w=iv(b001QIq<`NU`O+jH*zFdh?eV|b6y@V~7t5>zwZ_7sQ7w4L8{`k6au*kw>
zrt=HQju*lCt}l~mqPKc7coYfT^^B5!cU!za)e+rg=^Y$oBqC?HuWZ(O5{&2k9H1pl
z>^d)14yfwo)W=OrdOo&XVeHHiwEf~aGEG8ysJtaVGj4(suI(F)r^mqo<yU%~b%k_f
z3*D-O2iP=DAxfe!u);B1;lUdLmn99Z6pqeE&zG0n&*{qxT+a4Wf^uEQqBD8bIi>am
z++Pcsh-sU|@W>?IET<OzXsNq&e@{=$Iic?-j0g>jUU`A@C;+7VaIUu$E3Nr0istf-
zWBGI?@6g^+8ollFzzUeh1`%CYoTXENHv1c^UIV#rrK(*nG8`Mxekh#dW2*1XAGYKd
z#<gqGybtq+eHlWRX=~vTK#7~`*o(p14tIZo1GQTDK`xTTn^wE(eVW;w6B~uqZ?o*H
zkU2Lj41JgDi7FdfGdA=m``C`zB;x&vgs*2=)h(M_HD5^SznS|z(7ib-e=ub#{!4H0
zo=w7DU|)B37?@7=y34ceWhVSBH^hRqQULT=K*wU;JL;i?fakoThkXMrPgD<Dv7I3_
zn+_^n5^$@7kF9p%sQj@Qf2-!w;i9HMpnKw|pxjsLjYmjMp+`w}U7EsHt}69@^qN86
zHfn%;0B5KNm<7=Tmb%eh36{J{?kBhSYo6SAUoXqv+ZErM{c>xcK!=xY!@(WV6}#$@
zc(I~DKF#n`QWvZfljbi9dcOv`A)c&8gh6g%iPSz%X;mLCqMeTc^R)3(KvK8W6aV?N
zH=Bv;2I)g29+ICg&#Awo;%8`KxEJRF(Eg;u@TO(XKn#ETVxtv_9`5??GI8tXxypVs
zXJ;vyh!@GTo2X&m8e@|`#(hLp2!93Vf(g}+(88syF4)56=Z3nrN|@C9xMPfSESder
zo89QzA8nX=g~Q2UY*60X##)@m>W~M^GUF`s)t{bD5|hIX=~*sBLhm^HNfVVsRC6@V
zYqO<=kM^!<1}!`QV1hD#uWph2chF1&Q^*ZEpkHSr^qXkRz%4+7plQHOXaNoY<ByS=
zz!{5NHTqD63h%xRz>%uR0=-@%H>@KFO^fjDErc;b3ayBbeg+g7t>Z6vj=jI7^ZK$F
z-FpX=_lOXoV0UTq?QBL>al$Cz<smS5=9#y4+u$jqfpAOM)ud<Z?o~2P_(A2%eY_F}
z%KxHv_&U#!z!AS9jX!q-;cibMaTg$TH=G#0wE*C0I`)CnTi@>POHrC^A$ZqiJRl8`
z?cwm7h@vSC;Irrb>B?7Ln#4XE@wms}VB{}U8GPjg7?ky+zg~za-~MzBngCeEYopl-
zuWIA*d^-?4?9W#oAGflQjV6Lu-x4nUCK{~+(z4L+Kpq@F{^u*(>T`n|54G_(K<$uI
z><kzkR07AG{q@4xnWG2nsz4hoTo6a|n@DxOkHFw7O#uJ-%Cn`YOIrhf8Hw$`TnMoq
zSF<@4{L?`E<--5J4&jzGitd~b$_$)vv)^)I)4fWima=npm^+`$c>Perw0~_**c9&q
zyhcLDV$3zr@mP5r2=HA)+f}{tDCN#XG2hQJ&Y1ZW-Dd4PS6O<KS|)(#kssm{(pCCY
zks%U^y|x2CyMH_hr3X?83G{CwLJvv;$PEQQ`KvhnN4^u*|C(L@wLt&pW89$+*8C#5
zjRDt80J_E4>9)e<Z^+9kcM7cot!j^M0-7T#l?0oiR)RyCd$s@7wz9v(4GZ|6QIvK6
zv3N-RUt#ILBKsfP>c6`=&c+5Pu2O`wF%Z5l@lC3*@-9Ae<s+KjOhfsnvB%i@O2~6t
ztu7Y#*}sP5rC_p}0j;TvRnWP}%Ra(K$!#vDKk&1p|3IqR<FW~wt>%PQ0PQ<Q=V=Ct
z1iHJ{)9~F1_ywS{<!eQO53X50!vbwTHP&)V<$wFKS}otJ36@+Fdtx~8j?aDta>@d{
zc@uslXzTbK2An1m^nnIqh0|{$b@>@HgwOTQ%<wW-<io4Ki7cj9FcfHq-7{GvF-AsQ
zG`CIdSpA=Ja(aheJ6d#ow>0fCPO&$`-Ks6RB)6catUdXi1chA~qiM2CC4VW=9a5m&
zxl5qhLVNCim$PiqUz<Ts-Ax{gtF*pn)R?XonS1ThjY0frKk3F6IAGj*qUT{@Ih94^
zL`=Q5u-6Kh+$!J(jQ39NJR;m|Y7XucsOJ$|Wm_l{!5_k)w2@_~GTWa-Xa`gCPeBNm
z<5>c#ySQj4P7j?ko>q*zY#I#qQEwj-Ex^38Z!3IRG5^kDWXNpH{v&l&+$bn-aIcL%
zK>8j|!yBz_5C`aaD{?)Yzhfk;GUVk=6|RoBx~F6&Qb@vsLIFo!T2&rg(5guAQGA3Q
zi>yLQZZD?YlSfyKK~Ts4IyKc|-zV0kaU=0BE`@l4gOPQX;tJ&T5*;oG!_W(1exZaA
zn#TJGJ$JnncsYlhsYj*Yq46hA!BqKfyxT>RDhvMk><hpndl#tU-@zzk)@YP&*VDMX
zs--uzoD(uzz)PAxKcb~zJ(_0q?w&H<GBi^5cAZhZF$sSJhzrlVTbcHymDssinU%Vf
z#y;NJHY_oaPb7^Kdm7MxJ&{Dt^)n4|3=z?}a+J91><5D4vltdX?crr!q~me~9OU&P
zHmIB3DlTG8id5#?PLn}<U2~-FB1^SINKTu5`y<`?p`<M8Usy&Qd#TIgg>Jgi-It9`
zA*kbW^^1yHOD@`v;en<hz58De#l`oZii|N-7nfg&DP_syymdwSh9`<o0<*j#{fD?p
zJmnltqHn@ZyJ!xgU99C^VDaQvUX(?eWdFM7oV6gY_izvyD&HUL&nC+x*btldxK-`h
zonD_&dDp0(sU$tcarQxk&!8$Tlp7t<9#5iyK5s`opMSz^@QiE!j%2~6$8f6YowzO{
z^Wwz?>m}<Ur$B(6u{}P(6UE8PVV{zWzQe=#a%#%>!(wJ~)~DRWFRFY~;78z4<bqXP
z$qLH9n?PAJUGU5E#dnH_=fW{zF|3CfVjNdaD|3Qvd(2%?Mn}kCRptQc)wGa&G<)bu
z#)83zVWDBE?t0mw+x-_uqe5RdnzP>nVk9q#ZX)z@>q??Z<X!YOw35wA_>9mFE6ZW3
zqyb!@h$RZQ)Jgsfx8T|5RF2ZJp6*{h)B15-`n(M$RpDG6je%Xo{U~Z3$AF?xkE&Z3
z#53(#n>4rrz2A`C=f9WO_u*(bh>n%MX2J7(-&|naPbFwgq=SJRdLNVh8t=bm-o%yG
zbzb5nH%|IFf%B=eWy=roo^bnnMCQnHd#KMvrYa|4%&f^7Q)aI4GBsW#-Ld57M0x8)
z<8H06_`2ubb=rJ#q8Prm3(cF4Mc6We+(qtRJF(7Y{tjzL5(63Zj<R_a$O6MxRH994
zQ0|(xdGIh&YJ*9<L-MULf1mh*1E;a}%M-(BBuNW5rgU`!PTOKHzvgmz!0W2V6P-;5
z_v`6TwK(>l<+dh9l|0_5%S@WI7dNsJ^5&BU3`dbXIG~`iii{LUT-Tf`BlO6Wg)7kp
z5HbFYx!$hyGb@kBOtEA6jg@(a*Ck7+>e%rsL(9pdwWYgFwVjKGJWky9W_M%F<VYxU
z)QoywzG2O-O_P>7l1Q=JWRJwle!`7*%}2aZF>>U~%ipPNZ5?j;w3jGGPPtCXw-Q2m
zlRtzQk)cWt>jTjd$Z=o5EI1Kc42>qm$Bmz9R>4UIF^g9RC|^HT*X2<1b4a16edVHg
z)BMBZb@n{w!pvsvui3zX<Owni$=+fBimN4tua2?`h$|p+Y`MATOxYg?ma(P`66IJX
zS5lWYJJXoMXkkMjYE=r?G&^DRlz9^(QablgWJcbtu(>S}ly(wSSyg;W?kA8GOvO*6
z8kdXMHB|~!`uJuqFX$V9f!UJ#8z$wSI0x4jF1n6Oc4+?QSN95Qji-Fc$0mY{0p7fS
z{m*X(bG3m9gUA}EcCYxCMJUsAwlb>(3~{2ZPgeyo(|`oU`!eBohO~(D&d<Bzx=DO@
zPJQx##y|*cp>CVM?IQVj;2Lv;^Paq;dRK`NR=fS^G=2);F|z6zxVPhNfLhWU*gX8m
zRT)SOh+UWQZ?y3$64*(=Cevv4HxZh~4A{!VFY<D;fjZ4L>>9kQ6X^jyljj9cE&xrO
zpq@wIV?77D)8oavzlm^UrSL;m*!Uk+oo8eJ6Tdy)<=Scvt`Pu^Q(pijl<g@1z=FRt
zjXb4}#|4<9ihdJm<skqTAsbNq>Nh>t*5sS8?b5*b{PpG_G#asBNQl*j{3dFbvIgi=
zrP#Qr+xgAm5o=rj;-i6AS0Q!T7hG{jAV^!)|0cR(j^bqr()Fif=o$U%y&wK%01)%a
zqS(ZJE>Lv#Uv6Fe*WrcKyf8uuFUo=0|9WSmfART|&f9tO`VG$in}<HW3^uonHUhuJ
z$^_`I*6-~W;|$G_<Z@G9WnP!=G{^3oR4ptB()St<-YP*<fb9*xt({=q#*PtOc&#aO
z8(ikP)2~{$S|4$>m|l}hI$biibb2<E^4f@2H59kWSg#rE^8*yjfAk*yfKRc?9T%Ax
zz9iZ_Iye<oq#)z~wT^9U#c`8489<O!Zy3EG5d`Mdu<b7=o~iyCQ&{#oK!`L46~afi
z9&Q{M%OvbDlyja<=nfQP;~`^X6u&3_?`;`8Jf>6c)2qrho+{F})_>?CoT^3rHcG}K
zX^a>`MyVvvQCb&NFetIczzLhnuua)Zg;4?po*Mg@1Z-*14Z-!MZt}gCY~rsz17Xem
zTq3%=7E>6B)!|uLoNIh1(tLL@VMl{`n-2D&g=<YQ;%s!<wmisAWd@+cAHb#XvA2`s
z8@J0YBupo3hn1JjC$2H=528pD^f?Ecvcf>W;=*ereu*dlpt?C=Sw8+AaNGQsWI&+(
z^dJ8D|3$+0q6omG2vfhB*2(>yZFEhwo-M5DH_<#{=ytE!CHLH41q>qJjUNUFAMUAN
zvj~Qsbiiwo9S|Pe%;f6Dn~VMQvK^Sq<ofiB^N&hisaF@8zE@l(@#IIDgx?f&76jzz
zCRxmhSG^)(ESUHxsL4tzS0K1fg;Rym)$g!(T*cr;xCIA!$CCu|4e!Is?fdjfhHQYf
zy~oP1Q@1jCiI2ZGeAQY)|3XhfLL$1iKI+UdgiKXW;fdd)4sz?KHnE>F%t+=xQ+j$-
zKu4<w6Ho6uyo%v=Lzv#&)_xX<hyXiy&F6w*+Q`w^$aR`%Ac*tW(P_Dr#ATr8PK|60
zeh_=}+*n|$40JhsZow0~P}%`^1kG!#LOKp2?5>Zin7s1#F9%2aE{yJdSD3Nh?XEG4
z_w7#TEdSBnpyV%p^X9O*Xa($xk&SsU;R+7C1_^m_@HUHXis@W!TEHczk-t{~z%Y~3
z6C1~sl~Ho=heG6g&Y`fcGP`1hh{z%~hUb#uWi#yh1s6at9TI%*Kuxc-sMU^CK($O%
zj(%}Q+{P^T7!u<Z&^?G4Jmj-*VRVqUn+M&X#3*;`I@#M$Mz42Ux7zlVh66U!&K;qo
zGc9iUIEi~Ro+2JE5B9iVxspX(w%<2bvhK3KA$_j8#xAb6@wz!dGx#hu3L_nrO@nv9
zNSjPb?m^+71HfI^wXd|+k1NdYu$ycI-i0utSk%}S-+oF8;ot*K>!jCOg3#;O3Jj#3
z6Yo}xJLojRy_9;cBp;2acjfY!N~7ZH5IU$GtZ}#Y<PK$~YjjT~IoJ_PY}V#(%1KCO
zV)afHtQ;T_S$ur}Nh2^d;{XoI3OpL@(hrlsg?18zCzkD6Y9^+047m06Nv^~V?vGgC
zGx#DprYq?6=y`kU5|0kUl(NJqj0sl<RHczLEyC*_X5)KazTd@1umL<CH-=Qi_iw!J
zUGw`u8^c4Nvi&gl?=amzp8r?NXa9}t%Q_ga)!W8{^I7FJ?^C_jsF0x=OJZX)Sk+Qs
z6tK!D%cM&}^nk)#6Pi*&SOit4roF5jLM#s1Zxz%ueX#Ofcc~n`Li{*!ckpuf&V4GH
z+EB(jtb6lYz2Jy89!z!^l;ZOa%&ufT`QuTDpkG}h8zz$5qo41-Udnv)e`D`Gpqgmc
zc40aqAOg~)gbq>#L6jC0X(AxKgGvVhl`4cpL3)!eD4_HnkX}QtBGP+Dk)BXOB*lMx
zcRBC-?){a0*8iRVod0}lk(FdJGfXDWJagaob=}u>*pp0rBw_vH_)PQ_3elZS1yOw4
zRYwqR@)tmvlzQg6Y;ov`YCrd8&3XMI<=&Nr&GW7O!w{wS&;=U~+c<*KXv;0-$|d@7
zahaJKtn;yUx?nJ}EbRMW)%=ew#Mk+640#fU#UHrAYS3|abavH`;?3P7#nvw9r*+?@
zh7q={j4Z?rMI97r$1)8Ny}Y+rj>LJDXym&LA-@S5FUZ7;jk&&l6~&rM%m1-n!7kQ3
z?gDX7kU7!|9fPq%jp>*Swbzfw>fI5Md@Sj{QPccN`F2&5igY2T(K`-`4nBbwr}Cnk
zdXb1)jgFB}Wg#x==@a%?Rhu}zr#=q(7DGjP4jv^Hj!l}RtcG@?xP=s+7jw7Z_mH3-
zIue`?QVr3qN48zH9l-R-lRJ~tL9r@L>tgKpEu5wb-+02^cE4lO{s8#i5ccL4lw%yH
z+w^r9UygHA5Xg+}>8?xN-H(<9rjF{;OA2>hDbPq4v+>Q<&X>ZYZO>;Mt<pvFjRD-$
z4}r8EfEw3V%fW5?#2Yh(2axjZ=7=kT)HKDw%vY>$`xeaHas@>SXe<i>AP*CU5)j&L
zF$`LZXEgtQnjsnK!!#j&AP(pJ1otGllVzxO82s=)ljaT9E<)1%FXT)>Jv+k3+G`E4
z9bNMbo{Nu@wWV-gVjg`g`FPhm@J(68iTQrCV!f@i@pDKiZ1F?%mBz}uH4Y)CuxfA=
z&a-$Cr->Ab^;yQ;oo`>_P1CKJoVlH(=gBYinJ1ImSnv8mqs9Xwq1arPoc17XwMzzo
zkc9Bx9;9m{GhGBXI6g&>@h8z<Yfz!>KZxa0bOPm=TYv9)XF3ck#JWOADzop8m(^Mh
zJDH3fNR(}k*X&S7mrl3|&MTIm=Mg=hrPw!D{&~!~5@b4RL+yaQ2e~|{d~q4~Y_ztk
z*k9wWNSfEY<*?Y)zJ;uPBDtQdx#ENqIn+pDWN0{`_B0xIvjB5@y{~>cn3H0v<K(7C
zx-FGs0$YYb!}3ek#n|R%J4O8m@^^y)RaZ~RJ%&6Rw}|mT@`b%o^?;<A3vNf_>xFx7
zb1^WpUwpE2g1JTWW|(mO$_ZI97lt8pImI`j*J2JU&ur9YRv#)mlR0_v=HZJ8{Qib(
zlpP0QZSr3VQ#rXYK)q_=^oy1~>YQNm%|wMzbrq?1m870ZS7s!j;rh0sm_YO}hI|bc
zaoMjcR?@tYFglfIR<rf3sv-FEk92KPPsNCmZOysz&Sv_`O73huK=`+BHM~9h0z48b
zxbt+&<S13!Ik={tMOt`3^P!4$fB!)5WWU7V1?NhxYBhh!PRTjTRm$Ey-Je8V1d2((
z>?jTN)~DdNlEbA3MG}wFjZ#c}OhjK(5uLC(0sR6l@DKvY6yz3Q>A3x|ugu?T_mP{h
zyJJIR=D~+VKr!b=LdGMz$RQ?nJ5p&Tj?o87=qOY+;BgN9<#F!AX}Q|_L11i)vAlKT
zT08ovLsVjg>n70+#TV+gMB2G%AzLVg6^*!KPYR%^$u_c0*}r1mY;*-NQJ!C&A+DfR
zAI1NiH%$B7WU8@#=p*}~2k!HUR1cI<IW4AWZ&Y7Yn*ihj(kWiijm+e1s=lz#>qZF4
z^#$DlBJ$uL5ln-dU&Ui+9y|}<dx{bQN+e^7bgM90H@#IbMo&@ACCA#WQy|#|eMBKy
z|H(ux_5StT>m)BmQsx_&Np~_t*Rp9XFgq)SdrYhGVfy~n=BU0`xS&GU`o|f=oy#^Z
zrWVeTpO&5*`P9ArL|>BG^_=+j%M7eV*spts%HN_sra#w7cXkIcBS7Ov5&;>HPih4q
zQQcH;T96|U{QuopNR$p><R6M}BTlx!gcoklDsjM&<6SBR<d~Cybbum*wB!qTgB&Qi
zP1k_Wh{|0D{`ht{gPbpD{5b!&66%X1IsgOh&;PYHK!5yjc}Fw}DT#}+@Wgw8&*&>Y
z)&{1B-hYd?Gd}wuch(E1i~xs#Y)3OObdd=_OnXRK^0)8uPoGVL0nhLpd^Ut~?WUr@
z+6bph6BJHUOjftY;~S>&T|l_3Y8?~(ngvs3^AZlxK~jc6n3ef<<WI_Mn6F$4G1NFJ
zr}3)Pe5cL)Rd{!h>178$&7r8Ny6bfG^rPp~UQId0F8`itiQBs|McB%4V2{N}u#CF}
z`><bECts0;y0%hfrk}>Qr!UN&QFu*%nGkOE<{X{Tw-H6L0Jjkk@~r`(2NSx^&I!^#
zoHBIX9n+=%%oL+9+CW*S!Izb?Wy6AU3dOnSbgb!7x=uAMrTAovY=5@TVitMQF3%vH
zcd@I8kCLLqC@(R|f=WxyEt8z-D+`QMnQq2Kp3oSywkJQ=*Eu~>;QCDK>Mgf`aF>@I
z9m}s(h&Wz9eaB&d^xPvyx~);<=6xqzMCsJx(XH;{Uth7?M`^$H=TBwhk9D{sR?07Z
zMY(7x8UJbzYQlk<+x)0+E{|@_#n|3KpoAhk)+@(l`t2JU8<O`lb0W1Bq>hP<t=W5^
z94N}L@)o;1S9&YwG~b^@u2Dw{JZ{hAp1*y|s?)N8iVHA-R7`VD_1#i~tQSJ3Vb!|j
z@Q&43@ETW&#D`~VH^Sd^3i-Ofywh{XT<HCB(tOkz0~$T8^RZ#VGw<TOMepI@-Y7k>
zqunmiu+^5Xyq4Krm)#*F)2Qw28qfAcmJWHpZg<n(44?XNpAhL;Ync{nc0Z{>o)|jm
z+i`i<CvSlDtxdZSQHTp|;w}tshzp9h+~BjFYd0-s4@YmZQS)BC@G4H_0qupDv1Z-&
z0=Qe&OSLC$N|;RK>8eK>(DKaKecc9BosMhB+INT<#5vd*9^GUcIDdgs=!MQh$TdJ2
zFlqfV(4x>;gHp?C7I;l7bH7?_wD<<is3x;YUJS*2d;rsxXGfO0tDk_K!__XCIQ)B^
zKrHURsXUP*7Qhx+ZCG&vnGzi}BTgN-9Q;`Sy$%0oKcf#c)U&-CbT!MF^zAre<GmH|
zBSzZ8&Tf{rg72=>>??CGz-FYB5MQfJA(PVLOt!25TW~X~0S{;+9Smy@-qU;>C0Ad|
zAM$M-cO_e{mI(H9{C`am06vh75eDHdU62f+w}LQW1fDd7cgNnkBPVzDrN|bwaAznf
z&;9~^)m2~nYczWX#JkHi@wiK{6_(xkby6hOEdD+U9P$4CwC~w%RbF%E4x@%db31At
zN(GjuqzCT(BcDOYGrbqf$9$Ny!)l-g7qx+~qr(X4#^<{*LJaXm-g!Xbx#f1bh>Ls#
z_O^LhU!b-(NZDcG)z^Bbp@Gj<nFxPa7Z+g&e0;a%CQuLj4#Q*|6sGCk-J2Oz!5k4l
zT1a-nD;NX81cpn{z$quK$l=cs1aJTh`-8U)2*tvb@TsuxDgIu7X#JO;L@1em*d1bb
z;1tAz+WgbT@t7c?cF!|<oc0fMU~X3n0$?wiK&%*0%Mn3t5~zWmfi>rUwtq*xhG!ry
zSl5;^DeO!9n(g2Sz*rClN@!yc3Sc$|Gz?Mz!aKj|3=G+a_&9=RfM0;EG=T_$@ff@1
zStmlIX*d{ZO$P|2XrS$};s1SP|NEUY(NO{uK$)@xJB~ywNmU_z;dZ4MrP?dZ!k)D<
z_1N?Mo9C;wFMS>*c|w{pBVR4d28aCPNRd4f+S35qP*yQoD7|(dUO4Y{dmP8OPAjHZ
z=jp;QKeuG><@u~gDy(lnP&#~4i^3^&ShVoT2H5;14e6DB#o#M$NOm+MzFAQl>DFC}
zy+W9by`G$5)TKA>Bd)TSq?7QZ*vWryY5}z|oOtmf)zuVWxI&5lvaRUIHD`<8H&m)V
zCBRX|CG7Dhu-K=L;P&hfZS3o6moaYWDOBx#T8?wp`Ks4_TqR>xix1YlpD@vXI3a&i
z#Z@A@Z-J3Se?_|E(Jw#vZ}(nL<P_MbYo5FIQ6acMPWgp=iqK?eK-T##S_z?!J|;Ko
zIjq;SHd|cA)KS&35mdly6+!MY#U+SRiVr*3k4>NCn7<`yY?-b;BZq|fI*3$gh~(AZ
z=uzkm&_RUyY(eBW_LP?1MKYnd$+f?L$9Sw56R5xOK2w!W<+vysi=2)+P!uf{Y%J(w
zU+Ul}B^{)G&Y&5}Y~R6}%InwL$74m&(O&4E)HB_7r7gs8A~)7!y{^74)o8hGeYc-i
zOJLy>&08kps8HJar|i+!U>|H}&ERk^*ieOa4F=veUE6Ys{k*R+ya9bh=w7<s+I0Eg
zr9_^z<$TVvC5hM9uXXn*s{}46aAcjFct$CDd9A=%w<<BQYtdboKQX8>^~auCF1OP(
z3qTS+nyIGTV87?2$T8gyuB;D%kIDi$Aj{J?dmc<;;##an*Xz@z*mm={DI0;l4poTx
zs6D2&ys%@2+H%Vm33GNm{UH5FNYsgjf8%-FkA&(NUOVu0Q4FXz>V{g+jiN9067QzA
zB#eiTfX<brCdCE2YDn{23s_9cts)Eqpz|7{AKfM8dY0L2=3e6#&dkE7o{IeRDtl>x
zbSQ>w>BbKo9~%)2d<8_l)Rws>ZrI-#$!u<LL#W|u?~l2|AHr+0JX%9551jJ-vl`c|
z?SSa_{Np`E)yJs>?k_miyct)i+wboP2BVpmna6I$z78XMa+8%^lRfJ?YVVWH^{Ez~
zHCl_EJLB(zXJ+TkT$UfVOd71RYVtWndH1DbTR)IO%Ctynz*o;ok|uouAuXAwcwId=
zhg_v_BTOhzT3ps_nW#0(sh>`+jmh=WxIxd>W_sWB+pUxV_DJGq-K3z9Ha>razSQY@
z8#ctzv@T5It9_jIa~{QnRzV{NGLl<-h3mFb{x<B55HJ=jeyGqQJ=~y4C0y*neQ)!<
zZL(w`>z+-aa=LpEN@GnRbi<Uv+!e}EsWrAx|5gGz&3^93Pa^Zm)u&$)cJvLY&K1-m
ze%xK2$_E5A5L46g`GA@EnvSa8*b1UCE6B{F`mknai?7i}dQ|iEB_FHUPi41Hd0TDT
z#4$Q+a611mJz@+0!k#%97AQ-76g#c*f|k3Fc=uH4<5J12`(gL3Aq~@yLz9stA?Osl
z*sk7p$p$bm#@?8%Q54W)6OH(V&i&oq^gsLh|DbMmVx-XC(}JxzL?3LnS$DGhQEN?F
zHw$((lUZ~A@%_rdO74{x>KghPPJ7#_^~~9nV#m{>YmJJ2`N}C@kA$fjK4FM5Bknmw
zkcG|8%W9)PBR@y4gJQC)`@$5}h#SXiIlc6woTs#S9<|*Q66L>_$VWnKqE3@Xa*j`S
zpNkrw46rf4YY#ECKE*p(!YDH0sezM4wG;A62HtKR#GB^R=gy3fn1h(Fk>AQRvT*ul
zgbRepHd0JOHEWG+<VCqxvM=`Pg!jL&$I3q$IShMNPH#Qv8&lHE%;blbiPB@;;*!Vb
zNPfW=t`)RC^>O}ueX!|zhQ*L>l-l>pvRXGeRY|S<_EOe8Ng*c4R>0S=eDx+{w=KOa
zcAnb1D)@?kyqWojEbaMp`tv;;QXExcR5YdqxS;Bl?DH;5WJurem2y6UVvVS1Agk`=
z?Ez2L7^(9l!7~Hx+b&8kerOP=7R8=H`<6(Jo(!{18o#(H0;}1Nqx;Z7`CZLx8L1b@
zE(~S&|8~aiKlW|2H(+o`<UJjCOkd?S;#BISm@Mh1Q?1pjeY=yZwxP<htth7uh}N2D
zkn)-3+wd>^6ERI9Bi^6J?$+&xc72Vc{B$hDe|{gz0O3KxJA!~Dw=0W2@tdW)wtnNJ
zpj$Dsh8&B-y*DC7B)D8sj{7raxK3nS@tP7T8idb12K4C{SS@d(e47g`Qm)WP^=#Cz
zoM$sFjx*Q0cl5$ECa5{*JL1AJTCi^H>tP}CIZ`8CbkDtjuJZXu?b-;3C(m$|4j!vv
z8fCa^7^#(!&;Y&28f#*w<{Fe^d0^Rc{tkb~zT_l>0}*e$@GGJ#lx<q@tPT7N0+r&r
zp0##&0!DRD6kv9rE7?8-8WVqXgfL@oQrt_LK1JNqf1<2{>dyEE#Drvn7*|CTnrT{C
z!5y-ah1vYwV%I#dG4^WAg>E~Lgy?oWMmVL#0=!%6SL`C3>gDfcFedT%&eMW^`>0hr
zog$;6^JOYTcJ=o{j{AB*!O(M<+Lb_XIFviilJi36+SnSa_hIU)au1<EmYvN+cE$bv
z4I&b93(OEQyc5BJBL(XAy@^(RdCLPdiR@a;SG>rA?{rBwkUon+sde@O1})d#L+&8I
zl_S?pCxmJ)=T{y<yPmnj?IN?kCa~##WltkoDef>#NU)`caF2Q;X1&nqA!sjTZ_(LQ
zD2iIA2}R9$TgKgB3eEhm@qpLpXf6Fw8nwv2K_lI12wo1QiuS_5P<q`Np0Ni76A<uP
ztO!_;)AmlY8};~wm9;DUDt97S?L|Xk-q{J3;P~<BZR|MER7Hz*fp)Qfc#mn7Y|GhF
zW1s-xJ+k>nEEjq%itTW?>Qa>uKkDUGC8ypjky<1y1fsUKbt?-@XSQfF#G(C7S^UE_
zrgP^6qF#pf3-L`Mu5shv*kO8*T&L@Un9hTHsPOn>3{JIV-lB#c1KrnL*BTpJ>yctt
zFK3A4(z>iFh<=@DPPq)CBar(Gt@a|;moGXWJog7=ir{e{iszT_Nr=CS0|ltk-)Ca~
z<la%waJ6x+BzGVeAEnG>+4nqW#tkZ5i44W|kNZEGwrmVh6TRk<Dn;gCZTHOb85K$9
zX3Q%Q!E4iCr{zcnoHa&r6%;YeOqD-eZZ10Dez{4FBcb}vniMw;pV+HH{hF9-G*W2!
z{x34L?43=V;bAQbPF&gfTTe9H-90bPC8#lks_B0xDi28guh$&?5sn7n*(7reP{euw
zuou2ewPnHIz}!JUWV0i|=<_f<txI{wn>2aPS3q9$u<qlu_@j_pdc;&hQ8LE&FJykZ
z#`}UY9r4ZJ=5nyI>yO0gP8%?a1;==>S|5j#r^&jB@g%R0I1W!{N$w~d66CSYW7hb%
zqIN_@QsO>99^|$POn1+b^}T=@F~>;xHk2Di*OTw!H|Fn!web70+30MSo|iafz2N~#
zEZR<Y<hk`oZ`77WSrW&Ip<JbQZsXQt&MsJ(A8@lPxaH~~=W@Ob=1^ZBdzT~J=$!9+
z%EU_ytnpcj*Vbae=mdOG3;Q}JsT>zQ=IQFgGL5U0p6I^Fi(*d(8Xm^@Mp?dmvq8qe
z5%nF)s#!^xUuGB2C+BY*ZMiA-ptvjV$;WG?<HQjltFy~*(ae}D;7+>WQJbBBC*wc|
z?vKxfe#DLmd*w6k)wL2|-$Xp1?)Yk+uE(9UjMK%xfv}?_`+S!f)~0c{fIblhRcA5B
z7Ucd1e73;k$5c^{&7q)~B^8Icl?NU_Oi6bsqvl<je1VyUTmX?um=aC<i1AIDuPLf{
z_{LTLeWG;xUQoqQ!-Lg`?>f%I{25NTOCIcQlvh>Da5v38>Gg&bUvC@mdvq0iH9Ajs
zUs|3qJ70`I=Wj8xPWF?Cz5DbT2B1P&lb<#Py_*voQa%vBPtW7crcf30qAk%1n77ME
zdbK^{JO%nuoE0(N0{Xo8H*;)r>w-$n^#@)@9=%tk)OsOFSaM63X&qF!<>VtkEWD4v
z7-U1Mm2d2_uF`KH>y>tRO2Mv8->Any@(k?sy{ru844x~zhs<F@&gm8QtCRF`<)Fvk
zR$VTbAR9tFrEUn-=~D_CUy}m{L-iU<3|yJp(ssw@!gQ@>%;nILaL><Kp6fSgBrb`T
z?%y|#AgUDd)jbE?FTy~ifQb$}@(w^qA#l3DW-T0+&<zjyv=tBggXL1S_{GfoNkN_R
zA}@PrBYyM|sP^7FdqMk@8De+2dq2uLw2EFXwTj=PE`kNLlWYzV6ASUEkiGy?`QYa}
zyMQ!}UWly~f=I(F$jM{8pSW+{!R&)UU()9Zn}*IfBBm3TPRY<LHjF6pNDJez@~t6=
zPyGlN;=QI7KPQy9dMaH)delGjLxYv;WK*7Uw;UO+icxYw%3CUA>UEj0NInOIh*_;~
zJsXoLmGNrlwfA-sV0)sWt490s@U(A)$YuFs+y|CDy_I3l`;)b)6YoE^RZitw>+Shk
zyebh2?kJ#^xfbK0lW6DczzruoZfjE=gV}+?U;C%2p;)2;|IBR8p4C~hIp54RC8KR-
z=CLAf&B_|L#;|8{*StyDRhV`GkAZ8eASQ@0a0Sv@SoTigILSX<hCChlIIBDrli2*8
zJw4shbE$*1Uq&jM&lNbyVlX#YY@1xR^^^oej<QobWwb^ilhG%&lyeuQA#?i7y2(SN
z?7Z6Lhf<A=gUx=Rk2D$ZmnYx|+v)<iY+DIHW`w)jhnv*aA3~RzOQ{+by;a*hnO-t7
z^StH8b!8lTUM37d$x%-69#FyWb5pG=*$l2yeN6c2a3AJMxh-ZA>SW>X8^ZIM@7Le$
zpV!F8Nn!K>smu(uZ%M1d6>PQIjgkiy2ObVBEiKK-<j)JU9G(1lRCeNT4dkp<ZOQ1X
zY5u~wNdEfx*HwuiEWp6G6pThCq8{CPD%l+rAop5$Y2dPDvcaOJ!LG`8Y7QUS(|o87
z;nQ;UK`24uJ3z$sQCJM@=3<+m>lP0f42-2T%IK&ln@|CqQ5s@AqBM{APIQ)~h+h-{
zbl_QlP^b|_kOQp5*k3|NS2;Zm>Z;yJkU1VYd*n`}*x3tPH7D>fDMVNueAala4R|`b
zf~r&EcOCqSQc(3c0T<+1V9`Qzrr=`FXj`1@!jeiA6*V6ynX%b<>+F|T1O<d2|3;Zb
zyTI866s~wBC%maC>p7QdE0RCUg`@hKRBs@0=>1DX%BC2h96(wud(V9BU@pAVWP~NG
zMqATcfx2gzBcL~*!=0%INy(nb%4+$cm-Ru+Mb`>GJ#^XlV;LCmTk9s|w*#W&%93@#
z=j>QD*COflFZrH-PoyLoh~BOGIDsPt;82~3^7ZDLX4G={S_a#FEsm-I{;2QY+rv!A
zg3onuTs~u5m+Qzrhx0<l*wCZA>r<m$o>P2q2rL{y2M&o2L4J632(pS6Io>xndl*T2
zgD<TKpQ$KW>JL&yf+KOa;QnDsl;-u*58Ec-gmK7e>Sr4%{&KEmVFr!Vq7q8dOBL6;
zz@7?j?tH6#T|uhak>WyMaGvj5qwjoB{%kmGeAwPjM0B;Gk2H?l$$wPb6F|jtxhN`8
z$&UirVU$)2{z05x4BL;^3>7L4Sw3InW2}@q&vyg#99oAdN5*%;*kp9%R8TJ)u!1SY
z7TZ-p8MheOqi^4P7-=%d+4qE1sER28m9`=W)Gy-#pl{vRg)7_-j&Bb40-{8xv@S3^
zQKqE_>0U3n@3UUK)qaDfWKfXro8Fwrlmgux(4-Oo7)En}7+g&+=eM3F4}<1pigEx~
zJr$8o*&|lc5xAOYD{@40%;P7K2Q22SbuyXJ*5ynu++=Qb-6WM*wwj8}Lw)1IC5k!q
zU7BOKEyfso-5<1S8|8@$<jODehxcTpG}NS-p~w&B57Zi-5e%+(8|j9efB(=ZV6*uP
zf!W^)IB*7>4$4$7Fn$ltY&mObQ`&I#aiu{5>$tPX6WxP1n_qyK+42`K%K%7=H?N0=
zrtJe?F^FW_e>ybLf5pg@Z$eZ~&p=N|_B$8^b_Ab?c%_1w7cueo$31u5FIvDenWkcT
zS&r5dH}#sd$_sR6mQ%Wf*#?NF%U!^fI8ZKNBq`Zb>XW78+2iXwC08mMUVnA2$cTz-
ziHgW$;3j%gnYtJX94GDmhitX67Rxm}b?3Bnx^34Sw4UEWooXbtQd)sOu+6<>#fdZJ
zb{c67KC$iU62u8GH&H-QTaWJ@yCo*g>v1;FThLW$emMyB<4j1o54dvOB>hU1C5nFA
z07${CXA?n(0^VqP!$C@4(WMG`j_M!B?2dX`i<2)3rK&V~v#-TmtGICP#FHKR8BC3H
zN8X1Jx!v@(J=eq3a#f>HlP`nVop&%MUSz0oNWwbrf>o-cK%}hSnozrBID!#!CCA^?
zHg10g<Mwc@mYO}ScH*%)-=UxCQ!ZAI_^&?PrBCke8}kq!Qidyo76=2d8lWYAn8mmO
zr58%LqINy{QoskffVLDgnOnrz-1i~?>^5;&q<{GG=;5p%Xe{FgwjEX-9|G8G?!Y4C
zv#KimM|!TC3_H0M`-j|j8*qQ06mDi`9CdD}f>gSjb)Em~yZSFO*RTGbBWc<jaDdA`
zeFfXi+yR|=SHr7C*OmMTde}}dTK6O#FK_-uCMX@C768o3zi<-Dz>EO=Sr6O}rt<*r
z4dU{_E)*CS9RSA%0ORHTW#<c}!0h;%`7MC10ww+LB(<eNe_n9i`O>dOl+=W_tuq7C
zPm0t49GePkU^PN`<yKaq*N!;6;(|E&1Z=L}G&s5M$aLHDB-V8M)$-2e7A|fAO%P1o
zZ$(*94bX?^=PLTQ#3)RML3MQ9<xZ<|yIbpYY#5L4v)d?D+S+%6%^5d6Hsfe_W~4={
zuPB|Pr2KGZ(~`TM)<ta_12l&#>LUlY^*wy%ug9^OIXr5NeN}gOwL`$bUj4yM?u+LX
zr)FH#jH0uqO?h)XB<8a-KqQJmSV0Wm%D3267!yu@;$+yB@cHJ++h#b;z;l7&jeZN;
z<t8b6RX%=u4)Tp>-C5b_J4i3wEesgdyrMe4WPwq38;UrltLqX-E_N@XiV^EsKcePS
zkC|`jXrK?XZJo!7;)@}sNR1A8+s=_WaJ!9AJ&Gl4&01MWy|;ULc3bLB%me4m*t(Xi
zyX249PNl1_k+w+AgQ%2wx3d{>*W@BkB&l4Xbg>l{3YPBH1`5S{F;53LJ7U>dDzSsf
z{3#CH87C;8LMEZ1=GFsfb8c#~VI2a9Wp)iwt~U^l)NfjXu2i>S?s^`#T4@sOUw0H-
zNzu|zWWFKA+QI$i^LQ_18gz)IDLw?@I1+`Zt!O6GTJvNdD))a0a=T&XuDD#?Rk^Z@
zRY@-DCue)6!p7x*)%2%ZgS{k3K-4gS$gZwaQ!WtxJ@44Pdf%ry0&SW89xcs{LWi$E
z{%B0uPze6m|BRhXXxloi3w*%>cO`!1=tI(NYY>*c{d^3%cX@aJ3B{LBc}cy}0Y;CU
z&J_xv=V0P<*q=n)1#f+Kp?9G}zs9W-5C-9tv%8mBa6Ba#z}!c?si}3cRy3Zjt>7b-
zkf2PIqq?nF34aU=>GPY|(s);V47|n-Jg2dinu#3Ip74f+XW(Ni3LRXY+oPpAe5m;i
z@}4#kQEb_=YyP+%wqQh0*#U}bQ^v=&D9N>t#1UwwPG}ib8<W3h@r;%YpJeuA4tLBY
zKE}rIKO3ZGBl3yqZN86~2azjZ*Foj`l&jBo1Z;-Z1Szdpik5rKSX|E$Fp-v#a)|36
zeA&#;+4X@Y*UDWzMLpNQt$sX63{iSM251#1IRq<#9X*4UF9IT;8W6v!Hl91Ot$bZv
zS0^&&H5zXTL@zGd=)Fz1dt4S1d7;O|;Og+ZO{N`0U44jq8~rRq9-rIBVNn2#eCiXk
zwT7Ci7aAUCj=2)E(?jvD5}S|0lP?HC+V*O{Kzp;;QOuzzKXh!%)NymUGOc0H_=t-@
zHywl3Z4dalM8PMdAIL5aCG^8<A%QD<l4~r%KFg$Tnu+Fab%wJ;t$v}5JojhFb&X!P
zhml=8$GtLT_Rcbn=lTJYXVYAYZps!=x$RIE!8hYfC8MAW{u-;2As%wBAXc9s`OeW#
z9+&3W_|o(2I#f8in)wu{Nu&~Q05_=X0`*{0g#7`{;y@n8spl-VE;ESZPPn{v$h<0t
zjZ*=~JG)f10aADhp|1%X2jzyif02=ls7q7_JMIZ$Rga`Us!0?oeVQ5w=Vpj@X0jvY
zetxUte!J;3sLrVa&IwTiBA!<SZU}#@AZoKK$XHhAI*Yw=L(E6E9QMHqZ4LI-Hxx$p
z8<oA$sj+4To^H)QTC+E_Th^}tQ=n6he;?Wy!?K15^8?ZFBE!-P%!6Y0$|}Tn*I2Gg
zGyRa@P2+l)V0+<{r8Gw*HN|7xB$gegKHU<CW%rN2i@MVrq}`FQ-NfVf<gn(dMdZVU
zXV2&b4wFymPj3psE)o{9u1w;jz~RbV5D+RDr`Zy^8Z~&J<4{-C=$Wi7^UA|MrV_%M
ze3jLURUtedLW!X(fso@<YqQCAt_3xBTjW<17HzBZeUqG*ct88K_jXuNQN*jvo;M5^
z#@;_#zhUsSyxDcRR9t4$-&gr5kXt>$(PC`jI5xdV8{seICwSj)+lRe3W%(@|_T23r
zDVF-@k@H@#p6zFU(@gT@sikdoT9_D=k+2M6fwKA=@K+(fqQgB)dR!>?S7TGk&r7CK
zP;~hS-{ZQ3T?v@RG1hlMd6DqlX}f$(wszB)B&-I$aVW_h_}uqe%=H11x(gRBh_bX3
z*in~2NPT#OjWDoQym|vN=As(p%Z=K^2XjxQ%DNv*hI%G*AcJV#Evi#2wpzu063N`*
zPJS?^Yz7dQU|~>M{6`3^0s<L5!0CMOXLO9s*}8uAI3uM*izB-w!+HD}O^e5qbvgvu
z#W&@29Doje*axf%<O-mYOx+m(@Gvxe8p?3Dsx=zNbDGCJxn%s(9%#<=emH7oTcF;j
z?rZX2*TY0&#i3(B$4hTE6-Lhx1qY&;h%fV*zMN7zJ$os~0x1^4OkpP*(rK(e7Uc@r
zTT9zPwTU<*z!h^9s3yyzw71eExhHCa)Q2C7i_zOTsy5EPs`z~A(n4&_zV*iAasiV8
zUHX>ifm?O;wvz+;i*n^ykoBk<(^293f66TSD{tPq9vi-6IKljr=n3960J~d{xp=H7
zriWq$$Vm$i{=F^#XFpTZQF56WUwUz~Re^Aqw&N#J5kS6BDsA-u9H{^er;2Mp$7=s?
zCRNZM#R<+OcyMp=0MLQ~MQqCI>Ynfk*Ds9#Js6{sf;n?aFjW=M&x!wf`UQNz6C(`9
zUE-QwNvWOgfCZmw1)`Ww6ib^Ms(76TxL1h#-RdYebniR}dze|GLiaDM4CzCY<3r#r
zOwZl1w{eg#Z8*D4Rr7caHpfPr`K_2oMYMhG{Ko6@Y|)#ZOFxN9<Iz<3U=Ik7mpk?r
z^oz+z;JWrS)-kit#!;i{IUu-DyLd3*Rwwj?Hi=&0?I~g#kc!)x?s!ms@M%s8X!L3|
z2>xB#{I~S^&(Cx(5$+>1@m<+9r{V0~Hk@YO*%2A{_|0t+>J<8?`7IVN-ZcD4B(|~9
ztLMWHiUaD2;Q8uq1VzhT#K-JCvvC)#{ChHZIfID{vof<R=TrUS7Trg<?#=QL6D3iZ
z#G}z|6!?1ZBk%?2_!5UFy}!@HF(|Lg)dYtC^_c@cs{}W1m9KiZ>ZBw#rHR*&T}QgN
z%V8UXa$udmLgE0NZi=SBHz=whbGr-C_wXeSZ?)cPr$3f?i`V!X$Y%Q0lD(1jCs8q2
za`r2KN${nrm=AhOB`BH<;1v8tnfTuT(Q^St9JIP{!9~8j$$ECNcT|U_gpFT=Riqtx
zizy%qBdn{Rm~5S)G=4PSmt6^c3oFuwAx~}P?HLlGeg3i1f`R;le(szhq9AlheK7dJ
zkfkN&K$+V=ey?uywN81Aqm-b6%;#s6MMjjPq0mTu+ls!vI1w}3lAxz^QwXu=<pfb1
zf*&FsG3DfDH9UG8B-wRlT@jgM(dY2cfi;Tt5xYKp01-HSTTdusEGG&Vm#?L(>*eQZ
zvb6UGphVp#Hn-SMy3m@l?3~7+sBa4p{!j{hMG1Ek%knO&2VS1c$?)}48C`pd6PvEp
zk-UqXj;S4TSG_YTSewc!?hO>m2Ci0j-4XQ5c-SROre2)V4-Wi_9uzJYF-blZysG^G
z05N(0Gb8BFBkKPnA)Y@AaF_0u{8P1^-IBkJi@z#SU(|AU7aO(GW>Bp3I9w<admF~?
zvMnvYUakl9k&*dMjp~SZ>+@<WFH3d47^!*a$5lvL5MXrtZ`fr0P2T#i&s_BXp-Q(G
z_&-qk{-PXSH2OPQs|a<pn_Iud(msEdQ16BhO(7*8H)^Ud=LOp({jSTZ=g+OjkQ?j2
zc>jcm^gR<t>eSdUQ+E~L5XFJ`U@%ZNugl8itB{}gll$P`S>*rdqDJ`-j$>_N_!#gv
zpigPm2n8n0PtSSNh5}#s@1x~?*xcOwC#<3tOU5^inGb#nK6Y_=r$QN7QAGAt8Zw^0
z<YiTiQ^Hh9JIJ`+kN)FfUpo6c5&mz4_<#ER52oX`j(^T<-#cTae@~M)g9-tv2cCdP
zC|nD;`H)(sgs;JVFrV0-9Rma~6`3!bCNF{PL0pc|3Y7D|kh_`a{^|YWFIU+2v-8D&
zaf9<H-}ti-)PS`cv+?Jq&8+~b#|?vV*iJgIZLCj9>B1+GU)d{sk_s^;>#ukVaMdHk
z9JE930lM?>*Ghnd)qnDvro)`+B5k>F+*55J3_EfZX9p{uZ<D<O=c%u*dY5=ni!nv|
zNq(Gttj5q0!xQC<oSJ-DHtW(P;YD)=kSt;FClP4@uw9SLaDZnWroNknu4HH0;Z>}}
zhbP27bTP>>w7FI8pOt^W^8vC#t<}b+_zd8I45a`6Lt^XCSNfmdQ`h|^xCma}2bJsD
z2BiXeWnAH*kpw<qE^dzb^XmU^ya$9K>1^-%@NB7Ye9rtyBtWuWkJIM+OK0cFmeqyK
zzHat-EQL@#uFmzC^1zUHR39~Llr(_?${3~aEu*YF2O)lrC=WEc@Wv)C%5i)T2s0-C
zYr_oq5D+!c;Vx11t&qP0st!;FJ+55THT@X=L~e$@Tn*tL*3+-2#V$OG2xj3~uygGv
z1_mbi1w4XrMJG9%f<R~0=Z9i9igzFW<p0`v^e)AC{cDdrN~sUnkPBVOv>UKWy<JAH
zRqD5%bJXbIy1?bjHi&tdJ^3SPrRaV?e8mA&+qz2O#p5@T3(Ej?!BldW2TQq(aU_7|
zgwT;;IvRbKW5UilJ2C0@9}^8*78lb7+<Jc>X4sVe%VEafe(QfN+(-pKl$@{c&Zccq
z&;A&`XLIBtH+K(U(#a~R>?KaAb>^q+)SHcvP#vm}k=$i+z`uw8OVNx1TgbQ5)kf*q
zk#P%AWC)rD-*R#gzQnCJYltq+u$Dg6dbic&SPb<Ya1(QSP}S-Se!|7`<{fe9BSSaI
zeY}5|&M#_maBhNJ#Zrm@Q>KOM)2FGQiUtb3^_}mYmh5+ks`UW2Lz}Qit5~Rz#<Z*7
z6)kAOiGgc!#)ke$Wc-?N!`-fih<Bs-=x~B<Ps+)_sSK#+q7bgq4ignmU>Jh^IE8&O
zRT5~^QA2yW+RijUmm`9nIRT0-R1OFn7V!;0y<UA>CIl@XF@3_nor|vy)CcyphY6n+
z^j=1$S^}bN&4fqs7;W*NM4O90iKMJXW?PG}a<!oU-q-(M&zFc9PLC95XQco|R2c3q
zO)B1h@N0nm8WchcyLMB#NqP6c^rm(#rUVAjH}%o^9+ri7VZV(BMSE<<cR#Fg=!>iq
z8c_=>4YUNPeoYS;xWcZ{^%^a<n*6|RO~>^Q0*ci>upeYVzVa6f7MqK|w_t(msHuyH
zehA&)_cr>4<NeVycDneFzAFv8_u`tOgbRLC88+<&WGQfIBeT_nUTf2zME9nR(Z&!u
zti79!5b~ghZuZn1=@kC9Ct$cf;^dlVfxv``&NEA`1{0F!2;a9xsY5=!@(C)Vr#*nJ
zCr(mtu9*;9u?Y|&TLDJ5O?Isxp51zcz!acEY`5Da3Z(77Uj{<9f?W^Jt~Y1nM2<5^
z`ewaNd$a$r47?fuom@V4_XA^$w`9oUKYoYD{&5+s^B7->_&&D}Lza#?Q-d5vZ7=_E
z*-xU-EvzYQv=->GST)W2!mK=$(<bw$r2x}|dK_H=NM^V-0<9Qut@mO<X=)IcKQGQD
zuoUiD15y^#Uceq~l2pvF5>c4(KP}bx<C%C-g|_~`_4J(R%|#P*y4F1L(M{Qeq*m``
zQp83S4h-1o1l|SgbToI?Jm)Ca<@RKYakMCOW(9O4?Xq&FSB`Dp(|{$xaUteW8Jf$6
zmtYFJ9D6o;w=9>bpt8^jX4~^EY)<)v&!?AT-(8l-<X}<lxomaYKXLAA;w#T|zarY2
z!)qbK#O`!o2LF55{r6S*>stMXtI`&I^(RpVAeZ1^*!`0T>uKvUz2UT5kMcxE0`tKt
z01<b}B1M8=d&?J3u%*LoyyN9<J<lZ*zRhOK;a#Fhr3?FK0oRn0Kh8*iGxLAXWB<2K
z^Z&$d%k~fVyyucH6&+C5dTr}H!}r?+A<9XsCzFU5vi9}$NrrV3!`)5>)q*PM1wlGi
z4?PQFP+raNebE1i1uy&=EG~GlT|@VS7{GS-=m;-FEk1hx{#~`>OU-3({1um9X*IWE
z*B@cv4cs4L;&PjB5C5W12rbefRF!tD(XI4NA3H%w*X)5H<3Dow{!@YGUu`b@U*E-e
z-}(J9V0+-QfKzGUBfL!bO~~j+Y4CUz5Cf2`gO6$Ae-dfP`t4=mx9u%N5PUe-+;0^(
z)|HnxGfIU55|S-LGVbAB#%Qu!m7)o^-qZb@`mf7x;Xs|?x7I~EU3|9pPJAfnyaX=-
zlLBL&b+FQAj=hu{$<Gni_`vIuMkuQvyKmVG;KwB=1^}iPGZ3Q6>{ZE5^Ysa+psUoY
z#5*Kr$If3tqD_aFzrLgpj^Md<VV&JXj}`w4`~U!WX_mXNcL<|b2Z}K3`mdMG%xoFO
zQVs6<y>0&ZL)^c6pZMb=Aus4D-bQYh2}`*h;9Ka9)q;+`9tvC=nuXQK49jC`k}QY$
zo(G?uLd`4E<2Ka4T|25jc*2KIf?JY@V7;!#+y;s?X7AzLn6Ie9PFo@ZXc3}<f~AW|
z^{1kzzN`;7HN9p?5L_d=Vd5y1Q_A%;Wak96yjO?>E+h6fc9+*U#YDa}Nt*2|#q*Y*
zx2vpnV<);5M;4#IN)fjIaYLk=f?rL2!ZL0x(E0h-!aX3keR~I$Inr+Yw(FYcU|prF
z@LW!Jp}(i3poZYp+eP&>d%;BH#%=Y;z8-22Gfp6U?ep7$MT>bWm$7Xsr+_xKc6!n4
zeb{z;IF0l<p0Hc+q&5!1vNCOwi7hcCXiTDw9n-lOW6HZNNEng0aH;H7Iv?${L^ILm
z@WbJtl=bH5z>zQlZ*xtGQ*e;r-g*M7w7d+BaY{HDeLr8hldkFH-HpO;04#cdF2WbJ
ziDFFVdzXkYoNgt~^|8EaPl2?Qyo^1AMR?tQ$uj0DU&>}Z({X(Vz+i<0plpg^lszM1
zNX+HIbH?Qb@y7KN_YPA#bSkBUnVt`DOt)tP<9W|`B|FrX*c(i-lf{AJ`rzX(yBp@N
za8Hi;lR!g4*sNTI=4U#9?~511=%Wm81EwY6DBJK|Sz7reg>-QXPThsM1<F|C$io58
z<Z#xT-wIw;Bz>FUJ(xJ63xm~gZgxuo2*yPyU0XC~EaGLSdoyD?*G9F-MnTWHn?h`!
zaQJx2k7_K?^EI#s2?9J1!Dmu##~C%bf>XKCjmXV-n9E3XDu~Nx<Yo4GD0{~`JD$0d
zR&DV_vy6TBkYC}=k5$93&r_aoB)wZFn)()kvO~AGOv~&j`K=h&WfzQi(fFB)u8FeI
zT)g&@|0Pr8qv+H*3f-%s-#qDY4oJ%O1B|dx9L^kd8Ldrw{h;f5ozwt_wC=q_YBpov
z?lP5mMvirk!08z<E%-6`1wc}SJak#HCE2;Qw8K&=+Q&Y-a6A8@DqrFuZ&7FQfmx@Y
z`%Rxi2B_2c$ttV{)+-thY(rJ67irRDu_L{8IIk|AWk^y#CukNa!8Af#J=Qwb29(Z%
z!(juZm?T15%>iCCuD%pN-$X1y7XEzQ4F2^U0hCb%uPl|T3+I+<?)se@?$+kHuPaTl
z>)2x3lg)XcD2u)8&$q(sj9Re7H0w3Bx*bWGzL@Xv5*%e7W0iYN;YZ^%*9OISVjtra
zkf&V;YJccyr;T{x`n|C-cL(R1XQ=8f6242G*HZYO1V$YO>czBiVMeh~oWN9zQh{rR
zmC0q@d=J!i9l}24&75u9SDjCHSJsu^$4w@emWnEOH-d6tY*1-{lcN_5p!V^CDPjgj
z^--eU3|3ZFnrx}UDd|R4#*RPIRnBYiF&+9KfUC}K2d(cv(gh;n1<EOp9LygDZyLt>
zuzg5W%q>Zc4Zrb7G5E>tn3Y;DL``c-S~p@Iyi(Y)=XdQp($dP!aGvGirD4`A;|p(u
zZ(-SN(nUVyz3{Tlm$evZef;|5@eeE-g3p2QU~gjru=-!h3&(tdn2qZvWtyuZcfSVB
zKT{!2iMndie$(K5F_|eJR)sLZj5v3)D7so7d`#8if}-oDPZv8bVjnV!r;T(>q712u
zP-ElKy-Op7Y}M1V4M!jil*pVNwJpIBXJq5cYK%-#mZi=fP$!jk2J({YceB$Yd)F*#
z5Y9j~7Gi?&L+z;H%aqwL{{B<rTH~<l6D1z&(79fq1-_l6alTHCa>&GiyGmXC5zj=L
zWX7ZBs%pwTajb?vr9qV^aP?4ZeEf1E+Td=2Rc(&@@!l<mOq*Y7;L0Ob>1q!ZE#g}O
zs@TGw15OAGS<&d^giKm}rz|(_SjV8}C0OoW_{M3ez3-zhdDxfohNquJlQEoFdmKAv
z6WYc;R#ET;q7wpe4_oz3*z&wV^6jhX0wP|A!TfX7mxwrmY%OkCyYRpbkDTt|;eetF
zr_-7T9O<Qlx#A`lBO_|u+FC_es<#zc>=uJ>UcFd%2ZgNA8y*jdhqqn8)UQiM?VV^T
zOJXial4?I5rIv};57Ecot@M<Nd>NthO1NVuOWw2}OoR9;hyXzHT{EDVJ!+u_O)?s7
zZuTs?160Mo3;*il!y)mFu!rT;=+rNHfJ6&l_&rQ~fog=%4b58N*H7J_1yGPi<b(b8
z>j#1nGSCiieO>d4lCR{f9l9{pUHa(WTdaciq%22X-Nz3zb#kBZmq4jkEccORzIzV%
z0{95sd>Z=l7le!Zc|A6a^bPNkh%@^e!CtST4I(L-r-j}>xDjP|3f+Oc5&gFQ#A*pA
zC#VlL_zv$_o)H4nB_Na)Jy<d5#Nlv(jY|^D$t9p5H#v36NP)TcM|MugVUK9g5>wn_
z8KT8_EnNmc_DL>x@fHuJ1myU<r@4|d>@Y;(mY=9NDHQf{;hV+0P`(^cL27fy7c0iQ
zQat!e$;{+5VNWArPlXlih|UE*Zrupf-w3=<LHC!^qUUq23E+=71SnZ55ARtnY1DFQ
z)Dr#gym9fCd&pQFF5dOoJoY}h@Pl$qyF&gOrK5dOWXb}i4;~mYx!xsn0s!_yDxhFq
z!2tU%SkuA-kQWPL0jkdv*!L>)CIl|n6Sngz`;4kw@*J$cI2Shk4s<4_FCG;+3Q#Q4
zZ`oL`c>Z8s1-7pTo!#7-C>?^(0=rlMpG>U-0i2SRub_st+9gB&UxQ3^-FEI8L3?v|
z_9u~@MCnhW0=E*ZNSiD^7rfC19nMBeZzIken*eZ2<oDp#zk}lazAx!-uPI+aCu|Qr
zK#Z_m>j?l1%iJlzaQ;$30FDwLJ!eWt)UX=aQv-bCR#9?<o<)G@skPMP`EvPC4GQ{6
zG}D?7+v4}8`>-wZ`&{66VzdD6^Wr~xDE}Eu^g%i8KKw}}J53lYj0A|&OKs!t9|I?a
z3>dEeX$i=`t^0qvaL{v5T-!~E9464)doR%NNOC&0uFg=Vlt556QvTF_+AG>VB>m3h
zddY`Z`B#Q&3_X!b_VB8mWYY+Y96AhRx$cHJE(aU9Da^*rthT69z0Mk3_#jldm^5Qr
zZ~U!M;u@*p%=(qP@0zt_Pi>#Lly?FF@74byDfW+G@ZW~czxqr*|C=E3EzeWBsX}<$
z5bPzLY9~<Kpa+0s>)wBLZ~vL~$!sRLvXX1EEdUYNUn({*uX%#Nuk7ht1r9IzV%#4S
z0z|A2y3s$0Qip9`EDsi_<x~^nMXJxTMApxYO?Rdbi`leZQt~EA?h#;@1B*vEVIq#x
zXdr=_{;x@(;6n(Eu-@LD16KZfBIGK@+J|<td)VYEXPAIB*OKv?3roti2JS9KZ&1tA
zdH@#GBb=uMvM17Ugwvw@`l?1}bOf9rVB1q?ZpzS;t0ugrMaffP#?eE+s?OQFSSmEK
zT<N*XUNp0P6*>y&yFwV2F(m{;Six5P#*i4pAY`O&=|{Twv+n&n9(G^_!V$?eG%Y^F
zhn-WMZVrr&4+h{QCfh;J<HhT^bcbMby>#^a@-=}9v&{b3PfnY#r3V^VQJXqya`ZQM
zR^Vr~sk-%RKC&@Hv8=;7M_$ckh2}x-pvW$B_C<X4FZ+aj<uug0nW%DJjaBS@OQ;d!
zG*QC#WS{@R>S-`R5EEc~9&@S@VmVge@VY~OL$q+w`++JS&1*Kz8~jdWV<~&@uMH8v
zE+8^RHzfRi+FJLV)f{VWmYd~#n7XsIp|X2!X>PhS<q%^qfJ$#L&<)vEiT6cUfPvn(
zi?B|E@!A&H+*W>icQ$IJuCzwow(OCojKp%X9qDVefXr+h;||*V<5ceCc<0gG1kNEB
zar{K;{L6l^avopO0=B4xF4Ey3XNc7M!Z6zo^mTnR=RMac7kAfaGJTbPLwn~JB1DBm
z#4pW?w1(0XoC331ei2-KGP%al*2ulq$>WQIX%=z%UyUEWEk4&Tq$IjeZEdRE_PWdA
zIc?X=hO%d(ujQSHvTnj-5UWezj|e=qpDjkJ8L*v5*#Vt{IUIy!lgbW_&_m?L3dRpS
z_hQS21t!MBBANzbKUNd@32*3Y=?E}~vuBGyKiAj!g6r!02H;L{i&UYinjORn9lcr{
zDBIk+v(NE<((`l6w`Ab&c<+|=kqQxU9K+KT-LUk~&%k+tm=<HEdn`DIm&(iTjV}z=
zPt%9_uD{YSCMD%KU%Ns#Q;iP9IIPpk_1RqfQdl=7lj>KWa8Ti3?4%x(7|&yPShX!&
z$kwM#qQj*Ccu#0)bn9Um-vb!Z@iv|oE<N6M5yP&HDK3FHR#zuLjLXa=GM_$id=~q-
zP5(Tv(1V+yH%S$JzO`PjcxTbtn{a3v3$ue0VRm`~vZGJ*F8Sw7?HH#QB#P=7b$Pr3
zDgsX<K2LInxqSk=!>MNcS<vDW5T5)6t3`_~1DDqt;tK_C;UxRu46SqQZ%%J8ol#k_
zbR)vD02i4REP@2HJqq^@oQm9t74SE!k>J10C0%xPP+=^Se_Y#Fh#|Wp8?c!I95X7J
z0d7ZL)pQ?>3H+58K>^++F9s}DuYa(GILnKNhaD>^bTt{U?$$IryFV~_)br>inKLz)
z)_F>y@+W7AP7tXZg7H`~=5orrME~rr)Uml)T|pnswR8s!imSJ!M6bnE-WcH&e#fpA
zK&sIL<e;ADw9e&dAaet|Bu5PAU_ADY%MAy~v@gj$tlS8F)<4?P9-B)WN-0efw495<
zc3cKTjd)SMv2b&1=a0RK(cKMt{9@s*`;Sx%EWdU=*k5N`YZ6!09X|<VJ03Qf5}j8_
zxgvHgXW&vqKq(&yE9kc9I`H2xLesm|Fd*=7&88mqh~?8ne_37>XinX}9w{fX`I<hN
zX65M%7O^A`rQ5@g{SP|Jbzne(F&}7#nOzN!+w91uJ)Ruhg<ogF-W%_`s%*1*M0;e#
z*&d%XG_p+6Hf|fgd=9bdxQye@?yjp_x-y;<r6peIDPWz?-cqyy;TtS6PQR3Tezv1)
z?*JmOvV7j3?oN0YlzL3uTL-vE>uVw(#y{;f3nlfsuIzZzQ87{H`A?!A&)L%zfOtDu
z?-^;s5pLX26VpT9|D3xqmTIv^o0>0$w%_3HM%K-nd{k;K4g4d~;FX*uR*V{D*Xr6#
zXl8tOgQVEFPU6JwgwvwLH-jqqs`Jg6)+u$B#81pF<0&z|Sv>JTWE5tHdh)QMBF$d1
z3-M-cS0&4LbN%U&oc-yJ4Jq;zEr@@oe0gVUEr_vm?MOdZPkQl$DzC-Tmr}$o&cF^(
zu&5Zc*6e@awtaS`<LKs9ZOiPp|B!VA?EoPm*;Y1v3#iy1kgsg2sh*f*`t~uczd<MN
zBPqul1zjVrxRlRZV2T#v9Ej-@RCWt(ps_X#9ToGGpc#`^JXcwyPwyV})Dy%_+Sdp-
zEYxMz0ZZjUY5@h|yGY3{Nw%$}8>1~+YnIqyxzDo{ABx{bJ00ct>tb-Vi!V0didO+^
z18p^?r%{$9z%*`5^RK2pO0W(v*rVL3(_#=h2(BeN+6wrb21Y{@id@aLcoQ!4jKkTG
zx5yq*vA(`_PH$mkHy`Q`_#{_Og<`dx4g5zs61oZ?8sj|OjPLj@k~MGCH7b!3cN=6k
zpIUBo*vhd5|3=-2JWEr9auR;avfl4h0bUBViq$R|gmgKyZ(51#O)bd8Z>diLH(1P|
zetFewe{L8XX>%p%mesfTHQ=K1k2ajF(IK_0nmFO4Fb>zRt8I*ODa(IyH^1AtC&!WK
zbp<guhv_$sy#eeqH?9j)jA$2phW^Px)Z9_|yK!^)JMS(zk{j*)a`qfY-F)9Xo={do
zJHb^sgrT-e6N+l<)aWT?UQCBwnpxu!O+qKJg!sDM(CbeYT6z-r8ze8PPOAlOKPokP
zbLm{pqf1#jwsXRB!kmL#I$VEwNu8{ip_qZF(XYABBK%zDxh%{%*ZIA~{X^$NSHh?|
z;C=UDV{~H@pA!rC@2L*t4wPcm*wu)ig(qnwnWCevN0EG`6O0$EYVm~m<zgglCu_C<
zDzmuqw!bZL-2lhE!6&nnWTe68PT-%wvv|PJ8mJ|oD<fQS0dcs?q)u>c-*~)<BxJ1z
z)x=(0mhNbGw9!<3Idfd_+6r$B9D^HBuFGa8DEl+6qm!^Vpkswv6Z;SiyQe{UZW)iX
z7_4dn-`!rxdm3gGrJ~-=z02l4XnC-czE@VB-_nxup#J6M|A)Qz4r^*n_l7}4RJt^g
z5|j=C(xnDent+H3NR85qfOM2VC<4+u2q>X<2py$GdK2j#DWOS|1QY@(`(5riGvDml
z`^=d+@0s_z-tU_~FvRQ0%32TWS@(0__pfO2Bn0PPi>{Sq2qalc`p787B76c|CUaVa
zea0@|>YWLZALql38dG>J^1W_p+=p=+x!5(-G^8FJ7z8h#b>K<-Rm%R;r+Xx1iFzat
zC{ysje0>61Wv}+Hnnvk54*0?q%tbA2Zr;3kwR);W?<flET7kDj%cK<eDxeI8J3C!V
zRekvKiUtN;EMu(ILq0{?-&1uJ4{&dex6vX^rpyBL@qihApC{h)`m?nS3G)+)E}xqp
z`;SQc4AqlRU>YYJvZ~(HT*KDKKf`yUNBr%sRcziNBl%z!@JEDJ<MfFmFOeVTm%`<J
z!o%K44@co|I~9KEh1Q72c06n_&O2C-a}pFHQ(oWvmT{UptUvArq+qkhZe}OGU)JT@
z7QO={tLOHV5kJqusTHPDH6Dh5m6)17uAUR+NxyW-BifZm)Lne4!gyQWB@c;H{w53;
zV{0ign$Jp~^ZiUs>ZX|S{Pm-0hI!+TX<7GjdOlf&Dg)M=tULh36}YX0ultrvy6N@&
zNlN%!08m=t%3L~K;U@+=X?K7QgzT4oY*G-Tm@<KA(8qU7d=KCzvoprf;rPf5S;C9<
zUr2JhFU=IcV`9`dPG(wf1N8I~TtKHg!0tLPtM%@0F&~q?meyVq`f3tDX+8VQ?-E)>
zP1zr0zUSg<ivKqMGwKG{gJPBWeeF>2b_g|8dfIA~4`Z8GGGkW%NyN+1U~knRQQIp~
zOgd-LIzqd6@!W2u>LL8~G3YYP2F&Ek(klz6uZMtqjk>SwHAGYb6v0~^v<-2o8`FJv
z#Gjq-;$qYK$iGqa)QrFmSYM%iV(|tmpt*4%BvUAw!~BZv+l0rT02-lLtF%mwi6#m=
zcf9(04qM5bXUZDi^_ensOfSk|H!-HGcjWn_y~L=o4?7cQeXVrh_w0}P3Y=>E9~fwj
zsTu1xd}MQxk>jR4)PD*$1Wqe}5im>j1ByR<IGnb6+lw()e96^?sd1&2JnMm2nSM4g
zA<5|I;0p)v+2&RCJWOx<=~4thO%f3`=Qf{^3XE@-Z!%7qzq&w59^x+V1_4^h?Q(6L
zJ2F81uZ0zJ4sykbVQX64F`+PUnD)r}x-{kM<I)og7p({Q{^K`UZ;es|1|XEm*YS7J
zEV0=mZG0BK3@TCGF9+tk^G&;43YuJaQc*9?5u!fGXfTLA?|L_HY#!?IV=5AViq*&I
zV)L{JiHdd@1=RIb3yemm-_S$NW*C1T&S0-RvZ3RVfl5xx*N~dnb6O_{!U=le$olT-
z4iFY{fVXM^%CG%(Yty;t(^*xT_^OjL%UcH;ZzWSm7007xRQvf%<Zf+ly<<8V#)KTD
zV!Uf{99Y@a&CbD~+?^{uc?BmCY%Z@gMNQZ;y{}iiiB70t5BC014vTfe2N2$Q`ctef
zHey_kEws%IJE&oI{RAFr$@u7A(GfvK$Y$SqZ*(>PYVCXFy-fi#92u&&7qI9RobW_a
zwZ)bD!n;nl6*B@7@*eklxhc9;8S_eT#A~ptrG2uCXF4l$z8v5`*2RS?i#pB41@n&9
zyPo&$)C$=%k{A78Kh<bC@m}ryy;97j0a>z%*c+<KwX4H)JDiN@)I1aind*Ct5+PBz
zw{Wg0|CrUw@ci(>`lHnLooFv<jM(l;xhh8XC<jvn<jQPYfN4&sFKSpVT(chCvS<k@
zOisEU1M}RtnFk+=?<uyDxbDcLeTi`-et5*#Cie28!&?pZC$*C7<Yb9jN+gJ=rK44>
z4?{OXYX!n67HIq4$r|G}Hjm>-H+4vs*;m*-Ie{(-r;@8ICw!0VM$$O8iDMtG>Ye|p
zTl-*I%3K}TSvhwd%Z#b9E@<|!hRa?4fe3`sW8fcwz^jW>z=m{Qsqx*^MSnk~-VcY>
zistco1)&@)Fv%ur41q3JY~S4~+++_F(4-yD1mwZz?Ge}4we~jE66Ls`^nL9V8tCK-
zrcq_5e>gpethUJUEBT39rvB!NAC{J?4}4K>+|vAlQvhxHu9@CDHt0D8iP40?^*<v^
zf4<V5Cln*s7L@sb+Enw;O}WCfE?R-8`ICc%W2r+Bcw#@m{|ng9SJ3SV``<ilAmD&W
z3;t<5D^AgOzgwBPElGE>?u*=d!-v;_n430i$!`yb&eLYwBR-ccog<E0aACPAdabd|
zeiYHDQ6tm(v1iWC*G4a&4;X0}yM+mh?`IVn=3drpudZjj(4vOB?K`luf@p`#Wpu-J
z&=u?3CcX_Faz1k!4#nbiEVLU!@gkLvn4S-)KCjpiyE<b8zP*z$j`vy7eb{4y?8H&-
z%R)vf_9_w1EhQ`iVTV46i&g?Ljn7<|A}+7d2yY@|0;Dj`)4h7Hz!Xs05;KQHN0!>1
zxrK)H^ThsfI`}o4_NN>Z`D)EU@Q35;h_iSGOh7%(7-~9_jYXi$W%hN(ziZhb8=M}!
zZ)VYNE_V+h)s2r!RTNcOm3nM9#|&Zp;RK&c1hf_?x6st`(;b5-@)oK^xA-r;&g)FL
z=I86R)7gleox|HbL=Ye~4s#_yRM7md$2-zk;54J6Kj>YLGT%7UWM{VZl@CNID1YSW
zB)_#LO33$tkxRQqz$k$ngqEH-oCU^I?8kbx8g*iPlQfzq3<Abo$~=z_ZkZ_4pdP;s
zd>`%n=rd&%=%qg7Z2${=6|(1qLa_UW-@--#@>}i7m!`y|MEI68gC*TwxY^USPJa5-
z3-xP=kLo<r$NIb*=qS&wqL5v!rbK2$dt8)J^)}EffLv2=suTB5sHYU}vre4zj*6)X
z%uJ6bpTn%W4K`JQT&6iz_c^ez6-205iybQLo#zfoGjaHAnnZ11JNKPfUfb*6Zi%VB
z57b0_b`7>*ClmlKc)&L`19v3dH{~0PSlB~9znSk}>vpWw-zQFe)9lKK{ybmcV;@P9
zWg_IvBQD``x_P9IEh@#^5VD}4RV1DErsLs!zq3EzmqP6*Ig2mrT+k0`PvZAJ4n83r
zp}x2&m=`c(aXG0+9`vf^#c~`YUJ1g`7w_QHcC{*YY6`V7`B>#slGggbe4xS8>99F<
z;hh0|KXw#z2GGu(?t+s+#jxEgF9Mjb8hRM=hBDXk9kM<8?ZyUPe3bAD?uX7Ub}?^K
zv&_3L1<5{NDx8p#T!*~|*l2ODTBuk3Z?Ei!<HZraho&PHiN{8OOM8SD;hoEx)sPs^
zi{vmW?)&%Tcr07vR*bs!D4;fIb9gMC+Jm<z)7wJEIM<#p*f)0eX{szqS<jNNmC(yF
ziFm!v#f#XemHk=x(Q9mTF4VYw!V~Apxt-u7uU^3s)4f+Qp?uGT1g(DH8ZrMwC7fcN
zB1Nnb$F@^Zj&~#ETGx*3D=HP5JI<-+F%{;J1aH~31^c{PS7EWzuD3auan;2pZ7yir
zYkW0l)PDeoB>aVs@!zpvI7vr~-NUZpZuMfQoZ!sTt~6M`PG!+4si>QE$w6WZw-3cK
zeXhITh!nqWDQ(6Ow%}2nl27q7rqwRHUdBJeOb08=;mME~^FgQv*2W#8HNBTdX9iKd
z=@%uPcz$9XJ|Ny-fBu5dGpYfORyHF-QJOv>0>FepzcMsAy^KvxjkjUSP;xQC(VwM`
zJoCjSgs$|_x%pt`DDvS_yg7!UeerC|2mmr=as)knk5SLzOe+#gx!&f#mR+OqW;EDZ
z@Z@c2>_*G+>Sp(l#`00zimvIsswjQpB1SYVS0}NSug~>MO8zZm&-=61^xMKKVsjcR
zh_0Jnh}mfkj9r(ArLK|wlvEj8TF^43Q*k@(KH`ZLy=_WCLZ9f}-HyGFm%k6s9d;}V
zqwPK-!?QT5F;qlB40&U$r<GH&Q-M}0N~_RUt-V3BKjLxeyFBmq_RoSdw4;&2hjyr4
z&=s(quH8ovi!?maq%m@360h|%Z)Z+h)s-g0+4VU6%I77rLGk3dbX$*vbP{T~7Iq6m
zfs&0@R`7K?GQmR1;TL4`3L20VP9s9PKF)TGl?m<7qPLmW5^q#?U1C;ya-QlUu%t&J
zXYJ?qyTxV)R{_%oX6#`1TwEOf0)}o;wa~)R8`DaB2cwzR<ZF&e{#2QNFpAKxB8ht~
zy00t)RU;HD3*h<D)3H2gWT@1j(#37#oM>H`>&Cpe_~RErxv%f7)s5Y-lo3!Hlswb%
z;rIs>f})*=bTmn~6(E4{PK&py@O`M3W$)@WuPdS!9dMiJ1>N9vcg4Gp_Wc6|_Opb+
zM@|_2Sg1Ir2o<-xzY1g6=9CEOC@I{QS1%|&advlh%{k<awS%NbDwLMU^--y));rxk
zLSS1lrtK!2c`G1ts9f9BGwCu(SFzW*v|e_F-R=nXLv1H#_C`_X=}qA}LH2Jqh0KFD
zC$nfU-a)HN7q^t9(e)#(W75s`m3uZMF{5{L!X6!zONKoR&Zkr1id-h%fV!Z4+MOl!
znlS9LbA<&J_nhm7__gYtzG&Qkr(&z^YebUr@I#bDF}t1HeLx~3iy4gwZlmy(?Bu*S
zbqzxkWmD9{HSvVps(DenrCq!4p<nH#%-At+Of>CVq$A4*><i4!3d<fjit@q_v9rF9
z9lft)n#{JYO<i)NOAh{8D%im)bJfyaUf}x22io_Qc2<n`=vTmzoP2<65noa%R<B21
zsp-oX`?<-g#wO-Z22kvPVnRRjhW-bLVsub~1r9Jo62$fco|CZp&5uZCym(C;Tt`bf
zM%397C0D-db-wcYtDei;DTZ2iuC|QwwX;d`v4&kFX5Sy_h!n{GOFII>x`RA`Ez$$*
zN}qDr*4XE^Prsw6sf-@k{54tbZ(S)Ge`{OekS$+9MpOZ8Cj4##dDq3M@^`;|JDdGL
z_6zu~i8`)mOOy7-Z#*eJfPshlZ#*lEKD;sbNkxPFYV?ct4Z({4Ah0e4V$;`tA-U4x
z<blSm#5SWcsr+jed1KG0dk-p<-<{d7YuE>bEbDxtx|FXr)=kxO+e@=H^uORbyrm_G
z($ae3?aI|R%WLF8(nSughOprod@!^tF31&g1(5UmQ0vCCtDYnB15sJ?1J1*(0lW?-
z-%5Z=9bI>E_xX^AV+EE`@*E>(z43^u)y)X>z-2mC^>yxR$Rc9Q<GW1mZ!J6Lci89l
z0xS&rN4JKCMtA&ab2`<x<~^!0_tL~drAs_7d^<ZmrE0imM55{u-G5G1A%o#SZuQbR
z{dh9jZNzTx>fY^M04`VYtv-V4cur^k>Gg_1O>P5EOTTxq*;#80kl3c-NNHMnGn0kw
ztDGB)bCi1GEKZuKeb>T$*_j`waQBg2Ree@%>pBCTb^3Uv2ngSy%nfsE>Pz=0H~nwf
zo9^G?eWW=WZdJ{gFq24otreVqk0wTxGv~)tD^h-a-%r^O=Dmb-{&5jq$0D<-S_FMG
zfp8Up3V41Uj!ZL3>^PdxG`L#VIDb8@RCB@7$tNan*qG<h=GkdiSHJ}0^R)4V^d8`k
zXfA(Jal7y<9+@K3$#>wRV_bE{f45k_ITG`Zztcjp9*}bBd9hn~B@E#w5=wj*)W&hi
z@bO-R{?ubmsdP-L(194bxFXMWwUta4b3KD8rb9UgWtXaTRdaD&YK!}|;?d~Po3-ag
zBsaL(VyCzSiQ*-<GQDHkh>g=k(sgzB?U&XtdMLeExDCeA5m3jlG5wT!sN+M+=fJ`J
zc>RH_`h5Pq%V)^WJg0GVrS?5;C*IKhpeS3a25k^#`()(4tCig1;zN3H<7^oX4Xq+O
zbha2P)_qzOu6(tZkW$>2u~Cs@Hm4CVs?;^o&YPV+qy8zIl!2p&ijHL9{Kp@9SL|6B
zeBs?!x+g0=2jLe6tq)nx3Vp88*<(#BeF{;iKG@`L)v218ir6^-)L><gsUM}e?mR3V
z1^ms(x=e-|!@rw9+%uX^c!ADszhQi}^G1TT_$z^14Q$>Q&eHfI?jSOVOiqFXL@pxw
z8zQqy*5pZ5jqFRa)C<>cl}W8w^Ntxg=d__$v<yfZ0+K7C2D}$cz|+eyf?D<7K)>1J
z3i<ygxb)AT|342hP1sSBYqHH$VPDJySd`~hiR}qLai$1W<&YxmG<<6YxZgvc>-~po
zrbY1VwjW@dY4kMV)WcxrN?m8%A%2^J!%_VK_gerX6eFe#@XatqH~R~p{X%k6iu9L}
ze?`ZXmCHLORc2<5?gDH3uG=CqY5xoYFX6LSbqsH~7bJu9Vp?dVF43{@I{e1-g{JmW
z0x>zAYNlt6C%*03Q{mGtL@Ls8e^z7Vc+VVtf)_z0Gs}N@{X#0nLW)``dKXNMZX-#(
zrt*Z8+}t3g$Iv0C$ZGPtv(;S;O_CkL-YCKGig(^cGgC7alxAU=0aa}D2deV(T#;jl
z&7~txLM~Ada7$*a1lvO992cK1V`Q8w)wfc+ZLMm)jbhTD708JO+R&gFHF;u?0pGu9
z-<$H>F^6b2MIQAW@nl?BapQT=%|#U(n-7TV(W$mGefNdlDz0pl(P;+6uT+4j^A|<L
zpPBD}{v*T|73HUCg4#9+<LNP<5sH3GmDFoOlJUC_38QgGtAzJIejx$v?Bhg<g9w>2
zztSPXyW(wmZ|l@=#x$Ka)(;c&G1v3<-Alc{+4i*hj>k#8nmyddGdiItI%(%9R|_MS
z^@1z_Y{;wdo>X^0EpgFtE&zhN>wZha;SxW?3x4j;;BUeDs)!{}_!2EqWD8u=kcl2P
z#awR0MuEaWx>w#?iOqU3H`uRa!i=e#QwD#yXEsj{A*&#}EZG5{uAB{!!PGt9Mlwi0
zc2iuvq*kcLby880nPYh~RsXnc8=o$AstS>NkoWd&dU8764@<cM>!l4{s;63_l3rp1
zU;z1;AA^I>nufj~_A+&)UB8&%bE%oJX}boV5=W)VW3)Yu7dOH)77=3su#3=3fO*S`
zXhuF0n>qf7tcM5PODTOdhBq$AYltfsvRU>RP7#&!^8vqj8FDE0N$y}D%ioiO)Bj4m
zjL`rr!XDmg4k6MhMEQw;9)CCsWPU2QzuifXJH7KfG;T0mb)c>1Y7$e1s>g;H))Ijg
z)FZINiI0AOiIN(>KZ&_*!5O)@ZqPq6QlQEM6bVLmL(mW4r*dF?$u<mc8}~P#gw#*y
zKiL2?!d4=Y%LU-??y!@G00;;>1p1o~BAtqJ+dl;e0K(u*u=cyKufR^>pa>|hU^2je
z``LhLeYKD`czvKtCImo9lK~7}|1Tu#dVq$=Dq!;Rx1T3e=0;J7WAlLCEkMX87X&KC
zNxzT`WEBBjJRnW}{V}9*=4#9|Ru~Q9<uc+$;HVn>zmTLT0e`FL(LXwf(@|68b@tvh
zaHrlZ3-RKG6~yaBCJZPBY)`ufr`<BCh*`|DBIFe#=XRIh@nlgW=nN0Jsoq<Xe!kEh
z&;p>Eo}t=!064Hb#)uG%Kz_eHy$avW!I|$s$bg%h0M=;%+ycyfZjJG_X6Sl<rXUW@
z@^&gP$N~CZtPL`zByk-`E9KuFpcjn3w<kaRm=No!#IGMWRM(nj7;N2dKHBdkR5cK<
zuFB07vh}EeXF1@auO7;7PXV*v32UH;t9zp;xWYku&$mUcs9?a{%)C7J5yRclpn8pr
z-P_L21EmZuT8H4+ECsYtFL<uj1gn67_viysT6%$d_w^i4Bj>11ANqEdacYQ56~$%R
zN4;gep~P}zH&KbSLB<gU2#NUnwMC(LE#K3|5LZ5Mli)R%#Q9J0!FzSrp2F5^<mPcB
zPO;dSLI7FhPN?w$o)bVH<&(*Dr&z>M{LP7P&y>HMODhq4^z^FNa|!*KQ1f|S^A9u2
zZMipT*_=SNOS`}w<FvBb1)t^bPPOrJww>3)Fozd8LY#s(I-t}WEIv<EsKc%sSPOh4
zn_fgZS-VC+>h;=(XAgU2B|6co7Lk6qdHVu;L*fCwTrBGq*-KX9>UnH#8;Rwn-(IGL
zwcQ|gm@Gv@C0F9e+gFA(ET+HR@usxNyXw2HCL!D$<+k=Vkj0Jqee8|Ph@^m}m*FAb
z0=S`^0GR&6Hz`o~fjPv&e8<~+y|QBeT9k8(zFD@<E9R?jh1gdfhtvvrv95vYk9Uzc
zFkTKDfp)+Pu4pdnF=Jyz#O{yfX6;$F)@MAP9N6_AZDJ*P$c9)12?rgVrr{a6+q14<
zU*Ke+GH9a^U=C`(n{#)cS~FL@NYq~M5pmUITetIG`(evhiRXaKq-kQKY*u%W(FfS$
zZZ~fK;GOP_nm*@7p=)&(p7(`@tC@Uc%-$}OMLWXUb-p<x0w}OF2cI8hEy7qG+%<Tf
zv2tAbj4l(V3O+A)_PH9zeGUygqjQT5s9i+HpH<8jUH-DW8gs|QWA}qUKb4uCLzhh;
zl{U%c%4H)r6C`=t*?=w>U1c3@1gLhmr{}mcsrUK4l#(y)UO{mSi``N&Hvq{nDn=x?
z+XIj9OFd$93@g(gBkCkE9Q)78y=E<#kddE_clXjsdgpkf=`tfa0x3D~$pmYxZ|*Sw
z1WF<gFmuzUgjl;eSY+DiUbQb?i7~ar%}%JUM*4F*qmWPNq0dSKi$r|s3>^m`kwCi~
z*GUPB23>?|(L^=sV?$9q_0zmK-FBR{z*Kzbob^0QUzaUcc$>$N|BnJZ304aYT8WDj
zQD(FU&$u~Vu^Id#HdpBa?Fn1FA1*5~U_?6qZB=fw9tu2^$geggsW)O(G0Lb5pu6a}
z$ft=%2J6>i)no6Q3D~tCoTnls!AvHDO1ZEr=)6^pPCa%#G_uS4^mOZ`!SLLxYijmT
zEFb;8G1Ft0gC%y4NO>+mj?Z#<7Ji42qfFro>jHDyz#-_x5Z*6o;ht|QHyeX5x13~c
zIN;ON9jr5SOK(&0Xv$d#&pX!*LS~Ui2X*1PFP!oGm_c1nnHBTgxW`rP)Jci69LJX`
z+~<P}rz#JL7XT74U)oCCDefm;v&Li_=?uU<Og3WzE)1by=hSH<i69GAl&+{6^``LF
zCbW#<D)syB4=L(C^2KD{Qx8sqHbC{0fLu6jEEvF!+0NgL>5gs;?>haeWg!!%S7UXB
zSx6={{-m@h32D#fk#x!nc?&RN+bWTu0AZ*Op=`fArnMfQ+Qu=Gn=;s1QSnxgcf>4E
z=F=YA8tI`cXD(7*Q2ApVBO$ki2_uZ+Zy%=fRal{jfUVGBymOB9svWbQRT1C=O_9cJ
zz6NhyLphZx6smlO0xbS8IOOLK|DzG_B(5uJX{MQYQH$yA!7xtKHHO=AkWf@|knIkh
zIA(HEBSgZM3ye&ZVus@O_}5Ie&$_b1=3c&;iOw!vz8y=!gj4uC{Y}2Qv%@`Ltfd%M
z5J6z?B^pTU{TiiN@Ql;MSSw(wkO9OjR>b_a&_ysEY~?j_xe)o?19b9e5{w;kR|XMc
zz{`0)8D>6LeD+w6>?sG%z#JTJlbeUIpVwRqb~>*4n>K0pg<8xnB>zN|^Je+)#8DqT
zLNpNlWzvqOHI9<*UrhmRv`z3sQi3)Fug-G)P(LxvS)QJQ*WFBp`Vcbi>>{`@d?g=}
znI0_7OdE4u(q#QA;kNCxyZuK}OJ={rCR1!8_flR|Y%Q@O?SMRk#-XI#(ff2N|LDl|
zW<3i{)tx&59>S%}Ukkz(R(Yrr(pCq7wO{YrrALl^u)p%TY*~y1D{0a=CGAA8eQcF+
z;HPGpIx??*z*C*Ja5g9Z!Fkst67U3)N%;b>(ffoAMNjv_=fSf`G(ws(YQL5M|BSQC
zdYvhI60z<)|8@_$nT_x%eIFMIbI$bF>)@nE5AU(8^sT}lW3@{8a@I6^!1a$Id$lc%
zmx>ocJ98BGu4-BbkiSgZtw#gck(Kav5UqX2A<rV$#3kK_M(^%z!0%679+qrg+xJ?a
zYk2M*{p5*}EZ~&si2-#Fzn%-I#W3_ffZ&zKT(2P;(D}?4Tp1QrUUTCNW>T_N&PH0#
zj0PRj-4Nc6G#a4|0$XV#UoTy35i2APEy`g1DvUwYU$(`&5pQzkA03VBDy_M*G8<l9
zX!}^qp~@wBZuuBg4GC&7Uj~I|36DW-3iR^LzhDuDEn#xg4t_5_*~+n08dN>^FWZ$D
zcxJZ=?gY&acC`s%=`pJ-VzIkCBZZivXs^L9@>(B-`cQM)!a0rJUt0i>Z2v{KkPzc3
zI1|$0YXH4AT6A-nSPejDzB6lOYgBaeIONnw-I{R(JKZX{8_Y9%WmWDZiA`fmm(h%N
z2p$ctg-5jr;+U}-%W)yk)-ariY{*uvSw)7OA0ekwbWN=zF<9$zr8-JHp;;OTkByK(
z{8J1+Q01fzRirE!EZ;t6VW#D%1Gs3tUTktC32y~>a7NSL^`BQC9(Mtbj$mdYr*G-l
zz6RC@9HU6j+jy$er1<r;i^Vjeul7w>janVa8M$YPTI^&iaN6&PD9B}`qcA8oiwB*u
z9|+}GQDR=z8Q-hdzqrkE_@!;bE|lYwCS91EqQt8QC+^6X`f49Ba8l?kY!E8$5*Bv{
z!yi})m2+6}Yd@FZBi+&Ra^gDZQrE4(khKRdoO4x*HVl~#jUtJ{0V8l0Mfzeq*SIq6
zb{h@G?)9}y?Y4;@f?P?rs=};oGJL<~iB5x?7w8`-x(FZ2k`Rv7SjZC46d_WGaEtRJ
zP>P&1CtBKBRVzwO_uY6dl~4YrtV%V$sHW||DmVfF+kXv+{|`+@ZeT~zOGM`5j^RsC
zDdH#*h@!8;CM(*e*G`D{9&;>OV2?ewTKpZd?K)ZD3`DsdV7K!*0&NFq!q>;`wZb|L
zUs{{Ar!ML;D>#+;R*dcJktBW6`8?WUkHN%iaBo9sh@Ug~;J6#6#G$5&)j`Ko;jKm%
zx6NxCPb{=nDO@QPSnjyoIMbl+v1f4}y@bEoMmeH*^(d|Z_H^s0K{;SpYzQZ%%}hvl
zZf@9Qi<P(jQAxHc8wj`?-x_=NU5vyRU>{CdT{p>3aKKnBn3)XZ+*Rc%Z@V3<^85l@
zBNY=#^Xrq_M=Z4%qu6itOEGOqFopZoH8p6%Odb`Hx>OS(vALqC%ZvOiqVo;KXX&!f
zJaWQj?E_IxZ?3Z&Mt@zAw9<FJQ(1ZBJ2usvM&KLgB2JHwH`XS;hx<WGs#xG<`uAZM
zj$AvJ&iMMQ#07go!ify8+Vj5}_B)YSQdX%BejqvXZPtg2s+ylndS=ug@Y2*q%L3Xh
zL&ea0&O%=3)}_FQda);I&usT`w*;YyG^`NI)H#)#jE)xPFrM&6+Y}6|RLOj&IF`Py
z2TA$H@DQ-?Y)O0>Hgnsvm;3GXjOT;*!=GViP4I?<gchS!80BL;cq#zsSa)aCf3DB{
zVvnFYm@_jMSaR}W4CUlqC`;F7?EO^w_&vw*E(TqMVl;v=D$7E3tNi)9k&KHnQJM~8
z9z`?rw?^ka9u%uN$h<ckh{<8OX*qc`39SDVK>3jzTePyd+qpbUft}V()10aUXr3VD
zspn@CPMShLmBe2FNyt|nNv}a_Im0oX%ZT03T!#$1U~nDOw7`)?!t85f<C%dM*VmhU
zVApP4kra#xQt1-hO#CPu@CsO)_%GPm3cNL;_`87eMXc3|Me}m8-9)+F#E-q_rl!45
zsSDbD68p!DBnB?>^}J40b&EmDodQr%PI+udJ`{q61xx8sXqWmLnKZT}J(?OtXF%g~
zZj?MY^KO@ft3mTw@rkCZRc=%ekXaGs@akyU7zF+%oI+6*rNlMAKklW=Y3h{R^j6ed
zl-n+Dx0o%|*4CE&)!u#LCpZ<<dHIV%cYqSMi2c6rJc_UMv?0g&Y?PGujnG*q>ly^@
zn#Muu+2}0R>h}aCH5NeOPLSlUG|fqW&x1ikfvspyDv?J9ia^TS@Z;zm*;C*V0I0P6
zscVTCh?_M<!D0eH4iMIRY3S+4nAlV(tfEEML~1Hg>u6RnS?17G_xgi;$Ft6x*~w4m
z-77yeZnyese1rtU(fOYHmmL5UFL)k;PQj+#$It*lGZT0yyFO0q>rLk)WI>irJm5{@
zgF3WQge0IhHs9$3p;H#e>(<I^L=c4>ql%SNU8n~greuguY8w-ENNm(nLY|PYJn4ux
z2yh%Da`*<*&Jp!yPgWY!RtQ;jOAM&jE}jwjujhN8ahtskY_pB$mg!(J*7vAK>fj+o
zzG;T&shmQr!sEEk)1fXuXT*9vONq-`^oK^fXic|x(kGI^?&oNe@yu8*puX>dzm0-l
z!X|F^WJzM4#cP#1u{`f~v1T4UaZ{tBhsJlLTNYpFdHMFD5B&zLem@xcw5$`ogwhMH
z^NXW5$ID}svdX*)=SZeynOWz-@vkaRj412fZ}MclDTl;vt%72~R8aDAtVjFHsT8H~
z7*A8&JMhC`0oR<`x|Hao7X3ki88WiK?$$>}3yi=@iw8T4kzzYhw4DgbEdac{K3Ta7
zHOPnPvfo@t9g)hies$@ldFRcI4~SSI1s1*>7ngMf=pZutzSO(iZI!2&<8&%gx8v+3
z4S!=B=>(0|+1^i$4%SPJ%;SIQ-w%X~!*T5qpw2iBs^bp{;Kh!AY$TpRQ*?5_a4YOA
zQ*<hJG<k(Pqs3d3`1J8n8C5Fl!}@tQ%3TmGlm+#U`=|<FMe1tkrc%>F#-%Rry7X~A
z+IOPNAIN72j#lAXK4v$X^aKnc<j{~HMYsHb88A}-SASm9*QU>*1-Fa7+?^Ns2EbYG
zkM7^!eQ(pr@hYf~B6@itU<_8r9G-1v59mXFbavnA!So7som#1R<mg^YGf3OBa817=
zet*$a(7y=51^*zBy9|{@FLf+2BW*ylk~*VOt~Bu1%Hmb<A%D@&5K)2o`%O#N)SoZ$
zC_f!e4f06)Xg*{y3FH=WT?let(@tdp^k%2_K9fwgm9s_1=MAxjt<e^RKFwhkno95Z
ze6o<fKJLY0;7hcBI{rjq5UYfuUNMmGz5VvA%V=e|*VC0&g-3Hu$&DGW_3D<i*=<s}
zo)A8F3=Q9ociJlz$O#0N<WDd9U(Fo<)+J$jHsx=f>Azl4{sgQ?x<9XELr??&AvK+Z
z?To<)Bvb28v2dbVs}Zpks9cRuOHmkP!}=(70Xa&0*mqKs$)w-D&6K+Mv$3cBt3U?n
z1}8ZHuG9ubSK0xt5Ykgd`~a<jv2N*f1NNQU+{W=(^5ReT`LZ8BjT0<r>0sxSk$DPe
z5W_VD12zh$1ZY|3czb~_ZIMb}Ba!v4e3BkWJb|PPH<xfdrxBhPK+p<)u6}cIkUqFf
z$iv)nE+E76K*_hiLjQ0-5NQhvS)%n4_))>H>GVaNtVwV`U$V~HQy`o^j3d)U!&3OG
zkU)hbkS}t)nLNUtj+KZY_!i%#99d}c@*M*-z?R@!C5|RusfZ$T2l=<cZgzeQ?NYkO
z7IKVbB}ZKGj(d#b4cTrcVnB0^#@=US`?<)_m&sBo?*of33l?miPs1KVPe8UBRD)qp
zJH;<E&5f$b4fQYTXz^A71k0Zqlm8{x@E;i?I#1~QQL;_>D__n3$P|D6B_}gKHX67E
zzTNmo4)D*8q5EF=C_EuDE@vZ9ZU~UdChy+@NYH&ie(zzFL%(O?UkDT+Ro?^5lV8ot
zM`CoM?ubN&h9fw(+rJl^<l9)DSJY1fXl&pZrR2>xbm6Y6KA>Vb`mboY{Jb`06F}I^
z2B_cH`*d!i8zb*O-toS=HYV*mB6TELg_rHXo_QQW8hDn;%Mj8-J39_Qt_yHy@DJV-
z(qRr|FTz8*W*a`R6`C@o#|}1Y^w`uSuQib-E4<d{N|Yfc2(N?ZR*&wZHTHng353$A
zGz0ef8JTvT<BF`dja<*8Rq~ZIQk%G&+*CKV^UdXBX)}1$4VB__C+ha>?$u9zk19qy
zbG6jU&SJTwcKrcK-`8zTn<uJg+3Ag(PId?Yb;lttVyP;vYX=Shir&;`iCySco>^A_
zlB~lwdrkGvY)u9AJ`Ln?ttV5^#1P1~no{4DZ%s9|C3rFI#q4TH0jv(qf=Crs4i2rK
z?VgZe;jhnUUQfh~mfFk-<TjoANjzmbnFi`h%n_A%5PDPhupn6EcDG@}*c-E+4MEF<
z$LB0w8htGYaD%mLvOUlv$KUkl;7%k!jU1h)%ubwdMCt~iRp(4u+u03o#swuQ#Ug;(
zcrX+%;Jf-BT@=xxyV81hAvJP){(-~AM%9FCNkPOx!Q%?}Occs4PSJTqj0LNWQZC?Y
zEcazIgWqL2C@oJ?y-cduk^k-a_KBqySEUwL(qUT9eiXcswqsD~+&D_KU<FR|by2wW
z%EKL#XOefNHr`l$O30VHRcv7(luz7iy%DfWc&B{E9`I)kWNUA-$PumfdEv$TgMjZ}
zZyov+S^Su9b0$Y$#e8TXQH!s(Fb&*Pv5?Xh{uznBr!YUb|0+Nt<LHGbyKkrd{V{QO
zidknG39Z$P9prNMGz&AlX8(1ih>g9Cc7*~>v_Y6l9>rOYvG;LUeu`7s4&ZsR<pFIp
zY!jbQ?AmFRIVzQ7=hScFT)MlBT+a2O>%@udjP}hq>`Sewh73|BL;arTA8NrU*1$EM
zNRCQT5wK;2J8H@>>=C-Vf#Y%+RWIF5?yQtmWEHYYjKTLMB_t3+_f2spy-oz&&B@v1
zzFzr#@1As)eWPYC-=gtF0S@o}pn3`aw4=!S#NH!uj7V}9OCx%XzS`nd?j0J**qeyA
zE#~Oq?f?NoiZV6c8S6b-6dS-|-(V6`E8!x`e8Z$9W63gZ@dkZ~nv#kaNGiQZp~T$t
zmS0u~PRGnl17|`Umcb14Gfo@LkBDEy>D+PQprO^7gkS|yll)O)7x1#^ws*xGLe4(j
z-c7JMRp=ej+(e~M3>!lEVO;KRz0zG!O>9Uu)D*jdyAHjF?!xpx1qfM9yw49z&92Q$
zjcCV`^$geHqQ7&&9}=2i8_@tj_85R7?oLY7J|s4iZNCiPO~+)dq7a>kT`-e^+UoGx
zsxhdVlPL7-w`{uz8;_CnS88u!&jw9ACx1cFu`}Nb+>)Vqo0=DEPQKrjq*JeT<-%(n
z@S&ec*N9%)tL68JgGdff*j$Kiq%FEAN^7*m)Y&ESmSnb*X`LFGNSk6ar@Kr@x}#nw
zx$VzGLCujtQ>IS)G+Q3+*KT+wa=Shu6MUe0HYCoXRq%sCaqBqYG9+gMfR<dy!y6Eg
zieNwlQu0e>OtJ5CZ6T8drDoSq7i&YR@XAMIz1P}aH4=h|<R)fnUaoGH`BgPdpgrSz
ztoKygFYiF)pzi3UXhl<CiIStgg)T!F8$-Mz@X~L7NY!gkMd-VvPu%9d_HO&)rlOtn
zkKRCKs(_KK%b2jtN-T0UnJ6^6$2!#W=9H<vbfJt};Gu-hr#F^tT%2zT&KL3fF5TBW
zpv4OE)bh6@FQ`$~+!9qZeadj+GMcJU-!ibSc~iQ&^d}WGfKkzH!S`X;A|HCOz(rfl
z%eh1<)HJ&<LF*#`t4PL8D*&e_hSR_c!p;+tT!B911+VZlq=GCV@z7=0Bl<`83wTc)
zu!3bfn$0hQK}%RW6@bq5vgujI;yc&tTZQRWZO?klLP?GD&4ld+!M94?4`q4%gLNSf
zmtdBFi-#h{B4HI7zC>Riht>3Q4WC<RdNRS5?s9cuo3c$4mL)qk%kM(UPXf;fAcYE!
zz?=sG^Bf3vvwZtXM8o9b=RwuTZ<<`=*=oj&mp>bmQrCOmI2&={`dSKZ@W>I%w6Y(>
zNkZ%&^HQJwLG1$BqrPq6Jv?uhqRydg+vSTxiu3bE@MK_M5wL0#ZPZDd0CAX82)+W3
zva?HF5$TkHd+&Yb<bApx=3?fYK#M;ZmcIA)gOwMItF}f-ZyC8e2k2Q#pTJfaFjMs}
zmX8|5d$S>RK6KsBH3_>OOjJ?P_}68ra@SSnA@_{hkpLydfi_Ye(mn4w+j*2U>Rb*r
z^?NqH)l?8dTeD4{+c%%C+L1=DJY}9;lPpjXO;|S_b8Boea~=Vr{>aZ1i&TvXFxhb<
zF1$Q`UB5|4gV}6mKEj+U0eEJ4#apw953h$mt*#dAkI&*w_Fpu(=OeLd(c;jXp`wQK
zB+diiqWW~Cjq7aM(3JF$C1m%@J>o}Jo)^3dS2{iw`pB)@#y`&!DpH6f#Nfz@fXL2n
zd$z_ZjIEcZ1S4q>&hS~B$HeRbZN}Ve6Ek;&4an~sqCau#*P*n)(B3}}ee|CU1qiFs
zhm&P@^FQQU2?8h+<4b?mNBFN^B_8Kx$c9;8(%=H%`B#IMgK6(O#-`;v$nzFgk3P$;
zYB*aJxhNe$s&sFH#QGRjXP-?KwNLw1{@q?fL5V}dw2${4DF)q(T0Cm^mi1>Y<<lnw
z%WYIH&dfR**34|nV7{2@$n-N7bFjaZsN#w%E_-yJLU3jOj4!A|LUEB1{q0?DKbO#+
zjRSS{ljoXz(q<ubc3+jq1N?}iS=_ByFWf_WBwd?~uQ#p>es1qOt4M`)@Wj&AoNb;$
zat7+)OnJ~Jn<O{33|TfxMo&<o=R+OZKE0SFCZx4%B=O8t0B683oMsmfTYYEyX?|;I
z)`G*yyE=7~Z13Dk1<E&CIQ`~w^88hU1*84_AjtXQ-~cjT;gz&5Hf>#t<6AW`c3M`k
zD8}~+Z(c1l>!r11KeG;cd*HEOcYlV?%+%E4*3_{Z4@Oy*yV&NY94kd^{SemgV-^KE
zI!-oJQK87bM)P8`vB~XckJ&F>h%w^YV?Cu+H8*Dr1ZJ+uyZWheTG?E;zl~>@T=qNW
z$N9G>%`Y=O@e3B_zKwQ$r6}nn9T)apftqh_%{f(A`*F4wg`z}H>G_jPM9B~7F?|)*
zIg5n+uc^7cYBj+fGnYDqeW!TN{!qA2nzR&&g&n1038<)J4DIeZ+N%9@qk477lD10Z
zcn1GYXiVXK@}Q*o-2sUNxw(1ob8C$xvWrr{-0@*q(V6O44s;o?N2ijKmF&z@xl=2<
zn1!;F7#2Ko;E0S{F%wg@sQs&yX?PMye;E7#mp7mfKBDXJ4~qxqnldru{;EI!+uq23
zLpIn6rw;3f5;DQNHj}XLLtX#|)8MZ$n7;$V{5?d?zlvw_&xxBrjd7P@LqHe4rUV5e
zG$QJDP{T8wL?;9xMUMd}v9<aasmuU5==ndVuK&#I{{?BT`j)opMm+0ybR@7HeP5Yq
zFl23}mT@lj*0zMlV|UZUmqu1fZGHyl>NkQbJ-{}~$^OZwM%qWWHD;+vJu+~D-}82h
z#+o2CSCsqgDc?@d>J@-&lvBnC@uNq#Rm?Wp2K)tpL6vE{VR1EA&TL8_J>1d&26-Z_
zrTpVZSmY2j@Vm+b{=x;gt9iR&fggr|<Ld*1%*AG@LDo{_pPhi?#{q-30{ZN6SF=Su
zApf5@i4CO|UR_V-V&6gyr;xGwisc@GbM~fW-OujaETz+k9FKl-UrXeFV{QHCNVfl2
zlW$&*Z_(Zckc#QUzj`}p0HC3a*7HXQrEyxk*-y8JKm8BgW-E{=>qtoI*Xz!`(a@jE
zw}E=VE#7Ux<lR^S@Uf6fH^6iqq4;I&UzI5Tnk;J)Dt{rd)*}s=Qa&Tk`JP1=BlD*6
zRJkf5-Py)P$DK5U=MtT4A#Bd<#yKP_(iS#nh4R_u#mH|S*%1mw?4ml{^cv*uZ6Rx+
z8wxHJrmU$04-X}Z`{Zm$JM&<7nGQ7ed~nx%DUYhr^dmn$Hn~RBb^DC|c%?JwB<yY#
zPsPd=AJi&I99s#L_T0XEX(K{ECC&vfP*5-)ukiA}wk<r*Hj`-2AKI0m%@`;lFQ3Tz
zm`;`Atic@dB>)fj=gvmeP<HC&yOg$qqI+qEX>R#dgA5e0nroyfli}pRQQ7Gsx5K)G
z5}KrS>VL&Bwy_f$@sz6|4gl&!y$9vUGk3bQMHH%SdJkx8XX%`;yrOk}<<TVxJ*Hz0
zCdoL7s*DfoK$BgW(br?uuCu~)r8kmN!=a=!IqQbv{EOTtn7ip527!G<-hr$O2o^$|
zG$`^2OjCq)!(?h9>!$G>ULw&()B1fQ+st=+`xAx3*sNziSy8;1WP8`s_C7Z$y!8|F
z)aFJo)@fC2Hxy&qZNY`64f#gA23X5}S!jxy-HKG6FEUq<OqnS@kT2$ah&6sr1FCRk
zo`*UR>J+tBU>C6AD;s&}<CU-&7-wE-e;JB$r~j$Zg-dGJ%J#0N`>I>`RIjKKZsyh=
zDT!r&&?6<3c#1dyUCi3eSWjA-nV}L#qf<XM^7<yJ#DVqci>29#PNqj50HKryPl09*
zt(?<-T0CQa^xVw!b0}}*qv&&n4};H3?=c#-v>o5rs~GNAh?{Ljv2@1Gjel&gpdS<U
z0);gI+9@CGqy)tq#>mn0ZuZ&=>XC(vPXj5>v}|(u%i*kiWxE5!0Dvz5r#s(};8g~*
z#`fN8ZN^FIiX}KR?;Z5lag=^o%fQ`=9J;S1)f5%KuIRDcN`?}9CGARv&E<pNoqFQr
z_e#Tl$;!vj0=Rqp<<7c6GU3&b4oEbdhNy%EApcuww!K=T>fu(UgOt3jYs|9r8qX{}
z+9B7(tGv#WD26~vCJgRoIBQoeyoA32b2zsN`EnnPjw(w52)4zFRdarwk<`)dXczi|
z6c@T60S#7&xYp&L7CvF`vGaiL{R#?(!!F@tATLKB?q;}rGE`<DG_|m<LL$KD@vL^s
zgme3kkWpQ8Ub{zAqjfc^7v~nj?MSSJBuf)j)*?@fP>r2>G*Aolxmp0%-c47Yno&-B
zMf1MGyw0Q7mE08h3F1^p9nK3(Up)<Q(%!h(-fnme(^5|i|B81WIt`^9Dic?Uy{rr`
z*p{u<=6$z&(c0A8@QgUmbLU5F?95S5FUv`HZ!K|x9hY7aeL@37@B&C|h)9%klsS6N
zRlBcQ!L*zHVpsfmN=~_xa}qGsuO&w={rpws0q%H7G^7U-2d*=W!l?ni@1NR8M(`X?
zB86#rW*<^FjI(6a6Z$nD3pzx76a3t*L!S!Hd!%o0-aSSb*y$Srj5hp3LNnN9$jWGn
zmynLMb+DbN|8m!Lm!SP%ygkuMDtu5zhEFjyC1Rc@>No%eofVFGi7DcAAPM*abQG)0
zb}s!iMwhB}H2GE_dlUd{PM7<|Gj6YEvN3RK-|gB<RKp?Nl%J*G$d1ksLcZ(GW_I4k
z9`>e=LZJ61mkIcEWA3q>cn>ERpfAC$D}d`2D0;$sh<qY8L#aW?l=2PW#%ly~XrpWk
zXzSv?qWhHXxm<4Rd_);*G+mwjkWY|9WL20L@D6daY65$hzW6^kyZ#4X$-scaM7-`k
z;W+?5Rh2GSp*>-eay>l^`GrI+a_H}EU?ZMAx(8S}l@DAzSR#Y<HPRBifJ*J?utmXd
z_5QY3t0m;O1#;7`{!AtzSBRtV0|?l57{}@bAxN4V%^IL(_fnRDT8|zUj#I`m^vZze
zOgPI>N?M}M_}Qn2%t}HiHMSLNuTBB5FwL7Sz}jMn31f#>=si;Wg(Qaz2xv`30CpdM
z!O~y|ap12i0Fbt_1SlPQk{-SL_sD<G(*MjIvIGBXyE?v?TWu7uekLsJ7ZS^(6rj$K
zFx>?L0XC7K*@qo4GQ1xOMvDdQHf5#cLcW-46ADVeX;Swkxt;wA4ckdpf+*fsEWKK)
z0`I0H@WN=9<}zQF;kF4+bNLOF-ky?{?4Wd=%U-=paC9|435f#LeGb}Dd12Hr)+cTk
zP<*;YK{W8vBWmXnIaYC;u#q4D3~mmDvBa-rd;cEx?+N*PcL)jLw;qOGC)5Dzr5jc&
z8w*=;fpt7wUTOefgkTIMWb8T&WmT-PHZ%7^SqIYm!F=8alwQ-TSdA$8DbH8ENjMKW
zF_;9Ohz8otc^Y-Y(fgp8L*gr1eCakEpQiD<hx7==zlZ%MYW@FeLcIPKGO$hiH_tPK
z5SsQQA{2H8<}yU4i8aWDS(-QokW3c6*Q;At*iVoamZr{3x8Wv>K0{GY5;qq4*Pf=o
z1sJ&e7Jo3V;$=dC4+IbcN8y0oB~yj5{BPgZs^q&i1tizS9P5q6b3XkIyudx+Z{P=p
z2oGR?DM->|7Fob!TyfFizCGDRZRk<ykkg(y*&S;2r<}>uk`%j(6aJZVjnITGjYK5@
z*3kb@hHlgRp4ZQrO#1Kle^Sc+&*mexVP?;WJA|VY{pw<hRri@^u{SY>Dj&0{9*q12
z`Sm}z83u4@|BnVn#(0=A*8<Qk6bA!vgj^q>$`TG#Swfa@5?})R<W~a;k@Idkia$^V
zJNjAFnmtq@+Nl5%f!K60X}p3BG;yEem+^mPedV97v48*=%bXudZ&wY&2b*;|DhPIR
zr8d>A8QVL)D>W+2k@OKu*n8FLyH2A?5`LE$E4&srw|{g8&9Y~;%$>6n<hjJ)5V@bX
zb3Q$F%5bfh>#~IERkD|Mgn~{I$E}HE-l0o*!kp`NFYJnE+lw6zd%c{zt_)I*-tb+E
z=~?%*>H8BHp|4CDOM$yV9ACNwvlNSvBo0FAuQQnXI?g$#rnxrh7e+kZ&7$btf5yAh
zE8wnjR^Dj;-MjUyJiBu_MZ+AE6%$U^CGWJaCd#s<jHX>p2|7#nT;p0>IPL2g%`aC9
zje)9p_Mf^||CNF>6Ltyb9q>iI)Uk&638+JuC2nVlO!8g{$SZzKcD-s)@L(t<^;7Bx
zOYdgHDY6OK(jO-@nRicdjEweZm(VO719_ZwWJ^dd$b2yEC5Wv3A)>8!PAhs0bbV&#
z&4Gy_|FcA~FSv+9YXp!*Rk7@n=Mjf86v6aF?sl32jO2sxIZaPrYmgM3?A5|2ZrAg^
z(Csdaf&MZO^y@&t9rcd~2E3jAJXIl-xRDT@4|KAue{~N0Gw|wv@Rbz)ceo+8e@__x
zPJ<A?ni`{_4fzY#Amo~9II(E~_H`5S03m{w#Ya5{d=pLM2<$)HX3{yl81;32&2AC@
zZ8PgXGswxA(wr*3HlC&&#aO{y-HIIojOO<8QaGJpN<Q5iJr}GO^Mv11cP1p5B>y$!
z^pAcgT><^|j-USzcKnk6rO7qf*OxJWSQHi(JvJTn92^u;crYQdC;Efme(!1Oqgd^5
z)eot>mXz^wS5>(@RIVid1JdvR{f<bfNoqln{Sm`<M(c#3jA5~L#)~hOky0GSUtSxK
zec&r>_|VYW5cw<3&eNDXQ3l|8OO%&Ooab7jZ)YS`0xs+?^ZrM_|AF@wPNGe68n*Te
zNeeen9o~QZ3(0uFJHVg~xXv>_A8Eh-{2w;eQe%AZ>!UyK4DkH_`2W(se-(TFIu89$
z|4x=Q4qnS%fX)0Vis^`twH)gJdE>8=@PC5d<pKf+YHEU)bMvhzD3o*I@QSM&N%r3f
z)FiN9CjQkS?booTOaN#$)*y<2DH!d&D5w=cJkJkLQlD(F$&QbGnnvbfI@t4rJtX}8
z-DRROaR6Qq>C&6&MDh0Sw&L#358JKG%i-VfXG+K<j<J^~R1IfZiTXf4%)i^h--YGN
z1C*^kLJXc3XAI=$Mr{^Jt0H`|^2${VA`G0+2+ZL8hi&<slkvA<O<uegJnj?^^o}(6
zq`m}PEyLi7K8B6@<yAvJ>-!5XM(SpN5-DJhtc83&-rd7J!RruSwdj=*0TJ=Kw(t^&
zy@}+_hOh+<k@YBex^79T=4!^+vv2wvB$6L!uNUpYuu#x0P`SD_9EC(v0?_ug(_@F#
zl792%D}@0S`jBKK8VjDyBZk6RV9U$^7^eR_tP|+r*v6eQL(P972{Zxf5B!q^_q0Qr
z&Fx916Kz0BDTI&#{%VZzA(A@$LV|~D;FW2QfKEe>UH1}}94M6?_=C&}@a-Ny93$}n
z7Am}*I0X8B9ozg12?@YaAnN52K@>PC*hnpEAAe^27m^d@xXK5_Cm&Ah)dC&>Wcc5G
zKX3uR`_gGa*bOs+NiI?lwt|M$Y7i)uMFCv22>2J0q_2PUr8~*UH=_UeHIM!nfP7*B
z8x|u5AitkP@bAC#bX{zN5?=PmXkF|NCggY1><>&S`S4_^@Il(|rlZY+kPluiP6ILF
z1t~hgvmt6z-z%KD-2D^%2H#t1FW1CJQNQx8y{A)?)EIDE><-%-2_4MDSZlWjuorMa
z&LfPXjM9GUDchty#CR78jQ`-!&^u@RB=uqKuA!-f$$G@xH{>>Pn|R0v&Iha-z%#U(
zgv`3v0X%rEFcCeJOA&p4U#iG8L5-oa)O`+b*mZ9l-%*g2N`WoIK~;wDa0>SKf0k42
zm^Qoi46mtJ^6GM4(74g1an759LG!}?6^`Z%y$K=oj$w+HhvvWoVA0k5>b~;Nw95aP
zvHv$fAzJ29McuG-_$%lR-_BkiF^7WIFRke*M$ffGO&wX*$W>n0@{_nbjX5m?Ze?B7
zoj8SFsn6W8N`6|lwl^=Ho#jayx#-|XVdRB#gR+nR&?cXh0)(C%i0qT1<n@S%O3#v~
zBeu5Q(GnJNq<Ormhi6jeJ|+du`yas~vzY3zWlm`C!#z6RV4SYM;Y^Fh?)ZE+e=15&
zm8-8P^>#4J##dD^n7B9>7L*szZ*j9>GEtm*g1MognodRb+y?jg``HJfK0)K_yTYG{
zvRJSY7KTn+UfLsnVZYgpJck;KEib*RC6noP*0AaAb%Ur&9~Sa!L)m&RE7;6vOlQes
zhnM%m7llTlOyw2pJ=KWWZKJ7p6H}>m)L6DG!&)pE(`8k>Zs<4R)!{&RJ<b=28ji)w
zq1Yhwkhw){@K~=}9F9|;Gt*V*b%%Mf>Je=5(r24ZlEml2qir;VG!$a*Hb!jKyu5H<
z9bHk64Doar`PP`8H}6&4bg9clYoLdf<UwmcKkHJBTFVrY36KnVUu0gsDB(lKg|AUJ
z*W)Zz&Heb|TPP-}9T3oWg3XEQ(TnU6bIQOWc~#?5&04PYls&rQDfl<n&aUXZW_?o{
zJkZ=I5%L4T9O}g>U%>9?JIbfMTAq-?Ra{x!Z3+!5ahys^O-gvrQ>@LWxw-D$3*{8<
zB%Yr)1@EQd?k-Acx?Izv=t*Q^o8%e)U+sN&TvJ=VZ;%d3q=S^8Ac8bSs+5386OpFU
zL7FrH1!)q9f^?(`C`ggsk=_X%=~a*#2*pB65MqQRzI|rq%&2ERZ)Wa&bKaSI{X>&%
zlC^hs*7~ire!p+wYz+BG%A~J;>AHPpna#2{wgg&(*iz2&akgd9v0MJI6c(@zt?kRR
z)wr@0IK0|)g!&BMJBkoWe-vUaXsI_;{zef_OS5)b-u&M3_=-`+r3t>4nhE(@f%SE>
zOq!B&#?l4y_B@Yvbn&L>W1l<b5K*&I1u5l^OM$Dsx-c8Wf9QAGE7j?R)c)?wmE%;m
zu0M2Mhv3d!Z{#5)?`d(I-mOVBUC{J=w)(o>^pm&f)#bpn^^@G<G9W>|!t!(?Mbp3`
zf54=HLysj+W$|;5`X%GIr;#ew?^YLv2VIr~NB~e1{ViIOJtE@J3wC_0v~=H(!{6I-
zl%2<~#s2DeUj2kHdE4oLF?X*rHJZxab>>^I)%NWyy)Zk$HQ-2#;}fOL0CGmF!;5u?
zd~n)CSEH<~B+Z*G-dMNK)r|R+2DWj9i<<dUIo34|sM4$+ip^N>!k3MF52Zwra)*%N
zPv@iDJ_YzMc*AB!BtJl^${KPki9<-*`x(qm_fAi>EYArUWhX7EC``@WxM}pNe{=cb
z(=VZm!@imJ!*$%p0MNPpSI}(gaz-$b*{Acyn>{#3H!Nj4wAgZ}FifOWS3@jT*74LQ
zKICC%@Fx=8D^)y%Q{_X@<1k5d`KBkvZyB(b4BYj3iAydw!Fd~wi?O?<qh@cMbbXnA
zLs~%OMPaOOE}g*|sT@Khct5ViMZ!$Yb?UresLv;|J6A#lUu^Lmr#m03%Fh>YH()*m
z(uFvty}T3FsMD2ZJ)t%!ua)%y`Ie>IrDTfr*t%;xSG-osWD~U(X_M=@!n%(!v&pF8
zmuTpcbfhBR`##Q;yi_th<C1zIY-B@r-IM-|W?pgMV87r~&42d2;j3!Tz+QY0A2xXp
zU&_2lms>{cSG+*%iy6+)K4kt9<Mish$wD_@PRI&1n>|lZY<>HXVw(@R3rvnf0ti*f
zOyYR$q6~(j2DNo|MY{<jj{FGrwkt6GD8|Hkj#k?0IDN8ekd=N>_WJS_Vy4mJ?8loo
z51vG2IXf4O8|7F;N0SPHt?8UmmU9<+ycT&0(UM&1%3&5dD{mztT|~)jV@LRN<-sR<
zC*^$oo_pE8u(jC%S2T;{)`l-X;Z?n*Vq`ow7u&^fRIkWgoeVR1PlfbBgg_zNUv{nk
zmR(n?xaC~&(r|W`O1Tg88iqBp^y($&glvaT3z5e)Ib1W=qNU$o+2Nzm_c_%%R2~9o
z`ljoDxCr0$SlU?k5s9NK6`9W|0vYHw4D`>t^WIpCp`pAbn5hu`37dht;E3+j#Jx^g
zsz-;E`jUsWgj2JO56?lz>0e<UyYi8Hi<#M^?1_bc1u@F=qTy`^$z5Wc3mn=IEh;-(
z^X;qA6<Jwy#?PODegSv(L+F4dSd?YenI15i4oN}>w+Ke6f$-3kZSB@6UN>>^LZu2b
z9#4U1pO}Xy#tR*|&jKq72M8EXiDN7$VJXbDFf+#u_juB~#B_+$8jzubK1D{RJ-*}e
zT>r|GT;{ii0J=H?UQ<nEA>=fJm;HpG^kA^5>BoVRx60WA!pR0yx^vAuCw=43@X3aU
z8KiZT28;p1cY<~p9jr4hwR{1P?Uuecv{(7jVam;oDaAdOKU$X^BFt*VF7i-ZV?+g`
zCU}z2Or$Xz3_jT~x{u59Vl(E8X7yG*6Jn>&_UJVkV|yuh_R=hnT8bv5W;KTMt4>Ki
zKhhXNvQlZN*1larD)^C#%w6!&JWP&IN@T~Y;=<j~tz2?uXlz5l97l_SWQ*)3t8TCj
zLzStU+%vLyq<Gn-3bQE2N!Tqyb}J{IWl{nG_6glOs0){D8?wrFWQp-BSO9T6Lzx|A
z(a#nZUu+NC6`OUH3|dLv{7|fP)iBN{9y?>Fb{55vdR3cS86x~h$!h+t&(J+2&ASjN
zPC&g!10eo7Gdh^)j-~S1U!`wzWjUuKn`{*T9xRQbD_T%GFkBkhqDRlIq<l<TJf5;X
zVmQXPyR%n%h19XY<i(OiYz0(spX{NJjH^(@G~y2Qv5ClH-OCyN!g5@U@}%4SnLTa(
zzH(1BKdP?y;aW_sYTe13_Gg1D#ieTH;d&ab(50N&tLgM~6SF>NRWTgA8G<&xW^=s3
zfP+_RbJEFha<Ahg>}5?@wc1)8PU5I^Bm?ytW^K=nN3SJbW;845<E3P4N+qL}!mKtw
z1YoK|WW)iA>bvi{kw-n*T@n{jT{T@GGo!_x?6nk%)82Wj{tY|;T)Fi$A*+>2x()f*
zq_^jZ*a~Z;3;y2A91m4^fm5kS>&yG&M@<!;tFWsd{lKJgB$KCh?dC`!ari{*lcgQ*
zfN{gdn#~6m!FfJq+Lydjw3E>6rAb=hsrP1c!v@lE+=1DIoK`Z}#j(8+`hdwsiu`*o
zaT}d1l^Z7lawa6#o~5%<X%PElQs_DHg6A`D4RT%E&Km?*!FWAOk$_Ze3q}O-cHb{j
z$WiRp%4b8d6VDBz6%G0Bb^EezaV?Fo;1J7i4t;cbPb3HOjk|2`oKDuFsxk{(f~na-
z<!$8G*f^K=LD9*}VqHycTrhsLPS>$}+2dvMH*D+#A{^<x&CS%Pqc6NTf4fgf(fVS6
zENKo7z)0|IbX#I+S^=a3Skv`FC)C>9^2RivBL(_+u(o;dWPhlpQ|q1wueR`G^Fi_9
z)hbSqtmLQsj&iaMMD4NUC&Y8O@*YKgbeDm%v<W=eWpC(`pTPYl=i?Qn<LVs3?nAsu
z#yY%fx7Z*}JMdSb_!P@d#K9h*eKX#-@erVk_&e2_h_$&`wxvA9%$nilrfZ&v@89kc
zzr_1IUrcH%A0l^blXW|B#j10gcM5I;4EBT1-CseE9w;JK0MkWu{D=5?N0UPTN9ntL
z*b8yoAkkFgkg*UxkQ(*DgH|@+41WCCerWE)9%*c!L;5)kOL>WDSJ+sQiu_9u%}m<@
zhfC+VxtM1!rswz4SCoIvoB+v-e_1q|a*!xVC?OulfqPRjI*(avUjgi`yl12wr%rfw
zZ*?Q*j$iaoyJfggqk(L4r3Tr?(jhu_W_17`qV8f5f0&$ek+Dxu2j;n?82BTLwsVQ;
z3x*<>x3PnBcco<6dCrqX0C1*|dunjEohT0`3RN^V>}AF4W1N>C`qNPa1Xhk~nIdt5
zS{ZcVya_t!Gsz{DqN{O>tMVFOK?`yxSo-9*bSc>-tspjlAKmk1NEA2aM8HX(6Ym$^
zA7pwkUfjHE<Mrgt=x#|#&T(hpnr02K!{b{B`(p+@2+GwrCHo3jcD12elfwWj)IQpj
zqu8#&+nb!;nD2gA{g(w}rT&yPz{m?opjPoAA+4;AM>H)Bqs8cPil8j>37<f_iPb;>
zvyf9l9~gs0pUFiOp0+f=R^SXF9e~o_3nbnB%ke0&W6C+<Y0+kgIcvu?9>(^>nOD>_
zV2HpFHOYHKHD$L82dZ~724i}COd`$ZN<^=YgOSt0{aL>zh9}RaZqYux7&kg476^5*
z3`!@#c`mgun$}I_R7_M8gswJE@l2df(5z`NIvJrFzY(l>{c-l=qwR?g$sU3$h;Khj
zO(QBpyR-&m_5FPJJowzz+T+Ha(^S5C4J<d*$qdu+Dq|8ra0>KY{7^B|&6N{CUYBNl
znJVylwPt6q+0idS0e*G-DR03=io7r#v<@&lEMb>0wfS7pkv41|qAKiK;CPwq@DksP
zr=k&8Q!s8v01`HJv&TxYSSX{Zvm)!Xq(fKZ9TRM-Xu2fXStH3NMdhQ}LQSrl4;$nS
z`vJ#@wNIr8n--7mwYiQU-3hvvqC27|3kIB>up(mIM%n{{E}h$ypod1&S-}24!)_uO
z$Y}(ZZcS>Tp2H0CxG2trs5^HfGvcnHQmKyxTh(XwT!}4VRZPXD9$tot;#A6%mq$2}
z1CDU-{GkF<bxyEgy-|+Q<MVgOs(0mrpFqPfFQ&j9F|(1&Bi%^$kCx!-RBclg3`N=<
zG1c@MYb>LgN%C8}V6*re*XFlr0&XeExU`C3EPDD6u{fM?-4teC_#$`JS?c35n!K7c
z7gssO)0H`c#v38waq7;8cVTSJbxH|Hhk#SHX_$fTqt2%UN9?@#Y$mZ;ZJvt?gp@rb
zH{ji&cpevuq3sH~@~m4b!^*yJAouMDYB!gP+#BJ6@~u|BuxV0IFi)^G4qCw)fwx0t
z1gCuTsj^7!_DX?-duj`*P1(aQYHMAo7I-z!elXN9_)x=_=hj}iDh`2GUj}bUXRBx9
zdVD42Tp0X{@7~{7kJ<dJ?$lpN^{g=IRQ77zwTU;*18`f*Fq5>NWDM;xBFs~!#{a_d
z%?kbm$1+-<JDKVId}nMUsf;cs%^BNzww=B4%wD)xNwERL|GdCi5i(hwcE%uCwWYZ(
zNx&`ha=fV{mxi&P?M*cSUy_hxFs?(cIl#Xf$w`Fbz}<`I-^kyVhk9g&iPp%bt3w<^
zJ{r!j-`40oTdzdH>;nQpmF|?6)Cd^?l!37A(mamrq%(BU3OAm{s*g^0W>kgZ)AF|z
zzJe?RfH*iVm!e?{XL6NS#WZ2FBC+>R5QyPR{*;~erc>Dyx23$Lr#<?u0!i;?w9ubw
z)U3HS#S=;v+>LyAVkDf%6)+kgPe_pjc2!7RhGrG8JyOZkDbnQooP>3+5ApL#>txrL
z<zl8)RH6i}ZWS-Dcr3@@1S}&;#YzK)oL6)xW*1DZb;aG2%8<BQ$uBO>de2UVM?j|Y
zh&{S_!UsX5Iks}lO-JW~4jLLLyeHS7*GB8>?<1CVi8*?+cir|Jo6rQ$`IiunF;!;L
zzh>2OBS6A1frySFB(wlC!?h$&tli(q&{M=z0jZt8dd@P3qTpXx_AL+gfb64^WP?$_
z8GwoJb_3w;0~pjjQf-nq4v^g=6Yz~$2my4n#`gWUuWk<=iEI6Epg;bLQn!+C=p*>5
zKJZq6jXe6qGqDfHN>mk*CN^XcsHTlgS_V75)fE2|`RpH+@&0&!r&P#;f5TcIa~`f9
z`63_<!5)nPR68Xfh+K_Sp30U#5g!p37d27oNRvKkMJOQs`sVjYt^d*)<eN~CcCnzZ
zAeP+fdR*Gl(3x&VBO#II#^4u*43|sX`=dV^3Cbr@l6{JrPK9@}1@s~JH06QL9QYQF
zOF3|gIPz`~f$o7+dJ`CQj=`5|Aw>QdoDFzJju?I>1c5dM16v(SZ9S!J*Z@G5vN}8i
zALBz$6KSmgpA8@n9deG4fOsG8;|vfzUjfpXl6!BM$VZ;Tm!qJzI|qE}CxDQA2a4B*
z{BVxe6u|23lNkPd=q8XwxSY^wi4zG0bg;6CY@OI^fEkzOaygy_NM&?QPh8h!RkZu<
zsQ@L`|N7T4`;z>dGXa_JAHJaThav&m3~?Y${==D|KYl-dNmUY$b;}>`oVi;d;`Yc4
zAhXuf1c{KRM1Q(8TH|vgNv=tY$|n8X%CSnNrexlrKyjH57IUgc>i-{Bd;eY)^p|7*
z8C77I5YVB(6$$!;46BNW`33fxt<zVpqz=SLY;!2dvJLweZx;#XQTC$)dLUJ+is#1h
zHljfabMKYmRVAWsWv&t%{Q5Z(Qk5F7GnpZ0&c9204hn-h0fK2@G&p1*9K!G+S@8@e
zH0pj+s{aL*z<$)4n`(bc>*@PC!m7=|ao#)aoVV6n%K^<KU_DqB+941?0b@mvblyl9
zZk%xWyr{ue*|OHj?n?6xM;3Jn;IC1_rqr4a&*8<VhyswvQN_aSjnCpH12BmNH9swX
z)y;HS9_hP1?Y30b78E2O+iIx}xFbX<se#r*78%gJPw<8quzHP8&BzHt=%K!WP~`w2
zl{ygVbpC9rUYMNHh(Us`1C+LzeHj74Wuv?jZex5xd#G=xJY6?tw>4#Zd@eXDE&MTv
zE7QKP{8CmdgdeDk3F4?%;7lJ|#TqBv?x*e?cO$+r7bW@$ctRxceToyDcbMzy#G+i~
zU(`u%Z=pxpHqh%E%aTb6%%Q=qdtdf?Dr>ke(V9MWSi2z@NK0vL#MK>c?e1Z|z_d+C
z{S`zr*I|h+fiLU&Cj$MOjo3@<IE1Dg65Fxx$}a(~-tYQXyu9sStk**sh(l2FW-Ih3
zWt=78HJG~{x*V1rsWmup`pvWOOU0`0(N%(9482D}+UBX)%DuA^+rA{UK`Wu{NRZEd
zz!}rdf{oTQ55-N8&KDmaJg$wa#Z*Eh!$3lb<U!}SI8H}CV1O~=0y5Z;E3c|zah2L~
zhvx?EY*-Uwy{Se>&{C^#I-frNmfI2Dq-#)iVvi+hGrZL4#7=}ujcM_dD_c8u%EgPl
zptSkc)nGgNJhoLAz34X^2rv<h<6TAsi{TkxD&O<Xaf%-h8N8S_IUDo*T~F-mDA$xr
zHnvJM{);-CDSHP3^$>9%X{;(v44~pWF7l6+?7TB)P*1%&vExY_*&=rhm?qasORI-p
zM>n`U86MYU<DuVM0Q8l<ExL2l|3eId2lOA{7)BLC3&y@-ObYF0V>Jm+6`5f=gd};^
zLR_!Tq{NmQ;3an1=~O><y>n}W`=uv?$h=-<Nqyw%(SauWehxS0gK|~5M{A8e4d^=T
z1Dvnx=PWQS&On{oh3w|k?nQo-#`TZ|ZnfL8Ho<%^D*F1$Q{i?xN74*Y(%q2Ro9`|+
zpz)T+-LgY9t6PJY8;Qd;suUlR^wxQrV|^cqvx1J(Z1v7R)w?Tize3q&84BlE8B6UM
zHOt)zwfB2+@t|j2ORmW<!69KF=@b>X!d>AyeIQA(E}jvsl+iZb_^9QLSOC|TcbW~&
z%_+Wb%4~Mi^1SX0dfk<N$GicwDx<@UmqI5$so6Q|lhEZ=?%}sgTh}@fmbhZH%_ZNm
zdHe~^;!&l7a+x$BfVuevJ+k8Ln4Es0DqY0%vgp)_wBXJe<ZM;YphuT-8}!`G{%z1o
znSU)XTqE!tgp5{J7**BeEu4Rzy!3;vJm<rW!t{0Qhh2?Nw_}q|y5G&FjW^|Z{%oH}
zivwg#LO-YTPRgHm!hYr!n#_GuL*Z+oBei|2XpofQxRo07U7;qRyYCJ`mk^!KBr8n*
zC*<BY`@6f=JS*#~kND6}2I@U2P{eeX32ZPTx%>{xiaYXIYRt^kY5F4b%LSHTsd8Wb
z_<Y}364!A^vadmvS7(KL{`vxz3mBtu_-i;0Kp1R0?xlm`g^6@6$g#zCxD8jyxCvEm
z9fvNMuZ+=A=us57Q5h7q6M?Au+=+~;tF2p<CXRVkJt`d&-sdP5=$9RE)ABtos~|qJ
z5@Uj$6*L2<6X}VA5b9RSkD)hkzHSA^(le{I#|H|{=S_v(8($yZN>Ynr7<jQY?6G0I
zC`(9CJPKr}x=iZAJx%2fX=CWa42&bMPP&$e>>DldJ#G$JysW7_!uN3D7dLVtpxeb|
zI-HOWzApzp=HoT20M!~_ik_WaJXv!p;xtV}_9%(To}R{$Z`jV@-w4Y621WCp16VCR
z`gN>9Qiaf=ok%8NzZ{vgC{)=TC?nxQXDV{7ABF1qIBNiU(#3jC{5Y2;T~6#<c22{$
z5UK!j{5c$Tj?>LW@(QW)!q={z92<oxe#QCv+okfmoUgREq959(=Z0gQ1ZyeRH<L5K
z&^sfu>TRuO(KmuVDl+1(Buh7j6ni=<6fIiJGqZTzE&0GvH?#hDfPcTOUGGU(-6ByI
zZ5XP^>!S{^Xd(ek_n4Blsn6$yW9V&NeeR!a3nY0h`DRSD?aPERA(_bGU<s}PCJ!->
z5~oJW@KYg+>mn)!^^LZH+zbNjbLaA8PM$wF`XD45J=bAU*XgUs=)_Vk7P2;>7C-7+
zR_4NTlFIVD25b}JCp#j!05tmvN)LnJd{M}d3OpZ1DvmpM`Pm&*Wi<8f7Ux|x-tcji
zPr?P6F0sekcIlk^TDb#yEy)x)asA*h78dEo;g(y0LbPcX2hN{t7`Cao8ee-AsJavw
zT@WC3K4O$n=#aJKJ5`D<Z=XeWrGZgBFI{+XF`Y>hv&L6W+g%P>74JWKYs<R5G4@{1
z*{O<OuK9C}yaGl@uf}^CFUCC2a)H|X25+lU0!0+plUNFBcCO+kV3rfbjbNcTu@y0T
zr}VR?{_35xNew)yy8f(s^%L1m$#N;0R>6<2Zojp6X?h-l(qhob_Ayv;>$D`pi#FjD
zmi@&Jp`G5?1QWAfrl^FgqE@^7iEH|hq5B_~g3{Gw%FVLU+AtzQ*Z`b&HjF)JN#U_~
zEbivKiz=WRBKlC;(iw$f<OsBrj|JrJ-<?sk&kpE<l1Omf_es#Monq8=yp>{1S69F&
zXPU7Pu9NosNHoaWXC>F)LzR`58;bE*a(fscG3V&KLRDtfI=Y%<E~h)OZClk^6@Pzs
zzEl44xua6?`DeQg@5(S|ql=e5KO`P~Z{VOvQR&07ZE(-q<DSsG`NE)@eBZm+WOqu^
zGc+<F!~C%;OW(?2F#ibC-q`H_&F1m`G01ITY4}%`{rBcg&h9_DJEf2ojY3z+^8nmm
zHS_>f>eToB%b^r<UjK=`+&_V^AASNK`N6*_;X8k)8o%ekOy7^_Z%6lk%5yruAF)@{
zX^t7Yx~|Yob_g-rY0u<2OW<FMV<cavey<(kUsDFZ0`mSpK3>>|e<520dJQ#nkI}^9
zIFNiW09GR9GSEGvk_AhmvNU49Lx(^%mHZgGdVLJBF$^ZK2qbl1eqWFkF_`%S+Vta%
zA8eY-<QRF&03?6>F9U!XQ@+IxPzgU_?*Q8G$v}={Lh_A&;%`Wcx)T10Kz4#B>u-4R
ze+8#U+5vSE`yvevM_@%i!Vj+PPr&E^2%Zp-uqJkI0lOGY-ZFbEkeN)li%SLQW!w1x
zy==!^x^3snDFm>&-n1A;?8RTC9Nj<h%>?!9MD@=;LRf%(&?j^FW|0q&*>Ty}x`?a7
zp8E<)*#LBy3;=5Op_`@Ss1$Ht{szqaSNg$kBF4{j6Uo0JrS;FGn^c*u|Dxgu`a1nH
zg8Z8!2q51WQ-rI!jLSY$bV{cfP41dqLxk;LaLHE~Hx}vS;$J#<;WbU(o70u*GQAHD
z7k^^}pXp$UGey>(A#m*H6Pc4N@lG*6yl(9Sq$s6;dPU2jHGB+V0$(-;Qe8a-zd2I8
znIb^C;`YPGe>V5O>E1yl5e^Ul@d3D{v>_ozKx`*<xnC;o&p;4$?G4{LBZaO$%B{@M
z#g2{k3Q8ESYh}P25=uoBnSI2kHF5OL{x)=x`FyOD-BS11vrHX2H1x9ix6Qcn<6an!
ze@BS^JM@TAE->Y9I_1x9`g2TvAK(79c(yBtHN_<!YCFQwb?{{t+y%E%LOF^XanHZ0
z%c~Y?+nVtDQ>hwmB;{0FS8?ziv`l#sblEN7ZN{G4R0i&*F&tA={DXkldkPpZJZSx}
zz@jHR0Ki{$R1N@Mj(UIRnBk0sE@clAU5B`%b0EE;gBKD;;NC!fc<rB{3i{dP&wB}U
zBExte)@~>>%;3-q7XZl2K0=`t=D9&{rWUDcWIWHy+v``I9_T1yq>SQjao&snJLD+P
z&w4B%Li%5?^ZrW<<bR7@>mO-5{r9frKWmHq2}yy!hAxlZ-(mJ~Q;IiiLA!kXaPTl*
zJff<S8gwZDJ^Nsk@}-^yIr=kds%8Zooxwt6u{56R%*QJd+Yk9LJD>82Q&oP?Ve=K1
zamlpRbQf2rTSx!g#hBM2%&meS5Hu1(<`$ndCu`<PuWZZF3W<omDLYf&_zWU)VYin}
zM%I_>`h$dS&;x(w-3*|T?j*2-_k9VI0<0|jZI-9%P}PZhZd<%$wbOZINIq)^^S~?F
z5Z;IG{feIipf~ky(Fm;~qcUocLBwOYj*0dM86on@h4b?Zrp7g!Ec^=Mg03BGPZtLD
zIAd2%Dd0LuL(B{(8<So-=X{tpDgAKI&5m(MFj_hhtCpL3=U#}n4CmRvE;Na}!7EmW
z@}yQe;+p_=+z^J>1m}|LV`)ArCgChr_gu1A&-QlEkWpehLyr<$?e$ljbV~BzLPv>j
zN7ui{RO#2efzZP#z^%hT8TV*;B*Y|YSF{OLASNnoV>A;N-+p6lekEAOm1HAmE1eP*
z(_QM^hF}h0_94}-s!N~ft!oUfnXKITbW%&s>f*E7SM1hzy^oxqjU?Hr{*@zpGoZZ4
z#NmPVc1?kAhTX(cJj(K{;F*MhaIM<cS%&dyZ8Dc{-j)~Odt}MvoKahf46c5IK}IMt
z;4-vu_w6+1ES{gdsqxNE%XhZ##nlRpLB-oVQ3WK=g7ablD9!ci1+@_)Ks?apqK?!!
zmM@*P-F)oz%csi^0pHR<lKpaF;Fc$=;)Ir~F%+fQPP2On$wn>`eu85BfVtpp|9Xwl
zyR>p7uX}uRC|~JW*rUBs0g0H@<+4zWv`Te)Pp^5j=Bbc(ZYhDX7GkMZc6|amWYckY
zWkRVuEBX^E?-4p#553)@m|gyH%0%9~qpLHoWIFHMW3$26r%8v{=_w9N--~Zjz5x<8
zJl~FTMd2UyUVdw@lX>S`$j>jSFeW`S%5F%w1uIT@N!tJ=9km)S!l6zt*?Bp2o;p{<
zz-iSs*%PR#i~D5K5hHe?F_!d_kjDs0m@x*TQ4@jvtW@du%qrt<`22B@e~y&wJ2HRg
zG8`Nwl);JvcLoSIhx61=?aj%3;Ej=^-?q{;Hk6eie|2G4;MFf|@ITXvnRj=(#AbQB
z6*(~1N|!A`cmd3rC#z_jN`d)YPg0VTIfEWob5*RX9eph-u5h)0Z2R?|TcG6$`7;<~
z+lLi6_37>+^?M?TYiTH|?UHhy-sfbmX+cZkb(Gt$<QX4sQkLl%wen57z{HjbT3z0+
z%*^IKB<;@kG3SWgySu@VB*df_IQ)){hLXP-sL*bSEL+B;0O0&;Fr7n6ZKQPIYE|@-
z6L!N{IO7f@lMo&DKHK9LG6sS9pjGl)iqTokyeo*_9Wd<3Xx6kRfzo7J->2+0ZH}?z
zcvsTPqfhP$3UN}AZqILu7p>Zs35~XL`m9{TB`=AA@yg>oNmBz|Qo5EZOox{X5;AX<
zS68muzhG2-xaIH{LzZ9iIsXA_wE|hO|7=3?kNQ4NEKrC4t2O+3xoY{v?>N*o;v&GM
z>jQc)0;K;w^O0|d^dIf{f5HNy{0YLf{De<uTLvksDO##rF}z5zDo;t&<!$lfq&wni
zISi$ECHlA?-||Z5B?M03-7I|hMekdzfA*pGm0kScCh{FuQ^oLwX-?D&ZwhxPcK|!g
z26rA^XN1GNgxO+5>sFx0?zGg`HYN$|(blKjXH;u6GT_vxsA3|S2|3@CD1`KhL0KSj
zVP?3K7}x2W%`w3AR$?<&R-N}kA+3dgKk`W@%^NH{aFYMjjkqvfT`6-NAxs~_Q3o)`
z0@5fX0DPt!FXmjT<&?4$#$ixXKe1F3?HM;abS2z=k&%Zf`m#Jz`@F*Em7R_>Au~d%
z<Vfhjh3;Dt$Ii;$PmN6e;&Nu3-mWGt*P6!VJZPM;BWa%9J@4^dqTm$ZJIRQB6}oH@
zgyI<D=;UTV4}LzVf91gxZ|MY}qOZRWek!jSzG7_1PF^d+Q$7r>^ll%R+lfT7l&#_-
zQImihBDCj9RAhl_-A$HvcZuVJWFyTyYBaic_&^sdHHh7{jmWT8h%<x>X6l-h^10jp
zin+BB!@0o4VX-}Dfg7*bM^)H7t$o4iISKtfyz01ov>f&kF4WC#iFSm^M+<eW%D!;!
zBUYN_z3`$Z?R`<}6s30S({InoFM$;AGSzd^JwhLLBj}-bGG-Yy$D8d7d{mU4$e*2!
z8=K*oEK94-s{Oz`k^PKUZ~Zw9XmxXTAkevq@T^%2HA3QZxT4dp6Q(FUDR1tA{%8)>
zt(cV53i9xpx)@w+R;ei*k@Kw0Y}>!Vnu(lUF$0~^79au?YPQZ}kUa-IFR-(a2ms2&
zN*rhq4t5Rb%x{+FxMB2acRpC|;mDnccAPi52>@T$i1k{s;gZ#bmMz(4-rt-uXnLtt
z;bwiZUbm>b?Ph;ko1cC?Hw2yB2DpJaLrE=Q_c7AlfJX~^vx<i&piaB}{G~Y|zQ3n`
z(SymwgN9sMbcn;Y>e#?=fK^>FnzCmEfO}&NaDCtiNp;Ba09I4rTurUP`qU=xuGJ=H
zHt8{n7h|Fdd-kkl+U1d*emb+>Sbg-+=QhAz111S&l<<>F4Tnh=w-<EUGj#e2X{}lE
zy%iX%@V3qo<P+dMxlxa1-P~E(IT8SX>zSLSQS1}ufT!5miLMigyQZ&p=kzDHxFzbI
zq&j7!vfDgrNlnW<aR<yl-AsZ(bRjC!5L@I0FdNLVjL17MY1VL&()%0b6^>di%qa_0
z{2>xz_k?;H;?}&5@hg$g+V7MHTS&KJrrTp^V3&?_G!dLtst`WRiL~<ru9fmuVVB8X
zU)k;`4*-;9@H_z`0HQ$v7#}tSfNFsTC^{W2#NI=eRU{Uulx0+Xk*m8ilDhlOYyNYu
zWYE$fbe3}2a+83r(?sh$E|FX?RGBChsaZI&i*XU7;Un>yHFK5PIIB6}`?yf=QK0+r
z5tfN&ZOj>z`j+~VECPV99*YT{QsiW1#Mx^)hUBu87tJg6@e{6)U~i#$kershs}D(S
zr$5*Q`1nL><JJy!eO8t?xliFd#X8|feMq}l9uNpq*?vrQ$%ayX{mQpr9sT0FoHI}A
zN_H!AWKtJ!S|P#hNC&<{Kc+@+I=XnUG>cvkS_op>y?P6XQ7ewR8l53+XVrM$eFfnJ
zAo%zZ3i#WNmtR4$&fBy2Q9`LJ!a*#OeildBfuxkAu13=NBu(f?-Z=3jec-Jy`xg!4
zxP-xlxO&WDYdsp-wG)0&*QwyvW3tn6xWLh6a=%jDF;kMzJb^r$W5;E$vyge)%Q7{u
zLx)TkOWFbB`U(;}hWQF2gx`c5=rQ}xStdT<M;z*Wu&j;=Yn4N^DX3|=c&OBkvvfJL
zoLo7l5$~IK?J-x5%Umy|Lh-)TtY&U)glSlFxH(g^U(g+ES@X`IpdP<5p38fBsu~@C
z!RLUf2nC4Mc}0FeeXFVPE9isVv^H9fs8J4Jdk<Nw%L#nZ4H>hMMD0$L8zHhWgP72A
zA4!T>jlcuq5*2<|0QA1bdx**;uvGdRjQxx6p(IQiS9+)hBSr6Y!MR|!F*lQay_p9u
z!XAjcd)o!KZAyVi?T2|c#VydOh?gfn?oqNj2(^sQs1)l$&Qi9+EA1a|HdCWhiHy#^
z=>&<?0sote3#3{F!zE(YsdvIF&EL7((YVZGUc_mrYv$usaQhfa>{U$gdQ9oC;%OKs
z=Gai~U`0q>vVM$;onyQY>S?lg+g5`PgGTD5?at|1ydSO!CB}lMM$03}w-=B3q1LvO
z?egYlhfei5a`hAPP^yW#%;$PJij!`fm1bzrubB_%M;_US23b;-!7i+%hV8rfb+b-C
z=&7Z?pyXQqhC$;IELk_KsJp76jyt5)l<+)&<D(a{+M*7<VW8)^EH<-;>|FjN9HPmh
z!|+<v<@0f4uF|c?1xEO#`qyUUH_0K9UqO`H`(ks3%jF@4DAtIrQ*ZXtZmtW3NwQ6?
zclJGe89lD{sFpA5{CX&z(du^JkT;-v0uPqg$-`?CQt*&vF-9}>5>tzh*wKxPMWQb~
zc4v5e*DV)vDxc1)DVe|Jvx(Rfo8{=RP#9B`#*5(&m!P!fxIXy{EQR?W8~t-$^~%&V
zhtdyFB_?<jv-YoUf4S~*#?t|I2|W@+JOu-wzRhrOi0J*-5<+SEJH~bCM(lgb9=5!y
zaoKT+eE}35y_2M|w2#(V4*>o%;K&w+GqC|?EFBJ<2TJ(!tXi>SxkYKeiu<c@m-U(_
z3^FK=-a%$@<}0f^E9yND^1BeT5bRS7q#aCy>(ob!J!<8c{V=g1eet|eIRNHbIulLS
zY{)Q||5W3P@|Wyb8CmVt2FwmHLtx-QE;+EVH&o(XOC@?FjK6i#?r20U5R&YLTIwp_
z3Ew@kn%>bof}#vml!WmRUhZcA>;NixAxO+#2FKgk8RrL=EKUbcG8&&%B_DK?6PJ1W
z(yQoT^=NV?RRH}4;4w@?9CJRDhACrQ-o?nvp%hMNTXr6#=T;-zPd7*L*Q8r}U-dcy
zfN#$|qoE~VZ{@;yqY%9@?0C!3nv94CJ0W<X<q_78QeIOBLcMMUkcI9F^1WgqRX2v1
zc!4<g!Og({Gj9N|cLHyO3&Icp8zDqgqcAs9bf>02Z|4Ee3p4X}vMTuy)Xr@iO%qv~
zL_?9JlYv{H&j9op{t$1vG{S(lo|M<gR|rOg%iH-EeAwK4f{oPVeR5NmIUE#wuZeOt
zv2Fb;$Og%Pn<;5Mi5J9Rt2|IzJE4kv4HF_Ya(zeZ3~w@!h%}^M(vrwz;fudZQ;wc~
zL7dJw-YU?VSOSy9oO!f{{@hV&yi)xJ=EzxCRMQf)<(q3RwP51V)Wa9cRpL$3Hj;%G
zM^ko}(*mV0$o>#R&)l^)@&+ZT8FV9sQZD?qwe&faC8u-gsW0L)<3>*`1)RZOA`}yO
z@X~JrnBKy`$yylJ@KXK>QBUd`Q|i5EBHO3vM=sW`i^XoU@UxRriniq{X8N};MS|%Z
zEoet2MkvLl9@*#5cILCqJ>ZZ*-FR5+&241HjVgoze3^JpRQVyfCH#G8IaN~U3b+jb
z`yYeRqUSD0_NL?|iza6~>1zu0yqX~sl<cRx>vdAMh}McJsLtUeunKShOu!7rBvOHC
zQ5I+CyOKDyw^5MW7<jNaEN7T#B5{+^eRZ{BZWR0=?!~*!lV3sn$><$oizs}r@&O9r
z;`RmLHQA3G3YxGIqa*^l6Yx%e!6{4*ZHa<((jw@LeIE3$rrkC@bhJ)Ut9N9O3_LYV
zOMP*h$3=1-SzWWUMUUa_Ne2gYBFOitfhtKTw|MQ;%jOel=|lQ+*AfGts+}G4?I%4p
z7XgqH1o!C=TvUB`Iw4HB-X2Bf7&A=oV>|S<T)xUP*!!RLEHrpkf@AxrXrw)YKT;}e
zj25Z=IqmRgGnn6iG5?Xt3m($|tO3$6P%HxX9GxT<X3`r1W6yUu(4I^>Fzx0V^F4J{
zq^2wG_0-GfGr2xqO;&2llMb*8Z(x4tDs193{I(7wuIeTR8iLo(O!H6%{K{J=3TGPY
zX4ZnXg!6qkxZ16D6Ah}9t>>TAHm&R&4VZ_rNg_#c7Eso(`wGmWoB9WGatlvf%i>ML
zllz}iXh)a4SKB|UbJqXD61>wbQc)Q983ms8Tj76}&hFeID4_#>AMjwr!#V#sY~3%L
zThG>7nw+jnBA(*<Y(p*Z<5eNLOW_?rMg^V+vwE{SEw74p{|dTZGr41TiyJ#TFZrgr
z!_YcsfYHUg+sK7v67sxNmXO&ji2>gPLJ#~cUIQ1rqV2w8H2E&_dU|)+k(mLCa&K2G
z_A|YNa+-@CeHN%dll?XMl}DTK8q0^0ej~G#m(e;wqq!Nq19C1xU5+j?nm4&<wy9TJ
z9Qt3xS&u7m@V@Gg3s<Zp6e}M82sr_@M>4^r(EVzSxr4_C^0Uu{AH}-o^l=A#45hMR
z|Dv`Te?Ha>%t^>pWYa<qV6WoJ?WLn&Dl06l6U`&iU95$^d?(q}-bUTK%zVw=hcA79
zMNt%IgYi%zB%j26$c33=$V0=bMQD1jznZ9?Q7sE4k7B_j);jZxt;bP)Sxr)(Uy-|e
zrLGC4^d>+I=1q9kdJ<=U9p_fyW79QZERxug+cmW*R>t8NmpLco^2GF|M<Dp<+dJOr
z7toMMPq(*=pO#xQ^5t9klG`gj4A?g?56#X9-xe8cztC*6W25lCp4;tE30R5m_XZLf
zd{mYJeQg_`;uYK43RM^{W9@T0K0PgO`FlxgDC4K^VvE>TrN(8Tktf^$F|vuvqpjy1
zk@U@4D+@GSmu5%0Qd|-D>*D&{?i)!CRXyc@oAZd-Mpuna+H+wMrjO&mNO!@h0**On
zfLH2*P?R0zo5k--++L_3>rHTqTQF{p{H#@Z_T7U*l1IEJlY=nWghLhLv=AV8(kg><
zxrZKTpK1)QeTkC$Qp4e#-)L%go5gFi9I44x$RJ)>{HP(|stlzw=FkFHS`YvzdVyOR
zVNlHDR_02KT5yqnk!s`!6|dZ^(TZ@9DD&|WeW|6#?;ML-87E+lXd&z?^mJ#qyf+HM
zjx$72gce+Vg$qPmYC0D<uJJkA#0H-&dH5tEBC}y>3GkZd1{#hoKD2?I$A#yAaMxZQ
zqE&n2Qtn2g8$0A4PxUlVg`szp^WKZwHN=xX;O<7mmJT|hch>90r}rl)GRl?x@3Tv_
zpPr<86{6ohyh}xPM8VOFZh9pnCP2i8vJ>qDp$_0ebaBiyFUvc_kHuEgWDmH<ANjOx
z^O?83P1dU0S7%cz&&PUhIjN=8@x<K5>~;%EMD4Ikf52f%e{MtnskGD++UBTMoqh`Q
zI@|igx1~DAVJ3hY1|#a2v-V^FfUoPY%wsp+izJZh2Q#C*^f+1XE+i(&9%rz9uB?I=
z0Ew6(+mNA1c12Df9n?s7IkTg#<_h4|X|~{evz04w%`R|Jg9lzQQ@!z6yvd5s(>jM-
zcKGgn(k&;P7WOWH>Z>N?BQ}7@c%bfKv6YAWw8B;1Hekt#i(LLZ3xlum>MS>LyiN9K
zX^UK~cYj^~_)E8@OLLwkJCq%N>J8LUhXQ{Omw=)ByyTiWmBCoH=T2Ya%yi<xTU9>M
zaYfTX%E5&;HqurH4R9^O5x^N*M!>OWH!==XB&`7|-!1@cJPO~md<94t2$^|r>7k&J
z7jP<FkfVxRunWzY$K1lBZY3G|z^pPWCtGfvG*1zI=RzWxY&$z8dl9r!tsG;21$`|T
zU5pazbRMBrUN%3mI3p>$<Fd`$lmd!NG<tbDHdgCsLR;>r{oDKBCZm5M0{>fu%x}vy
z|F`G=Qo-}z>H+mf%TxY(J^vB;N-!ZafMyQ%0L^JZ-t;CzEnJdTYyJ62#`A|lJ5)@S
zB=w*BdUO4PnLvgk5Dp^629m>KPNf;Mo}Gyk*BDpOQJYu}rpsc{9<DTB8Mz!UZsRZs
zR3w8&31C%_{>1&cV~}e#;}Z1@Q~f=BfmAkZ8mRC~Spu8PN}vZfX8-RtfJgqF8}*e8
zkmB(<7$r_K6E9Fbi8P8-IlMWSA9iWnKX2VM;px#O7xAsbUYU^^KQ1PcPP{65I=tC@
zS*w(^!{SUv`$6!SG9lZA%#cx?WqDoLS?Y%I^jS!ez56ctEj$FLzWfyw)XJ8sGR~i@
zhnOsTH|c49ZNqK0hU?OYx}kgLbt#Nq2$0fb(p}TsclxUu_&-8-|Lfu>;XZClHOZj?
zvOb6vcc*wA57Znj2TSF7at-qM;6=W9E!J6?wq6B4KNlul${{4cowOf79KrSM09Mz{
zjs?KJ?@4#;#HrTS5DtgZJD1ZPBdAWAo%CLtySMP@q3p=G#6OVc{k6vScLV_b$P&H(
zMa2C-`u%>7zyF}TTl3Du9{==ju6-<ds+n04XaBSdg8%!a<9`i>f27AhLRe-aj*E9w
z_QuRKI?k(~@JYBSJa`vG>}gz00IX9znd5+bIRyatP!@7K#AmFi<M~d231p>+on817
z5*v$%*FY&GNck%$GqH|9)WKvE8IDmB18*Wfe}VQqtSDEtg?A`t0^%1}=7@l9FAk6o
z__jGEK#!&py38{Qe}5W4B*GK$K)(6g_A-T?$BZH9SO@4HL+w`(qK_!?eS@PclJ83Z
zUuy}-0ktU{x%w4!;`{b!Pwd_N?yA3Ujr_?2A>MB<`unzy^tAlXzXdUnf0%x*x19Ae
z8?z&%DW2|TdFi@M<eF@|kx0!qhXCDfv<$qs9SUiOgL?V1T}xUa<Buq=lhHeJE3ZGS
z4Xx7q`TPBk@4{cezadghcUo4IRb{sWnBsxq)<Eb5RC0Kl_myn-jH;!mbmjf>D#Ul=
zp5o^~{GSnvHdYk8i8_ZJQ0%q+7{W#4=_$mw@mq42rO?ya?+}Jl^OKGD>Fpe{^Hyt5
zzB{=Y&d$ybidJ|}=3n?N*RTZ22ry&oiU3Ar&R5U@NooK0ZV3)_#Y%vsBMU$rv>5#g
zdVi#*_{@(7q#T%jaXCu*lQa|QpgiqY(5WQg17;({$Ic~XI&FwuK}W>?An<Xu#{M_k
zw}I5`cbKw2(yHN~y%yQ63&bnOdI>?iK;~n}BGSV1dz~gHTN9wE94p{vQ~o|a2mkDq
z|6cF=UDsc0NT7y=7s8D>+T`P}qZi7<UGDe#x@0vS9qZ+LD0EJ@&9$vZhsU0+Z=_&M
z3BnKWqz-ru*}HOI=WrK^zX3saSXNRJ7}6Mt_&NZRU}-$28tSn+g0m8vAti=i3;zlV
zFa_i<0dXiTEYSb646@+JBtU?wc;lkLE3+>bemq_Pzz7%!vC#KV-)DSYL*R-Ih9iVo
z6#;(s_lKd&j}f-ugVPt!5Wct(&$d=&MUsys{BShp$IB=$d7st(^YQ>H&d<B|v+sR3
zLVpg8KgXM&L*r*y{$cX^Zx4;)R@}WqaTqC11x0}|>=YyW#f~JMver5EvaMIhrIN9X
zXMs6IFSwVu-)vXwS(*I;D*yH0PeGS&<f4Afc>klve@)Qkca!}5y;hL_%z1v#Rpz&l
zlK=GQzY*fKIP;?r?|0I>|B?`|&o@H6rvHnCc>k)5@)vETJS|QSHFSSdT>DMRdU<*s
zKrOd+?i$8E#D+Lf^~&73=hS5e;l88c+&=2yhZ^sx6wda};cWo_wobq^#0-MJ@!$qC
zY;`x?89@30Xsc16%8p|NL=c$Oe|#I0N2uUB!VMEcpamcYkM6=T*ZmPa&^=)&ksQbp
zbsOgDsmr+kZ0P4b`q__u4w0W><bPa*T$`3kKR><fBC0KW-_N&aCf|45kUpcC-e!dF
zUcB?Ti-=W4bx8lbOT+?;1b0LbDBx0wp8ndsU+kVw2K%Nu5Kp1k7G95uFd9sh**fZr
zNm?aQcqD-k{rx(;FCzB(1@o;We9OO|b|Bx*H#a_9zu^)2^pj;Bu9p}b3J3(%Lvcll
z$Q8%|3*;;4$kH#_vhjlK7qU8%hwv!)dm}iW7N~tjWN#7A19qZT0Nq9uOg!6iHbS=Z
zXEQ(V&Ch=Gb1?i26hDLL|H7G}+#}zouB!r2PF*w-jj<CY+m+~(h~f6VOdU)8UQ<D6
zvLJVuGy7D8jJ1Zfc_AB3rWff$!96{F-Cx`_)-aCTr_oJ)HaW(k#d)mu!Iz7fUWLjE
zcrS?u`tYpheK)4cuVj_KNH-V?_->vq`0+i_ch}^nzn`aLzMJRwiUi)#{cPqhYnY$+
z=9h&X(6@f}o4>Aoe~zg?Z3un_if=bNKLf>oJ5a=sb_#=A=Gh3;e~vdYJ4YQ%g42c;
zYf2lXUOr&<Qjd@A%J3vnxOUCxVA@8NS!5nSMhpJQi0W4b>;GUJ`0s%=|9OsYmHDG0
z!~rJ9|EvGMZTZj7Nx%L@Hu7h${HGZ+|9g69Y^-2Tf)d?prqse}-6Pu)w&TB0b8Pi(
zb&rf`#;K`-M3|m4{a!B`m_Q8@t`bjmN$VbrZ!`7(O|k4}-~HKlzwHBg2_KXd3af!x
zSE4Q#Bq)=!@IgCgUf>m7UrwH@L~6^&2Br9@h_F}tIrFX6?vJEY*_r9q_Osm&Cf1JJ
zi4Z@_HL#%uynaeuDe$_$Tdb5Lc`90$|8@4Mm%#g7Elp&VQcp!igt{M$HTXOHp+EA=
kSJA5H-fK`wO6Jd6*wLyV>nCd}ShzYb0DP4a%C9s32Q%?IH~;_u

diff --git a/.github/workflows/deprecated/pylint.yml b/.github/workflows/pylint.yml
similarity index 89%
rename from .github/workflows/deprecated/pylint.yml
rename to .github/workflows/pylint.yml
index 402bf72895..cdc3800869 100644
--- a/.github/workflows/deprecated/pylint.yml
+++ b/.github/workflows/pylint.yml
@@ -28,16 +28,13 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-master
             cd $path
-            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-dev
             cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: Analysing the code with pylint
diff --git a/.github/workflows/registry-runners/build_linux_runners.sh b/.github/workflows/registry-runners/build_linux_runners.sh
deleted file mode 100644
index fb4b6e1abc..0000000000
--- a/.github/workflows/registry-runners/build_linux_runners.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-tag="0.1.0"
-
-platform="linux/amd64"
-
-echo "build python:3.11"
-docker build --no-cache --platform $platform --build-arg BASE_IMAGE=python:3.11 -t fedml/action_runner_3.11_linux64:$tag -f ./Dockerfile .
-echo "build python:3.10"
-docker build --no-cache  --platform $platform --build-arg BASE_IMAGE=python:3.10 -t fedml/action_runner_3.10_linux64:$tag -f ./Dockerfile .
-echo "build python:3.9"
-docker build --no-cache --platform $platform --build-arg BASE_IMAGE=python:3.9 -t fedml/action_runner_3.9_linux64:$tag -f ./Dockerfile .
-echo "build python:3.8"
-docker build --no-cache --platform $platform --build-arg BASE_IMAGE=python:3.8 -t fedml/action_runner_3.8_linux64:$tag -f ./Dockerfile .
diff --git a/.github/workflows/registry-runners/build_test.sh b/.github/workflows/registry-runners/build_test.sh
deleted file mode 100755
index 1e17dc6847..0000000000
--- a/.github/workflows/registry-runners/build_test.sh
+++ /dev/null
@@ -1 +0,0 @@
-docker build -t fedml/action_runner_3.11_linux64:0.1 -f ./Dockerfile .
diff --git a/.github/workflows/registry-runners/run_linux_runners.sh b/.github/workflows/registry-runners/run_linux_runners.sh
deleted file mode 100644
index fa70388de8..0000000000
--- a/.github/workflows/registry-runners/run_linux_runners.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-REPO=$1
-ACCESS_TOKEN=$2
-API_KEY=$3
-DOCKER_PULL=false
-ARCH=linux64
-TAG="0.1.0"
-
-if [ $# != 3 ]; then
-  echo "Please provide two arguments."
-  echo "./runner-start.sh [YourGitRepo][YourGitHubRunnerToken][API_KEY]"
-  exit -1
-fi
-
-# List of Docker container names
-# containers=("fedml/action_runner_3.8_$ARCH:0.1.0" "fedml/action_runner_3.9_$ARCH:0.1.0" "fedml/action_runner_3.10_$ARCH:0.1.0" "fedml/action_runner_3.11_$ARCH:0.1.0")
-containers=("action_runner_3.8_$ARCH" "action_runner_3.9_$ARCH" "action_runner_3.10_$ARCH" "action_runner_3.11_$ARCH")
-python_versions=("python3.8" "python3.9" "python3.10" "python3.11")
-
-
-# Iterate through each container
-for container_index in "${!containers[@]}"; do
-
-    container=${containers[$container_index]}
-    # Find the running container
-    if [ "$DOCKER_PULL" = "true" ]; then
-        echo "docker pull fedml/$container:$TAG"
-        docker pull fedml/$container:$TAG
-    fi
-    # docker stop `sudo docker ps |grep ${TAG}- |awk -F' ' '{print $1}'`
-
-    running_container=$(docker ps -a | grep $container | awk -F ' ' '{print $1}')
-
-    if [ -n "$running_container" ]; then
-        # Stop the running container
-        echo "Stopping running container: $container, $running_container"
-        docker stop "$running_container"
-    else
-        echo "No running container found for: $container"
-    fi
-    sleep 5
-    # docker pull $container
-    ACT_NAME=${containers[$container_index]}
-    echo "docker run --rm --name $ACT_NAME --env API_KEY=$API_KEY --env REPO=$REPO --env ACCESS_TOKEN=$ACCESS_TOKEN -d fedml/${containers[$container_index]}:$TAG bash ./start.sh ${REPO} ${ACCESS_TOKEN} ${python_versions[$container_index]}"
-    docker run --rm --name $ACT_NAME --env API_KEY=$API_KEY --env REPO=$REPO --env ACCESS_TOKEN=$ACCESS_TOKEN -d fedml/${containers[$container_index]}:$TAG bash ./start.sh ${REPO} ${ACCESS_TOKEN} ${python_versions[$container_index]}
-
-done
-echo "Script completed."
-
diff --git a/.github/workflows/registry-runners/windows.ps1 b/.github/workflows/registry-runners/windows.ps1
deleted file mode 100644
index 40f0f00b8f..0000000000
--- a/.github/workflows/registry-runners/windows.ps1
+++ /dev/null
@@ -1,32 +0,0 @@
-
-$REPO = "Qigemingziba/FedML"
-$ACCESS_TOKEN  = "AGMK3PY3QDYUXXXEB5LWI4DGOQIFW"
-$WORKPLACE=$PWD
-
-Set-Location actions-runner-python38
-& conda activate python38
-./config.cmd --url https://github.com/$REPO --name windows-python38 --token $ACCESS_TOKEN --labels self-hosted,Windows,X64,python3.8
-Start-Process run.cmd start -WindowStyle Hidden
-
-Set-Location $WORKPLACE
-
-Set-Location actions-runner-python39
-& conda activate python39
-./config.cmd --url https://github.com/$REPO --name windows-python39 --token $ACCESS_TOKEN --labels self-hosted,Windows,X64,python3.9
-Start-Process run.cmd start -WindowStyle Hidden
-
-Set-Location $WORKPLACE
-
-Set-Location actions-runner-python310
-& conda activate python310
-./config.cmd --url https://github.com/$REPO --name windows-python310 --token $ACCESS_TOKEN --labels self-hosted,Windows,X64,python3.10
-Start-Process run.cmd start -WindowStyle Hidden
-
-Set-Location $WORKPLACE
-
-Set-Location actions-runner-python311
-& conda activate python311
-./config.cmd --url https://github.com/$REPO --name windows-python311 --token $ACCESS_TOKEN --labels self-hosted,Windows,X64,python3.11
-Start-Process run.cmd start -WindowStyle Hidden
-
-Set-Location $WORKPLACE
\ No newline at end of file
diff --git a/.github/workflows/deprecated/runner.md b/.github/workflows/runner.md
similarity index 100%
rename from .github/workflows/deprecated/runner.md
rename to .github/workflows/runner.md
diff --git a/.github/workflows/deprecated/smoke_test_cross_device_mnn_server_linux.yml b/.github/workflows/smoke_test_cross_device_mnn_server_linux.yml
similarity index 88%
rename from .github/workflows/deprecated/smoke_test_cross_device_mnn_server_linux.yml
rename to .github/workflows/smoke_test_cross_device_mnn_server_linux.yml
index 10c9860d0f..c8fff7e4f1 100644
--- a/.github/workflows/deprecated/smoke_test_cross_device_mnn_server_linux.yml
+++ b/.github/workflows/smoke_test_cross_device_mnn_server_linux.yml
@@ -52,16 +52,13 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-master
             cd $path
-            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-dev
             cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -70,9 +67,7 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          cd python
-          pip install -e ./
-          # bash ./devops/scripts/sync-fedml-pip.sh
+          bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: Install MNN
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
@@ -84,6 +79,6 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/quick_start/beehive
+          cd quick_start/beehive
           timeout 60 bash run_server.sh || code=$?; if [[ $code -ne 124 && $code -ne 0 ]]; then exit $code; fi
           
diff --git a/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_attack_linux.yml b/.github/workflows/smoke_test_cross_silo_fedavg_attack_linux.yml
similarity index 83%
rename from .github/workflows/deprecated/smoke_test_cross_silo_fedavg_attack_linux.yml
rename to .github/workflows/smoke_test_cross_silo_fedavg_attack_linux.yml
index ea0c4ed601..b1c29fcfd7 100644
--- a/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_attack_linux.yml
+++ b/.github/workflows/smoke_test_cross_silo_fedavg_attack_linux.yml
@@ -29,8 +29,8 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ ubuntu-latest ]
-        arch: [ X64 ]
+        os: [ ubuntu-latest]
+        arch: [X64]
         python-version: ['3.8']
         client-index: ['0', '1', '2', '3', '4']
 #        exclude:
@@ -38,7 +38,7 @@ jobs:
 #            python-version: '3.8'
 #          - os: windows-latest
 #            python-version: '3.6'
-    runs-on: [ self-hosted ]
+    runs-on: [ self-hosted, Linux ]
     timeout-minutes: 15
     steps:
       - name: Extract branch name
@@ -53,16 +53,13 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-master
             cd $path
-            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-dev
             cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -71,16 +68,13 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          cd python
-          pip install -e ./
-          # bash ./devops/srcipts/install-fedml.sh
-          # bash ./devops/scripts/sync-fedml-pip.sh
+          bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: server - cross-silo - attack
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/security/mqtt_s3_fedavg_attack_mnist_lr_example
+          cd examples/security/mqtt_s3_fedavg_attack_mnist_lr_example
           run_id=cross-silo-attack-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_server.sh $run_id
@@ -90,7 +84,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/security/mqtt_s3_fedavg_attack_mnist_lr_example
+          cd examples/security/mqtt_s3_fedavg_attack_mnist_lr_example
           run_id=cross-silo-attack-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 1 $run_id
@@ -100,7 +94,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/security/mqtt_s3_fedavg_attack_mnist_lr_example
+          cd examples/security/mqtt_s3_fedavg_attack_mnist_lr_example
           run_id=cross-silo-attack-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 2 $run_id
@@ -110,7 +104,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/security/mqtt_s3_fedavg_attack_mnist_lr_example
+          cd examples/security/mqtt_s3_fedavg_attack_mnist_lr_example
           run_id=cross-silo-attack-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 3 $run_id
@@ -120,7 +114,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/security/mqtt_s3_fedavg_attack_mnist_lr_example
+          cd examples/security/mqtt_s3_fedavg_attack_mnist_lr_example
           run_id=cross-silo-attack-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 4 $run_id
diff --git a/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_cdp_linux.yml b/.github/workflows/smoke_test_cross_silo_fedavg_cdp_linux.yml
similarity index 87%
rename from .github/workflows/deprecated/smoke_test_cross_silo_fedavg_cdp_linux.yml
rename to .github/workflows/smoke_test_cross_silo_fedavg_cdp_linux.yml
index 051c0418d2..67ee9e4a0f 100644
--- a/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_cdp_linux.yml
+++ b/.github/workflows/smoke_test_cross_silo_fedavg_cdp_linux.yml
@@ -53,16 +53,13 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-master
             cd $path
-            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-dev
             cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -71,13 +68,13 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          # bash ./devops/scripts/sync-fedml-pip.sh
+          bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: server - cross-silo - cdp
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/privacy/mqtt_s3_fedavg_cdp_mnist_lr_example
+          cd examples/privacy/mqtt_s3_fedavg_cdp_mnist_lr_example
           run_id=cross-silo-ho-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_server.sh $run_id
@@ -87,7 +84,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/privacy/mqtt_s3_fedavg_cdp_mnist_lr_example
+          cd examples/privacy/mqtt_s3_fedavg_cdp_mnist_lr_example
           run_id=cross-silo-ho-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 1 $run_id
@@ -97,7 +94,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/privacy/mqtt_s3_fedavg_cdp_mnist_lr_example
+          cd examples/privacy/mqtt_s3_fedavg_cdp_mnist_lr_example
           run_id=cross-silo-ho-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 2 $run_id
diff --git a/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_defense_linux.yml b/.github/workflows/smoke_test_cross_silo_fedavg_defense_linux.yml
similarity index 86%
rename from .github/workflows/deprecated/smoke_test_cross_silo_fedavg_defense_linux.yml
rename to .github/workflows/smoke_test_cross_silo_fedavg_defense_linux.yml
index b9348d7bf2..fac19d9552 100644
--- a/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_defense_linux.yml
+++ b/.github/workflows/smoke_test_cross_silo_fedavg_defense_linux.yml
@@ -53,16 +53,13 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-master
             cd $path
-            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-dev
             cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -71,13 +68,13 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          # bash ./devops/scripts/sync-fedml-pip.sh
+          bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: server - cross-silo - defense
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/security/mqtt_s3_fedavg_defense_mnist_lr_example
+          cd examples/security/mqtt_s3_fedavg_defense_mnist_lr_example
           run_id=cross-silo-defense-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_server.sh $run_id
@@ -87,7 +84,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/security/mqtt_s3_fedavg_defense_mnist_lr_example
+          cd examples/security/mqtt_s3_fedavg_defense_mnist_lr_example
           run_id=cross-silo-defense-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 1 $run_id
@@ -97,7 +94,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/security/mqtt_s3_fedavg_defense_mnist_lr_example
+          cd examples/security/mqtt_s3_fedavg_defense_mnist_lr_example
           run_id=cross-silo-defense-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 2 $run_id
@@ -107,7 +104,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/security/mqtt_s3_fedavg_defense_mnist_lr_example
+          cd examples/security/mqtt_s3_fedavg_defense_mnist_lr_example
           run_id=cross-silo-defense-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 3 $run_id
@@ -117,7 +114,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/security/mqtt_s3_fedavg_defense_mnist_lr_example
+          cd examples/security/mqtt_s3_fedavg_defense_mnist_lr_example
           run_id=cross-silo-defense-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 4 $run_id
diff --git a/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_ldp_linux.yml b/.github/workflows/smoke_test_cross_silo_fedavg_ldp_linux.yml
similarity index 87%
rename from .github/workflows/deprecated/smoke_test_cross_silo_fedavg_ldp_linux.yml
rename to .github/workflows/smoke_test_cross_silo_fedavg_ldp_linux.yml
index f849c4db71..def8aca733 100644
--- a/.github/workflows/deprecated/smoke_test_cross_silo_fedavg_ldp_linux.yml
+++ b/.github/workflows/smoke_test_cross_silo_fedavg_ldp_linux.yml
@@ -53,16 +53,13 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-master
             cd $path
-            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-dev
             cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -71,13 +68,13 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          # bash ./devops/scripts/sync-fedml-pip.sh
+          bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: server - cross-silo - ldp
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/privacy/mqtt_s3_fedavg_ldp_mnist_lr_example
+          cd examples/privacy/mqtt_s3_fedavg_ldp_mnist_lr_example
           run_id=cross-silo-ho-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_server.sh $run_id
@@ -87,7 +84,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/privacy/mqtt_s3_fedavg_ldp_mnist_lr_example
+          cd examples/privacy/mqtt_s3_fedavg_ldp_mnist_lr_example
           run_id=cross-silo-ho-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 1 $run_id
@@ -97,7 +94,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/privacy/mqtt_s3_fedavg_ldp_mnist_lr_example
+          cd examples/privacy/mqtt_s3_fedavg_ldp_mnist_lr_example
           run_id=cross-silo-ho-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 2 $run_id
diff --git a/.github/workflows/deprecated/smoke_test_cross_silo_ho_linux.yml b/.github/workflows/smoke_test_cross_silo_ho_linux.yml
similarity index 89%
rename from .github/workflows/deprecated/smoke_test_cross_silo_ho_linux.yml
rename to .github/workflows/smoke_test_cross_silo_ho_linux.yml
index 7d28a37292..e34a22cdbe 100644
--- a/.github/workflows/deprecated/smoke_test_cross_silo_ho_linux.yml
+++ b/.github/workflows/smoke_test_cross_silo_ho_linux.yml
@@ -53,16 +53,13 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-master
             cd $path
-            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-dev
             cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -71,13 +68,13 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          # bash ./devops/scripts/sync-fedml-pip.sh
+          bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: server - cross-silo - ho
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/quick_start/octopus
+          cd quick_start/octopus
           run_id=cross-silo-ho-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_server.sh $run_id
@@ -87,7 +84,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/quick_start/octopus
+          cd quick_start/octopus
           run_id=cross-silo-ho-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 1 $run_id
@@ -97,7 +94,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/quick_start/octopus
+          cd quick_start/octopus
           run_id=cross-silo-ho-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 2 $run_id
diff --git a/.github/workflows/deprecated/smoke_test_cross_silo_ho_win.yml b/.github/workflows/smoke_test_cross_silo_ho_win.yml
similarity index 88%
rename from .github/workflows/deprecated/smoke_test_cross_silo_ho_win.yml
rename to .github/workflows/smoke_test_cross_silo_ho_win.yml
index d9239bcb99..b8376438d7 100644
--- a/.github/workflows/deprecated/smoke_test_cross_silo_ho_win.yml
+++ b/.github/workflows/smoke_test_cross_silo_ho_win.yml
@@ -52,16 +52,13 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-master
             cd $path
-            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-dev
             cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -70,25 +67,25 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          # bash ./devops/scripts/sync-fedml-pip.sh
+          bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: server - cross-silo - ho
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/federate/quick_start/octopus
+          cd quick_start/octopus
           .\run_server.bat ${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if:   ${{ matrix.client-index == '0' }}
 
       - name: client 1 - cross-silo - ho
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/federate/quick_start/octopus
+          cd quick_start/octopus
           .\run_client.bat 1 ${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if:   ${{ matrix.client-index == '1' }}
 
       - name: client 2 - cross-silo - ho
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/federate/quick_start/octopus
+          cd quick_start/octopus
           .\run_client.bat 2 ${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if: ${{ matrix.client-index == '2' }}
\ No newline at end of file
diff --git a/.github/workflows/deprecated/smoke_test_cross_silo_lightsecagg_linux.yml b/.github/workflows/smoke_test_cross_silo_lightsecagg_linux.yml
similarity index 88%
rename from .github/workflows/deprecated/smoke_test_cross_silo_lightsecagg_linux.yml
rename to .github/workflows/smoke_test_cross_silo_lightsecagg_linux.yml
index ae06088dc7..d672e2a772 100644
--- a/.github/workflows/deprecated/smoke_test_cross_silo_lightsecagg_linux.yml
+++ b/.github/workflows/smoke_test_cross_silo_lightsecagg_linux.yml
@@ -53,16 +53,13 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-master
             cd $path
-            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-dev
             cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -71,13 +68,13 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          # bash ./devops/scripts/sync-fedml-pip.sh
+          bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: server - cross-silo - lightsecagg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/cross_silo/light_sec_agg_example
+          cd examples/cross_silo/light_sec_agg_example
           run_id=cross-silo-lightsecagg-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_server.sh $run_id
@@ -87,7 +84,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/cross_silo/light_sec_agg_example
+          cd examples/cross_silo/light_sec_agg_example
           run_id=cross-silo-lightsecagg-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 1 $run_id
@@ -97,7 +94,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/cross_silo/light_sec_agg_example
+          cd examples/cross_silo/light_sec_agg_example
           run_id=cross-silo-lightsecagg-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 2 $run_id
diff --git a/.github/workflows/deprecated/smoke_test_cross_silo_lightsecagg_win.yml b/.github/workflows/smoke_test_cross_silo_lightsecagg_win.yml
similarity index 88%
rename from .github/workflows/deprecated/smoke_test_cross_silo_lightsecagg_win.yml
rename to .github/workflows/smoke_test_cross_silo_lightsecagg_win.yml
index 40d15a1f0f..8deab9acb2 100644
--- a/.github/workflows/deprecated/smoke_test_cross_silo_lightsecagg_win.yml
+++ b/.github/workflows/smoke_test_cross_silo_lightsecagg_win.yml
@@ -52,16 +52,13 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-master
             cd $path
-            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-dev
             cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -70,25 +67,25 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          # bash ./devops/scripts/sync-fedml-pip.sh
+          bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: server - cross-silo - ho
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/federate/cross_silo/light_sec_agg_example
+          cd examples/cross_silo/light_sec_agg_example
           .\run_server.bat cross-silo-lightsecagg-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if:   ${{ matrix.client-index == '0' }}
 
       - name: client 1 - cross-silo - ho
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/federate/cross_silo/light_sec_agg_example
+          cd examples/cross_silo/light_sec_agg_example
           .\run_client.bat 1 cross-silo-lightsecagg-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if:   ${{ matrix.client-index == '1' }}
 
       - name: client 2 - cross-silo - lightsecagg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/federate/cross_silo/light_sec_agg_example
+          cd examples/cross_silo/light_sec_agg_example
           .\run_client.bat 2 cross-silo-lightsecagg-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if: ${{ matrix.client-index == '2' }}
\ No newline at end of file
diff --git a/.github/workflows/deprecated/smoke_test_flow_linux.yml b/.github/workflows/smoke_test_flow_linux.yml
similarity index 92%
rename from .github/workflows/deprecated/smoke_test_flow_linux.yml
rename to .github/workflows/smoke_test_flow_linux.yml
index 5293787a11..df876a632b 100644
--- a/.github/workflows/deprecated/smoke_test_flow_linux.yml
+++ b/.github/workflows/smoke_test_flow_linux.yml
@@ -43,16 +43,13 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-master
             cd $path
-            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-dev
             cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -61,7 +58,7 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          # bash ./devops/scripts/sync-fedml-pip.sh
+          bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: server - Flow
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
diff --git a/.github/workflows/deprecated/smoke_test_ml_engines_linux_jax.yml b/.github/workflows/smoke_test_ml_engines_linux_jax.yml
similarity index 87%
rename from .github/workflows/deprecated/smoke_test_ml_engines_linux_jax.yml
rename to .github/workflows/smoke_test_ml_engines_linux_jax.yml
index cd4bd8d720..42a6d25ead 100644
--- a/.github/workflows/deprecated/smoke_test_ml_engines_linux_jax.yml
+++ b/.github/workflows/smoke_test_ml_engines_linux_jax.yml
@@ -53,16 +53,13 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-master
             cd $path
-            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-dev
             cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -71,14 +68,14 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          # bash ./devops/scripts/sync-fedml-pip.sh
+          bash ./devops/scripts/sync-fedml-pip.sh
           cd $homepath/python
 
       - name: server - jax - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
           run_id=jax-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_server.sh $run_id
@@ -88,7 +85,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
           run_id=jax-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 1 $run_id
@@ -98,7 +95,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
           run_id=jax-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 2 $run_id
diff --git a/.github/workflows/deprecated/smoke_test_ml_engines_linux_mxnet.yml b/.github/workflows/smoke_test_ml_engines_linux_mxnet.yml
similarity index 87%
rename from .github/workflows/deprecated/smoke_test_ml_engines_linux_mxnet.yml
rename to .github/workflows/smoke_test_ml_engines_linux_mxnet.yml
index 5ce217ea4b..bf30fd1b1a 100644
--- a/.github/workflows/deprecated/smoke_test_ml_engines_linux_mxnet.yml
+++ b/.github/workflows/smoke_test_ml_engines_linux_mxnet.yml
@@ -53,16 +53,13 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-master
             cd $path
-            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-dev
             cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -71,7 +68,7 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          # bash ./devops/scripts/sync-fedml-pip.sh
+          bash ./devops/scripts/sync-fedml-pip.sh
           cd $homepath/python
           pip install mxnet==2.0.0b1
 
@@ -79,7 +76,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
           run_id=mxnet-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_server.sh $run_id
@@ -89,7 +86,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
           run_id=mxnet-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 1 $run_id
@@ -99,7 +96,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
           run_id=mxnet-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 2 $run_id
diff --git a/.github/workflows/deprecated/smoke_test_ml_engines_linux_tf.yml b/.github/workflows/smoke_test_ml_engines_linux_tf.yml
similarity index 87%
rename from .github/workflows/deprecated/smoke_test_ml_engines_linux_tf.yml
rename to .github/workflows/smoke_test_ml_engines_linux_tf.yml
index 3b7519dd97..9d69ba3774 100644
--- a/.github/workflows/deprecated/smoke_test_ml_engines_linux_tf.yml
+++ b/.github/workflows/smoke_test_ml_engines_linux_tf.yml
@@ -53,16 +53,13 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-master
             cd $path
-            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-dev
             cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -71,14 +68,14 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          # bash ./devops/scripts/sync-fedml-pip.sh
+          bash ./devops/scripts/sync-fedml-pip.sh
           cd $homepath/python
 
       - name: server - tensorflow - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
           run_id=tf-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_server.sh $run_id
@@ -88,7 +85,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
           run_id=tf-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 1 $run_id
@@ -98,7 +95,7 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd python
-          cd examples/federate/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
           run_id=tf-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
           echo ${run_id}
           bash run_client.sh 2 $run_id
diff --git a/.github/workflows/deprecated/smoke_test_ml_engines_win.yml b/.github/workflows/smoke_test_ml_engines_win.yml
similarity index 90%
rename from .github/workflows/deprecated/smoke_test_ml_engines_win.yml
rename to .github/workflows/smoke_test_ml_engines_win.yml
index 8913cc6bec..f1f3bfabd4 100644
--- a/.github/workflows/deprecated/smoke_test_ml_engines_win.yml
+++ b/.github/workflows/smoke_test_ml_engines_win.yml
@@ -46,16 +46,13 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-master
             cd $path
-            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-dev
             cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -64,28 +61,28 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          # bash ./devops/scripts/sync-fedml-pip.sh
+          bash ./devops/scripts/sync-fedml-pip.sh
           cd $homepath/python
           pip install -e '.[tensorflow]'
 
       - name: server - tensorflow - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/federate/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
           python tf_server.py --cf config/fedml_config.yaml --rank 0 --role server --run_id tf-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if:   ${{ matrix.client-index == '0' }}
 
       - name: client 1 - tensorflow - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/federate/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
           python3 tf_client.py --cf config/fedml_config.yaml --rank 1 --role client --run_id tf-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if:   ${{ matrix.client-index == '1' }}
 
       - name: client 2 - tensorflow - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/federate/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/cross_silo/tf_mqtt_s3_fedavg_mnist_lr_example
           python3 tf_client.py --cf config/fedml_config.yaml --rank 2 --role client --run_id tf-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if: ${{ matrix.client-index == '2' }}
 
@@ -141,21 +138,21 @@ jobs:
       - name: server - jax - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/federate/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
           python tf_server.py --cf config/fedml_config.yaml --rank 0 --role server --run_id jax-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if: ${{ matrix.client-index == '0' }}
 
       - name: client 1 - jax - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/federate/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
           python3 tf_client.py --cf config/fedml_config.yaml --rank 1 --role client --run_id jax-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if: ${{ matrix.client-index == '1' }}
 
       - name: client 2 - jax - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/federate/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/cross_silo/jax_haiku_mqtt_s3_fedavg_mnist_lr_example
           python3 tf_client.py --cf config/fedml_config.yaml --rank 2 --role client --run_id jax-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if: ${{ matrix.client-index == '2' }}
 
@@ -211,20 +208,20 @@ jobs:
       - name: server - mxnet - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/federate/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
           python tf_server.py --cf config/fedml_config.yaml --rank 0 --role server --run_id mxnet-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if: ${{ matrix.client-index == '0' }}
 
       - name: client 1 - mxnet - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/federate/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
           python3 tf_client.py --cf config/fedml_config.yaml --rank 1 --role client --run_id mxnet-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if: ${{ matrix.client-index == '1' }}
 
       - name: client 2 - mxnet - fedavg
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/federate/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
+          cd examples/cross_silo/mxnet_mqtt_s3_fedavg_mnist_lr_example
           python3 tf_client.py --cf config/fedml_config.yaml --rank 2 --role client --run_id mxnet-ml-engine-${{ format('{0}{1}{2}{3}', github.run_id, matrix.os, matrix.arch, matrix.python-version) }}
         if: ${{ matrix.client-index == '2' }}
diff --git a/.github/workflows/deprecated/smoke_test_pip_cli_sp_linux.yml b/.github/workflows/smoke_test_pip_cli_sp_linux.yml
similarity index 80%
rename from .github/workflows/deprecated/smoke_test_pip_cli_sp_linux.yml
rename to .github/workflows/smoke_test_pip_cli_sp_linux.yml
index 006ecfb574..131d88de9b 100644
--- a/.github/workflows/deprecated/smoke_test_pip_cli_sp_linux.yml
+++ b/.github/workflows/smoke_test_pip_cli_sp_linux.yml
@@ -54,16 +54,13 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-master
             cd $path
-            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-dev
             cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -72,20 +69,20 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          # bash ./devops/scripts/sync-fedml-pip.sh
+          bash ./devops/scripts/sync-fedml-pip.sh
 
-      # - name: test "fedml login" and "fedml build"
-      #   working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
-      #   run: |
-      #     cd ${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}/python
-      #     cd tests/smoke_test/cli
-      #     bash login.sh
-      #     bash build.sh
+      - name: test "fedml login" and "fedml build"
+        working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
+        run: |
+          cd ${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}/python
+          cd tests/smoke_test/cli
+          bash login.sh
+          bash build.sh
       - name: test simulation-sp
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd ${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}/python
-          cd examples/federate/quick_start/parrot
+          cd quick_start/parrot
           python torch_fedavg_mnist_lr_one_line_example.py --cf fedml_config.yaml
           python torch_fedavg_mnist_lr_custum_data_and_model_example.py --cf fedml_config.yaml
 
@@ -93,40 +90,40 @@ jobs:
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd ${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}/python
-          cd examples/federate/simulation/sp_decentralized_mnist_lr_example
+          cd examples/simulation/sp_decentralized_mnist_lr_example
           python torch_fedavg_mnist_lr_step_by_step_example.py --cf fedml_config.yaml
 
       - name: test sp - sp_fednova_mnist_lr_example
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd ${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}/python
-          cd examples/federate/simulation/sp_fednova_mnist_lr_example
+          cd examples/simulation/sp_fednova_mnist_lr_example
           python torch_fednova_mnist_lr_step_by_step_example.py --cf fedml_config.yaml
           
       - name: test sp - sp_fedopt_mnist_lr_example
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd ${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}/python
-          cd examples/federate/simulation/sp_fedopt_mnist_lr_example
+          cd examples/simulation/sp_fedopt_mnist_lr_example
           python torch_fedopt_mnist_lr_step_by_step_example.py --cf fedml_config.yaml
 
       - name: test sp - sp_hierarchicalfl_mnist_lr_example
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd ${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}/python
-          cd examples/federate/simulation/sp_hierarchicalfl_mnist_lr_example
+          cd examples/simulation/sp_hierarchicalfl_mnist_lr_example
           python torch_hierarchicalfl_mnist_lr_step_by_step_example.py --cf fedml_config.yaml
 
       - name: test sp - sp_turboaggregate_mnist_lr_example
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd ${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}/python
-          cd examples/federate/simulation/sp_turboaggregate_mnist_lr_example
+          cd examples/simulation/sp_turboaggregate_mnist_lr_example
           python torch_turboaggregate_mnist_lr_step_by_step_example.py --cf fedml_config.yaml 
 
       - name: test sp - sp_vertical_mnist_lr_example
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
           cd ${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}/python
-          cd examples/federate/simulation/sp_vertical_mnist_lr_example
+          cd examples/simulation/sp_vertical_mnist_lr_example
           python torch_vertical_mnist_lr_step_by_step_example.py --cf fedml_config.yaml 
diff --git a/.github/workflows/deprecated/smoke_test_pip_cli_sp_win.yml b/.github/workflows/smoke_test_pip_cli_sp_win.yml
similarity index 90%
rename from .github/workflows/deprecated/smoke_test_pip_cli_sp_win.yml
rename to .github/workflows/smoke_test_pip_cli_sp_win.yml
index 3987f90f74..69dac083bb 100644
--- a/.github/workflows/deprecated/smoke_test_pip_cli_sp_win.yml
+++ b/.github/workflows/smoke_test_pip_cli_sp_win.yml
@@ -51,16 +51,13 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-master
             cd $path
-            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-dev
             cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -69,7 +66,7 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          # bash ./devops/scripts/sync-fedml-pip.sh
+          bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: test "fedml login" and "fedml build"
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
@@ -80,6 +77,6 @@ jobs:
       - name: test simulation-sp
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
         run: |
-          cd examples/federate/quick_start/parrot
+          cd quick_start/parrot
           python torch_fedavg_mnist_lr_one_line_example.py --cf fedml_config.yaml
           python torch_fedavg_mnist_lr_custum_data_and_model_example.py --cf fedml_config.yaml
diff --git a/.github/workflows/deprecated/smoke_test_security.yml b/.github/workflows/smoke_test_security.yml
similarity index 91%
rename from .github/workflows/deprecated/smoke_test_security.yml
rename to .github/workflows/smoke_test_security.yml
index 5d5c03ee38..6644a4b513 100644
--- a/.github/workflows/deprecated/smoke_test_security.yml
+++ b/.github/workflows/smoke_test_security.yml
@@ -54,16 +54,13 @@ jobs:
           echo ${{ steps.extract_branch.outputs.branch }}
           if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
             echo "running on master"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-master
             cd $path
-            git pull
             echo "dir=$path" >> $GITHUB_OUTPUT
           else
             echo "running on dev"
-            path=/home/fedml/FedML
+            path=/home/actions-runner/fedml-dev
             cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
             echo "dir=$path" >> $GITHUB_OUTPUT
           fi
       - name: sync git repo to local pip
@@ -72,7 +69,7 @@ jobs:
           homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
           echo $Homepath
           cd $homepath
-          # bash ./devops/scripts/sync-fedml-pip.sh
+          bash ./devops/scripts/sync-fedml-pip.sh
 
       - name: attack tests
         working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
diff --git a/.github/workflows/deprecated/smoke_test_simulation_mpi_linux.yml b/.github/workflows/smoke_test_simulation_mpi_linux.yml
similarity index 73%
rename from .github/workflows/deprecated/smoke_test_simulation_mpi_linux.yml
rename to .github/workflows/smoke_test_simulation_mpi_linux.yml
index b2e9676ae9..c48cc43149 100644
--- a/.github/workflows/deprecated/smoke_test_simulation_mpi_linux.yml
+++ b/.github/workflows/smoke_test_simulation_mpi_linux.yml
@@ -40,8 +40,8 @@ jobs:
         - os: ubuntu-latest
           mpi: mpich
           install-mpi: |
-              apt-get update
-              apt install -y mpich libmpich-dev
+              sudo apt-get update
+              sudo apt install -y mpich libmpich-dev
 #        - os: ubuntu-latest
 #          mpi: openmpi
 #          install-mpi: sudo apt install -y openmpi-bin libopenmpi-dev
@@ -50,12 +50,6 @@ jobs:
       shell: bash
       run: echo "branch=$(echo ${GITHUB_REF#refs/heads/})" >>$GITHUB_OUTPUT
       id: extract_branch
-    - name: Install MPI
-      if: matrix.mpi == 'mpich'
-      run: |
-          apt-get update
-          apt-get install -y mpich libmpich-dev
-
     - id: fedml_source_code_home
       name: cd to master or dev branch and git pull
       shell: bash
@@ -63,18 +57,15 @@ jobs:
         ls
         echo ${{ steps.extract_branch.outputs.branch }}
         if [[ ${{ steps.extract_branch.outputs.branch }} == "master" ]]; then
-            echo "running on master"
-            path=/home/fedml/FedML
-            cd $path
-            git pull
-            echo "dir=$path" >> $GITHUB_OUTPUT
+          echo "running on master"
+          path=/home/actions-runner/fedml-master
+          cd $path
+          echo "dir=$path" >> $GITHUB_OUTPUT
         else
-            echo "running on dev"
-            path=/home/fedml/FedML
-            cd $path
-            git pull
-            git checkout ${{ steps.extract_branch.outputs.branch }}
-            echo "dir=$path" >> $GITHUB_OUTPUT
+          echo "running on dev"
+          path=/home/actions-runner/fedml-dev
+          cd $path
+          echo "dir=$path" >> $GITHUB_OUTPUT
         fi
     - name: sync git repo to local pip
       working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
@@ -82,47 +73,47 @@ jobs:
         homepath=${{ format('{0}', steps.fedml_source_code_home.outputs.dir) }}
         echo $Homepath
         cd $homepath
-        # bash ./devops/scripts/sync-fedml-pip.sh
+        bash ./devops/scripts/sync-fedml-pip.sh
 
     - name: Test package - FedAvg
       working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
       run: |
         pwd
         cd python
-        cd examples/federate/simulation/mpi_torch_fedavg_mnist_lr_example
+        cd examples/simulation/mpi_torch_fedavg_mnist_lr_example
         sh run_custom_data_and_model_example.sh 4
 
     - name: Test package - Base
       working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
       run: |
         cd python
-        cd examples/federate/simulation/mpi_base_framework_example
+        cd examples/simulation/mpi_base_framework_example
         sh run.sh 4
 
     - name: Test package - Decentralized
       working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
       run: |
         cd python
-        cd examples/federate/simulation/mpi_decentralized_fl_example
+        cd examples/simulation/mpi_decentralized_fl_example
         sh run.sh 4
 
     - name: Test package - FedOPT
       working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
       run: |
         cd python
-        cd examples/federate/simulation/mpi_fedopt_datasets_and_models_example
+        cd examples/simulation/mpi_fedopt_datasets_and_models_example
         sh run_step_by_step_example.sh 4 config/mnist_lr/fedml_config.yaml
 
     - name: Test package - FedProx
       working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
       run: |
         cd python
-        cd examples/federate/simulation/mpi_fedprox_datasets_and_models_example
+        cd examples/simulation/mpi_fedprox_datasets_and_models_example
         sh run_step_by_step_example.sh 4 config/mnist_lr/fedml_config.yaml
 
     - name: Test package - FedGAN
       working-directory: ${{ steps.fedml_source_code_home.outputs.dir }}
       run: |
         cd python
-        cd examples/federate/simulation/mpi_torch_fedgan_mnist_gan_example
+        cd examples/simulation/mpi_torch_fedgan_mnist_gan_example
         sh run_step_by_step_example.sh 4
\ No newline at end of file
diff --git a/.github/workflows/registry-runners/Dockerfile b/devops/dockerfile/github-action-runner/Dockerfile
similarity index 70%
rename from .github/workflows/registry-runners/Dockerfile
rename to devops/dockerfile/github-action-runner/Dockerfile
index 5d3168853a..4e6648260f 100644
--- a/.github/workflows/registry-runners/Dockerfile
+++ b/devops/dockerfile/github-action-runner/Dockerfile
@@ -1,10 +1,9 @@
 # base
-ARG BASE_IMAGE=python:3.11
-
-FROM ${BASE_IMAGE}
+FROM fedml/fedml:latest-torch1.13.1-cuda11.6-cudnn8-devel
 
 # set the github runner version
-ARG RUNNER_VERSION="2.317.0"
+ARG RUNNER_VERSION="2.304.0"
+
 # update the base packages and add a non-sudo user
 #RUN apt-get update -y && apt-get upgrade -y && useradd -m docker
 
@@ -25,15 +24,18 @@ COPY start.sh start.sh
 
 # make the script executable
 RUN chmod +x start.sh
+
+RUN cp -f /usr/bin/python /usr/bin/python-backup && ln -s /usr/bin/python3 python
+
+RUN pip install scikit-learn
+
+RUN pip install tensorflow && pip install tensorflow_datasets && pip install jax[cpu] && pip install dm-haiku && pip install optax && pip install jaxlib
+
 # since the config and run script for actions are not allowed to be run by root,
 # set the user to "docker" so all subsequent commands are run as the docker user
 #USER docker
 
-RUN git clone https://github.com/Qigemingziba/FedML.git
-RUN cd FedML && git pull && git checkout dev/v0.7.0 && cd python && pip3 install -e ./ 
-ENV REPO=Qigemingziba/FedML ACCESS_TOKEN=AGMK3P4W5EM5PXNYTZXXIMTGNF4MW
+ENV REPO=FedML-AI/FedML ACCESS_TOKEN=1
 
 # set the entrypoint to the start.sh script
-CMD ./start.sh ${REPO} ${ACCESS_TOKEN} 
-
-
+CMD ./start.sh ${REPO} ${ACCESS_TOKEN}
\ No newline at end of file
diff --git a/devops/dockerfile/github-action-runner/README.md b/devops/dockerfile/github-action-runner/README.md
new file mode 100644
index 0000000000..d02e29665b
--- /dev/null
+++ b/devops/dockerfile/github-action-runner/README.md
@@ -0,0 +1,25 @@
+# Run self-host runner in your machine
+
+## Usage
+
+./runner-start.sh [YourGitRepo] [YourRunnerPrefix] [YourRunnerNum] [YourGitHubRunnerToken] [LocalDevSourceDir] [LocalReleaseSourceDir] [LocalDataDir]
+
+For the argument YourGitHubRunnerToken, you may navigate based on the following path.
+
+Settings -> Actions -> Runners -> New self-hosted runner. 
+
+In the Configure section, you should find the similar line:
+./config.sh --url https://github.com/FedML-AI/FedML --token AXRYPL6G2VHVGDFDQQS5XA3ELYI6M
+
+set YourGitHubRunnerToken to value of --token
+
+
+## Example
+
+Use the following commands to run 30 runners in the FedML-AI/FedML repo and run 6 runners in the FedML-AI/Front-End-Auto-Test repo:
+
+./runner-start.sh FedML-AI/FedML fedml-runner 30 AXRYPLZLZN6XVJB3BAIXSP3EMFC7U /home/fedml/FedML4GitHubAction-Dev /home/fedml/FedML4GitHubAction /home/fedml/fedml_data
+./runner-start.sh FedML-AI/Front-End-Auto-Test webtest-runner 6 AXRYPL57ZD35ZGDWZKRKFHLEMGLTK /home/fedml/FedML4GitHubAction-Dev /home/fedml/FedML4GitHubAction /home/fedml/fedml_data
+
+./runner-start.sh FedML-AI/FedML fedml-runner 30 AXRYPL6CCBH24ZVRSUEAYTTEMKD56 /home/chaoyanghe/sourcecode/FedML4GitHubAction-Dev /home/chaoyanghe/sourcecode/FedML4GitHubAction /home/chaoyanghe/fedml_data
+./runner-start.sh FedML-AI/Front-End-Auto-Test webtest-runner 6 AXRYPL57ZD35ZGDWZKRKFHLEMGLTK /home/chaoyanghe/sourcecode/FedML4GitHubAction-Dev /home/chaoyanghe/sourcecode/FedML4GitHubAction /home/chaoyanghe/fedml_data
diff --git a/devops/dockerfile/github-action-runner/build.sh b/devops/dockerfile/github-action-runner/build.sh
new file mode 100755
index 0000000000..5f6dae9615
--- /dev/null
+++ b/devops/dockerfile/github-action-runner/build.sh
@@ -0,0 +1,3 @@
+docker build -t fedml/github-action-runner:latest -f ./Dockerfile .
+docker login
+docker push fedml/github-action-runner:latest
\ No newline at end of file
diff --git a/devops/dockerfile/github-action-runner/runner-start.sh b/devops/dockerfile/github-action-runner/runner-start.sh
new file mode 100644
index 0000000000..18a0c4f958
--- /dev/null
+++ b/devops/dockerfile/github-action-runner/runner-start.sh
@@ -0,0 +1,23 @@
+REPO=$1
+TAG=$2
+NUM=$3
+ACCESS_TOKEN=$4
+LOCAL_DEV_SOURCE_DIR=$5
+LOCAL_RELEASE_SOURCE_DIR=$6
+LOCAL_DATA_DIR=$7
+
+if [ $# != 7 ]; then
+  echo "Please provide five arguments."
+  echo "./runner-start.sh [YourGitRepo] [YourRunnerPrefix] [YourRunnerNum] [YourGitHubRunnerToken] [LocalDevSourceDir] [LocalReleaseSourceDir] [LocalDataDir]"
+  exit -1
+fi
+
+sudo docker stop `sudo docker ps |grep ${TAG}- |awk -F' ' '{print $1}'`
+sudo docker pull fedml/github-action-runner:latest
+
+for((i=1;i<=$NUM;i++));
+do
+ACT_NAME=$TAG-$i
+sudo docker rm $ACT_NAME
+sudo docker run --name $ACT_NAME --env REPO=$REPO --env ACCESS_TOKEN=$ACCESS_TOKEN -v $LOCAL_DEV_SOURCE_DIR:/home/actions-runner/fedml-dev -v $LOCAL_RELEASE_SOURCE_DIR:/home/actions-runner/fedml-master -v $LOCAL_DATA_DIR:/home/fedml/fedml_data -v $LOCAL_DATA_DIR:/home/actions-runner/fedml_data -d fedml/github-action-runner:latest
+done
\ No newline at end of file
diff --git a/.github/workflows/registry-runners/start.sh b/devops/dockerfile/github-action-runner/start.sh
similarity index 76%
rename from .github/workflows/registry-runners/start.sh
rename to devops/dockerfile/github-action-runner/start.sh
index b65b0f1272..917d1cfe16 100644
--- a/.github/workflows/registry-runners/start.sh
+++ b/devops/dockerfile/github-action-runner/start.sh
@@ -2,15 +2,13 @@
 
 ORGANIZATION=$1
 ACCESS_TOKEN=$2
-PYTHON_VERSION=$3
 
 echo $ORGANIZATION
 echo $ACCESS_TOKEN
-echo $PYTHON_VERSION
 
 cd /home/fedml/actions-runner
 
-RUNNER_ALLOW_RUNASROOT="1" ./config.sh --url https://github.com/${ORGANIZATION} --token ${ACCESS_TOKEN} --labels self-hosted,Linux,X64,$PYTHON_VERSION
+RUNNER_ALLOW_RUNASROOT="1" ./config.sh --url https://github.com/${ORGANIZATION} --token ${ACCESS_TOKEN}
 
 cleanup() {
     echo "Removing runner..."
diff --git a/devops/scripts/install-fedml.sh b/devops/scripts/install-fedml.sh
deleted file mode 100644
index cafcfa3ac7..0000000000
--- a/devops/scripts/install-fedml.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-cd python
-pip install -e ./
\ No newline at end of file
diff --git a/devops/scripts/sync-fedml-pip.sh b/devops/scripts/sync-fedml-pip.sh
index 6b24ac52e7..0d909fff76 100755
--- a/devops/scripts/sync-fedml-pip.sh
+++ b/devops/scripts/sync-fedml-pip.sh
@@ -24,7 +24,7 @@ else
   fi
 fi
 
-mkdir -p ./fedml/fedml_data
-cp -Rf ./fedml/fedml_data_host/* ./fedml/fedml_data
+mkdir -p /home/fedml/fedml_data
+cp -Rf /home/fedml/fedml_data_host/* /home/fedml/fedml_data
 
 exit 0
diff --git a/python/examples/federate/cross_silo/cuda_rpc_fedavg_mnist_lr_example/README.md b/python/examples/federate/cross_silo/cuda_rpc_fedavg_mnist_lr_example/README.md
index a1fa30b6f2..c693d8d863 100644
--- a/python/examples/federate/cross_silo/cuda_rpc_fedavg_mnist_lr_example/README.md
+++ b/python/examples/federate/cross_silo/cuda_rpc_fedavg_mnist_lr_example/README.md
@@ -26,7 +26,7 @@ For info on `trpc_master_config_path` refer to `python/examples/cross_silo/cuda_
 
 Example is provided at:
 
-`python/examples/federate/cross_silo/cuda_rpc_fedavg_mnist_lr_example/one_line`
+`python/examples/cross_silo/cuda_rpc_fedavg_mnist_lr_example/one_line`
 ### Training Script
 
 At the client side, the client ID (a.k.a rank) starts from 1.
diff --git a/python/examples/launch/examples/launch/hello_world/launch_config/fedml_config.yaml b/python/examples/launch/examples/launch/hello_world/launch_config/fedml_config.yaml
deleted file mode 100644
index 21e1f2e33e..0000000000
--- a/python/examples/launch/examples/launch/hello_world/launch_config/fedml_config.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-containerize: false
-data_args:
-  dataset_name: mnist
-  dataset_path: ./dataset
-  dataset_type: csv
-environment_args:
-  bootstrap: fedml_bootstrap_generated.sh
-model_args:
-  input_dim: '784'
-  model_cache_path: /Users/alexliang/fedml_models
-  model_name: lr
-  output_dim: '10'
-training_params:
-  learning_rate: 0.004
diff --git a/python/examples/launch/hello_world/hello_world.py b/python/examples/launch/hello_world/hello_world.py
index 2f68f99055..71ffaf7c16 100644
--- a/python/examples/launch/hello_world/hello_world.py
+++ b/python/examples/launch/hello_world/hello_world.py
@@ -1,5 +1,6 @@
 import os
 import time
+
 import fedml
 
 if __name__ == "__main__":
diff --git a/python/examples/launch/serve_job_mnist.yaml b/python/examples/launch/serve_job_mnist.yaml
index bd8b52ca6c..98c1570a4f 100755
--- a/python/examples/launch/serve_job_mnist.yaml
+++ b/python/examples/launch/serve_job_mnist.yaml
@@ -35,4 +35,4 @@ computing:
   maximum_cost_per_hour: $3000   # max cost per hour for your job per gpu card
   #allow_cross_cloud_resources: true # true, false
   #device_type: CPU              # options: GPU, CPU, hybrid
-  resource_type: RTX-4090   # e.g., A100-80G, please check the resource type list by "fedml show-resource-type" or visiting URL: https://open.fedml.ai/accelerator_resource_type
\ No newline at end of file
+  resource_type: A100-80G       # e.g., A100-80G, please check the resource type list by "fedml show-resource-type" or visiting URL: https://open.fedml.ai/accelerator_resource_type
\ No newline at end of file
diff --git a/python/examples/train/mnist_train/examples/train/mnist_train/launch_config/fedml_config.yaml b/python/examples/train/mnist_train/examples/train/mnist_train/launch_config/fedml_config.yaml
deleted file mode 100644
index 188c19dde6..0000000000
--- a/python/examples/train/mnist_train/examples/train/mnist_train/launch_config/fedml_config.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-containerize: false
-environment_args:
-  bootstrap: fedml_bootstrap_generated.sh
diff --git a/python/examples/train/mnist_train/train.py b/python/examples/train/mnist_train/train.py
deleted file mode 100644
index 611a15c2b6..0000000000
--- a/python/examples/train/mnist_train/train.py
+++ /dev/null
@@ -1,98 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.optim as optim
-import torchvision
-import torchvision.transforms as transforms
-from torch.utils.data import DataLoader
-import fedml
-# Set random seed for reproducibility
-torch.manual_seed(42)
-
-# Define hyperparameters
-batch_size = 64
-learning_rate = 0.001
-num_epochs = 3
-
-# Prepare dataset and data loaders
-transform = transforms.Compose([
-    transforms.ToTensor(),  # Convert image to tensor, normalize to [0, 1]
-    transforms.Normalize((0.5,), (0.5,))  # Normalize with mean and std deviation of 0.5
-])
-
-train_dataset = torchvision.datasets.MNIST(root='./data', train=True, transform=transform, download=True)
-train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
-
-test_dataset = torchvision.datasets.MNIST(root='./data', train=False, transform=transform, download=True)
-test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
-
-# Define a simple convolutional neural network model
-class SimpleCNN(nn.Module):
-    def __init__(self):
-        super(SimpleCNN, self).__init__()
-        self.conv1 = nn.Conv2d(1, 16, kernel_size=5, padding=2)
-        self.conv2 = nn.Conv2d(16, 32, kernel_size=5, padding=2)
-        self.fc1 = nn.Linear(32 * 7 * 7, 128)
-        self.fc2 = nn.Linear(128, 10)
-
-    def forward(self, x):
-        x = torch.relu(self.conv1(x))
-        x = torch.max_pool2d(x, kernel_size=2, stride=2)
-        x = torch.relu(self.conv2(x))
-        x = torch.max_pool2d(x, kernel_size=2, stride=2)
-        x = x.view(-1, 32 * 7 * 7)
-        x = torch.relu(self.fc1(x))
-        x = self.fc2(x)
-        return x
-
-model = SimpleCNN()
-
-# Define loss function and optimizer
-criterion = nn.CrossEntropyLoss()
-optimizer = optim.Adam(model.parameters(), lr=learning_rate)
-
-# Train the model
-for epoch in range(num_epochs):
-
-    # Evaluate the model on the test set during training
-    model.eval()
-    with torch.no_grad():
-        correct = 0
-        total = 0
-        for images, labels in test_loader:
-            outputs = model(images)
-            _, predicted = torch.max(outputs.data, 1)
-            total += labels.size(0)
-            correct += (predicted == labels).sum().item()
-        acc = 100 * correct / total
-        fedml.mlops.log_metric({"epoch":epoch, "acc": acc})
-
-    model.train()
-    for images, labels in train_loader:
-        # Forward pass
-        outputs = model(images)
-        loss = criterion(outputs, labels)
-
-        # Backward and optimize
-        optimizer.zero_grad()
-        loss.backward()
-        optimizer.step()
-
-# Final evaluation on the test set
-model.eval()
-with torch.no_grad():
-    correct = 0
-    total = 0
-    for images, labels in test_loader:
-        outputs = model(images)
-        _, predicted = torch.max(outputs.data, 1)
-        total += labels.size(0)
-        correct += (predicted == labels).sum().item()
-
-    acc = 100 * correct / total
-    print('Final Test Accuracy: {:.2f} %'.format(acc))
-    fedml.mlops.log_metric({"epoch":num_epochs, "acc": acc})
-
-fedml.mlops.log_model(f"model-file@test", "./simple_cnn.pth")
-# # Save the model parameters
-# torch.save(model.state_dict(), 'simple_cnn.pth')
-# print('Model saved to simple_cnn.pth')
diff --git a/python/examples/train/mnist_train/train.yaml b/python/examples/train/mnist_train/train.yaml
deleted file mode 100644
index f9a5cc5ab5..0000000000
--- a/python/examples/train/mnist_train/train.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-# Local directory where your source code resides.
-# It should be the relative path to this job yaml file or the absolute path.
-# If your job doesn't contain any source code, it can be empty.
-workspace: .
-
-# Running entry commands which will be executed as the job entry point.
-# If an error occurs, you should exit with a non-zero code, e.g. exit 1.
-# Otherwise, you should exit with a zero code, e.g. exit 0.
-# Support multiple lines, which can not be empty.
-job: |
-    echo "current job id: $FEDML_CURRENT_RUN_ID"
-    echo "current edge id: $FEDML_CURRENT_EDGE_ID"
-    echo "Hello, Here is the launch platform."
-    echo "Current directory is as follows."
-    pwd
-    python3 train.py
-    echo "training job finished."
-
-# If you want to use the job created by the MLOps platform,
-# just uncomment the following three, then set job_id and config_id to your desired job id and related config.
-#job_args:
-#  job_id: 2070
-#  config_id: 111
-
-# If you want to create the job with specific name, just uncomment the following line and set job_name to your desired job name
-#job_name: cv_job
-
-job_type: train              # options: train, deploy, federate
-
-# train subtype: general_training, single_machine_training, cluster_distributed_training, cross_cloud_training
-# federate subtype: cross_silo, simulation, web, smart_phone
-# deploy subtype: none
-job_subtype: generate_training
-
-# containerize
-containerize: false
-
-# Bootstrap shell commands which will be executed before running entry commands.
-# Support multiple lines, which can be empty.
-bootstrap: |
-  # pip install -r requirements.txt
-  echo "Bootstrap finished."
-
-computing:
-  minimum_num_gpus: 1           # minimum # of GPUs to provision
-  maximum_cost_per_hour: $3000   # max cost per hour for your job per gpu card
-  #allow_cross_cloud_resources: true # true, false
-  #device_type: CPU              # options: GPU, CPU, hybrid
-  resource_type: RTX-4090       # e.g., A100-80G, please check the resource type list by "fedml show-resource-type" or visiting URL: https://open.fedml.ai/accelerator_resource_type
-
diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index c2fc2e3a0f..c96d65adc5 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -1,7 +1,6 @@
 import logging
 import platform
 
-import multiprocess
 import multiprocess as multiprocessing
 import os
 import random
@@ -38,7 +37,7 @@
 _global_training_type = None
 _global_comm_backend = None
 
-__version__ = "0.8.51b1"
+__version__ = "0.9.0"
 
 
 # This is the deployment environment used for different roles (RD/PM/BD/Public Developers). Potential VALUE: local, dev, test, release
@@ -461,26 +460,6 @@ def _init_multiprocessing():
             multiprocessing.set_start_method("fork", force=True)
 
 
-def get_multiprocessing_context():
-    if platform.system() == "Windows":
-        return multiprocessing.get_context("spawn")
-    else:
-        return multiprocessing.get_context("fork")
-
-
-def get_process(target=None, args=None):
-    if platform.system() == "Windows":
-        return multiprocessing.Process(target=target, args=args)
-    else:
-        #return multiprocessing.Process(target=target, args=args)
-        #multiprocessing.set_start_method("spawn", force=True)
-        #return multiprocess.context.SpawnContext.Process(target=target, args=args)
-        #multiprocessing.Manager().current_process().authkey = str.encode("abc")
-        new_process = multiprocessing.get_context("fork").Process(target=target, args=args)
-        #new_process.authkey = str.encode("abc")
-        return new_process
-
-
 def set_env_version(version):
     set_env_kv("FEDML_ENV_VERSION", version)
     load_env()
diff --git a/python/fedml/api/__init__.py b/python/fedml/api/__init__.py
index ff2b0c7307..b03c72b675 100755
--- a/python/fedml/api/__init__.py
+++ b/python/fedml/api/__init__.py
@@ -278,9 +278,6 @@ def model_deploy(name, endpoint_name, endpoint_id, local, master_ids, worker_ids
 def model_run(endpoint_id, json_string):
     model_module.run(endpoint_id, json_string)
 
-def get_endpoint(endpoint_id):
-    return model_module.get_endpoint(endpoint_id)
-
 
 def endpoint_delete(endpoint_id):
     model_module.delete_endpoint(endpoint_id)
diff --git a/python/fedml/api/modules/model.py b/python/fedml/api/modules/model.py
index 93892fc5d1..a02e674f47 100644
--- a/python/fedml/api/modules/model.py
+++ b/python/fedml/api/modules/model.py
@@ -320,19 +320,6 @@ def run(endpoint_id: str, json_string: str) -> bool:
         click.echo("Failed to run model.")
         return False
 
-def get_endpoint(endpoint_id: str):
-    api_key = get_api_key()
-    if api_key == "":
-        click.echo('''
-                Please use one of the ways below to login first:
-                (1) CLI: `fedml login $api_key`
-                (2) API: fedml.api.fedml_login(api_key=$api_key)
-                ''')
-        return False
-    
-    endpoint_detail_result = FedMLModelCards.get_instance().query_endpoint_detail_api(user_api_key=api_key, 
-                                                                endpoint_id=endpoint_id)
-    return endpoint_detail_result
 
 def delete_endpoint(endpoint_id: str) -> bool:
     api_key = get_api_key()
diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
index 50ca315a10..b8237d93ba 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
@@ -167,7 +167,7 @@ def autoscaler_reconcile_after_interval(self):
                     # Get cached token for authorization of autoscale request
                     cached_token = fedml_model_cache.get_end_point_token(e_id, e_name, model_name)
                     if cached_token is None:
-                        # logging.error(f"Failed to get the cached token for endpoint {e_id}.")
+                        logging.error(f"Failed to get the cached token for endpoint {e_id}.")
                         continue
 
                     req_header = {
@@ -229,7 +229,7 @@ def monitor_replicas_number():
 
             cached_token = FedMLModelCache.get_instance().get_end_point_token_with_eid(endpoint_id)
             if cached_token is None:
-                # logging.error(f"Failed to get the cached token for endpoint {endpoint_id}.")
+                logging.error(f"Failed to get the cached token for endpoint {endpoint_id}.")
                 return
 
             req_header = {
@@ -339,10 +339,6 @@ def monitor_replicas_perf(edge_id, mqtt_mgr=None):
     def monitor_slave_run_process_status(self):
         try:
             count = 0
-            try:
-                client_data_interface.FedMLClientDataInterface.get_instance().create_job_table()
-            except Exception as e:
-                pass
             job_list = client_data_interface.FedMLClientDataInterface.get_instance().get_jobs_from_db()
             for job in job_list.job_list:
                 count += 1
@@ -452,10 +448,6 @@ def monitor_master_run_process_status(self, server_id, device_info_reporter=None
         try:
             ComputeCacheManager.get_instance().set_redis_params()
             count = 0
-            try:
-                server_data_interface.FedMLServerDataInterface.get_instance().create_job_table()
-            except Exception as e:
-                pass
             job_list = server_data_interface.FedMLServerDataInterface.get_instance().get_jobs_from_db()
             for job in job_list.job_list:
                 count += 1
diff --git a/python/fedml/computing/scheduler/comm_utils/run_process_utils.py b/python/fedml/computing/scheduler/comm_utils/run_process_utils.py
index 6dd575f307..05cc342e36 100644
--- a/python/fedml/computing/scheduler/comm_utils/run_process_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/run_process_utils.py
@@ -135,15 +135,13 @@ def save_run_process(run_id, process_id, data_dir, info_dir,
             pass
 
     @staticmethod
-    def kill_process(process_id, exclude_current_pid=False):
+    def kill_process(process_id):
         try:
             process = psutil.Process(process_id)
             if process is None:
                 return
             child_processes = process.children(recursive=True)
             for sub_process in child_processes:
-                if exclude_current_pid and sub_process.pid == os.getpid():
-                    continue
                 if platform.system() == 'Windows':
                     os.system("taskkill /PID {} /T /F".format(sub_process.pid))
                 else:
diff --git a/python/fedml/computing/scheduler/comm_utils/sys_utils.py b/python/fedml/computing/scheduler/comm_utils/sys_utils.py
index 065482c23b..aaa37bc4db 100644
--- a/python/fedml/computing/scheduler/comm_utils/sys_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/sys_utils.py
@@ -114,8 +114,6 @@ def get_sys_runner_info():
     except:
         pass
 
-    enable_simulation_gpu, simulation_gpu_count = get_simulation_gpu_env()
-
     if enable_simulation_gpu:
         gpu_count = simulation_gpu_count
         gpu_total_mem = "80G"
@@ -130,26 +128,9 @@ def get_sys_runner_info():
         gpu_count, gpu_vendor, cpu_count, gpu_device_name
 
 
-def get_simulation_gpu_env():
-    _enable_simulation_gpu = enable_simulation_gpu
-    _simulation_gpu_count = simulation_gpu_count
-
-    env_enable_simulation_gpu = os.getenv("FEDML_ENABLE_SIMULATION_GPU", None)
-    if env_enable_simulation_gpu is not None:
-        _enable_simulation_gpu = True if env_enable_simulation_gpu == "1" or env_enable_simulation_gpu == 1 else False
-
-    env_simulation_gpu_count = os.getenv("FEDML_SIMULATION_GPU_COUNT", None)
-    if env_simulation_gpu_count is not None:
-        _simulation_gpu_count = int(env_simulation_gpu_count)
-
-    return _enable_simulation_gpu, _simulation_gpu_count
-
-
 # GPU list: [GPU(ID, uuid, load, memoryTotal, memoryUsed, memoryFree, driver,
 # gpu_name, serial, display_mode, display_active, temperature)]
 def get_gpu_list():
-    enable_simulation_gpu, simulation_gpu_count = get_simulation_gpu_env()
-
     if enable_simulation_gpu:
         ret_gpu_list = [
             {'ID': 0, 'uuid': 'GPU-dab987f0-be09-294a-96d6-f9afeef49877', 'load': 1.0,
@@ -203,8 +184,6 @@ def get_gpu_list():
 
 
 def get_available_gpu_id_list(limit=1) -> List[int]:
-    enable_simulation_gpu, simulation_gpu_count = get_simulation_gpu_env()
-
     if enable_simulation_gpu:
         available_gpu_ids = [0, 1, 2, 3, 4, 5, 6, 7]
         if simulation_gpu_count > 8:
diff --git a/python/fedml/computing/scheduler/master/base_master_agent.py b/python/fedml/computing/scheduler/master/base_master_agent.py
index 30cf5da1c9..3aff523c24 100755
--- a/python/fedml/computing/scheduler/master/base_master_agent.py
+++ b/python/fedml/computing/scheduler/master/base_master_agent.py
@@ -23,9 +23,7 @@ def __init__(self):
 
     def login(
             self, user_id, api_key=None, device_id=None,
-            os_name=None, role=None, runner_cmd=None,
-            communication_manager=None, sender_message_queue=None,
-            status_center_queue=None, sender_message_event=None
+            os_name=None, role=None, runner_cmd=None
     ):
         # Login account
         login_result = FedMLAccountManager.get_instance().login(
@@ -50,31 +48,20 @@ def login(
         # Initialize the protocol manager
         # noinspection PyBoardException
         try:
-            self._initialize_protocol_manager(
-                communication_manager=communication_manager,
-                sender_message_queue=sender_message_queue,
-                status_center_queue=status_center_queue,
-                sender_message_event=sender_message_event)
+            self._initialize_protocol_manager()
         except Exception as e:
             FedMLAccountManager.write_login_failed_file(is_client=False)
             self.protocol_mgr.stop()
             raise e
 
         # Start the protocol manager to process the messages from MLOps and slave agents.
-        if communication_manager is None:
-            self.protocol_mgr.start()
-
-        return login_result
+        self.protocol_mgr.start()
 
     @staticmethod
     def logout():
         GeneralConstants.cleanup_run_process(None, is_master=True)
         sys_utils.cleanup_all_fedml_server_api_processes()
 
-    def stop(self, kill_process=False):
-        if self.protocol_mgr is not None:
-            self.protocol_mgr.stop(kill_process=kill_process)
-
     def _create_protocol_manager(self, role, login_result):
         if self.protocol_mgr is not None:
             return
@@ -82,11 +69,7 @@ def _create_protocol_manager(self, role, login_result):
             login_result, agent_config=login_result.agent_config)
         self.protocol_mgr.run_as_edge_server_and_agent = True \
             if role == FedMLAccountManager.ROLE_EDGE_SERVER else False
-        self.protocol_mgr.run_as_cloud_agent = True \
-            if role == FedMLAccountManager.ROLE_CLOUD_AGENT or role == FedMLAccountManager.ROLE_GPU_MASTER_SERVER \
-            else False
-        self.use_local_process_as_cloud_server = True \
-            if role == FedMLAccountManager.ROLE_GPU_MASTER_SERVER else self.use_local_process_as_cloud_server
+        self.protocol_mgr.run_as_cloud_agent = True if role == FedMLAccountManager.ROLE_CLOUD_AGENT else False
         self.protocol_mgr.run_as_cloud_server = True if role == FedMLAccountManager.ROLE_CLOUD_SERVER else False
         self.protocol_mgr.args = login_result
         self.protocol_mgr.edge_id = login_result.edge_id
@@ -96,20 +79,12 @@ def _create_protocol_manager(self, role, login_result):
         self.protocol_mgr.enable_simulation_cloud_agent = self.enable_simulation_cloud_agent
         self.protocol_mgr.use_local_process_as_cloud_server = self.use_local_process_as_cloud_server
 
-    def _initialize_protocol_manager(
-            self, communication_manager=None, sender_message_queue=None,
-            status_center_queue=None, sender_message_event=None
-    ):
+    def _initialize_protocol_manager(self):
         # Init local database
         self._init_database()
 
         # Initialize the master protocol
-        self.protocol_mgr.set_parent_agent(self)
-        self.protocol_mgr.initialize(
-            communication_manager=communication_manager,
-            sender_message_queue=sender_message_queue,
-            status_center_queue=status_center_queue,
-            sender_message_event=sender_message_event)
+        self.protocol_mgr.initialize()
 
         # Report the IDLE status to MLOps
         self.mlops_metrics.report_server_training_status(
@@ -134,9 +109,6 @@ def _init_logs(self, agent_args, edge_id):
         in_args.server_agent_id = edge_id
         MLOpsRuntimeLog.get_instance(in_args).init_logs()
 
-    def get_protocol_manager(self):
-        return self.protocol_mgr
-
     @abstractmethod
     def _get_log_file_dir(self):
         pass
@@ -152,17 +124,3 @@ def _init_database(self):
     @abstractmethod
     def _generate_protocol_manager_instance(self, args, agent_config=None):
         return None
-
-    def start_master_server_instance(self, payload):
-        self.protocol_mgr.start_master_server_instance(payload)
-
-    def generate_agent_instance(self):
-        return FedMLBaseMasterAgent()
-
-    def process_job_complete_status(self, run_id, topic, payload):
-        if self.protocol_mgr is None:
-            return
-        if topic in self.protocol_mgr.get_subscribed_topics():
-            message_handler = self.protocol_mgr.get_listener_handler(topic)
-            if message_handler is not None:
-                message_handler(topic, payload)
diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py
index fdfff143aa..9ebab258bb 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py
@@ -1,3 +1,4 @@
+
 import json
 import logging
 import multiprocessing
@@ -6,9 +7,6 @@
 import os
 import time
 import traceback
-
-import setproctitle
-
 from ..scheduler_entry.constants import Constants
 from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
 from ..master.server_constants import ServerConstants
@@ -21,6 +19,7 @@
 from fedml.utils.debugging import debug
 from ..scheduler_core.status_center import JobStatus
 from ..scheduler_core.compute_cache_manager import ComputeCacheManager
+from multiprocessing import Process, Queue
 from ..scheduler_core.general_constants import GeneralConstants
 from ..scheduler_core.scheduler_base_job_runner import FedMLSchedulerBaseJobRunner, RunnerError, RunnerCompletedError
 from abc import ABC, abstractmethod
@@ -44,13 +43,13 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id
             is_master_runner=True
         )
 
-        self.run_edge_id_status_queue = multiprocessing.Manager().Queue()
-        self.run_metrics_queue = multiprocessing.Manager().Queue()
-        self.run_events_queue = multiprocessing.Manager().Queue()
-        self.run_artifacts_queue = multiprocessing.Manager().Queue()
-        self.run_logs_queue = multiprocessing.Manager().Queue()
-        self.run_edge_device_info_queue = multiprocessing.Manager().Queue()
-        self.run_edge_device_info_global_queue = multiprocessing.Manager().Queue()
+        self.run_edge_id_status_queue = Queue()
+        self.run_metrics_queue = Queue()
+        self.run_events_queue = Queue()
+        self.run_artifacts_queue = Queue()
+        self.run_logs_queue = Queue()
+        self.run_edge_device_info_queue = Queue()
+        self.run_edge_device_info_global_queue = Queue()
         self.run_extend_queue_list = None
         self.async_check_timeout = 0
         self.enable_async_cluster = False
@@ -69,12 +68,9 @@ def run(
             edge_device_info_queue=None, run_metrics_queue=None, run_event_queue=None,
             run_artifacts_queue=None, run_logs_queue=None, edge_device_info_global_queue=None,
             run_extend_queue_list=None, sender_message_center_queue=None, listener_message_queue=None,
-            status_center_queue=None, process_name=None
+            status_center_queue=None
     ):
-        if process_name is not None:
-            setproctitle.setproctitle(process_name)
-
-        print(f"Master job runner process id {os.getpid()}, name {process_name}, run id {self.run_id}")
+        print(f"Master job runner process id {os.getpid()}, run id {self.run_id}")
 
         if platform.system() != "Windows":
             os.setsid()
@@ -173,8 +169,7 @@ def run_impl(
             run_id, self.request_json, edge_id=self.edge_id, is_server_job=True,
             sender_message_queue=sender_message_queue,
             listener_message_queue=listener_message_queue,
-            status_center_queue=status_center_queue,
-            process_name=GeneralConstants.get_launch_master_user_process_name(run_id, self.edge_id)
+            status_center_queue=status_center_queue
         )
 
         # Check if the run status is normal
@@ -236,12 +231,9 @@ def run_server_job(
             edge_device_info_queue=None, run_metrics_queue=None, run_event_queue=None,
             run_artifacts_queue=None, run_logs_queue=None, edge_device_info_global_queue=None,
             run_extend_queue_list=None, sender_message_center_queue=None, listener_message_queue=None,
-            status_center_queue=None, process_name=None
+            status_center_queue=None
     ):
-        if process_name is not None:
-            setproctitle.setproctitle(process_name)
-
-        print(f"Server runner process id {os.getpid()}, name {process_name}. run id {self.run_id}")
+        print(f"Server runner process id {os.getpid()}, run id {self.run_id}")
 
         if platform.system() != "Windows":
             os.setsid()
@@ -413,9 +405,9 @@ def _generate_job_runner_instance(self, args, run_id=None, request_json=None, ag
         return None
 
     def start_runner_process(
-            self, run_id, request_json, edge_id=None, is_server_job=False,
-            sender_message_queue=None, listener_message_queue=None,
-            status_center_queue=None, process_name=None
+        self, run_id, request_json, edge_id=None, is_server_job=False,
+        sender_message_queue=None, listener_message_queue=None,
+        status_center_queue=None,
     ):
         server_runner = self._generate_job_runner_instance(
             self.args, run_id=run_id, request_json=request_json,
@@ -433,26 +425,14 @@ def start_runner_process(
         server_runner.edge_id_status_queue = self.run_edge_id_status_queue
         server_runner.edge_device_info_queue = self.run_edge_device_info_queue
         self.run_extend_queue_list = self._generate_extend_queue_list()
-        if platform.system() == "Windows":
-            self.run_process = multiprocessing.Process(
-                target=server_runner.run if not is_server_job else server_runner.run_server_job, args=(
-                    self.run_process_event, self.run_process_completed_event, self.run_edge_id_status_queue,
-                    self.run_edge_device_info_queue, self.run_metrics_queue, self.run_events_queue,
-                    self.run_artifacts_queue, self.run_logs_queue, self.run_edge_device_info_global_queue,
-                    self.run_extend_queue_list, sender_message_queue, listener_message_queue, status_center_queue,
-                    process_name,
-                )
-            )
-        else:
-            self.run_process = fedml.get_process(
-                target=server_runner.run if not is_server_job else server_runner.run_server_job, args=(
-                    self.run_process_event, self.run_process_completed_event, self.run_edge_id_status_queue,
-                    self.run_edge_device_info_queue, self.run_metrics_queue, self.run_events_queue,
-                    self.run_artifacts_queue, self.run_logs_queue, self.run_edge_device_info_global_queue,
-                    self.run_extend_queue_list, sender_message_queue, listener_message_queue, status_center_queue,
-                    process_name,
-                )
+        self.run_process = Process(
+            target=server_runner.run if not is_server_job else server_runner.run_server_job, args=(
+                self.run_process_event, self.run_process_completed_event, self.run_edge_id_status_queue,
+                self.run_edge_device_info_queue, self.run_metrics_queue, self.run_events_queue,
+                self.run_artifacts_queue, self.run_logs_queue, self.run_edge_device_info_global_queue,
+                self.run_extend_queue_list, sender_message_queue, listener_message_queue,  status_center_queue
             )
+        )
         self.run_process.start()
         ServerConstants.save_run_process(run_id, self.run_process.pid)
         return self.run_process
@@ -464,7 +444,7 @@ def put_run_edge_device_info_to_queue(self, run_id, edge_id, device_info):
         if int(edge_id) in edge_ids or str(edge_id) in edge_ids:
             run_id_str = str(run_id)
             if self.run_edge_device_info_queue is None:
-                self.run_edge_device_info_queue = multiprocessing.Manager().Queue()
+                self.run_edge_device_info_queue = Queue()
             self.run_edge_device_info_queue.put(device_info)
 
     def should_continue_run_job(self, run_id):
@@ -592,7 +572,7 @@ def callback_run_logs(self, topic, payload):
         run_id = str(topic).split('/')[-1]
         run_id_str = str(run_id)
         if self.run_logs_queue is None:
-            self.run_logs_queue = multiprocessing.Manager().Queue()
+            self.run_logs_queue = Queue()
         self.run_logs_queue.put(payload)
 
     def callback_run_metrics(self, topic, payload):
@@ -600,7 +580,7 @@ def callback_run_metrics(self, topic, payload):
         run_id = str(topic).split('/')[-1]
         run_id_str = str(run_id)
         if self.run_metrics_queue is None:
-            self.run_metrics_queue = multiprocessing.Manager().Queue()
+            self.run_metrics_queue = Queue()
         self.run_metrics_queue.put(payload)
 
     # def send_training_request_to_edges(self, active_edge_info_dict):
@@ -730,3 +710,6 @@ def should_process_async_cluster(self):
 
     def get_client_id_list(self, server_edge_id_list):
         return server_edge_id_list
+
+
+
diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
index 39f7438696..6831c9d034 100755
--- a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py
@@ -1,39 +1,27 @@
 import base64
 import json
 import logging
-import multiprocessing
-import platform
 import time
 from abc import ABC
 from multiprocessing import Process
-
-import fedml
 from .cloud_server_manager import FedMLCloudServerManager
-from ..comm_utils.run_process_utils import RunProcessUtils
 from ..scheduler_core.scheduler_base_job_runner_manager import FedMLSchedulerBaseJobRunnerManager
-from ..scheduler_core.account_manager import FedMLAccountManager
 
 
 class FedMLBaseMasterJobRunnerManager(FedMLSchedulerBaseJobRunnerManager, ABC):
     def __init__(self):
         FedMLSchedulerBaseJobRunnerManager.__init__(self)
-        if not hasattr(self, "master_agent_instance_map"):
-            self.master_agent_instance_map = dict()
 
     # Override
     def start_job_runner(
             self, run_id, request_json, args=None, edge_id=None, is_server_job=False,
             sender_message_queue=None, listener_message_queue=None, status_center_queue=None,
-            communication_manager=None, master_agent_instance=None, should_start_cloud_server=False,
-            use_local_process_as_cloud_server=False, cuda_visible_gpu_ids_str=None, process_name=None
+            should_start_cloud_server=False, use_local_process_as_cloud_server=False,
+            cuda_visible_gpu_ids_str=None
     ):
         if should_start_cloud_server:
-            self._start_cloud_server(
-                args, run_id, request_json, edge_id=edge_id,
-                use_local_process_as_cloud_server=use_local_process_as_cloud_server,
-                sender_message_queue=sender_message_queue, listener_message_queue=listener_message_queue,
-                status_center_queue=status_center_queue, communication_manager=communication_manager,
-                master_agent_instance=master_agent_instance, process_name=process_name)
+            self._start_cloud_server(args, run_id, request_json, edge_id=edge_id,
+                                     use_local_process_as_cloud_server=use_local_process_as_cloud_server)
             return
 
         run_id_str = str(run_id)
@@ -45,58 +33,34 @@ def start_job_runner(
             run_id, request_json, edge_id=edge_id, is_server_job=is_server_job,
             sender_message_queue=sender_message_queue,
             listener_message_queue=listener_message_queue,
-            status_center_queue=status_center_queue,
-            process_name=process_name
+            status_center_queue=status_center_queue
         )
 
     def stop_job_runner(
             self, run_id, args=None, server_id=None, request_json=None,
-            run_as_cloud_agent=False, run_as_cloud_server=False,
-            use_local_process_as_cloud_server=False
+            run_as_cloud_agent=False, run_as_cloud_server=False
     ):
         super().stop_job_runner(run_id)
 
         if run_as_cloud_agent or run_as_cloud_server:
-            if not use_local_process_as_cloud_server:
-                stopping_process = Process(
-                    target=FedMLCloudServerManager.stop_cloud_server,
-                    args=(run_id, server_id, args.agent_config))
-                stopping_process.start()
-
-            run_id_str = str(run_id)
-            if self.master_agent_instance_map.get(run_id_str, None) is not None:
-                self.master_agent_instance_map.get(run_id_str).stop(kill_process=True)
-                self.master_agent_instance_map.pop(run_id_str)
-
-            if use_local_process_as_cloud_server:
-                time.sleep(1)
-                RunProcessUtils.kill_process(self.cloud_run_process_map[run_id_str].pid)
+            stopping_process = Process(
+                target=FedMLCloudServerManager.stop_cloud_server, args=(run_id, server_id, args.agent_config))
+            stopping_process.start()
 
     def complete_job_runner(
             self, run_id, args=None, server_id=None, request_json=None,
-            run_as_cloud_agent=False, run_as_cloud_server=False,
-            use_local_process_as_cloud_server=False
+            run_as_cloud_agent=False, run_as_cloud_server=False
     ):
         super().complete_job_runner(run_id)
 
         if run_as_cloud_agent or run_as_cloud_server:
-            if not use_local_process_as_cloud_server:
-                stopping_process = Process(
-                    target=FedMLCloudServerManager.stop_cloud_server,
-                    args=(run_id, server_id, args.agent_config))
-                stopping_process.start()
-
-            run_id_str = str(run_id)
-            if self.master_agent_instance_map.get(run_id_str, None) is not None:
-                self.master_agent_instance_map.get(run_id_str).stop(kill_process=True)
-                self.master_agent_instance_map.pop(run_id_str)
+            stopping_process = Process(
+                target=FedMLCloudServerManager.stop_cloud_server, args=(run_id, server_id, args.agent_config))
+            stopping_process.start()
 
     def _start_cloud_server(
             self, args, run_id, request_json, edge_id=None,
-            use_local_process_as_cloud_server=False,
-            sender_message_queue=None, listener_message_queue=None,
-            status_center_queue=None, communication_manager=None,
-            master_agent_instance=None, process_name=None
+            use_local_process_as_cloud_server=False
     ):
         run_id_str = str(run_id)
         cloud_server_mgr = FedMLCloudServerManager(
@@ -107,49 +71,19 @@ def _start_cloud_server(
             self.cloud_run_process_map[run_id_str] = Process(target=cloud_server_mgr.start_cloud_server_process_entry)
             self.cloud_run_process_map[run_id_str].start()
         else:
-            cloud_device_id = request_json.get("cloudServerDeviceId", "0")
-            server_id = request_json.get("server_id", 0)
             message_bytes = json.dumps(request_json).encode("ascii")
             base64_bytes = base64.b64encode(message_bytes)
-            payload = base64_bytes.decode("ascii")
-            self.master_agent_instance_map[str(run_id)] = master_agent_instance
-
-            logging.info("start the master server: {}".format(payload))
+            runner_cmd_encoded = base64_bytes.decode("ascii")
+            cloud_device_id = request_json.get("cloudServerDeviceId", "0")
 
-            if platform.system() == "Windows":
-                self.run_process = multiprocessing.Process(
-                    target=cloud_server_mgr.start_local_master_server,
-                    args=(args.account_id, args.api_key, args.os_name, args.version,
-                          cloud_device_id, run_id, payload,
-                          communication_manager, sender_message_queue,
-                          status_center_queue, master_agent_instance, process_name))
-            else:
-                self.cloud_run_process_map[run_id_str] = fedml.get_process(
-                    target=cloud_server_mgr.start_local_master_server,
-                    args=(args.account_id, args.api_key, args.os_name, args.version,
-                          cloud_device_id, run_id, payload,
-                          communication_manager, sender_message_queue,
-                          status_center_queue, master_agent_instance, process_name))
+            logging.info("runner_cmd_encoded: {}".format(runner_cmd_encoded))
 
+            self.cloud_run_process_map[run_id_str] = Process(
+                target=cloud_server_mgr.start_local_cloud_server,
+                args=(args.account_id, args.version, cloud_device_id, runner_cmd_encoded))
             self.cloud_run_process_map[run_id_str].start()
             time.sleep(1)
 
-    def start_local_master_server(
-            self, user, api_key, os_name, version, cloud_device_id, run_id, payload,
-            communication_manager=None, sender_message_queue=None, status_center_queue=None,
-            master_agent_instance=None
-    ):
-        if master_agent_instance is None:
-            return
-        master_agent_instance.login(
-            user, api_key=api_key, device_id=cloud_device_id, os_name=os_name,
-            role=FedMLAccountManager.ROLE_CLOUD_SERVER,
-            communication_manager=None,
-            sender_message_queue=None,
-            status_center_queue=None)
-        self.master_agent_instance_map[str(run_id)] = master_agent_instance
-        master_agent_instance.start_master_server_instance(payload)
-
     def callback_run_logs(self, run_id, topic, payload):
         run_id_str = str(run_id)
         if self.job_runners.get(run_id_str, None) is not None:
@@ -159,12 +93,3 @@ def callback_run_metrics(self, run_id, topic, payload):
         run_id_str = str(run_id)
         if self.job_runners.get(run_id_str, None) is not None:
             self.job_runners[run_id_str].callback_run_metrics(topic, payload)
-
-    def callback_proxy_unknown_messages(self, run_id, topic, payload):
-        run_id_str = str(run_id)
-        master_agent = self.master_agent_instance_map.get(run_id_str, None)
-        if master_agent is None:
-            return
-        master_agent.process_job_complete_status(run_id, topic, payload)
-
-
diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
index 05529f8c8e..1c4cbba4f4 100755
--- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py
@@ -2,8 +2,6 @@
 import base64
 import json
 import logging
-import time
-
 import fedml
 from ..comm_utils.constants import SchedulerConstants
 from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
@@ -143,7 +141,6 @@ def on_agent_communication_connected(self, mqtt_client_object):
     def callback_start_train(self, topic=None, payload=None):
         # Fetch config from MLOps
         # noinspection PyBroadException
-
         try:
             MLOpsConfigs.fetch_all_configs()
         except Exception:
@@ -200,7 +197,7 @@ def callback_start_train(self, topic=None, payload=None):
         self.run_edge_ids[run_id_str] = edge_id_list
 
         # report server running status to master agent
-        if not self.run_as_cloud_server and not self.run_as_cloud_agent:
+        if not self.run_as_cloud_server:
             self.mlops_metrics.report_server_id_status(
                 run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_STARTING, edge_id=self.edge_id,
                 server_id=self.edge_id, server_agent_id=self.edge_id, running_json=payload)
@@ -215,9 +212,7 @@ def callback_start_train(self, topic=None, payload=None):
                 run_id, request_json, args=self.args, edge_id=self.edge_id,
                 sender_message_queue=self.message_center.get_sender_message_queue(),
                 listener_message_queue=self.get_listener_message_queue(),
-                status_center_queue=self.get_status_queue(),
-                communication_manager=self.get_listener_communication_manager(),
-                process_name=GeneralConstants.get_launch_master_job_process_name(run_id, self.edge_id)
+                status_center_queue=self.get_status_queue()
             )
 
             process = self._get_job_runner_manager().get_runner_process(run_id)
@@ -228,17 +223,12 @@ def callback_start_train(self, topic=None, payload=None):
         elif self.run_as_cloud_agent:
             self.init_job_task(request_json)
 
-            server_id = request_json.get("server_id", self.edge_id)
             self._get_job_runner_manager().start_job_runner(
                 run_id, request_json, args=self.args, edge_id=self.edge_id,
                 sender_message_queue=self.message_center.get_sender_message_queue(),
                 listener_message_queue=self.get_listener_message_queue(),
-                status_center_queue=self.get_status_queue(),
-                communication_manager=self.get_listener_communication_manager(),
-                master_agent_instance=self.generate_agent_instance(),
-                should_start_cloud_server=True,
-                use_local_process_as_cloud_server=self.use_local_process_as_cloud_server,
-                process_name=GeneralConstants.get_launch_master_job_process_name(run_id, server_id)
+                status_center_queue=self.get_status_queue(), should_start_cloud_server=True,
+                use_local_process_as_cloud_server=self.use_local_process_as_cloud_server
             )
 
             process = self._get_job_runner_manager().get_runner_process(run_id, is_cloud_server=True)
@@ -247,7 +237,6 @@ def callback_start_train(self, topic=None, payload=None):
         elif self.run_as_cloud_server:
             self.server_agent_id = request_json.get("cloud_agent_id", self.edge_id)
             self.start_request_json = json.dumps(request_json)
-            server_id = request_json.get("server_id", self.edge_id)
             run_id = request_json["runId"]
             run_id_str = str(run_id)
 
@@ -259,12 +248,10 @@ def callback_start_train(self, topic=None, payload=None):
                 run_id, request_json, args=self.args, edge_id=self.edge_id,
                 sender_message_queue=self.message_center.get_sender_message_queue(),
                 listener_message_queue=self.get_listener_message_queue(),
-                status_center_queue=self.get_status_queue(),
-                communication_manager=self.get_listener_communication_manager(),
-                process_name=GeneralConstants.get_launch_master_job_process_name(run_id, server_id)
+                status_center_queue=self.get_status_queue()
             )
 
-            self.send_status_msg_to_edges(edge_id_list, run_id, server_id)
+            self.send_status_msg_to_edges(edge_id_list, run_id, self.edge_id)
 
     def callback_stop_train(self, topic, payload, use_payload=None):
         # Print the payload
@@ -292,16 +279,6 @@ def callback_stop_train(self, topic, payload, use_payload=None):
             server_agent_id = self.edge_id
             topic_stop_train_to_cloud_server = f"mlops/flserver_agent_{server_id}/stop_train"
             self.message_center.send_message(topic_stop_train_to_cloud_server, payload)
-
-            time.sleep(2)
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, server_id)
-            self._get_job_runner_manager().stop_job_runner(
-                run_id, args=self.args, server_id=server_id, request_json=None,
-                run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server,
-                use_local_process_as_cloud_server=self.use_local_process_as_cloud_server)
-            self.generate_status_report(run_id, server_id, server_agent_id=server_agent_id). \
-                report_server_id_status(run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED,
-                                        edge_id=server_id, server_id=server_id)
             return
 
         # Reset all edge status and server status
@@ -327,11 +304,7 @@ def callback_complete_job(self, topic, payload):
         self._process_job_complete_status(run_id, server_id, request_json)
 
     def _process_job_complete_status(self, run_id, server_id, complete_payload):
-        # Complete the job runner
-        self._get_job_runner_manager().complete_job_runner(
-            run_id, args=self.args, server_id=server_id, request_json=complete_payload,
-            run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server,
-            use_local_process_as_cloud_server=self.use_local_process_as_cloud_server)
+        pass
 
     def callback_run_logs(self, topic, payload):
         run_id = str(topic).split('/')[-1]
@@ -417,12 +390,6 @@ def callback_request_job_status(self, topic, payload):
     def callback_request_device_status_in_job(self, topic, payload):
         self.response_device_status_in_job(topic, payload)
 
-    def callback_proxy_unknown_messages(self, run_id, topic, payload):
-        self._get_job_runner_manager().callback_proxy_unknown_messages(run_id, topic, payload)
-
-    def process_extra_queues(self, extra_queues):
-        self.rebuild_status_center(extra_queues[0])
-
     def generate_protocol_manager(self):
         message_status_runner = self._generate_protocol_manager_instance(
             self.args, agent_config=self.agent_config
@@ -509,8 +476,6 @@ def init_job_task(self, request_json):
         self.setup_listener_for_run_logs(run_id)
 
     def setup_listeners_for_edge_status(self, run_id, edge_ids, server_id):
-        if self.run_as_cloud_agent:
-            return
         edge_status_topic = "fl_client/flclient_agent_" + str(server_id) + "/status"
         payload = {"run_id": run_id, "init_all_edge_id_list": edge_ids, "init_server_id": server_id}
         self.callback_edge_status(edge_status_topic, json.dumps(payload))
@@ -521,9 +486,6 @@ def setup_listeners_for_edge_status(self, run_id, edge_ids, server_id):
             self.subscribe_msg(edge_status_topic)
 
     def remove_listeners_for_edge_status(self, edge_ids=None):
-        if self.run_as_cloud_agent:
-            return
-
         if edge_ids is None:
             edge_ids = self.request_json["edgeids"]
 
@@ -580,7 +542,7 @@ def send_status_check_msg(self, run_id, edge_id, server_id, context=None):
     def send_status_msg_to_edges(self, edge_id_list, run_id, server_id, context=None):
         # Send status message to all edges
         for edge_id in edge_id_list:
-            self.send_status_check_msg(run_id, edge_id, server_id, context=context)
+            self.send_status_check_msg(run_id, edge_id, self.edge_id, context=context)
 
     def report_exception_status(self, run_id):
         self.mlops_metrics.report_job_status(run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION)
@@ -592,9 +554,3 @@ def get_start_train_topic_with_edge_id(edge_id):
     @abstractmethod
     def _generate_protocol_manager_instance(self, args, agent_config=None):
         return None
-
-    def start_master_server_instance(self, payload):
-        super().on_agent_communication_connected(None)
-
-        self.receive_message_json(self.topic_start_train, payload)
-
diff --git a/python/fedml/computing/scheduler/master/cloud_server_manager.py b/python/fedml/computing/scheduler/master/cloud_server_manager.py
index 3669cb32bc..040a0f38a3 100755
--- a/python/fedml/computing/scheduler/master/cloud_server_manager.py
+++ b/python/fedml/computing/scheduler/master/cloud_server_manager.py
@@ -2,14 +2,10 @@
 import json
 import logging
 import os
-import platform
 import traceback
 
-import setproctitle
-
 import fedml
 from fedml.computing.scheduler.comm_utils.sys_utils import get_python_program
-from fedml.computing.scheduler.scheduler_core.account_manager import FedMLAccountManager
 
 
 class FedMLCloudServerManager:
@@ -35,37 +31,14 @@ def __init__(self, args, run_id=None, edge_id=None, request_json=None, agent_con
         self.cloud_server_name = None
 
     @staticmethod
-    def start_local_cloud_server(user, api_key, os_name, version, cloud_device_id, runner_cmd_encoded):
-        if platform.system() != "Windows":
-            os.setsid()
-
+    def start_local_cloud_server(user, version, cloud_device_id, runner_cmd_encoded):
         print(f"start cloud server, device id {cloud_device_id}, runner cmd {runner_cmd_encoded}")
         pip_source_dir = os.path.dirname(__file__)
         login_cmd = os.path.join(pip_source_dir, "server_login.py")
         run_cmd = f"{get_python_program()} -W ignore {login_cmd} -t login -r cloud_server -u {str(user)} " \
-                  f"-k {api_key} -v {version} -id {cloud_device_id} -rc {runner_cmd_encoded}"
+                  f"-v {version} -id {cloud_device_id} -rc {runner_cmd_encoded}"
         os.system(run_cmd)
 
-    def start_local_master_server(
-            self, user, api_key, os_name, version, cloud_device_id, run_id, payload,
-            communication_manager=None, sender_message_queue=None, status_center_queue=None,
-            master_agent_instance=None, process_name=None
-    ):
-        if process_name is not None:
-            setproctitle.setproctitle(process_name)
-
-        logging.info(f"Local master server pid: {os.getpid()}")
-        if platform.system() != "Windows":
-            os.setsid()
-
-        master_agent_instance.login(
-            user, api_key=api_key, device_id=cloud_device_id, os_name=os_name,
-            role=FedMLAccountManager.ROLE_CLOUD_SERVER, runner_cmd=payload,
-            communication_manager=None, sender_message_queue=None,
-            status_center_queue=None)
-
-        master_agent_instance.stop()
-
     def start_cloud_server_process_entry(self):
         try:
             self.start_cloud_server_process()
diff --git a/python/fedml/computing/scheduler/master/master_protocol_manager.py b/python/fedml/computing/scheduler/master/master_protocol_manager.py
index 1adda439c6..ca9621e41d 100755
--- a/python/fedml/computing/scheduler/master/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/master/master_protocol_manager.py
@@ -7,9 +7,8 @@
 class FedMLLaunchMasterProtocolManager(FedMLBaseMasterProtocolManager, ABC):
     def __init__(self, args, agent_config=None):
         FedMLBaseMasterProtocolManager.__init__(self, args, agent_config=agent_config)
-        self.message_center_name = "launch_master_agent"
 
-        # Override
+    # Override
     def generate_topics(self):
         super().generate_topics()
 
@@ -36,6 +35,9 @@ def _init_extra_items(self):
     def print_connected_info(self):
         super().print_connected_info()
 
-    def generate_agent_instance(self):
-        from .master_agent import FedMLLaunchMasterAgent
-        return FedMLLaunchMasterAgent()
+    # Override
+    def _process_job_complete_status(self, run_id, server_id, complete_payload):
+        # Complete the job runner
+        self._get_job_runner_manager().complete_job_runner(
+            run_id, args=self.args, server_id=server_id, request_json=complete_payload,
+            run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server)
diff --git a/python/fedml/computing/scheduler/master/server_login.py b/python/fedml/computing/scheduler/master/server_login.py
index be7b73103f..8dd0696bc8 100755
--- a/python/fedml/computing/scheduler/master/server_login.py
+++ b/python/fedml/computing/scheduler/master/server_login.py
@@ -41,5 +41,4 @@ def logout():
         master_agent.login(args.api_key, api_key=args.api_key, device_id=args.device_id,
                            os_name=args.os_name, role=args.role, runner_cmd=args.runner_cmd)
     else:
-        master_agent.stop()
         master_agent.logout()
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index 32f5ebdeab..ab6bc4c895 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -1,12 +1,12 @@
 import copy
 import json
 import logging
-import multiprocessing
 import os
 import time
 import queue
 import traceback
 from abc import ABC
+from multiprocessing import Queue
 
 import fedml
 from fedml.core.mlops import MLOpsRuntimeLog, MLOpsConfigs
@@ -50,7 +50,7 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id
         self.replica_controller = None
         self.deployed_replica_payload = None
         self.slave_deployment_results_map = dict()
-        self.deployment_result_queue = multiprocessing.Manager().Queue()
+        self.deployment_result_queue = Queue()
         self.is_fresh_endpoint = True
 
     # Override
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index efa56f4db5..9e0d51b588 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -264,8 +264,7 @@ def callback_start_deployment(self, topic, payload):
             run_id, request_json, args=self.args, edge_id=self.edge_id,
             sender_message_queue=self.message_center.get_sender_message_queue(),
             listener_message_queue=self.get_listener_message_queue(),
-            status_center_queue=self.get_status_queue(),
-            process_name=GeneralConstants.get_deploy_master_job_process_name(run_id, self.edge_id)
+            status_center_queue=self.get_status_queue()
         )
         process = self._get_job_runner_manager().get_runner_process(run_id)
         if process is not None:
diff --git a/python/fedml/computing/scheduler/model_scheduler/model_device_client.py b/python/fedml/computing/scheduler/model_scheduler/model_device_client.py
new file mode 100755
index 0000000000..05f43afc5f
--- /dev/null
+++ b/python/fedml/computing/scheduler/model_scheduler/model_device_client.py
@@ -0,0 +1,98 @@
+
+import copy
+import logging
+import multiprocessing
+import time
+import traceback
+from multiprocessing import Process
+from ..scheduler_core.account_manager import FedMLAccountManager
+from .worker_agent import FedMLDeployWorkerAgent
+
+
+class FedMLModelDeviceClientRunner:
+    def __init__(self, args, current_device_id, os_name, is_from_docker, service_config, infer_host="127.0.0.1"):
+        self.agent_process = None
+        self.agent_runner = None
+        self.agent_process_event = None
+        self.args = copy.deepcopy(args)
+        self.service_config = service_config
+        self.unique_device_id = None
+        self.current_device_id = current_device_id
+        self.os_name = os_name
+        self.is_from_docker = is_from_docker
+        self.edge_id = None
+        self.infer_host = infer_host
+        self.redis_addr = "local"
+        self.redis_port = "6379"
+        self.redis_password = "fedml_default"
+
+    def get_edge_id(self):
+        return self.edge_id
+
+    def start(self):
+        self.agent_runner = FedMLModelDeviceClientRunner(self.args, self.current_device_id, self.os_name,
+                                                         self.is_from_docker, self.service_config)
+        self.agent_runner.infer_host = self.infer_host
+        self.agent_runner.redis_addr = self.redis_addr
+        self.agent_runner.redis_port = self.redis_port
+        self.agent_runner.redis_password = self.redis_password
+        if self.agent_process_event is None:
+            self.agent_process_event = multiprocessing.Event()
+        self.agent_process = Process(target=self.agent_runner.run_entry, args=(self.agent_process_event, self.args,))
+        self.edge_id = self.bind_device()
+        self.agent_process.start()
+
+    def run_entry(self, process_event, in_args):
+        # print(f"Model worker process id {os.getpid()}")
+
+        self.agent_process_event = process_event
+
+        worker_agent = FedMLDeployWorkerAgent()
+
+        while not self.agent_process_event.is_set():
+            try:
+                try:
+                    worker_agent.logout()
+                except Exception as e:
+                    pass
+
+                worker_agent.login(
+                    in_args.account_id, api_key=in_args.api_key, device_id=in_args.device_id,
+                    os_name=in_args.os_name, role=FedMLAccountManager.ROLE_DEPLOY_WORKER_ON_PREM
+                )
+            except Exception as e:
+                logging.info("Restart model device client: {}".format(traceback.format_exc()))
+                pass
+            finally:
+                try:
+                    worker_agent.logout()
+                except Exception as e:
+                    pass
+                time.sleep(15)
+
+        try:
+            self.stop()
+        except Exception as e:
+            pass
+
+    def check_runner_stop_event(self):
+        if self.agent_process_event is not None and self.agent_process_event.is_set():
+            logging.info("Received stopping event.")
+            raise Exception("Runner stopped")
+
+    def stop(self):
+        FedMLDeployWorkerAgent.logout()
+
+        if self.agent_process_event is not None:
+            self.agent_process_event.set()
+
+    def bind_device(self):
+        # Login account
+        login_result = FedMLAccountManager.get_instance().login(
+            self.args.account_id, api_key=self.args.api_key, device_id=self.args.device_id,
+            os_name=self.args.os_name, role=FedMLAccountManager.ROLE_DEPLOY_WORKER_ON_PREM
+        )
+        if login_result is not None:
+            return login_result.edge_id
+        else:
+            return None
diff --git a/python/fedml/computing/scheduler/model_scheduler/model_device_server.py b/python/fedml/computing/scheduler/model_scheduler/model_device_server.py
new file mode 100755
index 0000000000..b2ecd144b1
--- /dev/null
+++ b/python/fedml/computing/scheduler/model_scheduler/model_device_server.py
@@ -0,0 +1,97 @@
+
+import copy
+import logging
+import multiprocessing
+import time
+import traceback
+from multiprocessing import Process
+from ..scheduler_core.account_manager import FedMLAccountManager
+from .master_agent import FedMLDeployMasterAgent
+
+
+class FedMLModelDeviceServerRunner:
+    """Supervisor for the model-deploy master agent.
+
+    Spawns a child process that repeatedly logs FedMLDeployMasterAgent in to
+    the MLOps backend (role: on-prem deploy master), retrying on failure
+    every 15 seconds until the shared stop event is set.
+    """
+
+    def __init__(self, args, current_device_id, os_name, is_from_docker, service_config, infer_host="127.0.0.1"):
+        self.agent_process = None
+        self.agent_runner = None
+        self.agent_process_event = None
+        # Deep-copy args so the child process works on its own copy.
+        self.args = copy.deepcopy(args)
+        self.service_config = service_config
+        self.unique_device_id = None
+        self.current_device_id = current_device_id
+        self.os_name = os_name
+        self.is_from_docker = is_from_docker
+        self.edge_id = None
+        self.infer_host = infer_host
+        # Default Redis connection settings; callers may override these
+        # attributes before start().
+        self.redis_addr = "local"
+        self.redis_port = "6379"
+        self.redis_password = "fedml_default"
+
+    def get_edge_id(self):
+        # Edge id assigned by the backend in bind_device(); None before start().
+        return self.edge_id
+
+    def start(self):
+        # Build a fresh runner instance for the child process and propagate
+        # the inference-host / Redis settings configured on this instance.
+        self.agent_runner = FedMLModelDeviceServerRunner(self.args, self.current_device_id, self.os_name,
+                                                         self.is_from_docker, self.service_config)
+        self.agent_runner.infer_host = self.infer_host
+        self.agent_runner.redis_addr = self.redis_addr
+        self.agent_runner.redis_port = self.redis_port
+        self.agent_runner.redis_password = self.redis_password
+        if self.agent_process_event is None:
+            self.agent_process_event = multiprocessing.Event()
+        self.agent_process = Process(target=self.agent_runner.run_entry, args=(self.agent_process_event, self.args))
+        # Bind (login) in the parent first so edge_id is available to the
+        # caller, then launch the child login/retry loop.
+        self.edge_id = self.bind_device()
+        self.agent_process.start()
+
+    def run_entry(self, process_event, in_args):
+        # print(f"Model master process id {os.getpid()}")
+
+        # Child-process entry point: keep the master agent logged in until
+        # the shared stop event is set.
+        self.agent_process_event = process_event
+        master_agent = FedMLDeployMasterAgent()
+
+        while not self.agent_process_event.is_set():
+            try:
+                # Best-effort logout first so a stale session never blocks
+                # the fresh login below.
+                try:
+                    master_agent.logout()
+                except Exception as e:
+                    pass
+
+                # NOTE(review): login() presumably blocks for the agent's
+                # lifetime; on return or failure the finally block logs out
+                # and retries after 15s — confirm against the agent impl.
+                master_agent.login(
+                    in_args.account_id, api_key=in_args.api_key, device_id=in_args.device_id,
+                    os_name=in_args.os_name, role=FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM
+                )
+            except Exception as e:
+                logging.info("Restart model device server: {}".format(traceback.format_exc()))
+                pass
+            finally:
+                # Always log out and back off before the next attempt.
+                try:
+                    master_agent.logout()
+                except Exception as e:
+                    pass
+                time.sleep(15)
+
+        try:
+            self.stop()
+        except Exception as e:
+            pass
+
+    def check_runner_stop_event(self):
+        # Raise if the shared stop event has been set so callers can unwind.
+        # NOTE(review): raises a bare Exception — callers must catch broadly.
+        if self.agent_process_event is not None and self.agent_process_event.is_set():
+            logging.info("Received stopping event.")
+            raise Exception("Runner stopped")
+
+    def stop(self):
+        # Log the master agent out, then signal the child login loop to exit.
+        # NOTE(review): logout() is invoked on the class, not an instance —
+        # confirm FedMLDeployMasterAgent.logout is a static/class method.
+        FedMLDeployMasterAgent.logout()
+
+        if self.agent_process_event is not None:
+            self.agent_process_event.set()
+
+    def bind_device(self):
+        # Login account
+        # Bind this device as the on-prem deploy master and return the
+        # backend-assigned edge id, or None when login fails.
+        login_result = FedMLAccountManager.get_instance().login(
+            self.args.account_id, api_key=self.args.api_key, device_id=self.args.device_id,
+            os_name=self.args.os_name, role=FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM
+        )
+        if login_result is not None:
+            return login_result.edge_id
+        else:
+            return None
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
index 9204291c48..b1d0bebc47 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
@@ -12,7 +12,6 @@
 from .device_model_msg_object import FedMLModelMsgObject
 from .device_client_constants import ClientConstants
 from .device_client_data_interface import FedMLClientDataInterface
-from ..scheduler_core.general_constants import GeneralConstants
 from ..slave.base_slave_protocol_manager import FedMLBaseSlaveProtocolManager
 from .worker_job_runner_manager import FedMLDeployJobRunnerManager
 from .device_mqtt_inference_protocol import FedMLMqttInference
@@ -164,8 +163,7 @@ def callback_start_deployment(self, topic, payload):
             run_id, request_json, args=self.args, edge_id=self.edge_id,
             sender_message_queue=self.message_center.get_sender_message_queue(),
             listener_message_queue=self.get_listener_message_queue(),
-            status_center_queue=self.get_status_queue(),
-            process_name=GeneralConstants.get_deploy_slave_job_process_name(run_id, self.edge_id)
+            status_center_queue=self.get_status_queue()
         )
         process = self._get_job_runner_manager().get_runner_process(run_id)
         if process is not None:
diff --git a/python/fedml/computing/scheduler/scheduler_core/account_manager.py b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
index 8d73a42679..3b80511d12 100755
--- a/python/fedml/computing/scheduler/scheduler_core/account_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
@@ -25,7 +25,6 @@ class FedMLAccountManager(Singleton):
     ROLE_CLOUD_SERVER = "cloud_server"
     ROLE_EDGE_DEVICE = "client"
     ROLE_GPU_PROVIDER = "gpu_supplier"
-    ROLE_GPU_MASTER_SERVER = "gpu_master_server"
     ROLE_DEPLOY_MASTER_ON_PREM = "md.on_premise_device.master"
     ROLE_DEPLOY_WORKER_ON_PREM = "md.on_premise_device"
 
@@ -34,7 +33,6 @@ class FedMLAccountManager(Singleton):
     DEVICE_ID_SUFFIX_CLOUD_SERVER = ".Public.Server"
     DEVICE_ID_SUFFIX_EDGE_DEVICE = ".Edge.Device"
     DEVICE_ID_SUFFIX_GPU_PROVIDER = ".Edge.GPU.Supplier"
-    DEVICE_ID_SUFFIX_GPU_MASTER_SERVER = ".Edge.GPU.MasterServer"
     DEVICE_ID_SUFFIX_DEPLOY = "MDA"
     DEVICE_ID_SUFFIX_DEPLOY_MASTER_ON_PREM = ".OnPremise.Master.Device"
     DEVICE_ID_SUFFIX_DEPLOY_WORKER_ON_PREM = ".OnPremise.Device"
@@ -43,7 +41,8 @@ class FedMLAccountManager(Singleton):
     DEVICE_ID_DOCKER_HUB_TAG = ".DockerHub"
 
     def __init__(self):
-        pass
+        if not hasattr(self, "agent_args"):
+            self.agent_args = None
 
     @staticmethod
     def get_instance():
@@ -51,7 +50,7 @@ def get_instance():
 
     def login(self, user_id, api_key="", device_id=None, os_name=None, role=None, runner_cmd=None):
         # Build the agent args
-        agent_args = self.build_agent_args(
+        self.build_agent_args(
             user_id, api_key=api_key, device_id=device_id, os_name=os_name, role=role, runner_cmd=runner_cmd
         )
 
@@ -94,8 +93,8 @@ def login(self, user_id, api_key="", device_id=None, os_name=None, role=None, ru
             # noinspection PyBroadException
             try:
                 edge_id, user_name, extra_url, general_edge_id = FedMLAccountManager.bind_account_and_device_id(
-                    service_config["ml_ops_config"]["EDGE_BINDING_URL"], agent_args.account_id,
-                    agent_args.unique_device_id, agent_args.os_name,
+                    service_config["ml_ops_config"]["EDGE_BINDING_URL"], self.agent_args.account_id,
+                    self.agent_args.unique_device_id, self.agent_args.os_name,
                     api_key=api_key, role=role
                 )
                 if edge_id > 0:
@@ -119,13 +118,13 @@ def login(self, user_id, api_key="", device_id=None, os_name=None, role=None, ru
             return None
 
         # Fill the bound result to agent args.
-        agent_args = self.fill_argent_args(
-            agent_args, log_server_url=log_server_url, server_id=edge_id,
+        self.fill_argent_args(
+            log_server_url=log_server_url, server_id=edge_id,
             edge_id=edge_id, general_edge_id=general_edge_id,
             user_name=user_name, extra_url=extra_url,
             agent_config=service_config)
 
-        return agent_args
+        return self.agent_args
 
     def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None, role=None, runner_cmd=None):
         # Generate the suffix for device based on the role
@@ -145,9 +144,6 @@ def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None,
             device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_EDGE_DEVICE
         elif role == FedMLAccountManager.ROLE_GPU_PROVIDER:
             device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_GPU_PROVIDER
-        elif role == FedMLAccountManager.ROLE_GPU_MASTER_SERVER:
-            device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_GPU_MASTER_SERVER
-            is_master = True
         elif role == FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM:
             device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_DEPLOY_MASTER_ON_PREM
             is_master = True
@@ -158,31 +154,32 @@ def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None,
 
         # Build the agent args
         version = fedml.get_env_version()
-        agent_args = AgentArgs()
-        agent_args.role = role
-        agent_args.account_id = user_id
-        agent_args.api_key = api_key
-        agent_args.current_running_dir = GeneralConstants.get_deploy_fedml_home_dir(is_master=is_master) \
+        if self.agent_args is None:
+            self.agent_args = AgentArgs()
+        self.agent_args.role = role
+        self.agent_args.account_id = user_id
+        self.agent_args.api_key = api_key
+        self.agent_args.current_running_dir = GeneralConstants.get_deploy_fedml_home_dir(is_master=is_master) \
             if is_deploy else GeneralConstants.get_launch_fedml_home_dir(is_master=is_master)
         sys_name = platform.system()
         if sys_name == "Darwin":
             sys_name = "MacOS"
-        agent_args.os_name = sys_name if os_name is None or os_name == "" else os_name
-        agent_args.version = version
-        agent_args.log_file_dir = GeneralConstants.get_deploy_log_file_dir(is_master=is_master) \
+        self.agent_args.os_name = sys_name if os_name is None or os_name == "" else os_name
+        self.agent_args.version = version
+        self.agent_args.log_file_dir = GeneralConstants.get_deploy_log_file_dir(is_master=is_master) \
             if is_deploy else GeneralConstants.get_launch_log_file_dir(is_master=is_master)
         is_from_docker = False
         if device_id is not None and device_id != "0":
-            agent_args.current_device_id = device_id
+            self.agent_args.current_device_id = device_id
         else:
             data_dir = GeneralConstants.get_deploy_data_dir(is_master=is_master) \
                 if is_deploy else GeneralConstants.get_launch_data_dir(is_master=is_master)
             is_gpu_provider = True if role == FedMLAccountManager.ROLE_GPU_PROVIDER else False
-            agent_args.current_device_id = FedMLAccountManager.get_device_id(
+            self.agent_args.current_device_id = FedMLAccountManager.get_device_id(
                 data_dir=data_dir, use_machine_id=is_gpu_provider)
-        agent_args.device_id = agent_args.current_device_id
-        agent_args.config_version = version
-        agent_args.cloud_region = ""
+        self.agent_args.device_id = self.agent_args.current_device_id
+        self.agent_args.config_version = version
+        self.agent_args.cloud_region = ""
 
         # Check if it is running in the fedml docker hub
         is_from_fedml_docker_hub = False
@@ -194,29 +191,26 @@ def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None,
         # Build unique device id
         docker_tag = FedMLAccountManager.DEVICE_ID_DOCKER_TAG if is_from_docker else ""
         docker_tag = FedMLAccountManager.DEVICE_ID_DOCKER_HUB_TAG if is_from_fedml_docker_hub else docker_tag
-        unique_device_id = f"{agent_args.current_device_id}@{agent_args.os_name}" \
+        unique_device_id = f"{self.agent_args.current_device_id}@{self.agent_args.os_name}" \
                            f"{docker_tag}{device_id_suffix}"
         if role == FedMLAccountManager.ROLE_CLOUD_SERVER:
-            unique_device_id = agent_args.current_device_id
+            unique_device_id = self.agent_args.current_device_id
 
         # Set the unique device id
-        agent_args.is_from_docker = is_from_docker or is_from_fedml_docker_hub
-        agent_args.unique_device_id = unique_device_id
-        agent_args.runner_cmd = runner_cmd
-
-        return agent_args
+        self.agent_args.is_from_docker = is_from_docker or is_from_fedml_docker_hub
+        self.agent_args.unique_device_id = unique_device_id
+        self.agent_args.runner_cmd = runner_cmd
 
     def fill_argent_args(
-            self, agent_args, log_server_url=None, server_id=None, edge_id=None,
+            self, log_server_url=None, server_id=None, edge_id=None,
             user_name=None, extra_url=None, general_edge_id=None, agent_config=None):
-        agent_args.log_server_url = log_server_url
-        agent_args.server_id = server_id
-        agent_args.edge_id = edge_id
-        agent_args.user_name = user_name
-        agent_args.extra_url = extra_url
-        agent_args.general_edge_id = general_edge_id
-        agent_args.agent_config = agent_config
-        return agent_args
+        self.agent_args.log_server_url = log_server_url
+        self.agent_args.server_id = server_id
+        self.agent_args.edge_id = edge_id
+        self.agent_args.user_name = user_name
+        self.agent_args.extra_url = extra_url
+        self.agent_args.general_edge_id = general_edge_id
+        self.agent_args.agent_config = agent_config
 
     @staticmethod
     def write_login_failed_file(is_client=True):
diff --git a/python/fedml/computing/scheduler/scheduler_core/general_constants.py b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
index 0ab6f79577..8c60b17bdf 100755
--- a/python/fedml/computing/scheduler/scheduler_core/general_constants.py
+++ b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
@@ -65,20 +65,6 @@ class GeneralConstants:
     FEDML_OTA_CMD_RESTART = "restart"
 
     FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT = "MODEL_END_POINT"
-    FEDML_PROCESS_NAME_PREFIX = "fedml-process-"
-    FEDML_LAUNCH_MASTER_JOB_RUNNER_TAG = "launch-master-job-runner"
-    FEDML_LAUNCH_SLAVE_JOB_RUNNER_TAG = "launch-slave-job-runner"
-    FEDML_LAUNCH_MASTER_USER_JOB_TAG = "launch-master-user-job"
-    FEDML_DEPLOY_MASTER_JOB_RUNNER_TAG = "deploy-master-job-runner"
-    FEDML_DEPLOY_SLAVE_JOB_RUNNER_TAG = "deploy-slave-job-runner"
-    FEDML_DEPLOY_MASTER_USER_JOB_TAG = "deploy-master-user-job"
-    FEDML_MESSAGE_CENTER_LISTENER_TAG = "message-center-listener"
-    FEDML_MESSAGE_CENTER_SENDER_TAG = "message-center-sender"
-    FEDML_STATUS_CENTER_TAG = "status-center"
-    FEDML_LOG_PROCESS_TAG = "log"
-    FEDML_MONITOR_PROCESS_TAG = "monitor"
-
-    FEDML_TOPIC_STATUS_CENTER_STOP = "anywhere/status_center/stop"
 
     @staticmethod
     def get_package_unzip_dir(package_download_dir):
@@ -230,65 +216,3 @@ def get_topic_complete_job(server_id):
     def get_payload_complete_job(run_id, server_id):
         payload_complete_job = {"runId": run_id, "serverId": server_id}
         return payload_complete_job
-
-    @staticmethod
-    def get_process_name(process_tag, run_id=None, edge_id=None):
-        return f'{GeneralConstants.FEDML_PROCESS_NAME_PREFIX}{process_tag}'\
-               f'{"-run-" + str(run_id) if run_id is not None and int(run_id) != 0 else ""}'\
-               f'{"-edge-" + str(edge_id) if edge_id is not None else ""}'
-
-    @staticmethod
-    def get_process_name_with_prefix(process_prefix, run_id=None, edge_id=None):
-        return f"{process_prefix}-run-{run_id}-edge-{edge_id}"
-
-    @staticmethod
-    def get_launch_master_job_process_name(run_id, edge_id):
-        return GeneralConstants.get_process_name(
-            GeneralConstants.FEDML_LAUNCH_MASTER_JOB_RUNNER_TAG, run_id, edge_id)
-
-    @staticmethod
-    def get_launch_slave_job_process_name(run_id, edge_id):
-        return GeneralConstants.get_process_name(
-            GeneralConstants.FEDML_LAUNCH_SLAVE_JOB_RUNNER_TAG, run_id, edge_id)
-
-    @staticmethod
-    def get_launch_master_user_process_name(run_id, edge_id):
-        return GeneralConstants.get_process_name(
-            GeneralConstants.FEDML_LAUNCH_MASTER_USER_JOB_TAG, run_id, edge_id)
-
-    @staticmethod
-    def get_deploy_master_job_process_name(run_id, edge_id):
-        return GeneralConstants.get_process_name(
-            GeneralConstants.FEDML_DEPLOY_MASTER_JOB_RUNNER_TAG, run_id, edge_id)
-
-    @staticmethod
-    def get_deploy_slave_job_process_name(run_id, edge_id):
-        return GeneralConstants.get_process_name(
-            GeneralConstants.FEDML_DEPLOY_SLAVE_JOB_RUNNER_TAG, run_id, edge_id)
-
-    @staticmethod
-    def get_deploy_master_user_process_name(run_id, edge_id):
-        return GeneralConstants.get_process_name(
-            GeneralConstants.FEDML_DEPLOY_MASTER_USER_JOB_TAG, run_id, edge_id)
-
-    @staticmethod
-    def get_log_process_name(run_id, edge_id):
-        return GeneralConstants.get_process_name(
-            GeneralConstants.FEDML_LOG_PROCESS_TAG, run_id, edge_id)
-
-    @staticmethod
-    def get_message_center_listener_process_name(message_center_name):
-        return f"{GeneralConstants.FEDML_PROCESS_NAME_PREFIX}{GeneralConstants.FEDML_MESSAGE_CENTER_LISTENER_TAG}-{message_center_name}"
-
-    @staticmethod
-    def get_message_center_sender_process_name(message_center_name):
-        return f"{GeneralConstants.FEDML_PROCESS_NAME_PREFIX}{GeneralConstants.FEDML_MESSAGE_CENTER_SENDER_TAG}-{message_center_name}"
-
-    @staticmethod
-    def get_status_center_process_name(status_center_tag):
-        return f"{GeneralConstants.FEDML_PROCESS_NAME_PREFIX}{GeneralConstants.FEDML_STATUS_CENTER_TAG}-{status_center_tag}"
-
-    @staticmethod
-    def get_monitor_process_name(monitor_tag, run_id, edge_id):
-        return GeneralConstants.get_process_name(
-            f"{GeneralConstants.FEDML_MONITOR_PROCESS_TAG}-{monitor_tag}", run_id, edge_id)
diff --git a/python/fedml/computing/scheduler/scheduler_core/message_center.py b/python/fedml/computing/scheduler/scheduler_core/message_center.py
index 5f414d1873..dbe11700a0 100755
--- a/python/fedml/computing/scheduler/scheduler_core/message_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/message_center.py
@@ -1,20 +1,16 @@
 import json
 import logging
 import os
-import platform
 import threading
 import time
 import traceback
 import uuid
 import multiprocessing
+from multiprocessing import Process, Queue
 import queue
 from os.path import expanduser
 
-import setproctitle
-
-import fedml
 from fedml.core.distributed.communication.mqtt.mqtt_manager import MqttManager
-from .general_constants import GeneralConstants
 from ..slave.client_constants import ClientConstants
 from ....core.mlops.mlops_metrics import MLOpsMetrics
 from operator import methodcaller
@@ -24,7 +20,6 @@
 class FedMLMessageCenter(object):
     FUNC_SETUP_MESSAGE_CENTER = "setup_message_center"
     FUNC_REBUILD_MESSAGE_CENTER = "rebuild_message_center"
-    FUNC_PROCESS_EXTRA_QUEUES = "process_extra_queues"
     ENABLE_SAVE_MESSAGE_TO_FILE = True
     PUBLISH_MESSAGE_RETRY_TIMEOUT = 60 * 1000.0
     PUBLISH_MESSAGE_RETRY_COUNT = 3
@@ -32,12 +27,11 @@ class FedMLMessageCenter(object):
     MESSAGE_SENT_SUCCESS_RECORDS_FILE = "message-sent-success-records.log"
     MESSAGE_RECEIVED_RECORDS_FILE = "message-received-records.log"
 
-    def __init__(self, agent_config=None, sender_message_queue=None,
-                 listener_message_queue=None, sender_message_event=None):
+    def __init__(self, agent_config=None, sender_message_queue=None, listener_message_queue=None):
         self.sender_agent_config = agent_config
         self.listener_agent_config = agent_config
         self.sender_message_queue = sender_message_queue
-        self.message_event = sender_message_event
+        self.message_event = None
         self.message_center_process = None
         self.sender_mqtt_mgr = None
         self.sender_mlops_metrics = None
@@ -136,33 +130,21 @@ def release_sender_mqtt_mgr(self):
     def get_sender_message_queue(self):
         return self.sender_message_queue
 
-    def get_sender_message_event(self):
-        return self.message_event
-
     def start_sender(self, message_center_name=None):
-        self.sender_message_queue = multiprocessing.Manager().Queue()
+        self.sender_message_queue = Queue()
         self.message_event = multiprocessing.Event()
         self.message_event.clear()
-        process_name = GeneralConstants.get_message_center_sender_process_name(message_center_name)
         message_center = FedMLMessageCenter(agent_config=self.sender_agent_config,
                                             sender_message_queue=self.sender_message_queue)
-        if platform.system() == "Windows":
-            self.message_center_process = multiprocessing.Process(
-                target=message_center.run_sender, args=(
-                    self.message_event, self.sender_message_queue,
-                    message_center_name, process_name
-                )
-            )
-        else:
-            self.message_center_process = fedml.get_process(
-                target=message_center.run_sender, args=(
-                    self.message_event, self.sender_message_queue,
-                    message_center_name, process_name
-                )
+        self.message_center_process = Process(
+            target=message_center.run_sender, args=(
+                self.message_event, self.sender_message_queue,
+                message_center_name
             )
+        )
         self.message_center_process.start()
 
-    def stop_message_center(self):
+    def stop(self):
         if self.message_event is not None:
             self.message_event.set()
 
@@ -174,10 +156,6 @@ def check_message_stop_event(self):
             logging.info("Received message center stopping event.")
             raise MessageCenterStoppedException("Message center stopped (for sender)")
 
-        if self.listener_message_event is not None and self.listener_message_event.is_set():
-            logging.info("Received message center stopping event.")
-            raise MessageCenterStoppedException("Message center stopped (for listener)")
-
     def send_message(self, topic, payload, run_id=None):
         message_entity = FedMLMessageEntity(topic=topic, payload=payload, run_id=run_id)
         self.sender_message_queue.put(message_entity.get_message_body())
@@ -215,13 +193,7 @@ def retry_sending_undelivered_message(self):
                 # Save the message
                 self.save_message_record(message_entity.run_id, message_entity.device_id, sent_message_record)
 
-    def run_sender(self, message_event, message_queue, message_center_name, process_name=None):
-        if process_name is not None:
-            setproctitle.setproctitle(process_name)
-
-        if platform.system() != "Windows":
-            os.setsid()
-
+    def run_sender(self, message_event, message_queue, message_center_name):
         self.message_event = message_event
         self.sender_message_queue = message_queue
         self.message_center_name = message_center_name
@@ -276,16 +248,10 @@ def run_sender(self, message_event, message_queue, message_center_name, process_
 
         self.release_sender_mqtt_mgr()
 
-    def get_protocol_communication_manager(self):
-        return None
-
     def setup_listener_mqtt_mgr(self):
         if self.listener_mqtt_mgr is not None:
             return
 
-        # self.listener_mqtt_mgr = self.get_protocol_communication_manager()
-        # return
-
         self.listener_mqtt_mgr = MqttManager(
             self.listener_agent_config["mqtt_config"]["BROKER_HOST"],
             self.listener_agent_config["mqtt_config"]["BROKER_PORT"],
@@ -298,11 +264,7 @@ def setup_listener_mqtt_mgr(self):
         self.listener_mqtt_mgr.connect()
         self.listener_mqtt_mgr.loop_start()
 
-    def get_listener_communication_manager(self):
-        return self.listener_mqtt_mgr
-
     def release_listener_mqtt_mgr(self):
-        #return
         try:
             if self.listener_mqtt_mgr is not None:
                 self.listener_mqtt_mgr.loop_stop()
@@ -325,9 +287,6 @@ def remove_message_listener(self, topic):
             self.listener_topics.remove(topic)
             self.listener_handler_funcs.pop(topic)
 
-    def get_listener_handler(self, topic):
-        return self.listener_handler_funcs.get(topic)
-
     def get_message_runner(self):
         return None
 
@@ -335,42 +294,29 @@ def get_listener_message_queue(self):
         return self.listener_message_queue
 
     def setup_listener_message_queue(self):
-        self.listener_message_queue = multiprocessing.Manager().Queue()
+        self.listener_message_queue = Queue()
 
-    def start_listener(
-            self, sender_message_queue=None, listener_message_queue=None,
-            sender_message_event=None, agent_config=None, message_center_name=None, extra_queues=None
-    ):
+    def start_listener(self, sender_message_queue=None, listener_message_queue=None, agent_config=None, message_center_name=None):
         if self.listener_message_center_process is not None:
             return
 
         if listener_message_queue is None:
             if self.listener_message_queue is None:
-                self.listener_message_queue = multiprocessing.Manager().Queue()
+                self.listener_message_queue = Queue()
         else:
             self.listener_message_queue = listener_message_queue
         self.listener_message_event = multiprocessing.Event()
         self.listener_message_event.clear()
         self.listener_agent_config = agent_config
-        message_runner = self
+        message_runner = self.get_message_runner()
         message_runner.listener_agent_config = agent_config
-        process_name = GeneralConstants.get_message_center_listener_process_name(message_center_name)
-        if platform.system() == "Windows":
-            self.listener_message_center_process = multiprocessing.Process(
-                target=message_runner.run_listener_dispatcher, args=(
-                    self.listener_message_event, self.listener_message_queue,
-                    self.listener_handler_funcs, sender_message_queue,
-                    sender_message_event, message_center_name, extra_queues, process_name
-                )
-            )
-        else:
-            self.listener_message_center_process = fedml.get_process(
-                target=message_runner.run_listener_dispatcher, args=(
-                    self.listener_message_event, self.listener_message_queue,
-                    self.listener_handler_funcs, sender_message_queue,
-                    sender_message_event, message_center_name, extra_queues, process_name
-                )
+        self.listener_message_center_process = Process(
+            target=message_runner.run_listener_dispatcher, args=(
+                self.listener_message_event, self.listener_message_queue,
+                self.listener_handler_funcs, sender_message_queue,
+                message_center_name
             )
+        )
         self.listener_message_center_process.start()
 
     def check_listener_message_stop_event(self):
@@ -403,22 +349,13 @@ def unsubscribe_msg(self, topic):
         self.listener_mqtt_mgr.unsubscribe_msg(topic)
 
     def run_listener_dispatcher(
-            self, listener_message_event, listener_message_queue,
-            listener_funcs, sender_message_queue, sender_message_event,
-            message_center_name, extra_queues, process_name=None
+            self, message_event, message_queue, listener_funcs, sender_message_queue,
+            message_center_name
     ):
-        if process_name is not None:
-            setproctitle.setproctitle(process_name)
-
-        if platform.system() != "Windows":
-            os.setsid()
-
-        self.listener_message_event = listener_message_event
-        self.listener_message_queue = listener_message_queue
+        self.listener_message_event = message_event
+        self.listener_message_queue = message_queue
         self.listener_handler_funcs = listener_funcs
         self.message_center_name = message_center_name
-        self.sender_message_queue = sender_message_queue
-        self.message_event = sender_message_event
 
         self.setup_listener_mqtt_mgr()
 
@@ -427,9 +364,6 @@ def run_listener_dispatcher(
         else:
             methodcaller(FedMLMessageCenter.FUNC_REBUILD_MESSAGE_CENTER, sender_message_queue)(self)
 
-        if extra_queues is not None:
-            methodcaller(FedMLMessageCenter.FUNC_PROCESS_EXTRA_QUEUES, extra_queues)(self)
-
         while True:
             message_entity = None
             try:
@@ -444,7 +378,7 @@ def run_listener_dispatcher(
 
                 # Get the message from the queue
                 try:
-                    message_body = listener_message_queue.get(block=False, timeout=0.1)
+                    message_body = message_queue.get(block=False, timeout=0.1)
                 except queue.Empty as e:  # If queue is empty, then break loop
                     message_body = None
                 if message_body is None:
@@ -468,11 +402,6 @@ def run_listener_dispatcher(
                 message_handler_func_name = self.listener_handler_funcs.get(message_entity.topic, None)
                 if message_handler_func_name is not None:
                     methodcaller(message_handler_func_name, message_entity.topic, message_entity.payload)(self)
-                else:
-                    if hasattr(self, "callback_proxy_unknown_messages") and \
-                            self.callback_proxy_unknown_messages is not None:
-                        self.callback_proxy_unknown_messages(
-                            message_entity.run_id, message_entity.topic, message_entity.payload)
             except Exception as e:
                 if message_entity is not None:
                     logging.info(
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
index 7175032375..6e0010f556 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -9,8 +9,6 @@
 import traceback
 import zipfile
 import queue
-
-import fedml
 from ..comm_utils.constants import SchedulerConstants
 from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs
 from ..scheduler_entry.constants import Constants
@@ -75,7 +73,6 @@ def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id
         self.user_name = None
         self.general_edge_id = None
         self.message_center = None
-        self.status_center = None
         self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {
             "${FEDSYS.RUN_ID}": "",
             "${FEDSYS.PRIVATE_LOCAL_DATA}": "",
@@ -211,15 +208,9 @@ def retrieve_and_unzip_package(self, package_name, package_url):
         # Open a process to download the package so that we can avoid the request is blocked and check the timeout.
         from multiprocessing import Process
         completed_event = multiprocessing.Event()
-        info_queue = multiprocessing.Manager().Queue()
-        if platform.system() == "Windows":
-            download_process = multiprocessing.Process(
-                target=self.download_package_proc,
-                args=(package_url, local_package_file, completed_event, info_queue))
-        else:
-            download_process = fedml.get_process(
-                target=self.download_package_proc,
-                args=(package_url, local_package_file, completed_event, info_queue))
+        info_queue = multiprocessing.Queue()
+        download_process = Process(target=self.download_package_proc,
+                                   args=(package_url, local_package_file, completed_event, info_queue))
         download_process.start()
         allowed_block_download_time = 60
         download_finished = False
@@ -615,8 +606,7 @@ def job_error_processor(self, error_list):
 
     def start_runner_process(
             self, run_id, edge_id, request_json,  cuda_visible_gpu_ids_str=None,
-            sender_message_queue=None, listener_message_queue=None,
-            status_center_queue=None, process_name=None
+            sender_message_queue=None, status_center_queue=None
     ):
         return None
 
@@ -650,8 +640,8 @@ def rebuild_message_status_center(self, sender_message_queue, listener_message_q
         self.mlops_metrics.set_messenger(self.message_center)
         self.mlops_metrics.run_id = self.run_id
 
-        self.status_center = FedMLStatusCenter.rebuild_status_center_from_queue(status_queue)
+        status_center = FedMLStatusCenter.rebuild_status_center_from_queue(status_queue)
         if self.status_reporter is None:
             self.status_reporter = MLOpsMetrics()
-        self.status_reporter.set_messenger(self.status_center)
+        self.status_reporter.set_messenger(status_center)
         self.status_reporter.run_id = self.run_id
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
index ad32f78631..dcc4045699 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
@@ -20,7 +20,7 @@ def start_job_runner(
             self, run_id, request_json, args=None, edge_id=None, is_server_job=False,
             sender_message_queue=None, listener_message_queue=None, status_center_queue=None,
             should_start_cloud_server=False, use_local_process_as_cloud_server=False,
-            cuda_visible_gpu_ids_str=None, process_name=None
+            cuda_visible_gpu_ids_str=None
     ):
         run_id_str = str(run_id)
         self.job_runners[run_id_str] = self._generate_job_runner_instance(
@@ -29,11 +29,9 @@ def start_job_runner(
         )
         self.job_runners[run_id_str].start_runner_process(
             run_id, request_json, edge_id=edge_id,
-            cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str,
             sender_message_queue=sender_message_queue,
             listener_message_queue=listener_message_queue,
-            status_center_queue=status_center_queue,
-            process_name=process_name
+            status_center_queue=status_center_queue
         )
 
     def stop_job_runner(self, run_id):
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
index 9970b1d3f6..19bb7e9882 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
@@ -2,13 +2,11 @@
 import json
 import logging
 import multiprocessing
-import os
 import sys
 import time
 import traceback
 import uuid
 import fedml
-from ..comm_utils.run_process_utils import RunProcessUtils
 from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
 from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager
 from ....core.mlops.mlops_metrics import MLOpsMetrics
@@ -47,7 +45,6 @@ def __init__(self, args, agent_config=None, is_master=False):
         self.mlops_metrics = None
         self.status_reporter = None
         self.user_name = args.user_name
-        self.parent_agent = None
 
         fedml._init_multiprocessing()
 
@@ -61,54 +58,38 @@ def add_protocol_handler(self):
         # self.add_message_listener(self.topic_start_train, self.callback_start_train)
         pass
 
-    def initialize(
-            self, communication_manager=None, sender_message_queue=None,
-            status_center_queue=None, sender_message_event=None
-    ):
+    def initialize(self):
         # Generate the message topics
         self.generate_topics()
 
         # Setup MQTT connection
-        if communication_manager is None:
-            self.communication_mgr = MqttManager(
-                self.agent_config["mqtt_config"]["BROKER_HOST"],
-                self.agent_config["mqtt_config"]["BROKER_PORT"],
-                self.agent_config["mqtt_config"]["MQTT_USER"],
-                self.agent_config["mqtt_config"]["MQTT_PWD"],
-                self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"],
-                f"FedML_Agent_Daemon_@{self.user_name}@_@{self.current_device_id}@_@{str(uuid.uuid4())}@",
-                self.topic_last_will,
-                json.dumps({"ID": self.edge_id, "status": GeneralConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE})
-            )
-        else:
-            self.communication_mgr = communication_manager
+        self.communication_mgr = MqttManager(
+            self.agent_config["mqtt_config"]["BROKER_HOST"],
+            self.agent_config["mqtt_config"]["BROKER_PORT"],
+            self.agent_config["mqtt_config"]["MQTT_USER"],
+            self.agent_config["mqtt_config"]["MQTT_PWD"],
+            self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"],
+            f"FedML_Agent_Daemon_@{self.user_name}@_@{self.current_device_id}@_@{str(uuid.uuid4())}@",
+            self.topic_last_will,
+            json.dumps({"ID": self.edge_id, "status": GeneralConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE})
+        )
 
         # Add the message listeners for all topics
         self.add_protocol_handler()
 
         # Start the message center to process edge related messages.
-        if sender_message_queue is None:
-            self.setup_message_center()
-            sender_message_event = self.message_center.get_sender_message_event()
-        else:
-            self.rebuild_message_center(sender_message_queue)
+        self.setup_message_center()
 
         # Setup the message listener queue
         self.setup_listener_message_queue()
 
         # Start the status center to process edge related status.
-        if status_center_queue is None:
-            self.start_status_listener_center(sender_message_event=sender_message_event)
-        else:
-            self.set_status_queue(status_center_queue)
-            self.rebuild_status_center(status_center_queue)
+        self.start_status_listener_center()
 
         # Start the message center for listener
         self.start_listener(sender_message_queue=self.message_center.get_sender_message_queue(),
-                            sender_message_event=sender_message_event,
                             agent_config=self.agent_config,
-                            message_center_name=self.message_center_name,
-                            extra_queues=[self.get_status_queue()])
+                            message_center_name=self.message_center_name)
 
         # Init extra items, e.g. database, recovery, etc.
         self._init_extra_items()
@@ -116,11 +97,11 @@ def initialize(
         # Setup MQTT connected listener
         self.communication_mgr.add_connected_listener(self.on_agent_communication_connected)
         self.communication_mgr.add_disconnected_listener(self.on_agent_communication_disconnected)
+        self.communication_mgr.connect()
 
     def start(self):
         # Start MQTT message loop
         try:
-            self.communication_mgr.connect()
             self.communication_mgr.loop_forever()
         except Exception as e:
             if str(e) == "Restarting after upgraded...":
@@ -129,8 +110,6 @@ def start(self):
                 logging.info("Server tracing: {}".format(traceback.format_exc()))
 
         finally:
-            logging.info(f"Protocol manager is about to exit, pid: {os.getpid()}")
-
             FedMLAccountManager.write_login_failed_file(is_client=not self.is_master_agent)
 
             self.stop()
@@ -141,7 +120,7 @@ def start(self):
                 clean_process_group=False)
             sys.exit(1)
 
-    def stop(self, kill_process=False):
+    def stop(self):
         if self.communication_mgr is not None:
             # noinspection PyBroadException
             try:
@@ -153,10 +132,7 @@ def stop(self, kill_process=False):
             self.communication_mgr.loop_stop()
             self.communication_mgr.disconnect()
 
-        if kill_process:
-            self.post_status_center_stopping_message()
-            self.release_message_center()
-            RunProcessUtils.kill_process(os.getppid(), exclude_current_pid=True)
+        self.release_message_center()
 
     @abstractmethod
     def _init_extra_items(self):
@@ -220,37 +196,20 @@ def rebuild_message_center(self, message_center_queue):
 
     def release_message_center(self):
         try:
-            self.stop_message_center()
-
             if self.message_center is not None:
-                self.message_center.stop_message_center()
+                self.message_center.stop()
                 self.message_center = None
 
         except Exception as e:
             logging.error(
-                f"Failed to release the message center with Exception {e}. "
-                f"Traceback: {traceback.format_exc()}")
-            pass
-
-    def release_status_center(self):
-        try:
-            self.stop_status_center()
-
-            if self.status_center is not None:
-                self.status_center.stop_status_center()
-                self.status_center = None
-
-        except Exception as e:
-            logging.error(
-                f"Failed to release the status center with Exception {e}. "
+                f"Failed to release slave communication manager with Exception {e}. "
                 f"Traceback: {traceback.format_exc()}")
             pass
 
-    def start_status_listener_center(self, sender_message_event=None):
+    def start_status_listener_center(self):
         self.start_status_center(
             sender_message_center_queue=self.message_center.get_sender_message_queue(),
             listener_message_center_queue=self.get_listener_message_queue(),
-            sender_message_event=sender_message_event,
             is_slave_agent=not self.is_master_agent
         )
 
@@ -272,9 +231,6 @@ def rebuild_status_center(self, status_center_queue):
         self.status_reporter.edge_id = self.edge_id
         self.status_reporter.server_agent_id = self.server_agent_id
 
-    def process_extra_queues(self, extra_queues):
-        pass
-
     def generate_status_report(self, run_id, edge_id, server_agent_id=None):
         status_reporter = MLOpsMetrics()
         status_reporter.set_messenger(self, send_message_func=self.send_status_message)
@@ -310,29 +266,6 @@ def get_status_runner(self):
 
         return None
 
-    def get_protocol_communication_manager(self):
-        return self.communication_mgr
-
-    def get_protocol_sender_message_queue(self):
-        return self.message_center.get_sender_message_queue()
-
-    def get_protocol_sender_message_event(self):
-        return self.message_center.get_sender_message_event()
-
-    def get_protocol_status_center_queue(self):
-        return self.get_status_queue()
-
-    def get_subscribed_topics(self):
-        return self.subscribed_topics
-
     def send_agent_active_msg(self, edge_id):
         active_msg = {"ID": edge_id, "status": GeneralConstants.MSG_MLOPS_SERVER_STATUS_IDLE}
         self.message_center.send_message_json(self.topic_active, json.dumps(active_msg))
-
-    def post_status_center_stopping_message(self, run_id=None):
-        topic_status_center_stopping = GeneralConstants.FEDML_TOPIC_STATUS_CENTER_STOP
-        payload = {"run_id": run_id}
-        self.status_reporter.send_message(topic_status_center_stopping, json.dumps(payload))
-
-    def set_parent_agent(self, parent_agent):
-        self.parent_agent = parent_agent
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py
index b1462d7ea9..97c2115e76 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py
@@ -1,16 +1,10 @@
 import logging
-import os
-import platform
 import time
 
 from enum import Enum, unique
 import multiprocessing
+from multiprocessing import Process, Queue
 import queue
-
-import setproctitle
-
-import fedml
-from .general_constants import GeneralConstants
 from .message_common import FedMLMessageEntity, FedMLStatusEntity
 from .message_center import FedMLMessageCenter
 import traceback
@@ -87,7 +81,6 @@ class FedMLStatusCenter(object):
     TOPIC_SLAVE_JOB_LAUNCH_SUFFIX = "/start_train"
     TOPIC_SLAVE_JOB_STOP_PREFIX = "flserver_agent/"
     TOPIC_SLAVE_JOB_STOP_SUFFIX = "/stop_train"
-    TOPIC_STATUS_CENTER_STOP_PREFIX = GeneralConstants.FEDML_TOPIC_STATUS_CENTER_STOP
     ALLOWED_MAX_JOB_STATUS_CACHE_NUM = 1000
 
     def __init__(self, message_queue=None):
@@ -112,42 +105,25 @@ def get_status_runner(self):
         return None
 
     def start_status_center(self, sender_message_center_queue=None,
-                            listener_message_center_queue=None,
-                            sender_message_event=None,
-                            is_slave_agent=False):
-        self.status_queue = multiprocessing.Manager().Queue()
+                            listener_message_center_queue=None, is_slave_agent=False):
+        self.status_queue = Queue()
         self.status_event = multiprocessing.Event()
         self.status_event.clear()
         self.status_sender_message_center_queue = sender_message_center_queue
         self.status_listener_message_center_queue = listener_message_center_queue
-        self.status_runner = self
-        process_name = GeneralConstants.get_status_center_process_name(
-            f'{"deploy" if self.is_deployment_status_center else "launch"}_'
-            f'{"slave" if is_slave_agent else "master"}_agent')
+        self.status_runner = self.get_status_runner()
         target_func = self.status_runner.run_status_dispatcher if not is_slave_agent else \
             self.status_runner.run_status_dispatcher_in_slave
-        if platform.system() == "Windows":
-            self.status_center_process = multiprocessing.Process(
-                target=target_func, args=(
-                    self.status_event, self.status_queue, self.status_sender_message_center_queue,
-                    self.status_listener_message_center_queue, sender_message_event, process_name
-                )
-            )
-        else:
-            self.status_center_process = fedml.get_process(
-                target=target_func, args=(
-                    self.status_event, self.status_queue, self.status_sender_message_center_queue,
-                    self.status_listener_message_center_queue, sender_message_event, process_name
-                )
+        self.status_center_process = Process(
+            target=target_func, args=(
+                self.status_event, self.status_queue, self.status_sender_message_center_queue,
+                self.status_listener_message_center_queue
             )
+        )
 
         self.status_center_process.start()
 
-    def stop_status_center(self):
-        if self.status_event is not None:
-            self.status_event.set()
-
-    def check_status_stop_event(self):
+    def check_message_stop_event(self):
         if self.status_event is not None and self.status_event.is_set():
             logging.info("Received status center stopping event.")
             raise StatusCenterStoppedException("Status center stopped (for sender)")
@@ -166,9 +142,6 @@ def send_status_message(self, topic, payload):
     def get_status_queue(self):
         return self.status_queue
 
-    def set_status_queue(self, status_queue):
-        self.status_queue = status_queue
-
     def status_center_process_master_status(self, topic, payload):
         pass
 
@@ -183,14 +156,7 @@ def rebuild_status_center(self, status_queue):
 
     def run_status_dispatcher(self, status_event, status_queue,
                               sender_message_center_queue,
-                              listener_message_center_queue,
-                              sender_message_event, process_name=None):
-        if process_name is not None:
-            setproctitle.setproctitle(process_name)
-
-        if platform.system() != "Windows":
-            os.setsid()
-
+                              listener_message_center_queue):
         # Save the parameters
         self.status_event = status_event
         self.status_queue = status_queue
@@ -203,11 +169,10 @@ def run_status_dispatcher(self, status_event, status_queue,
             self.rebuild_message_center(sender_message_center_queue)
             message_center = FedMLMessageCenter(
                 sender_message_queue=sender_message_center_queue,
-                listener_message_queue=listener_message_center_queue,
-                sender_message_event=sender_message_event
+                listener_message_queue=listener_message_center_queue
             )
 
-        if status_queue is not None:
+        if sender_message_center_queue is not None:
             self.rebuild_status_center(status_queue)
 
         # Init status manager instances
@@ -218,7 +183,7 @@ def run_status_dispatcher(self, status_event, status_queue,
 
             # Check if we should stop status dispatcher
             try:
-                self.check_status_stop_event()
+                self.check_message_stop_event()
             except StatusCenterStoppedException as e:
                 break
 
@@ -238,12 +203,6 @@ def run_status_dispatcher(self, status_event, status_queue,
                 message_entity = FedMLMessageEntity(message_body=message_body)
                 status_entity = FedMLStatusEntity(status_msg_body=message_body)
 
-                if message_entity.topic.startswith(FedMLStatusCenter.TOPIC_STATUS_CENTER_STOP_PREFIX):
-                    # Process the stop message for message center and status center
-                    message_center.stop_message_center()
-                    self.stop_status_center()
-                    continue
-
                 # Generate status manager instance
                 run_id_str = str(status_entity.run_id)
                 run_id_int = int(status_entity.run_id)
@@ -293,14 +252,7 @@ def run_status_dispatcher(self, status_event, status_queue,
 
     def run_status_dispatcher_in_slave(self, status_event, status_queue,
                                        sender_message_center_queue,
-                                       listener_message_center_queue,
-                                       sender_message_event, process_name=None):
-        if process_name is not None:
-            setproctitle.setproctitle(process_name)
-
-        if platform.system() != "Windows":
-            os.setsid()
-
+                                       listener_message_center_queue):
         # Save the parameters
         self.status_event = status_event
         self.status_queue = status_queue
@@ -313,11 +265,10 @@ def run_status_dispatcher_in_slave(self, status_event, status_queue,
             self.rebuild_message_center(sender_message_center_queue)
             message_center = FedMLMessageCenter(
                 sender_message_queue=sender_message_center_queue,
-                listener_message_queue=listener_message_center_queue,
-                sender_message_event=sender_message_event
+                listener_message_queue=listener_message_center_queue
             )
 
-        if status_queue is not None:
+        if sender_message_center_queue is not None:
             self.rebuild_status_center(status_queue)
 
         # Init status manager instances
@@ -329,7 +280,7 @@ def run_status_dispatcher_in_slave(self, status_event, status_queue,
 
             # Check if we should stop status dispatcher
             try:
-                self.check_status_stop_event()
+                self.check_message_stop_event()
             except StatusCenterStoppedException as e:
                 break
 
diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
index ec98cc7906..e045458db5 100755
--- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
+++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py
@@ -136,14 +136,12 @@ def process_job_completed_status(self, master_id, status):
         # self.stop_cloud_server()
         # self.remove_listener_for_run_metrics(self.run_id)
         # self.remove_listener_for_run_logs(self.run_id)
-
         self.message_center.receive_message(
             GeneralConstants.get_topic_complete_job(master_id),
             json.dumps(GeneralConstants.get_payload_complete_job(self.run_id, master_id)))
 
-        if self.status_center.is_deployment_status_center:
-            if status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED:
-                self.report_deployment_status(self.run_id, GeneralConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
+        if self.status_center.is_deployment_status_center and status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED:
+            self.report_deployment_status(self.run_id, GeneralConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
 
     def process_job_exception_status(self, master_id, status):
         # Report exception job status
@@ -189,17 +187,16 @@ def process_job_status_consensus(self, run_id, master_id, status):
         status = self.get_entire_job_status()
 
         # Set the device status based on the job status
-        if self.edge_status_dict is not None:
-            for edge_id_item, edge_status_item in self.edge_status_dict.items():
-                if edge_id_item == "server":
-                    continue
-
-                # Calc the device status based on the job status
-                consensus_device_status = FedMLStatusManager.get_device_consensus_status_in_job(
-                    status, edge_status_item)
-                if consensus_device_status is not None:
-                    self.message_reporter.report_client_training_status(
-                        edge_id_item, consensus_device_status, run_id=run_id, update_db=False)
+        for edge_id_item, edge_status_item in self.edge_status_dict.items():
+            if edge_id_item == "server":
+                continue
+
+            # Calc the device status based on the job status
+            consensus_device_status = FedMLStatusManager.get_device_consensus_status_in_job(
+                status, edge_status_item)
+            if consensus_device_status is not None:
+                self.message_reporter.report_client_training_status(
+                    edge_id_item, consensus_device_status, run_id=run_id, update_db=False)
 
         # Save the job status to local storage
         FedMLServerDataInterface.get_instance().save_job_status(run_id, master_id, status, status)
diff --git a/python/fedml/computing/scheduler/slave/base_slave_agent.py b/python/fedml/computing/scheduler/slave/base_slave_agent.py
index 9876ac9912..01c0a39195 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_agent.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_agent.py
@@ -24,9 +24,7 @@ def __init__(self):
 
     def login(
             self, userid, api_key=None, device_id=None,
-            os_name=None, need_to_check_gpu=False, role=None,
-            communication_manager=None, sender_message_queue=None,
-            status_center_queue=None, sender_message_event=None
+            os_name=None, need_to_check_gpu=False, role=None
     ):
         # Preprocess the login args
         if need_to_check_gpu:
@@ -35,7 +33,7 @@ def login(
                 print("We can't find any gpu device on your machine. \n"
                       "With the gpu_supplier(-g) option, you need to check if your machine "
                       "has nvidia GPUs and installs CUDA related drivers.")
-                return None
+                return
 
         # Login account
         login_result = FedMLAccountManager.get_instance().login(
@@ -59,22 +57,17 @@ def login(
         # Initialize the protocol manager
         # noinspection PyBoardException
         try:
-            self._initialize_protocol_manager(
-                communication_manager=communication_manager,
-                sender_message_queue=sender_message_queue,
-                status_center_queue=status_center_queue,
-                sender_message_event=sender_message_event)
+            self._initialize_protocol_manager()
         except Exception as e:
             FedMLAccountManager.write_login_failed_file(is_client=True)
             self.protocol_mgr.stop()
             raise e
 
-        return login_result
-
-    def start(self):
         # Start the protocol manager to process the messages from MLOps and slave agents.
         self.protocol_mgr.start()
 
+        return login_result
+
     @staticmethod
     def logout():
         GeneralConstants.cleanup_run_process(None)
@@ -91,20 +84,12 @@ def _create_protocol_manager(self, login_result):
         self.protocol_mgr.user_name = login_result.user_name
         self.protocol_mgr.agent_config = login_result.agent_config
 
-    def _initialize_protocol_manager(
-            self, communication_manager=None, sender_message_queue=None,
-            status_center_queue=None, sender_message_event=None
-    ):
+    def _initialize_protocol_manager(self):
         # Init local database
         self._init_database()
 
         # Initialize the master protocol
-        self.protocol_mgr.set_parent_agent(self)
-        self.protocol_mgr.initialize(
-            communication_manager=communication_manager,
-            sender_message_queue=sender_message_queue,
-            status_center_queue=status_center_queue,
-            sender_message_event=sender_message_event)
+        self.protocol_mgr.initialize()
 
         # Start the client API process
         self._start_slave_api()
@@ -137,9 +122,6 @@ def _start_slave_api(self):
                 should_capture_stderr=False
             )
 
-    def get_protocol_manager(self):
-        return self.protocol_mgr
-
     @abstractmethod
     def _get_log_file_dir(self):
         pass
@@ -155,8 +137,3 @@ def _init_database(self):
     @abstractmethod
     def _generate_protocol_manager_instance(self, args, agent_config=None):
         return None
-
-    def save_deploy_ids(self, deploy_master_edge_id=None, deploy_slave_edge_id=None):
-        self.protocol_mgr.save_deploy_ids(
-            deploy_master_edge_id=deploy_master_edge_id, deploy_slave_edge_id=deploy_slave_edge_id)
-
diff --git a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
index 0486b131a6..5e530dbba7 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
@@ -7,9 +7,6 @@
 import traceback
 from abc import ABC, abstractmethod
 
-import setproctitle
-
-import fedml
 from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
 from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon
 from .client_data_interface import FedMLClientDataInterface
@@ -50,12 +47,8 @@ def __repr__(self):
         )
 
     def run(self, process_event, completed_event,  run_extend_queue_list,
-            sender_message_center, listener_message_queue, status_center_queue,
-            process_name=None):
-        if process_name is not None:
-            setproctitle.setproctitle(process_name)
-
-        print(f"Client runner process id {os.getpid()}, name {process_name}, run id {self.run_id}")
+            sender_message_center, listener_message_queue, status_center_queue):
+        print(f"Client runner process id {os.getpid()}, run id {self.run_id}")
 
         if platform.system() != "Windows":
             os.setsid()
@@ -251,7 +244,7 @@ def reset_devices_status(self, edge_id, status):
     def start_runner_process(
             self, run_id, request_json, edge_id=None,
             sender_message_queue=None, listener_message_queue=None,
-            status_center_queue=None, cuda_visible_gpu_ids_str=None, process_name=None
+            status_center_queue=None, cuda_visible_gpu_ids_str=None
     ):
         client_runner = self._generate_job_runner_instance(
             self.args, run_id=run_id, request_json=request_json,
@@ -266,17 +259,9 @@ def start_runner_process(
         client_runner.server_id = request_json.get("server_id", "0")
         self.run_extend_queue_list = self._generate_extend_queue_list()
         logging.info("start the runner process.")
-
-        if platform.system() == "Windows":
-            self.run_process = multiprocessing.Process(
-                target=client_runner.run, args=(
-                    self.run_process_event, self.run_process_completed_event, self.run_extend_queue_list,
-                    sender_message_queue, listener_message_queue, status_center_queue, process_name
-                ))
-        else:
-            self.run_process = fedml.get_process(target=client_runner.run, args=(
-                self.run_process_event, self.run_process_completed_event, self.run_extend_queue_list,
-                sender_message_queue, listener_message_queue, status_center_queue, process_name
-            ))
+        self.run_process = Process(target=client_runner.run, args=(
+            self.run_process_event, self.run_process_completed_event, self.run_extend_queue_list,
+            sender_message_queue, listener_message_queue, status_center_queue
+        ))
         self.run_process.start()
         return self.run_process
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index 534ee2f7d0..447bd05cd9 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -62,6 +62,8 @@ def __init__(self, args, agent_config=None):
         self.server_id = args.server_id
         self.model_device_server_id = None
         self.model_device_client_edge_id_list = None
+        self.model_device_server = None
+        self.model_device_client_list = None
 
     @abstractmethod
     def generate_topics(self):
@@ -145,9 +147,15 @@ def add_subscribe_topic(self, topic):
         self.subscribed_topics.append(topic)
 
     def stop(self):
-        if self.model_device_client_edge_id_list is not None:
-            self.model_device_client_edge_id_list.clear()
-            self.model_device_client_edge_id_list = None
+        if self.model_device_server is not None:
+            self.model_device_server.stop()
+            self.model_device_server = None
+
+        if self.model_device_client_list is not None:
+            for model_client in self.model_device_client_list:
+                model_client.stop()
+            self.model_device_client_list.clear()
+            self.model_device_client_list = None
 
         super().stop()
 
@@ -257,8 +265,6 @@ def callback_start_train(self, topic, payload):
             # Report the run status with finished status and return
             self.generate_status_report(run_id, edge_id, server_agent_id=server_agent_id).report_client_id_status(
                 edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, run_id=run_id)
-
-            MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, edge_id)
             return
         logging.info(
             f"Run started, available gpu ids: {JobRunnerUtils.get_instance().get_available_gpu_id_list(edge_id)}")
@@ -276,7 +282,6 @@ def callback_start_train(self, topic, payload):
             listener_message_queue=self.get_listener_message_queue(),
             status_center_queue=self.get_status_queue(),
             cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str,
-            process_name=GeneralConstants.get_launch_slave_job_process_name(run_id, edge_id)
         )
         run_process = self._get_job_runner_manager().get_runner_process(run_id)
         if run_process is not None:
diff --git a/python/fedml/computing/scheduler/slave/client_data_interface.py b/python/fedml/computing/scheduler/slave/client_data_interface.py
index 74bf7a64a3..0e9e84381a 100755
--- a/python/fedml/computing/scheduler/slave/client_data_interface.py
+++ b/python/fedml/computing/scheduler/slave/client_data_interface.py
@@ -343,15 +343,6 @@ def handle_database_compatibility(self):
 
         self.close_job_db()
 
-    def check_if_table_exist(self, current_db_cursor):
-        results = current_db_cursor.execute("select * from sqlite_master where type='table' and name='jobs';")
-        if results is None:
-            return False
-        result_len = 0
-        for row in results:
-            result_len += 1
-        return False if result_len == 0 else True
-
     def get_agent_status(self, edge_id=0):
         self.open_job_db()
         enabled = 1
diff --git a/python/fedml/computing/scheduler/slave/client_login.py b/python/fedml/computing/scheduler/slave/client_login.py
index 7a1c759410..95c772a225 100755
--- a/python/fedml/computing/scheduler/slave/client_login.py
+++ b/python/fedml/computing/scheduler/slave/client_login.py
@@ -1,11 +1,11 @@
 import argparse
 import os
 import fedml
-from fedml.computing.scheduler.slave.united_agents import FedMLUnitedAgent
+from fedml.computing.scheduler.slave.slave_agent import FedMLLaunchSlaveAgent
 
 
 def logout():
-    FedMLUnitedAgent.get_instance().logout()
+    FedMLLaunchSlaveAgent.logout()
 
 
 if __name__ == "__main__":
@@ -18,7 +18,6 @@ def logout():
     parser.add_argument("--version", "-v", type=str, default="release")
     parser.add_argument("--local_server", "-ls", type=str, default="127.0.0.1")
     parser.add_argument("--role", "-r", type=str, default="client")
-    parser.add_argument("--runner_cmd", "-rc", type=str, default="{}")
     parser.add_argument("--device_id", "-id", type=str, default="0")
     parser.add_argument("--os_name", "-os", type=str, default="")
     parser.add_argument("--api_key", "-k", type=str, default="")
@@ -37,10 +36,9 @@ def logout():
         fedml.set_local_on_premise_platform_port(args.local_on_premise_platform_port)
 
     fedml.set_env_version(args.version)
-    united_agents = FedMLUnitedAgent.get_instance()
+    slave_agent = FedMLLaunchSlaveAgent()
     if args.type == 'login':
-        united_agents.login(
-            args.api_key, api_key=args.api_key, device_id=args.device_id,
-            os_name=args.os_name, role=args.role, runner_cmd=args.runner_cmd)
+        slave_agent.login(args.api_key, api_key=args.api_key, device_id=args.device_id,
+                          os_name=args.os_name, role=args.role)
     else:
-        united_agents.logout()
+        FedMLLaunchSlaveAgent.logout()
diff --git a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
index 449cd7c29c..a1067a0d96 100755
--- a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py
@@ -1,15 +1,18 @@
-
+import copy
+import json
 import os
+import fedml
 from ..comm_utils.job_cleanup import JobCleanup
 from .base_slave_protocol_manager import FedMLBaseSlaveProtocolManager
 from .launch_job_runner_manager import FedMLLaunchJobRunnerManager
+from ..model_scheduler.model_device_server import FedMLModelDeviceServerRunner
+from ..model_scheduler.model_device_client import FedMLModelDeviceClientRunner
 
 
 class FedMLLaunchSlaveProtocolManager(FedMLBaseSlaveProtocolManager):
 
     def __init__(self, args, agent_config=None):
         FedMLBaseSlaveProtocolManager.__init__(self, args, agent_config=agent_config)
-        self.message_center_name = "launch_slave_agent"
 
     # Override
     def generate_topics(self):
@@ -31,8 +34,7 @@ def _get_job_runner_manager(self):
     def _process_connection_ready(self):
         from fedml.core.mlops import sync_deploy_id
         sync_deploy_id(
-            self.edge_id, self.model_device_server_id, self.model_device_client_edge_id_list,
-            message_center=self.message_center)
+            self.edge_id, self.model_device_server.edge_id, self.model_device_client_edge_id_list)
 
     # Override
     def _process_connection_lost(self):
@@ -45,19 +47,59 @@ def _init_extra_items(self):
         # Sync the data when startup
         JobCleanup.get_instance().sync_data_on_startup(self.args.edge_id)
 
-        # Start the monitor process
-        self.mlops_metrics.stop_device_realtime_perf()
-        self.mlops_metrics.report_device_realtime_perf(self.args, self.args.agent_config["mqtt_config"])
-
-    def save_deploy_ids(self, deploy_master_edge_id=None, deploy_slave_edge_id=None):
-        if deploy_master_edge_id is not None:
-            self.model_device_server_id = deploy_master_edge_id
-
-        if deploy_slave_edge_id is not None:
-            if self.model_device_client_edge_id_list is None:
-                self.model_device_client_edge_id_list = list()
-            self.model_device_client_edge_id_list.append(deploy_slave_edge_id)
+        # Get the environment variables
+        infer_host = os.getenv("FEDML_INFER_HOST", None)
+        infer_redis_addr = os.getenv("FEDML_INFER_REDIS_ADDR", None)
+        infer_redis_port = os.getenv("FEDML_INFER_REDIS_PORT", None)
+        infer_redis_password = os.getenv("FEDML_INFER_REDIS_PASSWORD", None)
+        model_client_num = os.getenv("FEDML_MODEL_WORKER_NUM", None)
+
+        # Start deploy master agent and slave agent
+        in_args = copy.deepcopy(self.args)
+        if self.model_device_client_edge_id_list is None:
+            self.model_device_client_edge_id_list = list()
+        if self.model_device_client_list is None:
+            model_client_num = 1 if model_client_num is None else int(model_client_num)
+            self.model_device_client_list = list()
+            for client_index in range(model_client_num):
+                model_device_client = FedMLModelDeviceClientRunner(
+                    in_args, f"{in_args.current_device_id}_{client_index + 1}", in_args.os_name,
+                    in_args.is_from_docker, self.agent_config)
+                if infer_host is not None:
+                    model_device_client.infer_host = infer_host
+                if infer_redis_addr is not None:
+                    model_device_client.redis_addr = infer_redis_addr
+                if infer_redis_port is not None:
+                    model_device_client.redis_port = infer_redis_port
+                if infer_redis_password is not None:
+                    model_device_client.redis_password = infer_redis_password
+                model_device_client.start()
+                self.model_device_client_list.append(model_device_client)
+                self.model_device_client_edge_id_list.append(model_device_client.get_edge_id())
+
+        self.args = copy.deepcopy(in_args)
+        if self.model_device_server is None:
+            self.model_device_server = FedMLModelDeviceServerRunner(in_args, in_args.current_device_id,
+                                                                    in_args.os_name, in_args.is_from_docker,
+                                                                    self.agent_config)
+            if infer_host is not None:
+                self.model_device_server.infer_host = infer_host
+            if infer_redis_addr is not None:
+                self.model_device_server.redis_addr = infer_redis_addr
+            if infer_redis_port is not None:
+                self.model_device_server.redis_port = infer_redis_port
+            if infer_redis_password is not None:
+                self.model_device_server.redis_password = infer_redis_password
+
+            self.model_device_server.start()
+            self.model_device_server_id = self.model_device_server.get_edge_id()
 
         # Save the deployed master and worker id list to the environment variable.
         os.environ["FEDML_DEPLOY_MASTER_ID"] = str(self.model_device_server_id)
         os.environ["FEDML_DEPLOY_WORKER_IDS"] = str(self.model_device_client_edge_id_list)
+
+        # Start the monitor process
+        self.args = copy.deepcopy(in_args)
+        self.mlops_metrics.stop_device_realtime_perf()
+        self.mlops_metrics.report_device_realtime_perf(self.args, self.args.agent_config["mqtt_config"])
+        pass
\ No newline at end of file
diff --git a/python/fedml/computing/scheduler/slave/united_agents.py b/python/fedml/computing/scheduler/slave/united_agents.py
deleted file mode 100755
index 3c8549c06a..0000000000
--- a/python/fedml/computing/scheduler/slave/united_agents.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from fedml.computing.scheduler.model_scheduler.master_agent import FedMLDeployMasterAgent
-from fedml.computing.scheduler.model_scheduler.worker_agent import FedMLDeployWorkerAgent
-from fedml.computing.scheduler.scheduler_core.account_manager import FedMLAccountManager
-from fedml.computing.scheduler.slave.slave_agent import FedMLLaunchSlaveAgent
-from fedml.computing.scheduler.master.master_agent import FedMLLaunchMasterAgent
-from fedml.core.common.singleton import Singleton
-
-
-class FedMLUnitedAgent(Singleton):
-
-    @staticmethod
-    def get_instance():
-        return FedMLUnitedAgent()
-
-    def logout(self):
-        FedMLLaunchSlaveAgent.logout()
-
-    def login(self, userid, api_key=None, device_id=None,
-              os_name=None, need_to_check_gpu=False, role=None, runner_cmd=None):
-        # Create the launch master/slave and deploy master/slave agents.
-        launch_slave_agent = FedMLLaunchSlaveAgent()
-        launch_master_agent = FedMLLaunchMasterAgent()
-        deploy_slave_agent = FedMLDeployWorkerAgent()
-        deploy_master_agent = FedMLDeployMasterAgent()
-
-        # Login with the launch slave role
-        login_result = launch_slave_agent.login(
-            api_key, api_key=api_key, device_id=device_id,
-            os_name=os_name, role=role
-        )
-
-        # Get the communication manager, sender message queue
-        shared_communication_mgr = launch_slave_agent.get_protocol_manager().get_protocol_communication_manager()
-        shared_slave_sender_message_queue = launch_slave_agent.get_protocol_manager().get_protocol_sender_message_queue()
-        shared_slave_sender_message_event = launch_slave_agent.get_protocol_manager().get_protocol_sender_message_event()
-
-        # Login with the launch master role based on
-        # the shared communication manager, sender message center
-        launch_master_agent.login(
-            api_key, api_key=api_key, device_id=login_result.device_id,
-            os_name=os_name, runner_cmd=runner_cmd,
-            role=FedMLAccountManager.ROLE_GPU_MASTER_SERVER,
-            communication_manager=shared_communication_mgr,
-            sender_message_queue=None
-        )
-
-        # Get the status center queue
-        shared_slave_status_center_queue = launch_slave_agent.get_protocol_manager().get_protocol_status_center_queue()
-        shared_master_status_center_queue = launch_master_agent.get_protocol_manager().get_protocol_status_center_queue()
-        shared_master_sender_message_queue = launch_master_agent.get_protocol_manager().get_protocol_sender_message_queue()
-        shared_master_sender_message_event = launch_master_agent.get_protocol_manager().get_protocol_sender_message_event()
-
-        # Login with the deployment master role based on
-        # the shared communication manager, sender message center, status center
-        deploy_master_login_result = deploy_master_agent.login(
-            userid, api_key=api_key, device_id=login_result.device_id,
-            os_name=os_name, role=FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM,
-            communication_manager=shared_communication_mgr
-        )
-
-        # Login with the deployment slave role based on
-        # the shared communication manager, sender message center, status center
-        deploy_slave_login_result = deploy_slave_agent.login(
-            userid, api_key=api_key, device_id=login_result.device_id,
-            os_name=os_name, role=FedMLAccountManager.ROLE_DEPLOY_WORKER_ON_PREM,
-            communication_manager=shared_communication_mgr
-        )
-
-        # Set the deployment ids to launch agent so that we can report the related device info to MLOps.
-        launch_slave_agent.save_deploy_ids(
-            deploy_master_edge_id=deploy_master_login_result.edge_id,
-            deploy_slave_edge_id=deploy_slave_login_result.edge_id)
-
-        # Start the slave agent to connect to servers and loop forever.
-        launch_slave_agent.start()
diff --git a/python/fedml/core/mlops/__init__.py b/python/fedml/core/mlops/__init__.py
index 121c8e26bb..148427fe1f 100644
--- a/python/fedml/core/mlops/__init__.py
+++ b/python/fedml/core/mlops/__init__.py
@@ -1453,14 +1453,12 @@ def release_resources(run_id, device_id):
         MLOpsConstants.MSG_TOPIC_LAUNCH_RELEASE_GPU_IDS, json.dumps(payload))
 
 
-def sync_deploy_id(device_id, master_deploy_id, worker_deploy_id_list, message_center=None):
-    payload = {"device_id": device_id, "master_deploy_id": master_deploy_id, "worker_deploy_ids": worker_deploy_id_list}
-    if message_center is None:
-        fedml_args = get_fedml_args()
-        setup_log_mqtt_mgr()
-        MLOpsStore.mlops_log_mqtt_mgr.send_message_json(
-            MLOpsConstants.MSG_TOPIC_LAUNCH_SYNC_DEPLOY_IDS, json.dumps(payload))
-    else:
-        message_center.send_message( MLOpsConstants.MSG_TOPIC_LAUNCH_SYNC_DEPLOY_IDS, json.dumps(payload))
+def sync_deploy_id(device_id, master_deploy_id, worker_deploy_id_list):
+    fedml_args = get_fedml_args()
 
+    setup_log_mqtt_mgr()
+
+    payload = {"device_id": device_id, "master_deploy_id": master_deploy_id, "worker_deploy_ids": worker_deploy_id_list}
+    MLOpsStore.mlops_log_mqtt_mgr.send_message_json(
+        MLOpsConstants.MSG_TOPIC_LAUNCH_SYNC_DEPLOY_IDS, json.dumps(payload))
 
diff --git a/python/fedml/core/mlops/mlops_device_perfs.py b/python/fedml/core/mlops/mlops_device_perfs.py
index 61da372d97..4bb41df73f 100644
--- a/python/fedml/core/mlops/mlops_device_perfs.py
+++ b/python/fedml/core/mlops/mlops_device_perfs.py
@@ -1,7 +1,6 @@
 import json
 import logging
 import os
-import platform
 import time
 import traceback
 import uuid
@@ -9,15 +8,12 @@
 
 import multiprocessing
 import psutil
-import setproctitle
 
-import fedml
 from fedml.computing.scheduler.comm_utils import sys_utils
 from .device_info_report_protocol import FedMLDeviceInfoReportProtocol
 from .mlops_utils import MLOpsUtils
 from .system_stats import SysStats
 from ...computing.scheduler.comm_utils.job_monitor import JobMonitor
-from ...computing.scheduler.scheduler_core.general_constants import GeneralConstants
 from ...core.distributed.communication.mqtt.mqtt_manager import MqttManager
 
 
@@ -32,17 +28,6 @@
 ROLE_ENDPOINT_REPLICA_NUM = 8
 ROLE_ENDPOINT_REPLICA_PERF = 9
 
-ROLE_DEVICE_JOB_TOTAL_MONITOR_STR = "device_job_total"
-ROLE_DEVICE_INFO_REPORTER_STR = "device_info"
-ROLE_ENDPOINT_MASTER_STR = "endpoint_master"
-ROLE_ENDPOINT_SLAVE_STR = "endpoint_slave"
-ROLE_RUN_MASTER_STR = "run_master"
-ROLE_RUN_SLAVE_STR = "run_slave"
-ROLE_ENDPOINT_LOGS_STR = "endpoint_logs"
-ROLE_AUTO_SCALER_STR = "autoscaler"
-ROLE_ENDPOINT_REPLICA_NUM_STR = "endpoint_replica_num"
-ROLE_ENDPOINT_REPLICA_PERF_STR = "endpoint_replica_perf"
-
 
 class MLOpsDevicePerfStats(object):
     def __init__(self):
@@ -91,161 +76,58 @@ def setup_realtime_stats_process(self, sys_args):
         self.device_realtime_stats_event.clear()
         perf_stats.device_realtime_stats_event = self.device_realtime_stats_event
 
-        if platform.system() == "Windows":
-            self.device_realtime_stats_process = multiprocessing.Process(
-                target=perf_stats.report_device_realtime_stats_entry,
-                args=(self.device_realtime_stats_event, ROLE_DEVICE_INFO_REPORTER, self.is_client,
-                      GeneralConstants.get_monitor_process_name(
-                          ROLE_DEVICE_INFO_REPORTER_STR, perf_stats.run_id, perf_stats.edge_id)))
-        else:
-            self.device_realtime_stats_process = fedml.get_process(
-                target=perf_stats.report_device_realtime_stats_entry,
-                args=(self.device_realtime_stats_event, ROLE_DEVICE_INFO_REPORTER, self.is_client,
-                      GeneralConstants.get_monitor_process_name(
-                          ROLE_DEVICE_INFO_REPORTER_STR, perf_stats.run_id, perf_stats.edge_id)))
+        self.device_realtime_stats_process = multiprocessing.Process(
+            target=perf_stats.report_device_realtime_stats_entry,
+            args=(self.device_realtime_stats_event, ROLE_DEVICE_INFO_REPORTER, self.is_client))
         self.device_realtime_stats_process.start()
 
         if self.enable_job_total_monitor:
-            if platform.system() == "Windows":
-                self.job_total_monitor_process = multiprocessing.Process(
-                    target=perf_stats.report_device_realtime_stats_entry,
-                    args=(self.device_realtime_stats_event, ROLE_DEVICE_JOB_TOTAL_MONITOR, self.is_client,
-                          GeneralConstants.get_monitor_process_name(
-                              ROLE_DEVICE_JOB_TOTAL_MONITOR_STR, perf_stats.run_id, perf_stats.edge_id)))
-            else:
-                self.job_total_monitor_process = fedml.get_process(
-                    target=perf_stats.report_device_realtime_stats_entry,
-                    args=(self.device_realtime_stats_event, ROLE_DEVICE_JOB_TOTAL_MONITOR, self.is_client,
-                          GeneralConstants.get_monitor_process_name(
-                              ROLE_DEVICE_JOB_TOTAL_MONITOR_STR, perf_stats.run_id, perf_stats.edge_id)))
+            self.job_total_monitor_process = multiprocessing.Process(
+                target=perf_stats.report_device_realtime_stats_entry,
+                args=(self.device_realtime_stats_event, ROLE_DEVICE_JOB_TOTAL_MONITOR, self.is_client))
             self.job_total_monitor_process.start()
         else:
             if self.is_client:
-                # Register endpoint master process
-                if platform.system() == "Windows":
-                    self.monitor_endpoint_master_process = multiprocessing.Process(
-                        target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_MASTER, True,
-                              GeneralConstants.get_monitor_process_name(
-                                  ROLE_ENDPOINT_MASTER_STR, perf_stats.run_id, perf_stats.edge_id)))
-                else:
-                    self.monitor_endpoint_master_process = fedml.get_process(
-                        target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_MASTER, True,
-                              GeneralConstants.get_monitor_process_name(
-                                  ROLE_ENDPOINT_MASTER_STR, perf_stats.run_id, perf_stats.edge_id)))
+                self.monitor_endpoint_master_process = multiprocessing.Process(
+                    target=perf_stats.report_device_realtime_stats_entry,
+                    args=(self.device_realtime_stats_event, ROLE_ENDPOINT_MASTER))
                 self.monitor_endpoint_master_process.start()
 
-                # Register endpoint slave process
-                if platform.system() == "Windows":
-                    self.monitor_endpoint_slave_process = multiprocessing.Process(
-                        target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_SLAVE, True,
-                              GeneralConstants.get_monitor_process_name(
-                                  ROLE_ENDPOINT_SLAVE_STR, perf_stats.run_id, perf_stats.edge_id)))
-                else:
-                    self.monitor_endpoint_slave_process = fedml.get_process(
-                        target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_SLAVE, True,
-                              GeneralConstants.get_monitor_process_name(
-                                  ROLE_ENDPOINT_SLAVE_STR, perf_stats.run_id, perf_stats.edge_id)))
-                self.monitor_endpoint_slave_process.start()
-
-                # Register run slave process
-                if platform.system() == "Windows":
-                    self.monitor_run_slave_process = multiprocessing.Process(
-                        target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_RUN_SLAVE, True,
-                              GeneralConstants.get_monitor_process_name(
-                                  ROLE_RUN_SLAVE_STR, perf_stats.run_id, perf_stats.edge_id)))
-                else:
-                    self.monitor_run_slave_process = fedml.get_process(
-                        target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_RUN_SLAVE, True,
-                              GeneralConstants.get_monitor_process_name(
-                                  ROLE_RUN_SLAVE_STR, perf_stats.run_id, perf_stats.edge_id)))
+                self.monitor_run_slave_process = multiprocessing.Process(
+                    target=perf_stats.report_device_realtime_stats_entry,
+                    args=(self.device_realtime_stats_event, ROLE_RUN_SLAVE))
                 self.monitor_run_slave_process.start()
 
-                # Register endpoint logs process
-                if platform.system() == "Windows":
-                    self.monitor_endpoint_logs_process = multiprocessing.Process(
-                        target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_LOGS, True,
-                              GeneralConstants.get_monitor_process_name(
-                                  ROLE_ENDPOINT_LOGS_STR, perf_stats.run_id, perf_stats.edge_id)))
-                else:
-                    self.monitor_endpoint_logs_process = fedml.get_process(
-                        target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_LOGS, True,
-                              GeneralConstants.get_monitor_process_name(
-                                  ROLE_ENDPOINT_LOGS_STR, perf_stats.run_id, perf_stats.edge_id)))
+                self.monitor_endpoint_logs_process = multiprocessing.Process(
+                    target=perf_stats.report_device_realtime_stats_entry,
+                    args=(self.device_realtime_stats_event, ROLE_ENDPOINT_LOGS))
                 self.monitor_endpoint_logs_process.start()
 
                 # Register auto-scaler process
-                if platform.system() == "Windows":
-                    self.monitor_auto_scaler_process = multiprocessing.Process(
-                        target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_AUTO_SCALER, True,
-                              GeneralConstants.get_monitor_process_name(
-                                  ROLE_AUTO_SCALER_STR, perf_stats.run_id, perf_stats.edge_id)))
-                else:
-                    self.monitor_auto_scaler_process = fedml.get_process(
-                        target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_AUTO_SCALER, True,
-                              GeneralConstants.get_monitor_process_name(
-                                  ROLE_AUTO_SCALER_STR, perf_stats.run_id, perf_stats.edge_id)))
+                self.monitor_auto_scaler_process = multiprocessing.Process(
+                    target=perf_stats.report_device_realtime_stats_entry,
+                    args=(self.device_realtime_stats_event, ROLE_AUTO_SCALER))
                 self.monitor_auto_scaler_process.start()
 
                 # Register replica number report channel
-                if platform.system() == "Windows":
-                    self.monitor_replica_num_process = multiprocessing.Process(
-                        target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_NUM, True,
-                              GeneralConstants.get_monitor_process_name(
-                                  ROLE_ENDPOINT_REPLICA_NUM_STR, perf_stats.run_id, perf_stats.edge_id)))
-                else:
-                    self.monitor_replica_num_process = fedml.get_process(
-                        target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_NUM, True,
-                              GeneralConstants.get_monitor_process_name(
-                                  ROLE_ENDPOINT_REPLICA_NUM_STR, perf_stats.run_id, perf_stats.edge_id)))
+                self.monitor_replica_num_process = multiprocessing.Process(
+                    target=perf_stats.report_device_realtime_stats_entry,
+                    args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_NUM))
                 self.monitor_replica_num_process.start()
 
                 # Register replica performance report channel
-                if platform.system() == "Windows":
-                    self.monitor_replica_perf_process = multiprocessing.Process(
-                        target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_PERF, True,
-                              GeneralConstants.get_monitor_process_name(
-                                  ROLE_ENDPOINT_REPLICA_PERF_STR, perf_stats.run_id, perf_stats.edge_id)))
-
-                else:
-                    self.monitor_replica_perf_process = fedml.get_process(
-                        target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_PERF, True,
-                              GeneralConstants.get_monitor_process_name(
-                                  ROLE_ENDPOINT_REPLICA_PERF_STR, perf_stats.run_id, perf_stats.edge_id)))
+                self.monitor_replica_perf_process = multiprocessing.Process(
+                    target=perf_stats.report_device_realtime_stats_entry,
+                    args=(self.device_realtime_stats_event, ROLE_ENDPOINT_REPLICA_PERF))
                 self.monitor_replica_perf_process.start()
             else:
-                if platform.system() == "Windows":
-                    self.monitor_run_master_process = multiprocessing.Process(
-                        target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_RUN_MASTER, False,
-                              GeneralConstants.get_monitor_process_name(
-                                  ROLE_RUN_MASTER_STR, perf_stats.run_id, perf_stats.edge_id)))
-                else:
-                    self.monitor_run_master_process = fedml.get_process(
-                        target=perf_stats.report_device_realtime_stats_entry,
-                        args=(self.device_realtime_stats_event, ROLE_RUN_MASTER, False,
-                              GeneralConstants.get_monitor_process_name(
-                                  ROLE_RUN_MASTER_STR, perf_stats.run_id, perf_stats.edge_id)))
+                self.monitor_run_master_process = multiprocessing.Process(
+                    target=perf_stats.report_device_realtime_stats_entry,
+                    args=(self.device_realtime_stats_event, ROLE_RUN_MASTER))
                 self.monitor_run_master_process.start()
 
-    def report_device_realtime_stats_entry(self, sys_event, role, is_client=False, process_name=None):
-        if process_name is not None:
-            setproctitle.setproctitle(process_name)
-
-        # print(f"Report device realtime stats, process id {os.getpid()}, name {process_name}")
+    def report_device_realtime_stats_entry(self, sys_event, role, is_client=False):
+        # print(f"Report device realtime stats, process id {os.getpid()}")
 
         self.device_realtime_stats_event = sys_event
         mqtt_mgr = MqttManager(
diff --git a/python/fedml/core/mlops/mlops_job_perfs.py b/python/fedml/core/mlops/mlops_job_perfs.py
index 429e32ff1d..fe3d921558 100644
--- a/python/fedml/core/mlops/mlops_job_perfs.py
+++ b/python/fedml/core/mlops/mlops_job_perfs.py
@@ -1,25 +1,19 @@
 import json
 import logging
 import os
-import platform
 import time
 import traceback
 import uuid
 
 import multiprocess as multiprocessing
 import psutil
-import setproctitle
 
-import fedml
 from .mlops_utils import MLOpsUtils
 from .system_stats import SysStats
-from ...computing.scheduler.scheduler_core.general_constants import GeneralConstants
 from ...core.distributed.communication.mqtt.mqtt_manager import MqttManager
 
 
 class MLOpsJobPerfStats(object):
-    JOB_PERF_PROCESS_TAG = "job_perf"
-
     def __init__(self):
         self.job_stats_process = None
         self.job_stats_event = None
@@ -144,26 +138,16 @@ def setup_job_stats_process(self, sys_args):
         self.job_stats_event.clear()
         perf_stats.job_stats_event = self.job_stats_event
         perf_stats.job_process_id_map = self.job_process_id_map
-        if platform.system() == "Windows":
-            self.job_stats_process = multiprocessing.Process(
-                target=perf_stats.report_job_stats_entry,
-                args=(self.job_stats_event, GeneralConstants.get_monitor_process_name(
-                    MLOpsJobPerfStats.JOB_PERF_PROCESS_TAG, perf_stats.run_id, perf_stats.edge_id)))
-        else:
-            self.job_stats_process = fedml.get_process(
-                target=perf_stats.report_job_stats_entry,
-                args=(self.job_stats_event, GeneralConstants.get_monitor_process_name(
-                    MLOpsJobPerfStats.JOB_PERF_PROCESS_TAG, perf_stats.run_id, perf_stats.edge_id)))
+
+        self.job_stats_process = multiprocessing.Process(target=perf_stats.report_job_stats_entry,
+                                                         args=(self.job_stats_event,))
         self.job_stats_process.start()
 
     def report_job_stats(self, sys_args):
         self.setup_job_stats_process(sys_args)
 
-    def report_job_stats_entry(self, sys_event, process_name):
-        if process_name is not None:
-            setproctitle.setproctitle(process_name)
-
-        # print(f"Report job realtime stats, process id {os.getpid()}, name {process_name}")
+    def report_job_stats_entry(self, sys_event):
+        # print(f"Report job realtime stats, process id {os.getpid()}")
 
         self.job_stats_event = sys_event
         mqtt_mgr = MqttManager(
diff --git a/python/fedml/core/mlops/mlops_runtime_log_daemon.py b/python/fedml/core/mlops/mlops_runtime_log_daemon.py
index bf136a36c9..ff06dc91b3 100644
--- a/python/fedml/core/mlops/mlops_runtime_log_daemon.py
+++ b/python/fedml/core/mlops/mlops_runtime_log_daemon.py
@@ -1,19 +1,16 @@
 import argparse
 import logging
 import os
-import platform
 import shutil
 import threading
 import time
 
 import multiprocess as multiprocessing
 import requests
-import setproctitle
 import yaml
 
 import fedml
 from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils
-from fedml.computing.scheduler.scheduler_core.general_constants import GeneralConstants
 from fedml.core.mlops.mlops_utils import MLOpsLoggingUtils
 from ...core.mlops.mlops_configs import MLOpsConfigs
 
@@ -258,11 +255,8 @@ def should_ignore_log_line(log_line):
 
         return False
 
-    def log_process(self, process_event, process_name=None):
-        if process_name is not None:
-            setproctitle.setproctitle(process_name)
-
-        logging.info(f"Log uploading process id {os.getpid()}, run id {self.run_id}, name {process_name}, edge id {self.device_id}")
+    def log_process(self, process_event):
+        logging.info(f"Log uploading process id {os.getpid()}, run id {self.run_id}, edge id {self.device_id}")
         self.log_process_event = process_event
 
         only_push_artifact = False
@@ -424,8 +418,6 @@ def set_log_source(self, source):
         self.log_source = source
 
     def start_log_processor(self, log_run_id, log_device_id, log_source=None, log_file_prefix=None):
-        if log_run_id == "-1" or int(log_run_id) <= 0:
-            return
         log_processor = MLOpsRuntimeLogProcessor(self.args.using_mlops, log_run_id,
                                                  log_device_id, self.log_file_dir,
                                                  self.log_server_url,
@@ -439,13 +431,8 @@ def start_log_processor(self, log_run_id, log_device_id, log_source=None, log_fi
             self.log_process_event_map[event_map_id] = multiprocessing.Event()
         self.log_process_event_map[event_map_id].clear()
         log_processor.log_process_event = self.log_process_event_map[event_map_id]
-        process_name = GeneralConstants.get_log_process_name(log_run_id, log_device_id)
-        if platform.system() == "Windows":
-            log_child_process = multiprocessing.Process(
-                target=log_processor.log_process, args=(self.log_process_event_map[event_map_id], process_name))
-        else:
-            log_child_process = fedml.get_process(
-                target=log_processor.log_process, args=(self.log_process_event_map[event_map_id], process_name))
+        log_child_process = multiprocessing.Process(target=log_processor.log_process,
+                                                    args=(self.log_process_event_map[event_map_id],))
         # process = threading.Thread(target=log_processor.log_process)
         # process.start()
         if log_child_process is not None:
diff --git a/python/setup.py b/python/setup.py
index 262fc060c4..4757c10a17 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -20,7 +20,7 @@ def finalize_options(self):
 
 requirements = [
     'GPUtil',
-    'PyYAML==5.3.1',
+    'PyYAML',
     'aiohttp>=3.8.1',
     'attrdict',
     'attrs',
@@ -69,8 +69,7 @@ def finalize_options(self):
     'python-dotenv',
     'protobuf>=3.20.2,<4.0dev',
     'typer<0.10.0,>=0.3.0',
-    'fastapi-cli==0.0.1',
-    'setproctitle'
+    'fastapi-cli==0.0.1'
 ]
 
 requirements_extra_mpi = [
@@ -127,7 +126,7 @@ def finalize_options(self):
 
 setup(
     name="fedml",
-    version="0.8.51b1",
+    version="0.9.0",
     author="FedML Team",
     author_email="ch@fedml.ai",
     description="A research and production integrated edge-cloud library for "
diff --git a/python/tests/cross-silo/run_cross_silo.sh b/python/tests/cross-silo/run_cross_silo.sh
index 0beaaffc52..2ccdbff15b 100644
--- a/python/tests/cross-silo/run_cross_silo.sh
+++ b/python/tests/cross-silo/run_cross_silo.sh
@@ -1,10 +1,10 @@
 #!/bin/bash
 set -e
 WORKSPACE=$(pwd)
-# PROJECT_HOME=$WORKSPACE/../../
-# cd $PROJECT_HOME
+PROJECT_HOME=$WORKSPACE/../../
+cd $PROJECT_HOME
 
-cd examples/federate/cross_silo/mqtt_s3_fedavg_mnist_lr_example/custom_data_and_model
+cd examples/cross_silo/mqtt_s3_fedavg_mnist_lr_example/custom_data_and_model
 
 # run client(s)
 RUN_ID="$(python -c "import uuid; print(uuid.uuid4().hex)")"
diff --git a/python/tests/smoke_test/cli/build.sh b/python/tests/smoke_test/cli/build.sh
index de956692f1..98fdb05244 100644
--- a/python/tests/smoke_test/cli/build.sh
+++ b/python/tests/smoke_test/cli/build.sh
@@ -16,7 +16,7 @@
 #  --help                     Show this message and exit.
 
 # build client package
-cd ../../../examples/federate/cross_silo/mqtt_s3_fedavg_mnist_lr_example/one_line
+cd ../../../examples/cross_silo/mqtt_s3_fedavg_mnist_lr_example/one_line
 echo "$PWD"
 
 SOURCE=client
@@ -30,4 +30,4 @@ SOURCE=server
 ENTRY=torch_server.py
 CONFIG=config
 DEST=./mlops
-fedml build -t server -sf $SOURCE -ep $ENTRY -cf $CONFIG -df $DEST
+fedml build -t server -sf $SOURCE -ep $ENTRY -cf $CONFIG -df $DEST
\ No newline at end of file
diff --git a/python/tests/test_deploy/test_deploy.py b/python/tests/test_deploy/test_deploy.py
deleted file mode 100644
index d7243c68de..0000000000
--- a/python/tests/test_deploy/test_deploy.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import os.path
-import time
-import fedml
-# Login
-API_KEY = os.getenv("API_KEY")
-fedml.set_env_version("test")
-fedml.set_local_on_premise_platform_port(18080)
-error_code, error_msg = fedml.api.fedml_login(api_key=API_KEY)
-if error_code != 0:
-    raise Exception("API Key is invalid!")
-
-# Yaml file
-cur_dir = os.path.dirname(__file__)
-fedml_dir = os.path.dirname(cur_dir)
-python_dir = os.path.dirname(fedml_dir)
-yaml_file = os.path.join(python_dir, "examples", "launch", "serve_job_mnist.yaml")
-
-# Launch job
-launch_result_dict = {}
-launch_result_status = {}
-
-launch_result = fedml.api.launch_job(yaml_file)
-print("Endpoint id is", launch_result.inner_id)
-
-cnt = 0
-while 1:
-    try:
-        r = fedml.api.get_endpoint(endpoint_id=launch_result.inner_id)
-    except Exception as e:
-        raise Exception(f"FAILED to get endpoint:{launch_result.inner_id}. {e}")
-    if r.status == "DEPLOYED":
-        print("Deployment has been successfully!")
-        break 
-    elif r.status == "FAILED":
-        raise Exception("FAILED to deploy.")
-    time.sleep(1)
-    cnt += 1
-    if cnt %3 ==0:
-        print('Deployment status is', r.status)
\ No newline at end of file
diff --git a/python/tests/test_federate/test_federate.sh b/python/tests/test_federate/test_federate.sh
deleted file mode 100644
index ebfcb60330..0000000000
--- a/python/tests/test_federate/test_federate.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-
-WORKSPACE=`pwd`
-echo $WORKSPACE
-cd $WORKSPACE/examples/federate/quick_start/parrot
-python torch_fedavg_mnist_lr_one_line_example.py --cf fedml_config.yaml
-python torch_fedavg_mnist_lr_custum_data_and_model_example.py --cf fedml_config.yaml
-
-cd $WORKSPACE/examples/federate/simulation/sp_decentralized_mnist_lr_example
-python torch_fedavg_mnist_lr_step_by_step_example.py --cf fedml_config.yaml
-
-cd $WORKSPACE/examples/federate/simulation/sp_fednova_mnist_lr_example
-python torch_fednova_mnist_lr_step_by_step_example.py --cf fedml_config.yaml
-          
-cd $WORKSPACE/examples/federate/simulation/sp_fedopt_mnist_lr_example
-python torch_fedopt_mnist_lr_step_by_step_example.py --cf fedml_config.yaml
-
-cd $WORKSPACE/examples/federate/simulation/sp_hierarchicalfl_mnist_lr_example
-python torch_hierarchicalfl_mnist_lr_step_by_step_example.py --cf fedml_config.yaml
-
-
-cd $WORKSPACE/examples/federate/simulation/sp_turboaggregate_mnist_lr_example
-python torch_turboaggregate_mnist_lr_step_by_step_example.py --cf fedml_config.yaml 
-
-
-cd $WORKSPACE/examples/federate/simulation/sp_vertical_mnist_lr_example
-python torch_vertical_mnist_lr_step_by_step_example.py --cf fedml_config.yaml 
diff --git a/python/tests/test_launch/test_launch.py b/python/tests/test_launch/test_launch.py
deleted file mode 100644
index a6b6ffb9cf..0000000000
--- a/python/tests/test_launch/test_launch.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import os.path
-import time
-import fedml
-from fedml.api.constants import RunStatus
-
-API_KEY = os.getenv("API_KEY")
-# Login
-fedml.set_env_version("test")
-fedml.set_local_on_premise_platform_port(18080)
-error_code, error_msg = fedml.api.fedml_login(api_key=API_KEY)
-if error_code != 0:
-    raise Exception("API Key is invalid!")
-
-# Yaml file
-cur_dir = os.path.dirname(__file__)
-fedml_dir = os.path.dirname(cur_dir)
-python_dir = os.path.dirname(fedml_dir)
-yaml_file = os.path.join(python_dir, "examples", "launch", "hello_job.yaml")
-
-# Launch job
-
-launch_result = fedml.api.launch_job(yaml_file)
-
-# launch_result = fedml.api.launch_job_on_cluster(yaml_file, "alex-cluster")
-if launch_result.result_code != 0:
-    raise Exception(f"Failed to launch job. Reason: {launch_result.result_message}")
-        
-# check job status
-while 1:
-    time.sleep(1)
-    # if 
-    #     if launch_result_status[run_id] == RunStatus.FINISHED:
-    #         continue
-    log_result = fedml.api.run_logs(launch_result.run_id, 1, 5)
-    if log_result is None or log_result.run_status is None:
-        raise Exception(f"Failed to get job status.")
-
-    print(f"run_id: {launch_result.run_id} run_status: {log_result.run_status}")
-    
-    if log_result.run_status in [RunStatus.ERROR, RunStatus.FAILED]:
-        log_result = fedml.api.run_logs(launch_result.run_id, 1, 100)
-        if log_result is None or log_result.run_status is None:
-            raise Exception(f"run_id:{launch_result.run_id} run_status:{log_result.run_status} and failed to get run logs.")
-
-        raise Exception(f"run_id:{launch_result.run_id} run_status:{log_result.run_status} run logs: {log_result.log_line_list}")
-    if log_result.run_status == RunStatus.FINISHED:
-        print(f"Job finished successfully.")
-        break
-        
-
diff --git a/python/tests/test_train/test_train.py b/python/tests/test_train/test_train.py
deleted file mode 100644
index 039d3b81d2..0000000000
--- a/python/tests/test_train/test_train.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import os.path
-import time
-import fedml
-from fedml.api.constants import RunStatus
-
-API_KEY = os.getenv("API_KEY")
-# Login
-fedml.set_env_version("test")
-fedml.set_local_on_premise_platform_port(18080)
-error_code, error_msg = fedml.api.fedml_login(api_key=API_KEY)
-if error_code != 0:
-    raise Exception("API Key is invalid!")
-
-# Yaml file
-cur_dir = os.path.dirname(__file__)
-fedml_dir = os.path.dirname(cur_dir)
-python_dir = os.path.dirname(fedml_dir)
-yaml_file = os.path.join(python_dir, "examples", "train", "mnist_train", "train.yaml")
-
-# Launch job
-
-launch_result = fedml.api.launch_job(yaml_file)
-
-# launch_result = fedml.api.launch_job_on_cluster(yaml_file, "alex-cluster")
-if launch_result.result_code != 0:
-    raise Exception(f"Failed to launch job. Reason: {launch_result.result_message}")
-        
-# check job status
-while 1:
-    time.sleep(1)
-    # if 
-    #     if launch_result_status[run_id] == RunStatus.FINISHED:
-    #         continue
-    log_result = fedml.api.run_logs(launch_result.run_id, 1, 5)
-    if log_result is None or log_result.run_status is None:
-        raise Exception(f"Failed to get job status.")
-
-    print(f"run_id: {launch_result.run_id} run_status: {log_result.run_status}")
-    
-    if log_result.run_status in [RunStatus.ERROR, RunStatus.FAILED]:
-        log_result = fedml.api.run_logs(launch_result.run_id, 1, 100)
-        if log_result is None or log_result.run_status is None:
-            raise Exception(f"run_id:{launch_result.run_id} run_status:{log_result.run_status} and failed to get run logs.")
-
-        raise Exception(f"run_id:{launch_result.run_id} run_status:{log_result.run_status} run logs: {log_result.log_line_list}")
-    if log_result.run_status == RunStatus.FINISHED:
-        print(f"Job finished successfully.")
-        break
-        

From f5ad35b6b2cf15cf39a183e587e465bc8eba0439 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Mon, 8 Jul 2024 12:03:39 -0700
Subject: [PATCH 198/282] [Deploy] Fix round-robin algorithm; Format code.

---
 .../model_scheduler/device_model_cache.py     | 197 +++++++++---------
 1 file changed, 101 insertions(+), 96 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
index 292af4e659..7a47c1961e 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py
@@ -250,7 +250,7 @@ def delete_deployment_result(self, element: str, end_point_id, end_point_name, m
 
         device_id, replica_no, _ = self.get_result_item_info(element)
         self.model_deployment_db.delete_deployment_result_with_device_id_and_rank(
-            end_point_id, end_point_name, model_name, device_id, replica_rank=replica_no-1)
+            end_point_id, end_point_name, model_name, device_id, replica_rank=replica_no - 1)
 
         return
 
@@ -286,8 +286,9 @@ def get_deployment_result_list(self, end_point_id, end_point_name, model_name):
             result_list = self.model_deployment_db.get_deployment_result_list(end_point_id, end_point_name, model_name)
             try:
                 for result in result_list:
-                    self.redis_connection.rpush(self.get_deployment_result_key(end_point_id, end_point_name, model_name),
-                                                json.dumps(result))
+                    self.redis_connection.rpush(
+                        self.get_deployment_result_key(end_point_id, end_point_name, model_name),
+                        json.dumps(result))
             except Exception as e:
                 logging.info(e)
                 pass
@@ -332,7 +333,8 @@ def get_deployment_result_list_size(self, end_point_id, end_point_name, model_na
 
     def get_deployment_status_list(self, end_point_id, end_point_name, model_name):
         try:
-            status_list = self.redis_connection.lrange(self.get_deployment_status_key(end_point_id, end_point_name, model_name), 0, -1)
+            status_list = self.redis_connection.lrange(
+                self.get_deployment_status_key(end_point_id, end_point_name, model_name), 0, -1)
         except Exception as e:
             status_list = None
 
@@ -340,8 +342,9 @@ def get_deployment_status_list(self, end_point_id, end_point_name, model_name):
             status_list = self.model_deployment_db.get_deployment_status_list(end_point_id, end_point_name, model_name)
             try:
                 for status in status_list:
-                    self.redis_connection.rpush(self.get_deployment_status_key(end_point_id, end_point_name, model_name),
-                                                json.dumps(status))
+                    self.redis_connection.rpush(
+                        self.get_deployment_status_key(end_point_id, end_point_name, model_name),
+                        json.dumps(status))
             except Exception as e:
                 pass
         return status_list
@@ -369,7 +372,7 @@ def get_result_item_info(result_item):
             result_item_json = json.loads(result_item_json)
 
         device_id = result_item_json["cache_device_id"]
-        replica_no = result_item_json.get("cache_replica_no", 1)    # Compatible with the old version
+        replica_no = result_item_json.get("cache_replica_no", 1)  # Compatible with the old version
 
         if isinstance(result_item_json["result"], str):
             result_payload = json.loads(result_item_json["result"])
@@ -400,7 +403,7 @@ def get_idle_device(self,
                     found_model_name == model_name and
                     (not limit_specific_model_version or found_model_version == model_version)):
                 if "model_status" in result_payload and result_payload["model_status"] == "DEPLOYED":
-                    idle_device_list.append({"device_id": device_id, "end_point_id": end_point_id})
+                    idle_device_list.append({"device_id": device_id, "result_payload": result_payload.copy()})
 
         logging.debug(f"{len(idle_device_list)} devices this model has on it: {idle_device_list}")
 
@@ -416,54 +419,21 @@ def get_idle_device(self,
 
         # Round Robin
         total_device_num = len(idle_device_list)
-        redis_round_robin_key = self.get_round_robin_prev_device(end_point_id, end_point_name, model_name, model_version)
-
-        selected_device_index = 0
-        try:
-            if self.redis_connection.exists(redis_round_robin_key):
-                selected_device_index = int(self.redis_connection.get(redis_round_robin_key))
-                selected_device_index %= total_device_num
-            else:
-                selected_device_index = 0
-            next_selected_device_index = (selected_device_index + 1) % total_device_num
-            self.redis_connection.set(redis_round_robin_key, str(next_selected_device_index))
-        except Exception as e:
-            logging.info("Inference Device selection Failed:")
-            logging.info(e)
-
-        logging.debug(f"Using Round Robin, the device index is {selected_device_index}")
-        idle_device_dict = idle_device_list[selected_device_index]
+        redis_round_robin_key = self.get_round_robin_prev_device(end_point_id, end_point_name, model_name,
+                                                                 model_version)
+        if self.redis_connection.exists(redis_round_robin_key):
+            selected_device_index = int(self.redis_connection.get(redis_round_robin_key))
+            selected_device_index %= total_device_num
+        else:
+            selected_device_index = 0
 
-        # Note that within the same endpoint_id, there could be one device with multiple same models
-        same_model_device_rank = 0
-        start = selected_device_index
-        while(start != 0 and idle_device_list[start]["device_id"] == idle_device_list[start-1]["device_id"]):
-            start -= 1
-            same_model_device_rank += 1
+        next_idx = (selected_device_index + 1) % total_device_num
+        self.redis_connection.set(redis_round_robin_key, next_idx)
 
-        # Find deployment result from the target idle device.
-        try:
-            for result_item in result_list:
-                logging.debug("enter the for loop")
-                device_id, _, result_payload = self.get_result_item_info(result_item)
-                found_end_point_id = result_payload["end_point_id"]
-                found_end_point_name = result_payload["end_point_name"]
-                found_model_status = result_payload["model_status"]
-
-                if found_model_status != "DEPLOYED":
-                    continue
-
-                if str(found_end_point_id) == str(idle_device_dict["end_point_id"]) \
-                        and device_id == idle_device_dict["device_id"]:
-                    if same_model_device_rank > 0:
-                        same_model_device_rank -= 1
-                        continue
-                    logging.debug(f"The chosen device is {device_id}")
-                    return result_payload, device_id
-        except Exception as e:
-            logging.info(str(e))
+        idle_device_info = idle_device_list[selected_device_index]
+        payload = idle_device_info["result_payload"]
 
-        return None, None
+        return payload, idle_device_info["device_id"]
 
     def get_latest_version(self, status_list):
         latest_version = None
@@ -472,8 +442,8 @@ def get_latest_version(self, status_list):
             try:
                 _, status_payload = self.get_status_item_info(status_item)
                 model_version = status_payload["model_version"]
-                prefix = model_version.split("-")[0]    # version-date
-                prefix_int = int(prefix[1:])    # v12 -> 12
+                prefix = model_version.split("-")[0]  # version-date
+                prefix_int = int(prefix[1:])  # v12 -> 12
 
                 if latest_version is None:
                     latest_version = model_version
@@ -568,32 +538,47 @@ def delete_end_point(self, end_point_id, end_point_name, model_name, model_versi
         # Device id is either deploy master or deploy worker
         try:
             logging.info("Will Delete the related redis keys permanently")
-            self.redis_connection.expire(self.get_deployment_result_key(end_point_id, end_point_name, model_name), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
-            self.redis_connection.expire(self.get_deployment_status_key(end_point_id, end_point_name, model_name), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
-            self.redis_connection.expire(self.get_monitor_metrics_key(end_point_id, end_point_name, model_name, model_version), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
-            self.redis_connection.expire(self.get_deployment_token_key(end_point_id, end_point_name, model_name), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
-
-            any_version_round_robin_key = self.get_round_robin_prev_device_any_version(end_point_id, end_point_name, model_name)
+            self.redis_connection.expire(self.get_deployment_result_key(end_point_id, end_point_name, model_name),
+                                         ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
+            self.redis_connection.expire(self.get_deployment_status_key(end_point_id, end_point_name, model_name),
+                                         ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
+            self.redis_connection.expire(
+                self.get_monitor_metrics_key(end_point_id, end_point_name, model_name, model_version),
+                ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
+            self.redis_connection.expire(self.get_deployment_token_key(end_point_id, end_point_name, model_name),
+                                         ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
+
+            any_version_round_robin_key = self.get_round_robin_prev_device_any_version(end_point_id, end_point_name,
+                                                                                       model_name)
             for key in self.redis_connection.scan_iter(any_version_round_robin_key + "*"):
                 self.redis_connection.expire(key, ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
 
-            self.redis_connection.expire(self.get_deployment_device_info_key(end_point_id), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
-            self.redis_connection.expire(self.get_end_point_activation_key(end_point_id), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
-            self.redis_connection.expire(self.get_end_point_status_key(end_point_id), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
-            self.redis_connection.expire(self.get_user_setting_replica_num_key(end_point_id), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
+            self.redis_connection.expire(self.get_deployment_device_info_key(end_point_id),
+                                         ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
+            self.redis_connection.expire(self.get_end_point_activation_key(end_point_id),
+                                         ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
+            self.redis_connection.expire(self.get_end_point_status_key(end_point_id),
+                                         ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
+            self.redis_connection.expire(self.get_user_setting_replica_num_key(end_point_id),
+                                         ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
 
             # Delete all replicas gpu ids
-            matched_prefix_replica = self.get_replica_gpu_ids_key_all_replicas(end_point_id, end_point_name, model_name, device_id)
+            matched_prefix_replica = self.get_replica_gpu_ids_key_all_replicas(end_point_id, end_point_name, model_name,
+                                                                               device_id)
             for key in self.redis_connection.scan_iter(matched_prefix_replica + "*"):
                 self.redis_connection.expire(key, ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
 
                 logging.info(f"Those keys are deleted: {key}")
 
             # Delete the compute gpu cache
-            self.redis_connection.expire(ComputeGpuCache.get_run_total_num_gpus_key(end_point_id), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
-            self.redis_connection.expire(ComputeGpuCache.get_run_total_num_gpus_key(end_point_id), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
-            self.redis_connection.expire(ComputeGpuCache.get_run_device_ids_key(end_point_id), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
-            self.redis_connection.expire(ComputeGpuCache.get_edge_model_id_map_key(end_point_id), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
+            self.redis_connection.expire(ComputeGpuCache.get_run_total_num_gpus_key(end_point_id),
+                                         ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
+            self.redis_connection.expire(ComputeGpuCache.get_run_total_num_gpus_key(end_point_id),
+                                         ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
+            self.redis_connection.expire(ComputeGpuCache.get_run_device_ids_key(end_point_id),
+                                         ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
+            self.redis_connection.expire(ComputeGpuCache.get_edge_model_id_map_key(end_point_id),
+                                         ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
 
             logging.info(f"Those keys are deleted:"
                          f"{ComputeGpuCache.get_endpoint_run_id_map_key(end_point_id)}, "
@@ -703,7 +688,8 @@ def get_end_point_token(self, end_point_id, end_point_name, model_name):
         token = None
         try:
             if self.redis_connection.exists(self.get_deployment_token_key(end_point_id, end_point_name, model_name)):
-                token = self.redis_connection.get(self.get_deployment_token_key(end_point_id, end_point_name, model_name))
+                token = self.redis_connection.get(
+                    self.get_deployment_token_key(end_point_id, end_point_name, model_name))
         except Exception as e:
             token = None
 
@@ -711,7 +697,8 @@ def get_end_point_token(self, end_point_id, end_point_name, model_name):
             token = self.model_deployment_db.get_end_point_token(end_point_id, end_point_name, model_name)
             if token is not None:
                 try:
-                    self.redis_connection.set(self.get_deployment_token_key(end_point_id, end_point_name, model_name), token)
+                    self.redis_connection.set(self.get_deployment_token_key(end_point_id, end_point_name, model_name),
+                                              token)
                 except Exception as e:
                     pass
 
@@ -743,32 +730,41 @@ def get_endpoint_devices_replica_num(self, end_point_id):
 
         return replica_num
 
-    def get_deployment_result_key(self, end_point_id, end_point_name, model_name):
-        return "{}-{}-{}-{}".format(FedMLModelCache.FEDML_MODEL_DEPLOYMENT_RESULT_TAG, end_point_id, end_point_name, model_name)
+    @staticmethod
+    def get_deployment_result_key(end_point_id, end_point_name, model_name):
+        return "{}-{}-{}-{}".format(FedMLModelCache.FEDML_MODEL_DEPLOYMENT_RESULT_TAG, end_point_id, end_point_name,
+                                    model_name)
 
-    def get_deployment_status_key(self, end_point_id, end_point_name, model_name):
-        return "{}-{}-{}-{}".format(FedMLModelCache.FEDML_MODEL_DEPLOYMENT_STATUS_TAG, end_point_id, end_point_name, model_name)
+    @staticmethod
+    def get_deployment_status_key(end_point_id, end_point_name, model_name):
+        return "{}-{}-{}-{}".format(FedMLModelCache.FEDML_MODEL_DEPLOYMENT_STATUS_TAG, end_point_id, end_point_name,
+                                    model_name)
 
-    def get_end_point_status_key(self, end_point_id):
+    @staticmethod
+    def get_end_point_status_key(end_point_id):
         return "{}{}".format(FedMLModelCache.FEDML_MODEL_END_POINT_STATUS_TAG, end_point_id)
 
     @staticmethod
     def get_end_point_activation_key(end_point_id):
         return "{}{}".format(FedMLModelCache.FEDML_MODEL_END_POINT_ACTIVATION_TAG, end_point_id)
 
-    def get_deployment_device_info_key(self, end_point_id):
+    @staticmethod
+    def get_deployment_device_info_key(end_point_id):
         return "{}{}".format(FedMLModelCache.FEDML_MODEL_DEVICE_INFO_TAG, end_point_id)
 
     @staticmethod
     def get_deployment_token_key(end_point_id, end_point_name, model_name):
-        return "{}-{}-{}-{}".format(FedMLModelCache.FEDML_MODEL_END_POINT_TOKEN_TAG, end_point_id, end_point_name, model_name)
+        return "{}-{}-{}-{}".format(FedMLModelCache.FEDML_MODEL_END_POINT_TOKEN_TAG, end_point_id, end_point_name,
+                                    model_name)
 
     @staticmethod
     def get_deployment_token_key_eid(end_point_id):
         return "{}-{}".format(FedMLModelCache.FEDML_MODEL_END_POINT_TOKEN_TAG, end_point_id)
 
-    def get_round_robin_prev_device(self, end_point_id, end_point_name, model_name, version):
-        return "{}-{}-{}-{}-{}".format(FedMLModelCache.FEDML_MODEL_ROUND_ROBIN_PREVIOUS_DEVICE_TAG, end_point_id, end_point_name, model_name, version)
+    @staticmethod
+    def get_round_robin_prev_device(end_point_id, end_point_name, model_name, version):
+        return "{}-{}-{}-{}-{}".format(FedMLModelCache.FEDML_MODEL_ROUND_ROBIN_PREVIOUS_DEVICE_TAG, end_point_id,
+                                       end_point_name, model_name, version)
 
     @staticmethod
     def get_round_robin_prev_device_any_version(endpoint_id, endpoint_name, model_name):
@@ -797,8 +793,9 @@ def set_monitor_metrics(self, end_point_id, end_point_name,
                         "total_request_num": total_request_num, "current_qps": current_qps,
                         "avg_qps": avg_qps, "timestamp": timestamp, "device_id": device_id}
         try:
-            self.redis_connection.rpush(self.get_monitor_metrics_key(end_point_id, end_point_name, model_name, model_version),
-                                        json.dumps(metrics_dict))
+            self.redis_connection.rpush(
+                self.get_monitor_metrics_key(end_point_id, end_point_name, model_name, model_version),
+                json.dumps(metrics_dict))
         except Exception as e:
             pass
         self.model_deployment_db.set_monitor_metrics(end_point_id, end_point_name,
@@ -809,16 +806,20 @@ def set_monitor_metrics(self, end_point_id, end_point_name,
 
     def get_latest_monitor_metrics(self, end_point_id, end_point_name, model_name, model_version):
         try:
-            if self.redis_connection.exists(self.get_monitor_metrics_key(end_point_id, end_point_name, model_name, model_version)):
-                return self.redis_connection.lindex(self.get_monitor_metrics_key(end_point_id, end_point_name, model_name, model_version), -1)
+            if self.redis_connection.exists(
+                    self.get_monitor_metrics_key(end_point_id, end_point_name, model_name, model_version)):
+                return self.redis_connection.lindex(
+                    self.get_monitor_metrics_key(end_point_id, end_point_name, model_name, model_version), -1)
         except Exception as e:
             pass
 
-        metrics_dict = self.model_deployment_db.get_latest_monitor_metrics(end_point_id, end_point_name, model_name, model_version)
+        metrics_dict = self.model_deployment_db.get_latest_monitor_metrics(end_point_id, end_point_name, model_name,
+                                                                           model_version)
         if metrics_dict is not None:
             try:
-                self.redis_connection.rpush(self.get_monitor_metrics_key(end_point_id, end_point_name, model_name, model_version),
-                                            metrics_dict)
+                self.redis_connection.rpush(
+                    self.get_monitor_metrics_key(end_point_id, end_point_name, model_name, model_version),
+                    metrics_dict)
             except Exception as e:
                 pass
 
@@ -826,21 +827,25 @@ def get_latest_monitor_metrics(self, end_point_id, end_point_name, model_name, m
 
     def get_monitor_metrics_item(self, end_point_id, end_point_name, model_name, model_version, index):
         try:
-            if self.redis_connection.exists(self.get_monitor_metrics_key(end_point_id, end_point_name, model_name, model_version)):
-                metrics_item = self.redis_connection.lindex(self.get_monitor_metrics_key(end_point_id, end_point_name, model_name,
-                                                                                     model_version), index)
-                return metrics_item, index+1
+            if self.redis_connection.exists(
+                    self.get_monitor_metrics_key(end_point_id, end_point_name, model_name, model_version)):
+                metrics_item = self.redis_connection.lindex(
+                    self.get_monitor_metrics_key(end_point_id, end_point_name, model_name,
+                                                 model_version), index)
+                return metrics_item, index + 1
         except Exception as e:
             pass
 
-        metrics_dict = self.model_deployment_db.get_monitor_metrics_item(end_point_id, end_point_name, model_name, model_version, index)
+        metrics_dict = self.model_deployment_db.get_monitor_metrics_item(end_point_id, end_point_name, model_name,
+                                                                         model_version, index)
         if metrics_dict is not None:
             try:
-                self.redis_connection.rpush(self.get_monitor_metrics_key(end_point_id, end_point_name, model_name, model_version),
-                                            metrics_dict)
+                self.redis_connection.rpush(
+                    self.get_monitor_metrics_key(end_point_id, end_point_name, model_name, model_version),
+                    metrics_dict)
             except Exception as e:
                 pass
-            return metrics_dict, index+1
+            return metrics_dict, index + 1
 
         return None, 0
 

From c8e575503be9265b3abd49a09871eb9aef22376e Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Mon, 22 Jul 2024 15:51:14 -0700
Subject: [PATCH 199/282] [Deploy] Use terminology expose_subdomains.

---
 .../examples/deploy/custom_inference_image/template.yaml  | 2 +-
 .../custom_inference_image/tensorrt_llm/tensorrtllm.yaml  | 2 +-
 .../template/custom_inference_image.yaml                  | 2 +-
 .../scheduler/model_scheduler/device_client_constants.py  | 2 +-
 .../scheduler/model_scheduler/device_model_deployment.py  | 8 ++++----
 .../scheduler/model_scheduler/device_server_constants.py  | 2 +-
 .../scheduler/model_scheduler/master_job_runner.py        | 2 +-
 7 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/python/examples/deploy/custom_inference_image/template.yaml b/python/examples/deploy/custom_inference_image/template.yaml
index 10e6580bcf..1dd13e1530 100644
--- a/python/examples/deploy/custom_inference_image/template.yaml
+++ b/python/examples/deploy/custom_inference_image/template.yaml
@@ -1,6 +1,6 @@
 # Required
 workspace: "./"                     # We will pacakge all the files in the workspace directory
-enable_serverless_container: true   # Identify whether to use serverless container
+expose_subdomains: true             # For customized image, if you want to route all the subdomains, set to true. e.g. localhost:2345/{all-subdomain}
 inference_image_name: ""            # Container image name
 container_run_command: ""           # str or list, similar to CMD in the dockerfile
 port: 80                            # Service port, currently you can only indicate one arbitrary port
diff --git a/python/examples/deploy/custom_inference_image/tensorrt_llm/tensorrtllm.yaml b/python/examples/deploy/custom_inference_image/tensorrt_llm/tensorrtllm.yaml
index d41dba7983..a72c1f7753 100644
--- a/python/examples/deploy/custom_inference_image/tensorrt_llm/tensorrtllm.yaml
+++ b/python/examples/deploy/custom_inference_image/tensorrt_llm/tensorrtllm.yaml
@@ -1,6 +1,6 @@
 workspace: "./"
 
-enable_serverless_container: true
+expose_subdomains: true
 inference_image_name: "fedml/llama3-8b-tensorrtllm"
 
 # If you put the model repository in $workspace/model_repository, it will be mounted to /home/fedml/models_serving/model_repository
diff --git a/python/examples/deploy/custom_inference_image/triton_inference_server/template/custom_inference_image.yaml b/python/examples/deploy/custom_inference_image/triton_inference_server/template/custom_inference_image.yaml
index eb02e3904a..11ae9f82ff 100644
--- a/python/examples/deploy/custom_inference_image/triton_inference_server/template/custom_inference_image.yaml
+++ b/python/examples/deploy/custom_inference_image/triton_inference_server/template/custom_inference_image.yaml
@@ -1,6 +1,6 @@
 workspace: "./"
 
-enable_serverless_container: true
+expose_subdomains: true
 inference_image_name: "nvcr.io/nvidia/tritonserver:24.05-py3"
 
 volumes:
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
index 4006e50726..3bb2e12aed 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
@@ -159,7 +159,7 @@ class ClientConstants(object):
     DEPLOY_TIMEOUT_SEC_KEY = "deploy_timeout_sec"
     DEPLOY_TIMEOUT_SEC_DEFAULT = 600
 
-    ENABLE_SERVERLESS_CONTAINER_KEY = "enable_serverless_container"
+    EXPOSE_SUBDOMAINS_KEY = "expose_subdomains"
 
     CUSTOMIZED_VOLUMES_MOUNT_KEY = "volumes"
     CUSTOMIZED_VOLUMES_PATH_FROM_WORKSPACE_KEY = "workspace_path"
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index 665bb4082e..6a637653fc 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -88,7 +88,7 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
             registry_user_name, registry_user_password = parse_image_registry_related_config(config)
 
         # Service app related
-        dst_bootstrap_dir, dst_model_serving_dir, relative_entry_fedml_format, enable_serverless_container, \
+        dst_bootstrap_dir, dst_model_serving_dir, relative_entry_fedml_format, expose_subdomains, \
             customized_image_entry_cmd, customized_readiness_check, customized_liveliness_check, customized_uri = \
             handle_container_service_app(config, model_storage_local_path)
 
@@ -255,7 +255,7 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
     model_metadata = ret_model_metadata
     model_metadata["liveliness_check"] = customized_liveliness_check
     model_metadata["readiness_check"] = customized_readiness_check
-    model_metadata[ClientConstants.ENABLE_SERVERLESS_CONTAINER_KEY] = enable_serverless_container
+    model_metadata[ClientConstants.EXPOSE_SUBDOMAINS_KEY] = expose_subdomains
     logging.info(f"[Worker][Replica{replica_rank}] Model deployment is successful with inference_output_url: "
                  f"{inference_output_url}, model_metadata: {model_metadata}, model_config: {ret_model_config}")
 
@@ -616,13 +616,13 @@ def handle_container_service_app(config, model_storage_local_path):
     relative_entry_fedml_format = config.get('entry_point', "")
 
     # User indicate either fedml format python main entry filename or entry command
-    enable_serverless_container = config.get(ClientConstants.ENABLE_SERVERLESS_CONTAINER_KEY, False)
+    expose_subdomains = config.get(ClientConstants.EXPOSE_SUBDOMAINS_KEY, False)
     customized_image_entry_cmd = config.get('container_run_command', None)  # Could be str or list
     customized_readiness_check = config.get('readiness_probe', ClientConstants.READINESS_PROBE_DEFAULT)
     customized_liveliness_check = config.get('liveness_probe', ClientConstants.LIVENESS_PROBE_DEFAULT)
     customized_uri = config.get(ClientConstants.CUSTOMIZED_SERVICE_KEY, "")
 
-    return (dst_bootstrap_dir, dst_model_serving_dir, relative_entry_fedml_format, enable_serverless_container,
+    return (dst_bootstrap_dir, dst_model_serving_dir, relative_entry_fedml_format, expose_subdomains,
             customized_image_entry_cmd, customized_readiness_check, customized_liveliness_check, customized_uri)
 
 
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
index 00f0fe73bf..44eaeb9371 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py
@@ -151,7 +151,7 @@ class ServerConstants(object):
     DEVICE_DIFF_REPLACE_OPERATION = "op: replace"
 
     # Worker comfig yaml related
-    ENABLE_SERVERLESS_CONTAINER_KEY = "enable_serverless_container"
+    EXPOSE_SUBDOMAINS_KEY = "expose_subdomains"
 
     @staticmethod
     def get_fedml_home_dir():
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index ab6bc4c895..00b08acfb8 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -763,7 +763,7 @@ def construct_final_gateway_url(self, end_point_id):
         if self.deployed_replica_payload is not None:
             payload_json = self.deployed_replica_payload
             enable_custom_path = payload_json["model_metadata"].get(
-                ServerConstants.ENABLE_SERVERLESS_CONTAINER_KEY, False)
+                ServerConstants.EXPOSE_SUBDOMAINS_KEY, False)
             if enable_custom_path:
                 identifier = "custom_inference"
 

From 281f8c0a2f573b77986f872c09266b7b5c33300b Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Thu, 1 Aug 2024 13:04:27 -0700
Subject: [PATCH 200/282] Add marketplace_type, price_per_hour as optional
 login parameters

---
 python/fedml/api/__init__.py                  | 15 ++++++-----
 python/fedml/api/constants.py                 | 17 ++++++++++++-
 python/fedml/api/modules/device.py            | 17 +++++++------
 python/fedml/cli/modules/login.py             | 25 +++++++++++++++++--
 .../scheduler_core/account_manager.py         | 16 ++++++++----
 .../scheduler/slave/base_slave_agent.py       |  6 +++--
 .../scheduler/slave/client_daemon.py          | 10 +++++++-
 .../computing/scheduler/slave/client_login.py |  8 ++++--
 8 files changed, 88 insertions(+), 26 deletions(-)

diff --git a/python/fedml/api/__init__.py b/python/fedml/api/__init__.py
index b03c72b675..b1c56403f1 100755
--- a/python/fedml/api/__init__.py
+++ b/python/fedml/api/__init__.py
@@ -15,7 +15,7 @@
 """
 from typing import List, Tuple
 
-from fedml.api.constants import RunStatus
+from fedml.api.constants import RunStatus, MarketplaceType
 from fedml.api.fedml_response import FedMLResponse
 from fedml.api.modules import launch, utils, build, device, logs, diagnosis, cluster, run, train, federate, storage, \
     model as model_module  # Since "model" has conflict with one of the input parameters, we need to rename it
@@ -214,9 +214,10 @@ def fedml_build(platform, type, source_folder, entry_point, config_folder, dest_
 def login(api_key, computing, server, supplier,
           master_inference_gateway_port: int = ServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
           worker_inference_proxy_port: int = ClientConstants.LOCAL_CLIENT_API_PORT,
-          worker_connection_type: str = ClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT):
+          worker_connection_type: str = ClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT,
+          marketplace_type: str = MarketplaceType.SECURE.value, price_per_hour: float = 0.0):
     device_bind(api_key, computing, server, supplier, master_inference_gateway_port, worker_inference_proxy_port,
-                worker_connection_type)
+                worker_connection_type, marketplace_type, price_per_hour)
 
 
 def logout(computing, server):
@@ -224,9 +225,11 @@ def logout(computing, server):
 
 
 def device_bind(api_key, computing, server, supplier, master_inference_gateway_port, worker_inference_proxy_port,
-                worker_connection_type):
-    device.bind(api_key, computing, server, supplier, master_inference_gateway_port, worker_inference_proxy_port,
-                worker_connection_type)
+                worker_connection_type, marketplace_type, price_per_hour):
+    device.bind(api_key=api_key, computing=computing, server=server, supplier=supplier,
+                master_inference_gateway_port=master_inference_gateway_port,
+                worker_inference_proxy_port=worker_inference_proxy_port, worker_connection_type=worker_connection_type,
+                marketplace_type=marketplace_type, price_per_hour=price_per_hour)
 
 
 def device_unbind(computing, server):
diff --git a/python/fedml/api/constants.py b/python/fedml/api/constants.py
index e804775f74..d4c59898a8 100755
--- a/python/fedml/api/constants.py
+++ b/python/fedml/api/constants.py
@@ -1,6 +1,22 @@
 from enum import Enum, unique
 
 
+class MarketplaceType(Enum):
+    SECURE = 1
+    COMMUNITY = 2
+
+    def __str__(self):
+        return self.name
+
+    @classmethod
+    def from_str(cls, name: str):
+        """Get the enum member from a string."""
+        if name.upper() in cls.__members__:
+            return cls[name.upper()]
+        else:
+            raise ValueError(f"Invalid marketplace type: {name}")
+
+
 class ApiConstants:
     RESOURCE_MATCHED_STATUS_MATCHED = "MATCHED"
     RESOURCE_MATCHED_STATUS_JOB_URL_ERROR = "ERROR_JOB_URL"
@@ -106,4 +122,3 @@ def get_run_enum_from_str(cls, run_status_str: str):
             if run_status.value == run_status_str:
                 return run_status
         return cls.UNDETERMINED
-
diff --git a/python/fedml/api/modules/device.py b/python/fedml/api/modules/device.py
index 7c4e52c8b5..4a932ac631 100644
--- a/python/fedml/api/modules/device.py
+++ b/python/fedml/api/modules/device.py
@@ -19,11 +19,10 @@
 
 
 def bind(
-        api_key, computing, server, supplier,
+        api_key, computing, server, supplier, marketplace_type, price_per_hour,
         master_inference_gateway_port=DeviceServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
         worker_inference_proxy_port=DeviceClientConstants.LOCAL_CLIENT_API_PORT,
-        worker_connection_type=DeviceClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT
-):
+        worker_connection_type=DeviceClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT):
     userid = api_key
     runner_cmd = "{}"
     device_id = "0"
@@ -48,13 +47,13 @@ def bind(
     _bind(
         userid, computing, server,
         api_key, role, runner_cmd, device_id, os_name,
-        docker, master_inference_gateway_port, worker_inference_proxy_port, worker_connection_type)
+        docker, master_inference_gateway_port, worker_inference_proxy_port, worker_connection_type, marketplace_type,
+        price_per_hour)
 
 
 def _bind(
-        userid, computing, server,
-        api_key, role, runner_cmd, device_id, os_name,
-        docker, master_inference_gateway_port, worker_inference_proxy_port, worker_connection_type):
+        userid, computing, server, api_key, role, runner_cmd, device_id, os_name, docker, master_inference_gateway_port,
+        worker_inference_proxy_port, worker_connection_type, marketplace_type, price_per_hour):
     fedml.load_env()
     if os.getenv(ModuleConstants.ENV_FEDML_INFER_HOST) is None:
         fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_HOST, SchedulerConstants.REDIS_INFER_HOST)
@@ -179,6 +178,10 @@ def _bind(
                 user_api_key,
                 "-ngc",
                 "1"
+                "-mpt",
+                marketplace_type,
+                "-pph",
+                price_per_hour
             ]
         ).pid
         sys_utils.save_login_process(ClientConstants.LOCAL_HOME_RUNNER_DIR_NAME,
diff --git a/python/fedml/cli/modules/login.py b/python/fedml/cli/modules/login.py
index 7ec4191a3e..2692a89177 100644
--- a/python/fedml/cli/modules/login.py
+++ b/python/fedml/cli/modules/login.py
@@ -1,8 +1,10 @@
 import os
+from enum import Enum
 
 import click
 
 import fedml.api
+from fedml.api import MarketplaceType
 from fedml.api.modules.utils import authenticate
 from fedml.computing.scheduler.model_scheduler.device_server_constants import ServerConstants
 from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants
@@ -74,10 +76,29 @@
     default=ClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT,
     help="The connection type for worker inference proxy.",
 )
+@click.option(
+    "--marketplace_type",
+    "-mpt",
+    type=click.Choice([marketplace_type for marketplace_type in MarketplaceType.__members__]),
+    default=MarketplaceType.SECURE.name,
+    help="Specify the marketplace type: 'SECURE' for Secure Cloud or 'COMMUNITY' for Community Cloud. "
+         "Defaults to Secure Cloud.",
+)
+@click.option(
+    "--price_per_hour",
+    "-pph",
+    type=click.FLOAT,
+    default=0.0,
+    help="Enter the price per GPU per hour as a floating-point number. For example, if the cost of using an H100 node "
+         "for one hour is $1.5 per GPU, then you would input 1.5. Do not multiply this number by the total number of "
+         "GPUs in the node, as the system will automatically detect the number of GPUs and include it in the cost "
+         "calculation. Default is 0.0. "
+         "Optionally, you can also set this price later through supplier page on the FEDML® Nexus AI Platform."
+)
 def fedml_login(
         api_key, version, compute_node, server, provider, deploy_worker_num,
         local_on_premise_platform, local_on_premise_platform_port,
-        master_inference_gateway_port, worker_inference_proxy_port, worker_connection_type
+        master_inference_gateway_port, worker_inference_proxy_port, worker_connection_type, marketplace_type, cost
 ):
     fedml.set_env_version(version)
     fedml.set_local_on_premise_platform_host(local_on_premise_platform)
@@ -92,4 +113,4 @@ def fedml_login(
         pass
     os.environ["FEDML_MODEL_WORKER_NUM"] = str(deploy_worker_num)
     fedml.api.login(api_key, compute_node, server, provider, master_inference_gateway_port,
-                    worker_inference_proxy_port, worker_connection_type)
+                    worker_inference_proxy_port, worker_connection_type, marketplace_type, cost)
diff --git a/python/fedml/computing/scheduler/scheduler_core/account_manager.py b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
index 3b80511d12..3b3f1efc75 100755
--- a/python/fedml/computing/scheduler/scheduler_core/account_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
@@ -48,7 +48,8 @@ def __init__(self):
     def get_instance():
         return FedMLAccountManager()
 
-    def login(self, user_id, api_key="", device_id=None, os_name=None, role=None, runner_cmd=None):
+    def login(self, user_id, api_key="", device_id=None, os_name=None, role=None, runner_cmd=None, marketplace_type=None,
+              price_per_hour=None):
         # Build the agent args
         self.build_agent_args(
             user_id, api_key=api_key, device_id=device_id, os_name=os_name, role=role, runner_cmd=runner_cmd
@@ -93,9 +94,9 @@ def login(self, user_id, api_key="", device_id=None, os_name=None, role=None, ru
             # noinspection PyBroadException
             try:
                 edge_id, user_name, extra_url, general_edge_id = FedMLAccountManager.bind_account_and_device_id(
-                    service_config["ml_ops_config"]["EDGE_BINDING_URL"], self.agent_args.account_id,
-                    self.agent_args.unique_device_id, self.agent_args.os_name,
-                    api_key=api_key, role=role
+                    url=service_config["ml_ops_config"]["EDGE_BINDING_URL"], account_id=self.agent_args.account_id,
+                    device_id=self.agent_args.unique_device_id, os_name=self.agent_args.os_name,
+                    api_key=api_key, role=role, marketplace_type=marketplace_type, price_per_hour=price_per_hour
                 )
                 if edge_id > 0:
                     break
@@ -308,7 +309,7 @@ def get_machine_id():
 
     @staticmethod
     def bind_account_and_device_id(
-            url, account_id, device_id, os_name, api_key="",
+            url, account_id, device_id, os_name, api_key="", marketplace_type=None, price_per_hour=None,
             role=ROLE_EDGE_SERVER):
         ip = requests.get('https://checkip.amazonaws.com').text.strip()
         fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \
@@ -335,6 +336,11 @@ def bind_account_and_device_id(
                             "available_mem": available_mem, "total_mem": total_mem,
                             "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name}
         }
+
+        if role == FedMLAccountManager.ROLE_GPU_PROVIDER:
+            json_params["marketplace_type"] = marketplace_type
+            json_params["price_per_hour"] = price_per_hour
+
         if gpu_count > 0:
             if gpu_total_mem is not None:
                 json_params["gpu"] = gpu_info if gpu_info is not None else "" + ", Total GPU Memory: " + gpu_total_mem
diff --git a/python/fedml/computing/scheduler/slave/base_slave_agent.py b/python/fedml/computing/scheduler/slave/base_slave_agent.py
index 01c0a39195..61ef6bf4c5 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_agent.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_agent.py
@@ -24,7 +24,8 @@ def __init__(self):
 
     def login(
             self, userid, api_key=None, device_id=None,
-            os_name=None, need_to_check_gpu=False, role=None
+            os_name=None, need_to_check_gpu=False, role=None,
+            marketplace_type=None, price_per_hour=None
     ):
         # Preprocess the login args
         if need_to_check_gpu:
@@ -38,7 +39,8 @@ def login(
         # Login account
         login_result = FedMLAccountManager.get_instance().login(
             userid, api_key=api_key, device_id=device_id,
-            os_name=os_name, role=role
+            os_name=os_name, role=role, marketplace_type=marketplace_type,
+            price_per_hour=price_per_hour
         )
         if login_result is not None:
             self.agent_args = login_result
diff --git a/python/fedml/computing/scheduler/slave/client_daemon.py b/python/fedml/computing/scheduler/slave/client_daemon.py
index e543115b4c..6d83952e1f 100755
--- a/python/fedml/computing/scheduler/slave/client_daemon.py
+++ b/python/fedml/computing/scheduler/slave/client_daemon.py
@@ -11,6 +11,7 @@
     daemon_ota_upgrade
 from fedml.computing.scheduler.slave.client_constants import ClientConstants
 from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils
+from fedml.api.constants import MarketplaceType
 
 
 if __name__ == "__main__":
@@ -26,6 +27,8 @@
     parser.add_argument("--no_gpu_check", "-ngc", type=int, default=1)
     parser.add_argument("--local_on_premise_platform_host", "-lp", type=str, default="127.0.0.1")
     parser.add_argument("--local_on_premise_platform_port", "-lpp", type=int, default=80)
+    parser.add_argument("--market_place_type", "-mpt", type=int, default=MarketplaceType.SECURE.value)
+    parser.add_argument("--price_per_hour", "-pph", type=float, default=0.0)
 
     args = parser.parse_args()
     args.user = args.user
@@ -84,7 +87,12 @@
                     "-k",
                     args.api_key,
                     "-ngc",
-                    str(args.no_gpu_check)
+                    str(args.no_gpu_check),
+                    "-mpt",
+                    args.marketplace_type,
+                    "-pph",
+                    args.price_per_hour
+
                 ]
             )
             ret_code, exec_out, exec_err = ClientConstants.get_console_sys_out_pipe_err_results(login_pid)
diff --git a/python/fedml/computing/scheduler/slave/client_login.py b/python/fedml/computing/scheduler/slave/client_login.py
index 95c772a225..1ecc581f0d 100755
--- a/python/fedml/computing/scheduler/slave/client_login.py
+++ b/python/fedml/computing/scheduler/slave/client_login.py
@@ -1,6 +1,7 @@
 import argparse
 import os
 import fedml
+from fedml.api import MarketplaceType
 from fedml.computing.scheduler.slave.slave_agent import FedMLLaunchSlaveAgent
 
 
@@ -24,6 +25,8 @@ def logout():
     parser.add_argument("--no_gpu_check", "-ngc", type=int, default=1)
     parser.add_argument("--local_on_premise_platform_host", "-lp", type=str, default="127.0.0.1")
     parser.add_argument("--local_on_premise_platform_port", "-lpp", type=int, default=80)
+    parser.add_argument("--market_place_type", "-mpt", type=int, default=MarketplaceType.SECURE.value)
+    parser.add_argument("--price_per_hour", "-pph", type=float, default=0.0)
 
     args = parser.parse_args()
     args.user = args.user
@@ -38,7 +41,8 @@ def logout():
     fedml.set_env_version(args.version)
     slave_agent = FedMLLaunchSlaveAgent()
     if args.type == 'login':
-        slave_agent.login(args.api_key, api_key=args.api_key, device_id=args.device_id,
-                          os_name=args.os_name, role=args.role)
+        slave_agent.login(userid=args.api_key, api_key=args.api_key, device_id=args.device_id,
+                          os_name=args.os_name, role=args.role, marketplace_type=args.market_place_type,
+                          price_per_hours=args.price_per_hour)
     else:
         FedMLLaunchSlaveAgent.logout()

From 24e4ce4e338bd37598dc40102ee01a66bdd1bb45 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Mon, 5 Aug 2024 12:57:45 -0700
Subject: [PATCH 201/282] Fixes

---
 python/fedml/api/__init__.py                    |  2 +-
 python/fedml/api/constants.py                   | 16 ----------------
 python/fedml/api/modules/device.py              |  4 ++--
 python/fedml/cli/modules/login.py               |  6 +++---
 .../scheduler/scheduler_core/account_manager.py |  6 +++---
 .../scheduler_core/general_constants.py         | 17 +++++++++++++++++
 .../computing/scheduler/slave/client_daemon.py  |  4 ++--
 .../computing/scheduler/slave/client_login.py   |  6 +++---
 8 files changed, 31 insertions(+), 30 deletions(-)

diff --git a/python/fedml/api/__init__.py b/python/fedml/api/__init__.py
index b1c56403f1..cd8dd378d5 100755
--- a/python/fedml/api/__init__.py
+++ b/python/fedml/api/__init__.py
@@ -215,7 +215,7 @@ def login(api_key, computing, server, supplier,
           master_inference_gateway_port: int = ServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
           worker_inference_proxy_port: int = ClientConstants.LOCAL_CLIENT_API_PORT,
           worker_connection_type: str = ClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT,
-          marketplace_type: str = MarketplaceType.SECURE.value, price_per_hour: float = 0.0):
+          marketplace_type: str = MarketplaceType.SECURE.name, price_per_hour: float = 0.0):
     device_bind(api_key, computing, server, supplier, master_inference_gateway_port, worker_inference_proxy_port,
                 worker_connection_type, marketplace_type, price_per_hour)
 
diff --git a/python/fedml/api/constants.py b/python/fedml/api/constants.py
index d4c59898a8..b284d7a056 100755
--- a/python/fedml/api/constants.py
+++ b/python/fedml/api/constants.py
@@ -1,22 +1,6 @@
 from enum import Enum, unique
 
 
-class MarketplaceType(Enum):
-    SECURE = 1
-    COMMUNITY = 2
-
-    def __str__(self):
-        return self.name
-
-    @classmethod
-    def from_str(cls, name: str):
-        """Get the enum member from a string."""
-        if name.upper() in cls.__members__:
-            return cls[name.upper()]
-        else:
-            raise ValueError(f"Invalid marketplace type: {name}")
-
-
 class ApiConstants:
     RESOURCE_MATCHED_STATUS_MATCHED = "MATCHED"
     RESOURCE_MATCHED_STATUS_JOB_URL_ERROR = "ERROR_JOB_URL"
diff --git a/python/fedml/api/modules/device.py b/python/fedml/api/modules/device.py
index 4a932ac631..41380cc51e 100644
--- a/python/fedml/api/modules/device.py
+++ b/python/fedml/api/modules/device.py
@@ -177,11 +177,11 @@ def _bind(
                 "-k",
                 user_api_key,
                 "-ngc",
-                "1"
+                "1",
                 "-mpt",
                 marketplace_type,
                 "-pph",
-                price_per_hour
+                str(price_per_hour)
             ]
         ).pid
         sys_utils.save_login_process(ClientConstants.LOCAL_HOME_RUNNER_DIR_NAME,
diff --git a/python/fedml/cli/modules/login.py b/python/fedml/cli/modules/login.py
index 2692a89177..d05106d04f 100644
--- a/python/fedml/cli/modules/login.py
+++ b/python/fedml/cli/modules/login.py
@@ -4,10 +4,10 @@
 import click
 
 import fedml.api
-from fedml.api import MarketplaceType
 from fedml.api.modules.utils import authenticate
 from fedml.computing.scheduler.model_scheduler.device_server_constants import ServerConstants
 from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants
+from fedml.computing.scheduler.scheduler_core.general_constants import MarketplaceType
 
 
 @click.command("login", help="Login the FedML® Nexus AI Platform")
@@ -98,7 +98,7 @@
 def fedml_login(
         api_key, version, compute_node, server, provider, deploy_worker_num,
         local_on_premise_platform, local_on_premise_platform_port,
-        master_inference_gateway_port, worker_inference_proxy_port, worker_connection_type, marketplace_type, cost
+        master_inference_gateway_port, worker_inference_proxy_port, worker_connection_type, marketplace_type, price_per_hour
 ):
     fedml.set_env_version(version)
     fedml.set_local_on_premise_platform_host(local_on_premise_platform)
@@ -113,4 +113,4 @@ def fedml_login(
         pass
     os.environ["FEDML_MODEL_WORKER_NUM"] = str(deploy_worker_num)
     fedml.api.login(api_key, compute_node, server, provider, master_inference_gateway_port,
-                    worker_inference_proxy_port, worker_connection_type, marketplace_type, cost)
+                    worker_inference_proxy_port, worker_connection_type, marketplace_type, price_per_hour)
diff --git a/python/fedml/computing/scheduler/scheduler_core/account_manager.py b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
index 3b3f1efc75..b29dd1bec2 100755
--- a/python/fedml/computing/scheduler/scheduler_core/account_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
@@ -12,7 +12,7 @@
 from fedml.computing.scheduler.comm_utils import sys_utils, security_utils
 from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants
 from fedml.computing.scheduler.comm_utils.sys_utils import get_sys_runner_info
-from fedml.computing.scheduler.scheduler_core.general_constants import GeneralConstants
+from fedml.computing.scheduler.scheduler_core.general_constants import GeneralConstants, MarketplaceType
 from fedml.core.common.singleton import Singleton
 from fedml.core.mlops import MLOpsConfigs
 
@@ -338,8 +338,8 @@ def bind_account_and_device_id(
         }
 
         if role == FedMLAccountManager.ROLE_GPU_PROVIDER:
-            json_params["marketplace_type"] = marketplace_type
-            json_params["price_per_hour"] = price_per_hour
+            json_params["marketplace_type"] = MarketplaceType.from_str(marketplace_type).value
+            json_params["price_per_hour"] = float(price_per_hour)
 
         if gpu_count > 0:
             if gpu_total_mem is not None:
diff --git a/python/fedml/computing/scheduler/scheduler_core/general_constants.py b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
index 8c60b17bdf..0fbd4881d9 100755
--- a/python/fedml/computing/scheduler/scheduler_core/general_constants.py
+++ b/python/fedml/computing/scheduler/scheduler_core/general_constants.py
@@ -1,5 +1,6 @@
 import logging
 import os
+from enum import Enum
 
 from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants
 from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils
@@ -9,6 +10,22 @@
 from fedml.computing.scheduler.model_scheduler import device_server_constants
 
 
+class MarketplaceType(Enum):
+    SECURE = 1
+    COMMUNITY = 2
+
+    def __str__(self):
+        return self.name
+
+    @classmethod
+    def from_str(cls, name: str):
+        """Get the enum member from a string."""
+        if name.upper() in cls.__members__:
+            return cls[name.upper()]
+        else:
+            raise ValueError(f"Invalid marketplace type: {name}")
+
+
 class GeneralConstants:
     MSG_TOPIC_REQUEST_JOB_STATUS_PREFIX = f"anywhere/master_agent/request_job_status/"
     MSG_TOPIC_REPORT_DEVICE_STATUS_IN_JOB = f"slave_job/slave_agent/report_device_status_in_job"
diff --git a/python/fedml/computing/scheduler/slave/client_daemon.py b/python/fedml/computing/scheduler/slave/client_daemon.py
index 6d83952e1f..14cf1ce23a 100755
--- a/python/fedml/computing/scheduler/slave/client_daemon.py
+++ b/python/fedml/computing/scheduler/slave/client_daemon.py
@@ -27,8 +27,8 @@
     parser.add_argument("--no_gpu_check", "-ngc", type=int, default=1)
     parser.add_argument("--local_on_premise_platform_host", "-lp", type=str, default="127.0.0.1")
     parser.add_argument("--local_on_premise_platform_port", "-lpp", type=int, default=80)
-    parser.add_argument("--market_place_type", "-mpt", type=int, default=MarketplaceType.SECURE.value)
-    parser.add_argument("--price_per_hour", "-pph", type=float, default=0.0)
+    parser.add_argument("--market_place_type", "-mpt", type=str, default=MarketplaceType.SECURE.name)
+    parser.add_argument("--price_per_hour", "-pph", type=str, default="0.0")
 
     args = parser.parse_args()
     args.user = args.user
diff --git a/python/fedml/computing/scheduler/slave/client_login.py b/python/fedml/computing/scheduler/slave/client_login.py
index 1ecc581f0d..4464443089 100755
--- a/python/fedml/computing/scheduler/slave/client_login.py
+++ b/python/fedml/computing/scheduler/slave/client_login.py
@@ -25,8 +25,8 @@ def logout():
     parser.add_argument("--no_gpu_check", "-ngc", type=int, default=1)
     parser.add_argument("--local_on_premise_platform_host", "-lp", type=str, default="127.0.0.1")
     parser.add_argument("--local_on_premise_platform_port", "-lpp", type=int, default=80)
-    parser.add_argument("--market_place_type", "-mpt", type=int, default=MarketplaceType.SECURE.value)
-    parser.add_argument("--price_per_hour", "-pph", type=float, default=0.0)
+    parser.add_argument("--market_place_type", "-mpt", type=str, default=MarketplaceType.SECURE.name)
+    parser.add_argument("--price_per_hour", "-pph", type=str, default="0.0")
 
     args = parser.parse_args()
     args.user = args.user
@@ -43,6 +43,6 @@ def logout():
     if args.type == 'login':
         slave_agent.login(userid=args.api_key, api_key=args.api_key, device_id=args.device_id,
                           os_name=args.os_name, role=args.role, marketplace_type=args.market_place_type,
-                          price_per_hours=args.price_per_hour)
+                          price_per_hour=args.price_per_hour)
     else:
         FedMLLaunchSlaveAgent.logout()

From b692734ff70141c25bd69bdc94e97e7a1aea7e14 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Mon, 5 Aug 2024 13:06:19 -0700
Subject: [PATCH 202/282] Nits

---
 python/fedml/api/__init__.py                            | 4 ++--
 python/fedml/computing/scheduler/slave/client_daemon.py | 2 +-
 python/fedml/computing/scheduler/slave/client_login.py  | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/python/fedml/api/__init__.py b/python/fedml/api/__init__.py
index cd8dd378d5..70b4b1c547 100755
--- a/python/fedml/api/__init__.py
+++ b/python/fedml/api/__init__.py
@@ -15,12 +15,12 @@
 """
 from typing import List, Tuple
 
-from fedml.api.constants import RunStatus, MarketplaceType
+from fedml.api.constants import RunStatus
 from fedml.api.fedml_response import FedMLResponse
 from fedml.api.modules import launch, utils, build, device, logs, diagnosis, cluster, run, train, federate, storage, \
     model as model_module  # Since "model" has conflict with one of the input parameters, we need to rename it
-from fedml.api.modules.launch import FeatureEntryPoint
 from fedml.api.modules.storage import StorageMetadata
+from fedml.computing.scheduler.scheduler_core.general_constants import MarketplaceType
 from fedml.computing.scheduler.scheduler_entry.cluster_manager import FedMLClusterModelList
 from fedml.computing.scheduler.scheduler_entry.run_manager import FedMLRunStartedModel, FedMLGpuDevices, \
     FedMLRunModelList, FeatureEntryPoint
diff --git a/python/fedml/computing/scheduler/slave/client_daemon.py b/python/fedml/computing/scheduler/slave/client_daemon.py
index 14cf1ce23a..2ded72f15b 100755
--- a/python/fedml/computing/scheduler/slave/client_daemon.py
+++ b/python/fedml/computing/scheduler/slave/client_daemon.py
@@ -9,9 +9,9 @@
 from fedml.computing.scheduler.comm_utils.sys_utils import cleanup_all_fedml_client_api_processes, \
     cleanup_all_fedml_client_learning_processes, cleanup_all_fedml_client_login_processes, get_python_program, \
     daemon_ota_upgrade
+from fedml.computing.scheduler.scheduler_core.general_constants import MarketplaceType
 from fedml.computing.scheduler.slave.client_constants import ClientConstants
 from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils
-from fedml.api.constants import MarketplaceType
 
 
 if __name__ == "__main__":
diff --git a/python/fedml/computing/scheduler/slave/client_login.py b/python/fedml/computing/scheduler/slave/client_login.py
index 4464443089..d08984e5ba 100755
--- a/python/fedml/computing/scheduler/slave/client_login.py
+++ b/python/fedml/computing/scheduler/slave/client_login.py
@@ -1,7 +1,7 @@
 import argparse
 import os
 import fedml
-from fedml.api import MarketplaceType
+from fedml.computing.scheduler.scheduler_core.general_constants import MarketplaceType
 from fedml.computing.scheduler.slave.slave_agent import FedMLLaunchSlaveAgent
 
 

From 043fa6e93c81f79f3be0844c9f3c0705c2ac2669 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Mon, 5 Aug 2024 17:18:21 -0700
Subject: [PATCH 203/282] Bugfix

---
 .../scheduler/scheduler_core/account_manager.py          | 4 ++--
 python/fedml/computing/scheduler/slave/client_daemon.py  | 9 +++------
 python/fedml/computing/scheduler/slave/client_login.py   | 4 ++--
 .../core/distributed/communication/mqtt/mqtt_manager.py  | 1 +
 4 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/python/fedml/computing/scheduler/scheduler_core/account_manager.py b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
index b29dd1bec2..fd27836d4b 100755
--- a/python/fedml/computing/scheduler/scheduler_core/account_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
@@ -338,8 +338,8 @@ def bind_account_and_device_id(
         }
 
         if role == FedMLAccountManager.ROLE_GPU_PROVIDER:
-            json_params["marketplace_type"] = MarketplaceType.from_str(marketplace_type).value
-            json_params["price_per_hour"] = float(price_per_hour)
+            json_params["marketplaceType"] = MarketplaceType.from_str(marketplace_type).value
+            json_params["providerPricePerHour"] = float(price_per_hour)
 
         if gpu_count > 0:
             if gpu_total_mem is not None:
diff --git a/python/fedml/computing/scheduler/slave/client_daemon.py b/python/fedml/computing/scheduler/slave/client_daemon.py
index 2ded72f15b..93dc7e65e1 100755
--- a/python/fedml/computing/scheduler/slave/client_daemon.py
+++ b/python/fedml/computing/scheduler/slave/client_daemon.py
@@ -1,4 +1,3 @@
-
 import argparse
 import os
 import time
@@ -13,7 +12,6 @@
 from fedml.computing.scheduler.slave.client_constants import ClientConstants
 from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils
 
-
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
     parser.add_argument("--type", "-t", help="Login or logout to MLOps platform")
@@ -27,7 +25,7 @@
     parser.add_argument("--no_gpu_check", "-ngc", type=int, default=1)
     parser.add_argument("--local_on_premise_platform_host", "-lp", type=str, default="127.0.0.1")
     parser.add_argument("--local_on_premise_platform_port", "-lpp", type=int, default=80)
-    parser.add_argument("--market_place_type", "-mpt", type=str, default=MarketplaceType.SECURE.name)
+    parser.add_argument("--marketplace_type", "-mpt", type=str, default=MarketplaceType.SECURE.name)
     parser.add_argument("--price_per_hour", "-pph", type=str, default="0.0")
 
     args = parser.parse_args()
@@ -62,7 +60,6 @@
             logging.error(f"Cleanup failed | Exception: {e}")
             pass
 
-
         # daemon_ota_upgrade(args)
 
         if platform.system() == "Windows":
@@ -92,7 +89,6 @@
                     args.marketplace_type,
                     "-pph",
                     args.price_per_hour
-
                 ]
             )
             ret_code, exec_out, exec_err = ClientConstants.get_console_sys_out_pipe_err_results(login_pid)
@@ -101,7 +97,8 @@
             login_logs = os.path.join(ClientConstants.get_log_file_dir(), "login.log")
             run_login_cmd = f"nohup {get_python_program()} -W ignore {login_cmd} -t login -u {args.user} " \
                             f"-v {args.version} -r {args.role} -id {args.device_id} " \
-                            f"-k {args.api_key} -ngc {str(args.no_gpu_check)} > {login_logs} 2>&1 &"
+                            f"-k {args.api_key} -ngc {str(args.no_gpu_check)} -mpt {args.marketplace_type} " \
+                            f"-pph {args.price_per_hour} > {login_logs} 2>&1 &"
             if args.os_name != "":
                 run_login_cmd += f" -os {args.os_name}"
             os.system(run_login_cmd)
diff --git a/python/fedml/computing/scheduler/slave/client_login.py b/python/fedml/computing/scheduler/slave/client_login.py
index d08984e5ba..e3f2378132 100755
--- a/python/fedml/computing/scheduler/slave/client_login.py
+++ b/python/fedml/computing/scheduler/slave/client_login.py
@@ -25,7 +25,7 @@ def logout():
     parser.add_argument("--no_gpu_check", "-ngc", type=int, default=1)
     parser.add_argument("--local_on_premise_platform_host", "-lp", type=str, default="127.0.0.1")
     parser.add_argument("--local_on_premise_platform_port", "-lpp", type=int, default=80)
-    parser.add_argument("--market_place_type", "-mpt", type=str, default=MarketplaceType.SECURE.name)
+    parser.add_argument("--marketplace_type", "-mpt", type=str, default=MarketplaceType.SECURE.name)
     parser.add_argument("--price_per_hour", "-pph", type=str, default="0.0")
 
     args = parser.parse_args()
@@ -42,7 +42,7 @@ def logout():
     slave_agent = FedMLLaunchSlaveAgent()
     if args.type == 'login':
         slave_agent.login(userid=args.api_key, api_key=args.api_key, device_id=args.device_id,
-                          os_name=args.os_name, role=args.role, marketplace_type=args.market_place_type,
+                          os_name=args.os_name, role=args.role, marketplace_type=args.marketplace_type,
                           price_per_hour=args.price_per_hour)
     else:
         FedMLLaunchSlaveAgent.logout()
diff --git a/python/fedml/core/distributed/communication/mqtt/mqtt_manager.py b/python/fedml/core/distributed/communication/mqtt/mqtt_manager.py
index 937e9f6644..401f2e7521 100644
--- a/python/fedml/core/distributed/communication/mqtt/mqtt_manager.py
+++ b/python/fedml/core/distributed/communication/mqtt/mqtt_manager.py
@@ -11,6 +11,7 @@
 
 import fedml
 
+
 class MqttManager(object):
     def __init__(self, host, port, user, pwd, keepalive_time,
                  client_id, last_will_topic=None, last_will_msg=None,

From bbf24933565fc39c81163e4dea43c7201e040a6a Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Thu, 8 Aug 2024 14:18:33 -0700
Subject: [PATCH 204/282] Adding validation and price range restriction

---
 python/fedml/cli/modules/login.py | 24 ++++++++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)

diff --git a/python/fedml/cli/modules/login.py b/python/fedml/cli/modules/login.py
index d05106d04f..0e44159692 100644
--- a/python/fedml/cli/modules/login.py
+++ b/python/fedml/cli/modules/login.py
@@ -89,7 +89,8 @@
     "-pph",
     type=click.FLOAT,
     default=0.0,
-    help="Enter the price per GPU per hour as a floating-point number. For example, if the cost of using an H100 node "
+    help="Enter the price per GPU per hour as a non-negative floating-point number between 0.0 and 1000.0. For "
+         "example, if the cost of using an H100 node "
          "for one hour is $1.5 per GPU, then you would input 1.5. Do not multiply this number by the total number of "
          "GPUs in the node, as the system will automatically detect the number of GPUs and include it in the cost "
          "calculation. Default is 0.0."
@@ -98,12 +99,20 @@
 def fedml_login(
         api_key, version, compute_node, server, provider, deploy_worker_num,
         local_on_premise_platform, local_on_premise_platform_port,
-        master_inference_gateway_port, worker_inference_proxy_port, worker_connection_type, marketplace_type, price_per_hour
+        master_inference_gateway_port, worker_inference_proxy_port, worker_connection_type, marketplace_type,
+        price_per_hour
 ):
     fedml.set_env_version(version)
     fedml.set_local_on_premise_platform_host(local_on_premise_platform)
     fedml.set_local_on_premise_platform_port(local_on_premise_platform_port)
 
+    try:
+        price_per_hour = float(price_per_hour)
+    except ValueError as e:
+        raise click.BadParameter(str(e), param_hint="price_per_hour")
+
+    __validate_mpt_pph(marketplace_type, price_per_hour)
+
     api_key = api_key[0] if len(api_key) > 0 else None
     try:
         authenticate(api_key)
@@ -114,3 +123,14 @@ def fedml_login(
     os.environ["FEDML_MODEL_WORKER_NUM"] = str(deploy_worker_num)
     fedml.api.login(api_key, compute_node, server, provider, master_inference_gateway_port,
                     worker_inference_proxy_port, worker_connection_type, marketplace_type, price_per_hour)
+
+
+def __validate_mpt_pph(marketplace_type, price_per_hour):
+    try:
+        MarketplaceType.from_str(marketplace_type)
+    except ValueError as e:
+        raise click.BadParameter(str(e), param_hint="marketplace_type")
+
+    if price_per_hour < 0 or price_per_hour > 1000:
+        raise click.BadParameter(f"Price per hour should be a non-negative float ranging between 0 and 1000. Current "
+                                 f"input value {price_per_hour} is not valid", param_hint="price_per_hour")

From a990a6013336b0ea665168529cc6ae28367bc143 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Mon, 12 Aug 2024 10:54:38 -0700
Subject: [PATCH 205/282] [Deploy] Automatically mount the workspace to
 container in the default setting.

---
 .../device_client_constants.py                |  5 +-
 .../device_model_deployment.py                | 66 ++++++-------------
 2 files changed, 24 insertions(+), 47 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
index 3bb2e12aed..fbe7a95ab9 100644
--- a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
@@ -162,8 +162,9 @@ class ClientConstants(object):
     EXPOSE_SUBDOMAINS_KEY = "expose_subdomains"
 
     CUSTOMIZED_VOLUMES_MOUNT_KEY = "volumes"
-    CUSTOMIZED_VOLUMES_PATH_FROM_WORKSPACE_KEY = "workspace_path"
-    CUSTOMIZED_VOLUMES_PATH_FROM_CONTAINER_KEY = "mount_path"
+
+    CUSTOMIZED_WORKSPACE_MOUNT_PATH_KEY = "workspace_mount_path"
+
     CUSTOMIZED_SERVICE_KEY = "service"
 
     ENV_USER_ENCRYPTED_API_KEY = "FEDML_USER_ENCRYPTED_API_KEY"
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index 6a637653fc..5aa58ae520 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -95,7 +95,7 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
         # Storage related
         src_code_dir = os.path.join(model_storage_local_path, config.get('source_code_dir', ""))
         data_cache_dir_input = config.get('data_cache_dir', "")
-        usr_customized_mount_rule = config.get(ClientConstants.CUSTOMIZED_VOLUMES_MOUNT_KEY, None)
+        usr_customized_workspace_dst = config.get(ClientConstants.CUSTOMIZED_WORKSPACE_MOUNT_PATH_KEY, "")
 
         # Others
         extra_envs = config.get('environment_variables', None)
@@ -170,7 +170,7 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
 
     # Handle the default volume mount
     handle_volume_mount(volumes, binds, environment, relative_entry_fedml_format, src_code_dir,
-                        dst_model_serving_dir, usr_customized_mount_rule, host_workspace_root=model_storage_local_path)
+                        dst_model_serving_dir, usr_customized_workspace_dst)
 
     # Host config
     host_config_dict = {
@@ -541,52 +541,22 @@ def _handle_union_volume_mount(binds, volumes, environment, data_cache_dir_input
 
 
 def handle_volume_mount(volumes, binds, environment, relative_entry_fedml_format="", src_code_dir="",
-                        dst_model_serving_dir="", customized_volumes_mount_rule=None, host_workspace_root=""):
+                        dst_model_serving_dir="", usr_customized_workspace_dst=""):
     # If fedml format entry point is specified, inject the source code, e.g., main.py (FedMLPredictor inside)
+    volumes.append(src_code_dir)
+    dst_mount_dir = dst_model_serving_dir
+
+    if usr_customized_workspace_dst != "" and relative_entry_fedml_format == "":
+        # We only allow the user to indicate the workspace mount rule when using a custom image
+        dst_mount_dir = usr_customized_workspace_dst
+
+    binds[src_code_dir] = {
+        "bind": dst_mount_dir,
+        "mode": "rw"
+    }
+
     if relative_entry_fedml_format != "":
-        logging.info("Using FedML format entry point, mounting the source code...")
-        volumes.append(src_code_dir)
-        binds[src_code_dir] = {
-            "bind": dst_model_serving_dir,
-            "mode": "rw"
-        }
         environment["MAIN_ENTRY"] = relative_entry_fedml_format
-        return  # The reason we return here is that we don't need to mount the source code again
-
-    # If customized volume mount rule is specified, just follow the mount rule
-    """
-    e.g.,
-    volumes:
-      - workspace_path: "./model_repository"
-        mount_path: "/repo_inside_container"
-    """
-    mount_list = []
-    if not isinstance(customized_volumes_mount_rule, list):
-        if not isinstance(customized_volumes_mount_rule, dict):
-            logging.warning("customized_volumes_mount_rule is not a list or a dictionary, "
-                            "skip mounting it to the container")
-            return
-
-        # transform the dict to list
-        for k, v in customized_volumes_mount_rule.items():
-            mount_list.append({ClientConstants.CUSTOMIZED_VOLUMES_PATH_FROM_WORKSPACE_KEY: k,
-                               ClientConstants.CUSTOMIZED_VOLUMES_PATH_FROM_CONTAINER_KEY: v})
-    else:
-        mount_list = customized_volumes_mount_rule if customized_volumes_mount_rule is not None else []
-
-    for mount in mount_list:
-        workspace_relative_path = mount[ClientConstants.CUSTOMIZED_VOLUMES_PATH_FROM_WORKSPACE_KEY]
-        mount_path = mount[ClientConstants.CUSTOMIZED_VOLUMES_PATH_FROM_CONTAINER_KEY]
-
-        workspace_path = os.path.join(host_workspace_root, workspace_relative_path)
-        if os.path.exists(workspace_path):
-            volumes.append(workspace_path)
-            binds[workspace_path] = {
-                "bind": mount_path,
-                "mode": "rw"
-            }
-        else:
-            logging.warning(f"{workspace_path} does not exist, skip mounting it to the container")
 
 
 def handle_container_service_app(config, model_storage_local_path):
@@ -618,6 +588,12 @@ def handle_container_service_app(config, model_storage_local_path):
     # User indicate either fedml format python main entry filename or entry command
     expose_subdomains = config.get(ClientConstants.EXPOSE_SUBDOMAINS_KEY, False)
     customized_image_entry_cmd = config.get('container_run_command', None)  # Could be str or list
+
+    if customized_image_entry_cmd is not None and relative_entry_fedml_format != "":
+        logging.warning("Both entry_point and container_run_command are specified, "
+                        "entry_point will be ignored")
+        relative_entry_fedml_format = ""
+
     customized_readiness_check = config.get('readiness_probe', ClientConstants.READINESS_PROBE_DEFAULT)
     customized_liveliness_check = config.get('liveness_probe', ClientConstants.LIVENESS_PROBE_DEFAULT)
     customized_uri = config.get(ClientConstants.CUSTOMIZED_SERVICE_KEY, "")

From dfd83088c96e78c4848dce63f591dd4896e87391 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Mon, 12 Aug 2024 15:08:58 -0700
Subject: [PATCH 206/282] [Deploy] Support bootstrap and CMD be indicated
 together.

---
 .../device_model_deployment.py                | 27 +++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index 5aa58ae520..ce4da9553c 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -594,6 +594,33 @@ def handle_container_service_app(config, model_storage_local_path):
                         "entry_point will be ignored")
         relative_entry_fedml_format = ""
 
+    # if usr indicates both the customized_image_entry_cmd and bootstrap, we will inject the bootstrap into the entry
+    # Using /bin/bash to run the bootstrap script, there are three legal formats for the customized_image_entry_cmd
+    # However, only the third one is supported in this function
+    """
+    1. CMD ["executable","param1","param2"] (exec form)
+    e.g. 
+        CMD ["python3", "/app/app.py", "--port", "8080"]
+    
+    2. CMD ["param1","param2"] (exec form, as default parameters to ENTRYPOINT)
+    e.g.
+        ENTRYPOINT ["nginx"]
+        CMD ["-g", "daemon off;"]
+        
+    3. CMD command param1 param2
+    e.g.
+        echo "Container is running" && curl http://example.com
+    """
+    if dst_bootstrap_dir != "" and customized_image_entry_cmd is not None:
+        if isinstance(customized_image_entry_cmd, str):
+            if customized_image_entry_cmd == "":
+                # We do not know the original CMD in the Dockerfile and do not want to overwrite it
+                pass
+            else:
+                customized_image_entry_cmd = f"/bin/bash {dst_bootstrap_dir} && {customized_image_entry_cmd}"
+        else:
+            logging.warning("The customized_image_entry_cmd is not a string, skip injecting the bootstrap script")
+
     customized_readiness_check = config.get('readiness_probe', ClientConstants.READINESS_PROBE_DEFAULT)
     customized_liveliness_check = config.get('liveness_probe', ClientConstants.LIVENESS_PROBE_DEFAULT)
     customized_uri = config.get(ClientConstants.CUSTOMIZED_SERVICE_KEY, "")

From 1a09b0edc210f51e05190c7b54ad51b9c45a549f Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Mon, 12 Aug 2024 16:58:12 -0700
Subject: [PATCH 207/282] [Deploy] Nit.

---
 .../device_model_deployment.py                | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index ce4da9553c..2fbbbfcb0d 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -555,13 +555,16 @@ def handle_volume_mount(volumes, binds, environment, relative_entry_fedml_format
         "mode": "rw"
     }
 
+    logging.info(f"Mounting the source code to the container..., target: {dst_mount_dir}")
+
     if relative_entry_fedml_format != "":
         environment["MAIN_ENTRY"] = relative_entry_fedml_format
 
 
 def handle_container_service_app(config, model_storage_local_path):
     # Bootstrap, job and entrypoint related
-    dst_model_serving_dir = "/home/fedml/models_serving"
+    dst_model_serving_dir = config.get(ClientConstants.CUSTOMIZED_WORKSPACE_MOUNT_PATH_KEY,
+                                       "/home/fedml/models_serving")
     bootstrap_cmds_str_frm_yaml = config.get('bootstrap', "")
     job_cmds_str_frm_yaml = config.get('job', "")
 
@@ -569,16 +572,21 @@ def handle_container_service_app(config, model_storage_local_path):
     if bootstrap_cmds_str_frm_yaml != "" or job_cmds_str_frm_yaml != "":
         src_bootstrap_file_path = os.path.join(model_storage_local_path, auto_gen_bootstrap_file_name)
         with open(src_bootstrap_file_path, 'w') as f:
-            f.write("cd /home/fedml/models_serving/\n")
+            f.write(f"cd {dst_model_serving_dir}/\n")
             f.write(bootstrap_cmds_str_frm_yaml)
             f.write("\n")
-            f.write("cd /home/fedml/models_serving/\n")
+            f.write(f"cd {dst_model_serving_dir}/\n")
             f.write(job_cmds_str_frm_yaml)
     else:
         src_bootstrap_file_path = ""
 
     if src_bootstrap_file_path != "":
         dst_bootstrap_dir = os.path.join(dst_model_serving_dir, auto_gen_bootstrap_file_name)
+
+        # User could specify "workspace_mount_path", override the default path
+        if ClientConstants.CUSTOMIZED_WORKSPACE_MOUNT_PATH_KEY in config:
+            dst_bootstrap_dir = os.path.join(config[ClientConstants.CUSTOMIZED_WORKSPACE_MOUNT_PATH_KEY],
+                                             auto_gen_bootstrap_file_name)
     else:
         dst_bootstrap_dir = ""
 
@@ -617,7 +625,10 @@ def handle_container_service_app(config, model_storage_local_path):
                 # We do not know the original CMD in the Dockerfile and do not want to overwrite it
                 pass
             else:
-                customized_image_entry_cmd = f"/bin/bash {dst_bootstrap_dir} && {customized_image_entry_cmd}"
+                # TODO(Raphael): Try to fix the compatibility issue with the first two formats and
+                #  also the restriction of /bin/bash
+                customized_image_entry_cmd = \
+                    f"/bin/bash -c '/bin/bash {dst_bootstrap_dir} && {customized_image_entry_cmd}'"
         else:
             logging.warning("The customized_image_entry_cmd is not a string, skip injecting the bootstrap script")
 

From 7e5f6a177afd5767f020890d19d38814cafdc40b Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Mon, 12 Aug 2024 17:05:44 -0700
Subject: [PATCH 208/282] [Deploy] Add example.

---
 .../quickstart/config.yaml                    | 12 ++++++++
 .../quickstart/main_entry.py                  | 28 +++++++++++++++++++
 2 files changed, 40 insertions(+)
 create mode 100644 python/examples/deploy/custom_inference_image/quickstart/config.yaml
 create mode 100644 python/examples/deploy/custom_inference_image/quickstart/main_entry.py

diff --git a/python/examples/deploy/custom_inference_image/quickstart/config.yaml b/python/examples/deploy/custom_inference_image/quickstart/config.yaml
new file mode 100644
index 0000000000..382209ad1b
--- /dev/null
+++ b/python/examples/deploy/custom_inference_image/quickstart/config.yaml
@@ -0,0 +1,12 @@
+workspace: "."
+inference_image: "your_docker_hub_repo/your_image_name"
+
+workspace_mount_path: "/my_workspace"  # Default is "/home/fedml/models_serving"
+
+container_run_command: "echo hello && python3 /my_workspace/main_entry.py"
+
+# If you want to install some packages
+# Please write the command in the bootstrap.sh
+bootstrap: |
+  echo "Install some packages..."
+  echo "Install finished!"
diff --git a/python/examples/deploy/custom_inference_image/quickstart/main_entry.py b/python/examples/deploy/custom_inference_image/quickstart/main_entry.py
new file mode 100644
index 0000000000..7b7caee87b
--- /dev/null
+++ b/python/examples/deploy/custom_inference_image/quickstart/main_entry.py
@@ -0,0 +1,28 @@
+from fedml.serving import FedMLPredictor
+from fedml.serving import FedMLInferenceRunner
+import uuid
+
+
+class Bot(FedMLPredictor):  # Inherit FedMLClientPredictor
+    def __init__(self):
+        super().__init__()
+
+        # --- Your model initialization code here, here is a example ---
+        self.uuid = uuid.uuid4()
+        # -------------------------------------------
+    
+    def predict(self, request: dict):
+        input_dict = request
+        question: str = input_dict.get("text", "").strip()
+
+        # --- Your model inference code here ---
+        response = f"I am a replica, my id is {self.uuid}"
+        # ---------------------------------------
+
+        return {"v1_generated_text": f"V1: The answer to your question {question} is: {response}"}
+
+
+if __name__ == "__main__":
+    chatbot = Bot()
+    fedml_inference_runner = FedMLInferenceRunner(chatbot)
+    fedml_inference_runner.run()

From 47efcde55a7e8fd669dd88ce505b74c197e5f6e8 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Thu, 22 Aug 2024 14:26:44 -0700
Subject: [PATCH 209/282] feat: Add name parameter to the bindingEdge method

---
 python/fedml/api/__init__.py                           |  8 ++++----
 python/fedml/api/modules/device.py                     | 10 ++++++----
 python/fedml/cli/modules/login.py                      |  9 ++++++++-
 .../scheduler/scheduler_core/account_manager.py        |  9 ++++++---
 .../computing/scheduler/slave/base_slave_agent.py      |  4 ++--
 .../fedml/computing/scheduler/slave/client_daemon.py   |  7 +++++--
 python/fedml/computing/scheduler/slave/client_login.py |  3 ++-
 7 files changed, 33 insertions(+), 17 deletions(-)

diff --git a/python/fedml/api/__init__.py b/python/fedml/api/__init__.py
index 70b4b1c547..6c82c9b9b3 100755
--- a/python/fedml/api/__init__.py
+++ b/python/fedml/api/__init__.py
@@ -215,9 +215,9 @@ def login(api_key, computing, server, supplier,
           master_inference_gateway_port: int = ServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
           worker_inference_proxy_port: int = ClientConstants.LOCAL_CLIENT_API_PORT,
           worker_connection_type: str = ClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT,
-          marketplace_type: str = MarketplaceType.SECURE.name, price_per_hour: float = 0.0):
+          marketplace_type: str = MarketplaceType.SECURE.name, price_per_hour: float = 0.0, name=""):
     device_bind(api_key, computing, server, supplier, master_inference_gateway_port, worker_inference_proxy_port,
-                worker_connection_type, marketplace_type, price_per_hour)
+                worker_connection_type, marketplace_type, price_per_hour, name)
 
 
 def logout(computing, server):
@@ -225,11 +225,11 @@ def logout(computing, server):
 
 
 def device_bind(api_key, computing, server, supplier, master_inference_gateway_port, worker_inference_proxy_port,
-                worker_connection_type, marketplace_type, price_per_hour):
+                worker_connection_type, marketplace_type, price_per_hour, name):
     device.bind(api_key=api_key, computing=computing, server=server, supplier=supplier,
                 master_inference_gateway_port=master_inference_gateway_port,
                 worker_inference_proxy_port=worker_inference_proxy_port, worker_connection_type=worker_connection_type,
-                marketplace_type=marketplace_type, price_per_hour=price_per_hour)
+                marketplace_type=marketplace_type, price_per_hour=price_per_hour, name=name)
 
 
 def device_unbind(computing, server):
diff --git a/python/fedml/api/modules/device.py b/python/fedml/api/modules/device.py
index 41380cc51e..9f39419a74 100644
--- a/python/fedml/api/modules/device.py
+++ b/python/fedml/api/modules/device.py
@@ -19,7 +19,7 @@
 
 
 def bind(
-        api_key, computing, server, supplier, marketplace_type, price_per_hour,
+        api_key, computing, server, supplier, marketplace_type, price_per_hour, name,
         master_inference_gateway_port=DeviceServerConstants.MODEL_INFERENCE_DEFAULT_PORT,
         worker_inference_proxy_port=DeviceClientConstants.LOCAL_CLIENT_API_PORT,
         worker_connection_type=DeviceClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT):
@@ -48,12 +48,12 @@ def bind(
         userid, computing, server,
         api_key, role, runner_cmd, device_id, os_name,
         docker, master_inference_gateway_port, worker_inference_proxy_port, worker_connection_type, marketplace_type,
-        price_per_hour)
+        price_per_hour, name)
 
 
 def _bind(
         userid, computing, server, api_key, role, runner_cmd, device_id, os_name, docker, master_inference_gateway_port,
-        worker_inference_proxy_port, worker_connection_type, marketplace_type, price_per_hour):
+        worker_inference_proxy_port, worker_connection_type, marketplace_type, price_per_hour, name):
     fedml.load_env()
     if os.getenv(ModuleConstants.ENV_FEDML_INFER_HOST) is None:
         fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_HOST, SchedulerConstants.REDIS_INFER_HOST)
@@ -181,7 +181,9 @@ def _bind(
                 "-mpt",
                 marketplace_type,
                 "-pph",
-                str(price_per_hour)
+                str(price_per_hour),
+                "-n",
+                name
             ]
         ).pid
         sys_utils.save_login_process(ClientConstants.LOCAL_HOME_RUNNER_DIR_NAME,
diff --git a/python/fedml/cli/modules/login.py b/python/fedml/cli/modules/login.py
index 0e44159692..fc1cd408c4 100644
--- a/python/fedml/cli/modules/login.py
+++ b/python/fedml/cli/modules/login.py
@@ -96,11 +96,18 @@
          "calculation. Default is 0.0."
          "Optionally, you can also set this price later through supplier page on the FEDML® Nexus AI Platform."
 )
+@click.option(
+    "--name",
+    "-n",
+    type=str,
+    default="",
+    help="Name of the node.",
+)
 def fedml_login(
         api_key, version, compute_node, server, provider, deploy_worker_num,
         local_on_premise_platform, local_on_premise_platform_port,
         master_inference_gateway_port, worker_inference_proxy_port, worker_connection_type, marketplace_type,
-        price_per_hour
+        price_per_hour, name
 ):
     fedml.set_env_version(version)
     fedml.set_local_on_premise_platform_host(local_on_premise_platform)
diff --git a/python/fedml/computing/scheduler/scheduler_core/account_manager.py b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
index fd27836d4b..1a378d54cd 100755
--- a/python/fedml/computing/scheduler/scheduler_core/account_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
@@ -49,7 +49,7 @@ def get_instance():
         return FedMLAccountManager()
 
     def login(self, user_id, api_key="", device_id=None, os_name=None, role=None, runner_cmd=None, marketplace_type=None,
-              price_per_hour=None):
+              price_per_hour=None, name=""):
         # Build the agent args
         self.build_agent_args(
             user_id, api_key=api_key, device_id=device_id, os_name=os_name, role=role, runner_cmd=runner_cmd
@@ -96,7 +96,8 @@ def login(self, user_id, api_key="", device_id=None, os_name=None, role=None, ru
                 edge_id, user_name, extra_url, general_edge_id = FedMLAccountManager.bind_account_and_device_id(
                     url=service_config["ml_ops_config"]["EDGE_BINDING_URL"], account_id=self.agent_args.account_id,
                     device_id=self.agent_args.unique_device_id, os_name=self.agent_args.os_name,
-                    api_key=api_key, role=role, marketplace_type=marketplace_type, price_per_hour=price_per_hour
+                    api_key=api_key, role=role, marketplace_type=marketplace_type, price_per_hour=price_per_hour,
+                    name=name
                 )
                 if edge_id > 0:
                     break
@@ -310,7 +311,7 @@ def get_machine_id():
     @staticmethod
     def bind_account_and_device_id(
             url, account_id, device_id, marketplace_type, price_per_hour, os_name,api_key="",
-            role=ROLE_EDGE_SERVER):
+            role=ROLE_EDGE_SERVER, name=""):
         ip = requests.get('https://checkip.amazonaws.com').text.strip()
         fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \
             cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \
@@ -340,6 +341,8 @@ def bind_account_and_device_id(
         if role == FedMLAccountManager.ROLE_GPU_PROVIDER:
             json_params["marketplaceType"] = MarketplaceType.from_str(marketplace_type).value
             json_params["providerPricePerHour"] = float(price_per_hour)
+            if name:
+                json_params["name"] = name
 
         if gpu_count > 0:
             if gpu_total_mem is not None:
diff --git a/python/fedml/computing/scheduler/slave/base_slave_agent.py b/python/fedml/computing/scheduler/slave/base_slave_agent.py
index 61ef6bf4c5..a149dfe046 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_agent.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_agent.py
@@ -25,7 +25,7 @@ def __init__(self):
     def login(
             self, userid, api_key=None, device_id=None,
             os_name=None, need_to_check_gpu=False, role=None,
-            marketplace_type=None, price_per_hour=None
+            marketplace_type=None, price_per_hour=None, name=""
     ):
         # Preprocess the login args
         if need_to_check_gpu:
@@ -40,7 +40,7 @@ def login(
         login_result = FedMLAccountManager.get_instance().login(
             userid, api_key=api_key, device_id=device_id,
             os_name=os_name, role=role, marketplace_type=marketplace_type,
-            price_per_hour=price_per_hour
+            price_per_hour=price_per_hour, name=name
         )
         if login_result is not None:
             self.agent_args = login_result
diff --git a/python/fedml/computing/scheduler/slave/client_daemon.py b/python/fedml/computing/scheduler/slave/client_daemon.py
index 93dc7e65e1..a82c446c0d 100755
--- a/python/fedml/computing/scheduler/slave/client_daemon.py
+++ b/python/fedml/computing/scheduler/slave/client_daemon.py
@@ -27,6 +27,7 @@
     parser.add_argument("--local_on_premise_platform_port", "-lpp", type=int, default=80)
     parser.add_argument("--marketplace_type", "-mpt", type=str, default=MarketplaceType.SECURE.name)
     parser.add_argument("--price_per_hour", "-pph", type=str, default="0.0")
+    parser.add_argument("--name", "-n", type=str, default="")
 
     args = parser.parse_args()
     args.user = args.user
@@ -88,7 +89,9 @@
                     "-mpt",
                     args.marketplace_type,
                     "-pph",
-                    args.price_per_hour
+                    args.price_per_hour,
+                    "-n",
+                    args.name
                 ]
             )
             ret_code, exec_out, exec_err = ClientConstants.get_console_sys_out_pipe_err_results(login_pid)
@@ -98,7 +101,7 @@
             run_login_cmd = f"nohup {get_python_program()} -W ignore {login_cmd} -t login -u {args.user} " \
                             f"-v {args.version} -r {args.role} -id {args.device_id} " \
                             f"-k {args.api_key} -ngc {str(args.no_gpu_check)} -mpt {args.marketplace_type} " \
-                            f"-pph {args.price_per_hour} > {login_logs} 2>&1 &"
+                            f"-pph {args.price_per_hour} -n {args.name} > {login_logs} 2>&1 &"
             if args.os_name != "":
                 run_login_cmd += f" -os {args.os_name}"
             os.system(run_login_cmd)
diff --git a/python/fedml/computing/scheduler/slave/client_login.py b/python/fedml/computing/scheduler/slave/client_login.py
index e3f2378132..c830f9e8c2 100755
--- a/python/fedml/computing/scheduler/slave/client_login.py
+++ b/python/fedml/computing/scheduler/slave/client_login.py
@@ -27,6 +27,7 @@ def logout():
     parser.add_argument("--local_on_premise_platform_port", "-lpp", type=int, default=80)
     parser.add_argument("--marketplace_type", "-mpt", type=str, default=MarketplaceType.SECURE.name)
     parser.add_argument("--price_per_hour", "-pph", type=str, default="0.0")
+    parser.add_argument("--name", "-n", type=str, default="")
 
     args = parser.parse_args()
     args.user = args.user
@@ -43,6 +44,6 @@ def logout():
     if args.type == 'login':
         slave_agent.login(userid=args.api_key, api_key=args.api_key, device_id=args.device_id,
                           os_name=args.os_name, role=args.role, marketplace_type=args.marketplace_type,
-                          price_per_hour=args.price_per_hour)
+                          price_per_hour=args.price_per_hour, name=args.name)
     else:
         FedMLLaunchSlaveAgent.logout()

From 135c55b77a1631a78cd0027fb6f2fdf2a2e57a68 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Thu, 22 Aug 2024 14:51:02 -0700
Subject: [PATCH 210/282] Pass name into login

---
 python/fedml/cli/modules/login.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/cli/modules/login.py b/python/fedml/cli/modules/login.py
index fc1cd408c4..b76346ec1b 100644
--- a/python/fedml/cli/modules/login.py
+++ b/python/fedml/cli/modules/login.py
@@ -129,7 +129,7 @@ def fedml_login(
         pass
     os.environ["FEDML_MODEL_WORKER_NUM"] = str(deploy_worker_num)
     fedml.api.login(api_key, compute_node, server, provider, master_inference_gateway_port,
-                    worker_inference_proxy_port, worker_connection_type, marketplace_type, price_per_hour)
+                    worker_inference_proxy_port, worker_connection_type, marketplace_type, price_per_hour, name)
 
 
 def __validate_mpt_pph(marketplace_type, price_per_hour):

From 4cf806621ac5d2387720b8de47cb2a380ca6b4c4 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@tensoropera.com>
Date: Wed, 4 Sep 2024 00:20:00 +0000
Subject: [PATCH 211/282] Fixing grpc and trpc ipconfig from 127.0.0.1 to
 0.0.0.0

---
 .../custom_data_and_model/config/grpc_ipconfig.csv          | 6 +++---
 .../one_line/config/grpc_ipconfig.csv                       | 6 +++---
 .../step_by_step/config/grpc_ipconfig.csv                   | 6 +++---
 .../one_line/config/trpc_master_config.csv                  | 2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/config/grpc_ipconfig.csv b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/config/grpc_ipconfig.csv
index f97ee8f910..4618a98b7a 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/config/grpc_ipconfig.csv
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/config/grpc_ipconfig.csv
@@ -1,4 +1,4 @@
 receiver_id,ip
-0,127.0.0.1
-1,127.0.0.1
-2,127.0.0.1
\ No newline at end of file
+0,0.0.0.0
+1,0.0.0.0
+2,0.0.0.0
\ No newline at end of file
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/config/grpc_ipconfig.csv b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/config/grpc_ipconfig.csv
index f97ee8f910..4618a98b7a 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/config/grpc_ipconfig.csv
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/config/grpc_ipconfig.csv
@@ -1,4 +1,4 @@
 receiver_id,ip
-0,127.0.0.1
-1,127.0.0.1
-2,127.0.0.1
\ No newline at end of file
+0,0.0.0.0
+1,0.0.0.0
+2,0.0.0.0
\ No newline at end of file
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/config/grpc_ipconfig.csv b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/config/grpc_ipconfig.csv
index f97ee8f910..4618a98b7a 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/config/grpc_ipconfig.csv
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/config/grpc_ipconfig.csv
@@ -1,4 +1,4 @@
 receiver_id,ip
-0,127.0.0.1
-1,127.0.0.1
-2,127.0.0.1
\ No newline at end of file
+0,0.0.0.0
+1,0.0.0.0
+2,0.0.0.0
\ No newline at end of file
diff --git a/python/examples/federate/cross_silo/trpc_fedavg_mnist_lr_example/one_line/config/trpc_master_config.csv b/python/examples/federate/cross_silo/trpc_fedavg_mnist_lr_example/one_line/config/trpc_master_config.csv
index dbfb0c6a4b..9708cd18e6 100644
--- a/python/examples/federate/cross_silo/trpc_fedavg_mnist_lr_example/one_line/config/trpc_master_config.csv
+++ b/python/examples/federate/cross_silo/trpc_fedavg_mnist_lr_example/one_line/config/trpc_master_config.csv
@@ -1,2 +1,2 @@
 master_ip, master_port
-127.0.0.1,29600
+0.0.0.0,29600

From 277f4caefe31478f2a5accbfb24170307cb73035 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Thu, 5 Sep 2024 15:46:19 -0700
Subject: [PATCH 212/282] Remove if condition, add log

---
 .../computing/scheduler/scheduler_core/account_manager.py     | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/scheduler_core/account_manager.py b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
index 1a378d54cd..85d76d5973 100755
--- a/python/fedml/computing/scheduler/scheduler_core/account_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
@@ -341,8 +341,8 @@ def bind_account_and_device_id(
         if role == FedMLAccountManager.ROLE_GPU_PROVIDER:
             json_params["marketplaceType"] = MarketplaceType.from_str(marketplace_type).value
             json_params["providerPricePerHour"] = float(price_per_hour)
-            if name:
-                json_params["name"] = name
+            json_params["name"] = name
+            logging.info(f"[DEBUG] marketplaceType: {marketplace_type}, price_per_hour: {price_per_hour}, name: {name}")
 
         if gpu_count > 0:
             if gpu_total_mem is not None:

From 17dd2b743c779a36f7aa4353723d3b7e9fd73108 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Thu, 12 Sep 2024 11:43:14 -0700
Subject: [PATCH 213/282] Stringify name

---
 python/fedml/api/modules/device.py                      | 2 +-
 python/fedml/computing/scheduler/slave/client_daemon.py | 3 +--
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/python/fedml/api/modules/device.py b/python/fedml/api/modules/device.py
index 9f39419a74..27b2d0d198 100644
--- a/python/fedml/api/modules/device.py
+++ b/python/fedml/api/modules/device.py
@@ -183,7 +183,7 @@ def _bind(
                 "-pph",
                 str(price_per_hour),
                 "-n",
-                name
+                str(name)
             ]
         ).pid
         sys_utils.save_login_process(ClientConstants.LOCAL_HOME_RUNNER_DIR_NAME,
diff --git a/python/fedml/computing/scheduler/slave/client_daemon.py b/python/fedml/computing/scheduler/slave/client_daemon.py
index a82c446c0d..5e346a0901 100755
--- a/python/fedml/computing/scheduler/slave/client_daemon.py
+++ b/python/fedml/computing/scheduler/slave/client_daemon.py
@@ -6,8 +6,7 @@
 
 import fedml
 from fedml.computing.scheduler.comm_utils.sys_utils import cleanup_all_fedml_client_api_processes, \
-    cleanup_all_fedml_client_learning_processes, cleanup_all_fedml_client_login_processes, get_python_program, \
-    daemon_ota_upgrade
+    cleanup_all_fedml_client_learning_processes, cleanup_all_fedml_client_login_processes, get_python_program
 from fedml.computing.scheduler.scheduler_core.general_constants import MarketplaceType
 from fedml.computing.scheduler.slave.client_constants import ClientConstants
 from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils

From 4a198ebd8385a0c7fb83222eb5d5162c2f777604 Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Thu, 12 Sep 2024 14:31:52 -0700
Subject: [PATCH 214/282] Set name arg required to True

---
 python/fedml/computing/scheduler/slave/client_daemon.py | 2 +-
 python/fedml/computing/scheduler/slave/client_login.py  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/slave/client_daemon.py b/python/fedml/computing/scheduler/slave/client_daemon.py
index 5e346a0901..4c69210346 100755
--- a/python/fedml/computing/scheduler/slave/client_daemon.py
+++ b/python/fedml/computing/scheduler/slave/client_daemon.py
@@ -26,7 +26,7 @@
     parser.add_argument("--local_on_premise_platform_port", "-lpp", type=int, default=80)
     parser.add_argument("--marketplace_type", "-mpt", type=str, default=MarketplaceType.SECURE.name)
     parser.add_argument("--price_per_hour", "-pph", type=str, default="0.0")
-    parser.add_argument("--name", "-n", type=str, default="")
+    parser.add_argument("--name", "-n", type=str, required=True)
 
     args = parser.parse_args()
     args.user = args.user
diff --git a/python/fedml/computing/scheduler/slave/client_login.py b/python/fedml/computing/scheduler/slave/client_login.py
index c830f9e8c2..e551639409 100755
--- a/python/fedml/computing/scheduler/slave/client_login.py
+++ b/python/fedml/computing/scheduler/slave/client_login.py
@@ -27,7 +27,7 @@ def logout():
     parser.add_argument("--local_on_premise_platform_port", "-lpp", type=int, default=80)
     parser.add_argument("--marketplace_type", "-mpt", type=str, default=MarketplaceType.SECURE.name)
     parser.add_argument("--price_per_hour", "-pph", type=str, default="0.0")
-    parser.add_argument("--name", "-n", type=str, default="")
+    parser.add_argument("--name", "-n", type=str, required=True)
 
     args = parser.parse_args()
     args.user = args.user

From 16417d57d29e73303ab0bf9023c6dc7e48f82b3a Mon Sep 17 00:00:00 2001
From: Alay Shah <alay11shah@gmail.com>
Date: Thu, 12 Sep 2024 14:36:28 -0700
Subject: [PATCH 215/282] Making name optional

---
 python/fedml/computing/scheduler/slave/client_daemon.py | 2 +-
 python/fedml/computing/scheduler/slave/client_login.py  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/slave/client_daemon.py b/python/fedml/computing/scheduler/slave/client_daemon.py
index 4c69210346..a3dff06e2d 100755
--- a/python/fedml/computing/scheduler/slave/client_daemon.py
+++ b/python/fedml/computing/scheduler/slave/client_daemon.py
@@ -26,7 +26,7 @@
     parser.add_argument("--local_on_premise_platform_port", "-lpp", type=int, default=80)
     parser.add_argument("--marketplace_type", "-mpt", type=str, default=MarketplaceType.SECURE.name)
     parser.add_argument("--price_per_hour", "-pph", type=str, default="0.0")
-    parser.add_argument("--name", "-n", type=str, required=True)
+    parser.add_argument("--name", "-n", type=str, nargs='?', default="")
 
     args = parser.parse_args()
     args.user = args.user
diff --git a/python/fedml/computing/scheduler/slave/client_login.py b/python/fedml/computing/scheduler/slave/client_login.py
index e551639409..6d8b9d1ae1 100755
--- a/python/fedml/computing/scheduler/slave/client_login.py
+++ b/python/fedml/computing/scheduler/slave/client_login.py
@@ -27,7 +27,7 @@ def logout():
     parser.add_argument("--local_on_premise_platform_port", "-lpp", type=int, default=80)
     parser.add_argument("--marketplace_type", "-mpt", type=str, default=MarketplaceType.SECURE.name)
     parser.add_argument("--price_per_hour", "-pph", type=str, default="0.0")
-    parser.add_argument("--name", "-n", type=str, required=True)
+    parser.add_argument("--name", "-n", type=str, nargs='?', default="")
 
     args = parser.parse_args()
     args.user = args.user

From 53aead35928fedfaff68cfc863ea47c34888694d Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Fri, 13 Sep 2024 11:11:27 +0800
Subject: [PATCH 216/282] add the new certs.

---
 python/fedml/core/mlops/mlops_configs.py      | 38 ++++++++---
 .../mlops/ssl/open.chainopera.ai.local.crt    | 63 +++++++++++++++++++
 .../ssl/open.chainopera.ai.local.rootca.crt   | 29 +++++++++
 3 files changed, 121 insertions(+), 9 deletions(-)
 create mode 100644 python/fedml/core/mlops/ssl/open.chainopera.ai.local.crt
 create mode 100644 python/fedml/core/mlops/ssl/open.chainopera.ai.local.rootca.crt

diff --git a/python/fedml/core/mlops/mlops_configs.py b/python/fedml/core/mlops/mlops_configs.py
index 1ed2e0476d..338f59e697 100644
--- a/python/fedml/core/mlops/mlops_configs.py
+++ b/python/fedml/core/mlops/mlops_configs.py
@@ -8,6 +8,7 @@
 
 import fedml
 from fedml.core.mlops.mlops_utils import MLOpsUtils
+from urllib.parse import urlparse
 
 
 class Configs(Enum):
@@ -49,9 +50,15 @@ def get_request_params():
         cert_path = None
         if str(url).startswith("https://"):
             cur_source_dir = os.path.dirname(__file__)
-            cert_path = os.path.join(
-                cur_source_dir, "ssl", "open-" + fedml.get_env_version() + ".fedml.ai_bundle.crt"
-            )
+            version = fedml.get_env_version()
+            if version == "local":
+                cert_path = os.path.join(
+                    cur_source_dir, "ssl", f"{urlparse(url).hostname}.{version}.crt"
+                )
+            else:
+                cert_path = os.path.join(
+                    cur_source_dir, "ssl", "open-" + fedml.get_env_version() + ".fedml.ai_bundle.crt"
+                )
 
         return url, cert_path
 
@@ -88,17 +95,30 @@ def get_cert_path_with_version():
         cert_path = None
         if str(url).startswith("https://"):
             cur_source_dir = os.path.dirname(__file__)
-            cert_path = os.path.join(
-                cur_source_dir, "ssl", "open-" + version + ".fedml.ai_bundle.crt"
-            )
+            if version == "local":
+                cert_path = os.path.join(
+                    cur_source_dir, "ssl", f"{urlparse(url).hostname}.{version}.crt"
+                )
+            else:
+                cert_path = os.path.join(
+                    cur_source_dir, "ssl", "open-" + version + ".fedml.ai_bundle.crt"
+                )
+
         return cert_path
 
     @staticmethod
     def get_root_ca_path():
         cur_source_dir = os.path.dirname(__file__)
-        cert_path = os.path.join(
-            cur_source_dir, "ssl", "open-root-ca.crt"
-        )
+        version = fedml.get_env_version()
+        if version == "local":
+            url = fedml._get_backend_service()
+            cert_path = os.path.join(
+                cur_source_dir, "ssl", f"{urlparse(url).hostname}.{version}.rootca.crt"
+            )
+        else:
+            cert_path = os.path.join(
+                cur_source_dir, "ssl", "open-root-ca.crt"
+            )
         return cert_path
 
     @staticmethod
diff --git a/python/fedml/core/mlops/ssl/open.chainopera.ai.local.crt b/python/fedml/core/mlops/ssl/open.chainopera.ai.local.crt
new file mode 100644
index 0000000000..400c30aaa0
--- /dev/null
+++ b/python/fedml/core/mlops/ssl/open.chainopera.ai.local.crt
@@ -0,0 +1,63 @@
+-----BEGIN CERTIFICATE-----
+MIIF8zCCBFugAwIBAgIQKyZVUxZMMiOwsUN8tL5DgjANBgkqhkiG9w0BAQwFADBZ
+MQswCQYDVQQGEwJDTjElMCMGA1UEChMcVHJ1c3RBc2lhIFRlY2hub2xvZ2llcywg
+SW5jLjEjMCEGA1UEAxMaVHJ1c3RBc2lhIFJTQSBEViBUTFMgQ0EgRzIwHhcNMjQw
+OTA5MDAwMDAwWhcNMjQxMjA4MjM1OTU5WjAdMRswGQYDVQQDExJvcGVuLmNoYWlu
+b3BlcmEuYWkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLX+3ww3v2
+rTn+rvoqkjKPRwhtHtgYjaSRzoovPiiS9QrC/sTHSVAIR/FrftD+MOWn4GmyMBjv
+lYkLi5qT0e5jLQwVQeRHgGZmuBHuNbk4RDzRI1bM2HmO7Lgv6u1Ce0K3CE5U2A5r
+4a40KJFqhJlV6TEOu0XkxMMZ+l1l/rr/1MZDioYP9Bvwq09hvVaokHhrbirhTSYF
+JkvnjaXu1e2Lq7c+7vMphr5AK+H3lT6Ct7PBZw0Yby1AX6EzMbjpqlU+fRwUuOeg
+NJAzSWw9U4lCwW3H3JptZyvjbbm+4V/TZfl3q8G2JUJFkEEE6M3IeL4DIWaf9xAs
+dzFOHPAxuWzRAgMBAAGjggJxMIICbTAfBgNVHSMEGDAWgBRfOnwREH4MZ3Fh3Iuj
+tQADZ/VXHDAdBgNVHQ4EFgQUl+bxD+UL1hxiISSDQA8N2RsEd24wDgYDVR0PAQH/
+BAQDAgWgMAwGA1UdEwEB/wQCMAAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUF
+BwMCMEkGA1UdIARCMEAwNAYLKwYBBAGyMQECAjEwJTAjBggrBgEFBQcCARYXaHR0
+cHM6Ly9zZWN0aWdvLmNvbS9DUFMwCAYGZ4EMAQIBMH0GCCsGAQUFBwEBBHEwbzBC
+BggrBgEFBQcwAoY2aHR0cDovL2NydC50cnVzdC1wcm92aWRlci5jbi9UcnVzdEFz
+aWFSU0FEVlRMU0NBRzIuY3J0MCkGCCsGAQUFBzABhh1odHRwOi8vb2NzcC50cnVz
+dC1wcm92aWRlci5jbjCCAQMGCisGAQQB1nkCBAIEgfQEgfEA7wB1AHb/iD8KtvuV
+UcJhzPWHujS0pM27KdxoQgqf5mdMWjp0AAABkdUkleAAAAQDAEYwRAIgLnpFW+eZ
+M0ueXvdQpXsBRoWQUrL+UdD9gFSoH140GR4CICq/zgGD7Nwwehb3BpdpVLr0sfA3
+9i39Bm11r0yipqvGAHYAPxdLT9ciR1iUHWUchL4NEu2QN38fhWrrwb8ohez4ZG4A
+AAGR1SSVwgAABAMARzBFAiEAnR+qJq2xcMYoBG4J6xJwhX+a/WoBSN0AVs7EGEv2
+WxgCIF38/4vZJ6Hf+5R/j1SH/XRuFoiV9/dU1dqHn/2C78bqMB0GA1UdEQQWMBSC
+Em9wZW4uY2hhaW5vcGVyYS5haTANBgkqhkiG9w0BAQwFAAOCAYEAjjqLYm/6hx4s
+ZSbPvyCsQ2KbjjX50aKHhk+/rkcGBtwwfDqF926/pW04dQ7/YiA+8eQGcg8yORSB
+YSPoJjKzj72dt0KfccVAvido/2OnFBzDGqSPYXEpsaC//zioztao5DxWHPM8BtMo
+nkav7slLkCBAtH1B8P50usY3b2k4JoIzPSowMxyfHeCMyzW90X9AhegPl/3SVTaJ
+ec8l/oLpmhYWKaX8QkDfdBL2ceOVj2QDVNmdvy4UNKD/ZFedL/rZAETSx6H2bgGZ
+PukL0gXSCaEydi33cKi46ExogHkdqp9nmay9un20ZESbOH9o3fth7EtzlK8s98tG
+uiqxm+Gq4rSU2mS1zxaHQsKANBN52LdA86TZPxE48jPtvTMXbZhHujJ3OIqwOwIU
+yA0p72D0uXLlRtusun3xq3vAcG7L4n8qLu601oJPkOd2asFYWCXI3D33bpPhLW2g
+Ds2c6MGRaNxrmYx90fzyudF7w40AX9PMgBLWKuuX+qiEpab9MHhI
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIFBzCCA++gAwIBAgIRALIM7VUuMaC/NDp1KHQ76aswDQYJKoZIhvcNAQELBQAw
+ezELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNV
+BAMMGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczAeFw0yMjAxMTAwMDAwMDBaFw0y
+ODEyMzEyMzU5NTlaMFkxCzAJBgNVBAYTAkNOMSUwIwYDVQQKExxUcnVzdEFzaWEg
+VGVjaG5vbG9naWVzLCBJbmMuMSMwIQYDVQQDExpUcnVzdEFzaWEgUlNBIERWIFRM
+UyBDQSBHMjCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoCggGBAKjGDe0GSaBs
+Yl/VhMaTM6GhfR1TAt4mrhN8zfAMwEfLZth+N2ie5ULbW8YvSGzhqkDhGgSBlafm
+qq05oeESrIJQyz24j7icGeGyIZ/jIChOOvjt4M8EVi3O0Se7E6RAgVYcX+QWVp5c
+Sy+l7XrrtL/pDDL9Bngnq/DVfjCzm5ZYUb1PpyvYTP7trsV+yYOCNmmwQvB4yVjf
+IIpHC1OcsPBntMUGeH1Eja4D+qJYhGOxX9kpa+2wTCW06L8T6OhkpJWYn5JYiht5
+8exjAR7b8Zi3DeG9oZO5o6Qvhl3f8uGU8lK1j9jCUN/18mI/5vZJ76i+hsgdlfZB
+Rh5lmAQjD80M9TY+oD4MYUqB5XrigPfFAUwXFGehhlwCVw7y6+5kpbq/NpvM5Ba8
+SeQYUUuMA8RXpTtGlrrTPqJryfa55hTuX/ThhX4gcCVkbyujo0CYr+Uuc14IOyNY
+1fD0/qORbllbgV41wiy/2ZUWZQUodqHWkjT1CwIMbQOY5jmrSYGBwwIDAQABo4IB
+JjCCASIwHwYDVR0jBBgwFoAUoBEKIz6W8Qfs4q8p74Klf9AwpLQwHQYDVR0OBBYE
+FF86fBEQfgxncWHci6O1AANn9VccMA4GA1UdDwEB/wQEAwIBhjASBgNVHRMBAf8E
+CDAGAQH/AgEAMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAiBgNVHSAE
+GzAZMA0GCysGAQQBsjEBAgIxMAgGBmeBDAECATBDBgNVHR8EPDA6MDigNqA0hjJo
+dHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNy
+bDA0BggrBgEFBQcBAQQoMCYwJAYIKwYBBQUHMAGGGGh0dHA6Ly9vY3NwLmNvbW9k
+b2NhLmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAHMUom5cxIje2IiFU7mOCsBr2F6CY
+eU5cyfQ/Aep9kAXYUDuWsaT85721JxeXFYkf4D/cgNd9+hxT8ZeDOJrn+ysqR7NO
+2K9AdqTdIY2uZPKmvgHOkvH2gQD6jc05eSPOwdY/10IPvmpgUKaGOa/tyygL8Og4
+3tYyoHipMMnS4OiYKakDJny0XVuchIP7ZMKiP07Q3FIuSS4omzR77kmc75/6Q9dP
+v4wa90UCOn1j6r7WhMmX3eT3Gsdj3WMe9bYD0AFuqa6MDyjIeXq08mVGraXiw73s
+Zale8OMckn/BU3O/3aFNLHLfET2H2hT6Wb3nwxjpLIfXmSVcVd8A58XH0g==
+-----END CERTIFICATE-----
\ No newline at end of file
diff --git a/python/fedml/core/mlops/ssl/open.chainopera.ai.local.rootca.crt b/python/fedml/core/mlops/ssl/open.chainopera.ai.local.rootca.crt
new file mode 100644
index 0000000000..e87d0c5783
--- /dev/null
+++ b/python/fedml/core/mlops/ssl/open.chainopera.ai.local.rootca.crt
@@ -0,0 +1,29 @@
+-----BEGIN CERTIFICATE-----
+MIIFBzCCA++gAwIBAgIRALIM7VUuMaC/NDp1KHQ76aswDQYJKoZIhvcNAQELBQAw
+ezELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNV
+BAMMGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczAeFw0yMjAxMTAwMDAwMDBaFw0y
+ODEyMzEyMzU5NTlaMFkxCzAJBgNVBAYTAkNOMSUwIwYDVQQKExxUcnVzdEFzaWEg
+VGVjaG5vbG9naWVzLCBJbmMuMSMwIQYDVQQDExpUcnVzdEFzaWEgUlNBIERWIFRM
+UyBDQSBHMjCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoCggGBAKjGDe0GSaBs
+Yl/VhMaTM6GhfR1TAt4mrhN8zfAMwEfLZth+N2ie5ULbW8YvSGzhqkDhGgSBlafm
+qq05oeESrIJQyz24j7icGeGyIZ/jIChOOvjt4M8EVi3O0Se7E6RAgVYcX+QWVp5c
+Sy+l7XrrtL/pDDL9Bngnq/DVfjCzm5ZYUb1PpyvYTP7trsV+yYOCNmmwQvB4yVjf
+IIpHC1OcsPBntMUGeH1Eja4D+qJYhGOxX9kpa+2wTCW06L8T6OhkpJWYn5JYiht5
+8exjAR7b8Zi3DeG9oZO5o6Qvhl3f8uGU8lK1j9jCUN/18mI/5vZJ76i+hsgdlfZB
+Rh5lmAQjD80M9TY+oD4MYUqB5XrigPfFAUwXFGehhlwCVw7y6+5kpbq/NpvM5Ba8
+SeQYUUuMA8RXpTtGlrrTPqJryfa55hTuX/ThhX4gcCVkbyujo0CYr+Uuc14IOyNY
+1fD0/qORbllbgV41wiy/2ZUWZQUodqHWkjT1CwIMbQOY5jmrSYGBwwIDAQABo4IB
+JjCCASIwHwYDVR0jBBgwFoAUoBEKIz6W8Qfs4q8p74Klf9AwpLQwHQYDVR0OBBYE
+FF86fBEQfgxncWHci6O1AANn9VccMA4GA1UdDwEB/wQEAwIBhjASBgNVHRMBAf8E
+CDAGAQH/AgEAMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAiBgNVHSAE
+GzAZMA0GCysGAQQBsjEBAgIxMAgGBmeBDAECATBDBgNVHR8EPDA6MDigNqA0hjJo
+dHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNy
+bDA0BggrBgEFBQcBAQQoMCYwJAYIKwYBBQUHMAGGGGh0dHA6Ly9vY3NwLmNvbW9k
+b2NhLmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAHMUom5cxIje2IiFU7mOCsBr2F6CY
+eU5cyfQ/Aep9kAXYUDuWsaT85721JxeXFYkf4D/cgNd9+hxT8ZeDOJrn+ysqR7NO
+2K9AdqTdIY2uZPKmvgHOkvH2gQD6jc05eSPOwdY/10IPvmpgUKaGOa/tyygL8Og4
+3tYyoHipMMnS4OiYKakDJny0XVuchIP7ZMKiP07Q3FIuSS4omzR77kmc75/6Q9dP
+v4wa90UCOn1j6r7WhMmX3eT3Gsdj3WMe9bYD0AFuqa6MDyjIeXq08mVGraXiw73s
+Zale8OMckn/BU3O/3aFNLHLfET2H2hT6Wb3nwxjpLIfXmSVcVd8A58XH0g==
+-----END CERTIFICATE-----

From f46cd1e2e5920d3be5c1edcbbb5f19a9c7ba0b38 Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Fri, 13 Sep 2024 11:13:52 +0800
Subject: [PATCH 217/282] update new certs.

---
 python/setup.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/python/setup.py b/python/setup.py
index 4757c10a17..3847f360fa 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -173,6 +173,8 @@ def finalize_options(self):
                 "fedml/core/mlops/ssl/open-test.fedml.ai_bundle.crt",
                 "fedml/core/mlops/ssl/open-release.fedml.ai_bundle.crt",
                 "fedml/core/mlops/ssl/open-root-ca.crt",
+                "fedml/core/mlops/ssl/open.chainopera.ai.local.crt",
+                "fedml/core/mlops/ssl/open.chainopera.ai.local.rootca.crt",
             ],
         )
     ],

From e046f5b6dd7a70e7b928f4778d32cb99e19a7e41 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@tensoropera.com>
Date: Fri, 13 Sep 2024 23:30:27 +0000
Subject: [PATCH 218/282] Fixing grpc compatibility with the fedml.ai platform
 and simplifying the user-end requirements to define the grpc server ip and
 port. The values defined in the grpc config file will override the default ip
 and port values.

---
 .../custom_data_and_model/README.md           |  22 ++-
 .../config/grpc_ipconfig.csv                  |   6 +-
 .../one_line/README.md                        |  22 ++-
 .../one_line/config/grpc_ipconfig.csv         |   6 +-
 .../step_by_step/README.md                    |  22 ++-
 .../step_by_step/config/grpc_ipconfig.csv     |   6 +-
 .../model_scheduler/autoscaler/autoscaler.py  |   2 +-
 .../communication/grpc/grpc_comm_manager.py   | 138 ++++++++++--------
 .../communication/grpc/ip_config_utils.py     |  14 --
 .../core/distributed/communication/message.py |  17 ++-
 .../core/distributed/fedml_comm_manager.py    |   8 +-
 .../client/fedml_client_master_manager.py     |   5 +-
 .../fedml/cross_silo/client/message_define.py |   1 +
 13 files changed, 158 insertions(+), 111 deletions(-)
 delete mode 100644 python/fedml/core/distributed/communication/grpc/ip_config_utils.py

diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/README.md b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/README.md
index d125847dd6..da37ebda43 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/README.md
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/README.md
@@ -1,7 +1,21 @@
-## Training Script
 
-At the client side, the client ID (a.k.a rank) starts from 1.
-Please also modify config/fedml_config.yaml, changing the `worker_num` the as the number of clients you plan to run.
+## Prerequisites
+At the client side, the client ID (a.k.a rank) starts from 1. 
+Please also modify `config/fedml_config.yaml` as you see fit. Changing the `worker_num` the as the number of clients you plan to run.
+The default ip of every groc server is set to `0.0.0.0`, and all grpc ports start from 8890 and increase based on the client's rank.
+
+> **_NOTE:_** 
+> The `config/grpc_ipconfig.csv` file contains only one record referring to the grpc server of 
+> the aggregator (rank: 0). This record is mandatory. However, you can change the values of the `ip` and `port` 
+> attributes as you see fit, and more records for grpc server of the rest of clients. For instance:
+```
+eid,rank,ip,port
+0,0,0.0.0.0,8890
+1,1,0.0.0.0,8899
+2,2,0.0.0.0,8898
+```
+
+## Start Script
 
 At the server side, run the following script:
 ```
@@ -18,7 +32,7 @@ bash run_client.sh 2 your_run_id
 ```
 Note: please run the server first.
 
-## A Better User-experience with FedML MLOps (open.fedml.ai)
+## A Better User-experience with FedML FLOps (fedml.ai)
 To reduce the difficulty and complexity of these CLI commands. We recommend you to use our MLOps (open.fedml.ai).
 FedML MLOps provides:
 - Install Client Agent and Login
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/config/grpc_ipconfig.csv b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/config/grpc_ipconfig.csv
index 4618a98b7a..c92391a46a 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/config/grpc_ipconfig.csv
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/config/grpc_ipconfig.csv
@@ -1,4 +1,2 @@
-receiver_id,ip
-0,0.0.0.0
-1,0.0.0.0
-2,0.0.0.0
\ No newline at end of file
+eid,rank,ip,port
+0,0,0.0.0.0,8890
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/README.md b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/README.md
index d125847dd6..da37ebda43 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/README.md
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/README.md
@@ -1,7 +1,21 @@
-## Training Script
 
-At the client side, the client ID (a.k.a rank) starts from 1.
-Please also modify config/fedml_config.yaml, changing the `worker_num` the as the number of clients you plan to run.
+## Prerequisites
+At the client side, the client ID (a.k.a rank) starts from 1. 
+Please also modify `config/fedml_config.yaml` as you see fit. Changing the `worker_num` the as the number of clients you plan to run.
+The default ip of every groc server is set to `0.0.0.0`, and all grpc ports start from 8890 and increase based on the client's rank.
+
+> **_NOTE:_** 
+> The `config/grpc_ipconfig.csv` file contains only one record referring to the grpc server of 
+> the aggregator (rank: 0). This record is mandatory. However, you can change the values of the `ip` and `port` 
+> attributes as you see fit, and more records for grpc server of the rest of clients. For instance:
+```
+eid,rank,ip,port
+0,0,0.0.0.0,8890
+1,1,0.0.0.0,8899
+2,2,0.0.0.0,8898
+```
+
+## Start Script
 
 At the server side, run the following script:
 ```
@@ -18,7 +32,7 @@ bash run_client.sh 2 your_run_id
 ```
 Note: please run the server first.
 
-## A Better User-experience with FedML MLOps (open.fedml.ai)
+## A Better User-experience with FedML FLOps (fedml.ai)
 To reduce the difficulty and complexity of these CLI commands. We recommend you to use our MLOps (open.fedml.ai).
 FedML MLOps provides:
 - Install Client Agent and Login
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/config/grpc_ipconfig.csv b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/config/grpc_ipconfig.csv
index 4618a98b7a..b46eb6d33b 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/config/grpc_ipconfig.csv
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/config/grpc_ipconfig.csv
@@ -1,4 +1,2 @@
-receiver_id,ip
-0,0.0.0.0
-1,0.0.0.0
-2,0.0.0.0
\ No newline at end of file
+eid,rank,ip,port
+0,0,0.0.0.0,8890
\ No newline at end of file
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/README.md b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/README.md
index d125847dd6..da37ebda43 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/README.md
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/README.md
@@ -1,7 +1,21 @@
-## Training Script
 
-At the client side, the client ID (a.k.a rank) starts from 1.
-Please also modify config/fedml_config.yaml, changing the `worker_num` the as the number of clients you plan to run.
+## Prerequisites
+At the client side, the client ID (a.k.a rank) starts from 1. 
+Please also modify `config/fedml_config.yaml` as you see fit. Changing the `worker_num` the as the number of clients you plan to run.
+The default ip of every groc server is set to `0.0.0.0`, and all grpc ports start from 8890 and increase based on the client's rank.
+
+> **_NOTE:_** 
+> The `config/grpc_ipconfig.csv` file contains only one record referring to the grpc server of 
+> the aggregator (rank: 0). This record is mandatory. However, you can change the values of the `ip` and `port` 
+> attributes as you see fit, and more records for grpc server of the rest of clients. For instance:
+```
+eid,rank,ip,port
+0,0,0.0.0.0,8890
+1,1,0.0.0.0,8899
+2,2,0.0.0.0,8898
+```
+
+## Start Script
 
 At the server side, run the following script:
 ```
@@ -18,7 +32,7 @@ bash run_client.sh 2 your_run_id
 ```
 Note: please run the server first.
 
-## A Better User-experience with FedML MLOps (open.fedml.ai)
+## A Better User-experience with FedML FLOps (fedml.ai)
 To reduce the difficulty and complexity of these CLI commands. We recommend you to use our MLOps (open.fedml.ai).
 FedML MLOps provides:
 - Install Client Agent and Login
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/config/grpc_ipconfig.csv b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/config/grpc_ipconfig.csv
index 4618a98b7a..b46eb6d33b 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/config/grpc_ipconfig.csv
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/config/grpc_ipconfig.csv
@@ -1,4 +1,2 @@
-receiver_id,ip
-0,0.0.0.0
-1,0.0.0.0
-2,0.0.0.0
\ No newline at end of file
+eid,rank,ip,port
+0,0,0.0.0.0,8890
\ No newline at end of file
diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
index dd6ca67706..3f3da8d656 100644
--- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
+++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
@@ -8,7 +8,7 @@
 from enum import Enum
 from fedml.computing.scheduler.model_scheduler.device_model_cache import FedMLModelCache
 from fedml.computing.scheduler.model_scheduler.autoscaler.policies import *
-from utils.singleton import Singleton
+from fedml.computing.scheduler.model_scheduler.autoscaler.utils.singleton import Singleton
 
 
 class ScaleOp(Enum):
diff --git a/python/fedml/core/distributed/communication/grpc/grpc_comm_manager.py b/python/fedml/core/distributed/communication/grpc/grpc_comm_manager.py
index 6eb9fe613e..a2cc255437 100644
--- a/python/fedml/core/distributed/communication/grpc/grpc_comm_manager.py
+++ b/python/fedml/core/distributed/communication/grpc/grpc_comm_manager.py
@@ -1,52 +1,50 @@
+import csv
+import grpc
+import logging
 import os
 import pickle
 import threading
-from concurrent import futures
-from typing import List
-
-import grpc
-
-from ..grpc import grpc_comm_manager_pb2_grpc, grpc_comm_manager_pb2
+import time
 
 lock = threading.Lock()
 
+from concurrent import futures
+from fedml.core.mlops.mlops_profiler_event import MLOpsProfilerEvent
+from typing import List
+
 from ...communication.base_com_manager import BaseCommunicationManager
 from ...communication.message import Message
 from ...communication.observer import Observer
 from ..constants import CommunicationConstants
-
-from fedml.core.mlops.mlops_profiler_event import MLOpsProfilerEvent
-
-import time
+from ..grpc import grpc_comm_manager_pb2_grpc, grpc_comm_manager_pb2
 
 # Check Service or serve?
 from ...communication.grpc.grpc_server import GRPCCOMMServicer
 
-import logging
-
-import csv
-
 
 class GRPCCommManager(BaseCommunicationManager):
+    MSG_ARG_KEY_SENDER_RANK = "sender_rank"
+    MSG_ARG_KEY_SENDER_IP = "sender_ip"
+    MSG_ARG_KEY_SENDER_PORT = "sender_port"
+
     def __init__(
-        self,
-        host,
-        port,
-        ip_config_path,
-        topic="fedml",
-        client_id=0,
-        client_num=0,
+            self,
+            grpc_ipconfig_path,
+            topic="fedml",
+            client_rank=0,
+            client_num=0,
+            args=None
     ):
-        # host is the ip address of server
-        self.host = host
-        self.port = str(port)
+
         self._topic = topic
-        self.client_id = client_id
-        self.client_num = client_num
         self._observers: List[Observer] = []
-        self.rank = client_id
+        self.grpc_ipconfig_path = grpc_ipconfig_path
+        self.client_rank = client_rank
+        self.client_id = self.client_rank
+        self.client_num = client_num
+        self.args = args
 
-        if client_id == 0:
+        if self.client_rank == 0:
             self.node_type = "server"
             logging.info("############# THIS IS FL SERVER ################")
         else:
@@ -61,24 +59,43 @@ def __init__(
             futures.ThreadPoolExecutor(max_workers=client_num),
             options=self.opts,
         )
-        self.grpc_servicer = GRPCCOMMServicer(host, port, client_num, client_id)
+
+        self.grpc_mappings = self._init_grpc_mappings()  # Load input mappings.
+        if self.client_id not in self.grpc_mappings:
+            # if no record exists for the current client id, then
+            # default ip and rank to "0.0.0.0" and BASE + RANK.
+            self.ip = "0.0.0.0"
+            self.port = CommunicationConstants.GRPC_BASE_PORT + self.client_rank
+            self.grpc_mappings[self.client_id] = (self.client_rank, self.ip, self.port)
+        else:
+            _, self.ip, self.port = self.grpc_mappings[self.client_id]
+
+        self.grpc_servicer = GRPCCOMMServicer(
+            self.ip,
+            self.port,
+            self.client_num,
+            self.client_rank
+        )
         grpc_comm_manager_pb2_grpc.add_gRPCCommManagerServicer_to_server(
             self.grpc_servicer, self.grpc_server
         )
         logging.info(os.getcwd())
-        self.ip_config = self._build_ip_table(ip_config_path)
 
-        # starts a grpc_server on local machine using ip address "0.0.0.0"
-        self.grpc_server.add_insecure_port("{}:{}".format("0.0.0.0", port))
+        self.grpc_server.add_insecure_port("{}:{}".format(self.ip, self.port))
 
         self.grpc_server.start()
+        # Wait for 100 milliseconds to make sure the grpc
+        # server has started before proceeding.
+        time.sleep(0.01)
         self.is_running = True
-        logging.info("grpc server started. Listening on port " + str(port))
+        logging.info("grpc server started. Listening on port " + str(self.port))
 
     def send_message(self, msg: Message):
-        logging.info("msg = {}".format(msg))
-        # payload = msg.to_json()
-
+        # Register the sender rank, ip and port attribute on the message.
+        msg.add_params(GRPCCommManager.MSG_ARG_KEY_SENDER_RANK, self.client_rank)
+        msg.add_params(GRPCCommManager.MSG_ARG_KEY_SENDER_IP, self.ip)
+        msg.add_params(GRPCCommManager.MSG_ARG_KEY_SENDER_PORT, self.port)
+        logging.info("sending msg = {}".format(msg.get_params_wout_model()))
         logging.info("pickle.dumps(msg) START")
         pickle_dump_start_time = time.time()
         msg_pkl = pickle.dumps(msg)
@@ -86,10 +103,8 @@ def send_message(self, msg: Message):
         logging.info("pickle.dumps(msg) END")
 
         receiver_id = msg.get_receiver_id()
-        PORT_BASE = CommunicationConstants.GRPC_BASE_PORT
-        # lookup ip of receiver from self.ip_config table
-        receiver_ip = self.ip_config[str(receiver_id)]
-        channel_url = "{}:{}".format(receiver_ip, str(PORT_BASE + receiver_id))
+        receiver_rank, receiver_ip, receiver_port = self.grpc_mappings[int(receiver_id)]
+        channel_url = "{}:{}".format(receiver_ip, receiver_port)
 
         channel = grpc.insecure_channel(channel_url, options=self.opts)
         stub = grpc_comm_manager_pb2_grpc.gRPCCommManagerStub(channel)
@@ -98,7 +113,6 @@ def send_message(self, msg: Message):
         logging.info("sending message to {}".format(channel_url))
 
         request.client_id = self.client_id
-
         request.message = msg_pkl
 
         tick = time.time()
@@ -116,10 +130,8 @@ def remove_observer(self, observer: Observer):
     def handle_receive_message(self):
         self._notify_connection_ready()
         self.message_handling_subroutine()
-
-        # Cannont run message_handling_subroutine in new thread
+        # Cannot run message_handling_subroutine in new thread
         # Related https://stackoverflow.com/a/70705165
-        
         # thread = threading.Thread(target=self.message_handling_subroutine)
         # thread.start()
 
@@ -131,19 +143,28 @@ def message_handling_subroutine(self):
                 lock.acquire()
                 busy_time_start_time = time.time()
                 msg_pkl = self.grpc_servicer.message_q.get()
-                logging.info("unpickle START")
+                logging.info("Unpickle START.")
                 unpickle_start_time = time.time()
                 msg = pickle.loads(msg_pkl)
                 MLOpsProfilerEvent.log_to_wandb({"UnpickleTime": time.time() - unpickle_start_time})
-                logging.info("unpickle END")
+                logging.info("Unpickle END.")
                 msg_type = msg.get_type()
+
+                sender_id = int(msg.get_sender_id())
+                if sender_id not in self.grpc_mappings:
+                    sender_rank = int(msg.get_params()[GRPCCommManager.MSG_ARG_KEY_SENDER_RANK])
+                    sender_ip = str(msg.get_params()[GRPCCommManager.MSG_ARG_KEY_SENDER_IP])
+                    sender_port = int(msg.get_params()[GRPCCommManager.MSG_ARG_KEY_SENDER_PORT])
+                    self.grpc_mappings[sender_id] = (sender_rank, sender_ip, sender_port)
+
                 for observer in self._observers:
                     _message_handler_start_time = time.time()
                     observer.receive_message(msg_type, msg)
                     MLOpsProfilerEvent.log_to_wandb({"MessageHandlerTime": time.time() - _message_handler_start_time})
                 MLOpsProfilerEvent.log_to_wandb({"BusyTime": time.time() - busy_time_start_time})
                 lock.release()
-            time.sleep(0.0001)
+        time.sleep(0.0001)
+
         MLOpsProfilerEvent.log_to_wandb({"TotalTime": time.time() - start_listening_time})
         return
 
@@ -158,20 +179,17 @@ def notify(self, message: Message):
 
     def _notify_connection_ready(self):
         msg_params = Message()
-        msg_params.sender_id = self.rank
-        msg_params.receiver_id = self.rank
+        msg_params.sender_id = self.client_rank
+        msg_params.receiver_id = self.client_rank
         msg_type = CommunicationConstants.MSG_TYPE_CONNECTION_IS_READY
         for observer in self._observers:
             observer.receive_message(msg_type, msg_params)
 
-    def _build_ip_table(self, path):
-        ip_config = dict()
-        with open(path, newline="") as csv_file:
-            csv_reader = csv.reader(csv_file)
-            # skip header line
-            next(csv_reader)
-
-            for row in csv_reader:
-                receiver_id, receiver_ip = row
-                ip_config[receiver_id] = receiver_ip
-        return ip_config
+    def _init_grpc_mappings(self):
+        mappings = dict()
+        csv_reader = csv.reader(open(self.grpc_ipconfig_path, "r"))
+        next(csv_reader)  # skip header line
+        for row in csv_reader:
+            eid, rank, ip, port = row
+            mappings[int(eid)] = (int(rank), str(ip), int(port))
+        return mappings
diff --git a/python/fedml/core/distributed/communication/grpc/ip_config_utils.py b/python/fedml/core/distributed/communication/grpc/ip_config_utils.py
deleted file mode 100644
index 1ebedfd73a..0000000000
--- a/python/fedml/core/distributed/communication/grpc/ip_config_utils.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import csv
-
-
-def build_ip_table(path):
-    ip_config = dict()
-    with open(path, newline="") as csv_file:
-        csv_reader = csv.reader(csv_file)
-        # skip header line
-        next(csv_reader)
-
-        for row in csv_reader:
-            receiver_id, receiver_ip = row
-            ip_config[receiver_id] = receiver_ip
-    return ip_config
diff --git a/python/fedml/core/distributed/communication/message.py b/python/fedml/core/distributed/communication/message.py
index 7d465461e5..7b1bc63cec 100644
--- a/python/fedml/core/distributed/communication/message.py
+++ b/python/fedml/core/distributed/communication/message.py
@@ -4,16 +4,10 @@
 
 class Message(object):
 
-    MSG_ARG_KEY_OPERATION = "operation"
     MSG_ARG_KEY_TYPE = "msg_type"
     MSG_ARG_KEY_SENDER = "sender"
     MSG_ARG_KEY_RECEIVER = "receiver"
 
-    MSG_OPERATION_SEND = "send"
-    MSG_OPERATION_RECEIVE = "receive"
-    MSG_OPERATION_BROADCAST = "broadcast"
-    MSG_OPERATION_REDUCE = "reduce"
-
     MSG_ARG_KEY_MODEL_PARAMS = "model_params"
     MSG_ARG_KEY_MODEL_PARAMS_URL = "model_params_url"
     MSG_ARG_KEY_MODEL_PARAMS_KEY = "model_params_key"
@@ -54,6 +48,15 @@ def add_params(self, key, value):
     def get_params(self):
         return self.msg_params
 
+    def get_params_wout_model(self):
+        # We explicitly return the message triple, because the msg params
+        # dictionary is populated at different stages during execution,
+        # e.g., Message.MSG_ARG_KEY_MODEL_PARAMS
+        return {
+            k: v for k, v in self.msg_params.items()
+            if k != Message.MSG_ARG_KEY_MODEL_PARAMS
+        }
+
     def add(self, key, value):
         self.msg_params[key] = value
 
@@ -65,7 +68,7 @@ def get(self, key):
     def get_type(self):
         return self.msg_params[Message.MSG_ARG_KEY_TYPE]
 
-    def to_string(self):
+    def to_string(self, include_model_params=True):
         return self.msg_params
 
     def to_json(self):
diff --git a/python/fedml/core/distributed/fedml_comm_manager.py b/python/fedml/core/distributed/fedml_comm_manager.py
index d358b0b7cd..29cd498bdc 100644
--- a/python/fedml/core/distributed/fedml_comm_manager.py
+++ b/python/fedml/core/distributed/fedml_comm_manager.py
@@ -188,11 +188,11 @@ def _init_manager(self):
             )
         elif self.backend == "GRPC":
             from .communication.grpc.grpc_comm_manager import GRPCCommManager
-
-            HOST = "0.0.0.0"
-            PORT = CommunicationConstants.GRPC_BASE_PORT + self.rank
             self.com_manager = GRPCCommManager(
-                HOST, PORT, ip_config_path=self.args.grpc_ipconfig_path, client_id=self.rank, client_num=self.size,
+                grpc_ipconfig_path=self.args.grpc_ipconfig_path,
+                client_rank=self.rank,
+                client_num=self.size,
+                args=self.args,
             )
         elif self.backend == "TRPC":
             from .communication.trpc.trpc_comm_manager import TRPCCommManager
diff --git a/python/fedml/cross_silo/client/fedml_client_master_manager.py b/python/fedml/cross_silo/client/fedml_client_master_manager.py
index f03f7f03c1..fa333af2cc 100644
--- a/python/fedml/cross_silo/client/fedml_client_master_manager.py
+++ b/python/fedml/cross_silo/client/fedml_client_master_manager.py
@@ -78,7 +78,10 @@ def register_message_receive_handlers(self):
             MyMessage.MSG_TYPE_S2C_CHECK_CLIENT_STATUS, self.handle_message_check_status
         )
 
-        self.register_message_receive_handler(MyMessage.MSG_TYPE_S2C_INIT_CONFIG, self.handle_message_init)
+        self.register_message_receive_handler(
+            MyMessage.MSG_TYPE_S2C_INIT_CONFIG, self.handle_message_init
+        )
+
         self.register_message_receive_handler(
             MyMessage.MSG_TYPE_S2C_SYNC_MODEL_TO_CLIENT, self.handle_message_receive_model_from_server,
         )
diff --git a/python/fedml/cross_silo/client/message_define.py b/python/fedml/cross_silo/client/message_define.py
index ef482e102b..0b694c68c0 100644
--- a/python/fedml/cross_silo/client/message_define.py
+++ b/python/fedml/cross_silo/client/message_define.py
@@ -1,3 +1,4 @@
+
 class MyMessage(object):
     """
     message type definition

From 303f29bbce2740103039dd787764b1242d584290 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@tensoropera.com>
Date: Sat, 14 Sep 2024 00:54:11 +0000
Subject: [PATCH 219/282] Removing empty line.

---
 .../computing/scheduler/model_scheduler/autoscaler/autoscaler.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
index 4da3d1135d..4cab1e133c 100644
--- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
+++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
@@ -8,7 +8,6 @@
 from enum import Enum
 from fedml.computing.scheduler.model_scheduler.device_model_cache import FedMLModelCache
 from fedml.computing.scheduler.model_scheduler.autoscaler.policies import *
-
 from fedml.computing.scheduler.comm_utils.singleton import Singleton
 
 

From 142089860238d4a577e56d208deb008cc736503c Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Mon, 16 Sep 2024 09:36:23 +0800
Subject: [PATCH 220/282] [CoreEngine] set the cuda visible id into the docker
 container when training.

---
 python/fedml/computing/scheduler/slave/base_slave_job_runner.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
index 5e530dbba7..9ea2c4beaf 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py
@@ -251,6 +251,7 @@ def start_runner_process(
             agent_config=None, edge_id=edge_id
         )
         client_runner.start_request_json = request_json
+        client_runner.cuda_visible_gpu_ids_str = cuda_visible_gpu_ids_str
         run_id_str = str(run_id)
         self.run_process_event = multiprocessing.Event()
         client_runner.run_process_event = self.run_process_event

From 30cfe0228c7794bfb2fc65fbadf9a1b531956f3a Mon Sep 17 00:00:00 2001
From: Alex <alex.gpt.llm@gmail.com>
Date: Wed, 18 Sep 2024 19:38:59 +0800
Subject: [PATCH 221/282] set the gpu ids when training.

---
 .../scheduler_core/scheduler_base_job_runner_manager.py          | 1 +
 1 file changed, 1 insertion(+)

diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
index dcc4045699..0e30beeab4 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py
@@ -29,6 +29,7 @@ def start_job_runner(
         )
         self.job_runners[run_id_str].start_runner_process(
             run_id, request_json, edge_id=edge_id,
+            cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str,
             sender_message_queue=sender_message_queue,
             listener_message_queue=listener_message_queue,
             status_center_queue=status_center_queue

From 16a79d9f1f431e687cf387b2383eb9a0b5ecbfef Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@tensoropera.com>
Date: Wed, 16 Oct 2024 21:45:51 +0000
Subject: [PATCH 222/282] Adding simple local env docker client checker.

---
 .../scheduler/comm_utils/job_utils.py          |  9 +++++++++
 .../slave/base_slave_protocol_manager.py       | 18 +++++++++---------
 2 files changed, 18 insertions(+), 9 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_utils.py b/python/fedml/computing/scheduler/comm_utils/job_utils.py
index 8a917e539d..ae50239d25 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_utils.py
@@ -574,6 +574,15 @@ def get_run_container_name(run_id: int) -> str:
         container_name = f"{container_prefix}__{run_id}"
         return container_name
 
+    @staticmethod
+    def docker_client_exists() -> bool:
+        try:
+            client = docker.from_env()
+            client.ping()
+            return True
+        except docker.errors.DockerException:
+            return False
+
     @staticmethod
     def get_docker_client(docker_args: DockerArgs) -> DockerClient:
         try:
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index 447bd05cd9..bb2215bf95 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -509,16 +509,16 @@ def process_status(self, run_id, status, edge_id, master_id=None):
             if run_process is not None:
                 if run_process.pid is not None:
                     RunProcessUtils.kill_process(run_process.pid)
-
                     # Terminate the run docker container if exists
-                    try:
-                        container_name = JobRunnerUtils.get_run_container_name(run_id)
-                        docker_client = JobRunnerUtils.get_docker_client(DockerArgs())
-                        logging.info(f"Terminating the run docker container {container_name} if exists...")
-                        JobRunnerUtils.remove_run_container_if_exists(container_name, docker_client)
-                    except Exception as e:
-                        logging.error(f"Error occurred when terminating docker container."
-                                      f"Exception: {e}, Traceback: {traceback.format_exc()}.")
+                    if JobRunnerUtils.docker_client_exists():
+                        try:
+                            docker_client = JobRunnerUtils.get_docker_client(DockerArgs())
+                            container_name = JobRunnerUtils.get_run_container_name(run_id)
+                            logging.info(f"Terminating the run docker container {container_name} if exists...")
+                            JobRunnerUtils.remove_run_container_if_exists(container_name, docker_client)
+                        except Exception as e:
+                            logging.error(f"Error occurred when terminating docker container."
+                                          f"Exception: {e}, Traceback: {traceback.format_exc()}.")
 
             # Stop log processor for current run
             MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, edge_id)

From f299a8e451a4148f4d3dd62694efc116bcfadf48 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@tensoropera.com>
Date: Wed, 16 Oct 2024 22:27:36 +0000
Subject: [PATCH 223/282] Adding more docker client existence checkpoints.

---
 .../scheduler_base_job_runner.py              | 20 ++++++++++---------
 .../slave/base_slave_protocol_manager.py      |  3 ++-
 2 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
index 6e0010f556..f5e08107f2 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -616,15 +616,17 @@ def cleanup_containers_and_release_gpus(run_id, edge_id, job_type=SchedulerConst
         if not (job_type == SchedulerConstants.JOB_TASK_TYPE_SERVE or
                 job_type == SchedulerConstants.JOB_TASK_TYPE_DEPLOY):
 
-            # Terminate the run docker container if exists
-            try:
-                container_name = JobRunnerUtils.get_run_container_name(run_id)
-                docker_client = JobRunnerUtils.get_docker_client(DockerArgs())
-                logging.info(f"Terminating the run docker container {container_name} if exists...")
-                JobRunnerUtils.remove_run_container_if_exists(container_name, docker_client)
-            except Exception as e:
-                logging.error(f"Exception {e} occurred when terminating docker container. "
-                              f"Traceback: {traceback.format_exc()}")
+            # Check if docker client exists and then terminate containers.
+            if JobRunnerUtils.docker_client_exists():
+                try:
+                    # Terminate docker container.
+                    docker_client = JobRunnerUtils.get_docker_client(DockerArgs())
+                    container_name = JobRunnerUtils.get_run_container_name(run_id)
+                    logging.info(f"Terminating the run docker container {container_name} if exists...")
+                    JobRunnerUtils.remove_run_container_if_exists(container_name, docker_client)
+                except Exception as e:
+                    logging.error(f"Exception {e} occurred when terminating docker container. "
+                                  f"Traceback: {traceback.format_exc()}")
 
             # Release the GPU ids and update the GPU availability in the persistent store
             JobRunnerUtils.get_instance().release_gpu_ids(run_id, edge_id)
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index bb2215bf95..05469e78ff 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -509,9 +509,10 @@ def process_status(self, run_id, status, edge_id, master_id=None):
             if run_process is not None:
                 if run_process.pid is not None:
                     RunProcessUtils.kill_process(run_process.pid)
-                    # Terminate the run docker container if exists
+                    # Check if docker client exists and then terminate containers.
                     if JobRunnerUtils.docker_client_exists():
                         try:
+                            # Terminate docker container.
                             docker_client = JobRunnerUtils.get_docker_client(DockerArgs())
                             container_name = JobRunnerUtils.get_run_container_name(run_id)
                             logging.info(f"Terminating the run docker container {container_name} if exists...")

From 3349667322b72fc4a2ff8d614e8185659e7d3421 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@tensoropera.com>
Date: Thu, 17 Oct 2024 00:12:22 +0000
Subject: [PATCH 224/282] Fixing grpc readme file.

---
 .../grpc_fedavg_mnist_lr_example/README.md      | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/README.md b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/README.md
index 47250ef894..037747c8ee 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/README.md
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/README.md
@@ -8,22 +8,25 @@ comm_args:
   grpc_ipconfig_path: config/grpc_ipconfig.csv
 ```
 
-`grpc_ipconfig_path` specifies the path of the config for gRPC communication. Config file specifies an ip address for each process through with they can communicate with each other. The config file should have the folliwng format:
+`grpc_ipconfig_path` specifies the path of the config for gRPC communication. Config file specifies an ip address for each process through with they can communicate with each other. The config file should have the following format:
 
 ```csv
-receiver_id,ip
-0,127.0.0.1
-1,127.0.0.1
-2,127.0.0.1
+eid,rank,ip,port
+0,0,0.0.0.0,8890
+1,1,0.0.0.0,8899
+2,2,0.0.0.0,8898
 ```
 
-Here the `receiver_id` is the rank of the process.
+Here, `eid, rank, ip, port` are the id, rank, ip address and port of the server or client process. For server processes the rank is always set to 0, while for clients it is always set to 1 or above.
 
 ## One Line API Example
 
-Example is provided at:
+Examples are provided at:
 
 `python/examples/cross_silo/grpc_fedavg_mnist_lr_example/one_line`
+`python/examples/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step`
+`python/examples/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model`
+
 ### Training Script
 
 At the client side, the client ID (a.k.a rank) starts from 1.

From d2484fad6c30cc8fbd2f1c89de9437377d702b9f Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@tensoropera.com>
Date: Thu, 17 Oct 2024 02:01:09 +0000
Subject: [PATCH 225/282] Remove circular dependency.

---
 python/fedml/computing/scheduler/comm_utils/sys_utils.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/sys_utils.py b/python/fedml/computing/scheduler/comm_utils/sys_utils.py
index aaa37bc4db..2e6ba8a08c 100644
--- a/python/fedml/computing/scheduler/comm_utils/sys_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/sys_utils.py
@@ -20,8 +20,6 @@
 import sys
 import subprocess
 
-from fedml.computing.scheduler.slave.client_constants import ClientConstants
-
 FETAL_ERROR_START_CODE = 128
 
 SYS_ERR_CODE_MAP = {"0": "Successful exit without errors.",
@@ -817,6 +815,7 @@ def daemon_ota_upgrade_with_version(in_version="release"):
 
 
 def run_cmd(command, show_local_console=False):
+    from fedml.computing.scheduler.slave.client_constants import ClientConstants
     process = ClientConstants.exec_console_with_script(command, should_capture_stdout=True,
                                                        should_capture_stderr=True)
     ret_code, out, err = ClientConstants.get_console_pipe_out_err_results(process)

From a959802390c8c2dfb6869084544ff41afa08ce67 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@tensoropera.com>
Date: Thu, 17 Oct 2024 22:27:39 +0000
Subject: [PATCH 226/282] Extending grpc support to also consider docker
 container ips.

---
 .../scheduler/comm_utils/sys_utils.py         |  1 +
 .../communication/grpc/grpc_comm_manager.py   | 76 +++++++++++++------
 2 files changed, 54 insertions(+), 23 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/sys_utils.py b/python/fedml/computing/scheduler/comm_utils/sys_utils.py
index 2e6ba8a08c..a0ee3402f0 100644
--- a/python/fedml/computing/scheduler/comm_utils/sys_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/sys_utils.py
@@ -815,6 +815,7 @@ def daemon_ota_upgrade_with_version(in_version="release"):
 
 
 def run_cmd(command, show_local_console=False):
+    # Had to import ClientConstants here because otherwise it was raising circular import errors.
     from fedml.computing.scheduler.slave.client_constants import ClientConstants
     process = ClientConstants.exec_console_with_script(command, should_capture_stdout=True,
                                                        should_capture_stderr=True)
diff --git a/python/fedml/core/distributed/communication/grpc/grpc_comm_manager.py b/python/fedml/core/distributed/communication/grpc/grpc_comm_manager.py
index a2cc255437..a92d3abbc8 100644
--- a/python/fedml/core/distributed/communication/grpc/grpc_comm_manager.py
+++ b/python/fedml/core/distributed/communication/grpc/grpc_comm_manager.py
@@ -8,9 +8,10 @@
 
 lock = threading.Lock()
 
+from collections import namedtuple
 from concurrent import futures
 from fedml.core.mlops.mlops_profiler_event import MLOpsProfilerEvent
-from typing import List
+from typing import List, TypedDict
 
 from ...communication.base_com_manager import BaseCommunicationManager
 from ...communication.message import Message
@@ -22,6 +23,11 @@
 from ...communication.grpc.grpc_server import GRPCCOMMServicer
 
 
+GRPCMapping = \
+    namedtuple("GRPCMapping",
+               ["eid", "rank", "grpc_server_ip", "grpc_server_port", "ingress_ip"])
+
+
 class GRPCCommManager(BaseCommunicationManager):
     MSG_ARG_KEY_SENDER_RANK = "sender_rank"
     MSG_ARG_KEY_SENDER_IP = "sender_ip"
@@ -39,11 +45,14 @@ def __init__(
         self._topic = topic
         self._observers: List[Observer] = []
         self.grpc_ipconfig_path = grpc_ipconfig_path
+        self.grpc_mappings = dict()
         self.client_rank = client_rank
         self.client_id = self.client_rank
         self.client_num = client_num
         self.args = args
 
+        self._init_grpc_mappings()  # Initialize self.grpc_mappings variable.
+
         if self.client_rank == 0:
             self.node_type = "server"
             logging.info("############# THIS IS FL SERVER ################")
@@ -60,19 +69,19 @@ def __init__(
             options=self.opts,
         )
 
-        self.grpc_mappings = self._init_grpc_mappings()  # Load input mappings.
         if self.client_id not in self.grpc_mappings:
             # if no record exists for the current client id, then
             # default ip and rank to "0.0.0.0" and BASE + RANK.
-            self.ip = "0.0.0.0"
-            self.port = CommunicationConstants.GRPC_BASE_PORT + self.client_rank
-            self.grpc_mappings[self.client_id] = (self.client_rank, self.ip, self.port)
-        else:
-            _, self.ip, self.port = self.grpc_mappings[self.client_id]
+            self.grpc_mappings[self.client_id] = GRPCMapping(
+                eid=self.client_id,
+                rank=self.client_id,
+                grpc_server_ip="0.0.0.0",
+                grpc_server_port=CommunicationConstants.GRPC_BASE_PORT + self.client_rank,
+                ingress_ip=None)
 
         self.grpc_servicer = GRPCCOMMServicer(
-            self.ip,
-            self.port,
+            self.grpc_mappings[self.client_id].grpc_server_ip,
+            self.grpc_mappings[self.client_id].grpc_server_port,
             self.client_num,
             self.client_rank
         )
@@ -81,20 +90,22 @@ def __init__(
         )
         logging.info(os.getcwd())
 
-        self.grpc_server.add_insecure_port("{}:{}".format(self.ip, self.port))
+        grpc_insecure_ip_port = "{}:{}".format(self.grpc_mappings[self.client_id].grpc_server_ip,
+                                               self.grpc_mappings[self.client_id].grpc_server_port)
+        self.grpc_server.add_insecure_port(grpc_insecure_ip_port)
 
         self.grpc_server.start()
         # Wait for 100 milliseconds to make sure the grpc
         # server has started before proceeding.
         time.sleep(0.01)
         self.is_running = True
-        logging.info("grpc server started. Listening on port " + str(self.port))
+        logging.info("Started gRPC server: {}.".format(grpc_insecure_ip_port))
 
     def send_message(self, msg: Message):
         # Register the sender rank, ip and port attribute on the message.
         msg.add_params(GRPCCommManager.MSG_ARG_KEY_SENDER_RANK, self.client_rank)
-        msg.add_params(GRPCCommManager.MSG_ARG_KEY_SENDER_IP, self.ip)
-        msg.add_params(GRPCCommManager.MSG_ARG_KEY_SENDER_PORT, self.port)
+        msg.add_params(GRPCCommManager.MSG_ARG_KEY_SENDER_IP, self.grpc_mappings[self.client_id].grpc_server_ip)
+        msg.add_params(GRPCCommManager.MSG_ARG_KEY_SENDER_PORT, self.grpc_mappings[self.client_id].grpc_server_port)
         logging.info("sending msg = {}".format(msg.get_params_wout_model()))
         logging.info("pickle.dumps(msg) START")
         pickle_dump_start_time = time.time()
@@ -103,8 +114,13 @@ def send_message(self, msg: Message):
         logging.info("pickle.dumps(msg) END")
 
         receiver_id = msg.get_receiver_id()
-        receiver_rank, receiver_ip, receiver_port = self.grpc_mappings[int(receiver_id)]
-        channel_url = "{}:{}".format(receiver_ip, receiver_port)
+        receiver_grpc_mappings = self.grpc_mappings[int(receiver_id)]
+        if receiver_grpc_mappings.ingress_ip:
+            channel_url = "{}:{}".format(receiver_grpc_mappings.ingress_ip,
+                                         receiver_grpc_mappings.grpc_server_port)
+        else:
+            channel_url = "{}:{}".format(receiver_grpc_mappings.grpc_server_ip,
+                                         receiver_grpc_mappings.grpc_server_port)
 
         channel = grpc.insecure_channel(channel_url, options=self.opts)
         stub = grpc_comm_manager_pb2_grpc.gRPCCommManagerStub(channel)
@@ -155,7 +171,12 @@ def message_handling_subroutine(self):
                     sender_rank = int(msg.get_params()[GRPCCommManager.MSG_ARG_KEY_SENDER_RANK])
                     sender_ip = str(msg.get_params()[GRPCCommManager.MSG_ARG_KEY_SENDER_IP])
                     sender_port = int(msg.get_params()[GRPCCommManager.MSG_ARG_KEY_SENDER_PORT])
-                    self.grpc_mappings[sender_id] = (sender_rank, sender_ip, sender_port)
+                    self.grpc_mappings[sender_id] = GRPCMapping(
+                        eid=sender_id,
+                        rank=sender_rank,
+                        grpc_server_ip=sender_ip,
+                        grpc_server_port=sender_port,
+                        ingress_ip=sender_ip)
 
                 for observer in self._observers:
                     _message_handler_start_time = time.time()
@@ -186,10 +207,19 @@ def _notify_connection_ready(self):
             observer.receive_message(msg_type, msg_params)
 
     def _init_grpc_mappings(self):
-        mappings = dict()
-        csv_reader = csv.reader(open(self.grpc_ipconfig_path, "r"))
-        next(csv_reader)  # skip header line
-        for row in csv_reader:
-            eid, rank, ip, port = row
-            mappings[int(eid)] = (int(rank), str(ip), int(port))
-        return mappings
+        csv_dict_reader = csv.DictReader(open(self.grpc_ipconfig_path, "r"))
+        data_dict = list(csv_dict_reader)
+        for row in data_dict:
+            eid = int(row["eid"])
+            rank = int(row["rank"])
+            grpc_server_ip = str(row["grpc_server_ip"])
+            grpc_server_port = int(row["grpc_server_port"])
+            ingress_ip = None
+            if "ingress_ip" in row:
+                ingress_ip = row["ingress_ip"]
+            self.grpc_mappings[int(eid)] = GRPCMapping(
+                eid=eid,
+                rank=rank,
+                grpc_server_ip=grpc_server_ip,
+                grpc_server_port=grpc_server_port,
+                ingress_ip=ingress_ip)

From aa691223264bd94fa23661ee5534d4d05f8ef0b5 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@tensoropera.com>
Date: Thu, 17 Oct 2024 22:32:06 +0000
Subject: [PATCH 227/282] Fixing notation and attribute names in grpc config
 files.

---
 .../federate/cross_silo/grpc_fedavg_mnist_lr_example/README.md  | 2 +-
 .../custom_data_and_model/README.md                             | 2 +-
 .../custom_data_and_model/config/grpc_ipconfig.csv              | 2 +-
 .../cross_silo/grpc_fedavg_mnist_lr_example/one_line/README.md  | 2 +-
 .../one_line/config/grpc_ipconfig.csv                           | 2 +-
 .../grpc_fedavg_mnist_lr_example/step_by_step/README.md         | 2 +-
 .../step_by_step/config/grpc_ipconfig.csv                       | 2 +-
 .../core/distributed/communication/grpc/grpc_comm_manager.py    | 2 +-
 8 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/README.md b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/README.md
index 037747c8ee..3ee1850a03 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/README.md
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/README.md
@@ -11,7 +11,7 @@ comm_args:
 `grpc_ipconfig_path` specifies the path of the config for gRPC communication. Config file specifies an ip address for each process through with they can communicate with each other. The config file should have the following format:
 
 ```csv
-eid,rank,ip,port
+eid,rank,grpc_server_ip,grpc_server_port
 0,0,0.0.0.0,8890
 1,1,0.0.0.0,8899
 2,2,0.0.0.0,8898
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/README.md b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/README.md
index da37ebda43..dd787b9141 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/README.md
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/README.md
@@ -9,7 +9,7 @@ The default ip of every groc server is set to `0.0.0.0`, and all grpc ports star
 > the aggregator (rank: 0). This record is mandatory. However, you can change the values of the `ip` and `port` 
 > attributes as you see fit, and more records for grpc server of the rest of clients. For instance:
 ```
-eid,rank,ip,port
+eid,rank,grpc_server_ip,grpc_server_port
 0,0,0.0.0.0,8890
 1,1,0.0.0.0,8899
 2,2,0.0.0.0,8898
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/config/grpc_ipconfig.csv b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/config/grpc_ipconfig.csv
index c92391a46a..9729b81833 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/config/grpc_ipconfig.csv
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/config/grpc_ipconfig.csv
@@ -1,2 +1,2 @@
-eid,rank,ip,port
+eid,rank,grpc_server_ip,grpc_server_port
 0,0,0.0.0.0,8890
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/README.md b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/README.md
index da37ebda43..dd787b9141 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/README.md
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/README.md
@@ -9,7 +9,7 @@ The default ip of every groc server is set to `0.0.0.0`, and all grpc ports star
 > the aggregator (rank: 0). This record is mandatory. However, you can change the values of the `ip` and `port` 
 > attributes as you see fit, and more records for grpc server of the rest of clients. For instance:
 ```
-eid,rank,ip,port
+eid,rank,grpc_server_ip,grpc_server_port
 0,0,0.0.0.0,8890
 1,1,0.0.0.0,8899
 2,2,0.0.0.0,8898
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/config/grpc_ipconfig.csv b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/config/grpc_ipconfig.csv
index b46eb6d33b..8d082a9613 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/config/grpc_ipconfig.csv
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/config/grpc_ipconfig.csv
@@ -1,2 +1,2 @@
-eid,rank,ip,port
+eid,rank,grpc_server_ip,grpc_server_port
 0,0,0.0.0.0,8890
\ No newline at end of file
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/README.md b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/README.md
index da37ebda43..dd787b9141 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/README.md
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/README.md
@@ -9,7 +9,7 @@ The default ip of every groc server is set to `0.0.0.0`, and all grpc ports star
 > the aggregator (rank: 0). This record is mandatory. However, you can change the values of the `ip` and `port` 
 > attributes as you see fit, and more records for grpc server of the rest of clients. For instance:
 ```
-eid,rank,ip,port
+eid,rank,grpc_server_ip,grpc_server_port
 0,0,0.0.0.0,8890
 1,1,0.0.0.0,8899
 2,2,0.0.0.0,8898
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/config/grpc_ipconfig.csv b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/config/grpc_ipconfig.csv
index b46eb6d33b..8d082a9613 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/config/grpc_ipconfig.csv
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/config/grpc_ipconfig.csv
@@ -1,2 +1,2 @@
-eid,rank,ip,port
+eid,rank,grpc_server_ip,grpc_server_port
 0,0,0.0.0.0,8890
\ No newline at end of file
diff --git a/python/fedml/core/distributed/communication/grpc/grpc_comm_manager.py b/python/fedml/core/distributed/communication/grpc/grpc_comm_manager.py
index a92d3abbc8..399b2d44b9 100644
--- a/python/fedml/core/distributed/communication/grpc/grpc_comm_manager.py
+++ b/python/fedml/core/distributed/communication/grpc/grpc_comm_manager.py
@@ -11,7 +11,7 @@
 from collections import namedtuple
 from concurrent import futures
 from fedml.core.mlops.mlops_profiler_event import MLOpsProfilerEvent
-from typing import List, TypedDict
+from typing import List
 
 from ...communication.base_com_manager import BaseCommunicationManager
 from ...communication.message import Message

From c302749f8f539bcbdf692109db17dec43a2d3e8b Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@tensoropera.com>
Date: Fri, 18 Oct 2024 00:32:30 +0000
Subject: [PATCH 228/282] testing with ingress ip.

---
 .../core/distributed/communication/grpc/grpc_comm_manager.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/python/fedml/core/distributed/communication/grpc/grpc_comm_manager.py b/python/fedml/core/distributed/communication/grpc/grpc_comm_manager.py
index 399b2d44b9..347f449937 100644
--- a/python/fedml/core/distributed/communication/grpc/grpc_comm_manager.py
+++ b/python/fedml/core/distributed/communication/grpc/grpc_comm_manager.py
@@ -104,7 +104,10 @@ def __init__(
     def send_message(self, msg: Message):
         # Register the sender rank, ip and port attribute on the message.
         msg.add_params(GRPCCommManager.MSG_ARG_KEY_SENDER_RANK, self.client_rank)
-        msg.add_params(GRPCCommManager.MSG_ARG_KEY_SENDER_IP, self.grpc_mappings[self.client_id].grpc_server_ip)
+        if self.grpc_mappings[self.client_id].ingress_ip:
+            msg.add_params(GRPCCommManager.MSG_ARG_KEY_SENDER_IP, self.grpc_mappings[self.client_id].ingress_ip)
+        else:
+            msg.add_params(GRPCCommManager.MSG_ARG_KEY_SENDER_IP, self.grpc_mappings[self.client_id].grpc_server_ip)
         msg.add_params(GRPCCommManager.MSG_ARG_KEY_SENDER_PORT, self.grpc_mappings[self.client_id].grpc_server_port)
         logging.info("sending msg = {}".format(msg.get_params_wout_model()))
         logging.info("pickle.dumps(msg) START")

From 292bfb360b48568e72bbf355d7b7cd43877a028e Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@tensoropera.com>
Date: Fri, 18 Oct 2024 01:52:30 +0000
Subject: [PATCH 229/282] Polishing grpc + docker examples.

---
 .../custom_data_and_model/README.md           |  4 +-
 .../grpc_docker_fedmlai/README.md             | 51 ++++++++++++++++++
 .../grpc_docker_fedmlai/__init__.py           |  0
 .../grpc_docker_fedmlai/config/bootstrap.bat  | 12 +++++
 .../grpc_docker_fedmlai/config/bootstrap.sh   |  7 +++
 .../config/fedml_config.yaml                  | 52 +++++++++++++++++++
 .../config/grpc_ipconfig.csv                  |  3 ++
 .../grpc_docker_fedmlai/run_client.sh         |  3 ++
 .../grpc_docker_fedmlai/run_server.sh         |  3 ++
 .../grpc_docker_fedmlai/torch_client.py       | 18 +++++++
 .../grpc_docker_fedmlai/torch_server.py       | 18 +++++++
 .../one_line/README.md                        |  4 +-
 .../step_by_step/README.md                    |  4 +-
 13 files changed, 173 insertions(+), 6 deletions(-)
 create mode 100644 python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/README.md
 create mode 100644 python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/__init__.py
 create mode 100755 python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/config/bootstrap.bat
 create mode 100644 python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/config/bootstrap.sh
 create mode 100644 python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/config/fedml_config.yaml
 create mode 100644 python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/config/grpc_ipconfig.csv
 create mode 100644 python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/run_client.sh
 create mode 100644 python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/run_server.sh
 create mode 100644 python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/torch_client.py
 create mode 100644 python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/torch_server.py

diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/README.md b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/README.md
index dd787b9141..22e628e502 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/README.md
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/custom_data_and_model/README.md
@@ -11,8 +11,8 @@ The default ip of every groc server is set to `0.0.0.0`, and all grpc ports star
 ```
 eid,rank,grpc_server_ip,grpc_server_port
 0,0,0.0.0.0,8890
-1,1,0.0.0.0,8899
-2,2,0.0.0.0,8898
+1,1,0.0.0.0,8891
+2,2,0.0.0.0,8892
 ```
 
 ## Start Script
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/README.md b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/README.md
new file mode 100644
index 0000000000..174309aa55
--- /dev/null
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/README.md
@@ -0,0 +1,51 @@
+
+# Introduction
+In this working example, we will run 1 aggregation server and 2 clients on the same machine using Docker + gRPC and we will use the FEDML.ai platform to run the FL job. 
+
+# gRPC Configuration File
+The content of the gRPC configuration file is as follows:
+```
+eid,rank,grpc_server_ip,grpc_server_port,ingress_ip
+0,0,0.0.0.0,8890,fedml_server
+1,1,0.0.0.0,8891,fedml_client_1
+2,2,0.0.0.0,8892,fedml_client_2
+```
+The ingress_ip variable refers to the name of the container that we assign to either the server or the client, as discussed in detail below.
+
+
+# Docker Configuration
+Before creating any docker container on our machine, we need to pull the latest fedml image (e.g., `fedml:dev090`) and ensure that all spawned containers can communicate with each other through a network bridge (e.g., `fedml_grpc_network`).  
+Specifically, what you need to do is:
+```bash
+docker pull fedml:dev090
+docker network create fedml_grpc_network
+``` 
+
+Once these two steps are configured, we can start 1 aggregation server and 2 clients (without using a GPU) and register them with the fedml platform using our <FEDML_API_KEY> as follows:
+
+```bash
+# Server
+docker run -it -p 8890:8890 --entrypoint /bin/bash --name fedml_server --network fedml_grpc_network fedml:dev090
+redis-server --daemonize yes
+source /fedml/bin/activate
+fedml login -s <FEDML_API_KEY>
+```
+
+```bash
+# Client 1
+docker run -it -p 8891:8891 --entrypoint /bin/bash --name fedml_client_1 --network fedml_grpc_network fedml:dev090
+redis-server --daemonize yes
+source /fedml/bin/activate
+fedml login -c <FEDML_API_KEY>
+```
+
+```bash
+# Client-2
+docker run -it -p 8892:8892 --entrypoint /bin/bash --name fedml_client_2 --network fedml_grpc_network fedml:dev090
+redis-server --daemonize yes
+source /fedml/bin/activate
+fedml login -c <FEDML_API_KEY>
+```
+
+Then we only need to compile our job and submit it to our docker-based cluster, as is also discussed in detail in the official FEDML documentation: https://fedml.ai/octopus/userGuides
+
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/__init__.py b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/config/bootstrap.bat b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/config/bootstrap.bat
new file mode 100755
index 0000000000..fb0dd54d6d
--- /dev/null
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/config/bootstrap.bat
@@ -0,0 +1,12 @@
+:: ### don't modify this part ###
+:: ##############################
+
+
+:: ### please customize your script in this region ####
+set DATA_PATH=%userprofile%\fedml_data
+if exist %DATA_PATH% (echo Exist %DATA_PATH%) else mkdir %DATA_PATH%
+
+
+:: ### don't modify this part ###
+echo [FedML]Bootstrap Finished
+:: ##############################
\ No newline at end of file
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/config/bootstrap.sh b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/config/bootstrap.sh
new file mode 100644
index 0000000000..3d969974b0
--- /dev/null
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/config/bootstrap.sh
@@ -0,0 +1,7 @@
+
+# pip install fedml==0.7.15
+#pip install --upgrade fedml
+
+### don't modify this part ###
+echo "[FedML]Bootstrap Finished"
+##############################
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/config/fedml_config.yaml b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/config/fedml_config.yaml
new file mode 100644
index 0000000000..d7183b6ada
--- /dev/null
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/config/fedml_config.yaml
@@ -0,0 +1,52 @@
+common_args:
+  training_type: "cross_silo"
+  scenario: "horizontal"
+  using_mlops: false
+  random_seed: 0
+
+environment_args:
+  bootstrap: config/bootstrap.sh
+
+data_args:
+  dataset: "mnist"
+  data_cache_dir: "../../../../data/mnist"
+  partition_method: "hetero"
+  partition_alpha: 0.5
+
+model_args:
+  model: "lr"
+  model_file_cache_folder: "./model_file_cache" # will be filled by the server automatically
+  global_model_file_path: "./model_file_cache/global_model.pt"
+
+train_args:
+  federated_optimizer: "FedAvg"
+  client_id_list:
+  client_num_in_total: 1000
+  client_num_per_round: 2
+  comm_round: 50
+  epochs: 1
+  batch_size: 10
+  client_optimizer: sgd
+  learning_rate: 0.03
+  weight_decay: 0.001
+
+validation_args:
+  frequency_of_the_test: 5
+
+device_args:
+  worker_num: 2
+  using_gpu: false
+  gpu_mapping_file: config/gpu_mapping.yaml
+  gpu_mapping_key: mapping_default
+
+comm_args:
+  backend: "GRPC"
+  grpc_ipconfig_path: config/grpc_ipconfig.csv
+  
+tracking_args:
+   # When running on MLOps platform(open.fedml.ai), the default log path is at ~/.fedml/fedml-client/fedml/logs/ and ~/.fedml/fedml-server/fedml/logs/
+  local_log_output_path: ./log
+  enable_wandb: false
+  wandb_key: ee0b5f53d949c84cee7decbe7a619e63fb1f8408
+  wandb_project: fedml
+  wandb_name: fedml_torch_fedavg_mnist_lr
\ No newline at end of file
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/config/grpc_ipconfig.csv b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/config/grpc_ipconfig.csv
new file mode 100644
index 0000000000..8f461936dd
--- /dev/null
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/config/grpc_ipconfig.csv
@@ -0,0 +1,3 @@
+eid,rank,grpc_server_ip,grpc_server_port,ingress_ip
+0,0,0.0.0.0,8890,fedml_server
+1,1,0.0.0.0,8891,fedml_client_1
\ No newline at end of file
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/run_client.sh b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/run_client.sh
new file mode 100644
index 0000000000..18d3cea9fe
--- /dev/null
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/run_client.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+RANK=$1
+python3 torch_client.py --cf config/fedml_config.yaml --rank $RANK --role client
\ No newline at end of file
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/run_server.sh b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/run_server.sh
new file mode 100644
index 0000000000..08007b7e81
--- /dev/null
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/run_server.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+python3 torch_server.py --cf config/fedml_config.yaml --rank 0 --role server
\ No newline at end of file
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/torch_client.py b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/torch_client.py
new file mode 100644
index 0000000000..9085c85ebe
--- /dev/null
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/torch_client.py
@@ -0,0 +1,18 @@
+import fedml
+from fedml import FedMLRunner
+
+if __name__ == "__main__":
+    args = fedml.init()
+
+    # init device
+    device = fedml.device.get_device(args)
+
+    # load data
+    dataset, output_dim = fedml.data.load(args)
+
+    # load model
+    model = fedml.model.create(args, output_dim)
+
+    # start training
+    fedml_runner = FedMLRunner(args, device, dataset, model)
+    fedml_runner.run()
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/torch_server.py b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/torch_server.py
new file mode 100644
index 0000000000..9085c85ebe
--- /dev/null
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/torch_server.py
@@ -0,0 +1,18 @@
+import fedml
+from fedml import FedMLRunner
+
+if __name__ == "__main__":
+    args = fedml.init()
+
+    # init device
+    device = fedml.device.get_device(args)
+
+    # load data
+    dataset, output_dim = fedml.data.load(args)
+
+    # load model
+    model = fedml.model.create(args, output_dim)
+
+    # start training
+    fedml_runner = FedMLRunner(args, device, dataset, model)
+    fedml_runner.run()
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/README.md b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/README.md
index dd787b9141..22e628e502 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/README.md
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/one_line/README.md
@@ -11,8 +11,8 @@ The default ip of every groc server is set to `0.0.0.0`, and all grpc ports star
 ```
 eid,rank,grpc_server_ip,grpc_server_port
 0,0,0.0.0.0,8890
-1,1,0.0.0.0,8899
-2,2,0.0.0.0,8898
+1,1,0.0.0.0,8891
+2,2,0.0.0.0,8892
 ```
 
 ## Start Script
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/README.md b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/README.md
index dd787b9141..22e628e502 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/README.md
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/step_by_step/README.md
@@ -11,8 +11,8 @@ The default ip of every groc server is set to `0.0.0.0`, and all grpc ports star
 ```
 eid,rank,grpc_server_ip,grpc_server_port
 0,0,0.0.0.0,8890
-1,1,0.0.0.0,8899
-2,2,0.0.0.0,8898
+1,1,0.0.0.0,8891
+2,2,0.0.0.0,8892
 ```
 
 ## Start Script

From 55ff447dc34b758fffdf8f74fd3769c4a3980ec7 Mon Sep 17 00:00:00 2001
From: fedml-dimitris <dimitris@fedml.ai>
Date: Mon, 4 Nov 2024 14:31:37 -0500
Subject: [PATCH 230/282] Parameterizing deploy host, port.

---
 python/fedml/serving/fedml_inference_runner.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/python/fedml/serving/fedml_inference_runner.py b/python/fedml/serving/fedml_inference_runner.py
index 30dd9a6fca..64a8a2d596 100644
--- a/python/fedml/serving/fedml_inference_runner.py
+++ b/python/fedml/serving/fedml_inference_runner.py
@@ -6,8 +6,10 @@
 
 
 class FedMLInferenceRunner(ABC):
-    def __init__(self, client_predictor):
+    def __init__(self, client_predictor, host="0.0.0.0", port=2345):
         self.client_predictor = client_predictor
+        self.host = host
+        self.port = port
 
     def run(self):
         api = FastAPI()
@@ -43,5 +45,4 @@ async def ready():
                 return Response(status_code=status.HTTP_202_ACCEPTED)
 
         import uvicorn
-        port = 2345
-        uvicorn.run(api, host="0.0.0.0", port=port)
+        uvicorn.run(api, host=self.host, port=self.port)

From a108a8a63c4bf93f3251456c1f5d7fc47d8b2d78 Mon Sep 17 00:00:00 2001
From: Raphael Jin <kimheavy@yahoo.com>
Date: Sun, 10 Nov 2024 23:00:40 -0800
Subject: [PATCH 231/282] [Deploy] Edge Case Handling.

---
 python/fedml/__init__.py                      |   5 +-
 .../scheduler/comm_utils/run_process_utils.py |  53 ++-
 .../scheduler/comm_utils/sys_utils.py         | 450 ++++++++++--------
 .../scheduler/slave/client_daemon.py          |   4 +
 python/fedml/core/mlops/mlops_configs.py      |  19 +-
 5 files changed, 308 insertions(+), 223 deletions(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index c96d65adc5..d6341c25c9 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -511,7 +511,8 @@ def _get_mqtt_service():
 
 
 def set_local_on_premise_platform_host(local_on_premise_platform_host):
-    os.environ['FEDML_ENV_LOCAL_ON_PREMISE_PLATFORM_HOST'] = local_on_premise_platform_host
+    # Should Also update the .env file
+    set_env_kv("FEDML_ENV_LOCAL_ON_PREMISE_PLATFORM_HOST", local_on_premise_platform_host)
 
 
 def get_local_on_premise_platform_host():
@@ -519,7 +520,7 @@ def get_local_on_premise_platform_host():
 
 
 def set_local_on_premise_platform_port(local_on_premise_platform_port):
-    os.environ['FEDML_ENV_LOCAL_ON_PREMISE_PLATFORM_PORT'] = str(local_on_premise_platform_port)
+    set_env_kv("FEDML_ENV_LOCAL_ON_PREMISE_PLATFORM_PORT", str(local_on_premise_platform_port))
 
 
 def get_local_on_premise_platform_port():
diff --git a/python/fedml/computing/scheduler/comm_utils/run_process_utils.py b/python/fedml/computing/scheduler/comm_utils/run_process_utils.py
index 05cc342e36..a84b078b54 100644
--- a/python/fedml/computing/scheduler/comm_utils/run_process_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/run_process_utils.py
@@ -166,26 +166,39 @@ def generate_yaml_doc(run_config_object, yaml_file):
     @staticmethod
     def get_pid_from_cmd_line(cmd_line, break_on_first=True):
         ret_pids = list()
-        pids = psutil.process_iter()
-        for pid in pids:
-            try:
-                for cmd in pid.cmdline():
-                    if cmd.find(cmd_line) != -1:
-                        is_running = False
-                        try:
-                            process = psutil.Process(pid.pid)
-                            if process.status() == psutil.STATUS_RUNNING or \
-                                    process.status() == psutil.STATUS_SLEEPING or \
-                                    process.status() == psutil.STATUS_IDLE:
-                                is_running = True
-                        except Exception as e:
-                            pass
-                        if is_running:
-                            ret_pids.append(pid.pid)
-                        if break_on_first:
-                            return ret_pids
-            except Exception as e:
-                pass
+        try:
+            for pid in psutil.process_iter():
+                try:
+                    try:
+                        _ = pid.as_dict(attrs=['cpu_times', 'name', 'pid', 'status'])
+                    except psutil.ZombieProcess:
+                        # Filter out zombie processes
+                        continue
+                    except psutil.NoSuchProcess:
+                        continue
+
+                    for cmd in pid.cmdline():
+                        if cmd.find(cmd_line) != -1:
+                            is_running = False
+                            try:
+                                process = psutil.Process(pid.pid)
+                                if process.status() == psutil.STATUS_RUNNING or \
+                                        process.status() == psutil.STATUS_SLEEPING or \
+                                        process.status() == psutil.STATUS_IDLE:
+                                    is_running = True
+                            except Exception as e:
+                                print(f"Error in get_pid_from_cmd_line inner loop: {e}")
+                                pass
+                            if is_running:
+                                ret_pids.append(pid.pid)
+                            if break_on_first:
+                                return ret_pids
+                except Exception as e:
+                    # print(f"Error in get_pid_from_cmd_line inner loop: {e}")
+                    continue
+        except Exception as e:
+            print(f"Error in get_pid_from_cmd_line outer loop: {e}")
+            pass
 
         return ret_pids
 
diff --git a/python/fedml/computing/scheduler/comm_utils/sys_utils.py b/python/fedml/computing/scheduler/comm_utils/sys_utils.py
index a0ee3402f0..6dbef9bde3 100644
--- a/python/fedml/computing/scheduler/comm_utils/sys_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/sys_utils.py
@@ -301,249 +301,303 @@ def save_login_process(runner_home_dir, runner_info_dir, edge_process_id):
 
 def cleanup_all_fedml_client_learning_processes():
     # Cleanup all fedml client learning processes.
-    for process in psutil.process_iter():
-        try:
-            pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
-            found_learning_process = False
-            found_client_process = False
-            for cmd in pinfo["cmdline"]:
-                if str(cmd).find("fedml_config.yaml") != -1:
-                    found_learning_process = True
-
-                if str(cmd).find("client") != -1:
-                    found_client_process = True
-
-            if found_learning_process and found_client_process:
-                # click.echo("find client learning process at {}.".format(process.pid))
-                if platform.system() == 'Windows':
-                    os.system("taskkill /PID {} /T /F".format(process.pid))
-                else:
-                    os.killpg(os.getpgid(process.pid), signal.SIGKILL)
-        except Exception as e:
-            pass
+    try:
+        for process in psutil.process_iter():
+            try:
+                pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
+                found_learning_process = False
+                found_client_process = False
+                for cmd in pinfo["cmdline"]:
+                    if str(cmd).find("fedml_config.yaml") != -1:
+                        found_learning_process = True
+
+                    if str(cmd).find("client") != -1:
+                        found_client_process = True
+
+                if found_learning_process and found_client_process:
+                    # click.echo("find client learning process at {}.".format(process.pid))
+                    if platform.system() == 'Windows':
+                        os.system("taskkill /PID {} /T /F".format(process.pid))
+                    else:
+                        os.killpg(os.getpgid(process.pid), signal.SIGKILL)
+            except Exception as e:
+                print(f"Failed to cleanup the client learning process due to {e}.")
+                pass
+    except Exception as e:
+        print(f"Failed to cleanup the client learning process due to {e}.")
+        pass
 
 
 def cleanup_all_fedml_client_diagnosis_processes():
     # Cleanup all fedml client learning processes.
-    for process in psutil.process_iter():
-        try:
-            pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
-            found_client_diagnosis_process = False
-            for cmd in pinfo["cmdline"]:
-                if str(cmd).find("client_diagnosis") != -1:
-                    found_client_diagnosis_process = True
-
-            if found_client_diagnosis_process:
-                # click.echo("find client diagnosis process at {}.".format(process.pid))
-                if platform.system() == 'Windows':
-                    os.system("taskkill /PID {} /T /F".format(process.pid))
-                else:
-                    os.killpg(os.getpgid(process.pid), signal.SIGKILL)
-        except Exception as e:
-            pass
+    try:
+        for process in psutil.process_iter():
+            try:
+                pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
+                found_client_diagnosis_process = False
+                for cmd in pinfo["cmdline"]:
+                    if str(cmd).find("client_diagnosis") != -1:
+                        found_client_diagnosis_process = True
+
+                if found_client_diagnosis_process:
+                    # click.echo("find client diagnosis process at {}.".format(process.pid))
+                    if platform.system() == 'Windows':
+                        os.system("taskkill /PID {} /T /F".format(process.pid))
+                    else:
+                        os.killpg(os.getpgid(process.pid), signal.SIGKILL)
+            except Exception as e:
+                print(f"Failed to cleanup the client diagnosis process due to {e}.")
+                pass
+    except Exception as e:
+        print(f"Failed to cleanup the client diagnosis process due to {e}.")
+        pass
 
 
 def cleanup_all_fedml_client_login_processes(login_program, clean_process_group=True):
     # Cleanup all fedml client login processes.
-    for process in psutil.process_iter():
-        try:
-            pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
-            for cmd in pinfo["cmdline"]:
-                if str(cmd).find(login_program) != -1:
-                    if os.path.basename(cmd) == login_program:
-                        # click.echo("find client login process at {}.".format(process.pid))
-                        if platform.system() == "Windows":
-                            os.system("taskkill /PID {} /T /F".format(process.pid))
-                        else:
-                            os.kill(process.pid, signal.SIGKILL)
-                            if clean_process_group:
-                                os.killpg(os.getpgid(process.pid), signal.SIGKILL)
-        except Exception as e:
-            pass
+    try:
+        for process in psutil.process_iter():
+            try:
+                pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
+                for cmd in pinfo["cmdline"]:
+                    if str(cmd).find(login_program) != -1:
+                        if os.path.basename(cmd) == login_program:
+                            # click.echo("find client login process at {}.".format(process.pid))
+                            if platform.system() == "Windows":
+                                os.system("taskkill /PID {} /T /F".format(process.pid))
+                            else:
+                                os.kill(process.pid, signal.SIGKILL)
+                                if clean_process_group:
+                                    os.killpg(os.getpgid(process.pid), signal.SIGKILL)
+            except Exception as e:
+                print(f"Failed to cleanup the client login process due to {e}.")
+                pass
+    except Exception as e:
+        print(f"Failed to cleanup the client login process since psutil.process_iter() failed.")
+        pass
 
 
 def cleanup_all_fedml_server_learning_processes():
     # Cleanup all fedml server learning processes.
-    for process in psutil.process_iter():
-        try:
-            pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
-            found_learning_process = False
-            found_server_process = False
-            for cmd in pinfo["cmdline"]:
-                if str(cmd).find("fedml_config.yaml") != -1:
-                    found_learning_process = True
-
-                if str(cmd).find("server") != -1:
-                    found_server_process = True
-
-            if found_learning_process and found_server_process:
-                # click.echo("find server learning process at {}.".format(process.pid))
-                if platform.system() == 'Windows':
-                    os.system("taskkill /PID {} /T /F".format(process.pid))
-                else:
-                    os.killpg(os.getpgid(process.pid), signal.SIGKILL)
-        except Exception as e:
-            pass
+    try:
+        for process in psutil.process_iter():
+            try:
+                pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
+                found_learning_process = False
+                found_server_process = False
+                for cmd in pinfo["cmdline"]:
+                    if str(cmd).find("fedml_config.yaml") != -1:
+                        found_learning_process = True
+
+                    if str(cmd).find("server") != -1:
+                        found_server_process = True
+
+                if found_learning_process and found_server_process:
+                    # click.echo("find server learning process at {}.".format(process.pid))
+                    if platform.system() == 'Windows':
+                        os.system("taskkill /PID {} /T /F".format(process.pid))
+                    else:
+                        os.killpg(os.getpgid(process.pid), signal.SIGKILL)
+            except Exception as e:
+                print(f"Failed to cleanup the server learning process due to {e}.")
+                pass
+    except Exception as e:
+        print(f"Failed to cleanup the server learning process due to {e}.")
+        pass
 
 
 def cleanup_all_fedml_client_api_processes(kill_all=False, is_model_device=False):
     # Cleanup all fedml client api processes.
-    for process in psutil.process_iter():
-        try:
-            pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
-            find_api_process = False
-            for cmd in pinfo["cmdline"]:
-                if is_model_device:
-                    if str(cmd).find("model_scheduler.device_client_api:api") != -1:
-                        find_api_process = True
-                else:
-                    if str(cmd).find("slave.client_api:api") != -1:
-                        find_api_process = True
+    try:
+        for process in psutil.process_iter():
+            try:
+                pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
+                find_api_process = False
+                for cmd in pinfo["cmdline"]:
+                    if is_model_device:
+                        if str(cmd).find("model_scheduler.device_client_api:api") != -1:
+                            find_api_process = True
+                    else:
+                        if str(cmd).find("slave.client_api:api") != -1:
+                            find_api_process = True
 
-            if find_api_process:
-                # click.echo("find client api process at {}.".format(process.pid))
-                if platform.system() == 'Windows':
-                    os.system("taskkill /PID {} /T /F".format(process.pid))
-                else:
-                    if kill_all:
-                        os.killpg(os.getpgid(process.pid), signal.SIGKILL)
+                if find_api_process:
+                    # click.echo("find client api process at {}.".format(process.pid))
+                    if platform.system() == 'Windows':
+                        os.system("taskkill /PID {} /T /F".format(process.pid))
                     else:
-                        os.kill(process.pid, signal.SIGKILL)
-        except Exception as e:
-            pass
+                        if kill_all:
+                            os.killpg(os.getpgid(process.pid), signal.SIGKILL)
+                        else:
+                            os.kill(process.pid, signal.SIGKILL)
+            except Exception as e:
+                print(f"Failed to cleanup the client api process due to {e}.")
+                pass
+    except Exception as e:
+        print(f"Failed to cleanup the client api process due to {e}.")
+        pass
 
 
 def cleanup_all_fedml_server_api_processes(kill_all=False, is_model_device=False):
     # Cleanup all fedml server api processes.
-    for process in psutil.process_iter():
-        try:
-            pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
-            find_api_process = False
-            for cmd in pinfo["cmdline"]:
-                if is_model_device:
-                    if str(cmd).find("model_scheduler.device_server_api:api") != -1:
-                        find_api_process = True
-
-                    if str(cmd).find("model_scheduler.device_model_inference:api") != -1:
-                        find_api_process = True
-                else:
-                    if str(cmd).find("master.server_api:api") != -1:
-                        find_api_process = True
-
-            if find_api_process:
-                # click.echo("find server api process at {}.".format(process.pid))
-                if platform.system() == 'Windows':
-                    os.system("taskkill /PID {} /T /F".format(process.pid))
-                else:
-                    if kill_all:
-                        os.killpg(os.getpgid(process.pid), signal.SIGKILL)
+    try:
+        for process in psutil.process_iter():
+            try:
+                pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
+                find_api_process = False
+                for cmd in pinfo["cmdline"]:
+                    if is_model_device:
+                        if str(cmd).find("model_scheduler.device_server_api:api") != -1:
+                            find_api_process = True
+
+                        if str(cmd).find("model_scheduler.device_model_inference:api") != -1:
+                            find_api_process = True
                     else:
-                        os.kill(process.pid, signal.SIGKILL)
-        except Exception as e:
-            pass
+                        if str(cmd).find("master.server_api:api") != -1:
+                            find_api_process = True
 
+                if find_api_process:
+                    # click.echo("find server api process at {}.".format(process.pid))
+                    if platform.system() == 'Windows':
+                        os.system("taskkill /PID {} /T /F".format(process.pid))
+                    else:
+                        if kill_all:
+                            os.killpg(os.getpgid(process.pid), signal.SIGKILL)
+                        else:
+                            os.kill(process.pid, signal.SIGKILL)
+            except Exception as e:
+                print(f"Failed to cleanup the server api process due to {e}.")
+                pass
+    except Exception as e:
+        print(f"Failed to cleanup the server api process due to {e}.")
+        pass
 
 def cleanup_all_fedml_server_login_processes(login_program, clean_process_group=False):
     # Cleanup all fedml client login processes.
-    for process in psutil.process_iter():
-        try:
-            pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
-            for cmd in pinfo["cmdline"]:
-                if str(cmd).find(login_program) != -1:
-                    if os.path.basename(cmd) == login_program:
-                        # click.echo("find server login process at {}.".format(process.pid))
-                        if platform.system() == 'Windows':
-                            os.system("taskkill /PID {} /T /F".format(process.pid))
-                        else:
-                            os.kill(process.pid, signal.SIGKILL)
-                            if clean_process_group:
-                                os.killpg(os.getpgid(process.pid), signal.SIGKILL)
-        except Exception as e:
-            pass
+    try:
+        for process in psutil.process_iter():
+            try:
+                pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
+                for cmd in pinfo["cmdline"]:
+                    if str(cmd).find(login_program) != -1:
+                        if os.path.basename(cmd) == login_program:
+                            # click.echo("find server login process at {}.".format(process.pid))
+                            if platform.system() == 'Windows':
+                                os.system("taskkill /PID {} /T /F".format(process.pid))
+                            else:
+                                os.kill(process.pid, signal.SIGKILL)
+                                if clean_process_group:
+                                    os.killpg(os.getpgid(process.pid), signal.SIGKILL)
+            except Exception as e:
+                print(f"Failed to cleanup the server login process due to {e}.")
+                pass
+    except Exception as e:
+        print(f"Failed to cleanup the server login process due to {e}.")
+        pass
 
 
 def cleanup_all_bootstrap_processes(bootstrap_program, clean_process_group=False):
     # Cleanup all fedml bootstrap processes.
-    for process in psutil.process_iter():
-        try:
-            pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
-            for cmd in pinfo["cmdline"]:
-                if str(cmd).find(bootstrap_program) != -1:
-                    if os.path.basename(cmd) == bootstrap_program:
-                        # click.echo("find server login process at {}.".format(process.pid))
-                        if platform.system() == 'Windows':
-                            os.system("taskkill /PID {} /T /F".format(process.pid))
-                        else:
-                            os.kill(process.pid, signal.SIGKILL)
-                            if clean_process_group:
-                                os.killpg(os.getpgid(process.pid), signal.SIGKILL)
-        except Exception as e:
-            pass
+    try:
+        for process in psutil.process_iter():
+            try:
+                pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
+                for cmd in pinfo["cmdline"]:
+                    if str(cmd).find(bootstrap_program) != -1:
+                        if os.path.basename(cmd) == bootstrap_program:
+                            # click.echo("find server login process at {}.".format(process.pid))
+                            if platform.system() == 'Windows':
+                                os.system("taskkill /PID {} /T /F".format(process.pid))
+                            else:
+                                os.kill(process.pid, signal.SIGKILL)
+                                if clean_process_group:
+                                    os.killpg(os.getpgid(process.pid), signal.SIGKILL)
+            except Exception as e:
+                print(f"Failed to cleanup the bootstrap process due to {e}.")
+                pass
+    except Exception as e:
+        print(f"Failed to cleanup the bootstrap process due to {e}.")
+        pass
 
 
 def cleanup_model_monitor_processes(run_id, end_point_name, model_id, model_name, model_version):
     # Cleanup all fedml server api processes.
-    for process in psutil.process_iter():
-        try:
-            pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
-            find_monitor_process = False
-            find_monitor_name_arg = False
-            find_endpoint_id_name_arg = False
-            for cmd in pinfo["cmdline"]:
-                if str(cmd).endswith("device_model_monitor.py"):
-                    find_monitor_name_arg = True
-
-                if find_monitor_name_arg and str(cmd) == f"-ep":
-                    find_endpoint_id_name_arg = True
-
-                if find_monitor_name_arg and find_endpoint_id_name_arg and str(cmd) == f"{run_id}":
-                    find_monitor_process = True
-                    break
+    try:
+        for process in psutil.process_iter():
+            try:
+                pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
+                find_monitor_process = False
+                find_monitor_name_arg = False
+                find_endpoint_id_name_arg = False
+                for cmd in pinfo["cmdline"]:
+                    if str(cmd).endswith("device_model_monitor.py"):
+                        find_monitor_name_arg = True
+
+                    if find_monitor_name_arg and str(cmd) == f"-ep":
+                        find_endpoint_id_name_arg = True
+
+                    if find_monitor_name_arg and find_endpoint_id_name_arg and str(cmd) == f"{run_id}":
+                        find_monitor_process = True
+                        break
 
-            if find_monitor_process:
-                # click.echo("find the monitor process at {}.".format(process.pid))
-                if platform.system() == 'Windows':
-                    os.system("taskkill /PID {} /T /F".format(process.pid))
-                else:
-                    os.kill(process.pid, signal.SIGKILL)
-                break
-        except Exception as e:
-            pass
+                if find_monitor_process:
+                    # click.echo("find the monitor process at {}.".format(process.pid))
+                    if platform.system() == 'Windows':
+                        os.system("taskkill /PID {} /T /F".format(process.pid))
+                    else:
+                        os.kill(process.pid, signal.SIGKILL)
+                    break
+            except Exception as e:
+                logging.error(f"Failed to cleanup the model monitor process due to {e}.")
+                pass
+    except Exception as e:
+        logging.error(f"For loop failed to stop the model inference monitor due to {e}.")
+        pass
 
 
 def get_process_running_count(process_name):
     count = 0
-    for process in psutil.process_iter():
-        try:
-            pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
-            for cmd in pinfo["cmdline"]:
-                if str(cmd).find(process_name) != -1:
-                    if os.path.basename(cmd) == process_name:
-                        count += 1
-        except Exception as e:
-            pass
+    try:
+        for process in psutil.process_iter():
+            try:
+                pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
+                for cmd in pinfo["cmdline"]:
+                    if str(cmd).find(process_name) != -1:
+                        if os.path.basename(cmd) == process_name:
+                            count += 1
+            except Exception as e:
+                print(f"Error in get_process_running_count: {e}")
+                pass
+    except Exception as e:
+        print(f"Error in get_process_running_count: {e}")
+        pass
 
     return count
 
 
 def edge_simulator_has_login(login_program="client_login.py"):
-    for process in psutil.process_iter():
-        try:
-            pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
-            found_login_process = False
-            found_simulator_process = False
-            for cmd in pinfo["cmdline"]:
-                if str(cmd).find(login_program) != -1:
-                    if os.path.basename(cmd) == login_program:
-                        found_login_process = True
-
-                if str(cmd).find("edge_simulator") != -1:
-                    found_simulator_process = True
-
-            if found_login_process and found_simulator_process:
-                return True
-        except Exception as e:
-            pass
+    try:
+        for process in psutil.process_iter():
+            try:
+                pinfo = process.as_dict(attrs=["pid", "name", "cmdline"])
+                found_login_process = False
+                found_simulator_process = False
+                for cmd in pinfo["cmdline"]:
+                    if str(cmd).find(login_program) != -1:
+                        if os.path.basename(cmd) == login_program:
+                            found_login_process = True
+
+                    if str(cmd).find("edge_simulator") != -1:
+                        found_simulator_process = True
+
+                if found_login_process and found_simulator_process:
+                    return True
+            except Exception as e:
+                print(f"Error in edge_simulator_has_login: {e}")
+                pass
+    except Exception as e:
+        print(f"Error in edge_simulator_has_login: {e}")
+        pass
 
     return False
 
diff --git a/python/fedml/computing/scheduler/slave/client_daemon.py b/python/fedml/computing/scheduler/slave/client_daemon.py
index a3dff06e2d..14b841707f 100755
--- a/python/fedml/computing/scheduler/slave/client_daemon.py
+++ b/python/fedml/computing/scheduler/slave/client_daemon.py
@@ -97,6 +97,7 @@
             time.sleep(3)
         else:
             login_logs = os.path.join(ClientConstants.get_log_file_dir(), "login.log")
+            # If we use this kind of command, we cannot propagate the environment variables to the subprocess
             run_login_cmd = f"nohup {get_python_program()} -W ignore {login_cmd} -t login -u {args.user} " \
                             f"-v {args.version} -r {args.role} -id {args.device_id} " \
                             f"-k {args.api_key} -ngc {str(args.no_gpu_check)} -mpt {args.marketplace_type} " \
@@ -106,6 +107,9 @@
             os.system(run_login_cmd)
 
             login_pids = RunProcessUtils.get_pid_from_cmd_line(login_cmd)
+            if len(login_pids) == 0:
+                print(f"[Client] Cannot find login pid {login_pids}, check the log file {login_logs}")
+                retry_count += 1
             while len(login_pids) > 0:
                 with open(login_logs, "r") as f:
                     log_list = f.readlines()
diff --git a/python/fedml/core/mlops/mlops_configs.py b/python/fedml/core/mlops/mlops_configs.py
index 338f59e697..891f721c9d 100644
--- a/python/fedml/core/mlops/mlops_configs.py
+++ b/python/fedml/core/mlops/mlops_configs.py
@@ -137,8 +137,20 @@ def _fetch_configs(configs) -> dict:
         request_configs = request_configs.union(configs)
         json_params = {"config_name": [config.value for config in request_configs],
                        "device_send_time": int(time.time() * 1000)}
-        response = MLOpsConfigs._request(request_url=url, request_json=json_params, cert_path=cert_path)
-        status_code = response.json().get("code")
+        try:
+            response = MLOpsConfigs._request(request_url=url, request_json=json_params, cert_path=cert_path)
+        except Exception as e:
+            print(f"Fetch configs failed due to {e} "
+                  f"please check the network connection and try again.")
+            return {}
+
+        msg_str = ""
+        if response:
+            status_code = response.json().get("code")
+            msg_str = response.json()
+        else:
+            status_code = "FAILED"
+
         result = {}
         if status_code == "SUCCESS":
             data = response.json().get("data")
@@ -147,7 +159,8 @@ def _fetch_configs(configs) -> dict:
             mlops_config = data.get(Configs.ML_OPS_CONFIG.value)
             MLOpsUtils.calc_ntp_from_config(mlops_config)
         else:
-            raise Exception("failed to fetch device configurations!")
+            raise Exception(f"failed to fetch device configs from server, with status code: {status_code} "
+                            f"and response: {msg_str}")
         return result
 
     @staticmethod

From 698e95e1b9281d387ead72075a3a6d417d2f7c29 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Mon, 2 Dec 2024 16:14:11 +0800
Subject: [PATCH 232/282] [fixbug] 1. The worker inference proxy port needs to
 be read from the configuration file 2. occupy_gpu_ids fails; getting gpu ids
 from the cache may return [] instead of None

---
 python/fedml/computing/scheduler/comm_utils/job_utils.py       | 2 +-
 .../model_scheduler/device_http_proxy_inference_protocol.py    | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_utils.py b/python/fedml/computing/scheduler/comm_utils/job_utils.py
index ae50239d25..0fca06a11b 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_utils.py
@@ -93,7 +93,7 @@ def occupy_gpu_ids(self, run_id, request_gpu_num, device_id, inner_id=None,
                                  f" for run {run_id}: {available_gpu_ids}")
 
                     # If the available GPU list is not in the cache, set it to the current system available GPU list
-                    if available_gpu_ids is None:
+                    if available_gpu_ids is None or available_gpu_ids == []:
                         # Get realtime GPU availability list from the system
                         available_gpu_ids = JobRunnerUtils.get_realtime_gpu_available_ids().copy()
                         logging.info(f"Cache not set yet, fetching realtime available GPU Ids: {available_gpu_ids}")
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_http_proxy_inference_protocol.py b/python/fedml/computing/scheduler/model_scheduler/device_http_proxy_inference_protocol.py
index 746d17bb7c..180b10994b 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_http_proxy_inference_protocol.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_http_proxy_inference_protocol.py
@@ -53,7 +53,8 @@ async def run_http_proxy_inference_with_request(
             # TODO(Raphael): Add support for GET and other methods
     ):
         inference_response = {}
-        http_proxy_url = f"http://{urlparse(inference_url).hostname}:{ClientConstants.LOCAL_CLIENT_API_PORT}/api/v1/predict"
+        worker_proxy_port = ClientConstants.get_inference_worker_proxy_port()
+        http_proxy_url = f"http://{urlparse(inference_url).hostname}:{worker_proxy_port}/api/v1/predict"
         if inference_type == "default":
             model_api_headers = {'Content-Type': 'application/json', 'Connection': 'close',
                                  'Accept': 'application/json'}

From 757e5f0faa7f737973ba2638d35dc6680ab7ff11 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Tue, 10 Dec 2024 15:00:01 +0800
Subject: [PATCH 233/282] add log in get_available_gpu_ids[hardware_utils.py]

---
 python/fedml/computing/scheduler/comm_utils/hardware_utils.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
index 56a75fe3e1..70f6e3d114 100644
--- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
@@ -62,7 +62,9 @@ def get_gpus() -> List[GPUCard]:
     def get_available_gpu_ids(order: str = "memory", limit: int = 1, max_load: float = 0.01,
                               max_memory: float = 0.01) -> List[int]:
         gpu_util = HardwareUtil.__get_util()
-        return gpu_util.get_available_gpu_card_ids(order, limit, max_load, max_memory) if gpu_util is not None else []
+        card_ids = gpu_util.get_available_gpu_card_ids(order, limit, max_load, max_memory) if gpu_util is not None else []
+        logging.info(f"Available GPU card ids ---> {card_ids}")
+        return card_ids
 
     @staticmethod
     def get_docker_gpu_device_mapping(gpu_ids: Optional[List[int]], num_gpus: int = 0) -> Optional[Dict]:

From c5c22c830b35eca26cac857fc7492d7f962c4f80 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Tue, 10 Dec 2024 17:18:09 +0800
Subject: [PATCH 234/282] add logs

---
 .../computing/scheduler/slave/base_slave_protocol_manager.py    | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index 05469e78ff..1af75d936e 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -379,6 +379,8 @@ def callback_report_device_info(self, topic, payload):
                                 "edge_info": device_info_json}
             if context is not None:
                 response_payload["context"] = context
+            
+            logging.info(f"Response payload --> {response_payload}")
             self.message_center.send_message(response_topic, json.dumps(response_payload), run_id=run_id)
 
     def callback_request_device_info_from_mlops(self, topic, payload):

From ffa54a374783e600af4b52707b3f8b4f41fc3e7b Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Tue, 10 Dec 2024 18:04:37 +0800
Subject: [PATCH 235/282] add logs

---
 python/fedml/computing/scheduler/comm_utils/job_utils.py       | 1 +
 python/fedml/computing/scheduler/comm_utils/sys_utils.py       | 1 +
 .../computing/scheduler/slave/base_slave_protocol_manager.py   | 3 +++
 3 files changed, 5 insertions(+)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_utils.py b/python/fedml/computing/scheduler/comm_utils/job_utils.py
index 0fca06a11b..fa143510cd 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_utils.py
@@ -351,6 +351,7 @@ def get_realtime_gpu_available_ids():
         gpu_list = sys_utils.get_gpu_list()
         gpu_count = len(gpu_list)
         realtime_available_gpu_ids = sys_utils.get_available_gpu_id_list(limit=gpu_count)
+        logging.info(f"get_available_gpu_id_list limit:{gpu_count}, available_gpu_ids:{realtime_available_gpu_ids}")
         return realtime_available_gpu_ids
 
     @staticmethod
diff --git a/python/fedml/computing/scheduler/comm_utils/sys_utils.py b/python/fedml/computing/scheduler/comm_utils/sys_utils.py
index 6dbef9bde3..fe518e4ba4 100644
--- a/python/fedml/computing/scheduler/comm_utils/sys_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/sys_utils.py
@@ -191,6 +191,7 @@ def get_available_gpu_id_list(limit=1) -> List[int]:
 
     gpu_available_list = HardwareUtil.get_available_gpu_ids(order='memory', limit=limit, max_load=0.01,
                                                             max_memory=0.01)
+    logging.info(f"GPU available ids from HardwareUtil.get_available_gpu_ids, limit --> {limit}")
     return gpu_available_list
 
 
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index 1af75d936e..9ba8c26e15 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -340,10 +340,13 @@ def callback_report_device_info(self, topic, payload):
             else:
                 total_mem, free_mem, total_disk_size, free_disk_size, cup_utilization, cpu_cores, gpu_cores_total, \
                     gpu_cores_available, sent_bytes, recv_bytes, gpu_available_ids = sys_utils.get_sys_realtime_stats()
+                logging.info(f"GPU available ids from get_sys_realtime_stats --> {gpu_available_ids}")
                 host_ip = sys_utils.get_host_ip()
                 host_port = sys_utils.get_available_port()
                 gpu_available_ids = JobRunnerUtils.get_available_gpu_id_list(self.edge_id)
+                logging.info(f"GPU available ids from get_available_gpu_id_list(device_id) --> {gpu_available_ids}")
                 gpu_available_ids = JobRunnerUtils.trim_unavailable_gpu_ids(gpu_available_ids)
+                logging.info(f"GPU available ids from trim_unavailable_gpu_ids --> {gpu_available_ids}")
                 gpu_cores_available = len(gpu_available_ids)
                 gpu_list = sys_utils.get_gpu_list()
                 device_info_json = {

From 589dc47a551c0317a7ab34616b6847ef496cdc0b Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Tue, 10 Dec 2024 19:01:39 +0800
Subject: [PATCH 236/282] add logs

---
 .../fedml/computing/scheduler/comm_utils/hardware_utils.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
index 70f6e3d114..96aeecfc4a 100644
--- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
@@ -56,14 +56,17 @@ def __get_util(cls) -> Optional[GPUCardUtil]:
     @staticmethod
     def get_gpus() -> List[GPUCard]:
         gpu_util = HardwareUtil.__get_util()
-        return gpu_util.get_gpu_cards() if gpu_util is not None else []
+        cards = gpu_util.get_gpu_cards() if gpu_util is not None else []
+        logging.info(f"hardware_utils Available GPU cards len ---> { len(cards)}")
+        logging.info(f"hardware_utils Available GPU cards ---> {cards}")
+        return cards
 
     @staticmethod
     def get_available_gpu_ids(order: str = "memory", limit: int = 1, max_load: float = 0.01,
                               max_memory: float = 0.01) -> List[int]:
         gpu_util = HardwareUtil.__get_util()
         card_ids = gpu_util.get_available_gpu_card_ids(order, limit, max_load, max_memory) if gpu_util is not None else []
-        logging.info(f"Available GPU card ids ---> {card_ids}")
+        logging.info(f"hardware_utils get_available_gpu_ids ids ---> {card_ids}")
         return card_ids
 
     @staticmethod

From 40735d96f98ec8dcdcd03f8a24bc39838ca2c2d3 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Tue, 10 Dec 2024 19:48:16 +0800
Subject: [PATCH 237/282] add logs

---
 python/fedml/computing/scheduler/comm_utils/hardware_utils.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
index 96aeecfc4a..f50f5d73f5 100644
--- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
@@ -58,7 +58,6 @@ def get_gpus() -> List[GPUCard]:
         gpu_util = HardwareUtil.__get_util()
         cards = gpu_util.get_gpu_cards() if gpu_util is not None else []
         logging.info(f"hardware_utils Available GPU cards len ---> { len(cards)}")
-        logging.info(f"hardware_utils Available GPU cards ---> {cards}")
         return cards
 
     @staticmethod
@@ -66,7 +65,7 @@ def get_available_gpu_ids(order: str = "memory", limit: int = 1, max_load: float
                               max_memory: float = 0.01) -> List[int]:
         gpu_util = HardwareUtil.__get_util()
         card_ids = gpu_util.get_available_gpu_card_ids(order, limit, max_load, max_memory) if gpu_util is not None else []
-        logging.info(f"hardware_utils get_available_gpu_ids ids ---> {card_ids}")
+        logging.info(f"hardware_utils get_available_gpu_ids ids ---> {card_ids}, limit ---> {limit}")
         return card_ids
 
     @staticmethod

From 90c1191ddf942d580715715eddab334836ee80ea Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 11 Dec 2024 11:11:08 +0800
Subject: [PATCH 238/282] [bugfix] Enhance GPU management (need to compare the
 realtime available gpu ids with the initial gpu ids because the system gpu
 resources may change) and logging in hardware and job utilities

---
 .../scheduler/comm_utils/job_utils.py         | 23 +++++++++++---
 .../scheduler_core/compute_gpu_cache.py       | 31 +++++++++++++++++++
 2 files changed, 49 insertions(+), 5 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_utils.py b/python/fedml/computing/scheduler/comm_utils/job_utils.py
index fa143510cd..21becad68e 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_utils.py
@@ -86,8 +86,7 @@ def occupy_gpu_ids(self, run_id, request_gpu_num, device_id, inner_id=None,
                     # Get the available GPU list, FEDML_GLOBAL_DEVICE_AVAILABLE_GPU_IDS_TAG-${device_id}
                     available_gpu_ids = ComputeCacheManager.get_instance().get_gpu_cache().get_device_available_gpu_ids(
                         device_id)
-                    logging.info(
-                        f"Available GPU Ids fetched from cache: {available_gpu_ids}")
+                    logging.info(f"Available GPU Ids fetched from cache: {available_gpu_ids}")
 
                     logging.info(f"Check worker({device_id})'s realtime gpu availability in DB"
                                  f" for run {run_id}: {available_gpu_ids}")
@@ -99,8 +98,20 @@ def occupy_gpu_ids(self, run_id, request_gpu_num, device_id, inner_id=None,
                         logging.info(f"Cache not set yet, fetching realtime available GPU Ids: {available_gpu_ids}")
                     else:
                         available_gpu_ids = JobRunnerUtils.trim_unavailable_gpu_ids(available_gpu_ids)
-                        logging.info(
-                            f"Trimmed available GPU Ids: {available_gpu_ids}")
+                        logging.info(f"Trimmed available GPU Ids: {available_gpu_ids}")
+                        
+                        # check if the initial available GPU ids is changed
+                        initial_available_gpu_ids = ComputeCacheManager.get_instance().get_gpu_cache().get_device_initial_available_gpu_ids(device_id)
+                        # logging.info(f"Initial available GPU Ids: {initial_available_gpu_ids}")
+                        if initial_available_gpu_ids and (not all(gpu_id in available_gpu_ids for gpu_id in initial_available_gpu_ids)):
+                            # then add the initial_available_gpu_ids to the available_gpu_ids
+                            available_gpu_ids.extend(initial_available_gpu_ids)
+                            available_gpu_ids = list(set(available_gpu_ids))
+                            # sort the available_gpu_ids
+                            available_gpu_ids.sort()
+                            logging.info(f"Device {device_id} available GPU ids is changed because of the system gpu resource change, "
+                                         f"initial available GPU ids: {initial_available_gpu_ids}, "
+                                         f"current available GPU ids: {available_gpu_ids}")
 
                     # Get the matched gpu ids string by the request gpu num
                     cuda_visible_gpu_ids_str, matched_gpu_num = JobRunnerUtils.request_gpu_ids(request_gpu_num,
@@ -317,10 +328,12 @@ def get_available_gpu_id_list(device_id):
                     device_id)
 
                 # If the available GPU list is not in the cache, set it to the current system available GPU list
-                if available_gpu_ids is None:
+                if available_gpu_ids is None or available_gpu_ids == []:
                     # Get realtime GPU availability list from the system
                     gpu_ids = JobRunnerUtils.get_realtime_gpu_available_ids().copy()
                     ComputeCacheManager.get_instance().get_gpu_cache().set_device_available_gpu_ids(device_id, gpu_ids)
+                    # Set the initial available GPU ids to the cache, use to check if the device all available GPU ids is changed because of the system resource change
+                    ComputeCacheManager.get_instance().get_gpu_cache().set_device_initial_available_gpu_ids(device_id, gpu_ids)
                     available_gpu_ids = gpu_ids
             return available_gpu_ids
 
diff --git a/python/fedml/computing/scheduler/scheduler_core/compute_gpu_cache.py b/python/fedml/computing/scheduler/scheduler_core/compute_gpu_cache.py
index 7bab71212e..6b3addc320 100755
--- a/python/fedml/computing/scheduler/scheduler_core/compute_gpu_cache.py
+++ b/python/fedml/computing/scheduler/scheduler_core/compute_gpu_cache.py
@@ -10,6 +10,7 @@ class ComputeGpuCache(object):
     FEDML_GLOBAL_DEVICE_RUN_NUM_GPUS_TAG = "FEDML_GLOBAL_DEVICE_RUN_NUM_GPUS_TAG-"
     FEDML_GLOBAL_DEVICE_RUN_GPU_IDS_TAG = "FEDML_GLOBAL_DEVICE_RUN_GPU_IDS_TAG-"
     FEDML_GLOBAL_DEVICE_AVAILABLE_GPU_IDS_TAG = "FEDML_GLOBAL_DEVICE_AVAILABLE_GPU_IDS_TAG-"
+    FEDML_GLOBAL_DEVICE_INITIAL_AVAILABLE_GPU_IDS_TAG = "FEDML_GLOBAL_DEVICE_INITIAL_AVAILABLE_GPU_IDS_TAG-"
     FEDML_GLOBAL_DEVICE_TOTAL_NUM_GPUS_TAG = "FEDML_GLOBAL_DEVICE_TOTAL_NUM_GPUS_TAG-"
     FEDML_GLOBAL_RUN_TOTAL_NUM_GPUS_TAG = "FEDML_GLOBAL_RUN_TOTAL_NUM_GPUS_TAG-"
     FEDML_GLOBAL_RUN_DEVICE_IDS_TAG = "FEDML_GLOBAL_RUN_DEVICE_IDS_TAG-"
@@ -107,6 +108,25 @@ def get_device_available_gpu_ids(self, device_id):
             return []
 
         return device_available_gpu_ids
+    
+    def get_device_initial_available_gpu_ids(self, device_id):
+        # Get the initial available GPU ids from the cache, for checking whether the device's available GPU ids have changed
+        device_initial_available_gpu_ids = None
+        try:
+            if self.redis_connection.exists(self.get_device_initial_available_gpu_ids_key(device_id)):
+                device_initial_available_gpu_ids = self.redis_connection.get(self.get_device_initial_available_gpu_ids_key(device_id))
+                if str(device_initial_available_gpu_ids).strip() == "":
+                    return []
+        except Exception as e:
+            pass
+
+        if device_initial_available_gpu_ids is not None and str(device_initial_available_gpu_ids).strip() != "":
+            device_initial_available_gpu_ids = device_initial_available_gpu_ids.split(',')
+            device_initial_available_gpu_ids = self.map_str_list_to_int_list(device_initial_available_gpu_ids)
+        else:
+            return []
+
+        return device_initial_available_gpu_ids
 
     def get_device_total_num_gpus(self, device_id):
         device_total_num_gpus = None
@@ -241,6 +261,14 @@ def set_device_available_gpu_ids(self, device_id, gpu_ids):
             pass
 
         ComputeGpuDatabase.get_instance().set_device_available_gpu_ids(device_id, gpu_ids)
+    
+    def set_device_initial_available_gpu_ids(self, device_id, gpu_ids):
+        # Set the initial available GPU ids in the cache, used to check whether the device's available GPU ids have changed
+        try:
+            str_gpu_ids = self.map_list_to_str(gpu_ids)
+            self.redis_connection.set(self.get_device_initial_available_gpu_ids_key(device_id), str_gpu_ids)
+        except Exception as e:
+            pass
 
     def set_device_total_num_gpus(self, device_id, num_gpus):
         try:
@@ -311,6 +339,9 @@ def get_device_run_gpu_ids_key(device_id, run_id):
 
     def get_device_available_gpu_ids_key(self, device_id):
         return f"{ComputeGpuCache.FEDML_GLOBAL_DEVICE_AVAILABLE_GPU_IDS_TAG}{device_id}"
+    
+    def get_device_initial_available_gpu_ids_key(self, device_id):
+        return f"{ComputeGpuCache.FEDML_GLOBAL_DEVICE_INITIAL_AVAILABLE_GPU_IDS_TAG}{device_id}"
 
     def get_device_total_num_gpus_key(self, device_id):
         return f"{ComputeGpuCache.FEDML_GLOBAL_DEVICE_TOTAL_NUM_GPUS_TAG}{device_id}"

From 30e1f70de68ed60efb38f0bf77083b97ac556949 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 11 Dec 2024 11:32:47 +0800
Subject: [PATCH 239/282] [debug]add logs

---
 python/fedml/computing/scheduler/comm_utils/job_utils.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_utils.py b/python/fedml/computing/scheduler/comm_utils/job_utils.py
index 21becad68e..ff5a2dd478 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_utils.py
@@ -102,7 +102,7 @@ def occupy_gpu_ids(self, run_id, request_gpu_num, device_id, inner_id=None,
                         
                         # check if the initial available GPU ids is changed
                         initial_available_gpu_ids = ComputeCacheManager.get_instance().get_gpu_cache().get_device_initial_available_gpu_ids(device_id)
-                        # logging.info(f"Initial available GPU Ids: {initial_available_gpu_ids}")
+                        logging.info(f"Initial available GPU Ids: {initial_available_gpu_ids}")
                         if initial_available_gpu_ids and (not all(gpu_id in available_gpu_ids for gpu_id in initial_available_gpu_ids)):
                             # then add the initial_available_gpu_ids to the available_gpu_ids
                             available_gpu_ids.extend(initial_available_gpu_ids)
@@ -334,6 +334,7 @@ def get_available_gpu_id_list(device_id):
                     ComputeCacheManager.get_instance().get_gpu_cache().set_device_available_gpu_ids(device_id, gpu_ids)
                     # Set the initial available GPU ids to the cache, use to check if the device all available GPU ids is changed because of the system resource change
                     ComputeCacheManager.get_instance().get_gpu_cache().set_device_initial_available_gpu_ids(device_id, gpu_ids)
+                    logging.info(f"Set device {device_id} initial available GPU ids: {gpu_ids}")
                     available_gpu_ids = gpu_ids
             return available_gpu_ids
 

From 21a374ce82e028153b31473d6e127637d43a322e Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 11 Dec 2024 12:50:05 +0800
Subject: [PATCH 240/282] [bugfix] Enhance GPU cache management by setting
 initial available GPU IDs when reset available gpus

---
 python/fedml/computing/scheduler/comm_utils/job_utils.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_utils.py b/python/fedml/computing/scheduler/comm_utils/job_utils.py
index ff5a2dd478..de25a8e342 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_utils.py
@@ -353,6 +353,9 @@ def reset_available_gpu_id_list(device_id):
                 current_available_gpu_ids = JobRunnerUtils.get_realtime_gpu_available_ids().copy()
                 ComputeCacheManager.get_instance().get_gpu_cache().set_device_available_gpu_ids(device_id,
                                                                                                 current_available_gpu_ids)
+                # Set the initial available GPU ids in the cache, used to check whether the device's available GPU ids have changed because of a system resource change
+                ComputeCacheManager.get_instance().get_gpu_cache().set_device_initial_available_gpu_ids(device_id, current_available_gpu_ids)
+                
                 gpu_list = sys_utils.get_gpu_list()
                 ComputeCacheManager.get_instance().get_gpu_cache().set_device_total_num_gpus(device_id, len(gpu_list))
         except Exception as e:

From 02b87f44facee475aeb4f933dd02a349e8f241df Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 11 Dec 2024 13:18:08 +0800
Subject: [PATCH 241/282] [bugfix]calculate the difference between
 realtime_available_gpu_ids and initial_available_gpu_ids

---
 .../scheduler/comm_utils/job_utils.py         | 29 +++++++++++--------
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_utils.py b/python/fedml/computing/scheduler/comm_utils/job_utils.py
index de25a8e342..654e7d2ea2 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_utils.py
@@ -90,28 +90,33 @@ def occupy_gpu_ids(self, run_id, request_gpu_num, device_id, inner_id=None,
 
                     logging.info(f"Check worker({device_id})'s realtime gpu availability in DB"
                                  f" for run {run_id}: {available_gpu_ids}")
+
+                    # Get realtime GPU availability list from the system
+                    realtime_available_gpu_ids = JobRunnerUtils.get_realtime_gpu_available_ids().copy()
+                    logging.info(f"Cache not set yet, fetching realtime available GPU Ids: {realtime_available_gpu_ids}")
 
                     # If the available GPU list is not in the cache, set it to the current system available GPU list
                     if available_gpu_ids is None or available_gpu_ids == []:
                         # Get realtime GPU availability list from the system
-                        available_gpu_ids = JobRunnerUtils.get_realtime_gpu_available_ids().copy()
-                        logging.info(f"Cache not set yet, fetching realtime available GPU Ids: {available_gpu_ids}")
+                        available_gpu_ids = realtime_available_gpu_ids
                     else:
                         available_gpu_ids = JobRunnerUtils.trim_unavailable_gpu_ids(available_gpu_ids)
                         logging.info(f"Trimmed available GPU Ids: {available_gpu_ids}")
-                        
-                        # check if the initial available GPU ids is changed
-                        initial_available_gpu_ids = ComputeCacheManager.get_instance().get_gpu_cache().get_device_initial_available_gpu_ids(device_id)
-                        logging.info(f"Initial available GPU Ids: {initial_available_gpu_ids}")
-                        if initial_available_gpu_ids and (not all(gpu_id in available_gpu_ids for gpu_id in initial_available_gpu_ids)):
-                            # then add the initial_available_gpu_ids to the available_gpu_ids
-                            available_gpu_ids.extend(initial_available_gpu_ids)
+
+                        initial_available_gpu_ids = ComputeCacheManager.get_instance().get_gpu_cache().get_device_initial_available_gpu_ids(
+                            device_id)
+                        # calculate the difference between realtime_available_gpu_ids and initial_available_gpu_ids
+                        # if the difference is not empty, then add to available gpu ids
+                        diff_gpu_ids = list(set(realtime_available_gpu_ids) - set(initial_available_gpu_ids))
+                        if diff_gpu_ids:
+                            available_gpu_ids.extend(diff_gpu_ids)
                             available_gpu_ids = list(set(available_gpu_ids))
-                            # sort the available_gpu_ids
                             available_gpu_ids.sort()
                             logging.info(f"Device {device_id} available GPU ids is changed because of the system gpu resource change, "
-                                         f"initial available GPU ids: {initial_available_gpu_ids}, "
-                                         f"current available GPU ids: {available_gpu_ids}")
+                                         f"initial available gpu ids: {initial_available_gpu_ids}, "
+                                         f"realtime available gpu ids: {realtime_available_gpu_ids}, "
+                                         f"diff gpu ids: {diff_gpu_ids}, "
+                                         f"new available gpu ids: {available_gpu_ids}")
 
                     # Get the matched gpu ids string by the request gpu num
                     cuda_visible_gpu_ids_str, matched_gpu_num = JobRunnerUtils.request_gpu_ids(request_gpu_num,

From 9ff4a560776205abcf18cb3405ef599fc6060778 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 11 Dec 2024 16:23:41 +0800
Subject: [PATCH 242/282] [bugfix]set shm_size to 8G if not specified

---
 python/fedml/computing/scheduler/comm_utils/job_utils.py      | 4 ++--
 .../scheduler/model_scheduler/device_model_deployment.py      | 4 ++++
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_utils.py b/python/fedml/computing/scheduler/comm_utils/job_utils.py
index 654e7d2ea2..b1ecd9b11b 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_utils.py
@@ -96,7 +96,7 @@ def occupy_gpu_ids(self, run_id, request_gpu_num, device_id, inner_id=None,
                     logging.info(f"Cache not set yet, fetching realtime available GPU Ids: {realtime_available_gpu_ids}")
 
                     # If the available GPU list is not in the cache, set it to the current system available GPU list
-                    if available_gpu_ids is None or available_gpu_ids == []:
+                    if available_gpu_ids is None:
                         # Get realtime GPU availability list from the system
                         available_gpu_ids = realtime_available_gpu_ids
                     else:
@@ -333,7 +333,7 @@ def get_available_gpu_id_list(device_id):
                     device_id)
 
                 # If the available GPU list is not in the cache, set it to the current system available GPU list
-                if available_gpu_ids is None or available_gpu_ids == []:
+                if available_gpu_ids is None:
                     # Get realtime GPU availability list from the system
                     gpu_ids = JobRunnerUtils.get_realtime_gpu_available_ids().copy()
                     ComputeCacheManager.get_instance().get_gpu_cache().set_device_available_gpu_ids(device_id, gpu_ids)
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index 2fbbbfcb0d..baee7a2973 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -398,6 +398,10 @@ def parse_resource_related_config(config, gpu_num_frm_platform=0):
         num_gpus = 0
 
     shm_size = config.get('shm_size', None)
+    # set shm_size to 8G if not specified
+    if not shm_size:
+        shm_size = "8G"
+
     storage_opt = config.get('storage_opt', None)
     tmpfs = config.get('tmpfs', None)
     cpus = config.get('cpus', None)

From d5831b956c5b6d2e60033e47559d21b668b41d25 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Tue, 17 Dec 2024 17:10:45 +0800
Subject: [PATCH 243/282] Revert "Merge pull request #2233 from
 FedML-AI/charlie/dev/v0.7.0"


From bb31c93a66539608333c58867503b89acfebb6ae Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 18 Dec 2024 17:19:31 +0800
Subject: [PATCH 244/282] remove debug logs

---
 python/fedml/computing/scheduler/comm_utils/hardware_utils.py | 4 ++--
 python/fedml/computing/scheduler/comm_utils/sys_utils.py      | 1 -
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
index f50f5d73f5..8e0763753f 100644
--- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
@@ -57,7 +57,7 @@ def __get_util(cls) -> Optional[GPUCardUtil]:
     def get_gpus() -> List[GPUCard]:
         gpu_util = HardwareUtil.__get_util()
         cards = gpu_util.get_gpu_cards() if gpu_util is not None else []
-        logging.info(f"hardware_utils Available GPU cards len ---> { len(cards)}")
+        # logging.info(f"hardware_utils Available GPU cards len ---> { len(cards)}")
         return cards
 
     @staticmethod
@@ -65,7 +65,7 @@ def get_available_gpu_ids(order: str = "memory", limit: int = 1, max_load: float
                               max_memory: float = 0.01) -> List[int]:
         gpu_util = HardwareUtil.__get_util()
         card_ids = gpu_util.get_available_gpu_card_ids(order, limit, max_load, max_memory) if gpu_util is not None else []
-        logging.info(f"hardware_utils get_available_gpu_ids ids ---> {card_ids}, limit ---> {limit}")
+        # logging.info(f"hardware_utils get_available_gpu_ids ids ---> {card_ids}, limit ---> {limit}")
         return card_ids
 
     @staticmethod
diff --git a/python/fedml/computing/scheduler/comm_utils/sys_utils.py b/python/fedml/computing/scheduler/comm_utils/sys_utils.py
index fe518e4ba4..6dbef9bde3 100644
--- a/python/fedml/computing/scheduler/comm_utils/sys_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/sys_utils.py
@@ -191,7 +191,6 @@ def get_available_gpu_id_list(limit=1) -> List[int]:
 
     gpu_available_list = HardwareUtil.get_available_gpu_ids(order='memory', limit=limit, max_load=0.01,
                                                             max_memory=0.01)
-    logging.info(f"GPU available ids from HardwareUtil.get_available_gpu_ids, limit --> {limit}")
     return gpu_available_list
 
 

From e27b830a7080331ed8f46d4e9ff0ceea32d4be2c Mon Sep 17 00:00:00 2001
From: "alex.liang" <alexliang.kh@gmail.com>
Date: Fri, 20 Dec 2024 11:15:48 +0800
Subject: [PATCH 245/282] check the gpu availability using the random api to
 adapt to the rental gpus.

---
 python/fedml/__init__.py                                       | 2 +-
 .../computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py   | 3 ++-
 .../computing/scheduler/slave/base_slave_protocol_manager.py   | 2 +-
 python/fedml/core/mlops/mlops_device_perfs.py                  | 2 +-
 python/setup.py                                                | 2 +-
 5 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index d6341c25c9..677d06b4e5 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -37,7 +37,7 @@
 _global_training_type = None
 _global_comm_backend = None
 
-__version__ = "0.9.0"
+__version__ = "0.9.2"
 
 
 # This is the deployment environment used for different roles (RD/PM/BD/Public Developers). Potential VALUE: local, dev, test, release
diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
index 34d0c3be1c..a6717de8cb 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
@@ -25,7 +25,8 @@ def get_gpu_cards() -> List[GPUCard]:
 
     @staticmethod
     def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memory: float) -> List[int]:
-        return GPUtil.getAvailable(order=order, limit=limit, maxLoad=max_load, maxMemory=max_memory)
+        # return GPUtil.getAvailable(order=order, limit=limit, maxLoad=max_load, maxMemory=max_memory)
+        return GPUtil.getAvailable(order='random', limit=limit)
 
     @staticmethod
     def get_docker_gpu_device_mapping(gpu_ids: List[int], num_gpus: int = 0) -> Optional[Dict]:
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
index 9ba8c26e15..3acd9f2488 100755
--- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
+++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py
@@ -321,7 +321,7 @@ def callback_report_device_info(self, topic, payload):
         run_id = payload_json.get("run_id", 0)
         listen_edge_id = str(topic).split("/")[-1]
         context = payload_json.get("context", None)
-        need_gpu_info = payload_json.get("need_gpu_info", False)
+        need_gpu_info = payload_json.get("need_gpu_info", True)
         need_running_process_list = payload_json.get("need_running_process_list", False)
         model_master_device_id = payload_json.get("model_master_device_id", None)
         model_slave_device_id_list = payload_json.get("model_slave_device_id_list", None)
diff --git a/python/fedml/core/mlops/mlops_device_perfs.py b/python/fedml/core/mlops/mlops_device_perfs.py
index 4bb41df73f..0c2bde6785 100644
--- a/python/fedml/core/mlops/mlops_device_perfs.py
+++ b/python/fedml/core/mlops/mlops_device_perfs.py
@@ -233,7 +233,7 @@ def report_gpu_device_info(edge_id, mqtt_mgr=None):
         # Do not use the following two lines as the realtime available gpu ids.
         # gpu_available_ids = JobRunnerUtils.get_available_gpu_id_list(edge_id)
         # gpu_available_ids = JobRunnerUtils.trim_unavailable_gpu_ids(gpu_available_ids)
-        gpu_cores_available = len(gpu_available_ids)
+        gpu_cores_available = len(gpu_available_ids) if gpu_available_ids is not None else 0
         deploy_worker_id_list = list()
         try:
             deploy_worker_id_list = json.loads(os.environ.get("FEDML_DEPLOY_WORKER_IDS", "[]"))
diff --git a/python/setup.py b/python/setup.py
index 3847f360fa..032bdb4eed 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -126,7 +126,7 @@ def finalize_options(self):
 
 setup(
     name="fedml",
-    version="0.9.0",
+    version="0.9.2",
     author="FedML Team",
     author_email="ch@fedml.ai",
     description="A research and production integrated edge-cloud library for "

From 77c69066507aac2ba791e239b63dd3dd3c32ecd9 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Fri, 20 Dec 2024 12:41:41 +0800
Subject: [PATCH 246/282] [bugfix]Adapt log method(in transformers/trainer.py)
 parameters

---
 python/fedml/train/llm/hf_trainer.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/fedml/train/llm/hf_trainer.py b/python/fedml/train/llm/hf_trainer.py
index aaf27340fe..e287a8518f 100644
--- a/python/fedml/train/llm/hf_trainer.py
+++ b/python/fedml/train/llm/hf_trainer.py
@@ -64,7 +64,7 @@ def __init__(
         if TYPE_CHECKING:
             self.args: ExperimentArguments = self.args  # noqa
 
-    def log(self, logs: Dict[str, float]) -> None:
+    def log(self, logs: Dict[str, float], start_time: Optional[float] = None) -> None:
         # Adapted from https://github.com/huggingface/transformers/blob/b71f20a7c9f3716d30f6738501559acf863e2c5c/examples/pytorch/language-modeling/run_clm.py#L630-L634
         # compute perplexity
         for key in tuple(logs.keys()):
@@ -77,7 +77,7 @@ def log(self, logs: Dict[str, float]) -> None:
                     perplexity = math.inf
                 logs[f"{prefix}perplexity"] = perplexity
 
-        super().log(logs)
+        super().log(logs, start_time)
 
     def has_callback(self, callback: Union[Type[TrainerCallback], TrainerCallback]) -> bool:
         # Adapted from https://github.com/huggingface/transformers/blob/a7da2996a00c0ea083012ac86ab70f0bc4799f33/src/transformers/trainer_callback.py#L332

From 74803feed153298eec990347b48fa66203fd936f Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Fri, 20 Dec 2024 14:48:50 +0800
Subject: [PATCH 247/282] [bugfix]Add the .zip suffix to the s3 key of the
 model card

---
 .../computing/scheduler/model_scheduler/device_model_cards.py   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cards.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cards.py
index c2f11a2917..8697d0a62c 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_cards.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cards.py
@@ -784,7 +784,7 @@ def push_model_to_s3(self, model_name, model_zip_path, user_id, show_progress=Tr
         args = {"config_version": self.config_version}
         _, s3_config = ModelOpsConfigs.get_instance(args).fetch_configs(self.config_version)
         s3_storage = S3Storage(s3_config)
-        model_dst_key = "{}@{}@{}".format(user_id, model_name, str(uuid.uuid4()))
+        model_dst_key = "{}@{}@{}.zip".format(user_id, model_name, str(uuid.uuid4()))
         model_storage_url = s3_storage.upload_file_with_progress(model_zip_path, model_dst_key,
                                                                  show_progress=show_progress,
                                                                  out_progress_to_err=True,

From 93f9760de9dbbc389889942b6ba1f148c03aa9ff Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Fri, 20 Dec 2024 16:39:45 +0800
Subject: [PATCH 248/282] =?UTF-8?q?[update]Upgrade=20official=20website=20?=
 =?UTF-8?q?address:=20https://tensoropera.ai=20,=20an=20the=20brand:=20Ten?=
 =?UTF-8?q?sorOpera=20=C2=AE=20=20AI?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 python/README.md                              |  4 +-
 python/examples/README.md                     | 14 +++----
 .../examples/deploy/complex_example/README.md |  2 +-
 .../deploy/complex_example/config.yaml        |  2 +-
 python/examples/deploy/mnist/README.md        |  4 +-
 python/examples/deploy/mnist/mnist.yaml       |  2 +-
 .../examples/deploy/multi_service/README.md   |  6 +--
 python/examples/deploy/quick_start/README.md  |  2 +-
 .../deploy/scalellm-multi-engine/README.md    |  2 +-
 python/examples/deploy/scalellm/README.md     |  2 +-
 .../deploy/streaming_response/README.md       |  2 +-
 .../deploy/streaming_response/config.yaml     |  2 +-
 python/examples/deploy/triton/README.md       |  6 +--
 python/examples/deploy/your_own_llm/README.md |  4 +-
 python/examples/deploy/your_own_llm/llm.yaml  |  2 +-
 .../grpc_docker_fedmlai/README.md             |  4 +-
 python/examples/launch/README.md              |  4 +-
 .../launch/federate_build_package/README.md   |  2 +-
 .../launch/train_build_package/README.md      |  2 +-
 python/examples/train/README.md               |  2 +-
 python/examples/train/llm_train/job.yaml      |  2 +-
 python/fedml/api/constants.py                 |  2 +-
 python/fedml/api/modules/build.py             |  4 +-
 python/fedml/api/modules/device.py            |  6 +--
 python/fedml/api/modules/model.py             |  4 +-
 python/fedml/api/modules/utils.py             |  2 +-
 python/fedml/cli/README.md                    | 20 +++++-----
 python/fedml/cli/modules/build.py             |  4 +-
 python/fedml/cli/modules/cluster.py           |  8 ++--
 python/fedml/cli/modules/device.py            | 18 ++++-----
 python/fedml/cli/modules/federate.py          |  4 +-
 python/fedml/cli/modules/launch.py            |  4 +-
 python/fedml/cli/modules/login.py             | 12 +++---
 python/fedml/cli/modules/logout.py            |  4 +-
 python/fedml/cli/modules/run.py               | 40 +++++++++----------
 python/fedml/cli/modules/storage.py           | 16 ++++----
 python/fedml/cli/modules/train.py             |  4 +-
 .../computing/scheduler/env/collect_env.py    |  2 +-
 .../scheduler_core/account_manager.py         |  2 +-
 .../scheduler/scheduler_entry/README.md       |  4 +-
 .../scheduler/scheduler_entry/app_manager.py  |  4 +-
 python/fedml/core/mlops/__init__.py           |  8 ++--
 .../serving/templates/hf_template/config.yaml |  2 +-
 .../customized_job_example/README.md          |  4 +-
 python/spotlight_prj/unitedllm/README.md      |  2 +-
 45 files changed, 126 insertions(+), 126 deletions(-)

diff --git a/python/README.md b/python/README.md
index ad85023e6d..1e30f2b167 100644
--- a/python/README.md
+++ b/python/README.md
@@ -43,5 +43,5 @@ Other low-level APIs related to security and privacy are also supported. All alg
 
 **utils**: Common utilities shared by other modules.
 
-## About FedML, Inc.
-https://FedML.ai
+## About TensorOpera, Inc.
+https://tensoropera.ai
diff --git a/python/examples/README.md b/python/examples/README.md
index 45086c27cf..32831a63f3 100644
--- a/python/examples/README.md
+++ b/python/examples/README.md
@@ -2,14 +2,14 @@
 # FEDML Examples (Including Prebuilt Jobs in Jobs Store)
 
 - `FedML/python/examples` -- examples for training, deployment, and federated learning
-  - `FedML/python/examples/launch` -- examples for FEDML®Launch
-  - `FedML/python/examples/serving` -- examples for FEDML®Deploy
-  - `FedML/python/examples/train` -- examples for FEDML®Train
-  - `FedML/python/examples/cross_cloud` -- examples for FEDML®Train cross-cloud distributed training
+  - `FedML/python/examples/launch` -- examples for TensorOpera®Launch
+  - `FedML/python/examples/serving` -- examples for TensorOpera®Deploy
+  - `FedML/python/examples/train` -- examples for TensorOpera®Train
+  - `FedML/python/examples/cross_cloud` -- examples for TensorOpera®Train cross-cloud distributed training
   - `FedML/python/examples/federate/prebuilt_jobs` -- examples for federated learning prebuilt jobs (FedCV, FedNLP, FedGraphNN, Healthcare, etc.)
   - `FedML/python/examples/federate/cross_silo` -- examples for cross-silo federated learning
   - `FedML/python/examples/federate/cross_device` -- examples for cross-device federated learning
   - `FedML/python/examples/federate/simulation` -- examples for federated learning simulation
-  - `FedML/python/examples/federate/security` -- examples for FEDML®Federate security related features
-  - `FedML/python/examples/federate/privacy` -- examples for FEDML®Federate privacy related features
-  - `FedML/python/examples/federate/federated_analytics` -- examples for FEDML®Federate federated analytics (FA)
+  - `FedML/python/examples/federate/security` -- examples for TensorOpera®Federate security related features
+  - `FedML/python/examples/federate/privacy` -- examples for TensorOpera®Federate privacy related features
+  - `FedML/python/examples/federate/federated_analytics` -- examples for TensorOpera®Federate federated analytics (FA)
diff --git a/python/examples/deploy/complex_example/README.md b/python/examples/deploy/complex_example/README.md
index 1f67f587fd..b7a03aeea6 100644
--- a/python/examples/deploy/complex_example/README.md
+++ b/python/examples/deploy/complex_example/README.md
@@ -16,7 +16,7 @@ Use -cf to indicate the configuration file.
     curl -XPOST localhost:2345/predict -d '{"text": "Hello"}'
     ```
 
-## Option 2: Deploy to the Cloud (Using fedml®launch platform)
+## Option 2: Deploy to the Cloud (Using TensorOpera®launch platform)
 - Uncomment the following line in config.yaml
 
     For information about the configuration, please refer to fedml ® launch.
diff --git a/python/examples/deploy/complex_example/config.yaml b/python/examples/deploy/complex_example/config.yaml
index 037183a066..cd658aae33 100644
--- a/python/examples/deploy/complex_example/config.yaml
+++ b/python/examples/deploy/complex_example/config.yaml
@@ -15,7 +15,7 @@ environment_variables:
   LOCAL_RANK: "0"
 
 # If you do not have any GPU resource but want to serve the model
-# Try FedML® Nexus AI Platform, and Uncomment the following lines.
+# Try TensorOpera® Nexus AI Platform, and Uncomment the following lines.
 # ------------------------------------------------------------
 computing:
   minimum_num_gpus: 1           # minimum # of GPUs to provision
diff --git a/python/examples/deploy/mnist/README.md b/python/examples/deploy/mnist/README.md
index 11dd696234..b64b4bd70e 100644
--- a/python/examples/deploy/mnist/README.md
+++ b/python/examples/deploy/mnist/README.md
@@ -11,9 +11,9 @@ curl -XPOST localhost:2345/predict -d '{"arr":[$DATA]}'
 #For $DATA, please check the request_input_example, it is a 28*28=784 float array
 #Output:{"generated_text":"tensor([0.2333, 0.5296, 0.4350, 0.4537, 0.5424, 0.4583, 0.4803, 0.2862, 0.5507,\n        0.8683], grad_fn=<SigmoidBackward0>)"}
 ```
-## Option 2: Deploy to the Cloud (Using fedml® launch platform)
+## Option 2: Deploy to the Cloud (Using TensorOpera® launch platform)
 Uncomment the following line in mnist.yaml,
-for infomation about the configuration, please refer to fedml® launch.
+for information about the configuration, please refer to TensorOpera® launch.
 ```yaml
 # computing:
 #   minimum_num_gpus: 1
diff --git a/python/examples/deploy/mnist/mnist.yaml b/python/examples/deploy/mnist/mnist.yaml
index fe419abb1c..cae8050674 100644
--- a/python/examples/deploy/mnist/mnist.yaml
+++ b/python/examples/deploy/mnist/mnist.yaml
@@ -5,7 +5,7 @@ data_cache_dir: ""
 bootstrap: ""
 
 # If you do not have any GPU resource but want to serve the model
-# Try FedML® Nexus AI Platform, and Uncomment the following lines.
+# Try TensorOpera® Nexus AI Platform, and Uncomment the following lines.
 # ------------------------------------------------------------
 computing:
  minimum_num_gpus: 1           # minimum # of GPUs to provision
diff --git a/python/examples/deploy/multi_service/README.md b/python/examples/deploy/multi_service/README.md
index 2b897d087a..59bd7429f3 100644
--- a/python/examples/deploy/multi_service/README.md
+++ b/python/examples/deploy/multi_service/README.md
@@ -15,7 +15,7 @@ fedml model create --name $model_name --config_file config.yaml
 ```
 
 ## On-premsie Deploy
-Register an account on FedML website: https://fedml.ai
+Register an account on TensorOpera website: https://tensoropera.ai
 
 You will have a user id and api key, which can be found in the profile page.
 
@@ -44,8 +44,8 @@ You will have a user id and api key, which can be found in the profile page.
   ```
  - Result
     
-    See the deployment result in https://fedml.ai
+    See the deployment result in https://tensoropera.ai
 
 - OPT2: Deploy - UI
     
-    Follow the instructions on https://fedml.ai
+    Follow the instructions on https://tensoropera.ai
diff --git a/python/examples/deploy/quick_start/README.md b/python/examples/deploy/quick_start/README.md
index 1f67f587fd..b7a03aeea6 100644
--- a/python/examples/deploy/quick_start/README.md
+++ b/python/examples/deploy/quick_start/README.md
@@ -16,7 +16,7 @@ Use -cf to indicate the configuration file.
     curl -XPOST localhost:2345/predict -d '{"text": "Hello"}'
     ```
 
-## Option 2: Deploy to the Cloud (Using fedml®launch platform)
+## Option 2: Deploy to the Cloud (Using TensorOpera®launch platform)
 - Uncomment the following line in config.yaml
 
     For information about the configuration, please refer to fedml ® launch.
diff --git a/python/examples/deploy/scalellm-multi-engine/README.md b/python/examples/deploy/scalellm-multi-engine/README.md
index 4de6058c95..b65ad7dd5c 100644
--- a/python/examples/deploy/scalellm-multi-engine/README.md
+++ b/python/examples/deploy/scalellm-multi-engine/README.md
@@ -40,7 +40,7 @@ computing:
   #device_type: CPU              # options: GPU, CPU, hybrid
   resource_type: A100-80G       # e.g., A100-80G,
   # please check the resource type list by "fedml show-resource-type"
-  # or visiting URL: https://fedml.ai/accelerator_resource_type
+  # or visiting URL: https://tensoropera.ai/accelerator_resource_type
 ```
 
 ```bash
diff --git a/python/examples/deploy/scalellm/README.md b/python/examples/deploy/scalellm/README.md
index 4de6058c95..b65ad7dd5c 100644
--- a/python/examples/deploy/scalellm/README.md
+++ b/python/examples/deploy/scalellm/README.md
@@ -40,7 +40,7 @@ computing:
   #device_type: CPU              # options: GPU, CPU, hybrid
   resource_type: A100-80G       # e.g., A100-80G,
   # please check the resource type list by "fedml show-resource-type"
-  # or visiting URL: https://fedml.ai/accelerator_resource_type
+  # or visiting URL: https://tensoropera.ai/accelerator_resource_type
 ```
 
 ```bash
diff --git a/python/examples/deploy/streaming_response/README.md b/python/examples/deploy/streaming_response/README.md
index f91cda5278..b190b50dc7 100644
--- a/python/examples/deploy/streaming_response/README.md
+++ b/python/examples/deploy/streaming_response/README.md
@@ -16,7 +16,7 @@ Use -cf to indicate the configuration file.
     curl -XPOST localhost:2345/predict -d '{"text": "Hello"}'
     ```
 
-## Option 2: Deploy to the Cloud (Using fedml®launch platform)
+## Option 2: Deploy to the Cloud (Using TensorOpera®launch platform)
 - Uncomment the following line in config.yaml
 
     For information about the configuration, please refer to fedml ® launch.
diff --git a/python/examples/deploy/streaming_response/config.yaml b/python/examples/deploy/streaming_response/config.yaml
index 83479068e6..1a18b9d85b 100644
--- a/python/examples/deploy/streaming_response/config.yaml
+++ b/python/examples/deploy/streaming_response/config.yaml
@@ -8,7 +8,7 @@ bootstrap: |
   echo "Bootstrap finished"
 
 # If you do not have any GPU resource but want to serve the model
-# Try FedML® Nexus AI Platform, and Uncomment the following lines.
+# Try TensorOpera® Nexus AI Platform, and Uncomment the following lines.
 # ------------------------------------------------------------
 computing:
   minimum_num_gpus: 1           # minimum # of GPUs to provision
diff --git a/python/examples/deploy/triton/README.md b/python/examples/deploy/triton/README.md
index 4d861fb7ff..5430939d28 100644
--- a/python/examples/deploy/triton/README.md
+++ b/python/examples/deploy/triton/README.md
@@ -39,7 +39,7 @@ fedml model create --name $model_name --config_file config.yaml
 ```
 
 ## On-premsie Deploy
-Register an account on FedML website: https://fedml.ai
+Register an account on TensorOpera website: https://tensoropera.ai
 
 You will have a user id and api key, which can be found in the profile page.
 
@@ -68,8 +68,8 @@ You will have a user id and api key, which can be found in the profile page.
   ```
  - Result
     
-    See the deployment result in https://fedml.ai
+    See the deployment result in https://tensoropera.ai
 
 - OPT2: Deploy - UI
     
-    Follow the instructions on https://fedml.ai
+    Follow the instructions on https://tensoropera.ai
diff --git a/python/examples/deploy/your_own_llm/README.md b/python/examples/deploy/your_own_llm/README.md
index fc7234293b..415db7fe92 100644
--- a/python/examples/deploy/your_own_llm/README.md
+++ b/python/examples/deploy/your_own_llm/README.md
@@ -9,9 +9,9 @@ fedml model deploy --name llm --local
 #INFO:     Uvicorn running on http://0.0.0.0:2345 (Press CTRL+C to quit)
 curl -XPOST localhost:2345/predict -d '{"text": "Hello"}'
 ```
-## Option 2: Deploy to the Cloud (Using fedml®launch platform)
+## Option 2: Deploy to the Cloud (Using TensorOpera®launch platform)
 Uncomment the following line in llm.yaml,
-for infomation about the configuration, please refer to fedml®launch.
+for information about the configuration, please refer to TensorOpera®launch.
 ```yaml
 # computing:
 #   minimum_num_gpus: 1
diff --git a/python/examples/deploy/your_own_llm/llm.yaml b/python/examples/deploy/your_own_llm/llm.yaml
index 5e5e09730b..b3b3d5da15 100644
--- a/python/examples/deploy/your_own_llm/llm.yaml
+++ b/python/examples/deploy/your_own_llm/llm.yaml
@@ -11,7 +11,7 @@ bootstrap: |
   echo "Bootstrap finished"
 
 # If you do not have any GPU resource but want to serve the model
-# Try FedML® Nexus AI Platform, and Uncomment the following lines.
+# Try TensorOpera® Nexus AI Platform, and Uncomment the following lines.
 # ------------------------------------------------------------
 # computing:
 #   minimum_num_gpus: 1           # minimum # of GPUs to provision
diff --git a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/README.md b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/README.md
index 174309aa55..8c56622d06 100644
--- a/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/README.md
+++ b/python/examples/federate/cross_silo/grpc_fedavg_mnist_lr_example/grpc_docker_fedmlai/README.md
@@ -1,6 +1,6 @@
 
 # Introduction
-In this working example, we will run 1 aggregation server and 2 clients on the same machine using Docker + gRPC and we will use the FEDML.ai platform to run the FL job. 
+In this working example, we will run 1 aggregation server and 2 clients on the same machine using Docker + gRPC and we will use the TensorOpera.ai platform to run the FL job. 
 
 # gRPC Configuration File
 The content of the gRPC configuration file is as follows:
@@ -47,5 +47,5 @@ source /fedml/bin/activate
 fedml login -c <FEDML_API_KEY>
 ```
 
-Then we only need to compile our job and submit to our dockerb-based cluster as it is also discussed in detail in the official FEDML documentation: https://fedml.ai/octopus/userGuides
+Then we only need to compile our job and submit to our docker-based cluster as it is also discussed in detail in the official TensorOpera documentation: https://tensoropera.ai/octopus/userGuides
 
diff --git a/python/examples/launch/README.md b/python/examples/launch/README.md
index 1ded267276..fc79cbfe26 100644
--- a/python/examples/launch/README.md
+++ b/python/examples/launch/README.md
@@ -132,7 +132,7 @@ You just need to customize the following config items.
 
 3. `bootstrap`, It is the bootstrap shell command which will be executed before running entry commands.
 
-Then you can use the following example CLI to launch the job at FedML® Nexus AI Platform
+Then you can use the following example CLI to launch the job at TensorOpera® Nexus AI Platform
 (Replace $YourApiKey with your own account API key from open.fedml.ai)
 
 Example:
@@ -142,7 +142,7 @@ fedml launch hello_job.yaml
 
 After the launch CLI is executed, the output is as follows. Here you may open the job url to confirm and actually start the job.
 ```
-Submitting your job to FedML® Nexus AI Platform: 100%|████████████████████████████████████████████████████████████████████████████████████████| 6.07k/6.07k [00:01<00:00, 4.94kB/s]
+Submitting your job to TensorOpera® Nexus AI Platform: 100%|████████████████████████████████████████████████████████████████████████████████████████| 6.07k/6.07k [00:01<00:00, 4.94kB/s]
 
 Searched and matched the following GPU resource for your job:
 +-----------+-------------------+---------+------------+-------------------------+---------+-------+----------+
diff --git a/python/examples/launch/federate_build_package/README.md b/python/examples/launch/federate_build_package/README.md
index c0d3356150..325258407e 100644
--- a/python/examples/launch/federate_build_package/README.md
+++ b/python/examples/launch/federate_build_package/README.md
@@ -3,7 +3,7 @@
 ```
 Usage: fedml federate build [OPTIONS] [YAML_FILE]
 
-  Build federate packages for the FedML® Nexus AI Platform.
+  Build federate packages for the TensorOpera® Nexus AI Platform.
 
 Options:
   -h, --help              Show this message and exit.
diff --git a/python/examples/launch/train_build_package/README.md b/python/examples/launch/train_build_package/README.md
index 03c8dbe71b..f0f1dff857 100644
--- a/python/examples/launch/train_build_package/README.md
+++ b/python/examples/launch/train_build_package/README.md
@@ -3,7 +3,7 @@
 ```
 Usage: fedml train build [OPTIONS] [YAML_FILE]
 
-  Build training packages for the FedML® Nexus AI Platform.
+  Build training packages for the TensorOpera® Nexus AI Platform.
 
 Options:
   -h, --help              Show this message and exit.
diff --git a/python/examples/train/README.md b/python/examples/train/README.md
index 9a6853d740..0e301c86b2 100644
--- a/python/examples/train/README.md
+++ b/python/examples/train/README.md
@@ -1 +1 @@
-# Examples (Prebuilt Jobs) for FEDML®Train
\ No newline at end of file
+# Examples (Prebuilt Jobs) for TensorOpera®Train
\ No newline at end of file
diff --git a/python/examples/train/llm_train/job.yaml b/python/examples/train/llm_train/job.yaml
index d1ba08ed4c..a9e81c91f7 100644
--- a/python/examples/train/llm_train/job.yaml
+++ b/python/examples/train/llm_train/job.yaml
@@ -44,4 +44,4 @@ computing:
 
   allow_cross_cloud_resources: false # true, false
   device_type: GPU              # options: GPU, CPU, hybrid
-  resource_type: A100-80G       # e.g., A100-80G, please check the resource type list by "fedml show-resource-type" or visiting URL: https://fedml.ai/accelerator_resource_type
+  resource_type: A100-80G       # e.g., A100-80G, please check the resource type list by "fedml show-resource-type" or visiting URL: https://tensoropera.ai/accelerator_resource_type
diff --git a/python/fedml/api/constants.py b/python/fedml/api/constants.py
index b284d7a056..313da61798 100755
--- a/python/fedml/api/constants.py
+++ b/python/fedml/api/constants.py
@@ -18,7 +18,7 @@ class ApiConstants:
 
     RESOURCE_MATCHED_STATUS_BIND_CREDIT_CARD_FIRST = \
         """
-        Before we can start a job, please add a credit card to your FEDML account at https://fedml.ai/billing/home.
+        Before we can start a job, please add a credit card to your TensorOpera account at https://tensoropera.ai/billing.
         Once it's added, please try to run the launch command again
         """
 
diff --git a/python/fedml/api/modules/build.py b/python/fedml/api/modules/build.py
index 7d23bc02ed..9299944bb0 100644
--- a/python/fedml/api/modules/build.py
+++ b/python/fedml/api/modules/build.py
@@ -22,7 +22,7 @@ def build(platform, type, source_folder, entry_point, config_folder, dest_folder
 
     if type == "client" or type == "server":
         click.echo(
-            "Now, you are building the fedml packages which will be used in the FedML® Nexus AI Platform "
+            "Now, you are building the fedml packages which will be used in the TensorOpera® Nexus AI Platform "
             "platform."
         )
         click.echo(
@@ -34,7 +34,7 @@ def build(platform, type, source_folder, entry_point, config_folder, dest_folder
             + "."
         )
         click.echo(
-            "Then you may upload the packages on the configuration page in the FedML® Nexus AI Platform to "
+            "Then you may upload the packages on the configuration page in the TensorOpera® Nexus AI Platform to "
             "start your training flow."
         )
         click.echo("Building...")
diff --git a/python/fedml/api/modules/device.py b/python/fedml/api/modules/device.py
index 27b2d0d198..1b578aa903 100644
--- a/python/fedml/api/modules/device.py
+++ b/python/fedml/api/modules/device.py
@@ -78,7 +78,7 @@ def _bind(
     else:
         docker_install_url = "https://docs.docker.com/engine/install/"
         docker_config_text = " Moreover, you need to config the docker engine to run as a non-root user. Here is the docs. https://docs.docker.com/engine/install/linux-postinstall/"
-    print("\n Welcome to FedML.ai! \n Start to login the current device to the FedML® Nexus AI Platform\n")
+    print("\n Welcome to TensorOpera.ai! \n Start to login the current device to the TensorOpera® Nexus AI Platform\n")
     print(" If you want to deploy models into this computer, you need to install the docker engine to serve your models.")
     print(f" Here is the docs for installation docker engine. {docker_install_url}")
     if docker_config_text is not None:
@@ -137,7 +137,7 @@ def _bind(
         client_daemon_cmd = "client_daemon.py"
         client_daemon_pids = RunProcessUtils.get_pid_from_cmd_line(client_daemon_cmd)
         if client_daemon_pids is not None and len(client_daemon_pids) > 0:
-            print("Your computer has been logged into the FedML® Nexus AI Platform. "
+            print("Your computer has been logged into the TensorOpera® Nexus AI Platform. "
                   "Before logging in again, please log out of the previous login using the command "
                   "'fedml logout -c'. If it still doesn't work, run the command 'fedml logout -c' "
                   "using your computer's administrator account.")
@@ -193,7 +193,7 @@ def _bind(
         server_daemon_cmd = "server_daemon.py"
         server_daemon_pids = RunProcessUtils.get_pid_from_cmd_line(server_daemon_cmd)
         if server_daemon_pids is not None and len(server_daemon_pids) > 0:
-            print("Your computer has been logged into the FedML® Nexus AI Platform. "
+            print("Your computer has been logged into the TensorOpera® Nexus AI Platform. "
                   "Before logging in again, please log out of the previous login using the command "
                   "'fedml logout -s'. If it still doesn't work, run the command 'fedml logout -s' "
                   "using your computer's administrator account.")
diff --git a/python/fedml/api/modules/model.py b/python/fedml/api/modules/model.py
index a02e674f47..3b4a7afd0b 100644
--- a/python/fedml/api/modules/model.py
+++ b/python/fedml/api/modules/model.py
@@ -252,9 +252,9 @@ def deploy(name: str, endpoint_name: str = "", endpoint_id: str = None, local: b
             return FedMLModelCards.get_instance().serve_model_on_premise(
                 name, endpoint_name, master_ids, worker_ids, use_remote, endpoint_id)
         else:
-            # FedML® Launch deploy mode
+            # TensorOpera® Launch deploy mode
             click.echo("Warning: You did not indicate the master device id and worker device id\n\
-                       Do you want to use FedML® Nexus AI Platform to find GPU Resources deploy your model?")
+                       Do you want to use TensorOpera® Nexus AI Platform to find GPU Resources to deploy your model?")
             answer = click.prompt("Please input your answer: (y/n)")
             if answer == "y" or answer == "Y":
                 api_key = get_api_key()
diff --git a/python/fedml/api/modules/utils.py b/python/fedml/api/modules/utils.py
index 76801ffe81..abbea71f9f 100644
--- a/python/fedml/api/modules/utils.py
+++ b/python/fedml/api/modules/utils.py
@@ -21,7 +21,7 @@ def _check_api_key(api_key=None):
     if api_key is None or api_key == "":
         saved_api_key = get_api_key()
         if saved_api_key is None or saved_api_key == "":
-            api_key = click.prompt("FedML® Launch API Key is not set yet, please input your API key")
+            api_key = click.prompt("TensorOpera® Launch API Key is not set yet, please input your API key")
         else:
             api_key = saved_api_key
 
diff --git a/python/fedml/cli/README.md b/python/fedml/cli/README.md
index f94200f258..425bf0c5de 100644
--- a/python/fedml/cli/README.md
+++ b/python/fedml/cli/README.md
@@ -27,7 +27,7 @@ fedml build \
     --ignore __pycache__,*.git
 ```
 
-## 2. Login into the FedML® Nexus AI Platform (fedml.ai)
+## 2. Login into the TensorOpera® Nexus AI Platform (tensoropera.ai)
 login as general computing device with local pip mode:
 ```
 fedml login $YourApiKey
@@ -38,7 +38,7 @@ login as federated-learning server with local pip mode:
 fedml login $YourApiKey -s
 ```
 
-### 2.1. Examples for Logining into the FedML® Nexus AI Platform (fedml.ai)
+### 2.1. Examples for Logging into the TensorOpera® Nexus AI Platform (tensoropera.ai)
 
 ```
 fedml login 113343dad999933 
@@ -48,7 +48,7 @@ fedml login 113343dad999933
 fedml login 113343dad999933 -s
 ```
 
-## 3. Logout from the FedML FedML® Nexus AI Platform (fedml.ai)
+## 3. Logout from the TensorOpera® Nexus AI Platform (tensoropera.ai)
 logout from computing device with local pip mode:
 ```
 fedml logout 
@@ -81,17 +81,17 @@ fedml diagnosis --open --s3 --mqtt
 ```
 
 ## 7. Jobs
-Start a job at FedML® Nexus AI Platform
+Start a job at TensorOpera® Nexus AI Platform
 ```
 Usage: fedml jobs start [OPTIONS]
 
-Start a job at FedML® Nexus AI Platform
+Start a job at TensorOpera® Nexus AI Platform
 
 Options:
 -pf, --platform TEXT           The platform name at the MLOps platform(options: octopus, parrot, spider, beehive).
--prj, --project_name TEXT      The project name at FedML® Nexus AI Platform
--app, --application_name TEXT  Application name in the My Application list at FedML® Nexus AI Platform
--jn, --job_name TEXT           The job name at FedML® Nexus AI Platform If you don't specify here, the job name from the job yaml file will be used.
+-prj, --project_name TEXT      The project name at TensorOpera® Nexus AI Platform
+-app, --application_name TEXT  Application name in the My Application list at TensorOpera® Nexus AI Platform
+-jn, --job_name TEXT           The job name at TensorOpera® Nexus AI Platform. If you don't specify here, the job name from the job yaml file will be used.
 -ds, --devices_server TEXT     The server to run the launching job, for the launch platform, we do not need to set this option.
 -de, --devices_edges TEXT      The edge devices to run the launching job. Separated with ',', e.g. 705,704. For the launch platform, we do not need to set this option.
 -u, --user TEXT                user id or api key.
@@ -238,7 +238,7 @@ You just need to customize the following config items.
 
 3. `bootstrap`, It is the bootstrap shell command which will be executed before running entry commands.
 
-Then you can use the following example CLI to launch the job at FedML® Nexus AI Platform
+Then you can use the following example CLI to launch the job at TensorOpera® Nexus AI Platform
 (Replace $YourApiKey with your own account API key from open.fedml.ai)
 
 Example:
@@ -248,7 +248,7 @@ fedml launch hello_job.yaml
 
 After the launch CLI is executed, the output is as follows. Here you may open the job url to confirm and actually start the job.
 ```
-Submitting your job to FedML® Nexus AI Platform: 100%|████████████████████████████████████████████████████████████████████████████████████████| 6.07k/6.07k [00:01<00:00, 4.94kB/s]
+Submitting your job to TensorOpera® Nexus AI Platform: 100%|████████████████████████████████████████████████████████████████████████████████████████| 6.07k/6.07k [00:01<00:00, 4.94kB/s]
 
 Searched and matched the following GPU resource for your job:
 +-----------+-------------------+---------+------------+-------------------------+---------+-------+----------+
diff --git a/python/fedml/cli/modules/build.py b/python/fedml/cli/modules/build.py
index 4674a88e9e..2fd68492fd 100644
--- a/python/fedml/cli/modules/build.py
+++ b/python/fedml/cli/modules/build.py
@@ -3,14 +3,14 @@
 import fedml.api
 
 
-@click.command("build", help="Build packages for the FedML® Nexus AI Platform")
+@click.command("build", help="Build packages for the TensorOpera® AI Platform")
 @click.help_option("--help", "-h")
 @click.option(
     "--platform",
     "-pf",
     type=str,
     default="octopus",
-    help="The platform name at the FedML® Nexus AI Platform (options: octopus, parrot, spider, beehive, falcon, launch).",
+    help="The platform name at the TensorOpera® AI Platform (options: octopus, parrot, spider, beehive, falcon, launch).",
 )
 @click.option(
     "--type",
diff --git a/python/fedml/cli/modules/cluster.py b/python/fedml/cli/modules/cluster.py
index 95822e1c18..47617b1f12 100644
--- a/python/fedml/cli/modules/cluster.py
+++ b/python/fedml/cli/modules/cluster.py
@@ -7,10 +7,10 @@
 # Message strings constants
 confirmation_message: str = "Are you sure you want to {} these clusters?"
 failure_message: str = ("Failed to {} the clusters, please check the arguments are valid and your network "
-                        "connection and make sure be able to access the FedML® Nexus AI Platform.")
-version_help: str = "specify version of FedML® Nexus AI Platform. It should be dev, test or release"
+                        "connection and make sure be able to access the TensorOpera® AI Platform.")
+version_help: str = "specify version of TensorOpera® AI Platform. It should be dev, test or release"
 api_key_help: str = "user api key."
-cluster_action_help: str = "{} clusters from FedML® Nexus AI Platform"
+cluster_action_help: str = "{} clusters from TensorOpera® AI Platform"
 
 
 @click.group("cluster")
@@ -27,7 +27,7 @@
 )
 def fedml_clusters(api_key, version):
     """
-    Manage clusters on FedML® Nexus AI Platform
+    Manage clusters on TensorOpera® AI Platform
     """
     pass
 
diff --git a/python/fedml/cli/modules/device.py b/python/fedml/cli/modules/device.py
index 5c4804fa69..b21b3d09d2 100644
--- a/python/fedml/cli/modules/device.py
+++ b/python/fedml/cli/modules/device.py
@@ -7,12 +7,12 @@
 @click.help_option("--help", "-h")
 def fedml_device():
     """
-    Bind/unbind devices to the FedML® Nexus AI Platform
+    Bind/unbind devices to the TensorOpera® AI Platform
     """
     pass
 
 
-@fedml_device.command("bind", help="Bind to the FedML® Nexus AI Platform")
+@fedml_device.command("bind", help="Bind to the TensorOpera® AI Platform")
 @click.help_option("--help", "-h")
 @click.argument("api_key", nargs=-1)
 @click.option(
@@ -20,13 +20,13 @@ def fedml_device():
     "-v",
     type=str,
     default="release",
-    help="Bind to which version of FedML® Nexus AI Platform. It should be dev, test or release.",
+    help="Bind to which version of TensorOpera® AI Platform. It should be dev, test or release.",
 )
 @click.option(
     "--compute_node", "-c", default=None, is_flag=True,
     help="Bind as the general compute node in FEDML Nexus AI compute network. This is enabled by default. "
-    "After binding, you can view and manage the device in the FEDML® Nexus AI Platform: https://fedml.ai/compute. "
-    "It can be grouped as a cluster and then you can use FEDML®Launch to schedule any job (training, deployment, federated learning) to it. "
+    "After binding, you can view and manage the device in the TensorOpera® AI Platform: https://tensoropera.ai/gpu/local?label=Private. "
+    "It can be grouped as a cluster and then you can use TensorOpera®Launch to schedule any job (training, deployment, federated learning) to it. "
     "You can not specify the option -c and -s simultaneously.",
 )
 @click.option(
@@ -36,7 +36,7 @@ def fedml_device():
 )
 @click.option(
     "--provider", "-p", default=None, is_flag=True,
-    help="Bind as the FedML compute node (GPU) provider (supplier). This is used by Nexus AI Platform - Share and Earn: https://fedml.ai/gpu-supplier. You can share your GPUs in this way and earn money. "
+    help="Bind as the FedML compute node (GPU) provider (supplier). This is used by Nexus AI Platform - Share and Earn: https://tensoropera.ai/share-and-earn. You can share your GPUs in this way and earn money. "
     "You can specify the option -p and -c simultaneously (can be used as provider for others as well compute node for your own jobs), but you can not specify -p and -s simultaneously.",
 )
 def fedml_device_bind(api_key, version, compute_node, server, provider):
@@ -47,14 +47,14 @@ def fedml_device_bind(api_key, version, compute_node, server, provider):
     fedml.api.device_bind(api_key, compute_node, server, provider)
 
 
-@fedml_device.command("unbind", help="Unbind from the FedML® Nexus AI Platform")
+@fedml_device.command("unbind", help="Unbind from the TensorOpera® AI Platform")
 @click.help_option("--help", "-h")
 @click.option(
     "--version",
     "-v",
     type=str,
     default="release",
-    help="Unbind which backend environment version of FedML® Nexus AI Platform. It should be dev, test, or release.",
+    help="Unbind which backend environment version of TensorOpera® AI Platform. It should be dev, test, or release.",
 )
 @click.option(
     "--compute_node", "-c", default=None, is_flag=True, help="Unbind from the FedML general compute node.",
@@ -75,7 +75,7 @@ def fedml_device_unbind(version, computing, server):
     "-v",
     type=str,
     default="release",
-    help="show resource type at which version of FedML® Nexus AI Platform. It should be dev, test or release",
+    help="show resource type at which version of TensorOpera® AI Platform. It should be dev, test or release",
 )
 def resource_type(version):
     fedml.set_env_version(version)
diff --git a/python/fedml/cli/modules/federate.py b/python/fedml/cli/modules/federate.py
index 6f26b2bea8..ff4fd6c791 100644
--- a/python/fedml/cli/modules/federate.py
+++ b/python/fedml/cli/modules/federate.py
@@ -7,12 +7,12 @@
 @click.help_option("--help", "-h")
 def fedml_federate():
     """
-    Manage federated learning resources on FedML® Nexus AI Platform
+    Manage federated learning resources on TensorOpera® AI Platform
     """
     pass
 
 
-@fedml_federate.command("build", help="Build federate packages for the FedML® Nexus AI Platform.")
+@fedml_federate.command("build", help="Build federate packages for the TensorOpera® AI Platform.")
 @click.help_option("--help", "-h")
 @click.option(
     "--dest_folder",
diff --git a/python/fedml/cli/modules/launch.py b/python/fedml/cli/modules/launch.py
index 16450e08a9..c14bbac353 100644
--- a/python/fedml/cli/modules/launch.py
+++ b/python/fedml/cli/modules/launch.py
@@ -13,7 +13,7 @@
 from fedml.computing.scheduler.scheduler_entry.run_manager import FedMLRunStartedModel, FeatureEntryPoint
 
 
-@click.command("launch", help="Launch job at the FedML® Nexus AI Platform")
+@click.command("launch", help="Launch job at the TensorOpera® AI Platform")
 @click.help_option("--help", "-h")
 @click.option(
     "--api_key", "-k", type=str, help="user api key.",
@@ -56,7 +56,7 @@
 @click.argument("yaml_file", nargs=-1)
 def fedml_launch(yaml_file, cluster, version, api_key, group, local_on_premise_platform, local_on_premise_platform_port):
     """
-    Manage resources on the FedML® Nexus AI Platform.
+    Manage resources on the TensorOpera® AI Platform.
     """
     set_env_version(version)
     fedml.set_local_on_premise_platform_host(local_on_premise_platform)
diff --git a/python/fedml/cli/modules/login.py b/python/fedml/cli/modules/login.py
index b76346ec1b..5e77910cbb 100644
--- a/python/fedml/cli/modules/login.py
+++ b/python/fedml/cli/modules/login.py
@@ -10,7 +10,7 @@
 from fedml.computing.scheduler.scheduler_core.general_constants import MarketplaceType
 
 
-@click.command("login", help="Login the FedML® Nexus AI Platform")
+@click.command("login", help="Login the TensorOpera® AI Platform")
 @click.help_option("--help", "-h")
 @click.argument("api_key", nargs=-1)
 @click.option(
@@ -18,13 +18,13 @@
     "-v",
     type=str,
     default="release",
-    help="Login which backend environment version of FedML® Nexus AI Platform. It should be dev, test, or release.",
+    help="Login which backend environment version of TensorOpera® AI Platform. It should be dev, test, or release.",
 )
 @click.option(
     "--compute_node", "-c", default=None, is_flag=True,
     help="Login as the general compute node in FEDML Nexus AI compute network. This is enabled by default. "
-         "After login, you can view and manage the device in the FEDML® Nexus AI Platform: https://fedml.ai/compute. "
-         "It can be grouped as a cluster and then you can use FEDML®Launch to schedule any job (training, deployment, federated learning) to it. "
+         "After login, you can view and manage the device in the TensorOpera® AI Platform: https://tensoropera.ai/gpu/local?label=Private. "
+         "It can be grouped as a cluster and then you can use TensorOpera®Launch to schedule any job (training, deployment, federated learning) to it. "
          "You can not specify the option -c and -s simultaneously.",
 )
 @click.option(
@@ -34,7 +34,7 @@
 )
 @click.option(
     "--provider", "-p", default=None, is_flag=True,
-    help="Login as the FedML compute node (GPU) provider (supplier). This is used by Nexus AI Platform - Share and Earn: https://fedml.ai/gpu-supplier. You can share your GPUs in this way and earn money. "
+    help="Login as the FedML compute node (GPU) provider (supplier). This is used by Nexus AI Platform - Share and Earn: https://tensoropera.ai/share-and-earn. You can share your GPUs in this way and earn money. "
          "You can specify the option -p and -c simultaneously (can be used as provider for others as well compute node for your own jobs), but you can not specify -p and -s simultaneously.",
 )
 @click.option(
@@ -94,7 +94,7 @@
          "for one hour is $1.5 per GPU, then you would input 1.5. Do not multiply this number by the total number of "
          "GPUs in the node, as the system will automatically detect the number of GPUs and include it in the cost "
          "calculation. Default is 0.0."
-         "Optionally, you can also set this price later through supplier page on the FEDML® Nexus AI Platform."
+         "Optionally, you can also set this price later through supplier page on the TensorOpera® AI Platform."
 )
 @click.option(
     "--name",
diff --git a/python/fedml/cli/modules/logout.py b/python/fedml/cli/modules/logout.py
index 94a51b395a..ab2abfde95 100644
--- a/python/fedml/cli/modules/logout.py
+++ b/python/fedml/cli/modules/logout.py
@@ -3,7 +3,7 @@
 import fedml.api
 
 
-@click.command("logout", help="Logout from the FedML® Nexus AI Platform")
+@click.command("logout", help="Logout from the TensorOpera® AI Platform")
 @click.help_option("--help", "-h")
 @click.option(
     "--computing", "-c", default=None, is_flag=True, help="Logout from the FedML general compute node.",
@@ -16,7 +16,7 @@
     "-v",
     type=str,
     default="release",
-    help="Logout which backend environment version of FedML® Nexus AI Platform. It should be dev, test, or release.",
+    help="Logout which backend environment version of TensorOpera® AI Platform. It should be dev, test, or release.",
 )
 def fedml_logout(computing, server, version):
     fedml.set_env_version(version)
diff --git a/python/fedml/cli/modules/run.py b/python/fedml/cli/modules/run.py
index f2c24b445a..a2c479897b 100644
--- a/python/fedml/cli/modules/run.py
+++ b/python/fedml/cli/modules/run.py
@@ -15,24 +15,24 @@
     "-v",
     type=str,
     default="release",
-    help="version of FedML® Nexus AI Platform. It should be dev, test or release",
+    help="version of TensorOpera® AI Platform. It should be dev, test or release",
 )
 @click.option(
     "--platform",
     "-pf",
     type=str,
     default="falcon",
-    help="The platform name at the FedML® Nexus AI Platform (options: octopus, parrot, spider, beehive, falcon, launch,"
+    help="The platform name at the TensorOpera® AI Platform (options: octopus, parrot, spider, beehive, falcon, launch,"
          "default is falcon).",
 )
 def fedml_run(api_key, version, platform):
     """
-    Manage runs on the FedML® Nexus AI Platform.
+    Manage runs on the TensorOpera® AI Platform.
     """
     pass
 
 
-@fedml_run.command("stop", help="Stop a run from the FedML® Nexus AI Platform.")
+@fedml_run.command("stop", help="Stop a run from the TensorOpera® AI Platform.")
 @click.help_option("--help", "-h")
 @click.option(
     "--run_id",
@@ -49,14 +49,14 @@ def fedml_run(api_key, version, platform):
     "-v",
     type=str,
     default="release",
-    help="stop a run at which version of FedML® Nexus AI Platform. It should be dev, test or release",
+    help="stop a run at which version of TensorOpera® AI Platform. It should be dev, test or release",
 )
 @click.option(
     "--platform",
     "-pf",
     type=str,
     default="falcon",
-    help="The platform name at the FedML® Nexus AI Platform (options: octopus, parrot, spider, beehive, falcon, launch, "
+    help="The platform name at the TensorOpera® AI Platform (options: octopus, parrot, spider, beehive, falcon, launch, "
          "default is falcon).",
 )
 def stop_run(platform, run_id, api_key, version):
@@ -68,14 +68,14 @@ def stop_run(platform, run_id, api_key, version):
         click.echo(f"Failed to stop Run {run_id}. Please check if the run id is valid.")
 
 
-@fedml_run.command("list", help="List runs from the FedML® Nexus AI Platform.")
+@fedml_run.command("list", help="List runs from the TensorOpera® AI Platform.")
 @click.help_option("--help", "-h")
 @click.option(
     "--platform",
     "-pf",
     type=str,
     default="falcon",
-    help="The platform name at the FedML® Nexus AI Platform (options: octopus, parrot, spider, beehive, falcon, launch, "
+    help="The platform name at the TensorOpera® AI Platform (options: octopus, parrot, spider, beehive, falcon, launch, "
          "default is falcon).",
 )
 @click.option(
@@ -83,14 +83,14 @@ def stop_run(platform, run_id, api_key, version):
     "-r",
     type=str,
     default="",
-    help="Run name at the FedML® Nexus AI Platform.",
+    help="Run name at the TensorOpera® AI Platform.",
 )
 @click.option(
     "--run_id",
     "-rid",
     type=str,
     default="",
-    help="Run id at the FedML® Nexus AI Platform.",
+    help="Run id at the TensorOpera® AI Platform.",
 )
 @click.option(
     "--api_key", "-k", type=str, help="user api key.",
@@ -100,7 +100,7 @@ def stop_run(platform, run_id, api_key, version):
     "-v",
     type=str,
     default="release",
-    help="list runs at which version of FedML® Nexus AI Platform. It should be dev, test or release",
+    help="list runs at which version of TensorOpera® AI Platform. It should be dev, test or release",
 )
 def list_runs(platform, run_name, run_id, api_key, version):
     fedml.set_env_version(version)
@@ -109,14 +109,14 @@ def list_runs(platform, run_name, run_id, api_key, version):
     _print_run_table(run_list_obj)
 
 
-@fedml_run.command("status", help="Get status of run from the FedML® Nexus AI Platform.")
+@fedml_run.command("status", help="Get status of run from the TensorOpera® AI Platform.")
 @click.help_option("--help", "-h")
 @click.option(
     "--platform",
     "-pf",
     type=str,
     default="falcon",
-    help="The platform name at the FedML® Nexus AI Platform (options: octopus, parrot, spider, beehive, falcon, launch, "
+    help="The platform name at the TensorOpera® AI Platform (options: octopus, parrot, spider, beehive, falcon, launch, "
          "default is falcon).",
 )
 @click.option(
@@ -124,14 +124,14 @@ def list_runs(platform, run_name, run_id, api_key, version):
     "-r",
     type=str,
     default=None,
-    help="Run name at the FedML® Nexus AI Platform.",
+    help="Run name at the TensorOpera® AI Platform.",
 )
 @click.option(
     "--run_id",
     "-rid",
     type=str,
     default=None,
-    help="Run id at the FedML® Nexus AI Platform.",
+    help="Run id at the TensorOpera® AI Platform.",
 )
 @click.option(
     "--api_key", "-k", type=str, help="user api key.",
@@ -141,7 +141,7 @@ def list_runs(platform, run_name, run_id, api_key, version):
     "-v",
     type=str,
     default="release",
-    help="get status of run at which version of FedML® Nexus AI Platform. It should be dev, test or release",
+    help="get status of run at which version of TensorOpera® AI Platform. It should be dev, test or release",
 )
 def status(platform, run_name, run_id, api_key, version):
     fedml.set_env_version(version)
@@ -153,14 +153,14 @@ def status(platform, run_name, run_id, api_key, version):
     _print_run_table(run_list_obj)
 
 
-@fedml_run.command("logs", help="Get logs of run from the FedML® Nexus AI Platform.")
+@fedml_run.command("logs", help="Get logs of run from the TensorOpera® AI Platform.")
 @click.help_option("--help", "-h")
 @click.option(
     "--platform",
     "-pf",
     type=str,
     default="falcon",
-    help="The platform name at the FedML® Nexus AI Platform (options: octopus, parrot, spider, beehive, falcon, launch, "
+    help="The platform name at the TensorOpera® AI Platform (options: octopus, parrot, spider, beehive, falcon, launch, "
          "default is falcon).",
 )
 @click.option(
@@ -168,7 +168,7 @@ def status(platform, run_name, run_id, api_key, version):
     "-rid",
     type=str,
     default=None,
-    help="Run id at the FedML® Nexus AI Platform.",
+    help="Run id at the TensorOpera® AI Platform.",
 )
 @click.option(
     "--api_key", "-k", type=str, help="user api key.",
@@ -178,7 +178,7 @@ def status(platform, run_name, run_id, api_key, version):
     "-v",
     type=str,
     default="release",
-    help="get logs of run at which version of FedML® Nexus AI Platform. It should be dev, test or release",
+    help="get logs of run at which version of TensorOpera® AI Platform. It should be dev, test or release",
 )
 @click.option(
     "--page_num",
diff --git a/python/fedml/cli/modules/storage.py b/python/fedml/cli/modules/storage.py
index 7e060fc12e..8b75075289 100644
--- a/python/fedml/cli/modules/storage.py
+++ b/python/fedml/cli/modules/storage.py
@@ -12,7 +12,7 @@
 from fedml.api.fedml_response import ResponseCode
 
 # Message strings constants
-version_help: str = "specify version of FedML® Nexus AI Platform. It should be dev, test or release"
+version_help: str = "specify version of TensorOpera® AI Platform. It should be dev, test or release"
 api_key_help: str = "user api key."
 
 
@@ -31,7 +31,7 @@
 )
 def fedml_storage(api_key, version):
     """
-    Manage storage on FedML® Nexus AI Platform
+    Manage storage on TensorOpera® AI Platform
     """
     pass
 
@@ -43,7 +43,7 @@ def validate_argument(ctx, param, value):
     return value
 
 
-@fedml_storage.command("upload", help="Upload data on FedML® Nexus AI Platform")
+@fedml_storage.command("upload", help="Upload data on TensorOpera® AI Platform")
 @click.help_option("--help", "-h")
 @click.argument("data_path", nargs=1, callback=validate_argument)
 @click.option("--name", "-n", type=str, help="Name your data to store. If not provided, the name will be the same as "
@@ -78,7 +78,7 @@ def upload(data_path: str, name: str, user_metadata: str, description: str, vers
         click.echo(f"Failed to upload data. Error message: {response.message}")
 
 
-@fedml_storage.command("list", help="List data stored on FedML® Nexus AI Platform")
+@fedml_storage.command("list", help="List data stored on TensorOpera® AI Platform")
 @click.help_option("--help", "-h")
 @click.option(
     "--api_key", "-k", type=str, help=api_key_help,
@@ -108,7 +108,7 @@ def list_data(version, api_key):
                    f"Error message: {response.message}")
 
 
-@fedml_storage.command("get-user-metadata", help="Get user-defined metadata of data object stored on FedML® Nexus AI "
+@fedml_storage.command("get-user-metadata", help="Get user-defined metadata of data object stored on TensorOpera® AI "
                                                  "Platform")
 @click.help_option("--help", "-h")
 @click.argument("data_name", nargs=1, callback=validate_argument)
@@ -136,7 +136,7 @@ def get_user_metadata(data_name, version, api_key):
         click.echo(f"Failed to fetch user-metadata for {data_name}. Error message: {response.message}")
 
 
-@fedml_storage.command("get-metadata", help="Get metadata of data object stored on FedML® Nexus AI Platform")
+@fedml_storage.command("get-metadata", help="Get metadata of data object stored on TensorOpera® AI Platform")
 @click.help_option("--help", "-h")
 @click.argument("data_name", nargs=1, callback=validate_argument)
 @click.option(
@@ -167,7 +167,7 @@ def get_metadata(data_name, version, api_key):
         click.echo(f"Fetching metadata failed. Error message: {response.message}")
 
 
-@fedml_storage.command("download", help="Download data stored on FedML® Nexus AI Platform")
+@fedml_storage.command("download", help="Download data stored on TensorOpera® AI Platform")
 @click.help_option("--help", "-h")
 @click.argument("data_name", nargs=1, callback=validate_argument)
 @click.option("--dest_path", "-d", default=None, type=str, help="Destination path to download data. By default, "
@@ -194,7 +194,7 @@ def download(data_name, dest_path, version, api_key, service):
         click.echo(f"Failed to download data {data_name}. Error message: {response.message}")
 
 
-@fedml_storage.command("delete", help="Delete data stored on FedML® Nexus AI Platform")
+@fedml_storage.command("delete", help="Delete data stored on TensorOpera® AI Platform")
 @click.argument("data_name", nargs=1, callback=validate_argument)
 @click.help_option("--help", "-h")
 @click.option(
diff --git a/python/fedml/cli/modules/train.py b/python/fedml/cli/modules/train.py
index b4c36d1663..ae9c5fcbb1 100644
--- a/python/fedml/cli/modules/train.py
+++ b/python/fedml/cli/modules/train.py
@@ -7,12 +7,12 @@
 @click.help_option("--help", "-h")
 def fedml_train():
     """
-    Manage training resources on FedML® Nexus AI Platform
+    Manage training resources on TensorOpera® AI Platform
     """
     pass
 
 
-@fedml_train.command("build", help="Build training packages for the FedML® Nexus AI Platform.")
+@fedml_train.command("build", help="Build training packages for the TensorOpera® AI Platform.")
 @click.help_option("--help", "-h")
 @click.option(
     "--dest_folder",
diff --git a/python/fedml/computing/scheduler/env/collect_env.py b/python/fedml/computing/scheduler/env/collect_env.py
index da4d54e7a0..39654eac6c 100644
--- a/python/fedml/computing/scheduler/env/collect_env.py
+++ b/python/fedml/computing/scheduler/env/collect_env.py
@@ -9,7 +9,7 @@
 
 
 def collect_env():
-    print("\n======== FedML (https://fedml.ai) ========")
+    print("\n======== FedML (https://tensoropera.ai) ========")
     print("FedML version: " + str(fedml.__version__))
     env_version = fedml.get_env_version()
     print("FedML ENV version: " + str(env_version))
diff --git a/python/fedml/computing/scheduler/scheduler_core/account_manager.py b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
index 85d76d5973..4b6a628b43 100755
--- a/python/fedml/computing/scheduler/scheduler_core/account_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_core/account_manager.py
@@ -84,7 +84,7 @@ def login(self, user_id, api_key="", device_id=None, os_name=None, role=None, ru
             print("Please check whether your network is normal!")
             return None
 
-        # Bind account id to FedML® Nexus AI Platform
+        # Bind account id to TensorOpera® AI Platform
         register_try_count = 0
         edge_id = -1
         user_name = None
diff --git a/python/fedml/computing/scheduler/scheduler_entry/README.md b/python/fedml/computing/scheduler/scheduler_entry/README.md
index 0d1da81950..41d32ff399 100644
--- a/python/fedml/computing/scheduler/scheduler_entry/README.md
+++ b/python/fedml/computing/scheduler/scheduler_entry/README.md
@@ -132,7 +132,7 @@ You just need to customize the following config items.
 
 3. `bootstrap`, It is the bootstrap shell command which will be executed before running entry commands.
 
-Then you can use the following example CLI to launch the job at FedML® Nexus AI Platform
+Then you can use the following example CLI to launch the job at TensorOpera® AI Platform
 (Replace $YourApiKey with your own account API key from open.fedml.ai)
 
 Example:
@@ -142,7 +142,7 @@ fedml launch hello_job.yaml
 
 After the launch CLI is executed, the output is as follows. Here you may open the job url to confirm and actually start the job.
 ```
-Submitting your job to FedML® Nexus AI Platform: 100%|████████████████████████████████████████████████████████████████████████████████████████| 6.07k/6.07k [00:01<00:00, 4.94kB/s]
+Submitting your job to TensorOpera® Nexus AI Platform: 100%|████████████████████████████████████████████████████████████████████████████████████████| 6.07k/6.07k [00:01<00:00, 4.94kB/s]
 
 Searched and matched the following GPU resource for your job:
 +-----------+-------------------+---------+------------+-------------------------+---------+-------+----------+
diff --git a/python/fedml/computing/scheduler/scheduler_entry/app_manager.py b/python/fedml/computing/scheduler/scheduler_entry/app_manager.py
index 91b5ff64cc..267db08901 100755
--- a/python/fedml/computing/scheduler/scheduler_entry/app_manager.py
+++ b/python/fedml/computing/scheduler/scheduler_entry/app_manager.py
@@ -278,7 +278,7 @@ def push_app_package_to_s3(self, app_name, app_package_path):
         app_storage_url = s3_storage.upload_file_with_progress(app_package_path, app_dst_key,
                                                                out_progress_to_err=True,
                                                                progress_desc="Submitting your job to "
-                                                                             "FedML® Nexus AI Platform")
+                                                                             "TensorOpera® Nexus AI Platform")
         return app_storage_url
 
     def pull_app_package_from_s3(self, model_storage_url, model_name):
@@ -315,7 +315,7 @@ def push_model_to_s3(self, model_name, model_zip_path):
         return FedMLModelCards.get_instance().push_model_to_s3(
             model_name, model_zip_path, "FedMLLaunchServe",
             show_progress=False,
-            progress_desc="Submitting your job to FedML® Nexus AI Platform")
+            progress_desc="Submitting your job to TensorOpera® Nexus AI Platform")
 
     def check_model_package(self, workspace):
         model_config_file = os.path.join(
diff --git a/python/fedml/core/mlops/__init__.py b/python/fedml/core/mlops/__init__.py
index 148427fe1f..4d60534547 100644
--- a/python/fedml/core/mlops/__init__.py
+++ b/python/fedml/core/mlops/__init__.py
@@ -107,7 +107,7 @@ def init(args, should_init_logs=True):
         return
     else:
         if hasattr(args, "simulator_daemon"):
-            # Bind local device as simulation device on FedML® Nexus AI Platform
+            # Bind local device as simulation device on TensorOpera® AI Platform
             setattr(args, "using_mlops", True)
             setattr(args, "rank", 1)
             MLOpsStore.mlops_bind_result = bind_simulation_device(args, args.user)
@@ -125,7 +125,7 @@ def init(args, should_init_logs=True):
     if project_name is None or api_key is None:
         raise Exception("Please check mlops_project_name and mlops_api_key params.")
 
-    # Bind local device as simulation device on FedML® Nexus AI Platform
+    # Bind local device as simulation device on TensorOpera® AI Platform
     setattr(args, "using_mlops", True)
     setattr(args, "rank", 1)
     MLOpsStore.mlops_bind_result = bind_simulation_device(args, api_key, args.config_version)
@@ -753,7 +753,7 @@ def push_artifact_to_s3(artifact: fedml.mlops.Artifact, version="release", show_
                                                                     show_progress=show_progress,
                                                                     out_progress_to_err=True,
                                                                     progress_desc="Submitting your artifact to "
-                                                                                  "FedML® Nexus AI Platform")
+                                                                                  "TensorOpera® Nexus AI Platform")
         artifact_storage_url = str(artifact_storage_url).split("?")[0]
     except Exception as e:
         pass
@@ -1289,7 +1289,7 @@ def bind_simulation_device(args, userid):
         device_role = "Edge.Simulator"
         unique_device_id = "{}@{}.{}".format(args.device_id, args.os_name, device_role)
 
-    # Bind account id to FedML® Nexus AI Platform
+    # Bind account id to TensorOpera® AI Platform
     register_try_count = 0
     edge_id = -1
     while register_try_count < 5:
diff --git a/python/fedml/serving/templates/hf_template/config.yaml b/python/fedml/serving/templates/hf_template/config.yaml
index da512f4a46..72551635d4 100644
--- a/python/fedml/serving/templates/hf_template/config.yaml
+++ b/python/fedml/serving/templates/hf_template/config.yaml
@@ -62,7 +62,7 @@ environment_variables:
   VERBOSE: "True"
 
 # If you do not have any GPU resource but want to serve the model
-# Try fedml® launch platform, and uncomment the following lines.
+# Try TensorOpera® launch platform, and uncomment the following lines.
 # ------------------------------------------------------------
 computing:
   minimum_num_gpus: 1           # minimum # of GPUs to provision
diff --git a/python/fedml/workflow/driver_example/customized_job_example/README.md b/python/fedml/workflow/driver_example/customized_job_example/README.md
index 647cddc290..cd95ef5c75 100644
--- a/python/fedml/workflow/driver_example/customized_job_example/README.md
+++ b/python/fedml/workflow/driver_example/customized_job_example/README.md
@@ -1,6 +1,6 @@
 
 # Make your own workflow with multiple jobs
-## Define the job yaml based on the FEDML® Launch docs (https://doc.fedml.ai/launch)
+## Define the job yaml based on the TensorOpera® Launch docs (https://doc.fedml.ai/launch)
 ```
     working_directory = os.path.dirname(os.path.abspath(__file__))
     deploy_image_job_yaml = os.path.join(working_directory, "deploy_image_job.yaml")
@@ -119,7 +119,7 @@
 ```
 The output of the above deploy workflow is as follows.
 ```
-Submitting your job to FedML® Nexus AI Platform: 100%|██████████| 3.00k/3.00k [00:00<00:00, 3.10kB/s]
+Submitting your job to TensorOpera® Nexus AI Platform: 100%|██████████| 3.00k/3.00k [00:00<00:00, 3.10kB/s]
 Final status of the workflow is as follows. JobStatus.FINISHED
 Output of the workflow is as follows. {'endpoint_id': 2131, 'endpoint_name': 'endpoint_test1', 'inference_url': 'https://open-test.fedml.ai/inference', 'request_body': {'arr': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0100005, -0.0100005, -0.0100005, -0.013973799, -0.0189315247, -0.023184301, -0.0360728861, -0.0392619154, -0.0380269994, -0.0390143887, -0.0346046778, -0.0257765396, -0.0209733754, -0.0217809993, -0.0144984527, -0.0118807892, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0178081425, -0.0232058779, -0.0298662898, -0.0414395151, -0.0586512813, -0.0812643979, -0.105997038, -0.121704878, -0.134457288, -0.139756261, -0.141562422, -0.135229133, -0.120246727, -0.104490087, -0.0870044931, -0.0716699334, -0.0485892545, -0.0324260775, -0.0216926329, -0.0100005, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0132956624, -0.0225936238, -0.0383702224, -0.0598206019, -0.0842014426, -0.118390816, -0.154266827, -0.188282524, -0.219803054, -0.242936317, -0.255020324, -0.259481423, -0.249404582, -0.226727106, -0.200418885, -0.16716117, -0.134317009, -0.0958717755, -0.0736565245, -0.0503983075, -0.0269783475, -0.0168919, -0.0100005, 0.0, 0.0, 0.0, 0.0, -0.0147795885, -0.025122101, -0.0381226487, -0.0786317321, -0.119593671, -0.165704529, -0.228814281, -0.288620224, -0.354491034, -0.421140618, -0.480243669, -0.527064646, -0.540807419, -0.521388017, -0.474446021, -0.403948632, -0.336571539, -0.271580657, -0.20666741, -0.154539645, -0.108856709, -0.0677589146, -0.0340327281, -0.0215091205, 0.0, 0.0, -0.0100005, -0.0107381289, -0.0260253876, -0.0570600482, -0.0914378767, -0.143000013, -0.199005834, -0.266034404, -0.353401549, -0.450251488, -0.551598332, -0.647939202, -0.743171364, -0.818162561, -0.851073275, -0.83112168, -0.763764496, -0.659992784, -0.547527626, -0.439376979, -0.33557659, -0.254856553, -0.183933732, -0.126755715, 
-0.0706477667, -0.0388818206, 0.0, 0.0, 0.0, -0.0134176155, -0.0390612132, -0.0873974922, -0.133107017, -0.194532142, -0.27478633, -0.369886454, -0.482920333, -0.605294063, -0.735621386, -0.869509827, -0.989564738, -1.09132506, -1.13182948, -1.09408349, -0.996373436, -0.868781173, -0.717778845, -0.570649327, -0.439021868, -0.326889344, -0.235934504, -0.167697996, -0.0995100269, -0.0479392976, -0.0187851186, 0.0, -0.0117322667, -0.0288274493, -0.0646532861, -0.118956716, -0.17783758, 1.53795878, 2.57176245, 1.53212043, 1.00392168, -0.179355647, -0.591732991, -1.05273662, -1.15378689, -1.22142979, -1.2388156, -1.21321586, -1.14302847, -1.02018313, -0.857098743, -0.676706697, -0.516203262, -0.379287244, -0.271402545, -0.189934521, -0.119940614, -0.0556340911, -0.0145752163, 0.0, -0.0206611389, -0.0437166621, -0.0808756237, -0.140488164, -0.207699245, 3.7747726, 3.14033146, 2.28939169, 1.76127332, 1.4318542, 1.1313135, 0.679164893, 0.665484747, 0.666043389, 0.680680095, 0.677305174, 0.665508286, 0.721340316, 0.883661589, 0.91751869, 0.0282541074, -0.401002939, -0.283099723, -0.194831338, -0.123075256, -0.066612686, -0.0161462821, -0.0112546885, -0.0293918605, -0.0484646663, -0.093178326, -0.146682925, -0.218121209, 0.830460131, 1.04725853, 0.147086928, 0.259684517, 0.495679969, 0.998953721, 1.29535061, 1.12204782, 1.41528197, 1.4259952, 1.36416372, 1.22805443, 1.03395727, 1.40874227, 1.73166837, 1.00260058, -0.401823716, -0.275049233, -0.181713744, -0.107567122, -0.0566041118, -0.0189159236, -0.0121427928, -0.0243168731, -0.050270377, -0.0887358114, -0.138806025, -0.212706019, -0.321729999, -0.462313723, -0.652442841, -0.845524923, -0.961258323, -0.793125052, -0.226359955, -0.640468216, -0.12372009, -0.167157468, -0.255843161, -0.441448335, -0.792766628, 1.30597044, 1.81460411, 0.691054579, -0.383665051, -0.26310513, -0.166473946, -0.0799663431, -0.0455007946, -0.0195541446, -0.0100005, -0.0186206584, -0.0414986832, -0.0722615997, -0.123238725, -0.212256343, 
-0.331309824, -0.491126078, -0.687704902, -0.86260267, -0.939124713, -0.869991467, -0.758168797, -0.722198511, -0.739826964, -0.809980626, -0.911188613, -1.00032001, -0.221550751, 1.53134484, 1.47605194, -0.273150738, -0.363157263, -0.252975575, -0.157152039, -0.0652009258, -0.0335283586, -0.0124209728, 0.0, -0.014849279, -0.0329699917, -0.0601451792, -0.118353377, -0.219271688, -0.354392407, -0.523006773, -0.71568287, -0.862626101, -0.90524289, -0.831592288, -0.751312636, -0.762948163, -0.825877849, -0.930232292, -1.04727288, -0.879016953, 1.11455708, 1.61660969, 0.264000765, -0.464282235, -0.354907482, -0.256014147, -0.158427696, -0.0620647188, -0.0242921899, 0.0, 0.0, -0.0117874599, -0.0252632841, -0.0502423656, -0.115068847, -0.235195531, -0.377531303, -0.547311188, -0.723069536, -0.848981953, -0.878897369, -0.826469482, -0.795496372, -0.883536617, -0.994814123, -1.13364619, -1.20871511, 5.60198157e-05, 1.28700658, 1.50082995, -0.122561277, -0.462110102, -0.360151562, -0.263898374, -0.166295096, -0.0568635009, -0.0105441394, 0.0, 0.0, 0.0, -0.016636779, -0.0423254862, -0.119931644, -0.252550583, -0.39191634, -0.556171069, -0.717849905, -0.829516019, -0.854549188, -0.84598967, -0.889246054, -1.03761315, -1.16457617, -1.30025654, -0.740699086, 1.05188993, 1.3036988, -0.163440609, -0.59058464, -0.474233049, -0.368789557, -0.274082099, -0.174264813, -0.0696188843, -0.018003151, 0.0, 0.0, 0.0, -0.0168610568, -0.0451688568, -0.131668459, -0.267838929, -0.398906806, -0.548202377, -0.690077015, -0.789823563, -0.831599129, -0.861314493, -0.95681566, -1.11036634, -1.22743073, -1.31006468, -0.02573686, 1.14239899, 0.761423491, -0.706825874, -0.608999426, -0.492457882, -0.380502867, -0.279282191, -0.173984018, -0.0767235054, -0.0195871373, -0.0100005, 0.0, -0.0100005, -0.024817808, -0.0552275065, -0.148243512, -0.283202341, -0.4022125, -0.534598048, -0.656007943, -0.738083794, -0.781657503, -0.824620535, -0.918824463, -1.04078449, -1.13391454, -1.09212795, 0.70592031, 
1.17679031, -0.37378182, -0.758547572, -0.62868064, -0.501492113, -0.381043892, -0.270505206, -0.168251255, -0.0784168728, -0.022799968, -0.0157856413, 0.0, 0.0, -0.0269850288, -0.0676999793, -0.167498207, -0.298089736, -0.411096027, -0.522810883, -0.625838621, -0.693423683, -0.731704263, -0.767086709, -0.82998003, -0.921590434, -1.00562716, 0.0779492952, 1.22959017, 0.636500653, -0.901400043, -0.769630793, -0.635363773, -0.494618472, -0.369117095, -0.255794246, -0.156732083, -0.0783809414, -0.0267109338, -0.0148726634, 0.0, -0.0100005, -0.0348385687, -0.0869311199, -0.185622432, -0.311777198, -0.427690033, -0.530457702, -0.612837575, -0.669073252, -0.706628103, -0.737178903, -0.779583917, -0.866698428, -0.288157768, 1.2193059, 1.10500698, -0.50413989, -0.909137779, -0.774520432, -0.619405771, -0.472096102, -0.344822207, -0.235626373, -0.144455008, -0.0769092863, -0.0286146987, -0.0100005, 0.0, -0.0100005, -0.0342628198, -0.101174053, -0.195711272, -0.324606261, -0.442716711, -0.545960978, -0.637281741, -0.703742928, -0.753441795, -0.788772419, -0.829773267, -0.745526297, 0.949893727, 1.18293215, 0.385795002, -1.023299, -0.89872884, -0.736858006, -0.575258663, -0.430322485, -0.30912025, -0.209889823, -0.13189517, -0.0731506415, -0.0276674735, -0.0100005, 0.0, -0.0100005, -0.0400234981, -0.10709374, -0.194645695, -0.316981297, -0.440895564, -0.560086039, -0.667605659, -0.763806998, -0.843535003, -0.903604039, -0.938010529, 0.763887624, 1.12176928, 0.784111, -0.818046093, -0.991046672, -0.828340182, -0.652780006, -0.495325185, -0.364891317, -0.261772085, -0.17529887, -0.112966586, -0.0617374486, -0.0270715466, 0.0, 0.0, 0.0, -0.0406825662, -0.0978606438, -0.177848987, -0.287783481, -0.412614752, -0.543271605, -0.671018812, -0.798159188, -0.916686263, -1.02499517, -0.773682132, 1.09355574, 1.05041156, -0.498209852, -1.05256459, -0.870980804, -0.688431167, -0.523166414, -0.391308572, -0.282035183, -0.199071147, -0.13652517, -0.0893688913, -0.041317086, -0.016850831, 
0.0, 0.0, 0.0, -0.0283386899, -0.0765120563, -0.141969555, -0.232658498, -0.341261378, -0.469723228, -0.606194512, -0.747366354, -0.880786554, -0.729389144, 0.895224865, 1.11943124, -0.105438374, -1.00783177, -0.859696548, -0.683890026, -0.531181637, -0.395889778, -0.289956123, -0.203267966, -0.14295145, -0.0963532989, -0.0643914026, -0.0337070214, -0.0111853003, 0.0, 0.0, -0.0100005, -0.0151722732, -0.0480051146, -0.0951161616, -0.160643556, -0.245453283, -0.353245922, -0.474265429, -0.598667391, -0.729305101, 0.389322873, 1.38694264, 1.37486731, -0.403963644, -0.77444593, -0.638730244, -0.502999283, -0.387339921, -0.279971294, -0.198381814, -0.135822721, -0.0965383286, -0.0633365644, -0.0427549534, -0.0257581657, -0.0100005, 0.0, 0.0, 0.0, 0.0, -0.0237543896, -0.0522032466, -0.0858749627, -0.140703979, -0.208515621, -0.290149335, -0.368567087, 0.334201602, 2.33307288, 2.27286258, 2.23777229, 0.0412218057, -0.494890333, -0.422342015, -0.339048837, -0.257069088, -0.185534152, -0.136577185, -0.0860242391, -0.0578259874, -0.033636416, -0.0181122384, -0.0100005, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0136274661, -0.0285803164, -0.0474793553, -0.0779785591, -0.118532172, -0.167201555, -0.214787719, 2.22171299, 4.30500754, 4.03125111, 3.36505818, 0.379953648, -0.284269948, -0.247694588, -0.205869945, -0.155925102, -0.116435448, -0.0857647974, -0.0546508166, -0.0401800073, -0.023758997, -0.0165780693, -0.0100005, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0115748833, -0.0284271584, -0.0506655656, -0.0740332846, -0.100455604, -0.124744578, 4.17363552, 7.81243004, 5.7896979, 0.322149281, -0.181506609, -0.160333393, -0.139182079, -0.118875455, -0.0873316648, -0.0700227708, -0.0540690537, -0.0384297037, -0.0265616274, -0.0161844507, -0.0119683967, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0132918601, -0.0159980455, -0.0207236291, -0.0266997366, -0.0284703819, -0.0343035092, -0.0410336906, -0.0488886427, -0.0548357917, -0.0551988782, -0.0469971082, -0.0388769026, -0.0316010302, 
-0.0285226846, -0.021736589, -0.0100005, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'model': 'fedml-qa-customer-0219/endpoint_test1'}, 'key_token': '5d427244128c45f58a74f3ecdb09b1e0'}
 Output of all jobs is as follows. {'deploy_image_job': {'endpoint_id': 2131, 'endpoint_name': 'endpoint_test1', 'inference_url': 'https://open-test.fedml.ai/inference', 'request_body': {'arr': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0100005, -0.0100005, -0.0100005, -0.013973799, -0.0189315247, -0.023184301, -0.0360728861, -0.0392619154, -0.0380269994, -0.0390143887, -0.0346046778, -0.0257765396, -0.0209733754, -0.0217809993, -0.0144984527, -0.0118807892, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0178081425, -0.0232058779, -0.0298662898, -0.0414395151, -0.0586512813, -0.0812643979, -0.105997038, -0.121704878, -0.134457288, -0.139756261, -0.141562422, -0.135229133, -0.120246727, -0.104490087, -0.0870044931, -0.0716699334, -0.0485892545, -0.0324260775, -0.0216926329, -0.0100005, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0132956624, -0.0225936238, -0.0383702224, -0.0598206019, -0.0842014426, -0.118390816, -0.154266827, -0.188282524, -0.219803054, -0.242936317, -0.255020324, -0.259481423, -0.249404582, -0.226727106, -0.200418885, -0.16716117, -0.134317009, -0.0958717755, -0.0736565245, -0.0503983075, -0.0269783475, -0.0168919, -0.0100005, 0.0, 0.0, 0.0, 0.0, -0.0147795885, -0.025122101, -0.0381226487, -0.0786317321, -0.119593671, -0.165704529, -0.228814281, -0.288620224, -0.354491034, -0.421140618, -0.480243669, -0.527064646, -0.540807419, -0.521388017, -0.474446021, -0.403948632, -0.336571539, -0.271580657, -0.20666741, -0.154539645, -0.108856709, -0.0677589146, -0.0340327281, -0.0215091205, 0.0, 0.0, -0.0100005, -0.0107381289, -0.0260253876, -0.0570600482, -0.0914378767, -0.143000013, -0.199005834, -0.266034404, -0.353401549, -0.450251488, -0.551598332, -0.647939202, -0.743171364, -0.818162561, -0.851073275, -0.83112168, -0.763764496, -0.659992784, -0.547527626, -0.439376979, -0.33557659, -0.254856553, -0.183933732, 
-0.126755715, -0.0706477667, -0.0388818206, 0.0, 0.0, 0.0, -0.0134176155, -0.0390612132, -0.0873974922, -0.133107017, -0.194532142, -0.27478633, -0.369886454, -0.482920333, -0.605294063, -0.735621386, -0.869509827, -0.989564738, -1.09132506, -1.13182948, -1.09408349, -0.996373436, -0.868781173, -0.717778845, -0.570649327, -0.439021868, -0.326889344, -0.235934504, -0.167697996, -0.0995100269, -0.0479392976, -0.0187851186, 0.0, -0.0117322667, -0.0288274493, -0.0646532861, -0.118956716, -0.17783758, 1.53795878, 2.57176245, 1.53212043, 1.00392168, -0.179355647, -0.591732991, -1.05273662, -1.15378689, -1.22142979, -1.2388156, -1.21321586, -1.14302847, -1.02018313, -0.857098743, -0.676706697, -0.516203262, -0.379287244, -0.271402545, -0.189934521, -0.119940614, -0.0556340911, -0.0145752163, 0.0, -0.0206611389, -0.0437166621, -0.0808756237, -0.140488164, -0.207699245, 3.7747726, 3.14033146, 2.28939169, 1.76127332, 1.4318542, 1.1313135, 0.679164893, 0.665484747, 0.666043389, 0.680680095, 0.677305174, 0.665508286, 0.721340316, 0.883661589, 0.91751869, 0.0282541074, -0.401002939, -0.283099723, -0.194831338, -0.123075256, -0.066612686, -0.0161462821, -0.0112546885, -0.0293918605, -0.0484646663, -0.093178326, -0.146682925, -0.218121209, 0.830460131, 1.04725853, 0.147086928, 0.259684517, 0.495679969, 0.998953721, 1.29535061, 1.12204782, 1.41528197, 1.4259952, 1.36416372, 1.22805443, 1.03395727, 1.40874227, 1.73166837, 1.00260058, -0.401823716, -0.275049233, -0.181713744, -0.107567122, -0.0566041118, -0.0189159236, -0.0121427928, -0.0243168731, -0.050270377, -0.0887358114, -0.138806025, -0.212706019, -0.321729999, -0.462313723, -0.652442841, -0.845524923, -0.961258323, -0.793125052, -0.226359955, -0.640468216, -0.12372009, -0.167157468, -0.255843161, -0.441448335, -0.792766628, 1.30597044, 1.81460411, 0.691054579, -0.383665051, -0.26310513, -0.166473946, -0.0799663431, -0.0455007946, -0.0195541446, -0.0100005, -0.0186206584, -0.0414986832, -0.0722615997, -0.123238725, 
-0.212256343, -0.331309824, -0.491126078, -0.687704902, -0.86260267, -0.939124713, -0.869991467, -0.758168797, -0.722198511, -0.739826964, -0.809980626, -0.911188613, -1.00032001, -0.221550751, 1.53134484, 1.47605194, -0.273150738, -0.363157263, -0.252975575, -0.157152039, -0.0652009258, -0.0335283586, -0.0124209728, 0.0, -0.014849279, -0.0329699917, -0.0601451792, -0.118353377, -0.219271688, -0.354392407, -0.523006773, -0.71568287, -0.862626101, -0.90524289, -0.831592288, -0.751312636, -0.762948163, -0.825877849, -0.930232292, -1.04727288, -0.879016953, 1.11455708, 1.61660969, 0.264000765, -0.464282235, -0.354907482, -0.256014147, -0.158427696, -0.0620647188, -0.0242921899, 0.0, 0.0, -0.0117874599, -0.0252632841, -0.0502423656, -0.115068847, -0.235195531, -0.377531303, -0.547311188, -0.723069536, -0.848981953, -0.878897369, -0.826469482, -0.795496372, -0.883536617, -0.994814123, -1.13364619, -1.20871511, 5.60198157e-05, 1.28700658, 1.50082995, -0.122561277, -0.462110102, -0.360151562, -0.263898374, -0.166295096, -0.0568635009, -0.0105441394, 0.0, 0.0, 0.0, -0.016636779, -0.0423254862, -0.119931644, -0.252550583, -0.39191634, -0.556171069, -0.717849905, -0.829516019, -0.854549188, -0.84598967, -0.889246054, -1.03761315, -1.16457617, -1.30025654, -0.740699086, 1.05188993, 1.3036988, -0.163440609, -0.59058464, -0.474233049, -0.368789557, -0.274082099, -0.174264813, -0.0696188843, -0.018003151, 0.0, 0.0, 0.0, -0.0168610568, -0.0451688568, -0.131668459, -0.267838929, -0.398906806, -0.548202377, -0.690077015, -0.789823563, -0.831599129, -0.861314493, -0.95681566, -1.11036634, -1.22743073, -1.31006468, -0.02573686, 1.14239899, 0.761423491, -0.706825874, -0.608999426, -0.492457882, -0.380502867, -0.279282191, -0.173984018, -0.0767235054, -0.0195871373, -0.0100005, 0.0, -0.0100005, -0.024817808, -0.0552275065, -0.148243512, -0.283202341, -0.4022125, -0.534598048, -0.656007943, -0.738083794, -0.781657503, -0.824620535, -0.918824463, -1.04078449, -1.13391454, -1.09212795, 
0.70592031, 1.17679031, -0.37378182, -0.758547572, -0.62868064, -0.501492113, -0.381043892, -0.270505206, -0.168251255, -0.0784168728, -0.022799968, -0.0157856413, 0.0, 0.0, -0.0269850288, -0.0676999793, -0.167498207, -0.298089736, -0.411096027, -0.522810883, -0.625838621, -0.693423683, -0.731704263, -0.767086709, -0.82998003, -0.921590434, -1.00562716, 0.0779492952, 1.22959017, 0.636500653, -0.901400043, -0.769630793, -0.635363773, -0.494618472, -0.369117095, -0.255794246, -0.156732083, -0.0783809414, -0.0267109338, -0.0148726634, 0.0, -0.0100005, -0.0348385687, -0.0869311199, -0.185622432, -0.311777198, -0.427690033, -0.530457702, -0.612837575, -0.669073252, -0.706628103, -0.737178903, -0.779583917, -0.866698428, -0.288157768, 1.2193059, 1.10500698, -0.50413989, -0.909137779, -0.774520432, -0.619405771, -0.472096102, -0.344822207, -0.235626373, -0.144455008, -0.0769092863, -0.0286146987, -0.0100005, 0.0, -0.0100005, -0.0342628198, -0.101174053, -0.195711272, -0.324606261, -0.442716711, -0.545960978, -0.637281741, -0.703742928, -0.753441795, -0.788772419, -0.829773267, -0.745526297, 0.949893727, 1.18293215, 0.385795002, -1.023299, -0.89872884, -0.736858006, -0.575258663, -0.430322485, -0.30912025, -0.209889823, -0.13189517, -0.0731506415, -0.0276674735, -0.0100005, 0.0, -0.0100005, -0.0400234981, -0.10709374, -0.194645695, -0.316981297, -0.440895564, -0.560086039, -0.667605659, -0.763806998, -0.843535003, -0.903604039, -0.938010529, 0.763887624, 1.12176928, 0.784111, -0.818046093, -0.991046672, -0.828340182, -0.652780006, -0.495325185, -0.364891317, -0.261772085, -0.17529887, -0.112966586, -0.0617374486, -0.0270715466, 0.0, 0.0, 0.0, -0.0406825662, -0.0978606438, -0.177848987, -0.287783481, -0.412614752, -0.543271605, -0.671018812, -0.798159188, -0.916686263, -1.02499517, -0.773682132, 1.09355574, 1.05041156, -0.498209852, -1.05256459, -0.870980804, -0.688431167, -0.523166414, -0.391308572, -0.282035183, -0.199071147, -0.13652517, -0.0893688913, -0.041317086, 
-0.016850831, 0.0, 0.0, 0.0, -0.0283386899, -0.0765120563, -0.141969555, -0.232658498, -0.341261378, -0.469723228, -0.606194512, -0.747366354, -0.880786554, -0.729389144, 0.895224865, 1.11943124, -0.105438374, -1.00783177, -0.859696548, -0.683890026, -0.531181637, -0.395889778, -0.289956123, -0.203267966, -0.14295145, -0.0963532989, -0.0643914026, -0.0337070214, -0.0111853003, 0.0, 0.0, -0.0100005, -0.0151722732, -0.0480051146, -0.0951161616, -0.160643556, -0.245453283, -0.353245922, -0.474265429, -0.598667391, -0.729305101, 0.389322873, 1.38694264, 1.37486731, -0.403963644, -0.77444593, -0.638730244, -0.502999283, -0.387339921, -0.279971294, -0.198381814, -0.135822721, -0.0965383286, -0.0633365644, -0.0427549534, -0.0257581657, -0.0100005, 0.0, 0.0, 0.0, 0.0, -0.0237543896, -0.0522032466, -0.0858749627, -0.140703979, -0.208515621, -0.290149335, -0.368567087, 0.334201602, 2.33307288, 2.27286258, 2.23777229, 0.0412218057, -0.494890333, -0.422342015, -0.339048837, -0.257069088, -0.185534152, -0.136577185, -0.0860242391, -0.0578259874, -0.033636416, -0.0181122384, -0.0100005, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0136274661, -0.0285803164, -0.0474793553, -0.0779785591, -0.118532172, -0.167201555, -0.214787719, 2.22171299, 4.30500754, 4.03125111, 3.36505818, 0.379953648, -0.284269948, -0.247694588, -0.205869945, -0.155925102, -0.116435448, -0.0857647974, -0.0546508166, -0.0401800073, -0.023758997, -0.0165780693, -0.0100005, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0115748833, -0.0284271584, -0.0506655656, -0.0740332846, -0.100455604, -0.124744578, 4.17363552, 7.81243004, 5.7896979, 0.322149281, -0.181506609, -0.160333393, -0.139182079, -0.118875455, -0.0873316648, -0.0700227708, -0.0540690537, -0.0384297037, -0.0265616274, -0.0161844507, -0.0119683967, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0132918601, -0.0159980455, -0.0207236291, -0.0266997366, -0.0284703819, -0.0343035092, -0.0410336906, -0.0488886427, -0.0548357917, -0.0551988782, -0.0469971082, -0.0388769026, 
-0.0316010302, -0.0285226846, -0.021736589, -0.0100005, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'model': 'fedml-qa-customer-0219/endpoint_test1'}, 'key_token': '5d427244128c45f58a74f3ecdb09b1e0'}}
diff --git a/python/spotlight_prj/unitedllm/README.md b/python/spotlight_prj/unitedllm/README.md
index 5d5972cef1..5a300f2cfc 100644
--- a/python/spotlight_prj/unitedllm/README.md
+++ b/python/spotlight_prj/unitedllm/README.md
@@ -4,7 +4,7 @@
 
 # UnitedLLM: Training and Serving LLM Collaboratively on Decentralized GPU Clouds
 
-[FEDML® UnitedLLM](https://blog.fedml.ai/releasing-fedllm-build-your-own-large-language-models-on-proprietary-data-using-the-fedml-platform/)
+[TensorOpera® UnitedLLM](https://blog.fedml.ai/releasing-fedllm-build-your-own-large-language-models-on-proprietary-data-using-the-fedml-platform/)
 is an MLOps-supported training pipeline for decentralized pretraining and finetuning of large language models.
 
 ## Getting Started

From 4acb0f051705b0479d38faedbf03ab37b854accc Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Fri, 20 Dec 2024 18:36:53 +0800
Subject: [PATCH 249/282] undo "Welcome to FedML.ai!"

---
 python/fedml/api/modules/device.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/api/modules/device.py b/python/fedml/api/modules/device.py
index 1b578aa903..4ca5695523 100644
--- a/python/fedml/api/modules/device.py
+++ b/python/fedml/api/modules/device.py
@@ -78,7 +78,7 @@ def _bind(
     else:
         docker_install_url = "https://docs.docker.com/engine/install/"
         docker_config_text = " Moreover, you need to config the docker engine to run as a non-root user. Here is the docs. https://docs.docker.com/engine/install/linux-postinstall/"
-    print("\n Welcome toTensorOpera.ai! \n Start to login the current device to the TensorOpera® Nexus AI Platform\n")
+    print("\n Welcome to FedML.ai! \n Start to login the current device to the TensorOpera® Nexus AI Platform\n")
     print(" If you want to deploy models into this computer, you need to install the docker engine to serve your models.")
     print(f" Here is the docs for installation docker engine. {docker_install_url}")
     if docker_config_text is not None:

From 07ae5ec68f2848a0a8fb12fc89bea8bc9d8c4023 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 25 Dec 2024 12:08:41 +0800
Subject: [PATCH 250/282] [bugfix] start_job_perf on execute_job_task

---
 .../scheduler/scheduler_core/scheduler_base_job_runner.py       | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
index f5e08107f2..cf700e9a9d 100755
--- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py
@@ -594,7 +594,7 @@ def callback_start_fl_job(self, job_pid):
 
     def start_job_perf(self, job_pid):
         GeneralConstants.save_learning_process(self.run_id, job_pid, data_dir=self.agent_data_dir)
-        #self.mlops_metrics.report_job_perf(self.args, self.agent_config["mqtt_config"], job_pid)
+        self.mlops_metrics.report_job_perf(self.args, self.agent_config["mqtt_config"], job_pid)
 
     def job_error_processor(self, error_list):
         self.check_runner_stop_event()

From 46d766a2ba697bd76dcb68ed16090cc9c4427a9c Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Sat, 28 Dec 2024 13:09:45 +0800
Subject: [PATCH 251/282] [logs] add deploy param logging

---
 .../device_model_deployment.py                | 23 +++++++++++++++++++
 .../master_protocol_manager.py                |  5 ++++
 2 files changed, 28 insertions(+)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index baee7a2973..647882c84f 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -65,6 +65,10 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
         request_json = dict()
     logging.info("[Worker] Model deployment is starting...")
 
+    logging.info("=" * 80)
+    logging.info("[Device Model Deployment] Received start deployment request: {}".format(request_json))
+    logging.info("=" * 80)
+
     # Real gpu per replica (container-level)
     num_gpus = gpu_per_replica
     gpu_ids, gpu_attach_cmd = None, ""
@@ -213,6 +217,25 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version,
             detach=True,
             command=customized_image_entry_cmd,
         )
+
+        logging.info("=" * 80)
+        logging.info("[Device Model Deployment] Creating container with following parameters:")
+        logging.info("=" * 80)
+        logging.info("Image: {}".format(inference_image_name))
+        logging.info("Container name: {}".format(default_server_container_name))
+        logging.info("Volumes:")
+        for vol in volumes:
+            logging.info("  - {}".format(vol))
+        logging.info("Ports: [{}]".format(port_inside_container))
+        logging.info("Environment variables:")
+        for key, value in environment.items():
+            logging.info("  {} = {}".format(key, value))
+        logging.info("Host config:")
+        for key, value in host_config_dict.items():
+            logging.info("  {} = {}".format(key, value))
+        logging.info("Command: {}".format(customized_image_entry_cmd))
+        logging.info("=" * 80)
+
         client.api.start(container=new_container.get("Id"))
     except Exception as e:
         logging.error(f"Failed to create the container with exception {e}, traceback : {traceback.format_exc()}")
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index 9e0d51b588..e06c5162eb 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -155,6 +155,11 @@ def callback_start_deployment(self, topic, payload):
 
         # Get deployment params
         request_json = json.loads(payload)
+
+        logging.info("=" * 80)
+        logging.info("[Master Protocol Manager] Received start deployment request: {}".format(request_json))
+        logging.info("=" * 80)
+
         run_id = request_json["end_point_id"]
         end_point_name = request_json["end_point_name"]
         token = request_json["token"]

From 0f1a37ef20426fc3e6378effc19a3ebf93016f52 Mon Sep 17 00:00:00 2001
From: "alex.liang" <alexliang.kh@gmail.com>
Date: Tue, 11 Feb 2025 00:51:42 +0800
Subject: [PATCH 252/282] add the login cli for the service provider named
 chainopera.

---
 python/fedml/cli/modules/login.py | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/python/fedml/cli/modules/login.py b/python/fedml/cli/modules/login.py
index 5e77910cbb..d17b0d0125 100644
--- a/python/fedml/cli/modules/login.py
+++ b/python/fedml/cli/modules/login.py
@@ -103,16 +103,28 @@
     default="",
     help="Name of the node.",
 )
+@click.option(
+    "--service_provider",
+    "-sp",
+    type=str,
+    default="",
+    help="Service provider.",
+)
 def fedml_login(
         api_key, version, compute_node, server, provider, deploy_worker_num,
         local_on_premise_platform, local_on_premise_platform_port,
         master_inference_gateway_port, worker_inference_proxy_port, worker_connection_type, marketplace_type,
-        price_per_hour, name
+        price_per_hour, name, service_provider
 ):
     fedml.set_env_version(version)
     fedml.set_local_on_premise_platform_host(local_on_premise_platform)
     fedml.set_local_on_premise_platform_port(local_on_premise_platform_port)
 
+    if service_provider == "chainopera" or service_provider == "co":
+        fedml.set_env_version('local')
+        fedml.set_local_on_premise_platform_host('open.chainopera.ai')
+        fedml.set_local_on_premise_platform_port(443)
+
     try:
         price_per_hour = float(price_per_hour)
     except ValueError as e:

From a912c5738a90992fd06fd2394e068dcde9139aa0 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Tue, 11 Feb 2025 12:53:28 +0800
Subject: [PATCH 253/282] [bugfix] Handle deployment failure by deleting
 deployed replicas and releasing GPU

---
 .../computing/scheduler/model_scheduler/master_job_runner.py  | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index 00b08acfb8..bc943307f2 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -11,6 +11,7 @@
 import fedml
 from fedml.core.mlops import MLOpsRuntimeLog, MLOpsConfigs
 from fedml.core.mlops.mlops_runtime_log import MLOpsFormatter
+from .device_model_msg_object import FedMLModelMsgObject
 from .device_client_constants import ClientConstants
 from .device_model_cache import FedMLModelCache
 from .device_server_constants import ServerConstants
@@ -278,6 +279,9 @@ def process_deployment_result_message(self, topic=None, payload=None):
                     end_point_id, end_point_name, payload_json["model_name"], "",
                     ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
                     message_center=self.message_center)
+                # When reporting failure to MLOps, delete the replicas that were already deployed successfully and release their GPUs.
+                model_msg_object = FedMLModelMsgObject(topic, payload)
+                self.send_deployment_delete_request_to_edges(payload, model_msg_object, message_center=self.message_center)
                 return
 
             # Failure handler, send the rollback message to the worker devices only if it has not been rollback

From c6bfe20e84915c8d692961a1c9ad6e26f3fdbb0c Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 12 Feb 2025 11:11:47 +0800
Subject: [PATCH 254/282] [refactor] Disable request timeout middleware in
 device model inference

---
 .../model_scheduler/device_model_inference.py | 80 +++++++++----------
 1 file changed, 40 insertions(+), 40 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index 9adc17538d..6c37f10322 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -46,46 +46,46 @@ class Settings:
                                    redis_password=Settings.redis_password)
 
 
-@api.middleware("http")
-async def auth_middleware(request: Request, call_next):
-    if "/inference" in request.url.path or "/api/v1/predict" in request.url.path:
-        try:
-            # Attempt to parse the JSON body.
-            request_json = await request.json()
-        except json.JSONDecodeError:
-            return JSONResponse(
-                {"error": True, "message": "Invalid JSON."},
-                status_code=status.HTTP_400_BAD_REQUEST)
-
-        # Get endpoint's total pending requests.
-        end_point_id = request_json.get("end_point_id", None)
-        pending_requests_num = FEDML_MODEL_CACHE.get_pending_requests_counter(end_point_id)
-        if pending_requests_num:
-            # Fetch metrics of the past k=3 requests.
-            pask_k_metrics = FEDML_MODEL_CACHE.get_endpoint_metrics(
-                end_point_id=end_point_id,
-                k_recent=3)
-
-            # Get the request timeout from the endpoint settings.
-            request_timeout_s = FEDML_MODEL_CACHE.get_endpoint_settings(end_point_id) \
-                .get(ServerConstants.INFERENCE_REQUEST_TIMEOUT_KEY, ServerConstants.INFERENCE_REQUEST_TIMEOUT_DEFAULT)
-
-            # Only proceed if the past k metrics collection is not empty.
-            if pask_k_metrics:
-                # Measure the average latency in seconds(!), hence the 0.001 multiplier.
-                past_k_latencies_sec = \
-                    [float(j_obj["current_latency"]) * 0.001 for j_obj in pask_k_metrics]
-                mean_latency = sum(past_k_latencies_sec) / len(past_k_latencies_sec)
-
-                # If timeout threshold is exceeded then cancel and return time out error.
-                should_block = (mean_latency * pending_requests_num) > request_timeout_s
-                if should_block:
-                    return JSONResponse(
-                        {"error": True, "message": "Request timed out."},
-                        status_code=status.HTTP_504_GATEWAY_TIMEOUT)
-
-    response = await call_next(request)
-    return response
+# @api.middleware("http")
+# async def auth_middleware(request: Request, call_next):
+#     if "/inference" in request.url.path or "/api/v1/predict" in request.url.path:
+#         try:
+#             # Attempt to parse the JSON body.
+#             request_json = await request.json()
+#         except json.JSONDecodeError:
+#             return JSONResponse(
+#                 {"error": True, "message": "Invalid JSON."},
+#                 status_code=status.HTTP_400_BAD_REQUEST)
+
+#         # Get endpoint's total pending requests.
+#         end_point_id = request_json.get("end_point_id", None)
+#         pending_requests_num = FEDML_MODEL_CACHE.get_pending_requests_counter(end_point_id)
+#         if pending_requests_num:
+#             # Fetch metrics of the past k=3 requests.
+#             pask_k_metrics = FEDML_MODEL_CACHE.get_endpoint_metrics(
+#                 end_point_id=end_point_id,
+#                 k_recent=3)
+
+#             # Get the request timeout from the endpoint settings.
+#             request_timeout_s = FEDML_MODEL_CACHE.get_endpoint_settings(end_point_id) \
+#                 .get(ServerConstants.INFERENCE_REQUEST_TIMEOUT_KEY, ServerConstants.INFERENCE_REQUEST_TIMEOUT_DEFAULT)
+
+#             # Only proceed if the past k metrics collection is not empty.
+#             if pask_k_metrics:
+#                 # Measure the average latency in seconds(!), hence the 0.001 multiplier.
+#                 past_k_latencies_sec = \
+#                     [float(j_obj["current_latency"]) * 0.001 for j_obj in pask_k_metrics]
+#                 mean_latency = sum(past_k_latencies_sec) / len(past_k_latencies_sec)
+
+#                 # If timeout threshold is exceeded then cancel and return time out error.
+#                 should_block = (mean_latency * pending_requests_num) > request_timeout_s
+#                 if should_block:
+#                     return JSONResponse(
+#                         {"error": True, "message": "Request timed out."},
+#                         status_code=status.HTTP_504_GATEWAY_TIMEOUT)
+
+#     response = await call_next(request)
+#     return response
 
 
 @api.on_event("startup")

From 7134c15c38bb0f06bfa3ddc9d7491dbb3996dcb3 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 12 Feb 2025 12:25:22 +0800
Subject: [PATCH 255/282] add logs

---
 .../device_http_inference_protocol.py                | 12 ++++++++++++
 .../model_scheduler/device_model_inference.py        | 11 +++++++++++
 2 files changed, 23 insertions(+)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
index 28d50d5a50..f71caa110c 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
@@ -1,4 +1,6 @@
 import logging
+import time
+import uuid
 
 import httpx
 import traceback
@@ -99,12 +101,22 @@ async def stream_generator(inference_url, input_json, method="POST"):
 async def redirect_non_stream_req_to_worker(inference_type, inference_url, model_api_headers, model_inference_json,
                                             timeout=None, method="POST"):
     response_ok = True
+    request_id = str(uuid.uuid4())[:8]  # 生成短UUID作为请求ID
+    start_time = time.time()
+    logging.info(f"[Request-{request_id}] Starting HTTP request to {inference_url}")
+    
     try:
         async with httpx.AsyncClient() as client:
             response = await client.request(
                 method=method, url=inference_url, headers=model_api_headers, json=model_inference_json, timeout=timeout
             )
+            end_time = time.time()
+            elapsed_time = end_time - start_time
+            logging.info(f"[Request-{request_id}] Completed HTTP request. Time taken: {elapsed_time:.3f} seconds")
     except Exception as e:
+        end_time = time.time()
+        elapsed_time = end_time - start_time
+        logging.error(f"[Request-{request_id}] Failed HTTP request after {elapsed_time:.3f} seconds. Error: {str(e)}")
         response_ok = False
         model_inference_result = {"error": e}
         return response_ok, model_inference_result
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index 6c37f10322..35d47f8dbd 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -4,6 +4,7 @@
 import time
 import traceback
 import os
+import uuid
 
 from typing import Any, Mapping, MutableMapping, Union
 from urllib.parse import urlparse
@@ -198,6 +199,7 @@ async def _predict(
     # Always increase the pending requests counter on a new incoming request.
     FEDML_MODEL_CACHE.update_pending_requests_counter(end_point_id, increase=True)
     inference_response = {}
+    request_uuid = str(uuid.uuid4())  # Generate unique request ID
 
     try:
         in_end_point_id = end_point_id
@@ -260,6 +262,10 @@ async def _predict(
                 output_list = input_json.get("outputs", [])
 
                 # main execution of redirecting the inference request to the idle device
+                inference_start_time = time.time()
+                start_time_str = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(inference_start_time))
+                logging.info(f"[Request {request_uuid}] Starting send_inference_request at {start_time_str}")
+                
                 inference_response = await send_inference_request(
                     idle_device,
                     end_point_id,
@@ -269,6 +275,11 @@ async def _predict(
                     inference_type=in_return_type,
                     connectivity_type=connectivity_type,
                     path=path, request_method=request_method)
+                
+                inference_end_time = time.time()
+                end_time_str = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(inference_end_time))
+                inference_duration = inference_end_time - inference_start_time
+                logging.info(f"[Request {request_uuid}] Completed send_inference_request at {end_time_str}, duration: {inference_duration:.3f} seconds")
 
             # Calculate model metrics
             try:

From f7552c49aa6da10a3cf3148a7982745b9e3f6090 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 12 Feb 2025 13:57:39 +0800
Subject: [PATCH 256/282] [logs] Always enable log file

---
 python/fedml/core/mlops/mlops_runtime_log.py | 25 ++++++++++----------
 1 file changed, 13 insertions(+), 12 deletions(-)

diff --git a/python/fedml/core/mlops/mlops_runtime_log.py b/python/fedml/core/mlops/mlops_runtime_log.py
index 0fc5db3d23..258f413448 100644
--- a/python/fedml/core/mlops/mlops_runtime_log.py
+++ b/python/fedml/core/mlops/mlops_runtime_log.py
@@ -139,10 +139,11 @@ def __init__(self, args):
         self.stdout_handle = None
         self.logger = None
         self.args = args
-        if hasattr(args, "using_mlops"):
-            self.should_write_log_file = args.using_mlops
-        else:
-            self.should_write_log_file = False
+        # if hasattr(args, "using_mlops"):
+        #     self.should_write_log_file = args.using_mlops
+        # else:
+        #     self.should_write_log_file = False
+        self.should_write_log_file = True
         if not hasattr(args, "log_file_dir"):
             setattr(args, "log_file_dir", "./logs")
         self.log_file_dir = args.log_file_dir
@@ -175,14 +176,14 @@ def init_logs(self, log_level=None):
         self.logger.setLevel(log_level)
         self.logger.handlers.clear()
         self.logger.addHandler(self.stdout_handle)
-        if hasattr(self, "should_write_log_file") and self.should_write_log_file:
-            run_id, edge_id = self.args.run_id, MLOpsLoggingUtils.get_edge_id_from_args(self.args)
-            log_config_file = os.path.join(self.log_file_dir, MLOpsLoggingUtils.LOG_CONFIG_FILE)
-            file_handle = MLOpsFileHandler(filepath=log_file_path, log_config_file=log_config_file, run_id=run_id,
-                                           edge_id=edge_id)
-            file_handle.setFormatter(self.format_str)
-            file_handle.setLevel(logging.INFO)
-            self.logger.addHandler(file_handle)
+        # if hasattr(self, "should_write_log_file") and self.should_write_log_file:
+        run_id, edge_id = self.args.run_id, MLOpsLoggingUtils.get_edge_id_from_args(self.args)
+        log_config_file = os.path.join(self.log_file_dir, MLOpsLoggingUtils.LOG_CONFIG_FILE)
+        file_handle = MLOpsFileHandler(filepath=log_file_path, log_config_file=log_config_file, run_id=run_id,
+                                        edge_id=edge_id)
+        file_handle.setFormatter(self.format_str)
+        file_handle.setLevel(logging.INFO)
+        self.logger.addHandler(file_handle)
         logging.root = self.logger
         # Rewrite sys.stdout to redirect stdout (i.e print()) to Logger
         sys.stdout.write = self.logger.info

From d3b447a1e0abfa728e413335e8a5decec8c255ba Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 12 Feb 2025 14:16:10 +0800
Subject: [PATCH 257/282] [refactor] Optimize HTTP inference client and log
 file handling

---
 .../device_http_inference_protocol.py         | 25 +++++++++++++------
 python/fedml/core/mlops/mlops_runtime_log.py  | 25 +++++++++----------
 2 files changed, 29 insertions(+), 21 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
index f71caa110c..ef98b085b0 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
@@ -14,6 +14,15 @@
 
 
 class FedMLHttpInference:
+    _http_client = None  # Class variable for shared HTTP client
+
+    @classmethod
+    async def get_http_client(cls):
+        if cls._http_client is None:
+            limits = httpx.Limits(max_keepalive_connections=100, max_connections=100)
+            cls._http_client = httpx.AsyncClient(limits=limits)
+        return cls._http_client
+
     def __init__(self):
         pass
 
@@ -101,18 +110,18 @@ async def stream_generator(inference_url, input_json, method="POST"):
 async def redirect_non_stream_req_to_worker(inference_type, inference_url, model_api_headers, model_inference_json,
                                             timeout=None, method="POST"):
     response_ok = True
-    request_id = str(uuid.uuid4())[:8]  # 生成短UUID作为请求ID
+    request_id = str(uuid.uuid4())[:8]
     start_time = time.time()
     logging.info(f"[Request-{request_id}] Starting HTTP request to {inference_url}")
     
     try:
-        async with httpx.AsyncClient() as client:
-            response = await client.request(
-                method=method, url=inference_url, headers=model_api_headers, json=model_inference_json, timeout=timeout
-            )
-            end_time = time.time()
-            elapsed_time = end_time - start_time
-            logging.info(f"[Request-{request_id}] Completed HTTP request. Time taken: {elapsed_time:.3f} seconds")
+        client = await FedMLHttpInference.get_http_client()
+        response = await client.request(
+            method=method, url=inference_url, headers=model_api_headers, json=model_inference_json, timeout=timeout
+        )
+        end_time = time.time()
+        elapsed_time = end_time - start_time
+        logging.info(f"[Request-{request_id}] Completed HTTP request. Time taken: {elapsed_time:.3f} seconds")
     except Exception as e:
         end_time = time.time()
         elapsed_time = end_time - start_time
diff --git a/python/fedml/core/mlops/mlops_runtime_log.py b/python/fedml/core/mlops/mlops_runtime_log.py
index 258f413448..0fc5db3d23 100644
--- a/python/fedml/core/mlops/mlops_runtime_log.py
+++ b/python/fedml/core/mlops/mlops_runtime_log.py
@@ -139,11 +139,10 @@ def __init__(self, args):
         self.stdout_handle = None
         self.logger = None
         self.args = args
-        # if hasattr(args, "using_mlops"):
-        #     self.should_write_log_file = args.using_mlops
-        # else:
-        #     self.should_write_log_file = False
-        self.should_write_log_file = True
+        if hasattr(args, "using_mlops"):
+            self.should_write_log_file = args.using_mlops
+        else:
+            self.should_write_log_file = False
         if not hasattr(args, "log_file_dir"):
             setattr(args, "log_file_dir", "./logs")
         self.log_file_dir = args.log_file_dir
@@ -176,14 +175,14 @@ def init_logs(self, log_level=None):
         self.logger.setLevel(log_level)
         self.logger.handlers.clear()
         self.logger.addHandler(self.stdout_handle)
-        # if hasattr(self, "should_write_log_file") and self.should_write_log_file:
-        run_id, edge_id = self.args.run_id, MLOpsLoggingUtils.get_edge_id_from_args(self.args)
-        log_config_file = os.path.join(self.log_file_dir, MLOpsLoggingUtils.LOG_CONFIG_FILE)
-        file_handle = MLOpsFileHandler(filepath=log_file_path, log_config_file=log_config_file, run_id=run_id,
-                                        edge_id=edge_id)
-        file_handle.setFormatter(self.format_str)
-        file_handle.setLevel(logging.INFO)
-        self.logger.addHandler(file_handle)
+        if hasattr(self, "should_write_log_file") and self.should_write_log_file:
+            run_id, edge_id = self.args.run_id, MLOpsLoggingUtils.get_edge_id_from_args(self.args)
+            log_config_file = os.path.join(self.log_file_dir, MLOpsLoggingUtils.LOG_CONFIG_FILE)
+            file_handle = MLOpsFileHandler(filepath=log_file_path, log_config_file=log_config_file, run_id=run_id,
+                                           edge_id=edge_id)
+            file_handle.setFormatter(self.format_str)
+            file_handle.setLevel(logging.INFO)
+            self.logger.addHandler(file_handle)
         logging.root = self.logger
         # Rewrite sys.stdout to redirect stdout (i.e print()) to Logger
         sys.stdout.write = self.logger.info

From 591011120ed6dda2935dd6915e69566bcc59503a Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 12 Feb 2025 14:45:47 +0800
Subject: [PATCH 258/282] [perf] Optimize Uvicorn server configuration for
 improved inference gateway performance

---
 .../device_http_inference_protocol.py         |  2 +-
 .../model_scheduler/master_job_runner.py      | 23 +++++++++++--------
 2 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
index ef98b085b0..7f248ee281 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
@@ -19,7 +19,7 @@ class FedMLHttpInference:
     @classmethod
     async def get_http_client(cls):
         if cls._http_client is None:
-            limits = httpx.Limits(max_keepalive_connections=100, max_connections=100)
+            limits = httpx.Limits(max_keepalive_connections=50, max_connections=1000)
             cls._http_client = httpx.AsyncClient(limits=limits)
         return cls._http_client
 
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index bc943307f2..c61e1d4374 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -452,15 +452,20 @@ def start_device_inference_gateway():
             if inference_gateway_pids is None or len(inference_gateway_pids) <= 0:
                 cur_dir = os.path.dirname(__file__)
                 fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
-                inference_gateway_process = ServerConstants.exec_console_with_script(f"{python_program} "
-                                                                                     f"-m uvicorn {inference_gw_cmd} "
-                                                                                     f"--host 0.0.0.0 "
-                                                                                     f"--port {str(inference_port)} "
-                                                                                     f"--reload --reload-delay 3 "
-                                                                                     f"--reload-dir {fedml_base_dir} "
-                                                                                     f"--log-level info",
-                                                                                     should_capture_stdout=False,
-                                                                                     should_capture_stderr=False)
+                inference_gateway_process = ServerConstants.exec_console_with_script(
+                    f"{python_program} -m uvicorn {inference_gw_cmd} "
+                    f"--host 0.0.0.0 "
+                    f"--port {str(inference_port)} "
+                    f"--workers 10 "
+                    f"--loop uvloop "
+                    f"--http httptools "
+                    f"--limit-concurrency 1000 "
+                    f"--backlog 2048 "
+                    f"--timeout-keep-alive 75 "
+                    f"--log-level warning ",
+                    should_capture_stdout=False,
+                    should_capture_stderr=False
+                )
                 return inference_gateway_process
             else:
                 return inference_gateway_pids[0]

From a7280376a69f631a63909844f0dd7157e4cd3c59 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 12 Feb 2025 16:18:10 +0800
Subject: [PATCH 259/282] [perf] Disable uvloop and httptools in model
 inference gateway

---
 .../scheduler/model_scheduler/master_job_runner.py       | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index c61e1d4374..4fb585756c 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -446,19 +446,20 @@ def start_device_inference_gateway():
         python_program = get_python_program()
         inference_port = ServerConstants.get_inference_master_gateway_port()
         if not ServerConstants.is_running_on_k8s():
-            logging.info(f"start the model inference gateway...")
             inference_gw_cmd = "fedml.computing.scheduler.model_scheduler.device_model_inference:api"
             inference_gateway_pids = RunProcessUtils.get_pid_from_cmd_line(inference_gw_cmd)
             if inference_gateway_pids is None or len(inference_gateway_pids) <= 0:
                 cur_dir = os.path.dirname(__file__)
                 fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
+                workers = 10
+                logging.info(f"start the model inference gateway workers[{workers}] no uvloop/httptools...")
                 inference_gateway_process = ServerConstants.exec_console_with_script(
                     f"{python_program} -m uvicorn {inference_gw_cmd} "
                     f"--host 0.0.0.0 "
                     f"--port {str(inference_port)} "
-                    f"--workers 10 "
-                    f"--loop uvloop "
-                    f"--http httptools "
+                    f"--workers {workers} "
+                    # f"--loop uvloop "
+                    # f"--http httptools "
                     f"--limit-concurrency 1000 "
                     f"--backlog 2048 "
                     f"--timeout-keep-alive 75 "

From 3e1ae10b0785f2cb5bd9f054f37064ad53badb0d Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 12 Feb 2025 16:35:40 +0800
Subject: [PATCH 260/282] [perf] Reduce model inference gateway workers from 10
 to 2

---
 .../computing/scheduler/model_scheduler/master_job_runner.py    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index 4fb585756c..bfc3b5d354 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -451,7 +451,7 @@ def start_device_inference_gateway():
             if inference_gateway_pids is None or len(inference_gateway_pids) <= 0:
                 cur_dir = os.path.dirname(__file__)
                 fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
-                workers = 10
+                workers = 2
                 logging.info(f"start the model inference gateway workers[{workers}] no uvloop/httptools...")
                 inference_gateway_process = ServerConstants.exec_console_with_script(
                     f"{python_program} -m uvicorn {inference_gw_cmd} "

From d6d67e82e80b6c6ed20f96a6e6eff392df37f504 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 12 Feb 2025 16:55:20 +0800
Subject: [PATCH 261/282] [perf] Optimize HTTP inference client and Uvicorn
 server configuration

---
 .../device_http_inference_protocol.py         | 25 ++++++++++++-------
 .../model_scheduler/master_job_runner.py      |  6 ++---
 2 files changed, 19 insertions(+), 12 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
index 7f248ee281..4f4c19aaee 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
@@ -19,7 +19,11 @@ class FedMLHttpInference:
     @classmethod
     async def get_http_client(cls):
         if cls._http_client is None:
-            limits = httpx.Limits(max_keepalive_connections=50, max_connections=1000)
+            limits = httpx.Limits(
+                max_keepalive_connections=100,
+                max_connections=1000,
+                keepalive_expiry=60
+            )
             cls._http_client = httpx.AsyncClient(limits=limits)
         return cls._http_client
 
@@ -39,8 +43,9 @@ async def is_inference_ready(inference_url, path="ready", timeout=None):
 
         # TODO (Raphael): Support more methods and return codes rules.
         try:
-            async with httpx.AsyncClient() as client:
-                ready_response = await client.get(url=ready_url, timeout=timeout)
+            # async with httpx.AsyncClient() as client:
+            client = await FedMLHttpInference.get_http_client()
+            ready_response = await client.get(url=ready_url, timeout=timeout)
 
             if isinstance(ready_response, (Response, StreamingResponse)):
                 error_code = ready_response.status_code
@@ -99,12 +104,13 @@ async def run_http_inference_with_curl_request(
 
 
 async def stream_generator(inference_url, input_json, method="POST"):
-    async with httpx.AsyncClient() as client:
-        async with client.stream(method, inference_url, json=input_json,
-                                 timeout=ClientConstants.WORKER_STREAM_API_TIMEOUT) as response:
-            async for chunk in response.aiter_lines():
-                # we consumed a newline, need to put it back
-                yield f"{chunk}\n"
+    # async with httpx.AsyncClient() as client:
+    client = await FedMLHttpInference.get_http_client()
+    async with client.stream(method, inference_url, json=input_json,
+                                timeout=ClientConstants.WORKER_STREAM_API_TIMEOUT) as response:
+        async for chunk in response.aiter_lines():
+            # we consumed a newline, need to put it back
+            yield f"{chunk}\n"
 
 
 async def redirect_non_stream_req_to_worker(inference_type, inference_url, model_api_headers, model_inference_json,
@@ -115,6 +121,7 @@ async def redirect_non_stream_req_to_worker(inference_type, inference_url, model
     logging.info(f"[Request-{request_id}] Starting HTTP request to {inference_url}")
     
     try:
+         # async with httpx.AsyncClient() as client:
         client = await FedMLHttpInference.get_http_client()
         response = await client.request(
             method=method, url=inference_url, headers=model_api_headers, json=model_inference_json, timeout=timeout
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index bfc3b5d354..aa4abeef81 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -451,7 +451,7 @@ def start_device_inference_gateway():
             if inference_gateway_pids is None or len(inference_gateway_pids) <= 0:
                 cur_dir = os.path.dirname(__file__)
                 fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
-                workers = 2
+                workers = 4
                 logging.info(f"start the model inference gateway workers[{workers}] no uvloop/httptools...")
                 inference_gateway_process = ServerConstants.exec_console_with_script(
                     f"{python_program} -m uvicorn {inference_gw_cmd} "
@@ -460,9 +460,9 @@ def start_device_inference_gateway():
                     f"--workers {workers} "
                     # f"--loop uvloop "
                     # f"--http httptools "
-                    f"--limit-concurrency 1000 "
+                    f"--limit-concurrency 1024 "
                     f"--backlog 2048 "
-                    f"--timeout-keep-alive 75 "
+                    f"--timeout-keep-alive 60 "
                     f"--log-level warning ",
                     should_capture_stdout=False,
                     should_capture_stderr=False

From 3bc06666a51b911fc0f3412f3f141a9047976bc1 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Thu, 13 Feb 2025 12:36:06 +0800
Subject: [PATCH 262/282] [perf] Remove verbose logging in model inference
 request handling

---
 .../device_http_inference_protocol.py         | 18 +++++++++---------
 .../model_scheduler/device_model_inference.py | 19 +++++++++----------
 2 files changed, 18 insertions(+), 19 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
index 4f4c19aaee..00f18a78e4 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py
@@ -116,9 +116,9 @@ async def stream_generator(inference_url, input_json, method="POST"):
 async def redirect_non_stream_req_to_worker(inference_type, inference_url, model_api_headers, model_inference_json,
                                             timeout=None, method="POST"):
     response_ok = True
-    request_id = str(uuid.uuid4())[:8]
-    start_time = time.time()
-    logging.info(f"[Request-{request_id}] Starting HTTP request to {inference_url}")
+    # request_id = str(uuid.uuid4())[:8]
+    # start_time = time.time()
+    # logging.info(f"[Request-{request_id}] Starting HTTP request to {inference_url}")
     
     try:
          # async with httpx.AsyncClient() as client:
@@ -126,13 +126,13 @@ async def redirect_non_stream_req_to_worker(inference_type, inference_url, model
         response = await client.request(
             method=method, url=inference_url, headers=model_api_headers, json=model_inference_json, timeout=timeout
         )
-        end_time = time.time()
-        elapsed_time = end_time - start_time
-        logging.info(f"[Request-{request_id}] Completed HTTP request. Time taken: {elapsed_time:.3f} seconds")
+        # end_time = time.time()
+        # elapsed_time = end_time - start_time
+        # logging.info(f"[Request-{request_id}] Completed HTTP request. Time taken: {elapsed_time:.3f} seconds")
     except Exception as e:
-        end_time = time.time()
-        elapsed_time = end_time - start_time
-        logging.error(f"[Request-{request_id}] Failed HTTP request after {elapsed_time:.3f} seconds. Error: {str(e)}")
+        # end_time = time.time()
+        # elapsed_time = end_time - start_time
+        # logging.error(f"[Request-{request_id}] Failed HTTP request after {elapsed_time:.3f} seconds. Error: {str(e)}")
         response_ok = False
         model_inference_result = {"error": e}
         return response_ok, model_inference_result
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index 35d47f8dbd..feabbf321b 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -199,8 +199,6 @@ async def _predict(
     # Always increase the pending requests counter on a new incoming request.
     FEDML_MODEL_CACHE.update_pending_requests_counter(end_point_id, increase=True)
     inference_response = {}
-    request_uuid = str(uuid.uuid4())  # Generate unique request ID
-
     try:
         in_end_point_id = end_point_id
         in_end_point_name = input_json.get("end_point_name", None)
@@ -261,11 +259,12 @@ async def _predict(
                 input_list["stream"] = input_list.get("stream", stream_flag)
                 output_list = input_json.get("outputs", [])
 
-                # main execution of redirecting the inference request to the idle device
-                inference_start_time = time.time()
-                start_time_str = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(inference_start_time))
-                logging.info(f"[Request {request_uuid}] Starting send_inference_request at {start_time_str}")
+                # request_uuid = str(uuid.uuid4())  # Generate unique request ID
+                # inference_start_time = time.time()
+                # start_time_str = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(inference_start_time))
+                # logging.info(f"[Request {request_uuid}] Starting send_inference_request at {start_time_str}")
                 
+                # main execution of redirecting the inference request to the idle device
                 inference_response = await send_inference_request(
                     idle_device,
                     end_point_id,
@@ -276,10 +275,10 @@ async def _predict(
                     connectivity_type=connectivity_type,
                     path=path, request_method=request_method)
                 
-                inference_end_time = time.time()
-                end_time_str = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(inference_end_time))
-                inference_duration = inference_end_time - inference_start_time
-                logging.info(f"[Request {request_uuid}] Completed send_inference_request at {end_time_str}, duration: {inference_duration:.3f} seconds")
+                # inference_end_time = time.time()
+                # end_time_str = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(inference_end_time))
+                # inference_duration = inference_end_time - inference_start_time
+                # logging.info(f"[Request {request_uuid}] Completed send_inference_request at {end_time_str}, duration: {inference_duration:.3f} seconds")
 
             # Calculate model metrics
             try:

From ad22de4e398cffe7ef22fec428d73e0a77f59f9b Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Thu, 13 Feb 2025 14:56:07 +0800
Subject: [PATCH 263/282] [bugfix-combination] Add model configuration details
 for deployment failure handling

---
 .../device_model_msg_object.py                | 39 ++++++++++---------
 .../model_scheduler/master_job_runner.py      | 12 +++++-
 .../model_scheduler/worker_job_runner.py      | 25 ++++++------
 .../worker_protocol_manager.py                |  2 +-
 python/setup.py                               |  2 +-
 5 files changed, 46 insertions(+), 34 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_msg_object.py b/python/fedml/computing/scheduler/model_scheduler/device_model_msg_object.py
index 6ec05f64ed..5d2ac8319b 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_msg_object.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_msg_object.py
@@ -43,31 +43,34 @@ def __init__(self, topic, payload):
             request_json = json.loads(payload)
         self.msg_topic = topic
         self.request_json = request_json
-        self.run_id = request_json["end_point_id"]
-        self.end_point_name = request_json["end_point_name"]
-        self.token = request_json["token"]
-        self.user_id = request_json["user_id"]
-        self.user_name = request_json["user_name"]
-        self.device_ids = request_json["device_ids"]
-        self.device_objs = request_json["device_objs"]
+        self.run_id = request_json.get("end_point_id")
+        self.end_point_name = request_json.get("end_point_name", "")
+        self.token = request_json.get("token", "")
+        self.user_id = request_json.get("user_id")
+        self.user_name = request_json.get("user_name", "")
+        self.device_ids = request_json.get("device_ids", [])
+        self.device_objs = request_json.get("device_objs", [])
 
-        self.model_config = request_json["model_config"]
-        self.model_name = self.model_config["model_name"]
-        self.model_id = self.model_config["model_id"]
-        self.model_version = self.model_config["model_version"]
-        self.model_storage_url = self.model_config["model_storage_url"]
-        self.scale_min = self.model_config.get("instance_scale_min", 0)
-        self.scale_max = self.model_config.get("instance_scale_max", 0)
-        self.inference_engine = self.model_config.get("inference_engine", 0)
-        self.inference_end_point_id = self.run_id
+        # check if model_config is in request_json and is not None
+        self.scale_min = 1
+        self.max_unavailable_rate = 0.1
+        if "model_config" in request_json and request_json["model_config"] is not None:
+            self.model_config = request_json["model_config"]
+            self.model_name = self.model_config["model_name"]
+            self.model_id = self.model_config["model_id"]
+            self.model_version = self.model_config["model_version"]
+            self.model_storage_url = self.model_config["model_storage_url"]
+            self.scale_min = self.model_config.get("instance_scale_min", 0)
+            self.scale_max = self.model_config.get("instance_scale_max", 0)
+            self.inference_engine = self.model_config.get("inference_engine", 0)
+            self.max_unavailable_rate = self.model_config.get("max_unavailable_rate", 0.1)
 
+        self.inference_end_point_id = self.run_id
         self.request_json["run_id"] = self.run_id
 
         self.gpu_topology = self.get_devices_avail_gpus()
         self.gpu_per_replica = self.get_gpu_per_replica()
 
-        self.max_unavailable_rate = self.model_config.get("max_unavailable_rate", 0.1)
-
     def get_devices_avail_gpus(self):
         """
         {
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
index aa4abeef81..d6829719ad 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py
@@ -279,9 +279,17 @@ def process_deployment_result_message(self, topic=None, payload=None):
                     end_point_id, end_point_name, payload_json["model_name"], "",
                     ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
                     message_center=self.message_center)
+                
                 # when report failed to the MLOps, need to delete the replica has successfully deployed and release the gpu
-                model_msg_object = FedMLModelMsgObject(topic, payload)
-                self.send_deployment_delete_request_to_edges(payload, model_msg_object, message_center=self.message_center)
+                model_config = dict()
+                model_config["model_name"] = payload_json["model_name"]
+                model_config["model_id"] = payload_json["model_id"]
+                model_config["model_version"] = payload_json["model_version"]
+                # add model_config to the payload for the delete request
+                payload_json["model_config"] = model_config
+                payload_for_del_deploy = json.dumps(payload_json)
+                model_msg_object = FedMLModelMsgObject(topic, payload_for_del_deploy)
+                self.send_deployment_delete_request_to_edges(payload_for_del_deploy, model_msg_object, message_center=self.message_center)
                 return
 
             # Failure handler, send the rollback message to the worker devices only if it has not been rollback
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
index c73630fb65..42c53b549a 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py
@@ -261,8 +261,8 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
 
                     # Send failed result back to master
                     _ = self.send_deployment_results(
-                        end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
-                        model_id, model_name, inference_output_url, inference_model_version, inference_port,
+                        end_point_name, self.edge_id, device_ids, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
+                        model_id, model_name, inference_output_url, model_version, inference_port,
                         inference_engine, model_metadata, model_config)
 
                     self.status_reporter.run_id = self.run_id
@@ -272,7 +272,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                     # Send failed successful result back to master
                     logging.info("Finished deployment, continue to send results to master...")
                     result_payload = self.send_deployment_results(
-                        end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
+                        end_point_name, self.edge_id, device_ids, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                         model_id, model_name, inference_output_url, model_version, inference_port_external,
                         inference_engine, model_metadata, model_config, replica_no=rank + 1,
                         connectivity=connectivity
@@ -283,7 +283,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                         logging.info("inference_port_external {} != inference_port {}".format(
                             inference_port_external, inference_port))
                         result_payload = self.construct_deployment_results(
-                            end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
+                            end_point_name, self.edge_id, device_ids, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                             model_id, model_name, inference_output_url, model_version, inference_port,
                             inference_engine, model_metadata, model_config, replica_no=rank + 1,
                             connectivity=connectivity
@@ -317,7 +317,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
 
                 # Report the deletion msg to master
                 result_payload = self.send_deployment_results(
-                    end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DELETED,
+                    end_point_name, self.edge_id, device_ids, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DELETED,
                     model_id, model_name, inference_output_url, model_version, inference_port_external,
                     inference_engine, model_metadata, model_config, replica_no=rank_to_delete + 1)
 
@@ -395,8 +395,8 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                             run_id, self.edge_id, replica_occupied_gpu_ids)
 
                     self.send_deployment_results(
-                        end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
-                        model_id, model_name, inference_output_url, inference_model_version, inference_port,
+                        end_point_name, self.edge_id, device_ids, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
+                        model_id, model_name, inference_output_url, model_version, inference_port,
                         inference_engine, model_metadata, model_config)
 
                     self.status_reporter.run_id = self.run_id
@@ -407,7 +407,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                 else:
                     logging.info("Finished deployment, continue to send results to master...")
                     result_payload = self.send_deployment_results(
-                        end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
+                        end_point_name, self.edge_id, device_ids, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                         model_id, model_name, inference_output_url, model_version, inference_port_external,
                         inference_engine, model_metadata, model_config, replica_no=rank + 1,
                         connectivity=connectivity
@@ -417,7 +417,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                         logging.info("inference_port_external {} != inference_port {}".format(
                             inference_port_external, inference_port))
                         result_payload = self.construct_deployment_results(
-                            end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
+                            end_point_name, self.edge_id, device_ids, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                             model_id, model_name, inference_output_url, model_version, inference_port,
                             inference_engine, model_metadata, model_config, replica_no=rank + 1,
                             connectivity=connectivity
@@ -441,12 +441,13 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
             logging.error(f"Unsupported op {op} with op num {op_num}")
             return False
 
-    def construct_deployment_results(self, end_point_name, device_id, model_status,
+    def construct_deployment_results(self, end_point_name, device_id, device_ids, model_status,
                                      model_id, model_name, model_inference_url,
                                      model_version, inference_port, inference_engine,
                                      model_metadata, model_config, replica_no=1,
                                      connectivity=ClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT):
         deployment_results_payload = {"end_point_id": self.run_id, "end_point_name": end_point_name,
+                                      "device_ids": device_ids,
                                       "model_id": model_id, "model_name": model_name,
                                       "model_url": model_inference_url, "model_version": model_version,
                                       "port": inference_port,
@@ -460,7 +461,7 @@ def construct_deployment_results(self, end_point_name, device_id, model_status,
                                       }
         return deployment_results_payload
 
-    def send_deployment_results(self, end_point_name, device_id, model_status,
+    def send_deployment_results(self, end_point_name, device_id, device_ids, model_status,
                                 model_id, model_name, model_inference_url,
                                 model_version, inference_port, inference_engine,
                                 model_metadata, model_config, replica_no=1,
@@ -469,7 +470,7 @@ def send_deployment_results(self, end_point_name, device_id, model_status,
             self.run_id, device_id)
 
         deployment_results_payload = self.construct_deployment_results(
-            end_point_name, device_id, model_status,
+            end_point_name, device_id, device_ids, model_status,
             model_id, model_name, model_inference_url,
             model_version, inference_port, inference_engine,
             model_metadata, model_config, replica_no=replica_no, connectivity=connectivity)
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
index b1d0bebc47..7f7b041b77 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
@@ -170,7 +170,7 @@ def callback_start_deployment(self, topic, payload):
             ClientConstants.save_run_process(run_id, process.pid)
 
     def callback_delete_deployment(self, topic, payload):
-        logging.info("[Worker] callback_delete_deployment")
+        logging.info("[Worker] callback_delete_deployment, topic: {}, payload: {}".format(topic, payload))
 
         # Parse payload as the model message object.
         model_msg_object = FedMLModelMsgObject(topic, payload)
diff --git a/python/setup.py b/python/setup.py
index 032bdb4eed..d09bed3c3f 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -126,7 +126,7 @@ def finalize_options(self):
 
 setup(
     name="fedml",
-    version="0.9.2",
+    version="0.9.6-dev",
     author="FedML Team",
     author_email="ch@fedml.ai",
     description="A research and production integrated edge-cloud library for "

From 2528f4f082a69333ee66582e69492cf0ed0ba335 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Thu, 13 Feb 2025 17:02:30 +0800
Subject: [PATCH 264/282] [bugfix] Update default model configuration
 parameters for safer deployment

---
 .../scheduler/model_scheduler/device_model_msg_object.py  | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_msg_object.py b/python/fedml/computing/scheduler/model_scheduler/device_model_msg_object.py
index 5d2ac8319b..6a21c880b0 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_msg_object.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_msg_object.py
@@ -59,10 +59,10 @@ def __init__(self, topic, payload):
             self.model_name = self.model_config["model_name"]
             self.model_id = self.model_config["model_id"]
             self.model_version = self.model_config["model_version"]
-            self.model_storage_url = self.model_config["model_storage_url"]
-            self.scale_min = self.model_config.get("instance_scale_min", 0)
-            self.scale_max = self.model_config.get("instance_scale_max", 0)
-            self.inference_engine = self.model_config.get("inference_engine", 0)
+            self.model_storage_url = self.model_config.get("model_storage_url", "")
+            self.scale_min = self.model_config.get("instance_scale_min", 1)
+            self.scale_max = self.model_config.get("instance_scale_max", 1)
+            self.inference_engine = self.model_config.get("inference_engine")
             self.max_unavailable_rate = self.model_config.get("max_unavailable_rate", 0.1)
 
         self.inference_end_point_id = self.run_id

From 60be0f71ad3cec4bdcc690b9cd091ce8db19ab13 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Fri, 14 Feb 2025 17:47:22 +0800
Subject: [PATCH 265/282] [feature] Add endpoint_name parameter to model
 deployment method

---
 .../computing/scheduler/model_scheduler/device_model_cards.py  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cards.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cards.py
index 8697d0a62c..1600b58bd2 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_cards.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cards.py
@@ -59,7 +59,8 @@ def serve_model_on_premise(self, model_name, endpoint_name, master_device_ids,
 
         if use_remote:
             if not self.deploy_model(model_name, device_type, target_devices, "", user_api_key,
-                                     additional_params_dict, use_local_deployment, endpoint_id=endpoint_id):
+                                     additional_params_dict, use_local_deployment, endpoint_name=endpoint_name, 
+                                     endpoint_id=endpoint_id):
                 print("Failed to deploy model")
                 return False
             return True

From 7853c9064d8c8a45ceee04a50d8d1b57c7d7f690 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Mon, 17 Feb 2025 16:58:41 +0800
Subject: [PATCH 266/282] [bugfix] Restore full GPU card selection parameters
 in NvidiaGPUtil

---
 .../computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py   | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
index a6717de8cb..34d0c3be1c 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
@@ -25,8 +25,7 @@ def get_gpu_cards() -> List[GPUCard]:
 
     @staticmethod
     def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memory: float) -> List[int]:
-        # return GPUtil.getAvailable(order=order, limit=limit, maxLoad=max_load, maxMemory=max_memory)
-        return GPUtil.getAvailable(order='random', limit=limit)
+        return GPUtil.getAvailable(order=order, limit=limit, maxLoad=max_load, maxMemory=max_memory)
 
     @staticmethod
     def get_docker_gpu_device_mapping(gpu_ids: List[int], num_gpus: int = 0) -> Optional[Dict]:

From 068ed5141d7fff84b3271a40a0fb4672f3bb86ba Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Mon, 17 Feb 2025 18:26:42 +0800
Subject: [PATCH 267/282] [perf] Increase job metrics reporting sleep interval
 to 15 seconds and disable MQTT debug logging

---
 python/fedml/core/mlops/mlops_job_perfs.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/core/mlops/mlops_job_perfs.py b/python/fedml/core/mlops/mlops_job_perfs.py
index fe3d921558..fe205323ec 100644
--- a/python/fedml/core/mlops/mlops_job_perfs.py
+++ b/python/fedml/core/mlops/mlops_job_perfs.py
@@ -175,7 +175,7 @@ def report_job_stats_entry(self, sys_event):
                     logging.debug("exception when reporting job pref: {}.".format(traceback.format_exc()))
                     pass
 
-            time.sleep(10)
+            time.sleep(15)
 
         logging.info("Job metrics process is about to exit.")
         mqtt_mgr.loop_stop()

From 8ac783d7e1f552769f2c7b7c5e24cd67d9c23df9 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Tue, 18 Feb 2025 10:40:21 +0800
Subject: [PATCH 268/282] [chore] Bump version to 0.9.6-dev202502181030

---
 python/fedml/__init__.py | 2 +-
 python/setup.py          | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index 677d06b4e5..2ddeac361c 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -37,7 +37,7 @@
 _global_training_type = None
 _global_comm_backend = None
 
-__version__ = "0.9.2"
+__version__ = "0.9.6-dev202502181030"
 
 
 # This is the deployment environment used for different roles (RD/PM/BD/Public Developers). Potential VALUE: local, dev, test, release
diff --git a/python/setup.py b/python/setup.py
index d09bed3c3f..31d2483237 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -126,7 +126,7 @@ def finalize_options(self):
 
 setup(
     name="fedml",
-    version="0.9.6-dev",
+    version="0.9.6-dev202502181030",
     author="FedML Team",
     author_email="ch@fedml.ai",
     description="A research and production integrated edge-cloud library for "

From b11558b3cd09150d4ae60422afa4f2b1b2732eb9 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Tue, 18 Feb 2025 11:12:43 +0800
Subject: [PATCH 269/282] [feature] Enhance container log retrieval for exited
 containers

---
 .../device_model_deployment.py                | 30 +++++++++++++++----
 1 file changed, 24 insertions(+), 6 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index 647882c84f..7fab64d9b9 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -342,10 +342,15 @@ def log_deployment_output(end_point_id, model_id, cmd_container_name, cmd_type,
             if container_obj is not None:
                 out_logs, err_logs = None, None
                 try:
-                    out_logs = container_obj.logs(stdout=True, stderr=False, stream=False, follow=False,
-                                                  since=last_log_time)
-                    err_logs = container_obj.logs(stdout=False, stderr=True, stream=False, follow=False,
-                                                  since=last_log_time)
+                    if container_obj.status == "exited":
+                        # If the container has exited, we need to get the whole logs from the container
+                        out_logs = container_obj.logs(stdout=True, stderr=False, stream=False, follow=False)
+                        err_logs = container_obj.logs(stdout=False, stderr=True, stream=False, follow=False)
+                    else:
+                        out_logs = container_obj.logs(stdout=True, stderr=False, stream=False, follow=False,
+                                                      since=last_log_time)
+                        err_logs = container_obj.logs(stdout=False, stderr=True, stream=False, follow=False,
+                                                      since=last_log_time)
                 except Exception as e:
                     logging.error(f"Failed to get the logs from the container with exception {e}")
                     pass
@@ -355,16 +360,29 @@ def log_deployment_output(end_point_id, model_id, cmd_container_name, cmd_type,
                 if err_logs is not None:
                     err_logs = sys_utils.decode_our_err_result(err_logs)
                     if len(err_logs) > 0:
-                        logging.error(f"{format(err_logs)}")
+                        logging.error(f"[-- Container Error Logs Start --]\n{format(err_logs)}\n[-- Container Error Logs End --]")
 
                 if out_logs is not None:
                     out_logs = sys_utils.decode_our_err_result(out_logs)
                     if len(out_logs) > 0:
-                        logging.info(f"{format(out_logs)}")
+                        logging.info(f"[-- Container Stdout Logs Start --]\n{format(out_logs)}\n[-- Container Stdout Logs End --]")
 
                 if container_obj.status == "exited":
                     logging.info("Container {} has exited, automatically remove it".format(cmd_container_name))
 
+                    # try to get the logs from the filesystem
+                    if out_logs is None or err_logs is None:
+                        try:
+                            logs_path = f"/var/lib/docker/containers/{container_obj.id}/{container_obj.id}-json.log"
+                            if os.path.exists(logs_path):
+                                with open(logs_path, 'r') as f:
+                                    raw_logs = f.readlines()
+                                    out_logs = '\n'.join([line for line in raw_logs if '"stream":"stdout"' in line])
+                                    err_logs = '\n'.join([line for line in raw_logs if '"stream":"stderr"' in line])
+                                logging.error(f"read Container Error Logs from log file: {err_logs}")
+                        except Exception as e:
+                            logging.warning(f"Failed to read logs from filesystem: {str(e)}")
+
                     # Save the failed log into ~/.fedml/fedml-model-client/fedml/logs/failed_logs/
                     # $run_id/$container_name.log
                     try:

From f376824cca0c3a31ab02f011834b2e3856078ac0 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 19 Feb 2025 11:19:43 +0800
Subject: [PATCH 270/282] [bugfix] Improve lock handling in MLOps logging
 utilities

---
 python/fedml/core/mlops/mlops_runtime_log.py |  8 +++-
 python/fedml/core/mlops/mlops_utils.py       | 45 +++++++++++++++++---
 2 files changed, 44 insertions(+), 9 deletions(-)

diff --git a/python/fedml/core/mlops/mlops_runtime_log.py b/python/fedml/core/mlops/mlops_runtime_log.py
index 0fc5db3d23..6b8f5fb9d2 100644
--- a/python/fedml/core/mlops/mlops_runtime_log.py
+++ b/python/fedml/core/mlops/mlops_runtime_log.py
@@ -60,8 +60,11 @@ def update_config_and_rotate(self, source, dest):
         MLOpsLoggingUtils.release_lock()
 
     def __initialize_config(self):
+        lock_acquired = False
         try:
-            MLOpsLoggingUtils.acquire_lock()
+            lock_acquired = MLOpsLoggingUtils.acquire_lock(block=True)
+            if not lock_acquired:
+                raise RuntimeError("Failed to acquire lock")
             config_data = MLOpsLoggingUtils.load_log_config(run_id=self.run_id, device_id=self.edge_id,
                                                             log_config_file=self.log_config_file)
             if not config_data:
@@ -72,7 +75,8 @@ def __initialize_config(self):
         except Exception as e:
             raise ValueError("Error initializing log config: {}".format(e))
         finally:
-            MLOpsLoggingUtils.release_lock()
+            if lock_acquired:
+                MLOpsLoggingUtils.release_lock()
 
 
 class MLOpsFormatter(logging.Formatter):
diff --git a/python/fedml/core/mlops/mlops_utils.py b/python/fedml/core/mlops/mlops_utils.py
index 8bde9e4299..f18a88e21f 100644
--- a/python/fedml/core/mlops/mlops_utils.py
+++ b/python/fedml/core/mlops/mlops_utils.py
@@ -1,4 +1,5 @@
 import json
+import logging
 import multiprocessing
 import os
 import time
@@ -75,13 +76,37 @@ class MLOpsLoggingUtils:
 
     @staticmethod
     def acquire_lock(block=True):
-        return MLOpsLoggingUtils._lock.acquire(block)
+        logging.info("acquire_lock start, block: {}".format(block))
+        lock_acquired = MLOpsLoggingUtils._lock.acquire(block)
+        logging.info("acquire_lock end, lock_acquired: {}".format(lock_acquired))
+        return lock_acquired
 
     @staticmethod
     def release_lock():
-        # Purposefully acquire lock with non-blocking call to make it idempotent
-        MLOpsLoggingUtils._lock.acquire(block=False)
-        MLOpsLoggingUtils._lock.release()
+        # # Purposefully acquire lock with non-blocking call to make it idempotent
+        # MLOpsLoggingUtils._lock.acquire(block=False)
+        # MLOpsLoggingUtils._lock.release()
+
+        # modify by charlie
+        # release_lock method may have incorrect implementation:
+        # -> The acquire(block=False) in release_lock may incorrectly acquire and release the lock, especially in a multi-threaded environment.
+        # -> If the current thread already holds the lock, acquire(block=False) will fail (return False) in multiprocessing.Lock,
+        # because the lock is not re-entrant, and cross-thread use may lead to undefined behavior.
+        # -> Therefore, the acquire call in release_lock may fail, causing subsequent release() to throw an exception,
+        # or incorrectly release the lock held by other threads.
+
+        # modify by charlie
+        # acquire the lock and release it in old lock implementation 
+        # perhaps cause the lock is released in the wrong place
+        # so we need to release the lock directly
+        try:
+            logging.info("release_lock start")
+            MLOpsLoggingUtils._lock.release()
+            logging.info("release_lock end")
+        except ValueError as e:
+            # The lock is not acquired, ignore it
+            logging.warning("release_lock error: {}".format(e))
+            pass
 
     @staticmethod
     def build_log_file_path_with_run_params(
@@ -185,9 +210,15 @@ def save_log_config(run_id, device_id, log_config_file, config_data):
     @staticmethod
     def load_yaml_config(log_config_file):
         """Helper function to load a yaml config file"""
-        if MLOpsLoggingUtils._lock.acquire(block=False):
-            MLOpsLoggingUtils._lock.release()
-            raise ValueError("Able to acquire lock. This means lock was not acquired by the caller")
+        
+        # modify by charlie
+        # the lock is acquired in the caller, so the check is not necessary, 
+        # it should be removed to avoid potential exceptions and logical conflicts.
+        
+        # if MLOpsLoggingUtils._lock.acquire(block=False):
+        #     MLOpsLoggingUtils._lock.release()
+        #     raise ValueError("Able to acquire lock. This means lock was not acquired by the caller")
+        
         if not os.path.exists(log_config_file):
             MLOpsLoggingUtils.generate_yaml_doc({}, log_config_file)
         with open(log_config_file, "r") as stream:

From cd976a58bd29d94c44030d6890b501e781452745 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 19 Feb 2025 12:37:49 +0800
Subject: [PATCH 271/282] [refactor] Simplify lock handling in MLOps logging
 utilities

---
 python/fedml/core/mlops/mlops_utils.py | 32 ++++++++++++++------------
 1 file changed, 17 insertions(+), 15 deletions(-)

diff --git a/python/fedml/core/mlops/mlops_utils.py b/python/fedml/core/mlops/mlops_utils.py
index f18a88e21f..f56e10b52c 100644
--- a/python/fedml/core/mlops/mlops_utils.py
+++ b/python/fedml/core/mlops/mlops_utils.py
@@ -76,16 +76,18 @@ class MLOpsLoggingUtils:
 
     @staticmethod
     def acquire_lock(block=True):
-        logging.info("acquire_lock start, block: {}".format(block))
-        lock_acquired = MLOpsLoggingUtils._lock.acquire(block)
-        logging.info("acquire_lock end, lock_acquired: {}".format(lock_acquired))
-        return lock_acquired
+        return MLOpsLoggingUtils._lock.acquire(block)
+    
+        # logging.info("acquire_lock start, block: {}".format(block))
+        # lock_acquired = MLOpsLoggingUtils._lock.acquire(block)
+        # logging.info("acquire_lock end, lock_acquired: {}".format(lock_acquired))
+        # return lock_acquired
 
     @staticmethod
     def release_lock():
-        # # Purposefully acquire lock with non-blocking call to make it idempotent
-        # MLOpsLoggingUtils._lock.acquire(block=False)
-        # MLOpsLoggingUtils._lock.release()
+        # Purposefully acquire lock with non-blocking call to make it idempotent
+        MLOpsLoggingUtils._lock.acquire(block=False)
+        MLOpsLoggingUtils._lock.release()
 
         # modify by charlie
         # release_lock method may have incorrect implementation:
@@ -99,14 +101,14 @@ def release_lock():
         # acquire the lock and release it in old lock implementation 
         # perhaps cause the lock is released in the wrong place
         # so we need to release the lock directly
-        try:
-            logging.info("release_lock start")
-            MLOpsLoggingUtils._lock.release()
-            logging.info("release_lock end")
-        except ValueError as e:
-            # The lock is not acquired, ignore it
-            logging.warning("release_lock error: {}".format(e))
-            pass
+        # try:
+        #     logging.info("release_lock start")
+        #     MLOpsLoggingUtils._lock.release()
+        #     logging.info("release_lock end")
+        # except ValueError as e:
+        #     # The lock is not acquired, ignore it
+        #     logging.warning("release_lock error: {}".format(e))
+        #     pass
 
     @staticmethod
     def build_log_file_path_with_run_params(

From add749a2cb7f257b3cf20e5e9abc70d569039345 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 19 Feb 2025 15:48:29 +0800
Subject: [PATCH 272/282] [feature] Add robust database operation error
 handling decorator

---
 .../model_scheduler/device_model_db.py        | 69 ++++++++++++++++++-
 1 file changed, 68 insertions(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_db.py b/python/fedml/computing/scheduler/model_scheduler/device_model_db.py
index 606d8c010b..5be1b55ae5 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_db.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_db.py
@@ -11,6 +11,8 @@
 from fedml.core.common.singleton import Singleton
 from sqlalchemy.sql import text
 from typing import List, Dict
+import functools
+from sqlalchemy import exc
 
 Base = declarative_base()
 
@@ -25,7 +27,57 @@ def __init__(self):
             self.db_engine = None
         if not hasattr(self, "db_base_dir"):
             self.db_base_dir = None
-
+    
+    @staticmethod
+    def db_operation(func):
+        """decorator: handle the database operation exceptions"""
+        @functools.wraps(func)
+        def wrapper(self, *args, **kwargs):
+            try:
+                # open the database connection
+                self.open_job_db()
+                # execute the function
+                return func(self, *args, **kwargs)
+            except (
+                # session state error
+                exc.InvalidRequestError,     # including "prepared state" error
+                exc.StatementError,          # SQL statement execution error
+                # connection error
+                exc.DBAPIError,             # base class of database API error
+                exc.OperationalError,        # database operation error (e.g. connection failure)
+                exc.DisconnectionError,      # connection disconnected
+                # transaction error
+                exc.InvalidatePoolError,     # connection pool invalid
+                exc.TimeoutError,            # connection timeout
+                exc.ResourceClosedError,     # resource (e.g. cursor) closed
+                # concurrent error
+                exc.PendingRollbackError,    # pending rollback transaction
+                exc.IntegrityError          # integrity constraint violation
+            ) as e:
+                logging.error(f"Database error in {func.__name__}, rebuilding session: {e}")
+                # rollback any unfinished transactions
+                if self.db_connection:
+                    try:
+                        self.db_connection.rollback()
+                    except:
+                        pass
+                    try:
+                        self.db_connection.close()
+                    except:
+                        pass
+                    # set the db connection to None, then open again in open_job_db method
+                    self.db_connection = None
+                # retry open the database connection
+                self.open_job_db()
+                # retry execute the function
+                return func(self, *args, **kwargs)
+            except Exception as e:
+                # other unexpected errors, record logs and raise
+                logging.error(f"Unexpected error in {func.__name__}: {e}")
+                self.db_connection = None
+                raise
+        return wrapper
+    
     @staticmethod
     def get_instance():
         return FedMLModelDatabase()
@@ -134,6 +186,7 @@ def get_deployment_status_with_device_id(self, end_point_id, end_point_name, mod
 
         return None
 
+    @db_operation
     def delete_deployment_status(self, end_point_id, end_point_name, model_name, model_version=None):
         self.open_job_db()
         if model_version is None:
@@ -149,6 +202,7 @@ def delete_deployment_status(self, end_point_id, end_point_name, model_name, mod
                      FedMLDeploymentResultInfoModel.model_version == f'{model_version}')).delete()
         self.db_connection.commit()
 
+    @db_operation
     def delete_deployment_result(self, end_point_id, end_point_name, model_name, model_version=None):
         self.open_job_db()
         if model_version is None:
@@ -164,6 +218,7 @@ def delete_deployment_result(self, end_point_id, end_point_name, model_name, mod
                      FedMLDeploymentResultInfoModel.model_version == f'{model_version}')).delete()
         self.db_connection.commit()
     
+    @db_operation
     def delete_deployment_result_with_device_id(self, end_point_id, end_point_name, model_name, device_id):
         self.open_job_db()
         self.db_connection.query(FedMLDeploymentResultInfoModel).filter(
@@ -173,6 +228,7 @@ def delete_deployment_result_with_device_id(self, end_point_id, end_point_name,
                  FedMLDeploymentResultInfoModel.device_id == f'{device_id}')).delete()
         self.db_connection.commit()
 
+    @db_operation
     def delete_deployment_result_with_device_id_and_rank(self, end_point_id, end_point_name, model_name,
                                                          device_id, replica_rank):
         replica_no = replica_rank + 1
@@ -185,6 +241,7 @@ def delete_deployment_result_with_device_id_and_rank(self, end_point_id, end_poi
                  FedMLDeploymentResultInfoModel.replica_no == f'{replica_no}')).delete()
         self.db_connection.commit()
 
+    @db_operation
     def delete_deployment_run_info(self, end_point_id):
         # db / table -> model-deployment.db / "deployment_run_info"
         self.open_job_db()
@@ -343,6 +400,7 @@ def drop_table(self):
         except Exception as e:
             pass
 
+    @db_operation
     def get_deployment_results_info(self, end_point_id, end_point_name, model_name, model_version):
         self.open_job_db()
         if model_version is None:
@@ -358,11 +416,13 @@ def get_deployment_results_info(self, end_point_id, end_point_name, model_name,
                             FedMLDeploymentResultInfoModel.model_version == f'{model_version}')).all()
         return result_info
 
+    @db_operation
     def _get_all_deployment_results_info(self):
         self.open_job_db()
         result_info = self.db_connection.query(FedMLDeploymentResultInfoModel).all()
         return result_info
 
+    @db_operation
     def set_deployment_results_info(self, end_point_id, end_point_name,
                                     model_name, model_version, device_id,
                                     deployment_result=None, deployment_status=None, replica_no=None):
@@ -402,12 +462,14 @@ def set_deployment_results_info(self, end_point_id, end_point_name,
 
         self.db_connection.commit()
 
+    @db_operation
     def get_deployment_run_info(self, end_point_id):
         self.open_job_db()
         run_info = self.db_connection.query(FedMLDeploymentRunInfoModel). \
             filter_by(end_point_id=f'{end_point_id}').first()
         return run_info
 
+    @db_operation
     def set_deployment_run_info(self, end_point_id, end_point_name,
                                 end_point_status=None, device_info=None,
                                 activated=None, token=None):
@@ -435,6 +497,7 @@ def set_deployment_run_info(self, end_point_id, end_point_name,
 
         self.db_connection.commit()
 
+    @db_operation
     def get_deployment_auth_info(self, end_point_id, end_point_name, model_name):
         self.open_job_db()
         run_info = self.db_connection.query(FedMLDeploymentAuthInfoModel). \
@@ -443,6 +506,7 @@ def get_deployment_auth_info(self, end_point_id, end_point_name, model_name):
                         FedMLDeploymentAuthInfoModel.model_name == f'{model_name}')).first()
         return run_info
 
+    @db_operation
     def set_deployment_auth_info(self, end_point_id, end_point_name, model_name, token):
         self.open_job_db()
         auth_info = self.db_connection.query(FedMLDeploymentAuthInfoModel). \
@@ -462,6 +526,7 @@ def set_deployment_auth_info(self, end_point_id, end_point_name, model_name, tok
 
         self.db_connection.commit()
 
+    @db_operation
     def get_latest_end_point_metrics(self, end_point_id, end_point_name, model_name, model_version):
         self.open_job_db()
         endpoint_metric = self.db_connection.query(FedMLEndPointMetricsModel). \
@@ -473,6 +538,7 @@ def get_latest_end_point_metrics(self, end_point_id, end_point_name, model_name,
             return endpoint_metric[-1]
         return None
 
+    @db_operation
     def get_end_point_metrics_by_index(self, end_point_id, end_point_name, model_name, model_version, index):
         self.open_job_db()
         endpoint_metric = self.db_connection.query(FedMLEndPointMetricsModel). \
@@ -483,6 +549,7 @@ def get_end_point_metrics_by_index(self, end_point_id, end_point_name, model_nam
             offset(index).limit(1).first()
         return endpoint_metric
 
+    @db_operation
     def set_end_point_metrics(self, end_point_id, end_point_name,
                               model_name, model_version,
                               total_latency=None, avg_latency=None, current_latency=None,

From 46b8ce7459374dafcfc3095e86bf91eacffb5771 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 19 Feb 2025 16:09:22 +0800
Subject: [PATCH 273/282] [upd] Bump version to 0.9.6-dev202502191600

---
 python/fedml/__init__.py | 2 +-
 python/setup.py          | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index 2ddeac361c..6ceaecc368 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -37,7 +37,7 @@
 _global_training_type = None
 _global_comm_backend = None
 
-__version__ = "0.9.6-dev202502181030"
+__version__ = "0.9.6-dev202502191600"
 
 
 # This is the deployment environment used for different roles (RD/PM/BD/Public Developers). Potential VALUE: local, dev, test, release
diff --git a/python/setup.py b/python/setup.py
index 31d2483237..7cd9a2591c 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -126,7 +126,7 @@ def finalize_options(self):
 
 setup(
     name="fedml",
-    version="0.9.6-dev202502181030",
+    version="0.9.6-dev202502191600",
     author="FedML Team",
     author_email="ch@fedml.ai",
     description="A research and production integrated edge-cloud library for "

From 38a930a073bad0d72ae948718193544abb3bb66f Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Wed, 19 Feb 2025 19:16:21 +0800
Subject: [PATCH 274/282] [debug] Add logging for endpoint replica information
 in job monitor

---
 .../fedml/computing/scheduler/comm_utils/job_monitor.py  | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
index b8237d93ba..966d28e435 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
@@ -207,6 +207,7 @@ def monitor_replicas_number():
         res_to_mlops = {}  # endpoint_id -> num_replica
 
         for endpoint_detail in res_frm_db:
+            logging.info(f"endpoint_detail: {endpoint_detail}")
             endpoint_replicas_details = {}
             if isinstance(endpoint_detail, str):
                 endpoint_replicas_details = json.loads(endpoint_detail)
@@ -218,11 +219,13 @@ def monitor_replicas_number():
                 endpoint_replica_details = {}
                 if isinstance(endpoint_replicas_details["result"], str):
                     endpoint_replica_details = json.loads(endpoint_replicas_details["result"])
-
+                    
+                logging.info(f"endpoint_replica_details: {endpoint_replica_details}")
                 res_to_mlops[endpoint_replica_details["end_point_id"]] = res_to_mlops.get(
                     endpoint_replica_details["end_point_id"], 0) + 1
 
         for endpoint_id, num_replica in res_to_mlops.items():
+            logging.info(f"endpoint_id: {endpoint_id}, num_replica: {num_replica}")
             num_replica_url_path = "fedmlModelServer/api/v1/endpoint/replica-info"
             mlops_prefix = fedml._get_backend_service()
             url = f"{mlops_prefix}/{num_replica_url_path}"
@@ -240,13 +243,15 @@ def monitor_replicas_number():
                 "replicaNumber": int(num_replica),
                 "timestamp": int(time.time() * 1000)
             }
-
+            logging.info(f"req_header: {req_header}")
+            logging.info(f"req_body: {req_body}")
             try:
                 response = requests.post(
                     url,
                     headers=req_header,
                     json=req_body
                 )
+                logging.info(f"endpoint_id: {endpoint_id}, response: {response}")
                 if response.status_code != 200:
                     logging.error(f"Failed to send the replica number request to MLOps platform.")
                 else:

From 69c0d844c554bd57e26590d646fc97dc956490fd Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Thu, 20 Feb 2025 18:14:41 +0800
Subject: [PATCH 275/282] [debug] Re-enable logging for model monitoring
 metrics

---
 .../computing/scheduler/model_scheduler/device_model_monitor.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_monitor.py b/python/fedml/computing/scheduler/model_scheduler/device_model_monitor.py
index 472cab84aa..5c49011204 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_monitor.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_monitor.py
@@ -141,7 +141,7 @@ def send_monitoring_metrics(self, index):
                                          "total_request_num": int(total_request_num),
                                          "timestamp": timestamp,
                                          "edgeId": device_id}
-        # logging.info("send monitor metrics {}".format(json.dumps(deployment_monitoring_payload)))
+        logging.info("send monitor metrics {}".format(json.dumps(deployment_monitoring_payload)))
 
         self.monitor_mqtt_mgr.send_message_json(deployment_monitoring_topic, json.dumps(deployment_monitoring_payload))
         self.monitor_mqtt_mgr.send_message_json(deployment_monitoring_topic_prefix,

From dcfe268f3d111465044a011f461d9c303383015f Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Thu, 20 Feb 2025 19:59:13 +0800
Subject: [PATCH 276/282] [refactor] Improve lock handling and error management
 in MLOps logging utilities

---
 python/fedml/core/mlops/mlops_runtime_log.py  | 59 +++++++-------
 .../core/mlops/mlops_runtime_log_daemon.py    | 80 +++++++++++++------
 python/fedml/core/mlops/mlops_utils.py        |  8 +-
 3 files changed, 92 insertions(+), 55 deletions(-)

diff --git a/python/fedml/core/mlops/mlops_runtime_log.py b/python/fedml/core/mlops/mlops_runtime_log.py
index 6b8f5fb9d2..4d2d68f50b 100644
--- a/python/fedml/core/mlops/mlops_runtime_log.py
+++ b/python/fedml/core/mlops/mlops_runtime_log.py
@@ -33,38 +33,39 @@ def __init__(self, run_id, edge_id, log_config_file, filepath):
     def update_config_and_rotate(self, source, dest):
         # source = current log file name
         # dest = log file name (dated)
-        MLOpsLoggingUtils.acquire_lock()
-
-        # Check if the source and destination files exist. If it does, return
-        if os.path.exists(source):
-            # Copy the contents of the source file to the destination file
-            shutil.copy(source, dest)
-            # Clear everything in the source file
-            with open(source, 'w') as src_file:
-                src_file.truncate(0)
-            src_file.close()
-
-        config_data = MLOpsLoggingUtils.load_log_config(self.run_id, self.edge_id,
-                                                        self.log_config_file)
-
-        # Update file name of current log file
-        config_data[self.rotate_count].file_path = dest
-        self.rotate_count += 1
-
-        # Store the rotate count, and corresponding log file name in the config file
-        rotated_log_file = LogFile(file_path=source)
-        config_data[self.rotate_count] = rotated_log_file
-        MLOpsLoggingUtils.save_log_config(run_id=self.run_id, device_id=self.edge_id,
-                                          log_config_file=self.log_config_file,
-                                          config_data=config_data)
-        MLOpsLoggingUtils.release_lock()
+        lock_acquired = MLOpsLoggingUtils.acquire_lock()
+        try:
+            # Check if the source and destination files exist. If it does, return
+            if os.path.exists(source):
+                # Copy the contents of the source file to the destination file
+                shutil.copy(source, dest)
+                # Clear everything in the source file
+                with open(source, 'w') as src_file:
+                    src_file.truncate(0)
+                src_file.close()
+
+            config_data = MLOpsLoggingUtils.load_log_config(self.run_id, self.edge_id,
+                                                            self.log_config_file)
+
+            # Update file name of current log file
+            config_data[self.rotate_count].file_path = dest
+            self.rotate_count += 1
+
+            # Store the rotate count, and corresponding log file name in the config file
+            rotated_log_file = LogFile(file_path=source)
+            config_data[self.rotate_count] = rotated_log_file
+            MLOpsLoggingUtils.save_log_config(run_id=self.run_id, device_id=self.edge_id,
+                                            log_config_file=self.log_config_file,
+                                            config_data=config_data)
+        except Exception as e:
+            raise ValueError("Error updating log config: {}".format(e))
+        finally:
+            if lock_acquired:
+                MLOpsLoggingUtils.release_lock()
 
     def __initialize_config(self):
-        lock_acquired = False
+        lock_acquired = MLOpsLoggingUtils.acquire_lock(block=True)
         try:
-            lock_acquired = MLOpsLoggingUtils.acquire_lock(block=True)
-            if not lock_acquired:
-                raise RuntimeError("Failed to acquire lock")
             config_data = MLOpsLoggingUtils.load_log_config(run_id=self.run_id, device_id=self.edge_id,
                                                             log_config_file=self.log_config_file)
             if not config_data:
diff --git a/python/fedml/core/mlops/mlops_runtime_log_daemon.py b/python/fedml/core/mlops/mlops_runtime_log_daemon.py
index ff06dc91b3..9218519659 100644
--- a/python/fedml/core/mlops/mlops_runtime_log_daemon.py
+++ b/python/fedml/core/mlops/mlops_runtime_log_daemon.py
@@ -120,15 +120,19 @@ def log_upload(self, run_id, device_id):
             line_start_req = line_end_req
 
             # Update the uploaded file index
-            MLOpsLoggingUtils.acquire_lock()
-            config_data = MLOpsLoggingUtils.load_log_config(run_id, device_id,
+            lock_acquired = MLOpsLoggingUtils.acquire_lock()
+            try:
+                config_data = MLOpsLoggingUtils.load_log_config(run_id, device_id,
                                                             self.log_config_file)
-
-            config_data[self.file_rotate_count].uploaded_file_index = uploaded_file_index
-            MLOpsLoggingUtils.save_log_config(run_id=run_id, device_id=device_id,
-                                              log_config_file=self.log_config_file,
-                                              config_data=config_data)
-            MLOpsLoggingUtils.release_lock()
+                config_data[self.file_rotate_count].uploaded_file_index = uploaded_file_index
+                MLOpsLoggingUtils.save_log_config(run_id=run_id, device_id=device_id,
+                                                log_config_file=self.log_config_file,
+                                                config_data=config_data)
+            except Exception as e:
+                raise ValueError("Error updating log config: {}".format(e))
+            finally:
+                if lock_acquired:
+                    MLOpsLoggingUtils.release_lock()
 
     @staticmethod
     def __format_log_lines(log_lines: list, line_start_req: int, line_end_req: int):
@@ -296,37 +300,65 @@ def log_process(self, process_event):
         print("Log Process exits normally.")
 
     def fetch_file_path_and_index(self) -> (str, int):
+        lock_acquired = False
         try:
             upload_file_index = None
-            MLOpsLoggingUtils.acquire_lock()
-            config_data = MLOpsLoggingUtils.load_log_config(run_id=self.run_id, device_id=self.device_id,
-                                                            log_config_file=self.log_config_file)
-            MLOpsLoggingUtils.release_lock()
+            # Acquire lock for initial config read
+            lock_acquired = MLOpsLoggingUtils.acquire_lock()
+            try:
+                config_data = MLOpsLoggingUtils.load_log_config(
+                    run_id=self.run_id, 
+                    device_id=self.device_id,
+                    log_config_file=self.log_config_file
+                )
+            finally:
+                if lock_acquired:
+                    MLOpsLoggingUtils.release_lock()
+                    lock_acquired = False
+            
             if config_data is not None:
                 config_len = len(config_data)
                 upload_file_config = config_data.get(self.file_rotate_count, None)
                 if upload_file_config is not None:
-                    file_path, uploaded_file_index = upload_file_config.file_path, upload_file_config.uploaded_file_index
+                    file_path = upload_file_config.file_path
+                    uploaded_file_index = upload_file_config.uploaded_file_index
                     shutil.copyfile(file_path, self.log_file_path)
-                    if MLOpsRuntimeLogProcessor.is_file_rotated(self.log_file_path, uploaded_file_index, config_len,
-                                                                self.file_rotate_count):
-                        MLOpsLoggingUtils.acquire_lock()
-                        config_data = MLOpsLoggingUtils.load_log_config(run_id=self.run_id, device_id=self.device_id,
-                                                                        log_config_file=self.log_config_file)
-                        config_data[self.file_rotate_count].upload_complete = True
-                        MLOpsLoggingUtils.save_log_config(run_id=self.run_id, device_id=self.device_id,
-                                                          log_config_file=self.log_config_file, config_data=config_data)
-                        MLOpsLoggingUtils.release_lock()
+                    
+                    if MLOpsRuntimeLogProcessor.is_file_rotated(
+                        self.log_file_path, uploaded_file_index, config_len, self.file_rotate_count
+                    ):
+                        # Acquire new lock for config update
+                        lock_acquired = MLOpsLoggingUtils.acquire_lock()
+                        try:
+                            config_data = MLOpsLoggingUtils.load_log_config(
+                                run_id=self.run_id,
+                                device_id=self.device_id, 
+                                log_config_file=self.log_config_file
+                            )
+                            config_data[self.file_rotate_count].upload_complete = True
+                            MLOpsLoggingUtils.save_log_config(
+                                run_id=self.run_id,
+                                device_id=self.device_id,
+                                log_config_file=self.log_config_file,
+                                config_data=config_data
+                            )
+                        finally:
+                            if lock_acquired:
+                                MLOpsLoggingUtils.release_lock()
+                                lock_acquired = False
+                            
                         self.file_rotate_count += 1
-                        # Re-fetch file path and index if file is rotated
+                        # Recursive call without holding any locks
                         return self.fetch_file_path_and_index()
                     return uploaded_file_index
 
             return upload_file_index
+        
         except Exception as e:
             raise ValueError(f"Failed to open log file. Exception: {e}")
         finally:
-            MLOpsLoggingUtils.release_lock()
+            if lock_acquired:
+                MLOpsLoggingUtils.release_lock()
 
     @staticmethod
     def is_file_rotated(file_path, uploaded_file_index, config_len, rotate_count):
diff --git a/python/fedml/core/mlops/mlops_utils.py b/python/fedml/core/mlops/mlops_utils.py
index f56e10b52c..5fab10dd09 100644
--- a/python/fedml/core/mlops/mlops_utils.py
+++ b/python/fedml/core/mlops/mlops_utils.py
@@ -203,10 +203,14 @@ def save_log_config(run_id, device_id, log_config_file, config_data):
             log_config_key = "log_config_{}_{}".format(run_id, device_id)
             log_config = MLOpsLoggingUtils.load_yaml_config(log_config_file)
             log_config[log_config_key] = MLOpsLoggingUtils.__convert_to_dict(config_data)
+            # Use a with statement to ensure the file is properly closed
             with open(log_config_file, "w") as stream:
-                yaml.dump(log_config, stream)
+                # Use yaml.safe_dump: it only emits plain data types, which is safer than yaml.dump
+                yaml.safe_dump(log_config, stream)
         except Exception as e:
-            MLOpsLoggingUtils.release_lock()
+            # Do not release the lock here: the caller acquired it and is
+            # responsible for releasing it in its own finally block.
+            # MLOpsLoggingUtils.release_lock()
             raise ValueError("Error saving log config: {}".format(e))
 
     @staticmethod

From a26e1b3b412582c2f95f4d8624675cd55b61b3b9 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Thu, 20 Feb 2025 20:00:30 +0800
Subject: [PATCH 277/282] [upd] Bump version to 0.9.6-dev202502202000

---
 python/fedml/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index 6ceaecc368..108ea8ae0b 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -37,7 +37,7 @@
 _global_training_type = None
 _global_comm_backend = None
 
-__version__ = "0.9.6-dev202502191600"
+__version__ = "0.9.6-dev202502202000"
 
 
 # This is the deployment environment used for different roles (RD/PM/BD/Public Developers). Potential VALUE: local, dev, test, release

From cdb32e982193d3c14172354dcb046cb99205c37d Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Thu, 20 Feb 2025 20:01:51 +0800
Subject: [PATCH 278/282] [upd] Bump version to 0.9.6-dev202502202000-2

---
 python/setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/setup.py b/python/setup.py
index 7cd9a2591c..59be71c5e0 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -126,7 +126,7 @@ def finalize_options(self):
 
 setup(
     name="fedml",
-    version="0.9.6-dev202502191600",
+    version="0.9.6-dev202502202000",
     author="FedML Team",
     author_email="ch@fedml.ai",
     description="A research and production integrated edge-cloud library for "

From 4dd13dffb9bd189433eefb78ffb18c9a62c56eef Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Thu, 20 Feb 2025 20:09:19 +0800
Subject: [PATCH 279/282] [debug] Remove verbose logging in job monitor and
 model metrics

---
 python/fedml/computing/scheduler/comm_utils/job_monitor.py      | 2 --
 .../computing/scheduler/model_scheduler/device_model_monitor.py | 2 +-
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
index 966d28e435..a1d0cc25ec 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
@@ -225,7 +225,6 @@ def monitor_replicas_number():
                     endpoint_replica_details["end_point_id"], 0) + 1
 
         for endpoint_id, num_replica in res_to_mlops.items():
-            logging.info(f"endpoint_id: {endpoint_id}, num_replica: {num_replica}")
             num_replica_url_path = "fedmlModelServer/api/v1/endpoint/replica-info"
             mlops_prefix = fedml._get_backend_service()
             url = f"{mlops_prefix}/{num_replica_url_path}"
@@ -243,7 +242,6 @@ def monitor_replicas_number():
                 "replicaNumber": int(num_replica),
                 "timestamp": int(time.time() * 1000)
             }
-            logging.info(f"req_header: {req_header}")
             logging.info(f"req_body: {req_body}")
             try:
                 response = requests.post(
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_monitor.py b/python/fedml/computing/scheduler/model_scheduler/device_model_monitor.py
index 5c49011204..472cab84aa 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_monitor.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_monitor.py
@@ -141,7 +141,7 @@ def send_monitoring_metrics(self, index):
                                          "total_request_num": int(total_request_num),
                                          "timestamp": timestamp,
                                          "edgeId": device_id}
-        logging.info("send monitor metrics {}".format(json.dumps(deployment_monitoring_payload)))
+        # logging.info("send monitor metrics {}".format(json.dumps(deployment_monitoring_payload)))
 
         self.monitor_mqtt_mgr.send_message_json(deployment_monitoring_topic, json.dumps(deployment_monitoring_payload))
         self.monitor_mqtt_mgr.send_message_json(deployment_monitoring_topic_prefix,

From 9516a2e28258f6e5b2c5a2268c45f9c780c91d0f Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Fri, 21 Feb 2025 17:51:51 +0800
Subject: [PATCH 280/282] [debug] Reorder logging initialization in deployment
 protocol managers

---
 .../master_protocol_manager.py                | 24 ++++++++++---------
 .../worker_protocol_manager.py                |  6 +++++
 2 files changed, 19 insertions(+), 11 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index e06c5162eb..d88224b1f2 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
@@ -156,9 +156,7 @@ def callback_start_deployment(self, topic, payload):
         # Get deployment params
         request_json = json.loads(payload)
 
-        logging.info("=" * 80)
-        logging.info("[Master Protocol Manager] Received start deployment request: {}".format(request_json))
-        logging.info("=" * 80)
+
 
         run_id = request_json["end_point_id"]
         end_point_name = request_json["end_point_name"]
@@ -183,7 +181,19 @@ def callback_start_deployment(self, topic, payload):
 
         inference_end_point_id = run_id
 
+        # Start log processor for current run
+        self.args.run_id = run_id
+        self.args.edge_id = self.edge_id
+        MLOpsRuntimeLog(args=self.args).init_logs()
+        MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source(
+            ServerConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT)
+        MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id)
+
+        # Emit these messages only after logging has been initialized
         logging.info("[Master] received start deployment request for end point {}.".format(run_id))
+        logging.info("=" * 80)
+        logging.info("[Master Protocol Manager] Received start deployment request: {}".format(request_json))
+        logging.info("=" * 80)
 
         # Set redis config
         FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
@@ -210,14 +220,6 @@ def callback_start_deployment(self, topic, payload):
             timeout_s=timeout_s, user_encrypted_api_key=user_encrypted_api_key
         )
 
-        # Start log processor for current run
-        self.args.run_id = run_id
-        self.args.edge_id = self.edge_id
-        MLOpsRuntimeLog(args=self.args).init_logs()
-        MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source(
-            ServerConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT)
-        MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id)
-
         # Add additional parameters to the request_json
         run_id = inference_end_point_id
         self.args.run_id = run_id
diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
index 7f7b041b77..0e56e2ce5c 100755
--- a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py
@@ -154,6 +154,12 @@ def callback_start_deployment(self, topic, payload):
             ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT)
         MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id)
 
+        # Emit these messages only after logging has been initialized
+        logging.info("[Worker] received start deployment request for end point {}.".format(run_id))
+        logging.info("=" * 80)
+        logging.info("[Worker Protocol Manager] Received start deployment request: {}".format(request_json))
+        logging.info("=" * 80)
+
         # Start the job runner
         request_json["run_id"] = run_id
         run_id_str = str(run_id)

From dac284e757f83f57742591c0cb82e172b51ca56f Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Mon, 24 Feb 2025 10:44:16 +0800
Subject: [PATCH 281/282] [debug] Remove verbose logging in job monitor

---
 .../fedml/computing/scheduler/comm_utils/job_monitor.py   | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
index a1d0cc25ec..15e3230055 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
@@ -207,7 +207,7 @@ def monitor_replicas_number():
         res_to_mlops = {}  # endpoint_id -> num_replica
 
         for endpoint_detail in res_frm_db:
-            logging.info(f"endpoint_detail: {endpoint_detail}")
+            # logging.info(f"endpoint_detail: {endpoint_detail}")
             endpoint_replicas_details = {}
             if isinstance(endpoint_detail, str):
                 endpoint_replicas_details = json.loads(endpoint_detail)
@@ -220,7 +220,7 @@ def monitor_replicas_number():
                 if isinstance(endpoint_replicas_details["result"], str):
                     endpoint_replica_details = json.loads(endpoint_replicas_details["result"])
                     
-                logging.info(f"endpoint_replica_details: {endpoint_replica_details}")
+                # logging.info(f"endpoint_replica_details: {endpoint_replica_details}")
                 res_to_mlops[endpoint_replica_details["end_point_id"]] = res_to_mlops.get(
                     endpoint_replica_details["end_point_id"], 0) + 1
 
@@ -242,14 +242,14 @@ def monitor_replicas_number():
                 "replicaNumber": int(num_replica),
                 "timestamp": int(time.time() * 1000)
             }
-            logging.info(f"req_body: {req_body}")
+            # logging.info(f"req_body: {req_body}")
             try:
                 response = requests.post(
                     url,
                     headers=req_header,
                     json=req_body
                 )
-                logging.info(f"endpoint_id: {endpoint_id}, response: {response}")
+                # logging.info(f"endpoint_id: {endpoint_id}, response: {response}")
                 if response.status_code != 200:
                     logging.error(f"Failed to send the replica number request to MLOps platform.")
                 else:

From d5b81503afbe6e287e417e76d5e84db140d7d788 Mon Sep 17 00:00:00 2001
From: charlieyl <charlie@tensoropera.com>
Date: Tue, 25 Feb 2025 10:38:18 +0800
Subject: [PATCH 282/282] Release version 0.9.6

---
 python/fedml/__init__.py | 2 +-
 python/setup.py          | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index 108ea8ae0b..c9dda8911e 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -37,7 +37,7 @@
 _global_training_type = None
 _global_comm_backend = None
 
-__version__ = "0.9.6-dev202502202000"
+__version__ = "0.9.6"
 
 
 # This is the deployment environment used for different roles (RD/PM/BD/Public Developers). Potential VALUE: local, dev, test, release
diff --git a/python/setup.py b/python/setup.py
index 59be71c5e0..8f5fe25219 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -126,7 +126,7 @@ def finalize_options(self):
 
 setup(
     name="fedml",
-    version="0.9.6-dev202502202000",
+    version="0.9.6",
     author="FedML Team",
     author_email="ch@fedml.ai",
     description="A research and production integrated edge-cloud library for "