From 127692d0e9296b58c5937adddd31c6f5f695eaf5 Mon Sep 17 00:00:00 2001 From: Liangyx2 Date: Fri, 14 Jun 2024 13:44:52 +0800 Subject: [PATCH 01/21] [NeuralChat] Refine path in AskDoc server (#1593) * Update askdoc.yaml Signed-off-by: Liangyx2 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Liangyx2 Co-authored-by: Sun, Xuehao Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../neural_chat/server/restful/retrieval_api.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/intel_extension_for_transformers/neural_chat/server/restful/retrieval_api.py b/intel_extension_for_transformers/neural_chat/server/restful/retrieval_api.py index 40c2aa8a3e6..d338482f220 100644 --- a/intel_extension_for_transformers/neural_chat/server/restful/retrieval_api.py +++ b/intel_extension_for_transformers/neural_chat/server/restful/retrieval_api.py @@ -234,6 +234,13 @@ def handle_retrieval_request(self, request: RetrievalRequest) -> RetrievalRespon RETRIEVAL_FILE_PATH = os.getenv("RETRIEVAL_FILE_PATH", default="./retrieval_docs")+'/' EXCEPT_PATTERNS = ["/xuhui_doc", "default/persist_dir"] +def safe_join(base_path, *paths): + # Prevent path traversal by ensuring the final path is within the base path + base_path = os.path.abspath(base_path) + final_path = os.path.abspath(os.path.join(base_path, *paths)) + if not final_path.startswith(base_path): + raise ValueError("Attempted Path Traversal Detected") + return final_path @router.post("/v1/askdoc/upload_link") async def retrieval_upload_link(request: Request): @@ -316,7 +323,7 @@ async def retrieval_add_files(request: Request, path_prefix = get_path_prefix(kb_id, user_id) upload_path = path_prefix + '/upload_dir' persist_path = path_prefix + '/persist_dir' - save_path = Path(upload_path) / file_path + save_path = safe_join(Path(upload_path), file_path) save_path.parent.mkdir(parents=True, 
exist_ok=True) # save file content to local disk @@ -618,7 +625,7 @@ async def delete_single_file(request: Request): logger.info(f"[askdoc - delete_file] successfully delete kb {knowledge_base_id}") return {"status": True} - delete_path = Path(path_prefix) / "upload_dir" / del_path + delete_path = safe_join(Path(path_prefix) / "upload_dir", del_path) logger.info(f'[askdoc - delete_file] delete_path: {delete_path}') # partially delete files/folders from the kb From e6dfe65a6745d4f7ba9fffaa63b0d8b7ab880297 Mon Sep 17 00:00:00 2001 From: "Sun, Xuehao" Date: Tue, 18 Jun 2024 11:19:10 +0800 Subject: [PATCH 02/21] limit setuptools version (#1615) Signed-off-by: Sun, Xuehao --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 0b45ec85d3f..f73ff94bd6b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ py-cpuinfo -setuptools>=65 +setuptools==69.5.1 setuptools_scm[toml]>=6.2 From 583520579ea46de48c43083d9176ceea4a906275 Mon Sep 17 00:00:00 2001 From: ZePan110 Date: Tue, 18 Jun 2024 15:21:27 +0800 Subject: [PATCH 03/21] Remove permissions. 
(#1610) Signed-off-by: zepan Co-authored-by: Sun, Xuehao --- .github/checkgroup.yml | 7 ------- .github/workflows/Scaner_BDBA.yaml | 1 - .github/workflows/Scaner_Coverity.yaml | 1 - .github/workflows/Scaner_Trivy.yaml | 1 - .github/workflows/build-container.yaml | 1 - .github/workflows/chatbot-finetune-mpt-7b-chat-hpu.yml | 1 - .github/workflows/chatbot-finetune-mpt-7b-chat.yml | 1 - .../workflows/chatbot-inference-llama-2-7b-chat-hf.yml | 1 - .../chatbot-inference-llama-2-7b_70b-chat-hf-hpu.yml | 1 - .../workflows/chatbot-inference-mpt-7b-chat-hpu.yml | 1 - .github/workflows/chatbot-inference-mpt-7b-chat.yml | 1 - .github/workflows/chatbot-test.yml | 1 - .github/workflows/chatbot_finetuning.yml | 1 - .github/workflows/deploy-test.yml | 1 - .github/workflows/format_scan.yml | 1 - .github/workflows/llm-test.yml | 2 +- .github/workflows/optimize-test.yml | 1 - .github/workflows/publish.yml | 1 - .github/workflows/script/launch_llm.sh | 2 +- .github/workflows/script/models/run_llm.sh | 10 ++++------ .github/workflows/script/prepare_env_with_conda.sh | 9 +-------- .github/workflows/sparse_lib_CI.yml | 1 - .github/workflows/trellix.yaml | 1 - .github/workflows/unit-test-engine.yml | 1 - .github/workflows/unit-test-kernel.yml | 1 - .github/workflows/unit-test-neuralchat.yml | 1 - .github/workflows/unit-test-neuralspeed.yml | 1 - .github/workflows/unit-test-optimize.yml | 1 - .github/workflows/windows-test.yml | 1 - 29 files changed, 7 insertions(+), 47 deletions(-) diff --git a/.github/checkgroup.yml b/.github/checkgroup.yml index aadf36180b1..4ac0920c729 100644 --- a/.github/checkgroup.yml +++ b/.github/checkgroup.yml @@ -30,13 +30,6 @@ subprojects: - "optimize-unit-test-PR-test" - "Genreate-OptimizeUT-Report" - - id: "Neural Speed Unit Test workflow" - paths: - - .github/workflows/unit-test-neuralspeed.yml - - ".github/workflows/script/unitTest/run_unit_test_neuraspeed.sh" - checks: - - "neural-speed-unit-test" - - id: "NeuralChat Unit Test" paths: - 
".github/workflows/unit-test-neuralchat.yml" diff --git a/.github/workflows/Scaner_BDBA.yaml b/.github/workflows/Scaner_BDBA.yaml index 1a84a54bf81..53fce282267 100644 --- a/.github/workflows/Scaner_BDBA.yaml +++ b/.github/workflows/Scaner_BDBA.yaml @@ -3,7 +3,6 @@ name: Scanner BDBA on: workflow_dispatch: -permissions: write-all jobs: bdba_job: name: BDBA Scan diff --git a/.github/workflows/Scaner_Coverity.yaml b/.github/workflows/Scaner_Coverity.yaml index 8a89ebbc808..a2ee1363a75 100644 --- a/.github/workflows/Scaner_Coverity.yaml +++ b/.github/workflows/Scaner_Coverity.yaml @@ -3,7 +3,6 @@ name: Scanner Coverity PYTHON on: workflow_dispatch: -permissions: write-all jobs: coverity_job: name: Coverity diff --git a/.github/workflows/Scaner_Trivy.yaml b/.github/workflows/Scaner_Trivy.yaml index 70dbb788016..cebddadb656 100644 --- a/.github/workflows/Scaner_Trivy.yaml +++ b/.github/workflows/Scaner_Trivy.yaml @@ -2,7 +2,6 @@ name: Trivy Scan for Containers on: workflow_dispatch: -permissions: write-all jobs: trivy_container_job: uses: "intel-innersource/frameworks.ai.infrastructure.code-scan-tools/.github/workflows/Scanner_Trivy.yml@one-ci-cd" diff --git a/.github/workflows/build-container.yaml b/.github/workflows/build-container.yaml index 332484c2bc3..b6ef1d207cf 100644 --- a/.github/workflows/build-container.yaml +++ b/.github/workflows/build-container.yaml @@ -3,7 +3,6 @@ on: workflow_dispatch: # Can be manually executed schedule: # 1/week Sunday at 07:00AM - cron: "5 7 * * 0" -permissions: write-all jobs: build: container: # MLOps Dev container for Compose Automation diff --git a/.github/workflows/chatbot-finetune-mpt-7b-chat-hpu.yml b/.github/workflows/chatbot-finetune-mpt-7b-chat-hpu.yml index 3ffac7f3131..175df5ab40a 100644 --- a/.github/workflows/chatbot-finetune-mpt-7b-chat-hpu.yml +++ b/.github/workflows/chatbot-finetune-mpt-7b-chat-hpu.yml @@ -6,7 +6,6 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref 
}}-ft-mpt-7b-hpu cancel-in-progress: true -permissions: write-all jobs: finetuning: name: finetuning test diff --git a/.github/workflows/chatbot-finetune-mpt-7b-chat.yml b/.github/workflows/chatbot-finetune-mpt-7b-chat.yml index 398411d47a1..37177d55fa7 100644 --- a/.github/workflows/chatbot-finetune-mpt-7b-chat.yml +++ b/.github/workflows/chatbot-finetune-mpt-7b-chat.yml @@ -6,7 +6,6 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-ft-mpt-7b cancel-in-progress: true -permissions: write-all jobs: finetuning: name: finetuning test diff --git a/.github/workflows/chatbot-inference-llama-2-7b-chat-hf.yml b/.github/workflows/chatbot-inference-llama-2-7b-chat-hf.yml index b606c5ee138..ade6b97c9e5 100644 --- a/.github/workflows/chatbot-inference-llama-2-7b-chat-hf.yml +++ b/.github/workflows/chatbot-inference-llama-2-7b-chat-hf.yml @@ -6,7 +6,6 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-inf-lla-7b cancel-in-progress: true -permissions: write-all jobs: inference: name: inference test diff --git a/.github/workflows/chatbot-inference-llama-2-7b_70b-chat-hf-hpu.yml b/.github/workflows/chatbot-inference-llama-2-7b_70b-chat-hf-hpu.yml index 1627cb9bd74..bbefd811f6b 100644 --- a/.github/workflows/chatbot-inference-llama-2-7b_70b-chat-hf-hpu.yml +++ b/.github/workflows/chatbot-inference-llama-2-7b_70b-chat-hf-hpu.yml @@ -6,7 +6,6 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-inf-lla-7b-hpu cancel-in-progress: true -permissions: write-all jobs: inference: name: inference test diff --git a/.github/workflows/chatbot-inference-mpt-7b-chat-hpu.yml b/.github/workflows/chatbot-inference-mpt-7b-chat-hpu.yml index b7974c5db9f..0ab4630031e 100644 --- a/.github/workflows/chatbot-inference-mpt-7b-chat-hpu.yml +++ b/.github/workflows/chatbot-inference-mpt-7b-chat-hpu.yml @@ -6,7 +6,6 @@ on: concurrency: group: ${{ 
github.workflow }}-${{ github.event.pull_request.number || github.ref }}-inf-mpt-7b-hpu cancel-in-progress: true -permissions: write-all jobs: inference: name: inference test diff --git a/.github/workflows/chatbot-inference-mpt-7b-chat.yml b/.github/workflows/chatbot-inference-mpt-7b-chat.yml index 96135ec5b33..e7ed8c612fc 100644 --- a/.github/workflows/chatbot-inference-mpt-7b-chat.yml +++ b/.github/workflows/chatbot-inference-mpt-7b-chat.yml @@ -6,7 +6,6 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-inf-mpt-7b cancel-in-progress: true -permissions: write-all jobs: inference: name: inference test diff --git a/.github/workflows/chatbot-test.yml b/.github/workflows/chatbot-test.yml index cd4a8e8dd32..64e16a9cf41 100644 --- a/.github/workflows/chatbot-test.yml +++ b/.github/workflows/chatbot-test.yml @@ -27,7 +27,6 @@ on: - '!intel_extension_for_transformers/neural_chat/README.md' workflow_dispatch: -permissions: write-all # If there is a new commit, the previous jobs will be canceled concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} diff --git a/.github/workflows/chatbot_finetuning.yml b/.github/workflows/chatbot_finetuning.yml index ead32180106..e93355232f1 100644 --- a/.github/workflows/chatbot_finetuning.yml +++ b/.github/workflows/chatbot_finetuning.yml @@ -7,7 +7,6 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true -permissions: write-all jobs: call-finetune-mpt-7b-chat: uses: ./.github/workflows/chatbot-finetune-mpt-7b-chat.yml diff --git a/.github/workflows/deploy-test.yml b/.github/workflows/deploy-test.yml index d7d33c6b9cb..683665ba2ed 100644 --- a/.github/workflows/deploy-test.yml +++ b/.github/workflows/deploy-test.yml @@ -7,7 +7,6 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true -permissions: 
write-all env: OUT_SCRIPT_PATH: ${{ github.workspace }}/.github/workflows/script/models SCRIPT_PATH: /intel-extension-for-transformers/.github/workflows/script diff --git a/.github/workflows/format_scan.yml b/.github/workflows/format_scan.yml index 01209afd5e3..2c8fbfed2eb 100644 --- a/.github/workflows/format_scan.yml +++ b/.github/workflows/format_scan.yml @@ -11,7 +11,6 @@ on: - .github/workflows/format_scan.yml - .github/workflows/script/formatScan/** workflow_dispatch: -permissions: write-all # If there is a new commit, the previous jobs will be canceled concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} diff --git a/.github/workflows/llm-test.yml b/.github/workflows/llm-test.yml index 02b8e484b5f..9b5b68d74a4 100644 --- a/.github/workflows/llm-test.yml +++ b/.github/workflows/llm-test.yml @@ -13,7 +13,6 @@ on: - "!intel_extension_for_transformers/transformers/runtime/third_party/**" - "!intel_extension_for_transformers/transformers/runtime/docs/**" workflow_dispatch: -permissions: write-all # If there is a new commit, the previous jobs will be canceled concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -50,6 +49,7 @@ jobs: - name: Binary build run: | cd ${{ github.workspace }} + source ~/.bashrc conda activate llm-test || source activate llm-test compiler_version=11.1.0 conda install --update-deps -c conda-forge gxx==${compiler_version} gcc==${compiler_version} gxx_linux-64==${compiler_version} libstdcxx-ng sysroot_linux-64 libxcrypt -y diff --git a/.github/workflows/optimize-test.yml b/.github/workflows/optimize-test.yml index b0bf5922146..c0ae69e4beb 100644 --- a/.github/workflows/optimize-test.yml +++ b/.github/workflows/optimize-test.yml @@ -7,7 +7,6 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true -permissions: write-all env: OUT_SCRIPT_PATH: ${{ github.workspace 
}}/.github/workflows/script/models SCRIPT_PATH: /intel-extension-for-transformers/.github/workflows/script diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 2d5427a01bd..2d9a99d1d51 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -5,7 +5,6 @@ on: branches: - main workflow_dispatch: -permissions: write-all jobs: build: diff --git a/.github/workflows/script/launch_llm.sh b/.github/workflows/script/launch_llm.sh index 7e8dc3aebd2..74afd6738c3 100644 --- a/.github/workflows/script/launch_llm.sh +++ b/.github/workflows/script/launch_llm.sh @@ -23,7 +23,7 @@ function main() { fi # init conda - #. $(dirname ${CONDA_EXE})/../etc/profile.d/conda.sh + source ~/.bashrc conda activate $conda_env || source activate $conda_env # env diff --git a/.github/workflows/script/models/run_llm.sh b/.github/workflows/script/models/run_llm.sh index 30369b9543e..4ef62af935e 100644 --- a/.github/workflows/script/models/run_llm.sh +++ b/.github/workflows/script/models/run_llm.sh @@ -36,9 +36,9 @@ main() { } function prepare() { - [[ -d ${HOME}/anaconda3/bin ]] && export PATH=${HOME}/anaconda3/bin/:$PATH - [[ -d ${HOME}/miniconda3/bin ]] && export PATH=${HOME}/miniconda3/bin/:$PATH - export LD_LIBRARY_PATH=/lib64/libcrypto.so.1.1:${HOME}/miniconda3/envs/${conda_env_name}/lib/:$LD_LIBRARY_PATH + source ~/.bashrc + source activate ${conda_env_name} || conda activate ${conda_env_name} + export LD_LIBRARY_PATH=/lib64/libcrypto.so.1.1:${CONDA_PREFIX}/lib/:$LD_LIBRARY_PATH if [[ ${precision} == "fp8" ]]; then export NE_WEIGHT_FP8_4E3M=1 fi @@ -46,7 +46,6 @@ function prepare() { working_dir="${WORKING_DIR}/examples/huggingface/pytorch/text-generation/deployment" fi $BOLD_YELLOW && echo "Running ---- ${framework}, ${model}----Prepare" - source activate ${conda_env_name} || conda activate ${conda_env_name} if [[ ${cpu} == *"spr"* ]] || [[ ${cpu} == *"SPR"* ]] || [[ ${cpu} == *"Spr"* ]]; then export 
CC=/opt/rh/gcc-toolset-11/root/usr/bin/gcc export CXX=/opt/rh/gcc-toolset-11/root/usr/bin/g++ @@ -56,8 +55,7 @@ function prepare() { echo "Working in ${working_dir}" echo -e "\nInstalling model requirements..." export PATH=/lib64/libcrypto.so.1.1:$PATH - cp /lib64/libcrypto.so.1.1 ${HOME}/miniconda3/envs/${conda_env_name}/lib/libcrypto.so.1.1 - cp /lib64/libcrypto.so.1.1 ${HOME}/miniconda3/lib/libcrypto.so.1.1 + cp /lib64/libcrypto.so.1.1 ${CONDA_PREFIX}/lib/libcrypto.so.1.1 if [ -f "requirements.txt" ]; then sed -i '/^transformers/d' requirements.txt n=0 diff --git a/.github/workflows/script/prepare_env_with_conda.sh b/.github/workflows/script/prepare_env_with_conda.sh index b6622a08837..7f5f6784a51 100644 --- a/.github/workflows/script/prepare_env_with_conda.sh +++ b/.github/workflows/script/prepare_env_with_conda.sh @@ -6,13 +6,6 @@ if [[ -z "${conda_env_name}" ]] || [[ -z "${python_version}" ]]; then exit 1 fi +source ~/.bashrc conda create -n ${conda_env_name} python=${python_version} -y source activate ${conda_env_name} || conda activate ${conda_env_name} -#pip install -U pip -# -#if [ -f "requirements.txt" ]; then -# python -m pip install --default-timeout=100 -r requirements.txt -# pip list -#else -# echo "Not found requirements.txt file." 
-#fi diff --git a/.github/workflows/sparse_lib_CI.yml b/.github/workflows/sparse_lib_CI.yml index d908f994518..035c456e9e5 100644 --- a/.github/workflows/sparse_lib_CI.yml +++ b/.github/workflows/sparse_lib_CI.yml @@ -8,7 +8,6 @@ concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true -permissions: write-all env: DOCKER_CONFIG_NAME: "commonDockerConfig" diff --git a/.github/workflows/trellix.yaml b/.github/workflows/trellix.yaml index ffed7d15e24..f9167339f27 100644 --- a/.github/workflows/trellix.yaml +++ b/.github/workflows/trellix.yaml @@ -3,7 +3,6 @@ name: Trellix Command Line Scanner on: workflow_dispatch: -permissions: write-all jobs: Trellix: runs-on: inner-source diff --git a/.github/workflows/unit-test-engine.yml b/.github/workflows/unit-test-engine.yml index fef1f600514..94c2a8c5166 100644 --- a/.github/workflows/unit-test-engine.yml +++ b/.github/workflows/unit-test-engine.yml @@ -14,7 +14,6 @@ on: - "!intel_extension_for_transformers/transformers/runtime/third_party/**" - "!intel_extension_for_transformers/transformers/runtime/docs/**" workflow_dispatch: -permissions: write-all # If there is a new commit, the previous jobs will be canceled concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} diff --git a/.github/workflows/unit-test-kernel.yml b/.github/workflows/unit-test-kernel.yml index 75ba18a16c2..b551049081d 100644 --- a/.github/workflows/unit-test-kernel.yml +++ b/.github/workflows/unit-test-kernel.yml @@ -14,7 +14,6 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true -permissions: write-all env: DOCKER_CONFIG_NAME: "commonDockerConfig" REPO_NAME: "intel-extension-for-transformers" diff --git a/.github/workflows/unit-test-neuralchat.yml b/.github/workflows/unit-test-neuralchat.yml index b5a13f9686b..1289e0ad2d1 100644 --- 
a/.github/workflows/unit-test-neuralchat.yml +++ b/.github/workflows/unit-test-neuralchat.yml @@ -25,7 +25,6 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true -permissions: write-all env: DOCKER_CONFIG_NAME: "commonDockerConfig" REPO_NAME: "intel-extension-for-transformers" diff --git a/.github/workflows/unit-test-neuralspeed.yml b/.github/workflows/unit-test-neuralspeed.yml index f140c9601e9..14d9a538243 100644 --- a/.github/workflows/unit-test-neuralspeed.yml +++ b/.github/workflows/unit-test-neuralspeed.yml @@ -12,7 +12,6 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true -permissions: write-all env: DOCKER_CONFIG_NAME: "commonDockerConfig" REPO_NAME: "intel-extension-for-transformers" diff --git a/.github/workflows/unit-test-optimize.yml b/.github/workflows/unit-test-optimize.yml index d210c6a26c3..3bbc782c76e 100644 --- a/.github/workflows/unit-test-optimize.yml +++ b/.github/workflows/unit-test-optimize.yml @@ -23,7 +23,6 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true -permissions: write-all env: DOCKER_CONFIG_NAME: "commonDockerConfig" REPO_NAME: "intel-extension-for-transformers" diff --git a/.github/workflows/windows-test.yml b/.github/workflows/windows-test.yml index 1905f5bb57a..1a6576d5761 100644 --- a/.github/workflows/windows-test.yml +++ b/.github/workflows/windows-test.yml @@ -14,7 +14,6 @@ on: - "!intel_extension_for_transformers/transformers/runtime/test/**" - "!intel_extension_for_transformers/qbits/qbits_ut/**" workflow_dispatch: -permissions: write-all # If there is a new commit, the previous jobs will be canceled concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} From b262d39af9675269120c7554821123b7d18c0b37 Mon Sep 17 00:00:00 2001 From: kevinintel Date: Wed, 19 
Jun 2024 16:20:15 +0800 Subject: [PATCH 04/21] add badge (#1618) Add OpenSSF --- SECURITY.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/SECURITY.md b/SECURITY.md index 71a71eff1b6..eb546f7f75a 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,3 +1,8 @@ +OpenSSF Badge +=============== + +## [OpenSSF Badge](https://www.bestpractices.dev/en/projects/9128) + Security Policy =============== From cb2725acf124bf2ea64eb34c4b0d4ad651211d52 Mon Sep 17 00:00:00 2001 From: kevinintel Date: Wed, 19 Jun 2024 16:21:47 +0800 Subject: [PATCH 05/21] Create scorecard.yml (#1619) Signed-off-by: kevinintel --- .github/workflows/scorecard.yml | 73 +++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 .github/workflows/scorecard.yml diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml new file mode 100644 index 00000000000..79973f7cab7 --- /dev/null +++ b/.github/workflows/scorecard.yml @@ -0,0 +1,73 @@ +# This workflow uses actions that are not certified by GitHub. They are provided +# by a third-party and are governed by separate terms of service, privacy +# policy, and support documentation. + +name: Scorecard supply-chain security +on: + # For Branch-Protection check. Only the default branch is supported. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + - cron: '25 5 * * 5' + push: + branches: [ "main" ] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). 
+ id-token: write + # Uncomment the permissions below if installing in a private repository. + # contents: read + # actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 + with: + results_file: results.sarif + results_format: sarif + # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecard on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional. + # repo_token: ${{ secrets.SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. + publish_results: true + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@97a0fba1372883ab732affbe8f94b823f91727db # v3.pre.node20 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard (optional). 
+ # Commenting out will disable upload of results to your repo's Code Scanning dashboard + - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 + with: + sarif_file: results.sarif From fce38b918e2818f1f6a803e2d094c4c4591761af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20Guimar=C3=A3es?= Date: Wed, 19 Jun 2024 05:31:49 -0300 Subject: [PATCH 06/21] Fix typo at README.md (#1620) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Removed an extra space from two shell commands examples Signed-off-by: Lucas Guimarães --- .../tensorflow/language-modeling/quantization/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/huggingface/tensorflow/language-modeling/quantization/README.md b/examples/huggingface/tensorflow/language-modeling/quantization/README.md index 883689068bc..bb41901b183 100644 --- a/examples/huggingface/tensorflow/language-modeling/quantization/README.md +++ b/examples/huggingface/tensorflow/language-modeling/quantization/README.md @@ -31,7 +31,7 @@ pip install -r requirements.txt ``` cd ptq -bash run_tuning.sh --topology=[topology] +bash run_tuning.sh --topology=[topology] ``` * To benchmark the int8 model @@ -52,7 +52,7 @@ bash run_benchmark.sh --topology=[topology] --mode=benchmark --int8=true ``` cd ptq -bash run_tuning.sh --topology=[topology] +bash run_tuning.sh --topology=[topology] ``` * To benchmark the int8 model @@ -60,4 +60,4 @@ bash run_tuning.sh --topology=[topology] ``` cd ptq bash run_benchmark.sh --topology=[topology] --mode=benchmark --int8=true -``` \ No newline at end of file +``` From 9a5a578b686473a42ddc41af5fbb6fd5b09da948 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20Guimar=C3=A3es?= Date: Wed, 19 Jun 2024 05:32:01 -0300 Subject: [PATCH 07/21] Fix typo at README.md (#1621) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixed typos and 
improved some phrases. Signed-off-by: Lucas Guimarães --- workflows/chatbot/inference/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/workflows/chatbot/inference/README.md b/workflows/chatbot/inference/README.md index ba5da39484a..1d9598953a1 100644 --- a/workflows/chatbot/inference/README.md +++ b/workflows/chatbot/inference/README.md @@ -49,7 +49,7 @@ numactl -m -C python generate.py \ To enable FP32 inference, you can add the parameter `--dtype "float32"`. To check the statistical information of inference, you can add the parameter `--return_stats`. ## LLama2 INT8 Inference -[Llama2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) int8 inference demonstrates in [int8_llama2](https://github.com/intel/intel-extension-for-transformers/tree/int8_llama2/workflows/chatbot/inference) branch and need install Intel-extension-for-pytorch [llm_feature_branch](https://github.com/intel/intel-extension-for-pytorch/tree/llm_feature_branch) branch. Please follow the [README.md](https://github.com/intel/intel-extension-for-transformers/blob/81a4484dcc93f09d7609e6896fe3fbc22756975b/workflows/chatbot/inference/README.md) to setup the environments and make quantization. +[Llama2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) int8 inference demonstrates in [int8_llama2](https://github.com/intel/intel-extension-for-transformers/tree/int8_llama2/workflows/chatbot/inference) branch and need install Intel-extension-for-pytorch [llm_feature_branch](https://github.com/intel/intel-extension-for-pytorch/tree/llm_feature_branch) branch. Please follow the [README.md](https://github.com/intel/intel-extension-for-transformers/blob/81a4484dcc93f09d7609e6896fe3fbc22756975b/workflows/chatbot/inference/README.md) to set up the environments and make quantization. 
# Inference on Habana Gaudi @@ -107,7 +107,7 @@ python ../utils/gaudi_spawn.py --use_deepspeed --world_size 8 generate.py \ Habana supports HPU graph mode for inference speedup, which is available for bloom, gpt2, opt, gptj, gpt_neox, mpt, llama. You can use the parameter `use_hpu_graphs` to speed up the inference. -you can use '--peft_model_path' to apply you peft finetuned output model during generation. +you can use '--peft_model_path' to apply your peft finetuned output model during generation. ```bash python ../utils/gaudi_spawn.py --use_deepspeed --world_size 8 generate.py \ @@ -122,7 +122,7 @@ python ../utils/gaudi_spawn.py --use_deepspeed --world_size 8 generate.py \ # Additional Notes -Here are the explanations of parameters in generate.py: +Here are the explanations of the parameters in generate.py: `--temperature`: Controls the diversity of generated text. Lower values result in more deterministic outputs. The default value is 0.1. `--top_p`: During text generation, only consider tokens with cumulative probability up to this value. This parameter helps to avoid extremely low probability tokens. The default value is 0.75. `--top_k`: The number of highest probability vocabulary tokens to consider for each step of text generation. The default value is 40. 
From 97300e7279452133b4fd084d8957f0678c23cfaa Mon Sep 17 00:00:00 2001 From: jiqing-feng <107918818+jiqing-feng@users.noreply.github.com> Date: Thu, 20 Jun 2024 13:39:15 +0800 Subject: [PATCH 08/21] fix deepspeed script and add a optimum-intel option (#1617) * fix deepspeed script and add a optimum-intel option * add comments for optimum-intel --- .../run_generation_with_deepspeed.py | 23 ++++++++++++------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/examples/huggingface/pytorch/text-generation/inference/run_generation_with_deepspeed.py b/examples/huggingface/pytorch/text-generation/inference/run_generation_with_deepspeed.py index ee629d0fd17..f705e275d8f 100644 --- a/examples/huggingface/pytorch/text-generation/inference/run_generation_with_deepspeed.py +++ b/examples/huggingface/pytorch/text-generation/inference/run_generation_with_deepspeed.py @@ -79,6 +79,7 @@ parser.add_argument("--token-latency", action="store_true") parser.add_argument("--throughput", action="store_true") parser.add_argument("--accuracy-only", action="store_true") +parser.add_argument("--optimum-intel", action="store_true", help="Use IPEXModel in optimum-intel to optimize the model") parser.add_argument( "--acc-tasks", nargs="+", @@ -143,7 +144,7 @@ def get_repo_root(model_name_or_path): model_name_or_path, local_files_only=is_offline_mode(), cache_dir=os.getenv("TRANSFORMERS_CACHE", None), - ignore_patterns=["*.safetensors", "*.msgpack", "*.h5"], + ignore_patterns=["*.safetensors", "*.msgpack", "*.h5", "training_args.bin"], resume_download=True, ) @@ -153,7 +154,7 @@ def get_repo_root(model_name_or_path): model_name_or_path, local_files_only=is_offline_mode(), cache_dir=os.getenv("TRANSFORMERS_CACHE", None), - ignore_patterns=["*.safetensors", "*.msgpack", "*.h5"], + ignore_patterns=["*.safetensors", "*.msgpack", "*.h5", "training_args.bin"], resume_download=True, ) @@ -201,7 +202,7 @@ def print_mem_usage(msg): if args.benchmark: print_mem_usage("pre-from-pretrained") 
-is_meta_support = not model_type in ["falcon"] +is_meta_support = model_type not in ["falcon"] # Construct model with fake meta tensors, later will be replaced during ds-inference ckpt load with deepspeed.OnDevice(dtype=load_dtype, device="meta", enabled=is_meta_support): @@ -226,7 +227,8 @@ def print_mem_usage(msg): def write_checkpoints_json(): checkpoint_files = get_checkpoint_files(model_name) if local_rank == 0: - data = {"type": "BLOOM", "checkpoints": checkpoint_files, "version": 1.0} + type = "BLOOM" if model.config.model_type == "bloom" else "ds_model" + data = {"type": type, "checkpoints": checkpoint_files, "version": 1.0} json.dump(data, open(checkpoints_json, "w")) @@ -255,6 +257,7 @@ def write_checkpoints_json(): checkpoint=checkpoints_json if is_meta_support else None, **kwargs, ) +model = model.module if args.benchmark: print_mem_usage("post-ds-inference-init") @@ -264,7 +267,11 @@ def write_checkpoints_json(): # to ipex if args.ipex: - model = ipex.optimize_transformers(model.eval().to("xpu"), dtype=infer_dtype) + if args.optimum_intel and args.device == "cpu" and model.config.model_type == "llama": + from optimum.intel import IPEXModelForCausalLM + model = IPEXModelForCausalLM(model.eval(), config) + else: + model = ipex.optimize_transformers(model.eval().to(model.device), dtype=infer_dtype) # bypass assertion for beam4 if isinstance(model, deepspeed.InferenceEngine): @@ -378,7 +385,7 @@ def _model_generate(self, context, max_length, eos_token_id): config=config, model=model, tokenizer=tokenizer, - device="xpu", + device=model.device, num_beams=args.num_beams, batch_size=args.batch_size, dtype=args.dtype, @@ -480,7 +487,7 @@ def generate(): with torch.inference_mode(): # latency for i in range(cycles): - with torch.autograd.profiler_legacy.profile(enabled=do_profiling, use_xpu=True, record_shapes=True) as prof: + with torch.autograd.profiler_legacy.profile(enabled=do_profiling, use_xpu=True if model.device.type=="cpu" else False, 
record_shapes=True) as prof: t0 = time.time() gen_ids, outputs = generate() if args.cuda: @@ -488,7 +495,7 @@ def generate(): t1 = time.time() if do_profiling: - torch.save(prof.key_averages().table(sort_by="self_xpu_time_total"), "./profile_{}.pt".format(local_rank)) + torch.save(prof.key_averages().table(sort_by=f"self_{args.device}_time_total"), "./profile_{}.pt".format(local_rank)) torch.save(prof.table(sort_by="id", row_limit=-1),'./profile_{}_id.pt'.format(local_rank)) torch.save(prof.key_averages(group_by_input_shape=True).table(), "./profile_{}_detail.pt".format(local_rank)) prof.export_chrome_trace("./trace.json") From 3e3f73bca5981f249318e9ce3672c71dab7ef6d0 Mon Sep 17 00:00:00 2001 From: kevinintel Date: Thu, 20 Jun 2024 13:43:51 +0800 Subject: [PATCH 09/21] add contributors (#1622) add contributors --- docs/contributors.md | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/docs/contributors.md b/docs/contributors.md index 4b92fd91bd0..b4771da78c8 100644 --- a/docs/contributors.md +++ b/docs/contributors.md @@ -116,9 +116,20 @@ Liangliang-Ma + + Lucas Guimarães + + Siddhi Velankar + + Sergey Nesterov - + + + Srikanth Ramakrishna + + igeni +
From 6292b279b44e13ecc5902f6fa7b233314e61d3bc Mon Sep 17 00:00:00 2001 From: kevinintel Date: Thu, 20 Jun 2024 14:16:58 +0800 Subject: [PATCH 10/21] remove scorecard (#1624) --- .github/workflows/scorecard.yml | 73 --------------------------------- 1 file changed, 73 deletions(-) delete mode 100644 .github/workflows/scorecard.yml diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml deleted file mode 100644 index 79973f7cab7..00000000000 --- a/.github/workflows/scorecard.yml +++ /dev/null @@ -1,73 +0,0 @@ -# This workflow uses actions that are not certified by GitHub. They are provided -# by a third-party and are governed by separate terms of service, privacy -# policy, and support documentation. - -name: Scorecard supply-chain security -on: - # For Branch-Protection check. Only the default branch is supported. See - # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection - branch_protection_rule: - # To guarantee Maintained check is occasionally updated. See - # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained - schedule: - - cron: '25 5 * * 5' - push: - branches: [ "main" ] - -# Declare default permissions as read only. -permissions: read-all - -jobs: - analysis: - name: Scorecard analysis - runs-on: ubuntu-latest - permissions: - # Needed to upload the results to code-scanning dashboard. - security-events: write - # Needed to publish results and get a badge (see publish_results below). - id-token: write - # Uncomment the permissions below if installing in a private repository. - # contents: read - # actions: read - - steps: - - name: "Checkout code" - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - persist-credentials: false - - - name: "Run analysis" - uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 - with: - results_file: results.sarif - results_format: sarif - # (Optional) "write" PAT token. 
Uncomment the `repo_token` line below if: - # - you want to enable the Branch-Protection check on a *public* repository, or - # - you are installing Scorecard on a *private* repository - # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional. - # repo_token: ${{ secrets.SCORECARD_TOKEN }} - - # Public repositories: - # - Publish results to OpenSSF REST API for easy access by consumers - # - Allows the repository to include the Scorecard badge. - # - See https://github.com/ossf/scorecard-action#publishing-results. - # For private repositories: - # - `publish_results` will always be set to `false`, regardless - # of the value entered here. - publish_results: true - - # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF - # format to the repository Actions tab. - - name: "Upload artifact" - uses: actions/upload-artifact@97a0fba1372883ab732affbe8f94b823f91727db # v3.pre.node20 - with: - name: SARIF file - path: results.sarif - retention-days: 5 - - # Upload the results to GitHub's code scanning dashboard (optional). 
- # Commenting out will disable upload of results to your repo's Code Scanning dashboard - - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 - with: - sarif_file: results.sarif From 5d9dfbc55c57c506d81b0f5672f601b4b71d6dbc Mon Sep 17 00:00:00 2001 From: StepSecurity Bot Date: Thu, 20 Jun 2024 01:15:03 -0700 Subject: [PATCH 11/21] [StepSecurity] ci: Harden GitHub Actions (#1625) Signed-off-by: StepSecurity Bot --- .github/workflows/build-container.yaml | 3 +++ .../workflows/chatbot-finetune-mpt-7b-chat-hpu.yml | 3 +++ .github/workflows/chatbot-finetune-mpt-7b-chat.yml | 3 +++ .../chatbot-inference-llama-2-7b-chat-hf.yml | 3 +++ .../chatbot-inference-llama-2-7b_70b-chat-hf-hpu.yml | 3 +++ .../workflows/chatbot-inference-mpt-7b-chat-hpu.yml | 3 +++ .github/workflows/chatbot-inference-mpt-7b-chat.yml | 3 +++ .github/workflows/deploy-test.yml | 11 +++++++++++ .github/workflows/format_scan.yml | 3 +++ .github/workflows/llm-test.yml | 7 +++++++ .github/workflows/optimize-test.yml | 11 +++++++++++ .github/workflows/publish.yml | 5 +++++ .github/workflows/sparse_lib_CI.yml | 3 +++ .github/workflows/trellix.yaml | 3 +++ .github/workflows/unit-test-engine.yml | 3 +++ .github/workflows/unit-test-kernel.yml | 3 +++ .github/workflows/unit-test-neuralchat.yml | 3 +++ .github/workflows/unit-test-neuralspeed.yml | 3 +++ .github/workflows/unit-test-optimize.yml | 3 +++ .github/workflows/windows-test.yml | 3 +++ 20 files changed, 82 insertions(+) diff --git a/.github/workflows/build-container.yaml b/.github/workflows/build-container.yaml index b6ef1d207cf..4285231e3be 100644 --- a/.github/workflows/build-container.yaml +++ b/.github/workflows/build-container.yaml @@ -3,6 +3,9 @@ on: workflow_dispatch: # Can be manually executed schedule: # 1/week Sunday at 07:00AM - cron: "5 7 * * 0" +permissions: + contents: read + jobs: build: container: # MLOps Dev container for Compose Automation diff --git 
a/.github/workflows/chatbot-finetune-mpt-7b-chat-hpu.yml b/.github/workflows/chatbot-finetune-mpt-7b-chat-hpu.yml index 175df5ab40a..0eeb04e6fe7 100644 --- a/.github/workflows/chatbot-finetune-mpt-7b-chat-hpu.yml +++ b/.github/workflows/chatbot-finetune-mpt-7b-chat-hpu.yml @@ -6,6 +6,9 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-ft-mpt-7b-hpu cancel-in-progress: true +permissions: + contents: read + jobs: finetuning: name: finetuning test diff --git a/.github/workflows/chatbot-finetune-mpt-7b-chat.yml b/.github/workflows/chatbot-finetune-mpt-7b-chat.yml index 37177d55fa7..b7a0bb28dc7 100644 --- a/.github/workflows/chatbot-finetune-mpt-7b-chat.yml +++ b/.github/workflows/chatbot-finetune-mpt-7b-chat.yml @@ -6,6 +6,9 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-ft-mpt-7b cancel-in-progress: true +permissions: + contents: read + jobs: finetuning: name: finetuning test diff --git a/.github/workflows/chatbot-inference-llama-2-7b-chat-hf.yml b/.github/workflows/chatbot-inference-llama-2-7b-chat-hf.yml index ade6b97c9e5..61f1773cd4d 100644 --- a/.github/workflows/chatbot-inference-llama-2-7b-chat-hf.yml +++ b/.github/workflows/chatbot-inference-llama-2-7b-chat-hf.yml @@ -6,6 +6,9 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-inf-lla-7b cancel-in-progress: true +permissions: + contents: read + jobs: inference: name: inference test diff --git a/.github/workflows/chatbot-inference-llama-2-7b_70b-chat-hf-hpu.yml b/.github/workflows/chatbot-inference-llama-2-7b_70b-chat-hf-hpu.yml index bbefd811f6b..0ddb3cc3b89 100644 --- a/.github/workflows/chatbot-inference-llama-2-7b_70b-chat-hf-hpu.yml +++ b/.github/workflows/chatbot-inference-llama-2-7b_70b-chat-hf-hpu.yml @@ -6,6 +6,9 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-inf-lla-7b-hpu 
cancel-in-progress: true +permissions: + contents: read + jobs: inference: name: inference test diff --git a/.github/workflows/chatbot-inference-mpt-7b-chat-hpu.yml b/.github/workflows/chatbot-inference-mpt-7b-chat-hpu.yml index 0ab4630031e..02c7eb328f9 100644 --- a/.github/workflows/chatbot-inference-mpt-7b-chat-hpu.yml +++ b/.github/workflows/chatbot-inference-mpt-7b-chat-hpu.yml @@ -6,6 +6,9 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-inf-mpt-7b-hpu cancel-in-progress: true +permissions: + contents: read + jobs: inference: name: inference test diff --git a/.github/workflows/chatbot-inference-mpt-7b-chat.yml b/.github/workflows/chatbot-inference-mpt-7b-chat.yml index e7ed8c612fc..bf0601615f9 100644 --- a/.github/workflows/chatbot-inference-mpt-7b-chat.yml +++ b/.github/workflows/chatbot-inference-mpt-7b-chat.yml @@ -6,6 +6,9 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-inf-mpt-7b cancel-in-progress: true +permissions: + contents: read + jobs: inference: name: inference test diff --git a/.github/workflows/deploy-test.yml b/.github/workflows/deploy-test.yml index 683665ba2ed..b4625842967 100644 --- a/.github/workflows/deploy-test.yml +++ b/.github/workflows/deploy-test.yml @@ -18,8 +18,15 @@ env: EXTRA_CONTAINER_NAME: "utTest" EXTRA_CONTAINER_NAME2: "codeScan" +permissions: + contents: read + jobs: Deploy-Workflow: + permissions: + actions: read # for dawidd6/action-download-artifact to query and download artifacts + contents: read # for actions/checkout to fetch code + pull-requests: read # for dawidd6/action-download-artifact to query commit hash runs-on: itrex-node strategy: matrix: @@ -127,6 +134,10 @@ jobs: retention-days: 60 # 1 <= retention-days <= 90 Genreate-Report: + permissions: + actions: read # for dawidd6/action-download-artifact to query and download artifacts + contents: read # for actions/checkout to fetch code + pull-requests: 
read # for dawidd6/action-download-artifact to query commit hash runs-on: itrex-node-spell needs: [Deploy-Workflow] steps: diff --git a/.github/workflows/format_scan.yml b/.github/workflows/format_scan.yml index 2c8fbfed2eb..db3f280c33d 100644 --- a/.github/workflows/format_scan.yml +++ b/.github/workflows/format_scan.yml @@ -23,6 +23,9 @@ env: DOCKER_FILE_NAME: "codeScan" CONTAINER_NAME: "codeScan" +permissions: + contents: read + jobs: format-scan: runs-on: itrex-node-spell diff --git a/.github/workflows/llm-test.yml b/.github/workflows/llm-test.yml index 9b5b68d74a4..cdf4374f1e9 100644 --- a/.github/workflows/llm-test.yml +++ b/.github/workflows/llm-test.yml @@ -25,6 +25,9 @@ env: EXTRA_CONTAINER_NAME: "codeScan" +permissions: + contents: read + jobs: LLM-Workflow: runs-on: spr @@ -83,6 +86,10 @@ jobs: retention-days: 60 # 1 <= retention-days <= 90 Generate-LLM-Report: + permissions: + actions: read # for dawidd6/action-download-artifact to query and download artifacts + contents: read # for actions/checkout to fetch code + pull-requests: read # for dawidd6/action-download-artifact to query commit hash runs-on: itrex-node-spell needs: [LLM-Workflow] steps: diff --git a/.github/workflows/optimize-test.yml b/.github/workflows/optimize-test.yml index c0ae69e4beb..527c4a1a53f 100644 --- a/.github/workflows/optimize-test.yml +++ b/.github/workflows/optimize-test.yml @@ -19,8 +19,15 @@ env: EXTRA_CONTAINER_NAME2: "codeScan" +permissions: + contents: read + jobs: Optimize-Workflow: + permissions: + actions: read # for dawidd6/action-download-artifact to query and download artifacts + contents: read # for actions/checkout to fetch code + pull-requests: read # for dawidd6/action-download-artifact to query commit hash runs-on: itrex-node strategy: matrix: @@ -132,6 +139,10 @@ jobs: retention-days: 60 # 1 <= retention-days <= 90 Genreate-Report: + permissions: + actions: read # for dawidd6/action-download-artifact to query and download artifacts + contents: read # for 
actions/checkout to fetch code + pull-requests: read # for dawidd6/action-download-artifact to query commit hash runs-on: itrex-node-spell needs: [Optimize-Workflow] steps: diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 2d9a99d1d51..34a01a10a36 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -5,9 +5,14 @@ on: branches: - main workflow_dispatch: +permissions: + contents: read + jobs: build: + permissions: + contents: write # for peaceiris/actions-gh-pages to push pages branch runs-on: ubuntu-latest steps: diff --git a/.github/workflows/sparse_lib_CI.yml b/.github/workflows/sparse_lib_CI.yml index 035c456e9e5..4a66ad63d62 100644 --- a/.github/workflows/sparse_lib_CI.yml +++ b/.github/workflows/sparse_lib_CI.yml @@ -16,6 +16,9 @@ env: DOCKER_FILE_NAME: "unitTest" CONTAINER_NAME: "utTest" +permissions: + contents: read + jobs: sparselib: runs-on: itrex-node diff --git a/.github/workflows/trellix.yaml b/.github/workflows/trellix.yaml index f9167339f27..0a4a7a3ff71 100644 --- a/.github/workflows/trellix.yaml +++ b/.github/workflows/trellix.yaml @@ -3,6 +3,9 @@ name: Trellix Command Line Scanner on: workflow_dispatch: +permissions: + contents: read + jobs: Trellix: runs-on: inner-source diff --git a/.github/workflows/unit-test-engine.yml b/.github/workflows/unit-test-engine.yml index 94c2a8c5166..d0045bac863 100644 --- a/.github/workflows/unit-test-engine.yml +++ b/.github/workflows/unit-test-engine.yml @@ -28,6 +28,9 @@ env: EXTRA_CONTAINER_NAME: "modelTest" CONTAINER_SCAN: "codeScan" +permissions: + contents: read + jobs: engine-unit-test: runs-on: [self-hosted, linux, X64, itrex-node] diff --git a/.github/workflows/unit-test-kernel.yml b/.github/workflows/unit-test-kernel.yml index b551049081d..31829d87057 100644 --- a/.github/workflows/unit-test-kernel.yml +++ b/.github/workflows/unit-test-kernel.yml @@ -22,6 +22,9 @@ env: CONTAINER_NAME: "utTest" EXTRA_CONTAINER_NAME: "modelTest" +permissions: + 
contents: read + jobs: unit-test: runs-on: [self-hosted, linux, X64, itrex-node] diff --git a/.github/workflows/unit-test-neuralchat.yml b/.github/workflows/unit-test-neuralchat.yml index 1289e0ad2d1..f2a6e15b2ac 100644 --- a/.github/workflows/unit-test-neuralchat.yml +++ b/.github/workflows/unit-test-neuralchat.yml @@ -35,6 +35,9 @@ env: CONTAINER_SCAN: "codeScan" GOOGLE_API_KEY: ${{ vars.GOOGLE_API_KEY }} +permissions: + contents: read + jobs: neuralchat-unit-test: runs-on: [self-hosted, Linux, X64, itrex-node] diff --git a/.github/workflows/unit-test-neuralspeed.yml b/.github/workflows/unit-test-neuralspeed.yml index 14d9a538243..78d41a4dc70 100644 --- a/.github/workflows/unit-test-neuralspeed.yml +++ b/.github/workflows/unit-test-neuralspeed.yml @@ -20,6 +20,9 @@ env: CONTAINER_NAME: "utTest" EXTRA_CONTAINER_NAME: "modelTest" +permissions: + contents: read + jobs: neural-speed-unit-test: runs-on: [self-hosted, linux, X64, llmruntime-node] diff --git a/.github/workflows/unit-test-optimize.yml b/.github/workflows/unit-test-optimize.yml index 3bbc782c76e..4d11947d92c 100644 --- a/.github/workflows/unit-test-optimize.yml +++ b/.github/workflows/unit-test-optimize.yml @@ -32,6 +32,9 @@ env: EXTRA_CONTAINER_NAME: "modelTest" CONTAINER_SCAN: "codeScan" +permissions: + contents: read + jobs: optimize-unit-test: runs-on: [self-hosted, Linux, X64, itrex-node] diff --git a/.github/workflows/windows-test.yml b/.github/workflows/windows-test.yml index 1a6576d5761..f08d1059d47 100644 --- a/.github/workflows/windows-test.yml +++ b/.github/workflows/windows-test.yml @@ -23,6 +23,9 @@ env: SCRIPT_PATH: ${{ github.workspace }}\.github\workflows\script WORKING_DIR: ${{ github.workspace }} +permissions: + contents: read + jobs: Windows-Binary-Test: runs-on: 'Windows' From 25e37417d1130688140a323bcb2c2279c2a0521f Mon Sep 17 00:00:00 2001 From: Liangyx2 Date: Fri, 21 Jun 2024 13:48:00 +0800 Subject: [PATCH 12/21] solve codescan#57 in chatbot backend (#1626) Signed-off-by: Liangyx2 
--- workflows/chatbot/inference/backend/fastrag/fastrag_service.py | 3 ++- workflows/chatbot/inference/requirements.txt | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/workflows/chatbot/inference/backend/fastrag/fastrag_service.py b/workflows/chatbot/inference/backend/fastrag/fastrag_service.py index 7d01c0d1659..fd3ec89e329 100644 --- a/workflows/chatbot/inference/backend/fastrag/fastrag_service.py +++ b/workflows/chatbot/inference/backend/fastrag/fastrag_service.py @@ -46,6 +46,7 @@ from database.mysqldb import MysqlDb from starlette.responses import RedirectResponse from mysqldb import MysqlDb +from werkzeug.utils import secure_filename logger = build_logger("fastrag_service", f"fastrag_service.log") parser = argparse.ArgumentParser() @@ -473,7 +474,7 @@ def query(request: QueryRequest): if request.blob: file_content = base64.b64decode(request.blob) random_suffix = str(uuid.uuid4().hex) - sanitized_filename = os.path.basename(request.filename) + sanitized_filename = secure_filename(request.filename) file_path = f"/tmp/customized_doc_{random_suffix}_{sanitized_filename}" with open(file_path, "wb") as f: f.write(file_content) diff --git a/workflows/chatbot/inference/requirements.txt b/workflows/chatbot/inference/requirements.txt index df899c47d4f..e99461277fa 100644 --- a/workflows/chatbot/inference/requirements.txt +++ b/workflows/chatbot/inference/requirements.txt @@ -9,3 +9,4 @@ peft rouge_score sentencepiece torch +werkzeug From 141833910c802caf3ac3bc1f34d616e0a410bed2 Mon Sep 17 00:00:00 2001 From: Liangyx2 Date: Fri, 21 Jun 2024 13:49:04 +0800 Subject: [PATCH 13/21] solve codescan#53-56 in neural-chat (#1627) Signed-off-by: Liangyx2 --- .../neural_chat/models/base_model.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/intel_extension_for_transformers/neural_chat/models/base_model.py b/intel_extension_for_transformers/neural_chat/models/base_model.py index 1aa31ef1782..5f3616f89ff 100644 --- 
a/intel_extension_for_transformers/neural_chat/models/base_model.py +++ b/intel_extension_for_transformers/neural_chat/models/base_model.py @@ -58,6 +58,16 @@ def construct_parameters(query, model_name, device, assistant_model, config): params["device"] = device return params +def safe_path(*paths): + # Prevent path traversal by ensuring the final path is within the base path or assets_path + current_working_directory = os.getcwd() + path_parts = current_working_directory.split('/') + base_path = '/' + path_parts[1] + assets_path = '/intel-extension-for-transformers/intel_extension_for_transformers/neural_chat/assets/' + final_path = os.path.abspath(*paths) + if final_path.startswith(base_path) or final_path.startswith(assets_path): + return final_path + class BaseModel(ABC): """A base class for LLM.""" @@ -158,7 +168,7 @@ def predict_stream(self, query, origin_query="", config=None): my_origin_query = origin_query if is_audio_file(query): - if not os.path.exists(query): + if not os.path.exists(safe_path(query)): raise ValueError(f"The audio file path {query} is invalid.") query_include_prompt = False @@ -181,7 +191,7 @@ def predict_stream(self, query, origin_query="", config=None): if response: logging.info("Get response: %s from cache", response) return response['choices'][0]['text'], link - if plugin_name == "asr" and not os.path.exists(query): + if plugin_name == "asr" and not os.path.exists(safe_path(query)): continue if plugin_name == "retrieval": try: @@ -281,7 +291,7 @@ def predict(self, query, origin_query="", config=None): config.ipex_int8 = self.ipex_int8 if is_audio_file(query): - if not os.path.exists(query): + if not os.path.exists(safe_path(query)): raise ValueError(f"The audio file path {query} is invalid.") query_include_prompt = False @@ -302,7 +312,7 @@ def predict(self, query, origin_query="", config=None): if response: logging.info("Get response: %s from cache", response) return response['choices'][0]['text'] - if plugin_name == "asr" and not 
os.path.exists(query): + if plugin_name == "asr" and not os.path.exists(safe_path(query)): continue if plugin_name == "retrieval": try: From 8992791fcbe245b915d71d5645f6b7da318cf8b9 Mon Sep 17 00:00:00 2001 From: Liangyx2 Date: Fri, 21 Jun 2024 13:50:26 +0800 Subject: [PATCH 14/21] solve codescan#43-45 in neural-chat (#1628) Signed-off-by: Liangyx2 --- .../neural_chat/models/model_utils.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/intel_extension_for_transformers/neural_chat/models/model_utils.py b/intel_extension_for_transformers/neural_chat/models/model_utils.py index dd0c2c99102..fd187f138e9 100644 --- a/intel_extension_for_transformers/neural_chat/models/model_utils.py +++ b/intel_extension_for_transformers/neural_chat/models/model_utils.py @@ -1009,17 +1009,19 @@ def is_llm_runtime_model(model, device): def remove_prompt_history(model_name, prompt): result = prompt if re.search("llama", model_name, re.IGNORECASE): - matches = re.findall(r'\[INST\](.*?)\[/INST\]', prompt) + matches = re.findall(r'\[INST\]([^\[]*?)\[/INST\]', prompt) if matches: result = "[INST]" + matches[-1] + "[/INST]" elif re.search("chatglm", model_name, re.IGNORECASE): - pattern = re.compile(r'问:.*?\n答:', re.DOTALL) - matches = pattern.findall(prompt) - if matches: - result = matches[-1].replace("问:", "").replace("\n答:", "").strip() + last_q_index = prompt.rfind("问:") + last_a_index = prompt.rfind("\n答:") + if last_q_index != -1 and last_a_index != -1 and last_q_index < last_a_index: + result = prompt[last_q_index + len("问:"):last_a_index].strip() elif re.search("neuralchat", model_name, re.IGNORECASE): - matches = re.findall(r'### User:.*?### Assistant:', prompt, re.DOTALL) - if matches: + start = prompt.rfind('### User:') + end = prompt.rfind('### Assistant:') + if start != -1 and end != -1: + match = prompt[start:end+len('### Assistant:')] result = ''' ### System: - You are a helpful assistant chatbot trained by Intel. 
@@ -1028,7 +1030,7 @@ def remove_prompt_history(model_name, prompt): but will refuse to do anything that could be considered harmful to the user. - You are more than just an information source, you are also able to write poetry,\ short stories, and make jokes. -''' + matches[-1] +''' + match return result From 851d1575f3e4c5e1f7fc5ca66f5cfe50f14f99e2 Mon Sep 17 00:00:00 2001 From: Liangyx2 Date: Fri, 21 Jun 2024 13:51:13 +0800 Subject: [PATCH 15/21] solve codescan#5-42 in docs (#1629) Signed-off-by: Liangyx2 --- ...le Processors Product Specifications.html" | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git "a/intel_extension_for_transformers/neural_chat/assets/docs/4th Generation Intel\302\256 Xeon\302\256 Scalable Processors Product Specifications.html" "b/intel_extension_for_transformers/neural_chat/assets/docs/4th Generation Intel\302\256 Xeon\302\256 Scalable Processors Product Specifications.html" index ed26bdf1b00..54b492697fa 100644 --- "a/intel_extension_for_transformers/neural_chat/assets/docs/4th Generation Intel\302\256 Xeon\302\256 Scalable Processors Product Specifications.html" +++ "b/intel_extension_for_transformers/neural_chat/assets/docs/4th Generation Intel\302\256 Xeon\302\256 Scalable Processors Product Specifications.html" @@ -640,7 +640,7 @@ \ No newline at end of file From 31d3f2678f1aeea20b0fd2c0da800876293d5c48 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Fri, 21 Jun 2024 15:14:56 +0800 Subject: [PATCH 16/21] Fixed issue of loading woq model for intel GPU (#1614) Signed-off-by: Cheng Penghui --- .../transformers/modeling/modeling_auto.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/intel_extension_for_transformers/transformers/modeling/modeling_auto.py b/intel_extension_for_transformers/transformers/modeling/modeling_auto.py index 3cbd2bb2f25..c0a9925494a 100644 --- a/intel_extension_for_transformers/transformers/modeling/modeling_auto.py +++ 
b/intel_extension_for_transformers/transformers/modeling/modeling_auto.py @@ -180,7 +180,7 @@ def build_woq_model(model, quantization_config): def convert_model_to_public(model): # reorder weight and scales if they have been transposed - if model.device == "xpu": + if model.device == "xpu" or (isinstance(model.device, torch.device) and model.device.type == "xpu"): for name, module in model.named_modules(): if isinstance(module, WeightOnlyQuantizedLinear): if module.weight_transposed: From fb6120a5a76c1ebe50e34cebc75943bd9d10ff5d Mon Sep 17 00:00:00 2001 From: "Wang, Zhe" Date: Fri, 21 Jun 2024 15:26:22 +0800 Subject: [PATCH 17/21] disable qbits lib import when using gpu version itrex (#1611) Co-authored-by: Cheng, Penghui --- intel_extension_for_transformers/qbits/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/intel_extension_for_transformers/qbits/__init__.py b/intel_extension_for_transformers/qbits/__init__.py index 5e48eeb8fc6..c23599090dc 100644 --- a/intel_extension_for_transformers/qbits/__init__.py +++ b/intel_extension_for_transformers/qbits/__init__.py @@ -16,4 +16,5 @@ # limitations under the License. 
import torch -from intel_extension_for_transformers.qbits_py import * # pylint: disable=E0401, E0611 +if not torch.xpu._is_compiled(): + from intel_extension_for_transformers.qbits_py import * # pylint: disable=E0401, E0611 From 4e96249a319d53a0151b7cf677ccaa23cbc0fa48 Mon Sep 17 00:00:00 2001 From: "Sun, Xuehao" Date: Fri, 21 Jun 2024 15:55:03 +0800 Subject: [PATCH 18/21] remove unused binary (#1631) Signed-off-by: Sun, Xuehao --- .../test/kernels/benchmark/ci/benchmark | Bin 2849464 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100755 intel_extension_for_transformers/transformers/runtime/test/kernels/benchmark/ci/benchmark diff --git a/intel_extension_for_transformers/transformers/runtime/test/kernels/benchmark/ci/benchmark b/intel_extension_for_transformers/transformers/runtime/test/kernels/benchmark/ci/benchmark deleted file mode 100755 index afc7f483231690a1f5c3860b4c75393cb0cc3f75..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2849464 zcmeF4378yJ)%Po58}??CmbKWz0Fv}%oq%9LGB6H4QTGfZBe?~(Vd=ebi= zzrORYTj!p;)~)UjH!V1B;)DsCS+Z?r9c@jZ<5K&U5pjgC%N-_1mgQKD*0%KbU~7_P zVr@>rkxy~oK9N7reVxQJ7c~DjET7|kFNvpnlP0s7z4^adTYe=A>fTClO!I$Rm_tu4 z2>Y*mWWOk)?koQ*ujz3JnY}eDd**;2P z4Tak&+)ZpB3V)>I(11jP}2y^D8MVrLaGppFrVTbbJ%-pQZg-w7-b<^C_6^ zt8~7c!Z+ynWC}l^upNcJQ!v}H6n3P~KSQB%OVasc6z(JVBMJx6@gWpGMaK_N7^HA4 zor^12_tLp>w=N21`?~BuPRCQ|I7|D_Qn;CpzeM4q6b_|ewug9Ub&X^DgQ&r>X+kN=y*CEn|wA~miDJmI6}_%p#7IATuk8$^!ZHM zzeM{k3dhs&*|h&0?YE(y z-$nah(f;cceof&;3bzt_o%SB>KTrGnX+N9xX1jsH)fA4Va0&4|h0`dQZEp%0I_{P8 z8|nPha%?`=?Nh{>DO^Wj0qt+4eJh2_DLhQ!L}F1%}4uw4- z&K?yPoll?b44<2i52fG$f0dX|Y?g^2+aY=&`(MaDsQBL#cidq zRQ~61`rxPfq3kEi{%f>%NP59|hf>B({n| z2Z4493n?5VAAW>*j=~~3e}Tf5!mlQFp&ai($7Xwo_6O7cJJS{7wqT3Hu1`8|64B`x9yJQuu&qJWj`xC|p42?G$dH@J0FTWZK_JpPf#7n?e(X zed#<;p@ojkwwA(ublgY#U1-0W_LtNCS_(JQ@dqh9OveuGb<=Lfrv;oQ`)||!B#Nus zVR|6@6z!+T@msWizr=l<_DSJpcMhFTlk=nKI7Q*R6`wy!$KS6wuRMIB;$$NIpV@Yy 
z&=MU{{Xu(!hvlG+@W*^n?fjdt&$zh_Xmhd$e<@&h^kCGBlF{)_C-rnq%OhnKS=RcP?#s|$FeuOD@E%XvzKGTUon4VTO#~5Ip0b4J;Gdfv44b)e?sB+6s8LQFWH;jG4y{UwEv{&{F#ooH6O@UuLl+T3ni|i^W@+W;)he1 zs~^aIGO_)EnbXZFd=K~!<@||?Yy3voWx^)Nz8C&?YvEe~*VBK|!C@5VN!kxe+_UKT zC<^CNFxz{H5AmV(6z%t>^GOo-gR(ate@x>1T=r*Fd}h#X6s8IL8twOz2JK22VRokSj zpl*V*m9xdftLS{^%Wia>e1S93w!&0mk#q2p3C~|eDV>n8CeE#UZ8NKRzr?l^ zChof1HYa4P2@?jld}?R_iUb4KoJKui$wmaOAu(zwPvz+}L zE4|sV`)@yS*Q9r>Lp|s%ny;`fOBCviEh;zT*QVYz<=?brk5SOg^qX)qb=)^77~5BF z-;$GOXupPnsgIu(W_Hh0_)f*S9#+<^%FN!BrI!S}EPM6w@6&lvnCSy(SvKf(3ZXfm z?M(`%%>I;u`WGEqzo201PSYpUGC_w?|NeL8KsK#+{z&I*5`fjElumgnz1?~4}AHn#wZl*k$&Gi3G|H+ht zeJI!z_NQRB17v?7?GK_*PvH;>hf*-*)oiA3V>VO2Orc=fH&edMm_zIFS#)f+M%tVD z@CXV=QZQ|%X%ig^roJ;<6YY~HoxN7Fw;JtOd$>2jBm_l+8xthFm=FL z6ii#Do2fJ0it`paUQEH%=cb?5LE$_KofOR0O?$1=b98LR3%wNjDD+b>+Xb{Y^@gd3 zKP1fXi|F{0igQz!TuQ<8P0Xh4%++*$4F%I*xsJm16ih$uMha%TiT1Zp_^6zJjP`j7 zw^R5yg*z$SML{>8jz3A^UJCbB@cZfb(-r5#bo?0#pQB*if04fN2ghWV9ro3;9?DMnOtJZm?VrE@(mU>XI5T(P zwC(54IlLwJ)Y0A+`@M138x3c_m`L;=w*Q?6-T%gITkO2UAwQqyCGT71*zI4)E-L=% z=C>c+_csr}HvLOiJbh`?+_z47{?_WqJyZ`d-)4n_M`x(v9a?K-On*QqTi*`EYlb_9;_t`_g*?-rQCtSbt7O$^ed*h_bpV|NUJ%9Oj z_Sr)ox?zt+!yOk6-8=Z!;+OvQ%TL_<^1X?R|NgC~j(Oq_KiYiDLED=*vfzw;UU}}c z-JZQPdDTnXUG&D!{3m{Q>+ihrB;Gthn}@|GHuC z8Mi-i;H(pues24FFL56!v|aO!zC%u$^Oao|cf4@_y4>cEzt(@vOD_(dar=Tj?mm3N zVT}jxzx$tt-#GY!*Kc2R-O$ckY;XVa?QdUq@5;|zGIZ~+UjE$<-(US@?t6RwZp#VhEdTIrKfbFgIP$rtj(PHlXRf&Bl?7*< z@xrCa!q?B7xcVc%-{A)-dynH6t-QH!)wSRJ-A|5MapsP{pL*(>i6g$@9`POj$Bl!Z zU%t)Nf7*R<$Ftvmad7UFuk~NO$eHxrpX__bRR>-FvmZ4~IPB#Mezd=RV;EIIH~>2+wXY#%<#l@3$Hlk#BKgK z@#*uXuU!6(LYTd*jCK68)cl z|E<|YzYYF-@Tt%KZSRpczx=6Jwr=aXde&d=8<@2Ak{kZH@}9|i-|>NIPwe&O8{U5O z(+g8K-?rVQmo)wM=A9-jpFex{EAy{^VIqRwwM{_WoCoMfuT|J%gse2*H}{aFp2in`A*Iik|AAXaoZl7f}uKURytH)2$eO1es(`)3*B)V=j zyS-E+{(bvZ*I!n{p8wrdUH>o|gjY-Ft2ONJ)JV?^^3Q7NUw&wH{`(r~xu!-r%B-la zv%H4CeP)~L@l8LyS~|C&NrYAbRr{V%Vf ze=p@@we);)7^!2OpNqbc1`5O8D zUh>as>AaFA(W>c_n^fiJ*YMjrYuxXY8g?_|tZLWI*NA^`4Zl6EMmkTb;h&Rgr1R5< 
zSGUhljdXsch8^~%$+v3v_0JmVIjTndZ){s#=NC2nr>=(GUaDcAw`-*Tup0ci8trCX z4f{-}!T(vKUj1~9cC)Jn->t^|{&Hq@yS-V%ZVkQF;}?#u&aZc>^KaI;-#csMdw06J z&c4T1_rnuvlz_;{bCKisD{pk zRPL(TbMG4V|5=UudY=5Mntz^Eqo4o28v2tCs_qY8so_^=P(4#E{WB?ltMOA7Rp-C0 zVgGY#l*iU7^lXNnzOLPtpZF!jS7%YZy+5sBDg5Dph)faZb`NA;x}B|L=5)>W_{l~m zVL3mh2((f1qU}#vj-NV>1ZmS}V^6)Gd9aYV$XM6VznkuAD(wtq%{X-$@-KX)0Y)8>z@hb4ac>FuJJJCPrl?R~RY$9kOaSM|4| z^qchf`*C_2Il8q+Gv9AX7R>DirIq{S42Y&i8wv#J~6hX01@da;LK0 z)c&nvw~@yyz}C6Dh{^*iUtbsya-Qn+$g7J7lqXznYJ^^SU9 z2QFmZ_@4Ovo^meh*xj7|SH+%-4-@?x7`!A2+I%td;10fSo!r2$=1TlkoKCgRUru8_ zavO8=T*7R3Q@v{L%NIY9?6N-D!5sbJi5;1zTREM9TzA59=7nE#dhQcD97^Uj`l)j{ z{=RZwb+k9!dLf?v`n zhNIt3rwlUX%f5j9t8?sd|aF&m1M`8KQv$ZFSp6JFA(Z zne^M;Y|rZ%Tes2Y(S3oR|LP#NbM7Fvb5i2Zl5!W^%sS^zkVEZ2Owp6#sb7PhB!8lBu3?1x{Ha`ytA8h>yH*dGp#)M%YMweM_U0kT^z z`I|a8Zig1B9~`My)!z;wgG7D;<#E6L_>e?oNDP{Fm14ED`+&Cq*$^CBECvI>8H^ zPHiuCr*_2Hry%9ak@$1SuOoYYn{}vqiMBV$nN9o@)c@IL)(;^cucV6C*rD(p&PTnk z$0)wh$%~Gb_ZumBCcbkS+fDs*w=}26ev}ML8@WNWZ6|gvT*UeK4{@NEDcMFRcNg=w z>Nx%`4s-9|{UUJn=FG1UJC~rmZ$<5AqyyzWA@K_ra_QFldYkNT;$y$-%nu5O_VK`} zYzKQ%eUuO_zkQN^8~TC&r2dt$TL}LA7Wr*tHz?O0b^i>?Y)j5sh zV>xZ6^hACF`TnaGj_)t$`cV7HdkZi8gAK3#cDVEpN5s#yT;D+D)TFa;IoAWK^Wb#m zSU-G^{Kv!}k@i}$-Fky6Uc<3|I5Wlf8%nvLY>KvaX-E7E*-ti?a=Pj)*1_`IyAyLP zkEhR&>psQhOY6y^=b^+C6CcXvZh+k5)i0{lg{Fl~sO1oga!Rb*uFA_ho#6P+8(Ylx{+s*4C2cYdt zPNsFg^t*g%-_`&Ba-5_e`r9`VH+FEZiP7?=mu%ibbep@KUhC_ zXRr>|1Akr2=?|r!M(z}CroXVWS$XJ5h6~?T^IiI@dS5S72hXJ6hW4>Y&$A*9<#ggq z=Bak>M`=Aa?MUV>w96M!gJpD5&~MyP+GYFcxL;jz4BI(?@$77>&y9|MG22bkzm523 zqJzt+jysN{4t$jU%Q#<@KXE+sf}~T+SI6OeT^s6wVe+e}KP>I>Sq5SR}&A(CozlzI?#vhdW zZRqQ4PqoigR1p~+ET{hxds;(mPp!xHlm21qRc_ZlAEk=c>XF|XJ2*1#P&+IVKTmhE z{nh{9GoS4lKs)$7$}gjn7kg4NqAelyPxx!T?)GAbH>e#k>30U=_xq5<4_@c{)%=}e zi`{)AsRx@rUplzOMGWCP@9+4E_IusKSlXf9d|w z6}5go<9OC@hJJXj)Y~DnQ+u|t4%P!-J(2my-JE`%e>j*<&2_OJ__g@I^WnHX?~(pS z;^nwbuhcW)0GCts+xg<>Sg!XG`}k6CoVFPo`h05lO?t3gA9bwU7t{l9Q@v{Z(3bjN z%kLdp-)_a}(Q>-0=sTCj?`s{|)8u2__MDHlrc#?QHfKvCkFk 
zZ`zJOa};x|2VPjld}xsU?IJdZHK(1qmx-s}ZDNk)`U>G#e)l6kG56IBNmp+ zy{xbOsLLf^axj0`zAeWOpdCCaWv-jB3evAo|NOek@zXGV@1pcac82-ednmz%V?EH& z%G`RCZiF_O&a=*u^e1j)zdBs}YDe+6l=NG)U%lY{tmAuZ0Im0){D9aE`iD=_eMRXx zfv>w4o6mYx+Q9(Ezeh`bj^+LL4(k`8JnncH>jzhJ!rzMSj@}QJ`q_thhI;9j7NESp z6tyqATlJ9sPi?{aHrU~H>0bpu;`^FR@ywQydM|jE%Zq$$E!u^3a?npR?~WMz*ia8# z8r45EA1LExs;;ALXb+B$e)!fb=2#D`rFt*&6EmSf+jQ~&x2fD2p1XnVrt{B>rn8-g zp?!Rj{KS-tdZ=%&lYaXU+?SP|x9G&|lD6KqWgUI}ZM}<5oYUFU-L|M@ac7&A%`WTe z>CX1`xAgXBvmCWX#FoC6?rcYQN57R_N;~@3i1fDQdgy;Uds>z>TG_Lfbf#B!!DoH_ zJ-u!8A^!titM$BWS4(GSPiwY8X-}@dgA%f&t-r0c-=v4)tmtdYwk)M2XM5ZFSM+vU zef_s;rI@`Ldd^FwaYU^svt%8rDG&Pvd`r7D@V)qh#HeF#Xy;H5Fy=}&9 z(Y?n$nnriueL=Ky`-}(+ufr#QSE;@ksI7vl#p!su40#)5l@6kR<_I* zw=E@;XP2z%Zt3c1m0ZiV4zy%j`r8If7V-_Sbs<`GYb(0d4YJERdlt8J_UZquZ0qea zS=H6i*VWSB+MaD~Z==jwSFXHClF?c^JC>PSHL2`x@9nuj zFDM=s8DlCy<90O+G)trMbt40n*^2I5Pe&hnzVXJkuAW|W5u@EOQ&lQcv7U}irJU-R z3ChwH2_$wF<(b9BL$4kCx3Zlp{;kZSQq|Iz)%#Ng=2=dzwX&}}*W1zEzm%?a(#&N& zJqC$ngdmre+uPkU!*lIy+idHBD`5nwyFe zx@h63*P0r?);qa~vDCj)Uv7e~(U8hExAe95o60{jQ9}dO6l9ld|EgSD)58Agxt5OJ zdGi-GWGRP@ffmlpHqk#eW=-AP($Pr@3)?!Wl5XfD1NJwzk!d@#{XO&OU(ElvlYDf1 z(!$1ULu2;Do+a7dmhNS3S@TIp_cBsz%uY>Z=e8|vS<%_gA1;YL)sND>9bIjdP3Au| zFCw{F>r@G4&7j3|14AO4m310OkLd4M-2v*f6A$OtrWANwY{ZRjU9PJE17w{qs+)IoIx2+ zj?y?*M=|l$QRw)jg~!x#lw)c;O7u~6M3EIckt zf99G$v$J&Pv#5q_Th>e7-P+Slm2G$byu}L^lb0E%I^EPS4b%E^U0v(zlQEU!hAQ>M z80#X*zxfOMXSAB~I;A{j_xJKWKo4&qmoq5B)LZUHx@Fk+D8t^ zmHWI%cv|dw(d~_1dv1D5&@i2cc5z>Cwxw%ep0Rdn@lxuq^f%0;yj##Yjasgz^uqp`E2*OH z>7AG6C=_43ViEbhDYLU^-`Uxjo*y;!;?ga>)bW;xsNGbNRZ|<|u3Xu?X8Wg`v4U1O z|NeGb%Z9m!sp28I-kyO~v)TsQT37V9Mfp`W$o!S&_NUS9bH+)#M~?K)>q4r-G@sU+ zJSrLu{cYWSJ!Ew%8j-!JjjV_}idAY979%ujx20Uwmv3iAW!1iT#nRffyLK1mlP#y# zNJPWTR_Y@5w`Hj#+11hCv6AdtrLdMubCWTDL$-DaHrLvCAr%`hTBud;M*Xy+mTv57 zZ=o#cCa*5{$|%>l4zG2C;ujmwqJ~)@J+_*Yx*29fPOWUuD(+r#Uu;%o%@#SYR-Dk% zXbx>8gDQ%jDH2s|pduT~<{4D^QopcVJ4}z4@%3&`70<%;9dUBCws{-xSVuPKrK*&o zj;)rcb`N7I3vBI&dnCwopCG<(w;& 
z@sv9$wC2*)dkIC-Z&{5Z$v|lW6gMD)@b96(TmQE%9{C|O>T@XK5JaAsSV7imDPCCzG1CiyV|U1 z3r!ydJgvqUe||^QV~Yl;a-Qz(O8NGmt8Hns0&u%3pD>#o~4MUwqUA@ zh7T39WzkS#%w2n^hR4#2(W-jXiO7cWlqRjCH|&8n%r1BIX)u-TS-hNvlvVrs4LC60 zNbe}_X5$%jm2LkYI{_6H)p1WijqV5I>DI`YXyXI1XlTVFma1Lfc;-^ITv4hj%gldd z4ZD8GGfH>v9h;xbMx(;`IAxS;jHjrNxdNQu@lNW$snq6-z5W|#hl0C{Jfm;w(8?JA zp6E19Se1G`-imEP)joPxxdFz=W2xM=`lX56@)X^MGjH-J>h#h2Q=8gZuhI*6H*D{eYy8nm~6-U&42)wHtm(Kk(4QFoXY9%MV{>4urknwRc04|SX7 z=1yKjYlCj%F2Y6=WwNVF`h-0Wc$J3%|(l%auuAf|^hb5KI$FxPKyP&54t>;D0 zQ>I0aQyQ~vGrFdli5O#tmFY#<^rBR@vy+~CFI~~yYUXO@9k-BNayor%3j3&&O&hn< zSo}%vHs19b`^;dKX_zrywtBm}K|3Xt)hGpNoXOD4s#uXg^?6H9OZ_+=-;_rZG(M*J z8Jf|e=fCt+njWgvTIyh;=@aZ|eMGIHe(jOI`K010Z<`a`T> zRyg~HGr?-Z(TnXyrspbatIAIOf9A50ctg?%(_Lmd&6v4L$0**h{LeQ8PautZnvz^9 z{`9rH9ALwlg)!9=n_gd5ZGYeC)2gZF;lDhs%67K&^+$_STYEZ}=(DT;>zh?B4cK53 zO1lcxADcwa7e^b`Y@j)oW{Y#b;!!IV;!#?rag5)}{hR0+3-m*AUd*&C@y46FVy5%J zYl=4L0hyY|m<@C)AI(;&nBGkzwy6zN`ncmtD_c9xUt!i$FKKJ)>Y{}$G%wCG`VpH) zliy}JU{jM>`!{-KZ`eAH-K#35YNTsa*#>i#{yV&FEH}34^``bHX`-=#*2Cp!j{Ll~ ztSrXrZ&_wMcGOA8QI>R!#nybc<@KkPy*(-vRo#4QR;CuEZ&tG(N0V0NejFW}l39Jl z>i=LruJU<}_Tx^a0mN8xb=*rJMqRtMX!I$NarW59<|ozHJB-yoqS_Is1GQ4-YIU5I zvZ>H{RM^r|dj!~!EiWZANx>?7{gms*o1R*;KVx{vK8|*7d{v)T3#0oBT4Ai2`R}MQ zH3v-Y#3ttTc6d?Q2K9H#GP^(t}wyg;po z1n~kf-dc0iTZ|RLv6KiMKvcP}4Q8P99=PVmLOp7ya8s(`Q5)LPuRhAd*D*8(V|BDz zJ}{>IkTXc37jx7cdpX#Q+rN=65p`j$d@K6jL! 
z%SIpOj44BLg*)Y+K9!0L5UZ5Ulza4`X?^olTFn)E=heJS zG$8N%RT^%rZv)aBf%IhTxDNAhtSfq}F5NIKn_bzG?WC_9Fn8C~weZ;Nh3fsWvNrYw zX5|Wbw3kV=eTcU;(ru5**!p+fXl`G-3sm6W@oNlvOH~HPm9jK}MXw+Y@KdUdzZhW@ zN9#UqO2f?~#mc!WZZvB>^r%){Njaq#;NtCHMWGw3@2r1h@}IhpZ#sP)#u8c)IZG<( zxJk%-@2Zj3rad4U!vpf__3l_2oiUZ1P48WE0jeBpt#8|^yic^gO{+1k(76dLOs3$y z%ihi(>%av2M`OwTD)ri^)#xVq+8BI!)17TBK0S_psJ2bfBvQ@R#9X}EsBLLe^BXV{+~)|2Kvy zkkLn!GVa^xqA?xhS6NY9(WsAWv7R&w|L11(s1_D=mP??H7-bzJkA)lFVK?hP^DToK z?qT`_@2-_soAzF$d>KG|LFd>??RZ{qDCf#3^C#o)S8Uje8TA?aIGTT|ct?MBKb}sQ zsnN+tqwgx2(ceR@w)p}Yp5Wb)C?{^bw+0h?vZ~Afx zxpT9&cq6ZHte;=PMac4U$LN!e<7*F1j&oP4%1p(W`cs>3qS4#r(Yuo4duToSl}C4x z@f9aZ+J^JA6dfGpyZo2lO5DA>Y&!N+H!y5qc8XG*l@Xj{27bP zeu^h;@Bo#I^2Y1NaW7%1GNW9z(x_}6H>J*yNB^;`GY04G?E1OLf9FkC%r-Yv&Y0^s zy1M&R{MW_{t;&oxP9BHrk6z-atZy(=_Acw&sC}beTWkcoe$Gb=FMS1|5um&2-go|0r?9T&DmYpl(zczG2dEz>g(EzEZl_L=pc9bLK3d0l3KR_k)=U6^v7E?q8j@z+~c1xpvgvqPQHby!jT0hAcoS!AG|5jA~j)z!BeM}7^ zJOCd{eM9Dme{0(4y~S#M-7z?_IH^8HtgPR3-G`bbssS7H&CUM)UUMf=P5nPxi;sS< zSvgu1$X-~@{=nK<9Spqtd)qZ0R=m?!{Wyop_4{~wi+M;Qo7b|Sb2_<&OqG_W5UWj<;uqMx%SO$cy28ZN&9$21sHZ#X7-;;{_u5?< z)wRd@bb2i1^M9Zk*r2RN?Hvp^mX^P9WFrRONb^(i1aaH-P&Vx`*66+a@lDaJ z@269a#=l>+hV}m=pO&)m$NjCBrq2Yvy}?jg{dmLvRaCvxPtUZ|H|#9Qp4YZ2%U_sC zKfTk|+e^RuM89*@V!kORD!V5=I71x(Fles7?yx2vO@zIa_;nJ+JLp3PtHPU~&WPj}E?`T?Jfe8oFIh@zTs zcJ|c8^z$C{>rwK8HT_-{ebWK`;!{g^YujS_5doTO(u3!tJ-#?~p>$>&J9_#e zIDH;f23r*iuPHG#zH*-LYpR*gGe7!7LQAqOy}d1~vfXVLWa&4(mi4#Oy=IpU z3}oqRw&^#U=!c^ES7leG*q+mJEBd3~zM$(hL_VHfysE#gkLFXmWU8dnvh=MhG^JS0 zxF=QqtOhM#K9PS{gUnL?8H38qqwi1B%%jq)*6-=7X7i>Qt@J(TG&RsqD;?&MwJ7vc z9!6POVcsQeEv=*cf7-c0JwGzFxyO-C>l9qvZ{?|0(Tk%8LCh1C{xfpQrc0Z3 zUG)13#+%85$oT6^N-KR8NmLV%^S;yDoYmUXMc<&p0%ardcD&jOV zXt-Jb*f6b^etOINT2tg;L`^X&Pt#Zbj-%+=PCA_Ge4PTzD|Q~frZp?H7YcICC*e+>vO7H$J2AF z@ukMJjV-wx6<_0%YD@a@NB%j$sIq9K19a!N(Ev@IT=;$>K#iB)(_o3M{cE7Hyz&g5DQX^{G*<~xD=EhV26I5;pUu>}sL_9;rQjGHl++?~L_goSi8ZNlFYcQ)om!=?ct@?x zrg(T?AN|s$Spy=F#UnOUd;%A2h>2KJ+Iza%ru41oUfyv*TgQ}E>ithy+S}6A7CoMy+|n_* zyEAuqe@{>6c^&Sm?mqLIlhl`=vY5VZlWP0E 
zDcx-=%n$8RZ=XDW%HkCrolB;Wb&r4)(9oxY-oRecGHnWFHT_#>j+OyLmp9SrSCuDN z6X}=HH>bThGJhvnTa=F`SX&C)idbF5>&komk1egODOMc?vo|W{c)j98;;e0swxMGa zk7I3RZCkzuU#-sC&f2Q{|8XV0!gkj7^0{6~<7}(30*k!IvTO?JNvpQC$iJ>7W;WwJZSpz&B)#PvIEj79~{MY%1TJA9{ef z58Qo%`7rR*OUw(vt)DVq4SeJ;%md)THd{vaTmyV)Z{{Pw-4yd8@YEdUA#m$-=4*kE zv@VT)jPZGc_@slL*5%CinxG#QE4?HJ+k^=6CpELqb zh@UvXL-CVl;05uMG;mM+Bm+Dxe&PbR#ZTISmwv+jkOLlwp9}ymh@W`CJ@J!4;A!!b zA>g+7NgjCVG|qP)cp!c<4BWn+>xTkxNBC;suJ8c3CwvWXU-$^{KzI>&C_DsieT37w z7Pu|E1l$q64!A4a+BWY0p71*0zVHO_KzI^(DBK2a-N5Ou2W|^b0e6Hq0(XTwz&+v3 zzCYy369?FLTI2yl%b0{0~TTHqRg z9dK)^HHwlj8@#hvdH2053@V5#(~eMc}~|tiKj`O43sT?uh<6;GU$%+994_X^CG4 zytIn1n*g5r5OW)Nv!tgUcz7PiPXW(KdK!V3B)$W@Fu*#^z@4j^XMnqs9vAqC=(huR zB|SOd*2!B%_2dBX+y#7H4|s4D^C92^lAb*9oTSGGo|g0s1NS6;0eEU9Uw1WdN7Azf z_>iP$1b9O9i@+U8PYB$W_-lce`Z+x%;K7y5Eol$6y%?7C)B(3dKLI=?=}7|5?acXQ z126RPb?bpUlAcE3fuzR)?o0j94BVFVq=6SCeg=51m($|{cci@MfEOh_1Hgww-vgeI z^b7)bHfQ?`0Z*OJ>B$2RE@wUryd>!<0QV$4tASgRo&b38XVzZ>ywt;d1h^yVSqnUD z;`XQnT-&4iM7-X!FW~r%z+K_Zzzf1%;3L8ZfR}`Oz_r~O0-hM)^!UJ2!iRwm%K2*G zuEbvh+!sCqyddWx@KEBf1+Mm72i#dD{ehk0cGma_;J(C90@wKU!0iiJKLuRlJHTy8 zUpw%W@FC!i@I3I5%V?U6HXnHDO6IG9XGA9e?q0$1*8umgW*!39{9Oy&%W(V>aA$z| zI^bcJdEL(Oe922X6Tk}>ar`9mOPJfhRX+t>{i+f9nXKag&#hwK47||GJOezC^t-_Q z3psu}@W5l91FrfW@SNxm0#CigIzzxq&Nh*sO>c;Y#ZKLT9)y&>?T*kLX3l=>lXSNJ;M>1+79)-G{B^gqbF4tPrPJqf%d z>9K*^a@~61IpHbb?undVjljdRI6V&V-~#4p;B`{oGQbmZT^D#-csp?G3{FoDxO*;N zcL2B}=@|rWOL~TY(?m7kI-PSm1YG+e)^2gXa(>L|sROR@ zlfZ{XHwCJE{8GQd|$`~l$ZLXPhN&j}v{?g<|PJ|sL3+!sC!e2t{10NfToTn*e` z!2THkPydtsVGVHg!x7->hehD2pR!Jfykq;wo@;@Z9$;Pqu70=5YcqrTkp6=v)tOuStjd==q?sVo3@O6@&X5a<6ZW{QA@C@+OxvcL3FG>7%_ikHO%XQ`=XNq9=y)+8-a(1o0S4I?+)Bw&heX(f1P<6cg4=LdG=a@GG9}%52@S)Fe z{0#7s2bkx8uN9pE;Qq@T-vb^@-Z@I=AaLtIjz5I_6U_6#(>`+__)s60i(%kint1{F zt;|;gPZwA}0Iq&J0^E}GBJk2(tP=va?qG3IpD!`jz0iAoW*^DAJfXhVz`GKsn8u=vV0r13e zyGM3d1Ke(A{Sn|p9`hpbK++ikSN|^ox8-~taQ7fir?qE1-*dw2fICNW`~+}M;wOQZ 
zrf~dv;9-vaEd^Zt*#U0Lc{A|TmAkTE0e1$OXMlSbb9!9l9>;G-ekt=DaLva7QU;{BhP_2R!#Gb4%*Ex+v9_rFN?Wu69cT_ddgRvw^GKQos{(-iUn7d!iIL zz{8(2Zw78Z&UQ!xcfZN;GsvG}?gCeTYX@Ff&H6du{@ct4fUDg+;A*!a;Nin;w>)s| zFAW2?AK~}~;M!gUz-m@QN{RD80Zv*!v zem!uF-w1qA(%}Hl?ZeLjn}HW*GEW09+1!p~fCv4|UErw|%-eyhU*&*XD>?oE@SJcD zxcb!~aP_M^a9`};15aJR*Bu5vB)kCJyM^Pg1|D9_JOFNgnE43s)smhfa9ge$0{4Wk z1zx(D^-IA0ica7*my0WXPv4gyb0dWL|zSMqi9z{3INK5*M( zUI3ny^sELRN_qm|j-+P|aQ6n*9|7)5{33Em&syN&uectw_Ko}1I^jv+iC=Sk8@Sf< zDd3L8Zv?LOUNi7e(w7EaIQYF$dzu06y^nbg_&U)U0PY;Z@jc)phceFtPsq6sd}Im7 z9|rDyg?RzE?I)x3uLeG1bNm3feIWBS$bT&P4m>C@F9ILgfv+0^w^FRX7P!{K>wwqG zxn;-wB-o#I>VS_3PXJF%=J-kAp~Sa=rzGD~z;pXeitO18d`P$p+!x*sT>W+ccp&jT z;Oe(S!0r7xJ$c|7e;BwY@e9B;egM2)&es6Xy|!m$hY{d~A2DAG+>!GV@bFcRzYe(d zI`hPS@qEunyd-ez&m7+d?(Vf$l+JqO-(i190Z%={I*rIjm^;AT=a@GGFV*i6T{jJU z=uGAr;HfuR#|5tCB8Ob;FaSLDJnMMCUEzbsLykWLyz~O|Jn-Dh%!h#wN_q;wN5np> zfu|)s0q{`buK`~84qtZ!cuCR|0{107Yk}uPzXaTo^sEE!NqlSnxF6;uejV~xIGsu0 z_G~VvDd3LqX5g;yG;pmqT;QI>ZwIdBdH{Gr&OP9Tse4C$I|#fqgSij9Ue1Ss+YKDQ z0Nk0*d<}9r9|4~D6|d_n0#851JOn;6iOc(1;Hg9Tx+UPH_c31w+&hf9bwE7d{exJ) z4tQyRc>;KDHtQsj%eu#U;2B9r3iyzurxCa>+(ABzuiFf~Ao0_{{S?P{ftOBTza0Rc zn9qC&xGg*nT>W+!xFhikz}0U9;J(CP16<=5fm;hWogr|IUjm+y^L4-zw}?L+825+p zW0JqXbMpT-aQ~wmzaDt%Hs%iSLD6Xjp7;XCPXix$hIt0~(8j5|W+-@DWK*5_nE@Y~a4|df=|4Cj~rl z1*fwS_|Q$vn}LVjT<+4q)t_D9b`QsI2d?FG0JtadJ>VLD2)HBXdEkXLoPHm;C3YAF zK9txeY99;0M=snq;;Vs&moN{2J7==a8sNbo^AX^QuQ4wIAGw3|L*NN1M zj|<$pS^N;V`t1PljGTMGE%B>C;A!DQzzbjH^yh(l_c8Z@j~uXHsmopE5C*H^UYmkdy6@j~-XS$A>eC8ClB1sb9^7T|8eH4fm?DO03UjN z%P2i-fLkNXM}Q}qINyuF!wdMjA@J12%+~@h4KObO&$Y7tI^gL?nOpU7Kk+}wI(5K9 z(N6+TimnYjwTgA>fxE&}z(;(J-w3?)A?6P7+@;LZz*CZ*3~*bn>jF;;ZwGGO&H6du zg$tPv053^;27xzAdWL`}&xT7X3hyc>?&zlkB%i;I>@X2A+75(^(I^An{YcgP*c~ zBXDB^1-OaLfC|7fd&&Dsz^%QRkJR8{4Q|bdr^h?_kf>jg z1fCN++rWLfZVI_v&jB8Yeg^m|%}3y>p98LT@qpWDPJbTw<)ZHcSN#I;K++!o_e8%4 z{G*~D0$2SKaLp%cX59Xv=qG{StM&)3`YGU=-wts9Aoi0C@Gq(Tfvdg;+^%PxJn*lo zoq?-P0Ng#4b&9~BRC@wf9cxzHZnn)jN#JYLZopN?K`!@|0sdXJ4{+7-fZLPAKES`P 
z_5rRs0q{_Ciojo2`v6xRt1)gLSMDnb{3mK3;Hu*Q5BKN$$^ieB+6TDmc))GR_dM`F zsC|H|PJmqUy$JlTY9HXLW6cKpsQ&=Brt$sSz*WZq9_}ml0lt;!xWHA%18$2>9{3KT z;{#Wn0C=eO0lu5)guqqDIwEc#`w+gbB=Eh|KEPGS0q*W6_5pr?+6TDmc*sR35ByNI z4{+59kjs4)fj6jqfUAymB-rNwu@CS@wGVLBae#+fKLh`O+6TDmc))G75AfsEKEPEc z0Pbr21N=m_4{+77-XFJ*yN}oh_#(9raMf{;i%tgk*=ir)s^bB759ajbfiG7309Ty= zxGnWz5qO8%2e|53M}d9Re}L!IKEPGS0q$!31N;KD4{+7-kgI)wU!wK_t~vqmQ1Yt? z{7SVCaMiJnj@!oR1lgN82ypPpN%?tB!+Q%4r7pb7~*ps^it*dEhUpeSoV@0NmC32l%UMAKZ`D4)RVM%*YWV{GC$$f7)v=C^ z+s751B=CQzeSoWu13Xm!0lwLEt_NJ;s^cNo{vq&fM8^lNIsx!d`UyqgJBdyRTy?BD zar^jcAK*#Rv4N{j2s}K3?N9=qo5kFk8^5l`PXO2WHgJufs=*!LI^U88ZppldTZ89n za1XfG#P^j49?Jdt$i+Sd-?JyT(6q~?r44i_eDPgT<70h;HsYk zp4R*VZpnS+f$RL64_x&Nz+KS~kc)m1xX!s+TSDgU3_kPwX0{?~jGjP?hn&b9y=7@cO|4!`# zTy-4amgr=F|5Nvo?V>mq-;7PR)aMkgEJ0B4L0j~WkAGqoS$hCX{-&g$yxawH*FHC<9#k=Pq#7@qk-m=REL9>Oa6$CjjnC{37tf)jq&g$65gPk#dm)K23CN z;Hu*Q_q6^2euU__z*WZs9;khQA0s+GaMcNbJ5o-Iz?;=Rz*WaOF>W76@;wP$=l^Zs zs^b8+jurm_ezMvJxaxSo9kmbe)73t}RVM)MNqUOF&sF;XR~_r5xP3hJAK*(w#|ExC z4)8$gu?+C?JUMXH@qhxxA0m;9(73s==*O;&#(6Rtd%)c@ zSSJr$pHuk2RVM)MpUFB!;18-l16LhuQQU6IlfWNVzXh)G9pItdR|fdwqT>Qr9S?XY zcFqIW=Tkm#)d_(6r;B}n>-vfixawG^#qHzEeIpZg$Ty+BAZbs|_{AEc`2wZin)8qDW(yWsNuFr97;Hu*Q4<$Vr;6GIR09PFk zcp&M?1J~!jK5*3ufV*PnB5>{Jhrm_GIwNi$U;H@KAz|#f$O}74P12`;I`I3!1Z~w3tV+P z;GvY$Jn*$@AKUh8{ zEnmP(Y9HXL69D(M{sI0M(FuX8j&)YtKA!k<61evBZQ!co0QVQNKWBhzKi>teIv#La z(w_(Zw%P}{>IA?;ZNGq**M$OC9qa74eQdFF68M&9^ZnYuRmTBti=8vTb$-bOt~ws@ z;1sEUfa^L#AGqoSz-{gS0N-BjD+I1O);V$exRUQl;5t8P16LgfxUK#JT;~;C;Hu*R z54C*NvnX$@dI!U7zd%R~-+z z`VVkjU+M!_od9?!`Ben2^FkqT)v-Ppw~sCPl?1Nyz&3Ezae!NDAK<#4!3C~59&nup z7y_>I0DcW#sKEo^u8gyaz;zrN0(V=uyqAD$x~**7o`H_zfL}gU>MP)?p8~G;?*O+? 
z=W${NxQ;_z;HsYku6Fc*d!nBQey^n82d?@B;F=!+aBmsgp$NRZo(H(;hLE4|pJU&I8wVzdmr)32N{n@XIB?Lg1=nEsonK zQ2zn0>o#rRs^b8+&SN`gfNOi_0#_XmxTF39y!^fZaMcNbdrQQBfa`jI5V-1Ct#SJ} zqLTz(evSrQbsXTHmM`G?yxRq?Iv#MRjqfWDy!@ORxatJJJ+X5UxURbhfvb+SByJx| z(vt+P&);p}s^b8+#1AvTuhIGkxaxSo11YC@;JWVD2d+8+@IdPy;QC%z2wZinHn5M> z^GV?4^_IX@#{q6hdNROuoxclQbv)pn+6TD4hvfrTod9^C?H6!eXBz@n9cyXaK91BI zN#NSQwSlXS1Kd&n0bX9u3|w_Q;8weoFW|cF*9WdT0dPm{16=3#Lg1=nEsNX7lk_Bk z>w7ddaMf{ud+I;Hb^W;uTy;F)fz*e2;QCzH2d+8+@IdN~BJd$?zksWb)gHG`p!NZ- z?-RlfvW z@84P;x3e$$N#Hsjv4N|83b|a*0d8evyaQavBQ9{&&jHu`@PIo*QttuR_QeOT`T_9Z zX4WYJ*ZIj1xawHv#qDX`$~sBlIUh8%xvxC%P}1WASDgU3 zBlSiRxITXjfvb+y6Sq%zJ=-}6T<1A$;Hu*w7o7}n9q+rqRmTJFe1y}J2d>Z8eBi1R z0QV(5Mc_I=69QKqD;Kv&qrL~s^b9t1}|suKYB zB;Sj`^?ig8xawHFar?N}iG6^V*Bby=9S69l?K|-D^J3tt;{gw}{sI1Tt$%>4P5|7~ z{ts|{z8?Zt9jh;HA5Ysa;N|Ciz*WZq?uZ{|fa`eH1+F?Ca7%RZz;zzO2d+8+@Zd(y z_abnee+YrAj@2Kxk0tGL61dLu*uYiC0d9*QW`O@z^4$flIv#LO?kf*m=b3%rsuKXW z)IPv-fjPJv0JyLA0j}fg5V-1C19AJf4~u<(>o~#&t~w6zQ0$xmuI-cy zTy;F)zSubrT*p~HaMcNbTLr$aB5+;r7XnuuYgODnw)zin?dRCQRmTDDOTK4-m*)+D ztB!|U?E_rLvp#Uu34pumKftxU3xTVSbz$5-q39%m>v+}%t~w5INAfEJyu8i+lJOr*f)`#Nu@xLVH3%JhP+rU-FLH*Rr#`=!8DCjcIbP7%0{Lqp)IV_gLH5uGG(?JwEDRmVXt`JMr;^Jy+{)$x$4 zeSqtHh!0$K0^p&P_abl|M})vt$GSLfpHRw05_oyu0J!Qnz+Lqp;5z^50#_Xmcqsmy z2d?W(eBi1R0Qa?g0oV74L*S}oT@tsCuk9Cb9q-$~RmTAyenH9?aGeivfvb)O+*SJk z*Le&dxatJJ-OqD+iokXLDg>@N)`#Qvam5dlz{~Txz*WZq?rHrDT-VpRz*WZs?n--_ z2d?vbK5*3ufV$+1Lxav5-Ey?!`aD5-y1+F??4W0*H zUataNbpqi2m&Jd8>o_X}t~%DGar=a#lLW5gP#d`FIKW*g?-}6wKC%m3bv)p%q$dwt z$Fn|g)d_&RlJ7;}x_&+ct~%Cbar^k{KfufDV1cWSgIw+_16=3#T;QtX0WXh>ftSa{ zHF%*04}gakBfl^FR~w&fNQ#~%j5QR&f;-v5_oxB3|#e7z=z~|4sc)eGr-H^ zV&JNu1MZ8y2i!WF)1L=k9v1^w{Q_{!j{ta3lyVKcT<-x_ed`La^J}b=1g`HR*uYiC z0q(udIvL;}{V{VFxaxSwB|UlIcZ!Y=Ty+BA&Z}Y{;5tqZfvb*nW!yg255+#f^|_A? 
zTy-4ap4?Xk_@~r9z*Wbq!SleiALs*DodCG~J+Tk)^80JRRmZw2ZXZYTJqi2)xi1^I z>Nv>7&KcnPzN!mcbv)qCfAM|gfqz;32e|44$VI0J{4vo9fvb*nb=*GgE8;)E^}Qh* zxav5-E%E0JaGeivfvb)O+!LKVa2-GTz*Q#z9=;_01H9aB0#_YtFm50JWw8%%9benP zRmTAyia%$7>-Xfiz*WZs?tWkV2e{5-_`p>s0PcxD7lG^hydiMav91C8h@F$b%l%T| zs^cJ+`^o^<@sSH$bv)!!-t)kp(((mdbpqi2cg25zm&Y~0RmZwEZXZkf6-nUb_tSx^ zjsx5ioeXg8Z@a)%#{=$&P9Avq{YT)c6Cl_23%HKcL*S}oT^F}cFe2p(xPC8<4P12` z;GWcn8Q{-LzPrFx#{=$5eV7NX?^XH0RVM%*O1)78uJ6-@z*Wb(K5ifDM`9n~ugHDb zz*WZqZvB9DGQf4d(FLwL9&ktMjXZFDF7E?ZodCEk<-G`8pVNiFRmb{B+&-bUU%<=n zuK`yb2e_sF1N^_#e}Jow2i%c*BM-d%emZc~34mMDA1eaa_qapgs$<;{w~wX$AK*Vx z{{gN#4sci7FW~y#gbQ4CJm8Lu3-Z8qzR?G+IstG`#}~l$`9cU>b*vlX_Hng+2d?vJ zHgMH(fZO7S8Q}UJt_xgsJm9vZCl6f5={|7P34mK_AK>~NCj_oK)==C&p41yj;7?8C z`ospVIu3A4>iGX(4) z{aZK3?QEYT^DV%2KEwvD`YGV9T+adSiGBvS&WE_bRX+z@^TPw~z0LN_1K0M>2d?@7 zaBIRr(f4*1f$MYd5V-1Cx5Vw~Y{oiC;95V}z*WZq?n!zwz{~v#;Hu*RxBeye0bZUj z1+F>)@IZ8mz{~3~fUA!6QLvBPR}#36M{MA#;{dnB&KclZKe)hE#{=$JoL_n1<#8Wy z)d_%G6UBdk>vN3|xawH9#_i*YP7=7*!!~f$aexO>E;7K&^U1(f#{=%D{{YwL2tIJt z34r^5Wv~Zexav5_MJEGXzi-n8t~wrYNBsx5uEX(x zt4;vi6MrrO*Zx}wTy?CE#qDFS6Z-(y@vIG8bsXTn=wyIvf5`=|Iv#LWbn?J;9gYuN zbpqg?)<3}YIaCN-b*y~cKH*=)KEQRox(!@)9N?ka2Y9*P4qSCS;Et5{JaAo~<^xxq z0J!rHDPO?#J<1Tc>R7kO?PL8@>;t^~{yuQkaexO}KLgisgbQ4CJm8kxS01>2U$hTg zbpqg)l+z;c@_HxWs$<;|w~z89a9y`x1K0QtaQ|;?=L~S~2<9$u)$x#vKj(q#IvgLk z>IA?8?f(GRb#5VW)v-Pvw~r(Dl?1Nuz1qN4#{nLyeSqutPP@QW#{(W{{R~{!1^K{L zCjcI3`wm>kYawvevF?o9#}j`}0@wG-ZQ!co01veN0Y}V;SJOPRRwXIv#LGbn?KR5Nh?c-_v47|La5V-0%z{}%e;N@{~4W6sPJ>a2?`|`lc<6_|c zKHPsU0M~Q}$j{|*YY}*PTnt?GOTg1|ZrvTXvoHEd;N@{KaMe!%cSYX;Zher`p8;MT z7Xw%Q9B|DK4|q69#x=looaF;o{Q$Uoh>UB1YdanSR~^d-d;UN6-aJ5#s$Bms2!V*o z47e~NOM^jzAPk7w0mTZNGmz+rTU;rSC4kGQ8Ejd!WYU;n9351=Be>K>(6AZ_a9iSp zVoM@!BW~kLsVLQiC4fS+NWSlT&Uva&Kbcbe-QWFB{$T3;)amEE=RHfErMk0^)#)LQ zbKVMZ=!C@6-L1}I;uvQl;?RkSXRcfm#Br{g5Qk1mJle~}hj{VaBo3Y6zdPeondjm| zyjb@Uhfc`cm1{9^ydM$~hfYj9a`iPq9Osw`apPRg=)}Y;`&yj|;&{I(Ar76CIoc0#?DJ&Ap%Yx%8K2CRtA{v# 
zAErVaIwA4Q^^3*CaefpLhfYj9uDJLR$2v424xN;E=*l%s9KYX{5r_0@rp%W9&Tzj4%UaX6WLnkF3&9!l!CXV@bMjSf9H#_4Kx^ndp z$2zM*96BNKbPw0Rh!^vA;?RkiWBehG_XHB+&`FsiKE#W8DRJlo-|CD{w5Kgs4{_`d zSBOI=Bp$i`zL+@HSrKvQ#Khy>T>m0oe1D%fbW-9K7w2i>#ruTBp%bj=j8Ba5hj{V3 zNyMQO5|7Y+h!^_?#Gw-tuVDT|9Q%z4ap63`apH($LOdL>acdBtbC~5Rap+GGM}1_(6X$=6 zc%Rb`uI`LK^t*|pUMs{C=YN&>gwqd+Lw|rc+DAk@L;Q*7PCq6N{c+-fD}O?~@+KF5 z;?=`l{E0(rv&xl8EU!_GH`%b|%CGltO^ee+~bymE-Kb zP>neDonqq9A159n{={Ra-yn{Cr<6GKr-%0OxZ9TS#W8XFS_RR6G5DynwohosR zpCNJh84-`)Xmx7Dv9A&nhfYE~b^bJnV?K})hfYR3JjTU`IQF-Ll_l{Z9xQb6A&&Pt zLgMfzA|AVP)rez05EF+^LOgVFZV< zh>aeOZ+xV|Jl#6yhF#Ic?YiNl|WcFxgx-TRSf1(mzBaZjXW8%!z|69bdehgN3#vl6K#8Dp=;?Zd~o>k%)XF}r8 zkBDdQck>G3#kfZtItlU0hpbM6IL7moICL`N(Fd$fi#XP^!S`m4TZMRr_z=hXIwTH% zq7q&s{`i4*-!&! zyqI?phfYGgvc$C?;`m;0N*p>F@rvv3E#lY@32rKh5An$L_bPGhV}!)vPeeRA(Zz>2 z)}b+R=p@7|?{o1Xj`NX}ICL`R&Yu=>tV4seBtFC=_q?kT$L|t^#NkgwJaX-@MjZQK zF>&Z5#4D~`4dPhOro^F>5wEy@+9F=8Yi=%y5An>k=PL1HUxhgQiI}@~SR-ESTM~y( zLOjFxOuX1PAP$|3`6;#?wuoas8QfA5AL5Z~&sE}Bhla%APsAMKGx1{Ik~nk{;(;qy zgE-dvDRJmz%rX8D$39qaYe{^V zF-LreWBnN1UJ@VT6{k}rj`d?m9R5VaV?5u9V|^VHhfYE~L;oU<{i~EXbTa0yJ-3MC zJ-1+ONqmS$&YvoA?4O6k;ZH<7a&fK^FZL~oLnk4Q_3TRGShuDne5!hTg5Ko)9RCXOc-ZPxi5KG@arhY#Pn|zC;+QAL#G#WA56`py zG>BuoN{K@!BOZL(#fLc7`@#21;zK-k<*E|LJSQX$eUd$`*D2Wg8)ag`-V|^VGhd)sXuMx-hxMSkbNr(q%Kg2P9ro^F> z5s#fti+C}w_(4g0h{vuSR*4t;D#YPWL_9_NA&zxKOdL81@xb->264Pcof3ymMm+nZ zYd^%XehhwC5+C9r#vkJN9(PC_{zS|_<@y(K{Qg@^96AZ{6zzvN=5Hx+=w!sxb6tFh z<30S~MP5;@FyZ3JDnPF?9ar+p_32~oKAx{*4HU<=w!rW*H2r-|1{t3!v{Yti4XC> z=~Rgq=f%X~PeeSz_)HxC{zXh2ItlR<;}3EC9z{wVIvH`Sk2Vs=`Y5=wGd|$mCA>mB zbMyZyajcI*;_27ycsM{Den-T^_qp{8ajcJG;?N%_j`AnO6Q|!Gj`dMW9Qsql17~N< zPju&f#IZgKe%2X(=ywxGeN>1Cm%8UB@nYUZ9QqOQ_?uRzM!XpJh(jk~zQXD>h-2S1 zB@Ug8cWiCF%v3^X6 zLnmYI>Z?T@>#X1xCGjC1x^h*CsxfisB*a74o*Tq5A4rKqCu8n( zTEvTem0y*_hj`}dy-FPCm?3fa6ES!FqDCC+^q4qw65_$-u743P-h(9$os9Wctxk(L z&c%Z1lK2o0-Se(Wyjb@Uhd&YV!0FV87w4hGp_32~T)$`#FZPp(Lnk91V|*rFtZRN< z5+CA$tM@ALV&924{E3LiuD)u-aV{1Uhfc!Wm8(G<=SL}V=w!s>OI-UQj{TY7Hzn~Q 
z9=i5iC64!UL*no!A|9iE5yyHhCJvpXgg1!ey_b|YbTZ~&aq%I(!j(7pZApBHhpxV= z#Iau#5{Ew#@roPgYs7Jm854(2LOgWOFbEnfHj{T6}_a*Tm9%FnaUc8S% z9R5VaL+4M8IQG+G;?PN$yZUMnFU|#sLnk91V*DYF{j}f@CGla7`44ftw-6GCKN0Z& z@ga_V=9oBi65?3Tt|X3iYg)pmN_a**!g`H3*0aG~opB4VvF*8=IQ*^&aR6Gt2q;<3|j5XX8pB@X>5;;4^|c=jFlTp*5p*C3lY z{uScM^;V}!yjb@Uho2GgA$BOW=O7IFM8Xz=Hf z_z;g=KdlnS{&q+l{zSwpuHI|JG5?Q=Lnk2~xi~k77yG5ep_37hT)$`$$NDF@dW*gc(HGgm&Av7#kIpKah!{V#Nkg= z!fV8feJA43Nr-1?Kg99x4y44PlM%=Ke?9T5*tcJp)MnE%Jbp+8O>(WBwo9+Zli8 zcN0f_REURn*?3loWB)%S4*iID`bVo%BaYuYjfq1iAs*aqbsEI+@9L(+p_360|73Mq z#PNHl!G@XRRw14uKE$!V9TJB>Q3F+R4RQDr5l{bYb!x=%?+(Yrp_32~T{~$+3&l3`dKN0Z&&oAQm_X1+#&`F5L)3#qUh+`d*5{FJkJjM7!ym+7Ik&^fjk6nMS z634$&9}PF>&Z5#8b2%;`sL+QsU6bh{t%o6URC|_*+SQh(|xS^;IQ~ zeV&jw{E3KX7=M_%{vH#DPC`6(^Nj{^{JwEY96A|u*A836i*td$m&Av7jOQ0|{2p~k z9R5VaWArcL_;zK;f{D(O9_d??E zCn6rW`B;rO&aY$Q&`F2~Zai!d$NN_)ap+{k1J|Bg#Bq)jJXR7P;;CzgRpQuZ4vE8` zhErbap+GGM;tTavD0r6FZP|D z=!`$~yNRPdD#R-!7^dWKRN5qpCTb&y5U+-agOdL81@oX2HzCrwtPA4S} zos4*8XRFg9-gG*_lQYMyLOelyh(F|XLgMfzA|CAE;zJz&eo#ytItlZgTzrT>j`$FV zPDVU)_0=N2#Y?Py@Ki~Bh{rFq>8r%C-xv~yKN0ct`7S=hi*b)QbQ0nT+7Iz(xpJk% zp_37hUts-d5y$*C_(w^6h=${E3KXPNznEZ^VZj z7cJsAR}BLDT%BW{_CWxh3h@-<4{_|Dhs2>15f8R^@gZJ(kApaLk`mq^j`x&O;?T*6 z2d-Q#;@CeAwkU}Y@z9m4O1$`<0&(~g5szJc)rez%CMFJ@gm~(78pN?aN{K@!Bc5V> zCXW5sV9S#D5RYAduM)@kbx0ikM8qquU(|@t$M{1WItlT#+l@cOi}%clLnkAiVg5sW z0mkQ9CGjC%+12_}CEo9JLgMfzA|ANbDOdL81@zC|t2Jzxtn>chb;;DP?w1~gdy|)=`QxYHI zDaL2w#l9|a_!AM2vHl{C_gZ7(&`FrPI5&vj?c$sghfYR3bK_Wx_z#^;_xRT9%K9=j`u5K;?PNm$FANR#0TBDkrIbaM!e$U+#>!x#OGNh@gbf#ohtFG zoK8p_{zSyF4>m{~`(1GfA1~nv@z#soxg_yyi7n?eaqN#}#KZmUIIxj8$`fqc8Mp9M zYxfYx{#b=L^m~aTE+O&6=`SXZ{jrER^aqI}zA^FOH0%EaaqN#J#G${EIO-`S9-Zdm zPkhk*-gQPC`WuO(zJq6X#y@$5t*;*9ct4>+9Qq;g=+#zdF>&H*tmPPY-dN7gUHtCnR3k-^GVGe$Of*4xN~IhWHT2 z^Ex38os@W`>f%Ej&)JMPbb{x0#wR(z>huuD`y>_O&Q3yjKtrhfYj9Imr4m zK^)%$O^8D$B_18<;zJzovt`7g6KvNRpNiAzA&&P+D#W1^60f*^x|lf5sUzahiHT>f zpH2|RIw&Cyos@XR#d(@Iem5W^4xONj;^X}3A&zxNg*bFV;)&}Qi-}`h9}$O6OguyT 
zA&zt5ggA6k;t~26aqQn_#Gw;(cg82e_)Hw{HC2d1CnR2ZrESlPiQ{}QA`YFHc!c;6 z$M=yF;?PNnC(fT~;y6Feh(jmXp5o)i)gI!zx_L;2ICMhd37%iXvHu?thfYj9bK}?q zah!K2#G#WC5AggV{=k`bo|X}ZPOw8~d@_tb#PR-Wg*bFV=FXqR#Idg!5rCK0IP_N%M}4Hk(*NTOkge zka&Egdu|fP`#BMD=)}b1BdpE@alE&m5Qk1mJajtK#PR*#j5u_H9XsO_xN`Ln$NQxf z;?N0+hex^i5XXD_5pn3m#M9Tg_z=hYW(jfVq{L&VGff=dugQo*C+O*nPkNa3r-wMs zPb`w_+7_@ICN6t0mf(I_?_O2ICO$tI^z?de-X#` z*(=1M6A}-dKZ}Xu{e*}(bYkK$+7EHOpO6rTPD(s*?RlCw-qXv7Lnqj^Gd`)S_a5R8 zxO0#Sap;7^gX8S@Gf=`K;+UTg636^JF5%-PJRx3rzb$8j_=@*hKU3oHe~NhM?2LHi z^jpL~a=O(IcI%8Y^t*{8UKQe%*nYlB9P5jaIP?dIN6x>9c&!IO;DU zo;m#najY*=;?SQW9wYw5!%x}xw1{JU5mZXzPaN&3LOedn)?<}8_VYvH@IPX{#OlqD36%Zox}R;#0z_#If!UiNl|Wc;wo1jX3t5V&c$A zh=;De8pLtlkrIbaMm$FQA&&2z1#?T{Lp(j+wIAa6UT{bp{zS}AaP5cqHu#<(ap)w( zW2e&~j&(sw96A~C*!A}oah&4>dzQq9c;@CCRpxHrJ0uQ&BH|(X7jgXVM@$?#3G?^a zI5&vnTrwpNos9X3R;NWA=aRuoOX5R3b?vZ99KVkd5{Ew#bJs6w#Btsc6NgShJVg5; zj{UlnICL`Ru3xl>iTJmIL^6(mzBhac!>ESalC&L5{Ew#@fh(Tj`Qf4ICK)?G3JNF@&0p496A|stXnq{ z$GSDxhvKt~J%{Zs;T7V^r|jpe#EW$w@o<@aUJMY2-x2ZPT&rIrUab3wLw}q&%AXLA zoPL9NvF;-d{VC#zV@5o6`Yqzcx^Lgk_(Q*&IO?N9JRP&~tP;n3J0uSMi23KNPK`L` zfiZFDB*cRYtWJY?@g6U6=w!rG=TD0`zE>5zeCD`Sn4jn3Lmb~b3yH&@hA$BOaiC5y$>a&|4B8;?bxrSCu&C{~>Ys6A=$aTzrV*ydx$KorHMc z{Amy`<`u-DlMzpsTYp-_ac&zNP!b>F!TBye#PNP(NF4q|%rX8D$9_mm96Ct}ZxF}% zOG+F%8FR#kIL;S>154sVJVtzo)Hphj{Fsr&Z$k{!vIA{zSx6v>)QwCya?hCm|kVd?t?X^QOe1lMzo{ ze{T`T`+LDbCGjC1xaUrlc=0_Q;_xRTo?v_?j`eIz96AYe%zudEdod|-=w!qLJimzJ zy~N;^CGjC1BR<6OeW;K){E3LiuD{oaLnmYI=DjWASRVzi zDv1yA81rZ1*bfPb!=H$F>c)*4@nYYAICK)?f%B(9yf_yi4xNm6jPZwfv9I##lK3!p z{j^FP`-CBJ_!AKioIf?<*sqR>Lnk2~JAWF)@xEnB96A~C6!9UBbFQFT5+CANU-uHn z`Z_G(10_5nUKzH2)QDq!9TN}!+dg;3iC3JR5RaXHgE-dLDRJme5l8tm;+fNL5y$#E znBN&^=ywwj-RCRJm)Xx(iDP{o5{Lc(anwgdJouK4XN@@KCoys8C&c5+txkhD&Y@D` z(8-9WSGsux@uJ-xJaarN%&)LIRpP~d7jgI#5swic;+Sv8#G#WA4_CPO5XZh*N*p>_ z32za{`{BVMCGjDiy85aTFV2gJ!=H$FdZ{f}jX2&Tjfq1iAs)MWZxF|Lo)U*n#@v;w zMZDNgeoaYyh{tF@#BmND5{Ew#b62h!@#48a96AZ{(6z$`aXf!h;?T*6r%tCu9P8QO zwI%T(p1E^hd`WNwHUO^l>8S(TRR;NWA|Gr^x 
zXi0pChpt~#iR1nAkU0E_n4^CYf82eKI3^CAggM3^;yBkxi9;tN9wR=)*E@fL*OkPF zc#8f-9P{LmIQ)r-$CtVBhd93P854(2LOewKA&&WWN*p>F@z}+=MI7f)L0A$W;xXC} zalBt05{Ew#@$?cGAL7OP2E?J05D#5_HHhOqfs{COGUAc*r$rp^nFI?;;zK;d^NV=# zJ{WQM6A=$FJ`=}2Mob(!33JyD8^rPb!IU_3GUD+!-S|Ts`!m5|CGjDiI-M$U{60WP z9R5VaL#IItg*CvsMzvIx8*VQzbkjp15^Hi#XO#UeK^v8)KjtO(8-yn{4R!SWDQ^Zjp8S(U1*RP3V9vB=s zbNnmBgSA$tN*w#-A#wN_5f5*(IyK^$Z^y);lMoMYu{sUn#k!9;bTZ-r;zJzIm*A+9 z_z(|VxvIo5o`=NYPeeSu-NlDE&c$Nl&`F3VYg~MY7w_p2hfYR3b>(Ui$NV;UeMx+X zhpt>z;>CU-arhGv4_v+1h~xPi6NgShJVyH=j&*2C96A~CbhV9hi#X07gT9jZ5RY9u ztP(HgrNrS+M7-kay+$1Kx0pC|65^rj?+xNOmrsd9CnH|D$;F2__Bn(8lK2o$Caq4D zIL-}2;_xS8?sRIz@%^TlICK)?u`5@DIOhK;ap+{kQ`Zh##Iau#99CJBZA&&i!lsNp!hzD-mXc5QyC^)7hKE%V+wIAZxPYa2| zpNM$u`bCX+ao$H9ItlU6_4fvG?DM3=p_36$zUTTE@#4IAVM%<5$9R4b$NpzX9R5Va zL+4M8cyZoG96AZ{!09xI7w=;bhfYR3M*AU-_qc*ZCGjDiy8d1zj{WM8IQ)r-hpu1L zh+~}|6NgShJVyT_j{U2YICL`R?s?H7UcBe|hLZRYkDN}GIQC;h;_xRT9w0u%v0oh% zhfYE~cKy9U9Q%zaap+{k1E

j{VQzjV19Ro?`qVj(xC@IQ*$T%l_{A7V^8|?{vQ{ zu7A>JxA}aryWPFKgttogoafA}Q|X?W_m=SGC46NGU-A5z^&2HTdeO}3lMH?i_=*yqmhkBkzOjU7UF~@K>ebNb~aJSpLg5F1U4%FOA*627>E50>!dB|ItN-FwXJ&%6>Imhi=8{3SE%50>!dB|ItN zjS@ap!oxjh_GfVkA1vXEUplkSR0+>Zc(B*ZIz1)4w}khV@W#BE^`}aBUc!UDXV&Q{ z;k_liuY@moZd?BgC)FH!s8OYyo8UJ@QD(>*n58Ye#Rxd`huCu6_)V65<;q&!gJM{Sy zue{Io$~)ne*XNm6UZ2nN%Iotv-ni}a0k6D1uX^S6dF++f=Pj?iKF_@J`h11guHNXC zccWKcpI5zkg3lLw&v&0kUU_{!?UmQ(<6e1vK4)&bo%=lY#-$GKjRQWP^2)o^D{qfi zk3NsQ^7=gX%IovxUU_{U?B4d@=MAsCKJWF)>+=;}c{{jQUZ1BgxA|@@W!=^*?|P3v z$Kz8T@ACLYkH6UCEsyW&@j3go^WDSa-5$6960W^j@%WQo`d*K}!sAtsAL{YYOhR3h*cC{o9^VpB{&stOmdATMzLm!-9-r;;c^=n3iT`>%u0G+v zs>kO5%HPoA&+>Sm$Di%-0grFz@x>m;9+KXUJifh`eu>9-@c5v|pXc$K$Di-<*yAto z_;QcG(BtDC-_hd}9`EsZ;_(-Ge1*q%^7u-R@9go0$9MC1>hX%l*L!?-k5760B_5yl zxc!E>y_tF3?(kcldwefXXQRh$%eFUL9=AJ{?n4K(+yBcvojD%g$K%}|-`C?k9)G#V zD<0p^FMrukiTc9$)G4qdea5`0G8Mdc4o$ z>pkA@@hOiV-|F#hkH5|1 zJsy9%$15IR?D2UXe}~6=J^oIQS3UkNkB1(Ax5xWD{$7s{c>FkzFZOuk@yO%Hdwhw< zPw@Dl$KU7in#WJ{cjk29zVn5!7JMB|6?AXG7(^V~>X(|CGo3Jbtdn2RuIP@x>lr=JCkm zb&oIc_<0^5^!Rd**E~Mr@z~?%dwjXaM?F67@y~dC!sBBePdxrvkFW6f=RLmC<6rQ2 z!{cA{cc$5(i~&*N8me8A&Zdwj9S zzwPnJ<10PB#N*d_{6ClfiNJp%@Sh0$Cj$S8z<(m}p9uUX0{=G==o`LoPX4^+te|iB zwmG*3!KPRK=G%J)oA&&j{J(3rP_EQoqM0|dP4eHKcdEUG+Ed29L+vfqPK|ww+OyPN zY3v)--b(Gn*w?D9TiDHUV_%{6)@sMbzC`V9)E+eUMQYDcJ2Lj?)qa-R1I9jI?QPW# zjeV}#&sMwF*k`J(M^ep-u|J~rbJgxP_GxNwr*>fM6V&cfd*jmr_I#(>x|Q9`jD4)y zx`o}GGWOAG@1S;S?8DU7E$ik=V;`*c^VLp_eW2PePtsbHLc!s;yhk&Cu9es{LZMdyW0XHDc=)bhBdYht=Li?QUb=tM;yH z2gd%B+PkT}@gG|L->6+tJ2Up3YVWT0l(Fwndk?i!W8b3oOVnOz>>Jgdt9D}SYt`OU z?QvsYq4rDFj*WeZ+Iy)zXzYvB*2PsbGWO@y-dpVfW1p|KZaFtYW1p+`K5F+G`%JZU zi@8}b_D9ryx!T>vK27cY)DDb&g4(*3+}!w-seiR~3%QvY`&hMg>$o{(?4#8_Q0>&% zhpGJvwO1PZV6_iYJ2CcwYQIwLabv$s?N_NC8+#A6U#<3_v3F8iw}zXMv3F2=zS;xE z-d1hhB5sDp-coJd8gBL)`-!W?)-B;?#n=z4{aUrVjeW1$hpHVI`%h}WPVJ3Pn)+8e zR68^FooX*od&<~%sC}5)sj+WS`*5{a8v91Ik5D@?_O)sssrI85#y(co{k?4#8_R_)Z-hpGK$wO1PZV71?(c4F)U)qbnm zrD`X}K2Yr;wa1P9GPTc8J2v(nYJW`aL1XWv 
zc1`Wb*gL5GakU4Gy{+13svR18OSN@Nwb^UzC$1D*w^ExGV?V6+Icj$s`(Cv_p>|;G zKdJpmwKqOy>R;_x?abJBs{JXor;L4v+UKgB8v7Qtht*zb>>JfyrgmcNYt^o+J#OqP z)ILw`*w~k-{b{uajeU{Y%hisI{du)V)E+SQ`D&l9c4+K#)gD#5*Vt#O{Ta0@#{P)f zV`_IBd#B6x3{D;w+x*uzTHh#P=({*5N$*H;gi={)aoBQPy3y+cB(luq?#k6Pv1yg8>#4+KfV*UiF^H{ zYfLKfV|KpfrddHgrqUyK<$KDbdnVVlUgT3tq#~{N>!mFDDUWX2v~YCNcyC?qzxlyg z>f(a6pEf1&-Pd8_-*UhC8qIEWQUjT9&|`=E&rLh-aOcwjK`l?VcqS~U^o7* z63;N$I_Uj--hQ?ub`O|pUo)?W-8^+>pMS@h%+>QVuKDH`#u*#Bl~R<2BlqMz@?7q} z`My~vFIcy1uBjevY4elhl6)O|(i zOsE(A?cWIX#(%qXFENhT(%q?_o}stt(4h^zaLhE>>mDhZ?84Ej1Np3UtSRS!w%-1m z?{Bx>1?%cIPR~5>%QbASoxa+*X5HM!x~V-@>tMlJR|VR}{9fX9-{xyXODpa;sleu8 zCC#g9o7Z~1Z3dQ#Hm@H({BKmiN>~18Udl^Kdfornpy!>=yzbv|CjGaad4V{iVXC^= zN{wn1{bp{*!{!F<)T5Ky;X7non8oqc4TBs=Jfy9fE#e=|$d z=AV`<^wWM5SU8fY-F1T*_m`zhx9<9Ek5uu3wMV5}f8Hi?QOI3_dO^|Z_rUiy!xvxkt*c}n*n}O74rQ|J9sSrlHAXCciDbdjBy%| z`$uk*T&K0V+MaaPkCz6}H+J}xab@I|zMJoz72l{Mlgui9C+)Grqn|!tinE?Y9+$Q% z;$~bQJuB|W;5nMX=tswkVtvMnn-0`WIvePH)kq7*C;mB_JTV=S&Lo>Wj zYFo*Vu-7JdReP-^%J-G)d@m2Y*j{U^$h-9BwicLAK5gpV*1wFK`2%9-mrGBRIx=#< zS1=(!I!wOOe6qe?q&h49Jeekp>+n6akM{F<8etw@xBSG;3)XHmcZ>WqQEBdR^d^*8=L*^>vj(ojr^s5-Bxo~oqzp;{*fc*&Jp}o z`Ha^3!m&4ZE9>qXnO-z@_T0z~vk%FCS~fAa(r*9o!w<{l9KC$xkw;uxk`YrU=u79Q z^Ji(39C_ZN`lCbJ=htW#9DD7@8uIWt!9?M)g(DX(mc;qb?vp#95tYSG2QpY{#6y1LH$qLl1_J0+imBVW})K-5g1AGuyd#Sxtk z=J!Y~^^F=N>NdeT>3!R4? zg(HvVOEpYmpUgJ%VZ(h|1&7Z$;QRffpG^BkZs{L+qde)0fm}7Z`L8I zF!BWZ_$={pcK);!XzeyKy_Jk)?CaYsw@GoMZ*<4LvDbI^&A(;oUVS4s_ubr_6@Ry= z(RKNkO`4BMQ~OolrnUJ+xdeE=X}wGL>udbo{iiK;yH{mL_K%q7y=!%Pvw!r6xwW-H zuwjP-(gW_q{gDf`RrY=AHtiJeTJY`#?^*EP1;@ST)|3`b-;)3GuWr!zykNU8?J19pS8kF&n)$E9v;L8__^67B&s4a>Ca2BMTdDJ>y? 
zRQ}fv+=SGS1n$Gv7lAw3%xX;FE|w2<=G|5Qw*&X3zy7PhohU_@!2RNjCU9#nw1NAY zc=k*J_o$@HcNYl_+;@$~cjenlA?3rJ&i@;M`{Q2_xOGw``7QdtNmIX*EuDPruHn2& z8=b`V2+eft3z4)qT87;_OPb=^b4?TJ8yikEwC%PdqZYPbigtO~Ryt{#eZUtq7L#U; zKAcK-8rB1q{0S*$Gc%Kv`naq+cyZ~8%R{MudQ6p{B=ur??^Mtq**ntX@=Ksk$>%OtrDYy{ic@g4Xkik72<1VX^|}- zt;$uEQ(Wh#80;=)frm&I5~Z4?&%Z4-mhYtFjdZ$eO+R`tKU4m8&fG;FWi1@IYD;xs zb!tnuc*fE_Zqf`-X_~RYE%NlwnsuS&m2hlW&#E*uZP>h0kUG{h?KE37a-SwBCVS0M z{mAH~Hivw6_tqK@EmcQ=w>K`-L**mGdY~q$TDtb}tLlqhDjS-3LX;Yc) zvyp8?2m2*OP)?N;M;>th!jUWcY)(Dw&+AP)oW1L~X{zS7ZK`Iea+3yVY;~vsqp9Uf z%?F3gt+CYunnFfkd7j9V$s2*YO|n(yzN~Y#O&$FwiO(x#K+#zKNW0(Y|7kyHJ|cZu zSJemTDBiIIbUAymA39PRk*WL9`hW&&{<^MZpV88_>u2b|zR_Vl+cb^ntUpS3K21}M zPHOz}oum=VwRY|N4KjA;{qjEnDwn@jLDhl@1^e3TnqFt7E7a%7wV&O5acRQVU!b?t zrMer1TvZR9Aug?eOL-xbd(}nrpYB?= zT##G36}j&%BVqsOHZou2nvN>kV%>qqmOq)_?!(qFZ!?qTa#Kc2rzm`H85AMpiKA*;tVLm60A zPFw3PYPYlOs@o<0aH3pC;#d7aYV$EIuxtTo2Dfi(#?M$4{0|;&Yh2fQlC&&Dud%6g z$!J5=Vx4_#3bQmU#^D;LdZCv}jx+T33>7lRrfL+qZC3rWY>IzT{ePF)szlACDlglp z)k=%pmb2IAN{RzyB{8h^-rQDW>~=72GSxPj$S3ku_F5yHf6HF0-}$>_@W|gL|C0}C z3pMpG*IN4gP`x?d-qdL2AF!WPAM)Mx=Faw}sl;x3t=%v`!pov1%}Nv}^h5hvZgQ%a z>-rMm9hI(!YZ4gM=^K@)gWn+Zpsu(lKSt_y-~RGjZM0?w6+`5iGK_9C67@mNK_)-D zcv3^B^o_1EBfmbRE|brTizXr;m&Tbd6Q8vg>}=)?lj>tWC_{g~kGZiaf1eog2~*g& z+iMe*XPf&E=7($IL+uUCKR?i1*9WwI{1W}ZmS~BoH`!41@2xZUWZMzt{Ks!ZDHjLT zw^-t*6X-r0$4C_VZ+<|xJkPTW98YWLqY^dEW8vuO>pS?u(T}E!=Kp=^89afstA;~) zCUCLwX`8-7X6K)lce<91i!EMkn(7~3J8#4F5)&Qw$}d#%~$`*_#8nQIQo zP_J{<>}Go5SoV+zh`!PJvx)O#`ETVf^8ATh=XWR=n|;>Fd&)BN6LWNY>mS*=vlK%I z_KzLaBWJL(*!Fi(btQI2kBxTMvVAs*+~}{RQ|+!-A68*mo1ZFw$8~}+CGO|%`n}S- z@<+swJ>?_jnhn*p_Q|JqqS1DCeznx!ngNZVe56;T^B2qK^Q~1{##b$0JGFjVwg!q% zONu$-h+liMH!Yh4^Ctu0GPjpbvw4EUd(4=7%e1zZ2b(obAx&NWer~fenJDhxd(@+m^;<&QxE?1EBWA$v>!_uP7<;H zvG2{TXd&i_k*{)hj`6bjx=8=)I(uBaEb8oR&E*u)%8#{v{h!v^c9I*{*}W!Sx8}D< z{`v2vJ!yG|Y`{#NeP0rc>ooppNwBZn$p1%D>45P@O(m&!RCtMl2P+&Ko_bpUtnQV7 zc}*_=%H6UT|1p=eOOk49=+?CDcA5W5-0U0u>O94x^&b5*|JJ3)(@K7@Z*&%Qx0A%1 
zzTsQvZR*~zHow5AT(54cvpf7!ocxD&k=#_>t@)G`EZ-nR%P_3P|AXHCzPtS$z5PRX z`$VlBN%lQ+b6tL&{vE9+I$3YGhCq5 zm|QPP`F{86QNv`c*T??F96N+MZSr%dYWlI+8;;lUXwCY^<+d4;&NBy`w9ML3^BX&F zT2e{2CUM6A)m49Jwt94frYN2Y_xxOp(Rv?e{We|RkIf#mS=ZWG59%dly+CKUW9Kcm zS)0U^b+4HN-=Haqtlw|59^kB>W3&EyWJ*1+opoh~toPJ1jh$DuS)0U^^>b~hHARv2 zLq9X6?&hp-wOOBOvz~5ADjV=Ay`)lq+m6K;ZjZ%(^ zGL++ent8`+TlTQF)P2_GU!Rh6qm$bA@_nbxh?dWlzrwWNlT-?pCU;--@59%>YRCGT3pOHZac~?shTN2ZYzFj zga>75E=SLSuFySw(UGI)g(H7%pFZpDVjpO2Y)g5cj1d|D%~)eCX|Rh|1iV-v>icIB zT=iVM>6T+nuK+{mXmFJwxLb>di$?w--ep==dCVIp?XI@Ex7mAE&a0EL*S&Vwc)4(7 zL-WojRl-*KS6vIa(ET0LzwX|r?Db-;fFXN5q1UI{>ps2S#dQD4C3^iP;rT*)Q=4rb z>h;LFylR<_5&3;TGya=`^Xo4~bbf6nNLqi9oalRz>8jr;w(6*2^^K)R6`sSxqr^k0zvkQY>FUC}hqP(!oIAO{JLq3`Z)@MYz4{NpLfBA#>w^$lghhxp; zZ$?u;m){~uko*QszPD|IQ*zUH^PN%xEwgyLqO$~D^+(M#a*dfGlKK0}Fy^ZF&zERTY__fv^V|(&4>`8wW&$Pct`IFAvyXr^T@{cRW zsCdPC&B2U+GDd0dlDUqG>Oj?=WMw8-Z|HL~440dJpcob1t>SGxd5*08w0Wd7fAg>g zxg9gf(m1Pg7`)uqYBg6$G6{(mHGf=&Z8?(7e`-yt6)U#?I5@7pdYN z9q8zBwxy;Ry+KnHvEOf<&I_J`KT@e08?R!~w2Fu2xUME;;FZ8gKBgjlXV1~~qpNt= z#9l1jq^bXVrW2b(L|>bMHdUTJ?``$Xv>FZQ)U~D%&F^Xtk*3?P&j>dik=S)U3g53! 
zEvH{H26ffnr6D)-cQXL^1zae%wBVW-9LR4sn%Y8LbHd}@Yr^f>`{s}K4bAxvX?(ob zv6~M`3(TLEqPb?cMjwUwIg(YrL`IAJ%{si(liTi?t{Kod!Awu?Rrbjp;c2?Rb=-CU zSvg4kqryv`+3zt5YsS5{y4f@Ptu~hgxqSaNZLUu?FVN9Y>gRG9wxv*7#rb<|=Qq{u z7r$drF|W0m>M)BEzdA0^`gIE!+uoMFUD{0kR>`xuNUok~qw{9drf%Dr4C!pzv;)+4 z>sC9Po?{*Wwje*3I?=-Y(EYtl|GGjp?6r9iUghz`rJZnp$L&2c*;r=Sy8LYUs}qf- z_MQov*1u?*bno`A2kf;Ow2tu7SLIqYdX>q%eV+XNmpjG762W5Eace1Y{*W}r{CBp( z{!e!uCvH)HJ9mngYQC~lyik14Ptk56!QP|=l0eAxJb$w!(7nM>TA4q*y-|OvkNR+UMoOM(Y$6-6f8QmtCEM+( ztA3lgy(;yyy7Xp~)n+q>&seDeD>cYcbt@J7QtvQQlU4ah{+^%6HcVBzxz_)btvhwA zG(ta=4&@3bQ8F(2uK(2Bwnaje^n3F1k(!b1h*CQ|=DX^WNXt=@_t5+0zA69cDpQ_z zzC$0DFq*O>&mo(q)`1>5X?WM#{TMIL$22OVlUm;VAUl$3>&WNXYxO1H*geiEK^_dr_2y(k1#jdYs2rh*G7BVUK_n1>a{e@gXGDY-z42e1OIk0WN%wbmA_bT zeAC@HRBy;MLndJPmU?4MZs^DN6hn{J?iW|`nuFa2`X@4-M`lCEN(AkoG&m)f4PGW( zWGmMuso_+wE8puTnMj*APS4T&=D7S46OYI8mJ9{?Bc@Ys%8xO@G1~fcYd_Y!CO#Av zj?`p==Hw$qv`&$hF2{Nd9W??sgr(Rd)V+@%(?k z^Z#E)rP%rJStDM|lJe#cm`L23|579HkoGI7n-Qtv{BAMwpSauqqqpyLw@-5|;1+Xp zU4End<=NV-XPd`XU?(HtUqyJc$LQfE&3WBeW}iDLI-`@lnpis0U$o`RcW$H(H1gPh zYk#G#)hZ@!_ibyki&bj=UHaZ?=Zmcy&HeMnk=2fpl?Ge#@OqYzIhuL9Uv|~6u*#t= z$h3Z7vx3}d)d!&d-S25YHi%qMn!!5TnPuC@l~r@?fOGh>lgowVS2gU59PVxrf>+AK zQFD|6$b2Ez>6>4U{p`H5V_LVp_$eQ0u+&agDiyERQ`i&-l31_{l&3ZeV^}jf4+0thHPtIEI zH?#X2I?h_QwORLZ*1ymR!`SRE9A!$qTxYv>c^2!XOfvlTrC-*QfKSLRX)`7O? zQvT7%<}>sTRXpMX={F;Ht%>CkAxqNy`I2?>Tk-^ykvJBC{1F)xh3|7K(O z=i>h}T>uP8q^g?x=$f#zAL`2nH<$x3%sw9bAF}|EPpH2#DBG~Nk3(FAv>%Q$t@-wq zhn@{(T*w;{nuHh zy(WJ|nsL6LfP8PIa(8$4_eD09cI*5{rm;_IW6y8aPdp`qVE&kngz||8-QRokuS~Ty zT;5)a*TA~!<|Qh#XzT6_XIK42W+!4zpd2B+^adlT@ziCg*$0z$+z#Y2_f7H5Bl%?W zJx_^~cS@y5OVj70T`RvyB=gs3A@(x<+P1ro_1#o;>55ZN>Ffr;I6J@8j81D}-}!s4 zH_mI;|6d!!rB4_M>y|FXi?X|iH5TT@6YbB^lTZnxYf{~?YIppux*q%MvqL$8{5tVd zmOuStUs~A_=X+}W$e?MPt*LfD#(&iD)HM@i+AEpLiF;E| zjxqX4cYX$KZ&WjK1At6{xP-b3EG0A@-(=QRf3bAYrv4AlkZ;JNLC39&<*%?*N1V^; zwTy_Lc7H$V{(j8;{gD2ZGiXiwKF8kW{=V7$J;tUP&@`{J*XEh^3VS_A8c4pEOS_w8 zI{nI@XV|*@*_N5lY_da&xqif6YtzW@vDef3x!>Ar(d}! 
z>wD$>qvz?qojK+DukXl(jDp@K+-QA;ezaYsL+{h-d`y^}D9)&QnW=^IrZkPd^t?b- z+dA!DDzJ686>r1llUl2z_K@mPGdfQ;BvpRn?dBXL(86ESzP2Y@&%M@ss9SGdU~ih5 zsM~83vCC6A&iI>jD6Q(RSY<6iJ}%d$ca2V}K+&=CkI5%CT<30S#_e10Rm$PdQH2#bk%QhBcSSx*3B|{v!&`9*JY5`c9NGKH}57haQTo9 z1f9{dLfSk!pIX{GZx|?^SaKYD>(zyXseEzmWb`(?EYp7aHizXvKpV^1GDb`E%n&r_ zU7u*LwHWzgxz?d{z+*?4Yg0_kuBaBz{jR$G%Dki`1~Z2qc%(CYuh%B0%syV1 zeORvZ=t_yvDN5Zl@^zYl%-dgWGd0hsedM|!T`Fjvp>9bNdyz}Ez5G2OZ5}81U!D$} zB-K%z4&+xz=0B6JlCN2*ng8FuM}DT{#-{^s(|qN0U_U8B-me3MlG1LwQqgV1nI8)J{d=KL-KANPsdiL@)*neG}oWG zgr~cNuTZ$g!MBOg`EFxZ{T`_U+1A!A_J#8|>1#HfTL#xiCCL{2l(sDWipaT_i{J;G z;A|t<`2juslc7A83dptj%jmYNS2}rj%_tr&?`F$v;1wb#Dc74WW%m$WmzB79N8z5r z(CbA$T{!>2rMq)sK4hfj+av1f^a}CxRvjoLaPN}HOLgU|%uu>1zta4DEdPf2`(XZT zxu1VY5^I(1XA=AGfOggA8R^yLz@}ZY&ew!?Hfe^tb*$0Cy-hqQ3fD2CHHEX^ZPv|2 zuMeAV?@NV*A~Iuz+-l-$e^W+_`Ovq-^WRCI(_$_)?LyxFHWl<>evsJtO>$F<^>%a9 zFV>KL$x_b)7S4ZcX+>LN+moRMQtjiY_EF8{442DxM(Dx(WS7f(<*zhxt?v9-$Bs6C zAIlGOe-HN3>}S}f{H2C%wwwMr6Sn{Dd-VNArPxis}e@a(x2y9a1~IHj|7ZL_83cEsF@ChOJjc<#wxUVg4gvQ+Ml*sm^s z?lM!#L>%bIblP@_71~D3S734}LVmoY)-Q=mrmoA5%~KT`o6N8AjM`h~H;<0IT6QO=q~f(NEU|U8QZm!SIaEyZQdN=x zSu)H|G->@VA-^D{6*0Pcy(`x2{K2bC(BXAav-Xg3bzUS~YZGcsX@T3tlQ&gnU>cu} zfL>~I?z4hj%Vx_MkbhW2n@<{_>pISRpU^H^*WCI?m(CsZZ@fvr7UXtTyO!M{E9?CG z-;l?0DpxuTUZtb>x6If(siOJC_FBuBf6iVfdOfTOKVgC9lz+rttMB>Adh>YsACJS| zGX*)ze938c{_QK}mHN@^r{p8zugc=ERBG6LN3RI#%{B(}bPP3(O1~zSKS!LG&uT~0 zVz!%-)~*si)+;(wnIG_$52FfPRzn9%B`-m!KcgYqS{!Yj>q0c5O~ZQK(R|Lay9cV4 zpCoDgIadG3?ae#weG|PS<-RmEbvvJLuXP;AUoO}3TW9h}kaJ-(g|rPlf4=G8lfAma zotCeH%YD7JtJ+taAWSaNo2%{3LB0OAy`IqPpMA%))rwqu?V+pwNqOZ>Vlbe5boEr{ zJg%!=Gq+b|#avF73KRdWY3toA^SmEIqj^28L~cUj`}&=W3x7iDeD1EF}lj6K-L$0-ITh;S)XOpX76OP zZs@**E%i#hq*9-3vtDm@FIJnxl=buEoLRCqDUkIGZPo!do2sTAoBj2DOsSW&vyNuS zdN-4JxMs69i7D$XZPq3QvcC9hrqpvd>r9{KW3%Vltb5v7ch8XZ4@~0Wc{Xd4n6h4F zCI_ob3S?c8tn+7IVjp7Pu+QF=;)jkxX3X%1J3Q5}&mrwIb~WV&$>#ks;<&Mil2WQM#+9fMtY{^X(|zJ~H3mS~T*v=6mgPHd2RwZ-z%xsQjmL z?Se2U_lh8VSC0Ifzc5{KGSk>Ek$$H0#6&)jU#tov>+&yHr8%nldNT%0>Uf_YqMh;# 
zP5EJ$@>KWtc>4(*n)4SJN%Lw{@jN`-bn(?@|67&;BE0#|`ynRHaE9i+%SBoS*)J>3 z!OR;dRsC=~$g*uZx_UrABfqLLqwbh_fDT8{z5UB3vFb<&RjS=qMPyLQ&NwG zL{nx>`IC02uW2#FE!QkIKZEY756iH#`8l-AaD+>Bnv*k)hJLB*CRvCyWPdtXFsc9V z-}`nKszs;YFW>)y)j$7s`=hejZi+>Re(kH?{sx%&c}e>PuzLIZUv+%_OVU0+cHa5+u@()VQY0o*?4EOdt z=AC*i9q3rC)kQABkq*9AVO9RuKYYJ@@y%av=}QGi%5kB;??1GYtWo5p0{vQ;u2!Cw zVOaBfREJ~9;{p9EZ#8R|yTrDw2`OIwOX2xX%}v|U+M$rIJ?P_VZ1!m{F_VUtj&63o zVtzl2<}0(bLyis4u@gj-xWmJ)`k|)Ft}-d`euEEpqCo8*$?hvvR={7I?+qY zdV8C7LvM|)Hi;?gzilbbjjl2&koATOJF;F^JeStSrU{E%8lukL?d~=AlDaib-!-S^ zjdu6e1a-leO(Ql#s>BoHX)(SXXr=*mGki;Hd&p)|+KcIB~4eG^=`HaYXtM>*=rrZ@@@25n)@bO z16p@3tmgOHI-e+A%YL9O#!DkViqd|f{NM|+`oCOL$j*m~pC-LsGrvT{^RLSPG^Y#o zulz)jCK%Bh%M7rChbO&ILZX9&w!$amk9VtI8?7=^*$4Q|KU2WHpm&~u9BY$xw@aZg70f@XUMeNejR?b%zxI5OWV?M>?p~pdAZJr z6euClS)>aKCX=pppi36rqura`zFzYyBBHHuXK9Va zr^?O=e`dlcrzNW@x+CD%tJ{j}Rt~SH$Bm{9#sjn54_CH(uNFr|3ioEHzWt;nYcXV8 z!bpXIr8vt(pVaT}*iNz9_|f)nNPZgB`V8M*BfjZ3;oog4%(n9$Y4f^P=yreSczxh0 z88Pi~*?}^rFmEq5=gF09b(k-|bv#3c-XuNW;IY-_yN@%i-acx_Wpm%{^M31d(vCj= z1zRI8lNzz#2uvRpe3m$%-Qt;yv|q3-SXU0xg1fc7w`0*&UuJHvswhW1_SLL+G+E1j zmI;7Aiyk^gN;D;dy_8p90nqj6OFH+h^;~q7x#vpr>(7ehZPMek=e^rV-jiP^Bf7S? 
ztK_eALUl1uey* zFH>Vrlj@%Q9Fyvw_uI>*!rZ8)t-S3)SN)r&E>{mIhZnyY(~L{jTBezbg5UI)xi=gA zCsy;h_ginzma2KC)o=TveP+l2#?Ibst3UFceYrThXyobUF?RgYYAG4rhdyp&kaf1I zA^n-VDZfSi<}?4K{3~bZ`ZJdQ6Da@ZRn4}SL3{oGX#3K@DvGS_0HV0wxZ|F|sNgoi zHMkKJHHb#zd4jG{3P1|taJb;Uj7MpQ7>peC+F zQ23tboa*lCzBh=UGe459+f{w`bE@i8b+rWWvrwisPj>E=IOXA5ro2s|ZCfDsWWdw| zlPQm{$XI2w*L0QrvJ5mSOU{BZAsEVdy z@B&Ftg616U7gpKbG-)u_MXG zB5O8rX>tKYhk`1}iVYz|bKmt4=UOO%Pto!WaQiBaTsQV2G=b3-5tnFXtwei z%b~}`)sWYzC{Y$iBa*MKEOg8rn4zSKTzp0&MSYhei$1``Nbw9D6RZS*Ea-#z@*Y(N zGR34KaC~`B&Cim40=iH0bQMS+g%U}>9f6yq2RT*$8(_v(lov2a37mf%+OX{fxZ<9e z!D;s133^XDl_Rt)<}=hO@9DJJ31!0C&bhZS%60N}jbr6&8s3Mgin)|SY%8FC7@`AF zv$%G`z>ziR3*BX2EZ)4rx|%}RhJoM%2&c^c)m=R|LMh>{o{jLV`2`+ej7IeT z={vvwsH95b@BEe%FLw2e2A5_WDB(tcbya6MwExr40IPYC;b4?91{HBhro4UVKec_$ zMo0r_>Oopz+w0G09GPm$YZgzluc6MMlf|kVrFwc9?*~C*u?I@tg?G@RE(EEuNQh;; zUlKq@DULScu9?x!ucs^U4|HJbsS{{&0V!z9obNd2!*T_eK~f z2nCm__>|xuuK=R=$*SToH&D%RZ@@4`z>PBDnelp`teLS|ZfE&qEmO#*M9WmF8Lnjt z*%WG7-rL<3W%ExF=QCxf1pFy%lMg~n3JA?Is1jThu@ZW*(-t#Mp1FrF{Gzsm7o0J9 zNIX2G56nDc@^DyfaKF}rwL zZKjR)VWsToT`R5Bk&2}70xNC5j%_`Y1a*!Mddd`(q~2UmJPsYU(LfXds9Ob8a~&Qr zSKx&}GhX6DhglC|p@UhTgE^r&7LELsYDnb893sZ!8Sv;3E-%E55kjU{v9(Lcwn$Zj z*D~H0LaaCR12N(au4nuADl#H4?@>>1JOFmv#Gqd)(7myOe1@^+6{hPvsh&>b4W)47 zwFKK-On&};qv&k(mng3_8O$CI{4O%tYd)%aY0OQVq)`+g{eRl00CqmWhVt7>!50$z z)LIC<5A7ZHc$dm?3dweb^@`dDAAs_>DW{vE1G zvS`+Wo#81`VRE!AGWlNIznPR6BW15Y+PECJq@4kIr8^Bo^U&V} zZ555_5r`io&`QlVH-O;1z@v+3$-Af~c5|vy)yDGCTBi4z!%%jgK>Kon{K`stcnyRUVMq^$5J@UjFZng6iqnUI#AyArFCdsP1iqXpP#8Bk8mcjmaLt}tCbecK zEsGeoLOB^y{c8a(=}H%>s6;JaOjHhlh|xB%B=b7t>w7kJg~~c$3oD-tD#2C*o^nOt zvW>+>FHhK>kvQlreF8)gJLK)ctA*QKG&h$(p}^VA@}g=GvW1GGi#PVN6bIBS`9Cxz z6=qA+piGQr11&Q~nchpHIFl0D3%a@XYpT(s)Z(f0kgr_S%OALl6o1(>f=3IZCyfVz*{+43DY(cWzfq_ z9Q%SWG;U$z$f;7^XoV%CG@PTI6_&P{SVVk;wdBeetk!V-tF=5>!nI#AT56s^Y^EZC zGu2o}X7Fob`&tuQ`V`K;s$a!hDQ{~moDZ9LKt2xk-vUKShHtE5jL)Y6>kTG~p?Wrw z_4XO{svfEy>PdO$<2JsBY4F=_uaD^SH$~%Lp=LI6@|Kzw6qOnzq=PdzR3Sn3J*4J2 
zP36n}TPtQ#^Av7})X0u+hnh)rCQM(b^`(%Lxel()j&e&50&to-D?VYxdA6dLjt*$5 z<~Udx*1*c)XOXOQJ`5qNZ^Fph2U{3IMvDu@T^EYWSXU?m z9P(s2_U}bhAit8o50o^FQ@kAAkifVTZAjoR*^sb1T8K*q4j0>yKtI$YX(3WcjXNUp zp5!pDw(r$zgi}qK;9={vB`!3Q$E4-N?iG}u?q#la*W$69W`~4qVf(q$X$57MBcz@n z_JB?m!fyg>nYWu$P-TiI;NPCG63h)KS z%=X;l(M&m+y>M{7u8Z(9Wm$F?49MJFkn-{X3bTCdFt`Cvaed(KC|Jjc;al3=O5G1H zNTfVX!FQUk!JbWSbeKyn105$L{FuWdAhB$Ks~FB_MxytG*zhgVRK+y-+lbn0aCW1c zbqNl5K5QWBB!83fk;k-9hRWK@dvQ*-I4yt@>6?^SEbR{K2SAd&$pCaSqAflFg0EvJ zp!)&HGPl&+f*<}`pb-LZqrk^OshEgQK{;`T@6O~Tz5ys)N`M9UyU~aC;^XdOy@oDp zjp;+!=hkRH8c9vY8`9?nVdMH-IN5>Mhsao z9tVHK1bk^E;8TGJIH(>o3&IY4 zYrrKMwX@M;#2m`#Gh9jQ2e_JdMYS2s^Pl+V>>&(m%%FjtkQ?IkUuJZ&$K2WSaL$AGg9az)B_6?xi5 zX>mlDr)|6hPdItnhw=M2L1wFg z_Vo(Pj%1xF($ljUpGAW@Gnn$LiDljX1!(3DJYgybBQxvv3k6m4U&y*`LfZalX=~+= z0YtmLkiNidYFYRSSJTfntEW6+;oTJOckm!axFc{g5p7|bB|o7_7f!!Lc{IysU4j=~ZW+Ps4$U}Ib&5~#1$;6a*sdORF=NG~7`J&(sanDbAmq>%B z^Gg_MIORCyVp7N9DUWiUB=d#neR%(9j@n}j8Yz$8)8hMaIXxk{UK%`|Pxlif-I&00 ze56RUBf*{u2hUz~TY$sgL&#lVF1HG~7*~d# zbM5ss-baoA0(WEYVYo*n5MT+rP&X2hj3V9)y0o2v$!tp7gmNB-s(I}zkednF{6xiB zwSFZUU1}ELS_d=(F~LV&DKEfcN1kS`T0%e0cdx%rtb%3D#4`VZXA1F*xlk)5?{m7A znKLk+0LVEO>J{kiY!PL*356|-7k%nQ`I-27!-4`QgZDq4b*%SmWwY`>ZP3zOn}Ni{Q1*-oS?v9Ki7v2>+$XOCR~w%1gc#c`PFu~1tM5{?jK zBEOnG8-ukdx0EoyrQuSj9Bs&(S%R`mj)E<##{od>Mq%Oxo-`EYn-E_B0}Fwgp+dJNVk7UQGte4bGtgBqZ#3^`$z?t!+6!}; zo-U3w=aH>tUVTAoi5PSvt_=g}y~>~NNPmGBoa<_~~+V(qR{L{kWG zeF2!X7{35bID68MqX*ocqz_J|)Zfu4I|e`;Df;hQUx0E7Q56QUhiLzOeQdrR{?cJ6S4PFyllsoJKpfNhv%^v;pSQvfyw>luZWqp;>>h4qNEsmu*v4T-~Q z&62SMUG0SBhKP#*2AQ1)zs<*+{xh3sT-Zx6qPnty#p-w#;V;BlTgmPQl99&M@#GG_ z8Htl*q(ch2>ifEVBz@3rBdfFMxb%D|0ZVrIm*!?szYV zEU|d!s*&JMgS``(jh`#l*G$hauOh6%WkdnO_!+Zmen?B4!zOzt;s;FD+nZssa-|iw zc5Sj@EW>9{R@D`-4ORE%86?{qoq;ld9z!`b8y$hLLpr`xI%K^EQIgN_gWpHf4_s%} zBweNCu1L$FpvB*nE>U|M5134-JUjpsBPYRGJ=+~M`B%zFrJ01A#i9+x9ifyn;^JW_ zRdE|FFKUyjNQ=2Pi-sQ{9C!YQDYQRP8;EIKt7<1bCaq-(UgAC7h;kmXAUzfvy&&d5 zk3lao1Wnonn%5GDX*t=5=;0w{m0fH@5e+yUBKnUqN?@j!;$$>3&(ICy6S}|R;nG0Y 
zi^kw#%!z2HLi|ezvgQSew9UkCrn3w?orWvVG+v5(DfptfQ$Q~;d*BBu>RIt4P@8LQ z#TM3MFHvZ@gm$)+lT4?)YxF$wJlcab;|x395^tS@_5!vcZ8`L53@tTCGvO>s1rT-BsFDmIkoaiootust#=iAwH2|e&|Dy} z3Z*JZjO_>Qus>uesZ4(EXHPgXC==&zMEJ$%G{#O1LV2-%J)ArvA<>76Lo3bNkan2e zyy0Xpb~!xMoWbq{9ryXTVp(HE?Y-?k;`aVvI!O+AWKfLg1sA1O4VD9cFgi+*N7QC> zA+N)va5dKp*UkM=F|!Obnzc#7_i!V#03kA9>2sEm^qfJmkb^>VjE)0yvskjdMtw|g z8esA`@06FueIyv=HENoryhc5&8F%T3g}4)ENZi0x;XpJ+%RD{{1E+_()q@3ZV=3c(UMXtH<0K=y-DSB3lR5tD)JGjXcMSjMcsQdf#chCNUDj~etPIp#6-bHjn) z6pJY&3#10v!+Q~_qEh+rsa38`?{e%J&Xe?mr@_7n-VZ*0Dw8RF`<(%MgL1y+e%yB} zR~|a}EB_+|D(0q-)ZQl$STGboJ`{(%vU@t{F9;d3We$e|fPS+LpFpp?sazMB^#wvR zNi@B*OnWjvNCZ@G;Ts2m=X~sx-$Yqg*ZtWb2;!j789C-=d?_%Ew^c=2h(#j%$Wqh$ z1$qjsv*3cvmUR%qxhmfa)pZa zdqxFBpD#5!2$pua*J|4IUJEh5s$~&wi7Y(z~c!O_JvrGe!i9ecmD-(kXj$a)p_avC-q5lb$845YHNw*VPpzY7W=8(KT$HaJwGf)}E`a!a z{sV6`HkT>Nzda%UaqQw;`RfN*RsP#6tzxvB3#|)GO7DsB_HpW$A|Td{s2!ePPH;8^ z@^`)S+Z+Wkh6eS{v9OsJM&sY3co+n?!I7NSd|He&3aI^7#>h=$(HM)ZLu3gNp9MxP zJhOqd>2rWBxhi=u@uGv?5{#I=K?$w*HRXzs%pb^A+*O#VoKViwKjB01j1HH{a6D|C zWDeWO%RzNH@ITrqCz^6x-$hFfH6Q5mhkMw1pgphv!x3Vq}DBEXW#MA?*NUdK5zy<)lXj1%rOh;jEkXJb$xa^up6IjSR~bL0$N< z2&%9f)hZ}%)JY0zi;YB3#i#<|(l{ZgQ`n-Wku6kEsS2J|W6mx?Xgr1?MoBlrVPVK7 z6#STpWmnh_=l`toTE?%Bv%@*^{(>F;e3>{o<1uTEF`l^a#7RHHD*$YWO$}4Px4#AAKRyH?Gml=lNOKl4cZ zz^2_Ks0+~+_e&I1UJ}$f3aY>b)n7qnoOvkk@znOc|3lGbTcpeRS#-Il8og0nO5=ns zw-slNe{AcXKMNNbXC5B2o|w^&in=O7U7iK?L1+wro1&x&t&MPNol(RrF} zf!^n9B!LWgJ=LDTdlpPFvk=rHxSy``m8WayECAf|B zPEvsW+smZiU|5q=Qeye@^CdESrKq%ma*d^FbYv{Mo{-&GE<$1#ny0zJSn|FBAedW7 z>=BaFZ7ksZAR-?1Ct~Z_SZ;(0&E1d*NMiifuZC@fr_7r0SM!aAL8>^0OJ`GI_nNfZ z`8X#CT@)t=At`f5grtZ^gY=~BI;*9={}Z^x-tE&$P)u9vwGh{PIW#|QedEQn##L5#b&?jzT> zm>Mm2?cWO0E?ly5ceYs8WU05@bstXd_+rZH_|eJA91q}N$i^iro`aP+skhA40Z%qp zdrHQb3(S~NW`XkuU9+`J>eNB&F+PbVoQF5}> zTbX@*=;~WSdMu)Lp31ABCwWa0Qtp*{%fR)(!F>7;?293<^EG0L*PegKt<|zIf zY3~fjmFC8<9Umd!#f)I=o2pnJz7`w*`pZF@S(Bi8;a}3cPRDjRc0QcSz`SUP+05$+ zX*OBvt*B-X4@C8uc0P;48H^JVREa6?9+AUsTnq%B2Ensz{7%KyKNY{|_bGqQ?mVef 
z%}IUr`;y{333#$Njnff?oyITP!*x+cS;z5w-vUuny0T#1s=M3Y10tf-+ZolNkv3e- z+Y;iNIa4%~tShb7Y)y~s54w|#Z71r`ja=XfyOYZgS-sg>umd=Ad1i@!o3%uWHf|vM z&_HruEWZ{tW1Q~2h`Zc zXqYJNDNHc0$AK5?t?joC3vB<_JB;*U;Gte|G*}!sWhNHm3K-)8_~-@C1$>5xw=iy? zfDV_Aqr(iv)$GTAd*dIZt=62_?PC2QeQu}@9lSW|7>zMky*r@!p-A90z}oTsuNlKs&>2I$jiS#!|*=)ocz`TxDj3uorhU>i_ z<%;uvaWy`FWD1(w7Rf9qlCyau^GSJj`%pLS(+Ybr7&ZSo2voJBB-4ptl<2=sAz)WM zpqU9oc^Us*f`7oVN*gX#kw|VUA|QPr?bi{>CSL*GU?N`!r9dRPL;%z4Cy4Yg)}Ik- zDV6VWWz3;r#KI*GB-V70RB_V$cQ{JCPra6|gDt`ffpr=IX##shJU}h`tUaR2v;ADC#GS*WOfyx|J&6Evp<7d{GF(mAW^P9+)W0ah4Yc;BWX;C zW$1>?gJ_UkeYh-GeZT^MyZVs6FkE@K3b@Re%#?Db~|F}_BU@;K_kMr5*)^+Rd@koxJlsF&4}dHVt3 zba)$SK0*-2);)NR3(yF#yaeZGVi$Bt0O)T7biM`D+fsfa9;I++fnqaCDm%;~{J@$_ zIWtv{uwpM;@v89EVe*A9yx(E3n-EIon#t9W)$d<3*;k;1YbH%YK+uEqyU9aI`)qR= zu$!B3hl~s&y%ns!*jAsw>Pv0)I%Eya&XCGhb0V%mKtX_s0?M!vGKCtw2oLxDb6D1l z$IRe1zLNsuC)RgySZ;3KZXag_rVn~c)ZOR)i!|KKy z4y%J)%$!1^^SYCunlV4b{T2l!GkX_QBdQUwT8JV5b)|x8NP>EuZ}Qak?K4NR(*xPB z>q>*H^~U@u<+XBxQd?PwR$xII$HH35yFyX3MTmOkzQRRQ67_N9qPB0M5uKYH6I{dI=In5I9?esMl81{I+RK&+SYu)%Lx4wl3{Ns0*^7eu02%D~lC%p|pr| z{hlrdutkV^@;)%bD`=`P_zmI0u$B3sBi2R6h%zufG~(BW!D7ZcL&r;2w9$EIsIM6W zBryMWn*46V#T9ZoP`>|QbjY&X4*Tl>&vc0!d)+ivZrcgQD=`9I(k{3}En2qsyB7Vd z3>F$?HC#tR-WuXg%|*oj_#x_wME3{cKm2DhAJ12Gss9y#Nbo3Ad=~{jZ3G<_x&C+P z-U5bXbD<8+SCxNUi%2mwB1Ow7{Tgo|$_}6DKjEGcAveMwV^#ZusDZp^M|x$;0p_K> zFxa{XrI_UWHQ27Enb6Hmf~lEMji+UX7}0NgQtm8*8lGMzmDH;7hY0pDD8~@KtC}xXsZ^~< zRJWs=gz&zV%=v(79^Dg?;ch)s&(gQQC{r@Q2~Esh(kO!7qa_Xg!jc9K0wR2xrL%hZ zo0l~nR6g6#k(f6R2jes7jyQ~rjCVoP#DL$U@jE2WyiZHLM_ZD8GnXwVeu*!R#5#I> zaU{9#F>O~t_JDpkLA(J^a^P3NE4F76S|J@20ie|44-xx=fYba%ntI|67$^rz8jf!8 zx}?mM#3dqENuLwAkLUw7ur0`dVdYY$U=%CjAuddFOE|;C<90mkPqS`98sCv1*-RqL zJT13SX3w(>+MmYN%8q?*2-e&+46?hXn|Sr<>*a}7bDctAXJht(`-*OZkRusQJi%aS zMn|yA@H>Ki+E6iLA?ob|@h!;M2Vhd(xw_0O?AGW~QY0N52c@y9+zli@7myP76QPi| zesST=rDQc8sy@-4@uKE@Ie8UtOUeFIybcIKIT}iN@36nEt*n9p(o1NEiN0y3_Q-5u z9mu-@X>4rjfXc`twwyB~;Hb#jUyuh2G96dqfq~R9n2wKL(&G?0%cJ4O#>LgQ7E`Bd 
zfbZfODKmXZWjHG~6uAf?N@dQSDC>#b*8XP*hC|;5XpuHP6<6qvVmEhdnbep%)?+Od zWl4y~(i_VV^MXpm|9I}>V+K|%80{`ynb#i>94TZj81X{|iN&JyVK7tKd@QQLLeV_z ziIXVgUjy*wQjtJ2DbAE?>C>*01%gVHXUctYZexb^83jM7@>5S%4oQLfTZB|Q`&l`gq@du(lC_NbD z*Wqc9mV8kj>sXD^i}K^AnA3O08|^m%9U+=Slt4ag_LZ_X6=g>ua#%IV0ifiI)=U_> zM_GQ{1u!{4;J=L$a1fSRK$`;Nx5Kw^uOIQgqIM>1zMy6C>*>-K@4w-?y8b;1-1A`& zQ;>{BPiW^{#QTAdpEgK+{2g|p@S@N{{oeEnP`cadV^4YYG-Ye=)@&oyq`aHNoEyc= z0}4LIz@JfOkc?tiYrmo#^GZ7k%rC;hyltbHX#oFfz&B$NN4VcLhiU~=qpf|G@M`6PluD4YX*nY5~C4F#Ulvareh}tHkLkS>3j}^ zupfx#@H|ZeQRZni5iy0ST4q;f?ne1nbDK-Sb(q_{5-7~s^ve=~^S{k)t_N(%+@^v2 znYC&Ge)A!mf=U>t0?)xX-n61Y9n)`^>zJKUVV*%N4t++k6iDwc3GE<5&H?$#(FnK?7st0U^{dYRDpsjP*5dFP}?Y|F)paCd;!HS zHO9=roO##wz2eE$$;1ufJbaazxO|zZH^wq?X+_PcR19@LMZKb7buw|^rUbRYB$8gu z`D5dd|e{$_=R4qN=E+aRT)wc8jWu z`frLlFNwMvSuLRMr>G^lrl_-J;(969#fmyxCayb6CTT9k};RaCbX4Z2>J|=W4qDC?gvgYW#Kg%xU(0+K)14 z0=9EG*pK=+?PkGq9`6S{-};9zgd`Jn{&xrxoKBvO7#8p4d769d4hJP5)f}%BGWwX8 zcf;80K?WMU*~A-LXoxo(N=(!QSYItOu$Z3^9MP8Ko39usyYtY=@g!Q@m7(4H+Jdpe zyn-@1Ti%a%w!@xB&OK+upqJo0Q&1bFu=q<-SleM@EA;=46c$rn;rD=1mJb5s>%lTx z-=W>>q~l zK|KCr$c>yd!~;DSCNq)xK7x}n9baW`-40{fYhyI}4`ED2y`Q%Oam$W=+EI{=Dj{5) zyhAMR*w3-GG(R88PjJHw6PfAs6(B9jN+TDx5l8@wvIB23*bK_tcww&J1uS;5SH?)2 zH~y2K!03OC+BgmPUIClFt}-oz!p*^i^OC}m&w}5SgtMeP2E@OTU^BCdmI-ltL=#ZZoc{_Bs|Ts zt-(*Hq&{U75z|o&fqZdIGCX-Q)eL8#Dep~@)XZF&Zm!Vn0rvwiBF9+J46>?tkxbif z$?&|xUTyVuOQiXVD6v43^O=H(VzSITxE(Z%$9+MY>4Haa-chB(mC*N*XlH?W$6jC6 zz(BaBMaxrZ`Q!PlhQ-Y7@t-na*vgE3O)qgWhuq%L+6|V8Ttc_clH0?3z=_%DOrvQYbhLraZ;b11t`#LPHxDWlaxok_=;H@ZO zG{I>U(jI{dZ8!`qC^WkCk(%}py*e9)O9K55iqamN0lw)4*2(o9sz%aozG6^>pDp4Q zJ9F;}|3}#&wjkEo72y<)*u7BZCNB z$9?xKTQYQaj5k)3N}hYIzK-EboWe8{jpE+H(B*^vngSTi5CQT1+luiiUa_H;B70cN z;z4&x`BxV6W>QKLN<k+n(U-Vw#03 z_1=INT9vK<&{nOzg#kQM)XV+X7)mVb)6g9R<@;EMh;Fyn8?}y++Dw$P{aC4!%8B{M z9pD`!4Uj5_fx2ShSJ$B$IXKjcJ80EDJe}vu?*|8Sw#O@K>;i7NnDhqO~i@sn|=mJE3d2=7h%F&L<;Traam?4UhMQ z3Jk=mHx-a$z~!jgf62aIM_+g+3KgyfrPIU;U9c8q4_HF9#~9d?BcZ$fe|6%a*kT>O 
zT^Hn91%E_WdJpP2qUmn)!zLJ?-U``E)O(+@hlby<*H+b@)Xh-66vZPyceHthARK_l zQsfRYyeBbpw4;W4c%i8PLfx|tI6aI8$#E%7&i!d5d}r@U8bVWBpKkVwGV=Roj6cwlAQgFt3(Pgp7965Xvt$TQ3E zZ#5Ci7IN|i4c0MsMo81g0wFaSn)Oo~sSGliUG8sA>$GoFUdgx%~5REbVQCbd{zv0Y^ zr#6&u=$EO$3CT<*USuE70qy2m`aKxm0^L0r&0&KJ(V&hu-(ic>(1KajrT7t_aHhNs zVT8e6(2T<@eir+%`#}eR5VMH|^;ia|^i=>OXJRC`l#UB`rGAkM5O2aoNzHkgAo#hq zBGAw6qIroOo;cz;+TnQ*0aaN*c><`@lpzGO<0zFY^8fZa23N{r9)x0!u@5bw2y185 z|AKo@uwp-3@fLm%%Fg!b0em_$zgz=p{Qms%p#mk$FV9VZkSAel=5A5yY*PX3<~{}m zFt9J_yqVQk+v>AfU2d!UlO;4OmC9B#mRC&V;~_QWZKWyP1`Vr;C9RaViKK92%UZ9% z4$L4B=f9{CW3&XOaLkdd9yW2M)OE%i<$%y&0H{{X|$3099mbQ7I| z&euLUvBH<~cy>FmI;(tr%>+P(H?%OE|7~kYOORpn-Ih|W;{DCHm$FQ}_TQFe*Rp0P+TYmDA{D-f~r$cB_XJk zcdILl=66gAWH;C>)3$t?PViaM6Se|kMZ%|-th>i=MiDo`J& zs3j+_TxZ)#`w|+1Yo_OcS{hf!HM9!B@JdDUAbUNpDRlB=~$moryuS(Xwn(K5}?oQ<*_^3dX= zAW&3jNdZJX+8s%?ublMl4k* zXKYGxcO{NSgD9oTUy{=G5e+9NCRFG8)exj{0ZsCLPR(Yg}daeBVy@K1+1Z%MM-7Ed;{{~S(204wmj~p ziOF{{M`C|@2kaq6=aM@id`T@_Bc{@7F5?xu{iwe4lg<4NiQ0{^{)gg)t>z$oj*>8Y zXjwKXd?7BNTaLHHeX}VmF$7*8SF7!K_THCu*cz`MqUOkTZpM**DerFhxy(iz6Z40Z zce?gHEkEGtAe=slArB@cc!DEhf$MBb)?u-1s)H`;P#x^?q8eMneO~_l;2tdsSE=nP z)pi{O2yG{F-zo2FjZpFYH^_s`%PI5Cpkb70)#mz5aQey?fjOc!GRHSIS98QoT_#we zFh&!`b?B`86B!LKu%$xo*XXMfb-G+8G{6ug0$N+xZMfYDzdZrVoF8^4u*`GT5*ZMb z5Qg^2Wg};|qW@*LG^bC;?8d=^T^W*YNz5JO@Vp#Xi%Zr7EAPQGIJUY-J7>~iW~q%4 ziD}fb`01l8Bm6yptCb?<6G^d}?lc%A#%S{&guBG=jT5$^}I9)kaXxdid3! 
zH7Nu`l&7%Vhk@}lNlSR85WmZ-d~pGfv!|fvxZ-|L#2wr7@%U1}`>rnn<%1~2X!38u z9TG+bw;-0rYcH0uzSjnVVigWXei0)$t*MO7BVsrWj}{kx0Wao4Qt_Q|B`#z}0w?ej zgDKK-3CnwFSu``y(q+XINj|kHWb{f>&i;?md4vb41UVso378%MsU;Q<(pST9-Gze= zhUFL60Idw=z*|q6>Ls4anJzAuT%@8h0k<$A0B*|~$a@k)QWe`#2Imp3e$2JgjMNw{ zo?4{kvBJNW#YhHfnZdwp&9dFl8ww%_Zq>7sv#Ph%H^tH5n{H}>eRrd^!3;3F9Z8J2XO(xMA0y@@d5>va60((d?-Z!Y=f5DF^#EI6Jy(@W zT#ICyJ}ZcULAP;bZ-K`l3-;s(Y1_HstLLPTblW@rLg@=}zP!d_i9tJRnN7`>S{BdP z0Oi%pbzpWPGi*;;66z-PIPG)x3O+gu?8c*SwLA+mX3 zF{Xi|6=S(dDmU3P9D!;`E642_Mgk*1mEaLbtBBqg$tH8Wf-OY?By&M^5bn#k1<=In zag5drx;^{{ngSyF{58Rt9V)_B<10Yo6Ly#Hx$sy70TKUQc*xv_e}phuuK4~Xt~5k} zAG)P~0})|MY9ufA2n`++T8x^l%zBe>DADwzN6x8ziMo$_Rs@mwXYznZ)I z6aolycV_{uITE44>?iR4x4F9?(fJ}~#&UOW(PWUjdldN19J(DCIfcexb9Yao0e3W( zaCQ}mG39+G+Px}&9@oLwnaM8$nOlIt{LMnyT%cq=_?mrisy@gA;LS;V@G$$}Cn}2! zj2sU0d$L9!%;SRt_~82X!H2Rwn8xnctMaS#p#nbi)z5gS4XH^oTZ)H9)IOiz&R%&> zieaX$IbOgulMeH!#BVL%>y&SH%9T`PFQ^ok9sIT(z|_%^AYP=-ebvI)Mh;Qbx<%vK=LtO!X&=lXb1NjOGgu{$-xbCsRdT*VQ6GHs^c89i#&9M@ifs#b!J_=f_&8qa0DAK%Z33Z{ zm{%ZvkxFa9QAmX@e`MfVGpBdD5fBk;u%^&%(A`{U%Ezou&+6wx7f#2+bV=$2a@4$ zK$?<-bg>E)!vX3vRZ&4dTG9(+a_)S>6O>Iqy+;nJ}qL6q`;h}K>eh1p>9##FNc>&Ji zzmB`{9vR4O6G)Ql^2g@U6m zCX5ND3`6m(*{2(w)ZPN6nQAbbI)=Em!`&|u=sV00yw*HE7HwmcJg&0sN zS)gHb_PVG|vasEiW8T+{y=%L6iU!9vZ zPV+Ud5jAN9c#YEwe}G0O&dFQym}?*AKnQZ?7(visTF}N!gI3LL5~rI^_4OTn$XL zvrR#)DJ9yItmU04*V$%&Hap%n`x-wu7C#J?u`t4D!PHwMnv^#H;THExG=)FkT2yF1 z?5dH1Xy^SPWUfG}1-)S&&HXd>b{|8(q%RG`Nre592<5=z+PV~z z2BZ}#X>DxfQa(kDuTgs5fV49~8XoCRf;s}jG~6#zP{G?YA(z8Z4KC$_X#xu05(YZ6 z3BdVtg}i1;&U{A^W9KQ${}dhTN6$ah`((kqp=FY7W=lDwCvwz`FJ1IiJ&eIq5;k2< zdx;V*n-Xf^s&9pi{zOrZunLlAds-Z|z-(gkgFz{FiQT=E(uP=;nPDPE{}2MXSq zbx9$gsQRzx{Xoboz7Zjhj)O`c4e)%`0t2#=IGFU25bl@|M5FJl5p zc|~dfCA?pg`7w27LV&vqWQGRt3pTz3)Yv4b?G;p)kUN7~jB)dbf@*L zZ6&PtCb%2}^B`4i31GP*bO5HW1`PuhS29O@B{pT)LrqfNbrMIKbI{Zr{Y!6pO9RVu zmrJ>Y<;93&xX-dKT8w}UaYH^D{F+vdVZ#?u_MfK2Ynn-%X-8jxGUaOC(K3lL^R!I4 zn&(kAt#~28AFsU_MQ0wC7V?$jo>;^#W^Xj0K{N9bK85dDO>YY|pFRmR^dsKVpM@*h 
zj9Iv>caFaUP`dD!yo;vt(c>MIL?SR-Xn8El>ub4-Nvo{BfuR9io=qPP{cUOcMS9 zYT4bq9|V@RFU2nAQHUQw58#yKnUbH#q2O#LALc#;a~H9HJSwehit$7!o7)sh3!$t7 zDCTV1D8kk+qL4_sOD4tO4O5{F?3uyzr4T^9GMCtYlE%4o&ZpS1%(ilgFjhs{`zT3c zNYdrqVC-E>dqA`poABKvy+$*|Y#!4xDKhn1rfAJAS|%l?3S}#WBK!^jno@YJf2hdM z%LM?(4$=pJ4T=XO%a-3rg&Z5sknO>NehR^c*&gAC(PoBrsDx)yN(7Obw-J*3Hd(T0 z7P)^5kz1<9xKw>WPrJv)d#IXmBP6+*l`{Y(rk3+`C1fgEIy~rhfAB$>f{!UwUc`qU zUkP5Scwcx)Rg3{7Gj=g-`FMp{5Qiz>nQP#^n(4{KB!rO}0~wSotR>*VY)x2vA0tNa zN4Npj2;3~zxJ}~CCt4-~^OlyyAm)m2yJFPqr($1dVIKe<7vsVt)N6c;5mS5oW+34s zcC0k4Rf7y(UZbghMw7=%)u`$(!etyhIFNu`DZL7h98vQEUk5WQ*4!k=41ueUtX^u~ zl(?>#-3PAb(QGv@a1IA}-M$cp4{o__E|E5qu(1NO4}3`Q2aG*OR>v7UTh5e!p+!Sj zo-&w#v$cF2VbXlIA)Ln}I8mB95Dy4&Z#L%ur1?<9?6FJ=qX}eeGjq!0U>jVOt63Cf zrG_)V5jB(Q*oM?>2WlW*tB#leCM&oi1y`6A+;OtWVNyu|Zhi;Dg;yY?>atuk45q?~ zA>y!Aw^Tsgem>;`onHgh!S1MIbE%fei8)8hgkesUvX^J!Sqvj;C*^#j7$TgxXfvYv z6QYf7me>a)LgYH<65;fqXrUPs{+DV*#UbhE-K3E+IZh;T58#~8tJ-KL=Kw!fKJ2rjC>WOFxb=X+GP~ zGx{nl28Rp-Qu}r-*=jj%-u{t7d^7cL9zvLf9K?X{&@#c8T9ijj{0axh;lm*z&HO;; zp;#7o_xADNxK6y@`6?`pX^dkC1ylbc6j~0U4MjN_rFh&v;zB%1r7A8baS_H;h4v@S zd4#jnC!D3G6IU#Sx$Qh%N_ioNWDMyXpC6|jd+p`avPqJhp(JN~4j8iG;Ojotw}R26 z^{*%nz`^`GrN38;x!BTiu3S6KX`~15zmUdbgti6d2)$P%{L8)BW^<8=7kK*IU`fM< zrby2Fj;J}jDIsSZ71m7Mtjwhd4w&CN=il(o9Dh3y#ycM%Xxb*`b(SM3@?#v6=Ci{i z=P%;5EPq4(f|~xJPmLj&Q&5H|^18LxwHYw`rV7D@aRr7&#&|Dcf({g>o#R!MjC05T zio!%-yN!Z@62egCKLzAjp^L9YEZLVQYA4MnwXh>T7c7*-nhE1NO5T>2Xenz-dXsg_(0iDtDtO|nm_3*h@OmgJCB+tp=6(p<%p%QoWw3qm}E<@CA@)Uowu zPx_G#b2YATje1nEKU}qI1g09f2Aw5Twc`k&HsKn$Lnn9GOP@?-OY{i`h143PXCrdb7>L-<;URc?JtVo1*Ub~F^1TTk-g{0z^ukYJHVC94o z4(}7A-)_ZJ7hrngV=|u{BU#9fz;45i9`mh8Sc1K*3Ykmrv^g10Z4PIj65|?QxC1SW z(CCx$;(byY-@TYr7btEQ(qOr@f{iRjr5f=*viduJZEi1uHaiuKfL${r$4N z&XVg=^KZGvrwTf}c%eugM?yO0LKU+TJDMu)=M8LCXY{xfaNK}l*lAx*=h!4Q?&H4E44+fVf4LkW1>{S?^U+##Jzh$M-21PRsG748! 
zcgM4a{%+O~EdSe8)t3K=x?{VB?q+v@Rn_spZ*HV7LiRgR_IB6M1T^4TYW46H^9fs> zhgMW@FVT=5vPOBE5?f*L0}HWpM%F%9&Zqk0H*))9H8d6A!hR&!CWhIRzIECi`n{D^ zJjA8(bqaZj&-@z@%;#_=+U^7Zm+6jml##X5i=d1pv*JM~<>|iltMdE70SieAvme~+ z&b^SELJ?M@9cBuWyJjNbn~R_tdfR0h`^bS=*3~!1$BApGh|LkO(n8=vnv2nD`~YBK z!S$6(M$`ejc{;y^ku1n@9t#AJcmWw+z~uI@8?8t$KR77gzr8oPODf>^EY;(LO| zgzlWc9z@yP%YS#^Up&-6r&LGHjDGYXA!^}Ay=8+Cy(-!G9BDj(8@%wQk^IQq>6T6Y zL-oOVM6u4y(a?t3`W;bKV1$ubioeCAp3Qgz-2@Y+zheIdHv*xj8vqVK1g$WxJ?z}g za*yhF+#tO>#DRIT?hNCE$GnLb0{op&VuUA z%~qa5sWx}b0-C0F@y{02nq3!2IfvzP_0_UDF-hP(XC#pUEf@E(rd_ZwQC%&$>c%48 z55mErZ;4vVlc3UQ3s5BrD&vT(G6j|6f?D>bfEt(twWfl~Q&0_`C#YHpqOmG3$B7A5 zDQ^vVqPZWQF!=}dCm~~AN<6lhyw>*J?NoUwb?W!1*7srMEQN$Cze^mol}%^`!ARp+ zPmuDOlnS;8Wiv<_Dodi?VY>VM5w zwIosJqAj@IS5fziQ0K6iK-~#C!F7K{EsYbX-)D=OMz#pK-cwOmB~g#wUr;}PvT)t> zT_|w<;G#QxO?d~PF;LG#D+pW~Cs5yZfS_iJ5OvcV!gXB|^`zYd^)N*}HbTu$PA0j& z2#tZdOi@eY1nN`RqNb58LevK;>M2RoPwgtG=bj{7_m5ELWdsMk`|AN)nQZsRJEeZ`)|guuU6)CN@$xHL|n z{+KQB;v^pq1peNu!gWg$^-Qz{*QY7!st9#?7SsM4p^8YfU!DQdO|QJTDUHZP|G*t6I^$xg%>MoX`Dd)K%sEW79r|(ygG>(K0k?i(U!vXd5YR};Sm*h zJBta_uc9%Ws~c4ixHL|nZe$C*n$Cv-1NBrz zEsYbXFUS+rY!RYf{IYP}o&kFJe;y#F$qefh!*TFCuqK}RaNqH&U z!brZ2!?&M6Gs+~>T==e($Fh7A<`KBhvTiKvQbr9Q8n2ZlY&aHWe=SM%dHqSPK12>J z(dJggL0-(-f+HME219LZ1M`{y4jdGinAp?!d~stD?*$^?2eTbCC&|PT&ZO*Wj9;l4 zY6{9MlQ636eXZqzy#JY&iPF4>viYmlwJ64S%6kS?;5`TL--W#I z2V@TK%>72DF%}pdxrZtC@C>;n{`nk5VLBdf8DD5=rZsxVB?e$tla@@Z{ez%jX&|-CWM`bBeve=K!%q)|K_&&V*o$Qon^nGlp zw;T2MpUX*X_AgU7XOx7PHj>llD;pG3OHY_`^pJX~x0v3XLw+z|T+%1XEF=Hob67-1 z>!Ibi+*00SqOr;@MJFHD&c@iqvZ~xD^%mV3K=)f1+ea3r_%*T z1A>AC)ao1fm}?vlt8iLeaKjZG=a_z9fTR5LKzEYLb%m75Qiy|w)ss}d?=6lc^;RmM z&HiojKU?MBpqN^E!t{X3U+OKUGGKyX$r;j!HMxvH`E6maHT_ZcuakueH$#dKw<2n* z%t{#%T$z2UF`h3xhbqVg9;hIpSKMKg5K{vLt8MPd(W51?_fNkEHw^uy6|ZgNe=4iD z?O6i+EpLJ5*KM^+SOv{S!wn)?6N6+_EeB0ZYFh$$3-`EIC|)8(u7|y$EYP!rEEr>M)%WI-3d8e6D5bk>1xb;hI-aCf7C1?B__U zW!^mbCRpsJWKFeX*}7yZ8$?zKku&G9LwAV8N17Vjcu$DrVIj?VL_7Ws(SIp4W6Wzn z#B-9FpWk2T=`?%FwZrV9v=Y484rR0Ri%7*b0b@~|CVy*z&|!W;#AF=)23PZ?{jrc& 
zwFK!wGL8dEj~7d8ERTC<`XY1zrLJc_+CqUX0WdnJZcqq?hP#Q5#(6i2>lJEh?|lVs zjuMiZX`JRz(prRn^z%Zcl|8ki4{MrXpuaPWPZBsHnd3!a3Cc4i(Ab`98kQ7&{#&&W zN}yS2tSIho=}{b{xUTll3nX>96+)a!|0bl*$VVw6JY9zjp2aeha z_TRCKwR=l*cifQPOZ1~bH)1xu8_3NG$4V)096ZA3Kl2SkC{XgyeqKMtm1O+~SMv`4 zy}=rMRf0p9iEx~xh?(!E`mtR$>vHYd5`-$tQ7@}BH_jwH8*aF9C9;d^wjOt7ydqdC zxhWSRyoJEBb!<9>Xq0*zShoW{2G$W``R<4#%};Sj*t6E_NbF!pOsw3-jXk)^^G-A3 z#z5QAJj*}|i9LaSUHN%&U~TPIHa?USKZMHsus7{nea}8)l5u zwwlpW>&<7p(eA*Jl-ZBbK(cN%ULx%HS2AbT!(b@*)MyYnCt#A4^BE)O%&t#?DKSY} zyqyj;(ZU8$Z)qO8n{H%;%V4tjdL~D_y4Ro+%3K`2WpqyKb=r3%$D152&4X`Kc$dPU zpacQMF>6M|NqO%<^XhOd3y8@XFutLt*TDOtCuHOco~OFGhm~P7^Xs9gP>S7|mb+X! zNu$pdOcP*5Rie1Fw@S32WClV!#Bu;33MmnZa*(=^9gug^43f9pKN-U;dKKONLMWe6 z{cL+Z1=pbND3k}c4C8lfa`ZDRq&_;6kH6$hzH8B7W_fSQs>7TFA^6u}%T3%_(>!EB zo`AaJvsj@t#}#D9ze$z&OU6{{eHjIGnCS@fpp+oZKT*y+dxpC_ljqI^p9ek=G9a}V z5(*sTY=u@tXn&D=Zfr|=IF=Uj=RY8B|fsO24mof-CDfA zPkC49XuK%w^Hbgf7_s^MCt|QW<71AJ^6o@Dv7KiUVuOSl#T01otR{7qkpi{T3`Zy* zaqZVd9L`Vtx`<0;8*bP?XVj1{(w$A9C}&tR|Bo0nXT*z()*RJq(WoJdQXUR<%rR@j zBdELkp}S$vqf4SC)g$yeQ5APa!}ek!rWGEA7+C~wp&aNmi5U4LTVOQ6s(|om&l1}Q z04V9{>bK)q`dciaUpbh=dp2!>svys7Z54Nd^lx;B!7fJl;sHWgtuoHMv#t-2xriXNqjtgqPDVJ2`|k;_^IkOY!MP({)|K@ z<`&~d!Z64?A$ibyfHS;jjid<@$r(AQedGo014s4xa@3G7Q{FVBNz6uYZ<_t1G=IE( z447sGV|>ol8@ zzttNDrM&eJ5+GpO1n_lnz!TAcJ&BvDuYB(CId{68)rF_%0vHgfb~N&FsoZiNymp=0 z$1L&RAtH;`eYrV?{JmVzCaJd~;&d@)bj_YM^c#2wEBHv8H!9aWg#vjH> zIxyJZ!AP)rwzTpGpqh#vCbV>zA-E2EzBjDX{{U6CI2@47w$j8kW11Gbv98374JThU z=7e1s29Mu8DWa2v(kJLeX<~0MmS-gl{R%#KY|{u+*~|{;?w0W1YHq<*k5l%~1Z5mf zhTHM{Plz~B{px(K~8fCK>Dv3~e!*XeG^(!}E3N z&(QqajH-Ci7s1I)qFWS99nLOXm0Q zwzLrTklsTF;5)JmWo(P9*+MJW8JgGNZmj6biX3Ux%pK>CAbj$q6zF?u%%lJb^`Z&Y@v3*~TTo=xeW zs|!iJ74l1u{T4?}d3wZ6Wr0qJih9EI8Tw*6E9)($6M>1&T0sZXq$0&0DI(8`s5Lx8 z+^(LYbubdi5Y@3CXs7Ze!#@0eA+BbD{D{vye*o_Q6pVrvf{4gOQ{G|~O$+Y_1JbF} z08Jj!y`P7;tYPsSD& z_TCfO43TI8uI4iSyTsl(k5}yt2`5V+xWWz(nWKngk{*Y!Tk7U5{`e?OWwjDONrT9e z2KtJiZMQxL0o}AnRPQuR{4rgAbeL)Uv7=~QWFv_f^Ux!>TL;%R6Hw1h+Gi@4bW5D4 
zQ;LgY4_RK3Ez0tW>;V{*$R{7!w~rVyC*9h_nsHP9$E@m?!uG4HF$Xm^t z_PPS0kxKXxSM!bi;p5sc3pjpF1i15cJ0e@CH>AANa0~8>Y#|5O zv7z!}d1qm+<_aSPWE++A{|}KZFb9HG9ThG_%xV$~`+(7FbqD)gkEK6SfatVIXk<|- z?+0&PL`u71K7|A(9ycp1h=rip~X>t(%v2>?8>Cj0|#Xpgb zF=qp(e-F3Yf$3t{k#<#`7*XjbJ{NIpwYp5-O#&>HcE;c`{fH3JC^L*8hAn(pW;p)8 zgob!X{8(ieB;yt(WO%vCu$=ep#LdT;#pgdpD>yU78IBFa4-(m;73e7=ic6Df>ZcT! zyK3446hkGjFmW!c;~vjW7Al-6cF>3J`JM{>Z4KfDFcim8t?9(n`!d8KlZ-}5qPmi8 zMHMF>Zij#ULlW)^0XQ>79Sk%4t1&bMD{N(PCPyNHOP-h&clv}R-i;J5g(HRoI&B{s zW;y!-DdGs7!@P}H8kAclu6CFg?Dbi#t7p8qV5+cFYP&NF((m`yYTG9keKBgn#FOD% z=AZ7UDUATCWf;xJbpNTkf~<7nd$%0jy87n=2Dqx?ee2$x^8vH+LAduKlvdk?D+md(SH)D2 zDW)qqXmG_e0626e_^x|?lPRar@RX7E}eezH_qSZLSapaz&ZY`MNnOAmyLW9)Q0V@ zg)v)AV-DMx!zXOW-Uo9DpZ}WHC9t#rTVS4tYJ#xPpk)!yKTuw6V;&EXkW7mjb3=wP zr@V960(suhpi|ztxb^$(dCr^Q2r|y`AEW@V&5_!F7{0RF)c2#-uy(4{cV*G*k8*NS zBU=RGT!ve}pZYRHRsBZ8B90_rqPxOw&A#^`oZQYJW5?&4@$QEBXM4HOCT{}g=ufN- z$d&~+UR9!+aU{%Tq$-AB6l~tR7YP0+NGyCG^I~@ji$^!>vvBb%*vcM z(r?Q9N^~%@fsSRPNEcR%ef|q{dlitZ?jZ0XHC7T!Hfeq?I&ScRq|13<&|$2;+uh`8 zx&M<+yX)fpHP1|!j6EK!N^HbB1SrbFjaaFQKfVpS{{(M7`a7`n4ofRJj;m=VR5Sg4 zRpK;UDa8l*TgRT}b}fsPYoxx140_6&2qsjul8tuxyQw$%)68|i-zbnl{*IOfemBvb z^7sJ<84GGI(gig{Ks8FFs;(qJP$`$C@hCG_AwZHPWWT%t4)Lk%q{B*lPo>J*ha6IKaS$@e~N?-o_d&@bm=m0wynfVt$4V1xB1W8^y<=SdG>~#UIX7TM<+PKbqj*EY( zMp21Ct2gMAR4E1r%4Qd1Q?)ytNqKKl5EyHPtfsuTaZB0d)Qgkj-#F+S`6^Y}Ly$@T z30)Rn>O~&Cq=#iX9JX+;)WLhXOt=o>7&YbbxM#RzFh+?xK+5CM%#fcrhV5mp z$Gp|&KRlKP3DPJ7r8~}}$5Y^awI>FIC$@L#+YdoLE*vQR2Py%y5<;t)+09mw7pA<| zRf+vDKDJ$5#icyv=@=U9}o#+0OwAE`br!|n)rx`a{R=|raip3i^qYqDpSP7xpP zg7C~P=n<%m?Rm9+zqwOh*Vgy3*X4+sXh()kC(G#hJMarWro!(!2^_E33cI1s2*-8=g=o19`a(|BXu9MJT1bRFYWk%@3 zaWx0C0?OLYX|TK8TVQsyb=%tOX1L;07wzV1Dy1Jn8BL@fKUVX!pm%9uwb@S#dQPlw zRi_2rE-j+df^NTApk6SGLk%GI z%tU3ZbH%lmt0xw*h;=KMg+-*XcH(GO)oA0y;BKCBAuWOke@CrwA<1*e_2V_dbzah9*OaziJ4vdU zAzyTLP2nL0C0~FIK%D_lW)#8#yQ)QSM4b~vBinER4WC2t??P}~WrH6i{x{PK&9mwZ zxt7Ygx^Y4#sKc6}%N%T@hJO{Jo(Fo5=oS3JnsdtgD~OdK5WFE6JIp&g%UkZQSHALZ 
zH~^ieedk8SO@iWFuRn|a8Jw2XG&5$H6D2su(-3>glwe{x-s6Fn*71^U@W!DcV+iYQ z8e;3~A;q=8FMF%|5buG8LBLFhYphaWLw`-8cc?^Z;pS{{G(EZbFDRQ^7|(CSKZeao z_WCzoF@}d+%{Y{aKc>YKxy=bDA;lFlxWzx4-Azq19ohT@^UiqkT?q_&vSxpPHXGv~ zEoWWg1Py(7a|HykN~U?JY9;}?&i<+LOB*)Ns0Fbc>klHMmM>WRKkmLgzOG??Kh;L# za?Xx>Ih+PT>zdL!DMg}6LQ3509voHopds2KB)hW3t~hRSk9(*~iT0Ev z;1Ji&R<*_fY) zRvmC;{gPTV?o{-PE&h=$#%TuDGnNhR-TmQz#R%hg+!1Ic$9f}aq4JvJPadr1$o!;3 z#Yx3{%{VeX*ECKj=3NrKZ8QFbCp(Dtv|(~{HkD@Ptuo;;P47bZ6g}VoaR!`lCmc&3 zK?uF9Svtt#6nSyZOX8SeD7N%bZy5k)5b(^KrM4N2zjq_3@Qq`+ci%^x1FU zq6B!|LG|)x^M`p*l*LS!F~He0Nos9i9V>5=`kKg&b5LJmV`>zBd`mbl&LglDNN2+U zmyh+QFPop`L-j+LuGQ2;xJJEZDpp9DOvMfxWz%${Vg&Ms2zE=QADbURaX*Vx^A?x( zyA$+f)f0l64P+Y7D_t`+&JLI##YVRh*nFjN^2vOpadyVMP2<}Y1)>3>9l31exDwEe zF%gf3xABSRr2eRQ0FsQ7UmJFzG?)ZA@iU43ATX(t>6S_NL59s~texLX@l|k*Dj_Q{ zd1$0J6XKG^^n45%;;hg-1~0Pc1Xx**1RuSph4hSPwn|2Bu+iBzlx8ra@t_TE54V9EC`0G0`+x}hbLh;5nn~keB*l5IaTY-vrKXQSF=T`qIsN2>Y8VBcJS8oxC&hp z)3u(R%!;cqthsWBGG6!t(77wwEUA}T31&_}t^T(J{9spk(OgI2fF`Rm`!^`E0>*jIqPoevxt9(JT$x@M zWyITK5RBG~d#9i_OxHdi$2iuDBa?aPjX4A@;lA_}?WXuR{sscxKiLMRmDJ5_#KS73 z!&(lzZJ#5>uCCs8oz8A3Wm;vXi_RwZlOuxP1aX6eiH*p(hB(iciEk^z13fXfz*W&% zSIXFw6LK$sk5&dDZ-QzU*Imh5O>sG?eB1p>6f7`GhKr-cDAL5Igfa z2zXaJ8VPwrhG_*Ar=+7vWiN5xoEZ<;j8XjKR*SWaTrumQ*8y+e)XAW_5LD(wwjp_y zw6$60Z8Y}}xf}dT;G2QAJ@>GqKT&x;vt#>}JQtW3N!CS81PdxT9++11wd_VIg3JgI3IECBnnld?O6j>zqAU?cXoqhpl-B>5+2OCy_ z-I&kOFu!9=U({)q+{=9J)wptR6Lf{+^f>zEzZN+XeL-xy?n zOwC0!l)Q=>8bi8qDJ{p+IV6o;_tmddD0{!Iv;=Vlr?#v!tI>0PW~LhZS&n) zQm+4AcP>r1nR_X_9rV>B#Tl@qbf@q>5mVz!-k9xR5WqhcGsnSRn4`pK$kp4XNaK`G zGm4piV{qS<5T5hcLoLUayT`}NDQ8JhwS5o~>~KWzF5;a?lp7I4?G$YD9u1{lwC_XQ z6UD;PHxeGNH1~^vUG5Pj`G|diK;xVB$u&yFUf8>LIxFcnS?ML0V6THJ8dha?I+ebUTo_1hQD54%TUWp37@tXb{ zjn`K+Ud~s~!j0E>ZGhM{F-8Bn9L8%Lw3WtdIXhW5GY_2)nBg+AD#_7E^gYUEJoAHf zdeWztLC9BCInL(}A~heAa{+yNM#wz+^wva-&}@-`nN$~VgAzM{I(X8wnk`a2!yfkg zDcCjENl7~p1Yj4E2HsV)qqV44(T+y-kUoyR)h6?2>&DyiGB);h{nkrFca4iAue;kP@0&7#8A!5As9y9*_17P4OARebGptTtj!#& 
z?IW?5QNz(1A_ix?iaoeowjU#j)VxoK+>0QuiwIz?YJbHS)~yt?t*u+CCfpf!$5I}O zL2)f5HPGxn3EkM9v5pIYy&tnKR`lY`nM0av9agf%GIt@tJn5qn;-_ov|rJ9cFJB5pwlSb~JEu?p^ui z-P{Z~r%FaH;kQfuuhBl%U*^d-0IC=EGpo-B;1>`uisaSGMRLyEB0?8UY4i}~;syV6 zx!6eO9@H|(#Z5s8F3v_ay4OUi02i}Jfg3=*-of+HAfV+7TJoSE8LFK0Yg{Dyic0u6 z6hJj?y#30{#W$3TolJMQxGp>WJye)njAO1xg)E=u9M=J;lH$$#T$9ihV^CcN(;V35 z^8kCdh22wNTgg9jFw&_0j9(!$Kr6XrE|P&}zmJ!{7nt3lK2|_H6-T1a zZ7zbNU7l4+0ZFzi5?zgKw5`A^IGdquE?Yf6n%I#$p?GHFkK1Cb=62APtoN$7F&fxt zZ9uPAeClFk0bhs%BWf?vC56kDJI|G>2ML4SVMJpbW0h2q6iwWk(uN5LWy$SePq$D3 z%J%kj7gT2k!TN9?JA<4Ik60y}C_*F4w3Bi_TXHxlX0uAY(pv&-^EWXzOU&y!H;{2=TNN`Axy>|A#+mgrPJFYP zjM2J$GE>22XgsrFrOOaGJCia8M54!n5<)OR88R+}A!jQ?NZ|^>t!MX*Aq95SFzQu7 zmEKVC>%C0*8}Q9G&}*vna~?+gStP;x>t#{g0x^)OAJW{H+~x}i8pn^%0_eY>IRrgJ zLG#JQ%cxBducv5^A45RxeL#j}nYojHJCfwHV9v_I8Wgcfo{=s@XT^Rw2IVx?Ki1a& znT%YkjPyvULJi6;w)*XC^;>D4NWlggFJPN9C2q}hXtOAUECv<`Z#_+;t&?$Zn$|u6 zO?w)uM%EmxO%t1TvS_>7w||H&*Je)DxHM}Lg$OC%Tce|-aE0`nGiBsB#DS?GbncP!4QhmSBJeN_ zpH4!nP@hgT!WrjU5o zd$Yz1?;I$1FXsHH{uC^gf{T&sbn0eP0M}ui?CvJ8KYodV>ggiaA&S%-MxWUss46i= z7T^BG&AD<2En1{_d~xoxai=01c(qKIVTsf{g^9}i?+oBQj2PMW0f!;5s5*hOP&{+; zH?G{};AL@eAyUlAC_Xix@2czO4#6Y1-q~BHD^j>JcLge^uqD4Iu&`P`{4tr6sA{CI z8*h_q@yxGQxUH$D5IYvfGmKUXnp6uKNEMi`pHeL#h1;Sp|1Kj*P3)BxRHAAfLcLr+ zsRcOkY1gpcWzu!PH^KEp+XQ9^hleKoHvgQP^ z#wcqRr?Td7*c0YDtht?=_KjFsy*dO2Q6iqE?twjz@@Y-t2e8v*(Z6Lb(0S}-#=)-V zHH|jT%o4gK<_QnoeHy3zF|_yIEpFl_9JJuE2UOG{_P8cjZpb$0UI@8dO$`Z|$d>5{ zWPI(8KpFR(A)D4Fu8Q#WIAmi4CXgzi?EKj{_(BR-*;kjbyM5TSRs#gp=j%YrZ=Agf zU!!$&dROFkpbj$?T2F=lD@4t*fldtf?4vn@>bZ-sLAk!M+9B3NQA_R;xL)uGU&X~} zkJ|jXm3)(uY!B~6yrkCc?1pFv*iU0&HpSU%#APh6#d+4m)2%ISkqEhuF6xUw2Jc} zPqjLoRi;d5yk2G&bDf6oiMV<4WHfwdmQhj9K*wEs`zmP^FY2T7U=ZK-RhQ}3~a0<*_>|bP5oI=CnSz!E=Hu-iUKW=UEKBr2zhp^ih z!dTPToQ0=P_7X>v8n2~DbqI3Fl+l)YZ~krP%H3?*4w@zt-Cr~=Wh0EE>_ADgPO-4n zKYkvE<&ku1Rz0=QE2c|i!Kc%^v!H$PL}>L4F&{Xe6*6A(UO1jPCgF}(S;%k_V75Ht^$HgB<^VJxF<&l2jxTd821dc)2Zpw;GP%iILLqK~2v5{~8csblChG15wccFAT>M)1G63}jb 
zPHn+l^(zz%6{`&lA4unC>Qa^Z4!l+A&(!CNpTQZ-RmY>MPXNuTCW~T*x!@e(_v{qK z(kJ0%b1k!i1AP~vYb|t+>b#iFiE52SOebiZ!)Q+Q#7i|UHsS!peKYEJhB}hEO|&k` zS3h^hzCg#mASfk&)g}8l9s8n?vDcrT&LV|7_OBc#L%@djW`P`uen`uu>vN2KK~TH! z!jA%8>K*$tQHMDKc7p@*g>WHC*$GtW3`mJ8u85CkFphIX%^8er!u}76L)aTVjDA?s zmYEMlymVT8Q{&{CY4@bJ$Y3rr%}j%BnQs5SUue4Vq(-7=gW=j#>20#9xZFoJN6HJzO6^roKc39lqoa|vKys5p|e8LBXA0h)r(`55}R+`mRLr-SDubq{haS3L6l ziHG>z6&$D3yHjxp5(2%XxcY@5tFxDOfRmyw4Kh9GcJ?}&@oT7f7s>c7F_R?MP9C29 z`gnjrm^S*@_aEE`nUNI8Vw%D^&zXcZCv>plJy$&&IW}{3E zt;94pF%Av8PSc2Ksx&S#G7)jV4)LtKSb5ZwAK1yzIV^9Mg&UEY*U%AD$hvu9KkLr5 z^=+wjjAMPY_sq-Ev`VJ^3=Tq0OBtU5wO|{Krcut!`-~&)Ej)|k2g$jeh3;F-34wME zM2?OKLCi#{=RrJvVwVAxBW7 zbBM8q&!@kCxbV<-K7BS!1<#Gvqkc0TYDLR6TA3{8MsEcH&y8k4T+C0Tii+|rFF| z(e>%|@!aSKhpypsqnD!sG<+47Tjr`0#nmEbhQ2>uSX&lktvxsT118Beb8hs@u`sG@ z{M_hN)RL;bNO6ulxo=RAmTj3}&9Lv>=vPn}@Q=mJ){tGZB?8)aCS+?|jyw%u=4zVG zvyX=G{O)t3xkT}v8@(UcVdq2lgsBTlcg~HT!*a3Tcy4sx5{#Dj+~`(*z4bH37iPeWRE6#6iNQ@T~150Q#%Bhj^_#AL&t9%Y)dK#mcS6K^VU&RNQkL(lKYYI|}Ai>|AOo?kjxgxBd#vV(ak z9n|yxel|?7R4zknvwo);Y(ABQ^W7y7gGya8{Rkh#EC#6Yy5p zZ^Wgx^^TGLI(@U?6&GPEko`eVR&F2JHv3NV7K^9R`wgZ@^mHNfoaSA-7mBzWfVijhd;@T43y-Pw24bTlh__8?R?lCgUfonYohjmsEcgLu8)$VH-8 zWr<(NVAYKuou<71zD2TIX1k6EXzpRC2@T8o#MjtJv`(S8cZK*@(M#c7Yt=}0b3N0Y z?bHB3K|%?|K}l*p&Qr@*P*G2Iu%08eo(8R_AXrc5hU`&uskfdx50oYk3PN>>Ns7); zsEi=g+mhX!?S(pAp_*94+dVbN(R$me;68PhLX~>=scQqutbh_w&SMxIpZ}dAUZlb) zqvNeiuNYC*=tV(%{~jD$`Gsb@THdVJwKRmd7BjR}zIX5CV_aVO+T-i38sun)_O>n9 z+qcA+Hg|e^J4kV5*w7aYJBI_m&q|u^#ej2Wfp*>Q;no(&02z=J4z|P*kR+M?+)e^6 zI>=uuiEC6Vu8xE@h6=I0w}fX`Ed)qa+?p zk*B@&(29txjtgD>!EqT$s(7_H^Xpo|-2Hii(<~H@t}KR{``tm2sP#gTZ%{co3F;LS z-YNi9$o6`5ZvYH9%5HFY$;;#bkZDXwa@AvV`3}udN;`CZT4x0Q3(G^1-V~yZs_1F( zRAa6@fJ*=PI3)8sku|(Q+3$piM03}o|7;9yWAq<-<`0(POen5?k-n}rpsUOz?;~PO z&h*8v?m7KWCga{r5qssA!`{pG=hEr>M3;&WVa?GHA5%&#-5W6|q}f)xRAjs4PQo>U zsP1vxH4ro;muZD}e%rIium$!lSL2kuP}-&N*BYMuoQ5go$pi z^7K6tT_VKQ^4b7D9}Lm{MWm=}77B+A7ZpmYs%)zkx=xqcloYbaSK}>$r!PY=0R)W# zu54g+^?gSaaBUs!f%v4cZVd5S 
z<8l+SF6p(#G~BTt)u@%MgMmpg)!s#pqYsl&`*PNf(eWRst0?Elcn{N^1$d7PE_4N9 zm2JLFvBoVpa*21iWYFfBKf(0V^k&L)iTMQ<0C{YRSwU-nyo9ES*}Y3tL%L?(&~)m8 zi6Wh}t)8@}5I2u80mJ=(ecq`ATvwQSjgwENM&ndpQ>pQC#?RF_$I6sRJas%*$eiY# zbL~N-6g8E1ZSiV3X;jxTj*JMHF}-c4Ie8WdikyEt;)V)uv{z%afbc+95PzeiGq-pCjTbH z-BUF%-I1b0_7*-5ks_@fc*yU>*z;tE+N( zXL5zD`s#P_!D#N!I0x^2;lA`*(frx5p2Bu?eg`6PecQJ%Gy^wJ}Gd^Zb9 zXAoV=8KjI?%iC9MtV8ryPf|1sg~O8kz51@dXKI_QOB(9Ti04LY}<4EZRamro3mLLM`Y#S0Q4~w zi*L&&3D7dLNX)ewgQ=3Y#LSgZTViI}(R)H^D97eL#LZnyfK|BJ62$E@%5>b4)y;@T zq65`Li+#e}!M%f*QMzSpk<&wt=T*QxCB+APF$z9P z*{jE^<(PI|OKk{GACbD7lLxuM)3e}`#`3ZjXlQW?TSJzYk?2_}I*Zu?EHV9Tjzmw^ zYU;h)(X~(wy2lxWjb7+3Gbzo9o2VPsRCK_;A})h41v*Ew@ro%Q+MbY>~9X zYiBtTTn`?HR)pN@>u%f*G9Y_4msv>nFn{vI`&rTbQEX;wJ=yn_uym#QOw%|d=06%2 z5r|VsFzhc{x>n}l@0m&?!~clH#s2sesg2A?wi7zC`<%=i z&B=4#ki=K=*eKCCS?7N(AMthWb6^E2tNh720a0El8$tDdU0rUzYY5KJT11=xnKVuv zwin41juWd5?t{@t4iB^1U5}>Y!FUF@D8k0d2&qHe@qVVX{#tE4Igi$NaGM55m)}<^ zj}|lCQILmslYuKk5i=cw1tY_Kkr_Nn;!hC6&73V&$YPuC7g>!nT+U*WM~{S$P}AV>+$#d5!)Dd(By={xKvsF$dUxY<`@U$# z@!AZ-p2qMtENFO|m8l-{#N**;MT(T(Map=!900FtnLw(*y5t%if#eZ!Na=RFij~B(9*dk9#aIN0RRc5?8o9lio9{C8=W>@n<;f(h`cX4$BLPe<~V;p8vvTOq!;ev11 zUJL}biD%cn<_veGFPy5s0gNnBpd@wT=g(>5mYETl51eUisK&)!ZN)h3(_>ng<#hs?X=hmH_*WP5c)8iz&Q#H^^8v5ki>!$K3Txd^G)b-F| zr=tasXT-&~G+gGOm1bv66T6rraTR2Mx+*$J7Ovj8kjHe<5JwnX$`f@FEC+JyT2hbe zMxqDmB3LM#;h((|_00MM@xIirW1;J{>B38ucC1o@tmYMvTpD{>eFSPjo0ZT056~tp zU6S(@hos=B(yOSYGWj8XTglp}={nJLU^vY^h$jw_&EqojJWMiINRvdhbOrTh6=|B5L}x&E6k zntfM){}(+pXA<*G>9RWe4#b2)-tWta1w7tUCm3Ry9c|t=czTzdiTQZ(;7i~|+L#Z@ ztfo4XfLyQ*-mBg6xFRSt&J0nhMxiw5C|SqO4z8mFMxkkl7=^^qU$VZ(b>@^A(Ayky zA?|Wb+yEXj72Ko9xwFVo%;Y{58ElvttrnfbLk)8p?`1=&dww$#a=`6;Io~T5tQF`5W3dO54XanfqwOq>Ky~v) zuIGf}5ci8*&uxzqo-cAeioa98$n_l>V6KE(a)rJb*Zi zyQhIip*FJvikl6kU2FCh+1Iv{arK=Bew5>i)4*4wesdQzmnUXN2*34MfPK=7svo%?1#AZ(4Z}?MAKE zaJ4Fj!A=Aq@BU#z;G9{V(n^-l=7H94Oaqf>HrLe7ivjwf?lE=u6* z16I@d2|apv&{S)Q@w<@>)bF3IoIL;OqeQ8m%SgH2VWC!?#cWjFVXAk=cQWQv+Sakc 
z)lZ)OVW9J|xb6yjGVU;xK}#z`xLf4sWW^OIWe>#-0fu>ITj}IDka=Ezxa&P<&INj@ zMY0`C3?%FEyB#E(lPNxRt?ZpSLgN$&b0FfT5CQu$QZv{hvkT(Buf6?DA{;G4ChE$= zgL`Xda|0G%BfmSxBGVq|vdsw0SeIa0DgZCd$(a33JCuy(>=ba&YkYs(*WP{vt3-d< znJL?#XJ=B0kAPrtjys+Lradqp?DYqH?QL^ZK@ag&-g*bH-epi{^DfK@JNL1D@^8=A z-ln5D-F@w?WozVbCsNreLvlOHqy70Do@t1ibwo5~R_ctympNuC3YiO8`-wvBoEVaXCD(it8FZJQ4Pxv-R|dPL{0D zLqBwOwfYN$!ddW*$U&3qqK(0-PZqSyxsOTJP06Yo84Vb`Fda(4*SsAQr&n=(5i(v=0AUvrM(skr|SK*>czocf2j=b zm@d7}N>-i0XrR}twbz5RY9S6({exC56i(GI{8f706|DLL?e$o#dNvnGI@UA9x#)lO z)2dZ>$TF%$hyztOYSlvFR9&o9m-@e~8j03w)r%gLUQb9?T^3UHxmtClRxQMVs*lyG zg~F-&wXLMrtAf4mr&S-TRS!y5ogGrOj!yLuty+i!Rlj97nWS*4K2obT!K&q?Sv>Ro zN2J#s$&*QKAt#e=;;{srcVQNMSwi6#vwQU5f>1bBFWypmT@+;anL->#p6h{NTKB8zt|54PjQVE zqoiq^C4WZi5}Sz^uH8H-R=1T_*PITmB*7g>Gq*4!)^4WR=Ts%6jWm0K&{QCym)~{U zLK>YoA5$#x_qLFG*ym0PO&e%FgpLFjGW2Y6tgtVv7e0Aqb*#0u~KL9bk|+3JnA8rCO# z%0R9(i`}Nw<#>c zGTHgLgM0LLZ0x&4y_oDM>-H+LrvVkCxF@nl3zF@s*at%^ZMAY%&b4*bW{|V-BG*gI z>0;koiEWPc#19dfUSdY$*;^jT*VN(A0dG-nRnRcss54tV(;Xgtx2eM;4z44$NwIQf z+o1fUW7VrzPV~09pR6SoS%v||KiJ&GlHJ_tZSEMixs=OZG&fy4AHk4v0{Iknt+(y9 z*J@jhmAi72Lyhl?s0hk=ABVj+Z{o0*Hjo=MPqS%XET^5>DqVA9dgG0DI{fIhl|ZRJ z*({MjX{wdL3n_uWZVU-L(@Nk+TgWU&Z1c7!{u1J3(R@jB4UBE^Y>ZAu;#pZersQEA zlaaV9JP$&ck+>>6kMmLDj6R4^0}4RD?T}Ojjc81=o9n$sbS?BVhMPaeY!^`o+ia~gMp~hLxoFQ>*pF+nq66L2*)VjzasN~p2qWW{0 z7CA%Zh}z61^_TqM#K~5l~}jRUpEt?|&OW1u_w{;*$ou zqQJ7=FIu8jB5oFw7k!zBiFv=cU|MBLoC-_t4ErR+DGC?a=h=9gGn7DxkFyzv+h?)n z6_E-1Fpf!igr>=CF;wHr!5FiZ#xqE}fu&)3ro@B#Ijk*VZ@#7wrs;Vk`i{$0vB6%! 
z`7^P!rs2hPbacaL14fgK;6BLv!Pn@DDcPFVVlxV4qn6e{*Bt ztUAoU2BY3f*-}6ddJYIF30@}L5B?O@RabF4g|Acd5q5&nzSNU^dxG(b2*&;_C@NRB z{V+!8UKv^nE)7C$>?Lb*W(DVE3|14-*+Q1`UEE#0-^Cq38@~nWNc<$LR-FHb(L+}V zZdPcVK;|>VO&0=6=3>ntBh0H3m)d+B_4dC_ z8TZw0ieSr2WDOC_>u;VkIx4SfjwrJ)GA?g72BWWRk^;9b`2 z1Ymwh_d|$g?`H0bGKech0K+URh-4KN$DvQM+K{%xD8IU{teO({(aPC{OcM;XPiH>l zw{PoO$tzQbOd6GHwdNvfmwMvoNMTRsc>w-D&`1ivp~&pz`_XNQ84rIyI#BBC?)~Uz zFp@=e$@in5ts`iu??>kdVR!FGZ&xVy{pjt4^1L6t7Jnz-kM{Vs^A#}MecO2`%9@eT zY?CXstl77nU0EDD|F?PP{F%L3U~WSF=1C|TUl<%D{MEOecYuI*AC+kmgiyYI2;5VY%4xd0w@1l(+Z}HjZ;J4lrmuA_ zGwcKJeAt;|zAiu$+vt*s$U6RYGshwBwGuE8`OLl+Cu-O7(C1G`@G%6?Sh}S`-_lxF2ndSQwC6^$jhtGbpVrZv%8RBUIDd!+H?Y1SrVTf zc$w|tdXjhX>J07%fNn)M@vZC*RA!UOD9kH3JbmZK66Nh+*yR^{1~0#8;?yz}Elk`k zQ%ft+Oco`!u9aLhbD&d*7cjmYro}8pfCTp5e8_}%5pX%WNHZuxW+5}?YXYUf%tYKf zyW)kZd>s&tog)!GTC>f#xG!u89xABAd7;6{-lhFbCb&s0x zm8?+g&i$YEkb?`_=tr%=G- zS^eDGG#DuIa;g|AmGMa$7lxmYxU&#JwQX3g`B?X`sB3gKRwtO8>*@UQN~ryDvV4Qf znV@$MsM>dA_?v!GbxyGApGb@Ee`wWmCRBSZhe!ieuMpzuA=+ypj`?4( zeW6tgg)?TCYSs0@so9goN0CPOcBnC?LBtWb?XsC)IT*Eogh3`){N zlHJ_Jbb*T0oJ&RgZw3_cG{k)CQ!dfQs+dzq%$ZPE*bs`Zxnpg}-z^A`K)E&7GLCuj zYE9$NnmwgJv^*$bkBE*&E2WjqJO<F&DbOXUZU2 zl5Z9GC}(sWsqiy5e+?C-V5f1>`2)h%Rn(5pA^M23UFLWgoy3b`a$4B|^N7qJt*pk} zrEwv=LF25-L=iXjtAAO&^fA=#w8*wGaVgNuX+j!Q*UwhNLqM8%I)nJrba@@*rXYB8`;@($x$%bC$54#@Jo0=$oN=x3OQ5)C?s<`B0= z^s1FjH?w8hXk{JdF`CNPJP8HMOy*+9tvjG~oG?%kBxYz+eXXY^j zRiFrFCa*&-N2>_zMLc9J8k=a5RM;Zt8ob$ONp^D%-;g=BsPX4tD1{D`JDECPg=!Gz ze=GZD{;q~U(07#12==^2ZA2F(6(dK%OE#oplE?oQO-Q3!8_Df$Au;GTCDh!S1#VwO zZpT~7Ve*70F`S9hb-5RBJJdlEQ-S@onF|f(8vGa?Mz+m73dt}Js(7$b<~GEGF2ZKD zT%G7~=x@aJB828Ha)iG~KI4DC;Qkw_ZKW~Fn{qoORxaelOJOUzn4z!kIef`i_xy{m z_)BD&=Il7|>yq}Ub2k8@&gB5ylj!vTV8WkyV?g33Oe^f`%P=ln!#s~?$SeD8QqLJg zqQmfV2W3`gCoaPHuyl=@sC=@&Qsa~hb1qRKk`C_^m!ewpZ*(7nH3$@QE73u7^iSYN z!LJB*(!!KOvl|QJZlHm}{b(uD;6QD{gF|TfR9wAmNJ(Iteuhk|8|dE^Mxsw487Bqh zh=lU5y`oKDaNVf7+2EJPio+qHWvnmd_69o7S{jn3tmbY?QT#_3iJLgf;?0b|op8qQ zss@LZm?JUxKE|90EFNRr3mJ|X+?8>dnCFJ{VL&YO<$o8wo%qd=ZLn*J+aWYa9|CwV 
zFz{9*5^a-g&zp@%%|Hx^sa}cE`b8|DNJ5(4R$Qt*E%W!Zq7s8r>K&9lfQM#K2e?+_ zSbO-2PFb&Pc=vuzOp)%jvU}zoG2E@>n0ZCx?4Fs2I8DMoHLZzh(>2ZkGxym1TkZ3D z&7*{vYQ)VIP-DohBrL*ol&S@@F@PC~?J%FQ2k1Nb!j3o&dB;$={J%~4&&D`{nw$?J zQJzV61ly5_fj|7RdTi{M#NTwuSZS5o8Voe5D4^yANXuqg%7HPy0Bl}Ny%q3C%^_6t zhrY+)79vJ&#nhM)(_F4`;oOB9C$89gD)LE4>s`ky_C}k?b+k!dDS11sH3FeQ`{I^~}{_7$6JKFM#kpE;zex5D= zKJu>*$=^fszr(kP5*LKz?`6w>kNm?z@@@Hv19CGGyM^T2_8+wY@*^Sndn*3MgOR@s zav$vf?wbG1&d7f~B!6ekKWZfMpA5;jKL`1{h2-y~`8Q2Qek3G+d(EFX75U4c0>S<}@==fc*F*A$Tl_nb|71x1E}H+$ zBgnr#B!5@U-*_hSFG$Uow;RV69#ax;<+<>}ql;tT7anzF(N$j>S;A)?*|=>+taS(% zjKY6r$$%cAFXP8H5Y^ZzUlW(do|aB8skb(W4tIzW4tFyV}sn-mToM^jpe$r zA#QAEHO8Tu@W~{>c)<8W8>V|iEgaSjs4w?O>kr9 zy0LOMHqnh$y0Iy4tjdkmxUpI{cAXomcVjoXu?9DGryFZ@V-L8oCO7t&8=K+Ao^fNd z-PjyA*5<}uaAO@3%lNjv`c=ff#XolrJ#jb4M|@r_#Hbhx1S$Y}=1SfqjLS7_ z^5;li7~Krg-4D6Z)-42(;_6%Yy$xjoU^Yq~5Ldv*EZ|-Wc%DtZ*(M*kfz)%0O}@e= z|9cZjo@bNKw8`7??00;GvB{-2dGMB!yumcBa->cEIY*NBvdP=n%`H-Go1vbKCBvzF(-Pf)2*sRAV!@&#pYH@5pE?KO-#wvsk<7U;Xhw%d-5=E3;}K*F~fQb{9%5 z9Ej+g+-&ZzVtyC#1D4EytT;AD$b&SMmS#C=uwfxB{-<6F}$DYY$p`!U; z^tT;}M7I#Mn{qSInqe;$#X5^)*X4=_AN>f7`$o2A_)svM*ec{g8oS zi^z-O*iE@C;98tJXdCc>QAqBvwh}h0MtR|F*7<~}i&Ol^#vUTAZ#YIvwg?A)R9b~a zcyUZs2FC=gL@yMXU4_N0qYdNtsd-YvXF%voZA4 zj}0tVfT&SA2fU4dM^>6`6iC>LD|9$U@$qd%u{StIL~t1$WUKoBlH46Tlr=~{i(=pS z1alCwit08kicf+i_|Z4|nJg6_J7{eD{C7&~_Rp@fi=;2c#u8R!2~}AA(ZK3u5aNGu zBo4@bxiE6~e3+w~pD(O=1BbO?f1#Q;KOecft>*bi^cnme`w3H3;p$F|O(SJ4X@FhH zD6W%P67nV#IMf)lO1lE6I6kcj03gXxqe*Me$0^q+-7! 
zv#w>M@fU1K_4ECUs`uIZmThTqR{uF8BmZUCRV)S1#bj1pkk!w!IjOE|N2bl$0ZLkR zMO#Ls=F|byxQDt>&vQr6Qp88>SHl^t3%&J>7rbptzS4dEVN0<^hXIa{xVd(4hADvU z1TBTdydw--&dH;`WstLXz` zT_+TtSa?$5$%Ut!v_RJz_=pX5r5NclQKi(Wc5k$Q*rItf^u@7TNM&X(u`}(|M%7u- z0Z6=S-L_z-<5LT~ldTXVu}RCMOu5Ih|CmNGd0=&S0jQuc6To5& zaqMMhe)eh0VIaaWAv-0Iwg`Trs=3z8=1aCSC6v(_8q=Ze?Uc}H1=vm;LEGil^e)zO z?vyZ>auJCxLYi+%n8gMb$2vJBkfazwQs_#_bVAIQmXB3?5{cd;5MnRJqUCf@>GH>8 zlVRO6q`pZ?Jb~SG=7d>lY8Lv~@-2)8N|BJd<;)2%M-*Li>{545z;r9p9P8+APS~9l z*Uc67@s#=K&!&yIoLv+P_ zu2s`5({i$cO30Ze8jp}h8xYN@#2_UoGm_gjAmCgjOKc;B#q`W zka&#dm|>qLUV@3h&Z`l6@hsLy8P9E{wLEYI$}am1E<4nY4iLEwf2uh?5N$9|y_`h8 z*^dk}Lop~+tE>W)xCT`LErWeIR%_W8wS-LopTokj#zWm2-?|z#?uQx^yTPph{5Ifw zW=2=2w}xn-V`4C2$w+3d`Pea-a#%`NA|8Q`MAsGFpIYuSm?UOT=*I}zqr5EAuLe_T z0tR!PQT?yXH1B@+%j(|lQ`V_jgc+*ImIyVO!%=HdJoB0J(wof6Ps2^-SCDQLJ#I=b zOy(SFtTmWphP?-I9UHq-&8ByvsTZ2Y!?1)Tg z@Ihy#SN_Re;mZFB8iJxXU98GaH$>X?($k>T_0nRHaUCS&fq}iorRuU&qz;bYiJ+p- zfer!~6514cjj&s%L19Usso2hv(?vkv1B(TB_CLZOM#xp9fd_!J_@6@elBJ`EK&%t0 zyve#1JkE#Pg-rbeaB9K(FgA9bxD_Z^$?c4c=NT50eiLp#=v06~*e}?|5ugJr;H@DD zT{SG>&Qcp7o9+h)@i5GEa6eeKc-;@5{skCG_X7pq>kWEVvK_NoMSG$GkEKcP+z&H) zFq@A1VKi!T&E_m>UL?8$(tKuf8aq>D-4D`MkNaT-oRW?qbleYbiBd)O6erUqa&g6EO5g9J zZF&owud7kMF1eYPeKy*htE3WwdkX80&zl_4TVX&A|4@Qul_@>>9`%XUPSM-vku6h6SLTugEX+Tc!d&-N>NjCl4BK-=y0U@t)Dbdt)H~QJIVdJVXF0eO*rXTKeoRc>vyvf zn;s{*DiX%#k}zzNI|p~g^l1UU3;S6&S2{;K_%nVypI8?^?S>`Fkhvyxsr^36&F zyseKOwR+z&K+BFXXl=>C4b#w(qzaR!88V8Jm_F-xy6?)`E~vi z0sA8xrrp`9!#HF?Y_K=?nS_lA;_k^w;WG&dxbE%SDlVS5REVjR_SPgU@1;q&7&RqL z0wp{WJr-$xlOPRLlOVd{H3?f2K|Pt!uM9CNt0F{jOaj~AjY*hiiF;fSJxapZ+$_5w za$f&9d-8E_^wA_t;DIkJ!N`4@gv5t12`dqx=><#-Sq80*HemP7`50#!NM*`jb}T}f zYIn*i=taEd24n88QC4sIDTZA{MMPM@b-6Y4W0iPbt0a5am z04aU11a6RQXKF1_J(8jAtsZ4_%+RcbReZs@%D$sZ?lGJiu42m$afj~8Ky{&!+Q@0djYI0Y#XV}4)_wb z9{sCAi4-l`Si3jFt3?M^;pks2s8jUk8{k{N`gjn8DbJoD>67w2Y72xqt|lO}D4sd# z$n?sy{Y&A>v$eFcDQ zPyVqui6YTYkj#^R1S7A~y;8moboRubuKcHV%4o;N*dlYY~=!ao8`|iyMI{ zYmxVKEpn%nuto&u5|E7r$OI@CmOog7aQP$L@UD^y3F59rNbFmSY%1A~4J}g}Dq7uJ 
z8=B*n%@iBzydNk)O|A`{#VR7vT%`GI=rn1du0^C3-nGcr{aq94u0<9h4R$CC3lrEp zwjbsq$M2|ck0%~4)lHq?HKC#J2ZoXmM^OSM)LDzX3Qg+M_GBB%8jhln`!t~^y#o_^ z4uY}qCmNBcKDg18zW9hkPvWc9WhJr4=?gR0zrZ)D%gmX0svGXSQ{jdi_XrPGEHec# ztM&pCJ8!L8mALlT!(6UN;+{44s5S**ZPlX!b!K(>9S7 zuQa=`@DK!Z;f6*1OUzPKOkeayg3@kYd0tA>%$s?LyM)h!&^*pu)b*fJ30H3O#Q!Dr zyl!5U=R2kqPu5bQG{S{PY}%=ww4)I>r5>OI7)M1@2vpa~Im+abXz#hn_6^nolDUzm z@IVsokCJ8|23aUl5^Fbk#KP0e7YJ$0mPlg*-(niru)h4=ZU&IHu2nj?3XFB)kAaR@ z58!F;kwWd}R$KU2mh8d`A3;W#AKG(I+we0h!Hl_QI@R@Wc<3`r6Gwc$)pCP`lZS zPfNV35G*m)=?ufH9U(6%@J7yax_qy4_J~xyn?k`6^ zJGmDBwv6}zykyxYY}wDrTPv0CYFq~BHBbC%(3*IQ5K87#p3E}@=yh`viN-NFk}0;j zgAiwR`)Qi!;vO28F7Mz;|BEL*f;gLg5Nm?+?1v|DRzd;H5y}?<` zn?WaoR}eVOn1?v|_fJg|)=k&Aj05%EJo>D5=0170n`@OoWNxku$P#l2)6ij&A1lkJ zvG92)#17o-8PQ?|kQ(;UxbS|M#s!%2!2Pm^O?Qe?%TKtZMOa8KOK(Mr4U zH18oG!khN_ihVAy&ldY^=F<+(!-%u?d+;=m&ep!&$0xp&&52{G5=$2*X^^~#k#vpVukz#`E)nN67F&?G%9edjpP-`8G}(@NPnkBCCdWtPdC!!Xt8D@fJuOKb zj~6C!vTZ+S3$y_pZ%h()c`E$W625Ocdbn-$<@R~CK2cehE-)?g$~+jG%RIlpzBf%= zsDCEja{6zf@3HA8Q#JSkv@>y=TQUc$*u)O9CR&i#%svNeHY#uPRbJ|;JYs9vM9YA- z(MMb0q5X@+z0y9X+vj9FBZI53Ld$S|?^}3uN&NBUXq5igHw&xcqPooSIJ|+yDc>F( zP?t4^KlIH|kMRqK4TdNE%i{Ji8T^Saeo=k(5%}qEY)qEC+>K}6kFhF_-_^xMdu&X$ zIAY;=;SYToAAmT2zRNg&MB&8O_r$Y3@r^z4L7sRn;@{vOaq1$Ng8Pg3$E7=GBY3mv`jmI-*vv9P)fVL$7t zaQATk6^hIgj>QQ{T=T=%A+c)GTS6b45ieXzrNEJPtsva-3Rj|VRSHM?Ft~pzT&=*7 z78j8E-+!&GaQi4+y~2?`4DNJ=YY;fn4hX_66pGAU6|Pa?NFN3lQMe|7BP}k%@waY@ z!VOlq845@GFt}Ot6~27RC=pzhzN8Gh5->6pr*^aHlC;hrp3`ogmy>LXp`| z;W`zL^kHyYDcoX#Bkj6DxZ4!&n`1;!x)hG|VQ|mEmxj(7fg|nO_y}TIlGqi$eHHb6 zqE>&IHSDF57+y73vmm~})rp;HNxWq_9=z_ObZl%@NxZI$o7nh`%Y|m2kFLsvk0QF5 z`)};kOJZ@pgEP-{iZ6`GC3TYYteiHC?=yo1peV)s0bcA^*oa@B?&o-o3Kw1_j#%d| z`I7iPL(bS1P7-@B%qqBOf5Klh6szzLQQ8R`rI}!Kd|_-F&4j6$2_qYC&J$ZGzu9ZQ zn)tMF?N<{-W5m!HAv8t+#!dkv^95j4;kbp>#2Uc%yF45$%+6eVw2VxFkXG9gBuuLC zz$-INp){$5&e?_4B+d+u^e5_R>`=T?#S>?)3alS*#WU<;%Q(R!af(ftrg*azuT1es zpCUPI-@S@Q&j(Bv!W5%0Qt_G;uUzrOQxev`!xgVm@JPINu)m+er3bxhP`oO|BYkN5 
z`YB$m;E_1REPShYwTf4-c%%=-d#YLZ*C2Q#PO%GjD_*7IH7XwIL-9^kye7dTaf)Fm zR=f#{H$(AAABwku;>{L35~o;(@8C@Y|Hdg^o8pl^6mS01(%%ljBk_8c%$4VRfL9W) zZWHCNn<3g>M<*33*#nh~jo~)xY629;UM}IQ1HHtl7+yzv|4~2KhfdZ|MQy`h`y79{ zn2AoMX=6#mAL+ zY97W6`ypv4l13tFBig4rCZc^R^_#{*ZP290`{Dk!r^H-jb3qwz86@jUu^I=&o@Phz zpv}O81_KYG;X$bJ?N@U(l94ga^7HtG)dcG2@^$}pWSsJj5<8(Q+iI;O@cM#!xVf}N zZsb~6P2v>2{whoGauhF5T3bCt9wB%yVa}r^Lq3cc!W6x3P`qr#D^NVrhsx1qidQ6f zBu<&_M=D;1;*}~M=|k~`D&9E3BXNpd{B)r7clnXRzcR%meJI|4o|OLb-74%D!W6r6 zzv6W%Ub*6tJ`^vec$I=j;uJe^sN!`B-ehdCs!1FQHcG%I*Df4gm0}*ghW{0s_L9QX z7sW=WzTcvNta^eSoZPT*bbX3Rcu+B3R19o-MmL1E;~B+hS~$8f#SD}y#(c$?0gR?l zjB>@8y>Rpl=WY#G2yyg9ZhBEj*4(g+dsO3{b8cnU!)oiT>*E>4!zEvp|Gfkq}}_hV@s7!Y_VIDAxzg z!fMhxBb*ld7{$&OY?e!lJw>rM7HrZx%8(ZOr-!69g9Mx9)=Jmz9L3&Juu1P|NLuW> z6+1_;SuQR13)7_)xq?l4$7rR+-e0kY2sX><8^RENPf+Zg1)KDa0ZWVh`GeA$Ji%tU zwAkw__He-_y)zl4#lBXt^97sb(qhkeR9Z1guu1Ps3?bMN{PJ`?vn#O2%e{5b?K0Kv zQYtle5bf0jlFH(B1xSR7kL9oAU*Ylq%Yp)A7sa=_c496Ld@q$7Ah8gT`91cbvwSFt z-6cx{yHY5GRzOYq;T}_28Vq~M?6pDvjFLK`DvEueYX!SVc=7?foQrQ|F3!!c1+hSo zC5O9Ez>WCSM6O&sTMN&=!W{g_FRL+=e|Wst7s`A>%H`!Uy$6ijdmpOjPqCLi#Lrmlx$ePiL+q^>3}UbmgFiD^ zhrs{_tD6w~gkXH+g3SA{y`;N^Z}$o#8`R{Mp(2T}c8PFxB9R;?lIKJUoJgq?ITw*) zy!nNLW+AS)JP9+H*%fxd3}ukZU@(I%7;M5Io56Yr=FzKT?nO-B9Gb)z_&|q8#!i)X$yD(LT8 zbmISMe^)iw*Nwkx{T}^YbJU~s%E_{bE2l9?@9%0?7f>ar7FLtiF?;R;W2C~(QMf9F zBYn!02yfgi3Rf#|q;)Kv3-_72fF4)4dW9o>7~K8}*C24DbtV%R?m>mSTj3fNj`U%0 zU#klUdX3JKHf3Xbg2G*|a5EH+^kHx}E8J{>Bds$?YCEwhBEzs=uu zVYzH%gZ{3UThsZw3RnPZyQIJC&yC&ryYi3t?f$NtV9VCHzw1}^{}kbr3Kq5n*yclp z^W_n;EIM48)SbWUdBw|Dyi&z0N{81^@x~#4VKs?UY{GepH$?Hu6p!>NlEZD(Q}@d9 zXM*67IK?OoQoKQmSFU)Z55+rG@hSz6#3}pE1?vCFQoJg~BYh~|28vfJcqC3S3yTzQ z6=oicN4?^aJ``_0Obf^w1dqficHwfxGm6)!c%%=-yHN3(1dqfihGB@}Empi4ibwiT zysZ^)w&0OC#WK99{;m$iYg0VZhvL0)kMy@g@JQ@zgIvRPMK}Jg`%%en@^^iKn}CZ# zcdR~t*B)DgJnW*VgLqYCJ*YGDmzN>HS?pr7hk0 zyIxg)QoZ8kDc%rygxH<`+$H0lkNhwm@?ZBQoM4- zD^)zwhsxzh#TzGhBzCrou3Roqe^Qy^(S?QfkvHS?V+(9Ny z4)u2(rWnH%BfYPH>?dxNp5_QP%caGBt3fFH;O{y@v4;qDxBjk5#irkC 
zVKwO;1D3YkKm0>llPB0Lm)0k>rD6{kY|=Z6ytLT2D|WtMvs_y2wp*nYqXe7u&Vnxl zTm4=80{i#+yS6y}_xZaH{*@n*6n~d`q3~|Z`n%@c(C>FRx=y_Xu9Ghjj5~e}xU-HV z`k@Sp863c1KL&d-7{MTq!A=P1&U*8JHSf+k1*&O|XK*xwQU-@GIFLahgHZ_7owW!t z$DOs_*KlWT&HODHY{pDEx;YSX~=+Mo5c!eOJv z&calut{~C6Ui-7==|*^l!ZoFV>$N{?hQhTe-0U=Pz4mAI)1l~8xQ;Y%z4m8)qrB@< zxW#GU`s&YmpQyjhpH)~U8`Ypc>!(N4`LnWFz~|4}7oR#?DDN}zf+gkr)`b0jyFcrN z>p54i;qzP8pSAcDQIZ1nC*-H|CtM{>>V-e6L-9%#uP7bfFvS~({O;E_0; zKdV;p>J^Xlp?E9e(%%NbBeAnIcl&GoS(S>{sCc9g#k*JWngox;&T`zvv;M5C<3(O( zC?4rU@eWtK*@8#nbpEW$;{>lw@kk$v*H7^}1dqgh@@F+;I{!`ntmp9Mt6uoCPH*dW zFFKJZYs8=R+%?#R-drc>U@k%@p|@Wl_?%wR9t$=Bd}JY zpZ}$p^tWe!R;l1k?$)0*MZmiCXB8_! zMxXsze82L?__KaJ#^=wPCpWRBJpTYbv@AhYvUj<8Y$Tei_8t`X*bCrl;78ZUz z__GE{6I%M<&$>mivjw|bf7YC-(u$2iiBqZwok=sq2crJ0y%c+pV6$9WpHrD)_rag_ z0rokt*ExdStv_pkV)wzHb+uv-5$taLSxr%CMIZcG+bedSV0Y`!DpBm=poH;8*rPw| zz{S7b}@WR3W< zs)>FDgUc99WN-n4vl*Phpp3x@2T; z!~U#KAa{R+KWituIfUW)Bm7xg+P$H|RmvlrZxTsBl=nq;8b}*K2>)uX@I) zQQ;cW!1danwM^k=C|pw-xL*6Sx)iQW;by0S>$N}Y4TbAexQ;Y%z4m84uW(%ow>S-4 zU;SB+5%stEvsN4`8`YpcYsZFk{;W0@@cFaOpVXZ{t6}Hg?$7#e3g_zoKmM#HS@`#O zKIptZpH-`PWr|1oP=Ch;iZ?;rjR`m=J0vPS$_UrfX<^hL~+ zFc)jqpOvBK=z8*J#iT90@MkTT12io?`LjkUUeErlF2(D~pOv^&)D?*L-Bg@XZ?7I+A;A5k~qaq*z;M9g4ex2>oEcA)}M8o!ld_S zO;C*P{8=|EMtXl%u3~g|KI;a>Nbk?;svu(~_t~FS3E)4*pY_BTpFeBa^WEBzDo}T0 zLw>hEtMPnTlF1Xvlr`Yb+U5cg!z?WPdhllzNfY|w&suoCU}p<qXe7u&Vnz5 zCwe~XZeagjf7b1Vzt5lb=;E~gtik909)H&Sa(G2bE?h(YtY)G=!Qc@F4=}ig!R-ug zW>C+d4gvjHdkkLl{;YXWAM+f8nG9wyn9krK2KO?!6M_1(cBlRE`m?TL{uBn6GPsDr zc?>2nIGw@C431-P6obPU9L!)0g8~M7GuV^CFa|p_*nz<|47Or02tl|0tTRb<5k_wA zj_S|4rHKBl^0RyLXRWM^%HwzVvo2S-dWEY^1J`SR*7*w8sBjHw;Ck)PIz{1TC|pw-xL*6SN)@h6;by0S z>$N|tP~kcit|JXxul-rOD_obtElva1SAW))ME!04tb6y7jcU-Jb$m3PKdYPteEzJv z&+N{h)d6ec`4K!$7khpOk7#%IYvsb0{r~c34XO|oE>M3$emZ}`M`y`Gqe%P-BzBIa zyZ#gVd&4ZnD^)zwhx#XOP`q)1N8)t;tW^`GePxPA`cS+=#hV~_BzBfJZu{){ETeek zibwiTycH9qzm(834c#Voj`cS-niq|A~BzBhLF8}QLtR}^qp?IVZ#e3>Z;ooe*BXK%^R)gZTDIV!V z@lI8|4#6XFpZr-Dqmtj`&)Q<7n2DbJSqpCOb}#x?ez$wk-*Laz7N=ns`u!PVF0y$> 
zXN{fDD!*9tuqS`kv(lDc__N9sFHa5@r9Yo_mg4p7&ni{Cp8Q#xD_+n3tOCU=)%KA- zR4!loyU2OZ{;WL3>&c&Wnd0^A&&p9ex~RH4pEXqRDnW+39uTJ33G2_w7QD&bp3i#! zbau8|f7Ut*lir`zpcvixvoaMUy+7+H#puqT)lV_f`?InYqtE^D{;b;-dyrsv>(6Q{lUDS>pS7=I=LmMU{;a<%HvLk)^k;p1ytHPBV6$9WzfhK9 z(=WBKcfU8Bs@Qpg&2nk6A3H@_(FcFlj*6Wx*xmZGN)@{g{;b!I`~Ci`gY$l$KkMBW z)B3XxJMQ=RvsRo0ugH{>*N{K!6QX~>;6DuBV(>bHml?dspp8Kb0{XK~Uw_T}vz9@9 z%ohwkW6;ImJqG_~@CJid5vV`wG}<4pKkF&xKgQr;2KOEPpO`81^f}vO=@MN<&SBJlZWZEHx@E z%nt0tgF-D`Ly~RV!VaSwI}Gi@tfVx=6D&)#Ln=$`u%XO5rLz2gKi}^xvb*!l?6PdX z*FRq`KeIFQo%hT<^USl)_cPB9kjo)MAa)W{KkErZy+Bv4EK&Td`;*~k6)5E@(gNZ^0D_u26vs+RwT(pO3CYaK#E-(0*2i;K~J8s=x*9XAKivmEbBBxS;*4 ziv(9AxM~G1R6na5sEzZphIQja)!)y0V1mleipK^%e%2?$&HSu7YHb?pXAP&e?4R

|nTKGN zpH-(fS;9lSA+MA0a+sGzs`9fmz0DFH;thElM)3LOGtWt?^0TUiS0FsZ8}eoduaJ2N z4#-n=KdVxBMZ!b8A+NjeikXLCm7i5Ayb|Fd-jKI-IG=AR^AN1^vxr&v`g)0Jt;hEKPy9cR(_U4c-DSan()%(_>_Lu@5A_f zt^KTI;aT}v^Mz;aXC(>`7L}Qwb(Qd}{j7NAO*Qqi+Az%2&*~r;wVzd)h5|EwU%dYQ ztmA~C_OoUR!_3cWCk(Zpl`M>q{j9o~P332`J=Mq0y6jogHl*eh(>5gR@mWz0YDvBx zii;BgKkGgR-zG8C@w4!=iun*i@v}}Bb}X|^{j5}BA4`!zj?eng&c_|cY;30-L$zxN zM~2{M-68CFW}Et1j|n>jKkH~=CotR8&q@+@2!7VP7xS4WGTYS8+MLRf=Tl^m+iwg`&oZ<4A0Lxa*@){nm#xjKdaNF)=nO+g>{XavUTUaunnUNHfSmx(me5dW2?u{H(3C zO4|(C1lb7r7*YvY2`PiT2YDOvCS(ERWk?a^X~-PNY{-L zXTA0$N)?aiHo6yf1Do0l&M^~sgx}g26{(>tJT(JTdw4aqIxN^ajDsVyj zS)Bw|CAdljE@(gNXu;J8u3CW$)z3OuNEf(qepa*pa-!<*XT5)&%Fn7g0sYB_?}Pt4 z#mvu2*mp{!{j8bPmi<$HR^1bPlO&6ukfibxQuvU9@Ut}Gr3ufj;_V*5U7d7VZ|-MR z3ok=>h&S{*<_a&9c?eedS(U=e5+33WdBcR4!#o75{H#*pIfaLKLtZoCu06OSG|>=)q#&Ci1As;!n5+T*7oP?ZtZ6!3eU>Vnk+nPKPz5%R(@6&;aU4xvBIou#R!e%6HyGxf7-1*7(}>iXgYP5rDNg`xJd-WG}~@@mXc_iUlqu|P%tpL-;Jv}`_T-&n+sJ34v?`I=*iOlABW(Cm zLHbz_2s??{*iOkV>BUD8f}eFZ@xuMR;e!8!=V$dUQuKWhN! z{UE&{=R?kcbcLJ-iHH1`1b)`aZISo0E~DFCy96=_k__ns=?Uo$=|&=c)(ZOM<7c(Q z`ZkbOkmiuT=%&%?AbTM@AhnPmAX^|`K|Y66K|X@4fh>nCg)D}=1$iA(Ok(P19Y9ob zs?BA+#m_qZeE3;W=UVw$b81w6*1nJY{j75r{Hy(}7pdMgg`ainYn%*cQNz;|e%2_# zIR%%)lQ8_OA%ZIqT)qMq^!TiPf-4eSp#m4QpLLGlN(5J|zy?eEh7l&oT3}(ziC= z&w9H%zN`P4_h)I;(oyduS^R_~m7g%44=D>6F$z!n3P*orIT8>&^YFO5tS)5AlY6 z$A(1i-efWl!74wiRCrm!L%bnxhVXKjhhUYTRV+NG@DOjv>n^-}<{?<+XB7&sKzN8Z z;LvTob)&SZ`qx`I{(cDZ}`B_(7Z<>q#(9$#)4deHQ3%XDins*jA7m@X| zlIL?fVdZBz_*jDQvl4}8)_zvJ@T~l-`NFgIvtos35A zQl@|3aDy<^epZn%LiV#t3EVV()*Vqke%70Jo3QwFEwjV<{%c@mXDk9mi~JryQS^ChQRWtWQqi zqlss>sh_p)42}%J&zc}?IwmS_Q$MRv*wpM$nIPWM&uTAh>d+7y+bKP#3xy3|Do8); z{Z4$QNzBG}N_NfZ92tV2buF>O^|P)%cxsem`7iWW&Uds(b;+M5bZtplbAOcc(DY`h zF8<4f-ZyQgtjXoY!|ZPUGY{R@YPWcle%6-waQv(pr;#hthRP%&e%3_L?|_Vh+yJ=_ zay4WGBn@&Y3H+?RpGMx#nohT-HU*Lm$%5Ppxd}1`GKxg}tUdI}$InW^`U@c!K+c7n z1xbLM3h4;x067+7gB%HohD1UBq+3DT57`ab4*3c49poEGHHoR8bp}A^(v|a$&uX6t zKP&NMD?e+^$0|Ro^Lze&)^|())qd8eRPUO?&-!z@bgHP~X$n7UyWpIH%i&2Fe%3dF 
zD-c}10vEKO^@-q$1XrlQ1?^`o6I_YliWRt^{j3tfl?$#^feYHtnk%>}!Br}7LHk*c z2(CtO)e2mwe%3vpHg0^@n7UJ==nt3q`&lRSR{2?p*uclny7ojfKP&%}#`{@6bi{Y{ zKjUZRl=Cf|EPg_g%1@ZfhZKaLl_|V5;n`KZVZuwN_2zz7y6`fDhj>H3qnYqBnTKGN zpJf+bmhcd7$Xj?4cb9UQhhUYTl_Wf;@DOjv8za1Y<{?<+XC(-)KzN8ZqcRy{j6h!VKzQ%j4;%GR@Lz+ zn9$z`Zzpim_*oryo*Ff*`&zq;lU;4g4W@0#t{tXrNZ8}E3Sy}xxwSnCB?5ld(e1bz z#!$!4!p}8$9BKLkH(u&@)EZR%&;ChYSmGDtt`=eB$_ zNzBG}N26-6r03;9MgiIxYpLOc;$opAu(ygg2fV>PTf;tiKI%Gvs>6Xh=Hb3dm5112PcOAJQAr1Cj_i6LLDF6XZn5@sOh-|AEAinEF{) zBIjhULxMBq^Xg{l5aOHw4Rp5g5vla@jN^q45T+n{j3xcZ=T(trhs-N{B zsEzZp+Wo|ds=uFg$r&m?t7aSelMR1w_)u#zKdW>}4F%awc#l4 zXxQ0LNGA0@#;bo{xc)x)9O0!=2sH_aH}ofF2rr#^2o8A9j{f%zGliESJj5IFx(hFp zc?kA)Z|aXve;<6h@Un!5cthUSBl&!Dn1^7MpJf-GQ+S9skv2uei!tRq`e7W!i=ZZ0D0 zXVt9dcEZZfn$O1)gr8L?jB^0QhC&)Ux_5uTNw_1+PD-&p%uMZ&Z4 zv&IR}+RrKwo|T`~L3lZqewLGYQ%(J>bY%^dQK(H zxgiL_&pKP!@ys^$vxW;h1V3xzK`xU7W}Et1b$fXKkU| zNBau$Iiw2m5o8TyIbAWuLZfp{SKkX*<#$YjVQ$eoZ|AU8rX zAlE=fLWV(7As0g~g7k$Xk(l~fvk>(NUAeNJ@_WOPr@_xE`2C=TpLKe%%FmklsK1|e z(YOC~kR@wfjs5MTED5me78Txbt8k<3kE!e3o5!Y2r`VRsO`nI__|#vp<1gm7kR)ybR$X-q7zDBfL!JAz0;S zB?vD|c!)RTwG&nN2weZS?hj>HY z4B=HW55Xb%S&L{VjT)cTdjmHUR({s^T}*S)eO0EpXc)gY?7f?^(BppN<|49wR>@|r zM^=8;m;3m-C$N8l;DGyDe;<62@T~kSm+-9ptODU#`B}Y$XYFS>g=ghw)$Zl1lD!chBJlZ0XB zXQc>3?PnzlBV<1-fxu1UXH8q@<7f5nZ`y{WQFJ8ztX)4-OY+e!T;K@!SrdNY+a!iM zeinXKAs<30epY*7$1>a0&$>|9A^2JE|IEi7$81wSt7a!hhTvyiE9`h?oBCNZg&l&Q z)m+#K%r^D25`-OspEa+R&oq(QrheAC9UK{gpLL0_lbCJlXJrXH1V3ws7QUZ#OJ#U| z)*r)_e%75@IDS^UU&$4Dh9n}tH*5j=-`h!khwOvwg!~No5mE#Bngo7U?$eR?vut$R zYezz&AyJS&soHD%A-f^lNyN{(kJkG5SsSo^9i#%X0`dW*6tW2N24p_uCCKxTry!3* z3LtLC{g4@udmwj1CO~e7WRjTrS&I?1f(p3oT=BC8$HUJm-)7}!jeK0?XT9z8_p|o= z_^Q~6oR*ucln+VQ=apA|Q!@qX4MYRmp9KTF%jw{Wuf2}vqHp*tT^5Pnv* z@X~~5SMj#~$X%UuT5tY)!%E?02oLdwe#dO#Wik)JDnF}Kcv-?jydf_|csa~Nu*%OW z7M@dhh&SZz{(;XopLqyY`B{a+D-a&y4S92gSI9gBtNg5d;S~uF@rJx%!YgJTf>nN2 zj_^u^hj>F?GvSpo55X!wD^qyo!b7|vZ{hb`ewEBaa7cdE0NP2T{H&jsaWi4%XB~6A zX)gK_C=nT-b<4Mug%0|Tn~TW$S;^ERBH!D}&uY!b62$ndMB!QaS?_J->u&95#S72M z&l)E@Ydhv16J!t- 
zxd8ECFpoQ#7Sb&~#Py-W^^s%w&n@hpN0Vu_-E-0ji^KD?x?ZI8v!4Gv96u}ND{@8F zkwnDLIuG=-AzdJyAtyslfE)*jg&ai!KWkBbr3xCq(U`f-CDLepb&D;Aidn)XLAg|2~zU^~3G{e%6ZJ|7t($Q7XQs@Uy<&E1fFb z?9{2^iC?Gc2-`yyHu}A%XGspNrgu1y20XLv6^n@Ib%JvW4)FoG4dLbpE}wCT4M<&o zm_Q-{wY5zeGlKl$>Jv@sr-a7d`Lm~S$5&23D2(LwG&=C ztvC0xl7yEbJj4gw#K_y0Z{)61Ci4&+&?xA?H%t&-mhcd7$Qv)b9OfZdm!YdFS;thElHt_itG7rHjKdX+u$*A!y5+33Wc{7Ap%sd3E{47m) zCBj3zA+NjeN|}dXm7i5DymH|o-jKJoiqE%_c?b^4&ssz~X_TM!)EnGPSov84TAJpf z!$FCNpY_y7l!a!m=jI}^epbmYu18jWRxdu5ApEQ%;aT}vwV&{HxAwCNglFYvJt;hE zKg%gRD?iI2JZnEIOL$g(*6$zl`C9u~8N$o5@U!L%&)UyQW8PF#KWh}jO#Li}VAOtA zoG?tsX9+{?XMMH~g=G3Z_!MEN{j36EgzRS}5V&dltZ(Q0_*u_)Fl|FhDB7eA3ER&~ zTTLy=nIEBGBj9IkT+P)whB|&0epcOw90^}4$iHuRm#|}*ZR%$|BkU0TtP_MC$81wS zD_Pj^rGk8aR@o{((|BfMJLPW~e^|?rA^2I>3p;_?rhZnwutV^(ju3VtvrYZ1Zo&@1 z&swmO&oqhIrhZmcB}c-BT9b=-Z->v|x1;j@tf9mX*U$R!C%7WP6)JE+`&s7*u0(Lf3S7{B zR=nWK1y`!T1?^|G6I_+xDiye({jBDKs}Wqa0vD>Ewf$GBevR|9Hq7Nj)!)xLaG&*RsQs++_fRmQzYo5Yz)j<4Jyhu9 zXRSZN)X&PGXp=T1Y(LBS7PTbTl%h}~;Ab^o#MLl{I(`;@RstVFDC4u{y~XTUW}Et1 z>lSll2!7Tj!j5CMsh^c4>=68{9Siwr;+bvgXB{i-5d5r1gq^@_Q$MTpT|SBs{H&hB zPGq*JpEXL@A^2HemGIFdG27J7iV}7Re%3T%hwEoec`Q6X>wy!Me%7Hk!|}7`zD=%3 zw|64sXFUq~Ll75aCS*Ef3M3nn1-X?3e%3K#BkyNDPq(J_6y$M80mKctA2I`S4~h6$ zN2C2|`2MUhSU(DKCFF9*5QrTz0MZZA3vxc>97tElX^?oxeVBhS=&ab{H%d?e?RNK7V%LI*Ps~3^6fO*F#1L6 zH8?!a*0hMi(%6)WnVs>ToZa*fpIg%BW_z0L!RMF;pW8I}9NXaYu?;@QHTc|_KJTG_ z;3U!Ks3`e-VT0w#{F$mg#SCu{lina^RD+mF^m#A+Lwt_@S=&qXt||PisjcIi($Bh0 za87E?$R5k#Nf>_CXu%Z-E?ct5KpwPpX5pHfRen~n@Ct;7ctc)y;T19u!74wiP#Kae#mqyn%FoIdUWxD!Z^)Z1 zyi(>NSmkHs2(Mgth&SY=2(OZP2oA~5x`KAnC_n2DH#ZYje%6cMn)+F9gAx%x>yMWy z3;ldPHy4rhv(k>>cEZZfdXkSN2tO-XcvgOvLwMGHR-*8%{H))Lr5&RV-I`nkx74fK zVc$C@3S-6Ng=ghw%@>}vpA{=SD?jTh;aU4xQNqiz@UvPA&)U!06^-j_?q@B11%+qo zXVnTu?Pui(!_3e6Q5b4J>wICD`B__qq4u+O&EsNm^B&pNc%v<-=+ zXp=T1Y(J~yd1^`AFQZT*;Afp%#J61xb^I*+taLtvQ2eYfo@aI}vrYZ1gD-Jp2!7TS zVaGAs)Xypsb_jk}M`6b^+tkk*DC`jYtQF7knIp@|M z;Ab5v>?CHJ`dNv>4#CfQ`bCV`(dAT9EK)_S^qv=1SxA?1)Ikar;qA+M2$pLG%1pN4+cY^;9} zG7EAar&PQm5y 
zBn&_6cEJ@0E?%T&R9l8&DhPXLX;>iK@S!b<4XdKPw6w`1o0G6q@;2)zsQF_I>c@ zQ(N{=`B^dN@GYDyenOJUPuTqwcYEyYCm>kmXVsm}yfon<-q4?zE4*~(Az0;SX~N48 z9^wso!-SW~JOr!!tZLz92@mmxyk^47VIG22epaRMoWeuAA#dT6Tz>h?L$J!vDivOV z@DOjv8za0z<{?<+XB7*tNO*`h0VdZCi`jKfadIKmC@w2)rofsS&z?36rPoz z_2uJyzSe$Lyzs32ESK=C{j6BwS@~JLglFw%MKRCZ&)WVNPS(`V`n?;f$yBwURV)lM zKkGMPsQs)Pgkk1q?G}dG&x#jD$bMEeft$w9+L7(!XPvXvv<)ezXp=T1Y(J~!L25~s zJc0`x0YB?{58oy+)bX?Mv-0^6Lh-YX5OyrHP5rEH!VbaDTJRtrcO0`#{j93l92tV2 zHB{K~%r^D2CJ8$PKWmShk0ycHrhZnOutV^(<_J5H*`|Kh(gHq;5d5s(!cJngsh^c0 z>=68{ZyyNX&-!Y7cz)K;`KV5&6C0 z-iL_315yk50kQ@1736bB733ol_*oPCM&8dlM7NLj2jn-%F32yCZIG>y%_QPyO+Xvb z(9c?q^-Cd(A#XumhZI9zgggg%67m@2VaNlJJctuA6>=A3JmfaW&5-LMqao=erhe8d zh|#{;X#3vr6)<{H(zj1o&C4j*h~!xR*}x_p_S!4Dhp3Jw4h?pxR91v*^#4 zb-D1AD1F2hMn3(&9<4Qq3NzJpK)V+g-QD-Oo+zh3QeFGCuc>)y3g7F|3%E0!Ma@)G z_+B#w=M-EHPr~rMvISQlxO@dJXy5B5!4(OvP=O2D_qtMWC4wtf;DYwO?1C#7T&V&V zwC~kRa8-h%T&TX+v7oAaFLw>S{tLT7T#;ZD-s^!4SB~2ub6oV4)DG@3-1}>l?V^5ze7>d3 zLvVoiwf;yhzXyd^E?9N=@@tEz>k_4dZVip6#TJwe?JHE+XW8 zoz$AI(+%hIeQo7^eVohJ-NgH>>MJ}G@2iXOS~Abv``R@FCu{6| ztv!NI^RM$zO-#J6*Mwo{eL02kyD-$=SEeuwy{}7!@tZKz-dAT~77!vMMu*6`e8D)B+I7b0!P66x@ijECNb0pv+%wi z`#MtCvCKC0z7mBUg7@{tWIpaVW}A9npH1V)5WKI;g&ogqQ|~KB*dcgd`*Zkc z5}0l3eVr)m5WKIagq_H2Q}3(%UOtKtysv)3PGYvH_jQA?gYv$Pnz?aOEA6|x$(Si< z{CnfC&j`==+Vj2A_qzG+aD1=U_mF$?7)eBYuRo^}eLrM3WIN<1$aj!$Ak~mhN#J`; z?HYOCs}bQ5WHkiC!{kXpzOB;tEbLHnTlUf=&piQZbQUjr~kM#=`Qxm>l zYqQ{-)Pgm|?-4&1T!G;7c@k)g1?^*%39d+Rg$i8IKGvIpD-m3=0vEK8RV27_!Idg- zLHk&<1y?1wN(C-xAL~BB)d;RyfeY2gx(!s7kF^Be75i92Ty@kHg{^hi%33&zlT-ik zTMG)<$6{;C_kHnBY~V9~YsUn$@mq1HHGcfoBx=hd?qU5co=#?WzJ-&;Lr7A22;KRR zf_T4Hy71D3XIJsIPU7xOI;}T>c$v&YaDa!Uzh5g!cv-?jydf_| zcsa~NaDa!kQaqgm;W>qecthUqiG05K%tLU1hqY07al$JQ9^wsobA?yPJOl@LSoaDq zMtDWSL%bnxnDC03hu{DYD?xa5G7_^yc!)RTH4|Pb^AH^1VSOqdmL|M%;UV6Tw{QZN zUnTPp9N=M1Gx4xi(oP!XVRcRCX2NRx)-m%;^UoiyG|fN5@UU*VoifisEAybAq=&L^^!0QJ*>;*IED#B?O|mI!_dPz!o1qSa)3E<6+&o z#Iy}brszm|Sn)SgOY-M!xHu8;uwJ;Ct91-@{46}INBH<)a9}!#Y>k3CuS2u+oJcf`|3RjeInT%r^C~ 
z4&K6%A$V9*gq_4}QxB_1*g<(%^1k?v#18lUT325do{!aagVM)({)TXTtdw!&imW4v zh>vw1=x0N^KsrNChMWL74iXDFiUdB^qT?g)WA&%oUh56%0ZD|M2{|3o334Kd_*iez zC$Eon)DH5onqmDxx=XYJkUfxJAsXa+$hVL$A)i6kLq3G8hLl5=K;DHcguDiM1@Z#q zSx6y?*T1U0#=y_|2~m5f;L1)HKdb!#_*uKIxAL>@Z>RFJemHm%eMfC~R1At*^uRkQ zG}@wh%D>vrdX$QXZ00af#C9a5{933w&02cSE#@R?Pr}V zxDvq?D{w*kS+RmE7hI_Vm(n&024p3zUXzR1fV6cHM?^OhT$SJuACP9!pP~%f&)PG9 zD@u*vsuj3U{jANPHqOr)a~&tD{(jb1_p1D?L~P*WXLZal^Rw(nH{Q>BiQ2M%%Fn94 zh_6ht_z6iWKVb|XQV@PtrSQ^(XIJss2``=2oBLU%!pjgI;sYuo|NX-;+;z%i9)eYV zRn^-f<{?<+XQd0TTzH5#NwKJ&w9Stv<=xc!n6%JJoLNbe!T7%W4sR!{60MHyW)O~`sNYf z75C#kzjbvozC3L>wIpX=je?DUpS5u~SL+z+_*wW_bysmDe5qjn{-LmAnQiK4JtOQ8 z{Hzm%9mi}_KPy?-A^2HkSMZs}Guzb9`XQYoL-4b%7j^=(P5rEVVTa&n9U<&QW}Et1 z-Gm*2pS9p}KGP&-oBCN*S8`+se%4T8hwEn@d3kt#*41T7KWqK4aQv+CBgqvxND>i0 zYb@y3Lau@ghg=4^1TqMc4CzAxKkNIJk@vIiq+3(F1#%-K19A;yBxD#Ql|=lk@92|{ zpVb}fyFt!?oB}xs5(ha3(iYMh(h_ors=f9Hkgbr-kWG+{kdGmiB&L4W z@qnK~wYjW&R1|fw`kuZ6epY;%m7n$Q-<{}pcCXjaZ-@rEV+Q4|wdLvKvyT1Q-_M$G z#lPCmT1oY;Dg3ODuHsHw7BxRj@jJt%f^!NkhbMssRM3C_@O8lz2rgfN3wnIkbAl@p zT%iINw4e2`;7SBntiT2BXE_B|F1S(!E@(e%yx^(?SE;}S?PpytxEjG#D{!IuS*f5l z&d+K$loM5dKkLX_Ren~@5cDUt7LMh$`Y#NJ4l(nyat?KB^!v3wyA0pe|IFV%tW4uu zI9dFJB$b~qn-3`nKPyRiX~MIscqzh5r}gH3R`Hd5d>O(+yrJK*dnk8rGMR^9m7i58 zye#1%-jFv}csa~Nu*%QM7oJmih&SX76J9>^5UldEa)ehPJj5IFnhCFvc?eedS((Bs z5+33Wc?*Yd`4uw{!74widMICy65%1NSJPI#5fLvTob z){7$>>1RFQ;AX*4j(Bo?H7_S;DjOvnC7A+Rv)FRN67|+YoP9&$|fE+Rv&Io|T{V zrGwAc+RrK7P5rE6g&l&Q^@y+&m~HB3 zl@8*g2*JKBfmHFp5HBHac?$j^Ed^oJlW$V|v|$P`F6BnxsY3H+>M_M8-j(Lrr{)(lFqwVT;F zgPe2zfEPFM2iWTTA>R8T(fc9U`ytKy;Wql(*oDVq%(iL(VBp&i#NP9>- zNE=8iNOQB&L4W#R$Eeu3Xvq;%9yF6a1{SzE*zLwqI0!*1%2ve%AG)|J8oh zUaEIZ;b-;9;7(c=H9t+^XLS>tQ*b#v3B%7iNpJ;%%U9rn_Osdwu1Ii&3S7{B)}iaT z>`DYztiT2BXYCSPx!_6_xS;*4t%9o(T%`gRw4b$6a5aLfR^USQvzCI|I6rIog`BAR z`&qryRen|hHt_MYru8=Svx>Jj-p^`DZP`ENXO&*d)i7E7gd~-pu>3;qXxQ0LKybi2 zvsTFO7mJ0LCOpI&=56DJm(Dx{2fQ;&|NDo9!pjgI;thG7gqO)Y1gre4eBosY5AlY) z4So20bC`!zTh&SYQ7hWOr5UldE(uG$fJj5IF 
zw)W=pEoL5qReqLTcqPI^ydiJ4@JgA7V3nVhB)oFrA>NReBD_lGAvh#I>loTeqsC|T zPU2?5%FlZ7cGFyR-vy?*Xc)gY{Db;H+UH5!TtwE-ilrV=5aY9+&&tngEj(*K ztAu&xe%8{SC_Ga?YrbIAepZe!%>1mEg`xJd&KHK6pEXw)YCmgN4?dTW$7kgexM}>X zY3KR)S%>a3Z9~#1I+A`?$=TGBxX(wSM8MBF_Z+_MVyNS1;b*1uA%x;*eQ`FkW0`I0 zXB|9`BSY}BrU*NZ*`|J0k+4JXvpNbpp4p~;)<9v0;AgEki_bKH*`|J$b}mPT;Ah<= z>_ldp`dJSOI|M)LNMR>2+tklW6m|%H)*Icz_p@#}CpWYrvMs`&sMh_R&6stcH|BmO$QxEQGv9B7W9I zXnz{|S+lYJLC7q~eUN)0Igp8vJ0RmAH$bj~Tn!lkNrPMp84O8*TnM=UaxUa7NCJte zpLI8)?xQPLc9!^AgTI5HR}ga6$W7f8Wd}TO_zb1ukemYp38!1XrxU1?^|m2(Dak zr3zfoe%1!TRSB+AfeYHtS|PX^!Bs18q54^`f!a7fYy6p z3^PBgnp&Gi|NTR1%l;`ptN13#1I15BQuztHyKuM1&VE8NsmjkP6keL}5O3&D%oScb z^AN1^v+{+PAw0wz@`ed7lX(bM`B^!_%Mu>q4SCIkm%}^+tNg4?;W>qecthU81TMdP z<{?<+XQd0TKzN8Z!V3nVhFHZ@|g@<@U-gx0vG7rHa`B}L)Hqy`f`E+h3to*D`uQts^Ujii}D*jI*3WWMk0=N~Ywc-#-4oa^!eFj|`&$3|hgrh2^0Ouj&)Uz*5T4cetS-W{ z9-oyaJS#uz%g%hh)_zv9@T~kSm+-9ptVH2i`B}Y$XYFUjGtb=5+I}id*3{4HAQ-iu zRV)lMKkGPQsQs)Pgkk1qwG)Qg&x#jD$bMGc*rxKc&OXJ*&pIc|v<-=(Xp=T1?D1JO z|D~2>Nhe(32>4mopUAgK40Zf0{H%OFgi!pfBZM8xY*Rn0o3KOhvljf9k2{XprhZmc zJV%D$XAKp0JhM&xtVzNS!Oz-r0v}BRvrYZ1IAMq2XU!3IBC}2XtfeROQH0=U^%iy# zvrYZ13}J`hXMNitd_U`ij^X)PKj$g^tm`_2<7YMNNUq3(BoX<&;og&oz5`MV`2n&8 z@)hKBNEPHG68KpY*G1mXIz+dR_6OuQ$S%k)kZq8ykj*6GXH7sG(a_IYj`d3+iy?18 zUWXJzUW7acc@pv%%3Xlie z-O;W!oucSOCpAA*WzSs|vl72w+;$3W6MZr!z3L{lrKt1jqiPV*k|TQj_2Vf~?={Ck zbotx9{BO#jcG;h_ANbwwS_33DJh#Dydu<*6aQ3aBjiRy>M%Z)v-m|I`ZR`4RQXBfK zgVi5)iqbBnW78(1FBtti#WfqfawWF<#Vz9`@jm>N;aAvQf792?`|G9md$&uUNUMgU12Z{Oq8ht*Gg8z^u-@iN?y}whjzmAssYczi)eU)fs_&%xs z+YUz8r_ube;9ncSFN>qQvv%Y$Xk=0U%91fZ`0#vXGn(&tcs>#9Pd_}*+qXYFpMdp8 z9G>Ux_a9x)kH`9Le)C@(p6B>A_48EM8{VX->(^4tGvs+{lXAS_uX;0FYH~`)Q+Yd@p+G4NlUbTxNUzwPVV9E zG*jT+Kkb+s6>1W_?FEiw^>cI_p6M}iEEm&@HX0sncCy_)l@8?4q@pK&8Yt&}r$X=o;>I1iLBU45=Tz{dN^Ne;n zT;Ebdok1;WetD-TcT`@vEpG`o`%{x0uFcf6$Iqn}zM=)c_t5o+WBGP!hA)kwBg(LQ zdb~>Q5gHS_YhtX?pQ+uwyV)Kr#~+xCirTwZZx(HNMZAwI5-EGL+g)ijeyz{+GF-`2 z*UnT|rl$QCOHT)SeVg6Kq5maZ+L*jdFP7T)+*KLWh^CBAxpwr*Wf`)74^%T+PTM4@tsA|@P;xiKl?J{@~e6N 
ze^P$9v%KZ^jxF@^8-{At`10FB#jpK}8?ixyWLkbPWya-K^xXfX{Lqql%P+fa=;har z>TDS0_ZIHq*GYmdzsjY?<(Kj7|D^mH-oHKm6MFe=pfV4m{3cNGYqygGU4HpXjLR?n zng2=o;hWN1|9(3v^zxfcWgbTPol8xIb{0v{%A>#e`md8>q~~D8C6*{Mzj#L6=|tyT;`g|HS{K{Ls_)*1zA52)+DfQ<;ZRe&hR}DOsm71IGB5fWnBj;(9`0T>IuS^-~aQ&S!B4xP4^%>8;~lPVQaw{MhICm)+1mw=k*Da|?O6V6i~9U%Kn%Yw z#}oYsQV!PuN{aH3IPRu@^bs#!jM}q@QldQ0i1zN?o1$lyw!HqdlOe8~%N@A~ZMIo6 zDF314DA_kBwB^m_`(oD+*WEQJ5Ivo=<&CH14tGD=xLI~us;fK|>7rkO#muDa?jwLv zpns>>-i_s$ZQepnPTDn{gWjN^{NsuD%YTqJ4$1q$mUk;Hb9gSU@l8Z+dF8x(WQ?QY z;uHfph%mhddGA2~lYbHz z_wkgp$J%?XnA90YV8h)^AeQ@(LQ4p zuDqT>rd&6ArR`)VEr{yAG1YY-)#brM*Hl*?9;(t`S;r$&tr=A+yPH3!y7e^`sjgkA zZjSc3Hiro1z0M9-KU>~@THx>uj&-=Nq@waYhZR`o8cB8dyHwX2tpinS!us%DL5RCw z?Hx4iAeGYlm#KZ!YDmbi)N<%%*AG%M;aM$+;LhU*y?R22Mb!Ouq=u+`{+Kz2BogQUSCgDS)!s0#KOKjD{#5I=(;ziO<@3kPF(iEceR@Q_7Hof@ z--1y31Bp}z1Ns9~=^|)zs3v;bpFTaDuLisSviYIxzl7>Q!2bWFLeSd#>^~~#{r3xL z|A|KXpNjp@p+abI{tJRW|KgC&pXxy1`O`(w+WYK3D(L<93u*s}M*E+N{m-F7XmI}X zgFgRPLOTD0M(0l#MQiV~|EQq%-!G*7CmQX4D)v8z3ZcRI7YBX*^FliRKaI|xE{fLP zXa7+_@4sJ2`%g65|5WUM4i$p;{N)=y@cE1_?*sZmm1nxtGimrt*OrI=7j;DrrNvSY zy^Y>Wu)SM=&MwX6*0jnkZ$ZzWyS>th9z|cBRCQ0wnH#fjbPs4leM8q3ZF1|H;gR&{ zy0t6iIP9+K20b4k;ePQvD4CY=qn@>gYN-9x$+7%gc$aN?t6yHys(+_b z%soKz22OT932mdz#mqA#hZC}!@O;YqygRodpue;er&&t9<|zEPyRV2rr@IfG*cJ0| zVx8OrV)%q&=!9%D-=rXJ^bhah@Ns>^`4@_-Vn7T9Qu%S;%D>d~Gnzopo+<{=3kj%u zMNgbkoJ%{)^S7~5G=7&P`zpB{hvSAA7hyLpzT`L@=b;+(C-JFvAN7Z|%l1_gDAV{Tb%>^<%$zzptmzU87|~5~&{H`#KXd zV@V>=UpbNX+XUxNl|8)v%47TL&;JjcKO~V#mCqkDV@aC!{JFn=BJH;c&Yx;gnCHK@ z{`}E;WJ40ERQdcdGnT}A{&;#!`C3fMaJq*%W5ZCNdLx2@j zfIJHScn=k4?#cw(#iT*LZK^HraSrU`yMyd|(Zu(OKB}lwlx^nioZoK&;!qWia|xYD zgOkV}j}xE^HmRq>b1$_)4UaW@l*64Ji!n}UW4e4x4NJ=C(Y3wkn5dDTNqeUQrM0qr zr4~!|OFN1L%}ooQR%m~6q3_i8qW#{X&uqiYRuXBiN2Yr2CU;<0$_N}9J;$#vdJh$` z-F;mwE)ZqGZE8_C@NZIG-}@H_E)$h|LaO^z-}A8LUBH+D3A8#11GLCyY4|;4%lj_} zT$)75zTGpvnKoiKodvF;Pl4F-YH`Z+B|+=r3_+VX;8OZh6Hprhm8D@JmC2!9e;#7v zsUKYyD(?V1#wj@5gBqms4KJ20?@?ZVZxp5UpHc-U1ntP@lHEzgNUv=}TTEk_>)o{M 
z%N*`|@jf@)q*0FC)roXzt5bXJ)YJPL>B#spVz>bgbs)Yv`a^m{dO#9M;C>|N(;7Mi z?Q0TpwY6gq(U#=!ccnf)gO3ki%U*}_@a2UiPIt_k_u!6M=EO1cO>?o+_q1$zyXgWs z++`d>K3Z3Y=l%kO!xO9@n`X<~%wg1Tp;a`JR3xc6?ZT}H>H_m4c$Pfj&$ zCqBhFn6(oylf3Q3%R5Zk3Dg?9+rOQdD(%GgPY2UZ*zz7Ah{Jtt9NLO=sRR;yOCUSj z;l7KO)wdH=k!o#pRJ!k2U%9lOQEPrAK{fk^rxn`gdM)}GD=Q%@A!U%sxEINUTSzmQ z&ZjAA++3`DhU9RK^ zzTD0bsc)PAFs^UOw1H85V@sGWlhik~C-wEM%Vg8~_8Hd_8J=bMy(H;_d@uE&V^n@G z-DgqOV9j-qt08FbwOG2*QNipqgP3G2>_dWu=h4*LU!d`3c+PUrKBEHr zq&vaa=5=Tq#e`vOmg^lB%bC#zxL9WN5lPS2k2Y9 zIR|xkoNwRio9xB@Pc@#SEgWt-88n(bP!N8trY$d*LndK`Pt*CChU%8=eS$*|{@$X- zRC^umJXLIN@2OTkOY`_jejFC-VBbU?d`rrK~4 z|L<#hrl?o+D}4IHr&GF1-`X9rxfLyE~es+vgM@^$u~dm3pVZv9D?7y@Hy{SUsr!GaG>u!cp~lF;ih{5>!=D} z{S)qmrTiqsA3Bwwsh%E(Cd$2-Nt^IPFB*8Mp?3Hk!^QeBAPzSWw*xr_$570HZ+GT( zbPQi?lViYb;t&0m3I&Y|uk%7P44@r!D6b>v3>_Lru|wztqiCw#vk!OZ(#|mkdzwaj zN_Eu@ala<38{Ve__$Z!X2yWG}e5*?SGr{yeeJ`5M$?QH|zyW&x^P|aqI++6l?$gn{ z824$bM^*Q!Z@by9!O7rOJqINcYfvJ2ghpMFb+}=*GwBEsUS)9oPv>xbo%d4JaG-A; zyjJJ(I$ZxBq&-gZzxc~q>NSq`SV|y=YhdSqpvMtJH3R|m)vnTDw_Wtz@;ch>b-df9 zor^fuw~YGeP+sRfml6Z$tsMAvXI@9pBlUC1#8*O^cGMpEN=WxVwa@9z(A0q9Dqz2@J;OBkGAD~&5Kdr zo+wb&H{Yk-v%F`7%GL$n3mN{U5zir$u!5`Es_&#m#QO(+#^3q)ilvLf-_VKv$xMvS zN<{NERR_i5a>V%4W)Mxk2t1IYJ`vC0-Hf6K#R1>T8}2YI|CezenceRna)6ZoH(O20 z|3(fBxZfST80CNb!@-rm&3iH^|Gp^yIRAp!M^HQ%kMpxXQm&=J6tz9+F!g@5*Z<%Z zE6M)}!Viv_xzUz~v!T&9^`7vr%-|he2tAC z`3@yxKdyEob;jiPR`Atp-=|r0B|iM=(4pFQX+}$|DN zUve@}E41VFsZF1@qN&y#@&VmXS{*f!+Fr;G$b4FcKFd~`Np&64zQn@Mc;06e775GZg%w(+KFy#h4kTTl zrqNWp6f&5X(D~C8JLsuT&(^1>(^Rg1gWtno43=4csrJ?;>n}YTLzO_;Uz+9V^bhuz zx_&EPfEw2{-!BclzqJ0A2=|xfeIv)vr2VB~I@G7Xl=@4P_m^gmGi^UZ>MuRJnJO%q zz0^tv<~5f6rA;nX`{{dG4{*OhI)9<|m;U@(E>)BEm*z3_p1`h9Wcy3!=;u;oQ1u(# zU;64R`KH(VOJ8eE-Cyc-vuS+`slRj<9jvmxS@xH1o~5d9zWt?#L+>v=_NA0!*!`tT zbx_mwm*(DRTK*yRmtL=S;e3=v(anU4QARZ@vAcc>03S{!4=X(lMB63u&z{X@;4D)F5jI z&?-~RPM%h1Kk3sg`g9XbwT%!D-A`I2KCOh5K_=5ObW;}64Ejs+vG66HIiNj*nI|BR zKs=CqNG_xwaFg+A668)2bZ2hH%=J98MY|d^BlMXen2GyB!jgGmg_fjG6KRSANZ{!K 
zEnc6-(NsGI(v}1}I6|NPO${XG_tUh#zr=&p(NRkEyqriL8vl-X#`bE|26yzQGw}-1 zmt$##j8va^6lzKIM>o2S2)|~L)a^v_=86DlitI>>D_rG_I=!@X}Du^*F1+E6eGb=9(Gk@Yi9${o&qxCI?OG zoNV{FQ79Dabh!3AJdbsD@@%DpMu_vt(n+SQxu27UHTUr(r9NZpfMa*-gv)AVeO*fZ z0QTLjF@#$(4u|WqB3s^DyyJnL<7u?IlL{|^P861JGJTH6N%#8QHuFr{jC^qaJmUsh zY7pHn&Wj7DM zz@{Im4{`0#4;<&L7nc`f%Zui7%;hZz=6_K8`Ji%QwDx>d-l6-Oc(He@fDG$Xbgtbw zv0BZiRC_&3s;K$WhHs_w_be%)&;F_6a~z+`t2|4}5kZfb&+tDOls8=uUavnK-nkLC zN41aJaWTR?(MzvGm0MB-n(;V-Uun0g?nQ_gLa+81M$btC()j^^)#nKg_ehL)vdw&+ z{4VlUuba`ynwNngbc-{)Y< z`d-BcotpnSS}4;FJcC&oqG{h5ihmNer?Bkx;$=aQ5zP{vwR3kxCzy- zO$}Ed->NoQfV_&XO%Z4q;o*09?}Fxuc8ubB#;z#WvjG8-vt++s;}liA(qO;8bJ!wY zMf*+Q{VpluNZ*p_EsC-iy z46J1%bs+7SJZp;P(X~D|0|pp5-0kh2DfBh(+ZT)CBL`n|mN)C2fs+X3yGjg&sj zr5J51war-aB~7)@AnWxd6_{B8`9NRt4rWRquaekZ>$Mkox=DLlpFXNj-TE|_rWnL9 zm8RNVknz0cfYt@A3lYX(;fXY(9nVWDw97bRi`I(fmlaXTYiHr>j$i6lQ+qc(Dn$3c z3!>Z~#L)?BC(&ng$?^Ibyp`^od#RW1*1O&4PUH7=`n7hrXLhb5=o)(AQA>JKtkqDv zseKLk0y3L!JZ&95RX|oi?xkhe=R24wA;C;BW?qCm2YHeg@6=}NQx_J_BtdPw5YW4M z%>nIBtht5PRA|>@X0*O$1ZLvbVjCL&p+_WRVIN3OeaYGS^mKjN5i2{8;IP{1)7JXR zW>|TUiV`dL(NurFZjt}cI1iuvXRMz8r2h}{pDpNLQ~onP$o%IO&LHTaJ6&eGaSAk@ zL+E)Gr8PCG6leqo8KgkNJ7+jt!R0@#aqRW^PkBK8vz7YB-b_XEpBH$q7WvPAX)}ZT z2PqEcKNSY~&p=vfn*RXWnEdBA-m5p03CMr8@oMBhd{o~2r^08efc$4AccGF0tfBkC zoBw>K%zqx@c+30;x z^cyMvnbZmSPj~9Q`sY6vamU?v@a=Q7EQtK4fQ#dB{)2Ng&VT*@kn*2l!Q?-cvUjWe z=X}|F4xKNa_Ig(F-qSGa*DIbNKwf2%|9s71hw~o+#u2bS|DkU*IzyxUr!{S?$bVj- zYv9cp{PLeP)Be(O>MJ|&It5k!vx5Wl{AZ;}e`y8>2K1M1q{VvvLn8}<>8Bor)3nZi z#!I>qi?Y-6pA~d@4DufjAA)86GfsdtXl;@IfM$^Y%ma<`pUW)spSxwhR{2jYhaJv; zc)y1EPbUEu;RsFgp8^5a=ReE&MD+Y;y>b4t9LWK4Yrp*G11vYpe}dJ88s3F!gGV_f%B*&rk|znE#Y>xlw=9Hw#_G-F1`v zr>}4%{}G5`{&N?BjPjq6Sgy=}QccUf1$WU+?&r1~pyxl!Ov-&F-B%ZB zxN<+;dpfH8r;ab>;rxe=sy^lUhIbcQ%99Mk=l@5(vyD<0pL}PXalUh&p6}ecn)026 z$ai*ATa0|CmZsVdkS+R>O_>Gu!% z^hqpyj0E{kDxkA?%>ivX)=c3w71{*M+^(;=5i^NPu}y`Ru1BO{;iZtl`jUS7w5L8j z8!Nkz;IKOB(+>K|c39a)U)YkSit)sL?~S$P-Afrxs^?91`#jOT$qpXk+Rm?KC7Z7P 
z2e9;NTU*{GNQT_YIMlC^>&mP8YhCHA>Nm3G9mR{~*^MXg5y4NKB~TZ6?-HNQDtNhf zvw%MGi?Ug)w$yjCT%XMXe3O;j2hNGp@_aXYk(Yb-M(ww+-7;M^i>Id{cnqRl%v*cA z^H=$7oZ!FlAKW9%Nl00mNZ-QRQM4`HzlWcp+`{X*uaVm9C;zXGi+OE-e0d*PMlD;E z*86=l!K-2%GY&dwKKmW?ZfT9yy^{NZm1zI`uFZI+ZhF?pPe~|-e;|G$I>#P)B;t3j-u)$wW!({jMB*_Ip+{jl&l4D(XO9Cwn)_7A_N@Rkjc>zzpn*fNcdBN3-5+ z;ixsrY~fp^1nAMd7=!H_J`hO2D(};hedKGeqU)vm+QSB#_Akn)Z)w`ETg(B{zZkvP z+rQWs_`Kw14h-nmUCNzTJTH0T27~^^zQE@t$qmkix)`eGB{K4>;q#KRchU6vrls=i zz%o}D&qukahRO|BW)hK%a)n*!33{SW8eo|#p#M!(%qmxSgv07{g@f;K1uw#3_}wvM z1<0#Ra)t8+=*<;gW#ClG6(*)>Z>%)V75?OYE4|6gFIQO09lf#itmH@i$3}GS%`QTH zLQ6qkKSn>Z8c$m}+z6n<hz~2zG=V>6d>8cPpMtDY2)AK zDnQ>hzF~L}WOtin3zLMy*+MsgP_{5Jpk=)n2ZZ;>-Rk?a1kZ<3-DBx#$&cAtwI5Se zIP8~G`OiqUDF2_Rb+9OZc@(0TfBqtq@<-c;x1jl#!Zm1u_}5;`GlDDsDRjm7wDj_) zO5t0uW9qX8F8|$cQTfvsDRtY*@SNiCLy+1pXqAkYNugJ<(N_c=Bh^UNi&^LuBm5*} z8XYq>;O8JiT-(rDIQ&JQ&Sy%$8Z;PHegUnc+C}ewIUJAQ#5>#^AJCC;(-CoeTZcy!Vd>X>* zwrJ5<~7W`!fR;!J5LX2Pw3N!vGM_)*{R)! znS1q_Y|LcoGntqftIu46nfA2bhU2pwu0t}G9d8oF7%hAK_^k6VFva!k;qh66sJ78v zo11(R)rjBq_lBl=78Owyq4$MiaMmjz&}bf-rJgi*=Io6gn@@M&LOVVG)~LObMod?I#IkbHDwLIXEI6#Em4Qtd*FL0M{u}x zz+E-?!iv)a}av{=iz}_(`aDULIm`gR+Rn95YL-V zY7qD`Qn^uvrXM(pNH47h-=n#P z_xc2{N4hehnRe3ayn|8$XtqGHilCJ$s9cgKc)bp-d<_kO?{~?`C_qp2&@2AhQ20xtxxT?gU;%{iK>#x$rbP1pg~CmLlnN?@!{> z%>;{2ca~9bcg8N`D1Gla27Axsu*JNJ_I|zW-6;9NGY~qR`s9aBC&K{PT!6fafb9er zN0D^%pk%eS^j%1oD~31ry`Xp;!!LArh1WJreqw3W5cgsbhVm9D_+C3$oDJY4gvBixdlg)=$&&A?~{=2SQ%GoDNFz>geCk_tq<>f9`$6i(H^{38Q@!#n zZF!&~^(q=ic(`8q{=B0-lc-+#mc<|S8BqTb{`HD7AcJ}}*?TJR?Wqhh{E<&kfWv)v zhF+cSe1$7OynonJd_dD>ycoS8oW4+ePp!XjI0Nb`5c++{odFq8Dl#3sIqYWN4Cp>O zH7aw;fc6E97mJ>8o@u!^r)^Ek{XZNa}KF=k$bJf|<2gPGZo z2TAZo@|iqcuieAb3T=`;y-ig*{00zcPklUi+_9 zKlZS@bL;3jtf5g}L+Pmp-sZlTZV?>RPHJ$q1vI0*%u6b?Z)gdgB|J{^@&slH_N2em z{pG#bey6q=+r0&O9a792?bM2B>U}Wt7*;$Cc>pqqc;NQM&T_D1BIFJd0B^#~7|1Bd zm5|FJ|KaU+YJ)J7Oo9wH2{SET#5q)GXJY1beN9K2Vqtrp->J3L=Z~PN)=Z!Io!UMG z?WU==9rBaDWD8AkIGeC=qrUJ%%&dl#lkodFuXL#Mu@{H=}&M2>31?S=c4YuweVB$R`j^09Y(H;dyU_3kYo 
zAKNE;OVp;(X4LtsqgGPe^SUx0TP8XVOop64Bu(w6?;NjC}<33{%XvX>2+w?7EnU7r!8jcKoeyS1Pd~A;7WAlb| zLOzyA-RQu4tS2q??{Ggw%N*AE*g`tC2KiXpb6j{D9fE)1T|tp_y8FBG>1NiSu3b&PkVFcj6;C9bTpPk~6TCUoUMBhb`t+1e_tjIEwVY~Pd}rnwjT1KzGQ_yeIEjG?dF61mo0&lcai>IkK|2h^cY>*=%tLY|0 z{`CP(wK{4JwY`uXBslABnAr;14A}(P2=VZCJGIrADJMbxRf?J1$8io7+N+qEr>}XA zrdT+K=XYuk>GSu~RGXpCOxCC4vG6v?&H9q-Xo|zR5(_Wa7hZyyL6Bq;gZDcHj9<&< z6vR7zZ3@}3%JFL#A*C@Mzjg|*>aX2GCt9Cw*z#KNV(-u}a+rLFhVd_S+%$geJm1Zh zanIbl8TI#lH+xz-?y=gz$9(qo9Xg(F?`GuV_-Lv{C^<5eX&>9M>bXh~NdAdc~>*MKFp$5XnZZCSDNKDrvni5fKC- zQ9=Hn&pUI@o^y5+P}}~0eZK7T?97~*ciy?bGxNSP@A&WgZ}j?#r+?%5wFt8KBhIglKb^g5`{&nMSjYqpG#=S4T&s~}Jim4pGnC={+A;KR`~2FHgSJ!u-}n4l{q614|7Z}R z_Rp_npSEN5KMuPF=lohBdSRLr=U1`;grZi=o>-S%j6;Nc5k{*-Jqnp5zY}4pgKSHu ze6RCs<58Bj9LcPbpNKjy-a^)EjjF(3Ey<8B(~VT`KD;=M}s<;Sh`9!q014CZ?nKdvz!gT-F0E~e)I z^KlM84!R9SFS>fdV49~&@H%xOJ&!S-NAlxP^RYia@@Txu0fjgn`Jw53i`opTslFuX zL`X9~KBlpX#(OYW^f!(D3O$#}$2zr;A2sG<4nH0@9}n{5G9<=r#$EXEo?r7QVhx^O z8;&C+IKP$)aii8RcCnV19Q+e(sgfP3TMRm=gox|IYkvi#|KJ)$mC5g|RE(Om&~KMS zEOUOLxubg|tO1LBo@hn5pD$k^tAFzHUd6ToPxx`IZ136U7bG!N1x=i5taui)D5pPS zItd5_T-@`C!jZGwZld_lzTE+k7zB|~tE?$zH5frcWXZaY6^liySSU^{1TnjRanycB zRNVu#qReW9_!4rKjZv^iDTRnGS%Y(LQZ=b_Zwm~He4wE6bcIYut&u4~ON{}bwl$L= z!mC5^cZ8uowjxr518%pW=ux|OMHHu^ETfyOcQu?mu@<%JqR1`}yy|`stjK$0lhIS{ zF^T&fT`F@PE)mz__kdim`+BIFkDvJ7??Oy2zY; zb&<7hlOTAB$%DW-d6<+T4-*9NnF0wz9^S6fc_{|OM#lDfuA@V;JV080z#`Ot^_@}sH3G^KcqW0(H`FNDszA!DkYDZP^f$^y(hk7h($Ux7 z`l7%28$jzd)BWwz@xK1%FaIwE6Z)Iq`$%|e{`~Y4g8dEr`3(K-k8W{Li=t0=SPc7z~j8p%Z2K`8nl4`}fOs4*OA!87caM>7&H%f|et^;>as~Hw0RqU>qPm2o6XK53P=Si(nds*7*VZBf< zXfMNMA7_2{w$!arcIGE~&p6=u&398~BJXiU?XrrfJ+iuajR;O!RAn67q#F@V>BlaZ zXjC?hEg+Aog~kRLT&sU(?0*^iJq9j^K?w!vy{f0NRD66GtcR^ z14O#AP8~!P`_Tx&V2(NyA9gz1K=-s%S*(E1V0WjAN#=$>Nks8Thi|jZN!lg-~MaAJ%x(jyT|{J z`>#pZ14cpJ(uT^_+#JeTif?iHAQUg7+{E5@UIk(LFuMOMZe&i>21)aLHL zHo5z+wWc~Gu77<^a5OQRtZc)pBRh`V}w@1WZJfg?z+UC8o+GZri|99)Mr4< z6J}J%$S@*a*%snnQWbY<0E75cAkMTOya)RYEXcBQ^a^zjakjM|yb7?~R}iQyzLxr{ 
z)89jW5xMm42g`i>!9V}(h}3?tcdpI-V3~J6=s(tSIa1`BcnY)SEMKen5m*o^0Zi=& zBcpUZX4((Vq`=6|S=&(;c{I^Uku&cHhnqYIoRf!18S-$6$%BMp+4j;6IuEkv3v`mx zcXU6odYwVq1>f#|@an3z_k&tPL+1S;Z|h{-4?fkwSs$8vnf^hMFFO!- zeVD!<+(iKN`f%Y$R6|?)!9U5iqi1l`X)zReXe}Vy&iR@4_Jb#(LfxXEmD=17ZZ*3n z&wlV=v^@HT@~_KHrCYMG6|fO;sfA8Z+}i)8kLt>fF+4;CPSjQhdE z5zyv-uq4arZ$ExNSjL>D`kQ}`K&@vP_9Lgc`@xCfV1LWJAAHp<4r(#;e$d&&Is3t9 zuGdwFImVx<;_dGTTg~1^_Jh|LM49)4+b}_8+z)=}ZlTilgG;^(V@e_aCu99@cR%U|K~uqGSvSJQa9NTcE$+{|9H<_|NT)RdUNa4f7*UA3PZubN7Q+ zA`oN#KeHbk3%zi?LAW`rq^U#m+t?45595At9l){VMAi8AgJ($rdV^x5ZwQY@81^l` z4WYh8Ch*`A_(}HXs9}N6Z=Q}FAJ_fve(+-153Wfp^jPBaPc>^u?&O`uns!rCXjR~u-og^^PB0(r5%-xRaQ<&_M;*_o2_$#p6@&;Q``?ObkK-$oMS zuz>fIyBlRCPBl57;n+hceh5~Kkl!_NxY#*gfLEINEM93|C|-)~4NiavYyj;&g0`pn z;CI{>xVLEX#Jf^sPgf?lR)QhTdBh*>HD5H8!GFT-F&!;WUFVNx`J!PI`lDT;qvboN z>bgm6M}*YV7dae^T*sAlVsu!2d#zXS^r(0{1q2oJCwYoa(m9K_7;()#^pxz%0{_(U*WcT?!NGw%y=Q4lq1*vPSA#UO_>U%>19=pf zK#J9T&=K6VB=D84Q0_F@PArfuN}$XK>3WqQ)EhKQ-YJxyrG6>Th-{1NDP>u%wdAzfv z#x+$m4WqXy{+7N*BXf2s1;B(eLunh2LkVW*y!KU?W2+tmLU*##G)S#fT7#H$WfNlg z(_MqLG791BPoB6^I)rRN0t2RtbxMnRnfp@&^82*-?uLbGyp*+}-S zF++p1PCYV#Wid3H`Q=no>7N)3F_lN3+*2FbpV~Q(H^nU3+tDX?y;-u0kfdgaP)rC# zGw74+CXng+%=Ux7UKM`{as7^*$J>M$QW$d{Z&upsQxtJR2efQweFWq9Z&znV2GwN>XYlXVLSD|yDW{{ z)#s;`E*O1sg~(%@=OORI=*n~+?U z&*M$~ZN~F>8g^Uih~Bm)k>{l3P$MXZI>C`w?dbWvP}Q3wc2^$`cFtk7eT?r@jKQ{! 
z@hwAG@EG4?DfqM{&?E;wzqt+@5kytrUX6XnbNEo>L3_Dqy%Dw=LE}2{S*@-VyG312 z&x?%bulZ3%BSNDT1{b!ci@idfWWJAu&ArP}u+?Gmu}&SpkG*oy^A;(nX;8 z%GjS9`(xN@6^(l^Y}9h&{ThAh%}1@VYm7aIfsfO81O|(CKR@oKaVL$*G;X62r%^%U zbfD+vW;i~W?`yI?>+YCcZn$1&#PNPZk@KKAEF zo_rwx7#~bB%LKN9q^gZHzM#=e<6{_Jc~QiU=Ruz4zMnq+^Fnn02(-k(7gj5FF?bzx z9?q8ZN@Vqp*W%RH{v1&nz~1xm4Tk_c0(EnH&VtALa?mJUA?uSg;IZtmTnR=mM7V|8u6Q~pkjQB=?Mk_T8Xm|tG zP>eE%Q39ar_a#qbJN)&5til?^&f-6sIA5zi-nKWh(4!}IPgtoQjjxgf`0IQh@_fx- zzjGfB)F$5%fk{Xa<>;_hm!q1QYNH$jP38#GCiB0R^^%^s=kp|U58%`XGIt-of|=`t z%&}T3n1evIyv;BO0VE6)k{k+~B7pvN#w?nDLG*!Cp`wCFp9WHtT&>Y1 zA4+sm@~VvBx0pN#oRf#q8S>DCYGNKF40%|5na)GM3c$&%2aUF!hwR@LZT*&~vE(rro10fw!K4*XH~FVX}vFVi2l-;p}(|-bX!T7(^gFa=r8gs(x}fA zNEq5`(IrHh;hp=ZSvEUoNHeSWg@PL5l4|Z%P#Em3;{nT_B0x^+^%+v{Wm1Q5k3{xZtbCAx-fOKC|RT2iIYcAGB!e#lJ3;L7Y7l0V|C%L{gVY? zq^!AnF`Jdk(BZzI4rl+CsiVe}mcpkz+8}GbvAOJVS7Ktqv`swJ;rs=4IQy${7qpEo z3`C+L2`CR{`!#7koTCG#*wJ68zh0y(FdPKTl3d+f5M}{IO*NM0{u1_+RvUrCV`zy% z1`%$~b1fXmSx=P3)W2Vs+3yg*EawL6|G7`wum45hQny?GFA0FI|CcUor~WU}n9}OM z2mE#YZ{20b>i}Jz~XuV&tGzxlUj{0br&=>ndr<`DUd9AqREN<(4B}rV%Ly(2Q&H{Gm1yU zxmAwZw+kggtbvGyGy&e|8%MVSJYv;z%^;g=FIkr`z0@i@_6beX?3!U2675ec8Zw&f z%R$cU>B-Gsj|DzR5+g!ZWWc!zjRIt~1i@+{RMmlO$QrEi`5PB2QlVSPnM8v`EPFjy ziq^C&tQ*xW(l+8o6EP%ekD3#)=B*@B8wna-&$zC8pZ`HDz8qiLA1JO`gav96B}hc`wmV(&mj~up&_$e$Q9C^ zb;Fs%(NSwUBO07YWxX$zRNSc&_Zi*T*7B(JEFq&uZ!%FMMH@!!9Q*Cr5hMB#Kg;7pcHeq- zA>GdLMyW{E(OnEr6qs`x(cYYP*du*7F4s6#BD_(D$LoVQ7JvbZaQ0%|U;aT}qrbo8 zCv=>~h!w95Hc?lAfJ|^0LjMv;Qq+hd+90#Uq^w>@X z?ik+}9*)ceM42UGcMZkm^}^&t7J(kPAW=aYpzGd8vUsiq-MoTe5D|u|2C`seeegF9 zJ7DT?zO^I3q@AWUpPqy(Z2Ute0w4VHXm)tS2zjZ1=3wC@Uu zcrtd)Z(#rk)X^JFG@*MD>pgYwuZ8Y8<&4-TA0CPyj+j`~A(j*nxLtyD6KmOInAgx2 zptifczvkDD{$=oXYJTMvq5VpD!wP!T(r}%CAO{!dX64fC$kCV-ThT9<4!;6DdR1_N zHk?hGVDpZ6EmHYy#~UUu0uHvPdb*!C)dg*(I^qsYi)!QmvRir~W_(+yswXKk?yPXo zc*{Cl@;9mi%uQMJo{J>#t{n$aDE_Z4fKKRPlaOHp6q*`(s;gX3fLd`;4FFODBN0HlljJ2d(2bHYDliIdnc;Ew$z~53;o(+f5zk@IK~o1p;wp5lbc> z=kUvUdLayr?t9FmO9~zEKmfFGapV;w#)gl^nT{SHG_jYhXt 
zgX#L)IWR7y>$mhfm#&N8;`lnVKR$ZEAe!#{=t|>Q@o7;<@#8QW2hrG1yw|BcU^8|n zV{g&qMtucat)uZ7jZeh8LCs`hiG`e>)F0tyT9(+W)e^w)`#HiIm?y;#`g2X#OxS7$ zjj1&5pkcuvC%goB)Xnr6B|hMP^W$n7SI`KXgz{l8t_4j{_j7XKHHBUd-<8{%=DR*a z*9~2lvuzuAtjTO1K;gRVIX>>rU&KmqtCM+)lc;C~>N=P`k;ezssDxlollkShi?O1lN? zHEj+dInd=Yq#{bBMhE{w4cP=)A;3h{Jt(D{uQd5N#|(T%;i7npI9h0 zyj;7b=3j=J?EfXkevVCePc>8S~yD@c)R9c~U&U z%awNf@iUPa2m}~V zA9AT?FP=mA09zv;fMto<3~G}jKQ^PH5Ir8%W5oC2erNsqr zy)8wkZo3F%SR-eIXftPu^-%+eEdE_V#)sgQA!j$>Lz7IbHIgli z@(hod_<3m4S;MJEOb+f#cHClxYy_+LBi56hv*ax)@ldzK8I|Tf#f&8~D9!b!zPCb6G&Mz@2kg(oaQ*Pd!82@G%)ig%t8xLAO>B z&9+7$()Y&`l67AZr=iK5BJvmx>MAcoCH+cT*88l<>R!@njT!pPp~wunfdrwzd`YKC z!2GSYNgDTA2|}^&5`^iRbXNcydSK5tqz97crpZS7%O4H=Xq@Uf_AgpFN)XJ7)LFQyY*h^QA1Oee|rL388L)F{AV&5A#Flt}WFW6w?fm(Xq zsDZxr7$lN`>nIoqRjrW3On+MwwU)d6ZSTf#d;RTN;ZwHL-!2sZ-QNyHeszEQFulJW zCxB`FEl<4J-!536p}&2o+fA-rR_$El;CuYBo7UdCL_28EKc1Jei6X~@JB`|AqZ8ylhzQIYk7_Jdb{ng zN_gMxw8JL?pxa?uNjvQ@MgY^=;d1e2J2ZUpz1m^6Q#{iSX(EF{sbH$xm3C+?#`7nf zthlOOgAZ&ZxjqTSKV}woOm4vCjYn?a@UJNS%S3ecMIISOLLN;{=m(mHc0g{zWDHIUSe282^rG}(tE zJ}o=_3`~WFAvH>a`8i>deQFsd(t(q~It_4phmsXb>Aj!NT{wMWh6o`Mv~o-Vrox-}#Jaa01PGUn(B9?+fT? 
zW%|_(;riY|$hNq?QduAUl9i>!Tp78(gMcz>-%-w-+s{s7i&?kS2zz!q+b>nxG@-#a zVCw6R#Av>bv_km(eIPT?&ktJZ+@e_kDYMUk$e<7BAvCmKWKf||KxEK|9U3X6v#s7H zn>pEs{aAORugUiGiwxv7aS<^ov~0|kGnzW%f(Hp7l05XPNa1 zItBy?pFx1|JlRrMug+z!O-$p)41rpJ;0?bH;RsmCy?BPMBsX{M2~*f0)&?A{5GW?0 zI|#ailhWSAlrW9bK;TB!*Dp!|0?r+WY1Cds3Kg{^LXe{EPh1J1 zVQ2#lKfi*7vF1t3L5>i$UU5YUcaU@F%7a95AW{Rl>}O9HEuq2%m8USR_zaUZY%*)4 zrIwXhZ^x{Kv*e?};S(q12wS(!B2*};nkZl%({W%o>Oe6)8cY1)zRcM(4o5Lpps1e9M6$BHRLJYjYbeY07 z@L$2zFaj2JAQ}`DlpQ>k8x9vYH4(q@1pCeyiHNz0=I<|PbQoNyN-rh0Z)qH&W+i4%rML?cd^ zFHq^?gufS|=vLAxs1_%Lr6F`~F_2iyP<*Ti5_DE6NO&4m3_-%#j=s928U|dcLJmS5 zJ7`T2@dPJ4qY1&t+1M{Z7@SFhFrE$Ql^{%WB?!X`z3DAWPjAGXpjYhQsynEEN`BCn zR9Xx9eS>nbdCue$PtLosAk7j$#AFI|_N0%KRY_M_i68oR+D2YAD2 z_*e@Ayae?LKbmN~Z+zb3$Llm+rty;TenIS{ddApK82cgE>d!Rpp)pOo8&nw+Gh?{t8uQ?B1|dF^$t{oI;};4DMK(F;Y|keGZ2~j_!f{ z*oVfRH0l9Bz*^W^uF!`QM7%Z@>fO{_VT1`!uRa7zu1*CaO{md=1d0;m)Ko@M0`--& zg7nlumhcVl!XoMWSlQEQ{F>Ra;X?VOOyjki%O}+xfsX%G@F)Lz5Mv#p14w{WKOI5V zD7_dMI!R*HGVmlF*hKfwwOcSB3^%b!`|o8wc7d(l_yvqDQt|85SFqJO^RXJX(+c;Z z%dlnMq8|$Mst%J9EQVV>F9DGE^5aPwvy4x*v1ib8s`*G5yVBTWV3Qj^TY-tzdb(-1l=_qI5?89~UG0{6;x)LTK~^o(eiH9F zMo#l*c^$CZ|`;Z~C*AgL-0 zz-W4&gjnhX8b{M8Fi0*zSc=*@@&xJ+j@eJGOCe9N`{(hxak95uIfz=lDN4ZmPuK^3-m%PHSc~2ZUkDT$OOjOuT>3fJ5cus} zM4;Qbia@t>8G&x+Is)C!g#@~tD+zQvmlEg>BMn?%fL`sY0_S1^-Okknx}D1jbf;ZU zNRqXl7y~J@pH=X83IpVD>%b4eE{X4h^&U#Q=I>!x3&`FeZ!yy?+Bh**3JBP%pK(cO zFJRRKlNbWPSYj@N+Ef|)YCtA2Kwn|_7>r49{%=Oskhl5x5|y#M!Z1KzVdxD;H`>9s zAlQ{T1Ap{|FtbEiNLyuikFgVT3DRcPdSuSpu3&K@YybQ}vUY5Stj#2Zo$(g%IY>8> z&_QIQws8yid119bBI9&j#7MzZWz+C^er{-lyV-YiL zHKMih8Hxp(d${Lb1jQ0^kGPC$@(;M}v1XDP7qh;NS}WEJcJ^;}PV;kNR8gC9iC?qF zwI`RjS^zYEa?^1rfygB?@QIHQz%>43j(C$ldE$F*@h4lcv}W#or94dfsxBoeIdwhQ zKcIEvn(M*aVyNeHnM*sPf`khsGuyo-%&RWAt-GG}3WVPsON8mSgijuL1{p;9v8Y!|mP@epn-{H3jLe3o=@r zQN-?t8^U(~@?c(1AZm6z1bchkUht14hU1oSiz}DVUCt*5yYX4pMw|WZbMQUe?Qfq6 zfbMVcW1RlBDqSuyMF7+K+X(Sye_OmFLw{T4$idS3+kB^de0~lk7#6IU{!-_>I4rl& z)zQ)`x(2~^gV+HjZ0F*z##79$^`J%)Mnk0W-%n#nq#?q-hWH8`R5S!G4x=HAT*A{3 
zMlSK?yms5+Dj;dM9mqMxHEEGYZtc=eJ9HDkw08K;l}nuZ&i87E!%%-xJkt(cf)%{K zRPb=jCc_hLX_UdeI^du0+JaB?fLX1SArAzYFCz+ZxwA zhL0)?9iX%qW}?w@)y7^(0`0xn5E!ewvK{TYii6Oe+C+5T`wV{R#4S#vYk2Pfycj{q z2&a?Sv2%$hSTj05=honS?HmLn+|N_qLondHf3mL;N~H6Y=a3=pUR&4 zkcCm=&U&XKAf|aL2!z05y+8bRiJ0*+gTsN-0uOVUVLgP0k^~2r^ZbLG+A+Wet+~=@ z);sz@uS7JEtvIc@9#fX&A;`SbGCgS`KJnn(X4ve+jTEgP@$;Cq0Tt-2vQvJBVV2Nd zrsj;y0yjH6^ZzX(vzT1Y8PlFSgS`(o9oX3R)G21MptF=FvpAijQdSj==EIsVSo_wa zZwZnNGK*%BS@d} zx!cm2oNo4O)LF)EhOQ#95;k#Q$H-lsucy695rDM>ohfhy;%fx>u0Wutd9O%Nc7RxI zNiZHDUVHKsHVs}_0Ze+Q=>^gv=mq}Z=y^Ik{jCxq$nIpXn>1(ePr+2tS zmd4V(OdyX&FVjzZIK3>U@1Z+2)_4?aOxV;;#aT(TWGvg?%l2R?+rJO-0h`VdXS8=8 zVm1I%_aQjuA#AIV3i60EUUtP9lZ7?z&=q?Io37|Z9BKhu7c}BU$(`38**9FjS&q}py^69ZZfIfE{6aUMy@eWpwi_UUxiV0$~APgYq>^3itflY=CPPKPYF*a ztC{5*Gk_~q$ZNoKchH)6q4>MPi5HAJDA#zsZMnvb!@cQUpPrt`HQ2+|r9bt`HLfEi z&u}l|ViDbpbmbbSA_1RV16mhB0XT|c2^lXCbj)HaC-ivAAmR9_;M5l-et|^EhVxAU ze`1$Sj9O`^4cEAf5A%@dba{tJ1bMFz8zZ75r_P6Rq15uIS`W%=Qz&tbiv2Am1fhU) zh$SQt*hu{A#>?#qxnx1isVfwuMD$6QeS|5uaP`T^J&M_F$UkON{z2NCg0Df2wEUxQ zK>qQjd_$oC?o;%^Sm4N&4Q*(C+I@;$p`9j26(8EpdkI^lZp?cL{`(PMBFI!3=2DNA zOhffU@uV9Wdk=r)xAxO%Z-#iL}4vIxKH%~WWk~XY>v|~8j)Wms>gso76VXMn%Tu9?Q8t2m3jrr*f7ooQ@hlTjy0rvIG zWMZKZj*qpQ^Ql@1H(B_30=Git(l*I%Assbw*2AskA*XO+5qXGeu5Cs=A*nYJs$Qc} zPh%+zdN1I|d>T*Dc!I{GG=76rc`4)S*C|Koo#mIKL@m8_YiG+A#Y_Z(R|jdzFCk4) zzXDcdl~hCJ=;E!U`)jlp{7InHTDW6qr%&jIPH?D>BXE0bx4_-{PsrIVMce4`EGWH>erLztQ;D>o|h^1?`qn4%9TKf1y|`qID!aJ%}O&Wl=G zB9%!Hp4R@p3FjqHX}+5~D}qqs2su zrE}y-fE|dCq#Q4x+$dS$NQ{ydA}@kkDclmx4pmL-!08YtYt$K`cnv;c_8DYuBNN-y z(0rETn{JyiRFE=yce08PSA6V^IBYM~-x7H%%|%o=g< z*}xjSo4^-tBUWM5dXwdd7XmN%6s#LQ4~qWo&+VCl_g~Uuebx3?1c;WwqJ|Nt#1ado z524?piwLsHb;Z(LbrV^dY!%-ROJe;25rM!2(>hnuYKmHy@b0>YNpb=L%B+(Mz#3%# z4aHZp_7l<^xl=J7#^^Y=N$t{pFH1 zJx1)N=3zLh+5=!bPS<7>hlKuq_FUL^!B&4XA5lW|f)T?K;b!AG0yYKH*P8DF0v#?u z4eA1TE-pvf>iGRhet1wpHD8KVsynfo;x0&?Q<%Y;)rF=fF0GE{RJsi_8HvPFWR_W5 z0Gr<%Wze;#$F;v1I)S04%1|(W?Tp09{+kQBaINNB){Q)nSJS$vds!(H4Dvf*tEi-~ zR1HF@)Xnm-O6>>Q<y?>=_jtvPS)0Wo=h|U~;rjY*2(b 
zK$Mi-h0wC`q-9@z2{nAxzkMqpM{g5obNc?4c;EX9$Hv z%&EhyE)cL{q{r8@e!Wm1h_FncBFSRjpfa-9cvBGc5g(`!U0_A?#PgB959@V4P)@SM z8V2Y}gEcf?W|XSh5xHk{4BhdIRD@)47^JQz;2}GRHwJUDMTBBbhPx1TP&L*Y9uRO2 zi6L1m<6Ql5A57BLOBe}hHE|G11b?Z^qVn8OkJ)p%m^1^WPI8~E zmjamXo&m&JcBfC69gIL^ycodh`n@qA6VvE6Gv6|G2eRP9LgL6&eKlAkViKJjv|fh% z@`8+$U#mP9oLe??(W$7=j-|*rKL)dJF@9MCLLh*9oaj*i8_y`w@i(0b8gT(CU(1E_(P}N5yJ&A>; z37SwvW!BfAUU_gu!ECqY)w807oxG0K-zuMMiG{_0X|{>}@GNJQ;3OW3-wp1*f7uy| z&m_k`L4BQ-o}9E>qRSA)LFy~fg$yw2fH*v;*<~-3#sLn+cLxM`-8v=R`BMsgK80k# zbn~*rf$lnMDZTB{xzPl#ZTX-7en5xiMH0huqd4}!N4(u-5N}UJPVa|B&X=fT9O3VR z+5scHD=ZN4LR#@RNBoq#<=G()G4TUmoDi5w6A}Y!B=!MpIoab9(hj}5y`SpWtr5Gl z+ANob^s??|2MTIdKTFDHy3v?f5$kNkkf%vP^DRcevzWVt?zos>98|pOP&y0bVt7S~ zXa|Ye4^|)>z%MM8x?35k6%;^1c}NS7{S-^wN*TVzDGnEhSxKFie+r+;boR6n+8Qfb z$We^WDP>(KQ)JP?S>k6)8RDG$v|2c6z|h_moZw)EwZ-WPU&XK}TE%GfET?#1PFEo$ zP@XD`C1$e2`y23j5iz!cmFQj8p~tf!L-DgIMN0I1zVlTf$S!8GJ=LSTNjs7(1En;v zCZZg&uIi{9#56>u+nX5V>k*ZV>R4iEF8n!)oNnB*2_6Begu%!jf~9aM{x=c8Xv!<} zX+i;JKbFiD@Dxd>*C5y&*k?5Q_jaN99xQ0$R2;_&Rgw7n*HO4!heTp_Wgh#XH8T%s zmnD8?iVQ|A=`S}eC^56nRaYdK#5q7wcKRg+kxT~!BDZv!> z?QYH-HpHr7hhE$drB}D25lKJ1l~~UF+tAerVjo65@cb=cQIO9Fr?<@R1)*$1JwOuK z2SV|KSU&7=a!uC{Ug`e{OPY{@6HT1zOL-&8K-$Awc-b3N?~b~LyC5T7r9m)^nSt}5 zuN0k3Cy?#dp@_Y#^9s6{lrh7GgBzHSnS|XJOXX2%VYNE>E=>F74(`&y?OihvZu&A# zELMOPO~MF~p6{){D*zU=22V0{1p5CcnW;eM3+-H4j3zRpi$!t*z>Y@;^VP}joI3>^vEk2J0uVJ|=xjV+Q^$r$z13@=9?lCn+ zU@2ZA$b@Y#$(otoWhirqCyLoroHJWSf}?M+0C$!d@jk9!0=s3tgUFr?^5A7L90G4}Q&1H>gBZ6!#uBqm zM9u=l3000DljE56csUw1X8ntJBbCMZ*=T2ar5czEN2yr1$y?pI_g0OHy$Pr|0r|SY z1q`-Jo3JWPmQLbubf!V;DGp~B&(8$UR6$^mQ4ya9v0R+}s+EIsEIOjhZ>hlnAZ%~DAgqw@>|mMWml#|x00^{4 zfxIQvft|>HOMlLDVkbd__3_yJjEMFV00bIDuXn^vX;L$uuwR0YW?sq=JrTOP9X5}f z*H>g+EA%Rw=FK5QdMza)Lc>i;EvaN7(>xpcxMY2jzG=M~ zxMX}9I^ePaBZ*?HFVAJ;%Q%(rm^Bm)f?>-A@H8Q1(=^p{3*~3&3(2xW52T%AFbYsPSsj<%&yG88@So-JCPAp_GRFRHH3aO5V zTSamtP&^LNfovF+2RAd7tNpkXRa*ed#J^%3kcQ87T!C+bTfJ`p-r~pWI#go5C;_sCRRe$=2&}k2(`N|PfX-Xnss3MUHBE7d7XM4jm6T|vC)>maN28=v;pL2p1l53 
zyQLbJYj-27@n!8MtyW_NVT&3`T&TV$83y}d*s7Ywy&mYv^!Te(!D@A@C*Tx8vs#^? zvm{CWhQNnPz&f?R!Cywth>j@>gfr;Tk}V~X8a@^tEb2$vPjb9MyH_&z?{EEvrIsWX zYk#TqxgH#kYPTeCFWr{SMZ#k83)L}_$9eR?(5EW8THwWsidOS&Ymmm3-uNi48X!nk zs5^8kOGa;%dRV7U^8o*z;i$f!!H?bKGDf}-M$}5K$%$Ia)?67JtyLJU6k*|7iOeb^ zewJw-y5(dQ9f&`78lIpqW)iZ$T5;I9MK-=MUA0Kt9K0o}PEnh}`MIC{8O``xtil44I=}1m<(kt7m0d@rYz19;TJZ+4NXAOJ5t? zV)nE2xA&hm8I;HF{x`{aKX75{Vs>=}F=+O=C@;2#fzP(GBsqbUDRpmb0vYFIgCoS@ zoWtp`&f#=;=WsgAb2uIDIhgCWh0M&)!%gB|f$;t|s@E@@{2PP7p|kmS6WwkX1j3jpyiQnq^`77;JmFU z$9C=E{aA5%9J|TV2nvN|?0RMnT;@C!m0TrVo!O~F_)S0?v(FxgDBR$npT17!nX9&< zM#6EaC|NpChp+JQnx-I}`uM|0majW>)ybC=R<^0+q6mL8+vTPUcL+@L2;!Qpk&}EV zek?uBY0qS-;0eXa2ardBh`sW2P7On`-7tCL^8o8nz%nS7YJzR}sZd;0X0>8*t-Fxv zfTjj`yfalFhJ)nP4o*|d>%z{lS@r)2DPof+^|!9$g(8r!7gVEom${N`spGFrI&6eS zfD3~HAx3o3YLL5Ip6+B5*i&Z#6BzGA$X}TyM;1~Iw2Oex1HVz%v?ux+$QQ#59G_?p8N1qY>NLmv}N0ieDfBy1_i?Lh(}&fc*_dxHns- z=1olm>FU;9Bu=f4!>IfPq2AO`^$dcDg&_C^q1I`rMg}D-3Cv#*>Lm@el0nHz0`nJy zx?4jvF(_F{VE%$ozZa;fib2WBCW7E+IN#1==yv6FnQ!ZaAbkOXG@q2TxUdf7F-h`C z`j`yGsd&HYWl~WgtDub0@>8!>fFiSZ4i6mF0v+M`+aBq<2N7M; zVR=;upc69B(LIj}=sqgptiw>Ew%)YZynag)0;ChOik)?Gdp0b_tj* z1ngz?rw>W_*L4KT)le}=RKb`)K`&~XfN8FxbIB#nKAc9tR$|wQh8U=;mhN{p|1kMB{XIAV5lQa~8 zGeKRhaZS-s1kMEYOAR$cLlHO=)S()xT0;>y6I7T`n68}Qge}X{B0G#JFBIPe!IUTB zysu0#w7Kl3%_V)rzG9_Ef~cxCVmCUD#!WOv(ileL8X7}r45G2+OK!ndsIOqFb@I`m zRvXV%u(6Ng8b`fl{9ZLMOBseO_5uyVy?~l6eyh}@utl%3I({>t^e$hDqxVbAAK^Um zqFhvix_y%*!F9A6rcrUNt$wTB;y(~>ZWH@?!h6B3deS(JMt2x2#_?jWQa`0<*nH%} zUNi@(k@Iyh5>&e|IGe_n^)Om!Y=FTNWS_xi!Tx2w-xC|>x9M*xqt5s((tcf$V@W_Q z{7Pr_VFajZ12seJ1~rAA2^te9tETFV-TOoILiY>~WS%@Q`88fST%B}cuqMrt>o2qQqQ z9$d$AuU4m{m;QhZb^;EOrn*}pBwwc&tWKs<85-FSeo}x0^5pkN&Yesij!ZgxDCmJn zO%tunTpprAr0*(erG+fG`iIBesNDkd65L+?0vQWXZqL;vPwLH*`pJ3Z?l3y-!U&OD z^O!~g^T5MARtm0pTnY8enNx8H=>DgUs{6%VK`TVhy>J!moEZS;BEJGiMc+L*0-Sir zVt_NiUf@wV_>#SV?b}B6lh0Y`n+wXpWeF1IT$$#36@&>e6wLjmAjCf<&fX!@9^C0_M$xq9FR`+Svs@=ObVlBuN{Hoxa+Ju-m zR9cIY0K*&HtGF|2wdDO9N^-EyW&R0-dARbQ*yM_YukaOYa#74q0IjQJuE3leTgx%> 
z>!4KbKB7y6zYVWibV70ilDn^^k_hB?K0eu;7sfH9$k|hI$^zuK=l@VlzCem~iWTn_k zAr*E=fSDskhS9kdJNIsjNVL?VPY~@f;z4#w*5czn#7&(pPwDS+?qiT(dhcV@pq9Xg z{oS1)0@jo4pD;a!O zyh3=f?&S=HqPUkcc1ObA%NehtExngB)~sf&NscmI&fq={BRJT<;G_G~-Yl2CpMldp z=6(ihG*op5M~%}(?J!EgYa5VX)W+X*IEz+{lk*e#v9xwWH%T;wOkyi%kdi*D>37mR^l{y9a6aa8E zvJ84;^Zdu_9+#*y*{fp~&g9Dtko*X5fE`SMHH_WUE{M^^Nxm2w{S06-ru7=~qAw zM1Z*h!XX3Zvkap+`@ZKCBZw8pmxowQ$#~zR2|(e27h?s;?mdaTuerJ zMGdbcfj)p()MycFwM-c~=S~j8a~cn-B=ULJBz1yal3ww0bP(EOy~~ z(S<0<%82WVxvqZVa<6~{kA~;t0)!0gG7Doh!~Kjkd3rF&>eeCW6s7x?3(pxSSQ;Pi zdhnct4{D-;%6Fj9HtxB@z)I*`z?NVThPeV_x?KRv10z0+^8O5 zDaM#H9sxyvT`-O*mOExuOCrIX^DakP8y|o=h*simD+D>War7qi=)>!{Ox}LBTJk$R zi-Qldw+LKtY|*XGEX{IDodH`3v1&AdOP&#HGGiH*oLgU1WEHa~)&g(Lo*gD$YqF$b zeH-KS#;YPYYA+%VEaxW@giYk^4cvXfuYt*+1sD+$EEN9*{JHH(EGAl$zN}L2L5BdZ za}H_=0{j^6)LQd+R)FggPH&YTK~nlU_Pjjvf%A3>Rx?-MMsK5Ah(|==DwhS^4cA}k zhaHl2M=Dw5e?pu2{~aQ@`Ja%={}A{)xyEvaOPjYaWx9zUitt!s0#h+7lkAev{K?#8 z7ru+N3)TS}AR3L@k=)*)`G?^>f~fU(SvFLD+!;Qy%;{bDuW`Hf=I(D1U=cr(>nw9h z3SSMy_h!SGUW>=8QAxO90YxON8Qe20H?uuX2j0VkBi07rO0J(&hUrVwnbk<;x`7ev zg4RrjOm7zGnKEk8v-I@}Ubb-VR`}$6K(R7nUtAHhXH^ifwbER;Sd0B-T^{>p>Y@dQ zkJEEfy_N@TQzHj=Eaq{vfh4eWiFq}&iso?w3&m{_Gs`@Ktt%%LZboV8X@{K-PEEt% ztX$oShj{JCc~(7gVlC-bODRx>YZt|6MO~k|g8hZWWKLCNK#?w$uBNnEHx63vmyzSS zpTWf0gbZWx>@Q~;;T({PEoW4D7<*DJ>Zv$1KN)G+?9vG>I53M}r>8=X97l}x24r18 zWQa%AuQ;@P_vfL92TWJF$duuJ#n%XnBo34#sQXLqWrOW@@@(l6`dCxqG%V}Dwuspk zO-S3QH>-2VxvT^nxqZEs+rP<1v(Lo>xISvV$_gpL;5I-~jIccj`_!6+k&M5}Xb%;X1y=z45V>BTY)9xj66X{mN8C3e7K({fZX#m?*7i zQwLjXv3c9V0{(9*(RClI!)-HuCa4PU@RFThc07AjxrhX)##V9Gz@Oe zsC4F=F&uJ~8F6mB-)gvelaUb|23Bz7YIE~;3f_Qio>=!e!PYus7j zWA+@-CkuQsZ>~g!?M%?+kR7Vgrl0u0jwEVu$l1;9ziIMz%cNKWe_6JeMGSI2p;)F) zX&a)@!+}khw(P1xHIRkUC1UAV85RQPv0?Qpi|?D{`*L-xAb_F^vn-R#AZpROtbASs zIUg56I9GDJQV;g0LFB{1SKHywk_o{HlXOE#9inkz5p4l_Lu#;FAdLx-3NiCK$jrJ9HH4lByz<1S~a1eYNli>2wk9q2*^;~)$K&V46I zx8@E|_r4*4yB$w%HkAlfE}Ir%RM^ zI7$lM&zM#Sq+~c=`{p;>ny(TmsnAtr_zKpF=AY{Yk}Q+G 
zRWlD=4&9664HUq`#{N0#3>~bDOdZ@Nk^cD+$x~vQo}xXlj}VEjl`gWr~m2f)g#SM`$bBhQlWJ+tKNpF$s+UyQSaoV-56n*s^S z*yKo(8H%P3%yJZ3Su)>kI7|xN#8Kj{i#_0->0Sf*d>9E*cf8jiq{Gj(`xZ6h?gpDD zLWGOSls6h$-vV=@BaJU;6um(r)S#BUt+^!6!A55${ZOwhPq~+9HwT{@$%u12fe+CQ z7R!CGsgUYC000SxyP9OeinSl}tVRgjYIQBc3|zT(vnkbi65gVYV`hM_KxazA3K6DG zA}|7WH@WTzTOB5`wyAe8ipW8K9X7B_tPT>PMSVj|$z7Z~1`BQ#Zqh7dDp^6^0hNdB z&~1YW&TWI=Ko^@@>M7iGCV?8A&04xmKJb1$@}kDd$12sG8AI-m_hfJo!YEZPLM%=L zdwari7&!|D0}6@6T)JTWsJ;?oojP0x)U$%$VyhY!z>3$zEse22`%5Ft)^4eu2jM32 zd+Dcdl`kewNgX5E&|_Vn45$Hq%)GRkZ-Bx%b*08b8mBH6JnK|{?I&qYWIdcI0g(SQ z07bu^p1(6?y#lt=P#1dKW!f!RO0-)_*PU+GpV)XGMXIJnZ3S|*vXMmrzY;d(8My+~ zpqeBHEovomVCwG)z~FTE8mvN=HqR1T5}aNz%+jd$R1GQHt;A>&afn2OfIt3sA~!4# zyjPC(E2?#f-srT-2z547bwU(5q0L~8V%QrWwRt%t>c5g=w5Vn&cRCYgwQ3;PF;dD^ z>O}$5S8)zto8gc>R_=8`uh&Ad)obQ#ni3zF_u5nXvf7Z(ymnIYv>Ao$?uwvz_u8G8(|s`o8j+f7SmF$!C5Fs-wET3gck9hP=%2pHzEs zFu?wlp4sN(If6X{gYucrIW+*{V?9N1N2FF7zbOPvzyPt7fW-*V@|kH&pbh!VRb#d% zpE;E{k?4t%iPh>#;H7-#aP1F@CBn0Sd}aw6{`cfF-z{VJPswKvL)z|Q z#a(^`xduDsGctv^yW^emnVs^PP<*F+W~Y2+r+j9odaGotJQ817d#{3E$VjUkgS|4+D6hc17T_^fe~;G0?2+D%%a>P zvG9I9QX+@`1f8N_{EbAw`yaGZ@|lCL`uE6Zj?x*F!nM$i8VxZ|pUFp3J%Ev|K9G-9 zY8pP6dz-c?))ItKK6AHZ9{RgHVOKD6ISh(WW)O*yFR*^pqY`7C8m9w9f^m|L-AI|q zsn19RspNyTzvMHd-BLXr4f1cLH;4{jEuXnffZCPMBpJtKXaU^3oH)?|x{N~>m)&s;~a+W^M1u}6A( z#SJXszf(SQ{Gc6?&oo@}-S>!W^ zBS6b%HZg%VLP>uH>`t0 zC55G`J4&MZ$;T?C@bRPMGoed2{>;w%Gdu6k?7Tm-^ZrbRi#*@^{>&${eyDur@rPusyi-2oEKhhW zojf34jr@n?Gq*g#%HJuU*(slKVq$WR(sO3C2_%TjVp-?~zobF~(?r z$>(+2E!8vFAitP?-upA32~fN8nV)MslA(hI&pNfQ_LDUGGLs!70Oy&dyCu~x=y|;4 ze6{+i&bj2|Adh=b?G`NA+AXEq#D+!duN52b%SfHIsD(&G%|Hrl{V9^t3RN#a4XRpl z(4uDOtVwpS1=Q@`1dlg~Ub?SDu<^T`a*z4g;! 
zyieE9?&A1Qs@F;QVLyUFqL!GC8woK22IVtP7|)0F6d_`m8OE<40ndhk8A<}iAwbJ# z&SwH`$Y+j@Y)?MZf{Zd->tJ(2`#JD(4qv7H+mO$UM}vGn`Ah>VaOH`FLke4?_ z@tLFBOshN-ttL?XGz+`Y|1?T~)69tL9< zT(HHA>A%r_L?-=a?@et}K0|$IY5L8ULtht0h+N&>o_@1e0qfOow&+hBcq#eJhe+F9 ztT+siH2r2Lwxi$dU}iCsezTft>>qYWzuCf|ezS|9oxvH8$XzZF>5Iiqxxbfwvs2Ra zo9WT#=r@~NO|=U4)GUnB9h1+rru3WXSL#JkS*v@Pw^8()$)oZy`|&XGTJp$vCgo=C z8}-huYF8V`NUw4;&lC0MQ@L4Ufn?iy*HIHjH5@Z>-~D?lH=Br#nz~T!7NU;Ym-J)k z?!}>3L?hGp&~L`a$qh!RL90bYb6AAp6X5S;Jd=L29thu#ezSc5vZMOVKKxJUHyefw zU}Dr)=(nri>^=`PF3KDIW_@QtImYGCHZr>cbo)_g7r&2wvm>^n-;6rU+R|^v*UcR*L|VU@UklL@(j^QrDs=Rl zRW_k8AQU`Xq=yJJ3W;XDfXa3%_@nkJXnl`bZ;^6o{bm*$t(T()@Fmf2c0oYD86N#T zU34UA)e4#c@n6+%wjTPV9F1fBqQ#DoqS}fxPRhq!_uS&ug0_egqpt(?7cFS(BgsCK zIKe<0TH9LC+;*uK`PIq8EE8dN3@`I_i91=HMOA0pdAv26sHuQQ+7cA z+1Jny{y(DsY?w#?S#qcTGjtMnz5L_!pKf+)LfM3U}IsL1p=d9o0M`d(QXbqHIfnMcmf}y8!VXnV2l1U004R(15BRft;} zV}bUUMwqSLQaumCP2~5|&nv$`N=6)`xhuvK)`r z)Etk;!?XCX*8)VsQ@q7d>mLz&&(XiYkVA3~JFQns8hD!;Zy!gjeog-8rACgEoI{Ek zvxX}5QxqUt^x??Uqb3PN#_IwOTI9C!rE>Ew$fZjcmm^dyVarj&U+|?^pCZX2kxe!m9wT<|4Q8+L1X?`w}Y3=VOYOE>&BZ ziG1`3hyUfYA75*`5^sz(^^f2+cq9;bec?rDkB-^H^CN@%14=l@?$k72v`m-gX{VqD&cab6FbcXsX)F03La5M608AJ1z4kOV15=Y`XCg) z9|*i80c0TJc^+uU6y&V=jBpl*^y_@@5HItEMOXFn2O%nE6`+kE&#FI?;v{pdPV)J_ zY&E<49#~GO6=;HJXLM#Lte-o(dr-sWRE1KAE%d=}AK@hP+tIUTx$;kl4$ zY2tmP76=HG-F?k*Xf*qL(5hh|P2KO4W}3m*@bGe{Ch5aur~u;(;7E!R&(u?6q~H3f zF?rNRo*L6GKAfXnd^ktD_;3ztVPmp_59i=TGQ6iD3+)QS;rOFZ`g>&ipUm{#= zDO=gGBT@A=wLlcq6WuKZg-7BELCE?HkR5!Ggdk*N2FSO_cT1KsD61^KIaKv0GU{ru zTjOtBtZ04cRtsKeeJI5v(^^fq5sjP>cf=lE8?nBOSo3Pw9$>q9awz^Nz@mxsb7R&Q431d+E1Ye&U0R(&oQg44VvNLaUyg~rI(qabCqmJN z5m1ib0IlY#>nIN8CD^CLvYUO>whyD*kjY`8fa1sJNZCBQqvHV}uZ|JMduuX`fHJ6Z z@7)cri>!?m)#D*Kh<{KA!Z%3FV?UR2T5&c7$0Nt}S@s!EvH|h@T9ILFgy!GI#R$GI z`BuKzvG5hmv)Uz>4=b3ryaz-2uvJ(*f;pDhC2IFWw*Aw?YC_oiRYVg|jh&dsgd=vR zM*aF5Li%CG_QzN#I5DyklFf)Um-#o97*t3%1He0w@f!i=%lI&fm!93BLC=F`phGV4_3@h+Cp0?` z;_BQ;QQgQ@);qz{k?$1CcHVgi&CmR>Y6+lHh3g!OcZWyR9*x{!0p(GI-i;~x3O71T 
z7|NzHJ!QHJ;GxnyT&X&p(K$46cbUt2oZR&g_|`@ScbXW?DM39|A>OJDHp;Q6 z6^=9XPY%v#<+5xa+T~zmku{ndtxV>|jz&pZJ4B0?g>IdS%_t&iV&@srx0`%Wbu_VR za^?XTB>G#EzD`521IU+&|OlX6&aS3CmdSED7qV`eFGUEcZZ> zeyvvf-QX+t@?n|E{Q_qS+}2>X$~-noS#lV~6dP`vZa4(Q-dS=p88cojTo&?R^dDY$9X1FOi_0 zs^Yu!_8to6H_KfB0?dBSm!7klAN!h*jcx8$+iY(Jb?%JOuJ%FKCpYMu;as?{RvTce zJ<%_~3StjX|AYs*cJDIaZ5p$M>?|a~Q%fZ@soqD8Ix%L8-wO4pvHuL4sHYp)*&f(R z12YD;xD3BC)BzpydxqcI%@E3`piR;VQG z#oPj^Cy7SC0x=79U^Z9H3&~u!UTWS8drdz-&Dk% z`iB!p1Qk5-&g-S7HEqEg(26DkniP;3ohTMBXw{;4!PZu+pr}}bnbDjck7ALERWxd~ zSZ}llA_xS~Ow=los^FDWtLzag2nvC!`Tu@v?Q`bLWCFh5_rBlreb4jdc`|38z1Lp* zzV_N{t-VfJ+&{6rCVfjFNkc{OMp+uqSUhgyRm5p{MX;AmK`=OyAsC#=5R^li!tzu` zEQl@71}n0`z6|2nR#W$!i1IK&Qe!wfQhTux?!2*hF*pB$gDqh&i)4rGx2S^sD)lZ% z13s=@WFA9bqd^cQz5rCrO&6Orb&J_NFyD^>&>RDNkrM0Mq{Jj~_)DtMO9@X(2|jES z9)QjTNPsKh4r_B#$0MfJzpb2^pnOM{)-AO~9VkgRw+S1|BN_DHk@MCU$P&V?<%>Nd zxWnq4R5KCswB!w|%g_0sq@CQBU*JuQ_fBrJh!XC}jZW%f0rq$th#fwxu@;}B1M624 z_S{gUqf_O$Ai{-qoD=KhSQozHr1x3)c{=+IYWrOte2}n+sPoR7AF$E>T=MmRC>Mx& z0h9xwGW-^QUe*VZ6&%@%s1);UiaCe1;PZ%_-jmL>-;$S^hi|jXq~((n4NMzX#3fGO zWf8^=XK2oXrJVf67t*E^Nz8{gX8wVlkDo=}bRYy5dM}3vIkoc(hg;KgZjqgWcgft_6!T8(8F#lt&NY)J$GrVwRjm_$ zieoS2_BhXef-N}0lTWI_hsemMo7tCc@30srRlyi24s20W2giY`AH**0-NCZ|=IKzp zXkNXX-Lcg)%X6Jsf+yvL^X>bScowF25qZ=rHK*6Q&y(Jbm#M0(zMGniK7u^zH~W^8 zPH>y>aCn=+ZA(5xjuS_4F7O9U!RT6%)ZH>CyAVm$A$R)!zDNorPeoFcL0ZX#3ed3j z!lm~%s#s7e@vPKcL^N?8n$mKp$5>)b{h^Kz96ixi*+Z(9ImvAxmZEP#i<{7dgg+!j za11NN6Edt_9fZ%7BgqW*Uu+!CdXb~=AW2dYh+k3%36vD*9b=F4kV!Ph{K*hT}pS>*|Y0i#EJST50XP?CoqZi^M{~1D!75!5$AHP zyR2D86QTzgsD*Ug+D%}-EEW8+qtL6->KBH{!-zk~sU)lqf}bS9;2Vt0fNSKMQjS-|b=O z03b4FAVNct0i0{c!nbk&O^enNPHp^KT6{_v)t1Wrp_$;+2@1$ zNSCsM65fD(MgErkNrZO6mLJRC^7~50QL)(IeKWh4lQh&U#wdUqz0?~hq50>6%;OUt z0KS+fN#@66IZKvf*>jeGLeNeuhc>=MXun=&aay22GJw;5z&FBI-A>4~KoVp`>fcVo z5#(jK*&K>M+@DYNTYMbOh*YJ`ma-zMwEp{%(k&`SRpQ=lqQzU*25ti86{#iwbMuT9 zFUwNNE&CP~$*xNkuj|vu2>cnXq_B@J;9I($*jOL4^?WWeNsV~|%iZ&HR1BLbfwOpx z^s8izxv%OszIhargHO9X=sBrV2<8^Y{hRBgpHp*KWRxHtO$eOTPzS64wn3^}+kFXh 
z#M!dNtgajml@?+Y)e{Xa{{>YaB2^bi*7-dUcGQ{8&DQxlT4yPWBfddC?HW{hp*@;B z#mR)P9sBi-Kt5t%6S`Kq4YdwFc#Fi@NJI*&7HJQ9leyTNPU=@WF(z=*>RA(?>rP8f z0wA{x0Pt$NaG^o=>)<=y0{~Ldj&)^XIAF( zAopu}In0Us#Qi@QsZ=hTl2nks00CI;^^XZmvzO4)Bl{vZsu@`V|JjG>+I8+C4g*Y` z!#>0%z4n@=?c7ClyyTPU7%w^FYf+cDf8$s~aHHcIk$wCH$SpG-^X76Woz#O;;9`T3 zCfgG8JQnKolabD0_>;`7^C;#ms*~JPB=-#7&jC}fwV2DK95(8FLivk&f-)t%MM6qk zPanBQm>K{kTmh_whUv7pe|Hk;Tc;pem( zxRO(~+)4fpYG+jAB(28jQjL*4{ZXjKSv~99QH_4AhF!d3Y;K2AXS7~?WK?)wt1%c)AHD#b3b>fyB{jxVIi^7YD3uR;%8$NtL3zfBL%@UGPjn?D= zDdZj*xzsxoPsO1J!;{-D^}GZf_6trT+An_rr-^>~QK<~it$6c>v0L2H+AHVCZPR(} zoJdvk$co_Gm#9$Oe@=`F2R6n1TSZnGH3VYVZmqz+u07UoTm7;zSWlZ{j|I(~`aId3 zh-eCO7pNF2Any+Cqs%*RvvcLrX-?lS{5E~B-BKret>0OdeT(3L!o5J+1W99_++I9S z;z6cIfr{kX9E?O#2cqknVCFWr4kM{lT&S(P3B-VhhPZ#*8lf$eScJQJK;fI~x{)EV zH_c-1OI)s)Fx;SmC9d14lUgg&LblCXZ%KeOG$&|5a_!+uT2WXyV?W?v`t|ZT%`m?>V#3qz+_W8h54A5Zogte& z$yyF6I!0RKOGVTGB8U9$uF0~GyHcM-F-9grpid!$EA_yHVTZ9n_81fP%0Yr>hEQM( z2nnL+aa;(@3fa9r%&->Ci+g{N?)7Bh@~AF^G_dr6gt&gJ#02F9vTG-Gr({?53RXg} zLjhpC-xgc1SZL3)$&4LMmTws<&rV1bf1WcSC&+*dtHWOIdNP0!{=l`{&Gij-0@-N` zQAHgGC7b|XaJBoxrXit$2^i4Xoraz;-V7mYI2vM`OAni7fl0W6p1Q=H!~M*b7=Y@ERDlw9v2xC&AgUd>cOa ziV`RJE}3Wu|8}0|a?R5d;wR11Yv*|eXr5k9av~ReeY#9UhR<}vi_*;A8I zU#`A98|jstChj8kY0y7aE1l#Mz?|?OYQh>C_~OFT(Jhpd`GZP#I|!k5?3PPh=@ql- zEEO}nja3!MTfWmvB7;7ihKKGS%Lx2U6%W>LAq0ces}zoVr)|HFzwk4SxapOqGg>veC*jKO!>)X3I$-Aj}z|ABlDc3iJPn^fNy!pDnp!Op3 z)vi~SuHZ~4lv7W)Va?}}H9%F&z8`mVz#=VK%a!AoZgK2QklTin3V?!f)4;(I$|)#6 zxIJ(LanUC1n8idz>2+o`Qc$?cF9V~?M|(hCUEQ-dTeNJDT08Sd8}y3K7~OYr7@kee zo)YV%El={13432ypbcB8HiY{r*x(X40ABv3$Qdg zHag3CWN1fu$uu<@?c!X~!U}0x16OshxrhDr9|6GbCPsxmY@zcG&Ah=K^VF!Y_#`rR zq0Ayg@)t!?7g#2C;x~>|B-3b__*`6&aEaz{KW7V)MZ&0&!Q2Y6io%2)jM6c^%{^Dx zR-pS8&}CjGY^;b`CFoG;Q0+XiURmt!GV;PXq5g%sFH z9U|416B`n7iYRbO*+5beOgKm^a?cXCNh;40AXT|%iC-cKRMS=q^^sMG>VZ7wA{nAZ z6p^XOSqrRGnOTpeJ#bZK8S?c$FdDL2)qlufq_ zzJ&APO37PryO1@c+r?+tDzR^7=kpbxb3TKk>y;k1lWkL~U&LbE_tL{;v&%Jlc^Bj+ z=Hc2*XI<@cOmf$Dj4BK4TAWm42;OOfJ6c)`@IIN<8?hd<^?h`MtUtRR_s{Gl? 
zr+m|q*+bh$Pmz83em8R4q3vowR-hWg2lsc9E3uV=*@A`h4hL|20Du3tgQu#N_)~|# z+%NMBWRm?>HhWWF`wbTy7iMM@Tq&?2S0FC)lK557>&<(dOiro8Ji=8ByN37iuKS?* z2cpdTF{Y=@$0POmzJ0!lr+H0YI?SUM$py@DFJ7RR;bvtUrQ$sU& z+krTZ?jjl*?9of)!XbX*?$aWWfDKDhq!kjN_Cf;XL4f)OiK#{aggm&EJQBla*gmr> zerUu|94Y-v(7vTqu9%bAbk#4lqtaLjd}J=d29I{Mu^lZ!C^b9@-_eOT(=nkboi zXjt2?fYP4+8Wv=SYFPSfHHQOMO)GU+B(6KsIn@_shsr!9aI4KTKhYLEg{S$WjeJld z)8=>bo;Ekx=XH3Rc{(Ce^+yplf5H#Z-^-V~G*!y=G%O9fkzw@nHS%0*uE5j$!oFO{ zmm^t&F$fXYD8`&@Uxx8z4<>ILp|u^zm?QY}Lkq+CV-Bz{d-0{WeJSNjk$q{G{vD2( zB{e{0Ik68IJ4^gPdItEZc37?ChcF-FdP8&&*R|V7Ll-; z%k5wf=d3$Hrr=t0tUOnn{){}#Mt;vm9>B6W%2*`a&C=5TUIjaVj z+IcDZVX|IwDSGOs3#4C4+dex4jKsr~S*bnlkFn8!x!+jItC3SG#@4zY$I~ShVwLOi zMp|Hx`&sw6|8zwi(8Li>)~80Dpz(erdk?QApwntndvnihP*l`V7jXy2pOf%*heSm0;&e|`t-(Hw^xCQxn)%=o z)`)i8Wctmn@v0S0{ZJlOat7E*Etk^IZvx|yaM$K2aPHTn#+Z9zlhq66gG^*ymg0oO zzZ(Gv;8z2i(|*lZ-_Wm?BaO}`}E zS`|jyZA}!vpVK3>_l}sbO6kW_I6LGH=R3iC>h~(x5-0gXlmKhU7f|7%OfxtJE2qH0&V3cLMJG+PswM`i zjxLatoa&Mg_mvuBIJ24UgkbDsV$hz9 zZYDBslVxf@UJKfYr#YOp@P|IV^}r&tfdhn^tQLT^W+k4iSr68zn^bSDS!fggh9~G@ z+}#w?!?^oO!d)g*k)3C$qG9#D(w8#Oce^u+IKSM7%kfa+D3_@G(!Gm$S>d0UROR`! 
zaVB7CzFF(vThRz)N*n!QeQVNFDee&B=q|?_X(NqzxJTfX^pUR!W=(jek_YCMgHVF@ zBa$`T9v&O>hV`|s55vUXq(it+4J85&Ln^z?ZevdB7edOQee_26sijpVtTRHhj1fqL zxBvurMo($CUnY-PedLj9$CNPJ*rA?Ac><7ZNQuLyG?1Szn`TOjNZ7zox{ZW#S zgCN66u8>KXdlY{N=?OQ(Z9A%%l6w^H2-N1deEM*;%&G&V{P3lXh7q*9+@ z^viNCXpor2(T;@sOBjKoJ)|eE3C>(W*9!ow^{<~(>tEHV!!7Apvm9ru+q7OZ15Kxx zn!Fijk7LF_q2bJ*&tV!~*?i-!)yXZm{fm;~{^)7AObn+NU=0F88g=)}K^FX>$!4%S z`X#UB%dMzwPq@(FMtLwN^(l%dH1Ld%xyNNr$Gjsay$h)hjwtUBCV@zs`N#1+Z9Wr% zkzy;CHcFm(99PpURLYlG?#+;)wp3~_98h>{O@0!gQaXSToVi*j4Db7eBHufxf|(TlhU(!J>7hb&6mNPxx!?1$GkCf=Umb|} z&G!BkJQ&E`EBaPfeh=ykUcbT&t@R%%2d=D>buPnnJ;|B)!>A7^S-PHtj5IAR*XA@7 z*deCt11rRIePDwKvoqx2H3Vy7QP~Qon@vvYKuB`nnR5hY?zLbw#E3iYCo3jk03%ex z+De7etdYnT_pYS$ZzdW%a22f~#!4Q1TXgG|W*I-22em{<3F!W806VE|;DqBI9Ig&& z%_j|Skx2$6;Uc>kD>8oy^Qu5C>c@p2^Wd*G15#c8Pr0!WF0oHY`STpr9)^r)Xf1{W zerVlQ4Spl)fsU*PxoBy93=^USm5W?xtlMtX~)STOD4xUHz?|!Cc-9)`jkDHA6cI z$`%yBk+MkIEneJ=fIEVXvchw>fKovv4e8|5b}|s@TwBfYL^V;p9aQU4K(RhypNp0F z#6uv8$eS!l1Jq_hrL5q*w3NrfWFk$hfo)1J+#Tc6bW#hD5G)7XEAot)r#{aW9oa4} zna1-P?A>h{*1$dPpI%WHcUQ{^T&XU2m&+^aYW>G6bcct3l1z793&r7RrBH#Tu*V=D z#9^#;6BTXXK{>U|gFhx4pmWocWKh`18gf!6BReB2Gl9UfoL+c)_cbMQ?WXb;bn#WFao>A!fr*~p5k#*b!s7%}D6tQBV zU0%^>CnzW=lk_cM3Lw2@UNqNWN|PkjEIBXgh`a4zFk=f`GS6z7KAg zo5T-~(cPruXx%in+UEv5DF<6;pN9-$dfKG>>(;T-!lm&vA0Ux?;z9y@LJrKUh%+zp z=XuGx(mZFOo|g9x^SHb(W<8g33oz@Cm1)2TkivvBVq@m75KnW@9Q7ghOcC|!H0F+( zJB4Am(+(`n-HI|tN#i=qZk$)x*M1@;tv1KuX}a7CGOz$<;_Ju?5Z#)B9t z*Z(-?DDIh-Lt_D#MPqCG!CJrcmL+JQ`#C29i(EPmOXsk{^A+_$MP4W7#9Oss)=oqkn)%o<|4nV^EoZr1iTJp8JTezg z8bwKn3j^9b&w8?xzD&d*+-ScuA1owb=;K;udUfSSC4uyE*RVhEt?$i#t4;mONzrFP z+#lJNztuHK{Fy!D>5*;dKh$Dm6J0KW2@H=Gx3kdVw6GK`8ysh$x=jFBa}Qv+v5iAI zJiqaG6AkSkM#7Y3IXtQ4T~hw7q+f%B{_0qLbFt}lB<4Yt45X9dgpKQPu$mYS83P1) z6&H^PDYgDn2CjlC0VI#xBE%76WKidke=w)X!jUipGMbX+9V1VmbH;||0Ksj(xO=>l z+JXpN`v#wW7P>0;qV;m7xo3jW`T#en&1C~Qg*WEl`!hkj<1@d;vf~Q>ascN1008p9 z-XhCgDzbP3EudEG}UjJmhem zmcsFUIFxHf6VYu_gS5HXK95HvRWVoNozJ6K0G^lNUF90zay<@x``fI?tqcGE^+-DM 
z|KjyHr5|R%|IT_`##s!f3tXxHo9po{uBM%=$BO}D8cBOF+bfUYGqN5>9@gc0>;+hJ zsbox>j>D1hzrP-P9t9jLkk@=;5pA*0ClSeY_%Yu3ypILo`6k}&dgSIPs)8E#D>%HI z#qJarxX;&NU!3>mA=U-%gCH!}<-XWRb*}in#kYIv7)eR6^>>Nw&mQ5^vb%5-OxK0S zL~)jg9dL6^-B-I#OkDjVJa=LL7pM=+SmGWhC%&#VrzAqdK?M66y0+a9^sa z|Fi_Tik(|Ngjh6gQ_eC=;;ftX?WKQP3EfuHKF~DwJ+uj|y z;jj1qq(9=h{W0}Zn%tsG9(*I{64`tF(tkprS+GmyplEUg->OTh>%Q65Nxp|Nt{IGQ z$)riR$Gq7&Vq>EC11g-bzk!#Q!;%=@R#$G*)8Kk!sc!hu`s+j~o&CZ@Z)(_VCfc&LSO8m-7@h$P<*JWS&n4D?uw=t;L8duZMs{-TZ zq@Dz(=-5qto*fn$ygIz%iacv|`X$b9A>ZD%>=Upfv?czl47a;YSoGrR%Fm~iLdG=d zfJ@x{*FQit-Eb@+vjJBTxQQLhmmgs+yE9HlAIqS-^Ug&OO_811y#9mj9iF7Z1C?&p zsU}ZgWIr5^y^Pc1-ZfoJi_Bo4%RB=?QDk~#{gGP#u);>Fi&!|UD82a~!QA+Se2HIr z!#CWa-VLY6Rb#natc%!SeiXXu`T6I1wUZ1>3090VzjWZ%@*>>5x){MnwlL@thM|_6oWltX=P7FF zGC{g$>T+}hS?UYW;p|MlQwTBT9hkVf(${ml>Him zX&O{4AVV9$f0_Q@%b87bKEaZCakFzy3+OP&T^VfPN?SS(EP}CEEzI7|UY7kI!!Y~` zGx3}z^B{;EparV;!-(DNj48bmPoje@9kZU(v?4NlSssd8<@8^Eb&nOg-Zuj-3bWZc ze>rvdq8owI9!Qs&{SW23dpqX7MIr#A)P!}Km12<&;AF9T-L}$+hb$!P8}rUr7HA)1Z`N%qafUBn0sBXY`oiQ>moKct@B%Qg96Ic^<~taHoC>2+=PpAXmxK7% zAt&C?#`~D?{S2pXtL-}HtlpR6bDC+ZB|G@dKHp8pL*f?!3;2Rh9q;AXl z-Owcz9&K?@}q8e;wnWgsL~Lak?+eV_S+*Fzl)Q61Mj$Z$%^HlKz|QZF;BV%)UGb! 
zkHv;ALdJwEEeXeC93T^e$JIH>{ScyLPV_`w9ew))sHP4;MW_!8w#u;=6W%;_#X>Rz zxDD}N|30wDUZMaG_bh5$WafdAkfHPO3@%gr6>vD!!CJ>0!)O|+A^Y2i<_`Kjo8=@> zKU<9V7M?%6z>q&aN2By_Ga;_amih(ybXqv0>f65^f`M5CkI9^{Gjg*CZ6kCk zm@RoTGJ{oUn>~($S|wYFle!!6aqFQFL9RO}7iE74>S#5@5!TFnjTLG7l;dY_{Ez~y zXUr~)`R+mZIS4=dwhMDtPxwZ?8;+CsU};@OBC z?twF%QHRh7Zr5esBNnMQ(A*^vQ`~x(IG0((3nGQIYjX(dVdX31*##cKs7`asD{cBDuFYX^wf$l)He(r zfec$gW5dw+Fmx|MJgujv zHri0Q)h#T~_Gak>>Aw1E5Fm<;lmCUT=KPwoKL`C|XfWdbJ|*T#BYPYNEa5lO9Tn~W4h0LB$aD)HZ zg8=Pce|Ph+nS!aSJ^EIG(`(7_Th=`6Zh)sI%bd$qH2&f{f^l}>j4Z!?r)&Bgr1Q+( zss8;neF4(v6iWZzA#7ji6!uAa7|rO=fX>(9w-Ak2dIrJ7b$zOumG!jFpw3~3H3_69 zyV}BfLldHQE~O~s?;?=iVrhK$$6g(7evsAm+4I)BuQz=fc` zloC68Iil0FHjJD(05m+(HL}vY(s#8(xt66*aO;}o;9im)f$Z`W@j(yz&hi(fAt|0?W{Y;dV@a7T?ip0S=7YZ^ULWxcjXAdU3k_VqZ znY3#y+%u)tvJC&BYH%+3)XGQhg!>qi;`LuckvDmNqVvWg!n#=u7u^=t2gYSugg``B z+RKu>h}8wE;z_3oiz8oihHgCIEUbad8>}@|YgFs)C#R#%YQ3=lYS=&K3C{JqO_Rom zL!ul57$3vz1d3;ASX^JV?kHjcL@`r_aGdjqRfT6X+G894S`F z(2l=stBZrvzc+)tQPj#H^((2?Jc;*DDGR>n<-&a2ugnKd5SX8OOP@$X5llebd00Fk zAM8kirB7rB76tw8*RNr}xv2>P9Uc?{e^M7=*=!M*Zcgf_xFv`NO+$kc>bnIQ$1)>4 ztm!gtvHOH2BALCr6%|d(-vCDLi^b_ZR408t=(e?b7CsYmha$`;d1cfe^o)%@L8DXa z$Xfc|04@T5dO05ug<)u=kK7gzf-*!>0m2cnlM+)5UMD7SaNO5w-8OAE4mYi!U}F)T zt|vy+QYMv61~q}P1mf9M*#U!72?G)(*7QISWSKbWH8(S4cp?w{g>SPU|UZ+bY-e@R4qIlc@ypyQ@PLf4*4b+RRpMW)mK{ zu1s&C7Yf&QVNq|k~E!7huc1xGgVL+b;O+$Uyl`UGE?4Vy#~D@jlg}E zUJy*d&xm|gQx005U&pHBV?a65gFtHVg}8T05hh;$d@-)Y!-boW5w@CB8B>8n!&YZo z9M-Q1`N0^lezjE}IX68=e13ha^i!}W6o);tSdzUmDZyhwoqA>z3JcNSt<{qXHhRW9C?D8{)6-@|xEy64RIIL003sNJ`BG^e-#i$R`Jckv|Zr3Ogz zBCPv`f_g?J<2`@2B!AL9;a$)(Y{xq9&K-fS1gJA#+AVT3UY6@- z*@UhxUpFF8O?r3*4tQElR?Jpd$~VzwkUfdTWaVCkGsKVqeZua=)Pr5u@QSZkSi&7k z>tY$mDD!r(z>5bJW+O9WffW8<{D<1G$&48Iq6n53-Xsld!;*dQP1!3s1>g)#By;9?EMFel z_rnvSX4ITBKU@tbqDG{%p3M3js(!3gR%nNriCK=^^caV=IOtli-zJGTVCIHnX77O$ zgm@phWQ6@BZRT&-?5V#3buL1Eu`O@p63*Y2+YpIuc@)!fw&iNX6xfyrAR1Qfz}StI zL0fgbWBbLn%pwq-8;@<7rR2tEw`CUHLy=^+WtJXL;pTc4+V!@4&e0I2T?FZ*=<~sJ 
zkf@@YR;@-i_3i=si3LsinQTG=8e0U~Hn2m7?^YyHSmh7;@O5YdICkoajAo>DyRnZq zm@#-R1hd7?e-2o!(x4d8;7eP8E}umoqvSzq;!PKb=xjY+lILP03vO@EHiA;|j&-Xvdny3%w`*%xo)=qY62U`THn2pS5MaF1}vd4$42)=c>6|Il7 zEqNs)yu}UZYjZz($jYq|C2I=$6^_d!)I|w*ca9A1QZtitH*Eje-xovxL=y^Jx7=h>>960rheS_~`*PT~A@RBVy^ebjI8MlY7!1w@;`I^y|8F^BxYissh@L33XIFAn z{aUa&VLf)lo`oKQe_+bmOJe-G>)8Ufn5kmj7O%pm)fY9fJZD5}thm*=X(s9s!&g;q zx}5>s|MI6dN`0WLK%Mkfhjm$+b$q#}Al73(-9XM|o}l7^Mfw!rUv4xM~hoIs3cK;V)@J3N^oZ;lR_AFGyy;AZU+_CTG_irG$POwiSCYpWq^|! zD#NFIthXz?X-duXtQRu06;d;URMsrMbfb3Mjp$6<2%HC4GxE_}u#isjdsV`4JwcyB zp0dB5Rvns3aKXhesO!uKF9&;wd0U6>htKgEx%W6AWJRS;!z-KA^x$2>`Xh_08;*s{ z3STbi@~Y%B3fZlBBpKlyg5C*G%L~W+Yf2hMf)SQObk=j@#=yN~@ZHq$g8rJG>5;+> z?>xf^nt{H;F>I*P&Pv!1aDkgb_!D98RDJCvpMr7}_l(x}3fBjw0>oambb|o{7aSyl zYU3V(VTb4Q+(+i(^E$5+8?$p|SUm7PHi_Q&M z@sYL-uS{#zVVNNJWviVT>6&D_lll{i(zelKsn3Q3_M${>KJp-jtH98HQFC{ zh4uh8t;V^cwg~Mg!-ulixKlT>L8)WVx>&u8W6{JhnD3P&Mo}Vs4Mkv<6zYIOhSbD` z@A|U-MXy75hvS(QuHAnBpKaJ*6vneKa(K$YD_2h~UY9=Z2IQCgPt0LkassN?LX~?d z3qrMR=GSm2m*va)0wb?+d~ELLULnU6#^u!GY@i@1c6IBGHvLXHbp89HuvL9k;_B&MN*J*7$jE5MGHKR~iey)tKt&UA4j zs&78uwynbqL0)sLAXq$sv4?*}WQ!>bHA}zV2J*MnY$ecM_~FYZ_W2+Z^}^K5?KVU< z&da{wK1cC(g&`50iWgv>$x<%1m}hCp4UD@(!z7jF;%`{kIx`DTGm}5l`Exw0T*aTG z@nbn+Ge$EC;)Jc;aE+5@R7MadfF@{OIpWQMQWM;*{eWBjpdl~g{NfLtgomlteB`># z)94nq{?kbQHiq39(wVPIw~0_MKy`M#Baw&A_`c>9__vU5UgytB z{=CGW7x>e}pGEwc&!4~I2eaQi%BQXKEDg)d;e0|oSv~UtdezivB6&75F_LM#gvE4X zi8YSU5AMt$`^6%Dfa+zI!$F<7R~sgMT)kBakj7sm!-nq%;)z0Mv*BgLi-z~a({$GY zrG;BDCjlfCS&y*!m=P%FJDOXnyqAV0LoVL8_` z!Is&dkiXD~A|{?s=(9CK=2mY&0J@Ti#f1I}gTevdM^Vaz>Z=*15$K>uxQy6#*9hs* z4tC^19o9NRzpCgZ@dGBl$3%bVF3p(Z59shnqRt;pbXOp8BKp{@WOj6W+CG=cbE~=Z zOU7?CKex}b__UPbdKAM3T%`@@%W7Q8Fb4V%6kyKLQVwM)d$5$aLe3=QPz_6e4AijH z?;`+mSP#}X>CD#k z2lDblCJyBfBhR+a%wGCF6F|%~$-ZAFa`+cAds?_Uq|-3KkJ=Pup0cTbuthy&b3d-o znFsJL_TMmGrn4^CHdj#nkp?T2 zKWJDJ1v(RONEy_KT^`m=ykdzM9EjnSYD4hRDcbhxho@q=ssz+oM_%f~ODvPbg;(*W zqD|?W>DXbDgy_vgL$qo_s3?82ICh}L(ap4K(zpad;4U|(Z>GhOSQ6cQx(%7pUvNal 
ztSUTYsua3(P9s{VN4_}}S9rE<3u0ht@{jfZ5jl=R1U~fr1*5W%S;GR@{WK8Tzy6M9 zIM63198<9McGj)`OyKGVuz3Iana#uQ0J!=P&pYxSp%@!sl*4mbr~38K^e*j!j?wfk z?Sl41dcr&M@AsomtZ5KF2H07;0pSOHz#v{fVc~WDLdSfOk7%;1O;dxPX~N<}^b2(n zNHBjC)4<>dR^ff}Zvvm9#Ufrm@Q~LzhbbbS*eDP1>nlb4lxoN?@k> zAA(D#qImPyEm*}q?b&k$JBL#v;f+AN`Z~5>kx)mUIZPi_5|~XbQk|&D)8zd6ER8^7 zi)6C)K;Qfx@BxX^5pO?5lv}yW6wv};K#SZSW<6+Sp|3d(Ry;j{XAXMDhCxfdB=IzeD15HXohxqW^i zfvp)9zJn6Xp}Y<_R-wicLDyzjy%cJ^LJ_#oBL63+ufJ%5LJ_zS)O>}StWX3l1oeqP z=~w2WW>_?^8iRf@&te^57H`b&qlaY;mHtLM6_h}G(|(c86%Wu{4q1XImR1)cTlQOY zaoTp|^%Or50`YmR_v@ZH%V`7)R70*>N|ZYLE0=FC=m*r0Jd-tc#f&R!SXB%|57S!oQiH2O+%lHVbvJ4M;u-i zBK{u`7y4j_hE*>BNDQlpbN!xyd{FGGdb6H+75p2K`qO7}@IAF|NJ~9+4&>Tb`2-gG zDuHKRy4a|Gj6PEPDrs{e`>K6KeB8Bt)pijK2KH4GI7qawTE3ps%;&od426qo74J?ZnKRXZJ`kGD14tU?Labov zMm8&l1f^g$j+v7>l<2%gQi~l7QHg+wUwV#IbXtCvby*K)jDH|&+ya6=4D0Z-C}cG- z3MjYx@wU4|&>>mST541`fYGbQ9*O(Whjy}$dX2({`bE+xC-puUP8^3}Pbe1=z!wGH zxc&;b8fOVq{<`ZV+|SIk&oMB0yUraVynvO)bw+&QMlPHEmPy@z0_Egn?A3Bt&E6rz zS5he-2$$P!|>|v!eO#2)5KWuj=UCBWLqb4HMm3)OGD&}OT&8%)KPCiJF`KEj`tV<2X!mDN~Z=IP&+1GKI>r z6YcraaKM7msS;l>bhM@bm{S zE_mVXpjf)j9;k}abv#C~2gBQ_h{hYw&v>6ijEZT%%eMIN_W_ggGG944<`GT39_vj z3qUxxn$0ML5|fun3?Re#5bx%F{%re*#V_VGFt12e7IWy$GA2&JM5K7^X}l24;|d_a zjT&aX&FKQyVQ$qpc9yB;B!)nyQYN#0AmDPQBd!8*<`DiIz@JH|m6;%PYB5)nDPe5K z$6D886)BgiIb6eB;ASn-%>jroZ!px#pO^8&3a`LZzxZ%AP+57NtS6`ifhhBqeiuV( z|0YUi>z-t(8N%ECxB*xYmj1YsVO0Gxd9F4WXqwdI3=K;SU2TRSzCjRgFoTfJ0*;jD z26HjuO`R6R1!ne91Z`|$PYKK7r%Q@m{{RWjvve`QKe&!iVXF{=nA<>EO7619Te(IK z&_}h<0(>P4ydkEPtuS?rpNM$VKZ5K~4Nqp`Sxnpu5mYnmhNl_9RDb9mPeCZS?L!#@DeB+4VaC`*)5kMbgDr4gA_~A?$ z1|W(bJD|kib2wz2Dps6Beprv&nRB6&Ve0+%`Kf(AW^>(-ToDHffxnVCTwckt^b^BJ)zR z7{qhJ2cM+pok`G4`D8qAO;XYra>F-}F~R*1@5b^RDtjK{;d($^#2tPkTA?l|!+e7@ zy)U~cJ*)}%q3OIlGAO7ejI%kGqKi=Z(>9FejUL?|6==gzd9NZbqNG}e7OFAbd zpH=cl1HrHsd?tbJh1?l}80Ur1Q|EnBF??nWI}Zy*F?O@bmUa_ia0sJH8Lsm@IGf7^)(-3Tp%KF?Z-YG7ujm`ET01eG?~VLI z0qpd(8Ty8$Ki|QnJsRLhD0vyj6CV@-nvPZ=n)HlqB^Z3RVYn+7`ob>!ZqU}$mWe+G 
zsIJ0DM;#;`byX|230>}qep}3;sHx6#xD+7ic2bq7i1j0otRO*!!G~VUkpRgjj|PAW zovP8cP54Xs9O|F5d~^b?;$e3BYji!OBL1N1v*j8JM=8hb^zip}|LTq>_R@tEGyweh zC$N)0^`54!1{DLAd{a?;2Jc_wg<=3Rqu<5P@oR}>tvLoyGmt+=NKD#P@M*o)_K~>N zrWIM}*`S3_oA#AdW2^Z>o-56#e0q!7@1`*V_j)A01z|E?({9arJdhqM+=ujefIe&n zXY~Zol7To;YLYg`+vhRzTxotSwSfOiJeP8NG>?;@i{D(6xf@2_tbb2|-fD_$u1Y}D z$>+v**uJf%FR|G69V8f6ntkNC)%=|>-eS%fQ-nA&2hCsUvkVGxzVjK#Cv*HAwSGcQ zAg=z7`r@V>)Qc-_7G_`plGGC}WkOhGhd)ea;D!kacfGsHx;J`?Gac^4YyHJV$gP}! ztiFI>$a;(qyv0M6DBj{lfhgYMMOj!slL`v`n%KrWvBkXfR!Fu?qbbw%4GN! z78!i&f%_y}cP)qYpGXn`1J{N1>EghD7aRaK!Q&e2+USfMo%k_O;`^{A6}W5DEnfAy zlPpC{+><`CXfb}bv%0aNSD>BFE%ytpnOq!qwXgjl%P>wHpkR35_d~EG(`+A>Dk`?S z+YuXxQAc=PkE!eG&OBmP`0h+yhn2k{c;B!WLXNvtmwA0#F}ox7+J@M!X?Ds=bP61b zWjp4kb$~?w_Wt)@4=l>OE!kFrv1-C<0)y9rhq>;|uvx)qECMt~h)*yt0*x;d13Ggs zmKX>;m!p$<0PTgPGG;acLh7^LwYh%q#^nJb&&3>(|Co>70xfM^ji1N zBF>#L2k4h_3A^)HDqPw4PwO+q%9vdM9l>_VJ01=Q%_o4B{%7}LFdB-Iwj_vJch%}8`NLGT~Y3SKIyx_|3sbe4wV7#K)-dF$!k!# ztKMXWwdR+2nnxJ(0DkBnw2S&WDXtvg{Cw&T>wMLY>in~4ovS<7`6%Guc4JPR&5P(p z@*Al8lJTg*igQGAY;TB-D0Zr@_ueJ?gthM5_-}9RD3X? zoFwgDt@6#8FXp*;Q;X^9%JwPvb`7v@#t9dn0KozjE)@=}imf*<#42#5^q3qABtU?v8#F)r?+DdY zg)3z@aC#@lkN3TMv}mhX;f48AH79@O}?d5|m4*66?A$rO}tKfN`pFYwwb!g1gt&L?OgA3_1O-a!=! zcT7{z0DPXS4}rrLax?(YMiQ8oZ(EU1PxMNpn)(AWE2P*J{SmR{gO+4E#iP* ziTz1(dN!vtKkn0Qvwj2&ihP)X5(O3^7#ce-TN1@T;oFdv;{>o>$jUcekd-|JrnLcA3%VdH>xAPU39_;q)w?dfR*>)dS-$8^@dD@y zh)Tn$#Zp=)A2qisD$*5{w*y>16Tq`d8cqdafl!8KD%KldYc7rLr;sEpXVJOFj$ps! 
zUf`ca!g4AI%MM|ek#% z(saX<>YVK_1@|DPp&4=`0!*Tr-m2Lb32vX2Tv{iiAN?jN=nJcCCIN}CNcurip6GFv z1u38=Y~mi>(x&YyHC}6$Nds4#7t?IXYO~BfpOt6Y6ibJ|e(di|Pn*x>iSK*jY5pZI zTg|)9#m)r_S-6V)5EYdjCAylLkjzV7-XzG7BdcsTG$8IxLfXdIQq-A$-DCa0 zLdus4A)PLziXgc+;i|UK6)Hb%yfIb`kQDbir@pUjB_;>2*!B3=HNi9$9e};yv+SpN z=5UksuWO9CKWS3G&fpz$K^g!8;~_va#`~oMpapGGKb$bNXdbRy-^ej{24xxM2F^84 zqdbWMXgP9WkmOPS$pjH=7pIj!DCVs@z%aL%nML|$6n83E&mbudD-Z0$=(#|a9$dA3 zlR!Sc5a8x z(pZ&GI_RV(vtJ;L#$4sGCoMKW7`4DE2y!5j&z$oQEase>RzggJ-ARb5|J$+oi(eHY z+C=xdkVJ$Ts`E44A{CJgBpPX1>a0l2y$-LqFK1a!D%Vj_G$(5!!kU77-#n7hK9$2v`xDy%hS(xZaNTm%4n z-|2I^-}9nV!hd)aDrCRJy?|53E3N)DRS@zGd6hl=SZI9ds1#KYQ!2*Rx*z9%gPZ%k zh@SkXE9!tozrl?g{Ml`tZHS1Vx)49_CcPK#<{jhDdcTZHUagx;6)>gHMEC8E$I~4D~*)?9Z!l@rhl9BiE6p(nwuvu!eD6y&a%3m!g&IR6Lq{e4c@m}FtsL%MVb}%)VV?maioLFGqN=dlIFs} zYbP}fX$co;RWi4nRBuG&35+-eBLs6AdOiV19@z;R(m}2G1jK=(G-zZ=T8$K*qBhBa8K!Kp75)^NapMqF%W*h-m!B81>#iW zB!7q!68?}EP~oCXGpMriLt>8cDrSpLnrKzc>?YPzQ*x>cx|HcFHO6peGuz1{heq>D z$*gS-G7GBA-${p{4t;A0Y0X;mnmkvVT}kQiUW_Lxiy5``Dds1KXE4N z17W1z`V=cI9F)>GmxpnqD5``^A|2lUOy4{Paa8k~_(FC}E(`U2aVuH+$R%iN)=V0k zk@SVXU_6_Ux4b{qd$0nLy$M=Sy^5QlHxT+;+yw2xn%FC#xTiEgCSi2Hgl}j?)!5Y) zP&&E;iY~8!9;NABUIE<|>9_*=;>GBU9b5r@4e$SXj1FWHE@4SK z^o}8|By(tJT?7)o)g$*l6U7}NSNc;et`CZ%;bi`39HmG!DRT95LVju6^hP7=m3!w? 
z+`5CG!=eJKT%KRRH@JO<<|1D2H-^_f*W{Pb!h33-y#*P)_BkdW^7j!)PYjs5y7oC% zOYe))XA(=^TpXf^n55S}zgQ%#=(H#8p?DClw-a;7K^s_=EsO7x3)Np>u~;2xD6T`c$MO6#>#vFh`# zdycoAnrBqlYIkJ(#K^ znBM-hKJ~8!9GrVklcV0o91?UUlibMF&!a{CT=44W_hcU0+t;11eh!dixlSQhKhMKi zNE)DSz|f_(heBt#X*3kmSb@F~%`CcCXnsEEY=CBOf$k3dB0m;zFe|I#s*n@Dhe*He z{m-ij{H^YPevP7ba{qHJYqJCY`p-*xaQ{`}A!oW9(;-v6A>ip&!t;VmW^GLIlCd;fEkOd!1x z1B%%7{ZD#3!_}Q46o7fRqx+wuP)ay-eUQy=dmswg`=33fzB|4DnW2_mFBazC|NKWb zDfj+oU)zUa9fn6CbMAk}@xaI!~3AGk^$9~ z0O@CEHyKX?T5g5nIw-{ezkP-bZ>}GL-xH$;@%wc~EEFO)LT_Z!j&Fos01&+q`cs~Q z^4P>VBK{9`n}%Bv`YmcKdPst^rCD0eg06rImOrk?s0N`$OA?;9z@5`RS*QfOeB>i}Pm>#gglY(T^Ef&z zD-?pH2?U}t;0K~0j|)iY8)r*z3uN|OW3HAlnl*gwIIV z+4B@Gg7Or_$lM!YmfS!9%p+t}o`HRNf+d&1)p7el2eB*)=$w<G0h+C zMLhP|JKC;ZN>CSp<1M;oX+?Wxq==M<6MFFir13uJ7IXXGNS=5flyN!5p8?FIg$3?| z>Li*`$1K)!LsXV^2rItB`=GmO)1*x6n?hL-UgN~IA0RdppOT&x7~Js5ubvWOy%YK% zJyN3;_BLp6arYXLMdn=z^c&)DooBgkfHcnmYW#>86@!L}wfVFD8-YglS>xrBu>Q8@OipHbHl+(@kk zGzd>MXizTw!5gU+9s~5jo9kFS?Vv`@kRzUp}AEDU0$cHv~!n4lQ^trBKV9PYkNOnSTVNe_qI%cQzd<7HA^g)o=YFEvhv z=@7!A9{Xz^Deh#X1=)Zr9>yd)4rGf|dKKrgorgcbgyAG9<{sm zbh3>>QF#uxHl>cu`i_R#T(b(3g_Q9nJke~fFSA7AHkd8wHN;J3+$S0y!|?kE zo4Yy7?$lfy5!0d&oXuv6U;sa&>4IT4bB)so0Xhp|Yc~P=t)1#~CydI|=ZayEUs`wR z|HJy+e_)9h(&x_Q(l4OT?E%Iu)aM=&)#r8-eQpQx71rmT$90~k&wWu!FR0IbM3WkK zpwImiK;la=MEtw)|EfOsy$3ik{@eQ8@FIn^#m!!%7$*(g=p9EA5!yyu5B0E%7DM4< z1n4FA_(0ps)nT>0C8F)6$w{E?eVt1uukR)S187UWws#XrM;F@O*N~Q@?d=5|JJ$BX zuKio+d*k;@A9ktlE%~3)_g3GB6&~n7MI)N|7l_3FuD-ViiK0Iwg+VCFJOVnjbA9iE z2U*W9^u0fj8e>4a(Dz<1nRle`-F7ef8}_U<=y&sOBdOj7^SXWZ!e(K5;HQwXcO^Qo zu(J2JR@pmi)DD!r?~n>ZV>=#Y@!80#;yK2?Ps6+M_yYa!<<$RjzyNS8;>>R>^kH~m z58tyoUN-Sh&L5fZux- zh(qA=H>=@AhXrnb#AyXr~ZET-no`{y`t(;%iFB!U21uc)ATO2y!#>@THX&wqBC}& z<;|$t6;eV`P~`Kv%T?`aWC}&KyfYaSW%O>1K&na+I=@aT#q+f|YI(_ZsUI4w7f;|ziPF-!jpws-uC@**86SbmV zX*Y$e2BZu^sO8<5pJAS6=r;m&*{PPdUUjZx$R&5})OU|>0&^eB@1i3WYA0IW zZVEL)adoBTT}?IYPPM#`3shk(?-=xp`Il@}ot^q#%8Xv=EcQ`WJdaaD1FL2A{K9(4 zX)ySfas{&OSj+o0RE#)Cc5>?b9^zHYOQ%~XXqpzZCV%vnaAJg7UIL?^ttp6PvRd90 
z;JVQAUdW`VmiHJ{(8+l=Pk(xp%)>4n{0@=itd{rZ&=^Vsy43Q%NF6Iov)M{_$6DUM z05q%R?M*w@|Ih1rrQOu?zCE2xlFxS$EYR;zRL?ts9mJTaMG)e(H(RNL7dmk-tmox? z`Br*fk_FN8PMyh$=p5`rY|}j_P;syPKSFu73BAq9`h;-@S!NJJ#=B29U1xy9-p`X~jnztj^-2 zdj%$|-`(vFGMc#?by8qTFFhf5*DE0Yx=)>uKTycG6f&aU z?LHc_{!5#UUge;R*!+0@~hI+>}J8Wyyl-Ugr_67nZ#jX9V}K(?=f zNZsH1oz#dOZQf&vePlMP^!#z;M|H;&ZzID{zs;2AYBLQ_Rok$Kl@e4mgETg}7F-G(2C>K;84qPjbE!b=Y~Fy8j| zbEu-ZhS0y^&nrOB0w30z%Y3`6LBM0ItgrDhy{00}#dWR5$$%Y6)R=PLj}&n>(y79V zYJ6Fb9IBeD@g=U#oKL>Sw@fP5nG>)Y-(`aeast0Y6Gh{@kqx^1X_RD5%c5JL)%ea~ zMGEr;R^vOH5Mn0P&QKJG)%dmuM4<7VoDXF+z5>-XUtl%93$(O+e$Q%rjh56EUtl%9 z0#&$8R^!{GrJ!G?<<~Y}c!6b_bwOD7`IG61_JQ@>*ljK`# ze8Xd!-50IKx53sYdu)@9Iyx*?sn@6#8%Nfi5(@?B_t^TZgde9@)X3(G@DRc|AJUJc5XwXnc1vR~jisvNqv` z=Ze+%?qII`4mb*Ee5EN?nMY+ zfp>WA<=sBi#X%@-#YMu4mrnW-roB6FBi=j&X0#!ITZO5W$|?!4KlIPQ1m#8J&#B4~ zPaV?T_+FvEpotxC{dG{IK|KdJN6#ltt~8);MX>AZYwENka5iq;cZeeoaWtNFf(tVPv`r%_ zcqN!kjD{F*h2R0~iZBZvAW4K61M5kRt8y7wP}%?Y`&4yL_v|d9<~=^YfA#}Y-BnK= z{nS%WJ@wR6PuZ8%$LzCk@b{=-^R(NLycK1JE9Y`?oM5F^Kvs=APw?Z!E16r_HR1l8 z+FqXUCfP_zJ+!&$M!(r|a6e1;#FXD(XaOkxZU;;eLb>356@_5TkB6t&qjKy&Mi-gl zJ*T))A9B_n0L%c-cg~PS32?Dd8-<%PXLs?(!&5^VI%n6v!J=7K^tqJ@Hh1Xd0R`+CnRIRsXtGNyQrV-q;V6jVG*h`D^$N)WP@epTYq9D!yP+m!@@KVuOH8)R|@U@EX+&7D6$^|$P% z1}4F8&IYwiRekw5p_YWRS{;jtL3IZ4s{#CJMH(2JxJ~#Apeb$b1`vXFkU3CNTh8|4ZmMaR8|Z>{xp3eySU4DK}U zePLjomqgu3!=aFem;A72oGS?khfi;1qdyji2IZ9lq&;V$J#bao$fW4F_oY3aPoQ;pi#mfk{YW=wS=7BxfxY6sgm81} z35TzJ>yzk@1!AVOW_c9Oe##fuB=gU3JuBtj%N81E_xUy7FA%##-Fa2`e!u?E0IV77 z)p;+0P-mw((ha>|LB3ik+|&{L@S1ouGl8l6yJP{o*h1&iGNPk|OI0g6T5XlyUaB_m zq2XT~r&ku+h3~o#%+bUTeFPfpK)eRNVoy?nkID2a^O_ zr5k3x!vcrNG9Ipw4F1N(yAWsM_Dd`uoKLQO@cB8%ySR$K1;Ys#)vbB<0K%{Gqx|_W`+IyKizu$4( z``YigdAG>H=yzPde*(Yb%dAN^gVUjbz618x;XVB|>$on?c?T9*$(zs!H2ogp5@9mv zf1SS91FqH^?8XMwJ=s)i9b0eR@iSz!&Ru97TXH4}y7@MHNy8`mxwnqPmGRX#*l!MS zPrq~A$B;zfWOmCz*2EZEVQ+T#nO*@hZP>O?Lut9Q(+n0c@x}^rT>OfYPXKICXC=m`S;N!8%IONiQDf{ zgb|7CZcea$mfS9ugGZvNkxAkdzBp0-Dg3*DR6yzM&A1~y7%vyT1W)gTHR3&5N^!)D 
zYP?|^OFIQ-NDjh~+yU|lnOs0Pk#!Eb$#?huNwo7Tboai5?%uQ2XhE+1T#wDf++yg( zkkdiyhloe_W8OfRT`sazimfQBsBPUa=GF>pUh4}Rz#DsRNp3mBx^oN49QP&=R3yHE zBKG>ftcm<(i+fgUrTyBW>nrUg5sm9e!WSS zxp;K#)}5^MG0fALJre<_t;=rwvN8BFRL%aL(1q!Q?%bH|iRU?F(%D1sly0?B`yd1z z)I62l6;JBC(U^j|-a-cEl6lSN>|oC_{4Gs(r|uesWJi)wp~OM5(p)ZXRW@cJ;Nbic zp&-sJkZ_GU@K%((FMsyL4`w5Zb@Ny}eCj}K<{YLrRm1z+PHf2QguD;0IeTVV-^Sfp z%K&MdQF1EmgCZ~$K71Ib!dY{WsHeblg`<@`d^Q+}J8%SUsIix$z1=n;B4&dlx+Su{ zrEg?=AUyljC)`Kr8y`G=8F^eRvkPa;?O&B)mA#VrpS&$Yfqlj7X+6lk;1ATy85P2p z^X|*&Eu7BG7Y(?$2=`@RX}ZO~FC#1WWxT*~UBvq`#I2#s7oqzyUhYw7!l2SH=Dys6 zTrP%7!2G!)f6|r9ta}(}@ zqA;p?YsQZbgth1yZhS5<1Gb-j8Z?0DgT6XTPbtl}4Vy}?Ltar2Ua=4y_*LsTxcAx* zJP$Q=ohHrUq>UF{UBvUrC9~bdON^;CVDwDqaq*nVb){);Wf7LBJIBi3uu|8c{FtMW zV+zDhiJ-jcCkW_2{MWwP57ZDCkvoJJ&1U<+cnmGvw9mvp#oQMg}Me zp=ErQknrh4JPJbCS$s-KPjGN75`91m=CcNobuKKq`%Em)kkS@WLmPl3YciKobn>5C zfd{gNIU_i{HU9-IV=Jy6HNvqL@2yaufDc2r!YKB$hemenVW^7z&JaG%2%dd`g_g_s zuQ_$cF5@8Q2+S|4JJ^jHaud0eEIQ3i8JoeK$X3))w6{59Pk$UQdAD+ClG{_?$}wUj zZ{;8q8Yz-H_#+;}w{on@Udp|f4{A{9L&%~2cmsZZk00XZ*Aiw_3Ms}vpYT7q96~r? z-u*xiA58uy7m{fHw}ivrEqORj{#Ob9hx>tuypIt9ua#l`SE>1*To+`OHEZ@)Srq1f zV2o<}*9BQ+3p4-yHwGyTG;?E+#{#F4mE`finE>MJ9`XU(jT7i7VbF)gN;PdsOZRiWk419gpcez~3-dc=b(#&*P>cC&#Y2 z6KEZD_l?yb!3E?rc+B)*E{! 
zT?$y0To076Cz4HjoNf}}qHdJZIXK*%dRBqTFW`BigXVMQR-gu??Pf2I_HLBB;oj>w z?maD?n|F))4oc_xJqzi)(X!W*dqLunTYIT|ir}-?6)Z6u3@%cLCjm8nb8f^RUr#71*8H8FlXv|2gb{ zJpS{Z=0Ar;!~Cb)t%%qz|H;W;`3$HK^ZB0^z;>b}qpvITm=Ez&fd9;1ROm`W5m}h(Eo9D~Qea8T21?Ao7t{G0fvp`viH^@4qXLQkB<%2NaPb?e+`& zoW$2+JdrJBuig>1bOoSx%;%+{K?#>*yS|Ml)jGWdlhk?;`82pieyl?kzW_)PkZL^ zCtpzV7!w61N=yDlV@=`JAOxir?}D;0UYP62QF ze{w%ZT+V%m{hYh>K2UH!rvym)ckbuh1v=WX{hZEd%#QEp45fnd+wA8Y%?vxZpHofP zf8Tx%Jv3k>x7p9x7xAI}98vuP@}PdySZmPMN?b6OauAQDG|fb~ z#BOu{+9b3sGD+qJ**{5fe_{K3TfdXJVY_=zO*W2&^Y}f%%{_kvt*X~*OfPz$&UE`hx1209!7=9{%$@{jY+-G+T-xSUTJsja zfXl>M?aY9li)Gv871Yx1whK004qY$XE**=o?XowvUG_1XF6arf@uK&>BI(zx)ElUy zb0&MeOkPFY^XQApWbf-V#6cmtbSyt973bRufcE#I(a26#`UJdU=O@yzWuHiLOGhhp z7$OGPOOi`Q0brMiHLW50E;d{WZhzI>{_oiO!hq^>d^u3J5tf12L$Xpsh(qoprRaGF zjizDHeA(Jb;Ym&AEHvn{c6VAPUj$v|z3M~+TGlnBlYcU@u zNxLIUWA;De?&zvSPk0w9x$lz$Afm62HGI|qPS2>MjN)1e3am)Hn+4kayGCF)g`JJ{ z_`~DQ+~k09As*#&Ti6psYW3C!?%zWt3HulIvE)~lHE|q=qC2|6>`wUm?;KrFbKz>F zCJt&p{zk;doKujwxMnbK7O`QcQ)tk?#)7G*+=goxy9&(5-qo?bljA1=G-*GS*KE$S zerTm-8^Imar1z_HH(l`(@tAvlr&wlC6Q~@m7*wBs<UaT9t;f319-CvxJ zj5)VA(QDLz{EvTt#pIk7_mJBsKhND=HdfNLvtpiJ{lSGWfhm`cw9=(WjOp%RGrc~* zBX)V&^8qcq+3(|lq0J!1!RSqB=!QzC&ZOsr{_B(sItH{!=uqFZvO3ex zG5ZuNy$KO9cOY&pz5uAg4Vr;(c!g>9`A~&p<9m_1D10TR$eMu_cM<;rX?3{IK`Jq}paQ8H%KZeazAyF^km79T_}n^QiWK3orRqFbW099i6ZD2+`aQb{#`#a1^d`eb*J^HVwc0r zvMUHFw}Z;Q@sxNwZ^$IQd#3dksu%<(#ZHlCz)3L!mM7j~p;P4+3yCZB<9hf{WqiVI z)&AK0G34filjsDY8AftJvn?yV7t&%*+xW9Z)*b{qwx3{;MD?^i>sp|}XBTAzaLz)Z zxAo{{O3fJ=$$saRkOWa@seQ*XNYs6igcNfkeZ(>-K)5U+?mkvT{M+LRWp4I@`{xjaTLSnjcj5p@ z_^{qS>W;5qA?T(>(#RtJqmM$?gmYn^n0q6>d41e|P(x%+OZG z-MckLUL?gpQYdw0c3>pZUtS)eV3 zyUTDjkNBauVm#}B->T-Z-g-6aqQC6dcoTB&%Hsvd7wtQ!la=EBDnMWvoCh*XgTU|Y zvl^s(8<0CRNOvnGyjCs_%vh=AgwN~8{~~C4M=O1updNPDflgk9M65HS_KVU#K!#7o zuQADy4htXmkMR!5sX$EBJ*`ft23D6SG{5&|qkx1^j?@{){gDRC$XHvbzpIdgeTJ*z zY7OWdd{I#xr$&BA`_C|i@*I-&lr}afZ>edD^Ah7MHiK<4vDD(~&b3=QSW^y1sg8_^ z2dIIe)W5+x(&g~lyaiA78>|Z(Oe|6^Y}kN38{7*;He{u*MkP$N_H?w-ufxLeMy|lA 
z1Pu&LB^Bs_rP+H(4DKz_?GM8VC>+Bux~rAG1yYv%ENRG=re%5j!^pRekQnYBj`jpQ zY3UG6w(Ml3#sD*T>Hrh_WJ_^%434F;KS2I`p7R4#LK=1~;@oE%R_aj{(657a=R-s> z=7&#*=zW)bg11t48%Um7A>=E7q(1?%S@BD(-lJcK+jnL9?<75{$3%X&$qj)Z$kYv- zmta|#4{$lDxGyO7NKkAxMkrHKd4rWIH@TYsU}Br+oB2ELUftAtISu^LNELNY+zr;- z*x2T053|MIS)rA8_H>GZGCnF>f<-Pc@8E$ z8>y;#FtaUHzdj!?_u+>xcbae_Bgf-sKBptqh+;4g$F0hy?D3RUk2qJ?eTancpG^33 z_>;g7Y|guM+ZnT+xpb-^)zK`W1%)qyP1UW$24>l=$V~gQP6+Wys)!|iDc`bLJ%{e# z_cqJ-ZdM=gD^~I6@BH~If1Z-|tWayt)&faliL0V%0h`&j`egq>xHQ|TjYLtxifP6ji zOq5VqzK#P{{XsHj)Tzw)?d9tzxl9KZbPAq0l20<>=V!6$o7Lg+37gd_yt9S-;yo;1 z!(LIJ#}hG|@^vS2RHtO))`P(h%?Ui7^{EoEvt;y9sN_SvYKyZP*8qXh@voBnF)LH2_QCQR& zjjN1Xi)$_hQR^Dl;qIWo{Ohm1U!u}f9}B}gC|1v1!}5KfMlG{H2}pXL8r`XR5AF98 zily`!h)rNoP?vx1c_+5R#eLMcudtR!yy`ScMMm;P>i;%s0nxeYm%Pp{E>se%F2ZgS?RqcQI5q@-h%HNS&gH;xl@C}zwDKHayI?0rFH^^ zWNQJ9^?@S?5vj(3JVQQB%~RksSdMUsCe+T3job9^BF7RRKZ8x+`DDJ5iu~HP2_=j!6TPp$Uew1KdoNcT$RG8w z&QLp+3eABfQTLcn-J(NurjtL)m=EQ=iWS=ZyOXZ#$U_1|ZP=LbFT<~ZIrZ7;Y(zc> z#!b1p?VYk|m{0KRIlfwr5a7AQfVvXT>Nos(5I_1XWK|r6i5$Exdk5v<%LF-V)ddK> zbDX<@DaiRvGKbtD%$>I!5OhFdj!@f^^m&ij5sxs0PqEaUQm5? 
zB6*0qLB|WS4nsUMU!>!NSS-R~TjO6>t$(Of{GCS_h?TRAxdHZpnb{EPm z*>@ww?78|Tvss-0Fu>_6A?fTCg>+Uj(x2flxdlyXw~G& zzZ*6wS~ZCit(rV(tr~M24dJ3l;(GZ{C&*Jk=a$?TpQGD|h1`#SL>uigE_p^(ZlRNb zR6RXBx+h-96hvQcq8G~~dE$5zeY%cL-+_BbVd+*6wvs=}u0&1qX)4PD2<&CR6sfxg zNUw2pHF0Jk(u3puJfNHN+0#HGXBDJ#xHU0@Ld3HC480`7m`(}%S^~%Ku{hb`IirM77XlB5ZA|mB5)z7V+Cq*4-P%5j)iGv(#)m7-wf$6cLq2(NWy+x za_|&q?R@HiuwYxa57IE9pa&E7^HNe|P798KVIMc)R`&>Kx_JKIBaLALY|Xhq6}6-a zU&w$2D<}fQB-Y{AGzB%)YNrb^=Vhr0&KIP58Sk7vV@^t%7_oY4@{GGr&cs*cYh}+t zwgl>dXH--kJPI9kW;278{s};0Ha|cEMeTV^z`0~G%;ZVYfIfI&t{v1-Pl`KJG}r_k zg!}R!sXpFg67F=bKD&^nd=H*|7Mfz8#GD&DMavgisXn|RIQi4k&!Mxi(p%vXI^sk+ zxg)S!dSfT3!LU6}@2@;;{&|Oa>;@usSj3(!sPs;SzmL)r&hG_PiS&oso}BG&Gpqi5 zuW&o)p8{&AbPsq0jzF;LM0#P)-ThH2y@oe)Jhc%QAK>Epz46J+faVePAG3iG)16lO zKqA}OxkuE6&yzeo=T@Qu{!&0;*f(t*+||nl5rL6bdKn;0?(X@ypUTVK-QDR^ zUlA{t>Bvft2P1=rX3gG3+ggvKzHAW!J;}=)fwo8q@FD^J8xjU^gpZj$_x8V>cG!*h#oq<{Fb68vunk_Hrq6hdK5` z0B?_D--^+yZ<=7upw9Bzil?Hqh$KHof?Q`tUWZs$9LlHA>$-dHr$vuc!`g4E= zo*dk){9x6JGp5fXOWb~w{8MrT8OId%evW^#n9AOpB6Gd5t7Ysw);W{Z!R{HsT63(E zQB{;%Y@S!^02caZ8TX#6n*C9k*D~jMX|t97iZdgmZ4{+h=wxZ7uuwK9;XX#y!WplbW}VEkD`NJuGG4}I$o+WyPPZz|Gp`3Q&od53ixbYX1dZ8uXnq;q z>NpjUoLvMmy@@PyELc?-s2hNyqW;z_^CobrH(`uI-{r}lex`(Fu7MyFan9`I-Uya? 
zQ>Ww>u*@Z7nLuLXCE*4G?E8tds8;X$2#xkxQjkpbzRL{`V~JrfaaB(ldOgC5@iC^WvWh6?Lj zNMvzQ4eL2VdZTb8sP-z7h7cYGf$ZTzP3y&Iw0AK&Z6*L}$zZ-_+J1X^lOB3lJW z$XcJ0ZZmR53xo`8wy#PkhrEp(>yRU|D9wx|J}PC}jJfF}(3Mc|VfN$%WzE)30A8uE z9$J)IZcVSRNG-C`=b~!Ns?%Sw9;#1488`s%GI-r|DM`VX0Qts0oP`r<5GD?+-PFoh z3iF06_)6CSR(hLo^SN5k7r79byj(sJoui?c1qdK8|NJki*x3H5s)g$?gcW(?!z+lGWu-_NXTUO zkdO~Yw}8=ZdtcanC%i?Rfm%WayQhSFE}9B-<)mZ#Y0dA$I>Z$ypZuQvCo=Nu|Keh) zfQ|vGo2AEGZhdIF#7kD*)T-=(-3dK78UK3tw7VJ&LOv3XEk)p z9*+m^htqmYCJx)>gyMbF2iUWQrI@5RI>me<9dghYMHv1$z(Z>&MHs*%H$sPW20ln_ z>W!ZWpFYJC^>3(2^mLUbR zc)>QsZJjGpn_y3eKE}cn@R&V-3ye>nuty;+=A0CD$HJJUB`hReAo<)z)JV00ho9*h zKQgjg&!>J##<-*7HLid%X7j7rS*qb`1B;xnB=w<8rGjByO91_?gS{n>!KvwrYzr(b zNMGM>8HbwMU&ifD380;2Tr+h|+g-*raCX$W^Otd{a=0c?N1lYeKHpzXu0K1hXMg72 zpWMXv7Yuz`*?r|5%WU*_WXJk@B-%n*EE8$l-@lV~_Ac1p+v807Dfkq zS@$=42t8rC-HeS;f73mE%qcF^-+SmoHxo-_I|~PLj7=53hV5K$`i|@N(UJt(CqZmmB>Y&V6QH?(Qa6_Jc3*Ei#V7<5t!H z*6e3A3+E1EvY|sopY>+k{uYyV9G2w%xD8l;jpBsf-nix75Zq#T<5v0+pQQ&@t{>$I zZIpON_cqR~bKH8S&O1vrdqmw^+KyY!5L(!laVx{$Ba^eeZ^wk7ix;Updp39>Fl)e6 z`UK&wB&6&-NdlYE^1B@@PhAW^Fd9v2Iz`i8wiSbm?j9d(f)PrlBi#fA_U>&_O%zl~ z;BYK;hKpc{b17(zihpo)HU4<+dmuqIo&X*#`yMUX$qGroKoTdc%Mfz={sM$*uU_~` z@F~il4t(8q9069SlNi&RKS%TD2>hTLey#ckK-Am(d5b@T~{N@dH9p z@A2hb{CKEXrk~}TUuEKaMxa@niOUWAd4kajPmmK-9bVYlmGZtqEklTjST&lkRiIZXi8sFMkBqIo%nFJlT0|$gbPi0zX|_o!uJsxd%LJRky0e`uY^8uaBv3xT&v-^<5OMZ$YkZ;q%}2eI6H}*?r4Adm#P3 zOYXf4V>`R)2tS=llkCU-J33NU80bOqHPvW8Qt6aP$Lkv2Q``; zK3D_6oY8$~BLZD^|7e&ttn`ZlFfxjp!qv5?8HZjn>yIS!_rTX&lpu6z@HOKoVCjDT+{2$U zdulvQzzgv;)`T}9^zrpki zkG+5Iywz5&;iC2#*q!cQg;n04AWKq51d|^ez0zwrefBEVgEndF*VBt^s)tym@rcHY z6q2CbsgpW}^a3_Pp@YGBf_%L&K%c_7oL!Z!k6Ji>a~xaQ))G)R*DEJ*)~5L>wG+my`kDjN7hXMo6Yb!y`(Fy< z@9F3MSMWzplc%yC@nLd+)gZX$CDevhGs)%ylK}g_?;n2~?;n2~U&>PC z6zfZdk&0y>rtp}PfIJY^$vj3L;J#Fgu86{t0gZ&slQ|?7(g)uHK$w75CEW8cJx$_H z8BZ3tXH+C0xY@JnfD9^?xV0!QDi?`jy+|KR!T}m@q4^6G6mx1&$kU}L#2j0S_|i#V zq#MOBK&YR!(u|8bQfWk&+v0iQ5rsWJD%>|c4$lfcHP@dJE|ir>e<^?x}Wr!;22d=L22zNU5J1^gu_N8 
zlD7NaxQVWaW$_G_T+9k=I!#1-D5fqPlRJ|3U(D=|st4*k{aw@^QQHQ6CktG(4tgJa z%!qmxA9tgeJxA3U;5aPUI0>QSoO)Z~_1KWx{T_fX@iIa$e0tqjQfe*>@_+K89Sh!@H*x!*vRN zxU^h^p+BGFN9`V%gm^p)YfX}(x>6uls9yt;*{2~?cgo8OHIXml&C9J4ZdSi!BX#GF%7@fuC`pP&w2^Mi+=AA%r0bn5|#Z>Cc{8t=ti zmn0xQq{s!_lHbTEs_dE(Lki&nNuLXR*F&%v`-$?8t<+bj3l5HPAQwasPz3LX#(^!x z9Gnjioq9Z&2{4w1HTp?0dxhDoiS^vvdxdN_ z$zU^wL%~%%VevNvb?+j(kY?m0XjmIG;t4*h!MBbZV#4cl=)hft)I{s4WUIhBx^vDX2o zDnmkuQw?B0%&jL63MmDw^aUt2YM)(iJ|8>4O@zv!Eyt;P1C$C&`^F{%9i7tx%;E2L zp$+<{$B@LN!D-dU$aKNTr@#nQiGm7818Drja3~*F740AGxdAH|dSa?f6e zmdJis`XALt$L)8#hQ5lzIc_?!Niz00$QGdU*>;SCc=4)e`P)sdEczgF!B9O*jK_eY9mP*aLp2~QHL4$0}-mXP2?VU zp(cmzWo6rbfW#N%WwTn37uK9bh{b3M%@xI!_&44j|E*tGnE$T);y?U1pYZ>~e}nw@ z*lr-X|L|Wh1Z>XhCU@e54&l7ws2NCS6` zglp8p2L4wF)%hlJ3SP*6*q{U@4$L; z-bW}HY038hO#TpTSFHseV1-ga8|uu?Jw*J)SqdImc?`g`t4_Gr_Na;>5{uGrzF~?> z+MKb6oUQmH=Wut-hd3?Kghx+iY42=B4PbCt8=yagkS4N=KZ$3V=Hj@Pf2Ng2oD^?- z5Zoe*xsbN(zESrA zIq~IJd7Lh?DQfdR#qe>x-H-;n#q2E!r?QD9M&w+owrh6>bu>g#9(Y_~kolAYu zNBZW~>a(#l&)4vu8Fdino_OXY&e#rvmpgHPMu&sbT{~PpKwJ#UeXLV}+k3Ng4j{oSh0;AdASN{K{ zJ}ZGCm#@$2Jny^cvlev0IGu+d>a#Xko<6G|8I;*n3F@<^3FHd(Iv~l^UPY+p%gYM& zXTCgZULKQhv-%w)FOh6Z)p@LKDq_fwYPsRLL;Pso>~F)5p5CnhKl+k2?~9r}e$@BG zq8+aPly6mkuR`^YX8reqQRnKf{qqig|EzEI{ks&(rO^KUhn!*Ek{cgD z^R}~p|2o08w}1b%0OPEbOHG@R)x2RUs_-f|76+2 zhf3vGX2$Ke*v_9f7&R`={s1v514OBv*fC!dJ1zL(1isFM%7je_w`cEQ@O9Vy|3-Y> zw#)a#S6^Mgcf;4Vl6-u9iWqfYXZ$P&b*ne{(}*A9tKNijOvv%KJ$(I{GNW5!-zWGQ zW$Z&Hdi|VV38tj>xROOcw z5rC0Zv=%9rj}J@7xIvape)3*6873=Y(gL-p>{R%Iaz+z#`xY(B1wfC82X`*q!(((I zG9qXe(<{HNX~yblw@WZj9Q*opl*6sCl7T}bP~E* z5|-&s6x*h$tfmcbtO4sA-98me3{b&Yx`2Et*sgukX(1};f)axEI-jxE6@NaE4qvT) zMF#LTW8UJ=WEmvre@V`$mreM*35DN-H~b#&)I)Et|9Jx#`~RNp_dlKJ0444B|1Wjd z>HaS+-2Ve~A?^2nLKmX@zo>2he`)&v?(O!!?%wU#wyg^GKdWie|F~^cV75a8cLiwR z3|&Y*4Pdate+xKhX#q|U_vXhv^cvu&+SXq?`k%AMyq1zIWiYp_=nB0H@E&M^SjKLCSQ zD>$h4;?qCB#8W3u8|b+r#(YVJiI!N^@Z~f-%>amn#tF^#@eoZEUVDC55{s2RXtsyBZm4UA_!;Jiy}46tte7c%^A>1WpUn)O$A68KoqQETZ}q%VJd3L4lI-(a 
zT4etJR_CdH#Ts(%Pd?&B3X$5mW(@IoW$xTu&{U)5X6)O_Akwfkm*q~#h2TyTxbE3A zI95dN$&H_5W&Cghcxchrt=Tdvc~`Nc9{37Bf92C};B!DN!4sigVa#0fG7F)U7m#xe(>V1ISb_1Dh$wC$X8EPVd$|44t$-1e2}uVXn+WjUsL z@M-NP{2YKM_SasF>0(|=5SspSFTlLAjH>RuIY`S8oOQxmALntTIPgAQa@->a7uAfx z@LM#|_q5EYU)MU zPLmnI=tX^cZhl+K^*b_u?V4I%y^Cu2R}1`Sv^RrIL6wDPuwP>6hi0$~3(sIb1sqQC zm>JB|_)Q`RZCsP~0KBQo@Y7m=o3o{5eNrR;$zbQfXd(iZCc(>iZVYkGnTx zM`B&%Ja#L%rY~3ApgRI4j9;TKYIe^OlcalG8<8*D9YI)kbXnXvET5f9cR6qpNu5A> z3%ywe`HYqRTCjCSA56hf_r_xN$QL5iwGmK+d!lbqQ1^lby`T$%G@uK*7zK$ci*xo_ zc;Tu0H0OS7MYbt^26DrhDT?2RD^q->3T`Id0=vng-ka1Bn_HM2l?3$~HRl0^u;w9` z2yR!_oQTM-j1*oUx%83+Y|OLZ`05G%%;e9*{CNmJ)Gyy_!ZT5kis6UH>c$b`HvHr| zQ1k_2ZxeOX%U~AZ9|VCN^SDn(u}AE*amO->W&=*iX}|NV${&F51Z*BkvfJgHRnVQ< zQG9!sKL9=0kBqxdQx1$~#2gY+WiPs;0F3Nk!XB&^CF)H@v4x!Aoe$Bwi{h|w0xhET z6YXvFlW!3{!MBK>AQsWsG&VM9Va!|Ue`4#9jDXfs{0pPrO3y)h%uV334-kjF1O|Ke z^K)VJ=-V8+07S(~u&=MPQhV^L z+$s73AZ+DruIXgVprdX-7-@#v=JF$P8Hs&FV3=Cz>k$nuBn~!S+nIPCSW5t)nP%a5 zt;geVgK<34hDFOa;;UQjyMbIa>`VJG!4;^>aao9GdO*+GIMQv((rqm^bcG>8>m@7w zGvqZ=B>ZYSv!nn`M#gyGr_$pMD6D$v@ z-`$Ru}xIwoRd#5r?HrvOYP(HuZ|ik>DsHZ#%YwdVD0-k=)n;j<7)-u*k{Io!HhPf zk6~FLtEnEX(ufS#UpKl((tR=#gK$E(m^aOTMHTR4p1 z>{;}LAy3sPiM7C;KgQW~jv#;7xRdavTGp{CnnySi6~e45HIMF;{Se;>WW?=LgXyVt zD1!p|xb=`Xqt1+}tf73yCSq|3l_qB6Zoj`F0KQy)A@{Mr`jo=R*4~>aVA(PgsqT7o z5X8`bJ~6zfTR8#{m?ev{SHN2}x{7Q=-LV#8%|iT`ew3NepX_uM%};`F{j_>lH3n6w zNuS|oB%kIr zp%8W>#4rnqYv`3H%F{|brBsQd7)ExyOFJv1*E;7M`p{p>*ap1}k}vvHa+{$vJ4H`qSS zARUAa$)irKXGs1TMhx0U5?5cKAMre#&t<3%&+I?-u8x=It~$Po@vkCYwc=Zd%gqw5 zQBN`QG5&mHQfA-*Xzs?#Ja8B68TplTRurP;C&P*gA&+xfmYO<3Tl=Ua46NVi<*912>_P3iI!(-2C5I(#c#3$4JMaMXD(=-f(Rjp z1``3{bFF$FFX~P-Sv`h~Y|m+ESIm8=g5$B_V+_XT4!Hf8GPAmGO-rkOZfU+3#YqqT z5+zXnci~uE>v_dSfyxu_l+7%`@nzW8fUNgBzHr9MlcBigfozAFtev&l1Odpc%{rqs z07k7rVXS6|h~8#hRvMNRYbv778IY6onym3-`Qm_;!ED4B{V_cWV11NPce7Jn{!HRe zl0RedlUx1B{s`_5r5-{|=Hl)X6dZZp>-D(ZAj-dSpEgSSOHSvusXaXk03816gFu_W z{!(VdlHHcWcn;U>VJCrW3~d3+m0-yO0M!Ptb8Ar)7#MUG{mWqO$ll?~2l&SuS3g|e 
zP{i^KEO7u7k8&y$Ya72~Hi6|Qcti?#mup4?6BG{$L8h+P`%r034?g8t=|_={G)!Pa zD0y~A>jKn@7;54$F%Zld31V}5Ap&=AmLgD-KLOP{5|yflkX4QS&FM@b3AVz|KZu$I z`19gXQcjv^>%Ba>c_eO2tSk)?kI{d^21GuXCk$6Z)EenJ}8neJoZYoi{T>w-lVM4@Y#&h94>C)cg_;lpIIk8{44u zL$I2di(6BZ{ks>Ne2esg%HRSw%R*3o0B|^}Mf$PQ!|}pt7ppZP1{~5NwP6wu%(*hJ z#ohkUu+)c8a5sx$`*BZ9@B{8-Q}6+C`wFZA@F7INL?XbMuDd%xE>{B>{$HYK`_pmf z44l(j&V#@_?x)Y(;&83U4^AhnQvn&y9LJ4qUJt~-m5-()3D{+%*?3lQMu4;j@TK*mT%&%B2u{S!j03wli~y_fLOc6? zO|Cr{xmY6Cs0N)|%KjI5Af$rN5UQC-@b;IW&-VBcu8dlSHt9Q(zv7iqqK|P(>aqKY zE2HX===4g%58!XPEOtwly+IcAFLP^D!ktI$i@7k0=h3O&=cQ3NaM%g%PCf2}M^Eu$ zE}114!lS2p!6T(Xo>j-Ar+mSqr+vYrzBytePQPnGBRAI}=4rIlO0Sj0c4L%o<3q_a zC3%*t?aN>6c$EdoPy|MG?0ZnSxff#{3%7%SO4#q9I8I;TB#Jsq43b%|-Vw>E7gJaJAtP}GB zXKJ2{k2_DWnP_c21E@yw2gzMnF~|E}6UDuqLc^;==kKR+J?;EBitbJE$YE{}Udj}A zU^Kz?y*a!Mh7&f?Nl5mA7^w!Lg+sO6eEpwMq3?|K0D#8qzenwj+4Ddoo}O4QZ!`zP zie3~SK2zUEikIV>QK#6ra<$UD-&JOI=WNXGVb25a>NIE^Fa_9OhL0k@$i%)waJyj3kB{CaxWG;2&RMm<7p7t9J+!gj=e1RY!8*~_*8=%93}!qXFy4rJoz_{YmoO#C zdNl7E0MSH1`{P8ylLLJy#@!hDr+@c+%05}jjzDO^xfiWD^4p`2RC5v%kF-&{x8Ed@ zfKa`?A?kUuL~kVD_wF?BvrJ!>c8*9&xOmH*I0F5&DAE-zhb|O~4j1AfXB?R@i~UN= zn8QB9Q+7aX>6O1bM1_Qb$59;7aAG@vNRca{T1*XXTeAuztBdD4-LR(h;>RS>oXL67ie%$cvuCmOD?=n)h`MtUH+ zHCrWw(z^Gmt$3{uQ5@g4pn>;b75R`iaWx@M`pc(IK1xszDNU_N8+k4l(P z73RH=>s454}rKa?{TNC^D_BX0s2>$n~v9ab({g~-e0C8I868Pf>%6r#}t@<-KQ zQdg_K2q~hB1OVlhqa=Q*x&rSMk&bdHBJD!e0LhF(f=6yK=U!0-z)Qz?>5bLu7%2t* zGZ3;t2br*&#BWwz5UR#^S=9osB{m%6sxKsNwVDh>^#m*`YA=?xcFKVbI1p;2`RDS-8mGq=MoTIuN}zWk+S70cCTbW`qmL zmQ<}L8R=^@W&264(bP}=mXuthS&SuGzUJygHYOvKhlUi%K8uxlh@y}%+3Bx z_jq|LO9_ANDghX%wKE&vmQtjj%q@FKHJE&nE!4axH!&=xum~MO!XDen@TGkD>RAmU z0!Mz@ubAfGl%>8Uj*9RrMgtd4+SD(YNh(N^h3mge7haaIhxwCJ-q&@~AiW^)0N3F! 
zU(fz`;Oh>a!r2F?|ASjwTbs|DitZx+ik8b}ft5a9AcZVB0~EZ8k;5W?y-4J!+ieJ= z@S{;c`aB%v!e?<5i^pjGRBuTd{SFO8+PVOR;7$ifexUJ$T_YpLr?wL)MY@#`X`t7_ z)gEEWveG{giBZrCHGn&b0s!>nQ8{lH+N4{NkF3u`5?UFxe-diRvlL{^Cfy=dhpXl| zR2;d(lAvhEN+!4imzh_kUz?E9h{x=U7&k_5l&4-eK}4YrxzQsk6La@1v^Q->j}iH- zbg^XR)(-BagwtczS)QprIMn)q0t5hRCO7}3@CaH!@duk}IEP$#1o84z^q?L=&*)P4 zjb^wM8kOZzrd;hQJBo1#nC*<90{}`GC=sx*%I}t9reYf@IEW4&CH+9$Lg?|>3kQ*D zkuTXb0`ovFk;5v0&zzK(H(161lrX)_ZdQ0GQ8v=MS-G)v-_4X2!i2lCDFvHa2j_o^ zTt33x>q47aD`nD=O|3Rm-Jn@CnFgskS}VheM@Vxdoj)T`0o2~cR+{BuW2?FW{&0x;1(fbf?lb{UglN(#>jQ8hrxa!XKiouyaKD@z8*({w<}9 zOTf!T-&DxDrJJBy@BdU^qm8)wS?Lt!jJVrFc6wOG{-8ssrMxYknW#{m_*bEDVQtC! zDh`E3GRxwQkf4c-mHxH<29J|n#a-$beHX+hLlrTFgXM!bt>b)QLCiWhxV>nvu~ z`#6F_Ed*t|ZRS9m%Tc6$2Vm~7J;>5B#T4MMexyh@Gzp<+9*@=Y52J1l5otd|>BZVp z+`a{NdH$sL)<{L_OtvP*pPl)|r%DyrzgaE~?nIEIn0mM*tWgIeM0=#mmjj6H*_8k# z{3#Ns&8kHsU2yiPewL!@kVE~0ee-qz;tp13FK9l1KclfZlwApegXo%Fr^DvTLVS)^ zgOolBN*>x*l#x7cd6N}j|81$+7b1yViAZ%88+0-o^do*4VCDjb+b<=52L_Y!p+(j3 z^w!oCY&u6pn~fhUy%_PFBxA{iz2fC-t#myi)JLFFwT$%BMv&sh2&RDY)|yK>y83#p zWOL@dfoy+d-f7GW1mWtY1sU=|E&Q>*{Pbsijru$LN^?`p$fp zBjY}u1JuCcY}sh>Ix-ZwwZ2m>Hg`)V!FvK6j%ddsd0t1fzVmbFJLzx)+Z}=JmcU+% zx6cB1BVaItQA3EqT&m|+%zf$MOw$v8uY9nh02YT*6&Qm=n33z#^*&0(SF^Qp-nHN1 zx|8#vKM^itJQRKQtSS}@GXw_I?5qd7jmjB`110Yw7_@-h{+KVuz7PwWJ_M)tKD{NO zH>lKfU>!4X6p6xz)$^cdtbi+|4wES(HX;FDoNMh<(ke6+Ee^K!X^EdMh=K=`V3IA3 z#J-ZtD<#5{wCH1~=yD=R3!A^a%g>baHy}5iFK6XO0SyI~wn>gvN|?j`kJ59YRCIV#jXOZ;IF9%oj%CmZ2h0)wM})a;gB z2%4AqxjBEw>q5?qF70EF%5Q7AOq$!-gFQ|Fa(l4p#h{Nid$67zEjzIt``GtnNaprp z`=KD;KK3}g@B(eDj#!vhB1=2IW5&J8#K77G>mQ9*cb|VD1;(-#^8AbV zb3T90X7so4Rg*o2?Igb=tKFMLJjvo0v$mb_O0~;ZGBD?og{XD-m~tUvfJP}c!DqCl5+SC}eA0U# zSV}lG5CxlHh#rTRP^=cY+0U~l`D6ceLw`}6tJ2s8TVxmeylkFO*$}LJXbP~EX4)Yi zvFQZ@z-7@2S@>*QgysnuhI{W>U63Ke0tGG61%YAcf@ZNGoQN5WcB`3uab~b-S)MW> zno{U(@@35JFg+g@$xNhXtEELXX8@cOqJvS`@A=(M+le46_<)5B%rA2a6ZjGqc(`(c zV!AwoD++afc5eu74{5LLj6?B`)y~RLvY1K38id(wEIQjZ;A35|kb|~9WB&#Use$PU 
z$E`Lz4_8ow6Uu#Icm7l($m3Sh-2aZ54aSO>~Suo2WNxws4-lhLYxM4g4!Gi+neq0m6}~qG11T)q&v1(s$ok(gr{ zhjtBOIb{C zlsJ-KgE$ogDI-YFbuqhM%2=hfL%Nru75N6f7fAjY(r1n5NDLUs8d_QA zb~sCe)k6l+R#)zvA{lPsS%1QA#Dpdf`vF3Db-3e71s>Pd1TAyDtBbAlbY!KcS3mn* z%#h1ofj8Xx)n4{Xkv-uYZgR(az8|-L%Pers?3FR+jo0;Gtf93-+O`Mw~~a=Uwf^ z*?yo3nTSk-omlz1H9ybBQ9Qpp>XyV3;02sq3Of<01*8}dLWHCL56mUuJ@jAjqv*n; z=Qed-J!c6txBT5|oD2?g@19ap-+ZC$0EzC1b8B~AcQEFbtJq!~$wD!$%h=jKgA`@R z1;@%gQtJ$fhrG^l(xF54Pr!<2-`XZiT(U?}dPAW4g#0F+7y!YIk4^24!Y^~ zhLhnMB{KW^galIy@9+(`2@=jpar?$PG?(A(W%WH=N67+P$-c1rbqJ|M*8@2N^aPPh zU41fYHK=WeKkh5dG#xl9EMe>tOzV{>-&}KTc7I~*kXn2&>HPB3c_}pf34+dJ_AdE2 zn2*L$5yJBfJ+Sf9hR$!My5G4;=={355R;ON%1{sNQe=}atVTUiN;@ULl8(Z_N$U?^ zO9QVS~xI3&A>mS@bfi3sjdMI^u z9eYh^FT}SNa?x$SZ_VDV4cFqpDP2CXu%Z&w4e$ zx6rpj>yG|};8=Fz@9}q)IKlaOY8V5NqY*UoCA^xS45pYjoOrE8~SL{ko~E^G1F)UB>5_Ki5`(o zOnnI60Pcf%4b`Q)6LCSx;T-{B9fuFZ6e-gqz=B;?oWK9TrCkEDL<_CvfUpp2r)ORa z(b!P~fb1a8zaI(Ip%^#2!6e!0;1;;=?IXMdypClW-@4=z>0fdYpAi})KaDP29+B=W#-)Cb=jXl5k9q0 z`IiLBKiZV94r4jTqG+{0qPUppjdrL{8GQtvs8nu6$dt`Y*}$LAOw2-nQM>y!!RPR2 zqlx(wUltMew|sh+KlkxxIeu8!V@PAx-$}SxJt!g0GmxTTO$+R(fp{j&SOaqtLgrMQ$eb5rZOtFh0ZcOE>Gkv6lq zGp+?CEdqx*idX~>`9!`Wquys|dhKm~;9W0D1evQMe!RpNm{^TOqV(}VLKQRM1BM1N z#wBirN&$}Ww`q7ObOfIN=a0oeE>38dUSL}p{eisLkTvv!kRs#VEQzqWAcndCY>+o zm@H6PTWif<=yEVK4vB1UVmw)wcN)smaP5@WMVEIP$~$d)qM+9%&Gg(FFJEFMXLjXx z&n=?iDf>ULXuOq~+*wV4PS`U9f|8!O6-v&` zBSqeFH6`H>QE2|e7rd03*4K~H!*u2>i`96x(%y7Cu#^G zTK73_A%Wl-#CZ`~cOiJEp6}XN-7mUT&@EVvK+sYOu zdM@ybxfu1!y=eCmJbV=8W|rejU;)vduf)sWvBs|y`kq(MDA2baGa)kHB$<2&ldowX zHYr^TF*jzsKLb)Jf{L z;!ZN01jA&`YJ*X!Eg+kT?Dl+L_530S1Y_u+u z&?u9^_NI%dE_;J6+lOvv66&VQCN#=su)VV1-(T9jQJ3vQw^R00fClymjj|bRuk71& z*)6(k4?5UwztwFe1n5nIJl@}n5cUK~a-XA;4)rroYd&JPJ=a}FG0-#MQ~zZAJ%bSe zhQJ_R{=Su3p&7zZzUMK7fl|jZw1BBGG%%$%Z) z%&|9$5~eVPIl@d~Es^KUV;Rw&TjJ$^w~|Ak?vFc;RO_beN$sdTf$QmVq>f=wXA(5= z2XvE%pgK%;x>?f+X2wTu-iw#7w8mf5onO70$9OTe@+Q0M{}h<)G=QG-C>4}1wNh`g zyAK(Fs;JDAbw~u$)&Og^u!F!vSJ`J7E}nU%GF5MGo{8X!nab2FR(kV(-c$#GdbxY% 
zl}Kutm0pDene1fF50~rDcI}t@czfmEg9OZY`{ic5!I<&3z;{uB8Lv~IYcS*W=YpS1 zC`hZtq!MSgPy#G&0(iu%ck)c)P{$g9A#YM`LKm1~WA+sG7_Oz=KUBj)fME6!YOv?& zigY^)H0SpOw&xKTqhL4!P&=kcVPM> z)Q(Jlzkamv^jFVX{OONxe#67lU!CAi5Z=!8=k5zU+7slP+~;c9W$ru%>Q3x;gD{8Y z(|bJy4@cJBSYE)WU_>W1vI$a%JEord8-%yN1RvBnn>*+WZNV#w)Sn)2X|46HqHaJz zQMcsOlej}MqaOJxVXI}ogRM%D0jPSEuBog?hgPvea}w9E0k*io)M)g4Vp-{nv{qbOcW^>W~myWJ&Nl%^G0ed+Y9Rp6b<9!I@lf0 zM6qO?q0yR<&w?{K`x2zf269d#EO(V<_Tq<&D`OxUZ8JcH{d!I#w7n9pj0HHzdY;&u z1(hU#L113+ZMHMv5CFO*G*85+D<2^j(xP}U{~p~W|3@eIie6|`v@xR{cC-1eeETdi zznDEk3{7%o09N`LNuDl!uh|?eJSuzF&xa>d84$?D9`vYQF9e1~Z0%?nT~(VKHZc2^ zooIB33@rZ$@pbrKGo{9$@JD%8`fS93WbX%ZxI-AzR#7n=*tB{I(S+yib-<=spMt3{ zmzDl2;$rrgS`<@<2$^+eGl@;|wxs5b}&W^I5&P)|r%mrR18RP|ZtWN6UB zi`L};i1PlpBL0Wx5_rLW%SsjRftTb@w+x1`Z>9eN!xZ?(c294`wt_fR+P^E(G0g(4 zFA%3d-DEMJ3DtfGYj<@2>6YwuJO}rfWqSLuvfj|T-V=+w6{691vE(%^ z5XHtdq!5n-8ylm4f;NnLF9Kq-nd;2YW-|df)0!2jXhM9-nY$-@Ws0mw?IlLBG5!YZ zxQk?^u@##^aIY6Iy5Fk82wnH?MjL6 ztSS_2iW034{BCm2G8z-_AbJstbM9fY(U=OhQ&xP_CA$D)`-q0e#u-R%ca2|cG($0a z8D>1@#@?gx48psck7UhU@C6kG>apMCYY8B1aymzT>{yb~a;j$gwW>UyN#D+KqLm1- z4x&%W{$s7U2!Nalk7PqZ_^|NOQe5ET`bx?ZjwHwI##qmm1mxr7mz~h5>H6wEXsnTr zRYx?@gEUgRULg4EfJUPmLMgH0b@AfWvGRKDbv}DKd~4bcBrtu+ZI-YD6CXY#Y&|V$J|S;J#VE&`=E8!o~P+0%K)Ik%#}W~`5U?! zGaC^{B^{P5ZFL@lz#Xw{#|2>USn*a^)AhBgZ{mO~q6~fH3Oj2@3fr%3VLKzL0Das5`nbA%`gj6V z1^SRRSbOwwAm&rh$0*^I`SkIN2fm3uI52Yb!J6{vqneF?dq{S^Jwex_>0{`|0DaIy zMc+7W(K6nUwoyBpFO+2zshY_cy)5QG!+p8JEXKc;QEVfm?*u7kziNgYfc<{-3i)M) z_0Xb7YQ2?y5{aHl-kL2lTh#p}#_f98D0067Ou!~+FV?X+sb8Wc#t9Ax!PI#34%L`C1*3jmDVnT)jj2f6FSNna z<@XDo1jA;a}4gCSWyo-RZj@N#4hPd5-xzXYCc)_98XG>vu^ z^gZxYjd{Ss(}|EcWKneLeKL>F1R_CDbqKtzF1@7%N8i}5ASfg@Bl+SiKKukzZ=b#! 
z8M3u&h$wS+GAKhUu<9ewbETgK0L-#uoLlsT%=iKkTB7c0m{mn^s+3s;Gj~`qhRc^< zhWVDhlR2;4hmimz9S@|LY5YdmTEAnZHhxT8HOB14Z7Ym?V}&W|zx-+Zv>BMjr-Ct8 zf&W6{V!%>!jJYBd6Hu@m2#yQ+Xs|H$S)~HTqjo_PNl(FIBu>wyawW!%r)|ZSpJbIi z29{!W=*&3Pl4MTeoGw{K5i8orS(3Zw24CVVxWCYMdSL}i6BVnzf`xD9!}G;W8fSuf z__2`lve#hW74$F%sBB$lHFV70Kz4Sl=fCYm6Te@!j8-vIb8rvHyHN^&a6mb-Y_w); zIn3S)nJ)QChdxVg+cmWq_e1>l9=RW4m4Cz0=TJ`1Bag!JaiW<1?HzS*g%$vtg#kIY z3PTQazO2;k)vf1r^>P`_2Ap*h!eQp_`I(Poxq-~xO{VP6gkg9}4c-3slp6Yei}&u{ z-u)I$SYOe&rOgdZI1$Svw0tmHu9XM);D>#9v^l)qPI~a&v(3`V0m#22_i*e53-~=$ zIoj#Vv+dr;@h_Mmw$t9fF)6>jbA0$V?Y-ZF@1AwBM{V@$tJUBKaIZ>FgJ)RfRPg;8 zfnfQjTU1X(d%f~H7CvewNL)QP4L@V=#m_DLd7eKv@aHl9{G2~^{JDrf_wwg1{>1q+ zkw2&LXAFP(@Mk1{4!#RN`{T#QkK%Yz15p{GfM<0S-%n!Yhw|xYJPC_Y%aP46n~eG! z<|-ZiQZRZRqf0?d>i3N16Haug`+_eI`Y^S@=%g3T48xG3e#;hL%$8h$AFL3mEmQDW zYD=~%vVU#2l)dC1c-BixwHHfD1acR9pT?7RUrhDCZiQNd7kWQB zgwW`?6&SfR$FlIQc4egYwLFh?vnQSD6>Qs`NXkw{7otu|sOlk;@m>?Y36#e1eTxv% zR&JiWXVh$>`Wbmyqpkw5`n`GiH9|s8lW>Kaj8F{%1RW0CESI88whE5&rbUkC&wvp4mEu0mRR3Lg=OaHSpXRcz!wX{4}0bGqB3c595V} z{VD*k)TCt)dIi74XM79kojCs!LiMNykPiPZ7J!28)^St=sDTofQK#s_ML0Mf&+0nn z9*!SYUB#D+m<@e6gfC~B7t*`efRpje1|Ls=V+_EL_)=8#;>Qyp7!`X%-QreK8!ISQa7`37k$~^Xi{g$JHHsoGRTNMv7K6!yTrXGSf?_K+ zwl1`8sR~Bf9)bzDl!yzs68CvRHG)E1`G3D>=58-9arys#pU*F!Pu`t7bLPyMGv}N+ zv)(Q0Thp|g+;Jk?>6*rG0%$QCu0k-s%q&cus{xiS&v03GhSsSCH zM`#%mS^Ml}uwnoreEW#|47~5@6krb5|0dbA4YT1i!Or9$77ryicjSF_1oubg4C4mp zgN^|JE-@m2yTiAC;r>YZoX%YM6gah*a2rx|f%M);b7~R2|Ju;`MAjFt!T!i3=R&jJ z=W1fPKhgxbKhgxbKQeb?4ljHyLU5xFa(|==_78&GA8BH_KhgxbKhgxbKhgxbKhgxt zf*|)tn%GznyQv`9jv#kq z0`He5=xfneLm&}u-b;J%tlNUQa8kQC?)HbH2sm_HtVW<^?&xiy8%ii^7_ zm+pc_d5g}|pMjzh&Oe#N&l;X4_X=gs_Fu$engAkeAKqsTt=bDp5MOiDcZbVt{?x;< zR|c0Y<>N!3grencR_U)&XkZyM<>B}@FVK=0v6u5Z6$xtwsM}3B+SC) z*1@Q_3zRrLQC9D+YyIF(QfryW!EJqP8mW9uwmk10Aw+nyJ|sfVx)KUGL2A@fN*~z#AQYxHsLPFK?c=-793W0L4KAp9zhvAbK^WA zso|Etkx9-!iJ`Le<4G5#TfnG#$ikI7(uVVsnqY=p)Se*;IN);mBlG@qFi*UVfssasNIY9x zN$`BsDvM6`y=u-7xv*tQ+JOh72W2YH6k1DX!XWC-5RIe&?8iEI 
zv`rrdBF%N|@(9^<@(*0kC)dZPUjo;I`4jN}7VAg&zssRN%Ku*rh~fY28@l2DV}}q~ zXa3)cc+LNp5BzETZ-NL*wG97ZbfWw?S{OS4*KUhi=YlLOV2i(3I)@ISl{}GjyNmMC z&2s_U%|QUWS@-~(uz;PUGr*v~fZc%?y?~8XYdV>sRqL_!30!?A9%pl<`&282pYdre z^?2~?QVzENWE!dUV)Ts@x^vwHy8{wZa@FQoUITO6wZ*MisZ+NPX5Jr&CjNJNv{5~E z1WgI<9N8YiztJk#VFWh5j@PxT6b-14dRB@zu}% znG-*S0yt9x@Ej4o{P|Ed8r!|SxC*rv#O4B5y?@Ia9&~iby}7T8k&T{kUH~J=dvjeJ z#Lk##S|_!_1r|r<4g3RyBKZ=k0*PHC76a!nq(ka-0(-PzL^2f$XJ#g6a%-vpTs@fz zwf=h2KXqiL@brOn#T~>OB+%|C%eNDNtE-6Q0U*gdN4$($>*n>;w3c*1>uf-wm=dp; zSY|P|>tbRo<`_+DCkt||MxreUD%{C}R0zP;VnM!{Di!WzJ^E`T(urq1x86r1kuH25 zkSwl-CQu;f3dY*vDoiPxy_DQ1RSgYOFVYsfwZ)AP;=vqUkWot5Z{l(vrxjsoMpPcqpscPEPd#Eb(2e@pXH8>J^wD;Kk{w+XcQmy#EkW zwXpaSjknuVR7YvNI7M~4z;}nYkHLGX#@p>_s<&vQ-6=Rtb$c=44L|)s!F(Eb)O|eD zvfOlh3)9gSOsCO+?SQd^lYnC{?ue0OsGeiM?Eq0LaUx5$u?+;yM!0V~P)e*Vo(znM z{ii^UlWU`LWF@yAOsZ;&t63_9hp?0?z~+;HFuuP7w$gpFxX!>b4`DO4`wHR&F2XUr zGKrzxQ~LvH6HxLGn<+Z18zvpVykOL<6(q}cTjep2{<=E%FYJ*dj2+FAf-fKvz*(eg z@p(`GLc_iX0h1#XlZC8AbRS+Hl8y_CkVvW%xgF8;4}pMN3J5^5&2}ZffF`7qP3^pp z)ZP+zZkIyH9_~`o4HIl9nMQOD+isQK#Pp=(G;>5mRPc>vZf4FsB1v$L`6Ac=RG+h`hA7Ikm0*qopaDYM-qAtqX zK^om!1J`QcK$*xv80R>`0vBP4OM28327p}u>LO@ z3Hxhulw?KLvdWMKQDoUnCpn2+Xkuu&t1rnw8p!~oqI~Y~ceK2FpkN|@sp*5z&h#;K ziS!ZD{*Laa#oe-!e@7Oqr3=p;m+e%Ea+ouK#s>m#FCHmT!Ip3j!MOuj=7I(=hq6U5 zBjj=DVy%ofc&;LJe(-UYt&dg6hNTMGb0V9^^!^0BD*-zira`%}Vp#Gr_c+w9v}R#r z%IjA066$=$2||OEXDWUK>E79qZjWdzNaLnU?+oEMY(MF+l1IU?@Pdc%&YV#}Q_Nb5-B^rp5n+7w0MQvgj%Y+Cq9YIiIX_(Q$4g;X&v@=u zxgNV!CS$kC4^}dfMc`|Z%WEEr;&?6ax}^gT!mVi~D}9Tggagh_7JqjbMB*YWVSIrP zTnq^O5&LL??>k8ZUQ-ql_>aIYBk&|DBJf`$EoARWOs2p;Z@5h&An;Kx2LgYmV3OX3 z+-igq5qD+^8?*=EeUrdDD_|u@NtWA$~+qX;3;Cdn0dvrteNYEypBjIhzxL*)R zli;!;Y6~H=UpVy7tH7PC7Y?1l3Zj$(R4{Ji2mmXJ&+28i@b9EFZ?>+TZ6@PSozu9& zk)g$geTjKtaUn2vbD%W^FicK<0gQC43EiE;KGR^`O=D#?0q^4KqEQ#Z!*}Kt2^uq0ZxwWp|LYtci8JR_6~!cxdJMkFUUyKF)au@`b}osYns+P%w^1oZC80n!ki3%1(clzW;54D2pQh z6<{1y3<4Mxn&7At)G|8C8C$H4NQr?XyoQTb`Y^x|58;?TIK_RPrvzh+$2 z-T1zXD@tq6qv^xN07nBj0)UZ-HcO$A$iu$_mG&28B`XrX 
zeZoq%Adx8~J(k;=>8vy$nHGp3q~3gh+#iHYWOgvL9i10G3Cux=@D@!jy`U^N2j?@6 zB09&>O=->#&Xmk%P$7zK7N$M(;T@nKEtzG_1bdHVGy+JIQSJTd51=4=@z&m?&Qy+= z%UDT$Fj__;dMIZn>&V)^*Ydy|rraQQ#_!j->5J(FAkAr44nxD_(C>&y-UEBHdIE*P z$S5PGzC$zx$s}Vk$UxU*aC#Qn2a5?wAS03-m{aq0j?7bZf~ zy$~xx)e%YtA+PKSe9=M3Em(KtAmqBsvwL9`QZpIzlA)w3(lHI9?BdHAD4d@ZsLt$_ zdXWe@I|D+>&6Y%e@bPAmeMCO#8t1gBYamO?)cyg>q)i?{Lmt&`Aygu(-%7pSnoe+J2v z&3Z_@LD)%r@{;k~YScILpl%0SE}1kp8sum(f1uQ{i(=K!7tLiyfT8-x{q znBal;)w+ik^xG~KLD!^&ys!Vk^o#ceY9bPTht>d=NzTpAl<-Taj7VqzFQTjroUS7@ z4o}XevbtX-*3cj(b|IbzI6!$-w|#*d=&M$PQ6rWWZ|m`%ZM)PmjANwBN6C>z19({G*J`Llp45Nv@83-*C5 z3X(V%f^86X66~$iGYI5dFj@q=F;lQ#GZ2l0Ai5?9cBd}kMG6=2G!e=dY*debTCo3D zs0I6(aoGiX7El<$o*NPDhqX|$Ov-O#reIgIY$P;+7g1IQf<01j$Xuu}9^OD4-(N;S z^aDB%C3X_{+k~giJhp=jC6Fz1C?QduhZ0(o^l5!Flwg?yHaF`HZaiP<-Ca6rEI5*N zD6s+hg<6f2@)O04k$!WTPOO$hy|w|^xp<9$)dMW<=p_!!b`m_{%#%q`ccw?4$ZMlK#r5dD#LUng;`0S|Vez#F@u%G( z9;6YkCt_yk4)NB#Gz+E}#Q59{){wdwv%Mt1Q`?D{83-FT6>|-;YNW-T;qAsm`u$ac`zJR@ikX0A_Njli!im72O;m2jyMTZmLJr4X9?^y}Cdzt&9iL z)Ny}e6iU_BWi}b<7tZh8a@noX#QM(DpNZH#0bhA!`tuNlCIJbReS(0@EMChp_s4CE z0wxU=p5gvxS1rywHAHD3IHe)8vwj|Mad&31GH`^~lEu;+GE27z4s-UMV5CnRfV9v&8wumh-HstDB=MUd$!rc1 z1p^)Bb_llU-_d^5RpD;L6O#i z$wuC_eo+g?K?~|ktgi(JXf3FM79gE+s1+ro1wD)ww4=$P1yx-r%k}`HQSTra#FpR` z+e|HZi!B8$$iZYU;Rr9{Ev0LL3YC5z!2w2a0i%h1Cm&pY#vACMUSdP31*g*ZLJR(R zj%YzAm7WH4Mhi$_L<`PFT1X2>nAQSKR7eZbuZ_5zu33L6r=CGPK}naIgLsjeGLI)9AQo(+=cvv7MvyXXcGVS~`O>!D9orqEBXMy(+q<1*!b z?aq|esgvW5fMg2ke~?Te&8Lv=&n;S14Uw|2<=;qt1FXQvGQmYivun}<172vrnJoaP zm_$}iYY7g~C%Ax7^uNhNg#z!w*1|_8V;s8%i2mF^is*MD_ClaDVnx{{tr77r)#(&w zrjs}={+h6m`1d4X03l%z|0vf3@lOcCS>uQhZPW-Kfu4mfIQu|p2V!>as77_Mosfeg z87M`)i&ne_9ei*icm?|==jzG$M3OAFu)G0VjDr!3kiJoeoe2(MncbO1W0%Nn^Vvteps7gE>n>29!~*`UmNUA0El1uA?Rn~&O$vb z!w34Vf_;d3@+WP_+}rSr3$+D35r%xJGbmrO)5piO7vSK*`^dthfv>mKRI+GjIj%|b zOPqAGN1DHbhex$2x}J|p7WlZ1u35yH;L{s7j}83L*hpeMC-Vq4%=_p;V?{$AG00@&$$S@R^CCo0bE!5!Bb-^*Gj-^*$x4;-mMq-`r9 z*0|2$rv`6M?}DTM9-mu8*nt+)TWS<0h&`i?KNqprNf6Zi;`+8TZiftOWlF&2lR4Z5Tyje1-NxT(U6Uel+aw_4oj*;r 
zS`=4ODOD_v+|MkIU&g~-HIjtlR-P;yhT*+N1@s!h=5Mn;9esF|>^};C-ez5^kqlB! z)pua()S^smdlJ|^r?Ao)i?lZ*9YzNTs&u9nR+fG@>GHJQLnfI`d%1Zpzq=Q5UT=;k zZIg+3zSIWEs4@06p8-9JlKG$jTqFCM-#H0&t!cs0r0=ZSA{3N4z4|`^J|?q}ml3T` zFYJhk9`AcxTZ8X$HE}>G_g73?0U7k~uL$C|SxNaV$D}M2pS-xPxQ{8uLR(-;4R5%< zq$bs~r1d%rS1D|-J{#%X3&4EfAoyf|97nJ2-9drd9i$a{;baLuNtLm6>`A2IbcEde z;BBgy9l(QC(XD1zc<_bk^H2?}XZ<{SkNyeto-Dmtc6tXREt6h;3o-5pd?$Lp1ST3 z2w>sM&U{HVL)9}xwMnR%S_o;gYS&9Up58xG;{GN;SNEqfUwWc+p4BiL{QPM01vpg+ zN1GA5rKtlpMDMBBMp%Y9uypZh8`>eBRFkpx4V-X=IT{Hhdj7Tpe|r9wAsQ1sX{+No zX{$q=v}H){pEP$*s(iE(D#?p6)!U=L+bXwX1}DKgI^?EIP7d=Ev%QHTUGkh5M86un zP>u8^HwKctDm4-V&RWf0ef$Oi1=4%|U>JWym z2d~tA3{7IFh#~S%?ZVJ`4E14XSLW!!&<+fB@TVt#KI6~VsGj;;A0v&%aH~Kln0-KeJ6tJzpIB(21)t&nf0P0Z)A+$AA<3t1Md7 zKy=)Tiy$ZJ*NkR}xp&syNDK`s^fGQRFsOn2*;jC`Q~mJd=cIP!yR%53wF#- z66f)ykuUnFt-23kLXU>fd2#hRoi3BAKs?ij=yZu2q{H+E)dd>1kztW{XAxag)yL%P zrD0Qy>wsjS41pb}^GSS>4mT0zRShHMtk7YJU#PBSMT#t zFCt8sS_5+(o=6kC<8@fdzCeeiE|edSzL}DLi>hEc_kZX#p=G=d*OQiFoxhIZQ952| zOh|rwzdeM%12PQ!ZG;`DVTGO|9Ts|C)i6TO3LO@h??OCp*I_B=KESY^cWYRQt7jNA zuaGuqQ7Jst6hx}|4807J)J*=g@aG2pEaJ~({yf2-%kcwBrH03v5{}R%2=$i}&&7gX z+baMI$)}mP9ZaS?ok^p2E z7WlgWLsr~ESbLSSbsk~KUvyYld8H0Z{`+-)iN8yS1-=q6#5K%3j|$}9U;O!yKT8nDQm-MvU=(pR-dlJ!p`G$I7T)djWF4;`FQ>O3{T_{xc7Bf zs>#w37@|BJ1a2TC2kiEODkOsa9j3%ecv*k}gA7J;)3& znSaNV{J9fPb*sE=RX3W%8~Ah<35LU%i}J(D3?Mq>CV%)m(>zZy&p4i{fA zUk;L&HEKV;>|ep_;0Lu4YTr0c!cy8xIxLu$>af6krMW5bX&n~$ zIe=m9X5pzCHM^==bS)$PjtKRpjunC))mcXp;(U|!EIdJqFyW?9F~{ooM#5elD)JH? 
zZeaXx431q03n4f`r`0j-*buJG!*%*+c#_0kDfbwXe8&(w2A6>;&)ru z{|2Ry7(~{a!kYC$HvB)_`Ef+>>w)knH#1nBegzi!k2;cp1)zvYC0&NfTpk-r28mPI(DG$)_hFXNaCgX6`0o z2fzY>z>L`Mu@SdFq3!VP29plLEiw{~8H$xhqZ;mlHV~v<0`ColE7R4FLr(s9EVSf~ z3V@(1zX{JUxnbCxjXkgm^a9c#OHfDRorGjtorE~a!z;{Ms`o?)0fdSXk`Kq?Y6KD)sJxZzL+aeTYg<9JBlo#$ z7WaUJvtqp84wfnN0;ZQRyKW#*lg;?<8G2uRBS|a3@wNGC)%$lq&|%*FBzIxKRi@+;?wIOKY7Uf)L$U91N_`A> zlHgvb=#=CV0XPKs6_GXBzW{4D(3@9v`52*-&_%O^DIqqS9hrV zM;8;mLnT_wECIM$?oj!*RA?>Ar1c_=Bz%WT6iJmvB6q0F*GSMp(It_+G!k#EVK^qF zqeyntNMxu4hSjUK(mnD*pJ&O>= z-Alk$>|GbO?tmAVl~lrgs2#;(aU@|Yu8)Y4w`lzXY_r(o++fvYBrwAKZP{JSA8y;i z1tnK)PS{gYYFkC)w}`e5#!o$S8uc2+2mHw(t0$2-XXN9nUD#ml?ywHn)SE}Kr9EYq ze$APZN|bJ$_}i!}f^Sx_|8OSYQ%}vg>e#~}xG+^>%ixWncw?nH!q!WD`ft$V%#p6E zbPl^NEBOaOBs~wm=oM?JE~3H0pVe1tjt4s$+-Sv~FLa|-#(K$_SQ6!6>_RB3YJjPw z?9Rqh-Nv7rOw4d5Pm>pnf93g!x{?r=@#iA`oR1&&$7h-6$>w>yc^+$?N8l;rKY7Ra zU*5N>_n|RrFBAD1UP#Ig1~^aRTh#YQ>dfAUy?rE48o-+xGyF z9>5uGJ&Qfox+2d`?}oi_UztC2d*c8Uv+K#i4EEvtDeyL9?f7xPkm~C|0BbqX5He5* z#r&_lZ&9Q1RA(5V6Y&gO6S3Fb_pp+tKUHeZ5hy15E4k5|w)V*_A z&2{39wxe1r(@T(|E`ds^`_aIx)*L*;6w}{>KkJ?g6yzL(KBc2uVgs60 zj;QaCa8L|JaPZGJl81Nd%bU<>-NA6A*_a*_XbJ-F(3SF zw8@Y7;Kzh~@L>moFyn*A%f$y*xu}8HJ|(a;E7Sb|rJg?wKY6AC-=ncQwNLq}3HPD` zcv`1KI)KMWzWr^?xgHG^?bmLW2I5wcQ{O`aU5i?7YGExG$RI)5iJwOd=bD5OpH4Pn zM;fCRa#8|SlxW!P^AD0LQqt7evhTM35$aPu`t0=XaHfMQnAojtYHyl$Xc+Ci+8IA= zK-&8NSdW*y`=Ni-zhB#iDuw;~=7@hkc;ElG{QEt%fB)Czp!#R{_wR_q>kma2T@G^Z~6Cg zyZ7&>{U7n~59{8)w~*}lRla|}@u&Ou+b-?azd!CL`1cc|{(Z|IvitYjYE)hO_r(IB z{rek$B-5`;!5xMzIWQ#@cjezN)n0Yi{{8*hs}}!$4^3-P)EMC-ei}*F{{483MEv{L z50DCX^6wAONV@j#f2Wa%e}4;*`2Kw@Uc5%NlJCG2W_Om8&xp^M#lN2<=CgbMUYzhQ z{QF~(zYG8VKq*uF`_XT8;on~jnCv9`{(U_x_%HMC-xu1t_U~6oy7upn7#Q;J55UVW z^6#ryWq0RYtG>B+U%4Oh{a^F<oFBM65pn-C@#=n6PL-z8hWCnIt9D~CwoX^WfTti+w`iZh%y6T5v zon4vNd{-v5pLS((_V!$vA7I`&Rhg=E$5v-fRlb3Yac6in=GJ7Y@}SJA%I^i>^m-70 zTXv5HSa8zwm!LX4e|aRT4*w<#$-aNnh>0O@ zJ}ia(!yU(@zr;3InWr%djE8>Zbh`_Os6rf?V8ZjWJUB$@{$bG=01&JRWS{V)w}b;` 
zdcyOhy;;RaSb@--r_@8Q&P3-uH_ifC&a66s$4X~$cGgNRrwp~!c$1x+`?UJjh(vOv)Yd#QY~CZT{5ahk7vtdDJzG zxe7nTvL9kN8@{(-!~CZ}u2Cn;vqc?mp2r&aBk<%jWRKV2mbCQv)nRSJxSu?jkhE{p zGm(*cIn|u_YQ{|DZqPLK)$d78i~1j)=pEJlGyrBk^bN#m|EM1XJd1yHKNi?Yd81^; z`A0#9@3#>Nkj|F)PgK3cx2QVvtj1GKk;E1?fiIVvl(7=IRh@%p*a4FD;*b1~c-6G* zX-qPw5wo0v$gf>(B{}9&r$WdooH2Wr+s~;cV6~T4*ll-}Z~4CTWoza!&>!ou6}iv5 zeG2fYV0SOP!`=@xcEbCvxcxQ0KV7r5XIaS?*KNzMPA8WM;Dr9MF$DgCiz4QZRD3aX z+v=}-S~q>FL5eG!q5TaA?nISO8{<_YtSDJ|-L}JUp=$C;4V~|dD8{Fdqc`VGMq1o{ z0|SEmXu=stNVtD2N;o@K+HZtEo(!B5w<{MqP8Og`3oD#I7FF8mE})75HOyW*%zEs_ z3hS}=tj9(aBK$!`?$U%iw4mI2?1f7EIjUIh*PbEH3=@F}0+GFYa1$bS($vX4d zP1dLt_epv!k3{Y&U7a^k zon^W@FHfpZ52X6<(hthl7eD-zW}0>t&-d`8iF!kxThz-YKK&bQBi@rI((&Y8*_X|8 zr94-uD~o{iGX7kIA8ube7tgU-^jXF;E^maSYMb#)A)C(08}kBuBitwhd#wjD4je-G zOt={uI&4uceTuPd%F#>*Gp;P{pZ?{>(` zud-DTc#xgkNvMdrpzDbXFoOsK1Cz|p0`#T<>Q5l7IVZ)N&TwYAmQGBC1OmsB9qcJH zfTkND3=H`_FMP8u3(#l-luw{C0I2{dgUM$AlvOMv=ny+u5K!M0SNfSOfc_A8Ge=n+ zuwZa+*T$Wlim{wqVb8&I4eF5;kJ26bD!wROVdHkCB-=`#m*dR>0IExVAfWS7czF6~ znl_7NZ(vD|OEl&7=wf)AzV$D0i3uhMrtwCAv34oPG@$sN<+$MNRsW3mc?O18p6Lb@ zAl`zO@_|Mhpt%6j4d??k`9QKtkJ1_dq#Mu&n(G7Q8%(nSq#Mu&YVd*Hh5nK5RsiV+ z^nqq)c5-hS*cTnsf&$7qwszfl-fJ?gaHJtkXUj}4X5zmUpuDEB-bqsVD(&yZ%>(EP zuN$(G7Ye?qMR9z<5dA`QWH8he));qA$Jj{d^Q-z>CL&kk;FW_{^V$RVs9HoR3zjUv zpN28vULl9*!`mNLAR%t=QnM_#RD{h+HlrWll?4 z$&v5bwOAK@;d zKG=VUPg6$v2j)QkF!VX;A1KDUe|Vuc;@!OZ0^L89#qE`y`vs>(8J9={4PSqW3{P?(l-?%3(aPXFco&PL<*Ol zp)0fb8oIvrP=o{rz|f!Fn;Xb)hF*(k@mhYT{GK=K>BT|;}W?-L--KyNzM z>f38jdChaV@tWpd@tWm5DoP(Ay;kjY=n4Qu60{#Ja#kV)p#Mr)$&G|?Pgz@GU;4H= zsGQM>b9w^bg9ZwF=vp9S6(Yz;zDD+ifQsz76B_NIuYws(nbR2$Ynrq#z~tygw4_d^ zIDoEJY&dPqPS>eY7#kOGFWN%^+N$ZQqdefvaBPL{srM9kN(|sv*h63R;UmiT2CTG` zq^Q_KaXb9bR3`m{Ng#HDu{S|i690m2Ifv@67VSz?Ad-}DBq}0M>w!A%;8TwoTJ>MV z$DCagxf^|wvR7da05MCo$tvg(swrZkH9a3-7|4v;CJ~t=+hq0afp5qN5j(OZ-t>rX zn7y}+Y4MU3*NaU28X(n2Y-=U|BpdzSA8Sp;jMb+-m#PAD4KHT6^Z={mdClB`$bxg> zy^7Nl^&qL1DuG&wn!qlKYLvP(Y*hT$ z=$eYzV;#fL?hNhBp921Dk011k{n;8@cV;?)cIHn3es~P8$r2FignF1^#NB6}cbn(! 
z^2GVo9hr{(Tk>3^X70rP5%Zlu&3C`h)b!vV;? z{CbI(j$=tRDy{w?*d)U1U;E-xANH?3m-+oGMyS%4#hmiV>{C~grRM$;uRo33o6Ap2 zxR10V%^!002iMW1wlCTXLi7l|7u}`a1A~b!HUEFRA%1!Q&oMs$ z{*GGWwbO!;>`MR13r2;|O`U^FM>*(s@nKS{_B)hS=?>4&oE+U1#8ujx;MZYVWIZmn z@a7(V!vr@`jIQV~T&U~+{qEQm&FHsTUq)ouKb4n$KAg`i=#g1bF|3zG|E-n9acuwGMihQ z#lT=!hj+2m-wEp8LQ}6lhAM{3FSSlEJr5=@?jvC-D zUyP3JTL|svEw2t|^0uFDCwu_=UXR&VR!6Tp-WOj#K$q48F+pd?F_SuH2M8{%k7N`+ z8X+lI882^0TqKkQDL7W zvxFmegCm%A7FIYKK=TK;81Y85-C_e^)3M&C891jv7)bC;23><90LbYLczj+I3C zR0a_joCa^-C9XY$0b$Y*zb7m)^sjj)TO_Ial9H_ zqfz_A;@!{<49nrDAv!T1$M6;{M)TGK*_O}}seUw%$yCA`rqT)ClLDK%2RRB`x}!W- zsTU}sn2YKu?_1QjVry5aP3F1Y*xr8{xWCEs6ZMM3Z&5GcsivFA#du-wKzdo&qfES5 zUbd)vCC4Y~EpHxczW!bP}F+b!0Bl-$^iM7_BBJm9ot| zT_KpRE~|71BDFNCN@=Z**KD<{8Rx*{qqwS|D6mxMGiH!8Dh*chzd(i+)qLd2oirLl zbX{V5&Byr88E2D)ab+d#+TB?Iw7{c*xCmg)FNhAzJjdu!K-TR_)a!pG>wdiZEI9(P zoW5w_2C0BiRQ-atvpgylJ$#aq^lMPHHFBb$y*GEcRe-rThJhw8VYz5s^1+zG zxQjdc(AeJT3><{$PbggHsUc+E-S30;MG3lRoY3DhrWq{hLl9?J5x_9GOtzt{Oyf<# zt91T^*-T820cI^>P^d2tmldbtHQz%XZh}t5VaM9cGJ%0G^iXixu;v1EXtDmtf`x|> zqK5Nyxm^jw{2t6=i2DIj(@7wF*|`&-A`z%f)OFaG<+`T~q8UxG@!PUB{GH2$VuFlnV?hpeRD$R^W(=6s0k^Z~u*fCDD4#D3Xp zBONgNjsID-9RH=O0X?=Jy2j?>G}~5nU>};zsFO5;bl6xc!(nC9uD$?sK!?th@+VBA ztK-YoP$ZhR9$LcojJCNnHcDs1ChSk)v2XyX25w6dI^eCycoI_Nm-d55!j%5$p~&Ld zZ@BCP9K2dxg{}<5=xiZ6R*B5PB%v0tR8fVWCOBu&KC%Yzxk5QFW}-b56Ywwuv>_p( zHfBuhm$6siocdEfiO96TR&obTdwx2JZV)t+WvYD}0xu#5)Q1SGe#otM$Ip@!8b`H( z7rOgwPv3}Pg<6Lst_H7>Ql#R_fl=V9Pz{YkOKh^Lx?6v{_GzMRFv(P97EKy1gzVJ6#Ac&ZYCXid{uy}Cw2c`DAqOawnolZ&dWhO&JuK>c^oLVn~!&lxfI@$ zSXeH>cMW20UhnUAgEO|l8*wGvqegFsX-$l{xZ~j&+$;-U0n^oaNeYy^D2F!Zaxi5ME40}ER!`!|{e}G|5Z+c;G3m@aXO?(V{qcVnNT!!<-3>P6UM{5ihhr<0s z;eny>piuabP`D%%9uf+dF^mF;zakV)Aj~t)j6XFL9vuo_5DH%w3Qt6s)46B>em6mF zquW@*m%{F$jfJ-{_s|!&#SWES8RyoNK}L{-9!DStOD+k^4S3Ynv9`?1l-K;w+qzkf zLr+`Z09_os`e`(cG!|xDaA-r{#w z05=3A>+ugJ)XNG02sgL~hDQ`0e0-Hxqq_^Gfd{ALqIXnRZg9tw&jfnj`o*Yf(i^lD4tcrv0!5yH=eP zH3uIfWD4#;#G?`&Ew@JUbXkX0uIYf%)ezL0F#u|V&pWm;pWbL?Abe8nCzxm@KL#st 
zNJ%PO^EI4`7p{1~mfTjd$=WDgy5Sbs;YL2UVFp)5B6%-#IMH zbPgYobny?c%}lq0OQwsf_}q7B3-s912olW|7K}!u6~L=(bOb8nj~i~-+d_3Bqr6MjoEh)-byZ&B+Y7^pvCM$v_IZm zO0S{7#oY0^>ich{VCuJb3RnvVW>;#)gPd}`bjLd}p$aKV=+DprmwOL0C=d!oMSS^l zM&{#iJU&DKU1&uS&;==py6`fiJL$qC&<$P4;dJaz(*=TBGxmY`fG!j0%TfXl4A(}ky);YW1gA;8c%7POhVum+zPfG%(v61qToxm6F3Y`X9d0*fvP zd^fr<0gzf3h%2iuj3BaLpwvkhHb{C`y72VZzf>1qz()@ZGXh;W3#yf&3#qPkp+-<- z=mO!bWL1Ejr^!3%!XT+t4Fyhg;fs!LbfLPFF3_9$dAi_@p?zEMw0y51Qy0)L%tiuq zfh$7L1u2QTFrLw!bm3yq4P7Wh>;6PtAgDFtw-jjfYs}Y`E-(>m`0{}Qebh>H;SOBy z+?_7G#ESihF5Cwg(FIm7Qx~pgiSBJO*l=n{FLZ&#WYdLm0*fvPd^ftV9$lf<1>(x8 z3(Ek~oi32cUVdS9l!g!U<5V3|&~!wJv-s*YRiQ0^zOXT1gV@S#_aV zTDV4*AlWZ`^i?;yP~S-x%6_&k6lV4dgN4FOT`07Y{Kx=wVWidtDT%tEXvsV2!r7o3 zx=@YQ{fW9jP;16syvWF*>vG)0*fvPd^fsqBOtXd5LZ@RIG@P6=@%}QHtR|k?%niD zb>V$jCc|T#ppHYQJSDKsO2Id3r4FEK@Ux6V>TMMHr{okz3X8kd1k5~RJ2^m}RTUnR z2CkvJNw@Ik#%@%hAyXA5RR6p_p(s-iUd88$P+JZmLjF;am3#sTm~O1coh#KcO1Om) zoiyN&AeW;AwCPXO0D@XGe#Z~0V3fdoU1AcK?X@p9~n8 zGpx>({W;J}%o)~kaDc)&IKU&D;9p8e3jWYKDXxo2<6{73Mh3)}ZDg=3F?A#LbOgKZ z0Um1qrBZ*Vd^2H6bzuHaga&z%Z-xbHyEgwPOa2V=Pk1YNpg~Q6?{urqi&EdA^&;&X zKJP}_8#_sR>gP#&|4eDWMLzeFDee7nL7Ir;6bwD5q(s_JU_>Wr{}agNIDmtOpEwQx zs5RqI8f}aNn6E2oXCjtPwqss0mko+0aPIxQ1ok5*d+BZmj%pFocZ0J(jjR_8hzcF{RtVp2K@g4YQg?S>r zn7kUl1#`mAjAxW`?Kh>6l;>%W%aghnxZjq5(IJsfZSs{;E;TaG+*~(dqD(T=g0QNHx&Jj4Q zE)XE-zXTWV??PqE{V9u@YEiKHh~%P(nf?hc5G-Sop1H!Lk4#$V*{P_`CiMIjKAD%W zUyn}Ks3A1Rc02c534JnU6PcSkU0Oh&g#qL_xeTY2~v zL>VVMeetGW!iO@-+|SXA>Iff(R$Ixjh%v??lL4Wrq{i%pdK{mqQojaC>HyHg4NK1n%`Iw?q5Uhwt9HDg zQfGdSO5OK~uGHO#Q)`gQ8m=PLjV8;pd|AYwwJZuq$4g3!8fPFHnD{rm0BJ8GJ<%XN z4sq%Rq!Q`1gc|mte)$t$#_>n!2hu$Z(j5)b5lkGekv{erkbd*A&bkrErG7-Z524yj zafN)@kw1R`rA+S4m!&58YZLheo~jRjKEV$w^ml=1QFj{1RZM&yFR0MLEPJXc`)b6g z{~(pf|3#<^OqRR(awmTtC(S^5yri_KV-3W$Oq_%lAbl4*Y}EjR^tXspqmfFarxMCC zS&rk&vHUsPApJ`6;QTh8SfW(>NVYZVd&z*7KEX4OM44NDP+NzpNCbqxY-T*B# zIp2~JTGT5{{0xyQEh%f%y$0fGro1XItJK4i9rx7=L8}zyN9dRLEo!E`9H05v3R188MQ?w#EdCEd$E7z{ zW7{;g_|1h&iYjCF&b4jZyjzFscPpDad 
z3eG7_=c>M5fuGJreG}xGz6sKxa2vJnrmuv7h&%F0Jd8l2`)`h-8RaMcu)S(waVsg- zZn~d)7s*|#)7;6Jxr}1q84sRI8X0rSIwkU^u1f5D85wu$>sU#*sand2Zh3ndU@`lL zxKl?U?pOA2#QhJ^A6ds-R_)nD=T=rF?B^q$K`nvo6H{h?b=;jrc!KDd;G4yzgmkZI zi#vvpjS)n@7DU&#Eyj2y$6$+w2T40fVi`V|r5 zBe9a^UJFJ=&m^Xihk0}GOgP7PV&rnBBR|t)?lWXl!u_k%G6T~rV&FZkH)>vf-m`iM zI~$A)uVUuX8=1?h%_W?3t8SGOu{9~(v(i4J+AQ40-4h!arC)P=6g&zNbDklJxP5(t zfqojXIf>FWUGixj(qq@h>GlK$WiOx&&gT8 z9LigZG;GvZnZ61N7rbvy^CJ%G$@gJrR(*h<=kdei?l-)LmqmP!OOju%@0nI^FoPjx+iuI0}p{*1>DorRKH0%yV0g&u=fk2rR~fzsw4Iq3l) zm7ALnLr#4Hb5C@nUbp`bjPN{rg!{tAqaLC2^&+V_7A-DOvVadK?<)tUsQ~K<$#w9l zF=Cfw_t4}UAjwE$%pQ*oz=!^WcFBGYpE0PS2B)Pwcdw6`a#BoyGfgeR`TrRXn z74BuJQ0Mq38SkEvk9*I}1`8bVzX0s;j|blf2G@ZM-kFt`(h}NxXClt_mcN2wn zNG_-Q?=Ij5Jxm4?5WIjDh}QoO=~ZhudGP}J z>A15|XbVZ;rFOj%rR|e2=@hM16F|V7R+I_E!h(R6n$`H`$W32}_>XuMAF0KD!hBL_ zuNr256}R6lulZz~a`(8NST%XEPPdJXt@!xv_$u$9>4AXP_f=^@H%DhnW(`6D4)Vb; zT*Z#dotheEUyke7qB?etPHU>P$EGsVj?-y#k=B%%Hb|#6K-NW|G+G>JnCt73bj0pd zURfYw>4P-*AYY>DpbSGlE!;i`?kk9%0OlTDCpijrGT-Mdw}<1V0s$(SMJDLF^7tams<}+-<;f-qAJ6pEm+%64>mEbaAIid>kxN$0o_*B^P_Je`g4kyrF2dK9#(P#sQuAys_OORko6ckfcB*`K{%R7*TU8ZV|{++rNpQ-u%hs&v$R&XA6IN zka%>v-@JyGuka(^Z)D0^{LuYxW2Cc){8hc)MvuvkOCA3%V)dn;Qd!+4rgN|X8Z2WJ z`{es<9W;`Ua#a+Jo0N0wg0oGJ%4U!KAXpQddvzJyjx&zJaD z&0aZJbWUD|OcllG>LW# zwZ_8tY}m}1hpCTXoyKevw;n@JknGPo)qK;#s_l(s7wcx(iO&rh@506*0gGmjQ^d7N z!R=3n;X6xZs9L4{kz8uxe=O)Btcxu!gJQwM!xuRSgO?m-qe1y~sV9LyxSixeS<=*Y z$`N1?Gm6C6Yq?pRpW-I@(N1Mw$#rDRJ+9Y}^iW@Mok&iWpV&)tqtiF%rF&QbIJoEA zK%7|mGgo5QS%tk>J%P`xrI&$z@Svxad{pX%-Ul5>ww<&aq;kh>Yt_D!?{kaYa?Bmo zfGd2YO4S%1p-LFYT!T_^997uG?bR)14JaT-oeh;IuYM)yeDE?>#u#;{tij_HPcdt< zfot&QX1Ou1L*)a>`t4&a!cL;~v(92`>q6a8DUgM-!_0=sC`&Pidi7OJ$jf*s59mHU zOh_!nbJVyYTbj)gHC1$3u8_pl0yw_qPH97{u>UF|PVdXp4-R z{{aS!t!WZ=#q1FZd+HDqa{%oOZNrHY(cRK{+R#svw0YY2@lHe;K;c2V*V(9J&~&U= zJP|+2G)4tQh>4RO`0|}UT094cgGQ<*jId|kE1Es0%L<4$Je?RcJ&Y@XLoK!B@O>}{ zGEKz%mB|mSo6bXn@XXOoYX!f;*%N--l2SwQ^8~Z{mz;1-~y|ebLylW0!v2FShiXy$3w+?sRMJ>H%2( z7B{Fy<@vWCe6z{gFSW@!C-0gKyXC;w;|&S8ZVneDcrM?~s46Z**d~t4<|<1+pEL^v 
zld>4@5vt7nveF$09ZHhOSm`p{Zlu{6Tl#G;573@dNUC}A@urJ8v#)Ci{%?k`jzk)H zn{bOyLDc-1yXP#vqq6x#>da!DEA98d(?P^D0C)WLg4^rXG2ZP1i|`7rFUgI&hai*O zm>C&MRZGJ+GR$ zVe9P|MP73*CDLu%>+C4GGKouJhftAs@c*OL%Z4A(wIn} zWB@k_Ie7J08{H<%i+@uh-lE_N1D3%l2qfBCPS6Yh5YeJa8@I1XgoOjSxg{b4* zojC0Wi_}mBf$!w-Y)0;o!$TN?tJyE<&fy8X1P;%$fft3&fxuqp(4E7>d);ErPjPro zCge|Wc=iEYHizf^Q-9c+Ud?aM2985o$PB%oyV>VXWiCDeH>$d`Z}TKs7V&MW5tE~) zy-2o9IxOT)+=6(+Y)rma8-}ERmqEp%jwn?W!W-)f{c$&KZ* zcN`??^G5QaJI)i_;(oExd4-6vZvH0*PU$yUFAT=c2912qf*nHd-j~*<~ zEoy&BY*D|H=Q_2A#II93$~(rF=KTW@qkdSaDc&s4b?U!D;U}sYPgZNOd0zd3uF2(i zq9a)WM(7(UTw*glg&t$Zgl#ecN^jhV@!?;An_M_Z^5Xk-c&ZD@x8QW`@27ASgkiq8 zWEt+5Bc>4OhjOrnV8*Pl zUbD)a6*jTKfmA)~Am=}2@NEE!I2_QiG8dtPauyYH#qrW+tCqJQ#NE>|zbV~~KYx54 zhH;fRu;vWM;aVJ6^Cl{9TpqVo+w>m-Eb}F^HA5MI1O7wG$6@G??`@qb-TmQO6pPPfc7|U z8zzo-b>FXBDOQ$w^S~LBUxpFzR{9gY?X=<1tS4qsykxCORk2(h8!Y)0g^Mo)a1}AQ zO*vwNJ0he`mjR|sm&fe82;r6CPAd|4xnfLKmZb3DTe`E5l{d?PL@B_&g&BhEWq8AV zoGdO|_H&TE(!qzHC3j-L`w9DAX2DfZ9*(A$Uetf_rCWO@a7}Lv1NEEUh7nD;6N+Hm zXyf2s7Ga7FS0Hh}VceZqtT60S>hUL8{QF!j3}yqdgYiy|o$NPp0JINo*2zdb1c_W5 z&8^#>vx!`(`&8errsp1?$2n5WvF=P9?vz|0Nfg6@0^|<@$(tqw5*Ht8(9bbOsvCie z{g(HK(~L_jKF18h>8K|rv(alb^_MO|Y7(&k6X>Kv_#T5o&y zOg{v!$UJwpED6aX>XmQ?@q@F6WRiGj2*lokz7{5cvp*En#DL9e-cs6qt)XFR?v*;| zc>7H@X~Hf7U>Wn}w)wN8NrBT;^V%0@RuOd!@IJ^T0^&-=c=5I4iq4kGI+K-Lz~QA{ z{medFIc+&DerRgeGpMj5dA^NIi9Qj0bRKwWm(TYUIfbUSKyoOKK(mhU}Hj zVF~-n24G>Gy{t{}NceYsjOJ@A%@_LYb15ad9IVD*f(rAJ>fb};+k;Z?F6`qjLZ)dz ztoVgnpm#WJP-W?sNtdN}10|mS!uUMZ=EIXnR{kY!W~=0akV=^(eNns&0|y>k-doBdXD2O`+aNv_7t;7 z(&OxsXpZV&(z-EQw@TLv>t@X#(Y@9B6Clg$XxqeQEf3i(XoJYQqpJ{ZyU&FyT6;qM zUPed$h_IBOM;EU(ljC;;NZ6f+@z(@NILt2A(HRsU&<>LLlHgYm&MQENpPz6i)0?TH zQXVi!IIgF?fez)CFuv4f8(2xIS^CQ!#*RMiOS)NKEnRBez}tK18D9=@fo8;*-3B6D zAd?Gsg9bowdg_wlZVb~yR#L+r$&nU})u^(-oagvAWjbVCF54vX?=+^}1XEvu zM28E=PM0$9uAjP7c8lZY9n+1rKqQwJCQ9#@My&l1))3YQ{ThnvypYB_31e>F*ciG< z{k2bY0Kn*&js!$s)R3r4+?QUAiC-3U&Lm=8Q;GG%nsZ9*+}4Iqq%x5oRky_ zrWC|lhN5F&ewmZ>oBdBrZyAcie0Xw7{3^~Ew#Yz)af4Io8)Oc#Q>Rrc!$q@h_F`&_ 
zId_rz^NwoqVvJQ*qN)A;GU_2lZC)Tz6)4KA=;pSk568f;9`9ngLuyxhHix4I|78Zh z8bdsn1GPF6Q5E&AASkiqSGa&LV z19Xc3tx-0fdg*Z$y#zfhlulQVzS4JNSjaWUL4V?!qg`I zzcjSJso)d6zloPAuI1GzcCrcEp{cHXIC+H^$zV1ga86+o-<-LxK?GEw+`aycv1`m8 zg>h?B5srVyvFhjUbvHU*L>A!lh^azE@+LfQK#MI-GrWN;%?e(F!R!pZ#fiI}W^jv> zzF%nu7f?))gIN>g`lAVQCD{Zym^DETW=)WTSrZ%>1P2AdLxNyQ5F8Q&%YtA<5R3)E zL=YSq1Wyfuqk`b*AXpUyF9?F;g5YHm%ppt0BZy613{DJ!lY`)OL9jXq)&{}4AUHD! z)(64cgW&8Scy|zN2!i(o!MQ>3;UL%;1fL9oO+j#B5KINZ4`EDD0f zL9l-i92f)#1;ImtU`Y@h5(LYFU_}s&1;IoR92o>p4T7VB;OHP&6$CE`g5!eVWkGO! z5S*xk*cmfpauB>O2v!HdS_Jj{)Qq|yc4iP;AH?1s#Lf<4?+#)cg4p|l*ttRM!$E9g z5c^~h+Z4nu3}RD3?D8OXMG*U95ZfBWz8b`?4PxI8V%vh)4}(}0#BK;;+k@CIgV>HB z_PZc9=LTcsqz~3xWkTdM_>jb}~WV(LM4m_OWwxA6qk$_o}!H`b+;hy$a`W zjeqaMe|Wb*3?f9!^l^ZIe}jHGItYJSLzQT#@qnTan+f$4p&ao7fNQb`SHny9}2{Psz)*q6F6J(+X>Y4kvJ>}C<1G!(VWS&`Nn4Af}zhJ(n9j>p*#yJXmP7V-o2!tg*X7z_N zo5)*xdKf-01ps(^RhhUK=+^28t)D^&cT3J1Ny1-|Qw8sDfH*c@i3?Lw@ zl{vW6;xWBVLlM}psUwrLt$!2NH!3U`acBl!F_(ZtK$`V@ygL0c@p?07A)iKWaEn;+ zWn5AtTg}6gb)yC$8{_sJ#7)8Tb^_sJA9rvUigktNB))H}wW1sg!Pfl795@Lv_c+U3 z23S$jTyEWy;-@e57c9%HSzlWF!P0>n9qLh zp)Fg>n>tuHwr~v_yc)~jpk_R1DEBW1HI+e&X{km#RM^tO04?HvQ_NTy2|iy29@j;H=?-bfye7zc^K{~)ny8>8 z`66fYG)eKf@WpKuu0_<~m@|WUW7AsJii%-5Vo|$f56wp}48h+ObCw9`mf#dcJ<_37 zLMvv@OzD1c7fN<97%``wYKe*!vSO%I_^`ww!~=dI2_mB*q7$&&X@~|;4z(a4s@rA? zG@{}kCwuKyYV@O{h^}QinN0R(UQquapipR)h9aXZHYBUCn)M)0? 
zNM#NvB&+}mgt|jr0V%@ZHdD>7!O4(7%}HX+J+UxSYD_@pQLdhmQt=&k!rAUx)LRXQ z__Iomyhd{NMow;7#3$WZEhr+mk)qss8%Cj`jUoh#D4ftk(MBwMWzj|gdfEsbB%WSs z?Z1D9^8Iip=y4WHExS^_X`-O>rG`+x*j%v9krsZ2DmZxaU{S;F6wj93tIgF$yQYta z`i<2p2QE!UMFY*N7rrE8MuI4ffg&0$G;KE47MmG&=NHhH*&B5UInssU)@SfztS!fe z{L@9_ATXGVrJk9NDCXS6zRpUPi_|SH1S&L-#u%N|+g%{$w<0qHjey7q?Z!7n=4pxd zPCVxN68bK7iJ*;VqgKen%4y+L5xHmt6ePYuLlKxzP^3<#@LIw5KwGE}V=j#}L*m;r zB>pdKy3P{c4=4kP7yk1MRanp3aSX~>QkVf2$mF17TaI+;*fiUIX~#AJXtH!{qY({- z%Iw&VLbO(UONvTVVLue@xC(G# z1mvcS>SMplEP?jubq49ju*1xjZN@;8!jTMwC}tKeM}F1o7;$Z5JOR!eJG z5m*=2umV~cXAvc?dkrICNW+Myml}X-{bXM;aTDlq7E3L=(y%LC!pxT%Lc{L57rvr& zRAD_^(5;4rdn@Q3I}0e<-^8!emuZ>qs?(?z3u<@$MNFmiFmmx07L*stQtk8t3`&O$ zP7NEpGGy5U^$c9hdSSMwkr3#S5&>VucvkqHs$+c0&*^h&Ev2+*zYUCu*%hZH*dh(5k_@By~$v%ER zg0Tvw)&=VptE3T22q2jtRpL^>rIO-yBPszbL}htDpXWJq=iZwM_S@e(f6P7ioM$=b zInQ~{bJjZ<0>t))?`f?U3)CEOM$uYdROfPsd#+#ixChuMtU zH~KxxAp#=Kgm{)^)^p_SZ2mLbsC4fxxj)^DQd`flAh1y-Y=zR3NJB@1Hm5Z>m_Zu9 z!UFw3Llu|8de;cixKilxNWtSx5^0cgcO#B^*iYng?!|8HK$jD>K6Uc%mDfVVrfUK( zAZLK?0yGwbXZHsdRZf6~Fv@RiYs04yR5&w{y9B*amxzXxq)OUgK_4g@TsW0(nNrK7w$s9GPtPM6v zJvC$Ocsj*cB>18cWOI~J5?_X&R36j@Zs}U`Fw{&{3lp) zLMfGybT-u_@z#AcNf;2lXB@R$a z*hEsTH!&mIAS7n&<%V)h$fzaIx;Va1;{lJYqjuG^h<;IOD7{KVPhl}tq5F+u}BIo#g${C&&@lv~1 zs2@4&8IyEZQa8R_Bk0Yn0Gx1wEf>I{o0HAK>|(+Eqcii5G@aPR(iP5p*H=}ABx1Mr zu*$8OQuyF}GJ4+q1gf?T0-)I;H1ubUsVCpAmoKV?;6{MJIuK)5rsp+xYgeYH zQNcS1UN@Uqg!Zc5ACS{9z9V>~Ykdd|DUS9xl~iLW6zkY`IC!3xxd6#%3bunu2|Y zHib_f?AsIyKtP)UGvibA1CW$z3d}{YDSS5r+%4h0i>B~zK&6^OAru%*LAj-G3N0iX zpDj|3&!?2p6o}_<3ZwgJ3Nb-%ruelf2w>W#Q2juEO(9Amc54YM+?pwcPtz2RQz!?r zVN)Qq(LrA`__=CoDUzY7gx*y%*jJ&u4N+q4su_HydFUZ{-E3k7o55>_aLAM++QCz}B?<7);lBPrDkn2TUDcmxn= z2J%6uNDuI&E!l4-uI$bm9RIvMy3{55UuA0G<3cWAD z*tUqZt7b4&!E*^-H=9_&W>5|YG=m2Yb(;Z!(F`j3YX+Udvu`svSa{XaO-C~@P- zkO?R>gFG@hW-ft)&0zgB=?w}9k*FnL27W=;=ygCLmxT&NU_znG?c*9fO~`~m5+r>y z`28)~3|9ZXZ!@?B1hh9`W_-=yDkP8~jP=B%48#HUs6Bz8O@K zY<#vz8Arvm84%Ck3_AO11|tQ%nc~-GAb@F`!Ib;@YX*Cfh>o<-45aXBn!#6h3RDqf 
zLyhecmg154dV>{}0wyD@y4i%@RWo>0p^FVs8DW7p580&K^IIw4we=WPq8ShiiV_&m zqjqG@!N#`0>b6_}&`tGtPFSMms0N=D05vXI8-&!>TpSL;S6dEYa(oVnMdMdxM~8NW zPk#i87rN`L0W4!}6E9*|gaDi26Z#%Km(nB-xge@*$DIVWMti!TEPB{Sx3k7UZTaec((B8VqY!J*hR zk9cqsi(G2Svpyn_p`SX-ZPrd-u^t5-N4%IGUEjgM=SC4G4kzHAgJ6c*b)JqHh*}f; z%N@-~Lu79EbD2CykTL9iY}h^zI~tPbR?wNA1cX^~ zCq9*>k#VlCX^)yL<{Pd50u0){AFjc;=`bYX1JF2df^!ON!u`y^e0?+FaNvYwaN?x@ z#=Z=YrFCQTaZqjdH=Y*@*7X&r`@S8}ckLK?$(bW8o zB4~c>puqr3-wF>vp#!bZKTQ;aoRh6^KpdSMuh6XU%47XN+{IOe9SHa(syUOw5GyQE z%}EoD=cI{-IBCL=Yg~>N7PG|Yq--Nk%(>(Fm!(8v8^b9*jE~6<55*DPUaI3_o@^?? z!K!K}bL6m(p>S2yJ-hZL4*RWGJ>KN*f`N6hI)uA(rJ0nVbnZfxo264Gq13o%G+tXM z8%{h`S0G8alSTGov6apPHIKay#b(|}naye-|9ERdkizXqj-1-si1<*=aX3(fOcoIc zhC9_Mgkno8l9`ZHZ_@pe#7vMnGeN~Ebyo;5mEOQQ;c1mjoKT@CLN$kj7xJan-BYA3 zH!xnL%L6PB>pAvbkBz{ThJDEEXwLX<3O8D{v7Gkra3TlYrDqQ3$5LFz`zOrzWIV3T z495>O*I9xHTa-Ho{*^GkJR6@VLK2NZb#LaB6T)E8$YX+vM1ybvAP&i# zfHHteIemnLC>wy!M7}yGF1L$@>A}pGXEJv;#an~i!pmf)Mbh|Ch{1D}@#E`-LzhAl z!9dcu`e?QT5>9eG9R(io0bybh+!$E3D_jF;q@V6)uh%&MX9v?P)ja}|kOf(B0~x4E zm3m83qaYL@##ChJ%hV($bKBTAWpNcYkW1x&mF$E-ZR4_0>H9|%y@B0hi-WtzHilwX z|2FL+`)EWXcNAIF^6wx!Q5fhkOzZOtjf}YVfP; zgt>TsxZ{VFF%U;djX9}K{Be;KjXV0_@b(nP_jrBNk8X4&Lj4R{!+ zup}aVGuPgy?oWZ3x!sK7$6Wh`F>yIm3D*xyw>o=h zXk<;tTgTI8d9Bj!%;u@B<%kc(9>?K&4qw{EJvcv-~eRQ*o``&MEsELn~f- zwIbUrzR#rI24qNhGz${2w$}>X`Q=23hd69=H>!=5B<&?FtmeJQJ#x+$cVV#_+Z15N z(Cl+C56taaX#E^*tL;l#%7jj%i@A_>J7X@w)!*$Ns(txt$fNqZt3tJ}Gc^6X&j!NL ztlgw9p`-ZnCSR=gpGUPdfhgV9}v#OXl*I( z)51k@QJmymyly~bXy-ZhY3Mq~7H3Ad{IE(sn_m6t;E|zbSt3r2g*;!8645gdz}8n%XfSW$&GM z7$@g4-w)e&3{fY%A9wwB4EiGWZJ9XOYXEVp1%~DyNX&%@ydz=|k{3B-movB`1YJCMbqf3sIAj+A}v6 zG$QuQLRcHLuP!0*TAB5PkOF4VRvm#(hBh653vk6F%5u_@dw9H1_hUBKpw-%~xM>y5 zT=!&R?Bdn@&!tnSB?0vF1XMDW*wVa<+dx`q!{NTQEtCozMli%WNGO*!0pm759W{7D@G=pA#-#9jcjeKG3aIKAZIn z+pi_Nwc`Nua(j?Tc4@Y0j8Mb~l2dm@OqZswqj76ewh#Auq#--B2Vtu;2a^n#004XV zFw7!4SSAH6zzEc#4kjT{8USg(U@9LP5dACaRQ084f1%+k9V`l*~#V=Et@mwqFBRNN|p;>6^`d@%nMk3eNW zUc;4_-`H~`1~sS`LAe8=l$xWud!%>rsKIbBN4$1`RoYE1<;Y?2`ZFV2KZu6HUB;b{ 
zRBuDa@rNsr+3Ibumvb4KzD+xh=k-N9j+uDDZvIy?*%Vscj$i(cYYVx6wE%6yONSJaqH={U$cnGZlw{@S#Ye_@?_6K zfjN_^r1pqSA2ZY}-ks}XdKF_W&BMUiM5|s8v;I#N$RLy#=5gKw=s8-FJ%{=skOr>a zER*|#wBLYKS8Knq0sRJS__nY;=>G8HE_4}-tu^P^C+;52#MMJ%hruGQM%NMM&8lMG z<-XJrMQf++UQz?+>?uea20mgf4K?2ZXz4Zj_5*hlEb|T6czZb75w;9(B?Ca?o_Nz6 zloWldy}@skCQJzWjT0mNK%EV!lDfj`_2@Ic)jnf?^ckuskus6SA7|rZH<=&*!oK6M zU7GewF>$~-An+S&Ap7erP66mI?9&b_tuvEA{q#Rgu#Tfb&H0BxS?l6MG#q_F zBro8g4jsjiyZB)fX_$_Er*stKNyoI+Ku1w01ZqhDW{rSK24fX=6d25>yI^V@u-Z$q z!KV!kK?bF|HCKJYUT4q)_`XD-gE$IEra46d9mI8FvDDCbI*1RWtV7d5Y&-|(aQ?ir z3)R}MHZb>AW`v!D1vklKjyOdX(SAaq0{z7GNJpmFCq;t`r7*Rp{}lKsR-$OZSh zN!)BE1v#=x+R^;<@yqu-v6PG_@(k- zIZW}xefo%Nl#djWeZ-;QV=548orBu~vGEg!l_+pkY13h}iQfRG_yQ|wbG5zV~WIJ~`K(GaokT6ql z7UytK26Ia>7=)rV>Mkr&l-Cr6m!HHuXy z%$K2}*w(`h?!0Og2=G=rIU2+e5eUX%X%{CX6RJ=FZocN02OPv}?~jWO)69(Q?g2Sw zd^W_n2Bgif%t!3H%cKUokc@gE6zc_$eR||Fg>)2>2nMo8isi8f#}hJ9OGp9Xp0=aH zpxT0#z@P4E`+7VXt#uB$g(@#=sFaQ?n@7b3u%&aiv;bbabgw3pJ4DC)1eb6R<7|YS zo07b^+s|DNd&xwCO$kFp73dJKG%J#V0*Q|UyeWs~eKoEuua}UzKkwdqQK1E*Gtg1U**vi2nIrws+;tEwrsDuC~ z^&qII*DGX^LK4A1b_F%&JVGXF3F&G~)+lOBk5^;b=$=4hKEGKsMziJ-)>&GgJ(Q9q zS@ZQ|2Wm38BboULYD_mrEvhlhOTQXJsbVgxNi@bVqV>h7POYyy!@g3+LCuu49FG-E zA)W$4Q)YLHrlkCimt|{gD0Yvawz3{L^P9p67w-twBp@g=Uq(L?<3vs+@Ybxmi66E{ zha#BkK>dy=TFyz`n&p_i5Pc5KT3m_)z_GFmF{it3aA{E|l1E}pKWjp*BoPec_@FM%rq#ePQv+fKEw1|C$Yh)*Ve2qc z!5u-xKT}|u*~@V%_?#QXcUaU5^M{ihv}_Co-oXsnSs#9P2u8M0D%l?` zDuEoQx^xIfaCGJ~)xit(5!Q?!Knc=qmG*U{cT>f1l>(S^kB9cTefM}=1~5#UvUkNc z3__#3P2bNRr4%hgksa->)_gakS~wxAl!?_Z<-bb}w%%LGO>I;>L!l*vB?baa3I;G)64gEY%2hPPQOPxeFFCB^!xO_ zap0h*7atn?*GbiDy32)WZ3CHN4CdLG5ok8c;WOPgXfnAY znfVEt?h@uhO*iw>*L3f%B*dLS7b>QEwlYfJ*`G^_SkR#`9V;(1hAe4svB>H>l@VrWMI{DeP4mfawHw+&8`(Op{2`CIjdI$xl4v!4{ zf@X0oN0)YOuR;-+P@vZqr1umdu}wA!LWLkH2#`dVklb=ubp8IQ19FyDzD z74l-1e9AHlNz^5vAengyD$CWJELCM;KKd%l&y|FD5?ID$D48l%*G5A$k>Yy{uM38D){aeg#D7+0TV;on!RmBCdY&%03HybK5Ta z`#4039T=rWZOQ(=t!8m3Hd!Ri_2E7=&;DMT$EK(GLqwy$KT!MoCFt*endt>uuXU7LaHGNIbWeOMDz#btnkYG)G) 
zTe9(hp!=xyt5*uG9EBn_~BMsODt|vYr#iQ0C;f+w8Lr#ilptDVHfi&CPUu zO5|1>*_SY@t&~c;3R0|D&dY7&wi{ez^Qy?zI4`D6oc-UBn%;-hjfjEv9t!Qv3s*16 z3+;Px7v8V#3GI7j4>KMMqeg>jTvsk?oTqBc+UcXlO%R_?Ps6~08c$RUtqb}d1O6i< zqndI-3u>&Z(XuVZM`Zow0+mN7%);OSu~I=hg6ztaUV%aq!9X?zRrx?dVt8jjTvhgs z;2R^V$`?-%RjwCy9#!54M4&Xx2K1E~^QrSk$TyP78_CQ~P@U(qWU9{0NndrY6B4s1 zSIDVJ)On^dqB>(xC8jqy^%RP|PKxo_y6s`U$laPvyukkJfI25HSe_y1%~}&YnASbn2b3t!g43RO?2ga9X7e-Lur#X_$^A&Foh8-sS@ z8A4(#VL;N`i5Vl2dvtjxTq$-!Gv=`qe+Cg60%QY*fTi%+iAqf-ZzMA}K|Apa=0fcR zbJEvN{76Y8oqw?tvz1Z$Zm;7aQ0(LUi&gMB|6i$$lV&D4cQaMcUFRQA3eoG#d|#Y@ z?Fl?GIFlHrbguK?$g+y_KN^L1W?U!Ef60I4{IBEmYX8pv-;fNISpyC!=bz2Rr!p}> z!9(gI6sCvqpdti&zJHGsT8=^y*r-cGP+guTBt{WQ5IXisxX=5qA;2* zkFd@ko%E5IC7-e!ha~6{IYXSxOHf%d0AZAc`RJ=GZ_^<*r>5cjqw+W{ER51COBQC# z;3IjQfA~nm^ZQ7n`%#t!m>lys|0!h=AE^Vf)bD--{C=}}^Q_d>fBkLVTn%xixgn`d zr0jS6lRHAcpJ-)zqE{dqzS8k2&;MF=nNCmRGHn|xrf{^nOsuRvrf^MACI1c_=rujj zbe{ixNQPebfL6-$?+xnV<-+*6UP2`VIGtWkQd$}Ok^)Ao05*DE7o>M8Au)siBtcv= z==1#VEm5sL=2FpW&6G#0j|LI-{F!^7Ru4uJRFZt5)yzpytKY>&AEVXGMPIFchC6|D zClehH)oNvwUaJoTLSN6nk5(|6f9ue*gb{k?#NC z|2Om@Rf`zt@F^+(|0eqX)8POAu7CeOfdXR~TdvB?I_jg$^McBJ7jU4=<5ihu->i3r z<`T-#uFa+b9y7OIcBA&)4s^6IelADVl)xGI1&#D*rIo8t1SS+l2^B$QK8TPQNEi@T znSH+hKTap|TIaHhMVWh~WFBSS9Yj=_nIWGtf6BSQxJ|xLX67WQ%q`q0ru{i{(N~${ zLSh!>3HdaMGS5^->HYpkenD-VEfU7pn~4X%UsN~w85K_Ebq16<>GzKn^k%IA@%ssw z*6-gw4g%qY6i)C$ioqm;9e*fMYk4NZxG<1~X9vF&6;Tg?IG2fWnBT!lfq2(qA__`X zk*>?{5Se?1i+|*JOwUcgH0FsB@g(1fWbp3=PiRvvlha(qd%8kpz+BVl0odqCA4@z~ zA+r^d2nMo`nm=_mIO9Zn5`^9U>&GCo{z>9{PF0=SW4!28c~GY=D-*?GMrZ{oJMW6{ zbzEbP+oBY>rw+`5dUZM=j9xJ>ef26wNX(*SA*mkKD`k{kuij?K;2v>C)~KeSzC4R$)t6B%eU+t}sc;ZcW!QKYiJuGF>9yp1LdIu{B;)f%uDAn4 zbi!+WM)#v7y9s);3Y%IYfN8a4+L)x4h`a+@^5qETxOE~efOT#YSGka5-;&@mfS~TV z#es7I1P$AjCFwwCr9eUS8bH>K7C^3drQ5T6c?yBFi2>TBq^*)Mz-ke7NN->ih68cFR{P$Y`AZoyZox*aC_c9;x+AwdNPFE3qplI!miL ze4N{f=rVqQ*}IqfK@f#5cOZ@T*$5_2P!|!|d&XrVxho4C#t?Yx^|0O0h$b4&VnZ$) z`Z^H9g6Q@>*#1vBP)3bKf+xLW%g%=mFJO!ss^f$b*W$6EMz?PK@2~a2HJr2Wq9*BWT@2jFf8*E 
zwyMb++n(KRTklc@t!iN~eJT4F?ym!^3-LUa%%1H~jg#kU-=eMojqpj83&o}iWNnw| zeKQfbmqyh$0_URC-N|!~a9x9ex&T}M3EK~`U&4(69g+o6aJc-Dq1%1^Tob$&96(C8 zOHZ?dl!zLNO&7;T8h$t)CslmISd?$W(FofQwF!qcKDuiluPFLEu@*B3fviV{E-x$Y z4$ZjwCsgjcB^GYMMpmwaU1~EKY?^?-?H z{@aw1Z%J^>kBerzmM9b(r*5+D;;WN7%ueMrKg-}aE;$jj>l6%kLZrm{M9JIt`@Q`dg)(jRGGUzkH8KS^47P4lr z9Dte^bW%SH3rUVBc;EwUWpIISz;omfv1Ty8E!_Pz!IH1h4Qp$cD|RvBZ0(dnd_!9^ zq14hVc~uolgP(i26?~HR2t}YGg<_YIxwRbiBb0n5s~Bm?>R3ZQaaBT?39>^qY)sNa zbzluO0|;1-MN=oZuRZ)ry!Dw~LZ+6AN3K40+~b?HAjl6vL$TeMzxYg*#7ySdOhWnm z@OMZ(E&~=YAFog+D5{rH5)OzR?QYRt&$6LW=EK% zXE$9685e@=u=ScNjitE2MH$^ryox@p2A^Shy+fX zzK7GrDJ+!fVBzHA^uy_x6b?BN37j^652t_oh6j1VDT#yXaVz#EflGSUgbUlR^a^q0 z$KyH3@W-lV54UxW2DgYaefVNyv)9=s*i7d4Vc4R zRRRTr#43+d<3dJ^H@X)VgP4rMKQ&?09 zMdMf>f0*QLv~du$7jav0DAo?DW$_6p)Dwg;y$niI=*Mt-e~a0O&(-|TC6vJlc-1cr zg`I|1{o_&MRnrx%Sq7=&ydh6V|4;t(#$(d@)9)erfA^>FO;5BE(eS5FP5IN)eEzid zB_7Rs`RB-m@oVF+NF5k|T5Hq;I^q;&IF`+tcKvBqp{h;}<6)jQpC$gZg3ao~@HAmK zOQ`$&Y4Y~^)58g9RSSdZOUDZzzdt<$5E1*rhCa?Tfm6=(=a`8pvnSL?;9uTJo8fppPq9@A0)h{ngA?*2hkdDT zv=MBhaihr;C(!=Fp>sc8^mrD}o|&u1?MV#Q0($HBqK_7+*?9^@V1@E~(c93#;EEH9 z$>^j;BZv=zrAjQL0}IDl@y)KRDwGe|o6GFMr!@|>-|=i@qYe7eL=45MBq`}flNJ2v zIwVD`fFFHs3Nq=qVJ;l5RvY|i!GNHWJ{BR~fa8YkaQmc~qwa|k&Z-B4pdbC=$!;dY z@g_o`;<3n14*udi14yVxw))Wm;$e+4Aw3cwH815vv#bz+%sT?qZNI-z$R;?#ECE3J zO!FSBAT4rUTD}Ptv2JOBPlrinYlaqN4NgI#4&)0J;(EC*biL$XUFeb4eXOKDzVi#1 z#7X+P@SUkXq1f(-RNq-h_?+2KPE7gE4P+2k)l2!#Zzk!X3YLUo&j3MvXM!__t^_P3 zL3JC2jA&gS-}y3tB+cMuhaUC2D0zuYCB2}~A#rsS;O6ZewUU)6Sy}u!f$(x|M>3{1>59r-pZ}oM!~LHL6xbf0v;IQfu88A= zhsZUIUd)FMXQ7eE-2Q}D(d7oSdul#GCL;m){qxUpk{$Ohg3aV#1SL31h@IAl$xy_H zaGr5UxxviVxX;m4;6g-bn;g=mV>i01i1X0Mq^QR}lP zZt~n&EOnzxgPtZfTOeCG%w%c#H4Gg-dSE6fH5oplxGyOwuDbg2b{xPU;7i+GjpB+e z27{aEgeP${}m@PP53l@8*l+1hqArRE^?Q1at92C7(+eRN1LQTQ~Z8D`CaaqIe%K zbrr9I>%!Rf86;={5{f6gcMQ|I^f1jChH3lB(r_|@lUFq|QQW$QoD8XkAgP}SyJqf| zWBzX!QVT}|%$YXgJLa${NcK{eMfWB{-|C6!E$RtKhQ8GyUW}U2x8+Oz8+{x5qwm$X zCyw;!+lrHRp>KN$vA+7Y?U1Ct4f?2GeX9%>`)yCTpl?S*1dTyyo{Nz$l&t|}H_CQ2 
zG6=}}Ahqj7?ya&rz|>7sklJ2Kt-#dH+7pFTM-c3XtPeY@Zn3Ld=Je>AAY_*iY|S7+ z!?YzmOmmK5daW{bf?!jXl$@I~q}qZoztQ~j7#4NwP$9Lj9AM57XNhjL1;ug@P#~@< z#6!2_FrZhrW~1(*TMeKny7ji&vg_N7Ze42jo%L(aPB{2`_3OAJJo*`m(SVUjD*>Y5>Y(~F2gA#pI`t;^`a*j$m&DTy4rKk!*js+x?;^sxJ zQnblo$G%r>W~i0mX1=6Sj1N-VPxhcMp4Q&xy*32Rad7#;1=H{*wXC%KK_gQav zXs=y&+JKArHQ*@c=7M}j<8d&pRm0nVMY3XI9mtVS6_Uo5czgpOR(u*k4p;gRw_0q!b zt%G$!uYVFbf)cYw}6BEjTk&Gj1>;c`|i%#5EYBNWG%@yRl_ zQpG{WkJ60y>?RpsgUWF}7yF}!evO9Va=0{EeFN?VvMNf(tjdY-zMJwM1(@cP3e|lw zlp5nhsi%?exN_&rB7G432u0Z339MhHz*`R+yXv2Qj{-M$>-Q+WJBU4$6w; z8^H;SG_zJs;@P?PbIE?Ob!h+S*}CT+f<>>#*(zJBN!#H+t?$SRM&v9ojiOUd&YTE=Kx^X;`; z-=UL$2sl?BwaSy<-{^k!VYK7plKbOCFRn`Ig*`s2`}3z+`870)8Foch_oI({-*@Z2 z-}^p(x9$aA`0<0g|KfeG7~FlO2R^wFlzwa2J;*YOqPwR4oYd#Qd2-o0L%75-Silaw+O1)rl=G$U*qkZ#jj!K49Ip*c$bCBz}?BEMLoQN z33n{<#CmBBF4TWf7_W+0xZ3Y4>>OM-7#B#-)UAUz;o1dR7#LafCh(SwOx@j7ip!fu z)__A1%8{P(kCB3v$)j;{-R$(c5qB@V7OGjbhBkahgxd;&EN}gOmNh?v!neY5W;n&@ z!g2YO)?Zm{`Q%eTm$y4xE1m5>p~!3%uK^Yt!blxAhJ^4 zWR*aa**%-0HtxToHlr`K8xaM$@?;xs1!%*q0290LemPDBjfa4@hGJ(yK;18*)?xYz zu?Gz|9`HeH-R^F=%kjl{R!?Wk*x{c1Mv=};!lkf!{F_*lO9i3W6^Bc=Mb+fx8L3^Y z$U~g>9^P&evliFg(H0Q&Tkc&iHLg1Cp22=Q z4aK3%iFCW9s6J9!ig^GOisFHR;`^ZJoOc9rdoi+{vS%s#l5VN|T#)hD7X?Y$ml=OR zuZ)W6!L|q}2Ooar==8;Sl;fp5R`WgiqchobW)d!?98E#2u%S5AjQHA={fB%i z`fDR^%Ixr5M>KaX!BSEfZIG}drV#q53GSj_C}t^cm*NsSna!3ilo!8gvgP*Q*~k-i zrqpNwd4MJ!BSo&l7B3CeyaIKGU8h!w{>r_nVf(+jY~)B}qUJ5{y(Rl|Vq16og_;ei z;fKPB{JtwtUWX+8q+UUc*^>ebm4JnsMhkT_EEG3;uhDx;(zVwXwma>}(X-J$bC`Kn za1rRSAEv-8JCChCE?b!C^I9S#j_@wVx+1y&P48HE_MT6*WxuFPzQ5l zH?-l={Y!BrN;25*3`_MpF=eX#&MghH+83W*0R=;kGf|q2zsHHqB(?Bb_$uOu20D+Y z$&y{==L7$*oyX6S_W-q{5QVCMbDr!x%2_oxqG~2e=TWohLmDtyl}z#bue!b(_B*Y4 zlHY_g2mP<{t7{AWTSZbkbp{?lb8=Zj#F|T(R3pkU7-&Q}=^D{rejteTKqP4T7W7n> zwxF|ECEQNW7DRH%7PJ_7b^V9`sU2O8eE5~W3^D&xI~v@F)PLafqjL7g-Y(OQ#<8-l zLSsGBtb1ZTicTEhGJWyxY(BV`H{3GP4Bxy z`54SqyHsQ!j^Pz-Pg^{r601)5*bQtQ-J?DLC-}U>jL&05Ul!Nx zgF%P)52(@ZWpI5a)Tj4K|6l#xX5>FT<%f}i+BqoY?_RF{?#al`|3Ch21YLovug-Oc z!Y8=p#}YLH-te#ZJ^}B}`3zmc(AfmLkk(| 
zVCY4LS{ZtZp_dqXgrUDMG>f4|hT;s(W@tJ?_b_xTLo*nyCsP-Y?*WHM zgn)M|NlxoHQ^A-grT?jMO+;9(Va`N43lVl&`1c}Xt<}!cnp)4)8;2}#- zFdMgXrWACwGY%PJ$?lcsR_AQnj2W%z8{H#ae z#e6$&GCB{%XY@O6^a5}6LPpCd?9W~zPaxX4Ph})>S%#cBRg6HkTTC{a82L2+ol(GCfkAK%f0M3#4Be6^6 zxxqPrk%b1KfM7@P@5lIq;6>Q`F^t^q!GtXQsZ?Icb)}MUT9Yzlo&Sj`B8w*}XZ>D? zvjUuw;c3*4_(IW|7d`P$YM7Hy&O;1iX))}Ujc;P<);Ki5k8b>!dYdWZ}1skSWd13 zJe-;l?`+g8iDrG`$=vRQsx$bRW72hMlLV1j&C5Ha|hBMa2Juq<`i_f$v;RVn!_=LfE;GfA`^!B!&V+<$JuRipD685FX4-Kyo{wp~*sT zMwr;^8Hcow@N_;@0FlI78kQ38#+;#wS_w3Sl!38PISBA48eYS!Kd6Wu4BH5kOUyi{ z%JVa4io%G-Ut`j)FliTg(#lL)%u|}%G+apf*CWh|JrZzE31xM#VoSNt^FR(4$PLbS z2P(#9JRu9o!Dkv4oP!ldiXGDML~p^=p3f<~l=V@G z+u*!Mz9u7YAWXbkg%bfU@T5PdVd3$pz<%aj3|MCh%h$xe>-hHs|4KbXjQ->?b4mR< zOfvA9GeqGeQ+rF?1}Dp;Z9hOVCutiIcD~@>XBsOadLLmH@okM0$b^O+a-;sBcoM%s z!_uJttT3WCt2JKet?6t z*k{hsChc%MogXPcIde+=A=2&|CpkM^xiv6-;}4`HqO_lAcs%3Z)o>%juWPu8;l&!B z$nboGS+UP*oMh-v41?c;Cha~?TFj(P^~6mwaMzf$E99AQE;7&a`HU}=e8*H6BGF=v zmm0X9@!p;{6m=(pHF%(i%ffF+!x>^cJcv)U7Z!6L4ReR>bxf4OfuXWeO|tzchG^ zul2zEPQ#L?<2>+JdT9Po<3(J1Xjtg)*iZSN3|QvlIO0GSKT%wPKS;xp_k9dpJMfZi zdI7`m2scSJt#&@gQ_3&z3FnU<*f+GQgzgND7rM6~Oq}No&J%b#i;+?14o`0D6@5V6Xv;(xsc?%i6urL0;U}!M^mVmyqoqsL-+swb`_}9(9$M6RUc93Hw5R)4A z3{VknAeVB)Q!3*<=}`>}UX{X@lg4BXi);!MMzrw|PdG=z0zXK@^~5bwT%q|>gvs+` zwhH}9u9NGby_W~JO<}T0<0TDCZ8|6$X>@5BCH)-@OWo8dtc35-u<)+%(EGKAUW1`m zuQa4wBQ#y|U#aoZ0)A`4B0mlHGCWqpBG<5n=P`a;NXl8qaG}Oa8S*t;!T4MaJ1pm3 z2s?u{Vj>g2gCLzPD$Pnle6HaphCkGBBLUws@eH?XIG5oC8Ww%KL37ha+$%L)#qewo z%r87JXLw*v(6B?AKQUqAUaYuMs&h3g^am)6w2PB9UfSi+3`4gMm*;BdAbGBJ-r7eb z6_)&KxRPP9pYer~&#$yrgyuR83(ZvsI~$duFkhyLlG&Fv+(u&0YIqI9f6{OV!xtz+ zscuW-TNpo8!y@=e8lK4b-)LCe_KO%sfzH>sFolvudWbt+vgmW#Q@r6=Pmua}3>H8sTPMJo`BfnA&mot14 z!^ros8dpe!!%W-|JelWxHLZkc%l0DtT4%e~O9g3c)NnS#pBVZ6%ai`5hC7(P%me$f zz^-&A3C-2cQ}Uj0u8{YY&VzV&#`48@+F=cg&`$G&D*(uhP80&GopO1ub#fI(1iKf) zgc)IAPBS=P?vSvW_-LjMrL#TN>04#GO7^1zO zRPlVxyw8yLtd$KDH}byHxm2EOoeRvfj88N2n=XV_JGb#^hX2=VSh9YF zh8@cL0uA>t9MNzui@F`El*Aq8!95aTn(%*VdL3ckM3`~QHBRwWw37aUNq@?dK1aiA 
zh<~4kyQKa-)%LPf$p&YVrb`}wqv7#{eNbWNF>D*yk(%B>y2S`HU&A!6f^kQhxF2bp z&^%x1NP3n@-@dz)U(z>fxR-pgAZMpUg?KpDh(UMYyg zKhk)iyjsIego%5&EHb!sAz#$6bQM=CjKKd&!&Vn7tki*E9SWTS$E2M26RB zycFjj2(z+kG`)q~D-mW~g~o}pJf=93KHj8{_N0%{uyh(lCcRMOnn*KW!yV+Gt6|Ak z)Z~RQ86KFg2TOhh#?f##akl}+G4d^E#Ck^j#3SVQJXD@jOeys`g^^OPVi@x!Z^$#@ z)GC}*%mRh0Ao1rkERuax!#&LDg&z33J@9vGSTyt|4U65M;DI?vaixaN)p&}O7`^a;c)GpsYXg$KK&J>|t<$R+l3M>l!{b?- zT^ep=_#F)wGSeLhGt+Gvm&>@xS&~=D&O{A&G5!(__mIYD4Lb~v@WdA(EbT|>35{QC zSYXF{;`dTK(Xb4JU1@L5WCmnPxRukz;Y7whCwGm+ooBh=$C?@ji0)_K=F3`V5NJ9( zS$7BV?*RUFf}XP{L+#2%O7=P^lgu8xR*tJkYUuq zeez5=5#pjTO~<=)n!K!aCM&``B23h<$l($VOVb_gff?a}De}PllwlD6n97IT9)fpg zE0{PZuykJvyM(h$k`vB$78He>FYjBOF9|fyBz8&SYNtWoS333bT>`cG zCaJv{5!MKi**2g%r)WeaA=WFs4u*Fko{C5LfaG}ccpA+EHwJefKUAOjKgGUJ>(%ac z@=SE!b}ZkQbw6+%pNZ~ck5{<&&2vnlzMpUKe=-Fn*OMHp7b+) zKf^qinfQo#zuY{pG0(Vp?suZ%9c-Rgn)sia_XEuHcjkGHp}QDxEZ;qd=ksfngU`(d z?)j6H?n%dJ{Cjw3xrd#s&)rVZ^lR~Cxu%)-eTo#m3UJV{?i_hfbQj2TS@);r{nTRb z_OJR2!qz9H<4P}#SUXBDh*+TV zk*_*`%K(qq`{DSMd7+sLxWX6P5}Nq}0#SR=$x-Y2#*xEt0ma9_n0JnSNMXd<5{{2- zI@kVbrL$uD7OXyRXbWG}iETF5ymj@22@@9YI5fQY+rx)$vVU-Y=37HM<;&;zjJT6! zE}%rj=Frh?n?sjnJuqYlzwi|EI%~1{`|!|A)RlI9Qw_5%YQwzV{bzr6&0AA`i5vE+69Xn# zXYU9XublE$1R}-ex3c1Ir?hsT4=F(kDA<&FEJA!_V>t6e#MEQMXl9r@unr(d^{Zje zX%Xk=F#BxeC*V23)vu^MrM~on(yesJZsBS+(JI3E}j9&$0lO80QQv^ zzLc9$W^Iimc6;&e`K7q2rvw*1WJhor9TKrSA8J1;2Pa*)b9N(s2o6FJ`zAKa#Y+oW zfyHlxVlN@UJ6fom?;s2bAK9}ym(L%)gXj7Kal23TvP@{m0PKz#5H0>HRP(gdvV3^h ze8da;#*EN9?#Sk+W*-1ZY3Of*@)vjIObFf3;cnE?4|U;dqAFtp&Z~$*e;cU4ots0K zycQb!cIfzZp|LA&9R!trEp&WGsP<-x(mt!}Wp0^R@F*JFBxnm2e zV``OCjKG3D31FWzHH)eIcycB1oe$Bt%r-Z({VtXHPGlZCi_c7Ep1lQd*cLTw&gRg? 
z%bbJJwihrDpnkbhhk0^pK-n#)ZtQc*X()dQB$Sf`(c>u19?)!e5<8J&Y@;I0bCNk; z>J&*ZM}W$|`JG%g2+@B8J^DWg`aekYzZ$Tv3W-7|RcK(5QRo{<*IKi=G?cSf zzU~_JX}$}1hVtD%z0TiKolf7^XBvIKiISyQBM2n*-5%i6_piR~Pu~Z^!il~g#muHu z9hts(P`+~#R4=LJw<%3(`CQOTujMYiwNMzN;?V7Z&U(~$QnznR=5DDoR&pk~eVb?$ z3Q_^4Mz>Gdg>K7!N7d`Msbbw*Rj>bz=lALLi~Z{L@3&B#{pxj%(d#?N1WYH(Q}wz9 zMRbO<6i9dO8l1FFif2ait7gxBr!o>wHVl9niCU8}jTnTA)M$zGWH!@$sa>WOP ztxdkenI}ncabjk-=+M`&{W5efw@^lgcHlVJ{@G>d-0yt|LW*SeVxMy;_7>$8kj}SA zr*~x?>|^+Av5B|mL@m^f`ZDgOQ=V^%T1YE?oVpc??TrYwO`LfZEx}n9r}iou?Y=3uCXfd?oW4^b3!$P(q2RY-OJ1DM!Q>OA$eT9!Y_bG7p>Efp-yo5X}8xg1aD zRf&{(T2w`0Tl1x|?tu(ZAEvTIYh!Kbsk^!#!hH)9LN{hz{1~hZtP8z>q+&$kBl0WT zA%OQF0H*+q$tVv`=U2>yefo&)xf#x(tRUoMumE8H6Ey?xr!Yq+NcPq`pCgh$>+nqZ zyJ0)lgz~A&`?ax$a$}qonzZmc z@5)UD4@m{{7);qxTsyr7wLL5Qjzh$LXY=@H*g|g1SVNA7)wE6CEi`j8_P?TGWaGTj zSQz#+5dQ!KPmS(gH%8KVN&#n@*5Ts=LZ_&6T6PUa2ahPF-G$Qbq}1Pu-U>r;kGe4m z*AK|whLl^kj* zr{72RwnaA@Nk?_jq(~aIeO-r#SfSXt0NNUv0*RWbyD8pxshYW5-Zj~1@r>U z2SC)mIWL?Tju|}~S3hN22^1hbkLjS%7^HDDVG_d&Bpp3L&0lzSyQLQ@jNi0yac5}8 z3yiXGGTWBByR7+j(jsQnjRoY{#-LBx*bfFcXfuICZa zcKQ@)HCO>s41={b93PWQ1a`iPl;QTlFB1~C$3d_w_2zT2Forg0l>4})5bFf-j{Mw+ zwU96d!fMgBma#mtv0@c0zgxD<2rj&v zXynas1;QdcZ~g5<>bRr{sAVWgg|BkAV*m!W6U&a7ER;mRUTjAJDo}e9P3?;Gy)c~$ zQwf*~15<7?>grz$VG_eDnO=tBM@C(X;+M+R0`lnuobts9+Z7Q@&f_=Ap^U_v-^R$9*r&1d!9vY(g~7l!P$VgEQ1^ zIiY>SI8(9%a{*Mf_$;kukBvEi!bOch8^zJFViSYfSd(7`F}Mm@lV7K=eKckYD?h1| zFDaN;qpo1OT}^6b$EdHLPPIoVm~cr5G!V!?sVlDf6$s=j^-NAw6M;X|^SDrd9U#es z;%`_RGwOV-K9zvgRsIGll<|$-{PhL{}>pI#2-g_@I!dT7{M z1)bp`diVSEtEjS^QsrrMDCIfuI+EX~Tq0lfDW)HyJ(Ejn+?lyLsp@k2c`}a zj_$&{oPG$I;GG8C)$m_aqIE7AOFhum(9Q7{M$j0 zVurp(i1eI28TCVAWiqrELS|_&;Vk;nyf}}4ick*tp+9iuF`6&VOhjYyg6iZfA=#-w zr_VeeFGyf_=RCzQ;{GI0?0>u!voQ6y{&omqBFh4^X8L}_P;{RD zcgnShE0+@Aj-JYM@2aey#BZ*|`A8nG%yPOvV!=K_{>3f$eB-w)i&XVxJ>TwhUIuf@ z>U`8i6z5k!l#}D7W}vse9nj?N)T0vJT>c&WDFYAhDJLV2bi?tR3j*=C^AcUn&qc_O zGXhLGx%z=eB847>$=9&tG8f_GeHzL6PmG5~qqpIB$P9+oh_=Uct|FEzT~XF2_u=+1 zowAdL2?-BLw*Iy91d&aE&mWm>5H*L-@4 
zshK04G~)OuHD{ElO->nwi@yuiyhGC;e^#8a>b62mkLIAW$tE{cUc@frD@UDW_HQt3 zvBL;cub{jtd5!cyQ=?BCE&Tzld^duR@_vZo^C(*2NZH69j(QzadHN(9t;RZpt z8Q;&wEiSiqMTTxM2&qq0ZE`mJo0_*6TEpT-GdFsaCKNlABrpdOv7+fmdfz}i5qoN0 zsCf*e*LSSQH5w4ump2r_)9;lICf!i*2_P7KO`-Ok$i+vJ^gBl6k3v))xaxNT{TPYCCNC5BLd!X%&H_dkY7 z*1GU9!%=ikH?}9@(Y;ZmoToreGWw)P63`H)Kz^zsBbmfN{8ZK3iZY)=3gL-;Nh{nxTC~v3MyQ5+EL5cR51X-L|Lm z6Pk!&>$dEycMfK70zQ}Iyoq<`3%(xpG5*$x&M#y!on|D_c7KHgnzB#vq;>hwJZGEq zH%xlF#BXr!L_8X`(71`@Uk6c{4|1x(()1TnAWJMucY?QW>Jkt;sj~ZFIpPahJkv3S z7>m1&+%y1Z`C2laLsNi7<;lnSt9(iUs@hz2V4B~Q!%-%vSHaGl^{+d7 zFrWWn;Ze}-@?5$eCL`hW;Kf;iqB);R!&>Xy`H6ROo%NNppRVDoPK+>Zhl}*dOE%29 zuDeRH4ffLM1hrDEY7~KmwKzU}#Dk?C2@NW97nXh zu#IHv+c#%N?J+qc;dOM2f}Ai&ND ztE?%CufbUDBdu)_Yvst;$Dx@!Y5B0Zz_rqO;)dUeS{5tk#*d+t<7Wn=PZ8mthl%CE z<`W>1(DTb&A>;_wl%{BWT()blH-RUv1%V;;8-ssJUcka1&1L1VeO`9dW#N*aEg0uz zgXhXfb#G=U_9QG)gf|Ap=7?>&U>=r28(|NYV|&5fk*nSb+o#oH8U}OUwaNZ2Y0u5t zu-@bNitv^vQ+MZEe9C(KV)JofECHbSREgaFGnV({HbbcA_JEJ z`frvo3(zk1PQf0JRT}@a6~E;XiSHIFSRA(T<=CfQ-iATgozla29kIy*);Ipoh)-8( zjJC(2((oD6PL>J+eBG_;BEdQv$x}LzkU;k?Qroqh{sk+GrRc$t>I8(abkK<6saQLC zfacu4%_8Yvv0@^N*A0j)?i!#K^n+Tc;ym!G8@8PLKO}3R<|ejhys5xTih9hZ85vPq zy{MT1S~CXFR3HJfFch3}8R}(l^Yhupi*0eoz7OkGfInLM3>Z>p1wO{-Z26ETb!vn2B?6AazkvYauD$Qx)92d+0fiMz zZ0Fx%#>A)AJ1r)uiLp=f?+O0>Nut*}FN1>f694XH^xgc6@vn-14>94wf8p;wgxvip z*rHT=LDYV>6Gbq)RHlDXAa$5E`Yx_tKHGsLov5EHh9+xbb1vf7M-n4-Lf*DBfDo}p zX7f9HVLR&)b{W>loP>O3k9$=T)yd_`M0IktGEtqJmrqnDr{xpX$yxbCb#hWZQ7v=w zn20Yc{xnpx4ZOg?ArG-cL0nRoVn%s3Q#4JmT*_t4&Gac^kGd|?hh&roqmEEYgGD_lt& z`dt}(=^33gRq*?QHfb4P!`5`^SnZJ|%#E3^XjgYOb;S=CNW=Dh;xNnoRMfYt&v!GNF*52tk%G(WCUafzafVV5>Wu^|n`4O_Q08je9n|E&S2i-D)*2L&8v zyoGx-adjs(5?{APU8t`nY8Kj8vy`MeK!bbnyZm(>PGYMX9M#Lad8WtdV3@ zB-LmbK%qe>f0w59re^m=HzHsw%hB_jkUv)AQ3pb3}bd*08~EcQ90BY5+1q< zG6I%>Z+}+=$Z$|Vw}OqRBnHjL2c>kDBu>u9!mElMmk-_dPQSDcmMnK*n_!uBqHJk# zRVy1wly?rBPImY86gm8|ve^5Re;T#kOJ?*P5NEr=t;eV3OWR{3TZ+nO-q8}cl3u(D zeVDI&b5eO?Wq0o*Lfcj+rg*~S|-YuxK zz3+?(+jA<|bflIJ<&`(IBw}H`JUX zFrOw@Khgi{^5=5^;*E}HtlwMp@8eMHI>gCSo 
zGr_Hu!W-^ly1emHEx8?eS{G-7l4B3Ikh0kJ11PcWHPS(_22e=C)vbrg`2uM zjBT&ZUT;D}ilrh$u{`EetAp80)&YW?oW;Xuy{Zs_`B``pXmq1do7lcFUMh7yEw^(% z^a?hGVQgU06c~~ov_V4q>SiZMCF?n2oe_#{K?FXfg}!kdMCC8T5~f)f!j`OyTJ6p? z|D=wN9c^nzwIRWpw4jM_41#y#QNIb}Ef3mQUfR~UXLouTh+kC2hRAISi7MAGs&f6JD%UTn z62Azo)cGqu5sm3rUo{MrykMu0eRD3v1^#GQ?ttDXEvh1e`e5NZckNi|Lbd0Gt()Pr zC3`z;zQQ2T)07_|SR5P6?Wt%jv0mgoXa#N*O1G4h#R($X(XL%5dylF}^-h=&4tK$> zJjDtkmHe5}7-s+*L$NnNEF8Z*8(5iBE`d@j#~zG!iE4I6xVC|ptAUOJeIx6TXmH*M zwxSMWMF3fMAW5&^NA?+F3pv(z5L<@L2#wKzTQGq6z!;f5TNS668daoka)|6MuzpPT znb?FpT)1-Nna-3f1DKc+FiLAn-X3#;q_mW*@*8wM#ju z?6J68lh2+cIXRBLqJU6Q>sy7$^cl{hz(GCLn+ptuU_I|ekod?vfE7R$y3(iu6|XFQ zJ$5LJ$cHhBtsxAUy87_jd~_+>p${`@0O413nfv<)IH-qy^w9|o!lyk@m+fN|gWWBl)8!OqEMg0?@ z`E-lB6-`|aFJgWA>s%@u+*z?a|AJdIkt=808_WWxljW6K9RZ zdj-ikrz86&$(MI71Cv5`j1vX8?FmJ2+FmE{LQ24S31Mdg|31gxqBc~H)3g%l;?DMT z-;E_&=Xnv%2IooPA_aGT1*5>3)-f~=sfpjQcVOc8Ww$Ghi>`#NI(95ZM)=HY3L6J@!au-KD%BhJ_HY}lXi zte=qDt?fCFAd?-1V4#y7n8rXCKMzty7hir$a!2B^w7B>l=~A9Z_$oQkQD!e}1f)!K z*jay}uE-+ONObRA|ECSwx>s8|cklkf=U`JJ@j>}vyArL$KaqI~vV`%+&e1Thf6tfY z3N>tQyT}m>U88AsCFc436xNeEdb`olcuAj0z~#bvZ2su5JwwJG_JS%(g?oHFUEUvQ z^>`-gZUI8^-HF+JCY^dWu|hSMqS2SxW#z%~=~fy8>pahRTeze;371@lm<0!HtVv5=gQcCXI^iUcBNbYow*QqQwr1r%hhNN(D z9kD_+nZl`_KoR>qOwV)=EVIT{wNK}84n6&Ojf~RQ)b2bfTU=I$Bi6J=1N|~+Wkict z_baEvnC;N>+%uIO!Ev~?VS7OXa9}XylM^_4klLa;BDqKPQ)aaJegal~yT|04s=nPb zRP&&`4GPsXAuz11>f5ZzlcL4zL$Q6q1IT&&yKh2+ zbJy>NZR!3wOoRK5dEBhMusn|MS8yCMb;Zt|>+eUqXZ+XL(`@3WWMyV_-wHyE{~#5A zJqAYImm@yepD_LJsr1ta0PjqsJNtLQP#w1%e?#%d8G*y_T-1m>I)l+SC+8g&%Dlr` zXCOg$0Ev?g&Fy&END5!zk0X%B7_$~HoaT7)mee$d5fCkRQaF+LEHTsrGP>VE*}3`p zanfq!-$Oz$;ryO2Gx=xnuZDlq@Yjz$eZ0+#DzbhMG*aPuhJgcO_I{@Nv=;kWj?Jltcqpf)JS#S^Nd&BxIf#7$i}e zsL4-GM4*9aJ85ztKc*yyLkx33#Gw&%kFOjR$t$b{->E;k!Mi4(qod4C8QAw-lh1kM zO(x8l<77C`6VBJLuq^O|f9eSrdcr3ntb4_4@{2t2r+MN_Jn9_263RK_tD z%ZZAg1aeo%4rDx*dZO{;!@3?g!g{uz_%E{tF}MEZ-pt>vsQb!tm&N=_4~zNt2#ZAm zXp73kN0*1~muVGbE4#-e{z0JT3zYSHesI>45$xGLq-a-; 
zr}L&0>Uf>q$yiTJKz0X`g#AhrNyJOW2SvMiFG=oXvQ;fm);DIh-e=91DWGJ-wSbt?R3ENG% zkV({jF$dNlp}T)k*!CH_v2vUtE4D)_)fHkYHVs=&3TV{)q$moUi)gy0pe7$0X|EbU ztO=HGkW?u}t*1%bRfttQsbRk;pOwV|4U&)n)Bu|3{6>>WElQ@_^UelN)PALs{1TEI zpuiscyWNRGxw=ZB2uvtsceIf8xc?^zy{8LNBa2{%xY?Ch1UJ(c?cxq(qUGT#;D5+(g=_!kNv&)|FQNZ@KqJp{|OL@xV&g> z#fmjnR1g=a(k3qTMHXK)(O9kGQjJ;>w@L*S#cJT?g*>kpV--XZOkHuODrrOs55xqm zO0+1c8>t&NqJk?Ct^B{=bLOt^C0c)fJ|FVt&YU?jbLN~gXJ*dKNHsC&j!(LRa5b}Y zjQkpkJV~N4tNKRkp09lr{FpQ4h#g(>y!b}Hf_IRls;6#%CeG-ycd;;&C1kb zvF2w|Za20^L)F=Y;}Tr*#p(AkBi<-`vb!6F!r2W315XTR71ECj+3lxF@Kx3-3usv6 z`7Cjc$SBc?Xc+s+PA-&Of`f@*4tb~vztRYoNk*q{qXO_HegX4?5q_WoKGfQ=ZS<08V%%ZFvV3uQ80eFM4xb#y(v?#L-V#&fRQq2&vboUQ3i}ck0 z&Mfx>Ne*WDE21G>QD&LMBJsu(YD}KtH5C~UdLv1gew-u(&s5>3Mc9bd@!3Ub1ja!( z=%(WM^xuU6ESo()UR4OR$~a)~nM9xdaz`GE=si{PmMmF5UaZ`~d_H#&XD;seo8(D$ z0~p-Fx|@Dib_#URg$gO_oo$lAK*j}i0CFTC!66d^9Ku{VIix1SA^SWf91_)KuXFX# zN=Z>^C9!>SSd}~okV@_V-6Ts66t%LX=wS@1bn0ACF-RBU1DZtqf1_Lo!0eD zHNHwDZ`1u6Sim18%i^*5tvGoxD-*A6g}0(sjueaXNH=G_R*KY@t)%o#r1WAkgv$)9 zh~Wl6)&LS0j~M;dEK`n%UcWLi-!#N}!-njWQq0-$I7Lrhcr<6}*JhoUW z=xY+rGD4FE!Z`>_eBre1kcQJVD6_&kb0Dw6u#f40f8fjm`>?Yot;P-tTggq5^t@_> zKq#0-tYtETMbdkQAXJ`yb98!rz2K=#zlz0w60I`*68E97!X#~iJVgDh&VlL6dG^Y( z^yz~;q6S6Wq-H=rmTc0wfaJT5{mcM0SEjLM8b$FrTEPMqR;DKpZjPp^*TL0;-!xsx zbec`LSLQ$cSkB{|i@PJ+i+pxin09p_Rn&Sa2G(hm# z((z064ly-)K)fnv#B;nZ=J4Crurse7yufLI7$={}FP*2>@E%}#_@U$dH*!H#xNjs2 zTPoGq7~BV?#aX2w+g{JfkM+1{A8f!EErOabl?sBLs;CxHzzwQI6xB#YC1VIsJ^O^9 z5+Vdq^;J|81E{hLr#_0RhA=>Nm7o$6)466jeeLIsW9e`W#!oE8Mcr~tr)0_kbtuzx zd~>Qo?TA@v?O@K4B*4)?0!411*bQ)wgn2!umVtS_^T0naGrfm@8u5p7;W_-c>v6Vl z3Axd^1!3o%X8idJlcw;`B>wpm{s4#)=v)dQE(qTCq{NXl&P)wAGCZB(benUP`Mngs z&c){ATz*vZ!xXs_FzXqVpMxv7S9m1KM=Cqn`Kn3F`UJmj+&dbU3cqI3U&62RocUOW zUq#urSW#}!>a(+*yO0W0Qq@~DEb*4foeZR3fv|I_Nw@&NiguzwOZov7JKm|#Y(kB= zh9&-BlY75FdNIPzuKaTnX}tqO+w#v*{OrTff5E~|2`Pb<-r_(ajB=J)@6aWJ9vI4Fy12_? 
zNg)&y93K0d1vs>2j3-+p(C6YWujNtW(T|R;}AwQ1-?2Eo)8F6M40O-&dp#%-f{3P)k?Z;F=qRu`fi}Qvw)hzevk?g zAT)WLX92D=spJxNUf=O!(l&`5yWw@_oVLCinijS*{fhGz<0VCO3C8%fz}fW{xEV?r zlO>r?34U(L)R?lt11$SLZ=>w}ByO#9A_c`tk#W?Q4FVD8v=)rqhr3@$NR--SGwg^C zxr8+(j0&QIFKseErhCNp>PFsV`x$?BA5QCQkrlf209~Bm-M*Iwy__brvw^_^;Nscb* zr38gdXoD6Ws)UcF&i1ialx~^~7t!#ZP@AMD$#4mKV!kuyK`EGQOyMrKuMOnj;Lcc_x}Pl1^36v#XpSu z2^yR68Ww25{ngB;?$FuX&%{{juc!_K^ z{~UP?RA}sTe_sd_G;fgmGd**Em7s`lKjCAk{eA2?xj#=@l_bLn_usZ4%Kfqgp$tOq zZ;W#Pg*88h^&55!v3^l3bva@%%WkMhPpT1>e+a6U&GbW2K`?y{nfHe=JwanLx|Hb~ znXf0NXJRb1SV%UtMmhdUE*$s5@wJ5h9*#c^Fyb{-6J__OF=)eUs2;q|YC_Yq4!L;! zIzo!)P`wn_gPK1BV9Mx3l#|g{g3!IG`9?|aNzGq*?~i5lr(!I3YK_n3yFq9oT)whr zF27vzN4T8uvD66$HCcR*sDfXK5j$axY7qWjd{30W2XYMb`Fmj&e`kISe?JX&bB&2g z>*Rs4)SbeBlVCYa!u1(Y>br|25jAPbQ9yV4TClONk8F}o?9@%ojQGkK z4IyzEJ9cUYwo5zt8tnqFne7|g_*Hlq4vS8ZY@+IMmaT?>QHR>Y4d$HeJy>{u6bvL7 zFK^r#V(%bp>u3@%ToMHxQmIrYXQM$wa2dDNaG4((pRlG0)U;)t0*qS=)#Ss017pHJ%b-6vdZHPN<0RM8GNsUWZ2|)+7 z>*6*p{4y{Pdh!*zWo&WIy8EjwSe&gDPe^;@J0|*Q6MZV@+xEz9{-w|dX>{rs%!avv zfohnUh)RAwdW(JCnq*R+5H+shCY@3*VKoaG3LGIuc>Q>QP(eB}acDCka6dA4vwC6Y zmwnu={c)`_&M~H?2=yA_-JBE5(`f~bYM5)X`U73FoK-Lw-$(Qn-a;S=~J5iF(r=M!?nJ!F%2X8SYH8mI>$?Lm_`;x@Z77@$6uFXp0_qumZN*(!iS3Zrb>}DSrqQB8I(zOX zCiM&T({4U3dsQJHvri14yZy94_w{<5*gxUtY3+}5;C?@~pp~Yd;E-u2^Qc?lF^p*X zXTzAvyW}0cqm*@cmx^WcdDgjI&*phVK)& z=eG89P`B&LKG-uj!1bq-_?5WCsOwEgG5pO5H)j*RUjR3l?ML3Ln&+=V+(OfbIiANLST!Fy*pZpkS zK8E81qmMLXFcXe22|EW8wn3Qn>x*CR_5HJ)dwuyl$;pi#ZqWSry*}9(s{&a2so>r$XbAJbDxE$>BEOve#yM-9mIL{mO?*Oy& zmI7==fExv1lk=Pu+2$-Gz|;J*#$;jtu%Ft%1J0dDA;;V%znh%D67peI3e`;UqefEJ zIO9zAi3V~A(9kQrTD~_p1Lb>zQ)SW%O?upVw2us(u+*`N|T<4 zZ|56)fCPhApoNbL?{0F|nOd&Juk$+pd?`h)aUM2Rd6tnG&Dw?2IL>5~^&uv-8R%;b z^eu>Ft8ZZ9b@;;#-ll))?=$#K;K8lc=J$R4`u2&Q|8@6!iy+%&>DSCD;L7w09t0lO zhiWB-+lqlF)xjUDV80^=@#30P9&%WPyySleE`Q)qD$?cHtUV_q_U^nwDieFR3m&JX zJPt0?yUpNQZF(H`i=T0Q8<6A$vKeoY?*(&C*Xc;zy3IxJYnSnkI2cgsuLj&$3ijreiSJndWborGZ_N`fo^#F`d!BNek`>;@*6_l=iq z=50YR4pTsD6~!KXIX&w=z>ndxTSsujq7NV3nsV*aEyRjI&2I<9YX8gr$L2p8G2)dc 
z`*3u!M?SR_^BQ}h?woAIgq%psBxdn!z&Pj6qIhm{%JA#H1Cweo)2tQFhc_b{I~_#! z$Z}4S);X^s4q13C&Djj8X%=VBwiMe4NWKy#4@BT}%RF+dY&jp$an;rp=PY8oCXc{8 zK~8?=YS1doughwVz|o#{XoSgP8V1X$C5{s4ya2AQOn(H8OBaT+Re!>LSRHJk%( z(iMa+L7B1|9Ht^?#!@@#8j5tfRZnQ{+W~cjwMe^ZmFtLz+iRQ-w3a75NmuMP!eF?n zF_HXmoZ$&?LE};^8H6u^xLsBX7BS~|<93OcQWnHgmL!}6AF{EHU_X)dQk$|KOF3K* zXiFOD$Z_=yt|Xy|VS}BjBCx@3Xp}hTWuZ~pCKo1fdleWUPpCx(;O?rszm3_0G`OvX z=|Z#3H-5DRtC`5jwU^(6a+pOB$hF{Nsl8>zLKuywMUa!LZUDjgMDhe#tgKZyz~8g& z!sJHold(_k=dE$5n`!o~Sx*2M(ap|fo^uTIpr`Exok+;dKTHu?e9}Pvxom8JcMf6ARLIbFLYlUVE2~xm zoHXJqmxG*Jfu!qjR0cOjvXaES{^h_&*RxVZVH0@R<6%D>LzzIT+G8^bduFu|7~Yks zW?}(xv81Y9t2eU46J!oq+xLk68$ZozYoe`=7|@Ym{o^_NYpWx6ENa8iQVaA5;50 zHgAAO-R9{xJq9mq%*vG2E{mnoNC5SsPSF=2+R(X77Mf74mF7++r3VhFr?i(`3c2@a zp#SBvMz7O;3S`FPqulyET1Dg4VQ#Afgfq@`1Z^=6R_&W$#BA-ROO@?tY@BO51|5COif=a-d| z;w-5u@DGmKaI&t)7kqDpqIhQ#w;weyMm&<{AcUtIEoq57^Yby@&;p1+=i`>j z)IxyBm6hLArq@A@;5_*HLA>8U+F%~VtjNcci7R5Y8IlEV`n!>o4;vO27FF z-IyKLz*@vsK|_<474}YZ%(>)p`>@AwhcV{2BBvhjx)+&_#$$6^gKzaR$^_n}Y=dGW z3ES{a-R8^Ma6KgMB5Xy8mp$IKH#Lx@ZMGJX;+5$yNGi)FsS^1%YoX6pZj7a#5q+lg z#1kV-!b`^dfhc=Scc}J}j>6zcI2ge!2Yu^2|w3<+9vh3}=e1m3u zQZRd^UG*eNI~Q^58=Oz-#O`u_M%K5MkfhE4hA~O&gJ0*rQqxt=m-4&D`Ix^3@IMH1 zdirnqh5UdY)v@;$sjA41^ORJ7lQRjaZqD-%riFN>{NjFwK-w_~J7f9hZ|n|KG)v<+BAtx@)k_a<#W?5h${&ZzNV|a~oT^e01$dw3|;E&U_P^ z5pGY#eEjTdR?Y#5B~w+sc=n z+f@Pda_-A6=-Ljtz{V{=cv2|gz5eD6pEZEQLUtj?P2A+dx4XjL1?RC*7A|VTQ2AvI zv=*yd`sU>FsPqgKOov8U>E{yNKXGWmfd>qNXTtfcueo@=owfZ3SW(0#C@VKfsq-Y> zQn~Hhx9dSrMb}YsWT}uWf~vwA&WC~yj#hDG0EEws#X%980%X*RpG(V+b0B#ggQn3%0o;|FHzE%_Zl=93|^4G4WBv0W$xhf^7K|w z8Z1TSa1p!mIWG6h;`1voLoK&17e3$a8Z5#lTZf>WSgqRgVS{p{VEb*fkp7~;$?Kzr zk=M@!Z9sx8?d$(;_QRKYx(nV1s=l}!ADej`sh^%DonxO;Tw#wd7QdrO4i%5%Y{pZ& z&`(*a=~;|B&}XrxZL*Rc^v8H2s*IE|?vaFuEO?1|ZfZjsi z3a?w*gkaxo5UOa`2gbd~K>o0hl-Hcf$Q?D|XZuLXt?fg+k@S-=gAwg$4KE+M5mn+x zMgFo1`>djbJq`~E;2};k@a;cbD4r@J^P{>bw}in`n|%;VQOvr&=wtNglOCtz*uH=* zi%sWkd9?6e`U};DE%DT_xUdb^$cq9HoUXV;%#ATJCsyTy5y_GsOeCG+fE0RqqZq*D 
z)=qQ=M-^hw^m&RE`&+p#$wW93DUoOmK8ar4wu4gKmKK!MKLvQUgi2 zAZPMUZm5pdtuRE&@CY!x8w0Rbp<;gYHf!GBRS8?7yWHxB108Zg*z2PEi470_4CC`N zL7hJb30$T&SDglvQc?rFNZiI}PMIG>2@Dq(GresT7un!2hn3VzIrnH29%LNejG4}u z>_Mw`iCnO;WCB{_?02oca|RSw{>1qO_|+MDwOl4!ODuT*j8P|DB~hbMlo@mLH+KC7 zSNL%~f_Q?&tNlX&{|iu|$!nV7HVpEmsA{O=BvatnT)zw(c5 zV$Kzab=4Ow2L-7*E(ThOXwH-|)|+_`_n3MjEG!N6POe=Meeb;(SJqwwAE# zoevNu%r6Yg&+yBlpTA^Wvk&4hE*GVyL&UF4-+H031?2KT2tR+;g4(a12p2EbP zq~@EPial%eUx8x1fy-gCI^I?oEkX z#r zR>73LWHOro+-Pg~7~bf$YCc(Xp1|1!OK;&W(d4?45L z@3&?bq*2kEJIC#(;(!i3%8Xz?m{_?VWY(*@HbKRDPA4)KHg`N83S@L)e!;s+1&gK&Nz+?vZa>Ke&@09OMTF`@#ME z;K6?IFh4jNL8HVH5-Zxig0bj&bXJuQJI;qa-G{CAVgKmEUgXD4^kZxN;2a;|c0ab! z4>tS3r~Tk+1PxDi__1$FP^$igAKUE*ha}bO1r6HI1idZBw7g2pB;uDQH)w>J+M4q1pjO3)MjR6AC%Q)H0K^ zk``76H;{G;Y|Rl-jMv`GdZuVWpj-kA#)4)V*H|=1e5eMa0FQ?e^0>6G+02R|w(6P3 zv-o!eAle!LOcZd>07zHTcnQumUcX7ITIeG+wmy(DkB9Uj0i4f`1>hF=$i#6vAS&%g zRuZDEj(}`W^eD`Uf~T%sp$IIk#wvKUy}wY%jc`en?Q|ezE&)dqbS~8GtosuUL7e#y zubOxKRj5m><>?mICSf0k#q+=(-TeTeQ%8Cx@iNKvV~M3^qPxeJ#V%1Q@*Xi^hcd9N z>#ia9hsk3fqf3@!A3TF+1e==jFapX|gza!{0up41m$YIZU7o}iMjnD_kR`fFzBQuD z?Yd617i7#x#b=U0iS*m7T)9279g(=--89{h78@odh&OXXvpf$E6=E1*Vu80tO?o5{ zOCK;p0?gnz7t^Lr5rgct_>?sibzMEYwb@f8TZRzOnyrB0+pGU0EZ)>f;-_Z|V4Bx} z4iQ4R7qI&ND)*~M6r#nD1~%LX54$hK+mjTsNFkZQKxTE-;eZ6c>Z6uv>(G~5<`XbE|H_dFD01WnE+!X*GgPPT#;iGv}^KPKZj>sIvCYFjL z38Iuh&Cxwe2J0a0QuMcU~LPCj(o@Q5fCPsk}szW_>CJ*Qh(#jIiM76oA+f;KEL zEKV09UBpSr_Tz@KK|qp2oC*+~V<`K;rYLB8IeB1>Hu zh{c?7l?xU*<@6&2hkyY&BA!0ZaIm;K2Xx3KUmQqUWmBr#E5z%V8rH{B@);4z>b#H0 zKvt8`4Z zCqkAm*4z~y!D@s?sRkody~xi3$r!$6+OmhEk|B4&h1chhB-)6$?f|5~4j@Gxy`zl? 
zj9G6XCj@N*8<%^`Y7&U1IC=qNmH>*7X(h79tZM*|-mga+A!KZ7E+MNDtaS|vMc|%< z?B{~--grRBm@B7{RYipCDRyeMt+><*3Yqjq^sdu(h_D7_PkO@U2V^;Qk zv#qojX%PnRn}u`PPk;tu7QJuw+v{?Q*;|NKF`G?_shCxxV>}6fm{s8$Vn*h|0fJt{ zjDP_#Bc6WFbLadIVm5~Lv08+HJ&D}ab=C~5?vcm#UPKOxK+?XE%S~{29Ba*UFVr6pR&jPXWw4jE`^E1ec9i!O_CR3CUm?dB> zp8`u+{cJ*Wn%L7G)HsD+W{@(MfWxZ9mQ~ipYF8-uN`lwVX0GTQbU-~g-!3@qqg^TW z?Sznx>N@5@o9n$c4`-XF-Ysoz?4iwlv^+U3j5hZK_8N$d(*CAxCUkFYzDuFI07gPG zS8r{;M8SD-8Es~+Xq$JKA#FZyy|h{Gp42uo586D0_KrAU6tm4U?vmQZB{sXCH(w*k zJjciC^#t~kf|UUaszvDD+B{dGcQ8nqtG70vr{FxZ59G`hZS%l7Y4fBHrOm~J&^F7x zZg$-iugwG5=9})6Mm6-%=Fk2j$t9+GJ%P0<*p1p|Lig6@n-sd#AZ4!J+I*&f*R@mI z)z4-w24FpQZDP#)f_FOJKK5=gGF z9wxF%>)gVMp`D4t@5sY&I)jf4foG~Y8Z{yYA|Gd6e5K`#J%g6S5>ZZ6Q*Wal)N;;l zSCZexVDCCr%xWT!XR&;);|)u!uqMzwAj@ERGYC?RV`K$4J5YRfRZ(T>`?1=8qc-q5 zDs?!eGudZ)swF04$wH@O7g zYHtPT_C^3My$-%q=yj@WA;ZuQoaaszvo4F6k7_>l3w<;?5W?%&> z((vO5cHRJ_A#z~Xe41Rl%RCz5maCsngJ9wm1Jc?9T$MbZt+kb9u*CFT-$ed0*pzHV z;z`$82Tx4l9gRgo18lRzn9I)P~vpaHh+Nw5BxrQ8LZA0I5_YS4Lo=-to1NX9S+E{`V$Ak@$gxC zO2BL4VYUj+hvj4yIPtLIK@zu6V2!j|RYk7Y;XpMZ({DSO%b0PNX7t24mby_gN)MxJ zDOdxM1HvuD_Qe+BH0J$X;k#K~lt`{066N%raabbZkCR%Yexj)nSt@B$3{~RPm5iyka@VWWCUTE z#lSWj*wC2Oi>bAFTE6an^)l7h>ePtEd2)s@N7@R*gIK>C?K$N#@`wCbhEAiBqRCyKUz56Cbs@ z6q7?V;=l;b@2x>Y1w>X_62su!w%AiC%ewnQtt?NJJ`+n;(!0?f^&RdE=7nIC$k=Q# zvdz3!uQYO(QIp@`UbFIkLWzu0qR_NfC&+9+X@flxrsw4qW0>*0u}Xwicpo8yb81%z zmKqN3Z9-N6mw_wtLI~Yjg$TL=dAX=Vf?`p}bh*)T9%nwFB?#HETx6h7LRJUW3%+|B zIKtBz3P_oYd;}HAqP2Bg)M<>&H;+Lta=w{kpgJ6A4L@Lig4Immk+4up_67)C2T4TC zw3S2rvjjNlOB9@HZmz`IG%1eC=*xmKH%iz?;odp3Lm_ARu$|`aP%JCsLB0`}`^!4Z z%nk*P@%k5gWlOuVzG+c38k_2`?2vDA}DEKvr7SH#+kHV}-ln%Sx z)kaiPKU^cVO4szNksb~7-mtR5uJci910@P!ku3guYj{eFdVu$J%x6w2%91n~95AEHodA_`^m@jWV(8`!rx zz<3JfxUfQ*A|SI=q0D4eLyU8!aEH++B)qFnaKKW;IiOI+l(g&N;Ed5II2)W9!-v+6 z02W;-cGtLgHW*5MZzSYUD!WLf0!n3OPfEqS??(rKu6LC`X2)^K6C0svd7jj65H4;K zakZAvKq^l+5!fKXTC?+)Q!qA9g0<}1$Fr+@!1tiy6QRVBL&Zm6gRk5c&EX6s&btL? 
z9P++)OMBO7y%GujtKh&I5M6HktpN(s*Ick*22qMMyCrL;icyDI$>*`u86X)T8xVnb zW)S}Ox!EH_T!=f1?FY+siZIpHd?@*to%t%#|6xVAfGm`hCvmZPD*C|FK7ClGHJer- zv?Ng&74L1|d_)qZXR{E9OCBJgHbtvqevGe*^e0*l?@&OG$rTD&a0l{)&soNx13(LlhC9zt*3Iqxjvl6%$MwbRje(7E|7K@EK*ss45lh0Zb zf~q?t5I6pbas4@1K%_`#3`4C+)*IvC=-ZPQaYmvE5q-Rb?5-8P<^7yP-aQGXmNh?_uYn~V# zNKRN!aVOEky&s^=V1Z`bfo9;XDemv%oB^~K$!XT+OO}am2j6}ZHTK;la7%)d(lUB{ zz*hPM-((zX30q&*T!aS7H9O93Rq)NO!KX8-f@=F2vcJa-<3ap{U9j*CuGwH3m|jA? z%w;*8heMr)uuige(TQ8IoIGw%*>!Qo+Ea@qWl1L+iEESyuy&>ZEaZq`mcmh-b|G!k zYQdLn-+oH2wClzpxzf7VKusXxU~Sg`s=~goq;lB3gp8WgE6BO_s5lM+zO%oL3WU%rZ29l3$T8 z>e#FG5|S3qP3dFep&nT-Brllr?_Ft~WZ3P>JEm{^o}Z#U?^4IZT+b@PDAJxcP~)OK z|0gMDR26!jkhyyHWCa&Z;;9MAxzt-YNn6Ct?=(n-ZjyT z>8{`ZPn>7^Ur08pO7+gDamoYLhJ@}$aEM-27$VU7qyie9_mT8|RZqQtfDS`K6l)QQ z!m);#LcQPFL+^j1nw138-X~!0-d{7;^u93W(RRN*Lh5V@)kJ#KIMlgaMO)N_$K@35 zh8{ZDW-VY*Xa^gpc(j8j0Scl$0Ub=psAyNS0`{nh=$HxY7%)KA9C@_BOq~K42svTG zqFuuDwoy}{CJ!n>DPHiEHAP2 zm{aOXUVHZ9gV*oNbDYz@KH|t2VE%P7GkksR>^b;&O+U7DepwK!`!_#uCK4CfY+9Gz zm~6$3RUd8x*gNnFogu4X!=UT zM_KPB5n0iSP^E+j74#1zbL9i^$xm?jlXTbt&BXmTqU-GK-Z%p{NGyKl%OR)Vu>8v* z4KVsE>1OgS+5iX0_VP{dux-hAz6IVsiF_Uj#vLv8PJgYyNl9ESRz9@-82d1=|B0pf z<>`OnkfJ=}mHePTUY!;<#4#nGUyG||u*$Ko&>1LNF71nx8G;Ppfz#j8!1tq{1(jeT_sT2+qL~+U|wbZdq>+>@1cwGJ)C!Z9ji;__Lcc4 z`a}bXYXh*&%qEE`7cbd}(lN7}4Z%LzK5UP&`r~^=ISFuMV@-qcCdtYQIGz#PJ1#c& z#jyH|SZ-4Trs`PL$Dy_@W9s*i0nm5lq5=P5UtczK66X6!HqZcuiw-0c(;i%O142Jv znZB3Um@_(CoIrRuE+BdF0LqCyvT+iRZ`ss5&J$zkzS>gH+_+8=QBeiAogkip`V;R&%i76un>ZS<=xEdE^?KI zk~Uw+=7^}lpH8%O>Bd-X>R7hy_@YGVnpkboiTH@z=(m(8)8ct$0W4S=dnI9)6&g7N zE0!bKz1IZ$5?UK6EB)v2vcu^}Lv^SLWXlDsO0|7ZJmyCk5-dHNNm1cHL@I+#qs^cw zWuJpbZkE|uKs)#lO0doaQm*uBMd>;|b=Db5|5(W+?7`PU9{w%VwQEIUeh6HywWf6T z_g6xA9u_>(7Gzg)w@t<{9`e4nln5hqem0j?4zF3>b^$9XD^nF#F-6py zsm(wVkX30x9YBUrgvH7n;WDIRLV!C8NJ@+&HIRAdCx^2qbt0X9#&8w}h?q7CoHw6v zu^9u(xfWN~h>!3++#o31WNLvD34*OJWhrTO@|*lv?Xu%haV~P72uucrXJBt6v^_t8 z%7Xt{B3PM-gq6{2>3M1c2b%i?c{v|%K-4jt?3(tVpi_VZ%R_GwC2vakT~saofyn%~ 
ze|{x8q^qyDWHVCI-M6iT8TshX-G^eg(ac7$l!3jZP#41f%CL(90)h_e1v-NHjF^$1WW{C~*rtd*4JHz_T$+N?PtRM3Sk;eND@Rqqf^H&H zL$ffu!S1zsR{+xFLlU}0e(c$eEXNZ3(zav7FeC#oHA0uuhg~6?^`0rY)!9-JqNX2qkn7()i|q80xCX#1^|Gu* zJnCMAEa2Nh{8U6lso)GkND^Jqya#&9Pq&mD4~{{!ll?kH;=QzVV? z82El9a4&!r#QltHQfb@d!`WaDW`EYms;iQ!hzmLHT^%AS40`UAQnHdp8`bKX^X?7J zqV<9p_hEIE{a85Y=@phI6_U>2-Bh)|981u7)C!5xwXxde{Dxo!Nm)}Z1R)+UE~fON zN`eKR1PBc5ltr|JL=7#c4O0Y^M;I=l#uqZls$+M6=7N}`+heE(MmKh>>8z+~IPi2< z)QoN{F%SQUI@=)Ha+)jW#8*+eiX&OL8dsj_RYTsrdiOUnl9BN|(mez;Ag8dWr3b;# zm*G+B->4Ou(@qxVC^gr|1GkL)&?rT1;YZ+Z3yNl}wo~|AS2EJ?pf&z*Va8v>gf8=K zi+X|htSiK9BW6JG7a4@xt}rUbIk%p}YBe=*VHr~cxQONKqg{#HKHQ4!21l0p zgmwVC5&#tm`>jjwl~i$Bc>6_Sp1WnWB>wpmYxUH4N_b zvqlMc#UFv9cMw6+|uof$Exn%$5iNgN4?wM!UQd_ht)#snlecX^v?UYK6BX$X z%;e3PSd~3xo5>uO8q!vH$~F*dYv{8Y*0QuCR$F=uYhdPTwKOs#bG5Zt3GHF7_B4}p zt|na=ovYm}S+SxVnWs%gJnBIH@aAckqiBp|tQSTyCPjJX*MiNAWGs)ol3Pn|1yJsw zMdoSi#(N!TB%W4Acx2j6=|kR-ktu(KH^TXP5{79fzx-jzr8Xix9nmDslqSJu!1aQW zlfyFuWZ)n;d$<%;WF*a3xH8ZLF((vroRu`9nbnXvioMEhNQq((Pm;W0tf$6=S{RX{K6HmVhMSduK)RUxeiOj{86>a=W2s@ZwXlGP2*fjk^sK+PV~hMn*r#lDV1*iykLP0$GSn6YhFM!hSnS&%|bh-037F{$j%EQo2$0} zeCF|cONK|dwbfqJhjYG1?uMfe^i?B%z|yY29{aTGKDK}NIOGd#?Js({PE|28tj9|} zl^YA-R3)o%*b|J`zJw!IkQv}B!Nmp3xKk+Ggr8$)#IUD-%G^ODPYumxEwIg86x-os z+`HOBVguORl}?vH_TgxKoi9-;B#6~6!W2gq#$fN}G91?(Cq?q81<>R2{aDja2nj!2 z;F2S0Xd#Y`FcLbOl^Eb!XDfAS??K=If;*7}2;xzy^x(-mO^S5jk*5o$OuhTg zQk)Bjv#pN(po}G2*}%I`LmsZM4=F6OgkT%E7tcnByQS;xa}gK1bOM0_m{&5PHXh3! zD8;au6rV1g_33DRKP@1;~r&-v}IWT0-x2NzA}ggT@FkdW3QSjlKVVT>@f%8Zttpv4efQpD?+ zvycbz3C3HvS2>5%;L!n)VdDzXF&$F8&UJ&x_5&o6JCO|KMkr-OZ*R&gc6)_%6p|SX zWJ%Ok+sLUiuKsXAu#Qwa;n%3Nx9Le+VEVeFDd%gQ{|@6oBu)<@+Pn`mFm0&1KpSZQ z%o1;|^ol04bR@GriTsYx1a~s)A;)#4E}r*>w+45nq9BE-i$BJXHCqd!a?ieQ`4Guq zl)GF{f%79+QSQQuPZRUEO1E*WD#jPPO=Dcoo>;1{pqNU}8-kb&&bWJcW2>wbhKO(! 
zoCVzGS&L|l0ry5WHjTSMQtfn+s2KTQs{CRwQ5N?^M#5OC54{iE>Ee8m?jS#G-~u+h zGhFe&VY;L&@ z3K9HBib8G1go?7!CTQ@#s1i87eKcNpu;e5_1jV3l|uiGP_!0dx4of~JqlaCE0UZ~Icl8}z@)CS~#1A@n zF85P7!D4k6YZ)akn*o2;@_6lQvD9!x%N8Dn!wAxozaQxq^imF_4x&GgT@s@`z%d}U zLt-Oak~$py%PS{PlqZ?*D%%T#VfB>Vg>*9K9fv4fpMYd=522L7WCZaP*XOkL zvC^(k1U78g5XJYBK(^GtHwPzBfRcFx*jm&<35_gWAu%*8PV_Cc(f!pGR`m?S6 z6!8b+S$rqwvxCJr8=gy2=~5VK-C9^tkkkxW)|#&#+JRx<1tgcb%fJe&J;H1VMP` zuan6CcZjl6)FhdR7LE7{3kA}0VK|+lLB~WHZhU&;PN*+gv#l52bEf`E*e;%n?TVso z_a0p%@Is5ST_Hz+-~jO;*AuZs52Fukm!|`SOhSTCS1J^*dj_`<*f3Pq`G!#n$tz<4 z$qWW^W0du_BP4j(1tII5GCYg*28>eHyO)$!x45us0qcRkSP={qtXhcunl+iFBboJy zvft$u9{aH_+3Z&i0F+G1f>Ie}KP@OX`)x~%)@-4*wMZzBEuJj)>+FI3mSIr^mb&%@ z*-rp-vR~OA!hWvJ7-7MShzKr~XjMyfKEmmlLI9c?^fu;f-kC2Vy-2hmBz*Vb!dq2Y zycN|k{kinxZhy=d?hbOyo3I8^b~nlfUx~2d!6pmivbPziWLFluxcT`dXHl& zUij8z)+fqWO@J_JjdjW9t7WRC^YDT?#>;X%r6Yyp=BeumY|R!r>;4=Tx|lVr(uyS} zTcwpr_9pEI@{|DPcMd8n2@*kzVu?xUNB68`JyJhJa1S1t>VR_rw1%@fI2H#gXQ?|!8 z17@LtSs7)ld3x-E5X)Sq zX)G6M3#0sUG>eyEaBOonvlL5aIh+Ksildv$tz&^dH@Cb%T-I!%rn=UETbL=#E!916 z%QVT}qy#6o2w+ZbSvtt&7NL2BTlOO&+|xrXkUn;ySq$(OPp~fEPZ}oEU*1eX(1blD zo(pt!rLYQT%0PNPAnU6H5W^}6k-aB5-_JnY%mLhG#NE>&@9};XlyQNSp&5E8W4NDz zWiSWIII_1gx@AC-CZmiCr3}r`LmA8c3@n2=P{yd<%DB$Yf-)vb8ImC=T-5?3^Ixo} z6G%MYNzMRIk0caa*Ri8FMM0`?y4azl|LtaE(X|V1KQcAFsF5SRJ!Jy+y*QDb-;U_Y zH159&dSZ3x*t8O9(0l~ZQv?f{Ip|7opN=!^ba+t;eELQB9W_Es69p9J%Aqe}Fw00r z&!EN%EKLGDML{*3c3qFkIRP@$l7!X3?6K6XqRVRp5rA@n+VE$4;O*oV16!8nz#G0>7m`^Czplo)h#TBJaZ2GScp)tL|dE)03t+tBN zcVo5xWJUe+KXT6}r*ibMH1os2RIaCco6b8j=Txo=(cFyV4nqX#3I6=BC!~XNBP~EQ z=7+oD3?}A>Tr)ls^TRiFeux2=y%S! 
z!@g7=TEKXmPEYPkC2hd63!EyFtJniaaMDSZU6Jp2v)8dwTv7^{_6*0EsPo1kK)}(( zAsZ{IN=)KG%tSg#c$ej3sShDcV60Bz`zFbN-FKIL18sa#q^<)i1_GP@GGWi<(1Z&= zR!R+ozp+46KJ>pKo2vXMGW@??vBZKwnOS7RWx33XgVO+Uo$V}(SnO zB1C%v(0<;|4MA-F*34wdI$Mdph)jZ_71({rMRu7Y+1***P|4n;)Rm(z0L<+n|N2j5 zkfTBXj5+#(dh*NE6XaHo4p8hqA+uT8t(VTr?~3l@UF8E|M?UXMv=!PaT~7j|z=<%T zC?@V*267i=>vUeW;JKT)ZWsHo=k1w=zySl8sDJ~!8ur=xn(ek{7bz5h6)OB}_TL0o zOI)D{tWe*X_ToklXzB6##<=0N);g(HqM#kHS-!VN`3*HH5&TF<9R8Cex#$75Hp@&* zYwZvu8^goKm#94KPL2lS6R|&KK%oSy2v5!XlVix$U zq5vw4;AoTb;S$zDLZE~yEg@W!695VCL(UsuAPGpIXtW9G7*uJ~yHb-*vbi8u*6ik! zF#;^}pmdV$094k@=4plEY$AbYIZ&)jEO4bbODRt$SzkE>KR!K!i-sdItuxI1!!0$^ zh_B0`4{#)2T$T(gmbFfke)A0K3Y6nYD4IYBhBrOdX{4; z6T%;b#i1Mih&V%7o7Fgg6x)oD0qBYPVmykqbAjRq(heT!(z60mNPTq z;);!A9?cYjzRHJ&LAVQm>l<9yp6ur`KM(CrW9g-C>92k+l(egq5a+}YnP$iqhUGXzX0nFPyuhyvzekp>1d)q^uO4bHcl;+is0Q)uxx50%lZKBFef zraliip{}MN3nqs(0b?kr;@1fwyi|4}S;tB%?M&1Q4uKxC=3#-ma962>3>_muR7v8T0X1GeVR2#S3{_PA$Hvn!Q_Y8VeSboHuiJg-bk_MPLxu@ z_VaDXd$O%o`xb zG=$VJ2(TWF9C%lf4a7*zFE?`z8~SA~xVb!8FU`!hq3`yAU_)mxO!FFSF{4Rmlv!iU zXcmgSJ9$N+2P^vJLz!ddN??H%O=Qp(;McGfeV0JZZdWJ*D^%EuzKl?CGBe2-I`VKt z3_ebNi#gK3E`TII+9``tGYC?2ofkdmf9KV<<{5*uIQwbY%2&L>x}^Q1ouB>0=W=|Fk1kS ztR>#Z6r@?r=gXVNjCiH&$?k43>#)Y0PHI~^&<8ijt~>DiCUjituO{@E^o<0_vYits z!L@Z^JCl84sRLN7*v^6?WXujgn%e~l7Jx-AGL>&Tf9>Masm#l9k{hBoli-07I+l6_ zNnqV7K_gt3WjiMU;<5pFgrKq1+02LKWZ-0lVG=8G1MWEoDF}!w)dPqUHvrO)AnCEq zViRxZD|A&MZ=wLeavr;2ywWvlFE2ZS6=;#dbJ@$cO1hxoK=eRCz+QF@W#u7bD2K>$ zoaDY{fj-lngDqxYY{eMTOx(v3H}*@z)=w;TGGG#R!KNGG_6Bzc(5?t}mT}PWcyOp- zx~4&jD!0crM$cB{X;FfL@))PiVH?IVR6G;Lfw^-(-`3(V#9#4}g$ED_`IjvY61 zaeO%Q>MX#k+eOHNmeIPbv>p+Np!k#_8B3ZpMdfDR)p^q34;py$%ebQTXSntfU$TY2 z6Ar&b3b5@kmwK@+M1Yz-Oi>pfp<4=@HmS*wm=wJ z(vGF}lfhmV1X<|5(5V{yrTwPfY=*#D|30#kMv32CYHdnDp z60jX0*p@VcX`Npc3A5EmE}5r#%vOzDXo3SUFq=RGnQaD=!E95IQ{!-@|8}Ljrs@HNO`=yOEZcmCa|95wCpq#3McO8JidC%MOftZo3Sh)#sp2 zQaOW3hR@buw1!_UnM3yKov%=75Rm(#p%SiX^ zn=Ei(dX`va)OzDEyXZ;ZJ+@~KK7?Hx7!yAcRyF$W^Y2jip7hG#gRon`!kl-XfHg@H5RO!Y3jyhrZX=x6m;?Dm%w^A>gCuaCLWDW*MeLP; 
z^8|vN_qKB0U(#XD`~LCvonp2lGTw&yZ@g$X!+(>$?S=pD+MUR<^NrwMk_(u3oJT!x2#{2t zS$Ye05aQc`3S1)I?!l%$hGeh^eb;4plzjuc;M5srcs!NoetOhPtA4@eNBAPl+hc!8 z5W^RzZSI9HuGx){*?e&p;*~FMzON^~$UdKNHiXn|7?@&T(4Ztr&aK5pY~pzX!x4+W z7LMqQLbiyFC@$4*KM#pgdZxfM@r(>R=MqI#A4PQ<%}TpqBQ%mxMa3weYA_19mlj&j zWTQRiFdR`wt5h9H63X4wEln5~#df)1Y#qFvlpW zB74vX}OYwau3sT=R~pXrsY06-78n#V8jeF3b{@sx<#HWGUb-!D7RS! zUy@MniZ6w9jZthVfT6vWe!1;f!9jb6L?MsUayyu8%5@lyh-$HxD@iD~Ld#7=v3-m= z1RN(m108Pg^sXNUrNn4DGh-<9+h-(kn5xq?e7ze4cm^ zt1vTm!b{hHHBWKp;q7a_zilw^d2JKaH?^@|)HZ_;X)~7D`7}6x$C=+b=68YlIbmrDd!H$k}HOH)jWnK z?_R?)N4$aL2cGl3K_iZ*ylwvz=Li{WvkzLGOv4+#1USGP^;|v}N!X|808z0*S7Xjg zdb8)Pk!&to;sO~E(3B87@Of~ZS~z;M7656KL;tU1868XBw3?lLRZTM8z2HAMy3gH< zL#p8Hx}?c#SM>6gHwO`HcA;?5E0YgPBsb#*h?)Jl(vKS;WN@u8w?H&QV;NYtwrXAA zRvQ@-^@vz#`Zp2y6Zvbf?w{Z>Z!G5Yw4;05F2TE9x?X#db@s_CWbtPHr^$q6R;Q9z zY9Cyj*oniZUKPH15^zWN(5OgmO0L4DUcnNO^iRJX*a(|_ z@BC6Ur-act0*DJ0aOXjEx%>>s2D&1V#xoGT^uwz^#Jo%eNPFmIp}*z>v#5qBIF`)>(1yJOm>U0F^(6NbagKTap}(q!AF%& z99%3l?O~UFv;bZ`cn_k|!?it}#r`QEiEa<~Ri%njdF~;W8qEc|9L_TRkOT1v_6-iq zihtxg&=b(jrB~xEr1U+~Z8D*JcKyjZf3HmMm$j65KiE3UP7cqX?iLVZCm*yy?PQdg zyT2he9`RzJ9$g{`X}7;e%i(gOY0rf6a|zBZE8ijx`;ROpkuo}o&{a$ z?_Zmp$lR`pUk&8`$~~l3x%Kqm1tb*wJ#<@s`jl=V(zTtfr{P-$Pj=hFr0$Z;) z0d2W;{oqF6!=6Af;99-i%DiYUcbC6XRT|x9p>_!7XX@<1ZFW1!)5K*;b4f7(bNlK2 z>JyBhvd5SKjPIw%FH;bxndPT97wb6K&`^e&#uj0>x@+KTl&;ZC(w*MiTh*H&lL7kb zZnj6?xY9R|o20mD3qN7{cEWGLj|tHCW}qc~tMY}ug!4(2iRNaS524xUYWX61N60K5_Zg3vn~T|1NRa91?e%P7=2kq|VE;Cl+*_ zx*+(yRoBs>@BUqf1isCEG9fvkN|3u`RLe%Z`rej0=vpdOY%uG)Oq0iO+NsEcL)9~q zB(JZ@@szZ=t8@v!x;}f*@P`@3d_}qawcBD?a#llKc=vOBO=lzbKB&+es71K(U|VOM z0N0Qn^*e`0@~$Ib0YJ}VheVtlA}1XrKPJk~AEV{r;XAZ6RM<~YSj@Q!kiPz4d1{Qj z^Dy)Z_O3FMf5RBWUkm4+u_oUtVgYd5#9LT`Q;t?L&~c? 
z5@OL(EcIREe7TX`34H*4Srb)ot_$v+yHjlEJEE-S6U>*vYLf|`RNV5@1mB%`I~0Vh zq%7{_BCsn}%@7v;!{Y)t9x!YGLa0?s!=}#)j?UP z_CO3eXBd2#&{=KhtV`Hglh9dLz?rE7Pne>axm2|CQ!<(v^P8L5C79418QnD8K$F&h zrEPAA8lQI>TsS($2+XuDSVxvN>DlAK4S~Ya>bDKi16v36#@QjDm8_9HCxf4^pNmNu zM<>0bm$r?3oE-QP&y5v9iFWOUZ^$z$S|#&4*q2Y|urFIhH_zr}RkSbnRr_)Q?03V6(5ETSKVuFgoG`v}C^k8u9O!NrRSq9w zC_=slNUM zsv{7CPN8q3(F^CXlTJe?@z78&df_vep{~GN&BATa3ph07x%PSU(zgTChO|9Jb%VMC zQa6xJ)D6eJD*~nI*}CDfK!IsxZ!IgTAL1cEDaW2esp$IczbXE4Xs}l%AB?FyG`KTG zN>#rqJq~9MyCtk7SxcaOHhj$wumPNE9`ASuO4a zd0eu+R-S%^dY|tug3D)W&tDodj$va;N~vnm|f%*-(O_yK^O#DFs|o0)4IOg0(<0@$s-J%s@@4Gn`3K? z12c#ucDJQ2$2z_7i8O~qx)qj)>b2ah`M=WX(jMtFPU-Y3pH5dOowh=i{{PVF6i1Y{ z(a(A}esHile|zwb+Q%}SMC{`unasy~BDV1_C7y3IHgFIM+Qxk(sSJKldVOemZCCmc zEO~4JBRd1g&A5kFtPf!)z18a#bSkw^`a4>~YH!0p&3>w(4vJjNV6$e@)rJ|IlY9@j zufZRVq)q%dYc2jf#L#*Cb0`1&fq&-k&k6i9lYh$jr?#o}nukn$FP03{7F^ zptmVJ^5ltgG8&FB?FQ!<5@9UCjt7_vLHZ=pYn<)SR;s8mY;&IZDB;IX6o)K%_SbM8 z@qNR@CZOW1U_Du60^i&e#0&1^zJQ_P6kH3;v*F&dr^t2x={%52Ev^hPgN5 z+@)dqSvj|8nAkA>!A0YC0CA9MLJ+kDLAN1A_XiPk>+#n`|+CxNaeZ6^xU z24?~R&cPqlZ2@U^GQ%K2jG55PqvU&&b0+Cz+INt`OFj1q6Uq>;1Rx)pJmS_ga{AZDGL^${H&z+hi z#3jF@jA9b;It>f+ToZ`DG!Q=?VdpIVIbCA~OOe)8x~QLqCB8c#DjykIzNc~0l7YaX z4E8n}PD-~T)p_}K{22s3Va#gaBbl~EE1fpBq>mCz82dF17qR}&GYmQ`*SMAJpe7AV zZ646@1jf%tnAlHZc{gf8I}=hGmdsabxE}E>H57vq?Y;~FPuE1LMHRxtRw!*;=fpKl z=y9-yrG}1*r^EbjYFH>=q%cDHeu2ErgaLakR9L}xH^MB7^3JrI@$1~608;B`48f*q zoHYGW((M9`5G?Z%L0Zk`uTf@4YkD4O`CAQ3OD{DD_eC5_K3nq$;*&Hyfh{^Zfa|+} zP=AK7^D!e(-S;(N2n*RoGmK;A?E;yXDU1}bNW&PB$<8>WkqQmuf(PAbR`a)BuF=S)(EB2eD^h1mFRyY= z)oMu9wnvea!Lb_0T`}hn4GUWxz%cUet#KljufL)AUcoPGF+gD2oId7vGb-dPLZY*f ze;yRDwa#7qH9GzlgxRDUOxg_mI@1+E`uTDVOC25{zHcV-x{G5ivH8wywKFDjuV$EU<-T6_I< z_+vCI_1dyV>MJk@nY@JA2Vu5zt)>g?zYu2JHb`??HDMr$yICtEI`|U}OM~t-8E#Wp ziF--YiL4^I3diz zM0^QLwhy1v2S(wZulh)L)*4AaZxaX~p$OtE;2?yZW0hpWguj$h);gPsc_c~KPL!y3 zp`^4q8GN%*-I`0#e5B!eqHNSENU^ImUV3{a!j$Wb#z|>Un7E5HkHl4Lcp;hKW=UV` zT+cjc`)E`4)rv#7YdGSZVVX$@c&21(b52$uL3)fJSnG@m#QjFYTrqZrAzXn*?ionm znPJ4m=s6#MR9+JnE= 
zbcx$H5Plo!Kq~om4B*=eVfK8Brc1uhl<-pCy^KdS-_|%0lUFn>u&o-Fo?VPE3I3SI zX?_i7*de!RxQIkLR?8EjPB-w3zno!|_b2>1=krf3TM4X#kb_j*NplMJgg~!#;`}v( z^T7y{C->7hDY;nU+MI#%yTK{aG-0jx6rWJ0o8u5hT+FG>@a+v zhMO6ltKmG-@&*mpFh0pJ>8Ej>l&MQh+<6+;!MM{*+zA>tfq9S8a6QA{yrg&z(ztPq z+sEYDUE`!5f1=@qOz)4d(?=sZ7|~>qJc3_x&Ii&@Yn}OwH{A9L;%K}*|Nm%v8~CV- z>kT*ofIaCn#5QW8Rzbxk3N{g19ckW)cI%`7CwzYs?MXB02Kb00Bu$MkaqY;f_#lJMI z{i&o)sV(xoNj*Uv?qX4+kLY*NZeA#S8DR}^=HXp6g=+f;1en3Z26gevhsy}E#NH?*7v4&WG(eI)z z>UCL!!yZI1-ikK&z7^`MDoK8^ZaXndP~mokpm?~?9H{l0+k zvrsWw2u+%10vp%wLW0SBr+y2CU5R&c_NDSnsS5>WlR7}-m2?H6ymx@b&;}A-uJK4d zM@srl>JU8DTJqukd^;K>RBcGFj^NvRzV+kVt9;v!Z_9}tF7-F?L2g;crx_f!=y##$ zLj696HGM{4*QzJ+RLh9!Qe??`JctlJqq>~J6U_i1Z7Ugu90rHtuGCnCHwWnOHs%o3 z;nGgkA($PSrKCjVI$Ti(j@0k7nYM)QsFCmw`CrGaV%!ea)^zAfze}38_ztc2An_wD z>BF{)%lBIKBEHql^07%hs}l;s-|P1Q)cX(XcR}d#9SBAs4esMhFWV1WXPp)4WBcgm zBAi5q4&rlVdaxV?v?4veM8`+_>F0br8NXbfFQxz9SEu`k=iVUwtCBvIo^H~Q zL3-jn%%uNmvHpI~;JNl-jqgT0Ssqv7UrMKdm*4+3;rkz=;dXpOKkqi--$6LxPBYJ_ zdH&Qq$KuKIPBq_8m~`tT{H65w<=Da%>8W@UZl3x6`C%Hb#q%+N{~?pE(R}~MJij%7 z>0V0zpLxzQ`Asq3OAQ?!HSpUIPJFF+W|o&VU1U4|skyjjOw^myRC7_yg%RhA(YEut z9s6jU?G~-SkxO&!_off{#-A$`-In-*LgF?K{M(o2_ z%~{UvHP&$llo?=zX9HgVU_h?hZXI{EX zet<;Wkpg9eD)tm?f5Rar3yU>*K6tF}FTEjAld&yHj!$aI)IS4d+tpqi>LYl4X1 z;{)ruF5>CD*(^UA%sv^+p1wZJ>?1U@f9KY0X1^oM?9ZZrdt>%iEB-ink<9)OUT~it z*?kUP$>VLi`U~5ydJMdNuwMTFJ}FUwYs#m!jgu~w^_#}RT+5C zXYg?vC$ysS1YcXerL|J~FTpOXeVJzQt%7SKn%rmaOMP4~sTyI8x5+JIg{e@LpCVhs z-lt}>_v8?JgKsonuV>;%q)c1Y{lvGl3tuk{^Ywi2^*C?|PAwQOk9p*B+qu0#IGtR6 zB?8o>rKGKZM%`MU%Nd37AXAdrae5R}r++Ry5C4V~`TUQUfd4-%GU`NXo~I%jJnRa+qDkAs__y`m=oOA9FC&1`^5ia52nH@ZD! 
z1W_FtL+F!F3b)}^?poZojjwXq;`Z&^gXNG#$PgO54u=3_Z-{sw`V$>~-SMZ&_JJV5 zO9+p8BZ}=5S2`hW_O`#rn9Q3}Vy8xM;SEq^9|$BXAX6haQCLuiE-GHf1PeM*EBs+; zd%Jblxy-`pfF>J^Fl=$G!~+MS@(@(@#3Htzykk?P#Naw00l+cqBZ{Mi?Rb>Hh?+x6 zv`D9bjyU@i#uKENXSGF~Pi*%L0&>fN2F5yF|5)+^0aoWm%Zkn6h*9U7o-bovBF@M%h)8FC5h>7e(Y=V+&JWtG z#G_z#n_Tbcy$RuVn#ee3W7I`-IQ%7sFF2I={6(&Jq^s@ZnV#u?w_3EV234RRH8a(DvW z8UtZhwAf3fP$(9JYyjS2Je0~~UO66MQ7uu6DPQX31r#}XhmAn+2v&~bSZ$_oC57$v zssuCZR7PG~F{+Oi?j%iOANb~3b!Dfv&kpUuazR!*toh&Q2IezRqwz4Dxf!ka5X*n9 z`)mY6DpsNC4=b$0K=D3{0fn7)i7i&*hCH-e$}sE%kxHGC)jWS?6f{*7N+VZGHR)um zJ(WqcN>~u5iz7To0?DZ%3Cu7##K&qJ3^An(UAE@GEJ0%oG#^fO@em^ovpMoE;yX4qaZVYE*s3*0S(G8PIbIYaKZwd{Io%5#Q z0nUauDp$2X&lcBeRkyQ{96$`k81{s!%J4Sv9eY)kdC(EALeR`=ourDuzY z219otfMe!KO{68Ux07`gwbquN_c=A>^+jb@QEl}q%T`+>%S3M-AqCx}P7?H+)UnL# z+sxb>{?t4D3)Fu>DS9B+%-7ErJeAZ3fiIZAzv4;%NB8<}fsm}I^>Wd7ET-0Xr)YiG zHaXCD|IM%OaB#1etM8^!AEzQ^6t;u{ftH!Y*@rsbFfTHnnV#c~b$z3k2I4~aJE zs_Sk-cw`ZZ%|9#Z_GN#DeSph4dq%NwBfb{#byce<=+X;PlW1k4Nsl|qi9?0&U zmrL$>HQwVo3Nq2H#-l!tEbY`~xwDJzi#lW!t?j&WK!H`rlsH3yTmqhCccX@bUBT~K z$7wiRp+x}PGKfdSSrNf5N52DajTXM0rCu&&z1$wQvn^}E{7|Xx%eK=yTJf^zvF ztWk;4efLLKVr=Rj}hmCNP1K|Mk>jqm$K31R&po+>VZS(;E9E1K0< z4DvPMuZ<=g_}bWV zRbmmgx53Q&`veS!y<6)exD7egg9f1OoRzWv(r^(!*FY4`S=#!OyG}CNaB4;yPR(e; zsX5vZ-7l;^4<5{jcQ!_2AHe_=(uEDZk%*2u?RD-Q*#=-4!UIcdzleKK6dk1~#a=cq zdw!Z|tJc;m1S1wity<`i)*JiKK8&3Ny$_UG0zQ`6cw>?gw|CUpkYxvs)=^;PkXFnF zxr~Kb)DAM^?@nq(^!_1>kkXzgMjV*41P{=BfIQ}v%0twEI`$bdxQ4^EmyGca7~osQ zI4KhnuagPq!a9`HIDLko+7Pq7VS{0CMKBO%zZ83qThaVYh`4A>uHvb!d;?q9R@A~- zJ%2v?G3-Rc9`M(##Jf=MaI9m`vTRM>$t`Zx@pdb*nq7mFtb^{!!5i_azZ&n=-QgXD z2sp^EA7J$x6sh{V6@N!K^D$~tOki#9WBjSe7uVZ4Z|L0Vkenl>z0FH34S12xc5{F6%PpxJ#!Vi~`crZ&!&5$8GKiH(j;9u4(E^RE?po z+Vo4_1=YEF;c}#ndKZ+!sLS+%*?OUr*`gPOHdsWjOrR3hV3@(eAPg&pWC$g%>>5wz zxkAg@dU2X>IeHISe?Xr=YId*~VUv0poq}G-JtrJI*AJ#E;vvLfB%;0X55K~MLc>*yGN2S!tA zuI)byuykD2d5@Mg1DW=9cq7VdQkm|~S&lFEc7@vt#TdmWdpv&PReC%b6-+HbDCSPL z2Zx}Yj1tsA7VB+yM7gKOi 
z)v|8x1Pm}{7E8UQGlAvD-UBV$$+qajws3Wj?rq}9s4EF?Bs=LaI{Tmv(OE)`h#WFRX+_Xb*5niK`h(IDhJjO{{7q$!_KevlOEW3+w=_izWTM`NA! zh&tDlz{LkQ1eC&Sf)m|T=)ks>0Ic(-%&GGxH0$P;cnrDN&MkMbgDi~dF_QVRy;0DO z*s0#Cw~g~|Q?UTh!Q9!0-AlqnP7!Z(lQq8@s{t_d<%$4gt1)!4kp+KEi`cN8HNH~w zdcBr=>f^yEVbm$lZU2vuMZSh-xb2*tz7Twek)%3|3J2=p<3CbOq*Tk3{KkG!(Q66T za%r=q5kHw)FAB;NE;rs3agnxjcOa7%;q=( zAB`V>e(y7*-=f~UIVeiVco#RnTU2m9{TcJdBhoESU_ARFtx&HhIj%p#x0{6>y!IUH zI(YRR;^@DHmBT&|99ri+$sQ^tx8V8Kdn!hC#@UTj%-3Y87!?_HS}I1S*Kdd5jV|6) zY(VgKWbjtXHz>TEspG9MzA@EX3(ma-teBXE;zzuA*?5#Jq%~;GhxOg}-+tgM$`sk-*IlGkZ4BTZxk-bs>!7)?C#vhfW}95(?QgNMtSx ze}VWql#haMyOq#8P}P{7&xiy>4AbNv;=G>+C~=i9!v~91AcyIS)}cu*##J8c8og$e z#dcn!nMuWEj$j&D`5F?S`Th;9&3k#Q`4<=0v}om7Ig^R3_&p*>o$QL(#npKamQ~{! zbrz2!QlyHoft^GXK1t3|h?UE65ryDJ1RHk~z8drw%tm}nCW0Jl4?uRdYr&SY;^O&r z1#zIEFJK4b)u5QdI?$WTYmA1$Vl5J*M!eEOkdc+UG)0Cp6)rDBhSvO=;*dhwiDqR> z86?mtg16JI!iiMvBH`KW@E^96D-|)MDl~j{x>QKJ!T&}m6cfeUOR;_fD3&Lnd4e>b zI0%Mn4qf635vt)2qJ?te5=~MhGSb8v9&a7364A6093`Y;S{!s=VfB;h;Xz@7dx#Pp z>Mrzs2yIZcx?RRPGJ=bE7nU_doDI~-M(e_MxwfpK&ihMQiLe7!mtdw%WT7x4aY;aanJ@+`x0L`1OM9a`1kRdDaZ6uF)(XynS!nsJGsFtx7;ZV%y z`ST@vPzic8P{d?}6dzfp@1X*}`$MwY%GNvkZ~&H|=^ORVDaP;=sz_%0ObhCBEV$sw z_Rc98*+}Wph=Lm3jI8P~MWt9>J9UoCctT|uEf7)xR$?@ZwQLLSK@)heW=n!{3Sqjo ztl2Q35n=u1cl=gjkW?v(eB7B-R5dcEgyrNIdy`+)#7`+#(6A%Rn#uOQ{TAP47I%#P zo$e5h|E*T{-@%g|#1ZnHQU}X-N}b1VSEN}DGHE)R*?-`Jecp%o;PYJuekrLJvj|J6 z!PPMEBwCRH+VRw$-Ugdo{R4pP_a6Nr`#o8*rCE`^UN#%zK)g<%!ZNP(FQo8m)kBg^ zO5KYm`>1K=`%Ll-zE3mHUm{#p;DdeR<@`8OK2~IF@kvMVBv8du#r9u9epI!;2JB#g z-K17YwXyyi2v#(W%}n-iy_e|>BhF5l@^58h=v9tWKPpGDVd=L=WWIkDqO^(s8>)7% zsQ4E4hns_`e$OjlvvS5?cZ!cwS7NF^(+}!CQ1@-@1NED4%rbmlZbOcFvK#$fr z?^4!EsN*x2HNMO)gck`^bVlI>yKA0gE>rB`e$h%S6F|)A($ik?d0o}#)?KY4=eL*g z0HhErCstBzJ5$*KM!ec;3YU?ZKn3_m^Kk;f_U>obr0+9;nHka~je_X~-E%G1sXJ|N zYCYhYrJ&Yo#TTT+rBc!{pdThXaetq4KcR!PH4WiJOAHP7mqZ}ndfU07AyVPb@7cII z@mN^IP<{(2Dvp=LLtXZWNRI@$5=0&7)ig&7JBZ$PZfQ0-ngw}5TdeDWw&*#mEZgwF zA$tsm#^C}3a-@}b4{gDLLblXF`ICJ^%3tiih_^UN1(?$>Ukt1`LCBxejChGQLa{}B 
zZS1qr5P*V%Q2#a^*BM?20^W!qTL>{m3v*DufFd)6B9DS1(0P1^suKWLNXE%&5b`@^g!E*^jRV};#sXr-~N@q#r#4_{%n(P(0|KQ0MHXo~k%~9Z5c@8mnO5oOnXi!NK9Wpc8?da#PGAiR@9xRzP*K?2rV`G*zv~ zLDO0F8=ff>8#{fFpdX2v74AtF;zM2YD`>KtKs9RH6VIi#)r&Q*qr{P9H(BwI>N zqy{|(V#`29rK528aHw+xDGHy@!SY+}Jez#~B>XMyfeOY%0(I{{M%&(F zVzy8HsF7^w7F{=z&X&igy_AfCLYhlO^n69U4Jp&+~D0SxM z12zA>ZzZllSOiOQbB7j535WhqokKB3Db7h^aGvE1xR03@rep}myv0#1ergeTm{YQV zmWSTSS(10Qv3nhvTlq(b#_eyzpn^?35yy1PL_pwkxI^M^hI4j<9cw96f0+Resgj1Y z60|TPx;a=(G>5bSUZK6%@>$xe0RaWPH~=dkJPt$PaJobqbsjah`UH{!ox*o`5p@g{ zNjqea&{=ImBhhc?h;ycu*ogp47NKvv7^KRs!*a$sW1vg046H@_Vztp}jG`9WZC6Vf}0qjO_=Ff1cV&M?5Lhb3W3oy3F- zXi@PGEUj0@3;!BoNrXE>JfSJcyhBx@fNJ|p{G5n4+IGj7=i3OR-}W%60{pl5W(j-a zTl=H6Jrr>|YQ|6)vA&CfXgr^@PH2mnYwrgWZ5=qSb(rCoV)@^MiIAI=j4zA-8nwR7qHJT|5&~QI=FfuQNw~+x6 zSV1)K!$!~uBmvJS?N}#7`j=!tnETbaY~3JWm$ku%9Eo*`{K5s$jg06OKdn)Wz^(cH zMSkD$S5EmW>z_bQjiWKqc7`v%7^+N??9c8ZzjG08d!r%0XSadm;4BmvGp0~v!6L`I z2vdmNBYGWFcV)W*U?RVF9%+gj6p@FJ z0yS7N*rdwk*`eyCSSfW0o-Aew!u|0JoOvZ637vUg>X~>NE)dpJhKn<=9ogufvtG}{ zFT(tU@#HNXg(|{jm&8N*1G8RPe0ieh&^8JPQqncB=S(&DQxmC%U>wsBjAI(aflUuy zgN$R)tE%6Db1?Iir8av?S-`>GgaKx0GO{X0MM7XuodpbW;Ie6TJ^$W>O40_cOFC%ULJ53bRI#$07%`G1;NqV90If0|?^QmIACCtk9N+6}Tq8&b_XjWlI8PBVhvOLYHEy$$K`52_$YT*YvCcY2i&=@?!UeIX zb5KcsR}an%Y9M10cP90t$b41}u4abLhymp5X6ld-QyVp={fk;(fpY4*D87jzaFEY| zy6na21cs3)GeuU-xre}uNdff-ysJ(8c>_O7lV}{(d@tgq(`--QO{d}GLY%GYVaY`* zuFiu}U|&0e%0W94Ino_@1}6Mp5WAPAuPeM_JB{pE439+IY5tJ+@92`%<;;DUWP$zb zcN6Uf^%0)x9K?`6Ht}O1GsggToq>3nfkh_Sm&DPDANJ+#=BdoH4Nsz(#gw|;)T;xLdtpu6|Ywx4l70%&s{b8O>K zP;5yOCp>q?#yOgddI!t{cL7XR>dx0A$l9 zVkc$vwpnvxMh#Zt4N6?$GT-}YJ3Z0EGiT?#j8>M36VwlLU9n`(O0%1K2MT5;Gh!)h zNMAc?jU7u5meK!tkUw}L*stXONyx~xY=lRwbxS;lfNk8AK36ClEkP2=x4O=^8 zwcjx5BS`HBV`Oifp9D2ex%)=~aO{1ms$fF`%TfHelpo&k zC)UAIE*13i$R%Q>gJbGhmH`O&hu5w|`=!-1_82{oK!66B+ZOk73){L>e zL_3a6=NOpFS}!Rk-4h#mlDd;f^5GV3973nm;#PdQ17OFr;l-J`N?<&NN8(K@vA7Z` z(f2Ua8#;V(|AK|$EO1`3o$)Yyhu+P)db69+FGrSr1bb*a91U9$@y_d9^CY_naXU#H zrpRhi@iIE1weKok=Fg`yoL0DA8x$||=hPYQ&#LpCwzv<&nRWl2bL;+lY3RKS@0^Wb 
z+Var*38DAFq4!fm@0FqVVWIcx(EG^HyB&IuhTcbo-Y*QjUlMx1H1u8{dcQLCJ}&fr zZRmY`=zSvJQDN41a_IfG(0fDZJudGB0^f*tkV@X~357R>!e@ovn?vujL+^7!?+Zfj z$ePigoE%dHJ@9m-YPP{|0Z&!ywsyKOgN1URTT8LYU(1{HFKBeq9ghjol zlHxz!%6>zg1Mr$fot;?nv6$?>-`HB`u>v5o?vs@^Q#Y5w>aHEKP9KQzC#{Sic);2B zHE=aF+}M$N?pg3tCVQB?kI;GdWZus}tN8K?A)=(|nGOg>wR8(D1wCj2Qo5?JBCFKn^vQF6R!^UeHc-<1c!YfJnAiK zC3a^KGpWO7y7*uQbN24BIlzRzaseJHaR(d8t%j9_Zj3?jkr1E`g9KcvhYacQGAj|F zwPFOdrq>?EOk(v+r{@uMNdtFE)Om`LD9))Yrj)~ueTIR}wnJDDc)f5pNj|UkEL_Yh zBMjc2ruPbV@rtcNQ*kgOXmckrk7thFj8)Xxu9VkKNlB$$a>lefPXRE8H8FwT%g81L zNQ`WdDJ#Z-ZGEnp@;H%Y2Da6pV3zMKEwqra4XhB|4jzdvF7DO0p)Fh;jJYiYa|Qw~ z3TXwbyH{<@IB4j3f)3NeQ4L_Z@<`-?jv0~i|u&oQUV_x81BFe|T?+pg9aIe7cg?pa|q~>1Y%FDeArJTXYybJgC6MA>Yy?t-}U%B`6 z41F@(`xh`#4)=0)svGXzMmpqjFX64kT8R?sNAq&8kj~ec0O6kmCL=*-~A8){XGh}!M&0Nxi@`MKJHyPS-3ZeHr+?=C8#ys>nGgX#&q3r zFC(qQ)yNmz%W`P$J&ud_td*4_NSa__N{uh1vMy&r>#a=3R@_uRYbyDWN+?j^jHcu}GR zdtUB+R&tM#S%iBNH|6EthJ4)HfJFPqy%?or>-<4du3YXN0B6++!o5wv4epgJ$i3?s z+(q~9htk6ODA={n+)Gev`lb@$-cqLPj(ZttCB`6Ma4*ZDx%bEe5J2u7LkY;yy`_Zq zW;7DojWI89FA?SA-p$7YSh!c<_rkp|0#b7?apmRS2Z?Mi+`Clj(H-~RH}QYv-a{l` zq0f+=>U)p^pKWnE94v;7ln)-zpq%*#kC@qe0rEQe;^{raFa$h`LxM-KCzKxmk~gofD*3^aT3 z$j7`LG_tkXOZdGo?zV`LWL-u+PzWXy3|uJs!-du?wrCIqsT>DiK# zq>Ossy=w(mhhi7bm)TQW=!fMQ2hKbhLtwhW>@kS1o7mgB z_gTRuDOwWE$iA@gUM9f`^utgkBE1ku@wuOlhqOIa0FO6>t{GAefi{DSuNn+g=F;$_ zOf^D-fl4cJ5F#-8C}A$nTxKRR{oi9LK^zXxMmaGY=4edabiK%Q12`QrEdw-H0JW~S>-rWt7^#v)%122c(y)2G0S5}8gAM~+MnCNu^EgvMY17&sWfBcDuv zehh&1V1V#@k?B_fX$AvCnNOzwKxBK7={};He3{lX^hhun%I>~SWLkn<3EtPCY?`(qKd=S z;G6O%jsSut%5)j_dF^od+?qBNQHE@D8ET-fkw<=gk zz@jBJSWKWdGr+gSII4(-1r$-hSCA9kc|u@JnmG6OVQER|9@exY2S|ceeCe52?MAcu z6{Mg4Xb$F?qWaKNhX;Hu`;I5h#sB*d}cWdXiH9^GEo&mc-@kWqM8{cy(C*(K>OKSWcW z=+7tb%_DauaZ#P@#UpnHV8kO=fc`LjEH1J)`_%X1>D%+|ug1F`FoL(5cs*Q${ndDX zwGVi2@bS{RO3&H3FL;OU1KtCCybZ+r>+L>Xs`&T?n-R7W9rD;FE9s+sxrNPH1y*Vr|%gsZOBAhs^lplSndopoG_z zk|PCedcQ3`UA_(-Y)aSHp~J<Ad63Y7g-O?>{}(z`L-lQ54lGQ1-v7i8m11Rl4Y}x(O^2k=;UiX7GEy=3qK4d&wU-ac;}m0{LmnDcnN`; 
z4h6Y%_${M6@q~a5O_~l&)rAhFLaID;IN<89qQj}k1au(U^cA@I+t>3lZ20C_ZQ!hg zLPd973gH>Ddf}o17TI*vA3kq7YDYRw>B?2nIA*(5{rf82S;LB<-SnIJ=&T)tAx=8$ zr_oFc*h{0wZXoh?=%{7wt?8A`j@R6-ny~1q^I>e(`=ADt9nCPe8s;R<%`!$~O6w%n zE4I6jHSJ9dBk*>GH4T>tU_Zp}4|>A5n}#(3{ID>fu%^Aw4`d0f5V6m1AJ&nhFeUXNj`M4cbiB*D#@_t`ommgN5W9$1!sz!7c_3Pd zWt8vsh*Y$m=M4i>U3g|;#CaXNkYwx{d!Jj3f6Xqqw(`9@vBel`ItB^df#M}>GQRZ8 zLModYK$=1H$jW#1zVr_)qtON;qWZUjnMC9px@BYFA>^VewHQT;WdOCXm_mg02~-GAE`R6<$C z*EA%g!f>myg8#|;;ovMp{@k7$Kigtm&c~Ll4OV;zJmk5%zaB*b^aOex9F`@Vk2Oc& zxR9CmE288P2ku-#MYxC{jUB=MA=@_+0|yg0IwPF!bX^o`1ly@8fm6bYe@99nD!_JT z=tX+jxwVKy0oyJ(MwlNO0~E@)uAoUWR`h#>^PNQuFiV-Q8FE*pCLWaqgY!#BCSecG za1Z)vppFP6n7SyLK#C#b@F-?57pTS37E}-OW8q*DY$8-65o^0fB*3i2_9> z_byd$qlbV?ubNALytz$+;4^sNh)UR37&ddN!ISJ!(DfsFA;#krrx%|UaiSEf)_RiL z<9onH*D#5#~n=4OX*+{AAe2n7!nqUE;-0CZ4hPf6O@!Y-0$d*Y`a6;=67# z#TUE1tDy^)vxH{3sg)=b2Ek!03-%W(9k~9P{sow~*^2TS?h7vp3aewe}lNJL~k^kZ}VY5Qh+jAi8Lz2v&D>`JS;aYjDmkdkK;;M zC1`v+x{O~mh#9|ximIZbi|OB>Llj1+lrOAnexwOXwCPh-U{88aRWl=R(Hw}2p5UN? zNcpPivd+)1sg>v{MV>oK3OSc3#Dnyv^oTq&nS_A=urzAv7!c5NldPX#KmowC1rNTV z!p>gBM^I~ee;6I0OB+72DdCp`o(@J@iJvfE_yACjNNiW36@L_#1}9Py_Yy~r4R94n zfmI+ZHCBPJSXc#uhnB(YU1cW|Sg!&hJo~F|=Cy_cQcqhDSKeuhFQuG;xK-z><9|xL zKW#x8!q-L`igG^ZA~K?O$bG$Oi=Q79&_|{%h*_zNF>PU;kkp)kAn=mF~l!|p{L!dK%X9l zAC8hcP+;swQ7ruN=nvE}6nbyC^?MAL-yoC0ve<^Ou}w9xm4nll?Er5KYGA8kcSaIS zKA4uc(=I(6xGrOOn_^}x?~76WsCP}P4F8sJB#JR0=69Gm4jN3V(=!UkuVX;FZwT$C zB(3?{&nTUFEIGJKe=Y}~T_RTP+!7fjOlfFD=Z^zdz-RMTAu~$i-p!eZG24zoLNj%| zH=#*J2ooBZrkmlxWlZ2sY4FDeC(X``4SEVmr!;mmF!+`v9npYY5{>&bBV1o7L^NJwr%;AltIItv49X(d?U;VL)^SX z{e31)%YJaWq%f$LEU2gaQ_a%Xz5g&1BpOt)t0ydP>NZB)Z%iHTdga0AX|LKKI8A`)Gk zF#}Nq-JNp$pZ+b!l|ldE+ixgCgQ%25qdbfmmu0`rk!~}`ev`h{l~lepfjSVEm2i-A z$bO?$VI{6bptj${U*2ijP@xwd(v;gac=a+h*iRT2cT=sEy{aT zsP5=hxRGSyP|bPlCS-|@al(4+NYofnc=Aqo<9nqSDG%GNWpP4m*Nqi72VlZ>Q&PAN`967W)PC5$E3Y|xoPzOpl33W{pc24?lump$`ergUu z9qhu!0gCfQi_#i!<}$oRJnNmRW(K6=IClP$q@bH54EK{9#3V}vQ2K_<+DWXEdQnOP zx!i5d7pInpXV;UP9t9k2G|etOZsl_lI~ 
z2)Bir!R~AK%h+1U5cS{>p}Tli(MW$2h!2ibuz-#b%-U3pA)LoB)NniqxthogS5BA z;QVHj_B2KoMs(T=ew6X&`v!c2fTh%01HK<4d+0K3ei&t#f1XbI3;62AF3Fp4^gNY`zQ3 zcPFr`?ff}N@=2-x;z`K2&G&lqy~aFS@KoeD6^UOnkfG1OMV1N01QN2DOMe?v!$EP|xrqZ9aa> zkGIUngLvwrNPlsTF5PlQKL#vnfxw{t_|PzC1BNBNLo&hnr=|qIl<%$T7*oQ_O#ERc z{s$($w~0T~e4mD=8e%>w@MNb;Gx4kCyF(S4@6TA!9r$6QkNELN1M#>? zxxrN6KJ%TD?+&$8o+))R!u|2*QwH|;^2GTEc&hm*uR32+?@^A4slm5;RBFPjZN6Ok;`#r(K{KL<+mR#l4+#(Zg$kSR43-z4aFP56II_&4QygE|yX)hdDD ze*=^>K;$2FFn`vtNPYPBlF8^rhIGhBhg!>+yOD}n-zBNHs)Y=k$3SoRZ)Sj3uQrW) zxdC_t09-Qg*>7E7*@S~}*V?Ht_MJ~J zX~613{mPyG=5;{kmHYfnXldqEcKPEx{lHZH6U=nmyQMnfUWmI{bH<_55gEbOZ4?^U z>l#A|241Ngq6@o22NeV-Kz_~yI22W1m=$prw1G5!x~dHmkJ0lGIoZ68vCl~B+5Q;B z728$2CT+sa)DWmG1CNBom>5UX#qq}DG18ei84qkYk{bI{|VlMVJeJ{mvD4<(XV?DdBVR0kUVF~R+(heJ$XMRg+H;QM??`dE0 z238|5(Xjok_^GhphV`}Lyt)5yg~c}H&0$4W{6{dn>b&|Sj4T2#sfHH@qE6%(nPN4wCU9u77LinS63_+ecOxK>u7II}Ov(ksZy^fKY(i${X2+p)5qC0< zKE!zvI!n3OTTZaAY0(?uVJmvfQ&rUpVq(Op9!@IajcSW{;}us;;POilfW;cCm*e=E zM4U_7I1WT2D{&?~Tw`N;onpk9tgOU+2#L7WZ4q~rid3z!64Mxvnl5aIB6IDrf7>vh zWL@mBrzmCZu`d|1C(X;BFo=tj3ZPb#dBq8z@hB~hRDkBl3bi0Rc& z&gx3WNuz^9vTLQ_vj`rd9(}=^0>YuONGzXzpVdH#Xi}hk7?Um`E#KU{7^STzrc7zs zP$(@|oS?*pd0Aq^0htmrp_KSfz;7DfFgWaR-Q-r{Sqv_M!aswLh&ziZg&gosZPhb? z`i{gv!s9UMBkt{x^3aJhbF_Sz6K8PXn30bk%81~r&z$)-;CR}MoGvqxCuDERq12xB zpXXBQzZeUFO4a`>m6%Yd6sz6~m8L-c0xI38sl=3fr_yw!+Gi?V0AT3KuRNLObinPM zN-w0kP$`KaDD+1|S4P@96IC||ZLa$MUT8BGS}LGT93K()l7_t#W{g1XBm4XS!23d& z;;$r3^DA8l^G7%yBHrZY5Mk;eVOVGd(W2_-938k&^rMk2+gVH@g<@?cbub5a2|m?X zXHtwM3ev^|i6rEWNFZ&*I_YrCZW7jagjtD8G`*T-NS}hvAN-iJ0iY{h-6- zFrqjM_f?eB>m05^3NwGxm98ZE$Kp!HcsKT_^?uP44bDcYyJBa= zdiJGS@7BV)E4I{G&+e$ZVhhsDrRP;j2t>&&kLeGr8L zC~&i3qhF1ihlJ|{tY;_z7P#cBnvq#Hp7>2FAdMl!eH};$Q6PD1V#|9)u2^nYB`(32 zb>AKMMfArBpyKfdC^9@Q1B}!f%jpoUe-(VEE!jPTOAOA2Jwm9;bQI#Vm21PLi;&ApK+1g=3PAN)`g=151fhVV60EC*? 
z%rGU98KxvM!<0m3n352CQHC@L8K&shaw5aIRgtO|E6!7BBi;u4s=|t+MXA73K3$$C4;!qLgUD+SQfxdZ^-Zxmco{AQkOc1}r$ytA)JWE>M!xT7X(ZFW+EwX#(-=}2G0FVkV#&Ruc#hy(6fiCcHRwwQhvmMNqWUskw03>F+ zi@83h%$D0ivuB#GVx?Rh+>N_c{y7AikHxKeYholK0|VSOpBtH+nznsPk}1hkKm8^L&i%~l8AUk-NB&a#8Y#N zI5Y%;w5bC3P<=UT)#Kz6osTRM;tmS+;ZwBgH3_@XkB3PyO#@RW zmU84{#kqkhca6wU!FFbDCi{s3C$E3!E%N%$K^u@@PwVEt>iKDVX`s8{fzj%V6K>YM zKa_rarZkTCqtd81wv_vOya|emN2${>M+yDZMK!HNzXN?{&F|YK>N$2vR7Z_aX;emC z7f_4ZdH`^u-K_cN?OkhK2gsU6>Ne3?FNO3L+E#eo)l~?$-BzKBZu(Ham(!6yEhNQF zqq^tLrT9Y`B<#+^_+yzU3~@v|x`QJXnCp~}XyLM`cXee}Um;-2th;!6It{^Rz%Gys38#j|O$UWAH>dyv zr!Ou!=0+_?RJt_mkz8p(JEq`c&Gg>>)o7(MG&`gWf+fXw#sv3rd1qtDgxk&7AAcP5 z$4ltvfs=D93K?-Hmf^7S#Y`!en%$htzSImG*RV=;n|kmB)Wlhal?~BC@d*ZY)*PY% zCqiT5gkvpSSyR(oHUn-OfX?qiLUo-c zqm3FzA0ENs)}|XxfhAuWfU8R=NLKuPXhUcp^lLzp7{G=LbIyBY0k7!EzoCni?h#<& zCc~;W?8^Lg=2XTX8AzeGI_+k#L&6SV`pkotzAd$S~V72?Z z7-96mPvCD=n;CT4@e(u=S?a#cE^JSK4}-X-eE5GF!vB|(G=BAa;&}#Kq#i*KM^Ar+ zaHwnf^GEz}M(j=#b2FamM*du9LTUx#19dyWEBQ0QgdES0vk7|?-!A1(fBsy6A7*nL z;y8Nxwmdu3KzZ&_>&VgWVm7^M!8>7gBMl3()1+m2Iq&@>%l%s>0K$J6pw|RwgIbN} zQjVOI#r{n%l90YzkG|56%5LE6i^jhdj5^tC=k3MasWRAvhB=;8gbw+IWRp_=$ERtv zF?d(~SRQvVm8&9s{KzUs?m&yNCMSd<+7QMHY~<4HA7)9O4tVVJf6h&mrz`@ewPA2C~&Ft*aqg2O3%lWeCUs7ai2{) z*jHZ&x5L~&CF>&HqWbj%AvN>b5U&TZpKuwmS5!LIOtIC$nUm_xDK>MR&6DVEIgG9I zmb9Uv5K2818n;sT@yW5P8Ko9yUjM%zyXs-=37ISgXI@XuyjJoRHDYIDR6vt5*6 zUWI!UAgGc1G>(?|?$qfHj{CeReFRgVy{XfcfF#EK8%JxnZWMGUV7-My>@`oCXC5@v6F09U9(OBeO9JD<03TaZVUuK`oq2K^^RO*jd zeaElC_!?|i?VebqcHs0D293iqw{cPW9i;n2|1uu^$3|+}aE3DlXGg@1A|8`4!B~4D zBI+=&Iu1t;MZNPTM=M&9ULk##D=Z1=tiWom$+8pK_~_xT<^cvc2ZUL;C=53KGW&|< z(W*DB*k-ZW9_Gk6*CSGo6cOh>MzzS;9M|nkFC`#~u0t|TfxZN0K_kw?gfS`X1z29n zzdNH<8?C$kjk(h(=Xty&%iwq{8~n3E5S)dNFZHLRMWyHi?KA++!zUar$HW6WTkNXO ztoS?VwM^2?`c)X?xJHxhrMFD$NnCG~g?lj~DE7$8w`;85tbp^}zX`h9n%Pnl-)Q$?vn1hJ5T(xfSHyi@K(aRx-_2pSH=_w@>)Z#+8t|0+K*f_+SJ4_6 znLi>R_UpTg2ph`42%nYcO9(ec6m~@_T9vZy8pjC;&t=JSYg8!Ei*%uPiR>)5?z)*H z4R3k@nk(WgoeREprvRKnUzHSmWampSZ>7RU 
zBD30jQ#dAF#?G3%`7qL6#lhHr(`gpP3~1|vGOZ#@~kQ2e&&9{6Qykh_+9f)G};h~0CI?OE^!|Ssl8o3=Vri8w!WNJJHi< zZ7OUUtx8&VU4SOWf;Ff{b}H(WLDK687rTNIs!=*ASCV4zhn-C5+~JF33tRDJ$i2y^ zV4UkAXTe0gnnCD)a1adpVE-hLn#vXEbW%JEWM;A^p0W}LBJn*E^zicJwH#XNJqLE~~y6v5d zMtT{?3GiwvH2DLRzM9IHX=k8SaNti({1r5L4>nR3wp|!opiRDE$Cj6}p{#xvvyFb- zZuR>anrNF9SKRPQr$xGFCchuu48 zYV1v)NT3dUMWhpmgn_(W1wuSRBK5+M)5tS+57bnGw|=u6b<`#mNI6%?wI1i=uy>+G zhFUyC`q0mphU_0F`1&fcD zIoI7{tmS5T^H*mXKZRr~A=y4bFQ?43&jX^n=awMxB1$wv~qE>Bt`ZJj3%(4=(9Jdd~li#c# zC)i8*)CP?@#KavSaTx#0_a^lPDi0Wu`*yr@39w=q(zluKS)c~-HkoImc{bq5B2Jd? zl$vAUXG{7Gs@}xUlJHHciQy)%xWKGc!wk%+@?5K~62C z^*vbAt23`7Gp}~$HJW)Hm3h4|^Lk0<_0r61edhJb%$REJ@tN0&@)}yJtn~G2 z(rP&4AGQL75Mzo33esV^-=H&`p9Nf=0hS$Ox~~p^pCWK7MiYi8mn!_Ie;+0b#j)e8 z8R6iQHu6c>5FE#3UJM(x8NG_pjt2H(vvr#0+%)A#L-$HCdQJ_K<0F@o`Y$~Nb1b;l zwHP%M@B>W!{B!GaFC#q0B;eLTm`2$S>)(Bk`;tPnFDfWi=LXCL4L43j95P!!Y9!=H0~lVa7xkzPianCPwM2U^ z)X@fNoDYR@S0b4UwVgEsuJMGDyM}-A4HPh#&gN+fcl83haphL)H>eDdF@MHI3l*O* zyAs*N;LK5%LSJwy>+K0WyjVSqF*@7oNt=9&Urqve#GNnnaXnHCn_HPG1Kf$mXpoSM zGtWP)?F(pvh`U5nP>$v_qcU~vJ)C`?smT>;;`L0yYLTFmBi{T0(tx;NEzyMB)s;u$ zBnoIj-THG39W$*zO1C%hqV+G)UtP$38*%}42!+-csBr6F%|gJpG>46G{4)k;7asb7 z0b2ktUhPM4{-La`reJ|)Inlr-5l>jA;U8dU<-*pYoe7f^2*VFod=tPSCn)$q4c7dS zEFnMW{P`qq;R=lc70sp-8KX;O9zmvH3?Y>_qgfb3qohYbn)@yq+?jP%6Ie`lzEIEg zNK5br6J~(H8#9s;l5r~dyiqO5muTY3skpvp0b|PeO4LE-2kD2E6{9tB(AwQBgB|w6 zu7=USEWX;e`DK`dD^50nINDo^XzhfJzlKS41VgPv^wNVYoFN}bh#1ngk>83x_=h#ni7gY4 z$hcEPtsQ@&X5-He)NIV0NgZM}Y)k~GVm5`y#$BpWk1WB)LL_fSn_*+3?2e63M~XUk zn$S(Nany-1VKy5V(1r^6#(Xw@>?D-dU82bgvk%WMxb#VAxtiX6*&a#8-0^jPbBcA#D3DsVwtI0ji7l1yu_YW}|u@ zA>dl>bc53%38%p*It_jXr@#l?*HppAy>>dx#a*xs~~c<2@6IxJqCcAfbqlxD@Kn-)SgiTFmsdmg2y0wa`GfVdOD0>0a20LJIDYsQtEw|!3k41QF_;jo?!Frrm@l^wdaIrUPPmKeZ zSSJo>d*>Kt+ODtLWjS%%#4J(1y5Gzy`alrOl)3xTL|MN)rMdh_Da~i|r8GV9)wMKj z<$&>G)u=A5eC*1oc|nfb&e&483)o}%{0n{oZj;((FqBb=pg-}Ds<`X(ctPBT0A8@+ zEh=RioCRwj;I0J*F{7ML*_lUD)IvQ;%j>;|SS3j`u1-vb`{|+lKJ>JCTLr3i;|XzkT^XE 
zZ3G+wh-;E?&Wb(}7#!>C3uhP94mP5K0j8mN$bZg&f`ZDIZ!5{Xf&<&-~Btv#C9R-pDi*I{? z%t=FL)90CCANIyjfg3qjZTrdK*F*;WUc0Dzjm?QM3uIW%hm&YFsv-d9HJ&0P5E>1W zZ>T#DP+x1P>zD+2hq^bgfL_sSzXB6rUJgaVK3A^l0`($J_J9`)!rD;oR#R@X>ow3U zCjwPQvyers#4$u~w4yUQdz3wxeX4hj0=JMCcti~{4bXj)ioqG7z>QzUBmXy7c;r#& zL7zv~qiuypaOgbCtH#7k7ikT3 z5fyf9_%TZE$7GR-h0?MVn+S~LeSp~UR`kA7?17V$aVcCG% zAzOIPQ_vc826!Wq?7+}-L5KV)OS6|Dx#=FLMsZQ)M-Y@cFIrZLQ{|FTXEl=r9mobm z+TN5^b?&tU5Wjm6`7|PNlF%v{<{cu(q#pR?U%?~1nQtPzftpS@!~0`8>!*p*b#}vl z9c#h5gO=NH7<|%794r!c!%FQ~548|%%Mx2RQ*Qo7}fSG!dnHFYxnMr8E^;Y~=9>~oq6{3nY?nTAk zx#(W%ishiPLrW2f7ORm<6VMX1L*DeSGeSEwOBWlo#Uoj5aWr&oc3T{Q5H^7`8VN(q z&{vfEl-YTk;V4OT&1|yfF!0(fbA;DkM`?8fNUK@%|ElT#8HOLRW!S#|iP@ffFS||B z-u7;VQzyHPD}~)i{&Oo4KNts!Wl{PTQX0p!yNC|Az|{=*Zh?nr4O$P$1g%}MKH#3H zv=PP{y9ZeDhx)Oww_(9hPMaJrjTJ4!u7g1EvR5CyKs=#Vt3#{3X2rkIo$BL-%I3N!ljCgh1Q+VYlSGgn8MA^qy3ZR~x!g$4D@T*i zV|?k6H9Z+#zk!V7j>#`QBPH?z_~7%1)=3hH2*^-n7a2MRv6P|8-1a(@$wIARWavy< zpOm4;O1d;eVi76}cMy6FdJr&s$xe@Jb?{I}_WlftS;x3KMh`^OaU>ht1pSAVy7Pm3 zb0V>o@B&LOVNR^TmnK@~#ESbs?so8e0S@bg(sQp_1>Oeli>eB=$PLmc{I#A2g3#BZ$SAQ^OgT7 zDppaQUCXa8?PS|`jT^~wq5_pj+@<0tm`YSi1{Y;o-IaA2oW)wE2+c%{LjmfkGE(Xu;vw}pGwlFBp!r~ze;%b!0Vs#uD<7GtTk&O>&VpBG5IIyd1 ztUwOk%0@xY?quU0B-8t1_KBZrP-&l^W{~(%S7>Q_3t7cnMji-oG6+cUK)(3P(Hdm* zYT=RnEnr|5xrpYCbdg(vF&KtI6j94y32DhfrGKbFc5#Ay30|ehBLItW#j)I^B0lJ-(4Qbc=Uzt1?VnA zgdS3h|W7*day;T&Q4t;BH%r(a)cr%pqdjfawidZ7f^g<-(f5tKL6#LuQ z@LxVDO}q@v+0_;C9`%m1N_(qNOfyQ&9r>IKLYdNbUyc_`fh#on@f|dW!R|si{`-WK zqZNeiT8{f=S?Zjiv zKF*v0NC!FDdiHzp?iK@V(x#~MPL}Gd0Op8$FDZ!u$AAdETTod68}&nc)U6o+XL_f! 
zT}bV|E!4f2!ee_cux?TB5!M&eD%lIIlYrzpeHRK4z~LPUyvH(HANW??wS=!j$U!Jh z-hfcMCgHTVg|mQ#Rlz(FN>Kwd7_&{4&DVp;iAZWub9K!Y{8oFT@ zPGLK4Kfd4?wyW_1hjGI$9K`n55*9D;X$MpuD8l-bEy5afN7=mGBS%(^i{wL~ho~<9 zt>}nXLBL!WiNj2NzhXAd8Nye;E=Rt0;?0RagtG!BYc>pyybCEOSp3>GbKF6a@3}hP z_p#u}9YN2GGg>=lN~Nlg$;z0j#}oXW-r=!ZF?>3XdV zF@Ml$S9}&*HW`%PZa;+u*jykr(rL?DgH2*m0cEL_!&zWYJ3nz>2_i#4pOar%?fE(?1Op?#kx`gtAY-AxjEV>Y`QeSn;25%Oi#o@#R_@`l67#zzLKX{TrmU5>rs;%2w+h z4+*d|H_lzN>XJ`(WY)*#xYqX28)iMbf4WXiSjTigdIstRCX15;aSK=a$5^8qeCM;0 zKb2bf1M6%H8W%O8#$5;2KVrjbOSnAO5E+5oM{I9#_!_0+v$W0MwuMC zVU)>{So%3o-sc}5xEFyXkISGoy77i4H`GZy2Z7$)fyEPR|1p4JU%`) zHqc_N8bvFL*0d2sBM`O>YSmP&;8MYrXGA5g;L7{`o^zjVW)k)HzMs!;{+Q=3=bn4- zx#ygF?(QD(*|(U?U6~nzeZo=?Z@-vHr7jT~cU-U4fR=RW9P2(~qT;yL96zkCnx5Bi(|B6;+YRUI|} z$Amp>V2t4bzkcqK8-2ZrO~ZMCa?5=Z;?OoIZ~%Wu0H0IT9XUzFrqsTW!=ebdHy!F?~ z68MM-n7Qj+<;Ne>zVQv=_RUX^Wt{Koh8_zLhxYw{L67q!oBZ@B*KB?qJ#NY5qepjo zq(Ah?j&9j|VR~c&{s;{PKZJ_uPM!RllL_9k1hop(ARFITgL1rIby1F}-iySdi%K%P z!W#c!?Wp`*_ip$6OqJMO`;Ees-v6Wa%WThV^0(ig5Ql7jT>E`u+V98d*DK%oe*Jou z6g7WG#uMw>g9ZAvG=R@(w5{9u`t@9S=@ok8+8xqkzNg#T8SJ)``_$V~Sha@}*8kFD z&{iNL-vaZs`;*8aZsV=5+}`V-jI!2A@8xv{vwiTKvR>kWmt@9&OyQ`L+FX?XF z1(_|>+#pgzdh-F znNa=VYV_>+0yG|sats>GltAWCAcmCd7fRN+HsUL=ri4C||bCoQdv4;dw(Z`s|2#D-5m zj;IxTgCaJ3;$fsoXZS=UDsvaQ68AM!*L{zY@*lchhwDc%^Q*lgwU0Lc&oPVH-C(W{ z;=s&oV`+YAFl~N@c|$v|`JY8T-7)h+hzpMk{pP<{)F4Q!3g?`6sEd>{VBLEseADjV z(Vs6VML*@<{6Dxbo&dhI`$q=T<4!@a+d}r3k3WpVsW07gKrF(YhA*}43Fdnv0Y}5* z9zh5*AvZ!|H@HI?F_?b?HI}xUyNkNn#_qO^?4<#0ME4NQlyq_TSN9gW_fdBj-7l<@ ztlH_`CKUb^niWNmwEs{Tb}Q~Z>Xz`A6-LrNt!}}&MBM^^xx%+F?cYN%e+|JLuQ00# zGa7F9`Vin{>XuA?u7HAg4+0L*0Evt%pyV`4-O?L$z46)Br96r2&Lp&6%x2E_@{<|1 z68UZ)%4P{tc;x?=x`m#9SGUmcRi(4g;W-liDg3cDm(g?ctAaslREt#ZwFqGB75KV$ z3g`y+&jh{Icr1EOVe|?7yN2G`uj8+lE}O244CvVmIo)_pq^C+eo7}PHJBopa^6ys? 
zvccV-o}U}f-t?5xe=h0X2_Xz)9rT0F%I_Y|b zfA779zc=~!I{()2?*;xnj9BW^`v}!+Jag!25)a=0Gv6cwZ{Xk6Cgd`DE;gPE%y$?w zKFxSeFyCYF75-t_52I(Ki8;`G$venTdp()*LpGtS z0bLAuoo<|cpM_>&ZvQ60jclXkCgur-EZ|=Y{&-G)9zEw1=1vJg`!nQLv>*hr{!8Q-+&OII>$Q zn&TAQ?0^k8ndIxB`6O2HXJn_d>Dq&?Q|LN@f5+mFw+@WPSK43vo83rJjhT&si_K1=||;UU-Q8Oz|uKuxW25knqn8x{lC1g!e8(04p?e?)b8ZCosve z{2PZqR94j9mk<$FXavImouSCQgFlZNB}}o4#IAAs(X$Ob_F~bdUy;0@^RJ74^EII; zv~>)46@Nr8^8GABR^yK?&@M5X+(it$B9!6%0)YE6__Y^{N9gtCmDW zdOLNuQ@j^L0g&dOlhXELsddIs{caOS{z05{Hx9z!Zf4r)R310Kguff{N3D0M#!7wX zYN}2`o<_)%@JBk{WYCWe0Uttu%QS^_Iv2w2*5D6|`0hmWt!ChH{5x7gHn}@7_j2PI z!I%%$;BP;;SZn2NC1$f*WWL|BRRH-pzHXQC%p>^Q40!{8cpoD3GH&nW zcN^TLijz4R-mOaia^pW0x5_enlleX<-&L7iaX~6i@4)7%L2O)l=hJsvbOosoy_hpRr!H6G5K0xYOj>WaC(cX4AMaG1V# z@x!k1FgJSe8V}>-H6F%Eb~etzfs@yGm{4BhVVq=n<2*QU@){2l%4vcQ;N&$PCY0BB7$?QcI4=mCyvD8=h(m*51dtjvpR5|5;!LY z&NBmNP2ju$PV+eCq#*R-Ahb3Jog9Qt4MMLALhFLi`XIC+2)#K7Z45$Z1)+0-(7y$t z%|YnBLFl|7^r0ZMB?x^q2wfC}J{g3z2cav2(A7cczk|@uAoQgmbZrp&dJwuk2z@6A zb%W3kg3zuY^s^vzOAz`^5Lz%p_?N8F2Ts#&eS^>)g3#h1bO1vEjASK2=w3l+X%IR% z2ptlH4huqu2cZWCp=Ckns35c=2pt=Q#)HtRAhbFNJtYX87=)e~gw_P17f7h6!ATM- z)vRTx-s&_tfSnq^UKhaD2e1tR?9BmeV*ooRfc;wl+Z@2&8^Askz_tXiD}&J00qnm6 zfOmpWH*gjt{U+=KC#uHQ>?=;`no9*h=rA|{7!o)q!ih_@KSi=LjngeE=L}McrULe} z#*5Z-0}^e6)Nlm3+ho3maWL|cfxPuAUFFPelpQ5sxQn0pY%89nkYv$$*}vMfHIf9x zKDB0kxJxv9&~>PQwMsnlOLi2x*3+=ToaAt+T zFW1A*Tku1XI;BYKX|w<|Eif3hVckfMf@-2dch54`bTO-in!3YaoU%&R7w*T%*Ge_y zsmo0_NnMx&>axDOx|}UwtrBnQviK?5$3j=GAF)1_9dg6#mUvMoh#am2sNIg-Bh%aa z3Qj=~w;PxuUbI>oCsk)){-ehHdC$@`PCro!Xf(vzXb5*FhE3nt%*|c6 zA5G}q+WRX%IZR~im|YO*>&=w6nVr8xNo@& zv(K28Y+)d7r{l*-7H<(>UqgtX%{%J)fJ4yq!js;yChKXOd&bVaj=ElqH3ysG&Mn-D z1?07YC$y-%(n7)Ydg8b(P_fj#2#ZheXqP0WSUdt>Dv3io7Oo(m)3${LH?`48g}D7^ zruJ8##|dC-HJRLM5_leRU(jqYMo+fm;!VI;IjtomQ#wg%k-*u{!mMjx_Q_I(A~2yq zyLpCMKC?UkND3LI{+G&3F7A=@k%nal=&HG465L@#(0z8bFh%y|YE?HPD_gU&+N439 z({SEAbeLjq10BT5Tm zNj9ubhn%Y3;^pAC$tx)mP$`FNYxnbr87^27Xv6KGHJ31$nMA-f&0a?H% 
zdqQ%xY$>bd%t4U5CnTI~ZHqUOGLDogFgB~D0@DnddF%{QacFDpygPt;+aO3m-cb!9aLV>-xQ;7T&g#_Jk3fg zn~U86Ak|vh8{s8#!*1SvKsYg=9L_80S}0VN){{9x)k%iH{eYaSK?Z?DWp!R-tvV4Q z(`k*>V> z@zY7xLK##vpRd6F0Vvddy+RR~Q2Dx_selBd)P=eqrXqOMbOXA^EUrHuO_+!4_Xf&< z>xIzS-OoqU1VkIMNs38j=yyNM8Si&LLOaC&xb9~%aOCNJ&O&gW?&nAZo9>6?QfcZ$ z%Y>wrL8%UitpU@}r;5lO0k|l;|wmk#u8 z7}h4ci@8f6S_^Q}1iVVX@9$zZAPo&G79WO{&xX-1=5fKCY*r`&8{W=YThH%eo@Kn>#R%;XuU(9}Sdb$e zxd;l^EpddyI~nx=s6I*16-!+YEa+s)mhq7%jDThXs3VrT2*G(enPU)aIvEm7JDF0n zO-O2r*;H7i{Ju_xaNGOxLnOW2Z=d+#olNW1q)(eLX?LB>4buqID#U{<{b7;c$@n83 zJ@$=3f*f7W+q?K9oNB^YI60JRq{erOR*n5h0Yv9!vCb%N{;-Eva;5dAS@21EhKz8; z)t8e8;@ZJ?75lCM#?>*Sgf8?AlBts{k;w`^s=Nr4b(_BVj+05lCDiPH7ei04^uD`R zM4%Q0jxB5=lljuU6=j6nZM48=9Dgz<~5s;j*ikcTsW-78xD6Q(0~?vyw~#tu#q8V0~Ly-??8) zed|oNz!a&;8<$E==4j!%gKAZ%=~7?9_Eg{L3VfTQWSXApd%Qw71G=G!X|n74mn)^d zO{1m0^9aFP+iZ~e($GZMp6dIY0%sH@)AUr|+ZDPC&<#yYlU?6!wZ5$tQr|6v z(E2h7>O0Y|?;O_m#Vu0b+HUH*pJvB5lu_UApuV|SsB)RqSKeFhL4KcB;Qoq|X?m*f zUkI)9jtxyr6L;hZY3Up>yUuRTN=-F}uTkTG!RA+GOd;`^S*j1sXp1}RQEKwVqK2*IV!g>ci~IZM zzeR-3{yKc7G&L#PI6q4|4w=+I#?fiT-!aZ9`tV3R>tJ7r`?0Ev$CrN|t6z&^#H~9x zrBP*lil?48PSE?1oojL5*g@IM@D7{L#$5#Hn6MXl45%Xguy6PGL#^ZX74j(h+JB-h z$rVEq<^P+Uh-bZJ?FgpBvop9sg83l&&gbw!^vT$Mfjx;tv$Ye5rab0Z7q%2SqIw5X z73r{~%C5y-WM51!gFnX--7qzP!y{{Q$p{s)P+{m7?m!mY7%^ehzKE*b)_E^&iYjLw z8!~QzhwR)IIghfV1*o#JTI3|eA+RLV2*a zHJn7OC0@IX*wk+Do+FEEykSl96+PFMOH_bNBO~cJ|({%IPG~UFG$#0r(Tfj@H zKHCc~HB;d_t<@O8n8|g^pBcuC8JeII7IiRD#Wj zXhIrCa*Al$0_maJCUTUOy`mJEOqeXTn?0O{ywxP|7OuB+8i<$+U}D!6`s%%sD%@#Z z%TWxb^zecBaQG0>IzOYzgH~540uu_N<*|;BuTb#K?+T5#m`agmO=K6~dVp}pa4q;F z&v@$z>47vWtBpmBJ10a{|6K|qB(`iRCipoRZ`q9Z$6IVi=Bwi^vjh@aQCco5N-y{; zO1KHXGnP6TIJ_yn<#I!Qd;N0UK8N5uqpkDN4rYR!6&FicH~#)a32+;sL(GSVpEJjR3Z%>rJsON$FY%UioPiK)Z%bV>Jo9)QMu7PktHzkGZoF z(g7>7_=H&9mx|vB)?}$d5!lF&loy2YAy={U!9qAh!(Wm0k;)vKZ?7dwv*hP+HX7&R zqQZLvP@)}_G;Pvi+4B|WD~ND~)g;AaYx(mPCotZZAEmA*KRNv+E$sVNDhkz$_t`3eYa(5H>fLCu`YGIAVm>sj@QYQ!qCis1z*9s>(?cE)XGN 
zDRiqIqzG~T5O-4}AJo_rPCpC3T~>0;gjHF#ppl`Rwp%G$s8z@Z8G7=5ks)%AKu-tQN}v$X2<~=MeLz=syQSm#Ym?+m=co3 zX8|_32BIT5Hb4gb&7Jd7&~?xR?@k+ zd&psvRarH~Rpq3|L3!x$Uz!_E(Y#*?J+|b~W6k?QkK(ViY}xcU+?1d6=niHtmfLCT z4;0@^j|(w1;n9N$dZNdXoW@XA$)(2~Y-h8ak*p{l0=FvW5Y1A`3lELZVKB*WhQX#K z!UWz7@q|E-LKKMoVIK6A0!9tMX4)fmt3*L2=G`kY(VWezvoz}wnl;(bWG$u_?B{US zS4dsj1j=N+NJ<#Z`asQEqLB50(X6#^p~a_t3#H7XNNyZ1yG7-oaH^pIuqz2iQ85$1 zQbB4O(W?YqRowd6@2wzfI&lyCkGwbH3vWY`J-ZfuZ&3=}<3*O-^6u|2IQKjfPG{cr z4qe<@th<%OhKxG{4vD26yAV)T6H9>JuTIDsGWqXiw5=w;(>6z-Zq+?u;qywd)YfwW zh?-EHT1^7j<4W*LfJgT~k?Ii3Q21!K7wvf#3f5$eLJ`<>d(?@MCVixa_~s7}b$d*u zNVCkwimYyLEE_g=3F>N~M4L)hRvU|$GfjWCg0N*!YbhpLH0bt@WxU_*36T;1W2fnh zfhAA3_vJa%#yMM&K0vUUrYFJFaBM-lczqLB>ex*7VzOY5h9lvEUXMt6xfAC8aKmxy z*(9OT&UAV?E%&<%2-7OWgOsJ4eF3lZcUZ8g1A|b=O*)$`L5tWqVkTaDbu<|}JKqe9 zu*uJ)Kt1+)JA^=_*E_2!x7Ygxft;e{!$ix+A==Si@8fqw%hwonvPqF)sC*Zk?$>SZuazi{_oJ zBi?Z??_JTbpbWv8vOcKBKyXE}7NpP^wS_Htmz6Gb#n&{Az0Zz5oJJrc$I?+P)@B+fTStw0*u}UOrKp zU9!X0QITioFO2yTUckymZF9ywBwAX!pDR0wvZi!;pMw{#zQRfxO2?5X6Na+ zvQEqs@QrvgQGlo3+sAQApd46qrodaco!wr^Ww=sAfNE7%EK;alq)^fjX3BuXON&J> zBxE7X)^Vdobs7gFp!qH6)bVUsY~k!?RtnE&;)8JyZqD(C1m1W8M4Lk(r?xbXi(K8G zOe7hSM1iO}AxhIC`jDeD6<+1vSU~#TO+KMy`HHV$rZhx@H{Dn@=tfY-Z;308f1}5Ty>+nHF;lkCqF5>5q{uwKIKec$7p0ryrWzk#{grxQZUW{k~h|EVaN z2APb+&AKstbj+Cf&Z4!2K%>TU(0|E;+P6v_r6a{dcHos}fmO{ERZF5xj9U6M*8e_{ z9rP=zjiTCvm8n3{&~u1_DaFc(-*=X!$`FNHgCGgFvx0b=O_UK8XhA_~CGu?m1d}M6 z=T$}*BkA`mqx8v=!4FVI_W=@eRQp4eQC*fYdJ63h(-XBO8%22e*DQcLz6`$xPd+`v(M_^A+TJO6b zVzE9m6Q%X z6!_Z6K>l92gMx^ebrA&SYy_d%@RJdQ>}i`>nrtG9jn0#$H+`*0CkpRt@5>dkMj@HN zKrS*l_>iXn5`xee5(K8o+bcI`2}1jy^Nzu0oE8=W787Hzs1Qun&QIuvF&II@#XpzP zlz}gCRk_ewWPp2Ca%JE-$bcO72+P37vD6SC$uk1$uUV61Rp^+)*%lxQ+~5twOsPCM z@UFdelJ_&lS_b4C>6;B&*&DK!f(p2yd-gIORKX#XN8rairM#w4g-xL$J6#Z#ml^_TdE-N$aahVk~$Z78CGp zau1vhQesCZZ#Q&WTOeTLtq($7aWKPVc*UrOMDpA1Vl)4E9EOm|Z_kg-yp68u-)?|u z5u4epp4fi$Sn%M`lD^`x^tjV!mc6{{l7lz?!kp8KPyh0$(J{cCJQn4f{^hhk&w*!x zdbY0ryeKyFbb2fpApePP-e7elJNiJo_XF%*GYbo@S`un#zZJF>hFL7~?IdeZJDENc 
z?Sl<+@VmR9=3;`GBT<}Kk~VL-yV}NTh zbU3rTki8%w4U3E|dmnr4UWla@6Rz<2u?;28(coab3jej_M(DQ#=vNi`Y{fb{ws2Hs z!fIEfJQgQ>ho=PmbTH5}`nCvDHN?>dWhS}#ZWbEv?`~2Ms#uvVG ztaIHU-KWsa#Qazc*%Hpxg~<981BsiMF@^sgau-veqO41|5AFldW1UfhDjQDh5#`sv zE+{xUws7#EDr+?#k0Z96or`BeR>n09;*uCy!kOQNo|3(xGjgwYb^|;2eB^fsw^{~A ze#;-?kzeVMeFp7CX*33$T^?bn)5HuHCiNCM%e!#ib`(j6Yo*KAU_U}RJXtR-^kX_p zjs)5O3+t=ZwCS8m41a`!(TGu-uO)0YUKZ^t|0Z!FL$ZN`Z)ep=_HE>PPGRN|n>Lzs zx>Y5eq+CJYh<45;_8c(^*LQ3y@jiuSicy$k1He2g z{wYIrFyRGc0IXGj+NeS&@^Y9=AgC=ByY9;DM! z(hCtqrToXli&;T1bjF0zT7<+>r!X~U-wlFb1dPj^m^79;Qt<0Wxx;4?X{>$*CQ?wk z0&ecJ7fMFX5wBdhCv&PZIdzDj;t)`Kd75jS`3fw?QbfALTUv{ZL-Ug9&Ig zbuueEDIbhQn2+rcKWl;YDrmbTLusmDfD96Q!x<}8i^3(!pN-Y;b|7%E=4S2>1=hCU z;Mhd4bIkk&eV5s%*tA|`4%#L@j3tRa$GP6|)S!=r5I&$z~~HQi?&XQzpe|V--Oc#xmz{!flRbp|J1mE656S>|X1< zJq{O?LFM2)01=fxKY4I=F{(nvptPUcAkt1c`9-avZGy!LmdOmNbF{`}_MdjLhA_ml zM}P;L?+QaILpy{ZpsM^~wsfq1!U04fRtRh<=+HnueCtre%-ef^gp|t>g9lI9!>)t1 zaG~%(TAx}2X_Vs%{U#}9BFC13S~lzCalEa9Y3a7eFv;C9Op|0YqIWS;%#nPgVV)zD z-K0>9h3N`Eace*3f=6qO2vZle4>0F;5O*N)*oL_R<{WYDxqBAiKGiwqp!?;V`4U`}53^}>Z%YMfN1bv@h3Y9*4O`a($EPUpA%-U8n?k??k_ zut3(D@wQT=CPL6=K$WlW`i_S7S_z3-n(IbmQ|6*`&@S?j8)gWQ#1>HZJ%_Bfm?ovW z{Y<#Ig*lph9+Jlw!QkTWsQ3X%<`FONjLrBnV{AEa4V3PKNT0wx2Fgx;ZKeYXIPumT zC5!Z_vg}4UeTd%!0^3mgChXl3&hgkb8(knjh0!+l>y|$qt1s#$dC~y)FNm%diEkoK zZrKXD7#tNrRF(A{o*#ta@=@?G`v^_K5>;Wvh@UKl|8^xZvQ)i5jrbN_!rIB0h>7&k zobJc7Rca|djuf!vDm#uYj#?b^hzdK_SUxjn4Ou=S!pOsp=(zBaL z99*`-C*8j)jMcCFg-{fw(?ZJGad(NR9)(3sse84iD77hWj_l5684A}x<%;-Hy>_!V z<|;r2IyG}P3fssII=LJL8pRFy)^oT<6I)`0kw-{j#GSB~k##b=14!rfz=|>t;>#(w zO3B`(9;0jktA?|ThGS9JxcJfJDn2| z*9Nd0Mn&tXR&gE)Fs2vkFt(m96-T)IbYau*D*C+aUSf0z{T?t3BFtm zmIAc4Fgh(zl<;hoxIOV$em+jK5ci3)*|@R z*rj|Nx0rhZWBub@tVHSvG;TJ_&r6^LPFpeA#!3p0Wtm3P6{18Li-6>sYy+wvxg1j+ z3Ps?2Q0vKhPFpRad4G(a@W56EAN(s^!K)I4@u4Fv9N4|-iatb|t`L(bU9p4uiut+7 ziVwg!rEfXmRREhjN2k|i%Vk{DgUpZPAr@H(3(Mt`Qq&}`{)b%ZY+ zmVy3Ng>Oa(+n-3Bwi;9j+gPN%4Ew=Auog?l1Q86F&2Zm{7cR1@2lT{w?#m6rb;bF( zZfbS|Uq2e1 
z{R-JvA(_BHmSq?0GD3od4Tx~wqx&K(%6aeoN;$8Pl(uzS5a{Ns?Td)USN|+pq<1)6 zqa}B!8)TNveLDiea39N(!+oa;hS@?`%!(=ZX+HV6uS60w390P`Bs{oJ(nYwhu^aAt z_9#}Ul}(}CN5K5tcgI!2eIN_l;aH~+CSXIPxr&&y(rDruj24#Ez?z3`!Raq^dFYoS z(F>>pxx{tNOyRB49Nx-SF*{2!ML~hDgulalwc$u)1inJ1;42Zo1Ya2t;j4=WBP_~SbA~HlJwO`f*_V9_i)O1K zXi3o`e09D?OYR^sGRx+xk$~{{3eh=ywVl?z1+B|I(zaQB)>(`NW1V`lEu)TLX_|~0 z;Y0y=ib**E;OQky-E_lKA9FdxZe=qlPZ2ObPbH=pp3)QCSz7IHvaXUWMupIuiW0MI zX+AMn<0zPWXuYt|@EjI`8X})Y+Vv@i`iO5<0oVTx=-aQ<3 zbH=p9(_`ito)B3LqgkLZivZJNU>dSZvOn7+J!TQ(g=voYDFUKQbBW9oY;Y%&NVab0 z3rpN5(oBxzZ;hE3;h1fdV%5oRgIgIPqO>{H;+JT*!3R?=IUg@E~)Wv?qdW?_}HyTBuf$Z4BIzP65Q zF&128IKP|ZCF5Ld)D}o*YDGTC4az91*=$bRZ-KL+S^&|Vq7u0iq5ljL05ucDovD>X z-QA4LH9-nw@jJ;v6Lgcs*dPJ2U;<<@x~D953{oJA-%A#npqng49}F7mG83|30%S3! zr!4vfDUe02WFZN{0`cZSL~9msk_3u7SI0Sb5oYIwgt98*n0B73Al2^B!vJg6y|6n= z3pB~Sd@k>xzP%IvD(j8NwB#gUsdAcD6Vz@}i0Jajky08n+mR$7u7HR;i<%*d@ghp> z_@TTRK^B^VYQT)5K8dA1fsKIeN2XW_Sf7=~{EXX+m^_wR0T3+C3C5x6s`AaTWNZ+E zu(s2U7+5qs?D<;W`GgAe?C`8Kdin>uhcOa@8e$aaaa((T;K;4$3e(hS(YbTmM< zQA1|S1q8Kp;nOghUB_1Bu5K$Olb6`nRPYDtU;~z$;m(qwS({Ww2U+9Rbmm=E*yS6L z|C8nU>CB+Yd4OF=_T)n=ATZPK3IhwO@zyikR|0H=udHj6xujTXxY5U-$h0%RtV5=EBW;y^2i0rBZXyIX+L+|Kmck3`LT>qPl8@hS za6+tw5q=2h;h8N*0c*mxrGmPMfw2&wJ+Z(%ax|@<^cxb&Zc-Y`J<{mJ2plA>%}VNx z-*%CttxDkkayc4+iJ_l+DgE)=`|cSRYX=530GO&mbNQf6p9h}DYAFe+al1jPSBU$j zF+~}gcHKy$!z23K;?KwoSY~Sw;-dXVv|nV&gaas80b@xhs8=y-*1=uIbHskJOMnBW zEm5I?&_dC5q2CEqQ>j7`SfO5ysN+!r)mE)g1Xif;OnpbR6Z|4e?!GqV*PZgv#hrl7 z*%0%V@SUh{V?=-@2^mW*mnfZ8#a1%rIlW`8Ai;`7cqesZ0C`b7_J?*5Wg@`2h00zi zDMH|c`0k?E#pzh8I>;O?%_Mjq0U4MhAih;o8iKQ11Oj6s%^_NnkH9`y=#lvwt5rZk zS))$EUOKd~XN$yGGgw^^Yo6%65EEDmOcOa_&0swMl{01$tzgTTCBAGE^hzs-FR}iT z3DU8+dIryuH^xS^cibpv7tq5uN*%sruZP16vRrLAv9e>jTsuwU4{g*`_LBfus9sgD6E$X znk4xSxrY1dSn6j0@%GlEJOquUdNG|D9bqCxlIySxMet{tI??k2N{1pUDCSzCFw~ud z!b~tbDbQI+3SzS`hS(P=fei%>m3;zEkX+iN5a_;s#;S(v_zQ^_eB8l%)^E6L5|2vA zF-s#$7&pbiCD>N`sBEkCx75Pmana81Uyg#Nz`8tZ3cUo)fWv`Q9mt7&Dt1PNFVD@^ z7588o9dusHOqi6&j3EtoKtdRn49ZNaRdEgBG&;QD4*Tg(X{J^m~K 
zxM~YY5dtT~d*IKNvDCyMbF@2?#EW?IRkpSm5`wc^1Ol%$Xb#bme4*xcGX)=CNd(lF^)9~BUCzwx7L1^U>ehc0M-CBYRJ|m7b?giGOu3x61T2c6ze}`9ul#X z;9DFPLKFOvO&~pruPCae_^K$zS+^1(M?-uAOWc!a?xRA($5L-0P&I_0h-io%CceH;VgQq%orn^p<4P$Ed7pJ`u6N34@i7sE3#p(R)!A8m$DI&%tL!#K7MDsEGSeO*!ZQNa zCUffs)x>O6w;NQPe+R0;ifTwU*d#^8l?jiYHQ6v_ii(FliRzUaX&PQc5Ghp=!JMM= zKHLMJHlD9H*LhUXnYASMB1V@OvIrEaz?(!S3Rv_ykDD+q1VJPUS)Z+0yV+TvMlh#n z!w1M(Z!h5;+2Ef!*^q+-VcW#e4JMQFTqE)cHy_xo5`}!FC|q zvYKveuR+J}kBycKSgS-K-!IOV`W9q!!XF9d6io~AwtVAH-+^W?j~ z<||Ri_Xf?kZ+5<$xwh{VZ5!lUtobg=&UYCh?Y0s|n|z1l$@fnJ)+$lRcN@vqDPg|l zPA8DYQs3@}KFV3jO++Kw6OMF$34IAw44dQzyi~NQ9YGi1wKfSF`sMUx7xyYCKqv5N zd_EK7y)`Dq2d}U3i8wDQlF2i1@W+RK37!WTt|PhZKGV$Y~Nv+}IUvn$UzYXxjo*>;w-bGLR7$u_%z1n@Qs zhuGtEMq9GWG<%osu@c2|*Sc>pl3y1Jko}tbfcs& zGA|FSwN&={%`N8sx^PeMQ$jgyx~J9bUyt5zvjZTwO-i=ewGyzZ+>O;Iwk-(uooCR> zs2QZf%X2T71|v2M&?WHL5(yF275?z%Tm-hJVg zg}12Nz+Lo@_fteKyDhhIhum@V<<@XM2@i{^|r$riKJsf~xjDcnq{ zY#$ofyIfMlZF4AaRrv<)hmX$wv`f4ag!|Y})T|ne3JeBN&Do52j$#(UFW*GWZiHZm zDrO3b?2z9)Q7SQ&m~}c8T-hb7Z(lGWhi2K?VJ6gP&#W4Ts*RaFIjMm)BAE>`d*B0k zZ2lREV3rYXHODiU#^mSEzFbN`EJCqV&irCIm*E{WX@n0q18PxuCom>kOmlyA(qpm3X*lfGfQ-3&DTv7jA zEVT*(0i0zxWx9-~jpe9u3j!~yOzIO<7zEFvKX&QuQbL(pKc@>@3Av9CjH#nOOdj0q zpUL%Y){`i2c2a9N)w@{{X&K@}4JOp#Z9^sA@vkoEF z_TI_ar}Y5ie|KSvQfxaUc{d^(LvFE!yF)Ht5W051`nH}3RX;Q|2{1U+=-x6Rzursn z5aH0109{%dOZ0^rtijr%T%mEDg1#=GOb`Lhe^z^p7J~x?oYkHkH`0m2+D*jclQAZ6 zvt>|VOY1~Tw0CwEDsyZQOFfHyCD;jAEBi)+oq&tXs=r$r8B-Y)tiVa?FWV{CKG!r2Ff zblN@vKu3~@jBH68SoO(&U*%;Kk!LZWR& zW)>^xHv-B85zzcHQ_nPdIv+Blmu`WokeOEE@f)&Q;JTHWn_&F%@*v7g{T@4L3I9K+ z*$Kdtqh>1+X!_{j_Q1giq^^~QiRxM^ez_(Hlj^FQGuu4xwY&1cr!PnSXT1{j-}hS7 z|MdSnzh?)PFN@Wm0D(3Zhfk;z_9Q`7P;g3RKRu(PNP&qIy1jsp>pgb%ea9!lavLC)VkR9rigd_L$ zr3Urevef>wnKc0#LJ!irBX^ygtEkBygm?19m}bIJNL5QE#y73Scl7cWkr5HSRdoqqh80j-MWKz zT9vhtGZr`y#%uR05S#aXisO-cyr=gm{w%ra`xIp-pe5yfiq|I}ll?x$PJ$`teTq*g z9CG9S<=^aqlf{!+D{I}>&W#K7JgTXoEv062_^kDvu$Jz%uqJtL6J&0^3%uEgamZ>` z(|Keb>W=+@oM^6FYWk^XOPH%Mzm^grVSSSrx)Rs?$7Xz76hvZ+lGz{uGmUUG{X&}| zV17k6P$PS^XU&J^ZHvf5y3g 
z>sF_>DD!F?k!b(!)NYe`I^y3p^F+uWPqk0RbvECLz4*Ml<9Hc~kcD8+=za;==Mc9W z+u5SJR?g}Fp{wJC>8iqyQ{L^s1>or|0 zNBv&sWh>X)Iw-griIw)XGNF$2+!fW|b@%+-l_rrW!^i6lnr+z)!zzrd&N&|dPa-E4%I zKGs+wd~xK5;5t4B*C@6Y_81+^Jw|A+G}`N`^$fXgFj{OHwi~UFrN*GHsFT=!VJ&wT zr$JPm$L&Rn*e;k~9GyLsUaH2vFs$XQmJzgUreQB?_M2hCadv`yZQ^q?8@GFqnWeiy z@box%J6xiY2+(Sj$n3<)tb|$M-S1;28fX_3_ZF-ihei;s*p1;$GD!v7V1Km@E&#N= z)1>A>Z>lZdJP_FY=D{Fh3z-MG9dAEahTFRLK(sGv)bpyG^T><<*EM3F?BqkCCaXxU zw7a9wN%Rp)C_CqD+0KVHkZwRX;F(^da3yqNqWq;;>QA6XX6CCHaKDM?;CZA;-cp>s ztg;@*cw0#;>&@{N4p?AkA7CJyGC`1Vsl46wG{Rw*ai*S9D>Qndd<38K!9(^puG1p4E zoMT4XXy0K3R>MIO7%0!}Jre*YBQF_9+2xr&fOQKXmMMBQ zqGf=THNHEY$z*&dbA;jXUAaV67E{e}rp?-rS;SGC)6~chr;22Z9Nb&DL@cJP!8C!* zV49se*Pt_deug$GBA81oQoc ze!FQE^0wREfRlDJ@|gKO7AM^y%=jQ=?CwL?P`U=tHIT0D>Dra90=oLq^)g(_T@Q^j zE=u1cU;qy2*MPm;7lg;wxDNuBk#F&9FRp_&aThX9AER;~RQFVd->L42bpJ`+)IRP^ zb<==wub~_G{>0K;!qQAVMp>>D9$=*^b_b|iQuhsoe*;*+G!kYwFfHi|y1HM0ugJ7- zFxFbE>L(+`+u$zK;z^Mfs9O?~K7>CP@a~f=H$0D;I^8Uho85X%AjD#O zEB77AhOrlO3d}vvprm^O-0q;!_}dGAfRM&1X22Op?C!=~Y&inw?jORu58SNB7c9yM zLViHkesuLB&Tp9z4Dva?Vr;qdfs_P%8zKDOj^=R}GS$cU^I9&2P`ITDt|r7wh$Q&M z8X*i(qi!kdEW~Rae{C9Iw8lB%M7F1w@4g{=z6uDI`UMqh$+ttoHoHqq+(LZae=2|!{62L{maizX zsifJn>aL@E8Qkv8M=AEnp~REaEe&-h+{}FzfZ2#u@N0Qb(x#B?uGFea+4eUneu}Sq zmL%KYp31zc`PYxZ+u#p%6;d6B(4_?!e7cR4jZ}L)Ha1m)0>a$nC-c?srJED}asN%3 zNfXdCQ5ypmh63K#vPjk6FfH)1d^fwd08UF!e)do-($YxjF9rnbdx2tXuy662H#fs(6J_KUFsehwcWrtDs%h$#wzfhiAM@eC&Ek0GZlyg5#FAT%_G2Jfm-Qf0<@AK}@ zq>RtIU-N5p%O-UTzPHsaIM%@Jz7UG+fSbI312QG@D>d^*7Wj7(w!u9w6gENK(!MvL z0xJ8aJ*CK#6}z~H2!_pWjfM%yPEohy5LdSpZ4}&8RR?RBG{q3OB?lJqwKAz+EsGXY zps94PI9$S;>3&Szf^cJqzU$O2^k`BTDNGvStoBlkFC&gCG+qkun-IP;)GaV4gkZ*m zU=CGx9q}BXZpr5ylh0|IkKpd7@x_EaO2enpZTLs(u%CttzCG35NSNmj3;1P)x&`L3 zkb5JD@BSSD?%f(%%9Q`r2CSv~DQTWf?lKLNQcVFYDRPN?H@O$ecY}L9O6xAdA0w+3 zp;T?Gx+{pVOn^7L!$a|d)jfpq6E!2La+SKJ$`w+O&F+?Rsg}SF(|D;?sk$Y<0qU0g zRs-IB8yUE7@NW;LsMPI4De5NoU52F#cRFPKxFxi$(6?h=y_3TI{z$BD&s|HKYO}jo z(+Um#Y0^G`ue*}vhl)6pWdncCQow4~r$GQWxOIS0NjO>?se!TkYi*k8-bdYn{v^d9 
z;YX`m;J;D^7m9oYx0_(4`ta{4{(Z&Tj-;zgu?gf#P1V9&wjh$7QI~u-yRLjUxy=eA z`tv0dce93x9&I;pi}3Yy=0ob164x>3HvTQ--#_t3iL?|%x~S{ncBkTx#H^*q1{9D< zqYY$cDA?|rse~ODO3nJw>N->qY;sQsrG5KQ&FwY$LVt$Rr4dHFTM3Wv=1{tgaMOHv zTfS-cHTgd8zG%X2rZvXNG<8d5uLyW9?j{re48myPu8?oqZBv*@EW@RMaeqfZ)1lnSNL1?2__`+| zg3X*Tm?{-es5C{et#L1r?>hHFCNva0oo*oM@1@r`$?*t*-{6kaFyWA)61K^G6N2jD z_%GZn^^OWBc@~20}mzsFPcyg+UgdB)6^{pub>;Pe1ITGyXPDKp5ou^o(#XclZiZ9BG{{@3B5()c7{Lj0?@MXydo9|u> zH-bfaZSgD~;&Ch@5zeEDBA{=+%^ zrZc^6)9;_+`L4{|DfrWwNw;gd;=l2`DzmSAS7!cT`~&Y$JdenCW#*T6 zD*OoZJ;;15`KB|C@?DkrOuikNn!5)Kj^P z!3Ar7e|+3}cS>bEy)DM3SQ?pzJvdll^JAwBk2_O~(|--B`%7SnK{f`2U=21|CPQU^?$c=LglFm z>)XmxPL4Zg)g^ElNb-Z4xLp)~ZI1%GAk`jAwILC9&Qv6<*YUi?`nFwh# z%2(WWh09`ao#~hwY^R^k^gkM2G=>+$i&g`d!^m&bnkseeSKS@Jj; z^t~3>npCgPqU|)P2`q;yYm)0OH~GHU6Xw=oBrXWhqO{4CHRf2xSu2*-Al5zc zKu8v+@5Zgo{F0?y^tt(=GPFb!=CxE04`bu@n5i8F1sfl6KlugP@3Q~Fdn=VLi? zJ=g(qc^sSZqF;LQaBBSUk<~vq|65-EG5OblNptdlhGck;Y$W?lgnv$>1XekhG%5)o zev=@6Pp#Pl?R80=ir`i(Z6=&?72~asF!4N<{Oc6mG82#QU@1LejRSd#9ziIa#b!@h zB0a8vLRy)Yvj8fEGbx0HpF;4w&CGWIGqOAEWwe5GZJ8eMgZ#?=R3GYcLODf!77(gU z_GidRz%gankYfa&f|w&>tHFBNN8lUYsv?R$mf9an$O&iKkn#3@ zQC$>E{en^UX+scQ7BBw_Cwz6=fq%Kd&H``@XtPF6d=#ND0M6=6VyX35x(1@Yh%3p9 z=w$&oro=<^%_y0GwsL^`yriO zEC00z+pRBljA7sB?%8d326hl6Y(#^V1}B`;K-%ctRtj)c3D4q=M~M^3^blk*Ig!mE zdkUtbvNPDq`rEt558w617r52)x2SHFC1|$3z_LD;L%myoNos;J?FTy4WaGRR05~mP z13XnI*22Lcq_s9ejeQ8>6dm`th_PJj8@F=A_){s|m||p7$}?Bs+^~5QOYNXJmBI#% zCp!w=B+9OZ7|->3Zhhzy3& z%zN~_NsoPfgF6Sp;WqK_If+lZ9gJVbzw0Q)SU{eP@6rln>psD{r`oGxI>~Ur2x-%z({_bSKyv`j)e9ZKGVn3Uq^qopywZNp^xEFd9Ll5KMNE3A+ zzFGYjI^zgc4;Cw&y`;x(|s0!g~c_vOyqIwbU?y z{SZsN4t-o@w=tS(e}@bhL-tYT(fgNlulL7)pWZLe()&B2So!q+R;aEX_5Qa45a|7t zr6Ill7(5}pFIpL`IW?Q;i$!yq==|D$fq+~)721FL&!kNng@n;Ixk;dutayeP5d1|b zY#T)A0Om0+Eo~uSc559&&<*S*-N2TClsX7NP9IQ%TA%|%R2G*F5$w_jOp0Xtm1@o! 
z>eb(5Rw5s>-_6;z{GlHEfyzCUB6r&llnSPteqcAzr0fT#{rd;@0}pz& zG5tVowsz+GRpQKukAH9S?{)k!?j?Gj#~-_g zG(F43lXjn=X8}DxngvF}R&_e1RaL%Y}Whku`*ADE@* ze~!ZB)AJufZ}q6>-xh#C&%geYke+`Yo{*l;Z{I&Is9(gtX+kvS^`@TVSt!LlATmnL7+4lz!#3}my!y?Ar+4lv4s0aIg4*@t% z?0dc@`g>%xL@l;;?%DACP-Fk6P?_%D{3~I{03BXTPe1-`!@olOQJa4Wu5&*l>rr3M zhorcF=ii$Qg1&r(@h|c3j}&Pb`#0jNdihxr!sz8Ueu!QcWB*3Myv{v}_?YRH#C{n= z>6=8~`2v%6&+tM|V(57O#Z6QNzR=$yC^|po>(qoc#y&7HR84LhcaA|f8`W4GAja); zaJR;5laE6`+c(>+*_Hq@1q=s3PsZzV%O@WYPad&i)gCZVQ;QL7kLl}FZ?#VDo7{qT z#yAx#Gg+RUbL|f4F?|sT1uP})?nH4jKrnxs9)pvJaPgAEIz1+feEy1{xQ+KzFt=@m z2ZU2@K!a?pF7{!kT&QpuHb>wRfQ#D|B`n+4^}&2M`pK8t;zB=FPx5ugebXi}&Vb0cuNj*p@nP#Nq zn67NX{|aE3Pbh|VoqS@#xo&Vy)BGgTGzt3@fS_%TLfh=OQrd=BOH|rp2eWl5EyPRv zJVQz*R|4OK5~)YUcZp`;#%3fJ-2Ml41w4E z1r)rS*&Tm_>Dsgl{(b@%p>}4-j>c1jubJO;P6JEHrJGqYG=&tlDK)saB6Kn6sS=KJ zhF1~iy(v^nqP(`Vz$=Dgq!kiYHgUzog&yKHCJgWXEtEyyyz1YQnSib&w;`6ciO zzy%WMoli6J_RbzEZx>6?vc|nYzSuv5uX`H*PLhyy?y>ZYGoB;x4UBopUxGJ5%9Kak znOzO0NLb%-`RH>BwbXh@C=MD@F=CrGd6?mOQ4AdYvGkU?bRI2O@3`97X23*o7P!JIK!3TXbeP1rkCL9gQQ>#{9YVL80HP;L#-x}jk~#Rix+3Ra+YC*#oYH?h zmO2LEyuJ{Z2Fk%#+!RRFDAs`96sY`H<(yQJE%fIiEy7FV*74ox1x`63n%bbP7g*!ZP-AhqwDVNN9adzLQs6Co1^O<{arrtf(WaCHC*k7CrnT0!@TSs)6)m|t`fR}&JkRU z&T)SflJW|MPPLfae2FSVBqiLw81Z3ITaR8mVecgu+&%H^zx5Y?{2=}Hjt^anYggnJ ziDg@Q$77EykKJNjZm1KxH8RkM zrB+Mak{VDHB3I{Kxs?TV2cY6g$%HkNz#ibx6~P-(rVt#7oYW6oVx@RR16of$h3p$5 z1UQlXu~YQMY|kF;op5GWvu*KKnjvF{^bv&#=NJ;V9iecdVN(qTz^C(OnbA@Z8)kBt z)Xy`{8Phj$z-dI&n4R`^DG3Jp{^&0?CmZJX0&{H%V5lU(rcuqwIY-2mNL=cLSp81S zly}+k2XkZgBBJCY>)S8}gZW|GQ&bm7xHSePLkSAI@F@Y&q7>jIj5)=o9RN`$DH6`n z?Qt6$hsfwW7cO)Q?#%cQpqM*@ak4;&XiaDjU5tp-%}nW~(z^rkY>=e7s&B%&vKem@ zY4n!F&@bZtBO;%;yY*#TFGB%Iv+Ro>ZYBz6S}Arli98~g$lXX&SqH42;!dN}03`<#UlC9FczaDuAK$oN?9ZyvJEHEIlAt)5_%M(+1d$2Qyw$+SyF0QF!?|Zn7!ukCFnUR#$-g;0>?G`+ltuBtdB~QWG7Oc zurOrCjmjh;sO_VAlm%rT3F}O#J);8fzzzXbxx@?C`rFlRCAc+~Ip5Jqkn%g`;S)<; zuUR$2LjxezU)UNSYH64O>_kFlfHzLV1tK%P;0%{RmuC|INok_N-M-?(zP#vMOf1Fu zMql|Yr1FC;K&GLj`+D$G%&f)gW3U9`P9Llxtw(<&IvMIP^8v~N{y;j)I@94^yX5w8$_094w!Y 
z3-}N(VXH&1PpX?@>eez(o7g(=N5kc8O~tl!Ph#wN36Q>*X2bE1(QJ@S0U!sP1doQ$ zyvIkH!>-#xiolFGk2rQha^gK&a}~UY!R?O3-_rE}b@ycT^boK#yCs=r`ax-;{8G-< zkO#Ph0#Cy2Zf1aUMCmCoK-&5s;O`8*n}2`9A9H+()%qhto`wr_+_)qEE?`sK&wLL; zy6oPFhCb&4vShQ9!PdI1nEhHTjXrr7=uqKfTLRoGdm*E9nOhZe%hBBKs@@1Tsi!QD z<~!x0oIIyoBl&Hd@7|1rxLc0dR+YaUtG^n&7Zt`S6SPwg>ys(Rf(E!XXvo&$jjPEH ztJseCE|%|#jp%67ZVNox{(pyvGx|uj6Jq{s+U*trPrEbmRsG?uzv103Zn1lqZRXo^ zoZyEASJNUJHAo<};O>;Zw`l6RU16AP^p@EYj>Fk?*=xtA#GInLP&w|mOAAkNmLi?> zCMfWpsb_%+tBF;X^zE~xZ=WT_KBLW++{;*Mib55E!uLX3@mZ&Ay8J4E@r0>#YPPEsMkNk>MQ|_E`3)!SSJ6 zsxZkhV}WE|H5j>L<|CvfmkA*3#)P#ZG4$*4|vxw-VY?WKAk-^;i!yAD=^2M^<6KitKN0VdTU>~hg$&u#rb?7ew- zRn-+YoB+`{-ryMLXsHHK3yz^mO$_Lb-e~FsTD56a#5q+^9IAnvE4dzyq7kJ7npUe= zv|=NOMl>Sk28R;S3a!?rPMnBJKx!f?e81mX`;7Mnsn7R)&-=&A^N_Rd-fOSD_S$Q& zz4rVCN0$<0Nf5LV_vthz^5r)1c>0EBLm}J)FSjGRNqifGMe?Us7;!G*wY&es<=?H%`j7Cr@DycOTbs75kQ3oq5NG2 z+xG}XH@NnJxm? z!N0g(lb$^Rfog(f*Q$MONmcq^3AjSlLvhh}!F?9QlU^&2?z-m>FhY`P;C;_3HE`ZB@x{VzFscAX2#X3ruld`{6zGf$Xg~Nvw5on8t#b0f_WEx{n63F|Au6+ znUy10EPuf=hi`$tMdGiAW4=PRi|)PQ3%bIrNIb5J$SSQ4vgy_VKv*xU>&m=NI`jEdpbfNMm3~de8=C1RiYFbXeVbu1fzTmiiI_7{{P* z91Br}%dnVn>9b)=crBiD!$vB%T=wQ!3A$jYo9?U7_Ne#;z;Vj)hu5R5{Dxm)Oi!Au zf)K!>^iJl$$#C2m%$D;QaGk9sVa9oY3BB#B-YFE(Un8u*8Ite*cL+#aWY2@UxFraiua zzU}{y_SntV^d0SS5^$V(v^~)F_*eV}+T+;Xe%s^Tz;&*dgc&Esg#UYcEdCla-t5Dl z9RYDXvX%Khk3hD=FQo{*&R_7G9Da=7;qghro?iqb=N-hG@+ln?^o%b($id0{9<+V! 
z8EZMQiGBWW(N)-rOQ zx!xP6$P9&yQ|i5OO1+Fz&}%w}Q3U=U4ONjRW*nFN zwqwQ-X1D~W17+T?T{yzBrxQqk$L@$`jRESf(R-!(Q21!abhS{ruDseJidbtUCf;0q zRdN%$^b0T~LUy$zZelUmEd)1a9)~kH&3I82#NxRA8>#Xqod>1X9DC4DGpU)}D=+K8 zOB!uB4hgcLqex%Liwo||ltC@)P|UvolNU(dRJeLa*)M?I=dsi&2(89cQ!|&AgYq0e zKI}8lON|~I(@|uxOm?}A9&7&SI~CF zb<|}rj52T-fTfYAKn^uliX{1{D`|%FHQ)XeV!;3=G0bfg?=GScp6=WSRM7~OCOq;x zi22%Q3}@xHvGTieRsJ@_lFA>xxyt_wvN4sP+p$|JEC(ICaOJ-$@qXo3_P~^l6gw14 z?JSXEQ#dk>XVhSTUo+ha|4!^zdS%GPE62F%vsl9bXh?TGuz7#Iteny9+vMCzJFQc# z-aP9m%qm@33h}!2>Um{n3L7)2e+n8TV$ z`zUQbl=4z$h?N!Ubu(>9K&g!x6Z>WC6(r{@`hrY|g$wb%gYsV3o#Je-wTL2f$^}_x z1djylT+Kh1;m-?gs2pebe?d%!mp$FLVg-sIj#OEaA-lr48DD1wViua!d`NQm z7M*7Ik8}2?Yybjd+$u44 z3%QG@FT-fiIerC9s>k;%NeD%dY2X&3g=d0y6bY_#mUXlJa~YXsk1ivhLo)F4O7MiWN=j8AJ6DiNP#NrF)?%|5mSx`#&bCptHe;kf^z-lI{iUH?WQS0ZL=%N|Arbgo-qX* z4CYDamufRrOnGEPLe#bPdYTJ=N&azak2Rmo%XGpC>tVJs>>y{9U9e;#hAVbUGb>Qf z45$urvo@`2B#}Z=Pkapkg4ooy!b8qZHoY}J@K*AsV#ii1J{}DLRGM4 zDldVSQYeo|*pfe-*jbLA|HdGK$BR3XkP9B7oej;pL6tELc64$DU!33 zwbW$m6p+u|6doceGzIuU^R?YKNfJ=^n?rp+&EXq}p~{*#WO!Bj?|9~pXjMc1>!$N| zRea*Kee5;|7B`VE4@7#k@*>HgImEH8qRse0p|#YTtzFZ9`9!<7Lkbs zK2}EU30?u9k06<5B!OUZ6UP4Bac2)nF@Os#FMlZJMA~Xl|u)P9%DtLxapUGUS z@|U156~2RsGq6=yO2Eq;!Yra<3vQ=^ArNgRxp-jND|oSUX)N_9F|qRowDL3L2_>97 zXwr{X!&;k8Y=$Q6!xmw@wp!MSgz#5;u+Pj~ot4ZC!ZW**{)XaUA4ybdf8;YE+Hc}* zE@jp0?3dZdsx8F6OHqg;?=$;{(%8L# zk?kk?Fy`Ily39O?dKZ|*f(%EnU&Iauo-`d#CL6*sxd7OBACWC-5W2Q}wx&PezL8`J z{F@QWO}7YrX;Z}%H5D}!QL)zSJ8IZernJjOQt%YfO*mT!#88xiHx&ylU5H)R7fgDw)C6E8hCR$YJQ3A0%x)>A50(*DS)0-6)DuY?FJCDZk)_T}(+rCI zsTeRbf=LvfPHkwUy<6CWRQPvm0`a-prF})%l;A6@7E$1i!*| zryR&QJ5a=}eT%4VPRny&fk#=f?Jv>JnrQGK95{Fx#_Dmh6-#l;lJPH+&Y>? 
z1fiX6?r!u*HcZpe=I%yc7DAirM&}5I*Nu)Du~bKO(kM6EfsRhgJJ}H{<@caQu{~om zldb+cJItG?DO=QGzD1=D^prWefOkj?Q-f^lejgngFDxO7PbJzD-K7*n8X4&{S&yV( zTrBO4E=Lb7y-~w20AW!Zxw&+vvkwf6g@!=}4#aB~%!u1aJ7l_3ghue}ZV}x6ps_yfO~5^-lUz zce%1e4#5E(sf2BH;ukbT8*aiLbduS6LvI-UxXNUJ*Gg~2qknigu8TG?|COrra_%dG z&H61nTKweErwVu$I2m^HHe(vziKWIMg|(A=c8pN*f=9p{fOTTVjoHP)cpF-;p3wh;nvGVEz3kjxB-Ctn;iK0S7$smUsAZA<+a z&D*y(s1m99U>{d5Wol)=&UKT6fFsot#JQ6awOXXd#NMMH{)7FT=X!cA;s$n&#zQfr zL{)>NuH8;ZiBASn|HP}}Af*HHfU~ofT-gN|UW`dXGqMI+4;4R1?QLQHriHR|8wgpW z+9JvB+751+x{7((t(@tKI`F^M?ABl8W+(GuAlMhpagvH_LO@(|qBxI;2d7g($oIi;_X7C<#)Pv^7wL0QEoK*)<- zMe06|E?Z1}ZHuKoF38JE8~5?W0c?VQ>|TV9Li5SN-RnMg=t$MCn8{{ZmA}zje{lVA zvDBzU(=T?YBmX?|#AeDbVuu_ zyr%a@S-sT3LH;t2FFC2`$qCDHH*bTJow3waL^qP+AzVxltb$)AQ(@(0eY%`m0i^Oq z3MrqarE26GlkVt#21_bXwg&bFXzW|cUpJi!V;Z)AzxhP}F8;VrHjBSKg8W%BZl>RL z0v>J8RlCPfrTgoUzNgF@95Vo&#`9Y`4&=#~tZ`uX`n4iW89Rt#i{fDVcE?+{ zhcp@PbkgeOoLm`vE6&X>bDjW#;de{tS#D)Z*{p*&PB1p-AigavCEy^oZMlOe#oS^C zu@*Ufw}W^%`Pm`|@f+y7x?&jf=2bk0gh5vihgT?g#&ti8*KeJx{*KkY8{Wd7iwIp! 
z=v+b<1LAn~$~X19>lBAv$+G+4?zxQJb8WaSlLbIs5RB(}_PH;19R{tR}g) z<;MzV-?#B&572ZIeTD#T11w-fULfKv20UPBJOUcTx|4+extuI5bEcBn*MY_#FB>|G z5b4ss%Y>c7R1XCrCNn}?v~vik0H#;qlqgK!#@R(-`a(`z@fR}uIE5v=hx}sqQmKZC zPVXIv?-b$nIvrXrN%IYg!W~dgX&BuVXO6*ZW*7>?-{48`r%M>#uhcL)8_q(Zf%6}R z)*On*;GYLP=Xzyc7@7;%8P7j|z#mS{-=OGpsGLKUY#xjG3k{dbJWF9=ZyUuaB+j=1 zF^*6i3I7P;P9C!b|7FbW3o>#TLkwpF6kSU2rG`t=KPyS8f%_GfY?mvJu+~nBQ#4Ze zo)kcQl#v%{fKZ=>0Is>$PUUGQxyszEjdJ~cOZ=&F`cqc256!6^ZRHPqop=CpaK44Y zv-*xg7TouGDD!qCaE<8C3tWD7t|K>kD9S8p^V5C8f9O~CI!|k}k@{J!(p|~gcp%W6 z-qdiZjSOMrcIWFNP6=xX#6JSqS-ealh0xiCqf-L}Paq5;HwF?OX7CPFJV|@5#!G#i zs_+~#cLHFl&jvP1Q>TY%yh!~3z|KDWQ_Md{C_BRI)`}+Z*AxxdchS<&gaXqo@QEeyS{MoU8 z^ZD2h;mm3G0LASTmRvWXOpNc9DZ2=6bGJGol-nJ2Z^3OGWdsh$$5O+QtsGLbCu7%8ctK?X z;?PrUs=;=%=Z18W7LiR0ZouQfokG* zOrWdd{hLzDwP5r#5Ggf@%OSNI#@qJ{;c9cjieqhFrW*VsWCs}nM1bJR5_T<^m)GFE zYuSGd98_N1ibHs72G+}sCzv@uAK~$laR_f0AwRAR#jmGms?$HO_XKuYb5L4t;#Ot| zNmZq9Z8oUSASf>}^53zBD*$nx5I4buPuyL_RIw*w6in5~uP4376BI$729YOwGL}m* zt1B9$*T~3p2xdVrBEclc0i?g6CSJc`+lklKZx|G7;8kw*8?b)zqyXDYoH(MbegkHv zUKE~I$5Oo73@t#^9${wCEq7*PP|Zdd8dTRK0P~#PSEHfL81ZHIk1^Nd`Qw;*Dj0`t zNs06;>>s{e_RXf%zr{Qe!zccU=O-onqu0LK)O{-mF#J3}{9jvb+|+#u!rk!` z7j1+1lOpkZ`|*ztMEpU4cuyZ1WSSmM)Q#vwoU>pbojO97vrj?rh5T^={?P3^TfWPj z9beU+bhn%Qb67@FrYjXGRarCpAd~h1dEKu<7|!m7Obp)_ni1pt7m#~0>sUFNwaQsb z9zLa^hR^WOEBNtmhWt}PRyfP~<8}V&;2-uE&hz*~W*6YA?aMvSipxpnp_d9=PBKdj zL#hs;m|?Hw$0|ro;+8VmZlk-$i@8n6QY?E`iKLm`Ok{hXx?$X2hPz1YeP%+W0E@Wc z+gw*-rPwXTapW|5+?CgnKvN6-Bd_svU&cH2N;90C9c997cZX(k#x;6@N(nuk0CR0fEl1YjhFYHVY8s|ahc~C7OY&i?f z=AX-Da}^LRdm&n+YuQ71hx&LGjl5CNp1ciYB|Sv0lVg3licZ0kE^xmBX(qvc60O>`1qs`T+(Nf-1MjT_@uur=x?_<`UxMO^npwIOp=@a?mj;0 zZwLBN!ElfAQ^=6SxXV#AQjH^SzAN$;qt*$XMItxe(NJZdIiwiZnbeD-S;i|CYq>)HZV=A9Uv*d|hEqD5ViAZKj9a=Y*VCGCYc~;U* z`J^5lpi7J^hg`eGekHT&>YmtsK&Ti^*qE^Dz)nio8Tw@+J+EGP=y8`|{Y#6?1x47l z8^>(e9Rt`Mr|nr3S+c#P+MW!d&t`=taMnHzQh#d1kLvU{)#(iH#jvk+sD+O6+ZQO= zf6z0CElB6VJTxrj*1mVK!y%FWPi(;vSy4;5`N6&^zoHR`k>R3nLsGYHND>0&Woyy9 
zW3hJ*5SdFp_T%IOvx;q@8%xH#%1uy5m%XT{Q-NC1IB^IjCSStQg(dj^8ICWU2}53; z0{dSsEJepu4Fa)Kdoat*l41&8N@z*hs&J2wsRMgzE##I+%ZGNRT^+pyZYgQF3%f3= z(=S)0|F-%9?0+bDBf!fDuLVWw}cRmqK7iTwn=gu~N#);Ud`w!bdK4y^@#7(4oM%g>>{XnFY`ZRpkJ+ zQr(Wj23Iu@YTRu@K)Dbv27lFTo;bWSQSQD9GIfS|k1UdO7& zEe-r>)D1U0$ReDlfWGmWB5cnTi)$!ZC*##DV?%CdI+u#KHN`aW#K@l5)Fy6>!Q8~p zq`DSqwFo`h{*fzU4X;2QW?=JB71foEjt9G+WCM@eaqOvB%7f>j%OSP#x|c&XBL{SI z6=h5Ha>%9F!cpGvH*DtPqCOily!r^;)3$)_S^d#H4iVY=H8x=Xl(dI%TiRtO*3HeR z8z?pIR|vP+M{8xjjurKz?Fx&HiGf#R9)}yjT&$uk_PI z_LDZ_mkda`pBP=g(DYm1Fri?&?q};s!EJfZYSMKst)UxenD%BLhc~{{lE%6BS=vZV zk6lptZ@9_rsr8cc?eg(>&pIGlF2&c{NaXiKm1;k1dEsl0QZ{R59gXP7nYH+!G0-`( zTR9apqAhM)(x7!ngK}{BK%9Mo$yK=d_p9WK7eN!gh|k5~n1igq;CTFuoH{)VS+H%@ z(XltC??JABrAy>~y~fT=W6H1x`hHx;>+LD4fJGY9 z0P>C4&5)b&RfvI2;gTn=lgeNZ*!99u!;zF}dZQSzXqMN1ca{xDK;?B&5G6%`)+JtF!zfW{HoG@?}Gjn7V^SepT$9f|DGM65(&Fk2cQb8I6#37 z|Mdd8SA5S7dl8F}N!0L<5o*UrGg&QL5$IvHTu2Y=V(C#f;bosE&Agu(M;NnWPpf4+ zw(gJ?2<3xyV~A@N>}lnU_Zm1Xo4eKheA!uZZ#`Aok}-_ww&Ur0naCxvunRok->t@| zNUy%AkTulnq@HH$k};a=>q06cm$@4-*;?3C&`fmR%f4nm+$@7OK&y}aOVX8Bl&v)_ z#=G(Prw{~M9$OB3^@;|xzEKnoefS`~>$i+vFBp!}`v75Ra|fZlm7v{~#0LMe3%{@K zi4DFCjqfXc?=k8%a*GW+91$G)^uwb@@fJaNO8ddIv;wNcWlwpAC`$54qyQCKQc|Tc z3c8}VlOW<|AkOUuyMLwk239@eUJqN9URL$urh(P5C%(cJ&apeL#7HY)UsaN@uSYYP zt?tC1v@NZ!POry^4Ck)g`9Oc1b@*RtaI(i)S75(FKjW<9xs_o{###FUk&g5hjI#~_ zYGj?Im0Zt%@D_r>**^aXjsJQOu;PS}+)ww37AOonqrC@VGj zo%nFlhICCMj{1@3%T5j*zNgn!S=D(|xcH%YWX?Aho~<|v_*K?!-UNiS!Fc+0hi(Q7-tYiESi_Kd z(171wg%i^GJyo6?cBgLcZa^${TQz)%zMu_$RYgN*EHxKF%mu}aE%+-Puqyj>^aP!7 z$;1aUHIME4#)rcL2^HMH{|~5mgc#Nrll?Q@)K zoPY%%X|w6m+T5mOpVS;k<2Bx`wEE{z^*H=bk8So-)N@V(8RZ&%yv$V#cl@G5Dcl!n zT?gO!`ueJ&JuVeA;k|h&n;dthmO1 zoLBk#ZJ;=RfZ1{0<)8cbXF31a{L{fdN&a~re_a17Yl5p`zW^T-T|V&qGWpH#>%Je+ z;(@bG41vW~cYNE&L3j+%opSu)>ex7DwKM-5h7ex%uoZsryAjM1<7h^%CFh3_+FBBB zayIeDp7`1LwkE_^w9$DnSH3T%^LMTt8f5id$3NT{e$H zeVpgWUa#=I##tsDE^{*YQUWi?Z@iDd-$tS4O4u6bLBYv5zmx9@r%7?fQ5e$%2luH+ zT(5Jz`F*wdUShucLG4)iV+AkcoF?IGoRiFV6I6ln>^0vt_<|3~;{(F*wD%I+jI*;z 
zvz_@4G~r)UoG8l|QbxT0Y0CP6g!ek{$nRy&oBVCcm@zmnnD5j0I@bg;f7nFaFW*hh zz2DB(8aT!|m#hWbODI$yrM&ROPnHRBBz`G(k6&_g%uP>mM} z>>t406|mD8KpezKWb};@@p}DsxAFCATp99Dd}$W~>$!EG>UyrcG{MPY9Mwe-K0Tk% zpcE_|%f;M96A(DsDjt)jSO<`c$dJxNADrxiH~V0{4>tH4R_i;7T8S*9W_N@IxPTeDG5r?D4_%KG^4j z-}qqO6t6A@`Cy?BZtsIdKDdhy7W?4dK3L*|LwvB*2M_VV;XZhz50?4haXwh?gQI;g z?t=**tntA!eQ=x)p67$LK6sH2PVm7ie6Y?3C;H%IAH3NI>wU1n2OE8Gx)09q!Fzmg zmJj~k2b&4%Wk3%BL~CMmn&XE)=7-MnL!a_P7y6;k`Jruo=n_A)!w-GU5AF0r-||CO z`l0Xop*?=+dOx(!2aBe%KvIfb0Ga|73&>4Z(}w$@NBZDJ0C|rQ5GEMV^(Iq)xdA5L zF0g*2BQcz@ZI(N{jL~0M2%~PM>q5iIyopTL@+gqolXLJFWBMM?@mIrt6$B6j9Z?=$g$Mo z0M@W(A#5iLbtq99Mg9Yk?PL+7v40L9qd&u=m`TMfR!k-^n2k|QlQx-264TSP*Dggq z-a@$C(J;79HSL^c(KKZ(AmESLv@Kg0NoxUun)aqfOYUhYKvdHf0mIzWKwh%$X?RpA zh_vmdE^3Em^2vQqLxUtRieii4t*50LsB-BRl5m%R?4~>p&oL2WmVrEN-An4{1*w>H zE3w=(TohRXk#T>|_r0It;;VDs&u}V&xwQAa?q~S+s+=@GMsW6ezqy|QO-5=CcX3H$ zm_I|*Bv8K_enpm4^>B*`^$IriESs0-tj zH_oT7nG@yiEig}_8N?Qp$8k$a33Q<`RpPT?ELGetIPbbWF*)f^@}zD>GCUt#iE!*O znEeY%simAMXOB9F$mlt7`3M7ao>peXeB&9_%G*S@cldM?lUht?w9<0~E(Jyc*N-47 zVgIgPC|l+H#jWsokjB?_2$%7ce?_~z8sguMvIEE%nm{8pqQz3*qT|6SN+D=lJrIIn zK;BotwZ#qf<7qHv)*uS&W{*>r-BXwr<5-W&)mMTHOYE&~#8Uren1v&b!mcU#=6GyD zf?Pa4f^fqXcN6elh1C~(sIk-qh)-BamZ3to;^V$Ko)y5&1h2{7q~wQNV|7pV(2ps5 z|5m+8;73S?S@n3KNz_+S_IfNelv9Cexsu207TZJ>B?H4p^d6LliGgcvzyjx_8}0^w z(Ae!gwq=2aCQ3Q*b_C|!!6)J7jqXJTM+ROpBKMy zxwHItUu@ze_?{P`Fy0!+t5j3megj0bSymnNJP*I*0r>WYA$j%k>~P?UKCoMG<+TxY zPlGY2`9Lx~d`{*rMS@(+osV!&VN}mA1UFvhyC9x=4!Zw~o__=2G{ve|23%`=?xi!X-zYjc{mx*V!y;OF6T#NBx+{s{NAhmS}$q$7a?Jo32*v zM>->AuZHgh2DKki)PA9CF~{86kBFQK_G6F-ZTSgUc^L5hD8E&M?(~(Xf}JNp+bYK` z0EPk2&R+^a!k?M1e&0-fx~C!!SvU6d{W$jH5q%%dFx7XESGr>F`(tvD@2t@G@|f&H zpKaK3{hBw=_sWB-W7riz-;p3o-(Oe1=A(dL^NI0m>gfuE{TggP&gIt}qw2#93?Fz5 ziCwto`HZA{2z>{J==+V}Z_D-lSQH%ko`==yEPId5JFxdMub}5pVdJsoi}e5@F$pW2 zR)5Xi9H~kSJ)Ih8r#GtGA3X{j9ipQFFtM@yTCG9bk2`{#dqC=i6^Cw{=lm1UXmz%5 z<@zm`n045lXK-nC&2rb%MK40{={GNaQan2Sd44A@f*aa7I{kjKtsb|WbjC^-kFSW` z^0srzgs;HkBJmp1?<4H0cHlclBDf;CB!BeCKh=WFqz5>;g8R4RHnx;whVbLVH?9F^ 
z4)5r~3n<|+KIFp0luIo2q4d11^N5*|1Ps^dw8_yyU)L{LZLM9BbsKy^N0TwNCc|-* zMGGZa_dxI?Rnk61?oj28jksjZMGd<6w$klUY9#7YTWP3ybu|IVw0<(;pHH!ZER!+~ z)FGe?Zj(;85IdGS97&B%qE%Bm=**b+u_Az2!!YV)WQ*hlGWfh3op- z50-;+#4=jqJwV?Xhp}fw{;||AD0FM4$XEFYwU$#E?Xnc7w;W-qQ_Pad1(!nQ=Bzds zXH+e4rg7r8dq(O3O2V2c0=HVFYJ+XKQ3XXw#~OY~+aBSnRLp>MA8}q)l_ZHQdDZ9Q zN}6jrsc~VhVlYFS;i^w%rCi;7ZZ=o>@hu~yVM$e5w!7@_^Va%iIDJ0m;WR66$z$ia6wa}nsrf%;+MFtw5es&jtq1pD4 zG69fm1V^z2O@>^(X|*BDy@+YaG<;(d@zJGX-zg^-qVw^;W_{T$X(h@Dd3L{;jRf6( zlAJ9XAT#!jO-$H7^<$L!?Mj_<_eymO25y_;u1kl*TgrNzSg+j1(2E#2~4UiRsVxWiG-tT~j+1`2R>d{PypEc=Q z&L$L`%^40K0L{D%;ka|Ojob#K_hGc1oJCs09yF^4oc{=64OFaVa+nchRkG|U;*X^B zOwLhECNP+FQkcLr6Wst>b%P?S21REgEWB!P8k!AP4eZ-xE>~F#^xpL(W9gJ>T38EB z&`hb7XvrPxOUNubmwP!d%v>&c$(qX@s}w}A=-Jo{TTPS~B_#H|o-+-8fBjn;MXN!hIV z-Rdv z>R~i)AH~OLg7k_(9UP~aeTvBh26IA`=0?>SViHAk=E&2jGcerzr*|~}aGdJQIgO$- z%345YR!Ejqux!aaog_a|ow)`WMrX)Nmd;$D6hyF?{}-Jx`Q)De zA1Mi1MAVi|!E@5z)fqAH(3z$@TwlbhHx9YycKq;ia@?v_(Ml}W=jO!!OAr#yl{zv4dxi^9_@4p|<I7@Z=*D29$Wh@|(*F_a<<4TdS zkZvE0;W>0$^2X8^qNBRK3owjslanmn&Ql5^R=cT$8i~P)-)FvCp}@;q6%JL3TmteuJdOZV9R5vPd0_5dXAG z8#X!DcA3*I6R^h_H}=M1vfw|e&IBs)R2fvx?NYUtT8gT5UsQYkDSo>QxS-Y?KZLsQ z45RHNZ!d#dbBqPGZi^CmT(L@6omgkZ$0%!Iv=!Z{n5Bxz1O~G#O7l=U$(bZEiQ=iX zd(1-A`Zic`cJmKU6}48z0unhyN@oijX<;m+*5Xg1vt`~;E@Tx|>r3F^8MP)SS!#WT zIDj)ntah?i)Y_6O-=v%8iw4@wyQjGhlLSp7XluSBIX6iYm6{l@=i9AmN!PovOl{i)(mo(5#&7lgBnt+ zLS^auIfav)rI<`$FiWGR<6~lCz+q5wn~t+iRnu|#En+&9v4BKol8DZ&!8WiIA=9yg zv}0TH#-Iw(QPa^2RWYW6oMf4f4@B#9;}S*)Vmg{MpWGt^X|h=(1Thru79<%VL`;W! 
zLf(@6yb%JhjL~p9dO1ZW6TNKl-W2D>OreGIM+nW7gp3f1Ab01vX);2{Ta!a$#E}ge zox$)zFsdw7jS;ss7`>65sGVF%VR|EkdC}VaP_eqmM@Ep19?&?1G^SlK9mQkfEN`QA;620bR1RsBLtj$X4OYW2uG{VxFZB$ z1-+=3@=%`0_(0Bh;jTt(Zjf7~Mwk7~b)NJLv3Ev){wXp*D0_qFhH<&iO!cJhM{sn2 zV2FBk-ftoy@ax+BZMg&km7Ap z)$Xhj!c{t6?LoczBZN6oO*~Vv`hW$6Br=3ddzjUjj#f+_?F1$h7|g;b%^}3Za08e` z@$8{{U|d|ObE;Ige|CfDwlWrwh)leAyWhxINVjLp{DUodr-hrSZchV-(QR^)rQ6pk z1raMo2-HYR1R!3Br>_2t$*D+?Jwgyo4(fJ!Ke~MzrQ(keBD(z?1SRLxzR&nzK|DG> zcnBfiYkV*{i`008K$nl#)9@T0Oy>CDR-A#H92y^}#ssQxC}y~!)FrCa)R=H5AKhn` z;@q>9S_pkY2d}f|0T=QrMq?)@DiaFLRS>9Yb5!}xRIDOZQz9ER&1yeKD`v4`GJ(O& zi_&b<2_0e*#Z&0e*x+v1Z;lOazfKfd84F0`&2v~fTiC#umbnI}a~#YDAzJdrra(kT z6?!=^j6#!>EQNla6fh`&W^uEKWoaJl+_T38vn9JG5wbO(B9Et=n3Mxi*Sp8KKti&| z1>+@o3oF5l3xJq=TyXG2$V0{jEP8ZYFw`)O0zF@wj{>AEmc2?YX+iy#gEMCuY(!L9 zzTw1!kMZ!wHNZ||0&q56F?fCf1^y7$Tc1ztTtj{I}9i^HW9af z$NaZS@KPnH6aD?42%jv3?QuewY5E;boS}3tQ@Un~U)H^GB%KeH$@tE_Z*qjtT{K+i zGEM)!{&q_EawROY`n2)R4&-X#8g zc`pCHhAajh_#=e{nbe2Ia9%YOy+5PvWCw*g>_JE8NU72xtS0Q6%f`zGK0z&8p%>f zSBhm=V@uBIBzcMI3P(0ZSI9?}u6)9g3g%ZSQ26ztD<+@Ze*JZ4fLpjnl%Zw!K!)_| zU0w10di037{Q62Hhwr0R(3%IVpkMzgMCsjs@V))}j^lIs_fI1D`}_Ct+(btpxL^OC zoo{eh`^ZUDu(>sA3#GcEzI{BZ**75#Y+)A#Z9P~h-fzGnO<=Sw&(uM)^B9diDp5_m zb~H0?ai~B3+k8>YJ_lH!)uUKMHk#cLrFRT5{fGQKI~eloA04H7oxD=?TA2#y^>b`) z*vA6(VU9r?_&B1VU6QXppBvTdCSVx7CcjyFeXCLsal)skHd-d1+0WuUMNA! z7I9-@R05>rdEI&@C^hAeAe4W%Q~_fX=Js1kp~Xgk?QTEiYJRwd4| z@vkaY9tX}Cb^_U`C!MKKc8@D&p<*(D!OUu1cM=n;Tn5EcWYi!3>GJF+u-WwIpS?^J zSs4o`@?TG5>BOglGLx~8GC!cvk~g;PfoxQn)4(vwOir?td6H0=DPo0NPZhK*%_p}{ zUn2>cM9|j!Hp&M+y{K-;ry!B`>L$3MbV@R{L7&PI)PZnj#cHosd zt$Qh9nS0-i2j5C;E5(`j#wMl-wUs|AtrCV?$$AlI_|mB9z(9kq(*{q(tOlPoWq!Wq z*{8i0C#gf3o-CQ_lhVT;Y?Mq>9J_?DhAY-^vWE3Id`vUWvM)O+W|?9#fx%=og=ns? 
zgfKEmViF~HgCBT|Hu$CErNJv>fd>B@c=%}W!iIThIoNyMrqPnOK8zi0@O8j24W682 zHTd(Cf`}C!d>;y7HBkU83yn*=S=@77@M9!FiwN4XDRz4BOy?oX484T-4c<*YNRqc| zMYOg7OEc(m4#dD7GxRF)fC%2JfC#j3@ZgJ>p`9!zhvLE4L+;L=b>hL-=knk?qLuv{ z60}nD5RNXQE2=TKG1^W}Mh@sAR;tFh_Q4y;T|q1i_JGCOP1x7gN1dFrgivK;JX2Xu z9Jottav>!dDP$@e882RY?Fb--z4q=SRY(4Gk?4rB5)jab(rM7UAvd88;qM#+Shi%F zuB?wL$0e9PQst;@%+idrxi$dLNCe*0g_=UE)f{sBH6u6~1=oiXvuqJB94@A#X88U( zC(&}u!*3L;)T#n+%>&l|`~Lc~N9FX_A4Kr?_t&qMp3gO{`xV; zQitZw5X*;(iaP7VqkhhgNB||Cqe|QtRpM4wyj?j18Z6V;=@k2?km@}|wXv_A1@)Xk z^^2L!_L)4nPyT!O<6l8=;r%zOXd1rH{+oxgNZpMP=;rSJ{CZ6n zexHW<^|(L39#ztA(U`f-s-&~Bl(ZB1}}?HozEGeu_bv_(lz0r}i+_MZj2NkoVX zl4+CSW{XOO+-wQ)dtG-+%@dNmg;F+dHV|{W*=x>o-E0aw>Sn(Le+4xbvD_EoaRWTo8^)*g5@;<<`)$Hj zyr2`fcnqQnT>E#3uRYo;waF!J)e|$8dQYO{8WeQElIN86l|n==VE2hU<71AG$F6FF zkS2gPzC2+qkQ9MDiSgz5*cG^fH|=K*Pm4+71zpG>t}g(1#lAFvXU!7~I|;vtIfRRI zl491d64Nb23AMZ7h{l@_GS-D%WXrT>v&Mr|sxS!%CSs`_g_sOB5yo;rKjKkKu%10E z<~-2mc)KUEC5UShNgnCQ?!rDa%JFHroNh#0`!v=FT*=mEDZ^JhyEnc<&nB6ZJ2)37 z)#iC+zd<(cdkM&ndK61dM<8DAK=lYEka1CBsY!@)Yhw-tVTle_;RO(8hgw+cB5Q3V zSPvTN$6*}J2jhP@8K@&YhGXmmPvw}ZTv>JZV~DB7I}Qgt64mB+4}w%ql*m zy%AC7wFbA+W2vKkOjkWifv(0vQbdf!dr;4pVyUhC%u(%35-*s645G&3LeVs1$L9%# zogA(?go{%FO!mxBB2o(~!(oiFv{SY!=j65$TJr5Azsh-N>f|2s$?B%pi|SP_RQqu4 z!Z9P3S3EzE6^L4gWjb?%p@5_T2T>C5U*G0prffHflQJD*Nj_) zkCB;)2f}Ubs9j{)O2+Bl6 zK+|T`@R64#c;w}yiKm&)-K*L74@m5|tW544!usF~FK>KXj2B)m1fLkJlMdg|`)JYv zJwF0#O;0~qvN};;Eg8<@9E_GAvyefY>!5hB4`oUEilW9j3=hadv zf-F=b)5A5>;;1xERZ<-RQY)jRrYotH!~v;Qr;2H4#?PqUo`{mdG%$*$VY_a&6x>gj z!!A6vKWa9&UHAi|HfI+qQTsu=a27bqWfyi*LjBu?K|%;d!@eFjfo7cTLCCWUo2tWh zVHV3DQT8SgO;VzKyU-Yw;LilDbp|NFF0@1AVHf7)p!X9Z;s7n8*wmx)9U{(%(4hlL z0=QejAVXn=4`Ua)a@vK1QK+C@sE;bdQgLQvs}Sr$J+lZZ=>kN+?1kFo?SmQh?Q=eOP`9?ZYBAYT23RtOeU>yUkJZN%U1{yM+|2X}ft*oFkNc zF>%n~t`pjN^e0#sB_w4R?6=Z07E`KlnN0#up6BukV#VL zL`J0xAI5I<>;$gdywVL@V+ARAj6ajW)2D1q2>Ow@LRW%|!>`zqu{X6W%)%&v2 zHft}235u~7ad6tNKe78>B$n+@?2mA@7gPSbd3z!2hQX2*!eqN8Gxnl1T93D(Bcgdm z>5aYk<4Iy3+M<}Jmk_hHnDR5tt1OB$TB(&12hD4&lIn^YsWr5Lc0t}-o_VO34Qbg2 
zpWh&+*_dN8t%+v(jxg3zWHNpKMB%?Digv$}lD%t&)PyL`hsr-s*^~eKKng>RHvEk0 z?dQl8{sg1G%bysw2WmFAKYXKsILc)gAW0JH-!42Og#2;Fb7KN_ z;R*Z*+J%KIf5a}tMaoHu^6f%AD#4Fc%JV=0cHuf`JnTY84th(8h<%!f%EP~?e4p^? zz%F!=051A6$WU0}!`Owq9>iviGyXX`Y!_rhZLoQqDOs3(%5$5UMKJm?M5Fc0LU?X} zV*fY3!#*@f?QYgS+$Pmx>_aIy?$w_Td+6H*X(gXK=8D@2cgK5*qtZ z7p>3tV8CdGAq8U}rdCS(S{TJ_QcK>&nFoA2urqnA4NQO=WK^~AVeCv% zPCIiAZqE#kTgs!_Fj}%OcOuPgBagvW_yZIec7`|bg#C{fU;7UG^17IW&Dxhg3W~8W z9mj9RzI?J%Zu|02gsXkY|7`R2g$KGrC3^z}cS~mM%dBWUKD(#1s1B6g*p~yqW!##b zM`|Ny_n-;lK|)ZfvR~k3Vz^7Nt;&`Kttx$VDG&bunwLg!iiodG_t}kI`FVNi4v0Ga z;)pgpJ6#bQJgo+=f&kaNKf;GZ_~?kCppTAVLOU~!FO4viY53F#zvbq89=>?OdK}tR z;0f#4e)UX%I}!0*qBWb3ob5LnWwV=x^KiU9>hwMK#Qo^zd^)yGq!;x;815dP$o%4z zy(zkV_C=Zz%V`&QJ*sV$kQ64Ot9rlCYq{Nr_1bku87$og#i5%>u{0e^9U!)*po3BN zahOXF&x^$*dTY6ka{C>@b7WF%J($7u0$uV99)>8gn6HfI$L+D0^n3#^Gm@{0WEYM$ z$(z5U%U-;-5XW;_O0??P z7u!)-vWLf;eC<^|tLsnCpk!50HLy{EBXqcaUX*?e4$>@&r_z=P_~4%Uo{?&YbZfk z{L8c3;h0p}mH(rf(KEsovz0mYEZ(~|q-T6hI-(4zi%q7gI^UsZqujhaMN=Wo+8Sj_ z|2smvVOJ{J&9KUHuGwy?p|I>xQ+J8z+2-uobaAHlITdNtp7C0LTq7)U->f~`8A+mg zRtai&$FFba+No|W*E zmWym*&xYey4tu8iMnVld6|`rEDrKU1DtLz!}B0Zy| zm{mm0x(AVNZhN-tDBqqnOTT&4o;yYQ&CS`f-=XT!Z+<-3*s~c~9VYD=?_Q4Dv$;qT z(lgx7b(Sbn!3;*_uxBR(^z2#@R@j~`f%64Dt051$^z6Ir*>t9lY8t1vVh!IGMC@5D zGJ&$y$)~AaitX7m_N8Ha#vxa~_N;lUpgkK#7xI6$XAZohEwN{i@(5%X)w3sMEK|8< z{pL(JFHg}_NW}yDw`Y(2SnZkmYh&(p{Wbla|Csw5&tTJupljE??hg>XyPlAdM?f_E zcc|cuEM&^>W%tW(G9kZP@rbC>{nz6zUH!f0negw{m)+kOjMUQSL-k9)tBw=KCJZH2 z?TO+4g*SNq3bFuJp22dK9K(^(sOrad&2wgrWSJY78hV+mr47!ek5D4?L*Ln1=<`_? zVmTKn{mtvZ&f!T^@? 
zF<8z+M~i$8CH=_(`Uy)s(5PO1IhZQ-2?s><2He}(K149Kh|x9ppi^QfT%>AG}2oaNG?4}y(uRL3Z}8zkK;QMN8I`JGOpK|HaY^({aX2T(TA-8=Ba* z=3JRZd;8^|0`1Lis_lo~*16~rIY4h8(z_Y$(AWQ_@_y*e-A4G+tM*h%=@udf=)K+u zdiOpUqF2%nz0-2hBXWS=uSxH*KL+TfGu;`?XnXwP_f}W{3;sSa^82&^esmIPF&zlZ zY;3l5HynWZ4{kg;^$7Pp^eCNY4V}$ep@th}IN$4R<Wug)nUw0Uw5(v>5PpF9E z#1?}~j|%)a&;8+?;9?VJj2Ar2P3r6)L>NK@p}nn(z;rvf>G&fLKQP(jltN(65AcUS z_TrD-mGcb*LA3Sq?R7q-U{>+Z`~34S6a07n=)@mBsosIF@^Z_e z24eP8j#N^Q689ngc>sT?_V@9}-T1@Qx65~#^GgVizi(vZBnGNUcZNcsjQE8~aE?hZ z!X&6-g3*BleLvC!!%c!xlOWF|C}x74H32<+Cu0(95`<;W3X|g+{B}NJptXozhI79` zd>dgb$lr({^}mD=>t6)H;!lM{RVot>0p!;H!Pz5#vxCAM zRyi*!j^NK%Si&DwxQk#4@|N5LcFsZ{|x4z?fEBzxEE^SaXRM{E7cZ{jn?Oy)AH%A zs%L+-#(58gbbg~$gG+YWo`w1NOM9!XyQ$r!{sIU~A_7gbLMeL@+sU`l&3QIS#QSrwy{BRAQ zL-=Epuh7@N=W`Av3=OyRhrGd5 z7nv>9Oxg(Vt*}V4NZ}%e4+8AGz)IQxS+K7EsucPdYvd#FKPA4`S#G|sP_;UoI!!&9Jquxg-j?i=;gy8%P2@&`&6S=GT{W^c^OIc37 z5+6t6|5f=)p}IxptDOJf>-=5_EOXu>0til*-z%IY@_U(cbAaGHhFOb*;6n-v!C8RG z`3y;uaT@V;Duqz5bCU@?!elheWbgxgnP@NbJwxz%o$ciJ3TL3;^*YBQoT~Q4f%;vI zFPIRK{afK8R_o48Z^`@h&ekT)Cap?||3YC&w@P7A(RTvjcPc#zPn!I0(lAl2Clpt# z+l40nXBsa}`80(kJ_%gsmOw^_OM+F-We8)zf2nb8)aBhYp|q`&CBD}=LF2{D9<6Xa zaSsOUeDp&SS>x=(AH^CwoE7~VA|QrBilj|pu|7{JEI1BDYdpYr74Bo&PK70HgW`+0 z7HYV(sK*o*u4;k9su`!abI4r+urn=yMEN-P2Fu5Qlm-ZEBN;GJ1B6B?11MkCCy5>d zdd|8bKGz3o2Ez583QNiUrZ`f9#R^Ln3kXBIKPORqAI5KT{+?zcrChGCRDL^P*3t9$ zIyXq-WzLg|AesJ&aC(LFYZl!e_3+nw zYVm_Dy_i4FkRQFyDT2}K3`Af8Ieh&CBK0~)Aly03AbpHL^0%+~ZS<#;wYV)2UlxQl z&LXBZe)3vY&8K7-c|N2O(tcMm%=E2Bf5ajh{gDD2{i$URa}-}B{(GU<>)eAcC7O_o zd!5mw4@uuF@hhA&Q5^oh!u%d>ejmrOk7b}S$)^ZH#`yse^eP{x_W&in2494UPJE!S zG@0Esy;R+H3eO>8+F~}6cQm||@G`)xs*Hv$ByOw14&f&xeuZ-h z)1$BtFb;KeJqw!|xZ}}XW1gqm8OLInHM57R`}=*r*wnohu3-S*!pG%8W0ms*(4fHf zQeH&JrwP4`a}ek`Czv05iX?kQf1ql|eg#!K5OL1_24N5Wcx!+BS;jxxN^q~U7(bZ7 zv+`Z#{1sm?A#LMfh09o|-zwZj_;1Y1RK#M1rQiz`mb8~CzVxNHDDz?s{u(IAZGp5m zDlGI53-EA2Am7~-?jaA`DlBbsU!^IPe7eH(h_jV}Dd8V0EEWE_rWGr4fQF0V{*o|c z@s|=QUpXuLeT|n&d0SzrgxeKI%crn}U!!m-^B=FU$nRW*Ye@4{g(nc_1cjwc*C~Bb 
z(90AS{CyQi6w#DVTI5y?M{C)*AHOS{qnQS+_H+DZogRqa&VTU3d6yVwVxd#v8s@%4 z;V!~`DwQH~_-Q~w&59$nNBsdEk$Q*1BI)M>Y3C^{+&-kR&>t7zA)&BHte!Y1#WZ-zqi{XpHK5@ftQpiY z;5`QHp#kF<@HztuHDEXco?}2S{(xRF171ZW^L|N@B=4sc7Tzw`{DhC6D=gt>C@exd zN#WrX_^}EXlJ<`jor;#r$oLdUZYQ%>uyAll=3ThR$cG z(H|+oJq(8k$!ep*Qq1F&B`Ml)g(ZB5!i8kDSm6@F+bi5bI4=;ses7^K;w@GBLUV7E zFY!Bp!>a30+?BKr&nYZb+oPftaXzHs_Zo%A5gx6ur0v@iiuVQnpzwltPylf+Lx7nqQyfY3 zlERYV(+cMiXRg9hxPPmVrA@sFxGKG*`|2ii*yHerKTf{~KTcuDi6-PygfQkw^Bp=< ze;;nX$C~d+_{P&Obx$zAZ!q7b=DXZ{ce_{94>RA3%=b0(ZNZmx3Jv}vhL86U-u>j= zns}%C^m~c<*5TW|%KTVwzDo?nL9-P3X!9LxzMbZ~-sJEr^ZPb@{qsv_#nT^EjIB63 zZl`*%6jgCn#hGWsr+n(j-lEh5E@h=tW&BtLjZ3h3I=yHdepvwKI}&Tp!-vmD-*`Hg zV>FF@)Lysk3Y-_?>AFH}Mw`i#rS_~YE`f1xRVJ_H_W2cvEW$Rc;UJKacahiPUuFuQ zlU=(ue_dt@H+tr;Bg9u@72>P03h|97g^E3BZx1T*pdlVq>OqHi&~Ohr(u2x8=r|85 z2Q+BiAn-C8P~N(AfZ`sM@SqwGI@5#3dC++tRO>+(dC&w8y269%JZPc^P4=LhJ*eJ; z8a$}cgQk1X3=g`;gJyZq?>(s5gB~J;0%xYo@u0^%Xr2c>A_dgxg}vp4t@Og)^}@Qmun)a3#|!(^3tR6&-+0g_?O%;Fm~tHSTaf2%_Z27^UMIi&td}uwg{|(@)uj!RPe!W zG~}zCR(#PdKZ`wz*nDx+yNtVpVRmadmLnZ&55Tw~2yKE}oMVHrTwBidn7sD90k{`R zn&im&Y?64YoOiBT^Vxui9Jik@L>c3@UcZ!)50l0-xAh@HKzCboUVeC6bZqc#6iend z3WAsjpd|2NkquAS@J!nf*zj!jH*3Rx7F(fMtRLRKE2UA+?MW!~*~MD;=XVub1zI#3 zp$!>3NKqs_VU<>=K8rQHUdq&KocLg-{+z;0FLr(m8n{%TmTR}zRE0N;5W%YjoEqe! 
z7ah6RJj%-*OjGNdI%x%5ZP%7D0z6MX&%Rb^hv!lME{)IfY znON#Dq3fP;y%E|7$Az8Wx_ImmG`gqfpeb=Xe0qK%a|`YW`s8Hpp;Jon^fY2ZhY)C_ zn;5xf;*jX(ia$xb9Dj}5b@|TjV7&@Eob&<>-2w*PuZ8YcWXug3eSO>m28HjUN^Xg^ z+mo}dDLF_ojwpQ`TE7({p!Gef^^yg({=KSPT0d(C(fax=)_P*brj09uRBWpmKl;^r zMq=j>vW5J}hG>1mhNF1JD`MJ6L$`E#dqIX0+ir5G%eWhv;)fJ=o~N)yQH6cuq>#cA zGnRS^fkxmt6?VGB>uXz7*zwy@VLye!V%s9NK4XJYh1ryeYqmldt<3Xm_1$)13Oj8Z}Vdf_Z>ks{w$VSPEwH_onMY&QCdpLyd_z3 zAD5@bv_51qWPP4x8qsD+wi#`fwbl$AvHTe*5^n7L6lPWqZS`^RM}F<2CMS2zbJc@p4JGsr(m58pP}O@%k_E)pzx z01HMp4@X;}q9v>sT+{mhaQE)vRTWqNcmhOYz3f=AM6H@CY7j5rr6dI;k=r?PqEWoi zdW%&t-YQk1QB(rS(QLO{V|loeV^z1$M2Wt$(h+RYu2n;vu4eG4N#i?!iZM_)#|sB0czdeV7vVygRDh$ z?84BZa38C>seeJ+opP7y0^mogVrSrB4JOU4rfGWisQ%UiCrj?jbw*o?TWEp?Lq&Oh zLwy6K6z#5doGFE3g4oDvl55qxjxb{?X?IjS1(4$EQG<`NYTRMK0CO=?wO)fqe$G8vZU}!YxdS6;b~ilrM3h~S{92?j`I!b{CCd>O%(ejEXSoAUO4u-PyF(@R0e-vz zNZAOY%3}7qtt8M8O#S(c`d|#O;;BIq{RP7)`i`Udqlib->eYe%8t@(RGMhMIF|Ti_ zzXY+Ktg}zxd_$`o-5=QXq(XfIrAGPpdjwGj!IJF@M6r<$Nw;eLbq4d?qZqFSh-n7u zM5!|UzN%Nq?`j5jFumUlj0B6Icn!J~5KcA8XETVql>_u85cJJZ0?kS42x&f^5;iRk zvEdfbPfF9h9k*bM2861!eKf06Bp9_Sg40X*8wI9XB#y#g+@Gw(CA@hV^6(Vz*nN~1 zaq4(>-CQ}sb0bE8bbJQ>TB-(cc#c1Pemc_^o{`81!c&AA;H(-5cvRnBrcKDnmD^mP z)5$XYWZk2r41`*`FVLcAI^%B4&X+);f{+4DNJCVT7fLcsr+$(`C$8L=a~(F~&-F`x zzagIs*6Hh{cOVs-d?#9GbJ4zH%9@=cwX&%YzY9#ud!G~4!IHl>aR#BKLMWeNuM%1V z_Y*otihz~rqU1B$7I>L&uR<*5+cDcHqy=T*pjwyd>nL<9M(f3^zp?x_IFE4Fy$M+Q z#`wL8Y!Bb6f*nF+i7f zXht|$yjKqrYK4SACpT0lQ4Dvg43zLAZO5Tke{d)!ZoG?vM8kK)os}%N8q*I$8TZ~! 
zozdKAM(>DaKt0}SsUnofLRL57TZ%g6%TAV>bdkY99a3+za!y>wg=$?Utf76f-Mahm z76c*c^cUbxwer_)w|>z;q}!~^Uc~ns_-~zRo&WMw>!g3*I#gBxkn|-4cTcr0XhXp9 zt=p{CU+o2;Z?{=f)=#xAT!+|IK)V0z?twsh@+zyk@+iMz^h!eEz2ufPdln<`;1boM zovbwUYoVYH=*5Y}m@jzsQoWd*7--#l050sIpP~@(Q9w3JEJeXAZQOp7@}fsUxU8=g zj=AZTejoYSlbK%2tMh=CK!@p#bf&L5-w8;QkP8q&RxE#bw`&*EUUXA2p z5n@<3{z8jqO8*4l@vs7p_y}hu_hklnPX?I1K8p53Y7XZuG0ZQ)D6FqFp`@6jHL$?; zoK`qkW5bGEOahYg82|(6-nHA*eZcL_M!amTWDLs3blH9@plXYwkA=1j(7=l{yDiK9 zNa7(4l_i@I{$%(?v9@qFZlig?RMjvczEfEo^5E;W-=@g$tG~T4WL%?c@8BVVKc_GWIrx922>m>O6;oWX(P1N z6Qsbr^X`vJzdV73H$t>bOy}UuG1_BcSDM0{M$Agoyfl3;>w)(O3{fs#Mq}2^)dXj^ z+73O14Cd+#+AzduqEG_T$S%~a7;9#q%iyVYt-w0c+WP2vFq2Xm;ZSxHCY*o+VLk#nbmTp!`yp*if zJd)WY7*R#h0N4EXaX65gt{l<%jP5o3`xi>Uo|~9C{5unQ0ho<8glmbw>A@OaEbbGR zt#NT^C%ATW6PHm{f)Z6;24NUK8~`g8vqJ#AnDn<3pR>FXPI{=Rhmx39b>q>N95#Nv zO3k4g#7GvDnyd742;NoCOpxG>liljuD4@Drd z71h9+`64trSQVMG%6h!1W|ftEijB}r1hHT8*$3U25I)^btmx~Z-o_8#!9gqwp^5q% z<#jhNemo@8=>>|(u|@=kYSb8P@h6N|3X~&nz``(+4;41{e>$OH#@~y^LIf64LuUMy zlvk!DXBJ;`VzB2Bu4sfZ@l7EF|C#Sn?%jgSgV;^Y}V>lR-rv- zKC=P?{6gHG2KLO!=OYDH8Dq|Gn20Njp1vf+PHj%UFr7bMdtnm1Pa=7Q@9ytIIGAf7 zfF309wmz!?uap4ZLv1BDLLm6k0-jSNaFEEqGs#%p#B^8}nHvFf4j#`q$|=@)}_zGGXrsb)OA{u=Qif7(Xl)g@{}LYFO+){Gk^o zA`gE|ksOvWJ1(pq9i7??(-g2!v?e#?Kifb;JBMe*D}CiV-d@gX!(dGw{O}vnW+;3v zJ9)^>1kI0MBk>|UME=Tl^c!4-mF^GXfR+pu(=L6tn+-{uI^Mp2Ftb8GT7{mCouOn2 zmD+$@5}eiQl@X(j(sOcZz2+*JmzyCJRDtvwMtypk!SC*Kg*+Me`+DHP`4Cqt`Q`B} z*_<5FE)GcvNyIvd{%s>*F@>pRv>qY&xaSeVT~NgpFuPtuaG6|2HX`0EW~R@o>j4j~ zJD;q?4iOxx&1g41TA&s*YA6D0sPGc5C3#o3L3OXp0RRv(1wp~RR#9W#XavTy%-h>u zNVggYAwU;xxagBOU=lL5Y^SbjR*aw9KS-T*2yM_DmbBMT^KIYtHK8nl>wWJ#ZjFQ9-ly`0ApX9q7(# zR$IyQC0k`3h;A=1Dma}vI(o$#@9#-9IAj=GsqXC16?5>Iuv5}W$g&mY!htou%;svQ z%ur`Sa!s=!thY?!vu5RS%uVx!tt{FQek<|1-p~-%bjt99vao`UjE!Ra@Fu^(r3Z>! 
zrr}JC+_W3Xju>A5qtPl;k#5!zcjac_RgV9Wc0(!k8%?abDJQ*C(K6dxv*M$CX3mc` zF`LIWbdD4T4#a_6%V~h2t;eE>Znfe#-I-^gWJD8W`y&E{cm0OWBP0;@8e?4pkc5l( zIL!NlQC^La`0M8*zJVeeZ3Batg?TzoCSsq^oW^3Xk$?Ji!X0@`*P+h(HyJ%g(-7#n z|H(Y$b;7w+8-A-@CE+PBs3H33+vCg@55;+lC0bnk4&W9>>+==hQWIyR&Lkk$V)~3QXRtDzIhuliIJf@96X&LoIM0`G zigRmJoRf$`cpHj7*A&ReoiEZC62uqj40dwPBxKOHNdH)%BO)z{LL!|fU1@KTem#$w z1w@*FeT(!lU+hk#YXpf|gqhSzQbR&Ih4Jp#O=(&7${qqc24Ilj+R2-CE5tjGlsbRg z2r-)%)Cux~Q9y$WCIRKtQwu;LkqzibYA3%9q1+VWH`mnkAiJE8#$ayjw?kh-Dw!K* zU>Y(o5{WrQIB0M#bI|LT3Fn&Z#E^Cz>^jG~_f1Q3&VekgEX{x)^qERl!^lK;M!W7x zM54^DG~PsWWCK=xv)4DMa&J!=N1J?6@hMw(1hdu!a4Io<4U6`?U^EZoW=*>_7mcMX zr=+l}qAaxrRHe@Y)2Mif-RmKw1dzlXylaG>6+1{5f<=vvED7-Lp9 zG5dSvY!B+5nTZ3MDx<-W&iXD&C5=QTEf0M(eSyPu8J3;lJ%iHcTQ< zFF;e#3N+9#ryrJv!$v)7i-R1!beq&j`77v1oy;;2iXObC-1G~Kb#Z(;lz_F;6JQQWXRI`cg zLO&}Mo?LA?gk_XfnWcMQYjIhXZAiV+0q!}n31E&k0%bd5!V}tgl3Zkz_C*&AlL6J1 z{t0J3m%Kq3(|D_*U0)(Vn4cCgO#V4i#Bw2wccrn#yyfnDj?#Ff8Ksk1$wUCysp7su zUpD+MLyae#F+4<8vk5K23^9E9{wd?l;ZD~n1^Fe;ahQ}B!H0tD66=+2zC9I-%J)OL z*xMR;{@|EZbNgOI)xn9nf5h-QM8}=e2FD=9mv~cDD>)ItF?T}DtQXUsXGpq^WV|yW zMx(U)SYRByD}22o*T?*yLC6>fWbOZ?Pp>&V|A>wYXkF|P^T zfQ<6a5z5;`FgbuROXGx#<25ZTN{WJ|Ki^1LAWhtyD6x{Qs2V<;kzeB8Ft~sx%Z`e+ z#9Z(_?u;9Zmk+RJc)+LZe0}Zh*Uo1`1T0{exUg}ZfZhS3@Caa;fbgj=B3k>2^x_m@ z^b2KXcu(HyrjqHm!`qOq#GXDlJKct)kp%fP5ilUDf!JUbo+GMYG}-8_%)?3xp)$9r z)th$#TGq)y!n8e*&;~Y@*f$K$Pm@p;t*RR|2}PO&*#ZudP^?LqLeg~4LK4P+gs7lf z8j&<3>~?UdfEfp!bg$|2{VhRAC^DRdET_M;|oZfRrjRUtk#f+?cyK zqaXMaSrET1)M_AX>s=I3uk(wPYqr0#s-%Bq{6+BvF z+T&x$N;n^^yAc{hn5B;f#hhBvV|eQVx@>rqcbSqv+gJW(0C^kM8gD}-$WR8Okw90q z6v1(OJVbR&6yxtD#mRl~zJptErW>|D;!Yor#ml4#U1haw%W_VugZ#mdr?32W_FIi- zlkoj%Z@b2qv>WtJUlCRhu%w^1<8KZjPPLbCV(w!*U9i;)8oZatwGz9YiA^_atb5mN z;4V^s0)ykS4$xdp=uB^>--ohk#a!3Ow;Y>j{FG|%E@-;&p3b9y+vA>2g#ci~$2{sX z3e<;+2d>&zA!`?`RU93G38X7!psdlR>dM_Ma%Z zEoT<}8b2E8*e_tjT3v-^<#iio8$>jHUkdwUJXG*q3VVS(X0Tg@T`SSDmCExl(3A&% zwhL8*o(ja&v5j-75nJf)`RJKs(NixaiWFk)vKScR_W65M*#~MOcBisGqmfOgUQ{+H 
zsQ+|VPwE`BD>J^c-3)0e$(T5LX9WASr^>3K+YQTy>L#!i`wsMDj4~g9vR%s**u`af zD|rJKlX&V#pF9a^%!@%n97kS~8?2f&EH$>;*@mzO1VXTQEF)=fg2rj_r1v=c5wby} z$}j?ihDT^CnFVbAD$jN-isG*i`E;W7ui zCUcb7^gYO5c5zcjd_-xl!Z41iX&Q?gRH#=1>u#oURXpZ^wIM)gRj8*`_I!q;F=xnj z{aisjr&bULH0Taa_RK`VEseoQeZr9=9pG3aMHozCaWvV1P5p(Eun6NgQ=)iRCp7!( zQI%3FaMDF2?2|)Y49D zD6rg*P4k5N9D84RL)^FFDs3=O@cW^FaHvSSX=kN$z;-=TQ{9?P*Xq@oR_&C1xIw

x~9h~-Kp!|qs;SJj}vk6IN{F3R6DHk+x91pi!e)X^A`ZJQDWe21!Zwiv*guR z^k00{uKqZimav}tH#j7nKJCjLS;LXjsS5?xAWaV?H|gWBCK2xbc0or5!p2Ga~cih}FCl(Jzz=~iYzkrS zjAJV@;!ikFHi0cT9!+La_)X2k+^om0=8{HvPQ3by{OBw@8`%1KQE&6UB#$rjo_r6M z9Fmt}b|d{4!v9#w>tx7n4j+A?p9l|LfT5q8 z0&@BQIbs{({=wW1@H@ieh0ZyREVLOjLr-G=ncLew&$@pga;6A1@?LH(TQ%7z5t2+n z7#p2;k(75z^`V`R2KjGdaI?<4%3H-XyMi(}4M9$qyq3xC0v3dhN59Z5I}PD+=6C^H zAW>KrI(MT?-pWkn`Q`he#+FA6j=rN)*n-obuz?=R^#f^13x*MnCo407FZKD0kcd6p zcYK10D9(Gj?|2)4WdT9+*e%NlLs1g}i0u~45FG)RoMG_TKuIg%=UDBw8s zFs5I}E_nl3hy!~FBi-hLXmFaL4xpHs%dPs3W9Xu(Zq!IES6rbQ683B1>zwL1unw&p z>rU+yCViBH#ZN&%&ksXn!iVGwSHm@9(nk6NQiyyj|12=5lfEeG6FC%(>#?dWMs zea1nRUKjCI;4nby<|2cpAgA{iB&ILCmm{pyRTxTg{H6y!*3zA0Qs2tv`s>vnWs0JI zJr&#Mb&j2I;&@6^svfZ%&SAuii+Di60Ryf#%sdOX^tlqroozV-uoMf*8EV8^6t)g! z!Nqh!Oc?-tS_xFuvNt7;+vVy*)TEq-VW#M~YV`iM?s3AF3sU!y|IWfD)P{7j>Tm10 z{^c(^riU#9XZ{ef%Mp;;g3R=MhiUSxMUxw;@IY@GeE*sIs=Yv{x9+{X7AZMG$pa`J z@(Mr~6?D)15f*repHS+75dNP4|MQ|ydyjoAB->X#?!z;KpP)P(xRJJD>Z8}BJaZ8c z$P&qPc^Iya@Y6F*4Eb&9vphfb;ULt^DkdksBAviNKyJ}MQOiHJdGj6OjRP+>x%wQ# z>2kb<1e#x*`T>8OGucwo?aSo(G9WhFO4D-Ug>P9k?{MPdW}SQq4Z+A}K?3_oF&m|L z16N@;pH~-iXJjXA9B~Q{1-JF5a+;y{k1!C-jb0wzvNr*4k^Q6m^$@%2HTm(v52l|< zAjAhBp9Zu@B_(dU`_j*l*C~Ni z$I{R9)jCXM|A+9a6W_p3Hhul^Lt_|UBZQ#a;qplK)653$Je}P%X0cJ?I@EfBZBZ?B zjmR%EaSPz8XLW?g=AQyUk7&P??mq38Qs1rp6nb?t{IGbT&O%{4eh z8tMN~htH?~4f>JpWw@%DKdbOV#xD`KNBtTpaZ6GCg^@3UVl`99?@%w8V%#Jy?4J_i zOr4RG{ZqHUuMZhCfY=>@xH{CrCQfpW&*Hi2i;2cqv}jeo3*Gkxl$r#-By}c^*H2-9(H} zGN4Hq(xK)CA`&KxahepV_q#(@Z-M|Fb zFu?v$T@IJL93+Xll%*3%f%zY#?5_%?(C?Z$k>(}Z-#|)l zl=v<+K7jZ30Nxw5zlpF{z|Ybj0$5hpfjW)EWtp&VU)C^Rirb<71y?EjfY2Nw`LQ5@ z{Z}29&9H0Uw;T>Uol=OFb&?dQ~rOhjQSu)sB@!Eoz2*x2RM3?JS>90TiMgsXC7`#{XW2=h1(*!1kznbeP2d%;bGD!dRx`<-0}w zQocLXp(g%36aNDff2N7wsmsU|v`a`-y^sJ?v=!B(9~fN#J_HOR2_ zIe}QO{w!{b`jC0B{lScBRegY;K>9y|2&CUc zTKem}B>f9Iy_EQAN#CXBF${&e{zanbQ45-N;hr|`Jhf(nxx_varAblRXKO$V}pK`fDc-f)W8BDVUQ-@kA-!1BI#Pke*T184WsxrxAy_#Ym z>lo=U&>cOUd5w{xb*K>tQ#T`)`CKP1?BByB#fO^I=gD`AIt#8E&z~}rPd7=X{(`F} 
znZU7-4O9Oe>PJj#2+D3tU9BdIO`FtJH>jK?i*dv$8v z1Tn6XIwPjt7md5YxE~ugBd+6rB<@;FzMC-rO*Na~9dJ?Ksh7{vV4LCc`{_yiUYlAe z?u)5M6EOVQh#v-CjX)Gau@I|F*yIu&N7suxeq!4H*MNWI**cS(;bQVvgT6H*L+pQE z5Vyaj-B0(@s<{1Lton;`So{3_wA>6QgN8kC$9g1YeLp$oU~I^iWXJ3eU>sfB?;OkX zgp0Yv&5eI|Y#Gk8Vq2tDn;xV&Pfoa6?vZ2fG^uzGm$oxtG|``D9t7-h=K>(Q12aj? zyaQ*!w}HZhJGm(4oCT~KLr11wLtNaBwVNF?AK$YG2QJ_nn-|yXvD|kvYi#L@w6h53 z+<^`2$|fDz9V=Wv{j#N?hnF}tf5P+3inC%@u8vvH?Tvea+4kyK;a3$OaP4E8b$nC2 z@V$z17+T~GC@^H0qB5827t6(9;-zcqa05JRO6=f_&24Hph3*9Wa27RK+>PqJH3%GJ z0xh^J%YZ?Bx(c#MSsuSB^=mGGFaFZ}89VhcTQb&}GAibbt!&0((M(iM`gmvEnzUce z1FXmg`qZM8-g5kS^h&?T1q8$2&}fi6CJ#eCv6x-jE_+x0`d!6Lp&SY* zVV5iQ<=^D+`hnmtnJ(*NSc^WqhS{|MNTQ@(a zPFanbIuSoXj_URVd%85Fn{c0;2E3DF_SfKGB{(?YMr>2rPx20|Zd!+8F%B*0gFg(E zt*4?A`!k+%VB?D0JW1zf{WpMltcGVH${2`N(p?;GVEAMUqcL1~y(X4kBa_3Te_=y& z@A{;;Wqdo?V?^7#{S!6zQUP!*z5x0YwusQeNs4hU8h;DcbqJO$Cj*1)G;Br*cSa6Q z)ng%vQ>O5W&~FeQcgOQYK1cZ(D+xsmrY));LMEXTqw7EC5ifArBV+DG*`X{?)+iw< zlfk2~ak708hXO+(lsMztnT&78TZ{CyZ+GI}+>!*B#}&MUn@_87Q`fusw5k{%$yK1v zyZN-r+8_B$^kj* zDX0Rd{BhDBik#~F{mP6}eB7)DCV6|Kd&g*n6kR}Yt^P1VIhW_elJ8ER1icj@+e+?D z0NzH=;D5}SoJ~WfLu1vWkG>Y`3RZGCf}OG1ZfOrBr@9+Paj0sAQcWMQ?q^!mceLLI zB!qr$8H;TY_;}~DN?%YzKEFp$%)#lwN)#fv`Cf51S4~TEeAtSsH5^uKLvV4x@i0AU z>%mzK(_eBd(9>zH+BM< z53v<5eOtEQP%fi{yIEJqP%$_m!|RVY!qI7J(FZ#PUgILGa53~8TT3y{+UfT4G9|nEMd15Tmg*pV#g|^1`o|)YUxYCM z5&%TH+x@OQBVnr=kf}j7;w{jtVK8KG`A2H;8^YJF!Uixeg z_2o|Ze^_4=aiRn|=ez4G4;(f1H5f(v|7v};F7xW^?4_uaeW>Hc(P#Y~KR<(y^>qqE zPBb6Kz%}*d%K0(&p^-jF24oIeJ1LAbbSG7JG1dyDM=<9veR(^j8CxYW;XWDv!&b zHR9s^Ggjtp{JB{|K2+E7<7)G9DcnBh197{DQ-Xy146hsOCpL1W4zf-~oh9sNaJa8$ zoRd2~`L2~b9tsMTxC)2ZOYALi_qKv~%XoP(17}jRwN2$LygA=f;y&2`HXsz^nU1*o zSUbKj3Z#P4bF5y|0z6>=fc<2SD&Cy5Hy2*MXP}OQhT#c;^f=B+y4;|3v%bNVBupZT zv|=CI6jkh35UfRxQ-GF?mHGHl6Bp>?-R{_i7;cAg{u)F*Q&1bbg~?V%?;8vql3syXX9R4Z}TA4_fqsvm<0PVLA2!mK_p84H9a>)&F&33Wzavuc*hFOY831B@S zcXE|yo5dXEPG?t$>>hP>_jQjwvoM zV~VgHv_BBf3Sf}ocL~Q1@xo54W&}?sRFD4SrP5FR4gkfx%HZCOaRr7Z)w?hbKZs`q 
zz$>{VGSwFIOL8L6Uy`trs{w-x>|{cah~T){O8$^U$RZO7i8~1nY+ko&&cr8(I8+lc zSct%&#ytx}0H%5;i1>hnLJGu|k2$gfNfTiVwy2+wSU2m1Gbpk%+IH7iza%>6;$oF4v^qgkvBX6_Hgyh=|5J0x13*$NKJ*rPpw;Oz6Yh=mG9{IPIzxE(h_g) zgeipT)%^?B!45Vi^-l?T{`d~Bh~O0n|K?;BV&gOx+{eP?<%l!Q9h$f2Jj|EO8xOkO zTglVYsWIvcFKhlfk~-IDixf|X{@>2?j$}r=o9FH1`chx>JS+JQqwG*miTS4w9~r7z z$;UkS+|-Xjrs*%E{FVkc&RV1^*4~8!snyL`RcXF+pYeQu18OcnQ?0nxg87v$8H=Q52?ps# zQhOs-wfzMnjhFee=ovkRd=~D?b~GjRk4E%4xZPuoSE(kP4^;Jt!YWP^G2Kf)1~D_? zs-@52XCZzNOU0qC)sWO+7t2u}VDXIkDFc*|{!KGNB#2|#LOE@FN z2;+p{Noc6T^%c%m7-Q%XAbVtv0 zX{cWMyyA?Qo3*)MPrAI~R8R#y&Q-Q};|}oaY0at|z&x*_s}Sai#NSan>N(c#N#4?p zH($hk6nPTPEMfE5xGZ7M`eLz^KC9|NS;dYaLa$<)gtKn~W2+&pu~psu6NQ_sn)Qe;DQsN{ZIFSjmhM<)_s#pn3)fiHqeb^@3SnC< zxnI@<{hGi`B6ywuk20AH?x;mTrjwSH-I|t}00i)B1~xVX$MYe|bMg=ky_7G` zLf*=RVb%h1k`yKNuRpU&HJqd2@EjNc9D$45YvQE^3>O3 zftpARc}-3GFSI#FhiZr32A1{X!e#054+3}g)+TGO8 zryxVQi+CJzNVuccg5Zo)Gqwlx_#uI%Y5O%uix!$7sY*d(K|ZvY1j&V(PUQ>5sLpg^ zBRokKS3NC>CCD$Uph~?$4Tu8(hfpS=h7S~Xql%-);&(Az9vCQO4|bXaNT6^kI={9f5T6ZD4BE9k935K`bk$MLVP;;Ns8P&m6FXIQh`U4Nc zuKt6pAW(9f0DP!!VjRBz%X=8 zu})U`xt!50FGrv=EV;uVTC+-28_^7snanFw36WTqj-v*M*6m=adk)=L7n688gkpG8(Ei|hO>n5ge8 zUK{6^x3*pqxRqn~l*Qb10+ixRl&zl=GEnZcQq=Ags1a?7#K#E*+YQBHC0`!v7ct?KPk{h1Je2z%0w6=n zOEGS(l!7IvM5TNxAz`0^(JcZc;)<8tqL9M?iDOW;8j=YJ8C`4ZM|>^SgkrSV*N}%d z0=6ZUHM{>vCf%yGJTAS3CM&QIbA=SZ6(Vd+eDGM&*`&q(Cs;>15}6o11`ZvKVq#lp zK+$+{jm&A*gLt3!N9Zhs%diQ7YxOYJAZ(%*Pain)EgpPzCi$*40cqpa%q`Cnl`@pLErW?Zc^ppiW0oNycGqiSMqV*?ysy2HbBQ3Kn-mmI#AUrR3%Fd-~A>L2QbV&$XZv6Kmi{4ha1kxMCn#@IURCDU_zm6 zu_(U%3E3i$1Q7#Q#}*)LaVL6b{An|$w-+*#4psdJ5j9O#fV&fl8UGa+EQALIX#}D$ z;*^|ip(Sbo@^C=NGaRCgt^kKSfY z2zC0-J#6|L!H$amd3+4Y7!Zb$q_p@C3sY$&hY=EX{SAn3^t4@M-w6Lb@(yK(y61Ns z8;9qJZ=c}_K8azS`>L)PQ4oB|)7IvSGqIZ#fcPsQjNp@kUV=YYV-ViD^*I>&L-=(* zeG7k&@X-@~vKhiJOxMCs6lvifMuh&xM_TyT3YHa8zj!Y!okoOT2g{KkZ$;THJrX-6 zkUZ{0m?L9F1#fwxPqW#yR|q!Nyu=nDX0svfBM@Q^)cB|_`aY=k?S<;A(?!OQgN)}v z#`CNL%i1CHH+5PEmTO_>WI8JB|CZ(^bpv^6qjcb?kk)w5FeTDie07-#yY>*l#Uh9IW_iLl;3&zEYyt0WkNd 
zhA%-ZqJ%yt!r*RyFmW=WoH;U%!I~shv*WPH90iD=N{ytgee5L(jJQ9t31x!j7hc=- zk{sfwYmuvt(?r*MV;FA}&Pu*ALejBUrwwaok<_!jfbf0W{XOwv&X{4!8g@zl85%R* zPh5Dj3Os=Md8su$pa?y8|vy z1J~9Q7BixM8^nDa5|r6UVLA=Q-G@jiFdL~e>G~eZ?~_hcTQ6zc#XPdUrwPM5Xs+p! z9O`Fe()FFuf2R}G)=L_9@%Hb?Xhu2>N_3c?kxAEgMw8NsYU?G9ySO`DC;nh8pKve( zMy#eh|@_75-t|q)`o4s@lw-{qt+_Hy`5gTieeUl+{Oq=MKrKkDa zOE`@JjOFsEj%f%$4Q&*}ZgswJAgaBrQp11K_vGbI%t~e_LUK^o)JqiA5GhDx z!k%WJQ@snZ+`>dO>GYm9^Xr0TK({Qs#ps`Xpi{5&=zGdiBniAuRoLt-5~d?t&RW<( z>Uj~Gej^d~F>BdNXG<*;bCyVLVB(Tas%rPejKlsYv;zG;l(8wMCtz+PcZrcv5i3FF zEz?-@b}|pB)y9Em6HjKxiO+eWZW55(e5diAnw>&oQ1C<42sliy7J8_L^HOQ@iJGrZzux7 za}~aw#vW|rgMH6ca*&Uns|c|{*~ye5nYqf?s%SU%Ida$IwXwCm25WDVG|W;^rFxd4 zvG+YtTTEbomO|EIpeBq4Ih>_XFw(OWk*VIZlsg2=3Sm8FDd{vaP+JEc`RBR*xATN{Hp0aBgDHvPDmLO_1=?(6~IyK=go~8x9 zTm&BLWD6UkqP&F2e1R8uC)o??(EyZteH}(@8f94f%>oo2VVDast)rMmM|7uaF>gTp zT*gDpBaNu2gVMEBOY;! z8RyG%cTlEFk0aI&b;NzWW%>-ZDlRbLIAl;`pCsYftRLGLmFL3{h495-K$weZdCnk6 zdd5LD>r6rheNQ*u3Ci>%nw!FC7h0l_;ujeshTR`m$UTvB7;%DoZ{FLZ)%ZLT_roT(NF zyeZ4voF`>b)g1_(JD@bSs(nDrJ&i`KrwN*54k{C0ZCY^4OxBRax%8UF@WR)unzMc= zT-Wef*$43~Eo^%Ze@T=H7O47uD$3*|bhgM_Q52M>tl`I^g9k0C+Ij-+X$G?|K!NXe4ApV!ti z!JhCg2Pp8~;o*g`xZUHOWAJXSt!IKg;k`-Yo#El#yeGVsh7X;!^-Ms}U{k(Km%R#6 z+;hg-cldDUSj1z|qD&;Anqc9vXmnbU3wT-ghf$1!z`rMOOErN7iUw+rwh*?$ z(_VOBIab(g%|7ZU%&|pXn(TYzdH2zXcU&O{TPw^xsLL*3M*e~!sY+ym@qxL`7(kc> zLz2^L4B9mq7%-`k{`sGn*XTU@p4YquV4ek(_k#3Hh8=8rCL^74T7h|1y`PeFcd{my zHKtRjmR;RYpL+Qw$iHPQwOuVW0}J?|JC3@}k-1AU13XmAACO7QG{_6ZpfoBi;f~}s z;ETJ7!7<8SsjgwpeL|J?3ufv%o0uD<23w1i8a|IBYIW6k=Y7v=U1TFWWV z#~Z?OL3z-TGP5WyXv4M(kJ`>z&86oI1V^VU2IocJiSr9o5oRk#jn?-Y-PN1ZqmuWB& zieMvrK?qpZO)0^C2ONrUl+530WEos@#cH#G+D#L?gur;8-mFQ>IjmEEfUu?BOa+3T zOL{5jgMZ8_Xi>#gBIvyc{1e!z1)ao&1ihYZ#7LX7P%vTz+CarJJN+G(U5EREPO5qd z`saw}^aV-v6kTW4x1hfSU|x=Z6%dzP$E=(Q!-76cLV64OQ6QOuj_O!O7$fL1ZCNRT zUPbkiYxA_EB~o7Dc5H0;#sr^OhLrEf-E>JcRV1f>P~?RsOpt~gR$LgR4fI(GCEE; zQR0{7XxSwgGVCSc7Fp#wxf+VVgwhf&k`-(;r&C^9stGJmRONED%kTM7AZd~8{m(sw 
zh+5R;clF*mITjtamTnRjlJ0{LWkd{WT*wzSuBi#A@onsP@fBt`*TtyBz3x(sD$%yU_&157bJrM97|C}Z1r=#ZVH?a;n4Pfyh zUpAWoMB%pS2jGM|7AMrVcgO?=+9r{SCvF3kdX~_v&&k*h3vWgv8VjnbdRS!J#v6vb z#KD#+pzRRK%NbUc(O&(f$N2z+aCVAtIx;=M^cwKyyGWRYx=v@>$!L?Q9>xVSJx^yU9TGAf zs56a8Cl~-5p;2uLpvsTJ9H~*sAP%Trx(%(=UC=1HDm=i<1Z2Wl#weJhuh>7;f(9bz zJxXY3Cp;H=|3_3`GxJNFlcTk}j#19Ud_)(-3cIJ{Ifby=Y!ZNT3Q+Aa_t%-~bkGD~ zRAM-fdo&_EeKa$-PegZWM7bWKFK+dpK7u`1eDDZa@(c`2FtwadE=6*~yZ z9fd8^2czBAvEAa0FN(1d+)TAE%H&Bg2%QqG>@TRf^}0{OSy0Cefa}Thz+wl)9S1ji{*jtQYH8UxiO{(Z;IzBg5<$QzcvQ1^uP< z5*3YKjFBYQ(>fciWG>dRQnT;_c%0S8Loxrx#K)}Dwul-4QY-O)$o zYmshMzMn_LZpcesArhUnHiPe;+|pEHf5~b?r_2rjlGPxxgWgY{*ND&hmlV=nJ{PE? zZQx%Vh`N%zZVIA=|C@sikIZ@$l+VPc9JdX%KMJ3;+edT7T1h=9n z+Ai+^Oo?+>5!K!Ssa8;<>kUt5M)lrH2O&Svd&fmdS;VM4>Al|zKw9sO204A{z1uXR z-Rr%J1>iQ(dwdM#Kfey>y$*bEk5Kd;SXL0FvmB2+;x(%R#KFFr(RvZK#s%iCVNiE3 zNIU8Mc;QfhfRVTu7z4xPX0qQn2AbrkkTL)i5g_hVpgU-po`*MlwBq^b4i5+di9qPIN`@A%Ps#E0b9W*Sc-h z;nZ#N7%Pl601tKBXBCU2pN*F6SqyR@+_@mls`)b(Q^3#ssDSfvS@1TW0>1es8J%$5 zI$pTeO7`Or)~%lgttU@4tSHMQAYBTDK+0D%G3N;Hmk%N}`uc>kSRhadKZ|9x z32?ZBnIt?kz*QQ8PdSUuL)o|fO3KcxLp{VqW&%5vUk^fF`uH@JrMsj-GRce*b+izr zV?oV1F?S+HCSh9snFE~uHU5}$j=shJ#C`~_9(_LQ80HPC@#Rbub6yu|^F-G$-s!y) zwG`u+G#{!+m*zuVAAGoxd3#K&(hxyDbljNcgS%LvIYdkN0x`H*_v{RF#GWOvg8$%#a|IrWsJyM<#cMGSLvhOg_88XF$T8 zpAQVtj;dXT8s>m{BO!29r((ZUv*?Z@*qHb%+C`i%t?zD(caM#-vzSrN1dMm@?t8rZ zOBwOp3Ps!^Z&!^6fqZH`is)O22M|H;-`)3k_Xv&1*ZbdG@9X_8wG|(}-uEiOU7RNz z40oDO9uTZ14PnL({vddosEEJ_ssf2j(;mJf?ySfKV)c78IyCNKePpsZ&E!~16(RT+ zt%`L5Wlk9h&Y1g1jObeKVa4z-=G-GR7H*t=JPvC$a&+UKCZlz+Y}c%xp2<{ST*qES zfAJD_U9=2WrD;ky^Rt;vA1BDp3PLqx0IH<$(~9d;Z-Ewl{@u+Q()Atv%7H-g9sSC7 z>BddJG7Plqe&th+nZpOrGWI31-u+4^*B#{*yK4va(XV_eW9GIf7w$+mpoF8#2dgRL z(=wAo#zb`W0a|xay<)Q(5722Kui+(ze5P)^RvKPwG{0#mr-$5G+{PN!>aCsLh-!Tr zMfHKJG=!H4ySQwfiKuzEve)1yhk3zn>X#ASR$Kt2o1&D}NoGqpX5hn5nktU}qEsCo zqDn&qsk*sbirya0;F(~jwYY(pbta8{WYQ$H>Gg0LBAChkIuoe^w;U^h;oAVuo zv`VV915+;}9}H$|;6RLf>$RqANS52DUVDa7d(vxL&@IHBv|hXMn!fcKU)S8DUi)v2 
z$k%IwfoMf32j*&8(?(KX=&MNY1zX0HQkpCp8AqbndY}=SmiPsL!JWK-cFpSMH%c@8ddc@A{k}!K}}Y zoWAQbTCD7S{8B?aG3|O6+%=WuRze4aCGyuiu%G^{Xvp!$(i=dmIV_6r}YMH9ey z8)^n&RcsILvTU>_EQPR9akmbRw7`~X-ryE*bw_t>NO#s3_`oE`UNyXFd+cdgnc(&Q zfRzbZ&-oYDZR+rpq+S=$4*_p(34h;<*EMxKio+R}Ou=Si2`Pp3TbeSseS|7R^H$^7 zaPWDGI-Z}Wo6iyIFn%8E$L`0^LHJbt=qo{?RX0D!(zk`auOMdX6Z%eO*gNzcLtoLg zuq*T+mYSBLh0$nMj@_2J72lZ8?AJ08Wdyf$-|Owe*{iYN8?cb|&;HQc_BSmP(e=X@ zHX=OCRJ@C))AX%Be{*1eM%l|bF@q^aOgHKCZ3sDw9~{KGiG3kDstmOP$psN{tYWZW{WG!R?KJRM zJ*u%4(7zr$WB=I#7x6qfq-G$VVYi42`px$|!r1H8qmrgaP1bk>=e;_;;2devoeEby zKo-aOQ^TJz{JDcar{f1Xaz{?Z4d!Bl`Kw<_TsEmWQNp^^F>qBLe-6VB>3D-sa9@VY zidZ9Vhx(f6cMv^nb!_2B3O_8-Kl$+UQW(d+rK4nI6wxtbr#`Lh^5WMHGj^r-m^ zoX4NX`SUyeJd7U{fU8t0CRul>Du#iiO5+YQq~*h9c%E@{={n0}OuL_f8KA@R8P*Ni z@ZwJZE)qW&t{Ow09!lSv03p%)o9HaK%wjo{!d+UkL5Mzg{yvJKJGK_Y~$Wx+?(O58_mbn#=X?I7l^x2 z>#LagD1xiB{A^X*A+xO4&Ej^ckHy`p4kIvLrK99nOz;C8z{#lE1*+8&E$xjA_?NJ^ zOMR-@o6GXOt^GOlyBby`ca;toiC(DvQbm8!eyN(r;AeHrF>Z-YFJdrD!_8;D74WN4 zGScDC1pdt8&nW)f#-CI8b0dE!KdQgR#QCTip)qk*riN<2S7b)`vju?~~G z&I!aNC_I)^FnZx0UYDZM+ari*5@Zn@hzf~Ql z(|1exC9Ffefd&aU_M56j`-R`jv|nJiYF=}Opgx9QJFPQDe9oUI`P0sy zNBQ#>e;(q`4NP$je}1VEmovi);a^GW)hK3>#KSY=JYz>i07xDCf|Noi8ln>lrH8{$ zy>+k-llXXm%F)^{)DPEwN%!d`Qd+5rx8bK|cwL7{O{@*Xe+9bPM*j^L9BOCjKh=Jz z>9@6i6T@FMuxkUbOSE6|y3yoybs(>wOIVjW*Tltj+%Pg`w27+_yj#_Y=KB~8Bl#a9 zVO!O?I-d-t8xOzAK!h3@$o*u&g7+_Vn2ZMAy;zt2Ac@=Mi!k*J% z!sU;_arFUz-saCg@WZC?ig9ZsNsqeIB%LnqR`plJ1FfKF(0);H4@bgLx%S6!u1Qux(E9Uyi37ws4B zKc@X6Qx6gFe*WCcP-HlhA9wJlf@2S&Z?0m%k%sy~n)*6MOwvHo-p3H=XAA(c zQ~06fXRD(8kO}YqT$koexImptR=*@+T`H3a&6w&-T@{_oAq77<@PtmYm|<_~cHF`UrA%7S_*#lOPG(WxR80t?U4|C`Y1sKXKZYnI6c!U zC%Xa*#S1Z5#a8>)=BV>700j>q7ioBupUwR?Ut^H$E|UuopRY5KBQAmbCm;jG z;|-EQ#bjWXrnVFGd%67T3e14`wo?%Q<~I>7TID%iPs~8ggMahEUd$YREm4o359cY+ zKtTztZB&dx7|UVK69l4gt6tWWm+^>&rp>DPD;S~91+vti$P#C{2;MMe(r@$OBo1LY zz~`OpPy??5qbsj)~<7kf_cUc(|1;Ex3Y$yW5E6No!k%Y}2 zMW{7N%;s&ybku1j!-Ta~vKSmpk=Mv6EA<9E;Te6!$TY43zy{b+aT;*R9b<#*OW;lY 
zK~Qw4lSH()s$;e2iWWLF;6H#;+i&xgK=0LnDZN+TfIv(_m=Vaz2oW*ZY^*hsn5A0J zlZKRs!mC%xz@1Ex6Os^IAkbp!P7zR6wVI{6el5QtDD<6|}JGzYi9-cgTAHZD2?7`Iur)j?MalyB)A2n3pL}0Ky~Htl8= z3>%2m#?Ag?0Iti82^_G<=IS8;{)l4ZGe<9Wi1ol~?&8v%Dy(1lKDjp}VRCOseB1=q zaJjyZ)&u!gAGW8hns1O-i8}$tR+|F#Yb6JPxDtDN+_?}>6Ksb4>}9gjVkM7Y1dg}n z#GRkU?TvU9km=$>y8JT&|G`wh#QunfymR8%i%#2$Uxygk?HJOXfW4mC3ng=yK{r@@ z6tpnT^mxW^O?=4LC52yGv)jOWeS#o-=!`21F?$w`5MUR*NE(U0X{?1RBb`4DxMJm& zH_i2?OIxVSwSs90qhV;%6G?Yg83MSeE~w=+Xxb9OB|_1irK%eiVxZ0!Q+hSaX1GI1D}i8P;Ivfj(VUJU4f&6aCc2&aKNs(A(7cd z^fCMP27@824Pe8yAL^q2DBOvWYVsKzV>qqg!>BtY(n@}ex}a57>p)pkgF`98su{&M zv}!hv^{~8&ok3(t?;>E0+2?u>FXF&aJE45Fg{ttxXgEM2K`4KFvpHb97|oG?xDX84 zh$CC{f4toc`h1Se5sth9j$r&PtZ+1d=1&|;wP4~Qu_4>g8zYr~zdBhMNPm<;w;>ym z1*`?19OV*?_w#NDR&X`ju}yWw*}KKXhr)oOB(tl;K5a0I?em*ElH_*|n@)D>MzZ|K zBJ57TJN<-&{f+PDk{7q%$5r66k|D4oo^{cC2SNS*Tl#PB{mZe?g1A!VX8nc;@fvDzOOgsvzsHjF z)GpSk}F>z*6kf6h+(4Lx95BgcAF<_K@WK8sTy03?$C1 zz(ukqngE7mVL{hi6)I?(DR(koCSo*5l;2vEBQT$NFhvEO8t7R7)eZu_&HHQl!*R0f z5f*nRwnukCnj{?hzhxquF{qf>WF8VR4}Mt52bpPROWd9yn;Pg16cFkz%a9& zn=a#`h3DfOF=tTBotVStz;Mh;o^ABAxm)6I!-DiiIcV|TS&)!IsYolChse0D4wC8B z0ld&&toe}kszN}<@C_K2qtTV3Hep_Rid5=CYHsxt3ZEl>7}(Y^P$ZpbgM<#%O%G6c z^&Gd)u#!6v5O*h_Z@dVs3YTFu;lrhf97?1lX$++I_6#)12p|9g9NNrttKFbN~%0$o-wXWQWg11ON z(wB8*83BoPW$p9FM^*m#sLCH7Rmu2>qo=Aoj+Bt3rE0MFGGHAzA3oX%j&>w#utDsd zP61Pb8wzB)eH>!M>tgF!A7`?p}BIl%Ft$E9zsz^4=m-bZA+3@ zCkVt;*zj2x!?(9jHisX$+=gEco$Q69;PdaH)U*6Jn~791nDaNPB2T%Dz=o}3VOX+Z zyLJ5QFiQWxJ91t_DDkyJGoXkqFM1zg36sxZSz=dqjvwB{@ePjN_p_c`jnM;Xvz}WO zn;Xk)6Xi2%Hdv(30a3|*-XAZXm?vh;2HH5hF@qORU>v^%bQD)NWp1~oKmm3Z7q(Wc z+iqR9diVxp^3rzez~l`?Z_Z~J)&rl&C_eG(Y;_atX*QV`RBi^o?N-j-+pS9^W%cUJ zSZ3FD>*r0#p|x;b#s2{NIu>BN^@~;8tSMOqUk&7d?WJwjFPf~ATCIEYu$#BtIu`&r z#~gVy+6`X57`}t=%HhNWh@ph!^KuZri^vm5$yED!c4NTZ-aM-xh1?8TP=Y55Sb`)s z2}_(CkQ7&Hnaq|^bBVj0y0yezSOuq9%9R;C=Q;>!F*3%mLV93GkzZ*85EUPqWMY4l zu$z_ZPHKf&jV6Rc`rIi(XyDdZyTIJS7lvCk42)$glp9UKo8@Z>)-twDKzPsX5bjQ| zXonKP-%PpQ{uG07bkCkMOv;rkg`=iTfHs4&XAqtc5N9Fz7kZZJM&ifX3d8NOkgBcy 
z{U?SlWt=^>RbOAuk)(kVoW*EPC+x9p<{qWbm7g)rt*1`s^*V=``@YWEfHqj7`D(71 z;X)_Vd3TWc>=)AN&oqyxAzj>&L;QTAD|nG}1(;>pua{f#$)=fCrRp$29+hR)wBvlkr~y`uoCoS7?>}N8{`=&89pIe3 z&Cu zP}Skr!nZ*NIG=;=tSna(j-){etg_Bzi>TKZmr43Xz)N&4O~%}(my5S_ z@g>C1WC;l7VkJ`ugKZ9GqOo@C?w~tXxK2NHokm|K=%Lo!%IugMJ0#j=pDXc|*@?`p zz5tt7`OMg$s9@Wj`_KY)@_4{Pjt)NM#{=PBhUr7}-D@lP4QNaBFZa%18Og*#r|y6Y z+VE!F20Up8u*XzzQIHSh4=x(v}F?BS1Mj2ts-4HBm zbI8y=wb-7+5{BCEnMlZn^0n8*h za|z8P$ANH8Sao0Nk8Qb58FTPRc0_MANw`*){V~ovq0Q(mSXnaTLI{iZD#lI7t#(jq z4UWX~o}xPuL*?*+%?h0l!o&LU>hGIRLrv54Kd2{)u#PhPB^-l07+Xx<`@Y>+iwjh* z%oR=5hibVCnSGyHewRcAYT3<24HULo4{pH0wba4*ka4$ebeFBR@>{(jbSj%whBI|> z_0}1fjQ$fsm~gLbSUMicKjxl(;M-h2?@;lvoQ!s;;bS1Avo`ZuLi+S=xB5NOGJ0Dv zaJWRYX(WE;jpEd`MLh~vJ%Xg_ArlfO9_%QAG<7L>#MP~XnXVKcY7&1Y@aGDq97W#< z{+!3;&hjEAI~DWk9v#nXL>$JcC+SDp$8?y`DfMGAP3mweEp>YU<{8#gD>%85s|9Kc>p`D( z$N(s&KXg)$Y=s{?(B$k4rnr!z1sVC3CP(VvmyEbeSBgNM%YYjKwN%7_ixHqMWT-wQ z!e!R3%?EL`L%q_sPI0QKQ<`x{_%Tt`MF3we75xP$Q5{-Zgp79?u!A*$9Yq#*9RgCD zU&L}TZZmhnt(%(Ok@DZSO1KMi&?^^Xee_%XKDE9YlU4uOAheo=oj|2mLd!UcakCB^ z#HHcA7TmY6cx5Ssv&4O>368AqxmiyF7rf|9W!(ylR6r12chPTD`TJ74Uw6SOis~oQ zwI-xg`U@?1NzTRb+V>10qs%cd6QtewFsAn0V1Vr)}t}5-dDw|d$5Fi8G9giLRmU1)-&ldodBOf zY_O8A0a?tMk&RTD6_=oE(>ZqH^;9M`O5b0bU8egX4fK@^9K24%fO?(7vw*A}MAVxP z5WR`}%~o;}LQ61OX^76k`;iWJGFBzWp$ILYO|vw7LiZD9pNZM@TUdGF<*D`FjjsOE5E(3%HM}jQu)J}!|p2oS!81> zzwd?omxSctLVmdNUzYfENzBR~n-BJS_3Cr%d?8~FPKnwp%Prn4DGSGlVJKLE_DfaEBFVJ*#E@r+Q5K4?%dOjY}l*5 z%AS1{)QkFqWYMF}7qlBy00>adB1ir+j6T{dC-(f-_6}W)C0O;iQD%GrG%(Jraa^$4=mt=89h37w#{hR^rZHF}D)! zL}qKgl70nI7#H@GLcy-!GHq0d@0YaqNR*4kf=K#%5nbY*U7B!;dL19RQj$)<6L+Nr z$Qvpx+|`XlJoZUrP7Iw*U`_f6hL4O5vuZ}7nU}cd4=zjC8$zw;H$-ip)8wt2x(^So zBJqKhR&S}~HiEj?&UHvZ*f92FK{K;`1zM0EcVCHai7x=Fh}OEkdI(uuqAHKY%rip$caSZ zhgJ=zzP_FRaQSXwnXTt&%@5b+zOMK%T9RxccKPyR_{}j{fx+&LUV=G_IV>uTRqgzN zRnvfct9D|c_62x`H&yM#+U+Ys%g0z70Hi0_2xGT|*J<^1c-D(-X@Q<7JpkWL_1p|> zd#Ii%h-UQ+-!c59)KAeIAq+LhLRPu55>q+RMUU2^mY@1{PwEl?1mjQG$B%#Jx^H?? 
zqayM9rQ_Wj5&z>ryw@sgnxLRK)4hH;6xf7e__>0{))-Xf9#iAa!-GYA_sd8HIERF^XGT?0b4&rpAdTgw-J_9 z0{hc*sRHaIZU4b3qvK><{Wi~yES!%otx0=4@6|L5F!@6DSDU_X98^3L3Q?z!ild(OH0y$uDDW_BZyoxRF$;*#|Q zxYWI7LBs$DMXlxOC2KoHDx8@JZ|CD!i1G zo%B|&lLeTwQao9?mc4*nxz@1#JZOXm9qd6x9(05Um3YuN4~lwF+=I$J=wuI?;6Z12 zPz52h6s8}GnJ7F=Z1Z8t_g?&Fg_a7mv@ za?{_{$0z-5K_4oZ*+s;o)@YnKJ!>>ln^zpE9+Y+lDV!*W+!l-6*eQsiLBw?9tb`~! zW+aO{F}*LHi&G22ZLC~zgiWRPZ zJh)CAkwh99)kHC?L()yg?g4ayY1YiFAnGiUy%6X)1Un7K;+86KE!J<{L=3YC5_O(# z#V^E(z_?@xcFnqr*A(s`mt663Dy0#(rZJUIqmeYQ@2QXU?}$idv67t43t#?+ z{);Z{?T7XlkMTSo#nb3Bwm=I8DK}7L$+m(r2Z!vf*{tw5_7Ynlb$=axlv&@GS?w~f zeWgn+blruov6QP>H|A;4niuF3-o|Y!cs47t;I~;(FYq=O=eqpTI&8K?uZ8u}-A{(e z7N_n@iq@dH$6k%Oys-tjlJyimcuotgg7!hUnxA=!i1KjIR;iJnEL+4b9n96wZmyw#;?CUzws_;(X z>69IN$!bErTSC9NDw0}^Xim*lW3*C;;#A^xUqJeJg&M7tGc)VUyWBD7G4`kH6jNzbEQRiY4z?l z?!B6Y_lIAJq^2Q4j2W$di0<*5LHCrt=pKOxb7Q!<2S18?@HgOfR|q4lwuABX3)`GZ zPK9Fsc&5!hD6zOu8hcDdG*Ikc6re+E%g->e6}aaI(=BDt%gSS&{v$Fpb|E6EvDMTg z=wIf*=spu$Q_oMl_|r&DvFL81j=w{vJ$Q{_3?p4*3<33v+I372Nj(X% zaLS+_zoK?i0v}(IafHEBWEZo(NQ>@)8qESS7`2`zvY)h(N!fJ6B-!-q zN`hy7`w9hPb_)}v;J4nAbZwf}@C|fKd$NtQR-g;UT-~dGPNUQG*lm#iYC5?yH6h7w zl#f^N-mt=zNb$8c@a$w*sfKb#s8yrlwP#h)OBBzlzz4>_&XFURv5aiR1@vriGp}Qj z&OXZ|*RiZmE*7#b7Vh`hOPUFa zgJuZzhi`_D<@i7CGV_=5Nlk1;N!<)~x|aeMorKb&9=_*{H#NLB%;%9?>t8ekbdl(( zuwiqCl+3vTE~SSZ+3`iMBHc1LqV!uGrSM4OYe2pZTN-myz7#R=DF;Q9%ivSm2Ss+c zMmVZDm@>6~fNE!FdCjJ?tobceUM~@`;DNpQ0#6YZmPpSn(^(wscWhxq;{2^8g6J$M z0Diigr#{AX71JH~NNU6O5R-a}xSf5k&+UkgTS~|@K!KAzt0Gg+LLJ62x4V_p-F)dg zS&#y@%!pfbkGri`R)h)=pe=WxzcEI0XZNBEM{^2jT%PYa4w2f&DEY-bPaJZVvW{IT=1C(OK`8Alkm;!LZc9osX+yU>eRH!&pb z6`W}$jQ0#2o^2u374G^>up7lpAXQq&Fs9j#TDLM$nnZIKc)-8O#;HiFzIY&OXq|iB z9@46-i_^X?q*A;hQu93Mvj2_~Y%mp=iCp&^b*>nSlQXaZdi``$URt!qSd3a48ub$( z2v{C1hpl>P&6-GRG=-yME1%vqn@4XQ7!K3>H^TfKqs#Du`q^Fh%@@=!gz>G{dz8by z#s{V578Q!%pbH0uo#G9G@RYWLmUj1`62}bj98s9$6G#Cn^rWOJ@B0Q;a&9C+#ML8? 
z4J_-r`*J+Z0QttANsqK*){5BDAN!R>9$z2jKKM&GSLj??5O=PDnapPI0)s2HrO(Q& zZ!jXm@xt`{AeL3lIO{~*b(L$Jbuo5T_A$;H!wr|4GtQDNo9LZv#yD#~&#c8c)1G$hbg&%Vzh?_7ajJv`5PMBkN?B6k zW$~&?>Qz)+_abz8c>XH-Z2HV264ep2xIHp;7sRr0`*>Gr-x<#P*5ycWekYFtgBj8_ zMjUR|&*c6IGkmw!#%yeA2E(Uc8t$Id7Xx+$jbn=elbdY*eGsW>;_iGs46bAFR0 z{x3stw(a-n$*hb?!E=egM^_MWI4O}jpIwG_QAdVhXW%smG5btjn{)`5#GswMJ9!>2-9tBn1#ftO zK3Gj*0yNNXkL9n0=ZksWu$!>0&+D&X;D@%L75%Exn$Ae-5hO7SZ;=JE`#0vCh?bxe zT{7u|nX1AQW~L8^chO4uFxqOUc!*dVg0QvsvK?c|Wd-Pbp&>Z6ivg#hB{)kss@Z~j z3{@^B!zd0aY(|DPN22Z2oqGe(De(MldTeqsx}?t0T~X`QF5mJnB*95a-w^#)n=7@P zYpgWuT2ZbvrbqEdHwMyp)wiWq|6HmbhyT{8jed#*=OpNda*RIY+BZ`8gT zedlWuvEe;wDlp+adFYf+A8G&WhaJ2jtQfRRk;e0vMxELUh$CvvkdIfutZ~PsR5^Cf zp%L99*O2q9`J$Q!zhJn`ow{%DLfMv3Kd;@%ePuA#b5=wBJpFN77_{Y%>GOx~BmBJp zDDD)v9d{A`T*E(4^3NswGnaqP;h($lC*A+b=3{7pkCW4U=+sj*zqk8tM0+;{_D3&3 zyW^h@0^xmt?iSz=cNG>ftFOt|&k(}R4IkqNzwhEpj9rNFDlxVr^a=iOZ@_!}u>rxN zXK`z#>wq|SOgxVmRle&GA`sW+Za}+XSX$}+NpRP>w+l7gUnFKLGd1pb3aCT_=)2s5 z8E_!}FyHf$0u?~V;_ktSL&)0Y!t7UWOz5q1OXd5cTa2%JF#bHtaoi)ne{^r-Z=+DQgni{E1*hG;PQEMMD;0+w5chn+`O!T~ z;?}vRn%~Ep@6qP_Bg}yg@le5QcMp_s-2Y|1@6lW!&z0u8g810M$~=}!+I8-Kg!gv$ zYg2~L%=h0W{4IQ0mVe6cAKjNs{Bj9j=eEf274AI#Hf4O!;M`-rx8dua7|6WVMBE@> z%qN)d0m3NmZ&6$hwcOK~cxU0Z-7Uipazp*09*&mpI(L-$y+7j(7w;QgU5#%#Yk3(p%58J~I!^NT})fMYX^aYkJ<3ybKO){x%yygXUXsKNr{s5{`; z;!a~8$z`2BE<)mVant_kV}BQ{LR`mHXplhtajKYWxc%1>^44&}r;p_vvbTmSr;lX~ zH{q?}CTJcS=B?o-yfxf}w}zXb{V1NdhMVx#a1-7dZh{7;c-|Ur!dt^l=(G;cMAK~b zbo99%R4Gs%VlM^+<^fIepzA%T#)GDN&@CQx7a(*2khakadz4{9uSvr2Qg&hyAS@z( zjq8A2;G=?Dr5cmEHzw7AU*Vzo+`ZVhz;XUZWGf*WDX;TYqpSKi1FnXX&pJsx^fL@FY0a%1s`^gY-=B&aRk_e?;{}phxX_VuaQj zj__jEQP(zskx1_Jyzx{Un!96 zh#B=p7RU~$n%5?6o!HPXGGjO*%b*gC;I-76ErZ-i19xXh_Sff0jAZ3tRDv z{n|Lzmav@$#~WkZj3=ScscCs|4*g|DAGV;I%*;Z#f9SD=NSG}fgIwc~a+#$R%nYbE z#{IQ$1x`R6Ly=zMwGtNHyhXxG%|+iBBPtwIQ+~T0mdnR|D4EFF_n7 zJc8NQO_%J=fFzx*Qy$$Y`2#@$dsL!koit;lzK4*90EPT5l1*GM6)?sOd!g&5EAgE3 z7qUaac=pVx*>@!hS1a9B*ae!DIceiwB^FItrbM!I`R|gj92;BiD$HYbvbK;i8cvBB 
zg-k2BPE!tH3Z?}0EJZ&HICQ%OjK?ljvj^1hLuD;tjP)vgjv%3`jH1RM{tDr!gRw;9 zz_-*0h}MuWE}Cd&Jx}`}Y>Mb1>wr)O^HD??NpK@)v5dhYLGb$a_Lhw}@AQzSTx3W*z@yB}b1N(ASrLgI#Ty0MRe>_!C#b&~_w* zP6lg*s7z9x1NC7;Zi0}zD<{qjgHu5|)?J0f4YPHH;EXG*A3ySiGUQ2I;$S=_@*P*$ zKyl!AwXv97az-O{BPDIDDTX5{^|;DUi$hc6%5sCA2(YqkFW)dRx6Ha2X9lcPlgZ1o z&d<2n0tX73%B)F+bAizu#(2YEH1mi=6ENn6G2Y^8IcT(&VMj+ZFdD-c&l`-6GV2UE zlhY8!c-&xMmh_Cm4$!C%W84l5_!q6iafO}CaTO5j!id)ckifO!AhQ17GGO_UmUXneT1k2rwUx+zq zc&$;Ljf3;tZ3nQIx2Ui~Xk_j(_{yxQGZA}^#L=|e7KzRaCOALsClj2~2}tjPUW9aQ zC0pmoHIuj*69R}^Us9p-z;XJi8f;xP-D)(l8kpxU!oFy8l^+d(_?UJlAIU}XE6*gy z#g>tp0!~pOwRid7Ny4!flUo34_#j1iD)bOjEQ(E!hE?Ue|Ndc(cM425(Bh#&Y%{;B zumqhqR#Un7Z&VU`Lz}9hw=n6+DsD72R1O;GFtLV6E>{gT$&xv=0rBY?I^Z7E&;|Xp zhN4Uu+6VHkB!U}99BYU;n^QxNFdbjJW-VzAMXh(e+hjl91G|)P_k}a+^Si;pgOm+6 z;)UMpxhTLfggt4Wu%J&g+_Ci7=#=l?@ZU8MgD9ElIvbj*>y-F-zQ6p8Og%29qj$G? z3W=3P_qc6&S8pmBwx~7OPhh&jW%i>ow#aDW-$8uLMpd%Li{J5{F)o1_4EU>sIdrGb z_l@3>W#NsyhqF`=^gW!}->)(68@-eLzEQ0p9v-g4Yb2O!w<{mw+HL54qj%(eBkhX? zdM&#kRk}!g5NBVMe?(X=M+?$ZGK}Iyvv>yQKmP%T_UsCx7SArhCz5(tmUL$q;fEZL z)7=+2pTaT2aI^QKH`!VRTQCpdU?z3CcJdq3ubIt{IO69IoT= z9Vb%)`!>dpwut)^*bktasFifDcN@0Y;^JxaoO2cElz zTcLN5^f~%#-A6ZuajXk+m$NQ91)-*p z;t}^TW<99D-P3c8{BGf&kww~NW!6F$>mA+-DvyeoXf?6@gg5)$ZIIY{;pIK|PjE&H zkp-K==Novm0=G>*?{3535gL2|xaU;JcQk7f`>db4d*0d~pW!VQoO7hbSh7XQJsX!f zcb~-WG}?>=SY{1Z{Dd~68_V_5EihV9Pq}EDL2|j;j3u&V0E&e8v{r0DQQT1|1Y7kw zCJeP1w@V^)^ASg_AkOA!#Y0TjcboC#&gnK|!tLPT%HEu9#^3rF%V}GY}rL(JiGMwC&Em;&deh5LvKQ(%t5W{K&LHoQuUmOqkSl zhEzjXq!T1>JWs`4>6w!yM>5L6DMfoiIu5%ks5st0qmJ7k?2=*V4oQ|d#;6wpg@r&D zSxmgId$6`aIqa>l);i+_(a>2^|NC4MEhJ-+|EX`;lZ~J?}rOMxT!NpLODkYHCJoY4UlT)DP79LJ;7N z=S}gXZ`3&ylf6a8RM@#f;d+ia(9PxH0o_|{@dAb9(tbDuU~Y>V1x$PBxMDXlV>VbS zjXeIIJ)b4PhLgGcAT_RYe0Yg{X7Hj7=6Jiu7pyUtkviS3yQ_73B8& zKR7rK*Fe4yIbdmIS_cY;Yan0X9zZEpP^}P01#J!M|5T|YslsT#LuDQr<>0)Kxd3x5 ztTFI{T$xI+9mSxLpcq4O1J~+txGk*##i-qc0r0v3+;oX6EK&`^Bh+!&GUE(zSKIPP z>dh^b)eb|-^Aw8^fppZiXg}=Z+vtsRH!5bLqHgQrTfsU>)2_nGwgWT 
zjWB$mnaXUocNEfKpzo``KEd>{x$5gvwsp?Y1H(-*Z&bh;DTHTIzZ>E4eutXAx_8xA zPv*5{mB7MQh8?!-z613A^QXvZP&lU(G$({-=2Xh?eyBcPNQF|ariEs2hxry|soSah zAm&pK`a{C>^&*4G2+ySdCFjsLN`Een|J}!h@SVR~qx(#3hmIz>G{1QDUv|Q#8#bn{2E}%&060mMW%cdT2jd(S>U^c!yoSQ9p{s&Z?|ug&bMHf#dk_Eo zS#zGtEG}g^F8CUMCb5(d)AbMsH-r{`#oz62oES$l34V;>kHgH5gZN{A{xGj)$%-jz zz@Ak?+D{^?`w8AZg%xlHiH{!QDo>UsOcy6`9sKS>uP-%V_m=qJt?{a+IZ5e1xVhjT|K=|${Lh3zte}s_4 zK1_l;)X3m&3QL`D&lJO;OYYuC;r^V^YEWdFH3;FiTfVsO4c}*}MD8>|W=ly0D54+6 zPxqKE!t2~>RzJQ~=3A-iAQ|3eh}|OJb?$VMGK`f3@KoP%e>&pb^ZDm&N%58Yo6kvo zrF$X+%kYOk#!KWncY8!~U(P7`y~6zh1>!mVkHlP7x_e1D-ZT45vK5xLHZKKwe&g5l1R5`4QaGe533-$~|suK8YR zzQ2@jdv}NVJ?wh!?pWSE4PO%b8Nvg%o1s4%*zYmu^YnfXZ1%t$Gt{ET2Uq#v+dkOkgCF{!>w};BV2=-er-kPi;_!681l zrwr}IoB$Nr%Sqy({?XYvLxD%sV81vJ?x)Eh?pgoswM zL}txb;B0m)soAy!fVF6{8-qbR5AkL&!UL?tac)3_x2mO){zGIp3Y$b}DF~#+Ij5R? zFUiYm#wd)J`)mI{%+%ADfj;hn_wJX%&7Kem&MAw;9Ay{WaT9`J%RdnKb7YMx$6Hb8 zxHGd6AIcFsM&j5t;Up^{-;)6(@>Qc9(7xaeqNfRzRJAJ28s#4n%J& zqr3yU7Z1VWZeULDvjkX3j_yP_7&`JCGBhtib$3QTMPwX#ab~tMfKzt5MoYyo@-ceh z)%2~TXK#P4B(<3?TwU$gQ2&x$h>AORHVS1s!C%%gYFc1U9>QWgg;i>nsQn3EkTEI% zG*Tm4Bvp;@sNEt29lHV&Fbr@}@G~(lSlDU}8YWXoIiZ>Qt zj9LGXJ13GKZiQ2uvP)om(Fc?9)D14IuR}7NmWmEc?{F(EdO4CBE*nwSLNTP=l(8qt zSyxeFl2Mu;dzPwB0P7lJS)yJ%cT{r~c#S&M+oh9ieCGQYrp7A@YtgMQ-)>0>4 zO7oM$PTmq(s7}#NK0235vblCLiFr6J1(#m+=9Q3&GkPqM#e@<50s=f)0xWgIK3ShFrh|7 z!GtP=vR%SoxlD-hlx3C){TpO_{UkM_MN)p)=5`h8mn&B=!1t z9;YWwL|H<1bTq@nj+ldzrXBUk=N?Fqi_Zwc(|T*#o8?dkFY}!!CU{Tel4VC5X>UG6 zXEbC-n`m#Y6Q3g)rTNKWM`x-X<$)b(Z_X2EySa9>FH<`$dDzmjId(+*fJ}+bN@aU< z>RX-x(QK?sMY zJ$EvGIr9p@C5&YkB8#z9Fo0^a$==xr;duME<}sY#5ur6dLn`m#IKnkdrUmTo9&(SX< z7RIn?JJ|`mZhm_+nyq`}@ha_-m7Vi(+?kh$_uH1hcsQv;Gg8R@L^!74wF!BW*6k2ViRvX@P0DI7s83!ECTxALYudE_4ZY`cm z*6}nk9yJFf{?|&eEzxyvyjFl<%jprc?@Nb2r{S*sn#lKs+n0ViB)>DVpmlg>vNeHH z@*O47Rku=6ApYO9B3lw=O{}ls<7pFQtm2tVzcWQ36{w1F)(7LPFZmi1>(;&b;dS7% z2OB=T(T$gCwb*P+qnD~Q`CPtRffVD5zmKF&#fVW$Z%YA6^V@BiX5u>k(nxA3;^Vf6 z01DrTH691O$|S>dEcEA5rhtda0`Bd zps=qh#bm!zDs+UJzO{-K(u4g%aZbw`F|oL<|L 
z+&c3f0+lwQHGo|vOX>s{Uy-E2_a657_AE*Auq0tZTtdqvRbZiyB(G?aM=l90zd9NQt%~ll0?low2GsJGT|O~sxXzC z>8Z=for$(9cpFixk*4PhMg>szt-!>3vRRlBzmrk@yAl)g5sYpZELH((NLpBk)1$dw zIwHvq#bg4584c%tATiqolPG1W4IY=)hF&>UZJm+F>Qv3-WE5g zO}(WB2u|GnRih<$O9~Lrhy4Vm_O1g4H{(LUlx3u`+farrtm^Rlcy^$;#mJd1?aqa+j|J2`4#1lu8;apABE5nmUj;?#ylF zC!|}0g+*08yHOBBR|Rj79+qP4+ejM7aUl2Cnc4ZE&b&cnCpn1GxLF?`qcc5Woq1X@ zhbSf!7|ao2%y)>{o+PHHU6-DKFz<@ocrmo=sZ&(DhWK2R^K44bAlJsq1&EIcWl)-jV4?*Yf0yDU=qNALeAnTn3_ zLUv%p?OLsbBq^kaG0x0{sF;6*O>b(Cr2hOTmTHMq3zlL0G_m5|NKG3ACie#7%5c04 zg=>ywr=i^Ogfq05+>>**$I0Ivg;9Hxp!EC>bZvYlw1VvbNuCNP*K;hw{VipirYBdO?2qPRRNTBnWW zr6*%2%*NYD5`Zo>L6`i7k=#$T77j*R=(myW(n2zijOOu+Q>Acqpc7dz)3mk2jA~|J z9xCcD?PlFf;-gXQqct2tRA-*0MA?FB1KNXf^^s%^1%h4c$hmGN6ZTF0HId!C8_3298R_bh?mHMi_sh=m*Q*5*AW-?)h`kW*L?RtjW(r!ns z7cu1PWF@0^J;Of)h-lzTw1FpnRs)~a#BQTmb8ry1=SZ6z^rq{yb{j}*-#Mm5?=ZFN zwbSna3xn-Oa@d|EGSwvP=$9+4xr#+(VnJJr!u0+|OmyB2%E(r<=acpbwdHq|tG9ik zO}wo#6_Cd1@N6=G7KSof>9{yONAg8C5!r-2?LojW?Kru}YR7-B6hxfptkN~wGg)xf zT&}i!y(s@Y5wOkyq79dHbjs5GW;@<0PByz8Zeq;noAhq7d8iTb%fM{N1;Xw%Y4*v zsw=7IL|2rxfUdm8#@u#<5ldLh(3fX5T5`9NbJ`iGTLQTpg?{82??Ow73vA4!n9ARR>O{I zYgm%$hL`lA z;m@mvYelpWE2!a%AWXSG(vpL)=~GQk`B2=D1GVKfL6HRuIB!uRPfMVXd|NRVa zwF3z`$tsG(vRQO4o^WVT#9pH_;H2m#hX#^3_t4NM|LqzWRc7eB7I5o8A5a3wMx?;8B~I7A0vZNLfniDn6U4@`z}iT z&r01N8C3L5{VVMWRtoD(*eC0+64RM?of7v41{3-w{;&`?0|O?^sN@DEm|%E%V8HtT z%SFTiJ6QolG=~ZG9K_G+@?{MS=4jS^4Giif?_?cmc>@F1Uj~O)D^@)@Y!_tX51PW5 z6BM&SF`2+%4hmx)Mof${3`%a#alttC9JeeK&!LP3ByxfDryXI#62>w-$L}>-^0t!X zChR#%fMGlbImz-I`zeL=06{#*OiGLLVP0v^k)B4`N)m(z2;wKw$TC0(d5(1Q*JT{% z4G@53e1^N`w=&765)X*rF{aSM`GbT;awvm@1eD1AXt@j$>NjhUu$`2k&q2a^=I10= zQJCH!fo7ATG4Cl>7x`!xWTOXFVa%n9=_)1@7|dQ#|G?;F4lyy}FeoxeSou4IWeyU? 
z9H}}}(JVTntOW%2GDoGhBaB$WT87Rn)M&{aU3Fv@)|vZ&VRVMPWa&)3Qs`kxF-Q=d zG5O>kBwU09*@J|%&Y11!7$o4ql3as?BFWDiBmgVeh584?>84p*2k+j zz|qBdj1y#yJE(+bGa73ys8A?1SGv)YEDsZjDwf;}grygwhrpD|4$_-hCiI2?1scl) zM*UmEN_`bEG3YQTo>FIw6XqPDO8vl-qSVS*Kq5WkSvp-o!|0Z|29^396V1FanUCnO zQuCP&qtxUiOR47xg;^q2j1xquZOte5IAMk)m??s`&6$&#k}h+cAWEG*PB>kX&tpZH zaRLx?j}wk;f=pzbz*-286MjxY;24EkQo)5a%Vzm;2v>Un1ChqP2fo6nkT>PG1|2L6 zBepvN!w9UDKyIV~6&hx=B4g_RUt@{gQVA4{1nOk+#O(5ssZSjREFzXYpOgG`Gl|=G zNBveMc!3hs>3*E^kqwO5S6NI_!kpi)o5?hN6FyD}U#Nt2roV5(`zhg7LYQg#CcLE* zzDNnnBtK6k^}+Fn=!-y@oZ_#W$ut?Aq?Z`$v{W(NPS%S!Gp{$A&%TWFNUQn)B4+x4 z@Z9{3nqLQ1#Gb<$@r(?wlnj&Aq_giFndi>h0uxF7L9r5Kt6h+dRi|~r)1Jc>vraLY zz+mc{MxgJu7cnt(Feth0|L;Y{{vQ+jSML_E|K%l=rXy@v!dQm=H)*uwZ6(P~*#7Sz z2F3}D3U(0u8_#Qqft3K*Ar~F&nUqyuef@JJLtQVi9SWU`0b=VJef^O2r~CT*N%9u0 zhL%=fwdn$aJ7Adf^;O;h0c@%KOU=ODK&!xOJvo%V{s;uP>z|gD{7zAsR1b0BvXwF? zOq-2idzH8tT?vHOtIlweHA80}X0(&+pv?B2zw%B@TVl>0fE z9HDZPp$z4o&t~0rBws9qAv&zwcM=051E%zyf^k5e(5EOJ5h{B5R7vb3l-zPF_Zj5L zmX!zOpP)~{l&z?8MlZh)z#1)((qI@k6N#!b-gx$ztJ^^}g?% zA*$4ATk#zKk4K79=V6{6x=7?)O8pVAKx?345!onpmZdExCI$=!#Zzk3BLC@Hz$YVA zsh2$ZDa>&{C8J2lr}W_0Jr33kg65JPvqTGS-FJO3MD ze|{w>)XgMr-<{+BN^mzNsB`pvcaA?8L$S`U5W-B;H{nG@by{{;!a6~dHNl-hlyqDS^;|ay~xd;NincQ%uyRZe@|sf63i2E z>!KFB_e|#@%T&IE_-3E(-k&JRTeLb_T7jh*^t!*=Q@Zym_kbXFXRknu2)js79@r90 z4yAivfB<**N2Pl|NOgy{lc_sZVcmK4Fcv#isk+k@)}1Acc9PW;>%MbVsqUn`g{Kks z5es8MV2LU?V-o?*_0TLKHnuL>J~knE*j;Qm^l&a0Qe*2FFWvelhko>e1O#;fX<)MfX=5NLbr@JM-d&?5Yin#GtS(zI+5(q&JuX9jzOM8XO9moLbm~!e zuxQOEcfaQ)Nia{uYdaJ+bVbrpSNu*rr`&RM>hDxtQSrCr0V|z`FO|OpQF;BIz&;D4Y=crI?^wz>IPgs&pN&ar*y_43+aO+zEw8h%9E}8Gv|y6e4nbf1GjIg|AETp zp!y+#(HqLx=@Ug!Qirpy>{)7~ff`*bst>Kg_S?SPVozALZ($--qN;XpShc4x+8J99 zy_N}Z%d2ir881|n1}JR3QP?a4I#F>Nfzx1c*Z^es!EwZCAKS=y&k1&+toWz>p3+~d zO20T;R9Y1;Ab?S$SQfJ?HMTy(1MZ`7lB+egA*|0kGTIv3xDns?9}b<(asc}A)Iyt? 
z9swsgw?8JZ#7101C z9-&G+EW^L#+^_Z^6QNI`N<2t9y@B!REsS=?&V>>WWE`67uo?VCQJSEDbE$y2Jo9;q z(+r#@gOjD`L#apYW1AW8DKT6h|EUt!7OD~txKor^ISB|Lh6y~+FOQv@p~MGDInxT> z6jtIr8EuVihA1+H<~!v9LSa@b%a4A$DDkn#BzLF%-v^PGnIceoKE=yUyQpkNr(Ht) z);qn0=VD1dk1{u%b|B{Nv_IGYX~@((-CDTgzN-+DQ!i-L2MQm@HsM@$k$b=;K*s?- z-eHV}9x5~P1s+m1C7Ni-9o}tI<=CWl9`e1R`8^|1KF1!$#JKkZAFbCHX;2>46v3Lw z7vb3H^RV!|+?V~l5z0A&)gsR+;0%I?^@HiLjBF+PM5~q??NCF#C`DVXp5`3TiF{*L zt2}e&Io91sQ4vipQ9D7*Na`CX5hOtxXuKuQDQ&BSNKYn_6+Y&~XynpX2&odhA+xBx zKvFFczJ@$SUP#khj}9!aMnx8^ZPbU+s}2)~myIe=$32?q9BX%3m3oT-@2!t*?$ zWVrte4nSW96Uw#@x$W63T#!NxGmKy$k~$WcK+A(kA&g~!lw?GUqIlBVU$JBy%iBz? zc?9vE3rDF)M^>BpNXA4e4_y+;4mGeojf|RTkEBva zz$>mTThN1Lyn<1<_(8~s&LLNlNmIbVW1@K5xdjvP-oC&RiI$OjS_!yGPD(LC<0xl@ z4Uxu#upVvM0c!C6^XviTlJR?QL%J_To=1U;%_) zbodYMyEAczH4ejSJ~-q8cY!+6Q?SNbcqrCXWwVQFcDKfZMQ{>b%)?2R!%5)PhM<=i zXFW`_?88kokwwRLnH)2h_<$%1o#`e{!4M6z*FB0wWW~yK6NeHDvzF;-jCFA=X8g#v zjGHh;h+03$^PK|{`B0Z}*4<(LsZNP zIf`R^%(QwA1G+j2Ns-%8?CxidYG;z@fO=#Qb`%>h9!VF@ZW0VM@tQ+saaJj&w2V+B zQVc5nQVwj)5YF*UvW1C>^ECI65YOrlZY35~3QR~WP9J?=-w{s#pjGc>g zWE7f;!S>Cg&WY8a;99ZdT%QP3X|%m1pMK1l@jpeNLSN_e7*HS81)8&2XaVz9ZK!ZCX? 
zUZ~J1yl_*%DFnR%b;$PyLCJJ+#B5(Tmp-R3mtuUi2NAApCjARQpE%(m%L=mT4A^nc z@Kj-XOze5FC!|2v)u*$|8vPtC;=Hnv*=?91*=5ZjRI5~E3~kt!@p-UZ+bW$p(s@$o zEG_`s?t;6%Wf6M9MHnG8Wu^A+?z`gE z%IgaUa^FMD>R)IsEb)qcHj?wrm4PhxulU^L5N^E=_O8YW$|OX9<2-O2OFlOYc}+UT zG#DPS!vyn)tW0`$XHD^Sy5ht6I^8*77AMzblM(!)`2TO9yi@q?*Gx zU6S3rRm1_QlZDjc5kPY%+#%XEjD5RcS$->I<%SSwSAr>n(FKS`9aJNn^)#{jMYgNX zfWHmdgkI$#;U@H|VF*!-Ud^>PqE`tEXjLC zEc5D6ZYV1CYAY~^3qYt0viZpUl~$CDxq>2lVOOar8ONH6Y7LV=UdazJ74^uCQs+fs zw8<==GvMT1Jw>bH^p8#kWz^H3L=rk1VKVIqQ~OwO=H;1857A63!{Yg+l9E|EL#j7S z>Om#Njlop3&n%Hm0%-16w~3-Pg~_fIv6$KYM`XG=D78vNL(w>oOhvn-`&B4fWM~3^ zDT0x^Y(n22$IR0v^zE;LV)U&DEcfZhKG}uDvh{5|D5$<&en_AC)(|fFRt0|1jJ^#D zed$+Avc+aV1q3uEl{UCS?2VU{Adg zvRxzlq~NDJOQv~&OoxOseVWwld4o)*E2jzn^MDLGOL^_niE z7Q0lh`dQyXLYWu7{8>oK#ai%TS^L5ELNrv1JCLYa7k0k_osxaD&VbcBZ9=csqY>JK zUhM(;o*(O~-H2Xs$ev5DZbG=~)ous(saG9gDa}DgB3&+{S4Cl*!=&Qo%|+RbUX1~R z@F4YJWf)B!oB{5Ro-#B_pVYg#!)xV-3b(pR+0ZJokH~MfO7=%8!Va#WxThh|SnL|@G4dGhePN~V=VW{N~lR|24m{dYZ zH3mpEhDp7=qcFx>&8RT93#r+iBvm}S8rYH4c#w%X&kn>Gj?W9m9GMl0>B~2$!90NG zMF(v2{iHWKym!Fo7#yPN&Evv&i*DK(cy9rS=8*?J)4-PP#ua#QST7yJb#bNn-{E0{)Pg45wBGpyO5n^KgiSATXXj(i8p2C4pJV^p zEEmMB*D`wB|CJusUIoD9GjVnvx65G67F+kVc@F>+B#m+bqi32rGeP}*|Bo3rs+!x8 zBdNdrg5n*pjM2^!n5vQ;5S~fTkm#-C;@PXW18Qgpu(|+Y&b(IKUmCTGcxjhcfc&U) zDz0jJ70VE0vlLGNnp<(k6)l}CZ+5;7#f&Tf7PpJb>}qdm(d&_#i?@ayTAg^Dp%a98 zM?OE6IZVuV7;uBVrB_N~-w+}>4MCkpsAyTJ7~i43cqc0FmVbO(!jjl4=Fzk(r!;+4 zJZ>#pR;segk7*a48ra7T^WTX2&SHj<1*HQ`y^VYgHDXftnu}3;qs4Mf zS0pu(xrexWigb{>ih!AKB-80YmfX_vdM;8kW*~{K2+^(C27@1W0^GXyeg>Yo5P|Oe z<{6@I&->NY8`igdkx|^>1n}lIdmcG3asVUtmt#1_~o@5n2ed${Qkv~D- zK7lfa^=*L^Vm)Aa2E<{r|w?~Jx@mcG3rHkwvW*yr<+n!#I=Xl{LT;h+Qh z*2*h+oSiQ`ZIk+T2{MAdl>k%ptx1}(kbld=<@T`gY<)Tagt0DgeecPgcTHg{Po@6~ged${xB7cIu{hGxH`8OH= z|78E>$gIIn)3=92H8LBKGSb;x|F)0VXr{isu?1@;x4u2uni zb^=rMtpF*r{99LF`u0~O%cXDUOY1V=3P$JfZzoH%>f5W(g0Q|7GaeT{|8M%XJ&Kg& z-_V|}l$mh0r#-4~c^mU@gMONS+lIvm>Dvc%-9K61-jNRdPt&*Q{V2pteVa`>o2zeE z0Xf{BKFsbyANn?MvafIBM}9c6wfovfv2xwoy~W?;Q+2<5Ub>IV+1=af=h4{`c-b9X zFJhWGkI|3e{>e;Ef< 
zYcz)fNL6z9=+BbFam?YF>v_Yq_kP*`E1x|yp9=HDllvFV=i?#B=LFsr=Lf)dR|^aErGqSQj7Y=r2KoOm)sw*gWAeXCq_UsSrZvrW*wJizRe z)u3CSi{7P5kBYttdQ$@Q&K7!dHM_eD;O@WSiC1?zAosRw@MkI^hfp=4B%w zZHCFYXR`kA+A-l10K0KwoJ!my36&8lA~crJ0fdSP6%sm-(C&oxAvB249)z|ev;(2< z`DZKsS<65Fy&8X9LSGYlmrysM*9pBx=tV+r5NbtDfVoC8_Y`1v@|6h|I7Uqn|x~NS?bRBe!Ry)$R^Zq;h7s9>M@)U6_b)zfgD>!{1l9gzy`HF{$FdjIZ0l zKg;=N3I9BUKcx8tzoy>qSGbO8ZwJh@f7P&o3>#0zP_NAMT7H!X2y$dYCw>gjO-}5ekdsMkw4s9M(TG-BZIjKkRm1C7C7|-(SOv7~ZVm6%7Ae zC0j@vPa5Let3JX7n z25>G?nmxohOJSjN65u5mTBD_F7k?r@-0!O-TOrw_*4E0TqCdvkhA5c?!~dZ;jf7hj z7V$g<*o|C4rCH~GqV*^|--!r@{Zqpv;WQKWtcG=v*p(y()*dlBz_7Cz1_wb|okZ9f zy=NSzI1M=NR3bW~w`IW947i*Dpl|`dhV{Laf)r^7g{7tjDBQ$cc2ntz#J)fYkah&a z-&eSc@XnygQf#lZr4;!A_*DJ*F&P*^nQ zOoj8<8va#itaB};Az`BfxFZ!7-uF?sM9QbIXy5h<3;(4`N2EVW;U?n8R5lHSM=M-K zc%;IWg!dr~c`UwMN|@a+osu$Es|Sn@OS>0@&)}0(z#rCTHy{4w3 zF#H$|?;(7HhIbLZLg9MCzfv3#-R^*?dOK=ZGsU)LApS5-*UWSW1Zeycu;(y7LRr;e zymYaWTYi~eoOLSk1WA0Ru(V9~0Eb!Mrnus#ZG}sSJ6U1Th$|HqpM8D+=T1{oGnKB? z-ZX_J-QkKW>|dtgB7$=i77^?U9P$xSToGMAg{3W6cd3*`=xqzpxJRjcL?t%>(H#{K zST_R-H9(Yb6$5tC0O9`=!0x>uN|*7bMhN4tC@h7a8p!l&g(dNY3QIlxQQ>k5>qLcH z2_I!(*5zRVoJ#}rjsCdA|GkDw+HDknAj5x9ofO{I0w%woYFH8D-wnhY{Si|(`XeSz z`7qs+z;(aCkF@Xo8xtd9HN)_IRpS~+IK?<;^u(j!yq`h1`;_vJsL!n`F;4Nq0s>9Bsq)hU9n?K3cvj+?^>D@VGw{MM2Ts z8xT{x1$}4CPA0{+CdI!=$#}M>4bJ0Y8}06c=J$Js>Pq>pa343hx8t|FjDhxIk^KUN zMKlj9EIM+x!gZ9!&4A)xngB8A^EBg4c&0r*!6OZ}WC={^ztam(<-jVc9~RF6=2 zF5wY`+xz|OkgoAdT4kExJq+Eesbr%nF#y@;BN;^r)&81JxMR@WD#{ zt>@zF4i#orxTg{Ubs3T0E8U~z_X_vN3x%){`>iDW%H3Du*13C|G;a|HX?HTeU(z(f z%#SK&aROf}Tty~60Zb;|lLD`BZxb3T-J1-pc7(fAm5^|~z$AFgd>>#!Q{p=mUPZ#k zA&qMV0!Ay53f9p`g@uEC6fS4@ZULOK0O3-FD@b@0V6V;UMNzfQdUvAmAtYbBK;TxQ zZ>dSk32)FE=^^}>;)p=*1xz8_!7#j^db50?zw&K&muWh|y_#Y6Vxjji!kO-D4U_oa zD=d5*qp;+6gVGS5u28s&MgEoIh*(b3@VN{>PT?NHM-qmn9;RWUy#?a2_Q?_F|F7^%`DD=~pQ%B`s7Ok=_u6C$QAp1;X( z_F_ppOz8;yJp(j56-UCK1MCiEflzHE{58w*m|`@Mn-4Uyf$+Z+7C&~Q;z&tuP`Hu= zt_Z*r6&8HaA2Wq9BS3R%faW0qn!iz4bmHd*CjA{17GvKMFg?c)s^3yuYZaDwqd$`W 
z?wU_E%keY7OxI@Yf^-)uERuT#blt60tWv1O4CvLIMH?Swz$Y3Y6>_J-O(b<0NRj;c zN;1l@(-oHZo0PJ!n^ZVKwypu}S?^(}F}2=;bClc=B_XAWYceTKiNaFiZ_ZQP&+$c- zN)}xTcQA{=z@=4Ef84G3XA4a&cJmzz0W0`Y3scV&9{`sAj|pR8W+=r~j9+HbEY$ce zhEadqhxq3{6ML7&3cvLxtP<%{wzcAn3_{Ho48QdPQ*Ftqd0@^|?_>-WBwLN^Uz&%7a{lCgae7(X#ze?c_vV3j;r$MW^mH5*YUPU+o*mHYh z$udXX?r>#U*x5_TNYWF490BH;OmW(7y}6u_)YK_N|8h%HWMLc&iNFsRl1; z!e^WBqvpHRe7`Z}x(0mJpAxk`Dm}II_faR+*@NRXr<9(2Qgq7aL%8Y2N>w5VpMm_? z1BPvj{hrq1D1O;E`7yskVx3le@OEzM;VWfXM$$OjzqMv3thnxlr9=oBtD4b)ai=K{ zn_OZ%o7J8-@5cG1QES2=Y@g#vt2Ow?{dEH7uLa1xd--bxC!9O8p} z`e1<%7W&{YAKcFeNBH2uK3L>~NBCfg503M}s1L?{u-pev_Q450cm}|{wQE`83LiYz z2P=K>VjryX!AU+??St3*V8RD$e6Y?3r~6>N58mQ~4L*364>tPXeLgtX2OsspCLest z2b+Cxkq@@|;B!9M;e-G1!A>81oghRBNv`t2w|%h72S4;d*9SlM!5$y{#s_`bqMJ!@^L$SaFrr_ z4TQG=evN<04gDfFjpCB3)6j^mM$cerId37haKKl+7j;H0{C1~2cfc9+q3^lxoX&4M z55fU|)DP7ahLAxp5qL1@;7lVbki@(MPaRnZiezq6QBj zD~b7%3WyS0eh%M&U-y|`&M_w|3GO(iNcn9w-%fn7{LHdo12R2&U<0zZ{{RQ&fUpp! zKqwIB8+z@AR@t)kp8LBqRcsHOreecx0Ag)Lv7Lypm~%I8X)SXe7=axKl=srl`ni=z zRocSS_J)^RIgd4gW|{RENtbae?`a4|?-o1fnvf!HIlM>7ec+TeV6v7QKH@k&&luYv zER9*qxyLJ-?2YKAX`H4O%*eFwPhw5Y$det^9b`)x#S!3zEE6#AW@IGw)g7YzU6AO;_-12Wx zn7UmJT|B>%A()>bGUnXMYXoApFdS>IB{!lbR^~zV)!;9h-;RZ0D~CZ%LX;V)m|4=jcJv;$@Rc!iU38llW@}VW@@I9JL-&6nZq-mhXOg5_Ac- zj!7j*a2(fHum(K&_NI5QTdNu2-EXk|LQl$k^+)UW#@~~kbVk-jBm1>Rof&iK4Ip{T z8Npn&#xu4~%>kwKJr9rx@FS^dKJ|Frs8f~i{(=R8zViYDXkrO0 zzjGAg{tgD$fd5QeDDU5QP~mKm^Q>C$P(`2bhJJ z!S60fo55VlS!=<)=R`4s0w@X0Kx<%@BIGs$Vn(L%y=j<134RzQ%r*l?MpDmGC+#It zQkp@{o>AUuhR)&7PG*&4YOtjm%*z#^!ORO9OhUOd_+%if1`~f{8oUoMRfC1&TpIk} zO=$2Nz6Qr}e1J-%A`}fCb^=Duwgww9ZmtGT0%0Q{(O}kr+aVT_p}|d_1{bLY6EQ=B ze-28%1`|J$+QO$EFaCrY+?C-n-c${yvWNzsRi4)1FQA~c1vu%GegObCMfeH~#M1+r?Q)U3r%4xp1c%=AXHf@l@pQ>2S07&!`<86iFRKwwzkI8v+}-aiItN z6q%938U2`y9z9qO?(4@>m1k5?C)27d2xZpG7}Uv|2Jq;nICSYmjZFJpnKT;-{4n-y zbQzqnVCkC?kbpRm)b%hh)EL=`CBKJC_l~1Ve~5E3AQkozM-khZ!HYAX(##SnO+u>D zbV+sLnjFXiif?AJjcN93P*=?+NglO2F)ZXKfD-V{2SsrWQ_zK45-+~lcm+}EQ;wrb 
zi%X#B&~nkU1w1n9r$V^nQ)?wC0t49QqajC#sweIqYDDJ<0v@=})iB2sfHVlhRE!z8p)c)~_V#$8axHbH`-)^cM8 z(d2(3rn1p)6E8At57Z&*tCiwZ@8qikOX;=qwM2=e)}1ffAfgZlHx}9urGq;Ul)?@! zPguX37kNM%m?N}-gj5^ej9Rl3VQqL0IjA-;*~YYC7BE#Cg#BFFa0!X|Ej6^EE#sQ2 zD(|iq{haCoGSzCq#*)emqz7L<%i> zGN}|?OG6kZBk1r1TK5--?cB|ix9rNPo{*5QCsiVbz&&OT(wH8dlo(yfK6-S21g7cH zfn-jvaR`ZJ=t-v6_?g5TuiYEVVAAlCMR<&(aol##@CaClXHZJTgUjj~`%5aZLW|hs6cr|TyyRVFLxK`Um z6QyexQ)JB{u*x^Z3R8kjIq;zRj+d-5Epk{SHUCc{r5<$(t;`AoEBq3JtVOIEWC3De zkR>Ro``@F*AQ{w`<0f8Y+JIw7d`1Pu6L#Dzm2Z$liKHTA20c?UBL;cM?;t-Ks zcHFBgNd0aX*f2=uh}t8ev_V3pOb(IMeLyxnQz>fSmUtB~)e?p8TpqiO#4;=~(_`-= z@oA4Oa$@byI!Zh?e~YWOR`unrJ=Of@S}MF?)u{)SlhSIxAisscD6Wi(2nyG&DyFwfT+Y&t4)b5xj13SD~~(%(1xr$|B6P z(1xSM??!4y98W$M%Wq5_C!Up_VkL z2rQ(gO1KFX68#Rkh#B3YONa)YMx|Gws76TeC>8jH`%Re_uSSa5gA2=?iwmQBVe*FN&m2XB7G+ zLf6f(xFSTM(<(XJsG$t*BASlefyuebWHkj8tfzNQqf@0e#H<;Dgjh^IZA>ZthBT@X zuuQ49;`eZg8a+s$i*hBf;0jqcGIT6Fl7h^hs2C%7>Swi+7Aje;{kxmgBM#MrUPNBRI7q((Iq7Sx(5xGEF@r z%gmmNOzCL1FN2T4%7>`9EgjHoK0cfvVib*K(ci(W5z15!KHz$~$R0Z^ytx3K7Echg zDu()8K?z3SJd!`aVzegkZ^XTnxj8d>KoORR~Bxxo!f)#xL(tx8na%3+kprQq% ze}tE1dmsw3cuW$PBCnCZ8_+id!G6sM1$O;8u@XM$q-8sVp(g#cx2xkOUTq ziGxhB*(kFgBU$N}e7P@8NW4WE$Cc8q#jUT)tdHf|V`&jTJ2O3hUYb}`XmU7{F3oNs zrEq6$G@@`5QQRIA5N6yG+(>Fy#va~6o+33TA^(^&c~F^i8D;`Ahsaw2F=nIj$0iKa zk2`tHnjce`Ouhy!o|yetW!5)k)_<8v%z8JD{adVMo&Pj#0h(5()rScO3SXEc!pJVN z;FN((Xy#shP3a(w0nO;RV(^y7AHd3ZHf27VJLn7%>*^IT(|hbCj4u)WHkOlRqF;ar zJPegpJj}AuFbz(xI<+O#^zd|Q0nhMh@7}ip<=VG^dJ=VJi~ueyfJyvvm9Qu|ETCLV zlRz{qpdxGmi=yQ7)=J+3qPUL_ndISpVlvBTip2uTci$`vI12<}0lx$`^qa}kx|?n^ zMgapoqp_ynz^ZviBx7SN7xSOCe21)RJ0=2*b#dT(Sv|HrWP zwOdFhthhrFl}rDL8%eE40}1^nM|j%dm#nee`ri%24E;ai|Ed4&%$bvD-YXOrphww7ba;+DHnTqJ9?`PZ!th)(FOl(Zl-R^MI4fnTXR`UVUW0kC8))J{7 z=Xmt!0=fPu6G1M-#>^NsF{!iSSWZOT#L~!RwIWfgH>M4NUYJzRI>h2z#LVW6D_SNm zn7kHQuz*pKS8)n#M!gx@x6&0h^dK_1@>Yp8>ADr-uK0kU)om7=zi`fs;sq&#Jsc_rGE>}F8iCz z2IF8}v6@vcj|k2J5fAS9dg4$lE1PSFKkJ7}wsH0x$*3u%3%T0t=`OoZ>DP;JiIEG1 z&tp7f45fz|)KN~XO`fl&@{hA-GN>&nH;u!j8m0WHu1%X%gCtZ#0-byL(;FhcjfT%G 
zPKHhA8Hq}pj`9~6j?7?_=`0y2<;rO~=L^P+Ry7?#+^FdsDEPH=Q8M;`@S%poAezGl zv0hru+9t(h0)v@VMazK+ooEf14pR|5%XB(Jrt>Q5Gl%I^YV*siU^-F{&Z(KEQw$VT zD;x$h%QT&x5htc25)GM76DvneM`p{$6}8cHXdL=DP)TG~TWL*CB=sDcTo_N$(vryC zZG0RENNthS-=(b7?8Li2O+H;lHLn-`xWxfZ^#g1XuvkaS{yEZ?tHT z?FnhPphlVVzLC_|XpHEr(OLLE_TB`%s_N<+4?-kb^+s`s0~!?-v?y4uL>z()-m5n@ z6$k1VhayguDydNfBgw@ak4K{vnrhLsRz~L_u6Z%z4lsbk7sZ6xeBG{`yQOAVB}E4@s`18;}gCP z)B++S)ZFcW76X(&ufi^t8-~cVs2t<$$fzgcrbbCNVHY+-D9v28-InHWe}Y^~OSg-f z0Hy_3Ljc9VK@#k`Ix{Pv2(~GKRNBNsI9&4UBG;3BjCJnaGYHu^(T#`AhgvNJ79i`e z&MNTdSE;sEA(?Y8@t*7mQKK0$Pk@c+M1t|$^&fr@O_|BgJP$#tSuhAqomz&>*>&piAs1xWqC@vcMwq`^CZ>NTd)jxV8)6rI@s}4pM;<@ zm4GOzjz!v(mvT!zDDNqfUL*9r`<=TpkJl%=^3z^D_|!n zFSA}LjEU!YRk}jThS%IdsMcx%3z61p7EXfNVCP0j1;_ME+;#_YZ%7m#u&Eilth6xL zAokT4iOGF8A__?ZKfrTrqW`mBB+hCg`VEQmmg4;t{4X1uIPHb8i9@r$J480RAnS98 z#vZfNS0LixWg8NUzup=^|JjfjvwUpgjOUTM2uQbo>|i22Y*C^nvma#<+GS#L`^eAuyf+(ZFeA+8F}dM0a*pRecZys(dese zlEeK0J3{w2F(fi=N0T?vmZa-Yk}zxHu!+y&301 zj8jAGu%jB3OV?tDRr}>?8`535<}$^u(_@iE4)Qz_ZY{*ZZvcX^xY7~^d-xk8u+S=) z^-{+&cLrT*B9la9(yPJ*`3&`?a^p#GpVBZ*#>~a|f|TMqP=QwHpZAp>eL z2LYD3573KZXG-M`av-{n&r{A@wj&lmp9yF#X7o?Fdv&^gVH^`N``n)pLy{#O6Ap;4 zJk*yGB#l&zd%WNFFhxJ5&*Z%jvH7Z9EwTcp7Ilg2IE$>vNA|)Y_->;dropev7T`q| z>5gQpsU=_F_g2O_+MUA@_i=$7C|DOrTZ^l7*e-UOZ~E$Z7GvrlV&+ursibA+ZBY+_ zxE2eX^c;}PzC~wTcPg$zAJ^77Ts$D_IjA9OzLaekP@}1C0LQt>TvIAr6BpG65^l!)fTNv*TMrmvh zAQ-f`v)D{zwv{A*(4qwj4O-S5N7Y)8WI^3&U5)pf1>&AI9kd9bbcD3U={(-Q0r2SV z8JZs^cWG7li>vf0f_3V8g(5Ja(2&tBQ{+Ky7AgIDIB$Qf&s+*FV|d%es^n@(@>No3 z19{K~eH6)1$#LDQ8z3`0Uj#ZpW9Z^U%-Rf6IaQ-1nbm6 z3PoVmvCjR_kA{-NdzwNj$y{BkB+rZstEAW8SIH*fHl*n_KnsfeNs($yC4c-_dK;kWVl2Jkh%S}#6-0AnCu1Q_|4*(%wNQXzjg%p@ zXp*iuqn}WYV&pWE`Fom250c?s1#;sqFzSKe2^JBdc4AuoL@3ILSw+Bz^uvzhC}C^r zc%%V@Xl#8J#|7U*3sB4gs41ZgO~}N&nrRvO;El}ZctdRV9Oul~(|mEDa9&O0XOy)kx9%M>YAB(xAYr$gVV89Vg<1TF8H5% z|ECnd9ak7PoO^P~Nq0;^s^slN?HlC8k*Ub`3ljT`z?+w!YT6Z#iE>?4(m|^l9Y2l` zJV?e2Nx!5w98!wzka`Wh6Wj?VS5=+9KkTSxNq1*>O|)WAAbR)R 
zzOv(xrjG240?KA$fq-*Elp)A&OGB%_tytV5p@#{v4elL{>%fBr@lUO4*vNmYI_-PJBKbP-%A`eiu>u2{bz$8|-b% zbX|f5@1@{96O(bhJQy6OhfYi;YH#laHu+IjXp>DZDbLT6 znXe{XloQx{2LsX@m9*nRUN{hP8HvnXrn{q(xKGiJ94WODkx}KI!44kYznF5+)_;+` z5Os=kbnO~+aNeyQ62Te!vy9dQL!6xq;7c?d7=gF2?WV9!l=4JZTAl-pj*JD&gL>QT z=-%-D2+oSu5zaHB3&saphcVy8{_qS#0MV<~5g-6?fG~I;+F8(r3Cwh7 z#f0yhFDl|^7yHJou>DYyf&(I!_PxbgX$J7K<)WDASE0*dTY#YFG7H(#&P4?|Yifv= zC6T#KFl)Edox_oMg#A;H><`FsVhVF(g71$nbAnGc$Ag24EpV`vp!Y;J&dL3Aj8w!K zd2t{pc?ElQqw`{x~k9b@u1_P6MQu$+&M`pCi0 zq4_3e>W;MybRFb2YCF6Ns_jqWx@K^g&!G^qqx(WEb{uW545?kod4tR7sOY+64jvt8 z!uJ4b+4>V_=?Fk$oKHsh1xjrT2BR{Pj|+Qi;bDuT_;=t3c=HgHzg0H9{LNNY&Gc!|A|Z)sXEte$z=B?h#e_*VX|Kg z5Hn$mhzSsy>&Gv_T)l~ykeOOdTXit|L`Fsr$t`mB2*i0G70MaCC0m_9<-qRsu&qYD z{Yj!eYOCO295~3Hfqgmj=Z;LTp*8FHyUQriAgNJQkE-@x?Hpw%Ot(&Ch8ZizRy%7(Ny*+I1k$@GqhsF%S8bRpZb zRpygH4txbeU`*{-3y6GNZ10K*MA{D3rNic4D{szvJh(Z)nXN4_CjXr|q!_?IK~;xa zMV!!aJ}2SGv}!FtMBBjwdJep#kdqC}p$mNe$4ButWVo2oRJqkqH_M^%-QBzcImMl7 zA!#(j82ER5>JVWFgQ23t)DQY2Nwd%0oucrL{A4Z`4UZ}!n&|G`CcELzu+JPN7+zxdJhjNvIfX@?Zu0z{cmv$OBJIi}d$D&k)?+TH=rP=lv|-hghG4*1S|nh@2}X=vea?Rg!4nHAhjTjp_?_t*T21S& zRD@tQIZ)I?Nf$e*rLaMlIJ$ZJ`+kH`O<{j_PvY!i^Um`Ew?KT1tG)0}Q}A~wF50v7 zjhFWcaA9tO9*%{=S32#~ z1AbU$k<2|}Vd{skW;pP}jVxm~ez-!?LVkGs5BuSY?)~tP{iznWjR#h^<8gvqKi$A` zD&AzvR_JSmJix>B=%zWd1qkRN+-}SlzqB7C(eqGWY|tNoFD8{A@Wr`1i|d%8w9QbV7iGJm%benikH}@6YkhI{NwB?fN2 zVk(nFN7h){#*r^=4Y?BAFB{o)OYa~4H5 zq9o>a9G|qiQEIM!TYOgwhvnYi^R}YRV*KTIMDZ5Y*zM7fV1qFMu{D{SQJ~t)t*5}S zdz-9T+#^xfa~G^=;9w{2msxE0)~vvm8SJk)^kdyugV%K{v2(^?U(IU5t;D{XkGZer z6YQ&b4f|@i8E3KXr)k_7TXb;h%nBTYlS~Z9EcjCMP=YnP6cxc~%BX)6SF7`bLz`sh zP3@jQ0d=kdEDCAv{*sqgi=i)-Nq~;*#c$Ep?kPr##6v@5{>v_Yn{&43Wm4-}zDOL5 z|6k>AT!^hc!xEF9H76hiR$K(UVSrZvklWH1+r#$;dKBP$8S*6N)Q9%3N4g-7NWWvj zzTjtA-3+p|r5;F?LhncDrBj&+^t-EMV~{}vh)-Q)_f;F*A6(W?i5cHwRR~FPFm23`k8oqI|r_!Bf*aaQh(zrI>H>v>-81OT@kA&w zoTTjbmi(ndKDS#^=g2qXiRp_;qTNMYhF>dlum)TI}^+gynif`U(?lE`I?x zrgK$qj?|o;^RHAJFD$RYi>wU+dcku3U-+cle_9^UcrmloerL6rBf90 
zUAxqP|9Qj+fXI1P#n_6NZJA4r@BWCa9Tee~kH~rj^&=?TFtwAt>nYBRVWxQblX%;N z1~<|I`PyzjhrJznMcQBUoLA2ioLC&h@|!NqgXkGt3>T^%mRRIx=rQ^%z8(nwZ`I> z7qaN^+9DiTjcY*$^V$C2AhTVY4h>J2iI0~(wOD}T>e?XU*H7`%?*0BEh4uK!nszK* zh@TJWPZAWJ79lT?Xf8r-`y>`2GpF@p>GKCp+v3{jfs!k-HX3zK-}PZ>b#1g{Yc%L3 z@8gIY70nPU8lTxNKFP&siJccNr28Axi{k>FA~18fD9&5lU&)ugOC!Mik34c62VI`$ z*S~&~nj_v6N8DG57l(+magEgGr|hn|b7ac{h@YXv#{f(=<8|5+56blxM{a|jPuZV_ z%W+v@VRKwI$2mA(IMA&^b`O7D9Of|PQuYdg%cfo;qVWKe?_I=2vuzK?y*YXf7MH&> z+m80_GA$I_x^?MP$;T^{ZW6h4Zy7hE`A1*!6iin>h0)z+k|rn~=QcClc*aQGaMls% z>_xoM)9c$SvTZ!CCz~)F2Wb&t; zA`}19w}A9=dNUHkmJeV7%?d##6Dk4JUjYT0grJ_1W>pt7D+O$hB!OmsrCAuSt`ihn zBOEZ~#qan#?-blgVDyv$cG=(BO8760wSglmcJ2H=yJ`)HRz|WWN-4=92;!aQ}tX z02(Y>iCK7HQGh04W^Dt9p&1#u-ZOW~&M%qVzmgtx*G z+K9v02BmM4W!X>@ax=Y}o<$1qCV+!e=3<{LS9+|6=1H;tjp_m4tPsnosi&87R@;5k zV1$Vg4cJ3nC(&karFyvYshO-cfPkkwu#yB%OQ@1iAQA;g zo*Wjy0{NBHuFi$&eno{Aq;LOf`BBG-8%;E@d(04UR-L0TjY6{tqEAv3D6co;# z*@(;`6(z1j<`FonoQk%Iir$Vm7!^ui954M9^lf@hD2ldTRg}3psc0#(rdf~lAmr3) zs`->IjO+HQxOU<=EP%}k5YZz>vyZ5XHUojy=%Lb%>zaRfd0+uEVG}_YDtb4PP@^rt zfijh{KEgT2GpC+2#WJai5>K>Ds8MnOkcyy2QKn+VncdYbGacn(R76F$vIPRp?1b5@ zbvd=ct^sC0evhJBXTHco<;B*Vz7g#niH5FfP<%~c0R}(V>Nf~I!-j`<&0DZ7y5ou3 zDRRsEy3H#R3)W{V^~E%Ff!aa#l-o7xaXHk-=1||gRv1;(x8a2&MNYcZp8PaRKA#OD z6y0po19Nsf9VdG@6VSd3)Xo^YiD9oXYr&9ATV^-Uk*w6!!7B%UWwsYtPA|lJ{QRCp zy;)N~;{2X%#OrM7{GRv3$o%trdgj9l{G%AgZzG!2D4G;q0lX4P&?FX<%;S^QkWU$* z`I@Le-Jjnx3Q#%}#J21oJijM#`-ox7~TyXByU#JEAvLoeobJWpGq9?wn=JX7S>H#LfAjJJeMO8zxI0d|u zm@d>j3t;P5gen^k;B5Y+~wiL_$(@ z=R9aC^9Q_V=SrmR&kY|^Hk%5Dr9d(zxo?C2#Tbshk|&jA&cJ*2I!5|B{cl4EsR?R+ z0<=}H7cs$y>i>QOD*K%el|Vwg1)2|cpPKIV5Z{NI`Q>{8j znV9>{@!3-mm2_UNd9MeD4#}6Z$!%WdG$+0k9Mh6DxNIfmjp~qLy$%4d9B3AaDL4Ws#`tkMq~1z`w79v0S83;{_uFd z29R~5_w~o~>5}`a!6EiKcQd3vk(hNz&(EZ=7n!?ue~03)wi6ZEQ;ERtH-4jr!x$I? 
zXH`-T78qix*}Od!ga8;z9GLP(;F~AZakz>Gz?X2>m25h0`wA6}$%nF%4=0=%ujjuA zvST_C8H0G;b-epux$F2M6b@^ncYbsLwLcS_rSNanGWCuRoR=rvIWkvDAA`f2 z5O2q*X2Xf!Zq81FGMZ(;0EF~U<{1swG2EnKTBrFd!sgx(;4}@hIW;GK4Fq8L#!&h- z3?uGxdA6F1G_9QQXWF!1;Au|MgfUDwE(CPAhK2fp8WyhhVAxq@c9Q2N^L&TCFUFIL z?B9HYcXvKj&wPfk*@h9w@UA@9m^T>tDu0&ahgYJt@?|l8kgu8f<}l(PHuWLC{56!? zz=&xYQAF?uQGRow2w}Oo9tq^*aN%pYIhaq}XZG@LdFDLLB)pxf;XEQ3O9cB`1SJ;1 z=bOm)a+BrLJ9Y{Y^wmrvh#duPoq3MwxFeg0z9SCpW1G(f-x`zExKgISZR4hEo+idU zXX6$rTp{Bsk#3q0VIIK`{q}wGTy7?!IA)G5+i&E##!O}8FTcP~nm;G<=T63X79hi? zJHLpyOEfI%R;Dyt7(YzIjSL^D;bw*hAZ&Ueli5tJ_TbL}d@W?cPw+!=_YCFOgjDjJ zAk@RKa*!6Z_aMxK*O7p@W}?4E)7fn>3p8Ft@HoRxt9cktbH65Vn#25x8IW)X6K>T6 z9yMV~6_ZrX1dVSdg>f3LV)z^li{Q&OT+H|p8Wt{xFbo2N@HB_;=Rk>BWA^6LigD$a zx$xEm+LwZPOPE_@cE$_E@}lxoL>g@b_!bcq!4r6z&+N+se0kTt{E;uO;)gu^4S7IO zBvPRiMN+3BOfsz~#v8kTZZkp7;GfM4ii5u6;#xOpQ0j@Ag__%lWf zV77jYMYg^0WVu*A8ufPey%*oD)pwA0cfQE=Z^ZB*ewgWAJoOqiTc7o0@i}Ets%^ez zZD9OA5N57Rk&C1+AgVth()>Z8B!?TyaU&D2=T9Yn#`EWL{1D4Ue3@(ue};|R!7{eB zJaPXe4W|T&V84Pq4AiiQ^<5+)Ua0P(@xtde8WxqB4;WJWQp+T9Sq)1Syscru_cFrf zMf@#0JqYjW$$DRI$fXZZ*cP(ZSHn`49W^X0=Ob*|fWW-TpRbVJ z^k98`!k@L4!Wuk5L2|qzaqCPw;ioD&L3=&IEW_g=;IYK_XN%>Jp@cFf)G@br?2asG zdc78A0)Wg_n$U3v5`JbA&Sq|iYP4WmXHF*IC6_{4D9bpjvEYn??88sc0Ce9NfuD=P~a`S=4^(UxKS|* zmTNRDxi7Kl=i_P4(gb1UHjpq5e`@i=TTMkiP{0XFoS*BPaT;zQqoP0V{0_#KYkURc zM`&1_TgPXrTkG+JWQ2te5H@QxLP+-sA#K%ok;J!Jt|}sZOyipvzE8sv{|AJrytjwa zYY;Y*H9`nHu88WHd920{V)!n^GxMp4Q<08Q;Wse#29=JKW02-*W_-T@`e20~!`M@R z%Ut@26jLI;`&7bJOdO0v6u+F|0|?lriPemFm0?ubb_ygJ@-$q?^o_R4{suVHJH*u8 zill)+%LNGgQSmg3ZH~DLEL8rEa2iw|Ai$qBK}b!PgmuP~=US7|v~ps-&ZhlV;{?$G z!tN^57f%Xr7qV4uGbiyh!})U@vw+@LDv}nWJJ`nUM*!xBs~vSK9OcXHifbDJ*iO-R z5VI-b&nu*`141OXRLEj}26*}YI^s#^Hl@={I*SE%otbOX9_Q0`tH%-!@b^estC@kP zxr0BHFZ&@Q1Y)^422b+`Mz;3c;pc557x2nWWX1eX0jaQk$bNLk=FN`Gk}hPjS@rSy z&Gt(vy)SqiKrsa7BRk+$WIKT9y<<;tQSLVY4aS;??j^_o;rP(YN<+GWDXE8b<|fTsNSsF^ z&Wx9gYt6V&!d^@`lexWP+ipQ=_RI=a;r4g}g5pt{K}vXthG!A$J{lg+a36%t4)`H! 
z2U6;bC>P{@8c#-kuKY^Q`w?f_5W(7C9ZI;B39ZcO9a~O*TqFF5lpa$+kgiAg2@TL{9s5EZxI9C-?9W1(=2v){ z)A)0hFtEz>7Kr5rEJ^O8n0UC-JDc=ATFZo1vo{lV*Mt+8@DdYrk<2VYZu1|YG`k7j zHRdtFx7<9)r)@~yw1s}fK3`ylrTlrqm#HAvLt(7wLUMu2@A5$6n&Y6 zXSN+bnA6SfRilwB?Q>82d=t;?5DQf4DNsJp5N&0hW`0lNTeG|4No3P&_4%bdTeGXD z>ihZfY|UPHv*thEWqfNkd8?-X*5+RWqQrASM$-?tP2c+Kj^BwO!_Rg)wL}0S!lX1B1_~<)#&2#g3y!K7pz31gG8r27jdu0VY;-Iys z-e1g*>GLu9jwPMXdZxYM72Mu~O>MY|v05)R=HzwjR5_#v*S_KmLB!|b+R|W2aZg~! zUMwKG9(%RqXsO_XnQt(mm-a@NCf$>Obyeg;?$m=LkCujk_Ifw@?;2U0?(w%tx zw%)Fe=RUl22;QiOw$D$fBj|K83mQq{1 z?OVq0gku1$M&LVCT$SaVO!-&fgRw_MIe0GRHMr}#wx6#T3{xvU|D0D`*Y?xHo1ho| zN>iiI7(EDdl1Zn$ExN#0d82R+FPV1688f{NQ?(`UR1Qs963CpFLo4F1u-CtyyF_I< z_`RGg4e?0JxPCDO0f>n^Y z#f$rS#FYwiZ!A*4f?l80N2b(uu4peaJKq)~v?^B+>VwM1VT5 zs}w-e9zHj29gZa^F2Y0?ruIcTmJU`R9qZtEc?+=hM{(Hf6#~_&*7lkML5XT zKO=h4FuY`{ksrH;<&0?TBbGCw7gb(~)d7}&gcCj=x^f)&z*l`zeU+0^S_MD=MH87b z2s5uj2}3sxv%9f1if;b#Hcfo9`6G-Ctsw~*58(wAl$i#DXUjb?lzJ7ys;ikbqO0k| z$hPc@{EQ*cRjLa+x+zCA!m>hD{><9rXjtKpq%)}*1xVCBgSFbEJF+?Hb-s&niJvBi z&5T?oJAOxN&;I<` z8$ZBD6X=BRkBV-vO4@{CR~x|H6;8!5F^EWYV3~oWu3xY8A{j z;H^13g6pw0&khN>Su=$QC-J9>KiBZ*H~hJjKNs`oeEyuppVRT<`v<8K^-qz;4sGwQ zm%?+&Hw3+t9&YCXapp&Fd43dS{PAU~E00 z2$8+`osH1IR`bbQkoJSH2lF07kG_td)d;bXSizVV?8_27bv&|#`uO`L|6TPlJoEqW z`grv%t&elwgv@t?VVYAJ>iZggk_fRrN*OcMzWfZ&Sbd;NYx}YVfuWp74fpat{WZV0 zfP)ibIdOg5(Xp{fyB8Ir6`Sx&-9!>|g?mIyg*O6}-G-r0Lu(o4 zAYJU`1;oM?!1UHEC`MRZ!nxVnSIK@Ux@O3LXlXWaXuT*`cP4Z<;Fv)yh+T1XyZv3- ziOlbb1^d9dFrsVG2NEOF)2A!Xw{USIIzixyvM1A4#Q)^%=TNa9cBcBjxp8y0KVHq> zSy7j+!p~nBy6P4D+=URK?qrN-Uov=h@@HG9uU}vM-&kLpZu*h+g%20~`g(Qc|99)F z>J7iXzGqcEO`F=t(88DT^D#oKulE@9wtaaGPg`F+lvGcMjI}|Pv$n#YN&RPSfAfeodDxI7&P)K`$roo zOekn0Rev`6OVT1F_Nhi>u03dfUCQ^P+OKVS7?` zu3NBU0hepREWe18_Ug%1>K~6m1H~@j5%O#`KV^e;5P$ZQm^Ee(zU*ROw#T!J@qfzM z%qf7h_vm``!00w_`lLvzE&u(=oC0`sHR4COMI-UCX69fxC{*Ih6EVN$e3tUA9g=Dt znWu9ABMVYI+{w$o5B{LcyT1`)NI4@h-dUUSX13uC-}2_>RAiz}rvY@PEpG#6SElka zC48vOU|}v;gdnB~zA&p$iunFJ54$}xX`kPd$j(vDb+_7Iku+|#RXn>czh5hkk 
zHlZdlz{G??W75gAu@f2|(%dMhYs!%a$@-BKAid$U@Y)iGK8V{+x$O{V0Rq4}@M@OX z|FU`2*6OzlAmSh8=>KdpY{v}ol5a#JO8ziPp7c&eiTNGL3bJMQJ*Kd;5mt$rjEq6F zS+*QH<4QPE&i4e7%i%Zhtz;944S_sP3yAEQkvgH{i2(5h5OSoPmb7ypGo>K3+$Td# z#O6*Hak;ZeSicj*QIhPGCY>i(3HDeh0IJyKZYMpb&zn|CP5|ozd84b7j+|wMPxxj~ zqB(LUAZK|@yDm{DGJVOOx3B?H}i{z?+PI1r)93d-TKmU{;2*l5Y#gI*(I2 z^6htM)V&xmX;D@vSXp~W9#SI_uaB@%6q(jC$dk>u_sf0gN^Yui+c`v9w#MWw(kL9 zFX-Q`O~Dq21&6m*BbE1C>l8tBA>w?bS#yWy-Fzy>OL`BtwfjWo$Xssp`l7ji8_Qi} zb7hNJh9BQf%5e|-kWhEyyl-Ukri*c2BJ*!1MO#XYKLhw!S2dBD>BASvG=gm}RMK&Q4$^Uf88pnIF6xB(FH~q;l+`tAR8JQuv!7vmcfD{T z8KVrDYnGz}p~M4L1WR@f@&)5%)PP9>G`kE_7#Lq^$yg*aSTIO4lI_jOlIm9UG$!(A z>Z7?NM`!BRPI8ib&(qvn3yrUV8B+M$e5Ytf%qZ4x&p60Tb@bhc@AeD~8 z?5>cMmf2n)mz$AD2P@{g=Q(kDRet`&op3hc$UM$0`)120I}$0}C;}FiV%_L)&cnF# zh5g0I%DL`uUgWdYY_QLVZTw&GG;br)tme-v{P`DtNOmb-7TK4l`BJ+SKabg%2O0Ah z`*Igwe$C{1gecEC#?;uCiF_HwxFocPKU?tOolRPKa_wtISAoM6VCY=oRU0? zQ{LOkY?L_|xB*JtA?Q-+iFEjNj+n8n8r6~XYP7}8L1{zJd^sq5-W(-cd0f)Vzx#|W z`SQx2fh+iNPQ-ux(niSZIhEvIp^6@aIA0|$rYf9|pKxz=gv^g`;J72VWW{dT=rW#M z`7L^_%_4yOffM|tzumQ|dF$Stuo^VHpo;~ohk$@1=alzkziMQcE>7BM>3Yoyy;5{b zb^Szx(pe3!5Z^=h>787d#zD8X2NgK&>yefodQIVk^Xwohn3-74T<%twzKzV;H6c3X zY@MtbUQke0@@^tS4D8K7y=4U@dDF8jbf|Tb^YgsZ`W45QpeK;H^Sj3Q67(dY@n-av zrwT+S7eY4tOQhXCybyQ*&WrEpiJO5EwTB_StmO4!3*npcVAmb#9vz7(wRpkT>Q~>$ z4o*cGeoEPDC7t<6pkZfHiQ`5qxu*(WmHXF9v6qu}_e?wIwfMJe1!CKuY0i0VY3Hkv zgEyxKykm>Ni%PKMFDnRX@0fprdS3fg^i$sCwxlMH^F^d z^43NI>K>9(3p+#-n{TvySPsAokmLCIVkF}`-u^76GkcJ{WroO`N3+7Gz7&DEY8?5 z$DThFz56ncS`2HqMez$J#l$!F%!Q(#;!PL?Ymr6MfZ`Bx;$FkO6jA(X7SW1zrigX; zB9c%ZVj>iP>}EPG`1@_qKylC0R`R@`nHN@zaAvcb%=6$C^&?g0dQ8@Kt1UZQ8pf;g z8z!)^(Pd2E3WLSVE78R88=;F>&1lo^hWq73Xru9NZeyf(>`@nBE7GZ?gVx}B0cbT> z(@%rHg8@VS_7emczsqxz*-4(O%t->j*=%LgkCuFE%;9{xVrJ%}C5mfr#DyJbDc2W& zjQ{-fH!=VDr@Q{ge`fy2fBwI~f1b3Edj5Zx|J-^$otriV|4sh0=QAYnBmC#{bhPkK zi{;tbf8GLKx9mU1T=gUT=l-l__|3D0%qr6d@8%RnddF@$8~$@A(t-ax+VXQap7fs| zFHrv8!;|U%mggq3Ql6{KwgQj++cy0hY+)tb20pF-d_dBcn|BcRU-F-iVj)&{OxgbD 
zIn)2gfBujE4E2BiAM1Z^JC}O?f0zGU`qY2B|6DTXNBYlO=xE_Tr^vIj|2zqM`M2yp zi!S>S{`38(>DE@6J%!9F^9tV04y=3c*r(2b|6B=~(C>XSPx;w^C;jKc03?5Z5d>?@ zUGm&y?v&>$^Sr=sHdAc+T*Wa^4L6;M+KhBqPw<87Vg$?Xrg1*;=I&d`z= zZhJujmI*ilN_i(#_&11Q?x_w{A{+aD#u2f1CaxGdp}5TX8nsm|!>d(T{KbtjJb90s z*k$nudvlq_S6q_D6mM=dIKo?nWSTw(k1Y4MkFB@IDEWLPGPlX9Nn1pQS0he#ET{~A zxTr@WgO4&&)Iuhxq_8RXS(!`V#$g{A^Rk>jaz)n4TaVn9{O2WYI?Gh%?iTC>e~{50 z@76vOBiiT^&s0srw!$th*1njDagN`a)X=@K!(M1~z0SdwTvt4ODw#Ii8> z6^p?X6iqiY1r^L6$H?bs`dn~QGhixja0~Rc1!VHF6QO=%>r>u|kYUE>-OdakhC)wd zjsbJn2%K`S4Np}dnX*b`1|T`*9zR#FB%Dd%@WeGd%timeY%8{?7l1=XKtL*OoT0lu zBowRliOh$vXe?bi%Tk+=3yMD|$2abQp29V|TnxC1>g?a?5M(#bF)QLe;Vz`5-QH!K z$ts6sVNNE*K_YW2(#yP)%FDc~OUvB;wg^0n*_|$R=*%96BG5@dY5ZEny`Ea4C9p{9 zKniZto0N=G|A@9cw2b!;LhkrNfmR#Leci=Ww=xeG5GHEx5!3L$c(?;em}5)ILM6@$ zr?bTYzR^90RRpp(5eh^{YSa2u$t#J>FM%u|^DBUrIVRDkc z^QQ-X5X*9zFP~tezuA0>rw{qAhFKgl9oE8ykC#H}ixB48i*o_HBU{PRi~0%f=BIC< zLij!+<=)wWqAcAiFZ&Ya(R?B|0ExMlSkdJ;3{=czc!9WQf$Tyy=H?;sbP}O6aVxUx zxPmlH0N0rkqS)AkpLO^F;&J47e~WWZ3)P2EJMiZ@K(T|uBXo&qB4bkyy zs9zB3AB2925Ian*)q{f25kY835IQCZl?I^^K`0r7(m|*^2%Q#$#sr~rf>1>ex;O}p z3qn@}q47azLJ+D7LN^4V>L64bgzAFOtwE?h2>m_?H3XqQ1);_u^w%IXD+v892sH(v zCxcLP5PBvEwFIGOgV3@d^kNWN5rke1LaT$&TS2IeAyi>&^#?)71ffrY(E1>>F$i@8 zq5lM-UN`yp3J}_`0Us4s7fXmV`URoW5aMpG*6J}f1jW}AT0?6NFa8oH;N@RA45s4F zlw{em3)g9_q`83G(YoAcbqh0NqZ90>wYonRE$AVW16Q!|z(y{b6`XM3YJZUfPePvq z_+<*;L8}eJ_X)v!4aAR|CQA8m8ivTjZVjK=3{nA^RpkL!02KIv%}Qc@oW#8$*w*S~ zz)~A}1S~=v{-Ovxk3Q5QuU7cRc##i^!1o6{7=efy;dp_+C=TEL3&eZ*_Z%VBA&yC^ zqehxVTYp|vX{Y+YO=u4@uYx4zl$Y9yfyn5uyUH*@GmuJMy#NISh7q14r><_cI4VMD zSdNfyF^$FhKzh2oB#8 z!C?`C!*c{L{{ezM!vqC5AlRv2xU!7W(ALL?nm3*V+)@BXhpmkSc4{m zy0RMDjkmWH%@dZam}QJyL#~5Q%6kEdH^4bHDh2+3uI!+h@F=`h7^yoz9QOTkX*Y%`Y+-p!nst&^E6K&{)SyI?K zl83?*TO89>VN4TIgNnkL#|$>Rd2OFfl#y0XNJhtLW^pts0ce1<0^ay>Aj;me>};Lg zNF8%B4X9xA)MLaZen0Ka8V{*Nj-2li;Am(>{Xrd*R12?Rmb|TwDB!Y_0iiV0Zlgfp z-i5dtJsg5Mq*1Uc{p3Jd!^S@0c$ON?!lI?UrWGtmYxOcbzyxYMJO)it9; zR0L|@s%O%JmJH+nf#@DVEh{ZCNir5!FefUAv^x0OQ56A5`*^#y`Acf 
zq$`MJqcF8|N*?+iEvfLwv{EHuO^Aqhk4w)}aOkX;z z_VYyM5(pdHJcqn&uZ5bvxU}~5M5Y|kaH?_wL=5SfKa3P#4t0gJ0MTD0uG$C3Kgg#} zi5|fL#y}9CWgtjl)`US z0yB>SM<-~P=YWBdz=Rsy!X49lCY#&DZmi)vaEuzxoJI~Gqj%ih4m@Hsv&m%07I5SR z8BJs=1>$SGlb5)`{Dgeq2zw-f_gPk8S^*t_*?d|GC~rmub=!g22) z(O)S}>{!u;_p8u&byJ8 z*O^&}S_p;)^NuH0-&P2SH$p_3q?sY8LmCCCuiQ-?)TCJBah<}et<1_pX`t8y1b(O} zdK95S&~_t*sOYTMP@urfL*Te=uN{#C#w>${3Ty>hz&~wdYD?5sZkb72iEeVd<~XKh zyKG>{Xd?3~Kv2P*o5@SuR)$OJ&B86Vr*>m22MAP08eLcScA`xVtSA6levK09xcJkX z-&4nYYnd&z;7-#zv5gEqbaJLOMtVWq+Z;x=KpL(@=II==l=EuReL|~E5Tyoe#Mdy) zX{|e1vSGpwSc~KRl0Uf{Z88^Ca3A%%k(#<%P>NSe;?6&aB{^`tRFd?9BCw|r*319s z!1%bWklFk$RkLOnf0#SC1VgtMDLIA0x2OVTyEk5Q5i!Jv-^epl^H7oM>IEnu^SN-@ z{Q&Ng2swcc$n9>)bvL9chnj&^B`d%ISxiMhhq@=bhi27bT)jXCWcT<1vhNQGkrm*8 z>=!>k_EF7h$qIBpcF!Lm`@1k%0S?G^Sh8W`P7AnduMaq{m^9pKg^!Nv_9Z;}QG~du zy<%IwN$akPnsaTyHQE;aJcU4SBfAFz~D)3d}qT9Ce4~Ibfh9Frnz- zzG82n0}=Rxhg+YRvf>C5+hk@nbUonvN_q?LcIoBFY$DSp5LtLR@{{g?K|rVr{X9~U zBpipUA=evdB82OS{GsUXABYwMJ+2o+1jME_m+SpHlEv>6NM*QBXw_IR$93B$+E7y8 zJ7KK%wbof2Su%`lo>rY=!Eg_GEC%z@j+@X1yhqO)^kj-OuQWVPjntv9qD@8AEBkc< zzhj);prgHd=#F{r&*9Nt(-ua1<)WT53#f5k!x(vsc2)=foC5S4V1aNm0w30(J|U<> z8U?Gbp>@y>T0mT>KjEm$0%2YP)gs#FjjxWk*8AuDCNA`=CRnn@Q~_yI9BEzzl%z2k zfn#?*KL~lEL%ib-*VcEt8C?(Yh5=P~Lp*Yu$Q&ZX>=5t7>Ua~pGm>?DcVbgu58mJ%H%{`nc}y!CfM)MKt=TN9D$K3}|(NL5suoHoRa!1{MBC{+6bx5ONRTD^i^y|~+tJo2IuGGS z)h9C3B^l$lQ4Mixn1Uq4H$q0UtkqiI7x=qN2!fHDZxf^HyEBLn3F&u4JxUn{p)wB} z1Tp0dqPiP{XbdBpr*+~R1oMOqVj-j!oI4=P`oUo9M~&Oo9vUCFy@r_o(zxxRPN@EX z7}x*~T=BAv0ythO+&2^u_$8XPwn6Y89JW10chM6mcM?YR(G zYiTn<0$V_{p0kCvpQb!mLDQ7LsLrHmWIZ3)3ZOR5PT~-N@{E z0a-62vr}eaSSB5ou7_o35Z1Sb*&=6mno8YTgAU73vRsFy1#j#D`5hL2SVrr&$aGky z6(fl3uq=cpG%TCobvG<~o+@rzdMAma;5x6;Jr)9SXBN>xI1O@7nVV^}?jlgHtqIJN zK!mBZJ6jhFYPeXJ^mc|n?^T{$)YdEM9f3o?YR1dLW2S;=yqY$-*eO$QwGln1W(M&U zDbsOy+*6TkDiU8;Xo7M+_us8LHv3urmyn0;?1evqwPp&VhoBY+bz%*xef7 z2B>lfcNVy>v$(q(IX;`q0vaFJ)dv~6S!Zm>Wg!0_gU0ngK;yDp7SL#~s}C}CL*uwy z2GVE;jfp=%qcE2RG}hPE`x(Mr*egY#s^{*g^+|7DwV;iT|I&a;d-t`8i>x7p9Zabm 
z^@#heeF2Z_=dd4d>-co&4kUO}JGXveSx78of(U3xUzr(iX8Y!Mv(kWoo!TK}N-IZBKSHT&j( z)7~R(WU#enH6B#9xZ-xuoLXxLWp%MAz+4r_{EFC~ufN96L%li}hmj<)|6bc$(W}LY zDf#_am$?6=PV}l1t0#d~nHlZe>`o=eiOdmF7*ANi$^zozI<*HNtWJ@m&I*-J6yA(t zRC5~M(G|?y^+rEH5zAN+t}I=rovF+xQw#oRY(}ZXbRLa0Aumo?#qNOx1PS*wmqNJM zZ~0#zf87B|b}|0CUIhGKT~|wWjr4Ftqp!(h)$O`kX?Xkj8Z>?!{Ar%F z%A!(ne;QT%oskuaUjfA**Nx(TO=qCk;a4l5_=SOb*2UHH)f_Mszk|BcTC*OHt`z?< z0=L!>$|^puC{;Gb#4+EJ!;Cz6c5y+z&Y!S%a;`t*^3}fYzQojOCS^qJJ286d7Sx{n zV&}K2y)fcTm7>I4>3%u2CqJFletT8>ia_m&xjVJ*fdWG9anpdW_RJSm`|57g{-pt; z_7zZjf`rxn8BzOE>OCp{?~Si!LyFOLvHL`>(ufDB&fMomEU{hN3DvI=107G~##c#> zug=8ysysZtiYohioO)7qgrcv|jst5as_49P6eYJKs-Um?H{^!CSK(%FCU>Lqx8-O+ z<*T6b9X)*DhPYy0ngfQ)7jo67wWh%8XWZA1AaH99p+KJf6%$}Ck7M2^hZ%W_Y#v|b zJM|z+_9l<5=9Vd<@|_s{tk`<``9>%OodQhxl3#4000h)e7;$cvqQq2w0U*@V-%Nfw ztNcSm;n*cGV#QcXx(;2BwWboEJEJ;}gmvKI{Gm~17Cy6MDNMfI+#)fZM_0d)?7yqx z(8_s=P#9A!wLOo}VCtZ1h=glgpUE26XFY>2opuKXe=vn@^AwENptsf5i2w$+6##-O zc$im2F9w~;)mD7ElRi)B0XM=CGJ*z>s(5Ag7ib1~2S^Wq~ zV*BX{da80-VIbD8IkfeXg-N%nIORMaJ?-xUO)N^fV{nvLEOEIEijzg@lC{@tpTt4# z#rW>0eH!J912Aikg2~WzIjik8C8g9T)&P61p4&!|I5qO%lFLo(iGw}deGynS_TcPA z_U0AyYY5aj^Po45x0DWQgr5Ij>?ImHIn*$5C0t<{MHd`EE%Tqw_Z);;tN8bRgu1gmJY&j0H0dn0di>R#M+! zlM^a##;0qm$fp>X0#@TXFU#L4Ri#V#ZnO9z9Fa8ypyK3_IWWd+!l2dk?nzt~g1xmY z*MRTM@ax&E=)6xj--*oU<$tnueC2;UZ4BSzF@HG2enM!T^o1-lW8yFeN0?#nD$2~9 z>x7y5c)stYX8rQ6CI^;8RUC9p7_^$>mMDqQLSnv*DbKu_XW0ca?Saf5%;8A8v+A|L zjd4mda)n8|jT}$og7?ZxvMbT*)kAc5e115pQkd7^ui)zkNmH|e&|{q@`cOOHq&CLg2b~&_4_Ft@ zYtnL*{zTfZa&df^P~`n8_&N~nh3$gGrWx9IVfsjXA$U6cee@9PS2e8&Uk_q#BbwI2 zG%jv+5J)X_jB;j^-Pp>33QV(^=TTeA194dfv2?b+a6+DR0? 
z>!vr8p;ZU#$t~!$eEXkW(703;rf&L7+v)`kP!G|_P`&h%rc0C16FV5_xzZf}i2uu- z*}~0{B=j{`UB!|`m#m*Bh_|2`xyzj6;&dJq1d_@gAL&TfL1(=Jqo`6kk_0-~gpC4E z#?+^1Lm=$5fFT~J`r!`0syM$3b|SyOt76CaU_~;>44$-+*ox!WZUu0eHx^C%dBySG z*f^%SzrjGTz4FrR-%&V!KZKWm-M722J5j(TQe1J+NwzQbArnYfE=Lx{x$Lnc0V<2g zW$ka+C;VrB5SoBKVRUQEzXfnoCko?1LHmUB_hg^&609xY{08FLyn-yxiI?Yl!GtT$ zc!k@q(LaLp`p5ZtS%7}mpL~6CrMSl;IJi{FQhXOi3sOZBKP1P^!vhik6C-v|q2Hyd zyVoV-4*cnov4*%%tRXHEYlw@)42^Ye;d-;}^DU^r6rA}zT>XyjB5aheidI8ra z+A?o7Yxb_|FnB|&iEy07feFOvF0-WDED*4zqm%h71UP!oNXHH#;A(~709j^Jp~?GQ zE*d1H!`}T#=JWE~H|+vi${9atMmr=#&}v4V&nV7Exta=YevyR@DV> zn?-iARSJVkg$VOD`BLGnfCUW;U|u+1b9qhx6e8s6SV?xjWiSIj)Q<|mh=am3Ao{Ibwo;63gFOqb^cPKd*lA4KkUL*;!?B{+)xBG zn^%zGr@AWJz)of3EgU0#bM%@EMf6eIcoi**55X-<)e4l8{yXOGOQebm<5ld1vbxPH zsFtcnZK9!3`x+Alu2zT~k?+mVm592Tsv&WS?9A+5{^~Dc3O<}OZ~fz({aXx5zip+aPSBenbZ38LMr zVLXoX$$ya-bSfqc|@;+JSr|myFuP zr9y92UYPwSYLoMu|5(4pdOTP1ZlNBJ5CE;m`4?@W9{UPlr+Vxm(X7W!g9$NFp;rAZN~^!Jxm!P8ybE z_|WbPB*yh&9$h2BnzbFt0Ui10z!jpoauyA4`6BrUPQ&AIPodNBz+2FPzG4H;I-E1W+Pj&GvY5dP-N4X6 z3|-AoUxvmp)Q6$HnPVGe)0BuAFPaZjGjVZWNf@)mJQMpSe(<8mZzE-%V9vrmhoO!r|kfah-ZnZVO*%VO-p z0_90`tLcD1xH7+rNUJcHqKGBK7v7x3pC{P+|LmHIL$j~T&u zd~5a}*6k+z@Z~TC=3JI}A8`Tok5E#j{$d3}KCjCe%@;G9(LS%u6w`fzvsLT=D^NE7 z;Lj)gd5}Nv@n=4TSw&Dxf>A1eLj=ObWT8>RD;PcvFf3kLp6kpA&C|fNAvSK1#+5U! 
zUnqSH@bGXp>N{{t@kU78I`f|kB~I`R3Z;L5c-G)v8rMRcD->4p_hLNihZ zCq>Befrj8~)3EUKs)j3wr%v(IG5n0=TVtNo^a94s(y-7xM`6b>zFy<&8LriE6~k2; zp2hGL8WtWaG%R>d({LkUk{T90I0j*}8-!&}<aMfe^n{g zq^&dWBaH=qQ=T}#PoC?{3pRdl;>J=C4t7_hcc>==PEOXk)!R)$$S~b z7t|7&`sRFvKAZ^y`O}|2`zwej$!-h-VFFJYVQ>4sSu0B9_65S`?-tibnjnS#HxpiC z1TehBmu37}A~9>sJU*?#Kc-=E1ougLtGQd_3drj1HtuGP<2cexL>T)j%r$tLLqXpR zAQSF^-SW065!rTbCXWwQ^n;{SgPQuev@rU|D{kt4bqP&Qa=SgI%IfF01 z=8%yFT#tC`=nB4afy z@;nP+D(y}-&$ja1Y)-KBj<(MY=cwR5#*_TMXP^JJ&y{?l?Is^f5vGtAXq+h_f5&SYEqeT}U*o#LFHdJlGxbOzP>>p$ zKdU?o00sa_w^?>3xf%L}5JV#LCVTTfRFmO+{($c>s83|xkQCjHwCqA*tP#1*F=VXT zXZbc%T>Dn-i3ktvI7d1>T(2CJBQb@;Oq;WVE**zno{LS z0WVE;Au{3V+xK+-)C#wXP*!ZG0B80*L}U-g5AFJL&;yk#fpwM@zrTQCH}T~#SDqfw zZmt3*yiwO8Rd#il_V_wWxb`X3ok_4*T+s@vSNpZ7Em7mPF~UmKk7A`V$I3DlLnl_A zifGU3Kn%DOGjpm~Vgn`aK!3CQQ@4T|D~d`2{Xc*lBp}M_kz_SLJ5_U4M}<&X7Vy?2 z8@f7e6stTcfM~8G4qH8sA)H;v?E}^$zN9Xk1k550H!?h1!=f<{X_&<~GZ7A70K)s{ zJ{*lHfn?1m1!nnQftfQh=XNaMS1ZqDC;c)8&n?3a4vUJN^V+MnrLtFI&eltSZxc|O zozGx5Qtr3=5T~;j<2vffKGBJw$^l8|sKG7gE+JejsSoDCdVIV~-nmJq_u!T#=6)EF zX~fTurT=?4nEylii?TWT2ata8{|5b?|Ig{)6rz7WEWCKh0S8+ zyih0CAEi3sJ=LSLbxi2xkb?u*kO*%L6^d)Rd zEESinpV+BK#4RQMS!MSU)VrgUpt5;1x^=5gFg?xWw6oavr`}B!i14>%S)2jNbA;0~ zP=c>`IA7AagU~_VVb#KqY--AV?y#+7g7|Pf=WnZ11HSU%@wLZ6a#yI=Ouy%NEqIti zPeG>zyRI08V~ZrEHR%m&O!wGC^hxKsMvEcZ>LfBNw?bX?Jf>e+~0{F zg9`9U_)7o~JT9PXK}K`|YYvCSrF4QMPUwe~0u?(J$a|!_#kHrfysWIprZNY&pQG4; zLUV1D@-C$56lAXiC-j45n3Ml$+WEK7=m;QJy@z^=?a|1VSl(d?yx+Fu^$7<>%eQ@w zCsuZL2SGmqCF`*{+XF9V=r7=@MnP)4isF0hZ}G1Ik5+docM14b$hMH4$gG3(F!V_C zQt~u6G+;#DUe12>TJydmdMK{^IOTW)?A8by}utC{#O<=c;~f$Q5KrcK>XFvHnvgj@%U6c zQZZ*R1$yWWFHB))#iJ|~^ z@&al_3W_i+U}Yy^1;^szdK@~Hz>{P_Iq8nZCMiyvEh!TQNqJ$s!l zE%>U0iCD=9Qdf#>{0NHdcLX9$Krf8UalR9${mfx zs+AM^B+)&YaIyJcw0#MDRmBx|0z~817gQ9t8WlCT2G`)4$o8Hlf}(qFW6xH$NdSX1CQ>(baIt%MdgbpVn$*s5cNF@e=dj@gW-Y9W z^6l}|^;i>J8Lw^2HCK#)R1B4dj3oisF1q!Io44&>TGy7;v_W zrQ%H4h*h23=|~m(ZWJ<(lZ@?VJznCA`1K|J*-7XGF5ZC9T4qT?YFIU{Tj*>T;$NF? 
z5`UbCzOaba*ymhCa&qb&yz@De1n_LbyY_e9_@LZfSb*Y{K|lYepFGk{Q+fAcmk3U& zc7c{^qMSE&^A6mHUya&=O1ESTDp}#)--?IGmzy^axDW&jg>P|e;{$?r@iv2x8|i0> z! zOw*gHNz`sa7jiy^Bp_EAkb?3^`K)nGIKu}u#geZBSv)m0AGva;oC~2=ie0s9aIv<3 zm6G2#JOi}Aqd?z?If257pW#{AWwsXqBu&87{B=JQ`&z-tu2T?OfvvMm_RNyc&ps@N z3rmj?PEDd6WmA?${ftjYJ{QMl;hCtV&)Su|2r%hNKqzZp@)q$#`jWJBS&|;~C52_& zPR^3yC@Y%0guFrPk`Kyze8;bD5G{#!+zGa#h)_7sp%8MW$MFvBN zK~f_M0agT2=*S`p6ugSml|?KYI+Nro!honR4_6F=N%)kVG5*6-f^of1eZ4g$$R)Tu`{F?vnYSUv`EHbNVILueTk66NSjMzgE||Q&aUIK+cM`4V-YY%F0Ygk zpW62#2~&$YharnMzQDvD^9^~^{E7WzYL<3>A@6;;$Q-&P`bO!}Ej~_a7W+rh`A7{e zBSB}_K+1I!`;d^x+HOqxFWX;%bLO6tScvo%Bnxp2Bd6GGSK?`^1On^-_Bq}@$KXjc`OK;9W{yLgIhp{e#cTz$4swn#WzFoV z@xrXPDF4X3jmC+vEFmn0FS%<`cjv{G-+gO)Z`!3&Wrn6n{&SUo|Gr0I(GpY|~ zyA>0s<<7h+B&dmBMY+cOU%&0)4u4vKudG%k+WQUC4>og{nzQICnaX3?-%lRIqoIsQVAjuR zjZXTBmN_F^w~E9b{c{RSq%*ByRrp2S)%ZSb zDcT3^CXTndzG&e%TXAbB4|)hTAlgv@+M$qaemKR1!S_>{-|y*G0NkAX(bYxs3E(a%us4m`tCXy{9IZn|yhGZAw{O3?54GWHGw{>G|V!a14Dd24}{WJ0D|kJfGD~?}8&lQQ_f7)KtdBZr)#kpu&B2P!n>Cs)qD5i;n8x*mK1JAf}Zb_?Z9ogGFmu z!BVJm;kQ^5yaM#6l7vHx=`-O6Gnuyqrb<1(#Lv2uWIbz4+&!r>?%psyeGcV#IvmdT z@zlw1qeGKEAG4D>_T=%gI^Gn>*~`lGS<>T(+8G_p`qmAqVd0_CZD|wVeMlvB!}u1c z0P@`A&;1VlX7V838Q)-u6MzF_BuN;9PZx#`iZwFX zZOK8-@)T~mNy~kl=4Nsno7>CX@nBCB!-veuHMls~6Z-Bm=h5Rs<6zG>s8M-pOl86y zT|?p(&bXR3z0w9zO^njl;A_sGlVGZPJQ&erL7zfYPWjN4llPNxM0!a_GDqwY-ZqBq z>EBLSji#IgAbS^1S62={-Fk9$&9~dc>i!54HQ)Bb{T33)oBZn` ztu^0ng-dRQ_Q$a#4Ynte2-=UCi`jw8NBxL&QKdOUUh2FC@{hr89TL%Bebsq7G}M3J z7%Rv-=xVNWz07g^J;!~si8nvs=Cp#m9fSB;*L}OG^9CSd{C8LR@a`nUUxauMKhxh2 z($BjV=_jS7cjfQk(K~qPH=8>5M!MMn9kDqC-I5v1pF#ZDlRvxiX8?Za*8FoVC&;xE zvV5SAgO4-|4-%>04-%gSUupiF9nn95m0ir=iG2lrmNbD0rWxqG?(bQdIA3iZBgjMi zna`j5@WXlYbB^Y7wYeSdjgjvoH)37c`#$n~QPN~JmzteqrK~vTRei)syoEe&o+`$M z@JR?bv#t;rmuJjzw?^_qoM47Ekeu$353SiAlD^$+aO7z=Sh>6JXWf`+;CYHkSjt+1 z?*USm_R6d;b-KDmsDJ_v&JKRC&NAZHnDkbRpCO5Y|uUcI*<30QAM z5QU0FPvev+xOL=waL62vI9}={rf)sFelrH(a>%!KF`3?HVwvO8L5tn?0W;pb zt_hqR^5#?MDIkw)cpMEK2|7o6vj6FNB$#7_*lM%AC5qd2Nc36~Z8?$!S#8#o;E7X6 
zprR{vIAu}F?db5X`@r~1>keH=xrikjd9K=RVS@~5O}H~zq=Ysbb09b#F$~7#e8xAm zODoYw;baE*MqWsgQlrwNa z;bw4Zk|`+*uv9Kk#B?>hSK9~Uf&oItW}GhPT2qzcorGr#{E|5LF)^0xE62#6VKF#? z6icqf*+1Aq^l7Y3(Diyf?-r~UOpQ!H_ZOJV3FtnME>08(-t@VGCB)#~-ar{gd+=`00VKRz5S40U0=hEY&>T)=frDF`K6$%Bn4u1?<7zu3 z$#j{H{^ubSRE@?ZU5MrySOxA%f`rKhIw}p_*6Nk{m(e{g2i1icL%uS}A%cfp+lF}T zpw_ti)FN6yRxC93{xvdrD6kWNAhG1R0jBby>nCFc+(d+K52>Mk8r7YYYX(c1MS5j} z913g;3F+=a3Ch~*^oD61&W7B#QXGGTz`NBM(*m>fR6g~=m$pKrA*-@Cg~4A?ID)R1;pJQzm{4Rq#hjy8Ut~ z1ix6y+=Jlffahs~p98Pq#|eIh!#!S3!iypBr44>LuG2vR1it_zAoxOwf`11N#%1>S ze>p$|zZFH?Ji#YqY{nQKBE{{Kc`M-1-moOgoYv%nC1mL{9ve+jmz;0t^&g8v*KRq&-zLoHsmEVe z^iu_YSD`CRhTz|>3KSOnWmyEj1BaSJf=`fG@_9MwESRzi{&zyDmckWhkAJk3xd*{7 z0MF9|zZ4mNoZt^kH>3jV|Ui{R(bJZzS$ zL&(^S(U4vUeqqqxs(}M1b%R{Fa~~5iTSyg2J;P$C;P0N-yWq=J0Y4=8p8=}~KJlgt z{yjY2?B33KDhNIc48dnBWrgI+uhp>78x4J}VDn3ETmq!Rqh z2BXaIllYm9#WG~EO>o*ED3@v#r@`G{XSi)GTYy!m)38U%Wky`Wi2?k{G)8Hk&~yQa z1)ZAmp_?Y}mA(jkm(+e3ii)o2!F@0XkHI88Pi11B_jSNiNTAZ!8?^pVPU%3*8v^ue*6~ti@c1@}YAhdpGWXL;O=3&lc?DW`2L&zV2#V z$KUw@7#(b=l68~wc_#tq$R{?+ducb`xdLn72V;t-b2i4w6C1hEiMun`V!g2h1Ta)B zl}C7_WX7@fxb}`II!c1vaVP~IF_+gZ$CAH5we7io4DV{Bk3Io$)DAl?43CFiAq!nQ zrqvNnWPGVT5^~GP77L2y&g&>CNn`U5J0ElGe$q0wTqux~3zFk2-TBf^GE(at{-x#L zX4U%wbaI$)Vo#c{SM-^-Zf~o$T)BZt#=dS9d&SZ*b?>gP;Y`{t@kkWo9uS9 zJ+jsNe$cn0q0SLFN+)u&S@ph@W_z>R`-AdinIxiUJ4Zr8Hox9iQa(B5azy%(WSIh`Ho0L+Jbp%-$GKlwvU)Xgd+ zwzbA(Txu~pISqX!R31*pB<&$%!d=}a#=Q{ab zYM*29jALybC22!hPX8R+wK+X&7Ld4}6s<-9(D(7t`8{sQWZ8zGJmw%N)LQci3T#e9 z$(XXUjf@duYyRxZ9u`Yxivh;P_=UE9|FStg#nUv|$W?rKi$7nm1jtrzv)yPRnwj`0 zen{;y8+osNKO67n_x9yhLXNT!H}T~L{>-w74@L^hv#*6X&t@Ay9AoTDoH_bhh-3M3 z6n{>$I6C)K93SGz^6X->y@7YLt-P!^{g`7<Yd7!WY5p#W z>&0@pBHa2~jP5xJaJ+M+;y{USH~U!l_mIx`9q?{`CXqP*jwd%fe9*?{ z3!Qtk>+>jiwsj7}6H7e9b8@hs5g&NPeq*}@0!N^Vp>o{f=)8g@RT;yj@)o}|Ajg?i zB+B>@buv}V#fknL6}fqLEXNWA{H+S-3;!?_PeHkP_n;@>T116AkCW*<3}4wzKM~`Tu;m=cH0Sq&nuHIp zt2M@+x)FtPf(W42K>IuIZ-sZCNDVEM)xm`G?4TNCwfW*hzvcem?urR7MW~ELp5b<> z5HM5T$I?Cesgu6mDQCFj4g>ahN9Av4b!$NtwS4q 
z2hTE9P!@aSjhe-5z?>;qEyw1L?@3g)!ijtCZ-BcHfos# zw@qxugLGrv0cUo=H=EFJdOUZYBc(bJxU2Fm?CHxne(TQWZni|U& zbpi;O95Z*fu1)xMT#~D!-mE|@xgI61NIk3gc#hlLQS+p z{{f=h-(F@{3$VsOhHi0DY9&KP8Hl8Y)8 zOHM>I#9kN2kO^D&dNM3n*r95M7IsvhM-182$;ni2y_N58lLOe`KzAfD;SFIGBxy%R zJ21>2^}*t!un7y~c4w%x6Eg_czS;$y3@*V%4u~Byvem8u4}(^F9))MCF_YZfh&`y( z2x<0UJX{7a>_LgU7fZHxYI-YTb~f)^E>8t=C~3$66Qum|7E9YTbY5Y;8Rc zo>@@q$G`*Dnwfg3buGX)x7HUEb~9?d2jJ4%`uzJ)>;K?xVqWyzlts6*HMHHeZuZ(K zTYVSz3D@^+)V?Cc4eGp$HM>STfZ5#23hZW7mYbT=D|_K=R5ssMwq~;{TO*Bo#DHv- z<)l*3xZk1btZYp$mE~FH&8@5qKsKYYzXZ_ zGvjV@62+9)D(HDBT@6Z(mnKPakb$k5mn`NMsKr%y(;b_`WU)%2+=fc95aSs5p_PPp zw>$oSXfsT=#+^8Z7u(HbH>}&75QE*uNn*mrFS-f3$0^C7#g|`lk5e)jS|o#^h3?;! zT}R1aXps!|I3gwTZjqF=j&V8=At)lLfJbtZY0rvLLkY6XF<4 zUJ@ij_7vAq@xfjET;B8>Xm2d)+Ex@ZSx7Wt^eM?Nyu^*b1SulO;4VxL3yu%Os@qnI zm!>ZXjg$QMNSmc(2ocAJ6@j8nh8j4%p;>END@j*lgWZ*dVe$h3EQs7iIbw99L$Z2V zms9a@?j(dq!@XIJG|pym+8moWg}66O6-&-SR-V+yT%izfe#;C&_ENm@8{|D?=l#zW zsr_v3^1(O@c`vizY7Gy^qSs#4zr@l_eamBybn~-aGv7ydTaKf;IKRu5#~02D%eiy^ z9UVtuXc2d(8gtPzB#%jdel<6=YI6t1XZUa|hG^U3rh>FY-c?LrAh%%~?zQ1w*w1u# zzF;VM8OPq>Fb^OOcWN$>ln=@kNDK!7$!$&oiSeU?axm6#N-n>{CKt2UuEz$dvrx~9 zBvt}8EGj~6aLq7l5|5p_BA)w}$C1=Lks4QT;c)J(M5ds*yLn0TC7tC!Z|D2Bx}(>7 znri?lhitGEnDrj{OIfLu-qSo_YjkV403Jj#?~xxYm@@8Z-c_P`PxI&}2lRB0yr^7Q zS8f-o_oynW-uCU=`2e^g^DCaFg|n1P7E!PAR5AEC`G|A{KQaV@3xH%>IIqm6pq3B* z${e{B>SvWWjQU=wgKDnxE}OL6DFk3C^W}E<3q?tR(^B)8Vro_)8xZh5$S(!Nm5MoK zvSvn*p)R}bSn?T+&Ajc!cE(eBT?|;I!^|9@di)Rz;4bQ*;G|8k{9Oizs*b6JB2&0# z#ZxESYvkp?Dg=TA*G0$HvE*IIUXePg-1$`#gphUivMz$f=HE?mc7dB@pt0Pn7A^`M zoDIO-3FgYo0d-&btnHzH+)0XQ8xXt0!Y!~xIIXy1=+{%O=-h=I+p`80=4{zszvp*# zZnWRk*&>|>OHjB(3dxl7ggX*j5uzUlxE_N;eEdhgmvHcwn3AY%D*_$tGLtm|Icn>( zkEVJWW(5X+r@^nG@K zgyuYqzcxt1I+gT}sp7=oF3{vldG6A3_rd}g{QPpBg|LS2;9}u;vcMJ>bW0c<>aqf{ zBps{HfA_Kd=u^I=<9v~TEsag*S|=xozu1D^hAZ*B*#ie2bK!2V0}z~^x}>=qBV>f9 zoewNhN?I^pYcTB@Sa0vDra}NGQacXfnI_VA&hpQSxFcKTwk6tQ@B7-p-Ae0GNPa^U z?!jW_luMSbZ;f8E^fRm;!0ec}`_XU3C$+oFbHzma62E|GAc=6AcoT}a3-;7^T`RXe 
zOdvDdsRLpBcwb8vh>;dW|Aw)$5eOoQie~N;-bXR*BAEIUQ#KC=R_aFgM?lpT=_Jg38h ziQS1*KkT5z4RfKc{jbF4yqh?1IG7zyn1ckYws}_tgp-m|f~f`mtubkz(`UUW@pO5Eubso*}aW?$th46ol4wL?d`50Ku7my`G;Asdqvh_z-h!K?Z6xQ)4-FZG1IkfFQzGmbyM!w<>)5@}qvx=5a1n_tKi z>r*I@ontjyAWWDOEX+}OGB3-^q2n5s`$JKI)n*q9)L(#Bnyv6$!jY31P5Y(`igwt= zN9Wzx4#5HNf+x73nacxvwAxv%cu^rDA3kX%F`@;9kRyZIBJw$)~{f~T=OxG5VX z4Ef_F{+fwLNXfr8SA-G@Z3PB~GHxqzE6s4gu?db-ctKjsFb41jt?wbFtUb$SwLUw3aaK9~Z?VWWG6286jZ9FL_q4Dg3PcXE!gV~1NsX@s` zMB%aGV+wHIoQGvFJ0Fq5Qw1a7>nA^H1RU;VC7ip*&I@@|2YZ;tWAM7iy|lURfwYsX4uVxd z@QNTfDF{vug4IEAS`e%Wf^|W#Aqd_Y1RI0k?}Ol+Ab58WYzl&Z4ubQ8;DbSMK@fZ* z2rdeO&j!KPAoxNMTowdh4T39z;M+lPRSoZv5wwC|AVH~SQ4o7j5Ih$_p3g+(D{T-) za}I4{cy#ojLtA-|*h+1HI6j*x6CpL^e){3D>j9r|>YKix@s$RP=d}u4vLGGziNx(b zvrMoltopC`h9C3zvmSRe2-5kARPFYnQf?$qo2s}BfWR2%mXP-P!u7hsk`8qOq165Z zs>p+K7s*+cG?d)|@Dlpb$WiR&$ZKNF{NYx&+2@<}SoNDr!thtp=a1&G8m?qdI4uI? zj>cFg*^thes}+)5#9lVi15}fR^0qC+lE(pTVp;5JWjD{Q%fPA(3-R8nygMpp4;v0I zd9Exrw-qyu&CNNm39(tvmDR0?C1dqDIml32(iWTh95<)TYOTYiW8F!NUHfl|Gp}S& zIty=_Ayt{Q22+mi{RN*A;sX>_V8M_I_u)cBVrrpHXP3Dv(CG|#g|U(BMaI`;nR_b* z%LRKnuqzPELjo~i7esOyJUNr!4c5vc!z%AUp}(iQt!@x&6zpeMg*qit%3{pe?2p#Y z;PHHcX%^-V{@zsoXZScAW=@fVI{1qs6wugH@J70l`L= z;f?^c)euzp>N@NuY}1p6^hAKuEzh z5@S-Jc-tUN6i+z%6BX}-+N_GVKccO?LjNK~%F!f>mFR$|969HIS^{h;MN?rQCW7TQ z^5}@@-iO45DZ*7yCd)3U7=ink2mehbiC+1bB7|?6F8U;Vvig+KN&~?Ye6C%X1$`>o z0Gv1~g0hx9D_>`ZUJZE@N9Q}N9zlwT#Oe}QUA9=^!0F*m6Ezz0Q3F-N>jQiZl&FbK zf;$d5#zVb&(jI#!K-_s58^bU$;;D}P7Iw;2dl$OfWGB=VlVoIZpi4A#*a3GllV%l? 
z`;M(eNS9tfGIYtPE~$D)b?F{~m^nsuNdQHcR5dbn=@P)B-lK7e4YrXW zlLK8k1?i$oA{ReVm%h6xt1f+tXb4ui9wl!?Q{ik7%0qs^bb-y2&@8_6Lysy|k0@fW zT%tpA*hl3xWWNaL*)OOR@$@lgfB7TK)ohk0rR=lnmC<-Ff=y5_;UCoNr7uLiC^}_w zUB-wZ?{R}<+0)b^YS~(4G-TguSlC*r>PbTkcN(m!tx#3FJ*sLQ!l7i>52@O4XO3i4 z>!fzU)0fDkrMFc&5r>Joky2K5tvZuF*5i%P4L~nf=ZDO7$9XXf@c_)jx?n>VtrPn*b6AZY^xN) zKNm-}!b-7Wnt-TM%qGD|ayyf$3g1+u>b;gMlMCH+Tc$f|OMB(i@RHZz)-9#u*6gqW zN@KIy%Idz1C98o1j$O&p(%9U#x|iVBmDaVzk{2Qx>?(Jnw&f&5t4_=&yHqEd&^Te8 zXk=sID|tVt69f$D1o8ASBOtSz?ZW+SD$0{;5$^S*6TeImrdikromjZe*9lRDMV>iH zR{(E)1nyXU6Lu+aG0%D^Zr;w-m@tIk!RW}gW=`!}!xNnGYo9Jo8s!Cp9ogYn-$m)b zNl2(98?$|SK3aov)INP%KU82twils{ioff7_r3@_HvqX?Z;+fw&9zX*YM~ExJ)meK zScC)T5;$EIDg@&G1%zm?Cm_pYW!uM7|7)SPNTCQ!sB8)|hLG*`gcOkEd$BrMZ(oG& zgMV1gT@{-?X9}~gHU*!rN0B~@^Z}(`qF-&!*p5=(=u8KfR zqhL5f-})Eiwryg6$as<9nI#J5)|VPGai&t!^O~B$bC|KyhiFr?h0m#leCkA{lF3vk8rMnqJ4dZ|Us zTmlX+2H+ohx4oCxYnm@q@MQ#Vn9W?aw7yUc2SnV>dvUTZkfGHG3@+x-I0dQ_C_|}M zhRqh?%qA7j5+p+zEL2CjGJFasD8mX8IdCq4)0JVlK-|BI5bgB@qztDk!{3!!he8op zBt47uc$|6mlNFY#EU|}MHemIf>RbVO7RbdxW zcZ7N`s<5e=csy08QB_cC*;U~)WPmCNmvPsxssi&xRiUy6Rd`aeHGh zS?8X>CMejwT4zG{R_9|By1zxrT)ow~K*0wRykRzTrPq1mbyDY-CrO={5N0&OG-!&b?ynQLBYP%Iup9LI$xmBLoHI~>aEU43V3}btUT(> zT$w!>w2&i5ZWcjxu}vY}tYdp&H2`AC&&52x>tzTQ?Xj&47nuA5$>phsh^!)YNq+g@ zj`;C^L?;xRQPPL3s4qYdx}*s)m{H=9clMUn)|h)fp&ztZTxxnYAD|pGsAlq}Zt$L` z{gOp2Ci3`wbrTDu#V=1)aqkHW8UfjIdI+SFCkfYGg`V9VQ&2JV(^y>xN`v~*5aH(6 zg!A^g@&5j*skjDlpL7`+do}UlhHm(GcJpBLI`!3)380$?SAB>|$>za-P9B{;v+7<& zz>Lj`@J`^95Y46)iJ||T@}%8YWhn3}a@WEtUBd$xkMi z;61}YHPh5t_Ti!eG$N;6)hw3}USoD)@w4!BWsT%x^Vl&_x!cqvoWy1(Y(YwIgqfOw z1t@nWMu*xrA`OuPJNX?J?Ef0_9yWN~^p(HQjn+^$s$`u#^*FiArk4QYs@Aq{af#Lz@Xc8l8SIpD>e7Q|R-G1^`Vo)!s0yild_Xe>Y$BLeZv zz<6XD6U`J$9;1+z02%TXV420j=3Cf_I&~iupb9X+H9m?9nTds&@zF3a4h5Km2*fjk zMH-K0iX|Oku_zrNnL!}K-S0IDBH{o!%GOn!69Ne|ky|1MqrtwVLcYPR?1$vBTr-&jSh-jjL?wx+>`q@xFrYzY5@oQj=MlSZQQ`5 z6aSt3L>c;*biet4%h&+l;_B!enb8P|`_7WIWujJEialEzYh%e&73FLexrc$jG(cb$ zF`a?r#IY^pUaao3zQX%_nW>11>tjSCg^<)Rifbi_g&_8PC>yn#Qxau0i#d0KR^5YS 
zFc${^+9fU>%>huvDEv`inimiT^A5+qw|>*2ZnzJeKf$7utn}dO{9N2!E8DLn5ihz- zg)d8y(XqNDDv_lWA55*y~3ihbbg^21sVGkQvb3u!%OfThPlsS+Ra;SU0PQ6GNc$Nywz5jw8LuaQcTKe;mEaeCl+Dc z7)@fE$o!l;r4WT91H&_OR5-4#{3hSBf~-o3>JyuHZJJ#Wi8arn^qAWhv!<%8*x0P~ zR<#MahVNcX&dnOb#p+@w;h7h!TbsZW_sv~;CZ2uqel8hR-@%H!H)6;qI;qK?hOxHMVA7hT zP@JEX9uMpdCNs0mSx$8RKr`JoeS22gTbTMe+VwO?HvmuZJ$LFIhx`n)ND0~H-3rI|%MCXY^+Hv`nG8g7QvQWmhHT#!u`BEk$^%^s+J4DF#Ku0cSQmYZfTqG^{2 zed!G_qbf>_Qu=amBa$JAaZtfjDplDEq7P9e5D$+bM0-5}vq{RyEW2AjUZDuwlcW?8 z64&T^Ae5AkkEW!6cpxd~pQnQ8S^sO`h|nGoqc;= zBOC>8!hb1w!&@W74zPD($zoyhx4ecb9qG6qM!F3wQ5K~CK8F_xT+OeoU?|&K5bGcB zN}v|WZwSRu|JB5eq7T##YCjuV(>nk|kC_FXP>KE&#+(Z4Cn^s>Pv#fF;bJjPo_aXYu)f z3~V+tD29t*%i{Rn@h`+?FJt|53RtTu?G&-u$E#I+36QXs%+RaNenRGTT3%Lg=IYf5 z-=g5JC^&QV3SJ3tl!D+n=JL12X0O0Yrp-PUklZh#Hk&X%Y_l6L5`j|#lHF$iIs~FN zdzFySwAtqX5;nU7$!IDsO;b{U}1phlqxY#TJgW^c*)68LEz2z%no4nu*XHv5Bd zh=R>N|}lh|MO5&0yS7nKt`+fTH7)s-+f|s!DgUqGGezb(rKz zvDuT_adyq*B7-L~QmpoHJ7Q z3an%~(xv1MdijpfNB14&V7G6xFXQ0B?kR9*cZjG+<$JQ(2O$G&wlG-7(6HIe7q!_r zpD`xWW`8Jy+!i(fY&N07Hv468%3Bxt$#(nMGqRf4d5GrR*H5$C=VeDa649{RI|p`q zOwV@v5_k?6-ISuSyf#a-+b1YQoDl8x1XSsY+U>2$L#+QQ6oIortrl<0v)cp~sIZ7G zPzOq^sh!XBd^_KQ)d);8O&9IRWUczZT>;}i$OfgiJ9<-gSM&W8?4G`aG)(Jm4 zfX-reNEze(_R@YVSbJ%{KDO0|oXf?Cx^zeL(mUPUNneA$Aj5i8TFQQTUr={tAmZV> z7t!4aSNTr5A#VLZoa|*kz+^{w6oI-FP_P<>gu>Uu@L-|Rt;STc1`dNl5tvZGr&GeA z5yJ>VqEqoe*cXgF2(VGB@z`n1zS?vPlVw)|b;wuN!+=Kn0~RULAH} zAAHD~tJPpRGOflF!ns-GC~8#*)x@?SC6s+H#71IF%@!V~77E9m*~}N+3sEXDfyHRC z?)!Yn-mGoFZ9xKF>`nH}>2HhJHD(jL@q#?m!kp+sH=9r9rRG+pi{0JoG3MhGFT}1=#jYV8$uAX( z_a;DE2y6vvPCDcf3dv*lfMf;>Ip0o}`T8)F41x@MAQU|_I79TxMyu%khMdka$xtZu z^Y)&xXc6%nqRFHV@ndt$}WC)(j`vK7G9?o z3g?~K%oi2EnjXaOb;;hWZ9(x9z^vjo@OcqGw68Qo+Rn6Ji)&tAiCVQ(=Aul_Okn_> z87u+I{H2+)jC@@vJs>pvt2acpnlfZ7y^q;ZxZ=4rBHiKctWzD=ZqSBYiLz!a%*B#V zDpU@}k`M&~TWP6Im#do@syMfm5@E&na28 z$9e*@MrEqC2bt=kXP#=-cA!iNU{;xWq1BTqX@oSHdiZ=PbO9@zTF9;pQYFo<$nD0N=cxZ-v5h zD*eu;T45FeW`TvtFwj>JrhVihNvG7@uy2Ocw4ba}vz|;!%_MF3koyB>)A|%K+IhMgV)-8+&4qfw-9ixXXyUr`;r523bJkmqJ4`^suV% 
zhRUy3e&zs;lX|0ZXOIOnE*Bb_p$8fhf()z&bAZOM-e?>cWC4vyLPIiymFsH`@pN>J z<$nTUyGfjrC1L08RT&5liznRXY6Tg?jYHJ*@ed)3HXc$Nnd&U8rlohQtIWG!d(SW)6RR`vfVY$OqPM1>SiLt9VU0D0?22xnl5Z zFlKWkqwn}&V+3_2mfV1WP{Q%{?8ygNt=k$;H8MLsn#KWGszxvley)7zXR+Ep?jyx3 zjVeg6sN8@o6{-8RYWEYY&7X)UjArt0uph{hq~>ZB<`T;02?pNfLrrY@l_Tj)uQ#b@ zsaoyjLqCqyMaG{PDq`Hld1D?Kw)4iJDc&|78BRZ2(iKaV#j}n-4?r~UFyd8JX-LNg z^Ts_PT|Fu*(q)Lo_;WXG+rzwZJLZiq!1(ihoi|3$$;OFD+0V!NvKUW2BW;0^-$;T= z%Z57HwWqVd$w`b?;X~`ODOH4O7>^O*IZv^VwqThAXAz6Vt~$W-;G;RDU7ligN zc}-a7KXLaiS|)5XTqY$D@y4h#G5CLx5n`MaoPT&)i3vRK5`8dygt*HNY>fP^o3*~NGq^+BN|Cpre*gAd6Ol3vvT)s z*f;_v2LIdBNqhE#dmA2mQfwTMl>(+bS5D=koP6`Zvn_?6+D0gvifpF z&n_3J*^?BCzzX$Fq}M%Opjw&~iogo>y{#`MSI|jMgy+jeD^sZv|j+ziqU` z{}}wXfVM}3JQ5Q9uO!LwUHHF1xg`WlJ#S#4zeE(7zHFm8g)WC zY${!PPiycwE|Sc$(tnEr%P`2PWjz2@B|T(F1IH^A2R6Jw9%LEhXDvu2;ncFcx@*mm zTd#(+w#|@PE_eVPQi*Tm+Ztug&C+aoHz7XoA?@b-$_68DPeV}!3Q{cjIwBL!OK2a# z6zW!D$-g1ZYYsy?DHTFzsgOf6eCXjrbG#5s{z}I5E1*IIk90Gql8!sGOVHF)@JKa6 zZXbYnmKfzBXe`->`E&>cv@Ek7yhU8_6{iZ6GFl1=C}nhKaGY%}p&S`OlF_LrjTioQoxAqgZgsEfj*nIg~&0%%CXE&y(5^k92(pRW({kODJ-!k3mpPagDf zK`v0*RVev+dT?<226;${nLug74^UdhA-wiIptPG%^7HgS>9!ybDKQf$ZTtaBpPv$@ zR4A0ZJYgfbMBrq(3YtSWTx`=V$fG{>ZwctsG6{}zkpkvmj)wj-6yJQ0JMEBCswo3C zg&vQo!{)OJ_xzd+m-mryoOnlpXCvFNAaOrP+mIMZ}N>s|!cCpOBG%TMy7KRPa}%litVf4U!C zlO5@0h=w1%PvA#i+Pfe9pE5Gvu9!X}ZD9RqZPAb)-9k{z%CV(YjnHarLKU4##-n@M z;bHNk6)e+_zCsAj66}_)dcLF`_M?v$a=I6%?op&R?$?I|Y(Ie+%IMDbg1UyNRYE4ZU(Abz`GC%~hn6S1r`UXGV=Z*@o}z_t{4wV~>6_v&WKK61T_N zlpwZ#a~0>U-Mc0Um8jqRG6q7P2avj5A$-5Nph(oB2G@_!d9`<}1kzs_i2(~dzqtxn z;Wx7kf!|E%Aa7|jukR-AD_PG1=ho2o0bN1{`3lqWc@FcK6G`$quoZ^8nNYZI*;6`r z%e%86yi!=$Yku<}mW|=XP>A3N;P^0d;9h3-DX`Fd^D<}go!153bUf7{)y(vrTLK{7 z--WwaM-0I}Ub@fZGia``?lXyE$?k7-dwTGmHw|NsnMJ??@0rNZOF)#O-t!v*HM>}$ z2&_<1@A*DL;mC?jhH|UcFi_~qtZqbM#Z|dGIxO13Vc51aQtI!6wiD!(W1g>+F zlwV!v5zcQ3kl{1ugMz1>wEV~tOMWqeO+cSnP(+;AmyzZ*!F&=(spthh^EVzo9m4z; z8Q@1kbqF4^%=p+I>EPIppb@Fd@R^4J#1jLQhoG_K-pq$tWZ-0h;R-8rL$2_~;Q|8Y zO7Q@q%ngCOKoDo-BJqXS_mz8XRLPr*fmK*MQTdwgC*LgCTZDKvKlyA)7c?|PxBMmK 
zCwmUE%8+%CBW#ID?i=UpN!2W3@e$QDb=@N3K7_b&y z7M(i5bYr8CO1LANxG4};(C>85QBqAL_5A%pN~WnGXubtHP+(f<(s+0p((!CMNs8P+ zJW~bFVsQ#@dSWBo#g<~`Rqx+N+m0-#8NJGi)WZT1rg}G$F_TFbR1O5c&0f#VYdegG zgvS@GJ3lzjF>MewKJPBaHp3W>JDg=gB1^p=FJ>Q@UhKOBWHJ(1|9RM#hYtyW+~kwf zIl1C6B0DaF$m58Qr+7BA!`MW2|2!!L=ONZ-vfwqyX)SfmAYSF{_hWjLGgdEB?JtfA>Sg7u zAq#h!OIf^TCRsUq@=p|w#S{)%qBrlL(2#+Kbiun?vbN+Cs};QY>7_VP(GMgHn(Q3G zoq2=nwg7|R^*dCy_;ztej&AYgUX}FT<6ZIkS_I0orau~e>;XVPea`;wLFLLM#}wW^p@(2C-mES!1>e1Ir2$%^7ZC?+JYDP8h@FOV$*iNs3Yf^^BdSg{om z2Cens9|cuWI;u+n2FbhSAd$QrLaXHIro3oZa-`A~C@Uxh%#qgXZK0$k38W{2G zyZoXES7IoQ^%P062)hL=DtWb-F#u;I2?z%$!UcddyF4V4*OUc$oVncDbC3kdQ;4YK z?F0zeB7vafeUKx}IPbuyS@>0?TxOD$zX$J;uCNe7V;aF|bc@9d*J-$v;W-aTd=105N?10w zMf&2~1#^pRpR)2A2Ydu(f>NKxTOj&Z5B9D7if#~3B^RYbjszscbQP1Wm>P!DCVYQ_a*^rmL$;q>Mp5oPC8r4mF`Ue-D0KNkxq9frCY*e zOE;c{?pkpnn2}LSaMA8yw9=I% z(A`4l>ecE)?^yw)vE;S{ahLF}j3KQFSB8)Y{qP8M9G@9l+s*I0A!k zvQJJ9#)KQ=4`7TmBaZvr0bwtv$MnGrdm}xA_8NL#o6o!Fi2|{_THXfrK6#As+Gp?* zdBzecuh8eS+CF((J>TEAPu?=m_Y(U&15bQzj^AX-_#V34OEaJi72dOk_~rf3WzqM) zz7~CdetGnL$y?F)1Kx?gU$ZLuKIdKk9bRuGdyAA@P7`-@>!IFQgSR=5K>03S!qt0f za9xGoQ!^pKKE|*Eh%AY8?#E`EHmDE_0cXPci?xVAJSPymtmceCg#_qnrR4V()m}uU zCzN`)t>zLy65U?jZ8i90eG`kG9L|ABmXpHuM?r*Ab{Tnmfouo)4E%?b@3k-Pm~rMY zzZ%jP*If`ge-9H;;T)7Pl=^00DtfQ><7OF!S^KfW9JL>yoH<@|1zD#3n1pz>ALS!L z_5+{Ad%`-`!Y5Bv$guFxjDwjKz-u*nC?jo#<9pa5gy_wkTTQc6lU;(J<&?QpO z4w_Gfq4jh*VtT=DZ2I~R?7_id4?ZXzZT=RX)<{ff>UolkL2ez?1bmniAzYBFDLt3> zHN{^@uG!ozgy~j-1(|B1nf%>CyGovB4%2We7iQfEn025inOWG*?6Al(_`V&fJBw zJmwYxCSWe#dSUJ^v_mP6xvUPEyA9@}1Nogb5KV&fBZV8;L3NNtoHS||F+_8C2Q*{J zL&Qs)EsltL1HQ$F-X$~zx3xHH*W}ypM?kVq1WJemHi!|>UNS6~3b)%!g|RE*EQ~Sm zWr>@0$He)qGl??QOeiU+WtD6!=niCM$>zw)MZgu7 zmDBM~S=r(wk(El|fUK`(ygqV{@WiXEB6bEdNwyGE$E5T4FWb-(ygqRMpEj)w!(a?+W10!7Lnnt z17Rid=ZSX}oU>){l10(Kg7*+lMHHO`en`zx%>v&Y(;-sd>-qm^{D6#^qAbaIXs!Vxr$C;QS>Jv z%2f1!U_V8m==TgFcbf^-tD>`7KO`&5fh#O4OYlxv+2aH$b|r8?R=Rvy8F1DQ%gTqR zv97+Xy!n7HE2D<7NKsk2Kx$J@N>p?fI$H}KE?_ey-O9>k?vIm|hm}&3Qd-axrRf4T 
zSJJJlY`RqyJt8EXhv9?GA^*F+gBTsz-;3@5W;sW`MPRkxzskwl&WW|&hkw7>2XiRy z!`;Zvfg?@_mBj`tw(z3T%K(eJd-OdPxShF#hFo@8h^`ZD-Zzv9vS63B+M@{+ttQg!E+Adim4>x&VjPC@+0T_J|% z^gV|Cq#!!V#A$gCIt=plUQ!idIqy(yAiC5-dfHF-K^S_oX+`?Mxu=nd+k~(=luZ?G zpbJ?@oQ!A1vdqLDC3R?F!I4`VI=ZbarU(cD1Q|ZYCnW8)X#*PzZy7EBSQu*IBpNcB zG`*TQCHzx16SGBB$1m zO9XkZ?faiXuYK1c1lBj8wA}7>HEydyb5{Zd1gSti@<12%($li`R^ofN3GrNWcm=uV zyCsAEVtcDE%$+B&y3yWhZ`)ge5_>C_Ia_b_b0`Yk5~-8Au+KEuJ|xYBH9fen(@;e) zmdpdC1wBzZPr&9%y6vq_6G}1zq8z44%O+v@O2P{C2y;Valo;!;{zO=#Y=r@jvCBs~ z*)K4Vl&=utvx8u;AkIj&)V#zqF84^vuFZ%O93YzuE7A<0vV5;{dYfXHxFGFCC%MBIS z2)Rrekk^3=uQ2z!b&?`&(|tYgv78ddqUSyE{X~d$*kt@X0wgrtIjtbo`O#~f8z|)` z-zN_V^yo+-DplZq#{@PuLW3qr-z^*CGBju=v8}*FDqNA_8Iu2hp+~75GO$%63Qcih zphq{bX>LN(+@N|i3I+Lpp+^;|K#w{w_d>2(T?q7O%FfiI^XI@FB21t}tu_ags4|NZ zjYF#-UnBAnGP?%t7*h?}SSK?=R2^(80yb`+8~S{#?l~|%G(V|!(HH*B(xdroYj4|;^ddHz;fUkmoe6od!r zxZ_oqbo(uJiTvUcam&r3OPZdkOQS*r9Tjt>m99_QM*xM6JBQlQ`TE~gpU{PNh}8}G z;f0&$hwlpH@FKdAolYaXHu1xW4Q?A`iO!KfcK-z3RNUR;--l8nBop#>3F)lv_=6F6 z0(x?n#cRG6z%c$jdCjAVi+4`@es_jjS12I1FPzhpL;nR7+H1bpBXh(@bXJ>9_3-zc z7b)|#>vF4)j=S3q^ycv)=RK{9On}O?ZHX1I_T2c;Z+`tYoqBxPAgeczc(@L|dGmin z+0>h#g%Frqf;^r#k08{Ksv_Pzv4#8HX<9Fii7;`-i~=17sUTAL)od|(^S2URx;GEr zu+by9+u%%CkKT%gFBA76i5@+Rlf|R2W-{DpBzu(8hC|a80U>}O!=rx;R^4l^RF5A0 zoG_I5$nfal48V|34$RV8_2x+=&6}s&fT9Wg?B4wSP0R*H1`c7i_FW>#du`uEpr!5m zFhb?d7o2yPhe>HPbtST6dA|ft9q1cE-NF~d{6!tclq-U%MW8!qn>=F z7=e0Hq9@PtX7l8y{hS<`De3gDF&X{kv>!L`Fho$)lX3Lqd!lr-ShKm3PD+@Jo*|TE zxDv2Cn`AYk8}>ws!aR&)O)Jd4H{W?MV*T}qD84?A3VIQ23r<49lxuVP<}rRnUL??Vc#*#U6p?TPcN;eosy zt2-6tfj@-{;4?{H|HCBf4u^BuQh;)sm!_|$Q&opBDi3}QvHWVB8sgWfHTGZ?m?i6C z_9#?3v*rLv=Qpocg{onHI-&l#uj`>}rfM16N{JLKGH4#Pnf5WdkVdQ;&(GFA6 zIXo1DkBHLEv%}A_XNDa{_OKl$wk&qI9?-DEpBF*9V21%kJ1nHL+Tl|p@C59|4i8{a zdbY#QZpZ$(S3B$q$Pd}!b3#xdJAD03KgkZ?h5j_%4$D_3Lp=#)8clMv?t$se4nMT5 z*H7Pv5bZGXcy<^;V)E=Tv4xH7WyoRsE81ZXIt+3Qk-`qM#eS3>9#7mlv&OvPnYAN{ z6Ly%z$zq4MW-{zBl0C|4cDQXDK|lx~$gsmVAlY`9nAqrPW-T6u%3_CiVe4u!((Q0Q 
z>BA0_eouCoX+La-3Dvuwb<5UX`_>?2?eHF4Fo7LLcG%%Dz8xNL!VlZw4~LR#z8!w^ zcHa(L+3G6#n4!W&SxmW&>0M!$k53Q9m!BBLkBW+6hnJ4bPz*3Gqg8C zYVek!ni^{HLUa$7!rYn93IFwV`1y#Te<6f6E`@V!?!8vRbhb?riHN7uY<{g_&LEmA zH7qa}hT_lEFeh@2^J|dLM{iz7VW}Le^u-N8gn`aZh&28A^C~NZn0|cOM815BAG)L)`0|h>x0_G-(qU6p@#QW1 z@(N$t>`Mz@p0Y2G^5sGP{DnV%;?F(!K~01=zeSjHb(f*0BzP~L=3M^V!JlgsR4O`= zVU+)TJk8mXZ>>3vFINzBfBsyAAEMipFKmBiIM_b-wa-29wA@&2R$Sw?`#S-XR-32% z=p|&z*2I^`pB(<&1?@u(ipbmLh(K8Q{Je&xMXwhP#r(X&id0O{u%wT(>7zAHsyIx; z(w0YSSZG`xqH%$SMJ9Go7?FuBH7s=g3lT<{gwCM~FL?F|;n^8svTC`)N^A7hbO}F+ zc&0xRO21Fy)|$ICPULJkq=PLt!@}RBX;O}BH7sTQ3*hWj@oMvnU)dK^$;K!YGrHOg z^P{QH5^WCkp~g-#)wNa zLP~ijB9;_lLf@=MtHRF5W5bh;paGAWkIw(%*qc|jE3|q-qL4^HVj!Mo2mG)++sJdR z>B~ghvwRJ_OgofivX$IN!y*zuck{0zgD8FC_=B0Seqgj!5025WGwk> zQXw2mG+zhvJ&rKTaJenZ1bMDC=P10;8Ew;M2<%#OVkqr!4NIe48p3;yh1~(^<`9

)~2n&Bun}JN+#EL+`o+J`H9|<9=y^9y7}uJ)$}(xB=WOO1kjX4mmI%qeYFJ3#gD^AB!qYS|0c0BRln#$6rvRn7 zj>*4ba%!>UyF|l+?`#bV(F%rvA`e9UQsooH#;YW^6dbzvT+UpUJ`({W+|R#8nCe4Co487d=`A16R-&27km z8cJi;YFKJ@yubqu2+o@F|oPbi$Qf4+u=7k4A< zbMXSO-hVlr4f7iX%_quVA#AR|55?kQzFfdmARot<)A?crcO=3-qOFMN2n%|ULPLJeerTQw}Sp3tz+ zns0MgA~$7YJf2>&AM{HJ7cqY+;>}b5vrO0GX{)r}97R$WhESfVVIh@3*z{AJQrZ$G zqO@I?3vP)UWaHk~xJjf}phzUWACW9UA`AF6C4lmSQi=|&k=9*lM1Gu=<{iGb_x;&X z9V1$3$v_ReA$d;5ym#sPO`UUJqcv+c4=WBaI)BpeG6J2&QXqY%#)-k0uHhV(>?oUl zmBzI)&qW&UW_Tx?eyYYTV%$j@=9w(>6}bxhhcFK1T_f+y%?I*aZQfC?*ARAVz?6eq zeKfyF_E+R4;=W*ubmE6C@*!W|w<&Mq=}X)l%1B}AR7EYya{rgyObC zn03pOw0849k$`sdA7Sun^AM1*q@;4MhDFr>Zqr@Hp=3!5M>=Oi0=zsAS$y;GG_9zr zxr;gNnBg|T(r#vGoUneX#H}{hYMk)vQiP@cA-J&!n^SGV$WX#D8m?lI4`CRL+0VlL z98dF)D*Wuo(4i<0yY?*!X4~VxY!B3Eqiq+~Xsw0?<$IE6t$9nH?dBDx0lpSk`FzH{ zKh7tq<=@k2?SX0uPW*-5@i!_^F*tKsnsj}h4A zCSOReG^O(1Zo1h3*ql?0ce74jR-1h^qjc6gYgiC&qhUd~NeaS&{D-pk80{O#Mo1_V% zrOKanb1vd2uxDsmBiWm;ltj&r)A$ndrRO+mCe_%k9Tv2ysS3MnV4EE2%p!mAbe88g75)`QJD#nr`_CT-^a@P*CvH` zGZHW6W`?FQIkk8J>orNk(gj@1#0wY!T;uq1x~7Wd*n;`XZ0b;^9${Y&;>-SgNi8lQ zQ{KBo!t+TWrtuAo?@Pe%m6L*V1H$HCi~z+?HDL@P|DoZ548Nw~IKxXdoX_w=4bNft zuLzqzYeW?z=4g04!?$Xnwim8aD zd02Bu$^L{e;eMg%bC^CegmJp0uQkVn@?WLtf@|+k{8<_=!nSn?-w7Ho^bQNAYe|F%{!pp7By%?6S(llbE*yC-lzyE}zbce|k%pz- zXK7ek@?;I?lOH!Kj`0j%gD`0vpy>k{cfO`qGJHD1b}DP7$*YtK3c_zLlCU5-STjic zUK$q3|GUCSVP4X36_GC1a5ck^hVabOuvGLPU^ol*Mu_oAO_xeMg?PVUS6qO?z${#> z077t%W)x(%hQc)nQ%0-tG}mf^ly-m3ET!5_!{b@Pn1%(>Fv+~$9IN>x{ZI`{%|5;m zNKXJS4f$h^U}!x<0;L3bz&L^8K%EZ@sEet%o9D=qzh+V{U&IxB5_p3Q&Y zPKxJOcoI)v&>{X?Exq;jxvk~z5?h{KcU8DbJeluzwj6VI)Axt$vl#Va`tY9<{)Nse zsrQSW=b&9k=Ul`yeT}@o*m>3-`aZz6+t2NLf7>3rTKM1M8El=CeT#8-#v*J^Ig<;< z!BSq_`E<$zByrygmmD|5V+ROa6l}_9B)$YVF2xb%HrenLLA4=v_OdB^63KajML=U` zFWnFu-}*eQ&DJ8E?WT`?ev7UoDCF}&P|DjUBp$&b6kDzSI{0d8`Y_3mp6%G&D&anrpeCGHv;72i|sQ9fKFNa@If7@I4DgU zgm1RXu5@psr_+tK^V-(FB`c=o;~+|MH6)N3KEYEm7(at-ZK-RWEQlYoa0Ubf?sFeb 
zcVed_o@O(AuKRhSi>Td5gxut#0a#E>AXl+r!t3lW0i@iWS`cp^#x*vuQ|jUGuY8L6l=K5sFxEOs?`EUf}|#+v2ByK#3EK1d-~whxzG{;k;LuUpcwKd7v3SuFW7 ziiix@q3&-`=jnmRS#smzj5n?FEBNpAzy9rQoLGf-LS@1U;<=POZJ z?iwpQBjJ2EHh%NxMQqtrUMn{DLu+}dfokpCeu#JTRvq3S@#}nzb=@2a3Kf?JyxqJ% zf+JK~JsQJauf3MKnzLcUhW1)fRS9kD$MI9jn!2MO8t8|1XK3TbjTJ-J#Ol5SEnGXC zUygm;0V86`ol&?7XJaCDR(=A-D64r!dxwDM<5`+H_dv6|J zRdu}&CqOiHI-z34IkmKgVr{5XQc)8Lc(2}QEGk&1I23WJsGw0$uOv6*csw)?NwEsX z`qko$wWNp=f@lJcaj4)>i6iHVN)(kiRNm)Vd!O;%8>zqd_s9EwzI;C9oV)khYwx}G zv({dFJexe-`5anTZN3w?sz3yr?Qcee$PLDDg_xWl?kJ;B!HKg-FMKw>ae>p`;dLnA@rQBz0-au+EZDmr*qmFiYS1@cN zi6FjLW9G%l_{c9^dR!&EPs02xVcLrUlbC6D;^FLfJg{e}AWMR1c(qsb+o@hs`C{{7G#aEp5bEEnsr}Dx3O3@%77B+u;TNKOosSZxo`g@~fOi>Nw<=?f`xoOh{Cy)kXyg!#H#&?+#e-W0M( zqI*=b6=-~x9h#8He=Ao0DsG7g)o**hOl<7*4#K_)HOt?e&c_eukP~3b#zSh8jf)Dt z8!{9(C@5nbZtl31>PL&gHAxRbvSqYH&IU*i9*$V_1io0yxxw|YjU1G{E6*34TM1E2 zuDvY3H#yJf@D=*`m^?9muj$U0=Q`(3O@Aw%&WSp34t{WkisfZyS2MC)er$3smK5up zbMf@{SDp|%dsG$-{p+ZF?sP;ifvL4@uZ(0SCQMg;Q5J&!9BU2M{WX06Dw% zpT_^fW^MWFKbiuwzv{bT!10Am%H}VI=F>eh`(W6=EavvHjdaZ7%bmkxo8AzsZ^Yh0 zR_ubMfPD^<#7I7X?P+ zJ^X`{%>P~;gmhrt4^Tk_lYe3a1d${-}DaaQvc4k zX#7F3d~C~IfiB1%c4HzR{op%;;v-)?+8*E{oJNGGv<+H1(1f|h_BMGa@zP?7B;rmIKzp1KexLpZ+e;qfW z&d80_@A}lQE9xs0*CUMfF~{Jbry_gXCwp3v9R;$Arn361;b6#Z{aJMFq{^n@iSi4Z@J&<##CJjAcTQ0%S9xmXX_cp+xS{fg4{4`QFa z9V7^(p7weOOu!W zE#A;)IHjO>*4NMkbvr-951OJkwo|49(3)03`M{3S1W%;o38xh$_Vh9jnj?)WthO5vaZf~~QIcFkst4eI;z z)%jSle9GQ|>vnqN^gRQ$Hn76@zI!Zo8RHG3`pV?vGJHoqe?d^pf{$B=N&t}FrQy@B zhL63urpTk6{s%=jJVF--9rm0LXW2IQdh%NgS#Rz$mT#Rt3@`lN$y!pR|K~W|p^0YQ zml9uBjX!(y?c+H9496Qi`yz&HtA7l}6F7r?m!2ehdvv}2f2%#}`~Uyl9>Z!>d)#&m zWIhpE<=n`(Q)Bqkgg3TFk|8tnk4y24w8shF+(@CPD5nwOcEJg+p<(Q6a3qGxa%7)w zMUU(^AWK+h6;aqD=HKv!_)_t14;O54!BR?=mIwp5yk7kzn# z$gY19{MO3O2%xX>)Kk+>9?9ZY$ef7GJ22DjzDI>_We%7`5e=#~-{*x`{{1&MBQ|ag z8IeVaE{B;BC0Z(Dx9O*Ax~LN2OgQL!)Tsmbwz&#_eu+1>*WL`-L;u(bPu*Ueyi&JoM{M47YJ|mJCw%Sl2@;-% zj}$E1I4p1aRZzcI|K-|6klRPX?F)BQR6lf7#q6k#%+^uScks2(?Wm4`@|}k~@}`%B zJ1O+CY`9ZbG@pzVmGk1*4V!T`K1UD6`v$W2V3$Dk 
zLp|6@*?X`r?wXir|l}O5cA3NV1B)y4&=J+2V;2UxTH3GvzPEE@>iX*OOr4ELV%Y&Jy*Hir0TL1NI|8n+_CgsqG@a_^FTb%oJBA#422KLsf48I_eyCrW&lLBEWC&H-KAz8W_fPrxD+CI@m1 zetxU?9RYrdF2`tUd&LifxCuuL#ILtl$~PAL6kQH}kAM#48w-A82jbVL_{G3a=z@}E z*})@*HKJDyb7|yf$Itd^E0@z?8DO z6fR~ior1jMM%EBR#mo&ht0z)LaU6_S2%2ssjZ5#SKSq%CmLQfQtu#GCLjQB&OUHjL9^ti z)CXjl%FgnS2*!zO1J_-MyKM}O;aZh%@ZojZTy?>(=XVb(_8N&xEr%zRr{Tpfkw$DM zEFeA+l9>|mVFETe2A+$hLM;i6vfmNxxh6!ZnmyI{1Q{67x+LluidhoN53ePc<>k214p`Ecv6_DPh$EbZZ7461?dv&iaWScwV{x}YRYYtk2OasVYgHW8NXp} zGlfpCMWa+H&#T(GNE);1Ravoy!7n{M@9j1oxDr(j0Mf5vg<xHJmLoWM%*~-e@oD)AWMQ^kx;oj=92#+Ym9>Pmsjv!26`plODZb@Pvm3W zutl|)H)CO>l6ycLW+Ur_qq{wL#Oz1-aNfc;J$wtdk3ni4!CzNL=XJka7~a8Xqy45_ zR1bm*Og;rwWmBt5m8&D0p?H!SJoy-?yND^?pDErRb&gJGC&io8;LXQCyx+lXttj_K z#oMFK!CQSRw}aH+&Bs8zXDZ(PinmK0Y_>a8SxEuB$s;t;WTZ+-VKz#X{fI2R+8wg) zarsC#+atsCr9t^nJ>cf^*aP0gd(F`U@{uTi$7oos48kP-ju?dTk_RpjtY<9d`Z0cY z?{EL-Zo(9Fh$rXY=Ah>_mM|z^jNZ`a{(3P0XXouEb-TY~C8;zAx<68FN%!~B{@J@ft}6T8AAflT4|jiUB0CXyj_%Lq zWe;Yf)X9D%$tJTq_;A*~iA?Z4$td1C>3CQ&?#A!}3$s%ToxvmEh1gehVpU(pd)V`4 zE>$Vl4^|ssU@P*RZ^}1p_O_%f2cwu>aQL&skmtNk?&XBfNOSNWT?&4I$%lxVDH^{cR?jN(_vJ?YB zqimWe`<}U*No}sdQj8D>yU7Q3B%^`{p20kjwMgVG^FUad8H<@d>~a%F+xf?DXoZw3 z9!$RjZz8k{-hUeDCE-}isHC`+D>_Yxr7LDZ;}dR|A2`m8M!$LQ^GuwZS(k-r+Aah;$@gHW#A4x=W2)i^$JN9fvSL zb}od&eK2v19ubcHc(_wf8amu>_d%L^d$@0O@tw8O1_)TZZl- zm3>tcrZ-3hW37opR1XNRQ=2$BdMp|lO9|mY>q{VrEdhf({*PJMD_j1e(U89be$*ct ze+!Z{QZH|hmAnas_EDR$m$v?EtmN%dgm|o0L#*<-Vu26Re+V414IGNWC21;iA^xLx}RF28WrCsS2~!@N?5F>U2x_yD)&`b zv+pdTB5SOBg*jacd}>cXT;bBrB%jRE-iY!+T)V2$UXL}FaA}`rZLE2tt%ai=5Z>!xg)Bc>|hAfA6e&Hs`(Y8yhX2xqq77x9W7d#U>FVi+S=P%8G)B<4IYyb%gZCQ4lOQJ7M{T#YbN zr%+bdpkU2uLt#R|7(U}VJC zBFWW!iQd|(HhkoHipm=6nV~p80KSN+<0Jd@#t`oH`XzJbfG;>v;C#Jrbe2F3_==G} zk&meq8SwE}jvntC>WP{}h!2+er1Tpb-1RYhaeNYhj}3atiwk^@XdL|sOQPfE%J|6s zYWT-d)jWL^Zu#=( zhdjqu-AfI}7qdJML*B@)8sb5tXvbHkdE)-ek$>Hw?@+7T1}}$ZWaWDrA~s@=^6)rK zef{zxW9Uind~wm@BDwJO={T?%$uVXh=T0o;%(3Ah*js%>=mqB_bT!_3K87%zf}T{b zpht!W-e8?Cq7e01Fe+qomiLH#{ 
z7s!tWvkberBZ3-rBoi-s4w;Ftp6B{7eI{na5@2z4(15TIL*e zVd*6(2`|hCegFiW0DeF@z5MC_ z`*ON)t4ukagfPg3_upzc$yX9Cft>Wku5py)|6EQ_q91VObRayA7)*ilrxDVRFMuEc zs5U&@r7pFk6*Hg4dId@0v1!jYmj4;Q`+@oQ)Qw1xPRhp-_30gN0;{u{~E%2y20smg)JJeS~`^Xq@yayl&X<$1yR6<3EoVxTk_5eD6dAgr+aSWjB6tA4}A* zQ7*V#88=sDW?1>X88&eK%j?E({3W{aW)8byS-Cb4GQ(V7s0f7wCRlcZ#f5)VBZhrZ zv5;Y>v}rSiHq!3;f&kxNUswaZac5hxXDy$ z9%t6|1#9o@6D+-)SoPKy_{%GJc!K38$QI~fW#Q+g&&Q>h@F%=|E9r}9&b;rxE`f-- zzj5bu3BDUSx!I??MSkl^0RRWcks*P*z-N99pRWFi!DUa++@@g__P2amEc;vD@^)zA zW@^v`kqZcCppiO=6p|Pl&cca* zNxp%^!*!zcnPM>9Nexy0T$37Vf6i!C{v7*rdNIDrx?7L2G4~4ivpM$@eSuBMIm-JlMn%iZ^8{J<6&b0yF8%Pd z-A6RByx33K_6zI_8>!`t-_87ezECh;H;Z!xg56em76Tur9}ss2^Uw1?!=F|BGX$p3 zS;n`o`DYRT^zqMq{PO|-yv;wi@y}}hxsiXKrW*J z<#uN9cO!l~7xQf<-qiWPy&+a@Y`UGFqfb}C+XjPoU~n>1H$`@}!6Ad;*nW!VmOwPY@)K!B5Box$YppyPd7@bn@lLI_FUkJD)@N zO!*O>q6|7ByPa14xPgDJ z#~(LI67MWsnJ#T9sWrP1f%oBo_w54jd3bjY5hm-Ljlcz&t;f@4vX_qAIgshLd`De~ zcgp-5Hq)2*gGAEc_mTk^y`#c74dAp&81C;<%*8pFg?DEuQ!mCJX1$O~=aNu-j>8 z;KiEDJ^0~ro)>t(f$xxvN#X|lA={Y}i2aZ9`vvC$9e%ciuUKDzS@!j#V6KSo&?Nf( zP@cGT)dCJ-w~@pC&f&W zKPqMyEV_4~$;#>bqd=Ib1b)+2j8`ZyY>t+_5vX?tjl4bJTztLp+>sArW+^eBYAu z#{YqZ(Pws}Yk&XsdE*;)%E@at4i?P+lk>)}fcXzNZ#>U0Q;ze-&kTpjdKFURdE+@N zi~gJQ#@FlyVMopz&!9xn5nL+I7o1Vx&wlwV`Mt?GS%**1&!gpu`=2%4H-t3O@2}}c z;pu#!1NX!a_T?-uy{;V?xk`R)a`Gj`I_IlhFivvM7rOR0VJ|AeNX)zF=Gu=6(I(iD z{_+zvzKi+*7Yt#*z;Q=xMtJI~q;=D^Ix{V4XO z!~Hu_3Xyq$3^#RUccEeW}gQC71`DLSwW;JmQT+d$IYq{ZAR*Ojv_5A zkk@a{pMDAyQ{~u)xi%)%lOz>oeK;zMd6dWK7UO`s6zeP5^omif9$iX@Uba$EJ_yRD zoymIr4l-`4PGLNIG(xL!UaKv9k%*BR#B$gZa3e+)s(_PPZD>+>;IbCr?c92g+2>;^ zoh$0tT8?3PJxMGl%01CZXpuo#{s;vu4)HN+;Y7J?SsfULPMs&vO{{uYCflkG7Z$fWO z?KIL98Vm$ZuaWu(hr8B{5+un-TZtOV{usb42O!!9F^MkRcOWv_BY^O*^JkC>B2b&~ zRVDi{Uwc9+8^4o{-F`q*d~1caoMpvHMxYZ04k^h>-F=TASO z*$gi{AI+@Hu_X|%+peAsI9`g;Li~x)Lf9IMNZr1x6y_KK&{P~*e`9bAUWr`AF~i*;YjS|v%M;NVLHWuhqaU< zb83WmDJ9@k;oZ50f3C!zWu0gor(zhGN%z^8zV0g!9WKJz;2eynuDG+l2=5qWy^AOM zBVNnIp8CB`+>G%kuVmss6p8&$%xq%EqA`LUTm8tU3zOE*VLH>vXYOy}520|guwCt3 
zkEgQ^hKGUI@W&ERV3ppvL{pr{z`Jy^dE_xyD7u|y{cO_DB%Wkb#+0hvoC^@`$($^K$Q3%KdAV z$0zK&YLLYQ96X6(JKD7$ah(!feBY8``X0Mi!~_)F{s9J7=vH||w|mG4b^RI+2BTwF z_pg0CIBx%AL=wpf0QB;mmE9>0B-yvK;qj9lygE{ra(j4$N*9mr+(X8_lI77w2@5^4 zDa-BNs_LPLOB`}((X_bsBfL~I5rfu70kAV=Y++$_`P)Y7w+N&;s5FL3VPFJU!@#Z- z&fm`*QLJZ@y?hR0KIUa@dIN};%NXWjz7f7QYA*!FmO+mEXC$m0QMSZL9JuHoj4xan z4WB^otIJ=N{Y&K zwxu1mrF!J6v2K(GZ>cY@`Teo>wEYXKQ@FS9F9_oGXu$+mA%R@tv0Eruj@p|m6WTTO zP;i)#&nQa&8CF8%{i@zx!SZCS()b^-N>X5530)=BlNCgjjF~9(r{jr!r2T^*0ncX zh|MIlnh!okr+&x?T2WVv*e~!-yYvGf;-KQu?a2`L7opvp3;5@6_!F}ao%-R>Jm-9H zr#-$&sJa~sPZnb=TN$YjAuZzfm*;9{lz#q9ho3C;Yn=&_9`{#BIMU;H^W?fi>jx0a`-!UU$49gwwV1Pj zEgRU5$$8k6j8!;7seN`ZdVF*ZEAQWk?@$Nb+|7zL@*)J4!EPhIQi)yIrgp#1lBbb{*4{ z#~>b_5=vM#wQ>8{Bo_v&&56k_+0}wY-DBGrq<;Eav7TgNt8Nkm(WXh|VxC0#>S@2w z>r-KQ{Ddithj^;6Fkb%k^tEWpsSt*I&z5W9-nCs-xVKBjPp_d|M*HukCcoZh+LYwi z+Zqkrv61|Gh|zF2UPgB&zb=?|PNMu>BlR(U1FC{Vyhb`fszsm^l=6*UmA(?VZ^q{d z&xEVt&BtyuiV6~k@gB#gsjW-5?ccoqHncCp5Aeemjr(fz`UV}oyC43*;a_b|p9{vW zf68><9_sPC>IlTg5${ZZ{c`>YW8s`jdjyMchCE+zcG((%$1~Yg_dHQ<3-MV>%_;-Wy(wpc_<&)*N%ytw1d`1I`Y(M0W_wk3zK>y~C zHT?53{sim|&hPhp&G`}D(Q@raQwq?WW@;gV@Mz=1Dp-I$-x!W|;+KWN(b5HYFKS_i`tr3b| z0hjxJ;GXrj=jNdhnHQ<43xwsyQ+)?6eJ=egJtms$&oPXo)p4Zk`?7xoiUrYSg zef`%`|MdX>)j#LoIK~gH@L#L^*O>pB@Ly~ADktw7kN00s!7FSp@Ap01f4#teo#wxC z1GA=W@L#X@UvKtbZ^J7L`9|jhK#*6!A5$Q(Sb~#nFfa#h_y8T;*Fz9z-OB&+ZekKvZXjAR%Hlxny?R965#gEPDldh=(%8pQ%+Ea zxyYThlao%}a$bRKx};0VNAal(A=m4(Jwz!O?%uJQunLXTO9+RodqmdvF{A3#Dx=|E z2Fph2CZvF;)QpF7#^z7ZF!h}%2izJ_W+H?%E}nL=sIzHT_%*6QWJ zRP~Rlpx*wSgn1_;jnq0R(yeLoz z_?=mR--Xk-0pOFwNZo@#-CS9Lf4?+mJ;5cw@BeB5;ByCD1o#z5_}>9P!*S1-B1VB< zWTbfZ7r?JU7688FMBpzLOUt7%L5qJ83J%~`02W&Yd{P>-n|L(OwrcSsC-50*q{`Vh zSdXz73i!Xm0ib~_{$w`d_W^$lhy?iL8wGv`&Zh$SF-BQZH2|Nb$p(Cr!~%Sw9{~8j z2c-hOR685+e?w*i0RKh-XfEJCvgwBczugxyfPbj^Fa{#H`v9InpPOyGAS;eQAG z;wbRXrO{Qcf56o#Hc~PA67cbn1%NL(5%|U8Vni+eRU4I$4_Is&@H+|T*^L+?(Z|P+ zoWN(Kk-CFNf#HjzGz$19aGYq9{p z7YAqqz$b~3S|O)ug;Q4Gi^YSFPg&8&-(Jd`75EyP-iAqRnY8OlqA;$O4AltMKJc)A 
zVE}H`l{h~KF0nn@N^$bXxcP}#0dLj9SUAZM5k?Yv)Q8Um;hv;#2}eb%W(>A!Y9_tW zVDmJb9w6Iwm_!N>%D98tjkYQftwvn9r2;z3Aqgjqa7TlQZ$>Plr^Kwe!qly%bAc$e zh~(c6aTZuY{qishO4yGT34?UPe2lDuQ=jgQAETrQ61@bo0YL77i(iElOq;`g=UHV9 z`R?r(>Eppw`>{fD#LXATNg+F2x2H%2S6@+)Sh26emRMW@hFRSY2g_C$xr@F`-x_}% zQgu}nAmw9ZoOP?g8NxaDR)a37lE2l!`9$0THYKNbFk@5MB@2rEv4UPfCa;l@>OQv} zZcwt8a@>mrSm!OI^Y&M&m7nf|y!^0>qw1lgoJjdm;8L4yFQP#R_6ID0spo#K3J=!R{MEtmPGf z7%S{ocfws-NbGP%kJ~4l0Xn$tQG9fbwF)gg3G?lQIioY1-FZU}OwQH3qlIjpcC8`xSpLF&lDR!r1>%ewEw(7G1mMmt` z+~9l5?0q5=t8YpVGov#rV5+2am*V;x6z%GD($g+L_G%xWTFbz)M*7$49C6GpIO$n7 zgS35mGlZh|u@qFQa5hQ$A)rBpTiIQ%-9^Q;;n}I{nV~phOP32)p`Z@GVn!A2UXl2; zfxTw#`X%#cwDPplH{h}o$%p%B8;-=0_4Ft32>dNq5`K__@VCHxaP?;^Ee<1*857Tlsgr!CPvkCcX+gjdKV*U)_T88+c#jnw`V4bZF>8( z#?cnldz19XThiWlK=u53S4h262vha0h}Qc|YN|UEs^-PiY-dKjYjf564Va99>-`1% zc=moMOnL{oZemN@do!`LML0K+hhOic)cZn&sd^`)^*)M4awo#nw1}GQ%BXi+u6n-< z(=~9t|1DmTOz)(PkN=zY-g=AceKGRz>)k2!J^^7`zoYg34U6Q?gQ!uln(N4@cWirZ98}*y*jZLdKAN@Y8ynX2Cl~~9g^B$A0a}HpbwUix?Get!Cf>Qz3F|~y^ zh7%Q5OB-T4xXXcCGu{DJ`r^0wEkA@f?~+)ZNL~ms7-&Aycf19LJA>5u4G5g)baqe4 zbA$6ADg6d#kq-ZuL=OLupYt6`<=mpN3Jv;Obu`XkJ_2(c8#Ii%WKC;2^poWMU*HPb?-N8WjwqKuLIGtfTt z%`Pxq_m1MT7R;Sp5lf~&Ce?E~y8`~mlb8+8Vjb8A{-j)>fA`|I^9KLC%0HhX2j_Z; z+u$^4n#XmTY5Mp5r2HHI{FQ%}GR-8Nrbg4S{h3b%emgVp0}ao<%lQ+j&(pDIGW{Mp z$qD>X!$0SwK6Ld{5W+iet6W|OfwL20ndSDRTB(!d@y9pJ z^Cz0=gNPy1yEVmIjC_rO_Mx-L)YeQ>2y;3SOQwrSb*WBrp9F4j?$pm)@g!VtBGH3V zofn+R^4#EDts^gEI9W`-BbJAC+{Qw@i6Z;W&ad%}VE3wh=x0lI%BzAe2^P-yyFTAs1=kVpIJQSu z&1nBSw=dzBy7i@^?6a9a;>}{I-N4Q&sx%&Yaa_ZDM(TV>8G}^vM1kcfRFWyZ{Wdn} zu^ngY*jb#xVisezF&Z8pl$SRKTP*o$%Q4tt`5-zmY^onqU^L8Se`=rAi4KX>9Tjlw zafAYvK)&0d=NzyLiRi#UuM_ck&ddMijIfb<20!4PGpoRKqf1ivs(@ zvc|OlwmqR2=Ng^Ss{nD_yQR0iGtbHDR$iAM#H+kMlkp)2f+J?maNzv2=d{N5 z#;gerzK%@P8maLJ#Lll`lHs4;D%!|o*g4rqm|>$AVISD&<&SShZ%!WxczF9oF|sb^ zw9#2FX}PR}T(I4rl2ZG8cvQwzLb^N3-^_c-n{-5?mM<0t7WRQUX=U+pbMC@3IG0`K{0db)XZ{_UdmAlhOm1i z0oa(14fZ-(-Y?lXYF+YF{qOF%)jCH&!^Ld*4n=bxhbNUY$(7ES2 
zWP0zvkwb+mhmtq4$>9-bgfEBbC<5fbG;Sqx$l-8Mx+GgDhY3uw1#b(Nc_v?K(q>#Ymms_J8mA)5bF3A>F`Zy-pf=ZtZx@hBH zHW`(cJ3Q7x>P?_5aaL^zc&4C&uP zE%-c=HKu>yr(^K`K#Q}HjgK;{hah+z=OG)4tN42JI>?;_e5(uKc|8Eiz=;8O{_YKT- z8<6-92B!@719QaxIJ6@~(E_q$W`cxZ|Ak~;OEUVYCf<$Fe{ z-O(2MBqEzdzmD) z6D9pTX)_$mV_MVd#iL7HDV$DLN$WdnnXd}}!jKUhayN>3YfBo$fGFEl3)-&`_36eghNu$t&v!oIW^A7x<6$LpUgPM90xCO*hn z0pv^@;h-yWoW1|a?B$wZ{rz-)h0~bxXU;^Mufql1ZiMn|WkFj-9>7HzOu*+iZ9CAS z(oW=I&&UUp#0Y$}92Ot$F4o7nydUS~th=P;?Q3g69W!r{hu7Tw@$xsP{~?_Kd$~sx^W>+% z7T7o?F5gXniig*&lQQj#XzWD6H-5)rYn*uY-sJN+tdxa|JVt)!{NV*yEEc!?&1v{D zc368x3a3M&8SE;-TjRT}x6d4gy%ao##B&y4v$=<*{in7sAzisia-ApYWGTHJJUD$H zq~fk8NK{b05|TvJD`AyFj?)f^7V*FU2|pmiV|_T;BbtyH`K6P3o)%eYXpr?CQU?`o zzRo61n8hHhK=UN>dp%jDgie*3*Kk-Z#caUfoc4J_VoT}GN@|H1<#L;*8+V9c9O#}C zqtovmo8prO6N3O+{JG`N0<0Di~K(})H;nUT9AH_9CeGvb z_zaw1j=^8tx=e6>)w7tnsP8I4K1M?`+?MD;_XCC7VbM&s76VSKKXL`J{s&+kBG}xs z{$V{HSmxO-j2@NwqW5CVIhN8lTcmO9lZq4e*0XR8VJLU!vZ^gFO; zM*FrIR_pKVl4y>aVA5!ea%|SQQMy)GH>3UNfvwhspsZ}7Z4;aIWWa8r;RNfJszR{s zHi%R*`n^a;&Ve#}ai}AO5S4hV&m?Z`5W+^iABFWTv5r=0d?co-6Q=ub!7~lfU zkZ==>aG^&&+zlE4;pwVN#9)q7L$X@1V~=D>3&!f5ihve+rllCfackHJC>9gM%o96Z zw)Xhmg!!e^0Y?&5sQ-nF4Jc&Xnl=LN!BVC~j~FvA?~RZA(oK(VTw|s` z*`s4hK=q_ z&tgbsr`50&kVTubr*Uq3jzhW%vbbVLXU;PiVBCVH_U4P684T)LDM3}pO82?>ed&X- z?B1M>|EU4~V>EwffATpDteuev;*A28P!8u?1|N(!-cVrZn62@22Jz2-s6e3o7sxn( z+Ma1vGR{>(bz!zKu6y8y6Mi zU5Ptxy7PC?mo;SV#jR^)$O#KGic*9P`C97H?R>_kZnY0s!8=w-0FA52$BkUY$Oc4G zq>l$uO=SX9U`in2DH68Yxfyh%zh2P`(x}!OH~!ezg+6)|nItOGwyirwUK< z8xx2tRqw)kgwS=T3oSW3o1TOxaTplS)?lxJdw#owN1$Mb5roGSUK4K>IG4&`Hikf} zjQ6Dd{T>0RP}zH|x|Qd%vGsEjhOT?u$sX5Un8cA1xsJ>o#ZD^1SHd~!=T$g(<;1=k zzRIW9)ro8Q^tyg!v*}k3U-c`8ulkkVn11E(Rljohs$V&L)vx>duciL$0sbqGWNX?n z{%eK*TIIj;NVcX;_^&no>+$~UB>(jk|FzbCJ==es;=f+tzfSdEr^#y`zqek8*Uvta z*O~rn(tmC6Uz_~b>;2be|Mh17b*}${_98nYoGu6x&PYlzkch#=3VPaeTe^B z=)Z2~zZUtg!~ECb{_F1kYq9@2!hbFCU-$K2Oa0da{MR!7b&UU7;lEb-uQC5M;lI}S zugCkZll<3H{MTCl^=$ujivN0n|2oxwo#wyR`L8qm*QEd2;J-Hcuh;vp&Hn4n{_9-z 
ziu)gC-{!xz;Z;p<&Hf)hbb%jwzaP5L5B-N9y4VkW+z;*aL!b0RSNfsP`k_63=qrBc z8b9<+KeX2mea{bd{Lqj5&^|x(b3e4-5B=5;&70-PVF+GfKq#m}KXf}kw8#$~=7$bv zC`5^5yZfQVe&`54w8RhH*AFfALl5vn%Oq6n*%%3xEUFkPQ<$@3K5fFMt?_9m`Lw6_ zw6#9%**lp_vFa*DdCU*ADLyRlSULdiT<&?$7O z;Zfd4-ih?~V~d5GqE$|B%t|tiH{{s`gkV}0WU9{IrAnhr-)4dB$8fzMeOK0A7ZFUE z#$);j2*I=>$aJwPX-Smn48;_;3uiKA8joqUVj5FSm1DBFkMpgOS%|$9$9_a%wRh_0 z+0Wd3eg~rM#w4OaRp(KqilZg$P?Vk6*}AM4!vT;p6-Nea&n_7)^*i&Has%;v<_BLh zIxBWDm_TS`j_)y(C}E#d9Kyp9Dj&A*66RBF-pzc}>{$^KI_GsViw+hxTIoEMcp(aj zbfn;|#H@<>7IUe#AMC*|jDdA|xq;72`xU@2Ozw!N>}q7PZ4_BPL{y5w32KB0a=^-R zssc`<^YP;-jI3v@k%eny;+_@q7OuUK?MZEr8$R32l4EjcA1#Vd{YMH-yVM!;pre?PWtG$v8r$)=TuKAm3`gE+hD2jK@sr%$7S z7K-#zopgMWlp{eb(nzag;tdQHGc-V6S8@o_0brPf#SJQ;o@~m3c2|-N^SZqYUcBuM z2hL|ZFQRg2^You0UseWxf4X0j{vK3#i~iMQp;aqRzDVsIlWSuRv{Et zg}PMHvv#lF#zFmqiy?gI58HqOj&7g7A5>_;PDMpxQUP&pLwkU_O!(d16HrQ~QcPt& z!3S_Jh}d`S~uC~m<68cIS}L!p=CGsrmw z)!Jf({0-aBQnl0ynIYo~0a$$aPihE-e72o~0?a(agn zaS3hzmZ+8r@Ik|K$AP|PU;1W*tm98eKI2M__heux{2NU1BG311M@MY!6HdH5DA zs7Rbk5u1hI-F?X4?H!pat0LxM>wf?z&Ur*srXn5)mcAkiFrtdMm5?k7Dk5`r6|oYk zkV6rl{0nNP6p__ZidY&^#A5u0A`*-PD=DxNtm;DT!V__P{oHO%2h>HD5MCT6oK6#^|M;Dsp2i9Fk2NxrYinXG03Ac zW3^^e#Um91MUknB*C+;sia|D2oFo);DgXi$L(-zcBsUoYTRIwJ)3-*aM_zhJg_6Al(5(ZR}dce%KYRme+NFk2#l;VaDykMnpSVQem)RAD`alRY2GKyzZlb!V z3llsaHMLT!S!iA7{ggBW)9KTDM#DEeS8q4A;m8`Ay_7AAeFb`?X%{?s2f^X@KQkXJC$F2FA zYJskb=LT7dY60?%kD5Knytv7#ce)Bu};qGh|E zl--S_6q3{%MuXgovKVHD|E`JQ`|h)>>mg%I>c1?Fn(1Jl3%mk@kQ<8>@-aRIbl{LM zXGF6sF@6L);DxLvxCHD{4iDnq`1T25A%+W$T7%PK_-?AoC}_<60r5P6YpaObT0~L9 z>M$MmRB6i(PE#esbt3&W%kw08yJ~}%<7T@Mj@;<>8G6MMi*3#&570PZ#nOr8zZNn% zqsheXs9~$afnvS0EK2SuRE?d8#(HBbsRKTJ2A~T|t3v0p+rqYB}_tSWP80v{XgEHYHIYh(4gfPAqQP2YBiD;H0 zss$!B(UjcXQ35KfA?K;WMZ!lE5Tb8Zs-SYqt_tJH)@l_gTZ<@Vr~=c4RbgQcRoGFI zw^RHvld2$uSykbjd$lU`MpfZM_6&AM8yno5BbCph3ePGk2eP3mkUE>j+^fj$Ks1m_ z>VeeYT1B1KOqpsRHJGT#`$*o@%2ZJ`_$?@)2D1+I)PTe^OS#mbSMtm>hIbE9hT2BVeeYBUmkDJybJgs)5wt2}M4f<(er| z4WtI=2zg^0$6ZaWOah&Rin-w+! 
z$YA*5Zu>^8P*j9Nu5v)EP6#dY>VN z-A#d6Dl{7mAUU9L7%vC1eGvi%B$J2(3Hb_1mcj4fSz@@SLWeLLm&8N9b+8uZ8}2g_ zlZVS6Gb5~}t6J(xOw8glbzlO6Tn0I_0SuIZSKFX&$|i9)J(a*&l9JG7t>GwpXe>2v zx@UuEU2>7lP-HiN3k-m3gScFrZR6V(Jo*tb|@&`I^y3h*khE} zyRtcf4N>qQ<%yug#0-T%ZTcky(j@tYPf4&7>(D9p|5K3|~eLRt?pbdT!&3Xh}u z(U8cAaB$Zt1;WA7T%fXHlk&XAqIsNR;-&eR|5?oWpULA)b#Rx1d|7$K)sIPjiM+qR ztDe&@{pl*)i=_9}kA4FiOs~gnA?`kVEH}zV*-Rx_^CyVRnt+|ZJpt4C@`aRf7woD? z?ooI9du;i4gYn^=PhmG%;i*z**3B4bzb1FRJe_rz%evly4bm}EzIj3TgVirl`%TB?NdmeaGCJxGh{ zHUbMc7r+erXZigCb_d_IG0GA#J}5=;y3 z;3{A&M{NZ%93#ron{0zhM|zVm_`gCUxJQWgz7>vI`FbNY74kqEFC`KOVBcjrtQ-igje6ewjVSoKrEvGhJl|>G#+{aABjdCct&MEbaANJ0UV>@o4W{AN9UVTYw(^vdoZNBS5nev>2?!W zunsDB-TMw6TL`?DqB`~BI&lMsg8drf=2RZ5f-2&}iRc*T20BI*qEr=vwt+MX>y#0T zg_}DVpO;q*x5n#v%A}Yx3;C_)-_@~Kyt^448HiaU5LfDMSTTd5W12L8f?pyHaxPQN z#ZpUEeUH?e@+wr66^fDxG^JR$AkPmkm)8fn_%t##qCcsD8@(N>qSHmi!rL&@;%Seq- z#S>d)8lhrceHjG6d1!3i88`Z+nE4t?N9h^uEd)cx9v-w{4Himi;9X;ff&wh+!Unw| zz@Azhv+D|-zdZ_3;3KyH2wWfx?wA%iOv1yt?hZy$Ha$(qvkxuk%N|Ydx%doL++3 zKfywykax0GwWL{kzr6QLdDBP3lDm3uuHD>|-W?8*8h-$_o%n%?gQ<`P-C!;PO@VpC zO46f} zFlC+ zMomJm(6|Py(JmR<^`^+Bu@}*37gFgKblo1$3U??=pb3UQPhmF%%1=*$t$w={oi)_6rnt3CdrHM{CEhb24{!A%J<}%!c ziUmDE2&hJ8n_5*yoNdAfF=elze-=L`33QjM_Yq!-@hkRInC(ojttPlljm$VbO09>& z3>6kLaP@2lNpu3|zU_IOSGGiigs_l!($&+=X)o-}CoUWNSZDTEsXElanLFVhx6psN(OU zD9aQj6KKlPC~Uq_upy<8aP9WSTA*NYA50?tskPg!FH*qS`9=YbDq6si-z<)iyEMgm zgyv!vs%Yj8$+A3A;QR#?8aOOVCU8>1LT&GaA0fb*qcX}qO<7BhRy$>_*Vq6Urt<-_ zFanq*8Zh@<$)p`@2X0mL)5OZZGa6PxP(0Ucu8f;<|5znXbyn$~D`r>X0vZ`Rj4N9S zw_^D?_-g9rgi81i;~JJ5sZai>+=?d<-`VgIf-C9Jtwyl8EHW|+b3O9=x!#qX>l_4c zG%n)zVKI1GUStf7_pu(SL(sagR~g`Wgo4A!70rOTRcWk@4#`OEqI#hiso?P8p&=&D zZxFWMNXOAwqbQj`Qx->u2>&K!w@{Kqcd4G1Y$Giy&Eh&P8C~9Q$--)9Uq{E~a+`DN zj%;kg@C?NY4RT~ZGd&9F(_<`;U9j!0=oUHdknJ1-2^7Zf&Vk^h@>=Mp`<=VuzGN7T zj^EGwdpts}&Gi_XLdH$t!?`2f@0Muo43SG0npi=ytRBx5o(A`KIQ|4p(*2 z#bBI0$v%oZJ7jQY%ADK_75D2jclOPS`|zCH--aS#yfjI1XUd%1pPKE{03BCUZKUcJVyzh&upXr-7br?5a4AvwP|n+v z61#d_5*qXue@}xR+KlsRtupB2uNQ-!Cp}0&CTpSI@B~;8;GOjd8T5bXXylG)mM3b^ 
z8)+YuC&02~8uXJC3rZG}>Jfw9sxr#%3G7aeR=dbr?XiSGXFA_)pg#9SjJoRuyeO8T zgKYt$PAb6AM=JPxcn}IvTltz=cIsDDB-Vf_i~{CP zQex`HB_UuIP9k7J9)NL<0?ba=31H~Jg}kO?lPm;xZ?Q-ru+nq`TC<-2Q8 z1_c(DBNHqyg1}wuv|EdC=QUJXWduV_w^5xH@ph$`VnVOlQm%&DI96>oGM!Hhu(Xy+ z2)4LKlD|I_1+qHW2AqHZm7>7iMEyp?laOUI`i!`Vxf$GqC$~EMojnDXv(QoFVyX1t zsuQGuzXL#J8;v%uS8KYflC<$_Wh*9 z_)n8~zz+5A)6P)9p4=e7Rs{=4E+CtKh34uH5Eg=Q_5s2+ihHf%&XhU1 zbDJn`A02@t}3q{7@X9vTSAC}oqPbQC2MXv*d& zWPV3Vj5S;m4iV-hP}uMgVc5wEG@o51pixB&2rLgnQ-FpAf!tWM5NQ4-o}|s(5zX>M zLGw2{lX`X;(V3vRQn8?9;oKKt69^PrWt82S8itx>4ik_r>`awPNaiqM6$V9tVFIXv z!-VG`PyhboA3k3A6vOu{C4< zP%?q0tcwDDB`Glg0VPR1+ZY-%9L&Kn#thre5SXii1thXi4GIK=815pw5X@65nt21} zEKL;V6G5S2&XQ!p{FjOa<%)XM2+UhmM%iuS$63b=F3e@cg|>03g!rzNYa9O>9y1W; zIt@p|k3mv0A1=3A8|||FFJ>RH7dA~{@jNtM=yHd|obtv@_(_nZJ6=$0>qm-oarqz@ zgEgxow}jP@3|5~_6f7UC#tTniyl@G}3)f)0aAk13kU0fjO?lzWpMbiRZIl7(SQL-j zBMm^^N9$(Im#s6Q@xoLAVIe^{YCbp6U?5x*WwKCF@j_4Fki;6Q89MY5+XUkeMacx3 za%vRRw~-Qq4^85MIy7Fm-~{J0cw^e3hKYI zX3BQ6B$=R|E-YFoSJbOnKs`rglzqHVMvhjiNZMK?a>uD8fpGAE-ogl|J7h-RNPUbY zE%*znC1kun!mQ(k7pFrgGG35oh#b|i&G(k!$paWF(Xa@)`MaoWrciho5j1fc4eK(xwL_#FOTw!z1{;hBm?EcF zeV-Z;oU6I&1pv;Hzn5!7aDn1JTyba0oZJso+|Se8^#TC6_vPgNr5X|BDeg>}llw9? zA~;`jmjwXXk>KRyo>U`(en<_HVaky6R3I{*KNaC{9GXSMTr%hd!_aK!E2tO@M1|`0 zNS|RKGUo7a7P-kP01Sl8nFkHTxgx8^I_5oUzD$H?Y`rs52P>*1sj&G253TktX1v9P zd_~!$D49T0)@sOjl+RF(7+z?S>;~e5S`uo5g?yV8KELVx0)y zX5NTqX`%+=QInetz0s+n+u96W!O~!~I zI)ndHFoP}Vp+K$qGHV`$%a>}vw-Vd|7nusj=F@1?KB=gBNY&j)VhsY?h7k1UD9Sa8 zk_j|rO_b+2$^}CUmxQB%+bdAm@MvJaV-;ZDyI26DiWU&oS89OAfHJvIzKxt`~G=XZjF=;UjJB`~MXo|6k!I z(2P3*H?V}Y$`Z1jGAyAJ#o8umTUiDGvrn#9%^`c$P5a|DVp0Fa7_E1lX!z0g0q#7;m!} zKrRaw0(OUrX5NTqX`;YxCYBViS&~d(pQTt(uE2KjhI?|GzHi|Fzjv0kkSqKpL}7a+h{lpb(f(M-+wy%okv0NuppbJsl-bU}h;Y!8{lQm{_1Z z*;@*j=ctUb`~6*~fuq$b61Emim8@HtF0xJ_A^u?2_4{W?@^;k%;`fs}llxnzu*~y! z)!g+2KDc-0B(Nir?LfrpM=9i;}9Zyd4j|>#kVz>E&QL! 
z0u)u8fRIi{J^%{yhpbqt5KtaEQKFf3+(bzu7dbT`TA^foQzk@yuM$&^^z8fauyhvL zJ)b?u&T1FQTDHj9Y-c*blkEDDeLxbD>DKp99#|Dp5Ge(fcI#y)@Y4{JcV0T`-@Dr; zjJVko2Y{=}R&nyPVbwTo%8i!gl{izBdKvPBdtX`Bqjw9pJi)CLkBgYY?!D$Y@q2kr z&(8BS1V?B0WiRmIY$dBjVJ6U)m;X*2p*AstiKeeg7 zqe21sh_eL9sw9ER|AJEm&NEDCPGR1hXraAev6>8Nu~z%NVSIo&mTZNYw@ zyvnH4{fP+m2kiH`4++Em`z;6oTEBn4kHSZw<#wmYa4ti^c5Vi2GiKA*ax87hd_21D zB5`vAj@&;G4NNWoYzrBOEo@P^r4-3RKyCqKO6!`T9Q%qdHhwW^7HXP|9^qU1Jl*3L zGv0%3e-O4q43IQZXnI~kGTrp6Bze0Mx47vf%<889 z<}?UGrsqZCk?HvoFhbJ~7fn58A&VRGrN8BXQ=Rjn+dYNtYu8oaAVvo1QpGApqi4ws zc~cDtbL_^i_o>NPCd%!)N;1xM_Wo!;2{Pa$c~cD%3bHX@(*=uT zwy;v~LdJI+m=*`nq#*hXn*Tf6a5Q={QI6h63r zp+By4q7tjR?x*K%9!!#pcdE*`xQj*H@G0s(BlW8>aBwnDQU3(NfLDcdQ z`pzL)nLdMH^cg$injV~X>c)QOiJ0nnQ|)&~8?ns-t!SiRYaI21h|L6HVG zQooP+q-G1XQ@60XvxflWnWMociCtFlnHjDU7gnLzY*t`UxXXVcTdsD9UchM2Wfue4 zg=%sN2=P(p;Vg!NFu@ozn@Q|q(n!6Im0qYzpVA74*a&IOgIo!==duHWcF3$^!Akxx zKZh1bB|#q_`K?|*f+7&gM(SKyLe6?ORgw&F;*1pT^$8YpfC41SdyQEy{F<5G+R30u zwGR4!SbOuptg7RGI02%m+Y^^6ZZ&GuAXY=&LPeVh==10ki3OFqV5}B#M*)qZ5=fqq z>-A#Pf>;%?R!i$fRniC&!XiOjh-ihn5m#@_s%u=RSAIlZfB<_x$nPx#!H8 zbIzQZIdj%4q>7BOKzd(9Q8&qLNj`%-gq;$OS~Cu52)#Kc6l+z^41^UX_an=c6jEbopO%%nxV zj_glnzaM4yQ|iF8kHf^yTsex08In4(nEv5_W*m6u5;}5khJQtVFP|dc>(H#T(-!D_ zarUP*@)#v9Ex`jc(7r1}OQ}Z+){JJwBC=w=$?3fG?%piHYU)rdA}iL9roM2MfIb>? zkSLm|ej2u85XGd_9wS|QRACNjdu z9378c-3+F5vWQl5+-{H*fjo)v>gd>&*!QxBpE=r{N#Z$e$bgn4SnZF>15GxU7|0=O zi{ub4$sWn-e z%=7}wCrg{9^Z6We)1K`@@hLZ+8x-dI5!GAq^ zXo{t=)b&V!3!k7qL=RPRIP-C)l?a(ykx=9s%B~P7UO$cU5H*(Cp6P6v#0FWG8S9TB zKm0hwP$o+O%`AEds}HfT+r}_i*a=RvARebjD|d|+8y2UzwoeqABBJQk(u5)Zc_Q*K3P&fgFAM85c2I6IxX^#_ zvC$K74!W*hal7$Bf9)qAd6|uN8oxB;yK|&_`>Bu(C!_1?HBC3!tT{DQ3z?R78aMq! zHufp$RLF+2(slKkrkiXo^b@c)GC%D!ZvKgEe($HMs~2QDHCGxbppgd(lIK_FOEWC! 
z6v{FhPn+e3TXEOC5=nW%lSQIMT#1{RdLihf}2a_2m#M$tl`a{U1~RnjB}4UylG(UPJ)jUfg%BklXXZ%BF6=nJUBa$Y=J|{vqm75- zOk}UKh^6KTnoL+G?CWQS)nO7No!MiMB4OWJAJ3^I1gSS;N)#;7?}s!ih7cWJpE5&D z_*F*S;Daz_EVU0Xd|#P##aD)$Z9WXJbKh77MSSJ=F~95e?shdXJYN~+=SLU=ol1oYK={hMoO_77Xa>r(+8%^HyJ0?GSChQ9*S}_-- zm_bLmHImkIl&_bx*Vn6;Oswv_WJ-&BP*S{PksR@o=OTsgCG*Wx*Gvqio8<>it$8yH zb*ySKyFJR}N*@Ux@>ulv_%@JSHdJSL+bVuBXLIEyufNj4>BdJs7{uUWRMN%3B~N%{ z;TQFh|92v>W>hH_krgZKBexI>_dJ~0pWYz#ERZ$=L6U;?`Sfe_~*EixmIK4Iv&Ku^Et!k~y zTC~9(;sb-e?<99f`PE4tYTZeM(x`WQGji|@6erV=B$oOkvlZ`HNQB(glMv@MK_@e? zRrGxC_#2NNnxG|?I`y|?H@#z`hd5AxD2Q$rCa00N$Y#_#{st3EUc17PC2B0S7J&)- z3@~6C3k=6snH%_;%g2fW=1TE^qRb7TG!VrCb7=xM>h~+vo;9#2!s5}&*Qi%~w@`1A z%rkk#TUjyOA;ZF`|pB<|!?y&l~;qKi}kIXqXhtM{_wid3kXE)az4XEVDonjM|C5Y7+B zv&`bDd3184I3Ekn^>w6^4?0+1DTiklHKG3mTABSMqsmgBPuQv5^M4Z7yV0TM265GH zDNJBLiy17W^=pmvRznMMxovTLU?*6~t8LT$WGpoi?pDGsE9TO>b8WVJ`%qrxd`oVW z&}1cq``B*jn~~73l~7-g&|na1Du;okGj|+b@Cg^4KZHe%)2~)HE?w07g)_Ah(j56>tWpW894EWmp0a68eBQwxDfru@&j3TWlhxhR5z|4&W7LdtrB13Ma90Oa% zAB5hiO?skF&=YOIrH8a3+3v--99dz0N^)(~YOOm?tQp4zXvZR!FDTx}{YIvu3lT2& zw!eW)A*l_mkW^=6-iA@J18P_hn1-7RaOK*5axHO?!h46Xn24mP9hz{46oB=z_Lz3L z?>d4g;}HR2duA{=g?osR-T2d@%$Q$5%t2i;c>v*Yn|J?y*0DQoZ@(JlOi@WhA$1RlD%xGD!DYB!?DZneI&W(-EsjzNFbuOf%Vf2M1m_W~aX_FLMZ>UT`*H{g&W&ZT z3-&=^oRiruT+TSV2fHxla3c72VSH(3yRe@U>fSCKp@hCpWF7Ldtq zB12-O9L6qmWUvdTmjvxXVbm^sgQ<2%a9r&|70VZ_#jA+M5>+L_#V)*zOd+weUN&~& z>~8JBi-jy+kM`hRK{58=;otRO4@TybSXX-xN4VO9cRuUZ9r+c0dX%al~@jhxd(!?U>(=ML*$?9$HF${l9xuo#zx8iLCNleF$2AqiHO_a5d|geVkaK)>A+630t?9GI+0-~q#VXhWbumf#bN*9 zU{uNXAKIcqQ2=&8?p7city$0V1?@yOdVko7I)n%PhwE2tv;Xjfboo8ni#r6x*o$!| z^k6S`*^hPZYA?Rr*RvOoyWQFg)-hDMtB&z2XY9qCXfy64WowuT1{iy>`nRIf+{qrI zc91wqO?}Biqh4#H)OHY@hC<>%y{0Oucoc0B-61EZs3mP4s(82&qnK!>w@C5itcRDW z8>OZ=4S6Qh-89qID5*o0l$=d7<JNJf?)Yw+r7L$eha3c40$tz%IOpm!SX9NCrgg z!p4J1pjM)MyU-SGjpqfeX)Y+hE<8eMcbZmYpf`<(I0cBPOm<;^byrCTcA<@rmRhpg z*aazvu?u+_?81(yl5ZEf9DiNSC4P9om`I@;2)ZF*|2v{#7iJWgscB>|3}^01J*56sU~#ZUZspZ$ct*g6Yz^^by0C+ 
z50)G!_Fzo3p>F^tl=zCJX(evPWtQM+m#d~rg{EBc<|%Q0l$zo+@LFDKUYTaf%V$DW zcu$;Ixe3uDRTQnla=~ff#mywu3#2dz$iiz>iR*xg)@Kxzc$fAUAzZruG(-P>`d(m7 zH+p+!)LlND#YQieh}mC$FYoYCjvXfrm+u(5#`@f&^K^c&IVjwt44 z(@?w%#PpAiQ7wI2a2m4A3jN=eRA-b_y^@m4?+mHtD5**%#Si~bOV3wQbEBjt01Z~= z{X&WHP5n^OY#%`_Ta5Mq`xPV(<<5UghW8`iohnwU>swQ8TsF8eMHVwfjG`D>l#VLB@PegWKbP;gRXvDm98krg%6sV};rBoOJ^Vi5neh9#Md^1)YX$pU z{Wx3P(yRLPi*0!0nndh?A>@~SrwyM~)bF%C{7Y)KOT!S_2y^wWM83HoA3Hnnr8YI% zA&gT6gCA=fQIJoB?!MMmY)BnQQuqK0FuVC$+b@7gatC@}YfBE4ueHTeLoh)Jt!<4; zQ$O?`2#-@P@Ff%3qwp#C30eGd{_YqpJ|bohm)|F>7dBj&KHntsH)YllU8Y*=^D@|L z*%F+-jkz?RD8^oY_-nP-$hqr$?f@ay)m|4NT>Y>8-VWI7t*slLHS%EUnzXh2!7*pU z0qH6ytqSk^bnVWa_{PtX-h8#l(-AMG<-_BVlX0_c3`Wf?5+cW3Ff(CS&jA|~xE8wK z%CdnU#H!1nIAy8T}4O+0&LeBhQ?>Tblf_j!Y5%)+J9rT!Nt%vrLhNyGXkLBYaCbQ~+ghbiQnR&i$- zPK~YLa_7+E*RWH2$x#0&8oo?{?^LXerLKbBWh6WT5!e^r?)DPH4z$;UL204{wf0-d$#as8qxej<&1BE(aeUDXkA}0EMWm2M1#_xg##qJmP#>3I3vO^;eIBYSk65l-Z z3|sbxVFJt3PF0b3?|N3Oy}cFNKgL1F+*=_oZsJJ%)$s3k{JRN%n3r7vi|8H+*qnQF z?#r|(=#R+6+XY}(6T{Q&Ph(SV-wv;~Eupx84;xBM$q5EnWFj;q&={A=)PP+sRHqe% z66Hk`d5uvhiEgdJS!Lq**>rTXaqW$Kma?G|;-f7=#LO+RvCwOYR^a$8!8gApXpLCW z^vdT542{w)A$zf~39J47w_sQ6A*HQh5%G2wU=N;@Y?f#-4yO3cjYM-zA|9|oOQm_| zvEc%l@tQ@|$DtNtdj)K4Lrw9UBu5Ggf5EBG_3kD8q-tEI%f)b|F=W5Lkfz*n=#=#$Rs7V{$eb_8G>?N8+a-JqVhqB^(82<-78Va`} ztVc&9P$B8)Y)ktc`l;|OPjmL@Cpt6(t=QfMH$(fyjBeon zU;lCsh;>H7->JyV_cfZreV@>0gw_!HfY2+1))HDu z=v6|`5n4v*DMIrJJwj+Mq5B9uO6X2P_Y;~*=q^IH5UK+N4~O%??hy#!G4V^Fwc=pA z`y6N>y4w4r$9LmQG@Qv#B9zeNZ4gxVhXXYE#{rW6RhZ*L_Z7k*|3V<_Ife5W|2cTm zlnaq^pE)d@?JDNts{_GrE|6qa?SDBJb{*3`t7JdUpt4eg_lf zo(B1FkL7C-{!k`q=mG{D!1%)v@0IjO6oJjKonms9*!>n^?mQCdAhf}td}y9;Gw@CR zy~e+n@n`C~&Ykot^CImj!R|zIS(&)b9R&pBUC4ZX&n%umiO7pT$#cDX3u(?|F^BW- zYQA2EKd%}WD9q^)_e_OF^o9m-zEP2s%6zVH1@TKk5p+fpK9(>X494d*BwJnJj}mQ{JS@h z<|<7ib-YN@Y;^yiVM4P^;hA3jNb?)Wkh=#d4ddSq{2NM|v=@qBMf}%*2Yzsd*?mFb zIK!V)Sj^883Ue~keV;T;c5lcQ1_>HQG_(BDT=h{e_nzotqUF+J`YM zDsupy?puL~R|z9xc_3^)V0Rw>=JM|`jgq|9k;Q+}0Ac5=46rmnO1lsNBskAJjr>R{ 
zE(#=^&V=Ow(lreDodyUG#*u=(P~gKD&hiK1cL&VY-OBRR(j5g{_cX1vl>gQM{CiCx8EpqdO6HIIO8tf4Ccy5_ zA8M4K8~K?{nW6l!j7t$u5(@$(o(jMZOZ-OnZ7qV(S*5VhSqhkRCK@{9%<~F7-C9;; zG5>Dn-@p0ydqjAOU#o<~jNN1i{l*YDT=Aq%zml+x?w%%Ymq1)E6Zh>tT9Hk7dgkgA z2}A$Sd|(Fm#M9lCe>?GSdlS5s<@%O?|Knc=|2FXN6aKxAKPLV!U*6!0nT1-R@Eof5 z^9oB-kJS>0s$Zh8ux+Hmf^)m#OL&#SS?nE;QJg%&g$8DG?ME0AyoZLBFg~v7${9aM zVM$k{uyF39y`{WjCe|t}`R}ASBIsEPOZc;jBjIxt&ShTro3yNJy~5JbEz_2ex;83Y z$MQU`a4zB53dfmtgr;=~*J=1z;@qq-cSyNo6|N*)p!iakz6y^d{!R)@IZG5@%6W{! z($TqnrS5{ipN4birQ53E!nZzwygI>KszQhISlZ?zg)5lnT7|hu(tR1QJ5>pBPmjBR z0S)*gg(uB(G!Wd26sw9@(}`6tu^Zhw^Q^+tlh|VvS@=0d!=+u$S6IUHhy#I)2ht4! z>>iIlW`C4M#98Qr4HDtK6duWvytJ2OT1GxJx1oGcX$7T3q3JcDICY(IEJAgAafHP5H$?GX? zM9J%6z_gF&X}r{9n8H$z!3s+~j#pUtafQ+leq1OGzRulC<3(^!*Z7$%(}l_l5%k>^ zp3QK>Uulo;H0@l59~I!;hbDi98~(Nu-k>;Hj;IV+2RUIlMD)k7bz@7yhQ0$v4|rTu3+v-g~t*uQCNz8jKU)R zg$j!(?5A)R@mqJ7@~$AfOku%kR5+L6k1H(rvjg~l4&c`*EcrgKER%faDlGN7#L$uQ z1EwmJYnbq3h{9tTU#aQpnEw$PE}~wc;lk4q3QO5@6-PvFKjoR^-?5w2Ptq0y()JC| z-$`NNYqR1@+GiD(GR#p}#NZvJpU-l(DlGXfQ&`%*(ZIsL0RC*i?s-a#7pl2;FyNnB zdfwvZjsoo7s{y=u$}MAn;jhph#DE(VLj>b=(4brn){I1;j#pS3u)o66um>p2bBgZk zDy&k$?KNB)`8%a8;Ts8K)%Neiwbr=npjlTgk8s zg$~kdnAj0pVby=YtjqDhbAO|m6p@Rg5#e5>m_q3+h1Zg5LSf16M1_S50~8i6e4Hnu zAfotR!l>1*h<9I>mvwHR0Flms9DEj#gEqqGZ)ZxH7Pr;B*U9@9m-6FIG?C9pR7Ha#ciZSU!+`+w*RHVyfVg3 zC@dWK!Q{{I0U9nc@v(;U&K!4d4HquGqT!-35A8zEZglU(6Rj|ob$dl|rEZHA7IQXF z;Vj}@rufpUJ;3mm-n$&vHnfS;5U3CKhm0ZGUw8OoYkLb8zucLcsZ=VduoSWaiQRpb z8ZVAu-%J>_Fg&l_~EL8SUSQ>tp08TH3MWDV`nsJuvQ-!5k?-7RT z%-EUFHSVkO-s0BE`zp5y@9vHAvd;aNCKMhR`4O1%Lq6UqiPyQ7;tB6BK<(U_T18$f z z@Yh)jd!e-K-xa@;;cqKW72#D1izF;ncrEGNp!kyZ3WWvdLcr9`dx#53z5`D)@4U`E z`4^tz#nBQ(D+0RPU(;7IzYBryo~s#2JDs6$J|j<7SPbcL3Kui{aD~N5`W0YOesf2u zoP_P5@#0YZsFki@x=jk#vqFz5zEt`z3JcCOgdP;B0{?c@}De!cvC=G;I~> z?5?m&xJ7Zw2`^As>b6n&8fW;23QJ!1D2})+8#Lc6;ykFZcrLFi+?U}~74D=$FHkt2 z;nx@(*5zS^3mJZn!Xp{pU-J@P9S|tn@Icv4RanY@g2H2&b|a(-e2}ysDl9l_6qYjV zsQ9hScPsLx*lyAMJ4wUTubl9Mfpm8(EI3mYmOLjaEO}lNz`rCvZ*d<(ygS-N&JyG`?%DFb$~{A#>)cb! 
z^CUj8^2x@yPBRfkj#jvX8J=rs{9eP>6898^CEd5%2@RLwM+VXzq;N55ys0>XzfxhL z-K4NI%qC^92+5-wUPqd5X}HjTRpD~doT4~F|9XXm{^be_{ZR@_o$dww5jK0os;AZm>zlq8RxRa{V=2swDCQa%*ygMWT-{DcqOo77<4L z7Sjw2+n}&`PwNyG&**i)>`Go@7}6}oQ-{Gf_7dt=IV^48eg$({+Maw#pBLeY&CTtvnDXR~>}X`Y{%=hx=>k)bpE z4aNV^JjeV;-)}R|Yt8c`Jjs=lO>Le*?f5?OO~ro}PrffV&qLQ}_+#d|5Kn)5R(;%= z+KBxtXN8Y##I28Gsef~g-l1iDcoDeGz$M-tOcci36#K)9 z33A>6c&!Fc5_G{^JYkrjx7MS_1YPh}OgMlAyFf}NsIfmR-V__zyp)D)Qy*5m#r@Pg zKQPa?%yTuKqw7)ek4w)fJ-hU*r1J!CaVZ@!JZ^m#ul}quZs#;S!L9gDR3O~WN;St) zQ+kmnIj=6x!~Wk}O3JJ+5-mfsp1EyaX~G(siXQIc`mF50P+wB_D5~@PblOL>K&W*OXO49GNhmlyp`h&A4N8HS)JI?Sl!&a zxb@~-ZjfFF1~I`$cuE2{oj`Ua*EEk8!cQAK7a0iNXK6g$c^M^qvq|t-ZzM2V!qWQ* zI}sUo2Ipc(gjhtU#&C;MaLd$-KuJ0i^WrUoxeo>GlnrWW5)UDsx6iZ!f!hqS7RhD< zkTBce+XG2Ewt{`Ea$v7eIoRb&I^F%|lEp81*3&<&m(be8Iy}b~NO5+;H(WS9# zz+-6@x%k}s(lstVmm7eG6ffp&>i9lPLj&7TJ{GegmRgB&B0)*P&4>X|s+U!wqJcv{}cLts?;cP$L{G4vGa2rp+p3=2( zdt9)MCL9vCPr!LFZtBZ%&tb(*mV7q4Z=w5Pe!t0PdDgjy5uglBziGO6RIN`z!l^0f5V0Cs7i#t`2rRRom&mfzR@}Gq0-8?l+gX>?eX!{* z`%j6~PQpE02Sjh#5+$+EDNHz%a1GbDyoL)WkplOy-Nv-`sl^!7$DOg+ZqB!87-tf$ z;R;{ZHC*t{!7XV^T) zQX_?u;>5iJw-Mz(N^mqiPTOAdJxl$3ftZyD>Qx z{RrVr@vn-16Zv;N{^Is6b?#LF=w%&(nk_0pDcmcBhpM&b_oTH@)ncmZ?Lz1ys>nM2 z?L*qo)7|h4_g@ix_~y}|KHQf{ADYhkf1nQoRUaRBID`_lSwA!MaHeb|ZQ zdeVmvMCN~vKD7R?Cw*9Q_a=;C(0-KXM}WF5%5fDXb`1Y6=HDp({Q-X>)8zn2EEm;O z(5J+m)jQitH}o-w&{RRF41QDf{)&Uu;Udc%>tf&Lg~|Z+Mx7PcD2HpZuAe z&aiV{d3g6M?Kk%BBi8-qRWZ0NgLBKQj}t9@3e>|Jg4^<(oPW^xE&jZB!g;o!icGeK z6rc-fE^SG9-3CnwUfFeIZQ2AsFC~7Sf;%xZ*rCjV^AvYZ&r90lV6a#5BF(Br;vB?M zHz6*0!nnNgqoTLQuVN~@R}%FLUdT8X!3k@-c=h1i+_Hi1D+RjAIR7i{y>a1VtmY`hmkoTgbP)tM3ry?D5q-x zYUWv`-ZP!y@hqLaZ_*moY$7|;Y&)27)~L3m_4UvJTa*3XGgZJfyl|OfHE**F&LDQi zSsSiFf1Gq~ZHqgnG$*XL*?2JQtR1;w@?EZ9z4Yrlm#U!IYn-3EgQ&!!PG!X;Qw~!C zlm*1&*7STtCoQKCkGM6DH$39|BE|CNl*rq3vcNt3E2@j4DOB42{-5ErULP+V3cxUAosacjG>)Yp?P8s>~YIv3{{VyTDNFnW?I z-R=(n8G@d)_6?D~m87i|Neki|^m#`K`oz5U@la)RJ{aPfwBAfw>rnOX>-@==uneXu zUd5!ExFFnA{S0S(5wvARD92)!1fC`YIZQvvDjK#vAZ-1xgIQ=9A}kby_N_eCinWq| 
z!X_nQQvNE>N0@j~l`v->o5{U=leCXC8O{{5eZ(nmYoCYPK1Ro`$tnI@45qqI0bO!W z3lsK`0(#=@SILbtTio7kW-Q5^Ala;OH*bbHJCcFUiCb3ZWx1Qc6Z*sZnDXp~C(E}I zh%Da{JQ+V#PH39~+6+vO#oMWt!qeI;V#!lHhi zNf^5+^Ca_R0dfyZ=wd-cM?&rG_g&e*?_yJ0X)c^w@}tiB3&4$AlW8b8l3^{(Mn#uw z>IR3f9Tql=a))Or&H>f9b!M|EOu|XE5sNJn?H2|B6%vH{w;AksW4bG#Y|y2YE@0kV zln-`FIWk>1GUy9vr!6qdydcBHvH%e@8*1F$Ju4S|@T5MSkZ|b#mLUBFpJdq~0EX zMACZ8E9f4d!Ig#HsZNyLOk<|y@*1IZeMy;36se_ARZ44BqIzpAHgyzCKC-I_C^9Al zSUzJ0>D$karCtTGxIHl!sj?@W55?3RJE}jCSw1s)TT-Hk)4ZFyA1(QVh{sBVi3eOHtq{ULbSPo}6dZ;0&0;YRsqe)?y zUJFeIeZ=guF=0Aba^)0|+Z$JGczL&K-Y)=c`S#!=GU7bkkSGgZCmnmQxN~Y=eBcsJ zI)(3(X_ojKkja{O_3~`Dem#e)EEzp`XIwG!MeC9erXdqfO~F{O*K^o0Gh9e4%!MgN zcE~$D3^{1eIIhr8h!%1uqS!<8fmR4nXv-i9oSZJRufpsZrzx$}TH+wnv^-o{(446L z0rD^nI+cJOYcr0e!7#!=!D)-7o&`CySRs;Rqpd^@=V`v(2}DCLCNYKk5mM7dAkwmP z4^TxSP@Aypa2WG-h8MB%o7wnnnHv8W#FEA@BCEP<{3nr(Y5cZ~jsKz)x%*Um1qo@d0`_R!I53aR zHDEPe*j4C6qO+!_Gb?LwA=vA+YuW~kkYY@y{0Y-SvNafyrmY7TvpvIYE{%=c*|16L zQ;Z&H7TlNHSp;0!FRzrMbZ$S2Bup%9AB-fP{pR)sq#Ll>-3^83T|#&Zm1C92983~w z0ZSG3J0=Uw>9mic^P!ZNG13~`mvTLuHY6a_#*B&mGWH6bb9YAZO;o7tJ8Ri<+f(QU zQD(Btt#^sMkQ{Jp0K1F$Hy?kCn$b9J)u)J;?z5x)He7|~P7>y9bZ?Mcq~dOW7zGYD z4yJKv*^Xl%_S6OU#IoYgc?%XO68j%~;F2ASkr7UG(#U;ycsGWY!?3!LBigk;ya zFXQPRh8W7nBEEc!7mkabHW>e8;Kv5zUD8-97})=5o-53=8Bda_WJ+x}_i==|e*C6*p!L!YPJ>lD%; zS<}0pjQPj=kZf<}N%2AxCf+=h%seHt@tNB2;Ijdo<`h#4N&^VfOy~O3On*U8beicF zB*-w$#OWm1=q}StCkZ;HneKs=*tYql+I6gwPL=(f`K8@igl(H&s(w$nCTtC{i`Q9! 
zf6g4N=_geT2qHCq`&l8MAc{5W}H|GRl!Y{Kso{B`c#k{;)C zBpm7SULRPGvfh;WqIwic$15|bmv+v@w_};enpcHzO!8LZVZ%{z>ZI^bjKL%Ibu4uu zue5SG0k|jt9}|F!34;*n4+_9T03$02PX^!-0r>m?d3fw(*HhAgo$S|k6O1fW+8B&6I2wT|}_&I()n9jmzOSVUpR{z*1 zcE>v;F?mA;7(QV2bJT1$1m=seAzpB0H_805h;VZy+jVZ6h+fNlTnNPMFJkt$8*zMA zikYH3=v+-!yrm=$h#0c>!c-0gBbIs}_XTr7jEy&-IpN$c&C6vlnSTN{;gN`C%DCK} z`c3uX3M`fL%2dRWJraj?Uyy_!O1TWp0#z|S={(a+mN?ZNK*M~Y;S4J$(}p@{Nz~5$D`%{=;C-JU%cIl#i*g_%@Wh*fN6xeP|SDAqHK zjHUi8#guZytG~gZW;-Uu8iKW&$+`|5&qP6Gbny+YM%_(tILH-7cthL4ztn+@kd02l z?!z3NyE#D@x23%C7AYwi8XeDm%-lGnsTTYmMl}t<#Hc22FJQ$uuBnK6)YC}JAJmpa z2el^)FJxSU^%!jjhwUIk+$Yx3Hfyjg!MwK;-jB2bc!iGxQWQH)Yxus(2a>NaW5CxC zCTZF;-WD~ZF~laeBUuL&|><{^PO4_Ha}0uBfDLN=-kvv7GZbb%uVG!e@SU0`7{>B4117F`hh9(3Wiz*Joz zt&F;`8;SLx3+G9Ece?P(ib=iLU8HTXU5AKSeM#fSr&<&uSSPs>NAGhyW z7j|RCenJ=8K~!{sWTU!p26J@o7T?#dW_h6tEKDX{*qg|r3xeN+E_@0GsxFXLMqO9} zl%90qcfyPAbYc4H|En&XEcptLeO)*Xsuj_N72WH?w=iy)AyQsp`Ti@@>t=?(5@;&+B$QF?%Sow+F5m;H zzY%*!-61u|objKviej(9YNxl=9csnBz?ibE@@4*Hs8B@aW!HaZ`)^D7h|Ci|mKtM7 zQ{WGbIzmIFK7(=Y0lQ?^z1oAc7j=<#@u9hOfcu`h_5Yp9{HU0}265{u=J~PIT#>_x zaI_~ji?7PuQBIBk(1;Lo24Mt1gziTGK#fhk0~2r@0We*6V$R4| z>HsO>#AX%q<9qci<{Qc3pAhrqz!5QTj*9tXs3XLD1s#86PRAdQOk#c`F-6QV(CL0H zqYP-q>nFWTUjHGa)PtB$llptsU3o8a65{p$`(?525^PKGwu<>M$#-J2h`F@oc~F^% zn6FjS&giJizk{TYh&l0Nsr6i2wI?=b6!WvBKI2-Y_?<6FmX)Bdt1&vTj_sP&{?`t) zKEJOKPrVqc`6E6`)4nqr-=CtE@RM-=c>CvQA*{y`?-a}kw0}-KeLotD2JP=)9Btlr zKr^&2!@}I?dM@0&irFZPrAjEb)&p^QOb?E%tW-Gy-45wQ6+s7Akapr>wS@@2uOzf$ zsj1+O_hEC44x5o}EOj{oxkzM&mBGz!uNZxx4`AtF$PNxq&6psm=kuYh^=I;7|1clu zV8&8eX+AVZ`S318)Of;>2XTVb+YMVw$U~d3Dgz&$f90q6Fcz5@C%S!SDA>>E!&>QO ze}+8#531ae3!e_~bpE?ZFdpgo{U`%_(FTy-AUn;mJRjOkPtDAPQ`9uN=qfMdddJs_=E>Yi}D zn`0^NR5r#STJLiirJW<{-72Ou)Jf6|2vbb62TJI^0F$cXuf>v?(onrO!#t8^`wuXd z?w?k|1Un}_*@7w0y0%#@!!uInkeUlfRKEaIVX$DX75g5;l?Q zjHdvcu@-<+(E@OeS%4A`C*xS20E@b4}|<8VXcAcJwPNwynllo2z|zl#m} zcPq%97I(8ex48Wc`oTnc&jjvi0(W8HizYCeFW-ZVyO*K$l%e%^gRu_blL*rtDai2*xoHVAHms+MMH5fPI1+(n#wG6ynYQ0AI!@%<; 
z1@2EYNJ;ZPgvb{f;C7WXIyo+g2-+=B#ji~ClKR$@1ka;3bla(kKh`6hmoq{IDG zCjJ9?-{Sto#IH8-x8a>7TVmelaxalSj!dWrAnT--^F0o%X5`mC(kWziJ@F+ z;)_hqV@!O$i63d=^YEU)LF*~^b9sDO``vhY``r@Ok6aQT9(NvK|Afy{V$0hKxFA?U z%D)cl;kj@`QtKhYaVO{0d0Z5F2RqET-3=|wtrT1#Lfm6WjVYC8CdGy*t^sg@Hn=3@ zE#G(>-H79Dbt6m#^KfKI2&XsP;dQ;R6w%ZwbizeYf)vo;V>sGFCGYP|`WQ zB58%!>iYsoAV+#|M5h{=jHu`T{u~ zd+K<6Ate_G6V`zzRrF^gmEMZ5ggrx|oSRVZ@cG4F%+qPg0#?#KtqWWBks{4tblkbA zBI%q9&V(s!Vp875bQ(Cb(LX?QH`5F}4V*bsi2ojZy`us`!7X!T);$ExoPEL>R>e?# z%}O68iIaqV(@5~5%Ah`npseJ;HRB7t@18~Ko5M7*r}#ao+{D0d$(PEpV2XSb3_HhV zP3101;*jFi*pG)37nJVOs>-o_CY-i8mY4JD!Fj0AN~-2zTeoaY41711;?ZTUaEU|$ z-R!1Nk;YmUNrcw2vJ%xB^RYlD#kD%QpTRkH4vtu}uNrOXB3(XGxfd^^S;74~cp}gF zSOx*Fk9}tedaU(JH7*8)9njo3x3! z`)j|E^I{2Wi@6n`9!RAlO074z>$Z~ZYUey~6<4vHN<`s)fGRxfBCcD%!SR}t^XB*H zzMUI!dh82t?OKjKI63!&S#kmyE7WXEELLM^Yi8AGl-MwM?US}SdNy2EfNOoLRO=h1 zT7Q|k)?cNr^%tpY^L=6k1Yy6@!3ilwdqmoW)MnkcYry_9;SH(WZv$13Lb@r$8rlEt7MVl~gh;tbj`R`WQL59$-E zc@jn&n|KE0#A;@W!+dcwHuJGSO(hU}oF~Mlan1^u1wiB}(t%qInh~GnKD_Ajt-L|v zCA`3!rR4Zu;)2g)bHXG*Yt<0M* zJz_SgEtYx$185MY8sBtOU**plvq1fpdIuXF&y1UHqEl4*|N8IddP^ z2RQt;aBSCd5MRoX1%liv0Tc)u)3L$cLd*LlyOfQOMXhioACb2qU{fG6Tw0fT~i{8mWw+Beu&%YLH>_a%e0{?mV zS6kcv1kiz%)O>hu;CyL)k_?3}=#U;KQLA z_;A7y@Zkb<4RL2;RR%H0Bm#r0wEZ*IdnrWUulJ+iLJ~t{uSV$uqT$A;x8efy59|W3*@{`ry17>?XZd_U8UichOZA)YP1Sj%pR*&DwJ<7+Aa^%_5 z!I87^9c&ne_H&^Ye4ff0!=KvHn9hg6N7Ip5%Em_-7Jw1-KAyq5d-Z(${X=3F(t<5w zpY6J^6P%DBH8XV1#UP&^NZI9qaeI1uGrkW>`w9;vWLFcm(+PUZv$NXoMM>irnf`zp zQJa5>Qn*Dd!s#e0j1n!4djJFWk>adze-nt<4q-gB^rIAhz56wje~CZV=odhA`w$y1 z-~SsgYZzl_z7&8LG1*(d@a?-d9*a9EF`YaHs{8q5K@vM;gr}(@KJ1*-CG?9Hj=`Ir zMj9;3n~LSsRiL^F>jm_)B7Dxy7w~8-&eF%NI1UNzJdNh0Meb8VNFGFplmZ9q&7peT z#Mk9ey*NoJ-!n@2o>9v8j8eX5l#*^wlZj`fpzPhl9=gQ9<*^!`f=M`M!#N6XYrP2T zcxBl5W1oVdCVaN^(rc<5Wsao=B0gd767Pq#i&TlQM`{yyM#AM=8*+hixNd0AV`by^ zDS3gR9fTs5`ja0Gxl>xl#rt(62d;=sxk>vONn72FVy5y*21QEyc($b1-0c#g6cVLh z2Rz_e1lBzp4X97+Fxv>7%6Gp%K};jvs@LbQ{JL3K06UcOi8?Ssw_X5Sac zLda>%VrxfMgllDePQ}Bzml$3i&g6W-$5|(Ac;hflGS5nX-g-Kc@?;@r2c?L2GJ&7G 
z1aC?BP2D6Ph2&-Sp(b^*-v>$SAtu35V2?)QD=+IWv3P6mWbCQVICf`D<5tL|GcF%r zcVx>as+Z*_u}EmWNxvlFj4yCum0kDgCCvTPD;# z)t#P=QWoNij5x%!5&Qk{(Io6ZgnPBAu@JSvH`ZB`cUR=sfrC>i_=tJjf{KT`$SPpM=gsAMT|wL#jLiY~c=e~f z(+7|*WuKs)C*)i+iXSV#jOE1)0my7^ zWoiFy>n*Y;Y2^d4gz2)|(!5Fuo|c-&{%~Ie8!*Vu z%LT=eidm)964BX#4ox@CEMgpJoPV*P=&Vw7Q~9L9{hcVH0U@k7WsjcpX1NE`o$ z!(pS2_B@d(91Hi<$ykjXnbtwAjY4~tG?uy#$;<5HlGe4eL4|eplBQ2o$7uq$(tM%M z-YQ&_d}I=E31$`D)B1mvW^Fh2aZf3xX+UgU47R{_!MLKbfmxO@s@xY?A?ZW6$%@@6w*tB72RSeLiahyNoC!Nvx zGL9PO((t(Mw+pE@B6}gb^#b}G*t1x}juf*{)YI&eXpZV&(t0pkcS+X@>lSM`y=SX+ z6);QdXxqeQEeqH!XoJYQtyKuN-A3VxYEPiwi*)21D6fCbvVdKmH<}(3_Lqh(adp@dY?6xsWO4 zQivPc!k0qmN9Z@XUpcTwr$D+|IL}b;I}O$xG9Xwm>A8 z^-T_ZL>jT?BUnRNAM|U;F4dQe1EF-Kcqc{N$=U9Bbdl0M0xj5N+0h#qI2m6CU!pT% z_rdP&@FhO~AT{f)ME@aRcF^X}L)?J!Kjp!z`vJMm=frHuVTEpl9P5Kbselx zyhUV?#4ry$g&|!#t(t`pS+w~WRNB4rDGs$tu)n1F#@*dfPhfy?+2fq8HS@*p=?t3j zPYD`=EKQ%A-O+wLj+EACqJKev{!WIz+naQD0crOeDg~yQcg53vnZerw;=12D6SIaf ztNHhe33&u)96-HG^qKs7-GtbDd6c-52|drh8~FD${+P{eh-1Ba5IGUSAX=}#`ghOCr&VEvCz(`pT@;I&jqe655PAb-5b zdl@+bk!0!Zfrt{(;dO3tAmI@bw#K~#c*H+n@r7tPVRYNMiZAjqC_t}B;@7!_fw27~ zY>m4Io}{-^ATCQ`q5YLGv!(s=D!J9PeLkL)!@ziUF0RM+2D3BwgXmc)-ja<6KV^Zz zY~xunn4N>bA$IOiE;t_dq==~CIr|@VFV`J$Fl8hLulqj)E1h)a=HqyiT!7$>V@G1q zaVlLYU2~kuseL_dHkB)m2ISPf0oiZ{l#(gm9gXKOr!UsqC;Q2mS;e(g>;20mg(g(--;CLUb^ue12%);5Y zsZ{_sZ4$7?2kU(Bb|0+w!9V-pOdq`82WR`>-+XY64?gaLbA9kxA8ho&=Y6o*2bcNa z3Lkvg2U~sc4If~WK`r!9Im^C@g-#$K=>w`P_V4e@| z;e&mBa6cc+_rU@mEcC%6e6Yv|kMY4`9~|U^B|bRB2jf1N^ucl;9N~i_eeirAtnk50 zd~l2pUg?8leQ>-FR{G%0K3L_0H9lD9gSY!&y$}A`2WR@={XRI`2mj`SbA0e|ADruh z&-!4af;db&^?4s`2B>=?rY`eCSNNeX`=PCV=o^0MT0it1KeWvc{m2h>{m{?+&<;QJ zD?hZ;5B=T`&6<)fZy$g#6|7LMAG(ttn&*e^;fMD1L-+GT^Zn2QKeW&fJ;Dzy@b7!W=COm^@=NBj?uvO|ABI&>{MR?a%)+*_3#Kj2s) z;1)j(d|a#Eip`9ss_~q)bd{}CeU$c1r0p!|6l%g2<7CI}DyB&fv64!3ZIEcA60M99 z-9w3H$uY$UQKm@~{p1lTXBeNdA zSBjm8!s>D>_W#NVB!V^kZ-{nk+Yk+`wP~zWU9^PzD`p8kF0v>eq=3#;LKN+}T;_s1 z-5+N03aVMm558@57OwSS6copeKs;eJ5h>|hip>mxf!RYqL5bMD^O;YX^(gb<_W*D- 
z9tqu*4>5}-7B*Vx3pFu6;e~X>z^!X}_+CoflFL}joG1A?8jQJ3c`?dMTJ0==24L9d z@~w1vEtB;zp(s$jdh3WF1*~MJ=J0DwWvs8PpRARIa!D)mb~u4tBRk#vBgl<^Mcr(v zHz%VGGpq>JzfN!(q|TUkU3ecWTbso?;MB_^-UTppKr@phoN0M@(;{;8O739KJU3Ag zvK>_jyM-!HsfoFRd-8O$nvlUPvi~ zc8ZQ-*~Tk)Ku3v~V=4&EB%NOF{J;DRm9$tMTWe`lvI~z-W^33DQbVZZ;WIfDPiYHp z=q=ZC;CNBGd{jkph9SPzLF0-rpEBCAsd3QVatM`CJSZ)6k6$%H6X%-&@3!hsK-5kEOMGgsuE;U!_EhRVa-sCu4?!S zqn)M-C6GxCA5{X1F#oEh1Cz;9Epe>E+Iu{xh0l z5{iMQ5I%I*6oaI>8dx$e4)?tqO#$x&^=t)GOBb*yh7R}$+`Z3OySXTc?`E_zp_R(} z2__r!2Qsrs1`5{NZoD_~J7}gI-mu-2w&xI#Z6}@;96u&Ov6-^LM15B zWJ?vZO);6EyC!phiMFbz+GBNBoMMA+Y>D6d&W@=4>ttG27x_0J>8rgk${7}35$z2W z2m@?2ix`r&wtJD9pVs=O4Cf!f4aSzr@`-FUk2|MspZ>JgN_<*tF*d$?pVnGZTJu^g zbvHdTbPx!!k^YU3sKZ`6iB%1Olwu)EnTjjFj&;g5PKn89Gubn zJUzx69+^!qFd#$@2r;Q;yC2?_)^{;00R>m5)XOccdi`gLBnuC_ljJ2VyWJp+ge0F) zFPUify`lF|*b`XEQt9K-XLlHms+TRM*_qac;%OD&LD%MUBN@amZAc+uxkk<*$kO}! zijar%h?%=}E=!6Zk3`aR#?*x5o#p9H-fx*9ve_3oK;D@Vr8wHf9efY!k5VfZk&Uc- zjg#i)=R)t%xdB;cDkU1};+i9}zMgFDD(iQFq>(9lnxGbU&I!x#Ct;s`*U!}ZLq8lullako<<)t-W;_fC8fy5UtE{V-rj!(@^Z!W1> z9!pgqSY$OU?WG9Tu5$+2rCnzhDx7X{*r^T`cQ$;_T_+I(qE0&5Zs#38!(RX6&m!f* zJtO5^+U~u2k#b=lqPAw0JjkZ;BQiV;M&-frrVf&o%^V?*mP~!fM>h~ zjSm3>Pgx|6?9(Igxro9Dj5#BkWr=5F@E*XJLCFy(P|A^(-}GeDWFaw~))0Iw!kN{C zJxw#>jsx_uG7I3(PMuGR_6%XQJ&&x0H_mka_5|9`3@jWuqKy!DfRQ#6~lX zHNp|v-mpwioBjoq(z+Q;!D*-NwlMrvvClMwnM%+xZIEdnL#3jRBzoNprXn^_)AD&9 zFyc$idC9hf+(a;Bj#cvpg^7~#Ty@HwapGD+o zpYe%cJjzdALWGH^x*(lEy4)*N7se_Uk&P~B=NhQtqpAzUBnoD?@a>9yBbu;ms%pah zw{cMODWxwhr`Li&(t01PVPQftJ_=FL0uf=WmZgYl!THSHs%7plcb=jekxOTBvd|F) z1nbADDrjz*Rbg+EwPy&I@wozU8LGf^VO6N_MioA}lgS&%{;j%rCT!tDjU04lTyHf*; z$s5RiRRclHs0K%;x>W-bahl56;MO#$d?qzetl3}y)qvO;)j-kCL^LFo*gdL&Vn1Yv zGF6Xipy+dmUN?iOx~M^&sKFU|X*D1+#5oIhM`Y{`>Lt$%Y5+<|SBGe*fx&XSr~$B` z26LH7|5-%tq6UKTXd^LNYKfRh4gL%Bjog|Qi^$!n!M};wBA7(UtOl)K)Zj`*ZzX!&45o^z!7yN;H#l|Yv>Ff@0+!dE8nj8C zUDY53gNk%(5e+pkSZz@?xB^%h9<602{bv!`j9=33&RK%-WE(NCl8T3_L3n_0yyoUA z7LmJCgZ+uwQcKM4)!_A8RSl+2rW(l6mbAz6MzuFIU|~XP@F~oUuLdkdR1I2bh%Cvy 
z2Q|PrI-mwc5MGQAG`GxZ;E=36L%57DyQms4U04muyHSI4C3yqcuWBHO8P(vBN!^VP z4kQt$se=uU88lWtgBtWUScs+?5IYmv$y0>bmxzX>b{JyajSbE+*oG)m^`r*<6}^+_ zbu+#+#DimlU4Q{KDA^&c21JH9=Xa+DMRLxus~WrrgNk&8h=v*%thG@!cnVlhgDmVw zg&Gh!ss{H8#*?|kXsIP)CN-F-xp4zNu!!898vKEnn7%S7nR|mbC#o7$-P%S;Aza4il~fIwF02Oi z-KfE}lDvWJH)8`LW>kY;-Q1lTOq|39Zz^GfThpZS8Pwnc#o``nssXVxsli~_ArLD? zG$d6v+k;qlYOs%D^AHb-GF4A%&~Xci*XF~|2XUr?pHpc@>dR#Yf_MMVVTgl`z}3xt zfe;z};%p$tfU(ryL5-(rOagxxi8Eoi_x8_>wjUs|qrOX|+_o2TURWl`-a|P!pmf*F_?qs76j4qaq>6|WROKC0%vey8 z_86WYVR=I+@1RRaqf}tWbIz&TLGhjPyt08G$72-z&WAwr%4ODlL+=4!>!(QkIVZh?m)|JR z?*=Qx8C``qqpJ{SbQR)^u0ovAB{bTyIiVZg!hn27%Jh0_MYtD2Z@>bfnXs4W9}r_; zRv-Z3Ou%rcT~RnuEHz9q>w$tr9ZXyKxSlOB*D;xe7Lc6@9I9}b77~++7NC!kos9s5 zGl3~UVK`AN^@%i?2o*4yKrn+cv`SE9z7=95)&lm4hgH*FU6!DTeZm53e7<+65C?-O zZ$h#^PNDIm@3E(r;K&+6dCKt5Il`$uIOf1{c!cWZCv~Y}!0i|I=5eW{zy>kvLJRWP;l0f48#ypiV3>@k=+AHf-7R*mf6#U+l-c@c3oG*-o%QS ztnmg{#5ToJ?zoIAVy__>O+}t1tyswXHER@`fvdAvIE`EXMFGjcSi|&kgnR3=Ovfn& z?9NmM;ch4yC3&B0@m4&Ml}ME{Hb3rE=DP1c1Af>Gt0em`;FpmE@@T}v{qrpnS;+!8 z4H7h&K@mYHKsmVuldSD-wX8{S8pvDjZusqWXe-`0-q^{RbJR}PgHcTRp0!!)8a>o< zDC_ZR(U8K@>SftO20lXli`DGe3p=HuD6whx^JW~agj;ooz$~*n9f9{rIswPZ?JuPJ>YE|41A559#BP5Bm=r}IsPVH{BWGuYz_E1JN}W0mP6lX zf=c|kw1rvxBTdSe@$9AWGDLx`d7>0BFEv|`737>A5xPD~2(MwF@sy&JAaa79X9i)0 zxd%_C6iLH}=9#BnNBwDoXLuG?71LHgkDC4yk+>`k2b*yowg(sU7Qw^8W|Dz5fHT_7 z0?Zk}EUg<+fD0L3;0aQkCVhgn!leuex-1#ofDw~K4dwtVj<1Gzjv$WiF~oY$b#JG; zH@%3jzJnP{#U!D48gMc4+PTSjQ6F&5U4AX?f%NdPhTBApyo=R}SHkq+x1wNfkKQ-rgivlYeSU6e=5TJ? zYi{|P+oxlHvTkpS-II-f8S3@|Vv$>YH+6ejES^`ln}0gD+F)*7>NYl%TbH_Bptb)4m`tD)mW>6zle62m zyUCeo7+6*dG%KZc(nWXC$or588gL^}uyco)*kH647NQt}TK!@0^W4R2^}E;OHOWny zgej%JwjP)bpDA;aGEFj3lQ5-8GPBzxhiZ~4O~RB}?xb-PMSLLhfqHSz&_*-Uu2nV? 
zU~;1ZT{WuCbY^nHbmN&!!GEryQmU(F<65B%ql~5iO8=4YnriDn8to#sGuSS(fr;pP zM5A4ZWm-@SaS*ef`Un41F`2+%Rz|(f>xc=*)I;GugKsWCp~L$O4!ek=vd%r}DlkYV zwnGZv_iEPDVL`wfsX%Zi!RJ>=G;>EZ%M+bRSp*ET*MMc|y4T71j3EX=>*-R!V z*8|JUFI|Z`nCh7}cjmTbOH<5do$1h}n!yCO_WG}Q^kgh#sf!Rrlo*#NgD!eP%)5!2 z2OD}Bv&8H)sAIy7%8neyf=40e>n_D=BNpbK@i5|07sWhZF;m2@Wl%Lbc4#D#;D-rTp@y^MGNrkZ=9%4FFdknp((g|8qM6nS(Ya%I8(V6 zpqo@!mac+xu96^E6V8kd5gg$G1y}?}O>lZ+%>GD#-I3%qCI*R7X4NvCZmq)xl^J=T zv7Fu>QvX!TahQ?dX#phVhBaV$X$E%2YLSn6X0V=9f;;zo&{^V38h$V{{r!JA{(@OI=px`(k{ z;~fdqA$1~J7P7B`6KKa))?8Z%^4P(K>e3y>Qr}_{>;I$e?E|YQuK(c#iAF_ltZ%Vu zjW#Oy(grIUD%L~<@5LL9iWn=}_=ad}jTIEdmy5}Ttm|TY0r3SIUyJnxtdd3%2#QJY zjc8Sp~R~k5sz8PcJkxoeZL-qA{7e4>Sb43CClhU)*s=O`i)%l z$hN^PE%L3SjIrziI$Rf>WQ1%iGYz!@s0gG>(Ne65vIGX*>_g{nLUqm=hAM|!9iXd0 zID3(mvd$&LF{ra-uhbiXisAqFhB`-PrC!-9^>dUuM{ZDO$zG`+q}0ze)H#SN^=Plu z*W5tKBsso=I!mT$Qwy@eZ#B|!LwrIAG9D-bKKnNn4crVwc<_Wq1H{j8t`17 z5q9D?k_UC-|;Wl0L{*>%QFGe!P{oVa>zcOvy zSG5WBSJ=J1hgiiec~+D?cY?3ZYyH} ziTqwx;GL-IHW^FP?O$s)>xSx+o3MU&O-kuEImy)TXMq6gpkY(L+9<-2kW<;~U5$Vd64k=QnZwaaHv0+m0BL%v=4|KT*VOddRr>ai8 zU)UYlsalmfs=+L~Ds3#g65lhR)bmxT*+&^l-5%DRBjp>2EN8-9S8**WIJQ59&uLIX z7qp<%vJ7V>GZgR!V8ID@6AN=IJgBPS5qq@K>QO8r8-=b4GxZB%B78O|DTPL0%Ril> zefE1*=yT5!g;u5l3jGRNrrwwgr782@rO^)Sg))fPspcR zlzF;Vlznr=<0yc^UnGp*)D<(E$#UtNBMQFI9?)^yKSOYy6mQZV;Kpe|%(^)u_e=;R z>luRGNW_`SA56sN2x+%W1{yFKfRJGaI@Z@XouW#rRzC1rz)g!oYN(!_1G;P*3E7Yic^c61|4hy9?DhF+A+Z1H&r;xbYM2t)X=~DeNBHl zP%6GE5V!qNvG{X)uyi6DXHXW#9HN-x6_W)#Oo%qE)tk8(hq;A8$?i55jZwF8@EPJZ zl(B$B4w7|EhipL2=(p z!P7605{yQMHi_B&<(VhBfSNHW5J%yyAIhX4^3wO%I$_h}c;MGgL>fOeVIy)}w8pIB{E ziEI=hV^I68Vm2!#3mDAOFwOqN#0j-NNnOB#(%@qdlx8|*ph}Z@W^HFu#0g-A zsrYqEQRgO>qnkGh07?xZrFq7zA-7{CEigIol_om$he+K^kf+(_WVOdES#09E;Bm%% zN1bx4;mLQu5vA>no<`KQn5&%2aS(Mx-O@<%PzaUhQ=dbA3(jlLXekF$ayOAR&w8_I z=~R#ZajLIiT9<`t6Os`{{aR-TOR+L|Q8$~CJPH8?JZXjwEgUDoRHt~x>Fl!%HLni4 zs>vwR#|&Y2u01TZHDmAlU2!g8ZAn$j0Kc@Ac(;Akqu}uTal*Z-(MuztQ*0rOuMQQomHGUu>wG zC4W$#)GPH8r5;u4EZHmdug??S|7fVolE3UgsOgpZGbjQ97b$g?OdDBlR*E%9$E=`Q 
zM9hx_<3-8=b2uA_a1c4Th6DOe{0s+CMOA^`r7ZTXbEWPQDyluFhQx!}k02Y0KZ$i8 zJZHRiFRdXB>ng=c5DRPjco?(ybS72ZaK)@wOcpSh-O`u-#VBwO z2eE5~I0$7dAd#`$NPsmcFbs*1u`~y475q+$FJ@2hQvtAA&8LFPT4h$CA`i$Q zhb2tw>;?H01>#4xRUE-8G~T^njLZrOpg9>Tql>VCo_XL8qB8SI$)3{^R++1ijT+if z15~D2RfbldrZUGXRtK@L!jFeh1WF_gb0@`IrI;*WFgwF4v*K*ji~-6+;jCcIQD8QG zR`5Vnm8Nr)D2=if5ZLV?1EtZf5!TX_=48!g?GP*Z2`kMn$%h6^H4F+Z>x)YKSGi~F4Hz-T{ZlF38=+bkrI1A5Mq@Iy|mZ9d+u-=W8 zPD<9Rny(70`C!Tv>-?YvHSbV0XV0W5?`BjCESFeV>c_( zt3VU`84OCwGp5Z7mXA;+|MqxMa%C(akrPp!r-F!$Kr2nj-;p87Vck%2auZha$2cmf zl9Q86B~K^`%9Y;@RLQlX>`FeI!WZ>OH`@ZN9jr(UdTUE zMmzZo0KeTGR_FmbH1~iG6k68A5y^!W`fZLTINy)yH*}E5Molv!iw6`lUolz0U}jnx zW>+9aOroR|I&Cg6V7MyuK_f(=m9c;{icS-SmVww3#?lnJgHs9oG!nJZ686IieIGE4 zLX(qBg`P+ROah=;xbg+-IBY$w-CvsVHnK*NWkkk>E0nUmpqWktN9;v)y`M(n;$Oj9 zC82}3|J~pOdIMazB5p4MNaVB?v>bOC$8n$BK8d|n-PDa(L` zp#Py)Q$arqASUfFm}x=(jY_kSQbN!lm3c+7DjVvlBy7)aK^}sBmgqHb-b<*?*~3sZ z3;T$3D^WQ6r-1kPEyLp5j zPeWZ6_9f_N;q27cok8A{jY^#*(*}-KWP{%#q$B9B6cID%Cw|)3!x==hrV1jy5y2Ut zgDZ{Ms#@_psxB4>qPUZ_{!$u*J*S!#1OxvAr2eS`S^vRvsQZ? 
z=8Q~0L!;rk{&)hFA z(2n1L1_uGQ2h%}HCisykAKCKh861W#r~3pGO&uIY)@P0k3pow79m)@(PK~IAA)h_e z`Je*i>?V<@?TCD>CYDPNbs8ikGoHU)iZ7jDw~#vH^=DzQ` zMG%jk|JAQi|J(ZgsSsiM`hB?w^(U;~|1dO6>T{%+xu;pbFYCnnAFzI3m9~DbV{oA7 z`auIRSk_3qK5|xiu#SF_W!&Hx`~v6Bx_KBX+Zk5bD`*DJ$hlBiS&_zN{l*MZ>Ug}O z%!kU&rOLtaW*Elqiqj07`3A?+kQANO-~{>2%ulIpPf%^A9I0w+4;8glP6Ca;Q`%y_ zLXDi8rnZ-99&3e)&JU}uR4y~kkRK>@>(_oSN>3z_JRhf zaV86emgl>J{=^T|id({RB4!QC4?7Zqkg!}N9tz7p<{A`^XJN36StrgXyJ;cz zKQ(_YTE!51JZBotq%s~I4Wmx;Q4tF#K(W3Oh`sE;ukf)+SQg5iN#%W=NxhPjUE7>0 zlST6SN)+g&f}?$GRv>HT&ZJ}esvyrVf)>O~uTL`ynMBJ03W{RH*|9&frD#N2todiwInOy09b{zmuIt#d(8g(X( zcW=A!tIaqVFN=uiG5DFGAdZ%6 z-BCkmH#qCkhTmk5)U}dj?q|l%0@3|)>=KAHd6x%ZV}ZF&yOrA!!q;hcqqReHV?pRT z?L(0a6^rTDuQZ~=f1Kz?hi4`F5|S|%{1Vs7;2cvMBAE*@B)sdLW6JdKBe z1+D=YiKI~fMw0yksMZXcx#>{7T}GBXahhn)!^Hn;erAR3$NCjo4N{@j70>-FEnHfe zhDgPM63*o#^u6#QAjWkqJD_w?tAWU#?m;gFEjeG+o{LosKYi6Yp7~B=yH>=32I?=1 ztJZ%xQmTHFMd*KOT)K)}T3N48N|00}c>*zPPuWQ~s>*)D$xyhcb9W1qLTy?PJCAFj zm|aVZnAH*+)LubF-9#g?ZAryp&UNSra%D{?*qS=)G2dipioY;)U%3<)PVgJ$9JZ|K zik!B=r)0){J$ia1`nox1?`|IZ}5?@$We)taJ)VS9ck4Yxh5Fc zOXw-T?{y#dH?`g%zq6s0=cYW&p&*7!HmZ0G61VZ-flAS*=|76S`6yy7u2w7}D^~jN z`Mm)wcxR5gxG)Cfh)+b*Vo0L_DbXp;$_V+17#wEMCe~=r zl+{sFqjigj%bp=il<3w}=k2M;Ifx~r)-7xYXME%`h}DIl8zi|dOD#mT1aYkke$ZOi zdM#;_xe@B^aaKKhf;9Bj0x@;DgFT&!g`)@zCzu0*4g$uPQ5g&f%ZTB=wSGO&b2T?$VN!?y;O+PDZ>4x~cT z1+ zi;d@jjPWoG01K%UPII@YbY^($_}CZ+Dlu->Hz;l|@#Q&w0g{(hWv6+aA+OsTr`%6LcV&)?0hJ`&sIo+xLT4JqQ$klu z{wIl=)iP3TNJY&Tp?t4VGmA%(Pf8VDAR&ZJ)ZFk3n(ows^=JfDR`ddSv63AwT7Xfx&z%ajR)%=-?6BSy%)`$1mF93T3z?(+55|9)!R zJm#RRA?`6qhTZ?fxcMu@T-@aw3dY|nul6wSD zExiQEPOwWgZkDgAh?_q?K;mWzqq9Uce+W`(kP7uWuyEYG!*8giI$5?Fq#kEhl~k90 z2i&`d$tWtTM2gQ*6mc_=JwJ~t1qDsM!$Iq3rJF>8nNH()l!)2aHAM3!5yE`Yzkx0o zEhmYNZw7`ve}KhIf+Zu#GlAhp%cSc?%a`v9*txrgNug+YH9VOgEfXUUE$?1L-fT%l z?1a0~bcmTw&^Lt|lVOY}H6Wjls0YP`6GY2&3f6Q*4o1tBy%axYFpH0w1s}tOR1BMn z84Q(6LS?;Bc~c3?UeUmna(uG^R%}qWXNdUim%ZW^W~nRUDr@)H{c(+h(Hv_j{Q*kE zbLHwl@XR+;IJ_o21r>RHV%M{&St91qJ`y740$kOVikLmwZh6GK`PXQg(^Lp52ysaa 
zUK{kxWkAGSq*z2&taRsozNkzfVkSNj{m!jK3L@r&=*|{J%-i@x{fL?6Bgy}u zOT91^-6^D(){D}GBIbYim|l=p1$2#=r9@zSA;yP@(w0S%f1%PKOd{!2#M~gvr$@}? zeg%k_tAUS*IU8aD5D_tRQIr7{nFuV*piQD`W9E9*S+|I+vRr)fxMI#ntjrc*Jk+(? zKtz4LqBPUaDy)&s8YN@N)w+ee^%){Ju+4pk$b}&9hse~ONb)=AlZMDbAQ&S58(E(8 zXR+b-bZSx{L|*QP$T(ecuaLqArs>2^8|0^m)=qB}Dl@5O;Sl*+d>!^&Cv2UVk>pq; z#_SX4(g;~bk3ytHzK1Hxe4q$(`YA6B<%gruL`XDdjcN8GW3qtAc;wu^!{W6libX}K zJX=_79m4!3ZIFb;V~Q{s;jTrwba9;C>j)1){V@SCSxE&L_ellU&E<_V#lTaQ2p~)p zge#hP-OM-paG+aM4R`3i|Lb*tGdKC%DU-6rGJ`YMQtFf}Ky#?jY?3J%$lfnxWm|a~ zZ}K_I$%{Xl+F4R9OBb>dl!iG#E0rB$rqYb9Z;I2z4QFi2ue4IGrKXX(3_*%nJ@szk z>12o*6xtbr)3nNv`uG=L@QSQPyoUYrnOI+_upehuMe>V@k**iv-OPQKG3&kXhw7t> zP`PvMz?gj&j6xo9RkEC2 zUO-6>QnJ1s6yt7!wzvTl25)p1L2*HEZWekj-4f3KA&a!aLS8CfOXy(iYylRKDRLsi zI)sm*Z4>sx=+`JC-KW@hwFVPJ&f|04U!qA0^9QfThc#j~7VVIppa-@ePmsT0E1rf4 zVqtp{(&h5@J0Rv9R}9h1 zNxD=l^MIgCKn7Iz^1UD_-0z9%CX5OTV>`|$>|q&|e6crM!CSH)UH&uNV*Y1LbmG=( z(%IYrN)ww*8`Bz>hiQV>I9ytO5OBJ~*0Ahgk#ao*0D`=x{PjJ>Pq@I810?P`aEzi2 zVbNY7MHkCAF=MwmVVnvj@A9)78h?e*PBw(O7zH$leV@50XHg~}>g20^TANC>Qhq-6 zEW99GX}(s6Ru zyafHlB+4P=FV;cJ;4dW0_ZQrRl-6-0W%Yb<4Jg20IJ5$1aeWqgg+#>p4`k5=gyp>M z09Ge-;4d111!PK{$nY1!hw&Hlv-pb*g+YJ84P9w$j)g;jvX>$qy;)8Eg4RA*{Y4eh zgZ^UVgInw`uKYRK?9*SIEhxrc3Y76m3Fx7WHs)gDva}~bY@czce}z;PZv^)+TkAD zqYji1r6=qvUP2MLim&cZxr!P&G9A8@Hj&s8 z9nO`ws%=%$&{3>|vwkuHAXWv~dX{l&e;PmJj`p16EB^4+I@I89KD zfA}T%?A1Sv+m*yJ{X-1t>L11~?bSc9ZE1}vg2WL|NTZB@m=x~5x#}Ngq2b0q+_1CQ z)TA)A`_wQb{Gkq>fQia@md{RT3t3ppvRKq(1+daFrh> zRS7h7)WpB0+)EMr76EXW;BU3kfy_pw2SLk935SBe?X_OXLK7xvy#?WVUskvF{hU<2 z9sF4?JJ}hocc$@0%r^D9ghTcI&A20Gq2BLylJ*vc>pfiSP5SjFv|cw{?-g3F{P>@# zcUG2q*X~c1YmzL~Tc`CFh3nl*>)pYxw?ymh4%hps5L{f!Y*TMEOT8a!y^@7`2WY(m z!}Tr$8v1v}?3A24v7HTa?g`iXxYjE_eQWCFO|4-$U$6B_7V5pdK;%3(OzlyvcW=Mm z2CX+I%;6PU?<{7Udgo@TcTcTXvQY0{Qt#|WQX4wE1Jz@7i5}8f!r#Fe+!Ax{;DjmG zHjNK@%xLZEo^g@2El8@m!6Xg(#<{V&AB)}5i8L(eY=}Mr8ZKUdcaw*QqIiA4dmZ#m zdE2x{&6_joeB>=NZ(Q$+>@@9sydsrESvSC8wF)wK5|U9-bjq?>h-=R*A6tG_`Pt=v 
zIO};_T2e=RFf_G{a}@nihp1j#j>IC)=q&T|4%`A|e%}Ell9FK?kd}&y9lwgFo^xy10l4oU-G`vp>Q%B>e=~p`QT1El^VXk`zW+V9H%6 znI8*Z1}4ev>;1|@e5m}&19mI%D_WKxd6*_MKeGp~szAQ;jN$^IllR(PKSOl-T&=cWkraMA{nPbeq zkgj7)&odljF4j5g=8iS;viV-#(oFUifJG`@YTT?*}eR zy&GA^ZIw?jhJP6|9%Rpjsl=^n?{_o0Uc7ztW`DjW{5-M3r{~-~7i@+ZJ6yu#ukGN1AB*-Lbxy)AeOV_T zQTiHo%sn^4-};XmesJHWRhuU4C|`;`a`)OE9WS4U zYJlT!s^Xie2s&9E9t;kViJUS2hDHbq-yORyTZIYI&v*P{v%5d^$t@tXCm{Kqy}S7x zF|4PB7;b$%C5Er3H$1uxE!dV+{N{oAoDIFns6ac z-sef7>k<$)ar|m3(@Ug;JxU5RO@hY6EDD>5*@ZoCTTj>}!10C6w?2eD09tMwL)4aU zy%hGak2phkf9^@7uV07|>bUHF-Ln|Ar?gO+ZTxTwMcjhDgYn!pe%7NA--yJn zE_B|w2HdYjvx6a)^LWi8ETv1rFj+a%gP+{mCuY3{Bjz_F9Mcg)E>Br+gqNGX9!XvS z6$w#04@ofB1O2Opsyp$15{@cFal!_6dp?*jk&Ito<(AY}cPve|Nce4YFNjdRG{3hCOd;&ZsD$!sMqm%c`C7J9Ol%IZy$)>lPS40L;(7TT znbx}f(KYxQOIrM%g+=h8LO4?Q99Z`KduqUMQ1^KJKG+t39SYoemAXfkTZ{D`SzL97 zZ&cr5V61yx?~%opV{Eef3@OIeB29+W@GT+rKnAH>k%CUz9(y5hzN8KJ$ezSbI~ASA zO%bai$tWbB)k%;E9nhk7Gc}Ka=1sMx!7?U_HRWy{uqNzu;D!c!N)5f3 zZSkvkh8^8XO8-Lt`~4ZRfd7L%oe$na)J{ecx`NkLo1;IK>>9n|SL|sdnE8KUPrDBA z?WsH65&HIYo-Dt%YBR*=J*C!yKY!x1=-k)RLZ|9;gZF zju&)8bm;Sux~Cz1{Jx0UareL2&r2AX)9vSJr2HiN;W9}&sdJEmPT?MWV?P(Nll}<% zxzx9xuu=SN2Vb4-W@;O4ha2%T#^SmFswJ1oo9#2Yp@u5(wCqj?0Wng8}7C z+EDd7up#l^S;M~nYdl$p$Sy14KO(~zl=TCKH-U z=rTgr5W0}iUkIH=Xgr})37t(SMrbslV+f5RG=xwYp@Rt>L1hY;GG(0+t=AXG?b zD?&RG`kMdxQr)9~-PMqdvNwD4=L)egjTnGj>Hk(B zFp5}AM7GtPKtw`Bypo9b8JxTDbZ78igQTo>>&-L4ME1Ye)=LSay!8LMQ-N1s3h+M<`3?9HTJrKt8m5%&$$frVI? 
z<=xf%_YwY)nYa1!CjWKt-%BR>c{~lB58Nv}qV6@M_%|Znhkv9f`o4fosNugyq@uO% zd3?E!h*t$E9g?DSnD!dKpSZiB@etB1_TY93OV9oeMUh^@^urXEUOYfyt~$GKL&LnX zuPQ7!&nYZ)niQ@f9qK>(_9HNea-w)|k=fVy?`5V!D%2nM1NO;X{P(6ZC4?U)ic!7U z0r(bE)Y3G;y;fmbUD6QZU`rB_OZu2Q&>jiElUF@(<{488vvs)w`^;vO2PZ{I*Y zZyFqyTdr_|@FIn|pzb~qp!12=)6Voekxqf!f~Wf^m~>Ywr_zFjirGobd4SpF7n%}( zkmqXmbUZzYoIon$NabGs8>U%8szyn11>HU0klIT~taWz@kjM|f-$7qk<>6XZ=nM?d z*~`#*L-5wRFB|-2czP@~o4olZZ*Cy(9+US*pvmnDa|POc7BK6d06h0*Fy_9`_FAe0MX9({}!bgzOW&C#t|53hfKV-05zWH7yupUpZ zMW0IATK5+u2yOlJYspJcMYcnl`vQ@X@k$_L9jQKQGM>_mNi4C78Mm2?y8{_Zm~k~T zjGg>hVTL&Fxe802V*r!a6E#hQaje2Q#JvK9KtpgZCei&hQw;tLChWolOsV6FDOQ^k zpmum5eIQ{NMiC3X^PP~E0xJk3qb-oO5U|^1G9C|P(0>y7Z6oJfk*9;tEbS&??pb1) zp{fQLY~^^RBCQ!q7*s|F(xL{h%;c5g>HbC$gy@AJx}*rJ4DOEpT?&jp|F%b2r&8I zFOa_*Vbro?Ag!Ol`{ow8e?4yc)2?XSqylakI3@__kQ!dn@^(|D~R}_cD-=0SYhGd9|{Wx4-v*_bv6pScgo93_hg~C z$+hIY%`G=IT%`r8D9{QB!L3)RR5D>06Ry;R`Aj&N3F9<@$NSu$1E$;$0~tO)`!5ha z!9Q?a$d1{=WW2_Vmo=k>89B&szg1GwoO#HkPW?@hL`Uv61z6PzscNk|Es#Gskbfy) z_wOVK>7UCCYmP&TW-#2_AnwYivE${MFADZO`atJJPp$)j`p9tVj0POx+85vK+^O$g*wzh@| zhXHmk(S%7%*p~@oH9_hxQF>BOvBJ{HtJX`K1RkL2^T}_H!qO*Skq$i4qlzmu8Wk4Y zH?$tXZ8vqYZO;ba`2qL=!0h*X@Kg_W%39%~ku^_MOtFEh1Ev0~a3yi})7obd9;4}^ zlA{#vpa^~kn6+$Fm6dXzDJ=Q#17`kPf&3Q(`HK~nI@YW3>sjAJnlAO-rEn3`rz*TrG?ZOi(oKFV!Mj7V4Y4TnP zoxujatMDWub|{=+`Z9$}nBHt~nEt531DW2aa3Rx2vwloCZ_>0*<|h;`VE*B19r=U@ z8r*8lUq#%}gi$`OY28dKQ@Ds_zWNgR_9B<*0}XDG<_~248_Y-f0h-oByc~nqqq2-L z?Q?}i-kl1Ig1kW(o%^z;<&b8J!gE<}g4QqP#sj9(RBKv2^KVz&5|)c<`h2Eeqv_&T zY80+!`uPfXQb7t8U&;F|g7N|O=uoOKykUm&p5!VGuK1y+& zp>P@L?4UShOwUzVOy4Ox1Jn`Stbm8GOg-eK&r#MxFzg95{{$l~04>etI z-c(rVELT{%;9A9(vX>Etid>{=MXZ0e;?}SmpHP0IOus;38OQEaxRU7=3MYs^Md50u z4^dc#rHd4nI`>xCCH}fKsse1&Ckjs@yhnid9TaXL&0K|Z2rp24k?X?>i(F?bEUx7i zg=Y~zsc-?|s{%Ne1aQt#cszNSp?uCFY!QZ>$~9kjnW*`5*$zwPC-oey@O-9=eC{1jj0^r6CHgEJH-L7e59E^@k7)1{A} z&~$Mb4=OBuc&EamD;0__IOPgU{|`}Eba7AsXYT;cP6|&V&m)xPeDY)DBR24bEA6Uh zx{;5>2)io2@M+{DJbbS*5qu*bk;8*ZN91#-!s0g^g+(S)0(DJPSjt`$z&R^`bF#w1 
zLlX5eE;%|-Zm_~tJzl}rgkrq3eIMG6a@vjDT6T@_dQY}){QwBkzoV1+A5-J ze>fbU7NEIppx*C4k#>qs8T}D{s6X(7HLUmFiX;5IqHvUQ$yFR_>wi_IVk-|Tj>u!S z!qQ*27&@exR9JN9X_Td$o=_UnhYu1)8_!a_25Gh9~;G*Zw0i5nXvfVUgR@gstbh1})^rlIOd6 z7V9&yM4vaB=hiLyexG@MZJtLg)%2)&-fhxPGVk}BXR~=eXP({Wxyd|_c~0q^W}c6l z=K}LAd|vbK27b(Xp=&JOV^&+&=O+CO!}o+26z?A|>2qeg;_d#5zTftYJ}Z{#^ISY* z*0Qea<-M)zb(6l{Jhw6QYVc%zxvl#At2|%m+NVw5Czxj~>Z2S_T&~ai@g)5h%=3a5 zHNDk5ha0~40KaV4Ir41l`cj_DaGtvZhjH-x0P}H0O4s+G;&10_h&t1nvESvi^y3y$ z>yt?GpZpr{OlwCrk1wo`Mh+6(bks<49`Jy$9JnpOCEm>}l#aJPaz^{a>q!WB9R^Pd zWZ*5Gc#@&F%A>~u8F=jz_aVKrgfl^noUwF$q^gBK!RMYUYT4%g-aJn+&r#-iES}>U z(C|;nPcI)+ep=jltOome%156XwZ4r``MjRZxAT7KaQFXX)yT4Qk}ddo)_xRA-U;tT zaIWFnvI^_VSlfu4CvRL(9S5xo8N4ntp52k;sfX!}BrMWxe=dHlCZDf}0NWff9En z7ew2Jb1w;`lYR$7f8_Igeg_b^Q6gu_0Q@5fvk}g1d!HTyzmk|<_w$CBbyUQ zER+7WRs{~lxNG0R%ED*-_X+-CJfJMx*^(24v$gKWA89L&qWr-5Z}7yv_Mkmn(ojAcTfk3^ z)_y)7maxZ-w1i6%Y6jDxJuznl^&d(iJK>zX_e)_EP<>+w^Mvma1!jMqXD4W(XAJDxq0IDT`xe`Xh+QY zAg=qoQybgoiC$ihdqegK-wJXsv+UDxZOHYwHsl*#8-kNXfeRjQ6olhTu`nNXYI5CO z-~JrOU9ZQrAvgjOxL)K`C4}R>*W=odEQE$BA>3V|gkJtPru&uPnHseOFLJ#1cN84j z>=N5~wzw0|boUoYa@S$ZR-CAJ&^Kg?zZM)Vk5j(aze}#4E$$I==~lZLJjB}SBo825 zUr5w%5&@znFXXL&3V;Ia7yiZUr-a`q4*TvCUHa)(ZucrQ$5!2_b&Jw8ceVS`73(&~ zhhCDHcviq9qqWP%2P}|6xeLz1oxy(%{AcsuRQ!wD*Ven&0$_YM1T9-q1}@ymB15&a zv3=PYjH?vJwdN)v^c8JG?EN6p#`v)xp6TO5$R4(x7PN=Ev)RL|L;fGw!%(${^?%s{ zd*~0`EwzVdr6aT2!{f~A%O36!1m7O=R%W+{Y9-XWJ;any${wD5D_{@v@Uq4BaDhVQuc5ubfq_Y7(jA;*~3So^Z$!IbiUb_J-mMTy3LCV(SOwE1wh>rQKDf^kd@ginzln{#N zy3D9C<@14%AqI}lS_z?ed`riUIsK9DKYgH!Q>)Cx=336ore2fbdr{ezeIQ zM2w!=YB^lwtY5#rtybbDfd-_~3R2K6DX1_7?L`PfO~uepBX!@Qt_o*lejMBFgW=8Y zV6MX2fLjgoV_>5^u~6cLNb(oVaN@-UG5d&^wK8tCvs`S@rqsyw6=J3e>!X-`9FQ<9 zl&8j?7m*`}HwX2^@%t+`ERf3M#i(7Vq)03k#N&7e((|!F-&PF$HgZD;lmekdzFldRL@+H=h2LsLm~AM3pn7xKulE*I2z!dYaj|bWuz9M4c%N zn>fA+X{DKJ(pcei3|g>}hk0=O9kEko%M-yu-kp!zNg;^ut*CWbA~tBFhmZRn4~$#u z^l<5`@ z?IJ3qMn;k!qc1p*i0mY4h6l9>5h2w#Fc_KhSBqL;_$IPWb=&En8nsStG0eoAWGAt} zqNd=am8V97feH;m`#TMGw7Cb8!{7Cy8+3^n_@dT=`QR@lkr^V9n;;Qh(n}%XdU_(L 
zd}3BBFPGmVI%qv)gi?a;z~F&EI4qzmQH|&VV#8mDrF5)hPAEp`reYuTgS+o*z0%rK zSZ=S#-Be*6Ud(2vB*WPPzo^?0PF=(8 z^zM$~W&-pWqwRhuwuGL@jbK#sV)i($u8G6*ZBL29sct3D74AZ+C|0F5&|_n}#u|C9 za6dNBcjUR+z0Kg+@?7mcZ}1o5>0V(HAIA&#evn^QHiwyK$jc`87AdjXweW24&hL8u zH|{JbfCiPH7PG$R6n4tz6g-^odDqeTF6c(1)cIC#jdUk(&Epis)||I4Vf=|ZbXxGh z?*nlT`yglmME7^Aoh5myuuWWm_~C>%fo)5q{BR=WhZ8A3oDe?@pXN@J?j)SM65?ktWHXaf31IxZ=i0@>LLv+LQB9 zDtF>o7^c?PUHd5xI;MVksI0VGFEUsNvY?|#U*qIXq=~Bp0n}1+>-portlyoa*X$_% z2a_E9<_1zLaHm3J`V0`iVM0EfQ!zpvhbCb0pb=dP$CQ4p>VH^$E~ekZrB+U1vR$|E z0?=BwfgDj0Cy>X5Mffk$v3HL;#}`D0KF3K@`uzqil7A&CSt)ldcD~GTi<~W8i{bt9 zQr3&!r5ubaZ@0@^^R zLT46L;8I70eF^w!tvUK*i~dB%cB~ z^w>ZY$wgm@87|OzdsB#}ek@{``vF>$p%5wExdW)85ok?%=x`wGbxti|=eMx)JF|8E zw#1UoFJXW7*7=X48q@imSv!BZu-tp+za;s7=YRYnT-s3aLy_dpk|{n#Rs`GBVSwMr zPU2%Q`ilQbTaVFk?--DdiX`~+5I4Ic>cdi|Oj zn??&8GpK(ev^3osj!e_n!%Nwp>3uGpjoR6<__bn0EOZO*UMt7~uE>;EYEi1TA4C%- z59}I_BA)-o0usv&e%(D6Lm5VPb$)>*g%zT6a7kzdJXQLveTvYWLH{T=pVoqlm}w0! zq+U1DhXj<`xH0ix#$Q2l?pd#r3Gr}gyzijA=XWJ}gPXmGB6B-mA@U+hz+u8?s-)T*keHEc{8T!1*;HyVBhYPxngXP(LDk8O6#mVD~c^ z-@JsxA*R@&c)9}xqs=Wc&qDJoz>{R!QNr|_`yDIYgnvk2FSw6rCUwpIK+{E7w;`Q{ zU(z&Dmg|X&F9H)6=*s=Wcs7jlSLXxkaWU(M^3%#k$DIdDP|K-t>thV@b0K96@d>Py z>DazRTtME6=XXTR^$Eg}SX=*MU7BWSwe$r+1lvshfi)m4k3_>j`|saSpJ*At3)|3|37`w~Ag??@b7&yNoMdg2EVOhW80-C*hY zAQ;IK!KAMfM=@hmDN}QY1q7cv8trAPj?3F8+ z(VV~NN--ei*>8~V+}573o0K$1Ii@CC%b-RZbebG_5QK`_#EN=@RM?Iw&* ze6xL|?q;MUzUd#SdkDbbmc%!C6E5WaNXako?r9=Gjv=_-O0*aw2hAcpZyUV4>uTtI z!{Ciw=fX5`7`qNwJRMz-_vjB?hj@_z_ueo6ck`Ni(7#N7_P~gjzU$VTHm|8O>DT!9 zJLGKI+;tHM2kF)M<=*Ru{221xC)?pDUR{oV%kd9A_RWsg3GlZk;0)U9NwQ3Oh52=TI;?CS^ z8a{r86seeZtVKrT!$?lFO}?$+Tz*R)JH4*h_f>Vp-YkUa^j7*wt*)5v%7f{zJeac) z59Yd%2Xke}gNp$7U*8{}=IV-z1Mon=TzO*pApy7~03RNJO9Sw*09+P;j|;%j02~j% zl>vBk0ImwaV*_w?06sqej}O3qB8HT}1ur zKW>C2<{r8EZI+y}rVM^>@aCuSKj)eAn1qjE&X8B$hX%pDkHbYlsG5PsXQ0mKbMkC^ z2Di1a=I2@SpH1FOaL1SfHR;L_)Ud1oh*%)#hj2;kVCVwcK_vP2ed)8G75(G=e>aJS za_yK^vJm0<2Ig&m(LrBNS=x*YxLRJvr!_^L3xZ!C;i8U%hjkvwficIOh1Ia93Tus- 
z^2ySqu8XpUR*9N(L5XIU7P|31CzI|iLdETZNb(@0$DJn&NZoppmEgXMNZo-<=IZ8b zUW^&HW~{=)J#7iQ#BSjth%HID<%$Run1tXS35Yma+Y>2QJ|O&BqDH0-!A&S}r?yiT zPg(*4ZHg7^5oSh`bBoC4Lh=!<{TdUek8n3f-0I+kZlLau7v6Aj^WN-a%wWF8b$~w; z98RP~5Z?6p+F$BqNyvs5dVj|no!jzRqb>YK+k{gJG(2Z6B{HW?wSv!jdzm(G3rx(M z&=xj~bEoR?;P~M_Am&e;%fcbfJNq$T=1%za8|?>&{UFomI~SoZY)!T$nHN`K2}F7U zvMM)2wCRnW)C}YtsaJ$Ccq1pe?AqV9h_2IjV26SOZ2dywP@g z@CSW|4U|zmtu^fBzBS*Olrcxzkj(%p^CQXE)dDI(8x|l{&;tIR)dEfhx3GX5UdX*= z7C_XHd~7Eo8RGX%yF^ghzi!YOHfMfG=XixS(eWnIu- z63V8%ONcDmEBJkAZv!w@dr2#+_Kqj9KD2jNno6(Q`_3~zReN2zeoN%zYwtBM2B>OK zds}+f-bNu2(q7_6lGpmwQQ-hA)g%|BIp=Sn|{e0=R)3?&L_Z+q|BTP-9)+Dm-=&WukztM;BR`L$FQ z(caVknN@pvzj}u6Z9$0JF)Fr z)&=b)p={dwCnAgX3Vt8jdo(ardr2#+_UQ9{lkKvs=YH1pBwr3+M5Go z2x+hE)65<@R0xTX_7Wc>pHDrj_BKg=EtN&I_l~EsYHwpU?d77}7HV%%SbH749xmj2 ziz3NCF*Zf)%!w;vXI4S&Jx)T_aO}JXxMl2&KHWm?C2C~aws4?~oms9o?PX>pxs12_ zA$A5Is=XVxM*_7sK^h_7JDu2wdx?#>7Ze!x;*m{z&mgkKy~OWBdk+GpiF-*hTin}E z`0QPKTP43Y?Y;LOKUI5gK>TFn<7@A+Foux!uIk5f}^W^87ap8@I zM3LK{LpWc8NwF#I?=jvwGJUjCVC@IJijrX|ZXrprq~hnK@NXtQ zqGV)0vmNMR>OQvvZMS45+g=3_#jR_M3Y5>d%a+Ky8?EG(l^`2u$< zryyme>+#JNyQ_CVvDUKfu$q7$kyYu&tuM_?E1K6y)v+)?)Jav|s~b@NB2L|q)XsU< z&*oEo9m2e5)wwETqJkec!rJi3xKiI?!ND83uV8zQeR{8cN(K{vXk-u{ik~D5e+Irz zI<{7X$=#|K&ZEX*TP|i=+SA*Vx5^$|>`YJ1v?A00##O$Q#+8O8w_iZx>@sAVZvSHf zectjv^!cN_w$`zWq|YZeM7j1lIgzcmuO_BFIpKBn_peG ziVja;7kd2`^>BX{;NmxX*Z<=u~k`!YXkUq0WT6g}@z7M4y-XnH1$Y_Bg{7?u4Gefcy4CC4t) zzUboD0qNGOPr{Y^5(hflowVw)fJP$4k-SE$GX3Ej?-5KCN{p z7LdN&HcMY#_+qdxWff$?qtcgzd$G=^cg)h4mvBr9^<_N}+0LwexvOd=R$0)OD;^2= z<+fS-awQ#jsxPDAzI;}cNBS~ukB^Gy{Ve;U*&LvjC$6QBzkx)ZAqT(Aj~r{=C!tq3 z-cmOlwu0o*CixysPTmBu$DP|6xuoShS;GfDc#58MAh@YU0%DaCrqkuaxCQgJ0z6vP z`QwYhuijvn*9w*3o}2#oQrA{XsvIInLCYY$1@l6rUzL`QAODcO53N$;Hj0cy+11!u z%eqrbqRupa4~$M}E5ajYecM_~p8BWn?R*IO61Qb|uW;rJB&NNX1uCrfWPK{JNEUM= z$qB%W9krM!k-D=PXIb*=E?6gCZq@_=zK-?E^^xC8OU;U(>2RQT+8+EsFSz6l}aQ2sq0L%*GqpQkbWiXR@ck>STVM6Ziu$i*NSm9 zaJaojmYKSyeeT`c+}BEU@fOhi7Mdb-xj@qO#g`skw4YYgN0hb$sVi1%g5eouvfu?@ 
zCUqNySRG9Vrri6A`9js!KHGn*$7c=#qpqbmF=)O+^g(yb7SKJ^qsu9F*CX8?UCvVK z&P8u`y?_X6Tmv`qyb6DWb?;^wfM>VJV@7Zl+(c9*GX5%p#(J1vNyw!UfxqG)v?9qy z__lHrMZpb~ft5(I7Kw~h%_1rWuGALiYG&!$Ezm1D{Zz$BpO3|@M~XP*VHopP`hvYX z04|;m+2c>tm<}!#%>A7X*XY~spLufmU*s?lJ{q56L{MHmeahk2>2k;sIs7aohZZ`O zpd8kU^OtWUkVApWfu%C!&?){XiyZp@{m0~R11ffHaL z4(p@K4Nt0&-{(eYgvWDu+Z^4p%Zu*B2oNS)S50#0)t!_9lnVANnylY>-Kr zEKPP@zQ&USzIby49sIg{@#e>F?D=tqytf470=&yOOMWlnnTZ$@+@1#!T;q#(2cFNs z4`TosGGQ?XkT#qub+2MFrUOi#{hUUF7nmXr)U~S$#}gPLc02$&*sqB3%I%N-b2l@= zIl8?CpU$7WpAA{*zJ!t(*AXiw=nI@DEBOeq(pysUs{ApSCk=Pykd9(V3eV1~(#h*}E?mYW}=FF$1r7j3LI_rrs7 zbIv%s>+dMvULD;H zG(@0$bRG7N8pBq==8mz2_wu>c-2im=ar5#iUq0l&@9v}Rt#ofkj=KW?U@x;4!Cq<` zwcKQ8Gy#=F9%qq@43T^Jau5Ig(-1jHa@ySE4aW7%yc#bcI-Ep*Wr!YtH1{0jlIR)4 z8qI%~f{weLDSWKS{>GF!j2MUVUzBJco8&zW+P}>E_Qcql|Mn%?vnKf~ys)xI@$PQJ zoR3V(+kBZ}&|cw7JO6!5wjr3wlGEm1ZZIBU=0kWvn=XA8ZTkIRmGUV_a~!#hUoWrx~0e^(Gp^(8ezvN4odHIkp-$@;t+&Az-VZCgg%LHkY=#TqP zdBOfidE$Ot!zks$LjRQa58Rva?%v3M46!#7@WeY zLC~PPCgm29S@imL-g{AQm2|3*iNjdcc-SS$jgjQl4ECJ7t>7d^u$Z9i z6HLnXN1UEbOE(@<$9SVe)b$f9q#GvCn zHXh?l7n)**|I{|@Yb6LgY9ZQ=BF`2xBr`>#*n3>uK-%ZM!=fp^mLp(_%-ctIVt+I7 zy*P#k=je~G-Z|$u+-3p-?vdxN$BtEjIUpj$k2L*57e$`K!cFaSTVdJ%nr+MK&_m-u z4cq1B{R_WU7D=83Mz|x1p9!!jjR$okU`ZX%DFDORkG4(pycf~mtZmqKk-7sQiDB5# z{Hv|-#D3C!Lb=I-K*|_raPmHJ9(LkI~c202m^6wR910# zYJy&`BWx*Dv2o4Xu4CS(S*11 z)UNfh;DeVT2t8jE{I0Uu6ULuVvg;bMy-$Lq25;_a#an8BI5y)KQN&QQppF%@<>-tS zmWeqt2MW$xX?CzH#o!4Hxb4J^B)4W!IidxjwM%XABhZE}US7kEq(##Hf0_0pNd5?G zcZ({)R#*VUBvp68EciiNaHtiFp=Z7vE`hdD4nh~dF$Y3C8eq2ErM2q_25SbzE{!4> zOs($CR|7GthEVuxc7}oQhgVm+yWPR_wX$Ak^~eLmfsri}Wo8`Jfm9Ua0kvL(%%R2-Df>>QJk*YXGsN#ZN+}ffoM_3hB3a z0ou{}~U|>s{ya{-rCf|4f zn!FvFOq0JBa}qu;q)j5ydF3{9uLct>qa)?__ zguuCGbG9*idJp8#nw!&Ahf28)5GBds_@`cV2_p-)7LL#x6Xlw!?ymDTDSn|E6a9ue zfG{k+7>ttEci_a`q`D^vYnbpU|K$S3z2|nulsMlaoa6pyqN02q(%mK6fl`k8>%Mv` z{yl6^mh$Bu^YRp4px%O7Pt^O7kbYi;k(XRUqDNL_Bb}w*&PtY0z;@ng$HR9<(x&yui=KLbuX7B08u$KD0e@ z!__*ZNZHyJu$e3(nc!$@OL?u`E+fi7q73TBr&yu!KE}*cFGb73C!bH3^0TGSY)8sk 
zb|*-*N^pkLxT*%VE!C`7Q~lFzMZxL|fh>fa<{b8RXyBj9ZymO(hjk}0ygHmIMS_pB z_T2M=&`p=JQeoK)6y?npsp7Y=fM2`}Z*fZ^`fSDdt+B#Bz?6;;`Y3MQ%Obdn#-q{v z%FFr}Te^9xcw}Br6uZ)BCw!V2FJ+Cb-iAGNBi01yw zlZgGG^t_|D)>wB|qmbX-J+YzhPQ0Ki4)Sbw)XDq05WWDDu4ruw6dop?9^xf{iO=}? z+{soPb%qz3sY2@O(1mQ2jf*RAH%i`3a8G1+=(`hQVe1(rl#U8l@N-GtCVEufF++cb zkI6hn$XFL-Hg}NQe;WUiW-GTzt+U0PY_;31w(5EWQqlO!OlyMX6_+AnUU9nwa^$bt zrW^Q>0E)FKD#vp9W7V{T_@IsM{x;pOT4<2;z0w9vx87#w$E_kDma$xJXG&H{!Behz z?Sws}2pGgqp6vYOe*N{1Ct6*9zw=x?xf;``uI(m zUaB?Q3uqm@mdGAtX+ zYlTQr>cK)0^a%?N?$5X8W)u}+FaB{iVbgRV4!8t+VC!IBNyX4j6aUn;2PLNQHS~O^ z-F^ley-xHzW^Emo!4jE|Ae(xQd3c7N^D75_@L2SGZwqtB+SY-nCsNmd;3a&>XNXYy zUAU5M#DP=SPd$RVegSkHBG}xq<_R-zSd#Kxn2xIZ;__=``bm_&HA6bbIio1mPcev-W=x|imfUck5me->%lKErGMgF_PCQ9WE5ol!4t>sA?B;oTxlC-m*L zIK?Wjr*9LVwJhMb(6^%Nwl*QycWXr|+Vz2PPiNvOEqhEvifegv!d60gs zpM?Ws;vMah*dVNY%S$|WY1}!dAnH`*$9Wdr419-{h{TfxGzXAFy`=<)I_!f;l4jQR zRX^iLpZ4yq0$NnimPT&i?I8?j->;|Ja3q}`78h9LBHW+@5SpI3guIa)@mFp*BH7Y| z(OS1GV1+5aQG|I^%-*FK$1i= zvGF(H+8-u0>+RUV$3fV^3@!ul!WMJca4;(=9Qd~ki%X)`4@PjXq)ID z+J@@_x^7@f=Ac!#2r7#{=Md@+yO9&FGVGRbxoT(kSI}2?D0cm%)_>eRnKUk$THi2Z zZfrI)Co>szb@tVD&w4JX zU7FW;1nzWc%k68n8nvg$Y!x2nU`i1_0wEq+Y^$S(;xeVvvMP%lN8HoynD$Y4_WCBcCHWeU@{#A{NEe2`xfOe&|Z20X+E zOMLKfALJzp26vbbmigduJ{a}ExDQtP;AkJ@tJV?LIim2k-I0Mjw2@2j}|Wqdqv_2cPo6W*>ak2U~n_ znGd%6;LASP;e&7Z;3^+{*9SX&@IOB2`rzk2*zJQ~`CyL^e&>TZQ&OVt?}Pb1IKT%B zd~g>ZEcC&>e6Yv|i+ymQ4<6!!B|dn#50?7iFdr=Q!Q*@|>Vt6~tn|UrK3L_0V|}pN z2haDx@jmz`AFT1g2|hT<2e0wLgb&vFV7(9CjI!@o^{kxRZR`oBY%UKlOGW zeAEZ$`(Qgj)AKL;sT~5!;KJLAr#Y9{UU-`I`9((--7{Q6k&^C%$~_d6kC@`(Rs3(4^HKrtFflK zI0u=ni5&P(1pnQE)rTCkc7 zCaXjSDMbAA2v!pB2U^T&N+7T+ub)ZG5rf}BnS^Yw!WZZT4J>9apos9ZH94{qELmu1 zQKr!;t*a3DwLi|ZwDO}~}x0Kw3P zSu+L0nJ$N=E$$PT6{h%QqmXJKDSM_Orc-PK2DoZwoz_f26y_3PQZtFDZ3K^k?<}5` zg~#zy=ky}<3n(fEnX54rRq1k56W;;ty@$F{|B@o$fGA55-OM&s4|Ab1IXE#uMbwlYcj6?5-H3HprD^kJAPI|P zRaARWi#jJ2rJ)=vD581KNUhd%){1slqm&z;%sAfV8grQ69F*jJWl#||3tM3zjWTdw z-dZPDKlPwBC3uJQsl#!r3+<34c8_sPqg`@?Q(r{PGY!Nm>GX5ozmf;U`1xs-+>9ou 
zdSEkENvc1_&jCNSwi0W32bzYbwYF4nnxutTwmxwpXEsUgVcd29bPx0bCfXu;=g^3t z@*@Vz6-=~0BX5)uQ&n42&^3pAd1()Ni03e1fVLQ5Ee#Ng(tR%~xkzt7?vZlp<`GlnaIe>Xv7htktv2r(hYq*yVndsM;l)rFK~Z>7`7DyXo)_ zyQCuaZI?t0*d^)ob4Q|CTWpt6@?$TSR`zC>JKs&5CTSt;vgxukyIexEgbSh`qGxkp zS|bLxQ1ZNaIQ6NKOLUZ%43x-J>oWFOgpdE@Ecp?F)|5FWL_W!UDdZJk$7YzLjI_O* z<9&BYZ>0^SGQQ!a7s;AIUjlO^?`ir!8rd+Ve5TtoIrfJ2|4?FBGZkY-!Za|o{@YIp z6et0XZ(#!O+$jVUL;OM-2lJWjEG|?6*(?wrdTD_P^gS36#6B(XWWk?W59z`JDP+nt zn{91Y%m&3|f&Y)aHvx~TO8Uk-U^L^_!4<`ksEmTB13GFlxFoFIm}p!EaL=fNsH4UO zjC&;MkZao*7a}u`G0u$2xWB@A>jP>09@lI(6#Qsj9P|YcRDQU9ICQz+`*kvO=aJdP?h9=(EDfRHxL|@e+_U zDudQhbir?h#{xyn4=3SKd(6?Xz(Z-;YQyPT$N$WQ%+v~5EZHbJX9_CMZH0Aeh15T3 z8b5S-6EuR1?q|PUPVS`vOr-TRYF{#|s20E&Sc>?v=Mbk^3IGH9%_SZKYZU`acrxdE z0v-b@m)$dwfao7j*gWNG+EN=bGY-gn`$qHmH{}R9>@1)A)oGyYvhUK0wjUxFgY2M)l&IWfJAsCUf&x#(+B@v{-9MEaQ%J zw)xHNz_98SBh@xPWOS^xR0*WB%~zFxVx-#U6G~u;5=dv8cMC>CquQn*inN?I+uv&~ z2R`hm10sfPQqnFHO0997Iy=eUa$Pl;MBu126B#Y0KK`Ck;!*HfS z4a8^WaOjCY7C%07d-E2bnQNe=cGOYHtpxuGA?;m9BdV!IbU4~zuzJ1I5W&b+z=G?T z$K9jD8;I;zt1oU9I!(lg*AP*aG?F6nzbj^oVlsijbX-t^w~NVJyoQ*9f>ns>6M0)# z3wkX4yD?O-GB|VnECgAr?4?q9=(4y5&v%0a6qQUsZe+{jy{a{uIU}09gtA~(uKw{Q za6yr_lo}hT2LcWWDXN)i$Yz z)<)L6HCxJ_X7%_1#cG3SurORt>~xFFyiFax_&!8q_J!DeEj8~tTd)@!qDGfq~=Gz<#IBJ=#o5B?ZJ|~YE~i|_F%Aj z{Pu7uuwW0J%w%{2k^S~?s$e{@k{IzCB2tCvHr@Vxl;+l@SVYcb3wsbVUPDZxpo5NY zTn+MB!nf6G2``;bH|Dj3To8c6g=ol(N5{XrMU2E)0lDy7!Gpj+O6KnL$`UTSH=)@v zq2riBqnd)|mfjT3BiU%ZNE!EDt0^#@*A&_^n8HX&-c0eUDF|X(Q#j{$VhWVM&kS}U zkyvXFE8Lnbg->G!>!X5YLjW`bVy82Mw-oIwL_<@F-Ip0Wtk~U#C{y)i1~(~s57BGu znaXbl=K%v|aD7k042TSEZ_8u`BedV?G;0Zi8Z3F$3`I1|z+iRx%^(|CFoRxZGQ5Gv zelyrkFdhhCvpKYvh}2%c8GL>-rCXDuSVSfknyZv1p|cj4_^P!*NpA+(Gt~?ho|kF{ zXCW&!12W?=gX0i|w!)YJx$v98Nb+e(?tPfSMX)sNL8MISXw(cexAbN(nzkLS7b)YL zKWYX{=QV=>?u$t}v1E%7l;q76znXy{rZt1p&&gy4%he1DS>YH+v+!xmphdBA5lu57 zb~?0rMJqxyG?mz**BS0KzgV#c7@|xi=oxk&jZyT0M6a!9D#JoTdG`kf%wWdn2{Rxv z>Y_7~8B|K1sjb0>Y6caEh8Y;Fm3}kWxkk(&kC_Z_AhO>KzKam!fuY2R*AOwC8LR>p zxE!HaM9yRe3yB#QOroSWgPa@H44QwNY6iz9%z(^z%-|r88ITLV8SJEHKtB60gI>#R 
z26<3koEFjC(wo6DB!C%+l=0y$H3O#enn7U(Gx(DvZ>IRw3~v#*}jFRPa(nG9sp7c}I|kLzQf?aZ|{BiZP~eAIfHqi9X2ovJUENGREHS}w0*oi6|QU{_?Yz>?)x|!EFRBZM{ zbh_cg&BVlxRE(`@7aThvks%Edl^M+AG)c!jl-55uY5-bz*w+qAX4HUAqkZHVVjSR^ zEn#t1x2MNCNEp=(scE;HZVd!%NX>&`WZ00}eLc&1-4JX@&Fjk`+JOg3`v|2zH>BnR zv-aeDoMRo>hguHSgVk5|W6gM1?e->Bd^etzj9tU-nsFu{5*D!WI=b!OJNr((9qNw} z8Hyd3qZ?0U{Y8fvuHok^ii6E?V{+6QxaLW@dPb?&3);2#O6izfMMG2RBJ(@O$H!pk zizm0EUZLAjse%l{cGN0_aKC6kXuGD6rdN{9>dFQp)M-Vm0L+ zlgvsULnuP}lGto|WtcU+D|5s>YSg~OsB}NUs;^ZnA}iJk&u7AJyOA|tUx~cJvDtNe z#A}#_$Sg4%A~UCEHV7mcIRlB!>1!7wIHb;S-FEGw7K|JvnW-zsJh{omY;JJ$K8wi@V7(IF@l|Mch- zgY0k>^*M5UEjxSONAOxWJr}O}Y^cBge8aW$2}j|$L9fSHFF|-H%7YQV&c?xr$dGN+ z^6>rdy;6|J3lV&~nrAik=jQH#{zFL%C{k={m(v=TPX26DNx^5q$c31Rktvc`O(BZK zuRh5{9Ol9ZbKZ*6VuU$g<-nY+a$wF^5gs@4BY53T)4pJ#w$VFT76N@BlzD}VYG=k& zqF`jORC`S+0uasw3`e3uZ=zr%OED{ff}J99Nn&u@91c8tJYXiZKn5+R!i=XtVp3t^ zV5C_*WKA6c5Y7aKXuh9;^{Nt76<{)fV7l9dRfpY=%LN+VCwGC-wr-Au}jU5e{VeWg3hOsG#=Fq1Z5uE0T9p`2H%CYg8-67OeiXKMPr7GaE}T-AMEZt{@p< zLSo${$|Q)2=Yk_oHT)cOIEzdiuK%3kd{2yK z5sIChrF$Qo&x#ebM`X(a-t}`{p#l+Wu=+1321LQ?_GHjDgBfT&TW^M9yWCy%O~2w8 zzNe&fd%-)UbAugEOy=8~1RQN~5|fDtDMc1VSfW6E2LgHcv~prQRlfKrT-%CvrXBeL znRN_gDhH6wm!J4OFpA5==zU}~R?`7VK&2@(qx=e8dmqzvN1a>P)hycf?7{$`U3@Lo5VJ- z&w{LicCtx2d^%N&pPej;&Za1tTE9N27h1$7zvsm{2@1uogCkCCXEI;fASb9(bS? 
zQOC-a8)qFSxrQ^3LCIC01%wHcEvBup8wPn|>qE-!s5o0wgK?`#+ z0Y;};^(s-UE@H(6**KpRhxSXwv=x&H3}(3$$CdK{V#aHTDJWd6iXIPUy{lC#FQv~| zW#^T0w3xd4>phT3u2``gp4pivG@7~7NEz*0v8o~;x?)9M622fI^+`&Cl7*zYU>oRK zXhs}zrrs^E2PsCIMb^=nNZx8@x+F4JtOB1i#PPk!eXvW#BwJY*+$RYv$2(v5%38`m zh3@Jc)G>uAowL=UsSAA!RE&a<(|lg!J7&#uWGny12MtvGV=%e;3P4P))vK5!qb|d8kozPBbI;7%r&-`$z)aosK&+Alc$*mB=5Q zm0({vl|t6Lp-A=eNdoKGC2H>W*67{ z$m!K5Ngxy}4Vn*cj?G_+#(1&6G5#tTIRN`+(Cf{A-D!-`VlNLyauLivq@LyW5S_ev zq8+HLG(?|3@Velo{EBrh+l=6VP)`^oi5!h;_AlSXZ)BmRx}c4PtTjs~F<^0t35&(3 zc0`8|4J9I$io}-Wjbc`!tsJ=?RGGvG87$Ol+jrpd&9;3c z0JCDFmu&Eu(^TupT~I&CwlBGV!PD-wNxN553p$*2Wy1#s%8UhhP%WThijNH&&Pu&jsWW9p>hED`01fZWhB}*3r5?&i{T?I%?`&ql zJ5y$)K2@nl4RyA-NUI|a!RZ=cu2|e|y=IiY;a*RQ(PojfZtz8CnCZL? zccp}+Hrx+O@>c0fVGAeyt#m_s?H3nI!>uBBH9HqGg(+Qh{#=$7doWm5X|V?)z<%aL zX|c;vT5Nx#IZ%qLqc5yM!|iyg_w$*^3qyWT=i)P?Qw~t!W*5ckAr?Akco-!J`PJnn zRjgjcWCDX(=vSBHh>6u~2ZcSo*T%q&V<6^6W|D$Hp_h}Acu=GYmCnoQES(fd}yqNCa&&{c3W%uFiEl4zXU zBqX(QUU4Jxing*IIGm=km^5shUxqBF)@^yaeEj)oo6sW>yyflkm#CmL?eaAdP4zzjaERFyZ`gYCjZEYzDd!9fhL-0Hmm(&W6qPCW*j zZ#Esy!69ooCsDCf5^Qc}V4=(dN!N4E3q{qh2P0>k7YJ0p!s#HHA3|?)B0`|mWFtXp z@(&@kO$w>6QBlxk-dCK5K4xBUz?3e$pWhQ4P^L;9LcKz%RbyOLxGiRIpwyMB)U2Zv zr7rVp*^(@c|QO z7$4mHgT@DAm5B<9HaR{B7+vs;4|Wjm;fxQMB4ZzWIs68C#8(@t$?-uiN|t_na2JxG z2VaYv(Z^=WjMU4N`Zb1na(vK}k@{j;5^}}|Oqr4T&2(C^hHDM=$= znbOlPIMVbnKBz`Cl&qF{C&mX& zrg{_OgOe1ij#yZJ!^1d*6mRtzr?p}=a}|>b3}%-U#x*`@CME_O1|_}w7#K4xk(^M?B%H-7hV3b|1CK~7TL#{o)$az(ky#C_CL0<1!5 z=A`?cY}DS+$#UQLK%9j$3n*LBeeT1VkC~+td(QX(SkCz1FY{%5pdxq4AgmL6Qo6+W zpphKP_@E7ix9=J!Npo*W<0COry$mtu7h3p2rZ z7$s=*WBv_uK%i$UCKDLUPQSWbK}-xb926NJ@URmaF87^gfkfJPY z8I)xrlDozSNoC=bS7Ln78Mfq#Tb*Jvi)f=U#by@KGR@lsPDfWhbrG#wM2rPG*-Ppn zI$`rf??dV|L~lkg`cS{r@xcq2x4Y0iK8U1f&MYhw!WTA)FRW)xrT9YjV?BB~flG^- z#|L{M8a3AgTFLQ2rC((~l|I~Ky~K(OGF8?;&JHN80IoU%7Lko&xA+x%30L(n&;TY; z5`NG#HkdG3)%w^YMXi-7ms&pyB04oq?ma49k0@w7^My*26TeDNBL}L|G9%0U^0QhZ1OAf$E1k? 
zhCxXvvu8|DeTpjc<-ZbTR>oY){2nI-qjHN0G?t8c6#5B`X5NS1HXuU`}I$z|DF@dPBb4@85^+E|r9TPl9hN8_{6EY?s zV%jmm4u?W0GA58}@Qn$6M?&a^pj548T)V^FClrA7qFKI!Q0xooB(M_go(IljiDTI- z7GlW2eTtUQfDV-zweWQ1S7QnY*-WGG(Ce?|Jed_KG_~zPeVnsJxZN9M&@X3-j6LTH zq1tePp=##%v5K7!!s&bc1xh`n)R{6P^?pkILPOom@q_w^jMQJ`WMcH5e5KBm8L8g_ zRP_2UGSp>`Ul#j^W~5#s)FUI5I#YUTWM8FNj&OL>IuS9K^z%f@E_3+mcj6#A)j<$H z#X;2i9mEqzgUT({Zk$YvyS1SP(Wu`F<~_VYX1i1N-vuKhF!KSdN@8J`Djvq{9i2&x z4>n>(1empo$pi+oQ3~Vg^}j?+3?U3kdIxcFDY(a~z=(R9+JG3HG!G1N?YTuSDVP z`wFDj-<8tqhiZiUDsuw}BHSjWXoF0RyH)07=7(K2$N?%N6YIETTDWpm?>|&!I*El1 zXm}V!pnveF|GkR2QZbppVAlCn=3~kR!w3h3J^xP*0<+$p|LEgYY4$oqltx)|Da{gO z2&Ew-kQ-U^C=K~RG;>EZ`SB~wW#m|uhPq&5jjoj#pH4-#!AyCVs`eMzL&S;p{LTMsY)a%(05uqnJ!!Ff05jz9TU)m@p^_ujm;A zTyz|`hgTdmTohaxb4lchGF3m84r5o)@+f$XMl)|jlN-N+PXUHeaB`BW;OCwxIZ&=# zA5aC?jM6Lk{z!n~tw=f=qujYZkfPuLY{N-64%md*A1F9$g6jjoax3@(NOMcq2O3KJ ze-^y0qlA;b(EsiL;XrU5ow^*b*%PiL!Mu$ z2O}Cv-HQr^QV&$6&XJ5=s(-j*1+Z=bJtQ)O3wfkA6PwNwdO3>81O{`2pXS>q0}}%a zgOX5c&sd-_#B5gCQ-_LDD`PH+^dLLOr(*OTFEO0O>;=WFb64#wc*7Z#1TOW{+)Um)nVH4;nXidmifQV_w z0$KY*C^8n1*4j4~h>;M+ZzvT{7qjg6E`Z;)Y=fE&#j-zmBGK_@0w_$t)R(6>{~F7N z)g(#^9sdedms~^CgIX_gyQdd%==d{5#?JHELbYKJL)A>)qpzET#OXW!c}l%UsWW9p z>fIuThCK~+GkFi{fsE82LK1ZRIb#prnKC2wE0p>mLtQ5CrQ_fGuS~rEN~lLxDs`sx z^n8A%6!Q>{j(??ym>vHf5cG8MR>J+EiOW@QK>QSMkTU;1mlNi?Aq?I?=7in7xQppg zvGJ054{xZ|>P=}UIqMFr5yZj*A0EcW9i2&Z{IeA^Uon}$U`|8fJ*8fT$wUw%CQ;IR zg9pZfdw7E>L&O^>V=iwn{6v;6MmAvXWX$6Y0vgS{5lwFV-r#FYGa7F|PEx%=2PZep zf&x1JJ)jh=*NoD4{G+6ZYs$nM#F~V&W~L)&?YgxGvaFPlq&ILn{)Ljfl{LYRKd?+1 z`{{e618MFr~v(1j~wv2bNVj{^ba;=i#%I(40JIPKwGj8oN)_;m@VWky^5j z9b_zsTV>Xsiu^EEL=I4ym8vq-Z4Xu(u`oyk7OOWL8(#@;MCaf^iwUvuwc+^qO5y>h z)Q@wfkQ!er@$BqxyAKe(o&76DGwmw-m-~vMC_648O=oK!jWK`7id=aVWuitiYkZ)& z(yt}wBU-g&d~K?V9Ic5dM|Sf?MNAgydqDvtKrcuni@j_}$8Nr(BFS#Pgrs)!=VPZb zdO?&tyZKZRS2w={Vlv-x-O~NJzlPPN>F4h!#<->Z{CCHsCHfMA(Kj;l{M?^A&C}0U zjd9iFWb}HW$|J-Hj<58pvwzloUv`mkjzLwq*RRT>5Dm?&gC5K5wH0Ce;!)7;6=g0I z6jw@7$x@dd-kT_78i6y{;Dr2i9wAPAd?Vu%zOD;;h(FzJ{?f6k!oMFZDy*Ei3VxcD 
z#jI3~uk+~dp&G|rp}%we`g^CI{`*-7*`ogY zpcGO+LkM(q^ZR?$mAdMz9=%y8YoRLZT(7c1G0Xj(_fy&Z#fp?FY_92bCS3C|VbQdC zP}m&CS&sHvGAFT?z zU@uWv<-}F^2fzt@u5_fW@$)oQWf?xNi3)B&=BQ8kHXR8 zK|g)>{7BM@){88o^F^|lsuvaYOw~(BvO#vX?|eo%;9QP~Te{~&Oxry_XHN(s(TQd>F(a?L{?9?$;9c(NeJWzg*|7ja)3GA<*r#pXd`Sz|&N*~zqX z?AlU$;w+HTnnEyVRztChY4(c~*KNW<)>7IoDmQt8b+Y6%hcl4Sq)z7K-o9UOfseV*7-AXhCEVShld**WZ++)~hv z^F=tv>pkq-6O35wzohO{xLo=#E5jJAv3lG@$84VH0qKe6A{cGM&iHC1&KJeef0>N_ z%X@mh2vw5e;ddk#j`FlfRp7+?cm(Ea97qHwPt3^;ao8-sjhS_-nojyI?x2EJS!9@bGpMZJ_p5{*hIj$p2%0rOs$ z*#wjJ(1ow$F+xL<*CibY+DI;(=p-MKDl$4ri6LQIhUBZDF=)UjK)9Qs^deThBD))~ zMqhHL0c(-u%_`Wpc_I)sh}0axX=HNd!4lKKr6sXC?g0lTcJ#ZIc5U z7-EtXXrM-yc-(5WVAba;7LgTex#v!uNrKf{s8~c+tnW>E(e=T;@~N0h#J+N)?d>(f zRw%m@xKtC5T7Nqlyr@~28*^Z_#0*B>lPL2InHuKFui*%h9xsuHl9*FN!Asg8ln`08 z+CtH0N#V+q7)z%Hf4>;l#3VCEO*2U-y9*i6W4Nyp`gIa76m22~#jZJcImrPgTD2Te zyJkUgWsNeSbBy+r6Qqv)SXC@8WFWJYz)l$NUlcijZe-Z1VmW{{f_}t}b(v!20E;&p zp&$f{f&+Bc+NvVIExbN@swE#j9NE@BOD_As{i`s9JdP{JV zg?t=$g(6Uqf{{-OS*NdHmOv=ETGL?UMZ`JPQOkU=Nkp<+EZLzN-gfArI$jJ$ZbpKT zh5dH3iS8-n`OF9RGYOd*Djsw7WOo!$oR%2;5H%S24~B_mLYfYUWOa$r)4z@@mI_*! z1eOZAyMN;2NppyW9W_SKB2q}3%sA3E5;h_iTn?N!Fvymz0Sj*bYWV4(@ zET^+@4`-Nb>oiS9BWy(nQSVPscP#4IU3Q~XP4~`5+tX;oF~<=pjRk+ zFH-ybC{|6CL`CPzoNa7DAS)2C2V6&OonOxSk1ZI;pxMVtru+g)`7|()roTLfBEVNWwyeM0`|GqsKSVJ4%ecSA=`T;{yT3dO z2@)pIA{Al!%f8wTMy9z?EnQ5FR^Nl%$z(i9p^gfX6kDNJE+EE*L~aAk?~Ic6)eyQf^4A^8rHe9~O_7*3y} zo2;1bZgQC~tvsa@J z0!<^4WIve+gOTke%IPOFEA*3V5f!$4{p5EFNklN6j#&=SwVy00ASlN-6Js?xW-kRJ zf0T6Ef}nfJ9J1?>fxn-8VG4g~Gpfk;hrL5h0~-d^B=!Tod}nRG(mEKV~a^V! 
ze;j(Kf;g_dV~Xr%_(YlnraRpGEdPNtOsKLl&ro&V)I>uz!Og?)$ z#zPea!WH&_qQZ6civNR-4ko2r#PNOEU%seF_E4=f01@$2m8-u$+{~o7$B>z)>krpS z?m~wB;oJJdm*7i2W-KG0)j8P?#&!=zO{c?c!9fYm-{mXtU_eZ9&>xYJ;XF|nf zkdUDA4CA8Xh9W*cRL~1S>LKt%8}t`JKxFJHT*el+Kg+_`kYd;8_z9h-gmMr!FfE~w z5*p|b`U?oPmV#$?;_C5&uZ!qBA^d5G0`jqXw8*7KqGG$Sv~&j2dxknDO0=ztxDXUZ zuD8$JitM&dNki|wU!myn8b*A=aA0}IHMHjy!k6Fv&!SMH0psa)Ej)o6#- zV9-Vnl4ZXb_{{kP71#{;NA*)d9^o7hpcY4vKX)lcAsQ3SLlIt5@NuyEJi5ME_HHrg zDQ@Ilb53w@H5oz0;`-clck8mx5{~wHx9)mMz`0v@XYkA0tR=hUpzIgOxzd?Qz45mS z8R?C`p+^Vcn{O9_XSvOAO%vv=<(|V?Prx4yt|Y&>OZw{Jc0J6|z7NV>C-ILym57)3 zq6LX}`y!$?gE_qyQRCx@czGZGruRK(6?S>-gsQi9zz;5ECb?MS}%+yyi=J>d!}x?-7r!zmEEQ$(+WSexZpJRkOVT_S2NX};4T9? zWrUF8dIfzH`LFYnx>HH5G^AF1w}x)25wCu&*&WoN;EbZ1+OE4DN&vk`p zQ@#6;sXqMFUQ8TK`l+#CDV?9XUkPRQQdRh;I4$uvtd zt@ZQ&vaJ8ejw^@%PCuzf1*h32H{iedKjI*o@anI+_lJvvU=$t1PTj9Tg{B^oXx~!% zGkfk0waDNnwq{&ke&WlY6Cvp*_5x?={6v%BjmzvO?o&buKk;U-%TKi9#qB5NQ4T&o zafgVgMxv5_qRlVKGL?2CD8Ns|Xa%v>mNfJZB_cjuh$u)f#ZPRPqysO_X0 z5I&5bs7vD~)@|_k3I8zd(_!K#nxHK36LsXzt*EOI4U?)xxcG^;kSP>*e6{ftXC>-8 zQ-3D57uoF7UtEq%68>Tg_|51q@`YHczZisY^%vLuHKV^^89jyjHQFAhaK>MB`K$5M zp(5qE5Pz6S|SxiHSLyfQyE z#cAe^y0pBXYo>MnA`~boUajbmn&vOUK3W9c*Gy7xd?iL4z^h-!UPN07BW4thc*pKn zpolu2OEV65cQ@2*20M;3t}i=o5`?539|f+`+3{scD6<`(ql6Oe`??;N9k0fV+m3r# zBR)I+gS0U<5|y;$xqc~56|`2~F%CO^lv)~V&7n+v^rX$gfiXm-v*Q6tIUBT3eIK;z zX6?DzZO`X^*@r#1??Pg!_WU=5t3ChePZ{l*W%QK27DGa(aK@g?{ndDg@Yc-h?~Ogb z@P*iOn;&z>LFBDfZo4q;d%i!%_0k$O*YR!=nATNFs?$&EVL@w^%k&JXLO-cHloUUU zK~e`PsXRa0-~*+8D-jM$&GFxvvJ0Xif&jwZ^;>hz53b)~J8!mrA9VNMsy^?de&5UzaIN3x|Cq6USw>I&_EUc?g){Zr;TQE_;jK9r0xUAsogo9oZ_S$c+8_O;1;JMk40l()y9@3riS;HVCxO?hp? 
zzVd3%rdgtp?=_n5K!3jbXukcE`BrMaz5aaHh;6pY_sdPbbJOH&s|iaK^6e-2-ZPE) z7TnVUqQOYN-BE(rB7Qq%YVgapa7;E_h2!!NM;o72JQH6Iz+b59litv<-dOD^{Q?22 z1EV2p@yNDyp@*?#GApMFOM&rOj~HBl9R@6l&+2qQAWi|y1iv@WTg>xM=4qQ}uX*kQ z!l9d9vf*S-FFK>>%%Zc3&OYj7#AZ9TT)W+X>jII zKV!W`B@|7I*;$D-BZBRT9h(6Yp&^O&vS6eL#F4&HsLm!@GEtXz*R->U6^sNVQ7gAv z<9H0wl8M^9i4uzypXsouL(+VU6)GaS7p^zRhwAxtpO?C!%~*}YRdm2fE?o1?WCRs1 z_bXjgI)t)P8j9AjMA!$>-JP6&hJ4d)dYA~#lJX^nOTIZOUkODQ99v%Ko*-X>u2P!_ z4ROi$LX~f+P@Uc8Nz~|1)WYQntSV2J(cBG{5H4|&*;IN{B!ruIN3^+;Q@~scymSN}wq0ghxqq5h8!hQaI_8Qx+JQC~a#c&nYGJ6}+kH(0Io{i1w z*1gi9=uK^?lTg%Z!2|2w*t1-m73b!Hf(^mQ?Fj6?>@Dn+Ow2Eht8Oa_&RL9mkmY9N zhnZTJz{UofWe093HhD0v2*eRU?o%0OzNJ-^|2jXeHI84;Lee*ak;kAKK5BO(2-|!3 zxeap_a;OXMlX2x)X%9K7{^-qw|4Zgq`a|-o9*v4<%@cm(_O58w&3CG5$}-vESu(ZV zAo8*rh?cDNLX!x)!b;)l5<2Fn%Wj&sOH8YQ*iJ>Zbb%54-6)&NmqVAiqWAPCq5?d1 zYYu^6vu?qQ!RiIzSkE(1$%D;xvLYXwFc_N+uPuN!$Dy)Lz^1H~!H6vRrEv-` zKsE`zZK39|c^mB?1wH6Ov&?yc$_`?C4lvyOktlbFtyr4OE*hN7%+l@UR+s;ixyN{r zB#+qKHe}U(#p`NKBa_zjtY)azgq>E{Nd-G9&tg8-G(1x5=w?zXc@K6}=hOKAg8ejL zIO(GnMNr@N)A8<4V?Y1+G-*G1*tP8vPkaH5@%_Ei1af3MwSQ$e&3 z@Z;=e5vs&X>QSvEp7}9$GnsaC1?=XkzU`(RL%yWlP}9=dO%+Ds4w@w=esP3=TlUm$ zC?bw*PWn*nrZI!vTo80o(Jg1Gc5{>mN%Dxzr6qKq{Wh1F3j_AMs2+Ruzt))bX!}*Z zFHgQN;CnmX?I#F552z&ahH1UtVCxlDM%CWr032E`_E}(c>|?>|8u8E$LUGoLiLqUV zN%M6FLc6bjm5c;%O0g)vgZ>dVg9fY!4N>Gtrndfrk3({o55EeDM#WMGdqOa5b#N2b zxJ1i^woE#2YyihqF9}9k&}&Rf_+CVyPLECAKUJaa*OyCZe~ZCuYL21J=U9C>8jjXn zVLNV@qRpVJekHz1WcBkdsZ^EKj7kWK{SOF=oh1lL`#kKax&|Y zP#d9#2{jYCkI*B8o1DMoTz+B*zQMSKW90n@MmY{5naf^zI}iTZ4ltr97^s(*e5V5i19Mm0$fJ;Kbo?T z@aGCoA^f4joUF0`p>QMNR{+~Dv0Tem;paI*F9QNSP7T{vzbCNZo~ijro+m0impJ1T z7MucC_`!h1KJ5M8HxPGW?GM%|tK*qkY#E#qN zBiN~%s8(+trEsj_@G4g2<@~vrKhy9-jVR;G>HHCWKhXr!pW0*jbF2wDk}rqy=V1N} z<(ZpTl3cJYQoonLmmO5TxpDgdI?BnFQ!X6{fW%ejM!K&!)rN9Nu z2_5@fN%y5aNaH2Hpu$qDF^U@?=Nq7G#c(NK%5$0h37(*n&$xFL&LbQ|8v9cav3~)z zw%asHvi>&$kc4wswhfp?ZWMB#*mD(6vc1IxS4#Ysc8jJJI*+*MG#WZn44qO}+Oa0= zC_L@SWa)PPl<=pPKl>@N5S^??hfGEkCEGMbmK^k^0n_)YLfS*no@Y%PqBpxC8|>#_174Je 
zDIGh9p^>&waBO56$`rEX(Y@~}gjd-XGr;pE&1fi#Jsq$;0_p7GOl)ni_rsIC50d96 z_Rb79dcKW`J60)(Ob%06+~EO$=>}J=1i_6!whxE&>>fhHUBX_3c+kuv2Tv(1n*5-` zQXzK(CLeFQ~R5L=5+<|?AgrW75-H7XAyr+Q)0s8Y)#li+0Rs1DtQKBh~yB( z6^72ycwu6a!a`$P#gTO1uaE+ilipguWaLvk?T7gO$5pW!lS#6Mnf>IjckIG6Ad3fsj0^dC}IF{=+0 z?qK*E3Re(+YsHsxJ*VMPuE!LXa{UvvK-s*bwIT6K6&Bgd1THn?8cidzy#%o70j;us z`-UU_^Ht=9WaBs&`mrwf3MDN#=PN8EPj`i%pm08mFh=20!bbwO*S;m?&SNI~GoVNX z7a+zzG;#zL;1z{MKnoQX0X?Cx2P2@a@Tw#~V`!aiqJXhJJ<{9GCc%$nylMu^QuCUbbc?u6@_!L906i?P- z$Q5^#!jgW3;H|Rl*HxBz8YlF20j$G`B}*lDo0PxTs+&UHS*!3ohObt*knlSSml0Tu&wm7L&oL1(zJ$mhhU!dlor(CB@Eo^)k9WkCGPg2? z#h0I=a3j-%6mBAXlnZ}^!nI601TZ;y4?0MWcQJ9~I7i`5rvFy!LL|_ma2atv0&L&Q z#LrH<{nd6?CZ}8mlCI5;emuFD$x+(8#KIw@Z}2Igr_Pz zm++p7UqSeI4HvB#t#BQ~j{t0cwM@7zXTUEQkcS^~JJ38kfndL?SZ&1G1fkm-O>MA{ zUno3j)pOG&>2;)Ns+O83vzp z4pbbWd6tGtc~4SU^mDwzrKIy~SK31q7MH&-V3z(J>N_-GCyle2W^0A(nBPW~?=*%# z23)pz56JTq`vZ-uWZYWitC8?(g=-1FW5Ow$OVqYRgU(S{G;5N=qP-JcILEkf4pmtA zn`(4~{GFz-(A-gRN*O*{!$sSVP+0Oh5HQQ|&(}m9h7xx-jjtfQt-|GmzgOO+j-OQg zHdpzC9?M61w`+VS%UPqa$l@A>o22{-3+;0h7TWcikKjyn;T+?_IaFaO+f>DuvYkd4 zEyGS4KZ5*?*Z85#^E8!7KEsWCMlgJ|hKCrwUeo3?{6Hf^hJT>pg$&R8a|id zfBd_e73DQsVUf`biZ3#1R#mBiVou<-Dh;?xnxtv|~Z zN9f$FI3n*G6b_NjafIfD3QPD|3QJv_q_FT-qiKb=YY0QX57l_dbE?Km_vjietF#M;C>$VP=P2Am zcsGS}2v1a4%JIF@%w_nYCN240t>FV1{@g2?|5}Zgd{!$g_4JOy9mHKq7<2SBl4gb7 zD(`W7hP*GcsXz7whTwggPzcUZSO{7Q3&HCN!&nZNG;#Z4^ZpBY-(a7KcYC0TES1O= zc8%rR^0KsRazZUPyy2MUyN2Ujb`>>5nf9 zC0puW_(S=>a`%2*) z#^0~$^9a8!VvgIdYrHs(mlf_{x@Q#*5cgt3_kImq$@o7IMqamRSOxJS3QGyER=7^m z17>6MI}MvB{3|b#uEdqDNZ}CEeci6*rIHTQaH;yA15OCN^F>GK%T*MT{YKO@Ykw_k zAAT$2sa0W7p|y%H{H#`3WbqDQ%Hj}MyfiT$=?fw+k=+Xl=TfH43J+yjA5yrE;U8#P!MWXqQ{%$9Mq!crY~@Y-y^*iTeTIey zNb@0uMK)(CEcqU)bUKMMRr3{Do<`VuvHR1~88vd|( zHa)HHSDWXP=DD|d9%<4)Wbofdc-VTW`%4pFIbYL%YM#TI8UJ$k-FT9pfzRmk5cAA8 z&&lTbd-HtSJeQc~Tjp8bqUouKi*xr@UuWzgLHhJfpq%ah}f(aY&H3fcawU^`ZySQ z2Fv83*sKmjb2IqbQ1C#(^`ZtNj{^?~uK>3VxWv1aiM)7ggQs>(mtEz+>ojKC5dnrA=r+_+f7*Wx*~4h8?X 
zXmZh{qTdw99_1}4MW>t`vc3sbeO3{QW-q^R0CsymT88jwAkr3$%Q>j6I$uvEP*;C>Q(Lif~Q|dy>DxAGh7%bt}APt<72s zkX!Asr)FL9TFacH`Uhw4ihPRVV~2D>w#ezaAs_iR##z1COj^~}Kfm+39PZ9t1pzU^ zM|esCJCQ*4KvlP0DTJRics4Q+yw4JNy5r7I_&Sr|v;Iin-&Ws?$WUxd4pvMMi|EKS zZeKzv6Zh4;0+iy|wYj1A8171eIK4OY+=uvZZ0wLuAaE~1V3F)7NbFDSwZqR#h57R? zBViD;MqvkYt|!l<4b85Qbuwx|K6E~{%Gu_)J7Q62TsJk`8)jIvJ7 zDJ~Fgntl>buSwmr-i#UmrRt>$6x4OJ!QS&(3<-ASPcDD9!_T5JP_qXtfLtPN#ldlQ z_cR__Zu#2$Q75^Ag{!idhoR3%oGq+q4+PfSg;tbo+K)iE+O>YNP3_{Hg=`P8{v%?? z`X`=zpUHQVYKDos1W))z)*o(8cD9uEdh2*P_TsJ#MXzwz(ez)0qNA{@ksBhj?W`z&w`JL2CepcBN@l5V#ckDG3i|~wp(Qk@Q!EXJNLpOa^OVhL>ZIpx+;mLm( z@UG_%2#k&9j*GxVKF0|{)Q%v;Y6CzA>me)FLDbj|hrS7$Ui>+3HV?s&cesO!92e6@ zNG>)z7sVr#X3~x$wBxV7q8&$Q$7<4%?UP`xfB_=x0&wkG0S6|*y^>&F5*(5Qhbjmi zi5!#!M*s}0T}!&ZN`@W*OEGE_wBJ+0L%h)-)Awxup;=84Wf4e1f9TIo^xI-Jcb0a3=4L^e&|p*s%6 zm3F7Z+ThiaV5AAlt>M^>LrbDK3gP9hj#1GGLlK?+8n2fG;e>oR{7~$&Ec+M2)r~3F zc#T#6bAt<1WG#P}bMPRa=tyb!FugPW_XjvjvQ z0OS(+9o~wf<8xxAn~JPSIaR$`!Rn>hSM5Ke@FY&Gpd4QmYB?IoREBb_0xFp!PDbWd zl3prv4a(f?bA-8-IF;*X?ie1whFB`efv>8!<|#awjl?;&SSN^H6*~#JvtA_*w)o4Z zRDWy;%1`ufAc~T|b*z?rTB798&@|}N4G)6n@KBM4hszBQ5(ge0eo}cTO;fNzhN#3j zM9b0;-PI74I1rr#q9rjn9pDyQJsv013Q5w*Fpf?px0|iAvwt{jt@8VsBTzZ?GvVl9 zzmSe*R9}8(f*>UQ%)%#F+EhRDfE>`CklD`+R6+?qb0r8R{LJ}yk>mG{pUFen#Dk}i z)qX;$dI)hc`I!>Ifp-TE{LBGQYaNXca$cMKIZ@#^7{@=qxWAT?DJlAy5b%gk@$--R zD6GKG40qfM+)Z(;sho$a&^In>mu^XxePh!ae1uV6aAo%MLvlz9ZAk!6f7Mj*XuGwN zHBB^+3Vek)nkv(k6l|D2)!f21mJ-Qcd=yn4jztW8Yd_&L<={4=p^!tuvGJ%9ua%7# zgc$>Q2;;TKA-Sq{+#Z_BLno(DnsrJc_LiB(tfL>^|f7 z<%ggS66Jwk{3V5xn?@+MRL0X12a?6Z*4x9$A<#Qf7k*A2l9HG>$Nstm(OcwNLs#NZ z?B$P<=ygNTzIwG~Ur>aIZy2RsrG>Oa%wtn=zY_60KSYq6+&j|b-p}MNamam&85jWIMTb6z5UW^jC7= z-04Ts^j9|KQWN^PJphLYe)@6M}F;!jBvTLJ2=I7%#5=%Ge?0 zti)-41%F#;AU2cx_=Rwbl<*yEjDA&$jJ=i7OG5AkjN_kQw4IU>=+JS@1p;rgDi34W zS|R9g8N+Zxx%5cjGD0`4sT5OB-a1DIPzuSn@w( zNNKp>gXzERZY7Jc?ECV0$-}kcA3_%T|K397%b6)e(?4Lao8lh|1>p+u505?!&t3K6 zP-Jjr*kAnlJ0d>}dwd!EL4vmljmrLHisK(J@FAH^o#gZxe+zcLQw}r|R}P~f%KSu& 
z#4fI6#H!G+5A0pQ7!mXQh{$2YWIr}y_tW?-si|Z<^R+Ulv*G1ZxIBQ|H0rcqx^>oVv78? zmte~O{K-E|{xwcv67q*qc(vy5s8qeHP33>*-`LrX$>6gf#?dJ**ha>jz{33pGP2~8 zBSt$0tX`v)YRB*rD(@XVz&%g zN&AW4MCxD<*1o*hS|(S*<2yd4jjvop((sc{ON$hY%!ll--Wqz9_O2}W*1T*+{4 zpAtila}ih^D{2YFi<*!N)Jvus67i#iUAZXBGzr}(9Y zirUP#R1>so7-w}9TT4RGtD8axOV^ILi7{RB3uPw(HypiFTotQ)eN+(gEbwfe1+)r^&4=NR2wQgt*(s;G* z3y8m&_NjGRD3{jl18vTzb(qV%6a#c~db*5xi0J>t6orX0+}#n!RxcRz)X3Iz5l# zG`LWrF|lT=md?=M@4uLc)~k+P^TdES#ZYXk!cgp2SdHii#Vjr^gkp7NqR{izrNpK% zQ)>?WN2uU~VD&`IqvMM&atIhloU!&WO)%~$#UpIJ#v9m+t^3R5?Iqqb5sfhMNHp$~ zbBfv{^35}R3T+|XuFGPK{Up5R6LOEm_vobNcs><(8}DivzD-f35WvhzJ_0His1rxm zj-{X$2O~QZGgdsLl07~$%;1o<6f>$cd=J$s;-Z^ZDL7b2sjg;0pLN`B7}My*1KHjc4&?5P4}VvR@{hxV(eE(1|y4cje^53ld`D6$h`D;?MD0dGaliRzeQL2iH-uU~{S^t99*_@*zY-JKk%mrE6G8=Cq^Ni@&K> z6MKH$7vp+FXMT+R#`WC|5S_WN5|T|w^j=)A*!?#+tYm+)1&Xn$C%AZzA9le*px(H1K6UUSNG!Z)*>Rf!hbdChT1a zl{MgJJ3_lLEQ`?gfM}Zk{xe>_TnYVgwR zT}yYsP$P?rnwuRhMQ`eb|7gcuA}{jNC*-*esPL)&bd|jUl7QoxLdUbl1+P+A9MYSF zk>*uA?RNZ7JU75J?W_6oG=m=(oK^Pu3^|8C_n445jIQHPlt1GccM~9Pyeq&9#XFLL z2jPd5&m-E|{5g$3yE3kn&~f--s{RZ)MqY6K0Z)=R#Jum1r@aq<_L7iK?6*+&i@3&V zpY|8v>gm)r`XOB+?Oyv&6#fh9MK@IVGhz6F;o-4!*odEW|9}OEX`L z03TAAuF*b=$&E)pNnsK4co%$);){?EK{#`m$}pT;I!m5$dy;t`Z=U1vw2RD3fq9O^ zlZl7P`zm`MdEa2aeUC694DG-;Y>_2%Sqe)zzcyn3oKkK1gCJE9={*D>!CbQTFwKj8-~QxJN^=#SOtZV-EVF}(BlM_0+{(OwI5>^XbdQ^K4{Dec z`fi~Sx8Fq7*h^V&AlNSTw9IZb&t`eX?MnoCgZ+RBZ@|-$-t7uYM&nIJ$G9>&6mW9? 
z%@179A2;C#ujilh)c>vP`MHPbZ2N!9dj6Vv&hPziThITEaFseg{-f9Pw+Erb{P?Ch zp84^gd_8~BT*~r)YCV4>N|Wd)cA9ArGoykHKW~k6zE;dmk-FV;|-C3D)ylM+?c{z|K?^T^wX{9pSXLU^8Zom`DcQ=jPl>?dj4J5S)lu^`j-C|tmnUE!7lSL zzrwkhXYk@*+}{mLl3TX8ulpkYC)f4sxp@3vT-Se8mf_Q{>sws-#@Z{}utW#;bOiRb zu78*i-0Zr3fM{vf^>5*#oOgw_4MCVN>T_NHJyhk7UDw|T@nu|L#k&3*i0^A%{{f0W z!@52W8ll<$Ve9%6fxE@)`q`5Bhp+3OJBxzqV_pA|pI}|TtdDj5$CFFzn_JhfNTT+& zt{+aKZEjtECl6|JT|X~%UH_CHu&zHz|K-dSC1MqY$5IUH>k| zIa;@wb$yu-*wS_Vy*ZnJ^+xW>_#x~1pI2{2>t3eW_r0!P??UNwUH^AP{Qudy{@<|D zj@9Utb$yE~@a}c}5eP`TuD=~If8e_QwZ!$T>tBaetkiY=gSpSl3@K>!UwvUH<{rWcqdec%?hT*{K;O*Y$6yajolL z%K3>j>-u|3DP8OOQ;|Vp75ay*>#ub$srRw2-|EKmuj_wtBNa1sUH>_EqCVF3YutGL zb^Y5w{J(Ww|94oC*Y*4d(y!+~$o2du{@;53|99*83lr=4!_0dAyVb7s{H1(JuIHZs z!?sU@E!g7-)!l%fQG~`bY$Tzh0C8D%NEKcN^Jh=~?1~>QEAPOU9Q<&#IKY>0#P_VS z*Wt<4(=~Y7tNHVx33=N*UuWRo`4i{Q3;bEgpBDZ+g&$`8Fkc?v&wVE359WCr18?Ea zEdI>o&-M7>dj7Z(Te6^N%p`G zBp|E4uR&!fscc3Bgw`gzm)f(=D9#$btmcam+B*ujaDjL>lbiMYnF>!~_zV~Pkm8Gw z&q6qrs-9t3te!2;xLsqOH<;%PJnhTO%LV57TRfTgRC!-zpD6De?4N07!qCx-Gwb;y z6_#@T!r=ZKPusp)kYwd~dqtW{t^5JpzlN(Q(em$V_Gi{yQSeXiyu>-oEz^g)wm zg5WH(k5wF@NB!Y?{+jEAFDdLQz~p6xhDo7cC5*8Cz_-7rD8a;fsi$T37v{NIo^iWD zkT=-NP54qgApjxtg2Ix~43p6%u8htBEbIASC9c#cI;A*veb%p z|2WVKlKWY0p(wWWuJW7|xL2bZL)Q4VcKP@w?o{xiTuELQp%LNVjX(tQQiB^B)~@p$ zd5FV)+@bQA&%LO{RxDp0kwQG|&nsBLa|AD{cu~cR3SQWHF^umEMg<#th>Np6@JQqg zmMhvXWXaKy#J3a=m*Or7Sz8wmTN1+M3%B0MIvdu3>fJd9UpoxmdN)UWS-m9@eAnp> z%FB12eIw#gFU8g%Plo4_GOiOTwoo^|<>xVoE4C0LGekxV!X%&IMI{Ep3B|6<&4BxkymUT zpGi+H6FDAF?ZAe@aU;?5LN)aG<*#Zc_Jw%9kjYLR(a6+^mX! 
z#T=Q$c-V!X=^iQ^qdJp`jq*fTT>D{oS))&hvI?p0zau4`(;n1GG=>A@xxx-#qwhs{ z@}jN+Jndgepw!c%N($SWFO~HzWQh7OmBk>GvN$-dtNU(re%bX{asa#xyo=dOD2fq| zjvc~#QM%t@J2Cev&8QAf`&ImeVn;uH+l~SIR#p%=yGjsN*_Sd7?+>!&_%jQJm(xt- zNqE8^hAo(ZTrm{3zAwV-*VtDcip?nnYrJddrq5(`Kbn2vzVwPlR-#bq9N*;-IF@}y z0ocYNn0PORP0OYYY#2(fRcIf*!~Ue>bAupxr#XRBX-Z71f&` zKm@8F2MxzO04#06R=c9S}vfSAGjvI6E z`Mt<|LB*9x{|Po9LF6LD8pH%r6iCHZXJPG;#n5mdRjqb#3iErE5Vv)QViU6PX})c- z>s`L9hYJDlB|me+(%AgvH~y@!8h5N5MAWEQ_1M}IvcM_mk*vu#L30CILP(Rgj?%&& zCZIj)lk`HSh+VZ6=mHhTL_|)yaUR*o#abv9C5zT+mSX3u3l5mP)#lMl6@7c4pMWN6 zbJX_$I#4HUA1-(;Sp6PNBYF=t1D(4yTh-yRHnfak>m8Lk<V?ppkN5HmvPIQvgOS}p9K|WhUtAb$XqS)q>UU+olLi_UioNozO&8q>KY;GXNS>DN z-w}Zi1P+!DDaOX+1_ul(F8DGS;dcIJO~E-@wo!<{ezy|qLmyclZpO`Co1yz9+*4Y* zY`bA3mkt>cu6ijKr9tV=?I|hvHW)dA#f!Xhxw#qY~=(gU=va<1E6~)_|mKL#-uN(kx&ws7#0zrqiWsUoW27_k$wo+JaTl!Y#qQGyqdTzY@7c(1uy-=gZRHuaPr8gP{Xz=T z-kLB5Imh5xj6v1|QGDN^{_J zVx)4dIqCpntTuZXR0Lfb2BGZ{{Xv%|DlH5gq@)Or>+D{!0RoF&KhPd1WO052<+QgW zPqCx_U5oz9GL*yK2yvkCv!6sP8T{0||5@Hw*?+_{dEN&dmL7Nih1adUb#f?nPdV7( zt!u2hX!fIfV9npUXBq;v*YXIIJ`}qmFKoS!+yBZ+hP{M-%K00&M;`_Uj&aLcyub8g z(2bY#$Fc0&NzfXTi#yHe;MjQq==59w=9nQ|@+|VP$fU$Nzb#z#Z7?|NCTeBu+CqPy z`!?|hJtfvvU33Tuv$;&{@)#t}nqJD0oP|qJ&}z{aO5NsnYNPe4sCzHI+FBHBmWDV@ z%k~(MYIn!=FKc$-Iku9>N5RNN2n)w1bor0WwMqD-oN#ofRGQ7YzYZ3-k|17rJ?J!-c z?~qx3YT#1_qq2xemDY z3Z$mHxs(YLw=SB`hJ`J?%s~~FmoA2)W5h?qCG!n-1@g5IBcV%#gSb7_JoiQ*ZE_Oc z`P`8i;5i=e>W`iK4~zc~ao++TRdKeTfRT7TL0d&pi8g8wwNS+-RU|P4c6Fms6tERc z>y4^a)I?E10-KFF9uLM#5cLz&f=Y`bRzVbj0Fnj$)TmYPN?NU+1%!weB2@T4&pUI@ z?m4>w{QCWWUw&}*oSAptnR#zB^Ulql&;BU~6hEk8M6Pf$=Vym;7%`t6TcwFr>`dA@ zbB1!*voDoyX(gypVL#PEqENoc?SrNpdbAD@#JypJ8ba`Z!{q28t_BYnN5pG+ zYy}4x@mij9NJoqF03`{%msFMOE@i>U-Ntbp-kG7fe(r-6Sqw3k8!_&}1l#jN<}EVT zXq6Jd8RGcxK1?vH9H0}SEiu)d_aDe{qhPwVq{1SI^vV=6Sda+?*ipI*CS(lyoXD<< z$aO3lT$WF}Ht2S0Hezo98P6m^s_dG}Ak-?yu6n48q${h7^}TeR-ClxgYIH|?E+88Y zC+ZCct8qW}!^X}-D34od4ju&4jpeY1Lo0@$3Yds(21~O-<~tnd)w&0se!=Xsv8%dF 
za_!2LtOS^t5@eLomb^ziK3gIut9tpgNPNUt&YI|~Vg10)ngHkT-Pex(zU19(hvDAX8&2Jb+ESmpTuD}*StBd;I`1+T*TL!mmh z)QqhqOp@aj=Jf?=QhtCuOr-(DTzEUT@FyX3c_=s-Ex!nK;8-D&WP?|thVxavT?xd3 zZcL&Jw^Le^fIxVon}-1^5`o%C8NBxEMCNM`?@#8pkooPIn7<#Pg!%nRhpw1E7ujg$ zXKqMVOOaC>59SUdj-llN2B;U ziohPF4pL>$O~l|eLob^obDFOr@M2QHc?j>$+kCu^$I=!s$7#F<@#1akNZj*1fbJ9G zYC-Rz76G&Z$ASKOFDDRzG6ysIZ?dp_H3A^ux}%AH}D~44-)*I5EVdZr3{=WzU-p zKII}FW#?O(|0q3%>W#cnZ_?jh=)Ilnz4iCrc&M>T zTjag*bYm4(;=K)#H*Z$Qi2bn-HC&zgU=Q4N&%sy%S4gLEUDbwNba$W3m2LLG<1NG> z_zyZbM>-}0&)2~tbnso+?*-`Fbnsvm9D5sM*RZ`bOZ3gJpuap8Cz$7x$T$_H1wsLd z&2s85hp?-2vfI%pRe{t#b^8~%#ip=Qgg!P3@zm6IYO;d_oLu!KSSSf5!b5vK0i+DN zE8(8>-l#xu8bdivD}+WvWj^QqGJvC71?U3>I@KB!Ar1mF8^;=|-5~5~@oanaDPm($ zHYfJm4m9s_LAh1BCmg2t%V&~kvHIMilW+ht| zS&-DKh3!}$>WaAxZX9-mj0GmeS~xA^&S&KY6 zkOxY`FaGi50cMLHzT~cQWhf6E(6*DGxkzBdK0QW|Fb+tF3;)t8{#OOXB!D6?|GXMW z>*%s{vV!H&&K!$Lv1$hgNf|ww`QBTw3c?h&9}+19YNk@C-|`JKBLIixtT3OE6ux=L zLdM}hdvK+6ArB(q30)i(l@EBA=j>3x5l?xgc(zJu(HMFX&eP^ddkS|Rfs;_$K@A8I zNbls2XK+lr9@_Cl8)jj|?n7XOJ$E+9BP_y_QC6?i=&K4*VLF@uDfJq#5yY~PS);30 z&vZuYR|pH6OZXACm(0TtNLPVsutq-hI)ISa6f+hP71lI@hRp>!g%7fj1;epF0vUU^ z8a2Hmt<`=4d`mqg`eqzq-D=c@dLcMEXe87arH%1Yh zcTX$#H)OwPEC_&Jbp#+drYZI`67aH#T*?zu9_Y!dcA~qtQ>?=66ljf}1tKQEs%cu8 zFMR^|JR-vuj-;2fUcUDYT^d;evx!xQe-2{@)*dInpbNt{^q&~zc>*p?qQb0LLb1dT7%^aQUB|MaHqw^~>QryAd1)auNRBs!G>m7?x%o0GZ7CH9UXcp18MW{WO4gW!4SE!Jz*3 zyDrW8+~0I**5BmO^?Nkye*%;?ATU80&H60_v2#{@BW9guGR>@uP{Ayp2nN6{AZC3% z%=!aj)|b=&PeZQU|3?5Hjr?+&b+{1r!?!l;(k|PJwMFI^|7fw9EzG*)o;2%OfJiax zw4fO72~!s{P8)10M^Io)1;r$QA~63lvvib#Witb_uIzNWSwBUfa6lr2iDtc)zsbgl zC$FXY54X~|D6`HKKC}KVR~3cE>o4BSs#Hs zQ_MQEFk;0B448EmR?Ipf(#(1mu>{Qe7d%xylUZkte8ze{Az{`F_h{DZ0ZFqikTC1G znIPS)Uk0F5^Qg`G?dToCtl!^F%=&NeQmGeWj2?xGmJx*X)P)K zySJ96SS(6@yWdzcD2rA~O-jQI$YMXQKXV;&)b3U8l44FqYzzW@M)O`(GxmiEk!Cap z5{o*A+3O3xE<;%91fyrmE-|LJ*cX=Kq&6gr1roO|q#DhssxR#CVNC}ljix{*jiw)H zJ%B3RXLVX zneJwGLxNKh#mFRVY0AoUgJ(QXkS(hj&kQ_NqQskHiFXHXrdrBObXjh6eKvZ&*cLM$ z0hnCm&d_+YghL$sQKgNZ;=jRB`#dl;9VV`U?8Ouuuah--)=5l`#2}H!>}H)!}6uyF2@# 
z6_CLqi-eopF#>0T`lW&@QBVXeN*ATHO)>}zdVnUPp<^>iev1ca`?nlZ zX7B)gPXS12DqRoIlSuHRJV5h-s7nvfZ6tRW9-t8{!QOj-`Xo^WJwSCkzlNuJ$9de4 znzay|Rp%K03!BQ4Q!vT!J2&z?{$T|C6algx;ALHI zBk|Q%fB8l;8?={qpeZ^By%ds7x?aYD=cNHQx$+>otb0k(Hxtwn+a}0C#C!+v5KzFU zE*f|XabfEhAz4?5izrf^tIx8dghgsX1$ro@;ZzMO=AuXzEUIS3c8!3mP6a4$8?1>Y z5!2|tE$BvEMl2$k?jN#m2lM1eb+0RYvdU9Yo_VGaf7gX@<_WRk6P0kqh{S(myQS*i zbRn$LlEE6orId1+F1wm3!uod~VNOS6Qds?y&xayT7Y&RCDTD@eSIV0%ows1(6E28X za*z7@Q7uB8mK30ZyAp|sEpZp?bgJvGOv>k{Ngi!U{-?|U zUn6D9%>SXPY~-2fgXX^ozwURX@?Sb)Sg^?AYZU=Og#XHwgQ^ji5&I{m;lkKXWRFkd z|4KxG9-9AJ${>V9nsnxR!X|);hd1qk|9TYR@?X2Fze8Rr{P(-7CrcSKyQ^ib)!rMc zzDZPR#_B?x>g}9A%w6{-;v|)I{u~lZ!-^+LQ9-c_e!brV-yej32P^e6@_mq$DKp=X zLL7KNUj6U#eGv-O1>YZ&vi?ZEZ^gy-O7DLZUGBq7Kv#5`CF_bZ(p}WBl^_J^9}3 zyn$b*RY%?hV3uux0KM(ZmESeaQ~Gy|AbZ;xfnVoBKJL|!b@+7#Gvrqq=4OVR!jM8d zh&7)d{RZK20v=xbPk3GMoh7P}Tjh7NbE?LEg8Z&=j?up#Af9Vp-omfb7i4o@$49Qqx*YD;FCq%vqsAb;Oq{RiyZhuCVfHe^ zZ7ODYoz2&)=0^ltdWwp65x~7QuS($6!C?;u!#3o>AhX-y^ZqT zF7w{1e&ek)in+8x$1g>^Ww5`1@92Pv$bf)$5g_?ff$XHx4?>>dds{|Ip8@#6DRW9za} zdV--zfP`s=P#bhqcL6}}rm9lYw8LMF$f-uf;NE z&mQ?r&vB>-iwBn`JsN&$zDx8t&kym?_$=6%kTtxDGQ{D# zBAYvzX2j+JI&8fp^ssE9p}9a3dBC`mygr|3fFhWDq9hm71dk%26hx=9f`zQxfg z1AkS(%f^T7v)Y1GiZL!a7!vpsm@w2eV*Z?3?Iy14DZyVkv4PruTRz~60WFDRJ~ zEcz9Iir8}&0tzZcVE(0XN8xHg6rc#qKa`!?luC)%iG++tTLd60Yd{NRr@D7&VF!)B zYyMBgT0NTo5&dcYC0G0qtuz3V8vF@ROf%S*-AGJQd#%B=DjEh$;p0KewDD6Y`^g63 z7f65xL8#?52$IeN25Y9(5))7alkeiPOlv8ri3WjC`mWp%PwkAO2Ff3W<%>ABff&>g z8glb?$gaep3vG0=4=GyB^A74(<17b}N32gTR>fugTW~7eM0HIEqO0X0fa}8+!iId zVvszMrsR(GN$xoHFXv)bd(J9Q6Ov1mY4RvbTBB`%fUpse zf7&GZ1mr}4T(p+aaY>*c{SDnu2^EhLihrKewo_(Zshxs#fRx(iQ$%W+E2NfXP*N*- z;J6pRo(GGrmde_s3FNF}-9TZp(C&5M)&z5W#{vNDAQ~2L3_H(yZtv*4vJD zFgh*?H011{eQYCSTp$Uuv=9&H{C<=uY?60;vimpcoYf>a%th^h4B5j9GpOZu$f4w& zsj3{`IG$v*Y|L>yRff5&o3~Zb$khx30q496K^*D7gquVYS2Mx zvU;Eqz$B2L7at)ClUqE3>0>YhJ-8B8^Z{FCsGx3I*ZVxYOt~~)o7Av1jd|nRXoP_% zVRITWq_|gnf;E%HwU>kdQen*%YQYVKFVZwv?tfFA%((&_`p7GaaKtCQf{k&YKW(19~g6K>RW0 zLm9i^upoD 
zhvp#5UI)v<_L$k&Zc7mAiTU_=F;^+-!t3XddN$Bk7l-We;JgfOT@(~wh;pu42EJPJ#p0*>3}pi*isw93h^3GA z&;}kgka6SCT)fdo1 z90VAE)#pj00#B1NY;w~`bWl^9i2VI9F*rjOcFi2F%-VV^7lXu6qDXJqs)r@AFRCT7 zseRpEXG3sMh z%^o@%O4Y2CO1!l`7|bqUa%bpl#wUKjArX3?5zL2`% zy@D4IRbfl>g(difXjpN)mk<5#2w}}77b3-*jJl;*7!itXHR@#cikC~eX5GI+3`~Fd zzfm+SzXvlg7QgpB2pE<(*YS`rXGtG#y^b}r7FFcoVn$tW=m1u)HlwZ5;;x4ps^DiC zv10)dUiJmfz4^+y6i9HC6}xye?uwz*!%2QTp6g&8`X{0a7)GYPk_`)lh z*C@QBO15*gli}tm~YMj@u0SWHOXbu)0bRD$J+EUZ$3E1hg1yBPfl|cJBAgk3t;gSJ4|OO(xh6WY7sPQ1d5n z+yuQ6uVZO6Si`O#bM3mY2codK&Y4nJ=-VflGR{!Uhot?)6!Ca;nEaVeWt6$jU&@<2 z__RV^=)(6(Y^S~3n?mBW*6IVCX}w&#)@3<+dHJT5vy<&ou$&yvhAh@At(=Qo_+E*Z zFgy4o%DL(AJ(ja?*X2CMrB5o?vpx8YQ&|AG8TJ$x&E#_}9yEUxKSn`S`EedUN?09yoXwB7V1@WGfFJ*bn4}F} z3L6VO!F1znYQF&p3=a677!1cgQ|&k!srCa3=eMw|&SiW&%*RE1*nEWeh~hz8cPBqi zI1G7jHaa|dc;fMo;nn4wzm5kz`wC;FL4ZpmF?p$Qsm6_Rragipr zm?tcISJZw7>wx!4PoyTN3YrhK!QnX1TarGJQ6ffZVmDf4&U%2cQUR%ym-cioZJMh5 zMAC`~{($`!NrwHC3Xb!MiPSf9%T7$IcJv6UXkuo8qh{rGkU z-;U+m$%NC#W9;7i3MJN~DMZ&O$ ztqM>hb{r+j&XU*nsuIM~@}v=JutFRZ7BE_H0~MnZFHu4F#(qmJ<)lC}Bfk$qeqHeU zYjpdy+-BzYYXrz_a%M?d;rCz2!x(kB?NF-tW*iHvWJqe{d~^ht9jDSO@gZ!HK888& z9C@TZliA5a`X%|&rs6&u2J^#790J@0WPTq$KKd#AI@aP4$mAT10Jm$>4e!X^c?Iz_ zG+Q7k{QirACx@KR5Vn*n|D4lNVEDE|#t?T!A%|6oSJzNn7qk4%^8m)XBz#i6ixqrC zy*Ci1LLnFUQoK78QUJ%PcZn>JQf+pQQVB|!I8VKg=lc)^E14Cg;Gn#_1#15Z-p#k2 zwfJ=oMTGMf-&Ql&T3kdli`6^bH_r2nd`1O~Wk5UYY%P{b{0;A<&b=yL@~y=?!xoa6 zx1>Z|qa!X?VN#*t>RpfxQ}2?=ohg~z$afU@L=`WY^v1h0f;sf$+s2-F3{!+CWFdDz z2%Iw%lKu=>j{uaqmhYdb_wjsxSG`LC_Eqqb!FC{X<}(tl)3f^bN&GtPjG1HL@c}bL z^ji#hgdq>{F$qutH;- z2VHqf)boVhNYW$!FG%cl`eO}0-qjzk>)+)Jr2IGw@n9+D@na4iL@<*dGx&IfkBfoa zEmTOoH;^k62(Gnr90B_wk~F;nA4J+)0SnsEG=j1i!cF-}U^}D)|}T zokRIy=^v+LXkoli!AZ4aDqMJUJKmjY2A~$#^Wz#7E0DdBikJ>nsYRE1 zq+r^ShXnf`ghw_Wq~?i1!M@oEl;qml8UQp-(WGmc-$LD(+Y+Ow@A{f$rd=MuiHO`w~}y zfPdv&#@LZ6Rw#TP-%Gk)A5e^@phM7v+0pVjI4BNN3)m8+Rp&@F4I{CGfSxAbc~U zp*nn({+^BBk<77?=ZDH|Yqp$or;&%E8*{zxM;%+x%qLUSN)%6~$oCI6 zYp#q~B?uEIov}c95bd(VGoQv4*QasCYx0vP&(!<`Y%JZ?MkLng&ebbKewJV|3M9aJ 
zu;k+yN)-0|jKHt*8~jh{*Wd3C7W(zq^y`EOk$xRCt{d=3q2K>BzoHEJ73usMbbhtW z?@oVyE0TGOpX7i0dkH5!6Nl~R(sM~A=$+U%`JLYl-&=FN_z=IpOnw*g`-9{+4<|^p zdf#$h?FjD}dV6$j*!&FQR)tmP(_hNVQfJUFB9rYqOW-{S*$4HX4zuju|FB)=oy_yF z5u>aMQ;t3BtriTYD;oo_^6K)Cp`nlpKxA179k7+XGFv?y)#wJPoN9=&2|6=Iz~f@snu z#B~5aXDQQ9{#UqMJnAH4Is1RZbIv&jfct~+NaeUX@3#y3jarbIejU^PPv}R`myD6>9;cb$J6h(p!t75 zzitp;=Lhg0K)*8w=Fh|85Zp0|W8%#n)feK(4tM&}?Npv0;$4mQz%g?{x#Dfrr?6kh z_A#&-YoEH7nxnBqy>{?Pm`ABZfnzN-`$X(3a4}3fn#0j6oX1#<1!SZ2S1t*iIk5S; zz0Lo#XMaroZQU~D-<$cL;mbdNuk#=Cqw=rJl>cMkfU^LFNU8tay{`W=KdSx^vNZbl zX8vdR^3UJv{Kx#L{3|o%{}_bASwJyN^8eYruK%1LRsXC^^=JNPq~xzA>XrO^W1KJt zR(8(8529qUUJA=Aam#^k5N5=B6Uw?gkJG8=q2Cx#8za_(0Kw_h>VBB4e&HFJN?nXT zk$>q2ySkz&?)YUdW&*`Y+l+)=&aOR*$c%28tZ(x6Fm9}-_~u%+x~bXR5ynF08aAbU-;GfYmWp64%Lvt7PZEI z)6XRmo)tPXXWQ(&teT0?v#0KJ^$g(lFgOZQy&a03P_%q1+OSNDc7%_cdfh_xIbi0w ze+Dya3jIpfsu(wVMdNQUkQa&8>mxk+Z1_Oxhs^do6Td$ux%YLR|9(Bro9t>Hek!ue ztbsQ}lzbX^X=V)^1h}LIdh4E)zWpsr>Dy(y){AfAV)>0b5&SyC_&7&G);Yi8$3XpY z3Vs9qfuudeu!z?+{(HD*7RzMDCg)+St_s;p+gK%0z`OfHdV)IG6To5X)dB&--GV3c z5WjY&32=8iIz~L6R^s~ZbY^$1hqG-$0em9}+)t9+UoB`BSbqV2u8~Tdi*#NFq@^0E!~yC1+fV{~P7k4C-^$L=|NC0r59?E$N%88q43z17TEdE+w8Gc|ztuupVmQC!~e{g2bHM8=v zjJn^8EUY7%U2QDxOTpU|dm_g$D?@$mOIB9}k?ufVVXK-E>q31$a~|5d9)?HGHt?kJ z5thf!>1$COCKO0x5H@kLmX888ublf}5uDcgS+*wU!-RftVsNZh%^K=1F`e}a)f$2&=y9;2NettzY5&eue z@11^9(f6RA+XN)3pWkgs*UtuKZ0DGF?_NLAc}dkzj4geR*E?wj^3hJmsfxU0mg-oC z{J5@1x?c!b;rtGE+Zl}qwe%ACjXM<#4B^2}OP1J>ZB7vbPvhfMKKkVEDQH?c1AB3vg`nM3uWVi8}W**o9ix1jx;OZHC-m(7GmZ$bEH(QebHt0|gkP zkxHD4v?T-58#Gdh1JY_iI-y`Z?>VhAk9WYDg5XFQCxa|<5S^y}m|?p(pZF??K# z2Nk#{OoVeFAH#IWU`GFn588X)u6PPR3i+|Yd}?`mby=Rz zKy8-i1N{C+9pPu#P+_bL^MOc-6TXMx3qMwfJ?e!h-@k8ox1r2)A=Ws}Dr@hH$(*ix+@~ZJU$9IB_PI z+gSnd1bX`T%!^0qvXJBKqMT!uo4n=9PxZztKh+zT%&0dWXQ$qHoSk~(ady1>yQ26@ z+$x8>D(s=qe*e~+!%?^j^V_gBA~%d;lzRJ8k6{EZTgGkHXM}n2-72y-+;^KdVE7L0 zV1@Y))!)Dzaj7@?`?|D{XBC9{c1Canz?AutIZi9rSUeQV1aL?J_HM2S_uWxZyu+B% z2F7B|D8>(do3|3MA#-v*sbrUzur&IJgjTh8Dw+Qi1hwxkA*h97D2IxdYlC2Vnb8q* 
zxk}v`vM0;x6i#*%wZiFD=`tzl2-IM!oI_~dPiQY~Dfa9n6D`p`r~M@n$cF=cZmJFU z-Ri;@Z>c^pVs63dy$c~GSk}n{ADkHp$e^WyED3@ThV0VjNcJ|O51F?&YYf4`+q>Ps zi|%I?1PRPpI^LC>8z~lvG-98F7wNZ?U|$)iCgaUN*kQKU&I@(Cz{(+T*FCAR{g=o7{P zks#s`p}t!{Bcu}WPd5qy`5q+DZOBGs0p{c9K`Naik<|sTVv6iAAUA#0B`!LufubUN zTZMUg0juqaRxYAQ?>wqbZsHFp3EQ;_jLc7tm>;3-J600*9?w zCpZSjU7{P3sY4wv{~E-WV}G4QM79#do!^k6FpI9atY)nd^>-Z4~+lu!JI$DZ=>5@UlKMu zI93-Eyi+;U{)Ptnao$=F6D(sSE9cpV(3+T!k42y|!OuVnapiq}*!(nN-&s}B_chqt zYid9q^BXzT&Rzdf1}lgIx$K+=h!E#9;tD|58spV9$Gq=VTh~%=!Tn%lQej?$%j*A) z%j)HBa4fP3b`|dj6n#?WZS6kExe4K6>uhdOtf_>RhGk+r_U-G<>_u&pa=9CuTN{bg z+=(*}^91Hg7sk7%elCC-4hOys2Pygc0(&G87Pg1A`&&&z?tu*3u>bL4RAZE4SPPDo z@$IO%kC|r2amD~#^~gJYvQo-64q$R`{iMv-);~ftR_e9&t-fsTlKAAc^&3BjgcJ)S zaY7v;d3z%Xk3W|Oc!l*M=Y7_z76Dm2#b6tht`gkDLibU^)cLgB&HxIZCGS}8u3}(F zI-rAu_VH#HP`P{#n`axbtq8#0TC|NLK`MV8mNU*A0bBBqh}r6d9m=u)%BiObVx671 z0lG)Qe;<17GaYUz<_nLEEU(lW5JU;G-r)70kF;*q-JmBMkY*O z1tkR2#7bEMm558`+nn*p*Ex-dZjchhovZclF$iSWe+0hydoVM=?@)Xv`=eAwHMtbs zh=Iz)d2-soN3v0?d|pU)l=+*FLuJ!D;xGXgGRs>~E2D05E=2<$84~9;4QLD>RT10ZZ~ejFL%>;nM#yE>PmYG@TuDr{x$ zq)ym(0LZ`TWq?qa;g_s(ZUHkx)+MU|h%u5MusZ?mJ4!;Lv(^dggPlirvQ=E zg`>E9e24_S4{Air&lC4!`LmvP(m-pR9iO>%jvWYnhj5@ee&HwtHxOQejpA;cu=5q& z={$t-)`$|opt{>xUB5hRq|{kMQQbAd%Da&#fKgq4Mr#FbL_XqB-3*AHRftesh3IS` zlA6$14Df1Fw}2J4YE~t9E|cz8a)Kd1-UU*z2?Zt8qO{mQNwtO)pk5O&)sboH;SK?b zAXd}=*-w>cE{&7NYh^7 zNR2qy^OI(hLWCMk%v_^O6(X-j%RlyNwAq=D4>7P033vXLxEVEa?)y9HX-R{TR`54~ zPRGGcj!KJvO5!UP;-pg8k=vzmg+YaxAV6Waav`%~m2(n`q7kQ6a##{6Y|8CYQ7jts zYvIuV3d8VPVW}(bl?GZm`<%Bw@~VvWAo!qv5^_9xVd9HU)b|OPXy;MdY4dGiw)40G zs`*$qv}x6xgHEW9%SeWLiLYqcEg3EopqqpYW(}I8TeF>?F=y4fr;$PK5w`@XM}{kd zWcYzih;>tck^&0p;(g=86dEkS2We0Y8nh&y<4(uNV04{neB95+y?orw#}qs~y2Mn% znmCHc9!nZs?vUIiE+bvkB8&?3Xp-X5FIa_sf<%c&3LA5oxym<Xd;x^Ac6q)aguwjdsTKaSIHa_0qqm_?ke7wrXOMLv5588WYHXk$jn8C**d_2U*eSF-5N7AL|+{v(88Rq$W z*`JNr^YhUBdK)PwZ{)xp&b{F{D=%Ws??5+1jV$GkDLd!m1srPcOUIrZdWN&mT7)ea zFWEUyvMD!*Vn`t4fRGvE;UaNyF^QkAmIF(cao7!fnz3At83@x%4VlFpgtI|Mv 
z^RP5Sl+2rt!NYoBDHK)9yEGGYQxc87!r?RD`XF?N@fL;O8K;# zjq`HNM(iyh3t2eOExIea<}wV@RE{0dFLBVRcT24|)?Ea&z@vb8s~(5Ns(kLyd6gPE z&jn<|;Y7XRVBt5%2C?%H%6YD4%w(ky%7}GiIqcyWJ&t1WEWF@%YBZ{)JvG5}zC!xBZQs0hVmpW_T=aDPq3hF6p3X zj}wyX3g{UrZIz3xJK_5%Bvi!Qt{}2~Ck>s59Kd6WL!rhHEaZU%vC7#lqxp-qXAlJn zUWN6CLYA!sFlz~eOg(wHz5p(gA0Q7?p;KY_$=dZn`x-(E0|lquh`k7O;8-D&WP?|t zhVxavT?vFNoUnA^c0y{>1j3V@hr4NsKyAdq?RoNKm%cr`KbhY`=C@~J{(dZ-Fuy-Z z)fMySA{)*8%#M-O0N5S#-;nqu^WS?HL-Jy=Lq_aSi4>bc&u%EL3|!ZK6Xj2c z=1Zs!=oem>6_KBQo(r>)I~mI}txcO|%QfT z*s6D3!f&G2>`Z3vmq`{_zba_gI0xd_xdt(mk6rKK<5$caj@~W!pf7hl1CQ6qj>fOk zOCaJ-p8n0%zghStnuScM_~vY6reEQK0CK^3Ohr=GoVQiD5G#ssCSIh%L|ASltQk*a zuf=WKomwYqkB5x=F?6?R2c1YKOv#XCsnA)+=fXla$p zy3jPuuO+7<${^qfgoUk#RGfVW2;;B!0LCLNr)(qExoL#sln~Q1UeWHNG}=X& z7Os*Y?RaBHsF<`{j7mkUbBerP_tZ=AV<6=iM0C!ucL= z%8WUQCvXk`C#qIXN={Vi+-axWoWoX=n5J%7iQAxVoE2K}4v;RvgO}Yc(7$u#7wbP6 z`A0tfi;su-cz_QZ4?3{R-)6$NGZEjMr5cEDU;jU3$LL}YV~$0(O<;1Lwg7!!l$d=Q z{#ZHXcW|C*eilH7Sb62}*2Y%jlxo;lfwKobvk$Me8;2hGc!XtfxGV_JjS-qY2iW7nXBiw!rU{Wd?Iv?@Aj5>oG$87Y_CvjKaG>72MF5j!5deo1 zlk*WPgNVrm-dmyfcCz=@-+MdVd-LwJn|ziRR>C(Rj87ioy@kBDh`eQe8=qW>H<-6? 
zlV>vH!m^uqh@oB9(i&|9@qPt!4XV;Th(KKFy$m-%%e)-OgITU7Hc-X_lRrhvN{kt; zJjkG>q^{M7HRG5*9&oVCn9)+V%!oDOJ7UiyS~Ma=i79)`#c*T0bJqJE%wd&xh%$2R zATMF7J^Rb+x7~dsA**ba5o;$->z0;C-;PkUv&5*2L6VUPr(|%<^~+#n)+;!p7^>6` z9&bJ7V7z@rq~7_~e|?jp_LXxFexYrz90X{9&gpY-mUD)RlM?>A4e^$cLY@13b-7uW zejf2rNY4^sIF*k&w6hL&G#|Q%uUytW+2aN}N+@IyQdbGni*#IQTG|dI`wlLj1t_%ccQ((ZW)hi+Rk) zhLc3U;5o;9g+Kq*GFS5(H3dT#WW+}Q67F}8Nx?NMQzS)9GDA<&W84^oS6DaXqBhL4 zq8O(OtPdMQ*LV=8Go{(d1qrDf?lb*1ePBn+Du7vyUp3d;HRT$X$owC7-y&~~&9#ZS?; ztIg-xHsRK{2A%fm=*tWNWk%#1v8#}tLmEkilca6hQs;a#8}WC}#_*QkU~RJ|Zpa)y z+v8ksW^UawTV^zP(YU8C=Sc+~Y$s2xvgU~Su8>jPr3G15a+EUqJs1hxT1Bk$YK_=; z@EJy|>l(s#7<-y95C3;`be2jFlIznpMa(8;_iYy5xpO(SPTnJy#DwkJq0xTR*IOKs z(GtpMw6qXjCaT07erxjJUU#2#{$12_JAkAzP&04k-4aCMy2S?a*Guis??I*p?S(>5 z01h@wIG?HjzXuSeJxdUmd2Xsq0B)^B^DOf#H!h=tjq{@Z+zwNS@)4JLZi+W)hOJxC z<5P%E0HP&TC>G~MkHCj+pyUL8TBMgdF<(P^=zCQVPcww82<}a9`d0xv0N8kdT>0cZ zQfkT|!f%}5I|-{<>~i5P0ZuWIs!%?*En=@&88o*5 zq&)mMl<$KfEXY2sgY^eVC4HQTb%o#UHX3n|$CirXjny|c20hV@-AGySMCW0m)2RCx zRP_&XaO;5padI*qj;_Qyd(4&1T#6aN;*B+PQ6BaQeYN;6H&5JIb0QS7Ey(e^rNFne z@w|z?o176qMlTO_ws=0r0!C~YG6imjVY@lmUw4bK$k{vD|6;U&>STY8F;a$CSZ~bo zqu54_>mCx+A3e^sqgnV!GCpY&daKI77|bDvW=mo<8S?B3J4g_<^|*`paZ$2v3(cX`f_+8*a!7{8Mw-{!WvUpQM?w*9?{UZ$y%0}u*<(*YCl0W zF&`R}rfLp5q1MEFSfP0=E!NB8Ct^O%t|fW`sivtCishLmiNRV%OCVujnGOp@Ex}WB z9J-Xa%B1hw&>+wVT?T`WxIrjgt#L1prZ?luHup(<&uQaPAQ~0762GwF%-IzDu_)S74pE(#SjdRWRK5{ds_}*4&zZe9Oyov&51`TdE!incDeI_ zPKPK(Q^9#ohp6Xth~mtAl7uU5;E5OZ`2E;)&=zKz0_?;sUC?G1 zFJQk1Y9_&?C6ffED>o$JiGM%f$K4QJ<6n@3XSo4O{7jM=|Kub*@$V1(2kZ`iBneOa zfF*t=$&CN-Bs}pS0Q?X^e`o4K(2&S@NH+dnB3=|EO~=Gca4Q4=Fzk5|`?3HT)8d$j*q>3ljx-g?)WNVZ=;? 
ztlN6QE-+sFbJ+8uJM-q^@scR464l}(Y()wReZKH~=7_OJFSI@c$NfE#;t!1}n>mWb zSO-P4M`_$%SKg3BDl()Jh0RrxZj!hZ=TohjS$+tY)WHk|7gV0di}lpWyCL&FLg*e` z$bPAX@Rpp)EF(SmXM$e$o=9GSxlY!cSw+mdnJS5+S@K-dRr*hbBu&CY3u{6xb?LhK zilm#oz7|c1i;OF#xy=Ty4K-HrW(l4UELswFMyzotjvPiF=?f>`yjm$khwJ!%<<0+A zf1zca-QnBDWxgwUZ_w0k=r@4M(@C;uYkq}zBjhTk2=+SBANXVg;wsF4SD111voL+` zPzoLANL(KtGCw5WwZpM)bu}E8pWwbN4ET%%r=>-$=2cwwo!O-g*bIVs11@w;beCyo z@mQ7=eS_r(xHNb+0GUg+vEY_$K!&LcaB#xesq-3Av`m3c0cdGMbwRdlPmjKa>F=*G z{e3YEIR-iKXf7;7XQcuNjPpCtyK4vMxZ;65r3!S5nDUeCGh%PnGNbN( zT-sG(t_+)RZ5Y!SvU83t<^EKx;*g!nqt#}wEe|vjJnq$4vlagg(PEMKUA zT#h}@-UklC6p4KWk(5|D8UXU=ANS4DvEw|ceWsw6ifA|TCsMrLs4EuPt))XJ=-Hnv z$=`LAAw~Q`Rs$0lF`VKF5q`Cx5nmx|UM+r-f{ZPeo*?5)GYE1bY$;f>*WJT|y+q8& zsw-gfP~Ui@c$+b$2mCt3tO^D>O_|@m7s!Z-ao}{g@A3+0z?5p4JCXJ>Y~3rdII^1K zZZyI2;|V*&|8C{m^29$`oJR!xy;2-(8dBca`v~qzy?u9!Z$$38@8MfST6iN=!;7=P z@nyb5;AGNfCdJG%Rgz7==wnj*`UwWzbiYzd49V78l5TNv_1#XxbQIe-F2t2I&X;p( zBUO4#a%R8dCby^5O7eT;CrOH+z?De#6*nT;9YLY$W02@|io90SIGXWdxdg6K;c|5&ZDX2DPe&H)&y8c>RGYq; zoTq0`X$t2b)j62uZF|nLj`Lx1M~w45EU@Qa z^aSCR66v}5jm7E7j;-6>HkSyXy(Ay-ZZ}Wa7~NHLJMczqJLW*OrQklVU`OQ?{)^)ZTf)?Ax-087#J{AbRxU$TESN%yaM1WSDh2MHFU**U>(&9v^- zabRJX0~FZsH~+%!RUF-`okm?AG6@>~a=|t@n8^m23*s?3-AA#9brooWs_$ys@Vxdf z>T!fNEB4)0)MIO^SQlCm^FD^SR>8izgz=t&V~)YyjWim?4O!D_DbkjVVRW+{!rkX7 zC>FL0XP~^ws7R~6IF}sS=Da_L+9hK&x33E>6~ArNtpr~7-_>?isEJytV&B{cJA0u6 zwEAvSURv~x))=K}Xw-QJA2g4e!&Y6iZxD&2HmfD^-L`l5wxPg5e4ir>I(H)U4Zi5* z`9{y*wd4E74x{I_P`>Yt*x5pUf3NXr;oPeQLO966FM>w#E&+H^wS)SGa)`t$vpqu; z?1(;(6u?4DN~|@E>#wiU;G~bUuIH3i=5f|wMD5i$ z>lgs;dUIQYm)vyfOPSNJ<17$d&qg5m@;U0vU{$y8KbM z%p*V^>5vB5GF+5VjWDcRd-?3hb%@ihgcxy#`*J>qZ;|ZQh<$5b#2%|pDpWTrtHE;U zl#Xr`Vs-bd=xb~&5+N-)6Dr2KnRmdr#H;A?aNZ}h)U}LA)F2>nV+tp_*tm@lnQCG< zmN`O5aAZk#8&^C6A@OQE5Ny^*a~e<&-_0#ys{+fNFzi1ii-k)X3(r;z1bo;!%yU3s z9L-tjhqF@bgnf6u-_&@ngLhC`%pr8*uo85D2o>(T&9zWCE8<}ESRnpR(NoY6bW?QS zHyaE6Qp`H15;3un7d9&}Z7tnspU-cS#82#nRY@aUXO<7(3WcC&X~QMbB(d?zjqlp3I3W4gtvR3({N_=}imI01(KIz%&cI04wpowO1%A>@LI-a5B`WD??8vY_?)N 
z4n(m^axk@4vp7<-bIJ$6f2_v!l%pi;vV$uwE<%C|`!1}M**9o-9!3<`)$|(*>ZA+| zxO)KYJuLv2%JR<_Zy+9yg*-&UA|g^X>mK)aunO^VbOjNIndsOx>@vhf92qvCC2<=< z^uCREtIyzcH>7jyEYIS(J#;--@P-HIgVhz(0tfo-;p{E2d||H}b{{Uo^!h6p_@OOm zLBFcBuFZ(eK@vTwZY=mKJ78h^BD4f;=#ohv%b(S(%m7_ko_;sIo>2>Q0v zxbcuM8UZ#2M$eJ$4EQx#g3F~ub=~B0Pw6G;3`6$Fy~(id9JC=z@=;!AJURuQy;bcv z2lM0LDQyp#Bikt$u3Ff`T9ThBt@aJ%6?~0jxi*wK%Z0nT6J-x;PD$gjZ(qg!*%Unv z|ILxj$rQEpNg&@$qYpXP#~r_@p%fMeShu3@d|Pd}Z-7Jgtr@v;ny3sE3G-+Ex`Mmv5 z=j_^A+EY&3VC_;xVf-2PXZ z$EmJQN;zHwq?E%_ce-bHCT1YoyQQ%j7F%`4fBi=wZekXVcyMBGHnX~dkGl}U%>pCv z!Qb=oONe?#mhv%ykCBpamvaHJT#FEqGfw9H(98szoq>o^!nXk-^2WUYn?NmHq&3bv z0=v!GPq1xvmd&Kvs(bS0W5QIA!~{|&-yH^krKV_9FAX-!;o+6f8@6os2v*iTl~_P zZIItJ&gcBK7PAB5tPvROe~`FsPF#O4)xR(4-y0!z#PPJi#hoW4e6#bg{=I;{^Ege?ZgZ{^Se*Z-)BHyN4%gx5;g@wOlW)xb>iE+ne4EoxzTb9!#&2E6!!?Yd ze|zB9Sq`~$Gv6wV#{GAi;a}?C8Gs>+rzuztwVc-x=foI*nl8o%OGEh~;Is0(&3RIP zKhAhviVHQea0;@CDqg63Qwr==cz5zs054`FGK$D=+!>;N6GgMtJnu#Lr9P(2=jNg3 ztQL=(J20QSmVF^JR?GX;vA|ERRnN(Zs?YQhdk=5a{`9!@@zSSG2 zan&2wj%RPl+7hon3U6EFjS8UNIE|~`IE|~`IE|~`s3Gc&)41x5)41x5)41x5Dx}^x zjjP@`jjP@`jjP_MW9p65xay75xay6b9rae_y{8K=Hw%i$pWz7T(_EI+;k1iqnja-;y))Np5ab;If0M%jw znoh);DO)d7b9od|xc;(;iTllzEkp1)QSnou#ww^1K!xK1jN+g?vBn_eFBD`*K{A1c z92>){(0 zhgCFlM>NY5$ws%+8z398(*S`eDY0E&3safWOY^#vM-a-c&x75MV1mV(rgCC8GT}L~ z)y%vR>pYfadV$2|6h0ur<^m>dRsB{V>W{BvKFK9>D<$b7G9qj)A(T$zym~0--2)Qa(yj#Ikh# zJ)3YH(8HAn*%63Tskn?e+d~^5I7ErenuAO(RUnPw_|S$d`~fC6m_uI$GaWGW_YkHk zh-VO_0P)iqFOqWP(E#+O-y+43lsgbaNjYu0NQx>;3Zb?hizL{+i~Ml_LW55TzEp9{ z7jcb28F>%UT#Svl>7}i)EEvvCPc)XRw4_|vo>mE#VQDYWb5#LkL$4}uWuqC;5lhGo zK_WzjSK8o})1A>@;G7D8ShIAX1XTy*I#JvIe@-j5t_29`FZQ>D0246y>ohf(!|| zfmTy&u)&B?)=a^WD$#L@nLV@_60MPp3rgaHC^)+&u zakGPR{A*@x4xI-Q_{o-(<}!lV(;6XNuw9U3WRO{sk7pUIs1Om$@Ig{YGSel+9$Rat z&Fey%H_3XzMWi#NnNX?Hyac4t<4=sUQ37MZJybggavyluA*OW584DILutcWHMWnUQ zqXhJ;!|ZrdEBq%S#it@fH=rJ>k$Zz`9u_0{QO!U|(u>{#=Sh|8MIWn#n2kx$S9Ola z)`EGU6IlZ}Zi5S{d;lCrI5#%dpoIkXxt8)vqT>J!hT1PQAOzd@8d=%a+HQF24aZoz zBb$-|N(Oi~Y2`7LkL{3gCM7a2>aUFIH$YU9x5G 
z5P5g~TFJTbZj$r`VXpZeIs^jP_gkHkM+nm`95O$0j%@@*%w?p1_J_WX*hf&2{%-ET~TCygTz0BP(+w>i!vzz>BeD2-<^ znO!f=^>{sj35C*?uzsml{YXN_1(G0`o5#^pu(%!2Qv6K{aR&C+Tto%O)4%Q$X;kJh zMH*-IVi72fiV!JAw+T@&y37~REJ-MPtxsNO!PX&)Wz~m4Yp#35pDNZ+bx!PQd0=eV zN|HQ7KjQaDP+<*iM8>2lOfM7pO>ch*N$*I^g64&-yBgTT1W2mgT@3KJrm+VIbr?>uF*5%07rS~p`1IbzzvmZ`+!Tf4az^?#E1yv`M?ZL@{~ zzM>$$Fd^dtNknpp?h9C@lpLC2$uf;5_dQKH=@5AOm)t9Aql%WIHm`$V(osQ z_Ke#oV!)1am~W}8GSj|kyFmak7L?^<^xpw;G{n}CK^Xv#5I_}e-+3&QtF)n>NdPQ8 zq~tPwePH)SE&#Qcd;=I`!EmJLdakb(plAx(L2h_$n4)|@3ki%j)i8|Gasy3JY+K(Q zErKXbT8oe-7}v8T{brC!9&Ikh(n}crQ3!+hF(W0%{`G~w0u;(orJx8bLTbkP2j$p? zaR4Gh62vq7ZZ|{>e+^c2(eVG<6bV+9N|E6AAl}OGvp_zx{u-iSf0-}jlO+in{=WbM zQ7l0hhW{Nx*!7JfwwN58s&dIJw(|&VO%rNkno9DW#-tSDr1Z|~LV9nP z-P8DuToP&5&yzf+F{_Y$E~bf_T+@e~8JVnd8(}2dq|&v@q)29y9iu2->FZ6?XkNV1q(c5~i% zrzCKzzdnxw4!|YjG@#yUtJ{4~{T?77RxMbMvw~2>anSt%?E5u}{H`~i`%GwT#>NW; zyi)h?3I%gwRfGoZI3-JMqGo5^jOPT z`h95>tR}&iy#g9c3iyiA!IhZi3}W{BO9XHSm*#R<;OO%1MXyhg4Hs_$o-P{TjkfF) zDqc~874QC<{Z&xbvr6b3ST?y3k9~osBm;-F++rKGM0MyWGuDDPE`i$Ajpddue#Cm# z<&qpL_jPU`&D=((8f zaO~Z?sGCOpLRwcb%0B9s5xZbMiEh-rk5yT5aTxJC>SW`@B>+Y2 z2|cpI+1pa+iEF~lkEn+rq9&(H%Tus+3Z#>IxkYEyE>p#Sz6! 
zUT4IHqt}Z4IjpK>vo4T?dHCTOT-S{`5D3Po#CGHec!tk2H}ImZgS(&Ram+x4FQ7Sk&G_OzS_9!^=0exu)}D=epsSt#IleG_I01D zO?KkDVO@94^k|IYovn-!cJ8yd^C{=q!a$7KV#RFdu8AV({5+Cya zSbG!rs*3Y{JSfpn(Hoam+@qq6YeAt6Rpdq%Z#2=kqjiZJVqJ=g8aE8yT$1DQlqe#x zilkzdYLzGgkq87Af_vO5F4bsNPDEwVN^DjB&-1=>)_W8A`up?wko%sQcix$K=b3lj zScq78ml1Ua2M%!=UgZi?CA1Fb1#jsBkwQsPxPX6=)Zi{k1MYU@0hRE(F#5g%G|C`sO~jr4RcN-o zCw#?_pPkaH(*1t0jYfN}Gg6o1L}EhaUG z)I3u6lKMNTJ4nqUbqlHKq?(8@tKDsc;dM`>NrG>F41ev#)qt3LHY4`0)`&ww5w!R2 zwuo?dMJTruh9G1~5en#jj$qojy~wj0{|sOp<`1yO?e2T9PaieMrQPeX793G526YvBRiVgs^KN1=PKPzdbZL@(leCi*y`F!a}0HF0`2}Ds(>&K zfbL})$+6BoSLr6wrzl-T`e@Ls@sS!P>Ar;~d9dy)>9dO^{l`j6ntzgR-<*dGm+0B8 z4>1EMqPV*urfU*+FHD6#*UCu)+^aH*$#}cclHU~2%x{v0Nq$#^;vawvwgKB8a_NM+ zw=2yFggZug8z}epN(;|rl5V3rPI;y7J8Qh8+Xgh{jzPS;Coyye|Ln*=C*Tj`_Sdvh z{@uy<9TK@i$g+mi3)k{KYYd-mGs4Wt%o>ygYoh$=>RuTrV8e|0#Y^2Fdpw6ZTpt>^T#*Si_`DkCDbX*1v05HRa7zTIzBeXuQ9S{yI?$ zR-=*Q$@qk}l4LkXTFLZpd$CmP4hdW1mLm<3Q%inicDTwAo(U3$^Yr+2&*Pu780!o_ z>cy?|+`X=rDoL@n5GkwNAFlz^clg7*bl2h61KU0ng`?*z+B+I90D4*JDuyppT6q4W z`Vg@|;I;UDV9EyW+`K#KNDCNAWk zt?&n-1DM%HvMo}*7BJvZ(9HcaL=bu(ghbq~eD&m;V)!O$SS7=*2*p33=|r7>5n1T} z^1H@8I>dK`k@MS-oc&1y8RwdGr<>pH5bjps0i~BhuzrdkH2x5ckVY<6S{im&rMp?e zj;pkGEAR^;NrpWYqMMZ#-cPhmMBYl$)-zqV+{m-G&vboleyeWM=gZCSHZ}S@%KW}* ze$O`XFHh3=i3s=Jj|iRDUHgT1UKfXTW!EgzfFPKq39D(5^2Zjz8MPL>2=p#{-fa>c zLgjOY|7I@E>^ei|(7Cm6@w(GeBa2J;d{|>AmS4?zUHM!GZ-BPCOWWnU9K7Y3E+Zbf z2gc*NAPYe9L@vnw0a+N3I|Sr_faIZFldLEp2L@zuKpq&7g936$K$Zj~5APbe$$%Uc zkg0$y56H@ZJSiZ@1|$#h8o5;gd452S56DXbaso)6--UK224rML@0%$ZrC&J0QOg$h^sZTl5Xcf`IHFkc9!cLqHA)$Xx@nC?E$0 zWN|h1ffkq=>0+HoFMccL1=Rj`a}@g z5`;b#gti5t&jz6@g3yIvif0lJlHH@*7e`|ja;IKQ$78w{Ewt~nGsWNvW{S780^uVQTWYlAZb=2==icU$ z!;*5Kij{m%4^=n&M*g(`6iyi!)CoGj6VBG%l~ij{#0gobwdr9H5*`U^w&QoVTNA zK^QrGi2Z3i!u*45a;)bsqjml_1)7rd1IZw#g^=LE_yc!F-n`vK>*py+E3QSi`OKgpHhq$Hhg{Y6NH5 zhuZ#)t_5FSr7V^9v0BOOCFR8=x>{eODvq9$_l_6ihM5k&P(7;Td7hIG?zF+h9HqQE zZ_%Z4ysfmPL+{GLd)9o-dNzXN$J>m|Q-4zCmAPe}fnc0&<9Bk*>9!QE>G&~`89~S1 
zo-U2(%#$I*nqMG@2{rZme`1joh3M^s7*3DHarHF0oOH1wkX#0VO>xe50WEM2Qcfl? zoC7#5fzJG4?J07$3#YPdz?Q&6wQDzOFO1vnyU1>OJ3j^GR2OlrSKm00w-|2EtHJkH zUciln_^{0tU{5TlE=txf7PL6ksovOAU@wQ3fCX$tR;5htYAjHp{J)oNixUB%;_t%p zq=z>k9M^oE{s&fjUK@eu44+75j6Ke@A}oNKs}eY*g>0unF0g?w-N^cFJxr3?OlLsp zE9^sXlsnUsh$?sP&J)QtbHqFUW!T$b21Y9M%6RIv)NFJ3hP?@5j7+dm8rkqA8Yw`l z2z2ZTu$ZO*yp#s}6p7k{F38IpRsXjRv^kXY2=$7at{d^wbrVF4Yw(Nu;KitnzPjQc zO1Yb1Ha2~w%$rcqKOsS`f>vGv_rdga9EQf3hT3W8`>ct~&Pa!s4p@;=J;s8wCMoR0 zyijJnBzFX+KiLwu>pnR_X)mUcOw6tB4|vwF&CkR4 zW-s6xLxv5J1=PHwVnE$efx2pp0QI&rS%P^@l;jM55rGQSystMEsQc3$wYP4Nq&Cx; z+W4)%H8=p&B18eGoy^LznPX0%GM)&`0_uGcpi&yya2=TfRRlVAXAYnaJQoEp$jmT} z`QDdb!a*0YUF6j&AD;smCitXetrHDYkLByez#8B)DBl+d_Z!&Y6K_cdnA+EOc%%H?CS)VdEW>w+eEU>9CPB5@dR8JF3*RUpxr5rY>CXN(lJ^^pkr5J zAI%iNH=sk#76VF)88VetC#xO7>1%2~gX-YMlc@bX8>$9Qt56ih zDayI&INj~SFir{3hpVuUj;jbT6+yGXgi`m!Rxr@FmBw)TKI#RWK8SEM@{^~t1oNtx z+!=0@S#a7W@-_vhUtpX8PAes;&2({`z6uW5&rU?aem03@yNXY_271O5fLS=b5@Le( zr!=x9GUrJFT16l};FSZX_hXRg)qdW29_kRo=Pe8qd{T0N&mPNn782wt-w6nh;d6Tw z$gn0d{SXg)GM6lT_F_NZ#h?(wXAkysUl|D`1FszRbVW+Yb1op4r9k4?zUT}*Z}Qmw zJD_Tm{RjtlyShKNAY%Z&FhH;G9SephGYL- z%RV2_rz5eTZNH9D*2n3#8XSh~NEAOkt~4>}EqCf6YjGA+_z{b+%kUAi4Q@4pOViKx z88z&0%>1g^=nUNaz`I=N591NU{t$a@3q0Hz3U!#E;h0!~wS)+8>}GPdO9F0&|TVBqq}@=7DbyA*FZEIda8Gk$ZRHUbiv zGL^lSob72Q2n1I~(nh{@!${}UoU01 zQbUQ%P6$gK*2>x>YF85zv12)FuS6J5ld>EZj*N?<3iur(|MPDAh#@jC2Yo!-WM??A z0&m8?&WlATRU=jGC4$vShDjIDu>!!a&0OFEP- zVNVyO+pW@c;e*;mj6STD+D_E&&N);}S+7Bxw9MHlf5bv_L>wEh^So_U=F>O_RiU{^KEMjq(LPu=BxxhrX-`wA)3Jo>RTC@Ym z$g=M%RVbg6u19Hb;R;2eEZt`I9pU^3Ic-@FuAfGJ3}rnm3_Bupo~1&$GR$GmnvZW* z_q3KgPNgT&8uinduxIH{pUooAyG5mQ5m}`-_bmNxCb8!ii*zRJS^5ns-B#&bc2?<) zJxf1Xq-TmmIuk}fJyHa=HZa`QWofQNEN&1bhiu%wV8rUwwv2`C4pAnCm(zVY-PWY!~J{`~wa;2y()#8x~o<-uonkMVGI)Lzm(5b;n7vwx~r5>7-Tar%QO26fI=M@zDv5 zmfSIMMrQG~#oNJQ))rZothL4MRDviLLmRIsw5L&D)MIG=;7|O%gmZzbSVYyf(?$(h z^CTUH7d0zbR7|%~5=0n7<0oGv$y>E4TFGV7xCgx=>({FDkP7Z#3auOKO zmVMRaO3@;iSt}OV>16I!n&pXO=3Q_Y%&;t3m^l@TsNRA%9B;vl$tU-E^dXX9o~YV( 
zniVsWj+pTmqVv8*k%DD8A2XMWhuNx4(Mm2a4OgI-LsjN{>Caq$J{}6iI~Vcw=iejv zXRklMdP+{2EeMX^(h^>OR`i4#^E@oy!6;U$QIysSe+`y9Gfi=9?uax9pM!7!yckUw zF^YW|jkCpgr2qggVl=Kmj|;hrh5`FAeDMJ5qxdnft&ekFubhR-$pnV8BM$H?IxzUY zm|^j6vA_Xa{=?1fxOHgzlNH{Jj}v%n!9p5&pS{F(q!{p9m}}tJ;^lFTmb|eWgXlQC z=Yhk(nM&}B)}9)6m2`Ab}KFEBJfW30PkCMhN%tFN-hI$ zUf)x@1j_W@6Z#qVwj7T!Cwke+2#!J+fzi>tSB`KE?Iv6Sv;M@ms7{3Z^t~-`9~Cp9 zMqCBIkvokFYiXr8tZU*pyIWjQnREfxoIgZhUC(GIJ%BBKz--R50PB*tkWUCfY~2tx z*!shd0c&-fbGdRBD<=~e&Xzc=`zox-Nfsa0I6)r3ddvw5>(9ys)>^QTMt)5pwj;%` zq+k(P@1W6=H^7Cg;;{blTp!jfNfxaCh3E z-AoafFw%8TP=PfJ4^I(_M8}~{G8Bw38xz!QkUz`aTY(Bhgm0l)w;?>&6ya+fz3M5; zpCYgUBSL;qt_GH{Jxyj~3~J(>A1Y^~ax#J8oFkP9O%axn6Y~wjlG|+TbgY_l(H{K>(L#(CzkrS|$jp@sJ8$l$8*vTrfds zVhLq}P!GMkc_+vOp=OgN2vej8Jx&m=WVDlBNoD#I1ooQT(C0wV$^-3&=B}I#1 z=3$MN+%2g<95d6wVKBq8WMO7cx)|m}3?>MI8Iw4f}J_N3dRsV1II9>&X@xLw#HmfsGlWwZ?WEUImOn!%iVOJxue^e*8(aV- znGJ4R;+!8UXCB{&#{4$mtcfFh896cC04Ig{HZd|o*r`nMefLPgw-zjRKJhCo!>CZF8+ZbyRb8G@iWJONJj zfbS0!-`)%%hVQ2Wd`F!6O}f|d>QvmRKOZ4KYkp9YC2C)U0LvqA+NucaxVYD`x)aYg z;hPxM!M%=*_0 z@}z)T3l`GIH_~MtDF)`X%ry+^k2G5H#sM`%$3gufI1H#+k}Obvi=_!Pg{T$t0|B+I z`Q)A-ye0{ziK6ZKR6FMf0%8B|Bq+Q_LbB%v_e%14YzQ+y0Aud?!MR63Co(@^_2ct{ zb0`QUMx~Zi^#N_!tUtE|!~tkSd$>zsE8>UbhDgHv17E=O0lN$^29yB^b(%O>&)d4#K5y6WPUkWfF3ygj?ouanhjy+{S>nm^oV*%g-~dg54lE`75I zcA7;n)AZ>4+bZ{uDpyzgdzO2V$erIPa+#**t$~p$_+k|-3;o#b>8Zebir`Fx2xgjy zdsLvZCNSKV#eQRR{L^6T;Y46uYgQ9L#w-)i8SlN%h~uG0z3kas507Me2%KpkrJAx1 zm@TW^I6jF2Mz{tjR}G8SF3hH5d$91AivL?7^6Hh72@L1>xXky-iHU<@$!!8oK2lA< zGlz=_(1L|bz-$V!9Vvz-1&f$~291`yaasvk#ZAEH<-Q4ENwQ49KM{`%M6K}k#RN>F zwx}QG75DWYlLYfb(YDheCSabV^O?ohkC_0^*PkTGTeTrt+rZ`d`b7)G*H?{)R4|q) zv~uwES5VDPx`8E>u3mrucZV_J>$m0d^&8`e*-!iqxV{>}!k0%OKPSC{$~<7U zt81l=@z#9ng_Lz1TuHuQfVUTzXLz_sA_^@nIa_I#C61N*&_T^`!E$6_<oH01E{1sP!)D*gN z+@NFW-eF$KCdc%z^#1S|ITtKu-+~BMTeu&jLqIXtVYn6-4#nw(_u z?O(*Nzi2qKSmi$RJHfLSDum~y>}G;z7AS(}X3EDk?abFovm|jm-w6(bXOf_P0K1GEEm#)&aBYCDTkZ3K-!!NVx{FXzjvmy0LTw z_^`g`z7nG_a8{z6Okg!)w7p9sVi`@i+Hai%5s> 
zqu!Y0ZKYY7c!yt&B`-62vCLT={%yn~15qnH_ikihPt$yId+v`*f_b87+o9TdjECu> zp1Xtu9p3ZYZ<6G#+7PX6;PO28aSw^-t{M-i;54St%Aq&ZQ_VP6PECmCo`(SU^xuo; zp0_p!Vv=#haHIoDixn}Qam4(S(U|K|nFq{XsfekJx9(Hq!Xyt|Y~6C_%9^;7^EVMR zvOd*5vW7euKiK6WAQ=eH1@ZNa7jM0IFao09`rn5rMm{}QFrq~XDQI|-@$}YNng~7) zMig3Cvb9Fm#KEy6q7@t?>$8xtg{Bqt!B0m^q1S2-x&8EyMxY#UeW)?yN%g`{XS#@A zlg9oE&hV*XZ!18hUWCwe}mwk@2~%e_Q|oxzY4+d zd2)YODemg*gMJRnR|o*7*idmlrzH;J?+F~3S_N@;9K>CWc1AV;!Lp2v0}XNLy-ugu zxv~-PYoJ!+@Li%Jnh@WpS{K6}f$x3fX&>3dc;CQv0)_ae9?j0cGd-H(0|mTViV%2R zS-k{!N8(c?{xS1J=6&(w91bOjYm6iJ7w~ipa+|V{`<*xy`d&3Wa^lUV_}s948{B}c zsL*yqp*SZYAoYBAamRar?fsHjt7^G58H4SUP)q~Q9E1HUnUD2r$@BF-BpYCgo`XPqX34>MHf*X&=d%Q2eX5 zH+zx@lFLCb7`I99Jf=D2RF}A8E(W_Zy9jp)V)z^ckmzTNOpV4_@(TX)oAsMnvP9j- z_xyIJT*oeE;`TIUXlu%M#(l0i!^kylkaCe(x&F!9{A;|*-XdJBRmw$X<@#~CWZI37 zPk?=#B_9a@JYCdt`4oNcCBFl_WmX%n9Vqpkz9c0oqsb*@*N`)jIY^@P#sn)Zc}`xw zQiQCH3MmLU$E6aNwLwYaQ8r|jvKL5-P@d%Yc3k3;<%!J75nctzNTbe*};2h(rkV}#@$Ge!k6eRTiC;OtY;M7%MvkZ;tsh|j3*(Hd5A2y>RmLS7S8aX zH%+2tzU_gJ2iK$)X^qRH1}*I<=BRd9!bRl5E}zk}s1(X(VT`hkg1wiXRLi~sACmb- zE3@RC$$PV^;W~abh!qfdyfFFPz3`$Z-o?>;?DKj2`@@e1So2AKpcKhzAOLFcr6B%Eb56m>esN_<>MVI%+B=3fHu?%0*`7 zikgal(ec9(1TWfYEy9~1qX)ldOogdJNoG1|I~3|You1TyftB8&r( zlgJ!`Ks62`BGOxTL7b=k@vOWps0$f~?>u?XetROb9tu(8Anf7VeaI->_aR~ga9PIT zSwWAFzN-YOk6moex%kfjnSGzKT{9x?{L6##Yu3lP@GyL4ym>*sji0muTx?IQll z7Rvr^H%=e27xZRQh?9TcLkVvuzL2I53&3>9l6WM&a&_>E@Q#O!X-?6PM4 zXNi#3YUW|Sfsi$&-{k<->&kk{ITCWu z{5Vhq;qwKPa(}+ysbVr*J%EDv9fb2C$b7zFgL^6-xPp;6g357PQ~Ze)wT6ouc3Pr> z&llvyo9ZlKYvmdb?kd`S4>Y~gIyQ&At;pD(W)!V^T*d2Tuo5{KXeLlVdzuQDp}?Le zZ{Fo|ESY&PmE!v*9Js@vxl^)`1;ujv;X%wI9Gylq zRwauO&MyI6*wulo8vxkX9!#J;`DFr}A1Sl?W_?v)zEdhvjd(B7sjAoQHB^vL5<$fxaJwuhPWZ=u3gG*35jr zU{IWAH{b>Kp+u$f`vr%fl<1t||Ah7QF(zPG`;mAnRMX+0COVDJRit}8dnR4TgnO>opHpbcdsi+lX zGor@Ec@9-kD~+gMLljIy6CUHZErv9h21d~|Y}NG~P?OjrFNa+?9xd2|T{wbKz1fBD z#c%|6VfujFcA;1W^=ubfL{MNC$|1v^i9}>IXUDF=D>w_Xc5J(9@p=VI<8R;>_QuV+SAlFnE^2=hp`J)Iqkw9cZ}MF z?s!q3I+T*+h!E^T4YLS?^zVpoC?M2b4=upz&^~O3QsP|_ 
zc4|41=MILVp~nGnV=z$%;sPqxbhzrc_;+FTAbtRO(03R7QbZNT*@jY-lYjPozQLFv zE7-EO8fmrT#6!@fK9gxpT-xQDX`acnR5Rsbctlj8iju`{BdRGb>V9-9l;34(Q2uXs z5QEW-$2g!lSioQ~iUwo5u9pa>gFQUuup4{q)}!6%z)#-V$X0nUbV4KAT=ch5w@ za`_VjcNalD+l@~JcE^f8LC0hV2=eX5mUsx;jX6|9%x>Hv%1%pEU^nXHio8PDTALsN zcHd;vQN=L4jFzgTp8W49bDs=-?0TQ!&hFDJh4s8xwNcjYGDN+Krkx zE*_E*Ia7sj*o_8e5w;sM5DmLgkMO8JF?rTc*o)0X*S*?{PxfGY8GBK`T`%^ccvo^} z`x65YuJ&Tt(%$U_D;TNS-f&XAni+dBC*F>;L^<;sQF~)AKHgUPbV*#ABc4a=0P4?l zua>y9GgVqKdCf(VQX!M1lWZIKs@ZiiX8IJAtTObA}W_%I4Y0>yUMc6L9gJ|@gMug|~C(gP1C+x$CqVrzu!)XK9ZpJ>0 z-=-J)@Yfy5nQb3#L%7<9zqa;nALK-ExP}|xD|j_D_Q8#}=UfapG{ew>u@6sgE%sqh zT%E(k^N?GyJo``?=ebm+$?aC?U)QLp@o}~v&7i=Y7t>YO#tU|^isEHe?5YQ?>dnsl;a6k` z<}Lr)Dz}~Kr-FL6Gi!t)n77;kLB9V{hljA8S;?k|*_n~j3~7l9>`W@IzCE?IIv@dd z=2-L`*cms6yuM__Ynq74WoJIu0fus5XY$w@SO7Q7Xlf~ku`>-h?abr9h}xMI@uI#g z1CO~D>ESl!6z+wKWF(RMkfQ!apBX=4U+&t0jn=Duxn5X|eJMamdtA>rc6$oTwl7B@ zT|8aY#`^bnG?_j6PdBl zQJJ%p_lfZy0rqTHs50`O>A42#1|^SYrciS zk6Q2p7)FAA00Rr!_#_bc7|JyK>;-=Zncs5!@-vQ5ZRj(O=DV5tmBNkp^0U(XZYs7` zJNTIbXXtic;Y*ea%r#axHet=@a*R2T!w*Q{ID@|{`)iB~<@RnLd|8+`_;svtw^0UG zpvyLVZ9Z>~z$)H>Ypo!fmg{@puynJLB!sV&HN6AbBr+e1YnETjXy|8 z@*_--t<}VWQwx^sr{qhfb4X6qzSf_0eMAj#1arIgsFMHcb?nC>87|*MI&7Yp<}%5;(H!O;&+gPPqv-vSXaV;wFcHip%1Kw^zmcZL( zNF#V_qKr7+ig0ad7Tyj;k|^HTB#F#WNW{0ic3^Z4yzM2?inqe8s4+Ru}9=4cv}ZoN^^|iZQ&L@C6QM)7ZiXbgdAeJtapJ_!>Z$%oeg;i7LS+;jz+!T8u*7Sw8To&-ZA*~~Z8kE8X-jQb7cyZ=mnQjaq{7cr}sjo(pFw zjG#Omg5xdeP7FjAN3KyN%P=H2QE)&bwF2b({>&XPKm<^~!LL~s!OEEq)B>mYg+ zQY99Y=D=BW2vs%DV2xr9$`&TyFwT0+?gLR(spn~wmH=kI-;6?EjmfPoB+NeO%S)<> zGxgu@o8kFy`ux?y(dX?RjXv+aDEj=r=e%c3Dw=4?U>T@*gL}*|7z$PADd=#0eDt8^ zmza=&i`U1K%RPKE&TxijPTo4tEktP{w~g*!mRyGRBDW2ifn0t*LF9hYN8}zvxh{kbJ<8b}`f$6PX<} zkEG#ri!_gmn8!EJ6YrPdS5FA0a(r<*2g+ZC&&lIW*r*M=-HRnSO-ILgdR+sX(DCT0Rz?xJm2J#bf7b~gaI3#-FKD~$Hw4OB1 zsAn#L+vFD5CLULs69`Y>_jgJ86z^TH1o)j+L$e7jDQ9q*{dVwnFm=-_C-l~H+$$GU z8OP7uPfy&ivFq-?K>)s$)q|WRbYS9g$ZU#6IWsBe$@^JE51-SLJ;*y(<>^&88 zXPMXeX7^gUEjYy+?Hw_zQTu{Sqc^y({LtmiKRuvD#a*G|h^SC!i;H{1h`R~m%=|XK 
zIJKt=%7Z#Wf^ziQ+M~I)q9}C_4$7x4o?p-B>0-d{^=idx|6&UE+4o*O-CZEk-51a9 zwxo6?wI!+TKq22M;K2RWk3tud{sm|^Ujvd1_)ZI@?dQHo0ss6T{wyQ)5~+oxo+kAO zsTNYRNj*yHE>icCno8;~q%x!$NL^2A3aQIU-9YL>Qdf{Vlhk-pCz3jgR2iw0NR1$M zG^iy-P_nzW8&n~w&qy(hdxl*bO)wwc_a5A*J#Gd+=LbLR1-pL@3%Q>Rl<3~#F)*Fu zrQ<^l9^8ki+XwIm6H@nH{QBiM#}{!FM7TE#&j$B8^Lqt;-AnO@AHTjpeqVKmqlSDw znUR$Yv}Fe2_C>83akxn^#3bl%5)5R5JwgfI_^&3|$}s-h2>2g$_$^Zz$qyOEkBy{v z<@Z(hag*Q$CU{O0(6qYOngowA;vf9;ApY>f6tnp77kOCa-p&WdJV^W3$OFD#!I&%Y z$5+*F0YBCDt{XtHO{w5L7%UB(k6<@xgq1M;q2}QLJ`nHJ&h8Sp!Tl8jx8)zzPrKWf z4;xt}d?lW>nD?E~T-tUY1kLh)h5GW>O=i^%@~b#{&lUcrFrDy)sP1)ADDM-y~2@pPrD7+O;z3b`8 zoD8jTH?XD9B6G=RFD_)xvz31LK^jcQR^`QJ(xQ~x_E z&mhuUDJ|OFs2WW%{A;Dhl3u0sc+&4FT}}E`rE5q(LmK_*dd)}lG1tV4{*;#T&Ir-A zNjDPdsQbg19&!$q-?!b1CH`&qJWV5YG59HBzS6(dd3Q%BPb0&>{zi4X8o$tu)Z;^? zi&&mFloomIN=rRjm2RSpxunrPt3qvjH^R!Tx7}$b?nT78%gF%7NeFbW<)4X+wihRv z)f`P;O!^+BD=EGbJZ%1y@>VncaHX3WKS*h5rh%Z{J@AJ*+EF7!$H%C^DkdHQ+Wox- z)KkL2q@m2uzSi>XPKKRK;A;qU`{RKkpXI|wqx*$?SdRzy5hlb1!xZ*cLo=th$Z71y z^Gc6p_>Ia_Px^ADJDBzYrG@9T5Kl!Ye6q@EV%n>eE+W03@<>bVrnI!wwn|G&jaL3S z%xjp^Eu;@pTFS7e(wz+drP40xeo9v|uOC#iYZ<;y=>eoaRXUG6?(4`97hv z{^Q}_eYUXR|CuXW%;@7w}nr1xn9iqG|Nsrg?PNq9U=?>DBN_Uf|uktrCyd=bP zpwc3zbFJt__}^4o@>-#^@H`dbX;!+L^6v-DI^3gSEzGAwbuJ)%R_PM*RvX?4%3H*+ z^G(6&Cf#GeHS!+`PcF5IwIgz_c=0PY`amO z-*)HA^V{yFCWF6`xk?^3xPl)rSDMIrlQ@ZI>bP2-Uv&pD-jwEihS`gyG-H*Pwk`+F z()1Nsue#rVr7{eDq;t6l^W^NRY1^o(4h@$!Gx(9V=vKWqQNLd(J%_ZxkF>!ZnpVPz zA2%%-uW_%}2m$$AP29}PW|Qe&4aj4_?VxFSZ_%(0h8>}aC5J0CT#zljpbHQh9#W{q9Ru!C*d{A!r|^Mc7WDsj_V~tcF#~S9(0@?^Nxg>PH|8 zGO8Hgq@Pz>u28y(l`mC#0>g(Y-AH;r zr8~*9o6>_Ae!R+*GLKezEW?K>EpiT0x`g3-DqTf6Z+#*4Dq?s)4VSY0pba4PTBmde zc|KLTiu5~5i@A6iw7Y@}#ckXO+^z^InMORi#_V|BTY2sSR2X;a{z^ zX!%2>tC`m{@Uyc%tocjF`A})ebDGl9x!MiQbbru%M8Erq*G!=(`CDqYP)rzkBvM~8ThP+E|2h|)>Q z+*fI-O-(oBBx^c1JiB_|Gn>Kt1DsfiU+u1Q4=|HKLOd z>$H}Ne^$Fw16HsGZ!*B(r-%V*O<6*Ep)yLb9#&e4GK(~_KUH#j+dYnH@LXrYZ&Kby z=6l(r1~ZY=|l?-@oV6Zz~6|KWM$Hr~Wl$?w~435#>038a2F&hIS= 
z+ug&=^Y(l;OIW`)yc=0EWU?cljracID%IBJ8rH)E0IFM{4IE8VeL#f{v6>f0fl<+m~ODcN;YucuC3+X46UO{?}(ru*gQF<&3S!v2Z zx>m!bJk?5z%uAG(vQ#N8T0JSmU!Za%{BVR5{NH@4zh9W&Pt5Q8_(i>ISe6dZ?p7)& z#ej`afV)UI*SL=>r`VIvsOI*~xA|@7=p|f0Nnsn_o<4yEU;Po*lYjY+E}Sf(fG5dp zFBWw>D4M%M%QuMZH!xtK28fe;F#{gZ05K%jQO3|vF)vbiRV?9|N;gx~w%{qVp6z;M z?dKc2zQ-Rv+__$VA6}=wUz+FNeyz_V%x}v49&dh6$1k?wyZ&UJC!60v-)Oqg_)S^Q zbRBD+uQ9(jn%|Gi@8{-sx=DW-p1aOw2~RgA-0nYks4x$E;elckCfPBj*y9k+>b&SB z`c|K}Hq|-L#8;bN$Nb)Deh&tJ1NZ$-PFbIpo?3cJ%E?Ul9Cy&2TzXQO_0ox{$zK$6 z=NOmiBNRXI|Ildp`BQQJ*;-tHz)?JTFn>I#(HR_Rxa~@?<83gqJG;&N_154Ui*LfV z%)$9a-VN#O*1*?8t+9pWPD>Sz_wsx~sy%PsE%QswW;zb90BsxFVBV^<5U0esb-&g!lzn%>M5UU^QmWjYK2d|ndT>an7CBHDJ?`14c1N031>Bx8g6pl$=X>69kUTGl6nOPrxLFO9X+r3S5vi z7P88$x?J~5e07P8H^Sj89Z%s_ohFM4ZUZCoB?t4(yq@^;Cp^M+-n(T^-}lR$S#2yZ zE@5O|cp-o<0$G_#Q;Ra|IRtjDW#G(030%!1Z$%~^q4>57Q{Y=}8^<+Z2;_5VdnS+8 z1poAaQ=j;~55Kzz#j^Vj^p0LaLKPAwGTR}d!dY0za-1{rP%$Td@fU9z5NjcO1OC;)s;A~TwGgZC-A3gnw3zbPDB}ee=`4V`mp+} zjUe8Py_mW!a~AS`6mzo8yl3H_uvMV-M5c+|N7Y?EY(t`U6yEWR9h=EGC>zv4B;a)$ z_)-)4xVQe384JdYd^{1l265f^S2pa~MCM8y0;+Hx)`{6R?q-)xXZjiVy$G8wCfXlfn2oT z$2STO{gEv`wFvD3uvdz#7l3n-n&Xhz738;yhQBt&4UA{$r5wm^uBXY_GCX~v?67Fz&aTbPV`(GibmxhQyg@h| z{|Zcmu{gkoDsOrcdJu?Q^d=#q&jydUuJ`;B5=HmFoV}! zhjk=s?;6XdnpFdp0Z^UH##$LdQO}J$4hUm>K$gHOh}EqI(O1(Q((J-Ov_oEGw^!W5?y-K~Ypv&#l<+GO+_yXhj+T z5%77LC8C%Tf@lGZG4Yl2E(dgbDKqpM^SJ$>%g?1eOIJ`SvBsP$N$QvcjY*D8Y0ODX z1z1Zr)z&PxzV)!|p8fzeQjIIMqHm9sBbj`&pc3y}ggW1Q?3eZ+u4x_j7F1{9O_Y!~ z^Q`nuTngn>yK#}tl5~Pw-siKFvqVn7$1wGcW%T?TAY@^$J!$C z=c~b6T61FwE`R#=WP;+xkz}L9T!Y;k0Ap!l$yFm+@}V8XM7z5fh>H&^drJ+ZVZ5;$ zFQ)DpspWoXb0C+EEy(>PkBU{h{cDa|MEJIuub5keG)>9vzDEa93V7qE(1=9AwfnzQ2Tt zhheKxd>2Aix%Cn7EmFBgVel>1GB~z80A=C`4*o@W&w#z|Ray0LXo=m;#R?1U+x34T zC)lw9BmuseA=06MQHS+$a@lnh3@$RO`09Kasd_MJYfh(0%V&`&n9k0qxa4{m%aF5! 
zi_Mlrpg%%#5l~E_awvt!CMh-U}i(e zEj8~^ln?tjQCoUEaj;O{rt&?erk-HRh&W8=VOW93#J95mWC{XI3w@X-rRZ_PyQwhU z5jvtnr!cLAEQP5^?HQ&kQ6{LCTpmp4>?4ko!1RpQsJZ{d^kB-yir}pN zEDxrFgfL7mK$OvCk1#zLg$GR8K4F-?`NIDXFfAK~mjM@52*|2fZXc#?0@Ke~k&t5r z5HcbT(>`c9x@8Je3Ia^Iwhlv1M^B3Wb76W9bOe~TDNGw6OJOQfdxq(ms#43l>dmSZ7uT8x}a1ipGler8;yvt{vdQm_+RbeF(V@=$CtK zn0j;FL7Q`Wp^&mPCMCfqoBc>xtthv;E36Ipe(&o2xbIZv+%zCQqTF-}v%P6R1@@C_ zS3%ol&QZmtwBRp_zXtm~I;n9=E9^fN`^7&1;T84+Yz#zmn8SrAoP{UGyyY{!#Kcy2 znY}>47xG5_fv7SY`DI?8*w1|kT_V$V)xH&$+k!IHRP5JTHmvQ&-BT@4&rSHh5s!&K z9{^&M)rMDg>x&rX+~lo<`B^%Ui?^||Bt^<8?QPFzn<04((^>CmQhYH*U0C?GjPQm9@AFSjpm;_q zI4CcXae;)gVGAiWQ9EZ#n)bO#Y5dp3^MiK`%cGL|7(W-?LHINMPJ@b_N?eTmptOZk zjV638;&Fa}?o)10D8~2DDu#W7)d958n-Pwc;7HjMiqMiCqI`umOXriGtZnrpu#dLd zwPL?-$|mDIv5hw+rf!B(!EUx>}0(>w<4l-aYWDpMhtF{Vjm?n4y) z$ZlLp^!m~&6kU{!IltAwR!3HqniW~x@y}vl!CEg~!jtb~%PV@Od&ifZ-)FAQn4$`M zsupw7Ak(cXaM3s<_~ly!&T?x-nKh}1#rKu|kd(VmnR7W-OxGrAC&M2pv&Nulqhr@t z@L`B8vr7wU+I_hxgjYDFxanmKCfRKf*@Hzk;&AO zENYasmli->^3r|wdhqMgRVF3bGtkDR%S-Y8z0Rl0oU1FvbCL-|@bQx^AiB)DI%zr! 
zRF{e`5xcO{rgmlRV^YjMk@+{B6MIsUHm;```!OgE))lcr9pIxIWh+3Va%yJD>VBj* zyhF=uEhGo0Dg;PdnXS*(I=-5pa!x7oKE(l9JR}m5KCU$Fb@w|VDp>p~x13IH@mo$I zkmX)51;uxk7PBO_1eN0x(qI6*nX56~W!4O`_zf|fZEM#ttb&&pTsDSF7+#LYqq|l( zGuqfaY!Op#y&iH``2utpDLaARBnDgw^+e`78YFub6;S4s705k91?72pOEES9u1{kf zuiTo>lA=aZmFt#b3B;>|#FRPL0$tHbcN1;CQ&+@3lrAmqL!EIBhiB9D<+0Cgj6({T z2N`GKf3UNM3x5~|V)~AP5)noSdaewQ;SM!dsx+0Ha$7i@Dt7>e3JA;H8VX2t;LKxw6sNE?U%aqpe5fF-41V2hg>m&Z%MzJ)u||L{kjHf~ZAu^e^AN=WO!u!MShyG;^)#DtNL+}- zaL+|XuB|!Br%!0>#)|bW8j~Z7+q^6&3VJ2Ouw8g8WUI8UzoA;@{c5x8)O8Rt6MqW} zr1S%0qn$Utg^_!1CEHu}w#-nj9ymBJ*3MgCkN1k3Z#9C)t7^ zObTFS0Ooa}<8`mA>!ed|Y#bV>G7*l_+>ucVw` zF8{0Ev!csBxH?4Fbxg1GtaBpscMvdY8*~ zcsKgYcqC8dx5-o@Ljz5=h@kJFBEwkUAdF@b8&oZk>E!ehuqW7zP0d=w{sV`5tQZO) zv3FYeBn@G<8`-3?WFBB=@>r%US)Zva&NgM)v9HSFR1}!6K^9m;Ny;oYSko3?f|X`h z#WJ$|pcXo}u=X*tziF7;)A8ZGZ%VmF1p#JX5Cv8=yr$S%KFSGqFLiS zDYs?Ynf=09*uYw2?t~xTs>yA0Xj=9PzxS_)69}tTfNi9>QTvclHnyD5gJ979)j@+c zqf=U|j<<5~8-UK%|GCFAXu4JFmDsmvo=I`4X7}Dc_z;!Lq4!(I7%XPL9B|Rak3) zK&(&{K}D9*jM#mE}m(e&U<83$x5x*C;|e zFl)sN)T~vDS?jE@elWwJF>6>>Wv9eGTEs35H~|F(Ld360ET||jAn;bd(3i!yInB$& zx~&B)@*oGN3PLu{KM?aHd}EWOzFt#AcbB`sfF4P>fsX*oHcOY#AHgynd3ZeJ(kH~k ziLVznm)+2n>D`beo>P{IOQ3#`MUx@2_EcGFv{r9E_*-D8iWUxnv)$Rie8F0^(di3W zEB}N6Y-JndkF%QT{Lw%`;PM*zQXm&Uu`Lbh(htq9o1hREU5ai>m)Efb)tZ^0G-m#G zK}BKyssIX%;vL8f%g;D+z+6K(_r12S1sh8l?JWlO>mRH;4{s6QnacM&!U`sFmh!4u z4(+BJ#IgI32(F`GaFID5<%Lz~br&fFqbvMianw;fBE)7z4@_^jH?Bk?KR z*Avs1CGKsjU6#l!#xvXp0uauyd+9In;qch0iHE@gEK)N$PfgT|M_b3X6CB?4m|&V+ zQdTl`&gJ36Ad&8gv#`djlEbhC`+bH3HOk{LKF*PeU|kE-$iPGVl@x9U0Sph32~2Xo z`SZX5gf+>w$josD+=DZ^^hoMqhLON@URK`MQi(K^FABpP z6Pc$FX3PeS11sz_&Jlw%ll_`KjDhA{;;{cFYO5IQG&l1HQ-E@3PK)pxe*^{9zP9>W z%zt7|_Lk};OQlA& z%h)fhZj~lWAqIm81o+Zzo0oLlPymo+u0gVzR$2}m-`p~hxdhKo&5;T%>IzvA$=-(MeXTgn-@wHE`xvQQQw5i_W;qL6JYbW zmyzOJaHg!&7%Mr>s;4^Cf?|p+OK_%y1U&D`vV4!%@AvIuRiXE+b9qm&%$UR>At!K> zUN&41*hICU2!ymA*YF#3ODGdn6@F*vh51|O$(bMb2ihKIVS&hrt`Bw|!6Z^d7)WyT zLwQhI8R^n(IG2aj92`){b%5((BnBqQD#w1*!~_*@lcM^kbTxu0ip7eA0DBcAnZ1s3 
zd-_;J%U;J>Ktf8kLkn?^0ZU3UW>V|ce4Iv8OQc73A=Pv2&6v0PVE=njSTMvsN3_B3 z8O|)P#y$r&Rk+Vl;fx9PImQ%m87ly6BD3glR#sL6D(uUOeT+YZaLgq|xwiPXp?wa| zhTO))u<+p%v=+j!I4K((nCyx3>OMX9=k0ThDU8?eMu^9qq0|_viG7Z5b)N(J6)oDT zLE9+;2T)ptOW!K28T?VJ4tG=l+U*q*C4%vM6Wq^8rh1}xQaFwlg_VZ0}3 zkywpHrVYCHP3lt0OJtrvSlO_pl#{6a-~Mcmxoc%cuwK^P!^`~M$}o^C{F&`B9GFcG zX$U7(6hE-seI)cDn;fr%H#tH_n(qTgPn#TnW*knf*RF#D0aHZGyT62y-~#Kv5Qbmt zJN)*%p^*m&W5~**g8dB*>1NQGYDmwghil5-LR@n>3JIa_BBl?#I!zc1o$E@8s##Qv zu?t*4MH-wXM--G=1MBw_8yaMBXFMz$8g0xL(2XpT#WytGrI)N58d}Woh6W}0v$7U& z=GxGhBYt+Up|ST+NX)UJ@gz(mhR3W8jct!&<#KOm{8wc6u%SUyn|niJOUa7vaPJ!$ z-w?TuY-kKZ8*j>n#uK#Ey>Dn-2o|HYU_;|{vGBcbXk3coxq&>#o=wDD+07vY!?ozvV@Q|ppbPh|R z+pX9WAOqmdT#fqgTfmcqfuXMXw(p=wutF{Lt|_3o0?-o zgHTUoO4x+gfeu!+!dS*ge<^vYon}&weMsnIy$=U0M#>L=E*}ZIN zj1~rOLqqc6hDQD0WKt&2IW{zYp<;`A+|c;s_srMZ(D20$4#Y00LJhg0u^YnvA2u{D zlbmGB2P&myl&OafZdn@|N$DJ(MebgG1nvtCcCv38V0N(+w{C}3NZrY<-x6dGh96B+ zWF-1{o<-ZHHri`IXK3U1{V{}HpL7$bt-Mv_`7?gHD0X)+@rS5|iUBhh-Kc zjGFvW>s_%i4|Za5Lx&uK;7`|JLIC)n{7Lbwd006FGFgCz_Lecs9+$h~4;m8lQ!B^L@CMEl zI;!{z3|_ox`!!oU&_ttFCOZj$_^O10 z>nIppWX7BX2D0}B&&5b(7Cb39=Yr?G;OT9_GcEDKg6H^|rcHfI;V*o+_wcYq_Cebe zL|~l9LE9oWH_+uHY98GQd2Kl+mT<2Vqo<9<)6llA#v14{U`PX)e@1dym)( zdB*ojrTu^UwFR%E7@)~{@LTpq2F@&g+7w;B*|QS<@Y1vO??PYTTDe*Ye)m+@%H=Dm zw6-zmEYrTlXb(DP$~Y-!1>9}_$|?!%L+4H=!5c3~0#GogT+pf3PD#PwBJ-v|r|A$M zIux7}I+MZE8+4Xq5D%ZxKIrbAp|kA-0wHRgUPafyCs<&d{teKX@fL=u&4+4j4*jf4!awx5z!Hn4Zxm29%O|+rP{V} zGApTL64(uL$P%(+-s$ea2YDW;=%`2E7d4w1?g5ub>5~u!+t?=NR8HH{@lt=99#qfO z{}v_%K6RT~)k-P0_|&9w_+GD&A3i6dy?X!pD;bcJ0HE7Siv*vVsrB$pTnp1hKph1@ z3uKIFp{F<7&qFd%0euC70_y|gQ>XGf1-5*-U5YXA32fxq^D8+}S?%tpGkk03Te=;C zisW>?Ho_KTBovL%!Y!Mww$@lTe~c#l1N-KSUm?i_AECiE`r7J0Gz&scyt}iI1(*f5 z_Dpu@3T#fpQ-emE#FN7o17L!cV`m;Wf24uD>G|3+eea&}JX))5&iN{=Z60=h`~|J> zfgHqDK{OMYrD!vb>Y$A?6yDj5U&!Dj`C@O0i983=<5)YGSgNa`JZDljxL9f_4|dc- z3B2X)XORH-z~XnCVKfbD&MNrYTz4?7!x(TbhdTt9d_>uX_sbKh&a(9P{Ss5Gy^(SH z`|}Jn`TJFy!$wV+!r;l@ConNdOiAMbouU|e%BcDR zXCy$0S?@Y#A>97m2cx{p55C$j$M*Z&o(>ztY}y 
z+hUJY(edkFiZ1dV-72r5&nHJaH|&W68a?|1`(k+ptJHmt#IZob8H{n#+5AH>d0;)- zIegF3`crxpc{Fxu#v~RD+!MybETv>0_bPds66GoodEAXi=;-cHnRKY*%;zAf}3(>YTM#Pp&VV2xcQi6ucv`{i2 zSXassRX^?=EftMN${JY+l_hHPQ3@DSmK0k98YQpS_^C!vqJST*!#t%8k!dy_cLgg} z?accgzJy;$zJi>b}%ybNmApSdO--sv|h$qHtUPoCbxMs1#mk*XkCG?2A~ zd$(pK_2dw}JF+sea9+4!@bl<8xIL7ON!Ud)ZVfeap$!0*A|$SN0>S)&EL;VN%eM7$v$XI2K5IVVFe9TA;;y9XJr8j!+imG5m-CjDG7 zvYL2ed~W**MtDfd%Hd6p{P<+zVfjOc@mug0%0bLEl(VsegUQ7Cs@_S(H9O%XAm-Xb>kD`5+8kT<(2 zUQlky*@7hstME*=gLUG7u7iOC^@oCk^Lj>p^x)TJ(}C$Q2qdvtw&*~#ltBqiiZk@y z4={p&!Fy|C+$i$`puR=wZUc0~8nX=U$AA_0*u{KFJu%`X@q|_h9rO)GJm9kp-9m3$ zMb0R(EN+;Q5XX|6rPs*qYq~BSdf8)d0(@4b=h+4E0)!+ZN>92dE!7AW=N+y{T)AtT^jVR?(&g16mQQEnMmeW@i|1JeWdpIsunxX1uzG32j&5=K?|uI7d<^L z)@6d5>hnPl7d=e}N4fpLT9iuO^SK4jWzG!)ON@ILtWyE?*C4XA29dCZ*UQLp!@!a1 zRG9i2cCUgT?}7x0B@@Z@p_`h*!neB}^=x`K0>{N{cdV*VuVz0uYt-*xU&bMF@^I8I zhCJh7_&RG5o~7*Jki#W`>7OT- z6$C=Yjm1C*b3OvHrC5K!s2euGsgbxfqev= zoxR)Bj-Ku5)4R!9{0ij3-5mU2Ek0%G%ZLTX&mH~(eohAv28hAp$PIxJTY3{% zq@rR==hmw&rRXeM8pX^Blx^2BfoDrIISk>Qa04QSiG@*HnnsjoORvKiM%(srZ8kbD zp$OSqVP-6>54h71g|-5cu3^~VNcye z?b2U_?CD76;>nFR&<<=_d%EOokAl>&LET7^c*_*!L0>44LPibhspzNT6yO%Wslouo z!>CqR2M#Q2A2m>HY0TmjBaIpLN|z%`0Az-j)Niw`!~iU$D$tYId))r8C8EDj)2VR+!gyu!kS+c{?%q5+sv?UUPJl>Mw1YZ|j!V?2!4+JI3nl{6tsRM?z_<}d#kk`@ zG|JY{Y0dR|6-69I2aP&7jE;&T7(gHh(zt>v%;-p5sTP!o3IRv*{eGwF-oE!HLEm}4 z_mA((^Q5b8)u~gbPMtbcr`G7ax8Kr#u_eDi+lnfSN1o5fnSB~1<<2#;qRFL&az5D^ zU8FGXQ0klqx4R7~&`CZl?+C2H+0wm=NPHl&8j);ixpjN)sNakchchNu4yfu0CYk+} z8$v~y_gSH@4&D!tb%QbYx2zZ>7RQp`;!5P9x~|)B;vjGWJf{_ivwa0HMVlc1h5Qtw zqZB^%;v<`=P;_G7$ZVq+Lozun(+>J>4;Ua!#eyyu!djxq^~K@g>_vMp%x|0hz)O`E;*4$p;kn zeKbbXr3DnzNOv;4*l@@z_*LC0l8Y-qliK5v?cn$H9&XB#>3qWi_c{Rv;R+wU8>7ww zp~JDco`wg*Wt%uMfcGA`3?1I&x|V3qtrM3#*eNFmUGWdmXCmE`yed7N@$UU0QZo+2v)!FI!rcT-GZy6muEo+#}4RN-P|@qm}uEOw3u~lm7G`XA9R# zyL87S$4;aUTB~*vH$5NouBgG2j`#@cVlRq*0z9^yeFPol-*T4u_yy+JpN-> zS&Ys37)ncaCLeta4;X)X7Z@8gMm0^`^n8?rxs@=t{=VF*kedwAMp(kO%w8|~%U3Gp z)7I^L^sJ_Q4QjwV%@sH84pD}_kNp0q#E42=FA#==y?fiZR@Jc@z|Y$DHq5K 
zFORK~{DPyWGRaT+>^Aw~>1;Fr$#1u8et*OvU0~DsUBnZ+8vjO>-%fen1dfi%^V04f z{%4Qy$}|LwxIPi+~54Ukd??$qIrQm6W-9zFU5b>*{6av#WP~ z>2KBP>vej9x#>5*=ccEU%2a(yY7$_34mJ=RcKVQA;dY-vg`Ro(UyP%|sKlo`n_aRr zQo9?*UE%rc!(m}_q{WUOw~-wYO>WDL)ZUM=3}!%OVd0T+7+@I)^aSSeb4-DU5B)%8 zVvz{9*bhcji+`Je^e^5L%;%^Hb7?+u&2*#E1ijW9(d4Vhdnnfxb?1f!0JT4|=X^ls zOoaR4WdAr`>O%6k^_}{0EXl+`H**d6mc8G?8J3sXL#{2UCA)H*VLN3y1-`pxHhn?V zI^JcNh3+dj5q}7!P^SJhyU_Fc&piDMPJ-#uPmu-aUc7GOg5AhZoa~U5!@y|zc!|QC zg-*%=IqSkoXMR|Ra!kgg0YoOjB^F!X!Dp?E+OuV|2(sXrLBtt3QTJZ;Q)}iiNQXDS z8FOx%#y(jMws{l4gjuFn4vcPQVom@+g&4LBZ5LpYOd3php>A-Y)NKbFbB1U<2NTZ= z0@%Umrx<{BgAWFAjmWo`ntQhpx28Sfw(KgzMTxXsfOi3alKrc0aCAq+&Cqya#PhEm z5qBE_^&$4U!7+@^Dgu=m*U?pMUDhX<1}}(}{HMAq^E#vp^@aKGT+V;PD$^6-&}pH8 zPzw}h%q9RvLWjqk{zYwuLT?MmXhdKRyk#`!z?T){^xbIOz8zGYa|H7AHknLAxSj*= zeF*2kuf&)|)aCk@OyL5h%FC9DPiV;yQ*X`4ynzPA^GD&!)HX@Z=jTV$jld1doXz)% z7b(4R@+EG`1c$jNE2^)9SLfc2&PHFCh5a5;a4~KR98}91G-e!s%FkR1EbV1^S9@9J zz`8(QxIsLdv%c_9?@(21Y9vb;8JkOwbY94rsU*w!Z(vOKC?M5e`12rt?&Hs?qwtfW zFM*#}$%;tr#@W=YcTMJWi!-K}j5IBf>Ybt4kZ|QM0i$D1O>xxjhv7CF63)cwWrLVg zTIg-dp9-*Ydl@;+$!t zD9q`&bP`N+SbXqq4II>36CAbQt<<>U2Y(ooYD9$zq%G;JG|mMPPxavtC6d>>HI9|3 zGav=H5b-05z!{)05xM6z_^tGkjR$2K;S=U1QC(nDDbHE8VO~Y>^i{YV`=JYK%RE+W zfO+4c|1!*D+;3oB26kmf%v-@YTbS3N`P3l~)ql`b4c83M$UNa1c*=CbJmL}Nk-Px& zu6rZPyg$INnRg*W^^$Brai2=kP}kOWXf?fj49SzdeZfxm008oJ9Nl%^so@X}buoT8YP>+)E$SSR7qLcFU+our z^mzEyF*-ozzz=4?>D3&Su2vtQRiT@gAx1tU4*in^NYa;eWG^sBtw#c8_XS+_C?Yu5 z_mKpyQ}2kIR?Fc68--QP*M3R-9Q~NNehRL7Ts~H+Kd?bR$Sj;QE=yyi9MxG1EvHKk z>(pqI@&=~7h9CAqL3xRZ7;N0@QR94=^H6cq>Jo8Rt3BbWW0|_AfjpQW`zx|`IajGwba`eqC@SHsbMRZnYwA^n%@ zl8XNUAFexn@ORLU8af`Z>KH-3RvqR`xUcpLyUKM!s&Q2&3D>Hvw-CWv)f?X&?q~3= zzU0rDSrQjvyq}{!6>h-(^r6{i81W)ge9IKTHA8Tw)qn7<)-cdHqxXe)oAxaMSgmRS zK*<;@fUDI@3^%jwD@-Bkbyxw3eMuLhlKwg!F4a3$@U2xR5|18+sw!Xn{wANjB%ie^ zZs7aFWi|bGq$c=BxTI%;albU-`y2QV@U2#wz^;;booWS9MDnspw9$P3oxo2>;5zlN zN%WpBflz&?2^&w>$j6I}2kV|TNS+WE@825O2h2C+M-4!rD&@~<_+hP`V8E_NG|?Q& z$O9Q@FBG}j1AcwrU@>3YR8QQTX*D-{ry8S?{_e}3J;373S@%HQ7Jk7 z`O`@nlz^qmowD~5RMt&Q0 
z;gg@$Dh&WO*~N5C_0J}T=lJsv{E#b;i@Q<%g@L1me*RpCA4X3yV0XZ!LVJLb4u1HN z;zxoXrV{rem4k@s9R7In)AVSDFLFXWHa}C2GA(Wpu3NqBVgCsny2Fmvqbr2CF{uKFMr;dZGj^xi_ z{3&D}N0EZflIs?=JA+w1rpl#j*!zQ6*}jDfhCABFON@BaULNeh? z?Uy=RLqGCaCGKjqLdQw9y#l{_nLjV`=UE*k**(sHzcK)&d{9RWBkn4cLp_NfWGop_ ze#qgQ#oeN=F^Pu5RTuN;BK}mE;HYtPOd0wxaxn2BqxEF;P8zvTxF4ZSHNV5ioeAEB zKdmHWn~#w1v|q&FbNI>KhmnR{eE_bD=WFClf)^Q>TNwx4a+APrQD;{H@#Bb8e-((0 z>OzC%9(=o4N)e_`GZ9k^%IgULR97-V;v7Z{&=JouVrN8T9u(|l1>K{lYU6RRRXY5tq?b@ z63oyjg_k63wfd)lxr*o)o3i~Gfu!(0MjFgX?H7T#P5Y&~t{2#K>Pm4ps!McS3x}E! zouNoqXJ*isktos0aMkx@dYp`b7!;Y9{oyj}y^Q;o;o>sm{@0NB5)(hqpIs25zA|Y3 zBhg#bRD$c-FZDX(%svzH2mTz2_5JWuV6HW&uQ2Xki10$9eqJEfse$+)s02HuAc^^J*n zRS4Rs=8L;Uy$IK>w5RAt1wRT`{Xr@!tI0K zJ2c$&WA*n>_$Hn+O!$)sC!T|VpYAKS>iB-+wEJ(kIK7iOeSFY85#Pkq*Mxs)zR#GT z!#4>CC`MI?v(8eZt_r+LZlE315~eO6olSF6owL=HgqgPhAwXzAC#YX8lrj z;ozuqSprkM(d34zsGWD+?vWgumtmzQ=5n$q@B3GAJjE%;YR9M1bm!T3&nc6wyiQ&N zXG5_z5O?}ja>*T+LZ#)}h41mlsloJ}vO67bai{O(@^B8GzxEajeh=)}_JHSj?>*45D^8#{aNw0wlPm=$fp26ZlRd-&!e9Jqk5Utx^9EwO5VRc1Y5 z>05oW2xs4ccQa}l&1`10Waa1)^Ffbqe|KPJvHF7Om9Iu4&v)g;jn=EtlC9OBI$cV+ zNR`{TDRN|EtmKpG>!Z5#K$Bz2%dC>is$?DevZdUgu_;oJSJk~+&ic32)+^C!e#B24 zSHoRg2_)*-;b=HCHwUcAT+4-%hM&wIszgjhOE|CdWfbOJYnEd| zYb+`W>q$$qOGUE2w}=a_nL97T%B*~gJPZj+#HWymN=U@8ZmvWuV1aPM4W-4|t$D*P zSl>4iu}xpBcrN*0iM82hL^wF z5vrTkftY|q)7ohpPQhUbrIdGzr@Iufam6g`97EjG0Osksa_s1-WFT5ei+O~>ZD9B$ zFLO7MmnpKcOWPOb$FLb&Ys%7RH<3GR7+$0Kig8@zOmiL-V5ur{#19e!NZc7y;BF4a z15|yyvDX@p4Z0w5T%_M;3il4HT5;q3XSp zN!jUOIgkFB5Q|fnTL;)@%R*6^>^*W%dPD4F*|LA1*UNaA8z;TzQeEJrcNtFvCXgN{ zy-WKnM{H^BW;yF!eIus%S^oaGs5MJZbrsio@#Y1quXxA_^Bh3@WS9S3kk}tnSTvvPb#PVN@T+y-_{4 za~UsBU4Y`KGk{9$6Ep^9aSZhnvN#N`(I=;!RFIE-Qs`#T!&pC$J|8D<(TDhjr~168 zjWFk=^LE-bCyE!kkC93KwyUmPY8$H$z%$N?Gmjj-n<@oU@H$l&?yJW#A$We-Qe}f8 z^%R=6r_UrOoxdmV6lS!o5t-dlW899{HR|BSdZ={&)=zd#QF}O!u6;cETU6Su%fc~^gpigEA+pEQ<%&B6AI_ZK1mar;W#P5b5O^YvzcKnstg_6szX zn9T=G&{obwTiMcC>gHXDTN!i~TT7XRcxf+hA{0s$O>eENp+Ne|F1sxA!G$Eh6n#7r zP*9m^w_j;Ok1z)$kQT(QmT#*h7K$2=q#h&|Y%GsDHQ`2d8+4pBA`M$T+`xq!(OCl5 
zBXcg>is-*=`#DzH50z!jCEvDYit(wov!dn=#m|rQEgp=YZ{Q=;=M4Fe`S=j7J3jN; z&kpL#`SSl_eGTmN8|$kO95wY-grfa_wZ2yT!L6_JhM`V&rjF}RU*rP(oC+W7>v)D7 zV?GXpYwF7x3Tax!jyBW>RkKCd>~(M{x9>>UMG}s?)bobtL~^Rfp`ZHtb5}0{+!W@bcY_Fr*H_-arj_NElzB`adPIYmxlY+sZJE_T+V4X#J1hfCnljrpx zN%?+L|Fui)d!u(!pH*a9CwJI`-HR;S@4^0rrXT3R{?L98b_U?GJ(!^vVbWi98K(8) zJ*Ye`9o34PR%2P2xAEss60%lZ$B(Pc$7OKa8Q;gORu1vw&a=%h!l*VUZs;L(kat4s z?ikKKy9VL??H&Vo6X%iKi0I@!{$w{?&HEiTf%S={2ju86{(yWQwnFTa&DrH{q)+YfHnM9p-CGI%g9`{C9T4kTyqq3$orG@WKrEK^*}N6W&MVaL=QY zcf)tPLv@ZT)r!4eV_30YL9ld^LtfC5vzX?h8hE}8AYm}nN3rRNc4&5(97#^v89}$U? zAA*ucopVv*cd!*mj)I_bi& zn4=&wy_a8!%Mk`fQXe1%?ofzZPcl&qY{*`vFpJpislqOM4l~m`v~jRmZGkBN8Hea-${hMrzMuk&*MA~2|N&H)kFw<{ZuKP91%0y*W8eT-Iw(ceZ6 zCb3T5%b!qW$E@n8vHnDK_9d%2v{$Ymp0K?VsU5CKy?Gn>Ref2oD>w=`<0YccBB>?# zfV$B-Mhgi(sb^4h9QEohlTSFcL1idCzdp5*RiWAb<8kRf7D4c6U!0N|0f`?m85_st zd!Nk5@*N)E3GX9wwZvQ7aLO+EYUe(0;5`#q^G^wR-uP}^3~jN@n=niuHfH0nQ#%ul z!x3kiJ2Y?W1+bU&d(fucM^YzcQ)AQ@Jj8uVD0L5$DpSZZ{PuoQ9%VU*8FggmZR32A zoS_a4QSp=}qk=sp>`xCqG*peGrn~SxGW&x}(_e=9Ee#I0CFzQ_wqvhda(U)To|zP-ecvx!B13eQ+N79PFix3{aUy z9FS2{5QSNzMq;{)ehgwJz*Y0h@iPZMh^69C*J?;|OI;?AE7erQgA`Sc?-7xk^Dg>% z7aVsUAiIm)W=od?i5M;<2MNnDFU)f+Kr7_$VNjjJtYG<{W!!D&@O!1&1Xmq{Kvm42 zL-}(cewgjv{OE2zcIL+iXXB?euA{fWRh!JmH~g5x=qC8c&ovBr-+a8ukB1n&1U^h- z`9~p6V9?=kIFlkz)*nkiyY1kE^E24%I+IgcyU2{Px&ycYO0lGBUM?F;TiiNF$cVHK z!i6G9-Q#AVbVtv0Stx9}Hn@ycUEMe8<>)54UJh_%7(<9sm&mSGt*+`Pu$Rl&BV@2{Kz5eR8pyD zr3dNmX4OA%0t+*~<+xTY-ARvE&*GHy-_bz1rh?%UmQT)tDrzZro}k>8>Dy1T^U@Qu z1J;Q$#g9cG0Lg$=d!;kc-o4n7Xs`4s{$--M)Uoy@P$O^38^iar{X4himpj)LVoJRM z#PdB>Uacw3S6|{!qs!q~9D+b8#v~_iJV5<$Yq$<)L&4VYS@Jb!CY-=~LldAmWl-wP ziF5}k<@N-;H-sClzCfvQdZ5_7QYPk1C=8ot?^Dz10c5#-Td{}ec~Os-VkSf@H>_ab zGS67#?2kNl-~<>+rUVok5Eh+&-236-rE@W!o$WEqNx`5X=l0b33Yt+gMvcQck-;>8ih z#cip9nDf^n4kbB)ZY;MhEkXnQH@PjNQr&F4@#7~_gJX*@SnOX^UvBB77&7VvxYS%a z9dw5AqWa|~vHJ|u!$@o}F+PE*e4;CPGg6x?cpogH{edFlXi|1WYz4<|Zwu();&S`u z{20y>gkKt(f%tNJR58b~B|qV^F4WD3fg$L4Kv1acQH3Pis~^sr#q2X#J-BXBL=X~3 
zQG-B=K7C>UTd(-ROUB5#1M;@IM|sbmViPgX-#TGJ0X(eJu}Al}RI0J&u-q!Yb4 z@0B0fF?HgKMwqgg{ebjKopLfm!5J<$EMh+}j|v;T;S2our+lnce}l{UCd@bEQa`9b z(SKBNF>A9{ffF}GQBdl?jiUb=5NY?Jz<soePY5- z)Z=NPtxp*xg-&(T?;)Ob8dBYAS;F|vb@4^D%F{*10jI*_=FBz0modJ zfZNlj=|Hw)99NnQXZ^{Z{EU@_#(+?BrHZRc1@lm9JfhWzcVgkueJfGBO3-TtHwlIP zi_pTxqlJwvYNH$b3(iZ6(8gL~C2O_*!%DxeEu8@nXh@P7gB@4sVc%cU_SHJ@g0n;i z9u~K@iVlqS{8zMO$+!cuIuM_cdk=-^SSzRk_Z9sxmWR=T`u#oVcNVh;Zgt@Phy6v` zzJQJ+hatG+EsnbgPi^61x%1B=js?uIV$^lXvNT3>YZ-Pc$cra1VizNM)V=^i-3yBnt}=A5wVjSgmx7q}pyUxg;7l7O zX1^lz=uGe^2JVG?236@X^a=n$SjcIJ5NlMORwiv#1P3R25X5D1mMZ2vSkwz{)Tx2) z%j#IC0(BX@Xp%i<%o&m&w@cf^{nCj#c6>hW|0yrQJx1H0V=-Oon-Kj15M-Bqav=Bv z2HVKn=vb*0-K*n>mk{5#l*DVP*Zq~~SVC6sU*5ANHn9Z-z!|PQHsgs@FNv|c>Mqjy zo}vwrZS4Je%(<{=g%K0;L?ntB!%{*cfMCM=sJ#?qxtD{-&;_oDq|QeW1*(zD9xI@N zscK-ZBGa%6=4c2in3~C_g0q7<5>!ueX7XzK7GX5I*Gy+~JH5Hlh_uy{wn%CXvoW`o zlc1TA)T0O@LG>hPX68CD!PoAaFyo|~gqf1y{b#6PujjrB$&Jj!><2{{aG3{jLOPpB zoeE1pUq2C`O=;Tt8$y~$h{}8hz8HJiT9bJM9bnA4qcrMVoNoy7By~E{yUi=}H<~Hv zjGjkzql2O%Q%|z$v3KnexauMP+|QqT@B__9IbKZN?TYzTSuuxo$Y?bEcx1q;%nw^h z)6JMKY`14(g~jZioOr$N%t zjgcl>Vqi+_he2(;=PLJ%>y7M!&j1?|WAo7^T9@Iw+?k$0Sz}I$MUG>jZ>0nYvd{Jh zuA+#fYUMEVLIS(W!&*oTkyHhvPa^kLK5Zzun>{&o8Uz5kd!WK6|3bYgs~$0#D0LRF{NXQ&2dMdyP8GHk~KYWWFFUBv&gK&n6r>} z+yIP#w4@Uy77=7$mB-j-q;;zjN8zL@85VOKy>Kv$5yU>bkbO}v(vBK0c9%s^ zZGHyn<+zG)Ik%u2q)Fu!s_O%J8CKi5KsvN zdV|&Alzwy;l_i&vxkfT}s{srNFe2EM+;OJnBOqp<2W1l!{V!4IYy~+4i=^H_AS#GR zP(ch^zdPnP!5H>iQ#XNx66sPf6Ac?Bozx0fk8p6m6V)ahs((0z+M)8sXovlhZ?waH z%xD##O7Xh>5m==vx2IFw09DH742@$HbIj>itS$NAR;~mR!G<8u%k9B(g@K1~XCgGs zd4+DyJ_UL-Q-&n45MePKk>}9U*POL>a@bbUoT+2gD+4^a7#tqRr-Iy$MHtZkqbYM}XDO^cQ`19fsp63Y;$Z3fK0 zr16e-+vdmAas$MQfR5P44hyvt<#Y$8U3Y>r(Jh!3|p(x9}OWwzYw13mdFqnYZAf$9w^o+5@P$B61>?K|TdQNKpz znpParxU|5t8dulu3_)Lx`JIaz!@71N;<8QoBG@9=5!xQmh%NUkGBHrB$J2f6 zQw(Q7?~h~l5D<)k4wN_h^Y5tsr?C~gYrfW0raw$cze^@r7d7DybrrTRo<*ETM0$YIitYiXum?1uuqtz@ z$v=Vgb+|B2t3H$v+RrOt!e7GlfP}_#SfGgIb&=HWQoaERV6Mw_0%p&CT39L!^h_vo 
z#duZ^GhU<3BNAt7RG8!>*j7Rf;A4NW@G2-Eu?j1tN)Zou%dkv>&(=EQ>S+{;kd3fp zMcZXzdO*F-@*{MZF=xQc7#^0B{DK3n=Mg<3sb9Z|S>-Sl?_dIZKs^dNGqW$Up)Iuc zXS9X%#$&DNx#4(6T0UmqQ`CauE{Bb@3$UoAy)u};=3WKEe0+O6LxjqJDMa!(V?QB>RtqO|ao!}za{Rf=O@g1RcaP$*qyAMM$xeI-v1Z3 z*%+OkV!zto&L5D%>i@Xi0N~`^eLt+&v}~^`w_YuGrfFNXlnYmnvdLh86thZs0~tGp zGX0i@n6KRU=~Uz+h^tomWXdG2TG4ccGDL)H$WmqClfxN(>SB^>&LY&cXW?}XT6j6; z(W_gN#H5vlu1R9kYOffz+RK5Fffch>hl*XRy{=&kyrqQY$r<-Sx?D0smoAD&Ex7xT zv&9Zta=9}I6KzY>@(=0m@^22vw7ldyOif`-kX9I0~a7TX1mK}M;z@=6@M>N#qpk*_NIzCxv8g$ z52xI;vsy-@8>?k(B;IP-L4ft_Q5g(?e;8emrx;X_e;8DdaWUr+>in2vOE+mH>F{)& z%m-urOXh-GJd<=4Oww}a$$F--k~(QJ2Wfb2C63L}T(>8)g@j0|nk>fvj*Dn|5Eq>M zWtGJ3C~T{(O3!;^=J>!yw~wvO+IC;(q(uA`S@f zKwYvBJmG34ZmdU^A)X+D`R3VX|*rjF5 z2jJdz9LuXfZPVcCu&tnTlAK2um6|5WwV!C%9=nV{m>iEXbB~jq8H(!@OJz7Nt+6dp zQ)r$;ddr>bSKzi$_hAO^H%4fgr$e5S20o^#+`4i_@6DjQ6m)ar1d2+@A5mt#BqEO$ z?G=XB5oJkC3bU7qVyXoigN)KqrNH=!x|@iw>{H6vifBp8=t0I@@PzG;0s@i72o8ME43KJ! zCYR?LwX234OeLaY1C>m~h2d1e8w9b~(@a?4xu65e^MeQN@o>EC={OgU$N{&jr()ruuM@WU11Z6UtK+E-LBQu;_fqo9F=EndIGctIA>V+@{ zQEkJJ23=H*IR3uNv~tzC2+hwPfT5pm_1ZA>Ixd~k7Cf08TEivKAxB_-13Io!Ix0*g zd!=B9J~C8mp~mu>w4&RKF^%l@Bj|If3Sv%Kq1RxzyF6x}Ukth!7cKb+!)Z|r7?xrN zDpH$*Y=$-cp9J~%!kB$ao91%9U>sivu0%CgDl}KfBlPH+M>3bFE|Z2Re)Q&y;lL+F zNa!b~llwP`r}`Mk^gv>D zF8#3k`oUFw`BTE56Y+!gM)fO3yh;jXdW-^^A_E#T!!b!rbol~{`)klH`Q2dLW@uQ> zE;L}E3kz;{?g7%~4QCf9t?Yz4lH!;>Stb~;J?JPB`UJ44>HL|-pDFmERciC&PW*5g zc050Bmk-#lR1+5V&zkQm4A8zL&)Ckp3Cu<{0C5OoaaD<%o?(~^?`hmJa6p}oAC~ch zgYXeSjM{}idHl)252M@DO{cX(r0{oBx=NO!m5b4_6WkaG)Y9#hTxW;x3#vG7#zM==G4jYhR zdJlU;Oz$cL+PzYnBdLQ2iWU?o=cGR*d13!Z*Z?QtZhx$?O{?TUL7U=;z#tBG1O}$S zmKIe(qdT2&ZS8czHMP?T*UU~QTnjs$pn)xG$O+ol+DG$R`>4>gkH&TGrkqXb1a0e0 z;-hJ;eN=ebN5fkCXjf|=&1&r{^?dz3U)1x(Jzs_AyU_Cu^L&?jzDmz`mFKJSd^dQ$ zk)Cg~=d1C2V?AHO^VNF3I?s2H=bP;L9@M^^bYhz4n>A9pY!|93yoV;JWwkjq-Et-#JixTAv1G^Dt1qwMj!9?w6adI|A-4sdS)b!g?7CG4j$l`i03npqmi`UQc%3U`_5;!%m zbb=$%#|Y#skS^Hphp_WM&2)&P9`o4|+5CXG6cNjIub_n~5+s>$*hHLnv5X7EXb zrHS{?r_3cSeC|%@i08t}^3AsVjfpLx0Yz&v1zD~KZyxXO)L96Zo#bSW1neooCTelu 
z(+P6Cxuq*4fz|}JBRscn9VT;G)@-IXIUvtj#0E5B6i4bjr8U~O!vtACNRv+WzJDW` zp*inkMvg9z48lDrsX{iBrBZQ>b50ET(~{QT0Rzs#a)Gt1x``6zUlOjynrhX#pY> z;E{i@;gqmFI>|K}iok?I*`i^5`w%iMkOUFS@S#2kYiRaFttm1oQ^HKvsZqZZQPX7k zxO-|TI*b)_xR8jYlviv$*{xgmmbyBQ0+q>B$KGA#(FUxQPyA+r(G&j>LfrzT1)G2XY9s!%Q6T=! z8j8S#g826eQW;4dPRO)C5~Oz{Pw@Ue+R)?|{>>*di*@SOJvjK@6kyl)r!$U%PhtXM z{VAd#_>!lUBo$%7Uj_&x_@uRs;Lp++gtyL=0$N|T$)|ncze^Ih!cR6s_#06$5(eQX zimdSWB0|)CxCxaDmPJy()&fHLgUY3XO;H($QAvmua?!m z8%fOr8*5)@3y3_brTy;+F;hz(s-eFH)tEM@u0;rB98Wc+3(@r#M!Huv;rr(1NcSo& z>|6m43;QHizpfj|%PLBTmD0-1?RR_vzlw~^Tf<)~JP3fmuMmMhg(!6g1D!9x*jgSI z_yc@U&g>S-#!>ae9~Sr=0J1rPc*rRM0R*Vvl<4QEG4u-g8j8RgD%dMf+<=5ojP`^b z#pWLd70~p1o=Eh#|1{E8*t;?tD+7*^kbtoNC}R<}1LhtU^iL6sD4`E8Et*znx@=GN zG!n{;V=z@t)oj~3C;!q?cFz0HlE8@jLz>r84N+@~B!@VfOi;ZeMc2Fa>aK7_QkP}1 z#HbJxyx7XIoRhbGnrDPa0(V%_(2UwKW8r`uvZqk0tt@9j4`hOCz&Zk6&rCGRx+#Q( zMa07jI~vEol1+ng_r6jJSR<27y7q_i_hb{*O_4NC!=_)!=(21YWVD}`kxAEnM#p3m z)lHE!PQwqslF@c{s4im{{NBsRq-#H;ce0759zjU6L3pE(~FsAu|UC<1G!pfCYm< zcB~dhXREE2c~ToNT(U`3P3=ZV#pi+1#Z^qCi|GoOGmE!oAfr+g4l{KSijk~&D^&>o zsz2@}c1tfvT1R%vFiAhF0*H62$n^{dRQshG-9Q^0UnwChFE(4AMKQfk8amaNh0`vw^J@? 
zAL(fb#+dDtterBlV>_iE*=_YpuqB9E^#=Drof>xTG|Hg%TMnqWGin;dcrwK_JfeG!Sa>H^y5BU$L(D_QQzZ2OVbcBT z8Sf3#n$ULer-x~GAE|}>^E?jIf(P%$oW!{A!JWwg&3HY+A>ERj)o)r@x-Vojui&yn ztZec6u^Z5|AmasCFr(39GlTX8{2W4>8DHBWD_-fA*=_Xhfk7)g*Pd3C~>aCZ(7p9zjfQpXXWr^p2u`{lyY|2rR)bNx)nHIavH zjGslW$IOCUaykX^LF-~pFN8Ux8W2BA$D?2NjUA>V9&rtf^JKckFVl@j5G&q4>fDY@ z6Fco}To32h*w;%qWqnpyo`*3*t6u{c5aeQ3p3fji*5aU=wa-Nc?VB6>2()W%Xl@Fl zX>N#$4wxGf($?Jg4+cJ_WoUClY7*uK;M+Ghp34z)142<8og#RyyTro?Kyi0*EM`zi_N#`{g(6eida-h%)NI`44tezPOI zTMpBB*Vj#9f*s*~7ofm9#>KmSM|i6Zz0Gw~n1G=ErhJK}k=vQLW2f2ZOs|_LMLZ@g zDn$~K1PhKu!)C?r0MCo-aY0I#qVNn`j$~%nF|(uk7lFxdV0(jZ(Y|@h&!l|zWRi=e z6F6FG2t16E>7V3$MFUUMz)X_`?iB|9GlA1d0t*!Pf*l$`*dkYZ#V`!VS>8zx9E?yb zo|kV|P$W~^(D>1zh_^L4SXyE3eqD9}Gx8=3NmV=-jQ80#rvt)F7?PYeW6&RqfB{C0 z^v|EcuF-k4Z`ZsJVBQ$Sp#n0YCBqIjYspBboK;}%JrYk!y0NT@1+%g#)XTTY%1aw& zPa#oOQfkIY)`M>DhAa@CN!%!JWY5LpnyziC<152r8u1P!vszz z3G9hWm)#IHzu9zl(1kJvD|t0C>867a1CiP9zalbi2%G?s(eukBE+7mewKVDBTvCdV zy8`l~De_6pAOISvAysXp<}@OLOAU8{otTF% z>$3{WF1&hh>Z_c39w}T7E~^-vLw^BCL17 z0lV7>`a6IEvK-FtVz${pbA~If4;0&;}}++w85{oeMxC=%lKRpx=5R5W##wQf;E^ ztlAg!ivY~aVylofxz6Pilcy`T&Xisf(pJz5k(h#x>R3P+zuB@FjL+KT)LP!+$8TCk zSqx4F*HNAo-pV?jhw3-2qg>Yp)i8uWsv}vuB}%(xDD0LBzugjCG`#%)5$_5u-mIFS zWXGJ^q6CT!fl%Qg>D$PWQeG3YT_CFJW`Xmm>K6bPcV_3KVkzh~;c>#d62G88%Pzr? zVH*jb4Jb%>4-G|NLTL#X$?`Xv0||_aM_efO{BqT`*Yl%5(jwXWFWHxf(rVCVTv-`f zCOn`0j7T>L3rKfg_7_^j&`S#WqQ*5fJ~e&-m8YB(5sKPK_s)W7sxTN<09CMU>+IUM zXPPiviQ#6R1UX>O5st%>5#1fIWF#az5O8nT`=_KIjheGQ#OPrpz+ydrT5bjq-g?P5 z>f0aC=DA+7EuNbRbko%wG2uc7Y6;6`;mv47V?k9Vdxe&5++oOzg-ogoXxW5vbJqC0 zsmJ>P$gLbAoVJ)JFs}j6r7SV^yaQ1tS8pKuN6BM~REjl)X-poT%83euDZmuw;pKY? 
zrN>?%8J27teT&?K5FAMFN|CXK4CtE(R{SST}wY%>?-iH>^~aCaz9AQxf5ZK6jYr4~P*{l1CH)1ECYr{{Kc`}abo z7;%1jR*R0!J7m1Kwl$JE(r!D+HBS&OXlAs@R1f2PnLe&Fl@1A+cF~zer4#gl-KkNn z@u4aX!`!4%$si7><~5_0IVs;JI=AGi!$1>&QHjAkHt#Kna9Rn+Cfh@_ zP5=h=a1nj7QME3r0M*SWLYC@_feB2&ndDL^ht5#o82Cw)v)5}#iBB1v2MoVA zwMf?kL+{P%Axf&VprIXn&uPucDB=pwf$k^SEg&Aw7|=;4$#0$UY@wqhJ-P_(b~4-T zlCN>@2+y!Am*=Z;)z9B4uX_if!@`xlL3xv*xcy8W!f`sZ`s$1x zd_vvi#m2}}2l^4;4u+e zT_o-oWQhQ$tyv#joo)uxP;~4ajQ9LbJ^wAGuC{0vQp6@Mo6PzunZZe$NbO?`!!wiR zcuSu8%cyE78m|~nkz7yfL{dF4kCmB-AHd_Xncj_=?_v0udD^t70U*T`V5S=m=zHPR zHw@3L{fW{b-Ku=QfQSyrOI;xnoj2!pTvg_bYAm;YVzr@DmJS8RrpOL@KfPZgHt}02 zq_dDS@8O!g7D5@rxpqdOa*vETDNb~T>vvxPnY}e99B5-3y0!Z4E zq$WTiKSsOxk@w;J775rKi9&w4$Pafoz#It5qhOdE2lX1qG?N^Pq7ne51b}itcaR=k7`NwW#q~85o7`fY zs}2Ft?y*eJRT$1}84%*sRF87Ix-eWN&oa)wx<`4**VQ*=IMMFe(@x$kx9VwDqu9Ej zGYWDeUwDa!FDOBW^W-Vm8dPoHVB(gxzv#A_40YQw=(ev%4+_udjqFay)7xZHiEypk zUU-PQ?Ml&Y>xhTC?d$5fvad2+vX6TZ-MAhgEmFG=-k$(Ji^J6M;VlFC&{4ZVAW#XXVzZe5 z2Ww%POrIOzgoaq|>O7Qv?K&yDAafdKwv*kn7 zS27bPuVgd^g0>^nP&YgC>LruhFb|#)qI4{%85DH}VjvVO`m+=t?-iA(JqWME;SrT5 zx*<4u#)nhoC6`)!*#q#Gr=pBdl&_TWqnU6q=-f!>Z{p03e>BmZby&wC%fS7)4*5__N_7%tI!3}SDa<@p^e(WdCbIKS! zb6-3C*jF+}Ulr!UpZr|V<->OK0a=+TAY($p`LkAD!W2HQkkCMG%ZvB?T7CPOH18GR z{30l)i`;Hl#TwPkyNTXV^PZN?2bpLHHxqWLbvhGKMvL*H>g96jXs(+1scO}J+zS;xH;6Z~Ft#wIjN6s%9vFNo3xXT9Dq+>3`{rU_SE62~ZVUVRuJP{ss$M=|Y@Oz&R+;0@eGx;Kz zvdZN1GKo~LV)p;ZliOX<6b zzS|jl6@AY$>|*+6(AW6chStoznA)0;7RE;l3#?U{YTiZZ4D6O3h%%x+b>nsy>#SF? 
zX6<{S@Ez1`%=&M7AUZ$zf{(Z}l0y%tq9A;|=`)P77INMRPvOG6rO$Ue(w$yX)OPUt zwJQ<;>_rf(#Dm(VwYzVqq36uw31TR=@( zMKEwsr?O`$M2Kqsm+>*~2lT80sQ!guwHKf`W!(*~>dK!^60%WkBY%J5Pcwd)RKfM~ zc*etXzaRaB43=E)Krk5s8C5rdI@kXMu<#7^!$dvRk8+rYvacUynTPTSqQo612WfPi zkyU$YKiA;YKwxA1nRM-ieHecN{5h*g+D8`#T{$gkK4lOh}8nMJ+bpFT+)P zkiBms7nM)nQuqk_h`_8@T$vTyT4<>CPYeDTSgwvgA+XJz4m9qW#_jqc<5#QW@U4!Nz}4yyejH#RerMd>jGM>E9P`maRs(&r zaTQ$k1wYcAc?r-WxktuYi;Z{emQ)qeWuBtRiWe^#@G@Z8)SqV>dm+a<>PZP(r!H9| zVbqsukoF7H`f0zw-lo&lGu`#@tHTkguApx$_@GYV&!6~n41cQlb0~i<=g+?U`EfOV zHuC2i{D6omX80-mi%Gp&1|alL(MW&yQk_x&WEM3@DTJaIbV8wY2K=nL={ii}-z5s% zLqsZH(|)0Tp7u++8zmj|w@%lGMEpg^uVFd|eoFEH19ygolcv*K!f=0)i96UA_d63; zu5k(8G9TWP3|{gFG&V4=gSB5o?8A?kFZ3H+#EXb6(SC{lN!Nz>|6yPs_rXr}!M+Yy zmSKrb)5N^y_|jjiVRKma=?GWD5TK^ecQ1aJ^iB=lgCKWke*yi+>ZDS$p+_l9|$ht&FAp5{QuTrl4f^bnog2ti<&QR8`X;j z{%Q051YETc8LJll%;wK`{P_oezU0qi{P{P3=CF!q@#lI;zC~SW&|PA_2b=G6;kpud zri5)&r(ndpSu8DSLGF)T$DAr93LA~Hiw(m`;EuPdP_Kg4I`N186> zNA+Zcy|5RByiwIFQb2X62VnBYTBZ7_TA`L7xX=z~L_&AB*s7^w>8i@~el!lzl{Dy1DRrG85 z*{F6!JTZ3GaD7A~FmXSEWg0Hu+%uw*m714hyb9?1gM=T)}cdo&m`EkJ)!Tj#^rO_4?6( zrQ-7KvIv@k%Y+cQ90&BsP8-d)0wA+fKKpURP4ncZ=heQ_?>^jLb?Pt zia)dUAFl5X0O9>^peb`b06-K4#J6{YPac~>&E_5ke^f6J#w{J1fJo|n@FnvDnPcQ^ zj;;-sQ~g8xg`nx$FAC{?_}3R^enxrLUzOPt6SA3C;Lp%nl}WwKaD;_z>!+podLN2C zyAMhCbLRri>)H$Cl(BFJPhXI@Awry2MpHX(39}fguQb&|Q3xaFOSVyaRGr87Pu~>2 zOP};Pc6?s0NC9}|ZWmM4sKm?@Ev^?FA`aPRPIfQ?!c zc_{)`^-D>O*)6S`1Fn*FkM2K4Q0IQ0eAk-`Rt3sjCz$3j8jk?Ejmeo*1pU_^~vV=d8yYY1wNd)%c7S~%6v7Vb)12)psk<8Q3~VKaPq{Eb5fi$8)z3ebl<$z4JNPGelDFbwXWqJ{@Otf((u_9G9l& z$4f*aBdIS?7re$d_`C{c)hND!S+)MGhlP#QSfMGup}-oo&Tllu!3L-%La|0P+p>?A z^#&-PTD5LzGSL3}9UvPqWQ)-cPg{c(9!KQSOm+0@Y{~ioOd|;g&n2`=8cG%ICZ8lp+hZeJipnSLXbJJ$PjUAz zHrNKr_jZ;hR(580LC<>1ZO!E3L;Vbfsh#k{n{OB4B?PJ)TD`&Qj$sNPHMSgGRVO5q zw@cOY;2w<1cw~Ue#IY|gjuF%|(7NpXpA>hcx&)}>nEaR`-&>Sz!jr}wBknqNf`L0q z+;!?o1AhrzwYLd84<9u2NiQ?&%gC*41Q-!f-!tc)a3{Me_@Wpt_1Euj$Mk!oI2t=R z>P#QWraN&%9mUYj`-M9vtm#z<)GGL&kiw`lsxW5#C+>`?EbqAt`u61u!5-^->A=#R 
zsgrRE7+MfV+?>1zh|ua+P==T8phtPPEWBNea)np)F|=!$x5fKRWKwQj(-=$sHxjvP zA|xT|j42Jbo%;cWy$j{mZ9xIO1>rHfFB0cg<6v7OG!rEY6Tar?QlC{5@^Mfp^(GOc zL8APTxdj6AwF^_)r&$9%2cWurfN%Y7TK=%T?Rtd8oPkZ@Rgy*thyG6wYy$s4#lXh! z%Tf0+)7*5-I$IV=EU4!8Xd4MIycdACQJOde?H?{YueXlc`$V0A1vu=(jn2UbqsB}8 zZJ4lL%?sky=}RFYB~p=*6t@_}bajwSw+`Ti_F~Nkv{wQFIlbS5D523+pf+)z^f0N^ zIn>;0BKmghYoMQ3TuTfTNi*6Yp#!bD6{y^Lj#+&psm%z$6L{zwhk{kXGQ7$-YY247 z=W%Pf`U{$Ro4qOovHNCjLffO@9|jmzi6370-0~V-A#8Ca09Dszz<>?4q>6Js1J-1E zacqT_^A=#$5t1;i7BgW3FSq8OneD=>d6ZwnEQzrD)v)2GCG#z61v5_)(ISIru5oWd zAe-V-_@;X~Gl2ULzOlpwz2lCD%Fr->D!VZ5%wR*55tY{%M~;;DfyrUr7!QV|qlHaA zhVKL8T>&h5haU(28-Vru6F3^hCpI7vcrQuzKC&sYcJRsO$XlA>mFAdzwy4~?xhZB1 zRgn}Q&<-zsmI?*^aCJO7Xhv)1`umIS8l(U>5tx;j6xB&UEIy+^V*g_=0C5o`hYS3^ zZm>n&@G3cmyLZ+E64r~DYlPNmQD>+$W3R8OMl=kEa%>;421`^~vI$LGo=e5p2Nyhw z$o22D1Xt%Z;#SDHb6Lr9xDk)?u_s4@(muFAPwteQ+E}|JlIqS}`A$a54`q>?2MD1) z#AuYihL}5g+ClsrORWVo(p|11_KI>BqpZOge$1c%RL5hw5)q6BY-$Z4Z5lAHy(hnQ z3&>jnHtKg=bqCdyS-=+H3A_7xT+9*?F_}4tL>;DStKpOru)QMh7dKBHM_-N-$ zT+BQcGE-G8)wWjMNY{j2q5Xp5A`@Q@SJk0;a_jpypJ!Tq?~R$qvVHAAHX7J@l{ZRz z$aYR^bbF||YZr7MU-7&rwg7i25+ebkj~meQbbI)=bKDt>!v@kE)}TqdJ(F$@?e$Kk zA|T;pe!s~3YWQ+7?EppPey*^`!Nc*a|^)C9eeOQ#X_ae*yOdXnneVW^! 
zp(A87ff8;C_h+*a9(4wzKRdgLa)vE3jp1d?NrcHo!-NuGU)n)`b|t{1KO5hEfA#>; z1o|^v^6EQ^caOwp`?DKffhH}P!Mcy6IA_pde@0s~?>RVepg#(|c`nHL`LC>{08qNa`H~@KJE`&40`h%?ORTA9}V| z@s68X+{mm>%W%$#^OwW%&G-2J%y4V1Iu0#Lx8DH^5h5eYrpzAFb!}1I+1>!XGhB6* zBuT6PLI&Gw)!{zOZPl6gkhj{2sGKvNk9NyZ^O2SM3Fy=;;sO~n#oeNsOdf0ClFZ2n zcl)onGp7J7VK??m*@ZObIgrOlayE8v#H~5a$VPWwl{+Ea$vf>~4y|`$8(mxhwv;Itwk3U z_fqE~v>XGE`ta$2PNahx7JWv7FK5-GN+I#V9S~nKdr=*dYe%Z%HGqlgKt}D3h2JNh z9q5imiO=eeUJcaGq>PsqI-~lqz^GmFLGvEZ(9{;O!*TC2gsoFdDCk}dfv2NijgE2btTPkj?T9uftcHgNepq@AvLs%0s`U5&YuA) z5`oeP4W1S}g84dwd$IBxS@})vRsPO|lFIMJ96GA}=a7x5{PsJJ*M(%iAr$O5_#RW# z&Q|^jbJ=l-9zw?%=f5NnYY{^JL*L6Sa?1dZ#>Rc)txwB$< zEuH01EvOsjlLYLmluyctm)mxcgfYdLen{fZ2Vii6L$Qm^!`pf7ma85ltk3@)u3(&rsraeEC!n6u# zCd*VW0bWQBs3Q2)aQV6SeAi1j)!q%)Zl|0(u)=vic0_MZJRDDGH+n?P_%c zT$M(Q^zZz57!ch3a)E(3hk+@RY#dxwBM@md(zsQ|t%OT7J(yD0n~Eb$4I}`Te<%Py zL(bu+P^#l}xG?Jr%0CkSUWbXWtR$?J&b+(^Z+Tg-W!A%;JfhwC`m&2KSw9(PyL9J1 zO>|7&M^|u~$f@I*FKb~992=e4oCG|Wz~?mud!*x74?wYG@U_^w|rB$KCjYm3~ee5Q3L^XZrA zd;?R6v>YHXyvXM-_W6(Y`Fr{NC;I$-=m#R=Io;>)4?oaLc--f|(C5F@=f4Vmj8c9` zZtEPmFAeD}cjjw7p?Od0Rf@PV z409GFSpSbf?jX9X7d>H(ZQ7+`XD`p`g!yb%P$cyhPO-$CSA?~Ah26PFm>)xdXdMAH z#%Z-hmBt^HJ*8WdOR6wT=WbGs9}1H);nS8Q^mxIU(}ZHoFV`eK+qWAFqGmvZ>F#Dq zfI}WE!E-F#yq6H?%&tcX0K!QO^%1}uT3S$yGHstCzL5QRK_3Lmab#r4au<$O5qRt9q+XZkQ?MXL{v z+GQ5WwM(G_X!kM37Jzc-Yimgi8PeD*({!M@BncY1>k#Zik2}dqrqfGG2Ero+ulNx% z^}diPFVsepThXWg9Xr(H77V`kSl+f1oSS$Wg`MkG7=(KW%xCTdr+ZP}={O4AvX!2+Z2r)y$bib&yruo5db4!Lftz zxVb{&@eUZ%b<`FZ z8A(l$lS~Uphi(gxVWNFUZQ*q$_?5O$4;X0+L>q1kc|_n$i84n!NqW&1NKAWeVHMb- z+k(LFpe;NONZl5QtKGJMEg_Jaj@!a!iSMW_oILY?Z3|nWnINrXMz$@i)*|U|3vyZ? 
zGx*S-;eO!-K@n;TgpZ`|&0=r2U${t0m88H)zi`vvchD9J!Mi|P7?t=x>lAu~JB4rb zkT&+?vhENUD!eI3}Ux|9MP5?MVLjB)fk_{#j<3-$;aoe;LY%`HebAKFNgSlZN&r z{{ceE97mmG*FhIB0$^I~iKtz%?@LTO5c{L0{2ht?nKS-Zv0o&ouE!*@GXI4ZJ-^J; zKEd#)J(>5QCZme{gspbgz^bm)%qIC;qkrY3BlNP}&>+&!oLDEbR@D zI%6gHhCqdw%@uJRgYov5#{Y-2cY%+pIN!$;Bp6XVp|u)sH7aWG*5IWk6*Q5HyEf6F zsMLB%i=}DnB`RnX)P-b2j_biFf>ed3Uip1%+q9JwQ7|B8(W*r11@H07Sy70f5Yfv2 zdES|GX3yD8z<)lU?9R@-^Ul1Nd1vOGn`rx$VrRqJo{Q>oA^=(LqP7#%nl&8f5jYWG zzP=1T6Rr3lso^v%9&r}%@__!e{RA?4C)&OTU_{$H!rFcpXoR+}hPJbs9RK4ho3_^w zQnVfOoxTS=Cjm?k{)sQ!;D0Y->POpah5o*@ef$Ief3^K~NC5t+juBhkZu981!Y3EXgp5r;dqK78nw?(ci(!<8s9jSn33S@CKa zR8Su}!hAT6NouGLKFBCd4HbiS9DL~Riw{rHX!WZPY86XHps6d+z`j2p)=}rak3Nh= z(=&V+9kb%cAVw)e4vlxZGBh*Ek+Be*{a6s;dsFFyHa%F&+8ma_>h(|lp+HZZsVldF z_U~m6x~XH|hwi_^1f|j4Va4YlMjL}L-CNlGs%{%}cd`G-pqpK0R=S^U-5K4t13hu3 zCc*9L{^n0GU*|3c=)`ak0m%Q!lB#|j@`cMk?$E??xGV>Yg)Z3nQGAGg99 zs+_wLpzB-$MP7i;Ziy-P0@Vm`%PZK?iJfn|f4XA_wilt~r|H9Yk3?0lyNIS%Z-3p( zMXlB#+WkLN$$ge}#g>6fr0z-gd`*MH#Aac}HaEr}KfhN|cO8FR#SdHOJyhrQ=pV1M z?SXiAjQ%l_IkqiOIDWz(NAqK>#_^7%B;8jv#1NhBS^Rc$^p9Rv`(GMj1AlzNk6ex8 zj~d6F8sa&`Qyw#j<0<{)apt&MLp;hK5Ab6d>PkMGuW?jqh`7!+ia6@^k2>Z!Mng=( z5BU1sUo!G~Va4n2@q#Ytj*}cX|EPg}s(&9(;HU=9)xi7fMCy-wtWNxh1s|e+ypA6% zYm@xm;jWkOZSH&e_dWLmc(YV&hx@!lu5njtpqr4$4bK16zke^kaeh)~zDTFvrPI&S z=`(fu&vg2=I{j#!{%f6nfKESGrw`WYXXy0p_o>}vOO|s{YgOtwe4Fuzw_hdV#5u~) z+b(4fG`6fjm*M-(7h@goraC9`Hjm>P*RmQ!CJN(kS@9|A)-Qo_MvWebk(M)r`^zFW z{Ho3pwKsBp4I8i>fp=*|Bq;}0WXOiS(dg~5XyyWaat4Q7vY};$x5|~A;jM8cXLu`I z$r)^H&K;r?c@f`=!<1Jz>` z5c#REkj-H*eF*feIHMN}hcUlova`HZ#-HWjC#KVBdoZ3C2d{dh5i7?6ob@io`vt2) zXE%sXs-Xjp@Hm}_f%56|#!y!ZPC%xnZcv0;DPf=gaVms&zW|C0mI%r#<4$uo%;zSP zaqy+Yi^6!D#9^!OCd&(n!5d2R`lsd(;9lckdvnw}dX-lJb9+x{`Z2~}L#EHSShpN} z)xmq_Y_ruNo2`{!3q1w2H0bBfm?PMZ3PC^b1Z{i)?FzeY$?J0Ap1Oz{uY{{#YD^*9yttgl{q~?<62baT ze*rIpp^wlXsRh+gU$~DoYq<;;oE(L?mHn?XhXY!yZ&PjLtOC}A^hvuMbpXmdY9A4d zLRlnnHk3`C7S_k4Dq_pnULp2hh$B?3LD~sbOrl7N!Re+HirMJhGz$?oFon|gR-a|Ihz6>0o0O;lo;V&j}{kXdEpt+se6LU^(Ti|@VQ 
zpd7)Fms#f)GeInlXl}|O`a^QO1u(C0D}FF!8a@TwL{ zLa5hwJXz*5_h%{%n_la1Jnoy|M%+Rso=UI&7vM2YI7EOMleKDm!8%XmSA6Z1Be>CV z)?D<5cuk5BIFE@hjB*l^!;1ftI+hSVLlr59u_NB1s%fQx6D6jXOec@A|40Q2FKUhx z+O!%?YsQmu$bkpWQt&v~%+UDUltI?)0|-V@5;yKXXJ4tW4lmH>LF2dPTQ!KU5Jg1UkmK#A(G3F?b?klADIJ;|4Xpo;)f- z(cLRlU?j{|a5egRKvX79!TpX_tolKEs?1P^LY@*GFlTZ!n1GcKa9 zG7N8=cbJ?Q7a6fu-)~4>l?i929_-`3j78(fj{Rkd*XpbLRxwWn8E||{yB)zDFw&4T zQQ+TXTOJX?bUDDynZSmgg22*493a$m2VkxR4DEQ8a|KRAUN8|84-HP$RuV3N%`%)@i_v_G z!g^S%%E$iDWl|{WFj4BDlsUcKj8Ih6phUdZQA;yxi?)Oro&&&2`$0nJUIEYd@K*(w z9i^AYn~9Vr?#1U9NyOnE1Qi2w+{TAC5j%U7F!p;%xHB5wa`f-SN_EjKVY2;CHlXn9 z>X!+x<`5e!@oJoB>d?b@?R9Qk%MR{Df}t)?aI076=+(KCZ8@B#gV14b3$M{6OS{7;3@$XkvGN?9vO@BJEbzfz4H* zpNZIK51o(N+k=+}oWtV5#vTd))3Lx$1Q5<IzL;&g@VwCtQk{McP4qe_$~Vh;{0 zQxD+<8Rd`OY+bwpYD^QNW`x$#-qU4T*pR~NPsBMJeGOlmQ}!lp{{HR&G-l^tHI#JX zf@1wDnP2-~BN;O>w5&M-B1mc}goc&5R&y6@4~Jizxvtc+-7ZXCG2=b{12xmd&rNGF zhwa83_PCLI%S?A2;n!lO`vGUVA7iHb3TC>TCa+X8+_k_jNTdw*X8k2Ukr#JNu7IZMlf_-GABQLuRALK^%ka>3p zD$$V|1OUty5Ul`Gkv(2!T`>Vi>VdTAq5fZX*<+oHEJ?^BEhX&tko&S`_L;@0Ymt{y zQdC(>x1XLW5_Gu#Q}lNk71#X+CeYo=k1z0{r<(4^Dl8Chs4$y_>wqP^W8m4e^d}H@ zA6F4MM7cUgJI_zc;xTE5yRc79H12n8Y%g3wOE- zOTl%Lj{Cpx?M~vyrTq8>KPKRVeG9rq*dHy0({<#wCY87e-$2aP;(my*`(r}yj}OF( zI9_CIaa=JVhpFEHm%Ei8Ur+>URN#KhusI#^$WL)PA|Z31U77dbpakYUome>d8Vh+I z&N~%;kxQHfma{UasygSe&Q~+`_x$(`KAxsW)qRrb>f*+Hz+hO)uhbu~B0)ZrzwRl3 zrY3CCgdL@SAELka!?(K@iKvQjdJrNg;_r}~Ty2+(>)pTM+f5PWNBnpXAMhh-o0yuv zNFAH_9N#D|b>#n3zt52GWa?8%Po_rWH`8yyH_k`GUGs6pg%uZ6*e^vKrww+GUg$Md z_Om=&VgDCrC+o$WL4A?I_PR_xv@N&7@skef~OJVgu9gKS)=-Lcq=N3*!{3}Dk{gHEqx?2+_H+N zKo4>*%>jvCHe57+lL`U8%UvpdV zg>GrSd@XLpm+MA1LNg?cUR|XmSv71YAUOJXX13au)5Pw@gqlcku)m>Wx6-cZvVr1F z)F82Yh4uGUV;i?x@e`rE@QPTC+d)Q^^M5t8y_33^m*XdD!-A{qjdJD1Ce()_rxOvU z3|Il}IKxSdnaqehIqsb^slWn>8W90pYVn&F!8pF7wWoxYXb-?2Z993W!g>^zvE`C#K;N+n8qL)hda~$uDry}UR z?9D7V^VmWQuv_heMh>HDYV@GTqnTI-n{wn9%?&zdfH1iw^ z1)IM>5jaVT>DmAj$&(5-6Gw#>P)5<@8QtU__(~bNl{17;LV}6RD?>M8#Is)s$pbbL z)_4I{a)6zce`ju%_&I9EjI5KHnP*XmM@GhkF{b~v`l2-S(v9L7b9aAsM(2_#V022RX 
zHaU@Hzd2$ncU+bIsB=-P5tNdR{cdE%2LsS;YYAyJ zr^XvKKciJ@{#P&qIvx-YR^Wm<&YV{KA*e`I;@Vc3EJFM=C2}dgIplF8X8{Sym5m1xam%QR!b&r{!fxY9MCKROxfjD)_wssvW-l zY$Xs!6J<~55tk*zz-xE&bUL5624!f? zu0$IBe6x9q7g_4@HFm_>5gnEGvO0u;td>#EwFAaFzu#?)vU63Fwno_#IXC52P5Qjb z`umqvlXl?iGmM`oY6QPU!L)xM5@$E5DPu=`zF|N8etHEild77uv8wDdYxZEMjr-t@ z?hY_>$Y17iQE*eo7V8(%6-6d>VPgEdwTHwvj9;+>uEOm{4B!5TwU-a?+F~8uVf|$F zwU1-u$TBN)-AL{_G7K3oWocNCGitJwQC_-TZ9;5AL+{MV!Ghu_Y%%R0mK$aO3*@9? z7m!c#Ge}c1wZVxq*%TSt5xELvlMN_DbK;JzC~1uD9U4{+j80k^DT`Bj)*aLE3E5FR zmAEcfzC<#q+f-}oNNa7ZFIE-bqUv*tmA;S^c-Nb%Qcm*+T3HW$QW*t<0^5=5rKYhi^AguXZ$@cr>OQ$y}b# z)*}!e%+45nBxdx2W{4(Zn(qSfFO^DE)o%G!}vLr$}Uco)o#p{U74pLJd?P z-?-lk4CjQh8;P84t|QQv8&oC#BayGWAL6&WTz(|o*C-;OeMx_J3WiPYLXBY_zFD`~ z`gfE3UgO?^Z?_CB%bg-Ai>aA#u~9#sRs(wJ3*Cp&EojStk*Yw+xP^cbYwYUO>v#r* z{YKh;mp14f?^KGa%U0zGw;ivOo=9!tPvqqd<7St|ix8_4&yLZ|(t|N;Y`24|cY@ zbTcdFFIZaFvy)`QFhAsPcxmuAyfkoejr&ki45BSlt$XJe+W-z75GlF&4Oo{ z$b*-I(bo)*<6L@zBhH!d`FB~3a%+xrVNv+}^WRticDyFA2!w*YMO!r67G$^LO(H}= zPX9VQoGDyPQV8Dz)9KBP;7;HtI5j|Xl3Nt4cvNL#1)>S8ECm)Pi&$a;{&-AK>WO3c z*Og^&STi?qs*Ec`P(^2=#_c;&Ey<)3BPtWz2a@krwVj8}nuQf%w^cWWIpO^jKgX-b zsI|0y`wl|r?Hm#3-yMWcEE9%_j~c#YIihSP@R{v=qmCVYoi1W}zh5mgSy zxTh^9b?EW-ZY+KA1@#wM(Yss3`uj?5Bf_fm>;dRyz~%X5Swm-0mF&`5K!H~B);FMf zxEVjH>~E{=B+o}U*SXX}*WHe-f)V=-$~~~V<8ADVtF+&@+E2_#dX8J>oN2ig%~+78 z{|NhBQoG4DJ+eKdyz~oa}lp(;LNN6-zshS zV(aS^GlN|tE3C#lu|*Eft*HIdhD-TA-}rBappsZ*!~1-=?gMNLHrUSrj}eJ_pby)| z&=7-T>Kl1sMPjNv(0B1waLF}1i?^{i@=ej;!t+XnDWinT5wG49oqHf(bo(ucMt4j+ zd>ebCyeXh6~nlv>F$nHB~y1y6{}*cJdCcBW+C0o{2~zQisPIk<6>X z>MIlFwGg>J;8!)g(>KU>5ybv9nwT)GKDcmMj1=wwsErI?Sy{H(YEUn*InvG(s^vQ~ z#2=D4w~l&-Tg3TOt6RjGQN)3WuYkt*%#do&6^pAeCC;k#^w++P`8+I3um;1wTXh4k zEbFmm&f(c_j2Pb%aV=9~5qz}$Wv^R}&!fGxU~5;D>dHpPi--R~vmS9GR@8ouyYj}A zzJ?n|SHiUht+l;>^5beQ_GWkxc@uYW*?Q*DG)n6 z64Y2I_KM$Qkc-(Eeuj~)!5M!XtEhrr*8bcXSQ1d@rAVa4)>Dt5f5Gj(59!o8Uuz#5 z76a!aP7A*(%RaRl%S3l$&HN4FKplMLn<3fPdW``s#k=%Al4xcIZ0G}9s&F4p3w}is zZ83ZqMaC8MKCU|nXs4nH1xxXob^Rh1lxQ5r?2+Ng%Cc?N%-!ku0A^J&=xN%9quYUu 
zn3%>D{oyOC&;n*op~GMGm(j%Sk{WSl6Y`wsc?l^tI6S=}7}`!R66s9}W_|D&M4 zU8;jim8FkG9}aL&>z!F0!OZ?%gx?@9W*MsE#T~l%W&fMNCTlCR;>u|hEz8Ja{!QyU zMlk5EdufaqOz$mOH+#5-Z>M28h<%*B2178G>hAkH+DMfj+v#&(_Od%OW0L(g`SJHe@*x(Tv_%@FDj+YB3v zEWXLiT?yJEWzSpjyHK6f3czBJP*K_gle1!MV}~*|Us&;fv9|VWQ5zQsVWToE)flT? zHWWmhYhhC4zMQe8xKDQBN*E%TD(&N+3p7CAjJ=Q9F<*rg*pw5nIuDzYgiX0YYSefV zd1`+L)y^#PhMgDLcp_9@T{=7*T|O3l8bJf7@J1j@%Vp#iG!`eBfh`)57=L@Y06NQy z0PhX+l#S6tMQ;EP7w`}kAf01nq8{Urpwasyk}GY6NR4;WUuIZm&AbdUthC>AAN?(5 zw^+tbHn;$fmzYI&1d=&8!Je3m;niUfA@<3r_m3~<@G6Pn)pu57fp}Dc zJQyq3P7fEd0gGR1gmvN%IKujk^e8)abZz*DUa!;h2yIrJSv73OiP>UZXhrNh7~@$5 zXI44W{RR%p<~>z%iR{Cj7ZYVzEJFn~F30Ul%;ce1(v3Rc?^bP8q*q^-PYG>v^CnY5 zGDq{qx{wlT4xd3U6D2P*WLJib&oal_h@`yTk+FC zcX-tJs7UUqVZu1*!U-gQ;t9_8a_-KH<%@uoNC$v12!Qq`&52@!3~mrd8=&RaEJ4^W*|GoOoEZr43c~ zmza^^c^Kaw`17p0?xDfSoM&BuRjxkfS*Igum*!bN2H-BvvyKI5Xr8r6&$FOu6KNF_ zil~GT#UY;GYp;N&U_%tQ`Jwsy3GmQGkkN`?Bh5_r%ol(?G9V3$GYmdKpKeF~ zfHdt&h!baYIPMOWP-SjsrE|S{)f>bEOaB{^ zkQ|GlAkw&3;DZK#aP#5<{<_@`6QZ=ln~3i|38d^<=@H9$*aCI4lMn z7`I1rH^cHp{bAT0YLjFmQe~+heL)AtRTYh0R{SrBl4~5fCsS6Uqt3bL3A!*OqchPT zd6jnQF`P9QIbXKoZ=n5Q7MYH$6H$Da;s;T50_+T|p%->D;v)0}mr0Ep2cR;VFW%C( zbK$NO*mwpS@A4wl*Ik4`fp2fUxyb|!Nu4vhBld;ersZKuf|qbWhiWBhz4ld<6?~1g zmA33ijo9aSk6|HCYg!&(e1nwu7eV|HPXFx-TfN5ToEf7}0tIE8eaO3K-uy+}mpmz# zxE^EY8)DJnJsuV`;e+^S5L>YtpDn!DpTbImmf=NZ?uv-hR0DQI?Ah|=6PPjY;64@^ zo)V=;w?V->EzOS8BZ<`g!9I-K#FcQqYZ1KYzUI{*qCTpk8Vm<^G(% zF93*pD9ny~F+cX>$GQ9%!jA|)cH_rs`0&PG86Ky`L(=N;B_Q=V%z83Bw>PyA$ua_G zn|KHoTMfr=`y~)ROY9x^;5yklR%s4D{)QN?m^I-Czi+`eA=;UE4L=^^$1KUX!@ZSQ z?nR8~8Bg9F2jpBR`wdc*@*Rj6330E?kG)#(N3OtbbISzVCija=*lfj9<9>{=+pQw1 zL67?iBi7)9+x~V(4iEB4CcH*|4n;*+c1pgtx$n#O4)+awyRYEmX)cGkN7Fi}4T*K+ zlv+|tB|qZU=&SRl4dJ@w0Ipjm8@L$6Wy|1OV)#{r1blIk3SZ^wi%U$A#7uARup8 zl7i9jHo=egv-S6S{rxAX9fkk8z$M)^62Hk!>hD|GT)@v8^!L?-$4jFuW0K_E=H4f; zNq3IUlhEG{I({0yNy~Ne8}A3}^hpxG&HbhPUgMs}-L-rHCNWrXsHdDOGXMBd4R z_RblT$QjGt?#815kWc{5$#b6F6X11NQe48(QWSeIoRb%dQL8J6sHI+cC&9nFpdu+p 
z0?aPbSp%V<>H_XrT*=izuH))pfeD&jq_fN}(phE~=`1z@was*vw~~tzy`@|Py|r8f z*-kX9x0;JsZ#fr1SZ({-)>T2}>Jhv( z0-Lt$_08dize)cnH~8yvG#c_9ED8Xx##jI@G~>xk4c?0Nhmmf&zKQyKPRhVUu#yGfptx5w5YJP1O+j2c1?gnL3> zYEtWS>fD2=3D(hDfELYOAE@?M0<`Q4Amg%i zH4$;Fnr#%nN+TRkgv=qpeT?MW6ynVq@%a7_->MOxOT^64AL1&7c#B4SZhwdm(umI^ zV&>=%ad#23O3|ek_J?G-cw03m zmz9tq9ez=1@k@p^>8igCMZrGKhcZvFlMZ#ShN>f!oP@6C0^|~J9xeW>E#Ni%S7XnkF+jom zsO9tn-&`UIGJ|unj7qOTL2yL~KXAJ^r?ps`tD!p5=b%Q%<~Z47te57GUKY0hrKZdt zFoyl=J}{sNXTMRhFH;XnZ}?spE{L!oeSu1s#!awzQ9KURMqwPY zSmVslq02Pb5`gj51cLJ~n}AQ(upLMzEc5UWu%%(eI)t!*=@1C553N|*jrfa|q7NSy zDSbG3JoQ19&u`)utxzbS20=biF*1m`=`c|YVU;tdTogm06fX;PV`dI}LEm514t0cX z2=}ntm{i#RVn$IU0fugue(lWPN-T&I53gmq;UNlJK{ zEKup1d^D1&$*XfjlQqta_F!qSE`U+<2+lufr>8o8+j;a5qI_!DI;0bpdH5GL=D((T zAf0gh8-eR|w@x`Q;=ATWhll@I>F_mYD;;LZR- zbb4xtvM(L}V-ffQOysW8;YxK5KB&X3qScu?JQ>-c!-}m>5=!fE6z{kLp|hqIwE3(AxdfDnbXGN7Y?Jw@*02 zXM-!#a0MEU@Y&$@)o|>qh>!5u;I@G481&}DG7ya5{0nRA5q>Z76?8z?0QXjq3M-yM z99;akkE@7OnaNPVkY3EHI8PRnKZyo%3y}OGPs2mUQvi&nP2j3{$CU9=2UoqRw&c@) zVG;5zrBL`EO+@k|WiMNef8$83Vbn{Qv0%eUBYsuju@vX59Tj#YzhQf>)p!?oZ-Jq5 zm4$ze1=hpT=34)Pj#MTRtY209Iac6CR3JG1y$3LnC8h#6P+7JTtC9s$fhtu2ZV;+S zROI8`?FxH*J}!E;8r!g?m8ZDmF#m|PcsL14Eq!G}H%*rrae9|OISsB#@DxVdcd(E- zKtE_bocqC@(J^e+2ctWfAu0Ko9JN;gZ!@(EuO#zPFOWNVto^u@>~|M$9snHb;UO$S zTaC@K$K^*WzQWppbMo3ZhvbZS&Yg1>93pUAH2xl`6|tuT1uAfm5;14YxOO!b=9haA z>vvr0PtsEk}<9ng~`q3qGkC}0YX?f7!{Y^c2*rnq4x9&Q@->6aqoSMXW-JWh6E zC~-Ba&`_R_+qBg^&rU-PL^yWDM?BeR#`~h$W9(LPUG)LE9!I_PyH(s#>onA%^;J68 zb>QN#rze7sK;jL?z+ECIcROX8=`Ez-va6qgKg^6g4j9xyzW4&8-_G6+l?efH`Ah^JRi|M z%x8~wcR?@KZdPp{yAM3QcpbS#FA|HTs>(6pJ~5U)LbYs~-u! 
zEG~#D0CHAYOf4X2Nm%PUC_X@xco3ywhzB)9sRvOWhG^0dW0oG_mk2@E^bf^dg= z7m{{V7qCMeoKFJOblJJCbp*_zbT2}!x+=^kb}F*@4P*`rx8heKjw0f%FviFPm~bvD z;!InF>&3`afbom64Oe-TAe*!ZCljZfFNP!xv6#5hIx?PSVNdom%C2El!zPjLnHHrB z$D(v)fRVnehGPf2XSs}P*rnKVkUFeje?gb0tJ-%xX7i{=mU_IUwCtOWQjdps&sLA= zNJ|?&`POq!3;-$+rP>R6*={5oDsXF)=46AFuFk|a^zio*I&hbk;{XQ_ zog|s=)YrcsGTFJg8i#Js>Vtdqc-e0>aej5gIjc0{+yvY95+_0au8a4P&fQW8B&WyU z!4cM}XO&ou>+;C{GmtycA>&v+`6svadrj0ybn65a<6W3!#m|;q_BWL*=MXDmKS?;9 z3v-?+hsvf2f8&i6|0~ti`ui!ECFKKP>U$W>vg!MS*8$v&kNY5iFM{zLPGHb2VfGXr z0CFlZR)dY0uWf{%cQ;{$!?7mvmR35yD1l=lxUYD_4=9TFDicw#BW?fiI>whp!Hy>e zvV`%-o```OX0Np8FzCI0h?G{HqNB`K-Sf%r#Em6SXoe8b&xiXqiq-mYM!o)smJ7yk zGdUQued@cAWrR?0uAm0oW9n^_!^YBE_u&5acqgV4R{Ob;*mm6cFdkxzZO5e#trEbk z4^Jb2VO4BK%?*t&AA45N zvG5RgMPjV7Jm)LJ^Dql_o>U{r;K2NF;!NBpN%6O)oC7CxE+mxO z@~M=25z0-BX0i5ps??D}>Vid7k1}~m6c>PkQ&Hz>F0=8C);l=ni>d`V+G7OO(iAbT z@5Owqd(smLysO1tCS)IK34!Je0f!C#xsus? zrj+xju>C3p1g2P&Qf1j{tMP8$KZHU5$cR`eIQ@7EOC8d{dpLLqX9WlSU%4WhtI@^c zY}NF^*l2^ZGC3pN9UR}KP1Xc~-+srF3DR)bc%q4GAn%u~#)<6X8%DJ%q{CzY6U}$7 z!DQ{4d_LvWC|4?m=FYNb-j-QRi0;NDb^_4rf6nRLq!A;}AvdoR@SDbHHc9PnOvp4M zFxV|&vMqDFPyz8nC8ug^2^mlY#s!0sjEA@3w`&?weYeS6=zTMWJxjRNYmwE;_F8KRha3=g->=5;TFM*JE^jIr)Pj#cv z$t~vhMWNrPqWPq!|2gz~CAVd@ls<3W-o{GcS)xU(#D7j9Pjec^QO|f+-I=>r_l8x8f+{;G!^6sG`rfz~FD0}S9 zL88E-(yqq|@X0MWiJy1>HG_ef*DSb0PX7X0w<`5d*>yWcm6+C!8?V4~PE@Tgq3hgl z;C11R590Y1CcGsH@Dvv0jWKv%y>|1C4Ud3tMjqD#lPZ7Ay?*%%rf%Mmng$Vi_6I(X zy(*Fw;1syx%{wzLTG<14q2#$GKgEq0_DcZASPkuqYrFn1VRwuXqY2E(c>*{%!l26G z!W0hT@h}C3&t5Lvh{8zR;_qjM3oDq}LXzYvO#O8uSCDQ&eaG!mePbtc(+Hk0A1Cz< z3=`T=Cv1JpR^6wOgp+6GQr&H+E`|#H;hULxe%_4QUN58~Q(s@$X!Ad3YN_!@I50(19Cw%q5^L+4(Dt^p?whi#Y z)PA_-QrjfM_e>97Un3k#xG;kjomzH znqb8_64&qILizc$yx56vmZ)#Tjh<@rj z(1R~b-4C^;ak1w4Mx%BLjK(wt2S!aWKOSr_RIWfj|dOp(NExl7v%n>1ufpO}bZ|d7MC~D2-zj zx*vBTEEJ98Yxs6&jKIfqekA#GCO?+(qn;oC;>T6|_<#8D`!^0Z)csSRpf3?iuE#H_ zI8@KOTyB*lqL0$uaH<+jX{qiT_e5Tq1^Z8||xH8v-$LVP~h;g;) z%_=ZvxD{#91U8M`2rDbQ=Y0drTg|~-jvgzqnan$iAROK?9!5;q#hdB?p@*4X?-D;i 
z@M7QDP0;1wplUnntc3==JI*ciq_)7t%mhv^p@}|+ewv}&$oEG;`t2_~5Z#CoNi?w! zn%_+zEm(yYIZ?0|(NMwv-0v)MJ=&hHdNT3|)ZG_fmy+%q;s<2RcgkXsXH){OB&BBYIgi80@~H!gUPiBN6cu{ub;+ zxQ6jPDola6AE~gw%vX5@=5`epn3xJvIov5KOc#f{2zk|B>F*e;_DU~83K+?+)Z|wI zHB*(q#^C-`22$QWci?A1D~9#UK?fCH(fE2nAA$vmzG*lIr@?$j zjd}tI+{f_&8Mu$}$D{ms03YB?Im?)*!ZC(#Q(@u2Ed+c5NB~&N#48z5%a2L?xRf8i z;Ku}fu&1b$@Ad8&{muRYnX8$(gn-BJ<4ArSiVsgT=PNKNG^)bFi2p$=++KVDP!c~= z;X2|g0-$>!KKMM_5BXzn{bLZmkwa)(s&WYbHY&bKZ5}{8Yy2kS+_?n8cI6lOV>Le> zWEE#0jSm|iWZg9ik<{y9g0$j;nW#VR9DdB^hb_@-+!_2aoj>%Z{W?bc0Uwq2d+sEB zb3gqt@9qId!*1M9kIlU`aD(W3z+`hW?gN{}O~?E$(%;-H%-`kuo11*GJm_t!e;cEE zR(AkPTZZOCBEKT9QPs~C&xMumtFX{BSHr!i;zZUf5O$wZ5i#<+jS+w0$0PWtwBK_V z;v3WnZ_fn^gavXO!fpjWM)Knnew@IM;lm80z)OIH1yg_c?cm#O=ErDBS?^90$fSEBi#x3tAHQRABQ@(X zhK@!Y&AK#ZYtC2`0)pcNxGBx<5K zcTs<-zfU?>{T`*ir^)xKRL*&fUzNH^zMoC~Lf}`Wx^#R@!~bZ4bMU5lIHTa4yzU9ycFiH>L<~F+zL(|WmBo|MN%P8Rbv~ieX)FI3 z%a~XIvq0}C&nX}{5W(+rMnS{!jDijxWP&s)I(C=|@{EFx9<2o>9=j zktRsfq+`dJAkQf1Se{YPL7Fce&a z@E{W`GQnXcSZspFnqY|uo@9chCOFar%S~{M2}Vq?(gdqbaDoX=G{K8au*L*0H^EvH zyut+QOmK<>b9nlv9>L9ZAi6Wq%L3ruhy6D%~rgG{i<1c#Ymu?Ze)f+Z$+k_ncY;7AiJH^DI`7%{<0 z6RbAD2_`tv1TQwh8WX(S1Zz$33KOg|!6_zKZ-UcJFlK^{CfIC(x0_Epjj^G)!6 z6KplXhfHvh2|i|mZ7PUq^sL8CaES>%Wr7_hxXJ`qo8Sv3*kyvRnBY1SeA@)OP4FWV zbWQLx6YN0{hxlj|zBIADCiYtso73RK4nz`;m!tP3&hTw#USNX<~a#?6)R1r_tyyf}Z}G z*u6|_fr;J6#1@*^gG_9Zi5+HQi%smYCbq=Ho@8Q6P3%Y$TW(^@X8sY+{c!u_Y$< zBokX|Vn>?TauYko#A1Ud6zK#|&(nfn8!?pE9r=26mN!eZjzX8Q510>^cM6 zZD2n#u&#mq%)owWV0#VhUQIsN3lIeVIS|-Kg3`GkYhp`GFoK|NdX*-&T7r_TRsclC zR~Udg6KplXhfJ{B1V1vt(pfuh#Z~cJP_vOHSZ;!2bkKeE0xsz_b^V853p?<|4I@$9 zYKUc0xJ*7?w5L8V(5s)_)~5&D`;e2D@&mRUu=0WXxNAkxR)ehm!$qiVH>ksspC-2-B0oM_ zh^%7`oP~9YNMgf-OWBBQBYa}Hf&jCzw3_W0|2N;#Ikm2H_Yq9@*M~}^wB@xdE!o7< zz$X9(_ZsF&8BHt$Pz09EnNB_K`0kr44Jfj+Txv(e5!~@NCX9K?Y4o@47je!kz;5g* zxLq7)YtweK9w`2}8?Yk!y1cXg`48aIB!~Huyt6!psu&F_cp;7F{j1W{;lkw>mBz3J&b&6(z**R)h$OZ?RLPPWFINzKtJ0shXH})=j$&!c7qPTt6HCjcN{=5Q zTwxi2BJi%L(r^^1Rf&kRs?we#`&XsacykKx9{8&CK!H&uScWYR!D6ZRy1=ZGkSak* 
zI>d%w;CggN5DK6!f}OSK3WTS3wdp*z_W zRHSkZd^o+j<(UPYXbLJbor~OjwWe8-+p%C)Apiww1 zfCXr{`s?88cV}JCD*mkv1#S45+OH^vGVSEm^M_D7eh%$O;vs_a2p`do?F|jVL*vo1 zx2Iemv7f~3b;H|$m5OM?#z~KPkbKp}owi59UPuOCtl}I5t6X!IQyo3BWx2cPb zI0r@b30}1OiEqIa#|FeOUIA#G0*z_Vq`Id8;!4gONDv9-`=P!3P=O!XM~6CR7BYmh zO|cvVOc~WE5!ETzWq2p7w1lGeLIU`#{@`$$r>1T|NA20HE%r1p@6rLN4Dj>_cc7$L z6GgPAb8nr649>hB=5uD}bV@z~U;|$S*ZA3UT`!~UbR(!UM({K$WP{6pm^Dww@-85t z80=1fHlR9f;M0ibcp5og4>0D?hjk5|b&AeXKV1$VK^Z8TsNE##o@#JYFpw2#V4wVM zQAI~oBQZmuW~`ucsaLU!1wO4kECM{FS}%N3+0@2V=*tY|^S4gDLrA-ckRYf4)G3Z8 z7E6uo_&Se2iDjhGW9-bRFt!L8z}OPSSYZNg(m=^jLNK?>%WONVvoV+12O8!Q%>Z}X zUc%fSFc-%+aIO<}Kr;lG=cA@jme!y)wJ0u^XgKdsvKk3NnV9Z#^P?Y&GL@@JGY>j5 z;ss-JJca7;s*bHKX>{Kf5xT397j)Ms6jD8*yH(M>#LH|uZQ1B%_B{>VL^Hs}-BXl7 z$-8oz6{mSY_(YE&eQ4F_Rhy!?&;ZCuUMLo|2P6wXx_xQyM%Cifg@dmFVVaT}(vRpwj zgNE$YmGp7Gpdee(!#}+mKtRk~8a$@Kb){$}Wo+Ti3wzzy^VyZIgbHL-_cuU|jesl? zlS5WF;>=_jfoa8wQh|kq*t7~W4i#kac$3(@^6)R9S#uR_Y3A^+2Z*MUD3r(2jWld14v!+b_aWe^;R0Tk=-nfy=KbVfLvRQF?_}e|dpo*-XzIV&egcf< z#T=eGX`&D#8kcSZ%E^huayARJAWe8lq6Jh_CTp7%}(&B ztEr|~CY0@U5D8;p)d_D?O&V@i%4A-W5mOhg`07HbIQuTI;yGC=o~J4Ixazs8>TPLN?~ryC)ayHs6rM{G zs(yf~`a0mR5T;iM1wqD@dUe;m7^>(MkID-@hVR`ORdq9BV&Td!R0>m@iF z!vncpu)cnA_SZ6;IS@8`Yt$P#wScjN!&qvpbO}JL`rN`!)tJ}82 z9AfUV1oWaiAld8=!ZWHD00pjeC@2E!PJi~YcdoQJw| zL~?VLEYd7}D?ET8Rq*9LgRVg&kWTHj^by(%SqGeN3faA1S;(Gbi8zSYf%BKoOnp22 z9>Vk=5J3}bubIux#P2}@WE}dVemTE4lA{Uf=P=siE0Onc$G6jT_sc+T$7!w9O{mTB z0)$g{W0SkSS6fDA&XUzKM3TPAToNj`tacQzY#C-jmwQm%5o>WYrkOePz(HFEoO9GOH_<+TnT|Tr6=miUkEdXgdHVApoub zF`g07imI#NOZ+gEuNU=%;8|Uff|*(QkU0u6=RJXB1`X-8Lm%=3h?8~UNF^UYK+Iek z+_R{2;6Gfqk%(;i}$4Em6%rYAc3?V{aYhjd(SqhPah= zt+cH`mG0U*7sqb@DpVmOJH8%On|3Y~FIs(Ll`75W?KCPMO7yHic&u)9T#)RfGr zD~dSv`R?V@Kt&?6Iqr|!d!XRb2%V8{hTS%UkX&Vd9I^3QLHcpQEqgMX;w-Ot`Vr4| zBz!yT%l9ylgkcI&zXrRs?)I^(pDy|_qf0m%#SWGE2yV;R32BGs31 zsQ;K;@i)_eC0DE}elZt-IB*OHbZtxvJp0(rpm0qI->RjJ+sr z5{v8ut#|^+(EjS66_~|L!B?mm+P!F1Oco`UCYW)~QI0D$v+nSisCGz9nB<*~cE~19 zCMfaZLMU<4`8+*e<=cgJI88zBLOXP+23T*(-VQ^Cft3!;7#^ljGuz<;pbfXfbAs*g 
zYAQV*1F>g|lB9P8HI%*A49QVx1Gs1cuPTXEi%2_^$L_|;0~&bDtV7?f$cHn=Hm1+zn_PH6_{uilIU*mwrAN&s$!Qma0?w+_~Jxa(iwMcz}*;Wf= z(}R;{)KIiJ?fUYDXLP(HHYvP7=B9f-7Sy6Z4(l)qU><+Q zHbr?p%85X`Vbw{#NMepSKD^fu=`{wiIY|~cPvEh-XD?7K%Nr$m*R@&kV*!umU(X7n z@#2fz2{*jG1#44GK@nK-&>t%MP4qKyOe`!wZ}l|pMPj$cTnapWw+YB%3U8^DYR_h=@JzFNa7Unxk|exJCHzer%w5c! zTxhXpldMX6Be+on?wJ*v0{nvRQI&Wl4jk=VFAWt#kSYPKOb z7Sm<;%zK^A1I&O~b3$xWPq!T_>O76AxInM;9(+GKgCO?G~jwM{Mp zO4BAqL!5Jp0*XpM3B0t@CZ$w+HcR!|~y8r(B&vK+tACaHP- zw@Ct~wMk+b;En)MoEYs&n|zZq(8O{fv9C6{TwvOTJhaINHv4T-+z#xE*djp-M_P3B zqB&7(5!BtM-yAXjRt*%hX{n#@5Ylc4w7VA7?&#Pcri^x{-FU(E9<4w6hB=hSz6O&; zsZYp*__cTt1p@1xdfDMU0JFxZKn>Y%`-5IRTyys*QT;Dc={+YLwC7O)$Vzqd$-ucm2KH=q zQg+;Pa`h=Iy=(*pDL^UORC+Sg!*L@t!i2rDzt~sWB*hvO8mmpMp*%8$HsN>?S}Rhy z96+GNGO6ya2#wj&gvRWH-Iu@kzCwFIyba5RSz2i2S%lUgVC|9wp_OeyIW!b#%hLN{ zp;I|$VD}KE{@! zL^K%}e~YA=VvwVeRGX3%UDW-XLz#_gL4m#1A`;(<`NMwCGOBd(U-{PM$D{`{4PmHDOs*Wt8sur+zNrI@}-mu=k8Fs^N zxWkyMzG@h*?x01BsSk=vw>{Rz8!UoXU2Jq?YWEo3+A{Dv?6t zZ99{cy1Ldhua=OY{51)&h}bQGbPOpJSWsNrIa)3J?nhiikuI+0qeNUa$VWebWM3u~ z-6lnO<*@5S zY86`95;D>l#b7HFn+DO{6h>bx_tF-zJm1ZExJo}2m}V!M-`LiA%~l-G}C5!>zFRG9}g3ebs!%^)+O1ONu75lGG<8=8M6*>|Fr&lh)lcSg;82$ z9a%*7ig@bVB?&EUuaAAhNTG4);SW1{$-2H;n0Ug~NW>q26yWYTBeEVPviUm`nYz{5 zSje7`#MCms>B&@jP+)^d6(%0S@qPqGlF|is6lsZtk*ayl}9zF=3MySk4D5IXERN6|i9_2P^qD11`pi1O?SW`^S^9V7i}Xv#Yp-L>BK_+Gti7B`kp3m_`^{C~ zj=w?8@w8hWWYhtaGrlYhSM6lbz7BAyjpA1mJ=1JU=(5ATe!AJhO==WCc}mR!0k@c| z#eskq6uXY8QFNvG&dh)APT7Z?nM(E#O48hXk^O8bKi!qsf@ID^l`E0{&#)`;CPLWr z6hx*kvPK~&@7ou7mV$@?0(b;4bS2<%0F@4)1z`Z-F_o@|Ffi|-OvnxG zx$xEcqd_FCDW;$ZOsKRulMlI4AQx7rc`KPqfv+>d#si2=H6d?hA(e)UalsXYX+Wth z2Uie)4J!yiZ)J^uD0^CjvfyiGsqj{EwCZkx!jq(g{A4Bk`kug?bV6VS0XdYnvIg8U z-pXqHg0-ch_3y1DU|M4%mH}=mk zPTP%K{#!3<0H_{(Dc%N~-X(xSL(Tb6pa`s`=J`r|shukL9;suy_cp{JmnSe^3)${t zp>|(H9peDZXIZLD`kx{51t0Yee{88rr0(iGJw_+FwLG2Pp*3+lx&wf?+PQ0UKNvlU16sL*5mQG8H8fUqPx9SwT18~ zj#9(vp`BTpz;@0YU*DgH`m$nFa_RO%ivDGeD0-<<;SygbgNptdpiskd1x4V#6#a3* z_ee{cqBB=kMQ;r$`YCT_RrD)?(sXdFV#cnoa}|hE^m!~Wpm(P;*;90KgQ-f<+h}>z 
zh7&nLJErs{DmwR*w976}KHFK*_=Oc81SDBDer*D6SycT4L@QOFM|LSy?}lomt9lnz z1sgwhs_FzxQ+49Wbt6#4T~_srSRXiWSeL$3{WO7T7xqEbm%J8Kb(yU!kHI6HrnhN! zU#mF{A~v+Z0E<%;mP-#xc@$9j``rcX3PSemh3|ma$@}!zd{=FGK2WHa)dU-m_G14_ z#O?%&2>9g=K5#Qy+wuZgEtFZL*@lS+-ERXn7?&cT!F%TO2&OGZ0IU{Zz%mrc70vSq z&1DRpyUBJM{0s$ss7A_M0**C#fb(5r@miFwzBQjakqQ-fF~OVXF&Ci$>f7*dZ~`7Q zZ~c|fEDxc}YjRD@1DYdJ0%E-9-wHRNC-tU^ML00KFy`1`LO+k=C(d)ejTUf zL316;gPi*UTdctTp=c&_e>C5&ps&$LnX5mVf33jl3En)9xxzG$1q5i`yGG<3BZQ)v zc|f!4(_BKDk1rIObH4B~1?2oK54*#~%<@2UUtklI`sD%(szvDjXg)?k|4}1luKsAw zRp6}zZ=T0oVVXZuaz5ZPp?MJ@6wS;7noFew>UJ?{{@H#)b9Emy->3AdgXMwdzQAr& zVB;0dgzk^#Y6ZPQBW14sXg*SbuO@i&Jmw11yaym4=OZr_n!5<0Xl5SJ-0ag_M4HF# zD>S$EL34-FE0^Vg=DxrdDzHB*nhD(>%~KV0N+V^i{%HQW0`DPs^E~DX(|kN2K=TQg z2+h5OP&6|SXkO>jTu7SF{gKez(+ABTE8Q;mPSM;K*fR?33q>=b`=j|@1wB+FWv>2c zu2bNJ1aF?lTw$8e0|aP3?P8(1h!Bcq<^j#caDl77s(>_Kx{uIYF0q+?)nHX#Da(VL z`vP-un**AC1+bu6gzk^#rxkRWM#^0M(L7gymlM2s9&?3ht_K8YKI@l4bA%9zX66CS z^*+t{r1|$h6q;N5p!p}Nyc(7Vn)?D9qQLG{G!wc%nm_tlIP@!xl)3t&d8q=gC3y2Z z<_goi01%-0f{TRaIzlL#nFlnl_G!)`&DZ}xXzuQV<_W62W|jw<`vN;bfxWM2CUk!^ z@1dY=jg-0iqxrpFVNnago98iCnC6v$0L{Pth0r{o5Q=8z0nLT*HY(ljeFiko94a)I zNNlEV->AxKV|k#tFR%+0*a%=jwFuoG&BrU~e`uu4)gR3R75EZ@H_v0PFwN_?3C&kc z6q-8-p=f3v&|K@&+(VjY7YNO9)1!1D>-Jde4;G>-rTXl}YdXf7axqM3O>bH2FsI40>P z&5!IYG#5#1rf$1m3C+bU4|47ctW$v<3M{A=q5GqGfr1{ckuq0*G+(E{O9-C65t=*tpm~TYubSn7=Dxr-d?~D1p=c&_e>AUD z(Bm~y=IW2;`2yZFk>Jhqm~B z^+ET^s*3e26Lj|l_Cp1BjiQ^-{n6d^g>dOcjg-0iqx%s7Z)*FG(9K++yE@=U;xSaN z%S9Yqy5G=&!}0#UEzKA#c9a0XieCb3T>Q`(%u)?r`z^HMXCb*V@i37^6Bpyv=i%!j zqi)6I-yXw}ihJV7jGzJfcIJ>YQs_(_I-_=Vl?nxi^ z07f29mt&5B+3|mSP(xk4#6&*Z#F_^{o-!bniCVlfkoSM*4#7L|H3c}Wk9P?ycmeRd zY$@_)_+;^NgomD&(tv8!t+Dcz!#BE1o)!N`MwweTNj@%}_{UX7qv-j<3w2yGP|257 z29XOMI>=U+r2&vafK;;%AARL)d&~=E9 zBzSE??=`$O;rCEa-1)Twm(nO~Q~!x`OwTd{5`2R(Q)ecofh>RV1@#v>rW)@6dj||V;m++4@8rha zut^avLO6wfs>9Dp3Y1@TGaS4VF(M2c#<<}V0gfqP<}tw4 zVc@?IIN3yCfx^M>#xoB@lXQlaM^}8r>(t!cML}i3HBivq$cw|3#2L+9h1GOcEC6u} zAgb&~h<&m;&lb{&s#kyl2TZ*)+3F@5e+R&6D!CvMe45TrKb+~waz1Fq^W25Fej|74 
zeGrCXN}303{YI%PzS-`wFp~g%GMVCeayH$W8Qh>2U;T`#CxSFq0PUg<+VKuQVQ~=>5L3)A zWz$A%)jI+7QWv5H2)xT&BXja@tAgE79ih}9A-GgkbHhH_IOJIAOu~68C>&N{(hmGz zizKXe^pc8WMx%M~sq+M#3< zfdz^UmIr^g;ys;6*$QjsxiQwxPtSJ^O~4xtjC1b%RvE72idk%JCfBxIr2F^4$YT z+~UDj2^)?t>Qh+VH?0wMk0~eu8{Mo8>*n(WhVk~GsGDc(jv6fYbQ4Xul)1_}t(nyi zBL(_dO2Mc)`jy+~WvL2nIFJRJRvwZ-zZ9Nuhz(Gz7@;D$t9|7wTPy0x+GvwO6YJSk zf4ux-fB_$sf|mGod?jY9QnTyn*Qfxjuc%2vEvUo#A|KV(=xa|O`r0Yk+bN5v{VG(J zfYIS!WvJ|@?n_UJ%0eb*IRl3WxNtkLa1j~AY4W+Zqp1hVA@_%Hp!9eKs$Uz6jt_BXxxzmk6cT%EFObgmV*0z(ckn$*;V?oeAIwVj#yW9 zfFr%Ek=+qVT%Vr~%jDN4TUTJw{w7l-q(ON06C4w#6L>M8(0k>iM4Q-q>C!QP#Y-Koo z!;0cXv8a6=td=x7+0R;G4VOkU*_L<3Ms#$}(#O`8m$35c8B;9@Z9%jod@BBgB6O8t zN~jrDd=BCw_Dd*A(4_O`W+Zw1E=3t=K-3e>LZ-Z-v4P{ySn=}(RTpe6p#$3F>JwllQK@>ByHfgwt4S>GY_z^R!#tYo6Hg{&FcUe1L5(vd!RY`C6S6bKizJf9w z&g3revvQir9vc;>eKtNUc$TvwoQ51O1MRdypd;z+LM7%Pr!P?p+iF{Kg)L_wwdsP{ z5-Iw)f1z+wo4kc>r73CnD-?dA1Nh(jH}<COY{}7r{n|{w00N(BTnArG2tFOOYfBRW#!w)*pwr1YD#si`h>rrvw< zzUsT(XtBx&(U!lf?5`ES=L67*U2-}CNEa*?q>aRd?!H?YyG$>{lkbs36NbSOcBMH% zeuk%rp+q!*5&w#ssB_XlZ|X<8`5o`^`T{Yck{kwXy&u>xuzwZVIO^vZPz~qVYcW>c zLYw*!J9Z`?@6iWqBUu8&OEAwH>8su)r^4sM*4N40Y1qIhlYL&71mwCCLQw^wUU{ju z+wI7wsTFg>2^Bz1#*$FRb)~c(<_OIhDimMJ!S5hXlbD{^;&PsmExfg7+9mL4P=e6HX5Q%EvNH zRTp*#px6{`%ca6m^a)01zJa}>&P0ayR+hQe%zHSU3hmPQKWVfO(#q)Y?Nw!;S`EL# zGH+FU?UX|*?X~W(e^Icw+jd&7s;;UPfh%)gi4kL|f=1uJy35If}Uc$bS{cJ2fk`8EleD;CGNH#tD@*>;40sJXwnd+!+d z@!;n%_wVJrL{@q?%c%SJCLpWd&L-jqW)DkHdD>lh`tg@wOrs6^-Z$DF)+Rs!Eu+BkH*;{gPX%-aNLdBxEfW2{$)G*s5&$=TQ6Cdn` zYW#W--Z`JLnAaPXgBjyuRnFvAuz=lQi|!7i&Qw?Ke$_7I_{ZO&Gm*WBfhp)nHNPxE z<2xTU&zyi>CbP<$bDPgAlBU_mhOQG8oYLX09IATS_m5EoFcvZ$m-R3fM# zUg7_U(X*x z6=8TR5d+zPsnT3&~ zZBralB|-{k;bMx>384bRw>+ijnBO^qpoiUNz6$ezaKi9_iR{v4o^C!ykkbyl&5~m7 z-12#bPL1NuYaj!A1coNdgU6Pfel=mz3Cnqz8drly+Q27U#EnqVp zt<7RYN$vG<0*Xt{9TV}^JY=#=LW6s!pa?+ODd;|doff)dKzM_VSnp~+^ z{$=^>fL)k@D}r8NBoELmJj6oepI zjii<_x-4mhWr-U4H?Uz0p-BXa)&Zg=iHL3M?z0t>man#uw)=bp1TKu-q`S}GQzwr7 zwPssl>SOhv<858rF^X!DOrn)$T`IO@DH@EpEw;-9J1!-U=0l 
zFDZ<~6VGl2t<-t|^QtVyPCFZigD{(NswxY>+9S)+FbpU;&p&uRjX|c2wZyeG!*!`0 z%ITSOCr$+4eVuwdyG*{_p*MG`U^EP6JXC}U!bWH6T*Q9p! zdoUADHDQP+pf+QY$8ED^o{5bgds6|%*m)YGp59{!Fz?ke&yoGhaHQ|1w4K6Maa!hC znTZ7(Tw5h8pa-iI^k<2kc7-ZtP(P{rZ+JU5ar$?u^(NjU%fdtzJQ#f@do{ z7x0`Q_@=oce;(kuLGZ;24+EYT1fQnx{(y&r;AILg0=$0^yh!22fENY9a}+)p@ZunN zrou}A9~=Z9s_icYyd((TmvDOzEq+d^pt$4mp@hr(qGHE5SO*rJ10zYjFPaU%;j%sT zWQz3`_W?#JZ^>!FTb@TtKs#4`fGkwr^iVQXG4m9Y6)dKNEcY_)bYkM88Wu&p<>}{= z-jWu@T2Y(P38VvWp$tLZ(#TjR-a^72Z+VrVmc3=Hf?M>KVuiQpEx8JB(Oa?<-lDgd zIU-(*-qN7(7QJPX!dvu~5{0+uEn$VX=q=d_Z_!&e%@+Av^p?d6Z_!((DZE8*DN}fh z-cm%k-co}@rF+XUfFp>~B(YY*m{NkAJ=cv-$9xRCiq-#uWPTavqFA5GiVj1IoPJ{O z_}dk-;=t5+QZ?&lIETgS>zt8IcziI58fx&uIJ)2AGV_d_4*>*K>dUdzCZT`r<=EaJ z5%6;Cv81f0P!~qLFUan!pgfAK@5xeU&MSi+0Y9Sg;HDIIw8JqxsiUhFXM*>F*d88g z3fL_gB{2CA9&Vu3aFln4!HX5l

WC@`KfCmkkQ7YruE!qEk6bFv}0-w!ilgkw4h` zk)80Un0FsyImO1EI{cg$qY{j6cyvogG=W55aEGFeNsE%Vi1QpX?e z13>Cq|+WP>IrfsTTFPe+Yr#OOy*1pEP-A zYB*M@Hz*d7i3Lwh^P>wCv)JWFtYy)CjtzlJe7^Js6`lUF`3A0k9bXdl`Cm4l3lww$ zZwP{+$Dp2O?W8XyyXdd^(m;?1@TJp*=}NyJ{f!LOTp$niMR}}I06+)xNVtmY$(EgI zCct={FwmJM2&_eCx>3QLI|Mn?nPePEIa3Z~HRBFQccwiBG2NMd;z|?;F|ds-Ia9sh z)Jnhb{^w)jOlq0-@H`&kTYr+*d(@?JyoH1OC+Ibkv-Ks*O*D`uI?l~}7qSE};4(%{Df zT`3b}Q?4Z1^Sja)?<8G`3_XTg3y|YV$*%cpu5<%P1h~=#!Zgj5P9#Iem8dVTD|Lop zdnEkrZFMDCJC^W)^h`fWTv23XiIsK$ifnzk$Dm0>QNMV*6;$4BNphsQeLwUZZqTKT zc*RnsHv!)9=1gH%FRE_IMb4d0scS{waFKT>dR&CdnEHWUM^NwR?=93v7W5>w)Nqr- zUQD`(dzlmDBBZzt7uf?FlDnG!R44DFR2HkBP`VxSll`y}f*~gtLcvcc8%^5pCk6|7 z4rq!+WNY|oezICIr?~utwJdu2kW=IHle|fRelnUn3C@rsD)jrwK%hW{2=Qd-vBI<7 za`{QJlm42Y{8k(2Co514W%-Ak1{A0Kg!@Q z<1zVcPu-@6k4-tpuH0@E4pZk#;e;Y8xV zPi}t5>nF)$gAj+Spmcc5BUblzl8Wwv`PX>WgF-3`&f`8L$hXn#SuV40v9>3VZTLxh z_{kn-Y!%0pa@wOvi{-=VmY2w*e=+!^ z`Pt`yZO6~Xy&x!FKT8{uE&~=m!KeEJ1<}be8qH`2RmJX3lc+0@aaX3;Z%nPlrL}un z^0C)3#>2;?kKkjOYg+Z8F^pO!4+c+^l)9DS1Cp@BRnyEmFQzdN`KO_m$_V?=U-FnZe}cuRMf%0QWG`z-Wg(W4bfBEQtE0jI1WODZDSB>he0UQw0S`M3Z$xZ z{GkBBKIz|_uf4JfVsibDgu{eCuGSV2ej1KlLbUqq%k+ca-mDVh%e8w153Rq(?9N10_+S;J?lJru6*NR>c?>mfzj&&x9ZqE^I;plTcx*ij&#k9p# z^tXz2l_aNAkvHhkb}3>%5(m@&6!o38hil}p=1lV2FlSw-pESl=|)>B~r2&BJfk-4&$f zEAuJpENDKp^=TnOgLbAgNrSFIWJY|gD??(Iw-K{u*ZBMK4bb4SnoVzMD)UPnacV_9 zFzwST#I$K3vQm)qihuFivuB>qgNnIaw(9kKUpjS&v1_73dub-Y7J@Zl+pz8sg7xm^ zqVnYU3~DuM-;~nYcMV4h#{rWkw>nl^I7O_s6f$#tjRcBhcuo3+gSil!9B}9|#bgCy z`oqKH+zN?J&U4u|Yba5lZTI)t_PbPgx^0`MlJQzJ>$mIK06V5Xj?6r^T}^N@UW>B- za=d;UZ3?jMXHX2rRFvPg?^ck@wyCcm+wP;7!M5E1wZXPA&B!oYaZZoz7Qbzu1R7il zv*|%0dpD?n%V5G`*Vl~_yY?Ce--SvBs`+}~4s7)E71Srzx3V3iv6$F-a7T}(pO)R* zdbt;>4|qDZb3Z^hKtpCN(L-_mgpNG;F4gyit=5_im6T|vgj9Yj;(N17OH{YtpBp>r zXc$!uPKq~iLUhcEi<~qqSbn1%BR>cI#OarP#xr*gj*-P(b{vR28(*SDknGmLLqQLd za5-J2SYf{Tj3jCs;5@MK5>TR976G74M0vko zpr$=%nr-yL=;uMnGW;0PG|N?BXqDuxAnbbW^qs3ZZn zEfJMWX**6*gC2F=3&CPfa~CM%VMyIyn4i}UEb;L1%#yimGG29emTBg^$v;8Qc+lN7 
zv)Cl}J(d+l+k1+a$mURO4W#H7t8740h*zr12jk(UaO^HTd%rnUK76dWp{L$3+!E-A zSAJ0;9O)L0=Hs=S_dS#6bAbU8c(0~fvG5G9>*dqX<0N`M9@>|sqI27Q-D5QPYDxld z7G8{jG!UR)q?$tFX%7(+O?S8MLxlgr$YQ)f1C^2;EsGfGFJZhOs4Mg5w-yA)4o1ib zBK=y}bKS5L{GNKXz$WntP4Q1M+#>ZC_{qxyhWd&Z(mKFAl z59{QAcj+sEOOr7HY-O$kH_(7X<@`YNOde6;qMfpIc-DEu!t#3?p7jE@E!4B7Acyya z)s!$OTxzdF}XLAdD)M1Yj8C!ch4o5TGBv*ezw1 z1o=De4%8})EPzwMAjytOVnfpn@?=4ULE?;Uw}NgI#+B4R0!jvGgz@lmcmGOuA3>`X zI2TO%Hp1xntxWDc49#-w=^o5ywqurCWe=oYY^-M^-kkBd+fso`!^JE=x;tr>H57BZ zX4y!rZJXtlV4-GtA#yNFnbhDVuJw&#mL(tovs~$z<@fEjZx zasrBBmTVr(vVro`EJOE-O{rN9Kxp=x<(3IzmUXNbY?hw_1C}jul4SL@ddDnl2o5mI zDwZQo3Sn!OXF~v(r3i^Q36S3`pT&H{F-r>7vRSr~U7K{Xe2#t+t)%{8mSso+&9YL^ zY6b2!%ct-9U(K=tB4M(`_JxDYl6zGUjSUyG?4g`wD(9YdNVIgn*we&Lw=X)%TDxu; z6jK;0TTHL@Js36}$g-VPMI@e_;Os@cNIIuhu~w-3hY=)NLnLfpEl%xgF`L@gYHnd) z>yJlRy^ge!TU$i66=iE{7lfhY`%XR~cc&!V+uzy;))0=>7&aqz`mR@Ei|gC`ZEbO7 zmM;ude9ypr4T9|}HJFv+!G4&*K0MwDyIh#%a-wL%4JMvbWM@EL#fKIuvxwKiiixmo zteE79%R4waVBw`pC33Y(rR0*{4nY3F`!KCn2Z0ffDL2}To^^JjoN2%h*to)|*PxiJ zKurHqsUBDugo|BECDu@)zNJ!$Z>jW9DT8I&XNax=NwDD#CMcynX?YsJcAn4MDnxM) z7VQa6E|o;ffc~{@fVtQm3Ro(=jAAh6eA~ZNs#K6`sYHDREtTYm9(AzvZ2+}Dv2^q4 z21ID>7Jnb^B)eCtouH-Cl>)C7)nKV~%5a7mdEzl&zLm*ujMSe*1B7bqI>~C(RRXTC z>}IQ9@2$bqMu7Xp6xd_VHq4r^?V0t#{}Qv#SR@Y;Q$}mn$(4KyX8oUMrQRk;3$v!s zYJtuJ$c|Z01t!coqe(DXA;_#th2G>cmszt$pjlV>%=#VbJKd}g0ZBD$(W!q2;J0Vo zW=&=uvtGjOqoi4jmjAL@SAax-S>LJ~Q@Y>e-L*YOK`yhVzJkoUyzYW7CYts&#+ffyqi&FiD{XW2{cui&=!+wh3 zq+yGe|FU6U01^R)eX26n?H8|M@1r1>VN+j0hW+)EB7Lx7=c-|6BQc$R62n$*Uc(*_ z8r-&J(}Uu52-3i?MKv(&2W}I?reWmQ@q_QKpd1*c35OlKZlDEjZEb#nY?6=pCtpr> z_8UG2*xBCV*|NUP=E&^0p?30B|cXPkbk{1P^e+sWiyxI zKoF0I;iNCazsANBR6Bnc?<)LIYuA{B!Ol*M$l>n?lsGUZv0=(@*trp%?n(Gw^7m9ZZXT(Zb+JC z&QB=Xu34T(tZkcRWp&al??KMBIrJHB##mQ%37 z5@441qnObWf-_oDewt;cTTz@e%LGEFKU#J{Jw{8`3pUIBN!dnA!Sb2qy3wSX9xWHi z?ocH~aiZmN25&@5DXS*PZO4YJiwg-cNX!)Xy`IXc^qa{cLn&m_=g2Dvu zHOunr|5vkY2URdy3eSLO*+E5PyTvRIR8HI99D0O0_G}LI$IJ(tLt)N?eETj}16enR zh~(KEDk7azt5_>kUV0;m)({DsLs_3eDBNqpLr=`6HivR7AbAri`RLS|QLJ7^TKO~a 
zBORNnDZ4|v;-M<%evx0!mqDN~^y=~3eEw-6CkL^bVlcq|P@)j~LvxSc?*7nIti}DI zsU?{D;Y6d*t9UR*cAQhsh77|}z&;y-^+Aa)Gi1(#_%L55PcTJ1^+eeAmKjSpUBP>; zy38PLyUgGujS)vFVwoXgIBT!*S`QmCat9rWSbXXQUI+6s#Vk`yR02Fe^krPp zHk6{P=S0*1#oEmqt7O5U+`MYq9W)RDL_aM(C-Sn)scJ<*Saj@r4a1GBD&^~(yi*Mc zD{Xq1laaBbnW&*W{CL7t_wo&S4}l!t9rsfj1e#be@xZO_C{rr`Ldxtcg-3pR1f z-!aEOIT~Z?gI00p6Nxq4WMdr6I56R~4T{MM#Pr+Zqe^e3%e+~`5|yU}9G&rF%*{R% zf0!C)%okU1n4bc&V8v+@sHN?G?hUZx0prNfW8Q|dfaGdgH0_wTzPZ2s4>3Ou5&^5} za+zNQ?tWgbAT}90vecK?TGZ-YcRaJT0&U#&8ihXGS2I?EY+VTw)A+U6xb%=uPja^i zw?N1(Xi>cny9bYS6At_CUhLSnzFH(N;=P$>*%$3 z&_EOyB?Vdd`vR{O4Z*?(TrL*wwGj%JZqjlWfLa~oK3FPn)9A;CC`5A8DBC}KS_p7B zT9j=!hImjuo_)g8P~TleXc>20IrPqSvz$&RNZ&20MlqM=5S+_$%1^W0x7cNtdk;!C%ZE{q z%W~Fh!7NGHF3SbWZ{`2MgPX5Q;e1%0kfRqnB}40Z{IBYPV$)Le}<)-3i59m$cI!^2dahOi49Vac>kF!}}~KKshV0 z9F>yOBxg!|4lwP#FkrHRQrP+c@MCRfuEb-;dfY7A2%3|msTI@EPO2ePD}zRP0}uH< zP}oj>6e#)g1rx7&F2jM)Q-D4C8<||%3c1VlNc3mH2NR;lvC-pEv7fe5h(4)a+2J5NO>bzW`HWuZ~uV#>yo@_!sa$C9(RJr8vc0GYx_0;&R2t0gHC?^clK-I)w_sS2j=hd(0Xc@Is7d$SSJK76nX z58i`r16#|mR52KKWX`%A0|5ennoQC_lqpR{mOK!{@xWy=Cdm^!c-LE;+YNr1qXiDN z1($-OCrez3q%XYgR;-NQfQ6N9vX@gf>#*wsOpJM%ipdH==CbE7F&APv=5l^dnoBbd zx5wYeFwJGwRcbEv=Zm@MdeC*s^X|M~qO)1(jbeF>VkLIO3nMfc%HcPecK`*5GPqpL zcBo@AV}S)0R4bpd6qB)>cp^SbM&qt?5`SM*=ogish1*|Uc7yx2Luys zjp7ZV+m6CrfGmdjQ2XL$wxi;7bTRy2*U+vG{r-OG6R|Vwe6VXG=~3|MGO@7^0K*!I z6=Y*WfrVi|Td{~tEKYRY_8$fox}W_7T0|BsH_8D(v0cJFv7>;VHqM<+!SS#Ox{do{ zhxDVt>cfHZu!%i1)b~6I9Ipf%xo*WL1|UKh-$(ueLn8W>`U)D;VX{&uDr017glDTj z{(*ha+3``y(LO$kV?CSX8A)$K5p+`u=J>Qm^(2Wgq8JZaf1u&>3>;qZT}NVLybT^` zyDG)IEJcpAvZiR^dmXzdWsbt2%vvo{F1=4=9bqYRK;9PR3xN)qIaq@-Yqd!Ec%^)| zrOa`ATa?=g zg*vb#4&DggGBmhta~Mg+D#w!K(Ua{+pEd?Oe5+tp5i2gpV&stuZoPRLak*knQcPB` zn5BNqeK;A5R}xcDIN~?=gRtHazy4AbVZ{HVuY_j&PZU?S5CpB{tHCL3W{Br{Bmb4t4avh zT?`P*k&^Tg|4OM|&(7e&8(4NwF&E9l-OcC()w)Xw8`LnQdH9eb8yl;f!WPNE-#iBc z|G5{+z~5i3hVjZ}HE4NqKhhbA$E)RV(1hh=h-O{$g6q|CmZ2P$BV#Ad$G2o1BHa~h zC9&d_M7H*jRJ4P+mHiN_G>XXzNlZV@g~W^treis?`qFYdga0j;sO8Kq5X(`~TuM8E 
zBNje4z-dxMMDtkA#o$6Pl^Lm|JbugRLOCKLhmF-KjixMTqgo)P)e3cSM z_xtX$_QFstA0TRZ{C@=W1Mafki(&*n8UKAm&)GiF?!iR=hGLBWd

L{~V0}H@e4v z$6FmEI{Pj$qGA|1ZZk`#rB=A}MGt8=*>^)7^q)-Xqmw(>o^eR7C>4^;D2JVA5ge%~ z_S@vkinWPYxXFVgX}vZ)JM^9zDfBWn3MMO9%&?#4&BVmF9e_y`d|9?)`LKh+bt`3t zYp1vu7g6sk%<{9v%2hCzmEQ>>DJ!R79xE?YFnPn2DUILC|3)b=1BMjau<{>dD6Fo6 zxpBsf6RLz$@D%qgAj85%)w-I3g|n(yxSg)zL>(zfpW@z3UeS7X1s6rMaMu*~9jIy$ z&eM^t{NxEwvHpjKBcEg1%6EK2wY4&%&HX$%#hoxGzx?A+#qv8cGRhZnrW=~9v)Y9y zv7@j<>f#+Cbuwi1lInyK*m4ipayI5>*`f9&D{(!tIn=&XjXJE8T8)gZF3g1xwsYwB zfx|w!rUVg&4((XltKf)op67%B&aa4gz~xk9tTK~^c=TjB*C}8F<^sjaQgbD;HP_?_ z;;=hdF*_(GD_G22zhSSr7o6jj#3YJi*qhvjJ?ug??AE=-uFR`jBk2 zTGSo2A9Ai{J(`{(GkiBoNow#P*hgK!<4 zAbkX4O$Pf2O`=saaUQdM zh*b^-ozz01qDVT^w>uXGYHG2-8ZAz|y9m|EH(08=Sl~up9ta1oFIp@0NkG8ln>Aac z-t%J0Jo!dTT^9_b&dR~m_d=D_qJcGAq`u)Ip?;I4u8Rgz&uNkR2dIKx7^||grpJ-* zRf;7jcNPxZH|z|2k3|#`5z9X!;zUyoBvsL7LJraVnajH>rz2PF~=$%j zk_{rM(UF3Ag2=Cf1Wex08>R6FkvK2_q7>FoQo2k9kqN}Y>fwvz>OnaL&;N>voVI#E zRT&*qs|P7b_FHQDxUW=qRu90kp~G}|TUHNgs-e0+dJ(Rz+=Ay+vr!>^)sVCrXHHcjCFygjJE)n| zs)1JHno})=LhX0%@Lgc85dQkU;U8ED2J9Q^j{g6;Z}>zo(FzpXxt85GEODRFWB6aKr8n@X?Qv6pa)x8H+)QzvC5U`=hV6(%}t(CEJLigAY0p|<9KZM2E}Yv zOjfX%9sE{)A~CTDu_#Vt^sF0ZaKQ{4fA$oKj4GHbGAtYlGilJa0 zxSa)cHN+N$y&)4?oa99=>sEWe$)+<@3;6g^`)Gw0@C|{UU1$qtB5^ zVdUj0=m)&x%2VHx=ZE=c1M3m43Zi3p0NlB;)0(->?w0B!MziRd1w2;h_LDejdv7m4 zBk<`e^%#}fb=cRPq<-`m3L2ihsg0b6i(|${-2sd07uf>YR78;;-C6wm)~)r$t-zBr zB%UfV7)>z$?n!f--XD2}*3M@&bDrH0OPV_0y=UJX8a1>Jfg8;^=n3ls9)eqMB>W4y9^~`5-hT*+#zcnbbdskB zf-|wgt(u+Nq-%jl4+N+M51c8aWNnOI$oi6s9j6R~J+O=CG^nsSNuB=-oeYld&QPQl{GU;l$HBo<%B`wQpK3v!dX`eVh$1GmBdf^JNv=oo_i{Ct|GHe3YnVqAb)WMDB~HfBo>3ik*7fFh?-x- z9~=tv3EXJ9=16cz6ZH(eUqY#%EsE+15PK_FDg>7?)z>ALs^+G<9)p$J!NISQr9f>SK6gg=F~11qpHVJKg0$B zAM=mu^3-EbvQJT43J$QR1!BY;5~yZRuivGJWkke=4ial31(9Doh+#z>tBAx;*%DpJ zgUpAWL5|3RMO*s5huBgkacN7NmB+bB2(Ug(bSE>PwJ|j#H+=gH0=vjn*!Pj_no$ zY>D+!c2t9_R9~icm}_xM9P1c%5AT1Z>d0%gaj1^>Kk#(-Le}P^IK6EI4#)CwFHY`^ z^vPYoyB~dUWNqzu-VVvf`;vWlluUZtY}=Z zIe{A;AV*}!>UZu=WvjK(>QhU1Sp5(_7l{aJx{B4OiTOF)NUfeCdaV8pfME3^gAVR6 zZOZ4j`upgLc)*mPR1C4U=?rOwLQXzRfqq&1CG|T_nFd>Zh-9O+(gn6dBDJi?4oO*l 
zvy>#GSjzI}a_=l!FMBF@xD8mgj#&O2s4uyGfc?|>L$&t(%KSqMA`P(W0u=VK-jJW$ zkaU3w4f!oB;KFQF&E^u_82aJWfMkCN|d?hvg|8m&Kz-7EOnxC3|>SMb+k1#gZ1 z>;R*$k-HI<8Tdjm=77>yk&4B3S**=xn4c!G&vp0jX1OIXp@SN|uH#%*moH>%r)?Ia zmxZYqy$%;H#4JUuAtIjgMUt{*jfO7duN1jXky$5&oVG79jmUWB*J5F@7_Mj-))8Tz z`xCCmfMD|@daBL;a+KJ7nw0%P3x-XBV78RfWA{fW*zRUguEJJWA-Wy_-l+FRC=9ge z)o&`y&jS`B36%|l&svToI!?I-8~j+3jaG~Lqtm4`u%7QU;$~)t@faYuP;o zkx#j%Aj>YlIA*_|9}dETVzqhT2xvy`22u&<#&+uH3396$W$pYW#MRqnD{`tfaGy__?9qeyG&>_cxiNyeiy_+;n1zK@nTFJ8^ROL%cO>j zB{7-@g>J^#1ItwF#D!If*4wTG1N)SXvzna-Jd02mEh7fZt3etpkK;IqcPe5L8iQvh z1jDW5LgDuXMmbsNs(#!DScN0^Yts)TCY~~)Jw)zj6Tp9pKZqD82w0iKLWItjk5zy% z3-Uwlg$rLeiQG?_G3fB^5P6hXo@{Q7d4)Wz?CLu2`yx)m?f@s-J#Z-+QK;e)c2A@k zUDHeg?uPaNDA^O60ECbrm=~(p$_)?fHwcc`Fkbr7D-?j&=vdZCL;~)# zxI8QMDF(6GSe-%lnsZ*u>;Vk)5ET#C zC89_1tMf%k)P*ZnXcOL17}X5+B4U7=#{vt!2jVhUK~z>BMvKwY6e+3fK@s`9p+8of zscgtIFjhRnt!BK6AdQn)@y~-fwOA&>UxS3qgi}F+t@SkXG~rRtW+q>GNL!V45}aCT z1zvgRN+wa=QFiy<7kOxJtnDY${JWdF?s3g9Rsa*LgDMwOU2M5fNSW8*zaYAkU;>PC=4aO}@Z-;oiooXPu^Qwi!<@olL5iwt(5ycL#Q zuV4mO82QFCWBVs+;ftD``$aq90|(x{#9IlA4(JScZn)=WegOg7mM;I_CE#}iO&AKnb2n{WrqV>m+utv(MFy8UKOK14oNQxrh2Hxc8N1PLvl%J)st zfxngj3&`vMBEz~x4t&w%Fpg(3aIL{icWoOz`>BSReF*H_)F1Rt3@F9cY409iXn!^EwyyTpKFy3!eVFXe&+= zYH3_{JDbhfC46ezgXg!y>2J5nI&|IE#cOMl=lAP_4xU!-4%zHrgl}hN6~xXi!`UK5 ze2@|osE9m}YEJCl47ZkLsK>gO$w;YhMn=xcN_>9-Z~kRq-Ew?lmus0XlYXsexU1M~ zfp3{z@sm0)4&@h^H7x&`rj5&fW+^XM^U_KieX`UanXb-B2^}fQN(=#CGno|=(4KjO zxfRSkz}#oZB|jt7^NUbPLhsmJBNhkKWuWRQ~f3#GGFkSy~v8 zw^Q)tlRB7-{Wg!eRKlGU*~%D?YlxtO2S0=qy9@s6wTGFUkjGBji=QSUjBz38{EzwY zYrM|hFDhRpO58jVtz51P?-_Bt$jXj_gQO4Fc{VqsCt{gZrwM_aQ?0((8LTa!-LWh9{x101#JLs%_CGR!ryW+fm>(}gl%2%+!w=2R% zv!Hp;glVU&1q9Z^z#6m{1Q4u~slS3O!DRsis|BrAKp=Ry5S%+ROzVj2{F+-&Mp&4kJDn*;OzF1w+UNm>DWe3n00iYYl8^0VJWuH-x-N zFs2j2v9f^)c4;j#Y+0nVLr7l05b}?`ogoAQl%b;Ek(Y+j}t*>APZ`;Z+ax9cKHb7~`!nc+n8%dhm z)BR`C{pFzy6N2WGLx4L_Jtym!dfDu>p(XA4qL|>+^2r{CmPdrPJ~8hm)eC6oh|?!K zv(jSTR*0rQ2pd+W@?4u`+QA>FGL&gENr360VCq-*0ZK;h$yqXE{bUld0;^qS$t=s_ 
z;5Bs$F8y~>t`%LsLQuboABVR~@UD^AP2?*-=krK4c5ml`=;djheVlOY(;0BgLpO0A z>Yb?GCH=6dVnFxOa7*4m2-V)t~ykrD@^!@LuWyTr}_`fPP`P&q{JWqG0|2%< zbWg^Jxt+m=F~a-URX4#rEsaS>l8GL_@exMXYW1f$B|Z#Rgc z)s97rZCLnX4~YVtba`}hv0&C`QfJnaa{V|*DYXv7K_ka0ss4U60~>%^dpzbuffBy< z7EOOY@O3pSc`ME`4g0CRFF3Usmgxb?w9Jooo|0NdPL@=OAE$?s;>)y@|D$#i32JcZ z@1kEsx33VyNU&RiOS3^$1?h2M4j)i#Ar4F=EV_+2FhLMfao{va6ch)Flu*lY-~=V) z#DNXFyW+qaT)5-FG`8Po;7#(NWTk*oae%K#d#vP5L93qv3Wx(Q)5>G@bpiA$iHN&c z00qT?D^qk32NnYh$kaBG5eGyL8wZL5;=tZ$QYsGA_&fRsd9YDVZ9yEUqI~W+u=ZT> zoUtgEIPf8u!e9qYv~l2y(@HEIq2vQ&RFGpC#4bPpY$$%CMx4Qc|7$XkX!RX3Ruuz5z!| zD%+1UPjG4*hy$tqlPSSZ7UOP%7y>jjJQLM1a4f@VdG?B zho0njPR`sF@?oUF$7{SJ#edGB;KMp)VT6#;q7TFZ;o&t_1`qS^fo+*5*OC3P1ohyI z7mBrI6j0ZZ>vxfs7W*-$v8A!LcR%99<|)5`81C)9=c7rTK)urlhI+a3t7G2Rn8TH| zfS}s!j&<>! z6u@^slDc@Wd-*5omE0nK*!1n^>yVy_dQ02RP zoYUTwc(xPqImeHgI7swt2)26O#Jo?>n+2^_K;ZjjD{1dCKeaJHgI!&d;#;MBbN!eL zmG2}M-)RASdnsQ5f$!DIccq`&ZOV6T%xQ0)^3C&O_Ef%M7vG`)z9%bR0fF!L8PZ15!sRsj#2aF(8HNe0v9zTzz}TwYS^~V9Xo%_Kr+)SLr18{s*CHE_VNc z`wA`2Z%;atVs3MOJMTE6*!k_~t!te1?q&X2(X)b?KEHhm<+|Q|_a)!_*6zD=g2WxZ ziia^_O?=IHyf)aQ)0ya@WJ~SDcavYIM~MuzdC-QPAuZmr2GpL+pjvxAIT#f}70(}v zHpOP~c-L+X1+j^IBBCHZh6xuY;n7W-_>mJ7q4Tt;%jVbv{oAtI#Bk--k+|EqA^aj} z^v4BmIgeQhMAk!{b|?}uT{h>($LwRTVh?1rLiPBVV{8FPu;M^eBqLjjY~C2HPv~Gk zncid5Dx3bxrgb*`z@|SSg<#?P=VqRsk#TAMW%-xqUy*<13upb1{Hsr7kePh}Np3KgEI2{4N?=faCZ3A#z{ZQ$&fo|Y)m&*}FQ~khC zx$RIt3_M|n+HfTVvTXq6#Po;MmUxUe+wfLEKXlVJ@R)E{8}`{&8y-biO|^kFcc=}M zdDndVwluAMvJE`qJaOo=+7HP#+!4@*5vYRTlk086GgzEUzA}wG&m*qBWt|nCN7K zDgtak$7P>Dt*HpW@Qd&{=aPK8%O^sul#2+gCIqj1TC)I$1MR+(Rf(!&c5pHflghDA7>+y?l z3@KW-w<2tQ6np45{N{DF=Qpg`ntYtU{JQJWkmyW4^l9?ogAU-B5h=h?0NC|U3jSaU zzDo*jpA+}IqCOInNd(@)2#zYRt=4dbXQ9?4-#2&OPalkND1U5w`ZR(Drs9cQIbB&E zJ@=683&T&5)bJBXvMUgkBqJM19wJ+)=_Dth2MZ&M3nMsfvbI{|dLYSRK+GSz7soIJ zcs&1=_~Q#`ul2??Tg{))H6OVA6{%^)50-Yu*us@8TrL-J^YAZR?9JqI+{6hOg#u!~ zBM9m8T`uzjEIqb}eBuLv^4$u=vHRO07L&}xR6Zl2&+mJbc&?Ws-5vKB0{W$kLy=(! z6w6xBVOtNyyp}0`n`vd_w3rmvw?MH0h+K?mt2Jl`%J`H+5t<>zYg;g#yFC>7L7Em? 
z{mkJ>J<@|g@$(ia-UmdNj4e}akLi*03_YF{!?J%clef3bsP*jsb>FDxdL9|)kzY0%FT`sR;% z@q{9X=vA8M-8M59K?$r%n6J}3CyM4~OXOPOMjFj!a=p$B)Ve|^jJz4mpK(x@xr@1S zRhz&EE4?hzKA6CZ`E#`k<7~wkOpMK1E|UD@!dcBcB>2*%OKkc+QYxl3X~6Y2QxCtQ z&}8suEq{Jnho4o<{lMH(=2jzz7K^IZP@q-(p?=I#{*1>@QX_*x@B5)MQ?Pj?5P*Hs8XO$>RgI66suRZMO8k=EVeW zR(g^z*1VYGKoU8RKj&&`XV$#WMP&%`=2&8M=Fj1Z#%(yWzvhL|?ymB!6w3P5iAqY$ z;3&!?jKYnx@ZGZDun%?RPs}xZ{DFazw(FrF>95-G;o&Uid=7K zUdsQi`N3@Br7EK+{huz{qctz`-ibVU9}ir1|52<1B_As3QnSC(C?mbTz%@GyV52#M zM9`q$;H^xLlk1IU4X>j!h3SEo_+EtJX+9F!S<2R#Eo{--Qn}Im%$9-3^K!k^tdi?S z^Qep1hbW_@^9ZxV-j=d;<~6}zYMxh^Fn`j8_mJkZDL@~kDXqUt>a8%{P-bX9<|yV4 z1CBn|g&C0`RDCVUE5GoqNq23i4AdKY+lf<++Teg~PqAvddKl?HF zF>=)ILZpyAk8;e^d@=JstCpn)zjJB)3(a$r-NY0>llZeyZtg*Wc}p?G-d<Wh>|$sJp+G<*;mb%`awt7U2jJ z%)eoA*J@ikOMPj7+$`0yJeDoe{7U9WYW-ZPZ*gB#xMA6qmgYn)3$rY$`DUpvbvBwF zN@p;8_9lhrFn=ZTj5hnb;C&TdM%|pL`4Z+g7->rh={DIs%a^;#8#OO=-_yMC*{J#! z{%bWa*8P;?aL>zpqU93R;+hx#n!!B${SBn%R#c!RPvyna_`!V*^AA!%`Vb2XEk?Nj z{c?e&%>!dIA+ag8@(%yCk_!W?PSzaceW z0LgqxOiMMBP?!xl{1>XFpS-DXQHMcUJ(hMEX7aZ-hqu(s)2eOJ(mbiwWbQ>ZRv&KD zTWtEA_Lz`+RP&;_et_%LVu0=2yA>)_kC)nUbG=ZHi~X2Oh$izR(qwBp63=$?zMAh$E$oUsac2Wp`yviiuou@s|EM7n zJgbzX#!{4l5C!RiXVELjI-YnZ%!@`6A{YWFF2?Zqs{gI?Sdw+w@wSUS`vQHZ8PiZ=0TG z)01p^j7>WsHHY%THehefi_p6W(h9Tn8-a^bH)vjz+QdBcvjVC4euW^3^dAXeshMxn zdYjhT^c9;8&;$WYa5bdJ$4{J};s(v#7>CnlEL(r>p!} z&5N)dT{!z{Ub=5r=FtVMZ2J3ZZSBuCO(3Pob%gbpgZZ;RjmmDC?QU(V4a?$f_B?rI zpL)(Dp)Vd~#l8=(T(dQ?&no05lce?CCE5XE5%V?Qnc}~LJl$ysaMhhk#R%7#DYlzv zKW2cElKvZ`)rHjEnio>HAx~0AIi!xYq%Ksf!DP5ADWP{;DMcy!b(Plq5h-EcAvMik ziS&Z74+#L!&a#+GMBB1Ws@zNx|3UMenO}*#S*`_nENDcY@{K`dZE8%kxz0>iEMawn zmEkI7A!QdL&zk)#tPfJt-zDEL5`cU+TNa(PvZ$*_iHm%Nniu)&pbdGX8wc@8T@}$U5Y)@ekjT+NjI8D1vhR!7Tk5_c@<4?X9`Z- zOh;;J`17Ka;QVi<(U~2nw}}M2&L-c&pJ^ljre%V<(d3eWHP4%gdOZtbr!O9|TZTFM zYXM5*Ly9N?s*UD_eh=j(IBV72C7>N@8T3$zvnk6Wz}XY;DV`|uEzQfE=S9uSjQuH# zJ5kbgrcvq0NdBJYWli#yG-!pnjdjtmf8*L*t%T$(#cCEjWU(T=DCf^e{*1s61sZ~s zYQK@G)y5T?&*KE_0?p^L5oZf-+-!kzOi#JM`jwi(0NzCrvsmdX)gF&4FzsCg+G<|3 
z^t);@llVVszBBWyG+)I0XULny{D~?>(c3H*j6eay%@9eO%#Dg5=5d9EUF0e|*W&fE zuo22e@Nd<;bmY~V7Z2Fa)@O7&mrUDgf%Irk7W|^VBw}@D!8a(NSj#1CGM^}du!&pP zELYhZ7Vkw1`$oG@@RuTQ#uFL+^SZ%{09s7v4w@Uur~|0{zpaXVGECE;rF)iFk5&aQ!L(SH+yPcbUKH4 z(V3#L-=I$Onyr|iZehi%Tsr{ zZDD7)%1*F&T`lZtWhwZyAJbluMTWM_qccudBq{60JjxQwmGLU1XrK`IjCsI5bd}B4 zywv>_>eXmD11h4}&)OQapCpw=+X-U;BhE+{wV@XOCRf>&nimcaI~>@btetOb+~wkM zi{_<9N5IW#RvW{Rn&bI%BY)Zwmd)Iy{MnB`=kw=>FY&VrbG`WUC-HKb`-Fg>aLI|} z&R}CYq8Ke3OaGn@DDt^9lIM}9d{1kc#E8!ncQD}#H7_g8dtLQtKdkq>g%kHl%~z4f z`HCy``q(^idLqxo+wH_fAKf77M%9m6^q1K5eWuo*>NPLjHN;hCs+J4?$6Wcp zyKs7GUd;1kD7Q1Q=)VxR(c9oxoyy-zwvD2sAgz@qZ=mg@4Td!Eg<}{h5 zNRuKj(tIA9HcRtDbb~S$k()H%%y9d-3;&B{_nF}%Z0s-@QnZe5$%>Bln zhX{CyxhDQR!=JDCQ%b`j!dz~Sug zEmm013UI&grEsZPjkG=+yC5ul4&5j56NSb=4hIrt$9_)9fcA>eXf#uWMw6LL^hfzK zkSu6FEn_4hr%)ioq#?GjE3S!jwOluvOKkZCHr-9|SD0S7X4p6dsUyvw?6S_p zv>V(9BX4>@Ji2vA0b=8AG%xeKw>|?2xkortH6>bGCAFH&qkyvZLy|U`2vU%jn)h1R za4i$c&r=!p=ELPESIXD8D37u7j6`bA{g~u2|C7SF=_(}AegY4{g(*dgofaMhIt=00 zpwOHF9O~|5Tlc3=QP@Q;aQ^}qROA69^r>7lns&HwXf;9yWW7;T+Gy5U)ixm|kFDCO zV#bP32#d}PGauKy#E=9n2K83t_ObZrp2pDIP&K7CbnKLA9GAAQV zs;sMp9fs5#yI3W>P%5r7{gjyKIbZX+l<3z_l6w9F4FoCa!9y*(rGT;r8zpTraiqYJ znlmiy4K367$96+=ThTS)&i4s1yr}hGniq8z zxd;?$UU2f5$ATr-ra6)>HJv40VTxTe25Me}2)p>4#yqt83%ZKwF>>8#zO~l}%k@(8 zsa(VUac$yqvC+(ubcJ~fDZ6qVRBOIh{ZCqc{gy}lgRlx$yK+nBSf=4Iu1 z2vlXPer4vqZ|TP4?Uz%}dXH4w9z5))HCgu^>YWgwZ34y?|O7 zt9hBo-GjW1Cy3`%HlDvA(lv_IfhwB_LTtr9m7r8SOW|T5cWQY9>m5yk5{tXBt1eTJ zgqr?BW}OLZnRwc1nlB+6u^;SlGW2vpxmf|%g*;;Ndn;lNc{G8CISftIX=L38HpfuP zcz`-W{3)$p$2y;=)$RTHxK#53KM#5H0visizgiU_o^Ub% ztazNH>&%f_Nni(RUI_eM7-0VDDmz655JexSd8x-~lFWgxsk7&e)!cx|Bw z)^gowHd~#)uDH^qshXG0e#}MVBa2&!GL`H>)uGg?5onVc%o^ZctThET5P5Pfbd}|4 zUbviO;m05~Jr+t+q@v$`&_t=w28b$xjR(^3W332BAT`YklxGKApjrp|NIT{s+uWqh zn|HuW{D9$erm8=K7XLI*P;CnHzpAJb3KE)^aPyVs#Y1jae6f`4m1D4^K}xdg7XkTLTo{8K zU77a7MlHiNF|v4#sV^HeS8(FyT?_j~xWvs%Qoh1`E5a@{|B2L?%6aQ3q)towNVhbaTWqBxERoCY^@T`HmgN+- zg&7tpSFTr>X80A`$A11+^U^UL2)Aa_PV<73p?T@xH;E4|zhr4pM-AkM8y3tzf^xG? 
z3-Va-Eb``SEfBL9&w>wGfZ$evl#QGzY1~YprfLMZ!i>cQ`=devB7%}2qR7mlUQ&BEzG(XRG6xwLCKO4=#tOj16(xqS%?daBTc zPATx|3nf~wn7G$!Ug~vl)oZ7D!O1|L^>S@}`nzLM8>FF)<{$vg=Shh9V4l|5N$Ka( zR_Xv{n<&>yHqY`Ynirf;rBBwG4^;IMQOguA-7pM!?S>n{!AgPo45dJ?u}SAEx>W7s zs@hZYqKaFTd@k`jYPqO%U(MICd{@m2&Yy9iFE~GIUU0tCd>NZ@fYK~seh*i^mF5Ni zS5>RvCp15eyk@#&ct~j$vHUwN7uR`I^WqgBYF^s-w9=7wP0)N1Wr%2Aa86g6f^$2{ z*_}5iT-18G<~xvQdlzmWE$1n7=6BdVbS(1zsCl8cO7nvAnG0v33umV0ODWgSkezOs zU>o2nt0cw#4RBcgv4ux)ZRW}ao}aYHZ(3xrA5{LAy}ry=e#BlE+UwquBL1V?+({sw zQD7^N27pVFYqYF0CA%1TY9>UO-Fj=Id4X?)$(i-|0Z`O-q*fLcAE^0E65mtvayw*> z^@Mk{p1@y`vbb5JsumCaQuET)A8B5AjM93-;||R)AdeRmr-Av&%p-i^PrT( zO*t<{@&Y0g^+Y8h7IM8sx{^o-Dj=Ike`i4feyELnUYy3C9{f2$ir1O0NI}%lG`^lI zc`={8v|LPMXU&UgY*s}|n|{*#Sn^tpJR{+kNR8MJ8WICRGnPN^vACW;Zwq3Rd5sq@ z@#o+Cc?Lg>ypKt`)Qo3gj6Y-XL&{~mxEntZw}kzC8w+k^0qR|?fc}h17umAEYgs0d zdud*F#!g0_Dml&tKTPwTiMyZXvzgyb^Wy4TRJZ+EZtX{0VU3n&vHVNTOMiZ(`BLhK z_5;BuQNBCgQ<-T$C~GGC8O>LT{Dfnxo%wSuuC*UST(KXt`Bts7l4VzGzKnJD)B4L; zelE*_+e_i4g#R{2%Z}5s#l$;I^TmXJPB?Cbu>5`GO27aiuO==}Ig#N%hsDF!; zuqTK;QLFPTGqZ@DKc7Fd`SUJ+-og(%^i@fl%)eRq41U0~j8v;NU&8zYnlEPlKF#-M z{sd)G#rz;G&t?7+&DSyicg=^H@1=P$oRe*yZS9IYC0{*TTXG<-&Azs94_;*A2a-vT z{#SbAy~G)7amq#Hy~NGGX!@*83x371W084@>wnX9wWRSx-;GUM6RkF}FrL`ork5in zkq&=oIv+$>d6vNEB|7}B_|uUl=7Iu|58SNj@kkS|qL37y*`hV?`mbJhCA;yq#u!^; zyiI?#>2;RvJxEC;18m44Lx{{vEVIPA{Hf(H*>toObB4WM1?H?bcB`h(B2DaQ+-Wafko3L8 z%Pj1$n=*Xe7M@@kkGImbZ>N-QM@nfQwAVk{G;0s#@V=z)C7#+-@%urqiLRoec%mZ( zJ##PR^tqLO1g)-746VNN^?7gp+FS6+x zsEp+&9iZu5Ha*Ms)9*I@8x+X?86;^uaT8jSxJ;}&p7_!hX50Sx$);1S(|&b`R^C<8 zcw&%E&qF7Z`IEM@6E;2mP%VGart^`K`El7`+Oz>KvzkBQj@p)QP|UV2K+h)*v$j!V zfmLWw;%6zGoA|Fy7h2PuE>;>(OtbE|aE&@mS+k~3<2o@BrbqJ6*z`7-GSA(;FdX?V z|Kj|M@-NIESQvY*1cxRM;5Pyye}qT>FgP4-+h%aPjA#bFh*@zj-gCwQMh_gkYer%0 zEWAU$rXapkr#Bv$o?j5@jdNRLZC^!ge!!?8Hn3AM3$yT=bG(v=yZnqlzLyrS+=)lM ztY5z#A9iML!-fsc;j(*^L$ohQy*&*#mI@-jyFVcns`#lj;lZ#E0V|B1msuFyzh7ig 
z0Y0gaUp7OImBFDighkH--MjEQ^p*mM*e9|m-}%l`IpF$-6gwatPd68Q-fVZ>=_Lp7LGnlnqD%Yiq?uT4WPnkR1og;^yGmM`QCH8QQ(3-&t{UV=G_4tr^Jf;4B2_->;fJ-PO{=Z#c06~#i4x)2- zv3h+t)OAHTwo_iXRRi;QdDVSJ4jQl+_yU^47UG{Ros0iP_3CGUKB=`Nj2YrF~$DxXPMOsLN^D0xj(<9zJ${@>9*k@Sz-F8%s!SVv;NH_`0Sp=e)y5p@MGI%o2(E0Pvt=2Oy!4uKbamFda zV?Yl0gu{{kD}CA?DJ{b+yf8A@nC@smLC>#;pITs9pmIivtk`U^wb{$xQ^WfhN|0B{ z`YcNvPbnS4v(E_^L{!`)9hx4#GjIiw?L2KW+=s@uvm-jlC*Zf7!kMzvz#a zkksF=ul;rXJp`TqU+C|I>CoS?_}O9og^Pprw=(bli~i_2N&QW)`Rn>S|MmYX{jG=k z&5v|kcRUN4|LEkW6dt_umHghtbH%WBodX>xV`JON<1%sH1||&s(kDSL0GTt4ezBYF zBnSggzIPUcOZsxF_4H+D9+W!FAoH{~p>fY665*)wZ}a1QRzh{jaXR^F;m9U@R(Nzn zo7~0sWa4Yt6~j5KMV0~kTjVl09dYI=3BoXEgQF^?iop_QTqy4Vr7(7PRydC6gug-@ zQs5>r9`J%#pT&Ul)u)VE#3IIgneS{eOVFhV& z*x>!V)b4;mZa{0ngZ7oA(Xu|A#TvW`4 zbkI*;?tDP=%74SBmHDCN7brA7mVwq6Mt1QCI+~!!fXu?4(%-|wI4@mhawt!B5>ZCa zEkzqW<#V9w?nT3lT=|-xa{; zIYQnJ+r{M6bO9ucy|#`>d3^w{ZLK{ltSwS3Hqc=*fwFz%?bG4i*jLe^SuQmUKO!7G z6-Q$7e+)Z69J>#5 zUAOfN|9d!!vRL;$FG1wNwD(xTIUB&y7ps%UrsOUjxl?(=@I&xr&9XDQ4%@?IK#>&< z!*?o-U4vQqN~rwu_e@{3%nTxL8K23&{|@sX_m98AzwDI&{)?avvj#$>_%FU>hui;` z_-6$1?@RuJT>O*kx5C&hRrv!8BL77%Z1_Liy?J;QMfL`K14aU(9n{gdM+e8?HsCTQ ztxg{VO`Ovgx3>l z0e9V`p3u|3h-U}KsDdP&8=#lq3n-9TSi@2Oz$BAT6o#(*EF>~puznfb5=87whS_j#iLJZ)xoOqG}xCbdw z>$gn4V-{ZSQ!iWFzAiSV-p?0&tH*lHY+qw-Ot)-@8;W)S>|dr9#l}Aeir_jkpL&f< zFO(X$cI^v-9q@WQy`kK+2B)|xbDu|N%4MvkJvddP7jt19$;h;zNG&vZ@oI^Yxf(G^ zyLaF87e%VYrDzbkGzC3w;|k0Ii=O|w;{02ee_f&I0ZoqcBbo1`uHEjMYsU%qi*qjt*1K2K{^ksDvD$EWW6P8*Yuhm_+0!w6cd9h zznk21a09+F^PULBpLlp&>siT?YrW>AdU=nJ8W z*Ep=sj_xzs8!!+QJA`d!WqjVB0=p464P2(Bc z#P|!q84LE_LoQN!si1HfQPa3bqUwr9BeMXt&6DsD>m!tm@vd!Ecwj+P;A6sSGkSI_okoTZS-e1eaV_$%2Ap0V}(3E=s2u4je5%uEp zoL{D}x;OA=0)MW>55@uYRh5+a##eCSLL$W%9s@~?PwknQ+p!Q0uS1Q@UWBqQD#fQ- z#+-_pMW!hu(}MtFoZ&FyLvI#dy$NVUsMBvm-)tRP>cft` zM8TCs;NpNY?dmetZ7a-3`!%qb*h0|8){Vxp9=Q_`Kz~~w5+c{|_*x;u-jAWLv!FP) z8$*L<$k=c`sDb5)+)`s9UHIi?#6sAD^*(&A3}H}T z%>@_a9(sV)uZE_L42Z_mFsX0vpJ1VT0LKi%L9klKmwgT$7|N3-gxbcImdmQ 
zJhdxBAN~bDy%6Gfbz{sA7)!oF2?BMrH)9hx_!r?oDYQ3j68#t5?J8W43pny6>~C)4QC(Zb6ZSXRvF)`k@)@ zcZFxL;{g}UVBWex&sT3etmNbQn4stmG)$gZH^B*O2!BqJm=E3G^5tm#au}Y0`lI9} z_P?-w!!59c^Tfs<@f~}J&HD9aG6u!nlk&;a)rcQtmks8}9uJ0q!XOSfu9vK%obi?3 zS}JcH^&sRy(s{TE-N5A(h$!yNZo^xFg;kyz2s>i{=nb_$rh=tGuna-0!8W_?XhrlB z5Du@Zc8dRbJij@7L$wm@1Dm6Q{r3dRrfJbaU1Ccs@Z;_dx`(v@7NRE8%Cw=WByXRv zCkyKIP~<`KN+w%?bOz4BYjYI(AVABEk+4PrK(w`pW$Uf&mE7h|%+C_^_v;M3nIYQp zuaJnAUxt>O&Z%hetN7$~E2||RUEU6#FPBe*!8Da|5$(FOK{K|5L+$)X5ZN@p0N-uL z0iU&sdx1oxxASCI({!W*jm#U!fhAMIdXSmov{L!KHka7!nbKYMd=^~uOh*U5GVnT= zSf(|PgU~XCvsDKGRkyG0K5CG46XQ&0Mm1{!SP#fKD`i@5a)%W_cMvEjt_sEJI%3z3 zsEy1%te&%U7J7~QW(hkj)wI4f`bh)0qZjPvO~DoWdP_8JOC}>T3UQ`2F$FA53<02Ol(Zrj}j_rP1C?3sGfiVO*B%cI9ux!Sn}oJxtDlk1qCcvc0(12 zS8X&Jj-f3fef(8%nWyeTVhva4&W+G3@CB9hkI=(+!P1I-^&ISYE6`R+L23dusk6&v_CHhJwpwu|0pb~(QkFS5g zih>FjsY*cA z*NLp^*bBl_!6CqzsCt6(3|3OSK-?%9qlg5rlrw003|~JH8xTHeLSZN~zxt#}AVP!r z<9?Yxo&(`=eQ``~6iOI1eUX0OJ@>A>NA)}Ddt-s3c-&JFP!`8 zNbWM7D_71o{QP-Rj4}JMpsv<=-_pj#RdQ5CH8SroDLhkR{prI;R8=E0+k-F5?Tu>c z`7+wy!f>!2se!eA#O=uGmhOWpTPDH81gp%Mz>=p@N4oq_q{i)|(B(J6k)k6k@OnuF z911-2IeK3tnl+Q6Z|gaEBOo#8Qc1TZgOPaw6sA#^bwd3B3EJukHHN1E%G?*?|Hh614q_p2gUatn~pUoT?>+|dBdt%T(T`hsd1o8+D$7-SmB^+2lIG!vS} zJ^Yz^mr@&V#B*sISjm0pUd%b#h{KOEyCacACqrTy&M=gi)A4j~x(7cQ{2-Nr!>v+~ z>@D{&fn4QYf^<~H-3RZZjH`+djKdTOgD$N>9^3LsMIvrzI#7icScZz$)aF@+F3te((%jjbbq)TKOOwp%%3mt!(u<<%YXID+kAQGcKoc;FOh=R~V| zRoUa-wdk)qRZkw5H_$^7=aEDMNntWaH*dFcP)zNY)7yX^LN&vnU!BF;<=O0v%)96W zd`vxQUlQJLqp6A?Q;%1@H)g1>Pv@KLjLbCYz++j{i=p8`u;#B!#$GFCyY%z&QPqvN zVZf^18@pJ((2k|bNzgozLsUMk2aP=^ZjXhTK^FXGzYp~CaXzvtISO3BVPvj_kXW~1 zIw2K|7y)k(V$8Z@@uUaw6(q{h{27TKrN4j4br@XO63%?`3q9H5+y$v$KY?PyF0QVj z3O~OIiFW_YH%4X^RWql!tv?m`ujAS6)jr5`oblt<$=InXehAf}IRUR&q1BYsWHX%1 zl_y3TSXFRu#=?CA1U+3-rtUGs%tegS z-tgfF)4n=?7?C_jlE0yzVplEdaj5~#Rz#fp_dkDzMaTKjSd?T-TJHdUA>hNR-an8R z`y=g8qVUm@{&{>6R<2Ea@kR=3K?Y?bIj%K4z((d_q$O-4nR(G@xE(zR^E(mxitugB z>zq6^Va;v?vZQlzb==;AAPMV9-Tm2Umh zD9|ryXMo17x3S5VI~zpe@sScn9(r2_$?t;Rf_^^7)wGLzz4=4x88k!NXOuv-4(Y;g 
zsI*crTnYAX!VZo9VB)c7l^PANaMH~$yCe5sl;Y1{=vV1tXGj|c+!>!j4>N+@Kgv=H zsuuWjJPX+>+KG+uRqYQ;f+w zHoHQk@F3vh&X8u$55a-kQ?P_oofq@fbvm5J?a8ReH2S4-VdtDxMuhCCxHVb3QwAM6 zoc#jJyu#EGC8262!@JM4kBJh%!A!E@pnw%$(I)rPl|PUOD^N6-Qes<2vCgTLvL2HO zqsXu5*Bk`v)C#XyXJG%R5$G*|Xn1%~^hcM$%KsAc*%K>R&jP#QvQ73FVDKF%J;rfo zvg=ib3qv(?`4veELZQ95b&(r)E^1jAr>ggu+ymEr8Ih@KtrB!AV$j91$Z4d;^i$ z6c6ZLh>EB0FcfjS2+Em}IT2%>YXt(Ie{{cxQLJ0+ega90xw?_nS3w0B|c_Nxyw^;5awgYASN$0+pPkV{r z9wBiu11#kf<{pFD)zfW7=iHdvaSJN(YXDMxJ4l|mKVLt0CjuN*VntuRMK$JmK5-8> z< z>}KqnDk<`zd(TZ?{;Qc!*Q)rz&v8GJ6!xK8!sc1eyVa<+d!CNmj2G$ySUYHaz;rVG zF_hE?s0@st47M^QBo8ao1SKD;!GTq9k*mbFg$Ip{LrC(s6Rv7F~?C1Axkz@>4%Q`J7u3(4Lz~L-zg?xxxm;tIs z(|rqy4!Y0nHxtstS==8T3?>%vkP2yJW+}W)d-Ou~A=o3jw$wgR3L01qtyyjE3Xy#k&a0FNjDyifseRlo}a;MxMf zQx)*edrEIK2f(8X01s2Z2P)u|0dQRb-~$wJC4upcUa&s8yA^X_3_%RnMlwyL2Xp(Q z2l0tzB;fX5e&ns`qC!3@^DGSi6lh%3^ z8AAqCpp6f4l+JW86Q(d|^kpP1D^o5=pCIKVtsk^~#?CO6uE;S(x3L!CQ>cF76;Ho? zs4eQ9_k_Kdk=X$;Nk_Dyq*^v4VfJr05YZ69P-9Dh**#z?gsClIIOL!USdDmktD(xl z%3=v?ZA{x^PokR{gVzVX#}x=Zb{p&L7am7*0c2$6;S=R?+^oS4NpCHJU(y~ddUXQo zJX#)dnG+>pE;5J`!Eb5pA~2oZXvP7EUuzP)x+eY@RkBED<{TCY%M6=>6%T5I!31hC zI{SL-M|&ZzD2w_Mep}khJV44OyNG*j1G`_n>%NC70u4pTz*#~k7`Q8{!RSC&3pr>@x{h_9EAScl{iQyY z1HN%%(>~jZa~_9wxObp1f0&HSYZy_lAFJAph920+PN)x(`5k&7(4Z$df12s%F_F|2 zxl7ZW(S)YWs^^W&a3P5$vp~^FYr}vS2YdrPLj<5=C@~`^om<;DD$ZXj@PLFA<^dVV zZns*NcHqy`=CQG%IV7vJL!_yRiBK&vF?DDqXq45+8HjZ8ai9g_eB}Fk{$4f4>a&OadqE zCJ)#zha{Xlq=^Oi9epn2oF`qBhCO8x<6R2?gQNtUJqx{DuRU z6-a_$qrhkwwL4%JVq<}SlM_{K#*`^C=4%p`L&Cy5NA^cMIec!5477wXU`&B3s^G%y z{Wq$WfG?&c6%Q={n~?-^X8n-N8Yha{LJ*IzO|lY_+Sas)QMbeIt=m+aYsq>1vg%TH z4tph7!vs426$G}YOPAa8Wwfk2B`ZqM{DKZl5gAaZ7;nGG$oxjK&y}IK-iTXI6ENQI zE1lD<+BD{%g!K`+OL{}81@39}WMUQNP5oI6U7~1BX!Jf&^fetF8Ml_oP&aa zB$6n#ov(0}C@2CG3iVnT#rIo6V&2psLTx3#0Bo4rs_+qQXzqonZ3KD|>OLtiOl?@d zsRrb&(Q=SoisD%L5IrqI5-5&!1jVt|QHpD#;8PUG8h4_&*-Uq)mHQN@$|y{6lL%~2 zmw~Y7b6Dcmosu;~an)T=++fK*mje|SiX-4pQrvqt{yd7?j2RciLSYDsV_$@boo>6dgKtz{?UL}Sv~dl`CWF2+b}EXk^ujfPellbkz0LDivGD1E3E 
z!elzig%2*qyOE)0Y!}b$K&Vj^E%@;#tyOVr1^p$p`t#Y~0pG;sgazEVQ6KCO%YlfN zsohf0b zWL*v@0xKwP$jh{bf%Q>#BXBlT=Rslp-S8Z~^lU!k$=Nv82vy6BDHZaOV;BKuQiQsf zVhBOmN}NJYOA@n@{w$gXJWufZ;R~qT0g+=4nnWVKo8gk=%v}QMnzp_@%u}~@I3tPc#fg<0M+Mej$46 zkdf&WAG;kL!4@`A%@IE+;XTydCx<75-ew5|Ht$ZCoruWlK1`j(^fQRtbJhyIQLebQ;FkVNSId4p#fkcjqNCMCJ&l|y$x z_K|*))>lcZWnGEvufp>LYIn#=zXt|~?pGCYd+)xonUE)Cwqqikxv}#xsc)rO_3fAg zquw!}VYqptKqcglENk5{-qEViQ3bdc2_&4Ph^(w2vIjM?G7s53Kqkgu8me!>3!I*j65P?e+Mpn@_yqSW&U1XbJ?2%NK8fsW}kIJSUDZmhZWVx(q#{+RX`%=uf{ zM&@_SmQ68tGBZ}7v@tk=61KEW z6~JSo1&o*I9`KLey}WnKXt-rBJ(L#c zpWjF)%vDJrdL7>H1Ghga>){Q|Dn9z|l5b%v`pgp$Z45^Q#8W%S;4;a_{iFNF)jsaH zGXbm;?$icmqHJ;&E=0z-Row~XVsuN~mSnH^l?94FNxBQMI8>pR zBrTuj3RqY^^-}1Wl!yMCI=^*Er*=Y=5pIVGIBAcb05O!zd_H!k+$+eg?6No+8C@(N z59qja1H{nd{H+ISH;bUbJ?X+Z!@fOZuw;d?<)UFXXj82_UB$NK^^VRRsL zUI}Orw2+b!2;-g4JlPbJ_!sT#do)T&oLNYYXR9X=EN<7;d9tRpYgu#HRh?zcgb_(+ z^w4PYjzu{bxujh?A|zrer2G*>MZUa>k~#NW66~Kk#aDl~P5Bb1Gfr5(gh}u)0Z{Vg zhAX!rUrOnxj^pc*fhm$`%9p42>Po)27}QX+UZxkjec@0FmZLG;wNb2<0#Jwee3A10 z6+UAiQ+YH~iDW`NIPM5+gY}OJlSybRhOU1euiX?vS&FggMBJw#ml~VWP$P~NY$M=A z4cY&a9vffozrAIf(E%3Kt? 
zUy68>SV}ca{tS_~jS;j;vOoJUbb2Csw2nj z6{_4n(89n;5mu-H&AE%e5Ely7AH*FZ%YofUd| z9j4|B^?u|!?_Jk558s%7>dteAij;1?u9tf<#DseW7{@)1p=ptQG?q-zx zbB;Off@4mG`_iTPPMBSKa?e4dy71(l3p()_+TzhCt#ssdXa*oEHwZU;9$`2T=jSfd zxpVhyf-&rTHu8@oa$gR)3+w#FVQw6Ojv>%V!U;>ng!U_leN1%jzVgKPvC%L!o2%U1UPC0;)`|zhsrJd?hV-%Uv%u#{K#7 zo^{{jQ!%gGDrxO*qv}m*&9iywb%-ba(M*RgPpv$0f4z<$s^d*QHO}L7+88Mh_8ut% z_rJ^ghwk2zj_*g~-94Q@r|@Smf2#4raw_?9Bz{=13KsW02>2R03|wO@9|s2(`F}i0Mq8z&4$usL7#mKAhf{M_(=8jb1g$xf@;nfVEim-b!BY^w_zEtU#0eJG7 zihg|8P2ESQ{Q^(7RX7g*Reb5rpC8c#ujQLKicbK)w^LqR zHx-sV-~1638<2za1E+Xl zO$j9bI{$E~dV(7Mt^VX#DGd)Ms4>mfKVukK| z)3mNmg*Sb$f4&J;>xERCETs#}%6Nj*w|x*Q85~t{Yzmy(YUK zhnteZ9c(h5rz&wskQ8ZTKE_zkiPz{OWQGT+2>LI{H>&@}oh8jsY@=E^l^D}HLpyAD zVCSoiP#h7}ZNY7^0{{vNLi^iPT1Wm~IB0`+ycHeB(t+&EXa|i)F+vMitNl9KOFRt98=^`zBI>dKy{JoEym$sE zl11%F3#aGu4Y^|2Lw{po2XfC{!7hg$43D!hhr-{>t7zX^*foUiL7u{UWU}Wm_^dlP zAyM>!S@kCF*$K7p^Po-~?A%U*eh?~OgzM??;_d~B<^Br2#yR+dor7a!;o-*eq5~tw zd9@p|DL>;?=soXHTLR*e_u0ODF{LQJpDxc;?nII(B<(zTZ+9Ql@ek_fJ@Q=Ro~7YV zl_%~$((n`Ubbq5Gufz-0Qr4G+UC6{eR12$XqlQhijT4nV8<$JMd*9#xfc0Hx`J60H;3nMsIcHMqKZO zUEaR~h~9xGtlLr1-zP@qa>T(Evn^_+XqI@$|Ll-9^dD-0BOMVR!CP4l3po9;8YZ}E-)Q0JHV=IgbeC@N^bXla0-^|3`4C`DG($uO$&5rY`CB9 zjdR>O!N`1rh`4hy=EmXZs&E^YGtD{&vIPE7E$%a5b)XI5dz2vd3Aro5dnEk9fN@j! z;i{?(Ob%(tiY=`LpnK2-z<|AMR-T$CF6`}(l|4CU{e{RlL^5XG5Ajmq7GLQw-C0z^ zaR->(9-WoK+g+E1KS)F$Xhd)7=Pih&*4l!1K5t|JcrL)ZTF>X-x6131ae)Xd{p(He z;9Wh#*VPukwF{&XOcBn=@@GKnpkXrG7uYjdv#2lMo#H3hM17UAbM%QN^x7X-ns6?! 
z?tIM#kK4JgNDbGx58=58MtC4O+Lde|)fe0(qYqcneR}o+->q@05C^kyn_K&P1gbt9 zdvy5TIJG~TM@mbpfxvzJT(n*Heizpzt3%lKK;GK6Za$7Qn>Cfn^b+Fj?s-rvd8Y25R)-uLM+Rdpn`7O1D$(q0qvp z@IzE_wDagoaYMxyPjr3S0pB8poPYDq#1NvlWD6FT10>k@XU zk(rA4gtNF5B_yoHEGP-eY`BooTrkYuh?Oxcj~z5OEkB3VwVQz)e%_3NS(E@tXsp(< zIBI03I}CkZ6MUa0s4=Yu!FB&>N%G%^oLHKiVAdJEj_{{n#k z$M}SW8(IoYR_|cbT_&?Fm`HaP7~V-N9Z|Vb&7;j2^TypxThK<=PPHu2nNh+nwxzzf zi-(nAhw`FRjJ&TM975nC_;;lRejAhM_o$bg5nN1MoJVV;^XMC_df;VW9o@t&LbZJ_ z`-~a~hn+67>h@=2EZ~zagCNmVdN4`JD4?%u)XC5e@ARy{avCP(N@ooC2E-&o`{;1t zo^n){{`IXUc0pwxm*l9wL2p4JQca^Ztvd*BWOATyudA!DJsrN_*vLE}S<@sq@l-fr zm=qjC!QwN9nk(Yq+5hQ?)w2d&bifMLq|pC?2b>9=wr2K{8VHN{^k)yn#j@X`>fi;X zXDpVI$O~s;)44M*EI3PeAx75ydAvYSV;b0=yihJI-%Nt-%nM9}lb`BI)%lFa9YIbHL@OhFOh1;QJdYXanDLg@;>r9-Uz%^4c9h^?0 z?KBF{a*tZ z(*ML%l=$bMjX2ES2+C(Qc_SMhg(!aP4|)={SbOp!ei6(suuh-n*SpObWb z(5llE(EarPE7?B)7Y+77K=Y3((#xm$xn0xz1(H8P^Mp4tztyNo@cT!Fe;;Yj!096S z2c5PJYMpysr?9+<(!4nr)gFb z)Xq8-qV^aeh1#Lxb!|1^y?ctQTzSU{;sI~7m5EXwI3?wPHYaS{TW4i z`P5DW3dlqBtH|ABNy#6fcEaP{NsYQ7wbLI3cZJ|CIUJ5W=hRNru9kD#X>})0&lz)# zCRDS4!^-NHR6I}Z+85$QI|%&q`z(Q?6igoCpP z0Rjb1a2c8FF-LeeFhWg_VPZEj7s$l%t*77T-;c43Nor{VaM4Ob>jbTw=>-bT>?N!> zQSrR0y7z*Zl+B66qjmDo;%gWR(K>N%5?mdW-Q|DjhlQMK?f;=4-p0;PUO!O5F*460 zG6A!n*6s59VGEO#T8e(43ar(lu*^a~l#{&+^~3EHkYao5svp#iK%#BsQeXdH`eCk2 zdH+K{^a=IDz?6|`2N9}HsAfy@JK+r`Da8bnI~;+0Eg>L-+OC>Elk#bs&A$TZi8EIZ zCJOlHf1!i9q_zKr4!#5n26Qm6*~swP8`b_)wEamYsi_L>Z{=LnY51GEY(K=#w%h+1 z#0GKZuHwxnN1p`HdBtTOX_e?MfE&L<{9HN%e(B|&In#iRrZtjZp6h-as zooJ1F8-Jc)D(W#9b)%j4czL$FW2L?De<4tXj;!U&dHfltv+bm_ZLc9tVB!|t-T`=F zX>7mSSHDnxyKiX-gD<`KbD&1JL{hTud=0_=A&!TMqfNi8#?zglAztQ73xA}4(U$8p zj;l4qUv;+M6UV*s0{;Q#cv7nUmitFM-6@En55x6LnWzO-nY6t z66a_QTrSV8Zl%W5UjtR>_i{Yly>#R*I+Ek#{z@0~qdZ~19-^B4g~s@ZysvUU(CK&R z^jCHIRGt2uP9LY!|E|+V;T`7Y+@z<}$C4d9!}f7pX2GKWqq;q4n^&!>;32%?SHH)Gc5-q~WH zr8*}-TjV=s-ugFU9sD~WMMo}Kx0Cpj&>u8&Dq{Kl*{0|crW@>JaP&Qg7kjsAwdb);kc!6hv8D+q--K|#njjQsPT2cZOvG0DNdOf53byh;zG zP!6yPkV2dEd3fKY$)?0Z+xT@7|{G3eXgz 
z>qEk6Gcx~FGie2~z(#zku;F}FZdU}cpc{KqSB`h=a^F=oBuYDX0#qmhtqFT#9K>>+ zv-)%Jn>qMxg$BPTp=9v;Z)?T$cNC)szwk!ui&AC(8kSCj|FWb9gTMI%h{P(9Lq=v7 zNfepF?GRI6b%pwB2R?3Mu;W4>&Qtr$pBoL`Kq0vlpv51W!$ma(_m!PYu^V>kIh93O z11r$I-na(FW~s(Z@}YEENZ%5#V>Q+TE1?;9{oN8si8gRL2R31?g@+Brf_vydRsrWZ z<&{+Am-eGj!o-T)KxFa!4WLjk9~`-_84USo&s4%M&nbJA&>Tb(S^-HFCSCCXuxC;} z3eSgHaH;g*z%tTx6IzfbF-dJKN#vK7SLiwSij!CgNeb0{M^*QdTn26%s586F-B?55 zCF}wB6NKIUklWo8KTDf2IBpyILa8nrb2m||`VdLp{db^SdfeGT%yAd}RHtzj9h%k=x;K;oaOke?VMLSBFM&e*c>OfbdDd6i;5p z6?I6UXp@y^@szbVreQ6qMWkKEvyfwOaRSGbP|orHezYf~#E!hL*0iwy;4G{r7dW$O zg;8)*9i+H-Zykt@2hk~|WksvbJBR{MPX&M<7tYqxQAJdNdT#2f5XVV{r~s&tx1F1< z_cn90_1;!)w%*&w&DML{xY>Hy#O1Ye^;DEap{PHKAq2Da+{#rU|LVGW>d`80kRRgK zE;sWvCnl`R+wffh*T|fWTN8B8@Ep3ckj`vT7CS@%u|L(l`(gqQamV-ED!1oL55tW@ zI{LXLVVP?5XILP;L|{wZ9PN$DLYm~G1>Y-J6*? zeuT{`%5Q#*o!4utmixz;^wj0u^)M*FhD@JtHg4SK$L11zX1?^mFYw%KoIARFlX2mS zQAWkT@xOAEG3@10#^L`RcO*Ccnvn-zf3MgG_~RD$5+)~ z9c7&J5}@#@_tG5~9zm#IFEi3R$!?XjT5y?FCzlvgc1LV{*(ckd>I^y%y)V1-@v0TL zBo}-I4^Z}Xwe()Pl@BY^U%kaBi-ar0*1~xSt(Qc^stJM{-JLr05w*J~{gj?iwyQDi zInj}vm_pn{#Lf&6=hg}ebMYMm6V|kdmrYDxx9e~Enu1YUX4c$yuXl4=yOe<4~P#j*f(EcE{{QEAfnH5Zsz7@*NIYhr-%-bmT3deQV z7`>Y)UrZchd zvEz%R$FSnDvSM)m^hk`5)A@sv|B*dKE{G=e@q%;-7d}03t?bsx zl!Ff>Irc_CP?9BVoOe}3uccSKo%&!Ldupde&@mZqojhZa1!lkv#*cyR?_LUzkx6 zI~QA*$zuog#eMdpF=xQhFm#jETS%7Ap=7=pGt%35z*-d1rgO5X?}~^z<~mHad5yik zK!SZ$B|S5CAO8lci&!*1TyK4q`#lIt-2-7uGq71Id^>=iV!}w8frUvIQWj_vlHZ$C zfDJPRfn|o+Q(Ds&>hWs;Lph#w{)BH6C}oMOmL!-zh`{v{V(WrAoIAVO!9 zV~DvNMEgbTscFF%|9|Das6C%jH*Vo;3LXzdmUYJqnBptkJ)cjEyGg0MA#4LLo=?0+ zPM;~A;-veNz`7{aenv zbx?vkzp4XND{9f$g!9;F$|OsuCEDRJ0P1g|=~=5H49HRukwge^UQ`z?%yeXtl-_Nee`{!Q8b+{Wz{{6<{d z#?=c3Lll7@;?EDf!@tPmCuBH20}1gs6FhSP%;$lC6H3;Qoq_%~x_p@+$BQr6J_h5mGX)Ow!Dc6BFRF@EsQ0 z6UG^$IWU~|5xd$RUP1OxqU>CX^ZR}cBx|tIY}w$%1&U}UrWlGEDuzc=W3ZgnfOXr^ zozM>%#!O2H(GMkuE%}YiI1ywUF*~up^q7PO;7qB~3N66&~;2LerW-Xd|-~bAZ33`5Z6!a>g8RU6a1=H^g2HsN+Og7N&E&k5a}BRIR`Mp# z9``{v^s+OrmLqMSQvp8(HqfvAACQcd7^aOG0wPF8a}d(YiwwEznCcf-uFLgmw-t+5 
ztauOqNUe17y5V)KVcW2VJ?xdIWTm^B@ULT~`w>^VpJJu^23ERUCNEbj+}APoY)C8a z#5RqA%hCOA`JotQ41?X-Vz6F}6Cy#)W5<__IhPkimhZd{6qC0V(vf?ABZ;UD@{yG` zUvtwR6`4I{-QA2zH0QPl09Ff#RscD(=WDDh7T`$T3o<-9|BF@9799hNC+Q<_sXs&R zCYF#P6}fAWm!qUwWi0yk#9X)HsrQ~6V`VyhWDHV@)b6((D_`=CpB zM&sU&IURv`iAS?lXMaOQe`5blSrzj>PEx4n=WK^$ZrNW?^@* z3QOCslRR79tL545UaIm6z6%f*{f!8Qss!@4J4k@nxW_RL*3qNneS^FHU@1hpx?IPV z5k_x>_f}ykI40@%{u>(RZsyMy{K@g>6a3Kh`yOA~r3Gu;RXTDS3Sr`8RT#O&y%u5j z?}Yv+)Wed*z6@KZ*U$DT|e)xGAKY)m_)U9Zj`z?RI#?Mk(RNYhACViYAFwiDT@W@@h zmf=;>zit-LB%;Z{#esQRzdxy;kK*b6K^0WZg6=>Bsq7YcZg5YQjBDHz@N};u^cenJ zg&)|JwN8~g=XPagehg0(m-|G&$7V7i{GH}dHx){somx>oQQz{5-4X@x#XGML||v1jnFeqYXla%`^*GqBMAG| z^Pch9&JYVvu1Aln)9P%(hLZJ=vMA z7Hm?pUp02`@XERXp7?tLf3I!fUl72v-hd^3W+{xnK7c3wUckS@w(utcc;W{v@iR+d z{JRJ6#J>aZg9O7$`5EX#B7<2@Q6xK$Uyi6Q^(U=Of@g`02Rs6BP2Vss9A{E9Go2wY z<+K_Nvg_{*FOP1=|D6TE7t+R}3|;IQQJ|?#M|PZol0g*Y)OVqAz)uoagp38{ED_uKxEg3S!OebK5>;;-Q#Q~zgDp=~(HWs}`-+Vr znN*^wGR3Wxl5esY(&m$`Sy;L0Jl7_&F8qnQQ+WwGZh>NE>sCT&^Q!4Q-As6UiFBCg zjL_}mgjw(*y*|j!Z#=W*#F4P3F;{@2Ny>an>71SOzb{=fQ>2Q0eL0qtswRshU;Ww? z#I=VES`)_@HSrAY-i-@?5iFV#euQ@`s*{oG^ux`db5l}>jh-!a(Z60%f9A4|?xykN za)@JobMTgKqUJcW*k$RK@}&6E&gOt7a94%}R1Y`eC24)1w6a)-!Ck^76}s*_i`ji| zbKFDL*7vYam9YM6EI6`L(u>@}awZnlG~zM=n%^-lx$fd-Am-ZY$}e#k4IfaR13;Ei zWGuKA3(PHX@&Yy=a22*LYf$Af1v&+wHH~BX7V+NAc=`qWe-3wtVG!gngW(d3g%waq zlR#jchE;^L6pIOIp)GBT<2rRc2F{dP^jpG`2jxtQXz#e&W0}$LXKYy|trc_UdSoF#@#e*@0gcE3ISe%`X$MpaQ5LDwXk*SEUEn%%7Lv3TqAM!K2$(D7x%Di`_3eVd8JyEN<}zJ z0Q*#)$~}!3A+})CnW@-5FL4q{0*T-kf4=Dti&uZM>PLK>cJHCEyQmZjr_o3mov8Z4 zXyE6i6OLIIy$R!H_MO$P^W=#o(>Cky!D=MawP`}at{#mc*B(yEel5NM@iu~3YkBMN zgs>r@0x7h}jW+u&PgHF((ywxVfMg-eWp00ZGu zFV&tY5?5ogPZ{meuXPjic~}-=O~k*+5Toq=1inc>WriGDZFk?5;aZ}GMetGfSG}b! 
zT%3-rmN?0kO!pC(?kTRCO~*9i){ERF8&vsbjd9=d>V|IyYmTfoX13HcY%(%UC;`)4 zP34Q##(gb=!Ot_7RX4nf`wMw6l+28YD}wj*pMm%6uJ9g*gw4iP20kr}FZMQ<8hy-- zxU|{G9D~M3$#z%l{&LP4O;?9CB=fUm+c89*Ou2PvQ@me+xj*M zx*UC~vt9oXIWOWVYNSjc9fAJB+wTwQ)H#Ms3|>QaUo_^G0^FTybsy}88I=97PfxNli+|UV`T3_=F7N!v!t5% zJ{txIQ?NV&X^RA!WAey9sLox2{${BTE}f7$7IQejJ*s!6HHS0%o5q(QFKjJp;$6TB zn%2_<4ze~eE3OAeWyGu_;y>U%Ioj;8-J%pR1hcnf-Q?lw_bmm}-kjs`X;w>^-ra2v z%19*u;NDX7nwQ;~l9KE<$s_2BD8W@r^VMkJGayl-+7G+majQXfuRXJl6kIj477s83 zl_O_y3HW$GGd6*Qp@U=but|%u&o;@$p*YZgF(^S@8OlXg-bwyZ_$FQfnw}-w-bkQg zuSMU3+}DEL?_iFWm34P1lWBs3KsE&VLpDRkB8x9FigI)3k!>*0>#!-x2@*{(0Q&L@Sd8dWQW5vaUS*+P{b)3lqL8TIA1v5Tj25yETX0IlI&wKRtV?7@*}AoL-BZf% z-CIX!vEs;wH%$8$kuH=X)~&T62~$lz_Zb{s9E-?^5Z4#TzW5y}AqH8)=xH0b1~8K+ zSQfUS4)~j_g^JATOG-FG?QZdI93fewdAcq}scMzcumX6gzpLwxz$WSv7w4M&a5EHm zfKuO^%4;gW)Ewha>Sv%MzT($Cs{_ZS9^kp4`I2ez#nK zl~%&JvOM8%Gjp~mWY)uqm)7Ci7g&+u`aEAA`0K1|{!W3@d7bqqZsrwUXPt#PlWodrr0rpI?>(6{*9g=nXsCx6Yp43vTm3U0fB{rv=3=uG4= zGFJ6!T)bdM;c(P24DpwtS(9#WS`yAU+^qzE3@C?M zWFb+s@xwNDBN1n+NfpAcnb8d5<3M~=wcx9n7`WWCPNF2Op7w-Ny%@oFZHPV3wT)ishUv6I+X!m|Ic7%LYD3tv_o&22e* zH{Spu_6^gzGnOYOoN47@QRC}Duc5Yadt3)DyMzKnsCd72PrTqOT{}Tz0r|V5OTi&f zQ?%||B-@F;);NP$oE!0(nq)zn-(*R@p$u2foZ}g@LPo)n3PGQm# z5K$iil!{=95wQY34nQE=18$_S0<6G)OPQIL8H%5F=YSdw1EI;ptro1uQDSB&M-UzC zx9TC;^eY~VYZCOjo^%upT~Tx!=}cruIyZL2?d_w2=T0PnhU15vj73JC{NQO3%=cs# zp4H=DDBVCh3=5@5ghWK5n%4c%|Bg+_4yF+ZYxQKi_V)Qwk*T?*Z z7XyJhXYJ%mJWoT{iv@pq03EEMZwfe|x5tY%LGs0YHSAWkY1x3(SPTn4%mvNRt7;lr zjm%?+lIw@o!Wi@q&S-JxOw0tWP|0Xa^fj-_R;|MsGm-N(Bl9*$Jc6tRQJZUwt9FiK z65wQD^cmj9h;uO$TqHGW=!VMZe$fS*&hVd8V8cn^-sj3uUw00a0$<)LTOJqlb+DAS znbz>Ofb-xMh?@cfMhs@{D>+uMHI7bTgOxmMo$g&0iabqudHmtqUJd^ok{*};*6=2e z`J5ps+9Xg=q1A`n3+t_4)KUtbtM)kP&f`<@ej7b3VBx)asUo>p>1X%#_Ls22(4t-> zik6wq)LL|hY26|Zzk!_&vRz4%qeTN0ksH8`>CTp(hDSeVyx30NNqyM2)uhksc5$yX zxE}=a^YvZ#JVrf&dMQ21J(ch00L0xNV#ht7KfCkiO#bZ1ACo`b`Evq(JpHSa#i?Px zyn1{ENbMIrfgN7dk-HDcqJfhq9)iSH>iA7N0PzZ9Z^jQdLRYg&Gx+m&#BjfKDqi@! 
z5l=!aVB*#Md7MAfB;!_hGO^r^7@;$I!X5_X+*!N?DT?^^LX3oXY<6F-7JMlZ*mk!{ zux)U^+>vZ6mKyg{gxxk3LD}WL!H8A(;Wk_kb{Mq z`v!iNay!gDn9@OQ5Z1A$)RtN<`w5SrUsXJD3AQe?-FWMHHeCw_=Xsd$U8{I2M*<$o zqcdHt-nhkN+Ecu}%qKuT?s2Tyc>J(tr%R1ixkK<|51c6Pxc{H;nxGDmxDDa%fi~PO_;LP4KiBBzUqE&o{I>)y>#mac4Q^IHCzD;!pX2rO zD#F`Kq>Pc0x81!*V6*NFoyXSCw2q&MC);wZyl-_c*XbiAzTG`f-dDM2@m;rZh=w^) zKaauF-2`&*3jdW(*jJug-QD!_RZU(k3YJSP_gfOcd`6(9pW}tKA^ot&-qsVcW`yEGzfD4 zR>$%Lkq+)11WEilmV3B5$W2@w-%O9i)**2Wx}i#X)d1g4~Ekm;5OR)&;>aL2yD4oD>98L9ihRHU`0) zgW&WaczY0>6$I}Mf=xm2p&&RX2tFPJ=LW%l2Em0v@Yx{P90Xqof-8gID?zX|2)+>n zR|moOf?!(^{3HmvL2!K#+!zGE3W6O$@cSSbtM^A}yC7H+1bYX;(jd5N5G)IVdk4Yt zAlNquRs_LAgJAz4cvKLq41&i6!RjD5CE2x89m@r#Lfz0?+s!f3WARZ!G97~ zHFKfFViA*__AFynKQsrhD+Ab90@&67_Kg7cy#Tf?fc+|n?FeAM4*+(Z;`e%45bTd2 zk29iXM+LE!5|sK56#zAE0l=)B%{m5`>bqEC$Lm+Dl+9?khh|zK z`Q6495dY56^-uN0+G2ydN4L-5ABz1c0y}raXjY7T@8H_I5vQgFy`qB)K&b#OPSKG# za~vQRK>>$-~`OtGoPAq@F6>BLQxywc7jwvz+S#V-vH(;ED^-v4W#v$nBY#x z!eCKv#6uM}VLPr?=!m+uyR2O0>`~=Q*u=&i#~;qZmVfnj$hnyK3*K_@{k_E8^+EUR z<}f`t=-w-ahB;DSI$p6t_yh}QRH*q0>odJta5wEkk9AxK{)PK=5w!zZ_xKCqUGf^_ zES^p@_zdAe(KTrqT437qBwZf~vbKK83N&G~d4MWd3U%<#;~3-_uPeoHkseURX5vPL zzoFl5GhN|v6ckCa9I9`Tn^w_#zDuq3rTS=bj20!Bk~+|~>=fd~@CI(YBmc~rNg;X( z2J@`ChP9eW^)2eqt1abQ_{ze3e1{69=B?PP2&WGGRreFt;W<8wK%bD3P zc?v)lu}IK9j;BgpEvFKg+^wK{~T^Pbdg6TQ*xMm+@M-Jvbxe zjMF8pz~`OT6vDOVv$F7&$|rhKJ7Tjbk6JyQADNSAA`EIAw?ITU!^uVj>0{RC1nNb!dyXJ zh&|@6_(AH@lwTK|>Cr@zMIv+LqUGF`d^$vCB_p;n{aA=bnP>M6_U zkDfT>K>=0=2Yfsi-$aPV=+^~VLqIeGTo!g_YM&~l$@z$En!toYy(*&h`Vjblh>!%4 zqk~Vv6q~&Xbl@CvQK*$Q;gA+m4iKNj(xpn2-G( z6C<-LAuK5$BFpQ3)t?=H5#LTDo|)3CI1EUg!nWCXkx7*GO7!>{UXu(nq@V5i($Cfm z$w%Kq0frGYT}@2Bnb12wV`T1=?DIGPxD&**<`FR7?<+5-zpL<(yK@Jn_%e(@qUgV& z!Mub>oW}X3bbVvRR z>{n;;)e0^12y_=)4fiPM34m^#!8`=cpKT@!K=EN|wU2-iBP@EbIm|bzgE-sON|Buz z;kSY*Jp##K1P!$^$_V9v0wbhQ2pBVgeEk$TT#_bWCaU|}}c;p?v0 z;0=*Qi|d3QY#FZtY=HfH((5er4cI^d3kil;V5Wk)8n_x~5O!NEaD@WCOQB?*ZLz>0 z1>FSb#u>~LWq}F-C_XslvjAa1>8)L{z+5S^GYfpoQE}39kPH^kP}8F<@FJkV0&`FZ 
zSU`XyEO0cb(RpkxtC39;kdzx`f!zUx(iSQx0u!nb3w)hId{!U{QkVsHypct$aWDI- zGYd2Vftm?enGg$%C2oHvU@f97a2_EnRnM+xf{z4`Hxo1|7Eq-XW`SoU!wl(V`1-Jy ztr?OpG!ry;!2(ky`#g5PVgUjcW`UATU9mutNSwvZLJzi#R{<9IwT439fCUtAXBOB= zK`jHW#u?4*=55RZpG}s6mnoFYvn>`_qM%m-x^V{cL|Nc|0VqD`2A>583*u_)iUnfO z7YnIF=L@EE2WkWs&`^z07B~q|V1ZT?0u~V92n!r45Ra{9HL`M@Pz--j7T8IZ)~28c zOsGOEupZP6M1&+rVHVhRqGEwRe%YA?mfYa60Bap$fxjUsU;)-5$^z2~VM*!Rm<7fO z9*+gu6bq=*3bQ~`GR%-(w&zPPb)Jvq15Nu2|r| zlh8DLb4cjHmhmdU0?#Wb7kvX3P{5s8-~k2I2e=w%5O!NEaJ>S~DU{5!EfzRKL2m?f z;|%7BvcS;-P~7)=p9KgD7TDMo3k>ao1vY|;kk3RiSU^LiqAc(xY0a6}fkMCn0vut1 zW`TH&zA~U(0+Mp0EbxFTtwcc)m{5gS;K}O|k28Q8q%aHYH9@h!#b0!0fl)x9W&u_v z!~(x0ZhsbFEut*YlMwjS66@LZEU@7^;_+CZoP=xJs|UTNH5#-rI)cD5Q5KV zf$A<;;1J0^kKM1=16{H}xAk4Iz$Ww=SfEzu!ItqVzyfb5DBfuU7Er*QSzw-m`aN(p z&QSN?biEd^6>uMgl6khp0)G(bbPDv`ID>gWBpWqZdg?t~(PYJ4M<2AZ&H^4rM=s@z z%$`IBbDYiq6;&_MaLx~(;po9+Z85d4EJ^fp&EvD6OJmxx-Pl>_7@QlHt|UD>50t0{ll85;bo&h?9;u>ok&8u_y>D?9qCjxgjP7jS($imQH|K-E z+0H5cHSQzAnUPW)MKT)R6ZV> zS@=%hGljr#TQfX4%X4T~8mGUiJ{WU)^tP;+U^+Hhz?WlL0FO?cdj}nR$A}xuheWfr z6^O<@?faKVLv5*eXo+_g^+c~rMwj45l`)lgmo<0(QgALUxEc|rZTVs3G2$zP-X8%8Y0Vbe=8C{;K2hv zft%PLFsJEuLP&No^y6hwLm$4@JbHV+j62MIV{`AwW z$*Ql$oDxs3$I(6*pYm+z!>=JEzcU^#WC-FFH}#%DZn_fPf;A5hA%4X}h%>86t5AFK zIRTo_^KIcQfbTRh%7zf_k$9&)k?#!CqUeQbQ}n{LDoPkP@6oQ z!*AM zX*~IBxG$FAEZW!)2U$;$ppzp%JK;zk%&Co}9`JpXaJkh`#6sM+rCtxQ2(AvuS2!9o${le9BGL z%%Oi`g#^goC>k!xhrohEn}l**lv|F>*fJ9Q)LYO1U9#?-aG@xUIi$FUKU2j$ae3$B z_M)a}3oMFbb-~~$E+)l&^mB?6IfJb#uejgn;tp5EG1+h3KdIp1BQJnOadS8lxHJfA z3QN_ki(5aib=Ux5R+`Xt3%zSl(Kcq0w!H|>!SqT<_UPX(19ha9$PSN@IgLuE{TPAi z+G&oQ$&pY)Bo0X+!^O=^r4ah4n=Vqwv|eyv2t}*FZKqy|!^0(3phS!zq5M|9tHw}a z&MXvq+koLx-Kj4tLeWjSXw}k!#S%ms+V)APq4!_fx!6lpu?lNuu3xMonxNR!PZvuN zDfZHEv3nLMwjEkQ!k#Bs(aW8iS^LxF5k$({@^Pq{caQGe%$r3ENh_=wnfFw&1HSRv zs6@VdFOVrAz8N|KbI7!?AHQm#>G9n@oL#Tb>X1jDjO$46;6R z&jqO9;Dtws01Gn9c=&0$Cm@Y4H+lIby`7~^M3lQP(g)*$&RdFwG3FCza^xpS|HTlb zpW^fu2}Kk~((e;K$(VzkqjrL_0Brz&dJe(4AK7uoD?Gi^%W*Dv`J|pW*kfdRL!tly 
z-RQS7D%{P+MawrE7vzkJR-@uyX4S^A&mwi7e?HrTPzSoRdd6`36s}q}e*Ky;ZAWzW zX5+$FHXD5$VQL)LK>q~a1JeEnLHXZC%fFsYvvASDGT`q4WLh&@+kNvOT;p65cwr3k z+%OW%lFn{7sFAfUGOOCh-ige=I1-h^bH{^FS#)!vDCfz0W5IMXC!{~FCP+7TuWX$p z4jmtZ6m^>BOT7pwN_!S2xQ-6HK>m8fb9wKp~@Xs^siwnzwd z?r+=eypc=~^~GxO+Nty+)lRudLYi4!xk)wiOlI(#nW$Pbb~boTCKuF=uaN~HHMl>WR*uSR-h zq4fVknFD?k>D7hOUsUNsk!}`Be@dl~K>E-^>36C0TBMIClwPmWMrUh{{4f21p~7olTZVCtk(pd#O00PCAN@Dlw+ z@P#H#u0%1)noD`riCfCM;1*d`I_XBm11v+;H!~~B0}rDmsK#^!$qX8@Itn?Bkl3iy zAc|Ywd&lDzuev@TjwC#mEW~3iW*rJ~OPo-`EmDR2?)t4%txInCR*goN-13o1?~+?y zQt4fC%Rf|lm)vr*WsmM^& z9@FZ9m5P$v_^gJSB7M1iqSO}`hS~Gy2+fM7U|Mx0KFIzGqzoXk1W*_N5?meDU4fJX zMAUU3q|C?iDb~A47B$zZD|Z*Z_vzMUZND%_;t*PApTK7U4Y^Ru)tfz!m6z2{()tvZ z0WPKT)=&f@`?{IcYJIrmg8aqPzOp!-K`7XDD#G0v>9%j7!^N$0Dwb|Xd|$umvAHa% z^OxFhit4M3o_3@1uhe$q!aChkDg$R%%LvV{_ezQ$prQSv)1$>?YeIijr9b#p>WE6J z(V;4d!>ejtS8C?KE~~Uo2+ONdWwc7GnQTp{R8?B_AFr3{qE&iY@OZtXJY~1pOG5pM z93s7RL#RsC(JEc0s#LA2bevzM3DGKv|iz_Ki zyfSN}WxmT~>!ey$=HWkG=97ZYYo|7sN;~`e?VJ%R6USy@YRPl0OQ|wPt1=h8{*&z- z7Ao_JU@6k4!Y{KjRAybY%mY=Kb*jvhf4a;qSFqoto!t1rOTOyyv7~yvIw4wGE0e90 zCQxz0t`@{|GvqQa$;Of@Ga-j?vr^lp(9EjMzl9t@3;BDtOxYQkV*2uV&pF>oucQiv z`)0$-5~yS>oK3*73|0)DL4?do25A~TnHlB&eF3RE7m&=LFzhTCir+iW-)CkEfpeB@ z-zfthwzjKfoz57$G#y5um zsy9uPy^i|5|&v-pz=gLE(!Ml>kf zGo+(HKVnK1|Jh($;Tqar)RrFXwWS;wdmQ%bc#LHp^Lx7tI0Hd!K%-&;6vqk*$CSL+ zK_^^HNS)8U&|J^1@C1@y(>~0l8j12-+2Ppy;7IUN{tTyD_srGnDnf{O8Tj&Q~Zs6kB{dH9}EkR=L| z89WgIu@zFsW)Kn%85%_BX3|-t8<=(jy1DlpMK`a#B6OpwmS+zsB^uxNf>jGq#Sf%T z+v#BGR+{yR(#;w`Xu4rtI?>I)6$a^4XGXKojdTH7Tj)leX7VTE(?+7kwundE{6mO@ zm1aI|bb=YGP3XllLk0A*AF|?fnHq-0gwlE9RUJmdi|9&kAD9+XRzMj@=I&dWwh8Kjdh$n9>hg{5B$S8s0OEDTC#!eFPFhD_ymN6OU zlJFpjX)f>ydl%w@Q@w&BuqMQ^C?S5yPRH(-kEks~r|0C23H%%mf{^Ys-ThWz=E=l} z-Pxm8ekvQN3p*eJN7djGDlrE;={nX%X7Ld+i_)O3oM4BH`AXgXJW z{|bvOb7i3kp0&@4X7K;8_a)#}6j|Frj6~GA!Er_$w>U-(;sTCKaGS({+$)L3K?OI6 z8{)V{A(|N!g2|O!@3h2;2#%v>#8Di#Xau7X2nY$TMAQLS;)v1_ln5$uBmeuJQ{A__ z?+v0e-}gP=|9tm(Zg+LnTc@f{ojO%@y1I%b_VuLqTqsN_M>enuXO^gxXuDyE4_QHE 
zT)wfZt|cD*XAd3c1wypp4@#7st@gF6K(~h|$0~L9#?`fq*faG#mHHo*I{Rg%Uf(nI zFD{V0v*WI=WyGGTzfeMkG)z+J?8%jSP0!TtV-y@2Ce#_xCoMHfu!8`w@CG*B%} z_Q?7K0P)%s8{#6#O5*WGlcZXt8{$|XsZ$=VNpp6<71ku~)zU{w+LhI$HE^~W10>1n zuM}0o9OBzo+|uwvLrdy5z8g#CRL)W)BN$|-6vm_VuOr7>1xbi}Zj*5;{5)d-d~Wl0 zG27`{JH1tOxn|A7lU62(vyjbSn+iZw_)WK3&{D-*Iv$*sGDK7UoBH)&Ng5oO}o z7v@RduRQm#U=Cww;n4ibA!Hz5M@~rJzYLtW&%)Ath)#+0{Q;R#o$9HYizExurE^KL z@YEAr%wVf>6`6^($Re-iO|S=bw4&M>yH&tO5@c}VmBIdsv=zw+25ARnMrgKH66wm! znh}(lME?(&sfQR*W?ow&GE+~Ec*l(&juDxWjKm~sKAE{)gC%t=K}KRhnVAfPks0PC zNoIbp6oj!>ZJx-CNhfvxKMWDj{|l>i+=%{P;!$Q~umG8Ms=84s&8HiK zeSQ7eWSNWP23l$TaT1 zgwZe}7-U;e#{Y^*4w*$~H;Ke8$wo{v=9BOf zMP4jPJ0`gaN_Y+sM#7nsBnjV*6s+0`h#JqQikmc2OZO($O}tW=9Je?)gLE@0GnJ6; zx*nu^iWH_vE1`)fuXH~LCdocve#@0ryf1b{g}8G6;LyBz98KlP*5)$AHG9-mfsa+# z&3s0SYor<1nvK_b^F zDpSRjU?ZlDLF8yf_E#h$ILM$tFXY4?N8=cXE6|ClJ_Nd7LcJYRjbw%G(oNM^9ktPf4DA4m^?FI$8YSx3$MfOROv*OR)KY z+-UR51TS2hpP3}g3U76iAT)l`Bw;aHIHYSlQcp}0ij5@rCJFZw=}Z!c?|Gmd6`~E7 zDbd6vAs2Lem?Vr5>Q)K(R9(x6;n*@P2VzSwxo{2^%2VRMSK^5|!l0gs@1a>AEyNkI z=d6Dt>b~J}rJk508)r=i>rAO?Chsyc{IQXNTi zgeOjyq10QQMRj=yUL4zf2^hyX0s*X;ybfY?$lO)yg zu4n=^Lt=)oiB(jq>7!CuD{&3w*baTp}}sx^0>v>>*j`afYx1gYkNR%yef6Y?MBkUQf#% zUJWoGcr}2Vk%Q78a{JQ+xk{0YV32h|iMa;jMi3*C5Sdl3_$~bWGlUaQQki+YQDjE5 z=3&@9I)Imq#3XAznVF!$l6p)E5R@4X=#9)UFG(`9hf;862qH5koz!Efck$jld4}M~ zOk##0KFKqL86xHG41qGEal}ga5?JL~|M?T`gs+0BDdq=H!~3Vt53Wv4bTquTHa}1i z^3>>>^F%M!t6roC`SoIOe(*BzTbUpHjluZrfppyYL2*!S8x&;&1)(I^$ZgV;`$k1} zDv}WlvLYzq!->RX!$4e}=$jwB43X#jVEJ5;Z_Sv88#~i>>sr8>F`s;YpgK{~j#V-@ zLHTX~!pJvsk|f{znfU<(P3H%iMrzq!kB&8Yevl;Fbv?-T_tG!B^MinFKPR%C2NCqW z+1|GK!5Z+Avq)00Tu2%l}5AE>l=s_#1pe&Bi@r4b4 zOn9q<^MfoIU{rRot_RMp(D^}bkjPV%8}xDfnibPnfQ^`PP~=nYlNFhvNJcQo(x5

Fy-zWG7=2`bQAo)m%BjCnX>W5xu5mTX`~i;VdMdcFor+IU%jq=EuH9S9@P z%t?|!UnCTCZ5ZYU6h(YzE9s=39~>wVYK75p`5=z-0})~Gced@~lj!7~QvlzQ7TUmy zF!KWdQ_m0nQV*VRelUZ@KgHU9KcBTLN=jO2R9y)^&|6S~kBG2UzV`mj?ESxjq6n(M zJgLI;i;j0+yfZRntKC$gZ(zTJpqs`6f+;|`SWL{~9U0N+xuWlgW|>Pne|0UfJ&&R9 zC)8@XObM#Rzf0CVV3(hyBLc2b!u3L!ae5|vtP=jc5>{h>&xHFZ;Tj>#I6V`VDWTPL zxe^vTKUR75^zGr@I)I1&S!R{r@2S)n1Q4WIorU5AAc9o(C zHqufOq<15c808y?jPk$x4gCD0{540byu9#;$ctvn!>n&Gt>EYcBYdXmm6es8al|c2 z86$iI2W90`Ae<3Cf|F!rkx~$b;vImnbA{ROFso&xV4$M(J!uV`-Klb29hAcpB;f`dD2OA$spLAO zv^o_PQ?jz|n zX$3S9<(2A2F!Cpj>mP-(@Q>^7qWbtLlu4!GFUg@_KEr@!vvMa1vVJd18l^T zgESxVDMe-|k`W9tsdY^z5@UJ;aRu2oraybI3i81ZiXdy(;}PU3QaVerfiXQY<`d>K zG+5Hc@D@o0g?YtrSD2ZTBw>C4;TSawTQQ~=VUBA$smJt}NrYNqbbKz^&N01+u5U~) zK8fz!8Po40(HmG1W=s!Y>M?!G{osk%{8{{<&42K*Qe~Ma6m0%dbFpYRSY6#1E`_c1 z*@N8iyF`GE*&u!6_oaev>JOS2zmFGTNgludg(!UP3<}k?#P&QIzDNn~p#;?!-}7jA zkP;pwgc+x2!e8gITur}H!fJ@`nQ#ly@wqudm~nb0JY5Ozsf5J_zm@U(c|tgvC4?Er z*H(^DT3PguSC$Gp{YQW91O$4mOl|N4CiPmK!3O`*F)WOJKpdZ4L8{*2eI0|X${f-f zIJ;aDjn*TBKGao;8pN!%3fMH`B>j@B$XrD-f%!d7{4=~fBY^!i3abC-=|9SCas93ETCG<`2CK(Wc;oi_pqRVF|=?H z+F4{XzE~zFWc;265Bru!Wc;25!AX*s(xAkAoX-+OOSFq<3rb8AgYj;M%p5p-nMzD? 
zuy$iaVX6ldYq!vvP!Sxwj}bxz)#28H3S!XHu*vy^pnuFhdZ5b4 zFa9PnqFM1U=(;?HbIi`%_~avwAc&778!M;?3P%ZoRX7T&lO*F9jVtWKXk8@3q>y@C z^UZIWSc5PW^91P%8YG@?TvH;Fkvv)-A<>(Z;Y~z2VnV5_Jfl#c&8O)w)l3_hyZ?7Su7^Rd4Mm zAAa~;0c#i@%u&%j=N=JV&4ovF*D@Cr-GUmQ+;&U(EJ-m|Fe@mxZy?ynZC#SwKCE$t zXBdx*+>(51p}j`X4Z^~>MTYXT1QAi+V7v#R{m1cOLA*)1+*B73+NZ%7?)#>8c6a{{ zF|@xX3vsZ4(YzQJ;>fw(5LxG_$PW*ZGlL%d>8cz`7GMpbER1+jL1s;&h z-$F98uoN0=aLU{9JQic|(JG>asK20yK69LiXr7AbtRxW~P0o2cURyX5%~cT<^SC#7 zK7ye%=RibhG7gI09gJ-i)I+@HP`n_1NqT&oVj6*|H<+Y`GF0_zBg0)0?Ggz!BKorf zRYae@D|xfZpK(wbw};Q3Db;<&!edltnyqtG68jnb1iV%I7(CuDH%5SQ*M!9%7lY#{vn$q+{A_Y&XEYN z0<0lrE1tpVSm=kFR;U~^>Y^?bR1=Rh2Z~@t`3I555EY-n91a`&4uHfHXa*V_J&?lG z#clA!?keatxFU`{2eNsOWb}-E^~P&uwyYfW7AO)k9C%GQ zP@9Hk1wR%9{MQMvN7vu0C{8;m75UHbvknxC9ymsmi6Az!3Aq_Icm?XBnsq_Vt>0I2 z1xST3Y!OdzA2%#^8v*$(Q>C_CIXM(5<(m(QkbR0%D4Ge+NDN!RuS+`SRoYG1uexYGxHT*Ip5na>jrSM@cvIiccu;lKuhSSW9;qeX%J6f zLTRK?T&FIs=*2X)NogFBAevAbj30`IB*;-3O1h;QK}kyEJ=&7B)jJ}Zft;w)kQf1_ z@fAcAC%NA(Hs}Uo6H22-GVfO!R}hFjof5!=(jeZE!BF&AAUQ6GH3--%^}91Du6=ZT zLJ;?GYM+vJ;99gr{lnJX_Z*SX5Mkkz$el++eBjVioqY)+sD==@ zmsi6WWW|exOf3a1NkjY!5E&QO3d-BIH^W32j*pn*R14IAW`BhS)WbuXgjvhtjXRqV z(F-FR9i_nZ$=0oFgbReYG_sT$;dlvekXEJ|;rbenMsQUD#(4=ffSkDVjZK4CtVwfp z6XbaESwt-Yn9#_5TlWNQ4nl$xk~@g8(wFQNH18!%e@ZmD8Yj7=xy^4hxjH8I$aG0A z$p+9{FiquD#zrH0CsdQp#h`;}UNP`y{OlJ0jbF3(ABOE3GC zTTxTiWPDfcCv>tB%0O8El!UUBP=AL|6$mXTM#izJo_#$YXwitCNcLDwVcr5Ml%v$> zXC%nlZ7KCFsx>Xh!(jqzqRnUUSN4gwp*F0h(JAQdKp+mjLJ(;MnY?;G(ib}DZ%Ro3 z8_O9mEEkfGA*~Y+{mh&fa+4XcxG?^P|tedqurU(b*^5x0)$+>FcuG9y)cVB z2nhN!S!7|Q1SRxBMv#j)3#_RI6rdLlMB9X3n3ICuXaccQ2ZF%1B)u>=K?i!FnFLxZ z$#H@qW|9x17b;TfgK)9RWI~Ex<|dh!ud)y8GWBqDx(+12QjzpBP^~) z$)RYd7fwV5v2AHHkg_VzJ|D~l@5J!oW!fncwTXrer_GiI$-gEUZ;&;YXlVbMCZsZh zuzLuz(&M`&6a}fOh1FN4@j_4yex`L-1gYg}qMamxM6c9D#USp@f02?BgSR177bLZ} zl3Ha*{TigGDDW6m6vqNdMS&nHil22XgYfD;7b*0_OKhU4#}5xn>)p!uA*#*W9Y0t% zr`8jrl~B)m;s_<=>WQHs%oc@&yn3Rc-!|!q`d|Ejo|q;8)e}!wZADL%Dy1YnF%tf&CpriBs3+u|p|?~Q z@&%VEmC+M(f_2E=3oNci$&H@)B{GN>DoYN$)N%ybl)=g{&8jg-?FmR8sP!iXE%c>I 
zAvHP(n@pIMJ~7i(3#DB46i@!_IV=QqQI1KpEl91uCYojvJxdd<4~oD?zhvHY)tVt? z2T9EpOas5Pm+=osK`qqdF(|W@Kte4rXj{e)H~zdwy>LE*wl;n^MgWQNL)lHK^+I1I z)U#e#vnx|dj2{jGAy+T#iwCb>m_r@}^uhoU!%7KC=!IoLE`GI}aJmi@pcg)-JXuYR zDd;UD5Sw!$D3xCLTY?VsLK_LRR+8fcL(C)}MlaN*)C;Ff_3MQUv50sEyiXoLO0s1R zYO{t(cx8SXg3*Gi;qTQ8ckQxGdSOrD_g3Z=8~d?BR4?3Hu@$}WjsR~=(hCdVuX!3ubj9%yr*5OzP6ICxtZuG*IDbkqBgUA6yTImn&>^9~a?$eQCv$*4g zrhYwRT44VmwVgEGEY<`xNK6w=+VX1^Sc}Fi+fW!%T|rWF1k;dVNSz2$Xv}TMT(B{Z z0TR{EAU5W{9ZMi!<7!g$@41;hw&VT`+FCo_7?S4a_V15QPTh`|3hv~d+wr|h$nD=R z1|hc{kHUku9WP^L1ln3xZY%Bhi(LrNcD(Y2t+eC!1en~8o8hnRIP>5h+c7_y_LXW4B+e<7 zV;>0C;Tuwi4UH(dX~#z+gJ6{%MBbydE#G6ACY2Y&T&2`xw{o=3b(4ftUNB?FDJj|d z!H^0EN$sShiuw5$NYyE+v>1G6}pUItLgHjPAA=r6zRT$MhjV&Af#LWM`2xN zklGx=tn?v?bn7(TnjrF8A=t!2a-DP==?;{)K+}~Vq?@bh_776a)N~i^;Fh;o(_I!s zzKzxZK9BaKJ3d9a4-2e8f{|o#U0R%;%4I~fJ(yy>U>Y)wh<;zrBHZ1dq=wv`19T{QB^$K0kmrmIZjM+p z`Q9|rDu*%2Af<9`7mwC~*PN+%KaXE{z&F+{+;AG`K{s_z!Fz;F;fFyZ7Z1R_>);R- zk6$>C1HQ>`o%Wr1vQ8;M*lvhHl6<2Q>No8v1j3g@=?iDIaWZrnLXl8->XKC$x}KYV zUjF&{WAewI|Ga0vrdBzAh><(WXYw`wR~=v9fnqx6^|Z>zITC&i{KKtcsFmkWX2{Zn zj*D>j;OBr#629hk!BbVp^CkLR=Y)S;EAerj;En%q&J!HGqT}@YBB0vf!YW4Pm!4qbgd^eO-IZ=3zk zzj)bshNj7-oyGa~Qg^o16AHsvoQUL%{aMZ>%y7;ne0NtWWqASNTOWOQA4^F0JURQy*TgUW5wZH7lEo&A3BY{I<(#My-VIcF0V4w2IqL(x-c zXiqVc>)w$%z-;?lg0nf-hpO7ad9>sy+d+fG?v@=sV(YjkBtHM-@q&2>f=jiSjRD%__)lf2a?e(}!rYlC*M7pNX^?SO?=qjP>a=Ol< z>jJt8=^91XNpuy_l}lFvU5C*%gsyD54yWr^bR9rfCS7~dwF6!K==zSAKD=z?<@@V! 
zvFX}G*ZXvJ(DepgtLb`?t~cpwrt2lT=Fzo?u1Dy4ny$sH#aOHTEYq5c2fN<*%z+O> z>gaiw@vJ56Dr$0s+zihJ=yM@ut@Z$P-|}FGCSrJxeF%mFW}k$Mi%EAe>n6ZE%S8PQ zJ)u0#`43CTO+6&-jqF;I9d;&N9?(^X85Zng) zR)c5#kTsJ%m}^bg?@ic+p0Kk_*r_J$WKY%Gg<1qUi$Z;8}wuwRq=W_w4)2!kl^h|7VO{W;t$)I-48OPE?W zFVFJwB`-CegqCPT$zh{J+-#ScgvKk5>cKuw{5IQXn6N?(DwMM8b5RqM4AKpGY@oU59Y{ z8eXozg~?vbhq1hz!^@ex6yrk44D+FYmlJsz!po7kFzgWANimzA`||QD2{H1s3w<(p z>CMX)ltttpce@`iFSEFPjQ2v5y_F1E!OJTWvcX=;hh|=ycp-o6CvYJ#$`7;fH+ue+ zmzff>!JaPn&GztX6)^?RV2iL~g1R$U(U+>bi|jgA-BP)y!L8~n{c7W4LK&!L)iR%5 zWnA?B93|v^55%%}<3;u71}UHYFJ!{ro5gLsMzSGtNclmQq}JNuW`SlQoOM#8I1!&J zkGmZ1g%vR5**~Liv^g9a8kHe*Ol}>29LGV>TKhBxgWxo#dXjngBOi`34_8W(8|(w{ zZ09kDlXy9rmuq<0nLwl6@&JZg8t;+n=KCZ&N8QC_-(KqGTPwS*RPd#AZ?FD*X=HbS zFzA#Jzh2$tbhk6XcW`0hYZ2MLRVmOKYAdv5hq6vRQ{VM2Jr|vYm&tSOGfYs`s$BMqexQQuMw@vrO>K;V*Xmv|P?W}a_ z$-Fdm&mrbW#R#`^;I==!QWDG}aCdqfp={5g$H$0i@24Jv=>HG4EX=ToBg11~;|PjtIAxXn?5OE|&HA@q(08 z*0QXzmG->~>LR7r3A%?M1l-I3+$Wp+b>@DRxsNmVG3I`jxu43r8PA=hZmI2~)h!G< zSlxL{bRQ39SC9V=aNE6j`3{YOIr+le*PHtqbAQ*|-!S)=&3y^)@%g1prBU6SzS!%P zsnQnP)h+UMyJCdd)#?_e-eBCU!BTY#JMDJ9$h{`L#P==OzGWm?U+lajqSbIDppib%xcoy#VNaHy|sY!}cG^V8T z8;}2`^ap``O`P4$J!I~E%)R?E#cekC4d%Yq+*h0XN^@Us?ycs&(A=Lk_s4mU&lfSA zt!`-#D_J>zhKFtOQq4;xFOzXW1OYv%buyZ4xS1J5&rGR!R^7$)zm*uU{bY?N68A@q zC-r)jy1R(|wPJ+rzx25Ks=Jf4w`ijk{1F;gXddXH^R{Bd|5bGlBK^g1vns#Q{0R-p z5ArP%zgW`|1@@J?OBwGU>K5hqzPd~3PyQfoJ@eI~ZW$?^qiITopDJmh{OZ@A*kjc# zLU5S6rA7Q&-F2jW7V<@LJyr83ygEtU!k@1$m9!6Nx`lV+j6d<`7=O|~)q}Z7X-d6Zr*0|N@73MO zxcewx%Jn<-mvR~T5x##jPRcI)Jq#FofCA?*!S_6Yuc=#vHJuo=u=(m&%EUeg!|mrT z6lwx$XAJ-}OF`uXy-d(d52(q2CMZa%<3WP1^?>d$pi>FLDC8_p*dGZ#$^@L?3Alg( zdzpZ2Pe7Ofy-mPQo`Ay{@W~~b#>NX0CHW--USmLfzT|MJx<%-pQFl4{^{BdqnI9Hrr*09lrHT>vGwPPIJ!Ea`BjaA8aV776&~g-e_$%^uf%@mte}=lHOc8YtqW_KRmhuf&cMkn8QFk%Z z8e+=*`9)H0$=3n!V+Xjm=Bb2n`l-8y?sRpxG5*({^gdO$l=E+-2Vs3n{hEojG>uiH zF;(3{qfFi7iFruz!iT%zrs6si;k*|qPRcMS-^Ar$fbv6n zC*W@9%EMZFkP;C=%2u~9y1#^PwlnD$TW&8yld*ft!%Dk}?FXKlP-CR{u(l_uiPgr> zw0Gh5OUAEKagxjH)h*!?1bV3@a% 
zVns4~tGk}zo3*KTlKTTSp7`&pZo$8<7^#(3xJhrJ`W2H#gSw^MYn9WIr+3sX@un+A zr22XFuVCEk)nCedxuMCtj8(VL83{Mj9HD->44K59k!A+L*VH^xQDHGq0QNK;N!>^itYt>yxT9hBgdEFD<>Iwf= zt48Q=P`A)9@*`=?RJ_PWT-{QYQ{X1;>(#G@Xt?pd<|5Dw;pcfS{aTlmtFi)zxp7cNVr2DqI zMd`h&?k>_?tZo@X%u}}vA^rh3yWR^y&puy?iUv8|;~u7N$@CvI@+=a)Qr&sX#0BaW z2A-*IDg2HaS0wfX_)`Lp((q!^7@%%Rb8mH*(7&I$>qvX)xhgUAe@5Mcc~srf|9_xN zE+uA$$NvTRGtEUBE)1Bf?h4YrOtF&w`RbN@olZ9tUkuntY)QxS%RcVzIOb(sM&d%B za&v!$As4@jOExYHIsFCRmv(F-{LEKyxrUdQaAEK&b3fg{uW_O+*YFuD^j`3qk~!!N zeO_w9hrF)O3nY9=$66D9wuCS3aN-X(;W2~1-h|&`p1Vx=w%Yc@a$egQ8jb(i zyF8YWk%s%5_`d}I#kfC@{~P$fg8vfyU&sGt{OJ$(=ob8!F+TCnQx1C>dHnhumCMh~ zy#sf94KJ&4Ni3a_bp)QJG%24KVAr0sj(KMwqJ)F@ALXBwe`X~1ef}9E!`Ar~k=Xm; z%Jn7TczSj3ozmiIQ5+WCBrKYg7m2+awsy%{vjdh|_HMXkZa%6evS>XvBN&02DkuqC zM?K7i4frHF9Gj4t->RQeevTjA+hb+Y=b!i5NH>=&VVTS(_X9_*hLMA?LL(fG6}S6q zy=;o)9Ez(jHr}?s9EmnM$pJA&y z4reYKzcfE%(2tm7NF6#14aW=0Thh|jJ#HT{3LHNa7ca-tgny&R!9wd%es)SO#=Zpy z%fZ2uu@I#&_Gk(Gm611b8HklW=9d{$;VzB$;X==HYOJ5WE|f+bn7pWPJZjq>~<6!Dcz90#x$1D zJlPxqPnGR`Dw})fw%w3XwIUQ(V25{n%dwnk<8(mcV?gba>E4d57I>_}4SY;(6eLQ&{QtZQ6; z=xXp-Y6a{J&C^Xytl>BixP`m749#7H-O8G=^^+dSCYzNl3q|<_I3ncb;vhCXpsl%T z5w;$IHvrv6UaLt$u`-7y}A03_N2Ws+~WA~BIXL!Zqd5+5ebVJ3#M%%;5;G>N|3 z{q<;7Q>UPr6~?;l?GVB>BHw`heBQ`vFtI+CDp_s6k2|_e)?YKZXsXO#$7O?Ab@2(_ zIx0JWYpe!r31i)4y1n)^6l{g0v%&s7nkds-%;t{tpT#}VWX}vAcU0Y$zTo9Y+EOQ; z9!~l`zU(9J(weV* z+vgEr2^(NWhON?uy4P#MlSAO{$@41S(fAPq%Z>4 zPO9cym>8u;Fze=Bd0zKVDc?5rUwx}qOx(e~X81?lH9huVw<5{ZJ=nEq`o13Q?9@Hj z3BZNpP7h}C3qR?vZa-D^<6h{XXf@qU?pXiB4yz9@+lddxPpsZAaY0o3Q`{4S8P%5` z<&TQQy3G`XJD5;yPF>$$>LC63FZ*%|vI_n~p^5Mev*&H6DRbmDcwc%~gIT^x$ujhj0{Fe}l zb6gI3eMv92JiKT_ZxX6lDBVgVMq1dB51>%=4a9)SqcAp`k-V0VP_&-Z;xmL@_^Jen z={{}PuubRp4W!}h;~bx%Z2yG&Y?=+0Yx9^B*a94oHL?OnOLJlb&>hT=v(s59It0m? 
zJQl{|n!UZ5J!|1Ca19ohW*n>`*^GS?8XyITfOfuXK)5L#6m)0QNL(U?qT}Efj!min zl{&VE*p{N$bkg(ft@$QV)}-p9_=7|h#ikjs549sukcn!JDb6%wiY-3GUrA>HD9G@; zh+~Jykj_xmF__&#wH>}u`l&ks$d6CNxMDKK6&RXSZoxSG=a?UpR|~kI$wanf0)ras6l4OJ%5RW~ z_nA>N2H8N)>>3A26JhkWk^PuiD}DIu6xm5_JvG)ZNiKeFTaWXV3rNQugqdz2R5ez! z+TF>n;Cz2qFc35`38p>^MHk@#>PB^p3JD&mXHW-e_~cAf6}~s4GKAhu{@yInfBa4Q zj|Jc@=NHFxj6(|JYMwREyLY@J&%yDX@ZLE(J@ySgfT~>FJNHExRj?=d1Cl*=e781& zwwP~(JQS@!Xe7QH#&>^2>;Bre1MygdndT17+xV&fuSlkwRG_PJN800O@A5W zw=_6xlC2dR+k%s^D_dY^X}Mv?VO6kH{ti?;L+Sv_4?$=oek52v8K&MYzryP)8L+7E zz;ke4G+Jy1RbTg6o*EDzuvEh15&;%wNSGnrKZ*2x6zEt_q)|O7NOpXNO#^BqClb-0 zA$#P>7(i&^ZdQa!$2`Py=gS;43 z%l?#ZytX|6;mC^p9-haAu1^2x^BwT9s9K$zX3IMV5)tf;fh@GZZhZvzSBcM;?B8cd zy%febz!uD~b++hs#`P;(S23^Tj`Tt*_tjd`wemhJx*)e zTSGg*uRP`CcVvU9=uG%IA~BSr@LOEGqt&5lX?W4l$$1A%=8@Qzd`ua!aXH>Q+)dj# zMy=Jtfb?C~aqx0Cdpb3oKf=aoO~{JG-h(x`q-X#}d+Dt&L26>3%UZq4Q8+2hXg0Eu5stGM-hM0M|o%-SI zV*pj}4tnc$psRaJtnj{%2)GT8FoTf5^q~W;4)o_Ya@WM0>SHVkg6N zfwYs2nUc3sz*_o%!jyK__RwJAId}=jcgLhJljag^Kq`iAH<`&2emy$0m9hb8;jxg3 zAySc{Xcht^S{=-$QwMOOi)PJxx@e>S2d}`Z+`<@4Fx1M?(Nd{%IXtv)V?&RN8`~3W zNGmF)5NACQ2Hv}~3r-fCdXB_S3`M_!N5ncgGZG&KR{6`Ym|?NAFz5MKVXVb|2pZ9R zBJI+9kdXL^9ao_-7ee_QMXgzi3s(Z=wh9_Tz&%-nCS1|$HAHcJY z1Dy9)h1T3mWY2vOICVWIH+Y3riD^V(Y;Gsg(w11ld4ZL_Q-4TF>kfQ%wXhi6D6$@C zCQ~S1R{GyT3r?`Gll2hRnTskazQ}5=U_<0Ptkw!?hiHt0hU|(+Wj9pF7>sX`TuCA+ur$j2nOO^`V|AhE zs~`pin}Jxp%FcsOYl@xF3yd1n4NV+2qgRw{eJ!U0qrrJ+s=3RI76Gz)camQBGa2_4 zMKNq10pB9nC+qw#T0+t7m=9|tTE$Ql0hY-+RB0N{A@L1P*D(2Bn2r`msy(TarGSYl zK|(2Y$?K%!(3LSeP53@CU-@hfwfHzfe#hJkQvg);>V^^+Ej{zUYb?^USw zqo0PVwu6LpRG@Tte-xj~>S8y8@e1`cOXLxe5lkvpkOTHQ_3Co$49Ug}%AYXJ7qI|= zrmhQeS)cwom&!)!tk}ZXCzu^jE!fBBG7DW8*$1>(D7r-?VNy;<0irnX!!X_=aPSKI zCib-P85+$~faM}{P)R5SG?ia;WsDP_LH#Ig-dBRlm|(1+KjpfPI>aNgMvaO7GWrUf zvu`|xnGg*}u*<1kH18Q5QSO8mpHG(AX#$=v6N4|%w(Y~=VIPdk!e&&C-3eakoTIbj z7HqR)Z$J>2uC3LG!mzuJVz;o`uEibw5no$#LUH>?cys#ACcFopOpPxVzK<{rN?#I zRyXc7=>(=jWb> zQ5ZAU4S+PD-~S{7o2kvv{NB0TG!k3{BepBdVU=B16E64U+MPsmF(-K*RNz> 
zuP#=Jy4csNizXa(?d7_%TvxX1%5hzXxUNC2>qys?>$;A2U3so+sOt*5u0q#U?7Gfy zU87ysd9JGjt~AUoS}QMhUE^KX6|Sq)bxkxbiYK^?BL~G}(WNj$04@ z$Fn$#Eg8)92BE5EZZc>cbpb=*9Z~P-GuYiMVlBvG{REo*gQ!dF&6J0_LPLWU9lFZC zZIH}gb%GJ)ev`gzlrxSstyu=H(Fz1dV#C{kw$bI42hoF(X|a)FJ_UexY*N)cQI-KG4{9piEFw|Lm&~)F7T!DVtG{Bg&fAiJDrN!32?V9%C@I79H{0vkw#90l`Yj$N7j4f6|R$NP-o4 zSTyO?=3xS41yls4<2;7t%lao>0%i^GNEbE_Exa*%5=U26bqD3I!tbbaZSM^nB zMspQL=SP!nV_I5PKL1z*6D;C>c|h0iY=myxS<6i6WBEVen^;;g4DR#v$Tn zG9$JKS>V9dIi_z>Ug(|{9@E#HSA|n{h9WU6jpHeOvO4QX<}N;;Xm&BV*y9kYYk!6Qj?mM$3SD8K zBMJT8R-um==x9Pm3e;P*C1_(2>x}FY@G}p65M^gHWrv3n0T_B)U^MR<#SfD7XBIPQYkG;vK`HSZo3Og{ zZBnn6sqA3763KsYfG2sE;3vjcNVpk!7rsT-v&xu&i zW!Ix*V!(<=8D>oR9x+BsWNeQQswT~3!8v}=gzw_5>5F&BS7C{(`tngIk3# zQYW6jAnrr(hq-7+7;(h&7sSmoI2&QawG)J25EnDJPJ|IhJbyvld4jt=y9;5&5zn7l zx?z@@QkOm0i@q(Q%RYlHn@3(^Whs;~Rgwjhy~KNPsImtUpV(f&%E39fOF3da*cPub5w`Vl**D#_i!w|q}pcX|pm+dj7qPqxEIFFlE^TXir^ygw@%KY|Z!ONsFzU{m85p}5 zU|xtLmkvFgm5cyrzh+7A>3ZC#1P8ys=qwX|qWhl9&cUOj4Xv4VfSQI!Rj6HpdL-#U z=`Ku|PY2GRkYU35M88kE^Uobk5h_6{MhP--8i@?8T)A&swBk-4`1S_)uzz(aN<5*AAU`jZn@uwBlqY})|@+(0nJE7>l5|U7Y z4#80Ldy$OivdaTG+qx3ui8N$ug^9)3`~{WF41=pSIO0>`#v5FX!4aPd*KTmL4379z zxCVo(GdSW?;chm#IR;03D%{xyS8s5{r@|d#aE%5>d@5Ww+6Vd9Y;eSh_TYIXyG97x$La%@Ms2zm~%j}BU12v&NxXQnwROvo=b^2)?m^=kckt^ z^n`B>h6`LCPd%=f1)2H+wPD63JM~!tsR!hN3GaC`x*f*p#}k z)kt9t$Lbjz1ftqgza5BjhKqeS7jsS6n{ zxVgMg5IcmEzI+HpPm&<9w#CJ|hPrPjrOHJam7ny&n$nJ$;I_V}u^!0lotE|tTLO>x z64|kin*Vl2nq9pgO@Xlg;12WOv)~I<%u!R{LDBhr^wWMta<9i!ip=cMqttOiC1~=odQ;s*W zBvXd)%t>pd+O^=s{^8*PUso==0n;`eU`m?SCEhQTq`WXxrJ=mR`n(Au218XAio;wk z%mqJedvC{3)r={3lA&rl#<8ZAurbFRLsc^jRm<>LPJW}HZhcjw;vGX30D&31D7(p! 
z9$AMZZ2=R)A6o9Yn~9Kex##{!5FNtog^uybUErB)s9LGm4G%iltr@E3u_TztLdvvA zSwU{r8AO(Y$O$5Nq!4N3D53{L)rAH%h^Ul?szQUvb?D@Q_o5ZNyg!U6YZ?#T>e*2B z&7M6Ps*Y6wC0%2v8s#@s9YBCFRGr~9RMm^nilItgJ7EDC4ON^k&5JRN6Q%sB&QSC- z)FtLtxdG$QN@|qGw4O~>%>bZm6)0N-ACKgE`W1MW`g@vx*HpD967!m>c4F>Qn5u66 z`;RbH{Z;J8+hnR5VxZe#s`|NsZiA`nQ)nhO%^pluawZk5OeNrFJJ}4m9WC;v{Wh#E zJ({XUocT{tjPE*4ijB0aH~o{G7>IYExA`W1-3Y2vgN8v?eiC)zR^S-FN#GrYeS|oTV|g zU)1YN7J8bc-4B7-CR5dSI8|etOjY-5#bmNZV9%3^*=AGKg@~NmRFx+oiCJ1RYZ|{$ zbRS9Z$C|1>l$P+En4UOBUZ&)zuzWq#JcC<>aN^o}z{L!%-QbAlFG%k^gQMw*^x6r+ zFNizb;5rdT9P#`GaoY**KWM7DKQm~m`ZIj~9aB{i;&b-3&8Di|O@wVWRegZ7YnZ|| zo2ni*5i&5S=+RU)RY0z(s?>-wPavk*I_sp<*_q%u{FkVgM_iKbU<((CV}2ZiV=z!xyxv!2ca#LV%;MAgY!$pJp0 zl>!EOzww@?Doa%#?Dox~4DB7dJN`XW)ga`G8R$s~_L5?5gQ+TC>S-HHRXj`y6;&sU zd%E5*uGC?iyi4iPRP`#d<5hwsCR!-^h=e4RphGYey#*oUbvdw97voKwt(&S&GqEa6 zEXL+9sAPU^aMcD!d@9_Rav0Kc*);}7d@9^O4ZT?gM|>*WLk3r8aKxv=-C%HY4379z zIH$bz21k4xEYA$uTGU$BlO-&Kv=Wvu(ho&9!Ny{YTE-H_EYcFj@E$B-^AeGQmatk0ax7sL zTepN=-~n4xs<&UEJSr2P-+nUoEB`?_rZW)NwKkQde3F(&(|$juIZEuC(2R2U^0^mG+6R zXue8|z92<>fcL9LCE4Ub1=FmzQ7hvJ`Q!RK|X^uipQTyPd|6 zhY^DB`rPkWI}XJjhl!_4ZST`XD%*}tKJ+tcBB`$qtTtt?Y#{7uAiS_08qR(X={QSp zyXBC%tJ8-(%Ww5$rERERWl04W)Qa^NH?HNxElY1)iHWB?y>T5Sp12T_OK(BG1)ucU z$S#f9ukR>aDQEhvaNCP77x-L6)EHjQ=H(1tPUGbiT*&H^sA-i$k9QDE$K>Xz`;9{<uTCJXwgl{j3JCkG3D9M+eHn z!hV1|QE#zqP^D@1y{I>)H_a1elDetYgaP&+DK(dd;choIhTn@MWA}>qT zSMv7^+$_}->L>a9yZEiN?^Zv_%WcLlCims`RE8nHe=yJ2nEPdNUuj>!aGWN=a-3=W ziVS8L!@%PcaJO@LsYQwGTX~7`QiY2vPzSM4yArhvFB!b_=EZIAJNGoPT}dbpU# z8v3j@4=efbX9iomRO3RvRmgp{T~5yl=HYS^coFVQ`7T4FmNafP4>$9H^24MkKlViP za0MSOm51f_zCvrQJqpjHb(;9M+QZGgdwccYjJv&29$M{BjOWHQ2DaL78~+vN{-U`r z#@*hV%w9&Msh~&b`yem(@p3mWvv{fHne!BrL2czm7)`(M6RNDyDXX88w{ zWfvF5^We9Nqd|9{#)92ZELk}B2|WMI_ddCcxo(c{Qt&dUwJeLvffsWD@Zz%F@R><| zcbjZRcz)Ll&)X%ae%S+z-=FM{I=4?1p~Cqoxa)iEMvN8GJe#uqRDR-3T@pWY3_n*% zg|D>RSo<&G;^F5{O%DsylhPYx(i?BmD`9%){%6w5O_5%%Nw2H7%1bBi_BvcV<@2ys z-Xs6e^8(JlJLY}A#?+5|W8d*W;+b!lJMM6wDUUtvk30U6zA5*|O|$Urk@G$Jf8YMN 
z%sKRx?2r5AcWZw1{t}5{1f)a-4$BO-UXLL?Kt~W^{~Hi&cmts*Bt*J;U6W# z;oppJe?0#NLuj1OyMF@zR)GEXE4XYk|LliS^RMET{~7*4(>eS*^P3;fzt6$*e}jL2 z2K#M`4finT<48yTWV4B{WQ%65_T`=v-|;KB9QH8r^tw5+MH+Y5&`;d&fjI|;&_?X@ zRTx|6ZjNSIX^0>jHRF3X0PclBi<_ebzjfmV6Rq;Co4R&J$D%IIkJPoB>*BX=>c?;0 z_^o@tvi(-t04B?2gP8;RC!l_Ip4!@++FyZ}BZ81Mof^UvD2BiLEp? zyU2jeZqNAz=3GUwe?{V4MuIm0iM_L4L`Wole0F$1SK*MZ&`mVIkQP#xZy8(?+;NHV zGa|9)BLluI8uD%ErZzMj7$*W`QY=o+mjy3nlo9_R(#X zJ9^>4-t%jytpmX|ezpAmoWwRr{_6H@g0vl?Eu<#0m#fAsDC0ld-0zb6T6?6yjgb3VyVBq%;cg#oJg>q7NAAoo6C1<8J>+4t zeY(WJ_oukK`%lUE=?C<0*6h^!H+RMV>EEE>|DXQ#>fc*8N|XOT{fkzq`uAmUe}ewKGLfeq^zU7ppu!#i*QkHT%>GgO_cU}0Y8_AL z-x1^}^lyROm)qY`M4^9wBhQ=dfyRG-bKg_$uzwreOLAXp_cr)%zd-)x8PCt~K>f@7 zQvbfsz&qt(v;7(#nDPs_d-U&dN7dhr<-^-_-oa;gVdk*S=N+_zxk=|83?x5K)-0f( z<9Q2%wmJQ)epLD;snUNK+^}1a3AMj4{rbOdbNT=BqsqS_Rr#6z$)5CeB&Pg#M;#nP zVFIzkWL?;ePu;`++4CNrze`?B&&Mdwzkg~Ie7FPk^U9xX+Amx70j)#C9`^zLZ$Izh zj7>ILLaY7m=V13tRD`{PuDjRavJ5Vox$_?SciQH>hkIC2XRpWQcDlyexZDC4 zyOEpdbEA1E!(HsF7|*-yXDjt}=pX(I>uc>D+gM*&;I~YW&+k~W_J41EjsMK4ug_Rj z53xO~877+Y;}u`%`><}IGS7z^mstQp2aeJo#+U~3kafMSc?43G$0dvE`62A)P^ zC(XU@grz6kDu>d+CsXZM(Kb+P6xOUD8v2)7qo>??q ziYwc7aZfMxJH&Mja$QH#1siE_>&lC;Vjm-_`%Afa6^D1pH`dsPNVHXA*(yG3++S8k zPszaMyJuyj0(>uPc_;6fUyaYnK%l4nEZPJ&Wk(X)QGi(Kg$;i{a5nRGcHz8F3HAj+ z#r3d3Mwt@qOM-p3MsT3}TVMYR*yW;`Wbhm$4^k@^%tXdv_s^=YeUNJ0ThC zwR19IvqYba{p3ybXFMivyWjciK=EzgSO0e~2FhkK8L`IV#qZsnEXSdBf${~a;35?n zoEbyp2_KsavBAsTw?Vd9*yGuA;7~w{tgG{ogQTs__E7R+kk3m+zL$hg z34?q%D3za}W+T>=V$emHWNTcdezOpYu8_dV#Yq7RBw%>SCYtIb~3qsUGlF7#b5$sH%f2id&Vu5vtl_JK(KJ8FujlNCT&%Mq*2X5k8i@-+=wj;sr_UxSxU8XgG{jv7YjT zqBT+!$sM-o%kzYxC~OU(XqiG-b&>e>gZvP-j!<-rLMX5}8pf)wfrF-+Fhw-7`TE=z z-Ki^V9gnvvej$03abv3rV~2YvhoW~0$vvg`a!{Mo=1PoAMq*3X9mMf>P%(T>=EgN< zJGmQsS$MY_cBICRZ$Ulr2;sugo$=DipSM{O_J@=#oyxFSKEgyrxO(X-Nj^V}Pmxe? 
zgp!NSM?Ws=0H7lA;n-O3__koGzaWLm-{O7Vej-xf%|Tw?RP7_)tL=aN3qu3!Un??K zWL;HCl%pz?Uxikk#~P<$y|5qkcvHPWdH58o2u+?LeL~R9^IKf3Z4DnE890W~S@qs* zMVD%LDcZz%inXUPxHL)?Qw~h2!LSwh=;RU8I=r0W*j@myVfG9+cB0JqO$iTK6}o9E z)+lp2`N4ZI3C55kY;u5mX}H%a+{#Ur$c|JPEBNd58^+bfTNvv=9gG)+%!7&WK2+aM zCe-o%Qcu^FLZ7l8u}d^y&IWWiZee6ys#ssL1o6q>(xf``zB4>zOQ>owN>qadM-eJ; zynK~$f;lRwIE2lVxYyLs+!yd6`l6~AhM6&o92tx4SKr3EXPhpRpWB9D9(6Kfan);~ z==1P~`ipRpd4T4qAEa5En&u7g-W0kz6s~NQ0~SO5!ksKZ^tY%37?)Q`oq6>5a;_+6 z4LHqokc=&(s_-4Dgyh{^0Uj9P!&9|BDLv0SxwzDlw4F%&9zl6~mU0B+i)hk{47`W8 zpdeo;hzA)HyGIbgU$F6ih;rg!s2T(TMKFKC2Dy}|)@elX=gMR|CDM_}K3L{OnJjvT zGASwICOM~6N^Ee)&ycQQw#OnVBb^XAvLzB1S`f8n5!4hydITaB#g&&Tr-iWsK|8`{ zjciXPr|B$Md_e^Y1yOA`=m=i_S1o{_u@A;%-Jf&9kyX4e4wir%@|C)~*W5oTva;74 zw)ccka*2Yo*WBW~s(*%}>F_bVv`cE#wvc+OcaRi!TZeS+4(ZIoC_gLIuecr0V>?6r zMt3sz(J-n7XE(6S?rMN+RFG36Dy2o%W3yQFLn~BRrH^)1D2267I3r@{Q&Cx0735|<@!;WCAnXL%{(rJk3|d3k`B3vl81qMjt2k*?-pon>D{ zQdfZ#_WnL$;y(6BK93>k2wi8;^;^15q3bBRD&Abvy&#LFgW-M?G^IS)Z@mFmrtyB4 z-h8kZ!yBufDUJ0St!lpx4|41!-sAIGZhMM`Q<2&e)ZIn* zgN%G3JTU#U|Dq8n%J!}57SKr&zSce#cl#(_N)gH}*mLl(uo=>BKZe}OlC`0Z(SPqEV?FuUG}TU84_fT**DR5KElbfGCq{z!V=FRC2TuJ)L+#trLQsI zFmO)z@g8@n2Yaz#H`sd_nrC>z!XEeW26v?T31@A{0W;N(JJOLFegkfMxdd#mTRj2a zYLTRlH?S5y!G*E!6VTmcIA7h)j^fqb0^RN(8~(TU=1BmiL1WuAHs;g~IgWkH^i!l9 ze)Nj73CNc!O;-3)E-7xXOPTyYCW~sgh=ADh>(Y6-8VNCniRPR_d6ta^su3t0u7qKy zJKiZP#LTK(53?^D=P+b{h!5N%A2Z*xQU7cYNca|TQchdPH`81X*0t_sC)3NdvY3Yb zJl)9JUTAC`H%se!e%){QJ-|w~uBY18bg!%R(LwH3`ODBTNqSPzQbw_q9Ss;Wg*@kY z3K5Dnf-FkbUkehd;+fNS5t8Z17bEdcsb+JEF7p=5#rL~HcR$Xi| zFwG(cZLaF#$p$dKZfdRI$p&-la@Hnv%uy_5Y|aP^D-2OwZImg;OF8f zAn^uZ?AcQ4@uX`N;H1o(Q-S+y>Vh(`f-|iu^|1aKP$)u5@ZOL zq#!tkJ9EY7Ne~Er@{$y%Kaej+cdL+g(k$4$3 zqkTMcTe&m`DNGz0od1<8rKA+jg+D*5IxfAeye}HdrJ_(bkWP0S^EwZM&no0Lc@mM0 zN^%N?O*0P#TQXCyWml~v2GJxw;4{1SMPX|jc@QXYyOd{v1etnj7nuJ9rzZq9Pl8bL zlZ4=8*n<7Fo{OYK%B%&_g~Vts_CqoycM|N#`LYxVexM0T5E6Xt1t~$DApHcN)DlRL zDM5V-f@`Hd=1C9;RtUk#ac2#}yY1 z`Kavjj#rSHyq|@9y>=$$3oE`=z_w2^L|#Kvcbov^^+4_wm?UCP3uGb!kk3`iC}T?? 
zV;@aCEjX(@Opsh+?`KIr_^XWdyN)t;k#^doLmB(1Y@Xyd_}Y!{%A{C06#F3Xqa;Yz zywWJakgagQ$M*5en>9Hr2&?A{n>hTn5uH3Js79s<;JQI9;O15#f;Uch(HAJkIlw9Z zrB8I{L&s`HGm`m#`WZjK|K6}h{{a6F7l`5iXlAb`{{M9+5=-X)Z1^kxZ=3id_#Z{J zf&Urod&qxChQD4HOMPG0-|+vr77o8G8CDFjIO+&RBU1G;cEIi`94Nqw&!HvGRpSk^ zilIgY`+jj^S67%BxUcIKu!eG9*QrZ5n(<7AgU2>Yo}BM4gBx~S1?Kguk4BvBNb&rD$4C5CjN8@7(D2wU6Z>~kYmu7>1k^03m55cS*N9fmib6mu@kV~%?=y35n@zyJ@9)1iAJ}uw%rno-JoC&m z&ph+Y45wY(SdyF%6OYa+W-cLKTt8NbN8H>Lv7eiU5=HEL8~HHj6(JmfN=J!6T+Hsx z5N^ye=aDqnqtjhNu;6OW9w6XDu_X8gzB+4AuAkV9f`P?aLIlIj=cx;01(^A#W#x)Q zOi$WXvwwOLdx#2?ja?E;RhnVW#UGJgwiFMy(gKOQ5&@$Qio$t`!s>~p^E;Dwrf>lb zs#b4MOs|-Egv4SHVK8@0iL6!OPiURDrmCb0>|%T(U;4$L~T;*s@g5Go%LdB?`kv z{c_5_)Bqs$XkW+j+XdI1D;jSz2`Xydl#xQ)uq<7qvK1(uEe!31z{aZ==}JmsJhc~ly)YXBaTHDM8&P8gvj1X zcr78UX9S{bvyprk42<3p0fDgZF^)CXOCt8Zed6@x!|jiW72P9dkL<=2i{8mJx*%Bsc~g@(9SXcZn9{ti&gDH!GIzspe4Ng~}u|5Ji4 zQYPZcNWFsXR&nz`F-rs#_S02?5#si+dWb9~eI#kz{5(2olgMmjEaVNdR5!_3MQIM0 zBVb`&`a{qF<&!9o@!O@efi;I}`*)HF=Zg0iPOc`B+%wjs;8N>8I!h6)Sxon`%2J|` zWdd13zrd2ktSyht?hV%qz%66h{HH4~ zoV|pqTe<{!jEZIJx>pLOGqNeb9h*vXTl+_sKQVF=O=}|yy8z|PuFS!bDbzLCdXk2W zz-N_Ym+TwK@Rkyit`kZI;l)u27QJX0E~xt}xHW3eqok7a{iC)$0Pp$^18jN4cmoC{ zI}SIea9B&Ld%0R#TGCTEp5X;=9M9An$20ZD@r-XvIn5q!-bIzcc@|~J42E&rw{KB; z2W0)XXcbSGuQpQuz_qQJBfqB-lSC8<)qa<{5gJ)$4hl=Z4(wxPkBV~H`9*|fdZ5>X zjtlWz4+4&KUBMR)iFO(U@Fzbc@DkD&H6JHF4^3RaOZ6mAmyr2 zfRgWGggf*3^E`i^A{cVMG5|7^?;ywkd8VCO4O6Y3W99j=GZIfycm$r#@h{@%G95CQ zA?M?VsH6Po$DcFs!^)h@kF8{2L?6kI!}wDq(YU`}$8=|4SNssHlSFQCegHk$HeZvj z2;V5*8=Oz^bUxA_@ABg<{=9)7g1y3zW%^?QKmLv%mi`HTj9#QZX7l4-{4mWl{j6nR zfv=+F0rf`i8(q&%o?ht&Pfee>`;hTWme%NI_H(e%3nXQ z-yj=bDe(PtX-<5fzbg2g2|UzJ7nsYnt1>SwKpn3Vk^Ua{~adb3d<-u|#`CQaaE8QlWXT1R5ZMQh4%Ow~9Ks>3Xmd z=;;i{Q>}OZm*uG(rpt3G2{I`y=lTX1;;>+O7JlAdc|HQwS)OTA#h2SK-;s;z)LP6` zs-l5K7>v(=t4OCfT+&Y-uN-H$Y{}n>SD83sAqB58b5yS~byTk$qv`q%0M}a{udcg1 zUW!e7p=2+QEWAdL+J{NVO z9>q+N14Bi%B)!lQ$A%H4gdn||VfYXnA36ypIrW9`>XnatDvm z%I&Q^Lrg$uC^z|COyDOk$5+hU(q8h*kUVZ3u2aW)t&f=xF$s=-ad9mC#~bP|y1cbh 
z%y_mXl3HU-{QzPwX5S3mdp27hmP)AFU*pIVrFV@gb}-w+Bp#}j_M*~fqb1kyR;XF9 z?IW^S9@GOki?4N`2t9s z#S6&Kv2{Wq%S*Q^(9JM>)b`J(Jyj--iOu;&YB1^v8(G=~ZnptL zdB~X!rcLsAEor72qtVrCHl%b6~}$#ci!(a&f#DYe<56&m(4=igKJF*Q)D1YcGt1)f!*0R2-tn?+qp!^zK#KLOMujj`}uJ|832s9R*deBCpgFE?PRX`7K$CZ@1e;zYrAdN!fOUshH$ABI9= zB!`Kt+t%8+@p(0lN0xcA3$tA%zNotzQ_quee1*+wA%*%kHV z7TQ*L-BMKuw%ukmtf{{2k9%1I`O`wuvp6i02-$WZn*B8YM6EqTypiyuV7MSg;FvhJ;hX_U#2wEyr50AQRc#Nc-!q1r;6rBUov zI}p9EMGT_YSGeBWSJ;TLvEE1>sxTcOnD!D#m^E|lK-`WtsvUR$*N*itnkHMqmi9Mk z#N=m?4cu2`_eFN8LJ}^>nLL9mViz2KGKNS|(*YK2vixlO)>0&h+aM+v#Xzu-Xn&m8 z%=e;4^#iekGXGvg#Lxlh;|-8tR7y%Pu6rci7Xt&+e;zbAe|D^%^M{E4fp~Q16P&)@8h?AH;=p4?M3_b#HoXfYeyC7y8W47|= zD;@GGz-Y|)iQo(Q^R*6nk{=C(eTZ)#@Mji(*5HT8oRdpMwwAEz zowM*xm41wAP8@xr{R>=T2$~%HAM@u9|&i9)LlE zE}P94!#<+SCGC#lFoOV710>8*q#|U<7gCS3vz|}gYVWdwcReWq6s`{G<3=uGZBpFP8-H4UqVcok$?;R0&(_oPa0s z9UX`(QtyI$AAw8Loix31D7dIQxe8sdK}}hlo9D1bvr8CksSaB9@~HE}ireTvL@bEa zpk6xqYE)6yO1f`saE=iy^T@Jn-)9~LY3>6vJ4tEGmpAU=+|JL@Y~U)C$r*=F5~+u@ z?#mk?(lGyLbOoa?(b3|Y;sz1pB2g(JNEYwYF|CrIGnjU?V2d?@bJo-zjriKqmi4M#n;fV zr(aL-UrYVhQ~lR+|Fxh08u4Fa{_6n$^+Nx3sQ-G2|61k0Ug^J%@L#X>Uq||{qx{#g zd_^@tzVZHRwf|a+S2}~DA&0WLf@n&Pf_^J1U?1V4!&O~d}jjRnH}M4Dw6W` z1wIAW4!*g7Lp}QfU*C@KO;PwFz$b7)%ChXhfla7w_xzSH&cxPe`{n_F8Q^@1Yk%-D z0v{u=nQv5q^ZYxQXxNFmJdV_l@)0X*GLr8>sfgK$Is6Vu66}!yQYt;xpBc;LC4Kpm zk(!FAxIL3Aoe^tpoh1G^X0G8F9l;T_>Y!Je+R*q$Y6!v2Pp~D3DWbsSUN?sWHzNXa zG;3fj6;zhoRu#qRxXD$hpf3?+l9y}$C$`m5DfRY$>dkfT_RsGQlI@@6rSE!M?YPr!J%T}l z+}l5QfHIXE<@E^8-u=mnq?8+fQ>b(AQsgxf=Zj~=vgRr;N+XH~CCDd4!nA|%tWglcwDw#& zLxJJ>i1DCh8royFRnPCN(~et(ys&>rYy=~XcUkWTh{9O2lBp~lF2FLp4&lbKvV`$o zFMB1t1o_Lk%QFccK6!Zz@mN!cAku3^tn58w@;c$~rS%Mgd{ccUr0jkxi986Mt_fb| z@INs4J)>N}zQCSpn<;d9O&Zme^1P~DjieDP zp~{Lgk2fP(iWuDOr1p#=jn7T0ZUMu2)sj@2BMN^mKMfU_URxt&%{Wb$v#r_W8}v8w zjq~RR$uug+k|0PVR4$LXes*8aVmr zXt;DBLb| zaIkwwm6Z_SH+hg`foAJX2y===*>h>6L;id?8~)8^o69W%2;xJHfX}|k5pend1VAA0 z5i477B-bi|FqGd}0^uylqY))wY!n6bt$z!CCFnWx!4ODnI3SuPaJK{FG>l$fdn0BK zpc0`ll_|nPVKa$mshtqLzKfOp%Sa4_xj$xElCAA+HjCr+&%jtE1q*8 
z^Nj@sQiiEg342DKfXVR36y7MZQm_dd`@`R4B!w_gC2@y8V(^E*CYi|lhl{ww-+7W$ znghchp>&!Khrb7A?FfE-jQH+E7NWS*-zE`@7 zm0HvYY;e%5tL(FCSqVoGJrPB>g3CK`u{?@ut~nDZWg~<$8f1^)xc{u4TFvspqZp`B zraEl?ngg^HoT1(tmuiiJoAafJ&$9P+KDCoEHN7`0KCT;#o1iz!rh#%_?|BcQ&2{O% z-2^x&7@qDyLYoUUnzvO0O&tPc)S1tykjC*iq3qjgc@hZXAgncT27|gu`5cT~Dxv(e6b+EH=}e8Cq;6AV*p~BNa=Cw@9^QFSg25 znUR{WX~V|k8?fpQ!$w^W!^W+oioK*7QJCHc1lG||3Ag(wo>E_Kk1$e~kXEn{SX1jz zm=I%}N{ps080~pgKO%34FcD~^IMLjy@?aPprOEdVp_J9Ku!3HkRB_R6tt)YzuFn` zCbFL>BSN&3*po8vZ{`XF_0*_6bto7g9$rd|3z)PjW%s8biq*Lt)F2EQ!m_;Y7*{& zp~STLsv8f3O}&m#A--GIWg>2-FJyAucHyUqENnu4_{04~n89!-r9Xqwb~?AIp1QN$ z84Qdsn87RqdioQn>kp0j&r=vg>~ng82XJB=hmK0UoLf=4(w)N0SYAO&Pes<4uy85^ zGhIb;CC)8Eklu#qAvmV@gsUL_ydeVMGX_(Ky95nY~c@y2~E;C(qc}S#UGg1%4R+?NW^>-3_{uG9gOxS z`9o%~ia&-xz+=W+Zp>&9Gwz#4svm+hfB4AG+Zkm0w?(!qf^zW34d9PIw$C3=gR8(F zG6!pqKYoSr6jR!{-dLXr!Ad+Zwj4+!g|K7CiZ#)-yxme-txasCv1;sL#N*%40Jitr_&9j(@mh$7)7Tj zO7cIa(pL{TGsa?_L&$}MRBT>QALjU z3>wCk8>yGzzLgX2!-xQpej__HWf;E^zSi#<$*EGozLz0RimcrYVGrh5};AxyW*B5>}CAWd4iH23r zz``%D;9&y`oio8btSt1r%r#hd4eeXIf#S^5_g(~vxu0$SObNYW0{hwSlW%Pl0ONpj z#O~>ypJ{#%oi4GWQ|W8t_eic9wVU%$QtmhS2xb9-M==mpcEs*nff*AiVdr z3SACkzRc9kk6SOmDFwHC^0#Ny*n^(s z&L@eE?q_Ji{EE&katcJ)@L$g;lzae6u8aqmFF)+Tqs(FWgbQDo*(jy#hxE$$(s4QW zMmhI%%1};EUupM7r2oK$t)23|!>H`?F7)8rl=og2zA$6sBxG%0$9}!rvAMCArZc>9 z%K3ebKoGxf7N-}2-Bx)66CY;|SlpSL!q0>n{M^c)7x;4nf1cpaF#go>=VJcMz1t4U%bL zlXl+0Sfzp&2ZNU~xCo8pJi=hU@iL8B!H@ernCf6~!VPAE;fQe_Vu>$dNrvDDGela; z91EfD`$W7is{*$;hcQ%nvQ<(1E5D`8rSIciO)Z_hi76gPo$pqS+djXU`Qf$I3erOcZxY0W5S+3rMCW*V9hc~V1l9@GhE>gSsvX$u@N~`w1oyR06rk147!7m-pSrqN zNZbbJV*MP5r?aOdOgm@sqk=!D;>S(W8}BTggAT;%HsR^U{WI{sGVuOV;QcwgI~ze& zCVm(nME2(Zz-k?r#?wW-nD3|y`JFU>5>Mw*1t1N65fK2<-71V@x^tFJRd}TYyvW&I1320LN&6!}w98KMMKLLw|I|li;1? 
zdz*vclezAVu43>9`bQRv2$K`>rfqX}LOT#DR zxz-tnr*o1-rk(30FzpOy;K3Tp@9^PbjtAZ=_zudH5%>iBFw3JQ5cl86_iE=r9lno* z>*F+!g191ohm7b?p|eM=+vaczyT{TJ3W3eHF=u6dhZQuSjUbe>y5SY+q?kloWax3{ zgZ>)X@N_kmctp2Z));+G#4Z@NXGg}$(Z{1eFjTg}o9gA1gZ4+qW|co@ZFC=Lio@A-~HsWF6UW~|L=a>h18MN zt=P`>;(qd{CkO!vp1t<>v+pObxV2qezv5)!e{?^2 z63G9H`*GubncBG@ciec0tSO)xJ@!vO`e)sbd)!12J5?VG-cNoMDT;yMP-$BRz3Ah3~v(aDdt&F4HBupz9ElVz;@s z&#PEA0D}=+D(dlo&n?!!3+SJYS6ojo+u@)uIzXTXn_Ty&(MMdJ%&XqIV^C6W;*Jj2 zYG>ZXJj4#!+B_V3^Czb(}3w3ys(RKm5FQ{^yaWm zAc2vZMKG%ppg7hP7Z|X&s3mDn(B2@JZYhuRU;Of5dsVGEbC>V~bWDwr_>%y$j1>55!ex{ZIwm)LDlng$53{ z!ob4NXUsky=kaqz9qV0~UFLE@=Z3?E-9blWL>-}DHVL^?p9fp<=HIdvI6N!sU?Q2< zT4RvO+DM`y-^m+|K8_t+)w1QmzPXh7qIF3J-H=f`Sv-<_tc@Hodt3>ju&GKx3S_*vbzKR|Ru7nUgh8erKU`Z}A5HuOewd0r z6}4l=q?XrtJiy=JT znFUbc2-GI5Ivl}#?SZ}7`1Nf3rd*BRg;3J?y*p}r?s(LV-;}fQS4olEZ~WIK-f#R) zH$$YAi5xOgJtR_OioUEu1P02hPnf}iw8$@WU1&|ZTwR8@3pgZG4QQT!IFHTMXAQS? zo$)7%&9JegKeICXmVmr&yLvLp$sFt5`KY&r`5u95J{*7BvrV> zPZFHs^Cf*gRD-J-Y4$B7U)NEF1ejWjF!vU)U*(+pau;j=k&J2n?On-RsckT$4u0Hc_QsQF9I&!(H|IcvIr|g9 zTEbDvd0j=4*PK@5>k?}&`5%clsxYbhUkGcaGsl1ZZ;ZCDX0F0>+Hl{vs?RUt{HkIQ zCRfw2W~>HxF{xPvj~)a!C)2LwinF<7EFKnEWL`C1V&>B$@krt7tv|qJE`oEwt9HlO z_GL6(DR-m7L9HCJ0DfOMPX&`tyI>m1kI(Mzq*EOLlPu=$Gj#$?vY4(WYA0H3fWO466_z8h`*@_0F#Ca;i z^*eWzzBBjz!od3_fp_jK;|{RI_q!T*ZU9@w?RoWJ`bCfec`sJt2fH65_K6?h zzzrvT6Wel|^A|i{#JV4-d*Z`!?(a)S%?2mJU^r7V_zeUT^aO-O?8Vh&i=n~-DK;0S zpRpHqk(R^BgS@;;tW4U?NLBKF0sAGPIGuZxZ&Xwp1PIM{*)oTB?cbAFUgbLXB#T)m z!l6SmFUVA~O-P`!noX$YixI-vC;rr56?YIi1&h7pM!$y3QFUFz;S7c^4U-(HYj^>| zV)ooR)DS6Yy?1T^mWA52OV&jYU(bGQCz?^9xXBH7BUm)Z$taG#t?(Ty8^o}YkFpDQ z2LgEU#u#84!Yhh7myz&} zkzz_YB8l&y3gj`V*(kfpaw%N>>BdHd>^p4x+ncCGH55X)3Sksn!Qo%fW}k;z#2oGE zD@kHY${R^bNreltw7`wbjfy}*;F)hbir`Ctgf4(gSutt@RoUavJw)cKg5}w&U_@vk z+5nDeR_&k;@up(9;g4tw7_g)fB$zAIn0rVofDRSzfx?Q`E#8>wNWMZ0-Z3S|!a23q zQS7ED1-C|wqZ@Jb(Lx!&^r>;-<)b=5Au>~=jhHhDZ=`PXvB%1AJZQGM{TGQ(km2An zobU8RJ&-Xun*8rGhg=K>+Ew@mSfVR%y!bhNz#iLX{C`&*+N6tk{AUg5Y@~R#2KoZ$ 
zgy;*B6Z^u;5HoH2!mO*MFI1DKcdjoG)TkMQg*x2VjPwOoFS{>1LVrDbx~NlDg5^bDU}19gg&PPgeL>(m=nLh5RDFTCa`uHn zBI}?p{9fYQ>kIE+_22e|d(pP&qa=pk7se}=4E6;MYv59fyzJG`0fHi{FA&~H9phuq z*%uNLpCH3YU+95)wA&ZDgLdJ*P>yszuP?Y0Bu^GhQSQ9qVPOEqg<*(5Ul<@3&iO1P7KfuKgs&2kuhqKWC+>kEv8u|OPor5vg+v|z=(qrOncitV5; zYz9*43#?vtU$~Gt+S4O!8|-yNDbN>Km|T6KCxN9e2z&>9p&0~JeSx@g_Jx(=)8@?* z+l&j9LW}nL!s;vk+rAKye1*n-Ul^%aGT0YZw%-@>1VvU~AiRXtysEfp($3aC7y~>j+qH4^Ikrk>X_!3WY}M6X>e2s7#BQ->D>QOs491zr~84EWV3&mR?sVg-75{gJ)v6=@U(NdLfd3x_ zjF?5Lvzh+`)DdQpT5vv#3C?F3a&i6>gcQ?Atz_4ZIy3=bitULgC)*DurVfSxN9x}m z+pqimf6Mmo(izhlSMBrsc*S}_o^NcQ=kJpASv*g8Bc*NzA6HG`e{i-UbdJ=g?=<1| z15o^SxxIHAZWkRI9N)42Z;vg?X7jaRH{_Eg?~9DoeBr}!P_)O@3!DE#1T5K{su^Yg zOyfLMC2U@fMg*HP2r~czv_As?s8M6WXw4Y_)3wLuj5Jb*NeRc*D>m=5OUG>f9}<5D zZ2ksdgw5--+5CCb5p2Gaia#=^;*UozHlIjHVRKA$+SmFA0!(ZD#FtC!KZ=+-VDo#V z{+i8gMSC@!qt?IWGP1b}R?WDxTK^!)cU--&xwPdB#dAS6U#Hld(b;NFcS)bc=7cv= zUx};OxcZ!IezDZ&W^DaI@qdHjgZ;(=zp`PfDLiKLnbKyoe$Q$lJ`6C)D(ptc6rlNG zujF?>xW;9^LbU#lVfRz!D?h#rMz+icn5*mf?r4C_D==gapGHIHo0wl?7FAA;g%7bj zW(4O{@MlAy(|Z^RtN_%NN#JwB!8%C*f5xEB_)bj+UECelm>1R~TO)M~0%=s$6Uv}Z z{J(Th;OK9vX;5fls%C3QEi+xpZVv|p@`%9&*6t&VLDLXF;d+;I@D*=Ap;hLWNcD&5cNlE zNg+82Iw+7FbolY|9noPtGSSLcrUyFK&*z6i^4!m%Lq4+f`Jr#Mk=lUHsX9d-g=3Hz zCMY5lG2FzEh~c;m8>q+})&Geyu|qcC-Sotn*#}o^h2#Odv~T26(pH;>m0hGr^a^DK zvm^%cfN+op?0o@wKwR)aMJ)sMt~XMqSMT!ddJkum8WvG+95DeQn%(SGLKIc+;CSE^ zxb>FdL5~U{F~AGGh%?iPD+zzQ1j5%oZBr?%vA?^RG~4E!&oFBV1-_FKd~2OcFI5Zw zmEHV>|HwjjSs`jBi>qKH#p!*@%}QQq%9a<}sO5#mXL%`iUk2bMiWOYcxdH1B_kYpa zY6>Zr(WdSUM^Q9swfn$$2c_DK(l~qIhim+Qm13uz`*rcxAe_~>9pBEY`eO-GjMWg& z@#9JUG_b0`ahb+3SVK(J$<88(hytt@g;wD*Es&7am>;Xs}W8T)DXuC z{qZ7GjMflOOUksgzf&(IUMuv0VcMA{DO#Oc4OFea$KvTs*TDVpB=8jsoFIX#oxuXR z&Dm3DbCyoI{dX+TYUd;!ze&EgIfv``k97QgI{r-^znhLn|{Y{Jv&A|Gp= z<*en8lH*$E1$pBB4-NT>JXbp}$#a`CN@IRX$6u*)en7`xsN?U@@%`|P^H4L-{EJIV zuVj|t$^9a}zsM5G=V;(OL+B&$7a6!Kmz3ryGr|&@0F>KEclJNKU_35zi`o<581h3j zy*s@}qUPE37f!CGk;I345cB4$8Atd$OL_XFXuozWjxn6E_bTZ$lbzQa*A&OQfyftHm6VvPBJ&qEdP 
zr8YF+==bqmxM?o+8nE{>o-K;oqnqO97O*xo+zR``;xn{GJC6#HT?*PQ-`*1|b4^H* zw|vtt?OlJ9XgZ@;^U`vB_gWFTVvVzP@Mo>5`q9H790~89HwRNMK%!Q>Nlea`v>etq zqgVX1Lhmne9vezF#0fXjo};f&t4;|iHFka@J=V6v>q322ZG4veQt=W=l*wo7f|28c zz_TUbE$2}DEUQMG^WBBOE@G}F^B#tNr!%7HZPmCNh3@WL(lz#y+@jXmV<9g$n$9Dr z4ZHdWC|Y2}Q=hjPHCrnN$L!J5+`~5SWf`r4`T63^G!~^%TzNc|BDgD^hNUTc!mhj@ zLisdlKc=Sv$wh>yeOWpEnZS}2X<1Nlc@m$(lwd$(e(vKc`fQ^fB78B}4> zk<7|k;zc*5ivf0W0~n}z6C2**=5}OKo8qO(QM;xZYYtH!ww@C69GTv%tadLQ1d&oq zp6=TOish-|ss$uLfRbVqAIm~mqifLs>As%<9OtK^$SxAh30Fj{%Ie7S^+jH%ybEct z;?C?Nr`@m%x=jtU;EX?R@6!-9=Q9clJ~xnh+ja&@s$H?&k$c0sn%_qHG|T(vyHEPO zdhpS+y>DaE=NL;jyWgAk=3D*><8GXIIw!FVJ`;tu_RGhWX7k8EMrsWLqG_QHYQjSh$Y$Yw01{!N?MT2o=dbw! zZX${TmIP(CgKA3ujM$gw<0J+l4|vS{kfisB_XKmYr0rbdy$-gB+WX)z*s=Lhb4}Eg zwBf^r#v)nV+6OyDc^+U_9LU%Qd-n0(wEJNsp7&`SM%uZb&fa`8o_)Ild*~7`p-$$y zK5MqEtJwCRin29E;t7Oc-%rFGU5h&s&9^t+;JX~SMZ%smU8GQZ&SI&pU9oCN(bv^n z6Y0|^@9TG;v|hER{x&A9SM903zd(C7MvZ4*DO-2*p)@;8Y9$lhawmNvwl$KnLY~Yx zHVyW#gFSHoSKM?^VQW8BzbI1yYWp+}5%^*$mtc4iwX1fi{4_quqEY))EX^-a&4a5t z0xU}3RaGUP57QM}4f~)I;{B7|Cw-!-q`!?xpQtLS?+mK6GV>H99?FYqfS%CwD{Gw+ zzdEQ!bqD%+5yFE-MFZoNLb|meDxe zg>J6RnP+j=opZKEcLtu$Df~GRKjMGr>JQ2vg+k-PsQKf-$fPgFf{qZ;J$~#0Vmv(- zQD5UKi)uVAZ0_94uEiUB@4+;_*acS{Kt%$_%*V~mo=XaRQ4n=6szet-_$HLIap007 za$8Q?@a1-mt4#cVITaZiiPwFv#{3@sgH=3TpG66=-q{j~+sz>Yi&%2=y9SXd&`K)H z3CclV1z|R|h;5N)q`ER0FGa`6W;8J9G@_d9i&2eA5bZx%x$#a==p{Wpy~LbN(H*rf zDK3I;aw%?zj+h@gqxYqHBKvWT`LY`lU+^nTG;}I2-A?rc@{g94uZ`|A*fLZJtlrCz zXP$9vqw(i@qcWYWA9VoYXR{5BW7oNXMrr}dX3Q^WKB;RShLa>Oki^}}O;V1fpW(sL z5*62#md2aHXrgBXwhgNby4|a^`<1R7-=&gL`ePZ$NjLa^7ny_cPZ+3p^+WKPtdo zT|dm+{aUJp`OU6*!$cch&IB;+Moe7SGif_72_O9!a(?CPu6g`mq3$>bx zeKtwd8zCy14T?>wQB<(Wf%ZPV6YGs$Ta{Md7`XyN`TAX)!u}yvp=J;u;D`7Gt3Vsu zX;#_L^hIl?5rwSm^I2vJn%5uS8B&87*ZBGA6>XOy-7yD000utAa1R9X2f2TAeVxWIV|c{v-<;Ldvfe8``5 z{CS%{QyIGoZ_7CFIR*!3Z9rPpf`R!N0zHl&POonKt^hKiGLQ4CpiN7pLbf!Zu?i>~ zK)Fxp!V8bv3bd4S36NoO)b!#}s4Beqems;;e(MlVIjy%BqA0J3y7IA<-*tI>u2?$+ zzTyl*!C%CxH&J|dIDH5=I2EkSj}cS>nO#UBfeB={vk|zUA3D)ltNso9rK?ZizQgBe 
zf*j=O6XN1(rbZ$ZlO5V8p$=C` zLu?L(O7ZI%WH^%-0lw_swP2-)g(eH%NTCcA^mUEE*(&WBDIi_a39(bakCcQ-Cn#c- z{lay3O($Gu*L1>lb;V+{2UqO9xlXR>gzMs(PSD1v@VV}->4Z49BFA(>TwCR>+=J`q zy+yn?y1FVXv{0%yy1c44aTXOUbbVD}ReY1luP7}T^KnPsMTJ6D5-18Ct+O$IKxax= z)Lf6P3ho#muA=tBVo-W)7s6q=2%wmLY3NdY;ZF-akrm;I7qH&}HIu;8F@W9mcHHR0 z6Mq-r-=ib^^L=>Y2Q2Y3NpAd;e0bvD1Nd?8U;E{c`S8RKSmI}r-1ztR;fcR1@VC() z&j$^Oth*+R5D%9Q;LbYFhP74jERptr$3Bj!HlMwC!EB1nVd?8KuA*aU_H!_h7obs46dF;!l=1smU>+|L0Zc|G5le>2pq z@2HIdvI5pFomaoo{qLXgYNncvFInQ>yF|&5f7AgQXz<^)4tRb_L>4LCift z#_)viD_Y>VYY;9G#hBxvBZ?i^>#euabKe>*7Yueg9ph`_Ra`K*Y1BC}(_!YGhPpyw zYLmm22&d-y+k(oCUGj`1O@G)oLp-gGz%+V3ctWvq09y&pJw`Y?T(lF|IAe1>&H*?K z<%_d>VlyF~n!|+}D5W1oInWxz;d$qrM(wC=c7FpL=&R_iay2QHU|_>`sNjld4KYG& zVGovaKL|#SCrco*2ziKn1AFv_$)>kD;{J^rR1dq0XF3}leIZoDb-dqw#0M^j;mz^1- zeiq*Ib^`C9_VAvH2pkJ$UNGGc;mce=U_WUclXB?h8X<;$5&wSgJ4!Ih(PZ(}5i0%+*lAn5)xJN*Srr!*q;e)^u`vaFuvKfxPV{U82 zaKLUBQ}u)gIGq)wZdiMulqi!;OXv(#j_k!;Pwi75v8_D3!26F6)B{_~5~A$$RhcyCH1&w46MBFS20Y8cE%hw-rG2Jq%rrhglw}4zqM#K3Rqdl0$()+)~CO zi6=67cBLGGsE!(34p`XeDoUGi&L9sARH6o_SI%W=xdMSxr*R4#Bo!=%Q>AA1O%N%v zF{rZCJ-*jF&=wL&P7JPvH*jvu*CWP1YB1g1JC^YT($w58LM%JWF+0vOc|2HNo%r=H zNMI}qB}sjckL(xAMRN-aixYgs)>XI7r2=R#Edso&<|%g2s-mmF8>#Pq!3<=ei&P<)q=u(S|rl))^vgH&1DYpaM>@7P06&_&gS7uzR=&0Ygb3 zaIhy{&QQ5i4~fGtUKENv7m-TGz~cEIGk3#oE9(c)uO3=X{c57tuXfKaHEvJ@vmsik zSD4(OJVA}&5Hw+w6BNksr%#}Im4vebr=L*68iv+` zz7x2=S?g)(IMLZYP*y|5X_*SR-MtD)p=|cXn*7%sT+)Kqm zIPk)6vV|i3@T1lXY8*6`x(AlH|2Li>%2q_5M+#t}Bqde~qab75PK1b?hB$W^%-o0t zG0-=<$en3L;ah?GyyM2R-_eUEfD#q8^gV)htxliAcRFu78FX=B{{1~am#`K~7q zym{6Nx#zp>JnLHVx6L`vIzyJuc4D4&5CC^_o^>=pv*uZw^*jrlCY|0=m9nH#RPj6| z0zDZ7-NJP-bbl`b7P<(?V5Dx8Zl*`(!61()q}k0fP@vQd!@|AnK_PPu;cOestV>1!rbp(VHON2Y5 z;iQvChKn52#M!woEIe6pqAUvT=NTZR4@S&cMFBYh?J6v4JU!?wlomcyEm0^s<^n=R zdu?;Y3+^XScYlBH6!u0BftsRq-~8~h$#i0YhgZ~$=d}4vl=$0t{r6zkm=!V#UZD`| zRZ%-gh9nMNxr~903K4Jr9^B#O-%kM$$Z-4jSvXYXRs3%#j3i`)DuFWx8`Mbl<%*cu zfcba}T5tecFvv%baHeJ#9JUztV>PcQANfs(8*?BG0;AgnT%ofk;QsI3vIWm9M1hB+ 
zN1cW7HgHl0Posh2M+rc?JpV%R2I65@=#EH8L`14#-D~|HWKpjYsvzRHXm}M>hP1FF z!*B)o7=-9^fYyvUi95-`osnZbiRWtQda~e651@l3i>rYHdV4f~DB^wND3|h!a(+Opvm1ZjVoW#49w*JWUHG)dvTQk)A61LfS zTVT@8>+)RdysTiTfH(^Trqy{?;F>Yj=Pdm^4{S$)SR-(0XQG5}cE;%EaqKRj z=MnnZlkhkNLOA&*OFYQFyX0hst-W^D7ewQuNWtdIgYOs^WzL=LcXb@$TdW08d~fGCEqGX{T5{GgF`ZHw%)PgD3l= zTF({#T`cL6;anG9hf+>!VP>ijF<8(2_d8wk`dN9E9PPdsxl|4?w%Brpkm1SM^s!TnCx5|gTQeH9krc)#Eiq&4f7>8uA^pNRQ-qNuq9?!WydSFPYlksq!I(9)po}k$s2nA? zu<7Rk0@?+GpCjP1X4KaOjd*nytwOEiKU$;+G61^xEP+-HA1d}!0<>%kka01401>sP z+mF)-&nH5r5MY^TU85HVA9g!r|dtk$xxHR208Lj1PC zjxQca#7xls~p9F}Y7 z?FKHQY{nOv7GLEW%0TrYsq2ks8feM!77AZ-;;;Q&j3pBz3*(lB;4EjYCN(y>$ zeG6A;Wkck+&~Q^l*-+~bQMg7D3JbMH@=UH12JDVltGc6zvx;rb$A?fDAY=Ye z1$7rr!o=WAtxcitl6u=h4_gkoqK;q1W}qcPk%^sZzNn1?h~voU}o zJ9$@0zD!YYF`*Phi!<&4I%&^PQS^OLu_F1@QU{e7vBcuS-cM%T2%0eS2 z()~(TaSurm>{>GvF9dk}_aL>~PT=Jt639$-*KDBLh`c;Paj)j(k?PBBN{_R80-Mjv z0u|)tZp;k4yfROCS>tTOpIcA~f;9q+{7G>Bpq-(z`lu`(XE<>C*2jnZ1n6j>J&N^@}(o7j>|kg;07 zf&BqY>@#prtkZ!%xme&`8<>6n=VDMM39kX(8VJ{dca7BJ2=UB3x-Sgb8C1jDpt=wt z;M)Dw>OwtM7Y1;3;d0oNhQg-QVyKOBIYjg>Y*yVsVBeLfvwJ`#x~jY4gh+t}S%g6Xum?v>i&z4heTWcu5z=1lFe6LRDJ`T02f0n|)$e`) zgPH`6r#>4sNObsYW$8xpig&#)1g;gcVQii`K6WepSQ4NQuD8Q|Hi73Q6 zb3`a41Q0hAmqlzIcquF9uDo&gbfVoKLh(R?AF$AjE9AtzFa~lY$~)&HH=Z*E$`m2f zN^(0gR6gWj24VVs=5w?bC?S{c64#tZ*^FCnNfHniSYr0g)$rY4Rtu5>NxAN-AT*Ja z(0ZWLQ$1|krPRhv?{&<;`F?+y@n)|tOjQCy(3yR!u_w3|*xSr(Zv&W~*4_Id7qO)!{V zL0r;W)5)erYCrOi$6DES3%6Wc@Bs9^RXBPHVlh`4_S$TC>X3H z0WYKmYwo5p@6MiLouvf9U?m4;QC8q`C*+K=rUaF&kh|xlsvc{~K6HXex2dMG!e&G+ns~*gsqMZNa;k>EVk3 zKUs=}EIJ%0FdTef@CXk+Y)f2e$ie4qv;~-n6+qXl0>VnTDk+-l&M5l4SK~EkXrFIb z0weV~!q`scbRr0JzYsB2O2=J10w9p*LaTIw4|de-(myFEA0}}7QvV1JGl8adrGBH5 z`oH8FI7$Z(t+rCX0HLU-(g^$!Kbzg&MHafW?U!RwJLR~a&5}6n?s_T5Q=M~_<5=-C zYHMp-i7tVRBOMapK>+0>Q)K(YVl`5RJL1_AMFuR*X23p(Y8_A-vmdUK!D+?Pa_~Uh{2VJh0Sg;#lmsAy zICc-&bC|jf3-RtXqPTU2q_+eL(k_DHK}gm2Sp#MsK+PP3t8k|3e0mS1l+zu}vG{K< zsD|1=cE{MqN+YBeAq~~L&^)ULB92rFgd)Q05HTr%^q{!)7?XtrA&3{JB zxde>%`o>M^`Bm 
zgC!97(zop=D+MHHKUpME>?cLp{p4Xp#msM85o0YOb2}S;(RN5o2?CsDNw+*?-iA-E z6#yKL2p_EdfB-OAsw;MHFe~d}YN+iCy5AB}4uQKTBn$-I<8a}C!2KFr*jC_P&XLB| zM#GY#mvG9TMgL5_KkB@QjSEq-hO>kB4rU#(2C8BGRF(5_D(9<_v(=ZQx;an{>fbV6 zogEq}n_UPJ8mUqAueO(P-HEq1*wh81YK4kD39oup26fudf*n$c<=3KXD7Rd$l?8Tf z!+r!M7VaN75W-&{Z_}HHaCT@b{W&eFICIhxE5v7XA&1zb+$Tup^>19fDxWd9^jWRL zL-R$xY)JK;J#>vCCO!4Q(bDWVN@MqW=tYDuWo1eChEP%H`3$p!G>at@#cclk3DpAA z5F%FAWF#-f>6z33%h5~Ff-rz_r;^&pVDdJt#ByBSaN49r!Y7^3HrRhOeor`~jOyat z(M|acaGDTgLBZn5rEfTFErK-FGrduq>Ah(rKO}o4PCHzoejHyFoQ~QzVx|We-tv&7 zLtSAZiFdv~i#Lt&B>h-w8P*ILHZ7Tz zga!v5e~t#IBmJ`U+`*Dk^;1i<(NNpWN3`y)v9iyNtyp&c&UDeu=Zf?HiT_$IvG?hlts(I{H})r0{Fel5IuI-^l|xWZN6SDKdR7 z#aob$$)ayb`fbi;h(&$QK{FXJGtfKF)SaYEg;J&B2Jwu~X)a49FH_ehv(r0|`FPxJ zmV610Q_RDXICbKb(D^nb^!NYEM3G#@No5D?p9Wg2tF!0su0Iabp@OU;hO)pA{-t=~ z?pln_h$%qWz5R8>!P%M04zvSI1~A*;hg+%CuTa~lC5d6Y3Mc+voOO|^F!S;zc7SST zkI7iKiu74%C!&^C&E`JvTNjkRz!|Wc3}XP7k_Ve4HBpMaUnIwU@-wB>G8G3wai2U| zyE98I);bJ5*GbylP~$d*-b)I-jfLJ1VruxbpGWTp3);{d`l=|ao|h+X^mS?0hTva; zHx;FQ<@GIuwMSzt4=eb>%!gn%$eo{8-c7sixV*pi^KVn$z1;k}`{n%)+P1y&E|tCl|gjb3J9IJ+51t z6QquK_7^B*7w3UaYJ9@^T6X{J4G>{9MhS7x;Mtf1cpaF#go>=VJVTuc-p4LmA^3l0?F4og(ue)ZIP9 zj}F+)96R)|msyCh_eDKs?Vp*GX$_2E2cY!XESS?;=?eFM6ktH{1?l?mFpaFERyPRJ zTfu+fo$dV)2hwtcSL7mLThE7NQbrXu7jVad)Gq|pK0x&+GQAjJ7{cQ9uHRa>MR(Z~!2tNLn42{~&pM1%8tbiM56 z%&A=Zyz~|)A%25~N5&R$A;yk)ai(wKDzCqF>s>kvn=?4w5*o;X?7CfA;a9ItXxD9n zW+tFFY~Cf9g;mhLVOANb5PPg5{ipSOx0$t`n7Ih*f^#`)?EI14XAs}2@Rm)04mseT z;p&Gu;A2F##{o+bhsv^Z_gZe!(Y^C>bs z0ral|$|l7o#{y(Y?~*18 z#ghu1WO+F;U8FGO5!1Qqy*J+{D?lmdE$Us$^GEewLYRYy;CBcBd7QoY(TzX5D>R)6 z@s?*cPZrIm8uuHLJd z;eumnj&5-y300905+@kDtOn zoNt$*^_?#KJb|Cb;fItuf*)rx?yDd0QE};~qSj=^YHL(SZ~B@hAOp z6F;ugAH(=jr9X!9W1#-%&yW3B1#5}$$oH)0i45(nK}z^>sQ&mBKlalfd-9`;{>Yc- zHs?DM_8U#{r+^KZg#+pW@Bc)+%O$lcTv+ZUoo-YhYy{rfigiSMaRAAk2%t~Ro7Yk52Oj!kij~SdcAdzYk_dTdY87Eig(8h1l*?H1=EdqC*NGnFd+H8ij$%Y!n-p-BGx()e)Q$f>G)xO z-=Yg3KPl-K>b;!(^P|A~ngC1#-kp46p!{UXzfsL3nK^FzsT2}+58j#68v?)0N$T&H 
zbov+YbZ$@yq!H>C%;bkQUfzy(Vwx-fY3GzcqU#l$u3s)(P^21uhS|24puG7++0%m||qf zTqLlsX6_Z(S2O)4s_5ANI=G{L*|NC?#hyOhebWF=_!kCNW7*_T$ z-~Ya5SNY6`mwOHEpvt|z#`nLMb1HS{K0bi&e_!MK->b7O#s9vh7_jhnC*BhO)%U-z zVUN}TC-|?v|9y?`e_z91uYBrj`uVRB|JC=uuc0K-uowETzW;rV?|)xYdS*-*0mL?^lSN5OtaRN-7#;ya?M3*OxG5!M4Bd$p8LcA!5P1)dXDH%!$7g zt{;5%U8|KWc`N_>ekc(MLjU`j0-Xa$e30#>TYzkb{`c=4q7c&mekM~eS5Cun-6=dr zfS1yno`^g2zaOI!)Bk=ZQ*?xQghKqaM%klP$>qV^OrExyXhXTk0=8>k>P2^F`V5vXAFO;Qr^ zzkfy}r2#$v`>%gQy7v9=rzm%BxAljr)-TO& zx_so~x4u9HTYnP-nD2l8jKx0A z_gz?XG!A(Hyj(<*KKjS7HBqaPjdy6&IVE6c;mR*S$NI zFI-FnsABgvT->G_eToSAbNuhSB1PPqB6RyN{O{MH^x$Ge*8gSy`;&n??0Dg`IHae;w|=VN{C{vQz%|N|XhB=C>fF+dyF7m0X~M(Vc+)4dZu zOT-e;?L+Fs1{S$}5B#u3+W_Dk9{6FP1Br1Vgot*Ba9tp`7a}}y+Wz-;p-0XFe6?LJ z-#`aFvsrtj1|9z>jzR^m0v)mLLOuw(W`xUfHi@01EDzIlCvloJOUiN^zS@@M3;26d zxA&d!MaM_;*O8w8r4#;5;xYX{b;94DSS;TOzpr>s?)%aVal-$_cyYpib0h2=bizLj zv4KUt*Hx2qLI}Pu7xP1yY!3*;PB`I@C7`LCh<5B|ec((XcLR9aUF7-(Ui`V7@W0CG zgg=KSgLa(oSMqFk*9rfcopr*O`5tbdp{n9J;g5D?+M9j+$zu>G=v1P^bl`-42L+Xy z-nQq2KN=vicEVn=GjrRhPLOkM+g&-~53&9?obX>qgZR>f%weQn=2nY#obcx&E@ofj zJK=}VWxJ29vU5)O+oub6jD+{G%N;+WX2Km|C;UPGrcF(n&J(Lg12kq`11J2i=!B00 zm$T$<4^k;03I-`}y+z#-4F4~c`TJNzudpiVu2*3WQvP}^lohTM{*xu3i0g!pszlC!O%`5tKV{!k^D72AbL(fzIJ@ zoH%X9UBYNpIi-HJINz;o>x3U>x3@5SVe}z4v_Ja1z!5J;IsOeULpgq-$}t8mI&DXv zJIR;2&jl6hX6{1RkrVz}?rX@A@qy%r#A)|T69UjTsBOlFB4gQPB7-oxHZM;1-4I%mqu z@t?k)Vks>DI?IlO-2Uv}NTqDaF^50wqj#@K? 
zdjEw#`;C%ZT5(7I?4Nzib)5G6*-wyCD9hZ4!HU+EysWxH=EF0^svzGh82pgXhgB#1h;rR@d?+8#1E0D4n0}G-X~DL zcg7>q#%jZdeQQbZQYV;E!+wfS(y!dC$mx)OkNBMDtb0nLaysPSfT)?bu5jL7D8aq69E7ib|BtJ0ZfA*%Q&8UWwzIi;PsBV1?s< zj;lq!r1-UKtC^9-n@8T{MheeW@f~&}t4^Zr8hP*xgn;+xGlE=4DPtS2GC6!#l(Os< zBl&B-*h?w+vTHI<)r3)is22#a-e&_2At0zh2)C28;3~@bk|rpLe?v$_5ELm^3eK<| zy%0l8+?vlQ+{{++`0Kl)snSS`BmDLbDxgkyuE&2Sx+#r2&Lo;YpF?J^n^fp}^(i0CcRE$~l2gc;}FB5^9w zS{-kZ=n={qn{8Cii&f6uRL(OysVx$bR@5Yg|Dz*;RyKYdFKeWnP=X}fq19(Nem=pV4p+@eX`{o zgvvh5$KQ{U-PVUm9%?$5dtXFXLbSgxVvCpyWXnLz-4_u)2m55`HP9P4>5Fq;#7fw7 zTuxyy5Ob)>27doK_+8*Fc9%ZVEWP{~bbY_%smxk2sMTjgTadFmapl zeTD3Hytxq@^m=Wt6%R=#x$aY33@q=N6#85sJvEn*9b&4aZ+Ab%I7XX#KSfxF?5C=Pq-4-Y%V(!1VIajQGS$@XXmW#>+jR{Q<#f&CQq|N1oG z$askC7w{b$Yk@qtUt&wv_b^mD82>R&B^o}h&3=lmYeBf4d5MaS_EUUDUJl43S3Q4% zy*_c;w<#(76fi(r?ewyC1mTTPsoASci}VW8F*yiZN4S3Vmk(IBYVv{FGf~UE4$?o} z`Q}|9xr)1;doBF^6w}pVq7)9!5Fd}rnaM36C-eBN#DULbyC7x$qE{*NH()Ns#IES? z^wF0P{h7{kTzGk4|HWiv^xy8knEH;_FWd7ny%`mZTH+gt*~C?; zhZfx5$!-=OOE|9p;9S%0b2RPRbjW%`Yfw9tXzJCjKIMy9M+TEw|YcaRzeH!6{eBu>E zc3*VY1@ZK5n9{w(V3}~@S>eQ9E&jq<J<)H=Epi_S|CPkaifnlhf`-XqO{1&Y~LUmK@f3P>s%yJ z@}3!{y!5r@9g{UlTq+Q1w}ak8@(T87IMW~vo!9v@g+I&r^Adl?@aOORxeh<9;otZ% zpbQljt9K0yIGVRtvY24p!P@22nzfEi2L&JsEX|EM2KYEI=IF?Dryj&0hbZR zEs@1;4NjCr+~YDC+@pdJWuYeTlLnds&h`gRdsLO+sT(fPQ$3Rh6A4&v)g}#{#@uoAodR2 zU%uagb%_6SqWs`JGr);SQ~vGpA0e@$?eZ@o4Vo>6`d~3=i)zw-%McXJBM{MnzZceKGoV(410Q@&Re` z+e1lL+81*%j{^K}^9)2{M{o1=KpKq5d8;TuW}3$N^`#!Mw7DOreL6WWi7tZ!8$U_m zhDM}0eQ9M<;+gmvPF)!5HlFqyF@3TxL38b^*+b4|=u(7&t?y{`k-d5EZsTjc-~7F$ z^+^u*+bkrlci;XIgUP$05(o3soR0aIN_*2g^UKm+!6%(r&HoH+Y>#8(?s%VsfIcG$ z_U9jxmlQM+pvF9OMSG8T@sbCouuxvY-V1X8eG4Rm(F8EM3!;--pbv$=Fm4%yBQ6j1 zwRd5hdCj4lh9&-2Xg8T}wT62W{~yguC$R5D*uG06B(X}v8BDxc!&HhrLj~~Wq5X(Q zaIHu1sle;oByG&8Uw$8n9PkXD+H*fb?n^a?R54de5UD$8xP&ESXjn=<+EZG-h6VrG zdv3FHJeaN;mb|kSBQl@y;OjLkExt#?bbPx7hJ(to86Jap`(;Fs)vv(?&rc{IpMY}- z81Bh^s)ji?w{KIR)Z#jX?IVcJ#{oWJ=ka219(L#9fVZSrk(z}F?vX!11&FX8;SQ)) zn1a7S*k1D{4y$0%GQmIBT&(jrWNcNUuKo0vF?hiiB^O!JA13m9=W4&0)D2F|QagpmVy 
zIEjaUnCK&MChRadzi%IC;y*#NlMeo#hNa%GBTR9dsLJH~Pa2lCe5GM&Z>{E&I=zK> z=6hY!#T*S1x6;1aQ`RGz-k0VW?kV?njh8Y`)OZPBCo)#shcOQ7x!8jf|FPb;cS0J= z9)vVIQ-DkCe!MuEhrclkvL4Ed19{k&hg=?Z;bA8n&{ipL8-ywA)wd;1+VKN)Z?~%f zL`k1BVkr+FG8P^eQ@E&QEW#A=vgXKRQ?JnUY=$pH*uFf0!@qDKGEPy|B5H|Ufiok^ z<$8nN+avl?0x!gY@5>x|OP1$-8QJBI_hIBl9M9%`8QFo)YxBO0?40Ly;C-13G4|SB zDgXUu95z9X_8SZ}@vuGnO9MiDTj>j?eJn~}X)of%JLX~`FXowxmwE954)njLByzR= zh)KMkiBou};UQ+CE6w>9Ij^>_H|HyGwo7@qNMcsnXPfgGj4a~e6cck2FaBXJj^xD= zJmj00Je-q+uqzWnj6k!x@nRbueisX_w11|h)|d;M7t77Xr@UBfF5bhr{l$OkyCByV z>$@SF%z3*r^m>h)-)Mj8OvUXsM9;s;b#QCHZzq5Iy+-y^I7WZ#eLno@Up zQ=`AUGbAzMYV7VI{;|43--~=~V?FR2~gEemO9yd7E z4Nh}|^=|M{1kEcr)7{`RZg8d>e8~;Yc7t==V3QkM=muNd;5%-x)eU~+2A8_Q&)i^} z8~oZ0+HUYWH@L_|8EN;kI5jUDC2mbXQ$H@3x%eaDS$bz?trW0$(IpSiJZZtT}?tnJ2r=fs{=07yFEh zo#|pIVMX{ynYQIJIANC;e>=HSHMCe0Jn~i(2|sa+rGGLH3AU)$j!3U zWI^m_29My^F4lIj-?`W|E_S1f{msQ@R62I*ieS<~7^^IiBeBwgU0rOhi#^>bA$B=@&!vY^HCGTud?=rmYw@!hu$yK5^BCEHl6Cm7Q@lm z?F-;}5$p5dqud>*B07F+ek682rZ@iW0N>rqyDQm;KkJc&UsAD!`*$|I_;WP)Y2O$u69Y zT^Xq6j%T8peb(EjHF3@0Z|eYYhDO$~sNc#_J%Wn+88*Zw9_k_-FUz!_c!iCvY7?`q z>H;;bF%~SczA|-VtMpglH{uWaE?{mgI3XK9sdfKO>`?TDT57#24=o-XpHa*TS+5Va zW=Lj8RJ^-I?`Gif77LSK#dA8D??1rDrQ%}tesCFVtO-*Ne&CUn3>c@I2&kd}7>7U= zQ;r|6?btZk_s}>yp`51lhMBUsL&Mw;!>VC$Yn+DZhYG_mFVMXMhM5SgXCCr<* zoa%~Zo1AJ_{Jct46Yl3Cp|45sShJ!l=cII5lmSS-ftU@RMmB5!T@wNY_#Y!qhGLOD zhYdhqNkZ_hp^@Z}>bZif%|In1@ku#6B_yB7jLnjw`Lm{TrJmN5JRL(v#UtdC50HqNwSd2aCUeMnk*y1@lcKfvj2hDNm934a*>|H^x#BkFQzB@=Smtk=3dOVSrg)ujKjvS z>ll-!^PD3?t8|pX@!BT&rr-zY4APC@Z+)6dEqRL=PK0}D>O|x9tLw@yjw^QK$Q!Ta zRyuTaijw*S-m3UmM#V|3iXxb(A`9g1+1vUwhq8EM2KF667Qrg(7vq@t2?W4PE&166 zw5u8#ZyqPrg7whZv_E>iv=;|hDC7U^{X6)v~&wMx=c$xZfY3RxYp=cxXSW6?;B!N~oVIJz=$$Pc5wkn5` zVy7Xw*&_)*0iiV~O+$#FybarjxnqVNXHgSjYfyGNW3P}QSZ!+!!b9U|fq1H?FTbo1 zav^_iyKoMFbO5C?qA=7`O2Ph3l#p9wwc;JICpgHkS2Rzg`Y}6dED(*zr}Y4sK|phr z*v6rZFp4oGgsRh5UX^VdVs z4>2wkSuG$m!hM}~rVLpP#Ue&rD`Kson52YIGztiwsH?)Oi1gLw97>2aa6Cc*tUPxS zh$bEThobq4l0npx>hd9d5VfAeqK-XbjPOwTYt%Sv46W1glUgse9uiHenjK9(3jwgi 
z;Mn8Th_9u$BMFpt)p7WcN~})$c6?+`q~B+u37<%xlR>7)igPqtb!H6+8b1Qt%kZ~T z!oe|W10Q^suh+*heg)k3xH|~AK9^swA29=?1h^KZ;4|@oDd4|uC-Asn0TGGr7CtWP zu5rHy@c9yDOix$3 z9b+rZLN^)Q@0-wscc62a@n#ki*3TZ<;Um!UyYoBwd^{b(EtOO}% zODiOeZZK{0X|Wl$PBscJ{~VT%L-HRMO_MSpu&Cz>`(#uQ$i6|gxmO1Athf8>po@+m z^YK_zM^KM4F9H&)2acXp%3&bhr83_7)$&-q6fH?m%v(=Oxw)p?XAm%VFE{7NtcdiykQTif1mWmZv&kV$FYU>k>CnO2;14DX8fV$;O z!Fij0+yZU|*2M+C9V05kScK6I`O8fObeY1%XmbUZDMV(QG`Eo>Obmp}emo-;s>_kR zXpWXuTn7GB9ERSi3bsk-zN{o<$i^(Q>`BO+fH|BGtF}rnmhs0-3Bs{SY)mNn6z3<_ zIuIJQ5gxZd-wvTh!cs@ihYfC^+T)GY1jcF==--|j+9$fqbP!jf^~E_@pO|aLDrRErZz7R7zkx9h0Bc+bq_9m z%aYSMna6Fk<4 zvAATWEPL~l()TNZZ5c*`>x{7|R)U*`Rk_j|nxRkjTw!IqRBdrMO4TIFyoAPJNO zO-G<@1k~6EGWcz&XsOW^ANyj0@Xpm2lxze8EqBMy74H7 zQ>C}#b*Z2xT3`k+oHXBXYQB{0=hWR;wOE5Bf#hy@oCd|c8{CUQ?ToEl)=6=mVOoq3 ziXqYzzkLk1l~D#ghD5qE%0L6zYvfn2;0aCWbolT`EMCrLNT*6lao5H2wJm8T=n|E4 zc@ol+6*KSTx*RKRRIILY$ip>6o&KmG&^D{xWIDM0u~wr(_M`5M@k>vW!Xb z7ng#9K$dK>zA~&e^zhnA0Kz?6go*AZKDgI8mk6s?5mQo#v0MeTs(^IFc!mm4#FP@z zs;35rsDPy^ARQNO;S?oaSAyP%iU?55mS;o-xo`pS;KCNDlj}#ENLxYfm?BcES`|ew zQP96qtcMFP)bjClPlpScOW^@7tO;=8xW_u=!rvj*_$IiJ)lA{S_XHx7{UjH%Wc%Fd zsa$wJu;4;A1YB4KWp%`b6-aZruvof5Di>0MhYOi0%U=6vdM^BZqO_$NvJ4lF6o2<} zVQ+ir!NP?VhLzA$eD`+tuA-gC^a%5IOh=#}qvEjRAcK?@?u-kY1UsfjgaVnPGcKG2 zFw8-9P;kV@g$eZ!hL0}-5^QesmmY!lU#U$ z79*5lxG=?UpOqze13iXZxGl;+1KDfjLj--^M5oH&+?VXDV>i-;46O&{%pp;H3P;76Xl1gLL+C{?HMpzl7<0pfsZxp1^3O(FGx3$?)> z{y7!NV8CoDB9@i`4@^RiXTTv!A07srEe=lxWS1kcjj+$e7hK?Mt%i$&40zUE)bp4gWhAVQ zK$|NFYFq>vB&xE`81P-ej_GkN1u{ox4ETjSFmM>~S|0;WgR_7EFYD@|8n z5;q{>)&ocSP(;-3gA70SA~b5=CJHXY6RAt*J;$*_rCmC&g}U)HC@g(#k~A4)?c8(EL{2zWSX7aAwfqU~Wz{Mol?9&@ zPirhz0qIz3*MviKsE#{*a~HJG!JY;+8?tZC3sknnX>F}?@!Ny`|F(6 z!-Co3=48PUY+!s&kLggD9od@~jeKK~loTYw<-BnTY!k4gu8wd`f@?egVhXG85qM{G zSZl=2H)5HuGe*1uc$h}#g!lAdHlIfnGd-R)w1-2A(yP-vfj*KY#~MW&h7@z9a6c*T zA+QEX0$~lg_fL}IGMyF*aTroeapQzw8t6bI#nrW9N_HZ>Rn1RIp%dZ<=aXw>PBmyf zw!H&tM2a#P?t#8vWLBVzH>i>3Qimv4Yk1aH9U$eahs}xtsV+#}mWFI`gYW zV|hoB#KQ;09#(8pL=6#%Dgra<0s+)RQiM)0ahoiO@(LQKf~K;J^pngh1W~sJ-3#O< 
zz)o-Y6#&m0eozOhMy;S`DdcvDNUf?-6v0G6e>z+DEONVQ`E4F@V=jdU$Zbu4+@{fm z(%v?S5rK$(=Hv7TMp7(a+6Mw z=7t|7Y}YJnAKBl6%Wdo(wMSGKk9Q_EJ8D}zA$MMGqvMvL!aL>yI211!j;Q?;Q0f-9KPPNglparZ%aC6cz5DCq(DBk>n zv~c{NvN_2qbuWd8R`CRz5Xg0syD=G?=B!`v!Eb7VsfDsnR$-`a z{)7)g4Go+?ho-z2M_d0%>Iy9we0-pK5f@mnK$m-@R@EJs7 zTS{*+@2W2NI2=X|stwQE-3JW?OpeV>j(@WKV+#FG6_V*bY@i?y_b_9oJ$bS?Yzp@r zi^8EV5BE$LA0qd#6Od2x;1Bki?!0JhGMyhf@!ezlbsQTpr$=94$vqw6mI|(MKE#k5 z2=9!0W*D*kj9BLDjC(49M{}qpif1;Tu17LGp7rZq4);ixNwe?hR3JeLq7B16v!!r9 z_xw#@4Uz<2*c5kofwN;{K2|}^v@z(f8!Dt2aa6-SDc$5#!8C}cqKsNCqX2D`XAgb& ztyl0v1zycePOVTNy0a;hU0I&b>SDk!ioa4E_4qw5WK4Pdko38P-pg|z>Y|sg5uN-o zf^aDWUW4@M+vaxxsGtJ z39hj_#E^&x?~Lar8?hG|vCP*Q&z0)hKFM>;<}?3kOpj;nvb)1`(oxg!+z~7})+pLA zJeMtn`+06>fi*}H=Rek;d|(QO1mFDT6#GTb;PUUT?3lsYz}# zoF=?Am4h%C7GzcczXB)LlNXR?I1j{A@80S1t|!xRTY>0dE4VHB&_dU+vXQXVx}60% zeC*kxx%?KoV%kRvA4yX3k%Cs{uXeUh0tDTSynO6OI0lWH%W?(vaQFh)zRxS0Nf?S3@>)$c~!xkzGBL zw~Fj2$U}C_xQ#t$VtTSW4~6^5E-bF>T~JO1W!QUX3)z(zwt>=oWEa1IjhNG8reURy zaF+|N@gj&J*%96u*%cVE&l|DK*BROE20WURBs*sFkzFP4V#c!$>F$u7^xQNH%6|a~ zvJ-6>vKt|V`^oN9fi*}Hi0I%6f0FEW)?&n0J0iRF<*Z7BI4jDSUnymz&`NyOA!~*wQiOeJ+T)ndlLqJOOq}c`pFZ8(h!_ zD35AMM|t;%)T$aq5o{=Lq$t`m)4EQ}FY!e^EP#z2PQ{HdrGeUWy(f^V1_CO(NC@%xiX((@&#*d4zXHc?TP@nMN%0bw+uc%ESYbl*ep7%DZedBH~#`?ch+J^wcyD z2zmht$`frE%9}2Q`zbF+U=5N4Iy!Q!sF`-zx^?W-QBsWfs-e7;o-#%-4dSdQ-%hReo3l1z-(NW$}~&atSRa6x^dV{v)qRPRGiRZ2fWrMD)?hw(_hEHlZ!SI zkhRu450DkLHH$4QwO-{!&!M{iTu;lTeN=jnBqf)!BGF5AtB?$;vxVx?J}UjMAnMxC z6F_wW6sl9kPhHAB1w5#(1?hcV!)v73h4b_#45pGChy>NKDBiDwH2f5Se|DFx5M{$!7U-wCr!O>Ar#A90WoODPT$Zx(O4ES8 z&0Nazrd)%?w1Uu-=E4bULeVma0onDPUl6Kmp718_*%eG^4n@yKbQ-cNLNtdP)CSYM zH6%6VJRjL*@PS5<>^R1&dwa-^5Oo$=b5#AZu^)O-|He#8tGqU^T3h{v?*)f}s z>}HS*<5`2cIb z(pLs*8RD)e|8`!c^Z5F2Ge*1cVmvz6&J&^6ii-Z)4TVgmFhRB9+pQ+}ZC&(N{ zG6RwPO>!&Rh8LCtKX@{YaNG|wxOuYywjYN%+_J|3;(R=r?>_>IUnL|ih`S%tF#=Sc4 zE<{SzCodV!ZkO-Qg&;%zdZ(=@R*F|R+ZZ3un$Yzx;Nuy6AubXh)dTOct>&w|D3&GI z&(8TC@z}GUuCHd5P_vE20HUj!&HBOF%Qs{tNI0Odx|Yb!dTFCz8_!4zJfO2KS_?2} 
zn$85#%;Pir#plQ`rtCNo--hK^^lvQ1*Wj#Wcn^!_vLEfqXQp3Ke~zjSMV5$#Prn9U z!P|6|sOs!4PG_fX&=-6Keg+geIyISm+bYa(&;kBUSc4>?6P|YiIw9U#lpP0c($&7X_D0u%2z?XPO;J1+H^ z`jtc3V7?g`*v#;ZbkzY&kelAl^R}r-K;Iad=@^_E3L1Qi&}k#}nKbxRh64s4`!6xL zB*EZUjQT@^w@L%I!r*tEmEPc0jE`qc&k%zzg*X~~gllk?U@w}j2A5rxL4$8k-NYN$ z04N4e3t9yZp=Pgz!HMk5;0p!Y_+wJwwVfG!nvhnqZ!?e2;4?s?H2BkptHB?**k|yT zgVo@8A>F^nCF63`7r)Vt1i!(*yG#^zwR(!H!zO~!J# zUC_We{j+%_rq`j>Z@PRQ%Aap9I!MjlJ81U*08XH7vvzwenS7^X*p-}KWU3#yC$>X@F23Yz{lpwQ!~ z*`(>Kr44@5zr=Y+C!a6ejJxQ^|L+qDEY)7WQ40zY{ zg7%vJ7jP;Wf7W@*_>bZ2_I`Wu{%Ze%p#9$^7sLKY@Z0|tB+&l9IQFNug7)uOjO?*S zsyAu>rP2_;{r947Vhxf+`(OEo_Rqo3Hf@#tk6=C1*#7{n{~H^{{<#oG`|}BkYkwAE zubQd$Pd)(&+W+Sv#{LArP`z2RG>kvLh{ zhHHO8d+q-;%I-~PIm3jLhMyD z)c(oa!9n}KdxkL-0WegBv@f;)KLs4^FSm=UYm=~!>|c=-OFYDmGh#ck|1jWP`wQA@ z{|C@q>Hi~){c-l%U$)qf=}{82|24=*`y;_`|8rG;(VrdrQ(Hm%Unj)gNcAS|UoH*t z+kXsaL$L-)qWwqyq5Wr4%GTI_Pg&*ay<6|{eMiQ_{`hel%=l(Q9PK|n!)t#QVy}8e z?cao?bp86jVPk&+V5nAUU#k6I0FL&b4;ZHVNmxhruTF|39%2_6u^rj}eBfRC3)*Y{ z=~w~M{;%Yz{hu7}xBs5n|MPLEI^Xr1>-MOFTh>O0^D)JBl2`Nck*~Il}}8Z&+E`FaA#FE z4w+{==B_8KHli8 zvfd{WU!OD(y%Rd^Uuad#d`#1G@9b585l#SRgNx^l zg^-hAm3;R{GvuzWN79KRNgB-CX9N3uZP>%JzZK2zi+s>av6w5GD^j@iN}lete*@o4 z*2DW<$}n;KF69k}Qi%!saoRE_I|6*|*@?BqZ+o*8t7-&G@qUr-*8)lA(-rh#N(HLK zpu(AJoF{Z&0Lc5{NFIq~+z01f@r{n&-V#Am@+ko~#reI-*d#5^yX(Te>-(5w7{cE; z<1-9A7GfrTk*!&uVnR%X#3Z+^B|j|}ioSaiYkhsTe%z9%^dGk@yckN6pJ-?MgCDrO zn^N%gN|;+(oQ6`y00L?#LyJQV`t`~nJ6)hsc=MPr5>rQv(^sWktI|5Xb-XjB>g&7^ z%pAN8;W+-r6H%{Noj9?6(GCyH=sQK3%k5sJ=epu0`nc;-Qv$}mJqgNA2O&>Z?L56@nb-#}Ort3dSRtZ46udFe#neC-QOu ziy;7}b1RsV)oLEeUsKZw>D7Ybmqu%7!i0RHmvQ*gXgy-|dwux*w41iK|BkL@7hbe{ zOO4%&!5c12{*EcTKfnX~yeS_@dmZ{%O3p=hCg-B*h@q}&68E*~#CZ8KGfGR0&#sVf z=_9m*31PUoJqO|D@jEjHm$I+Hj6RTEP1z~038P&slx+w_Z=`$1t7;H|cxGTcGNm|j zA&`dIfMf=hurK1n@vNP{b-GJES_+xdsW0=zW3-{w^%lth7`_{r-yLGx0z>R89M;iUp2WI>p9jVM5<*M{`=Q0{FryNgm&C3H zJlVe3tlls-YTC-y;!RX5+Jvgxcb9(4_x#%6J-&wWBg{%+9!qzZ@`Wcb&`Iz1ns8;Q 
z1b7QkTENCgzm1^@Ezp$w{ygT<5q@G8A7PdE0$-4A74c%cFSCdR4e0wG-wOhxk~$ia$sQe`g2q%{Xr3Vjy0Xk7PIyQQ|AmMF;9(++yd;R%Cy!LIhZ|N0KB z@c0^Gg*KL>TtimK4yy5bvD0F6= zSQ{XdlM=@kZS-tdO_9uCkfdT?$!m#B2vQ&%NQ!zxs(&E4n^9nfjiTr&Y7Y-Hj1{xW z$8XrjQJ{|*hHA151XdO66J&=&Xi)sF26D63&W~s~L-;SELD|iNmK6v&OwxkVuhZsG zTd`WvwSMUuOEu>6FV#vUCTTo(sdlYoZ%|u&h~Y|g zaOTH84evui_`E4vg#{bij^EwEb8RbHEsmEprVX<)x*~e5#{tJDr(0YO(c?lQ~piw($lZbSR+Mz0XM=R&dOLSzr2j z*uEmQ?j|FZ^F5s*&V=CfS-vO`>G>+1IXjg81fW=7+-#(CW~h_N>6D&+GWl;6-wFgO z&#WDa?=9l%ZZYCHi`2^3cUbu!Ph#bxoM1vabNVbj9?9HPgm?^GoMq~SbSm&Z>OBKM zOeiw&x;czXyl+G#gOou*KH=T9Y|bqCS#b*YGRO*|S+d8cKihq0ak<_+kxF zSV`cNfj@Wx$6O!s5Fo*BQx(Y!LpPhc<1Sg)2A{VqK9(i7!zEacptsCKsu^WikJM475&*OEz{IghoJHC$ z1f6CIItUps1r;5~;?y{%pv))EB9jVELE9uIbqe|`7HGWx%}Em$EOZu=hf~mZp(ytU zeE;`(=#kKAntABnqU!&39{Me8mqzG^h{im0e|H`_!Z#09@9}tl_!y`_%>}2-<0m-W z1+?qrxj`o{M?N@tZ*_9cH+=*)m}2q80+<<2Zl<>!c6_Ky6t$74gn-32t<*ks1d!-} z8Jh*k3`7PA@fym*rfHy<>=0s`cbRqdq3YsK4;B~KN_jMLGuH>Y{DorJ0-uZj6Uj=5 ztVPhpdmPst9?xiev_ZCPT3W|_VCRB>uDp^vvq zAOucLhlG2wVm_xnMw3}Kl3AOeV|QaMbcMj0q;mHsG<<9d^@?^Ch)++_g3_-L9z+JL z5Jc0lS$R^kTILI^5F{pbg>a^1Zzzz}nye5AOuIt3HVmc63PCz#V1@AeA!3OVw5n0I zis2mxD!^Kx69Bfzevf1kc+x%_8N>0c<37rUB^#?rfqJi>qS8HS9xo*|j->vu43HdY zo*oTXm<55D=Mvr7)1iYDeVGx;TmtW41M2F7+1ke2RXB5XCVY;fFE_%Ot25!%3XiF9 z=ITuNFhyTsgfmxX!Vge*jS6QjqP;V#P3Q!C?1m)pDS^R9<{uJ)1sxG^QXTE|j&;(* z}NLnLMS47Y7MNXoWna`Oe>x0~;&jqlYIYiPJjPAV>`h zkcuRh4g&RS(_IKm1_}(6t^80S{7|bUrC$gPrPx@5XgMaoY{`XyutDTnOxp31aXKnd7t#Vhud&K~u2yohhc{jp&hF1Z-FQExo1~UIUY? 
zQM+=zK)af$QgGV+>0i-^c$Ez>JjUcns>m4(o>Wu|i|tiMFv?8vs~drW$Fvfas3KT> zLGFGzvLY&LsiFub3i7CVp9k(jB-R#52$uxA?+2`ZNpKgVFtpqS9l|i#36IV3<^hufPciOOy|WJR9ltVN-}yIDSP;O7TwTPZ@g1!RjszHUCDC;(PF-_N zkn|gOO&~F;bEON}mtu`#F{}ybH=Z@Yo6x0sp7}rDmtJ#V+Nt};hz4K%kN2e~rx$uN zqA_PW)?E*jx$A*oCm0AYIICt)O$5G?h*LU0FVoW2D2ia?=jEotPLFtulQJwX48&na->Tq| zz17R76!L?X-})v`We}hq&dT^a{8l7^)mSDp$65qE{GTix>j);{#*$B)u7@9>a`f&) zZN@69P@3M6mw(OqSgb*`tX>XwWIk3e>9KhDA~C66z7K1KeG;@4Rs=wqJmG$uLla&S z#*j^5140er{$W`VERqF5I~D{MAH(!r#PtB*-_~z2a;iEIPok&H$8vxlXPnM?CHJ*_ z+r<^c<-i{ZDh&!6h6woaN$y&poNIxxSPPV|VV$EzSPKLldJp6RW0vVUfYwP_2b2d{ zaf!@h^?Qu5x;*;2caHH4r=9VtjgSTR&1G_$!Tn}MWuQ**Jc5mLrjYOeMP@6K84NOo z{yzW@Ll}{SIBx7)2OOQNZv5Vf=?D3;eZ^m86t<*dK6ide8gB;MD%K|G&Q+|ht^-(; z)OEnwA|m;V#RcNYlc)izk&e(T_wbS%B|_1CC^1$mn$};=5{XF;wCKx~w;Zr&a5ULST6HozPML&pmL@jos1C6)_WNez4AiGa(u)1ZGQjpMAFpO{2P_287uUqOfE+*i$U!=>30eJWE|G zbVuGD?dg)qDQ~)#$LyVzN0@ztr@XyPdHO)0lk)od%d6M&n7y;|2(#yU^tiVvPaY9) zV_iq>YmrRJ_C=>^dCczX^ZU}puz?uyVC4$2j&PcDLuWlk50JbdaF<(0PXBC%w~9-G zWV4#1j`>aVSe@L0_L9v)ip7IO7Tl|@TFEAbvF-v2EZ3VTJWIrh{t;YBTq>v;eHBHp zqWmj~oAyG9i7KL)oStMp-UYSBjPSC52+3%LJTt=cAVkMYR>ns#S4k3A60AXxTux!> zmXr>y`1R+GmAe6jF_+4Oaw>#jYPF>FD~Y9hLM$FvQ-85|nE9}h5I*s(BmxYRTuJ;a z*&C^TK2iWmDQu(N@?z>rVjj_^d9hg)a4A%>fQ@yxv^4dX#|bs?lav9eEa)Q_Lm;%# zm(`BdwXqRiA32ug#jDCu0(@kt`bdhSWf6sCCQ!8N;P}-Q!D-hg7&;>HLDiAOpbBCz zu2EIK!hX!7m_gN&&h^4;-2wEk7f#=qd6(Iz^%d{Xs(8F(4P@vu0@lXo9q%H^vCyE3 zpnE*R(q-%!RGm8Tlxt?{k81^S3u$miQ>cr$0>>;%-Gch%LLuoE?m|IgQm0V6BRi&0 z)Hoj=&@DU*g_ex}^Zu3l_fNY{s6_PtzD_tUz0e+r?qr?dpARpf9jrkWaJzb?Ix3Hj zq8%gcQ?c=49{o|xSlz~;8=r+_cx4TIn77=6&e>a%OQ*v5YBKN_WVgR?)fOLAkMyZb zhx4X%BrdR!Ue9!g*xKNO_|px0XXU8t)*O?%2)G-$h13Ss_!(}P_lxqP;-N~loB&A=zAA~6<{{!zf0K*MWAfw6eLdMz`tR!I-U#9v}ky|x?c z6KkZ7Iq?T7nTLd_XhOkf9EFtRy_~62a7WQ^MLV)F)IK^)K)zYI4MQc5r~10 z{=xHr;2Pj`^{CVGgl{hMV>h7bU11@(t?gU56N)E znQ*k}P$nHw$4Ad5#SlsWW;QT04Ti?|G3_$SNer6JbjMjYdY!d?M@nC2FF!KXS?@z` zI3-Jl^G)|T?6sOE?q&^|8Fbh|NH!!pJC$VfB(r`&62qGK>gkZ5ermr0tb(b14t1#S 
z8*pkbp6Q#~OH7hI+^PKolD&ay4qGy{Cot{Qep2t`)Lyh5nA$%qwKkiHXJv?1PM;Z$ zm-n^b_?BPos;l@Bnu=F7!Att!>kD`bLR(}$Ev!HGj&}pN>dkbfeT9&V-SySXj4k6h zW#Esy>(O}~)GmUmtx*)gidxJE9TklfiJi0Gu(9~&zoH0M)MhPoBb)&6Qattp9{yR4 zRy0@1SEa&Pt-waCrU+_X=y+-j5)IIl5{^|68H!#ZN%Ha?<|-^RJg#}E2wCG7($7Vf zhC^33K}jvFqSX|}d*RICDU*oxrJ*bFDYsv+nlgoBvc)XntTw>SSI(Vgzv<$_v6)1m zgjOxVD>@XtSCPHNdJTm5I?RSb1}p;|3UO=)Dw_+@ zKA9Pc$_6Ste>HbOGguhvuIqoRlJ%Xl(z6Lf< z%%)F}3Y*7?-5g8RU{P6U_SK?Rbfi%921G`zH(?)<1TjfSD0(K+lIF-}IWcuYYrYf* zGyIqYt2y@MvejzE!j^DfBiGC7W>SubsS+zq=*%uiUBS% z>6_JnD^*C2099;;Zzgc(4mU$ws$iCIRt*XWQpG+l4pcz|Oi`mH_?z?XZk(&x^dL$@ zv5uo8m}I_z85=Y`Jx6HEYXFVJFqB)BtUaojGD=^hklD%(H_XPXOZqKocw50l5SU^nO&ZUMo zJ=8Ei6sMQO@Ky)=nl#dod%pE&@x`z|A`rqRI0TQ5C zsTv4D73(2{*9fr&&6e)Ix{LFvT9~vL<%oJqRi{BJC>QCCEGjJ(e6R;g7cr!Qmk#w% zf#Co%scTjaaC~RVcFcL(bc*ef&K<@G;D*_hdmiP+=8{?Lf@4=Aocav-aKSgq-BGjG zK4lErRWLH}P;8cneUNh)PX1Q>eWmVN;KiQ#EHd?n0-pT~>)j~xC{s);s^v5F6u~s) za;}A9>JE|GSF`rxQ3k{o2S>*fRctPoaz^Z$px8rHY(6m%dy0y!q1av`cJ5M!_tzrw zf<@RlQghklVT!PWMYIa0fv;Y(h_4S8B@_fjMO9P=VMbJ2P}Ef_D#wVrPeeVKLs9)6 z+z50iTG9~hK4e9Aw#D!G>oAFhE8MX?+g_(bogOhwyd^$ z;B4=2E|wrAY`t%X7`Yv>yAhkg(Dn?i=OL4a?|9gt{=_-I{V6I+fgj3wrTsQ*-;4vh zMk6m?;b9c=6 zeGVgr@ledeX*>*~#4}J!$*=xzZLn9f-e~7aoEzbE8TRJ~nGkLh+HGjQMDyaqPf*I4DvYy5dl15QAr7oiKPCXs zM**U=Z={dSCAruiGmP}^%=C^XR4{?|N4?37_DcrzjHZ_{u1#%I!tgzax1VFA7OkJhl1m9JsJ?&HA}tBib>H$Y@0 z?B9Aw9VD)|rc2#>Xjt006d1a2tDINbEt;n{<;>Br)OU@xC!6tKvw;sG(f)*?pHU5a zCJ(E4c$|kXd6>q-5+3ftA?XFBDpgv3j)p}`leB~kmM~t!;v6?#fb$DBo1kN ziQLupnVKf~25DFW(_E>_KiS;_zn%P_bU2xv)izN2BONfUE^3i0;t%)W@B z9T-Y5v<*YQGc+3q(#VYXH)w;|#2%FGtw^LL>5+VP`E?61UZ`=;l?$loRs<^lCq; z5z?N|88KBO!bH3ynqO&8&^VD+j<9{bMwAfr1an=?2s7Q^MpUru;gv=#E(H<`$iQYHFz3{cBw{44en>eFpUt;w=}yb{{+OdQO9VyG_F@E{LUdH9frNj$vC!vr4Y@o+N_vv?@u;b|VI zANye*&g0=;9?s+;j)U|c6wUMbJR6VhuC3@T?brBRhWlw)OmhUn)WtV@Nj}lf#~POO zcRlg*H7xReX1Qi6+^*pg%6?EWVTLDr@#SnHrqZ$!=fuo!^MoBYw|JtyvSNx z?WW;!#{a5Xl$Nd5a4zHjqcWxS_aaP-+^KPbt<W^cdPks|B|qudpYgAwWkpRgC3>q)p~gQ^%Go14$dysG)7VlzzsQcV|` 
z_an|8rV*m8h=xUrCwSufcrqQVVezp&H7wR0tc8iD3pFg5V>B#Ny>_?}t@S0&sN@J% z^1m7`VfZhKmH6L{HD+nLG~!85_(6}1$r_eMy`dHpd9P@=jIyut4GivV2{yom@;}IEbZS-+AnqduZAW4O%2zR z6ZTcz%2arfmNT94SsI^Dd9P^vOvZn&;l7N2Si`d!Z}cnjEsYmFov&pT5II@gLE*@t={D`QxtAUXf$`N8;%})W;%Cmway^Oh^7* z>nd^2OWaEPNt5=l2S3H6#c@_{tL*OKwko6Yt2IkLtMiD87MH3;n8ls0=_8mvUel$D zH%R(Q`>&q#i#_SXlle7HWNxEaQPuigRQ?Y*C;RnR8kY3M3}Y^~6Y|?{%0TEWy5SUnO~$*pJCI#&08g3a;&9 zBe+^Kioh`%7J+3N7J-*C3FwY_C$1syAiOYGHhjr)IcUS==j z*{o_ml(;4K_C`<_bN-cTL_zD!d6hXYH|I~y`9pJl8)y4*)WZIfhiN=4;o&YG-sPd1 zhXpuL?gPwc>ToCXUe46VcqrlF0UYR^M`>NfwErgVWTxKALth@Q#{qJRsqJox9l`K% zl#g{him>Y<8Md2JrEJhe1e=Y|5tzr>1omq@Wkz= zVG$NV?R3IKd$K7TLegkyN-v7vmJKN6;WuQok3|Sdk*2(@SZT@@#-k}4aAs{*D^6;$ zT*K0oB^s8}W13&QWgg<~n-Rgx*U5RMeYpZe$OR^CxF_yZ4U4ebP&;kP0I}^#`*2My zpr%S#EUGs`i|@<0A`>@&MPJCq?Zv||9&&I%Gc$WW|DfV~YPvM+U8GZ17UER# zZ;Mh^+AnHOQOZg+g5-F>Bk_HYaVj-lT>53|?>Zy;ayhTGFIIq5akxo4-4k~*!uAOo zA&S1%BmG~Ln8!p+fO2tWrHcu`b+%@bsveIxrX6k4zS}_*AOiC!{N^p*jR_hfEB9Mfi)v{Hkub@LvR>Hv4+w&hTWU|JWHOU-TO79E@+>G zGk(ypeYu?9XkR#0ulKx9&zo>2ZpZ_AebO{Nhac2)3$B?y`+mKC(!}plr`Hi9=T4LV zE))OkLz;dB&dh%~egmH8o6Wi1oclbixN+v(S-&65^+g7Mwu$d)KB?TSxq0%UDCbl9E5 z`k|$v{Eyd$MtmHKu2%f|aC~Bm!N1Ime*D_d*R}-g+rxNN`h79_ zxQHnNnKN9msBPuK60@J<`~~w$kZOO4M$7Z|flJ%}##+w)+5DL&W&9Ng**#{e$5ebU zyu3LhW7Tu^_-#SKx8mT{I32k|{Y`y7z5aUN_5VeGw0u&3S=;=1{XGnw{~z>sau)P= z0uHJAJBuDz6n~a4vlR}FSicPkkNd6#)rAudnVF4RKbwan^~E082835fPkVP;>>#?e zc(CP zb@hVMi{!6x0o{`5zcAa2_KBRRpm!4eTdrTx$FHwO5?%3*7$roLuO~Wt2`dRo07SrG{M2YpQQ&JfXG=Bu*UzI2 zLRpZM)GRgGXt!qw;RfG@%9X0&r`eD15sv?}hp%owQI=JN9}10MKl4>e#t#AH!td;N z*hykqQS8u0lrQ6RCS!I{KXK{N*BC!TJO{rfY)#Ww6UHrM2SBAfyVC5PHh+ctnLKpm zVaxA0Y~tZ(9@g-%iiZ_A%&SL!`!^-MqR>FQy%}FbDGgnd)$)8-XkuUncL$i_Np;Dh zBeCd4WCiQB-@y9gzAsxgK9jRetpDV^(!S1IUygGUbAd@4jx&GDJKi5JU?ty3+2}XT z=%diMNNg08=$d`>A(0sL-E%WH7iQTPurU!4x?1>~73c>Qa~0eN=bB;Rx!@@(5HUCgyq*iy|v|k@#LQ=zZEu4DSvO4|94OM zXt?3et7=pQ7`{h>Irl`himaj3`q;eWIp7q7qS$OjVRH@AQ9AcF& zm(nIlUWJb&sS`ocW+qtC>4OhKGK#zIDQF>nOJ9Dyi$8Yavp^_%9Z~VDq18maHX9D6 
z!2W%M$o?WvA@G93!tqKO3oQT7DPa&ru`fyzHmn}?>sM&eYjpacz)Mu1BpHEZZvR{k z-`rDyk_3TYZKA;bKbHwb|H`D;S@}rr8}7FW+o1V!y=UY0&H^|Ex>*vAmu1?+P#J*X*Zw!= z+b{q?5Pr2`Y;IaXiv@5>Zc@+=5Y$+VI52a3io3on+%GX| z|M2`3UF?6Nz93g|-by>qLoF~=F)BYoj1};_;Uaoqby4h+5k5m6jRY97cyPbpMqkq2 zg!1tH#`tcNqx=#bbY1y6e&>FVpvuqXkP|~PT&YJSeil?KU(k0x=&yhqbGeQB!Huw6 zWg;I%jEV#uWN(plQm_E7gTk>r!pH5lV3F>qx z^iUF%c?$se&88B_DMGcjMYYT;Ku$N_kC1rQ19wO_me22}>Bj4%a`+J){B%>e-$r__ z-y?U>lJc;G30F2}+7E2_YD-bSt13d#yO9-d+>lBT;tYMHXKbTCUP!t>hlgP}FlI0> z!aVfn;Y1wxh~n?O=);THt<|;-pSG9tu%DzS?A@8(oriZ2%^NEVah_LzV(snN_GnWP zN5ob9MLuU4kQ@{Ft{5sAi4E+_L2wB4Z!e^4L(Uk=W1%l1!k)ocUOmREdch>@sma(o z8GAbqQIm8l&K=Hw1N?n9tdE&5yo}jt`Ow#em7ircwbl<)+`SyeXcO4NlwrHvjlQWIoN;Eda+53sL*ed+Xba+O;i%W zKc9wUMi7iJooNZNg}`DjRxk#`z&gQQ%7QZQ6`9wUnQ>tAkD^IG6?{Zx$FuILk}l3~2BUlPm(~0>6)VO1=RxJY(~bm( zXj#3JBf(@8hGh@=EEN3|g>*a^xD*L?t=Q7KD#5{^07Hp=91nlzp$`v-@%m%v>1dHZvO?WLZGI|xlM_H`cS@=%UA z5%wIebWoW^MVdk7?5!D8wkN}|^eZT_l!uFO;Elht&G`(RvA*eWe%}vX;m+@O@D2{i z`Tgd>>Bk3sfx7b{!kHLg%>;(dx~oq7p`-czSfb)tqs9p1&!!Kl3RNp@K&chzgz$=&mRs!4$tq83lGnypWnYC)Y$R-{vJt}`TgoOVuK2? 
zmY)qSBdU}6eSwkE>HPkh?;-zB&+ji0(P`%QysMUGe*gSUo$~rf&{n6st^norx!-ld z>-i!nh1Ye4OkUq`-&XSa1CZ3|{JvVbHaWjP1l0=8?|Tsv&w8ywx^YMI`wS7($^3p# z0h}iD`}nn)_G_!)a5`gKX3xOI)=u`X17+G*@i3AD130{d7w7UYoQE@TAe%>cF@P83 z%U|Ks_Ie)rNP5EV$@CsPEJHLW=}T}{Uf!3AFueT5*6^}S_LtDlA?HrYW1-E6u;(+D zSF?HbqF@sC)5+LJ82ca(Q%zD0&fst9D0+X&;nav4<7!MXlqS1{seZDERvd%ZO-y z6hB$U!SZmhxV;iqwhv_JdmD#bGCRzd!8xQ`Q6(e&R!@X-* zr_e&W*ynA;VrdL{%NolOmL>A}?L*Npt~UykH@jKr4u4>>2Ur3!h{Xn5-|#1-3oGWz z3Op3O2dR-*SKQrBG1`r|zbZyk?C1*(E@-rq1((}?8f_!bA6RqVwZwHtp}v87$^ zA>RgqxBG!4>`@Gzv=WEQ5hChh#++|1hT$yohK1v2!KLIgB!NCOni`I=*Wh;zJ@?Lr zNqh#DtQXd1XNOE5}A$w9zO--GOg!3r56sces>RYi6!ypcZ@UQ}jM~=jz5?^X4twAv6q&vUUI$n5 z^}w^!_rMc?b9lb zxL+NywwOnL{Gc$mfJr>;y3n9 z&Rluln)k1f`X9L4;P_+pPy=sa!=v#0uMJme?q09uBWQW(hiCN+>O(&X>MzepFxNc) zv(Ey*1RoE@yGf-6;C?n9Ll&6HZc~sbE+Q;ZS%TW3E*bOkajrWW4`$TsHC2du!rQpoM{otV;lp(I)W3a>=&J1KCiqVE>3X z9yS>`Rlypg%~#-0NQA;SO(aF0h8DkvhhMF%7J693>WZ79c*+KHMs00G+u?6Ga7x%G z=qnKv8_9OYvks&@>wSI$hJq^Q2~9;T%8H8uL(z|r10TXEvi`$N{;$={q_o%+QCDmh z3)XMQ@OQBJ5kTuj8X@_8?${Jn`?p>2Q5o}FJ$DzdCj7Rtet|L^A5wu|?~xCN4X|oi zpX5haLeYLG&eX9e7Nh7;bYE64K5r_t#%Fm&u}Z0?wJFq78W4f(Lg)Fwc&ymWXM!Rz z$rOr~@^+hbO9iCVvp-my2U~Yip6?@P|0ODZOZDK`6GRQRCK|90^%tPv6V)7}hM6%+ zK4#>6HEbH?G(v(*9|m(Qiu79(n!uF}sO|k4bi7dXJ^%{woGv?l8^$RNb(NbjkJyE) zDYU0izuWZ!wG;eg+v~ED5?gsI;T`uw(Vba|SCf^9#ELjjE(%RJ8BwUjsalC1QV9%_ z@l#O=U@AAE5?`>Q=nSuFC3NBvi;6RP`^W*TY&>h_wRG8AT082jKT}-n+|~}~TNhDI za2NsBqT!_ z`So)oE`_?|$HOvyG{W&XKRUL(6eX0_ylSrZX#Z5MgYyN^eex5M)+T(2zVh`hec!@f zg*QXd$EAACd|_n-eX-CuIlgd&)JSYs%olD(@Af~hx)5omyK_o#CgywEe@(v+MUQcF zV;14heQh9juF2J&$2M&JI4K)0WNF>eJav<-71PX9L(xx|=3-rNiN){rXVf+Kiw4tZc@wEjN$(3qSE6JrhzqQ?!7}4@YVwb~IFe8<=&Y zpYXPm3dBix&^fd(jAl(C^=%pMv4F%trILh`NNKGW8ze4u9uQ7qp!kwy!l=cSXbLRW7fZYOUdkZ!zV7{O&W0UOXMS=_??cI=S zXMB#C#;>2@u%b=pkYC_DuMJ(v?)o*x9JBCi+5qqZBw?FE6D7UOFeWk2;cS2V6%OCv zfK(EPeV-z!EjupA_w6^3j;h$V;+k71qBWi^6lTi?yDujap8XR?@;7K=bEWzx8&Nm2 zD6CwrGv~uh{DC;z=OfY{!NU+9PRD`84&p_jx#-7>u59+-Ow6H-InZ3}#fxt~#UTeF 
z>NA@$UChO=U*e*f$v+|#n1#fBla6@*CK!=AS@l-}@hx$LeM30cmw0R>-}2h7Q6@J} zK{lZDGD4emmrc^JI^w!09q5_QMWyih=o7>7ti7-7jLSzKg<65fxryYa z z^ucDG4$o6x?uz(T&g4EG=e$Jrrr;x!u=ztEHcsvn@`*|ADos>!I|?T8YZH~Sqf|mN zotKb|v=ZVkOe;=?TZ!9;aoEZ-xZjGG|v6e!qssw{ld7-h>yXpyzH$coP&RmF9elobmpr!S#~!O8XLnKNn|vcN2L!E;vzPeOcK5W9?huqbjcd z6Ce6l`Y5#0hSon;IoH=vm%$YMYXXc*3 z#BbOMaJYAvAjl%rGC| z^i-)}@$#}Kl|;>VV(=fT9D>z}f@k457d1cUvZ3eiKN4CnO=Wy1bpiH;D_T=jD$2({ z<*H-Eiav&Rb?-?vQYS*xE6vNBqlxbfqjox2irH9sX5YC3P%*Q!(!8M~BstegM>zqR z^RKIdmWF0xI;JpsxudLoS|M%fmx&l167@G0iFYZiZVgjj)~SI00ie45qSgtY?ylqy zZg77!*l0Bacl z4AVMjos7z?JSW?9{^D)`mW$zD)e@T2-y-QZU;?&I+^ddT3kew^4QZrGkQi0XK{DMo zKoHuCEg#ZeaYW<|TMJP_r{iK%44>kaMqNnF?M#1*szt)M>Q|*ek#wRD%FR8Os&E}%V48Uj*62QnnJvye=;}ZlYU{@dVx5q_8hwv~{}aGC z)%fA5_UOnsygK6mTL5W~0+=hG^m(sVi)1~W zamWuYcY1!imF6`aQS)5KNS(*hGCs1EB5IjOlAXzat*yM%+DT1qJp@yM42{q$; z6iBQF4h2vVftC}LAT4SR;9U6{IfkXe^`VRpikPPaq4}GLeQseWc@mOwyKtrHM9eYL z!z*y=!dO&CO$6I#fEKw?Wh?M$y=JU!ELy}Qd{VC>*=D5Znt}}FW%%Isit^+ZqoFy8 zJDd%cifOj_O{Fzj$gD`b@1jMQ^wF-;U+fE@#rnxgR4xbFf3VIbP!9+_U)bP}iUP}4*M4i$@Z-4FCs4-L4ImXHX zr~5H0l5T%5T11_kZ^U3KAs#YKyvj0`vQXy+X|(s8YxvZ0<5gHtoU7B%z|*-6ofEUi z^gnL=2+c)XDus? 
zuU7|~tFO?4ZedR<_Pa(Q3J-umHRUnJe8EVyU~v;_umq>!M1r;0XMXPFZh}GNVAW&v zNOgnu%z@z=!E}9jr9}{_r9vjwR6Ls4jv@6TOi55&HL2055IoYS&2(ytMkDnqkYR#c zh+O&CT>vGoO6j{^Eex&#KG;7&tXn;umGS+^fif4Ya*$MBXkQl}x7 zimEYu0>nu5AszM@j1r>;$9XuX5lxB?5q7Blh{fkaM<^$>o`%+y0F$8=1?3!DUnic> z*c#qAY?gF?Z0&dnk_+t>=Q=2wyo|g->yi(;A)|J3Fco@5i`-n@4cn=dP;m8D5c!-g zd2^&AP=cDdO)AwGGTV7Kf>@(V09Fc7=*S@o)XA0BRX9B`YH%}P))U5W@k;Z`!7#DD zhdj_PJPJFzBcdj!S|&6-G{ZZL)H6Vb7Ar-Te6*Ex!-cBco&;i1A7;_SaZQtUR!AT` zJL_(MibS9`GC}rc0!Of1d(2Qaelr`tBUj@OB$PD%P?peB5<6d&M%%6^Sd=N=%{P~8DAqZ zH+66vwScA)E#cKbY<{bO`ce9Ps0P1hf#%3!%5@`kNIwIBy{A7|8AHf}iEhXdI^Ka21r znl|Ln?dE*PLc8#T2)2Urgi55WIq#`>Vb*NKGxN(TPK0F|VR8Qdv`IYgzcM`y&-z^J ze^Ilm;+%+`ssZ;o2j%|090|Y-gAIajB1*dyTsOHe&uq99FBTNY(ke-`C*qO9+1a9> zKp-&nV5~j-{byhfV;S14;ZeTXAUco#a>MRQ zA&-;0YlRHUY9ZDQ87=`lR}&c?90->N!bb(dLj&Ps1L3kj__#p0JP;lg2uA|p7{lVf zkQ`hU2#-S;qzP@+f$+pYxF!&u5(viw;W~t22z{AY%J%7N%xZ`9#_W03P&9NJ&0BIj zBh1ZcjfU3#c}_W1Cb~;8o)D8I2Ot?~zimU>i>-*WoAM}T4ka01!T?sa&IS*Oa+w~q zrUZu6^C(ZemXtHIJ^YoH{;Xmg)niTGl}@nPIhoX>M)*u8p;}qnFIrGr`-u|N z2%uaek^ZW_V^v|wq!Q`n)B|>+9hIpxKZWF%BSc*#i^*yw8(7_sqJdgUe@L0F!M7=N zyt`}|vxI1uUo%y(al2qq!daz))a{nGI!lI3r&mxmG!Lq+*st;iEkS zsKl^r;)7Q(Z4%u^MT?AQ(8CFW8nv(EJESpe%P^l>8P^aVDdrPQG*az{kjy5jaN--7 z5>vpgnAsjP;ee5AEo5)ji(~pXSf|-jU^v?n0$hV~&|%7a5VV;~^XITcdv=5@wuD}~ z+JG1`G}DSagupZ;5(3|gE%8N(*b=AH0!YA=EYI{oqNsUy8%9VYweeumshEp%(O4aqnT1#0-O{EBikO^3 zePuZ*Uq~62RY5z^A;krxdK0LZ-ELgDAf2Eaf9MEt$|rVrxQEiC*xOUGOoq`yqVx(+ zb5^h^@it)~gKS7P!)q9BeH!@#O;?G1QwM?;+ zbVMlr$!m89Z&O$n7-_H{G>7_h9ii>nb%eGOEDNGbM0xnj6x}fd7SR>>UPPBXff42$A8q$=?VyRDa1*Mu9oDWXXnvC z;Y%P9i~SK^YtXTOoV*myX=r$(xD_5N2itKzDFDxl6yX(?b$MOwGu1~ zl1oInB=>Lvi{uJ?FOu6;>`E?i<&@kt0O?I~AC!9ZB)L_K|5wR9Rmv6q_>%iM#0iqi z%`IrBp3Mg2^(B|kFdOjkKh5u)lDk35OHe#Sa_@RJOLEn$qMsJvfaDs|lFJ zNv2Syq;O>>+r$6;eE+k>4yJg zsZPL@AkVp665!4QTUYz>MeS$h1|`@-=0ftOho#YW5TmM6FuG2%7|HK})V1U7DOiUI zuYuUY9GCv{Nuo8el$5`R1~y+|Shc;AMq_!|7g&qM?0J8bqe$ytQCG_>M)EF@#z~1ZOri)nLum+_qO?zTWSz+x 
z9N@gM?J2)80@m43BQ+BVoSY~P!2)G+GJ(;eA&OBXCMP9$IDc6v8loKi1{#7xKZ458CorYz5#Sj0tmTn(XkXp>K&J_+3Vx@fjYZO%pJQi?+IO#)!Z4u<$i5|?t(8k6v219}Y*z07 zifh`@TDl~T?+lD@#no|VGsk%0S8DCFT`J640UqBz8hemQ2VankSPzu5HO+SuA7U?N z6MD7mT$Y!;J8d9$DpLmmM@4)&`!hQ9Gt4vM1!L9|2tb=Vnkk@D&!pYvI)?cY0?^Nx zH5K647EMl;GQG7JcOk|Yl$ZV^lInp{>A{|t-cP~@YQbNE^{ZG$h3aW2Do<9Y2jzLJ z%E_t}prBqFRQx&yDR61XV7sKp!$Q(}ySMd*rTC<+bkAPa8|DCp+Z8fTNuP|b->QB5 z-tdmw3A{gZ@zT0Z&;M#?@P3KmXh-zE1Q?+=4?Q=%Cp?`My;N<<)o8)=E8wp0qREYi zS25OLRYEC`2SnSN4YC+vsx=BV4Rhk?3edv>pq441oMoi2Y|hhsg)g{_)KC!PI<#V) zCC+9^?S)8q^0~9z;C5Ii7l>?`_5xg{Xo~S+Wgau1XyjbLd?{wW8D36tE}+IU9leaA z7ga*NG=W0g_PoB+<y6mARrIK zWu!VV#s>I6(Kk~6lGPCMp*fQejZDIllH!BRd(?C^OCA(R4ta?Eb4Psm1qXhGHeHGj z-K+i*kcS=eVWNz(icy*UA}1dfWBMH61G}n`x=-fRHlgj`*-E3IQKPZ@lg4IoPf2+MyowA|RudEYd_x8Sc{iV0%pV_ha@S;+TDw zpviuiC0Ukf7S9%XGUu9QMyg)8F{wUgPKt{Sc{L-lY{=g|ff{3l4@}!h8W7f2BZb17;%bU1rx&?6(if8tozpXmpC+h1rjA+v1$8 zX+4gt>&_%;b^k6rh4|rua3S7{lx>uv3(RfJaT6BL&9EmIXU@?vYxsX+kMq!JwFo(sMydD-K zB()imI`juiDi|4_zzsu#>-n80BUV*8_A4m~_9X0hOllTM-9*{SlGMfXL{gj4kUJ@< z1T|{s!xp7_WWJsxm5Dep&4oMbre-Cni#Z>qq#gnTHB(YekO)a_qdrDnuHoU4OH$7z zq(~|@jCwW)%K@er@Q5#$r2b6E>v@tR>yF+Mv>K57Nlx_f+>)AByC>=F-IM1yFUGQk z%~c_=g=-*!&)Enua;ZFGG*}uUly@k@amKOp_D-QZ8iG=WFR_y;acDExI|87D1KpKy@RlKf-}zI- z1=9vKLsv82RI_0<1rK*`+I<3|$N2!M&IMH4V*gUi zv;q%}-oHg5wEM_|y*In|4n*LRzsVYy^LvOT)|N)^zojCvVPLw?M!+27`up_I9@2_` zC1z@Z-0dJRHvvZ6zvAij-Q@RXX9W5$X4@inSAl$IA|veMpPq+Z^#ef++xb`sB>nPx zEB|$RWA32*X8|MSH>dl6L7dNe(@Y#fhFE89Id>B6PAnTp{honNb|qQ*SNcuz*yG<1 zk+&j<><^N+B`DmNw~@`*!r>4h!q%KAY&ZL(2aJt&>cK2A>*z_$zLsX;G^-~u`z6p5 zXZk*vhJO8bs0v=f;N&te-Q~&W58>N-j;BAQpB@?JTO7NH9^c}beA@&9IY|}P!1}Om zif8dnCFba!OCQ@&_dJV(GjOJ#`x;t6Ig~+hLS;uFugj{Li~|rKp#jpv-l4}gVw{hG z7i&%tW0dPqH4f0}HE%<0?f?)1u^S^j2c#=N+rTY$n#>1^xTAM?uhW znxJD5=UjtSV!oVEhw3cn@ntN3YBbV9NojTRG(;H_e}NYuol2zteOTdn56GP#A(cq? 
z#1m?!6nt@7bgA2b(fNiydu!ZJN=mEqsD}6tl%)W0x`_&(hbCusL)iJRpu_%y zMtirQS>vqH@wFQ5a-DvIPJc?LU#`<1(&^{w^jSLn6rDazr~gW)U!&8Hz`HYp^}bn{ zu+ACImm2+2%9ru_&D`v@i!PQ5jYK1+W&~d!sICroY4!M0mnfL{}?#!tN-~ZIGiT25gb>povjJi zv9mQHZk=$oY)yz$C(ev}F8wpb2-=2oZbhMkdec_b%Zb* z8o>!|_~YnCIc*a4>YtLHM@U>bA2sFv3Jo0@vCc>yh|!?pdGG~CmJc4qu_8QI{g_ie zzv-P_72nN;MM7fhRF822?Bqg!x_G74F9?f1#x>jsGm?uS6eDWiG9E6843Ca>G?0$PEI!+wYUNPjw%JCoJ z?4vUu9hE&15d6s2gg&(9)U`SI8U^UAw4@|ni3Rv!Tiin?aLc}wdB>+n@~|!N)8>&? zxC7$Vi6RM&+lVYeBaX9yKtggeP`ehQuL+~jl8q$KIZP2lFcR|_Dhq3m4d9;Z#2r4c zk}9prMiRtQKkx`z82EUr0TsbZO+ya9xp{I6N4w%=?UHM; zRX57q;BgoCS09<)Zi#?W0wHyfHOg5)s3PO(KS%yUywcwatIG3QQ!EWt)mkEbFV??C z58Ix88~}KJI#cP5(Zg1ye*#FJy)Yo*To%S5%AA!y&aFJp8Uv1X$JarICO$B58eiAH z9Wg1UIIHIN%7?>4Vt)meJ0SK_d~R0USd!kJ?$o3n3{p#aA@w%_QVHjidT^N3Jm=8f z$>hhc{N{QFM1xEzIyadMo_DsVqXCYV_d;fnqlELx%*oLk?(UVNA0fL>>K=+zDLyx; zk)y%U2O$qy?ByKj4kaIrU+t0F@8=o9|HfUtka%6CPu>O)3r-SM53+9_2J*IRvHhy|Uy9hGFniiS3pD}z#&X&aR0mXa&IwR@?2$}i zA7!L_0%)S_rCw8T@5yrTlm~snS48OXENac{2F{lJy!0VFG;}XYr6qY5Ie^kn#nVY4 z(uwouzB$y27j`T;3KhpH%=ve5$!NtMsTaVCdKBPXy`rXUOz69%~g#FpQgvA{L zFFKPMjcrCouSYa*iK=Ev2jEA}Cy@htP*6a_2Bze*`0o(obn)j4{%pigI$2KJEeT|D z%aMFf3Ws;*gXQW13Hcu1TrNbKsT#!MQO$={u&|u>NGzO(H+5RKSbP>0ls&gZ@;uN; zVEdrPEjS}(j>GAlgBsY}=D4B=4%q6GJ64KVXz68M@54_d66YhDP)^DHoELavJUp033G{&bsJn;_%{#|;*zsQFt-GC*2 zX333zx(`qMy8!>Lz2T4f@Wc;T;%AoJ`1kYSiGNq%hX{r(<}<-VA}hkGyGSINhxaZ|3dIA6~6I;QYy}3SgCOlblk>2q8&GFV@QwCp3OMU{0|m@{Tpjq33n{~1-zy8 z$Zn)=l|~fgmG+f`;Tf2QKRzzqlXM>Gh~SPXwWk5YZi-}zs!Sw+S7_o8^5(vtEm^`6 zt*L{Hu}Vg8oREr*c*~fv8Jh<@G71vyaf}Z-+Z&QeB}P=Hs0kY%`KDnr5(|6m)-0?f zVlPuGhvDl*PZMv}rPBM&S%lCBLn8LG&3HqNgkhL;kX)6R=TSWf*?A`^Yfc<7vzEDh z9F4?*dr*4F|BkSvLE6x|u>{7cqGXBWoBjz8dAZ1VLkZd#nAF{`{@m*!!J;K$Tg<8f zIVmA$2rS_FaiO8py)kylE34ICWJPyh+@|&dj!8x^%bnQ=Lj<@ymn=)PlvLXEYkhzX z&PlHcRFBo+rPBPa(rktQ8IGGfltRb(+do;$x7aquB9pV4uf0U$`r`ovbYq(MSkY6lUEaQ7@yk2-T*ck4x z8v!5Xtq9m^#NETA(GhVmP~b%JpXhaGT_+!8+s(r8n^=htvTd4tDmG47xVLNng2I$l z!lj6hSo{dq=rVq>;c`?OD?r4vy&agYTLhViqxnMEO2x@B#SRzk3o%aHU{Ef=%RpZx 
zyC*;2h9g*EqQ(gIlc@)`^V@Bw-i9$fM{$n<4*aErAz4brrSip*d~B$F65>ZKF+y$O zjCsR2p*p~u@PSwiC3v|En8&FQa#_EE|#x#Rt*~`V#=L&THGS`^b#5(o&fsFXS$HxK=B2a zlnyy{qAS1VZOrFlS&W@U{!P;zJcc`zXUrCNuP(=u=31(nMetGgbLr+Kh(kS(UQ%4y z>3EUV)9eQkJ7Pr5Rh+JmDti-G&Ab55Pi&H?&TBNZR3yK^UJpvZFjrBws@!<6WiZi(&jaG!l#QYWL?&6KbPf_-f*3};JTGm)*KqoBF95^_25G`ru>kT}x}ro>L6 z96|nMxb}wXM|EnAC$&!wj5LM3{@jLG6!A>(D#x?NK79s4{qP{f=i4^P-}YN)n@ z6CN)Hvqsld;vPYNyO4uRx@3&S7!Gh(>h0R*aAxl+w9Ake=7H)oxRVKQ=CcI$voqYCO;xXdKy1h3Ln_ znj^L)9oj$e3I`i8_E{%&SUs`gQglXtedSv-}Q{{re38S#>lx)bE2R{$2PX%%Iiyxj`dQ$S*@H<~eUc17nd(&2K$HSW+&Q zTUc2f>l?PNxos{JKzmsU;9WCM=@@M)x(2+F+A1DuGC0Pp?9+7jAUS3#M9SU?-t@WN zm~k=MForq*xzctEm30U%aA}uX(s^*3g9#u9XI!K~27&FKdO1;*PCKL$*Y%f#5>H2> zvN1+%yY7ngomkvZ`vrEv!+W^j!=inKJI=zYdMqai{`YSAsj;wmbtsxJ$_WZ|_}{0~ zyyEw6w;9PI6q2d$SGYk0?4nfMo`tTT9SeMC&vK?Rkds^>ns5yYER_xkp_G4?N z^gwD6a~5M(-BCX~ydf1MbcQKQTVsQs(z~3~jRAf|N zTF4gaau&{D?~*y1YwLn5Bi0znmB35;U41vgyZPPzSuxx=y~ip9Mvn5=zYhNIke$7_4V=HuygpjKyW6$4-tnx$33AE^t%#c(C<3%eodz_ z=n8bcZ`AjoGp)wQrRAOu<`brSB7%Qers5}R*5%(G5J(oW5@e#{L#SSLF%&pHaA z8S|{odY%PIlTL4`N?B4Vnt0x*y#kUF!-cJ?`+Eskp^JbGM(SGWW_n~k2ka4pG_zU8 z2$Y)8Sl1Va^vEBOrk4^@#2FoeyQ3r&%WsL<*B8a?i852uDhr8XjUV0FheDhxM;;IKR1R*x6q#yZJZFS*MRF zB&=#YJLok~8?~;-eUj^701+xWq{~$=xSM$oSS%2Kr}$>{5SS_2_RSZ+G*7M1CKmbr zLuJl2ziEWP~ck9xxl!NREUh6U7FSJsKsZs@Q^Huve=`WpS+FCxuu)R`Ytwk>7Or znGUt|ld80D>5f|cGZoL>NP-MUk31P8S70j3+x9TtQv>i$dH#jc4Wwg}kDtnhMns|- z*1gvMmQ`pSO(O!P6*fgLr^(PN;>fsR&FLEuqwhIfJNcKq=?2mnxyjRbu7$2A3*Phq zHdu0S95`UNNAtgc=8Jk}*ep1LdG-n>ei#dyVOLco+l|x{$RhXY@VM_kX@EuTvoI30 z!z2?M%(cAA+O-X5G$7||_z(d^JOfz^qQ0y!uGuq+L4bpSG3eY5Mx2L{;9{XD*@s?f ziZ8lAENSjcfytB5dzY1fUS}MP0#Dznah#L+nuvW$N5nk0!|!>RlHg`#za#pi_SI}F ztTj%InJ-D7HP2{vyOMoMV<3;$eEn7P&!gyZ`fr}w=;w%YO@ewT$LwPzcP-rci<(N| zetheC*v>b^qeD7fEa<`q@)BzHVr8G5{wHq=D;-*vNMrdcBKED-U`NE9DG#rLS@Vvo zDRNxVfJCH+;dBx@1r0iq>h_)FF;x;Lj%fxb{~z9R~;KcoexsZ>y!JNPc(v zK_rU>&OUKAG`2Fwe?0|=uVY~~_~E`)9g7;tpNkN~&8uJHh0kN~B*X+J9>Sjs`132t zi0_{e%c+PFIirWuJ2x=HW@jIyDB*hkbhvpt>otl{1DGQg0IWDL!R3m6Hh0JpXJ;RbKWLa 
z<+@qJmQ`D7>DG_f>#r#|@~7A&Ywg4B#@0kNIymnW%(?N1NN!T$QLY}`Vv1NZx=~6C zhSI-7e4Nin-VvIz?ebjXbm2*MY?gQU|M6W5)W0Qev-7&Zv^p=#bDi@q1w#YGc~)TH z|0!u*&g1(1A^p5tKhJ>JvGHpKuGN_)@td6+^z&!zF5u@8`gt(nt))`NUXr)VIZa?& zosl}vZ}jsQI{rvJS(n4)9s8d;eJ_dca(0#XHBMi?>pFHxJ8pJ1>*s&Tq(4J0-NN70 z39rf%{-gSN3t-s9*DF}*fq!G>%NT*0K7bceL-`?~Ezd3|rQdI6x~5{7Mm8jX>_U|; z95^!oI~rjpF93Kf6H(An@@#bmt7p39(2qz+x&zM)e>~pDh}Luc@f?VWYlY!3Fh$WS z`1sJmyir!(XpVN@lu`{UGtC~p>jdsS^XkRLJhRSy4SV>5qSR`Nco8Ge$w@5tB1ZT6 zFJ8Y)^@S@w`awXsnTsHoN7RLjDemLyV38l>F0PJsw{elp-N!|+1OVLdMbLHBLr^^R zRIIy~i&*v(4eRdaBG%o`MNsy00f8Vple$(hB|Ew*R>#Da4l=-D`7z(>F^CkHc*UYY%}PiFQ z`@+A}UeHOB#IW?fM=3rF+N|^`!A{!oK#NXUjV##5YDqBf?#@Y~j3g5;+IKIt3-RWq zpD<^^4F_`tW$NIFDp9_E8D2!WpMZA3Ip5>c5BBm-wx|#{3VGCeONgSslqiN&!4|(; zpmP8{DL~6V2QqGESM_Gw-=q_`h+jIG)mr|g zMtoLph+h}jse{K5F>~~W*j9+UHDY{TAraGIR=VC@Gc( z#KG%SHxB|#`V%%b?L?7keV304D_g%R)cWJpTUMhyf>Qj}7pP$C*OLf4Ie0!BFIR8Ma+Y+9h9846bXqZnFjI7GMUP4G)SY^*{1b;dAWtC(e$L5xgl=+SO z63Mil74CVm!Hgms>WgN$ovB%3+^_Ze*?DJa)LO2X+e9dL*`ABiFQQ;spM`4%Zh*&r z9`;b(52D&t*jZ@jM0!-2{=|p7mm!}Hfy=~2;0-3*6=m&ej|B5;POncvG_(qRtut|+ zyF;A&<;IzZ=5;6#9S+y9n$esgbd7_fuaY(>h zje2vN(vz%%fz6j?feOmoqQp}Rc9b(l|Oa!Q6m_wA} ztZMucMM8>&N%rgxaF$X`ls!pt5pq;oin~gRW2T&~4@+?#%rT;&%|1Zu72imh^Q zwwB^3z89EWih}id57~_I1^8g>JU~9VS>RnAMHd3~nH#kh)CA5X;Wf~J&)>kc;7ud- zSHyVBJorF!4 zfsO5&yc0}0-GZXq!!RD!ae`IdkctnRBsXExK2tc$pnkZOWmjRfHT)ryLy@$-s+iv> z8zsBMi4NA${G1mOh!Y6wFu|0CJ#SHec}ZTLrXXTjtOYh?5eE&x9vm?(VhLFGAx7Lm zNV~~lL6+iEtFRUlSVNu(cB^O!b9k9P}kK{S|9{Vg*7mPbIkW zqxB^C&Kw{Bl|GjDrP}JNwzga$hGgbraum}nCB(SL9-k~gnaROPsN^CE4PQ=**zd{*eYktL@WScnx342)Qb4?f(axHBvb6BIw4k3l3@&gNg=iu z=*tZiAg_{C-5}71=7~~B2p}%h91*d3JGbnZyZXlcf;ig!VUUONzr{wg`#td(*pVnN z%tvYRJvIzU==!56?I=+BkfYT_=tr4yVoNExe3!ZALh5ECbrV?v#zODL>`C#_!#-MG z2bKbfYj1$jL{3KQflg2L@G&WB?FpmIMl##kda2BKGxrx}Dup5F%#rbkJ+cni3$-Hf z2F05Jrf0U}0U!eDPu*l%I{_d+Xj=PgogwG6_E6Gg7yM@c8eetz(Zl}D$s@TIHT~JG z3|aQbMo{eztf=!`*4gXdKSqPNthHvBgI`8!U&^djg65dU(EPSP(!lR|xEzL3z?%hv zO7=^(ia8Qrf8L>VB#J~j9>eClD;>O)R-^-WEs)LDQdYrv`w}jlVJFyM@lO2_rwU@$ 
zqAW#-vIDL?{&f3#QgB7gP$zbLIxiW6c`RZ*a{2}K5WbFRYe?pPLT~>@mE6E ztOml-a8*(~*PT&pMX$zd(a^r!kOJJOia55DIhzOq-7mz9HtD!c69EE!F1*5VTD-BN zW>WuzpnQbDJ!}0VILri^+SU5)O6&g!-Yif<8{napt@R5Ki+U=H00O2*{7i9s6Gdoh zd#2;L9_YB2&5}6&&fSHMC-Iw2$1&n(n&uxB~Z|I9JipP znuo20Fjvts$KWcGsXAYa8fAq}cQnW5zx`|+W&_0?V;?Jxm^#F?#QQ*H=IMcmBb5T7 zitsu_Oel~ZRB1iVY$3%s9LbeOKF$I>rp<(7HAa;-dK zQcxEj$f8?Ewlf{lX(ZEq5wX}`(CIK{%t2q^zS$lZQJ035_VeN=t~^Q1k#+?<||#Rtwa=X1Y= zJ{DwpEdTwGmg)~gA~C16h2pnzZNvhTY+mCX_7&V9Y07FcYv4tWzSG@7e01D5NlPd)PH;};xYhNG$Oeq;0l%=y- zl=TQL)SeaHZ-^*|!lj~xqWddcIM5bfLJG6>&BYvPTx&F}DTauo+$Z)YKK^|M9I3K~ z^+Way79Fw1sA2s$Rr1eN$-hU*)<}-(_>ig?_gk25Jus2g9q~Xh9!6?1{j2Sz+;`$< zq-=^1&o5Jw@Q0w@l|h~48&ZiCSE6euw_NU(1>AD6egq>H?jLJD5m${-EH1k@pUc^y zt?XwU6!ZqYY`sqxa;QD(eG-#?*}ETDm2-o4Un?wMl*@)x`|_DX*JNVSQxDwe$BvUB zx91T;UCx_}XnHzr}q~*>5E^SKbpD+oKYqZbZ-Bx@*AKb7!*Ol;_0QYIBY!!ZK`K_lR49S z)kwZW@k$(jxI+C`yj8$Qq|m+=Gd;k_MelT|%dEl}nYYV^Sq>f~XNcp4+J}Dm0uqSU zkyew|nI)H>CK+bCi`okIswuQGtOgl2Et!>s28YYU2$5N1X6v0JIaNQk#2O8C*F{Ly z-8D91gOR)#4z@g9mi#j*@y%jmkr=Enuhi`qU{T)!;XZCl$}4N1dM$1wzse~Z_rdnX z5V`H0#)<*yjg+~1|9uRQ2j8F1c)yb}7=YhR-pgJxZl4coc(?dGJe~8$3jY!(hub%- z3y!8wZVnZ*4Mc=C6k}?xUl}a1?CF|>oWbz_hRR;ac(ZqS&$Kwu!$>-rX#WE~O%k%kqbLf6J znJ9u!GO6rk|Ie5fF+h@mVngnuSpxczX)BagO*hpd;824`mqEUW`e z1~A*;i(9GG0jO=%lFV4V3TNIb&br7|oc>h+0}TQ~Drp13`U( zQ#D40F@Ou@!6r#f6SDV>*f)aeaOb~EsFtZX7`pu|`gj(SYIPTlk-AFq_D7Agh#n+F zcai9QdCg?~599ZPXJzr5-(u%>uxE_!A*JVL_(sU4;P-8l-Mp{X_aw8sgUEr-{Z(pwikkmv9Vvuzb`e-_ z+YhKC7CsN1-Q8x%$nzfhSpfczj1>4RDi0?r6Y$096F?=k$I^2*Y)@YRjR7t@^ZBw8 zDvB?U@MRcenlE?pWe&6&o#srwd@m~QznEmOgFDwDP7hV-sW4iQ3A3JZCY>iq5>9vE zB%>s$!AoZ@69_$?3BL3bm~J@j&_<4hA5!`+zFd6^9T?IoY-8NmhkddxdL*sas$L?P(-NEg>OG!PW~2 zvIr-&k-3D@vy>3H!g4z2soUQQs@#J_nL@>EtPQq6@Ucj|q;JKLff(nrvmu-bRP8z4 zh(`Jxh^jw4mQu3LfogCrLD$P{&YZ@jFH5g+65=;#cx3GEM;72jyg1W$bC=gYy7eX! 
zaU3F|2*)5P)|=`8+u>JlsR(WD;c`xL#62u|hhP>_LHmY9rA8t4L`C|~_A}jP)^=k0 zJeUj4#i+4!1-s8!hN=8m7^fT2lYm-5 zm0JQP2r5AkmVj|+X6I&BuI_ZI3{L^QbryA#5|dv5$nzO4fxV!7hWp1{ci!(#^W1q` zo$VN&ou5KhV2G7~PUl@Q z^k;OA0lC^&cOp{2B0ka^-Xu>HiYF90De_`sI!|HBBc{_;cqqd&6rj*~y$TCGSEz6) zVSY>mzeNPQ${@b%#-9NSO(8+PtrYSj3~xr*`9wtst{*F0Ld;qfFZJ)Q;-$`8Stre- zO)4yTKTu(Txl830x^GkAY8H4)ApVakEcN+I0METDEO}2?Fhb9a0L=9a!(H|mq;v2% zTt6>TFap0P;+#rEIK@%?jAZB{w7xTtugCKBXZRtfj^N8lOxqa2kHeo2`LhQT-a?4u zVm@EKBR{+N^96r4;)fVMmS>mqj(-0?{d@&ar;R`V(lJf)>~a<`@*n)6{1C^(e7Rr0 z%)yg|-p6Zievaf#GgD(U1OTIk2oNvh3uQbP>1U6t631|$2-$1%6C3PxZMDBT=Z*m}RBEoFN zMj}2xfMgaT=+k~KPSQkL`_iYq0#|R*YnD&0z3@q|Up`Cplis>~FV{~x(PFvR^=rQp zLy1fB##2#bDQQLd9e6>4Nb25n=f4?KzSFMQVu;a(M%wwj5sV+JB_R$;+(Ey9$W-!l$KE>USh%2uv?W!9TGWJXM?`@;-^9_E;$M+EfxAgS%uRf$pbNcD8<%t!z zJlGo0-4tNFb?|{tT;Y+vQ(#x7+XZ%Idg>OH_u?)-SEkp?^To8I-+$Dt(mQk+Z)$iI zzcT%rDl`34z}5Ty_Z8>B`Mw?nR*a3B-&dSHCNllAHZ|tfDQe32unI2wa)9T5f7cKU z%9c9(Xb-=yEW`i46}|uuO2x$gzMNw+4l}veshRM-*o=vbSo0!G5sR5BmQ1wd~Ov_E51>|h15Wo>l=`gp{W*GYx-Sa>b8$%)KkUWj zeP~6Ah31(`zb7zme?UVKN;%!%0EwkHi1rjwR%XatKb+a@M5huC5nNNdm8kXP9In3q zJRB0Bo$k4k>@CO0UatEY!0m@96Sco`_@fR9#uTJ*|Ys7TAZ(xqz5O*ESYAyXzBktAd{&|5_PWKJW(Hr793URkaoaJ=? 
z+F->`INhfRl)0c-$8>mP)9GHjU2|L{d(;LVTvbzC@Q1RVf&{!#Xz|7YC)8br0Tomw zp(?F~0u`M6_7_UT>3*R`I588&n}>m#OsD&rOsLr!Y9i_buF^Z+cR}(x{UsWzhEU>k ze;qfw0(-Q$AnKtKuAOx`SCms33yWjfSL7sTg`r}mVmuEK0szasq35C`ds9@_aLbC64zfj}MlA-%F z*kZOUoQt`O!9NgIm099V8nzkfgk>K70k$|3_S!=fOtU~}dDti$o9?|~;;wuvx)bH$ zyb~x7y*b?v)kU-@?n5xLDicMaShEmT*|MV^-GS5n_7ae(xo6qrC4H4d5?l{`9LSi> z9d<60Wx6Y`mh4LvbLAtG+Fcp9Ga1QN)Low=HvdPz`zc^g4!`?L5Dmrhzxmx4<;M99 zTqt|bJ z7z^|K?iXsDL$yqJI%t>%YX?mf(KbB!2NBKcK!Zw%a>su64}&o~_PcL|+b9M3j^8Lj zX34Hwci8W~R1@tBa#l6y;_WWJ*|V!5eH3J(q!fzY$~ZkT8#yY$K5|zHGW_nD(Fdj= z$9oXuYGjY%>r~P>;)AON`Tv98{R(g0G-HneJf5t1Ohab$7*?gf$WW$!n3wQhdj z@4h1}5Z(g%6LPDxe5+JkA5WJ3`>5U5I*7lYP{v)FVhrS3Zc`kq(LW$@CNL^ z$=V!#_jk}CEOGoHA7f_;ca-2Q+wcCy+*GE8-8ZvC`pU`m)Klj`kRX#FUlD@u%t3A%sWswdO0R9@Y~`PD;517~ z@D7~r=MsgbveUcvTkbkr`>g(80Ge_5L94{+{@pUHYtZTb9i#>}^`<~EqHJ+Ap!0af zB=9mxo>YfLECoV$du7Z8x*-)8yzA=dOS1!C`{5r7i!X*SyL(wLz`a-d+LzOuvJ=%1 zHqY0d9g>3)WfZ=~^SLshT^#k00g|YX;gna*$|UGXNw>;?uf5x0!FwVn?^SPhZ|i}t zJvF%JYd>?iOms8dT&l%OSvk(o1(9BnIVQ9R08CG4#{)ow(4Rw!uYEs&$QB9IwZQIZ zHoG3pEIUNZ5HrJDB+Vj51vTr@=IU0*9(?Vq$W41>2dH*UuPk5t1JPYw)>>oYMruE$ z8l~OMG2xO2APvlRBG$;dOoz=bDnE0ivlDW0(EB161~#f)xuAQ$_OcgJ^_uA9pL5YH z>}7w7^0Ie&^s--0N&+2Ud)b%Ln>lfOV}}%eFS|yDmwhWWgLb{N7fgig?AXixF(o7v zT&PCo2~GKCO0u}GVDpF#67sUAeEMGY6iSX@0WW*Xx>y7mUiSM4-X`Xkp+@vTMt|Om zJw%RpVJ|i8W&gc6Ig6M5^g=@EJK-la?!d)P5tf3lolI_+2in{W6G^st)5g~hX&`|@ihMbitI(XbDdwjVx}Hnrz5lsN zr97k57v7QDKZ)P$Qu9@*HIS*E-0EEg?7P*Miv$K}e1!wK`Z9fH(=3yuobJLYBvsmv z#i7t*<_35TW3i__NBds}IIP4-lFi%a!`8)VOtx>X!gmg`(#~NK*66CJT|Xa~qxRx? 
zQb1Vr>GY1>@2Dj}Jb>!?NF7S#zo};~BN^>aLh6VKc6g&KKhO!^V$hv6W*_f%oxcAK zQzmK&1TktM%T; z4vs)lWW;TXL;JytB4*vGIAm1|htfFGFq=VFtEo@j`zp-8J$apYRxeQ$sLK(Qk`oWw z*obwLe~QU?5_FP#VKI=Km+@6J@+r#i#l=Rn8OcZ3WcK1_l|k<0<-vJgSbirhiBV;1 z@s)tJM(TC28Hd&X~2qnPzX8w|e6)J7fw!*%}|O8GEZVxvUxAKwd8zT>(uVHa z_igFEyZ2-F9ZhHP3UjRT7C)06+%?|9macqA!Mab3ww`>{k7dhw+A8Aus81@Ca;>+d z+{gN6`lvrnx~vE4Sk(~^l;MGoDS&&pAI0@ipGDS2)h-UIgsHdQ#2{wvqdpO%n(|TS ze$ptjBjl!DuKm>S{}e`>>}V18R#=*YA^j^u)vw%#qhVHcLnL4+{X z@!TB5b+Lonv!$slnP_DW&F=#OV-F$3Z~XvxEYhYbkJ!EWt*@fnF(e!98SqM4nxjf z$B3Vuo{NXtQO0w8K9Qnli6tM>gr61(qdB|EY^DD&N%v1n?6?r(l}HAM+l0gLPv0ND zT;iX;meSIzfBN`0h0C|&7dLC%{s1ySpClXbbS4gARg}KMXX>uNbuFI$8nz|a+aWt;2<{z5BE8##IJSC-^7R7b#_Q z{r^d2XF;U0ZHa2iMh+dg1~|nSTtNN?`JVh6yr#I~cJNW8Ee@V(pY_dI{_JX2j3!AF^${~Y_3_xHpKVW{h=#!Fz7CK#Z0&P>Km{jQ z_V6h~38c zg+92>!>Fws-}esEW2%~7Du*+Kz9c^7sK=JChd{kOgd|@+uA(3Q5A+A0IXKI|I@ABY z)OVrB!nCT)#)AF9NymP^8zKH780NPynvLN^Sr>t zNYcv*=oHF}v4mW9&kGzVd2^l@_)eHC=LKTyJ3ml9u0C|f{9WL;aTb4@pb;|p`w-%K zl8IYf_~P^cbzWenT-%lZ{fa~2e;+m9@%-91!XI`iZ4L}~kBE7a)2%(hBvZMyyWp35 z;8`*ths_CIoZby8**iCKGvr`2=qED0v{TQ*@up5K!3TU0eDHU7ib1HW|0~T+$OaJw zQ$oiu{s5I4wMIi(WIN)YiZtBDDUSH=G~yT^K8xh*gsp%tPJf6|f_{9*mcnm2rgh*E zd^t*c<^PB;ruNF;oiAU=%VsB!FMkFCG${mB{22yw(`IQ<9D5x<&AuSt@ef=xf| zIpT|jr87Sz5DgNg*4Wc;gQd=ZpTG_3B>sF41?7}6^d&>bFqB4!qr)M5S$!~mTKTgl zUMJN1zSHz+cm42bV5EN%@WVe!5|tl5ho5v_$Pa&R$PfQha3)hPi6YFrHN;(`x&dt= zi*jt%%!yg0G*oUse08=@tWe>xEVd{=d~z7WD0$_MVX3bX zKYVGWl}>hTdXiU}11$iiKyAVceZzbb(EGzA6bT6nxlE@dhGL=g7eiR)Vv?uE-og~B zC&jQBX;{!@Hr7V`@cEJ+r~U9R{Rw1q5~}9=;ok~L^@qnA3f8OWdCprU*>31Uc;+Eb zUzP@Nn$?pZ{_lkD9{ljjg>X4VAv~}H@~`~x1JaMlI4k81pWmWbePjde8}`G08)PZ5gMOsb?Xmk~l;BwACOuX-jH=RO~a=33e&~>=C7=m37 zWUbCjjL6GM^viU-I8RbS9>))C$8z9s&IhxoZDy-DuDm+6DqO|zO)4yNlxtMDgz<+V zpY{iH_EX^q<99{aIhlYcf7_20@-29}1%9H!g7960owrm(F>}A7!faY+g$hq$_-nK) z5Rj=(nr-~3N|Y+BSBZjgbpWuLfKB*WPG?JJ5UErbdDj6C+KrENR2>e86ZPr9>O}Da zk#j2eavXntg&&kIl>8x3*ug4Zh!_)yKShOw+HVh%yu!=PDqPK?K1A4gS4Bv5_Y7p} 
zufmdPNdUnn#b~K1rnv-o-*Ln(&h_#q>v@MV&I`2$~mNtjC*ntA|!&edsWFy>VKauQ#TmzVdP;dp}B z5)zw+ggRdp<7YE|Sim~OxK()>VfvuIzS{Fa=jWE2c&$4BqUJ-@usYyxF5a?y(hHW) z68)t2D&NcXlU}D0EAINE$DvZ4OG(5-Y(F?x->up)PUvBc{fc@eo4f6{{V}vkA zF!Rq@gA0-BjKdGBQN@?j^vg)ToPZx%BfsR!F#R%wFN5*JJO}b+KmB6xWf%R@hcDlw zF$wlH8@1?1_-Wb?KO6Y^G(-OpfG+0^4Y-;qt@>p-UnoD$LW${e{;pFV!;>|CfQfVY zGn+A5C)6@xhKdl`yq*!)^5;s%BIBiextKra@nJlWgjxiVe3TD>3ovU={S z-{;Ev%Je6APj^b>%Jhjku}i-n^a?YrOdl=JOvmPsA9MPxRSNc2z)q+i8!_GQjSu}0 zhSpf+9KTrh!c&bOh+dt@hgA@H5&N>{(o(>UvPws%HsUHtEEK`Y6F7VLtX=!Vkpwke2=+sFR=3jcoL9Vdkf&F?4$>=L2dOi3aIhbw zSG;2#yKRDM9-tPw+{otd1aGoE0 z(htr@P@g(@#t$y`gU|cHWlJkSr8_`$(`u+$G8NPuk>SU{MgBU>=ZwC zsvjHoW0QVtogaIv5kK0qkil>KlVvKcD^6`j32w$kA2>cZT4ed z^kdun*wub)yC3_8AG_X<{f{5p;m3aD$2xxOXMSv_AN!Rb+wI4G=f~#F@Y<~(f|ydU z2@Cz$-Tc@hKXz|Fw%Ct7(2p(gV+Z@OrGD&CKUn4m%l%-)4_5iXaelDc4^H%hHGXi4 zAB-cYRZvo5IWMlQW2~z2?LO=rA9k(}d%q9c=)=zQVW0G2=lihFXjlx694}Gm^Eyk+ zo|nhDp*_D78>L8S_7S|O5#YOdNN96$VBsFG?HU1sZ}?f(>nw= zJAK%0AND&RHZS4vs2_s5j~Dx~2l}xkey~gcL?0aI$CgV_YCF!4y}%FNjvzn70@CYs z&~?T!7v_P1M(W7B#MB@+W7d4K5PnkE?U`PUmYi^mzdniU48v|!FU^JlV&76)X&tUt zI=v;FzcIj%95O?~%MOAHRbbwfTfDLxuIaxMg>Kmw5}0Z$_K-YbEQ ziR4#se=4=-9F$`%BlV@)Cj|d8Kj!$94~1Q) zW-Q!9TH|aN1sq;=xe>rI`-yfyX{N=@CzuDj;Y>0&FTuJzx0-I-yMLZjPy{2}ZfYaa zOg{rDHlfN(5JF{~vI`rJOM0L5l}`$YXGGac$}!^0diY9~?}7pnZM#ur1F0AT-mB@gae2 zB89OLnWZd`n5Lbe{h}bpxPWM1oHc=0PxUm+XNH;U_l& zCNIIZJe&GGl#KnjVvytP1#v`+7c#dcQJD?Dk03}0ezTHcw7vUt9#d=D_hZFcR&g3Q{S`Cn}IB33bv4O~UpOU6h2dYWnnUHc~>EM@iT;fOAWm&B||i$yG-! 
zDQWtU447?@@Ib4VwW&t$w24m|UAM21yzv_HI#DjC_C*I^n-+DQ$4MQxg!RSTk?AdM zkO+2Vu$H?3=I8n@RuK!Z5E9poT&?QF2inkE`7M)*3U=LP@NH>@eNcavCmY`QRzL#qtzZ^sHi<0RXOu9r2l6IlZwZT}QZ+51)s+6>{Aw49 zwW^ltGNGLHlv+qAc082SZ|V!bZ&8J2%F#81NYt_Vrp4-4xV@r9J{y3u_P&M;sH}KD zDlg#X8X*|FUL$jz#a;Qv5QA^MX+DK$+kCswR?qzS!kY_%BMq6He@-B0w~K&V9elJV zm=7!g?-okg{Eq2Tsgl`H=hh4!2?)CFV#*b`EeRrW-TPY-#Fyu6D-cGiPJ~64L^BME zRCSmXNG-LJky>w|VDodW5U2xne}#`}v61>ala*BvvEWKRq&;$c4HnjoTc5p}89RAe z8|d4^=#A898G%n`8WpCWv3Ao$G{1EuR>ZWxz+@J zvW_5rh6v87xu}DM*=kjr^{9*8dX(h&@*+i_z9{;ohU`*kcAmq{W5^PzqEDZx^%(8Z+g`YAN zvO|DO5!EgFE7!5}S%4+BL!yZnOIf|tSzI=QpKvDEU-or-#KImsKK`U@>UF_q1Vpqj zE3m}xo`*7fQb^ zc;Ds`ohIE(!hR7P>Cl;j_>UiEaS9kWC;t@fHct2*Hn+ZOB-jE;!u`o_=84?1o3LqJ z*}&x-Sut)G_o#eMP&J7LK#517CRu&=VO5;;auj#DDvqecog5#`K&cZu!lt_rP3B)GN7EU@k}={uBshxtZ!k_a-Sx&IjQ-^*neyV4QUI zl$d9tNpg(A&O_u`&N28cNorV3Zb6KR6sSDowuQ;Cy%=xh93kfu!kn!j8ajpR0R(p` z1Vsvg8neTLVq73*HmmVf0O@sXvwMbUxj>;RR;Uzw7}ZGvv7nREZ_XA#8LH=MRGGu< zet^f2*rrrCSxx3=8RZX>2Z?qCMPNd?+69f`3Dmy<2`i~OFvK#KLMpQ!C7&E(Ycht| z@3+4XqB+Eh<_w~(CjpjHW^kzo#+ZFWc!>RzfJpo6k#+$~cGmTw>FH1aJt}_-SPr4E z$ff311Fgym_cDG?2JppWtrlsws&Pr&>D(Tck842yRN&l7vU4b$@6_@?$0Jxgb0O%s zI6Y=f#60-sLZ{|16ks>a??IowjLd+e%Iyi{@5FC6Jq--_;C%^|30?Z2Na^}6QxjL| z)+%h&xdazb$W1Ptdx@f&9THd%39;=|l@`!q?7o?H`%r;uVugACJGIz6DKxCb1_{^# zNrGA_`cAkg;}9<~b=x9Dz{mGf45a;un(C@e1Ut~SuLz8^J#Uv^{H@@n{8z{aCD^#| z0g?|gluJhvmBn&=)wy1N#}ws*44$Sl7lt)iSlM+58$9XGvc>ca$vs_) zj?1Z{`s70nowoTtazkbf-n_z8^NN~UCN(s(8m1-H4C=HCC0Wlmnj*r5J93%pc6bO+mBN!t!wpn;6huRoUfX=DR})AyFJp{YcjQuUYYAvuMeLpCYylKasieT2 zSG!+mGE$EN9-Xfc4Gx2o?XEOYc&2Fz)=0RA@DAina6g_wP$5wB$9^Vb-Mt zIdzbH|Jt-zJ4`~{IRb=)sxMNofrF2=R5Y*2gY|;?Z(DQgwpt%A*z1TEaCO^IXU>mV ziKaX_wQCb6WGkM7%+PHSXrrD$%81`?$_EB$p-6AKZBKi<+)K)12CX6m*kfofufUES6#ojR=B?ia8Hk^~)eY!@3yMn7)|nyxTOduvseC2f}} zG}6{UvvG?w24!6qKbqwGIQD{gK#Q4Qj2^a;6;rB1R!1ORN-N1CMYe*{YHh-3^L|-L zqweKrW@OzH>arXnlgoA+D(v2|IvZuqBON^}GJLWdvX|31yHS!{MJ6>3DzeL&jLENX z;I3R6>`X$KvlT?P2K$vlP_Gc=QeB5A1PUTsbq!Dm<|+iabk^rHNaKPMrLzQ3OtPMI 
z7ECnitOzv$RzC2^rL*P<)sG<{L}Nf_F;`BVH9tdVT|tq{)>$XAa8GBE z#4MdPSU^HLOK3IiI_j*7Z(N zfJkkBl|fj2`)i!jxj%_+s?sV0Jtm~1QY6B@xdvLLCy>jD-)=e+7@$pRdec!A3fr~7 zM)2Nr)B%cWt)s3Dai*V23+O0z!Au?XNu2d=Vuf8D6_G;2I_hNsTOdhLF7;nz>!|q( zleBM7I%U!=KWivPh;`j42tZ zhV3VlU9Hg6lnN|w=$0x4rhc=gu_@G=gKZ5J4PcWum`T!5gJL2rC|wGZrDNZCxe&QA zH>+d!!U`N7Br9_+b0W*YL|Q=+Scv!6^SmMFAi?)YGZg{PHgE~fLz*!+YtK*wm#|%_ z2*Q@ro42rnuJJ+=Blfw4A)#$jAky6CY#|eJ;1M;C$!0h1$ckABVFpswXRq}<|C9mz zHWt)V zB2SYdXA56(X&72X%?0u0Pp&{p*K?;I82+l*Y#@rWKeaHagc$dVD5fQdVpkp6k|2D( zQ}elWgeU9YEAAwpaJMy zfI_MC1a%_|vzs0Ul8U+p0{5n+&Qv(D0Y{w7)SIFzRTWW+s)3n8&Ax|94(KZBe3n!) z1lc*29BC3Z==tJYDLbsM{!hRbND>rH$423A9a)w+U(`w_Q&{g)1&NgQq|T-J~j~4ht6u{U9x+^n_p zyj`!$2#75x0n9eMPE&Z=mEDxf@EWcVD2Qys>i~sdy+V-7@G2091=Z}o=4=5J!^>Tv z%Ulb4`$o~XZDc7faPm}RcmB zEvz#`-+um?=vzG`hWg7G5`bGv(U81e0vNN_!j2P=@LCw)oR>FqkPp}AK!r|C5!I3y zK)v-BD8L#rP;C)J?D~%|Zjt=lOJamAa3+!Z97|%0|0JYqlWaXC-!sXU&I4PV^NLWE z-P9@lKU?c1ksn&G8KBTmJwc6UDR$H6Kmx5t;NG;}uNBS}3MVu5ru7D?8gMiNZe|L# zP1h8uU_k4!duJ-5cLb`5^>-~aN6HOryk!EmK$4(~HaWsnD%>oM=Z$T5D%7GDw2sS? 
z%NrG5kznAxOW|d&P+weCHd|QJQEh}ZSU@!OwK2ZFlfgcxzU#?o`{D*+93?|-&ckTC z?0ojeMn?-A@cAaXT(Gr4mY>AZi;RyG4%eT}Tu=aGpu3+j>%yym)3$1G8c?npnvgZq9yp<>1@=zjs zcw9SlWBq-+udZb^7eufy%^OU)l_y4Dw?0@11B-t@;Z-%m{m;!cnUgbL^i9*C*Jki4N{!bcR{q>R4=L7W5E>Q zA#4$9fWe?Auo#Q8o9+OXin=)h-P>qTtg#MKSedQ2;ozgmQW;jB*qJLN7>iYUU_78U zLJNqfphZ0)pCOPy$0Wl%WE z4U1zPfnfx5dXCNzFn+0CMpB&Pl^-YBjwOc-sIauDqU4~T%fB0P=KG# zM`VBY`pXPuaob*j}=jB@=XEMY;rB=1W~w34_mT;&v(9jKez7rmNS37B&a6e zZPLd$`KCgKZCL_ga9%GUFPl0fFLC_pH@y!eFck^ho8q}%;T)lG zGE;AQr&3i!PrjKc)ULl!$pN(^T`R|^vyVVF2^+N9SubUW)lQj!Es!MWgYoZr`XI{` z_4H79rD&V12Pi4kS|_WId>}9jL`+f2_wPt4?5GuLuOpXb>ME3Qx#<0K{oddE9K0u+ z^kTJy;oGDIM)J9_Y}7;^?qi}x!etuRNcQL7&^?XlM!uHH(ols^X@L!1z>9=amx)!G zrKf~TtqGz zzvtYUH}l>L)&BGOV+;wPyvh7q0?K$j}LJ^oyAl&C(&=1`r_-3|* z?Nop?%TCSCvQtAT7P)q6p7@>qfR!X-wIDi4_^^VIIq*1z%&?uhi^;y7A}2C-s)?-} zb2&+f9ok=DlJF%?65@WYXP#k_HGNTK!>gDJ1PWNGiXolW1B>u^Z_E)kEV7c7h|XiB zj$wgr^K7(DV4fV|ao#Dy4rxPwijbBh+e#5r#!3+$=Ot9WbEXIvWJcuNE`oL|hPbJR zL}j4q=>;owcp*92UM7jTR_ZMb^8i`{Q20kZK|RO8pxeGbkibe2cuQ8QNpYU8IGJfn zR_apXR%ZaXnJHqWsx>)er3#2JvosJ0<&gr}E^IJXs#MC3TB%(HtW}a=KjytD_CpN& zkDMZWOC0%jkx^r%ax`SVz_f~xqLj0>6sl?E^Yo3l0E_uNN-3NT*I90a!;B;e*r~o6 zs;Uw%XBEgBlK_t%)}@1*g2<>Rvt?Tb=Rp{=rBzum%u&zGA1iW9i>WVTYceC)%%a3U zI84EYG13Oez%)etqNg}Ez$BSM5x6INbEn{&*%7ig%tdg&iw`ErmS@?Ue~|UL_GY9E zOniGIT+CjSI8;HD`^`dT*xu~HWZ&LMOCt5B`bFhJUQDU7DD)o!XhI2Q_JyRgAnoC#8P)Z&Z~uvSTeEjZ=%pKNiq)q+Gyd$KrRT+8NZ6){CA zAOA~A5sRaKQO0ukCM6SNW(R(XLzHLc_Ii+DPRZeN)*X)cvbWq~_`oMcIP7yvvG>pr zHcXP7#hJlk!fb0`ZUl^3IE%S;{f)QeU9CAp1x$Kd1rQA{I z8|!L(g!=&@&g^xGqiwVwTOy~|8Jt;DAya|b$ozO~?TcSohc-&qJe5B;18VhOhywyF zx6Uv3(c`7+GM)@7GBLX%2WD`vxByRE3oMRd%dbp~moruOvOY*ZheA5cy{pD;#{RTJNU4tkIlw;e+DPW>mZ5iW7eVdtH90i))7l_O)?xd zN0HZ_vlV$q0@qs?ER{|QxlDh(-a2QYm8{KRB$eNPfh%s5x>9W+4qm*=juYlBRq9NQ zYqF^;i{hFMT)|D&MB6(O9vDNd*-q#2|PO6$Z56Q+?6UF!Z=UV5X8GM7pbc(U__6I&lQ5&%Y zI0t3pERN!w0GwVu zWF&*dt>!N2xBc1h?!c`}o{J`ccgvfBu{#qF$SswDrY&LyE>*}9g=7W;IXfG2_Ah}U zl^`TR7PR5VoB1j&=5(D-3h&P#urpndU=td#yN%32&nGwmFqu%;a*1TUA3FQ|>+i>~ 
zOcsZ`uRCzH_tMGl0Ik*{geWeEw7%*V=Zu~M-fS#b>ZKrLDnls+*){V`EHbM6KGP>vEP2X+z3VbFE?meIiCwSU$D3&S^99dBuK-Gu(hzw@*;Y0kO zMnE$9Pc>35o$$?ahvg-RA$5` zJj#@+iC875NoM^ub(_rSLvm8L71!(G`qXXPo6vZBZV7^&x9!XbD3=TkO@9N_jYMtE zJZ3B=WS?bH9Ia%LR3y#~D2Bc0$+<;`jKRr1JJ4fmgbxX^-emv!4;s+o1Q_sbW> zYWfW)lnEb(ot22iPYf0v6U=As_u9eI{bakf%1KH zu^?a0Lh*S(d@m#BGz){kLs{?_=4V#=|Ew9v@tAWBI4(!cx#NI&xkErQ@Bn6o#P~B$ z85Co~D0s_5J0VRz@9C4}o4U&L5F2)9F0CX?K>^FhED&1&=lbxiTGYUMbcM$i?h=Sl zj_fSWZp7!tvFb8ShL1Oheuw4ew@iiyiHj<$2F?`Al4s-Vc?wkmC^R*Jjm(Je9maPs zAR#lQ3ds!lWTyL9C^eNJ6q7|}>V_b$4GU~$lL2IA!zn5=2c<-2R%Oe~+qJBlX&NCb zA{DcdtTID7vSsE9&|qYSbmYiP1!;72voXF+Dl%ir&~1cfp&{6>*BcqI1y@+C>n!{< zsV^_O%>#~0y~Oywd`4c{E@FA@+63){(gM2W)1*tlrtwYio8#Qd%g^(?k28w0nl&0zL>&)!%7Z8H2k>82X+n8v;RBNb-&1q^i4#YZs4q%Lcbl`(N2hXK|RqL5cS4vNq+M}K)H$jOvbV^9^eTLSA90*M*`VC zNFkX)A)}*!=LlsckW&c)3shK|?+_4e6EnAz)61PBFs;%bVs6P^L|GMm(~pJKau5up z=76`q#|u{s;HGueYAUVQw3MAkg13<3L^v)8!Z(IlF}LW4Q_)efzXvb#b@X5zbD>oB zoSAN4OMQCR`pvgyJb=_~bO2b>NcXxEYk~eBcrf#5I%2=B5y-b?9!C1z#%98!i|nCq z{F8L_t+pa%53Jp31<33n%O#4PU36nJGxm)A2*rMfVrOry*oX9teJ%TU$7Ofk*vyP@ z2=aRZQiC~|i1ikw+{ApJ%`o*XO4nK3ekc$PBuRrJp%{53D`XrNku0-rZ3|fu zBYzKR5yyaZWE=TwkZO!P>BuqiBNe$Y6rQ#i`DXHo{LA$787o~Z8Cr$KjvQuzQy}>w zPC>T|yEZ{Pp*q!(@E$rr90GOdL)AZ;Iq+dD(qx7)Zeljy#32}jCikYl zBo4u#d=5cQ5B-LWPh~3-WTnK^HE{YK*HN*Dc*shz6cl*m0n`l&wT{WD1cBMaFj?ZU zgk@zoAR#N=3dszF%$Ai|G69qz6q7|(rVRqds6+7WaFvx^7l^FT+(n%0r^O3)g$P^7 zipa_|B&)2Dj%-=E2&qO^NJoyW9IMEMp_q~rSuthgcL=&M&V@rDEOun8DI5aHM_CCR zf`T<#i)?@5Ne+{pc5Q-oLWLcI#bDF_hd&S-nYZg6jOhQ}A1KI=G(AH6fxh_apz#OB zM*IPlnUKs6;}FwMh;+fh7E2fmPgG$F(c9Ha^M}mf=QZS*cn%Ql{%FLA8C=9<%);U` zixBNHCYuT0u)vn<__mneWyG8+9+XHxO2daCdjpbwxI!`mA&s>TRONM<%SjN5$%8u} z+>`c>wRMF(-9syxakY2i^CH-7*@AsFkp!MDiHZpJ2qah8Y4D6xWDE9w%nbL6xib3) zjo}H#NNYHbbL6rpc&TL+N=%GON%_V5uN)pa>W?{cC>7!@`6A+7(SvwjEakOpGqe*b zEZ%d$D)D!z-~N|n+?u{I`wh6U+^1r;r&J7i!#~qzHmWq7Tu{*PB9FGoMiqFuzd?+d ztqf3|{bNv_pM&ZY#K5;b1hQNPf2_>fskRI6SFf{nnxH}*>%qx=E(UhFkJo6wF72dy zAv=SC|GI2}ZmD9M$oc>Tx|mZgVF8@XWH(XDHXk^%fXSJ2Pk{jT!N?m>Wo$}}x$vMI 
z0y4*qcvbQBS4d_skU5%mJ{vEUAS6Kop~mhF{$Y_h)`E)vXl+MLUN8^1HxErQj%k_*NZ*SWd!)!BGQhQ6F%SXPf}UQIg+7S7;Vn$ z;IphEz+Z%h{Phx(>D~RWPF*h9TiFnC`(JPj2pAvmwYG3~tbF|>+$OmU6W(60XTcK; z~3PI+9#4Fwj(W z;6Xwle>vjbS0PIkk{Jx-#B7}D(?tFVNf7ZDF71!FsBgLYM3umgo)HOD(n1~0QBprW zQ2^V?a$&oI5q%#`meOHAk(@1oZ-Neg5&_9M68I$PboA^U{7aQUEhoPOPLvGI!fI!Z zEH2p1k`E@&mm|#H^&o)P z+AmXlI!x^O7QEjIUYVJ1Y-XOG$CPI);>#7Wju3lB+)oLv6@rMlc$aCh?^CyQ{}1O-FgWt5a>S?xtm+PRO{lf0AwQ$w@8qWc7@wNar6tWeQC z&sVZUEHaqn{N2p%LsZE(KP^hGMl2-BQ)GZi=ZKU?6nv~CaaNfWWGnYkNJWy+=mhs* z8YK+;rUF8l@u{2W5*2UOlJXBBU!afewCae?C9}~}NWRDrvH)LG&a_u%1X?HA+m-$8 zgfbL)7aWFRp#5n?jZYSbUaIG>JYg3z`5?nb3WYc3c^N*|=j#&GHxK#PAH$i4Y<8%O zh>ISUk-1EE6HCd?fipQUgrUz7v_?Q-P`I3QVCevle64?u;##Fp1SS;l3D3f%md6pF zN)VDD(j8PBg1G4L@q=SkQeK}el9DslzD$Of6bF(Nk(5zLLMv-)B}hxQw3GtEbOod% zrz== z0yh2s%qO1>?0M#sD_H3NJfGZsTwbK_BN{&T@tN`C#LW279IOk}@q%HbKw1cL*rc|P z%**H!$BsFJxf7Vrk^Mh(wZ$0?iM@qnNNg7y9(X5+1?q_qkk9(AyRDysM#caEG z6{P^D)fI}sgn~@xDB1)o{(_730Rb%(yYzdjI1jk*OAuVr{4FF=Ngku0VJs>%I_V89RhcNZdXBYQv6K}j^PEc0gR^(!2F*28?$^*oeG=dsV9g@D~&E*#)fX91a;Id zzKdP_3(tPa#iiLINJ0PE+4Ic90YY9dODy~jnZu1@4u3rhB2-LC$~x7#oW|50K0B5g zKFf$ij-0kAOdDWk8<-qFVIpBt!`qlHoqGFWh=`8y1|FeOQ}7o`P1O5YqhcXt+rwu? 
zL}sz3NtyQW+1c{)M<(0D+hF~CcSG5gv3^$x27h{?OGM@r6p~+FjuGhQ60*|Y!q+UB zA|ty3-fRFAT3AM2(kd_Hw#o|u^UKSlj~aQA8J_I9g+FM^_Lr>Y=M9iqa{QsX#^x)5WAJhY?q*> zmoUkEsK*flUggXnzrq;e@R2|OzI-G_ip@kE>i{n081Qc~lLCL0rZcv1ZSP81+eYDx zuy&F{u(KHmNHfkIp*8N3gM}2BX?hu^+uk8H`8JM$4lb$t9RVskr%Gc5)K5c6m1YVC zB~p;A%_Ry&V1cEf;}6F`ojNiB&HK*wvk{)D>+$`d?Uf4 zU?dumlw%}@XCQsKi~(GYgyaB^eE|s>iQbvQ(b&uqE7BtraxgbA60ga~PA#A_TOe?1 zQA-G`xRrbeknEcwI>Z*1gj<+oR)d`3v+`RAt@+QTyy3H<-J~A;kxny=tyDqUXd7bo zwQaBkY9g$ihR;Gi64|3ws29?=Y!&{BF`%!5unLqMD>)sJY83=UL`|3Jtx(9IRzbkSwcCwJuG1uFMBs9)!lyE|z`>S4g>^cGq?mKcEWgEJ z5|sRhHDNX)iUqZNnknL>TuTsmA;uVFuFyl1mXHTvFHEg`+&bG994YAASyoHQ)2d3_4r$x#bLgn{NDKC#!^T zW4MO4M5eb9g`lvttAH4L5gZtHrQo~s)8=Gp{gkxrd(0g5C&Ae^?#&lMhn5p zXH38YAGgCGBknFBlLmLh-Fw8VOo@)MABrQ0uSiOllvv#z8?ec}qqsM-+`|u8LiN9j zJ&yeRZY5yNM=hC+-=Pd=`^8NP9GkkBpTs`5^1RCPD=(At`j;p~IOOf(24@`~XUg^ntkIfm`Cqop|{YtKqNrkOzxD8;AK# zx9FjR`Una9oK;EO-U=V#jBW$njWjwp^(&^x92|E?^efFio_01YW!#yB1W3%Rd*9z| zKBqM47h;z~dhyTw2{|;Uka>uYIelU$7QH{RPl44iP8__|#It!hKFl=vnS7P}4y2h% zqNSwyajq==%H94sItDkyjS}^&A$$lv%9mOV`|nNFIHM$1 zzS3%Vy9iaYqweiE&;V97B17En6?e`kAvNIw%K;I0PsWELTNO%N8{ez3k~<+CE0S^h zH^q^BUvgyxD~E#lHRWe3kDpExt?xQAm3h2OYd{GrQ~DiCbZ?O& z-#?gD)oPV5;}>}4taTud&q=zlBF3^_{sWYoSDl2u_?O9w3)TtS(I zEoNWIa^Z^Q9^lO!*S(HvhQ%?WNC_t4p&sjq(0xUjKrUBEW-yR3Q&LcaQwf<$5RxDZ z*k5>GlV!FnAj5w6)CGACW*%K%=7^ewIC8WkLPJa6Fq9BoMC?E;ol9n|KR|qgW?^!b z^T=4zifIPo#{fl-LRXjy0E_l#i5l&}ILnrP5uOXrL(canh5;rWU3sTG$ZcSPL^0cdB6e(RN~f)OZDRT8|5O zvrri92VySHR+%DDwSuhOzW#ooQIfrtU347Ujjz`NFg{>|pEKAGwBB3)d$!m@XiZh& zmyu@Jfk@nT?b)kPQMGW9p7={}=D&Sla0R*NwzC`74-Bd$zTg{!=(pMUR>SYnklCH%)B!h@*IUB!i5#3q+VDF)1@NA z6LvW#(&qrM+MR?p9Vot0_Nv$nq{mZ`8fr_mdqqE()##*g!a$S|f?S0^5ZzyKnSsCq zRN>0pnf#1*7sMR`Q@Sz&q2$>|vN}hzV-Di3Z1QFJZjXES83wYC>~zuZc-AXT+KXFq}L~ zl66+cITfe$95wK#f`Pi)ABMh4pqZvr>rs+Oc_%4IDW%s8*X=Q-)=s!;#db&_I4D*o zGrLxkE$nOeW2MDzS2>Rhe}FBpvWqpDgEFk}k=`nIVhO#-OVTew`ZC|Er}P$A+sEYi zqmS$XDcQ2q+ae&w>1}nl;Pn1VAakAGk%(8Pcg+cOdaugo^wwop{uPXbOtrXE`cN<; 
zr_*}|Fuc!)Rkv4H4fw>{83Y=ZEs)_b7&hK+qjVZZr|)qffI6_dOyrh4HSGc3vflmf-=HjmN-D#MWosT^Sc2ha*u=`)9pQXCp+St z@3bseos**QkvYAes+QU9j!U9Ju* zNO;$2K|DIC;EMvR0EQ*f_30Jnes$El?Mx6;=IS!$FY?M*u{X*gsscp^_Irk@r=ebTj?K%~3T#55t45j8lR@iT64 z&bp3zPhm*&ZK(8LAAuQaQoH!-s*s&4finR+SAiQ2z5UZn_ODVTC)l(<^(w`2K$7Pw#VSO@OV7Sy`f!O}$J-Qz;N~*4m6-uS ze>Mnjf&H0ISvQi6ZJ@IQ ztQRR(X4{fMS|mPDV33%L{0Un3ZcPsvBvG(D2B}sc+l3d#AoZ8RqXucPfVD~zEXAe- zdmU{B@jqxF@|&lCrcZWf1E{d}WRRW_m{yTi6w{1ifSCGfwg#9RvjSNN8>YYzimt#2 zgsylZT#sg{=Z*V&2|Xe-`uUS0EDt^c336L&!6jIx0$dD=OLgX=1w@;X0A<3bE7%^! zEM+WL)M`b2cF0GFp%9)#1vMgciFGPL+$_l$3~$BXD67pi1azHRJfKflVv-fd>Apt* zUx6(2t&HEDGgn4?n7Pn9y&=E$6z#^w3v15=krHYTyA{gry`an+G)uIn-Uu+55Ye8t zT~*F!0~XrT6WE;qgZ4}X7HSWnx1>E|4c5OYR%Y9h_Vfcdq?eePBceG4EZJP*rd>H3 z8ZkMY=S#Z~vR(LKG^b7qj%v<*0@f-?5b2KkK%_Gkjne5iR#b`u?WxkzC~jO)pyN=& zFlUQHz011pSE&bBwZQ`hL@+_pCb8b{0m`-Uh03&cmP>`vu7+ z*Qtjv^G=FDF|lC-idBJXWi*Dh3$#Y`1JW!T1@HwSh-_p%Iec9*@);i){Vj_^OXuFw zz7r(DeLO}^!Xdx|K#+}SZ~FwGKz5Bn5tvXQTzrnO-Mvom&8!b~^~|M6vxW#wSzZ0z zY^&TM!rm<0@5kFoVoq1T26IXP5w6(HLS~q#$FVj6b_AX+O>jSVyk)r>&N2U_E&+07KPATvQ_JL zP^^WDmD#prtDXlq+Ljtj?93IhS52B8>gh$h^4P0$1hQRtVJyf}DLiUH1`1fKB*B7| z+?3B=84c;wzTX&!z*co_PnuO$O;4YrDYFHpRfHAA{AG$1Ln9b$O*7bi$%d3f%Ztos zY_Wq+zdr+(;s}3z+Dx2Qmw#Y29EG{_sw^YAz0jspr^ywQM#b zxV7GWx4gf3?sl|k?y;Mv@>=K=mb$$Wj4spYSq-vy>kUF&Z0cu&=npRZtT(>w!tEd4 zn>Q<_!Bdf(?6_fDFNGLy1*FHD$51gE*lT5IzDU$OS}X51VDsh?rK1toFyaaPSbZx% zhKOe5>sD{mBO|MRfcqfJo{$S%onyR=iXrQ6E#}&DJK!4kFl_+nMW)z^8;A)?ZLF%&&iGH>+022pJ8H6(5|evBFR_7;G(-fK+wf;0^fbQ`@w zEl1FOV~Ti^If3?Lyf0|MG=p~{W0D;)YecdI1*2!=BYn@W5O(Hq)0Ni-V1#Q)*AobP zmm&`de$Ze&n`NJ2BJVJfC(HX9XO~b;WDF}Pl@#h-;T*049`SUCOoX?-J{Gt4#AYs_gHS7`Bi5jNe z^^VlA5Obi0yBNPm2!1<+y-|#Chn-emRN!q3R!|Ao*GU1(nc))+OHK9y^(a^*bw}1@ z05bRy|6!Y(#4khPk2Nf8dlO;rYh+4KdN?EWFNC0<4TT>Mg&z!sn?vDg8kTyVsH6&C zk3`rzls^aYr;I;)^JjPd?8Kk#`O^nKpjvqIoz_Mq^GM+JK1GE0F@L)F^R}i-^(`&8 zhAetP<6{gzt6^c?XUc1Ve^0{_->fjwX45n*c&3E#T&>|c;v9#th%lvayitHt5lKb3 zosC$D7jGzkj_1!&_(7$lA_r?&D!7-1Mc8%>!SvFw5VQeGfV|RNhZ6V48nKR)?8%6K 
zX@p=bX2c7KAZhLL+~~C`z$)hWvq^g}6xVFv9E}tHEMQ~&AL#I2=T8%VURJWCZ7(!= z?qPdD@$SNtWlq!fld`60xSQo(tzoIsfm)7K>1@P%J8OhgX%r(0HA1R%EF;!UR`i}c zH+o%of-6$Yzf9UcLUA1iu1({lN^4k$6fk?_uQ!jOZY@Zvbf?L)nAJp;p2w3_`a9oE zS+g`OUcgP7SE@(;P*Y=qW35-At8E>^Kv7=W~YD+Y7Bn8S15}LiKS- z^>!kyRs8A0pOdv@q56$ULfyS&GVKK^XZ=XcCWR9!zR<8x`#~uFZq47u{4XKiOKOBP z#&e9Aq!H2>vluZ>Bc%AV5hlr}X`G-R9!ftx6hF|w9;k6ry|an%DAd=xl0W_VGft~7 z%-MLYR^>>*v1*6#X^i^b8kRCG4NJp(DCKPQz5^c8_LbsW&ML0fa2><<0XJ|-{4#;v z=uMOI)_S*w(poi-(EI05_*_HVXoVFPP79?^(Xhl{jj(s90!rZ*Xkv_noS|WfAEn_M z#-E_!T859%un>EIhJ~Qr5%ymFjnqVt6)~a%5mjJUcZmIK3|l^89May=v~|S3T*K0& z4an=g2U5NNH)f(#O{DMBYa}d!@ver2;uQ?r%e-wR?`p4?!Ldnjto7EjkR9YD zR!TzKC{aZ3A@NZLkkDY)ewNEVYn-kuEoOL0ltdBkM~EzfJbwm;&Ddnwa_oALuV z;q>1%EcKW`{4WsoE&O?oKa&)X;BOQ>xW5u`DjRH1zm}IPCMj`0K(NH!@kF}BZ7XqT zKa;lU>P*@Zf_bCJ4=LPFaS4SxYgj1kL>_N{()A9oczZCkLdy^eKS2b`eISJU_XyKo z-VutsMdH?a*O|1d@boTIfDShA3}POu5mNG97IT7q*Fz>23 zB)wR}F{XcimEaSruuj8L@CdC<8)1fMcp}4xX;`d98N;Yio93I%^fj6;GT5T=!o5Er z%-Y|naY9GEhNZl}Yd)#VEQHCuT1}UF{6W(tU!#V*Sl&$nyU{yF@UQg_(>#(^X5vaU zPU=#nd8L&oKcvajPs&~c#benY$#bLk4xZq5Bg-n$y!8xkrD3UCxAIf!yhixF*87vm zf4}Az+)XB~F%)-`hNWH;H7s~87FgInja$Vso3xyD3^!<4@D5QJq2-DYEww_+MqhrU z&f6=jh?Ta&L--c#N!2JbU?_pUL>|H4EwrkbH)X|CTPOmVdt zhZIuMr6iPmLW`* zm1>-{&p#AaSlWRw)7vyoha)n zMxd7G;>lW%mM7Ye@rFAmX;?BHE$J!m*MfYtmzW@MBCuy`;bK5f(Qpk3If-GEvGFp2 ztz+E4Q2Kis-_7{fG%Ps!D2yoLcgjd9XRU^%!ZSm8@6oWpI2v9~oVOy(x?L~Nwcgd5 z*2T1OA-H=KuQY&zus2E*#}e>HMod>u)G{KDu-BjwLl`lH5!Y%&1tSh&#IH0W#)#cD zEQEimj1%pCSHn`#6&hYe_)ZOrmDr?th3GFdyp%8xhw|PXf`2IlKVQRw^JxuBx#L55 z&(W~JoT_1wn3F^C*J)mfzg)u|q~QXD$>7};RwQ&=4NLkDmu7@%y@sWnPc$s>Z)sTI zUkSnV3YED*+d~@YF~G3QhZR=h?$)q0`W+e;VVJM^x=HKP3?tv=nm(3s4`{l;{yqeI zyM_hdjR>>6qeA5!94c=w4NJXtWEcW({s)A;zoKC8Q~o@{pDv|COvl@uxR`eiou| zg~Cd0wkAcO@CT%`)*BQ~s9^nk1O2Xs*O5cNM}ETIp?DY%Y7Cx|kTGEcDqz&UTG#`po*6jd; zWiP&)9&twv3ziRnkyOsryi$j!5N64b%5$~%!$lzTO*R1#zRIVW;#s0$(T(RcEC`+q z!Ca(>+gPRPi1%tVLO9pJh?5urExDdAM`@~X<6wkI+kP4++Ocyeed|#Cre7)SdOZDh zJD3OyQ9*Age>MW#J4!hy?cXVRHmdxfY8y07%KlWtQs%oFma0*Hs^E#dB+u2}B0RB@ 
zmOf^W4>za(fvh+!oBnvO56P?aU3>GrGrjwM8o$t_pN@3K|5MVJq?efVk>vNV#|Bs13?^uoh{ZaaK57*~+ z$e*5Q5^prf?lPJGYBJXu;K3%{IZ`vfdze1gnL^(&&;5_q_=n}WB>jSU|K8;9GI(oD zIg<@h<4`%ATRSUee_VNX<(SH|s@x|h;FICc~Po zkRfKTitLc+Mq*NcEW}my9Stvfs;rS-dIC?gwS8m$D3hEOjN2l420s(y&d9pXf`ZTP zfu&|sc|*@fUnUEgC!?=nH*@&~w6pNvkb)USZ!bLgzBAuVuB}bl4`>=*Qh@gB2sK%> zjVBLg(>d3M+i3DZG3P{_&F6l*BCiiCwjQ|H=iE`N<$VA;SpHjhX7*?J`WnaHCLYjy zsq!qNu~XOZsvB$}sX)D|Y&V7nHh2uM1|(|`v4YPmJw{CaD^B~8|VD_g7q zlV@84Ztj4-U)Klv?mk(M{R~k1iFl~mp9ArRaxb`y`!}YKMSQirw%UE91?^xIe;lyJ zahHxN{kmA9tIIbHm3k9}7=VOVk5~EN6+(s@$r6eerHS{rFWp@qQOx zc^k=B_=~%hWoRH&tri4e|LX*KB=7&@x&doz6drR& z7ss6upcD0OLA49ar`Uo^!MK4fR)~-H!LZT5;3#RnedV-l*e(=zZbCtj;}O|arLB!Q zd%&LH&VWTc$}W~DDCa>*zaAE9Wh~KI=oO5CuOh`GxXIYbUwVB<9YM z=EOA2zEl$Tl2uWg0Y31X)$d^=RSh^>WzV1}U_VtzaJ-|a*N&#BVBnw=0kGGKxgctp zhF=2`{HBiCy{+VD!nc=%k&#`pZ%M*%G{_vbDh~5{v2$oSPE5;1?kacQM^JgXp8GJ( zo=&-3&>~qSoA)moH!Nn55XI!1x=C_F^+xZuvCR3NcO#zTt*eV}!;LP!;WLb*#q*gK z$39Qshw)24ueI|k1j9;i2%cUuZO{=6T_z;G=j|`aDesr^zR^2fp3ARXf18-T6A z_z!V=vvDET0XGfO)HU+Scc6Li`A)bY${vD|sk7LRgc(Tv7yu`6BLKKg72b zv?cT&kUA3zo3SuB^1M3k&gepkA$J1799LpJy$ZD;64YaHy?p&>r5AEz=hfER~=sW%U>I$B%dXS>lSr6M)7gyU5;f-*>B^PS6S?d8u$8 zkS?zGw38j#(1!q8PQg~$;{cG)iCMWd*?<$_&=G;%K7h;b#5`m&Y|jkYHUAokVEEh( zTjOyGYWxZ`WjV<*_s_6&=}d0J@t%{tahVvK+-38mkZjD~ZYS;z-EqNuksg1YwlbUw)mcvDW!9iy-AxuQ{s)3zJgq&F+5qeog z`KwmL$#?}3+bR+Lgorai#4kVuF0_z+uCVDb(#ijrGrAZZmDO;el6v6};Me5yo#Dl9&1QRa zaiSZ18VH=|q-f8q}+8LlLfaw+g6PIPt7>1nMj6c@~UT$7zE$s8z zLss7r1qJiX`|;`VC}DilT=Tw1`fYg^wI^~40MuT1@8243Z^G%X#G>9qUc#X0MJxHJ zkT;?q_8e2HR?|PLG(rw90jY5Zmtna#Lc61P5L}2fqwcW0o6jimvl+d&lE-FpmuBa_ zCM$QT$(1f+9i*OUjqS~d=)c&mzAVUoCpBDZAy?hhW8*w|Y$Z3*p>fb()S(JdIed#r z_IsOaKzjf`FMtQpQTQ1?d}(?gkZI1{XY*SWZiMxWHmv;vJhDV*?-4^fZ`x)g_XeK< zEX+X?|L4IX$7?u$jUr_=Op9usN-i2+L`9pC#UwaLYh zFoU`@2lc}gXuBuWXr4SM(_i4DoJ~TBWQ-EzwT|XJdz#{tB3RPTAYb4g&_ihiXnHB! 
z7WidaGgisFMleV>lJ0|4uj3SWAMN~k^fY$RtGx&CT+jvK^%kFjKF199I&hBPOCSkO zd~#D*f*+*PaCpB|NOH@o7RY5@66q{p7~bhTjojQT<2)I)rTWs5 z2zgFN(f4xZ3HiMYsuNfg#*Igr=clLgeVO+Gp5DHQ^h)`&3x9UR4~yNJFNNmi+Zwzq z{sn&4BE;m?jQPO4yv3JCn7kYz@^dL;UNA51e7T*;e?thfcQ!Rqr(Pq9(Wxymt<}`U!0rEfg7NL0id11nM@Hs#EegAlzdv8#f6}>qi0M8 zD#xBr{#nc|di$*{+49LpgDTi@j+V>M_!#`ER#u${Ci)FM5pljq{0Xh&J;>G#j%JA6 z@yE|GS4PDgJ>zn6`m3v0b$l~X8CJrFQ*=o;otSy0cykb#(;orOgT>i#;A#Rwxx-Qy z_eMW-YzyBo#4@Fade!TN_Y0l zFALYFruu{8)Kqb1G5)n0R?%|1)+FP)qF8MDeo$2>p2o{11AQ6h3oL(ZG}YdH-6SM*h9!3cYh8)Zhg$z zPROCxC1R2JRNY5}a?XS+>C7Qvar^8Jx-SMP5Oifk$M>XgBd}zHyMha;;`a4j`ER4T zI7Z#z*XdaG>>H_0Ck}L&Wik5{-wB9}wFe5o&1D#ApGl}JuN$NBtqh9&8%Lw-=$76i z->7&c?XY|E{(@#6*||s6%*~Z;sYr<;e-zwhyr<&O`sW}DfxLV7G3t|1l#chj-?DRwBL-%I z)$)chjD%w~jtufH0}vOXE|TY3Z%<;FL<@T)e`pWAgG5mj$F~$cXP)Fe+~hq7&seIE zlaTCQKI!g-+RR{Wa2q<+J?2z$A~Jm_q88MnPTmUAs0Uqy6Kaamcgu&oQ{Y7bKr;%` z?Nu^^GouvI@$!F9IwCqjJh?ypqR9Nu{n4S3OZ^6e+)A7#may$Ma|_cWpXMa+r@eYG{3=t=e%I(_(ny6dq6 zdL#a`V6gAHbh~B#5EE^X>IYdBEE?cO9p54*Xb}rAIfyHQf{<85i4)ITLcBUy;l%xx zxP2H0%UNCiiPi8u%Bprp6=z>)wg-r-w!exyV=(pb8T?~^vTr{oU`nzi?i?4lS61oS z_V|E}fm(eH*R>FB4iP^IlD|(?VdJpZF z&EOd0Im*SLjS^`kKSW!oO%XAJLQ;xgS}4BRb|vo4>wtEe9wA`h*}xjJPwz0q#NA{U zp+KU>V6xB`qrm`$1fl+220HvX7|2E(#bSX0z4mO-7jR^{aO53u1U6V$;V6Lehj%99 zHpV?;18}JzC&dN)-y?;A3}-XwdK4qF04M6mY%Xzd&ijHf&!%k0x!G$fdc?K2ki4|I zaHCw+78WheNs-?rRGs4Vm1H^2TUKKTepr?LmQU#4fn4Pt;widEC3^vlPuiggRfX@y z%HNoDNVI-8XJ}$$r*{;EbI%sdw-F{jo$lsVaade*%ZvT;@b^o$u@5P5V)`IjEPiT ztN~FBouVf{r{6kb0wUD}KLII>x!0D(?SEr6vbK5v#tB82z5@2xU&`gQsoq@0rOU^! 
z4IuINLKs~bZ!m?=kuVOFYYGg>I_MywBW**6&2Q(p{WIJ+kBGQC3VkD-_^gxXiO{_D_duLOYH!8hw9K7}w{>3wz>FyMQpq>3KW$W`e< z^d+G=Hv!gLiQIHtu4cwy71$gNiDQri`)-)UC6SmjTzs#T6u!}`L%H73M0Axw%4eD25s@EnErs6SNY&LK&aXT|Mrbo~P^A2sp7PpOHnd=J5%$8WDW=tAf4 z8wyEG(Y?h}`fkzT?`{Rn`8uB3TJ}=7542DjAZvQXMbUIr9j{Qv2ATR&ZEWaAb<~{{ z4gIK(e%SYr2=Dty4EtUZ!@i%yuy{)FnHVku9(qy?_X~ylhr)+33?RY`3WbLtjI0u0 z5emmb;TnWtHoi)1+RD0XF$AI7eX16c{T!-XFt-#xSO&*%`giBxbnQ()Bj?w8XX1$_ zo{L-Mz^`M^Iu~gLjKe3IX!iTD#V{ZTTeWY;*txRot&&phws(mX%G$Yi0l?dM8R+;*{Xz=T3n=xtfF3og#>O0c zn-(D|lX$D@DwFa~@d0@ivB+B3vTm#QyfEJPORAg_eB%oY0&0%9fVYHr^{af<$rV;Z zJ5sUX46CQznD)Y2Y5@Q|X9%fwIuOP}DWkB6N`Pwk$NW?cytG8ORjK4a^b-WNrj5po zC@#n7Vg|c|^c#qPBDRZs>HX2R?@@u5mmgX4GTZ{`h(+{fj0SD^anc& z%=`YO4MUddoejT5%k$0#L7P1UcwG3<%;8zg5qh!2M`Fm=?A7t>2+Y-S{j}H?`huSW zQt#6sE_MgC*?nPOBFik1S8xg=e0}dYX*uZ*csYo)gIhpsTrgjD9NGfwx{St`CK1iP z8@o@W6~HTes*Hl&ZCWF9uls|zM8_~=@CNVtayVn|JWfb}!1XcfFc~gnW=Le~)OcRJ zgY8`QbO8kVpz9uzn`U56f$$Mb_G5cSyO7Iz$m$%dztS0n-QG)q^p%P4~&(?qgncE%Rh;D#n za;FC#`NaQAa6>mqHA-<fe*>zdY!FmHmCC+$r@C z{Ocfk5PWkNVox2w>TSq+b^rnxmiNU-k~Yd%|NSD?|9B}3LPbg612p$2>9cbrz2m1z zdTF+#A0WhKOM0o5EM$T^e0fIFJ4DhKVhc{Lr2p7*f_q`yE& z*$xN?uji5PJpfZlC!)NPel;;|LDFlb{yjBP{Z+K^n5}QIWr0YI`kqr@&R~2jY7Wd3D%B-cHP|E6bMQy`(geVeErj1@fz$ zx-t$eHXSB4>OvI1*O_JI{|$2)$L1)c@;=P>s_bvQZkTM)BEtl?u77rxN07xNqbTl< zK~eo#)LmLsNB*J)Yf(%^DN>r}YEfm-X%5T$F`2bfLHbgDS)wV1L2$z)Ea<0s{h)b2 z4Z#Jv4zDG(--x>QUMWqw-ECZYo(PvFOxC_m`k zVB=hb9|hF?Fh9P*Dhl{f#&}zu9rcT%wh;VKkUab-rs3TpKN?YpaktWk;DW^ejUQ!$ zeUU2lY=BAXeDY0_QsDKNF#6!JhaVo@uc%EN}jBmNs3&X>}s z)Ta-ELh1a}PmzZ*aV36&JbVn54Amb?ye}grQ~&zx`hSmP(-orr%W0Ey#Gs|e`qTbz zQ4FR_=~Ft=7QTDnJv0zz&K>qx9CIya9m>4e|HC+E9!;M&Rq(C!ehI6_#e&MWxmd6@ z*K9eh#c3+Dfm)}BupdY#(|E`R3W zhghEEOP$Hx!k7E_^Nh)Ts>wadu;Uh!?Kon)#=Km{9LE?O=keui{*ZoBu&u$-%iuV{ zWLrM~K!=-`gP3F8K}u99Uv}Zo5e7#mU^(qOU!J)C67eMeDdI>I2p6%jjs6`9zbcti z-ivs8A4%yez5DU>foB_lSp;~&089x1++_f!5#V71a1H@L3+u-Po6F^i^FKn{GVdJo zzO$kA7kKwe{Gq?*InKa*A@8_9DFpMDiQ738*NHf9e<5I*_pLm!f6D--Nsy`k48&op 
zR(j+QAINhIo=xL3=B>$JkH`Eo`x?$a|3~Maxe$l>=b6|kg8Alk&?;RCs--9477g5r z#|31%c|i@j+G_XF4m@Oi!!3FQxH#Wz=I!Aidq9wqDMrpcr|SKg=Tto>Q)T77-k)_& z)t3ZuGUJTdUTwNx(Ob3eA(%4~pf_U{DA_G0B|7}QS}IK;CV zOIR1cz@xa@Oi^H|&}WYQa65a8m3&3|z$wMZRXF)PEUIXU>##1xtY#uDbAhaMSB9)_ z6P&E{hvx>=pSFAokmA1=f3~CSbE(FsFO#Prwi?@+T3`-CjIxrwNQXNbt1=@{g`Up$ ze8l3#p_fwao`$XfgG1l*`JUH^C*#iNobTz7baCg!2QE8+o+>Hhe4l&m5Ymp;C9{(xd)&k5vYyKP6ZP>hp=3C^dL5V2OGaj_372f zjPf>wlExoIR`t~QPoWso`1uzyUlfviKHR0AK&J5zEQ6^m7dvDnchUONrs%Lh4F>4V zZ1fWQ6*&s3>$sm=4ZR>C>3Tq8JmAb>bJ5M&>|J;W&2e;F_bDvO9**5mLAwSVJ5$J* zPWcn2MPzF@5=~nVAHw#Gwz)JmN@v6JdyKlJ(>t#pNx*hI+2I*c&L2b)ru0t_M;3oQ zABLXhZtx24VcJJ$x*EUG_Pk{xb1+G$1uRumbY+bl)6YO5Iv=UQWh~GhUP`%cK@CC{ zlhVeL#C{oj1 zkrDb@D2Be)Kj|yH3iIMUfzl}o7Yo{z-g$U>&mx79r}5UyT=Ly+-**=Iu0&%=Z=$ z7PmG0aRYnzCFy_S8SdRf=ku{&c{VDkr+ufc;VDvBD$WYyQOk!@)b_G$R-T2o5l+EK zZn?ZsKLRIkxPSeKBIKHg?zXK9oQvDoTXpRJff8;`u5xFWQRHcx&9-Hp-4{E|E}ycD zSHIs!kR87+IO~yJS@)IWy1INkEwkD@Ewk7>P1c%a?RXlUXkA90CM(Sv+dmVVS!bR` zKij|_lL>Oc*~AXb1nG(wVCP?;>8J2|NJsdEgXbM+9Mm7yvd!ZoOR4npW=Zl9z}z$@ zz(oq1yjQrbym#BmM9HFB`5IQKdA=Q-WzGdOW!}#FzK;WqPgMxJU&!`YHi5vndwU5A zgC`afpkxy%uy1FU?A=_8SRtHfirWt;uuzb-V)qhn($|AFpun$ZPsDvP*2#1)p{C9Xe#34#w7W+3=i@X&JmVm`eyhGw-?eE zDkdnD3^Uw|J9o2eE7_M!bc7di`)*U+~bTBxOkcv z*0e+_xB||E3O)uxC=Z0>kqvQ8iMcx>x}!7qBN_Cz;|-mbwyYOwFN2|I~UtcZa4=%*BMx-_M*Xh>G58XFoHY!okzt>}t3v9Ve2c zyHOMBj_Lxnb%t70QZzcSz}o?pFO<%hdMmj#aXWGd1)CG2EZ6|02WP#wGpZ4$J<=bY zfP4@E??;Hh5$GQw_uW{=@Zo5)2W`mc zH_uY(dF4Z>6^UVgT`0wGgQeC=9x1>!&Pp3Fa&Gx=vi(N*WxjW)`leU@XtxwGIE#cQ zk4|8};uMYO;53qOhV5b4jL`))4Eza@;H>o6_5+duxC8k*O1{=6UPG0gHeVQrGZ43* zq0pKwN*FOwlIbO&EoIvhqA352GC%>&Pq8lwc7gfDE-c|%TUN*Tmh7Ekrw$h-+NnP) zNfjlr6N`qUVpej7tknhfE-2?dG(OrXZqKK6jPOUkBj|_$L`RfU>pxR$oCjzoe@ZX( z*#c?9Z)io#)mGa=`?uT+ZNu&+*Y3YdPbV?#UPZYQy+G9N{}DX_+HImyp`g;M(4MF? 
zdLc3L;kJAbNvM_7wx?bwL2<~X=438c|{oj0H=&m{C&DHW~WaPSm;M-uCdj}}d(kBRbDgJYWRVZ zNl1>nzpW7cpjO|_pix*P)S3j1@$cBMk}blXy0LO!1r&^MzAY$UIq48xzsgqXMQkziVbLxe)d6>j+3xe>HudI@yo&K{G*#9NhBd$8_v(1gt zy=3!#&j?z>`~KM+`xkT!yvJgBl;I2MfoXfh95TFZm9*Y$L6p^i_Y)Z3S#_EOD9riW znRH?kb6E{MFJXYd=K2i(8O-p%r3d~u%l@x6;_{n-pg|LCn$!>T583?xIdg4^|F@`G z86h?N&tX)pK-Kgo|F0Z{)6>fTcX#=8GcH$nJrqmo=01$U8`9fCN}!w#Ov0v; z^o`g$5xo0;&(mPoIyye-UR;X~#)JOlUk!t>ZNVQhZnXGOzIx*G{kMj@F=5ElPj5?F z!RF;0fqU?*;21W7N!9LEErS6eil#2Ke0b1Dkn9TzOqDSNr>9^gn^AM=&d0Q5xy%7` zCJ1wGnPqx8fU&b9Y1uMSPDuNLSv`v3E&#|Q5Ilg+d!n;Ba2I=`oBZ~@7fz6-9OYj9UpvWu1KVwps z1RoH9=TO_r2QN)u2%Gp*#p-6fYZJWqf#*UqVuk8LPX_I?` z{~_Fb(^#Fzob;fePB|T9m zgm#ziSWs`i1oh^fbUnyHdaIz`N03z1=hkgdpWmZ$nOb~qSaLCw{=+Yj1MY0TcrYdG z;eNrFlhFusgM3c&{Pr5x1wj%$5>gB z`+cRVdE1}&@@O08wEtoz*;{D;`+~eVc58~{m7ez=(|Wh|L4)nj+q;U;H+cp4feSbR z-15GLQT5j1hsSGI^XbeZ8@>Br2mJ6dNnh!u2)syvIBDcvr$8}=FGtv$ytxbCv%nAK zWUQu=&EB1wtB&CcjpvMmHwp2SwktGU;%YTKk?FeQEBDcTeP`rIEcK=sTKeNZM4;%yUIoNlg7c!q@ z7*&1*Pp?H2sIcCdhNddg0>T@RMjCG-;Pvu?@8|I4a{RCner2BLF!C(?@EC42Ut%Vu zf-fgZ%4+XuiG0r+Xr3PyVjBP#l)W2Rq+VAzPy} z6rPJPtGr%XXs!2{rb(;4i#YF(j6j3D$`_HJ6hn*f1AL-A*BKm7A&$5nCBQ@cIWL5* zNppyxj4()tYP!TtG`S}zoKSEck(|w+Gx$@jNunNw$m$Jc1O)CF^E`}Dhw$eB6SEgz zb~7(K@THhPFF-j->UXC3TdA>M;Dy*ep)9;_Uf$%(*KC-z{P~nW)4#({7efw0#M4Ts zr}*<1<}>OxgJJuH^bT))AkVkVGrm&Zu_B`@?q*qDOnLe}1N6KCrH7mM)9~)^x5R!7 z^30s~H&@8v24U_hh7| z?t?hl!a0>w3L26P8Q9X1nkxG-BqZB0BqX~rB;;?#NKN(kVx*=HB^IDbP1Pw3hDHQ& zF$o2`Gpg)gci~=J&q|)9OElyR?w1EEczI7OYg%=n0+FAxGKA|L7z}sb-^+r;9N{po zjSxo9Z;>@>%!_7?5MLs&ov2^~_^r}5OQvL_t1tHpRz+tgVuVk0U_nsNJBQ03AA)qy zrkOMd0u?sq?)o3P95`$V&?;QQfyluda3hAd_xD{`C>(|ER4y|J<+YNx%Swt;kgCAa zBq%qx9AMGNYWx86UYsjhAGZ$WzoN?>#YlxjJNu~hX1AGtutR9ZyoXebw~4nU#ap! 
zZVlj#>#eb`jJL+T1gM38+F|VaUVu7sp_SN?Q)OCf!4)eXRczfRb509C+WHiC#6i@f zQZOZ6zQk(y2lNUzUXj-o!o9?DK5*rsz7@z6C$&Nj`f#5hYA=akeP=yqPIxgw8NoA+ zfeUhVR#*6xs4Lyonl_(as~!%*iEx=ab(;&&*4UrLSthwF2TEAknIzT%y;`~;aEN1q zQQ9-+oC;xcnAu!FotfbHCBfX*4iauVT8vXB#o%@=%%bClt;$ZhWEM@FUv%%9%5ovghCd!$oY;5iVL#lE0FK&uNC~hyuXoLr% z`+1Kc+?>VA9M-8mSSF zi2GH+mUws+Sbu_7dFl#dr`>ei;-VRsF#1m_<{P`t#j2m`hz>I@jHF&@}kL4q|V$wl4N9sM>OR;s?n2qT`COO#j>rNRGEAQr~C!*7W7YfI)108$Ucsw)t z6$)B-Gj&?}pb9hkEN`5Xh zCfSYCW@A$;`&6^H6^{~qE7J#iR7~;H!^ocBr^v7od`>&kF4S57Y!*&BqaIN7xO2T+ z@0C%DF3jD!WQU6&QMSC{~ zA}|zYE810G2Z$=?czz6};Zu>G==D>l5`*i|_ron|)ZA+4U{h~gsma5jO1sd_n`2|y zMVy04#Of1A^ov;y6VPYC1&yqca!OjR<4$gvG|oR*in9TTj=7^^opSYHVf21ZE9o-X z85JA6LIJwt;8nSBX7awl`s;-(+WsVNf1SPvf~E7Vj#LBZ>qT#|GUB$1B84__*|I>A zeHGVjz=ocJz|uqPCAH~_aQrubp&hSwui*8<;w*7jd4EU*xfcSCr?%=xg~g1qWV)%J zoU4GQ>49V13)43QZY8-dfP2@6$khPfucbR5T|F%|h>BSx9V3zW97G>u!kmao1yL&y zff;3JVs6ie{D|4pZXq!Le~|^HIYb!8w|k0x9m|)+oRk+?e zZ#KArJ9EhtH^CiSJr(_`%(%+MU~%`>GJch)r!y=z%W!Y)5A`j}mct2JKF&AHlR`nm zs=tzk+`S5ewHCFYt;&6T0&S8l(vr>a0sxwCa+qEr=E!F~DrI?-y*$2=!zwy+sI^i> zM}|1czyj~@|BtvYkFT=0-cNvNY}FfF<4SC)264d!8`Nr2K zj!9GO9fAN~m)(r;cz{?6iPkmH<+Od&dOGj!DpR+|iYp=rFpf*kW{=JkfQe#8tSj>E z&p$F|gfk;HUV47hPfa%-V7}LZ?}nUch8i?OEVZTU81{3f5dBtG$L2EBpNZHfCg*B@ z6+FE5SSBBVgSsOPyh+Y<+daEr>qnF96||4}81+m(6HtV$kxTmrOU_PlIs&78EU!3I z1>sgwY$FLSPK{v$RW230OX`_b1+Nz7xhnW3?jU4ou>SEXN(>xa2t~}AggyoqNkuf@ zb=eP=k|(*^hZ7>9Wuc3+2xCn!8W>D_PbOt2ms0)DMA<#{wD0EoxO;kqLGzt2F#(k@NVm)K2g^BMqO+ z$APge@U*&-ar{Bg@sLI`0w?!^2k<24(&HWmp*RkvaY2VQZdu9BYu^cH#dS1ZB2btJ%`2Q?c|0J5_oHyVRe9WQ@e1S zNLY9@z9K);$mK6|zc_MTVMe=M7`$S{d%$}-(#7kBS22d|#u#@0OK`UdM!KsB|0+hh z?{TEN1|!|qFw*5Pd4-N}Uj<%Pqzf0yL?dHYfcZx zPCJeFhIPEM2A!vGYifVbzkAuAcB6kJ(a~4z_OepQS5_#E5TC2ztUme|3BIvOHN#u7a z1=k*lu)Q~b_R?4m66_s)5L;=O<4k)gSi;%6&(U$hf(eEmgzfh=B9EC@YnUFR{h}}2 zq2VINKh7{FNiPu|r|t91^G?Kb+tqCfE5tM+Y{&U?J!9dhU(J_W(9N1;{F z{bzid{n!&t+IWp4)9f!lBz&L!37*CZ+iNr|#qA<_HrYev*=KL1d4=q6QKra05%iJ( 
zHRJaC2$P7ng{W2bYx2IqzDW@aqdH7n8)3|$1`lai3T~0~P4;{|?PmVW;PAGR~;l>zJ-zBMJe7o`{d0i{(4yD&env0iY=f!vM>MnQY!qFwY5i+Jm&9au#$L zA}D1C$a91JfpBE4{U1E-T?oAce;fo z=glVlc|3jd&x^o4Jpft`-=EK!9c;)zEFRT-qzdk9=H&C@g?Q85|HatS0?rGRHNQmi z+|@#0YmcTF7vfF9V0Vvs&};-TfAzF8`J74+OB6@shmYsyg(XqUp?F91!%0oYw}yS; zR5EQtTFzLPiRHMnD=-~*C!pi*1azF^U4M#o3RrfMiEBg+Uou>`bD- zQ*ACKA9a@k#@e1tlyI*Qgf5 z+aq{%#Fv$0-i*xNz{wdLHvkf?DUA0!HWA6BiBZiIORkiBGj)2MYR$q*qSjO0($@v& zH;?1ywbWA4{^ZSs&>3e;{{5|k@TsN3F!34T+v#+)I}I@u*?GqxYfc=M$vMpB;b@UE z-%y`3GylJZCG(^WQ@<_2kWwjmLh{Y}upZsDgA861+ZdEM6YkCp6MqyeMiRcOOx1#% zgpe}=8#EWy3k|)_T7Ccl}OOmkhdb(KviwCxYEfjXLelWh~2ZGBs|j=URLa!5wfbNqy5&B(84 z#8q&den-3X=;gjLU0YuE2~I7>>EH_i$WrnnORmBI^GjUpM#u`etXn{FL$`v?253d& zwBh-@;3XD+2LJ1E{KO=5(0V_Q^6+}$(5;v{2GVbTVO(h3cDWg7$g_+ zNWkGSYdjP&IDq1A$yjQ7G48{*zxg*zWc=CEiDN#Vc5Hau&Mis6lu^QYi04j=BML^R zu$!mh0`W^X5YMJAV4A)JGNGdEn?@|oLT--u=oF{0bToX$K|dtc(U z6bU3kUtD`L3Nv1#qGP_p|Xxwc(pTZX)lv+dx&w z+ieg{MQbC8rAX#cGj)}z@)|U`IdNvXDA`|$J5gsh7jcfO&z>R8wN#r$@Uib7^G2lMdFV?MKDERsu2j1B za4gGX95qL+Xe5?=j$g^fmAzgOxqC%mNQyF152 zpXV(vZ+JP9n1cjSW*Dvz-V-(h?o09i4b>9&!31^oeo38%X2<4z@)&}B_FHgru~Cp54C%ls?5D%=Arm2&HOE;)Mg_$A z3GUTHN*(10@+aonHP!c;)LK_+9~xc{&PS~#J}Sq27-<+IvRmKCcQg*<#1+0-lKoXj z8K6eaB}ma!BQxLw8`v@thyFL=Et*cB@JQE2ZP1S_M|OPKE9|P!5uvLRdtQMaX?z3FH{x4l zcFdO{1%1lFm@Pw}k}ir2y+$Z%IFvk1eh1OcEb^M~FS6kfi2PG%#XS2t^uX@^Ay*Kd zC>PHy*jdP#^VloJvW>f)d`bYVr%C|txOv*g7+2A8;Bi~(kbVt*Wc9nARdCDM^sia3SmP2~It%f6StXb-q}XO`E#bJ*M-w z&N#w!E7qK9>TznW^e*g0k~cHP=@qOwL0{=IGiH58pMLhp!I=hw=RvanQMapl>D6 zyOPMzbGq?PU{c*ZnDrZ9bvvRk z%EyocSlE*iD?1~p1IZByH9>Yfpah zf_JhRBMxb}Sxy!x9nhq%E)Mh~7bDF~B_xP5HUekSNvJZvv(ma6$9!Qu21aV8%R-{` zru+8}q7Wy_O|OMu4TIP|$_Fk9PP~GK>v<9`kF2j-hVPL^{Su7=l8(&Y38^%0dy7oX zV?0NMa>ZwU7q9jNLgF?3K-jGFGpL~mA3$QMiTJRDVgLT!9G!ceh1*w*mM?!hxf2jF zzY$H|UJ~eP@`7EB>j%98YGbLZ`*FD^93Vo)M)Wy76z*2s79I=4-!ANehQLiRzHh#K zPISBVW@3^1+$Uz8^P4H@*A-VpmYnKLW(6h%xt|0GhE_D5A|{E(E8ApXmm=DQh=#I* z*6*4aVuWS^9|9nd?Sf^bm<4zi|DDBHtwl{M#Sx5b&`85rcrvkMC&uGIl&C^nK#uN} z$+3bRPsjYRj_WB$UejeRcWN^R<{M?T#iV 
zcYB(LnL*r)=qaLSwJ&2^VXm>JQoj&KlP5Trqasg>FOS=NTWIqyr08+@pPby{D4#W< zo^ui?DBI{m?!k4&FFKUM%GK1>@SU%zkB#Vcut0@(=OvQVV)f6CdeI%iib9JgS((2) zswXo=lXI)V6_41AgM}}n$ZTOr? zdl1CW^>^*B@n#fg{-gx16 z(>?Jd#Hmai%AZR9>?0X*{w1*-ju??Mu7rK*d1lyP7a&E2uN^TG;;y8pz%4`48}>Z{ z+h@NC1k7^_m;$!4yRZ{vP`yU5@3N;c;&S}(Gu-{i;Xt0wgsaKa$M8cuNx|1=&ywdR z`!{&n)9|y5pTq3uiB&%k){;~DNv$XSK9?$7Ua;GB+_N=^r^Kh@)lhJ5*$Un*i?4PP{&;DB4Y?b{f*>38%R^m3; z@0jNsc-pr@E}g#)WQp?q5Kf=I(hcl)6M&( zOgB{QWsvRSLpD*_Bl0DFUFGla7WX1+B92XY+r z>cnIpPAtRM#e%!goyDxX%XxSp=5Y`UYbuAn`%ZQp@KjfB6#_sJWY+|D@Phn+Yhw8Y z*90m2CdhBNCRpkP_w$0(5Cd?i7vzUr6U#5TCP+0hL4M0M!KfFk^n%ni12D-8@`J94 z@R(rwoykLzNyu=IEdckR4u+9r!>jmq*V1pNI^ny2e!6q+w8-o0@4X)4kf_Hks z7B6_O7hLECAM%2Wyx`+ru+0lT=>?Dm51dqLX^ ze&hvvz2IkFu-^-Q;|23(xr`d(1q;34)?Tp43+~_ri@o6PUa-Uq4)=nkUT{AzILZqi z>IKWZ;E`Ui+zXELf>AG6=>@C2;3O|N#S5O{1*^T_d0w!_3tr*{YrWt!FIeXVul0iU zUa-LnHhRIEykL_Typ2H!Fy+?^-suHfB$!8ldl7`FGq});eaMSl`E{8MK89?i+#2QRkRi{0IeE%9Q9d$FZn?0#PCC@=OO9ruZgWpJxrTP z8S~&e1a9NTn3d?q>;e*>F^TXK@U~LkB%#~O^}ZJ@jJqt@8bQ-~MP9HRLEdu>WaCT_ zw!8?G*h`+q9I$mm2fry_p4hJLG@N+IYF;@_8#1c_r35?UNJ8nsFo4q}=B-&fJEKoaD&iYU+N1up z7O;z@k>QJe@)Uhg8C|=RNvm&XVZw5Jqaqi%U0|Q^%ty^V1k-f0_2B6zg{*E6B6r5S zgLKZ@sQ8;*dALtGTA){bCMX{y(VLKx1AWiFjrhgWOwA#vRTW7lNOCFLP^wG#0zs%O`5M4)1Sf<$*SkkCVR2O zCEhW(Y~{7G5awYocjVC~xo+25q?b4{dyQL-?Ahy@sW=N-Cw`#=zL^2~s{W|5#7bk|fzT&y6r|5lrkyQIQ`G?`er`4xT*pKN|W3t!?@7<5duS0N2#b%biPkiUq(ehN**(2$Pr12p4&uOa<=P3u?Z^_k({MZC^@48 zJZE^F-IYi0n=3?^AB!{prIR_YWewXxf=OS?e;(`^YLMS;j+TX{W`8WHT zdGqD^3@;ma+W`WDtGXm? 
z=YQmB5>6K=iCWhWF@A|#mr!TZguXqR?8Sk(#}CmCHM^;+CY<_ofZZZX-+Qn^&?n$* z6&|TYzVl9U5%2~2T2murqn(R9Dt7#P2tPfLk7g42XmIV0=-N&83y+gkVg9`5iQs1P zX9If1&Ed~7zzJipXObMy{Z($ILlT7JJDf^mt1IELNy%4*ZzrG%g;}ex&=bDRtisNl zS%tOetOjo4zbnN5+Qh%~YKwZZ8#YaB&-(@HS<;%VsYd(7X|qK6byBw(qI|8^f_5N> zC?6lFXDI+vi`d=UYmNlHwSUbhk2Ew@V>RyRau8f6#hE z^1&dP;|lC>!gpD8D|txfIWV|<D10-=F(`w?O@(8n!;O68Y+4KPbYuFx zLt^q-V=KOrd#5l6#6x;G9Iyj)%;63{UZ4P2@h*jA213S?&w!kIrInSE1?8uvZS%z0;Pm5 za9GHat;t2v^aS&1IC#NBy{6+*(Fyu_IFx^irk5Zc`v)@dkJ9u~q?cq%FVyr=NH5Kn z-pz)#CYK?7RJQas5rlct0O<*30^+a!O-xTplcBwFfsU_X(Li}TJR3(ZnD)c&+7^|N z1$iG(Lf-4FHv-}uBLe|xZV>@-V*1@y-vlEKq=iHZ{|FByIZ+svkLMJ!NFkZQK=#s> z`z5KFkl6KQKvXm$i!esTwr4Cz31d!*YcNmnB-^Ea)`arFG{b2G!4d%6s?PHL>^huD zR!(WzUXyc53x|iuo>N+?G(D%ZlxliTY01;{oYJz8QxF44OTA1{%#-HGCM`cn=N{7J^c-LA;4l8I9l{XPzx<Q7D#ayG91%BY+_#$bk_Nr`{>9e?)?`{#63!oJ*g4Tht-G;HIPYydZ z=O7Pusy7mOI?l!b>%nyKVO?!Z(=|kIu22Lf6tMS)b?g=yQ$Dx|YQ2oT7?3g-!Og-d zLkQ4y1dW5;@Q|otn8csR{F4@yY!`X4P6)0r`}B7xDYckw`h=94ObM+x%K!x0r$5SB ztzJxp@HNmUJcgVyCQGJREzFgH8>7HFGdk;sg)1Q^Q%fnLSSW{T$l^EG%o%2N#tBzo zs@EAOl03WjK`d^C!9=B`7)X$FV3<>a%jJm}sbL09YKU3EeIQ2}{JmYA^j4`I_dh&< z#R{a6N}yY;(Dz&`kdLJ8B$zyoyto9lyA}%GJn-F(c_0Cm` zk2xcej0+&T4K^lLLK~8DsKsBA2ZFGY6^DD`N^npu29Z#>CJIGh$~mx$a{Eosix@jV zlDeoCMTCKe;v$v3Tb^2Uht%Ryv@tAHSS@yEwXpn+6-l;B6Vdtbm)a$B2Fm_qvyh48 zsc|`TDG!%qF+GX3hW%JYqAa9*%1%C!ji@ytVh(m2SJsKuVI5cw7T8nNi|u^Cf-Ggr zx%ikniU5SAnZH0ZR3kD}HU+yvz9*tp;hM=XRk&(2ilYu-Qx)3?7y5pm!V%6>I3mfj zXD-})g}YHEF_d>k;WAq8co{Oa3iF_Fz4xIpQX)Q7xr1Ik=(!5_MhtC3PHy`oUKrU% z9fd%Z<8WlCn9h1bbgB{UQxI3_vZ$Ddo;lRc)37`s^a-3fT*yh!X#^ja0n$9FZEhoY zk~lz)5!9jvG=it7P8O1=0gd1DGC)eJV@dPI$GC0t(_!B!-j7JU0R(ra15W&Y7E~wPzF{hi{V>~ z$sn0504$Wr`kd-G zUaIze>bRXKt)q@w$bjnj0l8|mH zJV!iK)`K$q(hVdb-lR|j&ZR)N0}>)w1l@2 zk4*b@#)%})KI@*%H;}(ckUPq`TxvNT9SPJ@SO>LSb+@P`g~#uhsymup9n}wol;q)r zuk}fa+|xqlwB2Hh2z`vzJsRiy{W>&8b*t4|S;~a=3qNjAUXFYV7ql z&PBIw54S7w(4%Bc;8D7g9aNW(;=l})3Ps>t>aqlokc3fGmqGy1qjZsm)TKP6F1!6P ztGb-d!d-PCiLf_?0d?6=L4=>ll#q!wbt{?AWIB{zR;H9aqAc7E;wTID3L0g3p1Tc# 
z%7Q%ykI&AkEO#Lq%2JsOdXv{9+9(T|rOMKa2JtCNH`^z*loC0hvJlQw79z>B4}vsr z?(pryS!7?k@GY0J)KGF#t-?4c%b)%r%0kg`yh%@lWOGYkn9?t9q~Cw6g2uQ`BaB|2 zY`t)0F)ivq5em(wgeOy4LKe%6({dWDJ)|C4^dvBevS21_>liF8V?cxT7UjokU--QS z>(A`jlW_q=zrhicSyAM6F&P?okrW~KB!G_N?SPx2nt3_ePku-!2*0l;^G*HL{QJ%r7V!At~6P}9> zcKdV?W4Zwi)*#@>VzAb+3(FkQy@+U|gJhTLUHQj6&fLjFi?I$lHFBJPHij(-8D(CPKEK!v5*>+G5pOz9;&KV-FhJ5ofQuY@ z@__|-Y02tHDYjNQZ6XH}tA%uS(qR@W&>+N-XYYQ;<~PVi=%CRl3Kw!UNZU-pv`UT8 zAlC1Q9wP=T-plSyPR_;rG1z<4aQan#_SP$V7k)o`#Q-@2WBBOm8936R^#&QaJcFNI zMJ@C@$zEwxXP+8;iJkpWY24#h`ae|(~Xi6;{X@w%N zX?6;3z(#MXmu9c^c?Qhow74tD{61G7>X-6w&)P4YB`RYUWwr~?`DMc|mIo?`@F&?U zWcrhbFxl}8g!f3TkbR;uK>{}#p22c1G#yv=GChOvOcOS>FGkI>%rxE23TOHTRo6QG zQZw15{ZcKO#MduXvxQPiDLw=CON8_KB_heQcN96@ys6!XW>9*Rb4Cp^+U;+Y9XQf# zVW`0k&C)MXc&H6}g6&hq`2cd8keyLbf;n!@!v<3*i%v4hciXh9GR92WHPGR(={x=! z;I;}D57Yg%kYi$^&{0afSxqCyNa&KKt}Y3#5$(U4nBpt>i6^zdK+QKg>Oyq_3jGt$ zx~v?zK!8JM_{;`HPz@eSihhfLD#bz7m?U(^ixrB%gz_n#TjHLC#2JqUM9!AK{yfCB zVW`i)nU!O@BeQ>cF$HR^{VAFtywUwYv?N3Y%#ouy{R^XSmj;v-%t$63mHAM0cT0UF zah-M#La7;adTr@WXPql}w5!2s@wkT)N1bbF+SsA#e^4}h@|pE)_*ZRhXVx7@=1VL7!e|J{K8zx*3E zd3(1=L`qsr7gl?ddN`DFkwC?HE=pucvjV$wX#vQT;(HhP8k-4ElgOFI6(I>I8fp{*MG)YSB3va9tvq5E@=L&Mig1dSR<2M4&ZP*45E2fC0Xe7x zsxWQ0kQ(e=qiXO`Q>Gd`P2CkWATa?UUqt2Ch5S$hQV~{z=pRz ze^4E$w%SLDKuGP4bOoe)dxeUE1JDBnHtTvtx3TOQ+OknPw)TU7tFf7|1L?r?3LI0E z%rlS<+@sJ{fNpGNp0Exi1fXEyF0Kv`7CNvfmk!iQk(tJzTrkCJkPIC#P<3G)*c(vL zfhi~iIv~Iy9oSDGTB}J$I!?fBIQmV5zIP@h8w zwvgA;Z+T?gpKX66a&K#2ep+`E&j1B8VRbmY>34kA=N=#1ahyl3fRmJV$ll z{TnlN;K(bPMt6wB1ax2ok~}*=D#AJtAp|yNF=uWa`1}guaonM9)d4LnyAHf48JdO5 zsl~!&Tp}S7@3#XTIdtF-$=*u#s}2w_yAI55%&7yvCKju`O9Ub=4VFa*DimryH~<|` z;7lDTQK(mdtFf7|1L;72trUERqGX~Kv>9fS1uhW z!k`__cZM%?nP7?+A{jbhplZT8FbPo5frTgpIv~Iy9XLuLS{IRwbew?Mbf82_Yf~r! 
z=hA_z2$_x(k|5c2;DBGN4s_4S)PYAYb##El1a#mMBzZbOD#AK&5+RaO`T#mmCU_hj zD3L={Qd(Me9f(MVX5liv534UE`2uzzkC?Iy31(bEx?9P9Gb8|Db{)7rky8gMh{b9z z5rK$HgJscypDI)bm;)V9;7lDDq)>YSS7S3_2hxGJFn~kB%M~T_45R~(DfCJ}H#Rd* zSO?|_K*60wt_~0uI?$a<2dbsWOdY5aOz|m5h7K60@~{s4^CHoKE))VC5a5swEE9;< z)g%KSe(=bq1NQ)mR4NpKbLqfLLSmb%0m-fdN1dTM@ODF{4s1t)#14>{fDUZB5V+m( z0I3M;z`IickyJW!+krHpTsu&$I-sRx*MYkwL$h!>wOF`}@qkFY-wu@L(1D93dn?&* z#sfKZ;J34l4rC0A_d5$jTkW+X4=HJ{EGkfUnLyd#4OBpZGgV-+LR|q|jm?A|NCgg5 z;2uTEJOin~77E=9=*DK|39G>87m&h&JGOCEfUpp*-drlMNQ%r80ZY%%%dPwX{MWw}+2Dc;r%nsT5O8G8vHUD)883 zRe?8WWval2a~%~RF##3Wk8@0}3XqDh3j7(Fp#nl@ZWX8xs6dOVfR>hB1tzNs2$xff zh07QYWT-%W4iy-pDnRy|;XqCmxMpTfGq96%L+u?R5Yh%lx&lMuJ*p{T?Y#dII-tOr zI`GY}1!_5PH8$&AnYpKh3bkV&swkOfARV|}P{->rx!c&xJaA&B7`fD`Fy7bP$mwR)FaEwZ;IV)4uWKKvSB8gpnAV*XeM;b;$ zSi_6S){E{H9KI#Yi`ClC{su?in&YL147V>__93*cy$A>SQZ&HDfRH_a?95Pto+B4b zz4jbKfr8QlanCJ`Cm4Kt0Ut9AB#wI$ud;@CF&pTcNuH~T(rLuYi*S2}Y@rLloFy>r z9kK*N>gKZdD9v6@5yQ%tl-Z|--iI^czGF*^1W>ktMw3XEJBorJws;~qVLY|CiyeVt zh$d^jD~dV@qMIy%r5ISir6891iUk!}Tq;yaGO(1mUn6Q2ONSIvR|+g;VJuMrSzIC* zYRkZtsL>*~>bnSt?z++zcay$UjrO>b#|8LSCRbii)h&}^vU6ntK`i0Q${bvop;$&K zmaJSkS+NYnm63|2Z2+!3NM9u-T){`L4txhZ2+aicVAleK5O>pJ|2DQt{g+CNPhh-! 
z2-ld+<~MQl=y~bcvSUg@g>Kx|CWLEjixBLby&WT_>cz|6_wbec?3l5x84eNYAa>lq zNpAoHe?q3?%)oeL5(fLl{y9K8_71}n7sGvmq2Q{)t{2IQ0cTW{Ot;^MmHRx&l!^hQ zQvf0TD{+nxdm4P)@pT5ym1BErj$_DydD!fqCaepkEd451er*S43>sxyW6DO#%d|GDw>04x2 z`$I`qpnG-J61d7x-~$<14b6{F5h~o1Z(PV#bVBhVg~^yd$HO!q4OyT^xkjb=Bdf_bzei&KNdogS^@uO z(>LiHrz|NwDHRC2&kq7Z*B}Wk&KG{SPnxM1guxgPi{soZj6!Ybs;h(NT`>Dg$)Hx> zIPN@e36S_c%KVxE=02YHu`(r$;?r|TH7-HGN-lty4EA>$J|GX2={_E`5nOABPN0Zw zg1roE0$$21C#l#QXRgIko(D@HLm9#|CESXq10g@urWS~9`d)sH5?^m zNZ%F;wTLueqRq51ATxyUv=anhn?f>!5tx#&zzioOCff{%2+Z3P5a(WqBwygy9;X6x z#WWEZCC$gIS4f&Wpdx93@z$R;SxQgFNl#c{>HuK`hO}e|Or>HFrdrJxk2E|7 zR+{saleiiVOJs1LkbKmZv%I4?2FvV3&%JP_670lj-({~jTeNUF)a#MNXhfsG!gAdV zq?5lN>H1@{B7GgvxEW$+@5ZMp91Gr$4PodGAowagzP3D@)z?;nOm?s>Gwe_#aUh|v zq=d;ZN_ZF%6F0yw++S7;WS2rRgMn;;;SBU%PZJXM$$*5{!`#Va(=KMwPKKqsL||IQ zCSuXfE0=R#p!bP>bbr{239i;L36}6K z$MkH+v@7--cYrxh&vvYQdNz%G9{vIj^51_gJSj!laA~7t4UYXY${B!+6B4)CBCWBR z@KGEwmE8i4XWgyT2t`Uet7f(kd9$0@ErnZ4X@?t|nJ+i;E}^)6Uqw!n3|Ls!JR;9d z&n-f_#c^F@Gc$sm>~x$i$WtQ~Ic>O7-j5c^o?{QeouFNBY-YZIAbl!car-F7r^L3a z8_+8f=acnk0f-KKlsW-~&*;Z9tZ_m-VX^0&GbXWqaX(GUj+YS^CX1yma-w4Zs#KxM zNg*z!#e*^eA;^&Ejc`=qenu6N83-BHjW)WoSbl1fr5_*uOT+~Y|A=Flb*=rtWzvs} zRr8Nqej{G7B}8DY2+Q;v{%?N8?EJb&I>LSU>qs?yIO)jf!~cSG7ztr0{2uAUo3)JW zM*mXD&?+oWS#{#@w@N^o3Fg~iNpJt z$}p9K&0h)O0{SX)L2UkNh>!iwruBQ9w3hmKQOx$Y8? z6&We{Wn~7@q%0ud&!vBX#3eSU8+80Wr>9m$=4F^OSwtat6uG5qB zYs2zAh{;%{M*)Cet9@Y^)ym&vc7akrEu;kDP)$52mgoyh!}9%mg+hl7VVV#CW%zb>Nbl>TYw}S&a&HRGnFs7+({fda?+0 zwFe5D~VT$&D^4X*rqTn-8;(Ye2!o zsma-N?=eUy?=8UmUgk`nekW2K;Q@j-Dxf^&7#S=)QHUr85#GpPrTDF(N%kuV>5L2r zpWAsqLXfuaqDXaQKu-J8yd9b#(slfU#vp| zX2jWfxLA>VF{7XIvR<9nJ3+DUrr33C0NQaQ`qD*)Ei(ER*Up)7+)?nG;Q_M-G%+arcI9MT&f&q@qsgpzDRH^O1CH(j z6fRb%QKT>}z)qhKE^a0uVYSN?k{JjY{vdTP+$|s?6qB>}9Qz)wJ;zPwNY9}}`M6Pr zo&{D%`pcvc73eu0`vtT2z}At9aIcX;ei?k9|LY@4y~RyJSZoH zGCXL4_}K4UEW?BP%^Dv3L~0;2ec6TweJsz4*O6xCLk-%Dikj=LK731O$_&` zBMf;nAR*3;3dszF3=bvGKLWWi&M?U`Joxeu#07^3MP({2XH|=|C|N#c?L@LtmJpE? 
zvI5fb4oWc#RZ&n_TKgiXf=m*a zA`9V`k)hw=dY}_H7)BN=Ha4!lHc{N17O#GMBDhV1-TB<93Ee%6QZr8K{73N!bN_6Gv|uBZCY9 z&+8?znF2mss^pFg0Oc3(72srG{QJL;4^9V9gX4o3Vt&y0V2cb?pB*YPeKaq?ga?N4 zK^?~jGjZBkU0{5mQsZmN5@-XZMlHOwS9p9-7#8em;gx(MN3UJTkKKMj9L;15XW8~6 z7s$78Z{d>r^>463Eut9UV-6k^R|s~79=!w-fv-&=nZZDI8~4Y>c_)PigAD`X3U^?9 zaLK_c+$=go(?qQ_3l?`FW2LQ}I zKKSJn@I=N3tbTZWa1#+>;u6cWo~q7AIGJMobw@8+j)G$U5q(D3qE3*E1zW$54-0aw zvfzOB7n$xZFyp_CK)2WED!Eb)(6TJCIaHcyX(2E)HWN0tfBr2$D_QLqDnc{OpL03a z^Mp>xynbUd^W;X{tcWjC#5%(-t~t~p(mixsg<@AG3rjs<96 z;0qU+$;ti3X66Z~xF;<5<)RKyZ&E&fYJX`MgLUoz5zJhvM38@; zm){iyR7$@vSn+Dn{$?#HyO-ZA8Cr$CDT~a;7k|meDKDMShgQ~ywBPaaqmsQ{+22km z$ICCkK~dq|cqx4))`-`XJ_zBGVhq>PLT%gADWOHax5 z50H_ju#DWuWPC*>J4Y^PR2j(-v?+jsjC7F>O#k4atsh);xUb?`txyCe6#6CMnO`zL z!&e|6A|ye?!{4VAaX}CN)csUauAL;3qGb6bWi_TpRZ>V&KvEX#%&E3koV0|cWey>d zLPL)6;I9;iuoOPNNQzXR=8C0-XE*QS%vVZ2H$cML&{q5eAadaVi;picnJ&;tGQ4fq zMriM#r1*UN=fSARuzKcmL~Vx)6Wmf{aGB};fQ^< zBUY4Y7f22UyFi^@^{uDemHE&$lGmvP3&Y~Qfxd>-+RgSKxu6BfVbl1mKs;!(VC;9n zgB%Wbe;W=$G1%LnPz2U?$keyL6EZE31aZ5-fPa6>2o|x{UT`99^~Qkv^WXiM##sy! 
z6Aidupoaq5_PiT{cLmW z1Qt$aIs|Ej%Hy{PY=QB_G(&H0QOH7tWQJS{?}fDqbS&3_9=~gutpSLcmJovR*o$>%)AlDm^E`lokaXRp#*Z~JVr0R|X|JlAQuU*;SUJoe8fgkrwrUQQ|p|yIL zqUVe}B%+)(5(l2P(>`*NOws4z#5vPUo&%pTXI>=5Cpf(g8L1D;$Ql|uOwp5_BNx=E zjARH}E1@vD1C%tobya=X5o#1fV;iICvGs(3-|qNdt|m+oG-Df?E>8Qky8|%jwEv?- zr6U~`=}=O9((w)ae4TeEH38{(lS2S?oTJIH^%>%UWVHcf8#4?*Lh}my;H-;ONPXG) zvy`y29wi;P#g-gP$r?dA>yA`-6Y>(1>8#(k4{2>zhPQW6DtylROJGo7zVZk9>rW5M z>aRb9=pXK{&&ZDSJVe9S$-Z7E?4|w{2A^$2)1?O|vJ>;1O^!ut!#>U}%!I8yD$4W2 zqCAyOgmqODOmv1(s<22YzZmO2jTv%A>|fo>KS;&Mv%5tup`!3<2DOH8JleYj+AC6|qx z4Z!Sf_Ldcnn@whi-Rw2&F`Z4niwoJ{$>SD9u@%ui{&axjr0g%a$VTq7kaad3C!lv5 zMWI3EI%o6pBOIH}_n{8DI~*9hWp zZw1g1o`9yepd&#HvsQE=5}yfT$u2f)4CLYD;Ri@r=PV(!!Zq1W2Lz8Xof2pj?nO=Ru{S$y-hNa7-4 zf?1c}EP#Gmj+SH@aC=-%X%%+XH083AXy&l|7}l_&V6PnR4DKos%|0dvt}^NmZx|=@ zFgjzAIMQI+-p1g#%&n93Rz;hRj_9-xJCdDdw=}1-!43q3_UD*B!)mk9y7o^dOJC7} zQnK_F>**}x#s`v!9ZPrsWkz4IOrYAk6pFwK73?c+BUH-QR}h|nUSE;NQ%`5D zsLy8^o7GqR+(YWh;jPRcN$em=PG6y@mp39Qqpx@mHp!^6Bb|kyMEeTK5$Y@6g?K&_ zN&JcVk}1NvJ;5zaLVU2VxWX#{{ZJ9$(N|;xEC(R`Rtgi5`UQRuCK==*>v&!;vfS*a?)(kwC= z>KNt%LWXnA0_3OTVrGVK;Oh~T_G2^wELFnHN)~_KoeARW4z%c#+sM&@JA!`}vs6*9 ztR2B3NpDqz(h=-m=IaPDdVsKZS0njnER13Y;P7mtoDVrjPF>Z;GrM2?IS0W@i(pgi zB@^C9ICIkW5rJM)7EMoZx9IbOr#(P=+rAJfXD ziHR?!Z_|@T_Z;Dtj)}NbG<3Dw4MWkDTGy9OOu=#m&-|Fmq|`B`NG^+x>4(tio(9*n z^5m%VQ z7cw~|cwh-yhyZ7x8eoTxF>g%U7w&x(ZL@W#&?Lv_<2qAu?pvX6Nmns+?4CQpXm`c+;xT0mBg&^ z#oPw$?|yfa2AN(=Nt<&BS~dP`Dax33e3%oX6%)GgP?IBG&->}YsVFO01u@FWRv*+>TqRF4UmrI5ntp#K-n~2TSL(Fbw$*wzySr@iM5h z)z0I5%sQlA217*Oqv9b@P{V!UJr}ZVL(I8Sz*;2<{JB7jZV4BCv=-g_l-rZEuuLqK z2t#hA40(khY@gyQ zwVU!(C&g~Sv%#{WhwW!0@f-?C^w?z_J^@e&~oQL>DISM%WVon4%KptedU&B zDfd0;W?Cf)<=%dv@YxPydz4^SL5Wvxhj?d+`C-W42*UQ2Og8eon&A+ikJWM|3FQvc za-(5B@2uElIjJ#@z2Oqq?nQ}eE#it)pnB~9sX|^D)#C!D2V5Cc^ zygYQW$ybo28wsCA9|RT~Awae`kjPw-p0m(-9q|8cJKj{5~I(nM_ zfV20+Ux=n{FhxN>GVJ)y;$K_I_XDGLpsj5!8guj;k#$_1i`2|9k5fHY-t@ZVboj9Z}E^k zbCO4LaXsSgTVp(9PtW3$g{hfN*(RAj5Rl4LEl-rIB}Tsmw*4SAIP$1q(BIYg(!`x^ 
z9ItW+OZId3mT&nzc9>%d=iI$bBL}d(8}C-jyokEhDLsGEYLSLbPAEi%nlQ8f zpczO&W%7ce01;Oq4Uxq?YafB^y8G2to_AQ{$g{qWZVSGzUmAShsw4Pb{9^F^!#6lj zN==zZe?M@1CDweEiNKkP1HPfSyEeil+za36|=f1R(Yo2p(ej zJqHJF+E!8R^}VN9b`cVOnVcDj<2T{xeU3hk&UThom~FCI7Abib^{X=ZU;0^CAii!A_Xk-s3M-R`g#E35>_l8M)D4YVV<9;*PwCDy?k|O1 zC2nZm2*^?V>C)Jke^2{mG`Tv^{=M4}(-GkUUX$o8qem1R{yL=}T?qPucQ$pcTkO;= zCG-Z6Z7l6Z0yc27RZ7sqyv`1X}D%e887@7sEh1k3dbnga(svEfcQM z1flR^lW;B*&e8-a=Oji<WnI-Q}Z44uf(84OKeNb36whFD+w5QdIoXaqxtGBk{#eHq%3p%R9E%+O8@ zv3>2W8S3NDApWf9Pd}>sR1q}JzI$&I;@JBh$dCOZB#XzWK7*&d3_k+MUo0i{+2^w4)KZ~(vWBHr#}Vilji6^}AC53dyON{`@=`x?(dSdyRBV5; zKE{{xzyNy~ewcN#JU7^sUwdCBQ@;tj2VaW$v%SRhN&A!7!ScS={@UR96i==41H(;- zU=KM>Y~RXgr^@pT|CUPbv^{+vrQ}k1rtS0bTvm?BTKj9ZdZHs58JDO=3&K9}(UU%d zU3i~;Bmlvt5^`dchD8;JYq*T@#TqW90Br4x&(rV}##7#br<~zZA*#>rM$u%#Y8AXF zalNQvDWk&||G0()m#wWMxZc*VG-H#()Uk|bObasHreV4b_CpA>%<~jh@=eij2h%Gx zT*G|znonpsRO6-m8ejYw8m6mbR|)PuyU>U48^{XkEoJv=SYY4Ru#~yl7yqJ$rTjc! zIiD#ng%%rOwoSK>*RN?<+F+$g@6fnWtjjduWxX$vXPu#q2WxA!5f;L8-^UajFMVT}9~GHm3h zh%m1yPKjTr@Pg+~U*0a|yTCkS+KYMHG|VYy`x0N?YLk~Rw*iLysnj^3t6uX-Iny*O z{4Z6Q64JOk!fcBjd~t<797p{1`Dw|}boMO=Ys$t>4X_DS&pTs!$G(SRSd%V27VSoD*M)ujGtWfxy3EFO~eTA7m-2@vywFve(Fa+P=%A{}b;P=l0=EOmqIt# zKbB{oJy^4evi}^}SgQ?t3A}W4KP7s@ft@uh4f_IMD2Pv+XS+Q6>{iVyWZZA!{-|+M z{y&r!QO2}}+bGyG4D3}3E9n>b@|~_>fjz;%PVm7Vp<$`V<-Yo!i!kZj&E(l$;mTMh z3evnxJzf&d(;6@FLn0#QEd^hruh~H^` zNxKnYdyP`FhzRdM*nUkTxG>yqWCZ!kwqwLKjQFcYa87x;4nf0py-TKupkle9dMx<(($ zYl-UTEbw6d?9ZQ4{`{0bd*TN*iLxdq5po;G7V>8>f4E1vcv3M>Hcfx(pdxA1fjysTl7^A#Ev z33`23g}uxmo2qz3#(!_{oMh6D)ilX>l!j|a(3Jw$XYY$RoBXls`Evw+uIA5B1(uqO zH^ATSCd3J{PY@=-Ba|?K+rt$8Pm}hNJp1ft<+;}W7KqvMpPT1;^IRj(KKm}fq4LsB zx0t+1jT0^}1spBfKX=jROMIH4_HqqN-gyS@VS!8A3BDUi`;CUB4wk`_@ZtH5hDDtz zKP>NjU;3|n>H8y{s=Njb0{nvWP186-wH)C~frh0c`x1O*p2q-Zzo?ay!t#{ig5T82p9#+|i{59PVme$PDL!qfgIejI|IGYR>I zsK4V~iCCsAks|)9#Yq)!H#u(hVTnszpFP8*U4^H88Gh6_OjA^neZJO5sH`;Lin5yW?ESP7Kiw6rW%aDSi|BM|53xDA}4ED+~YY0hVVydSSXA5s2S^{#>kJ<#K@1-@SPom 
z1CsY28kXkRQuEfcCSLd5_Vsz#Oh&$$Nl?1^y>K_?>+4Khm(2dz7!-gAumZZ!a~L7N}5eO=01q zH7uMSq2Uh3@1bF?QL`V@yk+FdeHxZZ-+{1w8-B>9n>0aKS}$>ZcAF`P86VQH)TTwl zB`j!?<`b%;8Wxx%HC#lPCasweHcrDr%oN~ZOH?U7E`hU$Xufim*}t7sT8RBf!%|K+ z!pyf?3}(a2_j?*06AX zhK36n|EPwm89rCTD;d6qgOZiiyMF`){(6I2TO2blr!yl=0nZ`>ael%2gA>v=6 z@uC(xXta(W0=z z_eqE+Y^5)~T*FegX$mViY8XcEU9EA#t{r@^g&G#vZ;GV80y9eCr45HOoF2UGVcp}O z;yg5la$HkRZjaceds9#KJN%@!w4M=0e)7lzBR{>2r~CjW%J|n922DnO<}=;MkInEj z(t-4cG*1)bS~T3x@FbIdlg8CCu3p1c#DAzszeM9|nO?2oZiaU->6IE+!?wXbqG`G|0~Z8_P;buaI7+6mOa>4P9KsyUK@*92r z_B->m_4&2I^J4@5?~gQoNArG*dG7g@#wX14JM-MqlzWMJ9&XCH-#o_|xP|6@sd?UJ z;8)?j=O;qcvpqWrvFVcy|7k%=`bsKT3kbzt4yK@BF(3JpVuN@8Cl4 zZ-4xF!v8(|i?(LxUw8BWcm6FN{J-<>#Df15|2BgC_IfI=yExC`-(+fVrS)(Xq`G2K zEcwmJ(ODlYL~}!l2mckzDU%PEBT04nO{}5CQnS;4I)sZV%O@s3j-|)vQJG`O@mzrU z06=5dKpL~|ss|3l;)ZIhsJ(&RG*y{Pz3@tH&fiFFzCo2aZwP{0Blc7Ld28+pk3^S# zn3Ou>ne^BW;1n|eERiL;-4@r9b#EE(7+ltcZv53qVz5H5Opkqr{h?Wu7DD+ZlF(Va zm>#NKn8`UcUzN`s65>aB#d{#f$J;Zf^^-2-V45?8WvIM(i6)$s%| z!9KbMTznbV~Gy(N&Ld)#n=8 z-{jZRPK;*k|C)Y$G=)83*66u#W};*IrtMFS;8rr6mHXb7MlZ^`W)5lSn7#)V*47`r z@3d{~daxnUF?~p-buNU496$4E`vs6=e}L{^{R5WY_3-AFzwQ52eqOfnk7M~$1Laq3 zcKLH3{GaL%!O5&Ybiw`rL^$;a<4ym0X_Gbq+@BisoXTrl_aK{(b3U8VM%k0MsPOIcg%^sBzE)9E(RLAu?*7Ql?+wtEn*V70G?0CVAQ zGzk|fxFc*e`lM(|AhZ-$_$63g?0{ zbtR#&O%qA{i>5-1jTtOhtlc8%SJiQ&CW8Enj!91&7G3_qVEf*$Pz3fOQZM3miLcv4 zFFFPES4WdT9is6Anl2PsnL4*R(4I8=m8lt1CXV@LdQH!>D9Sn4$lCGU8w%Kdh0*v5 zERWwg%=*)jHO8x&Az8y?)?~0acnE<4Tq8^3*MEuji>I+*|C?#Cu%PTFn2rt?u!$8< z?HoN~$9?YSlK2ZzXLRgDv*}e5{I?7)wm8)RP5XcuHDR1r0stG&Yk;Q`qJApK3NO$< znjlueI*a-`qbNro@r@8S7G`@!N!E?RdumA~@CTyt&U|}a|2vy1$6Q(;N!*F7G4=8g z;s~n7XQXMm9)%p)f8o!4_+iXle3{RmX8z2@4<~YN;L9w&q^@hU>-ct+^R_S4*!~%R zsu_hj#VJfbSzg|;e~gr6nSXRKSdv(TpFvX%p6e6WJF0 z$>-0LP$ePaQ+9(`@A4ocQ0Ra0ZvRIj)As9plGK+NyVAsV;3?y~?DpY-KZ|Ewhf>u< z+~zkC5O%BJvQx6#2WxE6z{UZ}QXf!=1Eo_(vxhY3M-tEd85e<^Na9K9GYg)*S9*0@ z+L6Wt5)sOIs^n}J5Yq|w6Fg*1E)cL*NrGfP`2{#-&8P}vD0G39n=DR-@Q{*r`p$4R;EY9@kRSfe78B!h5e7KDFRgpve=pMK7Y#G3>%QHw?l 
z@OvLX!=Nyk2Ia@556WgwF3oLECVrnisR-GVGeK20d$Jzg@PPK@TLJLw$qqo|+LJBu z;1XW`N^>F*7MIR+pkINu3~qY(6le&*KWjLO+G`ll$XZ~!* zpKT;MZ5QwfRQ5LzPcI}ZY98EIMO7k3tC;is}?9T{Dhc`Z%jt)3u zeDd+}+3k<+{TtW>x#N@V#R3#udAD@!tErLM$0uvFBNhnL5sP&RS(GxhD3XMZ_@Z8L zAj|mVV`8%kw!cn_?Qo0D9G@)Has|Yc+ePqDxvvYDJ3iUCf#qf$pIoR27ZQm<*p>z1 zIf_t{P?38CVfOLK-T>X)@yXG-?aA2hvnTt6?8!EuDVsg{9KGLw_GFy^c=qI(kA3#! zZ+P+9lkDS@Rnqh39-sV0(q()y=M!lP>Fh(sW+tHq7@wSKuzcV6WC%$9-^VAPN>`R; zeDVqsa*a$VRznS*qVW&FAeq@YK zW(rPse6r*ta5J=lxRM|^=YE!$k6hyu&N)U?1B_2b3cztPKC!OIw~s?nPxXuTy<;DR zm(3ll41(FQzlCJk{rI5)>E+8h{(Qinckx3L@+Mzi;|oUsFYv9?dAkn!W7qMgl~L%Z zA7uKU<>ejwETpJ)`G4%a33yf2^*5XV!8rBCp^9^CX$7?ws@9}xO$6j#z0oKRSf{iq z(rOheC<)qqTxnn>e315l|rxmGAdkYoGDn zi}-(^zTfx0&zI-PJ?HGb_S$=|z1G@m?eR`h#5gO{Hm7$HXXQ*HJC#2t@+SdwQi!7r z{x8V2$n^V+9W8-5cO;)IY9&K=God@+sq+D1gz0#qufuhyy6lA+XdF@ATO^(F6>$wo zhc{`TRguiTH1!zhWDILWExIiTAXAih0qBJCWc7-v`z14nxBdoNJ&|34V0%otQ@_!g zP@WkmO*C=pP(~hgybV~l$&D#TAoQ~!FE$6z1`y0Q=EiJ8U^(}x@nJdl3HjWNprk!G z=}bo@!b309uHx?W`ifc)cG|fLhhcySTuN5V)C^j*c}q$u6h?g+w<*}xA9XhQq$v>Fyu3|6&vS6JioL({yqJ_)gG7J_R@O!zhxXz@;amo&+;&QGwy~_Ch zxMKPqz%Ynd0jp%{@jU+{Pj&4YQC%tfM1Vkd9Rc07iUT)^?6VO6S{pNM&|b9`;zfTw ziBJrM0bkG3VR)Azu`~Mq@+OO4gEa?cYL1z1^Cc?n7UqBiqS5NCH+dsMZR3&>E`>;B ze~s9bJp^-cc}ndvOc;q$Q`l)|8MwGoJ5u0=^&LaqrRzz2eRhE8GtN9seDC#*eIxMc z{+kW;8y7#%^X;}b@bd)T2(^MCOU%bYJOv;85x;-gM0?GJJMv#^uYFp-dV38))&5Uy zuletI?R5aVn?B0aaK5d14L`f$jqSBPL$);^Wq5v-_F8huf3dwTyJ0iit1^(U6H&GQ zQ`>6*E0tBgu3~po4OQVz<=YXj;^$|0V|)FCA!nG6lkha{<(vYO28YArC>)s+_Eq6D zYA~BtTl9#ZlO((ZL%`p?zN}>0l~9*MFZKE{fIH)1Oh=~(sV-sYRnpKKU&zqgC88}o zL+?l^%X^VW$+QtsBagFfV&t`)28&uODl=c>+f(npAobfU`)@0(&Kv4(*W0tPrKkmX z9kMKJ0saE2I${ByTG#?S9B@2rOD#Zee#zK-2feNR*~4f&ItkSGw8ZacW8Tf5+a;vO zy_p|#&4+_$Y<d*0kqQQ~qo1`7g{T{qJed+JFDQ*q-OSsqOjR>)M`g$TR1@ zjHkPbKhH@>kNX5a9y1?H@Qk%5*9)Yqjb_Oeua%_PGV47#c+D-Q!BheEj6D3akvh(yhAm#L~_P~)euv`KC+ zg=d_yhG5coELQc%onO2F(w+Vac*f51J%Z>pL1a&6J)N>AWNH>uKBB2ra}00 zS&M~Q<<3>G>){$?zQDM|4jjBieU=Ukz*e^YEF`p^V31$K6*6ioZ51v=cpM>--JXSb 
z$SXw3#`3ph^|OhlBN2o`T%m;+E`@+o;T(-Z04DP(3h{3i6kH%T4w&03z-hvaVHw$* zg>}ky{ux-znbBQLSU)B@`?T(&<2OGhp7{7pqUkIx>NA-6DVcUgv}@lFI5B%eeU`{B z$A_D2=c4*cRs(zB_hJt$1>>w3;Y9F!BK4w`r4 z2^c?8`89H%$Q~S|hSwEMeQ_-HFq5jUl6Ba8yGPc(?!}CXjeC76!`%hOy%O1fF)C{L zU_8!;kGYD8tgH_r{II^=QA}h1#OW;pH#V_I7Fs}@GjDB?=^8w?s%z$!ue1F8I?1Mn^tDsp2q6%*-DX>F@2bzQWqS36mY=n&`hgv{FKvIdfB|##~6+#B} zusqbalcB9*sL?zHQ1*R*I}Tc;B$6;%AorWEz<|)q{h|n#zC}pqj~}81+<}*2|F8U< zG=&0V%~&P(RRnvCu?1q?QU8Rcu^)eSdQp9Xui?3@7oy}I^fLM!GoH*tqTLTyBLL^^ z5YyXyhf&&tr@QM*`1v}15KG13KCY0Ymb*kCpK(9_muB!sd~-GI!sh(duoPb!5-B^& z@%ApPsQDg>3A9rDUl>&P0u?Rc+s*SwCj10ET?c_~Gk-Gtxe7m^fO{!FE;b)O<;OwK zy02=$zB=vU%(AevL#h>m#(2kh_D|*Cgn6}0NuLwxB--Q^h_5(<>~WjKTKkt z>7TLd_Pgq5egv&3X&jddZ@@3gxaz)q;N^{#KMIu83;+N{at@6u+y4T4Hz6}P8> zZ@4ueml?v&VBNZPxOUoq6Z2HU#5$F?9sOA8J|t{+hGA)JUG-aurq7XAopTgUz3q%x zT|1+2b=HR|`(&8T??S(KFmv?tdoseolJ%7R{gkyTZLMUw)W{9K2mKLLYn}C43Wh>O z)RuXIN>`R{52ULH60;Xd=3L3HSbP-h0hzu$Wj&G_`B`1{XNlQd=K>qLGCISM$UX+x zq&2IORKm((;U>LkRGF$Iu%&JzsB;6^Ns6HZ0Di8`g6VIJPFpKHmULz{AV7>mpHWME zB*?UdKyi*%ZSi^+S*1bkFc_5#wD^_Ng|N%g`{7tUJjg{bGRVFmBSZSxl(VFhGOd1uPry@vHEA8+X$q5avb}@~$QH2C zXn=x)Q2$;7om@Ns%Ha>!jDj~fmxk%Yq_uDX%9pkbMdk`ct_4NVy$C661yK563uqEV z+$A}39keG>3Ah722LkdvPN2(>jqn1Lz<1+RqH!sk4-6unv%mKtQvp0k5)DcanjPH7O5Eu$uK6X-j= zPBQ%&NtkoDCPmTZ43_6JZUz3(ppBJ`o!$`#Fw>C8R0qi&_ks<1$R_v%GZ* z_z9y1&16Hn?2MV{4Xj(~Zm9~sA6$s7HiaPUygwm z?4vUN6%k^cXKgO)X=A*4>KL!0kD*>na=>9l%iX=oeFC(?)!fh#`=ue`1krmw^>&Hm%fD^Bu)6!hZIto)a z$QzDBp|VW@Sl1T7aCI+x+p+P=(|P1#Z_buux(FKrx*{&5k+$Jrjn~d8>xYT#dIY4LqtG{=0#Zfmu!3>c$ry8bEp2tVpHOCFcU;{~ z5c`LH7ee;P_*uZXo%rG5q!XX!Qwp}M0f6p#PXU9SmEMU*BV)tusQ;jZkevMi>&}sc zId?B6^jCHJyKsen4Y5LdaQkwSInICW29DSNj-Ng8Mq6|z^L!eC6!M|?=5r~&S;F0uhSH<8J}$ZlKbF7+ueVUtJ#OAzya_5nU%G$hp^;=MND`IlL6#d>+6I z@SKEiBafzYOgjrJpv3%x2xh%U!vsC!7|bN6t%d!NeeD?=I8N%6J$V;Rg`wQHysREd zr_T99Cmy2jowB=t3$IXV{nU#|b-{CD?TVk$$c~Y_mvfE2pVH|2DUH6L(kOlkdQJDw zIhOsAh?*TNWf_C5nYdvi2iqA%v$Ew;ZH%TUVTiRNk?q0=8`N+mPDg+f<@=5CJK=63 zm_3eO1$wXS04g8vm@X1bm)6wr5<0DAmm2f5<{DF(jp)+PgiD0%8iK4qQYoKt6F5BG 
zmdHL0WN0oNju^;_3lg9h%4xj}d1^OiODmrs?fZ>t*s+Fc!2W0ckME&m9 z#6HJnibVDpgmNo$TXaipB0GrXa3-La8jC7mr9_*OG`fN74Q5}Oqp`E8uKh89N!N~y z3ij>)Af8Cy9-Ub2l=z@;?|cL{Fy;mtIj)E%uOe;Gx}<~Bejw#E4X0&S+a-5d4ZwCA zK`3l3Q;5>Y05f-)sONae2!kQs&f5^g9#;vl5nzSh0<1vBtFwQOU5jIfqjS%$CXCNq zu<$Mz4ioP`;D_sIkigFGO{!aKIL0so8K)PQO#>ZTYy^^&qOC*>7izx67-G>NCNYKM z7-1em_;%h+02PivZNx$MJrS;5M&plS<9D+0dkZ!GmV}bVAGN8*e*oE-#xHExJuXEK z8+Osge^TOu#(!ivR9Lm>p+t5EiIibIdZx*oDh!l2v7HDZ>^sq4*6N6fR|CK5{Y29s za7bSxpt;{+A)AY4-$r*M^>k#JBIwVotT7`%Uawt!ik&FMm`nZ>qD6RX3?fZikHNK5 z;KN9pOJgHM4aTAfIQlvFNcOb$ zTum05M0rMd4k`(?fTj{H;nzTX4tp;3qsV-u20vwn)|g@B>o)3;5K|j9Ci=_hD^Sio z9K|}$Mn z*1y9#b}we`X~_cT{|VYv?&m8IcnV_3A8YurFEfWj`x-uIQU8m9UpL9N#nUYph@4wu zo&!kE?!Dijna`BkZtly>bQJ+`t~MFK)=2W2yG+A{SeGE2Ww=kngjvoaERN9HXCaT2 zdaUm>JX-{#PAPGIJDlD>n;yRJ`}BQpB>!-x6<$Nk`PCCj)cKVe@~rgo`pV87BafIj z&vNV$-Ta4{PX{HgzY)#P4yxvF5TcTYxbBZ5KF-014-77GlZF(PTK$K1Lp0(_KSA76 zP!ma~qlTm;y{R5jxuYro!uheGwOvC8rK_cqiR?F^+X;XO^?C3#V_PH*jF*u1P_l*=}=K2rKsx?*3xI=CjZTO2IM>C>Wy9O9*70q>^`;gQU=S z^N`LMi0pC$iLvfVCgoUzm7R}V2%4KEW3R}jOVnN|7~LB<_hoOC^d}PT--JFYk=<6* z{g@V@<2V`wy)l_xpJ;l6O3m5;hL|V83)@(wsSo#nMdclw%CckM`%w4R5Ab3Oq<`hv zo|4~;Mr=;}Vkv~uSvH@1W(}-MCia(wRk4F)K4FeJIZ$ieB*0o{L=qcri zY&FB3G2H>BJB89$2wCEk-byNvN9hCK&!_ZzL}1V81f_qyL@0fKKxygRi!JWwq`gn_ z>m^l~ z|0Y%B^tqV`#8QrUhxS)sN!d3MX!Lsa3-2z=R|0*Xq8D<143Z_C!;i+Sz58`xZP;mpRDAJj7k z-^a61|4@5!(#bCPoc^CbIW><`Wo4%z6pscz?6QNg9UT_;e0-@lI1I`cttL>;yh=%P zYa4-`Ju3GY1mKDA&i0tQkys|8IUh;p^80?ERg#G-mn+eF~L6TtEAOHg|HmXrvq$Y|@p&JnzT z{RF6)1WqLBYOs6mTc|`qPyyxQW-z*!orEOP$fPDpSt}*o)pCjHLW(srt4umed;dd$jxGl~W*2hT5|MuEdO~!` zvK4f+f9)i^y+jDc*-#W$BfUj%iR^Yr&NXDLIdP<|IZPGcXp=lw$!uzY^sfj>=1Lpd zS5(3m(vmzR>1Msth;G8T!2y zeh>ZqDg8?>9~hiWJh%d8IQ#bN2CVYAuF?VA0P9Aaf~AEV`r=kXX1w$XcBKXYv|bwOm1G1GKhz>hMyzu@-mg zrZVqiIO0So^13XFzkV^7qSgUH;@EzgfT&H<)*9MUw^S*@wGEuv4WL`vk_YvSG0W1` z%M(pE>_9w^rL3plJrg!u*{mPRl31KC#O0vlK*p%Rz7O~mN8K=AgtemMAc)b`^_PF0 z0Mq1vp=1Li@+1g 
z${`JJi4T)viel2hU{X}a)^CvFdP(^HjCh+?yp{zXnF$d&;oxWMRujo@#MfiQNWW|I>;RpoNag9o^3M|ZKLZYqB488#7;%CTUj9u z7<=s>dpIk?&p5JYwj(h=tpi_9KE@d`D#Ec3A(9+BoqPoTlkVE@s_!(hQ+%$ye|RGW z(P`l$UHwj?i4IxXY2*xWe09?-N&cKC43HFWkxk^YHK%d?oD#jd%o% zj3qlhBi}$kt1o3MESuMx6UN*$GlJ*9@6_hLd;q)$Z4ogzZS@diE$~M z`aE()9VB4()U83V{DT<^>guir-(beZQOEye~^Xcn=6N$al!4wAMpUrf-9wGU z;7Q&?nsl9qYviX?OuMs>qiZN>+@tQN&}n)MnUp^3C3j{wO3uHOhhO%AIK$OQ^|dw9 zrTuZH+KYGA<8*4Nn>|;&o$9#_;9Lj;jU%Unvsj}$la4I{tu77f!Oa_D?DIAA_5e(= z%<^-<2_Gi!!yq-Ctne69OF^EVhb$;tQ)t*5)prMMxn%aasr$0EteiXMK9WFHxPU_4 zQpX~RuQE$lpl-?PClc9Tp*VfZQGOT+wN?F4Im@w}SXIs1M7Eu!wO&YB*!u8+Xeum0 zzD`xuFQ8KTX~>SNdX}mRiU_Jo8nhM~XDb3UNbl4$LrS}^|r!7UKgHGXzwz7TrC+^_I+dRU%*_2?VnH>g5Q{o#sUywpUa}B9BF++8EVeO#4~iudsyKtb;Gx z`(W>U6xvr?#7lh=orK{0Mjk;EHLt#bCQQ*mfe!!9e41B0zwwJiQw1`K&nS))Y)8a1 zd3Q@vj>qPmeG)CKbEQYwh?A%Oq~okcr+I3{nbSZywyzTjq!zKRXNad2oH;d&_XRw( zwDSxUH)-G4NS4L`rm&e>tJaN7gKonP#Qm12+qYJIOOudP~ZB*yR5j|U+jd6XRX>a|UfzQaiz^{oPY zmu()t4JdFN-|Gm2%<>}zxYc;KwA{nPg>dN8 zzZX}ER|&vhv>(KnG${*~*!2%2m3+og{s}37g_@LDsf@y==r4&7aV?1Ry1~ASFkjLbDE8=GzAsg#9!_V5&?5W!fKWl%0#{8@`#?Jz$iO}0xDO)N<6VD%N zUk*-5V?m;dzaN1Sx+Gd7k-b>N%=FABgFIr8#+zk=K&gjeUpg$JN6tr_8A`|!XKEzJ z()pGy?MgeBR-~QD;;9*xg-8*x@AMBM6K8A46(X zT;{FuMWSXxR_7A4cS0;Xw?%jLAn9C&(<786@TJ{c`VNG|YlJx7t)Id5A;x{T)}`z^ zEIEeT|D6&RE>IS}@pv%cQ}#|q`J`0_lh#coAwA*Dsf?-`f63#sC~eBV6bHJmf&oOR z)W{8b!7;z9CH~4`Sj%~`H<%R} z6g*oIY**WSl5|Pzyb3M7>{dj(5Ycv2(A-r8KrSbd?#delydQu-whLCWU=-jn{C5pY zW~7H2fhl=5Xrf7uMzgx$j{{M*o-G)z)txdpR(96`WiW%puO}Y`O;;LUVRZr$)HzoT zr0k(_#d8y)z{9EijzWJMn(5|h4sZ!I0QXGrFA|?XJeG0r=vQb&L~6J0xBh1^#t;Wn z5OKVAE*m`BJ_acKyd21A7IIANqn$*j2Sn-H9x(0bnkuW@3@-kxV%!peN{t^C&i$XL(h1 z8yhY$7b%}jWM2e}$B=a)=)>B?Mcb#)39vIrYm5U)CY9trzD;6y-DkoUNVL!77kVp`=6m+`$D!A#u_K5bx&y0 zI?mfX05ol(G=B39)#jg1)}#AxozfQM(#SChWK_uekbNSa|DvuG_J7!y!gju_F*UN^ z!vYcBor|m~#cH2DeOr&`!iqqPd66z%o^-Bh06CJ@b@K2lnCEv~NS5P>1~{T`^jelY zZ=|Q`ZkhU%u~YZ{J4xFb^5^9{xWBs{6G8WY{d{}ZwHfpR%B6OcJA>cX0K`2WYRA2n zKjZl`n?Hy1XF7il*YczXRfr 
ziM<~`+!|2Mk4IRpf8&jlbc^u8=L2{W;w?u0opz(A34{orrL9%&89>e( z-sU1kneP~c$Q##Y9P5>WAEO0!gZop#w#MBXM1iKKx^TZv0##@Lb(cGY0UO(h9rwQ? zg$MaA!rY-ORV{v)_b!5OgS(wPabE_W?hyPe<8+w&BVy$$?6KZl+7x8amwSU!e^J?e z^;oEq8^nplT&4j|B-gA&_g>(u5)pWCk_wL+eQ=5?Y0t*$0@*D1N3f530n2?ieps@r zrG(G8m*GhYTqNH(KaJly81J4YVQbt80+Vx(k>@IRjKa_WaSss~oS!Oj8{B=&_g?0? zvw1!PwqxUOA#gc&qbbJ*^L&uv0(#zUp0^Pmw{$a)R!O_TeNtd^?jt5mhk3S}@cZy& zUG9)?-0x-LTO}O#$IJIKZie5cj+Ysji_G&pJl$`EGC$cw94pU_?osCXg<-GtczVe8 zW#Ts(fST@s50-}fLyB)J&kb&w`Th^M#Y^}ngY2FVvR5=-DDZ3u_AzooCo9E^jE3HJ4o8OHGTtr>0AID z#!x`umBM6R#|E#-;8o^y6_yfbU{4HQWm;DObf`!0HV8oIo{^>d1oX{iI=|2pJwRVq z=8B?Y2SX#kT#JC772`~cE76I^z3Ev5#*hw~Tg z(2BKeX?PJru@8rph?qAkrFM~ zO9@gBtNTWQ)IAO72oH3P0Ggpy^=Rq$%U zBaCih_HWRL?3bq9NM(SG`2%Cq<&#q+v_)7%&&D6gx$U) znS#|KL8qsjMLhfjoim75h1$7Wwjmv~psKlvkqWc*VSt5Nf1-YQbnz9RyEm!ewZ1@w ziS$oIr<|tY3)mR@Jz#J)#u;EZ%L=f?0OQq}1m|BNej8$7I}uM{ZzsOR$r6AEwW-h7;+)F2_=IZ8qDe^oO1QYCH&p}YZl z4nMCz!CNp`ox;!x_@h<^GR)CT^_W5Ero%k8>Tx>C!JGvKUTe8D;$NT)oa)-DZk-E( zA5Ay<5d?Dzg<~TzoTq##&ep;>XCgR%UiAy?2WF>h&lO1vbNC!2@LCC+T(6v5pU26Q zONBe;Ap(3oMZfYmc^s?cbFx5%Ir(>@_nU^V1UbRU2B*gkKF4JYSU14PodoA!Ax=J9 zU>_d78u5f>8vezV`OSN1JmL5^x)Xdnb+@EHG%q~71#Uwm5C8rKJY2pVIn{k zqZA?!=T+l$WC`VFc(~Ew%J6Us zXB#vvLB={yW67k8chZtmzZYg0z@MBPODtMc0^_nqmI`1Az4D+mGC?yR#2oftf-=Pbfm7(1GXv7>>6xso!UecwY$ zuYth6<=}GIakN%-^)>mG#Phq571F}*z{Dgh8^Rytd5{&-5#8FwJUT3CiQS7Z4kiXY z3R}C`gy4ua8PG)LC6pbodzVhg`FPI~yd6>~Y>{hvlrw`dTjLYq6`b5&k8#!Lmblj? 
zs%97_v|c+D6I$6jWmZ_M{P_IY(SiIWt&UFNlaPhIn5di#O@15zCj?Ml@XLBTAky|7 zgs7|TO*Hj^Y3j0_Q|nU>7p2P-r^h(nCEYj)Z3+s8lKl*;fpB2go>*13t9#{j98MaG zIiu0~AytMeCYkjOkfkW3OaG_-8@G{M6BUVJ!!d8eLx^^{I9K~s} zN}H9O0BK^)*hs#znWi-+F~Vhvnt~$Ai=p&rJ3z*`9=3W#qUoN5ll!m+;lbw6{nCxOxvi~R3Vlc5}-)b zfyA>+Kzt%6bydDGt00T(a3Cl(KQfrd4BSaJyN*ZW z(?vgqqJr8Gbk>+oG~G&IPZ}-aH_1`dXX*o=4FKW7z<(15_(Bl=D>=YNngj(M;C-bT z%iQmw&2@f*hf-o1g;YyfQ9l@^?nSAmn^L3Ba(l9`J>CRUdZe`{v?a3Nm1VoCEUgK> z5y9IFaUiyvw8zYkPaD2AO!&gx48HjI`@$E;VKULUvN}XVhO73xtV~tc($?cy!_iqp zqx+)%Ojo~}XnGOiikbYWHA&}kuW94^me7oULUB-@$I-Fkn(_pWsNULLI>(!@$kax= zt0;qfhf8xq4B~YSU08$%)F&(IGb;*p4S(2P2oTfr(5)T;Z0QNYHQYchv8Bz6cUMmp zDbr7&!cnFb+?y6ju{&@C{-zXQNYsEDT^NZ&?k)3Y=N_@_4C3ARMbLBCz z`MKUED`~_9md%QCJg!KLsu;uV8sdr26`z5-l^=sM@ZdaRDaC}vdE%$KA`)uRYi=;}tr zi_t=t8p{G~M8gvIr~Z#%$*b!}C7RwHj5kI*cB9yu6z5jHdgAQZZ6-oPf`ybevD?!} z$&tE{EW&xIs#i@NF6)e1$h~CG{ zd&XFDJ?;2S(f3QZq>Qnmuv)SEzREBPiL<7t@Ei8s&8$;BawXyT`;x?bAD)a zXUe&rJ#R{9l4!@_Rp(TQIdA#wdg9#0i~|r#W{d zOm574#`o1@5rjF9SN)=A-}C&U(d+v@1>=BOnh1(brhUIee*X7j^t*b0)dVbDo756~ z+w`qM-|wJd!{5J&!JjDcFqgK0r&jJ}uA-+_(odVI@4=5GOZaV`ImKn~8h77%#QEnv zdp>p3l65p$bhi7vCgN&j`8Rg+gt(dPdhjsul`m7ju zegDEzF37-){Y3_NxO;wwHkVzk)gS7|yk0BLLrqu7+6*P%hz!Z6?eEd0X&a11a8zn* zi8OSr65~l0-6fr48cEodq=$GrR+O;g!GQr`X}=}1`yot>EF2^s_%;!WZcS4FXSo}D z17I{Y3z73>bl>sw+@qqOV+UV=mZvK%zq_B%@)re-gLA~rTq(!7E8i?2-^BWWrZ>Tz zB-0zSNW+@YeQ#5-t7cM*I87`)9jr~v8mj$w;fx+{yaF>JC_QWVYGf?w1<)sD7rNCc zkzGjIS+acyt4?rd3G8ZG6Q0myuMHAo$S2y%osT$4HLP#N8Z?&56Pn`7@7L4&RsM8^ z+{f{Xf5Tq*GOArgq;R%I{4T0Q_IRHHygZOk@E?*1XGFy28p-`6h)(`t^yiY8>i2_HaTq;xTas)c0t=)3 zFiAWPiMi-M$`(&Z(bw=;_uHW}Hm}@UqTFR}+w>l62>rk-FE1vk^)1ZOAkRGV_Ii1O zyh|x{DC46D;}K?s%6NwdKdf&9dL_!?SL28Emwh=uG)Mso3*L!Rfbf-SiELE>pGVZ+ zLs1)knDL=1kR7hZA@4QN83=cW^5b3faQyh>Dtw%1z~1A>_NXM6m-X;t20j3W25v!{ zVkI9X-hC8NXl4nrC8%%Wy9hshCKlrCj2|kk_zXX8e&tlT+pI%Da=B9aE5ECmcqhvPokxH0&0AjO;8 zT!Os<<{%S*gGk2c2+sg)vSPO~tfIYUnSCjw7)hn_z&EpH#TSb_8??2ZNFBEAR{i){<2t8l)ml?u_ekEpocb5o8Ks^ zaBZaH==&74ArDTQ2~NAOZ{x;?Krq^aJNK2~D`>>uqswr6An@)xkfm5JnZ2F{ye+d4 
zeT&}CCy=O|_z=IxY8>tg5$LtZ{K;{^oU*sUjS!bZjYRkFAHZDIHWmVz%>vr$$3qKh zfQko%g+{bz|782nEQXKJ_?RsX&F8G7{ou*l+eYLzo`B;C1hja=&qjzfd;kev84CP!I0b2zHMmW7gsa9s^aP`|&gc^y3Es9IkcW z(06*R?l1+U&FpTY?*h}QFcTU65gH0<1>V*78ixNigtMECX(PbB20s*|tN3xbg0L>` z&+)FyY6p`=bXl$Vle)68@6c6%nxh#=#)oQZsl`PaE`>W=-{%qQDf-SPbhlP`Dd-U) zm;>?delS&PL1VyuP2VX5?hAN#pVa_K`550Zj%C|o_b&XfIJdI3uq<{i1M5v0oB5I9&#@Qbr&^0C z)h7SAllgNXqIeYUZYI$L2JT7tUHCJWpGV+_xor3aKE8(!X0&kw!dCU80T z6ok1yxez~R;D;HFQz$9S7wJeEHxiFwoKsk{$yQ8qM^YL|lRLI8~w;WH@Qi|}o zHiR_xf++y%xIpRlI@^r=Ln6Mwk7xPwl!T!E{8%O*Yuvx`fUOY*e+vWRh_h$3_ z6`rocpJo%%$d4K3;}SeE<=MCV96btb-?`^0qKoDEXy4z=^H760353CTRA_yAHwM*s-k6+3$?M2H|fI$?QZ zdTr&dnD4ZftVEz5Hg^17?EV$XIe5EU2V)W)%lx=V{op%TT4!Df$Cz=#fUHv2rRh*X z^oh7G9j`<1`c>AX8(6MMH?O+~uT)pFd% z8oW*qUS|fcjlpYE@Y)=_UK_l&1h2miUgrg`zYSj7g4a8P*ZINg{lV*k;Po%T>*C<` z;o!A1czrZ@T^YPS6})x_ug?dstAp2;A#(sNnS=yqfJ5RYB-sL1;}7 zIyMMR2BGO7v_1$uF$kR)gq|LRHUy#P1fi3I&mTXb*o5Q5PGMCiqy@Q zP}xATfT6l0<}U&4;sEyH0QRXMv^xlWBM5ba(D#DZA+!DBl;ahdQS!HuSE0f1Aaq3V zIuWlts~51Rn^$k-+FH06WW}`3ZB*3@MWHLHacJBWjuG39Lgv`8*-eDEGE>%5^BWDn zcsk)@6LL2Q;w*Ck{B(t9Q>LA^)o8f*_{OOUPY7g92zD_1a&~--10u*ykcB7khapkg zUdTkF*JHINIl}$%Wb~lUQddw!2hLyro#2s7oW=cw$TbpBGRR4dpgQ)gEi#D=k@kQsEugXCC+?wQewnE@V|0W;>0H>B@S47>47|0 zw-d=8N3e?|hyMXdCM8blM#R1=*Ckf;4F&L;b%}|8pOiTH2|z6e)VA}_B-D48Co*zv ztkzd>C9AI?r4q9*f|r_h+Sx4e4ca@P)R>=S6y_(VZQ1uIYS@Bd;rL)iGJ`JJxpG9E zeQsWBRx{2XhokeatV&i7@P41@aOx0H7H7FtaXmlJntk|eHbVXqpm%2rqH@IHG{ypm zraFQMV-t$#5+GVyk0R6lnuHHbG2!S9SP>j|)upAF5rglH#ejPIrr~32UD~;$`x9^w zR$pN>V7@Tlf(0O$VZK~r33N9bJi3%Ek^Rwc(bSH;pr8DDUoVkZPo%B;Jah{0%>&_t zYQozcjOKV0(jI(88)xYqvatK~c|G9cj@872Tg=GSF>!hM79z+9{#=HDibhugZ~*8X zJt**3EOJfNI-LIZ91ifg8D+02M@B1Q$x=SB>ViWtX5Fkiwh@~uc#qn8cgu5n5E~s; zyh5^-a;K};VL>aw#N^TnX>Ys>Pad77u4O<3@*XEyitq zD306mlb(&-rhms3Bur+M44k~hv^azjODhE#CRKEQS810(Z0VjV$)@q#16kzR)XC!& zAN&n!ED$(_G}8jE#u`PmQ4O$q8UywB*C;j)ViOx)H>(Xzr;;q24_-hbR)<83$$J&D z@<5#h_XBpU-KI(U!B5S8Dp|OP2}&H#nCTqs4HmDR=O-VVc(9G>QR0#hG%wRYb!We! 
zZE|=byQL6&uCNE1+aI!f6Xk$#&!Y~_NtMa6I8P)pc=uP9*2%Of6oCoFJFKkiVjAk< zr5Zzj$tb6dRme#i0#me_elVz{u%rVc#;TP%^i_a=nt@EgKd*rT0oFdK3+!r2l@%VtFiOdSc^ zUk$JvMnM6b$qkX4)eh}z2Z8E$<$T{<)%T?d2Ht8VQa1 z#UOW_`(|E;=>%9Y%_CA5#`-ub;AFblHSBi>k%LdD!*E9V&>s(lt4pOuAxJI%d{Br$ zCGI}2!dNpCt6+PgNMy6b$2H;2f_VBiUVTj$TWtE^X9Pcc`ZlS=gP*&9lvd0(vKUz4 z!AC@&L2>qH(I6dEzFQ@lo&kr&4TaCp#~W&5LqlG@-#|1^Vcbtd80Y8?!Y~EOeceuw z0+4`tXWY`Xb4vqBW7{N6+Bxy4L1}9>MwxKTJBwu2W2BrNN2>}PWR{mjR{86WW7uCcbtX3mcFT}S)+t{Y&79&1nkT`|h|_7AiiU7qm@yticv`8G0(I)R zZvAa{hqHQsfs=BhKp=MrKJh+68>&dh^Cnuy2w(ba>2Xra-vAlkb{~TX;0eQ~Ui-Tw zkv=G)uVJM+t&hL7+fY40to@6tZe*gV+BS0R0pw~ueY7oeZA98P96QRM`q zb_!Y1^x?WMx?UiA6_N=Ic89Z2}zX`7R?cmQ^zx z@o-~@^KjJhQYctZITi*`(b=vaXmmv~2Lq}={9cUD)nMT!3z1*v zA&95uLe@edjJ<83HS%0-mobCTObuuz+!u*43_}JW z9`n%>XD7$$dyPU?DkKva$f`KxWJ2aLgd~Wz#7Cz}OK4$3Eis254bZat9`GI;se1(5n9hl(Z}TUU%KKY=evdVhfJyhlUU1D7>`nJ=X| z;%ntZbtSqpK0#|4X7bpo71(^-2?tAeaLT?-obvTg8+&1Tx2UPoeVf?N+1Ug$WGRAC zGLkE?om|i0YtTO;N=+G?&ZQc z>=jfjwmAyJ&*yk4f>H%%KpoWmE3SeZ4qH<-)ibEq-;JkVf0N-kv zqzf)lwju&Hoz9Dr5kM;?J9zjiUA`!sDNvq}X&zFOVnSBU1Ww1qFBP(3;yh&p5H6?+ znI{58gsjh^K(x;ziC_-np|T()@|);MK!G4_3PoT-fu_xdrat67g73cikbp6jB9*-o zrXWDq5EHNpwtT*T{e+ogwFtAaI;1h<0ybVj$iVc_SWL2pFJuQY-WM{VIpV*XkbTx% zP{`guFjz4zV5<;p1dQZ@fPu;j&?KISK)~8mz{r)1K-@ge_92=L#DPl_dc_}r>7V`}U z#>Pl4nZr7ZN(0bJ0m_%fQwfDpV#mYez-?8rzfI-Pja)xC?v<=2m@k)&L@B1&MhMox!L9D0?Bxg&0lSU z_paGrV~8OQQdB}xks_~lmrVy=81{(9V1t2jjoA8JbOxZ%WaY>mV-5lw+YmQiAlfTf zKJ3)MgT*nb&BwF*rJ9>U5tvXRS@a=4ordsSW|#qykwNENge``W;@{P29BD9n?9Z6V z+wKyy3U;JKM#9El#<;bh8jBNg+73b#yoqMg58ec#)1|QXLQ4^MC4?pUqd7f7o96WU z_!7Z0SE%Y_`l()QTXR!KOA`e4@hTF)ZWTJ)3x&>BtE7XDH`vh}6<84-FqAh^iPIt^ zc{>{*Wj&AWzXVK;{K!k`A0GUgd)hHQsM~V1K)Mt=9YQMMd})M^QNj_nv{-z)3BxsY z+EkUGO=3~20%wOAh6|K1ARE|JPRD5AYi=bx$3E#E6go`!2nF6-(K3xd7dcUQCxspX z=;l_YA#k`ZpH5}oWtYA%1S7H7=Wb;P3ktSHB}55?!v!)^B+Gn3SSXk>lMxL;Fi@-G zf^Z9<=+Pk)2!a5sdFnH6Rv_-HVli?V0;(WH_m7^Zxz#8Xfr|;kG(zSwgd_-$x_N^~ z-9*G-{CO${i;kukLLS<+g=Q=60_F};`3%HtF3A-r~cwB)~ijrwIB?4^< 
zT@UEyR;G!IK$8HJU4mQEjR+7HoW7u#2y{x8`66(HV9G2;G(^BaEr^T2w*UnZn21au z0sz=H}k1+;+(C~&?AT&Gaa0#|b@ zVK*fLlN9)3MaeXq5`n`7I@1sHuDO+IU@+91p2$<|#`brx#xK)}$$J?M33U}_zBG~j z_~&fSLZf4L&9FrFHAd^aTiTvfo*vno%nV$Xn6uj%BvqypGPMk*P6<(ay{?m)u7#o< z;C_$f#&TE|&}r8gntjrD>M8*Di?dwn)0KpgZp*hY{C?zIDTsii?FMcw6Kf^h`5hYu z;o`=!3!fPR*_>QaSN%~U`-9Usqq=@-71HJnrR_hN^;Gn{g$+ki$xLlIij*F?-ko_Q zYmuL)GgHz@Wek~|hUrP`sB%DiW|Cit)#nQ|b5C27Dnjj%$gV^j0_`<7N(0 zF+$$5n{aoH zBfwmC&Q%F4FpPe=+Z@5P>=}$#TJZAPm8Iz(OoCjF?C7J&AXT(xKa(h(b;AWcSiV6Z zC;#+@qX@_?qV~d3q^=pf#fn(plpP_s7A7ScCpVi}^)SC3@KY&C1-+#hjrWqMP)P)+ zKuJC$-)Tuk6e-Co!kP zpgnXHE10iIWTz61?MOi^tw2~4eM_SycS|Y|H(nBe(BVlbi$7%e5GR(v$DhG!LIy%$ zSd3z}v z$*F^oCY?KB@tY)t(mpf+_@A>;QmrJk*kUHeBr{uri-WQ&(E{%Ho?yVJlPxpwQd|HXI=Z4CDK`LJm_% zCNPkVao+h{d54fb@9ZzUgYgCa>Cnf;rz-EPIIM>RYnJ+j2)2aGYCBR841qA;Fz?)} z(ULocKoA}0oqbSZ!#gZX9`EeH8rhBz)@kh*-ZA-L1eUxLJJomTPf%i8v?bCB;jN6M z^MP^Vb8E#q7S&q~5U`$&sDLM$H84SpYs9m6yoO={K*H`Ys!BjD{qnyM!w(4n) zV)8Ily)@as%W&@>V;u9fEvla7{c5~4bwy1RPDb4d!RRj|2m2bNr+him7aIy9U4dYn ziFyQ9kC_`&r|Z$ETvv+5RPG5i<}FA7?iW78U>+MY82=-{Z{G%<*YY%)`GMSw89XyIC%$?2nJ4j|6YfYA)2=wzxg)e* zU=wnZ}$=%AZJaN5# zH6V;$XIb*}`UJ%wgvDxUYGb=q^C@f->?8@=h159dniR6yB^~v;43klq>qW17ZgZg5 zpG1YB*R?4+2xZc^;}?i6pfnF@Lx(A}a`e?nXc#Y~gxCTj5a1qKBep;Z&QHkWnY_+< zloUa{UGPkW&@|L?WsG(*E{VC{9p#ddET@Zb)<4*mm<3=oj_<|8a6+Ckyj39w6p{%H zWJ{c9HV`tGA*9bU2h}4ibX-5CL6#k_JhSMa9`cO%cwwHIehQM@yB<2#o9II z?sLZ~i~nPE4_SPe)HOtr4j|DnUKS|A-rd9xLzVf$5=L~Ky>BN3%qym}V{{zwhe)O< z9wDmJxXUq5-7<)Cx!NM&2{#l5|G&kFRzdLj+En6N( z(xFh5WCM)q;8B9VkYxGHomI#Y3dsZpvOdn-TN5&uAtXV3=Egt}|J0Xz_ZVgF^#}Bj zxwT*+=Dw0hY)1qflhVu;9ONHAcb!H{-j-Zm6=(1ULSS-~DIxw5dIk)BfZ&-c#6?FY z3~p;~g(orAp2>3Q@?)GmLwaI5Ti7`=o&6yT9Q6F&xi{aJ{My+ZW;z>yg(oqt-MDLIyz4Afe?I zI)N;SBqthhc+9>IGkrfUsdzjg%O957fx~xCR)Z+0R3GA?M*gr%MYr z18v|u}q=Q0E~%AqEn-CTv7r;towAUz5770x#a2~Wg;6jmcQk5@JF;J!Un zBU-SK8hHo9jZh;}3`+_YQ6ryewB(JAa>y#KM*dC+I2|mnosH5nP$Rb}9w9D>PvN&) zNd~qhbJPY1Jb*JLLEM25rQrdK0})dr-hkqG(k5^q0HqlWxI^z02SVu{BE*(Vp;ZhV 
zh&Gl`9Ebt1ll$p`dL5YZ4%t0WVYICl#~X9L)S{>paV4X%nT2Haod}PFemftdP`xY* z_Tl1TIAXvQz=!<0Lb?jc1O{?q9P%X^%kW4Hh&Y~ij73<~i8x`La?iyhddNLmv=GtW z6Wh^|f>=_t2=`q6BVfc{8Rl+fSe`id9771WA1tw*jZ!q=o?R4=kQc6maF5BYu$A?) z_yX-AR|4**q>ETtEy6wduEfKVpYKYLd!%z1Nb<6<$3aw{dx`EObYB?uiU~|;U~yd4 zN)9ZJ!$gY?Eb@BpaZSq;*<+D`I1;j^p970>W%nUMjqt!?fYl#aEROX9)VcuHRS1Ta zwO8OtOzdYxvWLRhK&vVngh(ea&&`Urvfq|@ybNlgxgj)hgg7}&;;!T`>r1E<9 zTRo(T7A!=SVan@L40yyWU)bOpsnL?RC8de;`e*chuwIkpwWT`?v`4_}j}r=abWoMR zqflPg+zRvhwUVG+95*vgFvmQHVsmhHqN-6`4*OUImGe=T+Q7Oj>oa4XU}cp%epQypfBOj;yi!E2t3c z&WJF^$xP^!ZM@H)(Pv`NsM*jc;tlLB&>fQj0>7bIMqOl~M+9mux@VAMIxsZ161KSA z{SZN&j-M(*ojSmh+#-u?=P2T&AZD84i0c*c&lIuF8Wcyoha#>K#7t8h@mn-UoR0Gq zu}m0Xg-Njzw-egVRtaLJi4fmXTvHfsXIg}u(ZgbXLI?~u5(3_$ssj=>U?*C5jd69b zr{>#$aHs>0tw%Eca11MvnGUp22Qp#~9*HAau27B4JSV_L$UWZh6~_(=*{qOEU?5rX zNY1aII|DL85~Q#?IPp+b2RH25Lv^493#o(Wi3DS@M2-`am}^)a+@sNww_ao!R|hi) z0l$JNv0pht&p;id6ps)W-T{T!Zq?ihdk0ODpk2sqrkgRnAi^K<4r1!S^A4&dd51Pf zM?IjrbV|Z?ZxZi7=^i4)XSGrfX(4;z2-X8zE?6s(`uX$@UfxH%gGS{YvUrqtlJRE0 znMiG!H308$j6KqV7ctu0=D6P-t}tyjY0&x{akxTtvn*I$f`{Pi-(ULoM z(j&7t_pGf00{9gyF@|3v6b-m%8KKNf2D}5|9&-<5VT)dN@g|JF=n%ZgXuOc!DX3%%oD9YA13(v6_N=IulLKaP$=h_goLAEKzuzJ@eQu|zH<6ccj+OgYr#Sk zxr9jk$+TYP8rG9Zjh4LO>>@hO>AMmFUIkMY*OM;PHJt*m9OpG~2$tkjn9F}CnYIh5 zZD*KrIg_$z9>+SZ-9@CsytlZ zMhJL0KEt3l4GU8ne?U1V)P%i1U@4?=z=&xj`#vE9;i(H z>z#T;eM`+k6xkO@boz`1iZJvR#P4f%mL$&5FG6BKPAO|~)qOXibo?Gf>L)AOw&qsY z4|qcovClQXEPW-0Dy)4fCF{}Rk%y&ojf1l7_FPM z7G_%699DK^T(RszD5vaeZN9-Tkm&D?T8o(SJqHuIV;GRZ;GZshl5g<;g;4gw5@2X< zC2Vo?`AS8&iz3t^|KjHJ(TbR3{LQUQQyg)L)T?7xMXaO!#S#Bq5p#IIxs_>(Bfe2f zyqhAHf&NVx{AUZ|EQk4Kj^2U1b3{Cn@g0oDdOM(nyf>@7=P8Xkg&M`oF%gJ|kz~)t@r|~f6|zbp znZQ7@sUkVQ3ULN9LK38~%)j(qmHD>qdnof-u#n8JCK5~skVP>!$Xvr6*&j7p^2X*t zM91a+YC_<&7p4qY-I&)t@h1uqb=Cm}Kc&@f)!Ygj{JTklb|E+Pj>vtxq+@3mtS5@e zy=U-uOSwC=IXdbAOm}cBe*eGyTmO>aob-SP%)694=^8d0w5=D*t+rFb5b74gfIYx^UBGkC=AqlK# zJl_X1lcm^!*f60{3> zZKp(eMbbqKpBCYjeDnTLN#3F4?;upz@OcWPD!7g)u*Cg^#l=VJpFp(>jMTRaHU7ur 
z|352?l?(%$XrEyGzacXIudEYl(yvJ|z*bmIUraCRcxuRw{`)BMflT3mw+G_9eLJI_ zF)d(h8O*l)_VzjU5(SwDj%_gPf;gA%w{kj*Z9rCH8T&_ z&kz!f@2|j$p%Z%XeH_O3j}DLT=j+m%7T zGF}fRt`M^~6up?yxD6IeT+TS0VjDLPECpEr2AppgG*303sxXTIv!EEG??IT{n8l3u znRp<~#O^-I#Q)q%m{>~^V&b<)ur5Y1;|AMsKmInc`wYGy&cqKh+8VPMq=*tN7<6$2 zgN%`b%KgH`#~_o!X8QzzZWV&sy5kGO_rkJ~;d`@2OIs*79SsUaV15jLqSyHTISvO!IwG`P}pR3!5Q31U}kL^Z6CbCybl(SMg$&BPVYx!zH zvNDR~m;iE8GI3!ih%yglLuN_ak`$pl39)uk;)1S3cIzN>oX^E1$+BK#z(xwQt#?ncFK+b+;cL$N{WN<|MNL%WbLQb__-K|+#_jjaZ;zuW~ zS*!wxG@U#V#2V+`!Z=6_M>m z0Qe07Oq=-yACHW@BHF@3xWt#4%Xu||AWW}vM0pLdn%Hfd9A!>sV$6%?G;6fOBZ+J` z$wq(57U*yl7cdI9p9_`@IWB2E9XXr(8lkzjSX=a)Y}W%(B72C&iL-2J z5zW9s7(*GMl&CUs_Re7c%s zFT%Dgg3~?>iJ>Ei11M4HsE+6vPNd=HO5!NtU%BgShaw25`_40^wvD2p-cCKT$yTiew-{gjxO& zM5j&XXrps1s3x@XC4y zD93ISEW&Sb)i9qB{+?0+43}jG@urTMAJPk2{txJ6*ywyMEu67N!fBVOAF{ZBMyMnW zK-?ng;7UO*LRL^6OqBR`V&McH)WK~V{~f3UP#{ka&xCQ|!D!0au)#OPYzt0v;MgfR{!HmFI69NwV)_6z*_IG)ezy-AYTs_{SJJO3=5EOEfB!i5EDLzB+ zWF#uM5r{)W5wN6yb<_aE==JW&B|Rv0$HW5ob|oNAM?w^pew^YTVk9PM3mzC>Q-}bz zrUAx$rj(;PH}%_Sv5I~c;;q4Q9-%q=yVQN?^M7NXmN^L=>{t^wcA6v$+3f=VXB4vt zSNK#!qs>PkysrAaMAHlGz?`zJEpV?kJsTWr=5gV`yT`L_($;?Q<{2b;pxJ~0oe5|L zQGQ0jC9*GyhkeN~GGIAMoXk8jB31qJ)Nx7cUlDFvA%KU*Gx&o9@0_k)muM1Bw|axzFuxO+_SJ*|sviLrn6C|=apt`Mkf1H2 zL|wc{*74Lu6WJa7o~JJk5GTH{zSvgs-h{qTCpXX+4}VfvUpyq<<%Ht;;@1KY=!;8$ z$k!K>@KHct%p*x+`eMA`$w*Y7FXqSf#f}2j(E<$67vH7?I34o~;CqYyBz6xYO6eC} zakh-nIQy3=ah3@mxL3yjLtjWajJ~KWs4spw5Y-pW@%FEmEM!3w^hG1H2=gr+UNri2 zgopLTOHG^B7f~(yG<_i3*B5MtsONZ>0I0rLy#dq;^u_8(ajp|U=!<9uzmVW9cnVZR zUvzu=VlqX$fWCNCj7Y7X>I+$a7RmE-aevyIK?0*M4rUSUB@}X5I_RDVDnpjLg(Wr=gd*eVL^7Y5J@lim3%qK-+jC!Ep z$w*Y7KL+BBT_Ru|ZNLEi@fO+5=~!F<-*fbAaJ&(s3h9sg#PJb4&>!7Ikjs$%1cv^Q zav1$Fs-XTj`lG1+$eT<(OtL6CnYx_{STphr>yKL+zl#2NNSw1x=#Lu&K=sGG4?(SB z`r`@#%+nudO7JH1$Mholqh&8)X{nykA5-E2;);9KJ`W@?`s3quBID(8d*jan*)fdm zWW=pLj%~JL8$lR|+r5fP4pIqA?r{Wj$|hXxtBXdiKGfw~5%so~2pU!|~1 zg~{|_%~Y1%g)?0wV7jc%l)oj8>KaAG^9ES{ch-u!=)`ASU7QF=s0&7Y1$$#B;xD2v 
zK1ORL?M>*5X9OV77ipBPkiNKG5f#@LGZm4qF9rjVuP@g37t|MvNs^ep_^#l|NK~LN zM#PEw`JSv;#{yu0zF0&Fa5`2N!1pu(vBnfpCHRYz{O$k`^hGZb6@~nXD~P&?e4i@WZbH5f6o5d!FL=MOd=FJb z#pSzqS5`3aC%y?pzI^Y5j{^37_Pzx^?xNa12$iCEQ&Fy!>toT1 zfYN4n^VkG|hEhl*#mYnRfoYm-+St6vZfJQ3#Wv72u2d04@lmg$h$0ArJZ_;pY{3@= zAK(ir$mJISd0n9DU-JKc&zaxu%=|X(CgD*q`Lz3+-N6bTGav*gP=8HOOc%DTf)~3rmjghKJqp zeUaZizVb%ajQC^3_};)W+T;5~L}Pr@Es#CFhgXbeeD5WBY-4;sK9hye@xA7uZH({V z3u0t^UoYTojPLWu7~ki=q4;@be6RBxvq^|wK`)YKd_VA48Q+ijn0pWt<9m5wd|yVV z%D5rh$NVGm#RNdd(HxZfqjhg(8152m;oT*Ic7;S?i1%o|GyFC@g=qO{PoGrCcai3M zx1aA}nlF8}hxx83k?)?GuS6l=r$*#^V}8E({io!6dNJP_ns4Ce`+Z^#UpLdrci$5E zenQZ$kSOH)3C(w{pYMB>njG~rEG^~Z?4_h)JS_-P2_?18M|*HjDVX>oF{F<7aegc~ zSIEtRAoXA%wXwlRTdkzlQloB2UG3Myu}W$kaX{)TLh7rRkyQOxHvqeG=(Rv;&fh>! z#JpR)Ie)gOkT%JKOrm+y+2#s*xSD@(Ytw#P^J{qhTYLi~`A;@|ud(vAXCH=(-f`bM zZC9W2P}Gk!ezfs~ z#*a0&o^W&L+UES}(--7s`&{$WuSKbcPgnV4bN+BFv)q1u!5&07N#u`f9=r#Rr8n*i z@7dXA|LT%Qgwnx&2KxZqG`wrmfzxjK{4N!htFt02e1>F3H-pgNk1+@%Gvi&-%Z+pI z{^{`2T{-lDZ;g(QX zCCLYeFPX_yt8R%k!Icz(550=#^Bb=R4rmIfIX@UQ=TDmz0EN>|@U=b2LB(CTV~D*T zLlrY0ZumY9pX+&8J|B^A624R8n%6@;u~j6`rLHttCU~dK+KFV~9~qcqoV0uoFdNq# zJ8P%rRX^DmpMd|*zM^t9l?5nyKT7J%fa_i*Uvw+s!wcyKn>7G&(sV+^UAuhUWjoQE z{CS$*5?>!H>D5Yvtu@a24yzbK|0|^s2fns+6<@a-Yw@&MF|A}>)i{mtdBI>kA3F<;I(9{O z8d7_2x1#HSNpgFK72Q&Q3HwUr&|zcL_3xyYnh{9{`VUmD{;71kX>aV`1-kEhT5=ci zL$#aV{LqK_QE(vtCg~gY!sRrOY92-;dGS8Y`NL*`>$pPEp`8kHz`3tu zrnnPf)mmA~S$*iuh@!=W^tiLOqI4r`R*#PIdUCzVyYkw995V85fP9Ae2XI#OYED*I z-kr~R$F8NzB>D=Ovxe8N1Qu+(V2Z^{tAn?I?~NRMc39@#>%!0{H-(`Seg%R$XmBS0 zTc#!Vq?5xRUsD)9EjiqvV1|#fgpr}IgD;%*z&)eEXa9nq(-=CNp#g?I!O&8MG7No; zp)Q7wWvG>*_cC-GLvtBAlA&6L8X0;wLv;+jjiCb=n#s`H8Tt=~-pJ4{3{^7p5B^N! 
z&tv@gCwfKjzYIOj&>tCkkfA#n`V&L9GW08ku4m{rgj%4~JQFYA(ZP5B7r<}v=j-@^ z{xkUTeTs1@ffo}vyj}+H*ARj5I@Ds2Kyrke7~V(2RSfT?;WZ2&rD?Bb_)rbA1%szi z1f(Tj5B9g{zeU4~2;W1)+{^|W6<^X`t6{h9%!mX}F5;(=GUb zhBp)bZPYHxvyl0IS;Io-DBxTl!-NqWw#9^4KS!Y30euUTE>m>Ls#U{O(+9^GIK#VY zRzmWhtis%;;7Rj*7xD0Zzr5cb{Fl7n75vJ?zZ&nsMtONAxE@dXfxn9H!|UanF4nMo z$30s2@*Qk{;7GwXYFG+zij}sP;oR*(C!WDKh=h`SNPy2sf9KaW8 zfc-tp=mbM!Poc3XSYY0#$@}fWLGpfA@YJ6*FX``rygUFmU3f|-m17X(g?noh!OXK z3(XH%@C6#)!ZM#}(vs$&hNYbQ0f#-c4Y2EHkXgLpk0!+7w3oJA1(O}D$s`-of8_J_ zG31Ez`+KBnsG$#jsbQ(6n>0L~MR{5qL~t&)aK2{Ye9nTOW#FWLt<~y3(6G>73>@Wk z$N!ox@+(N>$uq-Ow~9dkLJ!RPSV0qZANBNgyyFc(ksSK-XZD&>AAo36P__&o}j zuXyZFf-j_J8Padkco}J%5w8`x@%L7Rn)Z}F^@wIEAMj@lr&vMfDd{x~=QO;OMOkm* zoM_>sEu0T&xQS^OXjt+()5>d5!$N;Q#gWfGLE$Tz*ZVar`A)NN>MfiD5Do%#7u1^+ z{7nSZX@yCzeh4XpeH0)?`aJ=ADnP2~HUf50fE4Iz&1@+@;L}QbAq%up!)qD-5%AfR z-3pU7Sft^8#(!SvNxC%}mP&pPaI{Slzq8t;Mw@L7lYR*3g2LX<_Eh{=03Tm@HBp{M z4?|dT`m2T|r+YL^L2_^>!rV%}N^{@9bkE|;2zy$;P+))6uw;FYhA$%S=aiN-%SMGu zvs`Q8_bQH*X|aYS?FTfxjy3pR4WCb%r)yd%_ke~4zK?~!mxd+ZqpW-nHDS_x8vPmH zFJ<0e;cH3%EeHp{LuBx4{@ku;Lh5D>Hxcc}8or36zNg{2gpX?06%0SA;jP4ZP{YE- z{=hJ*Fr(?@Q@*1OFU7e)!}3}0Gx?C-JcQX{PDOlh5`Ru4CYrcQQ>-J_35=Kt)Zlge z`41%^nY=>7@-a_qUl92Inp`q`mWkd-8n5TioV&HTcO?pPx&g_8f8Yby2QF6{Qs{46 z;V&c1c=jLE@kK1wMh#!a%D7h2KN<{>Se_Z)%AZyIxgI}ABV{!GN9doW>7~4+&K9_LhHH@e9pWsY^+#8&%Ddfw0G%TH>4PmZ>-Uuqe>y^~4B-Mn7 zU><)C=1)C;s`;bi`_bT?cwx%7;)(pFSHDif(z3g1Sf+)4YAZ^MKB{5igWvp(q&=S* z-ep?aJ-V*m!v+8h~RAge3Cz(;Li&F zWbnftxJ;f82JLu)@>(YPpoXhR`A7{fBK!~y&m=sl;mw@t9{jbWT}Yh&QuuV@RBE`N z@SPb(d;F8N`!s(ZXBA@~KcZE=f+_x_;kgXofiS-*2kOCU&0;IFyBZO}75w=Df4<9~ zZ}Eryhu`!?c|I7NgD1Y}c;>%K!xf~wT*EQKmuYwl!>0diU?#8A3}+I4gyL*w_&^P( z7@os0zUiHK25;lfn+0-juqU6ehhuKLX?P7&{0mLPZ@Lo0D!2?kK%%W=@E11Rz5Mw- ze}2QC&HVWpe)vr{%k#nDIy{kR2B}@D;U%vW%v-ya0SB|la?i1 zqG8$nd_=<=2!9{KWUoh<=jfeUmO90ezWpwSvCDotp21uA^G1Q(8@!g!;r08H(liav zWzK&`&9V-D2x`Ii@dKoFtb;9V=s)o1X;#k<@k6{3c|IC^gTVh`^pE*-Ie#v}4{PT_ zc|I6?6;F_pz1U|pT){ktG%Wk3lQk^+rXCI7%Ix>nqHHDpaR#4dZ$>yc3_mQ)do)6j 
zsx(~11hX`JH51I#@Iu0Weuorj5yKlaEW_tV8eT~J03U`z3;d577W}oq;m55}T&c*@ zG(4T@`Zc_c;csa=8ARtJ%ybJ8uREf{e`!MOt9-z>$JVm+W?-9KCP_=wVxHZb)odHqJ=Qgyp2{#<^>dW8$k&niv%yq_q}@vO#^ z72eNqkA|h3Z5qCabk0+Jf#)^4*)6 z{{b2={jE{UzJ>6ol!lB<^L;Z2U$1Fp1pZLNf^(sUuV$XVGj+!BFElLQeWv0_*Bi9L zT^g2pJwd~3nD(=pR%i}sSiZYnape1Y6h4z>+gsr=hWFI4e9LYcmack`;tM>kVZna@ z9SmJq%Ws8$p<%(lQNz;jO#KMW5v?ovuCD?|w_E4_%!Jrwzo6ADpLMOGOLg`t$rUW} zVhu~Pd_coXSq-03e1V^?VfoyI;z$wpvBG;Uf4P)v)yNZ(8s#GK|kY2hZSB{29_%p>VQ>rBFQ@mO`~@ zI7Q0GSvbuW{2+!wxdzXmia+ntSfTJ%4PQhG8Pq>{|EmeZvmNijvp-iZ_%xpQh($#F ztA^!6?=kVewO~IL*fYT^P?y@Yw`z4r>hGHrf6y!RBdL2rb7qX;H`z_?7Q4DNfL=*D$;HTg6CQ#rV6_%!9$> z@_aNH!4o-4asOoEeq+II)3D_HFXlX*GPavBs>r0jXHwjc7?$J$#lD(d`W%FVFK@!n zdH5mrdSC^2^HYGj`L<%lw`f>0xmrJU13#k0A~#pEdL~GjXFrIc;DXm-vj3>URx<5E z4L31-n1<&vJX7hfVG&z2TtRq+hGjHXYj`Vh0xg3ux;H6Y7{X?S&n3<-3YSqDe&1TH zbg9@|G~WKcMa;8XaiqM9*e^bUA5PTgA_1RAn)g-m{!DP7CfLBlbC}@c1mq7&PTRR6 zn0@O#qc?B|6#R(wIkcgv^01-HfC>dWA0wanFp_OWT#y94;BQ*%SCiP=Oon?g;x`&0 zx!h)j|D~V2jtSU*f`=KIKWI zxzRlDG|xG=YW&6K`TE=VzHuZi&s#=*V4jy4x^MfL;&=a2pY1>AbJNHlZ`bz^|5Tq} zG|w*!{-%-loA_hQdy`53Yos4}p5+m!#}MNk$W2dfXuu%apV!0HXzUEZ_v=^q6GO^0BA za2s%*sNDY?;y+ROq1Wua3ll0qrzkUno zkJR3bLJ0o#I2~0nvXA%vpM`f+)B}wlXgsbt_piodk7>%Euo5N*O{*SW)HFOTIB}PX z;R+bS3|$8zFln9Jocm)_{xvI)#@nWyulUrJjWAB(>CdLzk+4Law+IG3-=m=eEc%*q znHi0n^cd*l_+xk%RO{o9@AL%2(-DT-ohJ*&cGpQB$5bJorl#D&E$)fS!87E`z;j!2 zOM~FJn=s1i?-@L>*<^vlF!Tz^qI|1vS!4{gPPqAoMTiX^$43hbnSb}l9G3Fxr_7&` zwKV6nxFH?JHVw~Pv9Y4!p-Y3$p<#o()4CY<+p52dV8BtT{^q{_MX5jbgs}ehym9>X z_e0eABXCKBe*4hNk4iuM{(|-O=mv52F;B@{Px~m(CU{gLX`H!16~1me%g^RT!WR_+y%M6 zHE)_VekTc2t=dq@yj|yf)h>0kY>JECoJVqw=ICC1EnQ@>I`_k;>r)T;~v%A z1@W|=+6)A$Un_3d9X}+oQOdD3*o4-{Oy=i(vmur{uqpq(-JLu?O$xaqXEf)tv$k9- z^m2Kg!4JO2Q8lck#4Sc{VF!;SXQdGt;FUvc(q?T6giyO$_oRL1Ez;h(yDjU~;L-m< zm-#Dy9>5QlmaL25whdgVj0QL28N#k)7_z@h%=1En^Hn^RdtPxpForg?RL&1Z*7KBA zfqSz1P*mNVlTKnvMdsN&JfF;ScjzqiOg+*s9T@ zZCbF1Uvi|B?9t$Uj0Aqgfvi82Jc*}_hXZS3%V?_qfR2PLTm--l*J*OCRJO7W3EB}S1^1lFMA-D>K2)`c{FZ`(S 
z*p~c-3(@k1A6?CEIy~*%F7l%bn-HP==qlw$JO38Z`O_CJ$o+5grd?(!KROJ?jQMF7 zvHO4_HRnG+YbB^O=MI}SHdoRGhK`IUo*;wTolA`;vDCPldU5Pu|H&fd9$PTnXLrWRp{L1Qa(6FySKo98|F^ed|BI&Fz?SCyo-!$#>mSd) zQ5-br_sqDa@IBzRsMVI89(5}A$Dq`dg9DIy=}(nIKLCKsbXe{SF9KVXZs)K?Sr@~c z9VX;|#p!bGx51OByx`eu@$+3)BV;n)z>}QiOY(dsxC7n41(spA;Thb5A2bH*`i7Or zH8>ZAMLiRsY;w!UK4g<1aK1^nR?<8ZtdaLk!CVmTtn8cCw2W+hmsw<@Vac0^51S>t zvmOMCGyYmxZ<_hOXRVp+S3SHGBdflXBdcen4y(~!Kr}y}^E)QN6)aYM+Ox06q|cG0 zGd)al!IwG1%Oo1E12AO5*&ace>Fv?;RZs@4w=DvfqZ=KuzemqHLe@D)OE-ioH&{54K5do}P{hEH9wU}$vk)gwOvCCoDmau2rTT$e!mW5v;&KLdV; z-s$dLDu-q>YWRcuBD%EOp*;lQ2-J&yHZn77Yq&cJ}6+eI;Bd{uh(=eM)tvtzs@8QSqzKE!rhfeuXt!0HU{nA9c$CL&y#EqPhxi$c^81J1 z&~)IQ&tOPZuI@tPptVS5^iGx)M|r&3`wS?_m!M4VL_I9>X`TuMtfdx#P7C_>J}8#Y z;NL=&{Iu(ikYPMyjA8tw6ppjSsj;T|t#p{>e$?)oQ;h53RU3B-uDcvuvc6|!<(_wLhBdp`d@xbivR z-xrhb<)%`%ocRf`88hUOD``ZfyR2;P&|UTRr}L<)x+h_ak6Im{QJ@$aJY<@f42+i68yUn z5heKd;|<&7-%G)uw#mO0LHYN;F4+eE-d89cQknyjf7gAaO#XcVC~cE}uTicV^6yR* z%je(kB1(SRKO48nzbk~$Hu(1&1>s2H-}w`F3jX`g`Q_+L!6p2; zh*JQV`8vK_z@IPh=d<`BGtcp5HDAa{55%wyYWZ`b#KZoR@g4kmD~B|g@Eh<{W_|)I z!Z7oD%4X(o5X$!kw`2TBrmrORod{(Sw*C-rPvQr2Snv>^gx*K!pZIgPiQ0_kHrC(Q zfT2Rt#-+9{!9Q!WaLnNOX{Ya2nt$i#&Di$h8|$2v&RXW(pxrbKA(prJnI>?a&Aan> zbjG1>GRge3!DFSDH0dml?%*Z6ThTVw3AdLuwyxHje9Z3(*S$_q40FC)dW*Zben`-+ zkSHubF1#4En!iq$;qH3!C?zOShTw)01kLhaqCoIiA-HOdU=C66=~T}j!H-*puVXZq z?Yoa)V#l(5ZmD&9&)9r_!S?umy~p?0f44N>9|h~$Cg1;;AQbt27K93Xe+gbJzAvk4 zEXY>)OZWA{IZbrw)%%r#a_S1K-s2eDXp^;4g>IXCO0cwAH?1ND|C|~gHiR$%T*Nrs z0xC%CmRGDK1k3n0$uj<;i#Vh(03vJnMfeJAKoI5i$9;vm%mGWCZ2tjv1*6IN zw;&(gzhfOgQoYT2U;rt%IS(j;&I3<Pzl~i=-c@7X8zm_m=LSPo6aM9 zvLekq@}Y9(kyn$$a9a4;_dszYe{RGNXN+si^M`m+J|ja(<9bEKkaL^Xd_{) zyEf&I02g+dCO3#p!>u!#hW?N}674kI9~Qe3K~%&D02Tg3>u<`wt7+9iNicY@`}F`1 z&zqiKIGQ_V`l_uvRj#J$RwUKoI5~B9qxUexE3Yc8j#-C<(D!bdM{Nb9Mi0ZK`v1%_ z64WmP)HDpo4=eBy_J(lqbWGTz3uhE?2h)m&ZL@>d47{e{xieX|(Jd(4YGzY$HR^fv zvC12F9ifZ@`}5qr#2C5dd-q9Yx3DYh3WgFK%Ft`Rg`X5cTqaZz@-Fl8c06^zwT<@p zU$Bhlr#-%T^vkwc$OZUeyPJU ze7x@cFYosF)*JhU&M;DyVTyNz|Uq2KcflU)^Fc8 
zIzoj^aQcO4k@|1qC(F+LOX&# zhYIB0pn)$n=H&o9i<=Rd-zL$XM?g0s?fF{{Pg$2d=k0k)(+ky}e?I?3Y|mZRX?uPe z^8nj(wLCWkr{Nha=g)vZ?hQW9m$Z2~0Z*?zXe0rGnQjR0KzS3DSw&El`54Yr>7Y_*6$ma)ywWTU!u)E2DW?Xr9+Z4Q=qT z-wAA4v9;vdfgrmj&y?@3SL(WHI;yy=h1NGr0xAd58e5#={U0Jx;h)5pH|3AS7sH!i zelv@O5!RgB1xi-2@8K250l>jbbX=$r79I8ilri?Aa!z5if^kx<> zf5Z8xHCnN>46l-6=KfK+pL_rK8gpM?%4&o zb4bs*Y4110%AdMs!SHv8wIFx4L3Xfq0~Y$k8m{S&Gi&;19gMG#jyOOB6_^2GDc;Nr zE~rVvRP}XQc2o|136YJ%S*+_%$GRT==~g`pxy0)b$9lbf>sG9lbC*jM!qy0&f^pGJ zz=01pR)Sp!TaHjUvdZsziA^Ke_F3xE%sik z4(dA;(Ss+rQ~SF>6H++xv&x|x@dAFNJjR$hZ90p52A@tprlHjnnW3z_ji1tj$#8K! z@C{iHT#ve?yrpiW6FGFQz1Fa~e za#Nq^rJiY0jl}p3<*$3h3|UmUW68b#A#&*wa<9sv-!m%btb8DkE8u%u#mb?thWIl_ z-h^VB^^afQGT?CSw>E6<*;W6$>#B`AAC}s9+HUiC5AxBVVr^-?Dt|X(n}@4W<)?$Q zZgm;ntE2#VGCb5dYF{{7?mV)-(GOTHAP`ik#N!e}mUieC^e{tuW`y*AbL{2DB8}!L zL2~48@cX$|N+Jo}0tGjJ5eo>-yigROGIABt737C#f#7_gj_?8jtpD`O_({Q;LP4gH zkzElRY(57oji2%7s&jQ4@B=(=*n*)H{Q3)+bBus^13W4R=OT)S^GNAzhOvk_1JB^v z^YC*eeh^E>5p*dgYb!WGFmDgeLp+KSEWkTW{*If6+B0_3;jrbCrmD@j{1HXi2vd{< z{htr7m*VeXNWGU?VI_R1dH&;be7`+-0?*(80E2z`^LGBc1wYL8Kl$=n^RgRXe*GEz zjDB9DpTRSD!o2*IFIO^p3qq{Vy9xP~dHES%zQE`k5%N|cd1uj)=W<4v=|i)L2&bH= z6K4viyc%lEi)dW+DmkkZoC4ZFWiKP@#+5pkn(hZk=OzVVeqE(AZV^i-dZsI4MfmyP z!A<#T54~kuynHZ;Qh|@t?7aC4_o2Q%t5wyF$_pEMC17Ef_z_wsxRkA1V6|pHsrJw< z`70m87d1j=gWILKUQKa~3{PA29xhw1WN(2>+pF<7YLqEb7nS}&buCC7UD-wDA8YbsT!cTk_t0RXsEHlZGdVWQSO#z8_~2lw7`F#!rIXiAWexdG4ZJSLt5Uu8<^|EltC0_9xKYxtwv9dJBk>o43X6 zQa1aaxi{i*XeR)Pi+sDveKc)=o{_EJcCXmIUV6pTSTKaWg8D7?3K(xosSLY)a+m|& z?V?%uGKcaxfJp(R+%(U15v~M6aeYUHKV;mMt3Svk>Z*gzNlE5hS2qqHpxgr)8c34h09U?dbIL-be^yQZPbzB;QiTn+E#TH!@$=>H0vH>s&?4v4& z&IGF2*qDuvQ}gFawB&Guzl=)&r|$xD&?+wp6aZfNM!~B%I+Qdy$1^6o>)Z*RiJ0ox&P>_24R2S$tPbTj<{5ek{&qGsK>-OaXM zbo zHaqjmp({|p!oAueL#Q13B48Y^xy(vyc4)HirM@00+@qT^0F7f^rz5vdog>8DeY%VB zCDJO<>&{_cUvWnDD=JH$1^2EWAC^cwbVop@omf~_go4QJs33tB5_!NyiZ_IUl0 z<>=_bnImt(=b>KEq(%ArHaF9ANOr7!=&M+$;n$Ta?_beiYs@r!FN#}vmAn)TGAf5Q zut_CxWGB7OO+3>i9?8pY>M(SW@yG)ffZEub(|00vEP22y3+2G}#S#aY@>C98g)VCn 
zM?T^_#Ayvk;=)H9kBuUk^{c_Gq2IXUOvuBfj~NciIbdY2w$n8jY?O;sjy&z~2ih*B za@b!Ug>D({FVB;OgOOJ5g;-3tx|WctA2}Q}gT=@$H~~M{GLT;fjY9ym5c+4qI{jr`#6;rPb> zQ)vV@S@F*n;*0HcH~GX`3{K_1*5D>%YZ{id$jH^$%a^hn*iJn^%!x0w9PsOT6C%f~ z=LhE;gWawbA#7vM06KOy-gR`|bw$i&Ae>>?)ERTFPu z_@8GY9b_>{jTeINWEj49?nPX%7hd>PcjASFe#!(!RC7HMuHn!BVF(6~;)k@pqbTyJ ze_+HHh;j!*Kj+WccqOSD4E~P^yz>kNUd)%Y!T2g)mVN?1?fm%+URjp2U5h>kkAa@aRr+N7)o{;7|6j#~LMVK39<)_k~4QLa{ zWfpSX4~`!w!4s=6i!5e64X>A8eIlUN`Cd6c-;W}i^Ya&}yy%$yMP9TN5jaM_ z2oF7M@Eut%PCM-;I5EnfIzT3SBAPb5A6p)TaaHpYlEPjNx9*I{P$;Z8-|1Bl%etC zH%fa#V$(=+6^$E(rEM)N^*#eCW{caj=}c~JiB>suBH&HK*C^Fb&sqndlG-Yp-|us- z9sVlYY(f5`v#J)rk--MwpaB;G26;#m9?iJ-tT}hDT=|qgo`BpRTXN^kl_)4uAo_pk zl)?tPiCKDc?;E5TbxpY+vLxI4n7^S^r9S39z{Aq#o@v4zajSoe8MKh|m*i_Ci$=Yt zZKwH_SIK34^HYzVp?ZmxKx)Y!J{>WypS2)&V@vMlW^8X(-M=d!Iqs$#Q@||z} zhagK6Hs>}s=YA!MB;Z3abs*YvB@@g~-EG?bXe6n;3Wprg;Efy4WF*@3uFBPKm2wWT zuA1^+dJK=2{Ff@&(76k%2;>OBf7U-{54L?%{>&LtR|3&)piPjw3GRb{#ql8(f_9K} z=W#FxYU<>$)O}hh zG5L`5SRC95hs3JzkKc**412M;!mzm>Z6`-VJjV7HEes25DrqNElMV7s$O)2$9$>L} zDAzX1a}2PgAjRt5le#wmpEHmO0m2qyiNh^ zEx{%v2oe8jC3;lD9O1#uNJKT;b$C)^_sw^rAvUR#j*@JpdHD0Qw&JrlV#WIf&=1%?<)&mf?gb+GrP;w z-&74e2Q{!$1z0cs60z!3EFP>kXfsWSx>;NL-CeK7H znTR1p72-R3)5{wYi8#{$6c3X$B&tL7ifV&jjmXZvf!4+8C0)G@)zv0V!hqAg9R$|b zf>3?zJ%hc=di$359vq7&QZB?4V=TIfQgjm&pv#Xblvjz?7t2~c!MIdd-1#V}ar-M- z=MF}y%Kd6oE^FOR6GwG;O9#S+y%qY6DpWJJ`kj_`RLs4ON*P+b?R zVXe5mxrUYCf@=BA4yXcLOd8y7;nEFhW4sia(ra8W01twu^&n&k_DGkeb|K^u9t4f= zLC_^U2)csrthN%Xn zwKOP0PJ=qwOjw4Q3CjR8VHsK`EQ88~WjJweYfHp*B!w?zpt0~pDm{FW`VL>Ds>2ry zs6ra8U=zcE<501M!PpP^wDEr(%oKz3odI>XNY($GC^Y4&s5di`k!8&yL|@ z)w54{Aa(~A6t7}O@L}u%K8%g;!QyOoAI2f%!?bPf0>=|T^6?VnXkOYxw29+M<*mgr z7f&i*wIRw@El9O;RU4x1YeTeoEl7>Ft_{(~wISNJ7NizUi;As{4Wu&-by#iUvtFkB z8~xL6SYZWd4I5S?y|c=J)v=$TjVwH_QRXb|>q*ba4EBD!Yk9hBPES{FS9@P?M_Y1E zy7#2aoZjyK)@)y2_p+|+oE53s*4pX=B>I5vuHL~F2Q29woYU3Y-aXinp40xWcg=|> zW+!G>&FSjPWCzl1JyH7Yn3?Bv4z%^8m-h`U%gk=;nvEs*>dmIR53nGh4oGWy zMK;}=>FVp1HF$QQtv7?kcu#sDGiPyI`!ehnP}KC`KpRkdv7t!MS(YB?O?PJsrQ@=- 
zHG6V@dQKsfnZ2~XKU}AI_3)yVv%d|MBL#Je=eN@fR=#XHlPy%H@XemKfn{?Fq1o-l zIt=Tw)bpsmY%JZ|H@IYJtZiV)U{AU?n~C+eWisiGQs@y_TYGy1HXcBKdZ2SW_&%=L zfyR2!f&I90-ZN;}OuDtFE!#8LT}}cIr@QTB^l0??*2QgJR9Ua-?(FieOuDbo;mb-@ zM3OOwrT#=eA=B5H?P*(4Bsk7cTNWH33l1>ObRoJ`{-qPCz@DXTtsUS&6KZ@EqT}S= zww|u`))NQYdb1Pg22T3$qsH+J`>*oF7&+lMQdd+I27Iiut*blT0ej1F{b61n?9Rs8 z`vwNm?b-3BuBb5SG1*Lvg8A0{}XyL?~cWeIK1V33y&E`sv`YD z{_W|0$-l3?eGqj~9`~!LNUxaa$kj4r+j^H4OEdn^l63dr_)`^A3`{b;jGL}g#@A_! z*;jO=Cr1J6opjrPMR1%!@ZY}vR;*@Py9cx5OiT0(xc6ila40^?o|)YDFP>!nizk^szJF2^ zXia(Ye`|LgD=iGB(4viUe{Ecd-b%|U3V5nm*gjz!)^LslkIBa4tqTUT=@r=EWzx+pN7X~X-8d(%LVes=l$eObilti>YGimZu%!C3H3QQ^Kq)BJuOv$t?2~AgL za?rSX&7oF-^_s3usiZVP%6yULP^pq?$+At1mTu_58QreNA+>Is@y=;UlI zjTReA5Pn}zx~CxyyN0sMo{V88jj@uVrFK<+S^#JmQEzSmq(nxM%w~28kh3B zsCA_3n_Vv+%l7nZ*Gt7?v@cO;p+lxPJdA>}t7An2*p;VmdT#V?i@UO!Ic71)1r%43 zvsG%;`r%`b`LJ0?rG#_0_Vvd#M2*h-2Ih-sG(G>QV_M@?ia}+^{Q1XqpEAGIbk7RV zX%5$(;aajI-JQ;+2bvG-EjNd%QdyKW6Z1P>^pVy1wW%QloQ3+^~F!sx^~>P7&2Q?Zg$o+Dt7FdZ*}V@)|H7>?Hya#odyTMgqIl{NXJg@8;oVr*;sZdVoz!t=xSTsosMPuVr{)K zs@a#M2ga|L2}@MW|ETx19zFl?qvs#f6vmbFEEouvgIoe<#jo!lUHPl~M>lUS{$F$F z{iAE|xPNrZ?e~wK1g%rwKtn_80*n%ozhHZrNe`4Oi$6y6x7}TfPl%Psefyvu$ z0)Z+0(&Y3jQ~IUJ>9<~N{nGyBZkSk1V=@^VQImUTaD!&PhvH?2aR|{ z$(JfcE$O3&CBpv1=gQTy8_jvjT6`amQ-(52bJ(SDKGPl+7$*?fSrF0^eKm^2bDM>8 ziw%jYdLvHcc@@Gb3?;Nosh#_Y$Ok#UxZ}VZPgKp?bs?fp7gG9!n}<_UVeJY!c~a7Y zK+NkxcxuOkKw9ZRAdB`OqT(*}0X-sDlsiu9fSlD&#&J5%?O-vU%AuIJI%bqIu=j7k z5tA4X;xM8vR#e_d9K`QDe#5wUj4B*~R@cN*?7kGq)>*W0cAz01r^#7Lk~l3|R)0(3 zEQlFGSEb+Pk#ec4768Bf~D5PGMNU5tSTz%OLp_f0E?qabeggU z7@?7wWDH$~%aF$8;&wBVv#AOsn-Sf7rhzID`Af3g?3IHJNd%}p5zan1(7OapflsVL zv((0LsIwtir)f(4LX-^aWi(n>?~(?X@%AD(*e;f=odbP6vEIS%Zc3dfm^i7eyKB70 zN~+q2d<6nAXg1|wYK zy==Si@B=4n-#J2FWXIIA-p5g00xiQD7NqN zrCseyS)cDXWpKaP!6NTI;9!~AYZ2H^&@G?+ww;6~db0A>7u)`RvgZfv{(6kYqX0qzuq8R-1EJWnZb>?eW8VGqJLgw-!Ww29kvK(4Gq!k@8ZIQ%u%KRT z2DqL&a6*e%zI3 zNWmt&A7?S_{WyyO`Bo`0fcrIL)vx&S>Ke#oA!A6IZMaAjyW127`en+KA-jlm;#{kI 
zAvf8OffVHpZMw@wwOhMvn7-i8+Lt5Aw)f?lqGBb}g@5qfc}5u4=SsY1ieHGF>vIWf=}m zMTH!PabPO2v@?6Y%EKBle!lGNw%gH|4*dssXy{m1dqX1m!WG)M;<5`|TGAa6-Llx_ z_H&DiNbOtFH24~^V%0#m5M6;>Df7cH*^VD3V_ypD1(!#v95E4;pShnPBi9E(Xy<|0 z?R*fF13m~cG!JC#O6{68=A?zs3yn#O0Aos807xa{X9trHIVWl6IByfPMeAx0B^aGJ zVblSGve+?4&x^qrwLiUg8D?cC_4am!bv2P^TMrIS;f!qSiixe$+fG^m<;=uggn0fH zGX3fHuFkIZHcCj#`5w~*u-YvL`?<%NtbAg!@_`pyzVTwq|EA;S$td;HMsVa( z{kJu<4FQPV~8eXyHXO+>s~V+@Xq{G(Q^aBc2YYiw{W zoX9!&I!vf;UUXFKL0}%}LQ>$d9t5+p2N9vVU7mU|1au&gaa$i5x6mE=q~leQksGgy z4Ba@;yfQ)G5Rp8_<7h~s+Ch|Unt`J<1%qrt4pJ;2sn_|LAreM7MoEOZL&wq%2 zqpJRaz7;2{?<5|RPa0=N^oP`%m6P+H3naQ~lD|^vQzm(KJ`QJ870>NAA6bI<`~s;W zy_6%kb=)$kKPD$WEQ+Q#{+LSoR04*RUTt}sJFOz<_u$lw_&sRNI+2ot`%{XZdK&2L z?rWoSs;u{gCDK$JmOp?QFD-man)E zmx;8vyi#{Eg=k&-TG29t7NYe4K`~kvy~SvmIg8QJQ!8Vg9B^4Zl%QhqZ%s`xu4g^0 zy6F7JdpSHTE>2yL^iQGYPW+J-PGo@-naswrCvew!s21uH>!9%m%WCb&nA@;wAVR1=+cR_pp0S%N3IERAzbOX5Og>X5)nFa zFW}23u(#-eJtArAji^=KoP;k^KR7B@rLvUs3GWb-MT z7#~YmTybiNCxp>i0B&=E(yE}h}8?TFOMB{alR*lz1x1y=&Rx}meil(Al(fa6Cv_2wHi1WHN zw^q@re(4ic=-@tk;#fia(aU#IKq@E(}Z|lP8Qe4-TtitFeej>c8 zGv0u)8cSjfyYK|yTrYwO@a>KX{vyjb2gd8nd^}!X7J;GqeOL`D+lQI!$ecWCQIS4g z9hhANhD!2bDSQlIJj@MOgS5z%Zz#TlIa%@5RbWH*J6CVcJNlpfF7~yK3@KL4l@Ypw z{SDQ;^woz^#u$O2hj4mN#BiyY%6j89aBazcukg$OBnJ?ipq-%Np`=jZi@SKwj=-T3 zWJgxGEVc!XZdJ`$7jUO++#s;uBt$$WVzW4IC9RfD$JxFbj?Uop(2z%s)##mgE**?k z6ddJ*4kwhe#Z$ORlzd#^wILkNwEIuSoSceb1%#arGO6;8%hG4VZ-lqAup!=SE0cn5Ao$~Q`TR0 zpV>jIzr2A(<1a88yP$EZQap)T9Kzs;K^TT$34<`o9;2eD(rnr-j4##=c?h58>X5N* zOl;&5VdxRylc5Si#j2XB+N!#$R8@VvDh}2cPsXd`HBhG1LAwPx8q@>{2=1#BHHlhi za8rr;WK|N@Z^>k`ItgKGvM!lQ)S|Io5JE#slc-JBQpQ+Y3tNNQ`nsyRcpZe~b=7qcNY=p(YF#}vEAbTM-_X_7q-s-j zsZ^@I9!0E2_VplLk7V@#>8HgSD=Kb1^qiZXdJksD9iD%=txRs^K_Kz;AQ3f#=*iuF zDr#{L6z$DqOAs|f`r4iD%!&uW4qVNHc}eBr@K8F?wPa}_2A!IEX(oaAA0`&`IfxXs zxLF6BS0%hfBMV56YpK%XP;iWY;x!F*@U#F@J6QXSJ!c!Ay#+fQ)o~R$yehZ~(W=}k zMyqD47%d9RVsuoc71d}(6X;U0@I=Od?E$Dr+n4q{7BNitfhzbi2q&gmqm&Ld} zb{Q}I>`zn8C0{;e=VNydwez8;+4lB9=zJRD^=ee_F5IR}ee6o3k-u9%GOnGq$;;c+ 
zr;eV9p=qW=9)C)Vs>W3_?s?djU63rKFntxQ)Y1Q-8;oqCqAGGKD?P?T1-TCbTk%0r zyB4e?eJtw9y@YX@ChUHIC0T?#Odb4$)T4W}l2E?-pr~C-60?PeMP0iOiY!WS?_q>| zjo7smCpl<*NxU!#%JB%t=M|U7E&%g|mp4`QKFD7jW4#e+Pprlxby62?(NwfWso?i= z#0u00QOoRM)kj;jKGLFzsz{3_Xd>X!!otuepP*>M!=f0*2Sr*mQDT)zBLgoXEi^q4 z7n(jOYR*DK1P?2^|HbwoLOy!_DG@yYg{?<~eDnxZg3Qn>MD!3;B6Cu#OaZ0u5$3a^a=J4XWA`XMR-oA3Vq?vv?!821xrTD!a8MGjlt zSdw;rz$lpM(4xb1FPZ_> z0mK=VD{R~32WDb5VpE1VEC6{zsc-Se(PC%^wkW4S*AysG1?wKpK&(2uE^BJ>L7pj4 z0s`O~8d#Px1p>Q_)skgKoB;{Z+8{$sNC89P|}IfvEYzpZ%gYW${K(h4?- zahh&P`W@YVYqI{uCTrg^knSEFM|Y2)PS(E~C#M7Y`ePBoWdcI)eueu9#(86g`hOqD z+Sdb&=hD&zZ4d*lh;?-J=mn^_{Im_B?rc{-F0qAIcoB*)ew+m}-!JxUUD=c2v9NEj zyMvdjw(%m9!8C%|<>_>9tP1v{9kKYt=ug(Z%du~4z=8u8H4FxBF=Ff_3z?l+xSLX&Fhe$>f1 zmVL4BM>YEAl0F8siO-n#s=K~VmD3i{Ns=$K`wBcjHF&~!qI#X@S;Di%eus#7dP zt3t6Dt*7IP(IQGLM(eS-LbM)$3RP^v+}lq-ITQ$X*-D2|W7P`^Hm zC!Ty51?v$Q>duEzL=u5T#Zn>}_wgtPkHDg0sW@faE*||w0^&AgL})J!WfZh%q);ha zuHrVtk7{heK~&FNP63?em0~!MsKy{FRYpCzIh8p5Dnr<&tfSb0M05qwldol12?+m8 z8b^qhU;mme^G!L)7U)FSN@Nw*&JCwhN(>(SvZg z*5q+ZjVy(jBF#Wy*rL5}K2#kDyhe9E25vfjSsy$3^ICp!y!_&^=(Zw9cm_Vw^M z{O_+c<5TWbOsyu==+v&&7s8#2x0VopDn6`)1*&*diQq+-M~M>aBXO#Nw5~A&Z5GTz zDJ8DoV2~Myb8THH6+pn^wi3+9sVVYHAI80-2NNGD5g53U4>N91>^z?-bEKwez15T1%H z?;BVK)meKwj73f~)QMVg#BrpchF-KhgAJViz`+3;esBrl`)e_sUqn915_P1Zha%06exKBuN<;-h@Z z|In2GA(x|$?`_IHXTnpnRP^G^OWQujxIv)$P@LX`y$8}TNFrRZIZPE?02PML_WPx( z-z$8++}C0IQc~TX6)(z!;=3MugMh`mA58E4f}o~&mX!B{MXXq&r{DDO8*^AP*f!81 z{uW$4MD*ZW^xRwY*jx0}TlCOd^vqlI$XoQpTlBzN^t@a2xLfqJ8%162mqyRJy|ldO zLvCeDr^FkKC*TuJ4`iV#bJi=v=n`MVUhb7)6u$bht8a}x=Y8z^Qd-TSH=!FT<`x45q5{12=Tr%b^ zQ(iXf7P}6`XN^0?s3CPBvU;&oiGL^uXUe&XT|ZOKRqWaj{kYRxxURF;%T$*;@4_vM z>t($`$$FP9gY_QaojZ*C6{@Pc;Dlc6xN(YIUZFTg4Y>SGjJkbLWNi!oc|P-u^F9+V zp?X~@5VL)6I(Ag2vH`3ySPYU}_zi!f7#%X)~+@^|fLOQY)<%*2oLu zV5LwHb*^FxqHa~fg1jIeRW#=W*XwT6P5Dyk4Xur$`^p{Kmx|Yp(R7CN?qKKc&neOW#3>pBLBUXeds|H#AVt}O1Gv5273F% z9=!xqrr*M;G~KxrC2`NinYI+l0T%%M*@2iIX|n*OPqY<^qkij(#i13VJJxln#DNEq z4gADgtAm%mgF0W9# 
zno34s(bG1vcmf}_I1-Y9`S>t%i(JSk<8(O(oWi?jA_*Z%#N05>B@U(mJ1=n&$7OiZ zc*LSB-Ub|660dMRGI^;FqsKSaP*p5z>;Z9XOfF@_S2$V8u`|a6&&Wx_$MbFfHupy( zqwB618GY;Df%MrQ!F;_{KvDf_svB_{TKvHdp<_QLunJ%BTGyqdeAZM`$`H)1pQxB#JPWuYGRWV3IlA}^@UWs{`f>OJh>S8BQW$-fIaj^PoP2XGv73rl z7dD4-d~&|Ub!b;rUx=O`-tQH!DZpA6r+eF%hH18EKV{Jxhwap3hiH+i=8fv#8DSBB z@$;X{b>F4$FLY};BENht&1Aerpff1yHtJ%FyL#IOPR5xgb@-mDk)qQ$Q>`WBIBm88 zP8bZMlk>6wz(+U;IP$OOj?qzVv$!qWzO(@xmTb5h(#e6oV$2^poV`i-;4?N|mvMM-yQ?{UTWQtsot_AlQn5_?H? zObx&E^fHdCQ`XM@0c){r-vCZE<6Z zcP6(RWXdqgt_`jkTqdRtn{1pZZ%uY>lk))rnQsZ4Mc_KPKR-|dvdVU_^FVq-YjKu)jOraT%Y^vqzbvJ;ghjU(o3XF zuFu2$Q6?L`IVyaSRd)Ciam19+V>5Qe;htDuot=mtmw{%|U%pS>78M$GV4)YOoahM} zxh=}=$8uSe3wjw?IC&pLr!_t(VsfA4Qi>};+f5R0nW{GXhXS0pPhm*~uQs;MK8ah4 z-FL3biaLGVT`crN9e-6l;<4_wlhd$W8R%(U+-BVB;5sWT$)Tyo z4xt7HQt^7+7tJA}fD`5VB*+c0NCgdRxK-c==Wu^h^e6p1?r+>Bhm_eaj}+L$LF2fP zDSw?-3(AQWw|*!f@*t57>eTYju0uURQn0X6i>%_lQVp^S(cz*lJejGBx?;2}=ZdLy zC0B^nWt(M>kG!TVJE|+QJb+}57$t-j&rh^)MCz) z8pvIt5f~Y+D_R%REg#19T?EF>y_c33hWap?dPQJp-3SaK8o<~m#E?yH3^f2TWRvxt zmkN9|LS1abd^|c@#z)xID@N@Rm`+*_9+z`I9%rTq3{1XVY21fWS=ZFrPb^{gu6jnq$9A)Cy|{`@Sjvd+ogFH9b(S zElZ|9-QJo#xqqTg!c-t@_xSOJ?LLUR1WaLKnRGUm?K9%u*y59MlR#%1Zp|C_C3Y$h z9$%Sk?7`l`&4IC^R9Q+IW3`RDKsIrNCu`ri4f{tgr_!%>{g-7h>5;K5_8OiC`$>oX z1pK3^|F`b0#jy@{r)t+l-vDFWhGJY&UYs zKiuT(D;H-b!93AAFH8GEY%=Y2m+o3?tJxW0`G7sKFU9l#5jkf>%qgNcioCj@f32q zw6=a6-A#1YZhS<8EeC`>_MsLAPQs#{0Z$o^M>I`~ZWLxr3j-$`wK8x)RG;~hW(Z#a zBk6t!4xb-cv|Wl&9PPruDETjzp)GVmjL;!hwX9>|vrBaUzS0uBR{RatMP`rC+*RWf zc6=d9ICbGZVjIOZnKc|;AKSB^jk|e`!-kghXU=_m>HTZ2-1?V;@gjD5xH0EBn*Y2h z0^HJr$c$LjCJ8UFR57l4O07(b(Q0H`NUb)e#b`AzEkvtrsTHki3%gs8dwczrKKne0 zIc?Q^P*f%_3U5c>9ixe;myp^%7lcMo`tHESa>E4(YgOYGU00Z}RG|q=^@Rp7Bw)g# z)H7kx=V@3g%!EaaXTp)1V!uH-|Ggaa+C62;p3|<4DTTgW8*&@2vk0eiVwdr$mfudb zL#(iLyvs%6A+sFTK#7~`*=N{x?i(`DQ@Rf$^YLNym>GdVfB_gxZ3+(AQoy3Ka|fJgNa3JPmD&u}8}~j9D5SWW z^QekrIUhzha)1@=59u(@emAb+3h~A#T;Uye9y<7?oWZ$^DmcB5uH%w)3g_eTrdJ=v zdtH4PZ*h&lB8xegKLE+YtFFR82Mle{-iCutyqbxpoxpb0lG4ZLx;FwtOZzaca7nk$ 
zqTAUI!nCvG+SX6N%jJET__A}?t<`ioR+=zQ;?hg9PlQvT590(EfuZGTA(G&sg({W7 zp-k26bOm_az~i`B5;Z}%o+A@O6BxkO@T8=oe3C~+e%Pcqeu!|hDGp+4?D zm7TL9RohxyeSkzC0DaluiUXGP4$kT7ZSNlJNY811*SqG#6SEVutL7~3%4X(dvK`Xx z=uv-m!F5LKhmURku-Tj@YM?dk8SHLtJ81YDv1c6+JgS|D^fHb+^rL zWm)-$^v6sx{rK}&=l`AEeQjB&sj7RDaF9R0O3E~$@@$*lWXlIN8p{`duFB`oE7ML2 z-O!p^(Qb}@|9P+<@cMB(oZHRm9BAuF%MGow+q!0>D<5FG@@)8lNp~M$LC}i=jgQ%z~PDHp0 zhMGw7iYfb^$yn!9R7~0TOsLxx5@k|_;DiFS&%wfHLTQ<3%D!iUjK|dYlzq>HyPhPR zvhVr#w(r@0GMmOSy59KdZ(|c!Mt(lZWb=16c|P$n-lNxOYNDiwmBw(~<_eUfqUq<$ zmT3|&7so}6)`-y1YW!X@ibJJ7eXAI+rl0RFjvg6s^ zLDUG78#(6t+}lxD#C6yeI|mIY>}ZI0?eoHVdtVV<4d1MH$W3j0z5k*ZPcsXzgHj;v zfy%PH5QjOQgxdJra>fmGG#=KZ-EFz_!af{_bFUr0W4Zl)%0GhLSJ?U8`kC^NVAl`F zrCUEdG3`OpdU`dDJa zX7?-T>>^eY^pfKxhVG1Wy~%;)bBp|1l81T!UQip^FfiO70X!IwW!wGdJ z>f{)`XgCoyB1=TA$P!UQ{Y2DKKM}piD-qowU}#55AGOv`suvl%^hxz2TirAz3S8pjwtR*#6R+40@xJ2~7fg7>)9M7=2tsU>I^f zj5n78W?R?PLGe%1dIyY)!r=v&MqWYzK2(>aGY$1va?<2LV8xD~q}vw23YIavdenNa zrrSN|9W`LT8|Nxv`f9G;?07gZ@z=W5bgF`nv1?1*eMMm4A3lu64qUL?_8xUGPT@c) zz>U2JvaY5*6XBMYMuU^ouTSacCuWxuHdSZ; z=kzwtPnTIeR#sX;Hh@*+R=OVaE$H!7&{ouig_rWYSZY3OlkM_TlIDS)_oLI&U^VQn zZl{RIG9!}A(#~1j@*DOUhsa{01H!H#84@<2bc=g^0K>$CK#bu*D8q2tf?dOfOeyN@ znx9hC*|kBjo7-Dnx{5mcyLdN+`&|@!yAV~^*{MW==!_;FPxDgA7$hq;vz?4U>s`iQ z@%=@*70_jhQwqKC){^*?LT?#7to9zPu5jFL0_93%jWwmv3+ro2p=W9$saHeTob!cH z=(&AWba_4~vav`;ulPzv9Yx?q3BTVbqjy7ymwPWqI^m+u$Wb(66Gcy4*mXEnWE6ug z46~AtS5OJs>OUa)ct$1Y&@*a5Tlt9lghK6L5sah#*t+m$2B8`%AE9w8aSKC9Wg*fP zl!@l{g~GeJd7=1@MY)Yn<)HIUp8}qu(cH9Pe;t}6VvtwO3l<6*LUXX3nnP;73-D0U z2vB(7+*UL~OBkLv7ky=@Xv8%~;em5p#o68l;kc+3>u|c9x@pp{ik;GLpim&vN=6ip zaxh$wVKNx2;e}5AfpItisNwx>B@c{OA@6$WM6TwtRur{R{#q4N_Pq({rcSbmkmzNm zzZ^^dbW(3`S8H!ye;T)2868p_whZOzpdyuK0gTvS4Eh%07{R0ZV(H$#!6i#$@F_6Z zlkUxCV*PEIO#1(`cRuh{SM~q@2v=J&jf#q|uDTT!)l&8c7`SvQAnMdrWR%M>*bwvr zlMO{(x|ED+DitQyGEyqCWmJ5VY#H^XzI<&J-)~eD7a3!~fQw8VNHROW=lgsHl5j?EZQB;+Q?C(-RzL$iDWk zZC~^4OB4E@C?;)8h1vl#sDOO=3{Js#WFcr#USab?aQ!R`AB!SBqvnc+gXvh51u0=i zNXO@WOrn#_Jc-Sdi 
zZ)*t}@U(I~NYc7+i$wWb%k-CcKOBk_R1$U^a^a^<6D3ZNCjB<6afm@3O?rc%cm65M zZ^DVz)GbDhBRU}Ox89)Q{&x3JdcXCS6z{hz>4O<08y12jZ!kgY>>7$a|7N+%Y%;b* z(G~=%YshR7vRVvT4h%i>?n za+tF5xpqV2Sr47YvmQ8&=QK9MV5g<~Rhtyv4;NO%(>>U#Z119Gv#9dIJ8Sa7J8Sa7 zJ8N|RY^BS;qB*6m9?(4)fpmox@p)lI{D5x52n0WLVzY0|k`qjW<%pYzU32s`%@$YK zdF%hciOtqGihaT+>DhrwXZhg`%KAL$prF2#+9rj4^Td>5api?}u@=Zw$>Nezq-|1YnpY6s zpj;5%pj;5%pscUEwt^3DP}UbZ5PNeK?KG|;^qERl;n1|N6$0yfLW-uy{?ZhrQc_cWS7MUmMm`uANn-&XY z!>|t6vm1&jOuXu#C2fA1k^1fc?T8VsTbWRh&#M@O{GugwgAsRX zi}d~2nIttW&yqpPe{3NZVD_k2S4BA%o4c~#kv5uMi9`0|A^Y)={dmZJ+-nEX&x@h` znf+u?Kd*=E$3ymGE;q@a)XPNAHypqJoYIT)*bD!`i*pXPPdMv>d~X|@+{IhzTB6*a zdU@#n)TSr*r#3ptJ=m43R%OMCQPq9WOF{oZYq|XgQE>;YVR|Q8kR-M=i%Z|HX>ny@ zS#wQ4K|wL(TET0>PLNCQ05eau^|mj{iubxDm5Zxz?v+)#aqg`cbxnfbObf^vSsWXg z>E(ycO=C%#>88)1$;q|B8%uF*NHBxd3IcQO8+A7Trep@Q{m=a_vAid%K=_=t@G))f zDQ#q!w@C_L+p6jcE|c=^Qa3*!d{$fdsJ8G)ZC(kX@8H%S&RUx5x?qdK$Stb8qG;V> zPAHaF?!`cp^Kq#bm5NrhR8=pYe^Yf;#SK*`Qz*h<74goRP1OkRq}jq~&EokB^!2`$ zguV(i7>Jx*1y?cH3CDXhZ9(OmeAr zo}u;C(E3W&SNZltz+YJPUUl@FKJT9xuP>&|;B1+UuB1qtl1$>y%=h@@(vU&?{jFV3 zE@Ce?GKs(U>u#qGDsD+apYd}yN?xJ2aM#Nc?uQ*lAPT2{@754X%t1c`av{^w)*x5_GRV<#6tCx6sN!{WEG}I)Ob(NFHkB^FDmk`H$ zhCp56YA16K4Q^Vl)6e**T*JI*aYgOCO5D{ah%V4Q13hnIRA0kg!5&mM#0leh>3P(! 
zRetfD3RiZrv}ti8wR~a57fN^IXsud+B6`n28`tOA1u1l2 z7LT~d1bM{oC`b`iRFER7(g0=1Dc&p#tpN<=k2LFGj%lS}kSA1vuLh_1lrC5{1DV~l zbRU6dJ*XJ`T8(WyhdNDhiwkx@;hQ7E7e~Ad?(H&?&abPc8`9mTH+a7?UhT=a-J@mv zxL*-x_GH|0ybRL>chZtKAvECS$qf|2qRNxwM{H7P%*&e?8uaqyiVBW9@$+2nMo7gi z2h&)gP72Ms^5m!*n=3TpaWuT0==>qqb>E4*9lzgiOdNna~=*WAI}4MeDp>Lm;hxhl*aAY$zyV;hZb`P z9h%trwM=ZL!KuU0#LlcKdSx7#NJ*a+%rR}c4f4=7f;`%i8{m=EnZ+aC`azzTbz&DB z;=&hsWx8WK7*6;SugoW6C>bg$=CZDvU47mBg_Bu=dDAia2KPnv!YY@nhjp{0@`iRC zdV_DC?AizxN}inhXOY|;hso{#dG0JN=@Wp1fS#zC1U;x51b32V67+y>67&SkAh^>r zlb}n|B1kvYOg2(-LxlbE~L=jvwZ1qgC&zMhZdSg zC*;e<68dQ?d|$g{gdbKf<;zZRD|~lymdf(pr)DOPE=fuw@Uad=w@O@dd5sV&3><8&b!Jnr^E zlVBB~-a5zx#q6)}O`^NEX);x@X%t|HMoYA5GC>Q`WU#WrlZhKN^lMes+ULKbvaW9N zL+NeXAYUHH9@#c%(`4IV zh(>AHG}$v~(e#GF5RI(0X<`ou(a18JCK`R4CjMq28sBfzL``qgUXERab{wK)3DKxZ zHcb>I7EK>R8lurv+ccTO+ceq0AEMD-Wgea<2TImfEU2u}ZgBce-oP$It&Vkc0KPk; zp1(`?fEQ@Eb%fi7w(%JLZd6G9fwY0ZT4Q|gzGJU4|FD0#^CbtnKkrpmeqOaZ*TaE& z^SN!p{QvxgH&w>wS4m?!C*w_ZMgVgv7cN{>7oEf9pr|Tq>#8PCcC$PhBgpE-(Ykq+ z3!}0subqEu_1O0rq8a^MvT)I2JX`T=m6kravMwX36q7hCR>KZ>=dUL7YH~El)W?&yr1kN>zgNchi%o7o-!5U#UszE! zm>fvOuR|Je{IY*?FbPP-?}jwc1O)2mJYp6oIY6kd`10ror>v&Zq;7J(M1bmwe_>rwbpFB`oy{i^ zOMl_Z{d`ff-i=`3pF*t}#O_;Ne{1K&Do?h1lKOvZO`kd3_e(nKidc0Wl7{@3>Jo+t{Ep!G*KIb*b z#?7E+SM(efMJwdZKx7kUfXA(rT9Gq=_~kIi4C7~FW+%WRg*V?i?2=6P{lOG!A0RVq zQT+rX6RUENqVou&azzCPjZDtZ&nwXCAXHJJ_ncjg{izzVXj64oY1>of+`8(;P*FDso;JPe7<+89yC7exL2F5>yhSV-|^-dme>IDMB*uIcC&FREOKA3ug98OtDwg-^bX`m1>zmYVTf^WBB$AAv?2($&XcbTM(lLO_DoQ! zMKGq`j(0J`8QPFy=7#R10xfswmIgC_UJV;k%=EZhQUa9Fl1blK6DZS{`+j#bJu<@! 
zwo2L15~S!kpG^w+p6ac2Hdn~|G*4WgY%V?XH&dne;#pjxF0e^5=L=+=z7E1Bh4zQ( zZ8R2__N5AjB|axMSExgEHvn3Kp=B^?l~C&C0Cy{ReS&r{EbWtJk-~ke_`KL*g-SM0 z`)viol1)99pmyQ1xI%u#dE%^Z3xC*QjTcmK&%8l7p@cQs{OEBDO#nsIc z6xUgs6fRoX@MLp^i&i#5SzMFCMJwL7HdnZ4WxJHk6)xII;s+S`Y#OZ{yxvr0bA^gl z@3XSGLPg8uBOH9FXtmR?6_$1v3nn!@Bcgu^1s|Rh<;(2Z3O?PeC{(m8U_!yWN*J@N z)egmW@S&pBKE@W8) zr)G;wFZP1pD4Wo1u28EbpF?>c-sGXTs##%$w|V4;_j$0xDinNp3vYgS53hEpwqguV znzWC#%@rLo7Gj2MNqI_1sqc^p5@@Z#)h3s(+spidadfdZ7mdy$g|E9R5Z_pvi?(Bt!q;6D zgs;0|W?}og7lg07Dv%2bt>6ooT394^bFkU5AQuWgeBD)nOb4v6!kbSD!kbT+@q~hR z_n!n35#B0X5Z)?W5Z)@xCx(I#Zxt?Z_o4)1bhir!N#UXm?@B2M?-?$T5zpLhQ0VR$ zPObY3-TlHQK^Lk)kTY(B8C)nskxdE}X`u`<7MH&tC8eY(4DUxN4DUxNbd~mIrWB^p z*!$a2jNrrDQ3}J`Q3}I*g$v!i!bU!XcM2DVFS#lVZ%Zi*Z%Zi*Z%d(O+x4X|ye*}0 zVyLYahWDoEYkI7l59{v>!wUSuum-;{tispV`UHcg5!$IA@9t4Ci)_5ROC?BRSg^th zUvkC#J`}uY@GZgcC0FCamt2jP1(7AFJ^ll|`}puBSL0<}V+n>Yxf&n7~H>DZA(EOiEtCRdM4DH&x7M(=~R93CYW~QcZk<%#|q`Gdm{8oY$tw+|{PZ z95h6u7@%SP_pbX&rC<^ocQRkza$r#9!t1JQ+524>ogni(I~*)*AV*)*9yg=l1r zO_TXdh{lxDrpX+`qD_=3M2NCpe(Ar>_tq&8 zldMx9m=%R~l1&mN57}tu>m*Uu*fi-uLo~|MrWH}1HcfiL5RGPS(NK;;2@@&N5YgK+ zW+t26>10NTQ*2)$qFA;=M15*SFe@0QDc2CESg1loS#E}j6mf_sHm49#R&vr7@(sI~ z9FV5(1kB@PKiN|sX3EOoU&Lzxw0JcQ89adrF$#nWUY$PDp`@_nEk*zIqgkm6z;p50`C|E z5%$w_qwDaW3>7ydI$;0p4c1#Sg9_lky~%n@J}_}|-%g0KTUPeKc-b(XRH7L2MQ^Mi z=*4rq3mNmqOD0JTmIP1YL#>FZHF=Du`aS=@@rDX3?(xzW%fKxGOmG=)t?}a~JG35* zmw}3_wO1jc);Z}Iql>&Cw6BJ%f{}?zfxOTgHrcLOAfamoMDW#(yn2$5{pIW*Xpe? zdZ(Nv-{0e_Z2w@=kbQ3Q*WmUL4)%V%WHL@>^C6l4f9(Fmf4Tj+$-cj)ZgGEYDc$?z z=x~zq$?@YP<-HN+B&8k48--3<-W#b-T0S|Non#2fVdo^}lcUl}$`AJb%5mp#N?E#! 
zl_9Nm&f;-{*#U_BpziPV-4taXVD;if_@=XCGQ+eBTVA1mBJ%)*3|1rlliCM~Udjk< z;6MHNr5>D21`IBC$1&qSnG6_rjw~f)!ZPr07m1g9|NYSZ&Y}ICL;E|2_II9K@8mc` z`#VoI=Ulm={hj~X`#ZJz?Il0IYNDaJ(@M8V{`3PA2^y84iv;SGAR**^feI_g5pt># zC3}!XEI^jt24$B}+#Qx;HBG;V07a1A#-Vp?Oq{3>-@-}%B}=#1kKOHSZmNxEoo3_g zOG$9b+?+nS!H;|?w>*%ihK5Vu>q6cHNK$qdNeU}S@(f{O9`kO$C@j$Txk#4g%SMxm zx$~=I9HA<!bCJ=I$zaKPmZo zYH~l;_#`cOaaDi&uZ*xV+=1n%)W!b7&zqP^*vVXh)NlAjR`>(D0*R%qz6z&5>JBFLT6=A!Ub{QAlds*DYP{ACJ;~Q%g*F?%=%%f9)ysBlKGbL# zdb^oLN@KMN+iY|Nl~c`MqKvb;=d>NR#Up+Rfo};rw}yRNrI!p!rSojHLr&w@N}pqe zL!V<&D6=3%EVdSfg0v`Nh6sF@F5Dm`x}&36-Ghw<@jK9oqHgIm4+_PdAKb*IhNmFGHH?^C^xU!C_GL^jH+8L1q_``HALc z4!jIk%LIaVua^ms!mgxx#|wS5-gT<2sL2P{ebjs7I*$4e*!PlbuCu83#&r|*A858o zHrF-OdlPmF?fpdZzz2k#LSLEtV*-f?JB8*AT_oe*^b&Rs^^Pkyvye{T_94se^SuL zAa9&-%5o;?M}Py_IF6~&TUCN@@E~K6!$*)CmV-f|bm*3Ufj7wwt_XrpuQL0-am(+# z%5wuxCuisXsDD1_r=r0R&+zFZ)wN4voDV#&dd>~p0;>He<}av;RWD$}eRb93XdsOl z25PVk*8d<7{Ym&ByxjgH#;2atPs!_(67+_0>HpCS8?rLGu2gIba|VF27=*4jbJ z&#f6;0CGNZI8~WbhU`C?=T$n+A^Xpe{Rh26`s18}T70p&1Sw)-fs|5VdcNorIj#sl z>>?bW@SYX@N=nb6Z9B9Xr5(-#`oJOW zsJS5TO*`rp!jNLr%&hRP{+CuUYQ_~lS1hWvp~22Y@6@+Q(ugclxL48}?#*6F@2@uq z<~Uee$E6U17y_zpSy;JX5COc(*7pnMRnq+~tgSvb`RnA?^?n{Nac^{ux@X$CS8>dp zKS$rh;q&x`L+it#_2JO^aAaByAQwJl`P6g(Cy?0N_RBrZG7G^jvrE6j^$(i1l5R?+PM2iy2 zbvdsi$RmzY76o&nMbRDy=C_MeNsvdjzFQPreu5NP7FiTZ(V~P3Fw?8aL@_}+4I01xc-`q}XljU(uT z@4X}FaW8qb9YL>EMYy6&idI$DRaV?Md4leDhU$ao_|)4_m_dCg!YQ92k=Di~Ua9)v z_et9Qyn6A%>KK~+SAV@InkZdk-bC$q6x5u*d>c-J@uZ)`T0Am;GdpFyDJ4i5@?SRl z(jot4GdEslHGQCFx<1Ti?5-DcfI{~Xti4*19|)%?)BuO>E8s{gE8qT=?<)w-r#MYf zR!_z%#62w7AcT(!rt`HIPx>k^E9mf1!FfYo;oe*#&pj&GY~K1L{9qR9WAAN}R&xXd z!#a+986Pb{nO<8Y8U1WhsA%$`skY3MYpcndr?@Bp9Z3Ld1frnF_fBO3#J z-m#7NX8RjO<+Snw@AO6v)$={v=hWQ7F@M^jI^@h=AO|@TThjSl4skS}W5z_XEhq{3psi)@a%cTm|t3$=77e+H0LuGiXxBT1F~sOXJk&K z_vr=r((Ljp6zvtC7vPgAgiRA0y-ky0+op-J-Jl(%V8Q*4^_ z_aR!EIOL3Xn@{S4O_O6{ZJHc+9HOO(V^TPd333puC9BUQwrSG2g=pb8Cdi?$at<#| z!#@I18s7xnYseD9JONS8$ef_3UBNeOPhf}jL^*`pRuETw@{+4RnY^5GLFEk`6DjNK 
zMYFG;s&#sK++bI`XtC>@HFxgRB2GH464kn=fa(Qa{r(LV-um|B{2T_me_^ou3qs5E z0q!w)x%U^m%F2Jh=O5lw+e^jHkQ|*~we&+7@}^_^*g`yDs^_qkC0bSYj&vM-(<*Cf z7B5;_anf!yGZrtY?mLFQ-=Z*|9jmVJjxp<-H{IK5m9g5vB;ce!+)GSd<-)X{yYEM{fAzb^ToXVC0PF0Xkqz7IKg#^zHZ%8tfn)J2UXVM zR32C(r$1iI;z65bQMBRMTx+Md9tWi|xAvqQsKmZ(x?sr#T7hSll;*Rm8Vh?Tx%x5m zBLVlMG`DJn982o_1Sb(YpKPv5N| znuCB_nO2_LY6E}&RerBNEd2}=efr06R$Jc0)C%|IIseQGKhyKi_%8N*eQ41e?wZWc z|2X}x?rCy8H(yFEUveKZsT=pT6)%YRW%2*qzamgM^G{OP)KOo+tV4blM%zXg|H2$r@}pue>{*nL^*Gjw6Ts z?Q#`Kr#d+R$n&DYCKP1wyy|2!>ABUJ!Ro7=^{87_y?Fjj)m0TYRB^dsZ#|#UJIZ8_ zql%(6i{~#uy&u{nDZOB@2jIRu9d0U+@D5DfljRk8JGjl1zM=zKu&%wd6KM0J+$nT` zq9@S-ik?QBl%eAu&D_wnIAt|PR$LTwX+aVYQq zkNR4r#=<=JQg(yjUde6}^cDrPuNgXf)7|QSe)N^_va;uc)Oec|nptWWD~l_vO3DxG zlC;m2B^cHwX)i2mnw2kOfDBy)(b<(%RTXo&?va~93pj<71u`$p8k8?uqY#ZIY0;Q- zgq@z`8g-jb9BgcwD0f0MX7DymdIy^(mz{@bbi_7I?lHG%a!zN6#!|wjO{AlO<}Xh; zYm*Ep5LX<7iX%%AJCTYQ7i{@S%yprqoU>T&2cDU{n$NPiG4M=(XFkic(0rB!cHlYa zY*r|W4@*l>DC>a`O+L$V>=K=ZO_S+-h(@{EG=03N6`pJawE5&P6pJ=qE&>nHC|8nW z*3Th_t@EQ3w01t|h?PH%OX&4+LX@eQVH5QH()$v ztixZPJBuq~O9papv7f88H_xvfjQ`+}|K8xvcdoUs^70ENL#)h@uQ#^J*}c8Vj6Qpi z2^g&Z9Opk}me5|f<_z-ussHW%Yu@=NNyi0k!%yh}UBwB?3J#oFd|fRUgLw1xm&NZb z&7nK}c?Nr1dU`#>#x3^fuN`ua=)izK;X9-ZV_g$*&XI1d#T9~soc>ufgo z&@NaUCvSr{gB?1b!px1K`}NG+5XE?~R%(x{K&=-|xy2K*)D4|aVdknnn>vt(;ujjs zLw#IzkOvi_#S^y5jT29ifY|>&gRAto{>{kRztGIPyjRz)fypU>cV%E|v|zt?=;Vo^ z3j_U&`Q$>i;0z)EU*%{B>$Aho?@VPvnG<$@&wr_nmR7pLPVo9JL5nNw2Ct8ou(-mG z@cOPnn~M%ENYY18*d!)PafvIuO_Hfua9XFYHVo02_1ZM= zd<%>{{-G5$lP8Xk>T3^WY)#Q`oXqF@hg_IF9#*2Tfz&(QBEaXJa1o$+r(6VSL`2*H zns?eofaaZe5u}BU$^OX~CZB)$MSvzJU=VN8C@ZI51fE?Za`L^H$ISOK;|@I2hnUZO zoOHp(-<5Of=HH}m9B|J)oXFw4-kBHdfVsJP{&n-XI8$z{;jp+u@8k>fe#nV(*d&v3 z>K|v2)Xs@jR#(V933A5`{{G$>82S}D(u$kCS9wwYC=3>ET76oPymAt^%JFjKlLR?2 zdh?>iH`LZt&f(An9jF|{M|R80eAK3uwH}=;lmjyCgCNEcQ=!bfWS$T>2!ggSUMhn1 z2K)Iqj_{i#*~}N1Wi>%O8pGyE?^+v^Gu%TB`{=%TH*K~&N_Y_6=$^ZRtw;%og_TL$@Uk&d5 zmY2m2<)6P$=r~^ZDaD;$6{H@uz05ji085Ov60t5x9Wwwi14~h;A#a(wRTrGQcMH 
z#G-ynZHW!m2VD`d&T?X%_XT3JF`va0V?K+ILg1O|OSEQs3*#@<5u1z{1LcVPK={%aw24Tc-{o4A2w#pV*LI+M zIYM0A6)S2cBVVYuw2A2`_2Ndk;>q31$Ojss+lzo#nSCHIxeOY1GPURgzwjls?+nDS z<0WIrz7s4D`{{rC{;2s2YnEVq&eWV^e?I!z!8ynec7LCr{^)`wY|olqt$X)#GY|FU z-oLYC;iAPXCpk=ML1oS4$&~}m=pp;nko{`Ne)X?=e)!P-qrshDJ!C%`-2SWCgWP|G z{j4T5X&3-uqO1K(zx^k9=3sg~yR}Y~#VVwf;zgFKL7tPeC$S!s%VVTqjDUl<;m@;7;FW{z0Yyx{&u^X zlIuA2U=#Q>*zrNf=?06>ah$84+OAfDO@ik-&W_)2S6$$n|A^c;4zAv^UDf0{&OC4i z!~Ku{yj`_{fB*Y-RfwH;O?l^^H^x;QeBpy})%g*}*>(8U|U{XHGGWpN?JA@cS)an%H_yd|!V<6?LHt)zpq z7mnXXdchyp$5r$?$EmqDu9h;PoOWMaIrAN-4%`5K0W7(md|W}g!KWUHtF9aP&Na{% zIL?7@6ArlMJA}ij33oppSB(rUa3gqA)ebd!mE)`hw}L+fa~~w1tBDUh=lUHg4&J>Gxvx<#?%knszCpXW zZ-?3hJ_hDI`+@)Q-8j^Lyi3w=~#(8SbRV7;FI4cU2CYn%{$b(Z&UAoutTl- z4&?(zzej#OwnHriANn!jfbn1LP(>eboF1@;c9`>Pz8BZ5VsO-jlq0y3c2W53X2(JlI3|wtz*;kO#{@k32YL z1@fDAsOeut9^67cl!H?qMjkAB6nU^`tm8B~Ppj_tJ5K2vpH^c?e>qqR)`J`5JKpoO z>I8Gak@P2}VC#6=N72)&2kZgI@SQo6o>p5*NYCX@D~IoG1;>I3u#E3+y!L6;05*YZ z!4|NJd?>&1X|)oJfh}Mo=#VeD%br%_z+!MFSPrfwpL)LfwAuz{KLFpy;RDOax7P0w zJ{SkrNZ ze9t}#KbQkHfyH1cOZ(sjiJ06 z!BVgp98I~!N_ML0U_Dp^Hi2#T5zj}Fr#wr+A}|IO7zew*M|=;Fo<~U!SSt8UYZu^<4-X-4=e@i z!E$f~7z5XUE#M}w72F2K!33E5t(_|St;mB>un{Z)o4^@hGdK_I0hfc>5ARe>U^&%n4h1y~BM0n5P+U>s}%V`~Wa z?WE`1if*Z(hu>NO+`%a$0La_b`(jm{_GI{>_PSq^W z;8uBV*{OQu865L2o_|642|l?~Ed`r@xl^qZ+(@|y{)&1q2K@C-H5Sa>MEGF#Z~0EJ zelzI-<4;jvP-GTCitQo7x8EPH$6L?*(VHsVXoQYg4PhmSt_K6-<1iO*!;OiHF+MSg`4lHdQXq zPqe8OVCnDL)Ml`LOPiYUKI+q-+thL}=Rlik0do(xsV=Z(_%1c({iGvnmzoKdzjc>t z1Y_^qr8a^&6L+Z?{aY^B02YHQrN1lUJ=g#fx{Tc{1<1jQ3#EmAh0I z7`tkhTK7TtKE6w3(LXlU?NYPA?3;F}9OyZ>?NX&+&mFr|y*z(zmnx+HZ2rP7RRtE` zOL)?s-bXy+2=7b81GapH^nkf5cd7MYDYzMogWX^P%*rENFb6EYf0rr-<4wC%c0TgI z+NGMooWJZ+4)eO)r+Efj+6foF*zR3w1DM;fOU1#KXLhNj@HZdZr6L?Q(Q<-xg7u@? 
z)jY8I-`dqGFqYr0TEX0+b~TgmiYK?Lda(4ub`=+#)~>P{4_Ys4SH)oLs&*9vvuCuc zCc$gk)yPTY-?iistc@dMs70c0H=fXKOi1?23LW( zKWtYmVEH=I3&ws-e(~M0_3f$%tp91dngup);60f0M7tVYOng6YS7l)NFWS{oF#DJ7 zYBd<&NIJl#U-P}>ThkxNcfmhWZeZ?K;uYM{uEv~Cc>5_gu=v?_)d;payVXW8@xI-v z8*IL5w;Fo^;eyk_#+!Gm7?`_sw^{+VHtbfL!1~YZR-It^ox9bj62iS}w<-d&m+e;b zz?R0{s!8zv-Kz9L!g+AFS_(FQbGKRx_Iziz+6LA)?^anC5zhMEYL?*E-Ky+j(z9*1 zY5;S0@qP;Y&k#P?`r>YN9BkUqp*FzZ^F)X06#PYp%7H(=u|sVW`QLP?6(aXP9cmL; zysbmEPDO4T=QvMoY<$=w7uH%pS8>tp!`&yH`0^5dXP*RVSETuvcYYMYy%d zfh~6sKG=BgUX=h75A9VsWu&_udBNrTR9w>g`F+YEo?LJgSPYH@OTl8W94rH4;8L(2 zYy=y@)nF629&83TgNZNfQ{7;0BXXp(8Qccu-m_25B%O&b?^DYqy$|kF8^G3w_NkI9 ziDxz8gV_%gE*O7=^nopp?o)N7w{iVG)$?KAKe12E`Uvs<0(mg^SNl}FHG6VB62d`0C%dU#$U~=ImG7 zz*ca>$BB3Tel=w#-xu4j>cH~b_p9h92=DIwYV9YH`{I7pbPasp+poI8;@|C8tIG-h z69?3Bu=#%us4;}o0v3VITMwuzc?Oq)tzaXV{g(r(6U_PB0hL2MO<)(;vI9QiE8TfO ztpnq2@PUo(2h>R7FYX{*FnjL-RR^|o9Z=iAo}&j;mZawd@4;UV_6RL#pb7v(~oy1$sdwHIlQ0s`N{`!P!1G8fZHM){~TaZv?VCkZSS_<~mB-CoK zb#X$)!JOKJawHvKA@MggCDh6)=np5W&3>N<+p|a;nxg=B-nA;9N z*wd9z39$I5hg30g#g89Sv%tg?hg2zYJ-;}l>cN~}9a3?y{I`cx+jZp2?+&Rc^T@Y9 z9a2qTL3nNG+RBx}S$1EO(w!+1C@_>z+|nVC!F=Q7y=~{`DC(68`w#pHXAr zOYC??%>c7^J)>scKzMuM17puTqv~Vu9U+_ryw5qTs=&r~A66^D^1{O^0p|SUuquQ< z=Y_*+IoSH*VKoZ5rnh#f>0s3?JAGjv{}G|BLUJ`~qWO%Trxy%1zL>bg3<1>z~N4n@Qi(#0S>zB|S@dKG3Ds z-ax^0dv7|U~KqNRSq^sj;iHg;?$$6L*Bpks2WYW zo59Us?&(KWmpp?x4e*_LRF#5FuOl5`?(2`LbzsXIj;c;DHtMJv`5EGS^HEgMhL&poEf z!8jNv+}ONhYSib*xB16ZDcF3&F;xeaFF2-}z}!X0)HX1F%Q3a~i|~K?m?~=|p3f4` zJ*4yBkExZPM;=@cw*C};Fb*cb(#Maf>@V=$U@n-ufq1}Ta28k!)`8{Vaxexqf%V`z zuo2t@w*Ksx>H@QWeoW<%j?$K6YC72Zt7B@ZJpU*0$}@_kyk9TCn`7W2y~I z{O*_w_5 z$*+0Ot5Puc#^==E6!sSNX2^akUjp>^iO@4$eeeaLuAUQpd&@qfRd%D%;S9DYHq z1)F|wLPfp~K6XNl1)ITQumvmwTfr(Y4lV@~U?bQAt_HJzctWiQbHL4DE*J-6>rSXF z($oB-6RHqQ{P=`gAZ2lwZ0n7gzJ}~=F#1FP?J)ye5#=o3UxqNT=Ho^z9cb`y+N2DD1t~G>{ zIHA^p&4*8@`fo#jp6~n)&o9Eami#@tM@{)2e3$j8CNTbNkE(x|?>f?>R)h86dax1P z3^svruo>(ETfhkMwSuF;IG772z!I)-gI z%Ka|!z3D|&1-88TMb!w#M!%@mgE?otsN!Ja>=)H&zQ5(IFRB?}Z0w6_IoLFr@W9rK 
zUR1G1Ne9>fj&j_;Q*Ql;bIQ_@r<|SrnyeM4IHIa^a`?CILF{;0{5W&uv@=Iv_PP-_ zXEit%oORy&3&y-tBt?D-|CTkz{uA0V{%y8s zG!bW$L6fvJ^KT=xi$G`UnIrEW_Te*5`{e1Qc7Y&I{OG` z6p0(XER{x)84GO`wBF*L&ig5bjKp0It;C>7I5p6U`v^z!K4!_Rfi|xXnN7T3Wyx%V zwz3bIgYyjP0;R;kGRmQ6T_xSr6?P5p}>VBqP*&auNyvwOpoufGfunkuHnPx zyY*)*fjZ z{m0t~Urh#l>*3p+0bd-x=!5;o8+i@mR|b5!@NMabPx^)#@QwW%_Dz1f^}y?EmPjSI zWJImN)DdDHaT?(5fp?tQ=eTwEN@2Tv#ISD&UOeIwPs$yeHz3#j_5RDe4ZhZ<{`s;P zyw`lAZ@zJ)cN~26*lO=E(|gf9!#~y-x$D&7Z#hL0dF6=91TK_zB>7Q=%x3Jm{}Yh8 zcX-)75$RWle_BgjI6}f-A>m__t)jn9=G{g24xjGv?$f-bBc^*XNcfwO+3?6t^{HND zy0pwx9lp$E64+YfNnZN+Y0}3xo_Zx|xk@+aOOxSC8I5@@<16;q_k+naO_zQ^x`F9* z1E;@6l5_b8U1S&e{YN?cE3m`%F83BKcO?SS-!uw8_ST+kKOWK+h$s7WiAOpp;ctO| z)_-E}?v-D?R`{uEZvO{=bn8y_s3cI2mDI~i{qNv2-l5qqm)8=04wHk`JFzRD;q#N8 zi|!kKnfu`gtPZ57R3;PHXB&Fu|BB|9^hjCP!(WCSx7YNH(&?G*XRD-X9Wt%heCy3P zl6fRu^4&U)%e-%HgFgq`@Vm}5z9GQ><`I%d!e0c-NHFIeZK?-!^T_??tCQbc98A=Q zMf#EvGPjezQ*JFQ)ideWeEkO>KE*lPpHIv8O5eH>yL91b$B9h5D;54#ei>X&`k&2` z5T)^m%uN2ZPQkV|^%30`rjQ@F zMLMr!9^Lc(Huc$2$e3k!xqSC!(&0!Yd9VIx8j{3QkIc-CUOd0cES`196#mAG=W#=( zk9azeS@Ab7p6?qnR@s~1Gn%}fBa2Mn1$;weRD^ARteFB&p^#M6Mxn&;cpwOY~C zKx_Ky4)uDECjGz+Xj$+@h31F*ahd~do@|{qU!06Jz0tjohWaO#dA@(+KklH{Yka z_4_q`IZ3$X$ZSIoFB5Zqjb8HMIQ(ngu}l4$ z-${NiAEvU_yT{8j3408M+c|xgI#UW>=Pm1iWPc*#{{wo~AOpYf&wzjP6}!~-WLn_& z>eY02!tfy81k6!u_{Ar{wnb?qu!%x54|HO6rFLv8}4KnLV|4xy4i!Vd{4d&z3 zr^H~hbejAPw?r3PE;FI5}JaF5vkRZ3T^7b^wuT$@d+)I2C=2m#dRqRr4q_2qFnKsPfm+2Cl?zVsC zmrp(9g?V*2O!=l7`sbcmyVQkd|6}$if0hb6P0y#A;Em4Nr7DqgzyIh5xO7lfwv7_lCA$F%#j2ZH)ztI zkAb!g+9cuf+yC`C8G8KfA%Nq&|C+SL8*fjQmDlE(63Li--)WKrZ~icjj{dLY6;m`P z@)2aLHWP*3M7+@-?NUGFw;Qjwj{KYiG*##MMtGXx*=X~ua(Sd2JK&lA<6Y`A#F?x| zw7W~B-FbkjGS5mR7kg>Uoq_~(rwZBKqWB@>vZ7LQ=^_24PzIK%(RG;G)>`Ej1^ zZDcB~>)0rMWb=)e(QihM5svkZ()Sg?Q{1vkB_!{5`f<@;2K%u~b;w@d7egiszr~MI zevv=vh|Z{GM{AWC5$0DDkXD<|%a4GX6xihSMVXl+FBf^?zj6&r9073A17I zF7-2+n}4C#bX=j+!Qdcq)DTDZu3hS!UdNJu*CipHtHd!g8~(k!)Xyc3S-r+lpyNPD zWEzoK^-t=AeCu_Fj5&ACi+H)?#m~dS_~&J}s~(d-IJQpF;nOyS 
ze-w?f==Jzrn*71G{1Y?$Dey0UQ@i>~@BD%E)WLrozt&9o*T6sTZS6`MQgr_l%Ad3R zzD3f%75=4NnU9H)vqV4sbYNaP8d{_=H6}_wmdM;-JGhc!SU+%W<9D$=Gf}(>d(kW-iwS?j|!o8691a|c6E>W zO|G5ci;@ACj2L#mn-L#(GZq<`WKRqxx$(NbP&A6joecz04>OI57Ofc-b62+w> zgs%a!%Y-X<9_}O;#PYN#b+>%uOd-Kt0T9xn?t!|a|zJxFPt7fu~;|Raq zexI~WWzF#;NyFvIeqY*M4ZOKuX3t5)=S}Gbn7dp;AMshQt8?dnC{r50%HEJH((`ja zJ;><#aJjD!e5)S+Ns!c`Mr5)I+tue~%sb)B__vIY`@nbgCiAyZZq0M!S&vND z0rvBhA~V8}G3Uv*XmbK959|W`8}}~OmlJm@AXPV5Uw(p;jzTKqMI-MYPQLg0t2^BK zC_HuWj4s-(zH0M$hC#`LGUlu+;rWu}Tb_|NQx6l-S~3~3bQ!ivqAuL69Pm;Xex2$l(lloi&PaK}zNgioijl-v7 z^VUILaSdY%uLcVQm z8|j*IJo7nl*1MTkoF1G5%l^aYXHs+EC}|l7tqDH)ZO?&)G?ss|Uvb`y-Rf7v4IZ!W z8~!I$zCROXvy#b-}R#3)$4i5I{14&#U82TT8aD)@JDpJmUbcK(*}P| z_3n)4N}K63V%P6>$De(5sWkO5{>VJ~{Y~EZ^Cx7|^c_)|@7}mu{ey8R@}G9N$>rq} ze(pp``wV2(67D(tj{GDXu6*Cd_fY?qbf{~S<0|>#&mG;)+MR3CN~A8WmHF|~4)r4Z zk?ZVqOqaez=BF3xbf7yBnK&}rZtYNi7Mbb2%Dh#}U{I5E&3rGsUt;fCvY#hisWqg; zalh{Lyfu3Xol6ya*3OqY{&pjdK-u1?%T^kElsM{;`6>Fr5AZv3qBfN8y8KB;Pkl$m zy6cft;GAzzcTM}MucJkIEeVf!i)G21((g-)1ACi(a+J-5UEJ@2UWX9dwq4G~f=02mJF#Gf0>RgxR zp=CB8)4E}|`VEPR+-As_{n1~)ARV5Ti6WOkW(?_XWd0tx>gG`Tb@?+6tvv1NjP=s8 zmpJlo!TzTS+Q#}||FaHSVs!8QkK{w6%!j|+p(-RFzDfFg9loJw9e%nlL@WbtKaur* z()&P1M*W0*UlIHrO&uApkD|2uDXTl&e&y{(n)>KhW*{@Y+3QzEg!5UxbtydM-`}GS zN!uJ{hnsBAd1iZVMrQP5d(^WcbJQPO{W;YY-uxm*i}-pt*?Zry&Xf5||1y$C+8h9~x9+;_FYKkJu!)Zdb0 zDe2bz5aWw$LwmQLG}1>G67PD_uuhwTtp$yw~9&!>2k4REq&T&evt5gvnOL4V97h_|DW2U99j20;HSl}lSv%RAwz6|Bx zN8h^^nYE-{_q~sY(=PR@6`q+#JJh#-Oj-^bZOW`ykKQSLqV^S%JaL#;p8nW&wNCoP z$HTe|saG)u^0HBx_lZsLZ{)leX+H8dl7H)^yqzhU`M)0(y!Zvgqw9P_5S>Q0blhTXd^A!IB3}xZ3?t3i#8Kl1e!Eu`I9(e z&>H@@C!oJmT-HG(Ir(c4i9y1^fEdTQ$Jf^QS!-jU$qdb?}$}b&pzS_L1~6o-dTxMBTX? 
zeX69R4gS`C(gBOJ@!ho7zwdF!Cv(i~V|?mH=C{A_#-}fa$0teetUNdVpQITBF461o z$CI6l@UMWseCj^+_GBNHnm4-q5#5DJ!xR3E@Rwe`PuqHub>AB^T>eeT@Z~$@OqAwb zd(`#hU*xdgXPEuFba-{Tua-4i6qz_OKjAw+%*tX*uAZT@RhFAN zk7VK3*0NnaISiTKhx16%5i6knzH6`gMm7)roXjh$tK6w|g}Ek}L735Td)@y1Pewo7 zNB_PYnK%E^>)(HuzD{udAD?^GGB@!Gm?4gbkcDFbBM^;rJ;F-gj)zt$tU-zr(O%s33&WGp*7!Q6l8w>w>X%AL2S z(j3s~1??&Gh-1xl`!ee1rC(SD|BSVr{f-|S;fr6NS-4Tc?SlXG?c3GU{7&Yzt4IEe zl!|;`>j#+sF5Ksi1wS$BSRZ47bjB+P_s_^jz8RkXNVsF*S-oVR8uku&zG8%H*4?r< zN!`_UgD5gHkvTKIUFA!^@<2FT>3cU#q+kDT=YaZNk?TRu`QE_fq-+Z5kY_&H*=L_E zeC6=9eZOx$Se#}2Z{80&2i&(yo-Spc-Sb$d`WBTx(y%mDrt~4&zr}T1c#A)1N?Y$H zzOnc3Q`vv|9j9}myMZ3U-q-kjMgENrflY$Szp_yT4g;{VU%-HcOhf!qBEqR zMX?-3rUaRpl=GAPj(j{^&Ju1FJX?Ozsm?C*c>KP?tUvoN@VjBHuWlplEobactFM+Y z<&4GL$M>oCe1$Oo+eoV!X7%+FW^$}cAagxw-NGCta#^@Pm2l;(#>i!z>iLg?AB87f zTHm9`P;o)mc_8!X_V(>+lZ*!i^=bR*b(5LDd@%EVTF$!4DGBz|-Ox7Q5bUS3&Ld9v zqNF92R_PJkzmY&LJPo%UNY3e{pDlq_5A9IezWchQaZlu9@Roe~K%nn#fL096?7LS& zD}t6u-@Osu?Ckv+_uXqLoSfSas1Y*%djomwmrJ1Ue$lgtxwgwO#mt8q4)og>m%`_~ zet$-Nyws6G=B~3w?N_z0CoK=$mP$*YoovdM$|uUVC^GYqx%0qwwTR!5yO6Q;7s9iZ zbo>FH-uigy1CGP9hHpBX`AISlJUf=2mM1-STs>hM@fN;&zdI(p(MW3_W5OuronsDq zV}j(Kl~(D48$Lw&FFP>czIy|5D?d9hIq8$TkW1Wspx?fG^!ezQmiNskeRm0bJ)b{t zQhm3a^S9!Q2i(5o_Iz=#>li zt2^!`%niMSx&69im{G!9iA*W&uU*C|IRw{gf073q;A!dEul6x@h`a?JKM%~b#`Ww$ z>&hkE1TuelcDow!24w!@)1h#s9?iUveuy)PGxbYf4*!(W;5gL;t%P%>=^pi8l>e`T zmJ6T!PNg+9P6>T0JZpOny7pz?4{z8eO z5#uB8%H*A&9~B}#b;L{oI%5e#&SmfFC5)N8i%_2Zh0y?CE_~_I(Zsuwe!>_@IyS*K zlkYu6(#xOFwn3W#%^X(~(8{1?GOmthtviaiGxaf(&4Yz69#r3varKB%kG%E0yXUS^ z>s6%E%lFn3$2upGQ5-VYXola(KB$Hhhg&Y#aWQHomiuaWIED=#QPayEb}FxE)GfEGHdn{_S8!dOhLa z0Dm3dcS*$0iz5d2sA4)OqgNr$(K}oGp4#JxXU0IowO1?Rq;U*A~-}DOcOoHsXss z8y;UI9qUQQ(xQWEYBmqLf1>a5=L~K``=L$;D~oZkG!f>iDF#SG-F3MJSF6-_Bj$Bzpo608}vCETg-gqBL~$x))20l7IVCLs9G{Txh^V4 z=I^BC&2L6##O|L`I8Pa!17oA1jv=qic`{o`c5!OyEc`o|{1b&4|UA0>`u#8LL4 zgxfdmV_f#*=%a7a^XO4t-z0lUta6sT9djxD>6C=}y;<**>m}Cm!;Z zj~!BHCj047-?YRZ2!wwb{7YvZQfK$hf2YYW=K+ttG@%xl=?S*Oq!XTm-vNKoHHR{q 
z14}-(p3gk>iiG;xyQy!VGxE`_qo?Q=ChA^Y2OQ?X?<61fUePb6&&Swh^dFpSdfICd zvfHh|vdFMxH1#8<9ExJ#p99?8F^u(~)}x_4meSGH%!9@UN(QCZl#O z>7F9<-OnFVx6-a7t4Kqj|I)rJOlyjh4Nua&6q)tp-<$XysY{>k4$?92o)S z{#DNTm3DVYLnYNMD-V zLpT$eHH3Tj8|JQ(jx)S@)&G>JM)x@9!|e4d@;A`P(dYACSddv-C%v%vHU~bZZ$| zt4R4RE2Dq<%rhB}XY1kLu;p+@`gjSy1O8QiKCFJ4<(DJ#MV&YF1s~VizAtMVqc>la z^ZGY`_8IlT_wn858Q);m(}n&F(;NSolg=bRFFOU9sp0&Tbi_W4{pYb~)TgcSkNuUU zp3|8-ZhL;GGYGTm@n_t2_?(fBKHA}OWX>w`+Tp=)I;6fd!!z%<�eoBCY0`GUoSP zuiIhLzo!kEdctiViHuqnsxR_=9DL(M>oRYL_3*E_JJ=58O#Idv!FIR?+EVzU#F150zerCKY;4Qr%Gu{?> zXO(2e+X-*og_-e=`WW*_&X4c!+s5#1CGa*~+|{q1WgdK6|8_VdJ&T093jR&~ge!a- z;meuQcl^TF313|Xd?T->{bj&c1mBpceaAbFct>d*(4vjkgK28)u&!Y^ev-5PTbSG2CQ$9`aT2MRhM?DmBV?^ z<2QP`)Hxi(hwGukJ&&#d-s~$l%fA=iBYKjWI*(4$wjP;zWnJn>Qik=V&d(X%F!FvK z!)5-#X&;t;?xGQrwk~9IXLPAA@H@~KUMoCTB=`RauiWdgB17Kk@OEdwE8~RROEU9g zT^a8gl6iDi`(gElcT*mW` z|I9B3$H7(5I<5$ggU!%N;FEMD)9Q_bQeLgR-`MtSMz-kV@GfhAHZWdgf0FepG=D6S zFr(1wq4n1n>hT}m8tOo%I`cy2#)N6(BQO2@PwG}gjT^K@uOp7)z0daB-)@7i z>J!xSWE=74eBrZOCGB$e%%*bY2=eiMI!t@;QN7hf+pDDRbdmO|>$!~**SGaFra?&5lA>ECg&-U3L3ZL9nJL*8+e9||S z!Pj)~*#Y;PlBaSX>9|ABy7u6^jP zx2Nh|AWaLk&x9!Fr0klI`S{eh`n~jp&x?#z@!*lLWghK(hI?z?3f~<@{s-23K|Awt zWHwW$pF}irGcs1t5^fIj%%u-;AIt|l9>0B=cIJP7r{8_+ezpvmb1sRiM`^#24e7(J zgJ;XzkEqAW;Q5A;UUMDui4qA{>nU?cOV_oOCwIcUi{Ht1?(N@|v~DKMdhWWp{7Z!S zmWC9M8Rl>1Ntj8Sh0LRKFN>?cP)8#7g~tR5H!AbbPd=+2CCJDFMqd}m+Y9GNz|zS? 
zk&*kq9xaQjbE!*_i|-2Mt<;~56?{))=3`A4{4Ea!$C}72<|~(^#u|xxG_*DF$?s%7 zd1H-`B<|u_=m#3Q)w>uAlRU}3TYTKE(M?ajXCy2!;a}Dr7;_q+H5)W3qm|IsLc56H z>CX9I&--nb%w}j?`;h76edM#jxO<>E(ERaE(m1jbe-6SiX=9)rH)t|07D4NV*59}& zyjAdK-JKcl3V3rq*FUf1e+#_p797cVyj{cG*IC}}`t82o&?h9zOvc-k->&o>(b-hH zFLw7k-cE;a&Eg{&jknSs#xW0#+;l{Zl=l6@&!p;)*LSn?N)mICuV9WMBlplfbxmB& z<9Fn<$XI=iq<0JHh<~B)^a|f`_zL^^t|)xbIo!ofoKN#RS;q~3VJU|x@JwHNBqQ6k ze0whQ-_5rjapw;I*^mmikGaDvWJZ;HbBCWGV|}}Xy9}PPyN;;eo<;tA*9h0kQ+N1$ zo6b^yp1c8>FB5J)@{zBE!d%M*S z_?_ezeTzPaAax#w%y$~#&AG38!1J9A$knYFnA{lR>q4&eOWpm>cSfV=nemmr`HBdy z1itvn?vtAD6v}-0tKIH==k0f-@~V&d&N|{ttnOB4oMPmyfByO~ZF9YBguWBSJ2v9S z|HxRtvsvbHE{22|Wq!Ks>qk_>n+fw`KTPI2-W=Q8X&KY|PME7nS?h;uA-*+o ziO5)$9v-Qmn}~bukGPjDn+Jc6rrQ!{g}7$`{8?9ePp{fcxShW^;?8sAyi&hT2gc&y zJf{tr$@Bwyp7T61R#M~}Bg`#lZ9k$`^AI_wAr-FojqVoIu{n})V&I4(Q-sWF!W|z$ z<`c-I3AY@cwIjGkZow(|lN;e0TtG^&McgTS@16mP$QGO@$*07t@@qX#{qv}Gb!~Q)! z!?eD6*!AR#gu4o!HSwc?xk59v)fR0dv?gf&cs&-`tu+q9JaFbSfBJoQ#$#wF z^Y!{2M_v2OPd}UbKCeHt{k)_eR}shYwxj)yp^fmB{7-jAW2mIP?ppHW&;O6TGl8qB zTKoUn=kSoTI7<#4XEHS^D^n}V@@iR8*K2mu;=K*GZo}m^n3dH*1#v)e0C7N%1Bf$D zh%+jRC@MIhqUdqP85IyjJ^$ac*V+n4hLgA3zxTfK`RMmN&w8FU?X}mm*Vza69N!H>M^F_XK3$7f1O`*q}LOEF~@St9b*Q-&ih2!c09(+gkAi}+WpcVA=K;xQD#`Olv88 z-kNgAv8XD1#+TejTE4*k$jrkIe$VvnzO2}M>ciUyq&;Bb3;Tx4i}#WIJGT8h-svMP zu#D1$0?$ai()aP29A3JQ$!^Rz-`H$nxfi4TW zk62N{v##1-qW^ANdU$(;2bU7gY4$|;euQfrKmJ(v33Pj$bOWGEb<%}Gmja#9pR?~w z=(afN;-O0{qT_x)1-b<2%Io*pHXpWo+PP-i{GX_=N6WY6KG_$xH@x#)8c*2X3i}>U zxo#f|`!*MIT^b{}FXG=G{=8FOnPU&z@oy3*x?sn@Rot+1+3|TJJ`Xgm+Lv=>UtaV+ zFQYGi2|nM%sJ{IDyYF-E%VSmgzvj6Q`(?rJn`iTW)fm9_V)+-^)pL2}_T}AR*VCtL zJMPQ57a$qy;c3N1w@S3)S z=Vm=&+uOf!gc?t_O5Q$A?7dx)ss!_1X{JOVGJ87Px(j>(#V;Tdw;o z%+Wt@mM`CH;9jKe)to>bW36Uvd~)4?i1>qNxz@*XpEC~j-CE_B+h?zaovj=@&huo< ze}m`c$-el5fp@M`8GUgoe0FPETQHubyX)H}e9hRtd4KdbJM$cqie3-Ik8^9j>f!sp3$ z+cDPz&!a65UvqRpV|Tk+B;I0u`$2s_$k>nV2A_}T9CA$KczKV$gnIZKHuc}V=2)=~ zHq~Kc)I-s=&Z{==U-jqJT)#H>#4f;oP-FNMVqABMm#=e`AFzM5#`Q6xGwf4$79S&g 
zp-X9dXN=&n{zK^EVaNHnlUL*pBI^co_)GbYrW%A-jorglh5n<`gw4-L4kMWNj<~x z(eDjh2@8C@{L%MpQ_nEofsfO5;COq$Cf_$-_SerBiMNdY+6tfjh*$5gA1OKBnXs|; z&Uf5+7;WI8BJmb$=kjc^ao%M=e0rs*{<`wr@p3)CGywTcEN~1#o4tK~^}N2Wx(7KN z^1b5?lQx(G_f9U5ebzd(MWg+e(r2-cAAAx6@*VLNk@s3fuD|SujLB~uy{_?W-2$I+ z@JT_PRK;hvOLym;^Y9snc=bM|I($mFh8~!A&Wy>IeM;9N z@fNG=;(f|X@X12FdY>||BVtdSeACxm-d4ElY&nu~i9B*@< zGPC4(6JYbrt^)m?C~j{LxpPg5)x%P~%jUMhb(aGlKlcL1g?Pl-|t7?CT#c-Vg1E?!$+R_e1BQ6R<!eGCu166a&xKDw*9|&X=E5FUYZ+bNHkn4ZCMyne%4O zOAhS1WfZv7&s$*MJV1yaE|^2HMCG<-%u<3KPK<&$v8gH@RmpBBUVTe~BTi(Ar zUar$_n1*oqE$$GE{=C?nUA}^eCnRj&ANG45Ds&7e)xMAZNRq!)$@Vd@Z{D)d(Y93k z=DIz{&vW+uY~QBvSIybk&jU-k0d0T1UpM&eY4_{=xII|m*T%cB%ryer#lUXqW98eW z!mjfZW!v$ZBOi8&?F)bPnuC4Y4@Up=)UWsDcJ1{&_M@LJlw&{7Q;fD!#@OEnKEe3~ zj?gA(*F~Rm6&+`q>c@Ze@8DXHPAGilT{`4=*94!ZN*?={!Y0AD?WiupVzKChOb4rKEqgU)L+rM zcxwWWS-0zAGJJYpJ>(e3eaR8@rA}Gq`>O(?a@*(nnA-vN-<&Bv=JtTjqv@S7*MfNc zpz8xW{(L8|a_-M63;qp;P472uI8dADNR0XapNfsUF|bL5jm9?k^T5B=&?P|Uu4|+& z6}r{XQ4!q<=;EF1@}XPm6kio|ZLv-|3v~0GbnT&wfv)IS+zq;!PP#tO*^20REcS

+72B7T6}fS-ve_>m1nDf45G0 z=#6I9{-r{f##rv(f??nDFE`2^yJKPZ`JY^i*MfNMus`gc={StfxAR@({1&I}hG}T; za?)nIHn58=$BxV16LuAP-EjQHTqMn6XY5Am(-vbM-T7zqi?0;QF?l!|rcuAe$NFMp zvOj!Ipj`Tx{2hEsxTg;xANJli9Io!8RT+l%`Q8ne_QklIonbFNyy4Qm8~2ILF%Px8 zS?H+gjygYBq`#GGxZ4M$z4S@VpDJ76!}I7}D5pNBNrI14IeE-ZL_Q)uz45EZ`Wx`e z{>QKPYd;*_)aSp>kK51Z@Uwk!qujB69PGUQS-#zB*v0ynZO7yF3D|Y`>c%~d^_CHs zSAYHMeffIV4nY6^?F~8B*BX4MU6(P|^E~>{FE<>I)R=y+UNR@{on@T zc9@7f?jH@G3J?tYV)vV$zHT-4ALb#A_@0GwY(7#XU&YqCPUpYiGr%oVuKD-D$Eh81 zdEGGQ^!}>Q5#A1;*~Z9AvHiZI`r4f5sTTNjfX^|++u031rn}oW$J+-s{cVMgrL$o3 zK#}{V=$Kz~aIrBz6h50PWI8%khmRS3wo`t&-fo1Te>m#;nBQh3)@3=x$9x~?IzM=4 z%;){Sp3rrGodut7*MVARdqDLK!h1*DaF{q8f6ehP7`mR&q018D%@Gvj{(d!|K@W^F zjWmb2nR-{_OUr)_%m8lxo770FxkO8%WfjbU6={4$F{>g8p$YMmw4L;<_T(_4+_s{C zD7&pVQ;QC?6+v|nG+~ zYG3Xr!kQ3%{3fzV?cv{u6HRE+H)3})!q33X@uCyoip5^PVed@mzZJR9)75Xpu@`92H{wip+WC!0f0giKOoeaK z_-{qlpXuD!BC$7Z{aQr*m9Bm*Zv2goekTThP&*Az41~tDnwlc(rA8bVJg^Flt1d#S z)V^Fz8N{manhDxO_t6#2M=R3RieeB~&7JGw(3)L#qY14!-`Dn?b4zBR(#Rd8VTI6Qy&tOJ*z`=p>bRI zx=<(oJKLG;@l7rR@2bgP?~-I@}+i$$$J%IhYUwWmGZ#p=#YbVY(ra5iIVGAv6BLZ7demgOt%~&*U_8jj{tJ;ao2Pm$cm{^mB zc!~KnX@r+pQ->C`6DJzeyfz};LW|mnA+6|oJ26T3lx;EBluN5Bna5S8X_dvQ%Cbmm zEOKZK*K*PKVI-<9>{ZC>A+oA?=2sUn#%=i;g8ysVbou=k->c>Iow!1klTCDx#B6ue zmVU{q1Xra+)kSDk&lT1A66)O>*JF*@JVuSEM9J07$(3n+bu(I+yd0?KI$EWt2|o#& z$VRsV9Fv~ZT5on6XNs(&IV#iGI_8LK6j8@~$zv>RhSsGi^~@plXkk5(T(7uaIdc)r z6~Z2X$GOV1zlOM^clbO%If9O^sn|gj-Ap9cqPfk)@!GVqnRW>Cg=Si6T?%U|PByHL z&>Kx#3sKOFZZ%VP3YW2*`APeUX1d&7C%F#Edz!o(9g|+uUdVT&*xFh?QFv{0iaSlJt*xm{F}1bCDrBo| z-d~M18>({!jjlsi>zWJe(E7S!d0kpv*PO|t*zM|-eLy;o4@-#7RWj#Pzyvj~8pT!@ zNvaRfuaJHplyyI$?w&|?&%*t_zACM6qy_Mx7FlaMt!`wV?}^XL8`J8>Vsm54Y%KOS zhTzp`J=Af{iCit_M@)MlS=CVT{&!kt{gW&&qT}sJrj!u=;>`9pywrZ zfSz8)06le-j<2Hr?|z?B?tJWC`wH~wOSw-^FngYPUM%OC)H*lM%g>9#M^doR!4+_; z{h-bo=(MnrVLG6lAb}f6Dp#;_gK3ktnnWrq`C2v6>dkgrVv4My&8&!xx2siXa8)sd z#|U{x>aJDxak^{VlPkt~=;^eCsNXYXt%uMDE#N*Vx@x_P^~H_{b)Pr2mnT=G3!dhj ziZtXw?PN8&=Bb^pP7Y7=AdG(xY8Pw5?ochd;%QE*gS++IgEXk2`AP#i*U)^pA>kK5 
z!yCcExU)DP80THR$ID(Plr*PxeQsE0|M9Ta=yv~+SVI3%v^-6;+W(?{B8Z2IP>paa zM->}2^lMU9fAYk79PKpf@0B~6g*3dftAnE`3ag>=R+e97SokB zLK)D5<^suY(+&6y?oHQ$F@L3*u#E7dBZzwJ$8;1pjt&IM8|Yjh4gX13PW?%jX8)vz zEs@HkpY%+m{iJ8&;7@vbmwu9Y`AJW2(9e46ce)_iHp5IOX?{g>P$e2$Nz5<4CY&_b zO}lPBS%*$uF^TBY+0PJ7{S z`3Z`Ia!7m1g|w|B1zt6YbRP3IXE2qGdovf69c(zMpA6;+`6eMgh(orqLRN+QE|uP8*?7 zCbBmoLZcAKp%XQV1XI8;aLr_ma$z4nU89@e(OK{VhsJ7j0SsBJ(O&o`gWDmmTnZTs zU5R+X%yk;gM?9-HXtWGmgvrb%=!26q3WR)m3-SY=-J!*MM&U#3WRqU*u`wn+W6S58 zlH569FZv>`tdmTX+<0DAS??fIEVQbDp_c8Z?7rfE^IEUir1qZYuGm1p zVbQ<>{7cn>=Gilk+*fkbYU!qNj?c$$k4KVFBI?2)-@31t!7tIHD^rk4pw!b$m zLlTqTp*6tjcPI*Bv)`p$#`|;u82AC5gk{#pvK z;DBlKO?u3G7hueV!4Z&4dj{m<4p|5X*hPVyqkvfGArA*R#W0Xlni^~J;rM1Og&q9k zw_r?%tcG6P{}_9B@+R`FYIL|}5&a);AO4>9cCh}@$HxpeI_54W@sylLkj|2SoUxPl z%6{!S&Y|_nMVxsQA1^=ZdF2;zuzf-$d>!I>&tt!c%om{^ z)}7A&B9eVOT!&rH4!eF4t9s3S0Q)HaLm9t_!cPz3DJ*J;Y9GVnNwkmuRpX^ggPy)- zc4aL7KRf3AW33wv9HNQ+?i3Adt3ua(OlbN_X7(mF~z*Z7NR)@gZ4T=uNxu0OX7}O~t%t zIF@$6v}Y)6K9;`E)6fN)IQ$B&2d2JC3X1Mc z>~Yh!xTUi}t_Q8zit&T@Yj)P8z4&~Y9|9HB_Pl`4BRwe-SlNKK1LG}Z-(nKyn$Xs5 zSZKDSfxrqc8Uu{O{sM5g9qmoVg8Ok=3~9=fGzZx7H044W{|s$HEHRyE84&R-%?E;? 
zr?W6gdVw|o_7^BD*`)34nyKmHYcw6&5x$fOe_KzAgvZWa6b0n;4l`|ncKqMy3dBtx z(skhM$Fve2fnQP>B(#`akj8vR6M-w=zsb_1pXet1_Wwd5FewPo=oqwzF_Z^F-XEw@ z2IM7Jjz__41250PiK8{I`QSk;%TK~C3`2ZADq{zh+2Bbm(^J6=JS4{;E7q^jC;?o&N~0KX@LG+sz|&a1 zhl0m8X{T!jBf=%w8pT4f0knacH#CX>H{-_Os&E9F^r}rCVfu*cD}1zx*e?oCTsb4r zlS~u}c^$|Zj>mGJGqeHZjHG~^fpm})wof)iXdJ;A8>R}NF%>|Y_0($2K@g?pc z-@UTG*Y$dNbB^C*>>pK?*Oj-_Ubltu(?DXc|Un+vjfFgh^SM}VxK2eLj+=?9@>foiE*hGP^gO2d@5=f}mXZE`%oZe8cjrGdnafMDum#} zjI*dF1PnTdYJq$l+yibruh9YMcU;nFH8=&`>2&BXf??p=%NorBH|6lv---V@`U4($OAt1ybx=G!ar^FB%9;eVeXFYuc7~iDN1F8x5PKX*WKg zS-{Yb8;pTT;1_fpVbNdGB`9P5NymW$-*%bHQGa(x<7j65O#9gM7up6yShd|K)hvw0 zyJ0jAmwO}BlY=$SRbWA|wpaI>HWC*AiUS~LEpsa7Vc;Q~rpFUAO?#P(vtqiYN1rf5 zJ9v9gQqRunpVIbWt&(1e4ptHys?hif0?)6;)lx&3oQst`-~TQ<2fI5nf{6kWMufGx(_=8m-$~%zJ-^`kxsx_4aU&XE_{d z&R)(M{OSw>Y%kX1@YvQaVnciW9NU2=b`kL%D6EUv*AbEJ=}aMA#G%ff!CmwjRp*81 zGtj(}wYZveWDy3(T67dxU5AnuW5{eksY|dPY)ZR;sCG0n7K3v;3WLei$0!Z!u=uBG z6D$*-rLj=jyHg>K``BKgo51oOG#w5L-lDy*oYtFm0~;|XVC^oD)ki_rJCr;$OoopI zSsw{F%hF&v^J;0H7G()YYWj! 
zCt^(I@EstB?+4dGJ_d3D2VX$CkVk@Cs0fftH67&g%mcXyaV$fh0CHKjf?SwApab$@ zkn^7fa{jL{p}zrg{(~;c{Nh^BO&k1jJh$QV zxwf<$xcC@t1lD(;h|6e7&rw7+CSqMF0Z8vgHYl(6ARDByZ;%}r-IL@Ez|R%=I~I5!+^7mCFrwInSDitS^tIECT>m<1M$gJl+y zy@o3t41={3V7VV5yQXMeCLM$6PVW4o7X*D=eU6R**6h=o}0X2b3rmb*f1Ujqr+ zn01;jdk;#`d^x=Z>oE<0{`f}ROklzmEW*Kp6phw`al5d%hW*R~GWj(Jw04}Gj11%x z{yPuK0%RT3+Hrwap5oLNXJJ|oD=Vgk9D69HgdF=QG_%>@vI`o%S-uvHU=bu^J=;X9 zpFs<;@j1K>4*Q9bIBHy1TzQDBP43Sd^t1Y|%p0bgX@k2M%@2M5t@!|VlO->f?_TY~ z^50lN>-luyyj6K?5qvM&Gw=Bq2z8a;+uV3fB#3%WjVBaIV*+w`W?@6aeF9pKlg;2mf;prgC<~) z?JA}%NhUF;J{?WOhUbG6uoKKXb-$i0gy7@ zp))YW*{|7%ZcrbZ2FsAY(`bZ^{FGK8Z1bm-4CV6AXc7E&{fkD!a^u%jfUx9#6o9a` z{b?u=^&_o^GW;h>2G0CKXP`{M5HtafyN75CJ=5TtkDi>fH+vtJ;|MqjPJ=!NoB-}c z555A9qYpt3I~zCw9CHK5!4WAZf!_mjf#Db;{I9R zV&5GocWg*-)0VqoPip&6vp8IvB8Qnp#Dla6(xnCz25DLo3W1d0lmdZyUUW0qEC#iv zK!nA&BgYUkFQIorncR_*fK(rfLRjj{v=7pmm+45bSxbDC)m zBk<4Z^W-`p;Qbn>psIiJ!^IBxouB7x-ZC=3YfPYdCf_dU&m zNy;y@5QxIQUK9dPgy3Kem^BJd&A^$Vcv=Qdz=C2gXvIn~6Woh~Gvncx7ljoWczqUD zWQa8`1}itnv*ut02-zBo6(5+k3@c9P_hBJ%8uGz)SOJ3L6R@&lyA60+hj^2d@N|#U z*^b>^=r8Zm=6PPjhy1xFy=Eep;~WhPX00;qbmxFgNhZBI_kvuXM?kL7Ga%RKRgi1u zCU_H$LvUb*3y!{8FZm>pOCAMs$rplL(iI?=bR)dIOI^ZNUJ@PGFE7UlNAId0k+x5Kq)$Vju;QHQ36n8mOLG#)tGkj6tf zx+zTsvYOE-;A%@sLRfHXnguDo4J`z69;HjbjK}F3urf@;jOe z&*|UOLP#gk`vU1d(xu^MF%=s-*O)`mJAwznBZwmf`#TQEF?e{k4(0{e#W@Xq>{Rrg zh-~RJ^tzC+|&*yB-)>1>eG~Lpy4Vt?DbJdf)M1+i(At^L4#X{_Vfx;7Y%Z>r>A2pu4_b z;Ltjr<^krCn`hKp;wX>UXUH@1EwQ=f8)0vWh}Lg}yd_SweIxKKG54AIIGTB|i}%ep z@h;>gawH;;IXrX!Kf4$7yYA`4xM`E!;_A{V%(Fu3Qz7PMI~vhgOv9ocqEnFeG^ZJm z@VbGkkj_6sn_;=WEnS3^`4}ArPCre9U~=(Ux`h{eL3Jo7>)-Ly@ zxsYc1ozo5@aQNRT9FC(trIEm{PiYbY(>|jhgeCT)k-(PzbQrM>{vPKq5$O*U4y58S zO&l17$26x9dIHbP^S~@jy3@g#==LvyVR&pa1$O5#`Cb67z+;+=U*QvWUL}`6t_5tU=+!U? 
zlW;D12*@QL4|2(|bEKDiF32Ta269Q)fm~8-py(yt&3dGt33AC!gIu!9AeW#JG-*l0 zUJ;RAw6(jwebRn2uKfSkv+reoZ|W!Q>uYA3(?^83Q(hl&sxl4gD{NJ1cwaH5Mv7@- zAMIp4n$%ZJYDDRM#Dykws*kwR!hErhNNz)i`-t)FXh$D$=~Ng zM_hZABN~q9}&I2K_x6pV3CM(BE!is8L`4!S^%p{QmwKp3>eg z#cSo_XLwLw_jv!7aW?0!^QgGy_julhuV-0(L`l!rCbiX?9pQ!i0XN!X5l7uTk6Q3z z14^`r%xaWk(YDv61D4|LG}=Ts`_E2A+Uf6bX6K&XZ}OX;YyH;_KESZEGFjgi!>dqM zFOgrBvU`cdS`_iNi18%b+aj(p?e8TnG^gOV#lqHP?d~ zy0O=-ZtNA@oxR3%XRpcK*(YLju66{l)1d4o1MNwW)j1&Rg9l3eWRUgIAnUIV!of`NCdio?g0mBxn@J#NAPVFh zECe|VD9tQ0-jIRQAT{XRG<@y8CS3(~)~3n8QP0zLhfl|Iyk}_~#0AgOVBqZYGy*vC z0?h>0{)rNSu-9k?EK~kM^C0bfn^r*@-kX*}vi_A;1NQeR1z7eG%|%%J7qk&r^-o%8 z!}Fql(j-V(|DqH~@!!#EAmAs;0j?mk=fPY|5XZu35%viR5m7ky2bMtIjR|8S!t*gf zJj@A%<52A&1Z=}OiaiLJaT;fj!9h4taRNLI-T?FPbZt4fmY=S{kq_+b0r}v~Dez=A zpToF>|2g^sP>=qyrdQ57kZUCwaSat%xX zxzIB}F7xeU=p|94Aib$!FN9z198RQC>??0zMwcL=lnpM zfyF;~9!5MNXp&c0J4~Z+7=@0((O3A)8mHAh3_afDLDL~`vSB(7c?$N*qroHKDfr`f zEG>W^4&>5Vt4UjfY5PU!lffY9Gv;gY+Au_rifx=c1OzSCwrV30G87Z~DPS1576H-m z+EVQnK15(+X9_2=8pnLr;=}0mINrmFL?&TQ4@MG3nCs&kDPrsv&GQy`6LJt7 z0+Th*k>G`76bj6R%)bJfxfTA9Pj1yb)4>_rG+(a7u_>A_XXz@)30~L*JLDvIw>Di* zc*apA1Vt#0>#)Ots~T~ku7F&qGuO0sob|c+GV2bIBPuA6c7=sFu?;`f+w0$N4dvE( zwmuE1YN+KL_5OUKtoZ~6`(p20bDxSY9=Y?~<@N^Zljq0xJjZA_PVL99eNv?JcFAEk z&$uVW+!nX@;~VQENEvgH-+D)g%k!U`%iMi#xUJS?44<1yuRyb^n{z7Byy_yOI^pGj zS9zGhSgB80jCa?eN$dHWTA%i}(Pns3RvT^BgJj3s3>(w>He&iil;1|1VxjCd+N$PH zrnb4=)ZvHsaDIa5Hffj(tufKA+s6s?Ygo>_x}^6~xcEKW-(jBh$2c2(FXxwVBfUG+ zjc)jf8Sb>yPlQ*Y)qWzQD$Vp0W2(_SKYn9hj-NPNhYtCP#f{8~ej=hdrQmZbI`1b2 zwK1RY6F0pJ>+kUs!+qI#_*?7{_7*!JdOF8D9(?U>OLj=X?Px!+wjG^@$>nx56w<6mX(6OTkJ2mv?|;~ZSI(rir>T%; zJxS3pnf)|f2L^Yb@lfLXycZzdc!o{_CpytQq%gZPWx#LCv$PW?GoGU);KcLq9|2-| z&_H11o0NE*3`W~%9EOCC6et<;oN0hsp#MZylOKcRaE~Ir zjGrkEVY7ds1CT;6o!kcT5O@T(YXfnR7Ac2d26h2ljhR;t;vbGNzW|(p=i~*5EFVMC zdiW2*Df7|br13b;1a83`;0g*i7|+YEgG=z_W)bv@rr~%K7=;05`(!8hCymX-hFZ7=?I`W9x4p$~9;=j$1*ViHTw) z?87lJWc&FTwNuha#Ntb>ET2cC9D-40VvvcpqP|v#m}ms#giySe0)D|b+Oz}t2*K&6 
z^@wjb7Pb3eHwP~dN`qX8@2rl5ylV!Y-+(h_;S?3@ug}J5Ew-O;qH&1Nj@MrtLwrLQ zo9uK6hCA_4_dGIkX0?e* zxGN1ik%(vHE)&f}LXltq3UnsjM3bP8+J_xS#IqE<2qtCXfe`d_?AT$1UD7e^TtXjk z3Ok|Tlq?e+MLZejP!UM?+68>e7u=tX<6#INcNxdOz!~5W*j>ss(Liu+9(FXLUkOHo zL4`Or0KX03GB6E1jdV{3nDz8G4K~w3fGH!)G#mP1crZQLcXX}X!>z=%U;3PpJCQ8NXDS5KMg8vJvz%#@GxN8>bZHW-O7 z=vone0Gt3hGuKQu$h!;8vAk3&80-7l zi)t2n;mwe5(cHI;*}{M6el`X}r!#~Ot2piFf2I8e!vwSi&7Q33IIu1G!qqfn2ZnLbqN^F-*vdL9Uq9 zAXmg@kSijUiSP`N3wi?Nf?foe!=73y~r7R=d*r3+? z<{bCSXg)c0D7Ur|@L$?DyRru5?`N0yeIz}ed+L+RdlvC;Kd*d2>omuW#$)|ffgJCP z@anYOPfYZrYwwG>t?;uJ2U~eA^b@-urcr);x?mRmZ%;$~#HQzHsvi$GC|gy{eWx}B zZ9@P1<35}GNb76oHx%HN$1A-=VdeFv#a`O5nlz@Bm{N~IT8T9cX`Pq2_z=za5*sYA z3~9oFkxf`J3kRWFiA7D=WRvO18G`gDgbUiL;((yw%{2mxCN;r zEpp(AVkcUGu$5hC0+d5vrrA&?y-X8sV9xUzjfOP*4H^n0zd_T04R6zESdM;&j>2!( zJ2U|aP5mF523-6boq=-U2Xq-&^(hU7^6Y0c8kqYzB|~ZZn$p(@ip^*90 zn4zu*3ot`H0PeyQwKVYTbnJS;dgDyQir5b0iP~kzD`Q{>c`tU)ra@kYnd@xWr(*YP z0?NG%yJttCPsQ%p84kyc_5jP6(XN2qGTuF78M|j^SYC}>u|Jx`WtOpP764A&hzFgh zfiP^uFX9SGM!e7u+o5^z<++Ysv;xRC_M#lfPdsM2>%rOkkq)@yFyaF@+7Ul$#ev7O z(_nAKt{XOf#A@&W^z*U%HUSL9?%PrDB%YBi0)qqa-9m6EzVkR6Tn|nI69O?|hJW;M zd}k5yUBGT!8ua1VjpKaontFyFL5_i3!1Ev%C=cWeTd_NL6dVR}f#N_e0Posy z!dpSk@E(@oe|@9Oz~DrgA!DxtXMIoMw`_zsQfX2fyfEo$npi;`e#Xz3P3ZS+vXd~5d;$}T$89MpUP+u?p>N8fq!yZm)XQRq)uO&T)ta8Hb zOjZ0sWlirTcfy=P_l?W4;)Ncl`Gi3*0HJub5Vaj`bBMt5W!TBC;Co z>MIs`P*h)Wy$+r2D~2{OFT+F$>$SdOd<%-~D{@+yGy00`HiaH*a18Z1+TIt>^t?Cq z6+zDvG#g&y=vKVJQEYsJqu|+$F@yRq>WTGs7HP_)WfjdME77D%Vwux#;4CF=HjSv; z;rK-i=$>x`U&8o`r})79X0#j1oED=^2f>T&XbZGa-edy~deiO;c*A@LIt~PMq*Vak z5pj^E7ik+1)rI!6Wf#f8a4zJ{n6r%q=VC541Du1o z*d*{2n2Y@E!<@_t#$pa;0|PPV3Pg6ttiwB=IKP-0CxGX-@Jo;|r@?`w`3R5N#;*{< z90gAJJPCOy_6j$F zyqAb)e_|5$0#AZ_@gQ>z^g(!#IUMmPfrG#;ark;N^r>+sZ#(Q}FE>@^bk{65>D9P4 z-lSLC7LcoL7s%Cg2;^!y1#&fIgIqn=L9T|u7`C{YMuJ=&5g-?TI>^TSO4zWle`t0?dN5&GUlIcHp4$q&yxX5vQ`-NkJ6 zz9T#>I>I{-r%j&6+Xy`P!^1Q;&vR|Wl1BMOo)&R^+|$0KLo4I_(_Iq<P>D)(8`zw@5?nWRmi-0Fe2ug}%0y%sC1!WIYd&Z_f|(M2q~u63YWSl!__ 
z*QNj?pQmtGtbT!_A!T=^t1#Krje;O8dW~>WL1ex`hauVjOmV=lUbFx>_E!oAM)swV zz*IlV1FnBS7lG)HsSvRQ|AW#Yjq;}*0Co;G1LFtKtpfCzIC`@dBCnlJfzwCg04UNL zKTbQY4ZNLvaAB{&Yl^iBt>ff}dkO{Pk}Mg8VH(F6dCK^10w+L9UphW06NRf6<|^5(QQkXPe@q3j8k} zi(DPkT-nFZ`K{iuf!!-S%F;ZCRS`Jdgol?V&w*8NvUfW^V{j^e?OboH8y##S;vb+P zO+{*TQ+g9|gs&x!Y_1J=sF&Y!-1;5X%7270Mjlk%)Q1nMUhcyORY&&agR1!IAr7jZ z>B9$A=l9`*s>}L_sHU{0k671&ZsCiGZ8uR+U%dC%6xl}vcQQrw5sO}B=S{D%!`0W= zftv>ZW}R%)WuB{;=cY|_ORD8@b~{!Z_2HA%;Q3?te5w&$+JQNKbJ~lathm;SG9X=V zLqm3=n|+ifLOS>aIe;Ng(MlksBV7Ylbf&Gq0w219SeA9AVUXf`&}j~PljZ=KZ_<$+ zChbCB+5jp4ecB4krGB&vnEf}Ji4+39r0ED7>`w!M1pLYkNA?vhh7{4CrbEj8iN*k{ zgK*RVIoyP?Dgz9_e%%%D`Y;@gK=>(aZy$raEgauE0yCy)G!XjZSPU%&r{ajk954dM zK(8ZwAcn5V$iY4=iY7w84MW!ka1Vy8T}V9?M=Zu6{3bT0$3RZQMsxaALM#C26DZe2f3Q^K&~}wxJl37Fp$eP4&;1K0l6G8AeVD7 z$mI^%#@Qc^mwIeMtaru*{I=P<+we-W1vP2iAN#k>euMSNZ#5?9>%05^-LHZBHE_QM z?$^Nm8n|Br_iNyO4cxDR`!#UC2L2mrz?O+$3&Wp(wxka~#=@URT}eBiIL2RWCrRt; zlD_{VY57JnJ4iBNf@J7CNxyZH-uSkb{&!L`^_pZtC3jsN+DOvRThhCWr06A?`iW%X zr;?#6-+n6J-YVZh%MyJUO6QuGO>=q-Mvh~`-ZCaIi`4cg(}7C+wi+r`v0SN{Pk7*w$G(LROK^P$+0TG z34@FKc`H93WoK1>zDmyiUfO&ASlq9h^0R*?_5R8)TgkEgrQT;qaX&xhm#Xail%IG5 zkNoiG|DCi?z-hapfBlu8zw+}{epV%W_fzRP`GqP!4`t`0{47e&{zj!2SUkNLEsQU~JD2mZ1H zC7t8zt?c{;%W&I}lI@Iov-XzZKK&)_?-#d^Q1QhMkowr4OSUuWH})MF?*F~A|4P!x zXM&2)_JhSd0R46(PNvGo%_rEM*OP$ zeu}MvNpGk8XDdI8viCRqUz7Q>|5f@Wd?#s?yFv}wPCblt6vf-p&#$kv_i^&G zC_ka>eH0U3RrS$FrPr@`dX~wuT^jXhdw+@}&G^fnCs|s(+Ex4sDqXvx@8>d~-hWZ~?Nz+|`N}U=*{3Rc z_my_pO3rrjOMRe)QM6*rE=4aYNPFk;!lLxf*VVtSY==Hccw~n^>sHBPi9rZfcCnje zcPTQP%ckZX+Rs5j|Dn3Q; z6e-6VvdX7Xj_k)vu1Ak2O4fTTy>mVKDZO(&#@CbeC}L{pNm}Mg8tEHMRd%7XWq9hm zlI^mToo%-aPiR+DM$uf^M@stIBtyTJ^=$Q343qSYmEq3yDC*1nW=~P^O_j7Rkn~Wp z-wYM53SjA3a((%fqW4q!;D)MxjEX3QKE5T#UyAu%YTLi*Z+Y|IOkb7PxqS#vS$^NA zYUz0neMZu`0QHq!LVFo*f4XElZ)NB4tPJ;4Oz0uoNkVg}PiQIW(Y2nQl%<;t549>g z<>%Z!EDy^3`n8b${)#@Gq-<4k_M=iZ%3=4H_2+!OEe&LR-uyOq{P_e+T7OdUS(O|h zY2?@Adl~MtN;098{Q3+pIo$}At|h9Dp00O{Wa@O4&NL}|OjlI(x@wNP2%OY1D)0CuM)d((1vc>e>4{X&_nan 
z_iNZtkIHYmtQT+mTnGPor>gL6lG$Dl>iW=kB)yMGy>q?$tN6WBl)pi{if4!N+byX_ zivNO@U#d#aSNYkM9J^ilJNem^pTDy6QGQk>`zzUhPx16D&y;+feM-^$DZTS`wko~z z_4H_}?w7|K=|#+bQqm}|K`&)zc}#};v@hAtN7;Ftllx%4iIS-;<@NJ!t*YRP(w(mU6qkJ3BWqpcLZ z=po~Gw(s3c*1N}8nNR!p;`uf5?XT=?<7BvZc*%CbMtqB8cxsYl=o?D@MbgJAX$er_ ziejLYJu(yxy~>D9#UHBdViiLtNPFKv72g<1BmeQr&#LSb6tk11U8<5jocvOipT7#X zDf%ftkHIRv(Z$oVe<|zRs1G9_mVcIf9jvA3eOjpdVvCAjr5oy@zba6q{_v0C!#$kpv$u-hdw>l0RTLAXZ2L)Zkfc!#f8}RU_CAXCq0-Jz$rdL+ z>zI=3rFTnNPNB-_s~Edd#%GO^Ol{Ug&w@`Ci_U~jlJZ|y@zn9^Dim5gg-;XN307=8o zTlra(y|1E}D(zx_Q2tJS-ISk)viDI84OZzWIon!1z0fTsU+36T^r=eke4VqE-g)28 zl3eojx0a%}m!gmNlGn@oP&2)%e2+*P^=2?t+1WB=cL%VoRqnQg*?X*4@hCarqzw0~E%o?`Q$0%O@v^?kpT`;* z?xW~;LdvP@BrS=OM!vj_aw&UngQuij=vw8c%4zs@Q-0YhpBCk3-6QRNm26Y#Y09z2 zck+WFH+GUAb&@+f$vvFpcb(*qoaC>ZWUG?pE}syc`pf*t-CKONnskNSy%l0!Rk>en zk!GSpMcEGI-7G`}rI)i+A$(N0F}wRj6;#erg@~yn{pIWj>qq79rkEv@7rD0_|72Bt zwT($CJmv-YLAD+t)Ur*;-D!NgQV?!#aC$z1>?0A}?u>rt5>m8|ep1(&-OLdaFN z5cQOOw@;;|q3^#=1{n2_I$IXlcxqy#U+9qKfwgXWOUd=uN`HrvjdEoxdE81FUZ7+u zzKtjYe}n&H)p%&^@7(SW)c$Wmrfk=xC1q33hf{~inli?dyX;l};jF(suBZUxIg+!T zAsf#N4cVAC8nUsUmm1zwR~hs9?8$0h_EWV_uJ%(b17*JLMn9zH^+tUDZ%cdQ`KO^b z*42h=Ja@dk?o$EAa}>*9HP2AbRSZ92j9W^PVB|yXdl`1dex{!~Ph#i|8vUZt&l>X` zW53X_bEdJ6o)9MU=RZ)^o3ov(a(-)hUpd>iRQ=0Sid_|ZDSoWjPccX_TrpZPPI0qh zy5cFtJVo~adB4_GY^nH^VpqjpiXSWXQw&lJSBzGSQ{1eWu6Rl@Ptkp#N?)<1;!}!U z6?-Xutk_R6NHJV7S}{&>vtqj9DaAZR_dzOs#g>XsDRx!trTDR8KgA%$aK&iFIK|D1 z>58Wm^Az0&tMnCHDn6yyRk4@i$BO+FgA~IRqZQ*6H!G$qo>I(HbPrVNE4Ea8O0lbA zFU5})`zZz~hAT!Z#wl)AOjkUmn5XC-q|#SxsrZy)SH)h6A1n4#3{nhNj8=?O+^m?c zcuFx((S3+YU$Ld)Q;J;`dntab*iSJ?Fw!Oy&FoIKR?FKjime& zK6WZ1_rd4Ki^!evxjoBHf7Ul1)-`HcBz#o3zv|CzDt;r}=9JF_<=;X1`6+&1ihrz= zzr|Ok@9aOQ6#oP#e`_iJ<4W;Qb@I2D;yY%JsYAqQ6aDKV!XNq+j5ozn}6q)(eKe zctV!Txc@~dfA)u@opbsRxaeJ*wNWYqk{&tmrwJf`R{pz^rZ+Tee-%XXzNWY;QS-}(Ce%|(BU>VM>^ zk30R>r!M;Ysq1g-hZyDechz6rzs~LVI~VrO0bM!=A@mzUKe#dffW5%+GU(i~s!e|B;T$r;(mg3wxL1`vk`KDvf=8 zA7$v(LB{LspY80g(jTDwY>F|Z_=}f{r|+k}59qvp_j;z}^gW#YRep?i;-k`ERf@kw 
z`6s+3_YG{y-%a@${@Y9O_g4PvmA}uMCGS5N`xi$2-Ois={%xiB8~Yc|{=QEBLgmA` z{*3(#XMaB@f4@@vjr|K}e}5}elXI%?cnsm z=OyXyeEp8Onto^L@4S9LlX} z6n|s?pge!;3o?D@{MUCe|31%4f9Ls&F@BeqzFoC{=k?n|E~YP3`p)COvHwtB`a<3R z&ikLn{Hf9LDp(MA6RRetC6 zUvSaiN8LZp{@q;lFQxxB_Akr5elN=Q>zw`@uBKng{nN`uf2&I0x&HgQ>R(F#^`VRY z38nNO#{OS<^_QUTKj-z6v3@Jh-%s`5&h7s*SM#sx&w2d(%0+*n%I{qN#{P48`A<;y zk8}I^-o^BNN@;&q7yT1d`*-etjQy|j^6#ViU+4YbAugtGRoCBn{cY^um7D$>^8SlP zzsGs-DMgNQ3OD3`cl}~l^H1mc`{u7QUq*gZw=5ETy^N3lkF+m=ucEsCzX<^X2s{J@ z#AQ%KT!65MxQ`+#E|9vSt@0uShyqDNaH~H%TD7>;My;*2F3-AKmsG2Dsdcn!)h?!W zskYWWt!-(mHg<7u`#<-d?|Jf0W;p5J_t7`J@7(XX=bm%!erFP{(cwJ)*70*u4!>LR zi{R`0FUjGT6~B~||E)Rv9>p(%ugC8)e~|La{K0*uy8Q7G+4B#XKWKhI@k=>;nLlWL zr1UAj{m0f<>KQ#H!U$_5{_fh|1j{SeqM}FxD z)?e>G%leb0e{o@U`Jd}!{&6GoE9XUFVOf8*@-HKQ-GBYMkNKy_U)x{yPptflu%BN4 zk@1_wFXhCa?0;JP2=S-eU)g`O_~rR*e?9+vy|4B=jQP93Z$(^X{AJ~zfUo;+nLk?m z2=zz%U-oY-emST9tm|X>6U2|6|H=NBmA^yzwf-`Gv-rgv|I7Y~#dpwOuYdIPvHYc+ z`uAQR`3d50s)`>Ozgha14(IsO_LK3O#gCw;=KrUU`j>Ose>wkS<`4Vn{%62vxm+e6 z_6ujt$c9q?Wd3dO)6ZnvPtKoM{3xgX4DF--Mfk4{{*Q5$^%paL@a6de;mi7o#V@1% z*5iLUe`N6^wBLIEDCe&%evJCB`wv?YPu8z2zJvaH{Y>`nEdHmRe-(4a?=t@=HgWxu`A2em_535v;eTm5^EAIx@uQ=% z=MOUf(EO6(CprA-}r7Zr_Ax-g;}e-*C;H$j=VwfQ3-fjP<@}4qcR74He`WDg*iWB7lk-y+ zU+&+9a+h)aQXlP~!vEU-a(+hSLu~KMN!j+7_S@pe@V`7w9U})h|7h`(C7i!*zhwW+ z;>S6BS^u{93H+z?m;ED)A2+l9y8V~^AB&&Bewr`)j~3q{|6c9)ZmgHdNBrpZkMsK) zf2cp&esX@w%0JDi|8joZ;ybjTI{xMSi^)g+T7NmeXz>%2zYcLP@h|7MEq)69b^daG z$>PVTznU-OCzGGkf5`bSi|>|a*Izk5W%1*j{!i9VEPjIgwf*J%uF21dKRJJA@zb34 zNA_P#KI*R?|91AZ{-OMO{Y9QXF!M+IBjYQHU)le%_zvaQ_Luz+iy!C2k34^3@e|lj z&p%}U(BgmE@oPC}{8t&jp3n88V&9hP@oRi^_V}|hepUQN#V_UXE8|ziFDkxUn4N!R z{HpkEiXV4!`E>tLX+Ob7^VsW>UdntOKeGN(H2CiFY`(0YSp4`h=IimJtUp-%6y?|a zO8a5xpInlyf2I94_zwE(`Y-F}rv6{c&R^D_Eq)CD>HbI7|1EwB{p%1J(*DT)iN%kh zzs_IwFD!nN1HZzX$d!-F!-{K??Eocb&K zcb5L?#cY3F|10A^BmV^Pr}?t}YUZEQe#`!`#ZTZrJ^z#QmlnTxA(y`dzo)p${-4E9 za{52n|FQTf>c3t;|5G39f0EPw$o`v^f0EOG$@;&=j}bq0uv?64W&UEsj{{%Nzbo@k zgP(qt{inyTmHDH=cj&+L{Gl@cHuw?ZM~@%g?yLWxzmDI^`kRrz17Fv_%KXLPCpq=M 
zGJi1mG4j{`tE~SS{7*Z7DCUg6W&RM|#qlBSd$Oi_{*dPIW&WV~or)hFlRf^I`Ge+{ z6hFz~%ltv}yA(gVm&>R7KZ$?gqgwX5l)lgWI^2nImGP&=FF%mYm-fTrNB1*dj~`_I zV)08Tzvj#Q$>JC9%hq4UpBBFi`|0{G?XSfz<=9Wg-xl9>vHrUKlJg@LKZ5?c{gD3G z;+NpRIu$=M|FifJ%CGa6`HRIb=J;RQZ;M|BU(Y{e{$lZC*k8{-WdG0Nml1!u{4#&F z_|e_$e?5PY^Dh>^bPx0O`4?F~wD{43%-8;x`LD$<-pPDje`Wk*@yp=r`Gd?qEPe@m z-F`~{ZSf=cPsfj}UzmL4ugCu~f3^5!)IS|RGX6CAh+oZ@`Jcrv!vC5t>qi#9jQG># zm;GCdUqbxvg!ZS1t2}>W@^jjMnZH^5@^?9Y_4r5TuNJ?E^6T-R%s);39h`p&r(n2`(Mw0Wc+RM%i!zrgUmlHei8ML|JunX>sRvpj>$* z%zre$Q}IhVe3}1feo67&XS4H{`H$vzDSrAAmrwWKQvZeDtKtBNf5ZGb`HicbzqI(t zZ?pMd`#s|pKSlm}{Bu#ys-oPW3YagP6P@2mc>pUz*NKQZ%vCENbr>udht>->MvM}F~n&R>sT z<@~axzsrd~nZH{61bm&p%pWX%`IoG}o_|UEZ}CgdF<(FbA@e7TUq<}t_?PFGEq?Ju z&VM{2yNIjIKP`Te6Mypjjm0m)e!Bma@sGt%p}(F#%J|vj=hVN)`xw8aXW4#w|4PQc zR{jqCqi#Rt`7eu~fUoDDGJdo8G5EUwlII^RewtH%Wd3FHyV-tv{w2>3Tl^%a{rPEM z{fG8P$Df?vw(>8d{_6Hm&fi;n_bQiP^Plgd{_a=V_LKD+EB`pB|CRkSiyxu>Y5U3g zy~Qu*wEuGc-Qqj=U)P^k`e?rx^;gdy<@ql&e~dr0|K#~8lb_T6ywOMfi)jD#_+QSS zTKUHqzv}s$tlwDt1bjVzll4=JUqt=U{im#7So{d_Q-^*l##Nr5G5N?}_n$KVviJ_= z*YPjw&lW#H{nhdF_zbSs7T=-$ETP;fuJZmKiy!CYFZ%};zmyYy^8NygpCW%9zq0;g z@jvbSwU{&illg1(0sBGbukj_-^Vc+oFY{N;Z&du~bJ_DxnZIg&QSp-;zRX`WzfJT1 z!{yWczqH@NM_Bc~)NC6tUH>ZkSFm4iA|r9XY`)YVEB^%f>-JyzKa21F%jMVn%KDF? ze+s@Xe`WvE;K$Hkw?8s|HTBQ2pVS|VpE$O^ZhvI_Z}DAiw*M;Yr$+e`@OA#u{#f~^ zh+pl$%KDd)e+>WW`Cn!I(%_fg<@nL#e>uNr>0kVa`TF^BIX`6aBk=Y7N8X=f@#7r- zSN5-s@+Ud*C*ubz|LFbf`X}=ji(dj?&wr%)`nx?iVX2Cs~3W&hCNmp)|s>Hb6BKPB=Zw$~*>{FQJg z#kI13ZRDTk^uLw!8wNjy{(Al;&#zeem%-Qmm-nw({4}TitL&c}`lmVlpN!wF{1e2V zjvsk`%i^bK|Mm0#^8QeZ@6dnh`JX($X7XWw{rqTU|Ix5tjQsWd@lTllSox=DKlJ>o zvVU*opVR)x`A;){_)o{5Jil%66XdVwKb7+@hW?*+{!`AGKgj&2wB@#;MX1bwuBo2? 
z#K&dNUu6EH`HhNS%HhlWNArt{?~c#TU*{dNB>?@zG!4)Lq!Z?gYq@l)up+i#gaTKsZO{K@>s;uj~d z{dE5=<2Q>RLw`O0kny+0Pq*g$_54r9e-^)#Q~zcE&g7%~`u!;~{;~MQZCU^Rs{NGp zGn1dwf5`aT;wM{i{w3s};wsO-S^Nn0)AdKrzgYYP^+(Tt`{HOUc|Fifd@OAqq>)#eX zLiu(3C+}}D`N&_-e`NjE;+JyjpR8Y)e9RB-K-`saJ>zsV|l)&|324K z&|CC{+g$n`HNR`*^2|blBChrOGOqdEieJjf|G*r6S@V(qLC813^@<#RkK&he@?V$3 zcehuUKl%)pQ|CW>PImbVieJo`pUV7N>?iZCTML2M^IKC7(HpuWJZy)N;g%-8*c)KAI37ZE_RAe%4qON*Z(f8D=J|6uXs zgE@cAudHtw<#&f<>tETQH24Yh*YjiPA58so>?h-6lRrOOe_3C#_$m1P;m;UXSzoaD zF30|r^-aru;OqHmWq;h@#|Lov_54)Mhgtfk2WHz(#)lR^LHua@N&jl`(;WZH_}bz} zb6I~K|8l<3;+MeJ(2)=HA@MBq#o5 zeZ}I(^EiL)e;Hp|e24y5^JRU*EWU&Pbo|Twz~ZMEXXyFimoUDz_;UY#=v~HD&ezF(z`ZZ%KeYX1eqi!*+7DUZ zv-mOEKV5(1{d5*T&1wH+eZk_#7{BQGMP+};sQ(H0dVW~h-!b@Qv|qaauAFZ$_zwB& z`C(;$-QXuV?T4&yS@v@|d|BVL_zCK-ZolPxr^(NWA9=s5#ZRHXo*&Bkmc{?n`*S(t zZ<#-onzs#gR^|`o2dd{UaWQ-RFY^b@?@|0x4qxUEn(rR0&fhg<=P&aI%`Yf^@kB14 z_P^9W;rDL17%8t{zMelw{kQm0Yc^lnFNzh{nLH@e^lKF>~e^IsX5`UHbF(dzCPW;IFgOz^- z{q^`u<}W5cr~b(J*W#BEe>#3-{%i3|h#$?D^&g90&T0Q-{%7$^%Q${?{xW{C_(jBD z3GGgbtIR(vekrH_k@X*o@1Vb)f5`mJ;+JyjzpOu){G9e*=D!xdi1^X%f8~6DRey5y zm-(laf4PPIuiH-}zX<#3`HQUoSo}{r ze<|jS|7HFXC0svb{!)6fdj62+@MZp@`CW=1osd2Mkok+|r;4BC@MZp@`Q3^iujTUT z_D9Cg!bh{->ymtl`Fj0K>c7QLk-r}Q$oR|R$LDeWnlJU&;=3@7C(Xhy8V&< z*W$+)WalsapT$pd_%eU6`0@GK`Ah$2@l){oBQj!KW&ULG<^GQ(vQW_~&nKb&$?aa( z9RExEA@Y0i03_h+`HS>_CLi&m%P;d^i=XEBPsX1X-=Y5L_?Po#7Qg&O_J7{|!Q#ir zU$_62=aUToMQ3sT`tO6u`zfsa(;WZF_`&3#&H3y3gPbok`R6iUmtXpSi=X7wA6fsg z`0+WMe}BYtjB926(6GO|Fq<#y2WI{`?We3CSbPWjm!NlwtDG-1`LLgke;NN;{1W0% z&mSu9*R<@1_Fvmy=C4-%E~oy>`kTd1QU7%RU3ost&_B&-zhwQ!%0EW@>+z?IA1!_Y zz8?R}{Mq6=@b&mz=1&$s$*Di`_u(wQ%i&k{e+>H<5r6vmsLK0I4StIJ_4v8+e51kt zwBzS;&iGTt&*?SXzR38w{EO=GbG$fv{43*U&F@kCQVw6n&zkRkS)IRIlAXVdpEbXr z`8RO+bo(#;zwptl^|~ZCF<;kTss9!~y(OD3>qi#f-OPO5e#-dK;wLD-=1co$@zd)# zfBk-Dc|O?U$2Vq|U*-=MKZX7E_+7@|7C(mmdi*Hs&lbOQE$gq}uO{<?XS%Lt^6JI?+ z{R@kqB7SxGW&UOH-K}hY-G0dVCySq6&3s*dW&PaZ$A}L-e~|IN#dqNA_EYvhOg`dQ zKVK&6Ulu<>`={+E<6n#K;6EKdGXJ;u3GA=uAF_UK@l)up<44w?Eq?NC_P^%K{Mq8i 
zh(FDj^H&x>$!Wi3{BH5({twV@lyQ~)W4RBw*CoA*%U{Bs6xUl&E{h-M^q(?+xA+m- zPd)#T{R@j9qyN(Om-QEupVNQH`2maX5I?&9$oSpl=jboMi=UwV(&Go&|Frlq^4I;RtY29CG^hT``lrQ@bNI4;Ve!+P{$KVFExtqf_54NF z-z@&8oxc=w#{V*ZiSOk2lKD&N)#~{}n!}g*i{^JJe$<>j|B(5M=BJ9E+nJ@uS5r!hU-EDeFHbKPP@= z{$%pue;t4Fe51uLqyF~d&+ME39NK3+ewO)*m49?kcKplu#p0LXKV5%h{A%)Z+Fw~e zwD`pw{iXl5_@$isC+nvczl{8~|784O@^ktRnSWdS2=!OnU!E^F`LMqp|H=CaOg`dY zzh6qu&zXG0zaBrz_{ZX>=s!lN{zK-E7Qd7eKQjNb_)$*#BkSiDU+&)v{mZz@{9o<^ z?sX}{|9bo{@29l*pLYCM&KZBn_%V8s+ZP!>me*B}ALFIj<3AZcYJQL6mvZz zdB2dwPf>o&m-RP`FZZv9J<7Ps{9o<^?sZAZ+2xn_V_1BL^6U0T-mhZulizUuQ=wTA zR~i3W{Pg$P`pe(PvG_6Kr$6$Iah3OjnfzBce?9(_`HRI*VLu(eKRK4iFBZT2E6!i9 zUrPUP@nhIemw#O!^G|Z(SN4yr{L`0Ne_j7&{9*B9lwZFePR5TWKPP{gKUjSCTh?EX zpJo1J@^j)(-tT7d(_eG``uv>CUoC!|)Befdm$3N77dU_2e#!X7;-{!T+W#_tvG_6C z4_*Ic{mtT+p}*dLsQi6d^uK5pdRdVS@t?(y z(f;fBmG}Eu{0QaO^Cua9S^N^}@5V}hnSWb+m(za9`z0-Y@)F0d_P>ljEq;pn-(Tr3 z``;EnLH*Nw`TJxRKZgJH_*Ldl7C+7D|Ki;wL%dH+jF8#g7m_djCb%KP`Te)Bel*)l7cQ_($IFXz^pjuO5HM`4Nks zzRLd7@hAK57C%A%tJ^9N<@|xkNBrsbL)M=yev0_f_LucH zi=V*$di_%NZ!Nw9U$;N9|7h~zKfV4e>pvDh$!S03{EWqSIsKRHA6xtw<=5i}*}u2= zpLYIV%$YyS{J%JCVrbDS^Z)m&=ig}#U*`Xs-=p}^^6dG$%>OmteNdf$lEatzzvdSd zKN`#B)8i-k`-H+rv)=1c8pnKHe_r02af@HxBAfqOANkSd%-7@JKlhPeLisiSoj&s2 z#+<)?KdHPQ*(!f=v+VMJ(8v7Cu)m)F*KEUbt^AWsIh}65Wc|kCr=ys!`wyxA7Qd9^ z|3Q7#f0Jzc$@t02KOV_^?LXN+viK3~uiNhtebm2%_|^4iY#;d%?5Fb|-$#BiC;sK{ zLt5o8gRkc=+xId5csSc%pTCp+Q!D=z^-ssI?0;JPIEOFm7ZyJnogII&erfT`sQ=o3 z)B0#XmlMCTe`4jIqWuf{?+2k{hMXpfUE4^%l%#*j{wkLw|}yJ zXz^p{KLGM#T;=^07C*_czs#R4ehR)`Ka}ya$w&LA*B@m4%Hk)ef4ct2{*%Rz5Wl+q z@7l-sO$!`9di_Gq|5^E$(SGXsvrixMkG9ONzw`RYcRBr!tlwDrm(YG_{bl{Z;uoR6 z=6|M-`p2k0di_V{zgGS+`hOiivi@xGOF848WBRDSL;RFbZ&O_5{GOG6F=zbP)Ytqm zKhWzB^8BNjf6n-!rH}a+bH-1f?<3#Af4cpX{Xa|p@))i^dj9@3{C#1IAEW;0{cqX- zviK?5Z#{mKzfW)S(f;ZFL)O16ev&hOkn>X(zX<#3`Xhgz+2WUT+7J2re|Ue~gwSI7_kYVXMAC%$ zYdL3rFY6QW6s~`=K2aRR>j@$wZpmI>ko5`8Z&UnI4qw(MG(S;%cVc$_vOb~tor+%s zU&vcF{U%Q^K&#>W;vg8lUTLY}WP z`N&_#pNvl}ehKlb>%Yv;EPfgK>-=SZ$Kn?e|C%rBTP8oJ{geHDiy!TrU4LYMz~UEk 
z;$NN*viK$BukA1UBNo4i_Di2{+7kVy#V_T=udHuc{BlnFA^TGn-=X~-AN6KZ?aKA%67uSoZfV{-+%u zmvY9BGX9PB;`T}6BNRQ8uG{(jl|SI$Qo{5Xd%&*xa>chFz=pOy1P zR{r~Em%nm8(%`3iGhgRlIp1LLlN^5Ke6hh#_s!N{_D3xHB{B2$_*34GVew0dKiz&v z|7-DM z(e<}-KFZ*`Ioa_m{ja5e0{iLsubi(m@=tQwKUp8L@{e=$m-(~BPjc#ytnXR;6!~lW z%kwQJKd1ezoR2o_C-U|FnygRD{k-j~~PO zr2747$9K^R9-ry<@s{H`Ux)l8KI1K`$9F~W*Wv|KDX#4&W%C;qKT5L4hcdpD{AGMu zRQx1|FXKziZ&Uno4qwKXnx80sd{(ypGQQOOPQ_1}*-iss;22j~9~8dWzt?p+^L71@ z{b!3GpP0>Ghv#1`epF<>p5I9OVDa5j=IimbtnXO-6#QQ0NBy$+G3=+?Cpo`h@m-Gn z%cz7e*(UqU- z%0EH<)BT66?^^sa`WL;wA>$*9ALqocJb!BO-D0+%Uf+}b1(Uyo`MUnd{({9%5kHzQ z&u?0Mhxpa)pRA8s{22b%<(Kshi|-J>CB$8dtGvI?;>WPRj$hdyviK?5Z{7aL`B{r! zLi?fDS7m+I;-@+7hwKlS{G9e%o`1FY3GAo+FZ(kV-=Y21<5OARxA-ymdVV46%N9RH z{n71@tnXUz3JNQqJA7%Vz@uPED|GfF9#V>-d&)>@Y!Qwl_kG7x8pDljzLe^iO|CjNj z#dqg3U+XXHqb48uk4MW=#8vt~i=VFL{0D#+<0|_r7T;aKeBJ+7&bOfbhuB`1I46E& z{%Ykf_wR({vRq|<-^w55*Yg)ypA`AM6bF!?{JQ`xo{yPWzf?;o)8kCDF~|5whZ z82QH+vHx`Z%ld_te}eu;^X2@q#ZPnEe_4OA_zCP+f-`(Pdk5#zLY)xk@=Imj@xIM zKSi^v=PyYPU*=Dm-=O&A9KOt-G(T4S_?&G0W&Wi3jf(GX=JM(GSNc!kqgwR3B)2kO z*Z<1;HSE`m2q67VHedX2xRoVYB@=tGI{dN77^FdbrNsj-d{j&HD`s@5F>tBZc3H-0)Po8fu z^N0RAe`)_Lew-6O(tlX|6ns5@llhCqFT*~1{wB{iSo{?6tIJHp09 zug|W(^8BO4j}gE6{W)^J&*D4q2cTxfxPE}=b4)($r~Ox1KQ-)^p!|CNBY(fv%0ET? 
zYyZjk%i(`e3;6FY7k@Ia<{^_^5{3ZB3#kI2k zXXGE}^gosTJA+?D{(AmV*}paTaZdYLIsahsbJ}m2e_Hl)h#%el%JTshKe>tRugCu~ zf42B3_@R z7XQ;6y9e^~q!`fLBo_{rkO&|l{->mL^1<@itf4~w6Gug5_>$KprupKkwT{9y6pocbsGXBIztgv+n@FJ%43;+Gy~zOKKr ze_-*;;158{65}f8qb+^{{q_Edj2|t2niD@Vf4BHW=mvVLjt<-R^YC+DZ+e(#2t zk@QivzqX&uUoF1NslPIQw)k<%`Rn+X^$UxiAbvGp*8eSjoYQ{G{M+J}e!%*dVBi#2 z+5fTlNlyPI>qizpM*e#IE$5dkz5`$H|H%HQ#ZPkjA348Z@l(`4-G9jXoyCt4zk2*F z^Dm2^fUn2jvVLIk%V>Y~-yfIrKNi23)Bno)mBn|^UyuJ~{m$YiIrUH0Z!Esc;mi7w z#ZST4{gO<|)>TWICNFJmU)CHgTf8`$Hhs$UDGigS?Ycew>3nC-12P#FpjPAZHoYybzpDv1 zw>X|&eH3=jr{eCGXHYQLe0j(P6q8#W(XzzV+)*=h54u^u1AT40X2x`P?WX=_y$3fr zjQBNo)(qc;e*Bj{Q9I2QYVKmdjbBJDbIsila1x~%+31mbYKEUmKit+A_|#t=eG?h5 z{z3vaUQP0^A9bbweU<;cX4Xts+Way9`*!+WcPLf$+KC%|gZ^D#cN3{vyIuGZ*A1ga zw03*`vFXuNy4oGX4_6p=5S6%gGXHWe9yZDIrs8*v+h(axJ!8oCNhm&rQTI`rKh*r1 zdiMT%6MbDx{d=VAe)RtWuk$;kQ9rM->%W9ldb_6nY)ZPAZnem5H{dU1roYt;xRku{ zE~P(`}G9i@De?jB2bivF&*iW@MEvMkV_Y@KD+rgl}3~zY|INF5UWpx;>HbEo+j7xp^>z$-MO~@8^kQpz3)`+d&{}uYbk>EsU6@2yWz*vk9})HJ*gkwOoHd|kHVNE z+{Hdc{k9}-)C?;15w*{e$G7#rT}}Ue=-)@x_M1aL`qMv$(El}qef|^uo!X;o!}zUc zJpI^7?!g@~{r+riKS%oee;zEiYkU=TV?F=4+BJ0dc=}h1-0e5qOW*8zQuz4Vr^zv+ z=yyVQFRV@x_={@aqq1B>cW$lR9b4fSYe!N2^8A$U{-ioZrQp9KyNy~+*)6WUmIA21 z|7UY;zjLWz?-AwSp0<^jIbbM#FRQJ8k}Ts*ya}ZqNB`GM@OS(B@LFD5zmAZp^uMno z%#0T7{8+@V5u!*Q9@t z$l5~Pkeeyx7dQO^6|LXsjSuj=n(cR^Uxj`<)Sp6Gk9(4G*m3yLzNq7T*>)PpEqh1P zGS_c%__0yp5RyIB8~i#F>k3B@vEc~P$^Wb=%pqN;?L?*YKWhtfN&NKHzz;DQneqNV;UI|VztKZcm>o&UmNbe*_cMzr6 zt$qdN@k>hcl26wZ@_3yz-K+jWGSpuQd{=Jt3y$B>-|e?w{RhOpkg^*_Kewd+1Gk@d z3P=3rmyxXfhyR6&>6?*VJj?&X?Q}H7VZZsqPoy7Sz~;M91`9U*1ts@Ci$04qz^JQ#iws`(FQ4q@k>0HZs)z&EkVk3{pGNNveKz^Q}x=V+6Q-4_ZEPdBq7JmG@?pac|?rY)4J9P`m33Zo;AAhe~KpE71 zJ^c7b-7BPW-8aIIf7PAje_RoMyj%CSPxj65PQ!$H#S}ytHqHAMe+_=FE%8vWgd5aKnLb?8?V%q6^6gn#^Lr0H z3g|I%!H;U*_0Z#ho+IH;))YwMz#j)_;LEtvMQS`3my(p>>62Uk|D3n<>?h+JPrgaL~Pz=w9s)$QHFv`>z-M z*YEw;JN~PNd{#Sxz6N`-gMBwJ)c z)jr^VKk2`o^Iv8E^_Kto(0>giFV&9nU*rAP6#uom|2oir9qGSL@L#R|>kR*OzW@3r 
zeN7uMd_;+4OkYbn4;Vdo1?4uV-|Rt?NEPR1JV?dqztu$Q0o}ZjGwA{3+L`VEde*L= z|4pGb)Abv++mPLd>^fxL$UVkxP6qBjrDpH31dgxWz5lL5Rt=jzzGjcHOGegE>H+H3 zypikO9Jrg?ZF~)J#*Y|7Y3nD33sRk}3Z6D#R#KnN$nxy$FTVE1HI;(sAL0HkU2M1dXeu(hr&<>FkqFH z#!RKWed)(ak$tN1{b1({YGCTks&0?=g$cHGATNOrAiyu4^T<-5~6^t>&``W7&sX`9%g&a?5e3T%~*7ynYZKF&;!8ie`1j;Aw z+;(${iP=M@ldC3HrF2r;wwYTLSRI16nQ&ancpro2Lbzh24Z<7Z9x9O%)cn7+Vpy}%udGE2c zr16aPv+&~G;q-rh8Z{nF*P5D|kA@BP7ix28$nF2NRllk9@7w7Ar|AFMnyqSw5Aw9pOKQ^fF&dn>^I!@za zDq6NZS2V9$*0KD5waYXTM$yrTGy=h{~~8Lw=Y<*FP!VENGQ}{Ez6d7R5^Y{>#Ai@_*%1S zb<46rtz?xp<(_1R^==G7hTt-*r+J0#B>eWrln)h9^ zbSYH|@6$7TM?h9NpT! z#OF6})xIqqt2087Yd!)|2gC+VI$Lt_fikCQ@$%+F$fcohY>M{wrqkxMww~023f$i8 zgQ8a{H<-yTL~3q7*;k+ct%}c<_U3=B8M{`ZVeVlE_im)K8{C~2uV`*+_l7vExud1s zx5E@p`?piY;V?ik+yTuUjqS}Rw@@XRw~8Wi&Ek%h)>Vzv?#)|uGBp%ToVTCzO{(+u zTEu}>X-S$qVL|(n=6066`jn=&|IMa5pU}LlWtF!JRRU_H7dN+gne=n{DZXlOhM@$Z z5vL-pX1Nmfr^Qj$G{-AgZ?dlP&PQ-lnIL96hA+D=R zjv@B*&1kQ*>owogrukCuHy2ggx44Z>9m@|19gaI`?x{SeXq-d!x3{3&LC0YS!~A{e z-o*Z)+WPT{w_#QLr%SNGIy=*czc3cqNHk4ByM`p%m+ot!sk zO?!Lus*c9R8K&M~R=Z%(L3ve*zJGf&s%+?SLPxV`_2M;4T7BK7TE4pZh}H#%ht``a z?EL1Fo7)`^4>qi6@H2Y(xfIJXFbJLO5zE_~o4D>vbScMeX#6$9kM&M=N4Kn4L4DNz zO{+VOTy;tdtrc&`bkxB&9=!5Ivhtb}EHh7^nb&TS>B3mbXZ1g5%)^gGr&15W^~^Vn zy(*!r^!I+8w87F$Yxra=NgbqarPix(D7P>8%FJ?S*Qut%sXNH5Q|rg$E@(97HZG=^ zLSX8?;Dj&=SQLhRsQA4!-eBBTdK+EN?9M3+qyzNa3PB@V!DX8V^HTh=U zU%$N5cIg##b5HmmW1-Uepa=9reA>(?`T=tw76sSRNA**w77L;8%?&_S1+12 zYu2otr_Gu@qhZGMou{7C+I|ufrc!(@nl*EB5IK28%c?b}PNu+`+Dn%WYnp$zQX;R+ zN*aoN0)*hBRfR=MnwPGgvV6TeoU{sB?MIzqPD3zeIXr%-`b}kT=Aj=6raK$>(0Yff zbMO3DC!vKI+Ql8_HgBON!1X?U8$Bz%Urh4pyOI8KaE=Dv^#zu50WFmJ&lB{&PhU8n zc30^0O76VLiz~l3sk-=yWNNMMLs3r*Q~;4 zHkrletMLbcw;??(Lx;~EWLKX)0X_qQo&uiMdXLf;dHAL+OH#XrHinY969Tdq@Nm$9 zMAH=PRZ<=@uHfSf~L06YbL0_2FDaE;?0g2pPU0udiJvyekC6T)a&%VwY)x(o`h14{CG|_{ zUC$BK_$5eRhTZmoJzoYs{tC9|u85!dK`j5+Z&u^G5bo<^@o=^$=R(H;fR|xU!9Nc? 
zLVC6yol*f_Quxh;`*K}&CFko`dj01e;I{)m2o-W$vX4*y?zNnL67WlaZ+Ro*PorGY z&UC`gI!@e?+lTxl4kb=XM^xhyC$YjM{>vzr$nkr7Ue5;F8S)>fS&4^YPCUd&9|aa| zOVdhyRy_O}@_YP-CVfsp219Ax-sjuaQjJTTG;Yhd#Lv!1zwRcso77W@x2}fj^h=RG zp2_$xDDM5IKs%m3-~T|)3kOX8SA2@mB%qqo5U?5g-v+{yIeD7s+w-c@*{W0^r?? z86QRMo&P)qyr+fn17TZACvr;Bmz$4L)Y`*AMv{SWZ-__jL@_&F&9pM(D5$D*?59Y|kB`j3!a{9FEz_0j%) zfBC?ye&(|O0^j*sDZhJPE_`OoEl^pSJs`Q@^`K8^n>u;Ll-!7z& zP~N*~fS2W8@nif4YPjg5{h0dWJ@gSju7W;|Z!_M7`Y!%;e`j3#cmEgIK2lE~f}C;w zTq1ow6TY4MJMb02f5^9;__wU=CVq4WQNuzX?Z+KnW&z^IZO9JZU)>8i{gITP{~QSX z4W!>29lZG6eYV=~ry~7?S6Tj8JlOp#$qMpef9?0uvDI?K@3F$Khy1qr*?uqQ_&q`T z$QOz}JJ2@thWy^`k9*Qb{4V`tqd!_jAMv}?>++YY@f3RQ^*-C-)xe^|om8#79oqaM zGy3Rw*!%-dFY&O1>WxqTci_|cwiADq9$+~?-GVc?9O)-~%n5(Ua3xNP%3qHl{p(2I zjsEB_!0+zIf&BO&79@Tw!JgtriHGEK)qbqhYlVw{Q>D*1@{2ET+?4HK_Z1F6GH>|E zysQ!FBjAtF1LYg?Z(#}txcFE6*g1o7@$YQt6YawIYGk|y_{l@rKCQ@C{QgJa;`i?( zec1;vea4|4Nj#JeWcfNC9v{Z?B_71@DJrP;`<25vz4(1IvV*th!UE$X5kHl7L-}Ps z(y#Dmo9Xj9;z9gaIGVXx8od8BVcdXe*G znoxWDmz13CMzfrflG8!BPv4{Dd<;BlsMhBynvjP1sgfi8R~ynxoLA-xz5%3<;D07L zA>W-@KB~QwcF*m~xYP@Y+s-{0*KvF7jvNmXx6(hh?Z@e*f1FHy@%c9F#OYrOTDec! zD-|cwuce=7Il5n~pUiTkUt13Oj{)Bl3mg)kG2%qWXR@d|J|+Gmg-hI~O3$m2Z~46H zxP6>(zitpWZW~HE3+4Sc@DlKYfFDHtr%xY!hSLjPS!c+J{~qKULCzw2Kr1W$B~Hpq ztK&iZTU^1o_U}t`s{Q*7($o7f?%?!4L;Lni;CJrF=`Y4Sz0&?Ddy0SCZf7~#zkBRo z?cY&UE}!qaz<-GLS^Qi0GTUGK*EMDPw^Xdg#lNw_8>?CektboB}IDi%fih! z?3Yp}HBj`CdMbX2zRtMz%l8&$`^8BTQ+LjMbn;q+5NF3vp;{C?ontc8#GtElWI z{wh7g^0mKy4mnaE2T^&wzt$eh`JTsauJjjW*hl*-IicDwmHMb~@mH$!oQHf9{}2>? z_}f+1XObL$bs~KfSfo3Rvo=(Z8o$F9)Bfu2VqE)c*s5xOeF1vD0sJ6cUsQR;{@s6L z`P%+d{1I^ai2Wag{B^)zM+YYLr3>ZN_IFFN?O$44jf?$bh5rNjHa?PV|5A?qi%1`Z zQqaR*Irf+FTe1mtl=RX4RADUR+Wya+Rc-$*NxqNIl7ASFK0CnvM*)B4ON^J%zKPuu z=&9|NG*{cF61NH$yQRv`m3pV}i=j{S%WQk5IreNo`Y4o++Lf&~9MA4@wwuIr#m-k& z+xc$jQ@DolXLxz2GT$lR!nn5oZELITf8>^I|1X};_|IT}8TU3m!E*Gt_YnVF9DQWm z`x@j7zKHSL_)-~Hl$HGpWC!o}?|qdMzNp8&ORN33E7D)^bxuE(r(ud}{=Px?c~F`aNf z0{-9)Y=`AR8a+)z{jj&^);BW#DsUMGc0bB;WE?2{TJi+rI!@e8EawHtsgGFxi@>J? 
z-xYYnI+l}Qyw^OQ)7Rh3a;{PBYEk(`;w>6N0~q?~c)RHqmLu`@^{rX{l3N+y8STT9 zz^}Z6@zUNwTqRB-_)Eu0vb;J@D(!~CC4N$+|2xPxeJ?wH+{LUQhD%Q7(cMTNv6}AQ zZ8jV~otsm`P9GgVg{>Kv_!&lWd_3&-7TW=Csl-VmaEX(Bk$%eCoL>AZagy%A@^zdP zkzV3N;-R!3r`Pc?>u)Sy;-TXA&vJV0_vd>!z4-kU=y~ZojDMGx&no`yfjv{6t~l9` zF5>S>BMj*wE#piTJnc0LHa{_xmr~S^WEBlxs5@mHE#l zeA~&mD0znE=>4WL%BAa_{(ISCfBkp3#r{tHeQmM7{=4DtL(gQ3Z2QMK_U}acD3s2* zlP7N2{tf-M3@OF_6?@h*uI+hyKen6La})Bf_siLJj7zY(W(@gViC;daKQ-c|bdyBHsadL;e(8I##Q(!Wbwb>GeDbzHrH^b%M8 zHR0aQ4^Ck@Ip7VFZ|U%PWzj_$7~9LMResbPhrzfQiz>2*9j=HDtoABl%u$Uffh$s)$jR_lR{ zu&4H~eowpjSHJ&V{F{8K+P{_dQ{gM2PtTFrelO+t-5tg0qfk0}y$WTwA-^}?K?6ei zh~H&hxBG6!#qZKS6z{9XB_3)|X1hr|h~Il2;Pl$>WuzCsUykzLcM8jq_z^$${Fvov zKdwW1@#8a)bJM9TXAizq=6CwNr2j$s*Zs?h=(GFqaH-Vm2=>%|*Y5`vzw7soir?M2 z)qbB$KYhHV%d`EP=J>Z8=_6Lt&Cl_#-}d$LXQ=a>ep}7sM=E?3@QW3G4&mY4mpUie z8SqWrLY&`&I_~?B^SF}pqmX_R*MleMCH?D!XT@h;x&9I4Z0z3bpoT(yvgFk4;1i8- z`#m$UD)lJBYup(3{tgrX>wUHe={Iq=46eqf0FRaa`vf_gxph;k(;rRvcv_z?jpuxM zw&>g%!Z#-S7oHj%a(w~|aMuTXJw@=(`w8Eg(s!RdC?H=!`}Ql~>A4&~rz8DgJ97H+ z%7FnngqKC!*J1FpydVBJ;Pa=#ZZ~cc5E=iRP55{^|5=LI?_-eucL6WBo~PLF zTLQ0X;PhSFbNU$gHiY}~mVQkW0s4r4_XM8&gcjcEvv2rz?q*4U7CTn_zRNV0Q+%4s zbpg`fN_duizKir-Z*rX6$jkKZS!#HFd`1s*d3`(OKZ9n#4y%TR-!F66xibj&>C2Ni z9v)?J?iQp^Pi9>7oHuy*rmyB!SNVZT!Bxq@(SpYq|H@8FO-y>A8S-`|ww@V9fn zCOoUWZ$nOxe{&Fh-n-}MA{6ymQ*z|-GW`|%*c zeVjy_RhRchk(06<(Wn0|Y|rBLtj_>8pIbqAu*2DW{_1$ty9a@%{^dvX`Dgg1*RIe) zfsg0-G_GIA06&p%FQ?}V+@DImF9MHMdvfHioW5`=$Cdc;SHRQXvi{>5IfHkByYMO~ zI-gX}`10M@4o{6?J5YAv^9bQypRU!_ewn;`wjaNcgP%>fm*W<)!O%RoKL9U$mCHLB zaXWkuPG3Hs+mo$;pGJ7_*EZF9{sMUCR*V;+|67pLy<}`a?t~o%@5%B@U*z}^yM2}L ztoXbc>3e>_@gw>V*^A{g#%#!8{W;^6z#Cp37l1pU&wYgZ`dHkPXGT|Z^X-Q2?R&xA zpB)JI<&C%Ia*aWfLkQ2(vyE_X=hD|W9wh$H1@2V4`W@g6$}amc59B<|_7pjH5uPRI$Ao8<_qUMKbvBn*=2t~iJ7Rzte`&kSm)y*-yB&Wi!1bMk}-yFJIbkNhm(I2m{m?Hj_-T?IL7RsZrn z;aU2RpX2kT`o8PLAedWkIyX+Z&$j?Oh@O`d9@_a5d;GS9^?4-78SNT3=5q13bAN)I zu9vvJOB^1!AImR(i|un0GG0P>mOXFI!S9EhhE8tZ>N$Mfi@-a-$@2YsD*b2H+-&)~ 
z6Yk5~@K0_B?neiIJmKCiYYSYW0*i5{2RUS)x#I(}dQ-mf-5mK3Kz?ZjdrIuN#r~YH zTfiOQPTU;0iwQ6IIKlWC*}BUK_xdy_eO?8=5bclD$M+$>5&2RWgirGU?Dz7i6Ttr`vPuK_R7#S1$q{`?nF&$3JJ~KTipM_JHvF-{G6y z|48xc?UrCaZX4kH5gzP6Y*IkP{?{UXbRo-^dRlvEc6rC=;JXm+<)69{%NIXh3jBT* zpYH-+sQf;BKH_0QwVo#s?%SVuQGL*#h7I9!8qzzA>!e)WgnK)5-pZ|?^gDk>`U2#0 zF7#ebr0@Ee<)^T7>jIXe`=e)o7q8el=ppj|O1QT}ivF?4YPzA+&wG6ekFrOGBCh5F zZ#ZMCkW%W`4TO6+2M%QoMb0CHdpoop&w5I`{Q}`5Xy4~iJXa-h{s#G7YMijuXR`gf zn((at;xWR#{^>?sFYe>)UGrg#mrmq3lzg86-td2HpC6*Ub%(>A7$-~m%Rh83qINHt z$>oLF+!{*n?eP96#wD(PCUQ<=`66d3HE2Q4XSfuvLeDuzvYhTo)p9;ZxKCd?WN^sx z9%TFl!n5l8l|c@jqoX&J`OiqC|1RV-LLZh!=W~x@`CVhVUSC$nguMv&<+@^AwLYH* zJ{x!u@=Jt|pq{}MIA4fycR_yJQyYbpf5VNfj?VVu9)$b&DV@n0e!yYp+6eb{h%RJ3 zzXyY!wGjHNdEd3iWXpexa4)C(bQbJinC3rQ91A(euzWcWGmmgT4vDs&7}8O8;d3L> zcVZkR{{0Ae`DBhm!57l_$lJ3`wI`<&p5@2O3HN#?JMuU|7&y* zUzrD8L3oz@hmpSL`Ej8<*dB1d0$!ZUf%zNwtLHeD->?Jgvkw&7{CLJoY8<#1;l8|U z(e6pTI1K3vYW(~l@P=D93HppfeSAB}q32I0b6nB1H+&{8V)c<8NTXWrTZuijQv^037ddFCcx}%cBB*`H#cJ6`DAGr;3M@fJcvWxkR7W2+wLK zYfs?v7Ea@qVI};#7vbJt2aaS*N?ct}c&IOGz3#Cfhjm;3`{f{~;0iOg4KSC2o_mG- zThL#K{KFRed<*XVzjAu=Q1~n%JS%R`BHZhv+w<3em;JkX>4Wfe`z&GkU9WTeY!>d) zb8LiX*=H@`UVix-9Op95xgL1;Q#@Y~{C?m?^p8^Swr*y7j@g3kDe->>;a+~{Z&}ZT z!^_wV1et<0o(#S zQSI39WnO;o&kg~n`A9prlM-(w;9WoC_C)M3XgS;ADb+u27jSAwhre-kve$SyC_V1m3pq;Go|n^{nR$zLtQ`yIRJ+&!2Z^Ilm1!|9k!KnpXCU!#acbWg+2NemNiM4^;hN58=MNdfn{sHpaV7 z=cpEY{up?KIN29UP5V5jFR1z5Rls{rtM<#FcJ_O8@WdcN+L`Tu_h4UOOUR!`c<{?v z)$x22aM!@`FY)kPkVEY=>QNDL{we9NU^zQ*wRDBmC@<=1p^gdr0xziXZ5MF8{`oB7 z8zZmvFCXOK!#gC5PkMnV4)@DkP~CIEkm@GSe( zoWk7N+`(;-H@4@<>l&khMmQy}4 z3Uc4NpXE;_+{)S#;D-_J_0j7>Cjw6{8y!+!fb@SL-0Qh^JJyrG>A8w8u$-c5A65X@ z>vdlx+{#$+{^FA z{zeH_xVQt9rX0M6nef9($o0-X|7+#A^qGl zIeof&6p$^a@{Lmn5Amt&_A8`M7P9<06s+cpET^zP`{nv6EN3R+UQWYLhX+9V->(68 zs(t$p@ZyDBqh+0HX2NnBe>yJ6;o9WfWx&geINqfFypQm#din;^x2bW|A!o5YWAtm{ z_a%gT{fj@?$obD(wc+9}AUsR{uaLf>oAs1_dC1w2a~sFc$1KiG0p5oF)*2S?E(YFt zImgumw5ub(l%4NZgtMJFUy-v0cUM+cD{~qU*68OT&^=% zUAO6ZjCY~k6FYn!c+bz-K9cV*2>11IlG=y*3(}W<$Z`7z2-^6|l%D#91F`?YwSZnU 
zPq^2oYqzn%VjtJB;qLM2N&Y&tpHfeUujTZO^Vwq8LC`GVu6sZb(24q6B;3pCQR@;9 z6P{(CUqMcK`Bp)W%rE{0JWg1?`1dpC!=4597mg*l4#KnKe*x*cZ?EpZz9w?i{&jz9 zsI%mZ3plm&^v4AJXB*i0Ai{lr+KurTuQt$MLkc*x^J*RVEW*A0+fHNs8zAQn$WLx$ zJ8TX7m4JsVtG@i-0ts?1X8ZgS>9@Uz?SG)^zs@6^^W`{{xcZ22woH|Hx6@aU9_^T< z|2pulS2;eVo_?3`EW7;dl=z9 zeb;sz=Mq;Zh4f_q#p6SPJNe?SBizSrS*<73bh4c2a<=m;wQQ&J>EPPNZn(RB=!7c6 zjg|BhINu@t`1ZksXW8d`!hOD}+DG`K$T^$aAJM<&64s~iHO7CB+)pDs%a4~5o|W%w z0jG9p!4^Tn-VH2h=%v|mb|&20Gga-?R|)s=U%))20DZ0}Jd6V$<^EC5X$-pz6ha@auN@Z2As|j4cs>5DEYnsIbCP6oLcznZ@}B0O<4`4)f11EOL58Sf-KtKQv- z^gRi;x2Hq?%x|%rXc~J&>h*l!J=d@_H0SOX;03ilGx92yqtBU40^Y6GslF=d)j5s# zBt85)7v%^&&Jou_pD(dKV$WX_?(-dk`IO|lYl+iG%5K*K*Xw5g z1>SuvD|jw9TQ2%G$)|Bttj1BZJx=Z36<7xz4m}SJIQ9E#eQycjS#k1zg!}kUXO0XK zBp$9LJgc5w2l?Hwa}oLeIN)@?1?}(x;G12~_F1Ru#aV>=e2ZnSchaA}7ShxD#x!>9 z63Cf%1E+6jVf)-($2Xd8WV}s{qn-usus+!gIW;%2-6G8IjsSiP;a;Df-*cQi1V{c5 zc;|@&LQ1I@+uzLT3x8$&wjnI&5a21+dnJ8|aPKeIz~wp``F@1-V7Ot`OK z#ZlV?OnTl$J;ZZ z-)$UMv$4)L334V79`fCXTM`P(@RUNfsPGG-R^d#K-tMT7l zpxW4L<=ppxclNIjzb{50{uFY$OB@eU?`GctyQ%YdR{?jou)n0d4-)S4?U}@pQ^NS| z4@e)Yb>MyPT861`3)^YgZ>ZA2^Y7~U7Wtuz;Q0=&nDcr z!wc2=+pC0oIi1I_9ppV)^ei~a?e^pSIqp9Nmtf%|2=uGd{a zxYs{DkK;=8+333{*KfFe6T59ExLOB0lyI+qC-&E7huoce28RuKL)bzJ@3HSQHkM+6&^e^1Q za&$X619(xLbGw9aZ-0mLHcgOo6Vf+eJ}B+#$AphSn)NRSe~;y~-NJtFM1OH6;h{dh z&VIRWFthFuT&=Thd2hCz=MwJaxNA9gh;i2vKB9J%lfOjCxl?`2k@Fa z7pk6vxR`M7mjv-F_J0?+-X}WbK~CRx6x&(ajYol(&KMnX6glxj@Yk8#56b&I&j7B^ zQTz&cigChd^qWI}kZtEtgnRuPALBri@$hbhdppN!zPvC;&SOYlQuF0)9%eaf(LNVp z&w0S>;YVpVmH?lm&R3rqaPrqx++uT7IClf#UjH`LU)TPS0bW zP>tKa4!jHH6@4BJa-j5jmp_K|H15SbPU7UhgnNC;b*$%9Gz4!x!f{o;kK=7TWQ|BU zy*}qK0k}SQy)WTe?OrF+Ckc-${^R)bH1N({M+QLF2mT7Y>(uIYd9^`M8EN{t)Tw)jWN~kKmV?TZDune@DW7yV`|wTw|dB0l<3>ni!Bxfj0%5=BJ~% zq>}zj!n4YC1>_X(WI2l==f98>t9mi=an`dN^J{50&H>)mzaX!oB?yHUHZBNygoi9QY5Qz+VR5^A`8NLs2jMxv&r?YJKl- zz#YofjC?nFitBaTuekl3cp2Z=o$#Q~Ssedz4lMy*#`8GRPhAc?dSaU(K-%GvPqUoz zDO(5pU&wcV;Cfx|R>5CmIT9y7A>7AF3H{3|$l3E5maoq(dC2P3r{G+iTSvG*7ondk`Dc!t 
z#%EcdM&+;j3D3%R9pT>2W7IgLuAAk=>N$nQglF{^XA+(z|7(z=_b=`sJd7uCZbaI# zrvpCP)uX)?e9AAN&p88vN5r1D5}uWBSHQ{dA8>%2*pD6gy2w%MZ%x1Sawx98!x8@I zP)>huz!3!NUCNE#4&z)2>uG2X-3yf7$6KuCar2+c*7F9!z5QPrKPgC`jXY|eXM7UY zyNbXk6Yk~rw6Yzf-hGSkEIT}i^hFg9o4ml~jrQRBd;Vse!Cc^J7su^56l@jYKHtK3 zx&L|?9nx1s&hI(>EjzND?-TCJwN9Pee;IN*)j5YfUu5~+YG3{;;GJsS{`oYF3kdgmrfR#_4*%C`Sq zgnRjsis#`kGhR^hwH*of^0gf<0PY^B51QauzuW59@QYd>zJPF_uT$fndkD{plP4jk zcs<+mbd-1Z-?04Rp1SsWo|W%DgkMMfS`p)L>gK}d zG{}k9QeQ$J{FwxIE#bag3ny}Yk@n$n;A_=5?RDU7NM8)noE!LC#KT@}&oeQ8+nI20 z|MEp#FaE>XyZv9~^o^Gd3leI%n{@4jhxEr*_k*thZ^Ls0V$Y+02YbH3Exh#0KOj8I zKHWL^9|BI}C9JnxIgkssLpfW{-h_KSdv>lKe;%Kc{%oXg#5$g|Gd;lDF6Q<}_Ur%s zT6Vr0zs~vUb>rQE7tUdQME)IwXW8Lc60o>59u3m zE?vemo4mntx?X3$$hd2&;D2WvuVr^v0Z(CgIfr@4A6QPL){VOe=b8mYaCrmiohsLX ze`Gm5hjF|uL;7We`}luJ&7&UyUQ+$YxIc0F#!a}sP`4F6QwR@s!vm~R-UE=nO|5_4 zKzLYBdzLH2AK7j0xqw6HdY4J-*#2egD;ezp4YcQzfL}^@Ry_YE2mcr3L~7k&#$OQ+YG3s$gnK=^)pM8cAbpJY zMM(Rz;>~P*?j+pXzw1lfZc98o4ZJ6<4<(ZQ;%(mI^bI(dBJwW<-lq1;9s%BX_0~a- z$o~WIF6@i#3%f0No6D78o;w@(w}G!y?f+u|FSt%TU%=jU?nOzj`uDd8_vP)G%JC!m zFZ~_h_4_E+5$^S=M>`<)KlC4v zuf}Po0dK(H0~LF|N4T$FU2kj^EO0;Uv;99A?>wI4>TKBOD8hZdWt?-C{{4=ClUK2B zuo=?-gm7Qp@)azY)DE9N{)_RRQ#szyy}K>mW%*7$e{?e8UQTol>nZEimm~fCYJKT0 z;A^pOSVX?Rf}FN??jOa@`@9Ez;=v&$_a@FQ0bYil%yO{3tI@QNWuh-u;{g~zStm1f( z^T6{7-XpuEMM=B`~`TlisfH~d~5&9b}l@$Wk4Qygl{wu z?)5L>eL+n~e>w1S+qMDO5%}W)r~XB?w|^$w`>TX`SwEyN&=ZCpZ=A*Ls;rYt2i~Lh zDQ+P=%MMQA*kd2mqIfgnKzX z)7f9#N;o%%aIe3^d{EkxR^WQw@*LptDO_Jphx~#4z5LOxpw{1(0B=+0*&Zg`=Uey* z%aMJe9^jFBPeS7WPM@fCfO~-VU_312fh`7d`tmLuS2BLsjqt4aSweW0-Oh%bp7Yr< zRNca7MLo-JKm$1p0euzlUgs7F&&u~X!oA&2#kt=ig`S(#Q8S9 zyirK`n>r?(HyHYSp7oS|a}qaQaB?JHJY}f4@Yk-Y@YH;l3R%JUl97 zcslakeIu5Ws(tw;;JTkZV<^jsA7a1k`~>TN58*x@+SIzu8%W=EKgYQ|XT8NRPT!^8 zdvhV--kuTKpV=(lts^|k{vU_*)VCZmF{G6Ct!a36`b!A+dUmS!p4Aqz(~k}~r613J z+=HvVTR^z)FZ4R?XLIDV6CUcRn)kg(xUbg<-m4(v#u+0Rk8qw%@J`?<&VwBX`@BPV zmVLI_nDf=|i8>Q_quPJ@HSn(KTu^DxKM*-;-FxClmaory90I)j=D46Cg+chV5gx{e zgE;ba6>glE~|$Q=9*!o6Q2^**;$EY40xw{Kj*{1-8Z)iDSrg}OdQQ}wA~I8{F~g~u0{Pl 
ze+;MB=Ps@X-f$haC-OY(Bfz`A$o_4Ho_`|T`z0F6^Oy61zXv=W#qmD|`7RjC`8J%( z4cq5%gxx2FR6LWd4z}dWMh{9@eo$%pTHYl<@j-ESARE- z)2FYpo>TgB`dv5Ya`i;)myNmmbSnamLaui?lW?DJ0nZ7t^_;tlaNjPa>bZ}?7M#Ac zKljK#q^{V1o+Ui1yuT;h%PFb%fz)h?@?OLK{U;a04d05>H>l@gS_#jRe_BXS^J1Jo z*^Je3-^fXSH{rg#Jv(tc$Ufj-f*hKcq2C_^Iki!CzS|S-%hdxti%5Sk;lZB2X8AHs zI9+fxPq}J5^ik*f#&7NAQ#*fdc_)+cc9@{Xk$L*&P9^(H%mUcDb z{7$6T?|m9NAv@nmIrv_Ldwty1JW6>B^`a#weJkPKo@2&L3MJ)l=PrUAeeU|!963Km zdOaT7Zd;b$sp{Q1gkR^|VZ1LF)!N-fc$S@?g`5V=uf^~GB;1#`aZ8Tp|Dq!8Fp=}^ zR_7V^B|P}+W={WS)c0$)W4v@a<8_!%{W#zVg7q%zkX}F6xpETADWA&ifV4}`5gy_S z=WvdLteM+$dOg0n6}X=7`~-O0om_Fx!bD;pnwW?Bj`div&mzLJ#;X^G^c3fqZ%RM7 zj_?pa>v$X}^O12oGOo{c?+08zhgc%q>(i~C@9V!4<3;QrNxbbqc<>9>>!dwg2wcAx z_jKUJbJ@<)ua$u}{FuiB+n|2cPli6~od0UVL;R?D^jCoEb(`A>&x+5dkiKgccNF`f zA$yzfFy6p^!Cd%#mnn?Ln6F8@cR1l*p9t$MsJ`wV!n5-INYbnKOHZB3iy4u`G4(wdypi_SziOjATZ*vu{i^Vm61ROGn(q2r!Z#scKf8!?(Ggc zd(t5uwLLvGGp+5{b=Sx~P{y%cPo9FPk5zlk^7eCv~^Y`V2UnS#je+Tn(wI6>} z#@{D(%1U3K*XDWL{P|bQ_)|{*s2`JYx8LkXFz$oT19%X@$L-tv3XJ=%?^cZ8tr-82 zit*nx1X}`fA zl=0PXWQ9WQzScU=#!0Sy^rsk4{+ckIZ^pQf|NMiB@gKoBt>5j_ z_(6;lUVXawvG2;`pONd`xauEn`1j?3O@`Z(5`VuV^Bl|gU8x7$lkuO%IIX93!tnVZ zi-%Twi^uQEIc3Z(Tdl8^@u$9p^=)jQ`S}Tq)B2wgeyDu!KgsoOU#Zr%JompbAJcvO z%NVD59&_xGk&I6re*5hhr+sOCHzUp-o8_%%Wc>19xMbG%&9CBhe!;65K6j=6)ysIe zbs6IXho`@k?QzxLzE#FAJ9QbqX6E_4)?G*LfAv@S_vP&vr}^*xO9t>437-G>-{SYZ zBy#+&tmnfRr|VsR%g=l@zwgJ+zR+Cx>L1}7{wK!idbdvM^{7)e^ZX;GAcga{nfhhu z=VGUbI?(!Fj1xcqsN_8!$a;QX=5gzwKl^KV{SSy-zAV?j665q-w@=jDLZL39=_kv4}U|hchA@OLm7YC z$s0U-Ezh&%__2Qj4o2`z6M#cC_wc-0%&j{_(9CCp`bq&oG_- zMN#zsjm)##XF02Jy8o#2JN~Zj4cRXz53rB%OH4Y2|7RGd_22z5*3UJsCV7O%pAx>O z`1Z>fCpbLye#YCr%+|Td*MC&%XZ8AbV4U9Tqd&%Q9*9Bv;4QxXY3YCd5t-*LGVb=X z{Vy{9v9~c`{#dU6*Eo#t9Bjy@}!cUZK-(HP@q0 zi}Z{5D>BdbW8Cc5OW8uX{to`)-mg7+S&Clv8q} zzm4%r<;z=-Vcf?Den{qddWFALee44|zmu2sMSJ|*_k9iH?e7R4z8~ZCzVG{bRzR+Z z;QwX0{;5soSIXa_I}HEJzxi^rTGc1N3*!W5w_g6AJ<9v?pyOxzPJHJXbNvIYOM?GLvKM=g z@jS;)K2m~lnrHVd3?HS7Uy$pca`e&%zfPX(^auVb8F%vy9+Po5e*MH7{rK<$W*m6_ 
zCoJiH^5^*leg)&imtO1WrC;&&JpPR1FS;S)t4_VxdoWJ#`#z_C^{>kK>I&nP;z{Ql zc>Yx<|KtwF4cr|4;YTq}czF5iSRYXR{I}(Lmp}abZ}ju?-^I9D=ch5Ayj1M>y>Ieg z|1OLZd>(Y{_y1?c;qMi@fVFG%WsP}$*Kc-B#@#-*@0Ib^uN3+4KfcTJxcz3oyT@?Y z{qdKX4Sd@NcprZX0Szc(Dl zN2AF&o~ON8+KS(}mk#GUdubH)k9)H?o%IIke5c>--o{vGzc(NB55H2k{_HX1#QR74?qvX`|j?ywaZx5AfWzD5h6PvAn@}kPdpo z!JF|HUaU7hhz}B;oc>-N3~&>kHl0nTyyo_s4sJy2Q@qz?8ux}r@whiiy7V^Yo3(>p z+Kbcs)5Q5=1FIYLQqNc0=IZ(2&1Rat^u|^k4<@v-R{WY9w;sB31IsUW5AQI;Xfk=Z?qlnO#!cyahlMZ4HJN7=i2yUqUU@CtIzccqE!%!LFdW+=X@<|Sj$n8DiU^YU*Ag^^9SR@ z36SN^UMN=jkn*|3Hiqx2fRQXSd_}a7NaF)!EV`3iN-Mw_< zH0gtZ?d+a{*!J2nSnBn?DBc<1&q23aIZ+2JdeV=_$rJG?85tPsRK%X5C{4z5Fh(Fn zze`hhF!e6YynEd|YCHSe{CTbF$#-544*eW47uxDD_EkibDyVJGPR6`b;CS9!D!6cb zk`9J*=EZNkLxS+RgUFq|=k)OxAj+?cEZ=IBT_Tq4Op?3w7OnQ?^TE+LKJ5*=yQj{@homwXJ>xa6S47>U`IED0_prNmIO?4N zg{!_CjPa%azM8MhltLBJW^Xzj-iJ6gI?8Y`PrHZ2Zi}oyRhTrL8CF1CMpv#gt<$>g z3jxAi=Y5@fw&&B~AccN4+gSoBy4`NRPAGO@QCrM|HsagR9R;$5^+Br{CF3+6pAg40 ze6BN@iVA2ro$g4Hj{B1lDYG^aW5o)jA)Y<#c6)owdM%vEuMwshVTF8M0k^I*lZpV5 z-fW)4(EdgzL*_KwP^SCw;cznT=3-$lh30pVq`ko~+K6AoM%f zd=ZbKkAdT?O?!jc&h=#=O@O_z9|7V}t2ZE>&0~5g6jz%2b-qQJJY`n+`LEUy|J{^tX-smje zKLq&C@Xu)4QHN*_=1s!d!0Adn8BOEE@k%=vd`sMI3@*mVCGIx%MAO}splrIa^vX)+ zOYDKO>6P!gb(Dgj1J4jJ?|1C7Z^Iz54K0{2b2Lt7?RywSP=+v#{bkS(r+I;q!1vBM%^)#jVBn9{86 z{&4~W8aBYmFwGEBwSdUIJ_x9^=p3TYnPk~EHfTY%%#BO|)fF5o3Y%n@J~5al@UB$w zL#2&Ih&!wxT6d-{gE5D=b7_^BDDsT#CV_i4-PW*oKbei;0gCr~74Ox(dHvDhX?Hd5 z4~+@A{LR^WlbF7}4`Grd#=wi9T5rcY2hf%V>HQtpebBbnhRGaWJo+8l7D=@&Y#G!i zutu1oQAei`odZ``%8)XsfEUb`Reev*j`%=WL5#&su~(MUL$nz`d@}AEuir7;ena@h z9`5zJ3ibB7sB1mp@LT)PY7ItOi-be0*NwY`vnu)eBVg#TRoE50OCbH z%{UuIy>ag-nUSkCe;gRvI<`2%2J+YN?2#S}S0>hRXJ?t0t9b!dwV}lJfM?0ts1v7M zLZF2oLpQC~fe{yc9M@3`U~)BI9Dscw1So=Ec74=756}VkYTR7}=;vR@a|U!?dRH-Q z(TD`3{H?vUvXHM~AXUO-F}e|UT4Z(}rX@Ttcdyp0ti;-gZDfAg;2W#4`rMN_yQS8; zWr;`aWHcT2`UymB|K85)u8m>Dp1Kw+%b-r%0ob&?+KFQP53$GUM}>^hy=tDpf@#Q> zSu0R!C2NI3ZyX3l8yyp8AJIx_y-{bi9k|}GS?U$iPS7NQFJAFF#g}hy+b?gyDD15p 
zZeL`j*0Y@@wh;gwn-D<3)ze7rRrgg`#YKX5pQ&rIRllXYL+V;0jbfejL@?+*xZ3(@ zG*69JE>j)pYjM=otB&wEl~l)ehrMI-WZIAClfxA5(YQA|n!_2taCn#$*3PIm-QiWc zD|FteY`}JO0S%^yXt2f0%UGmLA}U^3g%QP_m8Ikxs=Q7NcG$ewl#MKcoy=l#^}=as zY;^FNf_as@QWn48)!f&6DFR7GcWz*k25mA==29Nv%*m;k$fySF$Zc8+H35{Z3;8tFFEZCajMNr51@>W%$Xb+1B2@a`0OU@dz11gs#a17OvNjrGDSaj(T!23-Mul|Xs_Y_&h{q#4|*H5*EcrTwmKWpI{t`gWUaluw!N|0-lh?{uD#LO z*luratbsO1=DM}jc6+n4y17nMMqB2;sJ+(dwAVX8$L`?v9R!{@69$5+H!=x47TM-; zyaN<#Eq5g$Skgx);s-3Oyg zXZKr&U{;9#oQy{Iaf|hrNqQ_?+MD;r@pP7uF3Hf1AHIF%5qLIlUW?(3ieGc{(YX6s z8GP-v+pYN8*WJAG$j()KM>l1&A-&QbkVH`tB)1-Zc&~dWzH{ZF8(ng7-Puz{=89yK zPss!u62QYl%`423>10N^2a{Q{Mm{tO5g;gKhuXU==-aMvE|9_AgvCsuL_%ws3e1^<`8OWoGPJ9?FGa)G&U&F8p}9A6 zIgTvI_+Fe|Gl{c^Kp|ijBlE??FL=}9_DXwY-E3yM(LbI|#*P=t<-#3G=8RWR!VS@cT{FUQ~EgHmf}< z<}1HP1=D#wD&$ScJSr?Wux6+edyg2wag}7o75Z-@P;fLjg-c#KVLOtVZ4f@Pb0Z0i zfJhhXe*}&qPQ@O;%ATbrn~uKgp4xR2h&5480Hm9itn5%Sw#mvi0SkJhvQl66n#)iF z2*8(r6ohMyI?Wx`nJR@2agrUwNg~TX!BGv9sL3F%AsS*AZIhED!XjkpEKr1rZD&au z5LB!-LQ6&R4KnbPc(ZW@YDVVV)(DS~buh!C@}j&Rqv2gQTsVb1N8Y!d9TFrp$01|& z0rEm8CytVVQdvC)+fhLXfDl-yb43tk$v+#f+_-Ip<%R&Uyji^SYpVq>j~daMCSrw$ zOfHDyv@d7bfnntdb@Uqin9e5bF&Hih68DVxVzSXWiOWmX?%iTDC;U*?<}RPg@8O{6Wgmr^kN@I z$n50}twlwlbD!@u=xL4YvYtHo@$Cvf&;_kP-qFpAQ)&)?Po;Gpv_e;Xp{k+=?$EZ= zxk4V&h*am@Xt&4?udtn^%(3`4b)~ZeE}h=EpX^gsU6z<$^#c{|QD@erzfP~P9TOIx{#rT2#H6_co?&juhw$C8l8y^yB>BVf zE9q5{#>I)q+pExcfzLq-FM_vJdz2}>nRNoBUo;zwYRt3hW7&>kl!O!EDd{9kK2zmX zK|r%Dj3TQn1}-WNAy1|QdxLg#1!zCw5}y70X);IR(r9!U-l!`N?ZnYahrJu5tz;ym zs3)&eArkbeM%qN-iRBLs!AM~chHH`U8L>O^`5q3?4`94w>cNU}t`XTmu*^E~9q_d4 zcXC&nt%~@(t;>qrOvOh!EO!zZOwfvefbH}qDDYQ9Gm6g(T6D(NTf&}e+Mo&rI%uP~ z`IJ6)#_nklYj=GayyR`_oPi9lI7JSVVXyRXK-%LBzG{5& z2$syzVgx-bz%;1bF^)Qd8RR!0v!|8z#>cLgnC2`NUz}9k|8` z0SKTGu&o;QW)x_`>WpDf%2gxM7#Pb$Yni~!f*fI5w{4Md>Prap+}A=hv=)#Cl@}J> zghI8`3J$m?V8|N;TLa5Ev4qRfd)u|QigmJZPsyRB3)Iv1IV7!xfBWD+MVbu(xMOGq z`w;o%)C4f$Zq(l##FS4YyrwdwpeiDRuKjT+5T!-Y2C7A*#$cvVZX>a4G#I1myb!RX 
z`_QmpQSE<|(j*`zsMCY3=dH9&{jzw{8~pwY`P4~DWM0q^qG`15s*@I2xdU6`rlE>%5wTp$S(MUx-;R*8V~z)i2SSxRg5RXK++JR5--1P9#0-=c2U+bff@s!M z@!jC;KF=!$O?@8kp;Py9xZKnVsI;-9Lx;eu9)kn^PP#p=GS|3JWB03c)u2vR8)0|EDce1!i^OU1Hx189XVO5O5w7K>y_p&Vbk|4?w+8@Q&_XDfBUWI}7$ zn;I7)1Aw{&>U@wOgTcX4#$hei3$t`n*Mks5G)WczHR-5Jqc1P7xfpPP^)|40VFPGV zqDocB+>EUaYVpz`Hr36cy1~48v1d1~Atm4#k16X?vv9>8iq~GEnMs*x>~} zK|kH~K2i{7syh?`0u;7{l5LSZKl9llN`ziEpY6^aTUXe!J1H-tDyW>SD(rF%WRXO9 zmP%T7FROmt?Bnvp)q<_Sdbwt)^#L=g!ra73OTLW|F|6QF&gktC2u99^<#aNbbJuO8 zslmBBL3Kx_)p`SsJ}tG`mhu}0A@9RXw+8$yC2AT-ggk{^rdIwO*d!G16nWrJz%oDJ z4u-CXv}j{Hols*+D)@adWXZOvW;MNq?ec%xA06bNS#gl(R>pt*-k-RyH_)UAq#PFUO zB$%zr-Q8smg&N^1q_Go93OXcjU6Je-ZBW{|K>)jGX5Ye%)wz)4SYBa*soncP-YncP-&nVk8`UnYm?n#<%CU<|eN9D<~7E`L2vDtTLKlGPnQ zXd4YJN*0iE58*snG-k253Gp7=zshL?qSiYO(`T_HA)pJPsAvgF zRdkcht@0n8gLyBla+Nq)J$izrEbk0q5yn=I-H{^NXaqb%F7nvYA^YmWzf>*8dYfLR zM!uW-RcA9bG597WE`XB+>23n3rL(9=Eu|yjMDfnrlP72WWJ-Ogiu!kedRMd|0^XC5 zffND_SDT1|Nr4>dI$NhN`$e7vG%J*I;pt};(k6>AdaLGW zse%tmo+e8~t&{6SxT?ulrPOaA-();~bC+VEK?qUr%36fD6qg-~U6v0fzN&6>louYQ z$JA_PG6U>w^+&@GTpllZZPr0Bh0je_T8sF5;{xQl5Q{gxyQk}1rA$FQ38C36NE)o-j7idI=`9zQ`-XB?q26_FrL zV#|@_w%9@=8$n$5njz&++61c-F5}GwE_hN_Ai%XJ0Yw5KSiD3TkIP)+gZTsOrp&~4FiEyq)S6lg76tn5`G9V6JV87Wd;oEs3m{9 z7AED*i-C9nb%peH>{?zVP0HOHU1t> zk6wChUPM)h%B+buFh5ARZ9+PF0x3$}b?5T4OljW1IeqC*Dvb0Wam za#QM~b%XN598o8qTP>z;z8Kx0M`j*CNBgSt01DYT`Z`<3S!AVQ=eP3bEuxjF;}Y@# z8!H9j{mBWM!q#`Rx*Dm<<;|Ak-FqF9pKc>-*;vPWs8BZ@667gq?`Q`_I;KepX`Lnp zXcEr|jh35rPJ}sH$`` z6!}w~pIfeM2(JinyA?8_tKFNd9}Z7YQd$V^(t8*!B-bB}EXtfs6-?BUs@Dhb2GV&z ze0r*V?vSTantEKhmC^+UdTdqAAWOMFn(7)0%7Q|-R_HLuF2+*k2ijSi;tMwMOy#@< zQer2LN2M1}ox0B1%}0c1WtQX{V)(ZqOWG;DsmkB3fYHp*ibS6RC7bfzycgeK+@B>z zKLnr7hbe2?lztc8<0SaOT3+mRZt9h2Z0&j;tC~om7UO{-7bfJa5(B8~*kZ`vv)=yT zG@^QgK6;{}cM+liNUlGebQcuG(URJ4Nr)Nt<+SF{Xuvuh4Z$8y4v{%fU`%ba_V12%<$gix)G-c zQbXyAgaWg7g*b+4KB9cqq^2S~2*Ha6!*tet3er%>wW73)y zUwnG?1YOGzkzgrNyLa-QZd@{C2k9kA$T(?F`kn!gy)b(rM;@|lRykjep=;Pb>kKyS&RucFY*+_S 
z)1hwX5X^;pvT;xZO}Crc=?PE^N*<=LOQS>RwoZW#L~VgsX47OkRk0>XrAIBcgGv^( zzAnHaegUFA1SP8~U;+e-athz3_X?X%B&`m9g*dUEVxvO+##J#1{JCS#PDg-4htT2q zmZMDhU@&*m$pQ1Fyf`O_IL@CmLx`br%p)RegqanPF9gsty9I=VLu#!8RdYTY1xGpW z#$mth||=CH<2$DR)+#KR!#{4Qb3nR^vM0bR##`Zo zEyP=!vm(wRL~4*rkhsPR5Q4|eT&h6=Vbhndg9%c&&zd5Gl#G~L1OR0R?FI&i8+^OA?RUN9U_ZV&s^?67=l6V zQvjd^EpoS5vjH*}!;SboARu1s0TXWuezPz*zon~sjqGPd&Zf`5+-UhN=i*nt@_dT$ zIkje@gs0qm055{|K!HsV`&(Uy8Thv?P2h2xZ- z7#t+yZYZWMv5jtYD%;eG2)WOw7q5TKxvhfwZkC3ky;4R&@Jm6mn00cArnLKzGaI z`3z(&ny67t_)e~dlBbJfK7@_7&?NhSQNT3!PR6IrJz|KBcylhpA#R258r8P*WJD;> zCFJnbZxw0Et!nnjcr4cEc(#oGb7YL>o^oH}aG2L_`F&{&Kvg2v615{sWU*d^oqq0g zzGxL`Es$n0GUdCwrzF6vz2{EO>L{)&EXb9H$n7lQ!#ZIj9XiK^&(WFezo~1)r=8~x zpH?&V@?7WfFgEXmS(%%4Q%H-hdcMk%`nUwYLo<^OVMw$U#2y^i2QS2v&_}1AH)P$& z@`CzlhAvT1z<1z;kFzVWa@kJu_(^Y^q7#?qz7q>yFKQW5O>F8~Eq&2{QaPVANXRCE z{$gC#@T!^>t13~es?esCTpA(Z(%E?dP)XDH$ed_FTix`?#he6cTNIj0FV;}WcMyeu zu8E3v@IMNCgZIN8hM0lPs-HQ?s^rIU4KzPfyuMGS$( z?cI{foU1L)WR2t|pm8POA~a4+qUa74gwsH7!MZejNp7*u_f;I#^<*JCF})-lWvc14 z)rsju6dC67#AJ34R!u*N=fq--2(gAPUlymlG`|IMcb&tz>$!Aa&-)Q0P$o<)qq|tTixlw*ZdH{9 z3CoN+GZYFBMo7u0URR_@*m=+vV=m=K0kj}#D{6&TuuNk+`un_W38jUD9dl4sX5&|| z1ywXuSyX01v|J=S?yzLe+6H-8@tcJ*)_q2V@Y80Q>^cw$`TRa?b*BYLijrR=gh!Nx z0@kuNpN{A4)mdVWWQ}n=2_=;u%_b+)o!*V%8Y%`UPsUC)zk_Jl5I;CCi1w@8W)!tM zg+b~b)dqTtkyVqlu6|z=qPL7iLu<8gA+Vl1lLLu$8pK=(QvZg8_vyyAD3NWMq5OP5OGu0>SZ}{$@zzMjL(oZqX`I(6&3P$ zb$*2S>b%k3kcR~A;9_lJ8o|MGJn$sG5dEOgfB{XbG>{KJ$g>d0_ogQ1m=OghVW(@; z-e9(KozBKa+7sS%Z<~`6D-#&$GOhQPWXEScfiddRsZ1tyk^eUdj9Vswu@UVHk<1W; zR8g+VUarX+4c3l;=iVPzC3LO|7S(k&5cqS>esq;h?|+VBo&bwdO}d&CO;`DN78k5A zAwleSYg_WE#tnJV>>-dJT|n)mLZA`Ij*4J+8Vx)7lr=d!%*#-#35xm5B&|8Bl9F}w zQ2UZfy-u&aAO?_Sr`O;JDNFE6S_XZ{)UN)^Y7O^0#SwJqYm=VL(2qUyDyuirupZgn zFf-nhKoYXT{MntNM_a2%?extcFZX`2bktP2g=sA3Ij<$h%vc=|{oXuv+7t-RR(Yh< z9HPy3D8?w&h4g|3@(ps@(}SEiWyq#Aa%#pi6BNmD%avLDw(*@uu0jX67L#~|8ov?8 z6YLqfG-+_BkI|@q+?&Ply${kkowiPXsSdYvlB;*_#MCPcdWA94hPIz!r8yL&J5zCd zLkLuK_v}=&sDmJt$u+~<4AP?eVM 
zxXx{PXa(|+O=E*jPSWWKvTG3}Ou9p`q^vwIuW7Hhp=3I5>P`Smn%12yxDyqt_e&H& zdB9*4mh%ms5Q-!EUTNcKGC3M1(dv!K(Id%x-aAS*n4CF|6@UuMBwIEo8!c|HL~YP+ ze=^4MPv8n475en+F-5iSWB7QU8JW>n!1e*5a_@G19R#5Kxp)gtw4U=BiXtP=z?G>ect;`l1iMCMC*kW1?czKaZ!iJJ`9dQ56A`EF*cb zp{&Z+i8_Q`DV!Sdayey36@Gs`yCjR{g~+F5;(Hs{?w-QUdK%uhwj`qhXh;{!`w`}T z@gWh#R;Y6mO7_pVFtfh%DbcPv^!V{EcC! zjjgfUSNpu-w6UA)73U^FF-qTcPj8~{*U|Wdnx0X=Gw6KKEaCxt&%Jaq+u6ls2H?RYa{=)_ zH2{^R)uo?wTgZz;y0?@6RIlUE))e|IhOoE*&8C+72`akz>_LxqYxtU9Z^_1UO1tXu zs)!2~yXBI|TEfHSx}gH1M@y2;d3ggdBqvEMK+)j$vs^&Ed2b9>Kb_1{ zB#QP9VCS<%&1W9Q)5!qsi!EB)>`kY``xu|6lW9Ce%&}Xru#9yb+3h9>E2&6RXSScV zK{L~iWyl7=a|Zt)@y|-H3H;c z^*Knv2%nX?QGVL=KCLj|!KKzwA%b^-7h5`^Otl?O7+AJHRxJdGCO2~@l=Qw10)PYw z$_z>^J~GLw2r}BplKTduK{_}^?=f(IRmqf?q6Qqb-f`YEf;{a}YzE!VU|umx%d(x4 zc6!$HvpI*SUqFQfRJ5d z-lh>_7x&hH6*NSR3?(?Ov9L&>nVY0rj;+EQi)R!HT{PIGv<_cOvj8SYPn9k5n6*Af z0@!0_J>~~e48}(l2U1`Wh;@+JIKMyccX#`vV$0~Zi67J_($fnroRdH>rktw9oE{1d zH_%g6nV{LbVV)V`J2h@jdew3mC159A{Y?t^a`d#LZ(@;o#Z1v0%h97;uI)P@rGaFW z{p8D9q^E@B!6_}*CGJZ8T_Mh6pbwXSW>EUOf*JjLoFG=E$DKMTOI^-I6+WcAI`xrC ztWbhs&zkUE*x4Y8N6^+(TEYP+NX(>`5$omuO|$-Yt3G;q46dM`@l3^`huXudr(zs?BrkpAay0NAxR&)=efohOz|a_ z)_}oS?ZQcqOso~H8;l4sal?#X5uONvmvVb+ai4H&ImX@`7Zey5oytaaGMOW#7BhT( zhoiiPi=tFUlT}R1^J#mv6UF#H>MWD6!lJiE%qdnZ!3mhlXg%=+sT3sSo!knp#YOOQyM;{Gh zfu%ri6IoUnoFPwJNVHa^Oocp%uwIEKL&bBtK|MXoLzWUCigl$XD+3XgIga9>?i`$j zQ8v4Z-c0{9_xv=ma`Tz2)k^FR!F{v7rj>=`6PR2xB}gnjS=XI`)I^^@9 z1PN-rv5N%8;Se^~;mNq44kqKBhvDK!=Vb1@Pk>jsmnGQcj?p58(Z$gr1(8#8Y8FMM zr1;{+*e>@IJZL|HebvRGyhx8lG?Q8m@0ymw4^y{c({~sZbZN9X7^lh647mp;^$l%) zcJ@sv4D5D;bw~=GQFyuDTm}9uVtgq#%uPjPl|aohv=O209j#4af!Y{^ z;iJVmZ{vV&_Fpa+GGQED!r`jONFXZ~S2MTuBuB6F!%95ZN}7EEFkr(xmg`q8nuW?Q zw%b&A5uk04fJ$f4jjDA2qQfPqAMbh7%(J|{FkT}blIqTAh9MP_DXUum5htoBqtEix zYD`0e%_ezckv!x8g9SX>G{Z`0J00hMS_K}1)TXk6$)Q-$WP2`|)ED$dFKj}#KyMO5 z*VD=VyiX_XNvda`MSY5*nnL4)W}&~B6R}9>kxwN1`r&|?s2Ftfwd+Kn8W)ZaK=7@B ze-QaB<%1;2Agc`vbU8sJiN^R6qA<0Db-Yasj4@ZAFp~FOEV^ogXi(X6sbXXSqpi_F 
z+u&Z*JzXLzjiA@)yL6)>A(klZxU9=(dsc))8}(_QSst>S1@_WMfkBU0B%0wmhyuK+ zY}@AvcI>7ok)yB^2o5A)aBdswu;R&KjLwWl2@+8p@uaks2Us%MNSo-ot%wJ_u(Dhf zHyB-;l^VUCAW`NyELThxGAfIYXOkz2^{-jNZ{(v3%Q(xjeWMPi%hTzN$TBO$Q;e(1 zRvQUZ(VvHk7m&Lo1|%9x$lu1EyoNtHw`qW@aqz{9W1ri|$nI#g zdlT*eQFqK3on=b>FS^|k!=^^36bVq{JqzI;Ox?EUu1WQ)vdF`qX7iHTrwL_TSf4_n zk;%c#n2{^SziE2qgZ0dk`Ef2i!qkNYofekFciEejowRsMm*;D)yyKM@ODdEf7EREb z1xwC4=C$P3A}*D0bVDh>Z-)bL*3V>@k+uP9k|$&kLX4UWWM!`-LC7SZHM5;6zd+c7 zjSj(1`JS8=?Q_6Ku@NbatPm1$v4hMS?a*=(xalazsF_Ph{*5@utyxzN7A{xDpvT=D zXFZYY_BFR2*^SXj1d=sDn*{o6ZDqCb1^K!Ddd2{x63rpN%JNw{0S^d88{{u79$X5Z zeA#4OJOCuU=2Kha!%+&=_06{Ky3%gmFw)T$uVqo+KrMrzsh5Z^s<}_M`$|)aCpy$u zz3VzLS6!M_<{8tX;^I3028mY$zqW|S$I^`wh zs+EVbg`s>x(S zsywts_aThv+8ZC2@|>&4w<;Z~*+x8%(}poIYg3Ihs=SgakeaLvDDhQLQsQ1R#5o7i zJT*1`h|v$xw954BAoY+EW!h8;NGV|4vLil5f{gmmP>_vjVX5BUM{~L);hdSgYX?nm z8*Mw(%w>HJZk{BjjDak3rQFTyJiQ!zY{`~8_dkg+Z1FI$`UD1Ozh|!(S z$I}@$mhx#Oq~>YB*lkTlQ-cd|Zqq@R4vY#WipOt2OMqo`~Qq=l#y;b3is{`tK z=sV-e@~Zoi)d`b?PrH_HbtFo(fwZ?jOt|IX3oV)Oe7P)*xaeRH=ew}}6jtWiYz#Em zp=22QyAlUc1c$$h=XaQruw__p(RD#ptMw|E;A^ocCuG^Gm|8qCywFx}7UfA>j@cb( zrD|ZA6H1pecz8`Fq?U{i;(jmf9~XjZo@6xRCRASJ$*Cx6;BOrFNXwT{XgR27lxNa` z%i&frud2HNHf}V{yQJ$_96&*ea4AVU1edUQ69V=fECCGyHc9uiyAF?C?+Eh!= zCNs2k!SUqj{rGgXuuTMC!K;c^PPUO!KB&>j8RvxY!6Baz%N)9BqcG@;eB7dnrIjYu zia@@ZqHb$Z(1y0?aL^%|O|cgBoHs)v^^FhN-9g~@Mr>L#ZN|5glX-G$c4dg%wVSBN z*nz%hzVh0^(82+7=-0QeBT1iNOPFgP5yLL`Lv;J0&N^vVJGo3nRk)YjkGTm4GsC#O z(q7TDQTTnO6-R%91Cw^@`wL!eYeTjgZ~W`9;M*uFfTV(!JB>Mxo*#{}L*0_8kTx=} z(1=YJdrj1*`U=w4qMw z+}7iy1-FrtuSMp~z17%CAErMe?EsbDNB0ghVyB}F2F6;{>gbH)M5R`DYL85&RAxdp z28JQh?Lt@WMoJv@BuKzFM9@R1S(&<%@h>_Ky|qVCF%VOuaMU}atTPyfbK5Gc7yIHa z6{FK>e34gPIKEHn)>LZDWc-{mQAt@dh*Hbtg$_&10z`1yt(QnS;xyEjulI*ZZ$_Tn zcD#dF75=4YSZUh_IZFuR)bi|%+toTfmGPZIQzOnd8q8sup@C%oI6g7~y&nh=t1 z+a2va6u;Tt zy|g!UzqigK(67+VXwiraAKFPFa8F3asCMOpbxku?!>)_9Z#kcfiYMQ6Sx1!8e?oDt z^9N@a@K&NR&1vMbLn4x%EfP13uhuv$0}#CsE(}L>TR-5JZO$5CtlGIlwuYY9h(l$D zlW}r~{FfG>eH_-)2$!`krx5~iKsK~}=>rP+f)TEoqMH6?Hw4{AqWwkR9D%z{QOn{- 
z$4?C4>AIcFPexD-u3n8h#=9P1ge=ZpX;bTsQX;3qriLIhZ>y#pmFb!63S;wCw1lVc zn!|pW!sI5Z!Kb3ZRj4&0Un?Q$J(m;a$;)#T>B7t0}m zp=>LGlXj395Zn=Zye5{vsKmfN=I;z|j06?oiQ0fKsh>ZlN~834|1d3T1FgcEejMjx z@dbN5get=JP*n&^Rbq#Vv1&t}!_>FvPLZFG9dzB>Uh)13J`H5XIRffVjS)H@C1V`O zX8fw`9|i0)OROrYT@-i3cx*40?QnsnMH>!UfwwEf<4isq%b#OvTm0w6ZdJG6H(&YN z@1s=4+kT&pi`64z)j_1}*cT06BwJ1h;+#b`s`Ucg^(CQ9rFmY~AzE|Nt~QFv3pj8j zXNTKpgWsDS&5>)yrl06!#XiQtP7O~KEQFH?&@7D34I5@h1~Nv#7u$8v%4Eroy=r}< z-4N=Uv6{Kdf?0)RtxOlpsSacY!GMhid6{!Ns7xsH=%hD0h_N(e|J+BYAE(7$?4FqgZtD zG*_H_LTA6~X|9OVRx91;jE3`h5Ws|vy%yU(WImnIuuff|o4Q;JY68phxibuA%axIN zZo)h-7U6mWyj4-LXZ@9|P;T|Ecv{NxE*tlu6q3ZflQR^Pc+S*h0kEH-cU|Ik2R9q) zseV!2M*Le>YJ6W-bRo=fRMV(87?-m%)MHW8k&Xcpo@04aHE z;8H=6ML#jQ05%uJx`L$MvRy;yy#<{$7+^Fpc+90L1CDW=c)f(zg-%O!a=EiOK4*V- zyMwdtEnG)$$qp-j%v1@kC?(B8y-Jgz>w^1RBw>(rS8=7P>DK-rJx4)>daR3!H!%gu zSsNh)%BU|%Kqb@MN|kv28|LKV(Q_$oUvF-2L0hRCqWZ&fG*)jse|&f<-e!@OoMmNE zmf1n=IC6yM$+REOCxtRE}BK1ZgkG9A#x(v0GbVCm60dV6TWel=2lO$fT-m% z4v_n5-MOwrnuwhMl-Okx+Vugg8=zU%0k&Cn{^Vx01I&EocYtBK00)>Ut7B1T@(z1* zRLo?D9ySOS0K;#SgzO-3bG3+@l7a}C@UUW>Pnfl&(Nx=xsE8EYH=>~6&@(=c=?N6r z)4?o7Hf+JgA*!GBZ_PV7i&0D~D%x9F1UZsJyzU`DW{d>7)>$GekrWNBiiP4PPcQ2A zQG0=2)FKo;ZnYp`N9*=vQ-y4BKL2x54Wwzr^wg#unx&jM~pDvfXp zk;xT)r<_R80fSZr)T(Q$V^vV%k4>x@>5uV=#hmqZ~+mFR^zA;q9Qp}caTis@N(n8 zQ9V5K(b5zuj4EK{4e{$u^_q?BpPYI@J)~S-PA~2*s$n8?*p=OlxI+ ze}uLv_)KT~d91(D$y&2%Yh^r1la(tE?R+J54p=!FpR7;;YHI~P0^H1>>5RXTLbbvl zF}|B6!yerrzfFf}YsH+Qy@Fp?jwbjsP0sMYN#9$UO~}vKT1k%M!&z^X#K#AiME}C< z{eH|bK$Lt@&BV%dme73g?9Y1ld2aokrqthQT3QUAU>4pR4f>dFlF~XbVxEfLv#pi> zWJKAb7)$m~j&MJ<q{)&Bl0In{3{bwqy$B=uJsM(k?Vx|p9Yz%-wPf1fPV zy<2|K&o6WyL)Y3rUxGhA4gd81pO;_s^OAire*bgM-|P7MQ}Ivle@1@M&%gPZd;?uW zKcCJY**p>c{=a|vrxU&Z!I$y{`uPnn;lJJcWt!Gk;TL*@-hcLa{EdFDemVdB=kY=B zuFrpvhg(9P_rLoJf1#hhb0!zchYP>!82?ji{onT!e4&1R2ThJ2UB5m@KOe() ze`4ML;0Mg*E&aSJ52W?eKV3gx)oLB#7upk@@9d}f8~yyydw2qUzTU6z|G3=$5Z~5% z?SN;}&)v`BFX;Zy$3OS}Z^jrsf!1bzo|Kiyr);Nwo9(*}fb$gYe?acj&yVQ?<%S2G z&u@3`Z+(|MUp}4Nv-Ruv-*fJNK&ckch;&w9D}NS6%zymzpp1^N)x@SU)cP{xH6?@4x)p 
ze7}Ca?|ZD9@vFj3NB^~R|HslVGhgZFEHVK zVO&i(ruSc#`}OmfUe5Z_`}FfS@!eP|T0h@!-Aljfefs&k7^6q%{dYf; z@7K@o|6;l;|GAX^YJU48{7|1+=R`}OlXoek6H>*q_I`=4Cn`}OmqpHH*rKbP`f&2OKJuW2rQ|GT&T zh8fq-r#Ff>Tq^$A`m6H%RcBo9f7BV*)#&G6m;0T|TK`Jcamo2Y@6+L^;{IQi`+x5< z^h*A8@BiwG`+xC`42QV_My7G^zg}_wtG|iwe?#%{X0Pp`H^}`i{N5q=4~aOeAAP=l z?pNIZ$0ONCy-?=RKkZKkMa`9WCw%|$5Acnzz{iJhlj5Dqua{ul0g<`kmtMkx|NV(R aiRUT))oOiC#r;oCzQWuvb8c|{Z2f;c_lB(i From ffb4c3d6224451290818cdb2d31f89c733601c14 Mon Sep 17 00:00:00 2001 From: Liangyx2 Date: Fri, 21 Jun 2024 16:29:54 +0800 Subject: [PATCH 19/21] fix codescan (#1632) Signed-off-by: Liangyx2 --- .pre-commit-config.yaml | 3 +- ...le Processors Product Specifications.html" | 17164 ---------------- 2 files changed, 1 insertion(+), 17166 deletions(-) delete mode 100644 "intel_extension_for_transformers/neural_chat/assets/docs/4th Generation Intel\302\256 Xeon\302\256 Scalable Processors Product Specifications.html" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 24ce3d4fe3f..da91235b092 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -57,8 +57,7 @@ repos: examples/huggingface/pytorch/text-generation/inference/prompt.json| intel_extension_for_transformers/transformers/runtime/oneDNN-THIRD-PARTY-PROGRAMS| docker/intel-tensorflow-third-party-programs.txt| - .github/workflows/sample_data/alpaca_data_sample_45.json| - intel_extension_for_transformers/neural_chat/assets/docs/4th\ Generation\ Intel®\ Xeon®\ Scalable\ Processors\ Product\ Specifications.html + .github/workflows/sample_data/alpaca_data_sample_45.json )$ - repo: https://github.com/Lucas-C/pre-commit-hooks diff --git "a/intel_extension_for_transformers/neural_chat/assets/docs/4th Generation Intel\302\256 Xeon\302\256 Scalable Processors Product Specifications.html" "b/intel_extension_for_transformers/neural_chat/assets/docs/4th Generation Intel\302\256 Xeon\302\256 Scalable Processors Product Specifications.html" deleted file mode 100644 index 54b492697fa..00000000000 
--- "a/intel_extension_for_transformers/neural_chat/assets/docs/4th Generation Intel\302\256 Xeon\302\256 Scalable Processors Product Specifications.html" +++ /dev/null @@ -1,17164 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - -4th Generation Intel® Xeon® Scalable Processors Product Specifications - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

- -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - - - - - - - - - - - -
- - - - - -
-
-
-
-
- -
-
-
-
-
- - - - - -
- - -
- - - - - - - -
-
-
-
-
-
-
-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-
- -
-
-
-
- - -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
-

4th Generation Intel® Xeon® Scalable Processors

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- Filter: - - View All - - | Embedded - - | Retail Box - | Server - -
- - -
-
-
-
-
- -
- 54 - Products - - - - COMPARE ALL - - - COMPARE NONE - - -
-
-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-
- Product Name -
-
-
-
-
- Launch Date -
-
-
-
-
- Total Cores -
-
-
-
-
- Max Turbo Frequency -
-
-
-
-
- Processor Base Frequency -
-
-
-
-
- Cache -
-
-
-
-
- TDP -
-
-
- - - - - - - - Q3'23 - - - - - - - - - - 24 - - - - - - - - - - 3.60 GHz - - - - - - - - - - 1.90 GHz - - - - - - - - - - 45 MB - - - - - - - - - - 185 W - - - -
- - - - - - - - Q3'23 - - - - - - - - - - 28 - - - - - - - - - - 3.60 GHz - - - - - - - - - - 2.00 GHz - - - - - - - - - - 52.5 MB - - - - - - - - - - 195 W - - - -
- - - - - - - - Q3'23 - - - - - - - - - - 32 - - - - - - - - - - 3.60 GHz - - - - - - - - - - 2.00 GHz - - - - - - - - - - 60 MB - - - - - - - - - - 205 W - - - -
- - - - - - - - Q3'23 - - - - - - - - - - 32 - - - - - - - - - - 3.60 GHz - - - - - - - - - - 2.00 GHz - - - - - - - - - - 60 MB - - - - - - - - - - 205 W - - - -
- - - - - - - - Q3'23 - - - - - - - - - - 32 - - - - - - - - - - 3.60 GHz - - - - - - - - - - 2.00 GHz - - - - - - - - - - 60 MB - - - - - - - - - - 195 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 16 - - - - - - - - - - 4.00 GHz - - - - - - - - - - 2.90 GHz - - - - - - - - - - 45 MB - - - - - - - - - - 270 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 28 - - - - - - - - - - 3.50 GHz - - - - - - - - - - 2.00 GHz - - - - - - - - - - 75 MB undefined - - - - - - - - - - 250 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 36 - - - - - - - - - - 3.20 GHz - - - - - - - - - - 2.00 GHz - - - - - - - - - - 67.5 MB - - - - - - - - - - 300 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 32 - - - - - - - - - - 3.40 GHz - - - - - - - - - - 2.10 GHz - - - - - - - - - - 82.5 MB - - - - - - - - - - 270 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 44 - - - - - - - - - - 3.80 GHz - - - - - - - - - - 2.70 GHz - - - - - - - - - - 82.5 MB - - - - - - - - - - 350 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 40 - - - - - - - - - - 3.80 GHz - - - - - - - - - - 2.20 GHz - - - - - - - - - - 105 MB - - - - - - - - - - 330 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 40 - - - - - - - - - - 3.70 GHz - - - - - - - - - - 2.00 GHz - - - - - - - - - - 105 MB - - - - - - - - - - 300 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 48 - - - - - - - - - - 3.70 GHz - - - - - - - - - - 2.20 GHz - - - - - - - - - - 97.5 MB - - - - - - - - - - 300 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 32 - - - - - - - - - - 4.10 GHz - - - - - - - - - - 2.80 GHz - - - - - - - - - - 60 MB - - - - - - - - - - 300 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 48 - - - - - - - - - - 3.80 GHz - - - - - - - - - - 2.10 GHz - - - - - - - - - - 105 MB - - - - - - - - - - 350 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 48 - - - - - - - - - - 3.80 GHz - - - - - - - - - - 2.10 GHz - - - - - - - - - - 105 MB - - - - - - - - - - 330 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 48 - - - - - - - - - - 3.80 GHz - - - - - - - - - - 2.40 GHz - - - - - - - - - - 97.5 MB - - - - - - - - - - 330 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 52 - - - - - - - - - - 3.80 GHz - - - - - - - - - - 2.00 GHz - - - - - - - - - - 105 MB - - - - - - - - - - 350 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 52 - - - - - - - - - - 3.60 GHz - - - - - - - - - - 1.70 GHz - - - - - - - - - - 97.5 MB - - - - - - - - - - 300 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 52 - - - - - - - - - - 3.80 GHz - - - - - - - - - - 2.10 GHz - - - - - - - - - - 105 MB - - - - - - - - - - 350 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 52 - - - - - - - - - - 3.60 GHz - - - - - - - - - - 1.80 GHz - - - - - - - - - - 97.5 MB - - - - - - - - - - 300 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 56 - - - - - - - - - - 3.80 GHz - - - - - - - - - - 2.00 GHz - - - - - - - - - - 105 MB - - - - - - - - - - 350 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 60 - - - - - - - - - - 3.50 GHz - - - - - - - - - - 1.90 GHz - - - - - - - - - - 112.5 MB - - - - - - - - - - 350 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 24 - - - - - - - - - - 3.90 GHz - - - - - - - - - - 1.90 GHz - - - - - - - - - - 45 MB - - - - - - - - - - 165 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 24 - - - - - - - - - - 3.90 GHz - - - - - - - - - - 2.10 GHz - - - - - - - - - - 45 MB - - - - - - - - - - 185 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 8 - - - - - - - - - - 4.10 GHz - - - - - - - - - - 2.90 GHz - - - - - - - - - - 22.5 MB - - - - - - - - - - 150 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 16 - - - - - - - - - - 4.00 GHz - - - - - - - - - - 2.00 GHz - - - - - - - - - - 30 MB - - - - - - - - - - 150 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 24 - - - - - - - - - - 3.80 GHz - - - - - - - - - - 1.80 GHz - - - - - - - - - - 45 MB - - - - - - - - - - 165 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 24 - - - - - - - - - - 3.80 GHz - - - - - - - - - - 2.00 GHz - - - - - - - - - - 45 MB - - - - - - - - - - 185 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 28 - - - - - - - - - - 4.10 GHz - - - - - - - - - - 2.00 GHz - - - - - - - - - - 52.5 MB - - - - - - - - - - 205 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 20 - - - - - - - - - - 4.00 GHz - - - - - - - - - - 2.10 GHz - - - - - - - - - - 37.5 MB - - - - - - - - - - 145 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 20 - - - - - - - - - - 4.10 GHz - - - - - - - - - - 2.30 GHz - - - - - - - - - - 37.5 MB - - - - - - - - - - 160 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 32 - - - - - - - - - - 3.40 GHz - - - - - - - - - - 2.00 GHz - - - - - - - - - - 60 MB - - - - - - - - - - 250 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 18 - - - - - - - - - - 4.20 GHz - - - - - - - - - - 2.20 GHz - - - - - - - - - - 45 MB - - - - - - - - - - 165 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 24 - - - - - - - - - - 4.00 GHz - - - - - - - - - - 2.10 GHz - - - - - - - - - - 60 MB - - - - - - - - - - 185 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 32 - - - - - - - - - - 3.60 GHz - - - - - - - - - - 1.80 GHz - - - - - - - - - - 60 MB - - - - - - - - - - 185 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 16 - - - - - - - - - - 4.10 GHz - - - - - - - - - - 2.50 GHz - - - - - - - - - - 37.5 MB - - - - - - - - - - 185 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 32 - - - - - - - - - - 3.80 GHz - - - - - - - - - - 1.80 GHz - - - - - - - - - - 60 MB - - - - - - - - - - 185 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 32 - - - - - - - - - - 3.40 GHz - - - - - - - - - - 2.10 GHz - - - - - - - - - - 60 MB - - - - - - - - - - 270 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 8 - - - - - - - - - - 4.10 GHz - - - - - - - - - - 3.70 GHz - - - - - - - - - - 22.5 MB - - - - - - - - - - 195 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 8 - - - - - - - - - - 4.10 GHz - - - - - - - - - - 3.70 GHz - - - - - - - - - - 22.5 MB - - - - - - - - - - 195 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 32 - - - - - - - - - - 3.90 GHz - - - - - - - - - - 2.20 GHz - - - - - - - - - - 60 MB - - - - - - - - - - 205 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 32 - - - - - - - - - - 3.60 GHz - - - - - - - - - - 2.00 GHz - - - - - - - - - - 60 MB - - - - - - - - - - 205 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 32 - - - - - - - - - - 4.00 GHz - - - - - - - - - - 2.00 GHz - - - - - - - - - - 60 MB - - - - - - - - - - 205 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 24 - - - - - - - - - - 4.00 GHz - - - - - - - - - - 2.60 GHz - - - - - - - - - - 60 MB - - - - - - - - - - 225 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 16 - - - - - - - - - - 4.00 GHz - - - - - - - - - - 3.60 GHz - - - - - - - - - - 45 MB - - - - - - - - - - 270 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 32 - - - - - - - - - - 4.10 GHz - - - - - - - - - - 2.40 GHz - - - - - - - - - - 60 MB - - - - - - - - - - 250 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 32 - - - - - - - - - - 4.10 GHz - - - - - - - - - - 2.10 GHz - - - - - - - - - - 60 MB - - - - - - - - - - 225 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 32 - - - - - - - - - - 3.40 GHz - - - - - - - - - - 2.20 GHz - - - - - - - - - - 60 MB - - - - - - - - - - 270 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 32 - - - - - - - - - - 4.00 GHz - - - - - - - - - - 3.10 GHz - - - - - - - - - - 60 MB - - - - - - - - - - 350 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 10 - - - - - - - - - - 4.00 GHz - - - - - - - - - - 2.70 GHz - - - - - - - - - - 26.25 MB - - - - - - - - - - 150 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 12 - - - - - - - - - - 3.90 GHz - - - - - - - - - - 2.00 GHz - - - - - - - - - - 30 MB - - - - - - - - - - 150 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 20 - - - - - - - - - - 3.90 GHz - - - - - - - - - - 2.00 GHz - - - - - - - - - - 37.5 MB - - - - - - - - - - 165 W - - - -
- - - - - - - - Q1'23 - - - - - - - - - - 8 - - - - - - - - - - 1.90 GHz - - - - - - - - - - 1.80 GHz - - - - - - - - - - 22.5 MB - - - - - - - - - - 125 W - - - -
- -
-
-
-
-
-
- - -
-
-
-
-
-
-

Advanced Search

-

Use this tool to filter Intel® processors by socket, number of cores, cache size, maximum memory, and more

-
- -
-
-
-
-
-
-
-
- -
-
-
-
-
-
-
-
-
- - - - - -
- - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - -
- - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - -
- - -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\ No newline at end of file From b7cbdd8d81ee603fbb6f04851d2d71a4ad6bb452 Mon Sep 17 00:00:00 2001 From: "Wang, Chang" Date: Mon, 24 Jun 2024 10:08:46 +0800 Subject: [PATCH 20/21] Migrate trainer INC 1.x API to 2.x (#1605) * migrate INC 1.x quantization api to 2.x Signed-off-by: changwangss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add examples Signed-off-by: changwangss --------- Signed-off-by: changwangss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- docs/api_doc/optimization/optimizer.rst | 7 - docs/devcatalog.md | 7 +- docs/distillation.md | 53 +- docs/examples.md | 10 +- docs/export.md | 6 +- docs/get_started.md | 16 +- docs/pruning.md | 63 +- docs/quantization.md | 54 +- .../language-modeling/bert-base-uncased.ipynb | 16 +- .../bert-base-uncased_SWAG.ipynb | 23 +- .../Dynamic_MiniLM_SQuAD.ipynb | 10 +- .../bert-base-uncased_distilled-squad.ipynb | 19 +- ...d-whole-word-masking-finetuned-squad.ipynb | 19 +- .../orchestrate_optimizations.ipynb | 45 +- .../orchestrate_optimizations_bert_mini.ipynb | 26 +- .../pytorch/question-answering/pruning.ipynb | 16 +- .../summarization/pegasus-samsum.ipynb | 16 +- .../bert-base-uncased-MRPC.ipynb | 27 +- .../orchestrate_optimizations.ipynb | 46 +- .../orchestrate_optimizations_bert_mini.ipynb | 30 +- .../pytorch/text-classification/pruning.ipynb | 17 +- .../distilbert_base_ner.ipynb | 14 +- .../pytorch/translation/t5-small.ipynb | 10 +- .../onnxruntime/optimization_README.md | 2 +- .../speech-recognition/quantization/README.md | 4 +- .../quantization/run_tuning.sh | 2 +- .../imagenet/vit/model_quant_convert.py | 54 +- .../deployment/imagenet/vit/run_vit.sh | 2 +- .../deployment/imagenet/vit/vit.yaml | 28 - .../quantization/README.md | 6 +- .../quantization/conf.yaml | 18 - .../quantization/run_image_classification.py | 72 +- .../quantization/run_tuning.sh | 6 +- .../quantization/vit_config.yaml | 30 - 
.../language-modeling/quantization/README.md | 13 +- .../language-modeling/quantization/run_clm.py | 54 +- .../language-modeling/quantization/run_mlm.py | 48 +- .../language-modeling/quantization/run_plm.py | 52 +- .../quantization/run_tuning.sh | 41 +- .../multiple-choice/quantization/README.md | 8 +- .../multiple-choice/quantization/run_swag.py | 54 +- .../quantization/run_tuning.sh | 14 +- .../pytorch/optimization_README.md | 2 +- .../question-answering/dynamic/README.md | 2 +- .../question-answering/dynamic/run_qa.py | 42 +- .../orchestrate_optimizations/run_qa.py | 45 +- .../pruning/group_lasso/README.md | 68 - .../pruning/group_lasso/bert_config.json | 13 - .../pruning/group_lasso/extract_features.py | 298 --- .../pruning/group_lasso/file_utils.py | 263 -- .../pruning/group_lasso/modeling.py | 1285 ---------- .../pruning/group_lasso/optimization.py | 174 -- .../pruning/group_lasso/requirements.txt | 18 - .../pruning/group_lasso/run_squad_sparse.py | 1285 ---------- .../pruning/group_lasso/schedulers.py | 131 - .../group_lasso/scripts/run_squad_sparse.sh | 117 - .../pruning/group_lasso/tokenization.py | 392 --- .../pruning/group_lasso/utils.py | 65 - .../pruning/longformer_triviaqa/README.md | 52 - .../modeling_longformer.py | 2282 ----------------- .../longformer_triviaqa/requirements.txt | 5 - .../longformer_triviaqa/run_qa_no_trainer.py | 1305 ---------- .../scripts/download_data_and_convert.sh | 19 - .../scripts/longformer_base_dense_fintune.sh | 23 - ...ngformer_base_sparse_global_4x1_pruning.sh | 32 - .../pruning/longformer_triviaqa/squad.py | 144 -- .../pruning/longformer_triviaqa/trainer_qa.py | 150 -- .../longformer_triviaqa/utils/__init__.py | 0 .../utils/convert_to_squad_format.py | 143 -- .../utils/dataset_utils.py | 75 - .../longformer_triviaqa/utils/file_utils.py | 38 - .../pruning/longformer_triviaqa/utils_qa.py | 451 ---- .../{basic_magnitude => magnitude}/README.md | 0 .../requirements.txt | 0 .../run_benchmark.sh | 0 .../{basic_magnitude => 
magnitude}/run_qa.py | 20 +- .../run_tuning.sh | 0 .../trainer_qa.py | 0 .../utils_qa.py | 0 .../question-answering/quantization/README.md | 13 +- .../question-answering/quantization/run_qa.py | 78 +- .../quantization/run_tuning.sh | 23 +- .../summarization/quantization/README.md | 8 +- .../quantization/run_benchmark.sh | 4 +- .../quantization/run_summarization.py | 59 +- .../summarization/quantization/run_tuning.sh | 12 +- .../distillation_for_quantization/run_glue.py | 30 +- .../orchestrate_optimizations/README.md | 4 +- .../orchestrate_optimizations/run_glue.py | 45 +- .../text-classification/pruning/run_glue.py | 20 +- .../quantization/README.md | 17 +- .../quantization/ptq/run_tuning.sh | 44 +- .../quantization/qat/run_tuning.sh | 7 +- .../quantization/run_glue.py | 49 +- .../quantization/run_glue_no_trainer.py | 563 ---- .../quantization/run_tuning.sh | 2 +- .../text-to-image/quantization/ptq/README.md | 2 +- .../quantization/ptq/run_diffusion.py | 40 +- .../quantization/ptq/run_tuning.sh | 6 +- .../text2text-generation/run_tuning.sh | 2 +- .../textual_inversion.py | 30 +- .../quantization/README.md | 6 +- .../quantization/run_ner.py | 66 +- .../quantization/run_tuning.sh | 11 +- .../translation/quantization/README.md | 10 +- .../quantization/run_translation.py | 64 +- .../translation/quantization/run_tuning.sh | 6 +- .../language-modeling/quantization/README.md | 63 - .../quantization/ptq/requirements.txt | 7 - .../quantization/ptq/run_benchmark.sh | 119 - .../quantization/ptq/run_tuning.sh | 115 - .../language-modeling/quantization/run_clm.py | 814 ------ .../language-modeling/quantization/run_mlm.py | 848 ------ .../multiple-choice/quantization/README.md | 34 - .../quantization/requirements.txt | 6 - .../quantization/run_benchmark.sh | 101 - .../multiple-choice/quantization/run_swag.py | 653 ----- .../quantization/run_tuning.sh | 91 - .../text-classification/pruning/README.md | 86 - .../pruning/requirements.txt | 6 - .../pruning/run_benchmark.sh | 90 - 
.../text-classification/pruning/run_glue.py | 689 ----- .../text-classification/pruning/run_tuning.sh | 88 - .../quantization/README.md | 132 - .../quantization/ptq/requirements.txt | 7 - .../quantization/ptq/run_benchmark.sh | 131 - .../quantization/ptq/run_tuning.sh | 115 - .../quantization/run_glue.py | 731 ------ .../quantization/README.md | 35 - .../quantization/requirements.txt | 7 - .../quantization/run_benchmark.sh | 129 - .../quantization/run_ner.py | 696 ----- .../quantization/run_tuning.sh | 104 - .../transformers/__init__.py | 14 +- .../transformers/config.py | 656 +---- .../transformers/distillation.py | 50 - .../transformers/optimizer.py | 466 ---- .../transformers/optimizer_tf.py | 733 ------ .../transformers/pruning.py | 73 - .../transformers/quantization.py | 10 - .../transformers/trainer.py | 400 +-- tests/CI/test_config.py | 240 -- tests/CI/test_quantization.py | 175 +- tests/CI/test_quantization_qa_ipex.py | 4 +- tests/Nightly/test_distillation.py | 85 +- .../Nightly/test_orchestrate_optimization.py | 56 +- tests/Nightly/test_pruning.py | 73 +- tests/Nightly/test_tf_distillation.py | 134 - tests/Nightly/test_tf_pruning.py | 147 -- tests/Nightly/test_tf_quantization.py | 132 - .../config/README.md | 4 +- .../config/config.yaml | 2 +- .../config/distillation_with_qat.yaml | 2 +- .../config/qat.yaml | 2 +- .../config/sat.yaml | 2 +- .../src/itrex_opt.py | 79 +- .../compression_aware_training/src/utils.py | 8 +- workflows/dlsa/run_dlsa.py | 14 +- .../src/finetune_itrex.py | 3 - .../src/infer_itrex.py | 14 +- 160 files changed, 1254 insertions(+), 19439 deletions(-) delete mode 100644 docs/api_doc/optimization/optimizer.rst delete mode 100644 examples/huggingface/pytorch/image-classification/deployment/imagenet/vit/vit.yaml delete mode 100644 examples/huggingface/pytorch/image-classification/quantization/conf.yaml delete mode 100644 examples/huggingface/pytorch/image-classification/quantization/vit_config.yaml delete mode 100644 
examples/huggingface/pytorch/question-answering/pruning/group_lasso/README.md delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/group_lasso/bert_config.json delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/group_lasso/extract_features.py delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/group_lasso/file_utils.py delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/group_lasso/modeling.py delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/group_lasso/optimization.py delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/group_lasso/requirements.txt delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/group_lasso/run_squad_sparse.py delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/group_lasso/schedulers.py delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/group_lasso/scripts/run_squad_sparse.sh delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/group_lasso/tokenization.py delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/group_lasso/utils.py delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/README.md delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/modeling_longformer.py delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/requirements.txt delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/run_qa_no_trainer.py delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/scripts/download_data_and_convert.sh delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/scripts/longformer_base_dense_fintune.sh delete mode 100644 
examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/scripts/longformer_base_sparse_global_4x1_pruning.sh delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/squad.py delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/trainer_qa.py delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils/__init__.py delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils/convert_to_squad_format.py delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils/dataset_utils.py delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils/file_utils.py delete mode 100644 examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils_qa.py rename examples/huggingface/pytorch/question-answering/pruning/{basic_magnitude => magnitude}/README.md (100%) rename examples/huggingface/pytorch/question-answering/pruning/{basic_magnitude => magnitude}/requirements.txt (100%) rename examples/huggingface/pytorch/question-answering/pruning/{basic_magnitude => magnitude}/run_benchmark.sh (100%) rename examples/huggingface/pytorch/question-answering/pruning/{basic_magnitude => magnitude}/run_qa.py (97%) rename examples/huggingface/pytorch/question-answering/pruning/{basic_magnitude => magnitude}/run_tuning.sh (100%) rename examples/huggingface/pytorch/question-answering/pruning/{basic_magnitude => magnitude}/trainer_qa.py (100%) rename examples/huggingface/pytorch/question-answering/pruning/{basic_magnitude => magnitude}/utils_qa.py (100%) delete mode 100644 examples/huggingface/pytorch/text-classification/quantization/run_glue_no_trainer.py delete mode 100644 examples/huggingface/tensorflow/language-modeling/quantization/README.md delete mode 100644 
examples/huggingface/tensorflow/language-modeling/quantization/ptq/requirements.txt delete mode 100644 examples/huggingface/tensorflow/language-modeling/quantization/ptq/run_benchmark.sh delete mode 100644 examples/huggingface/tensorflow/language-modeling/quantization/ptq/run_tuning.sh delete mode 100644 examples/huggingface/tensorflow/language-modeling/quantization/run_clm.py delete mode 100644 examples/huggingface/tensorflow/language-modeling/quantization/run_mlm.py delete mode 100644 examples/huggingface/tensorflow/multiple-choice/quantization/README.md delete mode 100644 examples/huggingface/tensorflow/multiple-choice/quantization/requirements.txt delete mode 100644 examples/huggingface/tensorflow/multiple-choice/quantization/run_benchmark.sh delete mode 100644 examples/huggingface/tensorflow/multiple-choice/quantization/run_swag.py delete mode 100644 examples/huggingface/tensorflow/multiple-choice/quantization/run_tuning.sh delete mode 100644 examples/huggingface/tensorflow/text-classification/pruning/README.md delete mode 100644 examples/huggingface/tensorflow/text-classification/pruning/requirements.txt delete mode 100644 examples/huggingface/tensorflow/text-classification/pruning/run_benchmark.sh delete mode 100644 examples/huggingface/tensorflow/text-classification/pruning/run_glue.py delete mode 100644 examples/huggingface/tensorflow/text-classification/pruning/run_tuning.sh delete mode 100644 examples/huggingface/tensorflow/text-classification/quantization/README.md delete mode 100644 examples/huggingface/tensorflow/text-classification/quantization/ptq/requirements.txt delete mode 100644 examples/huggingface/tensorflow/text-classification/quantization/ptq/run_benchmark.sh delete mode 100644 examples/huggingface/tensorflow/text-classification/quantization/ptq/run_tuning.sh delete mode 100644 examples/huggingface/tensorflow/text-classification/quantization/run_glue.py delete mode 100644 
examples/huggingface/tensorflow/token-classification/quantization/README.md delete mode 100644 examples/huggingface/tensorflow/token-classification/quantization/requirements.txt delete mode 100644 examples/huggingface/tensorflow/token-classification/quantization/run_benchmark.sh delete mode 100644 examples/huggingface/tensorflow/token-classification/quantization/run_ner.py delete mode 100644 examples/huggingface/tensorflow/token-classification/quantization/run_tuning.sh delete mode 100644 intel_extension_for_transformers/transformers/distillation.py delete mode 100644 intel_extension_for_transformers/transformers/optimizer.py delete mode 100644 intel_extension_for_transformers/transformers/optimizer_tf.py delete mode 100644 intel_extension_for_transformers/transformers/pruning.py delete mode 100644 tests/CI/test_config.py delete mode 100644 tests/Nightly/test_tf_distillation.py delete mode 100644 tests/Nightly/test_tf_pruning.py delete mode 100644 tests/Nightly/test_tf_quantization.py diff --git a/docs/api_doc/optimization/optimizer.rst b/docs/api_doc/optimization/optimizer.rst deleted file mode 100644 index f4b31c471b9..00000000000 --- a/docs/api_doc/optimization/optimizer.rst +++ /dev/null @@ -1,7 +0,0 @@ -PyTorch Optimizer -============== - -.. 
autoapisummary:: - - intel_extension_for_transformers.transformers.optimizer.NoTrainerOptimizer - intel_extension_for_transformers.transformers.optimizer.Orchestrate_optimizer diff --git a/docs/devcatalog.md b/docs/devcatalog.md index ab826d913d1..30f371489b2 100644 --- a/docs/devcatalog.md +++ b/docs/devcatalog.md @@ -99,7 +99,8 @@ raw_datasets = raw_datasets.map(lambda e: tokenizer(e['sentence'], truncation=Tr Documentation for API usage can be found [here](https://github.com/intel/intel-extension-for-transformers/tree/main/docs) ```python -from intel_extension_for_transformers.transformers import QuantizationConfig, metrics, objectives +from intel_extension_for_transformers.transformers import metrics, objectives +from neural_compressor.config import PostTrainingQuantConfig from intel_extension_for_transformers.transformers.trainer import NLPTrainer # load config, model and metric config = AutoConfig.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english",num_labels=2) @@ -120,7 +121,9 @@ trainer = NLPTrainer(model=model, tokenizer=tokenizer ) # model quantization using trainer -q_config = QuantizationConfig(metrics=[metrics.Metric(name="eval_accuracy")]) +tune_metric = metrics.Metric(name="eval_accuracy") +trainer.metrics = tune_metric +q_config = PostTrainingQuantConfig() model = trainer.quantize(quant_config=q_config) # test sentiment analysis with quantization diff --git a/docs/distillation.md b/docs/distillation.md index 871bcc73b47..b74c5349f15 100644 --- a/docs/distillation.md +++ b/docs/distillation.md @@ -49,39 +49,20 @@ Where $D$ is a distance measurement as before, $F_t^{n_i}$ the output feature of ## usage ### Pytorch Script: ```python -from intel_extension_for_transformers.transformers import metric, objectives, DistillationConfig, Criterion + from intel_extension_for_transformers.transformers.trainer import NLPTrainer +from neural_compressor.config import DistillationConfig # Replace transformers.Trainer with NLPTrainer # trainer = 
transformers.Trainer(......) trainer = NLPTrainer(......) metric = metrics.Metric(name="eval_accuracy") -d_conf = DistillationConfig(metrics=tune_metric) -model = trainer.distill( - distillation_config=d_conf, teacher_model=teacher_model -) +trainer.metrics = metric +d_conf = DistillationConfig(teacher_model=teacher_model, criterion=criterion) +model = trainer.distill(distillation_config=d_conf) ``` Please refer to [example](../examples/huggingface/pytorch/text-classification/distillation/run_glue.py) for the details. -### Tensorflow Script: -```python -from intel_extension_for_transformers.transformers import (DistillationConfig, metrics) -from intel_extension_for_transformers.transformers.distillation import Criterion - -optimizer = TFOptimization(...) -metric_ = metrics.Metric(name="eval_accuracy") -criterion = Criterion(name='KnowledgeLoss', - layer_mappings=[['classifier', 'classifier']], - loss_types=['CE', 'CE'], - loss_weight_ratio=[0.5, 0.5], - add_origin_loss=False) -distillation_conf = DistillationConfig(metrics=metric_, - criterion=criterion) -distilled_model = optimizer.distill( - distillation_config=distillation_conf, - teacher_model=teacher_model) -``` -Please refer to [example](../examples/huggingface/tensorflow/text-classification/distillation/run_glue.py) for the details. ### Create an Instance of Metric The Metric defines which metric will be used to measure the performance of tuned models. - example: @@ -94,19 +75,23 @@ The Metric defines which metric will be used to measure the performance of tuned ### Create an Instance of Criterion(Optional) The criterion used in training phase. 
-- arguments: +- KnowledgeDistillationLossConfig arguments: |Argument |Type |Description |Default value | |:----------|:----------|:-----------------------------------------------|:----------------| - |name |String|Name of criterion, like:"KnowledgeLoss", "IntermediateLayersLoss" |"KnowledgeLoss"| |temperature|Float |parameter for KnowledgeDistillationLoss |1.0 | |loss_types|List of string|Type of loss |['CE', 'CE'] | |loss_weight_ratio|List of float|weight ratio of loss |[0.5, 0.5] | + +- IntermediateLayersKnowledgeDistillationLossConfig arguments: + |Argument |Type |Description |Default value | + |:----------|:----------|:-----------------------------------------------|:----------------| + |loss_types|List of string|Type of loss |['CE', 'CE'] | + |loss_weight_ratio|List of float|weight ratio of loss |[0.5, 0.5] | |layer_mappings|List|parameter for IntermediateLayersLoss |[] | |add_origin_loss|bool|parameter for IntermediateLayersLoss |False | - - example: ```python - criterion = Criterion(name='KnowledgeLoss') + criterion = KnowledgeDistillationLossConfig() ``` ### Create an Instance of DistillationConfig @@ -115,20 +100,18 @@ The DistillationConfig contains all the information related to the model distill - arguments: |Argument |Type |Description |Default value | |:----------|:----------|:-----------------------------------------------|:----------------| - |framework |string |which framework you used |"pytorch" | - |criterion|Criterion |criterion of training |"KnowledgeLoss"| - |metrics |Metric |Used to evaluate accuracy of tuning model, no need for NoTrainerOptimizer|None | + |teacher_model |torch.nn.Module | teacher model object |None | + |criterion|Criterion |criterion of training |KnowledgeLoss object| + - example: ```python - d_conf = DistillationConfig(metrics=metric, criterion=criterion) + d_conf = DistillationConfig(teacher_model=teacher_model, criterion=criterion) ``` ### Distill with Trainer - Distill with Trainer NLPTrainer inherits from 
transformers.Trainer, so you can create a trainer as in examples of Transformers. Then you can distill model with trainer.distill function. ```python - model = trainer.distill( - distillation_config=d_conf, teacher_model=teacher_model - ) + model = trainer.distill(distillation_config=d_conf) ``` diff --git a/docs/examples.md b/docs/examples.md index b6fe8b0e6af..5e833e9bbca 100644 --- a/docs/examples.md +++ b/docs/examples.md @@ -37,8 +37,8 @@ Intel Extension for Transformers is a powerful toolkit with multiple model optim Model Task Dataset - PostTrainingDynamic - PostTrainingStatic + dynamic + static @@ -177,7 +177,7 @@ Intel Extension for Transformers is a powerful toolkit with multiple model optim Model Task Dataset - QuantizationAwareTraining + qat No Trainer quantization @@ -206,7 +206,7 @@ Intel Extension for Transformers is a powerful toolkit with multiple model optim Model Task Dataset - PostTrainingStatic + static @@ -232,7 +232,7 @@ Intel Extension for Transformers is a powerful toolkit with multiple model optim Model Task Dataset - PostTrainingStatic + static diff --git a/docs/export.md b/docs/export.md index c0fda81a54a..619402e3b14 100644 --- a/docs/export.md +++ b/docs/export.md @@ -22,9 +22,9 @@ We support exporting PyTorch models into ONNX models with our well-designed API | Input Model | Export FP32 | Export BF16 | Export INT8 | | --- | --- | --- | --- | | FP32 PyTorch Model | ✔ | ✔ | / | -| INT8 PyTorch Model
(PostTrainingDynamic) | / | / | ✔ | -| INT8 PyTorch Model
(PostTrainingStatic) | / | / | ✔ | -| INT8 PyTorch Model
(QuantizationAwareTraining) | / | / | ✔ | +| INT8 PyTorch Model
(dynamic) | / | / | ✔ | +| INT8 PyTorch Model
(static) | / | / | ✔ | +| INT8 PyTorch Model
(qat) | / | / | ✔ | ## Examples diff --git a/docs/get_started.md b/docs/get_started.md index 492b505eeb8..ea807226f03 100644 --- a/docs/get_started.md +++ b/docs/get_started.md @@ -13,7 +13,7 @@ ## Quantization ```python -from intel_extension_for_transformers.transformers import QuantizationConfig, metrics, objectives +from neural_compressor.config import PostTrainingQuantConfig from intel_extension_for_transformers.transformers.trainer import NLPTrainer config = AutoConfig.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english",num_labels=2) @@ -27,7 +27,9 @@ trainer = NLPTrainer(model=model, eval_dataset=raw_datasets["validation"], tokenizer=tokenizer ) -q_config = QuantizationConfig(metrics=[metrics.Metric(name="eval_loss", greater_is_better=False)]) +q_config = PostTrainingQuantConfig( + approach="static", +) model = trainer.quantize(quant_config=q_config) input = tokenizer("I like Intel Extension for Transformers", return_tensors="pt") @@ -73,17 +75,17 @@ model = trainer.distill(distillation_config=d_conf, teacher_model=teacher_model) ## Quantized Length Adaptive Transformer Quantized Length Adaptive Transformer leverages sequence-length reduction and low-bit representation techniques to further enhance model inference performance, enabling adaptive sequence-length sizes to accommodate different computational budget requirements with an optimal accuracy efficiency tradeoff. ```python -from intel_extension_for_transformers.transformers import QuantizationConfig, DynamicLengthConfig, metric, objectives +from intel_extension_for_transformers.transformers import DynamicLengthConfig, metric, objectives +from neural_compressor.config import PostTrainingQuantConfig from intel_extension_for_transformers.transformers.trainer import NLPTrainer # Replace transformers.Trainer with NLPTrainer # trainer = transformers.Trainer(...) trainer = NLPTrainer(...) 
metric = metrics.Metric(name="eval_f1", is_relative=True, criterion=0.01) -q_config = QuantizationConfig( - approach="PostTrainingStatic", - metrics=[metric], - objectives=[objectives.performance] +trainer.metrics = metric +q_config = PostTrainingQuantConfig( + approach="static" ) # Apply the length config dynamic_length_config = DynamicLengthConfig(length_config=length_config) diff --git a/docs/pruning.md b/docs/pruning.md index 3e5b46ae136..f5909bb2008 100644 --- a/docs/pruning.md +++ b/docs/pruning.md @@ -7,32 +7,23 @@ Pruning ## Introduction Pruning is the process of removing redundant parameters of a network. The idea bears similarity to the ["optimal brain damage"](http://yann.lecun.com/exdb/publis/pdf/lecun-90b.pdf) hypothesis by Yann LeCun. There are two types of pruning: Unstructured and Structured. Unstructured pruning means finding and removing the less salient connection in the model, the place could be anywhere in the matrix. Structured pruning means deleting entire blocks, filters, or channels. -## Pruning types - -There are three pruning types in Intel® Extension for Transformers: - -- Magnitude (Unstructured) - - The algorithm prunes the weight by the lowest absolute value at each layer with a given sparsity target. - -- Group Lasso (Structured) - - The algorithm uses Group lasso regularization to prune entire rows, columns, or blocks of parameters that result in a smaller dense network. - -- Pattern Lock (Unstructured & Structured) - - The algorithm locks the sparsity pattern in fine tune phase by freezing those zero values of the weight tensor during the weight update of training. 
- ## Usage ### Script: ```python -from intel_extension_for_transformers.transformers import metrics, objectives, PrunerConfig, PruningConfig, +from intel_extension_for_transformers.transformers import metrics +from neural_compressor.config import WeightPruningConfig from intel_extension_for_transformers.transformers.trainer import NLPTrainer # Replace transformers.Trainer with NLPTrainer # trainer = transformers.Trainer(......) trainer = NLPTrainer(......) metric = metrics.Metric(name="eval_accuracy") -pruner_config = PrunerConfig(prune_type='BasicMagnitude', target_sparsity_ratio=0.9) -p_conf = PruningConfig(pruner_config=[pruner_config], metrics=metric) -model = trainer.prune(pruning_config=p_conf) +trainer.metrics = tune_metric +pruning_conf = WeightPruningConfig([{"start_step": 0, "end_step": 2}], + target_sparsity=0.9, + pruning_scope="local", + pruning_type="magnitude") +model = trainer.prune(pruning_config=pruning_conf) ``` Please refer to [example](../examples/huggingface/pytorch/text-classification/pruning) for the details. @@ -45,41 +36,27 @@ The Metric defines which metric will be used to measure the performance of tuned Please refer to [metrics document](metrics.md) for the details. -### Create list of an instance of PrunerConfig(Optional) -PrunerConfig defines which pruning algorithm to use and how to apply it during the training process. Intel® Extension for Transformers supports pruning types "BasicMagnitude", "PatternLock", and "GroupLasso". You can create different pruners for different layers. +### Create an instance of WeightPruningConfig +[WeightPruningConfig](neural-compressor_neural_compressor_config.py at master · intel_neural-compressor.html) defines which pruning algorithm to use and how to apply it during the training process. Intel® Extension for Transformers supports pruning types "magnitude", "pattern_lock", and "GroupLasso". You can create different pruners for different layers. 
- arguments: |Argument |Type |Description |Default value | |:----------|:----------|:-----------------------------------------------|:----------------| - |epoch_range|list of integer|Which epochs to pruning |[0, 4] | - |initial_sparsity_ratio|float |Initial sparsity goal |0.0 | - |target_sparsity_ratio|float |Target sparsity goal |0.97 | + |pruning_configs |list of dicts|List of pruning configuration dicts |[{}] | + |target_sparsity |float |Target sparsity goal |0.90 | |update_frequency|integer|Frequency to updating sparsity |1 | - |prune_type|string|Pruning algorithm |'BasicMagnitude' | - |method|string|Pruning method |'per_tensor' | - |names|list of string|List of weight name to be pruned. If no weight is specified, all weights of the model will be pruned|[]| - |parameters|dict of string|The hyper-parameters for pruning, refer to [the link](https://github.com/intel/neural-compressor/blob/master/docs/source/pruning.md)|None| + |pruning_type |string|Pruning algorithm |'snip_momentum' | + -- example: - ```python - pruner_config = PrunerConfig(prune_type='BasicMagnitude', target_sparsity_ratio=0.9) - ``` - -### Create an instance of PruningConfig -The PruningConfig contains all the information related to the model pruning behavior. If you have created Metric and PrunerConfig instance, then you can create an instance of PruningConfig. Metric and pruner are optional. 
- -- arguments: - |Argument |Type |Description |Default value | - |:----------|:----------|:-----------------------------------------------|:----------------| - |framework |string |Which framework you used |"pytorch" | - |initial_sparsity_ratio|float |Initial sparsity goal, if pruner_config argument is defined, it didn't need |0.0| - |target_sparsity_ratio|float |Target sparsity goal, if pruner argument is defined, it didn't need |0.97| - |metrics |Metric |Used to evaluate accuracy of tuning model, no need for NoTrainerOptimizer|None | - |pruner_config |PrunerConfig |Defined pruning behavior, if it is None, then NLP will create a default a pruner with 'BasicMagnitude' pruning type |None | +The WeightPruningConfig contains all the information related to the model pruning behavior. If you have created Metric and WeightPruningConfig instance, then you can create an instance of WeightPruningConfig. Metric and pruner are optional. - example: ```python - pruning_conf = PruningConfig(pruner_config=[pruner_config], metrics=tune_metric) + from neural_compressor.config import WeightPruningConfig + pruning_conf = WeightPruningConfig([{"start_step": 0, "end_step": 2}], + target_sparsity=0.9, + pruning_scope="local", + pruning_type="magnitude") ``` ### Prune with Trainer diff --git a/docs/quantization.md b/docs/quantization.md index 7d4ff061503..93e621a0db9 100644 --- a/docs/quantization.md +++ b/docs/quantization.md @@ -134,33 +134,18 @@ Quantization methods include the following three types: ## Get Started ### Script: ```python -from intel_extension_for_transformers.transformers import metric, objectives, QuantizationConfig +from neural_compressor.config import PostTrainingQuantConfig from intel_extension_for_transformers.transformers.trainer import NLPTrainer # Replace transformers.Trainer with NLPTrainer # trainer = transformers.Trainer(......) trainer = NLPTrainer(......) 
-metric = metrics.Metric( - name="eval_f1", is_relative=True, criterion=0.01 -) -objective = objectives.performance -q_config = QuantizationConfig( - approach="PostTrainingStatic", - metrics=[metric], - objectives=[objective] +q_config = PostTrainingQuantConfig( + approach="static" ) model = trainer.quantize(quant_config=q_config) ``` Please refer to [quantization example](../examples/huggingface/pytorch/text-classification/quantization/run_glue.py) for the details. -### Create an Instance of Metric -The Metric defines which metric will be used to measure the performance of tuned models. -- example: - ```python - metric = metrics.Metric(name="eval_f1", greater_is_better=True, is_relative=True, criterion=0.01, weight_ratio=None) - ``` - - Please refer to [metrics document](metrics.md) for the details. - ### Create an Instance of Objective(Optional) In terms of evaluating the status of a specific model during tuning, we should have general objectives to measure the status of different models. @@ -172,25 +157,28 @@ In terms of evaluating the status of a specific model during tuning, we should h Please refer to [objective document](objectives.md) for the details. ### Create an Instance of QuantizationConfig -The QuantizationConfig contains all the information related to the model quantization behavior. If you have created Metric and Objective instance(default Objective is "performance"), then you can create an instance of QuantizationConfig. - -- arguments: +The QuantizationConfig contains all the information related to the model quantization behavior. If you have created Metric and Objective instance(default Objective is "performance"), then you can create an instance of PostTrainingQuantConfig or QuantizationAwareTrainingConfig. 
-|Argument |Type |Description |Default value | -|:----------|:----------|:-----------------------------------------------|:----------------| -|framework |string |Which framework you used |"pytorch" | -|approach |string |Which quantization approach you used |"PostTrainingStatic"| -|timeout |integer |Tuning timeout(seconds), 0 means early stop; combine with max_trials field to decide when to exit|0 | -|max_trials |integer |Max tune times |100 | -|metrics |list of Metric|Used to evaluate accuracy of tuning model, no need for NoTrainerOptimizer|None | -|objectives |list of Objective|Objective with accuracy constraint guaranteed|performance| - example: ```python - q_config = QuantizationConfig( - approach="PostTrainingDynamic", - metrics=[metric], - objectives=[objective] + from neural_compressor.config import ( + PostTrainingQuantConfig, + QuantizationAwareTrainingConfig, + TuningCriterion, + AccuracyCriterion + ) + + tuning_criterion = TuningCriterion(max_trials=600, objective=["performance"]) + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, # optional. + criterion="relative", # optional. Available values are "relative" and "absolute". + tolerable_loss=0.01, # optional. 
+ ) + q_config = PostTrainingQuantConfig( + approach="dynamic", + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion ) ``` diff --git a/docs/tutorials/pytorch/language-modeling/bert-base-uncased.ipynb b/docs/tutorials/pytorch/language-modeling/bert-base-uncased.ipynb index c25aff405e5..f87803721a2 100644 --- a/docs/tutorials/pytorch/language-modeling/bert-base-uncased.ipynb +++ b/docs/tutorials/pytorch/language-modeling/bert-base-uncased.ipynb @@ -80,7 +80,8 @@ "from dataclasses import dataclass, field\n", "from datasets import load_dataset, load_metric\n", "from itertools import chain\n", - "from intel_extension_for_transformers.transformers import metrics, OptimizedModel, QuantizationConfig\n", + "from intel_extension_for_transformers.transformers import metrics, OptimizedModel\n", + "from neural_compressor.config import PostTrainingQuantConfig\n", "from intel_extension_for_transformers.transformers.trainer import NLPTrainer\n", "from transformers import (\n", " CONFIG_MAPPING,\n", @@ -373,9 +374,10 @@ " criterion=\"0.25\", # Performance tolerance when optimizing the model.\n", " greater_is_better=False \n", ")\n", - "quantization_config = QuantizationConfig(\n", - " approach=\"PostTrainingStatic\",\n", - " metrics=[tune_metric],\n", + "\n", + "trainer_ptq_static.metrics = tune_metric\n", + "quantization_config = PostTrainingQuantConfig(\n", + " approach=\"static\"\n", ")\n", "\n", "# run quantization\n", @@ -473,9 +475,9 @@ " criterion=\"0.25\", # why performance tolerance\n", " greater_is_better=False\n", ")\n", - "quantization_config = QuantizationConfig(\n", - " approach=\"PostTrainingDynamic\",\n", - " metrics=[tune_metric],\n", + "trainer_ptq_dynamic.metrics = tune_metric\n", + "quantization_config = PostTrainingQuantConfig(\n", + " approach=\"dynamic\",\n", ")\n", "\n", "# run quantization\n", diff --git a/docs/tutorials/pytorch/multiple-choice/bert-base-uncased_SWAG.ipynb 
b/docs/tutorials/pytorch/multiple-choice/bert-base-uncased_SWAG.ipynb index 9f554cbd1a0..07cd55ec7f9 100644 --- a/docs/tutorials/pytorch/multiple-choice/bert-base-uncased_SWAG.ipynb +++ b/docs/tutorials/pytorch/multiple-choice/bert-base-uncased_SWAG.ipynb @@ -108,7 +108,8 @@ "from dataclasses import dataclass, field\n", "from datasets import load_dataset\n", "from itertools import chain\n", - "from intel_extension_for_transformers.transformers import metrics, OptimizedModel, QuantizationConfig\n", + "from intel_extension_for_transformers.transformers import metrics, OptimizedModel\n", + "from neural_compressor.config import PostTrainingQuantConfig\n", "from intel_extension_for_transformers.transformers.trainer import NLPTrainer\n", "from transformers import (\n", " AutoConfig,\n", @@ -199,9 +200,9 @@ " metadata={\"help\": \"Whether or not to apply quantization.\"},\n", " )\n", " quantization_approach: Optional[str] = field(\n", - " default=\"POSTTRAININGSTATIC\",\n", - " metadata={\"help\": \"Quantization approach. Supported approach are POSTTRAININGSTATIC, \"\n", - " \"POSTTRAININGDYNAMIC and QUANTIZATIONAWARETRAINING.\"},\n", + " default=\"static\",\n", + " metadata={\"help\": \"Quantization approach. 
Supported approach are static, \"\n", + " \"dynamic and qat.\"},\n", " )" ] }, @@ -235,7 +236,7 @@ ")\n", "optim_args = OptimizationArguments(\n", " tune=True,\n", - " quantization_approach=\"PostTrainingStatic\"\n", + " quantization_approach=\"static\"\n", ")" ] }, @@ -438,9 +439,9 @@ " is_relative=True, # Metric tolerance mode, True is for relative, otherwise for absolute.\n", " criterion=\"0.25\", # Performance tolerance when optimizing the model.\n", ")\n", - "quantization_config = QuantizationConfig(\n", - " approach=\"PostTrainingStatic\",\n", - " metrics=[tune_metric],\n", + "trainer_static.metrics = tune_metric\n", + "quantization_config = PostTrainingQuantConfig(\n", + " approach=\"static\",\n", ")\n", "\n", "# run quantization\n", @@ -547,9 +548,9 @@ " is_relative=True, # Metric tolerance mode, True is for relative, otherwise for absolute.\n", " criterion=\"0.25\", # Performance tolerance when optimizing the model.\n", ")\n", - "quantization_config = QuantizationConfig(\n", - " approach=\"PostTrainingDynamic\",\n", - " metrics=[tune_metric],\n", + "trainer_dynamic.metrics = tune_metric\n", + "quantization_config = PostTrainingQuantConfig(\n", + " approach=\"dynamic\",\n", ")\n", "\n", "# run quantization\n", diff --git a/docs/tutorials/pytorch/question-answering/Dynamic_MiniLM_SQuAD.ipynb b/docs/tutorials/pytorch/question-answering/Dynamic_MiniLM_SQuAD.ipynb index 4f59134773f..26ddaa817c8 100644 --- a/docs/tutorials/pytorch/question-answering/Dynamic_MiniLM_SQuAD.ipynb +++ b/docs/tutorials/pytorch/question-answering/Dynamic_MiniLM_SQuAD.ipynb @@ -86,7 +86,8 @@ "from dataclasses import dataclass, field\n", "from datasets import load_dataset, load_metric\n", "from itertools import chain\n", - "from intel_extension_for_transformers.transformers import metrics, OptimizedModel, QuantizationConfig, DynamicLengthConfig\n", + "from intel_extension_for_transformers.transformers import metrics, OptimizedModel, DynamicLengthConfig\n", + "from 
neural_compressor.config import PostTrainingQuantConfig\n", "from intel_extension_for_transformers.transformers.trainer import NLPTrainer\n", "from transformers.trainer_utils import get_last_checkpoint\n", "from transformers.utils.versions import require_version\n", @@ -2162,10 +2163,9 @@ " is_relative=True, # Metric tolerance mode, True is for relative, otherwise for absolute.\n", " criterion=\"0.01\", # Performance tolerance when optimizing the model.\n", ")\n", - "quantization_config = QuantizationConfig(\n", - " approach=\"PostTrainingStatic\",\n", - " max_trials=200,\n", - " metrics=[tune_metric],\n", + "quant_dynamic_trainer.metrics = tune_metric\n", + "quantization_config = PostTrainingQuantConfig(\n", + " approach=\"static\",\n", ")\n", "\n", "# lc = \"(269, 253, 252, 202, 104, 34)\" # configure model with best length config\n", diff --git a/docs/tutorials/pytorch/question-answering/bert-base-uncased_distilled-squad.ipynb b/docs/tutorials/pytorch/question-answering/bert-base-uncased_distilled-squad.ipynb index c2902561cee..6ea589616ba 100644 --- a/docs/tutorials/pytorch/question-answering/bert-base-uncased_distilled-squad.ipynb +++ b/docs/tutorials/pytorch/question-answering/bert-base-uncased_distilled-squad.ipynb @@ -116,7 +116,8 @@ "import transformers\n", "from dataclasses import dataclass, field\n", "from datasets import load_dataset, load_metric\n", - "from intel_extension_for_transformers.transformers import metrics , QuantizationConfig\n", + "from intel_extension_for_transformers.transformers import metrics\n", + "from neural_compressor.config import PostTrainingQuantConfig\n", "from transformers import (\n", " AutoConfig,\n", " AutoModelForQuestionAnswering,\n", @@ -645,7 +646,7 @@ ")\n", "optim_args = OptimizationArguments(\n", " tune=True,\n", - " quantization_approach=\"PostTrainingStatic\"\n", + " quantization_approach=\"static\"\n", ")\n", "log_level = training_args.get_process_log_level()" ] @@ -999,10 +1000,9 @@ " is_relative=True, # Metric 
tolerance mode, True is for relative, otherwise for absolute.\n", " criterion=0.25, # Performance tolerance when optimizing the model.\n", ")\n", - "quantization_config = QuantizationConfig(\n", - " approach=\"PostTrainingStatic\",\n", - " max_trials=200,\n", - " metrics=[tune_metric],\n", + "trainer_static.metrics = tune_metric\n", + "quantization_config = PostTrainingQuantConfig(\n", + " approach=\"static\",\n", ")\n", "\n", "# run quantization\n", @@ -1113,10 +1113,9 @@ " is_relative=True, # Metric tolerance mode, True is for relative, otherwise for absolute.\n", " criterion=0.25, # Performance tolerance when optimizing the model.\n", ")\n", - "quantization_config = QuantizationConfig(\n", - " approach=\"PostTrainingDynamic\",\n", - " max_trials=200,\n", - " metrics=[tune_metric],\n", + "trainer_dynamic.metrics = tune_metric\n", + "quantization_config = PostTrainingQuantConfig(\n", + " approach=\"dynamic\",\n", ")\n", "\n", "# run quantization\n", diff --git a/docs/tutorials/pytorch/question-answering/bert-large-uncased-whole-word-masking-finetuned-squad.ipynb b/docs/tutorials/pytorch/question-answering/bert-large-uncased-whole-word-masking-finetuned-squad.ipynb index 9b893e6a340..3914a427e34 100644 --- a/docs/tutorials/pytorch/question-answering/bert-large-uncased-whole-word-masking-finetuned-squad.ipynb +++ b/docs/tutorials/pytorch/question-answering/bert-large-uncased-whole-word-masking-finetuned-squad.ipynb @@ -116,7 +116,8 @@ "import transformers\n", "from dataclasses import dataclass, field\n", "from datasets import load_dataset, load_metric\n", - "from intel_extension_for_transformers.transformers import metrics , QuantizationConfig\n", + "from intel_extension_for_transformers.transformers import metrics\n", + "from neural_compressor.config import PostTrainingQuantConfig\n", "from transformers import (\n", " AutoConfig,\n", " AutoModelForQuestionAnswering,\n", @@ -645,7 +646,7 @@ ")\n", "optim_args = OptimizationArguments(\n", " tune=True,\n", - " 
quantization_approach=\"PostTrainingStatic\"\n", + " quantization_approach=\"static\"\n", ")\n", "log_level = training_args.get_process_log_level()" ] @@ -999,10 +1000,9 @@ " is_relative=True, # Metric tolerance mode, True is for relative, otherwise for absolute.\n", " criterion=0.25, # Performance tolerance when optimizing the model.\n", ")\n", - "quantization_config = QuantizationConfig(\n", - " approach=\"PostTrainingStatic\",\n", - " max_trials=200,\n", - " metrics=[tune_metric],\n", + "trainer_static.metrics = tune_metric\n", + "quantization_config = PostTrainingQuantConfig(\n", + " approach=\"static\"\n", ")\n", "\n", "# run quantization\n", @@ -1092,10 +1092,9 @@ " is_relative=True, # Metric tolerance mode, True is for relative, otherwise for absolute.\n", " criterion=0.25, # Performance tolerance when optimizing the model.\n", ")\n", - "quantization_config = QuantizationConfig(\n", - " approach=\"PostTrainingDynamic\",\n", - " max_trials=200,\n", - " metrics=[tune_metric],\n", + "trainer_dynamic.metrics = tune_metric\n", + "quantization_config = PostTrainingQuantConfig(\n", + " approach=\"dynamic\",\n", ")\n", "\n", "# run quantization\n", diff --git a/docs/tutorials/pytorch/question-answering/orchestrate_optimizations.ipynb b/docs/tutorials/pytorch/question-answering/orchestrate_optimizations.ipynb index accba939b30..a70dd5a1ab5 100644 --- a/docs/tutorials/pytorch/question-answering/orchestrate_optimizations.ipynb +++ b/docs/tutorials/pytorch/question-answering/orchestrate_optimizations.ipynb @@ -74,13 +74,15 @@ "import transformers\n", "from intel_extension_for_transformers.transformers import (\n", " metrics,\n", - " PrunerConfig,\n", - " PruningConfig,\n", - " DistillationConfig,\n", - " QuantizationConfig,\n", " OptimizedModel,\n", " objectives\n", ")\n", + "from neural_compressor.config import (\n", + " WeightPruningConfig,\n", + " DistillationConfig,\n", + " KnowledgeDistillationLossConfig,\n", + " QuantizationAwareTrainingConfig,\n", + ")\n", "from 
torch.utils.data import DataLoader\n", "from tqdm import tqdm\n", "from trainer_qa import QuestionAnsweringTrainer\n", @@ -214,7 +216,7 @@ " metadata={\"help\": \"Whether or not to apply prune.\"},\n", " )\n", " pruning_approach: Optional[str] = field(\n", - " default=\"BasicMagnitude\",\n", + " default=\"magnitude\",\n", " metadata={\"help\": \"Pruning approach. Supported approach is basic_magnite.\"},\n", " )\n", " target_sparsity_ratio: Optional[float] = field(\n", @@ -234,9 +236,9 @@ " metadata={\"help\": \"Whether or not to apply quantization.\"},\n", " )\n", " quantization_approach: Optional[str] = field(\n", - " default=\"PostTrainingStatic\",\n", - " metadata={\"help\": \"Quantization approach. Supported approach are PostTrainingStatic, \"\n", - " \"PostTrainingDynamic and QuantizationAwareTraining.\"},\n", + " default=\"static\",\n", + " metadata={\"help\": \"Quantization approach. Supported approach are static, \"\n", + " \"dynamic and qat.\"},\n", " )\n", " metric_name: Optional[str] = field(\n", " default=None,\n", @@ -300,7 +302,7 @@ ")\n", "optim_args = OptimizationArguments(\n", " tune=True,\n", - " quantization_approach=\"PostTrainingStatic\"\n", + " quantization_approach=\"static\"\n", ")\n", "log_level = training_args.get_process_log_level()" ] @@ -730,9 +732,7 @@ "logger.info(\"***** Number of student model parameters: {:.2f}M *****\".format(\\\n", " para_counter(model)/10**6))\n", "\n", - "# Trace model\n", - "from neural_compressor.adaptor.torch_utils.symbolic_trace import symbolic_trace\n", - "model = symbolic_trace(model, optim_args.quantization_approach==\"QuantizationAwareTraining\")" + "# Trace model\n" ] }, { @@ -779,21 +779,18 @@ " tune_metric = metrics.Metric(\n", " name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol\n", " )\n", - " prune_type = 'PatternLock' \\\n", + " prune_type = 'pattern_lock' \\\n", " if optim_args.pruning_approach else optim_args.pruning_approach\n", " target_sparsity_ratio = 
optim_args.target_sparsity_ratio \\\n", " if optim_args.target_sparsity_ratio else None\n", - " pruner_config = PrunerConfig(prune_type=prune_type, target_sparsity_ratio=target_sparsity_ratio)\n", - " pruning_conf = PruningConfig(framework=\"pytorch_fx\",pruner_config=[pruner_config], metrics=tune_metric)\n", - " distillation_conf = DistillationConfig(framework=\"pytorch_fx\", metrics=tune_metric)\n", - "\n", - " objective = objectives.performance\n", - " quantization_conf = QuantizationConfig(\n", - " approach=optim_args.quantization_approach,\n", - " max_trials=600,\n", - " metrics=[tune_metric],\n", - " objectives=[objective]\n", - " )\n", + " trainer.metrics = tune_metric\n", + " pruning_conf = WeightPruningConfig([{\"start_step\": 0, \"end_step\": 2}],\n", + " target_sparsity=target_sparsity_ratio,\n", + " pruning_scope=\"local\",\n", + " pruning_type=prune_type)\n", + " distillation_criterion = KnowledgeDistillationLossConfig(loss_types=[\"CE\", \"KL\"])\n", + " distillation_conf = DistillationConfig(teacher_model=teacher_model, criterion=distillation_criterion)\n", + " quantization_conf = QuantizationAwareTrainingConfig()\n", " conf_list = [pruning_conf, distillation_conf, quantization_conf]\n", " model = trainer.orchestrate_optimizations(config_list=conf_list, teacher_model=teacher_model)" ] diff --git a/docs/tutorials/pytorch/question-answering/orchestrate_optimizations_bert_mini.ipynb b/docs/tutorials/pytorch/question-answering/orchestrate_optimizations_bert_mini.ipynb index b3a983f8d35..78b1258d580 100644 --- a/docs/tutorials/pytorch/question-answering/orchestrate_optimizations_bert_mini.ipynb +++ b/docs/tutorials/pytorch/question-answering/orchestrate_optimizations_bert_mini.ipynb @@ -78,6 +78,12 @@ " DataCollatorWithPadding,\n", " EvalPrediction,\n", ")\n", + "from neural_compressor.config import (\n", + " WeightPruningConfig,\n", + " DistillationConfig,\n", + " KnowledgeDistillationLossConfig,\n", + " QuantizationAwareTrainingConfig,\n", + ")\n", 
"from transformers.utils import check_min_version\n", "from transformers.utils.versions import require_version\n", "from typing import Optional\n", @@ -430,18 +436,14 @@ " name=metric_name, is_relative=True, criterion=0.01\n", ")\n", "\n", - "target_sparsity_ratio = None\n", - "pruner_config = PrunerConfig(prune_type='PatternLock', target_sparsity_ratio=None)\n", - "pruning_conf = PruningConfig(framework=\"pytorch_fx\",pruner_config=[pruner_config], metrics=tune_metric)\n", - "distillation_conf = DistillationConfig(framework=\"pytorch_fx\", metrics=tune_metric)\n", - "\n", - "objective = objectives.performance\n", - "quantization_conf = QuantizationConfig(\n", - " approach=\"QuantizationAwareTraining\",\n", - " max_trials=600,\n", - " metrics=[tune_metric],\n", - " objectives=[objective]\n", - ")\n", + "trainer.metrics = tune_metric\n", + "pruning_conf = WeightPruningConfig([{\"start_step\": 0, \"end_step\": 2}],\n", + " target_sparsity=0.64,\n", + " pruning_scope=\"local\",\n", + " pruning_type=\"pattern_lock\")\n", + "distillation_criterion = KnowledgeDistillationLossConfig(loss_types=[\"CE\", \"KL\"])\n", + "distillation_conf = DistillationConfig(teacher_model=teacher_model, criterion=distillation_criterion)\n", + "quantization_conf = QuantizationAwareTrainingConfig()\n", "conf_list = [pruning_conf, distillation_conf, quantization_conf]\n", "model = trainer.orchestrate_optimizations(config_list=conf_list, teacher_model=teacher_model)" ] diff --git a/docs/tutorials/pytorch/question-answering/pruning.ipynb b/docs/tutorials/pytorch/question-answering/pruning.ipynb index 61dde668c8b..b49b7e11e57 100644 --- a/docs/tutorials/pytorch/question-answering/pruning.ipynb +++ b/docs/tutorials/pytorch/question-answering/pruning.ipynb @@ -66,7 +66,8 @@ "import transformers\n", "from dataclasses import dataclass, field\n", "from datasets import load_dataset, load_metric\n", - "from intel_extension_for_transformers.transformers import metrics, OptimizedModel, PrunerConfig, 
PruningConfig, PruningMode\n", + "from intel_extension_for_transformers.transformers import metrics, OptimizedModel\n", + "from neural_compressor.config import WeightPruningConfig\n", "from trainer_qa import QuestionAnsweringTrainer\n", "from transformers import (\n", " AutoConfig,\n", @@ -225,7 +226,7 @@ " metadata={\"help\": \"Whether or not to apply prune.\"},\n", " )\n", " pruning_approach: Optional[str] = field(\n", - " default=\"BasicMagnitude\",\n", + " default=\"magnitude\",\n", " metadata={\"help\": \"Pruning approach. Supported approach is basic_magnite.\"},\n", " )\n", " target_sparsity_ratio: Optional[float] = field(\n", @@ -278,7 +279,7 @@ ")\n", "optim_args = OptimizationArguments(\n", " tune=True,\n", - " quantization_approach=\"PostTrainingStatic\"\n", + " quantization_approach=\"static\"\n", ")\n", "log_level = training_args.get_process_log_level()" ] @@ -625,11 +626,14 @@ " raise ValueError(\"do_train must be set to True for pruning.\")\n", "\n", " tune_metric = metrics.Metric(name=metric_name)\n", - " prune_type = 'BasicMagnitude' if optim_args.pruning_approach else optim_args.pruning_approach\n", + " prune_type = 'magnitude' if optim_args.pruning_approach else optim_args.pruning_approach\n", " target_sparsity_ratio = optim_args.target_sparsity_ratio \\\n", " if optim_args.target_sparsity_ratio else None\n", - " pruner_config = PrunerConfig(prune_type=prune_type, target_sparsity_ratio=target_sparsity_ratio)\n", - " pruning_conf = PruningConfig(pruner_config=pruner_config, metrics=tune_metric)\n", + " trainer.metrics = tune_metric\n", + " pruning_conf = WeightPruningConfig([{\"start_step\": 0, \"end_step\": 2}],\n", + " target_sparsity=target_sparsity_ratio,\n", + " pruning_scope=\"local\",\n", + " pruning_type=prune_type)\n", "\n", " model = trainer.prune(pruning_config=pruning_conf)\n", " trainer.save_model(training_args.output_dir)" diff --git a/docs/tutorials/pytorch/summarization/pegasus-samsum.ipynb 
b/docs/tutorials/pytorch/summarization/pegasus-samsum.ipynb index 4c66e32752a..ae599745a0d 100644 --- a/docs/tutorials/pytorch/summarization/pegasus-samsum.ipynb +++ b/docs/tutorials/pytorch/summarization/pegasus-samsum.ipynb @@ -110,7 +110,8 @@ "from datasets import load_dataset, load_metric\n", "\n", "from filelock import FileLock\n", - "from intel_extension_for_transformers.transformers import OptimizedModel, QuantizationConfig\n", + "from intel_extension_for_transformers.transformers import OptimizedModel\n", + "from neural_compressor.config import PostTrainingQuantConfig\n", "from intel_extension_for_transformers.transformers import metrics as nlp_metrics\n", "from intel_extension_for_transformers.transformers.trainer import NLPSeq2SeqTrainer\n", "from transformers import (\n", @@ -277,9 +278,9 @@ " metadata={\"help\": \"Whether or not to apply quantization.\"},\n", " )\n", " quantization_approach: Optional[str] = field(\n", - " default=\"PostTrainingStatic\",\n", - " metadata={\"help\": \"Quantization approach. Supported approach are PostTrainingStatic, \"\n", - " \"PostTrainingDynamic and QuantizationAwareTraining.\"},\n", + " default=\"static\",\n", + " metadata={\"help\": \"Quantization approach. 
Supported approach are static, \"\n", + " \"dynamic and qat.\"},\n", " )\n" ] }, @@ -631,10 +632,9 @@ "tune_metric = nlp_metrics.Metric(\n", " name=metric_name, is_relative=True, criterion=0.25\n", ")\n", - "quantization_config = QuantizationConfig(\n", - " approach=\"PostTrainingDynamic\",\n", - " max_trials=200,\n", - " metrics=[tune_metric],\n", + "trainer.metrics = tune_metric\n", + "quantization_config = PostTrainingQuantConfig(\n", + " approach=\"dynamic\",\n", ")\n", "trainer.max_length = max_length\n", "trainer.num_beams = num_beams\n", diff --git a/docs/tutorials/pytorch/text-classification/bert-base-uncased-MRPC.ipynb b/docs/tutorials/pytorch/text-classification/bert-base-uncased-MRPC.ipynb index 4855c46ccec..a4f13fad850 100644 --- a/docs/tutorials/pytorch/text-classification/bert-base-uncased-MRPC.ipynb +++ b/docs/tutorials/pytorch/text-classification/bert-base-uncased-MRPC.ipynb @@ -113,7 +113,8 @@ "import transformers\n", "from dataclasses import dataclass, field\n", "from datasets import load_dataset, load_metric\n", - "from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel, QuantizationConfig\n", + "from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel\n", + "from neural_compressor.config import PostTrainingQuantConfig\n", "from intel_extension_for_transformers.transformers.trainer import NLPTrainer\n", "from transformers import (\n", " AutoConfig,\n", @@ -247,9 +248,9 @@ " metadata={\"help\": \"Whether or not to apply quantization.\"},\n", " )\n", " quantization_approach: Optional[str] = field(\n", - " default=\"PostTrainingStatic\",\n", - " metadata={\"help\": \"Quantization approach. Supported approach are PostTrainingStatic, \"\n", - " \"PostTrainingDynamic and QuantizationAwareTraining.\"},\n", + " default=\"static\",\n", + " metadata={\"help\": \"Quantization approach. 
Supported approach are static, \"\n", + " \"dynamic and qat.\"},\n", " )\n", " is_relative: Optional[bool] = field(\n", " default=True,\n", @@ -296,7 +297,7 @@ ")\n", "optim_args = OptimizationArguments(\n", " tune=True,\n", - " quantization_approach=\"PostTrainingStatic\"\n", + " quantization_approach=\"static\"\n", ")\n", "log_level = training_args.get_process_log_level()\n", "logger.setLevel(log_level)" @@ -532,11 +533,9 @@ " name=metric_name, is_relative=True, criterion=0.25\n", ")\n", "objective = objectives.performance\n", - "quantization_config = QuantizationConfig(\n", - " approach=\"PostTrainingStatic\",\n", - " max_trials=600,\n", - " metrics=[tune_metric],\n", - " objectives=[objective]\n", + "trainer_static.metrics = metrics\n", + "quantization_config = PostTrainingQuantConfig(\n", + " approach=\"static\",\n", ")\n", "trainer_static.quantize(quant_config=quantization_config)" ] @@ -642,11 +641,9 @@ " name=metric_name, is_relative=True, criterion=0.25\n", ")\n", "objective = objectives.performance\n", - "quantization_config = QuantizationConfig(\n", - " approach=\"PostTrainingDynamic\",\n", - " max_trials=600,\n", - " metrics=[tune_metric],\n", - " objectives=[objective]\n", + "trainer_dynamic.metrics = metrics\n", + "quantization_config = PostTrainingQuantConfig(\n", + " approach=\"dynamic\",\n", ")\n", "trainer_dynamic.quantize(quant_config=quantization_config)" ] diff --git a/docs/tutorials/pytorch/text-classification/orchestrate_optimizations.ipynb b/docs/tutorials/pytorch/text-classification/orchestrate_optimizations.ipynb index fd8c926085f..6fa3f293695 100644 --- a/docs/tutorials/pytorch/text-classification/orchestrate_optimizations.ipynb +++ b/docs/tutorials/pytorch/text-classification/orchestrate_optimizations.ipynb @@ -71,10 +71,6 @@ "from datasets import load_dataset, load_metric\n", "from intel_extension_for_transformers.transformers import (\n", " metrics,\n", - " PrunerConfig,\n", - " PruningConfig,\n", - " DistillationConfig,\n", - " 
QuantizationConfig,\n", " OptimizedModel,\n", " objectives\n", ")\n", @@ -93,9 +89,14 @@ " default_data_collator,\n", " set_seed,\n", ")\n", + "from neural_compressor.config import (\n", + " WeightPruningConfig,\n", + " DistillationConfig,\n", + " KnowledgeDistillationLossConfig,\n", + " QuantizationAwareTrainingConfig,\n", + ")\n", "from transformers.trainer_utils import get_last_checkpoint\n", "from transformers.utils import check_min_version\n", - "from transformers.utils.fx import symbolic_trace\n", "from typing import Optional\n", "\n", "\n", @@ -251,7 +252,7 @@ " metadata={\"help\": \"Whether or not to apply prune.\"},\n", " )\n", " pruning_approach: Optional[str] = field(\n", - " default=\"BasicMagnitude\",\n", + " default=\"magnitude\",\n", " metadata={\"help\": \"Pruning approach. Supported approach is basic_magnite.\"},\n", " )\n", " target_sparsity_ratio: Optional[float] = field(\n", @@ -271,9 +272,9 @@ " metadata={\"help\": \"Whether or not to apply quantization.\"},\n", " )\n", " quantization_approach: Optional[str] = field(\n", - " default=\"QuantizationAwareTraining\",\n", - " metadata={\"help\": \"Quantization approach. Supported approach are PostTrainingStatic, \"\n", - " \"PostTrainingDynamic and QuantizationAwareTraining.\"},\n", + " default=\"qat\",\n", + " metadata={\"help\": \"Quantization approach. 
Supported approach are static, \"\n", + " \"dynamic and qat.\"},\n", " )\n", " metric_name: Optional[str] = field(\n", " default=\"eval_f1\",\n", @@ -341,7 +342,7 @@ ")\n", "optim_args = OptimizationArguments(\n", " tune=True,\n", - " quantization_approach=\"PostTrainingStatic\"\n", + " quantization_approach=\"static\"\n", ")\n", "log_level = training_args.get_process_log_level()\n", "logger.setLevel(log_level)" @@ -618,7 +619,7 @@ "\n", "# Trace model\n", "from neural_compressor.adaptor.torch_utils.symbolic_trace import symbolic_trace\n", - "model = symbolic_trace(model, optim_args.quantization_approach==\"QuantizationAwareTraining\")" + "model = symbolic_trace(model, optim_args.quantization_approach==\"qat\")" ] }, { @@ -671,23 +672,20 @@ " tune_metric = metrics.Metric(\n", " name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol\n", " )\n", - " prune_type = 'PatternLock' \\\n", + " prune_type = 'pattern_lock' \\\n", " if optim_args.pruning_approach else optim_args.pruning_approach\n", " target_sparsity_ratio = optim_args.target_sparsity_ratio \\\n", " if optim_args.target_sparsity_ratio else None\n", - " pruner_config = PrunerConfig(prune_type=prune_type, target_sparsity_ratio=target_sparsity_ratio)\n", - " pruning_conf = PruningConfig(framework=\"pytorch_fx\",pruner_config=[pruner_config], metrics=tune_metric)\n", - " distillation_conf = DistillationConfig(framework=\"pytorch_fx\", metrics=tune_metric)\n", - " \n", - " objective = objectives.performance\n", - " quantization_conf = QuantizationConfig(\n", - " approach=optim_args.quantization_approach,\n", - " max_trials=600,\n", - " metrics=[tune_metric],\n", - " objectives=[objective]\n", - " )\n", + " trainer.metrics = tune_metric\n", + " pruning_conf = WeightPruningConfig([{\"start_step\": 0, \"end_step\": 2}],\n", + " target_sparsity=target_sparsity_ratio,\n", + " pruning_scope=\"local\",\n", + " pruning_type=prune_type)\n", + " distillation_criterion = 
KnowledgeDistillationLossConfig(loss_types=[\"CE\", \"KL\"])\n", + " distillation_conf = DistillationConfig(teacher_model=teacher_model, criterion=distillation_criterion)\n", + " quantization_conf = QuantizationAwareTrainingConfig()\n", " conf_list = [pruning_conf, distillation_conf, quantization_conf]\n", - " model = trainer.orchestrate_optimizations(config_list=conf_list, teacher_model=teacher_model)" + " model = trainer.orchestrate_optimizations(config_list=conf_list)" ] }, { diff --git a/docs/tutorials/pytorch/text-classification/orchestrate_optimizations_bert_mini.ipynb b/docs/tutorials/pytorch/text-classification/orchestrate_optimizations_bert_mini.ipynb index e533ab555f9..ffbd067af30 100644 --- a/docs/tutorials/pytorch/text-classification/orchestrate_optimizations_bert_mini.ipynb +++ b/docs/tutorials/pytorch/text-classification/orchestrate_optimizations_bert_mini.ipynb @@ -70,12 +70,14 @@ "from datasets import load_dataset, load_metric\n", "from intel_extension_for_transformers.transformers import (\n", " metrics,\n", - " PrunerConfig,\n", - " PruningConfig,\n", - " DistillationConfig,\n", - " QuantizationConfig,\n", " objectives\n", ")\n", + "from neural_compressor.config import (\n", + " WeightPruningConfig,\n", + " DistillationConfig,\n", + " KnowledgeDistillationLossConfig,\n", + " QuantizationAwareTrainingConfig,\n", + ")\n", "from intel_extension_for_transformers.transformers.trainer import NLPTrainer\n", "from transformers import (\n", " AutoConfig,\n", @@ -343,18 +345,14 @@ " name=metric_name, is_relative=True, criterion=0.01\n", ")\n", "\n", - "target_sparsity_ratio = None\n", - "pruner_config = PrunerConfig(prune_type='PatternLock', target_sparsity_ratio=None)\n", - "pruning_conf = PruningConfig(framework=\"pytorch_fx\",pruner_config=[pruner_config], metrics=tune_metric)\n", - "distillation_conf = DistillationConfig(framework=\"pytorch_fx\", metrics=tune_metric)\n", - "\n", - "objective = objectives.performance\n", - "quantization_conf = 
QuantizationConfig(\n", - " approach=\"QuantizationAwareTraining\",\n", - " max_trials=600,\n", - " metrics=[tune_metric],\n", - " objectives=[objective]\n", - ")\n", + "trainer.metrics = tune_metric\n", + "pruning_conf = WeightPruningConfig([{\"start_step\": 0, \"end_step\": 2}],\n", + " target_sparsity=0.64,\n", + " pruning_scope=\"local\",\n", + " pruning_type=\"pattern_lock\")\n", + "distillation_criterion = KnowledgeDistillationLossConfig(loss_types=[\"CE\", \"KL\"])\n", + "distillation_conf = DistillationConfig(teacher_model=teacher_model, criterion=distillation_criterion)\n", + "quantization_conf = QuantizationAwareTrainingConfig()\n", "conf_list = [pruning_conf, distillation_conf, quantization_conf]\n", "model = trainer.orchestrate_optimizations(config_list=conf_list, teacher_model=teacher_model)" ] diff --git a/docs/tutorials/pytorch/text-classification/pruning.ipynb b/docs/tutorials/pytorch/text-classification/pruning.ipynb index 723de2b8f46..53d56051dc5 100644 --- a/docs/tutorials/pytorch/text-classification/pruning.ipynb +++ b/docs/tutorials/pytorch/text-classification/pruning.ipynb @@ -69,10 +69,9 @@ "from datasets import load_dataset, load_metric\n", "from intel_extension_for_transformers.transformers import (\n", " metrics,\n", - " OptimizedModel,\n", - " PrunerConfig,\n", - " PruningConfig,\n", + " OptimizedModel\n", ")\n", + "from neural_compressor.config import WeightPruningConfig\n", "from intel_extension_for_transformers.transformers.trainer import NLPTrainer\n", "from transformers import (\n", " AutoConfig,\n", @@ -283,7 +282,7 @@ ")\n", "optim_args = OptimizationArguments(\n", " tune=True,\n", - " quantization_approach=\"PostTrainingStatic\"\n", + " quantization_approach=\"static\"\n", ")\n", "log_level = training_args.get_process_log_level()\n", "logger.setLevel(log_level)" @@ -465,12 +464,14 @@ ")\n", "\n", "tune_metric = metrics.Metric(name=metric_name)\n", - "prune_type = 'BasicMagnitude' \\\n", - " if optim_args.pruning_approach else 
optim_args.pruning_approach\n", + "prune_type = 'magnitude' if optim_args.pruning_approach else optim_args.pruning_approach\n", "target_sparsity_ratio = optim_args.target_sparsity_ratio \\\n", " if optim_args.target_sparsity_ratio else None\n", - "pruner_config = PrunerConfig(prune_type=prune_type, target_sparsity_ratio=target_sparsity_ratio)\n", - "pruning_conf = PruningConfig(pruner_config=pruner_config, metrics=tune_metric)\n", + "trainer.metrics = tune_metric\n", + "pruning_conf = WeightPruningConfig([{\"start_step\": 0, \"end_step\": 2}],\n", + " target_sparsity=target_sparsity_ratio,\n", + " pruning_scope=\"local\",\n", + " pruning_type=prune_type)\n", "\n", "model = trainer.prune(pruning_config=pruning_conf)\n", "trainer.save_model(training_args.output_dir)" diff --git a/docs/tutorials/pytorch/token-classification/distilbert_base_ner.ipynb b/docs/tutorials/pytorch/token-classification/distilbert_base_ner.ipynb index 82b6ecbe4ce..8d43eb95e30 100644 --- a/docs/tutorials/pytorch/token-classification/distilbert_base_ner.ipynb +++ b/docs/tutorials/pytorch/token-classification/distilbert_base_ner.ipynb @@ -108,8 +108,8 @@ "from intel_extension_for_transformers.transformers import(\n", " metrics,\n", " OptimizedModel,\n", - " QuantizationConfig,\n", ")\n", + "from neural_compressor.config import PostTrainingQuantConfig\n", "from intel_extension_for_transformers.transformers.trainer import NLPTrainer\n", "from transformers import (\n", " AutoConfig,\n", @@ -559,9 +559,9 @@ "tune_metric = metrics.Metric(\n", " name=metric_name, is_relative=True, criterion=0.25\n", ")\n", - "quantization_config = QuantizationConfig(\n", - " approach=\"PostTrainingStatic\",\n", - " metrics=[tune_metric],\n", + "trainer_static.metrics = tune_metric\n", + "quantization_config = PostTrainingQuantConfig(\n", + " approach=\"static\",\n", ")\n", "trainer_static.quantize(quantization_config)" ] @@ -661,9 +661,9 @@ "tune_metric = metrics.Metric(\n", " name=metric_name, is_relative=True, 
criterion=0.25\n", ")\n", - "quantization_config = QuantizationConfig(\n", - " approach=\"PostTrainingDynamic\",\n", - " metrics=[tune_metric],\n", + "trainer_dynamic.metrics = tune_metric\n", + "quantization_config = PostTrainingQuantConfig(\n", + " approach=\"dynamic\",\n", ")\n", "trainer_dynamic.quantize(quantization_config)" ] diff --git a/docs/tutorials/pytorch/translation/t5-small.ipynb b/docs/tutorials/pytorch/translation/t5-small.ipynb index b10ee380c09..9356c5214ec 100644 --- a/docs/tutorials/pytorch/translation/t5-small.ipynb +++ b/docs/tutorials/pytorch/translation/t5-small.ipynb @@ -108,7 +108,8 @@ "import numpy as np\n", "from datasets import load_dataset, load_metric\n", "\n", - "from intel_extension_for_transformers.transformers import OptimizedModel, QuantizationConfig\n", + "from intel_extension_for_transformers.transformers import OptimizedModel\n", + "from neural_compressor.config import PostTrainingQuantConfig\n", "from intel_extension_for_transformers.transformers import metrics as nlp_metrics\n", "from intel_extension_for_transformers.transformers.trainer import NLPSeq2SeqTrainer\n", "import transformers\n", @@ -510,10 +511,9 @@ "tune_metric = nlp_metrics.Metric(\n", " name=metric_name, is_relative=True, criterion=0.25\n", ")\n", - "quantization_config = QuantizationConfig(\n", - " approach=\"PostTrainingDynamic\",\n", - " max_trials=200,\n", - " metrics=[tune_metric],\n", + "trainer.metrics = tune_metric\n", + "quantization_config = PostTrainingQuantConfig(\n", + " approach=\"dynamic\",\n", ")\n", "trainer.max_length = max_length\n", "trainer.num_beams = num_beams\n", diff --git a/examples/huggingface/onnxruntime/optimization_README.md b/examples/huggingface/onnxruntime/optimization_README.md index 6f2da2cd552..3c801acfe8b 100644 --- a/examples/huggingface/onnxruntime/optimization_README.md +++ b/examples/huggingface/onnxruntime/optimization_README.md @@ -4,7 +4,7 @@ Welcome to ONNX Runtime Huggingface examples. 
The models are from [Huggingface]( ## Quantization approach -| Task | PostTrainingDynamic | PostTrainingStatic +| Task | dynamic | static |---|:---:|:---:| |**`speech-recognition`**| ✅ | ✅ | diff --git a/examples/huggingface/onnxruntime/speech-recognition/quantization/README.md b/examples/huggingface/onnxruntime/speech-recognition/quantization/README.md index 5638882e2ca..ae878cc4cb8 100644 --- a/examples/huggingface/onnxruntime/speech-recognition/quantization/README.md +++ b/examples/huggingface/onnxruntime/speech-recognition/quantization/README.md @@ -1,6 +1,6 @@ Step-by-Step​ ============ -The script `run_whisper.py` provides two quantization approaches (PostTrainingStatic and PostTrainingDynamic) based on [Intel® Neural Compressor](https://github.com/intel/neural-compressor) with [LibriSpeech test-clean](https://huggingface.co/datasets/librispeech_asr) dataset. +The script `run_whisper.py` provides two quantization approaches (static and dynamic) based on [Intel® Neural Compressor](https://github.com/intel/neural-compressor) with [LibriSpeech test-clean](https://huggingface.co/datasets/librispeech_asr) dataset. # Prerequisite​ ## 1. 
Create Environment​ @@ -96,7 +96,7 @@ Available INT4 models on huggingface: # Validated model list -|Topology|Pretrained model|PostTrainingDynamic|PostTrainingStatic|WeightOnly4Bit| +|Topology|Pretrained model|dynamic|static|WeightOnly4Bit| |---|------------------------------------|---|---|--- |whisper_tiny|openai/whisper-tiny| | | ✅| |whisper_base|openai/whisper-base| | | ✅| diff --git a/examples/huggingface/pytorch/code-generation/quantization/run_tuning.sh b/examples/huggingface/pytorch/code-generation/quantization/run_tuning.sh index 524a5e8d46d..0a793301dbb 100644 --- a/examples/huggingface/pytorch/code-generation/quantization/run_tuning.sh +++ b/examples/huggingface/pytorch/code-generation/quantization/run_tuning.sh @@ -16,7 +16,7 @@ function init_params { model_name_or_path="bigcode/starcoder" extra_cmd="" batch_size=8 - approach="PostTrainingStatic" + approach="static" alpha=0.5 script="run_generation.py" for var in "$@" diff --git a/examples/huggingface/pytorch/image-classification/deployment/imagenet/vit/model_quant_convert.py b/examples/huggingface/pytorch/image-classification/deployment/imagenet/vit/model_quant_convert.py index 9fdf4114e54..c73a4f6dcb7 100644 --- a/examples/huggingface/pytorch/image-classification/deployment/imagenet/vit/model_quant_convert.py +++ b/examples/huggingface/pytorch/image-classification/deployment/imagenet/vit/model_quant_convert.py @@ -49,7 +49,13 @@ from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version -from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel, QuantizationConfig +from intel_extension_for_transformers.transformers import OptimizedModel, metrics +from neural_compressor.config import ( + PostTrainingQuantConfig, + QuantizationAwareTrainingConfig, + TuningCriterion, + AccuracyCriterion +) from intel_extension_for_transformers.transformers.trainer import NLPTrainer os.environ["WANDB_DISABLED"] = 
"true" @@ -171,9 +177,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="PostTrainingStatic", - metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, + default="static", + metadata={"help": "Quantization approach. Supported approach are static, " + "dynamic and qat."}, ) metric_name: Optional[str] = field( default="eval_accuracy", @@ -431,28 +437,38 @@ def val_transforms(example_batch): raise ValueError("do_eval must be set to True for quantization.") trainer.save_model(training_args.output_dir) - if optim_args.quantization_approach != "PostTrainingDynamic": + if optim_args.quantization_approach != "dynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) - if optim_args.quantization_approach == "QuantizationAwareTraining": - early_stopping_patience = 6 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, - early_stopping_threshold)) - tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - objective = objectives.performance - quantization_config = QuantizationConfig( - approach=optim_args.quantization_approach, - max_trials=600, - metrics=[tune_metric], - objectives=[objective], - config_file='vit.yaml' - ) + trainer.metrics = tune_metric + if optim_args.quantization_approach != "qat": + op_name_dict = { + 'vit.embeddings.patch_embeddings.projection.module': { + 'activation': {'dtype': ['fp32']}, + 'weight': {'dtype': ['fp32']} + }, + 'vit.embeddings.dropout': { + 'activation': {'dtype': ['fp32']}, + 'weight': {'dtype': ['fp32']} + }, + } + tuning_criterion = TuningCriterion(max_trials=600) + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, # 
optional. + criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". + tolerable_loss=optim_args.perf_tol, # optional. + ) + quantization_config = PostTrainingQuantConfig( + approach=optim_args.quantization_approach, + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion, + op_name_dict=op_name_dict, + ) model = trainer.quantize(quant_config=quantization_config) diff --git a/examples/huggingface/pytorch/image-classification/deployment/imagenet/vit/run_vit.sh b/examples/huggingface/pytorch/image-classification/deployment/imagenet/vit/run_vit.sh index 73c1add48ca..a9350cde559 100644 --- a/examples/huggingface/pytorch/image-classification/deployment/imagenet/vit/run_vit.sh +++ b/examples/huggingface/pytorch/image-classification/deployment/imagenet/vit/run_vit.sh @@ -123,7 +123,7 @@ else echo "========== Prepare Model ${MODEL_NAME_OR_PATH} with Precision ${PRECISION} ========" mode_cmd="" if [[ ${PRECISION} = 'int8' ]]; then - mode_cmd=$mode_cmd" --tune --quantization_approach PostTrainingStatic" + mode_cmd=$mode_cmd" --tune --quantization_approach static" elif [[ ${PRECISION} = 'bf16' ]]; then mode_cmd=$mode_cmd" --enable_bf16" fi diff --git a/examples/huggingface/pytorch/image-classification/deployment/imagenet/vit/vit.yaml b/examples/huggingface/pytorch/image-classification/deployment/imagenet/vit/vit.yaml deleted file mode 100644 index 66ac67fcc58..00000000000 --- a/examples/huggingface/pytorch/image-classification/deployment/imagenet/vit/vit.yaml +++ /dev/null @@ -1,28 +0,0 @@ -model: # mandatory. used to specify model specific information. - name: vit - framework: pytorch_fx # mandatory. possible values are tensorflow, mxnet, pytorch, pytorch_ipex, onnxrt_integerops and onnxrt_qlinearops. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. 
- approach: post_training_static_quant - calibration: - sampling_size: [1000] - op_wise: { # optional. tuning constraints on op-wise for advance user to reduce tuning space. - 'vit.embeddings.patch_embeddings.projection.module': { # optional. set default qconfig to fp32 for FX model - 'activation': {'dtype': ['fp32']}, - 'weight': {'dtype': ['fp32']} - }, - 'vit.embeddings.dropout': { # optional. set default qconfig to fp32 for FX model - 'activation': {'dtype': ['fp32']}, - 'weight': {'dtype': ['fp32']} - }, - } - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - max_trials: 500 - random_seed: 1978 # optional. random seed for deterministic tuning. - workspace: - path: nc_workspace/vit/ diff --git a/examples/huggingface/pytorch/image-classification/quantization/README.md b/examples/huggingface/pytorch/image-classification/quantization/README.md index 09d2eaa2e83..d9a9f8c1055 100644 --- a/examples/huggingface/pytorch/image-classification/quantization/README.md +++ b/examples/huggingface/pytorch/image-classification/quantization/README.md @@ -1,11 +1,11 @@ # Image classification -The script `run_image_classification.py` provides three quantization approaches (PostTrainingStatic, PostTrainingStatic and QuantizationAwareTraining) based on [Intel® Neural Compressor](https://github.com/intel/neural-compressor). +The script `run_image_classification.py` provides three quantization approaches (dynamic, static and qat) based on [Intel® Neural Compressor](https://github.com/intel/neural-compressor). Here is how to run the script: >**Note**: Please use transformers no higher than 4.34.1 -1. quantization with PostTrainingStatic +1. 
static quantization ``` sh run_tuning.sh @@ -26,6 +26,6 @@ run run_benchmark.sh ### Validated model list -|Dataset|Pretrained model|PostTrainingDynamic | PostTrainingStatic | QuantizationAwareTraining +|Dataset|Pretrained model|dynamic | static | qat |---|------------------------------------|---|---|--- |imagenet-1k|google/vit-base-patch16-224| ✅| ✅| N/A| diff --git a/examples/huggingface/pytorch/image-classification/quantization/conf.yaml b/examples/huggingface/pytorch/image-classification/quantization/conf.yaml deleted file mode 100644 index a9c4b4879b6..00000000000 --- a/examples/huggingface/pytorch/image-classification/quantization/conf.yaml +++ /dev/null @@ -1,18 +0,0 @@ -model: # mandatory. used to specify model specific information. - name: vit - framework: pytorch_fx # mandatory. possible values are tensorflow, mxnet, pytorch, pytorch_ipex, onnxrt_integerops and onnxrt_qlinearops. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - approach: post_training_static_quant - calibration: - sampling_size: [1000] - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - max_trials: 300 - random_seed: 1978 # optional. random seed for deterministic tuning. 
- workspace: - path: nc_workspace/vit/ \ No newline at end of file diff --git a/examples/huggingface/pytorch/image-classification/quantization/run_image_classification.py b/examples/huggingface/pytorch/image-classification/quantization/run_image_classification.py index 92d11d9c380..47cd548107c 100644 --- a/examples/huggingface/pytorch/image-classification/quantization/run_image_classification.py +++ b/examples/huggingface/pytorch/image-classification/quantization/run_image_classification.py @@ -49,7 +49,13 @@ from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version -from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel, QuantizationConfig +from intel_extension_for_transformers.transformers import OptimizedModel, metrics +from neural_compressor.config import ( + PostTrainingQuantConfig, + QuantizationAwareTrainingConfig, + TuningCriterion, + AccuracyCriterion +) from intel_extension_for_transformers.transformers.trainer import NLPTrainer os.environ["WANDB_DISABLED"] = "true" @@ -171,9 +177,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="PostTrainingStatic", - metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, + default="static", + metadata={"help": "Quantization approach. 
Supported approach are static, " + "dynamic and qat."}, ) metric_name: Optional[str] = field( default="eval_accuracy", @@ -208,9 +214,6 @@ class OptimizationArguments: num_of_instance: int = field( default=-1, metadata={"help":"the number of instance for benchmark."}) - inc_config_file: Optional[str] = field( - default="vit_config.yaml", metadata={"help": "quantization configuration file"} - ) def collate_fn(examples): @@ -446,29 +449,54 @@ def val_transforms(example_batch): model.config.save_pretrained(training_args.output_dir) trainer.save_model(training_args.output_dir) - if optim_args.quantization_approach != "PostTrainingDynamic": + if optim_args.quantization_approach != "dynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) model.config.save_pretrained(training_args.output_dir) - if optim_args.quantization_approach == "QuantizationAwareTraining": - early_stopping_patience = 6 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, - early_stopping_threshold)) - tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - objective = objectives.performance - quantization_config = QuantizationConfig( - approach=optim_args.quantization_approach, - max_trials=600, - metrics=[tune_metric], - objectives=[objective], - config_file=optim_args.inc_config_file - ) + trainer.metrics = tune_metric + if optim_args.quantization_approach != "qat": + op_name_dict = { + 'vit.embeddings.patch_embeddings.projection.module': { + 'activation': {'dtype': ['fp32']}, + 'weight': {'dtype': ['fp32']} + }, + 'vit.embeddings.dropout': { + 'activation': {'dtype': ['fp32']}, + 'weight': {'dtype': ['fp32']} + }, + } + tuning_criterion = TuningCriterion(max_trials=600) + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, # optional. 
+ criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". + tolerable_loss=optim_args.perf_tol, # optional. + ) + quantization_config = PostTrainingQuantConfig( + approach=optim_args.quantization_approach, + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion, + op_name_dict=op_name_dict, + ) + else: + tuning_criterion = TuningCriterion(max_trials=600) + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, # optional. + criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". + tolerable_loss=optim_args.perf_tol, # optional. + ) + quantization_config = QuantizationAwareTrainingConfig( + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + early_stopping_patience = 2 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ + early_stopping_threshold)) model = trainer.quantize(quant_config=quantization_config) diff --git a/examples/huggingface/pytorch/image-classification/quantization/run_tuning.sh b/examples/huggingface/pytorch/image-classification/quantization/run_tuning.sh index 2733f3e555c..e596d93e824 100644 --- a/examples/huggingface/pytorch/image-classification/quantization/run_tuning.sh +++ b/examples/huggingface/pytorch/image-classification/quantization/run_tuning.sh @@ -16,7 +16,7 @@ function init_params { model_name_or_path="google/vit-base-patch16-224" extra_cmd="" batch_size=8 - approach="PostTrainingStatic" + approach="static" for var in "$@" do case $var in @@ -45,8 +45,7 @@ function init_params { function run_tuning { if [ "${topology}" = "vit-base-patch16-224_static" ]; then model_name_or_path="/tf_dataset2/models/nlp_toolkit/vit-base" - approach="PostTrainingStatic" - inc_config_file="vit_config.yaml" + approach="static" fi python -u ./run_image_classification.py \ @@ -62,7 +61,6 @@ 
function run_tuning { --tune \ --overwrite_output_dir \ --quantization_approach ${approach} \ - --inc_config_file ${inc_config_file} \ ${extra_cmd} } diff --git a/examples/huggingface/pytorch/image-classification/quantization/vit_config.yaml b/examples/huggingface/pytorch/image-classification/quantization/vit_config.yaml deleted file mode 100644 index 5b9b73e5472..00000000000 --- a/examples/huggingface/pytorch/image-classification/quantization/vit_config.yaml +++ /dev/null @@ -1,30 +0,0 @@ -version: 2.0 - -model: # mandatory. used to specify model specific information. - name: vit - framework: pytorch_fx # mandatory. possible values are tensorflow, mxnet, pytorch, pytorch_ipex, onnxrt_integerops and onnxrt_qlinearops. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - approach: post_training_static_quant - calibration: - sampling_size: [1000] - op_wise: { # optional. tuning constraints on op-wise for advance user to reduce tuning space. - 'vit.embeddings.patch_embeddings.projection.module': { # optional. set default qconfig to fp32 for FX model - 'activation': {'dtype': ['fp32']}, - 'weight': {'dtype': ['fp32']} - }, - 'vit.embeddings.dropout': { # optional. set default qconfig to fp32 for FX model - 'activation': {'dtype': ['fp32']}, - 'weight': {'dtype': ['fp32']} - }, - } - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - max_trials: 300 - random_seed: 1978 # optional. random seed for deterministic tuning. 
- workspace: - path: nc_workspace/vit/ \ No newline at end of file diff --git a/examples/huggingface/pytorch/language-modeling/quantization/README.md b/examples/huggingface/pytorch/language-modeling/quantization/README.md index a053aef7617..e4c71ee8ba3 100644 --- a/examples/huggingface/pytorch/language-modeling/quantization/README.md +++ b/examples/huggingface/pytorch/language-modeling/quantization/README.md @@ -2,7 +2,7 @@ Step-by-Step ============ This document describes the step-by-step instructions to run large language models (LLMs) on 4th Gen Intel® Xeon® Scalable Processor (codenamed Sapphire Rapids) with PyTorch and Intel® Extension for PyTorch. -The scripts `run_clm.py`, `run_mlm.py` and `run_plm.py` provide three quantization approaches respectively (PostTrainingDynamic, PostTrainingStatic, QuantAwareTraining) based on [Intel® Neural Compressor](https://github.com/intel/neural-compressor) and return last token prediction accuracy by `trainer`. +The scripts `run_clm.py`, `run_mlm.py` and `run_plm.py` provide three quantization approaches respectively (dynamic, static, qat) based on [Intel® Neural Compressor](https://github.com/intel/neural-compressor) and return last token prediction accuracy by `trainer`. The large language model quantization is moved to [text-generation](../../text-generation/quantization/) now. @@ -16,9 +16,7 @@ pip install -r requirements.txt pip install -v . 
cd examples/huggingface/pytorch/language-modeling/quantization pip install -r requirements.txt -pip install transformers==4.34.1 ``` ->**Note**: Please use transformers no higher than 4.34.1 # Run @@ -32,7 +30,7 @@ python run_clm.py \ --dataset_name wikitext \ --dataset_config_name wikitext-2-raw-v1 \ --tune \ - --quantization_approach PostTrainingStatic \ + --quantization_approach static \ --do_train \ --do_eval \ --output_dir ./tmp/clm_output \ @@ -47,7 +45,7 @@ python run_mlm.py \ --dataset_name wikitext \ --dataset_config_name wikitext-2-raw-v1 \ --tune \ - --quantization_approach PostTrainingStatic \ + --quantization_approach static \ --do_train \ --do_eval \ --output_dir ./tmp/mlm_output \ @@ -62,12 +60,9 @@ python run_mlm.py \ --dataset_name wikitext \ --dataset_config_name wikitext-2-raw-v1 \ --tune \ - --quantization_approach PostTrainingStatic \ + --quantization_approach static \ --do_train \ --do_eval \ --output_dir ./tmp/plm_output \ --overwrite_output_dir ``` - -[1]. Elias, Frantar, et al. "GPTQ: Accurate Post-training Compression for Generative Pretrained Transformers." arXiv preprint arXiv:2210.17323 (2023). -[2]. Lin, Ji, et al. "AWQ: Activation-aware Weight Quantization for LLM Compression and Acceleration." arXiv preprint arXiv:2306.00978 (2023). 
diff --git a/examples/huggingface/pytorch/language-modeling/quantization/run_clm.py b/examples/huggingface/pytorch/language-modeling/quantization/run_clm.py index d58abdeaa87..9a9637785ec 100644 --- a/examples/huggingface/pytorch/language-modeling/quantization/run_clm.py +++ b/examples/huggingface/pytorch/language-modeling/quantization/run_clm.py @@ -28,7 +28,13 @@ from dataclasses import dataclass, field from datasets import load_dataset, load_metric from itertools import chain -from intel_extension_for_transformers.transformers import metrics, OptimizedModel, QuantizationConfig +from intel_extension_for_transformers.transformers import OptimizedModel, metrics +from neural_compressor.config import ( + PostTrainingQuantConfig, + QuantizationAwareTrainingConfig, + TuningCriterion, + AccuracyCriterion +) from intel_extension_for_transformers.transformers.trainer import NLPTrainer from transformers import ( CONFIG_MAPPING, @@ -199,9 +205,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="PostTrainingStatic", - metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, + default="static", + metadata={"help": "Quantization approach. Supported approach are static, " + "dynamic and qat."}, ) metric_name: Optional[str] = field( default="eval_loss", @@ -565,33 +571,39 @@ def compute_metrics(eval_preds): raise ValueError("do_eval must be set to True for quantization.") trainer.save_model(training_args.output_dir) - if optim_args.quantization_approach != "PostTrainingDynamic": + if optim_args.quantization_approach != "dynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." 
) - if optim_args.quantization_approach == "QuantizationAwareTraining": - early_stopping_patience = 6 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, - early_stopping_threshold)) - tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol, greater_is_better=False ) - quantization_config = QuantizationConfig(approach=optim_args.quantization_approach, - metrics=[tune_metric], - sampling_size=optim_args.sampling_size - if optim_args.sampling_size is not None else len(train_dataset) // 100 * 5 , - recipes={ - "smooth_quant": True, - "smooth_quant_args": { - "alpha": optim_args.smooth_quant_alpha - } - } if optim_args.smooth_quant else None) + trainer.metrics = tune_metric + tuning_criterion = TuningCriterion(max_trials=600) + accuracy_criterion = AccuracyCriterion( + higher_is_better=False, # optional. + criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". + tolerable_loss=optim_args.perf_tol, # optional. 
+ ) + if optim_args.quantization_approach != "qat": + quantization_config = PostTrainingQuantConfig( + approach=optim_args.quantization_approach, + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + else: + quantization_config = QuantizationAwareTrainingConfig( + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + early_stopping_patience = 2 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ + early_stopping_threshold)) model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark_only: diff --git a/examples/huggingface/pytorch/language-modeling/quantization/run_mlm.py b/examples/huggingface/pytorch/language-modeling/quantization/run_mlm.py index ec01cacbeb4..ea78bdd0e08 100644 --- a/examples/huggingface/pytorch/language-modeling/quantization/run_mlm.py +++ b/examples/huggingface/pytorch/language-modeling/quantization/run_mlm.py @@ -28,7 +28,13 @@ from dataclasses import dataclass, field from datasets import load_dataset, load_metric from itertools import chain -from intel_extension_for_transformers.transformers import metrics, OptimizedModel, QuantizationConfig +from intel_extension_for_transformers.transformers import OptimizedModel, metrics +from neural_compressor.config import ( + PostTrainingQuantConfig, + QuantizationAwareTrainingConfig, + TuningCriterion, + AccuracyCriterion +) from intel_extension_for_transformers.transformers.trainer import NLPTrainer from transformers import ( CONFIG_MAPPING, @@ -207,9 +213,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="PostTrainingStatic", - metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, + default="static", + metadata={"help": "Quantization approach. 
Supported approach are static, " + "dynamic and qat."}, ) metric_name: Optional[str] = field( default="eval_loss", @@ -561,6 +567,7 @@ def compute_metrics(eval_preds): mlm_probability=data_args.mlm_probability, pad_to_multiple_of=8 if pad_to_multiple_of_8 else None, ) + metric_name = optim_args.metric_name training_args.metric_for_best_model = metric_name @@ -584,28 +591,39 @@ def compute_metrics(eval_preds): raise ValueError("do_eval must be set to True for quantization.") trainer.save_model(training_args.output_dir) - if optim_args.quantization_approach != "PostTrainingDynamic": + if optim_args.quantization_approach != "dynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) - if optim_args.quantization_approach == "QuantizationAwareTraining": - early_stopping_patience = 6 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, - early_stopping_threshold)) - tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol, greater_is_better=False ) - quantization_config = QuantizationConfig( - approach=optim_args.quantization_approach, - metrics=[tune_metric], - sampling_size = len(train_dataset)//20 + trainer.metrics = tune_metric + tuning_criterion = TuningCriterion(max_trials=600) + accuracy_criterion = AccuracyCriterion( + higher_is_better=False, # optional. + criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". + tolerable_loss=optim_args.perf_tol, # optional. 
) + if optim_args.quantization_approach != "qat": + quantization_config = PostTrainingQuantConfig( + approach=optim_args.quantization_approach, + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + else: + quantization_config = QuantizationAwareTrainingConfig( + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + early_stopping_patience = 2 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ + early_stopping_threshold)) model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark_only: diff --git a/examples/huggingface/pytorch/language-modeling/quantization/run_plm.py b/examples/huggingface/pytorch/language-modeling/quantization/run_plm.py index 0c15cedb9c8..4550de43e69 100644 --- a/examples/huggingface/pytorch/language-modeling/quantization/run_plm.py +++ b/examples/huggingface/pytorch/language-modeling/quantization/run_plm.py @@ -27,7 +27,13 @@ from datasets import load_dataset from itertools import chain -from intel_extension_for_transformers.transformers import metrics, OptimizedModel, QuantizationConfig +from intel_extension_for_transformers.transformers import OptimizedModel, metrics +from neural_compressor.config import ( + PostTrainingQuantConfig, + QuantizationAwareTrainingConfig, + TuningCriterion, + AccuracyCriterion +) from intel_extension_for_transformers.transformers.trainer import NLPTrainer from transformers import ( AutoConfig, @@ -202,15 +208,15 @@ class OptimizationArguments: default=False, metadata={"help": "Whether or not to apply quantization."}, ) - quantization_approach: Optional[str] = field( - default="PostTrainingStatic", - metadata={"help": "Quantization approach. 
Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, - ) metric_name: Optional[str] = field( default="eval_loss", metadata={"help": "Metric used for the tuning strategy."}, ) + quantization_approach: Optional[str] = field( + default="static", + metadata={"help": "Quantization approach. Supported approach are static, " + "dynamic and qat."}, + ) is_relative: Optional[bool] = field( default=False, metadata={"help": "Metric tolerance mode, True for relative, otherwise for absolute."}, @@ -511,6 +517,7 @@ def group_texts(examples): plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length, ) + metric_name = optim_args.metric_name training_args.metric_for_best_model = metric_name @@ -530,28 +537,39 @@ def group_texts(examples): raise ValueError("do_eval must be set to True for quantization.") trainer.save_model(training_args.output_dir) - if optim_args.quantization_approach != "PostTrainingDynamic": + if optim_args.quantization_approach != "dynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) - if optim_args.quantization_approach == "QuantizationAwareTraining": - early_stopping_patience = 6 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, - early_stopping_threshold)) - tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol, greater_is_better=False ) - quantization_config = QuantizationConfig( - approach=optim_args.quantization_approach, - metrics=[tune_metric], - sampling_size = len(train_dataset)//20 + trainer.metrics = tune_metric + tuning_criterion = TuningCriterion(max_trials=600) + accuracy_criterion = AccuracyCriterion( + higher_is_better=False, # optional. + criterion="relative" if optim_args.is_relative else "absolute", # optional. 
Available values are "relative" and "absolute". + tolerable_loss=optim_args.perf_tol, # optional. ) + if optim_args.quantization_approach != "qat": + quantization_config = PostTrainingQuantConfig( + approach=optim_args.quantization_approach, + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + else: + quantization_config = QuantizationAwareTrainingConfig( + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + early_stopping_patience = 2 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ + early_stopping_threshold)) model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark_only: diff --git a/examples/huggingface/pytorch/language-modeling/quantization/run_tuning.sh b/examples/huggingface/pytorch/language-modeling/quantization/run_tuning.sh index eae534dac65..18a709a59e7 100644 --- a/examples/huggingface/pytorch/language-modeling/quantization/run_tuning.sh +++ b/examples/huggingface/pytorch/language-modeling/quantization/run_tuning.sh @@ -17,7 +17,7 @@ function init_params { extra_cmd="" batch_size=8 model_type="bert" - approach="PostTrainingStatic" + approach="static" alpha=0.5 for var in "$@" do @@ -60,7 +60,7 @@ function run_tuning { DATASET_CONFIG_NAME="wikitext-2-raw-v1" model_name_or_path="EleutherAI/gpt-neo-125m" task="clm" - approach="PostTrainingStatic" + approach="static" backend="" elif [ "${topology}" = "gpt_neo" ]; then if [ "${task}" = "clm" ]; then @@ -70,11 +70,11 @@ function run_tuning { DATASET_CONFIG_NAME="wikitext-2-raw-v1" model_name_or_path="EleutherAI/gpt-neo-125M" if [ "${approach}" = "dynamic" ]; then - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${approach}" = "static" ]; then - approach="PostTrainingStatic" + approach="static" elif [ "${approach}" = "qat" ]; then - approach="QuantizationAwareTraining" + approach="qat" extra_cmd=$extra_cmd" --learning_rate 1e-5 \ 
--num_train_epochs 6 \ --eval_steps 100 \ @@ -83,7 +83,8 @@ function run_tuning { --load_best_model_at_end True \ --evaluation_strategy steps \ --save_strategy steps \ - --save_total_limit 1" + --save_total_limit 1 \ + --save_safetensors False" fi elif [ "${topology}" = "gpt_j" ]; then if [ "${task}" = "clm" ]; then @@ -93,9 +94,9 @@ function run_tuning { DATASET_CONFIG_NAME="wikitext-2-raw-v1" model_name_or_path="/tf_dataset2/models/pytorch/gpt-j-6B" if [ "${approach}" = "dynamic" ]; then - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${approach}" = "static" ]; then - approach="PostTrainingStatic" + approach="static" fi elif [ "${topology}" = "bert" ]; then if [ "${task}" = "mlm" ]; then @@ -105,11 +106,11 @@ function run_tuning { DATASET_CONFIG_NAME="wikitext-2-raw-v1" model_name_or_path="bert-base-uncased" if [ "${approach}" = "dynamic" ]; then - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${approach}" = "static" ]; then - approach="PostTrainingStatic" + approach="static" elif [ "${approach}" = "qat" ]; then - approach="QuantizationAwareTraining" + approach="qat" extra_cmd=$extra_cmd" --learning_rate 1e-5 \ --num_train_epochs 6 \ --eval_steps 100 \ @@ -119,7 +120,8 @@ function run_tuning { --evaluation_strategy steps \ --save_strategy steps \ --metric_for_best_model accuracy \ - --save_total_limit 1" + --save_total_limit 1 \ + --save_safetensors False" fi elif [ "${topology}" = "xlnet" ]; then if [ "${task}" = "plm" ]; then @@ -129,11 +131,11 @@ function run_tuning { DATASET_CONFIG_NAME="wikitext-2-raw-v1" model_name_or_path="xlnet-base-cased" if [ "${approach}" = "dynamic" ]; then - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${approach}" = "static" ]; then - approach="PostTrainingStatic" + approach="static" elif [ "${approach}" = "qat" ]; then - approach="QuantizationAwareTraining" + approach="qat" extra_cmd=$extra_cmd" --learning_rate 1e-5 \ --num_train_epochs 6 \ --eval_steps 100 \ @@ -143,7 +145,8 @@ function 
run_tuning { --evaluation_strategy steps \ --save_strategy steps \ --metric_for_best_model accuracy \ - --save_total_limit 1" + --save_total_limit 1 \ + --save_safetensors False" fi elif [ "${topology}" = "gpt_neox" ]; then if [ "${task}" = "clm" ]; then @@ -153,9 +156,9 @@ function run_tuning { DATASET_CONFIG_NAME="unshuffled_original_ast" model_name_or_path="abeja/gpt-neox-japanese-2.7b" if [ "${approach}" = "dynamic" ]; then - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${approach}" = "static" ]; then - approach="PostTrainingStatic" + approach="static" fi elif [ "${topology}" = "bloom" ]; then if [ "${task}" = "clm" ]; then @@ -164,7 +167,7 @@ function run_tuning { DATASET_NAME="lambada" model_name_or_path="bigscience/bloom-560m" if [ "${approach}" = "static" ]; then - approach="PostTrainingStatic" + approach="static" fi extra_cmd=$extra_cmd" --smooth_quant --sampling_size 400 --torchscript" fi diff --git a/examples/huggingface/pytorch/multiple-choice/quantization/README.md b/examples/huggingface/pytorch/multiple-choice/quantization/README.md index d966319be83..96caf84bc49 100644 --- a/examples/huggingface/pytorch/multiple-choice/quantization/README.md +++ b/examples/huggingface/pytorch/multiple-choice/quantization/README.md @@ -9,19 +9,17 @@ This example shows the model quantization for multiple choice task. A multiple c ``` pip install intel-extension-for-transformers pip install -r requirements.txt -pip install transformers==4.34.1 ``` ->**Note**: Please use transformers no higher than 4.34.1 # Run -The script `run_swag.py` provides three quantization approaches (PostTrainingStatic, PostTrainingStatic and QuantizationAwareTraining) based on [Intel® Neural Compressor](https://github.com/intel/neural-compressor). +The script `run_swag.py` provides three quantization approaches (dynamic, static and qat) based on [Intel® Neural Compressor](https://github.com/intel/neural-compressor). 
``` python run_swag.py \ --model_name_or_path ehdwns1516/bert-base-uncased_SWAG \ --tune \ - --quantization_approach PostTrainingStatic \ + --quantization_approach static \ --do_train \ --do_eval \ --pad_to_max_length \ @@ -31,6 +29,6 @@ python run_swag.py \ # Validated model list -|DATASET|Pretrained model|PostTrainingDynamic | PostTrainingStatic | QuantizationAwareTraining +|DATASET|Pretrained model|dynamic | static | qat |---|------------------------------------|---|---|--- |SWAG|ehdwns1516/bert-base-uncased_SWAG| ✅| ✅| ✅ diff --git a/examples/huggingface/pytorch/multiple-choice/quantization/run_swag.py b/examples/huggingface/pytorch/multiple-choice/quantization/run_swag.py index 511010f4da6..0304dd71c29 100644 --- a/examples/huggingface/pytorch/multiple-choice/quantization/run_swag.py +++ b/examples/huggingface/pytorch/multiple-choice/quantization/run_swag.py @@ -28,7 +28,13 @@ from dataclasses import dataclass, field from datasets import load_dataset from itertools import chain -from intel_extension_for_transformers.transformers import metrics, OptimizedModel, QuantizationConfig +from intel_extension_for_transformers.transformers import OptimizedModel, metrics +from neural_compressor.config import ( + PostTrainingQuantConfig, + QuantizationAwareTrainingConfig, + TuningCriterion, + AccuracyCriterion +) from intel_extension_for_transformers.transformers.trainer import NLPTrainer from transformers import ( AutoConfig, @@ -215,9 +221,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="POSTTRAININGSTATIC", - metadata={"help": "Quantization approach. Supported approach are POSTTRAININGSTATIC, " - "POSTTRAININGDYNAMIC and QUANTIZATIONAWARETRAINING."}, + default="static", + metadata={"help": "Quantization approach. 
Supported approach are static, " + "dynamic and qat."}, ) metric_name: Optional[str] = field( default=None, @@ -473,26 +479,44 @@ def compute_metrics(eval_predictions): model.config.save_pretrained(training_args.output_dir) trainer.save_model(training_args.output_dir) - if optim_args.quantization_approach != "POSTTRAININGDYNAMIC": + if optim_args.quantization_approach != "dynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) model.config.save_pretrained(training_args.output_dir) - if optim_args.quantization_approach == "QUANTIZATIONAWARETRAINING": - early_stopping_patience = 6 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, - early_stopping_threshold)) tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - quantization_config = QuantizationConfig( - approach=optim_args.quantization_approach, - metrics=[tune_metric], - sampling_size = len(train_dataset)//20 - ) + trainer.metrics = tune_metric + if optim_args.quantization_approach != "qat": + tuning_criterion = TuningCriterion(max_trials=600) + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, # optional. + criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". + tolerable_loss=optim_args.perf_tol, # optional. + ) + quantization_config = PostTrainingQuantConfig( + approach=optim_args.quantization_approach, + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + else: + tuning_criterion = TuningCriterion(max_trials=600) + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, # optional. + criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". + tolerable_loss=optim_args.perf_tol, # optional. 
+ ) + quantization_config = QuantizationAwareTrainingConfig( + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + early_stopping_patience = 2 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ + early_stopping_threshold)) model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark_only: diff --git a/examples/huggingface/pytorch/multiple-choice/quantization/run_tuning.sh b/examples/huggingface/pytorch/multiple-choice/quantization/run_tuning.sh index 3a718e34f0e..b8123802f38 100644 --- a/examples/huggingface/pytorch/multiple-choice/quantization/run_tuning.sh +++ b/examples/huggingface/pytorch/multiple-choice/quantization/run_tuning.sh @@ -11,7 +11,7 @@ function main { # init params function init_params { tuned_checkpoint="saved_results" - approach="PostTrainingStatic" + approach="static" batch_size=8 for var in "$@" do @@ -41,13 +41,13 @@ function init_params { function run_tuning { if [ "${topology}" = "bert_base_swag_static" ]; then model_name_or_path="ehdwns1516/bert-base-uncased_SWAG" - approach="PostTrainingStatic" + approach="static" elif [ "${topology}" = "bert_base_swag_dynamic" ]; then model_name_or_path="ehdwns1516/bert-base-uncased_SWAG" - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${topology}" = "bert_base_swag_qat" ]; then model_name_or_path="ehdwns1516/bert-base-uncased_SWAG" - approach="QuantizationAwareTraining" + approach="qat" extra_cmd=$extra_cmd" --learning_rate 1e-5 \ --num_train_epochs 6 \ --eval_steps 100 \ @@ -56,7 +56,8 @@ function run_tuning { --load_best_model_at_end True \ --evaluation_strategy steps \ --save_strategy steps \ - --save_total_limit 1" + --save_total_limit 1 \ + --save_safetensors False" fi python -u ./run_swag.py \ @@ -72,7 +73,8 @@ function run_tuning { --tune \ --pad_to_max_length \ --overwrite_cache \ - --overwrite_output_dir + --overwrite_output_dir \ + ${extra_cmd} } main 
"$@" diff --git a/examples/huggingface/pytorch/optimization_README.md b/examples/huggingface/pytorch/optimization_README.md index 2bafa814908..3d1909b01a6 100644 --- a/examples/huggingface/pytorch/optimization_README.md +++ b/examples/huggingface/pytorch/optimization_README.md @@ -4,7 +4,7 @@ Welcome to Pytorch Huggingface examples. The examples is following from [Hugging ## Quantization approach -| Task | PostTrainingDynamic | PostTrainingStatic | QuantizationAwareTraining +| Task | dynamic | static | qat |---|:---:|:---:|:---:| |**`language-modeling`**| ✅ | ✅ | ✅ |**`multi-choice`**| ✅ | ✅ | ✅ diff --git a/examples/huggingface/pytorch/question-answering/dynamic/README.md b/examples/huggingface/pytorch/question-answering/dynamic/README.md index 8b79be87732..b1d1ab9e662 100644 --- a/examples/huggingface/pytorch/question-answering/dynamic/README.md +++ b/examples/huggingface/pytorch/question-answering/dynamic/README.md @@ -82,7 +82,7 @@ python run_qa.py \ python run_qa.py \ --model_name_or_path "sguskin/dynamic-minilmv2-L6-H384-squad1.1" \ --dataset_name squad \ ---quantization_approach PostTrainingStatic \ +--quantization_approach static \ --do_eval \ --do_train \ --tune \ diff --git a/examples/huggingface/pytorch/question-answering/dynamic/run_qa.py b/examples/huggingface/pytorch/question-answering/dynamic/run_qa.py index 348b3a56977..d6c32905700 100644 --- a/examples/huggingface/pytorch/question-answering/dynamic/run_qa.py +++ b/examples/huggingface/pytorch/question-answering/dynamic/run_qa.py @@ -29,7 +29,12 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics , OptimizedModel, QuantizationConfig, DynamicLengthConfig +from intel_extension_for_transformers.transformers import metrics, DynamicLengthConfig +from neural_compressor.config import ( + PostTrainingQuantConfig, + TuningCriterion, + AccuracyCriterion +) from trainer_qa import 
QuestionAnsweringTrainer from intel_extension_for_transformers.transformers.modeling.modeling_roberta_dynamic import RobertaForQuestionAnswering @@ -221,9 +226,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="PostTrainingStatic", - metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, + default="static", + metadata={"help": "Quantization approach. Supported approach are static, " + "dynamic and qat."}, ) metric_name: Optional[str] = field( default="eval_f1", @@ -771,26 +776,29 @@ def compute_metrics(p: EvalPrediction): trainer.save_model(training_args.output_dir) trainer.calib_dataloader = trainer.get_eval_dataloader() - if optim_args.quantization_approach != "PostTrainingDynamic": + if optim_args.quantization_approach != "dynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) - if optim_args.quantization_approach == "QuantizationAwareTraining": - early_stopping_patience = 6 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, - early_stopping_threshold)) - tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - quantization_config = QuantizationConfig( - approach=optim_args.quantization_approach, - max_trials=200, - metrics=[tune_metric], - ) - quantization_config.framework = "pytorch_ipex" + trainer.metrics = tune_metric + if optim_args.quantization_approach != "qat": + tuning_criterion = TuningCriterion(max_trials=600, objective=["performance"]) + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, # optional. + criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". 
+ tolerable_loss=optim_args.perf_tol, # optional. + ) + quantization_config = PostTrainingQuantConfig( + backend = "ipex", + approach=optim_args.quantization_approach, + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark or optim_args.accuracy_only: diff --git a/examples/huggingface/pytorch/question-answering/orchestrate_optimizations/run_qa.py b/examples/huggingface/pytorch/question-answering/orchestrate_optimizations/run_qa.py index c07e38affc1..b9e416d41b9 100644 --- a/examples/huggingface/pytorch/question-answering/orchestrate_optimizations/run_qa.py +++ b/examples/huggingface/pytorch/question-answering/orchestrate_optimizations/run_qa.py @@ -33,13 +33,15 @@ import transformers from intel_extension_for_transformers.transformers import ( metrics, - PrunerConfig, - PruningConfig, - DistillationConfig, - QuantizationConfig, OptimizedModel, objectives ) +from neural_compressor.config import ( + WeightPruningConfig, + DistillationConfig, + KnowledgeDistillationLossConfig, + QuantizationAwareTrainingConfig, +) from torch.utils.data import DataLoader from tqdm import tqdm from trainer_qa import QuestionAnsweringTrainer @@ -225,7 +227,7 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply prune."}, ) pruning_approach: Optional[str] = field( - default="BasicMagnitude", + default="magnitude", metadata={"help": "Pruning approach. Supported approach is basic_magnite."}, ) target_sparsity_ratio: Optional[float] = field( @@ -245,9 +247,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="QuantizationAwareTraining", - metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, + default="qat", + metadata={"help": "Quantization approach. 
Supported approach are static, " + "dynamic and qat."}, ) metric_name: Optional[str] = field( default="eval_f1", @@ -789,7 +791,7 @@ def get_logits(teacher_model, train_dataset, teacher_train_dataset): # Trace model from neural_compressor.adaptor.torch_utils.symbolic_trace import symbolic_trace - model = symbolic_trace(model, optim_args.quantization_approach=="QuantizationAwareTraining") + model = symbolic_trace(model, optim_args.quantization_approach=="qat") # Initialize our Trainer trainer = QuestionAnsweringTrainer( @@ -814,23 +816,20 @@ def get_logits(teacher_model, train_dataset, teacher_train_dataset): tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - prune_type = 'PatternLock' \ + prune_type = 'pattern_lock' \ if optim_args.pruning_approach else optim_args.pruning_approach target_sparsity_ratio = optim_args.target_sparsity_ratio \ if optim_args.target_sparsity_ratio else None - pruner_config = PrunerConfig(prune_type=prune_type, target_sparsity_ratio=target_sparsity_ratio) - pruning_conf = PruningConfig(framework="pytorch_fx",pruner_config=[pruner_config], metrics=tune_metric) - distillation_conf = DistillationConfig(framework="pytorch_fx", metrics=tune_metric) - - objective = objectives.performance - quantization_conf = QuantizationConfig( - approach=optim_args.quantization_approach, - max_trials=600, - metrics=[tune_metric], - objectives=[objective] - ) + trainer.metrics = tune_metric + pruning_conf = WeightPruningConfig([{"start_step": 0, "end_step": 2}], + target_sparsity=target_sparsity_ratio, + pruning_scope="local", + pruning_type=prune_type) + distillation_criterion = KnowledgeDistillationLossConfig(loss_types=["CE", "KL"]) + distillation_conf = DistillationConfig(teacher_model=teacher_model, criterion=distillation_criterion) + quantization_conf = QuantizationAwareTrainingConfig() conf_list = [pruning_conf, distillation_conf, quantization_conf] - model = 
trainer.orchestrate_optimizations(config_list=conf_list, teacher_model=teacher_model) + model = trainer.orchestrate_optimizations(config_list=conf_list) if optim_args.benchmark or optim_args.accuracy_only: start_time = timeit.default_timer() @@ -839,7 +838,7 @@ def get_logits(teacher_model, train_dataset, teacher_train_dataset): max_eval_samples = data_args.max_eval_samples \ if data_args.max_eval_samples is not None else len(eval_dataset) eval_samples = min(max_eval_samples, len(eval_dataset)) - samples = eval_samples - (eval_samples % batch_size) \ + samples = eval_samples - (eval_samples % optim_args.batch_size) \ if training_args.dataloader_drop_last else eval_samples logger.info("metrics keys: {}".format(results.keys())) bert_task_acc_keys = ['eval_f1', 'eval_accuracy', 'eval_matthews_correlation', diff --git a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/README.md b/examples/huggingface/pytorch/question-answering/pruning/group_lasso/README.md deleted file mode 100644 index f7054bf95fa..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/README.md +++ /dev/null @@ -1,68 +0,0 @@ -Step-by-Step -============ - -This document is used to list steps of reproducing PyTorch BERT pruning result. - -# Prerequisite - -## 1. Environment - -Recommend python 3.7 or higher version. - -### Install [intel-extension-for-transformers]() -``` -pip install intel-extension-for-transformers -``` - -### Install PyTorch - -Install pytorch-gpu, visit [pytorch.org](https://pytorch.org/). 
-```bash -# Install pytorch -pip3 install torch==1.10.0+cu113 torchvision==0.11.1+cu113 torchaudio==0.10.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html -``` - -### Install BERT dependency - -```bash -cd examples/pytorch/huggingface/question-answering/pruning/group_lasso -pip3 install -r requirements.txt --ignore-installed PyYAML -``` -```bash -git clone https://github.com/NVIDIA/apex -cd apex -pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ -``` -> **Note** -> -> If no CUDA runtime is found, please export CUDA_HOME='/usr/local/cuda'. - -## 2. Prepare Dataset - -* For SQuAD task, you should download SQuAD dataset from [SQuAD dataset link](https://rajpurkar.github.io/SQuAD-explorer/). -## 3. Prepare Model -* Please download BERT large pretrained model from [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/models/bert_pyt_ckpt_large_pretraining_amp_lamb/files?version=20.03.0). -```bash -# wget cmd -wget https://api.ngc.nvidia.com/v2/models/nvidia/bert_pyt_ckpt_large_pretraining_amp_lamb/versions/20.03.0/files/bert_large_pretrained_amp.pt - -# curl cmd -curl -LO https://api.ngc.nvidia.com/v2/models/nvidia/bert_pyt_ckpt_large_pretraining_amp_lamb/versions/20.03.0/files/bert_large_pretrained_amp.pt -``` -# Run -Enter your created conda env, then run the script. 
-```bash -bash scripts/run_squad_sparse.sh /path/to/model.pt 2.0 16 5e-5 tf32 /path/to/data /path/to/outdir prune_bert.yaml -``` -The default parameters are as follows: -```shell -init_checkpoint=${1:-"/path/to/ckpt_8601.pt"} -epochs=${2:-"2.0"} -batch_size=${3:-"4"} -learning_rate=${4:-"3e-5"} -precision=${5:-"tf32"} -BERT_PREP_WORKING_DIR=${6:-'/path/to/bert_data'} -OUT_DIR=${7:-"./results/SQuAD"} -prune_config=${8:-"prune_bert.yaml"} -``` - >**Note**: For original BERT readme, please refer [BERT README](https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/BERT/README.md) diff --git a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/bert_config.json b/examples/huggingface/pytorch/question-answering/pruning/group_lasso/bert_config.json deleted file mode 100644 index a7efa973d74..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/bert_config.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "attention_probs_dropout_prob": 0.1, - "hidden_act": "gelu", - "hidden_dropout_prob": 0.1, - "hidden_size": 1024, - "initializer_range": 0.02, - "intermediate_size": 4096, - "max_position_embeddings": 512, - "num_attention_heads": 16, - "num_hidden_layers": 24, - "type_vocab_size": 2, - "vocab_size": 30522 -} diff --git a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/extract_features.py b/examples/huggingface/pytorch/question-answering/pruning/group_lasso/extract_features.py deleted file mode 100644 index dd206f52221..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/extract_features.py +++ /dev/null @@ -1,298 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Extract pre-computed feature vectors from a PyTorch BERT model.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import argparse -import collections -import logging -import json -import re - -import torch -from torch.utils.data import TensorDataset, DataLoader, SequentialSampler -from torch.utils.data.distributed import DistributedSampler - -from tokenization import BertTokenizer -from modeling import BertModel - -logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', - datefmt = '%m/%d/%Y %H:%M:%S', - level = logging.INFO) -logger = logging.getLogger(__name__) - - -class InputExample(object): - - def __init__(self, unique_id, text_a, text_b): - self.unique_id = unique_id - self.text_a = text_a - self.text_b = text_b - - -class InputFeatures(object): - """A single set of features of data.""" - - def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids): - self.unique_id = unique_id - self.tokens = tokens - self.input_ids = input_ids - self.input_mask = input_mask - self.input_type_ids = input_type_ids - - -def convert_examples_to_features(examples, seq_length, tokenizer): - """Loads a data file into a list of `InputBatch`s.""" - - features = [] - for (ex_index, example) in enumerate(examples): - tokens_a = tokenizer.tokenize(example.text_a) - - tokens_b = None - if example.text_b: - tokens_b = tokenizer.tokenize(example.text_b) - - if tokens_b: - # Modifies `tokens_a` and `tokens_b` in place so that the total - # length is less than 
the specified length. - # Account for [CLS], [SEP], [SEP] with "- 3" - _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3) - else: - # Account for [CLS] and [SEP] with "- 2" - if len(tokens_a) > seq_length - 2: - tokens_a = tokens_a[0:(seq_length - 2)] - - # The convention in BERT is: - # (a) For sequence pairs: - # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] - # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 - # (b) For single sequences: - # tokens: [CLS] the dog is hairy . [SEP] - # type_ids: 0 0 0 0 0 0 0 - # - # Where "type_ids" are used to indicate whether this is the first - # sequence or the second sequence. The embedding vectors for `type=0` and - # `type=1` were learned during pre-training and are added to the wordpiece - # embedding vector (and position vector). This is not *strictly* necessary - # since the [SEP] token unambiguously separates the sequences, but it makes - # it easier for the model to learn the concept of sequences. - # - # For classification tasks, the first vector (corresponding to [CLS]) is - # used as as the "sentence vector". Note that this only makes sense because - # the entire model is fine-tuned. - tokens = [] - input_type_ids = [] - tokens.append("[CLS]") - input_type_ids.append(0) - for token in tokens_a: - tokens.append(token) - input_type_ids.append(0) - tokens.append("[SEP]") - input_type_ids.append(0) - - if tokens_b: - for token in tokens_b: - tokens.append(token) - input_type_ids.append(1) - tokens.append("[SEP]") - input_type_ids.append(1) - - input_ids = tokenizer.convert_tokens_to_ids(tokens) - - # The mask has 1 for real tokens and 0 for padding tokens. Only real - # tokens are attended to. - input_mask = [1] * len(input_ids) - - # Zero-pad up to the sequence length. 
- while len(input_ids) < seq_length: - input_ids.append(0) - input_mask.append(0) - input_type_ids.append(0) - - assert len(input_ids) == seq_length - assert len(input_mask) == seq_length - assert len(input_type_ids) == seq_length - - if ex_index < 5: - logger.info("*** Example ***") - logger.info("unique_id: %s" % (example.unique_id)) - logger.info("tokens: %s" % " ".join([str(x) for x in tokens])) - logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) - logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) - logger.info( - "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids])) - - features.append( - InputFeatures( - unique_id=example.unique_id, - tokens=tokens, - input_ids=input_ids, - input_mask=input_mask, - input_type_ids=input_type_ids)) - return features - - -def _truncate_seq_pair(tokens_a, tokens_b, max_length): - """Truncates a sequence pair in place to the maximum length.""" - - # This is a simple heuristic which will always truncate the longer sequence - # one token at a time. This makes more sense than truncating an equal percent - # of tokens from each, since if one sequence is very short then each token - # that's truncated likely contains more information than a longer sequence. 
- while True: - total_length = len(tokens_a) + len(tokens_b) - if total_length <= max_length: - break - if len(tokens_a) > len(tokens_b): - tokens_a.pop() - else: - tokens_b.pop() - - -def read_examples(input_file): - """Read a list of `InputExample`s from an input file.""" - examples = [] - unique_id = 0 - with open(input_file, "r", encoding='utf-8') as reader: - while True: - line = reader.readline() - if not line: - break - line = line.strip() - text_a = None - text_b = None - m = re.match(r"^(.*) \|\|\| (.*)$", line) - if m is None: - text_a = line - else: - text_a = m.group(1) - text_b = m.group(2) - examples.append( - InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b)) - unique_id += 1 - return examples - - -def main(): - parser = argparse.ArgumentParser() - - ## Required parameters - parser.add_argument("--input_file", default=None, type=str, required=True) - parser.add_argument("--output_file", default=None, type=str, required=True) - parser.add_argument("--bert_model", default=None, type=str, required=True, - help="Bert pre-trained model selected in the list: bert-base-uncased, " - "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") - - ## Other parameters - parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") - parser.add_argument("--layers", default="-1,-2,-3,-4", type=str) - parser.add_argument("--max_seq_length", default=128, type=int, - help="The maximum total input sequence length after WordPiece tokenization. 
Sequences longer " - "than this will be truncated, and sequences shorter than this will be padded.") - parser.add_argument("--batch_size", default=32, type=int, help="Batch size for predictions.") - parser.add_argument("--local_rank", - type=int, - default=-1, - help = "local_rank for distributed training on gpus") - parser.add_argument("--no_cuda", - action='store_true', - help="Whether not to use CUDA when available") - - args = parser.parse_args() - - if args.local_rank == -1 or args.no_cuda: - device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") - n_gpu = torch.cuda.device_count() - else: - device = torch.device("cuda", args.local_rank) - n_gpu = 1 - # Initializes the distributed backend which will take care of synchronizing nodes/GPUs - torch.distributed.init_process_group(backend='nccl') - logger.info("device: {} n_gpu: {} distributed training: {}".format(device, n_gpu, bool(args.local_rank != -1))) - - layer_indexes = [int(x) for x in args.layers.split(",")] - - tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) - - examples = read_examples(args.input_file) - - features = convert_examples_to_features( - examples=examples, seq_length=args.max_seq_length, tokenizer=tokenizer) - - unique_id_to_feature = {} - for feature in features: - unique_id_to_feature[feature.unique_id] = feature - - model = BertModel.from_pretrained(args.bert_model) - model.to(device) - - if args.local_rank != -1: - model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], - output_device=args.local_rank) - elif n_gpu > 1: - model = torch.nn.DataParallel(model) - - all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) - all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) - all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) - - eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index) - 
if args.local_rank == -1: - eval_sampler = SequentialSampler(eval_data) - else: - eval_sampler = DistributedSampler(eval_data) - eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size) - - model.eval() - with open(args.output_file, "w", encoding='utf-8') as writer: - for input_ids, input_mask, example_indices in eval_dataloader: - input_ids = input_ids.to(device) - input_mask = input_mask.to(device) - - all_encoder_layers, _ = model(input_ids, token_type_ids=None, attention_mask=input_mask) - all_encoder_layers = all_encoder_layers - - for b, example_index in enumerate(example_indices): - feature = features[example_index.item()] - unique_id = int(feature.unique_id) - # feature = unique_id_to_feature[unique_id] - output_json = collections.OrderedDict() - output_json["linex_index"] = unique_id - all_out_features = [] - for (i, token) in enumerate(feature.tokens): - all_layers = [] - for (j, layer_index) in enumerate(layer_indexes): - layer_output = all_encoder_layers[int(layer_index)].detach().cpu().numpy() - layer_output = layer_output[b] - layers = collections.OrderedDict() - layers["index"] = layer_index - layers["values"] = [ - round(x.item(), 6) for x in layer_output[i] - ] - all_layers.append(layers) - out_features = collections.OrderedDict() - out_features["token"] = token - out_features["layers"] = all_layers - all_out_features.append(out_features) - output_json["features"] = all_out_features - writer.write(json.dumps(output_json) + "\n") - - -if __name__ == "__main__": - main() diff --git a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/file_utils.py b/examples/huggingface/pytorch/question-answering/pruning/group_lasso/file_utils.py deleted file mode 100644 index cdefb125839..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/file_utils.py +++ /dev/null @@ -1,263 +0,0 @@ -# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. 
-# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Utilities for working with the local dataset cache. -This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp -Copyright by the AllenNLP authors. -""" - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import logging -import os -import shutil -import tempfile -from functools import wraps -from hashlib import sha256 -import sys -from io import open - -import boto3 -import requests -from botocore.exceptions import ClientError -from tqdm import tqdm - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse - -try: - from pathlib import Path - PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', - Path.home() / '.pytorch_pretrained_bert')) -except AttributeError: - PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', - os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_bert')) - -logger = logging.getLogger(__name__) # pylint: disable=invalid-name - - -def url_to_filename(url, etag=None): - """ - Convert `url` into a hashed filename in a repeatable way. - If `etag` is specified, append its hash to the url's, delimited - by a period. - """ - url_bytes = url.encode('utf-8') - url_hash = sha256(url_bytes) - filename = url_hash.hexdigest() - - if etag: - etag_bytes = etag.encode('utf-8') - etag_hash = sha256(etag_bytes) - filename += '.' 
+ etag_hash.hexdigest() - - return filename - - -def filename_to_url(filename, cache_dir=None): - """ - Return the url and etag (which may be ``None``) stored for `filename`. - Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. - """ - if cache_dir is None: - cache_dir = PYTORCH_PRETRAINED_BERT_CACHE - if sys.version_info[0] == 3 and isinstance(cache_dir, Path): - cache_dir = str(cache_dir) - - cache_path = os.path.join(cache_dir, filename) - if not os.path.exists(cache_path): - raise EnvironmentError("file {} not found".format(cache_path)) - - meta_path = cache_path + '.json' - if not os.path.exists(meta_path): - raise EnvironmentError("file {} not found".format(meta_path)) - - with open(meta_path, encoding="utf-8") as meta_file: - metadata = json.load(meta_file) - url = metadata['url'] - etag = metadata['etag'] - - return url, etag - - -def cached_path(url_or_filename, cache_dir=None): - """ - Given something that might be a URL (or might be a local path), - determine which. If it's a URL, download the file and cache it, and - return the path to the cached file. If it's already a local path, - make sure the file exists and then return the path. - """ - if cache_dir is None: - cache_dir = PYTORCH_PRETRAINED_BERT_CACHE - if sys.version_info[0] == 3 and isinstance(url_or_filename, Path): - url_or_filename = str(url_or_filename) - if sys.version_info[0] == 3 and isinstance(cache_dir, Path): - cache_dir = str(cache_dir) - - parsed = urlparse(url_or_filename) - - if parsed.scheme in ('http', 'https', 's3'): - # URL, so get it from the cache (downloading if necessary) - return get_from_cache(url_or_filename, cache_dir) - elif os.path.exists(url_or_filename): - # File, and it exists. - return url_or_filename - elif parsed.scheme == '': - # File, but it doesn't exist. 
- raise EnvironmentError("file {} not found".format(url_or_filename)) - else: - # Something unknown - raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) - - -def split_s3_path(url): - """Split a full s3 path into the bucket name and path.""" - parsed = urlparse(url) - if not parsed.netloc or not parsed.path: - raise ValueError("bad s3 path {}".format(url)) - bucket_name = parsed.netloc - s3_path = parsed.path - # Remove '/' at beginning of path. - if s3_path.startswith("/"): - s3_path = s3_path[1:] - return bucket_name, s3_path - - -def s3_request(func): - """ - Wrapper function for s3 requests in order to create more helpful error - messages. - """ - - @wraps(func) - def wrapper(url, *args, **kwargs): - try: - return func(url, *args, **kwargs) - except ClientError as exc: - if int(exc.response["Error"]["Code"]) == 404: - raise EnvironmentError("file {} not found".format(url)) - else: - raise - - return wrapper - - -@s3_request -def s3_etag(url): - """Check ETag on S3 object.""" - s3_resource = boto3.resource("s3") - bucket_name, s3_path = split_s3_path(url) - s3_object = s3_resource.Object(bucket_name, s3_path) - return s3_object.e_tag - - -@s3_request -def s3_get(url, temp_file): - """Pull a file directly from S3.""" - s3_resource = boto3.resource("s3") - bucket_name, s3_path = split_s3_path(url) - s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) - - -def http_get(url, temp_file): - req = requests.get(url, stream=True) - content_length = req.headers.get('Content-Length') - total = int(content_length) if content_length is not None else None - progress = tqdm(unit="B", total=total) - for chunk in req.iter_content(chunk_size=1024): - if chunk: # filter out keep-alive new chunks - progress.update(len(chunk)) - temp_file.write(chunk) - progress.close() - - -def get_from_cache(url, cache_dir=None): - """ - Given a URL, look for the corresponding dataset in the local cache. - If it's not there, download it. 
Then return the path to the cached file. - """ - if cache_dir is None: - cache_dir = PYTORCH_PRETRAINED_BERT_CACHE - if sys.version_info[0] == 3 and isinstance(cache_dir, Path): - cache_dir = str(cache_dir) - - if not os.path.exists(cache_dir): - os.makedirs(cache_dir) - - # Get eTag to add to filename, if it exists. - if url.startswith("s3://"): - etag = s3_etag(url) - else: - response = requests.head(url, allow_redirects=True) - if response.status_code != 200: - raise IOError("HEAD request failed for url {} with status code {}" - .format(url, response.status_code)) - etag = response.headers.get("ETag") - - filename = url_to_filename(url, etag) - - # get cache path to put the file - cache_path = os.path.join(cache_dir, filename) - - if not os.path.exists(cache_path): - # Download to temporary file, then copy to cache dir once finished. - # Otherwise you get corrupt cache entries if the download gets interrupted. - with tempfile.NamedTemporaryFile() as temp_file: - logger.info("%s not found in cache, downloading to %s", url, temp_file.name) - - # GET file object - if url.startswith("s3://"): - s3_get(url, temp_file) - else: - http_get(url, temp_file) - - # we are copying the file before closing it, so flush to avoid truncation - temp_file.flush() - # shutil.copyfileobj() starts at the current position, so go to the start - temp_file.seek(0) - - logger.info("copying %s to cache at %s", temp_file.name, cache_path) - with open(cache_path, 'wb') as cache_file: - shutil.copyfileobj(temp_file, cache_file) - - logger.info("creating metadata file for %s", cache_path) - meta = {'url': url, 'etag': etag} - meta_path = cache_path + '.json' - with open(meta_path, 'w', encoding="utf-8") as meta_file: - json.dump(meta, meta_file) - - logger.info("removing temp file %s", temp_file.name) - - return cache_path - - -def read_set_from_file(filename): - ''' - Extract a de-duped collection (set) of text from a file. - Expected file format is one item per line. 
- ''' - collection = set() - with open(filename, 'r', encoding='utf-8') as file_: - for line in file_: - collection.add(line.rstrip()) - return collection - - -def get_file_extension(path, dot=True, lower=True): - ext = os.path.splitext(path)[1] - ext = ext if dot else ext[1:] - return ext.lower() if lower else ext diff --git a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/modeling.py b/examples/huggingface/pytorch/question-answering/pruning/group_lasso/modeling.py deleted file mode 100644 index cebd2b17f75..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/modeling.py +++ /dev/null @@ -1,1285 +0,0 @@ -# coding=utf-8 -# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. -# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""PyTorch BERT model.""" - -from __future__ import absolute_import, division, print_function, unicode_literals - -import copy -import json -import logging -import math -import os -import shutil -import tarfile -import tempfile -import sys -from io import open - -import torch -from torch import nn -from torch.nn import CrossEntropyLoss -from torch.utils import checkpoint - -sys.path.append('/workspace/bert/') -from file_utils import cached_path - -from torch.nn import Module -from torch.nn.parameter import Parameter -import torch.nn.functional as F -import torch.nn.init as init - -logger = logging.getLogger(__name__) - -PRETRAINED_MODEL_ARCHIVE_MAP = { - 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz", - 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz", - 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz", - 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz", - 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz", - 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz", - 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz", -} -CONFIG_NAME = 'bert_config.json' -WEIGHTS_NAME = 'pytorch_model.bin' -TF_WEIGHTS_NAME = 'model.ckpt' - -def load_tf_weights_in_bert(model, tf_checkpoint_path): - """ Load tf checkpoints in a pytorch model - """ - try: - import re - import numpy as np - import tensorflow as tf - except ImportError: - print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see " - "https://www.tensorflow.org/install/ for installation instructions.") - raise - tf_path = os.path.abspath(tf_checkpoint_path) - print("Converting TensorFlow checkpoint from {}".format(tf_path)) - # Load weights from TF model - init_vars = tf.train.list_variables(tf_path) - names = [] - arrays = [] - for name, shape in init_vars: - print("Loading TF weight {} with shape {}".format(name, shape)) - array = tf.train.load_variable(tf_path, name) - names.append(name) - arrays.append(array) - - for name, array in zip(names, arrays): - name = name.split('/') - # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v - # which are not required for using pretrained model - if any(n in ["adam_v", "adam_m"] for n in name): - print("Skipping {}".format("/".join(name))) - continue - pointer = model - for m_name in name: - if re.fullmatch(r'[A-Za-z]+_\d+', m_name): - l = re.split(r'_(\d+)', m_name) - else: - l = [m_name] - if l[0] == 'kernel' or l[0] == 'gamma': - pointer = getattr(pointer, 'weight') - elif l[0] == 'output_bias' or l[0] == 'beta': - pointer = getattr(pointer, 'bias') - elif l[0] == 'output_weights': - pointer = getattr(pointer, 'weight') - else: - pointer = getattr(pointer, l[0]) - if len(l) >= 2: - num = int(l[1]) - pointer = pointer[num] - if m_name[-11:] == '_embeddings': - pointer = getattr(pointer, 'weight') - elif m_name == 'kernel': - array = np.ascontiguousarray(np.transpose(array)) - try: - assert pointer.shape == array.shape - except AssertionError as e: - e.args += (pointer.shape, array.shape) - raise - print("Initialize PyTorch weight {}".format(name)) - pointer.data = torch.from_numpy(array) - return model - -def gelu(x): - return x * 0.5 * (1.0 + torch.erf(x / 1.41421)) - -#used only for triton inference -def bias_gelu(bias, y): - x = bias + y - return x * 0.5 * (1.0 + torch.erf(x / 1.41421)) - -# used specifically for training since torch.nn.functional.gelu breaks ONNX export -def 
bias_gelu_training(bias, y): - x = bias + y - return torch.nn.functional.gelu(x) # Breaks ONNX export - -def bias_tanh(bias, y): - x = bias + y - return torch.tanh(x) - -def swish(x): - return x * torch.sigmoid(x) - -#torch.nn.functional.gelu(x) # Breaks ONNX export -ACT2FN = {"gelu": gelu, "bias_gelu": bias_gelu, "bias_tanh": bias_tanh, "relu": torch.nn.functional.relu, "swish": swish} - -class LinearActivation(Module): - r"""Fused Linear and activation Module. - """ - __constants__ = ['bias'] - - def __init__(self, in_features, out_features, act='gelu', bias=True): - super(LinearActivation, self).__init__() - self.in_features = in_features - self.out_features = out_features - self.act_fn = nn.Identity() # - self.biased_act_fn = None # - self.bias = None # - if isinstance(act, str) or (sys.version_info[0] == 2 and isinstance(act, unicode)): # For TorchScript - if bias and not 'bias' in act: # compatibility - act = 'bias_' + act # - self.biased_act_fn = ACT2FN[act] # - - else: - self.act_fn = ACT2FN[act] - else: - self.act_fn = act - self.weight = Parameter(torch.Tensor(out_features, in_features)) - if bias: - self.bias = Parameter(torch.Tensor(out_features)) - else: - self.register_parameter('bias', None) - self.reset_parameters() - - def reset_parameters(self): - init.kaiming_uniform_(self.weight, a=math.sqrt(5)) - if self.bias is not None: - fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) - bound = 1 / math.sqrt(fan_in) - init.uniform_(self.bias, -bound, bound) - - def forward(self, input): - if not self.bias is None: - return self.biased_act_fn(self.bias, F.linear(input, self.weight, None)) - else: - return self.act_fn(F.linear(input, self.weight, self.bias)) - - def extra_repr(self): - return 'in_features={}, out_features={}, bias={}'.format( - self.in_features, self.out_features, self.bias is not None - ) - - -class BertConfig(object): - """Configuration class to store the configuration of a `BertModel`. 
- """ - def __init__(self, - vocab_size_or_config_json_file, - hidden_size=768, - num_hidden_layers=12, - num_attention_heads=12, - intermediate_size=3072, - hidden_act="gelu", - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - max_position_embeddings=512, - type_vocab_size=2, - initializer_range=0.02, - output_all_encoded_layers=False): - """Constructs BertConfig. - - Args: - vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`. - hidden_size: Size of the encoder layers and the pooler layer. - num_hidden_layers: Number of hidden layers in the Transformer encoder. - num_attention_heads: Number of attention heads for each attention layer in - the Transformer encoder. - intermediate_size: The size of the "intermediate" (i.e., feed-forward) - layer in the Transformer encoder. - hidden_act: The non-linear activation function (function or string) in the - encoder and pooler. If string, "gelu", "relu" and "swish" are supported. - hidden_dropout_prob: The dropout probabilitiy for all fully connected - layers in the embeddings, encoder, and pooler. - attention_probs_dropout_prob: The dropout ratio for the attention - probabilities. - max_position_embeddings: The maximum sequence length that this model might - ever be used with. Typically set this to something large just in case - (e.g., 512 or 1024 or 2048). - type_vocab_size: The vocabulary size of the `token_type_ids` passed into - `BertModel`. - initializer_range: The sttdev of the truncated_normal_initializer for - initializing all weight matrices. 
- """ - if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2 - and isinstance(vocab_size_or_config_json_file, unicode)): - with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: - json_config = json.loads(reader.read()) - for key, value in json_config.items(): - self.__dict__[key] = value - elif isinstance(vocab_size_or_config_json_file, int): - self.vocab_size = vocab_size_or_config_json_file - self.hidden_size = hidden_size - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.hidden_act = hidden_act - self.intermediate_size = intermediate_size - self.hidden_dropout_prob = hidden_dropout_prob - self.attention_probs_dropout_prob = attention_probs_dropout_prob - self.max_position_embeddings = max_position_embeddings - self.type_vocab_size = type_vocab_size - self.initializer_range = initializer_range - self.output_all_encoded_layers = output_all_encoded_layers - else: - raise ValueError("First argument must be either a vocabulary size (int)" - "or the path to a pretrained model config file (str)") - - @classmethod - def from_dict(cls, json_object): - """Constructs a `BertConfig` from a Python dictionary of parameters.""" - config = BertConfig(vocab_size_or_config_json_file=-1) - for key, value in json_object.items(): - config.__dict__[key] = value - return config - - @classmethod - def from_json_file(cls, json_file): - """Constructs a `BertConfig` from a json file of parameters.""" - with open(json_file, "r", encoding='utf-8') as reader: - text = reader.read() - return cls.from_dict(json.loads(text)) - - def __repr__(self): - return str(self.to_json_string()) - - def to_dict(self): - """Serializes this instance to a Python dictionary.""" - output = copy.deepcopy(self.__dict__) - return output - - def to_json_string(self): - """Serializes this instance to a JSON string.""" - return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" - -class 
BertNonFusedLayerNorm(nn.Module): - def __init__(self, hidden_size, eps=1e-12): - """Construct a layernorm module in the TF style (epsilon inside the square root). - """ - super(BertNonFusedLayerNorm, self).__init__() - self.weight = nn.Parameter(torch.ones(hidden_size)) - self.bias = nn.Parameter(torch.zeros(hidden_size)) - self.variance_epsilon = eps - - def forward(self, x): - u = x.mean(-1, keepdim=True) - s = (x - u) - s = s * s - s = s.mean(-1, keepdim=True) - x = (x - u) / torch.sqrt(s + self.variance_epsilon) - return self.weight * x + self.bias - -try: - import apex - #apex.amp.register_half_function(apex.normalization.fused_layer_norm, 'FusedLayerNorm') - import apex.normalization - from apex.normalization.fused_layer_norm import FusedLayerNormAffineFunction - #apex.amp.register_float_function(apex.normalization.FusedLayerNorm, 'forward') - #BertLayerNorm = apex.normalization.FusedLayerNorm - APEX_IS_AVAILABLE = True -except ImportError: - print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.") - #BertLayerNorm = BertNonFusedLayerNorm - APEX_IS_AVAILABLE = False -class BertLayerNorm(Module): - def __init__(self, hidden_size, eps=1e-12): - super(BertLayerNorm, self).__init__() - self.shape = torch.Size((hidden_size,)) - self.eps = eps - self.weight = nn.Parameter(torch.ones(hidden_size)) - self.bias = nn.Parameter(torch.zeros(hidden_size)) - self.apex_enabled = APEX_IS_AVAILABLE - - @torch.jit.unused - def fused_layer_norm(self, x): - return FusedLayerNormAffineFunction.apply( - x, self.weight, self.bias, self.shape, self.eps) - - - def forward(self, x): - if self.apex_enabled and not torch.jit.is_scripting(): - x = self.fused_layer_norm(x) - else: - u = x.mean(-1, keepdim=True) - s = (x - u) - s = s * s - s = s.mean(-1, keepdim=True) - x = (x - u) / torch.sqrt(s + self.eps) - x = self.weight * x + self.bias - return x - -class BertEmbeddings(nn.Module): - """Construct the embeddings from word, position and 
token_type embeddings. - """ - def __init__(self, config): - super(BertEmbeddings, self).__init__() - self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size) - self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) - self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) - - # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load - # any TensorFlow checkpoint file - self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, input_ids, token_type_ids): - seq_length = input_ids.size(1) - position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) - position_ids = position_ids.unsqueeze(0).expand_as(input_ids) - - words_embeddings = self.word_embeddings(input_ids) - position_embeddings = self.position_embeddings(position_ids) - token_type_embeddings = self.token_type_embeddings(token_type_ids) - - embeddings = words_embeddings + position_embeddings + token_type_embeddings - embeddings = self.LayerNorm(embeddings) - embeddings = self.dropout(embeddings) - return embeddings - - -class BertSelfAttention(nn.Module): - def __init__(self, config): - super(BertSelfAttention, self).__init__() - if config.hidden_size % config.num_attention_heads != 0: - raise ValueError( - "The hidden size (%d) is not a multiple of the number of attention " - "heads (%d)" % (config.hidden_size, config.num_attention_heads)) - self.num_attention_heads = config.num_attention_heads - self.attention_head_size = int(config.hidden_size / config.num_attention_heads) - self.all_head_size = self.num_attention_heads * self.attention_head_size - - self.query = nn.Linear(config.hidden_size, self.all_head_size) - self.key = nn.Linear(config.hidden_size, self.all_head_size) - self.value = nn.Linear(config.hidden_size, self.all_head_size) - - self.dropout = 
nn.Dropout(config.attention_probs_dropout_prob) - - def transpose_for_scores(self, x): - new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) - x = torch.reshape(x, new_x_shape) - return x.permute(0, 2, 1, 3) - - def transpose_key_for_scores(self, x): - new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) - x = torch.reshape(x, new_x_shape) - return x.permute(0, 2, 3, 1) - - def forward(self, hidden_states, attention_mask): - mixed_query_layer = self.query(hidden_states) - mixed_key_layer = self.key(hidden_states) - mixed_value_layer = self.value(hidden_states) - - query_layer = self.transpose_for_scores(mixed_query_layer) - key_layer = self.transpose_key_for_scores(mixed_key_layer) - value_layer = self.transpose_for_scores(mixed_value_layer) - - # Take the dot product between "query" and "key" to get the raw attention scores. - attention_scores = torch.matmul(query_layer, key_layer) - attention_scores = attention_scores / math.sqrt(self.attention_head_size) - # Apply the attention mask is (precomputed for all layers in BertModel forward() function) - attention_scores = attention_scores + attention_mask - - # Normalize the attention scores to probabilities. - attention_probs = F.softmax(attention_scores, dim=-1) - - # This is actually dropping out entire tokens to attend to, which might - # seem a bit unusual, but is taken from the original Transformer paper. 
- attention_probs = self.dropout(attention_probs) - - context_layer = torch.matmul(attention_probs, value_layer) - context_layer = context_layer.permute(0, 2, 1, 3).contiguous() - new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) - context_layer = torch.reshape(context_layer, new_context_layer_shape) - return context_layer - - -class BertSelfOutput(nn.Module): - def __init__(self, config): - super(BertSelfOutput, self).__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states, input_tensor): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class BertAttention(nn.Module): - def __init__(self, config): - super(BertAttention, self).__init__() - self.self = BertSelfAttention(config) - self.output = BertSelfOutput(config) - - def forward(self, input_tensor, attention_mask): - self_output = self.self(input_tensor, attention_mask) - attention_output = self.output(self_output, input_tensor) - return attention_output - - -class BertIntermediate(nn.Module): - def __init__(self, config): - super(BertIntermediate, self).__init__() - self.dense_act = LinearActivation(config.hidden_size, config.intermediate_size, act=config.hidden_act) - - def forward(self, hidden_states): - hidden_states = self.dense_act(hidden_states) - return hidden_states - - -class BertOutput(nn.Module): - def __init__(self, config): - super(BertOutput, self).__init__() - self.dense = nn.Linear(config.intermediate_size, config.hidden_size) - self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states, input_tensor): - hidden_states = self.dense(hidden_states) - hidden_states = 
self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class BertLayer(nn.Module): - def __init__(self, config): - super(BertLayer, self).__init__() - self.attention = BertAttention(config) - self.intermediate = BertIntermediate(config) - self.output = BertOutput(config) - - def forward(self, hidden_states, attention_mask): - attention_output = self.attention(hidden_states, attention_mask) - intermediate_output = self.intermediate(attention_output) - layer_output = self.output(intermediate_output, attention_output) - return layer_output - -class BertEncoder(nn.Module): - def __init__(self, config): - super(BertEncoder, self).__init__() - self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) - self.output_all_encoded_layers = config.output_all_encoded_layers - self._checkpoint_activations = False - - @torch.jit.unused - def checkpointed_forward(self, hidden_states, attention_mask): - def custom(start, end): - def custom_forward(*inputs): - layers = self.layer[start:end] - x_ = inputs[0] - for layer in layers: - x_ = layer(x_, inputs[1]) - return x_ - return custom_forward - - l = 0 - num_layers = len(self.layer) - chunk_length = math.ceil(math.sqrt(num_layers)) - while l < num_layers: - hidden_states = checkpoint.checkpoint(custom(l, l+chunk_length), hidden_states, attention_mask*1) - l += chunk_length - - return hidden_states - - def forward(self, hidden_states, attention_mask): - all_encoder_layers = [] - - if self._checkpoint_activations: - hidden_states = self.checkpointed_forward(hidden_states, attention_mask) - else: - for i,layer_module in enumerate(self.layer): - hidden_states = layer_module(hidden_states, attention_mask) - - if self.output_all_encoded_layers: - all_encoder_layers.append(hidden_states) - - if not self.output_all_encoded_layers or self._checkpoint_activations: - all_encoder_layers.append(hidden_states) - return all_encoder_layers - -class 
BertPooler(nn.Module): - def __init__(self, config): - super(BertPooler, self).__init__() - self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act="tanh") - - def forward(self, hidden_states): - # We "pool" the model by simply taking the hidden state corresponding - # to the first token. - first_token_tensor = hidden_states[:, 0] - pooled_output = self.dense_act(first_token_tensor) - return pooled_output - - -class BertPredictionHeadTransform(nn.Module): - def __init__(self, config): - super(BertPredictionHeadTransform, self).__init__() - self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act=config.hidden_act) - self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) - - def forward(self, hidden_states): - hidden_states = self.dense_act(hidden_states) - hidden_states = self.LayerNorm(hidden_states) - return hidden_states - - -class BertLMPredictionHead(nn.Module): - def __init__(self, config, bert_model_embedding_weights): - super(BertLMPredictionHead, self).__init__() - self.transform = BertPredictionHeadTransform(config) - - # The output weights are the same as the input embeddings, but there is - # an output-only bias for each token. 
- self.decoder = nn.Linear(bert_model_embedding_weights.size(1), - bert_model_embedding_weights.size(0), - bias=False) - self.decoder.weight = bert_model_embedding_weights - self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0))) - - def forward(self, hidden_states): - hidden_states = self.transform(hidden_states) - hidden_states = self.decoder(hidden_states) + self.bias - return hidden_states - - -class BertOnlyMLMHead(nn.Module): - def __init__(self, config, bert_model_embedding_weights): - super(BertOnlyMLMHead, self).__init__() - self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) - - def forward(self, sequence_output): - prediction_scores = self.predictions(sequence_output) - return prediction_scores - - -class BertOnlyNSPHead(nn.Module): - def __init__(self, config): - super(BertOnlyNSPHead, self).__init__() - self.seq_relationship = nn.Linear(config.hidden_size, 2) - - def forward(self, pooled_output): - seq_relationship_score = self.seq_relationship(pooled_output) - return seq_relationship_score - - -class BertPreTrainingHeads(nn.Module): - def __init__(self, config, bert_model_embedding_weights): - super(BertPreTrainingHeads, self).__init__() - self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) - self.seq_relationship = nn.Linear(config.hidden_size, 2) - - def forward(self, sequence_output, pooled_output): - prediction_scores = self.predictions(sequence_output) - seq_relationship_score = self.seq_relationship(pooled_output) - return prediction_scores, seq_relationship_score - - -class BertPreTrainedModel(nn.Module): - """ An abstract class to handle weights initialization and - a simple interface for downloading and loading pretrained models. - """ - def __init__(self, config, *inputs, **kwargs): - super(BertPreTrainedModel, self).__init__() - if not isinstance(config, BertConfig): - raise ValueError( - "Parameter config in `{}(config)` should be an instance of class `BertConfig`. 
" - "To create a model from a Google pretrained model use " - "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( - self.__class__.__name__, self.__class__.__name__ - )) - self.config = config - - def init_bert_weights(self, module): - """ Initialize the weights. - """ - if isinstance(module, (nn.Linear, nn.Embedding)): - # Slightly different from the TF version which uses truncated_normal for initialization - # cf https://github.com/pytorch/pytorch/pull/5617 - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - elif isinstance(module, BertLayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - if isinstance(module, nn.Linear) and module.bias is not None: - module.bias.data.zero_() - - def checkpoint_activations(self, val): - def _apply_flag(module): - if hasattr(module, "_checkpoint_activations"): - module._checkpoint_activations=val - self.apply(_apply_flag) - def enable_apex(self, val): - def _apply_flag(module): - if hasattr(module, "apex_enabled"): - module.apex_enabled=val - self.apply(_apply_flag) - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, - from_tf=False, *inputs, **kwargs): - """ - Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict. - Download and cache the pre-trained model file if needed. - - Params: - pretrained_model_name_or_path: either: - - a str with the name of a pre-trained model to load selected in the list of: - . `bert-base-uncased` - . `bert-large-uncased` - . `bert-base-cased` - . `bert-large-cased` - . `bert-base-multilingual-uncased` - . `bert-base-multilingual-cased` - . `bert-base-chinese` - - a path or url to a pretrained model archive containing: - . `bert_config.json` a configuration file for the model - . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance - - a path or url to a pretrained model archive containing: - . 
`bert_config.json` a configuration file for the model - . `model.chkpt` a TensorFlow checkpoint - from_tf: should we load the weights from a locally saved TensorFlow checkpoint - cache_dir: an optional path to a folder in which the pre-trained models will be cached. - state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models - *inputs, **kwargs: additional input for the specific Bert class - (ex: num_labels for BertForSequenceClassification) - """ - if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: - archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path] - else: - archive_file = pretrained_model_name_or_path - # redirect to the cache, if necessary - try: - resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir) - except EnvironmentError: - logger.error( - "Model name '{}' was not found in model name list ({}). " - "We assumed '{}' was a path or url but couldn't find any file " - "associated to this path or url.".format( - pretrained_model_name_or_path, - ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), - archive_file)) - return None - if resolved_archive_file == archive_file: - logger.info("loading archive file {}".format(archive_file)) - else: - logger.info("loading archive file {} from cache at {}".format( - archive_file, resolved_archive_file)) - tempdir = None - if os.path.isdir(resolved_archive_file) or from_tf: - serialization_dir = resolved_archive_file - else: - # Extract archive to temp dir - tempdir = tempfile.mkdtemp() - logger.info("extracting archive file {} to temp dir {}".format( - resolved_archive_file, tempdir)) - if os.path.isfile(resolved_archive_file) and tarfile.is_tarfile(resolved_archive_file): - with tarfile.open(resolved_archive_file, 'r:gz') as archive: - archive.extractall(tempdir) - else: - logger.error("Invalid tar file {}".format(resolved_archive_file)) - serialization_dir = tempdir - # Load config - config_file = 
os.path.join(serialization_dir, CONFIG_NAME) - config = BertConfig.from_json_file(config_file) - logger.info("Model config {}".format(config)) - # Instantiate model. - model = cls(config, *inputs, **kwargs) - if state_dict is None and not from_tf: - weights_path = os.path.join(serialization_dir, WEIGHTS_NAME) - state_dict = torch.load(weights_path, map_location='cpu' if not torch.cuda.is_available() else None) - if tempdir: - # Clean up temp dir - shutil.rmtree(tempdir) - if from_tf: - # Directly load from a TensorFlow checkpoint - weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME) - return load_tf_weights_in_bert(model, weights_path) - # Load from a PyTorch state_dict - old_keys = [] - new_keys = [] - for key in state_dict.keys(): - new_key = None - if 'gamma' in key: - new_key = key.replace('gamma', 'weight') - if 'beta' in key: - new_key = key.replace('beta', 'bias') - if new_key: - old_keys.append(key) - new_keys.append(new_key) - for old_key, new_key in zip(old_keys, new_keys): - state_dict[new_key] = state_dict.pop(old_key) - - missing_keys = [] - unexpected_keys = [] - error_msgs = [] - # copy state_dict so _load_from_state_dict can modify it - metadata = getattr(state_dict, '_metadata', None) - state_dict = state_dict.copy() - if metadata is not None: - state_dict._metadata = metadata - - def load(module, prefix=''): - local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) - module._load_from_state_dict( - state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) - for name, child in module._modules.items(): - if child is not None: - load(child, prefix + name + '.') - start_prefix = '' - if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()): - start_prefix = 'bert.' 
- load(model, prefix=start_prefix) - if len(missing_keys) > 0: - logger.info("Weights of {} not initialized from pretrained model: {}".format( - model.__class__.__name__, missing_keys)) - if len(unexpected_keys) > 0: - logger.info("Weights from pretrained model not used in {}: {}".format( - model.__class__.__name__, unexpected_keys)) - if len(error_msgs) > 0: - raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( - model.__class__.__name__, "\n\t".join(error_msgs))) - return model - - -class BertModel(BertPreTrainedModel): - """BERT model ("Bidirectional Embedding Representations from a Transformer"). - - Params: - config: a BertConfig class instance with the configuration to build a new model - - Inputs: - `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] - with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts - `extract_features.py`, `run_classifier.py` and `run_squad.py`) - `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token - types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to - a `sentence B` token (see BERT paper for more details). - `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices - selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max - input sequence length in the current batch. It's the mask that we typically use for attention when - a batch has varying length sentences. - - Outputs: Tuple of (encoded_layers, pooled_output) - `encoded_layers`: controlled by `output_all_encoded_layers` argument: - - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end - of each attention block (i.e. 
12 full sequences for BERT-base, 24 for BERT-large), each - encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size], - - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding - to the last attention block of shape [batch_size, sequence_length, hidden_size], - `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a - classifier pretrained on top of the hidden state associated to the first character of the - input (`CLS`) to train on the Next-Sentence task (see BERT's paper). - - Example usage: - ```python - # Already been converted into WordPiece token ids - input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) - input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) - token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) - - config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, - num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) - - model = modeling.BertModel(config=config) - all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask) - ``` - """ - def __init__(self, config): - super(BertModel, self).__init__(config) - self.embeddings = BertEmbeddings(config) - self.encoder = BertEncoder(config) - self.pooler = BertPooler(config) - self.apply(self.init_bert_weights) - self.output_all_encoded_layers = config.output_all_encoded_layers - - def forward(self, input_ids, token_type_ids, attention_mask): - # We create a 3D attention mask from a 2D tensor mask. - # Sizes are [batch_size, 1, 1, to_seq_length] - # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] - # this attention mask is more simple than the triangular masking of causal attention - # used in OpenAI GPT, we just need to prepare the broadcast dimension here. 
- extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) - - # Since attention_mask is 1.0 for positions we want to attend and 0.0 for - # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. - # Since we are adding it to the raw scores before the softmax, this is - # effectively the same as removing these entirely. - extended_attention_mask = extended_attention_mask.to(dtype=self.embeddings.word_embeddings.weight.dtype) # fp16 compatibility - extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 - - embedding_output = self.embeddings(input_ids, token_type_ids) - encoded_layers = self.encoder(embedding_output, extended_attention_mask) - sequence_output = encoded_layers[-1] - pooled_output = self.pooler(sequence_output) - if not self.output_all_encoded_layers: - encoded_layers = encoded_layers[-1:] - return encoded_layers, pooled_output - - -class BertForPreTraining(BertPreTrainedModel): - """BERT model with pre-training heads. - This module comprises the BERT model followed by the two pre-training heads: - - the masked language modeling head, and - - the next sentence classification head. - - Params: - config: a BertConfig class instance with the configuration to build a new model. - - Inputs: - `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] - with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts - `extract_features.py`, `run_classifier.py` and `run_squad.py`) - `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token - types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to - a `sentence B` token (see BERT paper for more details). - `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices - selected in [0, 1]. 
It's a mask to be used if the input sequence length is smaller than the max - input sequence length in the current batch. It's the mask that we typically use for attention when - a batch has varying length sentences. - `masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] - with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss - is only computed for the labels set in [0, ..., vocab_size] - `next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size] - with indices selected in [0, 1]. - 0 => next sentence is the continuation, 1 => next sentence is a random sentence. - - Outputs: - if `masked_lm_labels` and `next_sentence_label` are not `None`: - Outputs the total_loss which is the sum of the masked language modeling loss and the next - sentence classification loss. - if `masked_lm_labels` or `next_sentence_label` is `None`: - Outputs a tuple comprising - - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and - - the next sentence classification logits of shape [batch_size, 2]. 
- - Example usage: - ```python - # Already been converted into WordPiece token ids - input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) - input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) - token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) - - config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, - num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) - - model = BertForPreTraining(config) - masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask) - ``` - """ - def __init__(self, config): - super(BertForPreTraining, self).__init__(config) - self.bert = BertModel(config) - self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight) - self.apply(self.init_bert_weights) - - def forward(self, input_ids, token_type_ids, attention_mask): - encoded_layers, pooled_output = self.bert(input_ids, token_type_ids, attention_mask) - sequence_output = encoded_layers[-1] - prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) - - return prediction_scores, seq_relationship_score - - -class BertForMaskedLM(BertPreTrainedModel): - """BERT model with the masked language modeling head. - This module comprises the BERT model followed by the masked language modeling head. - - Params: - config: a BertConfig class instance with the configuration to build a new model. - - Inputs: - `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] - with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts - `extract_features.py`, `run_classifier.py` and `run_squad.py`) - `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token - types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to - a `sentence B` token (see BERT paper for more details). 
- `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices - selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max - input sequence length in the current batch. It's the mask that we typically use for attention when - a batch has varying length sentences. - `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] - with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss - is only computed for the labels set in [0, ..., vocab_size] - - Outputs: - if `masked_lm_labels` is not `None`: - Outputs the masked language modeling loss. - if `masked_lm_labels` is `None`: - Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size]. - - Example usage: - ```python - # Already been converted into WordPiece token ids - input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) - input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) - token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) - - config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, - num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) - - model = BertForMaskedLM(config) - masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask) - ``` - """ - def __init__(self, config): - super(BertForMaskedLM, self).__init__(config) - self.bert = BertModel(config) - self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight) - self.apply(self.init_bert_weights) - - def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None): - encoded_layers, _ = self.bert(input_ids, token_type_ids, attention_mask) - sequence_output = encoded_layers[-1] - prediction_scores = self.cls(sequence_output) - - if masked_lm_labels is not None: - loss_fct = CrossEntropyLoss(ignore_index=-1) - masked_lm_loss = 
loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) - return masked_lm_loss - else: - return prediction_scores - - -class BertForNextSentencePrediction(BertPreTrainedModel): - """BERT model with next sentence prediction head. - This module comprises the BERT model followed by the next sentence classification head. - - Params: - config: a BertConfig class instance with the configuration to build a new model. - - Inputs: - `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] - with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts - `extract_features.py`, `run_classifier.py` and `run_squad.py`) - `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token - types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to - a `sentence B` token (see BERT paper for more details). - `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices - selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max - input sequence length in the current batch. It's the mask that we typically use for attention when - a batch has varying length sentences. - `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size] - with indices selected in [0, 1]. - 0 => next sentence is the continuation, 1 => next sentence is a random sentence. - - Outputs: - if `next_sentence_label` is not `None`: - Outputs the total_loss which is the sum of the masked language modeling loss and the next - sentence classification loss. - if `next_sentence_label` is `None`: - Outputs the next sentence classification logits of shape [batch_size, 2]. 
- - Example usage: - ```python - # Already been converted into WordPiece token ids - input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) - input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) - token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) - - config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, - num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) - - model = BertForNextSentencePrediction(config) - seq_relationship_logits = model(input_ids, token_type_ids, input_mask) - ``` - """ - def __init__(self, config): - super(BertForNextSentencePrediction, self).__init__(config) - self.bert = BertModel(config) - self.cls = BertOnlyNSPHead(config) - self.apply(self.init_bert_weights) - - def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None): - _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask) - seq_relationship_score = self.cls( pooled_output) - - if next_sentence_label is not None: - loss_fct = CrossEntropyLoss(ignore_index=-1) - next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) - return next_sentence_loss - else: - return seq_relationship_score - - -class BertForSequenceClassification(BertPreTrainedModel): - """BERT model for classification. - This module is composed of the BERT model with a linear layer on top of - the pooled output. - - Params: - `config`: a BertConfig class instance with the configuration to build a new model. - `num_labels`: the number of classes for the classifier. Default = 2. - - Inputs: - `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] - with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts - `extract_features.py`, `run_classifier.py` and `run_squad.py`) - `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token - types indices selected in [0, 1]. 
Type 0 corresponds to a `sentence A` and type 1 corresponds to - a `sentence B` token (see BERT paper for more details). - `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices - selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max - input sequence length in the current batch. It's the mask that we typically use for attention when - a batch has varying length sentences. - `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] - with indices selected in [0, ..., num_labels]. - - Outputs: - if `labels` is not `None`: - Outputs the CrossEntropy classification loss of the output with the labels. - if `labels` is `None`: - Outputs the classification logits of shape [batch_size, num_labels]. - - Example usage: - ```python - # Already been converted into WordPiece token ids - input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) - input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) - token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) - - config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, - num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) - - num_labels = 2 - - model = BertForSequenceClassification(config, num_labels) - logits = model(input_ids, token_type_ids, input_mask) - ``` - """ - def __init__(self, config, num_labels): - super(BertForSequenceClassification, self).__init__(config) - self.num_labels = num_labels - self.bert = BertModel(config) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, num_labels) - self.apply(self.init_bert_weights) - - def forward(self, input_ids, token_type_ids=None, attention_mask=None): - _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask) - pooled_output = self.dropout(pooled_output) - return self.classifier(pooled_output) - - -class BertForMultipleChoice(BertPreTrainedModel): - """BERT 
model for multiple choice tasks. - This module is composed of the BERT model with a linear layer on top of - the pooled output. - - Params: - `config`: a BertConfig class instance with the configuration to build a new model. - `num_choices`: the number of classes for the classifier. Default = 2. - - Inputs: - `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] - with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts - `extract_features.py`, `run_classifier.py` and `run_squad.py`) - `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] - with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` - and type 1 corresponds to a `sentence B` token (see BERT paper for more details). - `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices - selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max - input sequence length in the current batch. It's the mask that we typically use for attention when - a batch has varying length sentences. - `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] - with indices selected in [0, ..., num_choices]. - - Outputs: - if `labels` is not `None`: - Outputs the CrossEntropy classification loss of the output with the labels. - if `labels` is `None`: - Outputs the classification logits of shape [batch_size, num_labels]. 
- - Example usage: - ```python - # Already been converted into WordPiece token ids - input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]]) - input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]]) - token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]]) - config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, - num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) - - num_choices = 2 - - model = BertForMultipleChoice(config, num_choices) - logits = model(input_ids, token_type_ids, input_mask) - ``` - """ - def __init__(self, config, num_choices): - super(BertForMultipleChoice, self).__init__(config) - self.num_choices = num_choices - self.bert = BertModel(config) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, 1) - self.apply(self.init_bert_weights) - - def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None): - flat_input_ids = input_ids.view(-1, input_ids.size(-1)) - flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) - flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) - _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask) - pooled_output = self.dropout(pooled_output) - logits = self.classifier(pooled_output) - reshaped_logits = logits.view(-1, self.num_choices) - - if labels is not None: - loss_fct = CrossEntropyLoss() - loss = loss_fct(reshaped_logits, labels) - return loss - else: - return reshaped_logits - - -class BertForTokenClassification(BertPreTrainedModel): - """BERT model for token-level classification. - This module is composed of the BERT model with a linear layer on top of - the full hidden state of the last layer. - - Params: - `config`: a BertConfig class instance with the configuration to build a new model. - `num_labels`: the number of classes for the classifier. 
Default = 2. - - Inputs: - `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] - with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts - `extract_features.py`, `run_classifier.py` and `run_squad.py`) - `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token - types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to - a `sentence B` token (see BERT paper for more details). - `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices - selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max - input sequence length in the current batch. It's the mask that we typically use for attention when - a batch has varying length sentences. - `labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length] - with indices selected in [0, ..., num_labels]. - - Outputs: - if `labels` is not `None`: - Outputs the CrossEntropy classification loss of the output with the labels. - if `labels` is `None`: - Outputs the classification logits of shape [batch_size, sequence_length, num_labels]. 
- - Example usage: - ```python - # Already been converted into WordPiece token ids - input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) - input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) - token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) - - config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, - num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) - - num_labels = 2 - - model = BertForTokenClassification(config, num_labels) - logits = model(input_ids, token_type_ids, input_mask) - ``` - """ - def __init__(self, config, num_labels): - super(BertForTokenClassification, self).__init__(config) - self.num_labels = num_labels - self.bert = BertModel(config) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, num_labels) - self.apply(self.init_bert_weights) - - def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None): - encoded_layers, _ = self.bert(input_ids, token_type_ids, attention_mask) - sequence_output = encoded_layers[-1] - sequence_output = self.dropout(sequence_output) - logits = self.classifier(sequence_output) - - if labels is not None: - loss_fct = CrossEntropyLoss() - # Only keep active parts of the loss - if attention_mask is not None: - active_loss = attention_mask.view(-1) == 1 - active_logits = logits.view(-1, self.num_labels)[active_loss] - active_labels = labels.view(-1)[active_loss] - loss = loss_fct(active_logits, active_labels) - else: - loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) - return loss - else: - return logits - - -class BertForQuestionAnswering(BertPreTrainedModel): - """BERT model for Question Answering (span extraction). - This module is composed of the BERT model with a linear layer on top of - the sequence output that computes start_logits and end_logits - - Params: - `config`: a BertConfig class instance with the configuration to build a new model. 
- - Inputs: - `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] - with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts - `extract_features.py`, `run_classifier.py` and `run_squad.py`) - `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token - types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to - a `sentence B` token (see BERT paper for more details). - `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices - selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max - input sequence length in the current batch. It's the mask that we typically use for attention when - a batch has varying length sentences. - - Outputs: - Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end - position tokens of shape [batch_size, sequence_length]. 
- - Example usage: - ```python - # Already been converted into WordPiece token ids - input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) - input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) - token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) - - config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, - num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) - - model = BertForQuestionAnswering(config) - start_logits, end_logits = model(input_ids, token_type_ids, input_mask) - ``` - """ - def __init__(self, config): - super(BertForQuestionAnswering, self).__init__(config) - self.bert = BertModel(config) - # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version - # self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.qa_outputs = nn.Linear(config.hidden_size, 2) - self.apply(self.init_bert_weights) - - def forward(self, input_ids, token_type_ids, attention_mask): - encoded_layers, _ = self.bert(input_ids, token_type_ids, attention_mask) - sequence_output = encoded_layers[-1] - logits = self.qa_outputs(sequence_output) - start_logits, end_logits = logits.split(1, dim=-1) - start_logits = start_logits.squeeze(-1) - end_logits = end_logits.squeeze(-1) - return start_logits, end_logits diff --git a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/optimization.py b/examples/huggingface/pytorch/question-answering/pruning/group_lasso/optimization.py deleted file mode 100644 index 5881a5b5156..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/optimization.py +++ /dev/null @@ -1,174 +0,0 @@ -# coding=utf-8 -# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. -# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""PyTorch optimization for BERT model.""" - -import math -import torch -from torch.optim import Optimizer -from torch.optim.optimizer import required -from torch.nn.utils import clip_grad_norm_ -#from fused_adam_local import FusedAdam -from apex.optimizers import FusedAdam -from apex.multi_tensor_apply import multi_tensor_applier -import amp_C -from utils import is_main_process - -multi_tensor_l2norm = amp_C.multi_tensor_l2norm -lamb_compute_update = amp_C.multi_tensor_lamb_stage1_cuda -lamb_apply_update = amp_C.multi_tensor_lamb_stage2_cuda -scale = amp_C.multi_tensor_scale - - -def warmup_cosine(x, warmup=0.002): - if x < warmup: - return x/warmup - return 0.5 * (1.0 + torch.cos(math.pi * x)) - -def warmup_constant(x, warmup=0.002): - if x < warmup: - return x/warmup - return 1.0 - -def warmup_linear(x, warmup=0.002): - if x < warmup: - return x/warmup - return max((x - 1. )/ (warmup - 1.), 0.) - -def warmup_poly(x, warmup=0.002, degree=0.5): - if x < warmup: - return x/warmup - return (1.0 - x)**degree - - -SCHEDULES = { - 'warmup_cosine':warmup_cosine, - 'warmup_constant':warmup_constant, - 'warmup_linear':warmup_linear, - 'warmup_poly':warmup_poly, -} - -class BertAdam(Optimizer): - """Implements BERT version of Adam algorithm with weight decay fix. - Params: - lr: learning rate - warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1 - t_total: total number of training steps for the learning - rate schedule, -1 means constant learning rate. Default: -1 - schedule: schedule to use for the warmup (see above). 
Default: 'warmup_linear' - b1: Adams b1. Default: 0.9 - b2: Adams b2. Default: 0.999 - e: Adams epsilon. Default: 1e-6 - weight_decay: Weight decay. Default: 0.01 - max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0 - """ - def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear', - b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, - max_grad_norm=1.0): - if lr is not required and lr < 0.0: - raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr)) - if schedule not in SCHEDULES: - raise ValueError("Invalid schedule parameter: {}".format(schedule)) - if not 0.0 <= warmup < 1.0 and not warmup == -1: - raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup)) - if not 0.0 <= b1 < 1.0: - raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1)) - if not 0.0 <= b2 < 1.0: - raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2)) - if not e >= 0.0: - raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e)) - defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total, - b1=b1, b2=b2, e=e, weight_decay=weight_decay, - max_grad_norm=max_grad_norm) - super(BertAdam, self).__init__(params, defaults) - - def get_lr(self): - lr = [] - for group in self.param_groups: - for p in group['params']: - state = self.state[p] - if len(state) == 0: - return [0] - if group['t_total'] != -1: - schedule_fct = SCHEDULES[group['schedule']] - lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup']) - else: - lr_scheduled = group['lr'] - lr.append(lr_scheduled) - return lr - - def step(self, closure=None): - """Performs a single optimization step. - Arguments: - closure (callable, optional): A closure that reevaluates the model - and returns the loss. 
- """ - loss = None - if closure is not None: - loss = closure() - for group in self.param_groups: - for p in group['params']: - if p.grad is None: - continue - grad = p.grad.data - if grad.is_sparse: - raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') - - state = self.state[p] - - # State initialization - if len(state) == 0: - state['step'] = 0 - # Exponential moving average of gradient values - state['next_m'] = torch.zeros_like(p.data) - # Exponential moving average of squared gradient values - state['next_v'] = torch.zeros_like(p.data) - - next_m, next_v = state['next_m'], state['next_v'] - beta1, beta2 = group['b1'], group['b2'] - - # Add grad clipping - if group['max_grad_norm'] > 0: - clip_grad_norm_(p, group['max_grad_norm']) - - # Decay the first and second moment running average coefficient - # In-place operations to update the averages at the same time - next_m.mul_(beta1).add_(1 - beta1, grad) - next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad) - update = next_m / (next_v.sqrt() + group['e']) - - # Just adding the square of the weights to the loss function is *not* - # the correct way of using L2 regularization/weight decay with Adam, - # since that will interact with the m and v parameters in strange ways. - # - # Instead we want to decay the weights in a manner that doesn't interact - # with the m/v parameters. This is equivalent to adding the square - # of the weights to the loss with plain (non-momentum) SGD. 
- if group['weight_decay'] > 0.0: - update += group['weight_decay'] * p.data - - if group['t_total'] != -1: - schedule_fct = SCHEDULES[group['schedule']] - lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup']) - else: - lr_scheduled = group['lr'] - - update_with_lr = lr_scheduled * update - p.data.add_(-update_with_lr) - - state['step'] += 1 - - return loss diff --git a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/requirements.txt b/examples/huggingface/pytorch/question-answering/pruning/group_lasso/requirements.txt deleted file mode 100644 index 9741bff445c..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/requirements.txt +++ /dev/null @@ -1,18 +0,0 @@ -# progress bars in model download and training scripts -tqdm -# Accessing files from S3 directly. -boto3 -# Used for downloading models over HTTP -requests -six -ipdb -#Data processing -h5py -nltk -progressbar -#Others -numpy -onnxruntime -requests -urllib3 -git+https://github.com/NVIDIA/dllogger diff --git a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/run_squad_sparse.py b/examples/huggingface/pytorch/question-answering/pruning/group_lasso/run_squad_sparse.py deleted file mode 100644 index 864c3cb8666..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/run_squad_sparse.py +++ /dev/null @@ -1,1285 +0,0 @@ -# coding=utf-8 -# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. -# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Run BERT on SQuAD.""" - -from __future__ import absolute_import, division, print_function - -import argparse -import collections -import dllogger, time -import json -import logging -import math -import modeling -import numpy as np -import os -import random -import sys -import torch -from apex import amp -from file_utils import PYTORCH_PRETRAINED_BERT_CACHE -from io import open -from optimization import BertAdam, warmup_linear -from schedulers import LinearWarmUpScheduler -from torch.utils.data import( - DataLoader, - RandomSampler, - SequentialSampler, - TensorDataset -) -from torch.utils.data.distributed import DistributedSampler -from tqdm import tqdm -from tokenization import (BasicTokenizer, BertTokenizer, whitespace_tokenize) -from utils import is_main_process, format_step -import builtins -import io - -safe_builtins = { - 'range', - 'complex', - 'set', - 'frozenset', - 'slice', -} - -torch._C._jit_set_profiling_mode(False) -torch._C._jit_set_profiling_executor(False) - -if sys.version_info[0] == 2: - import cPickle as pickle -else: - import pickle - -logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', - datefmt='%m/%d/%Y %H:%M:%S', - level=logging.INFO) -logger = logging.getLogger(__name__) - - -class SquadExample(object): - """ - A single training/test example for the Squad dataset. - For examples without an answer, the start and end position are -1. 
- """ - - def __init__(self, - qas_id, - question_text, - doc_tokens, - orig_answer_text=None, - start_position=None, - end_position=None, - is_impossible=None): - self.qas_id = qas_id - self.question_text = question_text - self.doc_tokens = doc_tokens - self.orig_answer_text = orig_answer_text - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - def __str__(self): - return self.__repr__() - - def __repr__(self): - s = "" - s += "qas_id: %s" % (self.qas_id) - s += ", question_text: %s" % ( - self.question_text) - s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) - if self.start_position: - s += ", start_position: %d" % (self.start_position) - if self.end_position: - s += ", end_position: %d" % (self.end_position) - if self.is_impossible: - s += ", is_impossible: %r" % (self.is_impossible) - return s - - -class InputFeatures(object): - """A single set of features of data.""" - - def __init__(self, - unique_id, - example_index, - doc_span_index, - tokens, - token_to_orig_map, - token_is_max_context, - input_ids, - input_mask, - segment_ids, - start_position=None, - end_position=None, - is_impossible=None): - self.unique_id = unique_id - self.example_index = example_index - self.doc_span_index = doc_span_index - self.tokens = tokens - self.token_to_orig_map = token_to_orig_map - self.token_is_max_context = token_is_max_context - self.input_ids = input_ids - self.input_mask = input_mask - self.segment_ids = segment_ids - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - -def read_squad_examples(input_file, is_training, version_2_with_negative): - """Read a SQuAD json file into a list of SquadExample.""" - with open(input_file, "r", encoding='utf-8') as reader: - input_data = json.load(reader)["data"] - - def is_whitespace(c): - if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: - return True - return False - - examples 
= [] - for entry in input_data: - for paragraph in entry["paragraphs"]: - paragraph_text = paragraph["context"] - doc_tokens = [] - char_to_word_offset = [] - prev_is_whitespace = True - for c in paragraph_text: - if is_whitespace(c): - prev_is_whitespace = True - else: - if prev_is_whitespace: - doc_tokens.append(c) - else: - doc_tokens[-1] += c - prev_is_whitespace = False - char_to_word_offset.append(len(doc_tokens) - 1) - - for qa in paragraph["qas"]: - qas_id = qa["id"] - question_text = qa["question"] - start_position = None - end_position = None - orig_answer_text = None - is_impossible = False - if is_training: - if version_2_with_negative: - is_impossible = qa["is_impossible"] - if (len(qa["answers"]) != 1) and (not is_impossible): - raise ValueError( - "For training, each question should have exactly 1 answer.") - if not is_impossible: - answer = qa["answers"][0] - orig_answer_text = answer["text"] - answer_offset = answer["answer_start"] - answer_length = len(orig_answer_text) - start_position = char_to_word_offset[answer_offset] - end_position = char_to_word_offset[answer_offset + answer_length - 1] - # Only add answers where the text can be exactly recovered from the - # document. If this CAN'T happen it's likely due to weird Unicode - # stuff so we will just skip the example. - # - # Note that this means for training mode, every example is NOT - # guaranteed to be preserved. - actual_text = " ".join(doc_tokens[start_position:(end_position + 1)]) - cleaned_answer_text = " ".join( - whitespace_tokenize(orig_answer_text)) - if actual_text.find(cleaned_answer_text) == -1: - logger.warning("Could not find answer: '%s' vs. 
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 doc_stride, max_query_length, is_training):
    """Loads a data file into a list of `InputBatch`s.

    Each example is wordpiece-tokenized and packed as
    `[CLS] query [SEP] doc-span [SEP]`, with long documents split into
    overlapping spans of stride `doc_stride`.  In training mode, answer
    positions are remapped from whitespace-token space to wordpiece space.
    """

    unique_id = 1000000000

    features = []
    for (example_index, example) in enumerate(examples):
        query_tokens = tokenizer.tokenize(example.question_text)

        # Truncate over-long questions; the document gets the remaining budget.
        if len(query_tokens) > max_query_length:
            query_tokens = query_tokens[0:max_query_length]

        # Build the wordpiece document plus bidirectional index maps:
        # tok_to_orig_index: wordpiece index -> whitespace-token index
        # orig_to_tok_index: whitespace-token index -> first wordpiece index
        tok_to_orig_index = []
        orig_to_tok_index = []
        all_doc_tokens = []
        for (i, token) in enumerate(example.doc_tokens):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)

        tok_start_position = None
        tok_end_position = None
        if is_training and example.is_impossible:
            tok_start_position = -1
            tok_end_position = -1
        if is_training and not example.is_impossible:
            # Remap the answer span into wordpiece space; the end position
            # is one-before the first wordpiece of the following token.
            tok_start_position = orig_to_tok_index[example.start_position]
            if example.end_position < len(example.doc_tokens) - 1:
                tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
            else:
                tok_end_position = len(all_doc_tokens) - 1
            (tok_start_position, tok_end_position) = _improve_answer_span(
                all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
                example.orig_answer_text)

        # The -3 accounts for [CLS], [SEP] and [SEP]
        max_tokens_for_doc = max_seq_length - len(query_tokens) - 3

        # We can have documents that are longer than the maximum sequence length.
        # To deal with this we do a sliding window approach, where we take chunks
        # of the up to our max length with a stride of `doc_stride`.
        _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
            "DocSpan", ["start", "length"])
        doc_spans = []
        start_offset = 0
        while start_offset < len(all_doc_tokens):
            length = len(all_doc_tokens) - start_offset
            if length > max_tokens_for_doc:
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if start_offset + length == len(all_doc_tokens):
                break
            start_offset += min(length, doc_stride)

        for (doc_span_index, doc_span) in enumerate(doc_spans):
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            # Segment 0: [CLS] + query + [SEP]
            tokens.append("[CLS]")
            segment_ids.append(0)
            for token in query_tokens:
                tokens.append(token)
                segment_ids.append(0)
            tokens.append("[SEP]")
            segment_ids.append(0)

            # Segment 1: the document span + trailing [SEP]
            for i in range(doc_span.length):
                split_token_index = doc_span.start + i
                token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]

                is_max_context = _check_is_max_context(doc_spans, doc_span_index,
                                                       split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)

            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length

            start_position = None
            end_position = None
            if is_training and not example.is_impossible:
                # For training, if our document chunk does not contain an annotation
                # we throw it out, since there is nothing to predict.
                doc_start = doc_span.start
                doc_end = doc_span.start + doc_span.length - 1
                out_of_span = False
                if not (tok_start_position >= doc_start and
                        tok_end_position <= doc_end):
                    out_of_span = True
                if out_of_span:
                    # Span outside this chunk: point at [CLS] (position 0).
                    start_position = 0
                    end_position = 0
                else:
                    # +2 for [CLS] and the first [SEP].
                    doc_offset = len(query_tokens) + 2
                    start_position = tok_start_position - doc_start + doc_offset
                    end_position = tok_end_position - doc_start + doc_offset
            if is_training and example.is_impossible:
                start_position = 0
                end_position = 0

            features.append(
                InputFeatures(
                    unique_id=unique_id,
                    example_index=example_index,
                    doc_span_index=doc_span_index,
                    tokens=tokens,
                    token_to_orig_map=token_to_orig_map,
                    token_is_max_context=token_is_max_context,
                    input_ids=input_ids,
                    input_mask=input_mask,
                    segment_ids=segment_ids,
                    start_position=start_position,
                    end_position=end_position,
                    is_impossible=example.is_impossible))
            unique_id += 1

    return features
However - # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match - # the exact answer, 1895. - # - # However, this is not always possible. Consider the following: - # - # Question: What country is the top exporter of electornics? - # Context: The Japanese electronics industry is the lagest in the world. - # Answer: Japan - # - # In this case, the annotator chose "Japan" as a character sub-span of - # the word "Japanese". Since our WordPiece tokenizer does not split - # "Japanese", we just use "Japanese" as the annotation. This is fairly rare - # in SQuAD, but does happen. - tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) - - for new_start in range(input_start, input_end + 1): - for new_end in range(input_end, new_start - 1, -1): - text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) - if text_span == tok_answer_text: - return (new_start, new_end) - - return (input_start, input_end) - - -def _check_is_max_context(doc_spans, cur_span_index, position): - """Check if this is the 'max context' doc span for the token.""" - - # Because of the sliding window approach taken to scoring documents, a single - # token can appear in multiple documents. E.g. - # Doc: the man went to the store and bought a gallon of milk - # Span A: the man went to the - # Span B: to the store and bought - # Span C: and bought a gallon of - # ... - # - # Now the word 'bought' will have two scores from spans B and C. We only - # want to consider the score with "maximum context", which we define as - # the *minimum* of its left and right context (the *sum* of left and - # right context will always be the same, of course). - # - # In the example the maximum context for 'bought' would be span C since - # it has 1 left context and 3 right context, while span B has 4 left context - # and 0 right context. 
- best_score = None - best_span_index = None - for (span_index, doc_span) in enumerate(doc_spans): - end = doc_span.start + doc_span.length - 1 - if position < doc_span.start: - continue - if position > end: - continue - num_left_context = position - doc_span.start - num_right_context = end - position - score = min(num_left_context, num_right_context) + 0.01 * doc_span.length - if best_score is None or score > best_score: - best_score = score - best_span_index = span_index - - return cur_span_index == best_span_index - - -RawResult = collections.namedtuple("RawResult", - ["unique_id", "start_logits", "end_logits"]) - - -def get_answers(examples, features, results, args): - predictions = collections.defaultdict(list) #it is possible that one example corresponds to multiple features - Prediction = collections.namedtuple('Prediction', ['text', 'start_logit', 'end_logit']) - - if args.version_2_with_negative: - null_vals = collections.defaultdict(lambda: (float("inf"),0,0)) - for ex, feat, result in match_results(examples, features, results): - start_indices = _get_best_indices(result.start_logits, args.n_best_size) - end_indices = _get_best_indices(result.end_logits, args.n_best_size) - prelim_predictions = get_valid_prelim_predictions(start_indices, end_indices, feat, result, args) - prelim_predictions = sorted( - prelim_predictions, - key=lambda x: (x.start_logit + x.end_logit), - reverse=True) - if args.version_2_with_negative: - score = result.start_logits[0] + result.end_logits[0] - if score < null_vals[ex.qas_id][0]: - null_vals[ex.qas_id] = (score, result.start_logits[0], result.end_logits[0]) - - curr_predictions = [] - seen_predictions = [] - for pred in prelim_predictions: - if len(curr_predictions) == args.n_best_size: - break - if pred.start_index > 0: # this is a non-null prediction TODO: this probably is irrelevant - final_text = get_answer_text(ex, feat, pred, args) - if final_text in seen_predictions: - continue - else: - final_text = "" - - 
seen_predictions.append(final_text) - curr_predictions.append(Prediction(final_text, pred.start_logit, pred.end_logit)) - predictions[ex.qas_id] += curr_predictions - - #Add empty prediction - if args.version_2_with_negative: - for qas_id in predictions.keys(): - predictions[qas_id].append(Prediction('', - null_vals[ex.qas_id][1], - null_vals[ex.qas_id][2])) - - - nbest_answers = collections.defaultdict(list) - answers = {} - for qas_id, preds in predictions.items(): - nbest = sorted( - preds, - key=lambda x: (x.start_logit + x.end_logit), - reverse=True)[:args.n_best_size] - - # In very rare edge cases we could only have single null prediction. - # So we just create a nonce prediction in this case to avoid failure. - if not nbest: - nbest.append(Prediction(text="empty", start_logit=0.0, end_logit=0.0)) - - total_scores = [] - best_non_null_entry = None - for entry in nbest: - total_scores.append(entry.start_logit + entry.end_logit) - if not best_non_null_entry and entry.text: - best_non_null_entry = entry - probs = _compute_softmax(total_scores) - for (i, entry) in enumerate(nbest): - output = collections.OrderedDict() - output["text"] = entry.text - output["probability"] = probs[i] - output["start_logit"] = entry.start_logit - output["end_logit"] = entry.end_logit - nbest_answers[qas_id].append(output) - if args.version_2_with_negative: - score_diff = null_vals[qas_id][0] - best_non_null_entry.start_logit - best_non_null_entry.end_logit - if score_diff > args.null_score_diff_threshold: - answers[qas_id] = "" - else: - answers[qas_id] = best_non_null_entry.text - else: - answers[qas_id] = nbest_answers[qas_id][0]['text'] - - return answers, nbest_answers - -def get_answer_text(example, feature, pred, args): - tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] - orig_doc_start = feature.token_to_orig_map[pred.start_index] - orig_doc_end = feature.token_to_orig_map[pred.end_index] - orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] 
- tok_text = " ".join(tok_tokens) - - # De-tokenize WordPieces that have been split off. - tok_text = tok_text.replace(" ##", "") - tok_text = tok_text.replace("##", "") - - # Clean whitespace - tok_text = tok_text.strip() - tok_text = " ".join(tok_text.split()) - orig_text = " ".join(orig_tokens) - - final_text = get_final_text(tok_text, orig_text, args.do_lower_case, args.verbose_logging) - return final_text - -def get_valid_prelim_predictions(start_indices, end_indices, feature, result, args): - - _PrelimPrediction = collections.namedtuple( - "PrelimPrediction", - ["start_index", "end_index", "start_logit", "end_logit"]) - prelim_predictions = [] - for start_index in start_indices: - for end_index in end_indices: - if start_index >= len(feature.tokens): - continue - if end_index >= len(feature.tokens): - continue - if start_index not in feature.token_to_orig_map: - continue - if end_index not in feature.token_to_orig_map: - continue - if not feature.token_is_max_context.get(start_index, False): - continue - if end_index < start_index: - continue - length = end_index - start_index + 1 - if length > args.max_answer_length: - continue - prelim_predictions.append( - _PrelimPrediction( - start_index=start_index, - end_index=end_index, - start_logit=result.start_logits[start_index], - end_logit=result.end_logits[end_index])) - return prelim_predictions - -def match_results(examples, features, results): - unique_f_ids = set([f.unique_id for f in features]) - unique_r_ids = set([r.unique_id for r in results]) - matching_ids = unique_f_ids & unique_r_ids - features = [f for f in features if f.unique_id in matching_ids] - results = [r for r in results if r.unique_id in matching_ids] - features.sort(key=lambda x: x.unique_id) - results.sort(key=lambda x: x.unique_id) - - for f, r in zip(features, results): #original code assumes strict ordering of examples. 
TODO: rewrite this - yield examples[f.example_index], f, r - -def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): - """Project the tokenized prediction back to the original text.""" - - # When we created the data, we kept track of the alignment between original - # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So - # now `orig_text` contains the span of our original text corresponding to the - # span that we predicted. - # - # However, `orig_text` may contain extra characters that we don't want in - # our prediction. - # - # For example, let's say: - # pred_text = steve smith - # orig_text = Steve Smith's - # - # We don't want to return `orig_text` because it contains the extra "'s". - # - # We don't want to return `pred_text` because it's already been normalized - # (the SQuAD eval script also does punctuation stripping/lower casing but - # our tokenizer does additional normalization like stripping accent - # characters). - # - # What we really want to return is "Steve Smith". - # - # Therefore, we have to apply a semi-complicated alignment heruistic between - # `pred_text` and `orig_text` to get a character-to-charcter alignment. This - # can fail in certain cases in which case we just return `orig_text`. - - def _strip_spaces(text): - ns_chars = [] - ns_to_s_map = collections.OrderedDict() - for (i, c) in enumerate(text): - if c == " ": - continue - ns_to_s_map[len(ns_chars)] = i - ns_chars.append(c) - ns_text = "".join(ns_chars) - return (ns_text, ns_to_s_map) - - # We first tokenize `orig_text`, strip whitespace from the result - # and `pred_text`, and check if they are the same length. If they are - # NOT the same length, the heuristic has failed. If they are the same - # length, we assume the characters are one-to-one aligned. 
- - tokenizer = BasicTokenizer(do_lower_case=do_lower_case) - - tok_text = " ".join(tokenizer.tokenize(orig_text)) - - start_position = tok_text.find(pred_text) - if start_position == -1: - if verbose_logging: - logger.info( - "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) - return orig_text - end_position = start_position + len(pred_text) - 1 - - (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) - (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) - - if len(orig_ns_text) != len(tok_ns_text): - if verbose_logging: - logger.info("Length not equal after stripping spaces: '%s' vs '%s'", - orig_ns_text, tok_ns_text) - return orig_text - - # We then project the characters in `pred_text` back to `orig_text` using - # the character-to-character alignment. - tok_s_to_ns_map = {} - for (i, tok_index) in tok_ns_to_s_map.items(): - tok_s_to_ns_map[tok_index] = i - - orig_start_position = None - if start_position in tok_s_to_ns_map: - ns_start_position = tok_s_to_ns_map[start_position] - if ns_start_position in orig_ns_to_s_map: - orig_start_position = orig_ns_to_s_map[ns_start_position] - - if orig_start_position is None: - if verbose_logging: - logger.info("Couldn't map start position") - return orig_text - - orig_end_position = None - if end_position in tok_s_to_ns_map: - ns_end_position = tok_s_to_ns_map[end_position] - if ns_end_position in orig_ns_to_s_map: - orig_end_position = orig_ns_to_s_map[ns_end_position] - - if orig_end_position is None: - if verbose_logging: - logger.info("Couldn't map end position") - return orig_text - - output_text = orig_text[orig_start_position:(orig_end_position + 1)] - return output_text - - -def _get_best_indices(logits, n_best_size): - """Get the n-best logits from a list.""" - index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) - - best_indices = [] - for i in range(len(index_and_score)): - if i >= n_best_size: - break - best_indices.append(index_and_score[i][0]) - return 
best_indices - - -def _compute_softmax(scores): - """Compute softmax probability over raw logits.""" - if not scores: - return [] - - max_score = None - for score in scores: - if max_score is None or score > max_score: - max_score = score - - exp_scores = [] - total_sum = 0.0 - for score in scores: - x = math.exp(score - max_score) - exp_scores.append(x) - total_sum += x - - probs = [] - for score in exp_scores: - probs.append(score / total_sum) - return probs - - - -from apex.multi_tensor_apply import multi_tensor_applier -class GradientClipper: - """ - Clips gradient norm of an iterable of parameters. - """ - def __init__(self, max_grad_norm): - self.max_norm = max_grad_norm - if multi_tensor_applier.available: - import amp_C - self._overflow_buf = torch.cuda.IntTensor([0]) - self.multi_tensor_l2norm = amp_C.multi_tensor_l2norm - self.multi_tensor_scale = amp_C.multi_tensor_scale - else: - raise RuntimeError('Gradient clipping requires cuda extensions') - - def step(self, parameters): - l = [p.grad for p in parameters if p.grad is not None] - total_norm, _ = multi_tensor_applier(self.multi_tensor_l2norm, self._overflow_buf, [l], False) - total_norm = total_norm.item() - if (total_norm == float('inf')): return - clip_coef = self.max_norm / (total_norm + 1e-6) - if clip_coef < 1: - multi_tensor_applier(self.multi_tensor_scale, self._overflow_buf, [l, l], clip_coef) - -class RestrictedUnpickler(pickle.Unpickler): - - def find_class(self, module, name): - # Only allow safe classes from builtins. - if module == "builtins" and name in safe_builtins: - return getattr(builtins, name) - # Forbid everything else. 
- raise pickle.UnpicklingError("global '%s.%s' is forbidden" % - (module, name)) - -def restricted_loads(s): - """Helper function analogous to pickle.loads().""" - return RestrictedUnpickler(io.BytesIO(s)).load() - - -def train_func(model, agent, args, dllogger, global_step, train_examples, num_train_optimization_steps, n_gpu, device, optimizer): - model = agent.model.model - - if args.cache_dir is None: - cached_train_features_file = args.train_file + '_{0}_{1}_{2}_{3}'.format( - list(filter(None, args.bert_model.split('/'))).pop(), str(args.max_seq_length), str(args.doc_stride), - str(args.max_query_length)) - else: - cached_train_features_file = args.cache_dir.strip('/') + '/' + args.train_file.split('/')[-1] + '_{0}_{1}_{2}_{3}'.format( - list(filter(None, args.bert_model.split('/'))).pop(), str(args.max_seq_length), str(args.doc_stride), - str(args.max_query_length)) - - train_features = None - try: - with open(cached_train_features_file, "rb") as reader: - train_features = restricted_loads(reader) - except: - train_features = convert_examples_to_features( - examples=train_examples, - tokenizer=tokenizer, - max_seq_length=args.max_seq_length, - doc_stride=args.doc_stride, - max_query_length=args.max_query_length, - is_training=True) - - if not args.skip_cache and is_main_process(): - dllogger.log(step="PARAMETER", data={"Cached_train features_file": cached_train_features_file}) - with open(cached_train_features_file, "wb") as writer: - pickle.dump(train_features, writer) - - dllogger.log(step="PARAMETER", data={"train_start": True}) - dllogger.log(step="PARAMETER", data={"training_samples": len(train_examples)}) - dllogger.log(step="PARAMETER", data={"training_features": len(train_features)}) - dllogger.log(step="PARAMETER", data={"train_batch_size":args.train_batch_size}) - dllogger.log(step="PARAMETER", data={"steps":num_train_optimization_steps}) - all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) - all_input_mask = 
def train_func(model, agent, args, dllogger, global_step, train_examples, num_train_optimization_steps, n_gpu, device, optimizer):
    """Run the pruning-aware SQuAD fine-tuning loop.

    Builds (or loads from cache) the training features, wraps them in a
    DataLoader, and trains for `args.num_train_epochs` epochs, driving the
    pruning `agent` callbacks around every batch/epoch.  On exit, records
    `args.time_to_train` and `args.final_loss`.

    NOTE(review): the `model` parameter is immediately replaced by
    `agent.model.model`; also `tokenizer` and `scheduler` below are free
    variables resolved at module scope — confirm they are defined there.
    """
    model = agent.model.model

    # Cache file name encodes the tokenization parameters so stale caches
    # are not reused across configuration changes.
    if args.cache_dir is None:
        cached_train_features_file = args.train_file + '_{0}_{1}_{2}_{3}'.format(
            list(filter(None, args.bert_model.split('/'))).pop(), str(args.max_seq_length), str(args.doc_stride),
            str(args.max_query_length))
    else:
        cached_train_features_file = args.cache_dir.strip('/') + '/' + args.train_file.split('/')[-1] + '_{0}_{1}_{2}_{3}'.format(
            list(filter(None, args.bert_model.split('/'))).pop(), str(args.max_seq_length), str(args.doc_stride),
            str(args.max_query_length))

    train_features = None
    try:
        with open(cached_train_features_file, "rb") as reader:
            # BUG FIX: restricted_loads() takes a bytes payload (it wraps it
            # in io.BytesIO); the original passed the file object itself,
            # which always raised and silently disabled the feature cache.
            train_features = restricted_loads(reader.read())
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed SystemExit/
        # KeyboardInterrupt).  Any cache miss/corruption -> recompute.
        train_features = convert_examples_to_features(
            examples=train_examples,
            tokenizer=tokenizer,
            max_seq_length=args.max_seq_length,
            doc_stride=args.doc_stride,
            max_query_length=args.max_query_length,
            is_training=True)

        if not args.skip_cache and is_main_process():
            dllogger.log(step="PARAMETER", data={"Cached_train features_file": cached_train_features_file})
            with open(cached_train_features_file, "wb") as writer:
                pickle.dump(train_features, writer)

    dllogger.log(step="PARAMETER", data={"train_start": True})
    dllogger.log(step="PARAMETER", data={"training_samples": len(train_examples)})
    dllogger.log(step="PARAMETER", data={"training_features": len(train_features)})
    dllogger.log(step="PARAMETER", data={"train_batch_size": args.train_batch_size})
    dllogger.log(step="PARAMETER", data={"steps": num_train_optimization_steps})
    all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
    all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
    all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
    train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
                               all_start_positions, all_end_positions)
    if args.local_rank == -1:
        train_sampler = RandomSampler(train_data)
    else:
        train_sampler = DistributedSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size * n_gpu)

    args.train_features = train_features
    model.train()
    gradClipper = GradientClipper(max_grad_norm=1.0)
    final_loss = None
    train_start = time.time()

    # pruning lifecycle start
    agent.pre_epoch_begin()

    for epoch in range(int(args.num_train_epochs)):
        train_iter = tqdm(train_dataloader, desc="Iteration", disable=args.disable_progress_bar) if is_main_process() else train_dataloader
        agent.on_epoch_begin(epoch)
        for step, batch in enumerate(train_iter):
            agent.on_batch_begin(step)

            # Terminate early for benchmarking
            if args.max_steps > 0 and global_step > args.max_steps:
                break

            if n_gpu == 1:
                batch = tuple(t.to(device) for t in batch)  # multi-gpu does scattering it-self
            input_ids, input_mask, segment_ids, start_positions, end_positions = batch
            start_logits, end_logits = model(input_ids, segment_ids, input_mask)
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs,
            # we ignore these terms (clamped to ignored_index).
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = torch.nn.CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            loss = (start_loss + end_loss) / 2
            if n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu.
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            # gradient clipping
            # NOTE(review): amp.master_params is used even when fp16 is off —
            # confirm amp is always initialized in that configuration.
            gradClipper.step(amp.master_params(optimizer))

            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    # modify learning rate with special warm up for BERT
                    # which FusedAdam doesn't do
                    scheduler.step()

                optimizer.step()
                agent.on_post_grad()
                optimizer.zero_grad()

                global_step += 1

            final_loss = loss.item()
            if step % args.log_freq == 0:
                dllogger.log(step=(epoch, global_step,), data={"step_loss": final_loss,
                                                               "learning_rate": optimizer.param_groups[0]['lr']})

            agent.on_batch_end()

        agent.on_epoch_end()
    args.time_to_train = time.time() - train_start
    args.final_loss = final_loss
dtype=torch.long) - all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) - all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) - all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) - eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index) - # Run prediction for full data - eval_sampler = SequentialSampler(eval_data) - eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size) - - args.eval_features = eval_features - infer_start = time.time() - model.eval() - all_results = [] - dllogger.log(step="PARAMETER", data={"eval_start": True}) - for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating", disable=args.disable_progress_bar): - if len(all_results) % 1000 == 0: - dllogger.log(step="PARAMETER", data={"sample_number": len(all_results)}) - input_ids = input_ids.to(device) - input_mask = input_mask.to(device) - segment_ids = segment_ids.to(device) - with torch.no_grad(): - batch_start_logits, batch_end_logits = model(input_ids, segment_ids, input_mask) - for i, example_index in enumerate(example_indices): - start_logits = batch_start_logits[i].detach().cpu().tolist() - end_logits = batch_end_logits[i].detach().cpu().tolist() - eval_feature = eval_features[example_index.item()] - unique_id = int(eval_feature.unique_id) - all_results.append(RawResult(unique_id=unique_id, - start_logits=start_logits, - end_logits=end_logits)) - - time_to_infer = time.time() - infer_start - output_prediction_file = os.path.join(args.output_dir, "predictions.json") - output_nbest_file = os.path.join(args.output_dir, "nbest_predictions.json") - - answers, nbest_answers = get_answers(eval_examples, eval_features, all_results, args) - with open(output_prediction_file, "w") as f: - f.write(json.dumps(answers, indent=4) + "\n") - with open(output_nbest_file, "w") as f: - 
f.write(json.dumps(nbest_answers, indent=4) + "\n") - - # output_null_log_odds_file = os.path.join(args.output_dir, "null_odds.json") - # write_predictions(eval_examples, eval_features, all_results, - # args.n_best_size, args.max_answer_length, - # args.do_lower_case, output_prediction_file, - # output_nbest_file, output_null_log_odds_file, args.verbose_logging, - # args.version_2_with_negative, args.null_score_diff_threshold) - - if args.do_eval and is_main_process(): - import sys - import subprocess - import shlex - eval_out = subprocess.check_output([sys.executable, shlex.quote(args.eval_script), - shlex.quote(args.predict_file), shlex.quote(args.output_dir) + "/predictions.json"]) - scores = str(eval_out).strip() - exact_match = float(scores.split(":")[1].split(",")[0]) - f1 = float(scores.split(":")[2].split("}")[0]) - args.exact_match = exact_match - args.f1 = f1 - args.time_to_infer = time_to_infer - -def main(): - parser = argparse.ArgumentParser() - - ## Required parameters - parser.add_argument("--bert_model", default=None, type=str, required=True, - help="Bert pre-trained model selected in the list: bert-base-uncased, " - "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, " - "bert-base-multilingual-cased, bert-base-chinese.") - parser.add_argument("--output_dir", default=None, type=str, required=True, - help="The output directory where the model checkpoints and predictions will be written.") - parser.add_argument("--init_checkpoint", - default=None, - type=str, - required=True, - help="The checkpoint file from pretraining") - - ## Other parameters - parser.add_argument("--train_file", default=None, type=str, help="SQuAD json for training. E.g., train-v1.1.json") - parser.add_argument("--predict_file", default=None, type=str, - help="SQuAD json for predictions. 
E.g., dev-v1.1.json or test-v1.1.json") - parser.add_argument("--max_seq_length", default=384, type=int, - help="The maximum total input sequence length after WordPiece tokenization. Sequences " - "longer than this will be truncated, and sequences shorter than this will be padded.") - parser.add_argument("--doc_stride", default=128, type=int, - help="When splitting up a long document into chunks, how much stride to take between chunks.") - parser.add_argument("--max_query_length", default=64, type=int, - help="The maximum number of tokens for the question. Questions longer than this will " - "be truncated to this length.") - parser.add_argument("--do_train", action='store_true', help="Whether to run training.") - parser.add_argument("--do_predict", action='store_true', help="Whether to run eval on the dev set.") - parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") - parser.add_argument("--predict_batch_size", default=8, type=int, help="Total batch size for predictions.") - parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") - parser.add_argument("--num_train_epochs", default=3.0, type=float, - help="Total number of training epochs to perform.") - parser.add_argument("--max_steps", default=-1.0, type=float, - help="Total number of training steps to perform.") - parser.add_argument("--warmup_proportion", default=0.1, type=float, - help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% " - "of training.") - parser.add_argument("--n_best_size", default=20, type=int, - help="The total number of n-best predictions to generate in the nbest_predictions.json " - "output file.") - parser.add_argument("--max_answer_length", default=30, type=int, - help="The maximum length of an answer that can be generated. 
This is needed because the start " - "and end predictions are not conditioned on one another.") - parser.add_argument("--verbose_logging", action='store_true', - help="If true, all of the warnings related to data processing will be printed. " - "A number of warnings are expected for a normal SQuAD evaluation.") - parser.add_argument("--no_cuda", - action='store_true', - help="Whether not to use CUDA when available") - parser.add_argument('--seed', - type=int, - default=42, - help="random seed for initialization") - parser.add_argument('--gradient_accumulation_steps', - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.") - parser.add_argument("--do_lower_case", - action='store_true', - help="Whether to lower case the input text. True for uncased models, False for cased models.") - parser.add_argument("--local_rank", - type=int, - default=os.getenv('LOCAL_RANK', -1), - help="local_rank for distributed training on gpus") - parser.add_argument('--fp16', - default=False, - action='store_true', - help="Mixed precision training") - parser.add_argument('--amp', - default=False, - action='store_true', - help="Mixed precision training") - parser.add_argument('--loss_scale', - type=float, default=0, - help="Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\n" - "0 (default value): dynamic loss scaling.\n" - "Positive power of 2: static loss scaling value.\n") - parser.add_argument('--version_2_with_negative', - action='store_true', - help='If true, the SQuAD examples contain some that do not have an answer.') - parser.add_argument('--null_score_diff_threshold', - type=float, default=0.0, - help="If null_score - best_non_null is greater than the threshold predict null.") - parser.add_argument('--vocab_file', - type=str, default=None, required=True, - help="Vocabulary mapping/file BERT was pretrainined on") - parser.add_argument("--config_file", - default=None, - type=str, - required=True, - help="The BERT model config") - parser.add_argument('--log_freq', - type=int, default=50, - help='frequency of logging loss.') - parser.add_argument('--json-summary', type=str, default="results/dllogger.json", - help='If provided, the json summary will be written to' - 'the specified file.') - parser.add_argument("--eval_script", - help="Script to evaluate squad predictions", - default="evaluate.py", - type=str) - parser.add_argument("--do_eval", - action='store_true', - help="Whether to use evaluate accuracy of predictions") - parser.add_argument("--use_env", - action='store_true', - help="Whether to read local rank from ENVVAR") - parser.add_argument('--skip_checkpoint', - default=False, - action='store_true', - help="Whether to save checkpoints") - parser.add_argument('--disable-progress-bar', - default=False, - action='store_true', - help='Disable tqdm progress bar') - parser.add_argument("--skip_cache", - default=False, - action='store_true', - help="Whether to cache train features") - parser.add_argument("--cache_dir", - default=None, - type=str, - help="Location to cache train feaures. 
Will default to the dataset directory") - parser.add_argument("--prune_config", - default='prune_bert.yaml', - help="pruning config") - parser.add_argument('--do_prune', - action='store_true', - help="prune model") - - args = parser.parse_args() - args.fp16 = args.fp16 or args.amp - - if args.local_rank == -1 or args.no_cuda: - device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") - n_gpu = torch.cuda.device_count() - else: - torch.cuda.set_device(args.local_rank) - device = torch.device("cuda", args.local_rank) - # Initializes the distributed backend which will take care of synchronizing nodes/GPUs - torch.distributed.init_process_group(backend='nccl', init_method='env://') - n_gpu = 1 - - if is_main_process(): - dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE, - filename=args.json_summary), - dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE, step_format=format_step)]) - else: - dllogger.init(backends=[]) - - print("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format( - device, n_gpu, bool(args.local_rank != -1), args.fp16)) - - dllogger.log(step="PARAMETER", data={"Config": [str(args)]}) - - if args.gradient_accumulation_steps < 1: - raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( - args.gradient_accumulation_steps)) - - args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps - - random.seed(args.seed) - np.random.seed(args.seed) - torch.manual_seed(args.seed) - dllogger.log(step="PARAMETER", data={"SEED": args.seed}) - - if n_gpu > 0: - torch.cuda.manual_seed_all(args.seed) - - if not args.do_train and not args.do_predict: - raise ValueError("At least one of `do_train` or `do_predict` must be True.") - - if args.do_train: - if not args.train_file: - raise ValueError( - "If `do_train` is True, then `train_file` must be specified.") - if args.do_predict: - if not args.predict_file: - 
raise ValueError( - "If `do_predict` is True, then `predict_file` must be specified.") - - if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and os.listdir(args.output_dir)!=['logfile.txt']: - print("WARNING: Output directory {} already exists and is not empty.".format(args.output_dir), os.listdir(args.output_dir)) - if not os.path.exists(args.output_dir) and is_main_process(): - os.makedirs(args.output_dir) - - tokenizer = BertTokenizer(args.vocab_file, do_lower_case=args.do_lower_case, max_len=512) # for bert large - # tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) - - train_examples = None - num_train_optimization_steps = None - if args.do_train: - train_examples = read_squad_examples( - input_file=args.train_file, is_training=True, version_2_with_negative=args.version_2_with_negative) - num_train_optimization_steps = int( - len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs - if args.local_rank != -1: - num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size() - - # Prepare model - config = modeling.BertConfig.from_json_file(args.config_file) - # Padding for divisibility by 8 - if config.vocab_size % 8 != 0: - config.vocab_size += 8 - (config.vocab_size % 8) - - modeling.ACT2FN["bias_gelu"] = modeling.bias_gelu_training - model = modeling.BertForQuestionAnswering(config) - # model = modeling.BertForQuestionAnswering.from_pretrained(args.bert_model, - # cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))) - dllogger.log(step="PARAMETER", data={"loading_checkpoint": True}) - model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu')["model"], strict=False) - #model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu'), strict=False) - dllogger.log(step="PARAMETER", data={"loaded_checkpoint": True}) - 
model.to(device) - num_weights = sum([p.numel() for p in model.parameters() if p.requires_grad]) - dllogger.log(step="PARAMETER", data={"model_weights_num":num_weights}) - - # Prepare optimizer - param_optimizer = list(model.named_parameters()) - - # hack to remove pooler, which is not used - # thus it produce None grad that break apex - param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]] - - no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] - optimizer_grouped_parameters = [ - {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, - {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} - ] - if args.do_train: - if args.fp16: - try: - from apex.optimizers import FusedAdam - except ImportError: - raise ImportError( - "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") - optimizer = FusedAdam(optimizer_grouped_parameters, - lr=args.learning_rate, - bias_correction=False) - - if args.loss_scale == 0: - model, optimizer = amp.initialize(model, optimizer, opt_level="O2", keep_batchnorm_fp32=False, - loss_scale="dynamic") - else: - model, optimizer = amp.initialize(model, optimizer, opt_level="O2", keep_batchnorm_fp32=False, loss_scale=args.loss_scale) - if args.do_train: - scheduler = LinearWarmUpScheduler(optimizer, warmup=args.warmup_proportion, total_steps=num_train_optimization_steps) - - else: - optimizer = BertAdam(optimizer_grouped_parameters, - lr=args.learning_rate, - warmup=args.warmup_proportion, - t_total=num_train_optimization_steps) - - if args.local_rank != -1: - try: - from apex.parallel import DistributedDataParallel as DDP - except ImportError: - raise ImportError( - "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") - - model = DDP(model) - elif n_gpu > 1: - model = torch.nn.DataParallel(model) - - global_step = 0 - - if 
args.do_prune: - # Pruning! - from intel_extension_for_transformers.transformers import NoTrainerOptimizer, PrunerConfig, PruningConfig - pruner_config = PrunerConfig( - prune_type="GroupLasso", - target_sparsity_ratio=0.7, - names=['bert.encoder.layer.0.attention.output.dense.weight'], - parameters={"alpha": 0.006, "pattern": "tile_pattern_1x2"}, - ) - pruning_conf = PruningConfig(pruner_config=pruner_config) - no_trainer_optimizer = NoTrainerOptimizer(model, output_dir=args.output_dir) - agent = no_trainer_optimizer.init_pruner(pruning_config=pruning_conf) - - def train_func_nc(model): - return train_func(model, agent, args, dllogger, global_step, train_examples, num_train_optimization_steps, n_gpu, device, optimizer) - - def eval_func_nc(model): - return eval_func(model, args, dllogger, tokenizer, device) - - if args.do_train: - # train_func(args, dllogger, global_step) - no_trainer_optimizer.train_func = train_func_nc - - - if args.do_train and is_main_process() and not args.skip_checkpoint: - # Save a trained model and the associated configuration - model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self - output_model_file = os.path.join(args.output_dir, modeling.WEIGHTS_NAME) - torch.save({"model":model_to_save.state_dict()}, output_model_file) - output_config_file = os.path.join(args.output_dir, modeling.CONFIG_NAME) - with open(output_config_file, 'w') as f: - f.write(model_to_save.config.to_json_string()) - - if args.do_predict and (args.local_rank == -1 or is_main_process()): - no_trainer_optimizer.eval_func = eval_func_nc - - if args.do_prune: - model = no_trainer_optimizer.prune() - - if args.do_train: - gpu_count = n_gpu - if torch.distributed.is_initialized(): - gpu_count = torch.distributed.get_world_size() - - if args.max_steps == -1: - dllogger.log(step=tuple(), data={"e2e_train_time": args.time_to_train, - "training_sequences_per_second": len(args.train_features) * args.num_train_epochs / 
args.time_to_train, - "final_loss": args.final_loss}) - else: - dllogger.log(step=tuple(), data={"e2e_train_time": time_to_train, - "training_sequences_per_second": args.train_batch_size * args.gradient_accumulation_steps \ - * args.max_steps * gpu_count / time_to_train, - "final_loss": final_loss}) - if args.do_predict and is_main_process(): - dllogger.log(step=tuple(), data={"e2e_inference_time": args.time_to_infer, - "inference_sequences_per_second": len(args.eval_features) / args.time_to_infer}) - if args.do_eval and is_main_process(): - dllogger.log(step=tuple(), data={"exact_match": args.exact_match, "F1": args.f1}) - -if __name__ == "__main__": - main() - dllogger.flush() - diff --git a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/schedulers.py b/examples/huggingface/pytorch/question-answering/pruning/group_lasso/schedulers.py deleted file mode 100644 index 4dd99b43a15..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/schedulers.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. -# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import math -import torch -from torch.optim.optimizer import Optimizer -from torch.optim.lr_scheduler import _LRScheduler - - -class LRScheduler(_LRScheduler): - def __init__(self, optimizer, last_epoch=-1): - # Check if using mixed precision training - self.mixed_training = False - base_optimizer = optimizer - - # Check that optimizer param is valid - if not isinstance(optimizer, Optimizer): - raise TypeError('{} is not an Optimizer'.format( - type(optimizer).__name__)) - - super(LRScheduler, self).__init__(base_optimizer, last_epoch) - - def step(self, epoch=None): - # Set the current training step - # ('epoch' is used to be consistent with _LRScheduler) - if self.mixed_training: - # The assumption is that the step will be constant - state_dict = self.optimizer.state[self.optimizer.param_groups[0]['params'][0]] - if 'step' in state_dict: - self.last_epoch = state_dict['step'] + 1 - else: - self.last_epoch = 1 - else: - self.last_epoch = epoch if epoch is not None else self.last_epoch + 1 - - for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): - param_group['lr'] = lr - - -class CosineWarmUpScheduler(LRScheduler): - """ - Applies a warm up period to the learning rate. - """ - - def __init__(self, optimizer, warmup, total_steps, last_epoch=-1): - self.warmup = warmup - self.total_steps = total_steps - super(CosineWarmUpScheduler, self).__init__(optimizer, last_epoch) - - def get_lr(self): - progress = self.last_epoch / self.total_steps - if progress < self.warmup: - return [base_lr * progress / self.warmup for base_lr in self.base_lrs] - else: - return [base_lr * (0.5 * (1.0 + torch.cos(math.pi + progress))) for base_lr in self.base_lrs] - - -class ConstantWarmUpScheduler(LRScheduler): - """ - Applies a warm up period to the learning rate. 
- """ - - def __init__(self, optimizer, warmup, total_steps, last_epoch=-1): - self.warmup = warmup - self.total_steps = total_steps - super(ConstantWarmUpScheduler, self).__init__(optimizer, last_epoch) - - def get_lr(self): - progress = self.last_epoch / self.total_steps - if progress < self.warmup: - return [base_lr * progress / self.warmup for base_lr in self.base_lrs] - else: - return self.base_lrs - - -class LinearWarmUpScheduler(LRScheduler): - """ - Applies a warm up period to the learning rate. - """ - - def __init__(self, optimizer, warmup, total_steps, last_epoch=-1): - self.warmup = warmup - self.total_steps = total_steps - super(LinearWarmUpScheduler, self).__init__(optimizer, last_epoch) - - def get_lr(self): - progress = self.last_epoch / self.total_steps - if progress < self.warmup: - return [base_lr * progress / self.warmup for base_lr in self.base_lrs] - else: - return [base_lr * max(( progress - 1.0)/(self.warmup - 1.0), 0.) for base_lr in self.base_lrs] - - -class PolyWarmUpScheduler(LRScheduler): - """ - Applies a warm up period to the learning rate. 
- """ - - def __init__(self, optimizer, warmup, total_steps, degree=0.5, last_epoch=-1): - self.warmup = warmup - self.total_steps = total_steps - self.degree = degree - super(PolyWarmUpScheduler, self).__init__(optimizer, last_epoch) - - def step(self, epoch=None): - param_group = self.optimizer.param_groups[0] - if 'step' in param_group: - self.last_epoch = param_group['step'] + 1 - else: - self.last_epoch = 1 - - for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): - param_group['lr'] = lr - - def get_lr(self): - progress = self.last_epoch / self.total_steps - if progress < self.warmup: - return [base_lr * progress / self.warmup for base_lr in self.base_lrs] - else: - return [base_lr * ((1.0 - progress) ** self.degree) for base_lr in self.base_lrs] diff --git a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/scripts/run_squad_sparse.sh b/examples/huggingface/pytorch/question-answering/pruning/group_lasso/scripts/run_squad_sparse.sh deleted file mode 100644 index 581dd1db883..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/scripts/run_squad_sparse.sh +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -echo "Container nvidia build = " $NVIDIA_BUILD_ID - -init_checkpoint=${1:-"/path/to/ckpt_8601.pt"} -epochs=${2:-"2.0"} -batch_size=${3:-"4"} -learning_rate=${4:-"3e-5"} -precision=${5:-"tf32"} -num_gpu="1" -seed="1" -BERT_PREP_WORKING_DIR=${6:-'/path/to/bert_data'} -squad_dir="$BERT_PREP_WORKING_DIR/download/squad/v1.1" -vocab_file="$BERT_PREP_WORKING_DIR/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/vocab.txt" -OUT_DIR=${7:-"./results/SQuAD/"} -prune_config=${8:-"prune_bert.yaml"} -json_summary=${9:-"$OUT_DIR/dllogger.json"} -echo $init_checkpoint $epochs $batch_size $learning_rate \ -$precision $num_gpu $seed $squad_dir $vocab_file \ -$OUT_DIR $prune_config $json_summary - - -#init_checkpoint=${1:-"/workspace/bert/checkpoints/bert_uncased.pt"} -#epochs=${2:-"2.0"} -#batch_size=${3:-"4"} -#learning_rate=${4:-"3e-5"} -#precision=${5:-"fp16"} -#num_gpu=${6:-"8"} -#seed=${7:-"1"} -#squad_dir=${8:-"$BERT_PREP_WORKING_DIR/download/squad/v1.1"} -#vocab_file=${9:-"$BERT_PREP_WORKING_DIR/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/vocab.txt"} -#OUT_DIR=${10:-"/workspace/bert/results/SQuAD"} -mode=${11:-"train eval"} -CONFIG_FILE=${12:-"/workspace/bert/bert_config.json"} -CONFIG_FILE="$PWD/bert_config.json" -max_steps=${13:-"-1"} - -echo "out dir is $OUT_DIR" -mkdir -p $OUT_DIR -if [ ! -d "$OUT_DIR" ]; then - echo "ERROR: non existing $OUT_DIR" - exit 1 -fi - -use_fp16="" -if [ "$precision" = "fp16" ] ; then - echo "fp16 activated!" 
- use_fp16=" --fp16 " -fi - -if [ "$num_gpu" = "1" ] ; then - export CUDA_VISIBLE_DEVICES=0 - mpi_command="" -else - unset CUDA_VISIBLE_DEVICES - mpi_command=" -m torch.distributed.launch --nproc_per_node=$num_gpu" -fi - -CMD="python $mpi_command run_squad_sparse.py " -CMD+="--do_prune " -CMD+="--prune_config=$prune_config " -CMD+="--json-summary=$json_summary " -CMD+="--init_checkpoint=$init_checkpoint " -if [ "$mode" = "train" ] ; then - CMD+="--do_train " - CMD+="--train_file=$squad_dir/train-v1.1.json " - CMD+="--train_batch_size=$batch_size " -elif [ "$mode" = "eval" ] ; then - CMD+="--do_predict " - CMD+="--predict_file=$squad_dir/dev-v1.1.json " - CMD+="--predict_batch_size=$batch_size " - CMD+="--eval_script=$squad_dir/evaluate-v1.1.py " - CMD+="--do_eval " -elif [ "$mode" = "prediction" ] ; then - CMD+="--do_predict " - CMD+="--predict_file=$squad_dir/dev-v1.1.json " - CMD+="--predict_batch_size=$batch_size " -else - CMD+=" --do_train " - CMD+=" --train_file=$squad_dir/train-v1.1.json " - CMD+=" --train_batch_size=$batch_size " - CMD+="--do_predict " - CMD+="--predict_file=$squad_dir/dev-v1.1.json " - CMD+="--predict_batch_size=$batch_size " - CMD+="--eval_script=$squad_dir/evaluate-v1.1.py " - CMD+="--do_eval " -fi - -CMD+=" --do_lower_case " -CMD+=" --bert_model=bert-large-uncased " -CMD+=" --learning_rate=$learning_rate " -CMD+=" --seed=$seed " -CMD+=" --num_train_epochs=$epochs " -CMD+=" --max_seq_length=384 " -CMD+=" --doc_stride=128 " -CMD+=" --output_dir=$OUT_DIR " -CMD+=" --vocab_file=$vocab_file " -CMD+=" --config_file=$CONFIG_FILE " -CMD+=" --max_steps=$max_steps " -CMD+=" $use_fp16" - -LOGFILE=$OUT_DIR/logfile.txt -echo "$CMD |& tee $LOGFILE" -time $CMD |& tee $LOGFILE diff --git a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/tokenization.py b/examples/huggingface/pytorch/question-answering/pruning/group_lasso/tokenization.py deleted file mode 100644 index fb3cffe20ca..00000000000 --- 
a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/tokenization.py +++ /dev/null @@ -1,392 +0,0 @@ -# coding=utf-8 -# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. -# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tokenization classes.""" - -from __future__ import absolute_import, division, print_function, unicode_literals - -import collections -import logging -import os -import unicodedata -import six -from io import open - -from file_utils import cached_path - -logger = logging.getLogger(__name__) - -PRETRAINED_VOCAB_ARCHIVE_MAP = { - 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", - 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", - 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", - 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", - 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt", - 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", - 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt", -} 
-PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = { - 'bert-base-uncased': 512, - 'bert-large-uncased': 512, - 'bert-base-cased': 512, - 'bert-large-cased': 512, - 'bert-base-multilingual-uncased': 512, - 'bert-base-multilingual-cased': 512, - 'bert-base-chinese': 512, -} -VOCAB_NAME = 'vocab.txt' - -def convert_to_unicode(text): - """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" - if six.PY3: - if isinstance(text, str): - return text - elif isinstance(text, bytes): - return text.decode("utf-8", "ignore") - else: - raise ValueError("Unsupported string type: %s" % (type(text))) - elif six.PY2: - if isinstance(text, str): - return text.decode("utf-8", "ignore") - elif isinstance(text, unicode): - return text - else: - raise ValueError("Unsupported string type: %s" % (type(text))) - else: - raise ValueError("Not running on Python2 or Python 3?") - - -def load_vocab(vocab_file): - """Loads a vocabulary file into a dictionary.""" - vocab = collections.OrderedDict() - index = 0 - with open(vocab_file, "r", encoding="utf-8") as reader: - while True: - token = reader.readline() - if not token: - break - token = token.strip() - vocab[token] = index - index += 1 - return vocab - - -def whitespace_tokenize(text): - """Runs basic whitespace cleaning and splitting on a piece of text.""" - text = text.strip() - if not text: - return [] - tokens = text.split() - return tokens - - -class BertTokenizer(object): - """Runs end-to-end tokenization: punctuation splitting + wordpiece""" - - def __init__(self, vocab_file, do_lower_case=True, max_len=None, - never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): - if not os.path.isfile(vocab_file): - raise ValueError( - "Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained " - "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) - self.vocab = load_vocab(vocab_file) - self.ids_to_tokens = collections.OrderedDict( - [(ids, tok) for tok, ids in self.vocab.items()]) - self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, - never_split=never_split) - self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) - self.max_len = max_len if max_len is not None else int(1e12) - - def tokenize(self, text): - split_tokens = [] - for token in self.basic_tokenizer.tokenize(text): - for sub_token in self.wordpiece_tokenizer.tokenize(token): - split_tokens.append(sub_token) - return split_tokens - - def convert_tokens_to_ids(self, tokens): - """Converts a sequence of tokens into ids using the vocab.""" - ids = [] - for token in tokens: - ids.append(self.vocab[token]) - if len(ids) > self.max_len: - raise ValueError( - "Token indices sequence length is longer than the specified maximum " - " sequence length for this BERT model ({} > {}). Running this" - " sequence through BERT will result in indexing errors".format(len(ids), self.max_len) - ) - return ids - - def convert_ids_to_tokens(self, ids): - """Converts a sequence of ids in wordpiece tokens using the vocab.""" - tokens = [] - for i in ids: - tokens.append(self.ids_to_tokens[i]) - return tokens - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): - """ - Instantiate a PreTrainedBertModel from a pre-trained model file. - Download and cache the pre-trained model file if needed. 
- """ - if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP: - vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path] - else: - vocab_file = pretrained_model_name_or_path - if os.path.isdir(vocab_file): - vocab_file = os.path.join(vocab_file, VOCAB_NAME) - # redirect to the cache, if necessary - try: - resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir) - except EnvironmentError: - logger.error( - "Model name '{}' was not found in model name list ({}). " - "We assumed '{}' was a path or url but couldn't find any file " - "associated to this path or url.".format( - pretrained_model_name_or_path, - ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), - vocab_file)) - return None - if resolved_vocab_file == vocab_file: - logger.info("loading vocabulary file {}".format(vocab_file)) - else: - logger.info("loading vocabulary file {} from cache at {}".format( - vocab_file, resolved_vocab_file)) - if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP: - # if we're using a pretrained model, ensure the tokenizer won't index sequences longer - # than the number of positional embeddings - max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path] - kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len) - # Instantiate tokenizer. - tokenizer = cls(resolved_vocab_file, *inputs, **kwargs) - return tokenizer - - -class BasicTokenizer(object): - """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" - - def __init__(self, - do_lower_case=True, - never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): - """Constructs a BasicTokenizer. - - Args: - do_lower_case: Whether to lower case the input. - """ - self.do_lower_case = do_lower_case - self.never_split = never_split - - def tokenize(self, text): - """Tokenizes a piece of text.""" - text = self._clean_text(text) - # This was added on November 1st, 2018 for the multilingual and Chinese - # models. 
This is also applied to the English models now, but it doesn't - # matter since the English models were not trained on any Chinese data - # and generally don't have any Chinese data in them (there are Chinese - # characters in the vocabulary because Wikipedia does have some Chinese - # words in the English Wikipedia.). - text = self._tokenize_chinese_chars(text) - orig_tokens = whitespace_tokenize(text) - split_tokens = [] - for token in orig_tokens: - if self.do_lower_case and token not in self.never_split: - token = token.lower() - token = self._run_strip_accents(token) - split_tokens.extend(self._run_split_on_punc(token)) - - output_tokens = whitespace_tokenize(" ".join(split_tokens)) - return output_tokens - - def _run_strip_accents(self, text): - """Strips accents from a piece of text.""" - text = unicodedata.normalize("NFD", text) - output = [] - for char in text: - cat = unicodedata.category(char) - if cat == "Mn": - continue - output.append(char) - return "".join(output) - - def _run_split_on_punc(self, text): - """Splits punctuation on a piece of text.""" - if text in self.never_split: - return [text] - chars = list(text) - i = 0 - start_new_word = True - output = [] - while i < len(chars): - char = chars[i] - if _is_punctuation(char): - output.append([char]) - start_new_word = True - else: - if start_new_word: - output.append([]) - start_new_word = False - output[-1].append(char) - i += 1 - - return ["".join(x) for x in output] - - def _tokenize_chinese_chars(self, text): - """Adds whitespace around any CJK character.""" - output = [] - for char in text: - cp = ord(char) - if self._is_chinese_char(cp): - output.append(" ") - output.append(char) - output.append(" ") - else: - output.append(char) - return "".join(output) - - def _is_chinese_char(self, cp): - """Checks whether CP is the codepoint of a CJK character.""" - # This defines a "chinese character" as anything in the CJK Unicode block: - # 
https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) - # - # Note that the CJK Unicode block is NOT all Japanese and Korean characters, - # despite its name. The modern Korean Hangul alphabet is a different block, - # as is Japanese Hiragana and Katakana. Those alphabets are used to write - # space-separated words, so they are not treated specially and handled - # like the all of the other languages. - if ((cp >= 0x4E00 and cp <= 0x9FFF) or # - (cp >= 0x3400 and cp <= 0x4DBF) or # - (cp >= 0x20000 and cp <= 0x2A6DF) or # - (cp >= 0x2A700 and cp <= 0x2B73F) or # - (cp >= 0x2B740 and cp <= 0x2B81F) or # - (cp >= 0x2B820 and cp <= 0x2CEAF) or - (cp >= 0xF900 and cp <= 0xFAFF) or # - (cp >= 0x2F800 and cp <= 0x2FA1F)): # - return True - - return False - - def _clean_text(self, text): - """Performs invalid character removal and whitespace cleanup on text.""" - output = [] - for char in text: - cp = ord(char) - if cp == 0 or cp == 0xfffd or _is_control(char): - continue - if _is_whitespace(char): - output.append(" ") - else: - output.append(char) - return "".join(output) - - -class WordpieceTokenizer(object): - """Runs WordPiece tokenization.""" - - def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100): - self.vocab = vocab - self.unk_token = unk_token - self.max_input_chars_per_word = max_input_chars_per_word - - def tokenize(self, text): - """Tokenizes a piece of text into its word pieces. - - This uses a greedy longest-match-first algorithm to perform tokenization - using the given vocabulary. - - For example: - input = "unaffable" - output = ["un", "##aff", "##able"] - - Args: - text: A single token or whitespace separated tokens. This should have - already been passed through `BasicTokenizer`. - - Returns: - A list of wordpiece tokens. 
- """ - - output_tokens = [] - for token in whitespace_tokenize(text): - chars = list(token) - if len(chars) > self.max_input_chars_per_word: - output_tokens.append(self.unk_token) - continue - - is_bad = False - start = 0 - sub_tokens = [] - while start < len(chars): - end = len(chars) - cur_substr = None - while start < end: - substr = "".join(chars[start:end]) - if start > 0: - substr = "##" + substr - if substr in self.vocab: - cur_substr = substr - break - end -= 1 - if cur_substr is None: - is_bad = True - break - sub_tokens.append(cur_substr) - start = end - - if is_bad: - output_tokens.append(self.unk_token) - else: - output_tokens.extend(sub_tokens) - return output_tokens - - -def _is_whitespace(char): - """Checks whether `chars` is a whitespace character.""" - # \t, \n, and \r are technically control characters but we treat them - # as whitespace since they are generally considered as such. - if char == " " or char == "\t" or char == "\n" or char == "\r": - return True - cat = unicodedata.category(char) - if cat == "Zs": - return True - return False - - -def _is_control(char): - """Checks whether `chars` is a control character.""" - # These are technically control characters but we count them as whitespace - # characters. - if char == "\t" or char == "\n" or char == "\r": - return False - cat = unicodedata.category(char) - if cat.startswith("C"): - return True - return False - - -def _is_punctuation(char): - """Checks whether `chars` is a punctuation character.""" - cp = ord(char) - # We treat all non-letter/number ASCII as punctuation. - # Characters such as "^", "$", and "`" are not in the Unicode - # Punctuation class but we treat them as punctuation anyways, for - # consistency. 
- if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or - (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): - return True - cat = unicodedata.category(char) - if cat.startswith("P"): - return True - return False diff --git a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/utils.py b/examples/huggingface/pytorch/question-answering/pruning/group_lasso/utils.py deleted file mode 100644 index f4f88e8eff9..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/group_lasso/utils.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import torch -import torch.distributed as dist - -from pathlib import Path - - -def get_rank(): - if not dist.is_available(): - return 0 - if not dist.is_initialized(): - return 0 - return dist.get_rank() - - -def get_world_size(): - if not dist.is_available(): - return 1 - if not dist.is_initialized(): - return 1 - return dist.get_world_size() - - -def is_main_process(): - return get_rank() == 0 - - -def barrier(): - if dist.is_available() and dist.is_initialized(): - dist.barrier() - - -def format_step(step): - if isinstance(step, str): - return step - s = "" - if len(step) > 0: - s += "Training Epoch: {} ".format(step[0]) - if len(step) > 1: - s += "Training Iteration: {} ".format(step[1]) - if len(step) > 2: - s += "Validation Iteration: {} ".format(step[2]) - return s - - -def mkdir(path): - Path(path).mkdir(parents=True, exist_ok=True) - - -def mkdir_by_main_process(path): - if is_main_process(): - mkdir(path) - barrier() diff --git a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/README.md b/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/README.md deleted file mode 100644 index 9da0b53c3d2..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/README.md +++ /dev/null @@ -1,52 +0,0 @@ -Step-by-Step -============ - -This document is used to list steps of reproducing PyTorch longformer-base-4096 pruning result. - - -# Prerequisite - -## 1. Environment - -```shell -pip install intel-extension-for-transformers -pip install -r requirements.txt -pip install transformers==4.34.1 -``` ->**Note**: Please use transformers no higher than 4.34.1 - - -## 2. Prepare Dataset - -The dataset will be downloaded and converted to squad format automatically with `./scripts/download_data_and_convert.sh`. 
- -```shell -bash ./scripts/download_data_and_convert.sh -``` - -There will generate two squad format files: `squad-wikipedia-train-4096.json` and `squad-wikipedia-dev-4096.json` - - -# Run Examples - -### pruning longformer-base-4096 - -Run the `./scripts/longformer_base_sparse_global_4x1_pruning.sh` to prune with `global sparse 80% and 4*1 pattern`. In this script, we set `per_device_train_batch_size=1` which is same with [the original longformer codes](https://github.com/allenai/longformer). - -```shell -bash ./scripts/longformer_base_sparse_global_4x1_pruning.sh -``` - -Fine-tuning of the dense model is also supported by running the `./scripts/longformer_base_dense_fintune.sh` - - -### Results -The snip-momentum pruning method is used by default and the initial dense model is well fine-tuned. - -| Model | Dataset | Sparsity pattern | sparsity ratio | Dense F1 |Sparse F1 | Relative drop| -| :----: | :----: | :----: | :----: |:----: |:----:| :----: | -| longformer-base-4096 | triviaqa | 4x1 | global 80% | 75.2 (from [the paper](https://arxiv.org/abs/2004.05150))/74.9235 (ours) | 74.48 | -0.96% | - -## References -* [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) - diff --git a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/modeling_longformer.py b/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/modeling_longformer.py deleted file mode 100644 index 3a08b4aaf96..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/modeling_longformer.py +++ /dev/null @@ -1,2282 +0,0 @@ -# coding=utf-8 -# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""PyTorch Longformer model.""" - -import math -from dataclasses import dataclass -from typing import Optional, Tuple, Union - -import torch -import torch.utils.checkpoint -from torch import nn -from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss - -from transformers.activations import ACT2FN, gelu -from transformers.modeling_utils import PreTrainedModel -from transformers.pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer -from transformers.utils import ( - ModelOutput, - add_code_sample_docstrings, - add_start_docstrings, - add_start_docstrings_to_model_forward, - logging, - replace_return_docstrings, -) -from transformers.models.longformer.configuration_longformer import LongformerConfig - - -logger = logging.get_logger(__name__) - -_CHECKPOINT_FOR_DOC = "allenai/longformer-base-4096" -_CONFIG_FOR_DOC = "LongformerConfig" -_TOKENIZER_FOR_DOC = "LongformerTokenizer" - -LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "allenai/longformer-base-4096", - "allenai/longformer-large-4096", - "allenai/longformer-large-4096-finetuned-triviaqa", - "allenai/longformer-base-4096-extra.pos.embd.only", - "allenai/longformer-large-4096-extra.pos.embd.only", - # See all Longformer models at https://huggingface.co/models?filter=longformer -] - - -@dataclass -class LongformerBaseModelOutput(ModelOutput): - """ - Base class for Longformer's outputs, with potential hidden states, local and global attentions. 
- Args: - last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Sequence of hidden-states at the output of the last layer of the model. - hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of - shape `(batch_size, sequence_length, hidden_size)`. - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + - attention_window + 1)`, where `x` is the number of tokens with global attention mask. - Local attentions weights after the attention softmax, used to compute the weighted average in the - self-attention heads. Those are the attention weights from every token in the sequence to every token with - global attention (first `x` values) and to every token in the attention window (remaining `attention_window - + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the - remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a - token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding - (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. - If the attention window contains a token with global attention, the attention weight at the corresponding - index is set to 0; the value should be accessed from the first `x` attention weights. 
If a token has global - attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be - accessed from `global_attentions`. - global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, - where `x` is the number of tokens with global attention mask. - Global attentions weights after the attention softmax, used to compute the weighted average in the - self-attention heads. Those are the attention weights from every token with global attention to every token - in the sequence. - """ - - last_hidden_state: torch.FloatTensor - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - attentions: Optional[Tuple[torch.FloatTensor]] = None - global_attentions: Optional[Tuple[torch.FloatTensor]] = None - - -@dataclass -class LongformerBaseModelOutputWithPooling(ModelOutput): - """ - Base class for Longformer's outputs that also contains a pooling of the last hidden states. - Args: - last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Sequence of hidden-states at the output of the last layer of the model. - pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): - Last layer hidden-state of the first token of the sequence (classification token) further processed by a - Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence - prediction (classification) objective during pretraining. - hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of - shape `(batch_size, sequence_length, hidden_size)`. 
- Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + - attention_window + 1)`, where `x` is the number of tokens with global attention mask. - Local attentions weights after the attention softmax, used to compute the weighted average in the - self-attention heads. Those are the attention weights from every token in the sequence to every token with - global attention (first `x` values) and to every token in the attention window (remaining `attention_window - + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the - remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a - token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding - (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. - If the attention window contains a token with global attention, the attention weight at the corresponding - index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global - attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be - accessed from `global_attentions`. - global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, - where `x` is the number of tokens with global attention mask. - Global attentions weights after the attention softmax, used to compute the weighted average in the - self-attention heads. 
Those are the attention weights from every token with global attention to every token - in the sequence. - """ - - last_hidden_state: torch.FloatTensor - pooler_output: torch.FloatTensor = None - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - attentions: Optional[Tuple[torch.FloatTensor]] = None - global_attentions: Optional[Tuple[torch.FloatTensor]] = None - - -@dataclass -class LongformerMaskedLMOutput(ModelOutput): - """ - Base class for masked language models outputs. - Args: - loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): - Masked language modeling (MLM) loss. - logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): - Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). - hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of - shape `(batch_size, sequence_length, hidden_size)`. - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + - attention_window + 1)`, where `x` is the number of tokens with global attention mask. - Local attentions weights after the attention softmax, used to compute the weighted average in the - self-attention heads. Those are the attention weights from every token in the sequence to every token with - global attention (first `x` values) and to every token in the attention window (remaining `attention_window - + 1` values). 
Note that the first `x` values refer to tokens with fixed positions in the text, but the - remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a - token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding - (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. - If the attention window contains a token with global attention, the attention weight at the corresponding - index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global - attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be - accessed from `global_attentions`. - global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, - where `x` is the number of tokens with global attention mask. - Global attentions weights after the attention softmax, used to compute the weighted average in the - self-attention heads. Those are the attention weights from every token with global attention to every token - in the sequence. - """ - - loss: Optional[torch.FloatTensor] = None - logits: torch.FloatTensor = None - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - attentions: Optional[Tuple[torch.FloatTensor]] = None - global_attentions: Optional[Tuple[torch.FloatTensor]] = None - - -@dataclass -class LongformerQuestionAnsweringModelOutput(ModelOutput): - """ - Base class for outputs of question answering Longformer models. - Args: - loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): - Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. 
- start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): - Span-start scores (before SoftMax). - end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): - Span-end scores (before SoftMax). - hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of - shape `(batch_size, sequence_length, hidden_size)`. - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + - attention_window + 1)`, where `x` is the number of tokens with global attention mask. - Local attentions weights after the attention softmax, used to compute the weighted average in the - self-attention heads. Those are the attention weights from every token in the sequence to every token with - global attention (first `x` values) and to every token in the attention window (remaining `attention_window - + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the - remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a - token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding - (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. - If the attention window contains a token with global attention, the attention weight at the corresponding - index is set to 0; the value should be accessed from the first `x` attention weights. 
If a token has global - attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be - accessed from `global_attentions`. - global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, - where `x` is the number of tokens with global attention mask. - Global attentions weights after the attention softmax, used to compute the weighted average in the - self-attention heads. Those are the attention weights from every token with global attention to every token - in the sequence. - """ - - loss: Optional[torch.FloatTensor] = None - start_logits: torch.FloatTensor = None - end_logits: torch.FloatTensor = None - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - attentions: Optional[Tuple[torch.FloatTensor]] = None - global_attentions: Optional[Tuple[torch.FloatTensor]] = None - - -@dataclass -class LongformerSequenceClassifierOutput(ModelOutput): - """ - Base class for outputs of sentence classification models. - Args: - loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): - Classification (or regression if config.num_labels==1) loss. - logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): - Classification (or regression if config.num_labels==1) scores (before SoftMax). - hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of - shape `(batch_size, sequence_length, hidden_size)`. - Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
- attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + - attention_window + 1)`, where `x` is the number of tokens with global attention mask. - Local attentions weights after the attention softmax, used to compute the weighted average in the - self-attention heads. Those are the attention weights from every token in the sequence to every token with - global attention (first `x` values) and to every token in the attention window (remaining `attention_window - + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the - remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a - token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding - (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. - If the attention window contains a token with global attention, the attention weight at the corresponding - index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global - attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be - accessed from `global_attentions`. - global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, - where `x` is the number of tokens with global attention mask. - Global attentions weights after the attention softmax, used to compute the weighted average in the - self-attention heads. 
Those are the attention weights from every token with global attention to every token - in the sequence. - """ - - loss: Optional[torch.FloatTensor] = None - logits: torch.FloatTensor = None - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - attentions: Optional[Tuple[torch.FloatTensor]] = None - global_attentions: Optional[Tuple[torch.FloatTensor]] = None - - -@dataclass -class LongformerMultipleChoiceModelOutput(ModelOutput): - """ - Base class for outputs of multiple choice Longformer models. - Args: - loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): - Classification loss. - logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): - *num_choices* is the second dimension of the input tensors. (see *input_ids* above). - Classification scores (before SoftMax). - hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of - shape `(batch_size, sequence_length, hidden_size)`. - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + - attention_window + 1)`, where `x` is the number of tokens with global attention mask. - Local attentions weights after the attention softmax, used to compute the weighted average in the - self-attention heads. Those are the attention weights from every token in the sequence to every token with - global attention (first `x` values) and to every token in the attention window (remaining `attention_window - + 1` values). 
Note that the first `x` values refer to tokens with fixed positions in the text, but the - remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a - token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding - (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. - If the attention window contains a token with global attention, the attention weight at the corresponding - index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global - attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be - accessed from `global_attentions`. - global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, - where `x` is the number of tokens with global attention mask. - Global attentions weights after the attention softmax, used to compute the weighted average in the - self-attention heads. Those are the attention weights from every token with global attention to every token - in the sequence. - """ - - loss: Optional[torch.FloatTensor] = None - logits: torch.FloatTensor = None - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - attentions: Optional[Tuple[torch.FloatTensor]] = None - global_attentions: Optional[Tuple[torch.FloatTensor]] = None - - -@dataclass -class LongformerTokenClassifierOutput(ModelOutput): - """ - Base class for outputs of token classification models. - Args: - loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) : - Classification loss. - logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): - Classification scores (before SoftMax). 
- hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of - shape `(batch_size, sequence_length, hidden_size)`. - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + - attention_window + 1)`, where `x` is the number of tokens with global attention mask. - Local attentions weights after the attention softmax, used to compute the weighted average in the - self-attention heads. Those are the attention weights from every token in the sequence to every token with - global attention (first `x` values) and to every token in the attention window (remaining `attention_window - + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the - remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a - token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding - (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. - If the attention window contains a token with global attention, the attention weight at the corresponding - index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global - attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be - accessed from `global_attentions`. 
- global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, - where `x` is the number of tokens with global attention mask. - Global attentions weights after the attention softmax, used to compute the weighted average in the - self-attention heads. Those are the attention weights from every token with global attention to every token - in the sequence. - """ - - loss: Optional[torch.FloatTensor] = None - logits: torch.FloatTensor = None - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - attentions: Optional[Tuple[torch.FloatTensor]] = None - global_attentions: Optional[Tuple[torch.FloatTensor]] = None - - -def _get_question_end_index(input_ids, sep_token_id): - """ - Computes the index of the first occurrence of `sep_token_id`. - """ - - sep_token_indices = (input_ids == sep_token_id).nonzero() - batch_size = input_ids.shape[0] - - assert sep_token_indices.shape[1] == 2, "`input_ids` should have two dimensions" - # here is the revised because of data preprocessing, - # but same to longformer codes: https://github.com/allenai/longformer - assert sep_token_indices.shape[0] == 2 * batch_size, ( - f"There should be exactly three separator tokens: {sep_token_id} in every sample for questions answering. You" - " might also consider to set `global_attention_mask` manually in the forward function to avoid this error." - ) - return sep_token_indices.view(batch_size, 2, 2)[:, 0, 1] - - -def _compute_global_attention_mask(input_ids, sep_token_id, before_sep_token=True): - """ - Computes global attention mask by putting attention on all tokens before `sep_token_id` if `before_sep_token is - True` else after `sep_token_id`. 
- """ - question_end_index = _get_question_end_index(input_ids, sep_token_id) - question_end_index = question_end_index.unsqueeze(dim=1) # size: batch_size x 1 - # bool attention mask with True in locations of global attention - attention_mask = torch.arange(input_ids.shape[1], device=input_ids.device) - if before_sep_token is True: - attention_mask = (attention_mask.expand_as(input_ids) < question_end_index).to(torch.uint8) - else: - # last token is separation token and should not be counted and in the middle are two separation tokens - attention_mask = (attention_mask.expand_as(input_ids) > (question_end_index + 1)).to(torch.uint8) * ( - attention_mask.expand_as(input_ids) < input_ids.shape[-1] - ).to(torch.uint8) - - return attention_mask - - -def create_position_ids_from_input_ids(input_ids, padding_idx): - """ - Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols - are ignored. This is modified from fairseq's `utils.make_positions`. - Args: - x: torch.Tensor x: - Returns: torch.Tensor - """ - # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. - mask = input_ids.ne(padding_idx).int() - incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask - return incremental_indices.long() + padding_idx - - -class LongformerEmbeddings(nn.Module): - """ - Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. 
- """ - - def __init__(self, config): - super().__init__() - self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) - self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) - self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) - - # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load - # any TensorFlow checkpoint file - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - - self.padding_idx = config.pad_token_id - self.position_embeddings = nn.Embedding( - config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx - ) - - def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): - if position_ids is None: - if input_ids is not None: - # Create the position ids from the input token ids. Any padded tokens remain padded. 
- position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device) - else: - position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) - - if input_ids is not None: - input_shape = input_ids.size() - else: - input_shape = inputs_embeds.size()[:-1] - - if token_type_ids is None: - token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=position_ids.device) - - if inputs_embeds is None: - inputs_embeds = self.word_embeddings(input_ids) - position_embeddings = self.position_embeddings(position_ids) - token_type_embeddings = self.token_type_embeddings(token_type_ids) - - embeddings = inputs_embeds + position_embeddings + token_type_embeddings - embeddings = self.LayerNorm(embeddings) - embeddings = self.dropout(embeddings) - return embeddings - - def create_position_ids_from_inputs_embeds(self, inputs_embeds): - """ - We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. - Args: - inputs_embeds: torch.Tensor inputs_embeds: - Returns: torch.Tensor - """ - input_shape = inputs_embeds.size()[:-1] - sequence_length = input_shape[1] - - position_ids = torch.arange( - self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device - ) - return position_ids.unsqueeze(0).expand(input_shape) - - -class LongformerSelfAttention(nn.Module): - def __init__(self, config, layer_id): - super().__init__() - if config.hidden_size % config.num_attention_heads != 0: - raise ValueError( - f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " - f"heads ({config.num_attention_heads})" - ) - self.num_heads = config.num_attention_heads - self.head_dim = int(config.hidden_size / config.num_attention_heads) - self.embed_dim = config.hidden_size - - self.query = nn.Linear(config.hidden_size, self.embed_dim) - self.key = nn.Linear(config.hidden_size, self.embed_dim) - self.value = 
nn.Linear(config.hidden_size, self.embed_dim) - - # separate projection layers for tokens with global attention - self.query_global = nn.Linear(config.hidden_size, self.embed_dim) - self.key_global = nn.Linear(config.hidden_size, self.embed_dim) - self.value_global = nn.Linear(config.hidden_size, self.embed_dim) - - self.dropout = config.attention_probs_dropout_prob - - self.layer_id = layer_id - attention_window = config.attention_window[self.layer_id] - assert ( - attention_window % 2 == 0 - ), f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}" - assert ( - attention_window > 0 - ), f"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}" - - self.one_sided_attn_window_size = attention_window // 2 - - def forward( - self, - hidden_states, - attention_mask=None, - layer_head_mask=None, - is_index_masked=None, - is_index_global_attn=None, - is_global_attn=None, - output_attentions=False, - ): - """ - [`LongformerSelfAttention`] expects *len(hidden_states)* to be multiple of *attention_window*. Padding to - *attention_window* happens in [`LongformerModel.forward`] to avoid redoing the padding on each layer. 
- The *attention_mask* is changed in [`LongformerModel.forward`] from 0, 1, 2 to: - - -10000: no attention - - 0: local attention - - +10000: global attention - """ - hidden_states = hidden_states.transpose(0, 1) - - # project hidden states - query_vectors = self.query(hidden_states) - key_vectors = self.key(hidden_states) - value_vectors = self.value(hidden_states) - - seq_len, batch_size, embed_dim = hidden_states.size() - assert ( - embed_dim == self.embed_dim - ), f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}" - - # normalize query - query_vectors /= math.sqrt(self.head_dim) - - query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) - key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) - - attn_scores = self._sliding_chunks_query_key_matmul( - query_vectors, key_vectors, self.one_sided_attn_window_size - ) - - # values to pad for attention probs - remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None, None] - - # cast to fp32/fp16 then replace 1's with -inf - float_mask = remove_from_windowed_attention_mask.type_as(query_vectors).masked_fill( - remove_from_windowed_attention_mask, torch.finfo(query_vectors.dtype).min - ) - # diagonal mask with zeros everywhere and -inf inplace of padding - diagonal_mask = self._sliding_chunks_query_key_matmul( - float_mask.new_ones(size=float_mask.size()), float_mask, self.one_sided_attn_window_size - ) - - # pad local attention probs - attn_scores += diagonal_mask - - assert list(attn_scores.size()) == [ - batch_size, - seq_len, - self.num_heads, - self.one_sided_attn_window_size * 2 + 1, - ], ( - f"local_attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}," - f" {self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.size()}" - ) - - # compute local attention probs from global attention keys and contact over window dim - if is_global_attn: - # 
compute global attn indices required through out forward fn - ( - max_num_global_attn_indices, - is_index_global_attn_nonzero, - is_local_index_global_attn_nonzero, - is_local_index_no_global_attn_nonzero, - ) = self._get_global_attn_indices(is_index_global_attn) - # calculate global attn probs from global key - - global_key_attn_scores = self._concat_with_global_key_attn_probs( - query_vectors=query_vectors, - key_vectors=key_vectors, - max_num_global_attn_indices=max_num_global_attn_indices, - is_index_global_attn_nonzero=is_index_global_attn_nonzero, - is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, - is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, - ) - # concat to local_attn_probs - # (batch_size, seq_len, num_heads, extra attention count + 2*window+1) - attn_scores = torch.cat((global_key_attn_scores, attn_scores), dim=-1) - - # free memory - del global_key_attn_scores - - attn_probs = nn.functional.softmax( - attn_scores, dim=-1, dtype=torch.float32 - ) # use fp32 for numerical stability - - if layer_head_mask is not None: - assert layer_head_mask.size() == ( - self.num_heads, - ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}" - attn_probs = layer_head_mask.view(1, 1, -1, 1) * attn_probs - - # softmax sometimes inserts NaN if all positions are masked, replace them with 0 - attn_probs = torch.masked_fill(attn_probs, is_index_masked[:, :, None, None], 0.0) - attn_probs = attn_probs.type_as(attn_scores) - - # free memory - del attn_scores - - # apply dropout - attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training) - - value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) - - # compute local attention output with global attention value and add - if is_global_attn: - # compute sum of global and local attn - attn_output = self._compute_attn_output_with_global_indices( - 
value_vectors=value_vectors, - attn_probs=attn_probs, - max_num_global_attn_indices=max_num_global_attn_indices, - is_index_global_attn_nonzero=is_index_global_attn_nonzero, - is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, - ) - else: - # compute local attn only - attn_output = self._sliding_chunks_matmul_attn_probs_value( - attn_probs, value_vectors, self.one_sided_attn_window_size - ) - - assert attn_output.size() == (batch_size, seq_len, self.num_heads, self.head_dim), "Unexpected size" - attn_output = attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous() - - # compute value for global attention and overwrite to attention output - # TODO: remove the redundant computation - if is_global_attn: - global_attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden( - hidden_states=hidden_states, - max_num_global_attn_indices=max_num_global_attn_indices, - layer_head_mask=layer_head_mask, - is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, - is_index_global_attn_nonzero=is_index_global_attn_nonzero, - is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, - is_index_masked=is_index_masked, - ) - - # get only non zero global attn output - nonzero_global_attn_output = global_attn_output[ - is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1] - ] - - # overwrite values with global attention - attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view( - len(is_local_index_global_attn_nonzero[0]), -1 - ) - # The attention weights for tokens with global attention are - # just filler values, they were never used to compute the output. - # Fill with 0 now, the correct values are in 'global_attn_probs'. 
- attn_probs[is_index_global_attn_nonzero] = 0 - - outputs = (attn_output.transpose(0, 1),) - - if output_attentions: - outputs += (attn_probs,) - - return outputs + (global_attn_probs,) if (is_global_attn and output_attentions) else outputs - - @staticmethod - def _pad_and_transpose_last_two_dims(hidden_states_padded, padding): - """pads rows and then flips rows and columns""" - hidden_states_padded = nn.functional.pad( - hidden_states_padded, padding - ) # padding value is not important because it will be overwritten - hidden_states_padded = hidden_states_padded.view( - *hidden_states_padded.size()[:-2], hidden_states_padded.size(-1), hidden_states_padded.size(-2) - ) - return hidden_states_padded - - @staticmethod - def _pad_and_diagonalize(chunked_hidden_states): - """ - shift every row 1 step right, converting columns into diagonals. - Example: - ```python - chunked_hidden_states: [ - 0.4983, - 2.6918, - -0.0071, - 1.0492, - -1.8348, - 0.7672, - 0.2986, - 0.0285, - -0.7584, - 0.4206, - -0.0405, - 0.1599, - 2.0514, - -1.1600, - 0.5372, - 0.2629, - ] - window_overlap = num_rows = 4 - ``` - (pad & diagonalize) => [ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000 - 0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000 0.0000, 0.0000, -0.7584, 0.4206, - -0.0405, 0.1599, 0.0000 0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ] - """ - total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size() - chunked_hidden_states = nn.functional.pad( - chunked_hidden_states, (0, window_overlap + 1) - ) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). 
Padding value is not important because it'll be overwritten - chunked_hidden_states = chunked_hidden_states.view( - total_num_heads, num_chunks, -1 - ) # total_num_heads x num_chunks x window_overlap*window_overlap+window_overlap - chunked_hidden_states = chunked_hidden_states[ - :, :, :-window_overlap - ] # total_num_heads x num_chunks x window_overlap*window_overlap - chunked_hidden_states = chunked_hidden_states.view( - total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim - ) - chunked_hidden_states = chunked_hidden_states[:, :, :, :-1] - return chunked_hidden_states - - @staticmethod - def _chunk(hidden_states, window_overlap): - """convert into overlapping chunks. Chunk size = 2w, overlap size = w""" - - # non-overlapping chunks of size = 2w - hidden_states = hidden_states.view( - hidden_states.size(0), - hidden_states.size(1) // (window_overlap * 2), - window_overlap * 2, - hidden_states.size(2), - ) - - # use `as_strided` to make the chunks overlap with an overlap size = window_overlap - chunk_size = list(hidden_states.size()) - chunk_size[1] = chunk_size[1] * 2 - 1 - - chunk_stride = list(hidden_states.stride()) - chunk_stride[1] = chunk_stride[1] // 2 - return hidden_states.as_strided(size=chunk_size, stride=chunk_stride) - - @staticmethod - def _mask_invalid_locations(input_tensor, affected_seq_len) -> torch.Tensor: - beginning_mask_2d = input_tensor.new_ones(affected_seq_len, affected_seq_len + 1).tril().flip(dims=[0]) - beginning_mask = beginning_mask_2d[None, :, None, :] - ending_mask = beginning_mask.flip(dims=(1, 3)) - beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1] - beginning_mask = beginning_mask.expand(beginning_input.size()) - beginning_input.masked_fill_(beginning_mask == 1, -float("inf")) # `== 1` converts to bool or uint8 - ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :] - ending_mask = ending_mask.expand(ending_input.size()) - 
ending_input.masked_fill_(ending_mask == 1, -float("inf")) # `== 1` converts to bool or uint8 - - def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tensor, window_overlap: int): - """ - Matrix multiplication of query and key tensors using with a sliding window attention pattern. This - implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer) with an - overlap of size window_overlap - """ - batch_size, seq_len, num_heads, head_dim = query.size() - assert ( - seq_len % (window_overlap * 2) == 0 - ), f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}" - assert query.size() == key.size() - - chunks_count = seq_len // window_overlap - 1 - - # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2 - query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) - key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) - - query = self._chunk(query, window_overlap) - key = self._chunk(key, window_overlap) - - # matrix multiplication - # bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim - # bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim - # bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap - diagonal_chunked_attention_scores = torch.einsum("bcxd,bcyd->bcxy", (query, key)) # multiply - - # convert diagonals into columns - diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims( - diagonal_chunked_attention_scores, padding=(0, 0, 0, 1) - ) - - # allocate space for the overall attention matrix where the chunks are combined. The last dimension - # has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to - # window_overlap previous words). 
The following column is attention score from each word to itself, then - # followed by window_overlap columns for the upper triangle. - - diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty( - (batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1) - ) - - # copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions - # - copying the main diagonal and the upper triangle - diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[ - :, :, :window_overlap, : window_overlap + 1 - ] - diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[ - :, -1, window_overlap:, : window_overlap + 1 - ] - # - copying the lower triangle - diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[ - :, :, -(window_overlap + 1) : -1, window_overlap + 1 : - ] - - diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[ - :, 0, : window_overlap - 1, 1 - window_overlap : - ] - - # separate batch_size and num_heads dimensions again - diagonal_attention_scores = diagonal_attention_scores.view( - batch_size, num_heads, seq_len, 2 * window_overlap + 1 - ).transpose(2, 1) - - self._mask_invalid_locations(diagonal_attention_scores, window_overlap) - return diagonal_attention_scores - - def _sliding_chunks_matmul_attn_probs_value( - self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int - ): - """ - Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. 
Returned tensor will be of the - same shape as `attn_probs` - """ - batch_size, seq_len, num_heads, head_dim = value.size() - - assert seq_len % (window_overlap * 2) == 0 - assert attn_probs.size()[:3] == value.size()[:3] - assert attn_probs.size(3) == 2 * window_overlap + 1 - chunks_count = seq_len // window_overlap - 1 - # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap - - chunked_attn_probs = attn_probs.transpose(1, 2).reshape( - batch_size * num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap + 1 - ) - - # group batch_size and num_heads dimensions into one - value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) - - # pad seq_len with w at the beginning of the sequence and another window overlap at the end - padded_value = nn.functional.pad(value, (0, 0, window_overlap, window_overlap), value=-1) - - # chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap - chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim) - chunked_value_stride = padded_value.stride() - chunked_value_stride = ( - chunked_value_stride[0], - window_overlap * chunked_value_stride[1], - chunked_value_stride[1], - chunked_value_stride[2], - ) - chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride) - - chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs) - - context = torch.einsum("bcwd,bcdh->bcwh", (chunked_attn_probs, chunked_value)) - return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2) - - @staticmethod - def _get_global_attn_indices(is_index_global_attn): - """compute global attn indices required throughout forward pass""" - # helper variable - num_global_attn_indices = is_index_global_attn.long().sum(dim=1) - - # max number of global attn indices in batch - max_num_global_attn_indices = num_global_attn_indices.max() - - # 
indices of global attn - is_index_global_attn_nonzero = is_index_global_attn.nonzero(as_tuple=True) - - # helper variable - is_local_index_global_attn = torch.arange( - max_num_global_attn_indices, device=is_index_global_attn.device - ) < num_global_attn_indices.unsqueeze(dim=-1) - - # location of the non-padding values within global attention indices - is_local_index_global_attn_nonzero = is_local_index_global_attn.nonzero(as_tuple=True) - - # location of the padding values within global attention indices - is_local_index_no_global_attn_nonzero = (is_local_index_global_attn == 0).nonzero(as_tuple=True) - return ( - max_num_global_attn_indices, - is_index_global_attn_nonzero, - is_local_index_global_attn_nonzero, - is_local_index_no_global_attn_nonzero, - ) - - def _concat_with_global_key_attn_probs( - self, - key_vectors, - query_vectors, - max_num_global_attn_indices, - is_index_global_attn_nonzero, - is_local_index_global_attn_nonzero, - is_local_index_no_global_attn_nonzero, - ): - batch_size = key_vectors.shape[0] - - # create only global key vectors - key_vectors_only_global = key_vectors.new_zeros( - batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim - ) - - key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero] - - # (batch_size, seq_len, num_heads, max_num_global_attn_indices) - attn_probs_from_global_key = torch.einsum("blhd,bshd->blhs", (query_vectors, key_vectors_only_global)) - - attn_probs_from_global_key[ - is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1] - ] = torch.finfo(attn_probs_from_global_key.dtype).min - - return attn_probs_from_global_key - - def _compute_attn_output_with_global_indices( - self, - value_vectors, - attn_probs, - max_num_global_attn_indices, - is_index_global_attn_nonzero, - is_local_index_global_attn_nonzero, - ): - batch_size = attn_probs.shape[0] - - # cut local attn probs to global only - attn_probs_only_global = 
attn_probs.narrow(-1, 0, max_num_global_attn_indices) - # get value vectors for global only - value_vectors_only_global = value_vectors.new_zeros( - batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim - ) - value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero] - - # use `matmul` because `einsum` crashes sometimes with fp16 - # attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v)) - # compute attn output only global - attn_output_only_global = torch.matmul( - attn_probs_only_global.transpose(1, 2).clone(), value_vectors_only_global.transpose(1, 2).clone() - ).transpose(1, 2) - - # reshape attn probs - attn_probs_without_global = attn_probs.narrow( - -1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices - ).contiguous() - - # compute attn output with global - attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value( - attn_probs_without_global, value_vectors, self.one_sided_attn_window_size - ) - return attn_output_only_global + attn_output_without_global - - def _compute_global_attn_output_from_hidden( - self, - hidden_states, - max_num_global_attn_indices, - layer_head_mask, - is_local_index_global_attn_nonzero, - is_index_global_attn_nonzero, - is_local_index_no_global_attn_nonzero, - is_index_masked, - ): - seq_len, batch_size = hidden_states.shape[:2] - - # prepare global hidden states - global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim) - global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[ - is_index_global_attn_nonzero[::-1] - ] - - # global key, query, value - global_query_vectors_only_global = self.query_global(global_attn_hidden_states) - global_key_vectors = self.key_global(hidden_states) - global_value_vectors = self.value_global(hidden_states) - - # normalize - global_query_vectors_only_global /= math.sqrt(self.head_dim) - - # 
reshape - global_query_vectors_only_global = ( - global_query_vectors_only_global.contiguous() - .view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim) - .transpose(0, 1) - ) # (batch_size * self.num_heads, max_num_global_attn_indices, head_dim) - global_key_vectors = ( - global_key_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1) - ) # batch_size * self.num_heads, seq_len, head_dim) - global_value_vectors = ( - global_value_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1) - ) # batch_size * self.num_heads, seq_len, head_dim) - - # compute attn scores - global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2)) - - assert list(global_attn_scores.size()) == [ - batch_size * self.num_heads, - max_num_global_attn_indices, - seq_len, - ], ( - "global_attn_scores have the wrong size. Size should be" - f" {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is" - f" {global_attn_scores.size()}." 
- ) - - global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len) - - global_attn_scores[ - is_local_index_no_global_attn_nonzero[0], :, is_local_index_no_global_attn_nonzero[1], : - ] = torch.finfo(global_attn_scores.dtype).min - - global_attn_scores = global_attn_scores.masked_fill( - is_index_masked[:, None, None, :], - torch.finfo(global_attn_scores.dtype).min, - ) - - global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len) - - # compute global attn probs - global_attn_probs_float = nn.functional.softmax( - global_attn_scores, dim=-1, dtype=torch.float32 - ) # use fp32 for numerical stability - - # apply layer head masking - if layer_head_mask is not None: - assert layer_head_mask.size() == ( - self.num_heads, - ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}" - global_attn_probs_float = layer_head_mask.view(1, -1, 1, 1) * global_attn_probs_float.view( - batch_size, self.num_heads, max_num_global_attn_indices, seq_len - ) - global_attn_probs_float = global_attn_probs_float.view( - batch_size * self.num_heads, max_num_global_attn_indices, seq_len - ) - - global_attn_probs = nn.functional.dropout( - global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training - ) - - # global attn output - global_attn_output = torch.bmm(global_attn_probs, global_value_vectors) - - assert list(global_attn_output.size()) == [ - batch_size * self.num_heads, - max_num_global_attn_indices, - self.head_dim, - ], ( - "global_attn_output tensor has the wrong size. Size should be" - f" {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is" - f" {global_attn_output.size()}." 
- ) - - global_attn_probs = global_attn_probs.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len) - global_attn_output = global_attn_output.view( - batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim - ) - return global_attn_output, global_attn_probs - - -# Copied from transformers.models.bert.modeling_bert.BertSelfOutput -class LongformerSelfOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class LongformerAttention(nn.Module): - def __init__(self, config, layer_id=0): - super().__init__() - self.self = LongformerSelfAttention(config, layer_id) - self.output = LongformerSelfOutput(config) - self.pruned_heads = set() - - def prune_heads(self, heads): - if len(heads) == 0: - return - heads, index = find_pruneable_heads_and_indices( - heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads - ) - - # Prune linear layers - self.self.query = prune_linear_layer(self.self.query, index) - self.self.key = prune_linear_layer(self.self.key, index) - self.self.value = prune_linear_layer(self.self.value, index) - self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) - - # Update hyper params and store pruned heads - self.self.num_attention_heads = self.self.num_attention_heads - len(heads) - self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads - self.pruned_heads = self.pruned_heads.union(heads) - - def forward( - self, - hidden_states, - attention_mask=None, - 
layer_head_mask=None, - is_index_masked=None, - is_index_global_attn=None, - is_global_attn=None, - output_attentions=False, - ): - self_outputs = self.self( - hidden_states, - attention_mask=attention_mask, - layer_head_mask=layer_head_mask, - is_index_masked=is_index_masked, - is_index_global_attn=is_index_global_attn, - is_global_attn=is_global_attn, - output_attentions=output_attentions, - ) - attn_output = self.output(self_outputs[0], hidden_states) - outputs = (attn_output,) + self_outputs[1:] - return outputs - - -# Copied from transformers.models.bert.modeling_bert.BertIntermediate -class LongformerIntermediate(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.intermediate_size) - if isinstance(config.hidden_act, str): - self.intermediate_act_fn = ACT2FN[config.hidden_act] - else: - self.intermediate_act_fn = config.hidden_act - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) - return hidden_states - - -# Copied from transformers.models.bert.modeling_bert.BertOutput -class LongformerOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.intermediate_size, config.hidden_size) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class LongformerLayer(nn.Module): - def __init__(self, config, layer_id=0): - super().__init__() - self.attention = LongformerAttention(config, layer_id) - self.intermediate = LongformerIntermediate(config) - self.output = LongformerOutput(config) - 
self.chunk_size_feed_forward = config.chunk_size_feed_forward - self.seq_len_dim = 1 - - def forward( - self, - hidden_states, - attention_mask=None, - layer_head_mask=None, - is_index_masked=None, - is_index_global_attn=None, - is_global_attn=None, - output_attentions=False, - ): - self_attn_outputs = self.attention( - hidden_states, - attention_mask=attention_mask, - layer_head_mask=layer_head_mask, - is_index_masked=is_index_masked, - is_index_global_attn=is_index_global_attn, - is_global_attn=is_global_attn, - output_attentions=output_attentions, - ) - attn_output = self_attn_outputs[0] - outputs = self_attn_outputs[1:] - - layer_output = apply_chunking_to_forward( - self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attn_output - ) - outputs = (layer_output,) + outputs - return outputs - - def ff_chunk(self, attn_output): - intermediate_output = self.intermediate(attn_output) - layer_output = self.output(intermediate_output, attn_output) - return layer_output - - -class LongformerEncoder(nn.Module): - def __init__(self, config): - super().__init__() - self.config = config - self.layer = nn.ModuleList([LongformerLayer(config, layer_id=i) for i in range(config.num_hidden_layers)]) - self.gradient_checkpointing = False - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - padding_len=0, - output_attentions=False, - output_hidden_states=False, - return_dict=True, - ): - - is_index_masked = attention_mask < 0 - is_index_global_attn = attention_mask > 0 - is_global_attn = is_index_global_attn.flatten().any().item() - - all_hidden_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None # All local attentions. 
- all_global_attentions = () if (output_attentions and is_global_attn) else None - - # check if head_mask has a correct number of layers specified if desired - if head_mask is not None: - assert head_mask.size()[0] == ( - len(self.layer) - ), f"The head_mask should be specified for {len(self.layer)} layers, but it is for {head_mask.size()[0]}." - for idx, layer_module in enumerate(self.layer): - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if self.gradient_checkpointing and self.training: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs, is_global_attn, output_attentions) - - return custom_forward - - layer_outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(layer_module), - hidden_states, - attention_mask, - head_mask[idx] if head_mask is not None else None, - is_index_masked, - is_index_global_attn, - ) - else: - layer_outputs = layer_module( - hidden_states, - attention_mask=attention_mask, - layer_head_mask=head_mask[idx] if head_mask is not None else None, - is_index_masked=is_index_masked, - is_index_global_attn=is_index_global_attn, - is_global_attn=is_global_attn, - output_attentions=output_attentions, - ) - hidden_states = layer_outputs[0] - - if output_attentions: - # bzs x seq_len x num_attn_heads x (num_global_attn + attention_window_len + 1) => bzs x num_attn_heads x seq_len x (num_global_attn + attention_window_len + 1) - all_attentions = all_attentions + (layer_outputs[1].transpose(1, 2),) - - if is_global_attn: - # bzs x num_attn_heads x num_global_attn x seq_len => bzs x num_attn_heads x seq_len x num_global_attn - all_global_attentions = all_global_attentions + (layer_outputs[2].transpose(2, 3),) - - # Add last layer - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - # undo padding - if padding_len > 0: - # unpad `hidden_states` because the calling function is expecting a length == input_ids.size(1) - 
hidden_states = hidden_states[:, :-padding_len] - if output_hidden_states: - all_hidden_states = tuple([state[:, :-padding_len] for state in all_hidden_states]) - - if output_attentions: - all_attentions = tuple([state[:, :, :-padding_len, :] for state in all_attentions]) - - if not return_dict: - return tuple( - v for v in [hidden_states, all_hidden_states, all_attentions, all_global_attentions] if v is not None - ) - return LongformerBaseModelOutput( - last_hidden_state=hidden_states, - hidden_states=all_hidden_states, - attentions=all_attentions, - global_attentions=all_global_attentions, - ) - - -# Copied from transformers.models.bert.modeling_bert.BertPooler -class LongformerPooler(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.activation = nn.Tanh() - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - # We "pool" the model by simply taking the hidden state corresponding - # to the first token. 
- first_token_tensor = hidden_states[:, 0] - pooled_output = self.dense(first_token_tensor) - pooled_output = self.activation(pooled_output) - return pooled_output - - -# Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead with Roberta->Longformer -class LongformerLMHead(nn.Module): - """Longformer Head for masked language modeling.""" - - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - - self.decoder = nn.Linear(config.hidden_size, config.vocab_size) - self.bias = nn.Parameter(torch.zeros(config.vocab_size)) - self.decoder.bias = self.bias - - def forward(self, features, **kwargs): - x = self.dense(features) - x = gelu(x) - x = self.layer_norm(x) - - # project back to size of vocabulary with bias - x = self.decoder(x) - - return x - - def _tie_weights(self): - # To tie those two weights if they get disconnected (on TPU or when the bias is resized) - self.bias = self.decoder.bias - - -class LongformerPreTrainedModel(PreTrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. 
- """ - - config_class = LongformerConfig - base_model_prefix = "longformer" - supports_gradient_checkpointing = True - _keys_to_ignore_on_load_unexpected = [r"position_ids"] - - def _init_weights(self, module): - """Initialize the weights""" - if isinstance(module, nn.Linear): - # Slightly different from the TF version which uses truncated_normal for initialization - # cf https://github.com/pytorch/pytorch/pull/5617 - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - if module.bias is not None: - module.bias.data.zero_() - elif isinstance(module, nn.Embedding): - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - if module.padding_idx is not None: - module.weight.data[module.padding_idx].zero_() - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - - def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, LongformerEncoder): - module.gradient_checkpointing = value - - -LONGFORMER_START_DOCSTRING = r""" - This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the - library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads - etc.) - This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. - Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage - and behavior. - Parameters: - config ([`LongformerConfig`]): Model configuration class with all the parameters of the - model. Initializing with a config file does not load the weights associated with the model, only the - configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. -""" - -LONGFORMER_INPUTS_DOCSTRING = r""" - Args: - input_ids (`torch.LongTensor` of shape `({0})`): - Indices of input sequence tokens in the vocabulary. 
- Indices can be obtained using [`LongformerTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - [What are input IDs?](../glossary#input-ids) - attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - [What are attention masks?](../glossary#attention-mask) - global_attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): - Mask to decide the attention given on each token, local attention or global attention. Tokens with global - attention attends to all other tokens, and all other tokens attend to them. This is important for - task-specific finetuning because it makes the model more flexible at representing the task. For example, - for classification, the token should be given global attention. For QA, all question tokens should also - have global attention. Please refer to the [Longformer paper](https://arxiv.org/abs/2004.05150) for more - details. Mask values selected in `[0, 1]`: - - 0 for local attention (a sliding window attention), - - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them). - head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*): - Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - decoder_head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*): - Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): - Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in `[0, - 1]`: - - 0 corresponds to a *sentence A* token, - - 1 corresponds to a *sentence B* token. - [What are token type IDs?](../glossary#token-type-ids) - position_ids (`torch.LongTensor` of shape `({0})`, *optional*): - Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, - config.max_position_embeddings - 1]`. - [What are position IDs?](../glossary#position-ids) - inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This - is useful if you want more control over how to convert `input_ids` indices into associated vectors than the - model's internal embedding lookup matrix. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. -""" - - -@add_start_docstrings( - "The bare Longformer Model outputting raw hidden-states without any specific head on top.", - LONGFORMER_START_DOCSTRING, -) -class LongformerModel(LongformerPreTrainedModel): - """ - This class copied code from [`RobertaModel`] and overwrote standard self-attention with longformer self-attention - to provide the ability to process long sequences following the self-attention approach described in [Longformer: - the Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, and Arman Cohan. - Longformer self-attention combines a local (sliding window) and global attention to extend to long documents - without the O(n^2) increase in memory and compute. 
- The self-attention module `LongformerSelfAttention` implemented here supports the combination of local and global - attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and dilated - attention are more relevant for autoregressive language modeling than finetuning on downstream tasks. Future - release will add support for autoregressive attention, but the support for dilated attention requires a custom CUDA - kernel to be memory and compute efficient. - """ - - def __init__(self, config, add_pooling_layer=True): - super().__init__(config) - self.config = config - - if isinstance(config.attention_window, int): - assert config.attention_window % 2 == 0, "`config.attention_window` has to be an even value" - assert config.attention_window > 0, "`config.attention_window` has to be positive" - config.attention_window = [config.attention_window] * config.num_hidden_layers # one value per layer - else: - assert len(config.attention_window) == config.num_hidden_layers, ( - "`len(config.attention_window)` should equal `config.num_hidden_layers`. " - f"Expected {config.num_hidden_layers}, given {len(config.attention_window)}" - ) - - self.embeddings = LongformerEmbeddings(config) - self.encoder = LongformerEncoder(config) - self.pooler = LongformerPooler(config) if add_pooling_layer else None - - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self): - return self.embeddings.word_embeddings - - def set_input_embeddings(self, value): - self.embeddings.word_embeddings = value - - def _prune_heads(self, heads_to_prune): - """ - Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base - class PreTrainedModel - """ - for layer, heads in heads_to_prune.items(): - self.encoder.layer[layer].attention.prune_heads(heads) - - def _pad_to_window_size( - self, - input_ids: torch.Tensor, - attention_mask: torch.Tensor, - token_type_ids: torch.Tensor, - position_ids: torch.Tensor, - inputs_embeds: torch.Tensor, - pad_token_id: int, - ): - """A helper function to pad tokens and mask to work with implementation of Longformer self-attention.""" - # padding - attention_window = ( - self.config.attention_window - if isinstance(self.config.attention_window, int) - else max(self.config.attention_window) - ) - - assert attention_window % 2 == 0, f"`attention_window` should be an even value. Given {attention_window}" - input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape - batch_size, seq_len = input_shape[:2] - - padding_len = (attention_window - seq_len % attention_window) % attention_window - if padding_len > 0: - ''' - logger.info( - f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of " - f"`config.attention_window`: {attention_window}" - ) - ''' - if input_ids is not None: - input_ids = nn.functional.pad(input_ids, (0, padding_len), value=pad_token_id) - if position_ids is not None: - # pad with position_id = pad_token_id as in modeling_roberta.RobertaEmbeddings - position_ids = nn.functional.pad(position_ids, (0, padding_len), value=pad_token_id) - if inputs_embeds is not None: - input_ids_padding = inputs_embeds.new_full( - (batch_size, padding_len), - self.config.pad_token_id, - dtype=torch.long, - ) - inputs_embeds_padding = self.embeddings(input_ids_padding) - inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2) - - attention_mask = nn.functional.pad( - attention_mask, (0, padding_len), value=False - ) # no attention on the padding tokens - token_type_ids = 
nn.functional.pad(token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 0 - - return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds - - def _merge_to_attention_mask(self, attention_mask: torch.Tensor, global_attention_mask: torch.Tensor): - # longformer self attention expects attention mask to have 0 (no attn), 1 (local attn), 2 (global attn) - # (global_attention_mask + 1) => 1 for local attention, 2 for global attention - # => final attention_mask => 0 for no attention, 1 for local attention 2 for global attention - if attention_mask is not None: - attention_mask = attention_mask * (global_attention_mask + 1) - else: - # simply use `global_attention_mask` as `attention_mask` - # if no `attention_mask` is given - attention_mask = global_attention_mask + 1 - return attention_mask - - @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @replace_return_docstrings(output_type=LongformerBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - global_attention_mask: Optional[torch.Tensor] = None, - head_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, LongformerBaseModelOutputWithPooling]: - r""" - Returns: - Examples: - ```python - >>> import torch - >>> from transformers import LongformerModel, LongformerTokenizer - >>> model = LongformerModel.from_pretrained("allenai/longformer-base-4096") - >>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096") - >>> SAMPLE_TEXT = " ".join(["Hello world! 
"] * 1000) # long input document - >>> input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1 - >>> attention_mask = torch.ones( - ... input_ids.shape, dtype=torch.long, device=input_ids.device - ... ) # initialize to local attention - >>> global_attention_mask = torch.zeros( - ... input_ids.shape, dtype=torch.long, device=input_ids.device - ... ) # initialize to global attention to be deactivated for all tokens - >>> global_attention_mask[ - ... :, - ... [ - ... 1, - ... 4, - ... 21, - ... ], - ... ] = 1 # Set global attention to random tokens for the sake of this example - >>> # Usually, set global attention based on the task. For example, - >>> # classification: the token - >>> # QA: question tokens - >>> # LM: potentially on the beginning of sentences and paragraphs - >>> outputs = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask) - >>> sequence_output = outputs.last_hidden_state - >>> pooled_output = outputs.pooler_output - ```""" - - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - input_shape = input_ids.size() - elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - - device = input_ids.device if input_ids is not None else inputs_embeds.device - - if attention_mask is None: - attention_mask = torch.ones(input_shape, device=device) - if token_type_ids is None: - token_type_ids = torch.zeros(input_shape, dtype=torch.long, 
device=device) - - # merge `global_attention_mask` and `attention_mask` - if global_attention_mask is not None: - attention_mask = self._merge_to_attention_mask(attention_mask, global_attention_mask) - - padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds = self._pad_to_window_size( - input_ids=input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - pad_token_id=self.config.pad_token_id, - ) - - # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] - # ourselves in which case we just need to make it broadcastable to all heads. - extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)[ - :, 0, 0, : - ] - - embedding_output = self.embeddings( - input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds - ) - - encoder_outputs = self.encoder( - embedding_output, - attention_mask=extended_attention_mask, - head_mask=head_mask, - padding_len=padding_len, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - sequence_output = encoder_outputs[0] - pooled_output = self.pooler(sequence_output) if self.pooler is not None else None - - if not return_dict: - return (sequence_output, pooled_output) + encoder_outputs[1:] - - return LongformerBaseModelOutputWithPooling( - last_hidden_state=sequence_output, - pooler_output=pooled_output, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - global_attentions=encoder_outputs.global_attentions, - ) - - -@add_start_docstrings("""Longformer Model with a `language modeling` head on top.""", LONGFORMER_START_DOCSTRING) -class LongformerForMaskedLM(LongformerPreTrainedModel): - - _keys_to_ignore_on_load_unexpected = [r"pooler"] - - def __init__(self, config): - super().__init__(config) - - 
self.longformer = LongformerModel(config, add_pooling_layer=False) - self.lm_head = LongformerLMHead(config) - - # Initialize weights and apply final processing - self.post_init() - - def get_output_embeddings(self): - return self.lm_head.decoder - - def set_output_embeddings(self, new_embeddings): - self.lm_head.decoder = new_embeddings - - @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @replace_return_docstrings(output_type=LongformerMaskedLMOutput, config_class=_CONFIG_FOR_DOC) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - global_attention_mask: Optional[torch.Tensor] = None, - head_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - labels: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, LongformerMaskedLMOutput]: - r""" - labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., - config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the - loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` - kwargs (`Dict[str, any]`, optional, defaults to *{}*): - Used to hide legacy arguments that have been deprecated. - Returns: - Mask filling example: - ```python - >>> from transformers import LongformerTokenizer, LongformerForMaskedLM - >>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096") - >>> model = LongformerForMaskedLM.from_pretrained("allenai/longformer-base-4096") - ``` - Let's try a very long input. - ```python - >>> TXT = ( - ... 
"My friends are but they eat too many carbs." - ... + " That's why I decide not to eat with them." * 300 - ... ) - >>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"] - >>> logits = model(input_ids).logits - >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() - >>> probs = logits[0, masked_index].softmax(dim=0) - >>> values, predictions = probs.topk(5) - >>> tokenizer.decode(predictions).split() - ['healthy', 'skinny', 'thin', 'good', 'vegetarian'] - ```""" - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - outputs = self.longformer( - input_ids, - attention_mask=attention_mask, - global_attention_mask=global_attention_mask, - head_mask=head_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - sequence_output = outputs[0] - prediction_scores = self.lm_head(sequence_output) - - masked_lm_loss = None - if labels is not None: - loss_fct = CrossEntropyLoss() - masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) - - if not return_dict: - output = (prediction_scores,) + outputs[2:] - return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output - - return LongformerMaskedLMOutput( - loss=masked_lm_loss, - logits=prediction_scores, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - global_attentions=outputs.global_attentions, - ) - - -@add_start_docstrings( - """ - Longformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the - pooled output) e.g. for GLUE tasks. 
- """, - LONGFORMER_START_DOCSTRING, -) -class LongformerForSequenceClassification(LongformerPreTrainedModel): - - _keys_to_ignore_on_load_unexpected = [r"pooler"] - - def __init__(self, config): - super().__init__(config) - self.num_labels = config.num_labels - self.config = config - - self.longformer = LongformerModel(config, add_pooling_layer=False) - self.classifier = LongformerClassificationHead(config) - - # Initialize weights and apply final processing - self.post_init() - - @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - processor_class=_TOKENIZER_FOR_DOC, - checkpoint="jpelhaw/longformer-base-plagiarism-detection", - output_type=LongformerSequenceClassifierOutput, - config_class=_CONFIG_FOR_DOC, - expected_output="'ORIGINAL'", - expected_loss=5.44, - ) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - global_attention_mask: Optional[torch.Tensor] = None, - head_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - labels: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, LongformerSequenceClassifierOutput]: - r""" - labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., - config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If - `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
- """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if global_attention_mask is None: - logger.info("Initializing global attention on CLS token...") - global_attention_mask = torch.zeros_like(input_ids) - # global attention on cls token - global_attention_mask[:, 0] = 1 - - outputs = self.longformer( - input_ids, - attention_mask=attention_mask, - global_attention_mask=global_attention_mask, - head_mask=head_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - sequence_output = outputs[0] - logits = self.classifier(sequence_output) - - loss = None - if labels is not None: - if self.config.problem_type is None: - if self.num_labels == 1: - self.config.problem_type = "regression" - elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): - self.config.problem_type = "single_label_classification" - else: - self.config.problem_type = "multi_label_classification" - - if self.config.problem_type == "regression": - loss_fct = MSELoss() - if self.num_labels == 1: - loss = loss_fct(logits.squeeze(), labels.squeeze()) - else: - loss = loss_fct(logits, labels) - elif self.config.problem_type == "single_label_classification": - loss_fct = CrossEntropyLoss() - loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) - elif self.config.problem_type == "multi_label_classification": - loss_fct = BCEWithLogitsLoss() - loss = loss_fct(logits, labels) - - if not return_dict: - output = (logits,) + outputs[2:] - return ((loss,) + output) if loss is not None else output - - return LongformerSequenceClassifierOutput( - loss=loss, - logits=logits, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - global_attentions=outputs.global_attentions, - ) - - -class LongformerClassificationHead(nn.Module): - """Head for 
sentence-level classification tasks.""" - - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.out_proj = nn.Linear(config.hidden_size, config.num_labels) - - def forward(self, hidden_states, **kwargs): - hidden_states = hidden_states[:, 0, :] # take token (equiv. to [CLS]) - hidden_states = self.dropout(hidden_states) - hidden_states = self.dense(hidden_states) - hidden_states = torch.tanh(hidden_states) - hidden_states = self.dropout(hidden_states) - output = self.out_proj(hidden_states) - return output - - -@add_start_docstrings( - """ - Longformer Model with a span classification head on top for extractive question-answering tasks like SQuAD / - TriviaQA (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). - """, - LONGFORMER_START_DOCSTRING, -) -class LongformerForQuestionAnswering(LongformerPreTrainedModel): - - _keys_to_ignore_on_load_unexpected = [r"pooler"] - - def __init__(self, config): - super().__init__(config) - self.num_labels = config.num_labels - - self.longformer = LongformerModel(config, add_pooling_layer=False) - self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - - # Initialize weights and apply final processing - self.post_init() - - @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @replace_return_docstrings(output_type=LongformerQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - global_attention_mask: Optional[torch.Tensor] = None, - head_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - start_positions: Optional[torch.Tensor] = None, - 
end_positions: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, LongformerQuestionAnsweringModelOutput]: - r""" - start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for position (index) of the start of the labelled span for computing the token classification loss. - Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence - are not taken into account for computing the loss. - end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for position (index) of the end of the labelled span for computing the token classification loss. - Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence - are not taken into account for computing the loss. - Returns: - Examples: - ```python - >>> from transformers import LongformerTokenizer, LongformerForQuestionAnswering - >>> import torch - >>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa") - >>> model = LongformerForQuestionAnswering.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa") - >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" - >>> encoding = tokenizer(question, text, return_tensors="pt") - >>> input_ids = encoding["input_ids"] - >>> # default is local attention everywhere - >>> # the forward method will automatically set global attention on question tokens - >>> attention_mask = encoding["attention_mask"] - >>> outputs = model(input_ids, attention_mask=attention_mask) - >>> start_logits = outputs.start_logits - >>> end_logits = outputs.end_logits - >>> all_tokens = tokenizer.convert_ids_to_tokens(input_ids[0].tolist()) - >>> answer_tokens = all_tokens[torch.argmax(start_logits) : torch.argmax(end_logits) + 1] - >>> answer = tokenizer.decode( - 
... tokenizer.convert_tokens_to_ids(answer_tokens) - ... ) # remove space prepending space token - ```""" - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if global_attention_mask is None: - if input_ids is None: - logger.warning( - "It is not possible to automatically generate the `global_attention_mask` because input_ids is" - " None. Please make sure that it is correctly set." - ) - else: - # set global attention on question tokens automatically - global_attention_mask = _compute_global_attention_mask(input_ids, self.config.sep_token_id) - - outputs = self.longformer( - input_ids, - attention_mask=attention_mask, - global_attention_mask=global_attention_mask, - head_mask=head_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - sequence_output = outputs[0] - # because of batch=1 and not max_seq_length, the blow code not use. - padding_len = input_ids[0].eq(1).sum() - if padding_len > 0: - sequence_output = sequence_output[:, :-padding_len] - - logits = self.qa_outputs(sequence_output) - start_logits, end_logits = logits.split(1, dim=-1) - start_logits = start_logits.squeeze(-1).contiguous() - end_logits = end_logits.squeeze(-1).contiguous() - - # align to original longformer loss. 
- regular_softmax_loss = False - - total_loss = None - if start_positions is not None and end_positions is not None: - # If we are on multi-GPU, split add a dimension - if len(start_positions.size()) > 1: - start_positions = start_positions.squeeze(-1) - if len(end_positions.size()) > 1: - end_positions = end_positions.squeeze(-1) - - if not regular_softmax_loss: - # loss function suggested in section 2.2 here https://arxiv.org/pdf/1710.10723.pdf - # NOTE: this returns sum of losses, not mean, so loss won't be normalized across different batch sizes - # but batch size is always 1, so this is not a problem - start_loss = self.or_softmax_cross_entropy_loss_one_doc(start_logits, start_positions, ignore_index=-1) - end_loss = self.or_softmax_cross_entropy_loss_one_doc(end_logits, end_positions, ignore_index=-1) - else: - loss_fct = torch.nn.CrossEntropyLoss(ignore_index=-1) - start_positions = start_positions[:, 0:1] - end_positions = end_positions[:, 0:1] - start_loss = loss_fct(start_logits, start_positions[:, 0]) - end_loss = loss_fct(end_logits, end_positions[:, 0]) - - total_loss = (start_loss + end_loss) / 2 - - if not return_dict: - output = (start_logits, end_logits) + outputs[2:] - return ((total_loss,) + output) if total_loss is not None else output - - return LongformerQuestionAnsweringModelOutput( - loss=total_loss, - start_logits=start_logits, - end_logits=end_logits, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - global_attentions=outputs.global_attentions, - ) - - def or_softmax_cross_entropy_loss_one_doc(self, logits, target, ignore_index=-1, dim=-1): - """loss function suggested in section 2.2 here https://arxiv.org/pdf/1710.10723.pdf""" - assert logits.ndim == 2 - assert target.ndim == 2 - assert logits.size(0) == target.size(0) - - # with regular CrossEntropyLoss, the numerator is only one of the logits specified by the target - # here, the numerator is the sum of a few potential targets, where some of them is the correct 
answer - - # compute a target mask - target_mask = target == ignore_index - # replaces ignore_index with 0, so `gather` will select logit at index 0 for the msked targets - masked_target = target * (1 - target_mask.long()) - # gather logits - gathered_logits = logits.gather(dim=dim, index=masked_target) - # Apply the mask to gathered_logits. Use a mask of -inf because exp(-inf) = 0 - gathered_logits[target_mask] = float('-inf') - - # each batch is one example - gathered_logits = gathered_logits.view(1, -1) - logits = logits.view(1, -1) - - # numerator = log(sum(exp(gathered logits))) - log_score = torch.logsumexp(gathered_logits, dim=dim, keepdim=False) - # denominator = log(sum(exp(logits))) - log_norm = torch.logsumexp(logits, dim=dim, keepdim=False) - - # compute the loss - loss = -(log_score - log_norm) - - # some of the examples might have a loss of `inf` when `target` is all `ignore_index`. - # remove those from the loss before computing the sum. Use sum instead of mean because - # it is easier to compute - return loss[~torch.isinf(loss)].sum() - - -@add_start_docstrings( - """ - Longformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. - for Named-Entity-Recognition (NER) tasks. 
- """, - LONGFORMER_START_DOCSTRING, -) -class LongformerForTokenClassification(LongformerPreTrainedModel): - - _keys_to_ignore_on_load_unexpected = [r"pooler"] - - def __init__(self, config): - super().__init__(config) - self.num_labels = config.num_labels - - self.longformer = LongformerModel(config, add_pooling_layer=False) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, config.num_labels) - - # Initialize weights and apply final processing - self.post_init() - - @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - processor_class=_TOKENIZER_FOR_DOC, - checkpoint="brad1141/Longformer-finetuned-norm", - output_type=LongformerTokenClassifierOutput, - config_class=_CONFIG_FOR_DOC, - expected_output=( - "['Evidence', 'Evidence', 'Evidence', 'Evidence', 'Evidence', 'Evidence', 'Evidence', 'Evidence'," - " 'Evidence', 'Evidence', 'Evidence', 'Evidence']" - ), - expected_loss=0.63, - ) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - global_attention_mask: Optional[torch.Tensor] = None, - head_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - labels: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, LongformerTokenClassifierOutput]: - r""" - labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
- """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - outputs = self.longformer( - input_ids, - attention_mask=attention_mask, - global_attention_mask=global_attention_mask, - head_mask=head_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - sequence_output = outputs[0] - - sequence_output = self.dropout(sequence_output) - logits = self.classifier(sequence_output) - - loss = None - if labels is not None: - loss_fct = CrossEntropyLoss() - loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) - - if not return_dict: - output = (logits,) + outputs[2:] - return ((loss,) + output) if loss is not None else output - - return LongformerTokenClassifierOutput( - loss=loss, - logits=logits, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - global_attentions=outputs.global_attentions, - ) - - -@add_start_docstrings( - """ - Longformer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and - a softmax) e.g. for RocStories/SWAG tasks. 
- """, - LONGFORMER_START_DOCSTRING, -) -class LongformerForMultipleChoice(LongformerPreTrainedModel): - def __init__(self, config): - super().__init__(config) - - self.longformer = LongformerModel(config) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, 1) - - # Initialize weights and apply final processing - self.post_init() - - @add_start_docstrings_to_model_forward( - LONGFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") - ) - @add_code_sample_docstrings( - processor_class=_TOKENIZER_FOR_DOC, - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=LongformerMultipleChoiceModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - global_attention_mask: Optional[torch.Tensor] = None, - head_mask: Optional[torch.Tensor] = None, - labels: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, LongformerMultipleChoiceModelOutput]: - r""" - labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., - num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See - `input_ids` above) - """ - num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - # set global attention on question tokens - if global_attention_mask is None and input_ids is not None: - logger.info("Initializing global attention on multiple choice...") - # put global attention on all tokens after `config.sep_token_id` - global_attention_mask = torch.stack( - [ - _compute_global_attention_mask(input_ids[:, i], self.config.sep_token_id, before_sep_token=False) - for i in range(num_choices) - ], - dim=1, - ) - - flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None - flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None - flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None - flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None - flat_global_attention_mask = ( - global_attention_mask.view(-1, global_attention_mask.size(-1)) - if global_attention_mask is not None - else None - ) - flat_inputs_embeds = ( - inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) - if inputs_embeds is not None - else None - ) - - outputs = self.longformer( - flat_input_ids, - position_ids=flat_position_ids, - token_type_ids=flat_token_type_ids, - attention_mask=flat_attention_mask, - global_attention_mask=flat_global_attention_mask, - head_mask=head_mask, - inputs_embeds=flat_inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - pooled_output = outputs[1] - - pooled_output = self.dropout(pooled_output) - logits = self.classifier(pooled_output) - reshaped_logits = logits.view(-1, num_choices) - - loss = None - if labels is not None: - loss_fct = CrossEntropyLoss() - loss 
= loss_fct(reshaped_logits, labels) - - if not return_dict: - output = (reshaped_logits,) + outputs[2:] - return ((loss,) + output) if loss is not None else output - - return LongformerMultipleChoiceModelOutput( - loss=loss, - logits=reshaped_logits, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - global_attentions=outputs.global_attentions, - ) diff --git a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/requirements.txt b/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/requirements.txt deleted file mode 100644 index 84310f9ea50..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -accelerate -datasets -transformers -torch==2.3.0 -neural-compressor==2.0 diff --git a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/run_qa_no_trainer.py b/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/run_qa_no_trainer.py deleted file mode 100644 index a0ff5e1e30a..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/run_qa_no_trainer.py +++ /dev/null @@ -1,1305 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 - -# Apache v2 license -# Copyright (C) 2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -# Copyright 2020 The HuggingFace Team All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" -Fine-tuning the library models for question answering. -""" -# You can also adapt this script on your own question answering task. Pointers for this are left as comments. -""" -This script is based on HuggingFace/transformers example: https://github.com/huggingface/transformers/blob/v4.6.1/examples/pytorch/question-answering/run_qa.py -Changes made to the script: - 1. Added pruning capabilities - 2. Added model distillation capabilities - 3. Added learning rate rewinding option - 4. Added methods to save all hyper-parameters used - 5. Added quantization capabilities -""" - -import logging -import os -import sys -from dataclasses import dataclass, field -from typing import Optional -from collections import defaultdict -from tqdm.auto import tqdm -import math - -import torch -import datasets -from datasets import load_dataset, load_metric - -import transformers -from trainer_qa import QuestionAnsweringTrainer -from transformers import ( - AutoConfig, - AutoModelForQuestionAnswering, - AutoTokenizer, - DataCollatorWithPadding, - EvalPrediction, - HfArgumentParser, - PreTrainedTokenizerFast, - TrainingArguments, - default_data_collator, - set_seed, - get_scheduler, - CONFIG_MAPPING, - MODEL_MAPPING, - SchedulerType -) -from transformers.trainer_utils import get_last_checkpoint, is_main_process -from transformers.utils import check_min_version -from transformers.file_utils import get_full_repo_name - -from utils_qa import postprocess_qa_predictions - -from huggingface_hub import Repository - -from functools import partial -from accelerate import Accelerator -from torch.utils.data import DataLoader -import argparse -from accelerate.logging import get_logger -import numpy as np -import utils_qa -import json -from neural_compressor.training import Pruning, prepare_compression -from neural_compressor.training import WeightPruningConfig - -os.environ["WANDB_DISABLED"] = "true" -os.environ["HTTP_PROXY"] = "" - -# Will error if the minimal version of Transformers is not 
installed. Remove at your own risks. -check_min_version("4.6.0") - -logger = get_logger(__name__) -# You should update this to your particular problem to have better documentation of `model_type` -MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys()) -MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) - -# (['loss', 'start_logits', 'end_logits']) -# batch(['attention_mask', 'end_positions', 'input_ids', 'start_positions', 'token_type_ids'] -def get_loss_one_logit(student_logit, teacher_logit): - t = 2.0 - from torch.nn import functional as F - return F.kl_div( - input=F.log_softmax(student_logit / t, dim=-1), - target=F.softmax(teacher_logit / t, dim=-1), - reduction="batchmean" - ) * (t ** 2) - -def save_prefixed_metrics(results, output_dir, file_name: str = "all_results.json", metric_key_prefix: str = "eval"): - """ - Save results while prefixing metric names. - Args: - results: (:obj:`dict`): - A dictionary of results. - output_dir: (:obj:`str`): - An output directory. - file_name: (:obj:`str`, `optional`, defaults to :obj:`all_results.json`): - An output file name. - metric_key_prefix: (:obj:`str`, `optional`, defaults to :obj:`eval`): - A metric name prefix. 
- """ - # Prefix all keys with metric_key_prefix + '_' - for key in list(results.keys()): - if not key.startswith(f"{metric_key_prefix}_"): - results[f"{metric_key_prefix}_{key}"] = results.pop(key) - - with open(os.path.join(output_dir, file_name), "w") as f: - json.dump(results, f, indent=4) - -def parse_args(): - parser = argparse.ArgumentParser(description="Finetune a transformers model on a Question Answering task") - parser.add_argument( - "--dataset_name", - type=str, - default=None, - help="The name of the dataset to use (via the datasets library).", - ) - parser.add_argument( - "--dataset_config_name", - type=str, - default=None, - help="The configuration name of the dataset to use (via the datasets library).", - ) - parser.add_argument( - "--train_file", - type=str, - default=None, - help="A csv or a json file containing the training data." - ) - parser.add_argument( - "--preprocessing_num_workers", - type=int, default=10, - help="A csv or a json file containing the training data." - ) - - parser.add_argument( - "--do_predict", - action="store_true", - help="To do prediction on the question answering model" - ) - parser.add_argument( - "--validation_file", - type=str, - default=None, - help="A csv or a json file containing the validation data." - ) - parser.add_argument( - "--test_file", - type=str, - default=None, - help="A csv or a json file containing the Prediction data." - ) - parser.add_argument( - "--max_seq_length", - type=int, - default=384, - help=( - "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," - " sequences shorter will be padded if `--pad_to_max_lengh` is passed." - ), - ) - parser.add_argument( - "--pad_to_max_length", - action="store_true", - help="If passed, pad all samples to `max_seq_length`. Otherwise, dynamic padding is used.", - ) - parser.add_argument( - "--model_name_or_path", - type=str, - help="Path to pretrained model or model identifier from huggingface.co/models." 
- ) - parser.add_argument( - "--teacher_model_name_or_path", - type=str, - default=None, - help="Path to pretrained model or model identifier from huggingface.co/models.", - required=False - ) - parser.add_argument( - "--config_name", - type=str, - default=None, - help="Pretrained config name or path if not the same as model_name", - ) - parser.add_argument( - "--tokenizer_name", - type=str, - default=None, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--use_slow_tokenizer", - action="store_true", - help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", - ) - parser.add_argument( - "--per_device_train_batch_size", - type=int, - default=8, - help="Batch size (per device) for the training dataloader.", - ) - parser.add_argument( - "--distill_loss_weight", - type=float, - default=0.0, - help="distiller loss weight" - ) - parser.add_argument( - "--per_device_eval_batch_size", - type=int, - default=8, - help="Batch size (per device) for the evaluation dataloader.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=5e-5, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument( - "--weight_decay", - type=float, - default=0.0, - help="Weight decay to use." - ) - parser.add_argument( - "--num_train_epochs", - type=int, - default=3, - help="Total number of training epochs to perform." - ) - parser.add_argument( - "--max_train_steps", - type=int, - default=None, - help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument( - "--lr_scheduler_type", - type=SchedulerType, - default="linear", - help="The scheduler type to use.", - choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], - ) - - parser.add_argument( - "--warm_epochs", - type=int, - default=0, - help="Number of epochs the network not be purned" - ) - parser.add_argument( - "--num_warmup_steps", - type=int, - default=0, - help="Number of steps for the warmup in the lr scheduler." - ) - parser.add_argument( - "--output_dir", - type=str, - default=None, - help="Where to store the final model." - ) - parser.add_argument( - "--seed", - type=int, - default=None, - help="A seed for reproducible training." - ) - parser.add_argument( - "--doc_stride", - type=int, - default=128, - help="When splitting up a long document into chunks how much stride to take between chunks.", - ) - parser.add_argument( - "--n_best_size", - type=int, - default=20, - help="The total number of n-best predictions to generate when looking for an answer.", - ) - parser.add_argument( - "--null_score_diff_threshold", - type=float, - default=0.0, - help=( - "The threshold used to select the null answer: if the best answer has a score that is less than " - "the score of the null answer minus this threshold, the null answer is selected for this example. " - "Only useful when `version_2_with_negative=True`." - ), - ) - parser.add_argument( - "--version_2_with_negative", - action="store_true", - help="If true, some of the examples do not have an answer.", - ) - parser.add_argument( - "--max_answer_length", - type=int, - default=30, - help=( - "The maximum length of an answer that can be generated. 
This is needed because the start " - "and end predictions are not conditioned on one another." - ), - ) - parser.add_argument( - "--max_train_samples", - type=int, - default=None, - help=( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ), - ) - parser.add_argument( - "--max_eval_samples", - type=int, - default=None, - help=( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - ), - ) - parser.add_argument( - "--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets" - ) - parser.add_argument( - "--max_predict_samples", - type=int, - default=None, - help="For debugging purposes or quicker training, truncate the number of prediction examples to this", - ) - parser.add_argument( - "--model_type", - type=str, - default=None, - help="Model type to use if training from scratch.", - choices=MODEL_TYPES, - ) - parser.add_argument( - "--cooldown_epochs", - type=int, default=0, - help="Cooling epochs after pruning." - ) - parser.add_argument( - "--do_prune", action="store_true", - help="Whether or not to prune the model" - ) - parser.add_argument( - "--pruning_scope", - type=str, default="global", - help="pruning scope, we support global and local." - ) - parser.add_argument( - "--pruning_pattern", - type=str, default="4x1", - help="pruning pattern type, we support NxM and N:M." - ) - parser.add_argument( - "--target_sparsity", - type=float, default=0.8, - help="Target sparsity of the model." - ) - parser.add_argument( - "--pruning_frequency", - type=int, default=-1, - help="Sparse step frequency for iterative pruning, default to a quarter of pruning steps." 
- ) - - parser.add_argument( - "--keep_conf", action="store_true", - help="Whether or not to keep the prune config infos" - ) - parser.add_argument( - "--pruning_config", - type=str, - help="pruning_config" - ) - - parser.add_argument( - "--push_to_hub", - action="store_true", - help="Whether or not to push the model to the Hub." - ) - parser.add_argument( - "--hub_model_id", - type=str, - help="The name of the repository to keep in sync with the local `output_dir`." - ) - parser.add_argument( - "--hub_token", - type=str, - help="The token to use to push to the Model Hub." - ) - parser.add_argument( - "--checkpointing_steps", - type=str, - default=None, - help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", - ) - parser.add_argument( - "--resume_from_checkpoint", - type=str, - default=None, - help="If the training should continue from a checkpoint folder.", - ) - - parser.add_argument( - "--with_tracking", - action="store_true", - help="Whether to enable experiment trackers for logging.", - ) - parser.add_argument( - "--report_to", - type=str, - default="all", - help=( - 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' - ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' - "Only applicable when `--with_tracking` is passed." 
- ), - ) - - parser.add_argument( - "--cache_dir", - type=str, - default=None, - help="Path to directory to store the pretrained models downloaded from huggingface.co", - ) - - parser.add_argument( - "--model_revision", - type=str, - default="main", - help="The specific model version to use (can be a branch name, tag name or commit id).", - ) - - parser.add_argument( - "--use_auth_token", - type=bool, - default=False, - help="Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).", - ) - - parser.add_argument( - "--do_train", - action="store_true", - help="Whether to run training.", - ) - - parser.add_argument( - "--do_eval", - action="store_true", - help="Whether to run eval on the dev set.", - ) - - args = parser.parse_args() - - # Sanity checks - if ( - args.dataset_name is None - and args.train_file is None - and args.validation_file is None - and args.test_file is None - ): - raise ValueError("Need either a dataset name or a training/validation/test file.") - else: - if args.train_file is not None: - extension = args.train_file.split(".")[-1] - assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." - if args.validation_file is not None: - extension = args.validation_file.split(".")[-1] - assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." - if args.test_file is not None: - extension = args.test_file.split(".")[-1] - assert extension in ["csv", "json"], "`test_file` should be a csv or a json file." - - if args.push_to_hub: - assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." - - return args - -def main(): - - args = parse_args() - - # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The - # information sent is the one passed as arguments along with your Python/PyTorch versions. 
- # send_example_telemetry("run_qa_no_trainer", args) - - # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. - # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers - # in the environment - - accelerator = ( - Accelerator(log_with=args.report_to, logging_dir=args.output_dir) if args.with_tracking else Accelerator() - ) - - ''' - accelerator_log_kwargs = {} - if args.with_tracking: - accelerator_log_kwargs["log_with"] = args.report_to - accelerator_log_kwargs["logging_dir"] = args.output_dir - accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs) - ''' - # Make one log on every process with the configuration for debugging. - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, - ) - logger.info(accelerator.state, main_process_only=False) - if accelerator.is_local_main_process: - datasets.utils.logging.set_verbosity_warning() - transformers.utils.logging.set_verbosity_info() - else: - datasets.utils.logging.set_verbosity_error() - transformers.utils.logging.set_verbosity_error() - - # If passed along, set the training seed now. 
- if args.seed is not None: - set_seed(args.seed) - - # Handle the repository creation - if accelerator.is_main_process: - if args.push_to_hub: - if args.hub_model_id is None: - repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) - else: - repo_name = args.hub_model_id - repo = Repository(args.output_dir, clone_from=repo_name) - - with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: - if "step_*" not in gitignore: - gitignore.write("step_*\n") - if "epoch_*" not in gitignore: - gitignore.write("epoch_*\n") - elif args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - accelerator.wait_for_everyone() - - script_path = os.path.split(os.path.abspath(__file__))[0] - - # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) - # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub). - # - # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called - # 'text' is found. You can easily tweak this behavior (see below). - # - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if args.dataset_name is not None: - # Downloading and loading a dataset from the hub. 
- raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir) - else: - data_files = {} - if args.train_file is not None: - data_files["train"] = args.train_file - extension = args.train_file.split(".")[-1] - - if args.validation_file is not None: - data_files["dev"] = args.validation_file - extension = args.validation_file.split(".")[-1] - if args.test_file is not None: - data_files["test"] = args.test_file - extension = args.test_file.split(".")[-1] - # datasets = load_dataset(extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir) - raw_datasets = load_dataset(os.path.join(script_path, "squad.py"), data_files=data_files, cache_dir=args.cache_dir) - - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. - - # Load pretrained model and tokenizer - # - # Distributed training: - # The .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. 
- config = AutoConfig.from_pretrained( - args.config_name if args.config_name else args.model_name_or_path, - cache_dir=args.cache_dir, - revision=args.model_revision, - use_auth_token=True if args.use_auth_token else None, - ) - tokenizer = AutoTokenizer.from_pretrained( - args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, - cache_dir=args.cache_dir, - use_fast=True, - revision=args.model_revision, - use_auth_token=True if args.use_auth_token else None, - ) - - # local py module - from modeling_longformer import LongformerForQuestionAnswering - model_class = LongformerForQuestionAnswering - - if args.distill_loss_weight > 0: - teacher_path = args.teacher_model_name_or_path - if teacher_path is None: - teacher_path = args.model_name_or_path - teacher_model = model_class.from_pretrained( - teacher_path, - from_tf=bool(".ckpt" in args.model_name_or_path), - config=config, - ) - - if args.model_name_or_path: - model = model_class.from_pretrained( - args.model_name_or_path, - from_tf=bool(".ckpt" in args.model_name_or_path), - config=config, - cache_dir=args.cache_dir, - revision=args.model_revision, - use_auth_token=True if args.use_auth_token else None, - ) - else: - logger.info("Training new model from scratch") - model = model_class.from_config(config) - - # Preprocessing the datasets. - # Preprocessing is slightly different for training and evaluation. - if args.do_train: - column_names = raw_datasets["train"].column_names - elif args.do_eval: - column_names = raw_datasets["validation"].column_names - else: - column_names = raw_datasets["test"].column_names - question_column_name = "question" if "question" in column_names else column_names[0] - context_column_name = "context" if "context" in column_names else column_names[1] - answer_column_name = "answers" if "answers" in column_names else column_names[2] - - # Padding side determines if we do (question|context) or (context|question). 
- # pad_on_right = tokenizer.padding_side == "right" - # max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) - max_seq_length = args.max_seq_length - - # preprocess context and answers - def preprocess_context(examples): - new_examples = {} - - def is_whitespace(c): - if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: - return True - return False - - def pre_tokenize(p): - doc_tokens = [] - char_to_word_offset = [] - prev_is_whitespace = True - for c in p: - if is_whitespace(c): - prev_is_whitespace = True - else: - if prev_is_whitespace: - doc_tokens.append(c) - else: - doc_tokens[-1] += c - prev_is_whitespace = False - char_to_word_offset.append(len(doc_tokens) - 1) - - return ' '.join(doc_tokens), char_to_word_offset - - new_examples[context_column_name] = [] - new_examples["answer_spans"] = [] - for i, p in enumerate(examples[context_column_name]): - tokenized_p, char_to_word_offset = pre_tokenize(p) - new_examples[context_column_name].append(tokenized_p) - - answer_spans = [] - for orig_answer_text, answer_offset in zip(examples[answer_column_name][i]['text'], examples[answer_column_name][i]['answer_start']): - answer_length = len(orig_answer_text) - try: - start_position = char_to_word_offset[answer_offset] - end_position = char_to_word_offset[answer_offset + answer_length - 1] - token_ids = tokenizer.encode(orig_answer_text) - except RuntimeError: - logger.info(f'Reading example {idx} failed') - start_position = 0 - end_position = 0 - answer_spans.append({'start': start_position, 'end': end_position, - 'text': orig_answer_text, 'token_ids': token_ids}) - new_examples["answer_spans"].append(answer_spans) - - for key in examples: - if key != context_column_name: - new_examples[key] = examples[key] - return new_examples - - # preprocessing - def prepare_features(examples, max_question_len=55, max_doc_len=4096, max_num_answers=64, ignore_seq_with_no_answers=False, mode="eval"): - - tokenized_examples = {} - 
tokenized_examples["input_ids"] = [] - tokenized_examples["attention_mask"] = [] - if mode == "train": - tokenized_examples["start_positions"] = [] - tokenized_examples["end_positions"] = [] - elif mode == "eval": - tokenized_examples["example_id"] = [] - else: - raise NotImplementedError("not implemented yet.") - - # not use for roberta - #tokenized_examples["token_type_ids"] = [] - - # Some of the questions have lots of whitespace on the left, which is not useful and will make the - # truncation of the context fail (the tokenized question will take a lots of space). So we remove that - # left whitespace - examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]] - - for example_index in range(len(examples[question_column_name])): - question_text = examples[question_column_name][example_index] - query_tokens = tokenizer.tokenize(question_text) - query_tokens = query_tokens[:max_question_len] - doc_tokens = examples[context_column_name][example_index].split(" ") - answer_spans = examples["answer_spans"][example_index] - tok_to_orig_index = [] - orig_to_tok_index = [] - all_doc_tokens = [] - for (i, token) in enumerate(doc_tokens): - orig_to_tok_index.append(len(all_doc_tokens)) - sub_tokens = tokenizer.tokenize(f'. 
{token}')[1:] if i > 0 else tokenizer.tokenize(token) - for sub_token in sub_tokens: - tok_to_orig_index.append(i) - all_doc_tokens.append(sub_token) - all_doc_tokens = all_doc_tokens[:max_doc_len] - # The -3 accounts for , and - max_tokens_per_doc_slice = max_seq_length - len(query_tokens) - 3 - assert max_tokens_per_doc_slice > 0 - - if args.doc_stride < 0: - # negative doc_stride indicates no sliding window, but using first slice - args.doc_stride = -100 * len(all_doc_tokens) # large -ve value for the next loop to execute once - - input_ids_list = [] - input_mask_list = [] - segment_ids_list = [] - start_positions_list = [] - end_positions_list = [] - answer_token_ids_list = [] - - for slice_start in range(0, len(all_doc_tokens), max_tokens_per_doc_slice - args.doc_stride): - slice_end = min(slice_start + max_tokens_per_doc_slice, len(all_doc_tokens)) - doc_slice_tokens = all_doc_tokens[slice_start:slice_end] - tokens = [tokenizer.cls_token] + query_tokens + [tokenizer.sep_token] \ - + doc_slice_tokens + [tokenizer.sep_token] - - # but don't use for roberta - segment_ids = [0] * (len(query_tokens) + 2) + [1] * (len(doc_slice_tokens) + 1) - assert len(segment_ids) == len(tokens) - - input_ids = tokenizer.convert_tokens_to_ids(tokens) - input_mask = [1] * len(input_ids) - - #if data_args.pad_to_max_length: # no need to pad if document is not strided - if False: - # Zero-pad up to the sequence length. 
- padding_len = max_seq_length - len(input_ids) - input_ids.extend([tokenizer.pad_token_id] * padding_len) - input_mask.extend([0] * padding_len) - segment_ids.extend([0] * padding_len) - - assert len(input_ids) == max_seq_length - assert len(input_mask) == max_seq_length - assert len(segment_ids) == max_seq_length - - doc_offset = len(query_tokens) + 2 - slice_start - - start_positions = [] - end_positions = [] - answer_token_ids = [] - for answer_span in answer_spans: - start_position = answer_span['start'] - end_position = answer_span['end'] - tok_start_position_in_doc = orig_to_tok_index[start_position] - not_end_of_doc = int(end_position + 1 < len(orig_to_tok_index)) - tok_end_position_in_doc = orig_to_tok_index[end_position + not_end_of_doc] - not_end_of_doc - if tok_start_position_in_doc < slice_start or tok_end_position_in_doc > slice_end: - # this answer is outside the current slice - continue - - start_positions.append(tok_start_position_in_doc + doc_offset) - end_positions.append(tok_end_position_in_doc + doc_offset) - answer_token_ids.append(answer_span['token_ids']) - - assert len(start_positions) == len(end_positions) - if ignore_seq_with_no_answers and len(start_positions) == 0: - continue - - # answers from start_positions and end_positions if > self.max_num_answers - start_positions = start_positions[:max_num_answers] - end_positions = end_positions[:max_num_answers] - answer_token_ids = answer_token_ids[:max_num_answers] - - # -1 padding up to self.max_num_answers - # -1 means empty answer in last token, while normal squad in [CLS] token - padding_len = max_num_answers - len(start_positions) - start_positions.extend([-1] * padding_len) - end_positions.extend([-1] * padding_len) - answer_token_ids.extend([[]] * padding_len) - - # replace duplicate start/end positions with `-1` because duplicates can result into -ve loss values - found_start_positions = set() - found_end_positions = set() - found_answer_token_ids = set() - for i, (start_position, 
end_position, answer_tokens) in enumerate( - zip(start_positions, end_positions, answer_token_ids) - ): - if start_position in found_start_positions: - start_positions[i] = -1 - if end_position in found_end_positions: - end_positions[i] = -1 - answer_tokens_as_str = ','.join([str(x) for x in answer_tokens]) - if answer_tokens_as_str in found_answer_token_ids: - answer_token_ids[i] = [] - - found_start_positions.add(start_position) - found_end_positions.add(end_position) - found_answer_token_ids.add(answer_tokens_as_str) - - input_ids_list.append(input_ids) - input_mask_list.append(input_mask) - segment_ids_list.append(segment_ids) - start_positions_list.append(start_positions) - end_positions_list.append(end_positions) - answer_token_ids_list.append(answer_token_ids) - - # pad answers in answer_token_ids_list to the longest answer - max_answer_len = max([len(item) for sublist in answer_token_ids_list for item in sublist]) # flat list - if max_answer_len == 0: - max_answer_len = 2 - for answers_of_one_slice in answer_token_ids_list: - for answer_tokens in answers_of_one_slice: - if len(answer_tokens) == 0: - # TODO: or ? 
- padding_len = max_answer_len - len(answer_tokens) - 2 - answer_tokens.extend([tokenizer.bos_token_id, tokenizer.eos_token_id] + - ([tokenizer.pad_token_id] * padding_len)) - else: - padding_len = max_answer_len - len(answer_tokens) - answer_tokens.extend([tokenizer.pad_token_id] * padding_len) - - - tokenized_examples["input_ids"].extend(input_ids_list) - tokenized_examples["attention_mask"].extend(input_mask_list) - - if mode == "train": - # only one answer used for training - #tokenized_examples["start_positions"].extend([each[0] for each in start_positions_list]) - #tokenized_examples["end_positions"].extend([each[0] for each in end_positions_list]) - tokenized_examples["start_positions"].append(start_positions_list[0]) - tokenized_examples["end_positions"].append(end_positions_list[0]) - elif mode == "eval": - tokenized_examples["example_id"].append(examples["id"][example_index]) - - return tokenized_examples - - prepare_train_features = partial(prepare_features, mode="train") - if args.do_train: - if "train" not in raw_datasets: - raise ValueError("--do_train requires a train dataset") - train_dataset = raw_datasets["train"] - if args.max_train_samples is not None: - # We will select sample from whole data if augment is specified - train_dataset = train_dataset.select(range(args.max_train_samples)) - with accelerator.main_process_first(): - # preprocess - train_dataset = train_dataset.map( - preprocess_context, - batched=True, - num_proc=args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not args.overwrite_cache, - ) - - # Create train feature from dataset - train_dataset = train_dataset.map( - prepare_train_features, - batched=True, - num_proc=args.preprocessing_num_workers, - remove_columns=column_names + ["answer_spans"], - load_from_cache_file=not args.overwrite_cache, - ) - if args.max_train_samples is not None: - # Number of samples might increase during Feature Creation, We select only specified max samples - 
train_dataset = train_dataset.select(range(args.max_train_samples)) - - prepare_validation_features = partial(prepare_features, mode="eval") - - if args.do_eval: - if "validation" not in raw_datasets: - raise ValueError("--do_eval requires a validation dataset") - eval_examples = raw_datasets["validation"] - if args.max_eval_samples is not None: - # We will select sample from whole data - eval_examples = eval_examples.select(range(args.max_eval_samples)) - with accelerator.main_process_first(): - # preprocess - eval_examples = eval_examples.map( - preprocess_context, - batched=True, - num_proc=args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not args.overwrite_cache, - ) - # Validation Feature Creation - eval_dataset = eval_examples.map( - prepare_validation_features, - batched=True, - num_proc=args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not args.overwrite_cache, - ) - - if args.max_eval_samples is not None: - # During Feature creation dataset samples might increase, we will select required samples again - eval_dataset = eval_dataset.select(range(args.max_eval_samples)) - - - # DataLoaders creation: - if args.pad_to_max_length: - # If padding was already done ot max length, we use the default data collator that will just convert everything - # to tensors. - data_collator = default_data_collator - else: - # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of - # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple - # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). 
- data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)) - - train_dataloader = DataLoader( - train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size - ) - - eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "answer_spans"]) - eval_dataloader = DataLoader( - eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size - ) - - # Post-processing: - def post_processing_function(examples, features, predictions, stage="eval"): - # Post-processing: we match the start logits and end logits to answers in the original context. - predictions = postprocess_qa_predictions( - examples=examples, - features=features, - predictions=predictions, - tokenizer=tokenizer, - version_2_with_negative=args.version_2_with_negative, - n_best_size=args.n_best_size, - max_answer_length=args.max_answer_length, - null_score_diff_threshold=args.null_score_diff_threshold, - output_dir=args.output_dir, - prefix=stage, - ) - # Format the result to the format the metric expects. - if args.version_2_with_negative: - formatted_predictions = [ - {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items() - ] - else: - formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()] - - references = [{"id": ex["id"], "answers": ex[answer_column_name], "aliases": ex["aliases"]} for ex in examples] - - return EvalPrediction(predictions=predictions, label_ids=references) - - # Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor - def create_and_fill_np_array(start_or_end_logits, dataset, max_len): - """ - Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor - Args: - start_or_end_logits(:obj:`tensor`): - This is the output predictions of the model. We can only enter either start or end logits. 
- eval_dataset: Evaluation dataset - max_len(:obj:`int`): - The maximum length of the output tensor. ( See the model.eval() part for more details ) - """ - - step = 0 - # create a numpy array and fill it with -100. - logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float64) - # Now since we have create an array now we will populate it with the outputs gathered using accelerator.gather - for i, output_logit in enumerate(start_or_end_logits): # populate columns - # We have to fill it such that we have to take the whole tensor and replace it on the newly created array - # And after every iteration we have to change the step - - batch_size = output_logit.shape[0] - cols = output_logit.shape[1] - - if step + batch_size < len(dataset): - logits_concat[step: step + batch_size, :cols] = output_logit - else: - logits_concat[step:, :cols] = output_logit[: len(dataset) - step] - - step += batch_size - - return logits_concat - - # Optimizer - # Split weights in two groups, one with weight decay and the other not. - no_decay = ["bias", "LayerNorm.weight"] - no_decay_outputs = ["bias", "LayerNorm.weight", "qa_outputs"] - optimizer_grouped_parameters = [ - { - "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], - "weight_decay": args.weight_decay, - }, - { - "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], - "weight_decay": 0.0, - }, - ] - if args.do_prune: - optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate, betas=[0.9, 0.9]) - else: - optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) - - # Scheduler and math around the number of training steps. 
- overrode_max_train_steps = False - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - overrode_max_train_steps = True - - lr_scheduler = get_scheduler( - name=args.lr_scheduler_type, - optimizer=optimizer, - num_warmup_steps=args.num_warmup_steps, - num_training_steps=args.max_train_steps, - ) - - if args.distill_loss_weight > 0: - teacher_model, model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( - teacher_model, model, optimizer, train_dataloader, eval_dataloader, lr_scheduler - ) - teacher_model.eval() - else: - # Prepare everything with our `accelerator`. - model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( - model, optimizer, train_dataloader, eval_dataloader, lr_scheduler - ) - # We need to recalculate our total training steps as the size of the training dataloader may have changed. - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if overrode_max_train_steps: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - # Afterwards we recalculate our number of training epochs - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - # Figure out how many steps we should save the Accelerator states - if hasattr(args.checkpointing_steps, "isdigit"): - checkpointing_steps = args.checkpointing_steps - if args.checkpointing_steps.isdigit(): - checkpointing_steps = int(args.checkpointing_steps) - else: - checkpointing_steps = None - - # We need to initialize the trackers we use, and also store our configuration. - # We initialize the trackers only on main process because `accelerator.log` - # only logs on main process and we don't want empty logs/runs on other processes. 
- if args.with_tracking: - if accelerator.is_main_process: - experiment_config = vars(args) - # TensorBoard cannot log Enums, need the raw value - experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value - accelerator.init_trackers("qa_no_trainer", experiment_config) - - # Train! - total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") - logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {args.max_train_steps}") - - # Only show the progress bar once on each machine. - progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) - completed_steps = 0 - starting_epoch = 0 - - # Potentially load in the weights and states from a previous save - if args.resume_from_checkpoint: - if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": - accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") - accelerator.load_state(args.resume_from_checkpoint) - path = os.path.basename(args.resume_from_checkpoint) - else: - # Get the most recent checkpoint - dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] - dirs.sort(key=os.path.getctime) - path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last - # Extract `epoch_{i}` or `step_{i}` - training_difference = os.path.splitext(path)[0] - - if "epoch" in training_difference: - starting_epoch = int(training_difference.replace("epoch_", "")) + 1 - resume_step = None - else: - resume_step = 
int(training_difference.replace("step_", "")) - starting_epoch = resume_step // len(train_dataloader) - resume_step -= starting_epoch * len(train_dataloader) - - # Pruning preparation - num_iterations = len(train_dataset) / total_batch_size - num_warm = int(args.warm_epochs * num_iterations) + args.num_warmup_steps - total_iterations = int(num_iterations * (args.num_train_epochs - args.cooldown_epochs)) - frequency = int((total_iterations - num_warm + 1) / 40) if args.pruning_frequency == -1 \ - else args.pruning_frequency - - pruning_start = num_warm - pruning_end = total_iterations - if not args.do_prune: - pruning_start = num_iterations * args.num_train_epochs + 1 - pruning_end = pruning_start - - pruning_configs=[ - { - "pruning_type": "snip_momentum", - "pruning_scope": "global", - "sparsity_decay_type": "exp", - "excluded_op_names": ["qa_outputs", "pooler", ".*embeddings*"], - "pruning_op_types": ["Linear"], - "max_sparsity_ratio_per_op": 0.98 - } - ] - - configs = WeightPruningConfig( - pruning_configs, - pruning_scope=args.pruning_scope, - target_sparsity=args.target_sparsity, - pattern=args.pruning_pattern, - pruning_frequency=frequency, - start_step=pruning_start, - end_step=pruning_end - ) - - compression_manager = prepare_compression(model=model, confs=configs) - compression_manager.callbacks.on_train_begin() - model = compression_manager.model - - - for epoch in range(starting_epoch, args.num_train_epochs): - model.train() - if epoch >= args.warm_epochs: - if args.with_tracking: - total_loss = 0 - for step, batch in enumerate(train_dataloader): - compression_manager.callbacks.on_step_begin(step) - - outputs = model(**batch) - loss = outputs.loss - # We keep track of the loss at each epoch - if args.with_tracking: - total_loss += loss.detach().float() - if args.distill_loss_weight > 0: - distill_loss_weight = args.distill_loss_weight - with torch.no_grad(): - teacher_outputs = teacher_model(**batch) - loss = (distill_loss_weight) / 2 * 
get_loss_one_logit(outputs['start_logits'], - teacher_outputs['start_logits']) \ - + (distill_loss_weight) / 2 * get_loss_one_logit(outputs['end_logits'], - teacher_outputs['end_logits']) - loss = loss / args.gradient_accumulation_steps - accelerator.backward(loss) - - if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: - compression_manager.callbacks.on_before_optimizer_step() - optimizer.step() - compression_manager.callbacks.on_after_optimizer_step() - lr_scheduler.step() - optimizer.zero_grad() - progress_bar.update(1) - completed_steps += 1 - - - if isinstance(checkpointing_steps, int): - if completed_steps % checkpointing_steps == 0: - output_dir = f"step_{completed_steps}" - if args.output_dir is not None: - output_dir = os.path.join(args.output_dir, output_dir) - accelerator.save_state(output_dir) - - if completed_steps >= args.max_train_steps: - break - else: - for step, batch in enumerate(train_dataloader): - outputs = model(**batch) - loss = outputs.loss - loss = loss / args.gradient_accumulation_steps - accelerator.backward(loss) - if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: - optimizer.step() - lr_scheduler.step() - optimizer.zero_grad() - progress_bar.update(1) - completed_steps += 1 - - if completed_steps >= args.max_train_steps: - break - - if args.checkpointing_steps == "epoch": - output_dir = f"epoch_{epoch}" - if args.output_dir is not None: - output_dir = os.path.join(args.output_dir, output_dir) - accelerator.save_state(output_dir) - - if args.push_to_hub and epoch < args.num_train_epochs - 1: - accelerator.wait_for_everyone() - unwrapped_model = accelerator.unwrap_model(model) - unwrapped_model.save_pretrained( - args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save - ) - if accelerator.is_main_process: - tokenizer.save_pretrained(args.output_dir) - repo.push_to_hub( - commit_message=f"Training in progress epoch {epoch}", 
blocking=False, auto_lfs_prune=True - ) - - # eval each epoch - logger.info(f"***** Running Evaluation*****") - all_start_logits = [] - all_end_logits = [] - - # pruner.on_before_eval() - model.eval() - for step, batch in enumerate(eval_dataloader): - with torch.no_grad(): - outputs = model(**batch) - start_logits = outputs.start_logits - end_logits = outputs.end_logits - - if not args.pad_to_max_length: # necessary to pad predictions and labels for being gathered - start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) - end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) - - all_start_logits.append(accelerator.gather(start_logits).cpu().numpy()) - all_end_logits.append(accelerator.gather(end_logits).cpu().numpy()) - - max_len = max([x.shape[1] for x in all_start_logits]) # Get the max_length of the tensor - # pruner.on_after_eval() - - # concatenate the numpy array - start_logits_concat = create_and_fill_np_array(all_start_logits, eval_dataset, max_len) - end_logits_concat = create_and_fill_np_array(all_end_logits, eval_dataset, max_len) - - # delete the list of numpy arrays - del all_start_logits - del all_end_logits - - outputs_numpy = (start_logits_concat, end_logits_concat) - eval_preds = post_processing_function(eval_examples, eval_dataset, outputs_numpy) - - metrics = utils_qa.evaluate_triviaqa(eval_preds.label_ids, eval_preds.predictions) - logger.info(metrics) - - - if args.output_dir is not None: - accelerator.wait_for_everyone() - unwrapped_model = accelerator.unwrap_model(model.model) - unwrapped_model.save_pretrained( - args.output_dir + f"eph{args.num_train_epochs}_lr{args.learning_rate}_bs{total_batch_size}", - is_main_process=accelerator.is_main_process, save_function=accelerator.save - ) - if accelerator.is_main_process: - tokenizer.save_pretrained(args.output_dir) - if args.push_to_hub: - repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True) - - 
logger.info(json.dumps(metrics, indent=4)) - save_prefixed_metrics(metrics, args.output_dir) - - -def _mp_fn(index): - # For xla_spawn (TPUs) - main() - - -if __name__ == "__main__": - main() diff --git a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/scripts/download_data_and_convert.sh b/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/scripts/download_data_and_convert.sh deleted file mode 100644 index f0d7d0f3fa4..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/scripts/download_data_and_convert.sh +++ /dev/null @@ -1,19 +0,0 @@ -# from http://nlp.cs.washington.edu/triviaqa/ and https://github.com/mandarjoshi90/triviaqa -wget http://nlp.cs.washington.edu/triviaqa/data/triviaqa-rc.tar.gz - -tar -xvzf triviaqa-rc.tar.gz - -# the blow codes from the original paper code: https://github.com/allenai/longformer -python -m utils.convert_to_squad_format \ - --triviaqa_file ./qa/wikipedia-train.json \ - --wikipedia_dir ./evidence/wikipedia/ \ - --web_dir ./evidence/web/ \ - --max_num_tokens 4096 \ - --squad_file squad-wikipedia-train-4096.json - -python utils.convert_to_squad_format \ - --triviaqa_file ./qa/wikipedia-dev.json \ - --wikipedia_dir ./evidence/wikipedia/ \ - --web_dir ./evidence/web/ \ - --max_num_tokens 4096 \ - --squad_file squad-wikipedia-dev-4096.json diff --git a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/scripts/longformer_base_dense_fintune.sh b/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/scripts/longformer_base_dense_fintune.sh deleted file mode 100644 index ce21e329c16..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/scripts/longformer_base_dense_fintune.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -set -x - -train_file=./squad-wikipedia-train-4096.json -validation_file=./squad-wikipedia-dev-4096.json 
-pretrained_model=allenai/longformer-base-4096 - -accelerate launch --main_process_port 29245 run_qa_no_trainer.py \ - --model_name_or_path $pretrained_model \ - --do_train \ - --do_eval \ - --train_file $train_file \ - --validation_file $validation_file \ - --cache_dir ./tmp_cached \ - --max_seq_length 4096 \ - --doc_stride -1 \ - --per_device_train_batch_size 1 \ - --gradient_accumulation_steps 16 \ - --per_device_eval_batch_size 1 \ - --num_warmup_steps 1000 \ - --learning_rate 3.5e-5 \ - --num_train_epochs 4 \ - --output_dir longformer-base-4096-dense-baseline diff --git a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/scripts/longformer_base_sparse_global_4x1_pruning.sh b/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/scripts/longformer_base_sparse_global_4x1_pruning.sh deleted file mode 100644 index 3c08207aa62..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/scripts/longformer_base_sparse_global_4x1_pruning.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -set -x - -train_file=./squad-wikipedia-train-4096.json -validation_file=./squad-wikipedia-dev-4096.json -teacher_model=Intel/longformer-base-4096-finetuned-triviaqa - -accelerate launch --main_process_port 29745 run_qa_no_trainer.py \ - --model_name_or_path $teacher_model \ - --do_train \ - --do_eval \ - --train_file $train_file \ - --validation_file $validation_file \ - --cache_dir ./tmp_cached \ - --max_seq_length 4096 \ - --doc_stride -1 \ - --per_device_train_batch_size 1 \ - --gradient_accumulation_steps 8 \ - --per_device_eval_batch_size 1 \ - --num_warmup_steps 1000 \ - --do_prune \ - --target_sparsity 0.8 \ - --pruning_scope "global" \ - --pruning_pattern "4x1" \ - --pruning_frequency 1000 \ - --cooldown_epochs 10 \ - --learning_rate 1e-4 \ - --num_train_epochs 18 \ - --weight_decay 0.01 \ - --output_dir longformer-base-4096-pruned-global-sparse80 \ - --teacher_model_name_or_path $teacher_model \ - 
--distill_loss_weight 3 diff --git a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/squad.py b/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/squad.py deleted file mode 100644 index b9a2847449d..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/squad.py +++ /dev/null @@ -1,144 +0,0 @@ -# coding=utf-8 -# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Lint as: python3 -"""SQUAD: The Stanford Question Answering Dataset.""" - - -import json - -import datasets -from datasets.tasks import QuestionAnsweringExtractive - - -logger = datasets.logging.get_logger(__name__) - - -_CITATION = """\ -@article{2016arXiv160605250R, - author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev}, - Konstantin and {Liang}, Percy}, - title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}", - journal = {arXiv e-prints}, - year = 2016, - eid = {arXiv:1606.05250}, - pages = {arXiv:1606.05250}, -archivePrefix = {arXiv}, - eprint = {1606.05250}, -} -""" - -_DESCRIPTION = """\ -Stanford Question Answering Dataset (SQuAD) is a reading comprehension \ -dataset, consisting of questions posed by crowdworkers on a set of Wikipedia \ -articles, where the answer to every question is a segment of text, or span, \ -from the corresponding reading passage, or the question might be unanswerable. 
-""" - -_URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/" -_URLS = { - "train": _URL + "train-v1.1.json", - "dev": _URL + "dev-v1.1.json", -} - - -class SquadConfig(datasets.BuilderConfig): - """BuilderConfig for SQUAD.""" - - def __init__(self, **kwargs): - """BuilderConfig for SQUAD. - Args: - **kwargs: keyword arguments forwarded to super. - """ - super(SquadConfig, self).__init__(**kwargs) - - -class Squad(datasets.GeneratorBasedBuilder): - """SQUAD: The Stanford Question Answering Dataset. Version 1.1.""" - - BUILDER_CONFIGS = [ - SquadConfig( - name="plain_text", - version=datasets.Version("1.0.0", ""), - description="Plain text", - ), - ] - print(BUILDER_CONFIGS) - - def _info(self): - return datasets.DatasetInfo( - description=_DESCRIPTION, - features=datasets.Features( - { - "id": datasets.Value("string"), - "title": datasets.Value("string"), - "context": datasets.Value("string"), - "question": datasets.Value("string"), - "answers": datasets.features.Sequence( - { - "text": datasets.Value("string"), - "answer_start": datasets.Value("int32"), - } - ), - "aliases": datasets.features.Sequence(datasets.Value("string")), - } - ), - # No default supervised_keys (as we have to pass both question - # and context as input). 
- supervised_keys=None, - homepage="https://rajpurkar.github.io/SQuAD-explorer/", - citation=_CITATION, - task_templates=[ - QuestionAnsweringExtractive( - question_column="question", context_column="context", answers_column="answers" - ) - ], - ) - - def _split_generators(self, dl_manager): - #downloaded_files = dl_manager.download_and_extract(_URLS) - downloaded_files = self.config.data_files - return [ - datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"][0]}), - datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"][0]}), - ] - - def _generate_examples(self, filepath): - """This function returns the examples in the raw (text) form.""" - logger.info("generating examples from = %s", filepath) - key = 0 - with open(filepath, encoding="utf-8") as f: - squad = json.load(f) - for article in squad["data"]: - title = article.get("title", "") - for paragraph in article["paragraphs"]: - context = paragraph["context"] # do not strip leading blank spaces GH-2585 - for qa in paragraph["qas"]: - answer_starts = [answer["answer_start"] for answer in qa["answers"]] - answers = [answer["text"] for answer in qa["answers"]] - # Features currently used are "context", "question", and "answers". - # Others are extracted here for the ease of future expansions. 
- yield key, { - "title": title, - "context": context, - "question": qa["question"], - "id": qa["id"].split('--')[0], - "answers": { - "answer_start": answer_starts, - "text": answers, - }, - "aliases": qa["aliases"] if qa.get("aliases") is not None else [], - } - key += 1 diff --git a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/trainer_qa.py b/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/trainer_qa.py deleted file mode 100644 index af237521e8a..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/trainer_qa.py +++ /dev/null @@ -1,150 +0,0 @@ -# coding=utf-8 - -# Apache v2 license -# Copyright (C) 2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -# Copyright 2020 The HuggingFace Team All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" -A subclass of `Trainer` specific to Question-Answering tasks -""" -""" -This script is based on HuggingFace/transformers example: https://github.com/huggingface/transformers/blob/v4.6.1/examples/pytorch/question-answering/trainer_qa.py -""" - -from transformers import Trainer, is_torch_tpu_available -from transformers.trainer_utils import PredictionOutput -import utils_qa -import collections -from collections import defaultdict -import numpy as np -import torch -import json - - -if is_torch_tpu_available(): - import torch_xla.core.xla_model as xm - import torch_xla.debug.metrics as met - - -class QuestionAnsweringTrainer(Trainer): - def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs): - super().__init__(*args, **kwargs) - self.eval_examples = eval_examples - self.post_process_function = post_process_function - - def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None): - eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset - eval_dataloader = self.get_eval_dataloader(eval_dataset) - eval_examples = self.eval_examples if eval_examples is None else eval_examples - - # Temporarily disable metric computation, we will do it in the loop here. 
- compute_metrics = self.compute_metrics - self.compute_metrics = None - eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop - try: - output = eval_loop( - eval_dataloader, - description="Evaluation", - # No point gathering the predictions if there are no metrics, otherwise we defer to - # self.args.prediction_loss_only - prediction_loss_only=None, - ignore_keys=ignore_keys, - ) - finally: - self.compute_metrics = compute_metrics - - if self.post_process_function is not None and self.compute_metrics is None: - eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions) - metrics = utils_qa.evaluate_triviaqa(eval_preds.label_ids, eval_preds.predictions) - #metrics = self.compute_metrics(eval_preds) - - #self.log(metrics) - else: - metrics = {} - - #if self.args.tpu_metrics_debug or self.args.debug: - # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) - # xm.master_print(met.metrics_report()) - - #self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics) - return metrics - - def predict(self, predict_dataset, predict_examples, ignore_keys=None, n_best_size=20, max_answer_length=30): - predict_dataloader = self.get_test_dataloader(predict_dataset) - - # Temporarily disable metric computation, we will do it in the loop here. 
- compute_metrics = self.compute_metrics - self.compute_metrics = None - eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop - output = eval_loop( - predict_dataloader, - description="Prediction", - # No point gathering the predictions if there are no metrics, otherwise we defer to - # self.args.prediction_loss_only - prediction_loss_only=None, - ignore_keys=ignore_keys, - ) - - all_start_logits, all_end_logits = output.predictions - - all_predictions = collections.OrderedDict() - - qa_with_duplicates = defaultdict(list) - - for example_index, example in enumerate(predict_examples): - input_ids = torch.tensor([predict_dataset[example_index]["input_ids"]]) - qid = predict_dataset[example_index]["example_id"] - - eos_token_indices = (input_ids == self.tokenizer.eos_token_id).nonzero() - question_end_index = eos_token_indices.view(input_ids.size(0), 2, 2)[:, 0, 1] - start_logits = all_start_logits[example_index] - end_logits = all_end_logits[example_index] - start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist() - end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist() - potential_answers = [] - for start_index in start_indexes: - for end_index in end_indexes: - if start_index <= question_end_index[0]: - continue - if end_index <= question_end_index[0]: - continue - if start_index > end_index: - continue - answer_len = end_index - start_index + 1 - if answer_len > max_answer_length: - continue - potential_answers.append({'start': start_index, 'end': end_index, - 'start_logit': start_logits[start_index].item(), - 'end_logit': end_logits[end_index].item()}) - sorted_answers = sorted(potential_answers, key=lambda x: (x['start_logit'] + x['end_logit']), reverse=True) - if len(sorted_answers) == 0: - answer = {'text': 'NoAnswerFound', 'score': -1000000} - else: - answer = sorted_answers[0] - answer_token_ids = input_ids[0, answer['start']: answer['end'] + 1] - answer_tokens = 
self.tokenizer.convert_ids_to_tokens(answer_token_ids.tolist()) - text = self.tokenizer.convert_tokens_to_string(answer_tokens) - score = answer['start_logit'] + answer['end_logit'] - answer = {'text': text, 'score': score} - qa_with_duplicates[qid].append({'answer_score': answer['score'], 'answer_text': answer['text'], }) - - qid_to_answer_text = {} - for qid, answer_metrics in qa_with_duplicates.items(): - top_answer = sorted(answer_metrics, key=lambda x: x['answer_score'], reverse=True)[0] - qid_to_answer_text[qid] = top_answer['answer_text'] - - with open('predictions.json', 'w') as f: - f.write(json.dumps(qid_to_answer_text, indent=4) + "\n") diff --git a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils/__init__.py b/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils/convert_to_squad_format.py b/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils/convert_to_squad_format.py deleted file mode 100644 index 6279320e045..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils/convert_to_squad_format.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from . 
import file_utils -from . import dataset_utils -import os -from tqdm import tqdm -import random -import nltk -import argparse - - -def get_text(qad, domain): - local_file = os.path.join(args.web_dir, qad['Filename']) if domain == 'SearchResults' else os.path.join(args.wikipedia_dir, qad['Filename']) - return file_utils.get_file_contents(local_file, encoding='utf-8') - - -def select_relevant_portion(text): - paras = text.split('\n') - selected = [] - done = False - for para in paras: - # nltk is slow, but we have to use its word tokenizer for the distant supervision matching to work - # TODO: try both see which one works better - # words = para.split() - # extra_words = args.max_num_tokens - len(selected) - # selected.extend(words[:extra_words]) - # if len(selected) >= args.max_num_tokens: - # break - sents = sent_tokenize.tokenize(para) - for sent in sents: - words = nltk.word_tokenize(sent) - for word in words: - selected.append(word) - if len(selected) >= args.max_num_tokens: - done = True - break - if done: - break - if done: - break - selected.append('\n') - st = ' '.join(selected).strip() - return st - - -def add_triple_data(datum, page, domain): - qad = {'Source': domain} - for key in ['QuestionId', 'Question', 'Answer']: - if key == 'Answer' and key not in datum: - qad[key] = {'NormalizedAliases': []} - qid = datum['QuestionId'] - print(f'qid: {qid} does not have an answer.') - else: - qad[key] = datum[key] - for key in page: - qad[key] = page[key] - return qad - - -def get_qad_triples(data): - qad_triples = [] - for datum in data['Data']: - for key in ['EntityPages', 'SearchResults']: - for page in datum.get(key, []): - qad = add_triple_data(datum, page, key) - qad_triples.append(qad) - return qad_triples - - -def convert_to_squad_format(qa_json_file, squad_file): - qa_json = dataset_utils.read_triviaqa_data(qa_json_file) - qad_triples = get_qad_triples(qa_json) - random.seed(args.seed) - random.shuffle(qad_triples) - - data = [] - for qad in 
tqdm(qad_triples): - qid = qad['QuestionId'] - - text = get_text(qad, qad['Source']) - selected_text = select_relevant_portion(text) - - question = qad['Question'] - para = {'context': selected_text, 'qas': [{'question': question, 'answers': []}]} - data.append({'paragraphs': [para]}) - qa = para['qas'][0] - qa['id'] = dataset_utils.get_question_doc_string(qid, qad['Filename']) - qa['qid'] = qid - - answers_in_doc = dataset_utils.answer_index_in_document(qad['Answer'], selected_text) - qa['answers'] = answers_in_doc - # We want all answers in the document, not just the first answer - # if index == -1: - # if qa_json['Split'] == 'train': - # continue - # else: - # qa['answers'].append({'text': ans_string, 'answer_start': index}) - - # This doesn't fit the squad format, but we need it for evaluation - qa['aliases'] = qad['Answer']['NormalizedAliases'] - - if qa_json['Split'] == 'train' and len(data) >= args.sample_size and qa_json['Domain'] == 'Web': - break - - if len(data) >= args.sample_size: - break - - squad = {'data': data, 'version': qa_json['Version']} - file_utils.write_json_to_file(squad, squad_file) - print('Added', len(data)) - - -def get_args(): - parser = argparse.ArgumentParser() - parser.add_argument('--triviaqa_file', help='Triviaqa file') - parser.add_argument('--squad_file', help='Squad file') - parser.add_argument('--wikipedia_dir', help='Wikipedia doc dir') - parser.add_argument('--web_dir', help='Web doc dir') - - parser.add_argument('--seed', default=10, type=int, help='Random seed') - parser.add_argument('--max_num_tokens', default=800, type=int, help='Maximum number of tokens from a document') - parser.add_argument('--sample_size', default=8000000000000, type=int, help='Random seed') - parser.add_argument('--tokenizer', default='tokenizers/punkt/english.pickle', help='Sentence tokenizer') - args = parser.parse_args() - return args - - -if __name__ == '__main__': - args = get_args() - sent_tokenize = nltk.data.load(args.tokenizer) - 
convert_to_squad_format(args.triviaqa_file, args.squad_file) diff --git a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils/dataset_utils.py b/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils/dataset_utils.py deleted file mode 100644 index dd42c6cac2a..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils/dataset_utils.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from . import file_utils -import re - - -# Key for wikipedia eval is question-id. 
Key for web eval is the (question_id, filename) tuple -def get_key_to_ground_truth(data): - if data['Domain'] == 'Wikipedia': - return {datum['QuestionId']: datum['Answer'] for datum in data['Data']} - else: - return get_qd_to_answer(data) - - -def get_question_doc_string(qid, doc_name): - return '{}--{}'.format(qid, doc_name) - -def get_qd_to_answer(data): - key_to_answer = {} - for datum in data['Data']: - for page in datum.get('EntityPages', []) + datum.get('SearchResults', []): - qd_tuple = get_question_doc_string(datum['QuestionId'], page['Filename']) - key_to_answer[qd_tuple] = datum['Answer'] - return key_to_answer - - -def read_clean_part(datum): - for key in ['EntityPages', 'SearchResults']: - new_page_list = [] - for page in datum.get(key, []): - if page['DocPartOfVerifiedEval']: - new_page_list.append(page) - datum[key] = new_page_list - assert len(datum['EntityPages']) + len(datum['SearchResults']) > 0 - return datum - - -def read_triviaqa_data(qajson): - data = file_utils.read_json(qajson) - # read only documents and questions that are a part of clean data set - if data['VerifiedEval']: - clean_data = [] - for datum in data['Data']: - if datum['QuestionPartOfVerifiedEval']: - if data['Domain'] == 'Web': - datum = read_clean_part(datum) - clean_data.append(datum) - data['Data'] = clean_data - return data - - -def answer_index_in_document(answer, document): - answer_list = answer['NormalizedAliases'] - answers_in_doc = [] - for answer_string_in_doc in answer_list: - indices = [m.start() for m in re.finditer(answer_string_in_doc, document, flags=re.IGNORECASE)] - for index in indices: - answers_in_doc.append({ - 'text': answer_string_in_doc, - 'answer_start': index - }) - return answers_in_doc diff --git a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils/file_utils.py b/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils/file_utils.py deleted file mode 100644 index ad165c545e4..00000000000 --- 
a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils/file_utils.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json - - -def write_json_to_file(json_object, json_file, mode='w', encoding='utf-8'): - with open(json_file, mode, encoding=encoding) as outfile: - json.dump(json_object, outfile, indent=4, sort_keys=True, ensure_ascii=False) - - -def get_file_contents(filename, encoding='utf-8'): - with open(filename, encoding=encoding) as f: - content = f.read() - return content - - -def read_json(filename, encoding='utf-8'): - contents = get_file_contents(filename, encoding=encoding) - return json.loads(contents) - - -def get_file_contents_as_list(file_path, encoding='utf-8', ignore_blanks=True): - contents = get_file_contents(file_path, encoding=encoding) - lines = contents.split('\n') - lines = [line for line in lines if line != ''] if ignore_blanks else lines - return lines diff --git a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils_qa.py b/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils_qa.py deleted file mode 100644 index 53924013612..00000000000 --- a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/utils_qa.py +++ /dev/null @@ -1,451 +0,0 @@ -# coding=utf-8 - -# Apache v2 license -# Copyright (C) 2021 Intel Corporation -# 
SPDX-License-Identifier: Apache-2.0 - -# Copyright 2020 The HuggingFace Team All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Post-processing utilities for question answering. -""" -""" -This script is based on HuggingFace/transformers examples: https://github.com/huggingface/transformers/blob/v4.6.1/examples/pytorch/question-answering/utils_qa.py -""" -import collections -import json -import logging -import os -from typing import Optional, Tuple - -import numpy as np -from tqdm.auto import tqdm -import sys -from collections import Counter -import string -import re -from collections import defaultdict -import torch - - -logger = logging.getLogger(__name__) - - -def postprocess_qa_predictions( - examples, - features, - predictions: Tuple[np.ndarray, np.ndarray], - tokenizer=None, - version_2_with_negative: bool = False, - n_best_size: int = 20, - max_answer_length: int = 30, - null_score_diff_threshold: float = 0.0, - output_dir: Optional[str] = None, - prefix: Optional[str] = None, - is_world_process_zero: bool = True, -): - """ - Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the - original contexts. This is the base postprocessing functions for models that only return start and end logits. - Args: - examples: The non-preprocessed dataset (see the main script for more information). - features: The processed dataset (see the main script for more information). 
- predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): - The predictions of the model: two arrays containing the start logits and the end logits respectively. Its - first dimension must match the number of elements of :obj:`features`. - version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): - Whether or not the underlying dataset contains examples with no answers. - n_best_size (:obj:`int`, `optional`, defaults to 20): - The total number of n-best predictions to generate when looking for an answer. - max_answer_length (:obj:`int`, `optional`, defaults to 30): - The maximum length of an answer that can be generated. This is needed because the start and end predictions - are not conditioned on one another. - null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0): - The threshold used to select the null answer: if the best answer has a score that is less than the score of - the null answer minus this threshold, the null answer is selected for this example (note that the score of - the null answer for an example giving several features is the minimum of the scores for the null answer on - each feature: all features must be aligned on the fact they `want` to predict a null answer). - Only useful when :obj:`version_2_with_negative` is :obj:`True`. - output_dir (:obj:`str`, `optional`): - If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if - :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null - answers, are saved in `output_dir`. - prefix (:obj:`str`, `optional`): - If provided, the dictionaries mentioned above are saved with `prefix` added to their names. - is_world_process_zero (:obj:`bool`, `optional`, defaults to :obj:`True`): - Whether this process is the main process or not (used to determine if logging/saves should be done). 
- """ - assert len(predictions) == 2, "`predictions` should be a tuple with two elements (start_logits, end_logits)." - all_start_logits, all_end_logits = predictions - - assert len(predictions[0]) == len(features), f"Got {len(predictions[0])} predictions and {len(features)} features." - - # Build a map example to its corresponding features. - example_id_to_index = {} - index = 0 - for qid in examples["id"]: - if qid in example_id_to_index: - continue - example_id_to_index[qid] = index - index += 1 - - features_per_example = collections.defaultdict(list) - for i, feature in enumerate(features): - features_per_example[example_id_to_index[feature["example_id"]]].append(i) - - # Logging. - logger.setLevel(logging.INFO if is_world_process_zero else logging.WARN) - logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.") - - qa_with_duplicates = defaultdict(list) - - for qid in tqdm(example_id_to_index): - - feature_indices = features_per_example[example_id_to_index[qid]] - - # Looping through all the features associated to the current example. - for feature_index in feature_indices: - potential_answers = [] - # We grab the predictions of the model for this feature. - start_logits = all_start_logits[feature_index] - end_logits = all_end_logits[feature_index] - - input_ids = torch.tensor([features[feature_index]["input_ids"]]) - - # Go through all possibilities for the `n_best_size` greater start and end logits. 
- start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist() - end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist() - - eos_token_indices = (input_ids == tokenizer.eos_token_id).nonzero() - question_end_index = eos_token_indices.view(input_ids.size(0), 2, 2)[:, 0, 1] - doc_end_index = eos_token_indices.view(input_ids.size(0), 2, 2)[:, 1, 1] - for start_index in start_indexes: - for end_index in end_indexes: - if start_index >= doc_end_index[0]: - continue - if end_index >= doc_end_index[0]: - continue - if start_index <= question_end_index[0]: - continue - if end_index <= question_end_index[0]: - continue - if start_index > end_index: - continue - answer_len = end_index - start_index + 1 - if answer_len > max_answer_length: - continue - potential_answers.append({'start': start_index, 'end': end_index, - 'start_logit': start_logits[start_index].item(), - 'end_logit': end_logits[end_index].item()}) - sorted_answers = sorted(potential_answers, key=lambda x: (x['start_logit'] + x['end_logit']), reverse=True) - - if len(sorted_answers) == 0: - answer = {'text': 'NoAnswerFound', 'score': -1000000} - else: - answer = sorted_answers[0] - answer_token_ids = input_ids[0, answer['start']: answer['end'] + 1] - answer_tokens = tokenizer.convert_ids_to_tokens(answer_token_ids.tolist()) - text = tokenizer.convert_tokens_to_string(answer_tokens) - score = answer['start_logit'] + answer['end_logit'] - answer = {'text': text, 'score': score} - - qa_with_duplicates[qid].append({'answer_score': answer['score'], 'answer_text': answer['text'], }) - - qid_to_answer_text = {} - for qid, answer_metrics in qa_with_duplicates.items(): - top_answer = sorted(answer_metrics, key=lambda x: x['answer_score'], reverse=True)[0] - qid_to_answer_text[qid] = top_answer['answer_text'] - - - # If we have an output_dir, let's save all those dicts. - if output_dir is not None: - assert os.path.isdir(output_dir), f"{output_dir} is not a directory." 
- - prediction_file = os.path.join( - output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json" - ) - logger.info(f"Saving predictions to {prediction_file}.") - with open(prediction_file, "w") as writer: - writer.write(json.dumps(qid_to_answer_text, indent=4) + "\n") - return qid_to_answer_text - - -def postprocess_qa_predictions_with_beam_search( - examples, - features, - predictions: Tuple[np.ndarray, np.ndarray], - version_2_with_negative: bool = False, - n_best_size: int = 20, - max_answer_length: int = 30, - start_n_top: int = 5, - end_n_top: int = 5, - output_dir: Optional[str] = None, - prefix: Optional[str] = None, - is_world_process_zero: bool = True, -): - """ - Post-processes the predictions of a question-answering model with beam search to convert them to answers that are substrings of the - original contexts. This is the postprocessing functions for models that return start and end logits, indices, as well as - cls token predictions. - Args: - examples: The non-preprocessed dataset (see the main script for more information). - features: The processed dataset (see the main script for more information). - predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): - The predictions of the model: two arrays containing the start logits and the end logits respectively. Its - first dimension must match the number of elements of :obj:`features`. - version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): - Whether or not the underlying dataset contains examples with no answers. - n_best_size (:obj:`int`, `optional`, defaults to 20): - The total number of n-best predictions to generate when looking for an answer. - max_answer_length (:obj:`int`, `optional`, defaults to 30): - The maximum length of an answer that can be generated. This is needed because the start and end predictions - are not conditioned on one another. 
- start_n_top (:obj:`int`, `optional`, defaults to 5): - The number of top start logits too keep when searching for the :obj:`n_best_size` predictions. - end_n_top (:obj:`int`, `optional`, defaults to 5): - The number of top end logits too keep when searching for the :obj:`n_best_size` predictions. - output_dir (:obj:`str`, `optional`): - If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if - :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null - answers, are saved in `output_dir`. - prefix (:obj:`str`, `optional`): - If provided, the dictionaries mentioned above are saved with `prefix` added to their names. - is_world_process_zero (:obj:`bool`, `optional`, defaults to :obj:`True`): - Whether this process is the main process or not (used to determine if logging/saves should be done). - """ - assert len(predictions) == 5, "`predictions` should be a tuple with five elements." - start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = predictions - - assert len(predictions[0]) == len( - features - ), f"Got {len(predictions[0])} predicitions and {len(features)} features." - - # Build a map example to its corresponding features. - example_id_to_index = {k: i for i, k in enumerate(examples["id"])} - features_per_example = collections.defaultdict(list) - for i, feature in enumerate(features): - features_per_example[example_id_to_index[feature["example_id"]]].append(i) - - # The dictionaries we have to fill. - all_predictions = collections.OrderedDict() - all_nbest_json = collections.OrderedDict() - scores_diff_json = collections.OrderedDict() if version_2_with_negative else None - - # Logging. - logger.setLevel(logging.INFO if is_world_process_zero else logging.WARN) - logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.") - - # Let's loop over all the examples! 
- for example_index, example in enumerate(tqdm(examples)): - # Those are the indices of the features associated to the current example. - feature_indices = features_per_example[example_index] - - min_null_score = None - prelim_predictions = [] - - # Looping through all the features associated to the current example. - for feature_index in feature_indices: - # We grab the predictions of the model for this feature. - start_log_prob = start_top_log_probs[feature_index] - start_indexes = start_top_index[feature_index] - end_log_prob = end_top_log_probs[feature_index] - end_indexes = end_top_index[feature_index] - feature_null_score = cls_logits[feature_index] - # This is what will allow us to map some the positions in our logits to span of texts in the original - # context. - offset_mapping = features[feature_index]["offset_mapping"] - # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context - # available in the current feature. - token_is_max_context = features[feature_index].get("token_is_max_context", None) - - # Update minimum null prediction - if min_null_score is None or feature_null_score < min_null_score: - min_null_score = feature_null_score - - # Go through all possibilities for the `n_start_top`/`n_end_top` greater start and end logits. - for i in range(start_n_top): - for j in range(end_n_top): - start_index = int(start_indexes[i]) - j_index = i * end_n_top + j - end_index = int(end_indexes[j_index]) - # Don't consider out-of-scope answers (last part of the test should be unnecessary because of the - # p_mask but let's not take any risk) - if ( - start_index >= len(offset_mapping) - or end_index >= len(offset_mapping) - or offset_mapping[start_index] is None - or offset_mapping[end_index] is None - ): - continue - # Don't consider answers with a length negative or > max_answer_length. 
- if end_index < start_index or end_index - start_index + 1 > max_answer_length: - continue - # Don't consider answer that don't have the maximum context available (if such information is - # provided). - if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False): - continue - prelim_predictions.append( - { - "offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]), - "score": start_log_prob[i] + end_log_prob[j_index], - "start_log_prob": start_log_prob[i], - "end_log_prob": end_log_prob[j_index], - } - ) - - # Only keep the best `n_best_size` predictions. - predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size] - - # Use the offsets to gather the answer text in the original context. - context = example["context"] - for pred in predictions: - offsets = pred.pop("offsets") - pred["text"] = context[offsets[0] : offsets[1]] - - # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid - # failure. - if len(predictions) == 0: - predictions.insert(0, {"text": "", "start_logit": -1e-6, "end_logit": -1e-6, "score": -2e-6}) - - # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using - # the LogSumExp trick). - scores = np.array([pred.pop("score") for pred in predictions]) - exp_scores = np.exp(scores - np.max(scores)) - probs = exp_scores / exp_scores.sum() - - # Include the probabilities in our predictions. - for prob, pred in zip(probs, predictions): - pred["probability"] = prob - - # Pick the best prediction and set the probability for the null answer. - all_predictions[example["id"]] = predictions[0]["text"] - if version_2_with_negative: - scores_diff_json[example["id"]] = float(min_null_score) - - # Make `predictions` JSON-serializable by casting np.float back to float. 
- all_nbest_json[example["id"]] = [ - {k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()} - for pred in predictions - ] - - # If we have an output_dir, let's save all those dicts. - if output_dir is not None: - assert os.path.isdir(output_dir), f"{output_dir} is not a directory." - - prediction_file = os.path.join( - output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json" - ) - nbest_file = os.path.join( - output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json" - ) - if version_2_with_negative: - null_odds_file = os.path.join( - output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json" - ) - - print(f"Saving predictions to {prediction_file}.") - with open(prediction_file, "w") as writer: - writer.write(json.dumps(all_predictions, indent=4) + "\n") - print(f"Saving nbest_preds to {nbest_file}.") - with open(nbest_file, "w") as writer: - writer.write(json.dumps(all_nbest_json, indent=4) + "\n") - if version_2_with_negative: - print(f"Saving null_odds to {null_odds_file}.") - with open(null_odds_file, "w") as writer: - writer.write(json.dumps(scores_diff_json, indent=4) + "\n") - - return all_predictions, scores_diff_json - - -def normalize_answer(s): - """Lower text and remove punctuation, articles and extra whitespace.""" - - def remove_articles(text): - return re.sub(r'\b(a|an|the)\b', ' ', text) - - def white_space_fix(text): - return ' '.join(text.split()) - - def handle_punc(text): - exclude = set(string.punctuation + "".join([u"‘", u"’", u"´", u"`"])) - return ''.join(ch if ch not in exclude else ' ' for ch in text) - - def lower(text): - return text.lower() - - def replace_underscore(text): - return text.replace('_', ' ') - - return white_space_fix(remove_articles(handle_punc(lower(replace_underscore(s))))).strip() - -def f1_score(prediction, ground_truth): - prediction_tokens = normalize_answer(prediction).split() - 
ground_truth_tokens = normalize_answer(ground_truth).split() - common = Counter(prediction_tokens) & Counter(ground_truth_tokens) - num_same = sum(common.values()) - if num_same == 0: - return 0 - precision = 1.0 * num_same / len(prediction_tokens) - recall = 1.0 * num_same / len(ground_truth_tokens) - f1 = (2 * precision * recall) / (precision + recall) - return f1 - -def metric_max_over_ground_truths(metric_fn, prediction, ground_truths): - scores_for_ground_truths = [] - for ground_truth in ground_truths: - score = metric_fn(prediction, ground_truth) - scores_for_ground_truths.append(score) - return max(scores_for_ground_truths) - - -def is_exact_match(answer_object, prediction): - ground_truths = get_ground_truths(answer_object) - for ground_truth in ground_truths: - if exact_match_score(prediction, ground_truth): - return True - return False - - -def has_exact_match(ground_truths, candidates): - for ground_truth in ground_truths: - if ground_truth in candidates: - return True - return False - -def exact_match_score(prediction, ground_truth): - return int(normalize_answer(prediction) == normalize_answer(ground_truth)) - -def evaluate_triviaqa(references, predictions): - f1 = exact_match = common = total = 0 - for qa in references: - total += 1 - if qa["id"] not in predictions: - message = "Unanswered question " + qa["id"] + " will receive score 0." 
- print(message, file=sys.stderr) - continue - common += 1 - prediction = predictions[qa["id"]] - ground_truths = qa["answers"]["text"] + qa["aliases"] - em_for_this_question = metric_max_over_ground_truths( - exact_match_score, prediction, ground_truths) - - exact_match += em_for_this_question - - f1_for_this_question = metric_max_over_ground_truths( - f1_score, prediction, ground_truths) - f1 += f1_for_this_question - exact_match = 100.0 * exact_match / total - f1 = 100.0 * f1 / total - - return {"exact_match": exact_match, "f1": f1} diff --git a/examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/README.md b/examples/huggingface/pytorch/question-answering/pruning/magnitude/README.md similarity index 100% rename from examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/README.md rename to examples/huggingface/pytorch/question-answering/pruning/magnitude/README.md diff --git a/examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/requirements.txt b/examples/huggingface/pytorch/question-answering/pruning/magnitude/requirements.txt similarity index 100% rename from examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/requirements.txt rename to examples/huggingface/pytorch/question-answering/pruning/magnitude/requirements.txt diff --git a/examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/run_benchmark.sh b/examples/huggingface/pytorch/question-answering/pruning/magnitude/run_benchmark.sh similarity index 100% rename from examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/run_benchmark.sh rename to examples/huggingface/pytorch/question-answering/pruning/magnitude/run_benchmark.sh diff --git a/examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/run_qa.py b/examples/huggingface/pytorch/question-answering/pruning/magnitude/run_qa.py similarity index 97% rename from 
examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/run_qa.py rename to examples/huggingface/pytorch/question-answering/pruning/magnitude/run_qa.py index 35f572aa50c..7bc9b835440 100644 --- a/examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/run_qa.py +++ b/examples/huggingface/pytorch/question-answering/pruning/magnitude/run_qa.py @@ -26,7 +26,8 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics, OptimizedModel, PrunerConfig, PruningConfig, PruningMode +from intel_extension_for_transformers.transformers import metrics, OptimizedModel +from neural_compressor.config import WeightPruningConfig from trainer_qa import QuestionAnsweringTrainer from transformers import ( AutoConfig, @@ -210,8 +211,8 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply prune."}, ) pruning_approach: Optional[str] = field( - default="BasicMagnitude", - metadata={"help": "Pruning approach. Supported approach is basic_magnite."}, + default="magnitude", + metadata={"help": "Pruning approach. 
Supported approach is magnite."}, ) target_sparsity_ratio: Optional[float] = field( default=None, @@ -631,12 +632,15 @@ def compute_metrics(p: EvalPrediction): raise ValueError("do_train must be set to True for pruning.") tune_metric = metrics.Metric(name=metric_name) - prune_type = 'BasicMagnitude' if optim_args.pruning_approach else optim_args.pruning_approach + prune_type = optim_args.pruning_approach \ + if optim_args.pruning_approach else 'pattern_lock' target_sparsity_ratio = optim_args.target_sparsity_ratio \ if optim_args.target_sparsity_ratio else None - pruner_config = PrunerConfig(prune_type=prune_type, target_sparsity_ratio=target_sparsity_ratio) - pruning_conf = PruningConfig(pruner_config=pruner_config, metrics=tune_metric) - + trainer.metrics = tune_metric + pruning_conf = WeightPruningConfig([{"start_step": 0, "end_step": 2}], + target_sparsity=target_sparsity_ratio, + pruning_scope="local", + pruning_type=prune_type) model = trainer.prune(pruning_config=pruning_conf) trainer.save_model(training_args.output_dir) @@ -653,7 +657,7 @@ def compute_metrics(p: EvalPrediction): max_eval_samples = data_args.max_eval_samples \ if data_args.max_eval_samples is not None else len(eval_dataset) eval_samples = min(max_eval_samples, len(eval_dataset)) - samples = eval_samples - (eval_samples % batch_size) \ + samples = eval_samples - (eval_samples % optim_args.batch_size) \ if training_args.dataloader_drop_last else eval_samples logger.info("metrics keys: {}".format(results.keys())) bert_task_acc_keys = ['eval_f1', 'eval_accuracy', 'eval_matthews_correlation', diff --git a/examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/run_tuning.sh b/examples/huggingface/pytorch/question-answering/pruning/magnitude/run_tuning.sh similarity index 100% rename from examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/run_tuning.sh rename to examples/huggingface/pytorch/question-answering/pruning/magnitude/run_tuning.sh diff --git 
a/examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/trainer_qa.py b/examples/huggingface/pytorch/question-answering/pruning/magnitude/trainer_qa.py similarity index 100% rename from examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/trainer_qa.py rename to examples/huggingface/pytorch/question-answering/pruning/magnitude/trainer_qa.py diff --git a/examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/utils_qa.py b/examples/huggingface/pytorch/question-answering/pruning/magnitude/utils_qa.py similarity index 100% rename from examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/utils_qa.py rename to examples/huggingface/pytorch/question-answering/pruning/magnitude/utils_qa.py diff --git a/examples/huggingface/pytorch/question-answering/quantization/README.md b/examples/huggingface/pytorch/question-answering/quantization/README.md index 0e7132666ff..4d1707dc6f7 100644 --- a/examples/huggingface/pytorch/question-answering/quantization/README.md +++ b/examples/huggingface/pytorch/question-answering/quantization/README.md @@ -1,6 +1,6 @@ Step-by-Step​ ============ -The script `run_qa.py` provides three quantization approaches (PostTrainingStatic, PostTrainingStatic and QuantizationAwareTraining) based on [Intel® Neural Compressor](https://github.com/intel/neural-compressor). +The script `run_qa.py` provides three quantization approaches (dynamic, static and qat) based on [Intel® Neural Compressor](https://github.com/intel/neural-compressor). # Prerequisite​ ## 1. Create Environment​ @@ -8,9 +8,8 @@ Recommended python 3.9 or higher version. ```shell pip install intel-extension-for-transformers pip install -r requirements.txt -pip install transformers==4.34.1 + ``` ->**Note**: Please use transformers no higher than 4.34.1 # Run ## 1. 
Quantization @@ -22,7 +21,7 @@ python run_qa.py \ --model_name_or_path distilbert-base-uncased-distilled-squad \ --dataset_name squad \ --tune \ - --quantization_approach PostTrainingStatic \ + --quantization_approach static \ --do_train \ --do_eval \ --output_dir ./tmp/squad_output \ @@ -36,7 +35,7 @@ python run_qa.py \ --model_name_or_path bert-large-uncased-whole-word-masking-finetuned-squad \ --dataset_name squad \ --tune \ - --quantization_approach PostTrainingStatic \ + --quantization_approach static \ --do_train \ --do_eval \ --output_dir ./tmp/squad_output \ @@ -65,7 +64,7 @@ python -m torch.distributed.launch --master_addr= --nproc_per_no --model_name_or_path bert-large-uncased-whole-word-masking-finetuned-squad \ --dataset_name squad \ --tune \ - --quantization_approach QuantizationAwareTraining \ + --quantization_approach qat \ --do_train \ --do_eval \ --output_dir ./tmp/squad_output \ @@ -75,7 +74,7 @@ python -m torch.distributed.launch --master_addr= --nproc_per_no ## 3. Validated Model List ### Stock PyTorch Validated model list -|Dataset|Pretrained model|PostTrainingDynamic | PostTrainingStatic | QuantizationAwareTraining +|Dataset|Pretrained model|dynamic | static | qat |---|------------------------------------|---|---|--- |squad|distilbert-base-uncased-distilled-squad| ✅| ✅| ✅ |squad|valhalla/longformer-base-4096-finetuned-squadv1| ✅| ✅| N/A diff --git a/examples/huggingface/pytorch/question-answering/quantization/run_qa.py b/examples/huggingface/pytorch/question-answering/quantization/run_qa.py index ac4b0237bf9..6f7bebfaa76 100644 --- a/examples/huggingface/pytorch/question-answering/quantization/run_qa.py +++ b/examples/huggingface/pytorch/question-answering/quantization/run_qa.py @@ -26,7 +26,13 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics , OptimizedModel, QuantizationConfig +from 
intel_extension_for_transformers.transformers import OptimizedModel, metrics +from neural_compressor.config import ( + PostTrainingQuantConfig, + QuantizationAwareTrainingConfig, + TuningCriterion, + AccuracyCriterion +) from trainer_qa import QuestionAnsweringTrainer from transformers import ( AutoConfig, @@ -215,13 +221,13 @@ class OptimizationArguments: metadata={"help": "Tuning strategy. Supported strategies are basic, bayesian, mse, mse_v2."}, ) quantization_approach: Optional[str] = field( - default="PostTrainingStatic", - metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, + default="static", + metadata={"help": "Quantization approach. Supported approach are static, " + "dynamic and qat."}, ) framework: Optional[str] = field( - default="pytorch", - metadata={"help": "Deep learning framework. Supported framework are pytorch, ipex"}, + default="default", + metadata={"help": "Deep learning framework. Supported framework are default, ipex"}, ) metric_name: Optional[str] = field( default="eval_f1", @@ -320,10 +326,12 @@ def main(): # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. + if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
raw_datasets = load_dataset( - data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir + data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, + trust_remote_code=True ) else: data_files = {} @@ -337,7 +345,8 @@ def main(): if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] - raw_datasets = load_dataset(extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir) + raw_datasets = load_dataset(extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir, + trust_remote_code=True) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. @@ -627,7 +636,7 @@ def post_processing_function(examples, features, predictions, stage="eval"): references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=formatted_predictions, label_ids=references) - metric = load_metric("squad_v2" if data_args.version_2_with_negative else "squad") + metric = load_metric("squad_v2" if data_args.version_2_with_negative else "squad", trust_remote_code=True) def compute_metrics(p: EvalPrediction): return metric.compute(predictions=p.predictions, references=p.label_ids) @@ -655,31 +664,46 @@ def compute_metrics(p: EvalPrediction): if not training_args.do_eval: raise ValueError("do_eval must be set to True for quantization.") - if optim_args.quantization_approach != "PostTrainingDynamic": + if optim_args.quantization_approach != "dynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." 
) - if optim_args.quantization_approach == "QuantizationAwareTraining": - early_stopping_patience = 6 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, - early_stopping_threshold)) - tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - quantization_config = QuantizationConfig( - approach=optim_args.quantization_approach, - max_trials=200, - metrics=[tune_metric], - sampling_size = len(train_dataset)//20 - ) - if optim_args.strategy == "mse_v2": - quantization_config.strategy = "mse_v2" - if optim_args.framework == "ipex": - quantization_config.framework = "pytorch_ipex" - trainer.calib_dataloader = calib_dataloader + trainer.metrics = tune_metric + if optim_args.quantization_approach != "qat": + tuning_criterion = TuningCriterion(max_trials=600, objective=["performance"]) + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, # optional. + criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". + tolerable_loss=optim_args.perf_tol, # optional. + ) + quantization_config = PostTrainingQuantConfig( + backend = optim_args.framework, + approach=optim_args.quantization_approach, + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + if optim_args.framework == "ipex": + trainer.calib_dataloader = calib_dataloader + else: + tuning_criterion = TuningCriterion(max_trials=600, objective=["performance"]) + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, # optional. + criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". + tolerable_loss=optim_args.perf_tol, # optional. 
+ ) + quantization_config = QuantizationAwareTrainingConfig( + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + early_stopping_patience = 2 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ + early_stopping_threshold)) + model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark_only: diff --git a/examples/huggingface/pytorch/question-answering/quantization/run_tuning.sh b/examples/huggingface/pytorch/question-answering/quantization/run_tuning.sh index 02dc6a88075..d4d1d24fe36 100644 --- a/examples/huggingface/pytorch/question-answering/quantization/run_tuning.sh +++ b/examples/huggingface/pytorch/question-answering/quantization/run_tuning.sh @@ -17,7 +17,7 @@ function init_params { extra_cmd="" batch_size=8 MAX_SEQ_LENGTH=384 - approach="PostTrainingStatic" + approach="static" for var in "$@" do case $var in @@ -52,15 +52,15 @@ function run_tuning { if [ "${topology}" = "distilbert_base_squad_static" ]; then DATASET_NAME="squad" model_name_or_path="distilbert-base-uncased-distilled-squad" - approach="PostTrainingStatic" + approach="static" elif [ "${topology}" = "distilbert_base_squad_dynamic" ]; then DATASET_NAME="squad" model_name_or_path="distilbert-base-uncased-distilled-squad" - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${topology}" = "distilbert_base_squad_qat" ]; then DATASET_NAME="squad" model_name_or_path="distilbert-base-uncased-distilled-squad" - approach="QuantizationAwareTraining" + approach="qat" extra_cmd=$extra_cmd" --learning_rate 1e-5 \ --num_train_epochs 6 \ --eval_steps 100 \ @@ -69,35 +69,36 @@ function run_tuning { --load_best_model_at_end True \ --evaluation_strategy steps \ --save_strategy steps \ - --save_total_limit 1" + --save_total_limit 1 \ + --save_safetensors False" elif [ "${topology}" = "bert_large_SQuAD_static" ]; then DATASET_NAME="squad" 
model_name_or_path="bert-large-uncased-whole-word-masking-finetuned-squad" - approach="PostTrainingStatic" + approach="static" elif [ "${topology}" = "roberta_base_SQuAD2_static" ]; then DATASET_NAME="squad" model_name_or_path="deepset/roberta-base-squad2" - approach="PostTrainingStatic" + approach="static" # extra_cmd=$extra_cmd" --version_2_with_negative" elif [ "${topology}" = "longformer_base_squad_static" ]; then DATASET_NAME="squad" model_name_or_path="valhalla/longformer-base-4096-finetuned-squadv1" - approach="PostTrainingStatic" + approach="static" extra_cmd=$extra_cmd" --strategy mse_v2" elif [ "${topology}" = "longformer_base_squad_dynamic" ]; then DATASET_NAME="squad" model_name_or_path="valhalla/longformer-base-4096-finetuned-squadv1" - approach="PostTrainingDynamic" + approach="dynamic" extra_cmd=$extra_cmd" --strategy mse_v2" elif [ "${topology}" = "distilbert_base_squad_ipex" ]; then DATASET_NAME="squad" model_name_or_path="distilbert-base-uncased-distilled-squad" extra_cmd=$extra_cmd" --perf_tol 0.02" - approach="PostTrainingStatic" + approach="static" elif [ "${topology}" = "bert_large_squad_ipex" ]; then DATASET_NAME="squad" model_name_or_path="bert-large-uncased-whole-word-masking-finetuned-squad" - approach="PostTrainingStatic" + approach="static" fi python -u ./run_qa.py \ diff --git a/examples/huggingface/pytorch/summarization/quantization/README.md b/examples/huggingface/pytorch/summarization/quantization/README.md index c48c2a83704..f8f64520f8a 100644 --- a/examples/huggingface/pytorch/summarization/quantization/README.md +++ b/examples/huggingface/pytorch/summarization/quantization/README.md @@ -49,18 +49,16 @@ python examples/pytorch/summarization/run_summarization.py \ --dataset_name samsum \ --do_train \ --do_eval \ - --train_file path_to_csv_or_jsonlines_file \ - --validation_file path_to_csv_or_jsonlines_file \ --output_dir /tmp/tst-summarization \ --overwrite_output_dir \ - --per_device_train_batch_size=8 \ - 
--per_device_eval_batch_size=8 \ + --per_device_train_batch_size 8 \ + --per_device_eval_batch_size 8 \ --tune \ --predict_with_generate \ --perf_tol 0.03 ``` ### 2. Validated Model List -|Dataset|Pretrained model|PostTrainingDynamic | PostTrainingStatic | QuantizationAwareTraining +|Dataset|Pretrained model|dynamic | static | qat |---|------------------------------------|---|---|--- |samsum|pegasus_samsum| ✅| N/A | N/A |cnn_dailymail|t5_base_cnn| ✅| N/A | N/A diff --git a/examples/huggingface/pytorch/summarization/quantization/run_benchmark.sh b/examples/huggingface/pytorch/summarization/quantization/run_benchmark.sh index 4fe01c952dd..d3b375060aa 100644 --- a/examples/huggingface/pytorch/summarization/quantization/run_benchmark.sh +++ b/examples/huggingface/pytorch/summarization/quantization/run_benchmark.sh @@ -78,11 +78,11 @@ function run_benchmark { elif [ "${topology}" == "flan_t5_large_samsum_dynamic" ]; then DATASET_NAME="samsum" model_name_or_path="stacked-summaries/flan-t5-large-stacked-samsum-1024" - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${topology}" == "flan_t5_large_samsum_static" ]; then DATASET_NAME="samsum" model_name_or_path="stacked-summaries/flan-t5-large-stacked-samsum-1024" - approach="PostTrainingStatic" + approach="static" else echo "unsupported topology: ${topology}" exit 1 diff --git a/examples/huggingface/pytorch/summarization/quantization/run_summarization.py b/examples/huggingface/pytorch/summarization/quantization/run_summarization.py index 9d9cc794d38..3844e45173f 100755 --- a/examples/huggingface/pytorch/summarization/quantization/run_summarization.py +++ b/examples/huggingface/pytorch/summarization/quantization/run_summarization.py @@ -30,8 +30,13 @@ from datasets import load_dataset, load_metric from filelock import FileLock -from intel_extension_for_transformers.transformers import OptimizedModel, QuantizationConfig -from intel_extension_for_transformers.transformers import metrics as nlp_metrics +from 
intel_extension_for_transformers.transformers import OptimizedModel, metrics +from neural_compressor.config import ( + PostTrainingQuantConfig, + QuantizationAwareTrainingConfig, + TuningCriterion, + AccuracyCriterion +) from intel_extension_for_transformers.transformers.trainer import NLPSeq2SeqTrainer from transformers import ( AutoConfig, @@ -267,9 +272,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="PostTrainingStatic", - metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, + default="static", + metadata={"help": "Quantization approach. Supported approach are static, " + "dynamic and qat."}, ) metric_name: Optional[str] = field( default="eval_rougeLsum", @@ -700,26 +705,42 @@ def compute_metrics(eval_preds): trainer.save_model(training_args.output_dir) - if optim_args.quantization_approach != "PostTrainingDynamic": + if optim_args.quantization_approach != "dynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." 
) - if optim_args.quantization_approach == "QuantizationAwareTraining": - early_stopping_patience = 6 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, - early_stopping_threshold)) - - tune_metric = nlp_metrics.Metric( + tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - quantization_config = QuantizationConfig( - approach=optim_args.quantization_approach, - max_trials=200, - metrics=[tune_metric], - sampling_size = len(train_dataset)//20 - ) + trainer.metrics = tune_metric + if optim_args.quantization_approach != "qat": + tuning_criterion = TuningCriterion(max_trials=600, objective=["performance"]) + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, # optional. + criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". + tolerable_loss=optim_args.perf_tol, # optional. + ) + quantization_config = PostTrainingQuantConfig( + approach=optim_args.quantization_approach, + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + else: + tuning_criterion = TuningCriterion(max_trials=600, objective=["performance"]) + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, # optional. + criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". + tolerable_loss=optim_args.perf_tol, # optional. 
+ ) + quantization_config = QuantizationAwareTrainingConfig( + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + early_stopping_patience = 2 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ + early_stopping_threshold)) trainer.max_length = max_length trainer.num_beams = num_beams model = trainer.quantize(quant_config=quantization_config) diff --git a/examples/huggingface/pytorch/summarization/quantization/run_tuning.sh b/examples/huggingface/pytorch/summarization/quantization/run_tuning.sh index c42d6045c89..e9f714d2bfb 100644 --- a/examples/huggingface/pytorch/summarization/quantization/run_tuning.sh +++ b/examples/huggingface/pytorch/summarization/quantization/run_tuning.sh @@ -15,7 +15,7 @@ function init_params { DATASET_NAME="xsum" extra_cmd="" batch_size=8 - approach="PostTrainingStatic" + approach="static" for var in "$@" do case $var in @@ -45,24 +45,24 @@ function run_tuning { if [ "${topology}" == "pegasus_samsum_dynamic" ]; then DATASET_NAME="samsum" model_name_or_path="lvwerra/pegasus-samsum" - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${topology}" == "t5_base_cnn_dynamic" ]; then DATASET_NAME="cnn_dailymail" model_name_or_path="flax-community/t5-base-cnn-dm" - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${topology}" == "t5_large_cnn_dynamic" ]; then DATASET_NAME="cnn_dailymail" model_name_or_path="sysresearch101/t5-large-finetuned-xsum-cnn" - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${topology}" == "flan_t5_large_samsum_dynamic" ]; then DATASET_NAME="samsum" model_name_or_path="stacked-summaries/flan-t5-large-stacked-samsum-1024" - approach="PostTrainingDynamic" + approach="dynamic" extra_cmd=$extra_cmd" --perf_tol 0.03" elif [ "${topology}" == "flan_t5_large_samsum_static" ]; then DATASET_NAME="samsum" model_name_or_path="stacked-summaries/flan-t5-large-stacked-samsum-1024" - 
approach="PostTrainingStatic" + approach="static" extra_cmd=$extra_cmd" --perf_tol 0.03" else echo "unsupported topology: ${topology}" diff --git a/examples/huggingface/pytorch/text-classification/distillation_for_quantization/run_glue.py b/examples/huggingface/pytorch/text-classification/distillation_for_quantization/run_glue.py index 831ecb734ea..2524d5e1ced 100644 --- a/examples/huggingface/pytorch/text-classification/distillation_for_quantization/run_glue.py +++ b/examples/huggingface/pytorch/text-classification/distillation_for_quantization/run_glue.py @@ -31,15 +31,15 @@ from datasets import load_dataset, load_metric from intel_extension_for_transformers.transformers import ( metrics, - PrunerConfig, - PruningConfig, - DistillationConfig, - QuantizationConfig, OptimizedModel, objectives ) +from neural_compressor.config import ( + DistillationConfig, + IntermediateLayersKnowledgeDistillationLossConfig, + QuantizationAwareTrainingConfig, +) from intel_extension_for_transformers.transformers.trainer import NLPTrainer -from intel_extension_for_transformers.transformers.distillation import Criterion from torch.utils.data import DataLoader from tqdm.auto import tqdm from transformers import ( @@ -543,29 +543,21 @@ def compute_metrics(p: EvalPrediction): tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) + trainer.metrics = tune_metric + layer_mappings = [[[f"bert.encoder.layer.{i}", "0"]] for i in range(12)] +\ [[[f"bert.encoder.layer.{i}.attention", "1"]] for i in range(12)] +\ [[["classifier"]]] - distillation_conf = DistillationConfig( - framework="pytorch_fx", metrics=tune_metric, - criterion=Criterion( - name="IntermediateLayersLoss", + criterion_conf = IntermediateLayersKnowledgeDistillationLossConfig( layer_mappings=layer_mappings, loss_types=["MSE"] * len(layer_mappings), loss_weight_ratio=[1.0 / len(layer_mappings)] * len(layer_mappings), add_origin_loss=True - ) - ) - - objective = 
objectives.performance - quantization_conf = QuantizationConfig( - approach="QuantizationAwareTraining", - max_trials=600, - metrics=[tune_metric], - objectives=[objective] ) + distillation_conf = DistillationConfig(teacher_model=teacher_model, criterion=criterion_conf) + quantization_conf = QuantizationAwareTrainingConfig() conf_list = [distillation_conf, quantization_conf] - model = trainer.orchestrate_optimizations(config_list=conf_list, teacher_model=teacher_model) + model = trainer.orchestrate_optimizations(config_list=conf_list) if optim_args.benchmark or optim_args.accuracy_only: # Load the model obtained after Intel Neural Compressor (INC) quantization diff --git a/examples/huggingface/pytorch/text-classification/orchestrate_optimizations/README.md b/examples/huggingface/pytorch/text-classification/orchestrate_optimizations/README.md index afd199d00c4..461e84d3dd7 100644 --- a/examples/huggingface/pytorch/text-classification/orchestrate_optimizations/README.md +++ b/examples/huggingface/pytorch/text-classification/orchestrate_optimizations/README.md @@ -17,7 +17,7 @@ python run_glue.py \ --model_name_or_path Intel/distilbert-base-uncased-sparse-90-unstructured-pruneofa \ --teacher_model distilbert-base-uncased-finetuned-sst-2-english \ --task_name sst2 \ - --quantization_approach QuantizationAwareTraining \ + --quantization_approach qat \ --do_train \ --do_eval \ --orchestrate_optimization \ @@ -37,7 +37,7 @@ python -m torch.distributed.launch --master_addr= --nproc_per_no --model_name_or_path Intel/distilbert-base-uncased-sparse-90-unstructured-pruneofa \ --teacher_model distilbert-base-uncased-finetuned-sst-2-english \ --task_name sst2 \ - --quantization_approach QuantizationAwareTraining \ + --quantization_approach qat \ --do_train \ --do_eval \ --orchestrate_optimization \ diff --git a/examples/huggingface/pytorch/text-classification/orchestrate_optimizations/run_glue.py 
b/examples/huggingface/pytorch/text-classification/orchestrate_optimizations/run_glue.py index 26f3cd535b7..89c0f104064 100644 --- a/examples/huggingface/pytorch/text-classification/orchestrate_optimizations/run_glue.py +++ b/examples/huggingface/pytorch/text-classification/orchestrate_optimizations/run_glue.py @@ -30,13 +30,15 @@ from datasets import load_dataset, load_metric from intel_extension_for_transformers.transformers import ( metrics, - PrunerConfig, - PruningConfig, - DistillationConfig, - QuantizationConfig, OptimizedModel, objectives ) +from neural_compressor.config import ( + WeightPruningConfig, + DistillationConfig, + KnowledgeDistillationLossConfig, + QuantizationAwareTrainingConfig, +) from intel_extension_for_transformers.transformers.trainer import NLPTrainer from torch.utils.data import DataLoader from tqdm.auto import tqdm @@ -211,7 +213,7 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply prune."}, ) pruning_approach: Optional[str] = field( - default="BasicMagnitude", + default="magnitude", metadata={"help": "Pruning approach. Supported approach is basic_magnite."}, ) target_sparsity_ratio: Optional[float] = field( @@ -231,9 +233,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="PostTrainingStatic", - metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, + default="static", + metadata={"help": "Quantization approach. 
Supported approach are static, " + "dynamic and qat."}, ) metric_name: Optional[str] = field( default=None, @@ -637,10 +639,6 @@ def get_logits(teacher_model, train_dataset, teacher_train_dataset): logger.info("***** Number of student model parameters: {:.2f}M *****".format(\ para_counter(model)/10**6)) - # Trace model - from neural_compressor.adaptor.torch_utils.symbolic_trace import symbolic_trace - model = symbolic_trace(model, optim_args.quantization_approach=="QuantizationAwareTraining") - # Initialize our Trainer trainer = NLPTrainer( model=model, @@ -673,23 +671,20 @@ def get_logits(teacher_model, train_dataset, teacher_train_dataset): tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - prune_type = 'PatternLock' \ + prune_type = 'pattern_lock' \ if optim_args.pruning_approach else optim_args.pruning_approach target_sparsity_ratio = optim_args.target_sparsity_ratio \ if optim_args.target_sparsity_ratio else None - pruner_config = PrunerConfig(prune_type=prune_type, target_sparsity_ratio=target_sparsity_ratio) - pruning_conf = PruningConfig(framework="pytorch_fx",pruner_config=[pruner_config], metrics=tune_metric) - distillation_conf = DistillationConfig(framework="pytorch_fx", metrics=tune_metric) - - objective = objectives.performance - quantization_conf = QuantizationConfig( - approach=optim_args.quantization_approach, - max_trials=600, - metrics=[tune_metric], - objectives=[objective] - ) + trainer.metrics = tune_metric + pruning_conf = WeightPruningConfig([{"start_step": 0, "end_step": 2}], + target_sparsity=target_sparsity_ratio, + pruning_scope="local", + pruning_type=prune_type) + distillation_criterion = KnowledgeDistillationLossConfig(loss_types=["CE", "KL"]) + distillation_conf = DistillationConfig(teacher_model=teacher_model, criterion=distillation_criterion) + quantization_conf = QuantizationAwareTrainingConfig() conf_list = [pruning_conf, distillation_conf, quantization_conf] - 
model = trainer.orchestrate_optimizations(config_list=conf_list, teacher_model=teacher_model) + model = trainer.orchestrate_optimizations(config_list=conf_list) if optim_args.benchmark or optim_args.accuracy_only: # Load the model obtained after Intel Neural Compressor (INC) quantization diff --git a/examples/huggingface/pytorch/text-classification/pruning/run_glue.py b/examples/huggingface/pytorch/text-classification/pruning/run_glue.py index 9b0f5edd087..bee4f20a070 100644 --- a/examples/huggingface/pytorch/text-classification/pruning/run_glue.py +++ b/examples/huggingface/pytorch/text-classification/pruning/run_glue.py @@ -29,9 +29,8 @@ from intel_extension_for_transformers.transformers import ( metrics, OptimizedModel, - PrunerConfig, - PruningConfig, ) +from neural_compressor.config import WeightPruningConfig from intel_extension_for_transformers.transformers.trainer import NLPTrainer from transformers import ( AutoConfig, @@ -47,7 +46,6 @@ ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version -from transformers.utils.fx import symbolic_trace from typing import Optional @@ -204,8 +202,8 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply prune."}, ) pruning_approach: Optional[str] = field( - default="BasicMagnitude", - metadata={"help": "Pruning approach. Supported approach is basic_magnite."}, + default="magnitude", + metadata={"help": "Pruning approach. 
Supported approach is magnite."}, ) target_sparsity_ratio: Optional[float] = field( default=None, @@ -521,13 +519,15 @@ def compute_metrics(p: EvalPrediction): raise ValueError("do_train must be set to True for pruning.") tune_metric = metrics.Metric(name=metric_name) - prune_type = 'BasicMagnitude' \ - if optim_args.pruning_approach else optim_args.pruning_approach + prune_type = optim_args.pruning_approach \ + if optim_args.pruning_approach else 'pattern_lock' target_sparsity_ratio = optim_args.target_sparsity_ratio \ if optim_args.target_sparsity_ratio else None - pruner_config = PrunerConfig(prune_type=prune_type, target_sparsity_ratio=target_sparsity_ratio) - pruning_conf = PruningConfig(pruner_config=pruner_config, metrics=tune_metric) - + trainer.metrics = tune_metric + pruning_conf = WeightPruningConfig([{"start_step": 0, "end_step": 2}], + target_sparsity=target_sparsity_ratio, + pruning_scope="local", + pruning_type=prune_type) model = trainer.prune(pruning_config=pruning_conf) trainer.save_model(training_args.output_dir) diff --git a/examples/huggingface/pytorch/text-classification/quantization/README.md b/examples/huggingface/pytorch/text-classification/quantization/README.md index d864de0263e..c851b4c165b 100644 --- a/examples/huggingface/pytorch/text-classification/quantization/README.md +++ b/examples/huggingface/pytorch/text-classification/quantization/README.md @@ -1,6 +1,6 @@ Step-by-Step​ ============ -The script `run_glue.py` provides three quantization approaches (PostTrainingStatic, PostTrainingStatic and QuantizationAwareTraining) based on [Intel® Neural Compressor](https://github.com/intel/neural-compressor). +The script `run_glue.py` provides three quantization approaches (dynamic, static and qat) based on [Intel® Neural Compressor](https://github.com/intel/neural-compressor). # Prerequisite​ ## 1. Create Environment​ @@ -8,9 +8,7 @@ Recommend python 3.9 or higher version. 
```shell pip install intel-extension-for-transformers pip install -r requirements.txt -pip install transformers==4.34.1 ``` ->**Note**: Please use transformers no higher than 4.34.1 # Run @@ -23,7 +21,7 @@ python run_glue.py \ --model_name_or_path distilbert-base-uncased-finetuned-sst-2-english \ --task_name sst2 \ --tune \ - --quantization_approach PostTrainingStatic \ + --quantization_approach static \ --do_train \ --do_eval \ --output_dir ./saved_result \ @@ -43,7 +41,7 @@ python run_glue.py \ ``` **Notes**: - - Choice of `quantization_approach` can be `PostTrainingDynamic`, `PostTrainingStatic`, and `QuantizationAwareTraining`. + - Choice of `quantization_approach` can be `dynamic`, `static`, and `qat`. - Choice of `task_name` can be `cola`, `sst2`, `mrpc`, `stsb`, `qqp`, `mnli`, `qnli`, `rte`, and `wnli`. ## 2. Distributed Data Parallel Support @@ -66,16 +64,17 @@ python -m torch.distributed.launch --master_addr= --nproc_per_no --model_name_or_path distilbert-base-uncased-finetuned-sst-2-english \ --task_name sst2 \ --tune \ - --quantization_approach QuantizationAwareTraining \ + --quantization_approach qat \ --do_train \ --do_eval \ --output_dir ./saved_result \ - --overwrite_output_dir + --overwrite_output_dir \ + --save_safetensors False ``` ## 3. 
Validated model list -|Task|Pretrained model|PostTrainingDynamic | PostTrainingStatic | QuantizationAwareTraining +|Task|Pretrained model| dynamic| static | qat |---|------------------------------------|---|---|--- |MRPC|textattack/bert-base-uncased-MRPC| ✅| ✅| ✅ |MRPC|textattack/albert-base-v2-MRPC| ✅| ✅| N/A @@ -107,7 +106,7 @@ python -m torch.distributed.launch --master_addr= --nproc_per_no bash run_benchmark.sh --topology=[topology] --config=./saved_int8 --mode=benchmark --int8=true ``` -### QuantizationAwareTraining +### qat - Topology: - BERT-MRPC: bert_base_mrpc diff --git a/examples/huggingface/pytorch/text-classification/quantization/ptq/run_tuning.sh b/examples/huggingface/pytorch/text-classification/quantization/ptq/run_tuning.sh index cc6a24cf325..6719dc50825 100755 --- a/examples/huggingface/pytorch/text-classification/quantization/ptq/run_tuning.sh +++ b/examples/huggingface/pytorch/text-classification/quantization/ptq/run_tuning.sh @@ -16,7 +16,7 @@ function init_params { batch_size=8 MAX_SEQ_LENGTH=128 model_type="bert" - approach="PostTrainingStatic" + approach="static" script="../run_glue.py" for var in "$@" do @@ -46,88 +46,88 @@ function init_params { function run_tuning { if [ "${topology}" = "bert_base_mrpc_static" ]; then model_name_or_path="textattack/bert-base-uncased-MRPC" - approach="PostTrainingStatic" + approach="static" extra_cmd=$extra_cmd" --task_name mrpc" elif [ "${topology}" = "bert_base_mrpc_dynamic" ]; then extra_cmd=$extra_cmd" --task_name mrpc" model_name_or_path="textattack/bert-base-uncased-MRPC" - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${topology}" = "bert_base_SST-2_static" ]; then extra_cmd=$extra_cmd" --task_name sst2" model_name_or_path="echarlaix/bert-base-uncased-sst2-acc91.1-d37-hybrid" - approach="PostTrainingStatic" + approach="static" elif [ "${topology}" = "bert_base_SST-2_dynamic" ]; then extra_cmd=$extra_cmd" --task_name sst2" 
model_name_or_path="echarlaix/bert-base-uncased-sst2-acc91.1-d37-hybrid" - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${topology}" = "bert_base_CoLA_static" ]; then extra_cmd=$extra_cmd" --task_name cola" model_name_or_path="textattack/bert-base-uncased-CoLA" - approach="PostTrainingStatic" + approach="static" elif [ "${topology}" = "bert_base_STS-B_static" ]; then extra_cmd=$extra_cmd" --task_name stsb" model_name_or_path="Contrastive-Tension/BERT-Base-CT-STSb" - approach="PostTrainingStatic" + approach="static" elif [ "${topology}" = "bert_base_RTE_static" ]; then extra_cmd=$extra_cmd" --task_name rte" model_name_or_path="textattack/bert-base-uncased-RTE" - approach="PostTrainingStatic" + approach="static" elif [ "${topology}" = "bert_large_RTE_static" ]; then extra_cmd=$extra_cmd" --task_name rte" model_name_or_path="yoshitomo-matsubara/bert-large-uncased-rte" - approach="PostTrainingStatic" + approach="static" elif [ "${topology}" = "bert_large_CoLA_static" ]; then extra_cmd=$extra_cmd" --task_name cola" model_name_or_path="yoshitomo-matsubara/bert-large-uncased-cola" - approach="PostTrainingStatic" + approach="static" elif [ "${topology}" = "bert_large_MRPC_static" ]; then extra_cmd=$extra_cmd" --task_name mrpc" model_name_or_path="yoshitomo-matsubara/bert-large-uncased-mrpc" - approach="PostTrainingStatic" + approach="static" elif [ "${topology}" = "bert_large_QNLI_static" ]; then extra_cmd=$extra_cmd" --task_name qnli" model_name_or_path="yoshitomo-matsubara/bert-large-uncased-qnli" - approach="PostTrainingStatic" + approach="static" elif [ "${topology}" = "camembert_base_XNLI_dynamic" ]; then model_name_or_path="BaptisteDoyen/camembert-base-xnli" - approach="PostTrainingDynamic" + approach="dynamic" extra_cmd=$extra_cmd" --dataset_name xnli --dataset_config_name fr" elif [ "${topology}" = "xlnet_base_SST-2_static" ]; then extra_cmd=$extra_cmd" --task_name sst2" model_name_or_path="textattack/xlnet-base-cased-SST-2" - 
approach="PostTrainingStatic" + approach="static" elif [ "${topology}" = "funnel_small_MRPC_static" ]; then extra_cmd=$extra_cmd" --task_name mrpc" model_name_or_path="funnel-transformer/small-base" - approach="PostTrainingStatic" + approach="static" elif [ "${topology}" = "roberta_base_SST-2_dynamic" ]; then extra_cmd=$extra_cmd" --task_name sst2" model_name_or_path="textattack/roberta-base-SST-2" - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${topology}" = "distillbert_base_SST-2_static" ]; then extra_cmd=$extra_cmd" --task_name sst2" model_name_or_path="distilbert-base-uncased-finetuned-sst-2-english" - approach="PostTrainingStatic" + approach="static" elif [ "${topology}" = "distillbert_base_SST-2_dynamic" ]; then extra_cmd=$extra_cmd" --task_name sst2" model_name_or_path="distilbert-base-uncased-finetuned-sst-2-english" - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${topology}" = "albert_base_MRPC_static" ]; then extra_cmd=$extra_cmd" --task_name mrpc" model_name_or_path="textattack/albert-base-v2-MRPC" - approach="PostTrainingStatic" + approach="static" elif [ "${topology}" = "albert_base_MRPC_dynamic" ]; then extra_cmd=$extra_cmd" --task_name mrpc" model_name_or_path="textattack/albert-base-v2-MRPC" - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${topology}" = "xlm_roberta_large_XNLI_dynamic" ]; then extra_cmd=$extra_cmd" --dataset_name xnli --dataset_config_name en" model_name_or_path="joeddav/xlm-roberta-large-xnli" - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${topology}" = "bert_base_SST-2_static_no_trainer" ]; then extra_cmd=" --task_name sst2" model_name_or_path="echarlaix/bert-base-uncased-sst2-acc91.1-d37-hybrid" - approach="PostTrainingStatic" + approach="static" script="../run_glue_no_trainer.py" fi diff --git a/examples/huggingface/pytorch/text-classification/quantization/qat/run_tuning.sh b/examples/huggingface/pytorch/text-classification/quantization/qat/run_tuning.sh index 
3d5e71b0212..28aba1b27b7 100644 --- a/examples/huggingface/pytorch/text-classification/quantization/qat/run_tuning.sh +++ b/examples/huggingface/pytorch/text-classification/quantization/qat/run_tuning.sh @@ -18,7 +18,7 @@ function init_params { batch_size=8 MAX_SEQ_LENGTH=128 model_type="bert" - approach="PostTrainingStatic" + approach="static" for var in "$@" do case $var in @@ -49,7 +49,7 @@ function run_tuning { TASK_NAME="mrpc" model_name_or_path="bert-base-uncased" model_type="bert" - approach="QuantizationAwareTraining" + approach="qat" extra_cmd=$extra_cmd" --learning_rate 1e-5 \ --num_train_epochs 6 \ --eval_steps 100 \ @@ -59,7 +59,8 @@ function run_tuning { --evaluation_strategy steps \ --save_strategy steps \ --metric_for_best_model accuracy \ - --save_total_limit 1" + --save_total_limit 1 \ + --save_safetensors False" fi diff --git a/examples/huggingface/pytorch/text-classification/quantization/run_glue.py b/examples/huggingface/pytorch/text-classification/quantization/run_glue.py index 0c7df87abea..7a915a56a8a 100644 --- a/examples/huggingface/pytorch/text-classification/quantization/run_glue.py +++ b/examples/huggingface/pytorch/text-classification/quantization/run_glue.py @@ -26,8 +26,14 @@ import transformers from dataclasses import dataclass, field from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import metrics, objectives, OptimizedModel, QuantizationConfig +from intel_extension_for_transformers.transformers import OptimizedModel, metrics, objectives from intel_extension_for_transformers.transformers.trainer import NLPTrainer +from neural_compressor.config import ( + PostTrainingQuantConfig, + QuantizationAwareTrainingConfig, + TuningCriterion, + AccuracyCriterion +) from transformers import ( AutoConfig, AutoModelForSequenceClassification, @@ -198,9 +204,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - 
default="PostTrainingStatic", - metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, + default="static", + metadata={"help": "Quantization approach. Supported approach are static, " + "dynamic and qat."}, ) metric_name: Optional[str] = field( default=None, @@ -534,28 +540,37 @@ def compute_metrics(p: EvalPrediction): if not training_args.do_eval: raise ValueError("do_eval must be set to True for quantization.") - if optim_args.quantization_approach != "PostTrainingDynamic": + if optim_args.quantization_approach != "dynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) - if optim_args.quantization_approach == "QuantizationAwareTraining": - early_stopping_patience = 6 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, - early_stopping_threshold)) - tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) + trainer.metrics = tune_metric objective = objectives.performance - quantization_config = QuantizationConfig( - approach=optim_args.quantization_approach, - max_trials=600, - metrics=[tune_metric], - objectives=[objective], - sampling_size = len(train_dataset)//20 + tuning_criterion = TuningCriterion(max_trials=600, objective=[objective.name]) + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, # optional. + criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". + tolerable_loss=optim_args.perf_tol, # optional. 
) + if optim_args.quantization_approach != "qat": + quantization_config = PostTrainingQuantConfig( + approach=optim_args.quantization_approach, + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + else: + quantization_config = QuantizationAwareTrainingConfig( + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + early_stopping_patience = 2 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ + early_stopping_threshold)) model = trainer.quantize(quant_config=quantization_config) if optim_args.benchmark_only: diff --git a/examples/huggingface/pytorch/text-classification/quantization/run_glue_no_trainer.py b/examples/huggingface/pytorch/text-classification/quantization/run_glue_no_trainer.py deleted file mode 100644 index 00006ac0526..00000000000 --- a/examples/huggingface/pytorch/text-classification/quantization/run_glue_no_trainer.py +++ /dev/null @@ -1,563 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" Finetuning a 🤗 Transformers model for sequence classification on GLUE.""" -import argparse -import logging -import math -import os -import random -import time -from pathlib import Path - -import pandas as pd # to read in different data - -import datasets -from datasets import load_dataset, load_metric -from torch.utils.data import DataLoader - -import transformers -from accelerate import Accelerator -from huggingface_hub import Repository -from intel_extension_for_transformers.transformers import (metrics, NoTrainerOptimizer, objectives, OptimizedModel, - QuantizationConfig) -from transformers import ( - AdamW, - AutoConfig, - AutoModelForSequenceClassification, - AutoTokenizer, - DataCollatorWithPadding, - PretrainedConfig, - SchedulerType, - default_data_collator, - get_scheduler, - set_seed, -) -from transformers.file_utils import get_full_repo_name -from transformers.utils.versions import require_version - -logger = logging.getLogger(__name__) - -require_version("datasets>=1.8.0", - "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") - -task_to_keys = { - "cola": ("sentence", None), - "mnli": ("premise", "hypothesis"), - "mrpc": ("sentence1", "sentence2"), - "qnli": ("question", "sentence"), - "qqp": ("question1", "question2"), - "rte": ("sentence1", "sentence2"), - "sst2": ("sentence", None), - "stsb": ("sentence1", "sentence2"), - "wnli": ("sentence1", "sentence2"), -} - - -def parse_args(): - parser = argparse.ArgumentParser( - description="Finetune a transformers model on a text classification task") - parser.add_argument( - "--task_name", - type=str, - default=None, - help="The name of the glue task to train on.", - choices=list(task_to_keys.keys()), - ) - parser.add_argument("--train_file", - type=str, - default=None, - help="A csv or a json file containing the training data.") - parser.add_argument("--validation_file", - type=str, - default=None, - help="A csv or a json file containing the validation data.") - 
parser.add_argument( - "--max_length", - type=int, - default=128, - help= - ("The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," - " sequences shorter will be padded if `--pad_to_max_lengh` is passed."), - ) - parser.add_argument( - "--pad_to_max_length", - action="store_true", - help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.", - ) - parser.add_argument( - "--model_name_or_path", - type=str, - help="Path to pretrained model or model identifier from huggingface.co/models.", - required=True, - ) - parser.add_argument( - "--use_slow_tokenizer", - action="store_true", - help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", - ) - parser.add_argument( - "--per_device_train_batch_size", - type=int, - default=8, - help="Batch size (per device) for the training dataloader.", - ) - parser.add_argument( - "--per_device_eval_batch_size", - type=int, - default=8, - help="Batch size (per device) for the evaluation dataloader.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=5e-5, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") - parser.add_argument("--num_train_epochs", - type=int, - default=3, - help="Total number of training epochs to perform.") - parser.add_argument( - "--max_train_steps", - type=int, - default=None, - help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument( - "--lr_scheduler_type", - type=SchedulerType, - default="linear", - help="The scheduler type to use.", - choices=[ - "linear", "cosine", "cosine_with_restarts", "polynomial", "constant", - "constant_with_warmup" - ], - ) - parser.add_argument("--num_warmup_steps", - type=int, - default=0, - help="Number of steps for the warmup in the lr scheduler.") - parser.add_argument("--output_dir", - type=str, - default=None, - help="Where to store the final model.") - parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") - parser.add_argument("--push_to_hub", - action="store_true", - help="Whether or not to push the model to the Hub.") - parser.add_argument( - "--hub_model_id", - type=str, - help="The name of the repository to keep in sync with the local `output_dir`.") - parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") - parser.add_argument("--tune", action="store_true", help="tune a best model with Intel Extension for Transformers.") - parser.add_argument("--quantization_approach", - type=str, - default="PostTrainingStatic", - help="Quantization approach. 
Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining.") - parser.add_argument("--metric_name", - type=str, - default=None, - help="Metric name used for the tuning strategy.") - parser.add_argument("--is_relative", - type=bool, - default=True, - help="Metric tolerance model, expected to be relative or absolute.") - parser.add_argument("--perf_tol", - type=float, - default=0.01, - help="Performance tolerance when optimizing the model.") - parser.add_argument("--benchmark", action="store_true", help="run benchmark.") - parser.add_argument("--int8", action="store_true", help="run benchmark with int8 model.") - parser.add_argument("--accuracy_only", - action="store_true", - help="Whether to only test accuracy for model tuned by Neural Compressor.") - parser.add_argument('-i', "--iter", default=0, type=int, help='For accuracy measurement only.') - parser.add_argument('-w', - "--warmup_iter", - default=1, - type=int, - help='For benchmark measurement only.') - args = parser.parse_args() - - # Sanity checks - if args.task_name is None and args.train_file is None and args.validation_file is None: - raise ValueError("Need either a task name or a training/validation file.") - else: - if args.train_file is not None: - extension = args.train_file.split(".")[-1] - assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." - if args.validation_file is not None: - extension = args.validation_file.split(".")[-1] - assert extension in ["csv", - "json"], "`validation_file` should be a csv or a json file." - - if args.push_to_hub: - assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." 
- - return args - - -def eval_func(args, model, accelerator, eval_dataloader, metric): - # Evaluation - batch_time = AverageMeter('Time', ':6.3f') - is_regression = args.task_name == "stsb" - model.eval() - for step, batch in enumerate(eval_dataloader): - if step >= args.warmup_iter: - start = time.time() - # soft labels - outputs = model(**batch) - # measure elapsed time - if step >= args.warmup_iter: - batch_time.update(time.time() - start) - predictions = outputs.logits.argmax( - dim=-1) if not is_regression else outputs.logits.squeeze() - metric.add_batch( - predictions=accelerator.gather(predictions), - references=accelerator.gather(batch["labels"]), - ) - eval_metric = metric.compute() - batch_size = args.per_device_eval_batch_size - print('Batch size = {}'.format(batch_size)) - print('Latency: %.3f ms' % (batch_time.avg / batch_size * 1000)) - print('Throughput: %.3f images/sec' % (batch_size / batch_time.avg)) - logger.info(f"{eval_metric}") - return eval_metric - - -def main(): - # read in the arguments - args = parse_args() - - # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. - accelerator = Accelerator() - # Make one log on every process with the configuration for debugging. - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, - ) - logger.info(accelerator.state) - - # Setup logging, we only want one process per machine to log things on the screen. - # accelerator.is_local_main_process is only True for one process per machine. - logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) - if accelerator.is_local_main_process: - datasets.utils.logging.set_verbosity_warning() - transformers.utils.logging.set_verbosity_info() - else: - datasets.utils.logging.set_verbosity_error() - transformers.utils.logging.set_verbosity_error() - - # If passed along, set the training seed now. 
- if args.seed is not None: - set_seed(args.seed) - - # Handle the repository creation - if accelerator.is_main_process: - if args.push_to_hub: - if args.hub_model_id is None: - repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) - else: - repo_name = args.hub_model_id - repo = Repository(args.output_dir, clone_from=repo_name) - elif args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - accelerator.wait_for_everyone() - - # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) - # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). - - # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the - # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named - # label if at least two columns are provided. - - # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this - # single column. You can easily tweak this behavior (see below) - - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if args.task_name is not None: - # Downloading and loading a dataset from the hub. - raw_datasets = load_dataset("glue", args.task_name) - ''' - 06/25/2022 - Distilled-sparse training for bert_mini, on sst2 - pre-load an augmented dataset - ''' - else: - # Loading the dataset from local csv or json file. 
- data_files = {} - if args.train_file is not None: - data_files["train"] = args.train_file - if args.validation_file is not None: - data_files["validation"] = args.validation_file - extension = (args.train_file - if args.train_file is not None else args.valid_file).split(".")[-1] - raw_datasets = load_dataset(extension, data_files=data_files) - # See more about loading any type of standard or custom dataset at - # https://huggingface.co/docs/datasets/loading_datasets.html. - - if args.task_name is not None: - is_regression = args.task_name == "stsb" - if not is_regression: - label_list = raw_datasets["train"].features["label"].names - num_labels = len(label_list) - else: - num_labels = 1 - else: - # Trying to have good defaults here, don't hesitate to tweak to your needs. - is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"] - if is_regression: - num_labels = 1 - else: - # A useful fast method: - # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique - label_list = raw_datasets["train"].unique("label") - label_list.sort() # Let's sort it for determinism - num_labels = len(label_list) - - # Load pretrained model and tokenizer - # - # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. 
- config = AutoConfig.from_pretrained(args.model_name_or_path, - num_labels=num_labels, - finetuning_task=args.task_name) - tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, - use_fast=not args.use_slow_tokenizer) - if args.int8: - # Load the model obtained after Intel Neural Compressor (INC) quantization - model = OptimizedModel.from_pretrained( - args.model_name_or_path, - from_tf=bool(".ckpt" in args.model_name_or_path), - config=config - ) - else: - model = AutoModelForSequenceClassification.from_pretrained( - args.model_name_or_path, - from_tf=bool(".ckpt" in args.model_name_or_path), - config=config - ) - - # Preprocessing the datasets - if args.task_name is not None: - sentence1_key, sentence2_key = task_to_keys[args.task_name] - else: - # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. - non_label_column_names = [ - name for name in raw_datasets["train"].column_names if name != "label" - ] - if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: - sentence1_key, sentence2_key = "sentence1", "sentence2" - else: - if len(non_label_column_names) >= 2: - sentence1_key, sentence2_key = non_label_column_names[:2] - else: - sentence1_key, sentence2_key = non_label_column_names[0], None - - # Some models have set the order of the labels to use, so let's make sure we do use it. - label_to_id = None - if (model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id - and args.task_name is not None and not is_regression): - # Some have all caps in their config, some don't. - label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} - if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)): - logger.info( - f"The configuration of the model provided the following label correspondence: {label_name_to_id}. 
" - "Using it!") - else: - logger.warning( - "Your model seems to have been trained with labels, but they don't match the dataset: ", - f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}." - "\nIgnoring the model labels as a result.", - ) - elif args.task_name is None: - label_to_id = {v: i for i, v in enumerate(label_list)} - - if label_to_id is not None: - model.config.label2id = label_to_id - model.config.id2label = {id: label for label, id in config.label2id.items()} - elif args.task_name is not None and not is_regression: - model.config.label2id = {l: i for i, l in enumerate(label_list)} - model.config.id2label = {id: label for label, id in config.label2id.items()} - - padding = "max_length" if args.pad_to_max_length else False - - def preprocess_function(examples): - # Tokenize the texts - texts = ((examples[sentence1_key], ) if sentence2_key is None else - (examples[sentence1_key], examples[sentence2_key])) - result = tokenizer(*texts, padding=padding, max_length=args.max_length, truncation=True) - - if "label" in examples: - if label_to_id is not None: - # Map labels to IDs (not necessary for GLUE tasks) - result["labels"] = [label_to_id[l] for l in examples["label"]] - else: - # In all cases, rename the column to labels because the model will expect that. 
- result["labels"] = examples["label"] - - return result - - with accelerator.main_process_first(): - - # original process - processed_datasets = raw_datasets.map( - preprocess_function, - batched=True, - remove_columns=raw_datasets["train"].column_names, - desc="Running tokenizer on dataset", - ) - train_dataset = processed_datasets["train"] - eval_dataset = processed_datasets["validation_matched" if args.task_name == - "mnli" else "validation"] - #if use_augmented: - # test_dataset = processed_datasets["test"] - # Log a few random samples from the training set: - for index in random.sample(range(len(train_dataset)), 3): - logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") - - # DataLoaders creation: - if args.pad_to_max_length: - # If padding was already done ot max length, we use the default data collator that will just convert everything - # to tensors. - data_collator = default_data_collator - else: - # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of - # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple - # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). - data_collator = DataCollatorWithPadding( - tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)) - - train_dataloader = DataLoader(train_dataset, - shuffle=True, - collate_fn=data_collator, - batch_size=args.per_device_train_batch_size) - eval_dataloader = DataLoader(eval_dataset, - collate_fn=data_collator, - batch_size=args.per_device_eval_batch_size) - - # Optimizer - # Split weights in two groups, one with weight decay and the other not. 
- no_decay = ["bias", "LayerNorm.weight"] - optimizer_grouped_parameters = [ - { - "params": - [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], - "weight_decay": args.weight_decay, - }, - { - "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], - "weight_decay": 0.0, - }, - ] - optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate) - - # Prepare everything with our `accelerator`. - model, optimizer = accelerator.prepare(model, optimizer) - - # Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be - # shorter in multiprocess) - - # Scheduler and math around the number of training steps. - num_update_steps_per_epoch = math.ceil( - len(train_dataloader) / args.gradient_accumulation_steps) - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - else: - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - lr_scheduler = get_scheduler( - name=args.lr_scheduler_type, - optimizer=optimizer, - num_warmup_steps=args.num_warmup_steps, - num_training_steps=args.max_train_steps, - ) - - # Get the metric function - if args.task_name is not None: - metric = load_metric("glue", args.task_name) - else: - metric = load_metric("accuracy") - - metric_name = (args.metric_name if args.metric_name is not None else - ("pearson" if args.task_name == "stsb" else - "matthews_correlation" if args.task_name == "cola" else "accuracy")) - - def eval_func_nc(model): - ret = eval_func(args, model, accelerator, eval_dataloader, metric) - return ret[metric_name] - - # Train! 
- if args.tune: - if accelerator.is_main_process: - tokenizer.save_pretrained(args.output_dir) - config.save_pretrained(args.output_dir, - is_main_process=accelerator.is_main_process, - save_function=accelerator.save) - tune_metric = metrics.Metric(name=metric_name, - is_relative=args.is_relative, - criterion=args.perf_tol) - objective = objectives.performance - q_config = QuantizationConfig(approach=args.quantization_approach, - max_trials=600, - metrics=[tune_metric], - objectives=[objective]) - quantizer = NoTrainerOptimizer(model, args.output_dir) - model = quantizer.quantize(q_config, - eval_func=eval_func_nc, - calib_dataloader=train_dataloader) - - if args.benchmark or args.accuracy_only: - results = eval_func(args, model, accelerator, eval_dataloader, metric) - print("Finally Eval {} Accuracy: {:.5f}".format(metric_name, results[metric_name])) - - -class AverageMeter(object): - """Computes and stores the average and current value""" - def __init__(self, name, fmt=':f'): - self.name = name - self.fmt = fmt - self.reset() - - def reset(self): - self.val = 0 - self.avg = 0 - self.sum = 0 - self.count = 0 - - def update(self, val, n=1): - self.val = val - self.sum += val * n - self.count += n - self.avg = self.sum / self.count - - def __str__(self): - fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' - return fmtstr.format(**self.__dict__) - - -if __name__ == "__main__": - main() diff --git a/examples/huggingface/pytorch/text-generation/quantization/run_tuning.sh b/examples/huggingface/pytorch/text-generation/quantization/run_tuning.sh index 16eaaa3182e..7c3919a132a 100644 --- a/examples/huggingface/pytorch/text-generation/quantization/run_tuning.sh +++ b/examples/huggingface/pytorch/text-generation/quantization/run_tuning.sh @@ -16,7 +16,7 @@ function init_params { model_name_or_path="EleutherAI/gpt-j-6b" extra_cmd="" batch_size=8 - approach="PostTrainingStatic" + approach="static" script="run_generation_sq.py" alpha=0.5 weight_dtype="int4" diff 
--git a/examples/huggingface/pytorch/text-to-image/quantization/ptq/README.md b/examples/huggingface/pytorch/text-to-image/quantization/ptq/README.md index 345ccaba5c3..d74432313d2 100644 --- a/examples/huggingface/pytorch/text-to-image/quantization/ptq/README.md +++ b/examples/huggingface/pytorch/text-to-image/quantization/ptq/README.md @@ -22,7 +22,7 @@ pip install -r requirements.txt python run_diffusion.py \ --model_name_or_path lambdalabs/sd-pokemon-diffusers \ --tune \ - --quantization_approach PostTrainingStatic \ + --quantization_approach static \ --perf_tol 0.02 \ --output_dir /tmp/diffusion_output \ --base_images base_images \ diff --git a/examples/huggingface/pytorch/text-to-image/quantization/ptq/run_diffusion.py b/examples/huggingface/pytorch/text-to-image/quantization/ptq/run_diffusion.py index 5e2e2e62669..3f4ace9720d 100644 --- a/examples/huggingface/pytorch/text-to-image/quantization/ptq/run_diffusion.py +++ b/examples/huggingface/pytorch/text-to-image/quantization/ptq/run_diffusion.py @@ -30,7 +30,13 @@ from accelerate.utils import set_seed from diffusers import StableDiffusionPipeline -from intel_extension_for_transformers.transformers import metrics , NoTrainerOptimizer, OptimizedModel, QuantizationConfig +from intel_extension_for_transformers.transformers import metrics +from neural_compressor.config import ( + PostTrainingQuantConfig, + TuningCriterion, + AccuracyCriterion +) +from neural_compressor.quantization import fit from intel_extension_for_transformers.transformers.config import WEIGHTS_NAME from pytorch_fid import fid_score @@ -100,9 +106,9 @@ def parse_args(): parser.add_argument( "--quantization_approach", type=str, - default="PostTrainingStatic", - help="Quantization approach. Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining.", + default="static", + help="Quantization approach. 
Supported approach are static, " + "dynamic and qat.", ) parser.add_argument( "--framework", @@ -301,18 +307,28 @@ def eval_func(model): criterion=args.perf_tol, greater_is_better=False ) - quantization_config = QuantizationConfig( - approach=args.quantization_approach, - max_trials=200, - metrics=[tune_metric], - ) + tuning_criterion = TuningCriterion(max_trials=600, objective=["performance"]) + accuracy_criterion = AccuracyCriterion( + higher_is_better=False, # optional. + criterion="relative" if args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". + tolerable_loss=args.perf_tol, # optional. + ) + quantization_config = PostTrainingQuantConfig( + approach=args.quantization_approach, + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) os.makedirs(args.output_dir, exist_ok=True) - quantizer = NoTrainerOptimizer(model, args.output_dir) - model = quantizer.quantize(quantization_config, + model = fit(model, + quantization_config, eval_func=eval_func, calib_func=calibration_func, calib_dataloader=DataLoader(CalibDataset(), batch_size=1), - ) + ) + + weights_file = os.path.join(os.path.abspath( + os.path.expanduser(args.output_dir)), WEIGHTS_NAME) + torch.save(model.quantized_state_dict(), weights_file) setattr(pipe, name, model) logger.info(f"Optimized model {name} saved to: {args.output_dir}.") diff --git a/examples/huggingface/pytorch/text-to-image/quantization/ptq/run_tuning.sh b/examples/huggingface/pytorch/text-to-image/quantization/ptq/run_tuning.sh index 117762d8940..c288b195d27 100644 --- a/examples/huggingface/pytorch/text-to-image/quantization/ptq/run_tuning.sh +++ b/examples/huggingface/pytorch/text-to-image/quantization/ptq/run_tuning.sh @@ -15,7 +15,7 @@ function init_params { model_name_or_path="lambdalabs/sd-pokemon-diffusers" extra_cmd="" batch_size=8 - approach="PostTrainingStatic" + approach="static" for var in "$@" do case $var in @@ -45,10 +45,10 @@ function run_tuning { if [ 
"${topology}" = "sd_pokemon_diffusers_static" ]; then model_name_or_path="lambdalabs/sd-pokemon-diffusers" - approach="PostTrainingStatic" + approach="static" elif [ "${topology}" = "sd_pokemon_diffusers_dynamic" ]; then model_name_or_path="lambdalabs/sd-pokemon-diffusers" - approach="PostTrainingDynamic" + approach="dynamic" fi python -u ./run_diffusion.py \ diff --git a/examples/huggingface/pytorch/text2text-generation/run_tuning.sh b/examples/huggingface/pytorch/text2text-generation/run_tuning.sh index 3d35086f578..826469b1b3c 100644 --- a/examples/huggingface/pytorch/text2text-generation/run_tuning.sh +++ b/examples/huggingface/pytorch/text2text-generation/run_tuning.sh @@ -16,7 +16,7 @@ function init_params { model_name_or_path="google/flan-t5-large" extra_cmd="" batch_size=8 - approach="PostTrainingStatic" + approach="static" alpha=0.7 for var in "$@" do diff --git a/examples/huggingface/pytorch/textual-inversion/distillation_for_quantization/textual_inversion.py b/examples/huggingface/pytorch/textual-inversion/distillation_for_quantization/textual_inversion.py index 128c9248341..832da5ceb52 100644 --- a/examples/huggingface/pytorch/textual-inversion/distillation_for_quantization/textual_inversion.py +++ b/examples/huggingface/pytorch/textual-inversion/distillation_for_quantization/textual_inversion.py @@ -19,9 +19,10 @@ from diffusers.optimization import get_scheduler from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker from huggingface_hub import HfFolder, Repository, whoami -from intel_extension_for_transformers.transformers.config import ( +from neural_compressor.config import ( DistillationConfig, - QuantizationConfig, + IntermediateLayersKnowledgeDistillationLossConfig, + QuantizationAwareTrainingConfig, ) from intel_extension_for_transformers.transformers.utils import metrics, objectives from intel_extension_for_transformers.transformers.trainer import NLPTrainer @@ -769,12 +770,7 @@ def train_func(model): tune_metric = 
metrics.Metric(name="") if args.do_quantization: objective = objectives.performance - quantization_conf = QuantizationConfig( - approach="QuantizationAwareTraining", - max_trials=600, - metrics=[tune_metric], - objectives=[objective] - ) + quantization_conf = QuantizationAwareTrainingConfig() conf_list.append(quantization_conf) if args.do_distillation: @@ -828,17 +824,13 @@ def train_func(model): [['mid_block.resnets.1', ]], [['conv_out', ]], ] - - distillation_conf = DistillationConfig( - framework="pytorch_fx", metrics=tune_metric, - criterion=Criterion( - name="IntermediateLayersLoss", - layer_mappings=layer_mappings, - loss_types=["MSE"] * len(layer_mappings), - loss_weight_ratio=[1.0 / len(layer_mappings)] * len(layer_mappings), - add_origin_loss=True - ) + criterion_conf = IntermediateLayersKnowledgeDistillationLossConfig( + layer_mappings=layer_mappings, + loss_types=["MSE"] * len(layer_mappings), + loss_weight_ratio=[1.0 / len(layer_mappings)] * len(layer_mappings), + add_origin_loss=True ) + distillation_conf = DistillationConfig(teacher_model=teacher_model, criterion=criterion_conf) conf_list.append(distillation_conf) # Initialize our Trainer @@ -846,10 +838,10 @@ def train_func(model): model=model, args=TrainingArguments(output_dir=args.output_dir), ) + trainer.metrics = tune_metric model = trainer.orchestrate_optimizations( config_list=conf_list, - teacher_model=teacher_model, eval_func=lambda model:1, train_func=train_func) diff --git a/examples/huggingface/pytorch/token-classification/quantization/README.md b/examples/huggingface/pytorch/token-classification/quantization/README.md index 0cc950c742f..a718fceb82a 100644 --- a/examples/huggingface/pytorch/token-classification/quantization/README.md +++ b/examples/huggingface/pytorch/token-classification/quantization/README.md @@ -8,9 +8,7 @@ Token classification assigns a label to individual tokens in a sentence. One of ## 1. 
Environment ``` pip install -r requirements.txt -pip install transformers==4.34.1 ``` ->**Note**: Please use transformers no higher than 4.34.1 # Run @@ -22,7 +20,7 @@ pip install transformers==4.34.1 --model_name_or_path elastic/distilbert-base-uncased-finetuned-conll03-english \ --dataset_name conll2003 \ --tune \ - --quantization_approach PostTrainingStatic \ + --quantization_approach static \ --do_train \ --do_eval \ --pad_to_max_length \ @@ -32,7 +30,7 @@ pip install transformers==4.34.1 # Performance Data -|Dataset|Pretrained model|PostTrainingDynamic | PostTrainingStatic | QuantizationAwareTraining +|Dataset|Pretrained model|dynamic | static | qat |---|------------------------------------|---|---|--- |NER|elastic/distilbert-base-uncased-finetuned-conll03-english| ✅| ✅| ✅ diff --git a/examples/huggingface/pytorch/token-classification/quantization/run_ner.py b/examples/huggingface/pytorch/token-classification/quantization/run_ner.py index 7076b1bd5fe..d83ae181630 100644 --- a/examples/huggingface/pytorch/token-classification/quantization/run_ner.py +++ b/examples/huggingface/pytorch/token-classification/quantization/run_ner.py @@ -27,10 +27,12 @@ import transformers from dataclasses import dataclass, field from datasets import ClassLabel, load_dataset, load_metric -from intel_extension_for_transformers.transformers import( - metrics, - OptimizedModel, - QuantizationConfig, +from intel_extension_for_transformers.transformers import OptimizedModel, metrics +from neural_compressor.config import ( + PostTrainingQuantConfig, + QuantizationAwareTrainingConfig, + TuningCriterion, + AccuracyCriterion ) from intel_extension_for_transformers.transformers.trainer import NLPTrainer from transformers import ( @@ -200,9 +202,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="PostTrainingStatic", - metadata={"help": "Quantization approach. 
Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, + default="static", + metadata={"help": "Quantization approach. Supported approach are static, " + "dynamic and qat."}, ) metric_name: Optional[str] = field( default="eval_f1", @@ -299,7 +301,8 @@ def main(): if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( - data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir + data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, + trust_remote_code=True ) else: data_files = {} @@ -310,7 +313,8 @@ def main(): if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.train_file.split(".")[-1] - raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir) + raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir, + trust_remote_code=True) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. 
@@ -536,7 +540,7 @@ def tokenize_and_align_labels(examples): data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None) # Metrics - metric = load_metric("seqeval") + metric = load_metric("seqeval", trust_remote_code=True) def compute_metrics(p): predictions, labels = p @@ -573,6 +577,7 @@ def compute_metrics(p): metric_name = optim_args.metric_name training_args.metric_for_best_model = metric_name + # Initialize our Trainer trainer = NLPTrainer( model=model, @@ -590,25 +595,42 @@ def compute_metrics(p): raise ValueError("do_eval must be set to True for quantization.") trainer.save_model(training_args.output_dir) - if optim_args.quantization_approach != "PostTrainingDynamic": + if optim_args.quantization_approach != "dynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) - if optim_args.quantization_approach == "QuantizationAwareTraining": - early_stopping_patience = 6 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, - early_stopping_threshold)) - tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - quantization_config = QuantizationConfig( - approach=optim_args.quantization_approach, - metrics=[tune_metric], - sampling_size = len(train_dataset)//20 - ) + trainer.metrics = tune_metric + if optim_args.quantization_approach != "qat": + tuning_criterion = TuningCriterion(max_trials=600, objective=["performance"]) + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, # optional. + criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". + tolerable_loss=optim_args.perf_tol, # optional. 
+ ) + quantization_config = PostTrainingQuantConfig( + approach=optim_args.quantization_approach, + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + else: + tuning_criterion = TuningCriterion(max_trials=600, objective=["performance"]) + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, # optional. + criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". + tolerable_loss=optim_args.perf_tol, # optional. + ) + quantization_config = QuantizationAwareTrainingConfig( + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + early_stopping_patience = 2 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ + early_stopping_threshold)) model = trainer.quantize(quantization_config) if optim_args.benchmark_only: diff --git a/examples/huggingface/pytorch/token-classification/quantization/run_tuning.sh b/examples/huggingface/pytorch/token-classification/quantization/run_tuning.sh index 35616849055..ce43f4c35b9 100644 --- a/examples/huggingface/pytorch/token-classification/quantization/run_tuning.sh +++ b/examples/huggingface/pytorch/token-classification/quantization/run_tuning.sh @@ -18,7 +18,7 @@ function init_params { batch_size=8 MAX_SEQ_LENGTH=384 model_type="bert" - approach="PostTrainingStatic" + approach="static" for var in "$@" do case $var in @@ -49,17 +49,17 @@ function run_tuning { DATASET_NAME="conll2003" model_name_or_path="elastic/distilbert-base-uncased-finetuned-conll03-english " model_type="bert" - approach="PostTrainingStatic" + approach="static" elif [ "${topology}" = "distilbert_base_ner_dynamic" ]; then DATASET_NAME="conll2003" model_name_or_path="elastic/distilbert-base-uncased-finetuned-conll03-english " model_type="bert" - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${topology}" = "distilbert_base_ner_qat" ]; then 
DATASET_NAME="conll2003" model_name_or_path="elastic/distilbert-base-uncased-finetuned-conll03-english " model_type="bert" - approach="QuantizationAwareTraining" + approach="qat" extra_cmd=$extra_cmd" --learning_rate 1e-5 \ --num_train_epochs 6 \ --eval_steps 100 \ @@ -68,7 +68,8 @@ function run_tuning { --load_best_model_at_end True \ --evaluation_strategy steps \ --save_strategy steps \ - --save_total_limit 1" + --save_total_limit 1 \ + --save_safetensors False" fi python -u ./run_ner.py \ diff --git a/examples/huggingface/pytorch/translation/quantization/README.md b/examples/huggingface/pytorch/translation/quantization/README.md index 3eecf1d4316..8fd4c4844fe 100644 --- a/examples/huggingface/pytorch/translation/quantization/README.md +++ b/examples/huggingface/pytorch/translation/quantization/README.md @@ -9,9 +9,7 @@ This directory contains the example for quantization models on translation tasks ``` pip install intel-extension-for-transformers pip install -r requirements.txt -pip install transformers==4.34.1 ``` ->**Note**: Please use transformers no higher than 4.34.1 # Run @@ -30,8 +28,8 @@ python examples/pytorch/translation/run_translation.py \ --dataset_name wmt16 \ --dataset_config_name ro-en \ --output_dir /tmp/tst-translation \ - --per_device_train_batch_size=4 \ - --per_device_eval_batch_size=4 \ + --per_device_train_batch_size 4 \ + --per_device_eval_batch_size 4 \ --overwrite_output_dir \ --tune \ --predict_with_generate @@ -51,8 +49,8 @@ python examples/pytorch/translation/run_translation.py \ --dataset_name wmt16 \ --dataset_config_name ro-en \ --output_dir /tmp/tst-translation \ - --per_device_train_batch_size=4 \ - --per_device_eval_batch_size=4 \ + --per_device_train_batch_size 4 \ + --per_device_eval_batch_size 4 \ --overwrite_output_dir \ --tune \ --predict_with_generate diff --git a/examples/huggingface/pytorch/translation/quantization/run_translation.py b/examples/huggingface/pytorch/translation/quantization/run_translation.py index 
f5554905dcc..4d02698d21a 100755 --- a/examples/huggingface/pytorch/translation/quantization/run_translation.py +++ b/examples/huggingface/pytorch/translation/quantization/run_translation.py @@ -28,8 +28,13 @@ import numpy as np from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import OptimizedModel, QuantizationConfig -from intel_extension_for_transformers.transformers import metrics as nlp_metrics +from intel_extension_for_transformers.transformers import OptimizedModel, objectives, metrics +from neural_compressor.config import ( + PostTrainingQuantConfig, + QuantizationAwareTrainingConfig, + TuningCriterion, + AccuracyCriterion +) from intel_extension_for_transformers.transformers.trainer import NLPSeq2SeqTrainer import transformers from transformers import ( @@ -244,9 +249,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="PostTrainingStatic", - metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, + default="static", + metadata={"help": "Quantization approach. Supported approach are static, " + "dynamic and qat."}, ) metric_name: Optional[str] = field( default="eval_bleu", @@ -362,6 +367,7 @@ def main(): data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, + trust_remote_code=True, ) else: data_files = {} @@ -379,6 +385,7 @@ def main(): data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, + trust_remote_code=True, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. 
@@ -565,7 +572,7 @@ def preprocess_function(examples): ) # Metric - metric = load_metric("sacrebleu") + metric = load_metric("sacrebleu", trust_remote_code=True) def postprocess_text(preds, labels): preds = [pred.strip() for pred in preds] @@ -620,26 +627,43 @@ def compute_metrics(eval_preds): raise ValueError("do_eval must be set to True for quantization.") trainer.save_model(training_args.output_dir) - if optim_args.quantization_approach != "PostTrainingDynamic": + if optim_args.quantization_approach != "dynamic": if not training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." ) - if optim_args.quantization_approach == "QuantizationAwareTraining": - early_stopping_patience = 6 - early_stopping_threshold = 0.001 # optional - trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, - early_stopping_threshold)) - - tune_metric = nlp_metrics.Metric( + objective = objectives.performance + tune_metric = metrics.Metric( name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol ) - quantization_config = QuantizationConfig( - approach=optim_args.quantization_approach, - max_trials=200, - metrics=[tune_metric], - sampling_size = len(train_dataset)//20 - ) + trainer.metrics = tune_metric + if optim_args.quantization_approach != "qat": + tuning_criterion = TuningCriterion(max_trials=600, objective=[objective.name]) + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, # optional. + criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". + tolerable_loss=optim_args.perf_tol, # optional. 
+ ) + quantization_config = PostTrainingQuantConfig( + approach=optim_args.quantization_approach, + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + else: + tuning_criterion = TuningCriterion(max_trials=600, objective=[objective.name]) + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, # optional. + criterion="relative" if optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". + tolerable_loss=optim_args.perf_tol, # optional. + ) + quantization_config = QuantizationAwareTrainingConfig( + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + early_stopping_patience = 2 + early_stopping_threshold = 0.001 # optional + trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ + early_stopping_threshold)) trainer.max_length = max_length trainer.num_beams = num_beams model = trainer.quantize(quant_config=quantization_config) diff --git a/examples/huggingface/pytorch/translation/quantization/run_tuning.sh b/examples/huggingface/pytorch/translation/quantization/run_tuning.sh index 14fef28308b..ef294a54587 100644 --- a/examples/huggingface/pytorch/translation/quantization/run_tuning.sh +++ b/examples/huggingface/pytorch/translation/quantization/run_tuning.sh @@ -15,7 +15,7 @@ function init_params { DATASET_NAME="xsum" extra_cmd="" batch_size=8 - approach="PostTrainingStatic" + approach="static" for var in "$@" do case $var in @@ -45,11 +45,11 @@ function run_tuning { if [ "${topology}" = "t5-small_dynamic" ]; then model_name_or_path="t5-small" extra_cmd=$extra_cmd" --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config_name ro-en" - approach="PostTrainingDynamic" + approach="dynamic" elif [ "${topology}" = "marianmt_WMT_en_ro_dynamic" ]; then model_name_or_path='Helsinki-NLP/opus-mt-en-ro' extra_cmd=$extra_cmd" --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config_name ro-en" - 
approach="PostTrainingDynamic" + approach="dynamic" else echo "unsupported topology: ${topology}" exit 1 diff --git a/examples/huggingface/tensorflow/language-modeling/quantization/README.md b/examples/huggingface/tensorflow/language-modeling/quantization/README.md deleted file mode 100644 index bb41901b183..00000000000 --- a/examples/huggingface/tensorflow/language-modeling/quantization/README.md +++ /dev/null @@ -1,63 +0,0 @@ -Step-by-Step -========= - -This document describes the step-by-step instructions for reproducing the quantization on models for the Language Modeling tasks. - -There are mainly two kinds of language modeling tasks: Causal Language Modeling (CLM) and Masked Language Modeling (MLM). Two scripts `run_clm.py` and `run_mlm.py` provide quantization examples on the above two kinds of models based on [Intel® Neural Compressor](https://github.com/intel/neural-compressor). Users can easily run the quantization with `run_tuning.sh` and the benchmarking with `run_benchmark.sh`. - -Please note that language modeling tasks use `loss` as the evaluation metric so the loss will appear where the accuracy should be in the final tune result statistics, and the `greater_is_better=False` should be set in the Python scripts. - -Users can also change the `--max_training_samples`, `--max_eval_samples`, and `--max_seq_length` in the scripts for quicker debugging and to avoid potential lack of memory. - -# Prerequisite -## 1. Installation - -Make sure you have installed Intel® Extension for Transformers and all the dependencies in the current example: - -```shell -pip install intel-extension-for-transformers -cd ptq -pip install -r requirements.txt -``` - -# Run - -## 1. Run Command for the CLM task (Shell) - -- Topology: - - distilgpt2_clm - -* To get the int8 model - -``` -cd ptq -bash run_tuning.sh --topology=[topology] -``` - -* To benchmark the int8 model - - -``` -cd ptq -bash run_benchmark.sh --topology=[topology] --mode=benchmark --int8=true -``` - -## 2. 
Run Command for the MLM task (Shell) - -- Topology: - - distilbert_mlm - - distilroberta_mlm - -* To get the int8 model - -``` -cd ptq -bash run_tuning.sh --topology=[topology] -``` - -* To benchmark the int8 model - -``` -cd ptq -bash run_benchmark.sh --topology=[topology] --mode=benchmark --int8=true -``` diff --git a/examples/huggingface/tensorflow/language-modeling/quantization/ptq/requirements.txt b/examples/huggingface/tensorflow/language-modeling/quantization/ptq/requirements.txt deleted file mode 100644 index 62aa53701f4..00000000000 --- a/examples/huggingface/tensorflow/language-modeling/quantization/ptq/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -datasets >= 1.17 -sentencepiece != 0.1.92 -protobuf -intel-tensorflow -transformers -scikit-learn -accelerate \ No newline at end of file diff --git a/examples/huggingface/tensorflow/language-modeling/quantization/ptq/run_benchmark.sh b/examples/huggingface/tensorflow/language-modeling/quantization/ptq/run_benchmark.sh deleted file mode 100644 index e3cb8c3c55a..00000000000 --- a/examples/huggingface/tensorflow/language-modeling/quantization/ptq/run_benchmark.sh +++ /dev/null @@ -1,119 +0,0 @@ -#!/bin/bash -set -x - -function main { - - init_params "$@" - run_benchmark - -} - -# init params -function init_params { - topology="distilgpt2_clm" - iters=100 - batch_size=16 - tuned_checkpoint=saved_results - cache_dir="cache" - for var in "$@" - do - case $var in - --topology=*) - topology=$(echo $var |cut -f2 -d=) - ;; - --dataset_location=*) - dataset_location=$(echo $var |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo $var |cut -f2 -d=) - ;; - --mode=*) - mode=$(echo $var |cut -f2 -d=) - ;; - --batch_size=*) - batch_size=$(echo $var |cut -f2 -d=) - ;; - --iters=*) - iters=$(echo ${var} |cut -f2 -d=) - ;; - --int8=*) - int8=$(echo ${var} |cut -f2 -d=) - ;; - --config=*) - tuned_checkpoint=$(echo $var |cut -f2 -d=) - ;; - --worker=*) - worker=$(echo $var |cut -f2 -d=) - ;; - --task_index=*) - 
task_index=$(echo $var |cut -f2 -d=) - ;; - --cache_dir=*) - cache_dir=$(echo $var |cut -f2 -d=) - ;; - *) - echo "Error: No such parameter: ${var}" - exit 1 - ;; - esac - done - -} - - -# run_benchmark -function run_benchmark { - extra_cmd='' - MAX_SEQ_LENGTH=128 - - if [[ ${mode} == "accuracy" ]]; then - mode_cmd=" --accuracy_only" - elif [[ ${mode} == "benchmark" ]]; then - mode_cmd=" --benchmark " - else - echo "Error: No such mode: ${mode}" - exit 1 - fi - - if [ "${topology}" = "distilgpt2_clm" ]; then - script="run_clm.py" - dataset_name="wikitext" - model_name_or_path="distilgpt2" - dataset_config_name="wikitext-2-raw-v1" - # remove following two parameters if you have enough memory - extra_cmd=$extra_cmd" --max_eval_samples 196 --block_size 128" - elif [ "${topology}" = "distilbert_mlm" ]; then - script="run_mlm.py" - dataset_name="wikitext" - model_name_or_path="distilbert-base-cased" - dataset_config_name="wikitext-2-raw-v1" - # remove following two parameters if you have enough memory - extra_cmd=$extra_cmd" --max_eval_samples 196 --max_seq_length 128" - elif [ "${topology}" = "distilroberta_mlm" ]; then - script="run_mlm.py" - dataset_name="wikitext" - model_name_or_path="Rocketknight1/distilroberta-base-finetuned-wikitext2" - dataset_config_name="wikitext-2-raw-v1" - # remove following two parameters if you have enough memory - extra_cmd=$extra_cmd" --max_eval_samples 196 --max_seq_length 128" - fi - - if [[ ${int8} == "true" ]]; then - extra_cmd=$extra_cmd" --int8" - fi - echo $extra_cmd - - python -u ../${script} \ - --model_name_or_path ${model_name_or_path} \ - --dataset_name ${dataset_name} \ - --dataset_config_name ${dataset_config_name} \ - --do_eval \ - --per_device_eval_batch_size ${batch_size} \ - --output_dir ${tuned_checkpoint} \ - --overwrite_output_dir \ - --cache_dir ${cache_dir} \ - ${mode_cmd} \ - ${extra_cmd} -} - -main "$@" diff --git a/examples/huggingface/tensorflow/language-modeling/quantization/ptq/run_tuning.sh 
b/examples/huggingface/tensorflow/language-modeling/quantization/ptq/run_tuning.sh deleted file mode 100644 index 6ffa270911f..00000000000 --- a/examples/huggingface/tensorflow/language-modeling/quantization/ptq/run_tuning.sh +++ /dev/null @@ -1,115 +0,0 @@ -#!/bin/bash -set -x - -function main { - - init_params "$@" - run_tuning - -} - -# init params -function init_params { - topology="distilgpt2_clm" - tuned_checkpoint="saved_results" - extra_cmd="" - batch_size=8 - MAX_SEQ_LENGTH=128 - model_type="bert" - approach="PostTrainingStatic" - cache_dir="cache" - for var in "$@" - do - case $var in - --topology=*) - topology=$(echo $var |cut -f2 -d=) - ;; - --dataset_location=*) - dataset_location=$(echo $var |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo $var |cut -f2 -d=) - ;; - --output_model=*) - tuned_checkpoint=$(echo $var |cut -f2 -d=) - ;; - --worker=*) - worker=$(echo $var |cut -f2 -d=) - ;; - --task_index=*) - task_index=$(echo $var |cut -f2 -d=) - ;; - --cache_dir=*) - cache_dir=$(echo $var |cut -f2 -d=) - ;; - *) - echo "Error: No such parameter: ${var}" - exit 1 - ;; - esac - done - -} - -# run_tuning -function run_tuning { - if [ "${topology}" = "distilgpt2_clm" ]; then - script="run_clm.py" - model_name_or_path="distilgpt2" - dataset_name="wikitext" - approach="PostTrainingStatic" - dataset_config_name="wikitext-2-raw-v1" - # remove or change following two parameters if you have enough memory - extra_cmd=$extra_cmd" --max_eval_samples 96 --block_size 128 --perf_tol 0.08" - elif [ "${topology}" = "distilbert_mlm" ]; then - script="run_mlm.py" - model_name_or_path="distilbert-base-cased" - dataset_name="wikitext" - approach="PostTrainingStatic" - dataset_config_name="wikitext-2-raw-v1" - # remove or change following two parameters if you have enough memory - extra_cmd=$extra_cmd" --max_eval_samples 96 --max_seq_length 128 --perf_tol 0.08" - elif [ "${topology}" = "distilroberta_mlm" ]; then - script="run_mlm.py" - 
model_name_or_path="Rocketknight1/distilroberta-base-finetuned-wikitext2" - dataset_name="wikitext" - approach="PostTrainingStatic" - dataset_config_name="wikitext-2-raw-v1" - # remove or change following two parameters if you have enough memory - extra_cmd=$extra_cmd" --max_eval_samples 96 --max_seq_length 128 --perf_tol 0.08" - fi - - if [ "${worker}" = "" ] - then - python -u ../${script} \ - --model_name_or_path ${model_name_or_path} \ - --dataset_name ${dataset_name} \ - --dataset_config_name ${dataset_config_name} \ - --do_eval \ - --output_dir ${tuned_checkpoint} \ - --quantization_approach ${approach} \ - --do_train \ - --overwrite_output_dir \ - --cache_dir ${cache_dir} \ - --tune \ - ${extra_cmd} - else - python -u ../${script} \ - --model_name_or_path ${model_name_or_path} \ - --dataset_name ${dataset_name} \ - --dataset_config_name ${dataset_config_name} \ - --task_name ${TASK_NAME} \ - --do_eval \ - --output_dir ${tuned_checkpoint} \ - --quantization_approach ${approach} \ - --do_train \ - --overwrite_output_dir \ - --cache_dir ${cache_dir} \ - --tune \ - --worker "${worker}" \ - --task_index ${task_index} \ - ${extra_cmd} - fi -} - -main "$@" diff --git a/examples/huggingface/tensorflow/language-modeling/quantization/run_clm.py b/examples/huggingface/tensorflow/language-modeling/quantization/run_clm.py deleted file mode 100644 index 1b82d1ccf0f..00000000000 --- a/examples/huggingface/tensorflow/language-modeling/quantization/run_clm.py +++ /dev/null @@ -1,814 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2021 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Fine-tuning the library models for causal language modeling (GPT-2, GPT-Neo...) -on a text file or a dataset without using HuggingFace Trainer. -Here is the full list of checkpoints on the hub that can be fine-tuned by this script: -https://huggingface.co/models?filter=text-generation -""" -# You can also adapt this script on your own clm task. Pointers for this are left as comments. - -import json - -# region Imports -import logging -import math -import os -import random -import sys -from dataclasses import dataclass, field -from itertools import chain -from pathlib import Path -from typing import Optional -import time - -import numpy as np -import datasets -import tensorflow as tf -from datasets import load_dataset, load_metric -from sklearn.model_selection import train_test_split -from transformers.trainer_utils import get_last_checkpoint, is_main_process - -import transformers -from transformers import ( - CONFIG_MAPPING, - CONFIG_NAME, - TF2_WEIGHTS_NAME, - TF_MODEL_FOR_CAUSAL_LM_MAPPING, - AutoConfig, - AutoTokenizer, - HfArgumentParser, - TFAutoModelForCausalLM, - TFTrainingArguments, - create_optimizer, - set_seed, -) -from transformers.utils.versions import require_version - -logger = logging.getLogger(__name__) -require_version("datasets>=1.8.0", "To fix: pip install -r examples/tensorflow/language-modeling/requirements.txt") -MODEL_CONFIG_CLASSES = list(TF_MODEL_FOR_CAUSAL_LM_MAPPING.keys()) -MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) -# endregion - -# region Command-line arguments -@dataclass -class 
ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. - """ - - model_name_or_path: Optional[str] = field( - default=None, - metadata={ - "help": ( - "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." - ) - }, - ) - model_type: Optional[str] = field( - default=None, - metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, - ) - config_overrides: Optional[str] = field( - default=None, - metadata={ - "help": ( - "Override some existing default config settings when a model is trained from scratch. Example: " - "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" - ) - }, - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, - ) - model_revision: str = field( - default="main", - metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script " - "with private models)." 
- ) - }, - ) - - def __post_init__(self): - if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): - raise ValueError( - "--config_overrides can't be used in combination with --config_name or --model_name_or_path" - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - """ - - dataset_name: Optional[str] = field( - default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) - validation_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, - ) - max_seq_length: int = field( - default=128, - metadata={ - "help": "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - }, - ) - pad_to_max_length: bool = field( - default=False, - metadata={ - "help": "Whether to pad all samples to `max_seq_length`. " - "If False, will pad the samples dynamically when batching to the maximum length in the batch." - }, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} - ) - validation_split_percentage: Optional[int] = field( - default=5, - metadata={ - "help": "The percentage of the train set used as validation set in case there's no validation split" - }, - ) - block_size: Optional[int] = field( - default=None, - metadata={ - "help": ( - "Optional input sequence length after tokenization. " - "The training dataset will be truncated in block of this size for training. 
" - "Default to the model max input length for single sentence inputs (take into account special tokens)." - ) - }, - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - line_by_line: bool = field( - default=False, - metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."}, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - ) - }, - ) - keep_linebreaks: bool = field( - default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."} - ) - - def __post_init__(self): - if self.dataset_name is None and self.train_file is None and self.validation_file is None: - raise ValueError("Need either a dataset name or a training/validation file.") - else: - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." - if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." - -@dataclass -class OptimizationArguments: - """ - Arguments pertaining to what type of optimization we are going to apply on the model. - """ - - tune: bool = field( - default=False, - metadata={"help": "Whether or not to apply quantization."}, - ) - quantization_approach: Optional[str] = field( - default="PostTrainingStatic", - metadata={"help": "Quantization approach. 
Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, - ) - metric_name: Optional[str] = field( - default=None, - metadata={"help": "Metric used for the tuning strategy."}, - ) - is_relative: Optional[bool] = field( - default=True, - metadata={"help": "Metric tolerance model, expected to be relative or absolute."}, - ) - perf_tol: Optional[float] = field( - default=0.01, - metadata={"help": "Performance tolerance when optimizing the model."}, - ) - benchmark: bool = field( - default=False, - metadata={"help": "run benchmark."}) - int8: bool = field( - default=False, - metadata={"help":"Whether to use the quantized int8 model."}) - accuracy_only: bool = field( - default=False, - metadata={"help":"Whether to only test accuracy for model tuned by Neural Compressor."}) - -@dataclass -class DistributedArguments: - """ - Arguments setting the distributed multinode environment - """ - - worker: str = field( - default=None, - metadata={"help": "List of node ip addresses in a string, and there should not be space between addresses."}, - ) - task_index: int = field( - default=0, - metadata={"help": "Worker index, and 0 represents the chief worker while other workers are set as 1,2,3..."}, - ) - - -# endregion - -def main(): - # region Argument Parsing - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments, OptimizationArguments, DistributedArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. 
- model_args, data_args, training_args, optim_args, distributed_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args, optim_args, distributed_args = parser.parse_args_into_dataclasses() - - # region Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) - - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(training_args.local_rank): - transformers.utils.logging.set_verbosity_info() - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - logger.info(f"Training/evaluation parameters {training_args}") - # endregion - - # Sanity checks - if data_args.dataset_name is None and data_args.train_file is None and data_args.validation_file is None: - raise ValueError("Need either a dataset name or a training/validation file.") - else: - if data_args.train_file is not None: - extension = data_args.train_file.split(".")[-1] - assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file." - if data_args.validation_file is not None: - extension = data_args.validation_file.split(".")[-1] - assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file." 
- - if training_args.output_dir is not None: - training_args.output_dir = Path(training_args.output_dir) - os.makedirs(training_args.output_dir, exist_ok=True) - # endregion - - # region Set the multinode environment, the strategy and paths - strategy = None - worker_list = None - if distributed_args.worker is not None: - logger.info("distributed environment initialization...") - - worker_list = distributed_args.worker.split(",") - - from intel_extension_for_transformers.transformers.utils.utility_tf import distributed_init - distributed_init(worker_list, "worker", distributed_args.task_index) - - strategy = tf.distribute.MultiWorkerMirroredStrategy() - from intel_extension_for_transformers.transformers.utils.utility_tf import get_filepath - training_args.output_dir = get_filepath(training_args.output_dir, strategy.cluster_resolver.task_type, strategy.cluster_resolver.task_id) - else: - strategy = training_args.strategy - #endregion - - # region Checkpoints - # Detecting last checkpoint. - checkpoint = None - if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir and not training_args.do_eval: - config_path = training_args.output_dir / CONFIG_NAME - weights_path = training_args.output_dir / TF2_WEIGHTS_NAME - if config_path.is_file() and weights_path.is_file(): - checkpoint = training_args.output_dir - logger.info( - f"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this" - " behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - else: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to continue regardless." - ) - - # endregion - - # If passed along, set the training seed now. 
- if training_args.seed is not None: - set_seed(training_args.seed) - - # region Load datasets - # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) - # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub). - # - # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called - # 'text' is found. You can easily tweak this behavior (see below). - # - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if data_args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - raw_datasets = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - ) - if "validation" not in raw_datasets.keys(): - raw_datasets["validation"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"train[:{data_args.validation_split_percentage}%]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - ) - raw_datasets["train"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"train[{data_args.validation_split_percentage}%:]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - ) - else: - data_files = {} - dataset_args = {} - if data_args.train_file is not None: - data_files["train"] = data_args.train_file - if data_args.validation_file is not None: - data_files["validation"] = data_args.validation_file - extension = ( - data_args.train_file.split(".")[-1] - if data_args.train_file is not None - else data_args.validation_file.split(".")[-1] - ) - if extension == "txt": - extension = "text" - 
dataset_args["keep_linebreaks"] = data_args.keep_linebreaks - raw_datasets = load_dataset( - extension, - data_files=data_files, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - **dataset_args, - ) - # If no validation data is there, validation_split_percentage will be used to divide the dataset. - if "validation" not in raw_datasets.keys(): - raw_datasets["validation"] = load_dataset( - extension, - data_files=data_files, - split=f"train[:{data_args.validation_split_percentage}%]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - **dataset_args, - ) - raw_datasets["train"] = load_dataset( - extension, - data_files=data_files, - split=f"train[{data_args.validation_split_percentage}%:]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - **dataset_args, - ) - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. - # endregion - # region Load pretrained model and tokenizer - # - # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. 
- config = AutoConfig.from_pretrained( - model_args.config_name if model_args.config_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - _commit_hash="main", - ) - tokenizer = AutoTokenizer.from_pretrained( - model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - use_fast=model_args.use_fast_tokenizer, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - _commit_hash="main", - ) - # endregion - - - # region Dataset preprocessing - # First we tokenize all the texts. - column_names = raw_datasets["train"].column_names - text_column_name = "text" if "text" in column_names else column_names[0] - - def tokenize_function(examples): - return tokenizer(examples[text_column_name], return_token_type_ids=True) - - tokenized_datasets = raw_datasets.map( - tokenize_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on dataset", - ) - - if data_args.block_size is None: - block_size = tokenizer.model_max_length - if block_size > 1024: - logger.warning( - f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " - "Picking 1024 instead. You can change that default value by passing --block_size xxx." - ) - block_size = 1024 - else: - if data_args.block_size > tokenizer.model_max_length: - logger.warning( - f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model" - f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." 
- ) - block_size = min(data_args.block_size, tokenizer.model_max_length) - - # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. - def group_texts(examples): - # Concatenate all texts. - concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} - total_length = len(concatenated_examples[list(examples.keys())[0]]) - # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can - # customize this part to your needs. - if total_length >= block_size: - total_length = (total_length // block_size) * block_size - # Split by chunks of max_len. - result = { - k: [t[i : i + block_size] for i in range(0, total_length, block_size)] - for k, t in concatenated_examples.items() - } - result["labels"] = result["input_ids"].copy() - return result - - # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder - # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower - # to preprocess. - # - # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information: - # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map - - tokenized_datasets = tokenized_datasets.map( - group_texts, - batched=True, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not data_args.overwrite_cache, - desc=f"Grouping texts in chunks of {block_size}", - ) - - train_dataset = tokenized_datasets["train"] - if data_args.validation_file is not None: - eval_dataset = tokenized_datasets["validation"] - else: - logger.info( - f"Validation file not found: using {data_args.validation_split_percentage}% of the dataset as validation" - " as provided in data_args" - ) - train_indices, val_indices = train_test_split( - list(range(len(train_dataset))), test_size=data_args.validation_split_percentage / 100 - ) - - eval_dataset = train_dataset.select(val_indices) - train_dataset = train_dataset.select(train_indices) - - if data_args.max_train_samples is not None: - max_train_samples = min(len(train_dataset), data_args.max_train_samples) - train_dataset = train_dataset.select(range(max_train_samples)) - if data_args.max_eval_samples is not None: - max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) - eval_dataset = eval_dataset.select(range(max_eval_samples)) - - # Log a few random samples from the training set: - for index in random.sample(range(len(train_dataset)), min(3, len(train_dataset))): - logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") - # endregion - - with strategy.scope(): - # region Prepare model - if checkpoint is not None: - model = TFAutoModelForCausalLM.from_pretrained(checkpoint, config=config, cache_dir=model_args.cache_dir,) - elif model_args.model_name_or_path: - model = TFAutoModelForCausalLM.from_pretrained(model_args.model_name_or_path, config=config, - cache_dir=model_args.cache_dir, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token 
else None,) - else: - logger.info("Training new model from scratch") - model = TFAutoModelForCausalLM.from_config(config) - - model.resize_token_embeddings(len(tokenizer)) - # endregion - - # region TF Dataset preparation - num_replicas = (len(worker_list) if worker_list is not None else 1) - options = tf.data.Options() - options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF - - # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in - # training. This is the recommended way to use a Hugging Face dataset when training with Keras. You can also - # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names - # yourself if you use this method, whereas they are automatically inferred from the model input names when - # using model.prepare_tf_dataset() - # For more info see the docs: - # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset - # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset - - if model_args.model_name_or_path == "distilgpt2": - train_dataset = train_dataset.remove_columns('token_type_ids') - eval_dataset = eval_dataset.remove_columns('token_type_ids') - - tf_train_dataset = model.prepare_tf_dataset( - train_dataset, - shuffle=True, - batch_size=num_replicas * training_args.per_device_train_batch_size, - ).with_options(options) - - tf_eval_dataset = model.prepare_tf_dataset( - eval_dataset, - shuffle=False, - batch_size=num_replicas * training_args.per_device_eval_batch_size, - drop_remainder=True, - ).with_options(options) - # endregion - - # region Optimizer and loss - num_train_steps = len(tf_train_dataset) * int(training_args.num_train_epochs) - if training_args.warmup_steps > 0: - num_warmup_steps = training_args.warmup_steps - elif training_args.warmup_ratio > 0: - num_warmup_steps = 
int(num_train_steps * training_args.warmup_ratio) - else: - num_warmup_steps = 0 - - # Bias and layernorm weights are automatically excluded from the decay - optimizer, lr_schedule = create_optimizer( - init_lr=training_args.learning_rate, - num_train_steps=num_train_steps, - num_warmup_steps=num_warmup_steps, - adam_beta1=training_args.adam_beta1, - adam_beta2=training_args.adam_beta2, - adam_epsilon=training_args.adam_epsilon, - weight_decay_rate=training_args.weight_decay, - adam_global_clipnorm=training_args.max_grad_norm, - ) - # no user-specified loss = will use the model internal loss - model.compile(optimizer=optimizer, jit_compile=training_args.xla) - - def compute_metrics(preds, labels): - preds = preds["logits"] - # preds have the same shape as the labels, after the argmax(-1) has been calculated - # by preprocess_logits_for_metrics but we need to shift the labels - labels = labels[:, 1:] - preds = preds[:, :-1] - return hf_compute_loss(labels, preds) - - # loss function for CLM model - def hf_compute_loss(labels, logits): - loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( - from_logits=True, reduction=tf.keras.losses.Reduction.NONE - ) - - # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway - unmasked_loss = loss_fn(tf.nn.relu(labels), logits) - # make sure only labels that are not equal to -100 affect the loss - loss_mask = tf.cast(labels != -100, dtype=unmasked_loss.dtype) - masked_loss = unmasked_loss * loss_mask - reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(loss_mask) - return tf.reshape(reduced_masked_loss, (1,)) - - def eval_func_clm(model): - label_ids: np.ndarray = None - - num_examples = sum(1 for _ in ( - tf_eval_dataset.unbatch() if hasattr(tf_eval_dataset, "unbatch") else tf_eval_dataset)) - logger.info(f"***** Running Evaluation *****") - logger.info(f" Num examples in dataset = {num_examples}") - logger.info(f" Batch size = 
{training_args.per_device_eval_batch_size}") - - preds: np.ndarray = None - infer = model.signatures["serving_default"] - - for idx, (inputs, labels) in enumerate(tf_eval_dataset): - for name in inputs: - inputs[name] = tf.constant(inputs[name].numpy(), dtype=infer.inputs[0].dtype) - - results = infer(**inputs) - if preds is None: - preds = results["Identity"].numpy() - else: - preds = np.append(preds, results["Identity"].numpy(), axis=0) - - if label_ids is None: - label_ids = labels[0].numpy() if isinstance( - labels, list) else labels.numpy() - else: - label_ids = np.append( - label_ids, - labels[0].numpy() - if isinstance(labels, list) else labels.numpy(), - axis=0) - test_predictions = {"logits": preds} - loss = compute_metrics(test_predictions, label_ids) - - return loss.numpy()[0] - - # region tuning - if optim_args.tune: - from intel_extension_for_transformers.transformers import metrics, objectives, QuantizationConfig, TFOptimization - optimization = TFOptimization( - model=model, - args=training_args, - train_dataset=tf_train_dataset, - eval_dataset=tf_eval_dataset, - compute_metrics=compute_metrics, - task_type=strategy.cluster_resolver.task_type if isinstance(strategy, tf.distribute.MultiWorkerMirroredStrategy) else None, - task_id=strategy.cluster_resolver.task_id if isinstance(strategy, tf.distribute.MultiWorkerMirroredStrategy) else None, - ) - - # use customized eval function - optimization.eval_func = eval_func_clm - - tune_metric = metrics.Metric( - name="loss", greater_is_better=False, is_relative=True, criterion=optim_args.perf_tol, - ) - quantization_config = QuantizationConfig( - framework="tensorflow", - approach="POSTTRAININGSTATIC", - metrics=[tune_metric], - objectives=[objectives.performance] - ) - quantized_model = optimization.quantize(quant_config=quantization_config) - exit(0) - # endregion - - # region Training and validation - if training_args.do_train: - logger.info("***** Running training *****") - logger.info(f" Num examples = 
{len(train_dataset)}") - logger.info(f" Num Epochs = {training_args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") - logger.info(f" Total train batch size = {training_args.per_device_train_batch_size * num_replicas}") - - # For long training runs, you may wish to use the PushToHub() callback here to save intermediate checkpoints - # to the Hugging Face Hub rather than just pushing the finished model. - # See https://huggingface.co/docs/transformers/main_classes/keras_callbacks#transformers.PushToHubCallback - history = model.fit( - tf_train_dataset, - validation_data=tf_eval_dataset, - epochs=int(training_args.num_train_epochs), - ) - train_loss = history.history["loss"][-1] - try: - train_perplexity = math.exp(train_loss) - except OverflowError: - train_perplexity = math.inf - logger.info(f" Final train loss: {train_loss:.3f}") - logger.info(f" Final train perplexity: {train_perplexity:.3f}") - validation_loss = history.history["val_loss"][-1] - try: - validation_perplexity = math.exp(validation_loss) - except OverflowError: - validation_perplexity = math.inf - logger.info(f" Final validation loss: {validation_loss:.3f}") - logger.info(f" Final validation perplexity: {validation_perplexity:.3f}") - - if training_args.output_dir is not None: - output_eval_file = os.path.join(training_args.output_dir, "all_results.json") - results_dict = dict() - results_dict["train_loss"] = train_loss - results_dict["train_perplexity"] = train_perplexity - results_dict["eval_loss"] = validation_loss - results_dict["eval_perplexity"] = validation_perplexity - with open(output_eval_file, "w") as writer: - writer.write(json.dumps(results_dict)) - - if training_args.output_dir is not None and not training_args.push_to_hub: - # If we're not pushing to hub, at least save a local copy when we're done - model.save_pretrained(training_args.output_dir) - # endregion - - # region Evaluation - if training_args.do_eval: - 
num_examples = sum(1 for _ in ( - tf_eval_dataset.unbatch() if hasattr(tf_eval_dataset, "unbatch") else tf_eval_dataset)) - - if optim_args.int8: - model = tf.saved_model.load(training_args.output_dir) - else: - from intel_extension_for_transformers.transformers.utils.utility_tf import keras2SavedModel - model = keras2SavedModel(model) - - preds: np.ndarray = None - label_ids: np.ndarray = None - infer = model.signatures["serving_default"] - - if optim_args.accuracy_only: - iterations = 1 - warmup = 0 - else: - iterations = 10 - warmup = 5 - latency_list = [] - - for idx in range(iterations): - iteration_time = 0 - for i, (inputs, labels) in enumerate(tf_eval_dataset): - for name in inputs: - inputs[name] = tf.constant(inputs[name].numpy(), dtype=infer.inputs[0].dtype) - - start = time.time() - results = infer(**inputs) - iteration_time += time.time() - start - if idx == 0: # only accumulate once all the preds and labels - if preds is None: - preds = results["Identity"].numpy() - else: - preds = np.append(preds, results["Identity"].numpy(), axis=0) - if label_ids is None: - label_ids = labels[0].numpy() if isinstance( - labels, list) else labels.numpy() - else: - label_ids = np.append( - label_ids, - labels[0].numpy() - if isinstance(labels, list) else labels.numpy(), - axis=0) - latency_list.append(iteration_time) - logger.info("Iteration {} time: {} sec".format(idx, iteration_time)) - - loss = compute_metrics({"logits": preds}, label_ids) - logger.info("\nEvaluation result: ") - logger.info("Accuracy: {}".format(loss.numpy()[0])) - - average_iteration_time = np.array(latency_list[warmup:]).mean() - logger.info( - "Throughput: {} samples/sec".format( - num_examples / average_iteration_time) - ) - #endregion - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/examples/huggingface/tensorflow/language-modeling/quantization/run_mlm.py b/examples/huggingface/tensorflow/language-modeling/quantization/run_mlm.py deleted file mode 100644 index 
be683113ccf..00000000000 --- a/examples/huggingface/tensorflow/language-modeling/quantization/run_mlm.py +++ /dev/null @@ -1,848 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2021 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) -on a text file or a dataset without using HuggingFace Trainer. -Here is the full list of checkpoints on the hub that can be fine-tuned by this script: -https://huggingface.co/models?filter=fill-mask -""" -# You can also adapt this script on your own mlm task. Pointers for this are left as comments. 
- -import json -import logging -import math -import os -import random -import sys -from dataclasses import dataclass, field -from itertools import chain -from pathlib import Path -from typing import Optional -import time - -import datasets -import tensorflow as tf -from datasets import load_dataset -from sklearn.model_selection import train_test_split - -import numpy as np - -import transformers -from transformers import ( - CONFIG_MAPPING, - CONFIG_NAME, - TF2_WEIGHTS_NAME, - TF_MODEL_FOR_MASKED_LM_MAPPING, - AutoConfig, - AutoTokenizer, - DataCollatorForLanguageModeling, - HfArgumentParser, - PushToHubCallback, - TFAutoModelForMaskedLM, - TFTrainingArguments, - create_optimizer, - set_seed, -) - -from transformers.utils.versions import require_version -from transformers.trainer_utils import get_last_checkpoint, is_main_process - -logger = logging.getLogger(__name__) -require_version("datasets>=1.8.0", "To fix: pip install -r examples/tensorflow/language-modeling/requirements.txt") -MODEL_CONFIG_CLASSES = list(TF_MODEL_FOR_MASKED_LM_MAPPING.keys()) -MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) - -# region Command-line arguments -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. - """ - - model_name_or_path: Optional[str] = field( - default=None, - metadata={ - "help": ( - "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." - ) - }, - ) - model_type: Optional[str] = field( - default=None, - metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, - ) - config_overrides: Optional[str] = field( - default=None, - metadata={ - "help": ( - "Override some existing default config settings when a model is trained from scratch. 
Example: " - "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" - ) - }, - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, - ) - model_revision: str = field( - default="main", - metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script " - "with private models)." - ) - }, - ) - - def __post_init__(self): - if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): - raise ValueError( - "--config_overrides can't be used in combination with --config_name or --model_name_or_path" - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. 
- """ - - dataset_name: Optional[str] = field( - default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) - validation_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} - ) - validation_split_percentage: Optional[int] = field( - default=5, - metadata={ - "help": "The percentage of the train set used as validation set in case there's no validation split" - }, - ) - max_seq_length: Optional[int] = field( - default=None, - metadata={ - "help": ( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated." - ) - }, - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - mlm_probability: float = field( - default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} - ) - line_by_line: bool = field( - default=False, - metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."}, - ) - pad_to_max_length: bool = field( - default=False, - metadata={ - "help": ( - "Whether to pad all samples to `max_seq_length`. " - "If False, will pad the samples dynamically when batching to the maximum length in the batch." 
- ) - }, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - ) - }, - ) - - def __post_init__(self): - if self.dataset_name is None and self.train_file is None and self.validation_file is None: - raise ValueError("Need either a dataset name or a training/validation file.") - else: - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." - if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." - - -@dataclass -class OptimizationArguments: - """ - Arguments pertaining to what type of optimization we are going to apply on the model. - """ - - tune: bool = field( - default=False, - metadata={"help": "Whether or not to apply quantization."}, - ) - quantization_approach: Optional[str] = field( - default="PostTrainingStatic", - metadata={"help": "Quantization approach. 
Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, - ) - metric_name: Optional[str] = field( - default=None, - metadata={"help": "Metric used for the tuning strategy."}, - ) - is_relative: Optional[bool] = field( - default=True, - metadata={"help": "Metric tolerance model, expected to be relative or absolute."}, - ) - perf_tol: Optional[float] = field( - default=0.01, - metadata={"help": "Performance tolerance when optimizing the model."}, - ) - benchmark: bool = field( - default=False, - metadata={"help": "run benchmark."}) - int8: bool = field( - default=False, - metadata={"help":"Whether to use the quantized int8 model."}) - accuracy_only: bool = field( - default=False, - metadata={"help":"Whether to only test accuracy for model tuned by Neural Compressor."}) - -@dataclass -class DistributedArguments: - """ - Arguments setting the distributed multinode environment - """ - - worker: str = field( - default=None, - metadata={"help": "List of node ip addresses in a string, and there should not be space between addresses."}, - ) - task_index: int = field( - default=0, - metadata={"help": "Worker index, and 0 represents the chief worker while other workers are set as 1,2,3..."}, - ) - - -# endregion - - -def main(): - # region Argument Parsing - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments, OptimizationArguments, DistributedArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. 
- model_args, data_args, training_args, optim_args, distributed_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args, optim_args, distributed_args = parser.parse_args_into_dataclasses() - - # region Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) - - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(training_args.local_rank): - transformers.utils.logging.set_verbosity_info() - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - logger.info(f"Training/evaluation parameters {training_args}") - # endregion - - # Sanity checks - if data_args.dataset_name is None and data_args.train_file is None and data_args.validation_file is None: - raise ValueError("Need either a dataset name or a training/validation file.") - else: - if data_args.train_file is not None: - extension = data_args.train_file.split(".")[-1] - assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file." - if data_args.validation_file is not None: - extension = data_args.validation_file.split(".")[-1] - assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file." 
- - if training_args.output_dir is not None: - training_args.output_dir = Path(training_args.output_dir) - os.makedirs(training_args.output_dir, exist_ok=True) - # endregion - - # region Set the multinode environment, the strategy and paths - strategy = None - worker_list = None - if distributed_args.worker is not None: - logger.info("distributed environment initialization...") - - worker_list = distributed_args.worker.split(",") - - from intel_extension_for_transformers.transformers.utils.utility_tf import distributed_init - distributed_init(worker_list, "worker", distributed_args.task_index) - - strategy = tf.distribute.MultiWorkerMirroredStrategy() - from intel_extension_for_transformers.transformers.utils.utility_tf import get_filepath - training_args.output_dir = get_filepath(training_args.output_dir, strategy.cluster_resolver.task_type, strategy.cluster_resolver.task_id) - else: - strategy = training_args.strategy - #endregion - - - # region Checkpoints - # Detecting last checkpoint. - checkpoint = None - if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir: - config_path = training_args.output_dir / CONFIG_NAME - weights_path = training_args.output_dir / TF2_WEIGHTS_NAME - if config_path.is_file() and weights_path.is_file(): - checkpoint = training_args.output_dir - logger.warning( - f"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this" - " behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - else: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to continue regardless." - ) - - # endregion - - # If passed along, set the training seed now. 
- if training_args.seed is not None: - set_seed(training_args.seed) - - # region Load datasets - # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) - # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub). - # - # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called - # 'text' is found. You can easily tweak this behavior (see below). - # - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if data_args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - raw_datasets = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - ) - if "validation" not in raw_datasets.keys(): - raw_datasets["validation"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"train[:{data_args.validation_split_percentage}%]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - ) - raw_datasets["train"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"train[{data_args.validation_split_percentage}%:]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - ) - else: - data_files = {} - dataset_args = {} - if data_args.train_file is not None: - data_files["train"] = data_args.train_file - if data_args.validation_file is not None: - data_files["validation"] = data_args.validation_file - extension = ( - data_args.train_file.split(".")[-1] - if data_args.train_file is not None - else data_args.validation_file.split(".")[-1] - ) - if extension == "txt": - extension = "text" - 
dataset_args["keep_linebreaks"] = data_args.keep_linebreaks - raw_datasets = load_dataset( - extension, - data_files=data_files, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - **dataset_args, - ) - # If no validation data is there, validation_split_percentage will be used to divide the dataset. - if "validation" not in raw_datasets.keys(): - raw_datasets["validation"] = load_dataset( - extension, - data_files=data_files, - split=f"train[:{data_args.validation_split_percentage}%]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - **dataset_args, - ) - raw_datasets["train"] = load_dataset( - extension, - data_files=data_files, - split=f"train[{data_args.validation_split_percentage}%:]", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - **dataset_args, - ) - - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. - # endregion - - # region Load pretrained model and tokenizer - # - # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. 
- config = AutoConfig.from_pretrained( - model_args.config_name if model_args.config_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - _commit_hash="main", - ) - tokenizer = AutoTokenizer.from_pretrained( - model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - use_fast=model_args.use_fast_tokenizer, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - _commit_hash="main", - ) - # endregion - - # region Dataset preprocessing - # First we tokenize all the texts. - column_names = raw_datasets["train"].column_names - text_column_name = "text" if "text" in column_names else column_names[0] - - if data_args.max_seq_length is None: - max_seq_length = tokenizer.model_max_length - if max_seq_length > 1024: - logger.warning( - f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " - "Picking 1024 instead. You can reduce that default value by passing --max_seq_length xxx." - ) - max_seq_length = 1024 - else: - if data_args.max_seq_length > tokenizer.model_max_length: - logger.warning( - f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" - f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." - ) - max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) - - if data_args.line_by_line: - # When using line_by_line, we just tokenize each nonempty line. 
- padding = "max_length" if data_args.pad_to_max_length else False - - def tokenize_function(examples): - # Remove empty lines - examples[text_column_name] = [ - line for line in examples[text_column_name] if len(line) > 0 and not line.isspace() - ] - return tokenizer( - examples[text_column_name], - padding=padding, - truncation=True, - max_length=max_seq_length, - # We use this option because DataCollatorForLanguageModeling (see below) is more efficient when it - # receives the `special_tokens_mask`. - return_special_tokens_mask=True, - return_token_type_ids=True - ) - - tokenized_datasets = raw_datasets.map( - tokenize_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=[text_column_name], - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on dataset line_by_line", - ) - else: - # Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts. - # We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more - # efficient when it receives the `special_tokens_mask`. - def tokenize_function(examples): - return tokenizer(examples[text_column_name], return_special_tokens_mask=True, return_token_type_ids=True) - - tokenized_datasets = raw_datasets.map( - tokenize_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on every text in dataset", - ) - - # Main data processing function that will concatenate all texts from our dataset and generate chunks of - # max_seq_length. - def group_texts(examples): - # Concatenate all texts. 
- concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} - total_length = len(concatenated_examples[list(examples.keys())[0]]) - # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can - # customize this part to your needs. - if total_length >= max_seq_length: - total_length = (total_length // max_seq_length) * max_seq_length - # Split by chunks of max_len. - result = { - k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)] - for k, t in concatenated_examples.items() - } - return result - - # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a - # remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value - # might be slower to preprocess. - # - # To speed up this part, we use multiprocessing. See the documentation of the map method for more information: - # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map - - tokenized_datasets = tokenized_datasets.map( - group_texts, - batched=True, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not data_args.overwrite_cache, - desc=f"Grouping texts in chunks of {max_seq_length}", - ) - - train_dataset = tokenized_datasets["train"] - - if data_args.validation_file is not None: - eval_dataset = tokenized_datasets["validation"] - else: - logger.info( - f"Validation file not found: using {data_args.validation_split_percentage}% of the dataset as validation" - " as provided in data_args" - ) - train_indices, val_indices = train_test_split( - list(range(len(train_dataset))), test_size=data_args.validation_split_percentage / 100 - ) - - eval_dataset = train_dataset.select(val_indices) - train_dataset = train_dataset.select(train_indices) - - if data_args.max_train_samples is not None: - max_train_samples = min(len(train_dataset), data_args.max_train_samples) - 
train_dataset = train_dataset.select(range(max_train_samples)) - if data_args.max_eval_samples is not None: - max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) - eval_dataset = eval_dataset.select(range(max_eval_samples)) - - # Log a few random samples from the training set: - for index in random.sample(range(len(train_dataset)), min(3, len(train_dataset))): - logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") - # endregion - - with strategy.scope(): - # region Prepare model - if checkpoint is not None: - model = TFAutoModelForMaskedLM.from_pretrained(checkpoint, config=config, cache_dir=model_args.cache_dir,) - elif model_args.model_name_or_path: - model = TFAutoModelForMaskedLM.from_pretrained(model_args.model_name_or_path, config=config, - cache_dir=model_args.cache_dir, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None,) - else: - logger.info("Training new model from scratch") - model = TFAutoModelForMaskedLM.from_config(config) - - model.resize_token_embeddings(len(tokenizer)) - # endregion - - # region TF Dataset preparation - num_replicas = training_args.strategy.num_replicas_in_sync - data_collator = DataCollatorForLanguageModeling( - tokenizer=tokenizer, mlm_probability=data_args.mlm_probability, return_tensors="tf" - ) - options = tf.data.Options() - options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF - - # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in - # training. This is the recommended way to use a Hugging Face dataset when training with Keras. 
You can also - # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names - # yourself if you use this method, whereas they are automatically inferred from the model input names when - # using model.prepare_tf_dataset() - # For more info see the docs: - # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset - # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset - - tf_train_dataset = model.prepare_tf_dataset( - train_dataset, - shuffle=True, - batch_size=num_replicas * training_args.per_device_train_batch_size, - collate_fn=data_collator, - ).with_options(options) - - tf_eval_dataset = model.prepare_tf_dataset( - eval_dataset, - # labels are passed as input, as we will use the model's internal loss - shuffle=False, - batch_size=num_replicas * training_args.per_device_eval_batch_size, - collate_fn=data_collator, - drop_remainder=True, - ).with_options(options) - # endregion - - # region Optimizer and loss - num_train_steps = len(tf_train_dataset) * int(training_args.num_train_epochs) - if training_args.warmup_steps > 0: - num_warmup_steps = training_args.warmup_steps - elif training_args.warmup_ratio > 0: - num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) - else: - num_warmup_steps = 0 - - # Bias and layernorm weights are automatically excluded from the decay - optimizer, lr_schedule = create_optimizer( - init_lr=training_args.learning_rate, - num_train_steps=num_train_steps, - num_warmup_steps=num_warmup_steps, - adam_beta1=training_args.adam_beta1, - adam_beta2=training_args.adam_beta2, - adam_epsilon=training_args.adam_epsilon, - weight_decay_rate=training_args.weight_decay, - adam_global_clipnorm=training_args.max_grad_norm, - ) - - # no user-specified loss = will use the model internal loss - model.compile(optimizer=optimizer, jit_compile=training_args.xla, run_eagerly=True) 
- # endregion - - def compute_metrics(preds, labels): - preds = preds["logits"] - return hf_compute_loss(labels, preds) - - # loss function for CLM model - def hf_compute_loss(labels, logits): - loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( - from_logits=True, reduction=tf.keras.losses.Reduction.NONE - ) - - # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway - unmasked_loss = loss_fn(tf.nn.relu(labels), logits) - # make sure only labels that are not equal to -100 affect the loss - loss_mask = tf.cast(labels != -100, dtype=unmasked_loss.dtype) - masked_loss = unmasked_loss * loss_mask - reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(loss_mask) - return tf.reshape(reduced_masked_loss, (1,)) - - def eval_func_mlm(model): - label_ids: np.ndarray = None - - num_examples = sum(1 for _ in ( - tf_eval_dataset.unbatch() if hasattr(tf_eval_dataset, "unbatch") else tf_eval_dataset)) - logger.info(f"***** Running Evaluation *****") - logger.info(f" Num examples in dataset = {num_examples}") - logger.info(f" Batch size = {training_args.per_device_eval_batch_size}") - - preds: np.ndarray = None - infer = model.signatures["serving_default"] - - for idx, (inputs, labels) in enumerate(tf_eval_dataset): - for name in inputs: - inputs[name] = tf.constant(inputs[name].numpy(), dtype=infer.inputs[0].dtype) - - results = infer(**inputs) - - if preds is None: - preds = results["Identity"].numpy() - else: - preds = np.append(preds, results["Identity"].numpy(), axis=0) - - if label_ids is None: - label_ids = labels[0].numpy() if isinstance( - labels, list) else labels.numpy() - else: - label_ids = np.append( - label_ids, - labels[0].numpy() - if isinstance(labels, list) else labels.numpy(), - axis=0) - test_predictions = {"logits": preds} - loss = compute_metrics(test_predictions, label_ids) - - return loss.numpy()[0] - - # region tuning - if optim_args.tune: - from 
intel_extension_for_transformers.transformers import metrics, objectives, QuantizationConfig, TFOptimization - optimization = TFOptimization( - model=model, - args=training_args, - train_dataset=tf_train_dataset, - eval_dataset=tf_eval_dataset, - compute_metrics=compute_metrics, - task_type=strategy.cluster_resolver.task_type if isinstance(strategy, tf.distribute.MultiWorkerMirroredStrategy) else None, - task_id=strategy.cluster_resolver.task_id if isinstance(strategy, tf.distribute.MultiWorkerMirroredStrategy) else None, - ) - - # use customized eval function - optimization.eval_func = eval_func_mlm - - tune_metric = metrics.Metric( - name="loss", greater_is_better=False, is_relative=True, criterion=optim_args.perf_tol, - ) - quantization_config = QuantizationConfig( - framework="tensorflow", - approach="POSTTRAININGSTATIC", - metrics=[tune_metric], - objectives=[objectives.performance] - ) - quantized_model = optimization.quantize(quant_config=quantization_config) - exit(0) - # endregion - - # region Training and validation - if training_args.do_train: - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num Epochs = {training_args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") - logger.info(f" Total train batch size = {training_args.per_device_train_batch_size * num_replicas}") - - # For long training runs, you may wish to use the PushToHub() callback here to save intermediate checkpoints - # to the Hugging Face Hub rather than just pushing the finished model. 
- # See https://huggingface.co/docs/transformers/main_classes/keras_callbacks#transformers.PushToHubCallback - - history = model.fit( - tf_train_dataset, - validation_data=tf_eval_dataset, - epochs=int(training_args.num_train_epochs), - callbacks=callbacks, - ) - train_loss = history.history["loss"][-1] - try: - train_perplexity = math.exp(train_loss) - except OverflowError: - train_perplexity = math.inf - logger.info(f" Final train loss: {train_loss:.3f}") - logger.info(f" Final train perplexity: {train_perplexity:.3f}") - - validation_loss = history.history["val_loss"][-1] - try: - validation_perplexity = math.exp(validation_loss) - except OverflowError: - validation_perplexity = math.inf - logger.info(f" Final validation loss: {validation_loss:.3f}") - logger.info(f" Final validation perplexity: {validation_perplexity:.3f}") - - if training_args.output_dir is not None: - output_eval_file = os.path.join(training_args.output_dir, "all_results.json") - results_dict = dict() - results_dict["train_loss"] = train_loss - results_dict["train_perplexity"] = train_perplexity - results_dict["eval_loss"] = validation_loss - results_dict["eval_perplexity"] = validation_perplexity - with open(output_eval_file, "w") as writer: - writer.write(json.dumps(results_dict)) - # endregion - - # region Evaluation - if training_args.do_eval: - num_examples = sum(1 for _ in ( - tf_eval_dataset.unbatch() if hasattr(tf_eval_dataset, "unbatch") else tf_eval_dataset)) - - if optim_args.int8: - model = tf.saved_model.load(training_args.output_dir) - else: - from intel_extension_for_transformers.transformers.utils.utility_tf import keras2SavedModel - model = keras2SavedModel(model) - - preds: np.ndarray = None - label_ids: np.ndarray = None - infer = model.signatures["serving_default"] - - if optim_args.accuracy_only: - iterations = 1 - warmup = 0 - else: - iterations = 10 - warmup = 5 - latency_list = [] - - for idx in range(iterations): - iteration_time = 0 - for i, (inputs, labels) in 
enumerate(tf_eval_dataset): - for name in inputs: - inputs[name] = tf.constant(inputs[name].numpy(), dtype=infer.inputs[0].dtype) - - start = time.time() - results = infer(**inputs) - iteration_time += time.time() - start - if idx == 0: # only accumulate once all the preds and labels - if preds is None: - preds = results["Identity"].numpy() - else: - preds = np.append(preds, results["Identity"].numpy(), axis=0) - - if label_ids is None: - label_ids = labels[0].numpy() if isinstance( - labels, list) else labels.numpy() - else: - label_ids = np.append( - label_ids, - labels[0].numpy() - if isinstance(labels, list) else labels.numpy(), - axis=0) - latency_list.append(iteration_time) - logger.info("Iteration {} time: {} sec".format(idx, iteration_time)) - - loss = compute_metrics({"logits": preds}, label_ids) - logger.info("\nEvaluation result: ") - logger.info("Accuracy: {}".format(loss.numpy()[0])) - - average_iteration_time = np.array(latency_list[warmup:]).mean() - logger.info( - "Throughput: {} samples/sec".format( - num_examples / average_iteration_time) - ) - #endregion - - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/examples/huggingface/tensorflow/multiple-choice/quantization/README.md b/examples/huggingface/tensorflow/multiple-choice/quantization/README.md deleted file mode 100644 index d204e4f0ae0..00000000000 --- a/examples/huggingface/tensorflow/multiple-choice/quantization/README.md +++ /dev/null @@ -1,34 +0,0 @@ -Step-by-Step -========= - -This document describes the step-by-step instructions for reproducing the quantization on models for the multiple choice tasks on the SWAG dataset. - -# Prerequisite -## 1. Installation - -Make sure you have installed Intel® Extension for Transformers and all the dependencies in the current example: - -```shell -pip install intel-extension-for-transformers -pip install -r requirements.txt -``` - -# Run - -## 1. 
Run Command (Shell) - -- Topology: - - distilbert_swag - -- To get the int8 model - -``` -bash run_tuning.sh --topology=[topology] -``` - -- To benchmark the int8 model - - -``` -bash run_benchmark.sh --topology=[topology] --mode=benchmark --int8=true -``` \ No newline at end of file diff --git a/examples/huggingface/tensorflow/multiple-choice/quantization/requirements.txt b/examples/huggingface/tensorflow/multiple-choice/quantization/requirements.txt deleted file mode 100644 index ffa62da04e1..00000000000 --- a/examples/huggingface/tensorflow/multiple-choice/quantization/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -datasets >= 1.17 -sentencepiece != 0.1.92 -protobuf -intel-tensorflow -transformers -accelerate \ No newline at end of file diff --git a/examples/huggingface/tensorflow/multiple-choice/quantization/run_benchmark.sh b/examples/huggingface/tensorflow/multiple-choice/quantization/run_benchmark.sh deleted file mode 100644 index d43bc97a53f..00000000000 --- a/examples/huggingface/tensorflow/multiple-choice/quantization/run_benchmark.sh +++ /dev/null @@ -1,101 +0,0 @@ -#!/bin/bash -set -x - -function main { - - init_params "$@" - run_benchmark - -} - -# init params -function init_params { - topology="distilbert_swag" - iters=100 - batch_size=16 - tuned_checkpoint=saved_results - cache_dir="cache" - for var in "$@" - do - case $var in - --topology=*) - topology=$(echo $var |cut -f2 -d=) - ;; - --dataset_location=*) - dataset_location=$(echo $var |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo $var |cut -f2 -d=) - ;; - --mode=*) - mode=$(echo $var |cut -f2 -d=) - ;; - --batch_size=*) - batch_size=$(echo $var |cut -f2 -d=) - ;; - --iters=*) - iters=$(echo ${var} |cut -f2 -d=) - ;; - --int8=*) - int8=$(echo ${var} |cut -f2 -d=) - ;; - --config=*) - tuned_checkpoint=$(echo $var |cut -f2 -d=) - ;; - --worker=*) - worker=$(echo $var |cut -f2 -d=) - ;; - --task_index=*) - task_index=$(echo $var |cut -f2 -d=) - ;; - --cache_dir=*) - cache_dir=$(echo $var 
|cut -f2 -d=) - ;; - *) - echo "Error: No such parameter: ${var}" - exit 1 - ;; - esac - done - -} - - -# run_benchmark -function run_benchmark { - extra_cmd='' - MAX_SEQ_LENGTH=128 - - if [[ ${mode} == "accuracy" ]]; then - mode_cmd=" --accuracy_only" - elif [[ ${mode} == "benchmark" ]]; then - mode_cmd=" --benchmark " - else - echo "Error: No such mode: ${mode}" - exit 1 - fi - - if [ "${topology}" = "distilbert_swag" ]; then - script="run_swag.py" - model_name_or_path="Rocketknight1/bert-base-uncased-finetuned-swag" - # add following parameters for quicker debugging - extra_cmd=$extra_cmd" --max_eval_samples 512" - fi - - if [[ ${int8} == "true" ]]; then - extra_cmd=$extra_cmd" --int8" - fi - echo $extra_cmd - - python -u ${script} \ - --model_name_or_path ${model_name_or_path} \ - --do_eval \ - --per_device_eval_batch_size ${batch_size} \ - --output_dir ${tuned_checkpoint} \ - --overwrite_output_dir \ - --cache_dir ${cache_dir} \ - ${mode_cmd} \ - ${extra_cmd} -} - -main "$@" diff --git a/examples/huggingface/tensorflow/multiple-choice/quantization/run_swag.py b/examples/huggingface/tensorflow/multiple-choice/quantization/run_swag.py deleted file mode 100644 index dff1ae14227..00000000000 --- a/examples/huggingface/tensorflow/multiple-choice/quantization/run_swag.py +++ /dev/null @@ -1,653 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -""" -Fine-tuning the library models for multiple choice. -""" -# You can also adapt this script on your own multiple choice task. Pointers for this are left as comments. - -import json -import logging -import os -import sys -from dataclasses import dataclass, field -from itertools import chain -from pathlib import Path -from typing import Optional, Union -import numpy as np - -import datasets -import tensorflow as tf -from datasets import load_dataset - -import time - -import transformers -from transformers import ( - CONFIG_NAME, - TF2_WEIGHTS_NAME, - AutoConfig, - AutoTokenizer, - DefaultDataCollator, - HfArgumentParser, - PushToHubCallback, - TFAutoModelForMultipleChoice, - TFTrainingArguments, - create_optimizer, - set_seed, -) -from transformers.tokenization_utils_base import PreTrainedTokenizerBase -from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry -from transformers.trainer_utils import is_main_process - -logger = logging.getLogger(__name__) - - -# region Helper classes and functions - - -@dataclass -class DataCollatorForMultipleChoice: - """ - Data collator that will dynamically pad the inputs for multiple choice received. - Args: - tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]): - The tokenizer used for encoding the data. - padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): - Select a strategy to pad the returned sequences (according to the model's padding side and padding index) - among: - - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence - if provided). - - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum - acceptable input length for the model if that argument is not provided. 
- - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different - lengths). - max_length (`int`, *optional*): - Maximum length of the returned list and optionally padding length (see above). - pad_to_multiple_of (`int`, *optional*): - If set will pad the sequence to a multiple of the provided value. - This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= - 7.5 (Volta). - """ - - tokenizer: PreTrainedTokenizerBase - padding: Union[bool, str, PaddingStrategy] = True - max_length: Optional[int] = None - pad_to_multiple_of: Optional[int] = None - - def __call__(self, features): - label_name = "label" if "label" in features[0].keys() else "labels" - labels = [feature.pop(label_name) for feature in features] - batch_size = len(features) - num_choices = len(features[0]["input_ids"]) - flattened_features = [ - [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features - ] - flattened_features = list(chain(*flattened_features)) - - batch = self.tokenizer.pad( - flattened_features, - padding=self.padding, - max_length=self.max_length, - pad_to_multiple_of=self.pad_to_multiple_of, - return_tensors="tf", - ) - - # Un-flatten - batch = {k: tf.reshape(v, (batch_size, num_choices, -1)) for k, v in batch.items()} - # Add back labels - batch["labels"] = tf.convert_to_tensor(labels, dtype=tf.int64) - return batch - - -# endregion - -# region Arguments -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. 
- """ - - model_name_or_path: str = field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, - ) - model_revision: str = field( - default="main", - metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script " - "with private models)." - ) - }, - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - """ - - train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) - validation_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - max_seq_length: Optional[int] = field( - default=None, - metadata={ - "help": ( - "The maximum total input sequence length after tokenization. 
If passed, sequences longer " - "than this will be truncated, sequences shorter will be padded." - ) - }, - ) - pad_to_max_length: bool = field( - default=False, - metadata={ - "help": ( - "Whether to pad all samples to the maximum sentence length. " - "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " - "efficient on GPU but very bad for TPU." - ) - }, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - ) - }, - ) - - def __post_init__(self): - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." - if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." - - - -@dataclass -class OptimizationArguments: - """ - Arguments pertaining to what type of optimization we are going to apply on the model. - """ - - tune: bool = field( - default=False, - metadata={"help": "Whether or not to apply quantization."}, - ) - quantization_approach: Optional[str] = field( - default="PostTrainingStatic", - metadata={"help": "Quantization approach. 
Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, - ) - metric_name: Optional[str] = field( - default=None, - metadata={"help": "Metric used for the tuning strategy."}, - ) - is_relative: Optional[bool] = field( - default=True, - metadata={"help": "Metric tolerance model, expected to be relative or absolute."}, - ) - perf_tol: Optional[float] = field( - default=0.01, - metadata={"help": "Performance tolerance when optimizing the model."}, - ) - benchmark: bool = field( - default=False, - metadata={"help": "run benchmark."}) - int8: bool = field( - default=False, - metadata={"help":"Whether to use the quantized int8 model."}) - accuracy_only: bool = field( - default=False, - metadata={"help":"Whether to only test accuracy for model tuned by Neural Compressor."}) - -@dataclass -class DistributedArguments: - """ - Arguments setting the distributed multinode environment - """ - - worker: str = field( - default=None, - metadata={"help": "List of node ip addresses in a string, and there should not be space between addresses."}, - ) - task_index: int = field( - default=0, - metadata={"help": "Worker index, and 0 represents the chief worker while other workers are set as 1,2,3..."}, - ) - -# endregion - - - -def main(): - # region Argument Parsing - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments, OptimizationArguments, DistributedArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. 
- model_args, data_args, training_args, optim_args, distributed_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args, optim_args, distributed_args = parser.parse_args_into_dataclasses() - - output_dir = Path(training_args.output_dir) - output_dir.mkdir(parents=True, exist_ok=True) - # endregion - - # region Logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) - - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(training_args.local_rank): - transformers.utils.logging.set_verbosity_info() - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - logger.info(f"Training/evaluation parameters {training_args}") - # endregion - - # region Set the multinode environment, the strategy and paths - strategy = None - worker_list = None - if distributed_args.worker is not None: - logger.info("distributed environment initialization...") - - worker_list = distributed_args.worker.split(",") - - from intel_extension_for_transformers.transformers.utils.utility_tf import distributed_init - distributed_init(worker_list, "worker", distributed_args.task_index) - - strategy = tf.distribute.MultiWorkerMirroredStrategy() - from intel_extension_for_transformers.transformers.utils.utility_tf import get_filepath - training_args.output_dir = get_filepath(training_args.output_dir, strategy.cluster_resolver.task_type, strategy.cluster_resolver.task_id) - else: - strategy = training_args.strategy - #endregion - - # region Checkpoints - checkpoint = None - if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir: - if (output_dir / CONFIG_NAME).is_file() and (output_dir / 
TF2_WEIGHTS_NAME).is_file(): - checkpoint = output_dir - logger.info( - f"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this" - " behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - else: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to continue regardless." - ) - # endregion - - # Set seed before initializing model. - set_seed(training_args.seed) - - # region Load datasets - # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) - # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub). - - # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called - # 'text' is found. You can easily tweak this behavior (see below). - - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if data_args.train_file is not None or data_args.validation_file is not None: - data_files = {} - if data_args.train_file is not None: - data_files["train"] = data_args.train_file - if data_args.validation_file is not None: - data_files["validation"] = data_args.validation_file - extension = data_args.train_file.split(".")[-1] - raw_datasets = load_dataset( - extension, - data_files=data_files, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - ) - else: - # Downloading and loading the swag dataset from the hub. 
- raw_datasets = load_dataset( - "swag", - "regular", - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - ) - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. - - # When using your own dataset or a different dataset from swag, you will probably need to change this. - ending_names = [f"ending{i}" for i in range(4)] - context_name = "sent1" - question_header_name = "sent2" - # endregion - - # region Load model config and tokenizer - if checkpoint is not None: - config_path = training_args.output_dir - elif model_args.config_name: - config_path = model_args.config_name - else: - config_path = model_args.model_name_or_path - - # Distributed training: - # The .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. - config = AutoConfig.from_pretrained( - config_path, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - _commit_hash="main", - ) - tokenizer = AutoTokenizer.from_pretrained( - model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - use_fast=model_args.use_fast_tokenizer, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - _commit_hash="main", - ) - # endregion - - # region Dataset preprocessing - if data_args.max_seq_length is None: - max_seq_length = tokenizer.model_max_length - if max_seq_length > 1024: - logger.warning( - f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " - "Picking 1024 instead. You can change that default value by passing --max_seq_length xxx." 
- ) - max_seq_length = 1024 - else: - if data_args.max_seq_length > tokenizer.model_max_length: - logger.warning( - f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" - f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." - ) - max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) - - def preprocess_function(examples): - first_sentences = [[context] * 4 for context in examples[context_name]] - question_headers = examples[question_header_name] - second_sentences = [ - [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers) - ] - - # Flatten out - first_sentences = list(chain(*first_sentences)) - second_sentences = list(chain(*second_sentences)) - - # Tokenize - tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True, max_length=max_seq_length) - # Un-flatten - data = {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()} - return data - - - train_dataset = raw_datasets["train"] - if data_args.max_train_samples is not None: - max_train_samples = min(len(train_dataset), data_args.max_train_samples) - train_dataset = train_dataset.select(range(max_train_samples)) - train_dataset = train_dataset.map( - preprocess_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not data_args.overwrite_cache, - ) - - - eval_dataset = raw_datasets["validation"] - if data_args.max_eval_samples is not None: - max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) - eval_dataset = eval_dataset.select(range(max_eval_samples)) - eval_dataset = eval_dataset.map( - preprocess_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not data_args.overwrite_cache, - ) - - if data_args.pad_to_max_length: - data_collator = DefaultDataCollator(return_tensors="tf") - else: - # custom class 
defined above, as HF has no data collator for multiple choice - data_collator = DataCollatorForMultipleChoice(tokenizer) - # endregion - - with strategy.scope(): - # region Build model - if checkpoint is None: - model_path = model_args.model_name_or_path - else: - model_path = checkpoint - model = TFAutoModelForMultipleChoice.from_pretrained( - model_path, - config=config, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - ) - - num_replicas = training_args.strategy.num_replicas_in_sync - total_train_batch_size = training_args.per_device_train_batch_size * num_replicas - total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas - - num_train_steps = (len(train_dataset) // total_train_batch_size) * int(training_args.num_train_epochs) - if training_args.warmup_steps > 0: - num_warmup_steps = training_args.warmup_steps - elif training_args.warmup_ratio > 0: - num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) - else: - num_warmup_steps = 0 - optimizer, lr_schedule = create_optimizer( - init_lr=training_args.learning_rate, - num_train_steps=num_train_steps, - num_warmup_steps=num_warmup_steps, - adam_beta1=training_args.adam_beta1, - adam_beta2=training_args.adam_beta2, - adam_epsilon=training_args.adam_epsilon, - weight_decay_rate=training_args.weight_decay, - adam_global_clipnorm=training_args.max_grad_norm, - ) - - - dataset_options = tf.data.Options() - dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF - - # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in - # training. This is the recommended way to use a Hugging Face dataset when training with Keras. 
You can also - # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names - # yourself if you use this method, whereas they are automatically inferred from the model input names when - # using model.prepare_tf_dataset() - # For more info see the docs: - # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset - # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset - - tf_train_dataset = model.prepare_tf_dataset( - train_dataset, - shuffle=True, - batch_size=total_train_batch_size, - collate_fn=data_collator, - ).with_options(dataset_options) - - tf_eval_dataset = model.prepare_tf_dataset( - eval_dataset, - shuffle=False, - batch_size=total_eval_batch_size, - collate_fn=data_collator, - drop_remainder=True, - ).with_options(dataset_options) - - model.compile(optimizer=optimizer, metrics=["accuracy"], jit_compile=training_args.xla) - # endregion - - def compute_metrics(preds, labels): - predictions = preds["logits"] - preds = np.argmax(predictions, axis=1) - return {"accuracy": (preds == labels).astype(np.float32).mean().item()} - - # region tuning - if optim_args.tune: - from intel_extension_for_transformers.transformers import metrics, objectives, QuantizationConfig, TFOptimization - optimization = TFOptimization( - model=model, - args=training_args, - train_dataset=tf_train_dataset, - eval_dataset=tf_eval_dataset, - compute_metrics=compute_metrics, - task_type=strategy.cluster_resolver.task_type if isinstance(strategy, tf.distribute.MultiWorkerMirroredStrategy) else None, - task_id=strategy.cluster_resolver.task_id if isinstance(strategy, tf.distribute.MultiWorkerMirroredStrategy) else None, - ) - - # use customized eval function - tune_metric = metrics.Metric( - name="accuracy", greater_is_better=True, is_relative=True, criterion=optim_args.perf_tol, - ) - quantization_config = QuantizationConfig( - 
framework="tensorflow", - approach="POSTTRAININGSTATIC", - metrics=[tune_metric], - objectives=[objectives.performance] - ) - quantized_model = optimization.quantize(quant_config=quantization_config) - exit(0) - # endregion - - # region Training - eval_metrics = None - if training_args.do_train: - history = model.fit( - tf_train_dataset, - validation_data=tf_eval_dataset, - epochs=int(training_args.num_train_epochs), - ) - model.save("finetuned_model") - eval_metrics = {key: val[-1] for key, val in history.history.items()} - # endregion - - # region Evaluation - if training_args.do_eval: - num_examples = sum(1 for _ in ( - tf_eval_dataset.unbatch() if hasattr(tf_eval_dataset, "unbatch") else tf_eval_dataset)) - - if optim_args.int8: - model = tf.saved_model.load(training_args.output_dir) - else: - from intel_extension_for_transformers.transformers.utils.utility_tf import keras2SavedModel - model = keras2SavedModel(model) - - logger.info(f"***** Running Evaluation *****") - logger.info(f" Num examples in dataset = {num_examples}") - logger.info(f" Batch size = {training_args.per_device_eval_batch_size}") - - preds: np.ndarray = None - label_ids: np.ndarray = None - infer = model.signatures["serving_default"] - - if optim_args.accuracy_only: - iterations = 1 - warmup = 0 - else: - iterations = 10 - warmup = 5 - latency_list = [] - - for idx in range(iterations): - iteration_time = 0 - for i, (inputs, labels) in enumerate(tf_eval_dataset): - for name in inputs: - inputs[name] = tf.constant(inputs[name].numpy(), dtype=infer.inputs[0].dtype) - - start = time.time() - results = infer(**inputs) - iteration_time += time.time() - start - if idx == 0: # only accumulate once all the preds and labels - if preds is None: - preds = results["Identity"].numpy() - else: - preds = np.append(preds, results["Identity"].numpy(), axis=0) - if label_ids is None: - label_ids = labels[0].numpy() if isinstance( - labels, list) else labels.numpy() - else: - label_ids = np.append( - 
label_ids, - labels[0].numpy() if isinstance(labels, list) else labels.numpy(), - axis=0) - latency_list.append(iteration_time) - logger.info("Iteration {} time: {} sec".format(idx, iteration_time)) - - test_predictions = {"logits": preds} - eval_metrics = compute_metrics(test_predictions, label_ids) - logger.info("\nEvaluation result: ") - logger.info("Accuracy: {}".format(eval_metrics["accuracy"])) - - average_iteration_time = np.array(latency_list[warmup:]).mean() - logger.info( - "Throughput: {} samples/sec".format( - num_examples / average_iteration_time) - ) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/examples/huggingface/tensorflow/multiple-choice/quantization/run_tuning.sh b/examples/huggingface/tensorflow/multiple-choice/quantization/run_tuning.sh deleted file mode 100644 index 79e6c5b7e87..00000000000 --- a/examples/huggingface/tensorflow/multiple-choice/quantization/run_tuning.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/bin/bash -set -x - -function main { - - init_params "$@" - run_tuning - -} - -# init params -function init_params { - topology="distilbert" - tuned_checkpoint="saved_results" - extra_cmd="" - batch_size=8 - MAX_SEQ_LENGTH=128 - model_type="bert" - approach="PostTrainingStatic" - cache_dir="cache" - for var in "$@" - do - case $var in - --topology=*) - topology=$(echo $var |cut -f2 -d=) - ;; - --dataset_location=*) - dataset_location=$(echo $var |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo $var |cut -f2 -d=) - ;; - --output_model=*) - tuned_checkpoint=$(echo $var |cut -f2 -d=) - ;; - --worker=*) - worker=$(echo $var |cut -f2 -d=) - ;; - --task_index=*) - task_index=$(echo $var |cut -f2 -d=) - ;; - --cache_dir=*) - cache_dir=$(echo $var |cut -f2 -d=) - ;; - *) - echo "Error: No such parameter: ${var}" - exit 1 - ;; - esac - done - -} - -# run_tuning -function run_tuning { - if [ "${topology}" = "distilbert_swag" ]; then - script="run_swag.py" - 
model_name_or_path="Rocketknight1/bert-base-uncased-finetuned-swag" - approach="PostTrainingStatic" - # add following parameters for quicker debugging - extra_cmd=$extra_cmd" --max_train_samples 512 --max_eval_samples 1024 --perf_tol 0.035" - fi - - if [ "${worker}" = "" ] - then - python -u ${script} \ - --model_name_or_path ${model_name_or_path} \ - --output_dir ${tuned_checkpoint} \ - --quantization_approach ${approach} \ - --do_train \ - --tune \ - --overwrite_output_dir \ - --cache_dir ${cache_dir} \ - ${extra_cmd} - else - python -u ${script} \ - --model_name_or_path ${model_name_or_path} \ - --task_name ${TASK_NAME} \ - --output_dir ${tuned_checkpoint} \ - --quantization_approach ${approach} \ - --do_train \ - --tune \ - --overwrite_output_dir \ - --cache_dir ${cache_dir} \ - --worker "${worker}" \ - --task_index ${task_index} \ - ${extra_cmd} - fi -} - -main "$@" diff --git a/examples/huggingface/tensorflow/text-classification/pruning/README.md b/examples/huggingface/tensorflow/text-classification/pruning/README.md deleted file mode 100644 index b636b9de404..00000000000 --- a/examples/huggingface/tensorflow/text-classification/pruning/README.md +++ /dev/null @@ -1,86 +0,0 @@ -Step-by-Step -========= - -This document describes the step-by-step instructions for reproducing the pruning on models for the text classification (GLUE) tasks. - -# Prerequisite -## 1. Installation - -Make sure you have installed Intel® Extension for Transformers and all the dependencies in the current example: - -```shell -pip install intel-extension-for-transformers -pip install -r requirements.txt -pip install transformers==4.34.1 -``` ->**Note**: Please use transformers no higher than 4.34.1 - - -# Run - -## 1. Run Command (Shell) - -- Topology: - - distilbert_base_sst2 - -``` -bash run_tuning.sh --topology=[topology] -``` - -``` -bash run_benchmark.sh --topology=[topology] --mode=benchmark --use_pruned_model=true -``` - -## 2. 
Run Command (Python) - -``` -python run_glue.py \ - --model_name_or_path distilbert-base-uncased-finetuned-sst-2-english \ - --task_name sst2 \ - --prune \ - --do_train \ - --do_eval \ - --output_dir ./tmp/sst2_output \ - --overwrite_output_dir -``` - -# Multi-node Usage - -We also supported Distributed Data Parallel training on multi nodes settings for pruning. - -The default strategy we used is `MultiWorkerMirroredStrategy` in Tensorflow, and with `task_type` set as "worker", we are expected to pass following extra parameters to the script: - -* `worker`: a string of your worker ip addresses which is separated by comma and there should not be space between each two of them - -* `task_index`: 0 should be set on the chief node (leader) and 1, 2, 3... should be set as the rank of other follower nodes - -## Multi-node Example - -* On leader node - -``` -bash run_tuning.sh --topology=distilbert_base_sst2 --worker="localhost:12345,localhost:23456" --task_index=0 -``` - -which is equal to - -``` -python run_glue.py \ - --model_name_or_path distilbert-base-uncased-finetuned-sst-2-english \ - --task_name sst2 \ - --prune \ - --do_train \ - --do_eval \ - --output_dir ./tmp/sst2_output \ - --overwrite_output_dir \ - --worker "localhost:12345,localhost:23456" \ - --task_index 0 -``` - -* On follower node - -``` -bash run_tuning.sh --topology=distilbert_base_sst2 --worker="localhost:12345,localhost:23456" --task_index=1 -``` - -Please replace the worker ip address list with your own. 
diff --git a/examples/huggingface/tensorflow/text-classification/pruning/requirements.txt b/examples/huggingface/tensorflow/text-classification/pruning/requirements.txt deleted file mode 100644 index 245a729ec94..00000000000 --- a/examples/huggingface/tensorflow/text-classification/pruning/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -accelerate -datasets >= 1.17 -sentencepiece != 0.1.92 -protobuf -intel-tensorflow -transformers diff --git a/examples/huggingface/tensorflow/text-classification/pruning/run_benchmark.sh b/examples/huggingface/tensorflow/text-classification/pruning/run_benchmark.sh deleted file mode 100644 index 76c9b07045f..00000000000 --- a/examples/huggingface/tensorflow/text-classification/pruning/run_benchmark.sh +++ /dev/null @@ -1,90 +0,0 @@ -#!/bin/bash -set -x - -function main { - - init_params "$@" - run_benchmark - -} - -# init params -function init_params { - iters=100 - batch_size=64 - tuned_checkpoint=saved_results - topology="distilbert_base_sst2" - mode="benchmark" - for var in "$@" - do - case $var in - --topology=*) - topology=$(echo $var |cut -f2 -d=) - ;; - --dataset_location=*) - dataset_location=$(echo $var |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo $var |cut -f2 -d=) - ;; - --mode=*) - mode=$(echo $var |cut -f2 -d=) - ;; - --batch_size=*) - batch_size=$(echo $var |cut -f2 -d=) - ;; - --iters=*) - iters=$(echo ${var} |cut -f2 -d=) - ;; - --use_pruned_model=*) - use_pruned_model=$(echo ${var} |cut -f2 -d=) - ;; - --config=*) - tuned_checkpoint=$(echo $var |cut -f2 -d=) - ;; - *) - echo "Error: No such parameter: ${var}" - exit 1 - ;; - esac - done - -} - - -# run_benchmark -function run_benchmark { - extra_cmd='' - MAX_SEQ_LENGTH=128 - - if [[ ${mode} == "accuracy" ]]; then - mode_cmd=" --accuracy_only" - elif [[ ${mode} == "benchmark" ]]; then - mode_cmd=" --benchmark " - else - echo "Error: No such mode: ${mode}" - exit 1 - fi - - if [ "${topology}" = "distilbert_base_sst2" ]; then - TASK_NAME='sst2' - 
model_name_or_path=distilbert-base-uncased-finetuned-sst-2-english - fi - - if [[ ${use_pruned_model} == "true" ]]; then - extra_cmd=$extra_cmd" --use_pruned_model" - fi - - python -u ./run_glue.py \ - --model_name_or_path ${model_name_or_path} \ - --task_name ${TASK_NAME} \ - --do_eval \ - --per_device_eval_batch_size ${batch_size} \ - --output_dir ${tuned_checkpoint} \ - --overwrite_cache \ - ${mode_cmd} \ - ${extra_cmd} - -} - -main "$@" diff --git a/examples/huggingface/tensorflow/text-classification/pruning/run_glue.py b/examples/huggingface/tensorflow/text-classification/pruning/run_glue.py deleted file mode 100644 index 67c35ff2471..00000000000 --- a/examples/huggingface/tensorflow/text-classification/pruning/run_glue.py +++ /dev/null @@ -1,689 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2020 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Finetuning the library models for sequence classification on GLUE.""" -# You can also adapt this script on your own text classification task. Pointers for this are left as comments. 
- -import logging -import os -import sys -import numpy as np -import tensorflow as tf -import time -import transformers -from dataclasses import dataclass, field -from typing import Optional - -from datasets import load_dataset, load_metric - -from transformers import ( - AutoConfig, - AutoTokenizer, - DataCollatorWithPadding, - DefaultDataCollator, - HfArgumentParser, - PretrainedConfig, - TFAutoModelForSequenceClassification, - TFTrainingArguments, - set_seed, -) -from transformers.trainer_utils import get_last_checkpoint, is_main_process -from transformers.utils import check_min_version - - -# region Helper functions - - -class SavePretrainedCallback(tf.keras.callbacks.Callback): - # Hugging Face models have a save_pretrained() method that saves both the weights and the necessary - # metadata to allow them to be loaded as a pretrained model in future. This is a simple Keras callback - # that saves the model with this method after each epoch. - def __init__(self, output_dir, **kwargs): - super().__init__() - self.output_dir = output_dir - - def on_epoch_end(self, epoch, logs=None): - self.model.save_pretrained(self.output_dir) - - -# endregion - - -# Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.17.0") - -task_to_keys = { - "cola": ("sentence", None), - "mnli": ("premise", "hypothesis"), - "mrpc": ("sentence1", "sentence2"), - "qnli": ("question", "sentence"), - "qqp": ("question1", "question2"), - "rte": ("sentence1", "sentence2"), - "sst2": ("sentence", None), - "stsb": ("sentence1", "sentence2"), - "wnli": ("sentence1", "sentence2"), -} - -logger = logging.getLogger(__name__) - - -# region Command-line arguments -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - - Using `HfArgumentParser` we can turn this class - into argparse arguments to be able to specify them on - the command line. 
- """ - - task_name: str = field( - metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())}, - ) - predict_file: str = field( - metadata={"help": "A file containing user-supplied examples to make predictions for"}, - default=None, - ) - max_seq_length: int = field( - default=128, - metadata={ - "help": "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - }, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} - ) - pad_to_max_length: bool = field( - default=False, - metadata={ - "help": "Whether to pad all samples to `max_seq_length`. " - "If False, will pad the samples dynamically when batching to the maximum length in the batch." - }, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - }, - ) - max_predict_samples: Optional[int] = field( - default=None, - metadata={ - "help": "For debugging purposes or quicker training, truncate the number of prediction examples to this " - "value if set." - }, - ) - - def __post_init__(self): - self.task_name = self.task_name.lower() - if self.task_name not in task_to_keys.keys(): - raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys())) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. 
- """ - - model_name_or_path: str = field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, - ) - model_revision: str = field( - default="main", - metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script " - "with private models)." - }, - ) - - -@dataclass -class OptimizationArguments: - """ - Arguments pertaining to what type of optimization we are going to apply on the model. - """ - - prune: bool = field( - default=False, - metadata={"help": "Whether or not to apply prune."}, - ) - pruning_approach: Optional[str] = field( - default="BasicMagnitude", - metadata={"help": "Pruning approach. 
Supported approach is basic_magnite."}, - ) - target_sparsity_ratio: Optional[float] = field( - default=None, - metadata={"help": "Targeted sparsity when pruning the model."}, - ) - metric_name: Optional[str] = field( - default=None, - metadata={"help": "Metric used for the tuning strategy."}, - ) - tolerance_mode: Optional[str] = field( - default="relative", - metadata={"help": "Metric tolerance model, expected to be relative or absolute."}, - ) - perf_tol: Optional[float] = field( - default=0.01, - metadata={"help": "Performance tolerance when optimizing the model."}, - ) - benchmark: bool = field( - default=False, - metadata={"help": "Run benchmark."}) - use_pruned_model: bool = field( - default=False, - metadata={"help":"Whether to use pretrained pruned model."}) - accuracy_only: bool = field( - default=False, - metadata={"help":"Whether to only test accuracy for model tuned by Neural Compressor."}) - -@dataclass -class DistributedArguments: - """ - Arguments setting the distributed multinode environment - """ - - worker: str = field( - default=None, - metadata={"help": "List of node ip addresses in a string, and there should not be space between addresses."}, - ) - task_index: int = field( - default=0, - metadata={"help": "Worker index, and 0 represents the chief worker while other workers are set as 1,2,3..."}, - ) -# endregion - -def main(): - # region Argument parsing - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments, OptimizationArguments, DistributedArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. 
- model_args, data_args, training_args, optim_args, distributed_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args, optim_args, distributed_args = parser.parse_args_into_dataclasses() - - if not (training_args.do_train or training_args.do_eval or training_args.do_predict): - exit("Must specify at least one of --do_train, --do_eval or --do_predict!") - # endregion - - # region Set the multinode environment, the strategy and paths - strategy = None - worker_list = None - if distributed_args.worker is not None: - logger.info("distributed environment initialization...") - - worker_list = distributed_args.worker.split(",") - - from intel_extension_for_transformers.transformers.utils.utility_tf import distributed_init - distributed_init(worker_list, "worker", distributed_args.task_index) - - strategy = tf.distribute.MultiWorkerMirroredStrategy() - from intel_extension_for_transformers.transformers.utils.utility_tf import get_filepath - training_args.output_dir = get_filepath(training_args.output_dir, strategy.cluster_resolver.task_type, strategy.cluster_resolver.task_id) - else: - strategy = training_args.strategy - #endregion - - # region Checkpoints - checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - checkpoint = get_last_checkpoint(training_args.output_dir) - if checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." 
- ) - # endregion - - # region Logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) - - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(training_args.local_rank): - transformers.utils.logging.set_verbosity_info() - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - logger.info(f"Training/evaluation parameters {training_args}") - # endregion - - # region Dataset and labels - # Set seed before initializing model. - set_seed(training_args.seed) - - # Downloading and loading a dataset from the hub. In distributed training, the load_dataset function guarantee - # that only one local process can concurrently download the dataset. - datasets = load_dataset( - "glue", - data_args.task_name, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - ) - # See more about loading any type of standard or custom dataset at - # https://huggingface.co/docs/datasets/loading_datasets.html. 
- - is_regression = data_args.task_name == "stsb" - if not is_regression: - label_list = datasets["train"].features["label"].names - num_labels = len(label_list) - else: - num_labels = 1 - - if data_args.predict_file is not None: - logger.info("Preparing user-supplied file for predictions...") - - data_files = {"data": data_args.predict_file} - - for key in data_files.keys(): - logger.info(f"Loading a local file for {key}: {data_files[key]}") - - if data_args.predict_file.endswith(".csv"): - # Loading a dataset from local csv files - user_dataset = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir) - else: - # Loading a dataset from local json files - user_dataset = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir) - needed_keys = task_to_keys[data_args.task_name] - for key in needed_keys: - assert key in user_dataset["data"].features, f"Your supplied predict_file is missing the {key} key!" - datasets["user_data"] = user_dataset["data"] - # endregion - - # region Load model config and tokenizer - # - # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. 
- config = AutoConfig.from_pretrained( - model_args.config_name if model_args.config_name else model_args.model_name_or_path, - num_labels=num_labels, - finetuning_task=data_args.task_name, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - ) - tokenizer = AutoTokenizer.from_pretrained( - model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - use_fast=model_args.use_fast_tokenizer, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - ) - # endregion - - # region Dataset preprocessing - sentence1_key, sentence2_key = task_to_keys[data_args.task_name] - non_label_column_names = [name for name in datasets["train"].column_names if name != "label"] - - # Padding strategy - if data_args.pad_to_max_length: - padding = "max_length" - else: - # We will pad later, dynamically at batch creation, to the max sequence length in each batch - padding = False - - # Some models have set the order of the labels to use, so let's make sure we do use it. - label_to_id = None - if config.label2id != PretrainedConfig(num_labels=num_labels).label2id and not is_regression: - # Some have all caps in their config, some don't. - label_name_to_id = {k.lower(): v for k, v in config.label2id.items()} - if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)): - label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)} - else: - logger.warning( - "Your model seems to have been trained with labels, but they don't match the dataset: ", - f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}." 
- "\nIgnoring the model labels as a result.", - ) - label_to_id = {label: i for i, label in enumerate(label_list)} - if label_to_id is not None: - config.label2id = label_to_id - config.id2label = {id: label for label, id in config.label2id.items()} - elif data_args.task_name is not None and not is_regression: - config.label2id = {l: i for i, l in enumerate(label_list)} - config.id2label = {id: label for label, id in config.label2id.items()} - - if data_args.max_seq_length > tokenizer.model_max_length: - logger.warning( - f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" - f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." - ) - max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) - - def preprocess_function(examples): - # Tokenize the texts - args = ( - (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) - ) - result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True) - - return result - - datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache) - - if data_args.pad_to_max_length: - data_collator = DefaultDataCollator(return_tensors="tf") - else: - data_collator = DataCollatorWithPadding(tokenizer, return_tensors="tf") - # endregion - - # region Metric function - metric = load_metric("glue", data_args.task_name) - - def compute_metrics(preds, label_ids): - preds = preds["logits"] - preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) - result = metric.compute(predictions=preds, references=label_ids) - if len(result) > 1: - result["combined_score"] = np.mean(list(result.values())).item() - return result - # endregion - - if distributed_args.worker is None: - strategy = training_args.strategy - - with strategy.scope(): - # region Load pretrained model - if checkpoint is None: - model_path = 
model_args.model_name_or_path - else: - model_path = checkpoint - model = TFAutoModelForSequenceClassification.from_pretrained( - model_path, - config=config, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - ) - # endregion - - # region Optimizer, loss and compilation - optimizer = tf.keras.optimizers.Adam( - learning_rate=training_args.learning_rate, - beta_1=training_args.adam_beta1, - beta_2=training_args.adam_beta2, - epsilon=training_args.adam_epsilon, - clipnorm=training_args.max_grad_norm, - ) - if is_regression: - loss_fn = tf.keras.losses.MeanSquaredError() - metrics = [] - else: - loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( - from_logits=True, reduction=tf.keras.losses.Reduction.SUM - ) - metrics = ["accuracy"] - model.compile(optimizer=optimizer, loss=loss_fn, metrics=metrics) - # endregion - - # region Convert data to a tf.data.Dataset - tf_data = dict() - max_samples = { - "train": data_args.max_train_samples, - "validation": data_args.max_eval_samples, - "validation_matched": data_args.max_eval_samples, - "validation_mismatched": data_args.max_eval_samples, - "test": data_args.max_predict_samples, - "test_matched": data_args.max_predict_samples, - "test_mismatched": data_args.max_predict_samples, - "user_data": None, - } - - for key in datasets.keys(): - if key == "train" or key.startswith("validation"): - assert "label" in datasets[key].features, f"Missing labels from {key} data!" 
- if key == "train": - shuffle = True - batch_size = training_args.per_device_train_batch_size * (len(worker_list) if worker_list is not None else 1) - drop_remainder = True # Saves us worrying about scaling gradients for the last batch - else: - shuffle = False - batch_size = training_args.per_device_eval_batch_size * (len(worker_list) if worker_list is not None else 1) - drop_remainder = False - samples_limit = max_samples[key] - dataset = datasets[key] - if samples_limit is not None: - dataset = dataset.select(range(samples_limit)) - data = dataset.to_tf_dataset( - columns=[col for col in dataset.column_names if col not in set(non_label_column_names + ["label"])], - shuffle=shuffle, - batch_size=batch_size, - collate_fn=data_collator, - drop_remainder=drop_remainder, - # `label_cols` is needed for user-defined losses, such as in this example - # datasets v2.3.x need "labels", not "label" - label_cols=["labels"] if "label" in dataset.column_names else None, - ) - tf_data[key] = data - # endregion - - # region Pruning - if optim_args.prune: - from intel_extension_for_transformers.transformers import metrics, PrunerConfig, PruningConfig, TFOptimization - optimization = TFOptimization( - model=model, - args=training_args, - train_dataset=tf_data["train"], - eval_dataset=tf_data["validation"], - compute_metrics=compute_metrics, - criterion=loss_fn, - optimizer=optimizer, - task_type=strategy.cluster_resolver.task_type if isinstance(strategy, tf.distribute.MultiWorkerMirroredStrategy) else None, - task_id=strategy.cluster_resolver.task_id if isinstance(strategy, tf.distribute.MultiWorkerMirroredStrategy) else None, - ) - tune_metric = metrics.Metric( - name="accuracy", greater_is_better=True, is_relative=True, criterion=0.01, - ) - prune_type = 'BasicMagnitude' \ - if optim_args.pruning_approach else optim_args.pruning_approach - target_sparsity_ratio = None \ - if optim_args.target_sparsity_ratio is None else optim_args.target_sparsity_ratio - pruner_config = 
PrunerConfig(prune_type=prune_type, target_sparsity_ratio=target_sparsity_ratio) - pruning_conf = PruningConfig( - epochs=int(training_args.num_train_epochs), pruner_config=pruner_config, metrics=tune_metric, - framework="tensorflow" - ) - p_model = optimization.prune(pruning_config=pruning_conf) - return - # endregion - - # region Training and validation - if training_args.do_train: - callbacks = [SavePretrainedCallback(output_dir=training_args.output_dir)] - if training_args.do_eval and not data_args.task_name == "mnli": - # Do both evaluation and training in the Keras fit loop, unless the task is MNLI - # because MNLI has two validation sets - validation_data = tf_data["validation"] - else: - validation_data = None - model.fit( - tf_data["train"], - validation_data=validation_data, - epochs=int(training_args.num_train_epochs), - callbacks=callbacks, - ) - # endregion - - # region Evaluation - if training_args.do_eval: - # We normally do validation as part of the Keras fit loop, but we run it independently - # if there was no fit() step (because we didn't train the model) or if the task is MNLI, - # because MNLI has a separate validation-mismatched validation set - logger.info("*** Evaluate ***") - - # Loop to handle MNLI double evaluation (matched, mis-matched) - if data_args.task_name == "mnli": - tasks = ["mnli", "mnli-mm"] - tf_datasets = [tf_data["validation_matched"], tf_data["validation_mismatched"]] - raw_datasets = [datasets["validation_matched"], datasets["validation_mismatched"]] - else: - tasks = [data_args.task_name] - tf_datasets = [tf_data["validation"]] - raw_datasets = [datasets["validation"]] - - total_time = 0 - num_examples = 0 - if optim_args.use_pruned_model: - model = tf.saved_model.load(training_args.output_dir) - for raw_dataset, tf_dataset, task in zip(raw_datasets, tf_datasets, - tasks): - num_examples += sum( - 1 for _ in (tf_dataset.unbatch() - if hasattr(tf_dataset, "unbatch") else tf_dataset - ) - ) - if optim_args.use_pruned_model: 
- preds: np.ndarray = None - label_ids: np.ndarray = None - infer = model.signatures[list(model.signatures.keys())[0]] - for i, (inputs, labels) in enumerate(tf_dataset): - for name in inputs: - inputs[name] = tf.constant(inputs[name].numpy(), dtype=infer.inputs[0].dtype) - start = time.time() - results = infer(**inputs) - total_time += time.time() - start - for val in results: - if preds is None: - preds = results[val].numpy() - else: - preds = np.append(preds, results[val].numpy(), axis=0) - if label_ids is None: - label_ids = labels.numpy() - else: - label_ids = np.append(label_ids, labels.numpy(), axis=0) - eval_metrics = compute_metrics({"logits": preds}, label_ids) - else: - start = time.time() - eval_predictions = model.predict(tf_dataset) - total_time += time.time() - start - eval_metrics = compute_metrics(eval_predictions, raw_dataset["label"]) - print(f"Evaluation metrics ({task}):") - print(eval_metrics) - - logger.info("metric ({}) Accuracy: {}".format(task, eval_metrics["accuracy"])) - logger.info( - "Throughput: {} samples/sec".format( - num_examples / total_time) - ) - # endregion - - # region Prediction - if training_args.do_predict or data_args.predict_file: - logger.info("*** Predict ***") - - # Loop to handle MNLI double evaluation (matched, mis-matched) - tasks = [] - tf_datasets = [] - raw_datasets = [] - if training_args.do_predict: - if data_args.task_name == "mnli": - tasks.extend(["mnli", "mnli-mm"]) - tf_datasets.extend([tf_data["test_matched"], tf_data["test_mismatched"]]) - raw_datasets.extend([datasets["test_matched"], datasets["test_mismatched"]]) - else: - tasks.append(data_args.task_name) - tf_datasets.append(tf_data["test"]) - raw_datasets.append(datasets["test"]) - if data_args.predict_file: - tasks.append("user_data") - tf_datasets.append(tf_data["user_data"]) - raw_datasets.append(datasets["user_data"]) - - if optim_args.use_pruned_model: - model = tf.saved_model.load(training_args.output_dir) - - for raw_dataset, tf_dataset, 
task in zip(raw_datasets, tf_datasets, tasks): - if optim_args.use_pruned_model: - preds: np.ndarray = None - infer = model.signatures[list(model.signatures.keys())[0]] - for i, (inputs, labels) in enumerate(tf_dataset): - for name in inputs: - inputs[name] = tf.constant(inputs[name].numpy(), dtype=infer.inputs[0].dtype) - results = infer(**inputs) - for val in results: - if preds is None: - preds = results[val].numpy() - else: - preds = np.append(preds, results[val].numpy(), axis=0) - test_predictions = {"logits": preds} - else: - test_predictions = model.predict(tf_dataset) - if "label" in raw_dataset: - test_metrics = compute_metrics(test_predictions, raw_dataset["label"]) - print(f"Test metrics ({task}):") - print(test_metrics) - - if is_regression: - predictions_to_write = np.squeeze(test_predictions["logits"]) - else: - predictions_to_write = np.argmax(test_predictions["logits"], axis=1) - - output_predict_file = os.path.join(training_args.output_dir, f"predict_results_{task}.txt") - with open(output_predict_file, "w") as writer: - logger.info(f"***** Writing prediction results for {task} *****") - writer.write("index\tprediction\n") - for index, item in enumerate(predictions_to_write): - if is_regression: - writer.write(f"{index}\t{item:3.3f}\n") - else: - item = config.id2label[item] - writer.write(f"{index}\t{item}\n") - # endregion - - -if __name__ == "__main__": - main() diff --git a/examples/huggingface/tensorflow/text-classification/pruning/run_tuning.sh b/examples/huggingface/tensorflow/text-classification/pruning/run_tuning.sh deleted file mode 100644 index 3fca9c69a4f..00000000000 --- a/examples/huggingface/tensorflow/text-classification/pruning/run_tuning.sh +++ /dev/null @@ -1,88 +0,0 @@ -#!/bin/bash -set -x - -function main { - - init_params "$@" - run_tuning - -} - -# init params -function init_params { - tuned_checkpoint=saved_results - topology="distilbert_base_sst2" - # topology="bert_base_mrpc_static" - for var in "$@" - do - case $var in - 
--topology=*) - topology=$(echo $var |cut -f2 -d=) - ;; - --dataset_location=*) - dataset_location=$(echo $var |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo $var |cut -f2 -d=) - ;; - --output_model=*) - tuned_checkpoint=$(echo $var |cut -f2 -d=) - ;; - --worker=*) - worker=$(echo $var |cut -f2 -d=) - ;; - --task_index=*) - task_index=$(echo $var |cut -f2 -d=) - ;; - *) - echo "Error: No such parameter: ${var}" - exit 1 - ;; - esac - done - -} - -# run_tuning -function run_tuning { - extra_cmd='' - batch_size=64 - if [ "${topology}" = "distilbert_base_sst2" ]; then - TASK_NAME='sst2' - model_name_or_path=distilbert-base-uncased-finetuned-sst-2-english - fi - - if [ "${worker}" = "" ] - then - python -u ./run_glue.py \ - --model_name_or_path ${model_name_or_path} \ - --task_name ${TASK_NAME} \ - --target_sparsity_ratio 0.1 \ - --prune \ - --do_eval \ - --do_train \ - --per_device_train_batch_size ${batch_size} \ - --per_device_eval_batch_size ${batch_size} \ - --output_dir ${tuned_checkpoint} \ - --overwrite_output_dir \ - --overwrite_cache - else - python -u ./run_glue.py \ - --model_name_or_path ${model_name_or_path} \ - --task_name ${TASK_NAME} \ - --target_sparsity_ratio 0.1 \ - --prune \ - --do_eval \ - --do_train \ - --per_device_train_batch_size ${batch_size} \ - --per_device_eval_batch_size ${batch_size} \ - --output_dir ${tuned_checkpoint} \ - --overwrite_output_dir \ - --overwrite_cache \ - --worker "${worker}" \ - --task_index ${task_index} \ - ${extra_cmd} - fi -} - -main "$@" diff --git a/examples/huggingface/tensorflow/text-classification/quantization/README.md b/examples/huggingface/tensorflow/text-classification/quantization/README.md deleted file mode 100644 index ab459bf84d4..00000000000 --- a/examples/huggingface/tensorflow/text-classification/quantization/README.md +++ /dev/null @@ -1,132 +0,0 @@ -Step-by-Step -========= - -This document describes the step-by-step instructions for reproducing the quantization on models for the text 
classification (GLUE) tasks. - -GLUE is made up of a total of 9 different tasks. Here is how to run the script on one of them: - -# Prerequisite -## 1. Installation - -Make sure you have installed Intel® Extension for Transformers and all the dependencies in the current example: - -```shell -pip install intel-extension-for-transformers -cd ptq -pip install -r requirements.txt -``` - -# Run - -Here are two options: running with the shell script or running with the python script. Basically, they are equivalent and the shell script just wraps the invocation of the python script and is more concise and easy for users to get started. - -## 1. Run Command (Shell) - -- Topology: - - bert_base_mrpc_static - - xlnet_mrpc - - albert_large_mrpc - - legalbert_mrpc - -- To get the int8 model - - ``` - cd ptq - bash run_tuning.sh --topology=[topology] --output_model=./saved_int8 - ``` - -- To benchmark the int8 model - - ``` - cd ptq - bash run_benchmark.sh --topology=[topology] --config=./saved_int8 --mode=benchmark --int8=true - ``` - -## 2. Run Command (Python) - -- model_name_or_path: - - bert-base-cased-finetuned-mrpc - - xlnet-base-cased - - albert-large-v2 - - nlpaueb/legal-bert-small-uncased - -- To get int8 model - -``` -python run_glue.py - --model_name_or_path [model_name_or_path] \ - --task_name mrpc \ - --tune \ - --quantization_approach PostTrainingStatic \ - --do_train \ - --do_eval \ - --output_dir ./saved_result \ - --overwrite_output_dir -``` - - To reload int8 model - -``` -python run_glue.py - --model_name_or_path [model_name_or_path] \ - --task_name mrpc \ - --benchmark \ - --int8 \ - --do_eval \ - --output_dir ./saved_result \ - --overwrite_output_dir -``` - -> **Notes**: - - quantization_approach in Tensorflow consist of `PostTrainingStatic`, `QuantizationAwareTraining`. - - task_name consist of cola, sst2, mrpc, stsb, qqp, mnli, qnli, rte, wnli. 
- - -# Multi-node Usage - -We also supported Distributed Data Parallel training on multi nodes settings for quantization. - -> **Note**: multi node settings boost performance in the training process and may not show good performance with PostTrainingStatic quantization strategy - -The default strategy we used is `MultiWorkerMirroredStrategy` in Tensorflow, and with `task_type` set as "worker", we are expected to pass following extra parameters to the script: - -* `worker`: a string of your worker ip addresses which is separated by comma and there should not be space between each two of them - -* `task_index`: 0 should be set on the chief node (leader) and 1, 2, 3... should be set as the rank of other follower nodes - -## Multi-node Example - -### 1. Get Int8 Model - -* On leader node - -``` -bash run_tuning.sh --topology=bert_base_mrpc_static --output_model=./saved_int8 --worker="localhost:12345,localhost:23456" --task_index=0 -``` - -* On follower node - -``` -bash run_tuning.sh --topology=bert_base_mrpc_static --output_model=./saved_int8 --worker="localhost:12345,localhost:23456" --task_index=1 -``` - -Please replace the worker ip address list with your own. - -### 2. Reload Int8 Model - -* On leader node - -``` -bash run_benchmark.sh --topology=bert_base_mrpc_static --config=./saved_int8 --mode=benchmark --int8=true --worker="localhost:12345,localhost:23456" --task_index=0 -``` - -* On follower node - -``` -bash run_benchmark.sh --topology=bert_base_mrpc_static --config=./saved_int8 --mode=benchmark --int8=true --worker="localhost:12345,localhost:23456" --task_index=1 -``` - -Please replace the worker ip address list with your own. 
- - - - diff --git a/examples/huggingface/tensorflow/text-classification/quantization/ptq/requirements.txt b/examples/huggingface/tensorflow/text-classification/quantization/ptq/requirements.txt deleted file mode 100644 index 8067cf9633a..00000000000 --- a/examples/huggingface/tensorflow/text-classification/quantization/ptq/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -datasets >= 1.17 -sentencepiece != 0.1.92 -protobuf -intel-tensorflow -transformers -evaluate -accelerate \ No newline at end of file diff --git a/examples/huggingface/tensorflow/text-classification/quantization/ptq/run_benchmark.sh b/examples/huggingface/tensorflow/text-classification/quantization/ptq/run_benchmark.sh deleted file mode 100644 index 403a0e41b52..00000000000 --- a/examples/huggingface/tensorflow/text-classification/quantization/ptq/run_benchmark.sh +++ /dev/null @@ -1,131 +0,0 @@ -#!/bin/bash -set -x - -function main { - - init_params "$@" - run_benchmark - -} - -# init params -function init_params { - topology="bert_base_mrpc_static" - iters=100 - batch_size=1 - tuned_checkpoint=saved_results - cache_dir="cache" - for var in "$@" - do - case $var in - --topology=*) - topology=$(echo $var |cut -f2 -d=) - ;; - --dataset_location=*) - dataset_location=$(echo $var |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo $var |cut -f2 -d=) - ;; - --mode=*) - mode=$(echo $var |cut -f2 -d=) - ;; - --batch_size=*) - batch_size=$(echo $var |cut -f2 -d=) - ;; - --iters=*) - iters=$(echo ${var} |cut -f2 -d=) - ;; - --int8=*) - int8=$(echo ${var} |cut -f2 -d=) - ;; - --config=*) - tuned_checkpoint=$(echo $var |cut -f2 -d=) - ;; - --worker=*) - worker=$(echo $var |cut -f2 -d=) - ;; - --task_index=*) - task_index=$(echo $var |cut -f2 -d=) - ;; - --cache_dir=*) - cache_dir=$(echo $var |cut -f2 -d=) - ;; - *) - echo "Error: No such parameter: ${var}" - exit 1 - ;; - esac - done - -} - - -# run_benchmark -function run_benchmark { - extra_cmd='' - MAX_SEQ_LENGTH=128 - - if [[ ${mode} == "accuracy" 
]]; then - mode_cmd=" --accuracy_only" - elif [[ ${mode} == "benchmark" ]]; then - mode_cmd=" --benchmark " - else - echo "Error: No such mode: ${mode}" - exit 1 - fi - - if [ "${topology}" = "bert_base_mrpc_static" ]; then - TASK_NAME="mrpc" - model_name_or_path="bert-base-cased-finetuned-mrpc" - elif [ "${topology}" = "legalbert_mrpc" ]; then - TASK_NAME="mrpc" - model_name_or_path="nlpaueb/legal-bert-small-uncased" - elif [ "${topology}" = "xlnet_mrpc" ]; then - TASK_NAME="mrpc" - model_name_or_path="xlnet-base-cased" - elif [ "${topology}" = "albert_large_mrpc" ]; then - TASK_NAME="mrpc" - model_name_or_path="albert-large-v2" - # add following parameters for quicker debugging - extra_cmd=$extra_cmd" --max_eval_samples 48" - fi - - if [[ ${int8} == "true" ]]; then - extra_cmd=$extra_cmd" --int8" - fi - echo $extra_cmd - - if [ "${worker}" = "" ] - then - python -u ../run_glue.py \ - --model_name_or_path ${model_name_or_path} \ - --task_name ${TASK_NAME} \ - --do_eval \ - --max_seq_length ${MAX_SEQ_LENGTH} \ - --per_device_eval_batch_size ${batch_size} \ - --output_dir ${tuned_checkpoint} \ - --overwrite_output_dir \ - --cache_dir ${cache_dir} \ - --no_cuda \ - ${mode_cmd} \ - ${extra_cmd} - else - python -u ../run_glue.py \ - --model_name_or_path ${model_name_or_path} \ - --task_name ${TASK_NAME} \ - --do_eval \ - --max_seq_length ${MAX_SEQ_LENGTH} \ - --per_device_eval_batch_size ${batch_size} \ - --output_dir ${tuned_checkpoint} \ - --overwrite_output_dir \ - --cache_dir ${cache_dir} \ - --no_cuda \ - --worker "${worker}" \ - --task_index ${task_index} \ - ${mode_cmd} \ - ${extra_cmd} - fi -} - -main "$@" diff --git a/examples/huggingface/tensorflow/text-classification/quantization/ptq/run_tuning.sh b/examples/huggingface/tensorflow/text-classification/quantization/ptq/run_tuning.sh deleted file mode 100644 index c84c8654f62..00000000000 --- a/examples/huggingface/tensorflow/text-classification/quantization/ptq/run_tuning.sh +++ /dev/null @@ -1,115 +0,0 @@ 
-#!/bin/bash -set -x - -function main { - - init_params "$@" - run_tuning - -} - -# init params -function init_params { - topology="bert_base_mrpc_static" - tuned_checkpoint="saved_results" - extra_cmd="" - batch_size=8 - MAX_SEQ_LENGTH=128 - model_type="bert" - approach="PostTrainingStatic" - cache_dir="cache" - for var in "$@" - do - case $var in - --topology=*) - topology=$(echo $var |cut -f2 -d=) - ;; - --dataset_location=*) - dataset_location=$(echo $var |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo $var |cut -f2 -d=) - ;; - --output_model=*) - tuned_checkpoint=$(echo $var |cut -f2 -d=) - ;; - --worker=*) - worker=$(echo $var |cut -f2 -d=) - ;; - --task_index=*) - task_index=$(echo $var |cut -f2 -d=) - ;; - --cache_dir=*) - cache_dir=$(echo $var |cut -f2 -d=) - ;; - *) - echo "Error: No such parameter: ${var}" - exit 1 - ;; - esac - done - -} - -# run_tuning -function run_tuning { - batch_size=64 - if [ "${topology}" = "bert_base_mrpc_static" ]; then - TASK_NAME="mrpc" - model_name_or_path="bert-base-cased-finetuned-mrpc" - approach="PostTrainingStatic" - elif [ "${topology}" = "legalbert_mrpc" ]; then - TASK_NAME="mrpc" - model_name_or_path="nlpaueb/legal-bert-small-uncased" - approach="PostTrainingStatic" - extra_cmd=$extra_cmd" --perf_tol 0.1" - elif [ "${topology}" = "xlnet_mrpc" ]; then - TASK_NAME="mrpc" - model_name_or_path="xlnet-base-cased" - approach="PostTrainingStatic" - elif [ "${topology}" = "albert_large_mrpc" ]; then - TASK_NAME="mrpc" - model_name_or_path="albert-large-v2" - approach="PostTrainingStatic" - extra_cmd=$extra_cmd" --perf_tol 0.05" - fi - - if [ "${worker}" = "" ] - then - python -u ../run_glue.py \ - --model_name_or_path ${model_name_or_path} \ - --task_name ${TASK_NAME} \ - --do_eval \ - --max_seq_length ${MAX_SEQ_LENGTH} \ - --per_device_train_batch_size ${batch_size} \ - --per_device_eval_batch_size ${batch_size} \ - --output_dir ${tuned_checkpoint} \ - --no_cuda \ - --overwrite_output_dir \ - --cache_dir 
${cache_dir} \ - --quantization_approach ${approach} \ - --do_train \ - --tune \ - ${extra_cmd} - else - python -u ../run_glue.py \ - --model_name_or_path ${model_name_or_path} \ - --task_name ${TASK_NAME} \ - --do_eval \ - --max_seq_length ${MAX_SEQ_LENGTH} \ - --per_device_train_batch_size ${batch_size} \ - --per_device_eval_batch_size ${batch_size} \ - --output_dir ${tuned_checkpoint} \ - --no_cuda \ - --overwrite_output_dir \ - --cache_dir ${cache_dir} \ - --quantization_approach ${approach} \ - --do_train \ - --tune \ - --worker "${worker}" \ - --task_index ${task_index} \ - ${extra_cmd} - fi -} - -main "$@" diff --git a/examples/huggingface/tensorflow/text-classification/quantization/run_glue.py b/examples/huggingface/tensorflow/text-classification/quantization/run_glue.py deleted file mode 100644 index c2ca0c45603..00000000000 --- a/examples/huggingface/tensorflow/text-classification/quantization/run_glue.py +++ /dev/null @@ -1,731 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2020 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Finetuning the library models for sequence classification on GLUE.""" -# You can also adapt this script on your own text classification task. Pointers for this are left as comments. 
- -import logging -import os -import sys -import time -from dataclasses import dataclass, field -from typing import Optional - -import numpy as np -import tensorflow as tf -from datasets import load_dataset - -import transformers -from transformers import ( - AutoConfig, - AutoTokenizer, - DataCollatorWithPadding, - DefaultDataCollator, - HfArgumentParser, - PretrainedConfig, - TFAutoModelForSequenceClassification, - TFTrainingArguments, - set_seed, -) -from transformers.trainer_utils import get_last_checkpoint, is_main_process -from transformers.utils import check_min_version - - -# region Helper functions - - -class SavePretrainedCallback(tf.keras.callbacks.Callback): - # Hugging Face models have a save_pretrained() method that saves both the weights and the necessary - # metadata to allow them to be loaded as a pretrained model in future. This is a simple Keras callback - # that saves the model with this method after each epoch. - def __init__(self, output_dir, **kwargs): - super().__init__() - self.output_dir = output_dir - - def on_epoch_end(self, epoch, logs=None): - self.model.save_pretrained(self.output_dir) - - -# endregion - - -# Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.17.0") - -task_to_keys = { - "cola": ("sentence", None), - "mnli": ("premise", "hypothesis"), - "mrpc": ("sentence1", "sentence2"), - "qnli": ("question", "sentence"), - "qqp": ("question1", "question2"), - "rte": ("sentence1", "sentence2"), - "sst2": ("sentence", None), - "stsb": ("sentence1", "sentence2"), - "wnli": ("sentence1", "sentence2"), -} - -logger = logging.getLogger(__name__) - - -# region Command-line arguments -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - - Using `HfArgumentParser` we can turn this class - into argparse arguments to be able to specify them on - the command line. 
- """ - - task_name: str = field( - metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())}, - ) - predict_file: str = field( - metadata={"help": "A file containing user-supplied examples to make predictions for"}, - default=None, - ) - max_seq_length: int = field( - default=128, - metadata={ - "help": "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - }, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} - ) - pad_to_max_length: bool = field( - default=False, - metadata={ - "help": "Whether to pad all samples to `max_seq_length`. " - "If False, will pad the samples dynamically when batching to the maximum length in the batch." - }, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - }, - ) - max_predict_samples: Optional[int] = field( - default=None, - metadata={ - "help": "For debugging purposes or quicker training, truncate the number of prediction examples to this " - "value if set." - }, - ) - - def __post_init__(self): - self.task_name = self.task_name.lower() - if self.task_name not in task_to_keys.keys(): - raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys())) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. 
- """ - - model_name_or_path: str = field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, - ) - model_revision: str = field( - default="main", - metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script " - "with private models)." - }, - ) - - -@dataclass -class OptimizationArguments: - """ - Arguments pertaining to what type of optimization we are going to apply on the model. - """ - - tune: bool = field( - default=False, - metadata={"help": "Whether or not to apply quantization."}, - ) - quantization_approach: Optional[str] = field( - default="PostTrainingStatic", - metadata={"help": "Quantization approach. 
Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, - ) - metric_name: Optional[str] = field( - default=None, - metadata={"help": "Metric used for the tuning strategy."}, - ) - is_relative: Optional[bool] = field( - default=True, - metadata={"help": "Metric tolerance model, expected to be relative or absolute."}, - ) - perf_tol: Optional[float] = field( - default=0.01, - metadata={"help": "Performance tolerance when optimizing the model."}, - ) - benchmark: bool = field( - default=False, - metadata={"help": "run benchmark."}) - int8: bool = field( - default=False, - metadata={"help":"Whether to use the quantized int8 model."}) - accuracy_only: bool = field( - default=False, - metadata={"help":"Whether to only test accuracy for model tuned by Neural Compressor."}) - -@dataclass -class DistributedArguments: - """ - Arguments setting the distributed multinode environment - """ - - worker: str = field( - default=None, - metadata={"help": "List of node ip addresses in a string, and there should not be space between addresses."}, - ) - task_index: int = field( - default=0, - metadata={"help": "Worker index, and 0 represents the chief worker while other workers are set as 1,2,3..."}, - ) -# endregion - - -def main(): - # region Argument parsing - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments, OptimizationArguments, DistributedArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. 
- model_args, data_args, training_args, optim_args, distributed_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args, optim_args, distributed_args = parser.parse_args_into_dataclasses() - - if not (training_args.do_train or training_args.do_eval or training_args.do_predict): - exit("Must specify at least one of --do_train, --do_eval or --do_predict!") - # endregion - - # region Logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) - - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(training_args.local_rank): - transformers.utils.logging.set_verbosity_info() - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - logger.info(f"Training/evaluation parameters {training_args}") - # endregion - - # region Set the multinode environment, the strategy and paths - strategy = None - worker_list = None - if distributed_args.worker is not None: - logger.info("distributed environment initialization...") - - worker_list = distributed_args.worker.split(",") - - from intel_extension_for_transformers.transformers.utils.utility_tf import distributed_init - distributed_init(worker_list, "worker", distributed_args.task_index) - - strategy = tf.distribute.MultiWorkerMirroredStrategy() - from intel_extension_for_transformers.transformers.utils.utility_tf import get_filepath - training_args.output_dir = get_filepath(training_args.output_dir, strategy.cluster_resolver.task_type, strategy.cluster_resolver.task_id) - else: - strategy = training_args.strategy - #endregion - - # region Checkpoints - checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not 
training_args.overwrite_output_dir: - checkpoint = get_last_checkpoint(training_args.output_dir) - if checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - # endregion - - # region Dataset and labels - # Set seed before initializing model. - set_seed(training_args.seed) - - # Downloading and loading a dataset from the hub. In distributed training, the load_dataset function guarantee - # that only one local process can concurrently download the dataset. - datasets = load_dataset( - "glue", - data_args.task_name, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - ) - # See more about loading any type of standard or custom dataset at - # https://huggingface.co/docs/datasets/loading_datasets.html. 
- - is_regression = data_args.task_name == "stsb" - if not is_regression: - label_list = datasets["train"].features["label"].names - num_labels = len(label_list) - else: - num_labels = 1 - - if data_args.predict_file is not None: - logger.info("Preparing user-supplied file for predictions...") - - data_files = {"data": data_args.predict_file} - - for key in data_files.keys(): - logger.info(f"Loading a local file for {key}: {data_files[key]}") - - if data_args.predict_file.endswith(".csv"): - # Loading a dataset from local csv files - user_dataset = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir) - else: - # Loading a dataset from local json files - user_dataset = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir) - needed_keys = task_to_keys[data_args.task_name] - for key in needed_keys: - assert key in user_dataset["data"].features, f"Your supplied predict_file is missing the {key} key!" - datasets["user_data"] = user_dataset["data"] - # endregion - - # region Load model config and tokenizer - # - # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. 
- config = AutoConfig.from_pretrained( - model_args.config_name if model_args.config_name else model_args.model_name_or_path, - num_labels=num_labels, - finetuning_task=data_args.task_name, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - _commit_hash="main", - ) - tokenizer = AutoTokenizer.from_pretrained( - model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - use_fast=model_args.use_fast_tokenizer, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - _commit_hash="main", - ) - # endregion - - # region Dataset preprocessing - sentence1_key, sentence2_key = task_to_keys[data_args.task_name] - non_label_column_names = [name for name in datasets["train"].column_names if name != "label"] - - # Padding strategy - if data_args.pad_to_max_length: - padding = "max_length" - else: - # We will pad later, dynamically at batch creation, to the max sequence length in each batch - padding = False - - # Some models have set the order of the labels to use, so let's make sure we do use it. - label_to_id = None - if config.label2id != PretrainedConfig(num_labels=num_labels).label2id and not is_regression: - # Some have all caps in their config, some don't. - label_name_to_id = {k.lower(): v for k, v in config.label2id.items()} - if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)): - label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)} - else: - logger.warning( - "Your model seems to have been trained with labels, but they don't match the dataset: ", - f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}." 
- "\nIgnoring the model labels as a result.", - ) - label_to_id = {label: i for i, label in enumerate(label_list)} - if label_to_id is not None: - config.label2id = label_to_id - config.id2label = {id: label for label, id in config.label2id.items()} - elif data_args.task_name is not None and not is_regression: - config.label2id = {l: i for i, l in enumerate(label_list)} - config.id2label = {id: label for label, id in config.label2id.items()} - - if data_args.max_seq_length > tokenizer.model_max_length: - logger.warning( - f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" - f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." - ) - max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) - - def preprocess_function(examples): - # Tokenize the texts - args = ( - (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) - ) - result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True) - - return result - - datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache) - - if data_args.pad_to_max_length: - data_collator = DefaultDataCollator(return_tensors="tf") - else: - data_collator = DataCollatorWithPadding(tokenizer, return_tensors="tf") - # endregion - - # region Metric function - from evaluate import load - metric = load("glue", data_args.task_name, cache_dir=model_args.cache_dir) - - def compute_metrics(preds, label_ids): - preds = preds["logits"] - preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) - result = metric.compute(predictions=preds, references=label_ids) - if len(result) > 1: - result["combined_score"] = np.mean(list(result.values())).item() - return result - - # endregion - - def eval_func_mrpc(model): - label_ids: np.ndarray = None - tf_eval_dataset = tf_data["validation"] - - num_examples = sum(1 
for _ in ( - tf_eval_dataset.unbatch() if hasattr(tf_eval_dataset, "unbatch") else tf_eval_dataset)) - logger.info(f"***** Running Evaluation *****") - logger.info(f" Num examples in dataset = {num_examples}") - logger.info(f" Batch size = {training_args.per_device_eval_batch_size}") - - preds: np.ndarray = None - infer = model.signatures["serving_default"] - - for idx, (inputs, labels) in enumerate(tf_eval_dataset): - for name in inputs: - inputs[name] = tf.constant(inputs[name].numpy(), dtype=infer.inputs[0].dtype) - - results = infer(**inputs) - if preds is None: - preds = results["Identity"].numpy() - else: - preds = np.append(preds, results["Identity"].numpy(), axis=0) - - if label_ids is None: - label_ids = labels[0].numpy() if isinstance( - labels, list) else labels.numpy() - else: - label_ids = np.append( - label_ids, - labels[0].numpy() - if isinstance(labels, list) else labels.numpy(), - axis=0) - test_predictions = {"logits": preds} - metrics = compute_metrics(test_predictions, label_ids) - - return metrics["accuracy"] - - with strategy.scope(): - # region Load pretrained model - if checkpoint is None: - model_path = model_args.model_name_or_path - else: - model_path = checkpoint - model = TFAutoModelForSequenceClassification.from_pretrained( - model_path, - config=config, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - ) - # endregion - - # region Optimizer, loss and compilation - optimizer = tf.keras.optimizers.Adam( - learning_rate=training_args.learning_rate, - beta_1=training_args.adam_beta1, - beta_2=training_args.adam_beta2, - epsilon=training_args.adam_epsilon, - clipnorm=training_args.max_grad_norm, - ) - if is_regression: - loss_fn = tf.keras.losses.MeanSquaredError() - metrics = [] - else: - loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( - from_logits=True, reduction=tf.keras.losses.Reduction.SUM - ) - metrics = ["accuracy"] - 
model.compile(optimizer=optimizer, loss=loss_fn, metrics=metrics) - # endregion - - # region Convert data to a tf.data.Dataset - tf_data = dict() - max_samples = { - "train": data_args.max_train_samples, - "validation": data_args.max_eval_samples, - "validation_matched": data_args.max_eval_samples, - "validation_mismatched": data_args.max_eval_samples, - "test": data_args.max_predict_samples, - "test_matched": data_args.max_predict_samples, - "test_mismatched": data_args.max_predict_samples, - "user_data": None, - } - for key in datasets.keys(): - if key == "train" or key.startswith("validation"): - assert "label" in datasets[key].features, f"Missing labels from {key} data!" - if key == "train": - shuffle = True - batch_size = training_args.per_device_train_batch_size * (len(worker_list) if worker_list is not None else 1) - drop_remainder = True # Saves us worrying about scaling gradients for the last batch - else: - shuffle = False - batch_size = training_args.per_device_eval_batch_size * (len(worker_list) if worker_list is not None else 1) - drop_remainder = False - samples_limit = max_samples[key] - dataset = datasets[key] - if samples_limit is not None: - dataset = dataset.select(range(samples_limit)) - data = dataset.to_tf_dataset( - columns=[col for col in dataset.column_names if col not in set(non_label_column_names + ["label"])], - shuffle=shuffle, - batch_size=batch_size, - collate_fn=data_collator, - drop_remainder=drop_remainder, - # `label_cols` is needed for user-defined losses, such as in this example - # datasets v2.3.x need "labels", not "label" - label_cols=["labels"] if "label" in dataset.column_names else None, - ) - tf_data[key] = data - # endregion - - if optim_args.tune: - from intel_extension_for_transformers.transformers import metrics, objectives, QuantizationConfig, TFOptimization - optimization = TFOptimization( - model=model, - args=training_args, - train_dataset=tf_data["train"], - eval_dataset=tf_data["validation"], - 
compute_metrics=compute_metrics, - task_type=strategy.cluster_resolver.task_type if isinstance(strategy, tf.distribute.MultiWorkerMirroredStrategy) else None, - task_id=strategy.cluster_resolver.task_id if isinstance(strategy, tf.distribute.MultiWorkerMirroredStrategy) else None, - ) - - # use customized eval function - optimization.eval_func = eval_func_mrpc - - tune_metric = metrics.Metric( - name="accuracy", greater_is_better=True, is_relative=True, criterion=optim_args.perf_tol, - ) - quantization_config = QuantizationConfig( - framework="tensorflow", - approach="POSTTRAININGSTATIC", - metrics=[tune_metric], - objectives=[objectives.performance] - ) - quantized_model = optimization.quantize(quant_config=quantization_config) - exit(0) - - # region Training and validation - if training_args.do_train: - callbacks = [SavePretrainedCallback(output_dir=training_args.output_dir)] - if training_args.do_eval and not data_args.task_name == "mnli": - # Do both evaluation and training in the Keras fit loop, unless the task is MNLI - # because MNLI has two validation sets - validation_data = tf_data["validation"] - else: - validation_data = None - model.fit( - tf_data["train"], - validation_data=validation_data, - epochs=2, - callbacks=callbacks, - ) - # endregion - - # region Evaluation - if training_args.do_eval: - # We normally do validation as part of the Keras fit loop, but we run it independently - # if there was no fit() step (because we didn't train the model) or if the task is MNLI, - # because MNLI has a separate validation-mismatched validation set - logger.info("*** Evaluate ***") - - # Loop to handle MNLI double evaluation (matched, mis-matched) - if data_args.task_name == "mnli": - tasks = ["mnli", "mnli-mm"] - tf_datasets = [tf_data["validation_matched"], tf_data["validation_mismatched"]] - raw_datasets = [datasets["validation_matched"], datasets["validation_mismatched"]] - else: - tasks = [data_args.task_name] - tf_datasets = [tf_data["validation"]] - 
raw_datasets = [datasets["validation"]] - - num_examples = 0 - if optim_args.int8: - model = tf.saved_model.load(training_args.output_dir) - else: - from intel_extension_for_transformers.transformers.utils.utility_tf import keras2SavedModel - model = keras2SavedModel(model) - for raw_dataset, tf_dataset, task in zip(raw_datasets, tf_datasets, tasks): - num_examples += sum( - 1 for _ in (tf_dataset.unbatch() - if hasattr(tf_dataset, "unbatch") else tf_dataset - ) - ) - preds: np.ndarray = None - label_ids: np.ndarray = None - infer = model.signatures[list(model.signatures.keys())[0]] - - if optim_args.accuracy_only: - iterations = 1 - warmup = 0 - else: - iterations = 10 - warmup = 5 - latency_list = [] - - for idx in range(iterations): - iteration_time = 0 - for i, (inputs, labels) in enumerate(tf_dataset): - for name in inputs: - inputs[name] = tf.constant(inputs[name].numpy(), dtype=infer.inputs[0].dtype) - start = time.time() - results = infer(**inputs) - iteration_time += time.time() - start - if idx == 0: # only accumulate once all the preds and labels - if preds is None: - preds = results["Identity"].numpy() - else: - preds = np.append(preds, results["Identity"].numpy(), axis=0) - if label_ids is None: - label_ids = labels.numpy() - else: - label_ids = np.append(label_ids, labels.numpy(), axis=0) - latency_list.append(iteration_time) - logger.info("Iteration {} time: {} sec".format(idx, iteration_time)) - eval_metrics = compute_metrics({"logits": preds}, label_ids) - logger.info("\nEvaluation result: ") - logger.info("metric ({}) Accuracy: {}".format(task, eval_metrics["accuracy"])) - - average_iteration_time = np.array(latency_list[warmup:]).mean() - logger.info( - "Throughput: {} samples/sec".format( - num_examples / average_iteration_time) - ) - - # endregion - - # region Prediction - if training_args.do_predict or data_args.predict_file: - logger.info("*** Predict ***") - - # Loop to handle MNLI double evaluation (matched, mis-matched) - tasks = [] - 
tf_datasets = [] - raw_datasets = [] - if training_args.do_predict: - if data_args.task_name == "mnli": - tasks.extend(["mnli", "mnli-mm"]) - tf_datasets.extend([tf_data["test_matched"], tf_data["test_mismatched"]]) - raw_datasets.extend([datasets["test_matched"], datasets["test_mismatched"]]) - else: - tasks.append(data_args.task_name) - tf_datasets.append(tf_data["test"]) - raw_datasets.append(datasets["test"]) - if data_args.predict_file: - tasks.append("user_data") - tf_datasets.append(tf_data["user_data"]) - raw_datasets.append(datasets["user_data"]) - - if optim_args.int8: - model = tf.saved_model.load(training_args.output_dir) - - for raw_dataset, tf_dataset, task in zip(raw_datasets, tf_datasets, tasks): - if optim_args.int8: - preds: np.ndarray = None - infer = model.signatures[list(model.signatures.keys())[0]] - for i, (inputs, labels) in enumerate(tf_dataset): - for name in inputs: - inputs[name] = tf.constant(inputs[name].numpy(), dtype=infer.inputs[0].dtype) - results = infer(**inputs) - for val in results: - if preds is None: - preds = results[val].numpy() - else: - preds = np.append(preds, results[val].numpy(), axis=0) - test_predictions = {"logits": preds} - else: - test_predictions = model.predict(tf_dataset) - if "label" in raw_dataset: - test_metrics = compute_metrics(test_predictions, - raw_dataset["label"]) - print(f"Test metrics ({task}):") - print(test_metrics) - - if is_regression: - predictions_to_write = np.squeeze(test_predictions["logits"]) - else: - predictions_to_write = np.argmax(test_predictions["logits"], axis=1) - - output_predict_file = os.path.join(training_args.output_dir, f"predict_results_{task}.txt") - with open(output_predict_file, "w") as writer: - logger.info(f"***** Writing prediction results for {task} *****") - writer.write("index\tprediction\n") - for index, item in enumerate(predictions_to_write): - if is_regression: - writer.write(f"{index}\t{item:3.3f}\n") - else: - item = config.id2label[item] - 
writer.write(f"{index}\t{item}\n") - # endregion - - -if __name__ == "__main__": - main() diff --git a/examples/huggingface/tensorflow/token-classification/quantization/README.md b/examples/huggingface/tensorflow/token-classification/quantization/README.md deleted file mode 100644 index 8b05a9c1974..00000000000 --- a/examples/huggingface/tensorflow/token-classification/quantization/README.md +++ /dev/null @@ -1,35 +0,0 @@ -Step-by-Step -========= - -This document describes the step-by-step instructions for reproducing the quantization on models for the token classification (NER) tasks. - -# Prerequisite -## 1. Installation - -Make sure you have installed Intel® Extension for Transformers and all the dependencies in the current example: - -```shell -pip install intel-extension-for-transformers -pip install -r requirements.txt -``` - -# Run - -## 1. Run Command (Shell) - -- Topology: - - bert_base_ner - -- To get the int8 model - - ``` - cd ptq - bash run_tuning.sh --topology=[topology] --output_model=./saved_int8 - ``` - -- To benchmark the int8 model - - ``` - cd ptq - bash run_benchmark.sh --topology=[topology] --config=./saved_int8 --mode=benchmark --int8=true - ``` \ No newline at end of file diff --git a/examples/huggingface/tensorflow/token-classification/quantization/requirements.txt b/examples/huggingface/tensorflow/token-classification/quantization/requirements.txt deleted file mode 100644 index 6e419404871..00000000000 --- a/examples/huggingface/tensorflow/token-classification/quantization/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -datasets >= 1.17 -sentencepiece != 0.1.92 -seqeval -protobuf -intel-tensorflow -transformers -accelerate \ No newline at end of file diff --git a/examples/huggingface/tensorflow/token-classification/quantization/run_benchmark.sh b/examples/huggingface/tensorflow/token-classification/quantization/run_benchmark.sh deleted file mode 100644 index ddf9d917410..00000000000 --- 
a/examples/huggingface/tensorflow/token-classification/quantization/run_benchmark.sh +++ /dev/null @@ -1,129 +0,0 @@ -#!/bin/bash -set -x - -function main { - - init_params "$@" - run_benchmark - -} - -# init params -function init_params { - topology="bert_base_ner" - iters=100 - batch_size=16 - tuned_checkpoint=saved_results - cache_dir="cache" - for var in "$@" - do - case $var in - --topology=*) - topology=$(echo $var |cut -f2 -d=) - ;; - --dataset_location=*) - dataset_location=$(echo $var |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo $var |cut -f2 -d=) - ;; - --mode=*) - mode=$(echo $var |cut -f2 -d=) - ;; - --batch_size=*) - batch_size=$(echo $var |cut -f2 -d=) - ;; - --iters=*) - iters=$(echo ${var} |cut -f2 -d=) - ;; - --int8=*) - int8=$(echo ${var} |cut -f2 -d=) - ;; - --config=*) - tuned_checkpoint=$(echo $var |cut -f2 -d=) - ;; - --worker=*) - worker=$(echo $var |cut -f2 -d=) - ;; - --task_index=*) - task_index=$(echo $var |cut -f2 -d=) - ;; - --cache_dir=*) - cache_dir=$(echo $var |cut -f2 -d=) - ;; - *) - echo "Error: No such parameter: ${var}" - exit 1 - ;; - esac - done - -} - - -# run_benchmark -function run_benchmark { - extra_cmd='' - MAX_SEQ_LENGTH=128 - batch_size=1 - - if [[ ${mode} == "accuracy" ]]; then - mode_cmd=" --accuracy_only" - elif [[ ${mode} == "benchmark" ]]; then - mode_cmd=" --benchmark " - else - echo "Error: No such mode: ${mode}" - exit 1 - fi - - if [ "${topology}" = "bert_base_ner" ]; then - TASK_NAME="ner" - model_name_or_path="dslim/bert-base-NER" - approach="PostTrainingStatic" - dataset_name=conll2003 - fi - - if [[ ${int8} == "true" ]]; then - extra_cmd=$extra_cmd" --int8" - fi - echo $extra_cmd - - if [ "${worker}" = "" ] - then - python -u run_ner.py \ - --model_name_or_path ${model_name_or_path} \ - --dataset_name ${dataset_name} \ - --task_name ${TASK_NAME} \ - --pad_to_max_length \ - --do_eval \ - --max_length ${MAX_SEQ_LENGTH} \ - --per_device_eval_batch_size ${batch_size} \ - --max_eval_samples 408 \ 
- --output_dir ${tuned_checkpoint} \ - --overwrite_output_dir \ - --cache_dir ${cache_dir} \ - --no_cuda \ - ${mode_cmd} \ - ${extra_cmd} - else - python -u ../run_ner.py \ - --model_name_or_path ${model_name_or_path} \ - --task_name ${TASK_NAME} \ - --dataset_name ${dataset_name} \ - --pad_to_max_length \ - --do_eval \ - --max_length ${MAX_SEQ_LENGTH} \ - --per_device_eval_batch_size ${batch_size} \ - --max_eval_samples 408 \ - --output_dir ${tuned_checkpoint} \ - --overwrite_output_dir \ - --cache_dir ${cache_dir} \ - --no_cuda \ - --worker "${worker}" \ - --task_index ${task_index} \ - ${mode_cmd} \ - ${extra_cmd} - fi -} - -main "$@" diff --git a/examples/huggingface/tensorflow/token-classification/quantization/run_ner.py b/examples/huggingface/tensorflow/token-classification/quantization/run_ner.py deleted file mode 100644 index 30b9855c97f..00000000000 --- a/examples/huggingface/tensorflow/token-classification/quantization/run_ner.py +++ /dev/null @@ -1,696 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2021 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Fine-tuning a 🤗 Transformers model on token classification tasks (NER, POS, CHUNKS) relying on the accelerate library -without using a Trainer. 
-""" - -import logging -import sys -import random -import time -from dataclasses import dataclass, field -from typing import Optional - -from datasets import ClassLabel, load_dataset, load_metric -import numpy as np -import tensorflow as tf - -import transformers -from transformers import ( - AutoConfig, - AutoTokenizer, - DataCollatorForTokenClassification, - HfArgumentParser, - TFAutoModelForTokenClassification, - TFTrainingArguments, - set_seed, -) -from transformers.utils.versions import require_version -from transformers.trainer_utils import get_last_checkpoint, is_main_process - -logger = logging.getLogger(__name__) -require_version("datasets>=1.8.0", "To fix: pip install -r examples/tensorflow/token-classification/requirements.txt") - -class SavePretrainedCallback(tf.keras.callbacks.Callback): - # Hugging Face models have a save_pretrained() method that saves both the weights and the necessary - # metadata to allow them to be loaded as a pretrained model in future. This is a simple Keras callback - # that saves the model with this method after each epoch. - def __init__(self, output_dir, **kwargs): - super().__init__() - self.output_dir = output_dir - - def on_epoch_end(self, epoch, logs=None): - self.model.save_pretrained(self.output_dir) - - -# region Command-line arguments -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. 
- """ - - model_name_or_path: str = field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, - ) - model_revision: str = field( - default="main", - metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script " - "with private models)." - ) - }, - ) - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. 
- """ - - task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."}) - dataset_name: Optional[str] = field( - default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_file: Optional[str] = field( - default=None, metadata={"help": "The input training data file (a csv or JSON file)."} - ) - validation_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."}, - ) - test_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."}, - ) - text_column_name: Optional[str] = field( - default=None, metadata={"help": "The column name of text to input in the file (a csv or JSON file)."} - ) - label_column_name: Optional[str] = field( - default=None, metadata={"help": "The column name of label to input in the file (a csv or JSON file)."} - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - max_length: Optional[int] = field(default=256, metadata={"help": "Max length (in tokens) for truncation/padding"}) - pad_to_max_length: bool = field( - default=False, - metadata={ - "help": ( - "Whether to pad all samples to model maximum sentence length. " - "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " - "efficient on GPU but very bad for TPU." 
- ) - }, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - ) - }, - ) - max_predict_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of prediction examples to this " - "value if set." - ) - }, - ) - label_all_tokens: bool = field( - default=False, - metadata={ - "help": ( - "Whether to put the label for one word on all tokens of generated by that word or just on the " - "one (in which case the other tokens will have a padding index)." - ) - }, - ) - return_entity_level_metrics: bool = field( - default=False, - metadata={"help": "Whether to return all the entity levels during evaluation or just the overall ones."}, - ) - - def __post_init__(self): - if self.dataset_name is None and self.train_file is None and self.validation_file is None: - raise ValueError("Need either a dataset name or a training/validation file.") - else: - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." - if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." - self.task_name = self.task_name.lower() - -@dataclass -class OptimizationArguments: - """ - Arguments pertaining to what type of optimization we are going to apply on the model. 
- """ - - tune: bool = field( - default=False, - metadata={"help": "Whether or not to apply quantization."}, - ) - quantization_approach: Optional[str] = field( - default="PostTrainingStatic", - metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, - ) - metric_name: Optional[str] = field( - default=None, - metadata={"help": "Metric used for the tuning strategy."}, - ) - is_relative: Optional[bool] = field( - default=True, - metadata={"help": "Metric tolerance model, expected to be relative or absolute."}, - ) - perf_tol: Optional[float] = field( - default=0.01, - metadata={"help": "Performance tolerance when optimizing the model."}, - ) - benchmark: bool = field( - default=False, - metadata={"help": "run benchmark."}) - int8: bool = field( - default=False, - metadata={"help":"Whether to use the quantized int8 model."}) - accuracy_only: bool = field( - default=False, - metadata={"help":"Whether to only test accuracy for model tuned by Neural Compressor."}) - -@dataclass -class DistributedArguments: - """ - Arguments setting the distributed multinode environment - """ - - worker: str = field( - default=None, - metadata={"help": "List of node ip addresses in a string, and there should not be space between addresses."}, - ) - task_index: int = field( - default=0, - metadata={"help": "Worker index, and 0 represents the chief worker while other workers are set as 1,2,3..."}, - ) - -# endregion - - -def main(): - # region Argument Parsing - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments, OptimizationArguments, DistributedArguments)) - model_args, data_args, training_args, optim_args, distributed_args = parser.parse_args_into_dataclasses() - # endregion - - # region Setup logging - # we only want one process per machine to log things on the screen. - # accelerator.is_local_main_process is only True for one process per machine. 
- # region Logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) - - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(training_args.local_rank): - transformers.utils.logging.set_verbosity_info() - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - logger.info(f"Training/evaluation parameters {training_args}") - # endregion - - # If passed along, set the training seed now. - if training_args.seed is not None: - set_seed(training_args.seed) - # endregion - - # region Set the multinode environment, the strategy and paths - strategy = None - worker_list = None - if distributed_args.worker is not None: - logger.info("distributed environment initialization...") - - worker_list = distributed_args.worker.split(",") - - from intel_extension_for_transformers.transformers.utils.utility_tf import distributed_init - distributed_init(worker_list, "worker", distributed_args.task_index) - - strategy = tf.distribute.MultiWorkerMirroredStrategy() - from intel_extension_for_transformers.transformers.utils.utility_tf import get_filepath - training_args.output_dir = get_filepath(training_args.output_dir, strategy.cluster_resolver.task_type, strategy.cluster_resolver.task_id) - else: - strategy = training_args.strategy - #endregion - - # region Loading datasets - # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) - # or just provide the name of one of the public datasets for token classification task available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub). 
- # - # For CSV/JSON files, this script will use the column called 'tokens' or the first column if no column called - # 'tokens' is found. You can easily tweak this behavior (see below). - # - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if data_args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - raw_datasets = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - use_auth_token=True if model_args.use_auth_token else None, - cache_dir=model_args.cache_dir, - ) - else: - data_files = {} - if data_args.train_file is not None: - data_files["train"] = data_args.train_file - if data_args.validation_file is not None: - data_files["validation"] = data_args.validation_file - extension = data_args.train_file.split(".")[-1] - raw_datasets = load_dataset( - extension, - data_files=data_files, - use_auth_token=True if model_args.use_auth_token else None, - cache_dir=model_args.cache_dir, - ) - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. 
- - if raw_datasets["train"] is not None: - column_names = raw_datasets["train"].column_names - features = raw_datasets["train"].features - else: - column_names = raw_datasets["validation"].column_names - features = raw_datasets["validation"].features - - if data_args.text_column_name is not None: - text_column_name = data_args.text_column_name - elif "tokens" in column_names: - text_column_name = "tokens" - else: - text_column_name = column_names[0] - - if data_args.label_column_name is not None: - label_column_name = data_args.label_column_name - elif f"{data_args.task_name}_tags" in column_names: - label_column_name = f"{data_args.task_name}_tags" - else: - label_column_name = column_names[1] - - # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the - # unique labels. - def get_label_list(labels): - unique_labels = set() - for label in labels: - unique_labels = unique_labels | set(label) - label_list = list(unique_labels) - label_list.sort() - return label_list - - if isinstance(features[label_column_name].feature, ClassLabel): - label_list = features[label_column_name].feature.names - # No need to convert the labels since they are already ints. - label_to_id = {i: i for i in range(len(label_list))} - else: - label_list = get_label_list(raw_datasets["train"][label_column_name]) - label_to_id = {l: i for i, l in enumerate(label_list)} - num_labels = len(label_list) - # endregion - - # region Load config and tokenizer - # - # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. 
- - config = AutoConfig.from_pretrained( - model_args.config_name if model_args.config_name else model_args.model_name_or_path, - num_labels=num_labels, - finetuning_task=data_args.task_name, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - _commit_hash="main", - ) - - tokenizer_name_or_path = model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path - if not tokenizer_name_or_path: - raise ValueError( - "You are instantiating a new tokenizer from scratch. This is not supported by this script." - "You can do it from another script, save it, and load it from here, using --tokenizer_name." - ) - - if config.model_type in {"gpt2", "roberta"}: - tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, - add_prefix_space=True, _commit_hash="main",) - else: - tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, - _commit_hash="main",) - # endregion - - # region Preprocessing the raw datasets - # First we tokenize all the texts. - # should always use padding because the current ptq does not use tf > 2.8 - # so no RaggedTensor is supported - padding = "max_length" if data_args.pad_to_max_length else False - - # Tokenize all texts and align the labels with them. - - def tokenize_and_align_labels(examples): - tokenized_inputs = tokenizer( - examples[text_column_name], - max_length=data_args.max_length, - padding=padding, - truncation=True, - # We use this argument because the texts in our dataset are lists of words (with a label for each word). - is_split_into_words=True, - ) - - labels = [] - for i, label in enumerate(examples[label_column_name]): - word_ids = tokenized_inputs.word_ids(batch_index=i) - previous_word_idx = None - label_ids = [] - for word_idx in word_ids: - # Special tokens have a word id that is None. 
We set the label to -100 so they are automatically - # ignored in the loss function. - if word_idx is None: - label_ids.append(-100) - # We set the label for the first token of each word. - elif word_idx != previous_word_idx: - label_ids.append(label_to_id[label[word_idx]]) - # For the other tokens in a word, we set the label to either the current label or -100, depending on - # the label_all_tokens flag. - else: - label_ids.append(label_to_id[label[word_idx]] if data_args.label_all_tokens else -100) - previous_word_idx = word_idx - - labels.append(label_ids) - tokenized_inputs["labels"] = labels - return tokenized_inputs - - processed_raw_datasets = raw_datasets.map( - tokenize_and_align_labels, - batched=True, - remove_columns=raw_datasets["train"].column_names, - desc="Running tokenizer on dataset", - ) - - train_dataset = processed_raw_datasets["train"] - eval_dataset = processed_raw_datasets["validation"] - - if data_args.max_train_samples is not None: - max_train_samples = min(len(train_dataset), data_args.max_train_samples) - train_dataset = train_dataset.select(range(max_train_samples)) - - if data_args.max_eval_samples is not None: - max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) - eval_dataset = eval_dataset.select(range(max_eval_samples)) - - # Log a few random samples from the training set: - for index in random.sample(range(len(train_dataset)), 3): - logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") - # endregion - - # Metrics - metric = load_metric("seqeval") - - def get_labels(y_pred, y_true): - # Transform predictions and references tensos to numpy arrays - - # Remove ignored index (special tokens) - true_predictions = [ - [label_list[p] for (p, l) in zip(pred, gold_label) if l != -100] - for pred, gold_label in zip(y_pred, y_true) - ] - true_labels = [ - [label_list[l] for (p, l) in zip(pred, gold_label) if l != -100] - for pred, gold_label in zip(y_pred, y_true) - ] - return true_predictions, 
true_labels - - def compute_metrics(predictions, labels): - predictions = predictions["logits"] - predictions = np.argmax(predictions, axis=-1) - - attention_mask = eval_dataset.with_format("tf")["attention_mask"] - labels[attention_mask == 0] = -100 - - # Remove ignored index (special tokens) - preds, refs = get_labels(predictions, labels) - - metric.add_batch( - predictions=preds, - references=refs, - ) - results = metric.compute() - - if data_args.return_entity_level_metrics: - # Unpack nested dictionaries - final_results = {} - for key, value in results.items(): - if isinstance(value, dict): - for n, v in value.items(): - final_results[f"{key}_{n}"] = v - else: - final_results[key] = value - return final_results - else: - return { - "precision": results["overall_precision"], - "recall": results["overall_recall"], - "f1": results["overall_f1"], - "accuracy": results["overall_accuracy"], - } - - # endregion - - with strategy.scope(): - # region Initialize model - if model_args.model_name_or_path: - model = TFAutoModelForTokenClassification.from_pretrained( - model_args.model_name_or_path, - config=config, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - ) - else: - logger.info("Training new model from scratch") - model = TFAutoModelForTokenClassification.from_config(config) - - model.resize_token_embeddings(len(tokenizer)) - # endregion - - # region Create TF datasets - - # We need the DataCollatorForTokenClassification here, as we need to correctly pad labels as - # well as inputs. 
- collate_fn = DataCollatorForTokenClassification(tokenizer=tokenizer, return_tensors="tf") - total_train_batch_size = training_args.per_device_train_batch_size * (len(worker_list) if worker_list is not None else 1) - - dataset_options = tf.data.Options() - dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF - - # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in - # training. This is the recommended way to use a Hugging Face dataset when training with Keras. You can also - # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names - # yourself if you use this method, whereas they are automatically inferred from the model input names when - # using model.prepare_tf_dataset() - # For more info see the docs: - # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset - # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset - - tf_train_dataset = model.prepare_tf_dataset( - train_dataset, - collate_fn=collate_fn, - batch_size=total_train_batch_size, - shuffle=True, - ).with_options(dataset_options) - total_eval_batch_size = training_args.per_device_eval_batch_size * (len(worker_list) if worker_list is not None else 1) - tf_eval_dataset = model.prepare_tf_dataset( - eval_dataset, - collate_fn=collate_fn, - batch_size=total_eval_batch_size, - shuffle=False, - ).with_options(dataset_options) - - # endregion - - # region Optimizer, loss and compilation - optimizer = tf.keras.optimizers.Adam( - learning_rate=training_args.learning_rate, - beta_1=training_args.adam_beta1, - beta_2=training_args.adam_beta2, - epsilon=training_args.adam_epsilon, - clipnorm=training_args.max_grad_norm, - ) - - model.compile(optimizer=optimizer, jit_compile=training_args.xla) - # endregion - - if optim_args.tune: - from 
intel_extension_for_transformers.transformers import metrics, objectives, QuantizationConfig, TFOptimization - optimization = TFOptimization( - model=model, - args=training_args, - train_dataset=tf_train_dataset, - eval_dataset=tf_eval_dataset, - compute_metrics=compute_metrics, - task_type=strategy.cluster_resolver.task_type if isinstance(strategy, tf.distribute.MultiWorkerMirroredStrategy) else None, - task_id=strategy.cluster_resolver.task_id if isinstance(strategy, tf.distribute.MultiWorkerMirroredStrategy) else None, - ) - tune_metric = metrics.Metric( - name="accuracy", greater_is_better=True, is_relative=True, criterion=optim_args.perf_tol, - ) - quantization_config = QuantizationConfig( - framework="tensorflow", - approach="POSTTRAININGSTATIC", - metrics=[tune_metric], - objectives=[objectives.performance] - ) - quantized_model = optimization.quantize(quant_config=quantization_config) - exit(0) - - # region Training - if training_args.do_train: - callbacks = [SavePretrainedCallback(output_dir=training_args.output_dir)] - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num Epochs = {training_args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") - logger.info(f" Total train batch size = {total_train_batch_size}") - # Only show the progress bar once on each machine. 
- - model.fit( - tf_train_dataset, - validation_data=tf_eval_dataset, - epochs=int(training_args.num_train_epochs), - callbacks=callbacks, - ) - # endregion - - # region Evaluation - if training_args.do_eval: - # We normally do validation as part of the Keras fit loop, but we run it independently - # if there was no fit() step (because we didn't train the model) or if the task is MNLI, - # because MNLI has a separate validation-mismatched validation set - logger.info("*** Evaluate ***") - - tasks = [data_args.task_name] - tf_datasets = [tf_eval_dataset] - raw_datasets = [processed_raw_datasets["validation"]] - - num_examples = 0 - - if optim_args.int8: - model = tf.saved_model.load(training_args.output_dir) - else: - from intel_extension_for_transformers.transformers.utils.utility_tf import keras2SavedModel - model = keras2SavedModel(model) - - for raw_dataset, tf_dataset, task in zip(raw_datasets, tf_datasets, tasks): - num_examples += sum( - 1 for _ in (tf_dataset.unbatch() - if hasattr(tf_dataset, "unbatch") else tf_dataset - ) - ) - - preds: np.ndarray = None - label_ids: np.ndarray = None - infer = model.signatures[list(model.signatures.keys())[0]] - - if optim_args.accuracy_only: - iterations = 1 - warmup = 0 - else: - iterations = 10 - warmup = 5 - latency_list = [] - - for idx in range(iterations): - iteration_time = 0 - for i, (inputs, labels) in enumerate(tf_dataset): - for name in inputs: - inputs[name] = tf.constant(inputs[name].numpy(), dtype=infer.inputs[0].dtype) - start = time.time() - results = infer(**inputs) - iteration_time += time.time() - start - if idx == 0: # only accumulate once all the preds and labels - for val in results: - if preds is None: - preds = results[val].numpy() - else: - preds = np.append(preds, results[val].numpy(), axis=0) - if label_ids is None: - label_ids = labels.numpy() - else: - label_ids = np.append(label_ids, labels.numpy(), axis=0) - - latency_list.append(iteration_time) - logger.info("Iteration {} time: {} 
sec".format(idx, iteration_time)) - eval_metrics = compute_metrics({"logits": preds}, label_ids) - logger.info("\nEvaluation result: ") - logger.info("metric ({}) Accuracy: {}".format(task, eval_metrics["accuracy"])) - - average_iteration_time = np.array(latency_list[warmup:]).mean() - logger.info( - "Throughput: {} samples/sec".format( - num_examples / average_iteration_time) - ) - # endregion - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/examples/huggingface/tensorflow/token-classification/quantization/run_tuning.sh b/examples/huggingface/tensorflow/token-classification/quantization/run_tuning.sh deleted file mode 100644 index 415cf26ddd1..00000000000 --- a/examples/huggingface/tensorflow/token-classification/quantization/run_tuning.sh +++ /dev/null @@ -1,104 +0,0 @@ -#!/bin/bash -set -x - -function main { - - init_params "$@" - run_tuning - -} - -# init params -function init_params { - topology="bert_base_ner" - tuned_checkpoint="saved_results" - extra_cmd="" - batch_size=8 - MAX_SEQ_LENGTH=128 - model_type="bert" - approach="PostTrainingStatic" - cache_dir="cache" - for var in "$@" - do - case $var in - --topology=*) - topology=$(echo $var |cut -f2 -d=) - ;; - --dataset_location=*) - dataset_location=$(echo $var |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo $var |cut -f2 -d=) - ;; - --output_model=*) - tuned_checkpoint=$(echo $var |cut -f2 -d=) - ;; - --worker=*) - worker=$(echo $var |cut -f2 -d=) - ;; - --task_index=*) - task_index=$(echo $var |cut -f2 -d=) - ;; - --cache_dir=*) - cache_dir=$(echo $var |cut -f2 -d=) - ;; - *) - echo "Error: No such parameter: ${var}" - exit 1 - ;; - esac - done - -} - -# run_tuning -function run_tuning { - batch_size=64 - if [ "${topology}" = "bert_base_ner" ]; then - TASK_NAME="ner" - model_name_or_path="dslim/bert-base-NER" - approach="PostTrainingStatic" - dataset_name=conll2003 - fi - - if [ "${worker}" = "" ] - then - python -u run_ner.py \ - --model_name_or_path 
${model_name_or_path} \ - --dataset_name ${dataset_name} \ - --task_name ${TASK_NAME} \ - --pad_to_max_length \ - --do_eval \ - --max_length ${MAX_SEQ_LENGTH} \ - --per_device_train_batch_size ${batch_size} \ - --per_device_eval_batch_size ${batch_size} \ - --output_dir ${tuned_checkpoint} \ - --no_cuda \ - --overwrite_output_dir \ - --cache_dir ${cache_dir} \ - --quantization_approach ${approach} \ - --tune \ - ${extra_cmd} - else - python -u run_ner.py \ - --model_name_or_path ${model_name_or_path} \ - --dataset_name ${dataset_name} \ - --task_name ${TASK_NAME} \ - --pad_to_max_length \ - --do_eval \ - --max_length ${MAX_SEQ_LENGTH} \ - --per_device_train_batch_size ${batch_size} \ - --per_device_eval_batch_size ${batch_size} \ - --output_dir ${tuned_checkpoint} \ - --no_cuda \ - --overwrite_output_dir \ - --cache_dir ${cache_dir} \ - --quantization_approach ${approach} \ - --tune \ - --worker "${worker}" \ - --task_index ${task_index} \ - ${extra_cmd} - fi -} - -main "$@" diff --git a/intel_extension_for_transformers/transformers/__init__.py b/intel_extension_for_transformers/transformers/__init__.py index c7b129f1ba0..94b8091c60e 100644 --- a/intel_extension_for_transformers/transformers/__init__.py +++ b/intel_extension_for_transformers/transformers/__init__.py @@ -19,22 +19,12 @@ from .config import ( WEIGHTS_NAME, BenchmarkConfig, - DistillationConfig, DynamicLengthConfig, Provider, PrunerV2, - PruningConfig, - QuantizationConfig, - TFDistillationConfig, -) -from .distillation import ( - SUPPORTED_DISTILLATION_CRITERION_MODE, - DistillationCriterionMode, + ) -from .optimizer import NoTrainerOptimizer, Orchestrate_optimizer -from .optimizer_tf import TFOptimization -from .pruning import SUPPORTED_PRUNING_MODE, PrunerConfig, PruningMode -from .quantization import SUPPORTED_QUANT_MODE, QuantizationMode + from .utils import ( MixedPrecisionConfig, BitsAndBytesConfig, diff --git a/intel_extension_for_transformers/transformers/config.py 
b/intel_extension_for_transformers/transformers/config.py index 640efdb8cf5..a0009e7d3ed 100644 --- a/intel_extension_for_transformers/transformers/config.py +++ b/intel_extension_for_transformers/transformers/config.py @@ -18,16 +18,11 @@ import yaml from enum import Enum -from neural_compressor.conf.config import ( - Distillation_Conf, Pruner, Pruning_Conf, Quantization_Conf -) -from neural_compressor.conf.dotdict import DotDict, deep_set + +from neural_compressor.conf.dotdict import DotDict from .utils.metrics import Metric from .utils.objectives import Objective, performance -from .quantization import QuantizationMode, SUPPORTED_QUANT_MODE -from .distillation import ( - Criterion, DistillationCriterionMode, SUPPORTED_DISTILLATION_CRITERION_MODE -) + from typing import List, Union from xmlrpc.client import boolean @@ -150,651 +145,6 @@ def __init__( self.latency_constraint = latency_constraint self.evo_eval_metric = evo_eval_metric - -class QuantizationConfig(object): - """Configure the quantization process. - - Args: - framework: Which framework you used - approach: Which quantization approach to use - strategy: Which quantization tuning strategy to use - timeout: Tuning timeout(seconds), 0 means early stop. 
Combined with max_trials field to decide when to exit - max_trials: Max tune times - metrics: Used to evaluate accuracy of tuning model, no need for NoTrainerOptimize - objectives: Objective with accuracy constraint guaranteed - config_file: Path to the config file - sampling_size: How many samples to use - use_bf16: Whether to use bf16 - recipes: apply recipes for quantization, neural_compressor support below recipes: - 'smooth_quant': whether do smooth quant - 'smooth_quant_args': parameters for smooth_quant - 'fast_bias_correction': whether do fast bias correction - 'weight_correction': whether do weight correction - 'gemm_to_matmul': whether convert gemm to matmul and add, only valid for onnx models - 'graph_optimization_level': support 'DISABLE_ALL', 'ENABLE_BASIC', 'ENABLE_EXTENDED', 'ENABLE_ALL' - only valid for onnx models - 'first_conv_or_matmul_quantization': whether quantize the first conv or matmul - 'last_conv_or_matmul_quantization': whether quantize the last conv or matmul - 'pre_post_process_quantization': whether quantize the ops in preprocess and postprocess - 'add_qdq_pair_to_weight': whether add QDQ pair for weights, only valid for onnxrt_trt_ep - 'optypes_to_exclude_output_quant': don't quantize output of specified optypes - 'dedicated_qdq_pair': whether dedicate QDQ pair, only valid for onnxrt_trt_ep. 
- """ - def __init__( - self, - framework: str = "pytorch", - approach: str = "PostTrainingStatic", - strategy: str = "basic", - timeout: int = 0, - max_trials: int = 100, - metrics: Union[Metric, List] = None, - objectives: Union[Objective, List] = performance, - config_file: str = None, - sampling_size: int = 100, - use_bf16: bool = False, - recipes: dict = None, - ): - """Init a QuantizationConfig object.""" - super().__init__() - if config_file is None: - self.inc_config = Quantization_Conf() - else: - self.inc_config = Quantization_Conf(config_file) - self.framework = framework - if approach is not None: - self.approach = approach - if strategy is not None: - self.strategy = strategy - if timeout is not None: - self.timeout = timeout - if max_trials is not None: - self.max_trials = max_trials - if metrics is not None: - self.metrics = metrics - else: - self._metrics = None - if objectives is not None: - self.objectives = objectives - else: - self._objectives = None - if sampling_size is not None: - self.sampling_size = sampling_size - self.inc_config.usr_cfg.use_bf16 = use_bf16 - if recipes is not None: - self.recipes = recipes - - @property - def approach(self): - """Get the quantization approach.""" - return self.inc_config.usr_cfg.quantization.approach - - @approach.setter - def approach(self, approach): - """Set the quantization approach.""" - approach = approach.upper() - assert approach in SUPPORTED_QUANT_MODE, \ - f"quantization approach: {approach} is not support!" + \ - "PostTrainingStatic, PostTrainingDynamic and QuantizationAwareTraining are supported!" 
- self.inc_config.usr_cfg.quantization.approach = QuantizationMode[approach].value - - @property - def input_names(self): - """Get the input names.""" - return self.inc_config.usr_cfg.model.inputs - - @input_names.setter - def input_names(self, input_names): - """Set the input names.""" - assert isinstance(input_names, list), "input_names must be a list" - self.inc_config.usr_cfg.model.inputs = input_names - - @property - def output_names(self): - """Get the output names.""" - return self.inc_config.usr_cfg.model.outputs - - @output_names.setter - def output_names(self, output_names): - """Set the output names.""" - assert isinstance(output_names, list), "output_names must be a list" - self.inc_config.usr_cfg.model.outputs = output_names - - @property - def metrics(self): - """Get the metrics.""" - return self._metrics - - @metrics.setter - def metrics(self, metrics: Union[Metric, List]): - """Set the metrics.""" - self._metrics = metrics - rel_or_abs = {True: "relative", False: "absolute"} - assert isinstance(metrics[0] if isinstance(metrics, list) else metrics, Metric), \ - "metric should be a Metric class!" - if isinstance(metrics, Metric) or len(metrics) == 1: - self.inc_config.usr_cfg.tuning.accuracy_criterion = { - rel_or_abs[metrics[0].is_relative] - if isinstance(metrics, list) else rel_or_abs[metrics.is_relative]: - metrics[0].criterion if isinstance(metrics, list) else metrics.criterion, - "higher_is_better": metrics[0].greater_is_better if isinstance(metrics, list) else - metrics.greater_is_better - } - else: - weights = [metric.weight_ratio for metric in metrics] - if not any(weights): - weight = 1 / len(metrics) - for metric in metrics: - metric.weight_ratio = weight - else: # pragma: no cover - assert all(weights), "Please set the weight ratio for all metrics!" - - assert all(metric.is_relative == metrics[0].is_relative for metric in metrics), \ - "Unsupported different is_relative for different metric now, will support soon!" 
- assert all(metric.criterion == metrics[0].criterion for metric in metrics), \ - "Unsupported different criterion for different metric now, will support soon!" - - self.inc_config.usr_cfg.tuning.accuracy_criterion = { - rel_or_abs[metrics[0].is_relative]: metrics[0].criterion, - "higher_is_better": metrics[0].greater_is_better - } - - @property - def framework(self): - """Get the framework.""" - return self.inc_config.usr_cfg.model.framework - - @framework.setter - def framework(self, framework): - """Set the framework.""" - assert framework in ["pytorch", "pytorch_fx", "pytorch_ipex", "tensorflow"], \ - "framework: {} is not support!".format(framework) - self.inc_config.usr_cfg.model.framework = framework - - @property - def objectives(self): - """Get the objectives.""" - return self._objectives - - @objectives.setter - def objectives(self, objectives: Union[List, Objective]): - """Set the objectives.""" - self._objectives = objectives - if isinstance(objectives, Objective) or len(objectives) == 1: - self.inc_config.usr_cfg.tuning.objective = objectives.name \ - if isinstance(objectives, Objective) else objectives[0].name - else: - weights = [objective.weight_ratio for objective in objectives] - if not any(weights): - weight = 1 / len(objectives) - for objective in objectives: - objective.weight_ratio = weight - else: - assert all(weights), "Please set the weight ratio for all metrics!" 
- - self.inc_config.usr_cfg.tuning.multi_objective = { - "objective": [objective.name for objective in objectives], - "higher_is_better": [objective.greater_is_better for objective in objectives], - "weight": [objective.weight_ratio for objective in objectives], - } - - @property - def strategy(self): - """Get the strategy.""" - return self.inc_config.usr_cfg.tuning.strategy.name - - @strategy.setter - def strategy(self, strategy): - """Set the strategy.""" - assert strategy in ["basic", "bayesian", "mse", "mse_v2"], \ - "strategy: {} is not support!".format(strategy) - self.inc_config.usr_cfg.tuning.strategy.name = strategy - if strategy == "mse_v2": - self.inc_config.usr_cfg.tuning.strategy_kwargs = {"confidence_batches": 1} - - @property - def timeout(self): - """Get the timeout.""" - return self.inc_config.usr_cfg.tuning.exit_policy.timeout - - @timeout.setter - def timeout(self, timeout): - """Set the timeout.""" - assert isinstance(timeout, int), "timeout should be integer!" - self.inc_config.usr_cfg.tuning.exit_policy.timeout = timeout - - @property - def op_wise(self): - """Get the op_wise dict.""" - return self.inc_config.usr_cfg.quantization.op_wise - - @op_wise.setter - def op_wise(self, op_wise): - """Set the op_wise dict.""" - self.inc_config.usr_cfg.quantization.op_wise = op_wise - - @property - def optype_wise(self): - """Get the optype_wise dict.""" - return self.inc_config.usr_cfg.quantization.optype_wise - - @optype_wise.setter - def optype_wise(self, optype_wise): - """Set the optype_wise dict.""" - self.inc_config.usr_cfg.quantization.optype_wise = optype_wise - - @property - def max_trials(self): - """Get the number of maximum trials.""" - return self.inc_config.usr_cfg.tuning.exit_policy.max_trials - - @max_trials.setter - def max_trials(self, max_trials): - """Set the number of maximum trials.""" - assert isinstance(max_trials, int), "max_trials should be integer!" 
- self.inc_config.usr_cfg.tuning.exit_policy.max_trials = max_trials - - @property - def performance_only(self): - """Get the boolean whether to use performance only.""" - return self.inc_config.usr_cfg.tuning.exit_policy.performance_only - - @performance_only.setter - def performance_only(self, performance_only): - """Set the boolean whether to use performance only.""" - assert isinstance(performance_only, boolean), "performance_only should be boolean!" - self.inc_config.usr_cfg.tuning.exit_policy.performance_only = performance_only - - @property - def random_seed(self): - """Get the random seed.""" - return self.inc_config.usr_cfg.tuning.random_seed - - @random_seed.setter - def random_seed(self, random_seed): - """Set the random seed.""" - assert isinstance(random_seed, int), "random_seed should be integer!" - self.inc_config.usr_cfg.tuning.random_seed = random_seed - - @property - def tensorboard(self): - """Get the boolean whether to use tensorboard.""" - return self.inc_config.usr_cfg.tuning.tensorboard - - @tensorboard.setter - def tensorboard(self, tensorboard): - """Set the boolean whether to use tensorboard.""" - assert isinstance(tensorboard, boolean), "tensorboard should be boolean!" - self.inc_config.usr_cfg.tuning.tensorboard = tensorboard - - @property - def output_dir(self): - """Get the output directory.""" - return self.inc_config.usr_cfg.tuning.workspace.path - - @output_dir.setter - def output_dir(self, path): - """Set the output directory.""" - assert isinstance(path, str), "save_path should be a string of directory!" - self.inc_config.usr_cfg.tuning.workspace.path = path - - @property - def resume_path(self): - """Get the resume path.""" - return self.inc_config.usr_cfg.tuning.workspace.resume - - @resume_path.setter - def resume_path(self, path): - """Set the resume path.""" - assert isinstance(path, str), "resume_path should be a string of directory!" 
- self.inc_config.usr_cfg.tuning.workspace.resume = path - - @property - def sampling_size(self): - """Get the sampling size.""" - return self.inc_config.usr_cfg.quantization.calibration.sampling_size - - @sampling_size.setter - def sampling_size(self, sampling_size): - """Set the sampling size.""" - if isinstance(sampling_size, int): - self.inc_config.usr_cfg.quantization.calibration.sampling_size = [sampling_size] - elif isinstance(sampling_size, list): - self.inc_config.usr_cfg.quantization.calibration.sampling_size = sampling_size - else: - assert False, "The sampling_size must be a list of int numbers" - - @property - def recipes(self): - """Get the sampling size.""" - return self.inc_config.usr_cfg.quantization.recipes - - @recipes.setter - def recipes(self, recipes): - """Set recipes.""" - if recipes is not None and not isinstance(recipes, dict): - raise ValueError("recipes should be a dict.") - - # Support PyTorch only - def smooth_quant(val=None): - if val is not None: - return check_value("smooth_quant", val, bool) - else: - return False - - # Support PyTorch only - def smooth_quant_args(val=None): - if val is not None: - check_value("smooth_quant_args", val, dict) - for k, v in val.items(): - if k == "alpha": - assert isinstance(v, str) or isinstance(v, float),\ - "Smooth_quant_args.alpha should be a float or 'auto'." 
- return True - else: - return {} - - # Support tensorflow, but not enabled now - def fast_bias_correction(val=None): # pragma: no cover - if val is not None: - return check_value("fast_bias_correction", val, bool) - else: - return False - - # Support tensorflow, but not enabled now - def weight_correction(val=None): # pragma: no cover - if val is not None: - return check_value("weight_correction", val, bool) - else: - return False - - # Support Tensorflow only - def first_conv_or_matmul_quantization(val=None): - if val is not None: - return check_value("first_conv_or_matmul_quantization", val, bool) - else: - return True - - # Support Tensorflow only - def last_conv_or_matmul_quantization(val=None): - if val is not None: - return check_value("last_conv_or_matmul_quantization", val, bool) - else: - return True - - RECIPES = {"smooth_quant": smooth_quant, # Only for PyTorch - "smooth_quant_args": smooth_quant_args, # Only for PyTorch - "fast_bias_correction": fast_bias_correction, # Support PyTorch and Tensorflow, not used now. - "weight_correction": weight_correction, # Support PyTorch and Tensorflow, not used now. - "first_conv_or_matmul_quantization": first_conv_or_matmul_quantization, # Only for Tensorflow - "last_conv_or_matmul_quantization": last_conv_or_matmul_quantization, # Only for Tensorflow - } - _recipes = {} - for k in RECIPES.keys(): - if k in recipes and RECIPES[k](recipes[k]): - _recipes.update({k: recipes[k]}) - else: - _recipes.update({k: RECIPES[k]()}) - deep_set(self.inc_config.usr_cfg, 'quantization.recipes', _recipes) - - -class PruningConfig(object): - """Configure the pruning process. 
- - Args: - framework: Which framework you used - epochs: How many epochs to prune - epoch_range: Epoch range list - initial_sparsity_ratio: Initial sparsity goal, and not needed if pruner_config argument is defined - target_sparsity_ratio: Target sparsity goal, and not needed if pruner_config argument is defined - metrics: Used to evaluate accuracy of tuning model, not needed for NoTrainerOptimizer - pruner_config: Defined pruning behavior, if it is None, then NLP will create a default pruner with - 'BasicMagnitude' pruning typel - config_file: Path to the config file - """ - def __init__( - self, - framework: str = "pytorch", - epochs: int = 1, - epoch_range: List = [0, 4], - initial_sparsity_ratio: float=0.0, - target_sparsity_ratio: float = 0.97, - metrics: Metric = None, - pruner_config: Union[List, Pruner] = None, - config_file: str = None - ): - """Init a PruningConfig object.""" - super().__init__() - self.inc_config = Pruning_Conf(config_file) - self.framework = framework - - if initial_sparsity_ratio is not None: - self.initial_sparsity_ratio = initial_sparsity_ratio - if target_sparsity_ratio is not None: - self.target_sparsity_ratio = target_sparsity_ratio - if epoch_range is not None: - self.epoch_range = epoch_range - if metrics is not None: - self.metrics = metrics - else: - self._metrics = None - if pruner_config is not None: - self.pruner_config = pruner_config - else: - self.init_prune_config() - self.epochs = epochs - - - def init_prune_config(self): - """Init the pruning config.""" - pruner_config = Pruner() - self.inc_config.usr_cfg.pruning.approach.weight_compression['pruners'] = [pruner_config] - - @property - def pruner_config(self): - """Get the pruner config.""" - return self.inc_config.usr_cfg.pruning.approach.weight_compression.pruners - - @pruner_config.setter - def pruner_config(self, pruner_config): - """Set the pruner config.""" - if isinstance(pruner_config, list): - 
self.inc_config.usr_cfg.pruning.approach.weight_compression.pruners = pruner_config - else: - self.inc_config.usr_cfg.pruning.approach.weight_compression.pruners = [pruner_config] - - @property - def target_sparsity_ratio(self): - """Get the target sparsity ratio.""" - return self.inc_config.usr_cfg.pruning.approach.weight_compression.target_sparsity - - @target_sparsity_ratio.setter - def target_sparsity_ratio(self, target_sparsity_ratio): - """Set the target sparsity ratio.""" - self.inc_config.usr_cfg.pruning.approach.weight_compression.target_sparsity = \ - target_sparsity_ratio - - @property - def initial_sparsity_ratio(self): - """Get the initial sparsity ratio.""" - return self.inc_config.usr_cfg.pruning.approach.weight_compression.initial_sparsity - - @initial_sparsity_ratio.setter - def initial_sparsity_ratio(self, initial_sparsity_ratio): - """Set the initial sparsity ratio.""" - self.inc_config.usr_cfg.pruning.approach.weight_compression.initial_sparsity = \ - initial_sparsity_ratio - - @property - def epoch_range(self): - """Get the epoch range.""" - return [self.inc_config.usr_cfg.pruning.approach.weight_compression.start_epoch, - self.inc_config.usr_cfg.pruning.approach.weight_compression.end_epoch] - - @epoch_range.setter - def epoch_range(self, epoch_range): - """Set the epoch range.""" - assert isinstance(epoch_range, list) and len(epoch_range) == 2, \ - "You should set epoch_range like [a,b] format to match the pruning start and end epoch." 
- self.inc_config.usr_cfg.pruning.approach.weight_compression.start_epoch = epoch_range[0] - self.inc_config.usr_cfg.pruning.approach.weight_compression.end_epoch = epoch_range[1] - - @property - def epochs(self): - """Get the epochs.""" - eps = self.inc_config.usr_cfg.pruning.train.epoch \ - if hasattr(self.inc_config.usr_cfg.pruning, "train") else 1 - return eps - - @epochs.setter - def epochs(self, epochs): - """Set the epochs.""" - assert isinstance(epochs, int) and epochs > 0, \ - "You should set epochs > 0 and int, not {}.".format(epochs) - self.inc_config.usr_cfg.pruning["train"] = {"epoch": epochs} - - @property - def framework(self): - """Get the framework.""" - return self.inc_config.usr_cfg.model.framework - - @framework.setter - def framework(self, framework): - """Set the framework.""" - assert framework.lower() in ["pytorch", "pytorch_fx", "tensorflow"], \ - "framework: {} is not support!".format(framework) - self.inc_config.usr_cfg.model.framework = framework.lower() - - @property - def metrics(self): - """Get the metrics.""" - return self._metrics - - @metrics.setter - def metrics(self, metrics: Metric): - """Set the metrics.""" - self._metrics = metrics - - -class DistillationConfig(object): - """Configure the distillation process. 
- - Args: - framework: Which framework you used - criterion: Criterion of training, example: "KnowledgeLoss" - metrics: Metrics for distillation - inc_config: Distillation config - """ - def __init__( - self, - framework: str = "pytorch", - criterion: Criterion = None, - metrics: Metric = None, - inc_config = None - ): - """Init a DistillationConfig object.""" - super().__init__() - self.inc_config = Distillation_Conf(inc_config) - self.framework = framework - if criterion is not None: - self.criterion = criterion - if metrics is not None: - self.metrics = metrics - else: - self._metrics = None - - @property - def framework(self): - """Get the framework.""" - return self.inc_config.usr_cfg.model.framework - - @framework.setter - def framework(self, framework): - """Set the framework.""" - assert framework in ["pytorch", "pytorch_fx", "tensorflow"], \ - "framework: {} is not support!".format(framework) - self.inc_config.usr_cfg.model.framework = framework - - @property - def criterion(self): - """Get the criterion.""" - return self.inc_config.usr_cfg.distillation.train.criterion - - @criterion.setter - def criterion(self, criterion: Criterion): - """Set the criterion.""" - assert criterion.name.upper() in SUPPORTED_DISTILLATION_CRITERION_MODE, \ - "The criterion name must be in ['KnowledgeLoss', 'IntermediateLayersLoss']" - if criterion.name.upper() == DistillationCriterionMode.KNOWLEDGELOSS.name: - assert criterion.temperature is not None, \ - "Please pass the temperature to Criterion.temperature!" - assert criterion.loss_types is not None, \ - "Please pass the loss_types to Criterion.loss_types!" - assert criterion.loss_weight_ratio is not None, \ - "Please pass the loss_weight_ratio to Criterion.loss_weight_ratio!" 
- self.inc_config.usr_cfg.distillation.train.criterion = { - DistillationCriterionMode.KNOWLEDGELOSS.value: { - "temperature": criterion.temperature, - "loss_types": criterion.loss_types, - "loss_weights": criterion.loss_weight_ratio - } - } - - if criterion.name.upper() == DistillationCriterionMode.INTERMEDIATELAYERSLOSS.name: - assert criterion.layer_mappings is not None, \ - "Please pass the layer_mappings to Criterion.layer_mappings!" - assert criterion.loss_types is not None, \ - "Please pass the loss_types to Criterion.loss_types!" - assert criterion.loss_weight_ratio is not None, \ - "Please pass the loss_weight_ratio to Criterion.loss_weight_ratio!" - assert criterion.add_origin_loss is not None, \ - "Please pass the add_origin_loss to Criterion.add_origin_loss!" - self.inc_config.usr_cfg.distillation.train.criterion = { - DistillationCriterionMode.INTERMEDIATELAYERSLOSS.value: { - "layer_mappings": criterion.layer_mappings, - "loss_types": criterion.loss_types, - "loss_weights": criterion.loss_weight_ratio, - "add_origin_loss": criterion.add_origin_loss - } - } - - @property - def metrics(self): - """Get the metrics.""" - return self._metrics - - @metrics.setter - def metrics(self, metrics): - """Set the metrics.""" - assert isinstance(metrics, Metric), \ - "metric should be a Metric class!" - self._metrics = metrics - - -class TFDistillationConfig(object): - """Configure the distillation process for Tensorflow. 
- - Args: - loss_types: Type of loss - loss_weights: Weight ratio of loss - train_steps: Steps of training - temperature: Parameter for KnowledgeDistillationLoss - """ - def __init__( - self, - loss_types: list = [], - loss_weights: list = [], - train_steps: list = [], - temperature: float = 1.0 - ): - """Init a TFDistillationConfig object.""" - super().__init__() - self.loss_types = loss_types - self.loss_weights = loss_weights - self.train_steps = train_steps - self.temperature = temperature - - - class BenchmarkConfig: """Config Class for Benchmark. diff --git a/intel_extension_for_transformers/transformers/distillation.py b/intel_extension_for_transformers/transformers/distillation.py deleted file mode 100644 index 9f801b9c112..00000000000 --- a/intel_extension_for_transformers/transformers/distillation.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Distillation: set criterion mode to distillation.""" -from enum import Enum -from typing import List - - -class Criterion(object): - """Criterion class for distillation.""" - def __init__( - self, - name: str = "KNOWLEDGELOSS", - temperature: float = 1.0, - loss_types: List = ['CE', 'CE'], - loss_weight_ratio: List = [0.5, 0.5], - layer_mappings: List = None, - add_origin_loss: bool = False - ): - """Init a Criterion object.""" - self.name = name - self.temperature = temperature - self.loss_types = loss_types - self.loss_weight_ratio = loss_weight_ratio - self.layer_mappings = layer_mappings - self.add_origin_loss = add_origin_loss - - -class DistillationCriterionMode(Enum): - """Criterion mode class for distillation.""" - KNOWLEDGELOSS = "KnowledgeDistillationLoss" - INTERMEDIATELAYERSLOSS = "IntermediateLayersKnowledgeDistillationLoss" - - - -SUPPORTED_DISTILLATION_CRITERION_MODE = \ - set([approach.name for approach in DistillationCriterionMode]) diff --git a/intel_extension_for_transformers/transformers/optimizer.py b/intel_extension_for_transformers/transformers/optimizer.py deleted file mode 100644 index db1eeb6a973..00000000000 --- a/intel_extension_for_transformers/transformers/optimizer.py +++ /dev/null @@ -1,466 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Optimization: provides the orchestrate optimizer for Pytorch.""" -import logging -import os -import shlex - -from neural_compressor.experimental import( - common, - Component, - Distillation, - Quantization, - Pruning, -) -from neural_compressor.experimental.scheduler import Scheduler -from intel_extension_for_transformers.transformers import( - DistillationConfig, - Provider, - QuantizationConfig, - PruningConfig -) -from intel_extension_for_transformers.transformers.utils.utility import LazyImport -from intel_extension_for_transformers.transformers.quantization import QuantizationMode -from transformers import PreTrainedModel, PretrainedConfig -from transformers.file_utils import WEIGHTS_NAME -from typing import Callable, Optional, Union, List - -torch = LazyImport("torch") - -logger = logging.getLogger(__name__) - - -class Orchestrate_optimizer: - """Orchestrate_optimizer aggregates and orchestrates components such as Quantization, Pruning and Distillation.""" - def __init__( - self, - model, - components: Optional[List[Component]] = [], - eval_func: Optional[Callable] = None, - train_func: Optional[Callable] = None, - output_dir: Optional[str] = "saved_results", - ): - """Init an orchestrate optimizer. - - Args: - model: Model to quantize and/or prune. - components: List of Component objects which contains Quantization, Pruning, Distillation objects. - eval_func: Evaluation function to evaluate the tuning objective. - train_func: Training function which will be combined with pruning. 
- """ - if len(components) == 0: - raise RuntimeError("`NLPOptimizer` requires at least one `Quantization`, " - "`Pruning` or `Distillation` object") - self.output_dir = output_dir - if hasattr(model, 'config') and isinstance(model.config, PretrainedConfig): - self.model_config = model.config - self.enable_inc_quant = False - self.enable_inc_pruning = False - self.scheduler = Scheduler() - self.scheduler.model = common.Model(model) - - if len(components) > 1: - agent = self.scheduler.combine(*components) - agent.train_func = train_func - agent.eval_func = eval_func - for component in components: - if isinstance(component, Distillation) and hasattr(component, 'criterion'): - agent.criterion = component.criterion - if isinstance(component, Quantization): - self.enable_inc_quant = True - if isinstance(component, Pruning): - self.enable_inc_pruning = True - self.scheduler.append(agent) - else: - self.scheduler.append(*components) - - def fit(self): - """Run the scheduler.""" - self.opt_model = self.scheduler() - self.save_model(self.output_dir) - if self.enable_inc_pruning == True: - stats, sparsity = self.opt_model.report_sparsity() - logger.info(stats) - logger.info(sparsity) - return self.opt_model.model - - def save_model(self, output_dir, tokenizer=None): - """Save the model and tokenizer in the output directory. - - Args: - output_dir: the path to save config.json and pytorch_model.bin. - tokenizer (object, optional): the tokenizer object, use it if you want to - save tokenizer.json in output_dir. Defaults to None. 
- """ - os.makedirs(shlex.quote(output_dir), exist_ok=True) - torch.save(self.opt_model.quantized_state_dict(), os.path.join(shlex.quote(output_dir), WEIGHTS_NAME)) - if hasattr(self, 'model_config') and isinstance(self.model_config, PretrainedConfig): - if self.enable_inc_quant == True: - self.model_config.torch_dtype = "int8" - self.model_config.save_pretrained(output_dir) - if tokenizer: # pragma: no cover - tokenizer.save_pretrained(output_dir) - logger.info("orchestrate_optimizations model and configure file have saved to {}".format( - output_dir)) - - -class NoTrainerOptimizer: # pragma: no cover - """Optimizer without using Trainer.""" - def __init__( - self, - model, - output_dir: Optional[str] = "saved_results", - ): - """Init a NoTrainerOptimizer object. - - Args: - model: FP32 model specified for low precision tuning. - output_dir: The folder for saving the results. - """ - self.model = model - self.teacher_model = None - self._eval_func = None - self._train_func = None - self._calib_func = None - self._calib_dataloader = None - self.output_dir = output_dir - self.quant_config = None - self.pruning_config = None - self.distillation_config = None - self._provider = Provider.INC.value - self.pruner = None - self.quantizer = None - self.distiller = None - self.in_training = False - self.enable_inc_quant = False - - @property - def eval_func(self): - """Get the evaluation function.""" - return self._eval_func - - @property - def train_func(self): - """Get the train function.""" - return self._train_func - - @property - def calib_func(self): - """Get the calib function.""" - return self._calib_func - - @property - def provider(self): - """Get the provider.""" - return self._provider - - @property - def calib_dataloader(self): - """Get the calibration dataloader.""" - return self._calib_dataloader - - @eval_func.setter - def eval_func(self, func: Callable): - """Set the evaluation function. - - Args: - func: evaluation function. 
- """ - self._eval_func = func - - @train_func.setter - def train_func(self, func: Callable): - """Set the train function. - - Args: - func: train function. - """ - self._train_func = func - - @provider.setter - def provider(self, provider): - """Set the provider. - - Args: - provider: optimization provider. - """ - self._provider = provider - - @calib_dataloader.setter - def calib_dataloader(self, dataloader): - """Set the calibration dataloader. - - Args: - dataloader: calibration dataloader. - """ - # transformer issue #1 - if dataloader.batch_size is None: - from .utils.utility import _build_inc_dataloader - self._calib_dataloader = _build_inc_dataloader(dataloader) - else: - self._calib_dataloader = dataloader - - def init_quantizer( - self, - quant_config, - provider: str = Provider.INC.value, - ): - """Init a Quantization object with config. - - Args: - quant_config: quantization config. - provider: define the quantization provider. - """ - from neural_compressor.experimental import Quantization - - assert isinstance(quant_config, QuantizationConfig), \ - "Please pass QuantizationConfig instance to trainer.quantize!" 
- self.quant_config = quant_config - self.metrics = self.quant_config.metrics - self._provider = Provider[provider.upper()].value - - if self.quant_config.framework == "pytorch": - if self.quant_config.approach == \ - QuantizationMode.POSTTRAININGDYNAMIC.value: - self.quant_config.framework = "pytorch" - else: - self.quant_config.framework = "pytorch_fx" - - quantizer = Quantization(self.quant_config.inc_config) - quantizer.model = common.Model(self.model) - - self.quantizer = quantizer - return quantizer - - def _inc_quantize( - self, - quant_config, - provider: str = Provider.INC.value, - ): - """Do the quantization.""" - if self.quantizer is None: - self.init_quantizer(quant_config=quant_config, provider=provider) - if self._eval_func is not None: - self.quantizer.eval_func = self._eval_func - if self._calib_func is not None: - self.quantizer.calib_func = self._calib_func - if self.quant_config.approach == QuantizationMode.POSTTRAININGSTATIC.value: - assert self._calib_dataloader is not None, \ - "Please pass calib_dataloader to NoTrainerOptimizer.calib_dataloader" - self.quantizer.calib_dataloader = self._calib_dataloader - elif self.quant_config.approach == QuantizationMode.QUANTIZATIONAWARETRAINING.value: - assert self._train_func is not None, \ - "Please pass train_func to NoTrainerOptimizer.train_func" - self.quantizer.q_func = self._train_func - self.opt_model = self.quantizer.fit() - self.enable_inc_quant = True - self.save_model(self.output_dir) - return self.opt_model.model - - def quantize( - self, - quant_config: QuantizationConfig = None, - provider: str = Provider.INC.value, - eval_func: Optional[Callable] = None, - train_func: Optional[Callable] = None, - calib_func: Optional[Callable] = None, - calib_dataloader=None, - ): - """Prepare for invoking the _inc_quantize function. - - Args: - quant_config: quantization config. - provider: define the quantization provider. - eval_func: evaluation function. - train_func: train function. 
- calib_func: calibration function. - calib_dataloader: calibration dataloader. - """ - if eval_func is not None: - self._eval_func = eval_func - if train_func is not None: - self._train_func = train_func - if calib_func is not None: - self._calib_func = calib_func - if calib_dataloader is not None: - self._calib_dataloader = calib_dataloader - - if self.quantizer is None: - self._provider = Provider[provider.upper()].value - - if self._provider == Provider.INC.value: - return self._inc_quantize(quant_config=quant_config, provider=provider) - else: - assert False, "Unsupported provider:{}".format(self._provider) - - def init_pruner( - self, - pruning_config = None, - provider: str = Provider.INC.value, - ): - """Init a Pruning object with config. - - Args: - pruning_config: pruning config. - provider: define the pruning provider. - """ - from neural_compressor.experimental import Pruning - self.pruning_config = pruning_config - self.metrics = self.pruning_config.metrics - self._provider = Provider[provider.upper()].value - - assert isinstance(self.pruning_config, PruningConfig), \ - "please pass a instance of PruningConfig to trainer.prune!" - - pruner = Pruning(self.pruning_config.inc_config) - pruner.model = common.Model(self.model) - - self.pruner = pruner - return pruner - - def prune( - self, - pruning_config = None, - provider: str = Provider.INC.value, - eval_func: Optional[Callable] = None, - train_func: Optional[Callable] = None, - ): - """Do the pruning. - - Args: - pruning_config: pruning config. - provider: define the pruning provider. - eval_func: evaluation function. - train_func: train function. 
- """ - if self.pruner is None: - self.init_pruner(pruning_config=pruning_config, provider=provider) - if eval_func is not None: - self._eval_func = eval_func - if train_func is not None: - self._train_func = train_func - - self.pruner.eval_func = self._eval_func - - self.pruner.pruning_func = self._train_func - - self.opt_model = self.pruner.fit() - self.save_model(self.output_dir) - stats, sparsity = self.opt_model.report_sparsity() - logger.info(stats) - logger.info(sparsity) - - return self.opt_model.model - - def init_distiller( - self, - distillation_config, - teacher_model, - provider: str = Provider.INC.value, - ): - """Init a Distillation object with config and the teacher model. - - Args: - distillation_config: distillation config. - teacher_model: set the teacher model. - provider: define the distillation provider. - """ - from neural_compressor.experimental import Distillation, common - assert isinstance(distillation_config, DistillationConfig), \ - "please pass a instance of PruningConfig to trainer.prune!" - self.distillation_config = distillation_config - self._provider = Provider[provider.upper()].value - self.metrics = self.distillation_config.metrics - self.teacher_model = teacher_model - - distiller = Distillation(self.distillation_config.inc_config) - distiller.model = common.Model(self.model) - distiller.teacher_model = common.Model(self.teacher_model) - - self.distiller = distiller - return distiller - - def distill( - self, - distillation_config, - teacher_model, - provider: str = Provider.INC.value, - eval_func: Optional[Callable] = None, - train_func: Optional[Callable] = None, - ): - """Do the distillation. - - Args: - distillation_config: distillation config. - teacher_model: set the teacher model. - provider: define the distillation provider. - eval_func: evaluation function. - train_func: train function. 
- """ - if self.distiller is None: - self.init_distiller( - distillation_config=distillation_config, - teacher_model=teacher_model, - provider=provider - ) - if eval_func is not None: - self._eval_func = eval_func - if train_func is not None: - self._train_func = train_func - - self.distiller.eval_func = self._eval_func - self.distiller.train_func = self._train_func - self.distiller.create_criterion() - - self.opt_model = self.distiller.fit() - self.save_model(self.output_dir) - return self.opt_model.model - - def _save_inc_int8(self, opt_model, output_dir): - """Save the optimized model in the output directory. - - Args: - opt_model: optimized model. - output_dir: output path. - """ - self.model.config.architectures = [self.model.__class__.__name__] - self.model.config.torch_dtype = "int8" - if isinstance(self.model.config, PretrainedConfig): - self.model.config.save_pretrained(output_dir) - weights_file = os.path.join(os.path.abspath( - os.path.expanduser(output_dir)), WEIGHTS_NAME) - torch.save(opt_model.quantized_state_dict(), weights_file) - - def save_model(self, output_dir, tokenizer=None): - """Save the model and tokenizer in the output directory. - - Args: - output_dir: the path to save config.json and pytorch_model.bin. - tokenizer (object, optional): the tokenizer object, use it if you want to - save tokenizer.json in output_dir. Defaults to None. 
- """ - os.makedirs(shlex.quote(output_dir), exist_ok=True) - torch.save(self.opt_model.quantized_state_dict(), os.path.join(shlex.quote(output_dir), WEIGHTS_NAME)) - if self.enable_inc_quant and self.opt_model: - self._save_inc_int8(self.opt_model, output_dir) - else: - self.model.save_pretrained(output_dir) - self.model.config.save_pretrained(output_dir) - if tokenizer: # pragma: no cover - tokenizer.save_pretrained(output_dir) - logger.info("Optimized model and configure file have saved to {}".format( - output_dir)) diff --git a/intel_extension_for_transformers/transformers/optimizer_tf.py b/intel_extension_for_transformers/transformers/optimizer_tf.py deleted file mode 100644 index e1ee4f7b416..00000000000 --- a/intel_extension_for_transformers/transformers/optimizer_tf.py +++ /dev/null @@ -1,733 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""TFOptimization: provides the optimization class for Tensorflow.""" -import logging -import pstats -import numpy as np -import os -import time -from neural_compressor import __version__ -from neural_compressor.experimental import common -try: - from neural_compressor.model.model import saved_model_session, get_model_type -except ImportError: - from neural_compressor.model.tensorflow_model import saved_model_session, get_model_type -from intel_extension_for_transformers.transformers import (DistillationConfig, - QuantizationConfig, - PruningConfig) -from intel_extension_for_transformers.transformers.quantization import QuantizationMode -from intel_extension_for_transformers.transformers.utils.metrics import Metric -from intel_extension_for_transformers.transformers.utils.utility import LazyImport -from packaging import version -from transformers import PreTrainedModel -from typing import Callable, Optional, List -from .utils.utility_tf import TFDataloader, TMPPATH, TEACHERPATH, get_filepath - -tf = LazyImport("tensorflow") -logger = logging.getLogger(__name__) -logger.setLevel('INFO') - - -class TFOptimization: - """TFOptimization is the entry class for Tensorflow to use the optimization techniques in neural compressor.""" - def __init__(self, - model: PreTrainedModel, - args, - train_dataset=None, - eval_dataset=None, - compute_metrics: Optional[Callable] = None, - criterion=None, - optimizer=None, - task_type=None, - task_id=None, - strategy=None): - """Init a TFOptimziation object. - - Args: - model: FP32 model specified for low precision tuning - args: Training arguments for TF - train_dataset: Training datas - eval_dataset: Evaluation data - compute_metrics: Metrics computing function during the evaluation process - criterion: Tuning criterion - optimizer: The optimizer you used - task_type: Used for distributed multi-node settings. Default set as "worker" - task_id: Used for distributed multi-node settings. - Set as 0 on the leader node and 1, 2, 3... 
on the followers - strategy: Equals to MultiWorkerMirroredStrategy if use distributed distributed multi-node settings, - otherwise None - """ - self.model = model - self.teacher_model = None - self.component = None - self.eval_dataset = eval_dataset - self.train_dataset = train_dataset - self._eval_func = None - self._train_func = None - self.quant_config = None - self.pruning_config = None - self.distillation_config = None - self.pruner = None - self.quantizer = None - self.distiller = None - self.in_training = False - self._input_names = None - self._output_names = None - self._inputs = None - self.compute_metrics = compute_metrics - self.args = args - self.optimizer = optimizer - self.task_type = task_type - self.task_id = task_id - self.criterion = criterion if criterion is not None else \ - self.model.loss if hasattr(self.model, "loss") else None - self.model.save_pretrained(get_filepath(TMPPATH, self.task_type, self.task_id), saved_model=True) - _, self.input_names, self.output_names = saved_model_session( - os.path.join(get_filepath(TMPPATH, self.task_type, self.task_id), "saved_model/1"), input_tensor_names=[], - output_tensor_names=[]) - self.eval_distributed = False - self.strategy = strategy - - @property - def inputs(self): - """Get the inputs.""" - return self._inputs - - @inputs.setter - def inputs(self, inputs: dict): - """Set the inputs.""" - self._inputs = inputs - - @property - def input_names(self): - """Get the input names.""" - return self._input_names - - @input_names.setter - def input_names(self, input_names: List): - """Set the input names. - - Args: - input_names: the names of inputs. - """ - self._input_names = input_names - - @property - def output_names(self): - """Get the output names.""" - return self._output_names - - @output_names.setter - def output_names(self, output_names: List): - """Set the output names. - - Args: - output_names: the names of outputs. 
- """ - self._output_names = output_names - - @property - def eval_func(self): - """Get the evaluation function.""" - return self._eval_func - - @eval_func.setter - def eval_func(self, func: Callable): - """Set the evaluation function. - - Args: - func: evaluation function. - """ - self._eval_func = func - - @property - def train_func(self): - """Get the training function.""" - return self._train_func - - @train_func.setter - def train_func(self, func: Callable): - """Set the training function. - - Args: - func: train function. - """ - self._train_func = func - - @property - def train_dataset(self): - """Get the training dataset.""" - return self._train_dataset - - @train_dataset.setter - def train_dataset(self, train_dataset): - """Set the training dataset. - - Args: - train_dataset: train dataset. - """ - assert isinstance(train_dataset, tf.data.Dataset) or train_dataset is None, \ - "train_dataset should be obj of tf.data.Dataset" - self._train_dataset = train_dataset - - @property - def eval_dataset(self): - """Get the evaluation dataset.""" - return self._eval_dataset - - @eval_dataset.setter - def eval_dataset(self, eval_dataset): - """Set the evaluation dataset. - - Args: - eval_dataset: evaluation dataset. - """ - assert isinstance(eval_dataset, tf.data.Dataset) or eval_dataset is None, \ - "eval_dataset should be obj of tf.data.Dataset" - self._eval_dataset = eval_dataset - - def builtin_eval_func(self, model): - """Customize Evaluate function to inference the model for specified metric on the validation dataset. - - Args: - model ([tf.saved_model.load]): The model will be the class of tf.saved_model.load(quantized_model_path). - - Returns: - [float]: evaluation result, the larger is better. 
- """ - model_type = None - label_ids: np.ndarray = None - try: - model_type = get_model_type(model) - except ValueError: - logger.info("use keras savedModel") - - num_examples = sum(1 for _ in ( - self._eval_dataset.unbatch() if hasattr(self._eval_dataset, "unbatch") else self._eval_dataset)) - logger.info(f"***** Running Evaluation *****") - logger.info(f" Num examples in dataset = {num_examples}") - logger.info(f" Batch size = {self.args.per_device_eval_batch_size}") - - if model_type is None: - preds: np.ndarray = None - infer = model.signatures["serving_default"] - - for idx, (inputs, labels) in enumerate(self._eval_dataset): - for name in inputs: - inputs[name] = tf.constant(inputs[name].numpy(), dtype=infer.inputs[0].dtype) - - results = infer(**inputs) - for val in results: - if preds is None: - preds = results[val].numpy() - else: - preds = np.append(preds, results[val].numpy(), axis=0) - - if label_ids is None: - label_ids = labels[0].numpy() if isinstance( - labels, list) else labels.numpy() - else: - label_ids = np.append( - label_ids, - labels[0].numpy() - if isinstance(labels, list) else labels.numpy(), - axis=0) - test_predictions = {"logits": preds} - eval_metrics = self.compute_metrics(test_predictions, label_ids) - acc = eval_metrics["accuracy"] - return acc - else: # pragma: no cover - from neural_compressor.adaptor.tf_utils.util import get_tensor_by_name - input_tensor = [get_tensor_by_name(\ - model, x) for x in self.input_names] - output_tensor = [get_tensor_by_name(\ - model, x) for x in self.output_names] - - logger.info("Start to evaluate the TensorFlow model.") - - total_time = 0 - config = tf.compat.v1.ConfigProto() - config.use_per_session_threads = 1 - config.inter_op_parallelism_threads = 1 - sess = tf.compat.v1.Session(graph=model, config=config) - feed_dict = {} - label_ids: np.ndarray = None - preds: np.ndarray = None - for idx, (inputs, labels) in enumerate(self._eval_dataset): - assert len(input_tensor) == len(inputs), \ - 'inputs 
len must equal with input_tensor' - feed_dict = {} - for name in inputs: - for tensor in input_tensor: - pos = tensor.name.rfind(":") - t_name = tensor.name if pos < 0 else tensor.name[:pos] - if name == t_name: - feed_dict[tensor] = inputs[name].numpy() - break - - start = time.time() - logits = sess.run(output_tensor, feed_dict) - total_time += time.time() - start - if not self.args.prediction_loss_only: - if isinstance(logits, tuple): - logits = logits[0] - - if isinstance(labels, tuple): - labels = labels[0].numpy() - - if isinstance(logits, - list) and len(logits) > 1: # pragma: no cover - for val in logits: - if preds is None: - preds = val - else: - preds = np.append(preds, val, axis=0) - - for val in labels: - if label_ids is None: - label_ids = val.numpy() - else: - label_ids = np.append(label_ids, - val.numpy(), - axis=0) - else: - if preds is None: - preds = logits[0] if isinstance(logits, - list) else logits - else: - preds = np.append( - preds, - logits[0] if isinstance(logits, list) else logits, - axis=0) - - if label_ids is None: - label_ids = labels[0].numpy() if isinstance( - labels, list) else labels.numpy() - else: - label_ids = np.append( - label_ids, - labels[0].numpy() - if isinstance(labels, list) else labels.numpy(), - axis=0) - - if self.compute_metrics is not None and preds is not None and label_ids is not None: - try: - loss = self.criterion( - label_ids, preds) if self.criterion is not None else None - except Exception as e: # pragma: no cover - logger.info(e) - logger.info("There is no loss function or loss compute error, \ - Please compute loss in compute_metrics function" - ) - loss = None - results = self.compute_metrics({"logits": preds}, label_ids) - if loss is not None: - results["loss"] = loss.numpy() - - if isinstance(self.metrics, list): - nums = len(self.metrics) - for metric in self.metrics: - assert metric.name in results.keys(), \ - "Please set metric from {}".format(results.keys()) - if nums == 1: - result = 
results.get(self.metrics[0].name) - else: # pragma: no cover - result = 0 - for metric in self.metrics: - assert metric.weight_ratio is not None, \ - "Please set weights for metric if you want to use more than one metric" - result += results[metric.name] * metric.weighted - logger.info("metric Accuracy: {}".format(result)) - elif isinstance(self.metrics, Metric): - assert self.metrics.name in results.keys(), \ - "Please set metric from {}".format(results.keys()) - result = results.get(self.metrics.name) - logger.info("metric Accuracy: {}".format(result)) - else: # pragma: no cover - assert False, "Please set the correct metrics format from the README" - else: - result = 0 - logger.info("Throughput: {} samples/sec".format(num_examples / total_time)) - return result - - def init_quantizer( - self, - quant_config, - ): - """Init a Quantization object with config. - - Args: - quant_config: quantization config. - """ - from neural_compressor.experimental import Quantization - - self.quant_config = QuantizationConfig() if quant_config is None else quant_config - self.quant_config.framework = "tensorflow" - self.metrics = self.quant_config.metrics - - quantizer = Quantization(self.quant_config.inc_config) - quantizer.model = common.Model( - os.path.join(get_filepath(TMPPATH, self.task_type, self.task_id),"saved_model/1"), modelType="saved_model") - - self.quantizer = quantizer - return quantizer - - def _inc_quantize( - self, - quant_config, - ): - """Do the quantization. - - Args: - quant_config: quantization config. - """ - if self.quantizer is None: - self.init_quantizer(quant_config=quant_config) - if self._eval_func is not None: - self.quantizer.eval_func = self._eval_func - else: - assert self.metrics is not None, \ - "Please pass the metrics to QuantizationConfig.metrics!" 
- self.quantizer.eval_func = self.builtin_eval_func - - if self.quant_config.approach == QuantizationMode.POSTTRAININGSTATIC.value: - if self._train_dataset is not None: - self.quantizer.calib_dataloader = TFDataloader( - self._train_dataset, - batch_size=self.args.per_device_train_batch_size) - elif self._eval_dataset is not None: - self.quantizer.calib_dataloader = TFDataloader( - self._eval_dataset, - batch_size=self.args.per_device_eval_batch_size) - else: # pragma: no cover - assert False, "Please pass calibration dataset to TFNoTrainerOptimizer.calib_dataloader" - elif self.quant_config.approach == QuantizationMode.QUANTIZATIONAWARETRAINING.value: # pragma: no cover - assert False, \ - "Unsupported quantization aware training for tensorflow framework" - - opt_model = self.quantizer.fit() - opt_model.save(self.args.output_dir) - logger.info( - "quantized model have saved to {}".format(self.args.output_dir) - ) - return opt_model.model - - def quantize( - self, - quant_config: QuantizationConfig = None, - eval_func: Optional[Callable] = None, - train_func: Optional[Callable] = None, - train_dataset=None, - eval_dataset=None, - ): - """Prepare for invoking INC quantize function. - - Args: - quant_config: quantization config. - eval_func: evaluation function. - train_func: train function. - train_dataset: train dataset. - eval_dataset: evaluation dataset. - """ - if eval_func is not None: - self._eval_func = eval_func - if train_func is not None: - self._train_func = train_func - if train_dataset is not None: - self.train_dataset = train_dataset - - if eval_dataset is not None: - self.eval_dataset = eval_dataset - - return self._inc_quantize(quant_config=quant_config) - - def init_pruner( - self, - pruning_config=None, - ): - """Init a Pruning object with config. - - Args: - pruning_config: pruning config. 
- """ - from neural_compressor.experimental import Pruning - if pruning_config.framework != 'tensorflow': - logger.warning('pruning_config.framework is {}, should be tensorflow'.format(pruning_config.framework)) - pruning_config.framework = 'tensorflow' - self.pruning_config = pruning_config - self.metrics = self.pruning_config.metrics - - assert isinstance(self.pruning_config, PruningConfig), \ - "please pass a instance of PruningConfig to trainer.prune!" - - pruner = Pruning(self.pruning_config.inc_config) - pruner.model = os.path.join(get_filepath(TMPPATH, self.task_type, self.task_id), "saved_model/1") - pruner.model.model_type = "saved_model" - - self.pruner = pruner - self.component = pruner - return pruner - - def prune( - self, - pruning_config=None, - eval_func: Optional[Callable] = None, - train_func: Optional[Callable] = None, - train_dataset=None, - eval_dataset=None, - ): - """Do the pruning. - - Args: - pruning_config: pruning config. - eval_func: evaluation function. - train_func: train function. - train_dataset: train dataset. - eval_dataset: evaluation dataset. - """ - if self.pruner is None: - self.init_pruner(pruning_config=pruning_config) - if eval_func is not None: - self.eval_func = eval_func - if train_func is not None: - self.train_func = train_func - - if train_dataset is not None: - self.train_dataset = train_dataset - - if eval_dataset is not None: - self.eval_dataset = eval_dataset - - if self._eval_func is not None: - self.pruner.eval_func = self._eval_func - else: - assert self.metrics is not None, \ - "Please pass the metrics to PruningConfig.metrics!" 
- self.pruner.eval_func = self.builtin_eval_func - - if self.train_func is not None: - if version.parse(__version__) <= version.parse("1.12"): - self.pruner.pruning_func = self._train_func - else: - self.pruner.train_func = self._train_func - else: - if version.parse(__version__) <= version.parse("1.12"): - self.pruner.pruning_func = self.build_train_func - else: - self.pruner.train_func = self.build_train_func - - opt_model = self.pruner.fit() - stats, sparsity = opt_model.report_sparsity() - logger.info(stats) - logger.info(sparsity) - - opt_model.save(self.args.output_dir) - logger.info( - "pruned model have saved to {}".format(self.args.output_dir) - ) - return opt_model.model - - def init_distiller( - self, - distillation_config, - teacher_model: PreTrainedModel, - ): - """Init a Distillation object with config and the teacher model. - - Args: - distillation_config: distillation config. - teacher_model: set the teacher model. - """ - from neural_compressor.experimental import Distillation - assert isinstance(distillation_config, DistillationConfig), \ - "please pass a instance of DistillationConfig to trainer.distill!" - - def train_step(data): - if len(data) == 3: - x, y, sample_weight = data # pragma: no cover - else: - sample_weight = None - x, y = data - with tf.GradientTape() as tape: - y_pred = self.model(x) - teacher_outputs = self.distiller.criterion.teacher_model_forward( - input=x, teacher_model=teacher_model) - - loss = self.model.compute_loss(x, y, y_pred, sample_weight) - # _on_after_compute_loss(self, input, student_output, student_loss, teacher_output=None) - # TODO: check, combile - loss = self.distiller.on_after_compute_loss( - x, y_pred.logits, loss, teacher_outputs.logits) - self.model._validate_target_and_loss(y, loss) - # Run backwards pass. 
- self.model.optimizer.minimize(loss, - self.model.trainable_variables, - tape=tape) - return self.model.compute_metrics(x, y, y_pred, sample_weight) - - self.model.train_step = train_step - # re-compile - self.model.compile( - optimizer=self.model.optimizer, - loss=self.model.loss, - metrics=self.model.compiled_metrics._user_metrics - ) - - if distillation_config.framework != 'tensorflow': - logger.warning( - 'distillation_config.framework is {}, should be tensorflow'. - format(distillation_config.framework)) - distillation_config.framework = 'tensorflow' - self.distillation_config = distillation_config - self.metrics = self.distillation_config.metrics - self.teacher_model = teacher_model - - distiller = Distillation(self.distillation_config.inc_config) - distiller.model = os.path.join(TMPPATH, "saved_model/1") - distiller.model.model_type = "saved_model" - self.teacher_model.save_pretrained(TEACHERPATH, saved_model=True) - distiller.teacher_model = os.path.join(TEACHERPATH, "saved_model/1") - distiller.teacher_model.model_type = "saved_model" - - self.distiller = distiller - self.component = distiller - return distiller - - def distill( - self, - distillation_config, - teacher_model: PreTrainedModel, - eval_func: Optional[Callable] = None, - train_func: Optional[Callable] = None, - ): - """Do the distillation. - - Args: - distillation_config: distillation config. - teacher_model: set the teacher model. - eval_func: evaluation function. - train_func: train function. 
- """ - if self.distiller is None: - self.init_distiller( - distillation_config=distillation_config, - teacher_model=teacher_model, - ) - if eval_func is not None: - self._eval_func = eval_func - if train_func is not None: - self._train_func = train_func - else: - self._train_func = self.build_train_func - - self.distiller.eval_func = self._eval_func - self.distiller.train_func = self._train_func - self.distiller.create_criterion() - - opt_model = self.distiller.fit() - opt_model.save(self.args.output_dir) - logger.info( - "distilled model have saved to {}".format(self.args.output_dir) - ) - - return opt_model.model - - def model_builder_builtin(self, arch_paras=None, model_cls=None): - """Specify model_cls to use the built-in model builder. - - Args: - arch_paras: architecture parameters. - model_cls: model information. - """ - config = self.model.config - if arch_paras is not None: - assert isinstance(arch_paras, dict), "Expect arch_paras to be a dict." - for k in arch_paras: - if hasattr(config, k): - config.__setattr__(k, arch_paras[k]) - # for MobileBERT, 'intra_bottleneck_size' is associated with - # 'true_hidden_size', and must have the same values. - if k == 'intra_bottleneck_size': - config.__setattr__('true_hidden_size', arch_paras[k]) - return model_cls.from_config(config) - - def build_train_func(self, model): - """Build the training function for pruning or distillation. 
- - Args: - model (object): the input model - """ - tf.random.set_seed(1) - epochs = 1 - - component = self.component - prune_model = self.model - model_path = get_filepath(TMPPATH, self.task_type, self.task_id) - - if 'distillation' in self.component.cfg: - epochs = max(epochs, self.component.cfg.distillation.train.get("epoch", 1)) - hooks = self.component.hooks - if 'pruning' in self.component.cfg: - epochs = max(epochs, self.component.cfg.pruning.train.get("epoch", 1)) - callbacks = self.pruner.callbacks - hooks = callbacks['tf_pruning'](self.pruner.model, self.model, - self.pruner.hooks) - - class callback(tf.keras.callbacks.Callback): - def on_train_begin(self, logs=None): - if version.parse(__version__) <= version.parse("1.12"): - hooks['pre_epoch_begin']() # pragma: no cover - else: - hooks['on_train_begin']() - - def on_train_end(self, logs=None): - if version.parse(__version__) <= version.parse("1.12"): - hooks['post_epoch_end']() # pragma: no cover - else: - hooks['on_train_end']() - - def on_epoch_begin(self, epoch, logs=None): - # pylint: disable=E1121 - hooks['on_epoch_begin'](epoch) - - def on_epoch_end(self, epoch, logs=None): - component.model._session = None - prune_model.save_pretrained(model_path, saved_model=True) - component.model = os.path.join(model_path, "saved_model/1") - component.model.model_type = "saved_model" - component.model.sess - hooks['on_epoch_end']() - - # pylint: disable=E1121 - def on_train_batch_begin(self, batch, logs=None): - if version.parse(__version__) <= version.parse("1.12"): - hooks['on_batch_begin'](batch) # pragma: no cover - else: - hooks['on_step_begin'](batch) - - def on_train_batch_end(self, batch, logs=None): - if version.parse(__version__) <= version.parse("1.12"): - hooks['on_batch_end']() # pragma: no cover - else: - hooks['on_step_end']() - - self.model.fit(self.train_dataset, - validation_data=self.eval_dataset, - epochs=epochs, - callbacks=[callback()]) - self.component.model._session = None - 
self.model.save_pretrained(get_filepath(TMPPATH, self.task_type, self.task_id), saved_model=True) diff --git a/intel_extension_for_transformers/transformers/pruning.py b/intel_extension_for_transformers/transformers/pruning.py deleted file mode 100644 index 9ed8688ebe4..00000000000 --- a/intel_extension_for_transformers/transformers/pruning.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Pruning: specify the supported pruning mode.""" - -from packaging import version -from enum import Enum -from neural_compressor.conf.config import Pruner as INCPruner -from typing import Dict, List -from neural_compressor import __version__ as nc_version - - -class PruningMode(Enum): - """Currently support three pruning modes.""" - BASICMAGNITUDE = "basic_magnitude" - PATTERNLOCK = "pattern_lock" - GROUPLASSO = "group_lasso" - - -SUPPORTED_PRUNING_MODE = set([approach.name for approach in PruningMode]) - - -class PrunerConfig(INCPruner): - """Pruner configuration.""" - def __init__(self, epoch_range: List=[0, 4], initial_sparsity_ratio: float=0.0, - target_sparsity_ratio: float=0.97, update_frequency: int=1, - prune_type: str='BasicMagnitude', method: str='per_tensor', - names: List=[], parameters: Dict=None): - """Init the pruner config. - - Args: - epoch_range: A list with length of 2. 
The first element is the start epoch and the second element - is the end epoch. Pruning will be done from the start epoch to the end epoch. - initial_sparsity_ratio: Initial sparsity goal - target_sparsity_ratio: Target sparsity goal - update_frequency: How many epochs to update once - prune_type: "BasicMagnitude", "PatternLock", or "GroupLasso" - method: TODO (Remove this parameter) - names: A list of layer names that need to be pruned - parameters: A dictionary of extra parameters - """ - if epoch_range is not None: - assert len(epoch_range) == 2, "Please set the epoch_range as [start_epoch, end_epoch]" - self.start_epoch = epoch_range[0] - self.end_epoch = epoch_range[1] - else: # pragma: no cover - self.start_epoch = None - self.end_epoch = None - self.update_frequency = update_frequency - self.target_sparsity = target_sparsity_ratio - self.initial_sparsity = initial_sparsity_ratio - self.update_frequency = update_frequency - assert prune_type.upper() in SUPPORTED_PRUNING_MODE, \ - "prune_type only support {}!".format( - [mode.lower() for mode in SUPPORTED_PRUNING_MODE] - ) - self.prune_type = PruningMode[prune_type.upper()].value - self.method = method - self.names = names - self.parameters = parameters diff --git a/intel_extension_for_transformers/transformers/quantization.py b/intel_extension_for_transformers/transformers/quantization.py index 529abb931f5..d8cc47eaab2 100644 --- a/intel_extension_for_transformers/transformers/quantization.py +++ b/intel_extension_for_transformers/transformers/quantization.py @@ -23,13 +23,3 @@ require_version("neural_compressor>=1.9.0") except: require_version("neural_compressor_full>=1.9.0", "To fix: pip install neural_compressor") - - -class QuantizationMode(Enum): - """Currently support three quantization modes.""" - POSTTRAININGSTATIC = "post_training_static_quant" - POSTTRAININGDYNAMIC = "post_training_dynamic_quant" - QUANTIZATIONAWARETRAINING = "quant_aware_training" - - -SUPPORTED_QUANT_MODE = set([approach.name for 
approach in QuantizationMode]) diff --git a/intel_extension_for_transformers/transformers/trainer.py b/intel_extension_for_transformers/transformers/trainer.py index e3720b5da59..4d4b378353e 100644 --- a/intel_extension_for_transformers/transformers/trainer.py +++ b/intel_extension_for_transformers/transformers/trainer.py @@ -27,18 +27,20 @@ import warnings from functools import partial from neural_compressor import __version__ as nc_version -from neural_compressor.experimental import Component from neural_compressor.utils import logger from intel_extension_for_transformers.transformers import ( - DistillationConfig, Provider, - PruningMode, - QuantizationConfig, - QuantizationMode, - PruningConfig, DynamicLengthConfig, BenchmarkConfig, ) +from neural_compressor.training import prepare_compression +from neural_compressor.quantization import fit +from neural_compressor.config import ( + DistillationConfig, + WeightPruningConfig, + PostTrainingQuantConfig, + QuantizationAwareTrainingConfig, +) from intel_extension_for_transformers.transformers.benchmark import benchmark from intel_extension_for_transformers.transformers.utils.metrics import Metric from intel_extension_for_transformers.transformers.utils.utility import LazyImport @@ -128,11 +130,7 @@ def __init__(self, *args, **kwargs): self._calib_dataloader = None self._resuming_checkpoint = None self.compression_ctrl = None - self.component = None self.enable_inc_quant = False - self.pruner = None - self.quantizer = None - self.distiller = None self.fp32_model = None self.opt_model = None # This flag is set for the engine in the export_to_int8_onnx API. 
@@ -141,7 +139,8 @@ def __init__(self, *args, **kwargs): self.orchestrate_opt = False self.orchestrate_opt_pruning = False self.dynamic_config = None - + self.model_config = None + self.compression_manager = None @property def resuming_checkpoint(self): @@ -239,7 +238,7 @@ def builtin_train_func(self, model): """ self.model_wrapped = model self.model = model - train_result = self.train(component=self.component, + train_result = self.train(compression_manager=self.compression_manager, resume_from_checkpoint=self._resuming_checkpoint) metrics = train_result.metrics if not self.orchestrate_opt: @@ -249,42 +248,6 @@ def builtin_train_func(self, model): self.save_state() return self.model - def init_quantizer( - self, - quant_config, - provider: str = Provider.INC.value, - ): - """Initialize the quantizer. - - Args: - quant_config: The path to the YAML configuration file or QuantizationConfig class containing - accuracy goal, quantization objective and related dataloaders etc. - provider: The provider used to quantize. - - Returns: - An objective of neural_compressor Quantization class, which can automativally searches for - optimal quantization recipes for low precision model inference and achieving best tuning - objectives. - """ - from neural_compressor.experimental import Quantization - - assert isinstance(quant_config, QuantizationConfig), \ - "Please pass QuantizationConfig instance to trainer.quantize!" 
- self.quant_config = quant_config - self.metrics = self.quant_config.metrics - self._provider = Provider[provider.upper()].value - - if self.quant_config.framework == "pytorch": - if self.quant_config.approach != QuantizationMode.POSTTRAININGDYNAMIC.value \ - or self.quant_config.strategy == 'mse_v2': - self.quant_config.framework = "pytorch_fx" - - quantizer = Quantization(self.quant_config.inc_config) - quantizer.model = self.model - - self.quantizer = quantizer - return quantizer - def _inc_quantize( self, quant_config, @@ -295,34 +258,29 @@ def _inc_quantize( self.fp32_model = copy.deepcopy(self.model) except Exception as e: # pragma: no cover logger.warning("Model deepcopy failed: {}!".format(repr(e))) - if self.quantizer is None: - self.init_quantizer(quant_config=quant_config, provider=provider) - if self._eval_func is not None: - self.quantizer.eval_func = self._eval_func - else: # pragma: no cover - assert self.metrics is not None, \ - "Please pass the metrics to QuantizationConfig.metrics!" 
- self.quantizer.eval_func = self.builtin_eval_func - - if self.quant_config.framework == "pytorch_ipex": - self.model_config = self.model.config # jit model will loss config - if self.quant_config.approach != QuantizationMode.POSTTRAININGDYNAMIC.value \ - or self.quant_config.strategy == 'mse_v2': - # pylint: disable=E1101 - self.quantizer.calib_dataloader = self.get_train_dataloader() \ - if self._calib_dataloader is None else self._calib_dataloader - if self.quant_config.approach == QuantizationMode.QUANTIZATIONAWARETRAINING.value: - self.quantizer.q_func = \ - self.builtin_train_func if self._train_func is None else self._train_func - self.component = self.quantizer - self.opt_model = self.quantizer.fit() + if isinstance(quant_config, PostTrainingQuantConfig): + if quant_config.backend == "ipex": + self.model_config = self.model.config # jit model will loss config + if self._calib_dataloader is None: + self._calib_dataloader = self.get_train_dataloader() + self.opt_model = fit(self.model, + conf=quant_config, + calib_dataloader=self._calib_dataloader, + eval_func=self._eval_func) + else: + compression_manager = prepare_compression(self.model, quant_config) + self.compression_manager = compression_manager + self.compression_manager.callbacks.on_train_begin() + self._train_func(compression_manager.model._model) + self.compression_manager.callbacks.on_train_end() + self.opt_model = self.compression_manager.model self.enable_inc_quant = True self.save_model(self.args.output_dir) return self.opt_model.model def quantize( self, - quant_config: QuantizationConfig = None, + quant_config: Union[PostTrainingQuantConfig, QuantizationAwareTrainingConfig] = None, provider: str = Provider.INC.value, eval_func: Optional[Callable] = None, train_func: Optional[Callable] = None, @@ -331,7 +289,7 @@ def quantize( """The main entry point of automatic quantization tuning. 
Args: - quant_config: The path to the YAML configuration file or QuantizationConfig class containing + quant_config: QuantizationConfig class containing accuracy goal, quantization objective and related dataloaders etc. provider: The provider used to quantize. eval_func (:obj:`Callable`, optional): The function used to evaluate the model. @@ -348,9 +306,6 @@ def quantize( if calib_dataloader is not None: self._calib_dataloader = calib_dataloader - if self.quantizer is None: - self._provider = Provider[provider.upper()].value - if self._provider == Provider.INC.value: return self._inc_quantize(quant_config=quant_config, provider=provider) else: @@ -375,54 +330,9 @@ def _save_inc_int8(self, opt_model, output_dir): torch.save(opt_model.quantized_state_dict(), weights_file) logger.info("quantized model and configure file have saved to {}".format(output_dir)) - def init_pruner( - self, - pruning_config=None, - provider: str = Provider.INC.value, - ): - """Initialize the pruner. - - Args: - pruning_config: The path to the YAML configuration file or PruningConf class containing - accuracy goal, pruning objective and related dataloaders etc. - provider: The provider used to quantize. - - Returns: - An objective of neural_compressor Pruning class. - """ - - from neural_compressor.experimental import Pruning - self.pruning_config = pruning_config - self.metrics = self.pruning_config.metrics - self._provider = Provider[provider.upper()].value - - assert isinstance(self.pruning_config, PruningConfig), \ - "please pass a instance of PruningConfig to trainer.prune!" - - pruning_start_epoch, pruning_end_epoch = self.pruning_config.epoch_range - - # pylint: disable=E1101 - if pruning_start_epoch > self.args.num_train_epochs - 1: - logger.warning(f"Pruning end epoch {pruning_start_epoch} is higher than " - f"the total number of training epoch " - f"{self.args.num_train_epochs}. 
No pruning will be applied.") - - # pylint: disable=E1101 - if pruning_end_epoch > self.args.num_train_epochs - 1: - logger.warning( - f"Pruning end epoch {pruning_end_epoch} is higher than " - f"the total number of training epoch " - f"{self.args.num_train_epochs}. The target sparsity will not be reached.") - - pruner = Pruning(self.pruning_config.inc_config) - pruner.model = self.model - - self.pruner = pruner - return pruner - def prune( self, - pruning_config=None, + pruning_config: Union[WeightPruningConfig] = None, provider: str = Provider.INC.value, eval_func: Optional[Callable] = None, train_func: Optional[Callable] = None, @@ -439,72 +349,19 @@ def prune( Returns: An objective of neural_compressor Pruning class. """ - if self.pruner is None: - self.init_pruner(pruning_config=pruning_config, provider=provider) - if eval_func is not None: - self._eval_func = eval_func - if train_func is not None: - self._train_func = train_func - - if self._eval_func is not None: - self.pruner.eval_func = self._eval_func - else: - assert self.metrics is not None, "Please pass metrics to trainer.pruning.metrics!" 
- assert self.pruning_config.pruner_config[0].prune_type == PruningMode.BASICMAGNITUDE.value, \ - "Please pass eval_func to trainer.eval_func" - self.pruner.eval_func = self.builtin_eval_func - - if self._train_func is not None: - self.pruner.pruning_func = self._train_func - else: - assert self.pruning_config.pruner_config[0].prune_type == PruningMode.BASICMAGNITUDE.value, \ - "Please pass train_func to trainer.train_func" - self.pruner.pruning_func = self.builtin_train_func - - self.component = self.pruner - self.opt_model = self.pruner.fit() - stats, sparsity = self.opt_model.report_sparsity() - logger.info(stats) - logger.info(sparsity) - + self._eval_func = self.builtin_eval_func if eval_func is None else eval_func + self._train_func = self.builtin_train_func if train_func is None else train_func + compression_manager = prepare_compression(model=self.model, confs=pruning_config) + self.compression_manager = compression_manager + self.compression_manager.callbacks.on_train_begin() + self._train_func(compression_manager.model._model) + self.compression_manager.callbacks.on_train_end() + self.opt_model = self.compression_manager.model return self.opt_model.model - def init_distiller( - self, - distillation_config, - teacher_model: Union[PreTrainedModel, torch.nn.Module], - provider: str = Provider.INC.value, - ): - """The main entry point of automatic distillation tuning. - - Args: - quant_config: The path to the YAML configuration file or DistillationConfig class containing. - accuracy goal, distillation objective and related dataloaders etc. - teacher_model: The model(torch.nn.Module) transfers knowledge to a smaller model. - provider (str): The provider used to quantize. - - Returns: - An objective of neural_compressor Distillation class. - """ - from neural_compressor.experimental import Distillation - assert isinstance(distillation_config, DistillationConfig), \ - "please pass a instance of PruningConfig to trainer.prune!" 
- self.distillation_config = distillation_config - self._provider = Provider[provider.upper()].value - self.metrics = self.distillation_config.metrics - self.teacher_model = teacher_model - - distiller = Distillation(self.distillation_config.inc_config) - distiller.model = self.model - distiller.teacher_model = self.teacher_model - - self.distiller = distiller - return distiller - def distill( self, - distillation_config, - teacher_model: Union[PreTrainedModel, torch.nn.Module], + distillation_config: Union[DistillationConfig] = None, provider: str = Provider.INC.value, eval_func: Optional[Callable] = None, train_func: Optional[Callable] = None, @@ -514,7 +371,6 @@ def distill( Args: quant_config: The path to the YAML configuration file or DistillationConfig class containing accuracy goal, distillation objective and related dataloaders etc. - teacher_model: The model(torch.nn.Module) transfers knowledge to a smaller model. provider (str): The provider used to quantize. eval_func (:obj:`Callable`, optional: The function to evaluate the model. train_func (:obj:`Callable`, optional: The function to train the model. @@ -522,34 +378,25 @@ def distill( Returns: An objective of neural_compressor Distillation class. """ - if self.distiller is None: - self.init_distiller(distillation_config=distillation_config, - teacher_model=teacher_model, - provider=provider) - if eval_func is not None: - self._eval_func = eval_func - if train_func is not None: - self._train_func = train_func - - if self._eval_func is not None: - self.distiller.eval_func = self._eval_func + if distillation_config.teacher_model is not None: + self.teacher_model = distillation_config.teacher_model else: - assert self.metrics is not None, \ - "Please pass metrics to trainer.distillation.metrics!" - self.distiller.eval_func = self.builtin_eval_func + assert False, "Please provide teacher model for DistillationConfig." 
+ self._eval_func = self.builtin_eval_func if eval_func is None else eval_func + self._train_func = self.builtin_train_func if train_func is None else train_func - self.distiller.train_func = \ - self.builtin_train_func if self._train_func is None else self._train_func - self.distiller.create_criterion() - self.component = self.distiller - self.opt_model = self.distiller.fit() + compression_manager = prepare_compression(self.model, distillation_config) + self.compression_manager = compression_manager + self.compression_manager.callbacks.on_train_begin() + self._train_func(compression_manager.model._model) + self.compression_manager.callbacks.on_epoch_end() + self.opt_model = self.compression_manager.model return self.opt_model.model def orchestrate_optimizations( self, config_list, - teacher_model: Optional[Callable] = None, eval_func: Optional[Callable] = None, train_func: Optional[Callable] = None, ): @@ -562,54 +409,25 @@ def orchestrate_optimizations( eval_func (:obj:`Callable`, optional): Evaluation function to evaluate the tuning objective. train_func (:obj:`Callable`, optional): Training function which will be combined with pruning. """ - from intel_extension_for_transformers.transformers.optimizer import Orchestrate_optimizer + # from intel_extension_for_transformers.transformers.optimizer import Orchestrate_optimizer self.orchestrate_opt = True + for config in config_list: + if isinstance(config, DistillationConfig): + self.teacher_model = config.teacher_model + assert self.teacher_model is not None, "Distillation need teacher model, please provide." 
self._eval_func = self.builtin_eval_func if eval_func is None else eval_func self._train_func = self.builtin_train_func if train_func is None else train_func - components = self.create_optimizer_builtin(config_list, teacher_model) - self.orchestrate_optimizer = Orchestrate_optimizer(self.model, components, \ - eval_func=self.eval_func, train_func=self.train_func, \ - output_dir=self.args.output_dir) - self.component = self.orchestrate_optimizer.scheduler.components[0] - torch_model = self.orchestrate_optimizer.fit() - return torch_model - - def create_optimizer_builtin(self, config_list, teacher_model=None): - """The function to create optimizer. - - Args: - config_list: The list of configs. - teacher_model (:obj:`Callable`, optional): The model(torch.nn.Module) transfers knowledge - to a smaller model. - """ - components = [] - for config in config_list: - if isinstance(config, QuantizationConfig): - component = self.init_quantizer(config) - component.eval_func = self._eval_func - component.q_func = self._train_func - self.enable_inc_quant = True - elif isinstance(config, PruningConfig): - self.orchestrate_opt_pruning = True - component = self.init_pruner(config) - component.eval_func = self._eval_func - component.pruning_func = self._train_func - elif isinstance(config, DistillationConfig): - assert isinstance(teacher_model, torch.nn.Module), \ - "The teacher_model is needed for distiller" - component = self.init_distiller(config, teacher_model) - component.eval_func = self._eval_func - component.train_func = self._train_func - component.create_criterion() - else: # pragma: no cover - assert False, "Orchestrate_optimizations config_list requires at least one" \ - " `QuantizationConfig`, `PruningConfig` or `DistillationConfig` object" - components.append(component) - return components + compression_manager = prepare_compression(model=self.model, confs=config_list) + self.compression_manager = compression_manager + 
self.compression_manager.callbacks.on_train_begin() + self._train_func(compression_manager.model._model) + self.compression_manager.callbacks.on_train_end() + self.opt_model = self.compression_manager.model + return self.opt_model.model def train( self, - component: Optional[Component] = None, + compression_manager = None, resume_from_checkpoint: Optional[Union[str, bool]] = None, trial: Union["optuna.Trial", Dict[str, Any]] = None, ignore_keys_for_eval: Optional[List[str]] = None, @@ -618,7 +436,7 @@ def train( """The main entry point tor train model. Args: - component (:obj:`Component`, `optional`): Component object handling the training process. + compression_manager (:obj:`CompressionManager`, `optional`): handling the training process. resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`): If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of :class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in `args.output_dir` as saved @@ -642,7 +460,7 @@ def train( self.is_in_train = True - self.component = component + self.compression_manager = compression_manager # do_train is not a reliable argument, as it might not be set and .train() still called, so # the following is a workaround: @@ -866,17 +684,12 @@ def train( # We just need to begin an iteration to create the randomization of the sampler. 
for _ in train_dataloader: break - if isinstance(component, Component): - if hasattr(self.component, "teacher_model"): - self.component.teacher_model._model = self._wrap_model( - self.component.teacher_model.model) - component.pre_epoch_begin(self.calib_dataloader if self.calib_dataloader else None) - if component.combination is not None and "Quantization" in component.combination: - model = component.model.model + if self.compression_manager is not None: + if self.teacher_model is not None: + self.teacher_model = self._wrap_model( + self.teacher_model) + # compression_manager.pre_epoch_begin(self.calib_dataloader if self.calib_dataloader else None) for epoch in range(epochs_trained, num_train_epochs): - if self.compression_ctrl is not None: - self.compression_ctrl.scheduler.epoch_step() - print(self.compression_ctrl.statistics().to_str()) if isinstance(train_dataloader, torch.utils.data.dataloader.DataLoader) and \ isinstance(train_dataloader.sampler, torch.utils.data.distributed.DistributedSampler): train_dataloader.sampler.set_epoch(epoch) @@ -892,8 +705,8 @@ def train( steps_in_epoch = (len(epoch_iterator) if train_dataset_is_sized else args.max_steps * args.gradient_accumulation_steps) self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control) - if isinstance(component, Component): - component.on_epoch_begin(epoch) + if self.compression_manager is not None: + self.compression_manager.callbacks.on_epoch_begin(epoch) self.in_training = True for step, inputs in enumerate(epoch_iterator): @@ -913,8 +726,8 @@ def train( if step % args.gradient_accumulation_steps == 0: self.control = self.callback_handler.on_step_begin( args, self.state, self.control) - if isinstance(component, Component): - component.on_batch_begin(step) + if compression_manager is not None: + self.compression_manager.callbacks.on_step_begin(step) training_step = self.training_step_length_adaptive if self.dynamic_config is not None and \ self.dynamic_config.dynamic_training 
else self.training_step @@ -943,8 +756,8 @@ def train( # last step in epoch but step is always smaller than gradient_accumulation_steps steps_in_epoch <= args.gradient_accumulation_steps and (step + 1) == steps_in_epoch): - if isinstance(component, Component): - component.on_post_grad() + # if isinstance(component, Component): + # component.on_post_grad() # Gradient clipping if args.max_grad_norm is not None and args.max_grad_norm > 0: @@ -962,11 +775,15 @@ def train( args.max_grad_norm, ) - # Optimizer step - if self.compression_ctrl is not None: - self.compression_ctrl.scheduler.step() + # # Optimizer step + # if self.compression_ctrl is not None: + # self.compression_ctrl.scheduler.step() + if self.compression_manager is not None: + self.compression_manager.callbacks.on_before_optimizer_step() optimizer_was_run = True self.optimizer.step() + if self.compression_manager is not None: + self.compression_manager.callbacks.on_after_optimizer_step() if optimizer_was_run: self.lr_scheduler.step() @@ -977,8 +794,9 @@ def train( self.state.curr_loss = tr_loss_step.cpu().detach().item() self.control = self.callback_handler.on_step_end(args, self.state, self.control) - if isinstance(component, Component): - component.on_batch_end() + + if self.compression_manager is not None: + compression_manager.callbacks.on_step_end() self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval) else: @@ -990,16 +808,8 @@ def train( self.in_training = False self.control = self.callback_handler.on_epoch_end(args, self.state, self.control) - if isinstance(component, Component): - # When Distillation is involved, model will be evaluated in "on_epoch_end" hook, while in SQuAD - # evaluation, "start_positions" and "end_positions" will be removed from inputs of the fx model, - # this will damage the training afterward, so use the copied model for evaluation, - # and then restore the model. 
- component.model.model = copy.deepcopy(model) - component.on_epoch_end() - component.model.model = model - if 'Distillation' in component.__repr__(): - model.train() + if self.compression_manager is not None: + self.compression_manager.callbacks.on_epoch_end() self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval) # pylint: disable=E1101 @@ -1011,11 +821,6 @@ def train( if self.control.should_training_stop: break - if isinstance(component, Component): - component.post_epoch_end() - if component.combination is not None and "Quantization" in component.combination: - self.model = component.model.model - if args.past_index and hasattr(self, "_past"): # Clean the state at the end of training delattr(self, "_past") @@ -1188,7 +993,6 @@ def training_step( return loss.detach() - def training_step_length_adaptive( self, model: torch.nn.Module, @@ -1385,11 +1189,10 @@ def compute_loss(self, model, inputs, return_outputs=False): # pragma: no cover if self.label_smoother is not None and "labels" in inputs else None teacher_logits = inputs.pop("teacher_logits") if "teacher_logits" in inputs else None - outputs = model(**inputs) - if self.in_training and hasattr(self, "component") and \ - hasattr(self.component, "criterion"): + if self.in_training and hasattr(self, "compression_manager") and \ + hasattr(self.compression_manager, "criterion"): qa_output_merger = lambda outputs: torch.vstack([ torch.vstack([sl, el]) for sl, el in zip(outputs["start_logits"], outputs["end_logits"]) @@ -1416,8 +1219,8 @@ def get_logits(outputs): if "start_positions" in inputs and "end_positions" in inputs: # for SQuAD teacher_logits = torch.vstack(list(teacher_logits)) else: - teacher_outputs = self.component.criterion.teacher_model_forward(inputs) - teacher_logits = get_logits(self.component.criterion.teacher_outputs + teacher_outputs = self.compression_manager.criterion.teacher_model_forward(inputs) + teacher_logits = 
get_logits(self.compression_manager.criterion.teacher_outputs if teacher_outputs is None else teacher_outputs) logits = get_logits(outputs) @@ -1431,14 +1234,14 @@ def get_logits(outputs): else: raise AssertionError( "Labels of input data not provided, can't compute loss") - if hasattr(self.component, "on_post_forward"): - self.component.on_post_forward(inputs, teacher_output=teacher_logits) - if hasattr(self.component.criterion, "teacher_outputs"): - self.component.criterion.teacher_outputs = \ - get_logits(self.component.criterion.teacher_outputs) - loss = self.component.criterion(logits, labels) - if hasattr(self.component.criterion, 'add_origin_loss') and \ - self.component.criterion.add_origin_loss: + if hasattr(self.compression_manager, "on_post_forward"): + self.compression_manager.on_post_forward(inputs, teacher_output=teacher_logits) + if hasattr(self.compression_manager.criterion, "teacher_outputs"): + self.compression_manager.criterion.teacher_outputs = \ + get_logits(self.compression_manager.criterion.teacher_outputs) + loss = self.compression_manager.criterion(logits, labels) + if hasattr(self.compression_manager.criterion, 'add_origin_loss') and \ + self.compression_manager.criterion.add_origin_loss: loss = loss + outputs['loss'] else: if self.args.past_index >= 0: @@ -1449,7 +1252,8 @@ def get_logits(outputs): else: # We don't use .loss here since the model may return tuples instead of ModelOutput. 
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] - loss = self.component.on_after_compute_loss(inputs, logits, loss, teacher_logits) + if self.compression_manager is not None: + loss = self.compression_manager.on_after_compute_loss(inputs, logits, loss, teacher_logits) if "start_positions" in inputs and "end_positions" in inputs: start_logits, end_logits = qa_output_spliter(logits) outputs = {"start_logits": start_logits, "end_logits": end_logits, "loss": loss} diff --git a/tests/CI/test_config.py b/tests/CI/test_config.py deleted file mode 100644 index b3d32a35d4b..00000000000 --- a/tests/CI/test_config.py +++ /dev/null @@ -1,240 +0,0 @@ -# Copyright (c) 2024 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import numpy -import shutil -import torch -import unittest - -from intel_extension_for_transformers.transformers import ( - DistillationConfig, - metrics, - objectives, - PrunerConfig, - PruningConfig, - QuantizationConfig, - TFOptimization, -) -from intel_extension_for_transformers.transformers.distillation import Criterion as DistillationCriterion -from intel_extension_for_transformers.transformers.distillation import DistillationCriterionMode -from intel_extension_for_transformers.transformers.trainer import NLPTrainer -from intel_extension_for_transformers.transformers.utils.objectives import Objective -from intel_extension_for_transformers.transformers.utils.utility_tf import TFDataloader - -from transformers import ( - AutoModelForPreTraining, - HfArgumentParser, - TFTrainingArguments, - TFAutoModelForSequenceClassification, -) - - -class CustomPruner(): - def __init__(self, start_epoch=None, end_epoch=None, initial_sparsity=None, - target_sparsity_ratio=None, update_frequency=1, prune_type='BasicMagnitude', - method='per_tensor', names=[], parameters=None): - self.start_epoch = start_epoch - self.end_epoch = end_epoch - self.update_frequency = update_frequency - self.target_sparsity_ratio = target_sparsity_ratio - self.initial_sparsity = initial_sparsity - self.update_frequency = update_frequency - - -class TestConfig(unittest.TestCase): - @classmethod - def tearDownClass(self): - shutil.rmtree('./tmp_trainer', ignore_errors=True) - - def test_quantization_config_with_init(self): - metric1 = metrics.Metric( - name="F1", greater_is_better=False, is_relative=False, criterion=0.02, weight_ratio=0.5 - ) - metric2 = metrics.Metric( - name="accuracy", greater_is_better=False, is_relative=False, - criterion=0.02, weight_ratio=0.5 - ) - objective1 = objectives.performance - objective2 = objectives.modelsize - quantization_config = QuantizationConfig( - framework="pytorch", - approach="PostTrainingDynamic", - timeout=600, - max_trials=300, - metrics=[metric1, 
metric2], - objectives=[objective1, objective2], - ) - self.assertEqual(quantization_config.approach, "post_training_dynamic_quant") - self.assertEqual(quantization_config.metrics[0].criterion, 0.02) - self.assertEqual(quantization_config.objectives[1].name, "modelsize") - self.assertEqual(quantization_config.timeout, 600) - self.assertEqual(quantization_config.max_trials, 300) - - from neural_compressor.utils import constant - quantization_config.op_wise = { - 'bert.encoder.layer.0.output.dense': constant.FP32, - } - quantization_config.resume_path = './saved_results' - quantization_config.random_seed = 1 - quantization_config.strategy = 'basic' - quantization_config.performance_only = True - quantization_config.tensorboard = True - quantization_config.sampling_size = [300] - quantization_config.input_names = ['input_ids', 'tokentype_ids'] - quantization_config.output_names = ['seq1, seq2'] - self.assertTrue(isinstance(quantization_config.op_wise, dict)) - self.assertTrue(isinstance(quantization_config.strategy, str)) - self.assertEqual(quantization_config.random_seed, 1) - self.assertEqual(quantization_config.strategy, 'basic') - self.assertTrue(quantization_config.performance_only) - self.assertTrue(quantization_config.tensorboard) - self.assertTrue(quantization_config.resume_path, './saved_results') - self.assertTrue(quantization_config.sampling_size, [300]) - self.assertTrue(quantization_config.input_names, ['input_ids', 'tokentype_ids']) - self.assertTrue(quantization_config.output_names, ['seq1, seq2']) - - def test_quantization_config(self): - quantization_config = QuantizationConfig() - quantization_config.approach = "PostTrainingStatic" - quantization_config.framework = "pytorch" - metric = metrics.Metric(name="F1", greater_is_better=False, criterion=0.02, is_relative=True) - quantization_config.metrics = metric - objective1 = objectives.Objective(name="performance", greater_is_better=True) - objective2 = objectives.Objective(name="modelsize", 
greater_is_better=False) - quantization_config.objectives = [objective1, objective2] - - quantization_config.timeout = 600 - quantization_config.max_trials = 300 - quantization_config.output_dir = "./savedresult" - - self.assertEqual(quantization_config.approach, "post_training_static_quant") - self.assertEqual(quantization_config.metrics.criterion, 0.02) - self.assertEqual(quantization_config.objectives[1].name, "modelsize") - self.assertEqual(quantization_config.timeout, 600) - self.assertEqual(quantization_config.max_trials, 300) - self.assertEqual(quantization_config.output_dir, "./savedresult") - - def test_pruning_config(self): - pruning_config = PruningConfig() - pruner_config = PrunerConfig() - metric = metrics.Metric(name="F1") - pruning_config.pruner_config = pruner_config - pruning_config.framework = "pytorch" - pruning_config.target_sparsity_ratio = 0.1 - pruning_config.epoch_range = [0, 4] - pruning_config.metrics = metric - - self.assertEqual(pruning_config.pruner_config, [pruner_config]) - self.assertEqual(pruning_config.framework, "pytorch") - self.assertEqual(pruning_config.initial_sparsity_ratio, 0) - self.assertEqual(pruning_config.target_sparsity_ratio, 0.1) - self.assertEqual(pruning_config.epoch_range, [0, 4]) - self.assertEqual(pruning_config.metrics, metric) - self.assertEqual(pruning_config.epochs, 1) - - pruning_config.pruner_config = [pruner_config] - self.assertEqual(pruning_config.pruner_config, [pruner_config]) - - def test_distillation_config(self): - metric = metrics.Metric(name="eval_F1") - criterion = DistillationCriterion( - name="KnowledgeLoss", - temperature=1.0, - loss_types=["CE", "KL"], - loss_weight_ratio=[0, 1] - ) - distillation_config = DistillationConfig( - framework="pytorch", - criterion=criterion, - metrics=metric - ) - - self.assertEqual(distillation_config.framework, "pytorch") - self.assertEqual(list(distillation_config.criterion.keys())[0], - DistillationCriterionMode[criterion.name.upper()].value) - 
self.assertEqual(distillation_config.metrics, metric) - - criterion = DistillationCriterion( - name="InterMediateLayersloss", - layer_mappings=[['classifier', 'classifier']], - loss_types=['MSE'], - loss_weight_ratio=[1.0], - add_origin_loss=False - ) - distillation_config = DistillationConfig( - framework="pytorch", - criterion=criterion, - metrics=metric - ) - - - def test_trainer_config(self): - model = AutoModelForPreTraining.from_pretrained( - 'google/bert_uncased_L-2_H-128_A-2' - ) - trainer = NLPTrainer(model) - trainer.resuming_checkpoint = 'saved_results' - trainer.eval_func = None - trainer.train_func = None - trainer.calib_dataloader = None - trainer.provider = 'inc' - self.assertEqual(trainer.resuming_checkpoint, 'saved_results') - self.assertEqual(trainer.eval_func, None) - self.assertEqual(trainer.train_func, None) - self.assertEqual(trainer.calib_dataloader, None) - self.assertEqual(trainer.provider, 'inc') - - def test_TFOptimization_config(self): - parser = HfArgumentParser(TFTrainingArguments) - args = parser.parse_args_into_dataclasses( - args=["--output_dir", "./quantized_model", - "--per_device_eval_batch_size", "2"] - ) - model = TFAutoModelForSequenceClassification.from_pretrained( - 'bhadresh-savani/distilbert-base-uncased-sentiment-sst2' - ) - tf_optimizer = TFOptimization(model, args=args[0]) - tf_optimizer.input = 1 - tf_optimizer.eval_func = None - tf_optimizer.train_func = None - self.assertEqual(tf_optimizer.input, 1) - self.assertEqual(tf_optimizer.eval_func, None) - self.assertEqual(tf_optimizer.train_func, None) - - def test_tf_dataloader(self): - def dummy_dataset(type='list'): - if type == 'list': - yield [torch.tensor(1),torch.tensor(2)], \ - [torch.tensor(1),torch.tensor(2)] - else: - yield torch.tensor(1), torch.tensor(1) - - dataloader = TFDataloader(dummy_dataset()) - for input, label in dataloader: - self.assertTrue(type(input) == list) - self.assertTrue(type(label) == list) - dataloader = 
TFDataloader(dummy_dataset(type='int')) - for input, label in dataloader: - self.assertTrue(type(input) == numpy.ndarray) - self.assertTrue(type(label) == numpy.ndarray) - - def test_Objective_config(self): - perform= Objective.performance() - model_size = Objective.modelsize() - self.assertEqual(perform.name, 'performance') - self.assertEqual(model_size.name, 'modelsize') - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/CI/test_quantization.py b/tests/CI/test_quantization.py index e3c2799e94f..7f5911855cb 100644 --- a/tests/CI/test_quantization.py +++ b/tests/CI/test_quantization.py @@ -23,11 +23,13 @@ import unittest from intel_extension_for_transformers.transformers import ( metrics, - objectives, OptimizedModel, - QuantizationConfig, - QuantizationMode, - NoTrainerOptimizer, +) +from neural_compressor.config import ( + PostTrainingQuantConfig, + QuantizationAwareTrainingConfig, + TuningCriterion, + AccuracyCriterion ) from intel_extension_for_transformers.transformers.trainer import NLPTrainer from intel_extension_for_transformers.transformers.trainer import NLPSeq2SeqTrainer @@ -93,7 +95,6 @@ def setUpClass(self): train_dataset=self.dummy_dataset, eval_dataset=self.dummy_dataset, ) - self.optimizer = NoTrainerOptimizer(self.model) @classmethod def tearDownClass(self): @@ -107,116 +108,47 @@ def tearDownClass(self): def test_fx_model_quant(self): fp32_output = self.trainer.predict(self.dummy_dataset).predictions - for mode in QuantizationMode: - print("Quantization approach:", mode.value) - self.trainer = NLPTrainer( - model=self.model, - train_dataset=self.dummy_dataset, - eval_dataset=self.dummy_dataset, - ) - - # Check fp32 jit and onnx model, only once. 
- if mode == QuantizationMode.POSTTRAININGSTATIC: - jit_model = self.trainer.export_to_jit() - self.trainer.export_to_onnx('fp32-model.onnx') - self.assertTrue(check_onnx('fp32-model.onnx', self.trainer.get_eval_dataloader())) - - self.trainer.benchmark(num_of_instance=1) - tune_metric = metrics.Metric( - name="eval_loss", greater_is_better=False, is_relative=False, criterion=0.5 - ) - quantization_config = QuantizationConfig( - approach=mode.name, - metrics=[tune_metric], - objectives=[objectives.performance] - ) - quantized_model = self.trainer.quantize(quant_config=quantization_config, provider="inc") - self.trainer.benchmark(self.trainer.args.output_dir, num_of_instance=1) - # By default, model will be saved into tmp_trainer dir. - self.trainer.save_model('./quantized_model') - - # Check int8 onnx model - if mode == QuantizationMode.POSTTRAININGSTATIC: - # test different configure to improve UT coverage - self.trainer.export_to_onnx( - save_path=None, - quant_format='Qlinear', - dtype='S8S8', - opset_version=13, - ) - self.assertTrue(check_onnx('./tmp_trainer/int8-model.onnx', self.trainer.get_eval_dataloader())) - else: - self.trainer.export_to_onnx('int8-model.onnx') - self.assertTrue(check_onnx('int8-model.onnx', self.trainer.get_eval_dataloader())) - - if mode == QuantizationMode.QUANTIZATIONAWARETRAINING: - model = onnx.load('int8-model.onnx') - tensor_list = {tensor.name:tensor for tensor in model.graph.initializer} - torch_data = quantized_model.classifier.state_dict()\ - ['module._packed_params._packed_params'][0].\ - dequantize().detach().cpu().numpy().T - from onnx.numpy_helper import to_array - onnx_data = to_array(tensor_list['classifier.weight_quantized']) - onnx_scale = to_array(tensor_list['classifier.weight_scale']) - self.assertTrue(np.allclose(torch_data, onnx_data * onnx_scale, atol=0.001)) - # Check quantized model - output_1 = self.trainer.predict(self.dummy_dataset).predictions - loaded_model = OptimizedModel.from_pretrained( - 
'./quantized_model', - ) - self.trainer.model = loaded_model - output_2 = self.trainer.predict(self.dummy_dataset).predictions - self.assertTrue((fp32_output != output_1).any()) - - # check loaded model - self.assertTrue((output_1 == output_2).all()) - - def test_fx_model_with_smooth_quant(self): - def eval_func(model): - return 1 - - def train_func(model): - return model - - trainer = NLPTrainer( + self.trainer = NLPTrainer( model=self.model, train_dataset=self.dummy_dataset, eval_dataset=self.dummy_dataset, ) + jit_model = self.trainer.export_to_jit() + self.trainer.export_to_onnx('fp32-model.onnx') + self.assertTrue(check_onnx('fp32-model.onnx', self.trainer.get_eval_dataloader())) + + self.trainer.benchmark(num_of_instance=1) tune_metric = metrics.Metric( name="eval_loss", greater_is_better=False, is_relative=False, criterion=0.5 ) - quantization_config = QuantizationConfig( - approach="PostTrainingStatic", - metrics=[tune_metric], - objectives=[objectives.performance], - recipes={"smooth_quant": True, - "smooth_quant_args": {"alpha": 0.6}, - } + self.trainer.metrics = tune_metric + quantization_config = PostTrainingQuantConfig( + approach="static", ) - recipes = quantization_config.recipes - self.assertTrue(recipes["smooth_quant"]) - quantized_model = trainer.quantize(quant_config=quantization_config) - self.assertTrue("quantize" in str(type(quantized_model.classifier.module))) - quantization_config = QuantizationConfig( - approach="PostTrainingStatic", - metrics=[tune_metric], - objectives=[objectives.performance], - recipes={} + quantized_model = self.trainer.quantize(quant_config=quantization_config, provider="inc") + self.trainer.benchmark(self.trainer.args.output_dir, num_of_instance=1) + # By default, model will be saved into tmp_trainer dir. 
+ self.trainer.save_model('./quantized_model') + # test different configure to improve UT coverage + self.trainer.export_to_onnx( + save_path=None, + quant_format='Qlinear', + dtype='S8S8', + opset_version=13, ) - quantized_model = trainer.quantize(quant_config=quantization_config, - train_func=train_func, - eval_func=eval_func) - self.assertTrue("quantize" in str(type(quantized_model.classifier.module))) - - with self.assertRaises(ValueError): - quantization_config = QuantizationConfig( - approach="PostTrainingStatic", - metrics=[tune_metric], - objectives=[objectives.performance], - recipes=[] - ) + self.assertTrue(check_onnx('./tmp_trainer/int8-model.onnx', self.trainer.get_eval_dataloader())) + # Check quantized model + output_1 = self.trainer.predict(self.dummy_dataset).predictions + loaded_model = OptimizedModel.from_pretrained( + './quantized_model', + ) + self.trainer.model = loaded_model + output_2 = self.trainer.predict(self.dummy_dataset).predictions + self.assertTrue((fp32_output != output_1).any()) + + # check loaded model + self.assertTrue((output_1 == output_2).all()) def test_functional_quant(self): def eval_func(model): @@ -226,39 +158,18 @@ def train_func(model): return model self.trainer = NLPTrainer(self.model, train_dataset=self.dummy_dataset) - quantization_config = QuantizationConfig( - approach='PostTrainingStatic', - objectives=[objectives.performance] - ) - self.trainer.quantize(quant_config=quantization_config, - provider="inc", - train_func = train_func, - eval_func = eval_func,) - - def test_no_trainer_quant(self): - def eval_func(model): - return 1 - - def train_func(model): - return model - tune_metric = metrics.Metric( name="eval_loss", greater_is_better=False, is_relative=False, criterion=0.5 ) - quantization_config = QuantizationConfig( - approach='PostTrainingStatic', - metrics=[tune_metric], - objectives=[objectives.performance] + self.trainer.metrics = tune_metric + quantization_config = PostTrainingQuantConfig( + 
approach='static', ) - self.optimizer.eval_func = eval_func - self.optimizer.train_func = train_func - self.optimizer.provider = "INC" - self.optimizer.calib_dataloader = self.trainer.get_eval_dataloader() - - opt_model = self.optimizer.quantize(quant_config=quantization_config, + self.trainer.quantize(quant_config=quantization_config, provider="inc", train_func = train_func, - eval_func = eval_func) + eval_func = eval_func,) + def test_online_models(self): model = OptimizedModel.from_pretrained( diff --git a/tests/CI/test_quantization_qa_ipex.py b/tests/CI/test_quantization_qa_ipex.py index 67e75a45bd5..0e9dd9a78ad 100644 --- a/tests/CI/test_quantization_qa_ipex.py +++ b/tests/CI/test_quantization_qa_ipex.py @@ -43,7 +43,7 @@ def test_run_qa_ipex(self): --model_name_or_path bert-large-uncased-whole-word-masking-finetuned-squad --dataset_name squad --tune - --quantization_approach PostTrainingStatic + --quantization_approach static --do_train --do_eval --max_eval_samples 100 @@ -62,7 +62,7 @@ def test_run_qa_ipex(self): run_qa.py --model_name_or_path bert-large-uncased-whole-word-masking-finetuned-squad --dataset_name squad - --quantization_approach PostTrainingStatic + --quantization_approach static --do_train --do_eval --max_eval_samples 100 diff --git a/tests/Nightly/test_distillation.py b/tests/Nightly/test_distillation.py index 118c7bb4444..8f818f38e51 100644 --- a/tests/Nightly/test_distillation.py +++ b/tests/Nightly/test_distillation.py @@ -21,14 +21,15 @@ import unittest from datasets import load_dataset, load_metric from intel_extension_for_transformers.transformers import ( - DistillationConfig, - DistillationCriterionMode, metrics, OptimizedModel, - NoTrainerOptimizer +) + +from neural_compressor.config import ( + DistillationConfig, + KnowledgeDistillationLossConfig, ) from intel_extension_for_transformers.transformers.trainer import NLPTrainer -from intel_extension_for_transformers.transformers.distillation import Criterion from transformers import ( 
AutoModelForSequenceClassification, AutoTokenizer, @@ -48,7 +49,6 @@ def setUpClass(self): self.teacher_model = AutoModelForSequenceClassification.from_pretrained( 'distilbert-base-uncased-finetuned-sst-2-english' ) - self.optimizer = NoTrainerOptimizer(self.model) raw_datasets = load_dataset("glue", "sst2")["validation"] tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased") def preprocess_function(examples): @@ -76,38 +76,32 @@ def compute_metrics(p): preds = np.argmax(preds, axis=1) return metric.compute(predictions=preds, references=p.label_ids) origin_weight = copy.deepcopy(self.model.classifier.weight) - for mode in DistillationCriterionMode: - print("Distillation approach:", mode.value) - self.trainer = NLPTrainer( - model=copy.deepcopy(self.model), - train_dataset=self.dataset, - eval_dataset=self.dataset, - compute_metrics=compute_metrics, - ) - metric_ = metrics.Metric(name="eval_accuracy") - criterion = Criterion( - name='IntermediateLayersLoss', - layer_mappings=[['classifier', 'classifier']], - loss_types=['MSE'], - loss_weight_ratio=[1.0], - add_origin_loss=False - ) if mode.value == "IntermediateLayersKnowledgeDistillationLoss" else None - distillation_conf = DistillationConfig(metrics=metric_, criterion=criterion) - distilled_model = self.trainer.distill( - distillation_config=distillation_conf, teacher_model=self.teacher_model - ) - # By default, model will be saved in tmp_trainer dir. 
- self.trainer.save_model('./distilled_model') - loaded_model = OptimizedModel.from_pretrained( - './distilled_model', - ) - distilled_weight = copy.deepcopy(distilled_model.classifier.weight) - loaded_weight = copy.deepcopy(loaded_model.classifier.weight) - # check distilled model - self.assertTrue((distilled_weight != origin_weight).any()) - # check loaded model - self.assertTrue((distilled_weight == loaded_weight).all()) - mlflow.end_run() + + self.trainer = NLPTrainer( + model=copy.deepcopy(self.model), + train_dataset=self.dataset, + eval_dataset=self.dataset, + compute_metrics=compute_metrics, + ) + metric_ = metrics.Metric(name="eval_accuracy") + self.trainer.metrics = metric_ + distillation_criterion_conf = KnowledgeDistillationLossConfig(loss_types=["CE", "KL"]) + distillation_conf = DistillationConfig(self.teacher_model, distillation_criterion_conf) + distilled_model = self.trainer.distill( + distillation_config=distillation_conf + ) + # By default, model will be saved in tmp_trainer dir. 
+ self.trainer.save_model('./distilled_model') + loaded_model = OptimizedModel.from_pretrained( + './distilled_model', + ) + distilled_weight = copy.deepcopy(distilled_model.classifier.weight) + loaded_weight = copy.deepcopy(loaded_model.classifier.weight) + # check distilled model + self.assertTrue((distilled_weight != origin_weight).any()) + # check loaded model + self.assertTrue((distilled_weight == loaded_weight).all()) + mlflow.end_run() def test_functional_distil(self): def eval_func(model): @@ -118,27 +112,12 @@ def train_func(model): self.trainer = NLPTrainer(self.model) - distillation_conf = DistillationConfig() + distillation_conf = DistillationConfig(teacher_model=self.teacher_model) self.trainer.distill(distillation_conf, - teacher_model=self.teacher_model, provider="inc", train_func = train_func, eval_func = eval_func,) - def test_no_trainer_distill(self): - def eval_func(model): - return 1 - def train_func(model): - return model - - distillation_conf = DistillationConfig() - self.optimizer.eval_func = eval_func - self.optimizer.train_func = train_func - self.optimizer.distill(distillation_conf, - teacher_model=self.teacher_model, - provider="inc", - train_func = train_func, - eval_func = eval_func,) if __name__ == "__main__": unittest.main() diff --git a/tests/Nightly/test_orchestrate_optimization.py b/tests/Nightly/test_orchestrate_optimization.py index 422b10700a9..d65ece8099c 100644 --- a/tests/Nightly/test_orchestrate_optimization.py +++ b/tests/Nightly/test_orchestrate_optimization.py @@ -20,18 +20,14 @@ import torch.utils.data as data import unittest from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import ( - PrunerConfig, - PruningConfig, +from neural_compressor.config import ( + WeightPruningConfig, DistillationConfig, - QuantizationConfig, - DistillationCriterionMode, - metrics, - objectives, - OptimizedModel, + KnowledgeDistillationLossConfig, + QuantizationAwareTrainingConfig, ) +from 
intel_extension_for_transformers.transformers import metrics from intel_extension_for_transformers.transformers.trainer import NLPTrainer -from intel_extension_for_transformers.transformers.distillation import Criterion from transformers import ( AutoModelForSequenceClassification, @@ -77,36 +73,26 @@ def compute_metrics(p): preds = p.predictions preds = np.argmax(preds, axis=1) return metric.compute(predictions=preds, references=p.label_ids) - origin_weight = copy.deepcopy(self.model.classifier.weight) - for mode in DistillationCriterionMode: - print("Distillation approach:", mode.value) - self.trainer = NLPTrainer( - model=copy.deepcopy(self.model), - train_dataset=self.dataset, - eval_dataset=self.dataset, - compute_metrics=compute_metrics, - ) - self.trainer.calib_dataloader = self.trainer.get_eval_dataloader() + + self.trainer = NLPTrainer( + model=copy.deepcopy(self.model), + train_dataset=self.dataset, + eval_dataset=self.dataset, + compute_metrics=compute_metrics, + ) + self.trainer.calib_dataloader = self.trainer.get_eval_dataloader() tune_metric = metrics.Metric( name="eval_accuracy", is_relative=True, criterion=0.5 ) - pruner_config = PrunerConfig(prune_type='PatternLock', target_sparsity_ratio=0.9) - pruning_conf = PruningConfig(framework="pytorch_fx",pruner_config=[pruner_config], metrics=tune_metric) - distillation_conf = DistillationConfig(framework="pytorch_fx", metrics=tune_metric) - - objective = objectives.performance - quantization_conf = QuantizationConfig( - approach="QuantizationAwareTraining", - max_trials=600, - metrics=[tune_metric], - objectives=[objective] - ) - - from neural_compressor.adaptor.torch_utils.symbolic_trace import symbolic_trace - self.model = symbolic_trace(self.model, is_qat=True) - self.trainer.model = self.model + self.trainer.metrics = tune_metric + pruning_conf = WeightPruningConfig([{"start_step": 0, "end_step": 2}], + target_sparsity=0.64, + pruning_scope="local") + distillation_criterion = 
KnowledgeDistillationLossConfig(loss_types=["CE", "KL"]) + distillation_conf = DistillationConfig(teacher_model=self.teacher_model, criterion=distillation_criterion) + quantization_conf = QuantizationAwareTrainingConfig() conf_list = [pruning_conf, distillation_conf, quantization_conf] - opt_model = self.trainer.orchestrate_optimizations(config_list=conf_list, teacher_model=self.teacher_model) + opt_model = self.trainer.orchestrate_optimizations(config_list=conf_list) self.assertTrue("quantize" in str(type(opt_model.classifier.module))) diff --git a/tests/Nightly/test_pruning.py b/tests/Nightly/test_pruning.py index b7284ddfe6b..01c7045bf2d 100644 --- a/tests/Nightly/test_pruning.py +++ b/tests/Nightly/test_pruning.py @@ -20,11 +20,8 @@ from intel_extension_for_transformers.transformers import ( metrics, OptimizedModel, - PrunerConfig, - PruningConfig, - PruningMode, - NoTrainerOptimizer ) +from neural_compressor.config import WeightPruningConfig from intel_extension_for_transformers.transformers.trainer import NLPTrainer from transformers import ( AutoModelForSequenceClassification, @@ -63,7 +60,6 @@ def setUpClass(self): train_dataset=self.dummy_dataset, eval_dataset=self.dummy_dataset, ) - self.optimizer = NoTrainerOptimizer(self.model) @classmethod def tearDownClass(self): @@ -72,31 +68,29 @@ def tearDownClass(self): def test_fx_model_prune(self): origin_weight = copy.deepcopy(self.model.classifier.weight) - for mode in PruningMode: - # not supported yet - if mode.name != "BasicMagnitude".upper(): - continue - self.trainer = NLPTrainer( - model=self.model, - train_dataset=self.dummy_dataset, - eval_dataset=self.dummy_dataset, - ) - metric = metrics.Metric(name="eval_loss") - pruner_config = PrunerConfig(prune_type=mode.name, target_sparsity_ratio=0.9) - pruning_conf = PruningConfig(pruner_config=pruner_config, metrics=metric) - agent = self.trainer.init_pruner(pruning_config=pruning_conf) - pruned_model = self.trainer.prune() - # By default, model will be saved 
in tmp_trainer dir. - self.trainer.save_model('./pruned_model') - loaded_model = OptimizedModel.from_pretrained( - './pruned_model', - ) - pruned_weight = copy.deepcopy(pruned_model.classifier.weight) - loaded_weight = copy.deepcopy(loaded_model.classifier.weight) - # check pruned model - self.assertTrue((pruned_weight != origin_weight).any()) - # check loaded model - self.assertTrue((pruned_weight == loaded_weight).all()) + + self.trainer = NLPTrainer( + model=self.model, + train_dataset=self.dummy_dataset, + eval_dataset=self.dummy_dataset, + ) + metric = metrics.Metric(name="eval_loss") + self.trainer.metrics = metric + pruning_conf = WeightPruningConfig([{"start_step": 0, "end_step": 2}], + target_sparsity=0.64, + pruning_scope="local") + pruned_model = self.trainer.prune(pruning_config=pruning_conf) + # By default, model will be saved in tmp_trainer dir. + self.trainer.save_model('./pruned_model') + loaded_model = OptimizedModel.from_pretrained( + './pruned_model', + ) + pruned_weight = copy.deepcopy(pruned_model.classifier.weight) + loaded_weight = copy.deepcopy(loaded_model.classifier.weight) + # check pruned model + self.assertTrue((pruned_weight != origin_weight).any()) + # check loaded model + self.assertTrue((pruned_weight == loaded_weight).all()) def test_functional_prune(self): def eval_func(model): @@ -106,27 +100,14 @@ def train_func(model): return model self.trainer = NLPTrainer(self.model) - pruner_conf = PrunerConfig(prune_type='BasicMagnitude', target_sparsity_ratio=0.9) - pruning_conf = PruningConfig(pruner_config=pruner_conf) + pruning_conf = WeightPruningConfig([{"start_step": 0, "end_step": 2}], + target_sparsity=0.64, + pruning_scope="local") self.trainer.prune(pruning_conf, provider="inc", train_func = train_func, eval_func = eval_func,) - def test_no_trainer_prune(self): - def eval_func(model): - return 1 - - def train_func(model): - return model - pruner_conf = PrunerConfig(prune_type='BasicMagnitude', target_sparsity_ratio=0.9) - 
pruning_conf = PruningConfig(pruner_config=pruner_conf) - self.optimizer.eval_func = eval_func - self.optimizer.train_func = train_func - self.optimizer.prune(pruning_conf, - provider="inc", - train_func = train_func, - eval_func = eval_func,) if __name__ == "__main__": unittest.main() diff --git a/tests/Nightly/test_tf_distillation.py b/tests/Nightly/test_tf_distillation.py deleted file mode 100644 index d5521845439..00000000000 --- a/tests/Nightly/test_tf_distillation.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (c) 2024 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import shutil -import numpy as np -import unittest -import tensorflow as tf -from datasets import load_dataset, load_metric -from transformers import (TFAutoModelForSequenceClassification, AutoTokenizer, - HfArgumentParser, TFTrainingArguments, set_seed, - DefaultDataCollator) -from intel_extension_for_transformers.transformers import (DistillationConfig, metrics) -from intel_extension_for_transformers.transformers.distillation import Criterion -from intel_extension_for_transformers.transformers.optimizer_tf import TFOptimization - - -class TestDistillation(unittest.TestCase): - @classmethod - def setUpClass(self): - set_seed(42) - self.model = TFAutoModelForSequenceClassification.from_pretrained( - 'hf-internal-testing/tiny-random-distilbert') - self.teacher_model = TFAutoModelForSequenceClassification.from_pretrained( - 'hf-internal-testing/tiny-random-DistilBertForSequenceClassification') - - raw_datasets = load_dataset("glue", "sst2")["validation"] - self.tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-DistilBertForSequenceClassification") - non_label_column_names = [ - name for name in raw_datasets.column_names if name != "label" - ] - - def preprocess_function(examples): - # Tokenize the texts - args = ((examples['sentence'], )) - result = self.tokenizer(*args, - padding=True, - max_length=64, - truncation=True) - return result - - raw_datasets = raw_datasets.map(preprocess_function, - batched=True, - load_from_cache_file=False) - data_collator = DefaultDataCollator(return_tensors="tf") - dataset = raw_datasets.select(range(10)) - self.dummy_dataset = dataset.to_tf_dataset( - columns=[ - col for col in dataset.column_names - if col not in set(non_label_column_names + ["label"]) - ], - shuffle=False, - batch_size=2, - collate_fn=data_collator, - drop_remainder=False, - # `label_cols` is needed for user-defined losses, such as in this example - # datasets v2.3.x need "labels", not "label" - label_cols=["labels"] - if "label" in 
dataset.column_names else None, - ) - parser = HfArgumentParser(TFTrainingArguments) - self.args = parser.parse_args_into_dataclasses(args=[ - "--output_dir", "./distilled_model", - "--per_device_eval_batch_size", "2" - ])[0] - optimizer = tf.keras.optimizers.Adam( - learning_rate=self.args.learning_rate, - beta_1=self.args.adam_beta1, - beta_2=self.args.adam_beta2, - epsilon=self.args.adam_epsilon, - clipnorm=self.args.max_grad_norm, - ) - loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( - from_logits=True, reduction=tf.keras.losses.Reduction.SUM) - metrics = ["accuracy"] - self.model.compile(optimizer=optimizer, loss=loss_fn, metrics=metrics) - - @classmethod - def tearDownClass(self): - shutil.rmtree('./tmp', ignore_errors=True) - shutil.rmtree('./distilled_model', ignore_errors=True) - - def test_tf_model_distil(self): - metric = load_metric("glue", "sst2") - def compute_metrics(preds, label_ids): - preds = preds["logits"] - preds = np.argmax(preds, axis=1) - result = metric.compute(predictions=preds, references=label_ids) - if len(result) > 1: - result["combined_score"] = np.mean(list(result.values())).item() - return result - - self.optimizer = TFOptimization(model=self.model, - args=self.args, - train_dataset=self.dummy_dataset, - compute_metrics=compute_metrics) - metric_ = metrics.Metric(name="eval_accuracy") - # 'CrossEntropyLoss', 'SparseCategoricalCrossentropy', 'KnowledgeDistillationLoss' - criterion = Criterion(name='KnowledgeLoss', - layer_mappings=[['classifier', 'classifier']], - loss_types=['CE', 'CE'], - loss_weight_ratio=[0.5, 0.5], - add_origin_loss=False) - distillation_conf = DistillationConfig(metrics=metric_, - criterion=criterion) - def eval_func(model): - return 1 - distilled_model = self.optimizer.distill( - distillation_config=distillation_conf, - teacher_model=self.teacher_model, - eval_func=eval_func, - train_func=self.optimizer.build_train_func - ) - distilled_model2 = self.optimizer.distill( - 
distillation_config=distillation_conf, - teacher_model=self.teacher_model, - eval_func=None, - train_func=None - ) - self.assertEqual(distilled_model.signatures['serving_default'].output_shapes['Identity'], distilled_model2.signatures['serving_default'].output_shapes['Identity']) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/Nightly/test_tf_pruning.py b/tests/Nightly/test_tf_pruning.py deleted file mode 100644 index 5fa4806957a..00000000000 --- a/tests/Nightly/test_tf_pruning.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (c) 2024 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from intel_extension_for_transformers.transformers.utils.utility_tf import get_filepath -import numpy as np -import os -import shutil -import tensorflow as tf -import unittest -from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import ( - metrics, - PrunerConfig, - PruningConfig, - TFOptimization -) -from transformers import ( - AutoTokenizer, - DefaultDataCollator, - HfArgumentParser, - TFAutoModelForSequenceClassification, - TFTrainingArguments, -) - -os.environ["WANDB_DISABLED"] = "true" - - -class TestTFPruning(unittest.TestCase): - @classmethod - def setUpClass(self): - self.model = TFAutoModelForSequenceClassification.from_pretrained( - 'hf-internal-testing/tiny-random-DistilBertForSequenceClassification' - ) - raw_datasets = load_dataset("glue", "sst2")["validation"] - tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-DistilBertForSequenceClassification") - non_label_column_names = [name for name in raw_datasets.column_names if name != "label"] - def preprocess_function(examples): - # Tokenize the texts - args = ( - (examples["sentence"],) - ) - result = tokenizer(*args, padding=True, max_length=64, truncation=True) - - return result - raw_datasets = raw_datasets.map(preprocess_function, batched=True, load_from_cache_file=False) - data_collator = DefaultDataCollator(return_tensors="tf") - dataset = raw_datasets.select(range(10)) - self.dummy_dataset = dataset.to_tf_dataset( - columns=[col for col in dataset.column_names if col not in - set(non_label_column_names + ["label"])], - shuffle=False, - batch_size=2, - collate_fn=data_collator, - drop_remainder=False, - # `label_cols` is needed for user-defined losses, such as in this example - # datasets v2.3.x need "labels", not "label" - label_cols=["labels"] if "label" in dataset.column_names else None, - ) - parser = HfArgumentParser(TFTrainingArguments) - self.args = parser.parse_args_into_dataclasses(args=["--output_dir", 
"./quantized_model", - "--per_device_eval_batch_size", "2"])[0] - optimizer = tf.keras.optimizers.Adam( - learning_rate=self.args.learning_rate, - beta_1=self.args.adam_beta1, - beta_2=self.args.adam_beta2, - epsilon=self.args.adam_epsilon, - clipnorm=self.args.max_grad_norm, - ) - loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( - from_logits=True, reduction=tf.keras.losses.Reduction.SUM - ) - metrics = ["accuracy"] - self.model.compile(optimizer=optimizer, loss=loss_fn, metrics=metrics) - - @classmethod - def tearDownClass(self): - shutil.rmtree('./tmp', ignore_errors=True) - shutil.rmtree('./quantized_model', ignore_errors=True) - - def test_tf_model_quant(self): - # check whether it is possible to set distributed environment - # only for coverage currently - from intel_extension_for_transformers.transformers.utils.utility_tf import distributed_init - distributed_init(["localhost:12345","localhost:23456"], "worker", 0) - self.assertTrue(os.environ['TF_CONFIG'] != None) - del os.environ['TF_CONFIG'] - # check whether filepath can be set correctly if using distributed environment - # only for coverage currently - from intel_extension_for_transformers.transformers.utils.utility_tf import get_filepath - self.assertTrue(type(get_filepath("dummy", "worker", 0)) == str) - self.assertTrue(type(get_filepath("dummy", "worker", 1)) == str) - self.assertTrue(get_filepath("dummy", "worker", 0) != get_filepath("dummy", "worker", 1)) - - metric = load_metric("glue", "sst2") - def compute_metrics(preds, label_ids): - preds = preds["logits"] - preds = np.argmax(preds, axis=1) - result = metric.compute(predictions=preds, references=label_ids) - if len(result) > 1: - result["combined_score"] = np.mean(list(result.values())).item() - return result - self.optimizer = TFOptimization( - model=self.model, - args=self.args, - train_dataset=self.dummy_dataset, - eval_dataset=self.dummy_dataset, - compute_metrics=compute_metrics, - ) - tune_metric = metrics.Metric( - 
name="accuracy", greater_is_better=True, is_relative=True, criterion=0.01, - ) - prune_type = 'BasicMagnitude' - target_sparsity_ratio = 0.1 - pruner_config = PrunerConfig(prune_type=prune_type, target_sparsity_ratio=target_sparsity_ratio) - pruning_conf = PruningConfig( - epochs=int(1), pruner_config=pruner_config, metrics=tune_metric - ) - p_model = self.optimizer.prune(pruning_config=pruning_conf) - loaded_model = tf.saved_model.load(self.args.output_dir) - p_model = self.optimizer.prune(pruning_config=pruning_conf, - train_dataset=self.dummy_dataset, - eval_dataset=self.dummy_dataset,) - - def eval_func(model): - return 1 - - def train_func(model): - return model - - self.optimizer.prune(pruning_config=pruning_conf, - train_func=train_func, - eval_func=eval_func) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/Nightly/test_tf_quantization.py b/tests/Nightly/test_tf_quantization.py deleted file mode 100644 index 3162950c68a..00000000000 --- a/tests/Nightly/test_tf_quantization.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright (c) 2024 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import numpy as np -import os -import shutil -import tensorflow as tf -import unittest -from datasets import load_dataset, load_metric -from intel_extension_for_transformers.transformers import ( - metrics, - objectives, - QuantizationConfig, - TFOptimization -) -# from intel_extension_for_transformers.transformers import metrics, objectives -from transformers import ( - AutoTokenizer, - DefaultDataCollator, - HfArgumentParser, - TFAutoModelForSequenceClassification, - TFTrainingArguments, -) - -os.environ["WANDB_DISABLED"] = "true" - - -class TestTFQuantization(unittest.TestCase): - @classmethod - def setUpClass(self): - self.model = TFAutoModelForSequenceClassification.from_pretrained( - 'hf-internal-testing/tiny-random-DistilBertForSequenceClassification' - ) - raw_datasets = load_dataset("glue", "sst2")["validation"] - tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-DistilBertForSequenceClassification") - non_label_column_names = [name for name in raw_datasets.column_names if name != "label"] - def preprocess_function(examples): - # Tokenize the texts - args = ( - (examples["sentence"],) - ) - result = tokenizer(*args, padding=True, max_length=64, truncation=True) - - return result - raw_datasets = raw_datasets.map(preprocess_function, batched=True, load_from_cache_file=False) - data_collator = DefaultDataCollator(return_tensors="tf") - dataset = raw_datasets.select(range(10)) - self.dummy_dataset = dataset.to_tf_dataset( - columns=[col for col in dataset.column_names if col not in - set(non_label_column_names + ["label"])], - shuffle=False, - batch_size=2, - collate_fn=data_collator, - drop_remainder=False, - # `label_cols` is needed for user-defined losses, such as in this example - # datasets v2.3.x need "labels", not "label" - label_cols=["labels"] if "label" in dataset.column_names else None, - ) - - - @classmethod - def tearDownClass(self): - shutil.rmtree('./tmp', ignore_errors=True) - shutil.rmtree('./quantized_model', 
ignore_errors=True) - - def test_tf_model_quant(self): - parser = HfArgumentParser(TFTrainingArguments) - args = parser.parse_args_into_dataclasses(args=["--output_dir", "./quantized_model", - "--per_device_eval_batch_size", "2"]) - metric = load_metric("glue", "sst2") - def compute_metrics(preds, label_ids): - preds = preds["logits"] - preds = np.argmax(preds, axis=1) - result = metric.compute(predictions=preds, references=label_ids) - if len(result) > 1: - result["combined_score"] = np.mean(list(result.values())).item() - return result - self.optimizer = TFOptimization( - model=self.model, - args=args[0], - compute_metrics=compute_metrics - ) - tune_metric = metrics.Metric( - name="accuracy", greater_is_better=True, is_relative=False, criterion=0.5 - ) - quantization_config = QuantizationConfig( - framework="tensorflow", - approach="POSTTRAININGSTATIC", - metrics=[tune_metric], - objectives=[objectives.performance] - ) - quantized_model = self.optimizer.quantize(quant_config=quantization_config, - train_dataset=self.dummy_dataset, eval_dataset=self.dummy_dataset) - loaded_model = tf.saved_model.load(args[0].output_dir) - - def eval_func(model): - return 1 - - def train_func(model): - return model - - self.optimizer.quantize(quant_config=quantization_config, - train_func=train_func, - eval_func=eval_func) - - quantization_config = QuantizationConfig( - framework="tensorflow", - approach="POSTTRAININGSTATIC", - metrics=[tune_metric], - objectives=[objectives.performance], - recipes={"first_conv_or_matmul_quantization": True, - "last_conv_or_matmul_quantization": True, - } - ) - self.optimizer.quantize(quant_config=quantization_config, - train_func=train_func, - eval_func=eval_func) - - -if __name__ == "__main__": - unittest.main() diff --git a/workflows/compression_aware_training/config/README.md b/workflows/compression_aware_training/config/README.md index c72b4ba0af9..86e13e21260 100644 --- a/workflows/compression_aware_training/config/README.md +++ 
b/workflows/compression_aware_training/config/README.md @@ -23,7 +23,7 @@ output_dir: Path to output directory. overwrite_output_dir: Whether to overwrite Output cache. perf_tol: Performance tolerance when optimizing the model. quantization: Needs to be true in this case. -quantization_approach: Quantization approach. Supported approach are PostTrainingStatic, PostTrainingDynamic and QuantizationAwareTraining. +quantization_approach: Quantization approach. Supported approach are static, dynamic and qat. is_relative: Metric tolerance model, expected to be relative or absolute. int8: Load int8 model. ``` @@ -41,7 +41,7 @@ output_dir: Path to output directory. overwrite_output_dir: Whether to overwrite Output cache. perf_tol: Performance tolerance when optimizing the model. quantization: Needs to be true in this case. -quantization_approach: Quantization approach. Supported approach are PostTrainingStatic, PostTrainingDynamic and QuantizationAwareTraining. +quantization_approach: Quantization approach. Supported approach are static, dynamic and qat. is_relative: Metric tolerance model, expected to be relative or absolute. int8: Load int8 model. 
``` diff --git a/workflows/compression_aware_training/config/config.yaml b/workflows/compression_aware_training/config/config.yaml index 48e31757b6e..0bc18386cfe 100755 --- a/workflows/compression_aware_training/config/config.yaml +++ b/workflows/compression_aware_training/config/config.yaml @@ -25,6 +25,6 @@ overwrite_output_dir: true perf_tol: 0.03 quantization: true -quantization_approach: "QuantizationAwareTraining" +quantization_approach: "qat" is_relative: true int8: false diff --git a/workflows/compression_aware_training/config/distillation_with_qat.yaml b/workflows/compression_aware_training/config/distillation_with_qat.yaml index 48e31757b6e..0bc18386cfe 100755 --- a/workflows/compression_aware_training/config/distillation_with_qat.yaml +++ b/workflows/compression_aware_training/config/distillation_with_qat.yaml @@ -25,6 +25,6 @@ overwrite_output_dir: true perf_tol: 0.03 quantization: true -quantization_approach: "QuantizationAwareTraining" +quantization_approach: "qat" is_relative: true int8: false diff --git a/workflows/compression_aware_training/config/qat.yaml b/workflows/compression_aware_training/config/qat.yaml index faf0416ed2f..be783e839bf 100644 --- a/workflows/compression_aware_training/config/qat.yaml +++ b/workflows/compression_aware_training/config/qat.yaml @@ -24,6 +24,6 @@ overwrite_output_dir: true perf_tol: 0.03 quantization: true -quantization_approach: "QuantizationAwareTraining" +quantization_approach: "qat" is_relative: true int8: false diff --git a/workflows/compression_aware_training/config/sat.yaml b/workflows/compression_aware_training/config/sat.yaml index 7731f0dfb69..439828b0f1f 100755 --- a/workflows/compression_aware_training/config/sat.yaml +++ b/workflows/compression_aware_training/config/sat.yaml @@ -16,7 +16,7 @@ model_name_or_path: "Intel/distilbert-base-uncased-sparse-90-unstructured-pruneo teacher_model_name_or_path: "distilbert-base-uncased-finetuned-sst-2-english" task_name: "sst2" sat: true -quantization_approach: 
"QuantizationAwareTraining" +quantization_approach: "qat" learning_rate: 0.000012 num_train_epochs: 6 do_train: true diff --git a/workflows/compression_aware_training/src/itrex_opt.py b/workflows/compression_aware_training/src/itrex_opt.py index b727d22c412..fcfd5f7eab7 100755 --- a/workflows/compression_aware_training/src/itrex_opt.py +++ b/workflows/compression_aware_training/src/itrex_opt.py @@ -28,14 +28,19 @@ # Need to use itrex domain toolkit from intel_extension_for_transformers.transformers import ( - DistillationConfig, - PrunerConfig, - PruningConfig, OptimizedModel, - QuantizationConfig, metrics, objectives, ) +from neural_compressor.config import ( + WeightPruningConfig, + DistillationConfig, + KnowledgeDistillationLossConfig, + QuantizationAwareTrainingConfig, + PostTrainingQuantConfig, + TuningCriterion, + AccuracyCriterion +) from intel_extension_for_transformers.transformers.trainer import NLPTrainer from torch.utils.data import DataLoader from tqdm.auto import tqdm @@ -529,7 +534,7 @@ def compute_metrics(p: EvalPrediction): # Initialize and setup our itrexTrainer from neural_compressor.adaptor.torch_utils.symbolic_trace import symbolic_trace - self.model = symbolic_trace(self.model, self.optim_args.quantization_approach=="QuantizationAwareTraining") + self.model = symbolic_trace(self.model, self.optim_args.quantization_approach=="qat") self.trainer = NLPTrainer( model=self.model, @@ -746,30 +751,38 @@ def _do_quantization_aware_training(self): raise ValueError("do_eval must be set to True for quantization.") self.trainer.save_model(self.training_args.output_dir) - if self.optim_args.quantization_approach != "PostTrainingDynamic": + if self.optim_args.quantization_approach != "dynamic": if not self.training_args.do_train: raise ValueError( "do_train must be set to True for static and aware training quantization." 
) - elif self.optim_args.quantization_approach == "QuantizationAwareTraining": - early_stopping_patience = 6 - early_stopping_threshold = 0.001 # optional - # trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, - # early_stopping_threshold)) tune_metric = metrics.Metric( - name=metric_name, - is_relative=self.optim_args.is_relative, - criterion=self.optim_args.perf_tol, + name=metric_name, is_relative=self.optim_args.is_relative, criterion=self.optim_args.perf_tol ) + self.trainer.metrics = tune_metric objective = objectives.performance - quantization_config = QuantizationConfig( - approach=self.optim_args.quantization_approach, - max_trials=600, - metrics=[tune_metric], - objectives=[objective], - sampling_size=len(self.train_dataset) // 20, - ) + tuning_criterion = TuningCriterion(max_trials=600, objective=[objective.name]) + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, # optional. + criterion="relative" if self.optim_args.is_relative else "absolute", # optional. Available values are "relative" and "absolute". + tolerable_loss=self.optim_args.perf_tol, # optional. 
+ ) + if self.optim_args.quantization_approach != "qat": + quantization_config = PostTrainingQuantConfig( + approach=self.optim_args.quantization_approach, + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + else: + quantization_config = QuantizationAwareTrainingConfig( + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion + ) + early_stopping_patience = 2 + early_stopping_threshold = 0.001 # optional + self.trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, \ + early_stopping_threshold)) model = self.trainer.quantize(quant_config=quantization_config) if self.optim_args.benchmark or self.optim_args.accuracy_only: @@ -939,23 +952,15 @@ def get_logits(teacher_model, train_dataset, teacher_train_dataset): tune_metric = metrics.Metric( name=metric_name, is_relative=self.optim_args.is_relative, criterion=self.optim_args.perf_tol ) - prune_type = 'PatternLock' \ - if self.optim_args.pruning_approach else self.optim_args.pruning_approach - target_sparsity_ratio = self.optim_args.target_sparsity_ratio \ - if self.optim_args.target_sparsity_ratio else None - pruner_config = PrunerConfig(prune_type=prune_type, target_sparsity_ratio=target_sparsity_ratio) - pruning_conf = PruningConfig(framework="pytorch_fx",pruner_config=[pruner_config], metrics=tune_metric) - distillation_conf = DistillationConfig(framework="pytorch_fx", metrics=tune_metric) - - objective = objectives.performance - quantization_conf = QuantizationConfig( - approach=self.optim_args.quantization_approach, - max_trials=600, - metrics=[tune_metric], - objectives=[objective] - ) + self.trainer.metrics = tune_metric + pruning_conf = WeightPruningConfig([{"start_step": 0, "end_step": 2}], + target_sparsity=self.optim_args.target_sparsity_ratio, + pruning_scope="local") + distillation_criterion = KnowledgeDistillationLossConfig(loss_types=["CE", "KL"]) + distillation_conf = DistillationConfig(teacher_model=self.teacher_model, 
criterion=distillation_criterion) + quantization_conf = QuantizationAwareTrainingConfig() conf_list = [pruning_conf, distillation_conf, quantization_conf] - model = self.trainer.orchestrate_optimizations(config_list=conf_list, teacher_model=self.teacher_model) + model = self.trainer.orchestrate_optimizations(config_list=conf_list) # ############################################################ print( diff --git a/workflows/compression_aware_training/src/utils.py b/workflows/compression_aware_training/src/utils.py index 46467c2b6ab..2ce1a4c819e 100755 --- a/workflows/compression_aware_training/src/utils.py +++ b/workflows/compression_aware_training/src/utils.py @@ -187,7 +187,7 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply prune."}, ) pruning_approach: Optional[str] = field( - default="BasicMagnitude", + default="magnitude", metadata={"help": "Pruning approach. Supported approach is basic_magnite."}, ) target_sparsity_ratio: Optional[float] = field( @@ -207,9 +207,9 @@ class OptimizationArguments: metadata={"help": "Whether or not to apply quantization."}, ) quantization_approach: Optional[str] = field( - default="PostTrainingStatic", - metadata={"help": "Quantization approach. Supported approach are PostTrainingStatic, " - "PostTrainingDynamic and QuantizationAwareTraining."}, + default="static", + metadata={"help": "Quantization approach. 
Supported approach are static, " + "dynamic and qat."}, ) metric_name: Optional[str] = field( default=None, diff --git a/workflows/dlsa/run_dlsa.py b/workflows/dlsa/run_dlsa.py index 583e37847ea..92d6827998d 100644 --- a/workflows/dlsa/run_dlsa.py +++ b/workflows/dlsa/run_dlsa.py @@ -39,10 +39,10 @@ ) from intel_extension_for_transformers.transformers import ( OptimizedModel, - QuantizationConfig, metrics, objectives, ) +from neural_compressor.config import PostTrainingQuantConfig, TuningCriterion from intel_extension_for_transformers.transformers.trainer import NLPTrainer hf_logging.set_verbosity_info() @@ -288,12 +288,12 @@ def preprocess(examples): if args.do_quantize: with track("Quantize"): metric = metrics.Metric(name="eval_acc", is_relative=True, criterion=0.01) - q_config = QuantizationConfig( - framework="pytorch_ipex", - approach="PostTrainingStatic", - max_trials=200, # set the Max tune times - metrics=[metric], - objectives=[objectives.performance], + trainer.metrics = metric + tuning_criterion = TuningCriterion(max_trials=600) + q_config = PostTrainingQuantConfig( + backend="ipex", + approach="static", + tuning_criterion=tuning_criterion ) def eval_func(model): diff --git a/workflows/hf_finetuning_and_inference_nlp/src/finetune_itrex.py b/workflows/hf_finetuning_and_inference_nlp/src/finetune_itrex.py index 9e3ba13c89d..3cdab98655f 100644 --- a/workflows/hf_finetuning_and_inference_nlp/src/finetune_itrex.py +++ b/workflows/hf_finetuning_and_inference_nlp/src/finetune_itrex.py @@ -24,9 +24,6 @@ Trainer, ) from intel_extension_for_transformers.transformers import ( - QuantizationConfig, - PruningConfig, - PrunerConfig, metrics, objectives, ) diff --git a/workflows/hf_finetuning_and_inference_nlp/src/infer_itrex.py b/workflows/hf_finetuning_and_inference_nlp/src/infer_itrex.py index b666c6f8bbc..3b6c743c485 100644 --- a/workflows/hf_finetuning_and_inference_nlp/src/infer_itrex.py +++ b/workflows/hf_finetuning_and_inference_nlp/src/infer_itrex.py @@ -17,7 
+17,6 @@ import torch from os import path from intel_extension_for_transformers.transformers import ( - QuantizationConfig, metrics, objectives, ) @@ -29,7 +28,7 @@ DataCollatorWithPadding, Trainer, ) - +from neural_compressor.config import PostTrainingQuantConfig, TuningCriterion from infer import DlsaInference from utils import PredsLabels, compute_metrics, save_performance_metrics @@ -76,12 +75,11 @@ def _load_model(self): ) metric = metrics.Metric(name="eval_acc", is_relative=True, criterion=0.03) - q_config = QuantizationConfig( - framework="pytorch", - approach="PostTrainingStatic", - max_trials=200, # set the Max tune times - metrics=[metric], - objectives=[objectives.performance], + self.trainer.metrics = metric + tuning_criterion = TuningCriterion(max_trials=200) + q_config = PostTrainingQuantConfig( + approach="static", + tuning_criterion=tuning_criterion, ) eval_dataloader = self.trainer.get_eval_dataloader() self.model = self.trainer.quantize( From 8c0242edce75a9a1c4f4c7eca084d592cfd6e0ec Mon Sep 17 00:00:00 2001 From: "Sun, Xuehao" Date: Wed, 26 Jun 2024 13:19:35 +0800 Subject: [PATCH 21/21] update torch version to 2.3.0 in example/requirements.txt (#1635) Signed-off-by: Sun, Xuehao --- .../deployment/squad/ipex/bert_large/requirements.txt | 2 +- .../squad/ipex/distilbert_base_uncased_sparse/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/bert_large/requirements.txt b/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/bert_large/requirements.txt index 1a42dc1dc28..1b6c3d214c6 100644 --- a/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/bert_large/requirements.txt +++ b/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/bert_large/requirements.txt @@ -1,5 +1,5 @@ datasets >= 1.8.0 -torch == 2.0 +torch == 2.3.0 transformers intel-extension-for-pytorch==2.3.0 wandb diff --git 
a/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased_sparse/requirements.txt b/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased_sparse/requirements.txt index 1a42dc1dc28..1b6c3d214c6 100644 --- a/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased_sparse/requirements.txt +++ b/examples/huggingface/pytorch/question-answering/deployment/squad/ipex/distilbert_base_uncased_sparse/requirements.txt @@ -1,5 +1,5 @@ datasets >= 1.8.0 -torch == 2.0 +torch == 2.3.0 transformers intel-extension-for-pytorch==2.3.0 wandb